What would be the HSV range for this in OpenCV? - C++

I am a bit confused right now; I can't find the correct values for this green square. Here is the image.
The HSV values that I chose are:
cv::inRange(src, Scalar(25, 20, 20), Scalar(85, 255, 200), src);
Here is the output from this:
What are the correct HSV values that I should choose?

These ranges should work well enough:
inRange(hsv, Scalar(35, 20, 20), Scalar(85, 255, 255), mask);
Remember that OpenCV stores images as BGR, and not RGB. So when you convert to HSV be sure to use COLOR_BGR2HSV, and not COLOR_RGB2HSV.
#include <opencv2/opencv.hpp>
using namespace cv;
int main()
{
Mat3b img = imread("path_to_image");
Mat3b hsv;
cvtColor(img, hsv, COLOR_BGR2HSV);
Mat1b mask;
inRange(hsv, Scalar(35, 20, 20), Scalar(85, 255, 255), mask);
imshow("Mask", mask);
waitKey();
return 0;
}
You can find additional details on HSV ranges here and here
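If you also want to see the green pixels themselves rather than just the binary mask, a minimal follow-up sketch (my addition, reusing the img and mask variables from the code above) is to copy the image through the mask:
Mat3b green_only(img.rows, img.cols, Vec3b(0, 0, 0)); // black canvas
img.copyTo(green_only, mask); // copy only the pixels where mask is non-zero
imshow("Green only", green_only);
waitKey();
Everything outside the detected range stays black, so you can judge at a glance whether the HSV bounds are tight enough.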

Related

Why does cv::findContours return so many contours?

I've read this post, but even after using cv::threshold to create a truly binary image, I still get ~500 contours. What am I doing wrong?
Shouldn't cv::findContours return only 13 contours, since there are clearly 13 blobs?
Mat img = imread("img.jpg", CV_LOAD_IMAGE_GRAYSCALE);
Mat img_thresh;
threshold(img, img_thresh, 0, 255, CV_THRESH_BINARY);
vector<vector<Point> > contours;
vector<Vec4i> hierarchy;
cv::findContours(img_thresh, contours, hierarchy, RetrievalModes::RETR_TREE, ContourApproximationModes::CHAIN_APPROX_SIMPLE);
RNG rng(12345);
Mat drawing = Mat::zeros(img_thresh.size(), CV_8UC3);
for (int i = 0; i< contours.size(); i++)
{
Scalar color = Scalar(rng.uniform(0, 255), rng.uniform(0, 255), rng.uniform(0, 255));
drawContours(drawing, contours, i, color, 2, 8, hierarchy, 0, Point());
}
imshow("drawing", drawing);
waitKey();
UPDATE1
I tried cv::RETR_EXTERNAL instead of cv::RETR_TREE, but it still returns far more contours than it should.
If you check your binary image, you will see there are a lot of independent contours:
So you first need to clean them up by eroding and dilating, as in the code below:
And you will get this result:
This is cleaner than the original.
Here is the full code:
cv::namedWindow("result", cv::WINDOW_FREERATIO);
cv::Mat img = cv::imread(R"(rUYLL.png)");
// to gray
cv::Mat gray;
cv::cvtColor(img, gray, cv::COLOR_BGR2GRAY);
cv::threshold(gray, gray, 0, 255, cv::THRESH_BINARY);
cv::erode(gray, gray, cv::getStructuringElement(cv::MORPH_RECT, cv::Size(3, 3)));
cv::dilate(gray, gray, cv::getStructuringElement(cv::MORPH_RECT, cv::Size(3, 3)));
std::vector<std::vector<cv::Point> > contours;
cv::findContours(gray, contours, cv::RETR_EXTERNAL, cv::CHAIN_APPROX_SIMPLE);
cv::drawContours(img, contours, -1, cv::Scalar(0, 255, 0), 2, 8);
cv::imshow("result", img);
cv::waitKey();
And here is the output:
Hope it helps!
And here is the simplest option, which you can also consider if it works for you: just increase the threshold value from 0 to 80, and you're done:
cv::threshold(gray, gray, 80, 255, cv::THRESH_BINARY);
Just play with the threshold value and check the result.
The same output, obtained just by changing the threshold value:
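If you would rather not hand-pick the threshold at all, one variation (my own suggestion, not part of the original answer) is to let Otsu's method choose it automatically before the erode/dilate and findContours steps above, and then print how many contours remain:
// Let Otsu pick the threshold instead of a fixed 0 or 80
cv::threshold(gray, gray, 0, 255, cv::THRESH_BINARY | cv::THRESH_OTSU);
// ... erode, dilate and findContours exactly as above ...
std::cout << "contours found: " << contours.size() << std::endl; // needs <iostream>
On a clean binary image the count should drop to roughly the number of actual blobs.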

OpenCV - how many black objects are there in a photo?

Okay, so I'm trying OpenCV with C++, and I want to write a simple detection program for objects that are colored black. So I have this simple code:
int main()
{
Mat3b bgr = imread("C:/Users/sesoa/Desktop/photos/shapes.png");
Mat3b hsv;
cvtColor(bgr, hsv, COLOR_BGR2HSV);
Mat1b mask1, mask2;
inRange(hsv, Scalar(0, 0, 0, 0), Scalar(180, 255, 30, 0), mask1);
inRange(hsv, Scalar(0, 0, 0, 0), Scalar(180, 255, 40, 0), mask2);
Mat1b mask = mask1 | mask2;
imshow("Mask", mask);
waitKey();
return 0;
}
shapes.png is this:
All the shapes are outlined in black. I would like my program to tell me how many connected black objects there are. The writing under the shapes is also black, so it shows up as well; that's okay, because this is a test photo anyway.
How can I modify my program to detect how many connected black objects are in the photo? (In this photo, the output should be "60", as there are 8 shapes and 49 letters, plus 3 of the letters are 'i', so we have to count the dots.)
EDIT:
I want the program to count the black objects. I already extract all the black objects, like this:
If you want to count the number of objects just do the following:
std::vector<std::vector<cv::Point> > contours;
std::vector<cv::Vec4i> hierarchy;
findContours( canny_output, contours, hierarchy, CV_RETR_TREE, CV_CHAIN_APPROX_SIMPLE, Point(0, 0) ); // canny_output is the binary image
This will give you all the contours in the binary image (contours.size()). If you want only specific contours, you can filter by contour area.
Hope it helps!
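Alternatively (my own suggestion, not from the answer above, and assuming OpenCV 3.0 or later), cv::connectedComponents counts the blobs directly on the binary mask from your inRange call, without going through contours at all:
// mask is the Mat1b where the black objects appear as white pixels (output of inRange)
Mat1i labels;
int n = connectedComponents(mask, labels, 8, CV_32S);
std::cout << "black objects: " << n - 1 << std::endl; // label 0 is the background
Small specks of noise would also be counted, so you may still want to reject components below a minimum area.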

OpenCV convert one color into any other

Sorry for my bad English. I have an image that shows 3 circles of different colors, one red, one green, and one blue. I can split this image into its 3 channels, but they appear white. In the code I display an image called "copy R", and I don't know how to turn this "copy R" into another image of any color I want, so that I can overlay it on the original image and change the red color. How can I do this?
This is my code. Sorry, it's the first time I ask a question and I don't know how to publish it properly.
#include <opencv2/core.hpp>
#include <opencv2/imgproc.hpp>
#include <opencv2/highgui.hpp>
#define w 400
using namespace cv;
/// Function headers
void MyFilledCircle(Mat img, Point center);
void MyFilledCircle1(Mat img, Point center);
void MyFilledCircle2(Mat img, Point center);
int main(void) {
//![create_images]
char window[] = "Original";
/// Create black empty images
Mat image = Mat::zeros(w, w, CV_8UC3);
/// 1.b. Creating circles
MyFilledCircle(image, Point(200, 200));
MyFilledCircle1(image, Point(150, 150));
MyFilledCircle2(image, Point(250, 250));
Mat channel[3];
split(image, channel);
//channel[0] = Mat::zeros(image.rows, image.cols, CV_8UC1);
merge(channel, 3, image);
Mat imageHSV;
Mat copy;
imshow(window, image);
//imshow("Color 1", imageHSV);
inRange(image, Scalar(0, 0, 255), Scalar(0, 0, 255), copy);
imshow("copy R", copy);
imshow("B", channel[0]);
imshow("G", channel[1]);
imshow("R", channel[2]);
//imshow("0", canal0);
//imwrite("dest.jpg", image);
waitKey(0);
return(0);
}
/// Function Declaration
//![myfilledcircle]
void MyFilledCircle1(Mat img, Point center)
{
circle(img,
center,
50,
Scalar(0, 255, 0),
FILLED,
LINE_8);
}
void MyFilledCircle(Mat img, Point center)
{
circle(img,
center,
50,
Scalar(0, 0, 255),
FILLED,
LINE_8);
}
void MyFilledCircle2(Mat img, Point center)
{
circle(img,
center,
50,
Scalar(255, 0, 0),
FILLED,
LINE_8);
}
From the looks of it, copy is a binary mask, and you want to superimpose this mask on image such that only the non-zero pixels in the mask retain their original color.
If my assumption is correct, then using the subtract method, as shown below, should help you out:
Mat result;
cvtColor(copy, copy, CV_GRAY2BGR); // convert copy to a 3-channel image
absdiff(image, image, result);     // initialize result as a black image of image's size
subtract(copy, image, result);
subtract(copy, result, result);
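If the end goal is simply to repaint the red circle with some other color on top of the original image, another option (my suggestion, not part of the answer above) is to use the single-channel mask produced by inRange, before it is converted to 3 channels, together with setTo:
Mat recolored = image.clone();
recolored.setTo(Scalar(0, 255, 255), copy); // paint every masked pixel yellow (BGR order)
imshow("Recolored", recolored);
Any Scalar you pass to setTo becomes the new color of the circle, so you can swap the red for whatever you like.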

OpenCV copy bounded text area to new image

I am new to OpenCV, and I am using this code to bound the text areas in an image. After that I filter the contours and push the bounding rectangles into a vector<Rect>, so I can copy them to a new image.
Mat large = img1;
Mat rgb;
// upsample and use it for processing
pyrUp(large, rgb);
Mat small;
cvtColor(rgb, small, CV_BGR2GRAY);
// morphological gradient
Mat grad;
Mat morphKernel = getStructuringElement(MORPH_ELLIPSE, Size(2, 2));
morphologyEx(small, grad, MORPH_GRADIENT, morphKernel);
// binarize
Mat bw;
threshold(grad, bw, 0.0, 255.0, THRESH_BINARY | THRESH_OTSU);
// connect horizontally oriented regions
Mat connected;
//morphKernel = getStructuringElement(MORPH_RECT, Size(7, 1));
//morphologyEx(bw, connected, MORPH_CLOSE, morphKernel);
// find contours
connected = bw;
Mat mask = Mat::zeros(bw.size(), CV_8UC1);
Mat mask2;
Mat mask3;
vector<vector<Point>> contours;
vector<Vec4i> hierarchy;
findContours(connected, contours, hierarchy, CV_RETR_CCOMP, CV_CHAIN_APPROX_SIMPLE, Point(0, 0));
/*drawContours(mask2, contours, -1, Scalar(255), CV_FILLED);
Mat Crop(img1.rows, img1.cols, CV_8UC3);
Crop.setTo(Scalar(0, 255, 0));
img1.copyTo(Crop, mask2);
normalize(mask2.clone(), mask2, 0.0, 255.0, CV_MINMAX, CV_8UC1);
*/
vector<Rect> rect1;
int i = 0;
//filter contours
for (int idx = 0; idx >= 0; idx = hierarchy[idx][0])
{
Rect rect = boundingRect(contours[idx]);
Mat maskROI(mask, rect);
maskROI = Scalar(0, 0, 0);
// fill the contour
drawContours(mask, contours, idx, Scalar(255, 255, 255), CV_FILLED);
// ratio of non-zero pixels in the filled region
double r = (double)countNonZero(maskROI) / (rect.width*rect.height);
if (r > .45 /* assume at least 45% of the area is filled if it contains text */
&&
(rect.height > 10 && rect.width > 10 && rect.height<150 && rect.width<150) /* constraints on region size */
/* these two conditions alone are not very robust. better to use something
like the number of significant peaks in a horizontal projection as a third condition */
)
{
//making rectangles on bounded area
rectangle(rgb, rect, Scalar(0, 255, 0), 2);
//pushing bounding rectangles in vector for new mask
rect1.push_back(rect);
}
}
The input and the output I am getting after bounding the text areas:
After that I am using this code to copy only the bounded areas to a new mask:
//copying bounded rectangles area from small to new mask2
for (int i = 0; i < rect1.size(); i++){
mask2 = rgb(rect1[i]);
}
But by using this I only get the last bounded text area:
How can I update mask2 so that it contains all of the bounded text areas mapped from rgb?
That's because mask2 ends up equal to the last rgb(rect1[i]) that was assigned.
You can easily solve this in two ways (using copyTo):
Create a mask (black-initialized, same size as the input image) on which you draw white rectangles. Then copy the original image to a black-initialized image of the same size, using the obtained mask.
Copy each sub-image directly to a black-initialized image.
Starting from this image, where the red rectangles will be your detected rectangles:
With the first approach you'll get a mask like this:
and, for both approaches, the final result will be:
Code for first approach:
#include <opencv2/opencv.hpp>
#include <vector>
using namespace std;
using namespace cv;
int main()
{
// Your image
Mat3b img = imread("path_to_image");
// Your rectangles
vector<Rect> rects{Rect(100, 100, 100, 200), Rect(300, 200, 200, 100), Rect(500, 400, 80, 130)};
// Mask for rectangles (black initialized)
Mat1b mask(img.rows, img.cols, uchar(0));
Mat3b dbgRects = img.clone();
for (int i = 0; i < rects.size(); ++i)
{
// Draw white rectangles on mask
rectangle(mask, rects[i], Scalar(255), CV_FILLED);
// Show rectangles
rectangle(dbgRects, rects[i], Scalar(0, 0, 255), 2);
}
// Black initialized result
Mat3b result(img.rows, img.cols, Vec3b(0,0,0));
img.copyTo(result, mask);
imshow("Rectangles", dbgRects);
imshow("Result", result);
waitKey();
return 0;
}
Code for second approach:
#include <opencv2/opencv.hpp>
#include <vector>
using namespace std;
using namespace cv;
int main()
{
// Your image
Mat3b img = imread("path_to_image");
// Your rectangles
vector<Rect> rects{Rect(100, 100, 100, 200), Rect(300, 200, 200, 100), Rect(500, 400, 80, 130)};
// Black initialized result
Mat3b result(img.rows, img.cols, Vec3b(0, 0, 0));
Mat3b dbgRects = img.clone();
for (int i = 0; i < rects.size(); ++i)
{
img(rects[i]).copyTo(result(rects[i]));
// Show rectangles
rectangle(dbgRects, rects[i], Scalar(0, 0, 255), 2);
}
imshow("Rectangles", dbgRects);
imshow("Result", result);
waitKey();
return 0;
}

OpenCV Watershed segmentation miss some objects

My code is the same as in this tutorial.
When I look at the result image after using cv::watershed(), there is an object (upper-right) that I want to detect, but it's missing.
There are indeed six markers in the image after using cv::drawContours().
Is this normal, given the inherent inaccuracy of the watershed algorithm?
Here is part of my code:
Mat src = imread("result01.png");
Mat gray;
cvtColor(src, gray, COLOR_BGR2GRAY);
Mat thresh;
threshold(gray, thresh, 0, 255, THRESH_BINARY | THRESH_OTSU);
// noise removal
Mat kernel = Mat::ones(3, 3, CV_8UC1);
Mat opening;
morphologyEx(thresh, opening, MORPH_OPEN, kernel, Point(-1, -1), 2);
// Perform the distance transform algorithm
Mat dist_transform;
distanceTransform(opening, dist_transform, CV_DIST_L2, 5);
// Normalize the distance image for range = {0.0, 1.0}
// so we can visualize and threshold it
normalize(dist_transform, dist_transform, 0, 1., NORM_MINMAX);
// Threshold to obtain the peaks
// This will be the markers for the foreground objects
Mat dist_thresh;
threshold(dist_transform, dist_thresh, 0.5, 1., CV_THRESH_BINARY);
Mat dist_8u;
dist_thresh.convertTo(dist_8u, CV_8U);
// Find total markers
vector<vector<Point> > contours;
findContours(dist_8u, contours, CV_RETR_EXTERNAL, CV_CHAIN_APPROX_SIMPLE);
// Create the marker image for the watershed algorithm
Mat markers = Mat::zeros(dist_thresh.size(), CV_32SC1);
// Draw the foreground markers
for (size_t i = 0; i < contours.size(); i++)
drawContours(markers, contours, static_cast<int>(i), Scalar::all(static_cast<int>(i)+1), -1);
// Perform the watershed algorithm
watershed(src, markers);
Original image:
Result after watershed:
You can find the original, intermediate, and result images here:
Result images after specific process
In your example, what you consider background is given the same label (5) as the "missing" object.
You can easily fix this by assigning a label (>0) to the background, too.
You can find what is surely background by dilating and negating the thresh image.
Then, when creating the marker image, you define the labels as:
0: unknown
1: background
>1 : your objects
In your output image, markers will have:
-1 : the edges between objects
0: the background (as intended by watershed)
1: the background (as you defined)
>1 : your objects.
This code should help:
#include <opencv2/opencv.hpp>
#include <vector>
using namespace std;
using namespace cv;
int main()
{
Mat3b src = imread("path_to_image");
Mat1b gray;
cvtColor(src, gray, COLOR_BGR2GRAY);
Mat1b thresh;
threshold(gray, thresh, 0, 255, THRESH_BINARY | THRESH_OTSU);
// noise removal
Mat1b kernel = getStructuringElement(MORPH_RECT, Size(3,3));
Mat1b opening;
morphologyEx(thresh, opening, MORPH_OPEN, kernel, Point(-1, -1), 2);
Mat1b kernelb = getStructuringElement(MORPH_RECT, Size(21, 21));
Mat1b background;
morphologyEx(thresh, background, MORPH_DILATE, kernelb);
background = ~background;
// Perform the distance transform algorithm
Mat1f dist_transform;
distanceTransform(opening, dist_transform, CV_DIST_L2, 5);
// Normalize the distance image for range = {0.0, 1.0}
// so we can visualize and threshold it
normalize(dist_transform, dist_transform, 0, 1., NORM_MINMAX);
// Threshold to obtain the peaks
// This will be the markers for the foreground objects
Mat1f dist_thresh;
threshold(dist_transform, dist_thresh, 0.5, 1., CV_THRESH_BINARY);
Mat1b dist_8u;
dist_thresh.convertTo(dist_8u, CV_8U);
// Find total markers
vector<vector<Point> > contours;
findContours(dist_8u, contours, CV_RETR_EXTERNAL, CV_CHAIN_APPROX_SIMPLE);
// Create the marker image for the watershed algorithm
Mat1i markers(dist_thresh.rows, dist_thresh.cols, int(0));
// Background as 1
Mat1i one(markers.rows, markers.cols, int(1));
bitwise_or(one, markers, markers, background);
// Draw the foreground markers (from 2 up)
for (int i = 0; i < int(contours.size()); i++)
drawContours(markers, contours, i, Scalar(i+2), -1);
// Perform the watershed algorithm
Mat3b dbg;
cvtColor(opening, dbg, COLOR_GRAY2BGR);
watershed(dbg, markers);
Mat res;
markers.convertTo(res, CV_8U);
normalize(res, res, 0, 255, NORM_MINMAX);
return 0;
}
Result:
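If you also want to know how many objects survived the segmentation, a short follow-up sketch (my addition, assuming the markers matrix from the code above, placed before the return) is to collect the distinct labels greater than 1, since 1 is the background and -1 marks the boundaries between regions:
// Count distinct object labels in markers (needs <set> and <iostream>)
std::set<int> object_labels;
for (int r = 0; r < markers.rows; ++r)
    for (int c = 0; c < markers.cols; ++c)
        if (markers(r, c) > 1)
            object_labels.insert(markers(r, c));
std::cout << "objects found: " << object_labels.size() << std::endl;
This also makes it easy to verify whether the upper-right object now gets its own label instead of being merged with the background.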
There is very little Emgu CV material on watershed.
Here is my translation of this solution to C#. I know this is not the right forum, but this answer keeps popping up, so for all the searchers:
//Mat3b src = imread("path_to_image");
//cvtColor(src, gray, COLOR_BGR2GRAY);
Image<Gray, byte> gray = smallImage.Convert<Gray, byte>();
//threshold(gray, thresh, 0, 255, THRESH_BINARY | THRESH_OTSU);
Image<Gray, byte> thresh = gray.ThresholdBinaryInv(new Gray(55), new Gray(255));
// noise removal
Mat kernel = CvInvoke.GetStructuringElement(ElementShape.Rectangle, new Size(3, 3), new Point(-1, -1));
//Mat1b opening;
//morphologyEx(thresh, opening, MORPH_OPEN, kernel, Point(-1, -1), 2);
Image<Gray, byte> opening = thresh.MorphologyEx(MorphOp.Open, kernel, new Point(-1, -1), 2, BorderType.Default, new MCvScalar(255));
//Mat1b kernelb = getStructuringElement(MORPH_RECT, Size(21, 21));
Mat kernel1 = CvInvoke.GetStructuringElement(ElementShape.Rectangle, new Size(3, 3), new Point(-1, -1));
//Mat1b background;
//morphologyEx(thresh, background, MORPH_DILATE, kernelb);
Image<Gray, byte> background = thresh.MorphologyEx(MorphOp.Dilate, kernel, new Point(-1, -1), 2, BorderType.Default, new MCvScalar(255));
background = ~background;
//// Perform the distance transform algorithm
//Mat1f dist_transform;
//distanceTransform(opening, dist_transform, CV_DIST_L2, 5);
Image<Gray, float> dist_transform = new Image<Gray, float>(opening.Width, opening.Height);
CvInvoke.DistanceTransform(opening, dist_transform, null, DistType.L2, 5);
//// Normalize the distance image for range = {0.0, 1.0}
//// so we can visualize and threshold it
//normalize(dist_transform, dist_transform, 0, 1., NORM_MINMAX);
CvInvoke.Normalize(dist_transform, dist_transform, 0, 1.0, NormType.MinMax, DepthType.Default);
//// Threshold to obtain the peaks
//// This will be the markers for the foreground objects
//Mat1f dist_thresh;
//threshold(dist_transform, dist_thresh, 0.5, 1., CV_THRESH_BINARY);
Image<Gray, float> dist_thresh = new Image<Gray, float>(opening.Width, opening.Height);
CvInvoke.Threshold(dist_transform, dist_thresh, 0.5, 1.0, ThresholdType.Binary);
//Mat1b dist_8u;
//dist_thresh.convertTo(dist_8u, CV_8U);
Image<Gray, Byte> dist_8u = dist_thresh.Convert<Gray, Byte>();
//// Find total markers
//vector<vector<Point>> contours;
//findContours(dist_8u, contours, CV_RETR_EXTERNAL, CV_CHAIN_APPROX_SIMPLE);
VectorOfVectorOfPoint contours = new VectorOfVectorOfPoint();
CvInvoke.FindContours(dist_8u, contours, null, RetrType.External, ChainApproxMethod.ChainApproxSimple);
//// Create the marker image for the watershed algorithm
//Mat1i markers(dist_thresh.rows, dist_thresh.cols, int(0));
Image<Gray, int> markers = new Image<Gray, int>(dist_thresh.Width, dist_thresh.Height, new Gray(0));
//// Background as 1
//Mat1i one(markers.rows, markers.cols, int(1));
//bitwise_or(one, markers, markers, background);
Image<Gray, int> one = new Image<Gray, int>(markers.Cols, markers.Rows, new Gray(1));
CvInvoke.BitwiseOr(one, markers, markers, background);
//// Draw the foreground markers (from 2 up)
for (int i = 0; i < contours.Size; i++)
// drawContours(markers, contours, i, Scalar(i + 2), -1);
CvInvoke.DrawContours(markers, contours, i, new MCvScalar(i + 2), -1);
//// Perform the watershed algorithm
//Mat3b dbg;
//cvtColor(opening, dbg, COLOR_GRAY2BGR);
//watershed(dbg, markers);
Image<Bgr, byte> dbg = new Image<Bgr, byte>(markers.Cols, markers.Rows);
CvInvoke.CvtColor(opening, dbg, ColorConversion.Gray2Bgr);
CvInvoke.Watershed(dbg, markers);
//Mat res;
//markers.convertTo(res, CV_8U);
//normalize(res, res, 0, 255, NORM_MINMAX);
CvInvoke.Normalize(markers, markers, 0, 255, NormType.MinMax);
//return 0;
To find light objects on a dark background, replace ThresholdBinaryInv with ThresholdBinary.