OpenCV: How to find the pixels inside a contour in C++

Suppose we are working on an image: is there any way to access the pixels inside a contour?
I have already found the contour using the function findContours() and even computed the moments, but I couldn't find the pixels inside the contour.
Any suggestions are welcome!
Thank you!

As @Miki already mentioned, you can use connectedComponents to perform a labeling. Then you iterate through the bounding box of your object, as @Amitay Nachmani suggested. But instead of using pointPolygonTest you can check whether the value at your current position matches your current label. Here is a small example:
#include "opencv2/imgproc.hpp"
#include "opencv2/highgui.hpp"
#include <vector>
using namespace cv;
using namespace std;
Mat binary, labels, stats, centroids;
int main()
{
Mat src = imread("C:\\Users\\phili\\Pictures\\t06-4.png",0);
threshold(src, binary, 0, 255, CV_THRESH_OTSU);
int nLabels = connectedComponentsWithStats(binary, labels, stats, centroids);
vector<vector<Point>> blobs(nLabels-1);
for (int i = 1; i < nLabels; i++) //0 is background
{
//get bounding rect
int left = stats.at<int>(i, CC_STAT_LEFT) ;
int top = stats.at<int>(i, CC_STAT_TOP);
int width = stats.at<int>(i, CC_STAT_WIDTH);
int height = stats.at<int>(i, CC_STAT_HEIGHT);
blobs[i - 1].reserve(width*height);
int x_end = left + width;
int y_end = top + height;
for (int x = left; x < x_end; x++)
{
for (int y = top; y < y_end; y++)
{
Point p(x, y);
if (i == labels.at<int>(p))
{
blobs[i-1].push_back(p);
}
}
}
}
}
EDIT:
Since you're using OpenCV 2.4, there are two ways to achieve the same results.
First, you could use findContours to detect the blobs, then draw them (filled) into a new image with a specific color as label (be aware that your blobs could contain holes). Then iterate through the image inside the bounding rectangle of each contour and collect all points with the label of your current contour. If you just iterate through the bounding rectangle inside your binary image, you run into problems with other objects overlapping the bounding rectangle.
Here is the code:
int getBlobs(Mat binary, vector<vector<Point>> &blobs)
{
    Mat labels = Mat::zeros(binary.size(), CV_32S); // label image, zero-initialized
    vector<vector<Point>> contours;
    vector<Vec4i> hierarchy;
    findContours(binary, contours, hierarchy, CV_RETR_CCOMP, CV_CHAIN_APPROX_NONE);
    blobs.clear();
    blobs.reserve(contours.size());
    int count = 1; // 0 is background
    for (size_t i = 0; i < contours.size(); i++) // iterate through each contour
    {
        // if contour[i] is not a hole
        if (hierarchy[i][3] == -1)
        {
            // draw contour without holes
            drawContours(labels, contours, (int)i, Scalar(count), CV_FILLED, 8, hierarchy, 2, Point());
            Rect rect = boundingRect(contours[i]);
            int left   = rect.x;
            int top    = rect.y;
            int width  = rect.width;
            int height = rect.height;
            int x_end = left + width;
            int y_end = top + height;
            vector<Point> blob;
            blob.reserve(width * height);
            for (int x = left; x < x_end; x++)
            {
                for (int y = top; y < y_end; y++)
                {
                    Point p(x, y);
                    if (count == labels.at<int>(p))
                    {
                        blob.push_back(p);
                    }
                }
            }
            blobs.push_back(blob);
            count++;
        }
    }
    count--;
    return count;
}
Second, you can perform your own labeling with floodFill. To do so, you iterate through your image and start a flood fill for every white pixel, then iterate through the bounding rectangle and collect all points that have the same label as the seed.
Here is the code:
int labeling(Mat binary, vector<vector<Point>> &blobs)
{
    FindBlobs(binary, blobs);
    return blobs.size();
}
with
void FindBlobs(const Mat &binary, vector<vector<Point>> &blobs)
{
    blobs.clear();
    // Fill the label_image with the blobs
    // 0  - background
    // 1  - unlabelled foreground
    // 2+ - labelled foreground
    cv::Mat label_image;
    binary.convertTo(label_image, CV_32FC1);
    float label_count = 2; // starts at 2 because 0,1 are used already
    for (int y = 0; y < label_image.rows; y++) {
        float *row = (float*)label_image.ptr(y);
        for (int x = 0; x < label_image.cols; x++) {
            if (row[x] != 255) {
                continue;
            }
            cv::Rect rect;
            cv::floodFill(label_image, Point(x, y), Scalar(label_count), &rect, Scalar(0), Scalar(0), 4);
            vector<Point> blob;
            blob.reserve(rect.width * rect.height);
            for (int i = rect.y; i < (rect.y + rect.height); i++) {
                float *row2 = (float*)label_image.ptr(i);
                for (int j = rect.x; j < (rect.x + rect.width); j++) {
                    if (row2[j] != label_count)
                    {
                        continue;
                    }
                    blob.push_back(Point(j, i));
                }
            }
            blobs.push_back(blob);
            label_count++;
        }
    }
}
I used this image:
And here are the bounding boxes and the points inside the contour for visualization:
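For reference, a minimal sketch of how such a visualization could be produced from the blobs vector filled above (the random colors and the green bounding boxes are my own choices, not taken from the original answer):
// Hypothetical visualization: draws each blob's pixels in a random color
// on a BGR copy of the binary image, plus each blob's bounding box.
Mat visualization;
cvtColor(binary, visualization, CV_GRAY2BGR);
RNG rng(12345);
for (size_t i = 0; i < blobs.size(); i++)
{
    Vec3b color(rng.uniform(0, 256), rng.uniform(0, 256), rng.uniform(0, 256));
    for (size_t j = 0; j < blobs[i].size(); j++)
        visualization.at<Vec3b>(blobs[i][j]) = color; // at() accepts a Point (x, y)
    rectangle(visualization, boundingRect(blobs[i]), Scalar(0, 255, 0)); // bounding box
}
imshow("blobs", visualization);
waitKey(0);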

Create a new image with filled contours using fillPoly.
fillPoly(filledImage, contours, Scalar(255, 255, 255));
Then find the non-zero pixels within that image using findNonZero.
vector<Point> indices;
findNonZero(filledImage, indices);
The "indices" result refer to pixels inside the contour

Use pointPolygonTest (http://docs.opencv.org/2.4/modules/imgproc/doc/structural_analysis_and_shape_descriptors.html?highlight=pointpolygontest#pointpolygontest) on all the pixels inside the bounding box of the contour.
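A minimal sketch of that idea (variable names are my own; per the docs, a non-negative return value means inside or on the edge):
// Assumes "contour" is one vector<Point> from findContours.
Rect box = boundingRect(contour);
vector<Point> inside;
for (int y = box.y; y < box.y + box.height; y++)
{
    for (int x = box.x; x < box.x + box.width; x++)
    {
        // measureDist=false: returns +1 (inside), 0 (on edge), -1 (outside)
        if (pointPolygonTest(contour, Point2f((float)x, (float)y), false) >= 0)
            inside.push_back(Point(x, y));
    }
}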

Related

[OpenCv][C++] Creating a smooth, airbrush-like brush stroke, similar to Photoshop?

I have a circular brush with a diameter of 200px and a hardness of 0 (the brush is a circular gradient). The spacing between each brush stamp is 25% of the brush diameter. However, when I compare the stroke my program draws and the stroke Photoshop draws, where all settings are equal...
It is clear that Photoshop's is much smoother! I can't reduce the spacing because that causes the edges to become harder.
How can I make my stroke like Photoshop's?
Here is the relevant code from my program...
// defining a circle
Mat alphaBrush(2 * outerRadius, 2 * outerRadius, CV_32FC1);
float floatInnerRadius = outerRadius * hardness;
for (int i = 0; i < alphaBrush.rows; i++) {
    for (int j = 0; j < alphaBrush.cols; j++) {
        int x = outerRadius - i;
        int y = outerRadius - j;
        float radius = hypot((float)x, (float)y);
        auto& pixel = alphaBrush.at<float>(i, j);
        if (radius > outerRadius) { pixel = 0.0; continue; }      // transparent
        if (radius < floatInnerRadius) { pixel = 1.0; continue; } // solid
        pixel = 1 - ((radius - floatInnerRadius) / (outerRadius - floatInnerRadius)); // partial
    }
}
/*
(...irrelevant stuff)
*/
// drawing the brush onto the canvas
for (int j = 0; j < inMatROI.rows; j++) {
    Vec3b *thisBgRow = inMatROI.ptr<Vec3b>(j);
    float *thisAlphaRow = brushROI.ptr<float>(j);
    for (int i = 0; i < inMatROI.cols; i++) {
        for (int c = 0; c < 3; c++) {
            thisBgRow[i][c] = saturate_cast<uchar>((brightness * thisAlphaRow[i]) + ((1.0 - thisAlphaRow[i]) * thisBgRow[i][c]));
        }
    }
}
I have also tried resultValue = max(backgroundValue, brushValue), but the intersection between the two circles is pretty obvious.
This is the approach: draw a solid thin line and afterwards compute the distance of each pixel to that line.
As you can see, there are some artifacts, probably mostly because of the merely approximate distance values from cv::distanceTransform. If you compute the distances precisely (and maybe in double precision) you should get very smooth results.
int main()
{
    cv::Mat canvas = cv::Mat(768, 768, CV_8UC3, cv::Scalar::all(255));
    cv::Mat canvasMask = cv::Mat::zeros(canvas.size(), CV_8UC1);
    // make sure the stroke always has >= 2 points, otherwise cv::line won't work
    std::vector<cv::Point> strokeSampling;
    strokeSampling.push_back(cv::Point(250, 100));
    strokeSampling.push_back(cv::Point(250, 200));
    strokeSampling.push_back(cv::Point(600, 300));
    strokeSampling.push_back(cv::Point(600, 400));
    strokeSampling.push_back(cv::Point(250, 500));
    strokeSampling.push_back(cv::Point(250, 650));
    for (int i = 0; i < strokeSampling.size() - 1; ++i)
        cv::line(canvasMask, strokeSampling[i], strokeSampling[i + 1], cv::Scalar::all(255));
    // computing a distance map:
    cv::Mat tmp1 = 255 - canvasMask;
    cv::Mat distMap;
    cv::distanceTransform(tmp1, distMap, CV_DIST_L2, CV_DIST_MASK_PRECISE);
    float outerRadius = 50;
    float innerRadius = 10;
    cv::Scalar strokeColor = cv::Scalar::all(0);
    for (int y = 0; y < distMap.rows; ++y)
        for (int x = 0; x < distMap.cols; ++x)
        {
            float percentage = 0.0f;
            float radius = distMap.at<float>(y, x);
            if (radius > outerRadius) { percentage = 0.0; }      // transparent
            else if (radius < innerRadius) { percentage = 1.0; } // solid
            else
            {
                percentage = 1 - ((radius - innerRadius) / (outerRadius - innerRadius)); // partial
            }
            if (percentage > 0)
            {
                // here you could use the canvasMask if you like to, instead of directly drawing on the canvas
                cv::Vec3b canvasColor = canvas.at<cv::Vec3b>(y, x);
                cv::Vec3b cColor = cv::Vec3b(strokeColor[0], strokeColor[1], strokeColor[2]);
                canvas.at<cv::Vec3b>(y, x) = percentage * cColor + (1 - percentage) * canvasColor;
            }
        }
    cv::imshow("out", canvas);
    cv::imwrite("C:/StackOverflow/Output/stroke.png", canvas);
    cv::waitKey(0);
}
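As a sketch of the "compute the distances precisely" idea, one could replace the approximate distance map with an exact point-to-segment distance; the helper below is my own illustration, not part of the original answer:
#include <opencv2/core/core.hpp>
#include <algorithm>
#include <cmath>

// Hypothetical exact distance from point p to segment a-b, in double precision.
// Minimizing this over all stroke segments yields an artifact-free distance map.
// Assumes a != b (otherwise the division below is undefined).
double distToSegment(cv::Point2d p, cv::Point2d a, cv::Point2d b)
{
    cv::Point2d ab = b - a, ap = p - a;
    double t = ab.dot(ap) / ab.dot(ab);  // projection parameter along a-b
    t = std::max(0.0, std::min(1.0, t)); // clamp to the segment
    cv::Point2d d = p - (a + t * ab);
    return std::sqrt(d.x * d.x + d.y * d.y);
}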

Subtract all contours from image except the one with the largest area

cv::Mat thr;
std::vector<std::vector<cv::Point> > contours;
std::vector<cv::Vec4i> hierarchy; // note: a flat vector, not nested
double largest_area = 0;
int largest_contour_index = 0;
cv::findContours(thr, contours, hierarchy, CV_RETR_CCOMP, CV_CHAIN_APPROX_SIMPLE); // Find the contours in the image
for (size_t i = 0; i < contours.size(); i++) // iterate through each contour
{
    double a = contourArea(contours[i], false); // Find the area of the contour
    if (a > largest_area)
    {
        largest_area = a;
        largest_contour_index = (int)i; // Store the index of the largest contour
    }
}
What should I do after finding the index of the largest contour? How can I delete all the other contours along with their inner areas?
The image is binary (cv::Mat thr): just a black background with white areas.
Thanks.
In your case, deleting the contours along with their inner areas is equivalent to filling them with black. This can be done by drawing the contour regions in black:
for (size_t i = 0; i < contours.size(); ++i) {
    if ((int)i != largest_contour_index) { // not the largest one
        cv::drawContours(thr, contours, (int)i, cv::Scalar(0, 0, 0), CV_FILLED);
    }
}
After finding the contours, find the index of the biggest contour and draw that contour on a new Mat.
int indexOfBiggestContour = -1;
int sizeOfBiggestContour = 0;
for (int i = 0; i < contours.size(); i++)
{
    if ((int)contours[i].size() > sizeOfBiggestContour) // note: compares point counts, not areas
    {
        sizeOfBiggestContour = contours[i].size();
        indexOfBiggestContour = i;
    }
}
cv::Mat newImage = cv::Mat::zeros(thr.size(), CV_8UC1); // must be allocated before drawing
if (indexOfBiggestContour >= 0)
    drawContours(newImage, contours, indexOfBiggestContour, Scalar(255), CV_FILLED, 8, hierarchy);

Partitions Image into boxes containing object

I have encountered a problem: partitioning a binarized image into boxes/subimages, each containing an object (note: the boxes can be irregular, while the objects are circles or any other primitive shapes). This is explained with the images below:
Figure 1: Image with circle as objects of interest
Figure 2: Image with boxes of arbitrary size containing objects of interest
Any opinion on how this can be done?
Since you mentioned that:
the boxes can be irregular
you can use the Voronoi diagram (computed by distanceTransform):
Code:
#include <opencv2/opencv.hpp>
#include <vector>
using namespace std;
using namespace cv;

int main()
{
    Mat1b img = imread("path_to_image", IMREAD_GRAYSCALE);
    Mat1f dist;
    Mat1i labels;
    distanceTransform(img, dist, labels, CV_DIST_L2, 3, DIST_LABEL_CCOMP);
    // Show result
    Mat1b labels1b;
    labels.convertTo(labels1b, CV_8U);
    normalize(labels1b, labels1b, 0, 255, NORM_MINMAX);
    Mat3b res;
    applyColorMap(labels1b, res, COLORMAP_JET);
    res.setTo(Scalar(0, 0, 0), ~img);
    imshow("Result", res);
    waitKey();
    return 0;
}
Update
If you need the boxes to be rectangles, you can look at the recursive XY Cut algorithm. Here is a modified version of the XY Cut algorithm that makes the rectangles not touch the foreground objects, so that the union of all rectangles covers the whole image area. Here I inverted the image, since usually black is background and white is foreground.
Code:
#include <opencv2/opencv.hpp>
#include <vector>
using namespace std;
using namespace cv;

vector<Rect> XYCut_projH(const Mat1b& src, Rect roi)
{
    Mat1b projH;
    reduce(src(roi), projH, 1, CV_REDUCE_MAX);
    vector<Rect> rects;
    bool bOut = true;
    vector<int> coords;
    coords.push_back(0);
    for (int i = 0; i < projH.rows; ++i)
    {
        if (bOut && projH(i) > 0)
        {
            coords.back() = (coords.back() + i) / 2;
            bOut = false;
        }
        else if (!bOut && projH(i) == 0)
        {
            coords.push_back(i);
            bOut = true;
        }
    }
    coords.front() = 0;
    coords.back() = projH.rows;
    if (coords.size() <= 1) return rects;
    for (int i = 0; i < coords.size() - 1; ++i)
    {
        Rect r(0, coords[i], src.cols, coords[i + 1] - coords[i]);
        r = (r + roi.tl()) & roi;
        rects.push_back(r);
    }
    return rects;
}

vector<Rect> XYCut_projV(const Mat1b& src, Rect roi)
{
    Mat1b projV;
    reduce(src(roi), projV, 0, CV_REDUCE_MAX);
    vector<Rect> rects;
    bool bOut = true;
    vector<int> coords;
    coords.push_back(0);
    for (int i = 0; i < projV.cols; ++i)
    {
        if (bOut && projV(i) > 0)
        {
            coords.back() = (coords.back() + i) / 2;
            bOut = false;
        }
        else if (!bOut && projV(i) == 0)
        {
            coords.push_back(i);
            bOut = true;
        }
    }
    coords.front() = 0;
    coords.back() = projV.cols;
    if (coords.size() <= 1) return rects;
    for (int i = 0; i < coords.size() - 1; ++i)
    {
        Rect r(coords[i], 0, coords[i + 1] - coords[i], src.rows);
        r = (r + roi.tl()) & roi;
        rects.push_back(r);
    }
    return rects;
}

void XYCut_step(const Mat1b& src, Rect roi, vector<Rect>& rects, bool bAlternate)
{
    vector<Rect> step;
    if (bAlternate)
    {
        step = XYCut_projH(src, roi);
        if ((step.size() == 1) && (step[0] == roi) && (XYCut_projV(src, roi).size() == 1))
        {
            rects.push_back(roi);
            return;
        }
    }
    else
    {
        step = XYCut_projV(src, roi);
        if ((step.size() == 1) && (step[0] == roi) && (XYCut_projH(src, roi).size() == 1))
        {
            rects.push_back(roi);
            return;
        }
    }
    for (int i = 0; i < step.size(); ++i)
    {
        XYCut_step(src, step[i], rects, !bAlternate);
    }
}

void XYCut(const Mat1b& src, vector<Rect>& rects)
{
    bool bAlternate = true;
    Rect roi(0, 0, src.cols, src.rows);
    XYCut_step(src, roi, rects, bAlternate);
}

int main()
{
    Mat1b img = imread("path_to_image", IMREAD_GRAYSCALE);
    // invert image, if needed
    img = ~img;
    // Apply (modified) XY Cut
    vector<Rect> rects;
    XYCut(img, rects);
    // Show results
    Mat3b res;
    cvtColor(img, res, COLOR_GRAY2BGR);
    for (int i = 0; i < rects.size(); ++i)
    {
        rectangle(res, rects[i], Scalar(0, 255, 0));
    }
    imshow("Result", res);
    waitKey();
    return 0;
}
Note that this algorithm works only if it's possible to make a cut along X or Y dimension, i.e. there is a horizontal or vertical line with all background pixels. This means that this won't work in a very cluttered image.

C++ Stretching an equalized image

From an equalized image (2) I have to create a stretched image (3).
Original image: http://i.imgur.com/X5MKF6z.jpg
Equalized image : http://i.imgur.com/oFBVUJp.png
Equalized and Stretch image: http://i.imgur.com/V7jeaRQ.png
With OpenCV I could have used equalizeHist() that does both equalization and stretching.
So, without using OpenCV, how can I do the stretching from an equalized image? The equalization part is done below.
#include <iostream>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <cstring>
using std::cout;
using std::cin;
using std::endl;
using namespace cv;

void imhist(Mat image, int histogram[])
{
    // initialize all intensity values to 0
    for (int i = 0; i < 256; i++)
    {
        histogram[i] = 0;
    }
    // count the number of pixels for each intensity value
    for (int y = 0; y < image.rows; y++)
        for (int x = 0; x < image.cols; x++)
            histogram[(int)image.at<uchar>(y, x)]++;
}

void cumhist(int histogram[], int cumhistogram[])
{
    cumhistogram[0] = histogram[0];
    for (int i = 1; i < 256; i++)
    {
        cumhistogram[i] = histogram[i] + cumhistogram[i - 1];
    }
}

int main()
{
    // Load the image
    Mat image = imread("y1.jpg", CV_LOAD_IMAGE_GRAYSCALE);
    // Generate the histogram
    int histogram[256];
    imhist(image, histogram);
    // Calculate the size of the image
    int size = image.rows * image.cols;
    float alpha = 255.0 / size;
    // Calculate the probability of each intensity
    float PrRk[256];
    for (int i = 0; i < 256; i++)
    {
        PrRk[i] = (double)histogram[i] / size;
    }
    // Generate the cumulative frequency histogram
    int cumhistogram[256];
    cumhist(histogram, cumhistogram);
    // Scale the histogram
    int Sk[256];
    for (int i = 0; i < 256; i++)
    {
        Sk[i] = cvRound((double)cumhistogram[i] * alpha);
    }
    // Generate the equalized image
    Mat new_image = image.clone();
    for (int y = 0; y < image.rows; y++)
        for (int x = 0; x < image.cols; x++)
            new_image.at<uchar>(y, x) = saturate_cast<uchar>(Sk[image.at<uchar>(y, x)]);
    //////////////////////////////////////////
    // Generate the histogram-stretched image
    Mat str_image = new_image.clone();
    //for (int a = 0; a < str_image.rows; a++)
    //    for (int b = 0; b < str_image.cols; b++)
    // Display the original image
    namedWindow("Original Image");
    imshow("Original Image", image);
    // Display the equalized image
    namedWindow("Equalized Image");
    imshow("Equalized Image", new_image);
    waitKey();
    return 0;
}
The normal way to do this is to find your darkest pixel and your brightest. You can do this in a single loop iterating over all your pixels, with pseudo-code like this:
darkest = pixel[0,0]   // assume first pixel is darkest for now, and overwrite later
brightest = pixel[0,0] // assume first pixel is brightest for now, and overwrite later
for all pixels
    if this pixel < darkest
        darkest = this pixel
    else if this pixel > brightest
        brightest = this pixel
    endif
end for
Simple enough. So, let's say the darkest and brightest are 80 and 220 respectively. Now you need to stretch this range 80..220 onto the full range 0..255.
So you subtract 80 from every pixel in your image to shift down to zero at the left end of the histogram, so your range is now 0..140. So now you need to multiply every pixel by 255/140 to stretch the right end out to 255. Of course, you can do both pieces of arithmetic in a single pass over your pixel array.
for all pixels
    newvalue = int((current value - darkest) * 255 / (brightest - darkest))
end for
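Putting the two pseudo-code loops together, a minimal C++ sketch might look like this (the function name stretch and the CV_8UC1 assumption are mine; it could be applied to new_image from the question):
// Hypothetical contrast stretch following the pseudo-code above:
// maps [darkest, brightest] onto [0, 255] without OpenCV helpers.
// Assumes a non-constant CV_8UC1 image (so brightest > darkest).
Mat stretch(const Mat& src)
{
    uchar darkest = src.at<uchar>(0, 0), brightest = src.at<uchar>(0, 0);
    for (int y = 0; y < src.rows; y++)
        for (int x = 0; x < src.cols; x++)
        {
            uchar v = src.at<uchar>(y, x);
            if (v < darkest) darkest = v;
            else if (v > brightest) brightest = v;
        }
    Mat dst = src.clone();
    for (int y = 0; y < dst.rows; y++)
        for (int x = 0; x < dst.cols; x++)
            dst.at<uchar>(y, x) = saturate_cast<uchar>(
                (dst.at<uchar>(y, x) - darkest) * 255.0 / (brightest - darkest));
    return dst;
}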

Detect largest rectangle from an image using OpenCV

I asked a previous question here and, following the advice from the answer, I built the program below, which I thought would detect the large rectangle, but it doesn't detect the rectangle at all. It does work on this image, though.
Original Image
Desired Image
I want the solution to work not only on this image but on different images of this kind. The major part of the code below is from different answers on SO.
My full program:
#include <cv.h>
#include <highgui.h>
using namespace cv;
using namespace std;

double angle(Point pt1, Point pt2, Point pt0) {
    double dx1 = pt1.x - pt0.x;
    double dy1 = pt1.y - pt0.y;
    double dx2 = pt2.x - pt0.x;
    double dy2 = pt2.y - pt0.y;
    return (dx1*dx2 + dy1*dy2) / sqrt((dx1*dx1 + dy1*dy1)*(dx2*dx2 + dy2*dy2) + 1e-10);
}

void find_squares(Mat& image, vector<vector<Point> >& squares)
{
    // blur will enhance edge detection
    Mat blurred = image.clone(); // clone, so blurring does not modify the input image
    medianBlur(image, blurred, 9);
    Mat gray0(blurred.size(), CV_8U), gray;
    vector<vector<Point> > contours;
    // find squares in every color plane of the image
    for (int c = 0; c < 3; c++)
    {
        int ch[] = {c, 0};
        mixChannels(&blurred, 1, &gray0, 1, ch, 1);
        // try several threshold levels
        const int threshold_level = 2;
        for (int l = 0; l < threshold_level; l++)
        {
            // Use Canny instead of zero threshold level!
            // Canny helps to catch squares with gradient shading
            if (l == 0)
            {
                Canny(gray0, gray, 10, 20, 3);
                // Dilate helps to remove potential holes between edge segments
                dilate(gray, gray, Mat(), Point(-1, -1));
            }
            else
            {
                gray = gray0 >= (l + 1) * 255 / threshold_level;
            }
            // Find contours and store them in a list
            findContours(gray, contours, CV_RETR_LIST, CV_CHAIN_APPROX_SIMPLE);
            // Test contours
            vector<Point> approx;
            for (size_t i = 0; i < contours.size(); i++)
            {
                // approximate contour with accuracy proportional
                // to the contour perimeter
                approxPolyDP(Mat(contours[i]), approx, arcLength(Mat(contours[i]), true) * 0.02, true);
                // Note: absolute value of an area is used because
                // area may be positive or negative - in accordance with the
                // contour orientation
                if (approx.size() == 4 &&
                    fabs(contourArea(Mat(approx))) > 1000 &&
                    isContourConvex(Mat(approx)))
                {
                    double maxCosine = 0;
                    for (int j = 2; j < 5; j++)
                    {
                        double cosine = fabs(angle(approx[j % 4], approx[j - 2], approx[j - 1]));
                        maxCosine = MAX(maxCosine, cosine);
                    }
                    if (maxCosine < 0.3)
                        squares.push_back(approx);
                }
            }
        }
    }
}

void find_largest_square(const vector<vector<Point> >& squares, vector<Point>& biggest_square)
{
    if (!squares.size()) {
        return;
    }
    int max_width = 0;
    int max_height = 0;
    int max_square_idx = 0;
    for (size_t i = 0; i < squares.size(); i++) {
        Rect rectangle = boundingRect(Mat(squares[i]));
        if ((rectangle.width >= max_width) && (rectangle.height >= max_height)) {
            max_width = rectangle.width;
            max_height = rectangle.height;
            max_square_idx = (int)i;
        }
    }
    biggest_square = squares[max_square_idx];
}

int main(int argc, char* argv[])
{
    Mat img = imread(argv[1]);
    if (img.empty())
    {
        cout << "!!! imread() failed to open target image" << endl;
        return -1;
    }
    vector<vector<Point> > squares;
    find_squares(img, squares);
    vector<Point> largest_square;
    find_largest_square(squares, largest_square);
    if (largest_square.size() == 4) // guard against the case where no square was found
    {
        for (int i = 0; i < 4; ++i) {
            line(img, largest_square[i], largest_square[(i + 1) % 4], Scalar(0, 255, 0), 1, CV_AA);
        }
    }
    imwrite("squares.png", img);
    imshow("squares", img);
    waitKey(0);
    return 0;
}
I think you can do it easily using the findContours function (http://docs.opencv.org/doc/tutorials/imgproc/shapedescriptors/find_contours/find_contours.html). The biggest contour (or possibly the second biggest) should be the contour of the black rectangle. Then just find the smallest rectangle which will surround this contour (just find the points with the biggest/smallest x/y coordinates).
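A minimal sketch of that suggestion, assuming a binary input image (selecting the biggest contour by area and using boundingRect for the surrounding rectangle are my interpretation):
// Hypothetical sketch: find the largest contour by area and its bounding rectangle.
// Assumes "binary" is a thresholded CV_8UC1 version of the input "img".
vector<vector<Point> > contours;
findContours(binary, contours, CV_RETR_LIST, CV_CHAIN_APPROX_SIMPLE);
int largestIdx = -1;
double largestArea = 0;
for (size_t i = 0; i < contours.size(); i++)
{
    double area = fabs(contourArea(contours[i]));
    if (area > largestArea)
    {
        largestArea = area;
        largestIdx = (int)i;
    }
}
if (largestIdx >= 0)
{
    // smallest upright rectangle surrounding the contour
    Rect box = boundingRect(contours[largestIdx]);
    rectangle(img, box, Scalar(0, 255, 0), 2);
}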