Ok, I need to compute the distance between the cap (top of the bottle) and the liquid. I am trying to read the color pixel by pixel from the top down to the liquid. Any ideas on how I can take that measurement? (I am also not sure how to use dilate properly.)
#include <opencv/cvaux.h>
#include <opencv/highgui.h>
#include <opencv/cxcore.h>
#include <stdio.h>
#include <stdlib.h>
using namespace cv;
using namespace std;
// Input photo of the bottle (BGR). Loaded at static-initialization time,
// before main() runs. NOTE(review): imread returns an empty Mat on
// failure and nothing here checks for that.
Mat src = imread("cocaCorte.jpg");
// Binary masks: cap ("tampa"), liquid ("liquido"), and their bitwise OR.
Mat srcTampa, srcLiquido, pixelColor;
// Pixel cursor used when scanning downward from the cap.
Point3_<uchar>* p;
// Scan position; set to the cap's mass center by find_moments().
int posicaoX,posicaoY;
// Finds the cap mask's contour mass centers, stores the first one in
// (posicaoX, posicaoY), and draws the contours onto `src`.
void find_moments( Mat grey );
int main(int argc, char* argv[])
{
///Criacao das janelas
cvNamedWindow( "Original", CV_WINDOW_AUTOSIZE);
cvNamedWindow( "Tampa Processada", CV_WINDOW_AUTOSIZE);
cvNamedWindow( "Liquido Processado", CV_WINDOW_AUTOSIZE);
cvNamedWindow( "Bitwise OR", CV_WINDOW_AUTOSIZE);
///Processamento do liquido
inRange( src, Scalar( 0, 0, 0), Scalar( 60, 60, 60), srcLiquido);
GaussianBlur( srcLiquido, srcLiquido, Size( 7, 7), 5, 5);
threshold( srcLiquido, srcLiquido, 100, 255, CV_THRESH_BINARY);
dilate( srcLiquido, srcLiquido, Mat(), Point(-1, -1), 2, 1, 1);
dilate( srcLiquido, srcLiquido, Mat(), Point(-1, -1), 2, 1, 1);
dilate( srcLiquido, srcLiquido, Mat(), Point(-1, -1), 2, 1, 1);
dilate( srcLiquido, srcLiquido, Mat(), Point(-1, -1), 2, 1, 1);
dilate(srcLiquido, srcLiquido, Mat(), Point(-1, -1), 2, 1, 1);
dilate(srcLiquido, srcLiquido, Mat(), Point(-1, -1), 2, 1, 1);
///Processamento da tampa
inRange( src, Scalar( 25, 20, 138), Scalar( 90, 115, 205), srcTampa);
GaussianBlur( srcTampa, srcTampa, Size( 7, 7), 5, 5);
threshold( srcTampa, srcTampa, 100, 255, CV_THRESH_BINARY);
find_moments( srcTampa);
///Juntar as duas imagens
bitwise_or( srcTampa, srcLiquido, pixelColor);
///Calculos
///Reach the white part represent by the liquid starting fromthe top
while(true){
p = pixelColor.ptr<Point3_<uchar> >( posicaoY, posicaoX);
if((p->z=0)&&(p->y=0)&&(p->x=0)){
//that means the liquid was found
break;
}
posicaoY++;
}
cout<<"FOUND"<<endl; //Print the result...
///Mostrar imagens
imshow("Tampa Processada", srcTampa);
imshow("Liquido Processado", srcLiquido);
imshow("Bitwise OR", pixelColor);
waitKey();
destroyAllWindows();
return 0;
}
void find_moments( Mat gray )
{
    // Locates contours in the (binary) cap mask, stores the first
    // contour's mass center in the globals (posicaoX, posicaoY), and
    // draws all contours and centers onto the global `src` for display.
    Mat canny_output;
    vector<vector<Point> > contours;
    vector<Vec4i> hierarchy;
    /// Detect edges using canny
    Canny( gray, canny_output, 50, 150, 3 );
    /// Find contours
    findContours( canny_output, contours, hierarchy, CV_RETR_TREE, CV_CHAIN_APPROX_SIMPLE, Point(0, 0) );
    // Nothing detected: keep the previous scan position instead of
    // reading mc[0] out of bounds like the original did.
    if( contours.empty() )
        return;
    /// Get the moments
    vector<Moments> mu( contours.size() );
    for( size_t i = 0; i < contours.size(); i++ )
        mu[i] = moments( contours[i], false );
    /// Get the mass centers, guarding against m00 == 0 (a degenerate
    /// contour would make the original divide by zero -> NaN center).
    vector<Point2f> mc( contours.size() );
    for( size_t i = 0; i < contours.size(); i++ )
    {
        if( mu[i].m00 != 0 )
            mc[i] = Point2f( mu[i].m10/mu[i].m00, mu[i].m01/mu[i].m00 );
        else
            mc[i] = Point2f( 0, 0 );
    }
    posicaoX = mc[0].x;
    posicaoY = mc[0].y;
    /// Draw contours and mass centers in blue on the original image.
    for( int i = 0; i < (int)contours.size(); i++ )
    {
        Scalar color = Scalar( 255, 0, 0);
        drawContours( src, contours, i, color, 2, 8, hierarchy, 0, Point() );
        circle(src, mc[i], 2, color, -1, 1, 0 );
    }
    /// Show in a window
    namedWindow( "Contours", CV_WINDOW_AUTOSIZE );
    imshow( "Contours", src );
}
Related
Recently I switched from opencv-python to the C++ version of OpenCV, because I want to speed up my real-time video-processing app with CUDA. I am new to C++, so some aspects of memory management were unclear to me while optimizing my code.
For example, i have some filter chain like this:
void apply_blue_edgess(cv::Mat& matrix, cv::Mat& mask, cv::Mat& inverted_mask) {
    // Intermediate buffers, one per pipeline stage.
    cv::Mat gray, smoothed, edges, edges_bgr, edge_mask;
    // Stage 1: grayscale -> Gaussian blur -> Canny edge map.
    cv::cvtColor( matrix, gray, CV_BGR2GRAY );
    cv::GaussianBlur( gray, smoothed, cv::Size( 5, 5 ), 0, 0 );
    cv::Canny(smoothed, edges, 0, 100);
    // Stage 2: expand back to 3 channels and paint every edge pixel
    // orange (BGR 0,171,255) via a white-pixel mask.
    cv::cvtColor( edges, edges_bgr, CV_GRAY2BGR );
    cv::inRange(edges_bgr, cv::Scalar(255,255,255), cv::Scalar(255,255,255), edge_mask);
    edges_bgr.setTo(cv::Scalar(0, 171, 255), edge_mask);
    // Stage 3: soften the painted edges into the caller's frame and
    // clip the result to the region selected by `mask`.
    cv::GaussianBlur( edges_bgr, matrix, cv::Size( 5, 5 ), 0, 0 );
    cv::bitwise_and(matrix, mask, matrix);
}
Is it ok to use new Mat object at every step of the filter chain (gray_image, blured, canny, canny_3d, in_range_mask) ? Is such continuous memory allocation bad for performance? If so, how should i write similar functions?
As was suggested in the comment section, i ended up doing functor wrapper:
struct blue_edges_filter {
blue_edges_filter(int width, int height)
: gray_image(width, height, CV_8UC1),
blured(width, height, CV_8UC1),
canny(width, height, CV_8UC1),
canny_3d(width, height, CV_8UC3),
in_range_mask(width, height, CV_8UC3)
{ }
int operator()(cv::Mat& matrix, cv::Mat& mask, cv::Mat& inverted_mask) {
cv::bitwise_and(matrix, mask, internal_mask_matrix);
cv::bitwise_and(matrix, inverted_mask, external_mask_matrix);
cv::cvtColor( matrix, gray_image, CV_BGR2GRAY );
cv::GaussianBlur( gray_image, blured, cv::Size( 5, 5 ), 0, 0 );
cv::Canny(blured, canny, 0, 100);
cv::cvtColor( canny, canny_3d, CV_GRAY2BGR );
cv::inRange(canny_3d, cv::Scalar(255,255,255), cv::Scalar(255,255,255), in_range_mask);
canny_3d.setTo(cv::Scalar(0, 171, 255), in_range_mask);
cv::GaussianBlur( canny_3d, matrix, cv::Size( 5, 5 ), 0, 0 );
cv::bitwise_and(matrix, mask, matrix);
}
private:
cv::Mat gray_image, blured, canny, canny_3d, in_range_mask;
};
//Usage
// Construct once: allocates all scratch buffers for 1024x576 frames.
blue_edges_filter apply_blue_edgess(1024, 576);
// Then invoke per frame with no further allocation.
apply_blue_edgess(matrix, mask, inverted_mask);
You can reuse memory without allocation. Create temporal images:
void apply_blue_edgess(cv::Mat& matrix, cv::Mat& mask, cv::Mat& inverted_mask)
{
// Ping-pong between two scratch Mats: tmp[srcInd] is the current source
// and tmp[InvInd()] the current destination; after each stage srcInd is
// flipped so the previous output becomes the next input.
// NOTE(review): the buffers change type/channels between stages (BGR ->
// gray -> BGR), so cv::Mat may still reallocate internally -- confirm
// this actually avoids allocations before relying on it.
cv::Mat tmp[2];
int srcInd = 1;
auto InvInd = [&]() -> int { return srcInd ? 0 : 1; };
// Grayscale -> blur -> Canny -> back to BGR, each stage writing into
// the buffer the previous stage read from.
cv::cvtColor( matrix, tmp[InvInd()], CV_BGR2GRAY );
srcInd = InvInd();
cv::GaussianBlur( tmp[srcInd], tmp[InvInd()], cv::Size( 5, 5 ), 0, 0 );
srcInd = InvInd();
cv::Canny(tmp[srcInd], tmp[InvInd()], 0, 100);
srcInd = InvInd();
cv::cvtColor( tmp[srcInd], tmp[InvInd()], CV_GRAY2BGR );
srcInd = InvInd();
// Build a mask of the (white) edge pixels in the spare buffer...
cv::inRange(tmp[srcInd], cv::Scalar(255,255,255), cv::Scalar(255,255,255), tmp[InvInd()]);
// ...and recolor those pixels orange in place (no srcInd flip here:
// tmp[InvInd()] is the mask, tmp[srcInd] is modified).
tmp[srcInd].setTo(cv::Scalar(0, 171, 255), tmp[InvInd()]);
// Final blur writes straight into the caller's matrix, then clip to mask.
cv::GaussianBlur( tmp[srcInd], matrix, cv::Size( 5, 5 ), 0, 0 );
cv::bitwise_and(matrix, mask, matrix);
}
I am working on a code to detect the edges of boxes in a picture, but the boxes attached to the edge of the picture have missing lines.
As seen in the image above, only 2 of the first square's lines are detected.
I am using open cv C++
here is my code
// Pipeline: threshold -> dilate -> Laplacian edges -> contours ->
// approxPolyDP filter -> draw. Flat fragment, presumably the body of main.
cv::Mat src;
cv::Mat gray;
cv::Mat dst;
std::vector<std::vector<cv::Point> > contours;
std::vector<std::vector<cv::Point> > contours2;
std::vector<cv::Point> approx;
Mat img = imread("shapes-noisy.jpg", CV_LOAD_IMAGE_COLOR);
Mat _color = img.clone();
// Per-channel binary threshold on the colour image (type 0 = THRESH_BINARY).
threshold(img, img, 250, 255, 0);
src = img;
cv::cvtColor(src, gray, CV_BGR2GRAY);
Mat img6, img7, img8, img9, img10;
// Re-threshold the grayscale copy; img9/img10 are declared but unused.
cv::threshold(gray, img6, 250, 255.0, THRESH_BINARY);
// Thicken shapes slightly before edge extraction.
Mat element = getStructuringElement( MORPH_RECT,Size(3, 3 ), Point( 1, 1 ) );
dilate( img6, img6, element);
// Laplacian edge map in 16-bit, scaled back to 8-bit in img8.
Laplacian(img6, img7, CV_16S, 3, 1, 0, BORDER_DEFAULT);
convertScaleAbs(img7, img8);
vector<Vec4i> hierarchy;
RNG rng(0);
Scalar value;
// Outer contours only; clone() because findContours modifies its input.
cv::findContours(img8.clone(), contours, hierarchy,CV_RETR_EXTERNAL, CV_CHAIN_APPROX_SIMPLE);
Mat drawing,drawing2;
medianBlur(img8, drawing, 3);
// threshold at 255 with maxval 0 zeroes the images -- this just creates
// black canvases the same size as img8 to draw on.
threshold(drawing, drawing, 255, 0, 1);
threshold(drawing, drawing2, 255, 0, 1);
// Second contour pass on the same edge map; indices are assumed to line
// up with `contours` in the loop below -- TODO(review): confirm, the two
// calls happen to see identical input but this coupling is fragile.
cv::findContours(img8.clone(), contours2, hierarchy,CV_RETR_EXTERNAL, CV_CHAIN_APPROX_SIMPLE);
for( int i = 0; i< contours.size(); i++ )
{
// Keep only contours whose simplified polygon has more than 3 vertices.
cv::approxPolyDP(cv::Mat(contours[i]), approx, 10, true);
if(approx.size() > 3){
Scalar color = Scalar( rng.uniform(0, 255), rng.uniform(0,255), rng.uniform(0,255) );
drawContours( drawing2, contours2, i, color, 2, 8, hierarchy, 0, Point(0,0) );
}
}
imshow("approx", drawing2);
Hi I am trying to find all the circles in the following image and Identify the defect.
this is my code:
static void findCircles2(const Mat& image)
{
    // Runs a Hough circle search on the Canny edge map of each colour
    // channel of `image`, showing intermediate results in windows.
    vector<Vec3f> circles;
    int thresh1 = 5;
    Mat pyr, timg, gray0(image.size(), CV_8U), gray;
    // Down/up pyramid pass removes fine noise before edge detection.
    pyrDown(image, pyr, Size(image.cols/2, image.rows/2));
    pyrUp(pyr, timg, image.size());
    for( int c = 0; c < 3; c++ )
    {
        // Copy channel c of the smoothed image into gray0.
        int ch[] = {c, 0};
        mixChannels(&timg, 1, &gray0, 1, ch, 1);
        // Edge map that feeds HoughCircles. The original immediately
        // overwrote `gray` with three successive thresholded copies of
        // gray0 (only the last assignment had any effect), silently
        // discarding the Canny result; those dead statements are
        // removed here so the edge map is actually used.
        Canny(gray0, gray, 0, thresh1, 5);
        //dilate(gray, gray, Mat(), Point(-1,-1));
        namedWindow( "Hough Circle Transform Demo 1", CV_WINDOW_AUTOSIZE );
        imshow( "Hough Circle Transform Demo 1", gray );
        waitKey(0);
        HoughCircles( gray, circles, CV_HOUGH_GRADIENT, 1, gray.rows/8, 200, 100, 0, 0 );
        cout<<"size of circles: "<<circles.size()<<endl;
        // Draw detections. `gray` is single-channel, so grayscale
        // intensities are used -- the original's BGR scalars collapsed
        // to intensity 0 and drew invisible black circles.
        for( size_t i = 0; i < circles.size(); i++ )
        {
            Point center(cvRound(circles[i][0]), cvRound(circles[i][1]));
            int radius = cvRound(circles[i][2]);
            circle( gray, center, 3, Scalar(255), -1, 8, 0 );
            circle( gray, center, radius, Scalar(200), 3, 8, 0 );
        }
        /// Show your results
        namedWindow( "Hough Circle Transform Demo 2", CV_WINDOW_AUTOSIZE );
        imshow( "Hough Circle Transform Demo 2", gray );
        waitKey(0);
    }
}
Picture:
However, the code is unable to find anything. I played around with the thresholds, but it doesn't help. Please advise.
Development platform: VS2010, Opencv version: 2.4.10
Because the circles are so small and not very regular, you can't just run HoughCircles on the binary image.
An alternative method is to findContours, then filter the contours by ratio between the value of contourArea and the value of minEnclosingCircle.
I am trying a number recognition. However after contour finding. I get bounding boxes inside the main bounding box for numbers 0,6,8 ... as shown in figure. Please help me with this initial step of image processing.
I have tried using group rectangles but they are not working. Please check the code below. Thank you.
Image: http://tinypic.com/r/1twx05/5
int main()
{
Mat inimage, gray;
inimage = imread("sample.jpg");
cvtColor(inimage, gray, COLOR_BGR2GRAY);
GaussianBlur(gray, gray, Size(5,5), 0);
adaptiveThreshold(gray, gray, 255, ADAPTIVE_THRESH_GAUSSIAN_C, THRESH_BINARY_INV, 11, 0);
vector<vector<Point> > contours;
vector<Vec4i> hierarchy;
findContours( gray, contours, hierarchy, CV_RETR_TREE, CV_CHAIN_APPROX_SIMPLE, Point(0, 0) );
vector<vector<Point> > contours_poly( contours.size() );
vector<Rect> boundRect( contours.size() );
for( int i = 0; i < contours.size(); i++ )
{
approxPolyDP( Mat(contours[i]), contours_poly[i], 3, true );
boundRect[i] = boundingRect( Mat(contours_poly[i]) );
}
//groupRectangles(boundRect, 1, 0.2);
Scalar color = Scalar(0,0,255);
for( int i = 0; i< contours.size(); i++ )
{
//drawContours( inimage, contours_poly, i, color, 1, 8, vector<Vec4i>(), 0, Point() );
rectangle( inimage, boundRect[i].tl(), boundRect[i].br(), color, 1, 8, 0 );
}
namedWindow( "Contours", CV_WINDOW_AUTOSIZE );
imshow( "Contours", inimage );
waitKey(0);
return 0;
}
try to use the flag: CV_RETR_EXTERNAL instead of CV_RETR_TREE
as stated in the docs it tells to take only outer contours.
Or follow the tree hierarchy to drop nested contours (read the docs for how-to)
I follow the instruction in this page: http://wiki.elphel.com/index.php?title=OpenCV_Tennis_balls_recognizing_tutorial
to detect the tennis ball. That code is written for Python, and its requirement is V4L/AVLD for morphological operations. It uses the functions cvClose() and cvOpen() to dilate and erode the mask. I wrote my code in C++, so cvDilate() and cvErode() are used instead, but the result isn't as good as on that site.
Here is my result:output.jpg. (i'm sorry, because i don't have reputation enough to post image T_T)
Here is my full code:
#include <stdio.h>
#include "highgui.h"
#include "cv.h"
void main()
{
IplImage* img = cvLoadImage("tennis.jpg",1);
CvSize size = cvGetSize(img);
IplImage *hsv = cvCreateImage(size, IPL_DEPTH_8U, 3);
cvCvtColor(img, hsv, CV_BGR2HSV);
CvMat *mask = cvCreateMat(size.height, size.width, CV_8UC1);
cvInRangeS(hsv, cvScalar(0.11*256, 0.60*256, 0.20*256, 0),
cvScalar(0.14*256, 1.00*256, 1.00*256, 0), mask);
cvReleaseImage(&hsv);
IplConvKernel *se21 = cvCreateStructuringElementEx(21, 21, 10, 10, CV_SHAPE_RECT, NULL);
IplConvKernel *se11 = cvCreateStructuringElementEx(11, 11, 5, 5, CV_SHAPE_RECT, NULL);
cvErode(mask, mask, se21);
cvDilate(mask, mask, se11);
cvReleaseStructuringElement(&se21);
cvReleaseStructuringElement(&se11);
/* Copy mask into a grayscale image */
IplImage *hough_in = cvCreateImage(size, 8, 1);
cvCopy(mask, hough_in, NULL);
cvSmooth(hough_in, hough_in, CV_GAUSSIAN, 15, 15, 0, 0);
/* Run the Hough function */
CvMemStorage *storage = cvCreateMemStorage(0);
CvSeq *circles = cvHoughCircles(hough_in, storage,CV_HOUGH_GRADIENT, 4, size.height/10, 100, 40, 0, 0);
cvReleaseMemStorage(&storage);
int i;
for (i = 0; i < circles->total; i++) {
float *p = (float*)cvGetSeqElem(circles, i);
CvPoint center = cvPoint(cvRound(p[0]),cvRound(p[1]));
CvScalar val = cvGet2D(mask, center.y, center.x);
if (val.val[0] < 1) continue;
cvCircle(img, center, 3, CV_RGB(0,255,0), -1, CV_AA, 0);
cvCircle(img, center, cvRound(p[2]), CV_RGB(255,0,0), 3, CV_AA, 0);
cvCircle(mask, center, 3, CV_RGB(0,255,0), -1, CV_AA, 0);
cvCircle(mask, center, cvRound(p[2]), CV_RGB(255,0,0), 3, CV_AA, 0);
}
cvNamedWindow( "Output", CV_WINDOW_AUTOSIZE );
cvShowImage( "Output", img );
cvNamedWindow( "mask", CV_WINDOW_AUTOSIZE );
cvShowImage( "mask", mask );
cvWaitKey(0);
}
Can someone help me to get V4L/AVLD and improve this code ? thank you very much.
V4L/AVLD is for webcam. It doesn't have anything to do with the code or algorithm. http://allonlinux.free.fr/Projets/AVLD/
If you are using Linux, v4l-utls package needs to be installed in order to use webcam.