Recently I switched from opencv-python to the C++ version of OpenCV, because I want to speed up my real-time video-processing app with CUDA. I am new to C++, so I ran into some unclear points about memory management while optimizing my code.
For example, i have some filter chain like this:
// Detect edges in `matrix`, repaint them in orange (BGR 0,171,255), blur the
// result back into `matrix`, and AND it with `mask`.
// Note: `inverted_mask` is accepted but not used in this version.
void apply_blue_edgess(cv::Mat& matrix, cv::Mat& mask, cv::Mat& inverted_mask) {
    cv::Mat gray, smoothed, edges, edges_bgr, edge_pixels;
    cv::cvtColor(matrix, gray, CV_BGR2GRAY);
    cv::GaussianBlur(gray, smoothed, cv::Size(5, 5), 0, 0);
    cv::Canny(smoothed, edges, 0, 100);
    // Back to 3 channels so the edges can be given a color.
    cv::cvtColor(edges, edges_bgr, CV_GRAY2BGR);
    // Select the pure-white edge pixels, then repaint them in place.
    cv::inRange(edges_bgr, cv::Scalar(255, 255, 255), cv::Scalar(255, 255, 255), edge_pixels);
    edges_bgr.setTo(cv::Scalar(0, 171, 255), edge_pixels);
    // Final blur writes straight into the caller's matrix.
    cv::GaussianBlur(edges_bgr, matrix, cv::Size(5, 5), 0, 0);
    cv::bitwise_and(matrix, mask, matrix);
}
Is it OK to create a new Mat object at every step of the filter chain (gray_image, blured, canny, canny_3d, in_range_mask)? Is such continuous memory allocation bad for performance? If so, how should I write similar functions?
As was suggested in the comment section, i ended up doing functor wrapper:
struct blue_edges_filter {
blue_edges_filter(int width, int height)
: gray_image(width, height, CV_8UC1),
blured(width, height, CV_8UC1),
canny(width, height, CV_8UC1),
canny_3d(width, height, CV_8UC3),
in_range_mask(width, height, CV_8UC3)
{ }
int operator()(cv::Mat& matrix, cv::Mat& mask, cv::Mat& inverted_mask) {
cv::bitwise_and(matrix, mask, internal_mask_matrix);
cv::bitwise_and(matrix, inverted_mask, external_mask_matrix);
cv::cvtColor( matrix, gray_image, CV_BGR2GRAY );
cv::GaussianBlur( gray_image, blured, cv::Size( 5, 5 ), 0, 0 );
cv::Canny(blured, canny, 0, 100);
cv::cvtColor( canny, canny_3d, CV_GRAY2BGR );
cv::inRange(canny_3d, cv::Scalar(255,255,255), cv::Scalar(255,255,255), in_range_mask);
canny_3d.setTo(cv::Scalar(0, 171, 255), in_range_mask);
cv::GaussianBlur( canny_3d, matrix, cv::Size( 5, 5 ), 0, 0 );
cv::bitwise_and(matrix, mask, matrix);
}
private:
cv::Mat gray_image, blured, canny, canny_3d, in_range_mask;
};
//Usage
blue_edges_filter apply_blue_edgess(1024, 576);
apply_blue_edgess(matrix, mask, inverted_mask);
You can reuse memory without allocation. Create temporal images:
// Same filter chain, but with only two scratch images that ping-pong:
// tmp[srcInd] always holds the latest result, each step writes into the
// other buffer tmp[InvInd()], and then srcInd is flipped to swap the roles.
void apply_blue_edgess(cv::Mat& matrix, cv::Mat& mask, cv::Mat& inverted_mask)
{
cv::Mat tmp[2];
// srcInd starts at 1 so the first write lands in tmp[0].
int srcInd = 1;
// Index of the buffer NOT holding the current source — the next destination.
auto InvInd = [&]() -> int { return srcInd ? 0 : 1; };
// BGR -> grayscale
cv::cvtColor( matrix, tmp[InvInd()], CV_BGR2GRAY );
srcInd = InvInd();
cv::GaussianBlur( tmp[srcInd], tmp[InvInd()], cv::Size( 5, 5 ), 0, 0 );
srcInd = InvInd();
cv::Canny(tmp[srcInd], tmp[InvInd()], 0, 100);
srcInd = InvInd();
// Gray edges back to 3 channels so they can be recolored.
cv::cvtColor( tmp[srcInd], tmp[InvInd()], CV_GRAY2BGR );
srcInd = InvInd();
// Mask of pure-white edge pixels goes into the spare buffer...
cv::inRange(tmp[srcInd], cv::Scalar(255,255,255), cv::Scalar(255,255,255), tmp[InvInd()]);
// ...and is used to repaint those pixels. No index flip here: setTo
// modifies tmp[srcInd] in place, so it stays the source.
tmp[srcInd].setTo(cv::Scalar(0, 171, 255), tmp[InvInd()]);
// Final blur writes straight into the caller's matrix.
cv::GaussianBlur( tmp[srcInd], matrix, cv::Size( 5, 5 ), 0, 0 );
cv::bitwise_and(matrix, mask, matrix);
}
Related
I got two histograms which I created with standard openCV function calcHist:
// Load `filename`, build 256-bin histograms of HSV channels 1 and 2
// (saturation and value), sum them, and draw the combined histogram.
// Returns -1 if the image cannot be read, 0 otherwise.
int getModels(string filename) {
    Mat src = imread(filename, 1);
    if(!src.data) { return -1; }
    Mat imageHSV;
    cvtColor(src, imageHSV, COLOR_BGR2HSV);
    // NOTE: despite the name, these planes are H, S, V — not B, G, R.
    vector<Mat> bgr_planes;
    split(imageHSV, bgr_planes);
    int histSize = 256;
    float range[] = { 0, 256 } ;
    const float* histRange = { range };
    bool uniform = true; bool accumulate = false;
    Mat g_hist, r_hist;
    calcHist( &bgr_planes[1], 1, 0, Mat(), g_hist, 1, &histSize, &histRange, uniform, accumulate );
    calcHist( &bgr_planes[2], 1, 0, Mat(), r_hist, 1, &histSize, &histRange, uniform, accumulate );
    // Element-wise sum of the two histograms.
    cv::Mat combined_hist = g_hist + r_hist;
    int hist_w = 512; int hist_h = 400;
    int bin_w = cvRound( (double) hist_w/histSize );
    Mat histImage(hist_h, hist_w, CV_8UC3, Scalar( 0,0,0) );
    // Scale bin counts into [0, hist_h] so every line fits inside the canvas
    // (raw counts can be far larger than the image height).
    normalize(combined_hist, combined_hist, 0, histImage.rows, NORM_MINMAX, -1, Mat());
    // BUG FIX: start at i = 1. The original started at i = 0 and read
    // combined_hist.at<float>(i-1), an out-of-bounds access at index -1.
    for( int i = 1; i < histSize; i++ )
    {
        // y is flipped (hist_h - value) so larger counts are drawn upward.
        line( histImage, Point( bin_w*(i-1), hist_h - cvRound(combined_hist.at<float>(i-1)) ) ,
              Point( bin_w*(i), hist_h - cvRound(combined_hist.at<float>(i)) ),
              Scalar( 19,90,87), 2, 8, 0 );
    }
    /// Display
    namedWindow(filename, CV_WINDOW_AUTOSIZE );
    imshow(filename, histImage );
    return 0;
}
Is there any way to merge them into one combined_hist histogram?
Yes, you can. OpenCV has cv::add which can be used like this:
cv::Mat combined_hist;
cv::add(g_hist, r_hist, combined_hist);
However, since cv::Mat has overloaded operators, you can just do this:
cv::Mat combined_hist = g_hist + r_hist;
Hope that helps you.
Hi, I am trying to find all the circles in the following image and identify the defect.
this is my code:
// For each of the three channels of a denoised copy of `image`: threshold
// the channel, run HoughCircles on it, and draw any detected circles.
// NOTE(review): `N` is not defined in this snippet — it must be a constant
// visible at file scope for this to compile. Verify its intended value.
static void findCircles2(const Mat& image)
{
    vector<Vec3f> circles;
    int thresh1 = 5;
    Mat pyr, timg, gray0(image.size(), CV_8U), gray;
    // Down-/up-sample once to suppress noise while keeping the size.
    pyrDown(image, pyr, Size(image.cols/2, image.rows/2));
    pyrUp(pyr, timg, image.size());
    for( int c = 0; c < 3; c++ )
    {
        // Copy channel c of timg into the single-channel gray0.
        int ch[] = {c, 0};
        mixChannels(&timg, 1, &gray0, 1, ch, 1);
        Canny(gray0, gray, 0, thresh1, 5);
        //dilate(gray, gray, Mat(), Point(-1,-1));
        // BUG FIX: the original assigned three thresholded images in a row
        // ((1)*255/N, (2)*255/N, (6)*255/N); the first two were dead stores,
        // each immediately overwritten. Only the surviving one is kept.
        // NOTE(review): this assignment also discards the Canny edge map
        // computed above — confirm whether edges or the threshold was meant.
        gray = gray0 >= (6)*255/N;
        namedWindow( "Hough Circle Transform Demo 1", CV_WINDOW_AUTOSIZE );
        imshow( "Hough Circle Transform Demo 1", gray );
        waitKey(0);
        HoughCircles( gray, circles, CV_HOUGH_GRADIENT, 1, gray.rows/8, 200, 100, 0, 0 );
        cout<<"size of circles: "<<circles.size()<<endl;
        // Draw each detection: filled center dot plus the circle outline.
        for( size_t i = 0; i < circles.size(); i++ )
        {
            Point center(cvRound(circles[i][0]), cvRound(circles[i][1]));
            int radius = cvRound(circles[i][2]);
            circle( gray, center, 3, Scalar(0,255,0), -1, 8, 0 );
            circle( gray, center, radius, Scalar(0,0,255), 3, 8, 0 );
        }
        /// Show your results
        namedWindow( "Hough Circle Transform Demo 2", CV_WINDOW_AUTOSIZE );
        imshow( "Hough Circle Transform Demo 2", gray );
        waitKey(0);
    }
}
Picture:
However, the code is unable to find anything. I played around with the thresholds, but it doesn't help. Please advise.
Development platform: VS2010, Opencv version: 2.4.10
Because the circles are so small and not that standard, you cannot just run HoughCircles on the binary image.
An alternative method is to findContours, then filter the contours by ratio between the value of contourArea and the value of minEnclosingCircle.
OK, I need to compute the distance between the cap (the top of the bottle) and the liquid. I am trying to read the pixel colors from the top down to the liquid. Any ideas on how I can take that measurement? (I am not sure how to use dilate properly.)
#include <opencv/cvaux.h>
#include <opencv/highgui.h>
#include <opencv/cxcore.h>
#include <stdio.h>
#include <stdlib.h>
using namespace cv;
using namespace std;
// Global state shared between main() and find_moments().
Mat src = imread("cocaCorte.jpg"); // input photo, loaded at startup
Mat srcTampa, srcLiquido, pixelColor; // cap mask, liquid mask, and their OR
Point3_<uchar>* p; // scratch pixel pointer used by the scan loop in main()
int posicaoX,posicaoY; // scan position; set to the cap centroid by find_moments()
void find_moments( Mat grey ); // fills posicaoX/posicaoY from the cap contours
// Segment the bottle cap and the liquid, merge both masks, then scan down
// from the cap centroid until the gap (a black pixel) is found.
int main(int argc, char* argv[])
{
    /// Create the display windows
    cvNamedWindow( "Original", CV_WINDOW_AUTOSIZE);
    cvNamedWindow( "Tampa Processada", CV_WINDOW_AUTOSIZE);
    cvNamedWindow( "Liquido Processado", CV_WINDOW_AUTOSIZE);
    cvNamedWindow( "Bitwise OR", CV_WINDOW_AUTOSIZE);
    /// Liquid processing: dark pixels are treated as liquid
    inRange( src, Scalar( 0, 0, 0), Scalar( 60, 60, 60), srcLiquido);
    GaussianBlur( srcLiquido, srcLiquido, Size( 7, 7), 5, 5);
    threshold( srcLiquido, srcLiquido, 100, 255, CV_THRESH_BINARY);
    // Six identical dilation rounds (2 iterations each) to close holes in
    // the mask — the original spelled these out as six copy-pasted calls.
    for (int round = 0; round < 6; round++)
        dilate( srcLiquido, srcLiquido, Mat(), Point(-1, -1), 2, 1, 1);
    /// Cap processing: segment the cap by its color range
    inRange( src, Scalar( 25, 20, 138), Scalar( 90, 115, 205), srcTampa);
    GaussianBlur( srcTampa, srcTampa, Size( 7, 7), 5, 5);
    threshold( srcTampa, srcTampa, 100, 255, CV_THRESH_BINARY);
    // Side effect: sets posicaoX/posicaoY to the first cap-contour centroid.
    find_moments( srcTampa);
    /// Merge the two masks
    bitwise_or( srcTampa, srcLiquido, pixelColor);
    /// Scan straight down from the cap centroid until a black pixel is hit.
    // BUG FIX: the original condition used assignment (=) instead of
    // comparison (==), so it was always false and the loop walked past the
    // end of the image; the scan is now also bounded by the image height.
    // NOTE(review): pixelColor is the OR of two single-channel inRange
    // masks (CV_8UC1), so it is read as uchar rather than as a 3-channel
    // Point3_ pixel as the original did.
    while (posicaoY < pixelColor.rows) {
        if (pixelColor.at<uchar>(posicaoY, posicaoX) == 0) {
            // that means the liquid was found
            break;
        }
        posicaoY++;
    }
    cout<<"FOUND"<<endl; //Print the result...
    /// Show the images
    imshow("Tampa Processada", srcTampa);
    imshow("Liquido Processado", srcLiquido);
    imshow("Bitwise OR", pixelColor);
    waitKey();
    destroyAllWindows();
    return 0;
}
// Find contours in `gray`, compute their mass centers, store the first
// center in the globals posicaoX/posicaoY, and draw all contours on `src`.
void find_moments( Mat gray )
{
    Mat canny_output;
    vector<vector<Point> > contours;
    vector<Vec4i> hierarchy;
    /// Detect edges using canny
    Canny( gray, canny_output, 50, 150, 3 );
    /// Find contours
    findContours( canny_output, contours, hierarchy, CV_RETR_TREE, CV_CHAIN_APPROX_SIMPLE, Point(0, 0) );
    /// Get the moments
    vector<Moments> mu(contours.size() );
    for( size_t i = 0; i < contours.size(); i++ )
    {
        mu[i] = moments( contours[i], false );
    }
    /// Get the mass centers
    vector<Point2f> mc( contours.size() );
    for( size_t i = 0; i < contours.size(); i++ )
    {
        // BUG FIX: guard m00 == 0 — degenerate contours would otherwise
        // divide by zero and produce NaN/inf centers.
        if( mu[i].m00 != 0 )
            mc[i] = Point2f( mu[i].m10/mu[i].m00 , mu[i].m01/mu[i].m00 );
        else
            mc[i] = Point2f( 0, 0 );
    }
    // BUG FIX: the original read mc[0] unconditionally, which crashes when
    // no contour was found.
    if( !mc.empty() )
    {
        posicaoX = mc[0].x;
        posicaoY = mc[0].y;
    }
    /// Draw contours
    for( size_t i = 0; i < contours.size(); i++ )
    {
        Scalar color = Scalar( 255, 0, 0);
        drawContours( src, contours, i, color, 2, 8, hierarchy, 0, Point() );
        circle(src, mc[i], 2, color, -1, 1, 0 );
    }
    /// Show in a window
    namedWindow( "Contours", CV_WINDOW_AUTOSIZE );
    imshow( "Contours", src );
}
I follow the instruction in this page: http://wiki.elphel.com/index.php?title=OpenCV_Tennis_balls_recognizing_tutorial
to detect the tennis ball. That code is meant to be run with Python, and it requires V4L/AVLD for the morphological operations: it uses the functions cvClose() and cvOpen() to dilate and erode the mask. I wrote my code in C++, so cvDilate() and cvErode() are used instead, but the result isn't as good as on that site.
Here is my result:output.jpg. (i'm sorry, because i don't have reputation enough to post image T_T)
Here is my full code:
#include "highgui.h"
#include "cv.h"
// Detect tennis balls: HSV color threshold, morphological cleanup, then a
// Hough circle transform on the smoothed mask. Detections are drawn on
// both the original image and the mask.
// BUG FIX: `void main()` is non-standard C++; main must return int.
int main()
{
    IplImage* img = cvLoadImage("tennis.jpg",1);
    CvSize size = cvGetSize(img);
    IplImage *hsv = cvCreateImage(size, IPL_DEPTH_8U, 3);
    cvCvtColor(img, hsv, CV_BGR2HSV);
    CvMat *mask = cvCreateMat(size.height, size.width, CV_8UC1);
    // Keep pixels inside the tennis-ball hue band with enough S and V.
    cvInRangeS(hsv, cvScalar(0.11*256, 0.60*256, 0.20*256, 0),
               cvScalar(0.14*256, 1.00*256, 1.00*256, 0), mask);
    cvReleaseImage(&hsv);
    // Erode with a large element, dilate with a smaller one: removes small
    // noise blobs while partially restoring the surviving shapes.
    IplConvKernel *se21 = cvCreateStructuringElementEx(21, 21, 10, 10, CV_SHAPE_RECT, NULL);
    IplConvKernel *se11 = cvCreateStructuringElementEx(11, 11, 5, 5, CV_SHAPE_RECT, NULL);
    cvErode(mask, mask, se21);
    cvDilate(mask, mask, se11);
    cvReleaseStructuringElement(&se21);
    cvReleaseStructuringElement(&se11);
    /* Copy mask into a grayscale image */
    IplImage *hough_in = cvCreateImage(size, 8, 1);
    cvCopy(mask, hough_in, NULL);
    cvSmooth(hough_in, hough_in, CV_GAUSSIAN, 15, 15, 0, 0);
    /* Run the Hough function */
    CvMemStorage *storage = cvCreateMemStorage(0);
    CvSeq *circles = cvHoughCircles(hough_in, storage,CV_HOUGH_GRADIENT, 4, size.height/10, 100, 40, 0, 0);
    int i;
    for (i = 0; i < circles->total; i++) {
        float *p = (float*)cvGetSeqElem(circles, i);
        CvPoint center = cvPoint(cvRound(p[0]),cvRound(p[1]));
        CvScalar val = cvGet2D(mask, center.y, center.x);
        // Skip candidate centers that fall outside the color mask.
        if (val.val[0] < 1) continue;
        cvCircle(img, center, 3, CV_RGB(0,255,0), -1, CV_AA, 0);
        cvCircle(img, center, cvRound(p[2]), CV_RGB(255,0,0), 3, CV_AA, 0);
        cvCircle(mask, center, 3, CV_RGB(0,255,0), -1, CV_AA, 0);
        cvCircle(mask, center, cvRound(p[2]), CV_RGB(255,0,0), 3, CV_AA, 0);
    }
    // BUG FIX: the original released `storage` BEFORE the loop above, but
    // the `circles` sequence is allocated inside that storage, so the loop
    // was reading freed memory. Release it only after the last use.
    cvReleaseMemStorage(&storage);
    cvNamedWindow( "Output", CV_WINDOW_AUTOSIZE );
    cvShowImage( "Output", img );
    cvNamedWindow( "mask", CV_WINDOW_AUTOSIZE );
    cvShowImage( "mask", mask );
    cvWaitKey(0);
    // Release remaining resources (the original leaked all of these).
    cvReleaseImage(&hough_in);
    cvReleaseMat(&mask);
    cvReleaseImage(&img);
    return 0;
}
Can someone help me to get V4L/AVLD and improve this code ? thank you very much.
V4L/AVLD is for webcam. It doesn't have anything to do with the code or algorithm. http://allonlinux.free.fr/Projets/AVLD/
If you are using Linux, v4l-utls package needs to be installed in order to use webcam.
I hope you could help me.
I'm using Qt and trying to do a simple edge detection on an image, but my program crashes when I launch
cv::GaussianBlur( src, src, cv::Size(3,3), 0, 0, cv::BORDER_DEFAULT );
or
cv::Sobel( src_gray, grad_x, ddepth, 1, 0, 3, scale, delta, cv::BORDER_DEFAULT );
here is my code:
// NOTE(review): this is the crashing snippet. image1 is allocated with
// `new` and never deleted (leak). The QImage is ARGB32 — 4 channels — so
// the IplImage header produced by QImage2IplImage presumably wraps
// 4-channel data; TODO confirm that header matches what the cv:: filters
// below expect, since a mismatched header is the likely crash cause.
QImage *image1;
IplImage *cv_image1;
image1 = new QImage("./image.png"); // Format is ARGB32
cv_image1 = QImage2IplImage(image1);
// Wraps the IplImage data in a cv::Mat header (no copy).
cv::Mat src(cv_image1);
cv::imshow(window_name, src); // Work Well
cv::Mat src_gray;
int scale = 1;
int delta = 0;
int ddepth = CV_16S;
cv::GaussianBlur(src, src, cv::Size(3,3), 0, 0, cv::BORDER_DEFAULT); //Crash Here
cv::imshow( window_name, src);
I think it was a problem with the image format.
But in another program, with QImages in ARGB32, this code works well.
Thank you.
Try going with proper QImage to cv::Mat conversion using this functions and you should be fine (I also included a conversion from cv::Mat to QImage):
// Wrap the QImage's ARGB32 pixel buffer without copying, then strip the
// alpha channel into a fresh 3-channel matrix that owns its own data, so
// the result stays valid after the QImage goes away.
cv::Mat cvmat_from_qimage(const QImage& qimage)
{
    cv::Mat argb(qimage.height(), qimage.width(), CV_8UC4,
                 (uchar*)qimage.bits(), qimage.bytesPerLine());
    cv::Mat bgr(argb.rows, argb.cols, CV_8UC3);
    // Copy channels 0..2 across, dropping the alpha channel.
    int channel_map[] = { 0,0, 1,1, 2,2 };
    cv::mixChannels(&argb, 1, &bgr, 1, channel_map, 3);
    return bgr;
}
// Convert a BGR cv::Mat into an RGB QImage.
QImage qimage_from_cvmat(const cv::Mat& mat)
{
    cv::Mat rgb;
    cvtColor(mat, rgb, CV_BGR2RGB);
    // BUG FIX: the original returned a QImage that pointed directly at
    // rgb.data, but `rgb` is a local destroyed when this function returns,
    // leaving the QImage with a dangling pointer. Pass the row stride
    // (rgb.step — QImage otherwise assumes 32-bit-aligned rows) and
    // deep-copy the pixels with QImage::copy() so the result owns its data.
    return QImage((const unsigned char*)(rgb.data), rgb.cols, rgb.rows,
                  (int)rgb.step, QImage::Format_RGB888).copy();
}
I Found a solution.
That's weird, but when I do:
// Grayscale conversion followed by Sobel; the only change from the
// crashing version is the last argument: cv::BORDER_CONSTANT replaces the
// default border mode.
cvtColor(src, src_gray, CV_RGB2GRAY );
cv::Sobel(src_gray, grad_x, ddepth, 1, 0, 3, scale, delta, cv::BORDER_CONSTANT);
without the cv::GaussianBlur it works well. I just change the last parameter to cv::BORDER_CONSTANT