Trying contour matching, trouble with cv::Mat and IplImage - C++

I am doing a contour-matching test.
Here I use an image called "refshape.bmp"
(link: https://www.dropbox.com/s/06hrjji49uyid4w/refshape.bmp?dl=0)
and an image called "2.bmp"
(link: https://www.dropbox.com/s/5t73mvbdfbtqvs1/2.BMP?dl=0)
to do this test.
The code is below; it has two parts:
Part 1: rotate the "refshape.bmp" image.
Part 2: match the contours and mark the match with a red line.
(Each part works successfully on its own!)
But I have a problem with the conversion between cv::Mat and IplImage.
There is an overflow warning: link: www.dropbox.com/s/mne4u3va94svx8y/%E6%93%B7%E5%8F%96.JPG?dl=0
In the first part there is a cv::Mat image "dst",
which I then convert to an IplImage by: "IplImage* reference = &copy;"
#include <stdlib.h>
#include <iostream>
#include "time.h"
#include "highgui.h"
#include "cv.h"
using namespace std;
int comp(const void *p,const void *q)
{
return (*(int *)p - *(int *)q);
}
int main()
{
int i = 0;
cv::Mat src = cv::imread("refshape.bmp", CV_LOAD_IMAGE_UNCHANGED);
int angle = -i;
// get rotation matrix for rotating the image around its center
cv::Point2f center(src.cols/2.0, src.rows/2.0);
cv::Mat rot = cv::getRotationMatrix2D(center, angle, 1.0);
// determine bounding rectangle
cv::Rect bbox = cv::RotatedRect(center,src.size(), angle).boundingRect();
// adjust transformation matrix
rot.at<double>(0,2) += bbox.width/2.0 - center.x;
rot.at<double>(1,2) += bbox.height/2.0 - center.y;
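// (this shift re-centers the rotated image inside the enlarged bounding box)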
cv::Mat dst;
cv::warpAffine(src, dst, rot, bbox.size());
IplImage copy = dst;
IplImage* input = NULL;
IplImage* input_canny = NULL;
IplImage* input_final = NULL;
//IplImage* reference = NULL;
IplImage* input_gray = NULL;
IplImage* reference_gray = NULL;
IplImage* find_contour = NULL;
IplImage* reference = &copy;
//width of the image
int x_min = 229;
int x_max = 0;
//height of the image
int y_min = 111;
int y_max = 0;
int n = 0;
//reference = cvLoadImage("refshape.bmp",1);// load the image file
input = cvLoadImage("2.bmp",1);// load the image file
input_canny=cvCreateImage(cvSize(input->width, input->height), IPL_DEPTH_8U,1);// grayscale image for Canny
input_final=cvCreateImage(cvSize(input->width, input->height), IPL_DEPTH_8U,3);// RGB image for the Canny result
cvCvtColor(input, input_canny, CV_BGR2GRAY);// convert to grayscale
cvCanny(input_canny,input_canny,80,150,3);// Canny edge
cvCvtColor(input_canny, input_final, CV_GRAY2BGR);// convert the Canny grayscale result to RGB
reference_gray = cvCreateImage(cvSize(reference->width, reference->height), IPL_DEPTH_8U,1);
input_gray = cvCreateImage(cvSize(input->width, input->height), IPL_DEPTH_8U,1);
CvMemStorage* storage = cvCreateMemStorage(0);
CvSeq *contour = 0;
//cvFindContours only works on grayscale images, so convert to grayscale first
cvCvtColor(reference, reference_gray, CV_BGR2GRAY);
cvFindContours(reference_gray, storage, &contour, sizeof(CvContour), CV_RETR_LIST , CV_CHAIN_APPROX_NONE, cvPoint(0,0));
//array used to store the positions of the contour points
CvPoint* PointArray[50000]={0};
//store the coordinates of every point of every contour level in PointArray, and find the min/max x and y over all sample points
for( CvSeq* c = contour; c != NULL; c=c->h_next )
{
for( int i = 0; i<c->total; i++ )
{
PointArray[n] = CV_GET_SEQ_ELEM( CvPoint, c, i );
if(PointArray[n]->x < x_min)
{
x_min = PointArray[n]->x;
}
if(PointArray[n]->y < y_min)
{
y_min = PointArray[n]->y;
}
if(PointArray[n]->x > x_max)
{
x_max = PointArray[n]->x;
}
if(PointArray[n]->y > y_max)
{
y_max = PointArray[n]->y;
}
n+=1;
}
}
CvScalar s,t;
int match_x;
int match_y;
// Contour matching
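// Brute-force search: slide the reference contour over every (i, j) offset
// of the Canny edge image, count how many contour points land on an edge
// pixel, and keep the offset with the highest count.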
int x;
int y;
int matchcount=0;
int maxcount=0;
for(int i=0;i<780;i++)
{
for(int j=0;j<630;j++)
{
matchcount=0;
for(int a = 0; a < n; a++)
{
s = cvGet2D(input_final, PointArray[a]->y -y_min+j, PointArray[a]->x -x_min+i);
t = cvGet2D(reference,PointArray[a]->y,PointArray[a]->x);
if(s.val[0]==255 && t.val[0]==255)
matchcount++;
}
if(matchcount>maxcount)
{
maxcount=matchcount;
match_x =i ;
match_y =j ;
}
}
}
system("pause");
//once the position with the most matches is found, set the drawing color and mark those points
for(int a = 0; a < n; a++)
{
t.val[0] = 0;
t.val[1] = 0;
t.val[2] = 255;
//mark with color
cvSet2D(input_final, PointArray[a]->y-y_min+match_y, PointArray[a]->x-x_min+match_x, t);
}
system("pause");
cvNamedWindow("reference_gray",1);
cvNamedWindow("reference",1);
cvNamedWindow("input",1);
cvShowImage("reference_gray",reference_gray);
cvShowImage("reference",reference);
cvShowImage("input",input_final);
cvSaveImage("result.bmp",input_final);
system("pause");
cvWaitKey(0);
return 0;
}
Here are the separate pieces of code.
Rotation:
#include "opencv2/opencv.hpp"
#include <sstream>
int main()
{
for (int i=0;i<361;i++)
{
cv::Mat src = cv::imread("refshape.bmp", CV_LOAD_IMAGE_UNCHANGED);
int angle = -i;
// get rotation matrix for rotating the image around its center
cv::Point2f center(src.cols/2.0, src.rows/2.0);
cv::Mat rot = cv::getRotationMatrix2D(center, angle, 1.0);
// determine bounding rectangle
cv::Rect bbox = cv::RotatedRect(center,src.size(), angle).boundingRect();
// adjust transformation matrix
rot.at<double>(0,2) += bbox.width/2.0 - center.x;
rot.at<double>(1,2) += bbox.height/2.0 - center.y;
cv::Mat dst;
cv::warpAffine(src, dst, rot, bbox.size());
std::ostringstream name;
name << "rotated_im_" << i << ".png";
cv::imwrite(name.str(), dst);
}
return 0;
}
Here is the contour-matching code:
#include <stdlib.h>
#include <iostream>
#include "time.h"
#include "highgui.h"
#include "cv.h"
using namespace std;
int comp(const void *p,const void *q)
{
return (*(int *)p - *(int *)q);
}
int main()
{
IplImage* input = NULL;
IplImage* input_canny = NULL;
IplImage* input_final = NULL;
IplImage* reference = NULL;
IplImage* input_gray = NULL;
IplImage* reference_gray = NULL;
IplImage* find_contour = NULL;
//width of the image
int x_min = 229;
int x_max = 0;
//height of the image
int y_min = 111;
int y_max = 0;
int n = 0;
reference = cvLoadImage("refshape.bmp",1);// load the image file
input = cvLoadImage("2.bmp",1);// load the image file
input_canny=cvCreateImage(cvSize(input->width, input->height), IPL_DEPTH_8U,1);// grayscale image for Canny
input_final=cvCreateImage(cvSize(input->width, input->height), IPL_DEPTH_8U,3);// RGB image for the Canny result
cvCvtColor(input, input_canny, CV_BGR2GRAY);// convert to grayscale
cvCanny(input_canny,input_canny,80,150,3);// Canny edge
cvCvtColor(input_canny, input_final, CV_GRAY2BGR);// convert the Canny grayscale result to RGB
reference_gray = cvCreateImage(cvSize(reference->width, reference->height), IPL_DEPTH_8U,1);
input_gray = cvCreateImage(cvSize(input->width, input->height), IPL_DEPTH_8U,1);
CvMemStorage* storage = cvCreateMemStorage(0);
CvSeq *contour = 0;
//cvFindContours only works on grayscale images, so convert to grayscale first
cvCvtColor(reference, reference_gray, CV_BGR2GRAY);
cvFindContours(reference_gray, storage, &contour, sizeof(CvContour), CV_RETR_LIST , CV_CHAIN_APPROX_NONE, cvPoint(0,0));
//array used to store the positions of the contour points
CvPoint* PointArray[5000]={0};
//store the coordinates of every point of every contour level in PointArray, and find the min/max x and y over all sample points
for( CvSeq* c = contour; c != NULL; c=c->h_next )
{
for( int i = 0; i<c->total; i++ )
{
PointArray[n] = CV_GET_SEQ_ELEM( CvPoint, c, i );
if(PointArray[n]->x < x_min)
{
x_min = PointArray[n]->x;
}
if(PointArray[n]->y < y_min)
{
y_min = PointArray[n]->y;
}
if(PointArray[n]->x > x_max)
{
x_max = PointArray[n]->x;
}
if(PointArray[n]->y > y_max)
{
y_max = PointArray[n]->y;
}
n+=1;
}
}
CvScalar s,t;
int match_x;
int match_y;
// Contour matching
int x;
int y;
int matchcount=0;
int maxcount=0;
for(int i=0;i<780;i++)
{
for(int j=0;j<630;j++)
{
matchcount=0;
for(int a = 0; a < n; a++)
{
s = cvGet2D(input_final, PointArray[a]->y -y_min+j, PointArray[a]->x -x_min+i);
t = cvGet2D(reference,PointArray[a]->y,PointArray[a]->x);
if(s.val[0]==255 && t.val[0]==255)
matchcount++;
}
if(matchcount>maxcount)
{
maxcount=matchcount;
match_x =i ;
match_y =j ;
}
}
}
system("pause");
//once the position with the most matches is found, set the drawing color and mark those points
for(int a = 0; a < n; a++)
{
t.val[0] = 0;
t.val[1] = 0;
t.val[2] = 255;
//mark with color
cvSet2D(input_final, PointArray[a]->y-y_min+match_y, PointArray[a]->x-x_min+match_x, t);
}
system("pause");
cvNamedWindow("reference_gray",1);
cvNamedWindow("reference",1);
cvNamedWindow("input",1);
cvShowImage("reference_gray",reference_gray);
cvShowImage("reference",reference);
cvShowImage("input",input_final);
cvSaveImage("result.bmp",input_final);
system("pause");
cvWaitKey(0);
}

Related

Coding a dedicated average filter function for color images in C++ OpenCV

So basically, I have to write my own function in C++ with OpenCV that applies an average filter to both grayscale and color images.
The function takes a Mat object and the size of the averaging filter (e.g., 3 for a 3x3 filter kernel) and returns a Mat object.
This is what I have so far; it doesn't work, and I don't know how to extend it to color.
#include <opencv2/opencv.hpp>
using namespace cv;
using namespace std;
Mat filtrageMoyen(Mat image, int tailleZonage) {
Mat imageRetour;
imageRetour = image.clone();
Scalar intensite = 0;
int cadrillage = tailleZonage / 2;
int valeurMoyenne = 0;
for (size_t x = 0; x < imageRetour.rows; x++)
{
for (size_t y = 0; y < imageRetour.cols; y++)
{
for (size_t xZonage = 0; xZonage < cadrillage; xZonage++)
{
for (size_t yZonage = 0; yZonage < cadrillage; yZonage++)
{
valeurMoyenne += (image.at<unsigned char>(x+xZonage, y + yZonage));
}
}
imageRetour.at<unsigned char>(x, y) = valeurMoyenne;
valeurMoyenne = 0;
}
}
return imageRetour;
}
int main() {
Mat img;
string filename = "imageRickRoll.png";
img = imread(filename, cv::IMREAD_GRAYSCALE);
imshow("Image filtree", filtrageMoyen(img, 5));
waitKey(0);
return 0;
}
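For comparison, here is a minimal sketch of a mean filter that handles both grayscale and color 8-bit images (the names are mine; unlike the code above, it clamps at the image borders and divides the sum by the number of sampled pixels):
#include <opencv2/opencv.hpp>
// Mean filter over a k x k window, for 8-bit 1- or 3-channel images.
cv::Mat meanFilter(const cv::Mat& image, int k) {
    cv::Mat out = image.clone();
    int r = k / 2;
    for (int y = 0; y < image.rows; y++) {
        for (int x = 0; x < image.cols; x++) {
            cv::Vec3d sum(0, 0, 0);
            int count = 0;
            for (int dy = -r; dy <= r; dy++) {
                for (int dx = -r; dx <= r; dx++) {
                    int yy = y + dy, xx = x + dx;
                    if (yy < 0 || yy >= image.rows || xx < 0 || xx >= image.cols)
                        continue; // skip samples that fall outside the image
                    if (image.channels() == 1) {
                        sum[0] += image.at<unsigned char>(yy, xx);
                    } else {
                        cv::Vec3b px = image.at<cv::Vec3b>(yy, xx);
                        sum[0] += px[0]; sum[1] += px[1]; sum[2] += px[2];
                    }
                    count++;
                }
            }
            if (image.channels() == 1)
                out.at<unsigned char>(y, x) = (unsigned char)(sum[0] / count);
            else
                out.at<cv::Vec3b>(y, x) = cv::Vec3b(
                    (unsigned char)(sum[0] / count),
                    (unsigned char)(sum[1] / count),
                    (unsigned char)(sum[2] / count));
        }
    }
    return out;
}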

Finding only big blobs on image

Hello, I'm trying to find characters in this image.
This is my image after some preprocessing.
Now I'm trying to do connected-component labeling to find the blobs; however, I get a lot of small blobs too.
#include <iostream>
#include <vector>
#include <stdio.h>
#include <stdlib.h>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
using namespace cv;
using namespace std;
void FindBlobs(const Mat &binary, vector < vector<Point2i> > &blobs);
int main(int argc, char **argv)
{
Mat img = imread("adaptive.png", 0);
if(!img.data) {
cout << "File not found" << endl;
return -1;
}
namedWindow("binary");
namedWindow("labelled");
Mat output = Mat::zeros(img.size(), CV_8UC3);
Mat binary;
vector < vector<Point2i > > blobs;
threshold(img, binary, 0, 1, THRESH_BINARY_INV);
FindBlobs(binary, blobs);
// Randomly color the blobs
for(size_t i=0; i < blobs.size(); i++) {
unsigned char r = 255 * (rand()/(1.0 + RAND_MAX));
unsigned char g = 255 * (rand()/(1.0 + RAND_MAX));
unsigned char b = 255 * (rand()/(1.0 + RAND_MAX));
for(size_t j=0; j < blobs[i].size(); j++) {
int x = blobs[i][j].x;
int y = blobs[i][j].y;
output.at<Vec3b>(y,x)[0] = b;//Vec3b RGB color order
output.at<Vec3b>(y,x)[1] = g;
output.at<Vec3b>(y,x)[2] = r;
}
}
imshow("binary", img);
imshow("labelled", output);
waitKey(0);
return 0;
}
void FindBlobs(const Mat &binary, vector < vector<Point2i> > &blobs)
{
blobs.clear();
Mat label_image;
binary.convertTo(label_image, CV_32SC1);
int label_count = 2; // starts at 2 because 0,1 are used already
for(int y=0; y < label_image.rows; y++) {
int *row = (int*)label_image.ptr(y);
for(int x=0; x < label_image.cols; x++) {
if(row[x] != 1) {
continue;
}
Rect rect;
floodFill(label_image, Point(x,y), label_count, &rect, 0, 0, 4);
vector <Point2i> blob;
for(int i=rect.y; i < (rect.y+rect.height); i++) {
int *row2 = (int*)label_image.ptr(i);
for(int j=rect.x; j < (rect.x+rect.width); j++) {
if(row2[j] != label_count) {
continue;
}
blob.push_back(Point2i(j,i));
}
}
blobs.push_back(blob);
label_count++;
}
}
}
So with this algorithm I receive blobs,
but when I do
if(blobs.size()>50) {
blob.push_back(Point2i(j,i));
}
I receive a black screen. However, when I try
if(blob.size()<50){
blob.push_back(Point2i(j,i));
}
I receive small blobs. What could the actual problem be here?
I guess you want to keep those "big" blobs?
If so, change the following code
blobs.push_back(blob);
label_count++;
to this:
if(blob.size() > 50){
blobs.push_back(blob);
}
label_count++;
And you will get a picture like this:

Different results with cvDFT and DFT in OpenCV 2.4.8

I'm having problems with the DFT function in OpenCV 2.4.8 for C++.
I used an image of a sine curve with 10 periods to compare the old cvDFT() with the newer C++ function dft() (one-dimensional DFT, row-wise).
The old version gives me logical results: a very high peak at pixels 0 and 10, the rest being almost 0.
The new version gives me strange results, with peaks all over the spectrum.
Here is my code:
#include "stdafx.h"
#include <opencv2\core\core_c.h>
#include <opencv2\core\core.hpp>
#include <opencv2\imgproc\imgproc_c.h>
#include <opencv2\imgproc\imgproc.hpp>
#include <opencv2\highgui\highgui_c.h>
#include <opencv2\highgui\highgui.hpp>
#include <opencv2\legacy\compat.hpp>
using namespace cv;
void OldMakeDFT(Mat original, double* result)
{
const int width = original.cols;
const int height = 1;
IplImage* fftBlock = cvCreateImage(cvSize(width, height), IPL_DEPTH_8U, 1);
IplImage* imgReal = cvCreateImage(cvSize(width, height), IPL_DEPTH_32F, 1);
IplImage* imgImag = cvCreateImage(cvSize(width, height), IPL_DEPTH_32F, 1);
IplImage* imgDFT = cvCreateImage(cvSize(width, height), IPL_DEPTH_32F, 2);
Rect roi(0, 0, width, 1);
Mat image_roi = original(roi);
fftBlock->imageData = (char*)image_roi.data;
//cvSaveImage("C:/fftBlock1.png", fftBlock);
cvConvert(fftBlock, imgReal);
cvMerge(imgReal, imgImag, NULL, NULL, imgDFT);
cvDFT(imgDFT, imgDFT, (CV_DXT_FORWARD | CV_DXT_ROWS));
cvSplit(imgDFT, imgReal, imgImag, NULL, NULL);
double re,imag;
for (int i = 0; i < width; i++)
{
re = ((float*)imgReal->imageData)[i];
imag = ((float*)imgImag->imageData)[i];
result[i] = re * re + imag * imag;
}
cvReleaseImage(&imgReal);
cvReleaseImage(&imgImag);
cvReleaseImage(&imgDFT);
cvReleaseImage(&fftBlock);
}
void MakeDFT(Mat original, double* result)
{
const int width = original.cols;
const int height = 1;
Mat fftBlock(1,width, CV_8UC1);
Rect roi(0, 0, width, height);
Mat image_roi = original(roi);
image_roi.copyTo(fftBlock);
//imwrite("C:/fftBlock2.png", fftBlock);
Mat planes[] = {Mat_<float>(fftBlock), Mat::zeros(fftBlock.size(), CV_32F)};
Mat complexI;
merge(planes, 2, complexI);
dft(complexI, complexI, DFT_ROWS); //also tried with DFT_COMPLEX_OUTPUT | DFT_ROWS
split(complexI, planes);
double re, imag;
for (int i = 0; i < width; i++)
{
re = (float)planes[0].data[i];
imag = (float)planes[1].data[i];
result[i] = re * re + imag * imag;
}
}
bool SinusFFTTest()
{
const int size = 1024;
Mat sinTest(size,size,CV_8UC1, Scalar(0));
const int n_sin_curves = 10;
double deg_step = (double)n_sin_curves*360/size;
for (int j = 0; j < size; j++)
{
for (int i = 0; i <size; i++)
{
sinTest.data[j*size+i] = 127.5 * sin(i*deg_step*CV_PI/180) + 127.5;
}
}
double* result1 = new double[size];
double* result2 = new double[size];
OldMakeDFT(sinTest,result1);
MakeDFT(sinTest,result2);
bool identical = true;
for (int i = 0; i < size; i++)
{
if (abs(result1[i] - result2[i]) > 1000)
{
identical = false;
break;
}
}
delete[] result1;
delete[] result2;
return identical;
}
int _tmain(int argc, _TCHAR* argv[])
{
if (SinusFFTTest())
{
printf("identical");
}
else
{
printf("different");
}
getchar();
return 0;
}
Could someone explain the difference?
Note that imgImag in OldMakeDFT() is never filled with zeros; cvCreateImage does not initialize the buffer by default.
The bug is in the MakeDFT() function:
re = (float)planes[0].data[i];
imag = (float)planes[1].data[i];
The type of data[i] is uchar, and casting that value to float does not read the underlying 32-bit float elements.
The fix:
re = planes[0].at<float>(0,i);
imag = planes[1].at<float>(0,i);
After this change, the old and the new DFT versions give the same results. Alternatively, you can use cv::magnitude() instead of calculating the sum of squares of re and imag:
Mat magn;
magnitude(planes[0], planes[1], magn);
for (int i = 0; i < width; i++)
result[i] = pow(magn.at<float>(0,i),2);
This also gives the same result as the old cvDFT.

How to resize the box to a fixed size when using boundingRect?

In this project I want to use the contour number (cnum) as the index of each object so I can use it in the next processing step (I'm working on false-positive human detection using blob tracking). I want to resize the boundingRect that is passed to detectMultiScale; how do I do that?
Here is my code:
int main(int argc, char *argv[])
{
cv::Mat frame;
cv::Mat blurred;
cv::Mat fg;
cv::Mat bgmodel;
cv::Mat threshfg;
cv::Mat blob;
int pixblob = 0;
int tot_bgr = 0;
int tot_ex_bgr = 0;
int green0 = 0;
int green1 = 0;
int green2 = 0;
int green3 = 0;
cv::namedWindow("Frame");
cv::namedWindow("Background Model");
cv::namedWindow("Blob");
cv::VideoCapture cap("campus.avi");
cv::BackgroundSubtractorMOG2 bgs;
bgs.nmixtures = 3;
bgs.history = 500;
bgs.bShadowDetection = true;
bgs.nShadowDetection = 0;
bgs.fTau = 0.25;
std::vector<std::vector<cv::Point>> contours;
cv::CascadeClassifier human;
assert(human.load("hogcascade_pedestrians.xml"));
for(;;){
cap >> frame;
cv::GaussianBlur(frame,blurred,cv::Size(3,3),0,0,cv::BORDER_DEFAULT);
bgs.operator()(blurred,fg);
bgs.getBackgroundImage(bgmodel);
cv::erode(fg,fg,cv::Mat(),cv::Point(-1,-1),1);
cv::dilate(fg,fg,cv::Mat(),cv::Point(-1,-1),3);
cv::threshold(fg,threshfg,70.0f,255,CV_THRESH_BINARY);
cv::findContours(threshfg,contours,CV_RETR_EXTERNAL,CV_CHAIN_APPROX_SIMPLE);
cv::cvtColor(threshfg,blob,CV_GRAY2RGB);
cv::drawContours(blob,contours,-1,cv::Scalar(255,255,255),CV_FILLED,8);
int cmin = 20;
int cmax = 1000;
bool FOD1 = true;
bool FOD2 = true;
std::vector<cv::Rect> rects;
for(int cnum = 0; cnum < contours.size(); cnum++){
if(contours[cnum].size() > cmin && contours[cnum].size() < cmax){
human.detectMultiScale(frame(cv::boundingRect(contours[cnum])),rects); // I want to resize the box here!
if(rects.size() > 0){
cv::Rect r0 = cv::boundingRect(contours[cnum]);
cv::rectangle(frame,
r0,
cv::Scalar(255, 0, 0));
cv::putText(frame,
"HUMAN",
cv::Point(r0.x + r0.width / 2, r0.y + r0.height / 2),
cv::FONT_HERSHEY_SIMPLEX,
0.5,
cv::Scalar(0,0,255),
2,
8);
}
}
}
cv::imshow("Frame",frame);
cv::imshow("Background Model",bgmodel);
cv::imshow("Blob",blob);
if(cv::waitKey(30) >= 0) break;
}
return 0;
}
I'll appreciate any help here. Thanks :)
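One possible approach (a sketch, not tested against this video; the margin value is hypothetical): grow the cv::Rect by a margin and intersect it with the frame bounds so the ROI stays inside the image:
cv::Rect r = cv::boundingRect(contours[cnum]);
int margin = 10;                             // hypothetical padding; tune for your footage
r.x -= margin;
r.y -= margin;
r.width += 2 * margin;
r.height += 2 * margin;
r &= cv::Rect(0, 0, frame.cols, frame.rows); // clip so frame(r) stays inside the image
human.detectMultiScale(frame(r), rects);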

How to get extra information of blobs with SimpleBlobDetector?

@robot_sherrick answered this question for me; this is a follow-up question to his answer.
cv::SimpleBlobDetector in OpenCV 2.4 looks very exciting, but I am not sure I can make it work for more detailed data extraction.
I have the following concerns:
if this only returns center of the blob, I can't have an entire, labelled Mat, can I?
how can I access the features of the detected blobs like area, convexity, color and so on?
can I display an exact segmentation with this? (like with, say, watershed)
So the code should look something like this:
cv::Mat inputImg = imread(image_file_name, CV_LOAD_IMAGE_COLOR); // Read a file
cv::SimpleBlobDetector::Params params;
params.minDistBetweenBlobs = 10.0; // minimum 10 pixels between blobs
params.filterByArea = true; // filter my blobs by area of blob
params.minArea = 20.0; // min 20 pixels squared
params.maxArea = 500.0; // max 500 pixels squared
SimpleBlobDetector myBlobDetector(params);
std::vector<cv::KeyPoint> myBlobs;
myBlobDetector.detect(inputImg, myBlobs);
If you then want to have these keypoints highlighted on your image:
cv::Mat blobImg;
cv::drawKeypoints(inputImg, myBlobs, blobImg);
cv::imshow("Blobs", blobImg);
To access the info in the keypoints, you then just access each element like so:
for(std::vector<cv::KeyPoint>::iterator blobIterator = myBlobs.begin(); blobIterator != myBlobs.end(); blobIterator++){
std::cout << "size of blob is: " << blobIterator->size << std::endl;
std::cout << "point is at: " << blobIterator->pt.x << " " << blobIterator->pt.y << std::endl;
}
Note: this has not been compiled and may have typos.
Here is a version that will allow you to get the last contours back, via the getContours() method. They will match up by index to the keypoints.
class BetterBlobDetector : public cv::SimpleBlobDetector
{
public:
BetterBlobDetector(const cv::SimpleBlobDetector::Params &parameters = cv::SimpleBlobDetector::Params());
const std::vector < std::vector<cv::Point> > getContours();
protected:
virtual void detectImpl( const cv::Mat& image, std::vector<cv::KeyPoint>& keypoints, const cv::Mat& mask=cv::Mat()) const;
virtual void findBlobs(const cv::Mat &image, const cv::Mat &binaryImage,
std::vector<Center> &centers, std::vector < std::vector<cv::Point> >&contours) const;
};
Then in the .cpp file:
using namespace cv;
BetterBlobDetector::BetterBlobDetector(const SimpleBlobDetector::Params &parameters)
: SimpleBlobDetector(parameters) // forward the parameters to the base class
{
}
void BetterBlobDetector::findBlobs(const cv::Mat &image, const cv::Mat &binaryImage,
vector<Center> &centers, std::vector < std::vector<cv::Point> >&curContours) const
{
(void)image;
centers.clear();
curContours.clear();
std::vector < std::vector<cv::Point> >contours;
Mat tmpBinaryImage = binaryImage.clone();
findContours(tmpBinaryImage, contours, CV_RETR_LIST, CV_CHAIN_APPROX_NONE);
for (size_t contourIdx = 0; contourIdx < contours.size(); contourIdx++)
{
Center center;
center.confidence = 1;
Moments moms = moments(Mat(contours[contourIdx]));
if (params.filterByArea)
{
double area = moms.m00;
if (area < params.minArea || area >= params.maxArea)
continue;
}
if (params.filterByCircularity)
{
double area = moms.m00;
double perimeter = arcLength(Mat(contours[contourIdx]), true);
double ratio = 4 * CV_PI * area / (perimeter * perimeter);
if (ratio < params.minCircularity || ratio >= params.maxCircularity)
continue;
}
if (params.filterByInertia)
{
double denominator = sqrt(pow(2 * moms.mu11, 2) + pow(moms.mu20 - moms.mu02, 2));
const double eps = 1e-2;
double ratio;
if (denominator > eps)
{
double cosmin = (moms.mu20 - moms.mu02) / denominator;
double sinmin = 2 * moms.mu11 / denominator;
double cosmax = -cosmin;
double sinmax = -sinmin;
double imin = 0.5 * (moms.mu20 + moms.mu02) - 0.5 * (moms.mu20 - moms.mu02) * cosmin - moms.mu11 * sinmin;
double imax = 0.5 * (moms.mu20 + moms.mu02) - 0.5 * (moms.mu20 - moms.mu02) * cosmax - moms.mu11 * sinmax;
ratio = imin / imax;
}
else
{
ratio = 1;
}
if (ratio < params.minInertiaRatio || ratio >= params.maxInertiaRatio)
continue;
center.confidence = ratio * ratio;
}
if (params.filterByConvexity)
{
vector < Point > hull;
convexHull(Mat(contours[contourIdx]), hull);
double area = contourArea(Mat(contours[contourIdx]));
double hullArea = contourArea(Mat(hull));
double ratio = area / hullArea;
if (ratio < params.minConvexity || ratio >= params.maxConvexity)
continue;
}
center.location = Point2d(moms.m10 / moms.m00, moms.m01 / moms.m00);
if (params.filterByColor)
{
if (binaryImage.at<uchar> (cvRound(center.location.y), cvRound(center.location.x)) != params.blobColor)
continue;
}
//compute blob radius
{
vector<double> dists;
for (size_t pointIdx = 0; pointIdx < contours[contourIdx].size(); pointIdx++)
{
Point2d pt = contours[contourIdx][pointIdx];
dists.push_back(norm(center.location - pt));
}
std::sort(dists.begin(), dists.end());
center.radius = (dists[(dists.size() - 1) / 2] + dists[dists.size() / 2]) / 2.;
}
centers.push_back(center);
curContours.push_back(contours[contourIdx]);
}
}
static std::vector < std::vector<cv::Point> > _contours;
const std::vector < std::vector<cv::Point> > BetterBlobDetector::getContours() {
return _contours;
}
void BetterBlobDetector::detectImpl(const cv::Mat& image, std::vector<cv::KeyPoint>& keypoints, const cv::Mat&) const
{
//TODO: support mask
_contours.clear();
keypoints.clear();
Mat grayscaleImage;
if (image.channels() == 3)
cvtColor(image, grayscaleImage, CV_BGR2GRAY);
else
grayscaleImage = image;
vector < vector<Center> > centers;
vector < vector<cv::Point> >contours;
for (double thresh = params.minThreshold; thresh < params.maxThreshold; thresh += params.thresholdStep)
{
Mat binarizedImage;
threshold(grayscaleImage, binarizedImage, thresh, 255, THRESH_BINARY);
vector < Center > curCenters;
vector < vector<cv::Point> >curContours, newContours;
findBlobs(grayscaleImage, binarizedImage, curCenters, curContours);
vector < vector<Center> > newCenters;
for (size_t i = 0; i < curCenters.size(); i++)
{
bool isNew = true;
for (size_t j = 0; j < centers.size(); j++)
{
double dist = norm(centers[j][ centers[j].size() / 2 ].location - curCenters[i].location);
isNew = dist >= params.minDistBetweenBlobs && dist >= centers[j][ centers[j].size() / 2 ].radius && dist >= curCenters[i].radius;
if (!isNew)
{
centers[j].push_back(curCenters[i]);
size_t k = centers[j].size() - 1;
while( k > 0 && centers[j][k].radius < centers[j][k-1].radius )
{
centers[j][k] = centers[j][k-1];
k--;
}
centers[j][k] = curCenters[i];
break;
}
}
if (isNew)
{
newCenters.push_back(vector<Center> (1, curCenters[i]));
newContours.push_back(curContours[i]);
//centers.push_back(vector<Center> (1, curCenters[i]));
}
}
std::copy(newCenters.begin(), newCenters.end(), std::back_inserter(centers));
std::copy(newContours.begin(), newContours.end(), std::back_inserter(contours));
}
for (size_t i = 0; i < centers.size(); i++)
{
if (centers[i].size() < params.minRepeatability)
continue;
Point2d sumPoint(0, 0);
double normalizer = 0;
for (size_t j = 0; j < centers[i].size(); j++)
{
sumPoint += centers[i][j].confidence * centers[i][j].location;
normalizer += centers[i][j].confidence;
}
sumPoint *= (1. / normalizer);
KeyPoint kpt(sumPoint, (float)(centers[i][centers[i].size() / 2].radius));
keypoints.push_back(kpt);
_contours.push_back(contours[i]);
}
}
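A minimal usage sketch, assuming the class above is compiled against OpenCV 2.4 (the image file name is hypothetical):
cv::Mat img = cv::imread("blobs.png", CV_LOAD_IMAGE_GRAYSCALE);
cv::SimpleBlobDetector::Params params;
params.filterByArea = true;
params.minArea = 20.0;
BetterBlobDetector detector(params);
std::vector<cv::KeyPoint> keypoints;
detector.detect(img, keypoints); // fills keypoints and stores the matching contours
std::vector< std::vector<cv::Point> > contours = detector.getContours();
for (size_t i = 0; i < contours.size(); i++) {
    double area = cv::contourArea(contours[i]); // extra blob info from the contour
    // contours[i] corresponds to keypoints[i]
}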

Access SimpleBlobDetector data for video

#include "opencv2/imgproc/imgproc.hpp" //
#include "opencv2/highgui/highgui.hpp"
#include <iostream>
#include <math.h>
#include <vector>
#include <fstream>
#include <string>
#include <sstream>
#include <algorithm>
#include "opencv2/objdetect/objdetect.hpp"
#include "opencv2/features2d/features2d.hpp"
using namespace cv;
using namespace std;
int main(int argc, char *argv[])
{
const char* fileName ="C:/Users/DAGLI/Desktop/videos/new/m3.avi";
VideoCapture cap(fileName);
if(!cap.isOpened())
{
cout << "Couldn't open Video " << fileName << "\n";
return -1;
}
for(;;) // infinite loop over the video frames
{
Mat frame,labelImg;
cap >> frame;
if(frame.empty()) break;
//imshow("main",frame);
Mat frame_gray;
cvtColor(frame,frame_gray,CV_RGB2GRAY);
//////////////////////////////////////////////////////////////////////////
// convert binary_image
Mat binaryx;
threshold(frame_gray,binaryx,120,255,CV_THRESH_BINARY);
Mat src, gray, thresh, binary;
Mat out;
vector<KeyPoint> keyPoints;
SimpleBlobDetector::Params params;
params.minThreshold = 120;
params.maxThreshold = 255;
params.thresholdStep = 100;
params.minArea = 20;
params.minConvexity = 0.3;
params.minInertiaRatio = 0.01;
params.maxArea = 1000;
params.maxConvexity = 10;
params.filterByColor = false;
params.filterByCircularity = false;
src = binaryx.clone();
SimpleBlobDetector blobDetector( params );
blobDetector.create("SimpleBlob");
blobDetector.detect( src, keyPoints );
drawKeypoints( src, keyPoints, out, CV_RGB(255,0,0), DrawMatchesFlags::DEFAULT);
cv::Mat blobImg;
cv::drawKeypoints(frame, keyPoints, blobImg);
cv::imshow("Blobs", blobImg);
for(int i=0; i<keyPoints.size(); i++){
//circle(out, keyPoints[i].pt, 20, cvScalar(255,0,0), 10);
//cout<<keyPoints[i].response<<endl;
//cout<<keyPoints[i].angle<<endl;
//cout<<keyPoints[i].size()<<endl;
cout<<keyPoints[i].pt.x<<endl;
cout<<keyPoints[i].pt.y<<endl;
}
imshow( "out", out );
if ((cvWaitKey(40)&0xff)==27) break; // break when Esc is pressed
}
system("pause");
}