Changing brightness and contrast of an image - C++

I am working on the following code:
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <iostream>

using namespace std;
using namespace cv;

Mat change(Mat m);

int main()
{
    Mat image = imread("C:/Users/Public/Pictures/Sample Pictures/Penguins.jpg");
    Mat copy = Mat::zeros(image.size(), image.type());
    Mat changedImage = change(copy);
    namedWindow("Image");
    imshow("Image", changedImage);
    waitKey(0);
}
Mat change(Mat m)
{
    int cols = m.cols;
    int rows = m.rows;
    double alpha = 2.2;
    int beta = 50;
    for (int i = 0; i < rows; i++)
    {
        for (int c = 0; c < cols; c++)
        {
            m.at<Vec3b>(rows, c)[0] = saturate_cast<uchar>(alpha * (m.at<Vec3b>(rows, cols)[0]) + beta);
            m.at<Vec3b>(rows, c)[1] = saturate_cast<uchar>(alpha * (m.at<Vec3b>(rows, cols)[1]) + beta);
            m.at<Vec3b>(rows, c)[2] = saturate_cast<uchar>(alpha * (m.at<Vec3b>(rows, cols)[2]) + beta);
        }
    }
    return m;
}
This compiles fine, but when I run it, I get the following error:
OpenCV Error: Assertion failed (dims <= 2 && data && (unsigned)i0 < (unsigned)size.p[0] && (unsigned)(i1*DataType<_Tp>::channels) < (unsigned)(size.p[1]*channels()) && ((((sizeof(size_t)<<28)|0x8442211) >> ((DataType<_Tp>::depth) & ((1 << 3) - 1))*4) & 15) == elemSize1()) in unknown function, file c:\opencv\build\include\opencv2\core\mat.hpp, line 534
Why am I getting this? I thought I had done everything correctly.

Try this code. Your loops index with rows and cols (the matrix dimensions) instead of the loop variables i and c, so at<Vec3b> reads past the end of the matrix and triggers the assertion. You are also transforming copy, a zero-filled matrix, rather than the image you loaded.
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <iostream>

using namespace std;
using namespace cv;

Mat change(Mat m);

int main()
{
    Mat image = imread("C:/Users/Public/Pictures/Sample Pictures/Penguins.jpg");
    Mat changedImage = change(image); //Modified
    namedWindow("Image");
    imshow("Image", changedImage);
    waitKey(0);
}
Mat change(Mat m)
{
    int cols = m.cols;
    int rows = m.rows;
    double alpha = 2.2;
    int beta = 50;
    for (int i = 0; i < rows; i++)
    {
        for (int c = 0; c < cols; c++)
        {
            m.at<Vec3b>(i, c)[0] = saturate_cast<uchar>(alpha * m.at<Vec3b>(i, c)[0] + beta); //Modified
            m.at<Vec3b>(i, c)[1] = saturate_cast<uchar>(alpha * m.at<Vec3b>(i, c)[1] + beta); //Modified
            m.at<Vec3b>(i, c)[2] = saturate_cast<uchar>(alpha * m.at<Vec3b>(i, c)[2] + beta); //Modified
        }
    }
    return m;
}

Alternatively, try this; cv::add saturates automatically, so it handles the brightness shift in one call.
Mat Change(Mat input, int beta = 50)
{
    Mat Output;
    Scalar S(beta, beta, beta);
    cv::add(input, S, Output);
    return Output;
}
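For the record, OpenCV can apply both the gain and the bias in a single call with Mat::convertTo, which saturates just like the loops above; a minimal sketch using the alpha and beta values from this thread:
Mat changed;
image.convertTo(changed, -1, 2.2, 50); // dst = saturate_cast<uchar>(2.2 * src + 50); rtype -1 keeps the source type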

Related

different value of thresh - converting Matlab code to OpenCV code

I would like some help porting MATLAB code to OpenCV C++. I am trying to do some operations on the RGB channels; however, the value of thresh is not coming out the same even though I am sending the same image. Could someone please help me?
MATLAB
im = imread('1.png');
[m,n,p] = size(im);
R = im(:, :, 1);
G = im(:, :, 2);
B = im(:, :, 3);
thresh = 0;
for j = 1:n
    for i = 1:m
        thresh = thresh + double((1.262*G(i,j)) - (0.884*R(i,j)) - (0.311*B(i,j)));
    end
end
C++
#include <opencv2/opencv.hpp>
#include "opencv2/highgui.hpp"
#include <opencv2/core/mat.hpp>
#include <iostream>

using namespace std;
using namespace cv;

int main(){
    Mat img = imread("1.png", IMREAD_COLOR);
    int thresh = 0;
    for(int j = 0; j <= img.cols; j++){
        for(int i = 0; i <= img.rows; i++){
            Vec3b color = img.at<Vec3b>(i,j);
            uchar a = color.val[0], b = color.val[1], c = color.val[2];
            thresh += double((1.262*b)-(0.884*c)-(0.311*a));
        }
    }
    cout << thresh;
    return 0;
}
The first mistake is in the upper bounds of the for loops: you are reading past the image borders.
j <= img.cols should be j < img.cols and
i <= img.rows should be i < img.rows
The second mistake is that the sum is accumulated in an int, so the fractional part of each term is thrown away; declare thresh as double and write the conversion of the uchar pixel values out explicitly:
thresh += double((1.262*b)-(0.884*c)-(0.311*a));
should be
thresh += double((1.262*static_cast<double>(b))
               - (0.884*static_cast<double>(c))
               - (0.311*static_cast<double>(a)));
Here is the whole code I tried:
#include <opencv2/opencv.hpp>
#include "opencv2/highgui.hpp"
#include <opencv2/core/mat.hpp>
#include <iostream>

using namespace std;
using namespace cv;

int main()
{
    Mat img = imread("img.jpg", IMREAD_COLOR);
    double thresh = 0.0;
    resize(img, img, Size(100,100)); // note: resizing changes the sum compared to the original image
    for(int j = 0; j < img.cols; j++){
        for(int i = 0; i < img.rows; i++){
            // 1ST WAY
            Vec3b color = img.at<Vec3b>(i,j);
            uchar a = color.val[0], b = color.val[1], c = color.val[2];
            thresh += double((1.262*static_cast<double>(b))
                           - (0.884*static_cast<double>(c))
                           - (0.311*static_cast<double>(a)));
            // 2ND WAY
            // thresh += double((1.262 * (double)img.at<Vec3b>(Point(i,j))[1])
            //                - (0.884 * (double)img.at<Vec3b>(Point(i,j))[2])
            //                - (0.311 * (double)img.at<Vec3b>(Point(i,j))[0]));
        }
    }
    cout << thresh << endl;
    return 0;
}
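As a side note, the per-pixel loop can be dropped entirely; a sketch using cv::split and cv::sum on the img from the code above (OpenCV loads images as BGR, so channel 0 is blue):
vector<Mat> ch;
split(img, ch); // ch[0] = B, ch[1] = G, ch[2] = R
Mat acc, tmp;
ch[1].convertTo(acc, CV_64F, 1.262); // 1.262 * G
ch[2].convertTo(tmp, CV_64F, 0.884); // 0.884 * R
acc -= tmp;
ch[0].convertTo(tmp, CV_64F, 0.311); // 0.311 * B
acc -= tmp;
double thresh = sum(acc)[0];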

Unable to predict in SVM OpenCV 3.0

I have been able to train my SVM. The program runs until it reaches the prediction step, where I get an error when predicting on the testing images.
What have I missed in the code? Can anybody help me?
OpenCV Error: Assertion failed (samples.cols == var_count && samples.type() == CV_32F) in cv::ml::SVMImpl::predict, file C:\buildslave64\win64_amdocl\master_PackSlave-win64-vc14-shared\opencv\modules\ml\src\svm.cpp, line 1930
My prediction code is found below:
#include <opencv2/core.hpp>
#include <opencv2/opencv.hpp>
#include <opencv2/imgproc.hpp>
#include "opencv2/imgcodecs.hpp"
#include <opencv2/highgui.hpp>
#include <opencv2/ml.hpp>
#include <iostream>
#include <fstream>
#include <sstream>
#include <string>

using namespace std;
using namespace cv;
using namespace cv::ml;

int main(int, char**)
{
    HOGDescriptor hog(cv::Size(64, 128), cv::Size(16, 16), cv::Size(8, 8), cv::Size(8, 8), 9, 1, -1, 0, 0.2, true, HOGDescriptor::DEFAULT_NLEVELS);
    vector<cv::Point> locations;
    std::vector<float> extractedFeature;
    vector<vector<float>> features;
    vector<Mat> testingImages;
    vector<int> testingLabels;
    int numFiles = 11;        // no. of rows in matrix
    int img_area = 320 * 240; // no. of columns - area of image 76800
    FileStorage myfile("features.xml", FileStorage::READ);
    const char* path = "C:/Testing Set/Extracted_Frames/image";
    // set up labels for each training image
    float label = 1.0; // positive image +1
    Mat testingMat(img_area, numFiles, CV_32FC1); // 1D training matrix
    cout << testingMat.rows << endl;
    cout << testingMat.cols << endl;
    Mat res; // output
    // set up labels for each training image
    Mat labels(testingMat.rows, 1, CV_32SC1, label); // flattened 1D label matrix
    Ptr<ml::SVM> svm = Algorithm::load<ml::SVM>("test.xml");
    std::cout << "Model Loaded" << std::endl;
    for (int i = 0; i < labels.rows; i++) {
        labels.at<int>(i, 0) = labels.at<int>(i, 0);
    }
    for (int file_num = 0; file_num < numFiles; file_num++)
    {
        stringstream ss(stringstream::in | stringstream::out);
        ss << path << file_num << ".jpg";
        cout << "read path = " << ss.str() << endl;
        myfile["Descriptors" + ss.str()] >> extractedFeature;
        Mat img = imread(ss.str());
        int ii = 0; // current column in training_mat
        for (int i = 0; i < img.rows; i++) {
            for (int j = 0; j < img.cols; j++) {
                testingMat.at<float>(ii++, file_num) = img.at<uchar>(i, j);
                Mat sampleMat = (Mat_<float>(1, 2) << i, j);
                float response = svm->predict(sampleMat); // error here
            }
        }
        features.push_back(extractedFeature);
        testingImages.push_back(img);
        testingLabels.push_back(1);
        testingLabels.push_back(file_num);
        myfile.release();
    }
    labels.at<int>(1, 0) = -1;
}
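The assertion message spells out what predict expects: each sample must be a CV_32F row with exactly var_count columns, i.e. the same feature length the SVM was trained on. The loop above instead passes a 1x2 matrix of pixel coordinates, which cannot match a HOG-sized feature vector. A rough sketch of the expected shape, assuming the model was trained on the HOG descriptors stored in features.xml:
// Turn the stored descriptor into a single 1xN CV_32F row and predict on that.
Mat sample(1, (int)extractedFeature.size(), CV_32F);
for (size_t k = 0; k < extractedFeature.size(); k++)
    sample.at<float>(0, (int)k) = extractedFeature[k];
// sample.cols must equal svm->getVarCount(), otherwise the same assertion fires.
float response = svm->predict(sample);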

How to improve otsu threshold output

I am using Otsu thresholding on an image.
Here is the input image:
Here is the output:
Here is the code I am using:
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/highgui/highgui.hpp"
#include <stdlib.h>
#include <stdio.h>
#include <iostream>
#include <string>
#include <math.h>
using namespace std;
using namespace cv;
int main(int argc, char const *argv[]) {
title("Text Extractor");
string win_name = "textextractor";
Mat img_a;
img_a = imread("../input/test_c.jpg");
Mat img_a_gray;
cvtColor(img_a, img_a_gray, CV_BGR2GRAY);
Mat img_a_blur;
GaussianBlur(img_a_gray, img_a_blur, Size(3, 3), 0, 0);
Mat img_a_thres;
// adaptiveThreshold(img_a_blur, img_a_thres, 255, ADAPTIVE_THRESH_MEAN_C, THRESH_BINARY, 5, 4);
threshold(img_a_blur, img_a_thres, 0, 255, THRESH_OTSU);
namedWindow(win_name + "_a", CV_WINDOW_AUTOSIZE);
imshow(win_name + "_a", img_a_thres);
imwrite("../output/output_a.jpg", img_a_thres);
waitKey(0);
return 0;
}
The problem is that the output has a black region at the bottom and on the left. What can I do to minimize or remove it?
Edit:
I tried equalizeHist() and I am getting this:
I will try breaking the image into pieces and working on them separately.
Sorry, my bad. The previous result used adaptive thresholding. Using Otsu I get this:
There is no change in Otsu's output :/
Edit 2: Implemented the Feng Tan algorithm; it gives better results, but the text loses clarity.
Code:
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/photo/photo.hpp"
#include <stdlib.h>
#include <stdio.h>
#include <iostream>
#include <string>
#include <math.h>
using namespace std;
using namespace cv;
int main(int argc, char const *argv[]) {
string win_name = "textextractor";
Mat img_c;
img_c = imread("../input/sample.jpg");
Mat img_c_gray;
cvtColor(img_c, img_c_gray, CV_BGR2GRAY);
Mat img_c_bin = Mat::zeros(img_c_gray.rows, img_c_gray.cols, CV_8UC1);
int s_win = 17;
int l_win = 35;
double min_tau = 10;
Rect roi_s = Rect(-s_win/2, -s_win/2, s_win, s_win);
Rect roi_l = Rect(-l_win/2, -l_win/2, l_win, l_win);
Rect img_c_roi = Rect(0, 0, img_c_gray.cols, img_c_gray.rows);
for (size_t r = 0; r < img_c_gray.rows; r++) {
for (size_t c = 0; c < img_c_gray.cols; c++) {
double pthres = 255;
Rect sROI = roi_s + Point(c, r);
sROI = sROI & img_c_roi;
if(sROI.width == 0 || sROI.height == 0) {
continue;
}
Rect lROI = roi_l + Point(c, r);
lROI = lROI & img_c_roi;
if(lROI.width == 0 || lROI.height == 0) {
continue;
}
Mat sROI_gray = img_c_gray(sROI);
Mat lROI_gray = img_c_gray(lROI);
double s_stdDev = 0;
double l_stdDev = 0;
double s_mean = 0;
double l_mean = 0;
double l_min = DBL_MAX;
for (size_t r = 0; r < sROI_gray.rows; r++) {
for (size_t c = 0; c < sROI_gray.cols; c++) {
s_mean += sROI_gray.at<unsigned char>(r, c);
}
}
s_mean = s_mean / static_cast<double> (sROI_gray.cols * sROI_gray.rows);
for (size_t r = 0; r < sROI_gray.rows; r++) {
for (size_t c = 0; c < sROI_gray.cols; c++) {
double diff = sROI_gray.at<unsigned char> (r, c) - s_mean;
s_stdDev += diff * diff;
}
}
s_stdDev = sqrt(s_stdDev / static_cast<int> (sROI_gray.cols * sROI_gray.rows));
for (size_t r = 0; r < lROI_gray.rows; r++) {
for (size_t c = 0; c < lROI_gray.cols; c++) {
l_mean += lROI_gray.at<unsigned char> (c, r);
if(lROI_gray.at<unsigned char> (r, c) < l_min) {
l_min = lROI_gray.at<unsigned char> (r, c);
}
}
}
l_mean = l_mean / static_cast<double> (lROI_gray.cols * lROI_gray.rows);
for (size_t r = 0; r < lROI_gray.rows; r++) {
for (size_t c = 0; c < lROI_gray.cols; c++) {
double diff = lROI_gray.at<unsigned char> (r, c) - l_mean;
l_stdDev += diff * diff;
}
}
l_stdDev = sqrt(l_stdDev / static_cast<double> (lROI_gray.cols * lROI_gray.rows));
double tau = ((s_mean - l_min) * (1 - s_stdDev / l_stdDev)) / 2.0;
if(tau < min_tau) {
tau = min_tau;
}
double threshold = s_mean - tau;
unsigned char pixel_val = img_c_gray.at<unsigned char>(r, c);
if(pixel_val >= threshold) {
img_c_bin.at<unsigned char> (r, c) = 255;
} else {
img_c_bin.at<unsigned char> (r, c) = 0;
}
}
}
namedWindow(win_name + "_c", CV_WINDOW_AUTOSIZE);
imshow(win_name + "_c", img_c_bin);
imwrite("../output/output_c.jpg", img_c_bin);
waitKey(0);
return 0;
}
Output:
This is what I was able to obtain after some trial and error. First I median-blurred the original image, then applied adaptive thresholding to the blurred image.
This is what I got:
1. Adaptive Threshold using Gaussian filter:
2. Adaptive Threshold using Mean filter:
From here on you can carry out a series of morphological operations that best suit your final image. :)
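A minimal sketch of that blur-plus-threshold step, assuming an 8-bit grayscale input named img_gray; the aperture 5 and the blockSize/C values 11 and 2 are placeholders to tune, not the values used above:
Mat blurred, th_gauss, th_mean;
medianBlur(img_gray, blurred, 5); // aperture must be odd
adaptiveThreshold(blurred, th_gauss, 255, ADAPTIVE_THRESH_GAUSSIAN_C, THRESH_BINARY, 11, 2);
adaptiveThreshold(blurred, th_mean, 255, ADAPTIVE_THRESH_MEAN_C, THRESH_BINARY, 11, 2);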
You should try using CLAHE.
I tried it in MATLAB using:
Ia = imread('FHXTJ.jpg');
I = rgb2gray(Ia);
A = adapthisteq(I, 'clipLimit', 0.02, 'Distribution', 'rayleigh');
Result:
Note: You can apply thresholding on this image. Otsu should work fine now.
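Since the rest of this thread is C++, here is a rough OpenCV sketch of the same idea; the clip limit and tile grid below are guesses, not a translation of adapthisteq's parameters:
Mat gray, enhanced, binarized;
cvtColor(img, gray, CV_BGR2GRAY); // img: your BGR input
Ptr<CLAHE> clahe = createCLAHE(2.0, Size(8, 8)); // clipLimit, tileGridSize
clahe->apply(gray, enhanced);
threshold(enhanced, binarized, 0, 255, THRESH_OTSU); // Otsu on the equalized image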

Finding only big blobs on image

Hello, I'm trying to find characters in this image.
After some preprocessing, this is the image I received.
Now I'm doing connected component labeling to find blobs; however, I get a lot of small blobs too.
#include <iostream>
#include <vector>
#include <stdio.h>
#include <stdlib.h>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>

using namespace cv;
using namespace std;

void FindBlobs(const Mat &binary, vector<vector<Point2i> > &blobs);

int main(int argc, char **argv)
{
    Mat img = imread("adaptive.png", 0);
    if (!img.data) {
        cout << "File not found" << endl;
        return -1;
    }
    namedWindow("binary");
    namedWindow("labelled");
    Mat output = Mat::zeros(img.size(), CV_8UC3);
    Mat binary;
    vector<vector<Point2i> > blobs;
    threshold(img, binary, 0, 1, THRESH_BINARY_INV);
    FindBlobs(binary, blobs);
    // Randomly color the blobs
    for (size_t i = 0; i < blobs.size(); i++) {
        unsigned char r = 255 * (rand() / (1.0 + RAND_MAX));
        unsigned char g = 255 * (rand() / (1.0 + RAND_MAX));
        unsigned char b = 255 * (rand() / (1.0 + RAND_MAX));
        for (size_t j = 0; j < blobs[i].size(); j++) {
            int x = blobs[i][j].x;
            int y = blobs[i][j].y;
            output.at<Vec3b>(y, x)[0] = b; // Vec3b is stored in BGR order
            output.at<Vec3b>(y, x)[1] = g;
            output.at<Vec3b>(y, x)[2] = r;
        }
    }
    imshow("binary", img);
    imshow("labelled", output);
    waitKey(0);
    return 0;
}
void FindBlobs(const Mat &binary, vector<vector<Point2i> > &blobs)
{
    blobs.clear();
    Mat label_image;
    binary.convertTo(label_image, CV_32SC1);
    int label_count = 2; // starts at 2 because 0,1 are used already
    for (int y = 0; y < label_image.rows; y++) {
        int *row = (int*)label_image.ptr(y);
        for (int x = 0; x < label_image.cols; x++) {
            if (row[x] != 1) {
                continue;
            }
            Rect rect;
            floodFill(label_image, Point(x, y), label_count, &rect, 0, 0, 4);
            vector<Point2i> blob;
            for (int i = rect.y; i < (rect.y + rect.height); i++) {
                int *row2 = (int*)label_image.ptr(i);
                for (int j = rect.x; j < (rect.x + rect.width); j++) {
                    if (row2[j] != label_count) {
                        continue;
                    }
                    blob.push_back(Point2i(j, i));
                }
            }
            blobs.push_back(blob);
            label_count++;
        }
    }
}
So with this algorithm I receive blobs, but when I do
if(blobs.size() > 50) {
    blob.push_back(Point2i(j,i));
}
I receive a black screen. However, when I try
if(blob.size() < 50){
    blob.push_back(Point2i(j,i));
}
I receive only small blobs. What can be the actual problem here?
I guess you want to keep only those "big" blobs? Note that blobs.size() is the number of blobs found so far, not the pixel count of the current blob, which is why your first check blanked the output. Change the following code
blobs.push_back(blob);
label_count++;
to this:
if(blob.size() > 50){
    blobs.push_back(blob);
}
label_count++;
And you will get a picture like this:
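As an aside, if your OpenCV build is 3.0 or newer, connectedComponentsWithStats does the labeling and area measurement in one call, so you can filter big blobs without the flood-fill loop; a sketch using the 50-pixel cutoff from the question:
Mat labels, stats, centroids;
int n = connectedComponentsWithStats(binary, labels, stats, centroids, 4);
for (int label = 1; label < n; label++) { // label 0 is the background
    if (stats.at<int>(label, CC_STAT_AREA) > 50) {
        // keep this blob: its pixels are where labels == label
    }
}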

How to get extra information of blobs with SimpleBlobDetector?

@robot_sherrick answered this question for me; this is a follow-up question for his answer.
cv::SimpleBlobDetector in OpenCV 2.4 looks very exciting, but I am not sure I can make it work for more detailed data extraction.
I have the following concerns:
if this only returns the center of the blob, I can't have an entire, labelled Mat, can I?
how can I access the features of the detected blobs, like area, convexity, color and so on?
can I display an exact segmentation with this? (like, say, with watershed)
So the code should look something like this:
cv::Mat inputImg = imread(image_file_name, CV_LOAD_IMAGE_COLOR); // Read a file
cv::SimpleBlobDetector::Params params;
params.minDistBetweenBlobs = 10.0; // minimum 10 pixels between blobs
params.filterByArea = true; // filter my blobs by area of blob
params.minArea = 20.0; // min 20 pixels squared
params.maxArea = 500.0; // max 500 pixels squared
SimpleBlobDetector myBlobDetector(params);
std::vector<cv::KeyPoint> myBlobs;
myBlobDetector.detect(inputImg, myBlobs);
If you then want to have these keypoints highlighted on your image:
cv::Mat blobImg;
cv::drawKeypoints(inputImg, myBlobs, blobImg);
cv::imshow("Blobs", blobImg);
To access the info in the keypoints, you then just access each element like so:
for(std::vector<cv::KeyPoint>::iterator blobIterator = myBlobs.begin(); blobIterator != myBlobs.end(); blobIterator++){
    std::cout << "size of blob is: " << blobIterator->size << std::endl;
    std::cout << "point is at: " << blobIterator->pt.x << " " << blobIterator->pt.y << std::endl;
}
Note: this has not been compiled and may have typos.
Here is a version that will allow you to get the last contours back, via the getContours() method. They will match up by index to the keypoints.
class BetterBlobDetector : public cv::SimpleBlobDetector
{
public:
    BetterBlobDetector(const cv::SimpleBlobDetector::Params &parameters = cv::SimpleBlobDetector::Params());
    const std::vector<std::vector<cv::Point> > getContours();

protected:
    virtual void detectImpl(const cv::Mat& image, std::vector<cv::KeyPoint>& keypoints, const cv::Mat& mask=cv::Mat()) const;
    virtual void findBlobs(const cv::Mat &image, const cv::Mat &binaryImage,
                           std::vector<Center> &centers, std::vector<std::vector<cv::Point> > &contours) const;
};
Then the cpp:
using namespace cv;

BetterBlobDetector::BetterBlobDetector(const SimpleBlobDetector::Params &parameters)
{
}

void BetterBlobDetector::findBlobs(const cv::Mat &image, const cv::Mat &binaryImage,
                                   vector<Center> &centers, std::vector<std::vector<cv::Point> > &curContours) const
{
    (void)image;
    centers.clear();
    curContours.clear();
    std::vector<std::vector<cv::Point> > contours;
    Mat tmpBinaryImage = binaryImage.clone();
    findContours(tmpBinaryImage, contours, CV_RETR_LIST, CV_CHAIN_APPROX_NONE);
    for (size_t contourIdx = 0; contourIdx < contours.size(); contourIdx++)
    {
        Center center;
        center.confidence = 1;
        Moments moms = moments(Mat(contours[contourIdx]));
        if (params.filterByArea)
        {
            double area = moms.m00;
            if (area < params.minArea || area >= params.maxArea)
                continue;
        }
        if (params.filterByCircularity)
        {
            double area = moms.m00;
            double perimeter = arcLength(Mat(contours[contourIdx]), true);
            double ratio = 4 * CV_PI * area / (perimeter * perimeter);
            if (ratio < params.minCircularity || ratio >= params.maxCircularity)
                continue;
        }
        if (params.filterByInertia)
        {
            double denominator = sqrt(pow(2 * moms.mu11, 2) + pow(moms.mu20 - moms.mu02, 2));
            const double eps = 1e-2;
            double ratio;
            if (denominator > eps)
            {
                double cosmin = (moms.mu20 - moms.mu02) / denominator;
                double sinmin = 2 * moms.mu11 / denominator;
                double cosmax = -cosmin;
                double sinmax = -sinmin;
                double imin = 0.5 * (moms.mu20 + moms.mu02) - 0.5 * (moms.mu20 - moms.mu02) * cosmin - moms.mu11 * sinmin;
                double imax = 0.5 * (moms.mu20 + moms.mu02) - 0.5 * (moms.mu20 - moms.mu02) * cosmax - moms.mu11 * sinmax;
                ratio = imin / imax;
            }
            else
            {
                ratio = 1;
            }
            if (ratio < params.minInertiaRatio || ratio >= params.maxInertiaRatio)
                continue;
            center.confidence = ratio * ratio;
        }
        if (params.filterByConvexity)
        {
            vector<Point> hull;
            convexHull(Mat(contours[contourIdx]), hull);
            double area = contourArea(Mat(contours[contourIdx]));
            double hullArea = contourArea(Mat(hull));
            double ratio = area / hullArea;
            if (ratio < params.minConvexity || ratio >= params.maxConvexity)
                continue;
        }
        center.location = Point2d(moms.m10 / moms.m00, moms.m01 / moms.m00);
        if (params.filterByColor)
        {
            if (binaryImage.at<uchar>(cvRound(center.location.y), cvRound(center.location.x)) != params.blobColor)
                continue;
        }
        // compute blob radius
        {
            vector<double> dists;
            for (size_t pointIdx = 0; pointIdx < contours[contourIdx].size(); pointIdx++)
            {
                Point2d pt = contours[contourIdx][pointIdx];
                dists.push_back(norm(center.location - pt));
            }
            std::sort(dists.begin(), dists.end());
            center.radius = (dists[(dists.size() - 1) / 2] + dists[dists.size() / 2]) / 2.;
        }
        centers.push_back(center);
        curContours.push_back(contours[contourIdx]);
    }
}

static std::vector<std::vector<cv::Point> > _contours;

const std::vector<std::vector<cv::Point> > BetterBlobDetector::getContours() {
    return _contours;
}
void BetterBlobDetector::detectImpl(const cv::Mat& image, std::vector<cv::KeyPoint>& keypoints, const cv::Mat&) const
{
    // TODO: support mask
    _contours.clear();
    keypoints.clear();
    Mat grayscaleImage;
    if (image.channels() == 3)
        cvtColor(image, grayscaleImage, CV_BGR2GRAY);
    else
        grayscaleImage = image;
    vector<vector<Center> > centers;
    vector<vector<cv::Point> > contours;
    for (double thresh = params.minThreshold; thresh < params.maxThreshold; thresh += params.thresholdStep)
    {
        Mat binarizedImage;
        threshold(grayscaleImage, binarizedImage, thresh, 255, THRESH_BINARY);
        vector<Center> curCenters;
        vector<vector<cv::Point> > curContours, newContours;
        findBlobs(grayscaleImage, binarizedImage, curCenters, curContours);
        vector<vector<Center> > newCenters;
        for (size_t i = 0; i < curCenters.size(); i++)
        {
            bool isNew = true;
            for (size_t j = 0; j < centers.size(); j++)
            {
                double dist = norm(centers[j][centers[j].size() / 2].location - curCenters[i].location);
                isNew = dist >= params.minDistBetweenBlobs && dist >= centers[j][centers[j].size() / 2].radius && dist >= curCenters[i].radius;
                if (!isNew)
                {
                    centers[j].push_back(curCenters[i]);
                    size_t k = centers[j].size() - 1;
                    while (k > 0 && centers[j][k].radius < centers[j][k-1].radius)
                    {
                        centers[j][k] = centers[j][k-1];
                        k--;
                    }
                    centers[j][k] = curCenters[i];
                    break;
                }
            }
            if (isNew)
            {
                newCenters.push_back(vector<Center>(1, curCenters[i]));
                newContours.push_back(curContours[i]);
                //centers.push_back(vector<Center>(1, curCenters[i]));
            }
        }
        std::copy(newCenters.begin(), newCenters.end(), std::back_inserter(centers));
        std::copy(newContours.begin(), newContours.end(), std::back_inserter(contours));
    }
    for (size_t i = 0; i < centers.size(); i++)
    {
        if (centers[i].size() < params.minRepeatability)
            continue;
        Point2d sumPoint(0, 0);
        double normalizer = 0;
        for (size_t j = 0; j < centers[i].size(); j++)
        {
            sumPoint += centers[i][j].confidence * centers[i][j].location;
            normalizer += centers[i][j].confidence;
        }
        sumPoint *= (1. / normalizer);
        KeyPoint kpt(sumPoint, (float)(centers[i][centers[i].size() / 2].radius));
        keypoints.push_back(kpt);
        _contours.push_back(contours[i]);
    }
}
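A quick usage sketch under the 2.4 API, assuming the class above is compiled in; grayImg and the params values are illustrative. Also note that the constructor above never forwards parameters to the base class, so the filter settings may not take effect until that is added:
cv::SimpleBlobDetector::Params params;
params.filterByArea = true;
params.minArea = 20.0;
BetterBlobDetector detector(params);
std::vector<cv::KeyPoint> keypoints;
detector.detect(grayImg, keypoints); // grayImg: your 8-bit input image
std::vector<std::vector<cv::Point> > contours = detector.getContours();
cv::Mat vis = cv::Mat::zeros(grayImg.size(), CV_8UC3);
cv::drawContours(vis, contours, -1, cv::Scalar(0, 255, 0)); // contours[i] pairs with keypoints[i]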
// Access SimpleBlobDetector data for a video
#include "opencv2/imgproc/imgproc.hpp" //
#include "opencv2/highgui/highgui.hpp"
#include <iostream>
#include <math.h>
#include <vector>
#include <fstream>
#include <string>
#include <sstream>
#include <algorithm>
#include "opencv2/objdetect/objdetect.hpp"
#include "opencv2/features2d/features2d.hpp"
using namespace cv;
using namespace std;
int main(int argc, char *argv[])
{
const char* fileName ="C:/Users/DAGLI/Desktop/videos/new/m3.avi";
VideoCapture cap(fileName); //
if(!cap.isOpened()) //
{
cout << "Couldn't open Video " << fileName << "\n";
return -1;
}
for(;;) // videonun frameleri icin sonsuz dongu
{
Mat frame,labelImg;
cap >> frame;
if(frame.empty()) break;
//imshow("main",frame);
Mat frame_gray;
cvtColor(frame,frame_gray,CV_RGB2GRAY);
//////////////////////////////////////////////////////////////////////////
// convert binary_image
Mat binaryx;
threshold(frame_gray,binaryx,120,255,CV_THRESH_BINARY);
Mat src, gray, thresh, binary;
Mat out;
vector<KeyPoint> keyPoints;
SimpleBlobDetector::Params params;
params.minThreshold = 120;
params.maxThreshold = 255;
params.thresholdStep = 100;
params.minArea = 20;
params.minConvexity = 0.3;
params.minInertiaRatio = 0.01;
params.maxArea = 1000;
params.maxConvexity = 10;
params.filterByColor = false;
params.filterByCircularity = false;
src = binaryx.clone();
SimpleBlobDetector blobDetector( params );
blobDetector.create("SimpleBlob");
blobDetector.detect( src, keyPoints );
drawKeypoints( src, keyPoints, out, CV_RGB(255,0,0), DrawMatchesFlags::DEFAULT);
cv::Mat blobImg;
cv::drawKeypoints(frame, keyPoints, blobImg);
cv::imshow("Blobs", blobImg);
for(int i=0; i<keyPoints.size(); i++){
//circle(out, keyPoints[i].pt, 20, cvScalar(255,0,0), 10);
//cout<<keyPoints[i].response<<endl;
//cout<<keyPoints[i].angle<<endl;
//cout<<keyPoints[i].size()<<endl;
cout<<keyPoints[i].pt.x<<endl;
cout<<keyPoints[i].pt.y<<endl;
}
imshow( "out", out );
if ((cvWaitKey(40)&0xff)==27) break; // esc 'ye basilinca break
}
system("pause");
}