Store the centroid to a new array or vector - c++

[input image: beethoven_ode_to_joy.jpg]
This is the code:
#include "opencv2/opencv.hpp"
using namespace cv;
using namespace std;
int main(int argc, char** argv)
{
// Read image
Mat im = imread("beethoven_ode_to_joy.jpg", IMREAD_GRAYSCALE);
// Setup SimpleBlobDetector parameters.
SimpleBlobDetector::Params params;
// Change thresholds
params.minThreshold = 10;
params.maxThreshold = 200;
// Filter by Area.
params.filterByArea = true;
params.minArea = 15;
// Filter by Circularity
params.filterByCircularity = true;
params.minCircularity = 0.1;
// Filter by Convexity
params.filterByConvexity = true;
params.minConvexity = 0.01;
// Filter by Inertia
params.filterByInertia = true;
params.minInertiaRatio = 0.01;
// Storage for blobs
vector<KeyPoint> keypoints;
// Set up detector with params
SimpleBlobDetector detector(params);
// Detect blobs
detector.detect(im, keypoints);
This portion of the code prints all the blob centroids, but I need to store the centroids in a new vector:
for (vector<KeyPoint>::iterator it = keypoints.begin(); it != keypoints.end(); ++it)
{
KeyPoint k = *it;
cout << k.pt << endl;
}
// Draw detected blobs as red circles.
// DrawMatchesFlags::DRAW_RICH_KEYPOINTS flag ensures
// the size of the circle corresponds to the size of blob
Mat im_with_keypoints;
drawKeypoints(im, keypoints, im_with_keypoints, Scalar(0, 0, 255), DrawMatchesFlags::DRAW_RICH_KEYPOINTS);
// Show blobs
imshow("keypoints", im_with_keypoints);
imwrite("a.jpg", im_with_keypoints);
waitKey(0);
}
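Note: this constructs the detector with the OpenCV 2.x API. As a minimal sketch, the equivalent setup under OpenCV 3.x and later goes through a factory function instead of a stack constructor:
// OpenCV 3.x+: create the detector via the factory and call it through the Ptr
Ptr<SimpleBlobDetector> detector = SimpleBlobDetector::create(params);
detector->detect(im, keypoints);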

Instead of:
for (vector<KeyPoint>::iterator it = keypoints.begin(); it != keypoints.end(); ++it)
{
KeyPoint k = *it;
cout << k.pt << endl;
}
You can do this:
std::vector<decltype(keypoints[0].pt)> vector_of_points;
vector_of_points.reserve(keypoints.size());
for (vector<KeyPoint>::iterator it = keypoints.begin(); it != keypoints.end(); ++it)
{
KeyPoint k = *it;
cout << k.pt << endl;
vector_of_points.emplace_back(k.pt);
}
Or even better (this version needs #include <algorithm>):
std::vector<decltype(keypoints[0].pt)> vector_of_points(keypoints.size());
std::transform(std::begin(keypoints), std::end(keypoints), std::begin(vector_of_points),
               [](KeyPoint const& item) {
                   std::cout << item.pt << endl;
                   return item.pt;
               });
EDIT:
If you want to access the centroids you can do this:
for (auto& centr : vector_of_points) {
    // centr is the centroid
}
If you want a specific centroid, say the fifth one (indexing is zero-based):
vector_of_points[4]
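Equivalently, since KeyPoint::pt is just a cv::Point2f, here is a plainer sketch of the same idea (not from the original answer):
std::vector<cv::Point2f> centroids;       // one centroid per detected blob
centroids.reserve(keypoints.size());
for (const cv::KeyPoint& k : keypoints)
    centroids.push_back(k.pt);            // KeyPoint::pt is the blob centroid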

Related

How to reduce false detections and improve tracking accuracy with OpenCV in C++? [closed]

Requirements:
(1) Build OpenCV with CUDA and compile in C++
(2) Version: latest OpenCV
(3) Build and compile OpenCV guide: https://techawarey.com/programming/install-opencv-c-c-in-ubuntu-18-04-lts-step-by-step-guide/#Summary
(4) The samples_utility library is here: https://github.com/opencv/opencv_contrib/blob/master/modules/tracking/samples/samples_utility.hpp
(5) Compile command: g++ test.cpp -o testoutput -std=c++11 `pkg-config --cflags --libs opencv`
(6) Run command: ./testoutput
The code works, but it is not accurate.
Step 1:
Read a frame from the camera.
Select the ROI (region of interest).
Start a KCF tracker with a Sobel feature extractor.
Track the selected object.
Step 2:
On tracking failure:
Call the template matching function MatchingMethod().
Run template matching.
Get the x, y position from template matching.
Reinitialize the KCF tracker with the Sobel feature extractor.
This code is fine for a still object, but when the object moves the tracker produces false detections. I want to improve accuracy and reduce false detections.
#include <opencv2/core/utility.hpp>
#include <opencv2/tracking.hpp>
#include <opencv2/videoio.hpp>
#include <opencv2/highgui.hpp>
#include <opencv2/core/ocl.hpp>
#include <iostream>
#include <cstring>
#include <unistd.h>
#include "sample_utility.hpp"
#include <thread>
#include <opencv2/cudaimgproc.hpp>
#include <opencv2/imgproc.hpp>
#include <opencv2/calib3d.hpp>
#include <opencv2/cudaarithm.hpp>
#include <iomanip>
#include <stdlib.h>
#include <unistd.h>
//////////////////////////////
using namespace cv;
using namespace std;
////////////////////////////
// Convert to string
#define SSTR( x ) static_cast< std::ostringstream & >( \
( std::ostringstream() << std::dec << x ) ).str()
/// Global Variables
struct Array {
int arr[2];
};
Mat img;
Mat templ;
Mat result_h;
bool flag = true;
int match_method = 5;
int i=0;
int max_Trackbar = 5;
float fps;
int seconds = 0;
// Function Headers
void delay();
// prototype of the function for the feature extractor
void sobelExtractor(const Mat img, const Rect roi, Mat& feat);
struct Array MatchingMethod( int, void* );
int main(int argc, char **argv)
{
TrackerKCF::Params param;
param.compress_feature = true;
param.compressed_size = 2;
param.desc_npca = 0;
param.desc_pca = TrackerKCF::GRAY | TrackerKCF::CN;
param.detect_thresh = 0.8;
// create a tracker object
Ptr<TrackerKCF> tracker = TrackerKCF::create(param);
tracker->setFeatureExtractor(sobelExtractor);
VideoCapture cap(0);
// Exit if video is not opened
if(!cap.isOpened())
{
//cout << "Could not read video file" << endl;
return 1;
}
// Read first frame
Mat frame;
bool ok = cap.read(frame);
// Define initial bounding box
//Rect bbox(x, y, w, h);
// Uncomment the line below to select a different bounding box
Rect bbox = selectROI(frame, false);
// Display bounding box.
rectangle(frame, bbox, Scalar( 255, 0, 0 ), 2, 1 );
///////////////////////////
int H, W, cW, cH;
// print(f"hight {H} , Width {W}")
H = display_height;
W = display_width;
// Center point of the screen
cW = int(W / 2);
cH = int(H / 2);
Point p1(cW, cH);
// get bounding box
Mat imCrop = frame(bbox);
imwrite("1.png", imCrop);
//quit if ROI was not selected
if(bbox.width==0 || bbox.height==0)
return 0;
//////////////////////////
//imshow("Tracking", frame);
tracker->init(frame, bbox);
while(true)
{
Mat frame;
cap >> frame;
circle(frame, p1, 3, Scalar(0,255,0), -1);
// Start timer
if(bbox.width!=0 || bbox.height!=0){
double timer = (double)getTickCount();
// Update the tracking result
/////////////////////////////////////
bool ok = tracker->update(frame, bbox);
//////////////////////////////////////
//ok, bbox = tracker->update(frame);
// Calculate Frames per second (FPS)
fps = getTickFrequency() / ((double)getTickCount() - timer);
if (ok)
{
// Tracking success : Draw the tracked object
rectangle(frame, bbox, Scalar( 255, 0, 0 ), 2, 1 );
///////////////////////////////////////////////////
int xxx, yyy, height, width;
xxx = bbox.x;
yyy = bbox.y;
height = bbox.height;
width = bbox.width;
int diffX, diffY;
float cxROI, cyROI;
cxROI = int((xxx + (xxx + width)) / 2);
cyROI = int((yyy + (yyy + height)) / 2);
diffX = cxROI - cW;
diffY = cH - cyROI;
//cout<<diffX<<"\n";
//cout<<diffY<<"\n";
Point p(cxROI, cyROI);
circle(frame, p, 3, Scalar(128,0,0), -1);
putText(frame, "FPS : " + SSTR(int(fps)), Point(100,20), FONT_HERSHEY_SIMPLEX, 0.75, Scalar(50,170,50), 2);
putText(frame, "Difference From X-Axis: "+SSTR(int(diffX)), Point(100, 50), FONT_HERSHEY_SIMPLEX, 0.6, Scalar(100, 200, 200), 2);
putText(frame, "Difference From Y-Axis: "+SSTR(int(diffY)), Point(100, 80), FONT_HERSHEY_SIMPLEX, 0.6, Scalar(100, 200, 200), 2);
}
else
{
// Tracking failure detected.
putText(frame, "Tracking failure detected", Point(100,110), FONT_HERSHEY_SIMPLEX, 0.75, Scalar(0,0,255),2);
templ = imread( "1.png", 1 );
img=frame.clone();
struct Array a = MatchingMethod( 0, 0 );
cout<<"X: "<<a.arr[0]<<"\n";
cout<<"Y: "<<a.arr[1]<<"\n";
cout<<"Width: "<<w<<"\n";
cout<<"Height: "<<h<<"\n";
int xx, yy, ww, hh;
xx = a.arr[0];
yy = a.arr[1];
ww = w;
hh = h;
Rect bbox(xx, yy, ww, hh);
tracker.release();
tracker = TrackerKCF::create(param);
tracker->setFeatureExtractor(sobelExtractor);
tracker->init(frame, bbox);
//roi.x = MatchingMethod.
//waitKey(30);
rectangle(frame, bbox, Scalar( 255, 0, 0 ), 2, 1 );
////////////////////////////////////////////////////////////////////////
int diffX, diffY;
float cxROI, cyROI;
cxROI = int((xx + (xx + ww)) / 2);
cyROI = int((yy + (yy + hh)) / 2);
diffX = cxROI - cW;
diffY = cH - cyROI;
//cout<<diffX<<"\n";
//cout<<diffY<<"\n";
Point p(cxROI, cyROI);
circle(frame, p, 3, Scalar(128,0,0), -1);
///////////////////////////////////////////////////////////////////////////
}
}
else{
}
// Display frame.
imshow("Tracking", frame);
// Exit if ESC pressed.
int k = waitKey(1);
if(k == 27)
{
break;
}
}
return 0;
}
///////////////////////
void sobelExtractor(const Mat img, const Rect roi, Mat& feat){
Mat sobel[2];
Mat patch;
Rect region=roi;
// extract patch inside the image
if(roi.x<0){region.x=0;region.width+=roi.x;}
if(roi.y<0){region.y=0;region.height+=roi.y;}
if(roi.x+roi.width>img.cols)region.width=img.cols-roi.x;
if(roi.y+roi.height>img.rows)region.height=img.rows-roi.y;
if(region.width>img.cols)region.width=img.cols;
if(region.height>img.rows)region.height=img.rows;
patch=img(region).clone();
cvtColor(patch,patch, COLOR_BGR2GRAY);
// add some padding to compensate when the patch is outside image border
int addTop,addBottom, addLeft, addRight;
addTop=region.y-roi.y;
addBottom=(roi.height+roi.y>img.rows?roi.height+roi.y-img.rows:0);
addLeft=region.x-roi.x;
addRight=(roi.width+roi.x>img.cols?roi.width+roi.x-img.cols:0);
copyMakeBorder(patch,patch,addTop,addBottom,addLeft,addRight,BORDER_REPLICATE);
Sobel(patch, sobel[0], CV_32F,1,0,1);
Sobel(patch, sobel[1], CV_32F,0,1,1);
merge(sobel,2,feat);
feat=feat/255.0-0.5; // normalize to range -0.5 .. 0.5
}
////////////////////////////////////////////////////
struct Array MatchingMethod( int, void* )
{
/// Source image to display
Mat frame;
struct Array a = {{0, 0}};   // default result when no match beats the threshold
/////////
for(int i=1; i<=4; i++){
img.copyTo( frame );
// break;
//}
//////////////////////////
cv::cuda::setDevice(0); // initialize CUDA
// convert from mat to gpumat
cv::cuda::GpuMat image_d(img);
cv::cuda::GpuMat templ_d(templ);
cv::cuda::GpuMat result;
// GPU -> NG
cv::Ptr<cv::cuda::TemplateMatching> alg =
cv::cuda::createTemplateMatching(image_d.type(), cv::TM_CCOEFF_NORMED);
alg->match(image_d, templ_d, result); // no return.
//cv::cuda::normalize(result, result, 0, 1, cv::NORM_MINMAX, -1);
double max_value, min_value;
cv::Point location;
cv::cuda::minMaxLoc(result, &min_value, &max_value, 0, &location);
/////////////////////////
double THRESHOLD = 3e-09; //0.3;
if( min_value <= THRESHOLD) {
//struct Array a;
a.arr[0] = location.x;
a.arr[1] = location.y;
cout<<"Hi"<<endl;
}
}
return a;
}
Okay, here is my answer to your question.
First of all, applying template matching when the tracker misses is a mistake, because template matching only succeeds when the feature is nearly identical to the reference. In your case there will be shadows, lighting changes, etc. in the environment, so you will never get reliable results.
Secondly, if you delete the template matching scope, the tracker will keep searching for the target in the image effectively. The changes I made in your code are listed below. With these changes, I got better results:
Delete the template matching scope.
Decrease the detection threshold (param.detect_thresh) to 0.5.
Create more tracker objects to catch the target: this change is the most important part. Create several tracker objects (in my case 4, but you can increase the number). Each tracker should be given a rectangle similar to the ROI the user chose, but not with the same coordinates. For example, if the user chooses cv::Rect(200,200,400,400), the other trackers should get targets such as cv::Rect(180,190,400,400), cv::Rect(220,180,400,400), and so on. The tracking algorithm is feature based, so it always tries to match features similar to the reference; by doing this you increase the number of feature references.
And here is my code to guide you:
#include <opencv2/core/utility.hpp>
#include <opencv2/tracking.hpp>
#include <opencv2/videoio.hpp>
#include <opencv2/highgui.hpp>
#include <opencv2/core/ocl.hpp>
#include <iostream>
#include <cstring>
#include <unistd.h>
#include <thread>
#include <opencv2/cudaimgproc.hpp>
#include <opencv2/imgproc.hpp>
#include <opencv2/calib3d.hpp>
#include <opencv2/cudaarithm.hpp>
#include <iomanip>
#include <stdlib.h>
#include <unistd.h>
//////////////////////////////
using namespace cv;
using namespace std;
////////////////////////////
// Convert to string
#define SSTR( x ) static_cast< std::ostringstream & >( \
( std::ostringstream() << std::dec << x ) ).str()
/// Global Variables
struct Array {
int arr[2];
};
Mat img;
Mat templ;
Mat result_h;
bool flag = true;
int match_method = 5;
int i=0;
int max_Trackbar = 5;
float fps;
int seconds = 0;
// Function Headers
void delay();
// prototype of the function for the feature extractor
void sobelExtractor(const Mat img, const Rect roi, Mat& feat);
struct Array MatchingMethod( int, void* );
int main(int argc, char **argv)
{
TrackerKCF::Params param;
param.compress_feature = true;
param.compressed_size = 2;
param.desc_npca = 0;
param.desc_pca = TrackerKCF::GRAY | TrackerKCF::CN;
param.detect_thresh = 0.5;
// create a tracker object
Ptr<TrackerKCF> tracker = TrackerKCF::create(param);
tracker->setFeatureExtractor(sobelExtractor);
Ptr<TrackerKCF> tracker2 = TrackerKCF::create(param);
tracker2->setFeatureExtractor(sobelExtractor);
Ptr<TrackerKCF> tracker3 = TrackerKCF::create(param);
tracker3->setFeatureExtractor(sobelExtractor);
Ptr<TrackerKCF> tracker4 = TrackerKCF::create(param);
tracker4->setFeatureExtractor(sobelExtractor);
VideoCapture cap(0);
// Exit if video is not opened
if(!cap.isOpened())
{
//cout << "Could not read video file" << endl;
return 1;
}
cv::imshow("Tracking",0);
// Read first frame
Mat frame;
bool ok = cap.read(frame);
// Define initial bounding box
//Rect bbox(x, y, w, h);
// Uncomment the line below to select a different bounding box
Rect2d bbox = selectROI(frame, false);
// Display bounding box.
rectangle(frame, bbox, Scalar( 255, 0, 0 ), 2, 1 );
///////////////////////////
int H, W, cW, cH;
// print(f"hight {H} , Width {W}")
H = frame.rows;
W = frame.cols;
// Center point of the screen
cW = int(W / 2);
cH = int(H / 2);
Point p1(cW, cH);
//quit if ROI was not selected
if(bbox.width==0 || bbox.height==0)
return 0;
//////////////////////////
//imshow("Tracking", frame);
tracker->init(frame, bbox);
tracker2->init(frame, cv::Rect2d(bbox.x-10,bbox.y-10, bbox.width,bbox.height));
tracker3->init(frame, cv::Rect2d(bbox.x+10,bbox.y+10, bbox.width,bbox.height));
tracker4->init(frame, cv::Rect2d(bbox.x+20,bbox.y+20, bbox.width,bbox.height));
while(true)
{
Mat frame;
cap >> frame;
circle(frame, p1, 3, Scalar(0,255,0), -1);
// Start timer
if(bbox.width!=0 || bbox.height!=0){
double timer = (double)getTickCount();
// Update the tracking result
/////////////////////////////////////
bool ok = tracker->update(frame, bbox);
bool ok2 = tracker->update(frame, bbox);
bool ok3 = tracker->update(frame, bbox);
bool ok4 = tracker->update(frame, bbox);
//////////////////////////////////////
//ok, bbox = tracker->update(frame);
// Calculate Frames per second (FPS)
fps = getTickFrequency() / ((double)getTickCount() - timer);
if (ok || ok2 || ok3 || ok4)
{
// Tracking success : Draw the tracked object
rectangle(frame, bbox, Scalar( 255, 0, 0 ), 2, 1 );
///////////////////////////////////////////////////
int xxx, yyy, height, width;
xxx = bbox.x;
yyy = bbox.y;
height = bbox.height;
width = bbox.width;
int diffX, diffY;
float cxROI, cyROI;
cxROI = int((xxx + (xxx + width)) / 2);
cyROI = int((yyy + (yyy + height)) / 2);
diffX = cxROI - cW;
diffY = cH - cyROI;
//cout<<diffX<<"\n";
//cout<<diffY<<"\n";
Point p(cxROI, cyROI);
circle(frame, p, 3, Scalar(128,0,0), -1);
putText(frame, "FPS : " + SSTR(int(fps)), Point(100,20), FONT_HERSHEY_SIMPLEX, 0.75, Scalar(50,170,50), 2);
putText(frame, "Difference From X-Axis: "+SSTR(int(diffX)), Point(100, 50), FONT_HERSHEY_SIMPLEX, 0.6, Scalar(100, 200, 200), 2);
putText(frame, "Difference From Y-Axis: "+SSTR(int(diffY)), Point(100, 80), FONT_HERSHEY_SIMPLEX, 0.6, Scalar(100, 200, 200), 2);
}
}
else{
}
// Display frame.
imshow("Tracking", frame);
// Exit if ESC pressed.
int k = waitKey(1);
if(k == 27)
{
break;
}
}
return 0;
}
///////////////////////
void sobelExtractor(const Mat img, const Rect roi, Mat& feat){
Mat sobel[2];
Mat patch;
Rect region=roi;
// extract patch inside the image
if(roi.x<0){region.x=0;region.width+=roi.x;}
if(roi.y<0){region.y=0;region.height+=roi.y;}
if(roi.x+roi.width>img.cols)region.width=img.cols-roi.x;
if(roi.y+roi.height>img.rows)region.height=img.rows-roi.y;
if(region.width>img.cols)region.width=img.cols;
if(region.height>img.rows)region.height=img.rows;
patch=img(region).clone();
cvtColor(patch,patch, COLOR_BGR2GRAY);
// add some padding to compensate when the patch is outside image border
int addTop,addBottom, addLeft, addRight;
addTop=region.y-roi.y;
addBottom=(roi.height+roi.y>img.rows?roi.height+roi.y-img.rows:0);
addLeft=region.x-roi.x;
addRight=(roi.width+roi.x>img.cols?roi.width+roi.x-img.cols:0);
copyMakeBorder(patch,patch,addTop,addBottom,addLeft,addRight,BORDER_REPLICATE);
Sobel(patch, sobel[0], CV_32F,1,0,1);
Sobel(patch, sobel[1], CV_32F,0,1,1);
merge(sobel,2,feat);
feat=feat/255.0-0.5; // normalize to range -0.5 .. 0.5
}
////////////////////////////////////////////////////
struct Array MatchingMethod( int, void* )
{
/// Source image to display
Mat frame;
struct Array a = {{0, 0}};   // default result when no match beats the threshold
/////////
for(int i=1; i<=4; i++){
img.copyTo( frame );
// break;
//}
//////////////////////////
cv::cuda::setDevice(0); // initialize CUDA
// convert from mat to gpumat
cv::cuda::GpuMat image_d(img);
cv::cuda::GpuMat templ_d(templ);
cv::cuda::GpuMat result;
// GPU -> NG
cv::Ptr<cv::cuda::TemplateMatching> alg =
cv::cuda::createTemplateMatching(image_d.type(), cv::TM_CCOEFF_NORMED);
alg->match(image_d, templ_d, result); // no return.
//cv::cuda::normalize(result, result, 0, 1, cv::NORM_MINMAX, -1);
double max_value, min_value;
cv::Point location;
cv::cuda::minMaxLoc(result, &min_value, &max_value, 0, &location);
/////////////////////////
double THRESHOLD = 3e-09; //0.3;
if( min_value <= THRESHOLD) {
//struct Array a;
a.arr[0] = location.x;
a.arr[1] = location.y;
cout<<"Hi"<<endl;
}
}
return a;
}

OpenCV Assertion Failed with SolvePnP

I'm trying to use solvePnP in a C++ program that detects a chessboard in a video stream. Every time the calibration finishes, I try to run solvePnP, but I keep getting errors that I think are related to the translation and rotation vectors.
This is the error:
[screenshot of the OpenCV assertion failure]
This is my code:
#include <cstdio>
#include <cstdlib>
#include <string>
#include <iostream>
#include <fstream>
#include <opencv2/core/core.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/calib3d/calib3d.hpp>
#include <opencv2/highgui/highgui.hpp>
using namespace cv;
using namespace std;
//Global variables
vector<vector<Point2f> > corner_list;
vector<vector<Point3f> > point_list;
vector<Mat> rotation_vecs, translation_vecs;
int i = 0;
bool calibMode = true, drawMode = false, drawMode2 = false, camMatrixInit = false, calibrated = false;
Mat originalCameraMatrix, cameraMatrix, distCoefficients, frame, gray, rvec, tvec;
//Function declarations
vector<Point3f> genWorldPoints(int cols, int rows);
void printCameraMatrix();
void printDistCoeff();
void printRotVecs();
/*Generates the world points (0, 0, 0), (1, 0, 0), etc. for the camera calibration.*/
vector<Point3f> genWorldPoints(int cols, int rows){
vector<Point3f> ret;
for (int i=0; i<rows; i++) {
for (int j=0; j<cols; j++) {
int tempi = i*(-1);
ret.push_back(Point3f((float)j, (float)tempi, 0.0));
}
}
return ret;
}
/*Print the camera matrix*/
void printCameraMatrix(){
cout << "Original Camera Matrix" << endl << originalCameraMatrix << endl;
cout << "Current Camera Matrix" << endl << cameraMatrix << endl;
}
/*Prints the distortion coefficients*/
void printDistCoeff(){
cout << "Distortion Coefficients:"<< endl << distCoefficients << endl;
}
int main(int argc, char *argv[]){
VideoCapture *capdev;
cameraMatrix = Mat::eye(3, 3, CV_64F);
distCoefficients = Mat::zeros(8, 1, CV_64F);
// open the video device
capdev = new VideoCapture(0);
if (!capdev->isOpened()) {
printf("Unable to open video device\n");
return(-1);
}
namedWindow("Video", 1);
bool found = false;   // initialize so the first-frame check below is well defined
*capdev >> frame;
rvec = Mat::zeros(3, 1, CV_64F);
tvec = Mat::zeros(3, 1, CV_64F);
//Initialize camera matrix
cameraMatrix.at<double>(0,2) = (frame.size().width)/2;
cameraMatrix.at<double>(1,2) = (frame.size().height)/2;
cameraMatrix.copyTo(originalCameraMatrix);
printCameraMatrix();
for(;;){
*capdev >> frame;
if (!found) {
imshow("Video", frame);
}
Size patternsize(9,6);
vector<Point2f> corner_set;
vector<Point3f> point_set;
int code = waitKey(10);
cvtColor(frame, gray, CV_BGR2GRAY);
found = findChessboardCorners(gray, patternsize, corner_set, CV_CALIB_CB_ADAPTIVE_THRESH | CV_CALIB_CB_FILTER_QUADS);
//Code to add a calibration frame
if (found && calibMode) {
cornerSubPix(gray, corner_set, Size(11, 11), Size(-1, -1), TermCriteria(CV_TERMCRIT_EPS + CV_TERMCRIT_ITER, 30, 0.1));
drawChessboardCorners(frame, patternsize, Mat(corner_set), found);
//s key press
if (code == 115){
printf("Adding calibration frame\n");
printf("Number of corners found: %lu\n", corner_set.size());
printf("Point 0 x: %f\ty: %f\n", corner_set[0].x, corner_set[1].y);
//Add the corner set
corner_list.push_back(corner_set);
//Generate point set
vector<Point3f> point_set = genWorldPoints(9,6);
//Add point set to point set list
point_list.push_back(point_set);
//Save the image
string filename = "../data/images/p4_calib_image_"+to_string(i)+".jpeg";
imwrite(filename, frame);
//if there are more than 5 saved calibration images, run calibration procedure
if (i>4) {
//Calculate the reprojection error by running calibrateCamera
double rpe = calibrateCamera(point_list, corner_list, frame.size(), cameraMatrix,
distCoefficients, rotation_vecs, translation_vecs,
CV_CALIB_FIX_ASPECT_RATIO | CV_CALIB_FIX_K4);
//Print the camera matrix
printCameraMatrix();
printDistCoeff();
//Print the reprojection error
cout << "Reprojection error: " << rpe << endl;
calibrated = true;
}
//Increment i
i++;
}
imshow("Video", frame);
}
else if (found && drawMode){
cout << "Draw mode" << endl;
bool solved = solvePnP(point_list, corner_list, cameraMatrix, distCoefficients,
rvec, tvec);
//rotation_vecs.front(), translation_vecs.front());
}
else if (found && drawMode2){
cout << "Draw mode 2" << endl;
bool solved = solvePnP(point_list, corner_list, cameraMatrix, distCoefficients,
rvec, tvec);
//rotation_vecs.front(), translation_vecs.front());
}
//Switching between drawing modes
if (calibrated && code == 49) {
calibMode = false;
drawMode2 = false;
drawMode = true;
}
else if (calibrated && code == 50){
calibMode = false;
drawMode = false;
drawMode2 = true;
}
//Switch back to calibration mode
else if (calibrated && code == 51){
drawMode = false;
drawMode2 = false;
calibMode = true;
}
if (code == 27) {
printf("Terminating\n");
delete capdev;
return(0);
}
}
capdev = new VideoCapture(0);
printf("Terminating\n");
delete capdev;
return(0);
}
Please excuse the indentation...
The problem is that solvePnP requires vector<Point3f> and vector<Point2f> as input, not vector<vector<Point3f> > and vector<vector<Point2f> >. In your code, "point_list" is a vector<vector<Point3f> > and "corner_list" is a vector<vector<Point2f> >.
The documentation of solvePnP can be found here: http://docs.opencv.org/3.0-beta/modules/calib3d/doc/camera_calibration_and_3d_reconstruction.html
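For illustration, a minimal sketch of the corrected call, reusing the per-frame corner_set and the genWorldPoints() helper from the question:
// Pass one view's 3D-2D correspondences, not the accumulated calibration lists.
vector<Point3f> object_points = genWorldPoints(9, 6);   // 3D points for the 9x6 board
bool solved = solvePnP(object_points, corner_set,        // a single view only
                       cameraMatrix, distCoefficients, rvec, tvec);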

Face Detection Using KinectV2 and OpenCV

Recently I have been trying to learn OpenCV and was trying to detect faces using the Haar classifier.
I was successful in detecting faces when I obtained the video stream from the default webcam, but when I use the Kinect instead of the default webcam, it still detects faces but the frame rate drops tremendously.
The code that I've written is:
int main() {
string haar_face = "F:\\haarcascade_frontalface_default.xml";   // backslash must be escaped
CascadeClassifier haar_cascade;
haar_cascade.load(haar_face);
if (haar_cascade.empty()) {
return -1;
}
vector<Rect_<int>> faces;
bool optionKinect = false;
cout << "Choose Option\n1.) Kinect \n>1.) WebCam\n";
int choice;
cin >> choice;
if (choice == 1) {
optionKinect = true;
}
if (optionKinect) {
CKinectStreamsMat* kinectStream = new CKinectStreamsMat();
kinectStream->initSensor();
while (true) {
Mat original, gray;
Mat face;
Rect face_i;
//cap >> original;
original = kinectStream->getColorFrame();
if (original.data) {
cvtColor(original, gray, CV_BGRA2GRAY);   // the Kinect color frame is BGRA (CV_8UC4)
haar_cascade.detectMultiScale(gray, faces);
int size = faces.size();
for (size_t i = 0; i < size; i++) {
face_i = faces[i];
face = gray(face_i);
rectangle(original, face_i, CV_RGB(0, 255, 0), 1);
}
imshow("original", original);
if(waitKey(20) == 27){
break;
}
}
}
}
else {
VideoCapture cap(0);
while (true) {
Mat original, gray;
Mat face;
Rect face_i;
cap >> original;
//original = kinectStream->getColorFrame();
if (original.data) {
cvtColor(original, gray, CV_BGR2GRAY);
haar_cascade.detectMultiScale(gray, faces);
int size = faces.size();
for (size_t i = 0; i < size; i++) {
face_i = faces[i];
face = gray(face_i);
rectangle(original, face_i, CV_RGB(0, 255, 0), 1);
}
imshow("original", original);
if(waitKey(20) == 27){
break;
}
}
}
}
}
And this is how I am obtaining the color frame from the Kinect:
cv::Mat CKinectStreamsMat::getColorFrame()
{
HRESULT hr = E_FAIL;
IColorFrame* frame = NULL;
IFrameDescription* frameDesc;
cv::Mat colorImage;
hr = _color_reader->AcquireLatestFrame(&frame);
if (SUCCEEDED(hr)) {
hr = frame->get_FrameDescription(&frameDesc);
if (SUCCEEDED(hr)) {
int frameWidth = 0, frameHeight = 0;
hr = frameDesc->get_Width(&frameWidth);
if (SUCCEEDED(hr)) {
hr = frameDesc->get_Height(&frameHeight);
}
if (SUCCEEDED(hr)) {
const int imgSize = frameWidth*frameHeight * 4 * sizeof(unsigned char); //4 Channels(BGRA)
colorImage = cv::Mat(frameHeight, frameWidth, CV_8UC4);   // use the queried size rather than hardcoding 1920x1080
hr = frame->CopyConvertedFrameDataToArray(imgSize, reinterpret_cast<BYTE*>(colorImage.data), ColorImageFormat_Bgra);
}
}
SafeRelease(frameDesc);
SafeRelease(frame);
}
return colorImage;
}
I thought the reason for the low performance might be the difference in resolution between the frames provided by the webcam and the Kinect, so I also tried scaling the Kinect frame down to a size even smaller than the webcam frame. But the performance was still very low.
As this is all I could think of and I am now out of ideas, could anyone please tell me what the reason for this low performance could be?
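For reference, a minimal sketch of the downscaling step described above (the 0.5 scale factor and the mapping back to full resolution are illustrative, not from the original post):
// Downscale the 1920x1080 BGRA Kinect frame before running the detector,
// then map the detections back to full-frame coordinates for drawing.
cv::Mat small;
cv::resize(original, small, cv::Size(), 0.5, 0.5, cv::INTER_AREA);
cv::cvtColor(small, gray, CV_BGRA2GRAY);
haar_cascade.detectMultiScale(gray, faces);
for (cv::Rect& r : faces) {
    r.x *= 2; r.y *= 2; r.width *= 2; r.height *= 2;
}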

OpenCV blob detection segfaults when filtering by color

I'm having trouble using the filterByColor functionality in the SimpleBlobDetector tool that ships with OpenCV. Make doesn't give me any errors, but when I try to run the program, it segfaults at blobme.detect().
It works fine when I use filterByArea, it's just filterByColor that's giving me headaches.
#include <opencv2/features2d/features2d.hpp>
#include <opencv2/opencv.hpp>
#include <iostream>
#define ACTIVE_CHANNEL 2
int main(int argc, char* argv[])
{
if (argc != 3)
{
std::cout << "./image_proc <file> <thresh> (-1 for default)" << std::endl;
return -1;
}
cv::Mat test_im = cv::imread(argv[1], CV_LOAD_IMAGE_COLOR);
cv::Mat hsvim, outim, channels[3], descriptor;
std::vector<cv::KeyPoint> keypoints;
// Convert to HSV
cv::cvtColor(test_im, hsvim, CV_BGR2HSV);   // imread loads BGR, so convert from BGR, not RGB
cv::split(hsvim, channels);
cv::SimpleBlobDetector::Params params;
params.filterByInertia = false;
params.filterByConvexity = false;
params.filterByColor = true;
params.filterByCircularity = false;
params.filterByArea = false;
params.blobColor = 255;
//params.minArea = 100.0f;
//params.maxArea = 500.0f;
// Trying to use blob detector
cv::SimpleBlobDetector blobme(params);
blobme.detect(channels[ACTIVE_CHANNEL], keypoints);
// Print keypoints
cv::drawKeypoints(channels[ACTIVE_CHANNEL], keypoints, outim);
// Display
cv::namedWindow("Display window", cv::WINDOW_AUTOSIZE);
cv::imwrite("imout.jpg", outim);
cv::imshow("Display window", outim);
cv::waitKey(0);
return 0;
}

Saving Images of Detected Faces in OpenCV

I have code that detects faces and saves multiple cropped images of them to a file path. My code doesn't stop saving images of detected faces until I physically close the program. For every second a face is detected on the webcam, my code saves 6 images of the face.
Is it possible to have it save just one image per face detected? For example, if there is one face, only one image is saved; if there are two faces, an image of each face is saved, and so on. My code is below. Can anyone help me?
#include "opencv2/objdetect/objdetect.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include <iostream>
#include <stdio.h>
using namespace std;
using namespace cv;
void detectAndDisplay(Mat frame);
string face_cascade_name = "C:\\opencv\\sources\\data\\haarcascades\\haarcascade_frontalface_alt2.xml";
CascadeClassifier face_cascade;
string window_name = "Window";
int filenumber;
string filename;
int main(void)
{
VideoCapture capture(0);
if (!capture.isOpened())
return -1;
if (!face_cascade.load(face_cascade_name))
{
cout << "error" << endl;
return (-1);
};
Mat frame;
for (;;)
{
capture >> frame;
if (!frame.empty())
{
detectAndDisplay(frame);
}
else
{
cout << "error2" << endl;
break;
}
int c = waitKey(10);
if (27 == char(c))
{
break;
}
}
return 0;
}
void detectAndDisplay(Mat frame)
{
std::vector<Rect> faces;
Mat frame_gray;
Mat crop;
Mat res;
Mat gray;
string text;
stringstream sstm;
cvtColor(frame, frame_gray, COLOR_BGR2GRAY);
equalizeHist(frame_gray, frame_gray);
face_cascade.detectMultiScale(frame_gray, faces, 1.1, 2, 0 | CASCADE_SCALE_IMAGE, Size(30, 30));
cv::Rect roi_b;
cv::Rect roi_c;
size_t ic = 0;
int ac = 0;
size_t ib = 0;
int ab = 0;
for (ic = 0; ic < faces.size(); ic++)
{
roi_c.x = faces[ic].x;
roi_c.y = faces[ic].y;
roi_c.width = (faces[ic].width);
roi_c.height = (faces[ic].height);
ac = roi_c.width * roi_c.height;
roi_b.x = faces[ib].x;
roi_b.y = faces[ib].y;
roi_b.width = (faces[ib].width);
roi_b.height = (faces[ib].height);
crop = frame(roi_b);
resize(crop, res, Size(128, 128), 0, 0, INTER_LINEAR);
cvtColor(crop, gray, CV_BGR2GRAY);
filename = "C:\\Users\\Desktop\\Faces\\face";
stringstream ssfn;
ssfn << filename.c_str() << filenumber << ".jpg";
filename = ssfn.str();
cv::imwrite(filename, res);
filenumber++;
Point pt1(faces[ic].x, faces[ic].y);
Point pt2((faces[ic].x + faces[ic].width), (faces[ic].y + faces[ic].height));
rectangle(frame, pt1, pt2, Scalar(0, 255, 0), 2, 8, 0);
}
sstm << "Crop area size: " << roi_b.width << "x" << roi_b.height << " Filename: " << filename;
text = sstm.str();
if (!crop.empty())
{
imshow("detected", crop);
}
else
destroyWindow("detected");
}