OpenCV: error when training the SVM with picture examples - C++

When training my classifier, I get this error:
Image step is wrong (The matrix is not continuous, thus its number of rows can not be changed) in reshape, file /home/denn/Downloads/opencv-2.4.6.1/modules/core/src/matrix.cpp, line 802
terminate called after throwing an instance of 'cv::Exception'
what(): /home/denn/Downloads/opencv-2.4.6.1/modules/core/src/matrix.cpp:802: error: (-13) The matrix is not continuous, thus its number of rows can not be changed in function reshape
Aborted (core dumped)
I'm working on an Automatic Number Plate Recognition project in C++. All that is left now is training my SVM.
I resized all my images to 450 by 450 after researching this, but the error persists.
I have studied and looked around, but none of the solutions work for me.
Any help will be highly appreciated.
// Main entry code OpenCV
#include <cv.h>
#include <highgui.h>
#include <cvaux.h>
#include <iostream>
#include <vector>
using namespace std;
using namespace cv;
int main ( int argc, char** argv )
{
cout << "OpenCV Training SVM Automatic Number Plate Recognition\n";
cout << "\n";
char* path_Plates;
char* path_NoPlates;
int numPlates;
int numNoPlates;
int imageWidth=450; //144
int imageHeight=450; //33
//Check if user specify image to process
if(argc >= 5 )
{
numPlates= atoi(argv[1]);
numNoPlates= atoi(argv[2]);
path_Plates= argv[3];
path_NoPlates= argv[4];
}else{
cout << "Usage:\n" << argv[0] << " <num Plate Files> <num Non Plate Files> <path to plate folder files> <path to non plate files> \n";
return 0;
}
Mat classes;//(numPlates+numNoPlates, 1, CV_32FC1);
Mat trainingData;//(numPlates+numNoPlates, imageWidth*imageHeight, CV_32FC1 );
Mat trainingImages;
vector<int> trainingLabels;
for(int i=0; i< numPlates; i++)
{
stringstream ss(stringstream::in | stringstream::out);
ss << path_Plates << i << ".jpg";
Mat img=imread(ss.str(), 0);
img= img.reshape(1, 1);
trainingImages.push_back(img);
trainingLabels.push_back(1);
}
for(int i=0; i< numNoPlates; i++)
{
stringstream ss(stringstream::in | stringstream::out);
ss << path_NoPlates << i << ".jpg";
Mat img=imread(ss.str(), 0);
img= img.reshape(1, 1);
trainingImages.push_back(img);
trainingLabels.push_back(0);
}
Mat(trainingImages).copyTo(trainingData);
//trainingData = trainingData.reshape(1,trainingData.rows);
trainingData.convertTo(trainingData, CV_32FC1);
Mat(trainingLabels).copyTo(classes);
FileStorage fs("SVM.xml", FileStorage::WRITE);
fs << "TrainingData" << trainingData;
fs << "classes" << classes;
fs.release();
return 0;
}
I edited the code and made it like this:
// Main entry code OpenCV
#include <cv.h>
#include <highgui.h>
#include <cvaux.h>
#include <iostream>
#include <vector>
#include <iostream>
using namespace std;
using namespace cv;
int main ( int argc, char** argv )
{
cout << "OpenCV Training SVM Automatic Number Plate Recognition\n";
cout << "\n";
char* path_Plates;
char* path_NoPlates;
int numPlates;
int numNoPlates;
int imageWidth=450; //144
int imageHeight=450; //33
//Check if user specify image to process
if(argc >= 5 )
{
numPlates= atoi(argv[1]);
numNoPlates= atoi(argv[2]);
path_Plates= argv[3];
path_NoPlates= argv[4];
}else{
cout << "Usage:\n" << argv[0] << " <num Plate Files> <num Non Plate Files> <path to plate folder files> <path to non plate files> \n";
return 0;
}
Mat classes;//(numPlates+numNoPlates, 1, CV_32FC1);
Mat trainingData;//(numPlates+numNoPlates, imageWidth*imageHeight, CV_32FC1 );
Mat trainingImages;
vector<int> trainingLabels;
Mat classes = new Mat();
Mat trainingData = new Mat();
Mat trainingImages = new Mat();
Mat trainingLabels = new Mat();
for(int i=0; i< numPlates; i++)
{
stringstream ss(stringstream::in | stringstream::out);
ss << path_Plates << i << ".png";
Mat img=imread(ss.str(), 0);
img= img.reshape(1, 1);
trainingImages.push_back(img);
trainingLabels.push_back(1);//trainingLabels.push_back(Mat.ones(new Size(1, 1), CvType.CV_32FC1));//trainingLabels.push_back(1);
}
for(int i=0; i< numNoPlates; i++)
{
stringstream ss(stringstream::in | stringstream::out);
ss << path_NoPlates << i << ".png";
Mat img=imread(ss.str(), 0);
img= img.reshape(1, 1); //img= img.clone().reshape(1, 1);
trainingImages.push_back(img);
trainingLabels.push_back(0);//trainingLabels.push_back(Mat.zeros(new Size(1, 1), CvType.CV_32FC1));//trainingLabels.push_back(0);
}
trainingImages.copyTo(trainingData);
//trainingData = trainingData.reshape(1,trainingData.rows);
trainingData.convertTo(trainingData, CV_32FC1);
trainingLabels.copyTo(classes);
FileStorage fs("SVM.xml", FileStorage::WRITE);
fs << "TrainingData" << trainingData;
fs << "classes" << classes;
fs.release();
return 0;
}
but I get this error on compilation:
/home/denn/Desktop/NumberPlateRecognition/trainSVM.cpp:52:27: error: conversion from ‘cv::Mat*’ to non-scalar type ‘cv::Mat’ requested
/home/denn/Desktop/NumberPlateRecognition/trainSVM.cpp:52:27: error: conversion from ‘cv::Mat*’ to non-scalar type ‘cv::Mat’ requested
/home/denn/Desktop/NumberPlateRecognition/trainSVM.cpp:53:32: error: conversion from ‘cv::Mat*’ to non-scalar type ‘cv::Mat’ requested
/home/denn/Desktop/NumberPlateRecognition/trainSVM.cpp:55:34: error: conversion from ‘cv::Mat*’ to non-scalar type ‘cv::Mat’ requested
/home/denn/Desktop/NumberPlateRecognition/trainSVM.cpp:56:34: error: conversion from ‘cv::Mat*’ to non-scalar type ‘cv::Mat’ requested
make[2]: *** [CMakeFiles/trainSVM.dir/trainSVM.cpp.o] Error 1
make[1]: *** [CMakeFiles/trainSVM.dir/all] Error 2
make: *** [all] Error 2
Any advice?

As berak pointed out in the comments above, your cv::Mat can become discontinuous in the following instances:
if you extract a part of the matrix using Mat::col(), Mat::diag(), and so on, or construct a matrix header for externally allocated data, such matrices may no longer have [the isContinuous()] property.
As they point out in the above reference, create your matrices using Mat::create and you won't have this issue.
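For illustration, here is a minimal sketch (my own, not from the original post) of how a sub-matrix view loses continuity and how clone() restores it:
#include <opencv2/opencv.hpp>
#include <iostream>
int main() {
    cv::Mat big = cv::Mat::zeros(4, 4, CV_8UC1);
    cv::Mat col = big.col(1);                       // header into big's data; rows are not adjacent in memory
    std::cout << col.isContinuous() << std::endl;   // prints 0
    cv::Mat flat = col.clone().reshape(1, 1);       // clone() reallocates contiguously, so reshape is safe
    std::cout << flat.isContinuous() << std::endl;  // prints 1
    return 0;
}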
Update:
So the function Mat::clone, as our friend berak pointed out in the comments above, will do the trick for you: it calls the function Mat::create internally. I just tried out the following code, and it worked like a charm.
Mat trainingImages;
vector<int> trainingLabels;
for(int i = 0; i < numPlates; i++)
{
    stringstream ss;
    ss << path_Plates << "grumpy" << i << ".jpg";
    std::cout << ss.str() << std::endl;
    Mat img = imread(ss.str(), CV_LOAD_IMAGE_GRAYSCALE);
    if(!img.data) {
        cout << "Could not open or find the image" << std::endl;
        return -1;
    }
    else {
        img = img.clone().reshape(0, 1);  // clone() forces a continuous copy before reshape
        trainingImages.push_back(img);
        trainingLabels.push_back(i);
    }
}
However, a few notes: it looks like you might not have the right header file names. I used the following with OpenCV 2.4.8 on Ubuntu 12.04:
#include <opencv2/opencv.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/ml/ml.hpp>
Also, make sure to compile it with the OpenCV libraries (i.e. opencv_core and opencv_ml), as sketched below. Hope that helps you in your quest for plate recognition.
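For reference, a link line along these lines (assuming pkg-config knows about your OpenCV installation; the file name comes from your error messages) should pull both libraries in:
g++ trainSVM.cpp -o trainSVM `pkg-config opencv --cflags --libs`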

Related

Google Cloud Vision API Object Detection Model Gives Bus Error on Raspberry Pi

I trained a simple object detection model on Google Cloud with the Vision API. After exporting it as a TFLite model, I tried to run it on a Raspberry Pi 3B+ with the simple starter code below and TensorFlow Lite 2.6.0rc-2. The code runs the standard MobileNet model fine, but gives a bus error while allocating the tensors with my custom model. I then tried running the same code with my model on WSL Debian, which worked. The Vision API says it supports ARM edge devices, so I don't understand why it's not working. Does the Raspberry Pi just not have enough memory? If so, why does it run the more complex MobileNet model?
Test code modified from https://github.com/Qengineering/TensorFlow_Lite_SSD_RPi_32-bits
#include <opencv2/opencv.hpp>
#include <opencv2/dnn.hpp>
#include <opencv2/highgui.hpp>
#include <fstream>
#include <iostream>
#include <opencv2/core/ocl.hpp>
#include "tensorflow/lite/interpreter.h"
#include "tensorflow/lite/kernels/register.h"
#include "tensorflow/lite/string_util.h"
#include "tensorflow/lite/model.h"
#include <cmath>
using namespace cv;
using namespace std;
const size_t width = 192;
const size_t height = 192;
std::vector<std::string> Labels;
std::unique_ptr<tflite::Interpreter> interpreter;
static bool getFileContent(std::string fileName)
{
// Open the File
std::ifstream in(fileName.c_str());
// Check if object is valid
if(!in.is_open()) return false;
std::string str;
// Read the next line from the file until it reaches the end.
while (std::getline(in, str))
{
// Line contains string of length > 0 then save it in vector
if(str.size()>0) Labels.push_back(str);
}
// Close The File
in.close();
return true;
}
void detect_from_video(Mat &src)
{
Mat image;
int cam_width =src.cols;
int cam_height=src.rows;
// copy image to input as input tensor
cv::resize(src, image, Size(width,height));
memcpy(interpreter->typed_input_tensor<uchar>(0), image.data, image.total() * image.elemSize());
interpreter->SetAllowFp16PrecisionForFp32(true);
interpreter->SetNumThreads(4); //quad core
// cout << "tensors size: " << interpreter->tensors_size() << "\n";
// cout << "nodes size: " << interpreter->nodes_size() << "\n";
// cout << "inputs: " << interpreter->inputs().size() << "\n";
// cout << "input(0) name: " << interpreter->GetInputName(0) << "\n";
// cout << "outputs: " << interpreter->outputs().size() << "\n";
interpreter->Invoke(); // run your model
const float* detection_locations = interpreter->tensor(interpreter->outputs()[0])->data.f;
const float* detection_classes=interpreter->tensor(interpreter->outputs()[1])->data.f;
const float* detection_scores = interpreter->tensor(interpreter->outputs()[2])->data.f;
const int num_detections = *interpreter->tensor(interpreter->outputs()[3])->data.f;
//there are ALWAYS 10 detections no matter how many objects are detectable
//cout << "number of detections: " << num_detections << "\n";
const float confidence_threshold = 0.5;
for(int i = 0; i < num_detections; i++){
if(detection_scores[i] > confidence_threshold){
int det_index = (int)detection_classes[i]+1;
float y1=detection_locations[4*i ]*cam_height;
float x1=detection_locations[4*i+1]*cam_width;
float y2=detection_locations[4*i+2]*cam_height;
float x2=detection_locations[4*i+3]*cam_width;
Rect rec((int)x1, (int)y1, (int)(x2 - x1), (int)(y2 - y1));
rectangle(src,rec, Scalar(0, 0, 255), 1, 8, 0);
putText(src, format("%s", Labels[det_index].c_str()), Point(x1, y1-5) ,FONT_HERSHEY_SIMPLEX,0.5, Scalar(0, 0, 255), 1, 8, 0);
}
}
}
int main(int argc,char ** argv)
{
float f;
float FPS[16];
int i;
int Fcnt=0;
Mat frame;
chrono::steady_clock::time_point Tbegin, Tend;
for(i=0;i<16;i++) FPS[i]=0.0;
// Load model
std::unique_ptr<tflite::FlatBufferModel> model = tflite::FlatBufferModel::BuildFromFile("detect.tflite");
// Build the interpreter
tflite::ops::builtin::BuiltinOpResolver resolver;
tflite::InterpreterBuilder(*model.get(), resolver)(&interpreter);
interpreter->AllocateTensors();
// Get the names
bool result = getFileContent("labels.txt");
if(!result)
{
cout << "loading labels failed";
exit(-1);
}
// VideoCapture cap("James.mp4");
VideoCapture cap(0);
if (!cap.isOpened()) {
cerr << "ERROR: Unable to open the camera" << endl;
return 0;
}
cout << "Start grabbing, press ESC on Live window to terminate" << endl;
while(1){
// frame=imread("Traffic.jpg"); //need to refresh frame before dnn class detection
cap >> frame;
if (frame.empty()) {
cerr << "End of movie" << endl;
break;
}
detect_from_video(frame);
Tend = chrono::steady_clock::now();
//calculate frame rate
f = chrono::duration_cast <chrono::milliseconds> (Tend - Tbegin).count();
Tbegin = chrono::steady_clock::now();
FPS[((Fcnt++)&0x0F)]=1000.0/f;
for(f=0.0, i=0;i<16;i++){ f+=FPS[i]; }
putText(frame, format("FPS %0.2f",f/16),Point(10,20),FONT_HERSHEY_SIMPLEX,0.6, Scalar(0, 0, 255));
//show output
imshow("RPi 4 - 2.0 GHz - 2 Mb RAM", frame);
char esc = waitKey(5);
if(esc == 27) break;
}
cout << "Closing the camera" << endl;
// When everything done, release the video capture and write object
cap.release();
destroyAllWindows();
cout << "Bye!" << endl;
return 0;
}
Stack trace of bus error, occurs during tensor allocation
Program terminated with signal SIGBUS, Bus error.
#0 0x00134dd0 in tflite::ops::builtin::broadcastto::ResizeOutputTensor(TfLiteContext*, tflite::ops::builtin::broadcastto::BroadcastToContext*) ()
(gdb) bt
#0 0x00134dd0 in tflite::ops::builtin::broadcastto::ResizeOutputTensor(TfLiteContext*, tflite::ops::builtin::broadcastto::BroadcastToContext*) ()
#1 0x00135194 in tflite::ops::builtin::broadcastto::Prepare(TfLiteContext*, TfLiteNode*) ()
#2 0x000d36c4 in tflite::Subgraph::PrepareOpsStartingAt(int, std::vector<int, std::allocator<int> > const&, int*) ()
#3 0x000d386c in tflite::Subgraph::PrepareOpsAndTensors() ()
#4 0x000d5c64 in tflite::Subgraph::AllocateTensors() ()
#5 0x0001b2cc in tflite::Interpreter::AllocateTensors() ()
#6 0x000161d8 in main(int, char**) (argc=1, argv=0x7ebd0644)
at MobileNetV1.cpp:106
TFLite object detection model trained for a single label type with 50 images (I want to get the model working before I add more):
https://storage.googleapis.com/ml-data-storage-bucket/models/model-export/iod/tflite-ping_pong_ball_1-2021-08-02T19%3A46%3A09.324008Z/model.tflite

How to read data at specific coordinates in a high-dimensional Mat using C++?

I am trying to use the MobileNet SSD + deep neural network (dnn) module in OpenCV for object detection. I loaded and used the model successfully. As the output of net.forward I obtain a Mat object containing the information about the detected objects. Unfortunately, I struggle with "the easy part of the work": reading what exactly was detected.
Here is information I know about the output Mat object:
It has 4 dimensions.
The size is 1 x 1 x number_of_objects_detected x 7.
The seven pieces of information about each object are: the 1st is the image ID within the batch, the 2nd is the class ID, the 3rd is the confidence, and the 4th-7th are the bounding box values.
I can't find any C++ example, but I found many Python examples. They read the data like this:
for i in np.arange(0, detections.shape[2]):
confidence = detections[0, 0, i, 2]
What is the easiest way to do this in C++? I.e., I need to read the data at specific coordinates in a high-dimensional Mat.
Thank you for your kind help. I am quite new to C++ and sometimes find it overwhelming...
I am using OpenCV 3.3.0. The GitHub repository with the MobileNet SSD I am using: https://github.com/chuanqi305/MobileNet-SSD.
The code of my program:
#include <opencv2/dnn.hpp>
#include <opencv2/imgproc.hpp>
#include <opencv2/highgui.hpp>
#include <fstream>
#include <iostream>
using namespace cv;
using namespace cv::dnn;
using namespace std;
// function to create vector of class names
std::vector<String> createClaseNames() {
std::vector<String> classNames;
classNames.push_back("background");
classNames.push_back("aeroplane");
classNames.push_back("bicycle");
classNames.push_back("bird");
classNames.push_back("boat");
classNames.push_back("bottle");
classNames.push_back("bus");
classNames.push_back("car");
classNames.push_back("cat");
classNames.push_back("chair");
classNames.push_back("cow");
classNames.push_back("diningtable");
classNames.push_back("dog");
classNames.push_back("horse");
classNames.push_back("motorbike");
classNames.push_back("person");
classNames.push_back("pottedplant");
classNames.push_back("sheep");
classNames.push_back("sofa");
classNames.push_back("train");
classNames.push_back("tvmonitor");
return classNames;
}
// main function
int main(int argc, char **argv)
{
// set inputs
String modelTxt = "C:/Users/acer/Desktop/kurz_OCV/cv4faces/project/python/object-detection-deep-learning/MobileNetSSD_deploy.prototxt";
String modelBin = "C:/Users/acer/Desktop/kurz_OCV/cv4faces/project/python/object-detection-deep-learning/MobileNetSSD_deploy.caffemodel";
String imageFile = "C:/Users/acer/Desktop/kurz_OCV/cv4faces/project/puppies.jpg";
std::vector<String> classNames = createClaseNames();
//read caffe model
Net net;
try {
net = dnn::readNetFromCaffe(modelTxt, modelBin);
}
catch (cv::Exception& e) {
std::cerr << "Exception: " << e.what() << std::endl;
if (net.empty())
{
std::cerr << "Can't load network." << std::endl;
exit(-1);
}
}
// read image
Mat img = imread(imageFile);
// create input blob
resize(img, img, Size(300, 300));
Mat inputBlob = blobFromImage(img, 0.007843, Size(300, 300), Scalar(127.5)); //Convert Mat to dnn::Blob image batch
// apply the blob on the input layer
net.setInput(inputBlob); //set the network input
// classify the image by applying the blob on the net
Mat detections = net.forward("detection_out"); //compute output
// print some information about detections
std::cout << "dims: " << detections.dims << endl;
std::cout << "size: " << detections.size << endl;
//show image
String winName("image");
imshow(winName, img);
// Wait for keypress
waitKey();
}
Check out the official OpenCV tutorial on how to scan images.
The normal way you'd access a 3-channel (i.e. color) Mat would be using the Mat::at() method of the Mat class, which is heavily overloaded for all sorts of accessor options. Specifically, you can send in an array of indices or a vector of indices.
Here's a basic example creating a 4D Mat and accessing a specific element:
#include <opencv2/opencv.hpp>
#include <iostream>
int main() {
    int size[4] = { 2, 2, 5, 7 };
    cv::Mat M(4, size, CV_32FC1, cv::Scalar(1));
    int indx[4] = { 0, 0, 2, 3 };
    std::cout << "M[0, 0, 2, 3] = " << M.at<float>(indx) << std::endl;
}
M[0, 0, 2, 3] = 1
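For completeness, and as far as I recall the overloads, the same element can also be read through a cv::Vec of indices:
cv::Vec4i indx4(0, 0, 2, 3);
std::cout << M.at<float>(indx4) << std::endl;  // also prints 1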
Someone might find this question in the context of using the MobileNet SSD + deep neural network (dnn) module in OpenCV for object detection. So here I post the now-functional object detection code. Alexander Reynolds, thank you for your help.
#include <opencv2/dnn.hpp>
#include <opencv2/imgproc.hpp>
#include <opencv2/highgui.hpp>
#include <fstream>
#include <iostream>
using namespace cv;
using namespace cv::dnn;
using namespace std;
// function to create vector of class names
std::vector<String> createClaseNames() {
std::vector<String> classNames;
classNames.push_back("background");
classNames.push_back("aeroplane");
classNames.push_back("bicycle");
classNames.push_back("bird");
classNames.push_back("boat");
classNames.push_back("bottle");
classNames.push_back("bus");
classNames.push_back("car");
classNames.push_back("cat");
classNames.push_back("chair");
classNames.push_back("cow");
classNames.push_back("diningtable");
classNames.push_back("dog");
classNames.push_back("horse");
classNames.push_back("motorbike");
classNames.push_back("person");
classNames.push_back("pottedplant");
classNames.push_back("sheep");
classNames.push_back("sofa");
classNames.push_back("train");
classNames.push_back("tvmonitor");
return classNames;
}
// main function
int main(int argc, char **argv)
{
// set inputs
String modelTxt = "Path to MobileNetSSD_deploy.prototxt";
String modelBin = "Path to MobileNetSSD_deploy.caffemodel";
String imageFile = "Path to test image";
std::vector<String> classNames = createClaseNames();
//read caffe model
Net net;
try {
net = dnn::readNetFromCaffe(modelTxt, modelBin);
}
catch (cv::Exception& e) {
std::cerr << "Exception: " << e.what() << std::endl;
if (net.empty())
{
std::cerr << "Can't load network." << std::endl;
exit(-1);
}
}
// read image
Mat img = imread(imageFile);
Size imgSize = img.size();
// create input blob
Mat img300;
resize(img, img300, Size(300, 300));
Mat inputBlob = blobFromImage(img300, 0.007843, Size(300, 300), Scalar(127.5)); //Convert Mat to dnn::Blob image batch
// apply the blob on the input layer
net.setInput(inputBlob); //set the network input
// classify the image by applying the blob on the net
Mat detections = net.forward("detection_out"); //compute output
// look what the detector found
for (int i=0; i < detections.size[2]; i++) {
// print information into console
cout << "-----------------" << endl;
cout << "Object nr. " << i + 1 << endl;
// detected class
int indxCls[4] = { 0, 0, i, 1 };
int cls = detections.at<float>(indxCls);
std::cout << "class: " << classNames[cls] << endl;
// confidence
int indxCnf[4] = { 0, 0, i, 2 };
float cnf = detections.at<float>(indxCnf);
std::cout << "confidence: " << cnf * 100 << "%" << endl;
// bounding box
int indxBx[4] = { 0, 0, i, 3 };
int indxBy[4] = { 0, 0, i, 4 };
int indxBw[4] = { 0, 0, i, 5 };
int indxBh[4] = { 0, 0, i, 6 };
int Bx = detections.at<float>(indxBx) * imgSize.width;
int By = detections.at<float>(indxBy) * imgSize.height;
int Bw = detections.at<float>(indxBw) * imgSize.width - Bx;
int Bh = detections.at<float>(indxBh) * imgSize.height - By;
std::cout << "bounding box [x, y, w, h]: " << Bx << ", " << By << ", " << Bw << ", " << Bh << endl;
// draw bounding box to image
Rect bbox(Bx, By, Bw, Bh);
rectangle(img, bbox, Scalar(255,0,255),1,8,0);
}
//show image
String winName("image");
imshow(winName, img);
// Wait for keypress
waitKey();
}

Error "'vector' has not been declared" in /usr/include/opencv2/nonfree/features2d.hpp in OpenCV on Ubuntu

This code uses the SURF algorithm from OpenCV. My code is not working. I am using Ubuntu 14.04.2 with OpenCV 3.0.0 and gcc/g++ 4.8.4. I want to compile the SURF algorithm.
Error output:
/usr/include/opencv2/nonfree/features2d.hpp:73:21: error:
‘vector’ has not been declared
vector& keypoints) const;
^
/usr/include/opencv2/nonfree/features2d.hpp:73:27: error:
expected ‘,’ or ‘...’ before ‘<’ token
vector& keypoints) const;
^
/usr/include/opencv2/nonfree/features2d.hpp:77:21: error:
‘vector’ has not been declared
vector& keypoints,
^
/usr/include/opencv2/nonfree/features2d.hpp:77:27: error:
expected ‘,’ or ‘...’ before ‘<’ token
vector& keypoints,
test_surf.cpp:
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/nonfree/features2d.hpp>
#include <opencv2/nonfree/nonfree.hpp>
#include <iostream>
using namespace cv;
using namespace std;
int main( int argc, char** argv )
{
if( argc != 3)
{
cout <<" Usage: sift input_image output_image" << endl;
return -1;
}
//cv::initModule_nonfree();
//cout <<"initModule_nonfree() called" << endl;
Mat image;
image = imread(argv[1], CV_LOAD_IMAGE_COLOR);
if(! image.data )
{
cout << "Could not open or find the image" << std::endl ;
return -1;
}
vector<KeyPoint> keypoints;
Mat descriptors;
// Create a SIFT keypoint detector.
SiftFeatureDetector detector;
detector.detect(image, keypoints);
cout << "Detected " << (int) keypoints.size() << " keypoints" <<endl;
// Compute feature description.
detector.compute(image,keypoints, descriptors);
cout << "Computed feature."<<endl;
// Store description to "descriptors.des".
FileStorage fs;
fs.open("descriptors.des", FileStorage::WRITE);
cout << "Opened file to store the features."<<endl;
fs << "descriptors" << descriptors;
cout << "Finished writing file."<<endl;
fs.release();
cout << "Released file."<<endl;
// Show keypoints in the output image.
Mat outputImg;
Scalar keypointColor = Scalar(255, 0, 0);
drawKeypoints(image, keypoints, outputImg, keypointColor, DrawMatchesFlags::DRAW_RICH_KEYPOINTS);
cout << "Drew keypoints in output image file."<<endl;
//
#ifdef WIN32
namedWindow("Output image", CV_WINDOW_AUTOSIZE );
imshow("Output image", outputImg);
waitKey(0);
#endif
cout << "Generate the output image."<<endl;
imwrite(argv[2], outputImg);
cout << "Done."<<endl;
return 0;
}
Include the vector header file:
#include <vector>
Refer to http://www.cplusplus.com/reference/vector/vector/vector/
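Note that <vector> declares std::vector, while the nonfree header refers to vector without the std:: qualifier, so (as a hedged sketch of one arrangement that parses) the include and a using-directive have to appear before the OpenCV nonfree headers:
#include <vector>
using namespace std;  // nonfree/features2d.hpp uses 'vector' unqualified

#include <opencv2/nonfree/features2d.hpp>
#include <opencv2/nonfree/nonfree.hpp>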

How can I detect the webcam and compare a local file against it to match a face in OpenCV?

The code below demonstrates that the OpenCV framework is loaded in my C++ code and that it can render the "Police watching" text, just to show that it works very smoothly and is very clean code to write.
Target: my webcam is connected to a USB port. I would like to capture the live webcam image and match it against a local file (/tmp/myface.png); if the live webcam image matches the local file myface.png, it should show the text "Police watching".
1) How can I capture my webcam in the following code? 2) When the webcam is captured, how can I load the file and find out whether it matches, showing that text only on a match?
#include <opencv2/objdetect/objdetect.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <fstream>
#include <iostream>
#include <stdio.h>
using namespace std;
using namespace cv;
#include "opencv/cv.h"
void detectAndDisplay(Mat frame);
//*************
// Set Region of Interest
cv::Rect roi_b;
cv::Rect roi_c;
size_t ic = 0; // ic is index of current element
int ac = 0; // ac is area of current element
size_t ib = 0; // ib is index of biggest element
int ab = 0; // ab is area of biggest element
stringstream ssfn;
//*************
CascadeClassifier face_cascade;
string window_name = "Capture - Face detection";
int filenumber; // Number of file to be saved
string filename;
Mat frameread = imread("test.jpg");
int main(int argc, const char *argv[]){
if (argc != 4) {
cout << "usage: " << argv[0] << " </path/to/haar_cascade> </path/to/csv.ext> </path/to/device id>" << endl;
cout << "\t </path/to/haar_cascade> -- Path to the Haar Cascade for face detection." << endl;
cout << "\t </path/to/csv.ext> -- Path to the CSV file with the face database." << endl;
cout << "\t <device id> -- The webcam device id to grab frames from." << endl;
// exit(1);
}
CascadeClassifier face_cascade;
CascadeClassifier face_cascade1;
String fn="C:\\opencv\\sources\\data\\haarcascades\\haarcascade_frontalface_alt2.xml";
String fn1="C:\\opencv\\sources\\data\\haarcascades\\haarcascade_eye.xml";
face_cascade.load(fn);
face_cascade1.load(fn1);
VideoCapture input(0);
if(!input.isOpened()){return -1;}
namedWindow("Mezo",1);
Mat f2;
Mat frame;
std::vector<Rect> faces,faces1;
CvCapture* capture1;
IplImage* f1;
Mat crop;
cv::Rect r;
// detectAndDisplay(frameread);
while(1)
{
ic=0;
ib=0;
ab=0;
ac=0;
input >> frame;
waitKey(10);
//cvtColor(frame, frame, CV_BGR2GRAY);
//cv::equalizeHist(frame,frame);
face_cascade.detectMultiScale(frame, faces, 1.1, 10, CV_HAAR_SCALE_IMAGE | CV_HAAR_DO_CANNY_PRUNING, cvSize(0,0), cvSize(300,300));
for(int i=0; i < faces.size();i++)
{
Point pt1(faces[i].x+faces[i].width, faces[i].y+faces[i].height);
Point pt2(faces[i].x,faces[i].y);
Mat faceROI = frame(faces[i]);
face_cascade1.detectMultiScale(faceROI, faces1, 1.1, 2, 0 | CV_HAAR_SCALE_IMAGE, Size(30,30));
for(size_t j=0; j< faces1.size(); j++)
{
Point center(faces[i].x+faces1[j].x+faces1[j].width*0.5, faces[i].y+faces1[j].y+faces1[j].height*0.5);
int radius = cvRound((faces1[j].width+faces1[j].height)*0.25);
circle(frame, center, radius, Scalar(255,0,0), 2, 8, 0);
}
rectangle(frame, pt1, pt2, cvScalar(0,255,0), 2, 8, 0);
}
imshow("Result", frame);
waitKey(3);
char c = waitKey(3);
if(c == 27)
break;
}
return 0;
}
What you are asking about is probably face recognition. You should be clearer in your question.
OpenCV has a class for doing recognition properly, not the way you are thinking of doing it.
Many approaches are available for this technology; OpenCV implements three algorithms. You also need to prepare your database of images (labelled faces).
All these steps are described in the OpenCV docs with some examples: http://docs.opencv.org/modules/contrib/doc/facerec/facerec_tutorial.html
You just need to read and apply them.
Here you can also find a good tutorial for beginners.
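As a rough sketch of the FaceRecognizer API from the OpenCV 2.4 contrib module (my own illustration, not code from the tutorial; the file names and labels are placeholders):
#include <opencv2/contrib/contrib.hpp>   // FaceRecognizer lives here in OpenCV 2.4.x
#include <opencv2/highgui/highgui.hpp>
#include <vector>
using namespace cv;
int main() {
    // Training set: grayscale face crops with integer labels (one label per person).
    std::vector<Mat> images;
    std::vector<int> labels;
    images.push_back(imread("/tmp/myface.png", CV_LOAD_IMAGE_GRAYSCALE));
    labels.push_back(0);
    // ... add more labelled faces here ...
    Ptr<FaceRecognizer> model = createLBPHFaceRecognizer();
    model->train(images, labels);
    // Later, for a face ROI cropped from a webcam frame:
    Mat face = imread("face_from_webcam.png", CV_LOAD_IMAGE_GRAYSCALE);
    int predictedLabel = -1;
    double confidence = 0.0;
    model->predict(face, predictedLabel, confidence);
    // If predictedLabel is 0 and the confidence distance is low enough, show "Police watching".
    return 0;
}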

Glibcxx error compiling FREAK OpenCV demo file

I have tried to compile this file
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/features2d/features2d.hpp>
#include <opencv2/nonfree/features2d.hpp>
#include <opencv2/legacy/legacy.hpp>
using namespace cv;
static void help( char** argv )
{
std::cout << "\nUsage: " << argv[0] << " [path/to/image1] [path/to/image2] \n"
<< "This is an example on how to use the keypoint descriptor presented in the following paper: \n"
<< "A. Alahi, R. Ortiz, and P. Vandergheynst. FREAK: Fast Retina Keypoint. \n"
<< "In IEEE Conference on Computer Vision and Pattern Recognition, 2012. CVPR 2012 Open Source Award winner \n"
<< std::endl;
}
int main( int argc, char** argv ) {
// check http://docs.opencv.org/doc/tutorials/features2d/table_of_content_features2d/table_of_content_features2d.html
// for OpenCV general detection/matching framework details
if( argc != 3 ) {
help(argv);
return -1;
}
// Load images
Mat imgA = imread(argv[1], CV_LOAD_IMAGE_GRAYSCALE );
if( !imgA.data ) {
std::cout<< " --(!) Error reading image " << argv[1] << std::endl;
return -1;
}
Mat imgB = imread(argv[2], CV_LOAD_IMAGE_GRAYSCALE );
if( !imgB.data ) {
std::cout << " --(!) Error reading image " << argv[2] << std::endl;
return -1;
}
std::vector<KeyPoint> keypointsA, keypointsB;
Mat descriptorsA, descriptorsB;
std::vector<DMatch> matches;
// DETECTION
// Any openCV detector such as
SurfFeatureDetector detector(2000,4);
// DESCRIPTOR
// Our proposed FREAK descriptor
// (rotation invariance, scale invariance, pattern radius corresponding to SMALLEST_KP_SIZE,
// number of octaves, optional vector containing the selected pairs)
// FREAK extractor(true, true, 22, 4, std::vector<int>());
FREAK extractor;
// MATCHER
// The standard Hamming distance can be used such as
// BruteForceMatcher<Hamming> matcher;
// or the proposed cascade of hamming distance using SSSE3
BruteForceMatcher<Hamming> matcher;
// detect
double t = (double)getTickCount();
detector.detect( imgA, keypointsA );
detector.detect( imgB, keypointsB );
t = ((double)getTickCount() - t)/getTickFrequency();
std::cout << "detection time [s]: " << t/1.0 << std::endl;
// extract
t = (double)getTickCount();
extractor.compute( imgA, keypointsA, descriptorsA );
extractor.compute( imgB, keypointsB, descriptorsB );
t = ((double)getTickCount() - t)/getTickFrequency();
std::cout << "extraction time [s]: " << t << std::endl;
// match
t = (double)getTickCount();
matcher.match(descriptorsA, descriptorsB, matches);
t = ((double)getTickCount() - t)/getTickFrequency();
std::cout << "matching time [s]: " << t << std::endl;
// Draw matches
Mat imgMatch;
drawMatches(imgA, keypointsA, imgB, keypointsB, matches, imgMatch);
namedWindow("matches", CV_WINDOW_KEEPRATIO);
imshow("matches", imgMatch);
waitKey(0);
}
with this command
gcc freak_demo.cpp `pkg-config opencv --cflags --libs`
and received this error message
/usr/lib64/gcc/x86_64-suse-linux/4.8/../../../../x86_64-suse-linux/bin/ld: /tmp/ccwSmrle.o: undefined reference to symbol '_ZNSsD1Ev@@GLIBCXX_3.4'
/usr/lib64/gcc/x86_64-suse-linux/4.8/../../../../x86_64-suse-linux/bin/ld: note: '_ZNSsD1Ev@@GLIBCXX_3.4' is defined in DSO /usr/lib64/libstdc++.so.6 so try adding it to the linker command line
/usr/lib64/libstdc++.so.6: could not read symbols: Invalid operation
collect2: error: ld returned 1 exit status
I don't know what GLIBCXX is. Which package (openSUSE 13.1, gcc 4.8) do I have to install? I don't know how to interpret the error message; any help is appreciated.
As Daniel Frey said in his comment, use g++ instead of gcc. This solved the exact same problem for me, but for the compilation of a totally different program.
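So the working command should presumably be:
g++ freak_demo.cpp -o freak_demo `pkg-config opencv --cflags --libs`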