Compiler error: is opencv2/contrib installed? Face recognition in C++

/usr/include/opencv2/contrib/contrib.hpp:78:9: error: ‘CvHistogram’ does not name a type
CvHistogram *fHistogram;
^
/usr/include/opencv2/contrib/contrib.hpp:89:5: error: ‘IplImage’ does not name a type
IplImage *imgHueFrame, *imgSaturationFrame, *imgLastGrayFrame, *imgMotionFrame, *imgFilteredFrame;
^
/usr/include/opencv2/contrib/contrib.hpp:90:5: error: ‘IplImage’ does not name a type
IplImage *imgShrinked, *imgTemp, *imgGrayFrame, *imgHSVFrame;
^
/usr/include/opencv2/contrib/contrib.hpp:93:19: error: ‘IplImage’ has not been declared
void initData(IplImage *src, int widthDivider, int heightDivider);
^
/usr/include/opencv2/contrib/contrib.hpp:108:26: error: ‘IplImage’ has not been declared
virtual void process(IplImage *inputBGRImage, IplImage *outputHueMask);
^
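Note: CvHistogram and IplImage are legacy C-API types. In OpenCV 2.4, opencv2/core/core.hpp pulls in the C type headers that declare them, while OpenCV 3's core.hpp no longer does, so errors like these usually mean a 2.4 contrib.hpp is being compiled against core headers from a different install. A quick sketch to test that theory (the proper fix is a single consistent OpenCV install; header names below are from the stock 2.x/3.x layout):
// Declare the legacy C types explicitly before contrib.hpp, in case
// the core headers found first no longer do so:
#include "opencv2/core/types_c.h"     // declares IplImage
#include "opencv2/imgproc/types_c.h"  // declares CvHistogram
#include "opencv2/contrib/contrib.hpp"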
CODE:
/*
Copyright (c) 2011. Philipp Wagner <bytefish[at]gmx[dot]de>.
Released to public domain under terms of the BSD Simplified license.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
Neither the name of the organization nor the names of its contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
See http://www.opensource.org/licenses/bsd-license
*/
#include "opencv2/core/core.hpp"
#include "opencv2/contrib/contrib.hpp"
#include "opencv2/highgui/highgui.hpp"
#include <iostream>
#include <fstream>
#include <sstream>
using namespace cv;
using namespace std;
static Mat norm_0_255(InputArray _src) {
Mat src = _src.getMat();
// Create and return normalized image:
Mat dst;
switch(src.channels()) {
case 1:
cv::normalize(_src, dst, 0, 255, NORM_MINMAX, CV_8UC1);
break;
case 3:
cv::normalize(_src, dst, 0, 255, NORM_MINMAX, CV_8UC3);
break;
default:
src.copyTo(dst);
break;
}
return dst;
}
static void read_csv(const string& filename, vector<Mat>& images, vector<int>& labels, char separator = ';') {
std::ifstream file(filename.c_str(), ifstream::in);
if (!file) {
string error_message = "No valid input file was given, please check the given filename.";
CV_Error(CV_StsBadArg, error_message);
}
string line, path, classlabel;
while (getline(file, line)) {
stringstream liness(line);
getline(liness, path, separator);
getline(liness, classlabel);
if(!path.empty() && !classlabel.empty()) {
images.push_back(imread(path, 0));
labels.push_back(atoi(classlabel.c_str()));
}
}
}
int main(int argc, const char *argv[]) {
// Check for valid command line arguments, print usage
// if no arguments were given.
if (argc < 2) {
cout << "usage: " << argv[0] << " <csv.ext> <output_folder> " << endl;
exit(1);
}
string output_folder = ".";
if (argc == 3) {
output_folder = string(argv[2]);
}
// Get the path to your CSV.
string fn_csv = string(argv[1]);
// These vectors hold the images and corresponding labels.
vector<Mat> images;
vector<int> labels;
// Read in the data. This can fail if no valid
// input filename is given.
try {
read_csv(fn_csv, images, labels);
} catch (cv::Exception& e) {
cerr << "Error opening file \"" << fn_csv << "\". Reason: " << e.msg << endl;
// nothing more we can do
exit(1);
}
// Quit if there are not enough images for this demo.
if(images.size() <= 1) {
string error_message = "This demo needs at least 2 images to work. Please add more images to your data set!";
CV_Error(CV_StsError, error_message);
}
// Get the height from the first image. We'll need this
// later in code to reshape the images to their original
// size:
int height = images[0].rows;
// The following lines simply get the last image from
// your dataset and remove it from the vector. This is
// done so that the training data (which the
// cv::FaceRecognizer is trained on) and the test data
// (which the model is tested with) do not overlap.
Mat testSample = images[images.size() - 1];
int testLabel = labels[labels.size() - 1];
images.pop_back();
labels.pop_back();
// The following lines create a Fisherfaces model for
// face recognition and train it with the images and
// labels read from the given CSV file.
// If you just want to keep 10 Fisherfaces, then call
// the factory method like this:
//
// cv::createFisherFaceRecognizer(10);
//
// However it is not useful to discard Fisherfaces! Please
// always try to use _all_ available Fisherfaces for
// classification.
//
// If you want to create a FaceRecognizer with a
// confidence threshold (e.g. 123.0) and use _all_
// Fisherfaces, then call it with:
//
// cv::createFisherFaceRecognizer(0, 123.0);
//
Ptr<FaceRecognizer> model = createFisherFaceRecognizer();
model->train(images, labels);
// The following line predicts the label of a given
// test image:
int predictedLabel = model->predict(testSample);
//
// To get the confidence of a prediction call the model with:
//
// int predictedLabel = -1;
// double confidence = 0.0;
// model->predict(testSample, predictedLabel, confidence);
//
string result_message = format("Predicted class = %d / Actual class = %d.", predictedLabel, testLabel);
cout << result_message << endl;
// Here is how to get the eigenvalues of this Fisherfaces model:
Mat eigenvalues = model->getMat("eigenvalues");
// And we can do the same to display the eigenvectors (read Fisherfaces):
Mat W = model->getMat("eigenvectors");
// Get the sample mean from the training data
Mat mean = model->getMat("mean");
// Display or save:
if(argc == 2) {
imshow("mean", norm_0_255(mean.reshape(1, images[0].rows)));
} else {
imwrite(format("%s/mean.png", output_folder.c_str()), norm_0_255(mean.reshape(1, images[0].rows)));
}
// Display or save the first, at most 16 Fisherfaces:
for (int i = 0; i < min(16, W.cols); i++) {
string msg = format("Eigenvalue #%d = %.5f", i, eigenvalues.at<double>(i));
cout << msg << endl;
// get eigenvector #i
Mat ev = W.col(i).clone();
// Reshape to original size & normalize to [0...255] for imshow.
Mat grayscale = norm_0_255(ev.reshape(1, height));
// Apply a Bone colormap for better visualization before showing the image.
Mat cgrayscale;
applyColorMap(grayscale, cgrayscale, COLORMAP_BONE);
// Display or save:
if(argc == 2) {
imshow(format("fisherface_%d", i), cgrayscale);
} else {
imwrite(format("%s/fisherface_%d.png", output_folder.c_str(), i), norm_0_255(cgrayscale));
}
}
// Display or save the image reconstruction at some predefined steps:
for(int num_component = 0; num_component < min(16, W.cols); num_component++) {
// Slice the Fisherface from the model:
Mat ev = W.col(num_component);
Mat projection = subspaceProject(ev, mean, images[0].reshape(1,1));
Mat reconstruction = subspaceReconstruct(ev, mean, projection);
// Normalize the result:
reconstruction = norm_0_255(reconstruction.reshape(1, images[0].rows));
// Display or save:
if(argc == 2) {
imshow(format("fisherface_reconstruction_%d", num_component), reconstruction);
} else {
imwrite(format("%s/fisherface_reconstruction_%d.png", output_folder.c_str(), num_component), reconstruction);
}
}
// Display if we are not writing to an output folder:
if(argc == 2) {
waitKey(0);
}
return 0;
}

Tensorflow C++ not using GPU

Windows 10, amd64.
Built TensorFlow GPU-enabled C++ static libraries with the CMake GUI + MSBuild.
The build was successful.
LABEL_IMAGE tutorial example execution times:
... Main.cc execution : 9.17 secs
... Label_image.py execution (tensorflow) : 10.34 secs
... Label_image.py execution (tensorflow-gpu) : 1.62 secs
Any idea why? Thanks a lot.
Main.cc with minor customizations:
#define NOMINMAX
#include <fstream>
#include <utility>
#include <vector>
#include "tensorflow/cc/ops/const_op.h"
#include "tensorflow/cc/ops/image_ops.h"
#include "tensorflow/cc/ops/standard_ops.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/graph/default_device.h"
#include "tensorflow/core/graph/graph_def_builder.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/core/stringpiece.h"
#include "tensorflow/core/lib/core/threadpool.h"
#include "tensorflow/core/lib/io/path.h"
#include "tensorflow/core/lib/strings/stringprintf.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/init_main.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/core/public/session.h"
#include "tensorflow/core/util/command_line_flags.h"
// These are all common classes it's handy to reference with no namespace.
using tensorflow::Flag;
using tensorflow::Tensor;
using tensorflow::Status;
using tensorflow::string;
using tensorflow::int32;
static Status ReadEntireFile(tensorflow::Env* env, const string& filename, Tensor* output) {
tensorflow::uint64 file_size = 0;
TF_RETURN_IF_ERROR(env->GetFileSize(filename, &file_size));
string contents;
contents.resize(file_size);
std::unique_ptr<tensorflow::RandomAccessFile> file;
TF_RETURN_IF_ERROR(env->NewRandomAccessFile(filename, &file));
tensorflow::StringPiece data;
TF_RETURN_IF_ERROR(file->Read(0, file_size, &data, &(contents)[0]));
if (data.size() != file_size) {
return tensorflow::errors::DataLoss("Truncated read of '", filename, "' expected ", file_size, " got ", data.size());
}
output->scalar<string>()() = data.ToString();
return Status::OK();
}
// Given an image file name, read in the data, try to decode it as an image,
// resize it to the requested size, and then scale the values as desired.
Status ReadTensorFromImageFile(const string file_name, const int input_height, const int input_width, const float input_mean, const float input_std, std::vector<Tensor>* out_tensors) {
auto root = tensorflow::Scope::NewRootScope();
using namespace ::tensorflow::ops; // NOLINT(build/namespaces)
string input_name = "file_reader";
string output_name = "dim";
// read file_name into a tensor named input
Tensor input(tensorflow::DT_STRING, tensorflow::TensorShape());
TF_RETURN_IF_ERROR(ReadEntireFile(tensorflow::Env::Default(), file_name, &input));
// use a placeholder to read input data
auto file_reader = Placeholder(root.WithOpName("input"), tensorflow::DataType::DT_STRING);
std::vector<std::pair<string, tensorflow::Tensor>> inputs = { { "input", input }, };
// Now try to figure out what kind of file it is and decode it.
const int wanted_channels = 3;
tensorflow::Output image_reader;
if (tensorflow::StringPiece(file_name).ends_with(".png")) {
image_reader = DecodePng(root.WithOpName("png_reader"), file_reader, DecodePng::Channels(wanted_channels));
}
else if (tensorflow::StringPiece(file_name).ends_with(".gif")) {
// gif decoder returns 4-D tensor, remove the first dim
image_reader = Squeeze(root.WithOpName("squeeze_first_dim"), DecodeGif(root.WithOpName("gif_reader"), file_reader));
}
else if (tensorflow::StringPiece(file_name).ends_with(".bmp")) {
image_reader = DecodeBmp(root.WithOpName("bmp_reader"), file_reader);
}
else {
// Assume if it's neither a PNG nor a GIF then it must be a JPEG.
image_reader = DecodeJpeg(root.WithOpName("jpeg_reader"), file_reader, DecodeJpeg::Channels(wanted_channels));
}
// Now cast the image data to float so we can do normal math on it.
auto uint8_caster = Cast(root.WithOpName("uint8_caster"), image_reader, tensorflow::DT_UINT8);
// The convention for image ops in TensorFlow is that all images are expected
// to be in batches, so that they're four-dimensional arrays with indices of
// [batch, height, width, channel]. Because we only have a single image, we
// have to add a batch dimension of 1 to the start with ExpandDims().
auto dims_expander = ExpandDims(root.WithOpName(output_name), uint8_caster, 0);
// Bilinearly resize the image to fit the required dimensions.
//auto resized = ResizeBilinear(root, dims_expander,Const(root.WithOpName("size"), { input_height, input_width }));
// Subtract the mean and divide by the scale.
//Div(root.WithOpName(output_name), Sub(root, resized, { input_mean }),{ input_std });
// This runs the GraphDef network definition that we've just constructed, and
// returns the results in the output tensor.
tensorflow::GraphDef graph;
TF_RETURN_IF_ERROR(root.ToGraphDef(&graph));
tensorflow::SessionOptions options;
std::unique_ptr<tensorflow::Session> session(tensorflow::NewSession(options));
TF_RETURN_IF_ERROR(session->Create(graph));
TF_RETURN_IF_ERROR(session->Run({ inputs }, { output_name }, {}, out_tensors));
return Status::OK();
}
// Reads a model graph definition from disk, and creates a session object you
// can use to run it.
Status LoadGraph(const string& graph_file_name, std::unique_ptr<tensorflow::Session>* session) {
tensorflow::GraphDef graph_def;
Status load_graph_status = ReadBinaryProto(tensorflow::Env::Default(), graph_file_name, &graph_def);
if (!load_graph_status.ok()) {return tensorflow::errors::NotFound("Failed to load compute graph at '",graph_file_name, "'");}
tensorflow::SessionOptions options;
session->reset(tensorflow::NewSession(options));
Status session_create_status = (*session)->Create(graph_def);
if (!session_create_status.ok()) {return session_create_status; }
return Status::OK();
}
int main(int argc, char* argv[]) {
// These are the command-line flags the program can understand.
// They define where the graph and input data is located, and what kind of
// input the model expects. If you train your own model, or use something
// other than inception_v3, then you'll need to update these.
string image = "tensorflow/examples/label_image/data/grace_hopper.jpg";
string graph = "tensorflow/examples/label_image/data/faster_rcnn_resnet101_coco_11_06_2017/frozen_inference_graph.pb";
string labels = "/tensorflow/tensorflow/examples/label_image/data/faster_rcnn_resnet101_coco_11_06_2017/graph.pbtxt";
int32 input_width = 299;
int32 input_height = 299;
float input_mean = 0;
float input_std = 255;
string input_layer = "image_tensor:0";
std::vector<string> output_layer = { "detection_boxes:0", "detection_scores:0", "detection_classes:0", "num_detections:0" };
string o_layer = "detection_boxes:0, detection_scores : 0, detection_classes : 0, num_detections : 0"; //dummy for Flag structure
bool self_test = false;
string root_dir = "/tensorflow/";
std::vector<Flag> flag_list = {
Flag("image", &image, "image to be processed"),
Flag("graph", &graph, "graph to be executed"),
Flag("labels", &labels, "name of file containing labels"),
Flag("input_width", &input_width, "resize image to this width in pixels"),
Flag("input_height", &input_height,
"resize image to this height in pixels"),
Flag("input_mean", &input_mean, "scale pixel values to this mean"),
Flag("input_std", &input_std, "scale pixel values to this std deviation"),
Flag("input_layer", &input_layer, "name of input layer"),
Flag("output_layer", &o_layer, "name of output layer"),
Flag("self_test", &self_test, "run a self test"),
Flag("root_dir", &root_dir,
"interpret image and graph file names relative to this directory"),
};
string usage = tensorflow::Flags::Usage(argv[0], flag_list);
const bool parse_result = tensorflow::Flags::Parse(&argc, argv, flag_list);
if (!parse_result) {
LOG(ERROR) << usage;
return -1;
}
// We need to call this to set up global state for TensorFlow.
tensorflow::port::InitMain(argv[0], &argc, &argv);
if (argc > 1) {
LOG(ERROR) << "Unknown argument " << argv[1] << "\n" << usage;
return -1;
}
// First we load and initialize the model.
std::unique_ptr<tensorflow::Session> session;
string graph_path = tensorflow::io::JoinPath(root_dir, graph);
Status load_graph_status = LoadGraph(graph_path, &session);
if (!load_graph_status.ok()) {
LOG(ERROR) << load_graph_status;
return -1;
}
// Get the image from disk as a float array of numbers, resized and normalized
// to the specifications the main graph expects.
std::vector<Tensor> resized_tensors;
string image_path = tensorflow::io::JoinPath(root_dir, image);
//-------------------------------------
LOG(ERROR) << "Detection Basla....";
Status read_tensor_status = ReadTensorFromImageFile(image_path, input_height, input_width, input_mean, input_std, &resized_tensors);
if (!read_tensor_status.ok()) {
LOG(ERROR) << read_tensor_status;
return -1;
}
const Tensor resized_tensor = resized_tensors[0];
// Actually run the image through the model.
std::vector<Tensor> outputs;
Status run_status = session->Run({ { input_layer, resized_tensor } }, { output_layer }, {}, &outputs);
LOG(ERROR) << "Detection Bit......";
//-----------------------------------------
if (!run_status.ok()) {
LOG(ERROR) << "Running model failed: " << run_status;
return -1;
}
tensorflow::TTypes<float>::Flat scores = outputs[1].flat<float>();
tensorflow::TTypes<float>::Flat classes = outputs[2].flat<float>();
tensorflow::TTypes<float>::Flat num_detections = outputs[3].flat<float>();
auto boxes = outputs[0].flat_outer_dims<float, 3>();
LOG(ERROR) << "num_detections:" << num_detections(0) << "," << outputs[0].shape().DebugString();
for (size_t i = 0; i < num_detections(0) && i < 20; ++i)
{
if (scores(i) > 0.5)
{
LOG(ERROR) << i << ",score:" << scores(i) << ",class:" << classes(i) << ",box:" << "," << boxes(0, i, 0) << "," << boxes(0, i, 1) << "," << boxes(0, i, 2) << "," << boxes(0, i, 3);
}
}
return 0;
}
After the successful build I ran the code and got a "_pywrap_tensorflow_internal.pyd not found" message.
I searched the PC and found one in the python/tensorflow path.
I copied it to the execution path and everything worked, except that the GPU was not used.
Then something whispered to me: take the freshly generated pywrap_tensorflow_internal.dll, rename it to _pywrap_tensorflow_internal.pyd, and copy it to the execution path.
Now the GPU is being used.
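To confirm where the ops actually run, device-placement logging can be switched on when the session is created (a minimal sketch using the standard ConfigProto field, adapted from the LoadGraph function above):
// Log each op's assigned device at session creation; lines mentioning
// "/gpu:0" confirm that GPU kernels are really being used.
tensorflow::SessionOptions options;
options.config.set_log_device_placement(true);
std::unique_ptr<tensorflow::Session> session(tensorflow::NewSession(options));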

Capturing video with monochrome camera in aruco opencv

I'm currently trying to use a monochrome camera with the aruco and OpenCV libraries in order to speed up computation and get better marker capture. The problem I am having is that the monochrome feed is tripled on screen when running the aruco_test program, so the effective resolution drops by two thirds and each marker is detected three times instead of once.
I have seen threads describing similar problems with monochrome cameras in OpenCV. Some answers suggested cropping the image (which fixes the tripling but not the reduced resolution); it all seems to be caused by the conversion from either BGR2GRAY or GRAY2BGR.
Any help on what exactly causes the images to be tripled, and how to bypass that part in either the aruco or OpenCV source code, would be appreciated.
INFO :
Driver Info (not using libv4l2):
Driver name : uvcvideo
Card type : oCam-1MGN-U
Bus info : usb-0000:00:1d.0-1.5
Driver version: 3.13.11
Capabilities : 0x84000001
Video Capture
Streaming
Device Capabilities
Device Caps : 0x04000001
Video Capture
Streaming
Priority: 2
Video input : 0 (Camera 1: ok)
Format Video Capture:
Width/Height : 1280/960
Pixel Format : 'GREY'
Field : None
Bytes per Line: 1280
Size Image : 1228800
Colorspace : Unknown (00000000)
Crop Capability Video Capture:
Bounds : Left 0, Top 0, Width 1280, Height 960
Default : Left 0, Top 0, Width 1280, Height 960
Pixel Aspect: 1/1
Streaming Parameters Video Capture:
Capabilities : timeperframe
Frames per second: 30.000 (30/1)
Read buffers : 0
brightness (int) : min=0 max=127 step=1 default=64 value=64
exposure_absolute (int) : min=1 max=625 step=1 default=39 value=39
Using Aruco 2.0.19 and OpenCV 3.2
Since the pixel format is not YUYV, I cannot simply take the Y channel from the camera feed.
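For reference, the kind of capture configuration I have been experimenting with (a sketch only; whether CAP_PROP_CONVERT_RGB is honored depends on the OpenCV version and V4L backend):
// Ask OpenCV's capture not to expand the single-channel GREY frame
// to BGR, so the detector gets the full-resolution grayscale image.
cv::VideoCapture cap(0); // hypothetical camera index
cap.set(cv::CAP_PROP_FOURCC, cv::VideoWriter::fourcc('G', 'R', 'E', 'Y'));
cap.set(cv::CAP_PROP_CONVERT_RGB, false); // may be ignored by some backends
cv::Mat frame;
cap >> frame; // expected CV_8UC1, 1280x960, if the driver honors GREY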
Code executed:
#include <string>
#include <iostream>
#include <fstream>
#include <sstream>
#include "aruco.h"
#include "cvdrawingutils.h"
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
using namespace cv;
using namespace aruco;
MarkerDetector MDetector;
VideoCapture TheVideoCapturer;
vector< Marker > TheMarkers;
Mat TheInputImage, TheInputImageCopy;
CameraParameters TheCameraParameters;
void cvTackBarEvents(int pos, void *);
pair< double, double > AvrgTime(0, 0); // determines the average time required for detection
int iThresParam1, iThresParam2;
int waitTime = 0;
class CmdLineParser {
    int argc;
    char **argv;
public:
    CmdLineParser(int _argc, char **_argv) : argc(_argc), argv(_argv) {}
    // true if the parameter is present on the command line
    bool operator[](string param) {
        int idx = -1;
        for (int i = 0; i < argc && idx == -1; i++)
            if (string(argv[i]) == param) idx = i;
        return idx != -1;
    }
    // value following the parameter, or defvalue if absent
    string operator()(string param, string defvalue = "-1") {
        int idx = -1;
        for (int i = 0; i < argc && idx == -1; i++)
            if (string(argv[i]) == param) idx = i;
        if (idx == -1) return defvalue;
        else return argv[idx + 1];
    }
};
cv::Mat resize(const cv::Mat &in,int width){
if (in.size().width<=width) return in;
float yf=float( width)/float(in.size().width);
cv::Mat im2;
cv::resize(in,im2,cv::Size(width,float(in.size().height)*yf));
return im2;
}
int main(int argc, char **argv) {
try {
CmdLineParser cml(argc,argv);
if (argc < 2 || cml["-h"]) {
cerr << "Invalid number of arguments" << endl;
cerr << "Usage: (in.avi|live[:idx_cam=0]) [-c camera_params.yml] [-s marker_size_in_meters] [-d dictionary:ARUCO by default] [-h]" << endl;
cerr << "\tDictionaries: ";
for (auto dict : aruco::Dictionary::getDicTypes()) cerr << dict << " ";
cerr << endl;
cerr<<"\t Instead of these, you can directly indicate the path to a file with your own generated dictionary"<<endl;
return false;
}
/////////// PARSE ARGUMENTS
string TheInputVideo = argv[1];
// read camera parameters if passed
if (cml["-c"] ) TheCameraParameters.readFromXMLFile(cml("-c"));
float TheMarkerSize = std::stof(cml("-s","-1"));
//aruco::Dictionary::DICT_TYPES TheDictionary= Dictionary::getTypeFromString( cml("-d","ARUCO") );
/////////// OPEN VIDEO
// read from camera or from file
if (TheInputVideo.find("live") != string::npos) {
int vIdx = 0;
// check if the :idx is here
char cad[100];
if (TheInputVideo.find(":") != string::npos) {
std::replace(TheInputVideo.begin(), TheInputVideo.end(), ':', ' ');
sscanf(TheInputVideo.c_str(), "%s %d", cad, &vIdx);
}
cout << "Opening camera index " << vIdx << endl;
TheVideoCapturer.open(vIdx);
waitTime = 10;
}
else TheVideoCapturer.open(TheInputVideo);
// check video is open
if (!TheVideoCapturer.isOpened()) throw std::runtime_error("Could not open video");
///// CONFIGURE DATA
// read first image to get the dimensions
TheVideoCapturer >> TheInputImage;
if (TheCameraParameters.isValid())
TheCameraParameters.resize(TheInputImage.size());
MDetector.setDictionary(cml("-d","ARUCO"));//sets the dictionary to be employed (ARUCO,APRILTAGS,ARTOOLKIT,etc)
MDetector.setThresholdParams(7, 7);
MDetector.setThresholdParamRange(2, 0);
// MDetector.setCornerRefinementMethod(aruco::MarkerDetector::SUBPIX);
// GUI requirements: the trackbars to change these parameters
iThresParam1 = MDetector.getParams()._thresParam1;
iThresParam2 = MDetector.getParams()._thresParam2;
cv::namedWindow("in");
cv::createTrackbar("ThresParam1", "in", &iThresParam1, 25, cvTackBarEvents);
cv::createTrackbar("ThresParam2", "in", &iThresParam2, 13, cvTackBarEvents);
//go!
char key = 0;
int index = 0;
// capture until press ESC or until the end of the video
do {
TheVideoCapturer.retrieve(TheInputImage);
// copy image
double tick = (double)getTickCount(); // for checking the speed
// Detection of markers in the image passed
TheMarkers= MDetector.detect(TheInputImage, TheCameraParameters, TheMarkerSize);
// check the speed by calculating the mean time of all iterations
AvrgTime.first += ((double)getTickCount() - tick) / getTickFrequency();
AvrgTime.second++;
cout << "\rTime detection=" << 1000 * AvrgTime.first / AvrgTime.second << " milliseconds nmarkers=" << TheMarkers.size() << std::endl;
// print marker info and draw the markers in image
TheInputImage.copyTo(TheInputImageCopy);
for (unsigned int i = 0; i < TheMarkers.size(); i++) {
cout << TheMarkers[i]<<endl;
TheMarkers[i].draw(TheInputImageCopy, Scalar(0, 0, 255));
}
// draw a 3d cube in each marker if there is 3d info
if (TheCameraParameters.isValid() && TheMarkerSize>0)
for (unsigned int i = 0; i < TheMarkers.size(); i++) {
CvDrawingUtils::draw3dCube(TheInputImageCopy, TheMarkers[i], TheCameraParameters);
CvDrawingUtils::draw3dAxis(TheInputImageCopy, TheMarkers[i], TheCameraParameters);
}
// DONE! Easy, right?
// show input with augmented information and the thresholded image
cv::imshow("in", resize(TheInputImageCopy,1280));
cv::imshow("thres", resize(MDetector.getThresholdedImage(),1280));
key = cv::waitKey(waitTime); // wait for key to be pressed
if(key=='s') waitTime= waitTime==0?10:0;
index++; // number of images captured
} while (key != 27 && (TheVideoCapturer.grab() ));
} catch (std::exception &ex)
{
cout << "Exception :" << ex.what() << endl;
}
}
void cvTackBarEvents(int pos, void *) {
(void)(pos);
if (iThresParam1 < 3) iThresParam1 = 3;
if (iThresParam1 % 2 != 1) iThresParam1++;
if (iThresParam1 < 1) iThresParam1 = 1;
MDetector.setThresholdParams(iThresParam1, iThresParam2);
// recompute
MDetector.detect(TheInputImage, TheMarkers, TheCameraParameters);
TheInputImage.copyTo(TheInputImageCopy);
for (unsigned int i = 0; i < TheMarkers.size(); i++)
TheMarkers[i].draw(TheInputImageCopy, Scalar(0, 0, 255));
// draw a 3d cube in each marker if there is 3d info
if (TheCameraParameters.isValid())
for (unsigned int i = 0; i < TheMarkers.size(); i++)
CvDrawingUtils::draw3dCube(TheInputImageCopy, TheMarkers[i], TheCameraParameters);
cv::imshow("in", resize(TheInputImageCopy,1280));
cv::imshow("thres", resize(MDetector.getThresholdedImage(),1280));
}

OpenCV Error: Assertion failed (samples.cols == var_count && samples.type() == CV_32F) in predict

I'm trying to use the OpenCV SVM classifier in a cocos2d-x game. Here's a simple test function:
void HelloWorld::testOpenCV(){
// Load SVM classifier
auto classifierPath = FileUtils::getInstance()->fullPathForFilename("classifier.yml");
cv::Ptr<cv::ml::SVM> svm = cv::ml::StatModel::load<cv::ml::SVM>(classifierPath);
string filename = "test.jpg";
auto img = new Image();
img->initWithImageFile(filename);
int imageSize = (int)img->getDataLen();
int imageXW = img->getWidth();
int imageYW = img->getHeight();
unsigned char * srcData = img->getData();
CCLOG("imageXW=%d, imageYW=%d", imageXW, imageYW);
int ch = imageSize/(imageXW*imageYW);
CCLOG("image=%dch raw data...", ch);
cv::Mat testMat = createCvMatFromRaw(srcData, imageXW, imageYW, ch);
testMat.convertTo(testMat, CV_32F);
// try to predict which number has been drawn
try{
int predicted = svm->predict(testMat);
CCLOG("Recognizing following number -> %d", predicted);
}catch(cv::Exception ex){
}
}
It gives this output:
imageXW=28, imageYW=28
image=3ch raw data...
OpenCV Error: Assertion failed (samples.cols == var_count && samples.type() == CV_32F) in predict, file /Volumes/build-storage/build/master_iOS-mac/opencv/modules/ml/src/svm.cpp, line 1930
It is based on this tutorial:
https://www.simplicity.be/article/recognizing-handwritten-digits/
Specifically, on this code:
// Standard library
#include <iostream>
#include <vector>
#include <string>
// OpenCV
#include <opencv2/core.hpp>
#include <opencv2/imgproc.hpp>
#include <opencv2/highgui.hpp>
#include <opencv2/ml.hpp>
// POSIX
#include <unistd.h>
/**
* main
**/
int main( int argc, char** argv )
{
//
// Load SVM classifier
cv::Ptr<cv::ml::SVM> svm = cv::ml::StatModel::load<cv::ml::SVM>("classifier.yml");
// read image file (grayscale)
cv::Mat imgMat = cv::imread("test.jpg", 0);
// convert 2d to 1d
cv::Mat testMat = imgMat.clone().reshape(1,1);
testMat.convertTo(testMat, CV_32F);
// try to predict which number has been drawn
try{
int predicted = svm->predict(testMat);
std::cout << std::endl << "Recognizing following number -> " << predicted << std::endl << std::endl;
std::string notifyCmd = "notify-send -t 1000 Recognized: " + std::to_string(predicted);
system(notifyCmd.c_str());
}catch(cv::Exception ex){
}
}
I've run it in a terminal and it worked.
Here's an implementation of createCvMatFromRaw:
cv::Mat HelloWorld::createCvMatFromRaw(unsigned char *rawData, int rawXW, int rawYW, int ch)
{
cv::Mat cvMat( rawYW, rawXW, CV_8UC4); // 8 bits per component, 4 channels
for (int py=0; py<rawYW; py++) {
for (int px=0; px<rawXW; px++) {
int nBasePos = ((rawXW * py)+px) * ch;
cvMat.at<cv::Vec4b>(py, px) = cv::Vec4b(rawData[nBasePos + 0],
rawData[nBasePos + 1],
rawData[nBasePos + 2],
0xFF);
}
}
return cvMat;
}
I've found it here:
http://blog.szmake.net/archives/845
What does this assert mean? Can someone explain it to me? How can I fix this?
The assertion says
OpenCV Error: Assertion failed (samples.cols == var_count && samples.type() == CV_32F)
which means that the sample either doesn't have the right number of columns or doesn't have type CV_32F.
It looks like you forgot the reshape call, so your data violates the first condition. To apply the SVM, the sample needs to be a single row, i.e. a 1 x n CV_32F matrix whose n matches the var_count the model was trained with.
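A minimal sketch of the fix, assuming the classifier was trained on flattened 28x28 grayscale digits as in the linked tutorial (so var_count is 784), starting from the CV_8UC4 Mat that createCvMatFromRaw returns:
// Collapse to one channel, flatten to a single row, and convert to
// CV_32F so both conditions of the assertion hold.
cv::Mat gray;
cv::cvtColor(testMat, gray, cv::COLOR_BGRA2GRAY);
cv::Mat sample = gray.reshape(1, 1); // 1 x 784 row vector
sample.convertTo(sample, CV_32F);
int predicted = (int)svm->predict(sample);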

Read an image from a qrc using imread() of OpenCV

I want to read an image from a qrc using imread() of OpenCV in this way:
Mat img = imread(":/TempIcons/logo.png");
but the final img size is [0x0]. I have also tried:
Mat img = imread("qrc://TempIcons/logo.png");
but the size I get is the same. I don't want to load the image into a QImage and then convert it to a cv::Mat. Is there an easy way to do this? If so, how can I do it?
Thank you
As #TheDarkKnight pointed out, imread is not aware of Qt resources. You can however write your own loader that uses QFile to retrieve the binary data from the resource and imdecode (as done internally by imread) to decode the image:
Mat loadFromQrc(QString qrc, int flag = IMREAD_COLOR)
{
//double tic = double(getTickCount());
QFile file(qrc);
Mat m;
if(file.open(QIODevice::ReadOnly))
{
qint64 sz = file.size();
std::vector<uchar> buf(sz);
file.read((char*)buf.data(), sz);
m = imdecode(buf, flag);
}
//double toc = (double(getTickCount()) - tic) * 1000.0 / getTickFrequency();
//qDebug() << "OpenCV loading time: " << toc;
return m;
}
You can call it like:
Mat m = loadFromQrc("qrc_path");
or specifying a flag:
Mat m = loadFromQrc("qrc_path", IMREAD_GRAYSCALE);
Performance
I tried loading the image with loadFromQrc, and loading a QImage and converting it to Mat using this code, both with and without cloning. loadFromQrc turns out to be about 10 times faster than loading a QImage and converting it to Mat.
Results in ms:
Load Mat : 4.85965
QImage to Mat (no clone): 49.3999
QImage to Mat (clone) : 49.8497
Test code:
#include <vector>
#include <iostream>
#include <QDebug>
#include <QtWidgets>
#include <opencv2/opencv.hpp>
using namespace cv;
Mat loadFromQrc(QString qrc, int flag = IMREAD_COLOR)
{
QFile file(qrc);
Mat m;
if(file.open(QIODevice::ReadOnly))
{
qint64 sz = file.size();
std::vector<uchar> buf(sz);
file.read((char*)buf.data(), sz);
m = imdecode(buf, flag);
}
return m;
}
cv::Mat QImageToCvMat( const QImage &inImage, bool inCloneImageData = true )
{
switch ( inImage.format() )
{
// 8-bit, 4 channel
case QImage::Format_RGB32:
{
cv::Mat mat( inImage.height(), inImage.width(), CV_8UC4, const_cast<uchar*>(inImage.bits()), inImage.bytesPerLine() );
return (inCloneImageData ? mat.clone() : mat);
}
// 8-bit, 3 channel
case QImage::Format_RGB888:
{
if ( !inCloneImageData )
qWarning() << "ASM::QImageToCvMat() - Conversion requires cloning since we use a temporary QImage";
QImage swapped = inImage.rgbSwapped();
return cv::Mat( swapped.height(), swapped.width(), CV_8UC3, const_cast<uchar*>(swapped.bits()), swapped.bytesPerLine() ).clone();
}
// 8-bit, 1 channel
case QImage::Format_Indexed8:
{
cv::Mat mat( inImage.height(), inImage.width(), CV_8UC1, const_cast<uchar*>(inImage.bits()), inImage.bytesPerLine() );
return (inCloneImageData ? mat.clone() : mat);
}
default:
qWarning() << "ASM::QImageToCvMat() - QImage format not handled in switch:" << inImage.format();
break;
}
return cv::Mat();
}
int main(int argc, char *argv[])
{
QString url = "...";
{
double tic = double(getTickCount());
Mat m1 = loadFromQrc(url);
double toc = (double(getTickCount()) - tic) * 1000.0 / getTickFrequency();
qDebug() << "Load Mat: " << toc;
if(m1.data != NULL)
{
imshow("m1", m1);
waitKey(1);
}
}
// {
// double tic = double(getTickCount());
// QImage img;
// img.load(url);
// Mat m2 = QImageToCvMat(img, false);
// double toc = (double(getTickCount()) - tic) * 1000.0 / getTickFrequency();
// qDebug() << "QImage to Mat (no clone): " << toc;
// if(m2.data != NULL)
// {
// imshow("m2", m2);
// waitKey(1);
// }
// }
// {
// double tic = double(getTickCount());
// QImage img;
// img.load(url);
// Mat m3 = QImageToCvMat(img, true);
// double toc = (double(getTickCount()) - tic) * 1000.0 / getTickFrequency();
// qDebug() << "QImage to Mat (clone): " << toc;
// if(m3.data != NULL)
// {
// imshow("m3", m3);
// waitKey(1);
// }
// }
waitKey();
return 0;
}
The problem here is that imread() loads an image from a file.
In contrast, Qt's resource system compiles the image data directly into the program's executable. Qt's QFile operations know that a path starting with ":/" refers to the embedded resources rather than to a file on disk.
Therefore, I don't think you will be able to use imread() to directly access a file that has been placed in Qt's resources.
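Combining the two answers: with the loadFromQrc helper above, the call from the question becomes:
cv::Mat img = loadFromQrc(":/TempIcons/logo.png");
if (img.empty())
    qWarning() << "Could not load the resource"; // needs <QDebug>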

OpenCV face recognition tutorial code not working with OpenCV 3.0

I am trying to use the code given in the tutorial, but it is not working. I am sure all the dependencies are there and the program compiles, yet I get errors saying that some functions cannot be found.
Here is the code:
#include "opencv2/core/core.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/face.hpp"
#include <iostream>
#include <fstream>
#include <sstream>
using namespace cv;
using namespace std;
static Mat norm_0_255(InputArray _src){
Mat src = _src.getMat();
Mat dst;
switch(src.channels()){
case 1:
cv::normalize(_src, dst, 0, 255, NORM_MINMAX, CV_8UC1);
break;
case 3:
cv::normalize(_src, dst, 0, 255, NORM_MINMAX, CV_8UC3);
break;
default:
src.copyTo(dst);
break;
}
return dst;
}
static void read_csv(const string& filename, vector<Mat>& images, vector<int>& labels, char separator = ';'){
std::ifstream file(filename.c_str(), ifstream::in);
if(!file){
string error_message = "No valid input file was given, please check the given filename.";
CV_Error(CV_StsBadArg, error_message);
}
string line, path, classlabel;
while(getline(file, line)){
stringstream liness(line);
getline(liness, path, separator);
getline(liness, classlabel);
if(!path.empty() && !classlabel.empty()){
images.push_back(imread(path, 0));
labels.push_back(atoi(classlabel.c_str()));
}
}
}
int main(int argc, const char *argv[]){
if(argc < 2){
cout << "usage: " << argv[0] << " <csv.ext> <output_folder> " << endl;
exit(0);
}
string output_folder = ".";
if(argc == 3){
output_folder = string(argv[2]);
}
string fn_csv = string(argv[1]);
vector<Mat> images;
vector<int> labels;
try{
read_csv(fn_csv, images, labels);
}catch(cv::Exception& e){
cerr << "Error opening file \"" << fn_csv << "\". Reason: " << e.msg << endl;
exit(1);
}
if(images.size() <= 1){
string error_message = "This demo needs at least 2 images to work. please add more images to your data set!";
CV_Error(CV_StsError, error_message);
}
int height = images[0].rows;
Mat testSample = images[images.size() - 1];
int testLabel = labels[labels.size() - 1];
images.pop_back();
labels.pop_back();
Ptr<cv::face::FaceRecognizer> model = cv::face::createEigenFaceRecognizer();
model->train(images, labels);
int predictedLabel = model->predict(testSample);
string result_message = format("Predicted class = %d / Actual class = %d ", predictedLabel, testLabel);
cout << result_message << endl;
Mat eigenvalues = model->getMat("eigenvalues");
Mat W = model->getMat("eigenvectors");
Mat mean = model->getMat("mean");
if(argc == 2){
imshow("mean", norm_0_255(mean.reshape(1, images[0].rows)));
}else{
imwrite(format("%s/mean.png", output_folder.c_str()), norm_0_255(mean.reshape(1, images[0].rows)));
}
// Display or save the Eigenfaces:
for (int i = 0; i < min(10, W.cols); i++) {
string msg = format("Eigenvalue #%d = %.5f", i, eigenvalues.at<double>(i));
cout << msg << endl;
// get eigenvector #i
Mat ev = W.col(i).clone();
// Reshape to original size & normalize to [0...255] for imshow.
Mat grayscale = norm_0_255(ev.reshape(1, height));
// Apply a Jet colormap for better visualization before showing the image.
Mat cgrayscale;
applyColorMap(grayscale, cgrayscale, COLORMAP_JET);
// Display or save:
if(argc == 2) {
imshow(format("eigenface_%d", i), cgrayscale);
} else {
imwrite(format("%s/eigenface_%d.png", output_folder.c_str(), i), norm_0_255(cgrayscale));
}
}
// Display or save the image reconstruction at some predefined steps:
for(int num_components = min(W.cols, 10); num_components < min(W.cols, 300); num_components+=15) {
// slice the eigenvectors from the model
Mat evs = Mat(W, Range::all(), Range(0, num_components));
Mat projection = subspaceProject(evs, mean, images[0].reshape(1,1));
Mat reconstruction = subspaceReconstruct(evs, mean, projection);
// Normalize the result:
reconstruction = norm_0_255(reconstruction.reshape(1, images[0].rows));
// Display or save:
if(argc == 2) {
imshow(format("eigenface_reconstruction_%d", num_components), reconstruction);
} else {
imwrite(format("%s/eigenface_reconstruction_%d.png", output_folder.c_str(), num_components), reconstruction);
}
}
// Display if we are not writing to an output folder:
if(argc == 2) {
waitKey(0);
}
return 0;
}
CMakeLists.txt file:
cmake_minimum_required(VERSION 2.8)
project(faceReco)
find_package(OpenCV REQUIRED)
add_executable(faceReco faceReco.cpp)
target_link_libraries(faceReco ${OpenCV_LIBS})
Errors:
/home/abdulaziz/workspace/OpenCV_Projects/FaceReco/faceReco.cpp: In function ‘int main(int, const char**)’:
/home/abdulaziz/workspace/OpenCV_Projects/FaceReco/faceReco.cpp:110:27: error: ‘class cv::face::FaceRecognizer’ has no member named ‘getMat’
Mat eigenvalues = model->getMat("eigenvalues");
^
/home/abdulaziz/workspace/OpenCV_Projects/FaceReco/faceReco.cpp:111:17: error: ‘class cv::face::FaceRecognizer’ has no member named ‘getMat’
Mat W = model->getMat("eigenvectors");
^
/home/abdulaziz/workspace/OpenCV_Projects/FaceReco/faceReco.cpp:112:20: error: ‘class cv::face::FaceRecognizer’ has no member named ‘getMat’
Mat mean = model->getMat("mean");
^
/home/abdulaziz/workspace/OpenCV_Projects/FaceReco/faceReco.cpp:130:46: error: ‘COLORMAP_JET’ was not declared in this scope
applyColorMap(grayscale, cgrayscale, COLORMAP_JET);
^
/home/abdulaziz/workspace/OpenCV_Projects/FaceReco/faceReco.cpp:130:58: error: ‘applyColorMap’ was not declared in this scope
applyColorMap(grayscale, cgrayscale, COLORMAP_JET);
^
/home/abdulaziz/workspace/OpenCV_Projects/FaceReco/faceReco.cpp:143:75: error: ‘subspaceProject’ was not declared in this scope
Mat projection = subspaceProject(evs, mean, images[0].reshape(1,1));
^
/home/abdulaziz/workspace/OpenCV_Projects/FaceReco/faceReco.cpp:144:71: error: ‘subspaceReconstruct’ was not declared in this scope
Mat reconstruction = subspaceReconstruct(evs, mean, projection);
^
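These symbols did not disappear in OpenCV 3, they moved. A sketch of the substitutions (names taken from the 3.x face and core headers; worth checking against the exact version in use):
#include "opencv2/imgproc.hpp" // applyColorMap and COLORMAP_JET live here in 3.x
// getMat() was removed; BasicFaceRecognizer exposes typed getters instead:
Ptr<cv::face::BasicFaceRecognizer> model = cv::face::createEigenFaceRecognizer();
model->train(images, labels);
Mat eigenvalues = model->getEigenValues();
Mat W = model->getEigenVectors();
Mat mean = model->getMean();
// subspaceProject/subspaceReconstruct became static members of cv::LDA:
Mat projection = cv::LDA::subspaceProject(evs, mean, images[0].reshape(1, 1));
Mat reconstruction = cv::LDA::subspaceReconstruct(evs, mean, projection);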