Error in executing object detection main program - c++

I am using a program to train an SVM classifier for object detection using HOG descriptors, but I am not able to get an executable file from the main program. My code is as follows:
#include <stdio.h>
#include <dirent.h>
#include <ios>
#include <fstream>
#include <stdexcept>
#include <opencv2/opencv.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/ml/ml.hpp>
#define SVMLIGHT 1
#define LIBSVM 2
#define TRAINHOG_USEDSVM SVMLIGHT
#if TRAINHOG_USEDSVM == SVMLIGHT
#include "svm_light/svmlight.h"
#define TRAINHOG_SVM_TO_TRAIN SVMlight
#elif TRAINHOG_USEDSVM == LIBSVM
#include "libsvm/libsvm.h"
#define TRAINHOG_SVM_TO_TRAIN libSVM
#endif
using namespace std;
using namespace cv;
static string posSamplesDir = "pos/";
static string negSamplesDir = "neg/";
static string featuresFile = "genfiles/features.dat";
static string svmModelFile = "genfiles/svmlightmodel.dat";
static string descriptorVectorFile = "genfiles/descriptorvector.dat";
static const Size trainingPadding = Size(0,0);
static const Size winStride = Size(8,8);
// </editor-fold>
// <editor-fold defaultstate="collapsed" desc="Helper functions">
/* Helper functions */
static string toLowerCase(const string& in) {
string t;
for (string::const_iterator i = in.begin(); i != in.end(); ++i) {
t += tolower(*i);
}
return t;
}
static void storeCursor(void) {
printf("\033[s");
}
static void resetCursor(void) {
printf("\033[u");
}
/**
 * Saves the given descriptor vector to a file
 * @param descriptorVector the descriptor vector to save
 * @param _vectorIndices contains indices for the corresponding vector values (e.g. descriptorVector(0)=3.5f may have index 1)
 * @param fileName
 * @TODO Use _vectorIndices to write correct indices
 */
static void saveDescriptorVectorToFile(vector<float>& descriptorVector, vector<unsigned int>& _vectorIndices, string fileName) {
printf("Saving descriptor vector to file '%s'\n", fileName.c_str());
string separator = " "; // Use blank as default separator between single features
fstream File;
float percent;
File.open(fileName.c_str(), ios::out);
if (File.good() && File.is_open()) {
printf("Saving %lu descriptor vector features:\t", descriptorVector.size());
storeCursor();
for (int feature = 0; feature < descriptorVector.size(); ++feature) {
if ((feature % 10 == 0) || (feature == (descriptorVector.size()-1)) ) {
percent = ((1 + feature) * 100 / descriptorVector.size());
printf("%4u (%3.0f%%)", feature, percent);
fflush(stdout);
resetCursor();
}
File << descriptorVector.at(feature) << separator;
}
printf("\n");
File << endl;
File.flush();
File.close();
}
}
/**
 * For unixoid systems only: Lists all files in a given directory and returns a vector of path+name in string format
 * @param dirName
 * @param fileNames found file names in specified directory
 * @param validExtensions containing the valid file extensions for collection in lower case
 */
static void getFilesInDirectory(const string& dirName, vector<string>& fileNames, const vector<string>& validExtensions) {
printf("Opening directory %s\n", dirName.c_str());
struct dirent* ep;
size_t extensionLocation;
DIR* dp = opendir(dirName.c_str());
if (dp != NULL) {
while ((ep = readdir(dp))) {
// Ignore (sub-)directories like . , .. , .svn, etc.
if (ep->d_type & DT_DIR) {
continue;
}
extensionLocation = string(ep->d_name).find_last_of("."); // Assume the last point marks beginning of extension like file.ext
// Check if extension is matching the wanted ones
string tempExt = toLowerCase(string(ep->d_name).substr(extensionLocation + 1));
if (find(validExtensions.begin(), validExtensions.end(), tempExt) != validExtensions.end()) {
printf("Found matching data file '%s'\n", ep->d_name);
fileNames.push_back((string) dirName + ep->d_name);
} else {
printf("Found file does not match required file type, skipping: '%s'\n", ep->d_name);
}
}
(void) closedir(dp);
} else {
printf("Error opening directory '%s'!\n", dirName.c_str());
}
return;
}
/**
 * This is the actual calculation from the (input) image data to the HOG descriptor/feature vector using the hog.compute() function
 * @param imageFilename file path of the image file to read and calculate the feature vector from
 * @param descriptorVector the returned calculated feature vector<float>,
 * I can't comprehend why the openCV implementation returns std::vector<float> instead of cv::MatExpr_<float> (e.g. Mat<float>)
 * @param hog HOGDescriptor containing HOG settings
 */
static void calculateFeaturesFromInput(const string& imageFilename, vector<float>& featureVector, HOGDescriptor& hog) {
/** for imread flags from openCV documentation,
 * @see http://docs.opencv.org/modules/highgui/doc/reading_and_writing_images_and_video.html?highlight=imread#Mat imread(const string& filename, int flags)
 * @note If you get a compile-time error complaining about the following line (esp. imread),
 * you either do not have a current openCV version (>2.0)
 * or the linking order is incorrect; try g++ -o openCVHogTrainer main.cpp `pkg-config --cflags --libs opencv`
 */
Mat imageData = imread(imageFilename, 0);
if (imageData.empty()) {
featureVector.clear();
printf("Error: HOG image '%s' is empty, features calculation skipped!\n", imageFilename.c_str());
return;
}
// Check for mismatching dimensions
if (imageData.cols != hog.winSize.width || imageData.rows != hog.winSize.height) {
featureVector.clear();
printf("Error: Image '%s' dimensions (%u x %u) do not match HOG window size (%u x %u)!\n", imageFilename.c_str(), imageData.cols, imageData.rows, hog.winSize.width, hog.winSize.height);
return;
}
vector<Point> locations;
hog.compute(imageData, featureVector, winStride, trainingPadding, locations);
imageData.release(); // Release the image again after features are extracted
}
/**
 * Shows the detections in the image
 * @param found vector containing valid detection points
 * @param imageData the image in which the detections are drawn
 */
static void showDetections(const vector<Point>& found, Mat& imageData) {
size_t i, j;
for (i = 0; i < found.size(); ++i) {
Point r = found[i];
// Rect_(_Tp _x, _Tp _y, _Tp _width, _Tp _height);
rectangle(imageData, Rect(r.x-16, r.y-32, 32, 64), Scalar(64, 255, 64), 3);
}
}
/**
 * Shows the detections in the image
 * @param found vector containing valid detection rectangles
 * @param imageData the image in which the detections are drawn
 */
static void showDetections(const vector<Rect>& found, Mat& imageData) {
vector<Rect> found_filtered;
size_t i, j;
for (i = 0; i < found.size(); ++i) {
Rect r = found[i];
for (j = 0; j < found.size(); ++j)
if (j != i && (r & found[j]) == r)
break;
if (j == found.size())
found_filtered.push_back(r);
}
for (i = 0; i < found_filtered.size(); i++) {
Rect r = found_filtered[i];
rectangle(imageData, r.tl(), r.br(), Scalar(64, 255, 64), 3);
}
}
/**
 * Tests the trained detector against the same training set to get an approximate idea of the detector's performance.
 * Warning: This does not allow any statement about detection quality, as the detector might be overfitting.
 * Detector quality must be determined using an independent test set.
 * @param hog
 */
static void detectTrainingSetTest(const HOGDescriptor& hog, const double hitThreshold, const vector<string>& posFileNames, const vector<string>& negFileNames) {
unsigned int truePositives = 0;
unsigned int trueNegatives = 0;
unsigned int falsePositives = 0;
unsigned int falseNegatives = 0;
vector<Point> foundDetection;
// Walk over positive training samples, generate images and detect
for (vector<string>::const_iterator posTrainingIterator = posFileNames.begin(); posTrainingIterator != posFileNames.end(); ++posTrainingIterator) {
const Mat imageData = imread(*posTrainingIterator, 0);
hog.detect(imageData, foundDetection, hitThreshold, winStride, trainingPadding);
if (foundDetection.size() > 0) {
++truePositives;
falseNegatives += foundDetection.size() - 1;
} else {
++falseNegatives;
}
}
// Walk over negative training samples, generate images and detect
for (vector<string>::const_iterator negTrainingIterator = negFileNames.begin(); negTrainingIterator != negFileNames.end(); ++negTrainingIterator) {
const Mat imageData = imread(*negTrainingIterator, 0);
hog.detect(imageData, foundDetection, hitThreshold, winStride, trainingPadding);
if (foundDetection.size() > 0) {
falsePositives += foundDetection.size();
} else {
++trueNegatives;
}
}
printf("Results:\n\tTrue Positives: %u\n\tTrue Negatives: %u\n\tFalse Positives: %u\n\tFalse Negatives: %u\n", truePositives, trueNegatives, falsePositives, falseNegatives);
}
/**
 * Test detection with custom HOG description vector
 * @param hog
 * @param hitThreshold threshold value for detection
 * @param imageData
 */
static void detectTest(const HOGDescriptor& hog, const double hitThreshold, Mat& imageData) {
vector<Rect> found;
Size padding(Size(32, 32));
Size winStride(Size(8, 8));
hog.detectMultiScale(imageData, found, hitThreshold, winStride, padding);
showDetections(found, imageData);
}
// </editor-fold>
/**
 * Main program entry point
 * @param argc unused
 * @param argv unused
 * @return EXIT_SUCCESS (0) or EXIT_FAILURE (1)
 */
int main(int argc, char** argv) {
// <editor-fold defaultstate="collapsed" desc="Init">
HOGDescriptor hog; // Use standard parameters here
hog.winSize = Size(64, 128); // Default training images size as used in paper
// Get the files to train from somewhere
static vector<string> positiveTrainingImages;
static vector<string> negativeTrainingImages;
static vector<string> validExtensions;
validExtensions.push_back("jpg");
validExtensions.push_back("png");
validExtensions.push_back("ppm");
// </editor-fold>
// <editor-fold defaultstate="collapsed" desc="Read image files">
getFilesInDirectory(posSamplesDir, positiveTrainingImages, validExtensions);
getFilesInDirectory(negSamplesDir, negativeTrainingImages, validExtensions);
/// Retrieve the descriptor vectors from the samples
unsigned long overallSamples = positiveTrainingImages.size() + negativeTrainingImages.size();
// </editor-fold>
// <editor-fold defaultstate="collapsed" desc="Calculate HOG features and save to file">
// Make sure there are actually samples to train
if (overallSamples == 0) {
printf("No training sample files found, nothing to do!\n");
return EXIT_SUCCESS;
}
/// @WARNING: This is really important; some libraries (e.g. ROS) seem to set the system locale, which uses decimal commas instead of points and causes the file input parsing to fail
setlocale(LC_ALL, "C"); // Do not use the system locale
setlocale(LC_NUMERIC,"C");
setlocale(LC_ALL, "POSIX");
printf("Reading files, generating HOG features and save them to file '%s':\n", featuresFile.c_str());
float percent;
/**
 * Save the calculated descriptor vectors to a file in a format that can be used by SVMlight for training
 * @NOTE: If you split these steps into separate steps:
 * 1. calculating features into memory (e.g. into a cv::Mat or vector< vector<float> >),
 * 2. saving features to file / directly inject from memory to machine learning algorithm,
 * the program may consume a considerable amount of main memory
 */
fstream File;
File.open(featuresFile.c_str(), ios::out);
if (File.good() && File.is_open()) {
// Remove following line for libsvm which does not support comments
// File << "# Use this file to train, e.g. SVMlight by issuing $ svm_learn -i 1 -a weights.txt " << featuresFile.c_str() << endl;
// Iterate over sample images
for (unsigned long currentFile = 0; currentFile < overallSamples; ++currentFile) {
storeCursor();
vector<float> featureVector;
// Get positive or negative sample image file path
const string currentImageFile = (currentFile < positiveTrainingImages.size() ? positiveTrainingImages.at(currentFile) : negativeTrainingImages.at(currentFile - positiveTrainingImages.size()));
// Output progress
if ( (currentFile+1) % 10 == 0 || (currentFile+1) == overallSamples ) {
percent = ((currentFile+1) * 100 / overallSamples);
printf("%5lu (%3.0f%%):\tFile '%s'", (currentFile+1), percent, currentImageFile.c_str());
fflush(stdout);
resetCursor();
}
// Calculate feature vector from current image file
calculateFeaturesFromInput(currentImageFile, featureVector, hog);
if (!featureVector.empty()) {
/* Put positive or negative sample class to file,
* true=positive, false=negative,
* and convert positive class to +1 and negative class to -1 for SVMlight
*/
File << ((currentFile < positiveTrainingImages.size()) ? "+1" : "-1");
// Save feature vector components
for (unsigned int feature = 0; feature < featureVector.size(); ++feature) {
File << " " << (feature + 1) << ":" << featureVector.at(feature);
}
File << endl;
}
}
printf("\n");
File.flush();
File.close();
} else {
printf("Error opening file '%s'!\n", featuresFile.c_str());
return EXIT_FAILURE;
}
// </editor-fold>
// <editor-fold defaultstate="collapsed" desc="Pass features to machine learning algorithm">
/// Read in and train the calculated feature vectors
printf("Calling %s\n", TRAINHOG_SVM_TO_TRAIN::getInstance()->getSVMName());
TRAINHOG_SVM_TO_TRAIN::getInstance()->read_problem(const_cast<char*> (featuresFile.c_str()));
TRAINHOG_SVM_TO_TRAIN::getInstance()->train(); // Call the core libsvm training procedure
printf("Training done, saving model file!\n");
TRAINHOG_SVM_TO_TRAIN::getInstance()->saveModelToFile(svmModelFile);
// </editor-fold>
// <editor-fold defaultstate="collapsed" desc="Generate single detecting feature vector from calculated SVM support vectors and SVM model">
printf("Generating representative single HOG feature vector using svmlight!\n");
vector<float> descriptorVector;
vector<unsigned int> descriptorVectorIndices;
// Generate a single detecting feature vector (v1 | b) from the trained support vectors, for use e.g. with the HOG algorithm
TRAINHOG_SVM_TO_TRAIN::getInstance()->getSingleDetectingVector(descriptorVector, descriptorVectorIndices);
// And save the precious to file system
saveDescriptorVectorToFile(descriptorVector, descriptorVectorIndices, descriptorVectorFile);
// </editor-fold>
// <editor-fold defaultstate="collapsed" desc="Test detecting vector">
// Detector detection tolerance threshold
const double hitThreshold = TRAINHOG_SVM_TO_TRAIN::getInstance()->getThreshold();
// Set our custom detecting vector
hog.setSVMDetector(descriptorVector);
printf("Testing training phase using training set as test set (just to check if training is ok - no detection quality conclusion with this!)\n");
detectTrainingSetTest(hog, hitThreshold, positiveTrainingImages, negativeTrainingImages);
printf("Testing custom detection using camera\n");
VideoCapture cap(0); // open the default camera
if(!cap.isOpened()) { // check if we succeeded
printf("Error opening camera!\n");
return EXIT_FAILURE;
}
Mat testImage;
while ((cvWaitKey(10) & 255) != 27) {
cap >> testImage; // get a new frame from camera
// cvtColor(testImage, testImage, CV_BGR2GRAY); // If you want to work on grayscale images
detectTest(hog, hitThreshold, testImage);
imshow("HOG custom detection", testImage);
}
// </editor-fold>
return EXIT_SUCCESS;
}
If I execute it as "./objectdetectmain.cpp", it gives errors as follows:
./objectdetectmain.cpp: line 24: using: command not found
./objectdetectmain.cpp: line 25: using: command not found
./objectdetectmain.cpp: line 29: static: command not found
./objectdetectmain.cpp: line 31: static: command not found
./objectdetectmain.cpp: line 33: static: command not found
./objectdetectmain.cpp: line 35: static: command not found
./objectdetectmain.cpp: line 37: static: command not found
./objectdetectmain.cpp: line 40: syntax error near unexpected token `('
./objectdetectmain.cpp: line 40: `static const Size trainingPadding = Size(0,0);

It seems that you are trying to run the source file as a program. You need to compile it into an executable first, e.g.:
g++ -Wall -g objectdetectmain.cpp -o objectdetectmain
which will give you an executable named objectdetectmain (note the lack of a .cpp extension) which you can then run:
./objectdetectmain
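Note that this particular program also depends on OpenCV and on the SVMlight sources it includes, so the plain g++ line above will fail at the link stage. A sketch of a fuller compile command, assuming the SVMlight object files have already been built into the svm_light/ directory (names and paths here are assumptions, adjust for your setup):
g++ -Wall -g objectdetectmain.cpp svm_light/svm_learn.o svm_light/svm_common.o svm_light/svm_hideo.o -o objectdetectmain `pkg-config --cflags --libs opencv`
The pkg-config part is the same linking hint that already appears in the comment inside calculateFeaturesFromInput().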

Related

How to create an array of objects and access the functions inside the subclass? c++

To capture images simultaneously I want to create an array of objects (cam). With the current array I can execute the functions of the class xiAPIplus_Camera, but the functions inside the class xiAPIplusCameraOcv : public xiAPIplus_Camera won't start.
E.g., if I call cv::Mat GetNextImageOcvMat();, the class destructor runs instead.
class xiAPIplusCameraOcv : public xiAPIplus_Camera
{
public:
/**
* Initialize the xiAPIplusCameraOcv class.
*/
xiAPIplusCameraOcv();
/**
* Class destructor. Free allocated memory, release images
*/
virtual ~xiAPIplusCameraOcv();
/**
* Reads an image from the camera using XiAPI, stores the image in OpenCV Mat format.
* @return OpenCV Mat image.
*/
cv::Mat GetNextImageOcvMat(); //Reads an image and converts it to OpenCV Mat
/**
* Converts a XiAPI image (xiAPIplus_Image*) to OpenCV Mat.
* @param input_image[in] Input xiAPIplus_Image* to be converted.
* @return converted OpenCV IplImage* image.
*/
cv::Mat ConvertOcvMat(xiAPIplus_Image * input_image); //Converts an image to OpenCV Mat
//virtual xiAPIplusCameraOcv& operator >> (CV_OUT Mat& input_image);
private:
/**
* Resets the Opencv image if properties of XI_IMG have changed. Resets the cv_mat_image_.
*/
void resetCvImage_(); //Resets the OpenCV image properties if XI_IMG format has changed
cv::Mat cv_mat_image_;
xiAPIplus_Image * next_image_;
int timeout_;
int counter_;
int index_;
};
The main function is able to create the array but I am missing something while using it.
int main(int argc, char* argv[])
{
try
{
xiAPIplusCameraOcv cam[5];
int n = cam[4].GetNumberOfConnectedCameras();
int uidArray[4];
char* snArray[4];
for (int id = 0; id < n; id++)
{
printf("Opening %d. camera ...\n", id + 1);
cam[id].OpenByID(id);
cam[id].GetSerialNumber(snArray[id], 100);
}
for (int exp = 0; exp < n; exp++)
{
cam[exp].StartAcquisition();
Mat cv_mat_image = cam[exp].GetNextImageOcvMat(); //
printf("GetNextImageOcvMat\n");
cv::imwrite(save_at, cv_mat_image);// , compression_params);
cam[exp].StopAcquisition();
}
printf("Done\n");
cv::waitKey(500);
}
catch (xiAPIplus_Exception& exp)
{
printf("Error:\n");
exp.PrintError();
#ifdef WIN32
Sleep(2000);
#endif
cv::waitKey(2000);
return -1;
}
return 0;
}
After this line Mat cv_mat_image = cam[exp].GetNextImageOcvMat(); the destructor is closing my camera object.
/**
* Class destructor. Free allocated memory, release images
*/
xiAPIplusCameraOcv::~xiAPIplusCameraOcv(){
printf("Closing xiAPIplus_Ocv camera\n");
cv_mat_image_.release();
}
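A quick way to check whether that destructor call belongs to one of the cam array elements or to a temporary copy is to log the object's address in the constructor and the destructor. A minimal standalone sketch of the idea, where Camera is a hypothetical stand-in for xiAPIplusCameraOcv:
#include <cstdio>
struct Camera { // hypothetical stand-in for xiAPIplusCameraOcv
    Camera()  { printf("ctor %p\n", static_cast<void*>(this)); }
    ~Camera() { printf("dtor %p\n", static_cast<void*>(this)); }
};
int main() {
    Camera cam[2];        // prints two ctor addresses
    Camera copy = cam[0]; // the compiler-generated copy ctor prints nothing
    return 0;             // prints three dtor addresses: copy, cam[1], cam[0]
}
If a "Closing xiAPIplus_Ocv camera" line prints an address (logged the same way) that never appeared in a constructor log, a copy of the camera object was made somewhere, and it is that copy's destructor closing the camera.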

C++ eos - Error loading the Morphable Model: Error opening given file: ../share/sfm_shape_3448.bin

I am trying to run some software I found on GitHub. I managed to compile and install everything on my Ubuntu machine. However, when trying to run one of the provided examples (fit-model-simple.cpp) I get the following error:
Error loading the Morphable Model: Error opening given file: ../share/sfm_shape_3448.bin
I tried to hard-code the path to the file as well, but the result stays the same.
Does anybody know what I might be doing wrong?
This is the code:
/*
* eos - A 3D Morphable Model fitting library written in modern C++11/14.
*
* File: examples/fit-model-simple.cpp
*
* Copyright 2015 Patrik Huber
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "eos/core/Landmark.hpp"
#include "eos/core/LandmarkMapper.hpp"
#include "eos/fitting/orthographic_camera_estimation_linear.hpp"
#include "eos/fitting/RenderingParameters.hpp"
#include "eos/fitting/linear_shape_fitting.hpp"
#include "eos/render/utils.hpp"
#include "eos/render/texture_extraction.hpp"
#include "opencv2/core/core.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "boost/program_options.hpp"
#include "boost/filesystem.hpp"
#include <vector>
#include <iostream>
#include <fstream>
using namespace eos;
namespace po = boost::program_options;
namespace fs = boost::filesystem;
using eos::core::Landmark;
using eos::core::LandmarkCollection;
using cv::Mat;
using cv::Vec2f;
using cv::Vec3f;
using cv::Vec4f;
using std::cout;
using std::endl;
using std::vector;
using std::string;
/**
* Reads an ibug .pts landmark file and returns an ordered vector with
* the 68 2D landmark coordinates.
*
* @param[in] filename Path to a .pts file.
* @return An ordered vector with the 68 ibug landmarks.
*/
LandmarkCollection<cv::Vec2f> read_pts_landmarks(std::string filename)
{
using std::getline;
using cv::Vec2f;
using std::string;
LandmarkCollection<Vec2f> landmarks;
landmarks.reserve(68);
std::ifstream file(filename);
if (!file.is_open()) {
throw std::runtime_error(string("Could not open landmark file: " + filename));
}
string line;
// Skip the first 3 lines, they're header lines:
getline(file, line); // 'version: 1'
getline(file, line); // 'n_points : 68'
getline(file, line); // '{'
int ibugId = 1;
while (getline(file, line))
{
if (line == "}") { // end of the file
break;
}
std::stringstream lineStream(line);
Landmark<Vec2f> landmark;
landmark.name = std::to_string(ibugId);
if (!(lineStream >> landmark.coordinates[0] >> landmark.coordinates[1])) {
throw std::runtime_error(string("Landmark format error while parsing the line: " + line));
}
// From the iBug website:
// "Please note that the re-annotated data for this challenge are saved in the Matlab convention of 1 being
// the first index, i.e. the coordinates of the top left pixel in an image are x=1, y=1."
// ==> So we shift every point by 1:
landmark.coordinates[0] -= 1.0f;
landmark.coordinates[1] -= 1.0f;
landmarks.emplace_back(landmark);
++ibugId;
}
return landmarks;
};
/**
* This app demonstrates estimation of the camera and fitting of the shape
* model of a 3D Morphable Model from an ibug LFPW image with its landmarks.
*
* First, the 68 ibug landmarks are loaded from the .pts file and converted
* to vertex indices using the LandmarkMapper. Then, an orthographic camera
* is estimated, and then, using this camera matrix, the shape is fitted
* to the landmarks.
*/
int main(int argc, char *argv[])
{
std::cerr<<"modif 1"<<endl;
fs::path modelfile, isomapfile, imagefile, landmarksfile, mappingsfile, outputfile;
try {
po::options_description desc("Allowed options");
desc.add_options()
("help,h",
"display the help message")
("model,m", po::value<fs::path>(&modelfile)->required()->default_value("/home/yalishanda/Downloads/eos-master/share/sfm_shape_3448.bin"),
"a Morphable Model stored as cereal BinaryArchive")
("image,i", po::value<fs::path>(&imagefile)->required()->default_value("/home/yalishanda/Downloads/eos-master/examples/data/image_0010.png"),
"an input image")
("landmarks,l", po::value<fs::path>(&landmarksfile)->required()->default_value("/home/yalishanda/Downloads/eos-master/examples/data/image_0010.pts"),
"2D landmarks for the image, in ibug .pts format")
("mapping,p", po::value<fs::path>(&mappingsfile)->required()->default_value("/home/yalishanda/Downloads/eos-master/share/ibug_to_sfm.txt"),
"landmark identifier to model vertex number mapping")
("output,o", po::value<fs::path>(&outputfile)->required()->default_value("out"),
"basename for the output rendering and obj files")
;
po::variables_map vm;
po::store(po::command_line_parser(argc, argv).options(desc).run(), vm);
if (vm.count("help")) {
cout << "Usage: fit-model-simple [options]" << endl;
cout << desc;
return EXIT_SUCCESS;
}
po::notify(vm);
}
catch (const po::error& e) {
cout << "Error while parsing command-line arguments: " << e.what() << endl;
cout << "Use --help to display a list of options." << endl;
return EXIT_FAILURE;
}
// Load the image, landmarks, LandmarkMapper and the Morphable Model:
Mat image = cv::imread(imagefile.string());
LandmarkCollection<cv::Vec2f> landmarks;
try {
landmarks = read_pts_landmarks(landmarksfile.string());
}
catch (const std::runtime_error& e) {
cout << "Error reading the landmarks: " << e.what() << endl;
return EXIT_FAILURE;
}
morphablemodel::MorphableModel morphable_model;
try {
morphable_model = morphablemodel::load_model(modelfile.string());
}
catch (const std::runtime_error& e) {
cout << "Error loading the Morphable Model: " << e.what() << endl;
return EXIT_FAILURE;
}
core::LandmarkMapper landmark_mapper = mappingsfile.empty() ? core::LandmarkMapper() : core::LandmarkMapper(mappingsfile);
// Draw the loaded landmarks:
Mat outimg = image.clone();
for (auto&& lm : landmarks) {
cv::rectangle(outimg, cv::Point2f(lm.coordinates[0] - 2.0f, lm.coordinates[1] - 2.0f), cv::Point2f(lm.coordinates[0] + 2.0f, lm.coordinates[1] + 2.0f), { 255, 0, 0 });
}
// These will be the final 2D and 3D points used for the fitting:
vector<Vec4f> model_points; // the points in the 3D shape model
vector<int> vertex_indices; // their vertex indices
vector<Vec2f> image_points; // the corresponding 2D landmark points
// Sub-select all the landmarks which we have a mapping for (i.e. that are defined in the 3DMM):
for (int i = 0; i < landmarks.size(); ++i) {
auto converted_name = landmark_mapper.convert(landmarks[i].name);
if (!converted_name) { // no mapping defined for the current landmark
continue;
}
int vertex_idx = std::stoi(converted_name.get());
auto vertex = morphable_model.get_shape_model().get_mean_at_point(vertex_idx);
model_points.emplace_back(Vec4f(vertex.x(), vertex.y(), vertex.z(), 1.0f));
vertex_indices.emplace_back(vertex_idx);
image_points.emplace_back(landmarks[i].coordinates);
}
// Estimate the camera (pose) from the 2D - 3D point correspondences
fitting::ScaledOrthoProjectionParameters pose = fitting::estimate_orthographic_projection_linear(image_points, model_points, true, image.rows);
fitting::RenderingParameters rendering_params(pose, image.cols, image.rows);
// The 3D head pose can be recovered as follows:
float yaw_angle = glm::degrees(glm::yaw(rendering_params.get_rotation()));
// and similarly for pitch and roll.
// Estimate the shape coefficients by fitting the shape to the landmarks:
Mat affine_from_ortho = fitting::get_3x4_affine_camera_matrix(rendering_params, image.cols, image.rows);
vector<float> fitted_coeffs = fitting::fit_shape_to_landmarks_linear(morphable_model, affine_from_ortho, image_points, vertex_indices);
// Obtain the full mesh with the estimated coefficients:
core::Mesh mesh = morphable_model.draw_sample(fitted_coeffs, vector<float>());
// Extract the texture from the image using given mesh and camera parameters:
Mat isomap = render::extract_texture(mesh, affine_from_ortho, image);
// Save the mesh as textured obj:
outputfile += fs::path(".obj");
core::write_textured_obj(mesh, outputfile.string());
// And save the isomap:
outputfile.replace_extension(".isomap.png");
cv::imwrite(outputfile.string(), isomap);
cout << "Finished fitting and wrote result mesh and isomap to files with basename " << outputfile.stem().stem() << "." << endl;
return EXIT_SUCCESS;
}
It might be because you run it directly without building it as a project. Check the eos documentation:
git clone --recursive https://github.com/patrikhuber/eos.git
mkdir build && cd build # creates a build directory next to the 'eos' folder
cmake -G "<your favourite generator>" ../eos -DCMAKE_INSTALL_PREFIX=../install/
make && make install # or open the project file and build in an IDE like Visual Studio
Or it might be due to files that were not downloaded successfully by your git clone.
Hope it helps!
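One more thing worth ruling out: the default path ../share/sfm_shape_3448.bin is resolved relative to the directory the binary is run from, not relative to the source tree. Since the program exposes a --model option (see the options code above), passing an absolute path from the shell is a quick sanity check; for example, assuming the example was built into a build/examples directory:
cd build/examples
./fit-model-simple --model /home/yalishanda/Downloads/eos-master/share/sfm_shape_3448.bin
If that also fails, check that the .bin file actually exists and has a non-trivial size, since an incomplete clone can leave it missing or empty.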

Unknown pooling method when testing caffe with cuda but not cudnn

I built the caffe deep learning library on Windows as shown in this link:
https://initialneil.wordpress.com/2015/07/15/caffe-vs2013-opencv-in-windows-tutorial-i/
I deactivated cuDNN because my NVIDIA card did not support it, and changed the target architecture to the Fermi architecture.
I built caffe as a static library to use it in the test project shown below:
int main(int argc, char** argv)
{
// get a testing image and display
Mat img = imread(CAFFE_ROOT + "/examples/images/mnist_5.png");
cvtColor(img, img, CV_BGR2GRAY);
imshow("img", img);
waitKey(1);
// Set up Caffe
Caffe::set_mode(Caffe::GPU);
int device_id = 0;
Caffe::SetDevice(device_id);
LOG(INFO) << "Using GPU";
// Load net
Net<float> net(CAFFE_ROOT + "/examples/mnist/lenet_test-memory-1.prototxt");
string model_file = CAFFE_ROOT + "/examples/mnist/lenet_iter_10000.caffemodel";
net.CopyTrainedLayersFrom(model_file);
// set the patch for testing
vector<Mat> patches;
patches.push_back(img);
// push vector<Mat> to data layer
float loss = 0.0;
boost::shared_ptr<MemoryDataLayer<float> > memory_data_layer;
memory_data_layer = boost::static_pointer_cast<MemoryDataLayer<float>>(net.layer_by_name("data"));
vector<int> labels(patches.size());
memory_data_layer->AddMatVector(patches, labels);
// Net forward
//ERROR IN THE LINE BELOW
const vector<Blob<float>*> & results = net.ForwardPrefilled(&loss);// HERE THE ERROR
float *output = results[1]->mutable_cpu_data();
// Display the output
for (int i = 0; i < 10; i++) {
printf("Probability to be Number %d is %.3f\n", i, output[i]);
}
waitKey(0);
}
But I get an error when accessing the file: pooling_layer.cu in the function described below:
template <typename Dtype>
void PoolingLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
vector<Blob<Dtype>*>* top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
Dtype* top_data = (*top)[0]->mutable_gpu_data();
int count = (*top)[0]->count();
// We'll output the mask to top[1] if it's of size >1.
const bool use_top_mask = top->size() > 1;
int* mask = NULL;
Dtype* top_mask = NULL;
switch (this->layer_param_.pooling_param().pool()) {
case PoolingParameter_PoolMethod_MAX:
if (use_top_mask) {
top_mask = (*top)[1]->mutable_gpu_data();
} else {
mask = max_idx_.mutable_gpu_data();
}
// NOLINT_NEXT_LINE(whitespace/operators)
MaxPoolForward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>> (
count, bottom_data, bottom[0]->num(), channels_,
height_, width_, pooled_height_, pooled_width_, kernel_h_,
kernel_w_, stride_h_, stride_w_, pad_h_, pad_w_, top_data,
mask, top_mask);
break;
case PoolingParameter_PoolMethod_AVE:
// NOLINT_NEXT_LINE(whitespace/operators)
AvePoolForward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
count, bottom_data, bottom[0]->num(), channels_,
height_, width_, pooled_height_, pooled_width_, kernel_h_,
kernel_w_, stride_h_, stride_w_, pad_h_, pad_w_, top_data);
break;
case PoolingParameter_PoolMethod_STOCHASTIC:
if (Caffe::phase() == Caffe::TRAIN) {
// We need to create the random index as well.
caffe_gpu_rng_uniform(count, Dtype(0), Dtype(1),
rand_idx_.mutable_gpu_data());
// NOLINT_NEXT_LINE(whitespace/operators)
StoPoolForwardTrain<Dtype><<<CAFFE_GET_BLOCKS(count),
CAFFE_CUDA_NUM_THREADS>>>(
count, bottom_data, bottom[0]->num(), channels_,
height_, width_, pooled_height_, pooled_width_, kernel_h_,
kernel_w_, stride_h_, stride_w_,
rand_idx_.mutable_gpu_data(), top_data);
} else {
// NOLINT_NEXT_LINE(whitespace/operators)
StoPoolForwardTest<Dtype><<<CAFFE_GET_BLOCKS(count),
CAFFE_CUDA_NUM_THREADS>>>(
count, bottom_data, bottom[0]->num(), channels_,
height_, width_, pooled_height_, pooled_width_, kernel_h_,
kernel_w_, stride_h_, stride_w_, top_data);
}
break;
default:
LOG(FATAL) << "Unknown pooling method.";
}
CUDA_POST_KERNEL_CHECK;
}
And I get the message "Unknown pooling method." at runtime.
Could someone give me an idea about the possible solution?
The pooling layer, which by default should be max pooling, was translated into some other method. You might add a breakpoint at pooling_layer.cu (line 163) or add cout << this->layer_param_.pooling_param().pool() << endl; before that line to see which pooling method it is using. I guess it doesn't equal PoolingParameter_PoolMethod_MAX here.
I'm not sure why it happened; maybe there is some error in the prototxt file or the protobuf. A brutal trick would be overwriting line 206 with lines 165-176 in order to force max pooling.
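For reference, a sketch of that debug print, to be pasted into Forward_gpu just before the switch; it only uses names already visible in the code above (and assumes <iostream> is included):
// Print the pool method enum value next to the three constants the switch tests,
// so the mismatch is visible at runtime.
std::cout << "pool() = " << this->layer_param_.pooling_param().pool()
          << " (MAX=" << PoolingParameter_PoolMethod_MAX
          << ", AVE=" << PoolingParameter_PoolMethod_AVE
          << ", STOCHASTIC=" << PoolingParameter_PoolMethod_STOCHASTIC
          << ")" << std::endl;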

Understanding the HOG feature layout

I am doing a project which involves multi-class object detection. My aim is to detect the following objects:
1. Truck
2. Car
3. Person
As I have three different objects, I will have three different window sizes, but the HOG features for blocks will remain the same. I hacked into OpenCV's hog.cpp and made two new functions to calculate the HOG descriptors for blocks only. Here is my code.
void cv::gpu::HOGDescriptor::getDescriptorsBlock(const GpuMat& img, Size win_stride, GpuMat& descriptors, FileStorage fs3, string fileName, double scale, int width, int height, size_t lev)
{
CV_Assert(win_stride.width % block_stride.width == 0 && win_stride.height % block_stride.height == 0);
size_t block_hist_size = getBlockHistogramSize();
computeBlockHistograms(img);
Size blocks_per_img = numPartsWithin(img.size(), block_size, block_stride);
// Size blocks_per_win = numPartsWithin(win_size, block_size, block_stride);
// Size wins_per_img = numPartsWithin(img.size(), win_size, win_stride);
// copy block_hists from GPU to CPU
float dest_ptr[block_hist_size * blocks_per_img.area()];
cudaMemcpy( &dest_ptr[0], block_hists.ptr<float>(), block_hist_size * blocks_per_img.area() * sizeof(float), cudaMemcpyDeviceToHost);
std::cout<<"( "<<width<< " ," << height<< ")"<< std::endl;
std::cout <<lev<< std::endl;
// write to yml file
int level = lev;
fs3<<"Scale"<<scale;
fs3 <<"Level"<<level;
fs3<<"Width"<<width<<"Height"<<height;
fs3 << "features" << "[";
for (unsigned int i = 0; i < (block_hist_size * blocks_per_img.area()) ; i++ )
{
fs3 << dest_ptr[i];
}
fs3 << "]";
}
Similarly to get block descriptors for multi-scale
void cv::gpu::HOGDescriptor::getDescriptorsMultiScale(const GpuMat& img,
Size win_stride, double scale0, unsigned int count)
{
CV_Assert(img.type() == CV_8UC1 || img.type() == CV_8UC4);
vector<double> level_scale;
double scale = 1.;
int levels = 0;
for (levels = 0; levels < nlevels; levels++)
{
level_scale.push_back(scale);
if (cvRound(img.cols/scale) < win_size.width ||
cvRound(img.rows/scale) < win_size.height || scale0 <= 1)
break;
scale *= scale0;
}
levels = std::max(levels, 1);
level_scale.resize(levels);
image_scales.resize(levels);
// open yml file with image ID
FileStorage fs3;
char fileName[20];
GpuMat descriptors;
sprintf (fileName, "%04d", count);
fs3.open(fileName, FileStorage::WRITE);
for (size_t i = 0; i < level_scale.size(); i++)
{
scale = level_scale[i];
Size sz(cvRound(img.cols / scale), cvRound(img.rows / scale));
GpuMat smaller_img;
if (sz == img.size())
smaller_img = img;
else
{
image_scales[i].create(sz, img.type());
switch (img.type())
{
case CV_8UC1: hog::resize_8UC1(img, image_scales[i]); break;
case CV_8UC4: hog::resize_8UC4(img, image_scales[i]); break;
}
smaller_img = image_scales[i];
}
std::cout<<"scale "<<level_scale[i]<<std::endl;
// calculate descriptors for blocks
getDescriptorsBlock( smaller_img, win_stride, descriptors, fs3, fileName, scale, smaller_img.cols, smaller_img.rows, i);
// detect(smaller_img, locations, hit_threshold, win_stride, padding);
}
// close yml file
fs3.release();
}
My question is about the layout structure of the HOG descriptors for blocks only. Can someone share their thoughts?
Generally, an image pyramid is often used to achieve scale invariance. If you want to get more sophisticated, have a look at the paper "Object Detection with Discriminatively Trained Part Based Models" [1]. They were really successful in using HoG at different scales. Of course the original HoG paper might be of use to understand the structure of the feature itself [2], if that is more what you are after.
[1] http://vision.ics.uci.edu/papers/FelzenszwalbGMR_PAMI_2009/FelzenszwalbGMR_PAMI_2009.pdf
[2] http://lear.inrialpes.fr/people/triggs/pubs/Dalal-cvpr05.pdf
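As for the raw layout in getDescriptorsBlock above: each block contributes block_hist_size consecutive floats, and the blocks are simply concatenated in the copied buffer. Assuming they are stored in row-major block order (an assumption worth verifying against your OpenCV version, as this is not documented), indexing would look like this sketch:
#include <cstddef>
#include <opencv2/core/core.hpp>
// Sketch: read bin 'bin' of the block at block-grid position (bx, by) from the
// flat buffer copied back from the GPU. Row-major block order is an assumption.
float blockHistValue(const float* dest_ptr, size_t block_hist_size,
                     cv::Size blocks_per_img, int bx, int by, int bin)
{
    size_t block_index = static_cast<size_t>(by) * blocks_per_img.width + bx;
    return dest_ptr[block_index * block_hist_size + bin];
}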

running opencv code with matlab via mex fails while on VisualStudio it works

I want to extract some Harris corners from an image and get FREAK descriptors. Here is how I try to do it:
(The passed variables are globally defined.)
void computeFeatures(cv::Mat &src, std::vector<cv::KeyPoint> &keypoints, cv::Mat &desc ) {
cv::Mat featureSpace;
featureSpace = cv::Mat::zeros( src.size(), CV_32FC1 );
//- Detector parameters
int blockSize = 3;
int apertureSize = 3;
double k = 0.04;
//- Detecting corners
cornerHarris( src, featureSpace, blockSize, apertureSize, k, cv::BORDER_DEFAULT );
//- Thresholding featureSpace
keypoints.clear();
nonMaximumSuppression(featureSpace, keypoints, param.nms_n);
//- compute FREAK-descriptor
cv::FREAK freak(false, false, 22.0f, 4);
freak.compute(src, keypoints, desc);
}
I can compile it with Visual Studio 12 as well as with Matlab R2013b via mex. When I run it standalone (.exe) it works just fine. When I try to execute it via Matlab it fails with this message:
A buffer overrun has occurred in MATLAB.exe which has corrupted the
program's internal state. Press Break to debug the program or Continue
to terminate the program.
I mexed with the debug option '-g' and attached Visual Studio to Matlab to get closer to the error:
After nonMaximumSuppression() the size of keypoints is 233; when I jump into freak.compute() the size is suddenly 83, with "random" values stored.
The actual error then occurs in KeyPointsFilter::runByKeypointSize, where keypoints should be erased.
in keypoint.cpp line 256:
void KeyPointsFilter::runByKeypointSize( vector<KeyPoint>& keypoints, float minSize, float maxSize )
{
CV_Assert( minSize >= 0 );
CV_Assert( maxSize >= 0);
CV_Assert( minSize <= maxSize );
keypoints.erase( std::remove_if(keypoints.begin(), keypoints.end(), SizePredicate(minSize, maxSize)),
keypoints.end() );
}
Is there some error I'm making with passing the keyPoint-vector? Has anybody run into a similar problem?
EDIT:
Here is the mex-file with the additional library "opencv_matlab.hpp" taken from MatlabCentral
#include "opencv_matlab.hpp"
void mexFunction (int nlhs,mxArray *plhs[],int nrhs,const mxArray *prhs[]) {
// read command
char command[128];
mxGetString(prhs[0],command,128);
if (!strcmp(command,"push") || !strcmp(command,"replace")) {
// check arguments
if (nrhs!=1+1 && nrhs!=1+2)
mexErrMsgTxt("1 or 2 inputs required (I1=left image,I2=right image).");
if (!mxIsUint8(prhs[1]) || mxGetNumberOfDimensions(prhs[1])!=2)
mexErrMsgTxt("Input I1 (left image) must be a uint8_t matrix.");
// determine input/output image properties
const int *dims1 = mxGetDimensions(prhs[1]);
const int nDims1 = mxGetNumberOfDimensions(prhs[1]);
const int rows1 = dims1[0];
const int cols1 = dims1[1];
const int channels1 = (nDims1 == 3 ? dims1[2] : 1);
// Allocate, copy, and convert the input image
// @note: input is double
cv::Mat I1_ = cv::Mat::zeros(cv::Size(cols1, rows1), CV_8UC(channels1));
om::copyMatrixToOpencv<uchar>((unsigned char*)mxGetPr(prhs[1]), I1_);
// push back single image
if (nrhs==1+1) {
// compute features and put them to ring buffer
pushBack(I1_,!strcmp(command,"replace"));
// push back stereo image pair
} else {
if (!mxIsUint8(prhs[2]) || mxGetNumberOfDimensions(prhs[2])!=2)
mexErrMsgTxt("Input I2 (right image) must be a uint8_t matrix.");
// determine input/output image properties
const int *dims2 = mxGetDimensions(prhs[2]);
const int nDims2 = mxGetNumberOfDimensions(prhs[2]);
const int rows2 = dims2[0];
const int cols2 = dims2[1];
const int channels2 = (nDims2 == 3 ? dims2[2] : 1);
// Allocate, copy, and convert the input image
// @note: input is double
cv::Mat I2_ = cv::Mat::zeros(cv::Size(cols2, rows2), CV_8UC(channels2));
om::copyMatrixToOpencv<uchar>((unsigned char*)mxGetPr(prhs[2]), I2_);
// check image size
if (dims1[0]!=dims2[0] || dims1[1]!=dims2[1])
mexErrMsgTxt("Input I1 and I2 must be images of same size.");
// compute features and put them to ring buffer
pushBack(I1_,I2_,!strcmp(command,"replace"));
}
}else {
mexPrintf("Unknown command: %s\n",command);
}
}
And here is an additional part of the main cpp project.
std::vector<cv::KeyPoint> k1c1, k2c1, k1p1, k2p1; //KeyPoints
cv::Mat d1c1, d2c1, d1p1, d2p1; //descriptors
void pushBack (cv::Mat &I1,cv::Mat &I2,const bool replace) {
// sanity check
if (I1.empty()) {
cerr << "ERROR: Image empty!" << endl;
return;
}
if (replace) {
//if (!k1c1.empty())
k1c1.clear(); k2c1.clear();
d1c1.release(); d2c1.release();
} else {
k1p1.clear(); k2p1.clear();
d1p1.release(); d2p1.release();
k1p1 = k1c1; k2p1 = k2c1;
d1c1.copyTo(d1p1); d2c1.copyTo(d2p1);
k1c1.clear(); k2c1.clear();
d1c1.release(); d2c1.release();
}
// compute new features for current frame
computeFeatures(I1,k1c1,d1c1);
if (!I2.empty())
computeFeatures(I2,k2c1,d2c1);
}
And here is how I call the mex-file from Matlab
I1p = imread('\I1.bmp');
I2p = imread('\I2.bmp');
harris_freak('push',I1p,I2p);
Hope this helps...
I hope this is the correct way to give an answer to my own question.
After a couple of days I found a kind of workaround. Instead of building the mex file in Matlab, which gives the above-mentioned error, I built it in Visual Studio with instructions taken from here.
Now everything works just fine.
It kind of bothers me not to know how to do it with Matlab, but hey, maybe someone still has an idea.
Thanks to the commenters for taking the time to look through my question!
If you have the Computer Vision System Toolbox then you do not need mex. It includes the detectHarrisFeatures function for detecting Harris corners, and the extractFeatures function, which can compute FREAK descriptors.
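A minimal sketch of that route (untested here; both functions are part of the Computer Vision System Toolbox, and rgb2gray assumes an RGB input image):
I = rgb2gray(imread('I1.bmp'));   % drop rgb2gray if the image is already grayscale
points = detectHarrisFeatures(I); % Harris corners
[descriptors, validPoints] = extractFeatures(I, points, 'Method', 'FREAK');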