Unknown pooling method when testing Caffe with CUDA but not cuDNN - C++

I built the Caffe deep learning library on Windows as shown in this link:
https://initialneil.wordpress.com/2015/07/15/caffe-vs2013-opencv-in-windows-tutorial-i/
I deactivated cuDNN because my NVIDIA card does not support it, and I changed the target architecture to Fermi.
I built Caffe as a static library to use it in the test project shown below:
// Assumed headers and usings (adjust to your build);
// CAFFE_ROOT is assumed to be a std::string defined elsewhere in the project.
#include <caffe/caffe.hpp>
#include <opencv2/opencv.hpp>

using namespace caffe;
using namespace cv;
using std::string;
using std::vector;

int main(int argc, char** argv)
{
    // get a testing image and display
    Mat img = imread(CAFFE_ROOT + "/examples/images/mnist_5.png");
    cvtColor(img, img, CV_BGR2GRAY);
    imshow("img", img);
    waitKey(1);

    // Set up Caffe
    Caffe::set_mode(Caffe::GPU);
    int device_id = 0;
    Caffe::SetDevice(device_id);
    LOG(INFO) << "Using GPU";

    // Load net
    Net<float> net(CAFFE_ROOT + "/examples/mnist/lenet_test-memory-1.prototxt");
    string model_file = CAFFE_ROOT + "/examples/mnist/lenet_iter_10000.caffemodel";
    net.CopyTrainedLayersFrom(model_file);

    // set the patch for testing
    vector<Mat> patches;
    patches.push_back(img);

    // push vector<Mat> to data layer
    float loss = 0.0;
    boost::shared_ptr<MemoryDataLayer<float> > memory_data_layer;
    memory_data_layer = boost::static_pointer_cast<MemoryDataLayer<float>>(net.layer_by_name("data"));
    vector<int> labels(patches.size());
    memory_data_layer->AddMatVector(patches, labels);

    // Net forward
    // ERROR IN THE LINE BELOW
    const vector<Blob<float>*>& results = net.ForwardPrefilled(&loss); // HERE THE ERROR
    float* output = results[1]->mutable_cpu_data();

    // Display the output
    for (int i = 0; i < 10; i++) {
        printf("Probability to be Number %d is %.3f\n", i, output[i]);
    }
    waitKey(0);
}
But I get an error in the file pooling_layer.cu, in the function shown below:
template <typename Dtype>
void PoolingLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
    vector<Blob<Dtype>*>* top) {
  const Dtype* bottom_data = bottom[0]->gpu_data();
  Dtype* top_data = (*top)[0]->mutable_gpu_data();
  int count = (*top)[0]->count();
  // We'll output the mask to top[1] if it's of size >1.
  const bool use_top_mask = top->size() > 1;
  int* mask = NULL;
  Dtype* top_mask = NULL;
  switch (this->layer_param_.pooling_param().pool()) {
  case PoolingParameter_PoolMethod_MAX:
    if (use_top_mask) {
      top_mask = (*top)[1]->mutable_gpu_data();
    } else {
      mask = max_idx_.mutable_gpu_data();
    }
    // NOLINT_NEXT_LINE(whitespace/operators)
    MaxPoolForward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
        count, bottom_data, bottom[0]->num(), channels_,
        height_, width_, pooled_height_, pooled_width_, kernel_h_,
        kernel_w_, stride_h_, stride_w_, pad_h_, pad_w_, top_data,
        mask, top_mask);
    break;
  case PoolingParameter_PoolMethod_AVE:
    // NOLINT_NEXT_LINE(whitespace/operators)
    AvePoolForward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
        count, bottom_data, bottom[0]->num(), channels_,
        height_, width_, pooled_height_, pooled_width_, kernel_h_,
        kernel_w_, stride_h_, stride_w_, pad_h_, pad_w_, top_data);
    break;
  case PoolingParameter_PoolMethod_STOCHASTIC:
    if (Caffe::phase() == Caffe::TRAIN) {
      // We need to create the random index as well.
      caffe_gpu_rng_uniform(count, Dtype(0), Dtype(1),
          rand_idx_.mutable_gpu_data());
      // NOLINT_NEXT_LINE(whitespace/operators)
      StoPoolForwardTrain<Dtype><<<CAFFE_GET_BLOCKS(count),
                                   CAFFE_CUDA_NUM_THREADS>>>(
          count, bottom_data, bottom[0]->num(), channels_,
          height_, width_, pooled_height_, pooled_width_, kernel_h_,
          kernel_w_, stride_h_, stride_w_,
          rand_idx_.mutable_gpu_data(), top_data);
    } else {
      // NOLINT_NEXT_LINE(whitespace/operators)
      StoPoolForwardTest<Dtype><<<CAFFE_GET_BLOCKS(count),
                                  CAFFE_CUDA_NUM_THREADS>>>(
          count, bottom_data, bottom[0]->num(), channels_,
          height_, width_, pooled_height_, pooled_width_, kernel_h_,
          kernel_w_, stride_h_, stride_w_, top_data);
    }
    break;
  default:
    LOG(FATAL) << "Unknown pooling method.";
  }
  CUDA_POST_KERNEL_CHECK;
}
And I get the message "Unknown pooling method." from the LOG(FATAL) in the default case.
Could someone give me an idea about a possible solution?

The pooling layer, which by default should be max pooling, was translated into some other method. You might add a breakpoint at pooling_layer.cu (line 163), or add cout << this->layer_param_.pooling_param().pool() << endl; before that line, to see which pooling method it is actually using. I guess it does not equal PoolingParameter_PoolMethod_MAX here.
I'm not sure why it happened; maybe there is some error in the prototxt file or in the protobuf. A brute-force trick would be to overwrite line 206 with lines 165-176 in order to force max pooling.
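A minimal sketch of that diagnostic (assuming you can rebuild the Caffe library) is to log the enum value just before the switch in Forward_gpu:

    // Hypothetical diagnostic, inserted right before the switch statement.
    // In caffe.proto the enum values are MAX = 0, AVE = 1, STOCHASTIC = 2,
    // so anything other than 0/1/2 here means the layer parameter was mangled
    // somewhere between the prototxt and the parsed protobuf.
    LOG(INFO) << "Layer '" << this->layer_param_.name() << "' pool method: "
              << this->layer_param_.pooling_param().pool();

If the printed value is outside 0-2, I would look at how the prototxt is parsed (e.g. a protobuf version mismatch) rather than at the pooling code itself.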

Related

Try to build and get Open Image Denoise working with Bazel

Currently, I am trying to get Open Image Denoise to work with Bazel.
To that end, I implemented rules_oidn.
To try it out, you can run:
git clone https://github.com/Vertexwahn/rules_oidn.git
cd rules_oidn
cd tests
Run the example on Ubuntu 22.04:
bazel run --config=gcc11 //:example
Run the example with Visual Studio 2022:
bazel run --config=vs2022 //:example
The example takes a noisy image and denoises it.
...
int main() {
    cout << "Simple denoising example" << endl;

    Image3f color = load_image_openexr("data/cornel_box.naive_diffuse.box_filter.spp128.embree.exr");
    //Image3f color = load_image_openexr("data/noisy_10spp.exr");
    Image3f normal = load_image_openexr("data/normal_10spp.exr");
    Image3f albedo = load_image_openexr("data/albedo_10spp.exr");
    Image3f out{color.width(), color.height()};

    // for debug reasons the color image can be initialized with a const color
    if (false) {
        for (int x = 0; x < color.width(); ++x) {
            for (int y = 0; y < color.height(); ++y) {
                color.set_pixel(x, y, .5f, .5f, .5f);
            }
        }
    }

    float* colorPtr = color.data();
    float* albedoPtr = albedo.data();
    float* normalPtr = normal.data();
    float* outputPtr = out.data();
    int width = out.width();
    int height = out.height();

    oidn::DeviceRef device = oidn::newDevice();
    device.set("verbose", 1);
    device.commit();

    // Create a filter for denoising a beauty (color) image using optional auxiliary images too
    oidn::FilterRef filter = device.newFilter("RT"); // generic ray tracing filter
    filter.setImage("color", colorPtr, oidn::Format::Float3, width, height); // beauty
    //filter.setImage("albedo", albedoPtr, oidn::Format::Float3, width, height); // auxiliary
    //filter.setImage("normal", normalPtr, oidn::Format::Float3, width, height); // auxiliary
    filter.setImage("output", outputPtr, oidn::Format::Float3, width, height); // denoised beauty
    filter.set("hdr", true); // beauty image is HDR
    filter.commit();

    // Filter the image
    filter.execute();

    // Check for errors
    const char* errorMessage;
    if (device.getError(errorMessage) != oidn::Error::None) {
        std::cout << "Error: " << errorMessage << std::endl;
    }

    store_open_exr("denoised.exr", out);

    return 0;
}
Unfortunately, the denoised image contains black stripes.
I tested the same input with https://github.com/DeclanRussell/IntelOIDenoiser and got the expected result (without black stripes).
If I choose a constant color image, e.g.

// for debug reasons the color image can be initialized with a const color
if (true) {
    for (int x = 0; x < color.width(); ++x) {
        for (int y = 0; y < color.height(); ++y) {
            color.set_pixel(x, y, .5f, .5f, .5f);
        }
    }
}

I also get black stripes.
Currently, I am missing a good strategy to find the issue.
Any hints or solutions to fix the issue are welcome.
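One thing that may be worth ruling out (an assumption on my part, since the Image3f layout is not shown): if Image3f pads its rows or lays out pixels differently from OIDN's default tightly packed Float3 layout, OIDN reads the buffer with the wrong stride, which typically shows up as stripes. setImage accepts explicit byte offsets and strides, so the layout can be stated explicitly:

// Hypothetical check: pass explicit strides instead of relying on defaults.
// Assumes 3 tightly packed floats per pixel; adjust if Image3f pads its rows.
const size_t bytePixelStride = 3 * sizeof(float);
const size_t byteRowStride = width * bytePixelStride;
filter.setImage("color", colorPtr, oidn::Format::Float3, width, height,
                0, bytePixelStride, byteRowStride);
filter.setImage("output", outputPtr, oidn::Format::Float3, width, height,
                0, bytePixelStride, byteRowStride);

If the stripes persist even with a constant-color input and explicit strides, comparing the raw float buffer against what IntelOIDenoiser feeds in would be my next step, since that tool produces the expected result.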

Null pointer (create() called for the missing output array) in create, file /home/m/OpenCV/modules/core/src/matrix_wrap.cpp, line 1461

I am trying to run the "Drawing labeled connected components" example from O'Reilly's book, but I get the following error message at run time (I built the source without problems):
#include <opencv2/opencv.hpp>
#include <algorithm>
#include <iostream>

using namespace std;
//using namespace cv;

int main(int argc, char* argv[])
{
    cv::Mat img, img_edge, labels, img_color, stats;

    // load image or show help if no image was provided
    if( argc != 2 || (img = cv::imread( argv[1], 0 )).empty() )
    {
        cout << "\nExample 8_3 Drawing Connected components\n" << "Call is:\n" << argv[0] << " image\n\n";
        return -1;
    }

    cv::threshold(img, img_edge, 128, 255, cv::THRESH_BINARY);
    cv::imshow("Image after threshold", img_edge);

    int i, nccomps = cv::connectedComponentsWithStats (
        img_edge, labels,
        stats, cv::noArray()
    );
    cout << "Total Connected Components Detected: " << nccomps << endl;

    vector<cv::Vec3b> colors(nccomps+1);
    colors[0] = cv::Vec3b(0,0,0); // background pixels remain black.
    for( i = 1; i <= nccomps; i++ )
    {
        colors[i] = cv::Vec3b(rand()%256, rand()%256, rand()%256);
        if( stats.at<int>(i-1, cv::CC_STAT_AREA) < 100 )
            colors[i] = cv::Vec3b(0,0,0); // small regions are painted with black too.
    }

    img_color = cv::Mat::zeros(img.size(), CV_8UC3);
    for( int y = 0; y < img_color.rows; y++ )
        for( int x = 0; x < img_color.cols; x++ )
        {
            int label = labels.at<int>(y, x);
            CV_Assert(0 <= label && label <= nccomps);
            img_color.at<cv::Vec3b>(y, x) = colors[label];
        }

    cv::imshow("Labeled map", img_color);
    cv::waitKey();
    return 0;
}
OpenCV(3.4.1) Error: Null pointer (create() called for the missing output array) in create, file /home/m/OpenCV/modules/core/src/matrix_wrap.cpp, line 1461
terminate called after throwing an instance of 'cv::Exception'
  what(): OpenCV(3.4.1) /home/m/OpenCV/modules/core/src/matrix_wrap.cpp:1461: error: (-27) create() called for the missing output array in function create
What is the problem and how can I fix it?
Using a debugger shows that this exception appears during the call to connectedComponentsWithStats(). The description of the error implies that something is empty/null that shouldn't be.
OpenCV's current docs show that the fourth argument, centroids, should be a CV_64F matrix with 2 columns and one row per label. This matrix is filled by the function itself, as long as it exists.
Thus, all you need to do is create this matrix and pass it as the function expects:
cv::Mat centroids;
int i, nccomps = cv::connectedComponentsWithStats (img_edge, labels, stats, centroids);
Your problem lies in using OpenCV 3.4.1 (the latest at the time of writing), whose API has changed compared to the book you're using; this is a common problem with any open-source library. I suggest you keep the current OpenCV docs at hand to make sure the code from the book still works, or downgrade to an older version.
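For completeness, centroids comes back with one row per label (the background is label 0), so once it exists you can read it back like this (a small illustrative sketch):

// Row i of 'centroids' holds the (x, y) centroid of label i as doubles.
for (int i = 0; i < nccomps; i++)
{
    double cx = centroids.at<double>(i, 0);
    double cy = centroids.at<double>(i, 1);
    cout << "Component " << i << ": centroid (" << cx << ", " << cy << ")" << endl;
}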

Understanding the HOG feature layout

I am doing a project which involves multi-class object detection. My aim is to detect the following objects:
1. Truck
2. Car
3. Person
As I have three different objects, I will have three different window sizes, but the HOG features for blocks remain the same. So I hacked into OpenCV's hog.cpp and made two new functions to calculate the HOG descriptors for blocks only. Here is my code:
void cv::gpu::HOGDescriptor::getDescriptorsBlock(const GpuMat& img, Size win_stride, GpuMat& descriptors, FileStorage fs3, string fileName, double scale, int width, int height, size_t lev)
{
    CV_Assert(win_stride.width % block_stride.width == 0 && win_stride.height % block_stride.height == 0);

    size_t block_hist_size = getBlockHistogramSize();
    computeBlockHistograms(img);
    Size blocks_per_img = numPartsWithin(img.size(), block_size, block_stride);
    // Size blocks_per_win = numPartsWithin(win_size, block_size, block_stride);
    // Size wins_per_img = numPartsWithin(img.size(), win_size, win_stride);

    // copy block_hists from GPU to CPU
    // (std::vector instead of a variable-length array, which is not standard C++;
    //  note also sizeof(float) -- CV_32F is just an enum value, not a type)
    std::vector<float> dest_ptr(block_hist_size * blocks_per_img.area());
    cudaMemcpy(&dest_ptr[0], block_hists.ptr<float>(), block_hist_size * blocks_per_img.area() * sizeof(float), cudaMemcpyDeviceToHost);
    std::cout << "( " << width << " ," << height << ")" << std::endl;
    std::cout << lev << std::endl;

    // write to yml file
    int level = lev;
    fs3 << "Scale" << scale;
    fs3 << "Level" << level;
    fs3 << "Width" << width << "Height" << height;
    fs3 << "features" << "[";
    for (unsigned int i = 0; i < (block_hist_size * blocks_per_img.area()); i++)
    {
        fs3 << dest_ptr[i];
    }
    fs3 << "]";
}
Similarly, to get block descriptors at multiple scales:
void cv::gpu::HOGDescriptor::getDescriptorsMultiScale(const GpuMat& img,
    Size win_stride, double scale0, unsigned int count)
{
    CV_Assert(img.type() == CV_8UC1 || img.type() == CV_8UC4);

    vector<double> level_scale;
    double scale = 1.;
    int levels = 0;
    for (levels = 0; levels < nlevels; levels++)
    {
        level_scale.push_back(scale);
        if (cvRound(img.cols/scale) < win_size.width ||
            cvRound(img.rows/scale) < win_size.height || scale0 <= 1)
            break;
        scale *= scale0;
    }
    levels = std::max(levels, 1);
    level_scale.resize(levels);
    image_scales.resize(levels);

    // open yml file with image ID
    FileStorage fs3;
    char fileName[20];
    GpuMat descriptors;
    sprintf (fileName, "%04d", count);
    fs3.open(fileName, FileStorage::WRITE);

    for (size_t i = 0; i < level_scale.size(); i++)
    {
        scale = level_scale[i];
        Size sz(cvRound(img.cols / scale), cvRound(img.rows / scale));
        GpuMat smaller_img;
        if (sz == img.size())
            smaller_img = img;
        else
        {
            image_scales[i].create(sz, img.type());
            switch (img.type())
            {
                case CV_8UC1: hog::resize_8UC1(img, image_scales[i]); break;
                case CV_8UC4: hog::resize_8UC4(img, image_scales[i]); break;
            }
            smaller_img = image_scales[i];
        }
        std::cout << "scale " << level_scale[i] << std::endl;

        // calculate descriptors for blocks
        getDescriptorsBlock( smaller_img, win_stride, descriptors, fs3, fileName, scale, smaller_img.cols, smaller_img.rows, i);
        // detect(smaller_img, locations, hit_threshold, win_stride, padding);
    }

    // close yml file
    fs3.release();
}
My question is about the layout of these block-only HOG descriptors. Can someone share their thoughts?
Generally, an image pyramid is used to achieve scale invariance. If you want to get more sophisticated, have a look at the paper "Object Detection with Discriminatively Trained Part-Based Models" [1]; the authors were very successful in using HOG at different scales. Of course, the original HOG paper [2] might be of use to understand the structure of the feature itself, if that is more what you are after.
[1] http://vision.ics.uci.edu/papers/FelzenszwalbGMR_PAMI_2009/FelzenszwalbGMR_PAMI_2009.pdf
[2] http://lear.inrialpes.fr/people/triggs/pubs/Dalal-cvpr05.pdf
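Regarding the block layout itself, here is a small sketch of the sizes involved, assuming OpenCV's default GPU HOG parameters (9 bins, 16x16 blocks, 8x8 cells, 8x8 block stride) and that block histograms are stored row by row over the image's block grid (my reading of the GPU code; treat the ordering as an assumption):

#include <cstdio>

// Sanity-check sketch for the block-histogram layout written by the code above.
// Each block contributes one histogram of nbins * cells_per_block floats.
int main() {
    const int nbins = 9;
    const int block_w = 16, block_h = 16;  // block_size
    const int cell_w = 8, cell_h = 8;      // cell_size
    const int stride_w = 8, stride_h = 8;  // block_stride
    const int img_w = 640, img_h = 480;    // example image size

    const int cells_per_block = (block_w / cell_w) * (block_h / cell_h); // 4
    const int block_hist_size = nbins * cells_per_block;                 // 36
    // numPartsWithin(img, block, stride): (size - part) / stride + 1
    const int blocks_x = (img_w - block_w) / stride_w + 1;
    const int blocks_y = (img_h - block_h) / stride_h + 1;

    // The histogram of block (bx, by) then starts at float offset
    //   (by * blocks_x + bx) * block_hist_size
    std::printf("%d x %d blocks, %d floats per block, %d floats total\n",
                blocks_x, blocks_y, block_hist_size,
                blocks_x * blocks_y * block_hist_size);
    return 0;
}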

Error in executing object detection main program

I am using a program to train an SVM classifier for object detection using HOG descriptors, and I am not able to get an executable from the main program. My code is as follows:
#include <stdio.h>
#include <dirent.h>
#include <ios>
#include <fstream>
#include <stdexcept>
#include <opencv2/opencv.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/ml/ml.hpp>
#define SVMLIGHT 1
#define LIBSVM 2
#define TRAINHOG_USEDSVM SVMLIGHT
#if TRAINHOG_USEDSVM == SVMLIGHT
#include "svm_light/svmlight.h"
#define TRAINHOG_SVM_TO_TRAIN SVMlight
#elif TRAINHOG_USEDSVM == LIBSVM
#include "libsvm/libsvm.h"
#define TRAINHOG_SVM_TO_TRAIN libSVM
#endif
using namespace std;
using namespace cv;
static string posSamplesDir = "pos/";
static string negSamplesDir = "neg/";
static string featuresFile = "genfiles/features.dat";
static string svmModelFile = "genfiles/svmlightmodel.dat";
static string descriptorVectorFile = "genfiles/descriptorvector.dat";
static const Size trainingPadding = Size(0,0);
static const Size winStride = Size(8,8);
// </editor-fold>
// <editor-fold defaultstate="collapsed" desc="Helper functions">
/* Helper functions */
static string toLowerCase(const string& in) {
string t;
for (string::const_iterator i = in.begin(); i != in.end(); ++i) {
t += tolower(*i);
}
return t;
}
static void storeCursor(void) {
printf("\033[s");
}
static void resetCursor(void) {
printf("\033[u");
}
/**
* Saves the given descriptor vector to a file
* @param descriptorVector the descriptor vector to save
* @param _vectorIndices contains indices for the corresponding vector values (e.g. descriptorVector(0)=3.5f may have index 1)
* @param fileName
* @TODO Use _vectorIndices to write correct indices
*/
static void saveDescriptorVectorToFile(vector<float>& descriptorVector, vector<unsigned int>& _vectorIndices, string fileName) {
printf("Saving descriptor vector to file '%s'\n", fileName.c_str());
string separator = " "; // Use blank as default separator between single features
fstream File;
float percent;
File.open(fileName.c_str(), ios::out);
if (File.good() && File.is_open()) {
printf("Saving %lu descriptor vector features:\t", descriptorVector.size());
storeCursor();
for (int feature = 0; feature < descriptorVector.size(); ++feature) {
if ((feature % 10 == 0) || (feature == (descriptorVector.size()-1)) ) {
percent = ((1 + feature) * 100 / descriptorVector.size());
printf("%4u (%3.0f%%)", feature, percent);
fflush(stdout);
resetCursor();
}
File << descriptorVector.at(feature) << separator;
}
printf("\n");
File << endl;
File.flush();
File.close();
}
}
/**
* For unixoid systems only: Lists all files in a given directory and returns a vector of path+name in string format
* @param dirName
* @param fileNames found file names in specified directory
* @param validExtensions containing the valid file extensions for collection in lower case
*/
static void getFilesInDirectory(const string& dirName, vector<string>& fileNames, const vector<string>& validExtensions) {
printf("Opening directory %s\n", dirName.c_str());
struct dirent* ep;
size_t extensionLocation;
DIR* dp = opendir(dirName.c_str());
if (dp != NULL) {
while ((ep = readdir(dp))) {
// Ignore (sub-)directories like . , .. , .svn, etc.
if (ep->d_type & DT_DIR) {
continue;
}
extensionLocation = string(ep->d_name).find_last_of("."); // Assume the last point marks beginning of extension like file.ext
// Check if extension is matching the wanted ones
string tempExt = toLowerCase(string(ep->d_name).substr(extensionLocation + 1));
if (find(validExtensions.begin(), validExtensions.end(), tempExt) != validExtensions.end()) {
printf("Found matching data file '%s'\n", ep->d_name);
fileNames.push_back((string) dirName + ep->d_name);
} else {
printf("Found file does not match required file type, skipping: '%s'\n", ep->d_name);
}
}
(void) closedir(dp);
} else {
printf("Error opening directory '%s'!\n", dirName.c_str());
}
return;
}
/**
* This is the actual calculation from the (input) image data to the HOG descriptor/feature vector using the hog.compute() function
* @param imageFilename file path of the image file to read and calculate feature vector from
* @param descriptorVector the returned calculated feature vector<float> ,
* I can't comprehend why openCV implementation returns std::vector<float> instead of cv::MatExpr_<float> (e.g. Mat<float>)
* @param hog HOGDescriptor containing HOG settings
*/
static void calculateFeaturesFromInput(const string& imageFilename, vector<float>& featureVector, HOGDescriptor& hog) {
/** for imread flags from openCV documentation,
* @see http://docs.opencv.org/modules/highgui/doc/reading_and_writing_images_and_video.html?highlight=imread#Mat imread(const string& filename, int flags)
* @note If you get a compile-time error complaining about following line (esp. imread),
* you either do not have a current openCV version (>2.0)
* or the linking order is incorrect, try g++ -o openCVHogTrainer main.cpp `pkg-config --cflags --libs opencv`
*/
Mat imageData = imread(imageFilename, 0);
if (imageData.empty()) {
featureVector.clear();
printf("Error: HOG image '%s' is empty, features calculation skipped!\n", imageFilename.c_str());
return;
}
// Check for mismatching dimensions
if (imageData.cols != hog.winSize.width || imageData.rows != hog.winSize.height) {
featureVector.clear();
printf("Error: Image '%s' dimensions (%u x %u) do not match HOG window size (%u x %u)!\n", imageFilename.c_str(), imageData.cols, imageData.rows, hog.winSize.width, hog.winSize.height);
return;
}
vector<Point> locations;
hog.compute(imageData, featureVector, winStride, trainingPadding, locations);
imageData.release(); // Release the image again after features are extracted
}
/**
* Shows the detections in the image
* @param found vector containing valid detection rectangles
* @param imageData the image in which the detections are drawn
*/
static void showDetections(const vector<Point>& found, Mat& imageData) {
size_t i, j;
for (i = 0; i < found.size(); ++i) {
Point r = found[i];
// Rect_(_Tp _x, _Tp _y, _Tp _width, _Tp _height);
rectangle(imageData, Rect(r.x-16, r.y-32, 32, 64), Scalar(64, 255, 64), 3);
}
}
/**
* Shows the detections in the image
* @param found vector containing valid detection rectangles
* @param imageData the image in which the detections are drawn
*/
static void showDetections(const vector<Rect>& found, Mat& imageData) {
vector<Rect> found_filtered;
size_t i, j;
for (i = 0; i < found.size(); ++i) {
Rect r = found[i];
for (j = 0; j < found.size(); ++j)
if (j != i && (r & found[j]) == r)
break;
if (j == found.size())
found_filtered.push_back(r);
}
for (i = 0; i < found_filtered.size(); i++) {
Rect r = found_filtered[i];
rectangle(imageData, r.tl(), r.br(), Scalar(64, 255, 64), 3);
}
}
/**
* Test the trained detector against the same training set to get an approximate idea of the detector.
* Warning: This does not allow any statement about detection quality, as the detector might be overfitting.
* Detector quality must be determined using an independent test set.
* @param hog
*/
static void detectTrainingSetTest(const HOGDescriptor& hog, const double hitThreshold, const vector<string>& posFileNames, const vector<string>& negFileNames) {
unsigned int truePositives = 0;
unsigned int trueNegatives = 0;
unsigned int falsePositives = 0;
unsigned int falseNegatives = 0;
vector<Point> foundDetection;
// Walk over positive training samples, generate images and detect
for (vector<string>::const_iterator posTrainingIterator = posFileNames.begin(); posTrainingIterator != posFileNames.end(); ++posTrainingIterator) {
const Mat imageData = imread(*posTrainingIterator, 0);
hog.detect(imageData, foundDetection, hitThreshold, winStride, trainingPadding);
if (foundDetection.size() > 0) {
++truePositives;
falseNegatives += foundDetection.size() - 1;
} else {
++falseNegatives;
}
}
// Walk over negative training samples, generate images and detect
for (vector<string>::const_iterator negTrainingIterator = negFileNames.begin(); negTrainingIterator != negFileNames.end(); ++negTrainingIterator) {
const Mat imageData = imread(*negTrainingIterator, 0);
hog.detect(imageData, foundDetection, hitThreshold, winStride, trainingPadding);
if (foundDetection.size() > 0) {
falsePositives += foundDetection.size();
} else {
++trueNegatives;
}
}
printf("Results:\n\tTrue Positives: %u\n\tTrue Negatives: %u\n\tFalse Positives: %u\n\tFalse Negatives: %u\n", truePositives, trueNegatives, falsePositives, falseNegatives);
}
/**
* Test detection with custom HOG description vector
* @param hog
* @param hitThreshold threshold value for detection
* @param imageData
*/
static void detectTest(const HOGDescriptor& hog, const double hitThreshold, Mat& imageData) {
vector<Rect> found;
Size padding(Size(32, 32));
Size winStride(Size(8, 8));
hog.detectMultiScale(imageData, found, hitThreshold, winStride, padding);
showDetections(found, imageData);
}
// </editor-fold>
/**
* Main program entry point
* @param argc unused
* @param argv unused
* @return EXIT_SUCCESS (0) or EXIT_FAILURE (1)
*/
int main(int argc, char** argv) {
// <editor-fold defaultstate="collapsed" desc="Init">
HOGDescriptor hog; // Use standard parameters here
hog.winSize = Size(64, 128); // Default training images size as used in paper
// Get the files to train from somewhere
static vector<string> positiveTrainingImages;
static vector<string> negativeTrainingImages;
static vector<string> validExtensions;
validExtensions.push_back("jpg");
validExtensions.push_back("png");
validExtensions.push_back("ppm");
// </editor-fold>
// <editor-fold defaultstate="collapsed" desc="Read image files">
getFilesInDirectory(posSamplesDir, positiveTrainingImages, validExtensions);
getFilesInDirectory(negSamplesDir, negativeTrainingImages, validExtensions);
/// Retrieve the descriptor vectors from the samples
unsigned long overallSamples = positiveTrainingImages.size() + negativeTrainingImages.size();
// </editor-fold>
// <editor-fold defaultstate="collapsed" desc="Calculate HOG features and save to file">
// Make sure there are actually samples to train
if (overallSamples == 0) {
printf("No training sample files found, nothing to do!\n");
return EXIT_SUCCESS;
}
/// @WARNING: This is really important: some libraries (e.g. ROS) seem to set the system locale to one that uses decimal commas instead of points, which causes the file input parsing to fail
setlocale(LC_ALL, "C"); // Do not use the system locale
setlocale(LC_NUMERIC,"C");
setlocale(LC_ALL, "POSIX");
printf("Reading files, generating HOG features and save them to file '%s':\n", featuresFile.c_str());
float percent;
/**
* Save the calculated descriptor vectors to a file in a format that can be used by SVMlight for training
* @NOTE: If you split these steps into separate steps:
* 1. calculating features into memory (e.g. into a cv::Mat or vector< vector<float> >),
* 2. saving features to file / directly inject from memory to machine learning algorithm,
* the program may consume a considerable amount of main memory
*/
fstream File;
File.open(featuresFile.c_str(), ios::out);
if (File.good() && File.is_open()) {
// Remove following line for libsvm which does not support comments
// File << "# Use this file to train, e.g. SVMlight by issuing $ svm_learn -i 1 -a weights.txt " << featuresFile.c_str() << endl;
// Iterate over sample images
for (unsigned long currentFile = 0; currentFile < overallSamples; ++currentFile) {
storeCursor();
vector<float> featureVector;
// Get positive or negative sample image file path
const string currentImageFile = (currentFile < positiveTrainingImages.size() ? positiveTrainingImages.at(currentFile) : negativeTrainingImages.at(currentFile - positiveTrainingImages.size()));
// Output progress
if ( (currentFile+1) % 10 == 0 || (currentFile+1) == overallSamples ) {
percent = ((currentFile+1) * 100 / overallSamples);
printf("%5lu (%3.0f%%):\tFile '%s'", (currentFile+1), percent, currentImageFile.c_str());
fflush(stdout);
resetCursor();
}
// Calculate feature vector from current image file
calculateFeaturesFromInput(currentImageFile, featureVector, hog);
if (!featureVector.empty()) {
/* Put positive or negative sample class to file,
* true=positive, false=negative,
* and convert positive class to +1 and negative class to -1 for SVMlight
*/
File << ((currentFile < positiveTrainingImages.size()) ? "+1" : "-1");
// Save feature vector components
for (unsigned int feature = 0; feature < featureVector.size(); ++feature) {
File << " " << (feature + 1) << ":" << featureVector.at(feature);
}
File << endl;
}
}
printf("\n");
File.flush();
File.close();
} else {
printf("Error opening file '%s'!\n", featuresFile.c_str());
return EXIT_FAILURE;
}
// </editor-fold>
// <editor-fold defaultstate="collapsed" desc="Pass features to machine learning algorithm">
/// Read in and train the calculated feature vectors
printf("Calling %s\n", TRAINHOG_SVM_TO_TRAIN::getInstance()->getSVMName());
TRAINHOG_SVM_TO_TRAIN::getInstance()->read_problem(const_cast<char*> (featuresFile.c_str()));
TRAINHOG_SVM_TO_TRAIN::getInstance()->train(); // Call the core libsvm training procedure
printf("Training done, saving model file!\n");
TRAINHOG_SVM_TO_TRAIN::getInstance()->saveModelToFile(svmModelFile);
// </editor-fold>
// <editor-fold defaultstate="collapsed" desc="Generate single detecting feature vector from calculated SVM support vectors and SVM model">
printf("Generating representative single HOG feature vector using svmlight!\n");
vector<float> descriptorVector;
vector<unsigned int> descriptorVectorIndices;
// Generate a single detecting feature vector (v1 | b) from the trained support vectors, for use e.g. with the HOG algorithm
TRAINHOG_SVM_TO_TRAIN::getInstance()->getSingleDetectingVector(descriptorVector, descriptorVectorIndices);
// And save the precious to file system
saveDescriptorVectorToFile(descriptorVector, descriptorVectorIndices, descriptorVectorFile);
// </editor-fold>
// <editor-fold defaultstate="collapsed" desc="Test detecting vector">
// Detector detection tolerance threshold
const double hitThreshold = TRAINHOG_SVM_TO_TRAIN::getInstance()->getThreshold();
// Set our custom detecting vector
hog.setSVMDetector(descriptorVector);
printf("Testing training phase using training set as test set (just to check if training is ok - no detection quality conclusion with this!)\n");
detectTrainingSetTest(hog, hitThreshold, positiveTrainingImages, negativeTrainingImages);
printf("Testing custom detection using camera\n");
VideoCapture cap(0); // open the default camera
if(!cap.isOpened()) { // check if we succeeded
printf("Error opening camera!\n");
return EXIT_FAILURE;
}
Mat testImage;
while ((cvWaitKey(10) & 255) != 27) {
cap >> testImage; // get a new frame from camera
// cvtColor(testImage, testImage, CV_BGR2GRAY); // If you want to work on grayscale images
detectTest(hog, hitThreshold, testImage);
imshow("HOG custom detection", testImage);
}
// </editor-fold>
return EXIT_SUCCESS;
}
If I execute it as "./objectdetectmain.cpp", it gives errors as follows:
./objectdetectmain.cpp: line 24: using: command not found
./objectdetectmain.cpp: line 25: using: command not found
./objectdetectmain.cpp: line 29: static: command not found
./objectdetectmain.cpp: line 31: static: command not found
./objectdetectmain.cpp: line 33: static: command not found
./objectdetectmain.cpp: line 35: static: command not found
./objectdetectmain.cpp: line 37: static: command not found
./objectdetectmain.cpp: line 40: syntax error near unexpected token `('
./objectdetectmain.cpp: line 40: `static const Size trainingPadding = Size(0,0);'
It seems that you are trying to run the C++ source file as if it were a shell script. You need to compile it into an executable first, e.g.
g++ -Wall -g objectdetectmain.cpp -o objectdetectmain
which will give you an executable file named objectdetectmain (note lack of .cpp extension) which you can run:
./objectdetectmain
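Note that this particular program also uses OpenCV (and SVMlight), so the plain compile line above will fail at the link stage; the comment inside the source itself suggests the likely form (assuming pkg-config knows about your OpenCV installation, and that the SVMlight sources are compiled and linked as well):

g++ -Wall -g objectdetectmain.cpp -o objectdetectmain `pkg-config --cflags --libs opencv`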

Running OpenCV code with Matlab via mex fails, while in Visual Studio it works

I want to extract some Harris corners from an image and compute FREAK descriptors. Here is how I try to do it (the passed variables are globally defined):
void computeFeatures(cv::Mat &src, std::vector<cv::KeyPoint> &keypoints, cv::Mat &desc ) {
    cv::Mat featureSpace;
    featureSpace = cv::Mat::zeros( src.size(), CV_32FC1 );

    //- Detector parameters
    int blockSize = 3;
    int apertureSize = 3;
    double k = 0.04;

    //- Detecting corners
    cornerHarris( src, featureSpace, blockSize, apertureSize, k, cv::BORDER_DEFAULT );

    //- Thresholding featureSpace
    keypoints.clear();
    nonMaximumSuppression(featureSpace, keypoints, param.nms_n);

    //- compute FREAK-descriptor
    cv::FREAK freak(false, false, 22.0f, 4);
    freak.compute(src, keypoints, desc);
}
I can compile it with Visual Studio 12 as well as Matlab R2013b via mex. When I run it as "stand alone" (.exe) it works just fine. When I try to execute it via Matlab it fails with this message:
A buffer overrun has occurred in MATLAB.exe which has corrupted the
program's internal state. Press Break to debug the program or Continue
to terminate the program.
I mexed with the debug option '-g' and attached Visual Studio to Matlab to get closer to the error:
After nonMaximumSuppression() the size of keypoints is 233; when I jump into freak.compute(), the size is suddenly 83, with "random" values stored.
The actual error then occurs in KeyPointsFilter::runByKeypointSize, when keypoints should be erased, in keypoint.cpp line 256:
void KeyPointsFilter::runByKeypointSize( vector<KeyPoint>& keypoints, float minSize, float maxSize )
{
    CV_Assert( minSize >= 0 );
    CV_Assert( maxSize >= 0 );
    CV_Assert( minSize <= maxSize );

    keypoints.erase( std::remove_if(keypoints.begin(), keypoints.end(), SizePredicate(minSize, maxSize)),
                     keypoints.end() );
}
Is there some error I'm making with passing the keyPoint-vector? Has anybody run into a similar problem?
EDIT:
Here is the mex file, which uses the additional header "opencv_matlab.hpp" taken from MATLAB Central:
#include "opencv_matlab.hpp"
void mexFunction (int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[]) {
    // read command
    char command[128];
    mxGetString(prhs[0], command, 128);

    if (!strcmp(command, "push") || !strcmp(command, "replace")) {
        // check arguments
        if (nrhs != 1+1 && nrhs != 1+2)
            mexErrMsgTxt("1 or 2 inputs required (I1=left image,I2=right image).");
        if (!mxIsUint8(prhs[1]) || mxGetNumberOfDimensions(prhs[1]) != 2)
            mexErrMsgTxt("Input I1 (left image) must be a uint8_t matrix.");

        // determine input/output image properties
        const int *dims1 = mxGetDimensions(prhs[1]);
        const int nDims1 = mxGetNumberOfDimensions(prhs[1]);
        const int rows1 = dims1[0];
        const int cols1 = dims1[1];
        const int channels1 = (nDims1 == 3 ? dims1[2] : 1);

        // Allocate, copy, and convert the input image
        // @note: input is double
        cv::Mat I1_ = cv::Mat::zeros(cv::Size(cols1, rows1), CV_8UC(channels1));
        om::copyMatrixToOpencv<uchar>((unsigned char*)mxGetPr(prhs[1]), I1_);

        // push back single image
        if (nrhs == 1+1) {
            // compute features and put them to ring buffer
            pushBack(I1_, !strcmp(command, "replace"));
        // push back stereo image pair
        } else {
            if (!mxIsUint8(prhs[2]) || mxGetNumberOfDimensions(prhs[2]) != 2)
                mexErrMsgTxt("Input I2 (right image) must be a uint8_t matrix.");

            // determine input/output image properties
            const int *dims2 = mxGetDimensions(prhs[2]);
            const int nDims2 = mxGetNumberOfDimensions(prhs[2]);
            const int rows2 = dims2[0];
            const int cols2 = dims2[1];
            const int channels2 = (nDims2 == 3 ? dims2[2] : 1);

            // Allocate, copy, and convert the input image
            // @note: input is double
            cv::Mat I2_ = cv::Mat::zeros(cv::Size(cols2, rows2), CV_8UC(channels2));
            om::copyMatrixToOpencv<uchar>((unsigned char*)mxGetPr(prhs[2]), I2_);

            // check image size
            if (dims1[0] != dims2[0] || dims1[1] != dims2[1])
                mexErrMsgTxt("Input I1 and I2 must be images of same size.");

            // compute features and put them to ring buffer
            pushBack(I1_, I2_, !strcmp(command, "replace"));
        }
    } else {
        mexPrintf("Unknown command: %s\n", command);
    }
}
And here is an additional part of the main cpp project.
std::vector<cv::KeyPoint> k1c1, k2c1, k1p1, k2p1; // KeyPoints
cv::Mat d1c1, d2c1, d1p1, d2p1;                   // descriptors

void pushBack (cv::Mat &I1, cv::Mat &I2, const bool replace) {
    // sanity check
    if (I1.empty()) {
        cerr << "ERROR: Image empty!" << endl;
        return;
    }

    if (replace) {
        //if (!k1c1.empty())
        k1c1.clear(); k2c1.clear();
        d1c1.release(); d2c1.release();
    } else {
        k1p1.clear(); k2p1.clear();
        d1p1.release(); d2p1.release();
        k1p1 = k1c1; k2p1 = k2c1;
        d1c1.copyTo(d1p1); d2c1.copyTo(d2p1);
        k1c1.clear(); k2c1.clear();
        d1c1.release(); d2c1.release();
    }

    // compute new features for current frame
    computeFeatures(I1, k1c1, d1c1);
    if (!I2.empty())
        computeFeatures(I2, k2c1, d2c1);
}
And here is how I call the mex file from Matlab:
I1p = imread('\I1.bmp');
I2p = imread('\I2.bmp');
harris_freak('push',I1p,I2p);
Hope this helps...
I hope this is the correct way to give an answer to my own question.
After a couple of days I found a workaround of sorts. Instead of building the mex file in Matlab, which gives the above-mentioned error, I built it in Visual Studio with instructions taken from here.
Now everything works just fine.
It kind of bothers me not to know how to do it with Matlab, but hey, maybe someone still has an idea.
Thanks to the commenters for taking the time to look through my question!
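One way to narrow down why the Matlab-built mex file corrupts the vector (this is an assumption, not a confirmed diagnosis): a debug-built mex file linked against release-built OpenCV DLLs gives std::vector different internal layouts on the two sides of the DLL boundary, and passing keypoints across that boundary then breaks exactly at the first resize/erase. A quick check is to print the vector's size under both build configurations:

#include <cstdio>
#include <vector>
#include <opencv2/core/core.hpp>

// Hypothetical check: if this prints different values when compiled with the
// mex settings versus the Visual Studio (standalone) settings, the two builds
// use different STL layouts (e.g. debug vs. release iterator levels), and a
// std::vector must not be resized on the other side of the DLL boundary.
int main() {
    std::printf("sizeof(std::vector<cv::KeyPoint>) = %u\n",
                (unsigned)sizeof(std::vector<cv::KeyPoint>));
    return 0;
}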
If you have the Computer Vision System Toolbox then you do not need mex. It includes the detectHarrisFeatures function for detecting Harris corners, and the extractFeatures function, which can compute FREAK descriptors.