OpenCV image recognition - setting up ANN MLP - C++

I am new to the OpenCV world and neural networks, but I have some coding experience in C++/Java.
I created my first ANN MLP and taught it XOR:
#include <opencv2/core.hpp>
#include <opencv2/imgcodecs.hpp>
#include <opencv2/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/ml/ml.hpp>
#include <iostream>
#include <iomanip>
using namespace cv;
using namespace ml;
using namespace std;
void print(Mat& mat, int prec)
{
for (int i = 0; i<mat.size().height; i++)
{
cout << "[";
for (int j = 0; j<mat.size().width; j++)
{
cout << fixed << setw(2) << setprecision(prec) << mat.at<float>(i, j);
if (j != mat.size().width - 1)
cout << ", ";
else
cout << "]" << endl;
}
}
}
int main()
{
const int hiddenLayerSize = 4;
float inputTrainingDataArray[4][2] = {
{ 0.0, 0.0 },
{ 0.0, 1.0 },
{ 1.0, 0.0 },
{ 1.0, 1.0 }
};
Mat inputTrainingData = Mat(4, 2, CV_32F, inputTrainingDataArray);
float outputTrainingDataArray[4][1] = {
{ 0.0 },
{ 1.0 },
{ 1.0 },
{ 0.0 }
};
Mat outputTrainingData = Mat(4, 1, CV_32F, outputTrainingDataArray);
Ptr<ANN_MLP> mlp = ANN_MLP::create();
Mat layersSize = Mat(3, 1, CV_16U);
layersSize.row(0) = Scalar(inputTrainingData.cols);
layersSize.row(1) = Scalar(hiddenLayerSize);
layersSize.row(2) = Scalar(outputTrainingData.cols);
mlp->setLayerSizes(layersSize);
mlp->setActivationFunction(ANN_MLP::ActivationFunctions::SIGMOID_SYM);
TermCriteria termCrit = TermCriteria(
TermCriteria::Type::COUNT + TermCriteria::Type::EPS,
100000000,
0.000000000000000001
);
mlp->setTermCriteria(termCrit);
mlp->setTrainMethod(ANN_MLP::TrainingMethods::BACKPROP);
Ptr<TrainData> trainingData = TrainData::create(
inputTrainingData,
SampleTypes::ROW_SAMPLE,
outputTrainingData
);
mlp->train(trainingData
/*, ANN_MLP::TrainFlags::UPDATE_WEIGHTS
+ ANN_MLP::TrainFlags::NO_INPUT_SCALE
+ ANN_MLP::TrainFlags::NO_OUTPUT_SCALE*/
);
for (int i = 0; i < inputTrainingData.rows; i++) {
Mat sample = Mat(1, inputTrainingData.cols, CV_32F, inputTrainingDataArray[i]);
Mat result;
mlp->predict(sample, result);
cout << sample << " -> ";// << result << endl;
print(result, 0);
cout << endl;
}
return 0;
}
It works very well for this simple problem; I also taught this network 1-10 to binary conversion.
But I need to use an MLP for simple image classification - road signs. I wrote the code for loading the training images and preparing the matrices for learning, but I'm not able to train the network - it "learns" in one second even with 1,000,000 iterations! And it produces garbage results, the same for all inputs!
Here are my test images and the source code:
#include <opencv2/core.hpp>
#include <opencv2/imgcodecs.hpp>
#include <opencv2/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/ml/ml.hpp>
#include <iostream>
#include <chrono>
#include <memory>
#include <iomanip>
#include <climits>
#include <Windows.h>
using namespace cv;
using namespace ml;
using namespace std;
using namespace chrono;
const int WIDTH_SIZE = 50;
const int HEIGHT_SIZE = (int)(WIDTH_SIZE * sqrt(3)) / 2;
const int IMAGE_DATA_SIZE = WIDTH_SIZE * HEIGHT_SIZE;
void print(Mat& mat, int prec)
{
for (int i = 0; i<mat.size().height; i++)
{
cout << "[ ";
for (int j = 0; j<mat.size().width; j++)
{
cout << fixed << setw(2) << setprecision(prec) << mat.at<float>(i, j);
if (j != mat.size().width - 1)
cout << ", ";
else
cout << " ]" << endl;
}
}
}
bool loadImage(string imagePath, Mat& outputImage)
{
// load image in grayscale
Mat image = imread(imagePath, IMREAD_GRAYSCALE);
Mat temp;
// check for invalid input
if (image.empty()) {
cout << "Could not open or find the image" << std::endl;
return false;
}
// resize the image
Size size(WIDTH_SIZE, HEIGHT_SIZE);
resize(image, temp, size, 0, 0, CV_INTER_AREA);
// convert to float 1-channel
temp.convertTo(outputImage, CV_32FC1, 1.0/255.0);
return true;
}
vector<string> getFilesNamesInFolder(string folder)
{
vector<string> names;
char search_path[200];
sprintf(search_path, "%s/*.*", folder.c_str());
WIN32_FIND_DATA fd;
HANDLE hFind = ::FindFirstFile(search_path, &fd);
if (hFind != INVALID_HANDLE_VALUE) {
do {
// read all (real) files in current folder
// , delete '!' read other 2 default folder . and ..
if (!(fd.dwFileAttributes & FILE_ATTRIBUTE_DIRECTORY)) {
names.push_back(fd.cFileName);
}
} while (::FindNextFile(hFind, &fd));
::FindClose(hFind);
}
return names;
}
class Sign {
public:
enum class Category { A = 'A', B = 'B', C = 'C', D = 'D' };
Mat image;
Category category;
int number;
Sign(Mat& image, string name) :image(image) {
category = static_cast<Category>(name.at(0));
number = stoi(name.substr(2, name.length()));
};
};
vector<Sign> loadSignsFromFolder(String folderName) {
vector<Sign> roadSigns;
for (string fileName : getFilesNamesInFolder(folderName)) {
Mat image;
loadImage(folderName + fileName, image);
roadSigns.emplace_back(image, fileName.substr(0, (fileName.length() - 4))); //cut .png
}
return roadSigns;
}
void showSignsInWindows(vector<Sign> roadSigns) {
for (Sign sign : roadSigns) {
String windowName = "Sign " + to_string(sign.number);
namedWindow(windowName, WINDOW_AUTOSIZE);
imshow(windowName, sign.image);
}
waitKey(0);
}
Mat getInputDataFromSignsVector(vector<Sign> roadSigns) {
Mat roadSignsImageData;
for (Sign sign : roadSigns) {
Mat signImageDataInOneRow = sign.image.reshape(0, 1);
roadSignsImageData.push_back(signImageDataInOneRow);
}
return roadSignsImageData;
}
Mat getOutputDataFromSignsVector(vector<Sign> roadSigns) {
int signsCount = (int) roadSigns.size();
int signsVectorSize = signsCount + 1;
Mat roadSignsData(0, signsVectorSize, CV_32FC1);
int i = 1;
for (Sign sign : roadSigns) {
vector<float> outputTraningVector(signsVectorSize);
fill(outputTraningVector.begin(), outputTraningVector.end(), -1.0);
outputTraningVector[i++] = 1.0;
Mat tempMatrix(outputTraningVector, false);
roadSignsData.push_back(tempMatrix.reshape(0, 1));
}
return roadSignsData;
}
int main(int argc, char* argv[])
{
if (argc != 2) {
cout << " Usage: display_image ImageToLoadAndDisplay" << endl;
return -1;
}
const int hiddenLayerSize = 500;
vector<Sign> roadSigns = loadSignsFromFolder("../../../Znaki/A/");
Mat inputTrainingData = getInputDataFromSignsVector(roadSigns);
Mat outputTrainingData = getOutputDataFromSignsVector(roadSigns);
Ptr<ANN_MLP> mlp = ANN_MLP::create();
Mat layersSize = Mat(3, 1, CV_16U);
layersSize.row(0) = Scalar(inputTrainingData.cols);
layersSize.row(1) = Scalar(hiddenLayerSize);
layersSize.row(2) = Scalar(outputTrainingData.cols);
mlp->setLayerSizes(layersSize);
mlp->setActivationFunction(ANN_MLP::ActivationFunctions::SIGMOID_SYM, 1.0, 1.0);
mlp->setTrainMethod(ANN_MLP::TrainingMethods::BACKPROP, 0.05, 0.05);
//mlp->setTrainMethod(ANN_MLP::TrainingMethods::RPROP);
TermCriteria termCrit = TermCriteria(
TermCriteria::Type::MAX_ITER //| TermCriteria::Type::EPS,
,100 //(int) INT_MAX
,0.000001
);
mlp->setTermCriteria(termCrit);
Ptr<TrainData> trainingData = TrainData::create(
inputTrainingData,
SampleTypes::ROW_SAMPLE,
outputTrainingData
);
auto start = system_clock::now();
mlp->train(trainingData
//, //ANN_MLP::TrainFlags::UPDATE_WEIGHTS
, ANN_MLP::TrainFlags::NO_INPUT_SCALE
+ ANN_MLP::TrainFlags::NO_OUTPUT_SCALE
);
auto duration = duration_cast<milliseconds> (system_clock::now() - start);
cout << "Training time: " << duration.count() << "ms" << endl;
for (int i = 0; i < inputTrainingData.rows; i++) {
Mat result;
//mlp->predict(inputTrainingData.row(i), result);
mlp->predict(roadSigns[i].image.reshape(0, 1), result);
//cout << result << endl;
print(result, 2);
}
//showSignsInWindows(roadSigns);
return 0;
}
What is wrong in this code, such that XOR works but images do not? I checked the input and output matrices and they're correct... Could somebody also explain to me when I should use ANN_MLP::TrainFlags::NO_INPUT_SCALE and ANN_MLP::TrainFlags::NO_OUTPUT_SCALE, and what values of the setActivationFunction and setTrainMethod parameters I should use?
Thanks!

The problem was the backprop weight scale parameter - it was too big, so the ANN couldn't learn more difficult things.
I changed the line to mlp->setTrainMethod(ANN_MLP::TrainingMethods::BACKPROP, 0.0001); and the hidden layer size to 100 (to speed up the learning) - now it's working!
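For reference, a minimal sketch of the changed lines (everything else stays as in the question's code; the second argument of setTrainMethod is the backprop weight gradient scale, param1):
const int hiddenLayerSize = 100; // was 500; a smaller hidden layer trains faster
// 0.05 was far too large a weight scale for this problem; 0.0001 converges
mlp->setTrainMethod(ANN_MLP::TrainingMethods::BACKPROP, 0.0001);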

Related

Is opencv interrupting serial communication with arduino?

I am trying to make a program that uses servos connected to an Arduino to follow your face. I'm using Visual C++ in Visual Studio 2017 with OpenCV 4.1 to do the facial recognition, then sending the location of the identified face to an Arduino via a serial connection.
I'm new to OpenCV, so I've been going through many tutorials to try to make code that works.
The plan is to do the facial recognition, then calculate the location of the face in Visual Studio, then combine the x and y locations into a single string and send it to the Arduino over serial. The Arduino then splits the coordinates, separated by a colon, using strtok(). It will then move the servos accordingly to keep the tracked face in the center of the screen.
I have tested the serial comm in a separate C++ project and it seems to work fine with the current Arduino code (nowhere near finished, because I ran into a hiccup with the serial comm), except that when I send the location string, the servo moves to the desired spot and then returns to its starting location.
When I try to implement the serial communication in the project with OpenCV, it sends the location once, then appears to stop sending serial commands. I've tried debugging manually by calling the sendSerial function in other locations to see if I can get it to send. I've looked around for solutions but haven't found anything definite, other than that it may be the waitKey(10) function. If so, is there a way around this?
Thanks.
###############SerialPort.h##############
#ifndef SERIALPORT_H
#define SERIALPORT_H
#define ARDUINO_WAIT_TIME 2000
#define MAX_DATA_LENGTH 255
#include <windows.h>
#include <stdio.h>
#include <stdlib.h>
class SerialPort
{
private:
HANDLE handler;
bool connected;
COMSTAT status;
DWORD errors;
public:
SerialPort(char *portName);
~SerialPort();
int readSerialPort(char *buffer, unsigned int buf_size);
bool writeSerialPort(char *buffer, unsigned int buf_size);
bool isConnected();
};
#endif // SERIALPORT_H
#################SerialSource.cpp##################
#include "SerialPort.h"
SerialPort::SerialPort(char *portName)
{
this->connected = false;
this->handler = CreateFileA(static_cast<LPCSTR>(portName),
GENERIC_READ | GENERIC_WRITE,
0,
NULL,
OPEN_EXISTING,
FILE_ATTRIBUTE_NORMAL,
NULL);
if (this->handler == INVALID_HANDLE_VALUE) {
if (GetLastError() == ERROR_FILE_NOT_FOUND) {
printf("ERROR: Handle was not attached. Reason: %s not available\n", portName);
}
else
{
printf("ERROR!!!");
}
}
else {
DCB dcbSerialParameters = { 0 };
if (!GetCommState(this->handler, &dcbSerialParameters)) {
printf("failed to get current serial parameters");
}
else {
dcbSerialParameters.BaudRate = CBR_9600;
dcbSerialParameters.ByteSize = 8;
dcbSerialParameters.StopBits = ONESTOPBIT;
dcbSerialParameters.Parity = NOPARITY;
dcbSerialParameters.fDtrControl = DTR_CONTROL_ENABLE;
if (!SetCommState(handler, &dcbSerialParameters))
{
printf("ALERT: could not set Serial port parameters\n");
}
else {
this->connected = true;
PurgeComm(this->handler, PURGE_RXCLEAR | PURGE_TXCLEAR);
Sleep(ARDUINO_WAIT_TIME);
}
}
}
}
SerialPort::~SerialPort()
{
if (this->connected) {
this->connected = false;
CloseHandle(this->handler);
}
}
int SerialPort::readSerialPort(char *buffer, unsigned int buf_size)
{
DWORD bytesRead;
unsigned int toRead = 0;
ClearCommError(this->handler, &this->errors, &this->status);
if (this->status.cbInQue > 0) {
if (this->status.cbInQue > buf_size) {
toRead = buf_size;
}
else toRead = this->status.cbInQue;
}
if (ReadFile(this->handler, buffer, toRead, &bytesRead, NULL)) return bytesRead;
return 0;
}
bool SerialPort::writeSerialPort(char *buffer, unsigned int buf_size)
{
DWORD bytesSend;
if (!WriteFile(this->handler, (void*)buffer, buf_size, &bytesSend, 0)) {
ClearCommError(this->handler, &this->errors, &this->status);
return false;
}
else return true;
}
bool SerialPort::isConnected()
{
return this->connected;
}
###################faceDetect.cpp################
// CPP program to detects face in a video
// Include required header files from OpenCV directory
#include <opencv2/objdetect.hpp>
#include <opencv2/highgui.hpp>
#include <opencv2/imgproc.hpp>
#include <iostream>
#include <opencv2/opencv.hpp>
#include <string>
#include <stdlib.h>
#include "SerialPort.h"
#include <sstream>
#include <iomanip>
using namespace std;
using namespace cv;
//Set up serial comm
char output[MAX_DATA_LENGTH];
char port[] = "\\\\.\\COM3";
char incoming[MAX_DATA_LENGTH];
// Function for Face Detection
void detectAndDraw(Mat& img, CascadeClassifier& cascade, double scale, SerialPort arduino);
string cascadeName;
// Function for sending locations to arduino
void sendSerial(string locations, SerialPort arduino);
int main(int argc, const char** argv)
{
//Establish connection to serial
SerialPort arduino(port);
if (arduino.isConnected()) {
cout << "COnnection Established" << endl;
}
else {
cout << "Error in port name" << endl;
}
// VideoCapture class for playing video for which faces to be detected
VideoCapture capture;
Mat frame, image;
// PreDefined trained XML classifiers with facial features
CascadeClassifier cascade;
double scale = 1;
// Change path before execution
cascade.load("C:/opencv/sources/data/haarcascades/haarcascade_frontalface_default.xml");
// Start Video..1) 0 for WebCam 2) "Path to Video" for a Local Video
capture.open(CAP_MSMF);
//sendSerial("400:100", arduino);
if (capture.isOpened())
{
// Capture frames from video and detect faces
cout << "Face Detection Started...." << endl;
while (1)
{
capture >> frame;
if (frame.empty())
break;
Mat frame1 = frame.clone();
detectAndDraw(frame1, cascade, scale, arduino);
char c = (char)waitKey(10);
// Press q to exit from window
if (c == 27 || c == 'q' || c == 'Q')
break;
}
}
else
cout << "Could not Open Camera";
return 0;
}
void sendSerial(string locations, SerialPort arduino) {
//string command;
//command = to_string(xloc);
cout << locations << endl;
char *charArray = new char[locations.size() + 1];
copy(locations.begin(), locations.end(), charArray);
charArray[locations.size()] = '\n';
arduino.writeSerialPort(charArray, MAX_DATA_LENGTH);
//arduino.readSerialPort(output, MAX_DATA_LENGTH);
//cout << output;
delete[] charArray;
//
//command = to_string(yloc);
//copy(command.begin(), command.end(), charArray);
//charArray[command.size()] = '\n';
//arduino.writeSerialPort(charArray, MAX_DATA_LENGTH);
////arduino.readSerialPort(output, MAX_DATA_LENGTH);
////cout << output;
//delete[] charArray;
}
void detectAndDraw(Mat& img, CascadeClassifier& cascade,
double scale, SerialPort arduino)
{
vector<Rect> faces;
Mat gray, smallImg;
cvtColor(img, gray, COLOR_BGR2GRAY); // Convert to Gray Scale
double fx = 1 / scale;
// Resize the Grayscale Image
resize(gray, smallImg, Size(), fx, fx, INTER_LINEAR);
equalizeHist(smallImg, smallImg);
// Detect faces of different sizes using cascade classifier
cascade.detectMultiScale(smallImg, faces, 1.1,
2, 0 | CASCADE_SCALE_IMAGE, Size(30, 30));
// Draw circles around the faces
for (size_t i = 0; i < faces.size(); i++)
{
Rect r = faces[i];
Mat smallImgROI;
int x = faces[i].x;
int y = faces[i].y;
int h = y + faces[i].height;
int w = x + faces[i].width;
int centerX = x + (.5* faces[i].width);
int centerY = y + (.5* faces[i].height);
if (abs(320 - centerX) <= 50) {
if (abs(240 - centerY) <= 50) {
rectangle(img,
Point(x, y),
Point(w, h),
Scalar(0, 0, 275),
2,
8,
0);
}
}
else {
rectangle(img,
Point(x, y),
Point(w, h),
Scalar(275, 275, 275),
2,
8,
0);
}
stringstream stringX;
stringstream stringY;
stringX << std::setw(3) << std::setfill('0') << centerX;
stringY << std::setw(3) << std::setfill('0') << centerY;
std::stringstream ss;
//ss << std::setw(3) << std::setfill('0') << centerX << ":"<< centerY;
//std::string s = ss.str();
std::string s = stringX.str() + ":" + stringY.str();
//cout << s << endl;
sendSerial(s, arduino);
smallImgROI = smallImg(r);
if (arduino.isConnected()) {
cout << "COnnection Established" << endl;
//sendSerial("400:100", arduino);
}
}
// Show Processed Image with detected faces
imshow("Face Detection", img);
}
#####################arduino code################
#include <Servo.h>
String input;
char array[6];
char *strings[3];
char *ptr = NULL;
int xloc;
int yloc;
int hServoPin = 9;
Servo hServo;
int ledPin = 13;
void setup() {
//set up servos
hServo.attach(hServoPin);
//start serial connection
Serial.begin(9600);
//***** delete later *****
pinMode(ledPin, OUTPUT);
}
void loop() {
if(Serial.available()){
//grab "xloc:yloc" and convert to char array
input = Serial.readStringUntil('\n');
//delete later
//Serial.print("input; ");
//Serial.println(input);
for(int i = 0; i<6; i++){
array[i] = input.charAt(i);
//Serial.print(array[i]);
}
//split char array into two entities
byte index = 0;
ptr = strtok(array, ":;"); // takes a list of delimiters
while(ptr != NULL)
{
strings[index] = ptr;
index++;
ptr = strtok(NULL, ":;"); // takes a list of delimiters
//Serial.println("loop");
}
//set xloc and yloc respectively
xloc = atoi(strings[0]);
yloc = atoi(strings[1]);
}
if((xloc < 214)){
hServo.write(0);
delay(100);
}
else if((xloc > 214) && (xloc < 328)){
hServo.write(90);
delay(100);
}
else if((xloc > 328)){
hServo.write(180);
delay(100);
}
}
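One thing worth pointing out in the code above (an observation, not a confirmed fix): SerialPort is passed to detectAndDraw and sendSerial by value. Every call therefore creates a copy that shares the same HANDLE, and when that copy is destroyed its destructor calls CloseHandle, so the port can end up closed after the first frame. Also, writeSerialPort is called with MAX_DATA_LENGTH even though the buffer only holds locations.size() + 1 bytes. A minimal sketch of safer signatures, under those assumptions:
// Pass the port by reference so no temporary copy's destructor closes
// the shared COM handle; detectAndDraw should take SerialPort& as well.
void sendSerial(const std::string &locations, SerialPort &arduino) {
std::string msg = locations + "\n"; // same trailing newline as before
// Send exactly msg.size() bytes; sending MAX_DATA_LENGTH would read
// past the end of the buffer.
arduino.writeSerialPort(&msg[0], (unsigned int)msg.size());
}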

Mxnet c++ inference with MXPredSetInput segmentation fault

1. Background
I have tried https://github.com/apache/incubator-mxnet/tree/master/example/image-classification/predict-cpp and succeeded.
But when I try to deploy MXNet in C++ with my own model, I get a segmentation fault:
[17:33:07] src/nnvm/legacy_json_util.cc:209: Loading symbol saved by previous version v1.2.1. Attempting to upgrade...
Signal: SIGSEGV (Segmentation fault)
2. Code with error:
MXPredSetInput(pred_hnd, "data", image_data.data(), static_cast<mx_uint>(image_size));
3. Tips
First I thought it was because the input data shape was not compatible with the model's input layer. But I asked the model designer: it's a ResNet model with conv layers only, so any input shape should be OK.
4. Download model:
Download them and put them into the model dir:
https://drive.google.com/drive/folders/16MEKNOz_iwquVxHMk9c7igmBNuT6w7wz?usp=sharing
5. Code: https://github.com/jaysimon/mxnet_cpp_infere
#include <cstdio>
#include <cstdlib>
#include <iostream>
#include <fstream>
#include <vector>
#include <memory>
#include <thread>
#include <iomanip>
#include <opencv2/opencv.hpp>
// Path for c_predict_api
#include <mxnet/c_predict_api.h>
const mx_float DEFAULT_MEAN = 117.0;
static std::string trim(const std::string& input) {
auto not_space = [](int ch) {
return !std::isspace(ch);
};
auto output = input;
output.erase(output.begin(), std::find_if(output.begin(), output.end(), not_space));
output.erase(std::find_if(output.rbegin(), output.rend(), not_space).base(), output.end());
return output;
}
// Read file to buffer
class BufferFile {
public :
std::string file_path_;
std::size_t length_ = 0;
std::unique_ptr<char[]> buffer_;
explicit BufferFile(const std::string& file_path)
: file_path_(file_path) {
std::ifstream ifs(file_path.c_str(), std::ios::in | std::ios::binary);
if (!ifs) {
std::cerr << "Can't open the file. Please check " << file_path << ". \n";
return;
}
ifs.seekg(0, std::ios::end);
length_ = static_cast<std::size_t>(ifs.tellg());
ifs.seekg(0, std::ios::beg);
std::cout << file_path.c_str() << " ... " << length_ << " bytes\n";
// Buffer as null terminated to be converted to string
buffer_.reset(new char[length_ + 1]);
buffer_[length_] = 0;
ifs.read(buffer_.get(), length_);
ifs.close();
}
std::size_t GetLength() {
return length_;
}
char* GetBuffer() {
return buffer_.get();
}
};
void GetImageFile(const std::string& image_file,
mx_float* image_data, int channels,
cv::Size resize_size, const mx_float* mean_data = nullptr) {
// Read all kinds of file into a BGR color 3 channels image
cv::Mat im_ori = cv::imread(image_file, cv::IMREAD_COLOR);
if (im_ori.empty()) {
std::cerr << "Can't open the image. Please check " << image_file << ". \n";
assert(false);
}
cv::Mat im;
resize(im_ori, im, resize_size);
int size = im.rows * im.cols * channels;
mx_float* ptr_image_r = image_data;
mx_float* ptr_image_g = image_data + size / 3;
mx_float* ptr_image_b = image_data + size / 3 * 2;
float mean_b, mean_g, mean_r;
mean_b = mean_g = mean_r = DEFAULT_MEAN;
mean_b = 103.06;
mean_g = 115.9;
mean_r = 123.15;
for (int i = 0; i < im.rows; i++) {
auto data = im.ptr<uchar>(i);
for (int j = 0; j < im.cols; j++) {
if (channels > 1) {
*ptr_image_b++ = static_cast<mx_float>(*data++) - mean_b;
*ptr_image_g++ = static_cast<mx_float>(*data++) - mean_g;
}
*ptr_image_r++ = static_cast<mx_float>(*data++) - mean_r;
}
}
}
// LoadSynsets
// Code from : https://github.com/pertusa/mxnet_predict_cc/blob/master/mxnet_predict.cc
std::vector<std::string> LoadSynset(const std::string& synset_file) {
std::ifstream fi(synset_file.c_str());
if (!fi.is_open()) {
std::cerr << "Error opening synset file " << synset_file << std::endl;
assert(false);
}
std::vector<std::string> output;
std::string synset, lemma;
while (fi >> synset) {
getline(fi, lemma);
output.push_back(lemma);
}
fi.close();
return output;
}
void PrintOutputResult(const std::vector<float>& data, const std::vector<std::string>& synset) {
if (data.size() != synset.size()) {
std::cerr << "Result data and synset size do not match!" << std::endl;
}
float best_accuracy = 0.0;
std::size_t best_idx = 0;
for (std::size_t i = 0; i < data.size(); ++i) {
std::cout << "Accuracy[" << i << "] = " << std::setprecision(8) << data[i] << std::endl;
if (data[i] > best_accuracy) {
best_accuracy = data[i];
best_idx = i;
}
}
std::cout << "Best Result: " << trim(synset[best_idx]) << " (id=" << best_idx << ", " <<
"accuracy=" << std::setprecision(8) << best_accuracy << ")" << std::endl;
}
void predict(PredictorHandle pred_hnd, const std::vector<mx_float> &image_data,
NDListHandle nd_hnd, const std::string &synset_file, int i) {
auto image_size = image_data.size();
// Set Input
//>>>>>>>>>>>>>>>>>>>> Problem code <<<<<<<<<<<<<<<<<<<<<<<
MXPredSetInput(pred_hnd, "data", image_data.data(), static_cast<mx_uint>(image_size));
// <<<<<<<<<<<<<<<<<<<<<<< Problem code <<<<<<<<<<<<<<<<<<<<<<<
// Do Predict Forward
MXPredForward(pred_hnd);
mx_uint output_index = 0;
mx_uint* shape = nullptr;
mx_uint shape_len;
// Get Output Result
MXPredGetOutputShape(pred_hnd, output_index, &shape, &shape_len);
std::size_t size = 1;
for (mx_uint i = 0; i < shape_len; ++i) { size *= shape[i]; }
std::vector<float> data(size);
MXPredGetOutput(pred_hnd, output_index, &(data[0]), static_cast<mx_uint>(size));
// Release NDList
if (nd_hnd) {
MXNDListFree(nd_hnd);
}
// Release Predictor
MXPredFree(pred_hnd);
// Synset path for your model, you have to modify it
auto synset = LoadSynset(synset_file);
// Print Output Data
PrintOutputResult(data, synset);
}
int main(int argc, char* argv[]) {
if (argc < 2) {
std::cout << "No test image here." << std::endl
<< "Usage: ./image-classification-predict apple.jpg [num_threads]" << std::endl;
return EXIT_FAILURE;
}
std::string test_file(argv[1]);
int num_threads = 1;
if (argc == 3)
num_threads = std::atoi(argv[2]);
// Models path for your model, you have to modify it
std::string json_file = "../model/rfcn_dcn_chicken-0000.json";
std::string param_file = "../model/rfcn_dcn_chicken-0000.params";
std::string synset_file = "../model/synset.txt";
std::string nd_file = "../model/mean_224.nd";
BufferFile json_data(json_file);
BufferFile param_data(param_file);
// Parameters
int dev_type = 1; // 1: cpu, 2: gpu
int dev_id = 0; // arbitrary.
mx_uint num_input_nodes = 1; // 1 for feedforward
const char* input_key[1] = { "data" };
const char** input_keys = input_key;
// Image size and channels
int width = 1000;
int height = 562;
int channels = 3;
const mx_uint input_shape_indptr[2] = { 0, 4 };
const mx_uint input_shape_data[4] = { 1,
static_cast<mx_uint>(channels),
static_cast<mx_uint>(height),
static_cast<mx_uint>(width) };
if (json_data.GetLength() == 0 || param_data.GetLength() == 0) {
return EXIT_FAILURE;
}
auto image_size = static_cast<std::size_t>(width * height * channels);
// Read Mean Data
const mx_float* nd_data = nullptr;
NDListHandle nd_hnd = nullptr;
BufferFile nd_buf(nd_file);
if (nd_buf.GetLength() > 0) {
mx_uint nd_index = 0;
mx_uint nd_len;
const mx_uint* nd_shape = nullptr;
const char* nd_key = nullptr;
mx_uint nd_ndim = 0;
MXNDListCreate(static_cast<const char*>(nd_buf.GetBuffer()),
static_cast<int>(nd_buf.GetLength()),
&nd_hnd, &nd_len);
MXNDListGet(nd_hnd, nd_index, &nd_key, &nd_data, &nd_shape, &nd_ndim);
}
// Read Image Data
std::vector<mx_float> image_data(image_size);
GetImageFile(test_file, image_data.data(), channels, cv::Size(width, height), nd_data);
if (num_threads == 1) {
// Create Predictor
PredictorHandle pred_hnd;
MXPredCreate(static_cast<const char*>(json_data.GetBuffer()),
static_cast<const char*>(param_data.GetBuffer()),
static_cast<int>(param_data.GetLength()),
dev_type,
dev_id,
num_input_nodes,
input_keys,
input_shape_indptr,
input_shape_data,
&pred_hnd);
assert(pred_hnd);
predict(pred_hnd, image_data, nd_hnd, synset_file, 0);
} else {
// Create Predictor
std::vector<PredictorHandle> pred_hnds(num_threads, nullptr);
MXPredCreateMultiThread(static_cast<const char*>(json_data.GetBuffer()),
static_cast<const char*>(param_data.GetBuffer()),
static_cast<int>(param_data.GetLength()),
dev_type,
dev_id,
num_input_nodes,
input_keys,
input_shape_indptr,
input_shape_data,
pred_hnds.size(),
pred_hnds.data());
for (auto hnd : pred_hnds)
assert(hnd);
std::vector<std::thread> threads;
for (int i = 0; i < num_threads; i++)
threads.emplace_back(predict, pred_hnds[i], image_data, nd_hnd, synset_file, i);
for (int i = 0; i < num_threads; i++)
threads[i].join();
}
printf("run successfully\n");
return EXIT_SUCCESS;
}
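Since the crash happens inside MXPredSetInput, one cheap sanity check (a sketch, not a confirmed fix) to insert in main before the predictor is used: verify that the buffer size matches the input shape the predictor was created with.
// Sketch: the product of the declared input shape (1 x channels x height
// x width) must equal the number of floats passed to MXPredSetInput.
std::size_t expected = 1;
for (mx_uint k = input_shape_indptr[0]; k < input_shape_indptr[1]; ++k) {
expected *= input_shape_data[k];
}
if (image_data.size() != expected) {
std::cerr << "Input size mismatch: " << image_data.size()
<< " vs " << expected << std::endl;
return EXIT_FAILURE;
}
If the model was exported with a fixed input shape different from 1x3x562x1000, MXPredCreate and MXPredSetInput could also disagree about the layout, which is worth double-checking against the JSON file.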

Performing MNIST example with mlpack

I'd like to do a simple DNN example to get to know mlpack.
The simple example is to train a DNN on the MNIST digits - the classic
ML hello-world case :-)
I managed to prepare all the images using OpenCV filters - the result is a
single-channel grayscale cv::Mat matrix.
I also managed to convert the pixel values to an Armadillo matrix (arma::mat)
and to label these pictures.
But somewhere in these two operations, I made a mistake.
I have N examples and M input neurons.
IN means "Input Neuron"
OL means "OutputLabel"
Ex means "Example"
The structure of my training data is like this:
arma::mat TrainSet = {{IN_1/Ex_1, IN_1/Ex_2, IN_1/Ex_3,...,IN_1/Ex_N},
{IN_2/Ex_1, IN_2/Ex_2, IN_2/Ex_3,...,IN_2/Ex_N},
{...},
{IN_M/Ex_1, IN_M/Ex_2, IN_M/Ex_3,...,IN_M/Ex_N}}
arma::mat LabelSet = {OL_Ex_1, OL_Ex_2, ..., OL_Ex_N}
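For comparison, a tiny sketch of the shapes mlpack's FFN::Train expects, which this layout matches (each column is one example; numExamples is just an illustrative name here; with NegativeLogLikelihood the labels must additionally lie in [1, N], as the answer below explains):
arma::mat trainset(784, numExamples); // rows = input neurons (28*28 pixels)
arma::mat labels(1, numExamples);     // one label per column, in [1, 10]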
Training my network leads to an error:
Error training artificial neural network!
Error details: Mat::operator(): index out of bounds
I'm quite sure that the structure of my training and label stacks is not correct.
Does anybody know what I did wrong?
I tried to follow this example and to translate it to my case:
http://www.mlpack.org/docs/mlpack-git/doxygen/cnetutorial.html
Here is a picture of Visual Studio showing the structure of my trainset:
Here is a picture of Visual Studio showing the structure of my labels:
Thank you for any help.
Yours,
Jan
This is my code:
#pragma region Includings
#include <iostream>
#include <stdlib.h>
#include <exception>
#include <string>
#include "opencv2/opencv.hpp"
#include <mlpack/core.hpp>
#include <mlpack/methods/ann/layer/layer.hpp>
#include <mlpack/methods/ann/ffn.hpp>
#include <mlpack/core/optimizers/cne/cne.hpp>
#pragma endregion
#pragma region Globals
std::string TrainFolder = "C:\\HomeC\\MNist\\MNist\\train-labels\\";
#pragma endregion
#pragma region Structs
typedef struct TInputPair {
double Value;
int Index;
};
typedef struct TDigitPairExample {
TInputPair* InputPairArray;
int nNonZero;
char OutputValue;
};
#pragma endregion
#pragma region Identifier
void DisplayImage(cv::Mat* Img, std::string Title = "CV::DefaultForm");
std::vector<TDigitPairExample> GenerateTrainingSet(std::string TrainFolder, int nExamplesPerClass, bool DisplayAtWindow = false);
void DisplayImage(cv::Mat* Img, std::string Title, int Delay = 0);
TInputPair* MatToArray(cv::Mat* img, int* nEntries);
int CharToOutputInt(char c);
void TransferDataToMLPack(std::vector<TDigitPairExample>* ExStack, arma::mat* DataStack, arma::mat* LabelStack, int nInput);
typedef uchar Pixel;
#pragma endregion
int main() {
#pragma region Get training examples from images
std::vector<TDigitPairExample> TrainExamples = GenerateTrainingSet(TrainFolder, 101);
#pragma endregion
#pragma region Convert training vector to armadillo matrix
arma::mat trainset, labels;
TransferDataToMLPack(&TrainExamples, &trainset, &labels, 784);
#pragma endregion
#pragma region Define network
mlpack::ann::FFN<mlpack::ann::NegativeLogLikelihood<> > network;
network.Add<mlpack::ann::Linear<> >(784, 784);
network.Add<mlpack::ann::SigmoidLayer<> >();
network.Add<mlpack::ann::Linear<> >(784, 10);
network.Add<mlpack::ann::LogSoftMax<> >();
#pragma endregion
#pragma region Train network
try {
network.Train(trainset, labels);
}catch (const std::exception& e) {
std::cout << "Error training artificial neural network!" << std::endl << "Error details: " << e.what() << std::endl;
}
#pragma endregion
std::cout << "Application finished. Press ENTER to exit..." << std::endl;
std::cin.get();
}
#pragma region Private_regions
void DisplayImage(cv::Mat* Img, std::string Title, int Delay) {
/***************/
/*Define window*/
/***************/
cv::namedWindow(Title.c_str(), cv::WINDOW_AUTOSIZE);
cv::imshow(Title.c_str(), *Img);
cv::waitKey(Delay);
//cv::destroyWindow(Title.c_str());
return;
}
TInputPair* MatToArray(cv::Mat* img, int* nEntries) {
uchar* ptr = nullptr, *dptr = nullptr;
TInputPair* InPairArr = nullptr;
int j = 0;
if (img->isContinuous()) {
ptr = img->ptr<uchar>();
}else { return nullptr; }
InPairArr = (TInputPair*)malloc((img->cols) * (img->rows) * sizeof(TInputPair));
if (InPairArr == nullptr) { return nullptr; }
for (int i = 0; i < (img->rows)*(img->cols); i++) {
//std::cout << "Index_" + std::to_string(i) + "; " + std::to_string(ptr[i]) << std::endl;
if (ptr[i] != 255) { InPairArr[j].Index = i; InPairArr[j].Value = (double)(255 - ptr[i]) / 255.0; j++; }
}
InPairArr = (TInputPair*)realloc(InPairArr, j * sizeof(TInputPair));
*nEntries = j;
return InPairArr;
}
std::vector<TDigitPairExample> GenerateTrainingSet(std::string TrainFolder, int nExamplesPerClass, bool DisplayAtWindow) {
/********/
/*Localc*/
/********/
int nEntries = 0;
cv::Mat imgMod, imgGrad, imgInv, ptHull, imgHull, imgResize;
std::vector<std::vector<cv::Point>> contours;
std::vector<TDigitPairExample> TrainExamples;
TDigitPairExample TDPE;
for (int i = 1, j = 0;; i++) {
/**************/
/*Reading file*/
/**************/
cv::Mat imgOrig = cv::imread(TrainFolder + std::to_string(j) + "_" + std::to_string(i) + ".bmp", cv::IMREAD_GRAYSCALE);
if (imgOrig.empty() || i > 100) { j++; i = 1; if (j > 9) { break; } continue; }
/****************/
/*Build negative*/
/****************/
cv::subtract(cv::Scalar::all(255.0), imgOrig, imgMod);
/*****************/
/*Cut by treshold*/
/*****************/
cv::threshold(imgMod, imgMod, 230.0, 255.0, cv::THRESH_BINARY);
/**************/
/*Get contours*/
/**************/
//cv::findContours(imgMod, contours, cv::CHAIN_APPROX_NONE, 1);
//cv::Scalar color = cv::Scalar(255, 0, 0);
//cv::drawContours(imgMod, contours, -1, color, 1, 8);
//cv::Laplacian(imgOrig, imgGrad, 16, 1, 1.0, 0.0, cv::BORDER_REFLECT);
/********************/
/*Resize and display*/
/********************/
cv::resize(imgMod, imgResize, cv::Size(300, 300), .0, .0, cv::INTER_LINEAR);
TDPE.InputPairArray = MatToArray(&imgMod, &nEntries);
TDPE.nNonZero = nEntries;
TDPE.OutputValue = std::to_string(j).c_str()[0];
TrainExamples.push_back(TDPE);
if (DisplayAtWindow) { DisplayImage(&imgResize, std::string("After inversion"), 5); }
}
return TrainExamples;
}
int CharToOutputInt(char c) {
switch (c) {
case '0': return 0;
case '1': return 1;
case '2': return 2;
case '3': return 3;
case '4': return 4;
case '5': return 5;
case '6': return 6;
case '7': return 7;
case '8': return 8;
case '9': return 9;
default: throw new std::exception();
}
}
void TransferDataToMLPack(std::vector<TDigitPairExample>* ExStack, arma::mat* DataStack, arma::mat* LabelStack, int nInput) {
*DataStack = arma::zeros(nInput, ExStack->size());
*LabelStack = arma::zeros(1, ExStack->size()); /*...edit...*/
TDigitPairExample DPE;
TInputPair TIP;
/*Looping all digit examples*/
for (int i = 0; i < ExStack->size(); i++) {
DPE = (*ExStack)[i];
/*Looping all nonZero pixle*/
for (int j = 0; j < DPE.nNonZero; j++) {
TIP = DPE.InputPairArray[j];
try {
(*DataStack)(TIP.Index, i) = TIP.Value;
}catch (std::exception& ex) {
std::cout << "Error adding example[" << std::to_string(j) << "] to training stack!" << std::endl <<
"Error details: " << ex.what() << std::endl;
}
}
/*Adding label*/
try {
(*LabelStack)(0, i) = CharToOutputInt(DPE.OutputValue); /*...edit...*/
}catch (std::exception& ex) {
std::cout << "Error adding example[" << std::to_string(i) << "] to label stack!" << std::endl <<
"Error details: " << ex.what() << std::endl;
}
}
return;
}
#pragma endregion
The NegativeLogLikelihood loss function expects the target to be in the range [1, N], so you would have to increase the return value of CharToOutputInt. In case you haven't seen it already, there is an interesting example, mlpack - DigitRecognizerCNN, which could be helpful as well.
Ok, thanks to Marcus, I found the error. A label "0" is not allowed. I simply changed the labels to:
1 ==> Zero
2 ==> One
3 ==> Two
...
10 ==> Nine
That worked.
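A minimal sketch of that shift, assuming the digit characters '0'..'9' as in the original CharToOutputInt:
// Labels moved into the [1, 10] range that NegativeLogLikelihood expects.
int CharToOutputInt(char c) {
if (c >= '0' && c <= '9')
return (c - '0') + 1; // '0' -> 1, '1' -> 2, ..., '9' -> 10
throw std::invalid_argument("not a digit"); // needs <stdexcept>
}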

Multi threads reading data

I am writing an interface for line detection in SFML and OpenCV in C++; I have 2 classes: Interface and ImageProccessing. I need 2 while loops (one for drawing the whole interface on screen and the other for line detection), so I decided to use threads. I created a new thread in the Interface constructor using:
ImageProccessingThread = new std::thread(&ImageProccessing::start, context.imageproc, toString(getContext().connection->getIP()), getContext().connection->getPort());
It needs 2 arguments (the IP and port of the webcam). The thread works correctly, because when I display windows from it, everything is fine. The problem is in the main thread. I want to copy a cv::Mat from the ImageProccessing class, convert it into an SFML image, and display it. I handled the converting and displaying (tested on another webcam image captured in the main thread). But I can't copy the Mat from the ImageProccessingThread and use it in the main thread. Using:
getContext().imageproc->getOriginal().copyTo(frameRGB);
if (frameRGB.empty())
says there is no image - it's empty, just as if the threads were working on separate data. I thought it was a synchronisation problem, so I tried to use a mutex. In the ImageProccessing class I added:
mtx.lock();
result.copyTo(imgResult);
imgThresholded.copyTo(imgThresh);
mtx.unlock();
But it changes nothing. What am I doing wrong?
EDIT:
More explanation about the copying:
The copy takes place in the Interface class.
Interface:
cv::Mat frameRGB, frameRGBA;
getContext().imageproc->getOriginal().copyTo(frameRGB);
if (frameRGB.empty())
{
std::cout << "Can't copy" << std::endl;
system("PAUSE");
return;
}
cv::cvtColor(frameRGB, frameRGBA, cv::COLOR_BGR2RGBA);
camImage.create(frameRGBA.cols, frameRGBA.rows, frameRGBA.ptr());
if (!camTexture.loadFromImage(camImage))
{
std::cout << "No image" << std::endl;
system("PAUSE");
return;
}
camSprite.setTexture(camTexture);
camSprite.setPosition(1214.f, 30.f);
The cv::Mat comes from the ImageProccessing getter.
ImageProccessing:
ImageProccessing.hpp:
#pragma once
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <iostream>
#include <opencv2/opencv.hpp>
#include <math.h>
#include <SFML/Network.hpp>
#include <fstream>
class ImageProccessing
{
public:
ImageProccessing();
~ImageProccessing();
sf::Int16 doProccessing();
void setup();
void number_of_lines(int*, cv::Mat*, int, int, bool);
bool zeros(float min);
void tresh();
void update(cv::Mat imgOriginal);
int start(std::string, unsigned short);
void Parameters_load(struct Config);
struct Config Parameters_check();
void show(bool);
cv::Mat getOriginal();
cv::Mat getThreshholded();
bool getStarted();
void changeTest(bool change);
bool getTest();
private:
bool Test;
bool isStarted;
cv::VideoCapture cap;
cv::Mat imgOriginal;
cv::Mat roi_left;
cv::Mat roi_right;
cv::Mat contours1;
cv::Mat imgThresholded;
cv::Mat result;
cv::Mat imgResult;
cv::Mat imgThresh;
int iLowH;
int iHighH;
int iLowS;
int iHighS;
int iLowV;
int iHighV;
int max_angle;
int min_angle;
int mid_angle_left;
int mid_angle_right;
int point_height;
float cut_off;
int houghVote_left;
int houghVote_right;
int size_destruct;
int size_fill;
void average(float, float);
FILE* files;
std::vector<cv::Vec2f> lines;
};
Parts of ImageProccessing.cpp:
cv::Mat ImageProccessing::getOriginal()
{
return imgResult;
}
sf::Int16 ImageProccessing::doProccessing()
{
sf::Int16 value = 0;
std::vector<Vec2f> lines_left;
std::vector<Vec2f> lines_right;
roi_left = contours1(Rect(0, 0, int(float(contours1.cols) *cut_off), imgOriginal.rows));
roi_right = contours1(Rect(int(float(contours1.cols) *(1 - cut_off)), 0, int(float(contours1.cols)*cut_off), imgOriginal.rows));
lines = lines_left;
this->number_of_lines(&houghVote_left, &roi_left, min_angle, mid_angle_left, true);
lines_left = lines;
lines = lines_right;
this->number_of_lines(&houghVote_right, &roi_right, mid_angle_right, max_angle, false);
lines_right = lines;
std::cout << "houghVote" << houghVote_left << " " << houghVote_right << "\n";
std::cout << "lines" << lines_left.size() << "lines1" << lines_right.size() << "\n";
std::cout << "min " << min_angle * PI / 180 << ", max" << max_angle * PI / 180 << "\n";
//fprintf(files, "houghVote %d %d lines %d lines1 %d min %f max %f", houghVote_left, houghVote_right, lines_left.size(), lines_right.size(), min_angle * PI / 180, max_angle * PI / 180);
imgOriginal.copyTo(result);
if (lines_left.size() < 30) {
lines = lines_left;
average(10, 1.5);
lines_left = lines;
}
if (lines_right.size() < 30) {
lines = lines_right;
average(10, 1.5);
lines_right = lines;
}
unsigned __int64 begin = 0;
Mat hough(imgOriginal.size(), CV_8U, Scalar(0));
Vec2f closest;
float min_distance_left = float(result.cols) / 2;
while (begin < lines_left.size()) {
float rho = lines_left[begin][0]; // first element is distance rho
float theta = lines_left[begin][1]; // second element is angle theta
float a = -(cos(theta) / sin(theta));
float b = rho / sin(theta);
float dist = (result.cols / 2) - ((result.rows / point_height) - b) / a;
//cout << dist << endl;
if (min_distance_left > dist && dist > 0) {
min_distance_left = dist;
}
//cout << min_distance_left << endl;
fprintf(files, "line: (%f,%f) \n", dist, min_distance_left);
// point of intersection of the line with first row
Point pt1(int(rho / cos(theta)), 0);
// point of intersection of the line with last row
Point pt2(int((rho - result.rows*sin(theta)) / cos(theta)), result.rows);
// draw a white line
line(result, pt1, pt2, Scalar(255), 8);
line(hough, pt1, pt2, Scalar(255), 8);
//}
//std::cout << "line: (" << rho << "," << theta << ")\n";
fprintf(files, "line: (%f,%f) \n", rho, theta);
++begin;
}
begin = 0;
float min_distance_right = float(result.cols) / 2;
while (begin < lines_right.size()) {
float rho = lines_right[begin][0]; // first element is distance rho
float theta = lines_right[begin][1]; // second element is angle theta
float a = -(cos(theta) / sin(theta));
float b = rho / sin(theta);
float dist = (0.5f - cut_off)*contours1.cols + ((result.rows / point_height) - b) / a;
//cout << dist << endl;
if (min_distance_right > dist && dist > 0) {
min_distance_right = dist;
}
//cout << min_distance_right << endl;
fprintf(files, "line: (%f,%f) \n", dist, min_distance_right);
//if (theta < 30.*PI / 180. || theta > 150.*PI / 180.) { // filter theta angle to find lines with theta between 30 and 150 degrees (mostly vertical)
// point of intersection of the line with first row
Point pt1(int(rho / cos(theta) + contours1.cols*(1 - cut_off)), 0);
// point of intersection of the line with last row
Point pt2(int((rho - result.rows*sin(theta)) / cos(theta) + contours1.cols*(1 - cut_off)), result.rows);
// draw a white line
line(result, pt1, pt2, Scalar(255), 8);
line(hough, pt1, pt2, Scalar(255), 8);
//}
//std::cout << "line: (" << rho << "," << theta << ")\n";
fprintf(files, "line: (%f,%f) \n", rho, theta);
++begin;
}
Point x1(result.cols / 2, 0);
Point x2(result.cols / 2, result.rows);
Point midle;
line(result, x1, x2, Scalar(255), 8);
x1 = { int((result.cols / 2) - min_distance_left),result.rows / point_height };
x2 = { int((result.cols / 2) + min_distance_right),result.rows / point_height };
midle = (x1 + x2) / 2;
circle(result, midle, 10, Scalar(0, 0, 255), -1);
line(result, x1, x2, Scalar(255), 8);
value = midle.x - (result.cols / 2);
mtx.lock();
result.copyTo(imgResult);
imgThresholded.copyTo(imgThresh);
mtx.unlock();
//this->show(1);
return value;
}
int ImageProccessing::start(string ip, unsigned short port)
{
//test
sf::UdpSocket socket;
socket.setBlocking(false);
sf::Packet packet;
sf::Int16 value = 0;
packet << value;
sf::IpAddress recipient = ip;
VideoCapture cap(0);
//VideoCapture cap("http://"+ip+":8080/stream/video.mjpeg"); //capture the video from web cam
if (!cap.isOpened()) // if not success, exit program
{
cout << "Cannot open the web cam" << endl;
system("pause");
return -1;
}
//this->setup();
Mat imgOriginal;
while (true) {
bool bSuccess = cap.read(imgOriginal); // read a new frame from video
if (!bSuccess) //if not success, break loop
{
cout << "Cannot read a frame from video stream" << endl;
break;
}
//wysylanie
this->update(imgOriginal);
this->tresh();
if (this->zeros(0.001f)) {
value = 1000;
//this->show(0);
}
else {
value = this->doProccessing();
}
packet.clear();
packet << value;
if (socket.send(packet, recipient, port) != sf::Socket::Done)
{
std::cout << "Some error" << std::endl;
return 12;
}
//koniec wysylania
//waitKey(0);
if (waitKey(10) == 27) //wait for 'esc' key press for 30ms. If 'esc' key is pressed, break loop
{
value = 1000;
packet.clear();
packet << value;
if (socket.send(packet, recipient, port) != sf::Socket::Done)
{
std::cout << "Some error" << std::endl;
return 12;
}
cout << "esc key is pressed by user" << endl;
break;
return 1;
}
}
isStarted = true;
}
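One detail that stands out in the code above (a guess, not a verified fix): the writer side in doProccessing locks mtx, but the reader side does not - getOriginal() returns the Mat header without holding the lock, and the copyTo in the Interface happens outside any lock as well. A sketch of a getter that takes the same lock and hands back a deep copy, assuming mtx is a std::mutex (from <mutex>) used in doProccessing and visible to both threads:
cv::Mat ImageProccessing::getOriginal()
{
std::lock_guard<std::mutex> lock(mtx); // same mutex the writer uses
return imgResult.clone();              // deep copy, safe after unlock
}
Note also that imgResult stays empty until doProccessing has run at least once, so the main thread should tolerate an empty Mat at startup instead of treating it as a hard error.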

create header file .h from cpp file [closed]

Closed. This question needs to be more focused. It is not currently accepting answers.
Closed 9 years ago.
I need to create a header file, mainCalib.h, from the mainCalib.cpp file. The mainCalib.cpp file contains the calibration sample of OpenCV, so that finally I can execute the program from the main.cpp file.
This is the mainCalib.cpp file:
#include <iostream>
#include <sstream>
#include <time.h>
#include <stdio.h>
#include <opencv2/core/core.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/calib3d/calib3d.hpp>
#include <opencv2/highgui/highgui.hpp>
#ifndef _CRT_SECURE_NO_WARNINGS
# define _CRT_SECURE_NO_WARNINGS
#endif
#include "mainCalib.h"
using namespace cv;
using namespace std;
void help()
{
cout << "This is a camera calibration sample." << endl
<< "Usage: calibration configurationFile" << endl
<< "Near the sample file you'll find the configuration file, which has detailed help of "
"how to edit it. It may be any OpenCV supported file format XML/YAML." << endl;
}
class Settings
{
public:
Settings() : goodInput(false) {}
enum Pattern { NOT_EXISTING, CHESSBOARD, CIRCLES_GRID, ASYMMETRIC_CIRCLES_GRID };
enum InputType {INVALID, CAMERA, VIDEO_FILE, IMAGE_LIST};
void write(FileStorage& fs) const //Write serialization for this class
{
fs << "{" << "BoardSize_Width" << boardSize.width
<< "BoardSize_Height" << boardSize.height
<< "Square_Size" << squareSize
<< "Calibrate_Pattern" << patternToUse
<< "Calibrate_NrOfFrameToUse" << nrFrames
<< "Calibrate_FixAspectRatio" << aspectRatio
<< "Calibrate_AssumeZeroTangentialDistortion" << calibZeroTangentDist
<< "Calibrate_FixPrincipalPointAtTheCenter" << calibFixPrincipalPoint
<< "Write_DetectedFeaturePoints" << bwritePoints
<< "Write_extrinsicParameters" << bwriteExtrinsics
<< "Write_outputFileName" << outputFileName
<< "Show_UndistortedImage" << showUndistorsed
<< "Input_FlipAroundHorizontalAxis" << flipVertical
<< "Input_Delay" << delay
<< "Input" << input
<< "}";
}
void read(const FileNode& node) //Read serialization for this class
{
node["BoardSize_Width" ] >> boardSize.width;
node["BoardSize_Height"] >> boardSize.height;
node["Calibrate_Pattern"] >> patternToUse;
node["Square_Size"] >> squareSize;
node["Calibrate_NrOfFrameToUse"] >> nrFrames;
node["Calibrate_FixAspectRatio"] >> aspectRatio;
node["Write_DetectedFeaturePoints"] >> bwritePoints;
node["Write_extrinsicParameters"] >> bwriteExtrinsics;
node["Write_outputFileName"] >> outputFileName;
node["Calibrate_AssumeZeroTangentialDistortion"] >> calibZeroTangentDist;
node["Calibrate_FixPrincipalPointAtTheCenter"] >> calibFixPrincipalPoint;
node["Input_FlipAroundHorizontalAxis"] >> flipVertical;
node["Show_UndistortedImage"] >> showUndistorsed;
node["Input"] >> input;
node["Input_Delay"] >> delay;
interprate();
}
void interprate()
{
goodInput = true;
if (boardSize.width <= 0 || boardSize.height <= 0)
{
cerr << "Invalid Board size: " << boardSize.width << " " << boardSize.height << endl;
goodInput = false;
}
if (squareSize <= 10e-6)
{
cerr << "Invalid square size " << squareSize << endl;
goodInput = false;
}
if (nrFrames <= 0)
{
cerr << "Invalid number of frames " << nrFrames << endl;
goodInput = false;
}
if (input.empty()) // Check for valid input
inputType = INVALID;
else
{
if (input[0] >= '0' && input[0] <= '9')
{
stringstream ss(input);
ss >> cameraID;
inputType = CAMERA;
}
else
{
if (readStringList(input, imageList))
{
inputType = IMAGE_LIST;
nrFrames = (nrFrames < (int)imageList.size()) ? nrFrames : (int)imageList.size();
}
else
inputType = VIDEO_FILE;
}
if (inputType == CAMERA)
inputCapture.open(cameraID);
if (inputType == VIDEO_FILE)
inputCapture.open(input);
if (inputType != IMAGE_LIST && !inputCapture.isOpened())
inputType = INVALID;
}
if (inputType == INVALID)
{
cerr << " Inexistent input: " << input;
goodInput = false;
}
flag = 0;
if(calibFixPrincipalPoint) flag |= CV_CALIB_FIX_PRINCIPAL_POINT;
if(calibZeroTangentDist) flag |= CV_CALIB_ZERO_TANGENT_DIST;
if(aspectRatio) flag |= CV_CALIB_FIX_ASPECT_RATIO;
calibrationPattern = NOT_EXISTING;
if (!patternToUse.compare("CHESSBOARD")) calibrationPattern = CHESSBOARD;
if (!patternToUse.compare("CIRCLES_GRID")) calibrationPattern = CIRCLES_GRID;
if (!patternToUse.compare("ASYMMETRIC_CIRCLES_GRID")) calibrationPattern = ASYMMETRIC_CIRCLES_GRID;
if (calibrationPattern == NOT_EXISTING)
{
cerr << " Inexistent camera calibration mode: " << patternToUse << endl;
goodInput = false;
}
atImageList = 0;
}
Mat nextImage()
{
Mat result;
if( inputCapture.isOpened() )
{
Mat view0;
inputCapture >> view0;
view0.copyTo(result);
}
else if( atImageList < (int)imageList.size() )
result = imread(imageList[atImageList++], CV_LOAD_IMAGE_COLOR);
return result;
}
static bool readStringList( const string& filename, vector<string>& l )
{
l.clear();
FileStorage fs(filename, FileStorage::READ);
if( !fs.isOpened() )
return false;
FileNode n = fs.getFirstTopLevelNode();
if( n.type() != FileNode::SEQ )
return false;
FileNodeIterator it = n.begin(), it_end = n.end();
for( ; it != it_end; ++it )
l.push_back((string)*it);
return true;
}
public:
Size boardSize; // The size of the board -> Number of items by width and height
Pattern calibrationPattern;// One of the Chessboard, circles, or asymmetric circle pattern
float squareSize; // The size of a square in your defined unit (point, millimeter,etc).
int nrFrames; // The number of frames to use from the input for calibration
float aspectRatio; // The aspect ratio
int delay; // In case of a video input
bool bwritePoints; // Write detected feature points
bool bwriteExtrinsics; // Write extrinsic parameters
bool calibZeroTangentDist; // Assume zero tangential distortion
bool calibFixPrincipalPoint;// Fix the principal point at the center
bool flipVertical; // Flip the captured images around the horizontal axis
string outputFileName; // The name of the file where to write
bool showUndistorsed; // Show undistorted images after calibration
string input; // The input ->
int cameraID;
vector<string> imageList;
int atImageList;
VideoCapture inputCapture;
InputType inputType;
bool goodInput;
int flag;
private:
string patternToUse;
};
static void read(const FileNode& node, Settings& x, const Settings& default_value = Settings())
{
if(node.empty())
x = default_value;
else
x.read(node);
}
enum { DETECTION = 0, CAPTURING = 1, CALIBRATED = 2 };
bool runCalibrationAndSave(Settings& s, Size imageSize, Mat& cameraMatrix, Mat& distCoeffs,
vector<vector<Point2f> > imagePoints );
int mainn(int argc, char* argv[])
{
help();
Settings s;
const string inputSettingsFile = argc > 1 ? argv[1] : "D:\\e+v\\Projekte\\Calibration_Test1\\Calibration_Test1\\in_VID5.xml";
FileStorage fs(inputSettingsFile, FileStorage::READ); // Read the settings
if (!fs.isOpened())
{
cout << "Could not open the configuration file: \"" << inputSettingsFile << "\"" << endl;
return -1;
}
fs["Settings"] >> s;
fs.release(); // close Settings file
if (!s.goodInput)
{
cout << "Invalid input detected. Application stopping. " << endl;
return -1;
}
vector<vector<Point2f> > imagePoints;
Mat cameraMatrix, distCoeffs;
Size imageSize;
int mode = s.inputType == Settings::IMAGE_LIST ? CAPTURING : DETECTION;
clock_t prevTimestamp = 0;
const Scalar RED(0,0,255), GREEN(0,255,0);
const char ESC_KEY = 27;
for(int i = 0;;++i)
{
Mat view;
bool blinkOutput = false;
view = s.nextImage();
//----- If no more image, or got enough, then stop calibration and show result -------------
if( mode == CAPTURING && imagePoints.size() >= (unsigned)s.nrFrames )
{
if( runCalibrationAndSave(s, imageSize, cameraMatrix, distCoeffs, imagePoints))
mode = CALIBRATED;
else
mode = DETECTION;
}
if(view.empty()) // If no more images then run calibration, save and stop loop.
{
if( imagePoints.size() > 0 )
runCalibrationAndSave(s, imageSize, cameraMatrix, distCoeffs, imagePoints);
break;
}
imageSize = view.size(); // Format input image.
if( s.flipVertical ) flip( view, view, 0 );
vector<Point2f> pointBuf;
bool found;
switch( s.calibrationPattern ) // Find feature points on the input format
{
case Settings::CHESSBOARD:
found = findChessboardCorners( view, s.boardSize, pointBuf,
CV_CALIB_CB_ADAPTIVE_THRESH | CV_CALIB_CB_FAST_CHECK | CV_CALIB_CB_NORMALIZE_IMAGE);
break;
case Settings::CIRCLES_GRID:
found = findCirclesGrid( view, s.boardSize, pointBuf );
break;
case Settings::ASYMMETRIC_CIRCLES_GRID:
found = findCirclesGrid( view, s.boardSize, pointBuf, CALIB_CB_ASYMMETRIC_GRID );
break;
default:
found = false;
break;
}
if ( found) // If done with success,
{
// improve the found corners' coordinate accuracy for chessboard
if( s.calibrationPattern == Settings::CHESSBOARD)
{
Mat viewGray;
cvtColor(view, viewGray, COLOR_BGR2GRAY);
cornerSubPix( viewGray, pointBuf, Size(11,11),
Size(-1,-1), TermCriteria( CV_TERMCRIT_EPS+CV_TERMCRIT_ITER, 30, 0.1 ));
}
if( mode == CAPTURING && // For camera only take new samples after delay time
(!s.inputCapture.isOpened() || clock() - prevTimestamp > s.delay*1e-3*CLOCKS_PER_SEC) )
{
imagePoints.push_back(pointBuf);
prevTimestamp = clock();
blinkOutput = s.inputCapture.isOpened();
}
// Draw the corners.
drawChessboardCorners( view, s.boardSize, Mat(pointBuf), found );
}
//----------------------------- Output Text ------------------------------------------------
string msg = (mode == CAPTURING) ? "100/100" :
mode == CALIBRATED ? "Calibrated" : "Press 'g' to start";
int baseLine = 0;
Size textSize = getTextSize(msg, 1, 1, 1, &baseLine);
Point textOrigin(view.cols - 2*textSize.width - 10, view.rows - 2*baseLine - 10);
if( mode == CAPTURING )
{
if(s.showUndistorsed)
msg = format( "%d/%d Undist", (int)imagePoints.size(), s.nrFrames );
else
msg = format( "%d/%d", (int)imagePoints.size(), s.nrFrames );
}
putText( view, msg, textOrigin, 1, 1, mode == CALIBRATED ? GREEN : RED);
if( blinkOutput )
bitwise_not(view, view);
//------------------------- Video capture output undistorted ------------------------------
if( mode == CALIBRATED && s.showUndistorsed )
{
Mat temp = view.clone();
undistort(temp, view, cameraMatrix, distCoeffs);
}
//------------------------------ Show image and check for input commands -------------------
imshow("Image View", view);
char key = (char)waitKey(s.inputCapture.isOpened() ? 50 : s.delay);
if( key == ESC_KEY )
break;
if( key == 'u' && mode == CALIBRATED )
s.showUndistorsed = !s.showUndistorsed;
if( s.inputCapture.isOpened() && key == 'g' )
{
mode = CAPTURING;
imagePoints.clear();
}
}
// -----------------------Show the undistorted image for the image list ------------------------
if( s.inputType == Settings::IMAGE_LIST && s.showUndistorsed )
{
Mat view, rview, map1, map2;
initUndistortRectifyMap(cameraMatrix, distCoeffs, Mat(),
getOptimalNewCameraMatrix(cameraMatrix, distCoeffs, imageSize, 1, imageSize, 0),
imageSize, CV_16SC2, map1, map2);
for(int i = 0; i < (int)s.imageList.size(); i++ )
{
view = imread(s.imageList[i], 1);
if(view.empty())
continue;
remap(view, rview, map1, map2, INTER_LINEAR);
imshow("Image View", rview);
char c = (char)waitKey();
if( c == ESC_KEY || c == 'q' || c == 'Q' )
break;
}
}
return 0;
}
static double computeReprojectionErrors( const vector<vector<Point3f> >& objectPoints,
const vector<vector<Point2f> >& imagePoints,
const vector<Mat>& rvecs, const vector<Mat>& tvecs,
const Mat& cameraMatrix , const Mat& distCoeffs,
vector<float>& perViewErrors)
{
vector<Point2f> imagePoints2;
int i, totalPoints = 0;
double totalErr = 0, err;
perViewErrors.resize(objectPoints.size());
for( i = 0; i < (int)objectPoints.size(); ++i )
{
projectPoints( Mat(objectPoints[i]), rvecs[i], tvecs[i], cameraMatrix,
distCoeffs, imagePoints2);
err = norm(Mat(imagePoints[i]), Mat(imagePoints2), CV_L2);
int n = (int)objectPoints[i].size();
perViewErrors[i] = (float) std::sqrt(err*err/n);
totalErr += err*err;
totalPoints += n;
}
return std::sqrt(totalErr/totalPoints);
}
static void calcBoardCornerPositions(Size boardSize, float squareSize, vector<Point3f>& corners,
Settings::Pattern patternType /*= Settings::CHESSBOARD*/)
{
corners.clear();
switch(patternType)
{
case Settings::CHESSBOARD:
case Settings::CIRCLES_GRID:
for( int i = 0; i < boardSize.height; ++i )
for( int j = 0; j < boardSize.width; ++j )
corners.push_back(Point3f(float( j*squareSize ), float( i*squareSize ), 0));
break;
case Settings::ASYMMETRIC_CIRCLES_GRID:
for( int i = 0; i < boardSize.height; i++ )
for( int j = 0; j < boardSize.width; j++ )
corners.push_back(Point3f(float((2*j + i % 2)*squareSize), float(i*squareSize), 0));
break;
default:
break;
}
}
static bool runCalibration( Settings& s, Size& imageSize, Mat& cameraMatrix, Mat& distCoeffs,
vector<vector<Point2f> > imagePoints, vector<Mat>& rvecs, vector<Mat>& tvecs,
vector<float>& reprojErrs, double& totalAvgErr)
{
cameraMatrix = Mat::eye(3, 3, CV_64F);
if( s.flag & CV_CALIB_FIX_ASPECT_RATIO )
cameraMatrix.at<double>(0,0) = 1.0;
distCoeffs = Mat::zeros(8, 1, CV_64F);
vector<vector<Point3f> > objectPoints(1);
calcBoardCornerPositions(s.boardSize, s.squareSize, objectPoints[0], s.calibrationPattern);
objectPoints.resize(imagePoints.size(),objectPoints[0]);
//Find intrinsic and extrinsic camera parameters
double rms = calibrateCamera(objectPoints, imagePoints, imageSize, cameraMatrix,
distCoeffs, rvecs, tvecs, s.flag|CV_CALIB_FIX_K4|CV_CALIB_FIX_K5);
cout << "Re-projection error reported by calibrateCamera: "<< rms << endl;
bool ok = checkRange(cameraMatrix) && checkRange(distCoeffs);
totalAvgErr = computeReprojectionErrors(objectPoints, imagePoints,
rvecs, tvecs, cameraMatrix, distCoeffs, reprojErrs);
return ok;
}
// Print camera parameters to the output file
static void saveCameraParams( Settings& s, Size& imageSize, Mat& cameraMatrix, Mat& distCoeffs,
const vector<Mat>& rvecs, const vector<Mat>& tvecs,
const vector<float>& reprojErrs, const vector<vector<Point2f> >& imagePoints,
double totalAvgErr )
{
FileStorage fs( s.outputFileName, FileStorage::WRITE );
time_t tm;
time( &tm );
struct tm *t2 = localtime( &tm );
char buf[1024];
strftime( buf, sizeof(buf)-1, "%c", t2 );
fs << "calibration_Time" << buf;
if( !rvecs.empty() || !reprojErrs.empty() )
fs << "nrOfFrames" << (int)std::max(rvecs.size(), reprojErrs.size());
fs << "image_Width" << imageSize.width;
fs << "image_Height" << imageSize.height;
fs << "board_Width" << s.boardSize.width;
fs << "board_Height" << s.boardSize.height;
fs << "square_Size" << s.squareSize;
if( s.flag & CV_CALIB_FIX_ASPECT_RATIO )
fs << "FixAspectRatio" << s.aspectRatio;
if( s.flag )
{
sprintf( buf, "flags: %s%s%s%s",
s.flag & CV_CALIB_USE_INTRINSIC_GUESS ? " +use_intrinsic_guess" : "",
s.flag & CV_CALIB_FIX_ASPECT_RATIO ? " +fix_aspectRatio" : "",
s.flag & CV_CALIB_FIX_PRINCIPAL_POINT ? " +fix_principal_point" : "",
s.flag & CV_CALIB_ZERO_TANGENT_DIST ? " +zero_tangent_dist" : "" );
cvWriteComment( *fs, buf, 0 );
}
fs << "flagValue" << s.flag;
fs << "Camera_Matrix" << cameraMatrix;
fs << "Distortion_Coefficients" << distCoeffs;
fs << "Avg_Reprojection_Error" << totalAvgErr;
if( !reprojErrs.empty() )
fs << "Per_View_Reprojection_Errors" << Mat(reprojErrs);
if( !rvecs.empty() && !tvecs.empty() )
{
CV_Assert(rvecs[0].type() == tvecs[0].type());
Mat bigmat((int)rvecs.size(), 6, rvecs[0].type());
for( int i = 0; i < (int)rvecs.size(); i++ )
{
Mat r = bigmat(Range(i, i+1), Range(0,3));
Mat t = bigmat(Range(i, i+1), Range(3,6));
CV_Assert(rvecs[i].rows == 3 && rvecs[i].cols == 1);
CV_Assert(tvecs[i].rows == 3 && tvecs[i].cols == 1);
//*.t() is MatExpr (not Mat) so we can use assignment operator
r = rvecs[i].t();
t = tvecs[i].t();
}
cvWriteComment( *fs, "a set of 6-tuples (rotation vector + translation vector) for each view", 0 );
fs << "Extrinsic_Parameters" << bigmat;
}
if( !imagePoints.empty() )
{
Mat imagePtMat((int)imagePoints.size(), (int)imagePoints[0].size(), CV_32FC2);
for( int i = 0; i < (int)imagePoints.size(); i++ )
{
Mat r = imagePtMat.row(i).reshape(2, imagePtMat.cols);
Mat imgpti(imagePoints[i]);
imgpti.copyTo(r);
}
fs << "Image_points" << imagePtMat;
}
}
bool runCalibrationAndSave(Settings& s, Size imageSize, Mat& cameraMatrix, Mat& distCoeffs,vector<vector<Point2f> > imagePoints )
{
vector<Mat> rvecs, tvecs;
vector<float> reprojErrs;
double totalAvgErr = 0;
bool ok = runCalibration(s,imageSize, cameraMatrix, distCoeffs, imagePoints, rvecs, tvecs,
reprojErrs, totalAvgErr);
cout << (ok ? "Calibration succeeded" : "Calibration failed")
<< ". avg re projection error = " << totalAvgErr ;
if( ok )
saveCameraParams( s, imageSize, cameraMatrix, distCoeffs, rvecs ,tvecs, reprojErrs,
imagePoints, totalAvgErr);
return ok;
}
and here is the main.cpp file:
#include<iostream>
#include"mainCalib.h"
using namespace std;
int main()
{
return 0;
}
So please, can anyone help me create the code of the mainCalib.h file from the mainCalib.cpp
classes and functions?
I tried the following, but I get many errors:
mainCalib.h file:
void help();
class Settings;
static void read(const FileNode& , Settings& , const Settings& );
bool runCalibrationAndSave (Settings& s, Size imageSize, Mat& cameraMatrix, Mat& distCoeffs, vector<vector<Point2f>> imagePoints);
int mainn(int argc, char* argv[])
static double computeReprojectionErrors( const vector<vector<Point3f> >& objectPoints,const vector<vector<Point2f> >& imagePoints,const vector<Mat>& rvecs, const vector<Mat>& tvecs,const Mat& cameraMatrix , const Mat& distCoeffs,vector<float>& perViewErrors);
static void calcBoardCornerPositions(cv::Size , float , vector<Point3f>& ,
Settings::Pattern /*= Settings::CHESSBOARD*/);
static bool runCalibration( Settings& s, Size& , Mat& , Mat& ,vector<vector<Point2f> > , vector<Mat>& , vector<Mat>& ,vector<float>& , double& );
static void saveCameraParams( Settings& , cv::Size& , Mat& , Mat& ,
const vector<Mat>& , const vector<Mat>& ,
const vector<float>& , const vector<vector<Point2f> >& ,
double );
First read about include guards.
Then you put the actual class definition in the header file.
Then you put the class function implementation in the source file.
Short example
Header file
#ifndef HEADER_FILE_H
#define HEADER_FILE_H
class MyClass
{
public:
MyClass(int val);
~MyClass();
void some_function();
private:
int value;
};
#endif // HEADER_FILE_H
Source file
#include <iostream>
#include "header_file.h"
MyClass::MyClass(int val)
: value(val)
{
}
MyClass::~MyClass()
{
}
void MyClass::some_function()
{
std::cout << "value is " << value << '\n';
}
Main source file
#include "header_file.h"
int main()
{
MyClass my_object(5);
my_object.some_function();
}
Also note that you can't have static functions at file scope if you expect them to be called from outside that source file. Making a function static restricts its linkage to the translation unit it is defined in.
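Applied to the question's files, a minimal mainCalib.h might look like the sketch below (an illustration, not a complete solution): the Settings class definition moves here so main.cpp sees the complete type, only the non-static free functions are declared, and the static helpers (computeReprojectionErrors, runCalibration, saveCameraParams) stay in mainCalib.cpp for the reason just described.
#ifndef MAINCALIB_H
#define MAINCALIB_H
#include <string>
#include <vector>
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
class Settings
{
// ... the full class definition, copied verbatim from mainCalib.cpp ...
};
void help();
int mainn(int argc, char* argv[]);
bool runCalibrationAndSave(Settings& s, cv::Size imageSize,
cv::Mat& cameraMatrix, cv::Mat& distCoeffs,
std::vector<std::vector<cv::Point2f> > imagePoints);
#endif // MAINCALIB_H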