I have done a DNI text-recognition project using Visual Studio, the OpenCV library and the KNN recognition method.
To simplify the project, I cropped out the fields whose information I want to extract and applied the whole process to each of them, obtaining good results.
But I have noticed that the program confuses an M with an N and also classifies O as 0.
How could I fix it?
I will show you the field where the incorrect detection occurs.
CODE:
#include <iostream>
#include <vector>
#include <opencv2\opencv.hpp>
#include <opencv2\highgui/highgui.hpp>
#include <opencv2\core\core.hpp>
#include <opencv2\ml\ml.hpp>
#include "Preproceso.h"
// GLOBAL VARIABLES
/////////////////////////////////////////////////////
const int AREA_MINIMA_DE_CONTORNO = 70;
const int ANCHO_IMAGEN_REDIM = 20;
const int ALTURA_IMAGEN_REDIM = 30;
class ContornoConDatos {
public:
// member variables
///////////////////////////////////////////////////////////////////////////
std::vector<cv::Point> ptContorno;
cv::Rect rectanguloDelimitador;
float fltAreaDelContorno;
////////////////////////////////////////////////////////////////////////////
bool comprobarSiContornoValido() {
if (fltAreaDelContorno < AREA_MINIMA_DE_CONTORNO) return false;
return true;
}
////////////////////////////////////////////////////////////////////////////
static bool ordenarPosicionXRectDelim(const ContornoConDatos& ccdIzquierda, const ContornoConDatos& ccdDerecha) {
return(ccdIzquierda.rectanguloDelimitador.x < ccdDerecha.rectanguloDelimitador.x);
}
};
//////////////////////////////////////////////////////////////////
void preprocess(cv::Mat &imagenOriginal, cv::Mat &imagenEscalaDeGrises, cv::Mat &imagenUmbralizada) {
imagenEscalaDeGrises = extraerValor(imagenOriginal);
cv::Mat imagenMaxContrasteEscalaDeGrises = maximizarContraste(imagenEscalaDeGrises);
cv::Mat imagenSuavizada;
cv::GaussianBlur(imagenMaxContrasteEscalaDeGrises, imagenSuavizada, TAMAÑO_FILTRO_SUAVE_GAUSSIANO, 0);
cv::adaptiveThreshold(imagenSuavizada, imagenUmbralizada, 255.0, CV_ADAPTIVE_THRESH_GAUSSIAN_C, CV_THRESH_BINARY_INV, TAMAÑO_BLOQUE_UMBRAL, ANCHO_UMBRAL);
}
///////////////////////////////////////////////////////////////////
cv::Mat extraerValor(cv::Mat &imagenOriginal) {
cv::Mat imagenHSV;
std::vector<cv::Mat> vectorDeImagenesHSV(2);
cv::Mat imagenValor;
cv::cvtColor(imagenOriginal, imagenHSV, CV_BGR2HSV);
cv::split(imagenHSV, vectorDeImagenesHSV);
imagenValor = vectorDeImagenesHSV[2];
return(imagenValor);
}
//////////////////////////////////////////////////////////////
cv::Mat maximizarContraste(cv::Mat &imagenEscalaDeGrises) {
cv::Mat imagenTopHat;
cv::Mat imagenBlackHat;
cv::Mat imagenEscalaDeGrisesMasTopHat;
cv::Mat imagenEscalaDeGrisesMasTopHatMenosBlackHat;
cv::Mat structuringElement = cv::getStructuringElement(CV_SHAPE_RECT, cv::Size(3, 3));
cv::morphologyEx(imagenEscalaDeGrises, imagenTopHat, CV_MOP_TOPHAT, structuringElement);
cv::morphologyEx(imagenEscalaDeGrises, imagenBlackHat, CV_MOP_BLACKHAT, structuringElement);
imagenEscalaDeGrisesMasTopHat = imagenEscalaDeGrises + imagenTopHat;
imagenEscalaDeGrisesMasTopHatMenosBlackHat = imagenEscalaDeGrisesMasTopHat - imagenBlackHat;
return(imagenEscalaDeGrisesMasTopHatMenosBlackHat);
}
//////////////////////////////////////////////////////
int main()
{
cv::Ptr<cv::ml::KNearest> kNearest(cv::ml::KNearest::create());
// read in training classifications ///////////////////////////////////////////////////
cv::Mat matrizClasificacionCaracteres;
cv::FileStorage archivoClasificacion("classifications.xml", cv::FileStorage::READ);
if (archivoClasificacion.isOpened() == false) {
std::cout << "error, unable to open training classifications file, exiting program\n\n";
return(0);
}
archivoClasificacion["classifications"] >> matrizClasificacionCaracteres;
archivoClasificacion.release();
// read in training images ////////////////////////////////////////////////////////////
cv::Mat matrizPixelesImagenReferencia;
cv::FileStorage archivoImagenes("images.xml", cv::FileStorage::READ);
if (archivoImagenes.isOpened() == false) {
std::cout << "error, unable to open training images file, exiting program\n\n";
return(0);
}
archivoImagenes["images"] >> matrizPixelesImagenReferencia;
archivoImagenes.release();
////////////////////////////////////////////////////////////////
// training
//cv::Ptr<cv::ml::KNearest> kNearest(cv::ml::KNearest::create());
kNearest->setDefaultK(1);
kNearest->train(matrizPixelesImagenReferencia, cv::ml::ROW_SAMPLE, matrizClasificacionCaracteres);
// Read the base image, from which I only take the dimensions to assign to the real image I will work with
cv::Mat imagenBase = cv::imread("ImagenBase.png");
if (imagenBase.empty()) {
std::cout << "No se puede leer la imagen del archivo \n\n";
return (0);
}
////////////////////////////////////////////////////////////////
// Read the image I will actually work with, which gets resized
cv::Mat imagenOriginal = cv::imread("DNI-Jessica.jpg");
//cv::Mat imagenOriginal = cv::imread("DNI.png");
if (imagenOriginal.empty()) {
std::cout << "No se puede leer la imagen del archivo \n\n";
return (0);
}
// Resize imagenOriginal to the size of imagenBase so that all input images have the same dimensions
cv::resize(imagenOriginal, imagenOriginal, imagenBase.size());
cv::imshow("ImagenOriginal", imagenOriginal);
// Region of interest 2: SECOND SURNAME (SEGUNDO APELLIDO) ///////////////////////////////
cv::Rect dimensionesROI2(255, 128, 300, 30);
//cv::Rect dimensionesROI2(250, 125, 300, 30);
// Crop the original image to obtain ROI2
cv::Mat ROI2 = imagenOriginal(dimensionesROI2);
// IMAGE PRE-PROCESSING /////////////////////////////////////////////
cv::Mat imagenEscalaDeGrises2;
cv::Mat imagenSuavizada2;
cv::Mat imagenUmbralizada2;
preprocess(ROI2, imagenSuavizada2, imagenUmbralizada2);
cv::threshold(imagenUmbralizada2, imagenUmbralizada2, 0.0, 255.0, CV_THRESH_BINARY | CV_THRESH_OTSU);
cv::Mat imagenDeTrabajo2 = imagenUmbralizada2.clone();
cv::imshow("Imagen pre-procesada 2", imagenUmbralizada2);
////////////////////////////////////////////////////////
std::vector<ContornoConDatos> todosLosContornos2;
std::vector<ContornoConDatos> contornosValidos2;
std::vector<std::vector<cv::Point> > ptContornos2;
std::vector<cv::Vec4i> v4iJerarquia2;
cv::findContours(imagenDeTrabajo2, ptContornos2, v4iJerarquia2, cv::RETR_EXTERNAL, cv::CHAIN_APPROX_SIMPLE);
for (int i = 0; i < ptContornos2.size(); i++)
{
ContornoConDatos contornoConDatos2;
contornoConDatos2.ptContorno = ptContornos2[i];
contornoConDatos2.rectanguloDelimitador = cv::boundingRect(contornoConDatos2.ptContorno);
contornoConDatos2.fltAreaDelContorno = cv::contourArea(contornoConDatos2.ptContorno);
todosLosContornos2.push_back(contornoConDatos2);
}
for (int i = 0; i < todosLosContornos2.size(); i++)
{
if (todosLosContornos2[i].comprobarSiContornoValido())
{
contornosValidos2.push_back(todosLosContornos2[i]);
}
}
std::sort(contornosValidos2.begin(), contornosValidos2.end(), ContornoConDatos::ordenarPosicionXRectDelim);
std::string strCadenaFinal2;
for (int i = 0; i < contornosValidos2.size(); i++)
{
cv::rectangle(ROI2, contornosValidos2[i].rectanguloDelimitador, cv::Scalar(0, 255, 0), 2);
cv::Mat imagenROI2 = imagenUmbralizada2(contornosValidos2[i].rectanguloDelimitador);
cv::Mat imagenROIRedimensionada2;
cv::resize(imagenROI2, imagenROIRedimensionada2, cv::Size(ANCHO_IMAGEN_REDIM, ALTURA_IMAGEN_REDIM));
cv::Mat matrizROIDatosTipoReal2;
imagenROIRedimensionada2.convertTo(matrizROIDatosTipoReal2, CV_32FC1);
cv::Mat matrizROIConRealesSimples2 = matrizROIDatosTipoReal2.reshape(1, 1);
cv::Mat vectorCaracterActual2(0, 0, CV_32F);
kNearest->findNearest(matrizROIConRealesSimples2, 1, vectorCaracterActual2);
float fltCaracterActual2 = (float)vectorCaracterActual2.at<float>(0, 0);
strCadenaFinal2 = strCadenaFinal2 + char(int(fltCaracterActual2));
}
std::cout << "\n" << "SEGUNDO APELLIDO = " << strCadenaFinal2 << "\n";
cv::imshow("SEGUNDO APELLIDO", ROI2);
Preproceso.h CODE:
#include<opencv2/core/core.hpp>
#include<opencv2/highgui/highgui.hpp>
#include<opencv2/imgproc/imgproc.hpp>
// global variables
//////////////////////////////////////////////
const cv::Size TAMAÑO_FILTRO_SUAVE_GAUSSIANO = cv::Size(5, 5);
const int TAMAÑO_BLOQUE_UMBRAL = 19;
const int ANCHO_UMBRAL = 9;
// function prototypes
////////////////////////////////////////////////////////////////////////////
void preprocess(cv::Mat &imagenOriginal, cv::Mat &imagenEscalaDeGrises,
cv::Mat &imagenUmbralizada);
cv::Mat extraerValor(cv::Mat &imagenOriginal);
cv::Mat maximizarContraste(cv::Mat &imagenEscalaDeGrises);
This is the DNI image
And this, the base image
And to finish, here are the xml files that I use as a database in the program:
https://github.com/MicrocontrollersAndMore/OpenCV_3_KNN_Character_Recognition_Cpp/blob/master/classifications.xml
https://github.com/MicrocontrollersAndMore/OpenCV_3_KNN_Character_Recognition_Cpp/blob/master/images.xml
I've researched this problem a lot, but I can't find what exactly is causing the error.
I'm using VS 2017 and this is my header file:
#ifndef FUNCTION_H
#define FUNCTION_H
#include <iostream>
#include <vector>
#include <map>
extern "C" {
#include "extApi.h"
}
void BufferToMatrix(simxUChar* buffer, unsigned int** matrix, int dim_fil, int dim_col);
void PrintMatrix(unsigned int** matriz, int dim_fil, int dim_col);
class Graph
{
// adjacency-list representation
std::vector<std::vector<int>> graph;
std::map<int, std::vector<int>> neighbors; // The possible neighbor nodes, in order: left, down, right, up
public:
// Constructors create the number of nodes based on a matrix
Graph(unsigned int**, int, int);
~Graph();
};
#endif
and the .cpp file
#include <iostream>
#include <vector>
#include <map>
extern "C" {
#include "extApi.h"
}
using namespace std;
void BufferToMatrix(simxUChar* buffer, unsigned int** matrix, int dim_fil, int dim_col)
{
// Extract the data from the buffer; dim_fil = res[1] and dim_col = res[0]
for (int i = 0; i < dim_fil; i++) // rows
{
for (int j = 0; j < dim_col; j++) // columns
{
matrix[j][dim_fil-1 - i] = buffer[i*dim_col + j]; // Matrix with the rows inverted... the buffer delivers it that way.
}
}
}
void PrintMatrix(unsigned int** matriz, int dim_fil, int dim_col)
{
// Print the matrix to the console
for (int i = 0; i < dim_fil; i++) // rows
{
for (int j = 0; j < dim_col; j++) // columns
{
cout << matriz[j][i] << " ";
}
cout << endl;
}
}
Graph::Graph(unsigned int** map, int dim_fil, int dim_col) {}
Graph::~Graph() {}
and the main so far
#include <iostream>
#include "function.h"
#include "function.cpp"
extern "C" {
#include "extApi.h"
}
using namespace std;
int main(int argc, char* argv[])
{
// Get the handles passed on the executable's command line
int portNb = 0;
int mappingSensorHandle = 0;
//int leftMotorHandle = 0;
//int rightMotorHandle = 0;
//int graphHandle = 0;
//int proximitySensorHandle = 0;
if (argc >= 3)
{
portNb = atoi(argv[1]);
mappingSensorHandle = atoi(argv[2]);
//leftMotorHandle = atoi(argv[3]);
//rightMotorHandle = atoi(argv[4]);
//graphHandle = atoi(argv[5]);
//proximitySensorHandle = atoi(argv[6]);
}
else
{
cout << "Es necesario indicar los handlers en V-rep" << endl;
extApi_sleepMs(5000);
return 0;
}
// Start the simulation
int clientID = simxStart((simxChar*)"127.0.0.1", portNb, true, true, 2000, 5);
if (clientID != -1)
{
// Auxiliary variable declarations
int retVal = 0;
bool OneTimeFlag = 0;
// Variable declarations for reading the map
simxInt res[2]; // res[0] is columns and res[1] is rows
simxUChar* image; // pointer to the array holding the vision-sensor data
simxUChar options = 1; // 1 receives grayscale, 0 receives RGB
// Enable streaming of the map
simxGetVisionSensorImage(clientID, mappingSensorHandle, res, &image, options, simx_opmode_streaming);
OneTimeFlag = 1;
// Create a dynamic matrix for the map
unsigned int** map = new unsigned int* [res[1]];
for (int i = 0; i < res[1]; i++)
{
map[i] = new unsigned int[res[0]];
}
// V-REP simulation loop
while (simxGetConnectionId(clientID) != -1)
{
if (OneTimeFlag)
{
retVal = simxGetVisionSensorImage(clientID, mappingSensorHandle, res, &image, options, simx_opmode_buffer);
if (retVal == simx_return_ok)
{
BufferToMatrix(image, map, res[1], res[0]); // Fill the matrix
PrintMatrix(map, res[1], res[0]); // Print the matrix to the console
OneTimeFlag = 0; // Set the flag to 0 so this part doesn't run again
}
// Create the graph and the spanning tree
//Graph grafo(map, res[1], res[0]);
}
}
// Delete the map matrix
for (int i = 0; i < res[1]; i++)
{
delete [] map[i];
}
delete [] map;
simxFinish(clientID);
}
else
{
// End the simulation; the connection was not established
cout << "Conexion no lograda" << endl;
simxFinish(clientID);
return(0);
}
return(0);
}
This is a script to use an external API in V-REP; the only lines that cause trouble are these two in the .h file:
std::vector<std::vector<int>> graph;
std::map<int, std::vector<int>> neighbors;
They cause these problems:
Error LNK2001: unresolved external symbol __imp___CrtDbgReport.
Error LNK2001: unresolved external symbol __imp___invalid_parameter.
Error LNK1120: 2 unresolved externals (coverageBotClient).
I apologize because this is probably too simple, but I can't see the error.
I've trained a BoW codebook (vocabulary) using A-KAZE feature descriptors and am trying to use a BFMatcher with knnMatch to compare newly extracted features to the codebook.
Instead, I get the following error:
OpenCV Error: Assertion failed (_queryDescriptors.type() == trainDescType) in knnMatchImpl, file /home/cecilia/opencv-3.0.0/modules/features2d/src/matchers.cpp, line 722
terminate called after throwing an instance of 'cv::Exception'
what(): /home/cecilia/opencv-3.0.0/modules/features2d/src/matchers.cpp:722: error: (-215) _queryDescriptors.type() == trainDescType in function knnMatchImpl
I've been using the following examples:
BoW in OpenCV using precomputed features
How to train and predict using bag of words?
My intuition is that I am adding the codebook to the matcher incorrectly, but I can't find any documentation or examples that support another method. How can I use my codebook with new examples?
MCVE
/* BOWTest.cpp*/
#include <opencv2/imgcodecs.hpp>
#include <opencv2/videoio.hpp>
#include <opencv2/highgui.hpp>
#include <opencv2/features2d.hpp>
#include <opencv2/opencv.hpp>
#include <iostream>
#include <string>
#include <stdio.h>
#include <dirent.h>
using namespace cv;
using namespace std;
std::string outputFile = "test_codebook.png";
std::string trainingDir = "dataset/";
std::string outputPrefix = "output/res_custom_";
void train(Mat codebook, int codebook_n, Ptr<Feature2D> akaze);
void test(Mat codebook, Ptr<Feature2D> akaze);
int main(int ac, char** av) {
Ptr<Feature2D> feature = AKAZE::create();
Mat codebook;
int codebook_n = 100;
//train(codebook, codebook_n, feature);
test(codebook, feature);
}
//I included the train method to show how the codebook is trained, but it is not actually called in this example
void train(Mat codebook, int codebook_n, Ptr<Feature2D> akaze){
//defining terms for bowkmeans trainer
TermCriteria tc(TermCriteria::MAX_ITER + TermCriteria::EPS, 10, 0.001);
int retries = 1;
int flags = KMEANS_PP_CENTERS;
BOWKMeansTrainer bowTrainer(codebook_n, tc, retries, flags);
int i = 0;
unsigned long numPoints = 0;
DIR *d;
struct dirent *dir;
d = opendir(trainingDir.c_str());
if (d) {
while ((dir = readdir(d)) != NULL){
try {
Mat img;
std::string imgName = trainingDir + dir->d_name;
i = i + 1;
printf("%d, %lu: %s ...", i,numPoints, imgName.c_str());
img = imread(imgName, CV_LOAD_IMAGE_COLOR);
if(img.empty()){ //not image
printf("bad.\n");
continue;
}
printf("loaded.\n");
resize(img, img, Size(200, 200));
Mat features;
vector<KeyPoint> keypoints;
akaze->detectAndCompute(img, Mat(), keypoints, features);
features.convertTo(features, CV_32F);
bowTrainer.add(features);
Mat res;
drawKeypoints(img, keypoints, res);
std::string output_img = outputPrefix + dir->d_name;
imwrite(output_img, res);
numPoints += features.rows;
}catch(int e){
cout << "An exception occurred. Nr. " << e << '\n';
}
}
printf("Read images!");
closedir(d);
codebook = bowTrainer.cluster();
imwrite(outputFile, codebook);
}
}
void test(Mat codebook, Ptr<Feature2D> akaze){
codebook = imread(outputFile);
int codebook_n = codebook.rows;
BFMatcher matcher(NORM_L2);
matcher.add(std::vector<cv::Mat>(1, codebook));
Mat res(Size(codebook_n * 10, 3*10), CV_8UC3, Scalar(0));
vector<int> res_idx(codebook_n, 0);
try {
Mat img;
String imgName = trainingDir + "dog1.jpeg";
img = imread(imgName, CV_LOAD_IMAGE_COLOR);
if(img.empty()){ //not image
printf("bad.\n");
}else{
printf("loaded.\n");
resize(img, img, Size(200, 200));
Mat features;
vector<KeyPoint> keypoints;
akaze->detectAndCompute(img, noArray(), keypoints, features);
features.convertTo(features, CV_32F);
vector< vector< DMatch > > nn_matches;
matcher.knnMatch(features, nn_matches, 1);
printf("%d matched keypoints", nn_matches.size());
}
}catch(int e){
cout << "An exception occurred. Nr. " << e << '\n';
}
}
test_codebook.png
dog1.jpeg
Output
loaded.
OpenCV Error: Assertion failed (_queryDescriptors.type() == trainDescType) in knnMatchImpl, file /home/cecilia/opencv-3.0.0/modules/features2d/src/matchers.cpp, line 722
terminate called after throwing an instance of 'cv::Exception'
what(): /home/cecilia/opencv-3.0.0/modules/features2d/src/matchers.cpp:722: error: (-215) _queryDescriptors.type() == trainDescType in function knnMatchImpl
You shouldn't save the codebook as an image. imwrite will eventually scale and convert the values of the codebook, and imread with default parameters will load it as a 3-channel CV_8UC3 image. To store matrices that are not strictly images, you should use FileStorage.
Save:
FileStorage fs(outputFile, FileStorage::WRITE);
// Store codebook
fs << "codebook" << codebook;
Load:
FileStorage fs(outputFile, FileStorage::READ);
fs["codebook"] >> codebook;
You should use BOWImgDescriptorExtractor to compute the BoW image descriptor starting from your features, AKAZE in this case:
Ptr<DescriptorMatcher> matcher = BFMatcher::create("BruteForce");
BOWImgDescriptorExtractor bow(akaze, matcher);
bow.setVocabulary(codebook);
// Mat img = ...
// AKAZE features
Mat features;
vector<KeyPoint> keypoints;
akaze->detectAndCompute(img, noArray(), keypoints, features);
features.convertTo(features, CV_32F);
// BoW descriptor
Mat bowFeatures;
vector<vector<int>> pointsIdxsOfCluster;
bow.compute(features, bowFeatures, &pointsIdxsOfCluster);
You can use the built-in glob to read images from a folder, avoiding dirent.
vector<String> fileNames;
glob(trainingDir, fileNames);
for (int i=0; i<fileNames.size(); ++i)
{
Mat img = imread(fileNames[i]);
...
You can use iostream with cout, instead of printf.
This is how the code looks:
#include <opencv2/opencv.hpp>
#include <iostream>
using namespace cv;
using namespace std;
std::string outputFile = "test_codebook.yml";
std::string trainingDir = "path_to_train_folder/";
std::string outputPrefix = "path_to_output_folder/";
void train(Mat codebook, int codebook_n, Ptr<Feature2D> akaze);
void test(Mat codebook, Ptr<Feature2D> akaze);
int main(int ac, char** av) {
Ptr<Feature2D> feature = AKAZE::create();
Mat codebook;
int codebook_n = 100;
train(codebook, codebook_n, feature);
test(codebook, feature);
}
//I included the train method to show how the codebook is trained, but it is not actually called in this example
void train(Mat codebook, int codebook_n, Ptr<Feature2D> akaze){
//defining terms for bowkmeans trainer
TermCriteria tc(TermCriteria::MAX_ITER + TermCriteria::EPS, 10, 0.001);
int retries = 1;
int flags = KMEANS_PP_CENTERS;
BOWKMeansTrainer bowTrainer(codebook_n, tc, retries, flags);
int i = 0;
unsigned long numPoints = 0;
vector<String> fileNames;
glob(trainingDir, fileNames);
for (int i=0; i<fileNames.size(); ++i)
{
try {
Mat img;
std::string imgName = fileNames[i];
std::string filename = imgName.substr(trainingDir.length());
cout << i << ", " << numPoints << " : " << imgName;
img = imread(imgName, CV_LOAD_IMAGE_COLOR);
if (img.empty()){ //not image
cout << " bad" << endl;
continue;
}
cout << " loaded" << endl;
resize(img, img, Size(200, 200));
Mat features;
vector<KeyPoint> keypoints;
akaze->detectAndCompute(img, Mat(), keypoints, features);
features.convertTo(features, CV_32F);
bowTrainer.add(features);
Mat res;
drawKeypoints(img, keypoints, res);
std::string output_img = outputPrefix + filename;
imwrite(output_img, res);
numPoints += features.rows;
}
catch (int e){
cout << "An exception occurred. Nr. " << e << '\n';
}
}
cout << "Read images!" << endl;
codebook = bowTrainer.cluster();
{
FileStorage fs(outputFile, FileStorage::WRITE);
// Store codebook
fs << "codebook" << codebook;
// You can also store additional info, like the list of images
//// Store train images filenames
//fs << "train" << "[";
//for (int i = 0; i < fileNames.size(); ++i)
//{
// fs << fileNames[i];
//}
//fs << "]";
}
}
void test(Mat codebook, Ptr<Feature2D> akaze)
{
vector<String> trainFileNames;
{
FileStorage fs(outputFile, FileStorage::READ);
fs["codebook"] >> codebook;
/*FileNode trainingImages = fs["train"];
FileNodeIterator it = trainingImages.begin(), it_end = trainingImages.end();
int idx = 0;
for (; it != it_end; ++it, idx++)
{
trainFileNames.push_back(*it);
}*/
}
int codebook_n = codebook.rows;
Ptr<DescriptorMatcher> matcher = BFMatcher::create("BruteForce");
BOWImgDescriptorExtractor bow(akaze, matcher);
bow.setVocabulary(codebook);
try {
Mat img;
String imgName = "path_to_test_image";
img = imread(imgName, CV_LOAD_IMAGE_COLOR);
if (img.empty()){ //not image
cout << "bad" << endl;
}
else{
cout << "loaded" << endl;
resize(img, img, Size(200, 200));
Mat features;
vector<KeyPoint> keypoints;
akaze->detectAndCompute(img, noArray(), keypoints, features);
features.convertTo(features, CV_32F);
Mat bowFeatures;
vector<vector<int>> pointsIdxsOfCluster;
bow.compute(features, bowFeatures, &pointsIdxsOfCluster);
// bowFeatures is the descriptor you're looking for
// pointsIdxsOfCluster contains the indices of keypoints that belong to the cluster.
}
}
catch (int e){
cout << "An exception occurred. Nr. " << e << endl;
}
}
I want to read an image from a Qt resource (qrc) using OpenCV's imread(), like this:
Mat img = imread(":/TempIcons/logo.png");
but the final img size is [0x0]. I have also tried:
Mat img = imread("qrc://TempIcons/logo.png");
but the size I get is the same. I don't want to load the image into a QImage and then convert it to a cv::Mat. Is there an easy way to do this? If so, how can I do it?
Thank you
As #TheDarkKnight pointed out, imread is not aware of Qt resources. You can however write your own loader, that uses QFile to retrieve the binary data from the resource, and uses imdecode (as done internally by imread) to read the image:
Mat loadFromQrc(QString qrc, int flag = IMREAD_COLOR)
{
//double tic = double(getTickCount());
QFile file(qrc);
Mat m;
if(file.open(QIODevice::ReadOnly))
{
qint64 sz = file.size();
std::vector<uchar> buf(sz);
file.read((char*)buf.data(), sz);
m = imdecode(buf, flag);
}
//double toc = (double(getTickCount()) - tic) * 1000.0 / getTickFrequency();
//qDebug() << "OpenCV loading time: " << toc;
return m;
}
You can call it like:
Mat m = loadFromQrc("qrc_path");
or specifying a flag:
Mat m = loadFromQrc("qrc_path", IMREAD_GRAYSCALE);
Performance
I tried loading the image with loadFromQrc, and loading a QImage and converting it to Mat using this code, both with and without cloning. loadFromQrc turns out to be about 10 times faster than loading a QImage and converting it to Mat.
Results in ms:
Load Mat : 4.85965
QImage to Mat (no clone): 49.3999
QImage to Mat (clone) : 49.8497
Test code:
#include <vector>
#include <iostream>
#include <QDebug>
#include <QtWidgets>
#include <opencv2/opencv.hpp>
using namespace cv;
Mat loadFromQrc(QString qrc, int flag = IMREAD_COLOR)
{
QFile file(qrc);
Mat m;
if(file.open(QIODevice::ReadOnly))
{
qint64 sz = file.size();
std::vector<uchar> buf(sz);
file.read((char*)buf.data(), sz);
m = imdecode(buf, flag);
}
return m;
}
cv::Mat QImageToCvMat( const QImage &inImage, bool inCloneImageData = true )
{
switch ( inImage.format() )
{
// 8-bit, 4 channel
case QImage::Format_RGB32:
{
cv::Mat mat( inImage.height(), inImage.width(), CV_8UC4, const_cast<uchar*>(inImage.bits()), inImage.bytesPerLine() );
return (inCloneImageData ? mat.clone() : mat);
}
// 8-bit, 3 channel
case QImage::Format_RGB888:
{
if ( !inCloneImageData )
qWarning() << "ASM::QImageToCvMat() - Conversion requires cloning since we use a temporary QImage";
QImage swapped = inImage.rgbSwapped();
return cv::Mat( swapped.height(), swapped.width(), CV_8UC3, const_cast<uchar*>(swapped.bits()), swapped.bytesPerLine() ).clone();
}
// 8-bit, 1 channel
case QImage::Format_Indexed8:
{
cv::Mat mat( inImage.height(), inImage.width(), CV_8UC1, const_cast<uchar*>(inImage.bits()), inImage.bytesPerLine() );
return (inCloneImageData ? mat.clone() : mat);
}
default:
qWarning() << "ASM::QImageToCvMat() - QImage format not handled in switch:" << inImage.format();
break;
}
return cv::Mat();
}
int main(int argc, char *argv[])
{
QString url = "...";
{
double tic = double(getTickCount());
Mat m1 = loadFromQrc(url);
double toc = (double(getTickCount()) - tic) * 1000.0 / getTickFrequency();
qDebug() << "Load Mat: " << toc;
if(m1.data != NULL)
{
imshow("m1", m1);
waitKey(1);
}
}
// {
// double tic = double(getTickCount());
// QImage img;
// img.load(url);
// Mat m2 = QImageToCvMat(img, false);
// double toc = (double(getTickCount()) - tic) * 1000.0 / getTickFrequency();
// qDebug() << "QImage to Mat (no clone): " << toc;
// if(m2.data != NULL)
// {
// imshow("m2", m2);
// waitKey(1);
// }
// }
// {
// double tic = double(getTickCount());
// QImage img;
// img.load(url);
// Mat m3 = QImageToCvMat(img, true);
// double toc = (double(getTickCount()) - tic) * 1000.0 / getTickFrequency();
// qDebug() << "QImage to Mat (clone): " << toc;
// if(m3.data != NULL)
// {
// imshow("m3", m3);
// waitKey(1);
// }
// }
waitKey();
return 0;
}
The problem here is that imread() loads an image from a file.
In contrast, Qt's resource system compiles the data from images directly into the program's executable. Qt's QFile operations know that when they are provided a path starting with ":/", it refers to the embedded resources rather than a file on disk.
Therefore, I don't think you will be able to use imread() to directly access a file that has been placed in Qt's resources.
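To illustrate the point, here is a small hypothetical check showing that Qt's qrc-aware QFile sees the resource while plain standard file I/O (which imread relies on) does not:
#include <fstream>
#include <QFile>
#include <QDebug>

void checkResourceVisibility()
{
    QFile res(":/TempIcons/logo.png");
    qDebug() << "QFile sees the resource:" << res.exists();   // true: QFile understands ":/"

    std::ifstream f(":/TempIcons/logo.png");
    qDebug() << "std::ifstream sees it:" << f.good();         // false: no such file on disk
}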
For a few days I've been fighting with the camera calibration chessboard example. Everything goes fine (corners are found and displayed, then fed into arrays) until I call the final function, calibrateCamera. Then I get an assertion error:
OpenCV Error: Assertion failed (nimages > 0) in calibrateCamera, file /home/ig/Downloads/opencv-2.4.8/modules/calib3d/src/calibration.cpp, line 3415
Here is the classic code:
#include <iostream>
#include <fstream>
#include <vector>
#include "opencv2/core/core.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/calib3d/calib3d.hpp"
#include "opencv2/imgproc/imgproc.hpp"
using namespace cv;
using namespace std;
int main(int argc, char* argv[])
{
VideoCapture captR(0); // open the video camera no. 0 (RIGHT)
if (!captR.isOpened()) // if not success, exit program
{
cout << "Cannot open the video cam 0" << endl;
return -1;
}
namedWindow("MyVideo (RIGHT)",CV_WINDOW_AUTOSIZE); //create a window called "MyVideo"
namedWindow("Grayscale",CV_WINDOW_AUTOSIZE); //create a window called "Grayscale"
int a = 0; // Frame counter
int numCornersHor = 7; // Chessboard dimensions
int numCornersVer = 5;
int numSquares = numCornersHor * numCornersVer;
Size boardSize = Size(numCornersHor, numCornersVer);
Mat frameR;
// Mat frameL;
Mat gray_frame;
vector<Point3f> obj;
vector<Point2f> corners; // output vectors of image points
for (int i=0; i<boardSize.height; i++) {
for (int j=0; j<boardSize.width; j++) {
obj.push_back(Point3f(i, j, 0.0f));
}
}
while (1){
int key = waitKey(30);
bool bCaptSuccessR = captR.read(frameR); // read a new frame from video
if (!bCaptSuccessR) // if capture did not succeed, break loop
{
cout << "Cannot read a frame from video stream" << endl;
break;
}
vector<vector<Point3f> > object_points;
vector<vector<Point2f> > image_points;
// make a grayscale version of the frame for cornerSubPix
cvtColor(frameR, gray_frame, CV_BGR2GRAY);
// Get the chessboard corners
bool found = findChessboardCorners(frameR, boardSize, corners);
if (found) {
// Increase accuracy by subpixels
cornerSubPix(gray_frame, corners, Size(11, 11), Size(-1, -1), TermCriteria(CV_TERMCRIT_EPS | CV_TERMCRIT_ITER, 30, 0.1));
drawChessboardCorners(gray_frame, boardSize, corners, found);
imshow("Grayscale", gray_frame);
////////////////////////////////////////////
if(key==32){ // Save a good detection by pressing [space]
image_points.push_back(corners);
object_points.push_back(obj);
cout << "Captured good calibration image, No " << a << endl;
cout << "corners: " << corners << endl;
//cout << "obj: " << obj << endl;
a++;
}
}
imshow("MyVideo (RIGHT)", frameR); //show right webcam frame in "MyVideo" window
if (key == 27) { //wait for 'esc' key press for 30ms. If 'esc' key is pressed, break loop
cout << "esc key is pressed by user" << endl;
break;
}
if (key == 115){ // If 'S' key pressed begin calibration
//////////// BEGIN CALIBRATION ////////////////////////
cout << "Callibration started..." << endl;
Mat cameraMatrix = Mat(3, 3, CV_64F);
cameraMatrix.at<double>(0,0) = 1.0;
Mat distCoeffs;
distCoeffs = Mat::zeros(8, 1, CV_64F);
vector<Mat> rvecs;
vector<Mat> tvecs;
Size imageSize = frameR.size();
calibrateCamera(object_points, image_points, imageSize, cameraMatrix, distCoeffs, rvecs, tvecs);
cout << "Callibration ended." << endl;
}//callibration
}
captR.release();
return 0;
}
And here is the relevant OpenCV source excerpt with line numbers:
3400 double cv::calibrateCamera( InputArrayOfArrays _objectPoints,
3401 InputArrayOfArrays _imagePoints,
3402 Size imageSize, InputOutputArray _cameraMatrix, InputOutputArray _distCoeffs,
3403 OutputArrayOfArrays _rvecs, OutputArrayOfArrays _tvecs, int flags, TermCriteria criteria )
3404 {
3405 int rtype = CV_64F;
3406 Mat cameraMatrix = _cameraMatrix.getMat();
3407 cameraMatrix = prepareCameraMatrix(cameraMatrix, rtype);
3408 Mat distCoeffs = _distCoeffs.getMat();
3409 distCoeffs = prepareDistCoeffs(distCoeffs, rtype);
3410 if( !(flags & CALIB_RATIONAL_MODEL) )
3411 distCoeffs = distCoeffs.rows == 1 ? distCoeffs.colRange(0, 5) : distCoeffs.rowRange(0, 5);
3412
3413 int i;
3414 size_t nimages = _objectPoints.total();
3415 CV_Assert( nimages > 0 );
3416 Mat objPt, imgPt, npoints, rvecM((int)nimages, 3, CV_64FC1), tvecM((int)nimages, 3, CV_64FC1);
3417 collectCalibrationData( _objectPoints, _imagePoints, noArray(),
3418 objPt, imgPt, 0, npoints );
3419 CvMat c_objPt = objPt, c_imgPt = imgPt, c_npoints = npoints;
3420 CvMat c_cameraMatrix = cameraMatrix, c_distCoeffs = distCoeffs;
3421 CvMat c_rvecM = rvecM, c_tvecM = tvecM;
3422
3423 double reprojErr = cvCalibrateCamera2(&c_objPt, &c_imgPt, &c_npoints, imageSize,
3424 &c_cameraMatrix, &c_distCoeffs, &c_rvecM,
3425 &c_tvecM, flags, criteria );
3426
3427 bool rvecs_needed = _rvecs.needed(), tvecs_needed = _tvecs.needed();
3428
3429 if( rvecs_needed )
3430 _rvecs.create((int)nimages, 1, CV_64FC3);
3431 if( tvecs_needed )
3432 _tvecs.create((int)nimages, 1, CV_64FC3);
3433
Using Linux Ubuntu 12.04, OpenCV 2.4.8, gcc 4.6.3, Eclipse, ...
You are declaring object_points and image_points within the while loop, but you probably want to put the declarations outside of the loop. Otherwise, the lists will (effectively) be cleared after each iteration.
The user presses the first key to detect the checkerboards, and the boards are written to object_points and image_points. Then the user presses the key to calibrate the camera. Processing of the first key ends, object_points and image_points go out of scope (and are cleared), processing of the second key starts, and calibrateCamera is called with an empty object_points array, so it fails.
You should also check that object_points is not empty before calling calibrateCamera, as in the sketch below.
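A minimal sketch of the fix, reusing the names from the question (the calibration variables are assumed to be hoisted out of the key handler as well):
// Declared once, before the capture loop, so saved boards accumulate
// across iterations instead of being destroyed each time around:
vector<vector<Point3f> > object_points;
vector<vector<Point2f> > image_points;

while (1) {
    // ... frame capture and findChessboardCorners as before ...
    // on [space]: image_points.push_back(corners);
    //             object_points.push_back(obj);

    if (key == 115) { // 's' pressed: begin calibration
        if (object_points.empty()) {
            cout << "No boards captured yet, skipping calibration" << endl;
            continue;
        }
        calibrateCamera(object_points, image_points, frameR.size(),
                        cameraMatrix, distCoeffs, rvecs, tvecs);
    }
}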
You may also find the source code in the samples/cpp/tutorial_code/calib3d/camera_calibration/ folder of the OpenCV source library
I have to use an IDS uEye camera (model UI1210SE-C) under Visual C++ 2010 and OpenCV.
I need to take a photo every time there is a request from a client, so when needed I take a single shot, which I then have to save in an OpenCV Mat image.
How do I do this?
I have downloaded the SDK and initialized the camera, which is recognized by my PC, but I don't understand how to take a photo and save it in a Mat without issues.
This is my current code. It works, but the images have a RED tone and are not all different: the first and second are identical, the third different... I don't understand why...
#define CAPTURE_WIDTH 500
#define CAPTURE_HEIGHT 375
// Allocate the image matrix for the frame captured by the camera
Mat frame(CAPTURE_HEIGHT, CAPTURE_WIDTH,CV_8UC3);
// Initialization variables for the IDS uEye UI1220SE-C camera
HIDS hCam = 1; // Open camera with ID 1
int BITS_PER_PIXEL = 24;
int pWidth = CAPTURE_WIDTH;
int pHeight = CAPTURE_HEIGHT;
SENSORINFO sensor_info;
CAMINFO camera_info;
// memory pointers
char* m_pcImageMemory;
int m_lMemoryId;
// Clear memory left over from previous photos
if (hCam != 0){
is_FreeImageMem (hCam,m_pcImageMemory,m_lMemoryId);
is_ExitCamera(hCam);
}
// initialize the camera
int initcamera = is_InitCamera(&hCam, NULL);
if(initcamera != IS_SUCCESS)
{
cout<<endl<<"Impossibile inizializzare la telecamera"<<endl;
exit(-1);
}
// Get information about the camera
int camerainfo = is_GetCameraInfo (hCam, &camera_info);
if(camerainfo != IS_SUCCESS)
{
printf("Impossibile acquisire le informazioni della telecamera");
exit(-1);
}
// Get information about the camera sensor
int sensorinfo = is_GetSensorInfo (hCam, &sensor_info);
if(sensorinfo != IS_SUCCESS)
{
printf("Impossibile acquisire le informazioni del sensore");
exit(-1);
}
// Print camera/sensor information
cout<<endl<<"<<< CARATTERISTICHE DELLA TELECAMERA COLLEGATA >>>"<<endl;
cout<<"Numero seriale: " << camera_info.SerNo << endl;
cout << "Produttore: " << camera_info.ID << endl;
cout << "Modello: " << sensor_info.strSensorName << endl;
cout << "Dimensioni massime per l'immagine: " << sensor_info.nMaxWidth << "x" << sensor_info.nMaxHeight << endl << endl;
// Set BGR24 color mode
int colormode = is_SetColorMode(hCam, IS_CM_BGR8_PACKED);
//int colormode = is_SetColorMode(hCam, IS_SET_CM_RGB24);
if(colormode != IS_SUCCESS)
{
printf("Impossibile impostare il modo di colore");
exit(-1);
}
// set the dimensions of the images I want to capture
int pXPos = (sensor_info.nMaxWidth);
int pYPos = (sensor_info.nMaxHeight);
// Initialize camera memory
int rit = is_AllocImageMem (hCam,pXPos,pYPos, 24, &m_pcImageMemory, &m_lMemoryId);
if(rit != IS_SUCCESS)
{
cout<<endl<<"IMPOSSIBILE INIZIALIZZARE LA MEMORIA"<<endl;
system("PAUSE");
exit(-1);
}
cout<<endl<<"Memoria inizializzata"<<endl;
// activate the memory location
int rat = is_SetImageMem (hCam, m_pcImageMemory, m_lMemoryId);
if(rat != IS_SUCCESS)
{
cout<<endl<<"IMPOSSIBILE ATTIVARE LA MEMORIA"<<endl;
system("PAUSE");
exit(-1);
}
cout<<endl<<"Memoria Attivata"<<endl;
// color correction settings
double strenght_factor = 1.0;
int colorcorrection = is_SetColorCorrection(hCam, IS_CCOR_ENABLE, &strenght_factor);
// white balance settings
double pval = 1;
int whiteb = is_SetAutoParameter(hCam, IS_SET_ENABLE_AUTO_WHITEBALANCE, &pval, 0);
// gain correction settings
double gval = 1;
int gains = is_SetAutoParameter(hCam, IS_SET_ENABLE_AUTO_GAIN, &gval, 0);
// begin the image capture phase
int dummy;
char *pMem, *pLast;
// capture loop
for (int i=0;i<3;i++)
{
int sho = is_FreezeVideo(hCam, IS_WAIT);
if(sho != IS_SUCCESS)
{
cout<<endl<<"IMPOSSIBILE ACQUISIRE DALLA TELECAMERA"<<endl;
system("PAUSE");
exit(-1);
}
if (sho == IS_SUCCESS){
int m_Ret = is_GetActiveImageMem(hCam, &pLast, &dummy);
int n_Ret = is_GetImageMem(hCam, (void**)&pLast);
}
IplImage* tmpImg = cvCreateImageHeader(cvSize (pXPos, pYPos), IPL_DEPTH_8U,3);
tmpImg->imageData = m_pcImageMemory;
frame = cv::cvarrToMat(tmpImg);
imshow("PROVA",frame);
waitKey(0);
}
// close and clean up the camera
int en = is_ExitCamera(hCam);
if (en == IS_SUCCESS){
cout<<endl<<"Camera chiusa correttamente"<<endl;
}
According to the manual, is_FreezeVideo is the function that takes the snapshot, but I don't understand why the pictures are RED, nor how to get the data into a Mat directly instead of going through an IplImage...
Thanks in advance to everyone who can help me...
Hmm, I tested your code on my PC and everything works perfectly. The images keep the same parameters and I get the correct outcome.
Maybe the problem is that you don't define a parameter file (.ini). To do that, you have to define it on your own, as described in the manual, and load it with
INT is_ParameterSet(HIDS hCam, UINT nCommand, void* pParam, UINT cbSizeOfParam)
I tested it on Ubuntu 12.04, Eclipse IDE for C/C++ Developers (Kepler Service Release 2), with uEye version 4.31.
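For reference, a hedged sketch of how loading such a file might look; IS_PARAMETERSET_CMD_LOAD_FILE and the wide-string path argument should be verified against your installed uEye SDK version:
// Assumed usage: load camera settings from an .ini file previously saved
// with the uEye tools. The command constant and the path type are taken
// from SDK 4.x headers and may differ in other versions.
INT nRet = is_ParameterSet(hCam, IS_PARAMETERSET_CMD_LOAD_FILE,
                           (void*)L"ueye_camera_settings.ini", 0);
if (nRet != IS_SUCCESS)
{
    cout << "Could not load the camera parameter file" << endl;
}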