I have a class with a member vector<CvRTrees*> vect. I generate many CvRTrees objects and push them onto vect. I use this function for training:
Mat trainingDataMat(trainSize, featureSize, CV_32FC1);
// fill trainingDataMat
for(int i = 0; i < LOOP; i++) {
Mat labelMat(trainSize, 1, CV_32FC1);
// fill labelMat
// learn classifier
CvRTrees *rtrees = new CvRTrees();
(*rtrees).train( trainingDataMat, CV_ROW_SAMPLE, labelMat, Mat(), Mat(), Mat(), Mat(), CvRTParams());
this->rtreesVector.push_back(rtrees);
}
And I use a function for prediction. When I run the code below, I get an error: no source.
Mat testSample(1, featureSize, CV_32FC1);
for(int k = 0; k < featureSize; k++) {
testSample.at<float>(k) = (float)this->trainInvoiceVector[i]->at(j,k);
}
for(int i = 0; i < this->rtreesVector.size(); i++) {
int response = (int)((*(this->rtreesVector[i])).predict( testSample )); // !!!! THIS LINE IS THE PROBLEM
cout << "response" << response << endl;
}
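For comparison, this is the minimal round trip I would expect to work with the same calls (a sketch assuming the OpenCV 2.4 CvRTrees API; the single-object usage is only illustrative):
// Sketch: one CvRTrees trained and queried directly (OpenCV 2.4 API assumed)
CvRTrees rtree;
rtree.train(trainingDataMat, CV_ROW_SAMPLE, labelMat, Mat(), Mat(), Mat(), Mat(), CvRTParams());
float response = rtree.predict(testSample, Mat()); // second argument is the missing-data mask
cout << "response " << response << endl;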
I am stitching three images. I want to apply warp perspective on the left side (images A and B) and the right side (images B and C). I am able to compute both H matrices, but with some weird behavior.
When I compute A + B + C, it first does B + C, then A + B. Here the H matrix for AB is incorrect, but I don't know why!
When I do only A + B, I receive "correct values". I tried everything, so I came here. It seems like there is some junk left over after computing B + C, but even though I tried .clear() and .release() on everything, the behavior stays the same. I also tried to use .clone() everywhere, which didn't help. I also checked whether the correct images are fed in; yes, they are.
I will try to simplify my code as much as I can:
// Stitcher variables
std::vector<std::pair<Mat, Mat>> images_toCalc;
std::vector<Mat> images;
std::vector<Mat> Hs;
//MAIN
images[2] = imread("E:/JOB/StitchingProject/StitchingApp/x64/Debug/i/left.jpg");
images[1] = imread("E:/JOB/StitchingProject/StitchingApp/x64/Debug/i/mid.jpg");
images[0] = imread("E:/JOB/StitchingProject/StitchingApp/x64/Debug/i/right.jpg");
stitcher->setImages(images, 3);
stitcher->runStitching2(false);
// runStitching2
for (int i = 0; i < this->images.size() - 1; i++) {
Mat image1 = this->images.at(i).clone();
Mat image2 = this->images.at(i + 1).clone();
//if (true) {
if (!isHalfAlready(i)) {
flip(image1, image1, 1);
flip(image2, image2, 1);
this->images_toCalc.push_back(std::make_pair(image2.clone(), image1.clone()));
}
else {
this->images_toCalc.push_back(std::make_pair(image1.clone(), image2.clone()));
}
}
for (int i = 0; i < images_toCalc.size(); i++) {
Mat H;
H = this->calcH(images_toCalc.at(i).first, images_toCalc.at(i).second);
this->Hs.push_back(H);
H.release();
if (this->debugLevel[3]) cout << "[" << i << "] H" << endl << Hs.at(i) << endl;
}
Now the buggy part
// this->calcH
Mat heStitcher::calcH(Mat& image1_toCalc, Mat& image2_toCalc) {
std::vector< KeyPoint > keypointsObject, keypointsScene;
Mat descriptorsObject, descriptorsScene;
Mat image1, image2;
image1 = image1_toCalc.clone();
image2 = image2_toCalc.clone();
if (greyCalc) {
cv::cvtColor(image1, image1, cv::COLOR_BGR2GRAY);
cv::cvtColor(image2, image2, cv::COLOR_BGR2GRAY);
}
Ptr<SURF> detector = SURF::create();
detector->detectAndCompute(image1, Mat(), keypointsObject, descriptorsObject);
detector->detectAndCompute(image2, Mat(), keypointsScene, descriptorsScene);
detector->clear();
vector<std::vector<DMatch>> matches;
Ptr<FlannBasedMatcher> matcher = cv::FlannBasedMatcher::create();
matcher->knnMatch(descriptorsObject, descriptorsScene, matches, 2);
matcher->clear();
double min_dist = 100;
double max_dist = 0;
std::vector< DMatch > goodMatches;
for (size_t i = 0; i < matches.size(); i++) {
if (matches[i][0].distance < 0.75f * matches[i][1].distance) {
goodMatches.push_back(matches[i][0]);
}
}
std::vector< Point2f > obj;
std::vector< Point2f > scene;
for (int i = 0; i < goodMatches.size(); i++) {
obj.push_back(keypointsObject[goodMatches[i].queryIdx].pt);
scene.push_back(keypointsScene[goodMatches[i].trainIdx].pt);
}
cv::Mat H;
cv::Mat H2;
if (obj.size() < 4) {
if (this->debugLevel[2]) cout << endl << "!!!!!! not enough similarities (less than 4) !!!!!!" << endl;
}
else {
H = findHomography(obj, scene, RANSAC);
}
image1.release();
image2.release();
descriptorsObject.release();
descriptorsScene.release();
keypointsObject.clear();
keypointsScene.clear();
obj.clear();
scene.clear();
goodMatches.clear();
matches.clear();
H2 = H.clone();
H.release();
return H2;
}
After this, warpPerspective, border cutting, etc. produce the results with the matrices shown in pictures 1 and 2 (from which it is obvious the matrices are incorrect). I cannot understand why, using the same script with the same procedure, I am getting different results.
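Since the warping step itself is not shown above, this is roughly how each computed H is applied afterwards (a simplified sketch; the canvas size and the copy region are assumptions, not my exact code):
// Sketch of the warping step, for each index i as in the loop above (canvas size and ROI are assumptions)
Mat warped;
warpPerspective(this->images.at(i + 1), warped, this->Hs.at(i), Size(this->images.at(i).cols + this->images.at(i + 1).cols, this->images.at(i).rows));
this->images.at(i).copyTo(warped(Rect(0, 0, this->images.at(i).cols, this->images.at(i).rows)));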
I have converted a Mat to a 2D array in C++ using OpenCV. To make sure that I am getting the same pixel values in both containers (Mat and array), I tried this:
cout << xma.at<float>(4,11) << endl;
cout << xma_[4][11] << endl;
xma is the Mat and xma_ is the 2D array.
The results I get are very far from each other!
Is the way I am trying to access the pixel values correct?
And here is the code:
Mat imrgb = imread("src.tif", CV_LOAD_IMAGE_COLOR);
Mat bgr[3];
split(imrgb, bgr);
double average[600][800];
for(int j=0; j<=599;j++ )
{
for(int i=0; i<=799; i++)
{
average[j][i] =((((float)(bgr[0].at<uchar>(j,i)) + (float)(bgr[1].at<uchar>(j,i)) +(float)(bgr[2].at<uchar>(j,i))))/(3));
}
}
for(int j=0; j<=599;j++ )
{
for(int i=0; i<=799; i++)
{
average[j][i] = average[j][i] / 255;
}
}
src = Mat(600, 800, CV_64F, &average);
vector<float> gx;
vector<float> mult_vec;
for(int i=0; i<vec_first.size(); i++)
{
mult_vec.push_back(vec1[i] * vec2[i]);
}
for(int i=0; i<mult_vec.size(); i++)
{
gx.push_back(exp(-mult_vec[i] / C));
}
vector<float> gy;
for(int i = 0; i < gx.size(); i++)
{
gy.push_back(gx[i]);
}
Mat gyMat=Mat(5, 1, CV_32F);
memcpy(gyMat.data, gy.data(), gy.size()*sizeof(float));
Mat gykernel;
cv::transpose(gy, gykernel);
filter2D(src, xma, -1 , gykernel, Point( -1, -1 ), 0, BORDER_DEFAULT );
for(int j=0; j<=599; j++)
{
for(int i=0; i<=799; i++)
{
xma_[j][i] = (double)(xma.at<uchar>(j,i));
}
}
First I read an RGB image, then I split it into 3 channels and took the average of the 3 channels. I then divided the values by 255 to get the grayscale image.
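For reference, the same averaging and scaling can also be written with OpenCV arithmetic instead of the manual loops (a sketch, assuming an 8-bit BGR input; results should match the loops above):
// Sketch: channel average scaled to [0,1] using OpenCV arithmetic (8-bit BGR input assumed)
Mat chans[3], sum, tmp, gray;
split(imrgb, chans);
chans[0].convertTo(sum, CV_64F); // promote to double so the per-pixel sum cannot overflow
chans[1].convertTo(tmp, CV_64F); sum += tmp;
chans[2].convertTo(tmp, CV_64F); sum += tmp;
gray = sum / (3.0 * 255.0); // average of the three channels, divided by 255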
Excuse the hasty code dump. I'm working with OpenCV at the moment. I've been stuck on an error for 2 hours.
- (IBAction)faceRecognition:(id)sender {
// load images
vector<Mat> images;
vector<int> labels;
int numberOfSubjects = 4;
int numberPhotosPerSubject = 3;
for (int i=1; i<=numberOfSubjects; i++) {
for (int j=1; j<=numberPhotosPerSubject; j++) {
// create grayscale images
Mat src = [self CreateIplImageFromUIImage:[UIImage imageNamed:[NSString stringWithFormat:@"%d_%d.jpg", i, j]]];
Mat dst;
cv::cvtColor(src, dst, CV_BGR2GRAY);
images.push_back(dst);
labels.push_back(i);
}
}
// get test instances
Mat testSample = images[images.size() - 1];
int testLabel = labels[labels.size() - 1];
// ... and delete last element
images.pop_back();
labels.pop_back();
// build the Fisherfaces model
Fisherfaces model(images, labels);
// test model
int predicted = model.predict(testSample);
cout << "predicted class = " << predicted << endl;
cout << "actual class = " << testLabel << endl;
}
I can't figure out how to fix this:
Variable type 'cv::Fisherfaces' is an abstract class
It appears under "//build the Fisherfaces model" in the bottom.
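For reference, a minimal sketch of the factory-style interface that avoids constructing the abstract class directly (this assumes the OpenCV 2.4 contrib FaceRecognizer API is available; it is not necessarily the right fix here):
// Sketch: factory-based FaceRecognizer (requires <opencv2/contrib/contrib.hpp> in OpenCV 2.4)
Ptr<FaceRecognizer> model = createFisherFaceRecognizer();
model->train(images, labels);
int predicted = model->predict(testSample);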
Any assistance greatly appreciated.
I am trying to access the values in an OpenCV Mat, but when I print the values I get the following:
\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377
My code is as follows:
cv::Mat oneComp(yb-yt, xb-xt, CV_8U);
cv::Mat currentLabelImage(drawing.rows,drawing.cols, CV_8U);
currentLabelImage = (drawing == n+1);
currentLabelImage.convertTo(currentLabelImage, CV_32F);
for (int i = 0; i<yb-yt; i++){
for(int j = 0; j<xb-xt; j++){
std::cout << currentLabelImage.at<uchar>(i+yt, j+xt);
}
}
I am doing this on Objective-C Xcode.
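For illustration, a small sketch of how the element type interacts with at<>() and with cout (my assumption is that the CV_32F conversion versus the at<uchar>() read is relevant here; someMat8U below is a hypothetical 8-bit Mat):
// Sketch: after convertTo(..., CV_32F) the elements are floats, so they would be read as
float v = currentLabelImage.at<float>(i + yt, j + xt);
std::cout << v << " ";
// a uchar streamed to cout prints as a raw character (255 shows as \377); cast to int for a number:
// std::cout << (int)someMat8U.at<uchar>(r, c) << " ";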
I am trying to make a classifier using OpenCV 3.0.0's SVM (cv::ml::SVM) and a color histogram. I have already tried to make my own using the following code to build the datasets:
int labels[510];
if (label.compare("raw") == 0){ // compare() returns 0 on a match
for (int i = 0; i < 509; i++){
labels[i] = 1;
}
}
else if (label.compare("ripe") == 0){
for (int i = 0; i < 509; i++){
labels[i] = 2;
}
}
else if (label.compare("rotten") == 0){
for (int i = 0; i < 509; i++){
labels[i] = 3;
}
}
float trainingData[510][2];
for (int i = 0; i < 254; i++){
trainingData[i][0] = r_hist.at<float>(i); // valid columns of a [510][2] array are 0 and 1
trainingData[i][1] = i;
}
int j = 0;
for (int i = 255; i < 509; i++){
trainingData[i][0] = g_hist.at<float>(j);
trainingData[i][1] = i;
j++;
}
And this code for the SVM:
int width = 512, height = 512;
Mat image = Mat::zeros(height, width, CV_8UC3);
Mat labelsMat(510, 1, CV_32SC1, labels);
Mat trainingDataMat(510, 2, CV_32FC1, trainingData);
Ptr < cv::ml::SVM > svm = SVM::create();
svm = cv::Algorithm::load<ml::SVM>("svm.xml");
svm->setC(0.01);
svm->setType(ml::SVM::C_SVC);
svm->setKernel(ml::SVM::LINEAR);
svm->setTermCriteria((cvTermCriteria(TermCriteria::MAX_ITER, 100, 1e6)));
svm->train(trainingDataMat, ROW_SAMPLE, labelsMat);
svm->save("svm.xml");
The problem with the code above is that it won't save properly. Is there a better way to do it?
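For reference, a minimal sketch of the create, configure, train, save sequence without the load call in between (assuming the OpenCV 3.x cv::ml API; the file name is only an example):
// Sketch: train first, save afterwards; load only when reusing an already saved model
Ptr<ml::SVM> svm = ml::SVM::create();
svm->setType(ml::SVM::C_SVC);
svm->setKernel(ml::SVM::LINEAR);
svm->setC(0.01);
svm->setTermCriteria(TermCriteria(TermCriteria::MAX_ITER, 100, 1e-6));
svm->train(trainingDataMat, ml::ROW_SAMPLE, labelsMat);
svm->save("svm.xml");
Ptr<ml::SVM> loaded = Algorithm::load<ml::SVM>("svm.xml"); // later, to reuse the saved model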