C++ OpenCV Xcode

Excuse the hasty code dump. I'm working with OpenCV at the moment and have been stuck on an error for two hours.
- (IBAction)faceRecognition:(id)sender {
    // load images
    vector<Mat> images;
    vector<int> labels;
    int numberOfSubjects = 4;
    int numberPhotosPerSubject = 3;
    for (int i = 1; i <= numberOfSubjects; i++) {
        for (int j = 1; j <= numberPhotosPerSubject; j++) {
            // create grayscale images
            Mat src = [self CreateIplImageFromUIImage:[UIImage imageNamed:[NSString stringWithFormat:@"%d_%d.jpg", i, j]]];
            Mat dst;
            cv::cvtColor(src, dst, CV_BGR2GRAY);
            images.push_back(dst);
            labels.push_back(i);
        }
    }
    // get test instance ...
    Mat testSample = images[images.size() - 1];
    int testLabel = labels[labels.size() - 1];
    // ... and delete last element
    images.pop_back();
    labels.pop_back();
    // build the Fisherfaces model
    Fisherfaces model(images, labels);
    // test model
    int predicted = model.predict(testSample);
    cout << "predicted class = " << predicted << endl;
    cout << "actual class = " << testLabel << endl;
}
I can't figure out how to fix this error:
Variable type 'cv::Fisherfaces' is an abstract class
It appears on the line under "// build the Fisherfaces model" near the bottom.
Any assistance greatly appreciated.
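
For what it's worth, in OpenCV 2.4's contrib module cv::FaceRecognizer and its face-recognition subclasses are exposed as abstract interfaces, so they cannot be constructed directly; the documented route is the factory function. A minimal sketch, assuming OpenCV 2.4 and keeping the surrounding code unchanged:

#include <opencv2/contrib/contrib.hpp>

// Fisherfaces cannot be instantiated directly; create it via the factory,
// then train and predict through the FaceRecognizer interface.
cv::Ptr<cv::FaceRecognizer> model = cv::createFisherFaceRecognizer();
model->train(images, labels);               // vector<Mat>, vector<int>
int predicted = model->predict(testSample);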

Related

OpenCV c++ findHomography not returning same results

I am stitching three images. I want to apply a warp perspective on the left side (images A and B) and on the right side (images B and C). I am able to compute both H matrices, but I get some weird behavior.
When I compute A + B + C, it first does B + C, then A + B. Here the H matrix for AB is incorrect, but I don't know why!
When I do only A + B, I receive "correct values". I tried everything, so I came here. It seems like some junk is left behind after computing B + C, but even after calling .clear() and .release() on everything, the behavior stayed the same. I also tried using .clone() everywhere, which didn't help, and I checked that the correct images are fed in (they are).
I will try to simplify my code as much as I can:
// Stitcher variables
std::vector<std::pair<Mat, Mat>> images_toCalc;
std::vector<Mat> images;
std::vector<Mat> Hs;

// MAIN
images.resize(3); // the vector must be sized before indexed assignment
images[2] = imread("E:/JOB/StitchingProject/StitchingApp/x64/Debug/i/left.jpg");
images[1] = imread("E:/JOB/StitchingProject/StitchingApp/x64/Debug/i/mid.jpg");
images[0] = imread("E:/JOB/StitchingProject/StitchingApp/x64/Debug/i/right.jpg");
stitcher->setImages(images, 3);
stitcher->runStitching2(false);

// runStitching2
for (int i = 0; i < this->images.size() - 1; i++) {
    Mat image1 = this->images.at(i).clone();
    Mat image2 = this->images.at(i + 1).clone();
    //if (true) {
    if (!isHalfAlready(i)) {
        flip(image1, image1, 1);
        flip(image2, image2, 1);
        this->images_toCalc.push_back(std::make_pair(image2.clone(), image1.clone()));
    } else {
        this->images_toCalc.push_back(std::make_pair(image1.clone(), image2.clone()));
    }
}
for (int i = 0; i < images_toCalc.size(); i++) {
    Mat H;
    H = this->calcH(images_toCalc.at(i).first, images_toCalc.at(i).second);
    this->Hs.push_back(H);
    H.release();
    if (this->debugLevel[3]) cout << "[" << i << "] H" << endl << Hs.at(i) << endl;
}
Now the buggy part:
// this->calcH
Mat heStitcher::calcH(Mat& image1_toCalc, Mat& image2_toCalc) {
    std::vector<KeyPoint> keypointsObject, keypointsScene;
    Mat descriptorsObject, descriptorsScene;
    Mat image1, image2;
    image1 = image1_toCalc.clone();
    image2 = image2_toCalc.clone();
    if (greyCalc) {
        cv::cvtColor(image1, image1, cv::COLOR_BGR2GRAY);
        cv::cvtColor(image2, image2, cv::COLOR_BGR2GRAY);
    }
    Ptr<SURF> detector = SURF::create();
    detector->detectAndCompute(image1, Mat(), keypointsObject, descriptorsObject);
    detector->detectAndCompute(image2, Mat(), keypointsScene, descriptorsScene);
    detector->clear();
    vector<std::vector<DMatch>> matches;
    Ptr<FlannBasedMatcher> matcher = cv::FlannBasedMatcher::create();
    matcher->knnMatch(descriptorsObject, descriptorsScene, matches, 2);
    matcher->clear();
    // Lowe's ratio test: keep a match only if it clearly beats the runner-up
    std::vector<DMatch> goodMatches;
    for (size_t i = 0; i < matches.size(); i++) {
        if (matches[i][0].distance < 0.75f * matches[i][1].distance) {
            goodMatches.push_back(matches[i][0]);
        }
    }
    std::vector<Point2f> obj;
    std::vector<Point2f> scene;
    for (int i = 0; i < goodMatches.size(); i++) {
        obj.push_back(keypointsObject[goodMatches[i].queryIdx].pt);
        scene.push_back(keypointsScene[goodMatches[i].trainIdx].pt);
    }
    cv::Mat H;
    cv::Mat H2;
    if (obj.size() < 4) {
        if (this->debugLevel[2]) cout << endl << "!!!!!! not enough similarities (less than 4) !!!!!!" << endl;
    } else {
        H = findHomography(obj, scene, RANSAC);
    }
    image1.release();
    image2.release();
    descriptorsObject.release();
    descriptorsScene.release();
    keypointsObject.clear();
    keypointsScene.clear();
    obj.clear();
    scene.clear();
    goodMatches.clear();
    matches.clear();
    H2 = H.clone();
    H.release();
    return H2;
}
After this come warpPerspective, border cutting, etc., which produce the matrices shown in pictures 1 and 2 (from which the matrices are obviously incorrect). I cannot understand why, with the same code and the same procedure, I am getting different results.
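
One possibility worth ruling out (an assumption, not something the question confirms): findHomography with RANSAC is randomized, so two runs on the same point sets can return slightly different matrices, and a borderline match set can tip into a clearly wrong H. Pinning OpenCV's RNG seed makes the estimate repeatable for debugging (cv::setRNGSeed is available in OpenCV 3.x/4.x; on older builds, cv::theRNG().state = seed has the same effect):

// Sketch: make RANSAC repeatable, then compare results across runs.
cv::setRNGSeed(0);
cv::Mat Ha = cv::findHomography(obj, scene, cv::RANSAC);
cv::setRNGSeed(0);
cv::Mat Hb = cv::findHomography(obj, scene, cv::RANSAC);
// With identical seeds and identical points, Ha and Hb should match;
// if AB still differs between the A+B and A+B+C runs, the inputs must differ.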

strange result of graphcut in opencv

I found the code on Stack Overflow and made some changes, but the result is strange: the mask is all white or all black. I want to know how to use GraphCut to find the seam line between two overlapping images. Thanks!
int main()
{
    Mat image0 = imread("F:\\1\\1.jpg");
    Mat image1 = imread("F:\\1\\2.jpg");
    image0.convertTo(image0, CV_32F, 1.0 / 255.0);
    cv::imshow("image0", image0);
    image1.convertTo(image1, CV_32F, 1.0 / 255.0);
    // our corners are just at (0,0)
    cv::Point corner1;
    corner1.x = 0;
    corner1.y = 0;
    cv::Point corner2;
    corner2.x = 0;
    corner2.y = 0;
    std::vector<cv::Point> corners;
    corners.push_back(corner1);
    corners.push_back(corner2);
    std::vector<cv::Mat> masks;
    //Mat imageMask0;
    //Mat imageMask1;
    Mat imageMask0(image0.size(), CV_8U);
    imageMask0(Rect(0, 0, imageMask0.cols, imageMask0.rows)).setTo(255);
    Mat imageMask1(image1.size(), CV_8U);
    imageMask1(Rect(0, 0, imageMask1.cols, imageMask1.rows)).setTo(255);
    masks.push_back(imageMask0);
    masks.push_back(imageMask1);
    std::vector<cv::Mat> sources;
    sources.push_back(image0);
    sources.push_back(image1);
    cv::detail::GraphCutSeamFinder *seam_finder = new cv::detail::GraphCutSeamFinder();
    seam_finder->find(sources, corners, masks);
    cv::imshow("mask", masks[1]);
    masks[0].convertTo(masks[0], CV_8UC3, 255);
    cv::imwrite("F:\\1\\998.jpg", masks[0]);
    masks[1].convertTo(masks[1], CV_8UC3, 255);
    cv::imwrite("F:\\1\\999.jpg", masks[1]);
    printf("%lu\n", masks.size());
    //for(int i = 0; i < masks.size(); i++)
    //{
    //    std::cout << "MASK = " << std::endl << " " << masks.at(i) << std::endl << std::endl;
    //}
    cv::waitKey();
    return 0;
}
How can I fix this code to make it work?
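
One hedged observation (an assumption, since the images themselves aren't shown): with both corners at (0, 0) the two images overlap completely, and in that degenerate case the minimum-cost cut can legitimately hand the whole overlap to a single image, which shows up as an all-white mask for one input and an all-black mask for the other. A quick experiment is to give the second image a partial offset so a genuine seam region exists:

// Sketch: create a partial overlap instead of a complete one.
// (Assumes image0 is wide enough for a half-width offset.)
corner2.x = image0.cols / 2; // second image starts halfway across the first
corner2.y = 0;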

OpenCV CVTrees : double free or corruption on Eclipse

I have a class with a member vector<CvRTrees*> vect. I generate many CvRTrees objects and push them onto vect. I use this function for training:
Mat trainingDataMat(trainSize, featureSize, CV_32FC1);
// fill trainingDataMat
for (int i = 0; i < LOOP; i++) {
    Mat labelMat(trainSize, 1, CV_32FC1);
    // fill labelMat
    // learn classifier
    CvRTrees *rtrees = new CvRTrees();
    rtrees->train(trainingDataMat, CV_ROW_SAMPLE, labelMat, Mat(), Mat(), Mat(), Mat(), CvRTParams());
    this->rtreesVector.push_back(rtrees);
}
And I use a function for prediction. When I run the code below, I get the "double free or corruption" error with no source location shown.
Mat testSample(1, featureSize, CV_32FC1);
for (int k = 0; k < featureSize; k++) {
    testSample.at<float>(k) = (float)this->trainInvoiceVector[i]->at(j, k);
}
for (int i = 0; i < this->rtreesVector.size(); i++) {
    int response = (int)this->rtreesVector[i]->predict(testSample); // !!!! THIS LINE IS THE PROBLEM
    cout << "response" << response << endl;
}
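
"double free or corruption" usually means the same heap block is freed twice, often by a manual delete loop plus another owner outside the snippet shown. A hedged sketch, assuming OpenCV 2.4: holding the trees in cv::Ptr (reference counted) guarantees each CvRTrees is deleted exactly once, which rules out a double delete in the class's own cleanup code:

// Sketch with reference-counted ownership (OpenCV 2.4-era API assumed):
std::vector<cv::Ptr<CvRTrees> > rtreesVector;

for (int i = 0; i < LOOP; i++) {
    cv::Ptr<CvRTrees> rtrees = new CvRTrees();
    rtrees->train(trainingDataMat, CV_ROW_SAMPLE, labelMat,
                  Mat(), Mat(), Mat(), Mat(), CvRTParams());
    rtreesVector.push_back(rtrees); // no manual delete needed anywhere
}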

how to use opencv graphcut

I am trying to get a graphcut to cut out an eye in a given image and replace it with another eye.
void imageStitching::findEye(cv::Mat originalImage, cv::Mat eye)
{
    // our corners are just at (0,0)
    cv::Point corner1;
    corner1.x = 0;
    corner1.y = 0;
    cv::Point corner2;
    corner2.x = 0;
    corner2.y = 0;
    std::vector<cv::Point> corners;
    corners.push_back(corner1);
    corners.push_back(corner2);
    std::vector<cv::Mat> masks;
    std::vector<cv::Mat> sources;
    sources.push_back(originalImage);
    sources.push_back(eye);
    cv::detail::GraphCutSeamFinder *seam_finder = new cv::detail::GraphCutSeamFinder();
    seam_finder->find(sources, corners, masks);
    printf("%lu\n", masks.size());
    for (int i = 0; i < masks.size(); i++)
    {
        std::cout << "MASK = " << std::endl << " " << masks.at(i) << std::endl << std::endl;
    }
}
But the code crashes on
seam_finder->find(sources, corners, masks);
with the message:
Thread 1: EXC_BAD_ACCESS(code=1, address=0x0)
How do I use OpenCV to get a graph cut of an object at a given location in an image?
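
In the working snippet from the previous question, the masks are created and filled with 255 before find() is called; here the masks vector is passed in empty. Since GraphCutSeamFinder::find reads the masks as well as updating them, empty masks plausibly explain the null-pointer EXC_BAD_ACCESS. A hedged sketch of the missing preparation (the sources also need to be converted to CV_32F):

// Sketch: convert sources to float and supply one pre-filled CV_8U mask per image.
originalImage.convertTo(originalImage, CV_32F, 1.0 / 255.0);
eye.convertTo(eye, CV_32F, 1.0 / 255.0);

masks.push_back(cv::Mat(originalImage.size(), CV_8U, cv::Scalar(255)));
masks.push_back(cv::Mat(eye.size(), CV_8U, cv::Scalar(255)));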

Access pixels with Mat OpenCV

I would like to access pixels in RGB with OpenCV 2.3.
I'm trying the code below, but it behaves as if every pixel were equal frame after frame, because I get no output. The images come from my webcam and I can see them.
By the way, RED = 0.
Thanks!
Mat frame;
Mat oldFrame;
VideoCapture cap(0);
cap >> oldFrame;
int sumFramePix = oldFrame.cols * oldFrame.rows;
int nbChannels = oldFrame.channels();
cout << "NbcHANNELs : " << nbChannels << endl;
imshow("Video 1", oldFrame);
while (1)
{
    cap >> frame;
    imshow("Video 1", frame);
    for (int i = 0; i < frame.rows; i++)
    {
        for (int j = 0; j < frame.cols; j++)
        {
            if (frame.ptr<uchar>(i)[nbChannels * j + RED] < oldFrame.ptr<uchar>(i)[nbChannels * j + RED])
            {
                cout << "==============-";
            }
        }
    }
    oldFrame = frame;
    if (waitKey(300) >= 0) break;
}
Change
oldFrame = frame;
to
oldFrame = frame.clone();
You are creating two Mat objects that point to the same data. clone() makes a deep copy.
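A minimal illustration of the shared-header behaviour (a standalone sketch, not from the question):

#include <opencv2/opencv.hpp>

cv::Mat a = cv::Mat::zeros(1, 1, CV_8U);
cv::Mat b = a;             // shallow copy: b shares a's pixel buffer
b.at<uchar>(0) = 255;      // modifies a as well
cv::Mat c = a.clone();     // deep copy: c owns its own buffer
c.at<uchar>(0) = 0;        // a and b keep the value 255

In the loop above, after oldFrame = frame the next cap >> frame typically decodes into that very same buffer, so the comparison always sees identical pixels and nothing is printed.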