I am trying to compile the following code to compute the Chamfer distance. However, I get the following error while compiling it. I am using OpenCV 3.2 on Ubuntu 18.04, 64-bit.
fatal error: opencv2/contrib/contrib.hpp: No such file or directory
The Synaptic package manager says that libopencv-contrib-dev and libopencv-contrib3.2 are installed at /usr/include/opencv2 and /usr/lib/x86_64-linux-gnu respectively, but I found no file or folder named contrib in either location.
The code for the Chamfer distance computation is below:
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/contrib/contrib.hpp"
#include <iostream>
using namespace cv;
using namespace std;
int main( int argc, const char** argv )
{
Mat img = imread(argv[1], 0);
Mat tpl = imread(argv[2], 0);
Mat cimg;
cvtColor(img, cimg, CV_GRAY2BGR);
vector<vector<Point> > results;
vector<float> costs;
int best = chamerMatching( img, tpl, results, costs );
return 0;
}
My question is: how do I add the correct headers and get the above Chamfer distance code working in OpenCV 3.2?
It looks like you are including a header from the old contrib module, which existed in OpenCV 2.4.x and was removed in OpenCV 3.0. The chamerMatching function was dropped along with it and, as far as I can tell, was never migrated to the separate opencv_contrib repository; the Ubuntu libopencv-contrib packages for 3.2 ship those newer opencv_contrib modules, which is why there is no contrib.hpp on your system. If a general shape-similarity measure is enough for you, you can try the matchShapes function from the imgproc module instead. Note that it compares the Hu moments of two contours rather than computing a true Chamfer distance, but it does return a measure of how similar two shapes are:
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/highgui/highgui.hpp"
#include <iostream>
using namespace cv;
using namespace std;
int main( int argc, const char** argv )
{
Mat img = imread(argv[1], 0);
Mat tpl = imread(argv[2], 0);
if (img.empty() || tpl.empty())
{
    cout << "Could not load the input images" << endl;
    return -1;
}
// findContours expects a binary input, so threshold the grayscale images first
threshold(img, img, 128, 255, THRESH_BINARY);
threshold(tpl, tpl, 128, 255, THRESH_BINARY);
// Find the contours of the images
vector<vector<Point> > img_contours;
vector<vector<Point> > tpl_contours;
findContours(img, img_contours, RETR_EXTERNAL, CHAIN_APPROX_NONE);
findContours(tpl, tpl_contours, RETR_EXTERNAL, CHAIN_APPROX_NONE);
if (img_contours.empty() || tpl_contours.empty())
{
    cout << "No contours found" << endl;
    return -1;
}
// Compare the first contour of each image via their Hu moments
// (this is a similarity score, not a true Chamfer distance)
double similarity = matchShapes(img_contours[0], tpl_contours[0], CONTOURS_MATCH_I1, 0);
cout << "Shape similarity (lower means more similar): " << similarity << endl;
return 0;
}
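If you specifically need a Chamfer-style edge distance rather than a Hu-moment score, one way to approximate it with plain OpenCV 3.2 is cv::distanceTransform. The helper below is only a sketch of that idea (chamferScore is a made-up name, not an OpenCV function): it assumes both arguments are same-sized 8-bit binary edge maps, e.g. produced by cv::Canny, and it scores a single fixed alignment instead of sliding the template over the image the way the old chamerMatching did.
#include "opencv2/imgproc/imgproc.hpp"
// Sketch only: average distance from each template edge pixel to the
// nearest image edge pixel, for one fixed alignment of the two edge maps.
double chamferScore(const cv::Mat& edgeImg, const cv::Mat& edgeTpl)
{
    // distanceTransform measures the distance to the nearest zero pixel,
    // so invert the edge map to put zeros on the edges
    cv::Mat inverted, dist;
    cv::bitwise_not(edgeImg, inverted);
    cv::distanceTransform(inverted, dist, cv::DIST_L2, 3);
    double sum = 0.0;
    int count = 0;
    for (int y = 0; y < edgeTpl.rows; ++y)
        for (int x = 0; x < edgeTpl.cols; ++x)
            if (edgeTpl.at<uchar>(y, x))
            {
                sum += dist.at<float>(y, x);
                ++count;
            }
    return count > 0 ? sum / count : -1.0;
}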
Related
I am having a bit of trouble trying to get OpenCV face detection to work in Qt with my Basler camera. I have tried many different approaches, using many different sample codes found online, but I just can't seem to get it to work at all; in addition, the attempts I have made have lowered my frame rate.
The code I use to capture video with the Basler camera works great; I'm just having trouble implementing the face detection part. I will paste the code I have so far for the camera and OpenCV below. The code does get me a few red boxes appearing now and then, but it isn't stable. I am also getting this error:
Failed to load OpenCL runtime
I'm not sure what I am doing wrong. Also, is there a way to implement the face detection without lowering the frame rate? It is already slow.
#include "mainwindow.h"
#include "ui_mainwindow.h"
#include <opencv2/opencv.hpp>
#include <pylon/PylonIncludes.h>
//#include <pylon/PylonGUI.h>
//#ifdef PYLON_WIN_BUILD
//#include <pylon/PylonGUI.h>
//#endif
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/core/core.hpp"
#include <stdio.h>
#include <iostream>
#include <time.h>
#include <stdlib.h>
using namespace cv;
// Namespace for using pylon objects.
using namespace Pylon;
// Namespace for using cout.
using namespace std;
static const uint32_t c_countOfImagesToGrab = 100;
cv::CascadeClassifier faceCade;
String faceCascadeName = "/usr/share/OpenCV/haarcascades/haarcascade_frontalface_alt.xml";
String FaceDetectWindow = "Face Detector Window";
String FaceDetectGrayWindow = "Face Detector Gray Window";
size_t i;
vector<Rect> faces;
cv::Mat camFrames, grayFrames;
int main()
{
// The exit code of the sample application.
int exitCode = 0;
// Automagically call PylonInitialize and PylonTerminate to ensure
// the pylon runtime system is initialized during the lifetime of this object.
Pylon::PylonAutoInitTerm autoInitTerm;
faceCade.load( faceCascadeName );
CGrabResultPtr ptrGrabResult;
namedWindow("CV_Image",WINDOW_AUTOSIZE);
CInstantCamera camera( CTlFactory::GetInstance().CreateFirstDevice());
cout << "Using device " << camera.GetDeviceInfo().GetModelName() << endl;
camera.Open();
GenApi::CIntegerPtr width(camera.GetNodeMap().GetNode("Width"));
GenApi::CIntegerPtr height(camera.GetNodeMap().GetNode("Height"));
Mat cv_img(height->GetValue(), width->GetValue(), CV_8UC3); // cv::Mat takes rows (height) first, then cols (width)
camera.StartGrabbing();
CPylonImage image;
CImageFormatConverter fc;
fc.OutputPixelFormat = PixelType_BGR8packed;
while(camera.IsGrabbing()){
camera.RetrieveResult( 5000, ptrGrabResult, TimeoutHandling_ThrowException);
if (ptrGrabResult->GrabSucceeded()){
fc.Convert(image, ptrGrabResult);
cv_img = cv::Mat(ptrGrabResult->GetHeight(), ptrGrabResult->GetWidth(), CV_8UC3,(uint8_t*)image.GetBuffer());
//cvtColor(cv_img, grayFrames, cv::COLOR_BGR2GRAY);
//equalizeHist(grayFrames, grayFrames);
faceCade.detectMultiScale(cv_img, faces, 1.1, 2, 0, Size(160, 160));
for (int i = 0; i < faces.size(); i++)
{
//Mat faceROI = grayFrames(faces[i]);
rectangle(cv_img, Rect(faces[i].x - 25,faces[i].y - 25,faces[i].width + 35 ,faces[i].height + 35), Scalar(0, 0, 255), 1, 1, 0);
Point center(faces[i].x + faces[i].width * 0.5,faces[i].y + faces[i].height * 0.5);
}
imshow("CV_Image",cv_img);
//imshow("FaceDetectGrayWindow", grayFrames);
if(waitKey(30)==27){
camera.StopGrabbing();
}
}
}
}
Thank you
I'm not quite sure about this, but detectMultiScale works with images of type CV_8U (8-bit, single channel), and as far as I can see you are passing it a CV_8UC3 image (8-bit, three channels). You need to convert your image to grayscale first; I see you did that in your code, but you commented it out!
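As a rough sketch (reusing the variable names from your code, so adjust as needed), the conversion would go right before the detection call:
// Run the cascade on an equalized grayscale copy instead of the BGR frame
cv::Mat grayFrame;
cvtColor(cv_img, grayFrame, cv::COLOR_BGR2GRAY);
equalizeHist(grayFrame, grayFrame);
faceCade.detectMultiScale(grayFrame, faces, 1.1, 2, 0, Size(160, 160));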
Take a look at this link: opencv_face_detection.
Maybe that will fix your problem. Also, some people advise installing the OpenCL runtime:
sudo apt-get install ocl-icd-opencl-dev
I am trying to set up a bag of visual words using OpenCV 3.0. I have looked around a bit, and all I seem to be able to find is code compatible only with versions in the 2.x domain. As of now, this is what I have:
#include <opencv2/core/core.hpp>
#include "opencv2/highgui/highgui.hpp"
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/features2d/features2d.hpp>
#include <iostream>
#include <stdio.h>
using namespace std;
using namespace cv;
int main(int argc, const char** argv) {
Ptr<FeatureDetector> features;
Ptr<DescriptorExtractor> descriptors;
Ptr<DescriptorMatcher> matcher;
int MAX_ITER = 100;
int EPS = 2;
TermCriteria tc(MAX_ITER + EPS,1,0.001);
int dictSize = 1000;
int retries = 1;
int flags = KMEANS_PP_CENTERS;
BOWKMeansTrainer bowTrainer(dictSize,tc,retries,flags);
BOWImgDescriptorExtractor bowDE(descriptors,matcher);
Mat img1 = imread("/Users/Lucas/Desktop/pic2.jpg");
Mat img2 = imread("/Users/Lucas/Desktop/2.jpg");
vector<KeyPoint> keypoints,keypoints2;
features->detect(img1, keypoints);
features->detect(img2, keypoints2);
Mat myFeatures;
Mat myFeatures2;
descriptors->compute(img1, keypoints, myFeatures);
descriptors->compute(img2, keypoints2, myFeatures2);
bowTrainer.add(myFeatures);
bowTrainer.add(myFeatures2);
Mat dictionary = bowTrainer.cluster();
bowDE.setVocabulary(dictionary);
cout << dictionary << endl;
return 0;
}
I have put this together using a few tutorials and snippets, but I am running into an issue. When the program gets to
features->detect(img1, keypoints);
it exits with a segmentation fault 11, whatever that means. Could someone help me and point out what I am doing wrong?
You have to create your FeatureDetector and DescriptorExtractor first; at the moment you've got null-pointer instances (that's the cause of your segfault).
#include <opencv2/xfeatures2d.hpp>
...
Ptr<FeatureDetector> features = xfeatures2d::SIFT::create();
Ptr<DescriptorExtractor> descriptors = xfeatures2d::SIFT::create();
Ptr<DescriptorMatcher> matcher = makePtr<BFMatcher>(NORM_L2);
Note that since this uses SIFT (or SURF), you will need the opencv_contrib repo installed for it to work.
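If building opencv_contrib is not an option for you, a possible alternative (just a sketch, untested against your setup) is ORB, which ships with the main features2d module:
#include <opencv2/features2d.hpp>
...
// ORB lives in the main features2d module, so no opencv_contrib is needed
Ptr<FeatureDetector> features = ORB::create();
Ptr<DescriptorExtractor> descriptors = ORB::create();
// ORB descriptors are binary, so compare them with Hamming distance
Ptr<DescriptorMatcher> matcher = makePtr<BFMatcher>(NORM_HAMMING);
// Caveat: BOWKMeansTrainer clusters with k-means, which expects CV_32F
// input, so convert the CV_8U ORB descriptors before calling bowTrainer.add():
Mat floatFeatures;
myFeatures.convertTo(floatFeatures, CV_32F);
bowTrainer.add(floatFeatures);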
I wrote a simple program in OpenCV that detects SURF features in a given image and displays the detected features in a namedWindow.
#include <iostream>
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/features2d/features2d.hpp>
using namespace cv;
int main(int argc,char** argv)
{
if(argc!=3)//Check cmd number of argumets
{
std::cout<<"Usage: "<<argv[0]<<" <image-file> <method>"<<std::endl;
return -1;
}
//LOAD THE SOURCE IMAGE
Mat Img = imread(argv[1],CV_LOAD_IMAGE_GRAYSCALE);
if(!Img.data)//Check correct image load
{
std::cout<<"Cannot read image file. Check file path!"<<std::endl;
return -1;
}
//COMPUTE FEATURES
SurfFeatureDetector detector;
std::vector<KeyPoint> features;
detector.detect(Img,features);
//SHOW RESULT
Mat ImgF;
drawKeypoints(Img,features,ImgF);
namedWindow("Features", CV_GUI_NORMAL);
imshow("Features",ImgF);
waitKey();
return 0;
}
Everything is OK; the program does what it has to do. The problem is that when I press a key to terminate the program, a crash occurs.
It doesn't crash for me... but in order for me to compile your code, I had to add
#include <opencv2/nonfree/features2d.hpp>
because SURF was moved to the nonfree module at some point (you also need to link against the opencv_nonfree library).
So, I would have to recommend trying the newest version (2.4.6 as of today).
I am trying to make the following code by Mohammad Reza Mostajabi (http://alum.sharif.ir/~mostajabi/Tutorial.html) run under Ubuntu 12.04 with OpenCV 2.4.6.1. I made some minor changes to the included libraries and added "cv::initModule_nonfree()" right at the start of main.
#include "cv.h"
#include "highgui.h"
#include "ml.h"
#include <stdio.h>
#include <iostream>
#include <opencv2/features2d/features2d.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/nonfree/nonfree.hpp>
#include <vector>
using namespace cv;
using namespace std;
using std::cout;
using std::cerr;
using std::endl;
using std::vector;
char ch[30];
//--------Using SURF as feature extractor and FlannBased for assigning a new point to the nearest one in the dictionary
Ptr<DescriptorMatcher> matcher = DescriptorMatcher::create("FlannBased");
Ptr<DescriptorExtractor> extractor = new SurfDescriptorExtractor();
SurfFeatureDetector detector(500);
//---dictionary size=number of cluster's centroids
int dictionarySize = 1500;
TermCriteria tc(CV_TERMCRIT_ITER, 10, 0.001);
int retries = 1;
int flags = KMEANS_PP_CENTERS;
BOWKMeansTrainer bowTrainer(dictionarySize, tc, retries, flags);
BOWImgDescriptorExtractor bowDE(extractor, matcher);
void collectclasscentroids() {
IplImage *img;
int i,j;
for(j=1;j<=4;j++)
for(i=1;i<=60;i++){
sprintf( ch,"%s%d%s%d%s","train/",j," (",i,").jpg");
const char* imageName = ch;
img = cvLoadImage(imageName,0);
vector<KeyPoint> keypoint;
detector.detect(img, keypoint);
Mat features;
extractor->compute(img, keypoint, features);
bowTrainer.add(features);
}
return;
}
int main(int argc, char* argv[])
{
cv::initModule_nonfree();
int i,j;
IplImage *img2;
cout<<"Vector quantization..."<<endl;
collectclasscentroids();
vector<Mat> descriptors = bowTrainer.getDescriptors();
int count=0;
for(vector<Mat>::iterator iter=descriptors.begin();iter!=descriptors.end();iter++)
{
count+=iter->rows;
}
cout<<"Clustering "<<count<<" features"<<endl;
//choosing cluster's centroids as dictionary's words
Mat dictionary = bowTrainer.cluster();
bowDE.setVocabulary(dictionary);
cout<<"extracting histograms in the form of BOW for each image "<<endl;
Mat labels(0, 1, CV_32FC1);
Mat trainingData(0, dictionarySize, CV_32FC1);
int k=0;
vector<KeyPoint> keypoint1;
Mat bowDescriptor1;
//extracting histogram in the form of bow for each image
for(j=1;j<=4;j++)
for(i=1;i<=60;i++){
sprintf( ch,"%s%d%s%d%s","train/",j," (",i,").jpg");
const char* imageName = ch;
img2 = cvLoadImage(imageName,0);
detector.detect(img2, keypoint1);
bowDE.compute(img2, keypoint1, bowDescriptor1);
trainingData.push_back(bowDescriptor1);
labels.push_back((float) j);
}
//Setting up SVM parameters
CvSVMParams params;
params.kernel_type=CvSVM::RBF;
params.svm_type=CvSVM::C_SVC;
params.gamma=0.50625000000000009;
params.C=312.50000000000000;
params.term_crit=cvTermCriteria(CV_TERMCRIT_ITER,100,0.000001);
CvSVM svm;
printf("%s\n","Training SVM classifier");
bool res=svm.train(trainingData,labels,cv::Mat(),cv::Mat(),params);
cout<<"Processing evaluation data..."<<endl;
Mat groundTruth(0, 1, CV_32FC1);
Mat evalData(0, dictionarySize, CV_32FC1);
k=0;
vector<KeyPoint> keypoint2;
Mat bowDescriptor2;
Mat results(0, 1, CV_32FC1);
for(j=1;j<=4;j++)
for(i=1;i<=60;i++){
sprintf( ch,"%s%d%s%d%s","eval/",j," (",i,").jpg");
const char* imageName = ch;
img2 = cvLoadImage(imageName,0);
detector.detect(img2, keypoint2);
bowDE.compute(img2, keypoint2, bowDescriptor2);
evalData.push_back(bowDescriptor2);
groundTruth.push_back((float) j);
float response = svm.predict(bowDescriptor2);
results.push_back(response);
}
//calculate the number of unmatched classes
double errorRate = (double) countNonZero(groundTruth- results) / evalData.rows;
printf("%s%f","Error rate is ",errorRate);
return 0;
}
After doing this I can compile the code without problems. I can also run it within Eclipse, but once I try to run it from a terminal I get the following error message:
OpenCV Error: Assertion failed (!_descriptors.empty()) in add, file /home/mark/Downloads/FP/opencv-2.4.6.1/modules/features2d/src/bagofwords.cpp, line 57
terminate called after throwing an instance of 'cv::Exception'
what(): /home/mark/Downloads/FP/opencv-2.4.6.1/modules/features2d/src/bagofwords.cpp:57: error: (-215) !_descriptors.empty() in function add
I've been trying to solve the problem for a few days now, but I just cannot get rid of this error. I also tried to do it with CodeBlocks, which gives me the same error. I would appreciate some help very much!
Thanks!
It's possible that your program fails to load the input images when launched from the terminal because it can't find them: the relative paths ("train/..." and "eval/...") are resolved against the current working directory. Make sure your input images are copied to the directory from which you run the application. Eclipse may use a different working directory, which is why the program finds the images when started from Eclipse.
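A quick way to confirm that is to check each load before using it; cvLoadImage returns a null pointer on failure. A sketch against the loop in your code:
// Fail loudly when an image can't be found instead of passing an empty
// image to the feature detector
img = cvLoadImage(imageName, 0);
if (!img)
{
    cerr << "Could not load " << imageName << " - check the working directory." << endl;
    continue;
}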
I am currently working through code from the OpenCV cookbook (OpenCV 2 Computer Vision Application Programming Cookbook): Chapter 5, Segmenting images using watersheds, page 131.
Here is my main code:
#include "opencv2/opencv.hpp"
#include <string>
using namespace cv;
using namespace std;
class WatershedSegmenter {
private:
cv::Mat markers;
public:
void setMarkers(const cv::Mat& markerImage){
markerImage.convertTo(markers, CV_32S);
}
cv::Mat process(const cv::Mat &image){
cv::watershed(image,markers);
return markers;
}
};
int main ()
{
cv::Mat image = cv::imread("/Users/yaozhongsong/Pictures/IMG_1648.JPG");
// Eliminate noise and smaller objects
cv::Mat fg;
cv::erode(binary,fg,cv::Mat(),cv::Point(-1,-1),6);
// Identify image pixels without objects
cv::Mat bg;
cv::dilate(binary,bg,cv::Mat(),cv::Point(-1,-1),6);
cv::threshold(bg,bg,1,128,cv::THRESH_BINARY_INV);
// Create markers image
cv::Mat markers(binary.size(),CV_8U,cv::Scalar(0));
markers= fg+bg;
// Create watershed segmentation object
WatershedSegmenter segmenter;
// Set markers and process
segmenter.setMarkers(markers);
segmenter.process(image);
imshow("a",image);
std::cout<<".";
cv::waitKey(0);
}
However, it doesn't work. How could I initialize a binary image? And how could I make this segmentation code work?
I am not very clear about this part of the book.
Thanks in advance!
There are a couple of things that should be mentioned about your code:
Watershed expects the input and the output image to have the same size;
You probably want to get rid of the const parameters in the methods;
Notice that the result of watershed is actually markers, not image as your code suggests; because of that, you need to grab the return value of process()!
This is your code, with the fixes above:
// Usage: ./app input.jpg
#include "opencv2/opencv.hpp"
#include <string>
using namespace cv;
using namespace std;
class WatershedSegmenter{
private:
cv::Mat markers;
public:
void setMarkers(cv::Mat& markerImage)
{
markerImage.convertTo(markers, CV_32S);
}
cv::Mat process(cv::Mat &image)
{
cv::watershed(image, markers);
markers.convertTo(markers,CV_8U);
return markers;
}
};
int main(int argc, char* argv[])
{
cv::Mat image = cv::imread(argv[1]);
cv::Mat binary;// = cv::imread(argv[2], 0);
cv::cvtColor(image, binary, CV_BGR2GRAY);
cv::threshold(binary, binary, 100, 255, THRESH_BINARY);
imshow("originalimage", image);
imshow("originalbinary", binary);
// Eliminate noise and smaller objects
cv::Mat fg;
cv::erode(binary,fg,cv::Mat(),cv::Point(-1,-1),2);
imshow("fg", fg);
// Identify image pixels without objects
cv::Mat bg;
cv::dilate(binary,bg,cv::Mat(),cv::Point(-1,-1),3);
cv::threshold(bg,bg,1, 128,cv::THRESH_BINARY_INV);
imshow("bg", bg);
// Create markers image
cv::Mat markers(binary.size(),CV_8U,cv::Scalar(0));
markers= fg+bg;
imshow("markers", markers);
// Create watershed segmentation object
WatershedSegmenter segmenter;
segmenter.setMarkers(markers);
cv::Mat result = segmenter.process(image);
result.convertTo(result,CV_8U);
imshow("final_result", result);
cv::waitKey(0);
return 0;
}
I took the liberty of using Abid's input image for testing and this is what I got.
Below is the simplified version of your code, and it works fine for me. Check it out:
#include "opencv2/objdetect/objdetect.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"
using namespace cv;
using namespace std;
int main ()
{
Mat image = imread("sofwatershed.jpg");
Mat binary = imread("sofwsthresh.png",0);
// Eliminate noise and smaller objects
Mat fg;
erode(binary,fg,Mat(),Point(-1,-1),2);
// Identify image pixels without objects
Mat bg;
dilate(binary,bg,Mat(),Point(-1,-1),3);
threshold(bg,bg,1,128,THRESH_BINARY_INV);
// Create markers image
Mat markers(binary.size(),CV_8U,Scalar(0));
markers= fg+bg;
markers.convertTo(markers, CV_32S);
watershed(image,markers);
markers.convertTo(markers,CV_8U);
imshow("a",markers);
waitKey(0);
}
Below is my input image:
Below is my output image:
See the code explanation here: Simple watershed Sample in OpenCV
I had the same problem as you, following the exact same code sample from the cookbook (great book, btw).
To give some context, I was coding under Visual Studio 2013 with OpenCV 2.4.8. After a lot of searching and no solutions, I decided to change the IDE.
It's still Visual Studio, BUT it's 2010!!!! And boom, it works!
Be careful with how you configure Visual Studio with OpenCV. There's a great tutorial for installation here.
Good day to all