I'm using Visual Studio 2019 with OpenCV 4.4.0.
Everything was working fine, but when I try to start face detection, the CascadeClassifier doesn't load the Haar cascade.
You should also know that I installed OpenCV on the C: drive. Here is a simple version of the code:
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <opencv2\opencv.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <Windows.h>
#include <vector>
#include <stdio.h>
using namespace std;
using namespace cv;
int main()
{
VideoCapture cam(0);
Mat img;
CascadeClassifier detector;
vector<Rect> faces;
Point p[2];
bool cap = false;
if (!detector.load("c:\\opencv\\sources\\data\\haarcascades\\haarcascade_frontalface_default.xml"))
{
cout << "Image Detector Doesn't work\n";
return EXIT_FAILURE;
}
if (!cam.isOpened())
{
cout << "Can't Open Camera\n";
return EXIT_FAILURE;
}
while (!cap)
{
cam.read(img);
imshow("Cam", img);
waitKey(0);
if (GetAsyncKeyState(VK_ESCAPE))
cap = true;
}
destroyWindow("Cam");
cout << "Detecting Face...\n";
detector.detectMultiScale(img, faces);
for (int i = 0; i < faces.size(); i++)
{
p[0] = Point(faces[i].x,faces[i].y);
p[1] = Point(faces[i].x + faces[i].height,faces[i].y + faces[i].width);
rectangle(img,p[0],p[1],Scalar(0,0,255),3);
}
imwrite("Result.jpg",img);
return EXIT_SUCCESS;
}
This code fails to load the Haar cascade and prints "can't load" in the console.
I really need help with this, thanks.
The backslash (\) starts an escape sequence in C++ string literals.
Therefore, you have to write \\ to put a literal \ character in them:
if (!detector.load("c:\\opencv\\sources\\data\\haarcascades\\haarcascade_frontalface_default.xml"))
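If the path is escaped correctly and the load still fails, it is worth checking that the file really exists at that location (OpenCV also accepts forward slashes on Windows, which sidesteps the escaping issue entirely). A minimal check, a sketch using the same path as in the question, could look like this:

#include <fstream>
#include <iostream>
#include <string>
#include <opencv2/objdetect.hpp>

int main()
{
    // Forward slashes also work on Windows and avoid escaping problems.
    std::string cascadePath = "c:/opencv/sources/data/haarcascades/haarcascade_frontalface_default.xml";

    // First make sure the file is actually there before blaming the classifier.
    std::ifstream f(cascadePath);
    if (!f.good())
    {
        std::cout << "File not found: " << cascadePath << "\n";
        return 1;
    }

    cv::CascadeClassifier detector;
    if (!detector.load(cascadePath) || detector.empty())
    {
        std::cout << "Cascade failed to load\n";
        return 1;
    }
    std::cout << "Cascade loaded\n";
    return 0;
}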
I am new to SVM. I used to do object detection with Haar cascades, and now I am trying to implement an SVM for object detection. I searched online and learned the basics.
I want to use libsvm in my C++ code, but I am running into a lot of problems.
Can anyone explain the step-by-step process of using it for object detection?
By the way, I looked into the OpenCV documentation for SVM, but I am not able to get any further.
I also found the code below for training my SVM and saving it into an XML file.
Now I want code that can take this XML file and detect objects in test cases.
#include <opencv2/core/core.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <cv.h>
#include <highgui.h>
#include <cvaux.h>
#include <iostream>
#include <sstream>
#include <vector>
#include <string.h>

using namespace std;
using namespace cv;

int main(int argc, char** argv)
{
    cout << "OpenCV Training SVM Automatic Number Plate Recognition\n";
    cout << "\n";

    const char* path_Plates;
    const char* path_NoPlates;
    int numPlates;
    int numNoPlates;
    int imageWidth = 150;
    int imageHeight = 150;

    // Check if the user specified images to process
    if (1)
    {
        numPlates = 11;
        numNoPlates = 90;
        path_Plates = "/home/kaushik/opencv_work/Manas6/Pics/Positive_Images/";
        path_NoPlates = "/home/kaushik/opencv_work/Manas6/Pics/Negative_Images/i";
    }
    else
    {
        cout << "Usage:\n" << argv[0] << " <num Plate Files> <num Non Plate Files> <path to plate folder files> <path to non plate files>\n";
        return 0;
    }

    Mat classes;      //(numPlates+numNoPlates, 1, CV_32FC1);
    Mat trainingData; //(numPlates+numNoPlates, imageWidth*imageHeight, CV_32FC1);
    Mat trainingImages;
    vector<int> trainingLabels;

    for (int i = 1; i <= numPlates; i++)
    {
        stringstream ss(stringstream::in | stringstream::out);
        ss << path_Plates << i << ".jpg";
        try
        {
            // Keep the string alive; calling c_str() on the temporary returned
            // by ss.str() would leave a dangling pointer.
            string fileName = ss.str();
            printf("\n%s\n", fileName.c_str());
            Mat img = imread(fileName, CV_LOAD_IMAGE_UNCHANGED);
            img = img.clone().reshape(1, 1);
            trainingImages.push_back(img);
            trainingLabels.push_back(1);
        }
        catch (Exception e) {}
    }

    for (int i = 0; i < numNoPlates; i++)
    {
        stringstream ss(stringstream::in | stringstream::out);
        ss << path_NoPlates << i << ".jpg";
        try
        {
            string fileName = ss.str();
            printf("\n%s\n", fileName.c_str());
            Mat img = imread(fileName, 0);
            img = img.clone().reshape(1, 1);
            trainingImages.push_back(img);
            trainingLabels.push_back(0);
        }
        catch (Exception e) {}
    }

    Mat(trainingImages).copyTo(trainingData);
    //trainingData = trainingData.reshape(1, trainingData.rows);
    trainingData.convertTo(trainingData, CV_32FC1);
    Mat(trainingLabels).copyTo(classes);

    FileStorage fs("SVM.xml", FileStorage::WRITE);
    fs << "TrainingData" << trainingData;
    fs << "classes" << classes;
    fs.release();

    return 0;
}
Any help would be greatly appreciated.
I would also love suggestions on how to use libsvm for object detection.
Here is a simple piece of code you can use to run a test with your XML file:
#include "highgui.h"
#include "opencv2/imgproc/imgproc.hpp"
#include "cv.h"
#include <vector>
#include <string.h>
#include <ml.h>
#include <iostream>
#include <io.h>
using namespace cv;
using namespace std;
int main()
{
FileStorage fs;
fs.open("SVM.xml", FileStorage::READ);
Mat trainingData;
Mat classes;
fs["TrainingData"] >> trainingData;
fs["classes"] >> classes;
CvSVMParams SVM_params;
SVM_params.svm_type = CvSVM::C_SVC;
SVM_params.kernel_type = CvSVM::LINEAR; //CvSVM::LINEAR;
SVM_params.degree = 1;
SVM_params.gamma = 1;
SVM_params.coef0 = 0;
SVM_params.C = 1;
SVM_params.nu = 0;
SVM_params.p = 0;
SVM_params.term_crit = cvTermCriteria(CV_TERMCRIT_ITER, 1000, 0.01);
CvSVM svm(trainingData, classes, Mat(), Mat(), SVM_params);
Mat src = imread("D:\\SVM\\samples\\\pos\\10.jpg");
Mat gray;
cvtColor(src, gray, CV_BGR2GRAY);
Mat p = gray.reshape(1, 1);
p.convertTo(p, CV_32FC1);
int response = (int)svm.predict( p );
if(response ==1 )
{
cout<<"this is a object!"<<endl;
cout<<endl;
}
else
{
cout<<"no object detected!"<<endl;
cout<<endl;
}
return 0;
}
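Note that this uses the OpenCV 2.x API (CvSVM / CvSVMParams). If you are on OpenCV 3 or newer, the rough equivalent lives in the ml module; the following is only a sketch (untested, and it assumes trainingData is CV_32F with one sample per row and classes holds the integer labels produced by the training code above):

#include <opencv2/core.hpp>
#include <opencv2/ml.hpp>
using namespace cv;

// Sketch: train an SVM with the OpenCV 3+ ml module.
Ptr<ml::SVM> makeAndTrainSvm(const Mat& trainingData, const Mat& classes)
{
    Ptr<ml::SVM> svm = ml::SVM::create();
    svm->setType(ml::SVM::C_SVC);
    svm->setKernel(ml::SVM::LINEAR);
    svm->setC(1);
    svm->setTermCriteria(TermCriteria(TermCriteria::MAX_ITER, 1000, 0.01));
    svm->train(trainingData, ml::ROW_SAMPLE, classes);
    return svm;
}

// Prediction on a single reshaped CV_32F sample p:
// float response = svm->predict(p);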
By the way, there seems to be a small problem when running the code you offered; the result shows: "OpenCV error: Image step is wrong in cv::Mat::reshape". Have you come across this before? Thank you.
I am developing in VS2015 + OpenCV 3.0 on Windows 7 64-bit. Below is a demo I wanted to try, and I have tried many other demos, but I keep running into the same problem:
object of abstract class type "cv::BackgroundSubtractorMOG2" is not allowed: all the methods are pure virtual functions.
The demo code is:
using namespace cv;
using namespace std;

int main() {
    VideoCapture video("1.avi");
    Mat frame, mask, thresholdImage, output;
    int frameNum = 0;
    //video >> frame;
    Ptr<BackgroundSubtractor> pMOG2;
    pMOG2 = new BackgroundSubtractorMOG2();
    BackgroundSubtractorMOG2 bgSubtractor(20, 16, true);
    while (true) {
        video >> frame;
        ++frameNum;
        bgSubtractor(frame, mask, 0.001);
        cout << frameNum << endl;
        //imshow("mask", mask);
        //waitKey(10);
    }
    return 0;
}
I include a lot of header files but I still cannot use the class BackgroundSubtractorMOG2, and what is worse, the class BackgroundSubtractorMOG is reported as undeclared.
The syntax has changed since the OpenCV 2.x releases. This will work in OpenCV 3.0.0:
#include <opencv2/opencv.hpp>
#include <iostream>

using namespace cv;
using namespace std;

int main() {
    VideoCapture video("1.avi");
    Mat frame, mask, thresholdImage, output;
    int frameNum = 0;
    Ptr<BackgroundSubtractor> pMOG2 = createBackgroundSubtractorMOG2(20, 16, true);
    while (true) {
        video >> frame;
        if (frame.empty())   // stop when the video runs out of frames
            break;
        ++frameNum;
        pMOG2->apply(frame, mask, 0.001);
        cout << frameNum << endl;
        imshow("mask", mask);
        waitKey(10);
    }
    return 0;
}
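As for BackgroundSubtractorMOG being reported as undeclared: in OpenCV 3 that algorithm was moved out of the main modules into the bgsegm module of opencv_contrib, so it is only available if OpenCV was built with the contrib modules. If it is, the usage is analogous (a sketch, assuming opencv_contrib is installed):

#include <opencv2/opencv.hpp>
#include <opencv2/bgsegm.hpp> // only available with opencv_contrib

// Create the classic MOG subtractor from the contrib bgsegm module.
cv::Ptr<cv::BackgroundSubtractor> pMOG = cv::bgsegm::createBackgroundSubtractorMOG();
// Then, per frame: pMOG->apply(frame, mask);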
When I run this program and adjust the slider bar multiple times, the image appears different even though the slider is at the same position. If you try this code, move the slider from the minimum to the maximum position and back several times and you can see a slight alteration to the image each time.
I have traced the point at which this happens to the line calling the add function in my onProgram6Trackbar1 function. Removing it removes the variations between slider movements. Why is this happening?
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <iostream>
#include <string>
#include <cmath>

using namespace cv;
using namespace std;

class ProgramData {
public:
    ProgramData() {
        k1 = 0;
        k2 = 0;
        k3 = 0;
        k4 = 0;
        k5 = 0;
    }
    int k1;
    int k2;
    int k3;
    int k4;
    int k5;
    Mat* source_U8C3;
    Mat* temp1_U8C3;
    Mat* temp2_U8C3;
    Mat* temp3_U8C1;
    Mat* temp4_U8C1;
    Mat* temp5_U8C1;
    Mat* temp6_U8C1;
    Mat* temp7_U8C1;
    vector<Mat> tempv1_U8C1;
    vector<Mat> tempv2_U8C1;
    Mat* output_U8C1;
    Mat* output_U8C3;
    Mat* dim1by1;
};

static void onProgram6Trackbar1(int v, void* vp) {
    ProgramData* pd = (ProgramData*)vp;
    *(pd->temp3_U8C1) = pd->tempv1_U8C1[2].clone();
    inRange(*(pd->temp3_U8C1), pd->k1, 255, *(pd->temp4_U8C1));
    bitwise_not(*(pd->temp4_U8C1), *(pd->temp5_U8C1));
    bitwise_and(*(pd->temp5_U8C1), *(pd->temp3_U8C1), *(pd->temp6_U8C1));
    bitwise_or(*(pd->temp6_U8C1), Scalar(pd->k1), *(pd->temp7_U8C1), *(pd->temp4_U8C1));
    imshow("Glare Reduction 4", *(pd->temp7_U8C1));
}

void program6(const char* argv) {
    ProgramData pd;
    pd.k1 = 0;
    Mat source = imread(argv, IMREAD_COLOR); // Read the file
    pd.source_U8C3 = &source;
    Size s(pd.source_U8C3->size().width / 1.3, pd.source_U8C3->size().height / 1.3);
    resize(*(pd.source_U8C3), *(pd.source_U8C3), s, 0, 0, CV_INTER_AREA);
    pd.output_U8C3 = new Mat(pd.source_U8C3->rows, pd.source_U8C3->cols, pd.source_U8C3->type());
    pd.output_U8C1 = new Mat(pd.source_U8C3->rows, pd.source_U8C3->cols, CV_8UC1);
    //pd.temp1_U8C3 = new Mat(pd.source_U8C3->rows, pd.source_U8C3->cols, pd.source_U8C3->type());
    pd.temp2_U8C3 = new Mat(pd.source_U8C3->rows, pd.source_U8C3->cols, pd.source_U8C3->type());
    pd.temp3_U8C1 = new Mat(pd.source_U8C3->rows, pd.source_U8C3->cols, CV_8UC1);
    pd.temp4_U8C1 = new Mat(pd.source_U8C3->rows, pd.source_U8C3->cols, CV_8UC1);
    pd.temp5_U8C1 = new Mat(pd.source_U8C3->rows, pd.source_U8C3->cols, CV_8UC1);
    pd.temp6_U8C1 = new Mat(pd.source_U8C3->rows, pd.source_U8C3->cols, CV_8UC1);
    pd.temp7_U8C1 = new Mat(pd.source_U8C3->rows, pd.source_U8C3->cols, CV_8UC1);
    pd.dim1by1 = new Mat(100, 800, CV_8UC1);
    cout << "source type = " << pd.source_U8C3->type() << endl;
    if (!pd.source_U8C3->data) { cout << "Could not open image" << std::endl; return; }
    cvtColor(*(pd.source_U8C3), *(pd.temp2_U8C3), CV_BGR2HSV); // original to hsv
    split(*(pd.temp2_U8C3), pd.tempv1_U8C1);
    namedWindow("Glare Reduction - Controls", WINDOW_AUTOSIZE); // Create a window for display.
    onProgram6Trackbar1(0, &pd);
    createTrackbar("k1", "Glare Reduction - Controls", &(pd.k1), 255, &onProgram6Trackbar1, &pd);
    imshow("Glare Reduction - Controls", *(pd.dim1by1)); // Show our image inside it.
    waitKey(0); // Wait for a keystroke in the window
}

int main(int argc, char** argv)
{
    program6("Blocks1.jpg");
}
Update 1:
New code posted below. I tried changing the code to not use any Mat pointers. Still does the exact same thing.
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <iostream>
#include <string>
#include <cmath>

using namespace cv;
using namespace std;

class ProgramData {
public:
    ProgramData() {
        k1 = 0;
    }
    int k1;
    Mat source_U8C3;
    Mat temp1_U8C3;
    Mat temp2_U8C3;
    Mat temp3_U8C1;
    Mat temp4_U8C1;
    Mat temp5_U8C1;
    Mat temp6_U8C1;
    Mat temp7_U8C1;
    vector<Mat> tempv1_U8C1;
    vector<Mat> tempv2_U8C1;
    Mat output_U8C1;
    Mat output_U8C3;
    Mat dim1by1;
};

static void onProgram6Trackbar1(int v, void* vp) {
    ProgramData* pd = (ProgramData*)vp;
    pd->temp3_U8C1 = pd->tempv1_U8C1[2].clone();
    inRange(pd->temp3_U8C1, Scalar(pd->k1), Scalar(255), pd->temp4_U8C1);
    bitwise_not(pd->temp4_U8C1, pd->temp5_U8C1); // Note for Monday: here does not work, below works. Why?
    bitwise_and(pd->temp5_U8C1, pd->temp3_U8C1, pd->temp6_U8C1);
    bitwise_or(pd->temp6_U8C1, Scalar(pd->k1), pd->temp7_U8C1, pd->temp4_U8C1);
    imshow("Glare Reduction 4", pd->temp7_U8C1);
}

int main(int argc, char** argv) {
    ProgramData pd;
    pd.k1 = 0;
    pd.source_U8C3 = imread("Photo Examples/Blocks1.jpg", IMREAD_COLOR); // Read the file
    Size s(pd.source_U8C3.size().width / 1.3, pd.source_U8C3.size().height / 1.3);
    resize(pd.source_U8C3, pd.source_U8C3, s, 0, 0, CV_INTER_AREA);
    pd.dim1by1.create(100, 800, CV_8UC1);
    cout << "source type = " << pd.source_U8C3.type() << endl;
    if (!pd.source_U8C3.data) { cout << "Could not open image" << std::endl; return 0; }
    cvtColor(pd.source_U8C3, pd.temp2_U8C3, CV_BGR2HSV); // original to hsv
    split(pd.temp2_U8C3, pd.tempv1_U8C1);
    namedWindow("Glare Reduction - Controls", WINDOW_AUTOSIZE); // Create a window for display.
    onProgram6Trackbar1(0, &pd);
    createTrackbar("k1", "Glare Reduction - Controls", &(pd.k1), 255, &onProgram6Trackbar1, &pd);
    imshow("Glare Reduction - Controls", pd.dim1by1); // Show our image inside it.
    waitKey(0); // Wait for a keystroke in the window
    return 0;
}
Update 2:
I think I may have found the source of the problem. When I add the marked line
static void onProgram6Trackbar1(int v, void* vp) {
    ProgramData* pd = (ProgramData*)vp;
    pd->temp3_U8C1 = pd->tempv1_U8C1[2].clone();
    inRange(pd->temp3_U8C1, Scalar(pd->k1), Scalar(255), pd->temp4_U8C1);
    bitwise_not(pd->temp4_U8C1, pd->temp5_U8C1);
    bitwise_and(pd->temp5_U8C1, pd->temp3_U8C1, pd->temp6_U8C1);
    pd->temp7_U8C1 = pd->tempv1_U8C1[2].clone(); // <----
    bitwise_or(pd->temp6_U8C1, Scalar(pd->k1), pd->temp7_U8C1, pd->temp4_U8C1);
    imshow("Glare Reduction 4", pd->temp7_U8C1);
}
to onProgram6Trackbar1, it suddenly works as expected. I thought that, since OpenCV 2 does its own memory allocation, I didn't have to initialize pd->temp7_U8C1, which serves as the output matrix in the call to bitwise_or. It's almost as if the underlying memory of pd->temp7_U8C1 was pointing to memory that belonged to one of the buffers used as output for the image processing done in main (pd.tempv1_U8C1 or pd.source_U8C3). Or the line I added did something else that I have not thought of.
So my new question is: why did this line fix it, and what is going on underneath? Is the behavior of using an uninitialized Mat as an output defined somewhere in the documentation? It was my understanding that you don't have to initialize the size or type of a matrix that you are using as an output Mat.
Maybe a bit too old, but anyway: first check the slightly cleaned-up code below. I removed everything that is redundant and moved the actual work of the trackbar callback into a member function of your class. This way you can operate directly on the members.
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <iostream>

using namespace cv;
using namespace std;

class ProgramData
{
public:
    ProgramData()
    {
        k1 = 0;
    }
    int k1;
    Mat source_U8C3,
        temp2_U8C3, temp4_U8C1,
        temp5_U8C1, temp6_U8C1,
        temp7_U8C1;
    vector<Mat> tempv1_U8C1;

    void reduce_glare(void)
    {
        // sets elements in temp4 to 255 if within range
        inRange(tempv1_U8C1[2], Scalar(k1), Scalar(255), temp4_U8C1);
        // bitwise_not(InputArray src, OutputArray dst)
        bitwise_not(temp4_U8C1, temp5_U8C1);
        // bitwise_and(InputArray src1, InputArray src2, OutputArray dst)
        bitwise_and(temp5_U8C1, tempv1_U8C1[2], temp6_U8C1);
        // watch out here:
        temp7_U8C1 = Mat::ones(tempv1_U8C1[2].size(), CV_8UC1);
        Mat x = Mat::ones(tempv1_U8C1[2].size(), CV_8UC1) * k1;
        // bitwise_or(InputArray src1, InputArray src2, OutputArray dst, InputArray mask)
        bitwise_or(temp6_U8C1, x, temp7_U8C1, temp4_U8C1);
        cout << "source type = " << temp7_U8C1.type() << endl;
        cout << "source channels = " << temp7_U8C1.channels() << endl;
        cout << "source depth = " << temp7_U8C1.depth() << endl;
    }
};

void onProgram6Trackbar1(int v, void *vp)
{
    ProgramData *pd = static_cast<ProgramData *>(vp);
    (*pd).reduce_glare();
    imshow("Glare Reduction 4", pd->temp7_U8C1);
}

int main(int argc, char **argv)
{
    ProgramData pd;
    pd.source_U8C3 = imread("CutDat.jpeg", IMREAD_COLOR);
    Size s(pd.source_U8C3.size().width / 1.3, pd.source_U8C3.size().height / 1.3);
    resize(pd.source_U8C3, pd.source_U8C3, s, 0, 0, CV_INTER_AREA);
    cout << "source type = " << pd.source_U8C3.type() << endl;
    cvtColor(pd.source_U8C3, pd.temp2_U8C3, CV_BGR2HSV);
    split(pd.temp2_U8C3, pd.tempv1_U8C1);
    namedWindow("Glare Reduction - Controls", WINDOW_AUTOSIZE);
    imshow("Glare Reduction - Controls", Mat(100, 800, CV_8UC1));
    createTrackbar("k1", "Glare Reduction - Controls", &(pd.k1), 255, &onProgram6Trackbar1, &pd);
    waitKey(0);
    return 0;
}
The important line is the one where temp7_U8C1 is initialized, but not with the original data. The result you get is still not what you want, but it highlights that the issue lies in the call to bitwise_or: when a mask is passed, only the pixels selected by the mask are written, and the destination keeps its previous contents everywhere else, so the output matrix has to be initialized beforehand. Your question regarding the Scalar bug doesn't apply here, as I've shown in the code.
The code was tested on Windows with 2.4.10 and on Ubuntu with 2.4.8, both giving the same results. Running the code under valgrind shows no problems.
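To make that behaviour concrete, here is a small self-contained sketch (independent of the images above) showing that the masked form of bitwise_or only writes the pixels selected by the mask, so an uninitialized output Mat keeps whatever its freshly allocated buffer happens to contain:

#include <opencv2/core/core.hpp>
#include <iostream>
using namespace cv;

int main()
{
    Mat src1 = (Mat_<uchar>(1, 4) << 1, 2, 3, 4);
    Mat src2 = Mat::zeros(1, 4, CV_8UC1);
    Mat mask = (Mat_<uchar>(1, 4) << 255, 0, 255, 0); // only columns 0 and 2 selected

    Mat dstInit = Mat::ones(1, 4, CV_8UC1) * 9; // pre-initialized output
    Mat dstUninit;                              // allocated by bitwise_or itself

    bitwise_or(src1, src2, dstInit, mask);
    bitwise_or(src1, src2, dstUninit, mask);

    // Masked-out pixels of dstInit keep their old value (9); in dstUninit they
    // hold whatever happened to be in the newly allocated buffer.
    std::cout << "dstInit   = " << dstInit << std::endl;
    std::cout << "dstUninit = " << dstUninit << std::endl;
    return 0;
}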
I am using OpenCV 2.4.6. I found an example on the Internet of grabbing frames from a camera. It works well (it displays my ugly face on the screen). However, I absolutely cannot get the pixel data from the frames. I found this topic: http://answers.opencv.org/question/1934/reading-pixel-values-from-a-frame-of-a-video/ but it doesn't work for me.
Here is the code - in the comments I pointed out what goes wrong.
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
using namespace cv;

int main() {
    int c;
    IplImage* img;
    CvCapture* capture = cvCaptureFromCAM(1);
    cvNamedWindow("mainWin", CV_WINDOW_AUTOSIZE);
    while (1) {
        img = cvQueryFrame(capture);
        uchar* data = (uchar*)img->imageData; // access violation
        // this does not work either
        //Mat m(img);
        //uchar a = m.data[0]; // access violation
        cvShowImage("mainWin", img);
        c = cvWaitKey(10);
        if (c == 27)
            break;
    }
}
Could you give me some suggestions, please?
I suggest using the newer Mat structure instead of IplImage, since your question is tagged C++. For your task you can use the data member of Mat - it points to the internal Mat storage. For example: Mat img; uchar* data = img.data;. Here's a full example:
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <iostream>
using namespace cv;

int main() {
    int c;
    Mat img;
    VideoCapture capture(0);
    namedWindow("mainWin", CV_WINDOW_AUTOSIZE);
    bool readOk = true;
    while (capture.isOpened()) {
        readOk = capture.read(img);
        // make sure we grabbed the frame successfully
        if (!readOk) {
            std::cout << "No frame" << std::endl;
            break;
        }
        uchar* data = img.data; // this should work
        imshow("mainWin", img);
        c = waitKey(10);
        if (c == 27)
            break;
    }
}
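If the goal is to read individual pixel values rather than just grab the raw buffer, a safer pattern is to index with Mat::at or to walk a row through Mat::ptr. A small sketch (assuming the usual 8-bit BGR frames a webcam delivers):

#include <opencv2/core/core.hpp>
#include <iostream>
using namespace cv;

// Print the BGR value of one pixel, plus the same value read through the raw buffer.
void printPixel(const Mat& img, int row, int col) {
    // Typed access: Vec3b = three unsigned 8-bit channels (B, G, R).
    Vec3b bgr = img.at<Vec3b>(row, col);
    std::cout << "at<>   : " << (int)bgr[0] << " " << (int)bgr[1] << " " << (int)bgr[2] << std::endl;

    // Raw access: use ptr() for the row start, since rows may be padded (see Mat::step).
    const uchar* p = img.ptr<uchar>(row) + col * img.channels();
    std::cout << "data[] : " << (int)p[0] << " " << (int)p[1] << " " << (int)p[2] << std::endl;
}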