I want to use the FER+ emotion recognition ONNX model in my project:
string modelPath = "../data/model.onnx";
Mat frame = imread("../data/Smile-Mood.jpg");
Mat gray;
cvtColor(frame, gray, COLOR_BGR2GRAY);
float scale = 1.0;
int inHeight = 64;
int inWidth = 64;
bool swapRB = false;
//Read and initialize network
cv::dnn::Net net = cv::dnn::readNetFromONNX(modelPath);
Mat blob;
//Create a 4D blob from a frame
cv::dnn::blobFromImage(gray, blob, scale, Size(inWidth, inHeight), swapRB, false);
//Set input blob
net.setInput(blob);
//Make forward pass
Mat prob = net.forward();
I get this error on the last line:
Unhandled exception at 0x00007FFDA25AA799 in FERPlusDNNOpenCV.exe: Microsoft C++ exception: cv::Exception at memory location 0x00000063839BE050. occurred
OpenCV(4.1.0) Error: Assertion failed (ngroups > 0 && inpCn % ngroups == 0 && outCn % ngroups == 0) in cv::dnn::ConvolutionLayerImpl::getMemoryShapes
How can I fix that?
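(A small diagnostic sketch, for reference: the FER+ ONNX model expects a 1x1x64x64 NCHW input, so printing the blob's shape before forward() shows whether the convolution layer receives what it expects.)
// Diagnostic sketch: dump the blob shape that blobFromImage produced.
// FER+ expects an NCHW blob of 1x1x64x64 (batch, channels, height, width).
CV_Assert(blob.dims == 4);
std::cout << "blob shape: " << blob.size[0] << "x" << blob.size[1] << "x"
          << blob.size[2] << "x" << blob.size[3] << std::endl;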
UPDATE, according to @Micka's comment:
Mat dstImg = Mat(64, 64, CV_32FC1);
resize(gray, dstImg, Size(64, 64));
std::vector<float> array;
if (dstImg.isContinuous())
    array.assign(dstImg.data, dstImg.data + dstImg.total());
else {
    for (int i = 0; i < dstImg.rows; ++i) {
        array.insert(array.end(), dstImg.ptr<float>(i), dstImg.ptr<float>(i) + dstImg.cols);
    }
}
//Set input blob
net.setInput(array);
//Make forward pass
Mat prob = net.forward();
I've got this error: vector subscript out of range.
Should I use another function instead of setInput?
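A minimal sketch of the direction I would expect to work, assuming the model takes a 1x1x64x64 float blob: setInput expects a 4D blob Mat, and feeding it a std::vector<float> is the likely source of the "vector subscript out of range" error.
// Sketch: build the NCHW blob directly from the grayscale image instead
// of flattening it into a std::vector<float>.
cv::Mat blob = cv::dnn::blobFromImage(gray, 1.0, cv::Size(64, 64));
// gray is single channel, so blob has shape 1x1x64x64 (any extra
// normalization the model needs is an open question here)
net.setInput(blob);
cv::Mat prob = net.forward();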
I have code that gets ORB keypoints and descriptors from images and then trains a bag of words using BOWKMeansTrainer. The purpose of this code is to cluster photos using k-means. I don't have any problem using ORB.
Here is my problem:
I saw that OpenCV provides a VGG descriptor; you can find it HERE. I want to use the VGG descriptor instead of ORB. Based on the documentation, VGG doesn't have any function to detect keypoints, so I used the ORB keypoints to create the VGG descriptors.
Ptr<ORB> detector = ORB::create();
Ptr<cv::xfeatures2d::VGG> detectorVGG = cv::xfeatures2d::VGG::create(VGG::VGG_120, 1.4f, true, true, 0.75f, true);
detector->detect(grayFrame, kp);
detectorVGG->compute(grayFrame, kp, descriptors);
This also works, and I can easily train the bag of words and cluster the photos the same way I did with ORB. However, when I want to test the bag of words, I get this error:
error: (-215) _queryDescriptors.type() == trainDescType in function knnMatchImpl
Abort trap: 6
I tried to change the types of the query descriptor Mat and the train descriptor Mat, but I cannot solve it. In the best case, the error changes to this:
(-215) (type == CV_8U && dtype == CV_32S) || dtype == CV_32F in function batchDistance
I am not sure whether the problem is a small mistake in the types or whether it comes from using ORB keypoints and VGG descriptors at the same time.
Part of my code is here:
vector<Mat> makeDescriptor(vector<Mat> frames)
{
    int frameSize = frames.size();
    vector<Mat> characteristics;
    for (int i = 0; i < frameSize; i++)
    {
        vector<KeyPoint> kp;
        Mat descriptors;
        Ptr<cv::xfeatures2d::VGG> detectorVGG = cv::xfeatures2d::VGG::create();
        Ptr<ORB> detectorORB = ORB::create();
        Mat frame = frames[i];
        if (frame.empty()) {
            cout << "Frame empty" << "\n";
            continue;
        }
        Mat grayFrame;
        try
        {
            cvtColor(frame, grayFrame, CV_RGB2GRAY);
        }
        catch (Exception& e)
        {
            continue;
        }
        detectorORB->detect(grayFrame, kp);
        detectorVGG->compute(grayFrame, kp, descriptors);
        characteristics.push_back(descriptors);
    }
    return characteristics;
}
Mat makeDict(vector<Mat> characteristics, int _nCluster)
{
    BOWKMeansTrainer bow(_nCluster);
    for (int j = 0; j < characteristics.size(); j++)
    {
        Mat descr = characteristics[j];
        if (!descr.empty())
        {
            bow.add(descr);
        }
    }
    Mat voc = bow.cluster();
    return voc;
}
BOWImgDescriptorExtractor makeBow(Mat dict)
{
    Ptr<DescriptorMatcher> matcher = DescriptorMatcher::create("BruteForce-Hamming");
    Ptr<DescriptorExtractor> extractor = VGG::create();
    BOWImgDescriptorExtractor bowDE(extractor, matcher);
    Mat voc;
    dict.convertTo(voc, CV_32F);
    bowDE.setVocabulary(voc);
    return bowDE;
}
void testBow(BOWImgDescriptorExtractor bowDE, vector<Mat> frames)
{
    Mat features;
    vector< vector<KeyPoint> > keypoints = makeORBKeyPoints(frames);
    for (int i = 0; i < frames.size(); i++)
    {
        Mat bowDesc;
        if (!keypoints[i].empty())
        {
            cout << "inside if" << '\n';
            Mat frame;
            frames[i].convertTo(frame, CV_8U);
            bowDE.compute(frame, keypoints[i], bowDesc); //This Line
            bowDesc.convertTo(bowDesc, CV_32F);
        }
        features.push_back(bowDesc);
    }
}
The runtime error is happening at this line:
bowDE.compute(frame, keypoints[i], bowDesc); //This Line
I would appreciate it if someone could help me find a solution.
Thanks,
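One thing stands out, offered as a sketch rather than a confirmed fix: VGG produces float (CV_32F) descriptors, while the "BruteForce-Hamming" matcher created in makeBow only accepts binary (CV_8U) descriptors such as ORB's, which is consistent with the _queryDescriptors.type() == trainDescType assertion. Pairing the VGG descriptors with an L2 matcher would look like this:
// Sketch: use an L2 ("BruteForce") matcher for the float VGG descriptors
// instead of the Hamming matcher, which expects binary descriptors.
Ptr<DescriptorMatcher> matcher = DescriptorMatcher::create("BruteForce");
Ptr<DescriptorExtractor> extractor = cv::xfeatures2d::VGG::create();
BOWImgDescriptorExtractor bowDE(extractor, matcher);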
I'm trying to save my SURF descriptor Mats (from different images) into a file for later use, using the code below:
int offline(int nb) { // creates descriptor images
    Mat img;
    int nfeatures = 25;
    int nOctaveLayers = 2;
    double contrastThreshold = 0.04;
    double edgeThreshold = 10;
    double sigma = 1.6;
    SurfFeatureDetector surfDetector = SURF(edgeThreshold, 2, 2, true, false);
    vector<KeyPoint> keypoints;
    SurfDescriptorExtractor surfExtractor;
    Mat imgDescriptors;
    for (int i = 2; i <= nb; i++) {
        img = imread("images/" + to_string(i) + ".jpg", CV_LOAD_IMAGE_GRAYSCALE);
        if (!img.data)
        {
            return -1;
        }
        surfDetector.detect(img, keypoints);
        surfExtractor.compute(img, keypoints, imgDescriptors);
        imwrite(to_string(i) + ".jgp", imgDescriptors); // impossible to save
        //imshow("lol", imgDescriptors); // impossible to show
    }
    return 0;
}
I'm getting this exception: OpenCV Error: Unspecified error (could not find a writer for the specified extension) in cv::imwrite_, file C:\builds\2_4_PackSlave-win64-vc12-shared\opencv\modules\highgui\src\loadsave.cpp, line 275. [RESOLVED by changing the extension to ".jpg"]
So I thought I should try to show the image instead, but I'm getting another error:
Any clue about this? (SECOND EXCEPTION)
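A minimal sketch of an alternative, assuming the goal is to persist the descriptors for later use rather than to view them: a SURF descriptor Mat holds CV_32F data, not an image, so cv::FileStorage fits better than imwrite/imshow. The file name descriptors.yml is illustrative.
// Sketch: serialize the CV_32F descriptor Mat with FileStorage instead of
// imwrite, which only handles image-typed Mats.
cv::FileStorage fs("descriptors.yml", cv::FileStorage::WRITE);
fs << "desc" << imgDescriptors; // store the Mat under the key "desc"
fs.release();

// Later, read the descriptors back:
cv::FileStorage fsIn("descriptors.yml", cv::FileStorage::READ);
cv::Mat loaded;
fsIn["desc"] >> loaded;
fsIn.release();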
I was trying to write Point2f imagePoints to a Mat image in OpenCV, following the link below.
Create Mat from vector<point2f>
But I am getting an 'Assertion failed' error. Please help.
Code:
std::vector<cv::Point3d> objectPoints;
std::vector<cv::Point2d> imagePoints;
cv::Mat intrisicMat(3, 3, cv::DataType<double>::type);
intrisicMat.at<double>(0, 0) = param.focalLength.first;
intrisicMat.at<double>(0, 1) = 0;
intrisicMat.at<double>(0, 2) = param.principalPoint.first;
intrisicMat.at<double>(1, 0) = 0;
intrisicMat.at<double>(1, 1) = param.focalLength.second;
intrisicMat.at<double>(1, 2) = param.principalPoint.second;
intrisicMat.at<double>(2, 0) = 0;
intrisicMat.at<double>(2, 1) = 0;
intrisicMat.at<double>(2, 2) = 1;
cv::Mat rVec(3, 1, cv::DataType<double>::type); // Rotation vector
rVec.at<double>(0) = 0;
rVec.at<double>(1) = 0;
rVec.at<double>(2) = 0;
cv::Mat tVec(3, 1, cv::DataType<double>::type); // Translation vector
tVec.at<double>(0) = 0;
tVec.at<double>(1) = 0;
tVec.at<double>(2) = 0;
cv::Mat distCoeffs(5, 1, cv::DataType<double>::type); // Distortion vector
distCoeffs.at<double>(0) = param.distortionRadial.at(0);
distCoeffs.at<double>(1) = param.distortionRadial.at(1);
distCoeffs.at<double>(2) = param.distortionTangential.first;
distCoeffs.at<double>(3) = param.distortionTangential.second;
distCoeffs.at<double>(4) = param.distortionRadial.at(2);
projectPoints(objectPoints, rVec, tVec, intrisicMat, distCoeffs, imagePoints);
Mat depthImage = Mat(imagePoints);
imwrite("E:/softwares/1.8.0.71/bin/depthImage.jpg", depthImage);
cout << "depthImage.channels()=" << depthImage.channels() << endl;
Error:
OpenCV Error: Assertion failed (image.channels() == 1 || image.channels() == 3 || image.channels() == 4) in cv::imwrite_, file E:\softwares\opencv-3.1.0\opencv-3.1.0\modules\imgcodecs\src\loadsave.cpp, line 455
My image has 2 channels, so imwrite() throws the assertion-failed error. How can I create a Mat image using the image points, if not like this?
From what you have written in the comments, it seems that you're trying to imwrite your Mat to a file. The problem is that a Mat built from a vector<Point2f> gives a 2-channel matrix, which is not compatible with any image format (grayscale, RGB, or RGBA).
Moreover, please edit your main post to show the code (using markdown) so that it is easier to read and to help you.
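If the goal is just to visualize where the points land, a sketch along these lines produces a single-channel image that imwrite accepts (the 640x480 canvas size is an assumption):
// Sketch: rasterize the projected points into a CV_8UC1 canvas so the
// result is a valid single-channel image for imwrite.
cv::Mat canvas = cv::Mat::zeros(480, 640, CV_8UC1);
for (const cv::Point2d& p : imagePoints) {
    int x = cvRound(p.x), y = cvRound(p.y);
    if (x >= 0 && x < canvas.cols && y >= 0 && y < canvas.rows)
        canvas.at<uchar>(y, x) = 255; // mark each projected point
}
cv::imwrite("depthImage.jpg", canvas);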
I am trying to convert the code from this second answer into C++. What I did is not giving me the right result. Here is my code:
{
    Mat img = imread("messi5.jpg");
    int level_n = 2;
    Mat p = Mat::zeros(img.cols*img.rows, 3, CV_32F);
    vector<Mat> bgr;
    cv::split(img, bgr);
    // Divide each pixel color by 127 for level 2
    for (int i = 0; i < img.cols*img.rows; i++) {
        p.at<float>(i, 0) = bgr[0].data[i] / 127.0;
        p.at<float>(i, 1) = bgr[1].data[i] / 127.0;
        p.at<float>(i, 2) = bgr[2].data[i] / 127.0;
    }
    vector<Mat> Img2 = p[bgr];
    Mat out;
    cv::transform(img, out, p);
    imshow("output", out);
}
What I don't understand is how to put these colors, which I divided by 127, back into a matrix. Where am I going wrong?
The other way I am trying is:
vector<Mat> bgr;
Mat blue, green, red;
cv::split(img, bgr);
blue = bgr[0] / 127.0;
if (blue > 128)
{
    blue = 255;
}
else
{
    blue = 0;
}
and the same for red and green.
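(As a sketch of what this second attempt seems to aim at: comparing a whole Mat with if (blue > 128) is not a per-pixel test, but cv::threshold does exactly that, one channel at a time, using the 127 cutoff from the question.)
// Sketch: threshold each split channel per pixel with cv::threshold.
cv::threshold(bgr[0], blue, 127, 255, cv::THRESH_BINARY);
cv::threshold(bgr[1], green, 127, 255, cv::THRESH_BINARY);
cv::threshold(bgr[2], red, 127, 255, cv::THRESH_BINARY);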
Why not just do it in place (for level 2):
Mat img = imread("messi5.jpg");
for (int i = 0; i < img.rows; i++)
    for (int j = 0; j < img.cols; j++) {
        cv::Vec3b p = img.at<cv::Vec3b>(i, j);
        for (int k = 0; k < img.channels(); k++)
            p[k] = p[k] > 127 ? 255 : 0;
        img.at<cv::Vec3b>(i, j) = p;
    }
// do whatever you want with processed image img
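A sketch generalizing the same loop to n quantization levels (the n = 4 value is illustrative; the answer above covers only level 2):
// Sketch: snap each channel to the nearest of n evenly spaced levels.
int n = 4; // illustrative; n = 2 reproduces the loop above
for (int i = 0; i < img.rows; i++)
    for (int j = 0; j < img.cols; j++) {
        cv::Vec3b p = img.at<cv::Vec3b>(i, j);
        for (int k = 0; k < img.channels(); k++)
            p[k] = cv::saturate_cast<uchar>(
                cvRound(p[k] * (n - 1) / 255.0) * 255.0 / (n - 1));
        img.at<cv::Vec3b>(i, j) = p;
    }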
Below is my code, which runs fine, but after long processing it shows me the runtime error below.
// Initialize constant values
const int nb_cars = files.size();
const int not_cars = files_no.size();
const int num_img = nb_cars + not_cars; // Get the number of images
// Initialize your training set.
cv::Mat training_mat(num_img, dictionarySize, CV_32FC1);
cv::Mat labels(0, 1, CV_32FC1);
std::vector<string> all_names;
all_names.assign(files.begin(), files.end());
all_names.insert(all_names.end(), files_no.begin(), files_no.end());
// Load the images and add them to the training set
int count = 0;
vector<string>::const_iterator i;
string Dir;
for (i = all_names.begin(); i != all_names.end(); ++i)
{
    Dir = ((count < files.size()) ? YourImagesDirectory : YourImagesDirectory_2);
    Mat row_img = cv::imread(Dir + *i, 0);
    detector.detect(row_img, keypoints);
    RetainBestKeypoints(keypoints, 20); // retain the 20 best keypoints
    extractor->compute(row_img, keypoints, descriptors_1);
    //uncluster.push_back(descriptors_1);
    descriptors.reshape(1, 1);
    bow.add(descriptors_1);
    ++count;
}
int count_2 = 0;
vector<string>::const_iterator k;
Mat vocabulary = bow.cluster();
dextract.setVocabulary(vocabulary);
for (k = all_names.begin(); k != all_names.end(); ++k)
{
    Dir = ((count_2 < files.size()) ? YourImagesDirectory : YourImagesDirectory_2);
    row_img = cv::imread(Dir + *k, 0);
    detector.detect(row_img, keypoints);
    RetainBestKeypoints(keypoints, 20);
    dextract.compute(row_img, keypoints, descriptors_1);
    descriptors_1.reshape(1, 1);
    training_mat.push_back(descriptors_1);
    labels.at<float>(count_2, 0) = (count_2 < nb_cars) ? 1 : -1;
    ++count_2;
}
Error:
OpenCV Error: Formats of input arguments do not match () in unknown function, file ..\..\..\src\opencv\modules\core\src\matrix.cpp, line 652
I reshape descriptors_1 into a row in the second loop for the SVM, but the error is not solved.
I think you are trying to cluster with fewer features than the number of clusters.
You can take more images, or retain more descriptors from each image (the code currently keeps only 20 per image).
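A small guard sketch along these lines, assuming bow is the BOWKMeansTrainer and dictionarySize is the requested cluster count:
// Sketch: make sure k-means has at least as many descriptors as clusters
// before calling cluster(); otherwise its internal assertions fire.
if (bow.descriptorsCount() < dictionarySize) {
    std::cerr << "Only " << bow.descriptorsCount() << " descriptors for "
              << dictionarySize << " clusters" << std::endl;
    return -1; // or collect more images / keypoints first
}
Mat vocabulary = bow.cluster();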
After three days I found out that my error is in the labeling: I get the error when I label the images. The answer above is also relevant, since using too few images can also cause this error, but in my case that was not the reason. When I checked the code line by line, the error starts here:
labels.at< float >(count_2, 0) = (count_2<nb_cars)?1:-1;
It is caused by the line:
Mat labels(0,1,CV_32FC1);
which should have been:
Mat labels(num_img,1,CV_32FC1);
and I should use:
my_img.convertTo( training_mat.row(count_2), CV_32FC1 );
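Putting both corrections together, a sketch of the second loop (variable names follow the question; I use descriptors_1 on the assumption that the BOW descriptor, not the raw image, fills each row of training_mat):
// Sketch: allocate labels with one row per image so the .at<float>()
// write stays in bounds, and copy each BOW descriptor into its row of
// training_mat with the right depth via convertTo.
cv::Mat labels(num_img, 1, CV_32FC1);
for (k = all_names.begin(); k != all_names.end(); ++k)
{
    Dir = ((count_2 < files.size()) ? YourImagesDirectory : YourImagesDirectory_2);
    row_img = cv::imread(Dir + *k, 0);
    detector.detect(row_img, keypoints);
    RetainBestKeypoints(keypoints, 20);
    dextract.compute(row_img, keypoints, descriptors_1);
    descriptors_1.convertTo(training_mat.row(count_2), CV_32FC1);
    labels.at<float>(count_2, 0) = (count_2 < nb_cars) ? 1.f : -1.f;
    ++count_2;
}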