I'm trying a simple example to learn SVM in OpenCV, but I'm not getting the right support vectors after training. I need some help understanding the issue.
My code is:
#include <iostream>
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/ml/ml.hpp>
using namespace cv;
using namespace std;
int main() {
Mat frame(Size(640,360), CV_8UC3, Scalar::all(255));
float train[15][2] = { {296, 296}, {296, 312}, {312, 8}, {312, 56}, {312, 88}, {328, 88}, {328, 104}, {328, 264}, {344, 8}, {344, 40}, {360, 8}, {360, 56}, {376, 8}, {376, 40}, {376, 56} };
Mat trainingDataMat(15, 2, CV_32FC1, train);
float labels[15] = { -1, -1, 1, 1, 1, 1, 1, -1, 1, 1, 1, 1, 1, 1, 1 };
Mat labelsMat(15, 1, CV_32FC1, labels);
CvSVMParams param;
param.svm_type = CvSVM::C_SVC;
param.C = 0.1;
param.kernel_type = CvSVM::LINEAR;
param.term_crit = TermCriteria(CV_TERMCRIT_ITER, 1000, 1e-6);
CvSVM SVM;
SVM.train(trainingDataMat, labelsMat, Mat(), Mat(), param);
cout<< "Training Finished..." << endl;
for(int i = 0; i < frame.rows; ++i) {
for(int j = 0; j < frame.cols; ++j) {
Mat sampleMat = (Mat_<float>(1,2) << i,j);
float response = SVM.predict(sampleMat);
//cout << response << endl;
if(response == 1) {
frame.at<Vec3b>(i,j)[2] = 0;
} else {
frame.at<Vec3b>(i,j)[0] = 0;
}
}
}
for(int dis = 0; dis < trainingDataMat.rows; dis++) {
if(labels[dis] == 1) {
circle(frame, Point((int)train[dis][0], (int)train[dis][1]), 3, Scalar (0, 0, 0), -1);
} else {
circle(frame, Point((int)train[dis][0], (int)train[dis][1]), 3, Scalar (0, 255, 0), -1);
}
}
int n = SVM.get_support_vector_count();
for(int i = 0; i < n; i++) {
const float* v = SVM.get_support_vector(i);
cout << "support Vectors : " << v[0] << " " << v[1] <<endl;
circle(frame,Point((int)v[0], (int)v[1]), 6, Scalar(128, 128, 128), 2, 8);
}
imwrite("frame.jpg",frame);
imshow("output", frame);
waitKey(0);
return 0;
}
The output image is attached.
The SVM line is not separating the two classes as I expect.
The result for the support vectors is:
support Vectors : 0 0.0125
The SVM itself should be OK; I think the problem lies in your display code. When you call circle(frame, Point((int)train[dis][0], (int)train[dis][1]), 3, Scalar(0, 0, 0), -1), OpenCV draws the circle at column train[dis][0] and row train[dis][1]. That is not what you want, because OpenCV uses different coordinate conventions for matrices and points: matrix access is (row, col) while Point is (x, y), so image.at<float>(Point(i,j)) is equivalent to image.at<float>(j,i). Your prediction loop treats the first coordinate of each sample as the row index, so the circles must be drawn the same way.
Try replacing your circle calls with this:
if(labels[dis] == 1) {
circle(frame, Point((int)train[dis][1], (int)train[dis][0]), 3, Scalar (0, 0, 0), -1);
} else {
circle(frame, Point((int)train[dis][1], (int)train[dis][0]), 3, Scalar (0, 255, 0), -1);
}
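To see the convention concretely, here is a minimal standalone sketch (my own illustration, not part of the fix) showing that Point(x, y) addresses column x and row y:
#include <opencv2/core/core.hpp>
#include <iostream>
using namespace cv;
int main() {
    Mat m = Mat::zeros(4, 8, CV_32FC1);   // 4 rows, 8 columns
    m.at<float>(1, 5) = 7.0f;             // at(row, col): row 1, column 5
    // Point(x, y): x is the column, y is the row, so this reads the same cell
    std::cout << m.at<float>(Point(5, 1)) << std::endl; // prints 7
    return 0;
}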
Edit: Added the function code for matlabImrotate.
I need to translate MATLAB code that uses this function.
Sometimes it appears like this: imrotate(double(im), d, 'bilinear', 'crop');
At other times it appears like this: imrotate(im_mask, rotation_angle, 'crop');
I tried to use the answer in "How to implement imrotate of Matlab in OpenCV?", but got different results.
For example, using the code from that link inside a function:
bool matlabImrotate(Mat& src, double angle, int interpolationMethod, Mat& dst)
{
// src: https://stackoverflow.com/questions/38715363/how-to-implement-imrotate-of-matlab-in-opencv
try
{
// Special Cases
if (fmod(angle, 360.0) == 0.0)
dst = src;
else {
Point2f center(src.cols / 2.0F, src.rows / 2.0F);
Mat rot = cv::getRotationMatrix2D(center, angle, 1.0);
// determine bounding rectangle
Rect bbox = RotatedRect(center, src.size(), angle).boundingRect();
// adjust transformation matrix
//rot.at<double>(0, 2) += bbox.width / 2.0 - center.x;
//rot.at<double>(1, 2) += bbox.height / 2.0 - center.y;
warpAffine(src, dst, rot, bbox.size(), interpolationMethod);
}
return true;
}
catch (exception& e)
{
cout << "Error in matlabImrotate - " << e.what() << endl;
return false;
}
}
and the following code:
double data[9]{
0, 0, 3,
4, 5, 0,
0, 0, 9 };
Mat A = Mat(3, 3, CV_64F, data);
matlabImrotate(A, -5, INTER_LINEAR, A);
cout << "A:" << endl;
cout << A << endl;
cout << endl;
I got the result:
A:
[0.4375, 0.3046875, 2.54296875, 0.328125, 0, 0;
3.390625, 4.8134765625, 0.2421875, 0.0234375, 0, 0;
0, 0.2724609375, 8.4462890625, 0, 0, 0;
0, 0, 0.4921875, 0, 0, 0;
0, 0, 0, 0, 0, 0;
0, 0, 0, 0, 0, 0]
instead of MATLAB's:
A =
1.2127 0.3141 1.2084
2.5963 5.0000 1.2278
0 3.2512 3.6252
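For what it's worth, MATLAB's 'crop' option keeps the output the same size as the input, while the bounding-box code above (with the translation adjustment commented out) produces a larger, shifted output, which would explain the 6x6 result. A minimal sketch of the 'crop' behaviour under that assumption; note the exact center convention may differ from MATLAB's by half a pixel:
// Rough approximation of imrotate(im, angle, 'bilinear', 'crop'):
// rotate about the center and keep the output at the input size.
cv::Mat imrotateCrop(const cv::Mat& src, double angle)
{
    cv::Point2f center(src.cols / 2.0f, src.rows / 2.0f);
    cv::Mat rot = cv::getRotationMatrix2D(center, angle, 1.0);
    cv::Mat dst;
    cv::warpAffine(src, dst, rot, src.size(), cv::INTER_LINEAR); // dsize = src.size() crops
    return dst;
}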
Cross-posted here.
I have built two functions with different names to drop the specified rows from Mat objects of different types. This is the code:
Mat drop_rows_int(Mat mat, vector<int> v) {
Mat mat_new = Mat::zeros(mat.rows - v.size(), mat.cols, CV_32SC1);
for (int i = 0, j = 0; i < mat.rows; i++) {
if (find(v.begin(), v.end(), i) != v.end())
{
continue;
}
else
{
int*pmat = mat.ptr<int>(i);
int*pmat_new = mat_new.ptr<int>(j);
for (int w = 0; w < mat.cols; w++) {
pmat_new[w] = pmat[w];
}
j++;
}
}
return mat_new;
}
Mat drop_rows_uchar(Mat mat, vector<int> v) {
Mat mat_new = Mat::zeros(mat.rows - v.size(), mat.cols, CV_8UC1);
for (int i = 0, j = 0; i < mat.rows; i++) {
if (find(v.begin(), v.end(), i) != v.end())
{
continue;
}
else
{
uchar*pmat = mat.ptr<uchar>(i);
uchar*pmat_new = mat_new.ptr<uchar>(j);
for (int w = 0; w < mat.cols; w++) {
pmat_new[w] = pmat[w];
}
j++;
}
}
return mat_new;
}
Then I can use them in my main() function like this:
int main()
{
Mat mat_uchar = (Mat_<uchar>(5, 4) << 5, 6, 0, 4, 0, 1, 9, 9, 100, 3, 5, 8, 200, 33, 1, 4, 8, 88, 23, 6);
Mat new_mat_uchar = drop_rows_uchar(mat_uchar, {2,4});
Mat mat_int = (Mat_<int>(5, 4) << 5, 6, 0, 4, 0, 1, 9, 9, 100, 3, 5, 8, 200, 33, 1, 4, 8, 88, 23, 6);
Mat new_mat_int = drop_rows_int(mat_int, { 2,4 });
return 0;
}
Yes, that works. But as far as I know, a Mat can have 7 kinds of depth: CV_8U, CV_8S, CV_16U, CV_16S, CV_32S, CV_32F and CV_64F. Do I really have to build 7 functions with different names to do this? Can anyone tell me how to implement it with a single function?
You cannot do that with cv::Mat. However, you can use cv::Mat_ and do some templating:
template<typename T>
cv::Mat_<T> drop_rows(cv::Mat_<T> mat, vector<int> v) {
...
}
And here you extract pointers of type T.
Just a piece of advice: for efficiency, I suggest passing the vector v as a const reference, if possible.
Here is the full solution:
#include "opencv/cv.h"
#include <vector>
#include <iostream>
template<typename T>
cv::Mat_<T> drop_rows(cv::Mat_<T> mat, const std::vector<int> &v) {
cv::Mat_<T> mat_new = cv::Mat_<T>::zeros(mat.rows - v.size(), mat.cols);
for (int i = 0, j = 0; i < mat.rows; i++) {
if (std::find(v.begin(), v.end(), i) != v.end())
continue;
else {
for (int w = 0; w < mat.cols; w++) {
mat_new(j, w) = mat(i, w);
}
j++;
}
}
return mat_new;
}
int main() {
cv::Mat_<uchar> mat = (cv::Mat_<uchar>(5, 4) << 5, 6, 0, 4, 0, 1, 9, 9, 100, 3, 5, 8, 200, 1, 2, 3, 4, 5, 6, 7);
auto result = drop_rows(mat, {2, 4});
std::cout << mat << std::endl;
std::cout << result << std::endl;
return 0;
}
Note that it works only for Mat_, not for Mat.
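If you do need to accept a plain cv::Mat, one option (a sketch, assuming single-channel input; drop_rows_any is my own name) is to dispatch on the depth once and reuse the template:
cv::Mat drop_rows_any(const cv::Mat &mat, const std::vector<int> &v) {
    // Dispatch on the runtime depth to the compile-time template.
    switch (mat.depth()) {
    case CV_8U:  return drop_rows(cv::Mat_<uchar>(mat), v);
    case CV_8S:  return drop_rows(cv::Mat_<schar>(mat), v);
    case CV_16U: return drop_rows(cv::Mat_<ushort>(mat), v);
    case CV_16S: return drop_rows(cv::Mat_<short>(mat), v);
    case CV_32S: return drop_rows(cv::Mat_<int>(mat), v);
    case CV_32F: return drop_rows(cv::Mat_<float>(mat), v);
    case CV_64F: return drop_rows(cv::Mat_<double>(mat), v);
    }
    return cv::Mat(); // unsupported depth
}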
I'm working on a SIFT implementation using Visual C++. My code throws an error while taking an inverse:
/// Load the source image
src = imread("C:/Users/Adithyaanirudhha/Documents/Visual Studio 2015/Projects/ConsoleApplication2/pa.jpg", 1);
if (display_caption("Original Image") != 0) { return 0; }
dst = src.clone();
width = src.size().width;
height = src.size().height;
Size size(height,width);
if (display_dst(DELAY_CAPTION) != 0) { return 0; }
cvtColor(src, src, CV_RGB2GRAY);
/// Applying Gaussian blur
//for(int j=0;j<4;j++)
//{
//resize(src, src, size / 2);
k = 2 ^ (1 / 2);
for(int i=0;i<3;i++)
{
//if (display_caption("Gaussian Blur") != 0) { return 0; }
GaussianBlur(src, dst, Size(), 1.6*k, 1.6*k);
if (display_dst(DELAY_BLUR*10) != 0) { return 0; }
k = k * k;
dst.copyTo(dest[m]);
//dest[m] = dst;
m++;
}
//}
width2 = dog[1].size().width;
height2 = dog[1].size().height;
Size sizes(width2,height2);
Mat dog_inv(sizes,0,CV_64F);
for (int n = 0; n < 2; n++)
{
if(m1!=3 || m1 != 6 || m1 != 9)
subtract(dest[m1 + 1], dest[m1], dog[n],noArray(),-1);
}
for (int i = 0; i < 2; i++)
{
Sobel(dog[i], grad_x, CV_16S, 1, 0, 3, 1, 0, BORDER_DEFAULT);
convertScaleAbs(grad_x, grad_x);
transpose(grad_x, temp);
Sobel(dog[i], grad_x_2, CV_16S, 2, 0, 3, 1, 0, BORDER_DEFAULT);
convertScaleAbs(grad_x_2, grad_x_2);
c = invert(dog[i],dog_inv, DECOMP_LU);
Sobel(dog[i], grad_x_2_1, CV_16S, 2, 0, 3, 1, 0, BORDER_DEFAULT);
convertScaleAbs(grad_x_2_1, grad_x_2_1);
//imshow(window_name,src);
//grad_x_2_1 = grad_x_2_1.inv(CV_32F);
multiply(grad_x_2_1, grad_x, x_max, 1, 1);
multiply(temp, x_max, p1, 1, 1);
transpose(x_max,temp);
//multiply(temp, grad_x_2, p2, 1, 1);
multiply(p2, x_max, p2, 1, 1);
imshow(window_name, dog[1]);
/*for (int y = 0; y < dog[i].rows; y++)
{
for (int x = 0; x < dog[i].cols; x++)
{
dog[i].at<Vec3b>(y, x) = (-1 * (p1.at<Vec3b>(y, x)) + 0.5*(p2.at<Vec3b>(y, x)) + dog[i].at<Vec3b>(y, x));
}
}*/
//imshow(window_name, dog[1]);
//imshow(window_name, src);
and the error is:
OpenCV Error: Assertion failed (type == CV_32F || type == CV_64F) in cv::invert,
file C:\buildslave64\win64_amdocl\master_PackSlave-win64-vc14-shared\opencv\mod
ules\core\src\lapack.cpp, line 798
Press any key to continue . . .
OpenCV Error: Assertion failed (type == CV_32F || type == CV_64F) in cv::invert
means that you need to pass invert a matrix of type CV_32F (float) or CV_64F (double). But you're passing a CV_16S (short), which you set when calling:
Sobel(dog[i], grad_x, CV_16S, 1, 0, 3, 1, 0, BORDER_DEFAULT);
^^^^^^
So you can either change the type of the matrix returned by Sobel to CV_32F:
Sobel(dog[i], grad_x, CV_32F, 1, 0, 3, 1, 0, BORDER_DEFAULT);
c = invert(dog[i],dog_inv, DECOMP_LU);
or convert the matrix you pass to invert to the correct type:
Sobel(dog[i], grad_x, CV_16S, 1, 0, 3, 1, 0, BORDER_DEFAULT);
Mat tmp;
dog[i].convertTo(tmp, CV_32F);
c = invert(tmp,dog_inv, DECOMP_LU);
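If you're unsure which type you're actually passing, a quick debugging sketch (reusing the question's variable names) that checks before calling invert; note that cv::invert also expects a square, single-channel matrix:
// invert only accepts CV_32F or CV_64F, so check the depth first.
Mat m;
if (dog[i].depth() == CV_32F || dog[i].depth() == CV_64F)
    m = dog[i];
else
    dog[i].convertTo(m, CV_32F); // promote to float before inverting
double c = invert(m, dog_inv, DECOMP_LU);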
This is only part of the code, but I know the error is in here. Specifically, this line:
float y = hist.at<float>(0,i);
For some reason my histogram is 0 by 0.
So the actual error likely comes from how I used the calcHist() function:
void entropyImage(string filename) {
Mat YCbCrImage, hist , image = imread(filename, IMREAD_UNCHANGED);
float range[] = { 0, 256 };
const float* histRange = { range };
int histSize = 256;
if (image.channels() == 1){
double H = 0;
//GrayScale Image
calcHist(&image, 1, 0, Mat(), hist, 1, &histSize, &histRange, true, false);
for (int i = 0; i<histSize; i++){
float y = hist.at<float>(0,i);
cout << "symbol: " << i << " was repeated: " << y << endl;
}
}}
Here's how I'm calling the function from the main:
entropyImage("C:\\Users\\Documents\\Visual Studio 2013\\Projects\\lenagray.jpg");
Can someone more experienced in OpenCV let me know why my calcHist isn't working?
Just specify the channel by declaring an array, like:
int channel[] = {0};
and call:
calcHist(&image, 1, channel, Mat(), hist, 1, &histSize, &histRange, true, false);
Maybe this will solve your problem.
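Putting it together, a minimal self-contained sketch (the image path is a placeholder) that computes a 256-bin grayscale histogram and reads the bins back with hist.at<float>(i):
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <iostream>
using namespace cv;
int main() {
    Mat image = imread("lenagray.jpg", IMREAD_GRAYSCALE); // placeholder path
    if (image.empty()) return -1;
    int histSize = 256;
    float range[] = { 0, 256 };
    const float* histRange = { range };
    int channels[] = { 0 };
    Mat hist;
    calcHist(&image, 1, channels, Mat(), hist, 1, &histSize, &histRange, true, false);
    // hist is histSize x 1, CV_32F; bin i holds the count for intensity i
    for (int i = 0; i < histSize; i++)
        std::cout << "symbol: " << i << " was repeated: " << hist.at<float>(i) << std::endl;
    return 0;
}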
Hello, after connected component labeling, why am I getting more than one pixel value for a single label?
This is my image:
#include <iostream>
#include <vector>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
using namespace cv;
using namespace std;
void FindBlobs(const cv::Mat &binary, std::vector < std::vector<cv::Point2i> > &blobs);
int main(int argc, char **argv)
{
cv::Mat img = cv::imread("/Users/Rodrane/Documents/XCODE/test/makalesvm/persembe.png", 0); if(!img.data) {
std::cout << "File not found" << std::endl;
return -1;
}
cv::namedWindow("binary");
cv::namedWindow("labelled");
cv::Mat output = cv::Mat::zeros(img.size(), CV_8UC3);
cv::Mat binary;
std::vector < std::vector<cv::Point2i > > blobs;
equalizeHist( img , img );
cv::threshold(img, binary, 0, 1, cv::THRESH_BINARY_INV);
FindBlobs(binary, blobs);
// Randomly color the blobs
for(size_t i=0; i < blobs.size(); i++) {
unsigned char r = 255 * (rand()/(1.0 + RAND_MAX));
unsigned char g = 255 * (rand()/(1.0 + RAND_MAX));
unsigned char b = 255 * (rand()/(1.0 + RAND_MAX));
for(size_t j=0; j < blobs[i].size(); j++) {
int x = blobs[i][j].x;
int y = blobs[i][j].y;
output.at<cv::Vec3b>(y,x)[0] = b;
output.at<cv::Vec3b>(y,x)[1] = g;
output.at<cv::Vec3b>(y,x)[2] = r;
}
}
cout << "H = "<< endl << " " << output << endl << endl;
cv::imshow("binary", img);
cv::imshow("labelled", output);
cv::waitKey(0);
return 0;
}
void FindBlobs(const cv::Mat &binary, std::vector < std::vector<cv::Point2i> > &blobs)
{
blobs.clear();
// Fill the label_image with the blobs
// 0 - background
// 1 - unlabelled foreground
// 2+ - labelled foreground
cv::Mat label_image;
binary.convertTo(label_image, CV_32SC1);
int label_count = 2; // starts at 2 because 0,1 are used already
for(int y=0; y < label_image.rows; y++) {
int *row = (int*)label_image.ptr(y);
for(int x=0; x < label_image.cols; x++) {
if(row[x] != 1) {
continue;
}
cv::Rect rect;
cv::floodFill(label_image, cv::Point(x,y), label_count, &rect, 0, 0, 4);
std::vector <cv::Point2i> blob;
for(int i=rect.y; i < (rect.y+rect.height); i++) {
int *row2 = (int*)label_image.ptr(i);
for(int j=rect.x; j < (rect.x+rect.width); j++) {
if(row2[j] != label_count) {
continue;
}
blob.push_back(cv::Point2i(j,i));
}
}
blobs.push_back(blob);
label_count++;
}
}
}
Actually, this is a single label when I print the image.
However, when I check the pixel values, I see there are actually 2 different values (I only wrote down part of the matrix, not all of it):
H = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 192, 33, 0, 192, 33, 0, 192, 33, 0, 0, 0, 0, ]
Also, by adding this line of code at the end of the FindBlobs function, I receive 3; since the label_count variable starts from 2, this also proves that H is a single label.
cout << "number of labels = "<< endl << " " << label_count << endl << endl;