Because this is a project for an image processing class, I have to implement a couple of linear filters from scratch (I'm not supposed to use already implemented features of OpenCV like Sobel, not even the 2D filter function). The code is at the end of the question.
Since images processed with the Sobel operator give results similar to Prewitt ones, I use a window displaying a Sobel-processed image as a reference.
I have only gotten to the point of applying an operator in the horizontal direction, but I'm already getting weird results. The images speak for themselves:
Original image:
Original image
My result using the Prewitt operator on the horizontal direction:
My processed image
I get a weird blue-beige pattern instead of black-and-white horizontal lines. What is happening?
Here is the code:
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/opencv.hpp"
#include <stdlib.h>
#include <stdio.h>
using namespace cv;
using namespace std;
int main(int, char** argv)
{
int Hprewitt[3][3] = { { -1, 0, 1 }, { -1, 0, 1 }, { -1, 0, 1 } };
int Vprewitt[3][3] = { { -1, -1, -1 }, { 0, 0, 0 }, { 1, 1, 1 } };
int tempInput[3][3];
int tempPixel=0;
Mat src, src_gray;
Mat grad;
const char* window_name = "Sobel Edge Detector";
const char* window_name2 = "Prewitt";
int scale = 1;
int delta = 0;
int ddepth = CV_16S;
int computedIntensity;
src = imread(argv[1]);
if (src.empty())
{
return -1;
}
namedWindow(window_name2, WINDOW_AUTOSIZE);
Mat HprewittMat(src.rows, src.cols, CV_8UC3, Scalar(0, 0, 0));
GaussianBlur(src, src, Size(3, 3), 0, 0, BORDER_DEFAULT);
cvtColor(src, src_gray, COLOR_RGB2GRAY);
namedWindow(window_name, WINDOW_AUTOSIZE);
Scalar intensity = src.at<uchar>(Point(50, 50)); // this is how to access intensity at a certain pixel
Vec3b scalarTempPixel = src.at<Vec3b>(Point(1, 1));
cout << "Pixel (50,50) has intensity: " << intensity.val[0] << endl;
// applying horizontal prewitt operator
cout << "\n Image has resolution: " << src.cols << "x" << src.rows << "\n";
for (int i = 2; i < src.cols-1; i++){ // currently going from column 2 to n-2, same for row
for (int j = 2; j < src.rows-1; j++){
// storing a temporary 3x3 input matrix centered on the current pixel
// cout << "Matrix centered on pixel: [" << i << "," << j << "] \n";
for (int k = -1; k < 2; k++){
for (int l = -1; l < 2; l++){
intensity = src.at<uchar>(Point(i + k, j + l));
tempInput[k+1][l+1] = intensity.val[0];
// cout << "[" << intensity.val[0] << "]";
}
// cout << " \n";
}
// convolution of horizontal prewitt kernel with current 3x3 matrix
for (int x = 0; x < 3; x++){
for (int y = 0; y < 3; y++){
tempPixel = tempPixel + tempInput[x][y] * Hprewitt[x][y];
}
}
scalarTempPixel[0] = tempPixel;
HprewittMat.at<Vec3b>(Point(i, j)) = scalarTempPixel;
}
}
Mat grad_x, grad_y;
Mat abs_grad_x, abs_grad_y;
Sobel(src_gray, grad_x, ddepth, 1, 0, 3, scale, delta, BORDER_DEFAULT);
convertScaleAbs(grad_x, abs_grad_x);
Sobel(src_gray, grad_y, ddepth, 0, 1, 3, scale, delta, BORDER_DEFAULT);
convertScaleAbs(grad_y, abs_grad_y);
addWeighted(abs_grad_x, 0.5, abs_grad_y, 0.5, 0, grad);
imshow(window_name, grad);
imshow(window_name2, HprewittMat);
waitKey(0);
return 0;
}
So at this point I'm doing the following:
I read an image into "src", then create HprewittMat, which I initialize with the number of rows and columns of the original image but with black pixels. I then convert the src image to a gray one. Then I iterate through each pixel of the original image, and for each pixel I convolve the surrounding 3x3 neighborhood with the horizontal Prewitt kernel. I store that value in "tempPixel" and write it into the HprewittMat image.
The next step would be to do the same with the vertical kernel, and then to combine the two into the gradient magnitude.
I'm asking this question because the similar questions I've found on manipulating individual pixels are usually for Python or Java. There might also be a flaw in the logic I'm using.
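For reference, here is a minimal sketch of how the horizontal pass described above could look when written against the grayscale image, with a single-channel CV_8UC1 output. The per-pixel reset of the accumulator and the clamping of the (possibly negative) gradient to 0..255 are my assumptions about the intended behavior, not something taken from the code above; Hprewitt and src_gray are the array/Mat declared in that code.
// Sketch: horizontal Prewitt on src_gray (CV_8UC1)
Mat HprewittGray(src_gray.rows, src_gray.cols, CV_8UC1, Scalar(0));
for (int i = 1; i < src_gray.cols - 1; i++){ // skip the one-pixel border
    for (int j = 1; j < src_gray.rows - 1; j++){
        int sum = 0; // accumulator is reset for every pixel
        for (int k = -1; k <= 1; k++){
            for (int l = -1; l <= 1; l++){
                sum += src_gray.at<uchar>(Point(i + k, j + l)) * Hprewitt[k + 1][l + 1];
            }
        }
        // the gradient can be negative, so store its absolute value, clamped to 0..255
        HprewittGray.at<uchar>(Point(i, j)) = saturate_cast<uchar>(abs(sum));
    }
}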
Related
Hey, I want to do a linear gray transformation so that I can change the contrast.
How can I get the maximum and minimum gray values? Then I want to scale the image so that it has a limited contrast range of 100 to 150. I have searched for about two hours but haven't found anything.
It would be nice if someone could help.
#include <opencv2/highgui/highgui.hpp>
#include <iostream>
#include <opencv2/imgproc.hpp>
using namespace std;
using namespace cv;
cv::Mat plotHistogram(cv::Mat &image, bool cumulative = true, int histSize = 256);
int main()
{
cv::Mat img = cv::imread("schrott.png"); // Read the file
if (img.empty()) // Check for invalid input
{
std::cout << "Could not open or find the frame" << std::endl;
return -1;
}
cv::Mat img_gray;
cv::cvtColor(img, img_gray, cv::COLOR_BGR2GRAY); // In case img is colored
cv::namedWindow("Input Image", cv::WINDOW_AUTOSIZE); // Create a window for display.
cv::imshow("Input Image", img);
cv::Mat hist;
hist = plotHistogram(img_gray);
cv::namedWindow("Histogram", cv::WINDOW_NORMAL); // Create a window for display.
cv::imshow("Histogram", hist);
cv::waitKey(0);
}
cv::Mat plotHistogram(cv::Mat &image, bool cumulative, int histSize) {
// Create Image for Histogram
int hist_w = 1024; int hist_h = 800;
int bin_w = cvRound((double)hist_w / histSize);
cv::Mat histImage(hist_h, hist_w, CV_8UC1, Scalar(255, 255, 255));
if (image.channels() > 1) {
cerr << "plotHistogram: Please insert only gray images." << endl;
return histImage;
}
// Calculate Histogram
float range[] = { 0, 256 };
const float* histRange = { range };
cv::Mat hist;
calcHist(&image, 1, 0, Mat(), hist, 1, &histSize, &histRange);
if (cumulative) {
cv::Mat accumulatedHist = hist.clone();
for (int i = 1; i < histSize; i++) {
accumulatedHist.at<float>(i) += accumulatedHist.at<float>(i - 1);
}
hist = accumulatedHist;
}
// Normalize the result to [ 0, histImage.rows ]
normalize(hist, hist, 0, histImage.rows, NORM_MINMAX, -1, Mat());
// Draw bars
for (int i = 1; i < histSize; i++) {
cv::rectangle(histImage, Point(bin_w * (i - 1), hist_h),
Point(bin_w * (i), hist_h - cvRound(hist.at<float>(i))),
Scalar(50, 50, 50), 1);
}
return histImage; // Not really call by value, as cv::Mat only saves a pointer to the image data
}
You can find the minimum and maximum values with minMaxLoc:
Mat image;
//read image;
double min, max;
minMaxLoc( image, &min, &max );
cout << "min: " << min << ", max: " << max << endl;
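Building on that, mapping [min, max] linearly onto the target range [100, 150] could then be done with convertTo, which computes dst = src * alpha + beta per pixel. A minimal sketch, assuming an 8-bit single-channel image and max > min:
// choose alpha and beta so that min -> 100 and max -> 150
double alpha = (150.0 - 100.0) / (max - min);
double beta = 100.0 - min * alpha;
Mat scaled;
image.convertTo(scaled, CV_8UC1, alpha, beta);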
I am trying to take the ground plane out of the image, draw grids on it for path mapping, and insert it back into the image. Here I am using the getPerspectiveTransform and warpPerspective functions to do so. But when I switch the point sets to insert the modified plane back, everything except the plane becomes black in the image.
I have tried to do it using an intermediate image, but the result is the same.
#include "pch.h"
#include <opencv2/opencv.hpp>
using namespace cv;
using namespace std;
struct userdata {
Mat im;
vector<Point2f> points;
};
void mouseHandler(int event, int x, int y, int flags, void* data_ptr)
{
if (event == EVENT_LBUTTONDOWN) {
userdata* data = ((userdata*)data_ptr);
circle(data->im, Point(x, y), 3, Scalar(0, 0, 255), 5, LINE_AA);
imshow("Image", data->im);
if (data->points.size() < 4) {
data->points.push_back(Point2f(x, y));
}
}
}
int main(int argc, char** argv)
{
// Read source image.
Mat im_src = imread("imagesindoor.jpg");
// Destination image. The aspect ratio of the book is 3/4
Size size(400, 300);
Size size2(im_src.cols, im_src.rows);
Mat im_dst = Mat::zeros(size, CV_8UC3);
// Create a vector of destination points.
vector<Point2f> pts_dst;
pts_dst.push_back(Point2f(0, 0));
pts_dst.push_back(Point2f(size.width - 1, 0));
pts_dst.push_back(Point2f(size.width - 1, size.height - 1));
pts_dst.push_back(Point2f(0, size.height - 1));
// Set data for mouse event
Mat im_temp = im_src.clone();
userdata data;
data.im = im_temp;
cout << "Click on the four corners of the book -- top left first and" <<
endl
<< "bottom left last -- and then hit ENTER" << endl;
// Show image and wait for 4 clicks.
imshow("Image", im_temp);
// Set the callback function for any mouse event
setMouseCallback("Image", mouseHandler, &data);
waitKey(0);
// Calculate the homography
Mat h = getPerspectiveTransform(data.points, pts_dst);
// Warp source image to destination
warpPerspective(im_src, im_dst, h, size);
// changing color of im_dst
for (int i = 0; i < im_dst.rows; i++) {
for (int j = 0; j < im_dst.cols; j++) {
//apply condition here
im_dst.at<cv::Vec3b>(i, j) = 255;
}
}
Mat p = getPerspectiveTransform(pts_dst, data.points);
warpPerspective(im_dst, im_src, p, size2);
// Show image
//imshow("Image", im_dst);
imshow("Image2", im_src);
waitKey(0);
return 0;
}
addWeighted can be used to blend the current result with the source image to get the expected result.
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include <opencv2/opencv.hpp>
#include <iostream>
#include <stdio.h>
using namespace cv;
using namespace std;
struct userdata {
Mat im;
vector<Point2f> points;
};
void mouseHandler(int event, int x, int y, int flags, void* data_ptr)
{
if (event == EVENT_LBUTTONDOWN) {
userdata* data = ((userdata*)data_ptr);
circle(data->im, Point(x, y), 3, Scalar(0, 0, 255), 5, LINE_AA);
imshow("Image", data->im);
if (data->points.size() < 4) {
data->points.push_back(Point2f(x, y));
}
}
}
int main(int argc, char** argv)
{
// Read source image.
Mat im_src = imread("test.png");
// Destination image. The aspect ratio of the book is 3/4
Size size(400, 300);
Size size2(im_src.cols, im_src.rows);
Mat im_dst = Mat::zeros(size, CV_8UC3);
// Create a vector of destination points.
vector<Point2f> pts_dst;
pts_dst.push_back(Point2f(0, 0));
pts_dst.push_back(Point2f(size.width - 1, 0));
pts_dst.push_back(Point2f(size.width - 1, size.height - 1));
pts_dst.push_back(Point2f(0, size.height - 1));
// Set data for mouse event
Mat im_temp = im_src.clone();
userdata data;
data.im = im_temp;
cout << "Click on the four corners of the book -- top left first and" <<
endl
<< "bottom left last -- and then hit ENTER" << endl;
// Show image and wait for 4 clicks.
imshow("Image", im_temp);
// Set the callback function for any mouse event
setMouseCallback("Image", mouseHandler, &data);
waitKey(0);
// Calculate the homography
Mat h = getPerspectiveTransform(data.points, pts_dst);
// Warp source image to destination
warpPerspective(im_src, im_dst, h, size);
// changing color of im_dst
for (int i = 0; i < im_dst.rows; i++) {
for (int j = 0; j < im_dst.cols; j++) {
//apply condition here
im_dst.at<cv::Vec3b>(i, j) = 255;
}
}
Mat t;
Mat p = getPerspectiveTransform(pts_dst, data.points);
warpPerspective(im_dst, t, p, size2);
// Show image
//imshow("Image", im_dst);
std::cout << "t :" <<t.cols << ", " <<t.rows <<std::endl;
Mat final;
addWeighted(im_src, 0.5, t, 0.5, 0, final);
imshow("Image2", final);
waitKey(0);
return 0;
}
I'm trying to calculate the mean and standard deviation for red-only contours. I suspect that red hue values in an HSV Vec3b are stored in the ranges 0-10 and 165-179.
Here is my code:
#include <opencv2\opencv.hpp>
#include <iostream>
#include <vector>
#include <cmath>
using namespace cv;
using namespace std;
int main(int argc, char** argv) {
// Mat Declarations
// Mat img = imread("white.jpg");
// Mat src = imread("Rainbro.png");
Mat src = imread("multi.jpg");
// Mat src = imread("DarkRed.png");
Mat Hist;
Mat HSV;
Mat Edges;
Mat Grey;
vector<vector<Vec3b>> hueMEAN;
vector<vector<Point>> contours;
// Variables
int edgeThreshold = 1;
int const max_lowThreshold = 100;
int ratio = 3;
int kernel_size = 3;
int lowThreshold = 0;
// Windows
namedWindow("img", WINDOW_NORMAL);
namedWindow("HSV", WINDOW_AUTOSIZE);
namedWindow("Edges", WINDOW_AUTOSIZE);
namedWindow("contours", WINDOW_AUTOSIZE);
// Color Transforms
cvtColor(src, HSV, CV_BGR2HSV);
cvtColor(src, Grey, CV_BGR2GRAY);
// Perform Hist Equalization to help equalize Red hues so they stand out for
// better Edge Detection
equalizeHist(Grey, Grey);
// Image Transforms
blur(Grey, Edges, Size(3, 3));
Canny(Edges, Edges, max_lowThreshold, lowThreshold * ratio, kernel_size);
findContours(Edges, contours, CV_RETR_LIST, CV_CHAIN_APPROX_NONE);
//Rainbro MAT
//Mat drawing = Mat::zeros(432, 700, CV_8UC1);
//Multi MAT
Mat drawing = Mat::zeros(630, 1200, CV_8UC1);
//Red variation Mat
//Mat drawing = Mat::zeros(600, 900, CV_8UC1);
vector <vector<Point>> ContourPoints;
/* This code loops through all contours and uses the y coordinate of each contour point
as the row index into the HSV mat. The Vec3b pixel at that position is read and
stored whenever its hue value is between 0-10 or 165-179, i.e. for red-only contours. */
for (int i = 0; i < contours.size(); i++) {
vector<Vec3b> vf;
vector<Point> points;
bool isContourRed = false;
for (int j = 0; j < contours[i].size(); j++) {
//Row Y-Coordinate of Mat from Y-Coordinate of Contour
int MatRow = int(contours[i][j].y);
//Row X-Coordinate of Mat from X-Coordinate of Contour
int MatCol = int(contours[i][j].x);
Vec3b *HsvRow = HSV.ptr <Vec3b>(MatRow);
int h = int(HsvRow[int(MatCol)][0]);
int s = int(HsvRow[int(MatCol)][1]);
int v = int(HsvRow[int(MatCol)][2]);
cout << "Coordinate: ";
cout << contours[i][j].x;
cout << ",";
cout << contours[i][j].y << endl;
cout << "Hue: " << h << endl;
// Get contours that are only in the red spectrum Hue 0-10, 165-179
if ((h <= 10 || h >= 165 && h <= 180) && ((s > 0) && (v > 0))) {
cout << "Coordinate: ";
cout << contours[i][j].x;
cout << ",";
cout << contours[i][j].y << endl;
cout << "Hue: " << h << endl;
vf.push_back(Vec3b(h, s, v));
points.push_back(contours[i][j]);
isContourRed = true;
}
}
if (isContourRed == true) {
hueMEAN.push_back(vf);
ContourPoints.push_back(points);
}
}
drawContours(drawing, ContourPoints, -1, Scalar(255, 255, 255), 2, 8);
// Calculate Mean and STD for each Contour
cout << "contour Means & STD of Vec3b:" << endl;
for (int i = 0; i < hueMEAN.size(); i++) {
Scalar meanTemp = mean(hueMEAN.at(i));
Scalar sdTemp;
cout << i << ": " << endl;
cout << meanTemp << endl;
cout << " " << endl;
meanStdDev(hueMEAN.at(i), meanTemp, sdTemp);
cout << sdTemp << endl;
cout << " " << endl;
}
cout << "Actual Contours: " << contours.size() << endl;
cout << "# Contours: " << hueMEAN.size() << endl;
imshow("img", src);
imshow("HSV", HSV);
imshow("Edges", Edges);
imshow("contours", drawing);
waitKey(0);
return 0;
}
I've come across an issue in this particular case:
On the right is the original Image, The left displays the HSV mat, the Edge detection and an arrow is pointing to a contours Mat that I drew after the filtering.
Here is the source image:
After the filtering is complete I just calculate the Mean and STD.
I have a feeling that my range is incorrect for 0-10 and 165-179. Any suggestions or further improvements would help a lot.
Thanks.
A quick test shows me that the range is correct. Without all the contour extraction stuff, if I just filter the colors using 0-10 and 165-179 ranges, I get the two red boxes in the lower-middle range of your input image.
The contour artifact that you see might actually be coming from both a JPEG artifact (if you zoom in at the limit between the white and red box, you can see that it is gradual and not sharp, due to JPEG compression), and the fact that you are only thresholding in the Hue channel. At low saturation, many grey-ish colors which you don't want will actually be fitting within your hue threshold. The solution for that is to filter pixel values in the S and V channel as well.
In your code, that means changing the line if ((h <= 10 || h >= 165 && h <= 180) && ((s > 0) && (v > 0))) { to if ((h <= 10 || h >= 165 && h <= 180) && ((s > 50) && (v > 50))) {
The value 50 is working on that specific sample image, but of course the correct value will depend on your input image.
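For what it's worth, the quick test described above can be reproduced with two inRange calls on the HSV image combined with bitwise_or; the saturation and value floors of 50 are just the starting point that happened to work on this sample image:
// Sketch: red mask from the two hue ranges, with S and V floors of 50
Mat maskLow, maskHigh, redMask;
inRange(HSV, Scalar(0, 50, 50), Scalar(10, 255, 255), maskLow); // hue 0-10
inRange(HSV, Scalar(165, 50, 50), Scalar(179, 255, 255), maskHigh); // hue 165-179
bitwise_or(maskLow, maskHigh, redMask);
imshow("red mask", redMask);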
I'm trying to calculate the mass center of images using OpenCV, and I am getting wrong results, as you can see in the images (the mass center should not be that close to one of the sides in these cases). Also, I get mass centers that depend on the rotation, which is incorrect.
Next, you can see the code, input image and output image.
I tried with different example codes, and the results are the same.
Output image: Mass center calculated by the program
Input image: Image Input
Example code:
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include <iostream>
#include <stdio.h>
#include <stdlib.h>
using namespace cv;
using namespace std;
Mat src; Mat srcGray;
RNG rng(12345);
int main(int argc, char **argv)
{
// Load source image and convert it to gray
src = imread(argv[1], 1);
// Convert image to gray and blur it
cvtColor(src, srcGray, CV_BGR2GRAY);
blur(srcGray, srcGray, Size(3, 3));
Mat srcThresh;
double otsu;
otsu = threshold(srcGray, srcThresh, 0, 255, CV_THRESH_BINARY | CV_THRESH_OTSU);
Mat cannyOut;
Canny(srcGray, cannyOut, otsu, otsu * 1 / 2, 3, 1);
// Find contours
vector<vector<Point> > contours;
vector<Vec4i> hierarchy;
findContours(cannyOut, contours, hierarchy, CV_RETR_EXTERNAL, CV_CHAIN_APPROX_SIMPLE, Point(0, 0));
// Get the moments
vector<Moments> mu(contours.size());
for (int i = 0; i < contours.size(); i++)
{
mu[i] = moments(contours[i], false);
}
// Get the mass centers:
vector<Point2f> mc(contours.size());
for (int i = 0; i < contours.size(); i++)
{
mc[i] = Point2f(mu[i].m10 / mu[i].m00, mu[i].m01 / mu[i].m00);
}
// Draw contours
Mat drawing = Mat::zeros(cannyOut.size(), CV_8UC3);
string sObjectNumber; // string which will contain the result
ostringstream sContourNumber; // stream used for the conversion
for (int i = 0; i< contours.size(); i++)
{
// drawing.setTo(Scalar(0.0,0.0,0.0));
sContourNumber << i;
sObjectNumber = sContourNumber.str(); // Convert int to string
Point pCoordinates(mc[i].x + 3, mc[i].y - 3); // Text's coordinates (A little bit off from mass center)
Scalar color = Scalar(rng.uniform(0, 255), rng.uniform(0, 255), rng.uniform(0, 255));
drawContours(drawing, contours, i, color, 2, 8, hierarchy, 0, Point());
circle(drawing, mc[i], 4, color, -1, 8, 0); // Draw mass center
putText(drawing, sObjectNumber, pCoordinates, CV_FONT_HERSHEY_COMPLEX, 1, color, 2, 8); // Write object number
sContourNumber.str(""); // Clear string
sContourNumber.clear(); // Clear any error flags
// imshow("Contours", drawing);
// waitKey();
}
double hu[7];
for (int i = 0; i < contours.size(); i++)
{
cout << "Contour: " << i << " Area: " << contourArea(contours[i]) << " Length: " << arcLength(contours[i], true) << "\n";
for (int j = 0; j < 7; j++)
{
HuMoments(mu[i], hu);
cout << "Contour: " << i << " Hu: " << j << " Result: " << hu[j] << "\n";
}
cout << "\n";
}
imshow("Contours", drawing);
waitKey(0);
return(0);
}
Many thanks to all!
Diego
#include <iostream>
#include <opencv2/core.hpp>
#include <opencv2/imgproc.hpp>
#include "opencv2/imgcodecs.hpp"
#include <opencv2/highgui.hpp>
#include <opencv2/ml.hpp>
#define NTRAINING_SAMPLES 100 // Number of training samples per class
#define FRAC_LINEAR_SEP 0.5f // Fraction of samples which compose the linear separable part
using namespace cv;
using namespace cv::ml;
using namespace std;
static void help()
{
cout<< "\n--------------------------------------------------------------------------" << endl
<< "This program shows Support Vector Machines for Non-Linearly Separable Data. " << endl
<< "Usage:" << endl
<< "./non_linear_svms" << endl
<< "--------------------------------------------------------------------------" << endl
<< endl;
}
int main()
{
help();
// Data for visual representation
const int WIDTH = 512, HEIGHT = 512;
Mat I = Mat::zeros(HEIGHT, WIDTH, CV_8UC3);
//--------------------- 1. Set up training data randomly ---------------------------------------
Mat trainData(2*NTRAINING_SAMPLES, 2, CV_32FC1);
Mat labels (2*NTRAINING_SAMPLES, 1, CV_32SC1);
RNG rng(100); // Random value generation class
// Set up the linearly separable part of the training data
int nLinearSamples = (int) (FRAC_LINEAR_SEP * NTRAINING_SAMPLES);
// Generate random points for the class 1
Mat trainClass = trainData.rowRange(0, nLinearSamples);
// The x coordinate of the points is in [0, 0.4)
Mat c = trainClass.colRange(0, 1);
rng.fill(c, RNG::UNIFORM, Scalar(1), Scalar(0.4 * WIDTH));
// The y coordinate of the points is in [0, 1)
c = trainClass.colRange(1,2);
rng.fill(c, RNG::UNIFORM, Scalar(1), Scalar(HEIGHT));
// Generate random points for the class 2
trainClass = trainData.rowRange(2*NTRAINING_SAMPLES-nLinearSamples, 2*NTRAINING_SAMPLES);
// The x coordinate of the points is in [0.6, 1]
c = trainClass.colRange(0 , 1);
rng.fill(c, RNG::UNIFORM, Scalar(0.6*WIDTH), Scalar(WIDTH));
// The y coordinate of the points is in [0, 1)
c = trainClass.colRange(1,2);
rng.fill(c, RNG::UNIFORM, Scalar(1), Scalar(HEIGHT));
//------------------ Set up the non-linearly separable part of the training data ---------------
// Generate random points for the classes 1 and 2
trainClass = trainData.rowRange( nLinearSamples, 2*NTRAINING_SAMPLES-nLinearSamples);
// The x coordinate of the points is in [0.4, 0.6)
c = trainClass.colRange(0,1);
rng.fill(c, RNG::UNIFORM, Scalar(0.4*WIDTH), Scalar(0.6*WIDTH));
// The y coordinate of the points is in [0, 1)
c = trainClass.colRange(1,2);
rng.fill(c, RNG::UNIFORM, Scalar(1), Scalar(HEIGHT));
//------------------------- Set up the labels for the classes ---------------------------------
labels.rowRange( 0, NTRAINING_SAMPLES).setTo(1); // Class 1
labels.rowRange(NTRAINING_SAMPLES, 2*NTRAINING_SAMPLES).setTo(2); // Class 2
//------------------------ 2. Set up the support vector machines parameters --------------------
//------------------------ 3. Train the svm ----------------------------------------------------
cout << "Starting training process" << endl;
Ptr<SVM> svm = SVM::create();
svm->setType(SVM::C_SVC);
//svm->setC(0.1);
vector<float> weights;
weights.push_back( 1 );
weights.push_back( 1 );
Mat w(weights);
svm->setClassWeights(w);
svm->setKernel(SVM::INTER);
svm->setTermCriteria(TermCriteria(TermCriteria::MAX_ITER, (int)1e7, 1e-6));
// svm->train(trainData, ROW_SAMPLE, labels);
_InputArray tr_data1(trainData);
_InputArray lab(labels);
Ptr<TrainData> trainData_ptr = TrainData::create(tr_data1 , ROW_SAMPLE , lab);
svm->trainAuto(trainData_ptr);
cout << "Finished training process" << endl;
//------------------------ 4. Show the decision regions ----------------------------------------
Vec3b green(0,100,0), blue (100,0,0);
for (int i = 0; i < I.rows; ++i)
for (int j = 0; j < I.cols; ++j)
{
Mat sampleMat = (Mat_<float>(1,2) << i, j);
float response = svm->predict(sampleMat);
if (response == 1) I.at<Vec3b>(j, i) = green;
else if (response == 2) I.at<Vec3b>(j, i) = blue;
}
//----------------------- 5. Show the training data --------------------------------------------
int thick = -1;
int lineType = 8;
float px, py;
// Class 1
for (int i = 0; i < NTRAINING_SAMPLES; ++i)
{
px = trainData.at<float>(i,0);
py = trainData.at<float>(i,1);
circle(I, Point( (int) px, (int) py ), 3, Scalar(0, 255, 0), thick, lineType);
}
// Class 2
for (int i = NTRAINING_SAMPLES; i <2*NTRAINING_SAMPLES; ++i)
{
px = trainData.at<float>(i,0);
py = trainData.at<float>(i,1);
circle(I, Point( (int) px, (int) py ), 3, Scalar(255, 0, 0), thick, lineType);
}
//------------------------- 6. Show support vectors --------------------------------------------
thick = 2;
lineType = 8;
Mat sv = svm->getSupportVectors();
for (int i = 0; i < sv.rows; ++i)
{
const float* v = sv.ptr<float>(i);
circle( I, Point( (int) v[0], (int) v[1]), 6, Scalar(128, 128, 128), thick, lineType);
}
cout << endl << " C: "<< svm->getC() <<endl ;
imwrite("result.png", I); // save the Image
imshow("SVM for Non-Linear Training Data", I); // show it to the user
waitKey(0);
}
Running this code, which uses the SVM::trainAuto() method, currently takes hours to finish!
So is there a way to make it run on a GPU or to multi-thread it?
The above is just a demo example, but I want to train my SVM on image datasets where I have 4096 features per image, so I was planning to use trainAuto to optimize the SVM_C and SVM_NU parameters, assuming it does that. If not, is there a way I can optimize those parameters?
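One thing I have seen in the OpenCV 3 ml API is that trainAuto can also be passed an explicit kFold and explicit parameter grids, which should shrink the search (fewer SVMs trained during cross-validation). This is only a sketch with placeholder grid values, not tuned ones, and it does not by itself give GPU or multi-threaded training:
// Sketch: coarser search than the default 10-fold, full-grid trainAuto
svm->trainAuto(trainData_ptr,
    5, // kFold: 5 instead of the default 10
    ParamGrid(0.01, 100, 10), // coarse log grid for C (placeholder values)
    SVM::getDefaultGrid(SVM::GAMMA),
    SVM::getDefaultGrid(SVM::P),
    ParamGrid(0.01, 0.2, 2), // coarse grid for NU (placeholder values)
    SVM::getDefaultGrid(SVM::COEF),
    SVM::getDefaultGrid(SVM::DEGREE));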
Thanks in advance.