Hey, I want to do a linear gray transformation so that I can change the contrast.
How can I get the maximum and minimum gray values? And then I want to scale the image so that it has a limited contrast range of 100 to 150. I have searched for about two hours but haven't found anything.
It would be nice if someone could help.
#include <opencv2/highgui/highgui.hpp>
#include <iostream>
#include <opencv2/imgproc.hpp>
using namespace std;
using namespace cv;
cv::Mat plotHistogram(cv::Mat &image, bool cumulative = true, int histSize = 256);
int main()
{
    cv::Mat img = cv::imread("schrott.png"); // Read the file
    if (img.empty()) // Check for invalid input
    {
        std::cout << "Could not open or find the frame" << std::endl;
        return -1;
    }
    cv::Mat img_gray;
    cv::cvtColor(img, img_gray, cv::COLOR_BGR2GRAY); // In case img is colored
    cv::namedWindow("Input Image", cv::WINDOW_AUTOSIZE); // Create a window for display.
    cv::imshow("Input Image", img);
    cv::Mat hist;
    hist = plotHistogram(img_gray);
    cv::namedWindow("Histogram", cv::WINDOW_NORMAL); // Create a window for display.
    cv::imshow("Histogram", hist);
    cv::waitKey(0);
}
cv::Mat plotHistogram(cv::Mat &image, bool cumulative, int histSize) {
    // Create image for the histogram
    int hist_w = 1024; int hist_h = 800;
    int bin_w = cvRound((double) hist_w / histSize);
    cv::Mat histImage(hist_h, hist_w, CV_8UC1, Scalar(255, 255, 255));
    if (image.channels() > 1) {
        cerr << "plotHistogram: Please insert only gray images." << endl;
        return histImage;
    }
    // Calculate histogram
    float range[] = { 0, 256 };
    const float* histRange = { range };
    cv::Mat hist;
    calcHist(&image, 1, 0, Mat(), hist, 1, &histSize, &histRange);
    if (cumulative) {
        cv::Mat accumulatedHist = hist.clone();
        for (int i = 1; i < histSize; i++) {
            accumulatedHist.at<float>(i) += accumulatedHist.at<float>(i - 1);
        }
        hist = accumulatedHist;
    }
    // Normalize the result to [0, histImage.rows]
    normalize(hist, hist, 0, histImage.rows, NORM_MINMAX, -1, Mat());
    // Draw bars
    for (int i = 1; i < histSize; i++) {
        cv::rectangle(histImage, Point(bin_w * (i - 1), hist_h),
                      Point(bin_w * i, hist_h - cvRound(hist.at<float>(i))),
                      Scalar(50, 50, 50), 1);
    }
    return histImage; // Not really call by value, as cv::Mat only saves a pointer to the image data
}
You can find the minimum and maximum values with minMaxLoc:
Mat image;
// read image
double min, max;
minMaxLoc(image, &min, &max);
cout << "min : " << min << " max : " << max << endl;
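Since the question also asks for the scaling step, here is a minimal sketch that maps [min, max] linearly onto the target range [100, 150] with convertTo. The filename is illustrative, and the code assumes max > min:

#include <opencv2/opencv.hpp>

int main() {
    cv::Mat img_gray = cv::imread("schrott.png", cv::IMREAD_GRAYSCALE); // illustrative filename
    if (img_gray.empty()) return -1;

    double min, max;
    cv::minMaxLoc(img_gray, &min, &max);

    // Linear gray transformation: new = alpha * old + beta,
    // chosen so that min -> 100 and max -> 150 (assumes max > min).
    double alpha = (150.0 - 100.0) / (max - min);
    double beta = 100.0 - min * alpha;

    cv::Mat scaled;
    img_gray.convertTo(scaled, CV_8U, alpha, beta);

    cv::imshow("Scaled", scaled);
    cv::waitKey(0);
}

convertTo applies exactly this saturate_cast(alpha * value + beta) mapping to every pixel, so no manual loop is needed.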
I am processing a 32-bit (RGBA) image with OpenCV in C++. I want a count of the number of pixels at each color level, from 0 to 255. So, for a black 1920x1080 image, my output currently looks like this:
(B, G, R)
0 = (2073600, 2073600, 2073600)
1 = (0, 0, 0)
2 = (0, 0, 0)
3 = (0, 0, 0)
...
252 = (0, 0, 0)
253 = (0, 0, 0)
254 = (0, 0, 0)
255 = (0, 0, 0)
For white, it looks like this:
(B, G, R)
0 = (0, 0, 0)
1 = (0, 0, 0)
2 = (0, 0, 0)
3 = (0, 0, 0)
...
252 = (0, 0, 0)
253 = (0, 0, 0)
254 = (0, 0, 0)
255 = (2073600, 2073600, 2073600)
The total number of pixels in the image is 1920*1080 = 2073600, and the sum of the counts over levels 0 to 255 doesn't exceed it in these cases. However, the problem is that when I have a pure red image with one pixel modified to have a value of 254 instead of 255, I get the following result:
(B, G, R)
0 = (2073600, 2073600, 0)
1 = (0, 0, 0)
2 = (0, 0, 0)
3 = (0, 0, 0)
...
252 = (0, 0, 0)
253 = (0, 0, 0)
254 = (0, 0, 1)
255 = (0, 0, 2073600)
Total pixels in the red channel: 2073601, not 2073600. I need the histogram representation not to exceed the total number of pixels in the image.
Here is the code:
cv::Mat getHist(std::string filename) {
    cv::Mat img;
    img = cv::imread(filename, CV_LOAD_IMAGE_COLOR);
    if (!img.data) {
        std::cout << "Problem with source\n";
        return cv::Mat();
    }
    std::vector<cv::Mat> bgr_planes;
    cv::split(img, bgr_planes); // split the source image into the B, G and R planes: [0], [1] and [2]
    int histSize = 256;         // from 0 to 255 (8 bit)
    float range[] = {0, 256};   // the upper boundary is exclusive
    const float* histRange = {range};
    bool uniform = true;
    bool accumulate = false;
    int channels[] = {0};
    cv::Mat b_hist, g_hist, r_hist;
    cv::calcHist(&bgr_planes[0], 1, channels, cv::Mat(), b_hist, 1, &histSize, &histRange, uniform, accumulate); // 1 means only one image; cv::Mat() means no mask
    cv::calcHist(&bgr_planes[1], 1, channels, cv::Mat(), g_hist, 1, &histSize, &histRange, uniform, accumulate);
    cv::calcHist(&bgr_planes[2], 1, channels, cv::Mat(), r_hist, 1, &histSize, &histRange, uniform, accumulate);
    int hist_h = img.rows * img.cols;
    int hist_w = 256;
    int bin_w = cvRound((double) hist_w / histSize);
    cv::Mat histImage(hist_h, hist_w, CV_8UC3, cv::Scalar(0, 0, 0));
    // cv::Mat histImage(hist_h, hist_w, CV_32F, cv::Scalar(0,0,0));
    // Normalize the result to [0, histImage.rows]; each hist is both the input
    // and the output array, which is fine.
    cv::normalize(b_hist, b_hist, 0, histImage.rows, cv::NORM_MINMAX, -1, cv::Mat());
    cv::normalize(g_hist, g_hist, 0, histImage.rows, cv::NORM_MINMAX, -1, cv::Mat());
    cv::normalize(r_hist, r_hist, 0, histImage.rows, cv::NORM_MINMAX, -1, cv::Mat());
    int rsum = 0;
    for (int i = 0; i < histSize; i++) {
        // std::cout << hist_h - cvRound(b_hist.at<float>(i));
        std::cout << i << " = (";
        std::cout << cvRound(b_hist.at<float>(i)) << ", ";
        rsum += cvRound(r_hist.at<float>(i));
        std::cout << cvRound(g_hist.at<float>(i)) << ", ";
        std::cout << cvRound(r_hist.at<float>(i)) << ") \n";
    }
    std::cout << "Red channel pixel sum: " << rsum;
    std::cout << " Resolution " << img.rows << "x" << img.cols << " == " << img.rows * img.cols << "\n";
    std::cin.ignore();
    return histImage;
}
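As a side note, the raw calcHist output sums exactly to rows * cols for an 8-bit image; an extra pixel can only appear after cv::normalize stretches the bins to [0, hist_h] and cvRound re-quantizes them. A minimal sketch for checking this, with an illustrative filename:

#include <opencv2/opencv.hpp>
#include <iostream>
#include <vector>

int main() {
    cv::Mat img = cv::imread("red.png", cv::IMREAD_COLOR); // illustrative filename
    if (img.empty()) return -1;

    std::vector<cv::Mat> bgr_planes;
    cv::split(img, bgr_planes);

    int histSize = 256;
    float range[] = {0, 256};
    const float* histRange = {range};

    cv::Mat r_hist;
    cv::calcHist(&bgr_planes[2], 1, 0, cv::Mat(), r_hist, 1, &histSize, &histRange);

    // Sum the un-normalized bin counts: this always equals rows * cols.
    double rsum = 0;
    for (int i = 0; i < histSize; i++)
        rsum += r_hist.at<float>(i);

    std::cout << "raw red sum: " << rsum
              << ", pixels: " << img.rows * img.cols << std::endl;
}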
I'm trying a simple example to learn SVM in OpenCV, but I'm not getting the right support vectors after training. I need some help in understanding the issue.
My code is:
#include <iostream>
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/ml/ml.hpp>
using namespace cv;
using namespace std;
int main() {
    Mat frame(Size(640, 360), CV_8UC3, Scalar::all(255));
    float train[15][2] = { {296, 296}, {296, 312}, {312, 8}, {312, 56}, {312, 88}, {328, 88}, {328, 104}, {328, 264}, {344, 8}, {344, 40}, {360, 8}, {360, 56}, {376, 8}, {376, 40}, {376, 56} };
    Mat trainingDataMat(15, 2, CV_32FC1, train);
    float labels[15] = { -1, -1, 1, 1, 1, 1, 1, -1, 1, 1, 1, 1, 1, 1, 1 };
    Mat labelsMat(15, 1, CV_32FC1, labels);
    CvSVMParams param;
    param.svm_type = CvSVM::C_SVC;
    param.C = 0.1;
    param.kernel_type = SVM::LINEAR;
    param.term_crit = TermCriteria(CV_TERMCRIT_ITER, 1000, 1e-6);
    CvSVM SVM;
    SVM.train(trainingDataMat, labelsMat, Mat(), Mat(), param);
    cout << "Training Finished..." << endl;
    for (int i = 0; i < frame.rows; ++i) {
        for (int j = 0; j < frame.cols; ++j) {
            Mat sampleMat = (Mat_<float>(1, 2) << i, j);
            float response = SVM.predict(sampleMat);
            //cout << response << endl;
            if (response == 1) {
                frame.at<Vec3b>(i, j)[2] = 0;
            } else {
                frame.at<Vec3b>(i, j)[0] = 0;
            }
        }
    }
    for (int dis = 0; dis < trainingDataMat.rows; dis++) {
        if (labels[dis] == 1) {
            circle(frame, Point((int) train[dis][0], (int) train[dis][1]), 3, Scalar(0, 0, 0), -1);
        } else {
            circle(frame, Point((int) train[dis][0], (int) train[dis][1]), 3, Scalar(0, 255, 0), -1);
        }
    }
    int n = SVM.get_support_vector_count();
    for (int i = 0; i < n; i++) {
        const float* v = SVM.get_support_vector(i);
        cout << "support Vectors : " << v[0] << " " << v[1] << endl;
        circle(frame, Point((int) v[0], (int) v[1]), 6, Scalar(128, 128, 128), 2, 8);
    }
    imwrite("frame.jpg", frame);
    imshow("output", frame);
    waitKey(0);
    return 0;
}
The output image is attached.
The SVM line is not separating the two classes as I expect.
The result for the support vectors is:
support Vectors : 0 0.0125
The SVM should be OK. I think the problem lies in your display. When you call circle(frame, Point((int)train[dis][0], (int)train[dis][1]), 3, Scalar(0, 0, 0), -1), OpenCV draws a circle at row train[dis][1] and column train[dis][0]. This is not what you want, because your prediction loop fills frame.at<Vec3b>(i, j) with i as the row: a quirk of OpenCV is that it uses different coordinate systems for matrix indexing and for points, so image.at<float>(Point(i, j)) is equivalent to image.at<float>(j, i).
Try replacing your circle calls with this:
if (labels[dis] == 1) {
    circle(frame, Point((int) train[dis][1], (int) train[dis][0]), 3, Scalar(0, 0, 0), -1);
} else {
    circle(frame, Point((int) train[dis][1], (int) train[dis][0]), 3, Scalar(0, 255, 0), -1);
}
#include <iostream>
#include <opencv2/core.hpp>
#include <opencv2/imgproc.hpp>
#include "opencv2/imgcodecs.hpp"
#include <opencv2/highgui.hpp>
#include <opencv2/ml.hpp>
#define NTRAINING_SAMPLES 100 // Number of training samples per class
#define FRAC_LINEAR_SEP 0.5f // Fraction of samples which compose the linear separable part
using namespace cv;
using namespace cv::ml;
using namespace std;
static void help()
{
    cout << "\n--------------------------------------------------------------------------" << endl
         << "This program shows Support Vector Machines for Non-Linearly Separable Data. " << endl
         << "Usage:" << endl
         << "./non_linear_svms" << endl
         << "--------------------------------------------------------------------------" << endl
         << endl;
}

int main()
{
    help();
    // Data for visual representation
    const int WIDTH = 512, HEIGHT = 512;
    Mat I = Mat::zeros(HEIGHT, WIDTH, CV_8UC3);

    //--------------------- 1. Set up training data randomly ---------------------------------------
    Mat trainData(2 * NTRAINING_SAMPLES, 2, CV_32FC1);
    Mat labels(2 * NTRAINING_SAMPLES, 1, CV_32SC1);
    RNG rng(100); // Random value generation class

    // Set up the linearly separable part of the training data
    int nLinearSamples = (int) (FRAC_LINEAR_SEP * NTRAINING_SAMPLES);
    // Generate random points for class 1
    Mat trainClass = trainData.rowRange(0, nLinearSamples);
    // The x coordinate of the points is in [1, 0.4 * WIDTH)
    Mat c = trainClass.colRange(0, 1);
    rng.fill(c, RNG::UNIFORM, Scalar(1), Scalar(0.4 * WIDTH));
    // The y coordinate of the points is in [1, HEIGHT)
    c = trainClass.colRange(1, 2);
    rng.fill(c, RNG::UNIFORM, Scalar(1), Scalar(HEIGHT));
    // Generate random points for class 2
    trainClass = trainData.rowRange(2 * NTRAINING_SAMPLES - nLinearSamples, 2 * NTRAINING_SAMPLES);
    // The x coordinate of the points is in [0.6 * WIDTH, WIDTH)
    c = trainClass.colRange(0, 1);
    rng.fill(c, RNG::UNIFORM, Scalar(0.6 * WIDTH), Scalar(WIDTH));
    // The y coordinate of the points is in [1, HEIGHT)
    c = trainClass.colRange(1, 2);
    rng.fill(c, RNG::UNIFORM, Scalar(1), Scalar(HEIGHT));

    //------------------ Set up the non-linearly separable part of the training data ---------------
    // Generate random points for classes 1 and 2
    trainClass = trainData.rowRange(nLinearSamples, 2 * NTRAINING_SAMPLES - nLinearSamples);
    // The x coordinate of the points is in [0.4 * WIDTH, 0.6 * WIDTH)
    c = trainClass.colRange(0, 1);
    rng.fill(c, RNG::UNIFORM, Scalar(0.4 * WIDTH), Scalar(0.6 * WIDTH));
    // The y coordinate of the points is in [1, HEIGHT)
    c = trainClass.colRange(1, 2);
    rng.fill(c, RNG::UNIFORM, Scalar(1), Scalar(HEIGHT));

    //------------------------- Set up the labels for the classes ---------------------------------
    labels.rowRange(0, NTRAINING_SAMPLES).setTo(1);                     // Class 1
    labels.rowRange(NTRAINING_SAMPLES, 2 * NTRAINING_SAMPLES).setTo(2); // Class 2

    //------------------------ 2. Set up the support vector machine's parameters -------------------
    //------------------------ 3. Train the svm ----------------------------------------------------
    cout << "Starting training process" << endl;
    Ptr<SVM> svm = SVM::create();
    svm->setType(SVM::C_SVC);
    //svm->setC(0.1);
    vector<float> weights;
    weights.push_back(1);
    weights.push_back(1);
    Mat w(weights);
    svm->setClassWeights(w);
    svm->setKernel(SVM::INTER);
    svm->setTermCriteria(TermCriteria(TermCriteria::MAX_ITER, (int) 1e7, 1e-6));
    // svm->train(trainData, ROW_SAMPLE, labels);
    Ptr<TrainData> trainData_ptr = TrainData::create(trainData, ROW_SAMPLE, labels);
    svm->trainAuto(trainData_ptr);
    cout << "Finished training process" << endl;

    //------------------------ 4. Show the decision regions ----------------------------------------
    Vec3b green(0, 100, 0), blue(100, 0, 0);
    for (int i = 0; i < I.rows; ++i)
        for (int j = 0; j < I.cols; ++j)
        {
            Mat sampleMat = (Mat_<float>(1, 2) << i, j);
            float response = svm->predict(sampleMat);
            if (response == 1) I.at<Vec3b>(j, i) = green;
            else if (response == 2) I.at<Vec3b>(j, i) = blue;
        }

    //----------------------- 5. Show the training data --------------------------------------------
    int thick = -1;
    int lineType = 8;
    float px, py;
    // Class 1
    for (int i = 0; i < NTRAINING_SAMPLES; ++i)
    {
        px = trainData.at<float>(i, 0);
        py = trainData.at<float>(i, 1);
        circle(I, Point((int) px, (int) py), 3, Scalar(0, 255, 0), thick, lineType);
    }
    // Class 2
    for (int i = NTRAINING_SAMPLES; i < 2 * NTRAINING_SAMPLES; ++i)
    {
        px = trainData.at<float>(i, 0);
        py = trainData.at<float>(i, 1);
        circle(I, Point((int) px, (int) py), 3, Scalar(255, 0, 0), thick, lineType);
    }

    //------------------------- 6. Show support vectors --------------------------------------------
    thick = 2;
    lineType = 8;
    Mat sv = svm->getSupportVectors();
    for (int i = 0; i < sv.rows; ++i)
    {
        const float* v = sv.ptr<float>(i);
        circle(I, Point((int) v[0], (int) v[1]), 6, Scalar(128, 128, 128), thick, lineType);
    }

    cout << endl << " C: " << svm->getC() << endl;
    imwrite("result.png", I);                      // save the image
    imshow("SVM for Non-Linear Training Data", I); // show it to the user
    waitKey(0);
}
Currently, running this code, which uses the SVM::trainAuto() method, takes hours to finish! So is there a way to make it run on the GPU or to multi-thread it?
The above is just a demo example, but I want to train my SVM on image datasets where I have 4096 features per image, so I was planning to use trainAuto to optimize the C and nu parameters, assuming it does that. If not, is there a way I can optimize those parameters?
Thanks in advance.
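trainAuto runs a k-fold cross-validated grid search over up to six parameters, which is what makes it so slow, and as far as I know the ml module offers no GPU path for it; depending on your OpenCV version it may also use only one core. One common workaround, sketched below against the 3.x API and the variables from the code above, is to lower the fold count and restrict the search to the grids you care about, disabling the rest with a log step <= 1. The grid bounds here are illustrative, not tuned values:

using namespace cv::ml;

Ptr<TrainData> tdata = TrainData::create(trainData, ROW_SAMPLE, labels);

ParamGrid Cgrid(0.01, 100, 10); // search C over a few orders of magnitude (illustrative)
ParamGrid nuGrid(0.01, 0.2, 2); // a small nu grid (illustrative)
ParamGrid noGrid(0, 0, 0);      // logStep <= 1 disables the search for that parameter

svm->trainAuto(tdata,
               5,        // fewer cross-validation folds than the default 10
               Cgrid,    // C
               noGrid,   // gamma: irrelevant for linear/intersection kernels
               noGrid,   // p: only used by EPS_SVR
               nuGrid,   // nu
               noGrid,   // coeff
               noGrid);  // degree

Note that for C_SVC only C (plus any kernel parameters) is actually searched; nu applies to the NU_SVC/NU_SVR types, so optimizing nu would mean switching the SVM type first.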
Because this is a project for an image processing class, I have to implement a couple of linear filters from scratch (I'm not supposed to use already implemented OpenCV features like Sobel, not even the 2D filter function). The code is at the end of the question.
Since images processed with the Sobel operator give results similar to Prewitt ones, I used as a test a window where I display a Sobel-processed image.
I only got to the point of applying an operator in the horizontal direction, but I'm already getting weird results. The images speak for themselves:
Original image:
Original image
My result using the Prewitt operator on the horizontal direction:
My processed image
I get a weird blue-beige pattern instead of black-and-white horizontal lines. What is happening?
Here is the code:
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/opencv.hpp"
#include <stdlib.h>
#include <stdio.h>

using namespace cv;
using namespace std;

int main(int, char** argv)
{
    int Hprewitt[3][3] = { { -1, 0, 1 }, { -1, 0, 1 }, { -1, 0, 1 } };
    int Vprewitt[3][3] = { { -1, -1, -1 }, { 0, 0, 0 }, { 1, 1, 1 } };
    int tempInput[3][3];
    int tempPixel = 0;
    Mat src, src_gray;
    Mat grad;
    const char* window_name = "Sobel Edge Detector";
    const char* window_name2 = "Prewitt";
    int scale = 1;
    int delta = 0;
    int ddepth = CV_16S;
    int computedIntensity;

    src = imread(argv[1]);
    if (src.empty())
    {
        return -1;
    }
    namedWindow(window_name2, WINDOW_AUTOSIZE);
    Mat HprewittMat(src.rows, src.cols, CV_8UC3, Scalar(0, 0, 0));
    GaussianBlur(src, src, Size(3, 3), 0, 0, BORDER_DEFAULT);
    cvtColor(src, src_gray, COLOR_RGB2GRAY);
    namedWindow(window_name, WINDOW_AUTOSIZE);

    Scalar intensity = src.at<uchar>(Point(50, 50)); // this is how to access intensity at a certain pixel
    Vec3b scalarTempPixel = src.at<Vec3b>(Point(1, 1));
    cout << "Pixel (50,50) has intensity: " << intensity.val[0] << endl;

    // applying horizontal prewitt operator
    cout << "\n Image has resolution: " << src.cols << "x" << src.rows << "\n";
    for (int i = 2; i < src.cols - 1; i++) { // currently going from column 2 to n-2, same for row
        for (int j = 2; j < src.rows - 1; j++) {
            // storing a temporary 3x3 input matrix centered on the current pixel
            // cout << "Matrix centered on pixel: [" << i << "," << j << "] \n";
            for (int k = -1; k < 2; k++) {
                for (int l = -1; l < 2; l++) {
                    intensity = src.at<uchar>(Point(i + k, j + l));
                    tempInput[k + 1][l + 1] = intensity.val[0];
                    // cout << "[" << intensity.val[0] << "]";
                }
                // cout << " \n";
            }
            // convolution of horizontal prewitt kernel with current 3x3 matrix
            for (int x = 0; x < 3; x++) {
                for (int y = 0; y < 3; y++) {
                    tempPixel = tempPixel + tempInput[x][y] * Hprewitt[x][y];
                }
            }
            scalarTempPixel[0] = tempPixel;
            HprewittMat.at<Vec3b>(Point(i, j)) = scalarTempPixel;
        }
    }

    Mat grad_x, grad_y;
    Mat abs_grad_x, abs_grad_y;
    Sobel(src_gray, grad_x, ddepth, 1, 0, 3, scale, delta, BORDER_DEFAULT);
    convertScaleAbs(grad_x, abs_grad_x);
    Sobel(src_gray, grad_y, ddepth, 0, 1, 3, scale, delta, BORDER_DEFAULT);
    convertScaleAbs(grad_y, abs_grad_y);
    addWeighted(abs_grad_x, 0.5, abs_grad_y, 0.5, 0, grad);

    imshow(window_name, grad);
    imshow(window_name2, HprewittMat);
    waitKey(0);
    return 0;
}
So at this point I'm doing the following:
I'm reading an image into "src", then I create HprewittMat, which I initialize with the number of rows and columns of the original image but with black pixels. I then convert the src image to a gray one. Then I iterate through each pixel of the original image, and for each pixel I apply a convolution mask using the surrounding pixels and the horizontal Prewitt kernel. Then I store that value in "tempPixel" and write it into the HprewittMat image.
The next step would be to do the same with the vertical kernel, and then compute the gradient from the two.
I'm asking this question because I've found similar questions on how to manipulate individual pixels, but usually for Python or Java. There might also be some flaw in the logic I'm using.
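For comparison, here is a minimal sketch of the horizontal pass described above, written against the grayscale image rather than the color one. It resets the accumulator for every output pixel, takes the absolute value of the response, and saturates it into an 8-bit single-channel image; it is a sketch of the described convolution under those assumptions, not a drop-in replacement for the code above:

#include <opencv2/opencv.hpp>
#include <cstdlib>

int main(int, char** argv)
{
    cv::Mat src = cv::imread(argv[1]);
    if (src.empty()) return -1;

    cv::Mat gray;
    cv::cvtColor(src, gray, cv::COLOR_BGR2GRAY);

    const int Hprewitt[3][3] = { { -1, 0, 1 }, { -1, 0, 1 }, { -1, 0, 1 } };
    cv::Mat out = cv::Mat::zeros(gray.size(), CV_8UC1); // single-channel output

    for (int r = 1; r < gray.rows - 1; r++) {
        for (int c = 1; c < gray.cols - 1; c++) {
            int acc = 0; // reset the accumulator for every output pixel
            for (int kr = -1; kr <= 1; kr++)
                for (int kc = -1; kc <= 1; kc++)
                    acc += gray.at<uchar>(r + kr, c + kc) * Hprewitt[kr + 1][kc + 1];
            // clamp the (possibly negative) response into the 8-bit range
            out.at<uchar>(r, c) = cv::saturate_cast<uchar>(std::abs(acc));
        }
    }

    cv::imshow("Prewitt horizontal (sketch)", out);
    cv::waitKey(0);
    return 0;
}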