Edit: Added the function code for matlabImrotate.
I need to translate MATLAB code that uses this function.
Sometimes it appears like this: imrotate(double(im), d, 'bilinear', 'crop');
At other times it appears like this: imrotate(im_mask, rotation_angle, 'crop');
I tried to use the answer in How to implement imrotate of Matlab in OpenCV?, but got different results.
For example, by using the code from this link inside a function:
bool matlabImrotate(Mat& src, double angle, int interpolationMethod, Mat& dst)
{
    // src: https://stackoverflow.com/questions/38715363/how-to-implement-imrotate-of-matlab-in-opencv
    try
    {
        // Special Cases
        if (fmod(angle, 360.0) == 0.0)
            dst = src;
        else {
            Point2f center(src.cols / 2.0F, src.rows / 2.0F);
            Mat rot = cv::getRotationMatrix2D(center, angle, 1.0);
            // determine bounding rectangle
            Rect bbox = RotatedRect(center, src.size(), angle).boundingRect();
            // adjust transformation matrix
            //rot.at<double>(0, 2) += bbox.width / 2.0 - center.x;
            //rot.at<double>(1, 2) += bbox.height / 2.0 - center.y;
            warpAffine(src, dst, rot, bbox.size(), interpolationMethod);
        }
        return true;
    }
    catch (exception& e)
    {
        cout << "Error in matlabImrotate - " << e.what() << endl;
        return false;
    }
}
and the following code:
double data[9]{
    0, 0, 3,
    4, 5, 0,
    0, 0, 9 };
Mat A = Mat(3, 3, CV_64F, data);
matlabImrotate(A, -5, INTER_LINEAR, A);
cout << "A:" << endl;
cout << A << endl;
cout << endl;
I got the result:
A:
[0.4375, 0.3046875, 2.54296875, 0.328125, 0, 0;
3.390625, 4.8134765625, 0.2421875, 0.0234375, 0, 0;
0, 0.2724609375, 8.4462890625, 0, 0, 0;
0, 0, 0.4921875, 0, 0, 0;
0, 0, 0, 0, 0, 0;
0, 0, 0, 0, 0, 0]
instead of MATLAB's:
A =
1.2127 0.3141 1.2084
2.5963 5.0000 1.2278
0 3.2512 3.6252
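My current understanding is that MATLAB's 'crop' option keeps the output the same size as the input and rotates about the image center, whereas the code above sizes the output to the rotated bounding box. A minimal sketch of a 'crop'-style version under that assumption (the name matlabImrotateCrop and the MATLAB-style half-pixel center offset are my own guesses, not verified against MATLAB):
// Sketch: mimic imrotate(src, angle, 'bilinear', 'crop') -- the output keeps
// the input size; anything rotated outside the frame is cut off, gaps become 0.
// Note: MATLAB's rotation center in 0-based coordinates is
// ((cols - 1) / 2, (rows - 1) / 2), half a pixel off from cols / 2.0F.
bool matlabImrotateCrop(const Mat& src, double angle, int interpolationMethod, Mat& dst)
{
    Point2f center((src.cols - 1) / 2.0F, (src.rows - 1) / 2.0F);
    Mat rot = cv::getRotationMatrix2D(center, angle, 1.0);
    // 'crop': no bounding-box resizing, keep src.size() as the output size
    warpAffine(src, dst, rot, src.size(), interpolationMethod,
               BORDER_CONSTANT, Scalar(0));
    return true;
}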
I am processing a 32-bit (RGBA) image with OpenCV in C++. I want a count of the number of pixels at each intensity level from 0 to 255. So, for a black 1920x1080 image, my output currently looks like this:
(B, G, R)
0 = (2073600, 2073600, 2073600)
1 = (0, 0, 0)
2 = (0, 0, 0)
3 = (0, 0, 0)
...
252 = (0, 0, 0)
253 = (0, 0, 0)
254 = (0, 0, 0)
255 = (0, 0, 0)
for white, it looks like this:
(B, G, R)
0 = (0, 0, 0)
1 = (0, 0, 0)
2 = (0, 0, 0)
3 = (0, 0, 0)
...
252 = (0, 0, 0)
253 = (0, 0, 0)
254 = (0, 0, 0)
255 = (2073600, 2073600, 2073600)
The total number of pixels in the image is 1920*1080 = 2073600, and the sum over all levels from 0 to 255 doesn't exceed it in these cases. However, when I have a pure red image with one pixel modified to 254 instead of 255, I get the following result:
(B, G, R)
0 = (2073600, 2073600, 0)
1 = (0, 0, 0)
2 = (0, 0, 0)
3 = (0, 0, 0)
...
252 = (0, 0, 0)
253 = (0, 0, 0)
254 = (0, 0, 1)
255 = (0, 0, 2073600)
Total pixels in the red channel: 2073601, not 2073600. I need the histogram representation not to exceed the total number of pixels in the image.
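Working through the numbers, the extra count seems to come from min-max normalization followed by cvRound (my analysis, not confirmed): with NORM_MINMAX scaled to [0, 2073600], the red channel's largest bin (2073599 pixels at level 255) maps to exactly 2073600, while the bin holding the single 254 pixel maps to 1 * 2073600 / 2073599 ≈ 1.0000005, which rounds to 1, so the printed values sum to 2073601. A quick check:
// Quick check of the suspected rounding overshoot after NORM_MINMAX:
// bin(255) = 2073599 -> scaled to 2073599 * 2073600 / 2073599 = 2073600
// bin(254) = 1       -> scaled to 1 * 2073600 / 2073599 ~= 1.0000005
double scaled254 = 1.0 * 2073600.0 / 2073599.0;
std::cout << cvRound(scaled254) + 2073600 << std::endl; // prints 2073601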
Here is the code:
cv::Mat getHist(std::string filename) {
    cv::Mat img;
    img = cv::imread(filename, CV_LOAD_IMAGE_COLOR);
    if (!img.data) {
        std::cout << "Problem with source\n";
        return cv::Mat();
    }
    std::vector<cv::Mat> bgr_planes;
    cv::split(img, bgr_planes); //split source image data into bgr planes vector array [0],[1], and [2]
    int histSize = 256; //from 0 to 255 (8 bit)
    float range[] = {0, 256}; //initialize range[] array with two values, 0 and 256, the upper boundary is exclusive.
    const float* histRange = {range};
    bool uniform = true;
    bool accumulate = false;
    int channels[] = {0};
    cv::Mat b_hist, g_hist, r_hist;
    cv::calcHist(&bgr_planes[0], 1, channels, cv::Mat(), b_hist, 1, &histSize, &histRange, uniform, accumulate); //1 parameter means only 1 image. cv::Mat() means no Mask
    cv::calcHist(&bgr_planes[1], 1, channels, cv::Mat(), g_hist, 1, &histSize, &histRange, uniform, accumulate);
    cv::calcHist(&bgr_planes[2], 1, channels, cv::Mat(), r_hist, 1, &histSize, &histRange, uniform, accumulate);
    int hist_h = img.rows*img.cols;
    int hist_w = 256;
    int bin_w = cvRound((double)hist_w/histSize);
    cv::Mat histImage(hist_h, hist_w, CV_8UC3, cv::Scalar(0,0,0));
    // cv::Mat histImage(hist_h, hist_w, CV_32F, cv::Scalar(0,0,0));
    // normalize the histogram so values fall in the range indicated by the parameters entered.
    // normalize the result to [ 0, histImage.rows ]
    cv::normalize(b_hist, b_hist, 0, histImage.rows, cv::NORM_MINMAX, -1, cv::Mat()); // b_hist is input array and the output normalized array, okay if they are the same.
    cv::normalize(g_hist, g_hist, 0, histImage.rows, cv::NORM_MINMAX, -1, cv::Mat());
    cv::normalize(r_hist, r_hist, 0, histImage.rows, cv::NORM_MINMAX, -1, cv::Mat());
    int rsum = 0;
    for (int i = 0; i < histSize; i++) {
        // std::cout << hist_h - cvRound(b_hist.at<float>(i));
        std::cout << i << " = (";
        std::cout << cvRound(b_hist.at<float>(i)) << ", ";
        rsum += cvRound(r_hist.at<float>(i));
        std::cout << cvRound(g_hist.at<float>(i)) << ", ";
        std::cout << cvRound(r_hist.at<float>(i)) << ") \n";
    }
    std::cout << "Red channel pixel sum: " << rsum;
    std::cout << " Resolution " << img.rows << "x" << img.cols << " == " << img.rows*img.cols << "\n";
    std::cin.ignore();
    return histImage;
}
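If the goal is only to print exact per-level counts, a sketch that skips the normalization entirely and reads the raw calcHist bins (exact integer counts stored as float) would avoid the rounding issue; it assumes it runs before (or instead of) the cv::normalize calls:
// Sketch: print raw bin counts instead of normalized values, so each
// channel's counts sum exactly to img.rows * img.cols.
int rawsum = 0;
for (int i = 0; i < histSize; i++) {
    rawsum += (int)r_hist.at<float>(i); // counts are exact integers here
    std::cout << i << " = ("
              << (int)b_hist.at<float>(i) << ", "
              << (int)g_hist.at<float>(i) << ", "
              << (int)r_hist.at<float>(i) << ")\n";
}
std::cout << "Red channel pixel sum: " << rawsum << "\n";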
I'm trying a simple example to learn SVM in OpenCV, but I'm not getting the right support vectors after training. I need some help in understanding the issue.
My code is:
#include <iostream>
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/ml/ml.hpp>
using namespace cv;
using namespace std;
int main() {
    Mat frame(Size(640,360), CV_8UC3, Scalar::all(255));
    float train[15][2] = { {296, 296}, {296, 312}, {312, 8}, {312, 56}, {312, 88}, {328, 88}, {328, 104}, {328, 264}, {344, 8}, {344, 40}, {360, 8}, {360, 56}, {376, 8}, {376, 40}, {376, 56} };
    Mat trainingDataMat(15, 2, CV_32FC1, train);
    float labels[15] = { -1, -1, 1, 1, 1, 1, 1, -1, 1, 1, 1, 1, 1, 1, 1 };
    Mat labelsMat(15, 1, CV_32FC1, labels);
    CvSVMParams param;
    param.svm_type = CvSVM::C_SVC;
    param.C = 0.1;
    param.kernel_type = SVM::LINEAR;
    param.term_crit = TermCriteria(CV_TERMCRIT_ITER, 1000, 1e-6);
    CvSVM SVM;
    SVM.train(trainingDataMat, labelsMat, Mat(), Mat(), param);
    cout << "Training Finished..." << endl;
    for(int i = 0; i < frame.rows; ++i) {
        for(int j = 0; j < frame.cols; ++j) {
            Mat sampleMat = (Mat_<float>(1,2) << i,j);
            float response = SVM.predict(sampleMat);
            //cout << response << endl;
            if(response == 1) {
                frame.at<Vec3b>(i,j)[2] = 0;
            } else {
                frame.at<Vec3b>(i,j)[0] = 0;
            }
        }
    }
    for(int dis = 0; dis < trainingDataMat.rows; dis++) {
        if(labels[dis] == 1) {
            circle(frame, Point((int)train[dis][0], (int)train[dis][1]), 3, Scalar(0, 0, 0), -1);
        } else {
            circle(frame, Point((int)train[dis][0], (int)train[dis][1]), 3, Scalar(0, 255, 0), -1);
        }
    }
    int n = SVM.get_support_vector_count();
    for(int i = 0; i < n; i++) {
        const float* v = SVM.get_support_vector(i);
        cout << "support Vectors : " << v[0] << " " << v[1] << endl;
        circle(frame, Point((int)v[0], (int)v[1]), 6, Scalar(128, 128, 128), 2, 8);
    }
    imwrite("frame.jpg", frame);
    imshow("output", frame);
    waitKey(0);
    return 0;
}
The output image is attached.
The SVM line is not separating the two classes as I expect.
The result for the support vectors is:
support Vectors : 0 0.0125
The SVM should be OK. I think the problem lies in your display. When you call circle(frame, Point((int)train[dis][0], (int)train[dis][1]), 3, Scalar(0, 0, 0), -1);, OpenCV understands that you want a circle at row train[dis][1] and column train[dis][0]. This is not what you want, because a quirk of OpenCV is that it uses different coordinate orders for matrices and points: image.at<float>(Point(i,j)) is equivalent to image.at<float>(j,i).
Try replacing your circle calls with this:
if(labels[dis] == 1) {
    circle(frame, Point((int)train[dis][1], (int)train[dis][0]), 3, Scalar(0, 0, 0), -1);
} else {
    circle(frame, Point((int)train[dis][1], (int)train[dis][0]), 3, Scalar(0, 255, 0), -1);
}
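A quick standalone check of the two coordinate conventions (just an illustration, not part of the fix; it assumes the same using-namespace setup as your code):
// at<>(row, col) vs. Point(x = col, y = row): both address the same cell.
Mat m = Mat::zeros(3, 3, CV_32F);
m.at<float>(1, 2) = 7.0f;                  // row 1, column 2
cout << m.at<float>(Point(2, 1)) << endl;  // Point(x=2, y=1) prints 7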
Because this is a project for an image processing class, I have to implement a couple of linear filters from scratch (I'm not supposed to use already-implemented OpenCV features like Sobel, not even the 2D filter function). The code is at the end of the question.
Since images processed with the Sobel operator give similar results to Prewitt ones, I used as a test a window where I display a Sobel-processed image.
I only got to the point of applying the operator in the horizontal direction, but I'm already getting weird results. The images speak for themselves:
Original image: [image]
My result using the Prewitt operator on the horizontal direction: [image]
I get a weird blue-beige pattern instead of black-and-white horizontal lines. What is happening?
Here is the code:
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/opencv.hpp"
#include <stdlib.h>
#include <stdio.h>
using namespace cv;
using namespace std;
int main(int, char** argv)
{
    int Hprewitt[3][3] = { { -1, 0, 1 }, { -1, 0, 1 }, { -1, 0, 1 } };
    int Vprewitt[3][3] = { { -1, -1, -1 }, { 0, 0, 0 }, { 1, 1, 1 } };
    int tempInput[3][3];
    int tempPixel = 0;
    Mat src, src_gray;
    Mat grad;
    const char* window_name = "Sobel Edge Detector";
    const char* window_name2 = "Prewitt";
    int scale = 1;
    int delta = 0;
    int ddepth = CV_16S;
    int computedIntensity;
    src = imread(argv[1]);
    if (src.empty())
    {
        return -1;
    }
    namedWindow(window_name2, WINDOW_AUTOSIZE);
    Mat HprewittMat(src.rows, src.cols, CV_8UC3, Scalar(0, 0, 0));
    GaussianBlur(src, src, Size(3, 3), 0, 0, BORDER_DEFAULT);
    cvtColor(src, src_gray, COLOR_RGB2GRAY);
    namedWindow(window_name, WINDOW_AUTOSIZE);
    Scalar intensity = src.at<uchar>(Point(50, 50)); // this is how to access intensity at a certain pixel
    Vec3b scalarTempPixel = src.at<Vec3b>(Point(1, 1));
    cout << "Pixel (50,50) has intensity: " << intensity.val[0] << endl;
    // applying horizontal prewitt operator
    cout << "\n Image has resolution: " << src.cols << "x" << src.rows << "\n";
    for (int i = 2; i < src.cols - 1; i++){ // currently going from column 2 to n-2, same for row
        for (int j = 2; j < src.rows - 1; j++){
            // storing a temporary 3x3 input matrix centered on the current pixel
            // cout << "Matrix centered on pixel: [" << i << "," << j << "] \n";
            for (int k = -1; k < 2; k++){
                for (int l = -1; l < 2; l++){
                    intensity = src.at<uchar>(Point(i + k, j + l));
                    tempInput[k + 1][l + 1] = intensity.val[0];
                    // cout << "[" << intensity.val[0] << "]";
                }
                // cout << " \n";
            }
            // convolution of horizontal prewitt kernel with current 3x3 matrix
            for (int x = 0; x < 3; x++){
                for (int y = 0; y < 3; y++){
                    tempPixel = tempPixel + tempInput[x][y] * Hprewitt[x][y];
                }
            }
            scalarTempPixel[0] = tempPixel;
            HprewittMat.at<Vec3b>(Point(i, j)) = scalarTempPixel;
        }
    }
    Mat grad_x, grad_y;
    Mat abs_grad_x, abs_grad_y;
    Sobel(src_gray, grad_x, ddepth, 1, 0, 3, scale, delta, BORDER_DEFAULT);
    convertScaleAbs(grad_x, abs_grad_x);
    Sobel(src_gray, grad_y, ddepth, 0, 1, 3, scale, delta, BORDER_DEFAULT);
    convertScaleAbs(grad_y, abs_grad_y);
    addWeighted(abs_grad_x, 0.5, abs_grad_y, 0.5, 0, grad);
    imshow(window_name, grad);
    imshow(window_name2, HprewittMat);
    waitKey(0);
    return 0;
}
So at this point I'm doing the following:
I'm reading an image into "src", then I create HprewittMat, which I initialize to the same number of rows and cols as the original image, but with black pixels. I then convert the src image to a gray one. Then I iterate through each pixel of the original image, and for each pixel I convolve the surrounding 3x3 neighborhood with the horizontal Prewitt kernel. I store that value in "tempPixel" and put it in the HprewittMat image.
The next step would be to do the same with the vertical kernel, and then compute the gradient from both.
I'm asking this question because I've found similar questions on how to manipulate individual pixels, but usually for Python or Java. There might also be some flaw in the logic I'm using.
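For reference, a minimal sketch of how I understand the per-pixel convolution should look (accumulator reset for every pixel, reading from src_gray rather than the color src, single-channel output, result clamped to [0, 255]; the name HprewittGray exists only in this sketch):
// Sketch: horizontal Prewitt on the grayscale image, single-channel output.
Mat HprewittGray(src_gray.rows, src_gray.cols, CV_8UC1, Scalar(0));
for (int i = 1; i < src_gray.cols - 1; i++){
    for (int j = 1; j < src_gray.rows - 1; j++){
        int acc = 0; // must restart from 0 for every pixel
        for (int k = -1; k < 2; k++)
            for (int l = -1; l < 2; l++)
                acc += src_gray.at<uchar>(Point(i + k, j + l)) * Hprewitt[k + 1][l + 1];
        // the gradient can be negative, so take the magnitude and clamp to 8 bits
        HprewittGray.at<uchar>(Point(i, j)) = saturate_cast<uchar>(abs(acc));
    }
}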
This is only part of the code, but I know the error is in here. Specifically, this line:
float y = hist.at<float>(0,i);
For some reason my histogram is 0 by 0.
So the actual error is likely in how I used the calcHist() function.
void entropyImage(string filename) {
    Mat YCbCrImage, hist, image = imread(filename, IMREAD_UNCHANGED);
    float range[] = { 0, 256 };
    const float* histRange = { range };
    int histSize = 256;
    if (image.channels() == 1){
        double H = 0;
        //GrayScale Image
        calcHist(&image, 1, 0, Mat(), hist, 1, &histSize, &histRange, true, false);
        for (int i = 0; i < histSize; i++){
            float y = hist.at<float>(0, i);
            cout << "symbol: " << i << " was repeated: " << y << endl;
        }
    }
}
Here's how I'm calling the function from the main:
entropyImage("C:\\Users\\Documents\\Visual Studio 2013\\Projects\\lenagray.jpg");
Can someone more experienced in OpenCV let me know why my calcHist isn't working?
Just give the channels argument by declaring an array, like:
int channel[] = {0};
and
calcHist(&image, 1, channel, Mat(), hist, 1, &histSize, &histRange, true, false);
Maybe this will solve your problem.
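A minimal usage sketch of the grayscale branch with that change (also note that calcHist produces a histSize x 1 Mat here, so indexing it as hist.at<float>(i) is the safe form, rather than hist.at<float>(0, i)):
// Sketch: grayscale histogram with an explicit channels array.
int channel[] = { 0 };
calcHist(&image, 1, channel, Mat(), hist, 1, &histSize, &histRange, true, false);
for (int i = 0; i < histSize; i++)
    cout << "symbol: " << i << " was repeated: " << hist.at<float>(i) << endl;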
Hello, after connected-component labeling, why am I getting more than one pixel value for a single label?
This is my image: [image]
#include <iostream>
#include <vector>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
using namespace cv;
using namespace std;
void FindBlobs(const cv::Mat &binary, std::vector < std::vector<cv::Point2i> > &blobs);
int main(int argc, char **argv)
{
    cv::Mat img = cv::imread("/Users/Rodrane/Documents/XCODE/test/makalesvm/persembe.png", 0);
    if(!img.data) {
        std::cout << "File not found" << std::endl;
        return -1;
    }
    cv::namedWindow("binary");
    cv::namedWindow("labelled");
    cv::Mat output = cv::Mat::zeros(img.size(), CV_8UC3);
    cv::Mat binary;
    std::vector < std::vector<cv::Point2i > > blobs;
    equalizeHist(img, img);
    cv::threshold(img, binary, 0, 1, cv::THRESH_BINARY_INV);
    FindBlobs(binary, blobs);
    // Randomly color the blobs
    for(size_t i = 0; i < blobs.size(); i++) {
        unsigned char r = 255 * (rand()/(1.0 + RAND_MAX));
        unsigned char g = 255 * (rand()/(1.0 + RAND_MAX));
        unsigned char b = 255 * (rand()/(1.0 + RAND_MAX));
        for(size_t j = 0; j < blobs[i].size(); j++) {
            int x = blobs[i][j].x;
            int y = blobs[i][j].y;
            output.at<cv::Vec3b>(y,x)[0] = b;
            output.at<cv::Vec3b>(y,x)[1] = g;
            output.at<cv::Vec3b>(y,x)[2] = r;
        }
    }
    cout << "H = " << endl << " " << output << endl << endl;
    cv::imshow("binary", img);
    cv::imshow("labelled", output);
    cv::waitKey(0);
    return 0;
}
void FindBlobs(const cv::Mat &binary, std::vector < std::vector<cv::Point2i> > &blobs)
{
    blobs.clear();
    // Fill the label_image with the blobs
    // 0  - background
    // 1  - unlabelled foreground
    // 2+ - labelled foreground
    cv::Mat label_image;
    binary.convertTo(label_image, CV_32SC1);
    int label_count = 2; // starts at 2 because 0 and 1 are used already
    for(int y = 0; y < label_image.rows; y++) {
        int *row = (int*)label_image.ptr(y);
        for(int x = 0; x < label_image.cols; x++) {
            if(row[x] != 1) {
                continue;
            }
            cv::Rect rect;
            cv::floodFill(label_image, cv::Point(x,y), label_count, &rect, 0, 0, 4);
            std::vector <cv::Point2i> blob;
            for(int i = rect.y; i < (rect.y + rect.height); i++) {
                int *row2 = (int*)label_image.ptr(i);
                for(int j = rect.x; j < (rect.x + rect.width); j++) {
                    if(row2[j] != label_count) {
                        continue;
                    }
                    blob.push_back(cv::Point2i(j, i));
                }
            }
            blobs.push_back(blob);
            label_count++;
        }
    }
}
Actually, this is a single label when I print the image.
However, when I check the pixel values, I see there are actually 2 different values (I only wrote down part of the matrix, not all of it):
H = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 192, 33, 0, 192, 33, 0, 192, 33, 0, 0, 0, 0, ]
Also, by adding this line of code at the end of the FindBlobs function, I receive 3; since the label_count variable starts from 2, this also proves that H is a single label.
cout << "number of labels = "<< endl << " " << label_count << endl << endl;