My code works properly in Debug mode, but it does not work in Release mode, and I can't figure out why.
I've been having some trouble using the LUT function; I believe the issue is related to applying a LUT to a single-channel image. The error is:
Unhandled exception at 0x00007FFA61523FE9 in HW2.exe: Microsoft C++ exception: cv::Exception at memory location 0x000000EFCABFE750.
Here is my code. Thank you in advance.
#include <opencv2/opencv.hpp>
#include <iostream>
#include <cmath>
#include <ctime>
#define COUNT 1000000
using namespace std;
using namespace cv;
void GammaCorrection(Mat& src, Mat& dst, double gamma) {
Size dims{ 1,256 };
int channels{ 3 };
Mat bgr_planes[3];
// lookup table
Mat lut(dims ,CV_8UC1);
for (int i = 0; i < dims.height; i++) {
double v = pow((i / 255.0) ,gamma);
lut.at<uint8_t>(i) = cvRound( v * 255);
}
// B,G,R color plane
split(src, bgr_planes);
// ERROR
LUT(bgr_planes[0], lut, bgr_planes[0]);
LUT(bgr_planes[1], lut, bgr_planes[1]);
LUT(bgr_planes[2], lut, bgr_planes[2]);
//B,G,R color plane merge
merge(bgr_planes, channels, dst);
}
int main() {
clock_t start, end;
Mat origin = imread("..\\origin.png", IMREAD_COLOR);
Mat result = Mat(origin.size(), CV_8UC3);
start = clock();
//----------
//for (int i = 0; i < COUNT; i++) {
GammaCorrection(origin, result, 0.4);
//}
//-----------
end = clock();
cout << (double)(end - start) / COUNT << endl;
}
Related
I've been reading about OpenCV and doing some exercises. In this case I want to perform an image equalization. I have implemented the following code, but when I execute it I get the following error:
"Segmentation fault (core dumped)"
I have no idea what it is due to.
The formula I am trying to use is the following:
[equalization formula image]
The code is the following:
#include <opencv2/opencv.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <stdio.h>
using namespace cv;
using namespace std;
void equalization(cv::Mat &image,cv::Mat &green, int m) {
Mat eqIm;
int nl= image.rows; // number of lines
int nc= image.cols * image.channels();
for (int j=0; j<nl; j++) {
uchar* data= image.ptr<uchar>(j);
uchar* data2= green.ptr<uchar>(j);
uchar* eqIm= green.ptr<uchar>(j);
for (int i=0; i<nc; i++) {
eqIm[i]= data[i]+m-data2[i];
}
}
cv::imshow("Image",eqIm);
imwrite("eqIm.png",eqIm);
}
float mean(cv::Mat &image){
cv:Scalar tempVal = mean( image );
float myMAtMean = tempVal.val[0];
cout << "The value is " << myMAtMean;
}
int main(int argc, char** argv ){
Mat dst;
Mat image= cv::imread("img.jpg");
Mat green= cv::imread("green.jpg");
cv::imshow("Image",image);
float m= mean(image);
equalization(image,green,m);
cv::namedWindow("Image");
cv::imshow("Image",image);
imwrite("equalizated.png",dst);
waitKey(0);
return 0;
}
and the image "Equalization.png" that is written out contains nothing.
You never initialized the Mat eqIm, so when you do cv::imshow("Image", eqIm); and imwrite("eqIm.png", eqIm); there is nothing in the mat. See https://docs.opencv.org/2.4/doc/tutorials/core/mat_the_basic_image_container/mat_the_basic_image_container.html
Also, note that you have two variables named eqIm (the Mat and the uchar* inside the loop). That may be part of the confusion.
One last thing: in your mean function you may end up with infinite recursion, which would explain the segmentation fault. You should specify which mean function you are using inside the mean function you create, i.e.:
float mean(cv::Mat &image) {
cv::Scalar tempVal = cv::mean(image);
float myMAtMean = tempVal.val[0];
cout << "The value is " << myMAtMean;
return myMAtMean;
}
The following is something closer to what you are looking for in your equalization function.
void equalization(cv::Mat &image, cv::Mat &green, int m) {
Mat eqIm(image.rows,image.cols,image.type());
int nl = image.rows; // number of lines
int nc = image.cols * image.channels();
for (int j = 0; j<nl; j++) {// j is each row
for (int ec = 0; ec < nc; ec++) {//ec is each col and channels
eqIm.data[j*image.cols*image.channels() + ec] = image.data[j*image.cols*image.channels() + ec] + m - green.data[j*image.cols*image.channels() + ec];
}
}
cv::imshow("Image", eqIm);
imwrite("eqIm.png", eqIm);
}
I use j*image.cols*image.channels() to step past j full rows (each row is the number of columns times the number of channels per pixel).
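As a side note, the same traversal can be written with cv::Mat::at. This is only a sketch, assuming both inputs are 3-channel 8-bit images of the same size; it also uses cv::saturate_cast so that image + m - green is clamped to the 0..255 range instead of wrapping around:
void equalization(cv::Mat &image, cv::Mat &green, int m) {
    Mat eqIm(image.rows, image.cols, image.type());
    for (int j = 0; j < image.rows; j++) {                // each row
        for (int i = 0; i < image.cols; i++) {            // each column
            for (int c = 0; c < image.channels(); c++) {  // each channel
                eqIm.at<Vec3b>(j, i)[c] = saturate_cast<uchar>(
                    image.at<Vec3b>(j, i)[c] + m - green.at<Vec3b>(j, i)[c]);
            }
        }
    }
    cv::imshow("Image", eqIm);
    imwrite("eqIm.png", eqIm);
}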
I want to set to 0 every pixel in grayimg12 (a gray image) at the locations where the Mask image has a pixel value of 0. When I put the for loop in a try-catch block it gives me an assertion failure; without the try-catch the error is "Unhandled Exception at 0x755b0f22 and cv::Exception at memory location 0x004af338". I am using OpenCV 3.0.0 beta and Visual Studio 2010.
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/calib3d.hpp>
#include <iostream>
#include <sstream>
using namespace cv;
using namespace std;
int main()
{
// Reading Mask and Creating New Image
Mat grayimg, grayimg12, input, Mask; int keyboard;
input = imread("peter.jpg");
cvtColor(input, grayimg, COLOR_BGR2GRAY);
grayimg.copyTo(grayimg12, grayimg);
namedWindow("Gray Converted Frame");
imshow("Gray Converted Frame", grayimg);
int r = input.rows; int c = input.cols;
Mask = grayimg > 100;
namedWindow("Binary Image");
imshow("Binary Image", Mask);
try
{
for (int i=1;i<=r;i++)
{
for (int j=1;j<=c; j++)
{
if (Mask.at<uchar>(i,j) == 0)
{
grayimg12.at<uchar>(i,j) = 0;
}
else
grayimg12.at<uchar>(i,j) = grayimg.at<uchar>(i,j);
}
}
}
catch(Exception)
{
cout<<"Hi..";
}
namedWindow("Gray Output Image");
imshow("Gray Output Image", grayimg12);
keyboard = waitKey( 10000 );
return 0;
}
Your loop indices are off by one, so you get an exception when you try to access memory beyond the image bounds. Change:
for (int i=1;i<=r;i++)
{
for (int j=1;j<=c; j++)
{
to:
for (int i=0;i<r;i++) // for i = 0 to r-1
{
for (int j=0;j<c; j++) // for j = 0 to c-1
{
Note that in C, C++ and related languages, arrays are zero-based. So the valid index range for an array of size N is from 0 to N-1 inclusive.
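As an aside, this kind of masked copy can also be done without an explicit loop. A minimal sketch, assuming Mask and grayimg are single-channel 8-bit images of the same size:
Mat out = Mat::zeros(grayimg.size(), grayimg.type());
grayimg.copyTo(out, Mask);   // copy only where Mask != 0; everything else stays 0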
I'm trying to get eigenvalues from an image matrix. I tried with a streaming video and did not get an error, but when I try to find the eigenvalues of a stored image I get a cv::Exception at memory location 0x0000000296AFDAD0 error.
#include <opencv2/objdetect/objdetect.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/core/core.hpp>
#include <iostream>
#include <stdio.h>
using namespace std;
using namespace cv;
void calceigen(Mat covarm){
Mat eigenvalues;
Mat eigenvectors;
eigen(covarm, eigenvalues, eigenvectors);
cout << eigenvalues;
cout << eigenvectors;
}
void covard(Mat g)
{
Mat covarm;
Mat b;
calcCovarMatrix(g, covarm, b, CV_COVAR_COLS, CV_64F);
calceigen(covarm);
}
int main(int argc, const char** argv)
{
cv::Mat image = cv::imread("C:/Users/DellPc/Desktop/images1.jpg");
//setup image files used in the capture process
Mat grayscaleFrame;
Mat graysc;
//convert captured image to gray scale and equalize
//cvtColor(image, grayscaleFrame, CV_BGR2GRAY);
//resize(grayscaleFrame, graysc, Size(16, 16), 0, 0, INTER_LANCZOS4);
int rows = image.rows;
int cols = image.cols;
cv::Size s = image.size();
rows = s.height;
cols = s.width;
cout << rows << endl;
cout << cols << endl;
covard(image);
//find faces and store them in the vector array
imshow("small", image);
waitKey(33);
return 0;
}
I have an image i1. I am supposed to create another Mat m1 of size (image.rows*3, image.cols*3).
In m1, I'm supposed to fill the pixel values in the following way (please do see the image):
Here is my code:
#include <highgui.h>
#include "opencv2/opencv.hpp"
#include <fstream>
using namespace cv;
static Mat NeurMap1, NeurMap2, NeurMap3, frame, hsv_Frame;
std::ofstream myfile;
void InitializeNeurMap(cv::Mat Channel[3])
{
int i=0,j=0,m_i=0,m_j=0, t1=0, t2=0;
for(i=0; i < frame.rows; i++)
{
for(j=0;j < frame.cols;j++)
{
t1= i*n+1; t2 = j*n+1;
for(m_i=t1-1; m_i <= t1+1;m_i++)
{
for(m_j=t2-1; m_j <= t2+1; m_j++)
{
NeurMap1.at<uchar>(m_i, m_j)= frame.at<uchar>(i,j);
}
}
}
}
std::cout<<m_j;
myfile<<frame;
}
int main()
{
myfile.open("NeurMaptext.txt");
String filename="BootStrap/b%05d.bmp";// sequence of frames are read
VideoCapture cap(filename);
if(!cap.isOpened()) // check if we succeeded
return -1;
namedWindow("edges",1);
//namedWindow("frames",1);
Mat Channel[3];
cap>>frame;
NeurMap1 = Mat::zeros(frame.rows*n, frame.cols*n, frame.type());
InitializeNeurMap(Channel);
imshow("edges",NeurMap1);waitKey(33);
for(;;)
{
cap>>frame;
if(frame.empty())
break;
}
system("pause");
return 0;
}
The input image is RGB [160*120]. Why am I not getting all the columns in the output image given in the link above?
You can simply call resize() by passing the INTER_NEAREST parameter, i.e. using the nearest-neighbor interpolation.
#include <opencv2/opencv.hpp>
#include <iostream>
using namespace cv;
using namespace std;
int main()
{
unsigned char data[] = { 1, 2, 3, 4, 5, 6 };
Mat img(2, 3, CV_8UC1, data);
cout << img << endl;
Mat res(6, 9, CV_8UC1);
resize(img, res, res.size(), 0, 0, INTER_NEAREST);
cout << res << endl;
return 0;
}
You will get:
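(The printed output isn't reproduced here, but since INTER_NEAREST simply replicates each source pixel into a 3x3 block, the two cout statements should print something along these lines:)
[ 1, 2, 3;
  4, 5, 6]
[ 1, 1, 1, 2, 2, 2, 3, 3, 3;
  1, 1, 1, 2, 2, 2, 3, 3, 3;
  1, 1, 1, 2, 2, 2, 3, 3, 3;
  4, 4, 4, 5, 5, 5, 6, 6, 6;
  4, 4, 4, 5, 5, 5, 6, 6, 6;
  4, 4, 4, 5, 5, 5, 6, 6, 6]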
You are getting only one-third of the image filled because you are probably passing a 3-channel (colour) image to the function and treating it as a single-channel image. So change the above code to:
void InitializeNeurMap(cv::Mat Channel[3])
{
for(int i=0; i < frame.rows; i++){
for(int j=0;j < frame.cols;j++){
for(int k=0;k<n;k++){
for(int l=0;l<n;l++){
NeurMap1.at<Vec3b>(i*n+k,j*n+l)[0] = frame.at<Vec3b>(i,j)[0]; //Access Blue channel
NeurMap1.at<Vec3b>(i*n+k,j*n+l)[1] = frame.at<Vec3b>(i,j)[1];//Access green channel
NeurMap1.at<Vec3b>(i*n+k,j*n+l)[2] = frame.at<Vec3b>(i,j)[2]; //Access red channel
}
}
}
}
myfile<<frame;
}
See the result.
Here is my code, which uses OpenCV 2.4.5
Histogram1D.h
#ifndef HISTOGRAM1D_H
#define HISTOGRAM1D_H
#include <iostream>
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
using namespace std;
using namespace cv;
class Histogram1D
{
public:
Histogram1D();
//Histogram generators
MatND getHistogram(Mat );
Mat getHistogramImage(Mat );
//Generate Negative Image
Mat applyLookup(Mat ,Mat );
//Generate improved image with equalized histogram
Mat equalize(Mat image);
private:
int histSize[1];//Number of bins
float hRanges[2];//Max and Min pixel values
const float *ranges[1];
int channels[1];//Only one channel will be used
};
#endif // HISTOGRAM1D_H
Histogram1D.cpp
#include "Histogram1D.h"
Histogram1D::Histogram1D()
{
histSize[0] = 256;
hRanges[0] = 0.0;
hRanges[1] = 255.0;
ranges[0] = hRanges;
channels[0] = 0;
}
MatND Histogram1D::getHistogram(Mat image)
{
MatND hist;
cv::calcHist(&image,1,channels,Mat(),hist,1,histSize,ranges);
return hist;
}
Mat Histogram1D::getHistogramImage(Mat image)
{
MatND histo = getHistogram(image);
//Get minimum and maximum value bins
double minVal = 0;
double maxVal = 0;
minMaxLoc(histo,&minVal,&maxVal,0,0);
//Image on which to display histogram
Mat histImage(histSize[0],histSize[0],CV_8U,Scalar(255));
//Set highest point at 90% of nbins
int hpt = static_cast<int>(0.9,histSize[0]);
//Draw a vertical line for each bin
for(int i=0;i<histSize[0];i++)
{
float binVal = histo.at<float>(i);
int intensity = static_cast<int>(binVal*hpt/maxVal);
line(histImage,Point(i,histSize[0]),Point(i,histSize[0]-intensity),Scalar::all(0));
}
return histImage;
}
Mat Histogram1D::applyLookup(Mat image,Mat lookup)
{
Mat result;
cv::LUT(image,lookup,result);
return result;
}
Mat Histogram1D::equalize(Mat image)
{
Mat result;
cv::equalizeHist(image,result);
return result;
}
HistogramMain.cpp
#include "Histogram1D.h"
int main()
{
Histogram1D h;
Mat image = imread("C:/Users/Public/Pictures/Sample Pictures/Penguins.jpg",CV_LOAD_IMAGE_GRAYSCALE);
cout << "Number of Channels: " << image.channels() << endl;
namedWindow("Image");
imshow("Image",image);
Mat histogramImage = h.getHistogramImage(image);
namedWindow("Histogram");
imshow("Histogram",histogramImage);
Mat thresholded;
threshold(image,thresholded,60,255,THRESH_BINARY);
namedWindow("Binary Image");
imshow("Binary Image",thresholded);
Mat negativeImage;
int dim(256);
negativeImage = h.applyLookup(image,Mat(1,&dim,CV_8U));
namedWindow("Negative Image");
imshow("Negative Image",negativeImage);
Mat equalizedImage;
equalizedImage = h.equalize(image);
namedWindow("Equalized Image");
imshow("Equalized Image",equalizedImage);
waitKey(0);
return 0;
}
When you run this code, the negative image is 100% black! The most amazing thing is, if you remove all other code from HistogramMain.cpp but keep the code below, which is related to the negative image, you will get the correct negative image! Why is this?
I am using the latest version of Qt, which uses the VS 2010 compiler.
Mat negativeImage;
int dim(256);
negativeImage = h.applyLookup(image,Mat(1,&dim,CV_8U));
namedWindow("Negative Image");
imshow("Negative Image",negativeImage);
Your primary difficulty is that the expression Mat(1,&dim,CV_8U) allocates memory for a cv::Mat, but does not initialize any values. It is possible that your environment may fill uninitialized memory with zeros, which would explain the black image after calling applyLookup(). In any case, you should initialize the values in your lookup table in order to achieve correct results. For inverting the image, it is easy:
int dim(256);
cv::Mat tab(1,&dim,CV_8U);
uchar* ptr = tab.ptr();
for (size_t i = 0; i < tab.total(); ++i)
{
ptr[i] = 255 - i;
}
There are a few other issues with your code:
The line
int hpt = static_cast<int>(0.9,histSize[0]);
should be
int hpt = static_cast<int>(0.9*histSize[0]);
to do what your comment indicates. Pay attention to your compiler warnings!
You also have problems with your histogram ranges: for calcHist with uniform bins the upper boundary is exclusive, so for an 8-bit image the range should run from 0 to 256, not 0 to 255.
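A minimal sketch of the constructor with that change, assuming the rest of Histogram1D stays as posted:
Histogram1D::Histogram1D()
{
    histSize[0] = 256;
    hRanges[0] = 0.0;
    hRanges[1] = 256.0;   // upper boundary is exclusive in calcHist
    ranges[0] = hRanges;
    channels[0] = 0;
}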
By the way, with OpenCV 2 images are numpy arrays in Python, so to take the negative of an 8-bit grayscale image it's simply:
img = 255 - img