I'm trying to sharpen an image in HLS format.
I've done the blurring correctly, but the sharpening doesn't work.
I know the sharpening procedure is:
1) Blur the image: Image -> Blurred.
2) Make the unsharp mask: Unsharp_Mask = Image - Blurred.
3) Sharpen the image: Sharpened = Image + Unsharp_Mask.
Also, I know that in HLS you don't have to do this on every channel, just on the "L" one.
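For clarity, this is a rough sketch of those three steps with OpenCV built-ins, applied only to the L channel (not my actual code, which is further below); the L values are widened to 16-bit so the mask can hold negative values before saturating back to 8-bit:
#include <opencv2/opencv.hpp>
using namespace cv;
// Sketch only: sharpen just the L channel of an HLS image via an unsharp mask.
Mat sharpenL(const Mat& bgr)
{
    Mat hls;
    cvtColor(bgr, hls, COLOR_BGR2HLS);
    std::vector<Mat> ch;
    split(hls, ch);                        // ch[1] is the L channel
    Mat L16, blurred16;
    ch[1].convertTo(L16, CV_16S);          // keep the original L, widened to 16-bit
    blur(ch[1], ch[1], Size(3, 3));        // 3x3 box blur, like my mean filter
    ch[1].convertTo(blurred16, CV_16S);
    Mat mask  = L16 - blurred16;           // Unsharp_Mask = Image - Blurred
    Mat sharp = L16 + mask;                // Sharpened = Image + Unsharp_Mask
    sharp.convertTo(ch[1], CV_8U);         // saturate back to [0, 255]
    merge(ch, hls);
    Mat out;
    cvtColor(hls, out, COLOR_HLS2BGR);
    return out;
}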
I did it, but it doesn't work.
This is my code (I can't use the "code" button because it gives me an error saying that parts of the code are not properly formatted as code):
// UNSHARP MASK HLS
Mat* UnsharpHLS = new Mat(PaddedHLS->rows, PaddedHLS->cols, PaddedHLS->type());
Mat* SharpHLS = new Mat(PaddedHLS->rows, PaddedHLS->cols, PaddedHLS->type());
for (int i = 0; i < ImageHLS.rows; i++)
{
    for (int j = 0; j < ImageHLS.cols; j++)
    {
        UnsharpHLS->at<Vec3b>(i + 1, j + 1)[1] = ImageHLS.at<Vec3b>(i, j)[1] - PaddedHLS->at<Vec3b>(i + 1, j + 1)[1];
        SharpHLS->at<Vec3b>(i + 1, j + 1)[1] = ImageHLS.at<Vec3b>(i, j)[1] + (UnsharpHLS->at<Vec3b>(i + 1, j + 1)[1]);
    }
}
cvtColor(*SharpHLS, Sharpened, COLOR_HLS2BGR);
Let's assume the earlier part of the code works (I don't get any errors and I've already tried it); the only problem is in the code above.
This is the whole code:
#include <iostream>
#include <opencv2/opencv.hpp>
#include <cstdlib>
#include <math.h>
using namespace cv;
using namespace std;
int main()
{
// LOAD THE IMAGE
Mat Original = imread("Lena.png", IMREAD_COLOR);
// CHECK THAT THE IMAGE WAS READ CORRECTLY, OTHERWISE RETURN -1
if (Original.empty())
{
return -1;
}
// COLOR CONVERSION
Mat ImageHLS;
Mat ImageRGB;
Mat Blurred;
Mat Sharpened;
cvtColor(Original, ImageRGB, COLOR_BGR2RGB);
cvtColor(Original, ImageHLS, COLOR_BGR2HLS);
// CREATE THE PADDED HLS IMAGE
int FilterSize = 3;
int Padding = FilterSize - 1;
Mat* PaddedHLS = new Mat(ImageHLS.rows + Padding, ImageHLS.cols + Padding, ImageHLS.type());
copyMakeBorder(ImageHLS, *PaddedHLS, Padding / 2, Padding / 2, Padding / 2, Padding / 2, BORDER_DEFAULT);
// BLUR THE PADDED HLS IMAGE
Mat* Filter = new Mat(FilterSize, FilterSize, ImageHLS.type());
for (int i = 1; i < PaddedHLS->rows - 1; i++)
{
for (int j = 1; j < PaddedHLS->cols - 1; j++)
{
for (int x = 0; x < FilterSize; x++)
{
for (int y = 0; y < FilterSize; y++)
{
Filter->at<Vec3b>(x, y)[1] = PaddedHLS->at<Vec3b>(i - 1 + x, j - 1 + y)[1];
}
}
PaddedHLS->at<Vec3b>(i, j)[1] = mean(*Filter).val[1];
}
}
cvtColor(*PaddedHLS, Blurred, COLOR_HLS2BGR);
// UNSHARP MASK HLS
Mat* UnsharpHLS = new Mat(PaddedHLS->rows, PaddedHLS->cols, PaddedHLS->type());
Mat* SharpHLS = new Mat(PaddedHLS->rows, PaddedHLS->cols, PaddedHLS->type());
for (int i = 0; i < ImageHLS.rows; i++)
{
for (int j = 0; j < ImageHLS.cols; j++)
{
UnsharpHLS->at<Vec3b>(i+1, j+1)[1] = ImageHLS.at<Vec3b>(i, j)[1] - PaddedHLS->at<Vec3b>(i + 1, j + 1)[1];
SharpHLS->at<Vec3b>(i+1, j+1)[1] = ImageHLS.at<Vec3b>(i, j)[1] + (UnsharpHLS->at<Vec3b>(i + 1, j + 1)[1]);
}
}
cvtColor(*SharpHLS, Sharpened, COLOR_HLS2BGR);
// DISPLAY THE IMAGES
imshow("Originale", Original);
imshow("Image RGB", ImageRGB);
imshow("Image HLS", ImageHLS);
imshow("Blurred HLS", *PaddedHLS);
imshow("Blurred BGR", Blurred);
imshow("Unsharp HLS", *UnsharpHLS);
imshow("Sharpened HLS", *SharpHLS);
imshow("Sharpened BGR", Sharpened);
// CLOSE EVERYTHING
waitKey(0);
destroyAllWindows();
}
I want to move through an image and take a 5x5 grid centered on each pixel. I then want to sum that grid and compare the sum to a threshold (a more compact sketch of this windowed sum is at the end of this question). My code:
int main()
{
Mat element = getStructuringElement(MORPH_RECT, Size(7, 7));
Mat im = imread("blob.png", IMREAD_GRAYSCALE);
bool fromCenter = false;
namedWindow("Crop frame", WINDOW_NORMAL);
Rect2d r = selectROI("Crop frame", im, fromCenter);
im = im(r);
erode(im, im, element);
Mat clone = im;
int sectionSize = 4;
int width = im.cols - sectionSize/2;
int height = im.rows - sectionSize/2;
int sum = 0;
int counter = 0;
for (int i = sectionSize/2; i < width; i++) {
for (int j = sectionSize/2; j < height; j++) {
Rect rect = Rect(i, j, sectionSize, sectionSize);
rect -= Point(rect.width / 2, rect.height / 2);
Mat temp = im(rect);
for (int x = 0; x < temp.cols; x++) {
for (int y = 0; y < temp.rows; y++) {
int pixelValue = (int)temp.at<uchar>(y, x);
sum += pixelValue;
}
}
cout << sum << endl;
if (sum > 3800) {
clone.at<uchar>(j, i) = 255;
}
else {
clone.at<uchar>(j, i) = 0;
}
namedWindow("erode", WINDOW_NORMAL);
imshow("erode", clone);
waitKey(1);
sum = 0;
}
}
}
I am getting fluctuations in the pixel sum depending on where I select my ROI in the image, even when both selections are over white space. Also, my pixel sum changes when I change the value of the clone pixel in this section of the code, which I do not understand at all:
if (sum > 3800) {
clone.at<uchar>(j, i) = 255;
}
else {
clone.at<uchar>(j, i) = 0;
}
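For comparison, here is a much more compact sketch of the windowed sum I described (assuming a 5x5 window, as in my description; this is just the idea expressed with boxFilter, not a fix for the fluctuation I'm seeing):
#include <opencv2/opencv.hpp>
using namespace cv;
int main()
{
    Mat im = imread("blob.png", IMREAD_GRAYSCALE);
    if (im.empty()) return -1;
    // Sum of the 5x5 neighbourhood around every pixel (unnormalized box filter).
    Mat sums;
    boxFilter(im, sums, CV_32F, Size(5, 5), Point(-1, -1), /*normalize=*/false);
    // 255 where the window sum exceeds the threshold, 0 elsewhere.
    Mat result = (sums > 3800);
    imshow("thresholded sums", result);
    waitKey(0);
    return 0;
}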
I am writing a simple C++ application using Visual Studio and OpenCV that takes the Red, Green and Blue values from an image (named src in the code) and stores each channel's pixel values individually in 3 different Mat objects (named RM, BM, GM in the code). I saw this Stack Overflow question and did exactly as the first answer explained. I was able to read all the pixel values just fine, but wasn't able to change the pixel values of the other images because abort() was called. This is the console window after I run the code.
#include<opencv2/core.hpp>
#include<opencv2/highgui.hpp>
#include<opencv2/imgproc.hpp>
#include <stdio.h>
using namespace cv;
using namespace std;
int main() {
String file_name = "C:\\images\\haaand.jpg";
Mat src;
Mat RM, BM, GM;
//RM.create(src.cols, src.rows, CV_8UC(2));
//BM.create(src.cols, src.rows, CV_8UC(2));
//GM.create(src.cols, src.rows, CV_8UC(2));
Vec3b intensity;
Vec3b To[3];
src = imread(file_name);
imshow("src", src);
printf("cols:%d rows:%d \n", src.cols, src.rows);
for (int i = 0; i < src.cols; i++) {
for (int j = 0; j < src.rows; j++) {
intensity = src.at<Vec3b>(j, i);
printf("intensity:%d %d %d \n", intensity[0], intensity[1], intensity[2]);
for (int k = 0; k < 3; k++) {
//uchar bla;
//bla = intensity[k];
for (int p = 0; p < 3; p++) {
To[k][p] = intensity[k];
}
printf("(k:%d) %d %d %d\n", k, To[k][0], To[k][1], To[k][2]);
}
printf("all done\n");
BM.at<Vec3b>(j, i) = To[0];
GM.at<Vec3b>(j, i) = To[1];
RM.at<Vec3b>(j, i) = To[2];
}
}
imshow("RM", RM);
imshow("BM", BM);
imshow("GM", GM);
return 0;
}
Could anyone tell me why this error might happen?
//RM.create(src.cols, src.rows, CV_8UC(2));
//BM.create(src.cols, src.rows, CV_8UC(2));
//GM.create(src.cols, src.rows, CV_8UC(2));
...
BM.at<Vec3b>(j, i) = To[0];
RM, BM, and GM are not set up. The debugger should show an error when you try to set BM.at<Vec3b>(j, i).
Try instead:
int main()
{
String file_name = "C:\\images\\haaand.jpg";
Mat src = imread(file_name);
Mat RM = Mat(src.size(), CV_8UC3);
Mat BM = Mat(src.size(), CV_8UC3);
Mat GM = Mat(src.size(), CV_8UC3);
Vec3b intensity;
Vec3b To[3];
for(int i = 0; i < src.cols; i++)
{
for(int j = 0; j < src.rows; j++)
{
intensity = src.at<Vec3b>(j, i);
for(int k = 0; k < 3; k++)
for(int p = 0; p < 3; p++)
To[k][p] = intensity[k];
BM.at<Vec3b>(j, i) = To[0];
GM.at<Vec3b>(j, i) = To[1];
RM.at<Vec3b>(j, i) = To[2];
}
}
imshow("src", src);
imshow("RM", RM);
imshow("BM", BM);
imshow("GM", GM);
waitKey(0);
return 0;
}
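As an aside, if single-channel outputs are acceptable, cv::split does the per-channel extraction in one call. A sketch (not a drop-in replacement, since the Mats above are 3-channel):
#include <opencv2/opencv.hpp>
using namespace cv;
int main()
{
    Mat src = imread("C:\\images\\haaand.jpg");
    if (src.empty()) return -1;
    // bgr[0] = blue, bgr[1] = green, bgr[2] = red, each a CV_8UC1 Mat.
    std::vector<Mat> bgr;
    split(src, bgr);
    imshow("B", bgr[0]);
    imshow("G", bgr[1]);
    imshow("R", bgr[2]);
    waitKey(0);
    return 0;
}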
I am trying to make a classifier using OpenCV 3.0.0's SVM (cv::ml::SVM) and a color histogram. I have already tried to make my own using the following code to build the datasets:
int labels[510];
if (label.compare("raw")){
for (int i = 0; i < 509; i++){
labels[i] = 1;
}
}
else if (label.compare("ripe")){
for (int i = 0; i < 509; i++){
labels[i] = 2;
}
}
else if (label.compare("rotten")){
for (int i = 0; i < 509; i++){
labels[i] = 3;
}
}
float trainingData[510][2];
for (int i = 0; i < 254; i++){
trainingData[i][1] = r_hist.at<float>(i - 1);
trainingData[i][2] = i;
}
int j = 0;
for (int i = 255; i < 509; i++){
trainingData[i][1] = g_hist.at<float>(j - 1);
trainingData[i][2] = i;
j++;
}
And this code for the SVM:
int width = 512, height = 512;
Mat image = Mat::zeros(height, width, CV_8UC3);
Mat labelsMat(510, 1, CV_32SC1, labels);
Mat trainingDataMat(510, 2, CV_32FC1, trainingData);
Ptr < cv::ml::SVM > svm = SVM::create();
svm = cv::Algorithm::load<ml::SVM>("svm.xml");
svm->setC(0.01);
svm->setType(ml::SVM::C_SVC);
svm->setKernel(ml::SVM::LINEAR);
svm->setTermCriteria((cvTermCriteria(TermCriteria::MAX_ITER, 100, 1e6)));
svm->train(trainingDataMat, ROW_SAMPLE, labelsMat);
svm->save("svm.xml");
The problem with the code above is that it won't save properly. Is there a better way to do it?
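For reference, this is the plain create -> configure -> train -> save sequence as I understand the cv::ml::SVM API (a sketch only, without the intermediate load; labelsMat and trainingDataMat are the Mats built above):
#include <opencv2/opencv.hpp>
#include <opencv2/ml.hpp>
using namespace cv;
using namespace cv::ml;
void trainAndSave(const Mat& trainingDataMat, const Mat& labelsMat)
{
    Ptr<SVM> svm = SVM::create();
    svm->setType(SVM::C_SVC);
    svm->setKernel(SVM::LINEAR);
    svm->setC(0.01);
    svm->setTermCriteria(TermCriteria(TermCriteria::MAX_ITER, 100, 1e-6));
    svm->train(trainingDataMat, ROW_SAMPLE, labelsMat);   // CV_32FC1 samples, CV_32SC1 labels
    svm->save("svm.xml");
    // Later, load the trained model back instead of re-training:
    Ptr<SVM> loaded = Algorithm::load<SVM>("svm.xml");
}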
I am new to OpenCV. I am trying to do a convolution of an image, using a kernel the same size as the image, in OpenCV C++. I am getting the error 'Segmentation fault (core dumped)'. I checked the initialisation of the variables and the for loops, but I am not able to work out exactly where the problem is coming from. Can anybody please help me find the problem? My code is given below:
#include<opencv2/highgui/highgui.hpp>
#include "opencv2/imgproc/imgproc.hpp"
#include<stdio.h>
#include<iostream>
#include<math.h>
#include<cv.hpp>
using namespace cv;
using namespace std;
Mat img;
Mat kernel, gd, dest;
int c = 120;
double mysum = 0.0, mysum1 = 0.0, k = 0;
int cent=0,radius=0;
enum ConvolutionType {
/* Return the full convolution, including border */
CONVOLUTION_FULL,
/* Return only the part that corresponds to the original image */
CONVOLUTION_SAME,
/* Return only the submatrix containing elements that were not influenced by the
border
*/
CONVOLUTION_VALID
};
void conv2(const Mat &img, const Mat& kernel, ConvolutionType type,Mat& dest)
{
Mat source = img;
if(CONVOLUTION_FULL == type)
{
source = Mat();
const int additionalRows = kernel.rows - 1, additionalCols = kernel.cols - 1;
copyMakeBorder(img, source, (additionalRows + 1) / 2, additionalRows / 2,
(additionalCols + 1) / 2, additionalCols / 2, BORDER_CONSTANT, Scalar(0));
}
flip(kernel, kernel, -1);
Point anchor(kernel.cols - kernel.cols / 2 - 1, kernel.rows - kernel.rows / 2 - 1);
int borderMode = BORDER_CONSTANT;
filter2D(source, dest, img.depth(), kernel, anchor, 0, borderMode);
if(CONVOLUTION_VALID == type)
{
dest = dest.colRange((kernel.cols - 1) / 2, dest.cols - kernel.cols / 2).rowRange((kernel.rows - 1) / 2, dest.rows - kernel.rows / 2);
}
}
int main()
{
img = imread("building1.jpg", CV_LOAD_IMAGE_COLOR);
dest.create(img.size(), img.type());
gd.create(img.size(), img.type());
for(int j = 0; j < img.rows; j++)
{
for(int i = 0; i < img.cols; i++)
{
radius = ((cent - i)^2 + (cent - j)^2);
gd.at<float>(j, i) = exp((-(radius) / c^2));
mysum = mysum + gd.at<float>(j, i);
}
mysum1 = mysum1 + mysum;
}
k=1/mysum1;
cout<<endl<<k<<"\n"<<endl;
for(int j = 0; j < img.rows; j++)
{
for(int i = 0; i < img.cols; i++)
{
gd.at<float>(j, i) = k * gd.at<float>(j, i);
}
}
conv2(img, gd, CONVOLUTION_FULL, dest);
imshow("conv", dest);
waitKey(0);
return 0;
}
When you create img
img = imread("building1.jpg", CV_LOAD_IMAGE_COLOR);
it will be of type CV_8UC3, i.e. 3 bytes per pixel (one each for blue, green and red).
However when you access the image
gd.at<float>(j, i) = k * gd.at<float>(j, i);
you are using float element access. Since a float is 4 bytes rather than 3, you will end up accessing memory outside of the image, or even outside your program's address space. The latter is happening, as indicated by the segmentation violation.
Probably the best thing to do would be to compile your code in debug mode. Then you should get an exception from OpenCV rather than the segmentation violation.
It looks like what you might have wanted is
img = imread("building1.jpg", CV_LOAD_IMAGE_GRAYSCALE);
img.convertTo(img, CV_32FC1);
...
Also, some of your code can be greatly simplified, e.g.
for(int j = 0; j < img.rows; j++)
{
for(int i = 0; i < img.cols; i++)
{
gd.at<float>(j, i) = k * gd.at<float>(j, i);
}
}
should be
gd = gd * k;
If you are accessing pixels sequentially, then using at<>() is very inefficient. See the OpenCV tutorial on the efficient way to scan images.
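For illustration, sequential access with row pointers would look like this (a sketch, assuming gd has been created as CV_32FC1); for this particular operation the one-liner above is still the simplest:
// Multiply every element of gd by k using row pointers instead of at<>().
for (int j = 0; j < gd.rows; j++)
{
    float* row = gd.ptr<float>(j);
    for (int i = 0; i < gd.cols; i++)
        row[i] *= static_cast<float>(k);
}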
I am new to OpenCV C++. I am getting an error with code for convolution (taken from the internet) which is equivalent to conv2 in MATLAB. The problem is that all the pixel values become 255. The filter I am using in the code has the same size as the image. Can anybody please help me correct the problem? My OpenCV C++ code is given below:
#include<opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include<stdio.h>
#include<iostream>
#include<math.h>
#include<cv.hpp>
using namespace cv;
using namespace std;
Mat gd,img,bimgFiltered,gimgFiltered,rimgFiltered,fin_img;
Mat b,g,r,cr,cb,cg,B,G,R;
Mat b_logplane, b_plane,b_logfiltered,b_log,g_logplane,g_plane,g_logfiltered;
Mat g_log,r_logplane,r_plane,r_logfiltered,r_log;
Mat kernel, dest;
int m,n,m1,m2,n1,n2;
int c = 120;
double mysum = 0.0, mysum1 = 0.0, k = 0;
int cent=0,radius=0;
enum ConvolutionType {
/* Return the full convolution, including border */
CONVOLUTION_FULL,
/* Return only the part that corresponds to the original image */
CONVOLUTION_SAME,
/* Return only the submatrix containing elements that were not influenced
by the border
*/
CONVOLUTION_VALID
};
void conv2(const Mat &img, const Mat& kernel, ConvolutionType type,Mat& dest)
{
Mat source = img;
if(CONVOLUTION_FULL == type)
{
source = Mat();
const int additionalRows = kernel.rows - 1, additionalCols = kernel.cols - 1;
copyMakeBorder(img, source, (additionalRows + 1) / 2, additionalRows / 2,
(additionalCols + 1) / 2, additionalCols / 2, BORDER_CONSTANT, Scalar(0));
}
flip(kernel, kernel, -1);
Point anchor(kernel.cols - kernel.cols / 2 - 1, kernel.rows - kernel.rows / 2 - 1);
int borderMode = BORDER_CONSTANT;
filter2D(source, dest, img.depth(), kernel, anchor, 0, borderMode);
if(CONVOLUTION_VALID == type)
{
dest = dest.colRange((kernel.cols - 1) / 2, dest.cols - kernel.cols /
2).rowRange((kernel.rows - 1) / 2, dest.rows - kernel.rows / 2);
}
}
int main()
{
img = imread("milla.bmp", CV_LOAD_IMAGE_COLOR);
b.create(img.size(),img.type());
g.create(img.size(),img.type());
r.create(img.size(),img.type());
cr.create(img.size(),img.type());
cg.create(img.size(),img.type());
cb.create(img.size(),img.type());
Mat planes[3];
split(img,planes);
bimgFiltered.create(img.size(),img.type());
gimgFiltered.create(img.size(),img.type());
rimgFiltered.create(img.size(),img.type());
dest.create(img.size(), img.type());
gd.create(img.size(), img.type());
for(int j = 0; j < img.rows; j++)
{
for(int i = 0; i < img.cols; i++)
{
radius = ((cent - i)^2 + (cent - j)^2);
gd.at<float>(j, i) = exp((-(radius) / c^2));
mysum = mysum + gd.at<float>(j, i);
}
mysum1 = mysum1 + mysum;
}
k=1/mysum1;
cout<<endl<<k<<"\n"<<endl;
for(int j = 0; j < img.rows; j++)
{
for(int i = 0; i < img.cols; i++)
{
gd.at<float>(j, i) = k * gd.at<float>(j, i);
}
}
planes[0].convertTo(planes[0],CV_32F,1.0/255.0);
planes[1].convertTo(planes[1],CV_32F,1.0/255.0);
planes[2].convertTo(planes[2],CV_32F,1.0/255.0);
conv2(planes[0],gd,CONVOLUTION_SAME,bimgFiltered);
conv2(planes[1],gd,CONVOLUTION_SAME,gimgFiltered);
conv2(planes[2],gd,CONVOLUTION_SAME,rimgFiltered);
imshow("img",gimgFiltered );
waitKey(0);
return 0;
}
There are a few problems with the code:
Issue 1:
In the following two lines:
radius = ((cent - i)^2 + (cent - j)^2);
gd.at<float>(j, i) = exp((-(radius) / c^2));
You are using the ^ operator, which is the bitwise XOR operator in C/C++. I think you are mistaking it for a power operator. To raise a number to a power you have to use the pow function, as follows:
radius = powf((cent - i),2) + powf((cent - j),2);
gd.at<float>(j, i) = expf((-(radius) / (c*c)));
Issue 2:
The gd matrix is assumed to have floating point values as it is accessed like gd.at<float>(j, i), but it is declared with the same type as that of the image, i.e. CV_8UC3. So gd should be created as follows:
gd.create(img.size(), CV_32FC1);
Issue 3:
Another possible logical error may be present in the first nested loop. You may have to set mysum = 0; before starting the inner loop like this:
for(int j = 0; j < img.rows; j++)
{
mysum = 0;
for(int i = 0; i < img.cols; i++)
{
radius = powf((cent - i),2) + powf((cent - j),2);
gd.at<float>(j, i) = expf((-(radius) / (c*c)));
mysum = mysum + gd.at<float>(j, i);
}
mysum1 = mysum1 + mysum;
}
Issue 4:
The output filtered images should be created as single-channel instead of 3-channel:
bimgFiltered.create(img.size(),CV_8UC1);
gimgFiltered.create(img.size(),CV_8UC1);
rimgFiltered.create(img.size(),CV_8UC1);
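As an aside, OpenCV can also build a normalized Gaussian kernel directly, which avoids the manual loops and the normalization above. A sketch (the sigma value is an assumption; pick whatever corresponds to your c):
#include <opencv2/opencv.hpp>
using namespace cv;
// Build a 2-D Gaussian kernel of the given size that sums to 1.
Mat makeGaussianKernel(Size size, double sigma)
{
    Mat gx = getGaussianKernel(size.width,  sigma, CV_32F);   // column vector, sums to 1
    Mat gy = getGaussianKernel(size.height, sigma, CV_32F);
    return gy * gx.t();                                       // outer product, also sums to 1
}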