I am trying to implement a Sobel operator in both the horizontal and the vertical direction, but somehow I am getting the reverse output. I have attached the code below. For the horizontal mask:
char mask[3][3] = {{-1,-2,-1},{0,0,0},{1,2,1}};

void masking(Mat image){
    Mat temImage = image.clone();
    for (int i = 1; i < image.rows-1; i++)
    {
        for (int j = 1; j < image.cols-1; j++)
        {
            for (int k = 0; k < 3; k++)
            {
                int pixel1 = image.at<Vec3b>(i-1,j-1)[k] * -1;
                int pixel2 = image.at<Vec3b>(i,j-1)[k] * -2;
                int pixel3 = image.at<Vec3b>(i+1,j-1)[k] * -1;
                int pixel4 = image.at<Vec3b>(i-1,j)[k] * 0;
                int pixel5 = image.at<Vec3b>(i,j)[k] * 0;
                int pixel6 = image.at<Vec3b>(i+1,j)[k] * 0;
                int pixel7 = image.at<Vec3b>(i-1,j+1)[k] * 1;
                int pixel8 = image.at<Vec3b>(i,j+1)[k] * 2;
                int pixel9 = image.at<Vec3b>(i+1,j+1)[k] * 1;
                int sum = pixel1 + pixel2 + pixel3 + pixel4 + pixel5
                        + pixel6 + pixel7 + pixel8 + pixel9;
                if (sum < 0)
                    sum = 0;
                if (sum > 255)
                    sum = 255;
                temImage.at<Vec3b>(i,j)[k] = sum;
            }
        }
    }
    //printf("counter = %d", counter);
    imshow("Display", temImage);
    imwrite("output1.png", temImage);
}
I am getting the output as
whereas for the vertical mask:
char mask[3][3] = {{-1,0,1},{-2,0,2},{-1,0,1}};

void masking(Mat image){
    Mat temImage = image.clone();
    for (int i = 1; i < image.rows-1; i++)
    {
        for (int j = 1; j < image.cols-1; j++)
        {
            for (int k = 0; k < 3; k++)
            {
                int pixel1 = image.at<Vec3b>(i-1,j-1)[k] * -1;
                int pixel2 = image.at<Vec3b>(i,j-1)[k] * 0;
                int pixel3 = image.at<Vec3b>(i+1,j-1)[k] * 1;
                int pixel4 = image.at<Vec3b>(i-1,j)[k] * -2;
                int pixel5 = image.at<Vec3b>(i,j)[k] * 0;
                int pixel6 = image.at<Vec3b>(i+1,j)[k] * 2;
                int pixel7 = image.at<Vec3b>(i-1,j+1)[k] * -1;
                int pixel8 = image.at<Vec3b>(i,j+1)[k] * 0;
                int pixel9 = image.at<Vec3b>(i+1,j+1)[k] * 1;
                int sum = pixel1 + pixel2 + pixel3 + pixel4 + pixel5
                        + pixel6 + pixel7 + pixel8 + pixel9;
                if (sum < 0)
                    sum = 0;
                if (sum > 255)
                    sum = 255;
                temImage.at<Vec3b>(i,j)[k] = sum;
            }
        }
    }
    //printf("counter = %d", counter);
    imshow("Display", temImage);
    imwrite("output1.png", temImage);
}
I am getting the output as
The main function is attached below:
int main(int argc, char** argv){
    Mat input_image = imread("sobel1.jpg", 1);
    masking(input_image);
    waitKey(0);
    return 0;
}
According to the guide https://www.tutorialspoint.com/dip/sobel_operator.htm, I should get the reverse output. Can anyone help me with this?
The original image is
No, the tutorial is not wrong: it talks about masks, not gradients. The weak point of that tutorial is that it doesn't mention that we calculate horizontal gradients using what they call the vertical mask (and vertical gradients using the horizontal mask), so your outputs are the expected ones; only the naming is swapped relative to the gradient direction each mask measures.
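If you want to sanity-check which mask produces which output, you can compare against OpenCV's own Sobel. A minimal hedged sketch (the file name is taken from your main; CV_8U clamps negative responses to zero, just like your code does):

#include <opencv2/opencv.hpp>
using namespace cv;

int main()
{
    Mat img = imread("sobel1.jpg", IMREAD_GRAYSCALE);
    Mat gx, gy;
    Sobel(img, gx, CV_8U, 1, 0, 3); // dx = 1: derivative along x, responds to vertical edges
    Sobel(img, gy, CV_8U, 0, 1, 3); // dy = 1: derivative along y, responds to horizontal edges
    imshow("dI/dx (vertical edges)", gx);
    imshow("dI/dy (horizontal edges)", gy);
    waitKey(0);
    return 0;
}

The mask {{-1,-2,-1},{0,0,0},{1,2,1}} corresponds to the dy = 1 case: its rows differ, so it responds to intensity changes from top to bottom, i.e. horizontal edges.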
I am trying to extract patches from an image in parallel, with a pixel shift (overlapping patches). I have written the CPU version of the code, but I have not been able to convert the for loop that increments by the pixel shift. The CreatePatchDataSet function contains that for loop. Please help me convert this function to CUDA. I have provided the following code.
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <iostream>
#include <fstream>
#include <sstream>
#include <random>
#include <vector>
#include <omp.h>

using namespace std;
using namespace cv;

#define PATCH_SIZE (5)
#define PIXEL_SHIFT (2)

void ConvertMat2DoubleArray(cv::Mat input, double* output)
{
    for (int i = 0; i < input.rows; i++)
    {
        double *src = input.ptr<double>(i);
        for (int j = 0; j < input.cols; j++)
        {
            output[input.cols * input.channels() * i + input.channels() * j + 0] = src[j];
        }
    }
}
void GetNumOfPatch(const int width, const int height, const int patch_size, const int pixel_shift,
                   int* num_of_patch, int* num_of_patch_col, int* num_of_patch_row) {
    *num_of_patch_col = 0;
    int len_nb = 0;
    while (len_nb < width) {
        if (len_nb != 0) {
            len_nb += patch_size - (patch_size - pixel_shift);
        }
        else {
            len_nb += patch_size;
        }
        (*num_of_patch_col)++;
    }
    len_nb = 0;
    *num_of_patch_row = 0;
    while (len_nb < height) {
        if (len_nb != 0) {
            len_nb += patch_size - (patch_size - pixel_shift);
        }
        else {
            len_nb += patch_size;
        }
        (*num_of_patch_row)++;
    }
    *num_of_patch = (*num_of_patch_col) * (*num_of_patch_row);
}
void CreatePatchDataSet(double *original_data, double* patch_data, const int width, const int height,
                        const int pixel_shift, const int patch_size,
                        const int num_of_patch_col, const int num_of_patch_row) {
    int counter_row = 0;
    int num_of_patch_image = num_of_patch_row * num_of_patch_col;
    for (int i = 0; i < height; i += pixel_shift) {
        int counter_col = 0;
        for (int j = 0; j < width; j += pixel_shift) {
            //Get Low Resolution Image
            for (int ii = 0; ii < patch_size; ii++) {
                for (int jj = 0; jj < patch_size; jj++) {
                    if ((i + ii) < height && (j + jj) < width) {
                        patch_data[num_of_patch_image * (patch_size * ii + jj) + num_of_patch_col * counter_row + counter_col] = original_data[width * (i + ii) + (j + jj)];
                    }
                    else {
                        patch_data[num_of_patch_image * (patch_size * ii + jj) + num_of_patch_col * counter_row + counter_col] = 0.;
                    }
                }
            }
            counter_col++;
            if (counter_col == num_of_patch_col) {
                break;
            }
        }
        counter_row++;
        if (counter_row == num_of_patch_row) {
            break;
        }
    }
}
int main()
{
    int ratio = 2;
    cv::Mat image = cv::imread("input_b2_128.tif", CV_LOAD_IMAGE_UNCHANGED);
    cv::Mat imageH = cv::Mat(image.rows * ratio, image.cols * ratio, CV_8UC1);
    cv::resize(image, imageH, cv::Size(imageH.cols, imageH.rows), 0, 0, cv::INTER_LANCZOS4);
    double* orgimageH = (double*)calloc(imageH.cols * imageH.rows * image.channels(), sizeof(double));
    ConvertMat2DoubleArray(imageH, orgimageH);
    int widthH = imageH.cols;
    int heightH = imageH.rows;
    int dimH = (int)PATCH_SIZE * (int)PATCH_SIZE * (int)image.channels();
    int dimL = (int)PATCH_SIZE/ratio * (int)PATCH_SIZE/ratio * (int)image.channels();
    //3. Create training data set=========================
    int num_of_patch_image = 0;
    int num_of_patch_col = 0;
    int num_of_patch_row = 0;
    GetNumOfPatch(widthH, heightH, (int)PATCH_SIZE, (int)PIXEL_SHIFT, &num_of_patch_image, &num_of_patch_col, &num_of_patch_row);
    cout << "patch numbers: \n " << num_of_patch_image << endl;
    double* FY = (double*)calloc(dimH * num_of_patch_image, sizeof(double));
    CreatePatchDataSet(orgimageH, FY, widthH, heightH, (int)PIXEL_SHIFT, (int)PATCH_SIZE, num_of_patch_col, num_of_patch_row);
    free(orgimageH);
    free(FY);
    return 0;
}
The results I got for the first 10 values in the CPU version:
patch numbers:
16129
238,240,240,235,237,230,227,229,228,227
I have tried to convert this function to a kernel function using CUDA, but it goes into an infinite loop. As I am very new to CUDA, could you please help me find the problem in the code?
__global__ void CreatePatchDataSet(double *original_data, double* patch_data, const int width, const int height,
                                   const int pixel_shift, const int patch_size,
                                   const int num_of_patch_col, const int num_of_patch_row) {
    int num_of_patch_image = num_of_patch_row * num_of_patch_col;
    int i = threadIdx.x + (blockDim.x * blockIdx.x);
    int j = threadIdx.y + (blockDim.y * blockIdx.y);
    while (i < height && j < width)
    {
        int counter_row = 0;
        int counter_col = 0;
        //Get Low Resolution Image
        for (int ii = 0; ii < patch_size; ii++) {
            for (int jj = 0; jj < patch_size; jj++) {
                if ((i + ii) < height && (j + jj) < width) {
                    patch_data[num_of_patch_image * (patch_size * ii + jj) + num_of_patch_col * counter_row + counter_col] = original_data[width * (i + ii) + (j + jj)];
                }
                else {
                    patch_data[num_of_patch_image * (patch_size * ii + jj) + num_of_patch_col * counter_row + counter_col] = 0.;
                }
            }
        }
        counter_col++;
        if (counter_col == num_of_patch_col) {
            break;
        }
        counter_row++;
        if (counter_row == num_of_patch_row) {
            break;
        }
    }
    i += blockDim.x * gridDim.x;
    j += blockDim.y * gridDim.y;
}
int main()
{
    int ratio = 2;
    cv::Mat image = cv::imread("input_b2_128.tif", CV_LOAD_IMAGE_UNCHANGED);
    cv::Mat imageH = cv::Mat(image.rows * ratio, image.cols * ratio, CV_8UC1);
    cv::resize(image, imageH, cv::Size(imageH.cols, imageH.rows), 0, 0, cv::INTER_LANCZOS4);
    double *orgimageH = (double*)calloc(imageH.cols * imageH.rows * image.channels(), sizeof(double));
    ConvertMat2DoubleArray(imageH, orgimageH);
    int widthH = imageH.cols;
    int heightH = imageH.rows;
    //
    int dimH = (int)PATCH_SIZE * (int)PATCH_SIZE * (int)image.channels();
    int dimL = (int)PATCH_SIZE/ratio * (int)PATCH_SIZE/ratio * (int)image.channels();
    //3. Create training data set=========================
    int num_of_patch_image = 0;
    int num_of_patch_col = 0;
    int num_of_patch_row = 0;
    GetNumOfPatch(widthH, heightH, (int)PATCH_SIZE, (int)PIXEL_SHIFT, &num_of_patch_image, &num_of_patch_col, &num_of_patch_row);
    cout << "patch numbers: \n " << num_of_patch_image << endl;
    double* FY = (double*)calloc(dimH * num_of_patch_image, sizeof(double));
    double *d_orgimageH;
    gpuErrchk(cudaMalloc((void**)&d_orgimageH, sizeof(double) * widthH * heightH));
    double *d_FY;
    gpuErrchk(cudaMalloc((void**)&d_FY, sizeof(double) * dimH * num_of_patch_image));
    gpuErrchk(cudaMemcpy(d_orgimageH, orgimageH, sizeof(double) * widthH * heightH, cudaMemcpyHostToDevice));
    dim3 dimBlock(16, 16);
    dim3 dimGrid;
    dimGrid.x = (widthH + dimBlock.x - 1) / dimBlock.x;
    dimGrid.y = (heightH + dimBlock.y - 1) / dimBlock.y;
    CreatePatchDataSet<<<dimGrid, dimBlock>>>(d_orgimageH, d_FY, widthH, heightH, (int)PIXEL_SHIFT, (int)PATCH_SIZE, num_of_patch_col, num_of_patch_row);
    gpuErrchk(cudaMemcpy(FY, d_FY, sizeof(double) * dimH * num_of_patch_image, cudaMemcpyDeviceToHost));
    // cout << "Hello world";
    free(orgimageH);
    free(FY);
    cudaFree(d_FY);
    cudaFree(d_orgimageH);
    return 0;
}
Image I have used: https://i.stack.imgur.com/Ywg7p.png
i += blockDim.x * gridDim.x;
j += blockDim.y * gridDim.y;
is outside the while loop in your kernel. Since i and j never change inside the while loop, it never stops. There could be more problems here, but this is the most prominent one.
EDIT: Another one I found is that you have only one while loop over both i and j instead of one loop for each. You should probably use for loops like in your CPU code:
for (i = pixel_shift * (threadIdx.x + (blockDim.x * blockIdx.x));
     i < height;
     i += pixel_shift * blockDim.x * gridDim.x) {
    for (j = ...; j < ...; j += ...) {
        /* ... */
    }
}
EDIT 2:
I could imagine this to be a good idea:
for (counter_row = threadIdx.y + blockDim.y * blockIdx.y;
     counter_row < num_of_patch_row;
     counter_row += blockDim.y * gridDim.y) {
    i = counter_row * pixel_shift;
    if (i > height)
        break;
    for (counter_col = threadIdx.x + blockDim.x * blockIdx.x;
         counter_col < num_of_patch_col;
         counter_col += blockDim.x * gridDim.x) {
        j = counter_col * pixel_shift;
        if (j > width)
            break;
        /* ... */
    }
}
I have also exchanged the x/y fields of the execution parameters between the inner and the outer loop, as that seemed more appropriate given that the x field is contiguous within a warp (better coalesced memory access).
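Putting both edits together, a minimal complete kernel could look like the sketch below (one thread per patch, grid-stride loops; renamed and untested, so treat it as a starting point rather than a drop-in replacement; it assumes the same flat patch_data layout as your CPU version):

__global__ void CreatePatchDataSetFixed(const double *original_data, double *patch_data,
                                        const int width, const int height,
                                        const int pixel_shift, const int patch_size,
                                        const int num_of_patch_col, const int num_of_patch_row)
{
    int num_of_patch_image = num_of_patch_row * num_of_patch_col;
    // grid-stride loops over patch indices; y walks patch rows, x walks patch columns
    for (int counter_row = threadIdx.y + blockDim.y * blockIdx.y;
         counter_row < num_of_patch_row;
         counter_row += blockDim.y * gridDim.y) {
        int i = counter_row * pixel_shift; // top image row of this patch
        for (int counter_col = threadIdx.x + blockDim.x * blockIdx.x;
             counter_col < num_of_patch_col;
             counter_col += blockDim.x * gridDim.x) {
            int j = counter_col * pixel_shift; // left image column of this patch
            // copy one patch, zero-padding where it crosses the image border
            for (int ii = 0; ii < patch_size; ii++) {
                for (int jj = 0; jj < patch_size; jj++) {
                    double v = 0.;
                    if ((i + ii) < height && (j + jj) < width)
                        v = original_data[width * (i + ii) + (j + jj)];
                    patch_data[num_of_patch_image * (patch_size * ii + jj)
                               + num_of_patch_col * counter_row + counter_col] = v;
                }
            }
        }
    }
}

The launch configuration from your main can stay as it is; since the loops stride by the total grid size, any surplus threads simply find no patch to process.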
I am trying to perform Gaussian smoothing on this image without using any OpenCV functions (except for displaying the image).
However, the output I got after convolving the image with the Gaussian kernel is as follows:
The output image seems misaligned and looks very weird. Any idea what is happening?
Generating the Gaussian kernel:
double gaussian(int x, int y, double sigma){
    return (1/(2*M_PI*pow(sigma,2)))*exp(-1*(pow(x,2)+pow(y,2))/(2*pow(sigma,2)));
}

double generateFilter(vector<vector<double>> & kernel, int width, double sigma){
    int value = 0;
    double total = 0;
    if(width%2 == 1){
        value = (width-1)/2;
    }else{
        value = width/2;
    }
    double smallest = gaussian(-1*value, -1*value, sigma);
    for(int i = -1*value; i <= value; i++){
        vector<double> temp;
        for(int k = -1*value; k <= value; k++){
            int gVal = round(gaussian(i,k,sigma)/smallest);
            temp.push_back(gVal);
            total += gVal;
        }
        kernel.push_back(temp);
    }
    cout << total << endl;
    return total;
}
Convolution:
vector<vector<unsigned int>> convolution(vector<vector<unsigned int>> src, vector<vector<double>> kernel, double total){
    int kCenterX = floor(kernel.size() / 2); //center of kernel
    int kCenterY = kCenterX;                 //center of kernel
    int kRows = kernel.size();               //height of kernel
    int kCols = kRows;                       //width of kernel
    int imgRows = src.size();                //height of input image
    int imgCols = src[0].size();             //width of input image
    vector<vector<unsigned int>> dst = vector<vector<unsigned int>>(imgRows, vector<unsigned int>(imgCols, 0));
    for (size_t row = 0; row < imgRows; row++) {
        for (size_t col = 0; col < imgCols; col++) {
            float accumulation = 0;
            float weightsum = 0;
            for (int i = -1*kCenterX; i <= 1*kCenterX; i++) {
                for (int j = -1*kCenterY; j <= 1*kCenterY; j++) {
                    int k = 0;
                    if((row+i) >= 0 && (row+i) < imgRows && (col+j) >= 0 && (col+j) < imgCols){
                        k = src[row+i][col+j];
                        weightsum += kernel[kCenterX+i][kCenterY+j];
                    }
                    accumulation += k * kernel[kCenterX+i][kCenterY+j];
                }
            }
            dst[row][col] = round(accumulation/weightsum);
        }
    }
    return dst;
}
Thank you.
The convolution function is basically correct, so the issue is with the input and output format.
Make sure you are reading the image as Grayscale (and not RGB):
cv::Mat I = cv::imread("img.png", cv::IMREAD_GRAYSCALE);
You are passing a vector<vector<unsigned int>> argument to convolution.
I can't say whether that is part of the problem or not, but it's recommended to pass an argument of type cv::Mat (and return a cv::Mat):
cv::Mat convolution(cv::Mat src, vector<vector<double>> kernel, double total)
I assume you can convert the input to and from vector<vector<unsigned int>>, but it's not necessary.
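If you do want to keep the vector-based interface at the call site, here is a hedged sketch of such a conversion (the helper names are mine, and it assumes an 8-bit grayscale cv::Mat):

vector<vector<unsigned int>> matToVec(const cv::Mat& m) {
    // copy an 8-bit grayscale Mat into the question's nested-vector format
    vector<vector<unsigned int>> v(m.rows, vector<unsigned int>(m.cols));
    for (int r = 0; r < m.rows; r++)
        for (int c = 0; c < m.cols; c++)
            v[r][c] = m.at<uchar>(r, c);
    return v;
}

cv::Mat vecToMat(const vector<vector<unsigned int>>& v) {
    // copy the nested-vector image back into an 8-bit grayscale Mat
    cv::Mat m((int)v.size(), (int)v[0].size(), CV_8UC1);
    for (int r = 0; r < m.rows; r++)
        for (int c = 0; c < m.cols; c++)
            m.at<uchar>(r, c) = (uchar)v[r][c];
    return m;
}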
Here is a working code sample:
#include <vector>
#include <iostream>
#include "opencv2/opencv.hpp"
#include "opencv2/highgui.hpp"

using namespace std;

double gaussian(int x, int y, double sigma) {
    return (1 / (2 * 3.141592653589793 * pow(sigma, 2))) * exp(-1 * (pow(x, 2) + pow(y, 2)) / (2 * pow(sigma, 2)));
}

double generateFilter(vector<vector<double>> & kernel, int width, double sigma)
{
    int value = 0;
    double total = 0;
    if (width % 2 == 1) {
        value = (width - 1) / 2;
    }
    else {
        value = width / 2;
    }
    double smallest = gaussian(-1 * value, -1 * value, sigma);
    for (int i = -1 * value; i <= value; i++) {
        vector<double> temp;
        for (int k = -1 * value; k <= value; k++) {
            int gVal = round(gaussian(i, k, sigma) / smallest);
            temp.push_back(gVal);
            total += gVal;
        }
        kernel.push_back(temp);
    }
    cout << total << endl;
    return total;
}

//vector<vector<unsigned int>> convolution(vector<vector<unsigned int>> src, vector<vector<double>> kernel, double total) {
cv::Mat convolution(cv::Mat src, vector<vector<double>> kernel, double total) {
    int kCenterX = floor(kernel.size() / 2); //center of kernel
    int kCenterY = kCenterX;                 //center of kernel
    int kRows = kernel.size();               //height of kernel
    int kCols = kRows;                       //width of kernel
    int imgRows = src.rows;                  //src.size(); height of input image
    int imgCols = src.cols;                  //src[0].size(); width of input image
    //vector<vector<unsigned int>> dst = vector<vector<unsigned int>>(imgRows, vector<unsigned int>(imgCols, 0));
    cv::Mat dst = cv::Mat::zeros(src.size(), CV_8UC1); //Create destination matrix filled with zeros (dst is a Grayscale image with one byte per pixel).
    for (size_t row = 0; row < imgRows; row++) {
        for (size_t col = 0; col < imgCols; col++) {
            double accumulation = 0;
            double weightsum = 0;
            for (int i = -1 * kCenterX; i <= 1 * kCenterX; i++) {
                for (int j = -1 * kCenterY; j <= 1 * kCenterY; j++) {
                    int k = 0;
                    if ((row + i) >= 0 && (row + i) < imgRows && (col + j) >= 0 && (col + j) < imgCols) {
                        //k = src[row+i][col+j];
                        k = (int)src.at<uchar>(row + i, col + j); //Read pixel at row [row + i] and column [col + j]
                        weightsum += kernel[kCenterX + i][kCenterY + j];
                    }
                    accumulation += (double)k * kernel[kCenterX + i][kCenterY + j];
                }
            }
            //dst[row][col] = round(accumulation/weightsum);
            dst.at<uchar>(row, col) = (uchar)round(accumulation / weightsum); //Write pixel at row [row] and column [col]
            //dst.at<uchar>(row, col) = src.at<uchar>(row, col);
        }
    }
    return dst;
}

int main()
{
    vector<vector<double>> kernel;
    double total = generateFilter(kernel, 11, 3.0);
    //Read input image as Grayscale (one byte per pixel).
    cv::Mat I = cv::imread("img.png", cv::IMREAD_GRAYSCALE);
    cv::Mat J = convolution(I, kernel, total);
    //Display input and output
    cv::imshow("I", I);
    cv::imshow("J", J);
    cv::waitKey(0);
    cv::destroyAllWindows();
    return 0;
}
Result:
I am using a lookup table to convert raw pixel data between color spaces and coding variants. This is the definition of my LUT:
typedef struct
{
    unsigned char data[3];
} rgb;

rgb LUTYUVTORGB[256][256][256];
It is initialized like this:
// loop through all possible values
for (int in_1 = 0; in_1 < 256; in_1++) {
    for (int in_2 = 0; in_2 < 256; in_2++) {
        for (int in_3 = 0; in_3 < 256; in_3++) {
            int out_1, out_2, out_3;
            // LUT YUV -> RGB
            // convert to rgb (http://softpixel.com/~cwright/programming/colorspace/yuv/)
            out_1 = (int)(in_1 + 1.4075 * (in_3 - 128));
            out_2 = (int)(in_1 - 0.3455 * (in_2 - 128) - (0.7169 * (in_3 - 128)));
            out_3 = (int)(in_1 + 1.7790 * (in_2 - 128));
            // clamp values
            if (out_1 < 0) { out_1 = 0; } else if (out_1 > 255) { out_1 = 255; }
            if (out_2 < 0) { out_2 = 0; } else if (out_2 > 255) { out_2 = 255; }
            if (out_3 < 0) { out_3 = 0; } else if (out_3 > 255) { out_3 = 255; }
            // set values in LUT
            LUTYUVTORGB[in_1][in_2][in_3].data[0] = (unsigned char)out_1;
            LUTYUVTORGB[in_1][in_2][in_3].data[1] = (unsigned char)out_2;
            LUTYUVTORGB[in_1][in_2][in_3].data[2] = (unsigned char)out_3;
        }
    }
}
The LUT is then applied when copying the raw pixel data to a QImage:
for (int y = 0; y < h; y++) {
    for (int x = 0; x < w; x++) {
        xpos = (y*w + x); // don't calculate 3 times
        buff[x * 3 + 0] = psImage->comps[0].data[xpos];
        buff[x * 3 + 1] = psImage->comps[1].data[xpos];
        buff[x * 3 + 2] = psImage->comps[2].data[xpos];
    }
    memcpy(image.scanLine(y), buff, bytes_per_line);
}
The values of the LUT are static and have to be initialized every time the program starts. Is there any way to initialize it via the preprocessor? Or would it be advisable to save it to a file?
EDIT: The conversion is used in a time-critical video application where every frame has to be processed individually.
Thanks very much in advance!
I have created a one-dimensional array for this table; it is convenient to save and load such an array. I think using this array at runtime won't decrease performance, but I didn't test for performance differences.
#include <stdio.h>
#include <stdlib.h>

#define LUTSIZE 0x1000000

typedef struct
{
    unsigned char data[3];
} rgb;

rgb *LUT;

inline int LUT_index(int in_1, int in_2, int in_3) {
    return in_1 * 0x10000 + in_2 * 0x100 + in_3 * 0x1;
}

inline rgb LUT_value(int in_1, int in_2, int in_3) {
    return LUT[LUT_index(in_1, in_2, in_3)];
}

void save(rgb *LUT, char* fileName) {
    FILE* file = fopen(fileName, "wb");
    int index;
    for (int in_1 = 0; in_1 < 256; in_1++) {
        for (int in_2 = 0; in_2 < 256; in_2++) {
            for (int in_3 = 0; in_3 < 256; in_3++) {
                int out_1, out_2, out_3;
                // LUT YUV -> RGB
                // convert to rgb (http://softpixel.com/~cwright/programming/colorspace/yuv/)
                out_1 = (int)(in_1 + 1.4075 * (in_3 - 128));
                out_2 = (int)(in_1 - 0.3455 * (in_2 - 128) - (0.7169 * (in_3 - 128)));
                out_3 = (int)(in_1 + 1.7790 * (in_2 - 128));
                // clamp values
                if (out_1 < 0) { out_1 = 0; } else if (out_1 > 255) { out_1 = 255; }
                if (out_2 < 0) { out_2 = 0; } else if (out_2 > 255) { out_2 = 255; }
                if (out_3 < 0) { out_3 = 0; } else if (out_3 > 255) { out_3 = 255; }
                index = LUT_index(in_1, in_2, in_3);
                // set values in LUT
                LUT[index].data[0] = (unsigned char)out_1;
                LUT[index].data[1] = (unsigned char)out_2;
                LUT[index].data[2] = (unsigned char)out_3;
            }
        }
    }
    fwrite((void*)LUT, sizeof(rgb), LUTSIZE, file);
    fclose(file);
}

void read(rgb *LUT, char* fileName) {
    FILE* file = fopen(fileName, "rb");
    fread((void*)LUT, sizeof(rgb), LUTSIZE, file);
    fclose(file);
}

int main(int argc, char *argv[])
{
    LUT = (rgb*)malloc(LUTSIZE * sizeof(rgb));
    save(LUT, "LUT_data");
    rgb testValue = LUT_value(5, 3, 7);
    printf("%d %d %d\n", testValue.data[0], testValue.data[1], testValue.data[2]);
    read(LUT, "LUT_data");
    testValue = LUT_value(5, 3, 7);
    printf("%d %d %d\n", testValue.data[0], testValue.data[1], testValue.data[2]);
    free(LUT);
}
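To connect this back to the question's QImage loop, a hedged sketch of the per-pixel lookup (reusing h, w, buff, psImage, image and bytes_per_line from the question; I am assuming comps[0..2] hold the Y, U and V components):

for (int y = 0; y < h; y++) {
    for (int x = 0; x < w; x++) {
        int xpos = y * w + x;
        rgb px = LUT_value(psImage->comps[0].data[xpos],  // Y
                           psImage->comps[1].data[xpos],  // U
                           psImage->comps[2].data[xpos]); // V
        buff[x * 3 + 0] = px.data[0];
        buff[x * 3 + 1] = px.data[1];
        buff[x * 3 + 2] = px.data[2];
    }
    memcpy(image.scanLine(y), buff, bytes_per_line);
}

Note that the full table is 256^3 entries of 3 bytes each, i.e. 48 MiB, so random lookups will often miss the CPU caches; for a time-critical video path it is worth benchmarking the LUT against computing the conversion directly per pixel.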
Using the OpenCV library, I want to threshold an image like this:
threshold(image, thresh, 220, 255, THRESH_BINARY_INV)
But I want to find the threshold value (220) automatically.
I used Otsu to estimate the threshold, but it doesn't work in my case.
Therefore I should use the histogram peak technique: find the two peaks in the histogram that correspond to the background and the object of the image, and set the threshold value automatically halfway between the two peaks.
I am using this book (pages 117 and 496-505): "Image Processing in C" by Dwayne Phillips (http://homepages.inf.ed.ac.uk/rbf/BOOKS/PHILLIPS/), and its source code for finding the two peaks in the histogram corresponding to the background and the object of the image. This is my image:
This is my C++ code:
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/opencv.hpp>
#include <iostream>
#include <stdio.h>
#include <fstream>

using namespace std;
using namespace cv;

int main()
{
    Mat image0 = imread("C:/Users/Alireza/Desktop/contrast950318/2.bmp");
    imshow("image0", image0);
    Mat image, thresh, Tafrigh;
    cvtColor(image0, image, CV_RGB2GRAY);
    int N = image.rows * image.cols;
    int histogram[256];
    for (int i = 0; i < 256; i++) {
        histogram[i] = 0;
    }
    //create histogram
    for (int i = 0; i < image.rows; i++){
        for (int j = 0; j < image.cols; j++){
            histogram[((int)image.at<uchar>(i, j))]++;
        }
    }
    int peak1, peak2;
#define PEAKS 30
    int distance[PEAKS], peaks[PEAKS][2];
    int i, j = 0, max = 0, max_place = 0;
    for (int i = 0; i < PEAKS; i++){
        distance[i] = 0;
        peaks[i][0] = -1;
        peaks[i][1] = -1;
    }
    for (i = 0; i <= 255; i++){
        max = histogram[i];
        max_place = i;
        //insert_into_peaks(peaks, max, max_place);
        //int max, max_place, peaks[PEAKS][2];
        //int i, j;
        /* first case */
        if (max > peaks[0][0]){
            for (i = PEAKS - 1; i > 0; i--){
                peaks[i][0] = peaks[i - 1][0];
                peaks[i][1] = peaks[i - 1][1];
            }
            peaks[0][0] = max;
            peaks[0][1] = max_place;
        } /* ends if */
        /* middle cases */
        for (j = 0; j < PEAKS - 3; j++){
            if (max < peaks[j][0] && max > peaks[j + 1][0]){
                for (i = PEAKS - 1; i > j + 1; i--){
                    peaks[i][0] = peaks[i - 1][0];
                    peaks[i][1] = peaks[i - 1][1];
                }
                peaks[j + 1][0] = max;
                peaks[j + 1][1] = max_place;
            } /* ends if */
        } /* ends loop over j */
        /* last case */
        if (max < peaks[PEAKS - 2][0] && max > peaks[PEAKS - 1][0]){
            peaks[PEAKS - 1][0] = max;
            peaks[PEAKS - 1][1] = max_place;
        } /* ends if */
    } /* ends loop over i */
    for (int i = 1; i < PEAKS; i++){
        distance[i] = peaks[0][1] - peaks[i][1];
        if (distance[i] < 0)
            distance[i] = distance[i] * (-1);
    }
    peak1 = peaks[0][1];
    cout << " peak1= " << peak1;
    for (int i = PEAKS - 1; i > 0; i--){
        if (distance[i] > 1)
            peak2 = peaks[i][1];
    }
    cout << " peak2= " << peak2;
    int mid_point;
    //int peak1, peak2;
    short hi, low;
    unsigned long sum1 = 0, sum2 = 0;
    if (peak1 > peak2)
        mid_point = ((peak1 - peak2) / 2) + peak2;
    if (peak1 < peak2)
        mid_point = ((peak2 - peak1) / 2) + peak1;
    for (int i = 0; i < mid_point; i++)
        sum1 = sum1 + histogram[i];
    for (int i = mid_point; i <= 255; i++)
        sum2 = sum2 + histogram[i];
    if (sum1 >= sum2){
        low = mid_point;
        hi = 255;
    }
    else{
        low = 0;
        hi = mid_point;
    }
    cout << " low= " << low << " hi= " << hi;
    double threshnum = 0.5 * (low + hi);
    threshold(image, thresh, threshnum, hi, THRESH_BINARY_INV);
    waitKey(0);
    return 0;
}
But I don't know whether this code is correct. If it is correct, why is the threshold value 202?
What ideas on how to solve this task would you suggest? Or on what resource on the internet can I find help?
You can also use Max Entropy thresholding. In some cases, using only the high-frequency part of the entropy could be better:
int maxentropie(const cv::Mat1b& src)
{
    // Histogram
    cv::Mat1d hist(1, 256, 0.0);
    for (int r = 0; r < src.rows; ++r)
        for (int c = 0; c < src.cols; ++c)
            hist(src(r, c))++;

    // Normalize
    hist /= double(src.rows * src.cols);

    // Cumulative histogram
    cv::Mat1d cumhist(1, 256, 0.0);
    float sum = 0;
    for (int i = 0; i < 256; ++i)
    {
        sum += hist(i);
        cumhist(i) = sum;
    }

    cv::Mat1d hl(1, 256, 0.0);
    cv::Mat1d hh(1, 256, 0.0);
    for (int t = 0; t < 256; ++t)
    {
        // low range entropy
        double cl = cumhist(t);
        if (cl > 0)
        {
            for (int i = 0; i <= t; ++i)
            {
                if (hist(i) > 0)
                {
                    hl(t) = hl(t) - (hist(i) / cl) * log(hist(i) / cl);
                }
            }
        }
        // high range entropy
        double ch = 1.0 - cl; // constraint: cl + ch = 1
        if (ch > 0)
        {
            for (int i = t + 1; i < 256; ++i)
            {
                if (hist(i) > 0)
                {
                    hh(t) = hh(t) - (hist(i) / ch) * log(hist(i) / ch);
                }
            }
        }
    }

    // choose best threshold
    cv::Mat1d entropie(1, 256, 0.0);
    double h_max = hl(0) + hh(0);
    int threshold = 0;
    entropie(0) = h_max;
    for (int t = 1; t < 256; ++t)
    {
        entropie(t) = hl(t) + hh(t);
        if (entropie(t) > h_max)
        {
            h_max = entropie(t);
            threshold = uchar(t);
        }
    }
    if (threshold == 0) threshold = 255;
    return threshold;
}
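A hedged usage sketch tying it to the question's threshold call (the file name is an assumption):

cv::Mat1b gray = cv::imread("2.bmp", cv::IMREAD_GRAYSCALE);
int t = maxentropie(gray); // entropy-selected threshold
cv::Mat1b binary;
cv::threshold(gray, binary, t, 255, cv::THRESH_BINARY_INV);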
I want to implement the color transfer algorithm in this paper, and I referred to this tutorial to port the algorithm to OpenCV C++.
But I got some strange results, for example:
This is the source image and this is the target, but the combined result looks like this.
Some parts of the result look strange.
This is my source code:
Mat src; Mat tar; Mat result;

class imageInfo{
public:
    double lMean, lStd, aMean, aStd, bMean, bStd;
};

/// Function header
void image_stats(Mat img, imageInfo *info);

/** @function main */
int main(int argc, char** argv)
{
    vector<Mat> mv;
    imageInfo srcInfo, tarInfo;
    src = imread("images/autumn.jpg");
    tar = imread("images/fallingwater.jpg");
    imshow("src", src);
    imshow("tar", tar);
    cvtColor(src, src, CV_BGR2Lab);
    cvtColor(tar, tar, CV_BGR2Lab);
    image_stats(src, &srcInfo);
    image_stats(tar, &tarInfo);
    split(tar, mv);
    Mat l = mv[0];
    Mat a = mv[1];
    Mat b = mv[2];
    /* pixel color modify */
    for (int i = 0; i < l.rows; i++){
        for (int j = 0; j < l.cols; j++){
            double li = l.data[l.step[0] * i + l.step[1] * j];
            if (i == 426 && j == 467)
                cout << "i:" << i << " j:" << j << " " << li << endl;
            li -= tarInfo.lMean;
            li = (tarInfo.lStd / srcInfo.lStd) * li;
            li += srcInfo.lMean;
            li = (int)li % 256;
            l.data[l.step[0] * i + l.step[1] * j] = li;
        }
    }
    for (int i = 0; i < a.rows; i++){
        for (int j = 0; j < a.cols; j++){
            double ai = a.data[a.step[0] * i + a.step[1] * j];
            ai -= tarInfo.aMean;
            ai = (tarInfo.aStd / srcInfo.aStd) * ai;
            ai += srcInfo.aMean;
            ai = (int)ai % 256;
            a.data[a.step[0] * i + a.step[1] * j] = ai;
        }
    }
    for (int i = 0; i < b.rows; i++){
        for (int j = 0; j < b.cols; j++){
            double bi = b.data[b.step[0] * i + b.step[1] * j];
            bi -= tarInfo.bMean;
            bi = (tarInfo.bStd / srcInfo.bStd) * bi;
            bi += srcInfo.bMean;
            bi = (int)bi % 256;
            b.data[b.step[0] * i + b.step[1] * j] = bi;
        }
    }
    mv.clear();
    mv.push_back(l);
    mv.push_back(a);
    mv.push_back(b);
    merge(mv, result);
    cvtColor(result, result, CV_Lab2BGR);
    imshow("result", result);
    imwrite("result.png", result);
    waitKey(0);
    return(0);
}
image_stats function:
void image_stats(Mat img, imageInfo *info){
    int Max = 0;
    vector<Mat> mv;
    vector<int> vl, va, vb;
    split(img, mv);
    Mat l = mv[0];
    Mat a = mv[1];
    Mat b = mv[2];
    /* statistics L space */
    for (int i = 0; i < l.rows; i++){
        for (int j = 0; j < l.cols; j++){
            int li = l.data[l.step[0] * i + l.step[1] * j];
            vl.push_back(li);
        }
    }
    double sum_l = std::accumulate(vl.begin(), vl.end(), 0.0);
    double mean_l = sum_l / vl.size();
    std::vector<double> diff_l(vl.size());
    std::transform(vl.begin(), vl.end(), diff_l.begin(),
                   std::bind2nd(std::minus<double>(), mean_l));
    double sq_sum_l = std::inner_product(diff_l.begin(), diff_l.end(), diff_l.begin(), 0.0);
    double stdev_l = std::sqrt(sq_sum_l / vl.size());
    info->lMean = mean_l;
    info->lStd = stdev_l;
    /* statistics A space */
    for (int i = 0; i < a.rows; i++){
        for (int j = 0; j < a.cols; j++){
            int ai = a.data[a.step[0] * i + a.step[1] * j];
            va.push_back(ai);
        }
    }
    double sum_a = std::accumulate(va.begin(), va.end(), 0.0);
    double mean_a = sum_a / va.size();
    std::vector<double> diff_a(va.size());
    std::transform(va.begin(), va.end(), diff_a.begin(),
                   std::bind2nd(std::minus<double>(), mean_a));
    double sq_sum_a = std::inner_product(diff_a.begin(), diff_a.end(), diff_a.begin(), 0.0);
    double stdev_a = std::sqrt(sq_sum_a / va.size());
    info->aMean = mean_a;
    info->aStd = stdev_a;
    /* statistics B space */
    for (int i = 0; i < b.rows; i++){
        for (int j = 0; j < b.cols; j++){
            int bi = b.data[b.step[0] * i + b.step[1] * j];
            vb.push_back(bi);
        }
    }
    double sum_b = std::accumulate(vb.begin(), vb.end(), 0.0);
    double mean_b = sum_b / vb.size();
    std::vector<double> diff_b(vb.size());
    std::transform(vb.begin(), vb.end(), diff_b.begin(),
                   std::bind2nd(std::minus<double>(), mean_b));
    double sq_sum_b = std::inner_product(diff_b.begin(), diff_b.end(), diff_b.begin(), 0.0);
    double stdev_b = std::sqrt(sq_sum_b / vb.size());
    info->bMean = mean_b;
    info->bStd = stdev_b;
}
In your main function you should probably bound your result values by upper and lower limits [0, 255] instead of taking a modulo. If li = 256, then the code li = (int)li % 256; will make it zero.
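A minimal sketch of the clamped version of the L-channel update (the same change applies to the a and b loops); cv::saturate_cast is one idiomatic way to do this in OpenCV:

li -= tarInfo.lMean;
li = (tarInfo.lStd / srcInfo.lStd) * li;
li += srcInfo.lMean;
// clamp to [0, 255] instead of wrapping with % 256
l.data[l.step[0] * i + l.step[1] * j] = cv::saturate_cast<uchar>(li);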