Problem converting a "for loop" to CUDA - C++

I am trying to extract patches from an image in parallel, with a pixel shift (overlap). I have written a working CPU version, but I cannot work out how to convert the for loops that advance by the pixel shift. The CreatePatchDataSet function below contains those loops. Please help me convert this function to CUDA. I have provided the following code.
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <iostream>
#include <fstream>
#include <sstream>
#include <random>
#include <vector>
#include <omp.h>
using namespace std;
using namespace cv;
#define PATCH_SIZE (5)
#define PIXEL_SHIFT (2)
void ConvertMat2DoubleArray(cv::Mat input, double* output)
{
    for (int i = 0; i < input.rows; i++)
    {
        double *src = input.ptr<double>(i);
        for (int j = 0; j < input.cols; j++)
        {
            output[input.cols * input.channels() * i + input.channels() * j + 0] = src[j];
        }
    }
}
void GetNumOfPatch(const int width, const int height, const int patch_size, const int pixel_shift, int* num_of_patch, int* num_of_patch_col, int* num_of_patch_row) {
    *num_of_patch_col = 0;
    int len_nb = 0;
    while (len_nb < width) {
        if (len_nb != 0) {
            len_nb += patch_size - (patch_size - pixel_shift);
        }
        else {
            len_nb += patch_size;
        }
        (*num_of_patch_col)++;
    }
    len_nb = 0;
    *num_of_patch_row = 0;
    while (len_nb < height) {
        if (len_nb != 0) {
            len_nb += patch_size - (patch_size - pixel_shift);
        }
        else {
            len_nb += patch_size;
        }
        (*num_of_patch_row)++;
    }
    *num_of_patch = (*num_of_patch_col) * (*num_of_patch_row);
}
void CreatePatchDataSet(double *original_data, double* patch_data, const int width, const int height, const int pixel_shift, const int patch_size, const int num_of_patch_col, const int num_of_patch_row) {
    int counter_row = 0;
    int num_of_patch_image = num_of_patch_row * num_of_patch_col;
    for (int i = 0; i < height; i += pixel_shift) {
        int counter_col = 0;
        for (int j = 0; j < width; j += pixel_shift) {
            //Get Low Resolution Image
            for (int ii = 0; ii < patch_size; ii++) {
                for (int jj = 0; jj < patch_size; jj++) {
                    if ((i + ii) < height && (j + jj) < width) {
                        patch_data[num_of_patch_image * (patch_size * ii + jj) + num_of_patch_col*counter_row + counter_col] = original_data[width*(i + ii) + (j + jj)];
                    }
                    else {
                        patch_data[num_of_patch_image * (patch_size * ii + jj) + num_of_patch_col*counter_row + counter_col] = 0.;
                    }
                }
            }
            counter_col++;
            if (counter_col == num_of_patch_col) {
                break;
            }
        }
        counter_row++;
        if (counter_row == num_of_patch_row) {
            break;
        }
    }
}
int main()
{
    int ratio = 2;
    cv::Mat image = cv::imread("input_b2_128.tif", CV_LOAD_IMAGE_UNCHANGED);
    cv::Mat imageH = cv::Mat(image.rows * ratio, image.cols * ratio, CV_8UC1);
    cv::resize(image, imageH, cv::Size(imageH.cols, imageH.rows), 0, 0,
               cv::INTER_LANCZOS4);
    double* orgimageH = (double*)calloc(imageH.cols*imageH.rows*image.channels(), sizeof(double));
    ConvertMat2DoubleArray(imageH, orgimageH);
    int widthH = imageH.cols;
    int heightH = imageH.rows;
    int dimH = (int)PATCH_SIZE * (int)PATCH_SIZE * (int)image.channels();
    int dimL = (int)PATCH_SIZE / ratio * (int)PATCH_SIZE / ratio * (int)image.channels();
    //3. Create training data set=========================
    int num_of_patch_image = 0;
    int num_of_patch_col = 0;
    int num_of_patch_row = 0;
    GetNumOfPatch(widthH, heightH, (int)PATCH_SIZE, (int)PIXEL_SHIFT, &num_of_patch_image, &num_of_patch_col, &num_of_patch_row);
    cout << "patch numbers: \n " << num_of_patch_image << endl;
    double* FY = (double*)calloc(dimH * num_of_patch_image, sizeof(double));
    CreatePatchDataSet(orgimageH, FY, widthH, heightH, (int)PIXEL_SHIFT, (int)PATCH_SIZE, num_of_patch_col, num_of_patch_row);
    free(orgimageH);
    free(FY);
    return 0;
}
The results I got for the first 10 values in the CPU version:
patch numbers:
16129
238,240,240,235,237,230,227,229,228,227
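(Sanity check: the 128-pixel image upscaled by 2 is 256×256; GetNumOfPatch advances len_nb by patch_size = 5 once and then by pixel_shift = 2 each step, so each axis gives 1 + ⌈(256 − 5) / 2⌉ = 127 patches, and 127 × 127 = 16129 patches in total, matching the printout.)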
I have tried to convert this function into a CUDA kernel, but it goes into an infinite loop. As I am very new to CUDA, could you please help me find the problem in the code?
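The code below relies on the usual gpuErrchk error-checking helper; its definition is not shown in the post, but the standard CUDA wrapper it presumably refers to is:

#include <cstdio>
#include <cstdlib>

// Standard CUDA error-checking wrapper (assumed definition; not in the original post).
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort = true)
{
    if (code != cudaSuccess) {
        fprintf(stderr, "GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
        if (abort) exit(code);
    }
}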
__global__ void CreatePatchDataSet(double *original_data, double* patch_data, const int width, const int height, const int pixel_shift, const int patch_size, const int num_of_patch_col, const int num_of_patch_row) {
    int num_of_patch_image = num_of_patch_row * num_of_patch_col;
    int i = threadIdx.x + (blockDim.x*blockIdx.x);
    int j = threadIdx.y + (blockDim.y*blockIdx.y);
    while (i < height && j < width)
    {
        int counter_row = 0;
        int counter_col = 0;
        //Get Low Resolution Image
        for (int ii = 0; ii < patch_size; ii++) {
            for (int jj = 0; jj < patch_size; jj++) {
                if ((i + ii) < height && (j + jj) < width) {
                    patch_data[num_of_patch_image * (patch_size * ii + jj) + num_of_patch_col*counter_row + counter_col] = original_data[width*(i + ii) + (j + jj)];
                }
                else {
                    patch_data[num_of_patch_image * (patch_size * ii + jj) + num_of_patch_col*counter_row + counter_col] = 0.;
                }
            }
        }
        counter_col++;
        if (counter_col == num_of_patch_col) {
            break;
        }
        counter_row++;
        if (counter_row == num_of_patch_row) {
            break;
        }
    }
    i += blockDim.x*gridDim.x;
    j += blockDim.y*gridDim.y;
}
int main()
{
    int ratio = 2;
    cv::Mat image = cv::imread("input_b2_128.tif", CV_LOAD_IMAGE_UNCHANGED);
    cv::Mat imageH = cv::Mat(image.rows * ratio, image.cols * ratio, CV_8UC1);
    cv::resize(image, imageH, cv::Size(imageH.cols, imageH.rows), 0, 0, cv::INTER_LANCZOS4);
    double *orgimageH = (double*)calloc(imageH.cols*imageH.rows*image.channels(), sizeof(double));
    ConvertMat2DoubleArray(imageH, orgimageH);
    int widthH = imageH.cols;
    int heightH = imageH.rows;
    //
    int dimH = (int)PATCH_SIZE * (int)PATCH_SIZE * (int)image.channels();
    int dimL = (int)PATCH_SIZE / ratio * (int)PATCH_SIZE / ratio * (int)image.channels();
    //3. Create training data set=========================
    int num_of_patch_image = 0;
    int num_of_patch_col = 0;
    int num_of_patch_row = 0;
    GetNumOfPatch(widthH, heightH, (int)PATCH_SIZE, (int)PIXEL_SHIFT, &num_of_patch_image, &num_of_patch_col, &num_of_patch_row);
    cout << "patch numbers: \n " << num_of_patch_image << endl;
    double* FY = (double*)calloc(dimH * num_of_patch_image, sizeof(double));
    double *d_orgimageH;
    gpuErrchk(cudaMalloc((void**)&d_orgimageH, sizeof(double)*widthH*heightH));
    double *d_FY;
    gpuErrchk(cudaMalloc((void**)&d_FY, sizeof(double)*dimH*num_of_patch_image));
    gpuErrchk(cudaMemcpy(d_orgimageH, orgimageH, sizeof(double)*widthH*heightH, cudaMemcpyHostToDevice));
    dim3 dimBlock(16, 16);
    dim3 dimGrid;
    dimGrid.x = (widthH + dimBlock.x - 1) / dimBlock.x;
    dimGrid.y = (heightH + dimBlock.y - 1) / dimBlock.y;
    CreatePatchDataSet<<<dimGrid, dimBlock>>>(d_orgimageH, d_FY, widthH, heightH, (int)PIXEL_SHIFT, (int)PATCH_SIZE, num_of_patch_col, num_of_patch_row);
    gpuErrchk(cudaMemcpy(FY, d_FY, sizeof(double)*dimH*num_of_patch_image, cudaMemcpyDeviceToHost));
    // cout<<"Hello world";
    free(orgimageH);
    free(FY);
    cudaFree(d_FY);
    cudaFree(d_orgimageH);
    return 0;
}
Image I have used: https://i.stack.imgur.com/Ywg7p.png

i += blockDim.x * gridDim.x;
j += blockDim.y * gridDim.y;
is outside the while loop in your kernel. Since i and j never change inside the while loop, it never stops. There could be more problems here, but this is the most prominent one.
EDIT: Another problem I found is that you have a single while loop over both i and j instead of one loop for each. You should probably use for loops, as in your CPU code:
for (i = pixel_shift * (threadIdx.x + (blockDim.x * blockIdx.x));
     i < height;
     i += pixel_shift * blockDim.x * gridDim.x) {
    for (j = ...; j < ...; j += ...) {
        /* ... */
    }
}
EDIT 2:
I imagine this would be a good approach:
for (counter_row = threadIdx.y + blockDim.y * blockIdx.y;
     counter_row < num_of_patch_row;
     counter_row += blockDim.y * gridDim.y) {
    i = counter_row * pixel_shift;
    if (i > height)
        break;
    for (counter_col = threadIdx.x + blockDim.x * blockIdx.x;
         counter_col < num_of_patch_col;
         counter_col += blockDim.x * gridDim.x) {
        j = counter_col * pixel_shift;
        if (j > width)
            break;
        /* ... */
    }
}
I have also exchanged the x/y fields of the execution parameters between the inner and the outer loop, as that seemed more appropriate given that the x field is contiguous within a warp (memory access benefits).
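Putting both edits together, the complete kernel might look like the following (just a sketch of the approach above, assuming one thread per patch and the same flat array layouts as the original code):

__global__ void CreatePatchDataSet(const double *original_data, double *patch_data, const int width, const int height, const int pixel_shift, const int patch_size, const int num_of_patch_col, const int num_of_patch_row) {
    int num_of_patch_image = num_of_patch_row * num_of_patch_col;
    // Grid-stride loops over the patch grid: y walks patch rows, x walks patch columns.
    for (int counter_row = threadIdx.y + blockDim.y * blockIdx.y;
         counter_row < num_of_patch_row;
         counter_row += blockDim.y * gridDim.y) {
        int i = counter_row * pixel_shift;       // top pixel row of this patch
        for (int counter_col = threadIdx.x + blockDim.x * blockIdx.x;
             counter_col < num_of_patch_col;
             counter_col += blockDim.x * gridDim.x) {
            int j = counter_col * pixel_shift;   // left pixel column of this patch
            for (int ii = 0; ii < patch_size; ii++) {
                for (int jj = 0; jj < patch_size; jj++) {
                    int dst = num_of_patch_image * (patch_size * ii + jj) + num_of_patch_col * counter_row + counter_col;
                    if ((i + ii) < height && (j + jj) < width) {
                        patch_data[dst] = original_data[width * (i + ii) + (j + jj)];
                    }
                    else {
                        patch_data[dst] = 0.;    // zero-pad patches that reach past the border
                    }
                }
            }
        }
    }
}

With this structure the grid is best sized over the patch grid rather than the pixel grid, e.g. dimGrid.x = (num_of_patch_col + dimBlock.x - 1) / dimBlock.x and dimGrid.y = (num_of_patch_row + dimBlock.y - 1) / dimBlock.y, although the grid-stride loops keep the kernel correct for any launch size.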

Related

Gaussian smoothing output misaligned

I am trying to perform Gaussian smoothing on this image without using any OpenCV functions (except for displaying the image).
However, the output I got after convolving the image with the Gaussian kernel is as follows:
The output image seems to be misaligned and looks very weird. Any idea what is happening?
Generating the Gaussian kernel:
double gaussian(int x, int y, double sigma) {
    return (1 / (2 * M_PI * pow(sigma, 2))) * exp(-1 * (pow(x, 2) + pow(y, 2)) / (2 * pow(sigma, 2)));
}

double generateFilter(vector<vector<double>> &kernel, int width, double sigma) {
    int value = 0;
    double total = 0;
    if (width % 2 == 1) {
        value = (width - 1) / 2;
    } else {
        value = width / 2;
    }
    double smallest = gaussian(-1 * value, -1 * value, sigma);
    for (int i = -1 * value; i <= value; i++) {
        vector<double> temp;
        for (int k = -1 * value; k <= value; k++) {
            int gVal = round(gaussian(i, k, sigma) / smallest);
            temp.push_back(gVal);
            total += gVal;
        }
        kernel.push_back(temp);
    }
    cout << total << endl;
    return total;
}
Convolution:
vector<vector<unsigned int>> convolution(vector<vector<unsigned int>> src, vector<vector<double>> kernel, double total) {
    int kCenterX = floor(kernel.size() / 2); //center of kernel
    int kCenterY = kCenterX;                 //center of kernel
    int kRows = kernel.size();               //height of kernel
    int kCols = kRows;                       //width of kernel
    int imgRows = src.size();                //height of input image
    int imgCols = src[0].size();             //width of input image
    vector<vector<unsigned int>> dst = vector<vector<unsigned int>>(imgRows, vector<unsigned int>(imgCols, 0));
    for (size_t row = 0; row < imgRows; row++) {
        for (size_t col = 0; col < imgCols; col++) {
            float accumulation = 0;
            float weightsum = 0;
            for (int i = -1 * kCenterX; i <= 1 * kCenterX; i++) {
                for (int j = -1 * kCenterY; j <= 1 * kCenterY; j++) {
                    int k = 0;
                    if ((row + i) >= 0 && (row + i) < imgRows && (col + j) >= 0 && (col + j) < imgCols) {
                        k = src[row + i][col + j];
                        weightsum += kernel[kCenterX + i][kCenterY + j];
                    }
                    accumulation += k * kernel[kCenterX + i][kCenterY + j];
                }
            }
            dst[row][col] = round(accumulation / weightsum);
        }
    }
    return dst;
}
Thank you.
The convolution function is basically correct, so the issue is with the input and output format.
Make sure you are reading the image as grayscale (and not RGB):
cv::Mat I = cv::imread("img.png", cv::IMREAD_GRAYSCALE);
You are passing a vector<vector<unsigned int>> argument to convolution.
I can't say whether it's part of the problem or not, but it's recommended to pass an argument of type cv::Mat (and return cv::Mat):
cv::Mat convolution(cv::Mat src, vector<vector<double>> kernel, double total)
I assume you can convert the input to and from vector<vector<unsigned int>>, but it's not necessary.
Here is a working code sample:
#include <vector>
#include <iostream>
#include "opencv2/opencv.hpp"
#include "opencv2/highgui.hpp"

using namespace std;

double gaussian(int x, int y, double sigma) {
    return (1 / (2 * 3.141592653589793 * pow(sigma, 2))) * exp(-1 * (pow(x, 2) + pow(y, 2)) / (2 * pow(sigma, 2)));
}

double generateFilter(vector<vector<double>> &kernel, int width, double sigma)
{
    int value = 0;
    double total = 0;
    if (width % 2 == 1) {
        value = (width - 1) / 2;
    }
    else {
        value = width / 2;
    }
    double smallest = gaussian(-1 * value, -1 * value, sigma);
    for (int i = -1 * value; i <= value; i++) {
        vector<double> temp;
        for (int k = -1 * value; k <= value; k++) {
            int gVal = round(gaussian(i, k, sigma) / smallest);
            temp.push_back(gVal);
            total += gVal;
        }
        kernel.push_back(temp);
    }
    cout << total << endl;
    return total;
}

//vector<vector<unsigned int>> convolution(vector<vector<unsigned int>> src, vector<vector<double>> kernel, double total) {
cv::Mat convolution(cv::Mat src, vector<vector<double>> kernel, double total) {
    int kCenterX = floor(kernel.size() / 2); //center of kernel
    int kCenterY = kCenterX;                 //center of kernel
    int kRows = kernel.size();               //height of kernel
    int kCols = kRows;                       //width of kernel
    int imgRows = src.rows; //src.size();    //height of input image
    int imgCols = src.cols; //src[0].size(); //width of input image
    //vector<vector<unsigned int>> dst = vector<vector<unsigned int>> (imgRows, vector<unsigned int>(imgCols ,0));
    cv::Mat dst = cv::Mat::zeros(src.size(), CV_8UC1); //Create destination matrix, and fill with zeros (dst is Grayscale image with byte per pixel).
    for (size_t row = 0; row < imgRows; row++) {
        for (size_t col = 0; col < imgCols; col++) {
            double accumulation = 0;
            double weightsum = 0;
            for (int i = -1 * kCenterX; i <= 1 * kCenterX; i++) {
                for (int j = -1 * kCenterY; j <= 1 * kCenterY; j++) {
                    int k = 0;
                    if ((row + i) >= 0 && (row + i) < imgRows && (col + j) >= 0 && (col + j) < imgCols) {
                        //k = src[row+i][col+j];
                        k = (int)src.at<uchar>(row + i, col + j); //Read pixel from row [row + i] and column [col + j]
                        weightsum += kernel[kCenterX + i][kCenterY + j];
                    }
                    accumulation += (double)k * kernel[kCenterX + i][kCenterY + j];
                }
            }
            //dst[row][col] = round(accumulation/weightsum);
            dst.at<uchar>(row, col) = (uchar)round(accumulation / weightsum); //Write pixel to row [row] and column [col]
            //dst.at<uchar>(row, col) = src.at<uchar>(row, col);
        }
    }
    return dst;
}

int main()
{
    vector<vector<double>> kernel;
    double total = generateFilter(kernel, 11, 3.0);
    //Read input image as Grayscale (one byte per pixel).
    cv::Mat I = cv::imread("img.png", cv::IMREAD_GRAYSCALE);
    cv::Mat J = convolution(I, kernel, total);
    //Display input and output
    cv::imshow("I", I);
    cv::imshow("J", J);
    cv::waitKey(0);
    cv::destroyAllWindows();
    return 0;
}
Result:

Gaussian filter reads same value multiple times using BMP image

I need to translate a Gaussian filter that uses OpenCV into code that uses a BMP image (so I first read the image and convert it to grayscale). My OpenCV function looks like this (a basic Gaussian filter):
Mat CreateGaussFilter(int kernalHeight, int kernalWidth, double kernalArray[5][5]) {
    Mat image = imread("konik.jpg");
    Mat grayScaleImage(image.size(), CV_8UC1);
    Mat filter(image.size(), CV_8UC1);
    cvtColor(image, grayScaleImage, CV_RGB2GRAY);
    int rows = image.rows;
    int cols = image.cols;
    int verticleImageBound = (kernalHeight - 1) / 2;
    int horizontalImageBound = (kernalWidth - 1) / 2;
    for (int row = 0 + verticleImageBound; row < rows - verticleImageBound; row++) {
        for (int col = 0 + horizontalImageBound; col < cols - horizontalImageBound; col++) {
            float value = 0.0;
            for (int kRow = 0; kRow < kernalHeight; kRow++) {
                for (int kCol = 0; kCol < kernalWidth; kCol++) {
                    float pixel = grayScaleImage.at<uchar>(kRow + row - verticleImageBound, kCol + col - horizontalImageBound) * kernalArray[kRow][kCol];
                    value += pixel;
                }
            }
            filter.at<uchar>(row, col) = cvRound(value);
        }
    }
    return filter;
}
Now for the BMP image. I have loaded it using:
struct Info {
    int width;
    int height;
    int offset;
    unsigned char *info;
    unsigned char *data;
    int size;
};

Info readBMP(char* filename)
{
    int i;
    std::ifstream is(filename, std::ifstream::binary);
    is.seekg(0, is.end);
    i = is.tellg();
    is.seekg(0);
    unsigned char *info = new unsigned char[i];
    is.read((char *)info, i);
    int width = *(int*)&info[18];
    int height = *(int*)&info[22];
    int offset = *(int*)&info[10];
    unsigned char a[offset];
    unsigned char *b = new unsigned char[i - offset];
    std::copy(info,
              info + offset,
              a);
    std::copy(info + offset,
              info + i,
              b + 0);
    Info dat;
    dat.width = width;
    dat.height = height;
    dat.offset = offset;
    dat.size = i;
    dat.info = new unsigned char[offset - 1];
    dat.data = new unsigned char[i - offset + 1];
    for (int j = 0; j < offset; j++) {
        dat.info[j] = a[j];
    }
    for (int j = 0; j < i - offset; j++) {
        dat.data[j] = b[j];
    }
    return dat;
}
turned it into grayscale using:
void greyScale(unsigned char *src, int rows, int cols) {
    for (int i = 0; i < rows; i++) {
        for (int j = 0; j < cols; j++) {
            unsigned char r = src[3 * (i * cols + j)];
            unsigned char g = src[3 * (i * cols + j) + 1];
            unsigned char b = src[3 * (i * cols + j) + 2];
            char linearIntensity = (char)(0.2126f * r + 0.7512f * g + 0);
            src[3 * (i * cols + j)] = linearIntensity;
            src[3 * (i * cols + j) + 1] = linearIntensity;
            src[3 * (i * cols + j) + 2] = linearIntensity;
        }
    }
}
And now I am trying to use the Gaussian filter (translated from my OpenCV function):
void FilterCreation(double GKernel[][5]) {
    // initialising standard deviation to 1.0
    double sigma = 1.0;
    double r, s = 2.0 * sigma * sigma;
    // sum is for normalization
    double sum = 0.0;
    // generating 5x5 kernel
    for (int x = -2; x <= 2; x++) {
        for (int y = -2; y <= 2; y++) {
            r = sqrt(x * x + y * y);
            GKernel[x + 2][y + 2] = (exp(-(r * r) / s)) / (M_PI * s);
            sum += GKernel[x + 2][y + 2];
        }
    }
    // normalising the kernel
    for (int i = 0; i < 5; ++i)
        for (int j = 0; j < 5; ++j)
            GKernel[i][j] /= sum;
}

unsigned char ** CreateGaussFilter(unsigned char **src, int kernalHeight, int kernalWidth, double kernalArray[5][5], int rows, int cols) {
    int verticleImageBound = (kernalHeight - 1) / 2;
    int horizontalImageBound = (kernalWidth - 1) / 2;
    unsigned char **dst = new unsigned char *[rows];
    for (int i = 0; i < rows; i++) {
        dst[i] = new unsigned char[cols];
    }
    for (int row = 0 + verticleImageBound; row < rows - verticleImageBound; row++) {
        for (int col = 0 + horizontalImageBound; col < cols - horizontalImageBound; col++) {
            float value = 0;
            for (int kRow = 0; kRow < kernalHeight; kRow++) {
                for (int kCol = 0; kCol < kernalWidth; kCol++) {
                    float pixel = src[kRow + row - verticleImageBound][kCol + col - horizontalImageBound] * kernalArray[kRow][kCol];
                    value += pixel;
                }
            }
            dst[row][col] = round(value);
        }
    }
    return dst;
}
Since the grayscale values are the same for every channel, instead of doing the calculation as in the greyScale function, I turned the data into a 2D array and then back into a 1D array using:
unsigned char ** return2darray(unsigned char *src, int width, int height, int size) {
    unsigned char **array = new unsigned char *[width];
    for (int i = 0; i < width; i++) {
        array[i] = new unsigned char[height];
    }
    for (int i = 0; i < width; i++) {
        for (int j = 0; j < height; j++) {
            array[i][j] = src[3 * (i * height + j)];
        }
    }
    return array;
}

unsigned char * return1darray(unsigned char **src, int width, int height, int size) {
    unsigned char *array = new unsigned char[size];
    for (int i = 0; i < width; i++) {
        for (int j = 0; j < height; j++) {
            array[3 * (i * height + j)] = src[i][j];
            array[3 * (i * height + j) + 1] = src[i][j];
            array[3 * (i * height + j) + 2] = src[i][j];
        }
    }
    return array;
}
And using it like:
int main() {
    // load img
    Info dat = readBMP("input.bmp");
    // turn it into greyscale
    greyScale(dat.data, dat.width, dat.height);
    // turn 1d array into 2d
    unsigned char** arr = return2darray(dat.data, dat.width, dat.height, dat.size);
    double GKernel[5][5];
    // generate Gaussian filter
    FilterCreation(GKernel);
    // apply Gaussian filter
    unsigned char** filter = CreateGaussFilter(arr, 5, 5, GKernel, dat.width, dat.height);
    // convert it back into 1d array
    unsigned char* ar = return1darray(filter, dat.width, dat.height, dat.size);
    ofstream fout;
    fout.open("out.bmp", ios::binary | ios::out);
    fout.write(reinterpret_cast<char *>(dat.info), dat.offset);
    fout.write(reinterpret_cast<char *>(ar), dat.size - dat.offset);
    fout.close();
    return 0;
}
But for some reason that I cannot figure out, for this input:
the output looks like this.
It seems to read the same values in periods, but that would mean the original image has the same periods, because the code just reads bytes from the loaded image. The greyScale function works as it should. I am not very proficient in manipulating images (I was using OpenCV the whole time). What could cause these periods? Thanks for the help!
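One likely culprit to check (an assumption on my part, since no answer is quoted here): BMP pixel rows are padded to a multiple of 4 bytes, so indexing the raw data with 3 * (i * cols + j) drifts by the padding amount on every row, which produces exactly this kind of periodic shift. A stride-aware read would look roughly like:

// Sketch under the assumption of a 24-bit BMP: each pixel row occupies
// width*3 bytes rounded up to the next multiple of 4.
int rowStride = (dat.width * 3 + 3) & ~3;             // padded bytes per row
unsigned char px = dat.data[i * rowStride + 3 * j];   // instead of dat.data[3 * (i * cols + j)]

(BMP rows are also stored bottom-up, which would flip the image but would not by itself cause the periodic pattern.)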

Implementing Sobel Operator

I am trying to implement the Sobel operator in both the horizontal and vertical directions, but somehow I am getting the reverse output. The code I have attached is below. For the horizontal mask:
char mask[3][3] = {{-1,-2,-1},{0,0,0},{1,2,1}};

void masking(Mat image) {
    Mat temImage = image.clone();
    for (int i = 1; i < image.rows - 1; i++)
    {
        for (int j = 1; j < image.cols - 1; j++)
        {
            for (int k = 0; k < 3; k++)
            {
                int pixel1 = image.at<Vec3b>(i-1,j-1)[k] * -1;
                int pixel2 = image.at<Vec3b>(i,j-1)[k] * -2;
                int pixel3 = image.at<Vec3b>(i+1,j-1)[k] * -1;
                int pixel4 = image.at<Vec3b>(i-1,j)[k] * 0;
                int pixel5 = image.at<Vec3b>(i,j)[k] * 0;
                int pixel6 = image.at<Vec3b>(i+1,j)[k] * 0;
                int pixel7 = image.at<Vec3b>(i-1,j+1)[k] * 1;
                int pixel8 = image.at<Vec3b>(i,j+1)[k] * 2;
                int pixel9 = image.at<Vec3b>(i+1,j+1)[k] * 1;
                int sum = pixel1 + pixel2 + pixel3 + pixel4 + pixel5 + pixel6 + pixel7 + pixel8 + pixel9;
                if (sum < 0)
                {
                    sum = 0;
                }
                if (sum > 255)
                    sum = 255;
                temImage.at<Vec3b>(i,j)[k] = sum;
            }
        }
    }
    //printf("conter = %d",counter);
    imshow("Display", temImage);
    imwrite("output1.png", temImage);
}
I am getting the output as
whereas for the vertical mask
char mask[3][3] = {{-1,0,1},{-2,0,2},{-1,0,1}};

void masking(Mat image) {
    Mat temImage = image.clone();
    for (int i = 1; i < image.rows - 1; i++)
    {
        for (int j = 1; j < image.cols - 1; j++)
        {
            for (int k = 0; k < 3; k++)
            {
                int pixel1 = image.at<Vec3b>(i-1,j-1)[k] * -1;
                int pixel2 = image.at<Vec3b>(i,j-1)[k] * 0;
                int pixel3 = image.at<Vec3b>(i+1,j-1)[k] * 1;
                int pixel4 = image.at<Vec3b>(i-1,j)[k] * -2;
                int pixel5 = image.at<Vec3b>(i,j)[k] * 0;
                int pixel6 = image.at<Vec3b>(i+1,j)[k] * 2;
                int pixel7 = image.at<Vec3b>(i-1,j+1)[k] * -1;
                int pixel8 = image.at<Vec3b>(i,j+1)[k] * 0;
                int pixel9 = image.at<Vec3b>(i+1,j+1)[k] * 1;
                int sum = pixel1 + pixel2 + pixel3 + pixel4 + pixel5 + pixel6 + pixel7 + pixel8 + pixel9;
                if (sum < 0)
                {
                    sum = 0;
                }
                if (sum > 255)
                    sum = 255;
                temImage.at<Vec3b>(i,j)[k] = sum;
            }
        }
    }
    //printf("conter = %d",counter);
    imshow("Display", temImage);
    imwrite("output1.png", temImage);
}
I am getting the output as
The main function is attached below
int main(int argc, char** argv) {
    Mat input_image = imread("sobel1.jpg", 1);
    masking(input_image);
    waitKey(0);
    return 0;
}
According to the guide https://www.tutorialspoint.com/dip/sobel_operator.htm I should get the reverse output. Can anyone help me with this?
The original image is
No, the tutorial is not wrong; it talks about masks, not gradients. The weak point of that tutorial is that it doesn't mention that we are calculating horizontal gradients using what they call the vertical mask.
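In other words, the {{-1,-2,-1},{0,0,0},{1,2,1}} mask responds to vertical gradients and therefore highlights horizontal edges, and vice versa. If the goal is an orientation-independent edge map, a common follow-up step (a sketch, not part of the question's code) is to combine the two responses per pixel:

#include <algorithm>
#include <cmath>

// Combine the two Sobel responses into one edge strength.
// gx: sum produced by the {{-1,0,1},{-2,0,2},{-1,0,1}} mask,
// gy: sum produced by the {{-1,-2,-1},{0,0,0},{1,2,1}} mask.
inline int sobelMagnitude(int gx, int gy)
{
    int mag = static_cast<int>(std::lround(std::sqrt(static_cast<double>(gx * gx + gy * gy))));
    return std::min(255, mag);  // clamp to the 8-bit range
}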

OpenCV color image to gray trouble

I'm new to OpenCV and I'm trying to process an image from a directory, make it black and white (grayscale), and then write it to another file. But the output image is quite different from what I expected. Maybe you can help me and point out the errors in the code?
#include <iostream>
#include <opencv2/opencv.hpp>
#include <conio.h>
#include <string.h>
#include <string>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/core/core.hpp>
#include <stdio.h>
#include <stdlib.h>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
using namespace std;
void faktorial(int InSize, char *DataIn, char *DataOut) // function header
{
    for (int i = 0, j = 0; i < InSize; i += 4, j++)
    {
        DataOut[j] = (DataIn[i] + DataIn[i + 1] + DataIn[i + 2]) / 3;
    }
}
int main()
{
    char* c = "E:\henrik-evensen-castle-valley-v03.jpg";
    printf("Input source of image\n Example of right directory file: E:\henrik-evensen-castle-valley-v03.jpg\n Your try:\n");
    char *tbLEN;
    tbLEN = new char[1024];
    cin.getline(tbLEN, 1024);
    cout << tbLEN;
    IplImage* image;
    image = cvLoadImage(tbLEN, 1);
    int height1 = image->height;
    int width1 = image->width;
    int step = image->widthStep;
    int SizeIn = step * height1;
    char* DatIn = image->imageData;
    IplImage *image2 = cvCreateImage(cvSize(image->width, image->height), IPL_DEPTH_8U, 1);
    char* DatOut = image2->imageData;
    faktorial(SizeIn, DatIn, DatOut);
    cvNamedWindow("Imagecolor");
    cvShowImage("Imagecolor", image);
    cvNamedWindow("Gray");
    cvShowImage("Gray", image2);
    cvWaitKey(0);
    return 0;
}
EDIT: I don't need the cvtColor function; I need to use that faktorial function.
In faktorial you assume you have 3 channels, so you need to increase i by 3, not by 4. Also, you need to convert the char* data to uchar* data so that the accumulation works correctly.
You end up with:
void faktorial(int InSize, uchar *DataIn, uchar *DataOut)
{
    for (int i = 0, j = 0; i < InSize; i += 3, j++)
    {
        DataOut[j] = (DataIn[i] + DataIn[i + 1] + DataIn[i + 2]) / 3;
    }
}
You can easily extend this to multiple channels, like:
void faktorial2(int InSize, int nChannels, uchar *DataIn, uchar *DataOut)
{
    for (int i = 0, j = 0; i < InSize; i += nChannels, j++)
    {
        int accum = 0;
        for (int c = 0; c < nChannels; ++c)
        {
            accum += DataIn[i + c];
        }
        DataOut[j] = uchar(accum / nChannels);
    }
}
In general you also need to take the image stride into account:
void faktorial3(int rows, int cols, int in_step, int in_channels, int out_step, uchar *in, uchar *out)
{
    for (int r = 0; r < rows; ++r)
    {
        for (int c = 0; c < cols; ++c)
        {
            int accum = 0;
            for (int i = 0; i < in_channels; ++i)
            {
                accum += in[r*in_step + c * in_channels + i];
            }
            out[r*out_step + c] = uchar(accum / in_channels);
        }
    }
}
Here is the full code with the calls:
#include <opencv2/opencv.hpp>

using namespace std;

void faktorial3(int rows, int cols, int in_step, int in_channels, int out_step, uchar *in, uchar *out)
{
    for (int r = 0; r < rows; ++r)
    {
        for (int c = 0; c < cols; ++c)
        {
            int accum = 0;
            for (int i = 0; i < in_channels; ++i)
            {
                accum += in[r*in_step + c * in_channels + i];
            }
            out[r*out_step + c] = uchar(accum / in_channels);
        }
    }
}

void faktorial(int InSize, uchar *DataIn, uchar *DataOut)
{
    for (int i = 0, j = 0; i < InSize; i += 3, j++)
    {
        DataOut[j] = (DataIn[i] + DataIn[i + 1] + DataIn[i + 2]) / 3;
    }
}

void faktorial2(int InSize, int nChannels, uchar *DataIn, uchar *DataOut)
{
    for (int i = 0, j = 0; i < InSize; i += nChannels, j++)
    {
        int accum = 0;
        for (int c = 0; c < nChannels; ++c)
        {
            accum += DataIn[i + c];
        }
        DataOut[j] = uchar(accum / nChannels);
    }
}

int main()
{
    char tbLEN[] = "D:\\SO\\img\\barns.jpg";
    IplImage* image;
    image = cvLoadImage(tbLEN, 1);
    IplImage *image2 = cvCreateImage(cvSize(image->width, image->height), IPL_DEPTH_8U, 1);
    int height1 = image->height;
    int width1 = image->width;
    int step = image->widthStep;
    int SizeIn = step * height1;
    int nChannels = image->nChannels;
    uchar* DatIn = (uchar*)image->imageData;
    uchar* DatOut = (uchar*)image2->imageData;
    faktorial(SizeIn, DatIn, DatOut);
    //faktorial2(SizeIn, nChannels, DatIn, DatOut);
    //faktorial3(image->height, image->width, image->widthStep, image->nChannels, image2->widthStep, (uchar*)image->imageData, (uchar*)image2->imageData);
    cvNamedWindow("Imagecolor");
    cvShowImage("Imagecolor", image);
    cvNamedWindow("Gray");
    cvShowImage("Gray", image2);
    cvWaitKey(0);
    return 0;
}
Remember that the C API is obsolete; you should switch to the C++ API.
Try cvtColor(src, bwsrc, CV_RGB2GRAY);
http://docs.opencv.org/2.4/modules/imgproc/doc/miscellaneous_transformations.html (look for cvtColor).
Your faktorial is intended for images with 4 bytes per pixel (and it doesn't take possible line padding into account).
An image loaded from JPG has 3 bytes per pixel, which is why you see 4 shifted ghosts.
You can modify faktorial or just convert the loaded image to a 4-byte format:
image = cvLoadImage(tbLEN, 1);
cvtColor(image, image, CV_RGB2RGBA);
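For reference, the whole conversion in the modern C++ API (a minimal sketch; the hard-coded path is just a placeholder for your own file) needs no manual pixel loop at all:

#include <opencv2/opencv.hpp>

int main()
{
    // Placeholder input path; substitute your own file.
    cv::Mat image = cv::imread("E:/henrik-evensen-castle-valley-v03.jpg", cv::IMREAD_COLOR);
    if (image.empty())
        return 1;

    cv::Mat gray;
    cv::cvtColor(image, gray, cv::COLOR_BGR2GRAY); // OpenCV loads color images as BGR

    cv::imshow("Imagecolor", image);
    cv::imshow("Gray", gray);
    cv::waitKey(0);
    return 0;
}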

How to implement midpoint displacement

I'm trying to implement procedural generation in my game. I want to really grasp and understand all of the necessary algorithms rather than simply copying/pasting existing code. In order to do this I've attempted to implement 1D midpoint displacement on my own. I've used the information here to write and guide my code. Below is my completed code; it doesn't throw an error, but the results don't appear correct.
srand(time(NULL));
const int lineLength = 65;
float range = 1.0;
float displacedLine[lineLength];
for (int i = 0; i < lineLength; i++)
{
    displacedLine[i] = 0.0;
}
for (int p = 0; p < 100; p++)
{
    int segments = 1;
    for (int i = 0; i < (lineLength / pow(2, 2)); i++)
    {
        int segs = segments;
        for (int j = 0; j < segs; j++)
        {
            int x = floor(lineLength / segs);
            int start = (j * x) + 1;
            int end = start + x;
            if (i == 0)
            {
                end--;
            }
            float lo = -range;
            float hi = +range;
            float change = lo + static_cast<float>(rand()) / (static_cast<float>(RAND_MAX / (hi - lo)));
            int center = ((end - start) / 2) + start;
            displacedLine[center - 1] += change;
            segments++;
        }
        range /= 2;
    }
}
Where exactly have I made mistakes and how might I correct them?
I'm getting results like this:
But I was expecting results like this:
The answer is very simple, and by the way, I'm impressed you managed to debug all the potential off-by-one errors in your code. The following line is wrong:
displacedLine[center - 1] += change;
You correctly compute the center index and the change amount, but you missed that the change should be applied to the midpoint of the segment's height. That is:
displacedLine[center - 1] = (displacedLine[start] + displacedLine[end]) / 2;
displacedLine[center - 1] += change;
I'm sure you get the idea.
The problem seems to be that you are changing only the midpoint of each line segment, rather than changing the rest of the line segment in proportion to its distance from each end to the midpoint. The following code appears to give you something more like what you're looking for:
#include <iostream>
#include <cstdlib>
#include <ctime>
#include <math.h>
#include <algorithm>

using namespace std;

void displaceMidPt(float dline[], int len, float disp) {
    int midPt = len / 2;
    float fmidPt = float(midPt);
    for (int i = 1; i <= midPt; i++) {
        float ptDisp = disp * float(i) / fmidPt;
        dline[i] += ptDisp;
        dline[len - i] += ptDisp;
    }
}

void displace(float displacedLine[], int lineLength, float range) {
    for (int p = 0; p < 100; p++) {
        int segs = pow(p, 2);
        for (int j = 0; j < segs; j++) {
            float lo = -range;
            float hi = +range;
            float change = lo + static_cast<float>(rand()) / (static_cast<float>(RAND_MAX / (hi - lo)));
            int start = int(float(j) / float(segs) * float(lineLength));
            int end = int(float(j + 1) / float(segs) * float(lineLength));
            displaceMidPt(displacedLine + start, end - start, change);
        }
        range /= 2;
    }
}

void plot1D(float x[], int len, int ht = 10) {
    float minX = *min_element(x, x + len);
    float maxX = *max_element(x, x + len);
    int xi[len];
    for (int i = 0; i < len; i++) {
        xi[i] = int(ht * (x[i] - minX) / (maxX - minX) + 0.5);
    }
    char s[len + 1];
    s[len] = '\0';
    for (int j = ht; j >= 0; j--) {
        for (int i = 0; i < len; i++) {
            if (xi[i] == j) {
                s[i] = '*';
            } else {
                s[i] = ' ';
            }
        }
        cout << s << endl;
    }
}

int main() {
    srand(time(NULL));
    const int lineLength = 65;
    float range = 1.0;
    float displacedLine[lineLength];
    for (int i = 0; i < lineLength; i++) {
        displacedLine[i] = 0.0;
    }
    displace(displacedLine, lineLength, range);
    plot1D(displacedLine, lineLength);
    return 0;
}
When run this way, it produces the following result:
$ c++ -lm displace.cpp
$ ./a
*
* *
* ***
* * * *
* ** **** * **
* *** **** * * * ** *
* * ** ** *** * * * *
** ** *
* * * ***
** ***
*