I'm making a painterly rendering.
Right now I'm implementing the line-clipping part.
But I got this error:
(unsigned)(pt.x*DataType<_Tp>::channels) < (unsigned)(size.p[1]*channels())
and the assertion comes from this OpenCV code:
template<typename _Tp> inline const _Tp& Mat::at(int i0, int i1) const
{
CV_DbgAssert( dims <= 2 && data && (unsigned)i0 < (unsigned)size.p[0] &&
(unsigned)(i1*DataType<_Tp>::channels) < (unsigned)(size.p[1]*channels()) &&
CV_ELEM_SIZE1(DataType<_Tp>::depth) == elemSize1());
return ((const _Tp*)(data + step.p[0]*i0))[i1];
}
I think the error happens in Lineclipping().
Please suggest another good way to clip the line.
This is my code. I'm just a student, so my coding skills are very much at a beginner level.
#include <iostream>
#include <opencv2/opencv.hpp>
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <sstream>
#include <cmath>
#include <stdio.h>
#include <cstdlib>
#include <time.h>
#include <random>
using namespace cv;
using namespace std;
random_device rd;
mt19937_64 rng(rd());
double PI = 3.141592;
int perturbLength = (rand() % 6) + 1;
int perturbRadius = ((rand() % 5) + 0) / 10;
int perturbAngle = (rand() % 15) + (-15);
int Maxlength = 10 - perturbLength;
int radius = 2 - perturbRadius;
int angle = 45 - perturbAngle;
double theta = angle*(PI / 180);
void Lineclipping(int x, int y, double theta, int len, Point2d& pt1, Point2d& pt2, Mat& EdgeMap)
{
double length = ceil(len);
float detectPT = len / length;
for (int i = detectPT; i <= len;)
{
Point2d Mpt1(x + length*cos(theta), y + length*sin(theta));
if (EdgeMap.at<uchar>(Mpt1.y, Mpt1.x) > 0)
{
pt1.x = Mpt1.x;
pt1.y = Mpt1.y;
}
else if (i == length)
{
pt1.x = Mpt1.x;
pt1.y = Mpt1.y;
}
i = i + detectPT;
}
for (int i = detectPT; i <= len;)
{
Point2d Mpt2(x - length*cos(theta), y - length*sin(theta));
if (EdgeMap.at<uchar>(Mpt2.y, Mpt2.x) > 0)
{
pt2.x = Mpt2.x;
pt2.y = Mpt2.y;
}
else if (i == length)
{
pt2.x = Mpt2.x;
pt2.y = Mpt2.y;
}
i = i + detectPT;
}
}
Mat EdgeDetect(Mat& referenceimg, Mat& Edge)
{
Mat image = referenceimg.clone();
//Make Edge Map
Mat IntensityImg(image.size(), CV_8U, 255);
Mat sobelx, sobely;
for (int i = 0; i < image.rows; i++)
{
for (int j = 0; j < image.cols; j++)
{
Vec3b intensity = image.at<Vec3b>(j, i);
uchar blue = intensity.val[0];
uchar green = intensity.val[1];
uchar red = intensity.val[2];
IntensityImg.at<uchar>(j, i) = (30 * red + 59 * green + 11 * blue) / 100;
}
}
GaussianBlur(IntensityImg, IntensityImg, Size(5, 5), 0.1, 0.1);
Sobel(IntensityImg, sobelx, CV_32F, 1, 0);
Sobel(IntensityImg, sobely, CV_32F, 0, 1);
Mat magnitudeXY = abs(sobelx), abs(sobely);
magnitudeXY.convertTo(Edge, CV_8U);
Mat mask(3, 3, CV_8UC1, 1);
morphologyEx(Edge, Edge, MORPH_ERODE, mask);
for (int i = 0; i < image.rows; i++)
{
for (int j = 0; j < image.cols; j++)
{
Edge.at<uchar>(j, i) = (Edge.at<uchar>(j, i) > 20 ? 255 : 0);
}
}
imshow("intensity", Edge);
return Edge;
}
void paint(Mat &image, int snum)
{
Mat Edge;
EdgeDetect(image, Edge);
for (int n = 0; n < snum; n++)
{
int x = rand() % image.cols;
int y = rand() % image.rows;
if (image.channels() == 1)
{
image.at<uchar>(x, y) = 255;
}
else if (image.channels() == 3)
{
int length = Maxlength / 2;
Point2d pt1(x + length*cos(theta), y + length*sin(theta));
Point2d pt2(x - length*cos(theta), y - length*sin(theta));
Lineclipping(x, y, theta, length, pt1, pt2, Edge);
//draw line
Scalar color(image.at<Vec3b>(y, x)[0], image.at<Vec3b>(y, x)[1], image.at<Vec3b>(y, x)[2]);
line(image, pt1, pt2, color, radius);
}
}
}
int main()
{
Mat Img = imread("fruit.jpg", IMREAD_COLOR);
CV_Assert(Img.data);
Mat resultImage = Img.clone();
Mat sobel = Img.clone();
int num = Img.rows*Img.cols;
paint(resultImage, num);
imshow("result", resultImage);
waitKey();
return 0;
}
And this is the part where the error occurs:
for (int i = detectPT; i <= len;)
{
Point2d Mpt1(x + length*cos(theta), y + length*sin(theta));
if (EdgeMap.at<uchar>(Mpt1.y, Mpt1.x) > 0)
{
pt1.x = Mpt1.x;
pt1.y = Mpt1.y;
}
else if (i == length)
{
pt1.x = Mpt1.x;
pt1.y = Mpt1.y;
}
i = i + detectPT;
}
for (int i = detectPT; i <= len;)
{
Point2d Mpt2(x - length*cos(theta), y - length*sin(theta));
if (EdgeMap.at<uchar>(Mpt2.y, Mpt2.x) > 0)
{
pt2.x = Mpt2.x;
pt2.y = Mpt2.y;
}
else if (i == length)
{
pt2.x = Mpt2.x;
pt2.y = Mpt2.y;
}
i = i + detectPT;
}
Thank you!
Since I can't compile and run this, I am going to walk through a possible execution and show you where you can hit this out-of-range error.
int perturbLength = (rand() % 6) + 1; // Range is 1 to 6, let's assume 4
int perturbAngle = (rand() % 15) + (-15); // Range is -15 to -1, let's assume -1
int Maxlength = 10 - perturbLength; // 6
int angle = 45 - perturbAngle; // 46
double theta = angle*(PI / 180); // about 0.8029
Now we get into this code inside the paint method:
int x = rand() % image.cols; // Let's assume image.cols - 2
int y = rand() % image.rows; // Let's assume image.rows - 1
Inside of paint we will reach this code:
int length = Maxlength / 2; // Maxlength is 6 so this is 3
Lineclipping(x, y, theta, length, pt1, pt2, Edge);
Which leads to the Lineclipping method and here we get a problem:
Point2d Mpt1(x + length*cos(theta), y + length*sin(theta));
if (EdgeMap.at<uchar>(Mpt1.y, Mpt1.x) > 0)
This is the problem. Remember, x is image.cols - 2. Now we perform x + length * cos(theta), which is (image.cols - 2) + 3 * cos(0.8029). 3 * cos(0.8029) is about 2.08, so the x coordinate ends up at roughly image.cols + 0.08. Whether that is floored or rounded when it is used as an index, it becomes image.cols, which is already one past the last valid column (image.cols - 1). The same thing can happen with the y coordinate and image.rows. In either case we go beyond the bounds of the matrix and the debug assertion in Mat::at fires.
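One way to avoid the problem is to walk along the stroke direction one pixel at a time, bounds-check every sample before calling at<>(), and stop at the first edge pixel. Here is a minimal sketch of that idea (not the original code; the helper name and the exact stepping are only for illustration, it assumes EdgeMap is a single-channel CV_8U edge map as in the question, and it reuses the question's includes and using namespace cv):
// Walk outward from (x, y) in both directions, check bounds before every
// sample, and clip the stroke at the first edge pixel that is hit.
static void clipStroke(int x, int y, double theta, int len,
                       Point2d& pt1, Point2d& pt2, const Mat& EdgeMap)
{
    const double dx = cos(theta), dy = sin(theta);
    pt1 = pt2 = Point2d(x, y);                     // fall back to the centre point
    for (int dir = -1; dir <= 1; dir += 2)         // -1: one side, +1: the other side
    {
        Point2d end(x, y);
        for (int i = 1; i <= len; ++i)
        {
            int px = cvRound(x + dir * i * dx);
            int py = cvRound(y + dir * i * dy);
            // Stop before leaving the image instead of letting at<>() assert.
            if (px < 0 || py < 0 || px >= EdgeMap.cols || py >= EdgeMap.rows)
                break;
            end = Point2d(px, py);
            if (EdgeMap.at<uchar>(py, px) > 0)     // hit an edge: clip here
                break;
        }
        if (dir < 0) pt2 = end; else pt1 = end;
    }
}
This also replaces the floating-point loop counter with an integer one, so the loop is guaranteed to terminate.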
I want to implement region growing algorithm for components Cr and Cb (YCbCr) (separate and combined) with manually chosen seed point (mouse click).
At the moment I have two functions that implement region growing for the H component in the HSV color space.
bool isOk(int new_x, int new_y, int width, int height)
{
if (new_x < 0 || new_y < 0 || new_x >= width || new_y >= height)
return false;
return true;
}
void lab04_MouseCallback(int event, int x, int y, int flags, void* param)
{
Mat* src = (Mat*)param;
int height = (*src).rows;
int width = (*src).cols;
if (event == CV_EVENT_LBUTTONDOWN)
{
printf("Seed point(x,y): %d,%d\n", x, y);
Mat labels = Mat::zeros((*src).size(), CV_16UC1);
int w = 3,
hue_avg = 0,
inf_x, sup_x,
inf_y, sup_y,
cnt = 0;
inf_x = (x - w < 0) ? 0 : x - w;
inf_y = (y - w < 0) ? 0 : y - w;
sup_x = (x + w >= width) ? (width - 1) : x + w;
sup_y = (y + w >= height) ? (height - 1) : y + w;
printf("inf x: %d sup x: %d --- inf y: %d sup y: %d\n", inf_x, sup_x, inf_y, sup_y);
for (int i = inf_y; i <= sup_y; ++i)
{
for (int j = inf_x; j <= sup_x; ++j)
{
hue_avg += (*src).data[i * width + j];
//printf("H at <%d, %d> is %d\n", i, j, (*src).data[i * width + j]);
}
}
hue_avg /= (sup_x - inf_x + 1) * (sup_y - inf_y + 1);
printf("Hue average: %d\n\n", hue_avg);
int k = 1, N = 1, hue_std = 10;
int konst = 3;
int T = konst * (float)hue_std;
queue<Point> Q;
Q.push(Point(x, y));
while (!Q.empty())
{
int dx[8] = { -1, 0, 1, 1, 1, 0, -1, -1 };
int dy[8] = { -1, -1, -1, 0, 1, 1, 1, 0 };
Point temp = Q.front();
Q.pop();
for (int dir = 0; dir < 8; ++dir)
{
int new_x = temp.x + dx[dir];
int new_y = temp.y + dy[dir];
if (isOk(new_x, new_y, width, height))
{
//printf("(%d, %d)\n", new_x, new_y);
if (labels.at<ushort>(new_y, new_x) == 0)
{
//printf("labels(%d, %d) = %hu\n", new_x, new_y, labels.at<ushort>(new_y, new_x));
if (abs((*src).at<uchar>(new_y, new_x) - hue_avg) < T)
{
//printf("this one\n");
Q.push(Point(new_x, new_y));
labels.at<ushort>(new_y, new_x) = k;
hue_avg = ((N * hue_avg) + (*src).at<uchar>(new_y, new_x)) / (N + 1);
++N;
}
}
}
}
}
Mat dst = (*src).clone();
for (int i = 0; i < height; i++)
{
for (int j = 0; j < width; j++)
{
if (labels.at<ushort>(i, j) == 1)
{
dst.at<uchar>(i, j) = 255;
}
else
{
dst.at<uchar>(i, j) = 0;
}
}
}
imshow("dst", dst);
}
}
void lab04_MouseClick()
{
Mat src;
Mat hsv;
// Read image from file
char fname[MAX_PATH];
while (openFileDlg(fname))
{
src = imread(fname);
int height = src.rows;
int width = src.cols;
//Create a window
namedWindow("My Window", 1);
// Apply a Gaussian low-pass filter to remove noise: it is essential to apply it
GaussianBlur(src, src, Size(5, 5), 0, 0);
// The Hue color component of the HSV model
Mat H = Mat(height, width, CV_8UC1);
// define a pointer to the (8 bits/pixel) matrix used to store
// the individual H component
uchar* lpH = H.data;
cvtColor(src, hsv, CV_BGR2HSV); // RGB -> HSV conversion
// define a pointer to the (24 bits/pixel) matrix of the HSV image
uchar* hsvDataPtr = hsv.data;
for (int i = 0; i < height; i++)
{
for (int j = 0; j < width; j++)
{
// index into the hsv matrix (24 bits/pixel)
int hi = i * width * 3 + j * 3;
int gi = i * width + j; // index into the H matrix (8 bits/pixel)
lpH[gi] = hsvDataPtr[hi] * 510 / 360; // lpH = 0 .. 255
}
}
//set the callback function for any mouse event
setMouseCallback("My Window", lab04_MouseCallback, &H);
//show the image
imshow("My Window", src);
// Wait until user press some key
waitKey(0);
}
}
How can I change this code to be for components Cr and Cb?
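For reference, here is a minimal sketch of how the Cr and Cb planes could be extracted (not part of the original code; it assumes a BGR input image as returned by imread, a reasonably recent OpenCV where the constant is spelled COLOR_BGR2YCrCb, and a made-up file name). Each resulting single-channel plane could then be passed to the existing mouse callback the same way the H plane is:
#include <opencv2/opencv.hpp>
#include <vector>
using namespace cv;
int main()
{
    Mat src = imread("image.jpg");              // hypothetical file name
    GaussianBlur(src, src, Size(5, 5), 0, 0);   // same denoising step as in the question
    Mat ycrcb;
    cvtColor(src, ycrcb, COLOR_BGR2YCrCb);      // BGR -> YCrCb
    std::vector<Mat> planes;
    split(ycrcb, planes);                       // planes[0] = Y, planes[1] = Cr, planes[2] = Cb
    Mat Cr = planes[1], Cb = planes[2];         // each is CV_8UC1, just like H
    // e.g. setMouseCallback("My Window", lab04_MouseCallback, &Cr);
    imshow("Cr", Cr);
    imshow("Cb", Cb);
    waitKey(0);
    return 0;
}
The region-growing logic itself would not need to change; only the single-channel plane handed to the callback (and possibly the threshold T) does.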
I am trying to perform Gaussian smoothing on this image without using any OpenCV functions (except for displaying the image).
However, the output I got after convolving the image with the Gaussian kernel is as follows:
The output image seems to be misaligned and looks very weird. Any idea what is happening?
Generate gaussian kernel:
double gaussian(int x, int y,double sigma){
return (1/(2*M_PI*pow(sigma,2)))*exp(-1*(pow(x,2)+pow(y,2))/(2*pow(sigma,2)));
}
double generateFilter(vector<vector<double>> & kernel,int width,double sigma){
int value = 0;
double total =0;
if(width%2 == 1){
value = (width-1)/2;
}else{
value = width/2;
}
double smallest = gaussian(-1*value,-1*value,sigma);
for(int i = -1*value; i<=value; i++){
vector<double> temp;
for(int k = -1*value; k<=value; k++){
int gVal = round(gaussian(i,k,sigma)/smallest);
temp.push_back(gVal);
total += gVal;
}
kernel.push_back(temp);
}
cout<<total<<endl;
return total;
}
Convolution:
vector<vector<unsigned int>> convolution(vector<vector<unsigned int>> src, vector<vector<double>> kernel,double total){
int kCenterX = floor(kernel.size() / 2); //center of kernel
int kCenterY = kCenterX; //center of kernel
int kRows = kernel.size(); //height of kernel
int kCols = kRows; //width of kernel
int imgRows = src.size(); //height of input image
int imgCols = src[0].size(); //width of input image
vector<vector<unsigned int>> dst = vector<vector<unsigned int>> (imgRows, vector<unsigned int>(imgCols ,0));
for ( size_t row = 0; row < imgRows; row++ ) {
for ( size_t col = 0; col < imgCols; col++ ) {
float accumulation = 0;
float weightsum = 0;
for ( int i = -1*kCenterX; i <= 1*kCenterX; i++ ) {
for ( int j = -1*kCenterY; j <= 1*kCenterY; j++ ) {
int k = 0;
if((row+i)>=0 && (row+i)<imgRows && (col+j)>=0 && (col+j)<imgCols){
k = src[row+i][col+j];
weightsum += kernel[kCenterX+i][kCenterY+j];
}
accumulation += k * kernel[kCenterX +i][kCenterY+j];
}
}
dst[row][col] = round(accumulation/weightsum);
}
}
return dst;
}
Thank you.
The convolution function is basically correct, so the issue is with the input and output format.
Make sure you are reading the image as Grayscale (and not RGB):
cv::Mat I = cv::imread("img.png", cv::IMREAD_GRAYSCALE);
You are passing a vector<vector<unsigned int>> argument to convolution.
I can't say whether that is part of the problem or not, but it's recommended to pass an argument of type cv::Mat (and return a cv::Mat):
cv::Mat convolution(cv::Mat src, vector<vector<double>> kernel, double total)
I assume you can convert the input to and from vector<vector<unsigned int>>, but it's not necessary.
Here is a working code sample:
#include <vector>
#include <iostream>
#include "opencv2/opencv.hpp"
#include "opencv2/highgui.hpp"
using namespace std;
double gaussian(int x, int y, double sigma) {
return (1 / (2 * 3.141592653589793*pow(sigma, 2)))*exp(-1 * (pow(x, 2) + pow(y, 2)) / (2 * pow(sigma, 2)));
}
double generateFilter(vector<vector<double>> & kernel, int width, double sigma)
{
int value = 0;
double total = 0;
if (width % 2 == 1) {
value = (width - 1) / 2;
}
else {
value = width / 2;
}
double smallest = gaussian(-1 * value, -1 * value, sigma);
for (int i = -1 * value; i <= value; i++) {
vector<double> temp;
for (int k = -1 * value; k <= value; k++) {
int gVal = round(gaussian(i, k, sigma) / smallest);
temp.push_back(gVal);
total += gVal;
}
kernel.push_back(temp);
}
cout << total << endl;
return total;
}
//vector<vector<unsigned int>> convolution(vector<vector<unsigned int>> src, vector<vector<double>> kernel, double total) {
cv::Mat convolution(cv::Mat src, vector<vector<double>> kernel, double total) {
int kCenterX = floor(kernel.size() / 2); //center of kernel
int kCenterY = kCenterX; //center of kernel
int kRows = kernel.size(); //height of kernel
int kCols = kRows; //width of kernel
int imgRows = src.rows;//src.size(); //height of input image
int imgCols = src.cols;//src[0].size(); //width of input image
//vector<vector<unsigned int>> dst = vector<vector<unsigned int>> (imgRows, vector<unsigned int>(imgCols ,0));
cv::Mat dst = cv::Mat::zeros(src.size(), CV_8UC1); //Create destination matrix, and fill with zeros (dst is Grayscale image with byte per pixel).
for (size_t row = 0; row < imgRows; row++) {
for (size_t col = 0; col < imgCols; col++) {
double accumulation = 0;
double weightsum = 0;
for (int i = -1 * kCenterX; i <= 1 * kCenterX; i++) {
for (int j = -1 * kCenterY; j <= 1 * kCenterY; j++) {
int k = 0;
if ((row + i) >= 0 && (row + i) < imgRows && (col + j) >= 0 && (col + j) < imgCols) {
//k = src[row+i][col+j];
k = (int)src.at<uchar>(row + i, col + j); //Read pixel from row [row + i] and column [col + j]
weightsum += kernel[kCenterX + i][kCenterY + j];
}
accumulation += (double)k * kernel[kCenterX + i][kCenterY + j];
}
}
//dst[row][col] = round(accumulation/weightsum);
dst.at<uchar>(row, col) = (uchar)round(accumulation / weightsum); //Write pixel from to row [row] and column [col]
//dst.at<uchar>(row, col) = src.at<uchar>(row, col);
}
}
return dst;
}
int main()
{
vector<vector<double>> kernel;
double total = generateFilter(kernel, 11, 3.0);
//Read input image as Grayscale (one byte per pixel).
cv::Mat I = cv::imread("img.png", cv::IMREAD_GRAYSCALE);
cv::Mat J = convolution(I, kernel, total);
//Display input and output
cv::imshow("I", I);
cv::imshow("J", J);
cv::waitKey(0);
cv::destroyAllWindows();
return 0;
}
Result:
I want to implement the Harris corner detector. I found this page very helpful, since it shows how the detector is implemented using basic OpenCV functions (like GaussianBlur and Sobel):
https://compvisionlab.wordpress.com/2013/03/02/harris-interest-point-detection-implementation-opencv/
Now I even want to implement the Gaussian blur and Sobel myself. If I run my Gaussian or Sobel over some images it works, but in combination with my corner detector it does not work. Can anybody help me, please? The full code is below, thanks.
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include <iostream>
#include <stdio.h>
#include <stdlib.h>
using namespace cv;
using namespace std;
/// Global variables
Mat src, src_gray, dst;
int thresh = 200;
int max_thresh = 255;
char* source_window = "Source Image";
char* corners_window = "Corner Image";
/// Function header
void cornerHarris_demo(int, void*);
void cornerHarrisMe(int, int, double);
int xGradient(Mat, int, int);
int yGradient(Mat, int, int);
void SobelMe(Mat&,Mat&,int,int);
int borderCheck(int M, int x);
void SepGaussian(Mat&, Mat&, int, int);
/** #function main */
int main(int argc, char** argv)
{
/// Load source image and convert it to gray
src = imread("data/a-real-big-church.jpg", 1);
//Mat src_gray(src.size(), CV_8UC1);
cvtColor(src, src_gray, CV_BGR2GRAY);
/// Create a window and a trackbar
namedWindow(source_window, CV_WINDOW_AUTOSIZE);
createTrackbar("Threshold: ", source_window, &thresh, max_thresh, cornerHarris_demo);
imshow(source_window, src);
cornerHarris_demo(0, 0);
waitKey(0);
return(0);
}
/** #function cornerHarris_demo */
void cornerHarris_demo(int, void*)
{
Mat dst_norm, dst_norm_scaled;
/// Detector parameters
int blockSize = 2;
int apertureSize = 3;
double k = 0.04;
/// Detecting corners
cornerHarrisMe(blockSize, apertureSize, k);
/// Normalizing
normalize(dst, dst_norm, 0, 255, NORM_MINMAX, CV_32FC1, Mat());
convertScaleAbs(dst_norm, dst_norm_scaled);
/// Drawing a circle around corners
for (int j = 0; j < dst_norm.rows; j++)
{
for (int i = 0; i < dst_norm.cols; i++)
{
if ((int)dst_norm.at<float>(j, i) > thresh)
{
circle(dst_norm_scaled, Point(i, j), 5, Scalar(255), 2, 8, 0);
}
}
}
/// Showing the result
namedWindow(corners_window, CV_WINDOW_AUTOSIZE);
imshow(corners_window, dst_norm_scaled);
}
void cornerHarrisMe(int blockSize, int apertureSize, double k)
{
Mat x2y2, xy, mtrace, x_der, y_der, x2_der, y2_der, xy_der, x2g_der, y2g_der, xyg_der;
//1: calculate x and y derivative of image via Sobel
SobelMe(src_gray, x_der, 1, 0);
SobelMe(src_gray, y_der, 0, 1);
//2: calculate other three images in M
pow(x_der, blockSize, x2_der);
pow(y_der, blockSize, y2_der);
multiply(x_der, y_der, xy_der);
//3: gaussain
SepGaussian(x2_der, x2g_der, 1, 0);
SepGaussian(y2_der, y2g_der, 0, 1);
SepGaussian(xy_der, xyg_der, 1, 1);
//4. calculating R with k
multiply(x2g_der, y2g_der, x2y2);
multiply(xyg_der, xyg_der, xy);
pow((x2g_der + y2g_der), blockSize, mtrace);
dst = (x2y2 - xy) - k * mtrace;
}
// gradient in the x direction
int xGradient(Mat image, int x, int y)
{
return image.at<uchar>(y - 1, x - 1) +
2 * image.at<uchar>(y, x - 1) +
image.at<uchar>(y + 1, x - 1) -
image.at<uchar>(y - 1, x + 1) -
2 * image.at<uchar>(y, x + 1) -
image.at<uchar>(y + 1, x + 1);
}
// gradient in the y direction
int yGradient(Mat image, int x, int y)
{
return image.at<uchar>(y - 1, x - 1) +
2 * image.at<uchar>(y - 1, x) +
image.at<uchar>(y - 1, x + 1) -
image.at<uchar>(y + 1, x - 1) -
2 * image.at<uchar>(y + 1, x) -
image.at<uchar>(y + 1, x + 1);
}
void SobelMe(Mat& source, Mat& destination, int xOrder, int yOrder){
int gradX, gradY, sum;
destination = source.clone();
if (xOrder == 1 && yOrder == 0){
for (int y = 1; y < source.rows - 1; y++){
for (int x = 1; x < source.cols - 1; x++){
gradX = xGradient(source, x, y);
sum = abs(gradX);
sum = sum > 255 ? 255 : sum;
sum = sum < 0 ? 0 : sum;
destination.at<uchar>(y, x) = sum;
}
}
}
else if (xOrder == 0 && yOrder == 1){
for (int y = 1; y < source.rows - 1; y++){
for (int x = 1; x < source.cols - 1; x++){
gradY = yGradient(source, x, y);
sum = abs(gradY);
sum = sum > 255 ? 255 : sum;
sum = sum < 0 ? 0 : sum;
destination.at<uchar>(y, x) = sum;
}
}
}
else if (xOrder == 1 && yOrder == 1)
for (int y = 1; y < source.rows - 1; y++){
for (int x = 1; x < source.cols - 1; x++){
gradX = xGradient(source, x, y);
gradY = yGradient(source, x, y);
sum = abs(gradX) + abs(gradY);
sum = sum > 255 ? 255 : sum;
sum = sum < 0 ? 0 : sum;
destination.at<uchar>(y, x) = sum;
}
}
}
int borderCheck(int M, int x){
if (x < 0)
return -x - 1;
if (x >= M)
return 2 * M - x - 1;
return x;
}
void SepGaussian(Mat& source, Mat& desination, int sigmaX, int sigmaY){
// coefficients of 1D gaussian kernel with sigma = 1
double coeffs[] = { 0.0545, 0.2442, 0.4026, 0.2442, 0.0545 };
Mat tempX, tempY;
float sum, x1, y1;
desination = source.clone();
tempY = source.clone();
tempX = source.clone();
// along y - direction
if (sigmaX == 0 && sigmaY == 1){
for (int y = 0; y < source.rows; y++){
for (int x = 0; x < source.cols; x++){
sum = 0.0;
for (int i = -2; i <= 2; i++){
y1 = borderCheck(source.rows, y - i);
sum = sum + coeffs[i + 2] * source.at<uchar>(y1, x);
}
desination.at<uchar>(y, x) = sum;
}
}
}
// along x - direction
else if (sigmaX == 1 && sigmaY == 0){
for (int y = 0; y < source.rows; y++){
for (int x = 0; x < source.cols; x++){
sum = 0.0;
for (int i = -2; i <= 2; i++){
x1 = borderCheck(source.cols, x - i);
sum = sum + coeffs[i + 2] * source.at<uchar>(y, x1);
}
desination.at<uchar>(y, x) = sum;
}
}
}
// along xy - direction
else if (sigmaX == 1 && sigmaY == 1){
for (int y = 0; y < source.rows; y++){
for (int x = 0; x < source.cols; x++){
sum = 0.0;
for (int i = -2; i <= 2; i++){
y1 = borderCheck(source.rows, y - i);
sum = sum + coeffs[i + 2] * source.at<uchar>(y1, x);
}
tempY.at<uchar>(y, x) = sum;
}
}
for (int y = 0; y < source.rows; y++){
for (int x = 0; x < source.cols; x++){
sum = 0.0;
for (int i = -2; i <= 2; i++){
x1 = borderCheck(source.cols, x - i);
sum = sum + coeffs[i + 2] * tempY.at<uchar>(y, x1);
}
desination.at<uchar>(y, x) = sum;
}
}
}
}
The result:
Here is a picture of the result.
The result is now the other way around: it detects areas where there are no corners.
In case there are any questions, feel free to ask me.
I need to select a pixel value and apply region growing starting from that seed pixel. After trying to write the code, the result was always a black image, regardless of which seed point I used. The whole problem lies in the GrowColor function; my guess is a logical error in the ifs.
#include <cv.h>
#include <highgui.h>
using namespace std;
int xDim, yDim, zDim;
float ThreshHold = 45.0;
unsigned long total[3];
int coont, tt;
IplImage *Image1;
IplImage *Image2;
CvScalar s = cvScalar(0, 0, 0, 0);
CvScalar s11 = cvScalar(0, 0, 0, 0);
int Diff, mean[3], temp[3];
void GrowColor(int x, int y);
int main(int argc, char *argv[]) {
char value[4];
int pixType, dimCut;
int Dbug = false;
int Xseed = 40, Yseed = 234;
int i = 0, x, y;
Image1 = cvLoadImage("lenah.jpg");
yDim = Image1->height;
xDim = Image1->width;
// int step= Image1->widthStep;
//uchar* data = (uchar *)Image1->imageData;
//New image
Image2 = cvCreateImage(cvSize(Image1->width, Image1->height), IPL_DEPTH_8U,
1);
cvZero(Image2);
total[0] = total[1] = total[2] = coont = 0;
//Process
for (y = Yseed - 5; y <= Yseed + 5; y++)
for (x = Xseed - 5; x <= Xseed + 5; x++)
if ((x > 0) && (y > 0) && (x < xDim) && (y < yDim)) {
coont++;
s = cvGet2D(Image1, x, y);
total[0] += abs(s.val[0]);
total[1] += abs(s.val[1]);
total[2] += abs(s.val[2]);
}
GrowColor(Xseed, Yseed);
cvNamedWindow("wndname", 1);
cvShowImage("original", Image1);
cvShowImage("wndname", Image2);
cvWaitKey(0);
return 0;
}
void GrowColor(int x, int y) {
//Check to see if point already part of region
s.val[0] = 0;
s.val[1] = 0;
s.val[2] = 0;
s.val[3] = 0;
if ((x < 1) && (y < 1))
s = cvGet2D(Image2, x, y);
if (s.val[0] == 0) {
int k;
if ((x == 1) && (y == 1))
s11 = cvGet2D(Image1, x, y);
mean[0] = total[0] / coont;
mean[1] = total[1] / coont;
mean[2] = total[2] / coont;
temp[0] = abs(s11.val[0]) - mean[0];
temp[1] = abs(s11.val[1]) - mean[1];
temp[2] = abs(s11.val[2]) - mean[2];
Diff =
(int) (sqrt(
(temp[0] * temp[0] + temp[1] * temp[1]
+ temp[2] * temp[2]) / 3));
if (Diff < ThreshHold) {
total[0] += abs(s11.val[0]);
total[1] += abs(s11.val[1]);
total[2] += abs(s11.val[2]);
coont++;
s.val[0] = 120;
if ((x > 0) && (y > 0))
cvSet2D(Image2, x, y, s);
if (x > 2)
GrowColor(x - 1, y);
if (y > 2)
GrowColor(x, y - 1);
if (x < xDim - 2)
GrowColor(x + 1, y);
if (y < yDim - 2)
GrowColor(x, y + 1);
}
}
}
Making GrowColor a recursive function can result in unbounded recursion (and a stack overflow), because already-visited pixels are not reliably detected and neighbouring pixels keep calling each other. Check the conditions in that function again.
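For reference, here is a minimal sketch (my own code, using the C++ cv::Mat API and made-up names) of the same kind of region growing driven by an explicit std::queue instead of recursion; because every pixel is marked in the mask before it is expanded, it cannot recurse or loop without bound:
#include <opencv2/opencv.hpp>
#include <queue>
#include <cmath>
using namespace cv;
// Grow a region from 'seed' over a BGR image: a neighbour joins the region
// if its colour distance to the seed colour is below 'thresh'.
static Mat growRegion(const Mat& img, Point seed, double thresh)
{
    Mat mask = Mat::zeros(img.size(), CV_8UC1);
    Vec3b ref = img.at<Vec3b>(seed);            // reference colour at the seed
    std::queue<Point> q;
    q.push(seed);
    mask.at<uchar>(seed) = 255;                 // mark the seed as visited
    const int dx[4] = { 1, -1, 0, 0 };
    const int dy[4] = { 0, 0, 1, -1 };
    while (!q.empty())
    {
        Point p = q.front(); q.pop();
        for (int k = 0; k < 4; ++k)
        {
            Point n(p.x + dx[k], p.y + dy[k]);
            if (n.x < 0 || n.y < 0 || n.x >= img.cols || n.y >= img.rows)
                continue;                       // outside the image
            if (mask.at<uchar>(n))              // already part of the region
                continue;
            Vec3b c = img.at<Vec3b>(n);
            int db = c[0] - ref[0], dg = c[1] - ref[1], dr = c[2] - ref[2];
            if (std::sqrt(double(db * db + dg * dg + dr * dr)) < thresh)
            {
                mask.at<uchar>(n) = 255;        // mark as visited/accepted
                q.push(n);                      // expand from it later
            }
        }
    }
    return mask;
}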
For a project I'm writing some code to compute the HoG of some images, but I'm stuck: my orientations only come out between 0 and 90 degrees, even though I'm using the atan2 function.
I'm guessing that the problem is caused by OpenCV's filter2D function, but I'm not sure whether that is the reason or whether I'm doing something else wrong:
Vector<Vector<Mat_<float>>> HoG(Mat image) {
Mat img_x;
Mat img_y;
IplImage img = image;
Mat kern_x = (Mat_<char>(1, 3) << -1, 0, 1);
Mat kern_y = (Mat_<char>(3, 1) << -1, 0, 1);
filter2D(image, img_x, image.depth(), kern_x);
filter2D(image, img_y, image.depth(), kern_y);
Vector<Vector<Mat_<float>>> histograms;
for(int y = 0; y < image.rows - size; y += size) {
Vector<Mat_<float>> temp_hist;
for(int x = 0; x < image.cols - size; x += size) {
float total_mag = 0;
Mat hist = Mat::zeros(1, 8, CV_32FC1);
for(int i = y; i < y + size; ++i) {
for(int j = x; j < x + size; ++j) {
float grad_x = (float)img_x.at<uchar>(i, j);
float grad_y = (float)img_y.at<uchar>(i, j);
double ori = myatan2(grad_x, grad_y);
float mag = sqrt(pow(grad_x, 2) + pow(grad_y, 2));
int bin = round(ori/45);
hist.at<float>(0, (bin - 1 < 0 ? 7 : bin - 1)) += - (float)(ori - ((round(ori/45) - 1) * 45.0 + 22.5)) / 45.0f;
hist.at<float>(0, bin) += -(float)(ori - ((round(ori/45) - 1) * 45.0 + 22.5)) / 45.0f;
total_mag += mag;
}
}
// Normalize the histogram
for(int i = 0; i < 8; ++i) {
hist.at<float>(0, i) = hist.at<float>(0, i) / total_mag;
}
temp_hist.push_back(hist);
}
histograms.push_back(temp_hist);
}
return histograms;
}
If you have any other tips to speed up my code, or anything else, that is of course also welcome.
I notice this:
float grad_x = (float)img_x.at<uchar>(i, j);
float grad_y = (float)img_y.at<uchar>(i, j);
You seem to be using uchar. Should this not be char?
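Building on that: the kernels produce negative responses, but filter2D is called with image.depth() (CV_8U) as the destination depth, so every negative value is saturated to 0, and reading the result as uchar keeps it non-negative. With both arguments of atan2 non-negative, the orientation can only fall between 0 and 90 degrees. Here is a minimal sketch of one way to keep the sign (not the original code; the function name is made up), which asks filter2D for a float destination and reads it back as float:
#include <opencv2/opencv.hpp>
#include <cmath>
using namespace cv;
// Compute signed gradients as floats so atan2 can return the full
// -180..180 degree range. Assumes 'image' is a CV_8U grayscale Mat.
void gradientOrientation(const Mat& image)
{
    Mat img_x, img_y;
    Mat kern_x = (Mat_<float>(1, 3) << -1, 0, 1);
    Mat kern_y = (Mat_<float>(3, 1) << -1, 0, 1);
    filter2D(image, img_x, CV_32F, kern_x);     // float output keeps negative values
    filter2D(image, img_y, CV_32F, kern_y);
    for (int i = 0; i < image.rows; ++i)
        for (int j = 0; j < image.cols; ++j)
        {
            float gx = img_x.at<float>(i, j);   // read as float, not uchar
            float gy = img_y.at<float>(i, j);
            double ori = std::atan2(gy, gx) * 180.0 / CV_PI;  // -180 .. 180 degrees
            double mag = std::sqrt(gx * gx + gy * gy);
            (void)ori; (void)mag;               // bin ori/mag into the histogram here
        }
}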