Region Growing for Cr and Cb (YCbCr) components - C++

I want to implement a region growing algorithm for the Cr and Cb components (YCbCr), both separately and combined, with a manually chosen seed point (mouse click).
At the moment I have two functions that implement region growing for the H component of the HSV color space.
bool isOk(int new_x, int new_y, int width, int height)
{
if (new_x < 0 || new_y < 0 || new_x >= width || new_y >= height)
return false;
return true;
}
void lab04_MouseCallback(int event, int x, int y, int flags, void* param)
{
Mat* src = (Mat*)param;
int height = (*src).rows;
int width = (*src).cols;
if (event == CV_EVENT_LBUTTONDOWN)
{
printf("Seed point(x,y): %d,%d\n", x, y);
Mat labels = Mat::zeros((*src).size(), CV_16UC1);
int w = 3,
hue_avg = 0,
inf_x, sup_x,
inf_y, sup_y,
cnt = 0;
inf_x = (x - w < 0) ? 0 : x - w;
inf_y = (y - w < 0) ? 0 : y - w;
sup_x = (x + w >= width) ? (width - 1) : x + w;
sup_y = (y + w >= height) ? (height - 1) : y + w;
printf("inf x: %d sup x: %d --- inf y: %d sup y: %d\n", inf_x, sup_x, inf_y, sup_y);
for (int i = inf_y; i <= sup_y; ++i)
{
for (int j = inf_x; j <= sup_x; ++j)
{
hue_avg += (*src).data[i * width + j];
//printf("H at <%d, %d> is %d\n", i, j, (*src).data[i * width + j]);
}
}
hue_avg /= (sup_x - inf_x + 1) * (sup_y - inf_y + 1);
printf("Hue average: %d\n\n", hue_avg);
int k = 1, N = 1, hue_std = 10;
int konst = 3;
int T = konst * (float)hue_std;
queue<Point> Q;
Q.push(Point(x, y));
while (!Q.empty())
{
int dx[8] = { -1, 0, 1, 1, 1, 0, -1, -1 };
int dy[8] = { -1, -1, -1, 0, 1, 1, 1, 0 };
Point temp = Q.front();
Q.pop();
for (int dir = 0; dir < 8; ++dir)
{
int new_x = temp.x + dx[dir];
int new_y = temp.y + dy[dir];
if (isOk(new_x, new_y, width, height))
{
//printf("(%d, %d)\n", new_x, new_y);
if (labels.at<ushort>(new_y, new_x) == 0)
{
//printf("labels(%d, %d) = %hu\n", new_x, new_y, labels.at<ushort>(new_y, new_x));
if (abs((*src).at<uchar>(new_y, new_x) - hue_avg) < T)
{
//printf("this one\n");
Q.push(Point(new_x, new_y));
labels.at<ushort>(new_y, new_x) = k;
hue_avg = ((N * hue_avg) + (*src).at<uchar>(new_y, new_x)) / (N + 1);
++N;
}
}
}
}
}
Mat dst = (*src).clone();
for (int i = 0; i < height; i++)
{
for (int j = 0; j < width; j++)
{
if (labels.at<ushort>(i, j) == 1)
{
dst.at<uchar>(i, j) = 255;
}
else
{
dst.at<uchar>(i, j) = 0;
}
}
}
imshow("dst", dst);
}
}
void lab04_MouseClick()
{
Mat src;
Mat hsv;
// Read image from file
char fname[MAX_PATH];
while (openFileDlg(fname))
{
src = imread(fname);
int height = src.rows;
int width = src.cols;
//Create a window
namedWindow("My Window", 1);
// Apply a Gaussian low-pass filter to remove noise: it is essential to apply it
GaussianBlur(src, src, Size(5, 5), 0, 0);
// Hue color component of the HSV model
Mat H = Mat(height, width, CV_8UC1);
// define a pointer to the matrix (8 bits/pixel) used to store
// the individual H component
uchar* lpH = H.data;
cvtColor(src, hsv, CV_BGR2HSV); // RGB -> HSV conversion
// define a pointer to the matrix (24 bits/pixel) of the HSV image
uchar* hsvDataPtr = hsv.data;
for (int i = 0; i < height; i++)
{
for (int j = 0; j < width; j++)
{
// index into the hsv matrix (24 bits/pixel)
int hi = i * width * 3 + j * 3;
int gi = i * width + j; // index into the H matrix (8 bits/pixel)
lpH[gi] = hsvDataPtr[hi] * 510 / 360; // lpH = 0 .. 255
}
}
//set the callback function for any mouse event
setMouseCallback("My Window", lab04_MouseCallback, &H);
//show the image
imshow("My Window", src);
// Wait until user press some key
waitKey(0);
}
}
How can I change this code so that it works on the Cr and Cb components?
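For reference, here is a minimal sketch of how the channel preparation could change (using the old-style constant CV_BGR2YCrCb to match the CV_BGR2HSV above; the mouse callback can stay exactly as it is, since it only ever sees a single 8-bit plane):
// Convert to YCrCb instead of HSV and pull out the chroma planes
Mat ycrcb;
cvtColor(src, ycrcb, CV_BGR2YCrCb); // BGR -> YCrCb: planes[0] = Y, planes[1] = Cr, planes[2] = Cb
vector<Mat> planes;
split(ycrcb, planes);
Mat Cr = planes[1].clone(); // 8-bit, single channel, same layout the callback expects
Mat Cb = planes[2].clone();
// region-grow on one component exactly as before:
setMouseCallback("My Window", lab04_MouseCallback, &Cr); // or &Cb
For the combined case, a common approach is to keep both planes and replace the scalar test abs(value - avg) < T with a distance in the (Cr, Cb) plane, for example (dCr*dCr + dCb*dCb) < T*T, so a pixel joins the region only when both chroma values are close to their running averages.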

Related

CFD in C++, Lattice Boltzmann method, I don't know what I'm doing

I'm trying to create my own CFD solver in C++. I have watched some videos on YouTube about the lattice Boltzmann method, but I can't get my simulations to look like the simulations performed in the videos, where lattice Boltzmann is implemented in Python.
I use SDL2 to draw an image on my screen. I am not trying to create anything fast, just something that will make pretty simulations on the CPU.
Here is my class for each cell:
//cell class
class cell {
public:
double Fi[nL] = {0,0,0,0,0,0,0,0,0};
double density = 0;
double momentumX = 0;
double momentumY = 0;
double velocityX = 0;
double velocityY = 0;
double Fieq[nL] = {0,0,0,0,0,0,0,0,0};
//obstacle
bool obstacle = false;
void densityOperator() {
for (int i = 0; i < nL; i++) {
density += Fi[i];
}
}
void momentumOperator() {
for (int i = 0; i < nL; i++) {
momentumX += Fi[i] * cX[i];
momentumY += Fi[i] * cY[i];
}
}
void velocityOperator() {
for (int i = 0; i < nL; i++) {
if (density == 0) {
density += 0.001;
}
velocityX += momentumX / density; // prolly very slow
velocityY += momentumY / density;
//velocityX += cX[i];
//velocityY += cY[i];
}
}
void FieqOperator() {
for (int i = 0; i < nL; i++) {
Fieq[i] = weights[i] * density *
(
1 +
(cX[i] * velocityX + cY[i] * velocityY) / Cs +
pow((cX[i] * velocityX + cY[i] * velocityY), 2) / (2 * pow(Cs, 4)) -
(velocityX * velocityX + velocityY * velocityY) / (2 * pow(Cs, 2))
);
}
}
void FiOperator() {
for (int i = 0; i < nL; i++) {
Fi[i] = Fi[i] - (timestep / tau) * (Fi[i] - Fieq[i]);
}
}
void addRightVelocity() {
Fi[0] = 1.f;
Fi[1] = 1.f;
Fi[2] = 1.f;
Fi[3] = 6.f;
Fi[4] = 1.f;
Fi[5] = 1.f;
Fi[6] = 1.f;
Fi[7] = 1.f;
Fi[8] = 1.f;
}
};
Please note that I am using a vector for my cells instead of a 2D array. I am using an index function to go from (x, y) to a 1D coordinate.
int index(int x, int y) {
return x * nY + y;
}
Variables:
//box
const int nX = 400;
const int nY = 100;
//viscosity
float tau = 0.5; // 0.53
//time delta time per iteration
float timestep = 1;
//distance between cells
float dist = 1000;
//Speed of sound
float Cs = 1 / sqrt(3) * (dist / timestep);
//viscosity
float v = pow(Cs, 2) * (tau - timestep / 2); // tau will need to be much smaller
//time steps
int nT = 3000;
//lattice speeds and weights
const int nL = 9;
//Ci vector direction, discrete velocity
int cX[9] = { 0, 0, 1, 1, 1, 0, -1, -1, -1 };
int cY[9] = { 0, 1, 1, 0, -1, -1, -1, 0 , 1 };
//weights, based on navier stokes
float weights[9] = { 4 / 9, 1 / 9, 1 / 36, 1 / 9, 1 / 36, 1 / 9, 1 / 36, 1 / 4, 1 / 36 };
//opposite populations
int cO[9] = { 0, 5, 6, 7, 8, 1, 2, 3, 4 };
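One small thing worth flagging in the block above, only because it is easy to overlook: in C++, initializers such as 1 / 9 and 1 / 36 are integer divisions and evaluate to 0, so most of the weights table ends up zero. A sketch of the same table with floating-point literals, assuming the standard D2Q9 weights in the same direction ordering as cX and cY above (rest particle first, then alternating axis and diagonal directions; note that slot 7 is an axis direction, so the usual weight there would be 1/9 rather than 1/4):
//weights, standard D2Q9, matched to the cX/cY ordering above
float weights[9] = { 4.f / 9.f, 1.f / 9.f, 1.f / 36.f, 1.f / 9.f, 1.f / 36.f,
                     1.f / 9.f, 1.f / 36.f, 1.f / 9.f, 1.f / 36.f };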
My main function:
int main() {
//init vector cells
for (int x = 0; x < nX; x++) {
for (int y = 0; y < nY; y++) {
cell cellUnit;
cells.push_back(cellUnit);
TempCells.push_back(cellUnit);
}
}
//SDL
//SDL
//-------------------------------------------------------------
SDL_Window* window = nullptr;
SDL_Renderer* renderer = nullptr;
SDL_Init(SDL_INIT_VIDEO);
SDL_CreateWindowAndRenderer(nX* 3, nY * 3, 0, &window, &renderer);
SDL_RenderSetScale(renderer, 3, 3);
SDL_SetRenderDrawColor(renderer, 0, 0, 0, 255);
SDL_RenderClear(renderer);
//-------------------------------------------------------------//
//Circle Object Gen
for (int x = 0; x < nX; x++) {
for (int y = 0; y < nY; y++) {
//circle position
int circleX = 5;
int circleY = 50;
//circle radius
float radius = 10;
//distance between cell and circle position
float distance = sqrt(pow(circleX - x, 2) + pow(circleY - y, 2));
if (distance < radius) {
cells[index(x,y)].obstacle = true;
}
else {
cells[index(x, y)].obstacle = false;
}
}
}
//add velocity
for (int x = 0; x < nX; x++) {
for (int y = 0; y < nY; y++) {
cells[index(x, y)].addRightVelocity();
//random velocity
for (int i = 0; i < nL; i++) {
cells[index(x,y)].Fi[i] += (rand() % 200) / 100;
}
}
}
for (int t = 0; t < nT; t++) {
//SDL
//--------------------------------------------------------------
//clear renderer
if (t % 20 == 0) {
SDL_SetRenderDrawColor(renderer, 255, 255, 255, 255);
SDL_RenderClear(renderer);
}
//--------------------------------------------------------------
//streaming:
//because we will loop over the same populations we do not want to switch the same population twice
for (int x = 0; x < nX; x++) {
for (int y = 0; y < nY; y++) {
if (x == 0) {
cells[index(x, y)].Fi[3] += 0.4;
}
//for populations
for (int i = 0; i < nL; i++) {
//boundary
//checks if cell is object or air
if (cells[index(x, y)].obstacle == false) {
//air
//targeted cell
int cellX = x + cX[i];
int cellY = y + cY[i];
//out of bounds check + rearrange to other side
if (cellX < 0) {
//left to right
cellX = nX;
}
if (cellX >= nX) {
//right to left
cellX = 0;
continue;
}
if (cellY < 0) {
//top to bottom
cellY = nY;
}
if (cellY >= nY) {
//bottom to top
cellY = 0;
}
//if neighborinig cell is object --> collision with object
if (cells[index(cellX, cellY)].obstacle == true) {
//Boundary handling https://youtu.be/jfk4feD7rFQ?t=2821
TempCells[index(x,y)].Fi[cO[i]] = cells[index(x, y)].Fi[i];
}
//if not, then stream to neighboring air cell with opposite population
TempCells[index(cellX, cellY)].Fi[cO[i]] = cells[index(x, y)].Fi[i];
}
else {
//wall
//SDL GRAPHICS
if (t % 20 == 0) {
SDL_SetRenderDrawColor(renderer, 0, 0, 0, 255);
SDL_RenderDrawPoint(renderer, x, y);
}
}
}
}
}
for (int x = 0; x < nX; x++) {
for (int y = 0; y < nY; y++) {
for (int i = 0; i < nL; i++) {
cells[index(x, y)].Fi[i] = TempCells[index(x, y)].Fi[cO[i]];
}
}
}
//collision:
for (int x = 0; x < nX; x++) {
for (int y = 0; y < nY; y++) {
//density:
cells[index(x, y)].densityOperator();
//momentum:
cells[index(x, y)].momentumOperator();
//velocity:
cells[index(x, y)].velocityOperator();
//Fieq + new new Fi:
for (int i = 0; i < nL; i++) {
cells[index(x, y)].FieqOperator();
}
//SDL Graphics
if (t % 20 == 0) {
if (cells[index(x, y)].obstacle == false) {
SDL_SetRenderDrawColor(renderer, cells[index(x, y)].density, cells[index(x, y)].density , 255 , 255);
SDL_RenderDrawPoint(renderer, x, y);
}
}
}
}
for (int x = 0; x < nX; x++) {
for (int y = 0; y < nY; y++) {
cells[index(x, y)].FiOperator();
}
}
//SDL Graphics
if (t % 20 == 0 ) {
SDL_RenderPresent(renderer);
}
}
return 0;
}
I do realize my code might be a bit messy and not easy to understand at first, and it is definitely not optimal.
If anyone has any experience programming their own LBM in C++ I would like to hear your input.
It seems like my simulation is working, but I do not get those beautiful animations like in https://youtu.be/ZUXmO4hu-20?t=3394
Thanks for any help.
Edit:
I have edited my script to reset density, velocity X/Y and momentum X/Y.
Simulation visualised by density (pink is higher; the color wraps around if density exceeds the 255 color range):
Simulation visualised by density

Dividing an irregular shape into decreasing rectangles

I have to divide an irregular shape into decreasing rectangles: first the largest one, and then progressively smaller ones. I am just wondering whether such a problem is known in the coding world, and how to do it.
The drawing shows what I would like to achieve:
The first rectangle can be found with this piece of code, taken from here
Rect findMinRect(const Mat1b& src)
{
Mat1f W(src.rows, src.cols, float(0));
Mat1f H(src.rows, src.cols, float(0));
Rect maxRect(0, 0, 0, 0);
float maxArea = 0.f;
for (int r = 0; r < src.rows; ++r)
{
for (int c = 0; c < src.cols; ++c)
{
if (src(r, c) == 0)
{
H(r, c) = 1.f + ((r > 0) ? H(r - 1, c) : 0);
W(r, c) = 1.f + ((c > 0) ? W(r, c - 1) : 0);
}
float minw = W(r, c);
for (int h = 0; h < H(r, c); ++h)
{
minw = min(minw, W(r - h, c));
float area = (h + 1) * minw;
if (area > maxArea)
{
maxArea = area;
maxRect = Rect(Point(c - minw + 1, r - h), Point(c + 1, r + 1));
}
}
}
}
return maxRect;
}
Usage is simple
Mat src, src_gray, src_bin;
cv::cvtColor(src, src_gray, COLOR_BGR2GRAY);
src_bin = (src_gray > 1) * 255;
Rect box = findMinRect(~src_bin);
rectangle(src, box, Scalar(0, 0, 255), 2);
cv::imshow("with_rectangle", src);
cv::waitKey();
But that's only the first iteration. In the next iteration we have to deal with at least 3 pieces of the original image, then with 9 pieces, and so on. I think so... I would be grateful for any further suggestions.
I took Paul Gray's answer and wrote an iterative process that consumes the source to find all the areas from largest to smallest in pure C++; the filled portions of the matrix do not need to form a contiguous shape.
This is the rect class that I used. I found out afterwards that there is already a RECT class in windef.h, but I had already made my own at that point.
class rect
{
public:
uint32_t rowStart;
uint32_t colStart;
uint32_t height;
uint32_t width;
rect(uint32_t _rowStart, uint32_t _colStart, uint32_t _height, uint32_t _width);
rect();
};
This is the algorithm Paul Gray posted in their answer, redone without using OpenCV.
bool findMinRect(std::vector<std::vector<bool>> *src, rect *retItem)
{
bool contFlg = false;
uint32_t srcRows = src->size();
uint32_t srcCols = (*src)[0].size();
std::vector<std::vector<float>> W;
W.resize(srcRows);
for (int row = 0; row < srcRows; row++)
{
for (int col = 0; col < srcCols; col++)
{
W[row].push_back(0.f);
}
}
std::vector<std::vector<float>> H;
H.resize(srcRows);
for (int row = 0; row < srcRows; row++)
{
for (int col = 0; col < srcCols; col++)
{
H[row].push_back(0.f);
}
}
rect maxRect(0, 0, 0, 0);
float maxArea = 0.f;
for (int r = 0; r < srcRows; ++r)
{
for (int c = 0; c < srcCols; ++c)
{
if ((*src)[r][c] == true)
{
H[r][c] = 1.f + ((r > 0) ? H[r-1][c] : 0);
W[r][c] = 1.f + ((c > 0) ? W[r][c-1] : 0);
}
float minw = W[r][c];
for (int h = 0; h < H[r][c]; ++h)
{
minw = min(minw, W[r - h][c]);
float area = (h + 1) * minw;
if (area > maxArea)
{
maxArea = area;
maxRect = rect(r - h, c - minw + 1, (r + 1 - (r - h)), (c + 1 - (c - minw + 1)));
contFlg = true;
}
}
}
}
*retItem = maxRect;
return contFlg;
}
The shapeChk method below is a sanity check for the source matrix to make sure it is rectangular; if it is not, it identifies where the issue is.
int shapeChk(std::vector<std::vector<bool>> *src)
{
uint32_t srcRows = src->size();
if (srcRows == 0)
{
return -1;
}
uint32_t srcCols = (*src)[0].size();
if (srcCols == 0)
{
return -2;
}
for (int row = 0; row < srcRows; row++)
{
if ((*src)[row].size() != srcCols)
{
return (-4 - (row + 1));
}
}
return 0;
}
The following method removes the area found from the source matrix in order to rerun the findMinRect method.
void clearclump(std::vector<std::vector<bool>> *src, rect *area)
{
for (int r = (area->rowStart); r < (area->rowStart + area->height); r++)
{
for (int c = (area->colStart); c < (area->colStart + area->width); c++)
{
(*src)[r][c] = false;
}
}
}
And here is the main method that puts it all together.
int areaClump(std::vector<std::vector<bool>> *src, std::vector<rect> *areas)
{
rect retItem(1, 0, 0, 0);
int err = shapeChk(src);
if (err != 0)
{
return err;
}
while (findMinRect(src, &retItem))
{
areas->push_back(retItem);
clearclump(src, &retItem);
}
return 0;
}
When you run areaClump, the source matrix will be entirely consumed and will contain only false entries, while the areas vector contains all the found areas.
Not sure if this works for your purposes or if it's even the best way to handle it, but it worked for me so I thought I'd share the results; maybe it'll help someone.
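For reference, a minimal usage sketch (the small test matrix below is made up purely for illustration, and it assumes the rect class and areaClump from above):
// hypothetical example: a 6x6 matrix with a filled block, consumed by areaClump
std::vector<std::vector<bool>> src(6, std::vector<bool>(6, false));
for (int r = 1; r < 5; r++)
{
    for (int c = 1; c < 4; c++)
    {
        src[r][c] = true; // a 4x3 filled block
    }
}
src[1][4] = true; // plus a small protrusion
std::vector<rect> areas;
if (areaClump(&src, &areas) == 0)
{
    for (size_t i = 0; i < areas.size(); i++)
    {
        printf("rect %u: row %u col %u height %u width %u\n", (unsigned)i,
               areas[i].rowStart, areas[i].colStart, areas[i].height, areas[i].width);
    }
}
The largest rectangle comes out first, followed by the smaller leftover strips.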

Painterly Rendering, Clipping Line, I have an error

I'm making a painterly rendering, and now I'm working on the line clipping part.
But I got this error:
(unsigned)(pt.x*DataType<_Tp>::channels) < (unsigned)(size.p[1]*channels())
And
template<typename _Tp> inline const _Tp& Mat::at(int i0, int i1) const
{
CV_DbgAssert( dims <= 2 && data && (unsigned)i0 < (unsigned)size.p[0] &&
(unsigned)(i1*DataType<_Tp>::channels) < (unsigned)(size.p[1]*channels()) &&
CV_ELEM_SIZE1(DataType<_Tp>::depth) == elemSize1());
return ((const _Tp*)(data + step.p[0]*i0))[i1];
}
Maybe the error is in 'Lineclipping()'.
Please tell me another good idea for clipping the line.
This is my code. I'm just a student, so my coding skills are at a beginner level.
#include <iostream>
#include <opencv2/opencv.hpp>
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <sstream>
#include <cmath>
#include <stdio.h>
#include <cstdlib>
#include <time.h>
#include <random>
using namespace cv;
using namespace std;
random_device rd;
mt19937_64 rng(rd());
double PI = 3.141592;
int perturbLength = (rand() % 6) + 1;
int perturbRadius = ((rand() % 5) + 0) / 10;
int perturbAngle = (rand() % 15) + (-15);
int Maxlength = 10 - perturbLength;
int radius = 2 - perturbRadius;
int angle = 45 - perturbAngle;
double theta = angle*(PI / 180);
void Lineclipping(int x, int y, double theta, int len, Point2d& pt1, Point2d& pt2, Mat& EdgeMap)
{
double length = ceil(len);
float detectPT = len / length;
for (int i = detectPT; i <= len;)
{
Point2d Mpt1(x + length*cos(theta), y + length*sin(theta));
if (EdgeMap.at<uchar>(Mpt1.y, Mpt1.x) > 0)
{
pt1.x = Mpt1.x;
pt1.y = Mpt1.y;
}
else if (i == length)
{
pt1.x = Mpt1.x;
pt1.y = Mpt1.y;
}
i = i + detectPT;
}
for (int i = detectPT; i <= len;)
{
Point2d Mpt2(x - length*cos(theta), y - length*sin(theta));
if (EdgeMap.at<uchar>(Mpt2.y, Mpt2.x) > 0)
{
pt2.x = Mpt2.x;
pt2.y = Mpt2.y;
}
else if (i == length)
{
pt2.x = Mpt2.x;
pt2.y = Mpt2.y;
}
i = i + detectPT;
}
}
Mat EdgeDetect(Mat& referenceimg, Mat& Edge)
{
Mat image = referenceimg.clone();
//Make Edge Map
Mat IntensityImg(image.size(), CV_8U, 255);
Mat sobelx, sobely;
for (int i = 0; i < image.rows; i++)
{
for (int j = 0; j < image.cols; j++)
{
Vec3b intensity = image.at<Vec3b>(j, i);
uchar blue = intensity.val[0];
uchar green = intensity.val[1];
uchar red = intensity.val[2];
IntensityImg.at<uchar>(j, i) = (30 * red + 59 * green + 11 * blue) / 100;
}
}
GaussianBlur(IntensityImg, IntensityImg, Size(5, 5), 0.1, 0.1);
Sobel(IntensityImg, sobelx, CV_32F, 1, 0);
Sobel(IntensityImg, sobely, CV_32F, 0, 1);
Mat magnitudeXY = abs(sobelx), abs(sobely);
magnitudeXY.convertTo(Edge, CV_8U);
Mat mask(3, 3, CV_8UC1, 1);
morphologyEx(Edge, Edge, MORPH_ERODE, mask);
for (int i = 0; i < image.rows; i++)
{
for (int j = 0; j < image.cols; j++)
{
Edge.at<uchar>(j, i) = (Edge.at<uchar>(j, i) > 20 ? 255 : 0);
}
}
imshow("intensity", Edge);
return Edge;
}
void paint(Mat &image, int snum)
{
Mat Edge;
EdgeDetect(image, Edge);
for (int n = 0; n < snum; n++)
{
int x = rand() % image.cols;
int y = rand() % image.rows;
if (image.channels() == 1)
{
image.at<uchar>(x, y) = 255;
}
else if (image.channels() == 3)
{
int length = Maxlength / 2;
Point2d pt1(x + length*cos(theta), y + length*sin(theta));
Point2d pt2(x - length*cos(theta), y - length*sin(theta));
Lineclipping(x, y, theta, length, fpt1, fpt2, Edge);
//draw line
Scalar color(image.at<Vec3b>(y, x)[0], image.at<Vec3b>(y, x)[1], image.at<Vec3b>(y, x)[2]);
line(image, pt1, pt2, color, radius);
}
}
}
int main()
{
Mat Img = imread("fruit.jpg", IMREAD_COLOR);
CV_Assert(Img.data);
Mat resultImage = Img.clone();
Mat sobel = Img.clone();
int num = Img.rows*Img.cols;
paint(resultImage, num);
imshow("result", resultImage);
waitKey();
return 0;
}
And these are the parts where the error occurs.
for (int i = detectPT; i <= len;)
{
Point2d Mpt1(x + length*cos(theta), y + length*sin(theta));
if (EdgeMap.at<uchar>(Mpt1.y, Mpt1.x) > 0)
{
pt1.x = Mpt1.x;
pt1.y = Mpt1.y;
}
else if (i == length)
{
pt1.x = Mpt1.x;
pt1.y = Mpt1.y;
}
i = i + detectPT;
}
for (int i = detectPT; i <= len;)
{
Point2d Mpt2(x - length*cos(theta), y - length*sin(theta));
if (EdgeMap.at<uchar>(Mpt2.y, Mpt2.x) > 0)
{
pt2.x = Mpt2.x;
pt2.y = Mpt2.y;
}
else if (i == length)
{
pt2.x = Mpt2.x;
pt2.y = Mpt2.y;
}
i = i + detectPT;
}
Thank you!
Since I can't compile this and run it, I am going to run through a possible execution and show you where you can hit this out of range error.
int perturbLength = (rand() % 6) + 1; // Range is 1 to 6, let's assume 4
int perturbAngle = (rand() % 15) + (-15); // Range is -15 to -1 let's assume -14
int Maxlength = 10 - perturbLength; // 6
int angle = 45 - perturbAngle; // 44
double theta = angle*(PI / 180); // .7679
Now we get into this code inside the paint method:
int x = rand() % image.cols; // Let's assume image.cols - 2
int y = rand() % image.rows; // Let's assume image.rows - 1
Inside of paint we will reach this code:
int length = Maxlength / 2; // Maxlength is 6 so this is 3
Lineclipping(x, y, theta, length, fpt1, fpt2, Edge);
Which leads to the Lineclipping method and here we get a problem:
Point2d Mpt1(x + length*cos(theta), y + length*sin(theta));
if (EdgeMap.at<uchar>(Mpt1.y, Mpt1.x) > 0)
This is the problem. Remember, x is image.cols - 2. Now we perform the operation x + length * cos(theta), which is (image.cols - 2) + 3 * cos(.7679). 3 * cos(.7679) is roughly 2.16, and since Mpt1.x is passed straight to at<uchar>() it is truncated to 2, so the column index becomes image.cols, which is already one past the last valid column; if the random seed lands at image.cols - 1 instead, the index goes even further out. In either case we go beyond the bounds of the array.
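A possible guard (a sketch only; it reuses the names from Lineclipping above, needs <algorithm> for std::min/std::max, and clamping to the border is just one policy; you could also simply skip samples that fall outside):
Point2d Mpt1(x + length*cos(theta), y + length*sin(theta));
// clamp the sampled point to the valid image range before indexing into EdgeMap
int sampleX = std::min(std::max((int)Mpt1.x, 0), EdgeMap.cols - 1);
int sampleY = std::min(std::max((int)Mpt1.y, 0), EdgeMap.rows - 1);
if (EdgeMap.at<uchar>(sampleY, sampleX) > 0)
{
    pt1.x = sampleX;
    pt1.y = sampleY;
}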

How to split an image in OpenCV based on lines

I am trying to do text segmentation; the attachment below shows the result.
I managed to form lines to divide the image. However, I am stuck on splitting the image according to the lines that I found.
As labeled (red text) in the attached picture, I would like to split the image into 5 different images, and I do not know where I should start. All the methods I found only work for straight lines.
Code - Source:
#include <opencv2/core/core.hpp>
#include <opencv2/highgui.hpp>
#include <opencv2/opencv.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <iostream>
#include <ctype.h>
#include <fstream>
#define _USE_MATH_DEFINES
#include <math.h>
#define JC_VORONOI_IMPLEMENTATION
#include "jc_voronoi.h"
typedef struct compPoint
{
cv::Point pointer;
int siteNum, size ;
};
int maximumSize;
float average=0;
std::vector<compPoint> generatePoint(cv::Mat image);
void generateVoronoi(std::vector<cv::Point> points, int width, int height);
static inline jcv_point remap(const jcv_point* pt, const jcv_point* min, const jcv_point* max, const jcv_point* scale);
static void draw_line(int x0, int y0, int x1, int y1, unsigned char* image, int width, int height, int nchannels, unsigned char* color);
static void plot(int x, int y, unsigned char* image, int width, int height, int nchannels, unsigned char* color);
float areaDifference(int s1,int s2);
float areaDifference(int s1, int s2)
{
if (s1 > s2)
{
return s1 / s2;
}
else
{
return s2 / s1;
}
}
std::vector<compPoint> generatePoint(cv::Mat image)
{
cv::Mat grayscale, binary;
cv::cvtColor(image, grayscale, cv::COLOR_BGR2GRAY);
cv::threshold(grayscale, binary, 190, 255, 1);
std::vector<std::vector<cv::Point> > contours;
std::vector<cv::Vec4i> hierarchy;
cv::findContours(binary, contours, hierarchy, cv::RETR_TREE, cv::CHAIN_APPROX_NONE, cv::Point(0, 0));
std::vector<compPoint> extractedPoint;
cv::Mat drawing = cv::Mat::zeros(binary.size(), CV_8UC3);
cv::Scalar color = cv::Scalar(255, 255, 255);
maximumSize = cv::contourArea(contours[0]);
int skip = 0;
for (int i = 0; i < contours.size(); i++)
{
int jumpPoint = contours[i].size() / (contours[i].size() * 0.12);
bool isInner = false;
cv::Vec4i currentHierarchy = hierarchy[i];
if (contours[i].size() <= 20) //Remove small component
continue;
for (int g = 0; g < contours[i].size(); g = g + jumpPoint) //Sample point from connected component
{
compPoint temp;
temp.pointer = contours[i].at(g);
line(drawing, contours[i].at(g), contours[i].at(g), color, 1, 8, 0);
if (currentHierarchy.val[3] != -1)
{
int currentIndex = currentHierarchy.val[3];
while (hierarchy[currentIndex].val[3] != -1)
{
currentIndex = hierarchy[currentIndex].val[3];
}
temp.siteNum = currentIndex;
temp.size = cv::contourArea(contours[currentIndex]);
isInner = true;
}
else
{
temp.siteNum = i;
temp.size = cv::contourArea(contours[i]);
if (cv::contourArea(contours[i])>maximumSize)
{
maximumSize = cv::contourArea(contours[i]);
}
}
extractedPoint.push_back(temp);
}
if (isInner == false)
{
average = average + cv::contourArea(contours[i]);
skip++;
}
}
average = average/skip;
return extractedPoint;
}
static inline jcv_point remap(const jcv_point* pt, const jcv_point* min, const jcv_point* max, const jcv_point* scale)
{
jcv_point p;
p.x = (pt->x - min->x) / (max->x - min->x) * scale->x;
p.y = (pt->y - min->y) / (max->y - min->y) * scale->y;
return p;
}
static void plot(int x, int y, unsigned char* image, int width, int height, int nchannels, unsigned char* color)
{
if (x < 0 || y < 0 || x >(width - 1) || y >(height - 1))
return;
int index = y * width * nchannels + x * nchannels;
for (int i = 0; i < nchannels; ++i)
{
image[index + i] = color[i];
}
}
static void draw_line(int x0, int y0, int x1, int y1, unsigned char* image, int width, int height, int nchannels, unsigned char* color)
{
int dx = abs(x1 - x0), sx = x0<x1 ? 1 : -1;
int dy = -abs(y1 - y0), sy = y0<y1 ? 1 : -1;
int err = dx + dy, e2; // error value e_xy
for (;;)
{ // loop
plot(x0, y0, image, width, height, nchannels, color);
if (x0 == x1 && y0 == y1) break;
e2 = 2 * err;
if (e2 >= dy) { err += dy; x0 += sx; } // e_xy+e_x > 0
if (e2 <= dx) { err += dx; y0 += sy; } // e_xy+e_y < 0
}
}
void generateVoronoi(std::vector<compPoint> points, int width, int height)
{
int size = points.size();
jcv_point* voronoiPoint = (jcv_point*)malloc(sizeof(jcv_point) * (size_t)size);
for (int i = 0; i < size; i++)
{
voronoiPoint[i].x = (float)points[i].pointer.x;
voronoiPoint[i].y = (float)points[i].pointer.y;
voronoiPoint[i].site = points[i].siteNum;
voronoiPoint[i].totalPoint = points[i].size;
}
jcv_rect* rect = 0;
size_t imagesize = (size_t)(width*height * 3);
unsigned char* image = (unsigned char*)malloc(imagesize);
unsigned char* image2 = (unsigned char*)malloc(imagesize);
memset(image, 0, imagesize);
unsigned char color_pt[] = { 255, 255, 255 };
unsigned char color_line[] = { 220, 220, 220 };
jcv_diagram diagram;
jcv_point dimensions;
dimensions.x = (jcv_real)width;
dimensions.y = (jcv_real)height;
memset(&diagram, 0, sizeof(jcv_diagram));
jcv_diagram_generate(size, (const jcv_point*)voronoiPoint, rect, &diagram);
//Edge
const jcv_edge* edge = jcv_diagram_get_edges(&diagram);
std::vector<filtered_edge> filteredEdge;
float min_x = 0.0, min_y = 0.0;
while (edge) //Remove edge from the same connected component
{
jcv_point p0 = edge->pos[0];
jcv_point p1 = edge->pos[1];
if (edge->sites[0]->p.site != edge->sites[1]->p.site)
{
filteredEdge.push_back(jcv_save_edge(edge));
min_x = min_x + abs(edge->sites[0]->p.x - edge->sites[1]->p.x);
min_y = min_y + abs(edge->sites[0]->p.y - edge->sites[1]->p.y);
}
edge = edge->next;
}
min_x = min_x / filteredEdge.size();
min_y = min_y / filteredEdge.size();
std::vector<filtered_edge> selectedEdge;
for (int i = 0; i < filteredEdge.size(); i++)
{
jcv_point p0 = remap(&filteredEdge.at(i).pos[0], &diagram.min, &diagram.max, &dimensions);
jcv_point p1 = remap(&filteredEdge.at(i).pos[1], &diagram.min, &diagram.max, &dimensions);
float site_x = abs(filteredEdge.at(i).sites[0]->p.x - filteredEdge.at(i).sites[1]->p.x);
float site_y = abs(filteredEdge.at(i).sites[0]->p.y - filteredEdge.at(i).sites[1]->p.y);
float x_difference = abs(filteredEdge.at(i).pos[0].x- filteredEdge.at(i).pos[1].x);
float y_difference = abs(filteredEdge.at(i).pos[0].y - filteredEdge.at(i).pos[1].y);
float areaDiff = areaDifference(filteredEdge.at(i).sites[0]->p.totalPoint, filteredEdge.at(i).sites[1]->p.totalPoint);
if (p0.x - p1.x == 0 && p0.y - p1.y == 0.0) //Remove short edges
continue;
if (areaDiff > 20) //Keep edge between small(text) and big(image) component
{
float difference = abs(filteredEdge.at(i).sites[0]->p.totalPoint - filteredEdge.at(i).sites[1]->p.totalPoint);
if (difference > average*4 )
{
unsigned char color_line2[] = { 0, 220, 220 };
selectedEdge.push_back(filteredEdge.at(i));
draw_line((int)p0.x, (int)p0.y, (int)p1.x, (int)p1.y, image, width, height, 3, color_line2);
continue;
}
}
if (x_difference > y_difference) //Remove edge between close component
{
if (site_y > min_y*1.6)
{
unsigned char color_line2[] = { 220, 0, 220 };
selectedEdge.push_back(filteredEdge.at(i));
draw_line((int)p0.x, (int)p0.y, (int)p1.x, (int)p1.y, image, width, height, 3, color_line2);
}
}
else
{
if (site_x > min_x*2.5)
{
unsigned char color_line2[] = { 220, 220, 0 };
selectedEdge.push_back(filteredEdge.at(i));
draw_line((int)p0.x, (int)p0.y, (int)p1.x, (int)p1.y, image, width, height, 3, color_line2);
}
}
}
jcv_diagram_free(&diagram);
for (int i = 0; i < size; ++i)
{
jcv_point p = remap(&voronoiPoint[i], &diagram.min, &diagram.max, &dimensions);
plot((int)p.x, (int)p.y, image, width, height, 3, color_pt);
}
free(voronoiPoint);
cv::Mat segmentedImg = cv::Mat(height, width, CV_8UC3, image);
cv::imshow("Testing", segmentedImg);
cv::waitKey(0);
free(image);
}
int main()
{
cv::Mat image, skewCorrected;
image = cv::imread("C:\\figure5.PNG");
if (!image.data)
{
std::cout << "Error" << std::endl;
system("PAUSE");
return 0;
}
std::vector<compPoint> points = generatePoint(image);
int width = image.size().width, height = image.size().height;
generateVoronoi(points, width, height);
cv::waitKey(0);
}
Input image:
I don't understand many things in your code so I just appended some lines to do what you want.
1 - Create a Mat of zeros to draw the lines (CV_8U)
Mat dst = cv::Mat(height, width, CV_8U, cvScalar(0.));
2 - Draw the lines (using your points)
line( dst, Point((int)p0.x, (int)p0.y), Point((int)p1.x, (int)p1.y), Scalar( 255, 255, 255 ), 1, 8);
3 - Close the "holes" between the lines (CLOSE morphology operation)
int morph_size = 20; // adjust this value to your image
Mat element = getStructuringElement( MORPH_RECT, Size( 2*morph_size + 1, 2*morph_size+1 ), Point( morph_size, morph_size ) );
// Apply the CLOSE morphology operation
morphologyEx( dst, closed, MORPH_CLOSE, element );
4 - Flood fill to a mask (= "painting" the split areas)
// iterate through the points
for (int i = 0; i < closed.rows; i++ ) {
for (int j = 0; j < closed.cols; j++) {
// if point is not "painted" yet
if (closed.at<uchar>(i, j) == 0) {
// copy Mat before Flood fill
Mat previous_closed = closed.clone();
// Flood fill that seed point ("paint" that area)
floodFill(closed, Point(j, i), 255);
// Get mask with the "painted" area
Mat mask = closed - previous_closed;
/// Copy from segmentedImg using the mask
Mat outputMat;
segmentedImg.copyTo(outputMat, mask);
cv::imshow("Closed lines", closed);
imshow("Splitted Area", outputMat);
waitKey(0);
break;
}
}
}
Area 1:
Area 2:
Area 3:
... And so on for the 5 areas: that loop basically keeps painting the "black areas" in white and creates a Mat from the difference before and after each flood fill.
Full code (your code + these lines):
#include <opencv2/core/core.hpp>
#include <opencv2/highgui.hpp>
#include <opencv2/opencv.hpp>
using namespace cv;
#define JC_VORONOI_IMPLEMENTATION
#include "jc_voronoi.h"
typedef struct compPoint
{
cv::Point pointer;
int siteNum, size ;
};
int maximumSize;
float average=0;
std::vector<compPoint> generatePoint(cv::Mat image);
void generateVoronoi(std::vector<cv::Point> points, int width, int height);
static inline jcv_point remap(const jcv_point* pt, const jcv_point* min, const jcv_point* max, const jcv_point* scale);
static void draw_line(int x0, int y0, int x1, int y1, unsigned char* image, int width, int height, int nchannels, unsigned char* color);
static void plot(int x, int y, unsigned char* image, int width, int height, int nchannels, unsigned char* color);
float areaDifference(int s1,int s2);
float areaDifference(int s1, int s2)
{
if (s1 > s2)
{
return s1 / s2;
}
else
{
return s2 / s1;
}
}
std::vector<compPoint> generatePoint(cv::Mat image)
{
cv::Mat grayscale, binary;
cv::cvtColor(image, grayscale, cv::COLOR_BGR2GRAY);
cv::threshold(grayscale, binary, 190, 255, 1);
std::vector<std::vector<cv::Point> > contours;
std::vector<cv::Vec4i> hierarchy;
cv::findContours(binary, contours, hierarchy, cv::RETR_TREE, cv::CHAIN_APPROX_NONE, cv::Point(0, 0));
std::vector<compPoint> extractedPoint;
cv::Mat drawing = cv::Mat::zeros(binary.size(), CV_8UC3);
cv::Scalar color = cv::Scalar(255, 255, 255);
maximumSize = cv::contourArea(contours[0]);
int skip = 0;
for (int i = 0; i < contours.size(); i++)
{
int jumpPoint = contours[i].size() / (contours[i].size() * 0.12);
bool isInner = false;
cv::Vec4i currentHierarchy = hierarchy[i];
if (contours[i].size() <= 20) //Remove small component
continue;
for (int g = 0; g < contours[i].size(); g = g + jumpPoint) //Sample point from connected component
{
compPoint temp;
temp.pointer = contours[i].at(g);
line(drawing, contours[i].at(g), contours[i].at(g), color, 1, 8, 0);
if (currentHierarchy.val[3] != -1)
{
int currentIndex = currentHierarchy.val[3];
while (hierarchy[currentIndex].val[3] != -1)
{
currentIndex = hierarchy[currentIndex].val[3];
}
temp.siteNum = currentIndex;
temp.size = cv::contourArea(contours[currentIndex]);
isInner = true;
}
else
{
temp.siteNum = i;
temp.size = cv::contourArea(contours[i]);
if (cv::contourArea(contours[i])>maximumSize)
{
maximumSize = cv::contourArea(contours[i]);
}
}
extractedPoint.push_back(temp);
}
if (isInner == false)
{
average = average + cv::contourArea(contours[i]);
skip++;
}
}
average = average/skip;
return extractedPoint;
}
static inline jcv_point remap(const jcv_point* pt, const jcv_point* min, const jcv_point* max, const jcv_point* scale)
{
jcv_point p;
p.x = (pt->x - min->x) / (max->x - min->x) * scale->x;
p.y = (pt->y - min->y) / (max->y - min->y) * scale->y;
return p;
}
static void plot(int x, int y, unsigned char* image, int width, int height, int nchannels, unsigned char* color)
{
if (x < 0 || y < 0 || x >(width - 1) || y >(height - 1))
return;
int index = y * width * nchannels + x * nchannels;
for (int i = 0; i < nchannels; ++i)
{
image[index + i] = color[i];
}
}
static void draw_line(int x0, int y0, int x1, int y1, unsigned char* image, int width, int height, int nchannels, unsigned char* color)
{
int dx = abs(x1 - x0), sx = x0<x1 ? 1 : -1;
int dy = -abs(y1 - y0), sy = y0<y1 ? 1 : -1;
int err = dx + dy, e2; // error value e_xy
for (;;)
{ // loop
plot(x0, y0, image, width, height, nchannels, color);
if (x0 == x1 && y0 == y1) break;
e2 = 2 * err;
if (e2 >= dy) { err += dy; x0 += sx; } // e_xy+e_x > 0
if (e2 <= dx) { err += dx; y0 += sy; } // e_xy+e_y < 0
}
}
void generateVoronoi(std::vector<compPoint> points, int width, int height)
{
/// 1 - Create Mat of zeros to draw the lines
Mat dst = cv::Mat(height,width, CV_8U, cvScalar(0.));
int size = points.size();
jcv_point* voronoiPoint = (jcv_point*)malloc(sizeof(jcv_point) * (size_t)size);
for (int i = 0; i < size; i++)
{
voronoiPoint[i].x = (float)points[i].pointer.x;
voronoiPoint[i].y = (float)points[i].pointer.y;
voronoiPoint[i].site = points[i].siteNum;
voronoiPoint[i].totalPoint = points[i].size;
}
jcv_rect* rect = 0;
size_t imagesize = (size_t)(width*height * 3);
unsigned char* image = (unsigned char*)malloc(imagesize);
memset(image, 0, imagesize);
unsigned char color_pt[] = { 255, 255, 255 };
jcv_diagram diagram;
jcv_point dimensions;
dimensions.x = (jcv_real)width;
dimensions.y = (jcv_real)height;
memset(&diagram, 0, sizeof(jcv_diagram));
jcv_diagram_generate(size, (const jcv_point*)voronoiPoint, rect, &diagram);
//Edge
const jcv_edge* edge = jcv_diagram_get_edges(&diagram);
std::vector<filtered_edge> filteredEdge;
float min_x = 0.0, min_y = 0.0;
while (edge) //Remove edge from the same connected component
{
jcv_point p0 = edge->pos[0];
jcv_point p1 = edge->pos[1];
if (edge->sites[0]->p.site != edge->sites[1]->p.site)
{
filteredEdge.push_back(jcv_save_edge(edge));
min_x = min_x + abs(edge->sites[0]->p.x - edge->sites[1]->p.x);
min_y = min_y + abs(edge->sites[0]->p.y - edge->sites[1]->p.y);
}
edge = edge->next;
}
min_x = min_x / filteredEdge.size();
min_y = min_y / filteredEdge.size();
std::vector<filtered_edge> selectedEdge;
for (int i = 0; i < filteredEdge.size(); i++)
{
jcv_point p0 = remap(&filteredEdge.at(i).pos[0], &diagram.min, &diagram.max, &dimensions);
jcv_point p1 = remap(&filteredEdge.at(i).pos[1], &diagram.min, &diagram.max, &dimensions);
float site_x = abs(filteredEdge.at(i).sites[0]->p.x - filteredEdge.at(i).sites[1]->p.x);
float site_y = abs(filteredEdge.at(i).sites[0]->p.y - filteredEdge.at(i).sites[1]->p.y);
float x_difference = abs(filteredEdge.at(i).pos[0].x- filteredEdge.at(i).pos[1].x);
float y_difference = abs(filteredEdge.at(i).pos[0].y - filteredEdge.at(i).pos[1].y);
float areaDiff = areaDifference(filteredEdge.at(i).sites[0]->p.totalPoint, filteredEdge.at(i).sites[1]->p.totalPoint);
if (p0.x - p1.x == 0 && p0.y - p1.y == 0.0) //Remove short edges
continue;
/// 2 - Draw lines
if (areaDiff > 20) //Keep edge between small(text) and big(image) component
{
float difference = abs(filteredEdge.at(i).sites[0]->p.totalPoint - filteredEdge.at(i).sites[1]->p.totalPoint);
if (difference > average*4 )
{
unsigned char color_line2[] = { 0, 220, 220 };
selectedEdge.push_back(filteredEdge.at(i));
draw_line((int)p0.x, (int)p0.y, (int)p1.x, (int)p1.y, image, width, height, 3, color_line2);
line( dst, Point((int)p0.x, (int)p0.y), Point((int)p1.x, (int)p1.y), Scalar( 255, 255, 255 ), 1, 8);
continue;
}
}
if (x_difference > y_difference) //Remove edge between close component
{
if (site_y > min_y*1.6)
{
unsigned char color_line2[] = { 220, 0, 220 };
selectedEdge.push_back(filteredEdge.at(i));
draw_line((int)p0.x, (int)p0.y, (int)p1.x, (int)p1.y, image, width, height, 3, color_line2);
line( dst, Point((int)p0.x, (int)p0.y), Point((int)p1.x, (int)p1.y), Scalar( 255, 255, 255 ), 1, 8);
}
}
else
{
if (site_x > min_x*2.5)
{
unsigned char color_line2[] = { 220, 220, 0 };
selectedEdge.push_back(filteredEdge.at(i));
draw_line((int)p0.x, (int)p0.y, (int)p1.x, (int)p1.y, image, width, height, 3, color_line2);
line( dst, Point((int)p0.x, (int)p0.y), Point((int)p1.x, (int)p1.y), Scalar( 255, 255, 255 ), 1, 8);
}
}
}
jcv_diagram_free(&diagram);
for (int i = 0; i < size; ++i)
{
jcv_point p = remap(&voronoiPoint[i], &diagram.min, &diagram.max, &dimensions);
plot((int)p.x, (int)p.y, image, width, height, 3, color_pt);
}
free(voronoiPoint);
cv::Mat segmentedImg = cv::Mat(height, width, CV_8UC3, image);
cv::imshow("Testing", segmentedImg);
cv::imshow("Lines", dst);
/// New code:
Mat closed = dst.clone();
/// 3 - Close the "holes" between the lines
int morph_size = 20; // adjust this value to your image
Mat element = getStructuringElement( MORPH_RECT, Size( 2*morph_size + 1, 2*morph_size+1 ), Point( morph_size, morph_size ) );
// Apply the CLOSE morphology operation
morphologyEx( dst, closed, MORPH_CLOSE, element );
imshow("Closed lines", closed);
waitKey(0);
/// 4 - Flood fill to a mask
// iterate through the points
for (int i = 0; i < closed.rows; i++ ) {
for (int j = 0; j < closed.cols; j++) {
// if point is not "painted" yet
if (closed.at<uchar>(i, j) == 0) {
// copy Mat before Flood fill
Mat previous_closed = closed.clone();
// Flood fill that seed point ("paint" that area)
floodFill(closed, Point(j, i), 255);
// Get mask with the "painted" area
Mat mask = closed - previous_closed;
/// 5 - Copy from segmentedImg using the mask
Mat outputMat;
segmentedImg.copyTo(outputMat, mask);
cv::imshow("Closed lines", closed);
imshow("Splitted Area", outputMat);
waitKey(0);
break;
}
}
}
free(image);
}
int main()
{
cv::Mat image, skewCorrected;
image = cv::imread("/home/tribta/Downloads/HI2IT.png");
if (!image.data)
{
std::cout << "Error" << std::endl;
system("PAUSE");
return 0;
}
std::vector<compPoint> points = generatePoint(image);
int width = image.size().width, height = image.size().height;
generateVoronoi(points, width, height);
cv::waitKey(0);
}

Implementation of Harris Corner Detector using Sobel and Gaussian Blur in C++

I want to implement the Harris corner detector. I found this page to be very helpful, since it shows how the detector is implemented using basic OpenCV functions (like GaussianBlur and Sobel):
https://compvisionlab.wordpress.com/2013/03/02/harris-interest-point-detection-implementation-opencv/
Now I also want to implement the Gaussian blur and Sobel myself. If I run my Gaussian or Sobel over some images they work, but in combination with my corner detector they do not. Can anybody help me please? The full code is below, thanks.
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include <iostream>
#include <stdio.h>
#include <stdlib.h>
using namespace cv;
using namespace std;
/// Global variables
Mat src, src_gray, dst;
int thresh = 200;
int max_thresh = 255;
char* source_window = "Source Image";
char* corners_window = "Corner Image";
/// Function header
void cornerHarris_demo(int, void*);
void cornerHarrisMe(int, int, double);
int xGradient(Mat, int, int);
int yGradient(Mat, int, int);
void SobelMe(Mat&,Mat&,int,int);
int borderCheck(int M, int x);
void SepGaussian(Mat&, Mat&, int, int);
/** #function main */
int main(int argc, char** argv)
{
/// Load source image and convert it to gray
src = imread("data/a-real-big-church.jpg", 1);
//Mat src_gray(src.size(), CV_8UC1);
cvtColor(src, src_gray, CV_BGR2GRAY);
/// Create a window and a trackbar
namedWindow(source_window, CV_WINDOW_AUTOSIZE);
createTrackbar("Threshold: ", source_window, &thresh, max_thresh, cornerHarris_demo);
imshow(source_window, src);
cornerHarris_demo(0, 0);
waitKey(0);
return(0);
}
/** #function cornerHarris_demo */
void cornerHarris_demo(int, void*)
{
Mat dst_norm, dst_norm_scaled;
/// Detector parameters
int blockSize = 2;
int apertureSize = 3;
double k = 0.04;
/// Detecting corners
cornerHarrisMe(blockSize, apertureSize, k);
/// Normalizing
normalize(dst, dst_norm, 0, 255, NORM_MINMAX, CV_32FC1, Mat());
convertScaleAbs(dst_norm, dst_norm_scaled);
/// Drawing a circle around corners
for (int j = 0; j < dst_norm.rows; j++)
{
for (int i = 0; i < dst_norm.cols; i++)
{
if ((int)dst_norm.at<float>(j, i) > thresh)
{
circle(dst_norm_scaled, Point(i, j), 5, Scalar(255), 2, 8, 0);
}
}
}
/// Showing the result
namedWindow(corners_window, CV_WINDOW_AUTOSIZE);
imshow(corners_window, dst_norm_scaled);
}
void cornerHarrisMe(int blockSize, int apertureSize, double k)
{
Mat x2y2, xy, mtrace, x_der, y_der, x2_der, y2_der, xy_der, x2g_der, y2g_der, xyg_der;
//1: calculate x and y derivative of image via Sobel
SobelMe(src_gray, x_der, 1, 0);
SobelMe(src_gray, y_der, 0, 1);
//2: calculate other three images in M
pow(x_der, blockSize, x2_der);
pow(y_der, blockSize, y2_der);
multiply(x_der, y_der, xy_der);
//3: gaussain
SepGaussian(x2_der, x2g_der, 1, 0);
SepGaussian(y2_der, y2g_der, 0, 1);
SepGaussian(xy_der, xyg_der, 1, 1);
//4. calculating R with k
multiply(x2g_der, y2g_der, x2y2);
multiply(xyg_der, xyg_der, xy);
pow((x2g_der + y2g_der), blockSize, mtrace);
dst = (x2y2 - xy) - k * mtrace;
}
// gradient in the x direction
int xGradient(Mat image, int x, int y)
{
return image.at<uchar>(y - 1, x - 1) +
2 * image.at<uchar>(y, x - 1) +
image.at<uchar>(y + 1, x - 1) -
image.at<uchar>(y - 1, x + 1) -
2 * image.at<uchar>(y, x + 1) -
image.at<uchar>(y + 1, x + 1);
}
// gradient in the y direction
int yGradient(Mat image, int x, int y)
{
return image.at<uchar>(y - 1, x - 1) +
2 * image.at<uchar>(y - 1, x) +
image.at<uchar>(y - 1, x + 1) -
image.at<uchar>(y + 1, x - 1) -
2 * image.at<uchar>(y + 1, x) -
image.at<uchar>(y + 1, x + 1);
}
void SobelMe(Mat& source, Mat& destination, int xOrder, int yOrder){
int gradX, gradY, sum;
destination = source.clone();
if (xOrder == 1 && yOrder == 0){
for (int y = 1; y < source.rows - 1; y++){
for (int x = 1; x < source.cols - 1; x++){
gradX = xGradient(source, x, y);
sum = abs(gradX);
sum = sum > 255 ? 255 : sum;
sum = sum < 0 ? 0 : sum;
destination.at<uchar>(y, x) = sum;
}
}
}
else if (xOrder == 0 && yOrder == 1){
for (int y = 1; y < source.rows - 1; y++){
for (int x = 1; x < source.cols - 1; x++){
gradY = yGradient(source, x, y);
sum = abs(gradY);
sum = sum > 255 ? 255 : sum;
sum = sum < 0 ? 0 : sum;
destination.at<uchar>(y, x) = sum;
}
}
}
else if (xOrder == 1 && yOrder == 1)
for (int y = 1; y < source.rows - 1; y++){
for (int x = 1; x < source.cols - 1; x++){
gradX = xGradient(source, x, y);
gradY = yGradient(source, x, y);
sum = abs(gradX) + abs(gradY);
sum = sum > 255 ? 255 : sum;
sum = sum < 0 ? 0 : sum;
destination.at<uchar>(y, x) = sum;
}
}
}
int borderCheck(int M, int x){
if (x < 0)
return -x - 1;
if (x >= M)
return 2 * M - x - 1;
return x;
}
void SepGaussian(Mat& source, Mat& desination, int sigmaX, int sigmaY){
// coefficients of 1D gaussian kernel with sigma = 1
double coeffs[] = { 0.0545, 0.2442, 0.4026, 0.2442, 0.0545 };
Mat tempX, tempY;
float sum, x1, y1;
desination = source.clone();
tempY = source.clone();
tempX = source.clone();
// along y - direction
if (sigmaX == 0 && sigmaY == 1){
for (int y = 0; y < source.rows; y++){
for (int x = 0; x < source.cols; x++){
sum = 0.0;
for (int i = -2; i <= 2; i++){
y1 = borderCheck(source.rows, y - i);
sum = sum + coeffs[i + 2] * source.at<uchar>(y1, x);
}
desination.at<uchar>(y, x) = sum;
}
}
}
// along x - direction
else if (sigmaX == 1 && sigmaY == 0){
for (int y = 0; y < source.rows; y++){
for (int x = 0; x < source.cols; x++){
sum = 0.0;
for (int i = -2; i <= 2; i++){
x1 = borderCheck(source.cols, x - i);
sum = sum + coeffs[i + 2] * source.at<uchar>(y, x1);
}
desination.at<uchar>(y, x) = sum;
}
}
}
// along xy - direction
else if (sigmaX == 1 && sigmaY == 1){
for (int y = 0; y < source.rows; y++){
for (int x = 0; x < source.cols; x++){
sum = 0.0;
for (int i = -2; i <= 2; i++){
y1 = borderCheck(source.rows, y - i);
sum = sum + coeffs[i + 2] * source.at<uchar>(y1, x);
}
tempY.at<uchar>(y, x) = sum;
}
}
for (int y = 0; y < source.rows; y++){
for (int x = 0; x < source.cols; x++){
sum = 0.0;
for (int i = -2; i <= 2; i++){
x1 = borderCheck(source.cols, x - i);
sum = sum + coeffs[i + 2] * tempY.at<uchar>(y, x1);
}
desination.at<uchar>(y, x) = sum;
}
}
}
}
The result:
Here is a picture of the result.
The result is now the other way around: it detects areas where there are no corners.
In case there are any questions, feel free to ask me.
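One observation that may or may not explain this (offered as a guess, not a verified fix): SobelMe and SepGaussian write their results into 8-bit clones of the input, so the gradient sign is thrown away by abs() and the squared terms saturate at 255 before R is computed, whereas the corner response is normally accumulated in floating point. A minimal sketch of a float variant of the derivative step, assuming xGradient/yGradient stay as written and the later products and Gaussian smoothing are switched to CV_32F as well:
// hypothetical float variant of SobelMe: keeps the signed gradient, no clamping
void SobelMeF(Mat& source, Mat& destination, int xOrder, int yOrder){
    destination = Mat::zeros(source.size(), CV_32F);
    for (int y = 1; y < source.rows - 1; y++){
        for (int x = 1; x < source.cols - 1; x++){
            if (xOrder == 1 && yOrder == 0)
                destination.at<float>(y, x) = (float)xGradient(source, x, y);
            else if (xOrder == 0 && yOrder == 1)
                destination.at<float>(y, x) = (float)yGradient(source, x, y);
        }
    }
}
With float intermediates, the at<uchar> accesses in SepGaussian would correspondingly become at<float>, and x2_der, y2_der and xy_der can be formed with multiply() on the float Mats instead of pow() on 8-bit images.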