Add a cv::Mat inside a cv::Mat at a specific position - c++

I am currently writing a program that renders a chess game, using OpenCV.
The idea is to keep the chess board in a cv::Mat and add the pieces from a std::array of cv::Mat:
RenderImage::RenderImage() {
backgroundChess = cv::imread("files/board_chess4.png"); /// The chess board
piecesChess[0] = cv::imread("files/pieces/wP.png"); /// The pieces
piecesChess[1] = cv::imread("files/pieces/wB.png");
piecesChess[2] = cv::imread("files/pieces/wN.png");
piecesChess[3] = cv::imread("files/pieces/wR.png");
piecesChess[4] = cv::imread("files/pieces/wQ.png");
piecesChess[5] = cv::imread("files/pieces/wK.png");
piecesChess[6] = cv::imread("files/pieces/bP.png");
piecesChess[7] = cv::imread("files/pieces/bB.png");
piecesChess[8] = cv::imread("files/pieces/bN.png");
piecesChess[9] = cv::imread("files/pieces/bR.png");
piecesChess[10] = cv::imread("files/pieces/bQ.png");
piecesChess[11] = cv::imread("files/pieces/bK.png");
}
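Note: cv::imread converts images to 3-channel BGR by default and discards any alpha channel. If the piece PNGs carry transparency that should survive loading, they would need the extra flag, for example:
piecesChess[0] = cv::imread("files/pieces/wP.png", cv::IMREAD_UNCHANGED); // keeps the BGRA data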
Then I made a method to add a piece to the chess board.
I started by using copyTo:
cv::Mat RenderImage::getImage() {
cv::Mat chess = backgroundChess.clone();
piecesChess[0].copyTo(chess(cv::Rect(0,0, piecesChess[0].cols, piecesChess[0].rows)));
cv::imshow("Display Image", chess);
cv::waitKey(0);
return chess;
}
But I get a black square around the piece:
So I tried to write my own method:
void RenderImage::merge2img(cv::Mat& back, const cv::Mat& front, std::size_t posX, std::size_t posY) {
cv::Size bsize { back.size() };
cv::Size fsize { front.size() };
for (std::size_t startX {posX}; startX < posX + fsize.width && startX < bsize.width; startX++) {
for (std::size_t startY {posY}; startY < posY + fsize.height && startY < bsize.height; startY++) {
// front is indexed relative to its own origin, back in board coordinates
cv::Vec4b fpixel { front.at<cv::Vec4b>(startY - posY, startX - posX) };
cv::Vec4b bpixel { back.at<cv::Vec4b>(startY, startX) };
// standard "over" alpha compositing on the three color channels
for (int i {0}; i < 3; i++) {
back.at<cv::Vec4b>(startY, startX)[i] = (fpixel[i] * fpixel[3] + bpixel[i] * (255 - fpixel[3])) / 255;
}
back.at<cv::Vec4b>(startY, startX)[3] = 255; // result is fully opaque
}
}
}
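Since merge2img reads cv::Vec4b from both images, it silently requires two 4-channel inputs; a guard making that assumption explicit (my addition, not part of the original code) would be:
CV_Assert(back.type() == CV_8UC4);  // merge2img as written needs a BGRA background
CV_Assert(front.type() == CV_8UC4); // and a BGRA sprite; a default imread yields CV_8UC3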
And I changed the getImage() method:
cv::Mat RenderImage::getImage() {
cv::Mat chess = backgroundChess.clone();
//addWeighted( piecesChess[0], 0.5, backgroundChess, 0.5, 0.0, chess);
merge2img(chess, piecesChess[0], 0, 0);
//piecesChess[0].copyTo(chess(cv::Rect(0,0, piecesChess[0].cols, piecesChess[0].rows)));
cv::imshow("Display Image", chess);
//cv::imshow("Display Image", piecesChess[0]);
cv::waitKey(0);
return chess;
}
But the result has this problem:
So can you help me find a solution to draw the pieces on my chess board?
Thank you

Thank you @Christoph Rackwitz, with your link I found the solution.
I converted the Python code from Try2Code (enter link description here).
The final code is here:
void RenderImage::overlayImage(cv::Mat& back, const cv::Mat& front, std::size_t posX, std::size_t posY) {
cv::Mat gray, mask, mask_inv, back_bg, front_bg, result;
cv::Size fsize { front.size() };
cv::cvtColor(front, gray, cv::COLOR_BGR2GRAY);
cv::threshold(gray, mask, 0, 255, cv::THRESH_BINARY); // every non-black piece pixel becomes foreground
cv::bitwise_not(mask, mask_inv);
cv::Mat roi { back(cv::Range(posY, posY + fsize.height), cv::Range(posX, posX + fsize.width)) }; // rows (y) first, then columns (x)
cv::bitwise_and(roi, roi, back_bg, mask_inv);
cv::bitwise_and(front, front, front_bg, mask);
cv::add(back_bg, front_bg, result);
cv::addWeighted(roi, 0.1, result, 0.9, 0.0, result);
result.copyTo(back(cv::Rect(posX, posY, fsize.width, fsize.height)));
}
cv::Mat RenderImage::getImage() {
cv::Mat chess = backgroundChess.clone();
overlayImage(chess, piecesChess[0], 128, 0); //The method to merge two images
cv::imshow("Display Image", chess);
cv::waitKey(0);
return chess;
}
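A side note on this masking approach: the threshold treats every pure-black pixel of the piece as background, so it only works for sprites that contain no true black. If the pieces are instead loaded with cv::IMREAD_UNCHANGED so their alpha channel survives, the mask can be skipped and the real transparency used. A minimal sketch of that alternative (my code, assuming a BGRA sprite that fits inside a BGR board):
void blendPiece(cv::Mat& back, const cv::Mat& front, int posX, int posY) {
    // back: CV_8UC3 board, front: CV_8UC4 piece; the ROI is a view into back
    cv::Mat roi = back(cv::Rect(posX, posY, front.cols, front.rows));
    for (int y = 0; y < front.rows; y++) {
        for (int x = 0; x < front.cols; x++) {
            cv::Vec4b fp = front.at<cv::Vec4b>(y, x);
            cv::Vec3b& bp = roi.at<cv::Vec3b>(y, x);
            for (int c = 0; c < 3; c++) // "over" compositing with the sprite's own alpha
                bp[c] = (fp[c] * fp[3] + bp[c] * (255 - fp[3])) / 255;
        }
    }
}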

Related

How to rotate a point cloud given 3 points?

I have a 3D depth camera placed above three moving belt lanes and I'm trying to rotate the depth image (or the point cloud) so that the three lanes match the camera's angle. I'm not experienced at all with point clouds, but after some research I've tried to do the following:
1. Acquire an XYZ cartesian image from the sensor and turn it into a point cloud vector.
2. Define three points on the point cloud, one on each of the three lanes.
3. Fit a plane through them by finding the plane coefficients.
4. Find the cross product between the plane's normal and the z normal, and from it the angle of rotation (see the sketch after this list).
5. Use the Eigen library to transform the PCL cloud and turn it back into an OpenCV Mat.
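As a sketch of steps 4 and 5 (my assumption about the intended math, not code from the question): Eigen can produce the rotation that maps the fitted plane's normal onto the z-axis in a single call, which avoids the cross-product and angle bookkeeping:
#include <Eigen/Geometry>

// Returns the transform that levels a plane with the given normal.
Eigen::Affine3f alignPlaneToZ(const Eigen::Vector3f& planeNormal) {
    const Eigen::Vector3f z(0.f, 0.f, 1.f);
    // FromTwoVectors yields the quaternion rotating its first argument onto the second
    Eigen::Quaternionf q = Eigen::Quaternionf::FromTwoVectors(planeNormal.normalized(), z);
    Eigen::Affine3f t = Eigen::Affine3f::Identity();
    t.rotate(q);
    return t;
}
The returned transform can then be handed to pcl::transformPointCloud, as in the code below.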
For whatever reason, I always end up with a bad image with max-int values on one side and zeros on the other. I'm not certain anymore if there's something wrong with the code or if the method above is incorrect to start with.
My code so far:
// helper functions
pcl::PointCloud<pcl::PointXYZ>::Ptr MatToPcl(cv::Mat xyzMat);
cv::Mat PclToMat(pcl::PointCloud<pcl::PointXYZ>::Ptr point_cloud_ptr);
void colorize(cv::Mat& src, cv::Mat& dst);
void clip(cv::Mat& m, const uint16_t lowerBound, const uint16_t upperBound);
while(1)
{
// camera framegrabber object to capture an image
fg->SWTrigger();
if (!fg->WaitForFrame(im.get(), 2000))
{
throw;
}
// OpenCV Mat declarations
cv::Mat zDepth, zDepthColor;
cv::Mat xyz = im->XYZImage();
vector<cv::Mat> channels(3);
cv::split(xyz, channels);
zDepth = channels[0];
cv::imwrite("xyzMat.png", xyz);
cv::imwrite("depthImage.png", zDepth);
clip(zDepth, 1250, 1400);
colorize(zDepth, zDepthColor);
cv::imwrite("depthColored.png", zDepthColor);
// specify a 3D point on each lane
cv::Point3i p1, p2, p3;
p1.x = w / 4; // w is the image width, defined in the camera setup skipped here
p1.y = 24;
p1.z = zDepth.at<uint16_t>(p1.x, p1.y);
p2.x = w / 2;
p2.y = 70;
p2.z = zDepth.at<uint16_t>(p2.x, p2.y);
p3.x = int(w * 0.75);
p3.y = 114;
p3.z = zDepth.at<uint16_t>(p3.x, p3.y);
auto cross = (p2 - p1).cross(p3 - p1);
// transform Mats to point clouds
pcl::PointCloud<pcl::PointXYZ>::Ptr floor_plane, xyzCentered;
floor_plane = MatToPcl(zDepth);
Eigen::Matrix<float, 1, 3> floor_plane_normal_vector, xy_plane_normal_vector, rotation_vector;
floor_plane_normal_vector[0] = cross.x;
floor_plane_normal_vector[1] = cross.y;
floor_plane_normal_vector[2] = cross.z;
// specify the z normal from the xy-plane
xy_plane_normal_vector[0] = 0.0;
xy_plane_normal_vector[1] = 0.0;
xy_plane_normal_vector[2] = 1.0;
// cross product and normalize vector
rotation_vector = xy_plane_normal_vector.cross(floor_plane_normal_vector);
rotation_vector.normalize(); // in-place; a bare normalized() call discards its result
// angle of rotation
float theta = -atan2(rotation_vector.norm(), xy_plane_normal_vector.dot(floor_plane_normal_vector));
// transform plane according to angle
Eigen::Affine3f transform_2 = Eigen::Affine3f::Identity();
transform_2.translation() << 0, 0, 30;
transform_2.rotate(Eigen::AngleAxisf(theta, rotation_vector));
pcl::transformPointCloud(*floor_plane, *xyzCentered, transform_2);
// Pointcloud to Mat again
cv::Mat xyzRot = PclToMat(xyzCentered);
// clipLow and clipHigh values obtained from trackbars
clip(xyzRot, clipLow, clipHigh);
cv::Mat xyzRotColor;
colorize(xyzRot, xyzRotColor);
cv::imshow("result", xyzRotColor);
cv::waitKey(1);
}
pcl::PointCloud<pcl::PointXYZ>::Ptr MatToPcl(cv::Mat xyzMat)
{
/*
* Function: Get from a Mat to pcl pointcloud datatype
* In: cv::Mat
* Out: pcl::PointCloud
*/
//char pr=100, pg=100, pb=100;
pcl::PointCloud<pcl::PointXYZ>::Ptr point_cloud_ptr(new pcl::PointCloud<pcl::PointXYZ>);;
vector<cv::Mat> channels(3);
cv::split(xyzMat, channels);
for (int i = 0; i < xyzMat.rows; i++)
{
for (int j = 0; j < xyzMat.cols; j++)
{
pcl::PointXYZ point;
// assumes the XYZ image holds 16-bit signed coordinates (CV_16SC3)
point.x = channels[0].at<short>(i, j);
point.y = channels[1].at<short>(i, j);
point.z = channels[2].at<short>(i, j);
// when color needs to be added:
//uint32_t rgb = (static_cast<uint32_t>(pr) << 16 | static_cast<uint32_t>(pg) << 8 | static_cast<uint32_t>(pb));
//point.rgb = *reinterpret_cast<float*>(&rgb);
point_cloud_ptr->points.push_back(point);
}
}
point_cloud_ptr->width = (int)point_cloud_ptr->points.size();
/*point_cloud_ptr->height = 1;*/
return point_cloud_ptr;
}
// convert PCL to cv::Mat, taking only the depth values at z.
cv::Mat PclToMat(pcl::PointCloud<pcl::PointXYZ>::Ptr point_cloud_ptr)
{
cv::Mat depth_image;
depth_image.create(132, 176, CV_32F); // sensor resolution hard-coded (176x132)
int count = 0;
for (int i = 0; i < 132; i++)
{
for (int j = 0; j < 176; j++)
{
depth_image.at<float>(i, j) = point_cloud_ptr->points.at(count++).z;
}
}
depth_image.convertTo(depth_image, CV_16UC1);
return depth_image;
}
/*
* For display purposes with cv::imshow, will convert a 16bit depth image to 8bit 3 channel colored image
* thanks to fmw42 for the function at https://stackoverflow.com/a/67678634/13184944
*/
void colorize(cv::Mat& src, cv::Mat& dst)
{
// stretch the image by rescaling intensity within the output 8-bit range
double oldMin;
double oldMax;
cv::Point minLoc;
cv::Point maxLoc;
cv::minMaxLoc(src, &oldMin, &oldMax, &minLoc, &maxLoc);
double oldRange = oldMax - oldMin;
double newMin = 0.0;
double newMax = 255.0;
double newRange = newMax - newMin;
//cout << oldMin << ' ' << oldMax << ' ' << oldRange << '\n';
// clip the values of the image to the required range
clip(src, oldMin, oldMax);
//TODO: Look at difference between OpenCV normalization and skimage
cv::normalize(src, dst, 0, 255, cv::NORM_MINMAX, CV_8UC1);
//img = (img - cv::Scalar(oldMin)) / (cv::Scalar(oldRange));
//img = (img * cv::Scalar(newRange)) + cv::Scalar(newMin);
cv::Mat channels[3] = { dst, dst, dst };
cv::merge(channels, 3, dst);
cv::Mat C(1, 6, CV_8UC(3));
cv::Vec3b color1 = { 0, 0, 255 };
cv::Vec3b color2 = { 0, 165, 255 };
cv::Vec3b color3 = { 0, 255, 255 };
cv::Vec3b color4 = { 255, 255, 0 };
cv::Vec3b color5 = { 255, 0, 0 };
cv::Vec3b color6 = { 128, 64, 64 };
C.at<cv::Vec3b>(0, 0) = color1;
C.at<cv::Vec3b>(0, 1) = color2;
C.at<cv::Vec3b>(0, 2) = color3;
C.at<cv::Vec3b>(0, 3) = color4;
C.at<cv::Vec3b>(0, 4) = color5;
C.at<cv::Vec3b>(0, 5) = color6;
cv::Mat lut;
cv::resize(C, lut, cv::Size(256, 1), 0.0, 0.0, cv::INTER_LINEAR);
//cout << lut.size << '\n';
cv::LUT(dst, lut, dst);
return;
}
void clip(cv::Mat& m, const uint16_t lowerBound, const uint16_t upperBound)
{
m.setTo(lowerBound, m < lowerBound);
m.setTo(upperBound, m > upperBound);
return;
}
Apologies if this is really basic or something is obviously wrong, but I feel stuck here. I also tried segmentation with RANSAC, but it never aligns the plane the way I want.
Thanks!
Edit: I updated the code to include the additional steps and functions; only the camera initialization is skipped.
The clip and colorize functions help display the 16-bit depth image. My end goal is to be able to use trackbars with clip(zImg, low, high) such that the three lanes are always vertically aligned (that is, they change color at the same rate) as I change the clip values.
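For the trackbar side of that goal, a minimal sketch (the window name and ranges are placeholders; cv::createTrackbar drives plain int variables):
int clipLow = 1250, clipHigh = 1400; // placeholder initial bounds
cv::namedWindow("result");
cv::createTrackbar("clip low", "result", &clipLow, 4000);
cv::createTrackbar("clip high", "result", &clipHigh, 4000);
// then, inside the grab loop:
// clip(zDepth, (uint16_t)clipLow, (uint16_t)clipHigh);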
download link with image files: link
Colorized depth image:

How do you search for images that have a non-white background using C++?

I wrote a program that uses the OpenCV and boost::filesystem libraries, and it crops images to fit the object in the image. (Photoshop has already been used to replace most of the backgrounds with white.) However, I have thousands and thousands of pictures to sort through. I already know how to use the filesystem library and have no issue traversing the system's directories, but how do I detect images that have a non-white background (ones missed in the Photoshop process)? This incorrect crop is formatted to have a margin and a 1:1 aspect ratio, but it still has the odd grayish background; the image should end up looking like this correct crop. So, how do I determine whether an image has a background like the incorrect crop?
Could you try the code below?
(To test it, create a directory c:/cropping and some subdirectories under it, and put some images in them.)
Hope it will be helpful.
#include "opencv2/imgproc.hpp"
#include "opencv2/highgui.hpp"
#include <iostream>
using namespace cv;
using namespace std;
vector<Rect> divideHW(Mat src, int dim, double threshold1, double threshold2)
{
Mat gray, reduced, canny;
if (src.channels() == 1)
{
gray = src;
}
if (src.channels() == 3)
{
Laplacian(src, gray, CV_8UC1);
cvtColor(gray, gray, COLOR_BGR2GRAY);
imshow("sobel", gray);
}
reduce(gray, reduced, dim, REDUCE_AVG);
Canny(reduced, canny, threshold1, threshold2);
vector<Point> pts;
findNonZero(canny, pts);
vector<Rect> rects;
Rect rect(0, 0, gray.cols, gray.rows);
if (!pts.size())
{
rects.push_back(rect);
}
int ref_x = 0;
int ref_y = 0;
for (size_t i = 0; i< pts.size(); i++)
{
if (dim)
{
rect.height = pts[i].y - ref_y;
rects.push_back(rect);
rect.y = pts[i].y;
ref_y = rect.y;
if (i == pts.size() - 1)
{
rect.height = gray.rows - pts[i].y;
rects.push_back(rect);
}
}
else
{
rect.width = pts[i].x - ref_x;
rects.push_back(rect);
rect.x = pts[i].x;
ref_x = rect.x;
if (i == pts.size() - 1)
{
rect.width = gray.cols - pts[i].x;
rects.push_back(rect);
}
}
}
return rects;
}
int main( int argc, char** argv )
{
int wait_time = 0; // set this value > 0 to avoid waiting on each image
vector<String> filenames;
String folder = "c:/cropping/*.*"; // you can change this value or set it by argv[1]
glob(folder, filenames, true);
for (size_t i = 0; i < filenames.size(); ++i)
{
Mat src = imread(filenames[i]);
if (src.data)
{
vector<Rect> rects = divideHW(src, 0, 0, 0);
if (rects.size() < 3) continue;
Rect border;
border.x = rects[0].width;
border.width = src.cols - rects[rects.size() - 1].width - border.x;
rects = divideHW(src, 1, 0, 20);
if (rects.size() < 3) continue;
border.y = rects[0].height;
border.height = src.rows - rects[rects.size() - 1].height - border.y;
Mat cropped = src(border).clone();
src(border).setTo(Scalar(255, 255, 255));
Scalar _mean = mean(src);
int mean_total = _mean[0] + _mean[1] + _mean[2];
if (mean_total > 763) // object region blanked to white, so only a near-white mean (max 765 = 3*255) passes
{
imwrite(filenames[i] + ".jpg", cropped);
imshow("cropped", cropped);
waitKey(wait_time);
}
}
}
return 0;
}
I think you can compute the gradient of an ROI of your image (all rows of columns 10 to 15, for example).
Then you compute the energy of that gradient (the sum over all pixels of the gradient image).
If the energy is very low, you have a uniform background (you can't know the background color with this algorithm); otherwise you have a textured background.
This is a first approach; you can find all the functions required to do it in OpenCV.
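A minimal sketch of this first approach (img is assumed to be a loaded BGR image; the ROI bounds and the threshold are values to tune):
cv::Mat gray, grad;
cv::cvtColor(img, gray, cv::COLOR_BGR2GRAY);
cv::Mat roi = gray(cv::Rect(10, 0, 5, gray.rows)); // all rows, columns 10..14
cv::Sobel(roi, grad, CV_32F, 1, 0);                // horizontal gradient
double energy = cv::sum(grad.mul(grad))[0];        // sum of squared gradient values
bool uniformBackground = energy < 1e4;             // threshold chosen empirically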
A second approach:
If you are sure that your background is white, you can take the ROI from the first approach, iterate over all of its pixels, and check each one's color. If more than n pixels differ from (255, 255, 255), you can mark the image as having a non-white background. For example:
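A minimal sketch of this second approach (the ROI, tolerance, and pixel budget are assumptions to tune):
// Returns true when the side strip contains too many non-white pixels.
bool hasNonWhiteBackground(const cv::Mat& bgr, int tolerance = 10, int maxBadPixels = 50) {
    cv::Mat strip = bgr(cv::Rect(10, 0, 5, bgr.rows)); // columns 10..14, all rows
    cv::Mat whiteMask;
    cv::inRange(strip,
                cv::Scalar(255 - tolerance, 255 - tolerance, 255 - tolerance),
                cv::Scalar(255, 255, 255), whiteMask); // pixels close to pure white
    int badPixels = strip.rows * strip.cols - cv::countNonZero(whiteMask);
    return badPixels > maxBadPixels;
}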

Incorrect image filtering

I have to create a function that filters a grayscale image in the frequency domain.
The main problem is that the result looks like salt-and-pepper noise.
The main function is:
void FiltroFrequenze::dftMia(){
int rowsExtendedImage = image.rows + kernel.rows - 1;
int colsExtendedImage = image.cols + kernel.cols - 1;
Mat extendedImage(rowsExtendedImage, colsExtendedImage, image.type());
Mat extendedKernel(rowsExtendedImage, colsExtendedImage, image.type());
filtered=extendedImage.clone();
espansione(image,kernel,extendedImage);
Mat trasformata (rowsExtendedImage, colsExtendedImage, image.type());
translation(extendedImage,trasformata);
Mat planes[] ={Mat_<float>(trasformata),Mat::zeros(trasformata.size(),CV_32F)};
Mat complexI;
merge(planes,2,complexI);
dft(complexI,complexI);
cout<<"dft fatto"<<endl;
split(complexI,planes);
Mat magI=planes[0];
Mat moltiplicata(rowsExtendedImage, colsExtendedImage, image.type());
switch (type)
{
case 1:
kernelPassaBasso(raggio);
break;
case 2:
kernelPassaAlto(raggio);
break;
}
showImage(kernel,"kernel");
waitKey(0);
multiply(magI,kernel,moltiplicata);
//multiply(complexI,kernel,moltiplicata);
cout<<"moltiplicata"<<endl;
Mat inversa (rowsExtendedImage, colsExtendedImage, image.type());
dft(moltiplicata,inversa,DFT_INVERSE+DFT_SCALE);
split(inversa,planes);
//magnitude(planes[0],planes[1],planes[0]);
inversa=planes[0];
//inversa.convertTo(inversa,image.type());
cout<<"invertita"<<endl;
showImage(inversa,"inversa");
waitKey(0);
translation(inversa,filtered);
//filtered.convertTo(filtered,image.type());
}
To center the spectrum, I translate the image in frequency by multiplying by (-1)^(x+y) in the spatial domain:
void FiltroFrequenze::translation(Mat image, Mat traslatedImage){
for(int x=0;x<image.rows;x++)
{
for(int y=0;y<image.cols;y++)
{
if(image.channels()==1)
{
traslatedImage.at<float>(x,y)=image.at<float>(x,y)*pow(-1,x+y);
}
else
{
for(int k=0;k<image.channels();k++)
{
traslatedImage.at<Vec3b>(x,y)[k]=image.at<Vec3b>(x,y)[k]*pow(-1,x+y);
}
}
}
}
}
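As an aside, an equivalent centering (not used in the question's code) is to leave the input untouched and swap the spectrum's quadrants after the DFT, the usual "fftshift"; a sketch of that standard trick:
// Swap diagonal quadrants so the zero frequency sits in the center.
// For odd sizes, crop to even first: m = m(cv::Rect(0, 0, m.cols & -2, m.rows & -2));
void fftShift(cv::Mat& m) {
    int cx = m.cols / 2, cy = m.rows / 2;
    cv::Mat q0(m, cv::Rect(0, 0, cx, cy));   // top-left
    cv::Mat q1(m, cv::Rect(cx, 0, cx, cy));  // top-right
    cv::Mat q2(m, cv::Rect(0, cy, cx, cy));  // bottom-left
    cv::Mat q3(m, cv::Rect(cx, cy, cx, cy)); // bottom-right
    cv::Mat tmp;
    q0.copyTo(tmp); q3.copyTo(q0); tmp.copyTo(q3);
    q1.copyTo(tmp); q2.copyTo(q1); tmp.copyTo(q2);
}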
And to create the high-pass and low-pass kernels:
void FiltroFrequenze::kernelPassaBasso(int raggio){
Mat kernelNew(kernel.rows+image.rows-1, kernel.cols+image.cols-1, CV_32FC1);
kernel=kernelNew;
drawCircle(kernel, raggio, 1.0, 0.0);
}
void FiltroFrequenze::kernelPassaAlto(int raggio){
showImage(image,"immagine");
Mat kernelNew(kernel.rows+image.rows-1, kernel.cols+image.cols-1, CV_32FC1);
kernel=kernelNew.clone();
drawCircle(kernel, raggio, 0.0, 1.0);
}
void FiltroFrequenze::drawCircle(Mat image, int raggio, float colorCircle, float colorBackground){
int center[] = { kernel.rows / 2 + 1, kernel.cols / 2 + 1 }; // integer division already floors
for(int y = 0; y < image.rows; y++)
for(int x = 0; x < image.cols; x++)
{
if(pow(x-center[1],2)+pow(y-center[0],2)<pow(raggio,2))
{
image.at<float>(y,x)=colorCircle;
}
else
{
image.at<float>(y,x)=colorBackground;
}
}
}
Thanks for the help.
My suspicion is that what you see is caused by the result of the filtering being a float image. OpenCV displays float images correctly only when the values are in the [0, 1] range, so you'll have to convert your result back to 8-bit or scale it properly.
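A minimal sketch of that rescaling (variable names follow the question):
cv::Mat display;
cv::normalize(inversa, display, 0, 255, cv::NORM_MINMAX); // stretch to the 8-bit range
display.convertTo(display, CV_8U);                        // 8-bit for a faithful imshow
cv::imshow("inversa", display);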

Calculating skew of text OpenCV

I am trying to calculate the skew of text in an image so I can correct it for the best OCR results.
Currently this is the function I am using:
double compute_skew(Mat &img)
{
// Binarize
cv::threshold(img, img, 225, 255, cv::THRESH_BINARY);
// Invert colors
cv::bitwise_not(img, img);
cv::Mat element = cv::getStructuringElement(cv::MORPH_RECT, cv::Size(5, 3));
cv::erode(img, img, element);
std::vector<cv::Point> points;
cv::Mat_<uchar>::iterator it = img.begin<uchar>();
cv::Mat_<uchar>::iterator end = img.end<uchar>();
for (; it != end; ++it)
if (*it)
points.push_back(it.pos());
cv::RotatedRect box = cv::minAreaRect(cv::Mat(points));
double angle = box.angle;
if (angle < -45.)
angle += 90.;
cv::Point2f vertices[4];
box.points(vertices);
for(int i = 0; i < 4; ++i)
cv::line(img, vertices[i], vertices[(i + 1) % 4], cv::Scalar(255, 0, 0), 1, CV_AA);
return angle;
}
When I look at the angle in the debugger I get 0.000000.
However when I give it this image I get proper results of a skew of about 16 degrees:
How can I properly detect the skew in the first image?
There are a few other ways to get the skew angle: 1) Hough transform; 2) horizontal projection profile (rotate the image over different angle bins, calculate the horizontal projection for each, and take the angle with the greatest horizontal histogram value as the deskew angle).
I have provided an implementation of 1) below, and a sketch of 2) after the code. I believe the Hough approach is superior to the boxing method you are using, because the boxing method requires you to completely clean the image of any noise, which just isn't possible most of the time.
You should know that the method doesn't work well if there's too much noise. You can reduce noise in different ways depending on what type of "line" you want to treat as the most dominant in the image; I have provided two preprocessing methods for this. Be sure to play with the parameters, thresholds, etc.
Results (all runs use preprocess2 with the same parameter set):
Code:
#include <opencv2/opencv.hpp>
using namespace cv;
using namespace std;
void hough_transform(Mat& im,Mat& orig,double* skew)
{
double max_r=sqrt(pow(.5*im.cols,2)+pow(.5*im.rows,2));
int angleBins = 180;
Mat acc = Mat::zeros(Size(2*max_r,angleBins),CV_32SC1);
int cenx = im.cols/2;
int ceny = im.rows/2;
for(int x=1;x<im.cols-1;x++)
{
for(int y=1;y<im.rows-1;y++)
{
if(im.at<uchar>(y,x)==255)
{
for(int t=0;t<angleBins;t++)
{
double r = (x-cenx)*cos((double)t/angleBins*CV_PI) + (y-ceny)*sin((double)t/angleBins*CV_PI);
r+=max_r;
acc.at<int>(t,int(r))++;
}
}
}
}
Mat thresh;
normalize(acc,acc,255,0,NORM_MINMAX);
convertScaleAbs(acc,acc);
/*debug
Mat cmap;
applyColorMap(acc,cmap,COLORMAP_JET);
imshow("cmap",cmap);
imshow("acc",acc);*/
Point maxLoc;
minMaxLoc(acc,0,0,0,&maxLoc);
double theta = (double)maxLoc.y/angleBins*CV_PI;
double rho = maxLoc.x-max_r;
if(abs(sin(theta))<0.000001)//check vertical
{
//when vertical, the line equation becomes x = rho
Point2d p1 = Point2d(rho+im.cols/2,0);
Point2d p2 = Point2d(rho+im.cols/2,im.rows);
line(orig,p1,p2,Scalar(0,0,255),1);
*skew=90;
cout<<"skew angle "<<" 90"<<endl;
}else
{
//convert normal form back to slope intercept form
//y = mx + b
double m = -cos(theta)/sin(theta);
double b = rho/sin(theta)+im.rows/2.-m*im.cols/2.;
Point2d p1 = Point2d(0,b);
Point2d p2 = Point2d(im.cols,im.cols*m+b);
line(orig,p1,p2,Scalar(0,0,255),1);
double skewangle;
skewangle = p1.x-p2.x > 0 ? (atan2(p1.y-p2.y, p1.x-p2.x)*180./CV_PI) : (atan2(p2.y-p1.y, p2.x-p1.x)*180./CV_PI);
*skew=skewangle;
cout<<"skew angle "<<skewangle<<endl;
}
imshow("orig",orig);
}
Mat preprocess1(Mat& im)
{
Mat ret = Mat::zeros(im.size(),CV_32SC1);
for(int x=1;x<im.cols-1;x++)
{
for(int y=1;y<im.rows-1;y++)
{
int gy = (im.at<uchar>(y-1,x+1)-im.at<uchar>(y-1,x-1))
+2*(im.at<uchar>(y,x+1)-im.at<uchar>(y,x-1))
+(im.at<uchar>(y+1,x+1)-im.at<uchar>(y+1,x-1));
int gx = (im.at<uchar>(y+1,x-1) -im.at<uchar>(y-1,x-1))
+2*(im.at<uchar>(y+1,x)-im.at<uchar>(y-1,x))
+(im.at<uchar>(y+1,x+1)-im.at<uchar>(y-1,x+1));
int g2 = (gy*gy + gx*gx);
ret.at<int>(y,x)=g2;
}
}
normalize(ret,ret,255,0,NORM_MINMAX);
ret.convertTo(ret,CV_8UC1);
threshold(ret,ret,50,255,THRESH_BINARY);
return ret;
}
Mat preprocess2(Mat& im)
{
// 1) assume white on black and do local thresholding
// 2) only allow voting when the top is white and the bottom is black (bottom of a text line)
Mat thresh;
//thresh=255-im;
thresh=im.clone();
adaptiveThreshold(thresh,thresh,255,CV_ADAPTIVE_THRESH_GAUSSIAN_C,THRESH_BINARY,15,-2);
Mat ret = Mat::zeros(im.size(),CV_8UC1);
for(int x=1;x<thresh.cols-1;x++)
{
for(int y=1;y<thresh.rows-1;y++)
{
bool toprowblack = thresh.at<uchar>(y-1,x)==0 || thresh.at<uchar>(y-1,x-1)==0 || thresh.at<uchar>(y-1,x+1)==0;
bool belowrowblack = thresh.at<uchar>(y+1,x)==0 || thresh.at<uchar>(y+1, x-1)==0 || thresh.at<uchar>(y+1,x+1)==0;
uchar pix=thresh.at<uchar>(y,x);
if((!toprowblack && pix==255 && belowrowblack))
{
ret.at<uchar>(y,x) = 255;
}
}
}
return ret;
}
Mat rot(Mat& im,double thetaRad)
{
cv::Mat rotated;
double nw = abs(sin(thetaRad))*im.rows+abs(cos(thetaRad))*im.cols;
double nh = abs(cos(thetaRad))*im.rows+abs(sin(thetaRad))*im.cols;
cv::Mat rot_mat = cv::getRotationMatrix2D(Point2d(nw*.5,nh*.5), thetaRad*180/CV_PI, 1);
Mat pos = Mat::zeros(Size(1,3),CV_64FC1);
pos.at<double>(0)=(nw-im.cols)*.5;
pos.at<double>(1)=(nh-im.rows)*.5;
Mat res = rot_mat*pos;
rot_mat.at<double>(0,2) += res.at<double>(0);
rot_mat.at<double>(1,2) += res.at<double>(1);
cv::warpAffine(im, rotated, rot_mat,Size(nw,nh), cv::INTER_LANCZOS4);
return rotated;
}
int main(int argc, char** argv)
{
string src="C:/data/skew.png";
Mat im= imread(src);
Mat gray;
cvtColor(im,gray,CV_BGR2GRAY);
Mat preprocessed = preprocess2(gray);
imshow("preprocessed2",preprocessed);
double skew;
hough_transform(preprocessed,im,&skew);
Mat rotated = rot(im,skew* CV_PI/180);
imshow("corrected",rotated);
waitKey(0);
return 0;
}
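For completeness, here is a sketch of method 2 mentioned above, the projection profile (the angle range, step, and difference-based score are my choices to tune; the input is a white-on-black binary image):
double projection_profile_skew(const cv::Mat& bin) {
    double bestAngle = 0, bestScore = -1;
    for (double a = -15; a <= 15; a += 0.5) {
        cv::Mat rotMat = cv::getRotationMatrix2D(cv::Point2f(bin.cols / 2.f, bin.rows / 2.f), a, 1.0);
        cv::Mat rotated, rowSum;
        cv::warpAffine(bin, rotated, rotMat, bin.size());
        cv::reduce(rotated, rowSum, 1, cv::REDUCE_SUM, CV_32F); // one sum per row
        double score = 0; // sharp line/gap alternation gives large row-to-row jumps
        for (int r = 1; r < rowSum.rows; r++) {
            float d = rowSum.at<float>(r) - rowSum.at<float>(r - 1);
            score += d * d;
        }
        if (score > bestScore) { bestScore = score; bestAngle = a; }
    }
    return bestAngle;
}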
The approach you posted relies on its own "ideal binarization" assumption: the threshold value directly affects the process. Use Otsu's threshold instead, or think about a DFT-based method for a generic solution.
Otsu trial:
int main()
{
Mat input = imread("your text");
cvtColor(input, input, CV_BGR2GRAY);
Mat img;
cv::threshold(input, img, 100, 255, cv::THRESH_OTSU); // with THRESH_OTSU the fixed threshold value (100) is ignored
cv::bitwise_not(img, img);
imshow("img ", img);
waitKey(0);
vector<Point> points;
findNonZero(img, points);
cv::RotatedRect box = cv::minAreaRect(points);
double angle = box.angle;
if (angle < -45.)
angle += 90.;
cv::Point2f vertices[4];
box.points(vertices);
for(int i = 0; i < 4; ++i)
cv::line(img, vertices[i], vertices[(i + 1) % 4], cv::Scalar(255, 0, 0));
imshow("img ", img);
waitKey(0);
return 0;
}

Need only one edge in Canny edge algorithm

When I use the Canny edge algorithm, it produces two edges on either side of the thick colored line, as expected, but I want only one edge to be displayed, to make my line and curve detection algorithm much less complicated. Any ideas on how I can make that happen?
Here is the code:
bool CannyEdgeDetection(DataStructure& col)
{
Mat src, src_gray;
Mat dst, detected_edges, fin;
int WhiteCount = 0, BCount = 0;
char szFil1[32] = "ocv.bmp";
char szFil2[32] = "dst.bmp";
src = imread(szFil1);
dst = imread(szFil1);
cvtColor(src, src_gray, COLOR_BGR2GRAY); // Canny needs a single-channel 8-bit image
blur( src_gray, detected_edges, Size(3,3) );
Canny( detected_edges, dst, 100, 200, 3 );
imwrite(szFil2, dst );
IplImage* img = cvLoadImage(szFil2);
int height = img->height;
int width = img->width;
int step = img->widthStep;
int channels = img->nChannels;
uchar * datau = (uchar *)img->imageData;
for(int i=0;i<height;i++){
for(int j=0;j<width;j++){
for(int k=0;k<channels;k++){
datau[i*step+j*channels+k] = 255 - datau[i*step+j*channels+k];
if (datau[i*step+j*channels+k]==0){
WhiteCount++;
col.pixel_col [i][j] = 2;
}
else{BCount++;
col.pixel_col[i][j] = 0;
}
}
}
}
cvSaveImage("img.bmp" ,img);
return 0;
}
This is not the original image, but it is similar:
Which part do I comment out to be able to read black images on white backgrounds? Or any colored image?
// assumed context: img is the binary input (white shape on black, CV_8UC1)
cv::Mat temp;
cv::Mat skel = cv::Mat::zeros(img.size(), CV_8UC1);
cv::Mat element = cv::getStructuringElement(cv::MORPH_CROSS, cv::Size(3, 3));
bool done;
do
{
cv::morphologyEx(img, temp, cv::MORPH_OPEN, element);
cv::bitwise_not(temp, temp);
cv::bitwise_and(img, temp, temp);
cv::bitwise_or(skel, temp, skel);
cv::erode(img, img, element);
double max;
cv::minMaxLoc(img, 0, &max);
done = (max == 0);
} while (!done);
That process is called skeletonization or thinning; you can google those terms.
Here is a simple method for skeletonization: skeletonization OpenCV in C#
Below is the output I got when applying the above method to your image (the image is inverted before skeletonization, because the method works on white shapes on black backgrounds, the opposite of your input image).
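For reference, a sketch of that preparation step (the file name is a placeholder): binarize, then invert so the line is white on black before running the loop above:
cv::Mat img = cv::imread("input.png", cv::IMREAD_GRAYSCALE);
cv::threshold(img, img, 127, 255, cv::THRESH_BINARY);
cv::bitwise_not(img, img); // white line on black background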