How to rotate a point cloud given 3 points? - c++

I have a 3D depth camera placed above three moving belt lanes and I'm trying to rotate the depth image (or the point cloud) so that the plane of the three lanes sits parallel to the camera's image plane. I'm not experienced at all with point clouds, but after some research I've tried the following:
1. Acquire an XYZ cartesian image from the sensor and turn it into a point cloud vector.
2. Define three points on the point cloud, one on each of the three lanes.
3. Fit a plane through them by finding the plane coefficients.
4. Find the cross product between the plane normal and the z normal, and from that the angle of rotation.
5. Use the Eigen library to transform the PCL cloud and turn it back into an OpenCV Mat.
For whatever reason, I always end up with a bad image with max-int values on one side and zeros on the other. I'm not certain anymore if there's something wrong with the code or if the method above is incorrect to start with.
My code so far:
// helper functions
pcl::PointCloud<pcl::PointXYZ>::Ptr MatToPcl(cv::Mat xyzMat);
cv::Mat PclToMat(pcl::PointCloud<pcl::PointXYZ>::Ptr point_cloud_ptr);
void colorize(cv::Mat& src, cv::Mat& dst);
void clip(cv::Mat& m, const uint16_t lowerBound, const uint16_t upperBound);
while(1)
{
// camera framegrabber object to capture an image
fg->SWTrigger();
if (!fg->WaitForFrame(im.get(), 2000))
{
throw std::runtime_error("WaitForFrame timed out"); // a bare throw outside a catch block would call std::terminate
}
// OpenCV Mat declarations
cv::Mat zDepth, zDepthColor;
cv::Mat xyz = im->XYZImage();
vector<cv::Mat> channels(3);
cv::split(xyz, channels);
zDepth = channels[0];
cv::imwrite("xyzMat.png", xyz);
cv::imwrite("depthImage.png", zDepth);
clip(zDepth, 1250, 1400);
colorize(zDepth, zDepthColor);
cv::imwrite("depthColored.png", zDepthColor);
// specify a 3D point on each lane (w is the image width, defined during camera setup)
cv::Point3i p1, p2, p3;
p1.x = w / 4;
p1.y = 24;
p1.z = zDepth.at<uint16_t>(p1.y, p1.x); // cv::Mat::at takes (row, col), i.e. (y, x)
p2.x = w / 2;
p2.y = 70;
p2.z = zDepth.at<uint16_t>(p2.y, p2.x);
p3.x = int(w * 0.75);
p3.y = 114;
p3.z = zDepth.at<uint16_t>(p3.y, p3.x);
// normal of the plane through the three points
auto cross = (p2 - p1).cross(p3 - p1);
// transform Mats to point clouds
pcl::PointCloud<pcl::PointXYZ>::Ptr floor_plane, xyzCentered(new pcl::PointCloud<pcl::PointXYZ>); // xyzCentered must be allocated before it is dereferenced below
floor_plane = MatToPcl(zDepth);
Eigen::Matrix<float, 1, 3> floor_plane_normal_vector, xy_plane_normal_vector, rotation_vector;
floor_plane_normal_vector[0] = cross.x;
floor_plane_normal_vector[1] = cross.y;
floor_plane_normal_vector[2] = cross.z;
// specify the z normal from the xy-plane
xy_plane_normal_vector[0] = 0.0;
xy_plane_normal_vector[1] = 0.0;
xy_plane_normal_vector[2] = 1.0;
// cross product and normalize vector
rotation_vector = xy_plane_normal_vector.cross(floor_plane_normal_vector);
rotation_vector.normalize(); // normalized() returns a copy and discards it; normalize() works in place
// angle of rotation
float theta = -atan2(rotation_vector.norm(), xy_plane_normal_vector.dot(floor_plane_normal_vector));
// transform plane according to angle
Eigen::Affine3f transform_2 = Eigen::Affine3f::Identity();
transform_2.translation() << 0, 0, 30;
transform_2.rotate(Eigen::AngleAxisf(theta, rotation_vector));
pcl::transformPointCloud(*floor_plane, *xyzCentered, transform_2);
// Pointcloud to Mat again
cv::Mat xyzRot = PclToMat(xyzCentered);
// clipLow and clipHigh values obtained from trackbars
clip(xyzRot, clipLow, clipHigh);
cv::Mat xyzRotColor;
colorize(xyzRot, xyzRotColor);
cv::imshow("result", xyzRotColor);
cv::waitKey(1);
}
pcl::PointCloud<pcl::PointXYZ>::Ptr MatToPcl(cv::Mat xyzMat)
{
/*
* Function: Get from a Mat to pcl pointcloud datatype
* In: cv::Mat
* Out: pcl::PointCloud
*/
//char pr=100, pg=100, pb=100;
pcl::PointCloud<pcl::PointXYZ>::Ptr point_cloud_ptr(new pcl::PointCloud<pcl::PointXYZ>);
vector<cv::Mat> channels(3);
cv::split(xyzMat, channels);
for (int i = 0; i < xyzMat.rows; i++)
{
for (int j = 0; j < xyzMat.cols; j++)
{
pcl::PointXYZ point;
point.x = channels[0].at<short>(i,j);
point.y = channels[1].at<short>(i, j);
point.z = channels[2].at<short>(i, j);
// when color needs to be added:
//uint32_t rgb = (static_cast<uint32_t>(pr) << 16 | static_cast<uint32_t>(pg) << 8 | static_cast<uint32_t>(pb));
//point.rgb = *reinterpret_cast<float*>(&rgb);
point_cloud_ptr->points.push_back(point);
}
}
point_cloud_ptr->width = xyzMat.cols;
point_cloud_ptr->height = xyzMat.rows; // keep the cloud organized so the row-major layout survives the round trip
return point_cloud_ptr;
}
// convert PCL to cv::Mat, taking only the depth values at z.
cv::Mat PclToMat(pcl::PointCloud<pcl::PointXYZ>::Ptr point_cloud_ptr)
{
cv::Mat depth_image;
depth_image.create(132, 176, CV_32F); // 132 x 176 (rows x cols) is the sensor's depth image size
int count = 0;
for (int i = 0; i < 132; i++)
{
for (int j = 0; j < 176; j++)
{
depth_image.at<float>(i, j) = point_cloud_ptr->points.at(count++).z;
}
}
depth_image.convertTo(depth_image, CV_16UC1);
return depth_image;
}
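As an aside, a dimension-agnostic sketch of the same conversion is possible if the cloud stays organized (width = cols, height = rows, as MatToPcl above sets); PclToMatGeneric is a hypothetical name for illustration, not from any library:
// Sketch: derive the Mat size from the organized cloud instead of hard-coding 132x176.
cv::Mat PclToMatGeneric(const pcl::PointCloud<pcl::PointXYZ>::Ptr& cloud)
{
    cv::Mat depth((int)cloud->height, (int)cloud->width, CV_32F);
    for (int i = 0; i < (int)cloud->height; i++)
        for (int j = 0; j < (int)cloud->width; j++)
            depth.at<float>(i, j) = cloud->at(j, i).z; // PCL's at() is (column, row)
    depth.convertTo(depth, CV_16UC1);
    return depth;
}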
/*
* For display purposes with cv::imshow, will convert a 16bit depth image to 8bit 3 channel colored image
* thanks to fmw42 for the function at https://stackoverflow.com/a/67678634/13184944
*/
void colorize(cv::Mat& src, cv::Mat& dst)
{
// stretch the image by rescaling intensity within the output 8-bit range
double oldMin;
double oldMax;
cv::Point minLoc;
cv::Point maxLoc;
cv::minMaxLoc(src, &oldMin, &oldMax, &minLoc, &maxLoc);
double oldRange = oldMax - oldMin;
double newMin = 0.0;
double newMax = 255.0;
double newRange = newMax - newMin;
//cout << oldMin << ' ' << oldMax << ' ' << oldRange << '\n';
// clip the values of the image to the required range
clip(src, oldMin, oldMax);
//TODO: Look at difference between OpenCV normalization and skimage
cv::normalize(src, dst, 0, 255, cv::NORM_MINMAX, CV_8UC1);
//img = (img - cv::Scalar(oldMin)) / (cv::Scalar(oldRange));
//img = (img * cv::Scalar(newRange)) + cv::Scalar(newMin);
cv::Mat channels[3] = { dst, dst, dst };
cv::merge(channels, 3, dst);
cv::Mat C(1, 6, CV_8UC(3));
cv::Vec3b color1 = { 0, 0, 255 };
cv::Vec3b color2 = { 0, 165, 255 };
cv::Vec3b color3 = { 0, 255, 255 };
cv::Vec3b color4 = { 255, 255, 0 };
cv::Vec3b color5 = { 255, 0, 0 };
cv::Vec3b color6 = { 128, 64, 64 };
C.at<cv::Vec3b>(0, 0) = color1;
C.at<cv::Vec3b>(0, 1) = color2;
C.at<cv::Vec3b>(0, 2) = color3;
C.at<cv::Vec3b>(0, 3) = color4;
C.at<cv::Vec3b>(0, 4) = color5;
C.at<cv::Vec3b>(0, 5) = color6;
cv::Mat lut;
cv::resize(C, lut, cv::Size(256, 1), 0.0, 0.0, cv::INTER_LINEAR);
//cout << lut.size << '\n';
cv::LUT(dst, lut, dst);
return;
}
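As an aside, OpenCV ships ready-made colormaps, so the hand-rolled six-colour LUT is not strictly necessary; a minimal sketch (COLORMAP_JET only roughly matches the blue-to-red ramp built above):
// Sketch: rescale to 8 bit, then apply a built-in colormap (produces CV_8UC3).
void colorizeWithColormap(cv::Mat& src, cv::Mat& dst)
{
    cv::Mat src8u;
    cv::normalize(src, src8u, 0, 255, cv::NORM_MINMAX, CV_8UC1);
    cv::applyColorMap(src8u, dst, cv::COLORMAP_JET);
}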
void clip(cv::Mat& m, const uint16_t lowerBound, const uint16_t upperBound)
{
m.setTo(lowerBound, m < lowerBound);
m.setTo(upperBound, m > upperBound);
return;
}
Apologies if this is really basic or something is obviously wrong, but I feel stuck here. I also tried segmentation with RANSAC, but it never aligns the plane the way I want.
Thanks!
Edit: Updated the code to include additional steps and functions. Only the camera initialization is skipped.
The clip and colorize functions help display the 16-bit depth image. My end goal is to use trackbars with clip(zImg, low, high) such that the three lanes always stay vertically aligned (as in, they change color at the same rate) as I change the clip values.
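For reference, here is a minimal sketch of how I understand the alignment step should work, using Eigen's shortest-arc rotation between the fitted plane normal and +Z (alignPlaneToZ is a made-up name for illustration; it is not part of the camera SDK or PCL):
#include <pcl/common/transforms.h>
#include <Eigen/Geometry>
// Sketch: rotate the cloud so that the fitted plane normal maps onto +Z.
// "normal" is e.g. the cross product of two in-plane vectors; it need not be
// unit length, since FromTwoVectors normalizes its arguments internally.
void alignPlaneToZ(const pcl::PointCloud<pcl::PointXYZ>::Ptr& in,
                   pcl::PointCloud<pcl::PointXYZ>::Ptr& out,
                   const Eigen::Vector3f& normal)
{
    Eigen::Quaternionf q =
        Eigen::Quaternionf::FromTwoVectors(normal, Eigen::Vector3f::UnitZ());
    Eigen::Affine3f tf = Eigen::Affine3f::Identity();
    tf.rotate(q);
    pcl::transformPointCloud(*in, *out, tf);
}
This sidesteps the manual cross-product/atan2/AngleAxis steps above, whose signs are easy to get wrong.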
download link with image files: link
Colorized depth image:

Related

Image shearing OpenCV CPP

I'm trying to use cv::warpAffine to perform image transformations, but I have an issue.
Note: I already saw questions/46998895 and it works well too, but it has no interpolation and I wanted to use affine matrices.
So I want to shear the image, with the result going into a bigger image so that the entire sheared image is kept, like this:
Right now my idea is to perform the shear on a flipped image when the values are negative, rather than calculating the new image size with negative values etc., as seen on the Paperspace blog:
When we use a negative shear, the direction of shear is right to left, while x2 is not further in the negative direction than x1. One way to solve this could be to get the other set of corners (that would satisfy the constraint, can you prove it?). Apply the shear transformation and then change to the other set of corners because of the notation we follow.
Well, we could do that, but there's a better method. Here's how to perform a negative shear with shearing factor -alpha:
1. Flip the image and boxes horizontally.
2. Apply the positive shear transformation with shearing factor alpha.
3. Flip the image and boxes horizontally again.
The left ones are my results, and the right ones are the expected results :
So as you can see, it works well when x and y are both positive, or when one is 0 and the other is negative. But when one is positive and the other is negative, or when both are negative, my shear function always produces the same result; see the last 4 rows in the picture above. Again, the left shapes are my results, and the right ones are the mickaShear() results (and obviously the correct ones).
My question is: what do I have to do in my shear_matrix to perform the correct transformation with negative values? Is the cv::flip() method described by paperSpace the right way to achieve this transformation?
Here you can find reproducible code comparing my function with the one from this answer. You will need OpenCV; I work with OpenCV 4.5.1.
#include <opencv2/core/mat.hpp>
#include <opencv2/highgui.hpp>
#include <opencv2/features2d.hpp>
#include <opencv2/xfeatures2d.hpp>
#include <opencv2/imgproc.hpp>
#include <opencv2/core.hpp>
#include <iostream>
cv::Mat myShear(const cv::Mat & src, float sx, float sy) {
cv::Mat tmp;
cv::Mat dst;
std::vector<cv::Point2f> extremePoints;
extremePoints.emplace_back(cv::Point2f(0, 0));
extremePoints.emplace_back(cv::Point2f((float)src.cols, 0));
extremePoints.emplace_back(cv::Point2f((float)src.cols, (float)src.rows));
extremePoints.emplace_back(cv::Point2f(0, (float)src.rows));
for(auto & pt : extremePoints){
pt = cv::Point2f(pt.x + pt.y * abs(sx), pt.y + pt.x * abs(sy));
}
cv::Rect offsets = cv::boundingRect(extremePoints);
cv::Size new_size = offsets.size();
float mat_values[] = {1.0f, abs(sx), 0.0f, abs(sy), 1.0f, 0.0f};
cv::Mat shear_matrix = cv::Mat(2, 3, CV_32F, mat_values);
dst = cv::Mat::zeros(new_size, src.type());
/*
*cv::flip(img, tmp, INT_FLIP_CODE) where INT_FLIP_CODE can be:
* 0 (Vertically)
* 1 (Horizontally)
* -1 (Both)
*/
if(sx < 0.0f and sy < 0.0f) {
cv::flip(src, tmp, -1);
cv::warpAffine(tmp, dst, shear_matrix, new_size, cv::INTER_LINEAR);
cv::flip(dst, dst, -1);
} else if(sx < 0.0f) {
cv::flip(src, tmp, 1);
cv::warpAffine(tmp, dst, shear_matrix, new_size, cv::INTER_LINEAR);
cv::flip(dst, dst, 1);
} else if(sy < 0.0f) {
cv::flip(src, tmp, 0);
cv::warpAffine(tmp, dst, shear_matrix, new_size, cv::INTER_LINEAR);
cv::flip(dst, dst, 0);
} else {
tmp = src.clone();
cv::warpAffine(tmp, dst, shear_matrix, new_size, cv::INTER_LINEAR);
}
return dst;
}
cv::Mat mickaShear(const cv::Mat & input, float Bx, float By)
{
if (Bx*By == 1)
{
std::cerr << "error == 1" << std::endl;
}
if (input.type() != CV_8UC3) return {};
std::vector<cv::Point2f> extremePoints;
extremePoints.emplace_back(0, 0);
extremePoints.emplace_back(input.cols, 0);
extremePoints.emplace_back(input.cols, input.rows);
extremePoints.emplace_back(0, input.rows);
for (auto & pt : extremePoints)
{
pt = cv::Point2f(pt.x + pt.y*Bx, pt.y + pt.x*By);
}
cv::Rect offsets = cv::boundingRect(extremePoints);
cv::Point2f offset = -offsets.tl();
cv::Size resultSize = offsets.size();
cv::Mat shearedImage = cv::Mat::zeros(resultSize, input.type());
for (int j = 0; j < shearedImage.rows; ++j){
for (int i = 0; i < shearedImage.cols; ++i){
cv::Point2f pp((float)i, (float)j);
pp = pp - offset; // go back to original coordinate system
cv::Point2f p;
p.x = int((-pp.y*Bx + pp.x) / (1 - By*Bx));
p.y = int(pp.y - p.x*By);
if ((p.x >= 0 && p.x < (float)input.cols) && (p.y >= 0 && p.y < (float)input.rows)){
shearedImage.at<cv::Vec3b>(j, i) = input.at<cv::Vec3b>(p);
}
}
}
return shearedImage;
}
int main(int argc, char *argv[]){
float x = -0.2f; //CHANGE SIGN TO TEST
float y = 0.5f; //CHANGE SIGN TO TEST
cv::Mat im = cv::imread("MODIFY BY JPG FILE PATH");
// the original loop over "augments" was left over from other code;
// call the two shear functions directly for the comparison
cv::Mat output = mickaShear(im, x, y);
cv::Mat result = myShear(im, x, y);
cv::imshow("result", result);
cv::imshow("output", output);
cv::waitKey(0);
cv::destroyAllWindows();
return 0;
}
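For comparison, here is a sketch of an alternative that avoids the flips entirely: keep the signed shear factors in the matrix and fold the bounding-box offset into the translation column, so negative shears still land inside the output canvas. This is only my reading of how the offset in mickaShear generalizes, not tested against the original images:
cv::Mat shearWithOffset(const cv::Mat& src, float sx, float sy) {
    std::vector<cv::Point2f> corners;
    corners.emplace_back(0.0f, 0.0f);
    corners.emplace_back((float)src.cols, 0.0f);
    corners.emplace_back((float)src.cols, (float)src.rows);
    corners.emplace_back(0.0f, (float)src.rows);
    for (auto& pt : corners)
        pt = cv::Point2f(pt.x + pt.y * sx, pt.y + pt.x * sy); // signed shear, no abs()
    cv::Rect bounds = cv::boundingRect(corners);
    // shift the (possibly negative) top-left corner of the sheared bounds to (0,0)
    float mat_values[] = {1.0f, sx, (float)-bounds.x,
                          sy, 1.0f, (float)-bounds.y};
    cv::Mat shear_matrix(2, 3, CV_32F, mat_values);
    cv::Mat dst;
    cv::warpAffine(src, dst, shear_matrix, bounds.size(), cv::INTER_LINEAR);
    return dst;
}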

Add a cv::Mat inside a cv::Mat at a specific position

I am currently writing software that renders a chess game. To do it, I use OpenCV.
The idea is to have the chess board in a cv::Mat and to add the pieces from a std::array of cv::Mat:
RenderImage::RenderImage() {
backgroundChess = cv::imread("files/board_chess4.png"); /// The chess board
piecesChess[0] = cv::imread("files/pieces/wP.png"); /// The pieces
piecesChess[1] = cv::imread("files/pieces/wB.png");
piecesChess[2] = cv::imread("files/pieces/wN.png");
piecesChess[3] = cv::imread("files/pieces/wR.png");
piecesChess[4] = cv::imread("files/pieces/wQ.png");
piecesChess[5] = cv::imread("files/pieces/wK.png");
piecesChess[6] = cv::imread("files/pieces/bP.png");
piecesChess[7] = cv::imread("files/pieces/bB.png");
piecesChess[8] = cv::imread("files/pieces/bN.png");
piecesChess[9] = cv::imread("files/pieces/bR.png");
piecesChess[10] = cv::imread("files/pieces/bQ.png");
piecesChess[11] = cv::imread("files/pieces/bK.png");
}
Then I wrote a method to try to add a piece to the chess board.
I started by using copyTo:
cv::Mat RenderImage::getImage() {
cv::Mat chess = backgroundChess.clone();
piecesChess[0].copyTo(chess(cv::Rect(0,0, piecesChess[0].cols, piecesChess[0].rows)));
cv::imshow("Display Image", chess);
cv::waitKey(0);
return chess;
}
But I get a black square around the piece:
So I tried to write my own method:
void RenderImage::merge2img(cv::Mat& back, const cv::Mat front, std::size_t posX, std::size_t posY) {
cv::Size bsize { back.size() };
cv::Size fsize { front.size() };
for (std::size_t startX {posX}; startX < posX + fsize.width && startX < bsize.width; startX++) {
for (std::size_t startY {posY}; startY < posY + fsize.height && startY < bsize.height; startY++) {
// index the front image relative to its own origin; note that at<cv::Vec4b> requires
// 4-channel Mats, which cv::imread only returns when called with cv::IMREAD_UNCHANGED
cv::Vec4b fpixel { front.at<cv::Vec4b>(startY - posY, startX - posX) };
cv::Vec4b bpixel { back.at<cv::Vec4b>(startY, startX) };
for (int i {0}; i < 3; i++) {
back.at<cv::Vec4b>(startY, startX)[i] = (fpixel[i] * fpixel[3] + bpixel[i] * (255 - fpixel[3])) / 255;
}
back.at<cv::Vec4b>(startY, startX)[3] = 255;
}
}
}
And I changed the getImage() method:
cv::Mat RenderImage::getImage() {
cv::Mat chess = backgroundChess.clone();
//addWeighted( piecesChess[0], 0.5, backgroundChess, 0.5, 0.0, chess);
merge2img(chess, piecesChess[0], 0, 0);
//piecesChess[0].copyTo(chess(cv::Rect(0,0, piecesChess[0].cols, piecesChess[0].rows)));
cv::imshow("Display Image", chess);
//cv::imshow("Display Image", piecesChess[0]);
cv::waitKey(0);
return chess;
}
But the result has this problem:
So can you help me find a solution to draw the pieces onto my chess board?
Thank you
Thanks to @Christoph Rackwitz: with your link I found the solution.
I converted the Python code by Try2Code (link)
The final code is here:
void RenderImage::overlayImage(cv::Mat& back, const cv::Mat& front, std::size_t posX, std::size_t posY) {
cv::Mat gray, mask, mask_inv, back_bg, front_bg, result;
cv::Size fsize { front.size() };
cv::cvtColor(front, gray, cv::COLOR_BGR2GRAY);
cv::threshold(gray, mask, 0, 255, cv::THRESH_BINARY);
cv::bitwise_not(mask, mask_inv);
cv::Mat roi { back(cv::Range(posY, posY + fsize.height), cv::Range(posX, posX + fsize.width)) }; // rows = y range, cols = x range
cv::bitwise_and(roi, roi, back_bg, mask_inv);
cv::bitwise_and(front, front, front_bg, mask);
cv::add(back_bg, front_bg, result);
cv::addWeighted(roi, 0.1, result, 0.9, 0.0, result);
result.copyTo(back(cv::Rect(posX, posY, fsize.width, fsize.height)));
}
cv::Mat RenderImage::getImage() {
cv::Mat chess = backgroundChess.clone();
overlayImage(chess, piecesChess[0], 128, 0); //The method to merge two images
cv::imshow("Display Image", chess);
cv::waitKey(0);
return chess;
}
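A note on the original black-square problem: cv::imread drops the alpha channel unless cv::IMREAD_UNCHANGED is passed, so the at<cv::Vec4b> accesses in merge2img were reading from 3-channel Mats. Assuming the piece PNGs really carry transparency, a sketch of a direct alpha blend (blendPiece is a made-up name):
// Sketch: blend a BGRA piece onto a BGR board using the piece's alpha channel.
// Pieces must be loaded with cv::imread(path, cv::IMREAD_UNCHANGED).
void blendPiece(cv::Mat& board, const cv::Mat& pieceBGRA, int posX, int posY) {
    CV_Assert(board.type() == CV_8UC3 && pieceBGRA.type() == CV_8UC4);
    cv::Mat roi = board(cv::Rect(posX, posY, pieceBGRA.cols, pieceBGRA.rows));
    for (int y = 0; y < pieceBGRA.rows; y++) {
        for (int x = 0; x < pieceBGRA.cols; x++) {
            cv::Vec4b fp = pieceBGRA.at<cv::Vec4b>(y, x);
            cv::Vec3b& bp = roi.at<cv::Vec3b>(y, x);
            for (int i = 0; i < 3; i++)
                bp[i] = (fp[i] * fp[3] + bp[i] * (255 - fp[3])) / 255;
        }
    }
}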

How to detect the custom shape in the given binary image using OpenCV?

I am looking for a way to detect custom shape in the given binary image using OpenCV.
my custom shape is like below:
I am trying to find if the above shape(or approx. to it) exists in the given 512 by 512 binary image.
I tried different approaches, like cv::matchTemplate(),
but that does not work for scaled and rotated variants of the shape.
I need a solution that can identify these kinds of shapes in a given binary image.
Thanks in advance
Some information on the shape: it is simple, with three connected components separated by lines.
These are the cross-section shapes of the tibia, fibula and talus bones at the ankle joint.
Some more images of the shape:
Edit:
512 by 512 images containing the shape
512 by 512 image without the shape
Here's my algorithm. The idea is to cluster contours (in the sample code simply by dilation/erosion) and, for each contour, to normalize the size and test the shape similarity at different rotations, then compare the image region with the template.
I am using this image as template:
with this contour
and this object image (I removed the white background because I assume only external contours are used).
The algorithm gives this result:
found target shape with similarity 72.5144% and angle: 180 degrees
5 of 26
found target shape with similarity 73.1325% and angle: 0 degrees
6 of 26
found target shape with similarity 71.7287% and angle: 270 degrees
7 of 26
8 of 26
found target shape with similarity 72.3608% and angle: 90 degrees
9 of 26
10 of 26
11 of 26
12 of 26
13 of 26
14 of 26
15 of 26
16 of 26
found target shape with similarity 62.7371% and angle: 60 degrees
17 of 26
found target shape with similarity 62.6041% and angle: 240 degrees
18 of 26
19 of 26
20 of 26
found target shape with similarity 62.8935% and angle: 150 degrees
21 of 26
found target shape with similarity 62.39% and angle: 330 degrees
22 of 26
23 of 26
24 of 26
25 of 26
This is the code (with some dirty helpers for saving the images etc.):
int glob_counter = 0;
double contourMaskSimilarity(float angleDiff, float scale, cv::Point2f cont_center, std::vector<cv::Point> contour, cv::Mat img, cv::Point2f template_center, cv::Mat img_templ)
{
cv::Mat rotationMat = cv::getRotationMatrix2D(cont_center, angleDiff, scale);
cv::Mat rotationMatPersp = cv::Mat::eye(3, 3, CV_64FC1);
for (int y = 0; y < rotationMat.rows; ++y)
for (int x = 0; x < rotationMat.cols; ++x)
{
rotationMatPersp.at<double>(y, x) = rotationMat.at<double>(y, x);
}
//cv::Mat img_tmp = img_color.clone();
cv::Mat img_tmp_mask = cv::Mat::zeros(img.size(), img.type());
std::vector < std::vector<cv::Point> >contours_img;
contours_img.push_back(contour);
cv::drawContours(img_tmp_mask, contours_img, 0, cv::Scalar::all(255), -1);
//cv::circle(img_tmp, cont_center, 3, cv::Scalar(255, 0, 255), 2); // drawing
std::vector<cv::Point2f> points;
std::vector<cv::Point2f> warpedPoints;
points.push_back(cont_center);
cv::perspectiveTransform(points, warpedPoints, rotationMatPersp);
cv::Mat translation = cv::Mat::eye(3, 3, CV_64FC1);
translation.at<double>(0, 2) = template_center.x - warpedPoints[0].x; // x
translation.at<double>(1, 2) = template_center.y - warpedPoints[0].y; // y
cv::Mat transformation = translation * rotationMatPersp; // transformation after each other => 1. rotation 2. translation
cv::Mat imgBin = img.clone();
imgBin = imgBin & img_tmp_mask;
cv::Mat imgBinWarped;
// warp the image to same size and rotation as the template, according to angle and center
//cv::warpPerspective(imgBin, imgBinWarped, transformation, cv::Size(img.size().width * scale, img.size().height * scale));
cv::warpPerspective(imgBin, imgBinWarped, transformation, img_templ.size());
cv::Rect subImage = cv::Rect(0, 0, img_templ.cols, img_templ.rows);
cv::Mat imgSub = imgBinWarped(subImage);
cv::Mat imgMul = imgSub.mul(img_templ); // 255 everywhere where template and current image-region are non-zero. 0 everywhere else
double sum1 = cv::countNonZero(imgMul);
double sum2 = cv::countNonZero(img_templ);
double sum3 = cv::countNonZero(imgSub);
//std::cout << sum1 << " " << sum2 << " " << sum3 << std::endl;
// confidence similar to intersection over union.
// use a better shape-similarity here, like a chamfer matching or a mean-hausdorff-distance?
double conf = sum1 * sum1 / (sum2 * sum3);
//std::cout << conf * 100 << " %" << std::endl;
// TODO: remove!
if (conf > 0.5)
{
cv::imwrite("C:/data/StackOverflow/bone_shapes/out_sub_" + std::to_string(glob_counter) + "_" + std::to_string(conf) + ".png", imgSub);
cv::imwrite("C:/data/StackOverflow/bone_shapes/out_mul_" + std::to_string(glob_counter) + "_" + std::to_string(conf) + ".png", imgMul);
glob_counter++;
}
return conf;
}
int main()
{
cv::Mat img_templ = cv::imread("C:/data/StackOverflow/bone_shapes/bone_shape_template.png", cv::IMREAD_GRAYSCALE);
// binarize the img (I guess it was binarized already?)
cv::Mat templ = img_templ > 0;
cv::Mat img_shapes = cv::imread("C:/data/StackOverflow/bone_shapes/bones_set_blackBG.png", cv::IMREAD_GRAYSCALE);
// binarize the image (it was some grayscale gradients at the object borders...)
cv::Mat img = img_shapes > 200;
// 1. close-operator to merge all the parts of the shapes to a single contour. For other shapes you might need some kind of clustering.
cv::Mat img_closed = img.clone();
int nDilations = 2;
cv::dilate(img_closed, img_closed, cv::getStructuringElement(cv::MorphShapes::MORPH_RECT, cv::Size(3, 3), cv::Point(1, 1)), cv::Point(1, 1), nDilations);
cv::erode(img_closed, img_closed, cv::getStructuringElement(cv::MorphShapes::MORPH_RECT, cv::Size(3, 3), cv::Point(1, 1)), cv::Point(1, 1), nDilations);
cv::Mat template_closed = templ.clone();
int nDilationsTemplate = 2;
cv::dilate(template_closed, template_closed, cv::getStructuringElement(cv::MorphShapes::MORPH_RECT, cv::Size(3, 3), cv::Point(1, 1)), cv::Point(1, 1), nDilationsTemplate);
cv::erode(template_closed, template_closed, cv::getStructuringElement(cv::MorphShapes::MORPH_RECT, cv::Size(3, 3), cv::Point(1, 1)), cv::Point(1, 1), nDilationsTemplate);
// 2. find contours (only necessary once for the template:)
std::vector<std::vector<cv::Point> > contour_template;
cv::findContours(template_closed, contour_template, cv::RETR_EXTERNAL, cv::CHAIN_APPROX_NONE);
std::vector<std::vector<cv::Point> > contours_img;
cv::findContours(img_closed, contours_img, cv::RETR_EXTERNAL, cv::CHAIN_APPROX_NONE);
// display the results for debugging/sanity checks only
cv::Mat img_color, template_color;
cv::cvtColor(img_shapes, img_color, cv::COLOR_GRAY2BGR);
cv::cvtColor(img_templ, template_color, cv::COLOR_GRAY2BGR);
for (int i = 0; i < contour_template.size(); ++i)
cv::drawContours(template_color, contour_template, i, cv::Scalar(255, 0, 255), 4);
cv::imshow("template color", template_color);
for (int i = 0; i < contours_img.size(); ++i)
cv::drawContours(img_color, contours_img, i, cv::Scalar(0, 0, 255), 2);
cv::imshow("img color", img_color);
//cv::waitKey(1);
// make sure the template only has one contour!
if (contour_template.size() != 1)
{
std::cout << "closed template doesnt consist of a single contour" << std::endl;
throw("closed template doesnt consist of a single contour");
}
// 3. get size and orientation of the shapes:
cv::RotatedRect template_orientation = cv::minAreaRect(contour_template[0]);
cv::Point2f template_center; float template_size = 0;
//cv::minEnclosingCircle(contour_template[0], template_center, template_size);
template_center = template_orientation.center;
template_size = (template_orientation.size.width > template_orientation.size.height) ? template_orientation.size.width : template_orientation.size.height;
// now check every contour in the target image:
for (int i = 0; i < contours_img.size(); ++i)
{
std::cout << i << " of " << contours_img.size() << std::endl;
std::vector<cv::Point> cont = contours_img[i];
cv::RotatedRect cont_orientation = cv::minAreaRect(cont);
cv::Point2f cont_center; float cont_size = 0;
cont_center = cont_orientation.center;
cont_size = (cont_orientation.size.width > cont_orientation.size.height) ? cont_orientation.size.width : cont_orientation.size.height;
// angle difference according to rotated rectangle bounding boxes
float angleDiff = template_orientation.angle - cont_orientation.angle;
// scale according to rotated rectangle bounding boxes
float scale = template_size / cont_size;
double bestSimilarity = 0.0;
float bestAngle = 0.0;
float stepDegree = 15; // make smaller if you need a finer rotation resolution.
// check various angles:
for (float j = 0; j < 360.0f; j+=stepDegree)
{
//float angle = angleDiff + j * stepDegree; // initial guess for rotation. Works if shape is really similar
float angle = j;
// similarity computation is slow for small contours. Maybe because of the warping?
double similarity = contourMaskSimilarity(angle, scale, cont_center, contours_img[i], img, template_center, templ);
if (similarity > bestSimilarity)
{
bestSimilarity = similarity;
bestAngle = angle;
}
}
glob_counter++;
if (bestSimilarity > 0.5)
{
cv::drawContours(img_color, contours_img, i, cv::Scalar(0, 255, 0), 2);
std::cout << "found target shape with similarity " << 100*bestSimilarity << "% and angle: " << bestAngle << " degrees"<< std::endl;
}
//cv::waitKey(0);
//cv::waitKey(0);
/*
std::cout << rotationMat << std::endl;
cv::Point2f offsetTemplateImage;
offsetTemplateImage.x = -template_center.x;
offsetTemplateImage.y = -template_center.y;
cv::Rect subImage = cv::Rect(cont_center.x + offsetTemplateImage.x, cont_center.y + offsetTemplateImage.y, img_templ.cols, img_templ.rows);
*/
}
cv::imshow("template", img_templ);
cv::imshow("template binary", templ);
cv::imshow("template closed", template_closed);
cv::imshow("shapes", img_shapes);
cv::imshow("shapes binary", img);
cv::imshow("shapes closed", img_closed);
cv::imshow("result", img_color);
cv::imwrite("C:/data/StackOverflow/bone_shapes/out_template_color.png", template_color);
cv::imwrite("C:/data/StackOverflow/bone_shapes/out_result.png", img_color);
cv::waitKey(0);
}
Here are some samples for warped images and the multiplied binary with the template:
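As a lighter-weight alternative (or a cheap pre-filter before the mask-overlap score), cv::matchShapes compares Hu-moment invariants, which are rotation- and scale-invariant but usually less discriminative. A sketch, reusing contour_template and contours_img from the code above (the 0.5 cut-off is a guess that would need tuning):
// Sketch: Hu-moment distance between template contour and candidate contour.
// Smaller values mean more similar shapes.
double d = cv::matchShapes(contour_template[0], contours_img[i],
                           cv::CONTOURS_MATCH_I1, 0.0);
if (d < 0.5)
    std::cout << "contour " << i << " passes the Hu-moment pre-filter, d = " << d << std::endl;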

Detect a target circle from other circles in an image or webcam frame

I'm trying to detect this circle
within an image with several other circles.
How would you go about doing something like this? I tried combining both colour ranges, but this didn't work.
Here's my current code:
// Threshold for yellow colour on the Drop-off point
int bLowH = 25;
int bHighH = 79;
int bLowS = 0;
int bHighS = 121;
int bLowV = 87;
int bHighV = 196;
// Threshold values for red colour on the Drop-off point
int gLowH = 148;
int gHighH = 180;
int gLowS = 54;
int gHighS = 255;
int gLowV = 96;
int gHighV = 247;
Mat imgHSV;
Mat yellowRange;
Mat redRange;
cvtColor(frame, imgHSV, COLOR_BGR2HSV); //Convert the captured frame from BGR to HSV
//Threshold the images.. Only Keep The threshold values for the dropoff point
inRange(imgHSV, Scalar(bLowH, bLowS, bLowV), Scalar(bHighH, bHighS, bHighV), yellowRange);
inRange(imgHSV, Scalar(gLowH, gLowS, gLowV), Scalar(gHighH, gHighS, gHighV), redRange);
// combine both images and slightly blur...
Mat dropoff_image;
addWeighted(yellowRange, 1.0, redRange, 1.0, 0.0, dropoff_image);
GaussianBlur(dropoff_image, dropoff_image, Size(9, 9), 2, 2);
// Hough Transform to detect circle
vector<Vec3f> dropoff;
HoughCircles(dropoff_image, dropoff, CV_HOUGH_GRADIENT, 1, dropoff_image.rows / 8, 100, 20, 0, 0);
if (dropoff.size() == 0)
{
cout << "No dropoff circle found" << endl;
exit(-1);
}
for (size_t current_circle = 0; current_circle < dropoff.size(); ++current_circle)
{
cout << "circle found" << endl;
Point center(round(dropoff[current_circle][0]), round(dropoff[current_circle][1]));
int radius = round(dropoff[current_circle][2]);
circle(frame, center, radius, Scalar(0, 255, 0), 5);
imshow("Gaussian", dropoff_image);
}
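One refinement worth trying, sketched as a drop-in replacement for the addWeighted/GaussianBlur step above: bitwise_or is the idiomatic way to combine binary masks (addWeighted only happens to work here because the sums saturate), and a morphological opening removes speckle before the Hough transform (the 5x5 ellipse kernel is a guess to tune):
// Sketch: OR the two colour masks, then clean them up before HoughCircles.
Mat dropoff_image;
bitwise_or(yellowRange, redRange, dropoff_image);
morphologyEx(dropoff_image, dropoff_image, MORPH_OPEN,
             getStructuringElement(MORPH_ELLIPSE, Size(5, 5)));
GaussianBlur(dropoff_image, dropoff_image, Size(9, 9), 2, 2);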

Calculating skew of text OpenCV

I am trying to calculate the skew of text in an image so I can correct it for the best OCR results.
Currently this is the function I am using:
double compute_skew(Mat &img)
{
// Binarize
cv::threshold(img, img, 225, 255, cv::THRESH_BINARY);
// Invert colors
cv::bitwise_not(img, img);
cv::Mat element = cv::getStructuringElement(cv::MORPH_RECT, cv::Size(5, 3));
cv::erode(img, img, element);
std::vector<cv::Point> points;
cv::Mat_<uchar>::iterator it = img.begin<uchar>();
cv::Mat_<uchar>::iterator end = img.end<uchar>();
for (; it != end; ++it)
if (*it)
points.push_back(it.pos());
cv::RotatedRect box = cv::minAreaRect(cv::Mat(points));
double angle = box.angle;
if (angle < -45.)
angle += 90.;
cv::Point2f vertices[4];
box.points(vertices);
for(int i = 0; i < 4; ++i)
cv::line(img, vertices[i], vertices[(i + 1) % 4], cv::Scalar(255, 0, 0), 1, CV_AA);
return angle;
}
When I look at the angle in the debugger I get 0.000000.
However when I give it this image I get proper results of a skew of about 16 degrees:
How can I properly detect the skew in the first image?
There are a few other ways to get the skew angle: 1) by Hough transform, 2) by horizontal projection profile: rotate the image through a set of angle bins and calculate the horizontal projection for each; the angle with the greatest horizontal histogram value is the deskew angle.
I have provided an implementation of 1) below. I believe it to be superior to the boxing method you are using, because that method requires you to completely clean the image of any noise, which just isn't possible most of the time.
You should know that the method doesn't work well if there's too much noise. You can reduce noise in different ways depending on what type of "line" you want to treat as the most dominant in the image; I have provided two preprocessing methods for this. Be sure to play with the parameters, thresholds etc. A sketch of method 2) follows right after this paragraph.
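Since method 2) comes up a lot, here is a minimal sketch of the projection-profile idea (not part of the code below; it assumes bw is a binarized, white-on-black image, a skew within ±15 degrees, and the OpenCV 3+ cv::REDUCE_SUM name):
// Sketch: try candidate angles and score how sharply the row sums change;
// strongly peaked row profiles mean horizontally aligned text lines.
double projectionProfileSkew(const cv::Mat& bw)
{
    double bestScore = -1.0, bestAngle = 0.0;
    for (double a = -15.0; a <= 15.0; a += 0.5)
    {
        cv::Mat rotated, rowSum;
        cv::Mat M = cv::getRotationMatrix2D(cv::Point2f(bw.cols / 2.f, bw.rows / 2.f), a, 1.0);
        cv::warpAffine(bw, rotated, M, bw.size());
        cv::reduce(rotated, rowSum, 1, cv::REDUCE_SUM, CV_32F); // one sum per row
        double score = 0.0;
        for (int y = 1; y < rowSum.rows; y++)
        {
            double diff = rowSum.at<float>(y) - rowSum.at<float>(y - 1);
            score += diff * diff;
        }
        if (score > bestScore) { bestScore = score; bestAngle = a; }
    }
    return bestAngle; // rotating the image by bestAngle deskews it
}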
Results (all run using preprocess2 and the same parameter set):
code
#include <opencv2/opencv.hpp>
using namespace cv;
using namespace std;
void hough_transform(Mat& im,Mat& orig,double* skew)
{
double max_r=sqrt(pow(.5*im.cols,2)+pow(.5*im.rows,2));
int angleBins = 180;
Mat acc = Mat::zeros(Size(2*max_r,angleBins),CV_32SC1);
int cenx = im.cols/2;
int ceny = im.rows/2;
for(int x=1;x<im.cols-1;x++)
{
for(int y=1;y<im.rows-1;y++)
{
if(im.at<uchar>(y,x)==255)
{
for(int t=0;t<angleBins;t++)
{
double r = (x-cenx)*cos((double)t/angleBins*CV_PI)+(y-ceny)*sin((double)t/angleBins*CV_PI);
r+=max_r;
acc.at<int>(t,int(r))++;
}
}
}
}
Mat thresh;
normalize(acc,acc,255,0,NORM_MINMAX);
convertScaleAbs(acc,acc);
/*debug
Mat cmap;
applyColorMap(acc,cmap,COLORMAP_JET);
imshow("cmap",cmap);
imshow("acc",acc);*/
Point maxLoc;
minMaxLoc(acc,0,0,0,&maxLoc);
double theta = (double)maxLoc.y/angleBins*CV_PI;
double rho = maxLoc.x-max_r;
if(abs(sin(theta))<0.000001)//check vertical
{
//when vertical, line equation becomes
//x = rho
double m = -cos(theta)/sin(theta);
Point2d p1 = Point2d(rho+im.cols/2,0);
Point2d p2 = Point2d(rho+im.cols/2,im.rows);
line(orig,p1,p2,Scalar(0,0,255),1);
*skew=90;
cout<<"skew angle "<<" 90"<<endl;
}else
{
//convert normal form back to slope intercept form
//y = mx + b
double m = -cos(theta)/sin(theta);
double b = rho/sin(theta)+im.rows/2.-m*im.cols/2.;
Point2d p1 = Point2d(0,b);
Point2d p2 = Point2d(im.cols,im.cols*m+b);
line(orig,p1,p2,Scalar(0,0,255),1);
double skewangle;
skewangle = p1.x-p2.x>0 ? (atan2(p1.y-p2.y,p1.x-p2.x)*180./CV_PI) : (atan2(p2.y-p1.y,p2.x-p1.x)*180./CV_PI);
*skew=skewangle;
cout<<"skew angle "<<skewangle<<endl;
}
imshow("orig",orig);
}
Mat preprocess1(Mat& im)
{
Mat ret = Mat::zeros(im.size(),CV_32SC1);
for(int x=1;x<im.cols-1;x++)
{
for(int y=1;y<im.rows-1;y++)
{
int gy = (im.at<uchar>(y-1,x+1)-im.at<uchar>(y-1,x-1))
+2*(im.at<uchar>(y,x+1)-im.at<uchar>(y,x-1))
+(im.at<uchar>(y+1,x+1)-im.at<uchar>(y+1,x-1));
int gx = (im.at<uchar>(y+1,x-1) -im.at<uchar>(y-1,x-1))
+2*(im.at<uchar>(y+1,x)-im.at<uchar>(y-1,x))
+(im.at<uchar>(y+1,x+1)-im.at<uchar>(y-1,x+1));
int g2 = (gy*gy + gx*gx);
ret.at<int>(y,x)=g2;
}
}
normalize(ret,ret,255,0,NORM_MINMAX);
ret.convertTo(ret,CV_8UC1);
threshold(ret,ret,50,255,THRESH_BINARY);
return ret;
}
Mat preprocess2(Mat& im)
{
// 1) assume white text on black and do local thresholding
// 2) only let a pixel vote if the row above is white and the row below is black (bottom edge of a text line)
Mat thresh;
//thresh=255-im;
thresh=im.clone();
adaptiveThreshold(thresh,thresh,255,CV_ADAPTIVE_THRESH_GAUSSIAN_C,THRESH_BINARY,15,-2);
Mat ret = Mat::zeros(im.size(),CV_8UC1);
for(int x=1;x<thresh.cols-1;x++)
{
for(int y=1;y<thresh.rows-1;y++)
{
bool toprowblack = thresh.at<uchar>(y-1,x)==0 || thresh.at<uchar>(y-1,x-1)==0 || thresh.at<uchar>(y-1,x+1)==0;
bool belowrowblack = thresh.at<uchar>(y+1,x)==0 || thresh.at<uchar>(y+1, x-1)==0 || thresh.at<uchar>(y+1,x+1)==0;
uchar pix=thresh.at<uchar>(y,x);
if((!toprowblack && pix==255 && belowrowblack))
{
ret.at<uchar>(y,x) = 255;
}
}
}
return ret;
}
Mat rot(Mat& im,double thetaRad)
{
cv::Mat rotated;
// thetaRad is in radians; compute the bounding size of the rotated image
double nw = abs(sin(thetaRad))*im.rows+abs(cos(thetaRad))*im.cols;
double nh = abs(cos(thetaRad))*im.rows+abs(sin(thetaRad))*im.cols;
cv::Mat rot_mat = cv::getRotationMatrix2D(Point2d(nw*.5,nh*.5), thetaRad*180/CV_PI, 1);
Mat pos = Mat::zeros(Size(1,3),CV_64FC1);
pos.at<double>(0)=(nw-im.cols)*.5;
pos.at<double>(1)=(nh-im.rows)*.5;
Mat res = rot_mat*pos;
rot_mat.at<double>(0,2) += res.at<double>(0);
rot_mat.at<double>(1,2) += res.at<double>(1);
cv::warpAffine(im, rotated, rot_mat,Size(nw,nh), cv::INTER_LANCZOS4);
return rotated;
}
int main(int argc, char** argv)
{
string src="C:/data/skew.png";
Mat im= imread(src);
Mat gray;
cvtColor(im,gray,CV_BGR2GRAY);
Mat preprocessed = preprocess2(gray);
imshow("preprocessed2",preprocessed);
double skew;
hough_transform(preprocessed,im,&skew);
Mat rotated = rot(im,skew* CV_PI/180);
imshow("corrected",rotated);
waitKey(0);
return 0;
}
The approach you posted has its own "ideal binarization" assumption, and the threshold value directly affects the process. Use an Otsu threshold, or think about the DFT for a generic solution.
Otsu trial:
int main()
{
Mat input = imread("your text");
cvtColor(input, input, CV_BGR2GRAY);
Mat img;
cv::threshold(input, img, 100, 255, cv::THRESH_OTSU);
cv::bitwise_not(img, img);
imshow("img ", img);
waitKey(0);
vector<Point> points;
findNonZero(img, points);
cv::RotatedRect box = cv::minAreaRect(points);
double angle = box.angle;
if (angle < -45.)
angle += 90.;
cv::Point2f vertices[4];
box.points(vertices);
for(int i = 0; i < 4; ++i)
cv::line(img, vertices[i], vertices[(i + 1) % 4], cv::Scalar(255, 0, 0));
imshow("img ", img);
waitKey(0);
return 0;
}