Draw image around Aruco marker - c++

I'm trying to build a simple Augmented Reality application using OpenCV 4.1.1 and Aruco. The goal is to overlay an image on top of a marker, but have the image extend beyond the edges of the marker.
I have calibrated my camera and obtained the camera matrix and distortion coefficients. Using OpenCV's warpPerspective I can draw an image on top of a marker, but I can only tie it to the corners of the marker, so it stays within the marker's border.
std::vector<int> ids;
std::vector<std::vector<Point2f>> corners;
// detect markers
aruco::detectMarkers(image, dictionary, corners, ids);
if (ids.size() > 0) {
    // file with image to draw
    auto file = "square.png";
    // image to draw on the marker
    Mat im_src = imread(file);
    if (im_src.empty()) {
        std::cout << file << ": File not found" << std::endl;
        continue; // this snippet runs inside the frame-capture loop
    }
    // flip(im_src, im_src, 1);
    // corners of the source image
    std::vector<Point2f> pts_src;
    pts_src.push_back(Point2f(0, 0));
    pts_src.push_back(Point2f(im_src.cols - 1, 0));
    pts_src.push_back(Point2f(im_src.cols - 1, im_src.rows - 1));
    pts_src.push_back(Point2f(0, im_src.rows - 1));
    // use the aruco marker
    for (int i = 0; i < ids.size(); i++) {
        if (ids[i] == 69) {
            aruco::drawDetectedMarkers(imageCopy, corners, ids);
            std::vector<Point> pts_dst;
            pts_dst.push_back(corners[i][0]);
            pts_dst.push_back(corners[i][1]);
            pts_dst.push_back(corners[i][2]);
            pts_dst.push_back(corners[i][3]);
            Mat h = findHomography(pts_src, pts_dst);
            Mat im_out;
            warpPerspective(im_src, im_out, h, imageCopy.size());
            fillConvexPoly(imageCopy, pts_dst, 0, 16);
            imageCopy = imageCopy + im_out;
        }
    }
}
Here is an image of what I have and what I want. I think I need to use 3D points to draw the image, but I'm not sure how to do that. Any help would be appreciated.

As you said in the comment, if the marker length is available, say l0, you can define the length of the desired square as l = l0 * 1.05 or something.
for (int i = 0; i < ids.size(); i++) {
    if (ids[i] == 69) {
        aruco::drawDetectedMarkers(imageCopy, corners, ids);
        // Estimate the pose of the marker
        std::vector<cv::Vec3d> rvecs, tvecs;
        cv::aruco::estimatePoseSingleMarkers(
            corners, l0, camera_matrix, dist_coeffs,
            rvecs, tvecs
        );
        drawSquare(
            imageCopy, camera_matrix, dist_coeffs, rvecs[i], tvecs[i],
            l0
        );
    }
}
void drawSquare(
    cv::InputOutputArray image, cv::InputArray cameraMatrix,
    cv::InputArray distCoeffs, cv::InputArray rvec, cv::InputArray tvec,
    float l0
)
{
    float l = l0 * 1.05f;    // new square is 5% larger than the aruco marker
    float half_l = l / 2.0f;
    // Define the square in the marker's coordinate frame (z = 0 plane,
    // centered on the marker). rvec/tvec bring it into the camera frame.
    std::vector<cv::Point3f> squarePoints;
    squarePoints.push_back(cv::Point3f(half_l, half_l, 0));
    squarePoints.push_back(cv::Point3f(half_l, -half_l, 0));
    squarePoints.push_back(cv::Point3f(-half_l, -half_l, 0));
    squarePoints.push_back(cv::Point3f(-half_l, half_l, 0));
    // Project the 3D square onto the image.
    std::vector<cv::Point2f> imagePoints;
    cv::projectPoints(
        squarePoints, rvec, tvec, cameraMatrix, distCoeffs, imagePoints
    );
    // Draw the square on the image.
    cv::line(image, imagePoints[0], imagePoints[1], cv::Scalar(255, 0, 0), 3);
    cv::line(image, imagePoints[1], imagePoints[2], cv::Scalar(255, 0, 0), 3);
    cv::line(image, imagePoints[2], imagePoints[3], cv::Scalar(255, 0, 0), 3);
    cv::line(image, imagePoints[3], imagePoints[0], cv::Scalar(255, 0, 0), 3);
}
I did not test this, but I used similar code in a different project. If you run into any issues, please let me know and I will update the code above.
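If the goal is to overlay im_src over the enlarged square rather than just draw its outline, the projected points can replace the marker corners in the question's findHomography/warpPerspective step. An untested sketch, assuming the projection is done at the call site (or drawSquare is changed to return imagePoints) and that the point order matches pts_src:
// Hypothetical continuation: warp im_src onto the projected, enlarged square.
// Assumes imagePoints comes from the projectPoints call above and that its
// order corresponds to pts_src (top-left, top-right, bottom-right, bottom-left);
// reorder the square's 3D corners if it does not.
std::vector<cv::Point2f> pts_dst(imagePoints.begin(), imagePoints.end());
cv::Mat h = cv::findHomography(pts_src, pts_dst);
cv::Mat im_out;
cv::warpPerspective(im_src, im_out, h, imageCopy.size());
// Black out the destination quad, then add the warped image, as in the question.
std::vector<cv::Point> poly(pts_dst.begin(), pts_dst.end());
cv::fillConvexPoly(imageCopy, poly, cv::Scalar(0, 0, 0), cv::LINE_AA);
imageCopy = imageCopy + im_out;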

Related

Opencv: false number of vertices of a circle

I'm struggling with shape detection using OpenCV for C++. Straight-edged figures such as triangles and rectangles are detected without trouble, but for a circle it estimates the number of vertices as 6-8. Could somebody help me?
void getContours(Mat video){
    Mat grayscale, canny_output;
    cvtColor(video, grayscale, COLOR_RGB2GRAY); // converting image to grayscale
    GaussianBlur(grayscale, grayscale, Size(9, 9), 2, 2);
    threshold(grayscale, grayscale, 60, 255, THRESH_BINARY);
    vector<vector<Point>> contours, output_contour;
    vector<Vec4i> hierarchy;
    findContours(grayscale, contours, hierarchy, RETR_TREE, CHAIN_APPROX_SIMPLE);
    Mat drawing = Mat::zeros(grayscale.size(), CV_8UC3);
    vector<Point> c;
    for (size_t i = 0; i < contours.size(); i++){
        c = contours[i];
        Rect crect = boundingRect(c);
        // compute the center of the contour, then detect the name of the
        // shape using only the contour
        Moments M = moments(c);
        int cX, cY;
        cX = static_cast<int>(M.m10/M.m00);
        cY = static_cast<int>(M.m01/M.m00);
        string shape = detect(Mat(c));
        drawContours(drawing, contours, (int)i, Scalar(0, 255, 0), 2);
        Point pt(cX, cY);
        putText(drawing, shape, pt, FONT_HERSHEY_SIMPLEX, 0.5, Scalar(255, 255, 255), 2);
        imshow("contour", drawing);
    }
}
string detect(const Mat &curve){
    string shape = "unidentified";
    double peri = arcLength(curve, true);
    Mat approx;
    approxPolyDP(curve, approx, 0.04 * peri, true); // 0.01~0.05
    const int num_of_vertices = approx.rows;
    if (num_of_vertices == 0){
        shape = "circle";
    }
    if (num_of_vertices == 2){
        shape = "line";
    }
    cout << "\n" << num_of_vertices;
    return to_string(num_of_vertices);
}
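This behaviour is expected from approxPolyDP: a circle's contour is approximated by a polygon, and with epsilon = 0.04 * peri that polygon typically has 6-8 vertices, so the num_of_vertices == 0 branch never fires for circles. A common workaround (not from the original post) is to classify contours with many vertices and high circularity as circles; a rough, untested sketch under those assumptions:
// Hypothetical variant of detect(): classify by vertex count and circularity.
// Circularity = 4*pi*area / perimeter^2 is close to 1.0 for a circle.
string detect(const vector<Point> &curve){
    double peri = arcLength(curve, true);
    double area = contourArea(curve);
    vector<Point> approx;
    approxPolyDP(curve, approx, 0.04 * peri, true); // 0.01~0.05
    size_t num_of_vertices = approx.size();
    double circularity = peri > 0 ? 4.0 * CV_PI * area / (peri * peri) : 0.0;
    if (num_of_vertices == 3) return "triangle";
    if (num_of_vertices == 4) return "rectangle";
    if (num_of_vertices > 5 && circularity > 0.8) return "circle";
    return "unidentified";
}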

How to find rectangle in black region opencv (C++)

I'm a newbie to OpenCV. I am using OpenCV to find a position where a stamp can be attached to an image. The stamp can't overlap with other objects in the image.
Example binary image: the white regions are objects, the black region is background.
Example image
Result: find a rect in the black region (inside the white circle) where the stamp can be attached.
Result image
Please help me find a rect in the black region like the blue rect.
Thank you!
If you know the number of rectangles, you can use k-means to cluster the points.
First, get only the blue points and binarize them:
cv::Mat img = cv::imread("NQdmi.png", cv::IMREAD_COLOR);
std::vector<cv::Mat> chans;
cv::split(img, chans);
cv::Mat diff;
cv::absdiff(chans[2], chans[1], diff);
cv::threshold(diff, diff, 1, 255, cv::THRESH_BINARY);
cv::imshow("diff", diff);
Only blue points:
Cluster the points and find the rotated rectangles:
std::vector<cv::Point2f> points;
for (int y = 0; y < diff.rows; ++y)
{
    for (int x = 0; x < diff.cols; ++x)
    {
        if (diff.at<uchar>(y, x))
        {
            points.emplace_back(x, y);
        }
    }
}
cv::Mat pointsKmeans((int)points.size(), 1, CV_32FC2, &points[0]);
cv::Mat labels;
int clusterCount = 2;
cv::Mat centers;
cv::kmeans(pointsKmeans, clusterCount, labels,
           cv::TermCriteria(cv::TermCriteria::EPS + cv::TermCriteria::COUNT, 100, 1.0),
           3, cv::KMEANS_PP_CENTERS, centers);
std::vector<cv::Point2f> points1;
std::vector<cv::Point2f> points2;
cv::Mat draw = img.clone();
for (size_t i = 0; i < points.size(); ++i)
{
    int clusterIdx = labels.at<int>(i);
    if (clusterIdx > 0)
    {
        cv::circle(draw, points[i], 2, cv::Scalar(255, 0, 0), cv::FILLED, cv::LINE_AA);
        points1.push_back(points[i]);
    }
    else
    {
        cv::circle(draw, points[i], 2, cv::Scalar(0, 0, 255), cv::FILLED, cv::LINE_AA);
        points2.push_back(points[i]);
    }
}
// cv::Mat is captured by value here, but it still shares the underlying
// pixel data, so drawing inside the lambda modifies 'draw'.
auto DrawRRect = [draw](const std::vector<cv::Point2f>& pp)
{
    cv::RotatedRect rr = cv::minAreaRect(pp);
    cv::Point2f corners[4];
    rr.points(corners);
    cv::line(draw, corners[0], corners[1], cv::Scalar(0, 255, 0), 2);
    cv::line(draw, corners[1], corners[2], cv::Scalar(0, 255, 0), 2);
    cv::line(draw, corners[2], corners[3], cv::Scalar(0, 255, 0), 2);
    cv::line(draw, corners[3], corners[0], cv::Scalar(0, 255, 0), 2);
};
DrawRRect(points1);
DrawRRect(points2);
cv::imshow("draw", draw);
Result:

Deskewing Image OpenCV

I have followed this article on how to calculate the skew angle and deskew an image for better Tesseract OCR results: http://felix.abecassis.me/2011/10/opencv-rotation-deskewing/
The correct angle is calculated, but the text is never actually rotated.
These are the methods I am using:
+(UIImage *) prepareImage: (UIImage *)image{
    return deskew(image, computeSkew(image));
}
// Organization -> Deskewing
double computeSkew(UIImage *image)
{
    Mat src;
    UIImageToMat(image, src);
    cv::Size size = src.size();
    bitwise_not(src, src);
    vector<Vec4i> lines;
    HoughLinesP(src, lines, 1, CV_PI/180, 100, size.width / 2.f, 20);
    Mat disp_lines(size, CV_8UC1, Scalar(0, 0, 0));
    double angle = 0.;
    unsigned nb_lines = lines.size();
    for (unsigned i = 0; i < nb_lines; ++i)
    {
        line(disp_lines, cv::Point(lines[i][0], lines[i][1]),
             cv::Point(lines[i][2], lines[i][3]), Scalar(255, 0, 0));
        angle += atan2((double)lines[i][3] - lines[i][1],
                       (double)lines[i][2] - lines[i][0]);
    }
    angle /= nb_lines; // mean angle, in radians
    cout << angle << endl;
    return angle;
}
UIImage* deskew(UIImage *image, double angle)
{
    Mat img;
    UIImageToMat(image, img);
    bitwise_not(img, img);
    vector<cv::Point> points;
    Mat_<uchar>::iterator it = img.begin<uchar>();
    Mat_<uchar>::iterator end = img.end<uchar>();
    for (; it != end; ++it)
        if (*it)
            points.push_back(it.pos());
    RotatedRect box = minAreaRect(Mat(points));
    Mat rot_mat = getRotationMatrix2D(box.center, angle, 1);
    Mat rotated;
    warpAffine(img, rotated, rot_mat, img.size(), INTER_CUBIC);
    return MatToUIImage(rotated);
}
UIImageToMat and MatToUIImage are reliable methods that convert back and forth. I have also tried to return the angle in both radians and degrees. Both times the image returned from the prepareImage function is still tilted at the same angle as the original image.
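One thing worth double-checking (my observation, not part of the original post): cv::getRotationMatrix2D interprets its angle argument in degrees, while computeSkew returns the mean atan2 result in radians, so the conversion has to happen exactly once, inside deskew. A minimal sketch of that change:
// Convert the skew angle from radians to degrees before building the
// rotation matrix; getRotationMatrix2D expects degrees.
double angle_degrees = angle * 180.0 / CV_PI;
Mat rot_mat = getRotationMatrix2D(box.center, angle_degrees, 1);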

Draw rotated rectangle in opencv c++

I want to draw a rotated rectangle in OpenCV with C++. I use the "rectangle" function like below:
rectangle(RGBsrc, vertices[0], vertices[2], Scalar(0, 0, 0), CV_FILLED, 8, 0);
but this function draws a rectangle with 0 angle. How can I draw a rotated rectangle with a specific angle in OpenCV with C++?
Since you want a filled rectangle, you should use fillConvexPoly:
// Include center point of your rectangle, size of your rectangle and the degrees of rotation
void DrawRotatedRectangle(cv::Mat& image, cv::Point centerPoint, cv::Size rectangleSize, double rotationDegrees)
{
    cv::Scalar color = cv::Scalar(255.0, 255.0, 255.0); // white
    // Create the rotated rectangle
    cv::RotatedRect rotatedRectangle(centerPoint, rectangleSize, rotationDegrees);
    // We take the edges that OpenCV calculated for us
    cv::Point2f vertices2f[4];
    rotatedRectangle.points(vertices2f);
    // Convert them so we can use them in a fillConvexPoly
    cv::Point vertices[4];
    for(int i = 0; i < 4; ++i){
        vertices[i] = vertices2f[i];
    }
    // Now we can fill the rotated rectangle with our specified color
    cv::fillConvexPoly(image, vertices, 4, color);
}
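A hypothetical usage example of the helper above, drawing a filled white rectangle centred at (100, 100), 100x50 pixels, rotated by 30 degrees:
// Hypothetical usage of DrawRotatedRectangle on a blank canvas.
cv::Mat canvas(200, 200, CV_8UC3, cv::Scalar(0, 0, 0));
DrawRotatedRectangle(canvas, cv::Point(100, 100), cv::Size(100, 50), 30.0);
cv::imshow("filled rotated rectangle", canvas);
cv::waitKey(0);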
The sample below demonstrates how to draw a rotated rectangle in OpenCV C++.
Mat test_image(200, 200, CV_8UC3, Scalar(0));
RotatedRect rRect = RotatedRect(Point2f(100,100), Size2f(100,50), 30);
Point2f vertices[4];
rRect.points(vertices);
for (int i = 0; i < 4; i++)
    line(test_image, vertices[i], vertices[(i+1)%4], Scalar(0,255,0), 2);
Rect brect = rRect.boundingRect();
rectangle(test_image, brect, Scalar(255,0,0), 2);
imshow("rectangles", test_image);
waitKey(0);
The result is:
Reference:
OpenCV docs

Draw rectangles like voting on a heatmap

I want to create Mat images in OpenCV and initialize them to zero (all pixels black). Thus I use the following for initialization:
Mat img = Mat::zeros(image.rows, image.cols, CV_8UC1);
After that I have some rectangles with locations inside that image, and I want to draw the corresponding rectangle regions in white. How is it possible to draw a filled region in the Mat?
I have the following function to draw rects. However, I want to fill the whole rectangle, not just draw its boundary.
static Mat image_draw(Mat image, vector<Rect> rect, CvScalar color){
    for(int i = 0; i < rect.size(); i++)
    {
        Point pt1(rect[i].x + rect[i].width, rect[i].y + rect[i].height);
        Point pt2(rect[i].x, rect[i].y);
        rectangle(image, pt1, pt2, color, 5, 8, 0);
    }
    return image;
}
The exact thing I want to do is to create a heat map for my rectangles, so that overlapping bounding boxes get higher values (close to 255) than single non-overlapping rectangles. I changed the thickness:
img = image_draw( img, rects, cvScalar(255, 102, 255, 0), -1);
The variable rects contains from 0 to 10 rectangles. I want to somehow accumulate the rectangle drawings, not just redraw the rectangles.
If I want to turn it into a function, it is something like this. EDIT, final solution:
static Mat heatmap2(Mat image1, vector<Rect> faces, CvScalar color, int thickness) {
    cv::Mat heatmap(image1.rows, image1.cols, CV_8U, cv::Scalar(0));
    for(int i = 0; i < faces.size(); i++)
    {
        cv::Mat temp(image1.rows, image1.cols, CV_8U, cv::Scalar(0));
        Point pt1(faces[i].x + faces[i].width, faces[i].y + faces[i].height);
        Point pt2(faces[i].x, faces[i].y);
        rectangle(temp, pt1, pt2, color, thickness, 8, 0);
        heatmap += temp;
    }
    return heatmap;
}
Try this:
cv::Mat heatmap(200,300,CV_8U,cv::Scalar(0));
{
    cv::Mat temp(200,300,CV_8U,cv::Scalar(0));
    cv::Rect r(10,20,30,30);
    cv::rectangle(temp,r,cv::Scalar(100),-1);
    heatmap+=temp;
}
{
    cv::Mat temp(200,300,CV_8U,cv::Scalar(0));
    cv::Rect r(20,25,30,30);
    cv::rectangle(temp,r,cv::Scalar(100),-1);
    heatmap+=temp;
}
cv::imshow("Heatmap",heatmap);
cv::waitKey();
Result:
From the official OpenCV Documentation (check here), "Thickness of lines that make up the rectangle. Negative values, like CV_FILLED , mean that the function has to draw a filled rectangle."
So give thickness a negative value, like:
rectangle(image, pt1, pt2, color, -1, 8, 0);
UPDATE
Use these lines in your code,
for (int i = 0; i < rect.size(); i++)
    for (int y = rect[i].y; y < rect[i].y + rect[i].height; y++)
        for (int x = rect[i].x; x < rect[i].x + rect[i].width; x++)
        {
            image.at<uchar>(y,x) =
                saturate_cast<uchar>(image.at<uchar>(y,x) + 50);
        }
Here each Rect will increase the intensity by 50, and when it reaches 255, it will stay 255.
Input Image
Output Image
2 overlapping rect
Just a slight modification to your code should work:
static void draw_rectangles(Mat image, vector<Rect> faces) {
    cv::Mat heatmap(image.rows, image.cols, CV_8U, cv::Scalar(0));
    for(int i = 0; i < faces.size(); i++)
    {
        // heatmap(faces[i]) gives you a submatrix of your heatmap pointing at the location of your rectangle
        cv::Mat temp = heatmap(faces[i]);
        temp += 10; // add 10 grey levels to the existing values; this also modifies heatmap as a side-effect
    }
    imshow("heatmap", heatmap);
    waitKey(0);
}
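Since each rectangle only adds 10 grey levels, the accumulated heatmap can look nearly black when only a few rectangles overlap. One optional tweak (my addition, not part of the answer) is to stretch the range before displaying it inside draw_rectangles:
// Optional: stretch the accumulated values to the full 0-255 range for display.
cv::Mat heatmap_vis;
cv::normalize(heatmap, heatmap_vis, 0, 255, cv::NORM_MINMAX, CV_8U);
cv::imshow("heatmap (normalized)", heatmap_vis);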