Get correspondences using Chamfer matching OpenCV - C++

I am using chamfer matching to match a template with a reference image, via OpenCV's chamerMatching() method. I get the matching scores, but how can I get the correspondences? In other words, for each cost, how can I get the template point to reference image point correspondence? Thanks in advance!
I am using the following code:
int main( int argc, const char** argv )
{
    Mat img = imread(argv[1], 0);
    Mat tpl = imread(argv[2], 0);
    // check the inputs before processing them (blurring an empty Mat throws)
    if (img.empty() || tpl.empty())
    {
        cout << "Could not read image files";
        return -1;
    }
    GaussianBlur(img, img, cv::Size(3, 3), 4.0);
    GaussianBlur(tpl, tpl, cv::Size(3, 3), 4.0);
    imshow("Image", img);
    imshow("Template", tpl);
    Mat cimg;
    cvtColor(img, cimg, COLOR_GRAY2BGR);
    // if the image and the template are not edge maps but normal grayscale images,
    // you might want to uncomment the lines below to produce the maps. You can also
    // run Sobel instead of Canny.
    // Canny(img, img, 5, 50, 3);
    // Canny(tpl, tpl, 5, 50, 3);
    vector<vector<Point> > results;
    vector<float> costs;
    int best = chamerMatching( img, tpl, results, costs );
    if( best < 0 )
    {
        cout << "matching not found" << endl;
        return -1;
    }
    cout << results.size() << endl;
    size_t i, n = results[best].size();
    for( i = 0; i < n; i++ )
    {
        Point pt = results[best][i];
        if( pt.inside(Rect(0, 0, cimg.cols, cimg.rows)) )
            cimg.at<Vec3b>(pt) = Vec3b(0, 255, 0);
    }
    imshow("result", cimg);
    float total_cost = 0.0;
    for( size_t j = 0; j < costs.size(); j++ )
        total_cost += costs[j];
    cout << total_cost << "\n";
    waitKey();
    return 0;
}
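
One partial answer, as far as I can tell from the implementation: chamerMatching() fills each results[i] with the template's edge points already translated to the matched location, so the correspondence is implicit in the match offset. Below is a sketch that recovers that offset from the centroids of the two point sets and maps every template edge pixel into the reference image. It assumes the winning match is a pure translation of the template (chamerMatching also searches over scale by default, so verify this holds for your OpenCV version) and that tpl still holds the template edge map at this point:

// Sketch: recover the template-to-image offset for the best match.
// ASSUMPTION: results[best] contains the template edge points shifted by a
// pure translation; with a non-unit matched scale this breaks down.
vector<Point> tplEdges;
findNonZero(tpl, tplEdges); // template edge pixels
if (!tplEdges.empty() && !results[best].empty())
{
    Point2f tplCenter(0, 0), matchCenter(0, 0);
    for (size_t k = 0; k < tplEdges.size(); k++)
        tplCenter += Point2f(tplEdges[k]);
    tplCenter *= 1.0f / (float)tplEdges.size();
    for (size_t k = 0; k < results[best].size(); k++)
        matchCenter += Point2f(results[best][k]);
    matchCenter *= 1.0f / (float)results[best].size();
    Point2f offset = matchCenter - tplCenter; // estimated translation
    for (size_t k = 0; k < tplEdges.size(); k++)
    {
        Point2f imgPt = Point2f(tplEdges[k]) + offset; // assumed correspondence
        cout << tplEdges[k] << " -> " << imgPt << endl;
    }
}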

Related

Wrong pose estimate when using Charuco on image with higher resolution

I'm working on a project where a high quality pose estimate is needed. I am therefore trying to get this pose estimate using an OpenCV ChArUco board. Previously I was using an ArUco board of size 2x2, but the pose estimate was not sufficient.
I have made the ChArUco estimation work using a RealSense D415 camera at a resolution of 640x480. However, when I change the resolution to 1280x720, the coordinate system which I draw on the board starts jumping around completely at random.
The code for estimating the charuco board is here:
void ReconstructionSystem::detect_charuco_markers(cv::Mat& image, cv::Matx33f& matrix, cv::Vec<float, 5>& coef, int& centerPix_x, int& centerPix_y, cv::Vec3d& rotation, bool& arucoFound)
{
    cv::Ptr<cv::aruco::Dictionary> dictionary = cv::aruco::getPredefinedDictionary(cv::aruco::DICT_4X4_50);
    cv::Ptr<cv::aruco::CharucoBoard> board = cv::aruco::CharucoBoard::create(3, 3, 0.04f, 0.02f, dictionary);
    cv::Ptr<cv::aruco::DetectorParameters> params = cv::aruco::DetectorParameters::create();
    //params->cornerRefinementMethod = cv::aruco::CORNER_REFINE_NONE;
    std::vector<int> markerIds;
    std::vector<std::vector<cv::Point2f>> markerCorners;
    cv::Mat copyImage;
    image.copyTo(copyImage);
    cv::Mat gray;
    cv::cvtColor(copyImage, gray, cv::COLOR_RGB2GRAY);
    cv::aruco::detectMarkers(gray, board->getDictionary(), markerCorners, markerIds, params);
    // if more than three markers were detected
    if (markerIds.size() > 3) {
        cv::aruco::drawDetectedMarkers(image, markerCorners, markerIds);
        std::vector<cv::Point2f> charucoCorners;
        std::vector<int> charucoIds;
        cv::aruco::interpolateCornersCharuco(markerCorners, markerIds, gray, board, charucoCorners, charucoIds, matrix, coef);
        // if more than three charuco corners were detected
        if (charucoIds.size() > 3) {
            cv::Scalar color = cv::Scalar(255, 0, 0);
            cv::aruco::drawDetectedCornersCharuco(image, charucoCorners, charucoIds, color);
            cv::Vec3d rvec, tvec;
            bool valid = cv::aruco::estimatePoseCharucoBoard(charucoCorners, charucoIds, board, matrix, coef, rvec, tvec);
            // if charuco pose is valid
            if (valid) {
                cv::drawFrameAxes(image, matrix, coef, rvec, tvec, 0.1f);
                arucoFound = true;
            }
            else
            {
                arucoFound = false;
            }
        }
        else
        {
            arucoFound = false;
        }
    }
    else
    {
        arucoFound = false;
    }
    board = NULL;
    dictionary = NULL;
    copyImage.release();
    gray.release();
}
The function above is called within this while loop:
//Variables for transformation matrices
int centerPix_x = 0, centerPix_y = 0;
cv::Vec3d rotationVec;
cv::Matx33f rotation;
bool arucoWasFound = false;
std::vector<float> final_x, final_y, final_z;
std::vector<float> rotation_x, rotation_y, rotation_z;
cv::Matx33f matrix = get_cameraMatrix(path);
cv::Vec<float, 5> coef = get_distCoeffs(path);
const auto window_name = "Validation image";
cv::namedWindow(window_name, cv::WINDOW_AUTOSIZE);
// TODO Also add here that if we have iterated through X frames and not found Aruco, exit with failure
while (cv::waitKey(1) < 0 && cv::getWindowProperty(window_name, cv::WND_PROP_AUTOSIZE) >= 0 && counter < 60) {
    rs2::frame f = sensorPtr->color_data.wait_for_frame();
    // Query frame size (width and height)
    const int w = f.as<rs2::video_frame>().get_width();
    const int h = f.as<rs2::video_frame>().get_height();
    cv::Mat image(cv::Size(w, h), CV_8UC3, (void*)f.get_data(), cv::Mat::AUTO_STEP);
    cv::cvtColor(image, image, cv::COLOR_RGB2BGR);
    //detect_aruco_markers(image, matrix, coef, centerPix_x, centerPix_y, rotationVec, arucoWasFound);
    detect_charuco_markers(image, matrix, coef, centerPix_x, centerPix_y, rotationVec, arucoWasFound);
    if (arucoWasFound)
    {
        rs2::depth_frame depth = sensorPtr->depth_data.wait_for_frame();
        rs2_intrinsics intrinsic = rs2::video_stream_profile(depth.get_profile()).get_intrinsics();
        float pixel_distance_in_meters = depth.get_distance(centerPix_x, centerPix_y);
        float InputPixelAsFloat[2];
        InputPixelAsFloat[0] = centerPix_x;
        InputPixelAsFloat[1] = centerPix_y;
        float finalDepthPoint[3];
        rs2_deproject_pixel_to_point(finalDepthPoint, &intrinsic, InputPixelAsFloat, pixel_distance_in_meters);
        // Position //
        final_x.push_back(finalDepthPoint[0]);
        final_y.push_back(finalDepthPoint[1]);
        final_z.push_back(finalDepthPoint[2]);
        // Rotation //
        rotation_x.push_back(rotationVec[0]);
        rotation_y.push_back(rotationVec[1]);
        rotation_z.push_back(rotationVec[2]);
        counter++;
    }
    cv::imshow(window_name, image);
}
cv::destroyWindow(window_name);
Furthermore, here is an image of the detection using a resolution of 1280x720.
And here is an image of the detection with resolution 640x480.
If anybody knows why this is happening, please let me know :D
As pointed out, the problem was that the camera calibration had been done at the wrong resolution, in my case 640x480 instead of 1280x720. Below is the code I used to calculate the calibration matrix and coefficients. The offending line was cv::Size frameSize(_imageWidth, _imageWidth); the second argument should have been _imageHeight.
void ReconstructionSystem::camera_calibration()
{
    std::string folder_501("\\Users\\Mikke\\Desktop\\Calibration\\501_images\\*.png");
    std::string folder_309("\\Users\\Mikke\\Desktop\\Calibration\\309_images\\*.png");
    for (int x = 0; x < 2; x++)
    {
        std::vector<cv::String> filenames;
        std::string currentCam;
        if (x == 0) currentCam = folder_501;
        if (x == 1) currentCam = folder_309;
        cv::glob(currentCam, filenames, false);
        for (const auto& var : filenames)
        {
            printf("file: %s\n", var.c_str());
        }
        cv::Size patternSize(9, 6);
        std::vector<std::vector<cv::Point2f>> q(filenames.size());
        std::vector<std::vector<cv::Point3f>> Q;
        int checkerboard[2] = { 10, 7 }; // size of checkerboard
        int square_size = 27; // 2.7 cm == 27 mm
        std::vector<cv::Point3f> objp;
        for (int i = 1; i < checkerboard[1]; i++) {
            for (int j = 1; j < checkerboard[0]; j++) {
                objp.push_back(cv::Point3f(j * square_size, i * square_size, 0));
            }
        }
        std::vector<cv::Point2f> imgPoint;
        std::size_t i = 0;
        for (auto const& f : filenames) {
            std::cout << std::string(f) << std::endl;
            cv::Mat img = cv::imread(filenames[i]);
            cv::Mat gray;
            cv::cvtColor(img, gray, cv::COLOR_RGB2GRAY);
            bool patternFound = cv::findChessboardCorners(gray, patternSize, q[i], cv::CALIB_CB_ADAPTIVE_THRESH + cv::CALIB_CB_NORMALIZE_IMAGE + cv::CALIB_CB_FAST_CHECK);
            if (patternFound) {
                cv::cornerSubPix(gray, q[i], cv::Size(11, 11), cv::Size(-1, -1), cv::TermCriteria(cv::TermCriteria::EPS + cv::TermCriteria::MAX_ITER, 30, 0.001));
                Q.push_back(objp);
            }
            // Display
            cv::drawChessboardCorners(img, patternSize, q[i], patternFound);
            cv::imshow("chessboard detection", img);
            cv::waitKey(0);
            i++;
        }
        cv::Matx33f K(cv::Matx33f::eye());
        cv::Vec<float, 5> k(0, 0, 0, 0, 0);
        std::vector<cv::Mat> rvecs, tvecs;
        std::vector<double> stdIntrinsics, stdExtrinsics, perViewErrors;
        int flags = cv::CALIB_FIX_ASPECT_RATIO + cv::CALIB_FIX_K3 + cv::CALIB_ZERO_TANGENT_DIST + cv::CALIB_FIX_PRINCIPAL_POINT;
        cv::Size frameSize(_imageWidth, _imageWidth); // BUG: second argument should be _imageHeight
        std::cout << "calibrating..." << std::endl;
        float error = cv::calibrateCamera(Q, q, frameSize, K, k, rvecs, tvecs, flags);
        std::cout << "reprojection error = " << error << "\nK = \n" << K << "\nk=\n" << k << std::endl;
        if (x == 0) {
            std::string path_mat = "\\Users\\Mikke\\Desktop\\Calibration\\104122061501\\calibration_Mat_new.yml";
            std::string path_coe = "\\Users\\Mikke\\Desktop\\Calibration\\104122061501\\calibration_coef_new.yml";
            saveData_mat(path_mat, K);
            saveData_coef(path_coe, k);
        }
        if (x == 1) {
            std::string path_mat = "\\Users\\Mikke\\Desktop\\Calibration\\102122061309\\calibration_Mat_new.yml";
            std::string path_coe = "\\Users\\Mikke\\Desktop\\Calibration\\102122061309\\calibration_coef_new.yml";
            saveData_mat(path_mat, K);
            saveData_coef(path_coe, k);
        }
    }
}
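
As a side note: if recalibrating for every mode is impractical, pinhole intrinsics can be rescaled to another resolution, assuming both modes cover the same field of view. That assumption does not always hold on the D415, whose 4:3 and 16:9 modes crop the sensor differently, so recalibrating at the target resolution remains the safer option. A sketch with a hypothetical helper:

// Sketch: rescale a camera matrix calibrated at one resolution for use at
// another. ASSUMPTION: both modes share the same field of view (no crop or
// binning differences); the distortion coefficients are expressed in
// normalized coordinates and can stay unchanged under that assumption.
cv::Matx33f scale_intrinsics(const cv::Matx33f& K, cv::Size calibrated, cv::Size target)
{
    float sx = (float)target.width / (float)calibrated.width;
    float sy = (float)target.height / (float)calibrated.height;
    cv::Matx33f Ks = K;
    Ks(0, 0) *= sx; // fx
    Ks(0, 2) *= sx; // cx
    Ks(1, 1) *= sy; // fy
    Ks(1, 2) *= sy; // cy
    return Ks;
}

// e.g. cv::Matx33f matrix720 = scale_intrinsics(matrix, cv::Size(640, 480), cv::Size(1280, 720));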

Find yaw, roll and pitch angles of the face from live webcam stream

I need to find the yaw, roll and pitch angles from a webcam stream of a face. I used OpenCV to detect the face and get the facial landmarks. Can someone help or guide me on how to find and print those angles?
I am using Visual Studio 2022 (community).
Ptr<Facemark> facemark; // landmark detection
CascadeClassifier faceDetector; // face detection
void process(Mat img, Mat imgcol) {
    vector<Rect> faces;
    faceDetector.detectMultiScale(img, faces);
    Mat imFace;
    if (faces.size() != 0) {
        for (size_t i = 0; i < faces.size(); i++)
        {
            cv::rectangle(imgcol, faces[i], Scalar(255, 0, 0));
            imFace = imgcol(faces[i]);
            resize(imFace, imFace, Size(imFace.cols * 5, imFace.rows * 5));
            faces[i] = Rect(0, 0, faces[i].width * 5, faces[i].height * 5);
        }
        vector<vector<Point2f>> shapes;
        if (facemark->fit(imFace, faces, shapes))
        {
            for (unsigned long i = 0; i < faces.size(); i++) {
                for (unsigned long k = 0; k < shapes[i].size(); k++) {
                    cv::circle(imFace, shapes[i][k], 5, cv::Scalar(0, 0, 255), FILLED);
                }
            }
        }
        namedWindow("Detected_shape");
        imshow("Detected_shape", imFace);
        waitKey(5);
    }
    else {
        cout << "Faces not detected." << endl;
    }
}
int main()
{
    facemark = FacemarkLBF::create();
    facemark->loadModel("lbfmodel.yml");
    faceDetector.load("D:/opencv/build/install/etc/haarcascades/haarcascade_frontalface_alt2.xml");
    cout << "Loaded model" << endl;
    VideoCapture cap(1); // capture device
    int initialized = 0;
    for (;;)
    {
        if (!cap.isOpened()) {
            cout << "Video Capture Fail" << endl;
            break;
        }
        else {
            Mat img; // image containers
            Mat imgbw;
            cap >> img; // image from webcam
            resize(img, img, Size(460, 460), 0, 0, INTER_LINEAR_EXACT);
            cvtColor(img, imgbw, COLOR_BGR2GRAY);
            process(imgbw, img);
            namedWindow("Live", WINDOW_AUTOSIZE);
            imshow("Live", img);
            waitKey(5);
        }
    }
}
The code includes what I could do so far. It detects a face and landmarks. How do I find the angles and print them from this?
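
A common approach is to pick a few of the detected 2D landmarks, pair them with a generic 3D face model, solve for the head pose with cv::solvePnP, and decompose the resulting rotation into Euler angles. The sketch below assumes the 68-point annotation scheme that LBF models normally follow (nose tip = 30, chin = 8, outer eye corners = 36/45, mouth corners = 48/54), uses the generic 3D model points that circulate in head-pose tutorials, and approximates the camera matrix from the image size; all of these are assumptions to verify against your setup:

// Sketch: head pose (pitch, yaw, roll) from 2D landmarks via solvePnP.
void printPose(const vector<Point2f>& lm, const Size& imgSize)
{
    if (lm.size() < 68) return; // expects the 68-point annotation scheme
    vector<Point3f> model = {
        { 0.0f, 0.0f, 0.0f },          // nose tip
        { 0.0f, -330.0f, -65.0f },     // chin
        { -225.0f, 170.0f, -135.0f },  // left eye, left corner
        { 225.0f, 170.0f, -135.0f },   // right eye, right corner
        { -150.0f, -150.0f, -125.0f }, // left mouth corner
        { 150.0f, -150.0f, -125.0f }   // right mouth corner
    };
    vector<Point2f> imagePts = { lm[30], lm[8], lm[36], lm[45], lm[48], lm[54] };
    // Rough intrinsics: focal length ~ image width, principal point at center.
    double f = imgSize.width;
    Mat K = (Mat_<double>(3, 3) << f, 0, imgSize.width / 2.0,
                                   0, f, imgSize.height / 2.0,
                                   0, 0, 1);
    Mat rvec, tvec, R, mtxR, mtxQ;
    solvePnP(model, imagePts, K, Mat(), rvec, tvec); // no lens distortion assumed
    Rodrigues(rvec, R);
    Vec3d euler = RQDecomp3x3(R, mtxR, mtxQ); // x, y, z rotations in degrees
    cout << "pitch: " << euler[0] << " yaw: " << euler[1] << " roll: " << euler[2] << endl;
}

You could call it from process() right after facemark->fit() succeeds, e.g. printPose(shapes[i], imFace.size()), since the landmarks are in imFace coordinates.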

Opencv linear gray value transformation

Hey, I want to do a linear gray value transformation so that I can change the contrast.
How can I get the maximum and minimum gray value? And then I want to scale the image so that it has a limited contrast range of 100 to 150. I have searched for about 2 hours but haven't found anything.
Would be nice if someone could help.
#include <opencv2/highgui/highgui.hpp>
#include <iostream>
#include <opencv2/imgproc.hpp>

using namespace std;
using namespace cv;

cv::Mat plotHistogram(cv::Mat &image, bool cumulative = true, int histSize = 256);

int main()
{
    cv::Mat img = cv::imread("schrott.png"); // Read the file
    if (img.empty()) // Check for invalid input
    {
        std::cout << "Could not open or find the frame" << std::endl;
        return -1;
    }
    cv::Mat img_gray;
    cv::cvtColor(img, img_gray, cv::COLOR_BGR2GRAY); // In case img is colored
    cv::namedWindow("Input Image", cv::WINDOW_AUTOSIZE); // Create a window for display.
    cv::imshow("Input Image", img);
    cv::Mat hist;
    hist = plotHistogram(img_gray);
    cv::namedWindow("Histogram", cv::WINDOW_NORMAL); // Create a window for display.
    cv::imshow("Histogram", hist);
    cv::waitKey(0);
}
cv::Mat plotHistogram(cv::Mat &image, bool cumulative, int histSize) {
    // Create Image for Histogram
    int hist_w = 1024; int hist_h = 800;
    int bin_w = cvRound((double)hist_w / histSize);
    cv::Mat histImage(hist_h, hist_w, CV_8UC1, Scalar(255, 255, 255));
    if (image.channels() > 1) {
        cerr << "plotHistogram: Please insert only gray images." << endl;
        return histImage;
    }
    // Calculate Histogram
    float range[] = { 0, 256 };
    const float* histRange = { range };
    cv::Mat hist;
    calcHist(&image, 1, 0, Mat(), hist, 1, &histSize, &histRange);
    if (cumulative) {
        cv::Mat accumulatedHist = hist.clone();
        for (int i = 1; i < histSize; i++) {
            accumulatedHist.at<float>(i) += accumulatedHist.at<float>(i - 1);
        }
        hist = accumulatedHist;
    }
    // Normalize the result to [ 0, histImage.rows ]
    normalize(hist, hist, 0, histImage.rows, NORM_MINMAX, -1, Mat());
    // Draw bars
    for (int i = 1; i < histSize; i++) {
        cv::rectangle(histImage, Point(bin_w * (i - 1), hist_h),
                      Point(bin_w * (i), hist_h - cvRound(hist.at<float>(i))),
                      Scalar(50, 50, 50), 1);
    }
    return histImage; // Not really call by value, as cv::Mat only saves a pointer to the image data
}
You can find the minimum and maximum gray value with minMaxLoc:
Mat image;
// read image;
double min, max;
minMaxLoc(image, &min, &max);
cout << "min: " << min << " max: " << max << endl;

OpenCV fisheye undistort issues

EDIT: I found the cause of the problem: the fisheye::undistortImage() function was not working correctly, so I replaced it with estimateNewCameraMatrixForUndistortRectify(), initUndistortRectifyMap(), and remap(), as in the original calibrate-camera example. Not perfect yet, but going in the right direction. Output image: http://imgur.com/a/Xm5vq
Mat output;
Mat newK;
Mat view, map1, map2;
Size newSize(1200, 1200);
Mat rview(newSize, frame.type());
//resize(rview, rview, newSize);
fisheye::estimateNewCameraMatrixForUndistortRectify(K, D, frame.size(), Matx33d::eye(), newK, 1);
fisheye::initUndistortRectifyMap(K, D, Matx33d::eye(), newK, frame.size(), CV_16SC2, map1, map2);
//fisheye::undistortImage(frame, output, K, D, identity);
remap(frame, rview, map1, map2, INTER_LINEAR);
imshow("Image View", rview);
imshow(window_name, frame);
if (waitKey(50) == 27) {
    break;
}
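
A likely explanation for the original symptom: the fifth argument of fisheye::undistortImage() is Knew, the camera matrix of the undistorted image, and the original code passed the identity there. A unit focal length with the principal point at (0, 0) crams the whole result into the top-left corner. Passing the matrix from estimateNewCameraMatrixForUndistortRectify() (or simply K) as Knew should behave like the remap pipeline above; a sketch:

// Sketch: fisheye::undistortImage with an explicit new camera matrix.
// Passing the identity as Knew is what collapsed the output to the corner.
Mat undistorted, Knew;
fisheye::estimateNewCameraMatrixForUndistortRectify(K, D, frame.size(), Matx33d::eye(), Knew, 1);
fisheye::undistortImage(frame, undistorted, K, D, Knew);
imshow("Image View", undistorted);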
Original post:
I'm trying to calibrate and undistort an image coming from a 180 degree fisheye USB camera. Most of this code is from existing examples that claim to be functional.
The code runs fine until fisheye::undistortImage, where the output image is very distorted and centered around the top-left corner of the window.
Screenshot of the "undistorted" chess board and calibration matrix outputs:
http://imgur.com/a/RTIoT
What am I missing here?
int main(int argc, char** argv) {
    VideoCapture camera;
    camera.open(1);
    if (!camera.isOpened()) {
        cout << "Failed to open camera." << std::endl;
        return -1;
    }
    double fWidth = camera.get(CAP_PROP_FRAME_WIDTH);
    double fHeight = camera.get(CAP_PROP_FRAME_HEIGHT);
    cout << fWidth << std::endl;
    cout << fHeight << std::endl;
    /*
    640 320
    480 240
    */
    const char* window_name = "output";
    namedWindow(window_name, WINDOW_NORMAL);
    Mat frame;
    Size boardSize;
    boardSize.width = 9;
    boardSize.height = 6;
    int remaining_frames = 30;
    Mat K;// = Mat(3, 3, CV_64F, vK);
    Mat D;
    Mat identity = Mat::eye(3, 3, CV_64F);
    vector<vector<Point2f> > img_points;
    vector<vector<Point3f> > obj_points(1);
    int sq_sz = 25;
    for (int i = 0; i < boardSize.height; i++) {
        for (int j = 0; j < boardSize.width; j++) {
            obj_points[0].push_back(Point3f(float(j * sq_sz), float(i * sq_sz), 0));
        }
    }
    obj_points.resize(remaining_frames, obj_points[0]);
    bool found = false;
    clock_t prevTimestamp = 0;
    int delay = 500;
    while (1) {
        frame = nextFrame(camera);
        bool blinkOutput = false;
        if (remaining_frames > 0) {
            vector<Point2f> corners;
            int chessBoardFlags = CALIB_CB_ADAPTIVE_THRESH + CALIB_CB_NORMALIZE_IMAGE;
            found = findChessboardCorners(frame, boardSize, corners, chessBoardFlags);
            if (found) {
                drawChessboardCorners(frame, boardSize, corners, found);
                if (clock() - prevTimestamp > delay * 1e-3 * CLOCKS_PER_SEC) {
                    Mat viewGray;
                    cvtColor(frame, viewGray, COLOR_BGR2GRAY);
                    cornerSubPix(viewGray, corners, Size(11, 11), Size(-1, -1), TermCriteria(TermCriteria::EPS + TermCriteria::COUNT, 30, 0.1));
                    img_points.push_back(corners);
                    remaining_frames--;
                    cout << remaining_frames << " frames to calibration." << endl;
                    blinkOutput = true;
                    prevTimestamp = clock();
                }
                if (remaining_frames == 0) {
                    cout << "Computing distortion" << endl;
                    int flags = 0;
                    flags |= cv::fisheye::CALIB_RECOMPUTE_EXTRINSIC;
                    flags |= cv::fisheye::CALIB_CHECK_COND;
                    flags |= cv::fisheye::CALIB_FIX_SKEW;
                    fisheye::calibrate(obj_points, img_points, frame.size(), K, D, noArray(), noArray(), flags);
                    cout << "Finished computing distortion" << endl;
                    cout << K << endl;
                    cout << D << endl;
                }
            }
            if (blinkOutput) { bitwise_not(frame, frame); }
            cv::imshow(window_name, frame);
            if (waitKey(50) == 27) {
                break;
            }
        }
        else {
            Mat output;
            fisheye::undistortImage(frame, output, K, D, identity);
            cv::imshow(window_name, output);
            if (waitKey(50) == 27) {
                break;
            }
        }
    }
    return 0;
}

Wrong mass center point (opencv and moment function)

I'm trying to calculate the mass center of images using OpenCV and I get wrong results, as you can see in the images (the mass center should not be that close to one of the sides in these cases). Also, I get mass centers that depend on the rotation, and that's incorrect.
Next, you can see the code, input image and output image.
I tried different example codes, and the results are the same.
Output image: Mass center calculated by the program
Input image: Image Input
Example code:
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include <iostream>
#include <stdio.h>
#include <stdlib.h>
using namespace cv;
using namespace std;
Mat src; Mat srcGray;
RNG rng(12345);
int main(int argc, char **argv)
{
// Load source image and convert it to gray
src = imread(argv[1], 1);
// Convert image to gray and blur it
cvtColor(src, srcGray, CV_BGR2GRAY);
blur(srcGray, srcGray, Size(3, 3));
Mat srcThresh;
double otsu;
otsu = threshold(srcGray, srcThresh, 0, 255, CV_THRESH_BINARY | CV_THRESH_OTSU);
Mat cannyOut;
Canny(srcGray, cannyOut, otsu, otsu * 1 / 2, 3, 1);
// Find contours
vector<vector<Point> > contours;
vector<Vec4i> hierarchy;
findContours(cannyOut, contours, hierarchy, CV_RETR_EXTERNAL, CV_CHAIN_APPROX_SIMPLE, Point(0, 0));
// Get the moments
vector<Moments> mu(contours.size());
for (int i = 0; i < contours.size(); i++)
{
mu[i] = moments(contours[i], false);
}
// Get the mass centers:
vector<Point2f> mc(contours.size());
for (int i = 0; i < contours.size(); i++)
{
mc[i] = Point2f(mu[i].m10 / mu[i].m00, mu[i].m01 / mu[i].m00);
}
// Draw contours
Mat drawing = Mat::zeros(cannyOut.size(), CV_8UC3);
string sObjectNumber; // string which will contain the result
ostringstream sContourNumber; // stream used for the conversion
for (int i = 0; i< contours.size(); i++)
{
// drawing.setTo(Scalar(0.0,0.0,0.0));
sContourNumber << i;
sObjectNumber = sContourNumber.str(); // Convert int to string
Point pCoordinates(mc[i].x + 3, mc[i].y - 3); // Text's coordinates (A little bit off from mass center)
Scalar color = Scalar(rng.uniform(0, 255), rng.uniform(0, 255), rng.uniform(0, 255));
drawContours(drawing, contours, i, color, 2, 8, hierarchy, 0, Point());
circle(drawing, mc[i], 4, color, -1, 8, 0); // Draw mass center
putText(drawing, sObjectNumber, pCoordinates, CV_FONT_HERSHEY_COMPLEX, 1, color, 2, 8); // Write object number
sContourNumber.str(""); // Clear string
sContourNumber.clear(); // Clear any error flags
// imshow("Contours", drawing);
// waitKey();
}
double hu[7];
for (int i = 0; i < contours.size(); i++)
{
cout << "Contour: " << i << " Area: " << contourArea(contours[i]) << " Length: " << arcLength(contours[i], true) << "\n";
for (int j = 0; j < 7; j++)
{
HuMoments(mu[i], hu);
cout << "Contour: " << i << " Hu: " << j << " Result: " << hu[j] << "\n";
}
cout << "\n";
}
imshow("Contours", drawing);
waitKey(0);
return(0);
}
Many thanks to all!
Diego
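
A likely explanation, in case it helps: findContours() on a Canny output yields thin, often open and fragmented curves, and moments() on a point vector computes polygon moments via Green's theorem, which are unreliable for open fragments and change as rotation re-fragments the edges. Computing the moments directly on the filled Otsu mask (srcThresh, already computed above) avoids both problems; a sketch:

// Sketch: centroid from the filled Otsu mask instead of Canny fragments.
// moments() on a binary image integrates over all foreground pixels, so the
// centroid no longer depends on how Canny breaks up the outline.
Moments m = moments(srcThresh, true); // true: treat input as a binary image
Point2f massCenter((float)(m.m10 / m.m00), (float)(m.m01 / m.m00));
circle(src, massCenter, 4, Scalar(0, 0, 255), -1);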