I'm working on a project where a high-quality pose estimate is needed, so I am trying to get this pose estimate using an OpenCV ChArUco board. Previously I had been using an ArUco board of size 2x2, but the pose estimate was not sufficient.
I have made the ChArUco estimate work using a RealSense D415 camera at a resolution of 640x480. However, when I change the resolution to 1280x720, the coordinate system which I draw on the board starts jumping around completely at random.
The code for estimating the ChArUco board pose is here:
void ReconstructionSystem::detect_charuco_markers(cv::Mat& image, cv::Matx33f& matrix, cv::Vec<float, 5>& coef, int& centerPix_x, int& centerPix_y, cv::Vec3d& rotation, bool& arucoFound)
{
cv::Ptr<cv::aruco::Dictionary> dictionary = cv::aruco::getPredefinedDictionary(cv::aruco::DICT_4X4_50);
cv::Ptr<cv::aruco::CharucoBoard> board = cv::aruco::CharucoBoard::create(3, 3, 0.04f, 0.02f, dictionary);
cv::Ptr<cv::aruco::DetectorParameters> params = cv::aruco::DetectorParameters::create();
//params->cornerRefinementMethod = cv::aruco::CORNER_REFINE_NONE;
std::vector<int> markerIds;
std::vector<std::vector<cv::Point2f>> markerCorners;
cv::Mat copyImage;
image.copyTo(copyImage);
cv::Mat gray;
cv::cvtColor(copyImage, gray, cv::COLOR_RGB2GRAY);
cv::aruco::detectMarkers(gray, board->getDictionary(), markerCorners, markerIds, params);
// if more than three markers are detected
if (markerIds.size() > 3) {
cv::aruco::drawDetectedMarkers(image, markerCorners, markerIds);
std::vector<cv::Point2f> charucoCorners;
std::vector<int> charucoIds;
cv::aruco::interpolateCornersCharuco(markerCorners, markerIds, gray, board, charucoCorners, charucoIds, matrix, coef);
// if more than three ChArUco corners are detected
if (charucoIds.size() > 3) {
cv::Scalar color = cv::Scalar(255, 0, 0);
cv::aruco::drawDetectedCornersCharuco(image, charucoCorners, charucoIds, color);
cv::Vec3d rvec, tvec;
bool valid = cv::aruco::estimatePoseCharucoBoard(charucoCorners, charucoIds, board, matrix, coef, rvec, tvec);
// if charuco pose is valid
if (valid){
cv::drawFrameAxes(image, matrix, coef, rvec, tvec, 0.1f);
arucoFound = true;
}
else
{
arucoFound = false;
}
}
else
{
arucoFound = false;
}
}
else
{
arucoFound = false;
}
board = NULL;
dictionary = NULL;
copyImage.release();
gray.release();
}
The function above is called within this while loop:
//Variables for transformation matrices
int centerPix_x = 0, centerPix_y = 0;
cv::Vec3d rotationVec;
cv::Matx33f rotation;
bool arucoWasFound = false;
std::vector<float> final_x, final_y, final_z;
std::vector<float> rotation_x, rotation_y, rotation_z;
cv::Matx33f matrix = get_cameraMatrix(path);
cv::Vec<float, 5> coef = get_distCoeffs(path);
const auto window_name = "Validation image";
cv::namedWindow(window_name, cv::WINDOW_AUTOSIZE);
// TODO Also add here that if we have iterated through X frames and not found Aruco, exit with failure
while (cv::waitKey(1) < 0 && cv::getWindowProperty(window_name, cv::WND_PROP_AUTOSIZE) >= 0 && counter < 60) {
rs2::frame f = sensorPtr->color_data.wait_for_frame();
// Query frame size (width and height)
const int w = f.as<rs2::video_frame>().get_width();
const int h = f.as<rs2::video_frame>().get_height();
cv::Mat image(cv::Size(w, h), CV_8UC3, (void*)f.get_data(), cv::Mat::AUTO_STEP);
cv::cvtColor(image, image, cv::COLOR_RGB2BGR);
//detect_aruco_markers(image, matrix, coef, centerPix_x, centerPix_y, rotationVec, arucoWasFound);
detect_charuco_markers(image, matrix, coef, centerPix_x, centerPix_y, rotationVec, arucoWasFound);
if (arucoWasFound)
{
rs2::depth_frame depth = sensorPtr->depth_data.wait_for_frame();
rs2_intrinsics intrinsic = rs2::video_stream_profile(depth.get_profile()).get_intrinsics();
float pixel_distance_in_meters = depth.get_distance(centerPix_x, centerPix_y);
float InputPixelAsFloat[2];
InputPixelAsFloat[0] = centerPix_x;
InputPixelAsFloat[1] = centerPix_y;
float finalDepthPoint[3];
rs2_deproject_pixel_to_point(finalDepthPoint, &intrinsic, InputPixelAsFloat, pixel_distance_in_meters);
// Postion //
final_x.push_back(finalDepthPoint[0]);
final_y.push_back(finalDepthPoint[1]);
final_z.push_back(finalDepthPoint[2]);
// Rotation //
rotation_x.push_back(rotationVec[0]);
rotation_y.push_back(rotationVec[1]);
rotation_z.push_back(rotationVec[2]);
counter++;
}
cv::imshow(window_name, image);
}
cv::destroyWindow(window_name);
Furthermore, here is an image of the detection using a resolution of 1280x720.
And here is an image of the detection with a resolution of 640x480.
If anybody knows why this is happening, please let me know :D
As pointed out, the problem was that the calibration of the cameras had been done at the wrong resolution, in my case 640x480 instead of 1280x720. Below is the code I used to calculate the calibration matrix and coefficients. The line with the wrong values was: cv::Size frameSize(_imageWidth, _imageWidth); (the second argument should have been _imageHeight, and both must match the resolution the camera actually runs at).
void ReconstructionSystem::camera_calibration()
{
std::string folder_501("\\Users\\Mikke\\Desktop\\Calibration\\501_images\\*.png");
std::string folder_309("\\Users\\Mikke\\Desktop\\Calibration\\309_images\\*.png");
for (int x = 0; x < 2; x++)
{
std::vector<cv::String> filenames;
std::string currentCam;
if (x == 0) currentCam = folder_501;
if (x == 1) currentCam = folder_309;
cv::glob(currentCam, filenames, false);
for (const std::string& var : filenames)
{
printf("file: %s\n", var.c_str());
}
cv::Size patterSize(9, 6);
std::vector<std::vector<cv::Point2f>> q(filenames.size());
std::vector<std::vector<cv::Point3f>> Q;
int checkerboard[2] = { 10, 7 }; //size of checkerboard
int square_size = 27; //2.7 cm == 27mm
std::vector<cv::Point3f> objp;
for (int i = 1; i < checkerboard[1]; i++) {
for (int j = 1; j < checkerboard[0]; j++) {
objp.push_back(cv::Point3f(j * square_size, i * square_size, 0));
}
}
std::vector<cv::Point2f> imgPoint;
std::size_t i = 0;
for (auto const& f : filenames) {
std::cout << std::string(f) << std::endl;
cv::Mat img = cv::imread(filenames[i]);
cv::Mat gray;
cv::cvtColor(img, gray, cv::COLOR_RGB2GRAY);
bool patternFound = cv::findChessboardCorners(gray, patterSize, q[i], cv::CALIB_CB_ADAPTIVE_THRESH + cv::CALIB_CB_NORMALIZE_IMAGE + cv::CALIB_CB_FAST_CHECK);
if (patternFound) {
cv::cornerSubPix(gray, q[i], cv::Size(11, 11), cv::Size(-1, -1), cv::TermCriteria(cv::TermCriteria::EPS + cv::TermCriteria::MAX_ITER, 30, 0.001));
Q.push_back(objp);
}
// Display
cv::drawChessboardCorners(img, patterSize, q[i], patternFound);
cv::imshow("chessboard detection", img);
cv::waitKey(0);
i++;
}
cv::Matx33f K(cv::Matx33f::eye());
cv::Vec<float, 5> k(0, 0, 0, 0, 0);
std::vector<cv::Mat> rvecs, tvecs;
std::vector<double> stdIntrinsics, stdExtrinsics, perViewErrors;
int flags = cv::CALIB_FIX_ASPECT_RATIO + cv::CALIB_FIX_K3 + cv::CALIB_ZERO_TANGENT_DIST + cv::CALIB_FIX_PRINCIPAL_POINT;
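// NOTE: this is the line that was wrong -- the second argument should be _imageHeight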
cv::Size frameSize(_imageWidth, _imageWidth);
std::cout << "calibrating..." << std::endl;
float error = cv::calibrateCamera(Q, q, frameSize, K, k, rvecs, tvecs, flags);
std::cout << "reprojection error = " << error << "\nK = \n" << K << "\nk=\n" << k << std::endl;
if (x == 0) {
std::string path_mat = "\\Users\\Mikke\\Desktop\\Calibration\\104122061501\\calibration_Mat_new.yml";
std::string path_coe = ("\\Users\\Mikke\\Desktop\\Calibration\\104122061501\\calibration_coef_new.yml");
saveData_mat(path_mat, K);
saveData_coef(path_coe, k);
}
if (x == 1) {
std::string path_mat = "\\Users\\Mikke\\Desktop\\Calibration\\102122061309\\calibration_Mat_new.yml";
std::string path_coe = ("\\Users\\Mikke\\Desktop\\Calibration\\102122061309\\calibration_coef_new.yml");
saveData_mat(path_mat, K);
saveData_coef(path_coe, k);
}
}
}
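As a side note (not part of the original code): the camera matrix is tied to the resolution it was calibrated at, so a matrix estimated from 640x480 images will not fit 1280x720 frames. If recalibrating at the new resolution is not practical, the focal lengths and principal point can in principle be scaled by the resolution ratio, while the distortion coefficients stay unchanged. Below is a minimal sketch of such a helper (rescaleCameraMatrix is a hypothetical name, not part of the project). Note that this only holds if both modes share the same field of view, which is not guaranteed on the D415, so a separate calibration per resolution remains the safer option.
// Hypothetical helper: rescale intrinsics calibrated at (calibW, calibH)
// to a new capture resolution (newW, newH). Distortion coefficients are
// left unchanged because they are expressed in normalized coordinates.
cv::Matx33f rescaleCameraMatrix(const cv::Matx33f& K, int calibW, int calibH, int newW, int newH)
{
    const float sx = static_cast<float>(newW) / calibW;
    const float sy = static_cast<float>(newH) / calibH;
    cv::Matx33f Knew = K;
    Knew(0, 0) *= sx; // fx
    Knew(0, 2) *= sx; // cx
    Knew(1, 1) *= sy; // fy
    Knew(1, 2) *= sy; // cy
    return Knew;
}
// Usage sketch: cv::Matx33f matrix720 = rescaleCameraMatrix(matrix, 640, 480, 1280, 720);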
Related
I tried to perform object detection using a YOLOv5 model with C++. I have a custom-trained YOLOv5 model which works perfectly fine in Python, but my whole setup is in C++, so I have to switch. I converted the yolov5s model into ONNX format and tried to run it as in "https://github.com/doleron/yolov4-opencv-cpp-python". Unfortunately, I'm getting multiple bounding boxes in the top-left corner, as in the image.
I don't know how to eliminate this kind of error, but when I used the built-in pre-trained yolov5s model, the C++ code detected objects and worked perfectly. Similarly, when I used the custom-trained model in Python, it worked perfectly.
Here is my C++ code for object detection:
#include <fstream>
#include <sstream>
#include <iomanip>
#include <chrono>
#include <cstring>
#include <opencv2/opencv.hpp>
std::vector<std::string> load_class_list()
{
std::vector<std::string> class_list;
std::ifstream ifs("config_files/classes.txt");
std::string line;
while (getline(ifs, line))
{
class_list.push_back(line);
}
return class_list;
}
void load_net(cv::dnn::Net &net, bool is_cuda)
{
auto result = cv::dnn::readNet("config_files/yolov5s_custom.onnx");
if (is_cuda)
{
std::cout << "Attempty to use CUDA\n";
result.setPreferableBackend(cv::dnn::DNN_BACKEND_CUDA);
result.setPreferableTarget(cv::dnn::DNN_TARGET_CUDA_FP16);
}
else
{
std::cout << "Running on CPU\n";
result.setPreferableBackend(cv::dnn::DNN_BACKEND_OPENCV);
result.setPreferableTarget(cv::dnn::DNN_TARGET_CPU);
}
net = result;
}
const std::vector<cv::Scalar> colors = {cv::Scalar(255, 255, 0), cv::Scalar(0, 255, 0), cv::Scalar(0, 255, 255), cv::Scalar(255, 0, 0)};
const float INPUT_WIDTH = 640.0;
const float INPUT_HEIGHT = 640.0;
const float SCORE_THRESHOLD = 0.2;
const float NMS_THRESHOLD = 0.4;
const float CONFIDENCE_THRESHOLD = 0.4;
struct Detection
{
int class_id;
float confidence;
cv::Rect box;
};
cv::Mat format_yolov5(const cv::Mat &source) {
int col = source.cols;
int row = source.rows;
int _max = MAX(col, row);
cv::Mat result = cv::Mat::zeros(_max, _max, CV_8UC3);
source.copyTo(result(cv::Rect(0, 0, col, row)));
return result;
}
void detect(cv::Mat &image, cv::dnn::Net &net, std::vector<Detection> &output, const std::vector<std::string> &className) {
cv::Mat blob;
auto input_image = format_yolov5(image);
cv::dnn::blobFromImage(input_image, blob, 1./255., cv::Size(INPUT_WIDTH, INPUT_HEIGHT), cv::Scalar(), true, false);
net.setInput(blob);
std::vector<cv::Mat> outputs;
net.forward(outputs, net.getUnconnectedOutLayersNames());
float x_factor = input_image.cols / INPUT_WIDTH;
float y_factor = input_image.rows / INPUT_HEIGHT;
float *data = (float *)outputs[0].data;
const int dimensions = 85;
const int rows = 25200;
std::vector<int> class_ids;
std::vector<float> confidences;
std::vector<cv::Rect> boxes;
for (int i = 0; i < rows; ++i) {
float confidence = data[4];
if (confidence >= CONFIDENCE_THRESHOLD) {
float * classes_scores = data + 5;
cv::Mat scores(1, className.size(), CV_32FC1, classes_scores);
cv::Point class_id;
double max_class_score;
minMaxLoc(scores, 0, &max_class_score, 0, &class_id);
if (max_class_score > SCORE_THRESHOLD) {
confidences.push_back(confidence);
class_ids.push_back(class_id.x);
float x = data[0];
float y = data[1];
float w = data[2];
float h = data[3];
int left = int((x - 0.5 * w) * x_factor);
int top = int((y - 0.5 * h) * y_factor);
int width = int(w * x_factor);
int height = int(h * y_factor);
boxes.push_back(cv::Rect(left, top, width, height));
}
}
data += 85;
}
std::vector<int> nms_result;
cv::dnn::NMSBoxes(boxes, confidences, SCORE_THRESHOLD, NMS_THRESHOLD, nms_result);
for (int i = 0; i < nms_result.size(); i++) {
int idx = nms_result[i];
Detection result;
result.class_id = class_ids[idx];
result.confidence = confidences[idx];
result.box = boxes[idx];
output.push_back(result);
}
}
int main(int argc, char **argv)
{
std::vector<std::string> class_list = load_class_list();
cv::Mat frame;
cv::VideoCapture capture("sample.mp4");
if (!capture.isOpened())
{
std::cerr << "Error opening video file\n";
return -1;
}
bool is_cuda = argc > 1 && strcmp(argv[1], "cuda") == 0;
cv::dnn::Net net;
load_net(net, is_cuda);
auto start = std::chrono::high_resolution_clock::now();
int frame_count = 0;
float fps = -1;
int total_frames = 0;
while (true)
{
capture.read(frame);
if (frame.empty())
{
std::cout << "End of stream\n";
break;
}
std::vector<Detection> output;
detect(frame, net, output, class_list);
frame_count++;
total_frames++;
int detections = output.size();
for (int i = 0; i < detections; ++i)
{
auto detection = output[i];
auto box = detection.box;
auto classId = detection.class_id;
const auto color = colors[classId % colors.size()];
cv::rectangle(frame, box, color, 3);
cv::rectangle(frame, cv::Point(box.x, box.y - 20), cv::Point(box.x + box.width, box.y), color, cv::FILLED);
cv::putText(frame, class_list[classId].c_str(), cv::Point(box.x, box.y - 5), cv::FONT_HERSHEY_SIMPLEX, 0.5, cv::Scalar(0, 0, 0));
}
if (frame_count >= 30)
{
auto end = std::chrono::high_resolution_clock::now();
fps = frame_count * 1000.0 / std::chrono::duration_cast<std::chrono::milliseconds>(end - start).count();
frame_count = 0;
start = std::chrono::high_resolution_clock::now();
}
if (fps > 0)
{
std::ostringstream fps_label;
fps_label << std::fixed << std::setprecision(2);
fps_label << "FPS: " << fps;
std::string fps_label_str = fps_label.str();
cv::putText(frame, fps_label_str.c_str(), cv::Point(10, 25), cv::FONT_HERSHEY_SIMPLEX, 1, cv::Scalar(0, 0, 255), 2);
}
cv::imshow("output", frame);
if (cv::waitKey(1) != -1)
{
capture.release();
std::cout << "finished by user\n";
break;
}
}
std::cout << "Total frames: " << total_frames << "\n";
return 0;
}
Kindly guide me on how to eliminate these multiple boxes on the output video stream.
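One detail worth double-checking in the detect() loop above: the row stride is hardcoded as 85 (4 box values + 1 objectness score + 80 COCO classes) and data is advanced by 85 per row, which only matches the stock 80-class model. For a custom-trained model the stride is 5 + number of classes, so with a different class count the pointer walks out of alignment. Here is a hedged sketch of deriving the row count and stride from the output tensor instead of hardcoding them, assuming the usual YOLOv5 export shape of 1 x rows x (5 + num_classes):
// Inside detect(), after net.forward(outputs, net.getUnconnectedOutLayersNames()):
// outputs[0] is expected to have shape [1, rows, 5 + num_classes].
const int rows = outputs[0].size[1];        // e.g. 25200 for a 640x640 input
const int dimensions = outputs[0].size[2];  // 5 + number of classes
float* data = (float*)outputs[0].data;
for (int i = 0; i < rows; ++i) {
    float confidence = data[4];
    if (confidence >= CONFIDENCE_THRESHOLD) {
        float* classes_scores = data + 5;
        cv::Mat scores(1, dimensions - 5, CV_32FC1, classes_scores);
        // ... same class-id / score / box handling as in the original loop ...
    }
    data += dimensions;                     // advance by the actual stride
}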
I tried to calibrate an iPhone 11 Pro camera, and the OpenCV calibrateCamera() function returns an error of more than 1.2 (the same for computeReprojectionErrors() below). I want it to be lower than 0.5, because after this I am building a disparity map and I need it to be smooth. How can I minimize it? I took 90 pictures of the board from different angles, which are stored in ../chess_image. Here is the code: github.com/markilebyak/opencv.git
Also here (4shared.com/folder/PSDaeTcI/opencv.html) are 36 of the 90 images I used, and 34 grayscale images with detected lines from those 36.
void createKnownBoardPosition(Size &boardSize, float squareEdgeLength, vector<Point3f>& corners) {
for (int i = 0; i < boardSize.height; i++) {
for (int j = 0; j < boardSize.width; j++) {
corners.emplace_back((float)j * squareEdgeLength, (float)i * squareEdgeLength, 0.0f);
}
}
}
void process_images(std::string &path_to_directory, Size &chessboard_size, vector<vector<Point2f>>& allFoundCorners) {
// Function to iterate through images
// taken for camera calibration
vector<String> filenames;
cv::glob(path_to_directory, filenames);
vector<Point2f> pointBuffer;
int counter = 0;
for (const auto & filename : filenames) {
if (filename != "../chess_images/.DS_Store"){
Mat im = imread(filename);
std::cout << filename << std::endl;
Mat gray_im;
cvtColor(im, gray_im, COLOR_BGR2GRAY);
bool found = findChessboardCorners(gray_im, chessboard_size, pointBuffer,
CALIB_CB_ADAPTIVE_THRESH | CALIB_CB_NORMALIZE_IMAGE);
if (found) {
counter++;
TermCriteria criteria = TermCriteria( TermCriteria::EPS + TermCriteria::MAX_ITER,
30,0.0001 );
Size winSize = Size( 11, 11);
Size zeroZone = Size( -1, -1 );
//cornerSubPix is the algorithm focused on relocating the points. it receives the image, the corners
// a window size, zeroZone and the actual criteria. The window size is the search area.
cornerSubPix(gray_im, pointBuffer, winSize, zeroZone, criteria );
drawChessboardCorners( gray_im, chessboard_size, Mat(pointBuffer), found );
string nametext = "../drawn_chessboard/" + to_string(counter) + ".jpg";
imwrite(nametext, gray_im);
allFoundCorners.push_back(pointBuffer);
}
}
}
std::cout << "Images used: " << counter << std::endl;
}
double computeReprojectionErrors( const vector<vector<Point3f> >& objectPoints,
const vector<vector<Point2f> >& imagePoints,
const vector<Mat>& rvecs, const vector<Mat>& tvecs,
const Mat& cameraMatrix , const Mat& distCoeffs,
vector<float>& perViewErrors, bool fisheye) {
vector<Point2f> imagePoints2;
size_t totalPoints = 0;
double totalErr = 0, err;
perViewErrors.resize(objectPoints.size());
for(size_t i = 0; i < objectPoints.size(); ++i )
{
if (fisheye)
{
fisheye::projectPoints(objectPoints[i], imagePoints2, rvecs[i], tvecs[i], cameraMatrix,
distCoeffs);
}
else
{
projectPoints(objectPoints[i], rvecs[i], tvecs[i], cameraMatrix, distCoeffs, imagePoints2);
}
err = norm(imagePoints[i], imagePoints2, NORM_L2);
size_t n = objectPoints[i].size();
perViewErrors[i] = (float) std::sqrt(err*err/n);
totalErr += err*err;
totalPoints += n;
}
return std::sqrt(totalErr/totalPoints);
}
void calibration_process(Size &boardSize, float &squareEdgeLength, Mat& cameraMatrix, Mat& distortionCoefficients,
std::string &path_to_directory) {
vector<vector<Point2f>> checkerboardImageSpacePoints;
process_images(path_to_directory, boardSize, checkerboardImageSpacePoints);
vector<vector<Point3f>> worldSpaceCornerPoints(1);
createKnownBoardPosition(boardSize, squareEdgeLength, worldSpaceCornerPoints[0]);
worldSpaceCornerPoints.resize(checkerboardImageSpacePoints.size(), worldSpaceCornerPoints[0]);
//rotation and translation vectors (rVectors, tVectors)
vector<Mat> rVectors, tVectors;
distortionCoefficients = Mat::zeros(8, 1, CV_64F);
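// Note: calibrateCamera's third argument is the image size in pixels; here the
// chessboard pattern size (6 x 9) is passed instead, which likely hurts the calibration.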
calibrateCamera(worldSpaceCornerPoints, checkerboardImageSpacePoints, boardSize, cameraMatrix,
distortionCoefficients, rVectors, tVectors, CALIB_FIX_K5 + CALIB_FIX_INTRINSIC);
vector<float> perViewErrors;
double rep_error = computeReprojectionErrors(worldSpaceCornerPoints, checkerboardImageSpacePoints, rVectors, tVectors, cameraMatrix,
distortionCoefficients, perViewErrors, false);
std::cout << "Reprojection Error: " << rep_error << std::endl;
}
int main() {
Mat cameraMatrix, distortionCoefficient;
Size chessboardDimensions = Size(6, 9);
float calibrationSquareDimension = 0.03f;
std::string path = "../chess_images";
calibration_process(chessboardDimensions, calibrationSquareDimension, cameraMatrix,
distortionCoefficient, path);
return 0;
}
I use a panorama stitching algorithm from OpenCV in order to stitch 2 or 3 images into one new result image.
I have coordinates of points in each source image. I need to calculate the new coordinates of these points in the result image.
I describe the algorithm below. My code is similar to the "stitching_detailed" sample from OpenCV (branch 3.4). A result_mask of type Mat is produced; maybe it is the solution, but I don't know how to use it. I found a related question here, but not about stitching.
Any idea?
Here is the algorithm (for detailed code: stitching_detailed.cpp):
Find features for each image:
Ptr<FeaturesFinder> finder = makePtr<SurfFeaturesFinder>()
vector<ImageFeatures> features(num_images);
for (int i = 0; i < num_images; ++i)
{
(*finder)(images[i], features[i]);
}
Make pairwise_matches:
vector<MatchesInfo> pairwise_matches;
Ptr<FeaturesMatcher> matcher = makePtr<BestOf2NearestMatcher>(false, match_conf);
(*matcher)(features, pairwise_matches);
Reorder the images:
vector<int> indices = leaveBiggestComponent(features, pairwise_matches, conf_thresh);
// here some code to reorder 'images'
Estimate an homography in cameras:
vector<CameraParams> cameras;
Ptr<Estimator> estimator = makePtr<HomographyBasedEstimator>();
(*estimator)(features, pairwise_matches, cameras);
Convert to CV_32F:
for (size_t i = 0; i < cameras.size(); ++i)
{
Mat R;
cameras[i].R.convertTo(R, CV_32F);
cameras[i].R = R;
}
Execute a BundleAdjuster:
Ptr<detail::BundleAdjusterBase> adjuster = makePtr<detail::BundleAdjusterRay>();
adjuster->setConfThresh(conf_thresh);
adjuster->setRefinementMask(refine_mask);
(*adjuster)(features, pairwise_matches, cameras);
Compute a value for warped_image_scale:
for (int i = 0; i < cameras.size(); ++i)
focals.push_back(cameras[i].focal);
float warped_image_scale = static_cast<float>(focals[focals.size() / 2 - 1] + focals[focals.size() / 2]) * 0.5f;
Do wave correction:
vector<Mat> rmats;
for (size_t i = 0; i < cameras.size(); ++i)
rmats.push_back(cameras[i].R.clone());
waveCorrect(rmats, wave_correct);
for (size_t i = 0; i < cameras.size(); ++i)
cameras[i].R = rmats[i];
Create a warper:
Ptr<WarperCreator> warper_creator = makePtr<cv::SphericalWarper>();
Ptr<RotationWarper> warper = warper_creator->create(static_cast<float>(warped_image_scale * seam_work_aspect));
Create a blender and feed it:
Ptr<Blender> blender;
for (size_t img_idx = 0; img_idx < cameras.size(); ++img_idx)
{
full_img = input_imgs[img_idx];
if (!is_compose_scale_set)
{
is_compose_scale_set = true;
compose_scale = /* … */
}
if (abs(compose_scale - 1) > 1e-1)
resize(full_img, img, Size(), compose_scale, compose_scale, INTER_LINEAR_EXACT);
else
img = full_img;
// Warp the current image
warper->warp(img, K, cameras[img_idx].R, INTER_LINEAR, BORDER_REFLECT, img_warped);
// Warp the current image mask
mask.create(img_size, CV_8U);
mask.setTo(Scalar::all(255));
warper->warp(mask, K, cameras[img_idx].R, INTER_NEAREST, BORDER_CONSTANT, mask_warped);
// Compensate exposure
compensator->apply(img_idx, corners[img_idx], img_warped, mask_warped);
dilate(masks_warped[img_idx], dilated_mask, Mat());
resize(dilated_mask, seam_mask, mask_warped.size(), 0, 0, INTER_LINEAR_EXACT);
mask_warped = seam_mask & mask_warped;
if (!blender)
{
blender = Blender::createDefault(blend_type, try_gpu);
Size dst_sz = resultRoi(corners, sizes).size();
float blend_width = sqrt(static_cast<float>(dst_sz.area())) * blend_strength / 100.f;
MultiBandBlender *mb = dynamic_cast<MultiBandBlender *>(blender.get());
mb->setNumBands(static_cast<int>(ceil(log(blend_width) / log(2.)) - 1.));
blender->prepare(corners, sizes);
}
// Blend the current image
blender->feed(img_warped_s, mask_warped, corners[img_idx]);
}
Then, use the blender:
Mat result, result_mask;
blender->blend(result, result_mask);
// The result image is in 'result'
When I was a schoolboy, I found opencv/samples/cpp/stitching_detailed.cpp in the OpenCV samples folder. At that time, my programming skills were very poor, and I couldn't understand it even though I racked my brains. This question attracted my attention and aroused my memory. After a whole night of hard work and debugging, I finally got it.
Basic steps:
Given the three images: blue.png, green.png, and red.png
We can get the stitching result (result.png) using stitching_detailed.cpp.
blender->blend(result, result_mask);
imwrite("result.png", result);
imwrite("result_mask.png", result_mask);
I choose the centers of the three images, calculate the corresponding (warped) coordinates on the stitched image, and draw them as solid circles, as follows:
Warping images (auxiliary)...
Compensating exposure...
Blending ...
Warp each center point, and draw solid circle.
[408, 204] => [532, 224]
[408, 204] => [359, 301]
[408, 204] => [727, 320]
Check `result.png`, `result_mask.png` and `result2.png`!
Done!
This is the function calcWarpedPoint I wrote to calculate the warped point on the stitching image:
cv::Point2f calcWarpedPoint(
const cv::Point2f& pt,
InputArray K, // Camera K parameter
InputArray R, // Camera R parameter
Ptr<RotationWarper> warper, // The Rotation Warper
const std::vector<cv::Point> &corners,
const std::vector<cv::Size> &sizes)
{
// Calculate the warped point using the camera parameters.
cv::Point2f dst = warper->warpPoint(pt, K, R);
// Calculate the stitching image roi using corners and sizes.
// the corners and sizes have already been calculated.
cv::Point2f tl = cv::detail::resultRoi(corners, sizes).tl();
// Finally adjust the warped point to the stitching image.
return cv::Point2f(dst.x - tl.x, dst.y - tl.y);
}
This is an example code snippet:
std::cout << "\nWarp each center point, and draw solid circle.\n";
std::vector<cv::Scalar> colors = { {255,0,0}, {0, 255, 0}, {0, 0, 255} };
for (int idx = 0; idx < img_names.size(); ++idx) {
img = cv::imread(img_names[idx]);
Mat K;
cameras[idx].K().convertTo(K, CV_32F);
Mat R = cameras[idx].R;
cv::Point2f cpt = cv::Point2f(img.cols / 2, img.rows / 2);
cv::Point pt = calcWarpedPoint(cpt, K, R, warper, corners, sizes);
cv::circle(result, pt, 5, colors[idx], -1, cv::LINE_AA);
std::cout << cpt << " => " << pt << std::endl;
}
std::cout << "\nCheck `result.png`, `result_mask.png` and `result2.png`!\n";
imwrite("result2.png", result);
The full code:
/*
* Author : Kinght-金(https://stackoverflow.com/users/3547485/)
* Created : 2019/03/01 23:00 (CST)
* Finished : 2019/03/01 07:50 (CST)
*
* Modified on opencv401/samples/cpp/stitching_detailed.cpp
* From https://github.com/opencv/opencv/blob/4.0.1/samples/cpp/stitching_detailed.cpp
*
*
* Description: A simple opencv(4.0.1) image stitching code for Stack Overflow answers.
* For https://stackoverflow.com/questions/54904718/compute-coordinates-from-source-images-after-stitching/54953792#comment96681412_54953792
*
*/
#include <iostream>
#include <fstream>
#include <string>
#include "opencv2/opencv_modules.hpp"
#include <opencv2/core/utility.hpp>
#include "opencv2/imgcodecs.hpp"
#include "opencv2/highgui.hpp"
#include "opencv2/stitching/detail/autocalib.hpp"
#include "opencv2/stitching/detail/blenders.hpp"
#include "opencv2/stitching/detail/camera.hpp"
#include "opencv2/stitching/detail/exposure_compensate.hpp"
#include "opencv2/stitching/detail/matchers.hpp"
#include "opencv2/stitching/detail/motion_estimators.hpp"
#include "opencv2/stitching/detail/seam_finders.hpp"
#include "opencv2/stitching/detail/warpers.hpp"
#include "opencv2/stitching/warpers.hpp"
using namespace std;
using namespace cv;
using namespace cv::detail;
//! img_names are the input image (full) paths
// You can download them using the links below.
//! Blue: https://i.stack.imgur.com/Yz3U1.png
//! Green: https://i.stack.imgur.com/AbUTH.png
//! Red: https://i.stack.imgur.com/9wcGc.png
vector<String> img_names = {"D:/stitching/blue.png", "D:/stitching/green.png", "D:/stitching/red.png"};
//! The function to calculate the warped point on the stitching image.
cv::Point2f calcWarpedPoint(
const cv::Point2f& pt,
InputArray K, // Camera K parameter
InputArray R, // Camera R parameter
Ptr<RotationWarper> warper, // The Rotation Warper
const std::vector<cv::Point> &corners,
const std::vector<cv::Size> &sizes)
{
// Calculate the warped point
cv::Point2f dst = warper->warpPoint(pt, K, R);
// Calculate the stitching image roi using corners and sizes,
// the corners and sizes have already been calculated.
cv::Point2f tl = cv::detail::resultRoi(corners, sizes).tl();
// Finally adjust the warped point
return cv::Point2f(dst.x - tl.x, dst.y - tl.y);
}
int main(int argc, char* argv[])
{
double work_megapix = 0.6;
double seam_megapix = 0.1;
double compose_megapix = -1;
float conf_thresh = 1.f;
float match_conf = 0.3f;
float blend_strength = 5;
// Check if have enough images
int num_images = static_cast<int>(img_names.size());
if (num_images < 2)
{
std::cout << "Need more images\n";
return -1;
}
double work_scale = 1, seam_scale = 1, compose_scale = 1;
bool is_work_scale_set = false, is_seam_scale_set = false, is_compose_scale_set = false;
//(1) Create the feature finder
Ptr<Feature2D> finder = ORB::create();
// (2) Read the images, scale them appropriately, and compute their feature descriptors
Mat full_img, img;
vector<ImageFeatures> features(num_images);
vector<Mat> images(num_images);
vector<Size> full_img_sizes(num_images);
double seam_work_aspect = 1;
for (int i = 0; i < num_images; ++i)
{
full_img = imread(img_names[i]);
full_img_sizes[i] = full_img.size();
if (full_img.empty())
{
cout << "Can't open image " << img_names[i] << std::endl;
return -1;
}
if (!is_work_scale_set)
{
work_scale = min(1.0, sqrt(work_megapix * 1e6 / full_img.size().area()));
is_work_scale_set = true;
}
resize(full_img, img, Size(), work_scale, work_scale, INTER_LINEAR_EXACT);
if (!is_seam_scale_set)
{
seam_scale = min(1.0, sqrt(seam_megapix * 1e6 / full_img.size().area()));
seam_work_aspect = seam_scale / work_scale;
is_seam_scale_set = true;
}
computeImageFeatures(finder, img, features[i]);
features[i].img_idx = i;
std::cout << "Features in image #" << i + 1 << ": " << features[i].keypoints.size() << std::endl;
resize(full_img, img, Size(), seam_scale, seam_scale, INTER_LINEAR_EXACT);
images[i] = img.clone();
}
full_img.release();
img.release();
// (3) Create the feature matcher and compute the match information
vector<MatchesInfo> pairwise_matches;
Ptr<FeaturesMatcher> matcher = makePtr<BestOf2NearestMatcher>(false, match_conf);
(*matcher)(features, pairwise_matches);
matcher->collectGarbage();
//! (4) Remove outliers and keep only the largest confident component
// Leave only images we are sure are from the same panorama
vector<int> indices = leaveBiggestComponent(features, pairwise_matches, conf_thresh);
vector<Mat> img_subset;
vector<String> img_names_subset;
vector<Size> full_img_sizes_subset;
for (size_t i = 0; i < indices.size(); ++i)
{
img_names_subset.push_back(img_names[indices[i]]);
img_subset.push_back(images[indices[i]]);
full_img_sizes_subset.push_back(full_img_sizes[indices[i]]);
}
images = img_subset;
img_names = img_names_subset;
full_img_sizes = full_img_sizes_subset;
// Check if we still have enough images
num_images = static_cast<int>(img_names.size());
if (num_images < 2)
{
std::cout << "Need more images\n";
return -1;
}
//!(5) Estimate the homography
Ptr<Estimator> estimator = makePtr<HomographyBasedEstimator>();
vector<CameraParams> cameras;
if (!(*estimator)(features, pairwise_matches, cameras))
{
cout << "Homography estimation failed.\n";
return -1;
}
for (size_t i = 0; i < cameras.size(); ++i)
{
Mat R;
cameras[i].R.convertTo(R, CV_32F);
cameras[i].R = R;
std::cout << "\nInitial camera intrinsics #" << indices[i] + 1 << ":\nK:\n" << cameras[i].K() << "\nR:\n" << cameras[i].R << std::endl;
}
//(6) Create the bundle adjuster
Ptr<detail::BundleAdjusterBase> adjuster = makePtr<detail::BundleAdjusterRay>();
adjuster->setConfThresh(conf_thresh);
Mat_<uchar> refine_mask = Mat::zeros(3, 3, CV_8U);
refine_mask(0, 0) = 1;
refine_mask(0, 1) = 1;
refine_mask(0, 2) = 1;
refine_mask(1, 1) = 1;
refine_mask(1, 2) = 1;
adjuster->setRefinementMask(refine_mask);
if (!(*adjuster)(features, pairwise_matches, cameras))
{
cout << "Camera parameters adjusting failed.\n";
return -1;
}
// Find median focal length
vector<double> focals;
for (size_t i = 0; i < cameras.size(); ++i)
{
focals.push_back(cameras[i].focal);
}
sort(focals.begin(), focals.end());
float warped_image_scale;
if (focals.size() % 2 == 1)
warped_image_scale = static_cast<float>(focals[focals.size() / 2]);
else
warped_image_scale = static_cast<float>(focals[focals.size() / 2 - 1] + focals[focals.size() / 2]) * 0.5f;
std::cout << "\nWarping images (auxiliary)... \n";
vector<Point> corners(num_images);
vector<UMat> masks_warped(num_images);
vector<UMat> images_warped(num_images);
vector<Size> sizes(num_images);
vector<UMat> masks(num_images);
// Prepare image masks
for (int i = 0; i < num_images; ++i)
{
masks[i].create(images[i].size(), CV_8U);
masks[i].setTo(Scalar::all(255));
}
// Warp images and their masks
Ptr<WarperCreator> warper_creator = makePtr<cv::CylindricalWarper>();
if (!warper_creator)
{
cout << "Can't create the warper \n";
return 1;
}
//! Create RotationWarper
Ptr<RotationWarper> warper = warper_creator->create(static_cast<float>(warped_image_scale * seam_work_aspect));
//! Calculate warped corners/sizes/mask
for (int i = 0; i < num_images; ++i)
{
Mat_<float> K;
cameras[i].K().convertTo(K, CV_32F);
float swa = (float)seam_work_aspect;
K(0, 0) *= swa; K(0, 2) *= swa;
K(1, 1) *= swa; K(1, 2) *= swa;
corners[i] = warper->warp(images[i], K, cameras[i].R, INTER_LINEAR, BORDER_REFLECT, images_warped[i]);
sizes[i] = images_warped[i].size();
warper->warp(masks[i], K, cameras[i].R, INTER_NEAREST, BORDER_CONSTANT, masks_warped[i]);
}
vector<UMat> images_warped_f(num_images);
for (int i = 0; i < num_images; ++i)
images_warped[i].convertTo(images_warped_f[i], CV_32F);
std::cout << "Compensating exposure... \n";
//! Compute exposure gains and compensate the images to reduce brightness differences
Ptr<ExposureCompensator> compensator = ExposureCompensator::createDefault(ExposureCompensator::GAIN_BLOCKS);
if (dynamic_cast<BlocksCompensator*>(compensator.get()))
{
BlocksCompensator* bcompensator = dynamic_cast<BlocksCompensator*>(compensator.get());
bcompensator->setNrFeeds(1);
bcompensator->setNrGainsFilteringIterations(2);
bcompensator->setBlockSize(32, 32);
}
compensator->feed(corners, images_warped, masks_warped);
Ptr<SeamFinder> seam_finder = makePtr<detail::GraphCutSeamFinder>(GraphCutSeamFinderBase::COST_COLOR);
seam_finder->find(images_warped_f, corners, masks_warped);
// Release unused memory
images.clear();
images_warped.clear();
images_warped_f.clear();
masks.clear();
Mat img_warped, img_warped_s;
Mat dilated_mask, seam_mask, mask, mask_warped;
Ptr<Blender> blender;
double compose_work_aspect = 1;
for (int img_idx = 0; img_idx < num_images; ++img_idx)
{
// Read image and resize it if necessary
full_img = imread(img_names[img_idx]);
if (!is_compose_scale_set)
{
is_compose_scale_set = true;
compose_work_aspect = compose_scale / work_scale;
// Update warped image scale
warped_image_scale *= static_cast<float>(compose_work_aspect);
warper = warper_creator->create(warped_image_scale);
// Update corners and sizes
for (int i = 0; i < num_images; ++i)
{
cameras[i].focal *= compose_work_aspect;
cameras[i].ppx *= compose_work_aspect;
cameras[i].ppy *= compose_work_aspect;
Size sz = full_img_sizes[i];
if (std::abs(compose_scale - 1) > 1e-1)
{
sz.width = cvRound(full_img_sizes[i].width * compose_scale);
sz.height = cvRound(full_img_sizes[i].height * compose_scale);
}
Mat K;
cameras[i].K().convertTo(K, CV_32F);
Rect roi = warper->warpRoi(sz, K, cameras[i].R);
corners[i] = roi.tl();
sizes[i] = roi.size();
}
}
if (abs(compose_scale - 1) > 1e-1)
resize(full_img, img, Size(), compose_scale, compose_scale, INTER_LINEAR_EXACT);
else
img = full_img;
full_img.release();
Size img_size = img.size();
Mat K, R;
cameras[img_idx].K().convertTo(K, CV_32F);
R = cameras[img_idx].R;
// Warp the current image : img => img_warped
warper->warp(img, K, cameras[img_idx].R, INTER_LINEAR, BORDER_REFLECT, img_warped);
// Warp the current image mask
mask.create(img_size, CV_8U);
mask.setTo(Scalar::all(255));
warper->warp(mask, K, cameras[img_idx].R, INTER_NEAREST, BORDER_CONSTANT, mask_warped);
compensator->apply(img_idx, corners[img_idx], img_warped, mask_warped);
img_warped.convertTo(img_warped_s, CV_16S);
img_warped.release();
img.release();
mask.release();
dilate(masks_warped[img_idx], dilated_mask, Mat());
resize(dilated_mask, seam_mask, mask_warped.size(), 0, 0, INTER_LINEAR_EXACT);
mask_warped = seam_mask & mask_warped;
if (!blender)
{
blender = Blender::createDefault(Blender::MULTI_BAND, false);
Size dst_sz = resultRoi(corners, sizes).size();
float blend_width = sqrt(static_cast<float>(dst_sz.area())) * blend_strength / 100.f;
if (blend_width < 1.f){
blender = Blender::createDefault(Blender::NO, false);
}
else
{
MultiBandBlender* mb = dynamic_cast<MultiBandBlender*>(blender.get());
mb->setNumBands(static_cast<int>(ceil(log(blend_width) / log(2.)) - 1.));
}
blender->prepare(corners, sizes);
}
blender->feed(img_warped_s, mask_warped, corners[img_idx]);
}
/* ===========================================================================*/
// Blend image
std::cout << "\nBlending ...\n";
Mat result, result_mask;
blender->blend(result, result_mask);
imwrite("result.png", result);
imwrite("result_mask.png", result_mask);
std::cout << "\nWarp each center point, and draw solid circle.\n";
std::vector<cv::Scalar> colors = { {255,0,0}, {0, 255, 0}, {0, 0, 255} };
for (int idx = 0; idx < img_names.size(); ++idx) {
img = cv::imread(img_names[idx]);
Mat K;
cameras[idx].K().convertTo(K, CV_32F);
Mat R = cameras[idx].R;
cv::Point2f cpt = cv::Point2f(img.cols / 2, img.rows / 2);
cv::Point pt = calcWarpedPoint(cpt, K, R, warper, corners, sizes);
cv::circle(result, pt, 5, colors[idx], -1, cv::LINE_AA);
std::cout << cpt << " => " << pt << std::endl;
}
std::cout << "\nCheck `result.png`, `result_mask.png` and `result2.png`!\n";
imwrite("result2.png", result);
std::cout << "\nDone!\n";
/* ===========================================================================*/
return 0;
}
Some links that may be useful:
stitching_detailed.cpp : https://github.com/opencv/opencv/blob/4.0.1/samples/cpp/stitching_detailed.cpp
waper->warp(), warpPoint(), warpRoi() https://github.com/opencv/opencv/blob/master/modules/stitching/src/warpers.cpp#L153
resultRoi() https://github.com/opencv/opencv/blob/master/modules/stitching/src/util.cpp#L116
Other links that may be interesting:
Converting opencv remap code from c++ to python
Split text lines in scanned document
How do I use the relationships between Flann matches to determine a sensible homography?
After upgrading from OpenCV 3.2 to OpenCV 3.4.1 I get a segmentation fault when running my program.
Full source code below (I'm not a C++ expert).
Running the program: ./txtbin input.png output.png
Hope someone can help :)
error
> Error: signal 11:
> #1 ./txtbin(_Z6bTracei+0x1c) [0x5596628d8e68]
> #2 /lib/x86_64-linux-gnu/libc.so.6(+0x33060) [0x7fa1b8481060]
> #3 /usr/local/lib/libopencv_world.so.3.4(+0xa3f7da) [0x7fa1b9ac97da]
> #4 /usr/local/lib/libopencv_world.so.3.4(+0xa2782d) [0x7fa1b9ab182d]
> #5 /usr/local/lib/libopencv_world.so.3.4(_ZN2cv7imwriteERKNS_6StringERKNS_11_InputArrayERKSt6vectorIiSaIiEE+0x8e) [0x7fa1b9ab4a4e]
> #6 ./txtbin(_ZN6Txtbin8binarizeEv+0x12d) [0x5596628d67c3]
> #7 ./txtbin(_ZN6Txtbin3runEv+0x698) [0x5596628d8c10]
> #8 ./txtbin(main+0x3fa) [0x5596628d92db]
> #9 /lib/x86_64-linux-gnu/libc.so.6(__libc_start_main+0xf1) [0x7fa1b846e2e1]
input.png
txtbin.cpp
/*
* Compile
* # g++ -rdynamic -std=c++11 txtbin.cpp -o txtbin `pkg-config opencv --cflags --libs`
*
* Get opencv version
* # pkg-config --modversion opencv
*/
#include <signal.h>
#include "../backtrace/backtrace.h"
#include "txtbin.hpp"
Backtrace b;
void bTrace(int sig){
b.trace(sig);
}
void usage(const std::string VERSION){
std::cout << "txtbin: " << VERSION << "\nOpenCV: " << CV_VERSION << "\n\nUsage: txtbin input [options] output\n"
"Options:\n"
"\t-w <number> -- Set max width (keeps aspect ratio)\n"
"\t-h <number> -- Set max height (keeps aspect ratio)\n"
"\t-c -- Crop text contours\n"
"\t-r -- Rotate contents (deskew)\n"
"\t-m <number> -- Set margins (%)\n"
"\t-b <number> -- Set blockside (pixel)\n"
"\t Default: 9 pixel\n"
"\t-t <number> -- Set threshold (%)\n"
"\t Default: 85 %\n"
"\t-v -- Verbose\n" << std::endl;
}
int main(int argc, char* argv[]){
signal(SIGSEGV, &bTrace);
Txtbin a;
try{
// Parse arguments
for(int i = 1; i < argc; i++){
std::string arg = std::string(argv[i]);
if(i == 1){
a.set_input(arg);
}
else if(arg == "-w"){
a.set_width(atoi(argv[++i]));
}
else if(arg == "-h"){
a.set_height(atoi(argv[++i]));
}
else if(arg == "-c"){
a.set_crop(true);
}
else if(arg == "-r"){
a.set_rotate(true);
}
else if(arg == "-m"){
a.set_margin(atoi(argv[++i]));
}
else if(arg == "-b"){
a.set_blockside(atoi(argv[++i]));
}
else if(arg == "-t"){
a.set_threshold(atoi(argv[++i]));
}
else if(arg == "-v"){
a.set_verbose(true);
}
else if(i == argc - 1){
a.set_output(arg);
}
else{
throw std::runtime_error("Argument '"+arg+"' is invalid");
}
}
a.run();
}
catch(std::exception& e){
std::cerr << "Error: " << e.what() << "\n" << std::endl;
usage(a.VERSION);
return 1;
}
return 0;
}
txtbin.h
struct textblock{
int left = 0;
int top = 0;
int right = 0;
int bottom = 0;
};
class Txtbin{
private:
std::string input = "";
std::string output = "output.png";
int max_width = 0;
int max_height = 0;
bool is_crop = false;
bool is_rotate = false;
float margin = 0;
int blockside = 9; // set greater for larger fonts in image and vice versa
bool is_verbose = false;
float contrast = 0.01; // set smaller for lower contrast image
float thresholding = 0.85;
void binarize ();
cv::Mat src;
cv::Mat calc_block_mean_variance(cv::Mat& img);
textblock detect_text_block (bool test_output);
void downsize ();
void crop (textblock coords);
void error (const std::string& s);
public:
Txtbin();
const std::string VERSION = "0.3.6";
void set_input (const std::string& s);
void set_output (const std::string& s);
void set_height (int h);
void set_width (int w);
void set_crop (bool c);
void set_rotate (bool r);
void set_margin (float m);
void set_blockside (int b);
void set_threshold (int t);
void set_verbose (bool v);
void run ();
};
txtbin.hpp
#include <iostream>
#include <fstream>
#include <chrono>
#include <boost/algorithm/string.hpp>
#include "/usr/local/include/opencv2/opencv.hpp"
#include "txtbin.h"
Txtbin::Txtbin(){}
void Txtbin::set_input(const std::string& s){
input = s;
}
void Txtbin::set_output(const std::string& s){
output = s;
}
void Txtbin::set_height(int h){
max_height = h;
}
void Txtbin::set_width(int w){
max_width = w;
}
void Txtbin::set_crop(bool c){
is_crop = c;
}
void Txtbin::set_rotate(bool r){
is_rotate = r;
}
void Txtbin::set_margin(float m){
margin = m;
}
void Txtbin::set_blockside(int b){
blockside = b;
}
void Txtbin::set_threshold(int t){
thresholding = t / 100.0f;
}
void Txtbin::set_verbose(bool v){
is_verbose = v;
}
void Txtbin::error(const std::string& s){
throw std::runtime_error(s);
}
void Txtbin::binarize(){
src.convertTo(src, CV_32FC1, 1.0 / 255.0);
cv::Mat res = calc_block_mean_variance(src);
imwrite(output, res * 255);
}
void Txtbin::crop(textblock coords){
if(coords.left < coords.right && coords.top < coords.bottom){
if(coords.left < 0){
coords.left = 0;
}
int crop_width = coords.right - coords.left;
int trim_width = coords.left + crop_width - src.cols;
if(trim_width > 0){
crop_width -= trim_width;
}
if(coords.top < 0){
coords.top = 0;
}
int crop_height = coords.bottom - coords.top;
int trim_height = coords.top + crop_height - src.rows;
if(trim_height > 0){
crop_height -= trim_height;
}
cv::Rect cut_rect = cv::Rect(coords.left, coords.top, crop_width, crop_height);
src = src(cut_rect);
}
else{
std::cout << "Warning: Invalid text block coordinates. Cropping is omitted!" << std::endl;
}
}
void Txtbin::downsize(){
float
width = src.cols,
height = src.rows,
scale = 0;
bool resized = false;
if(max_width > 0 && width > max_width){
scale = width / max_width;
width /= scale;
height /= scale;
resized = true;
}
if(max_height > 0 && height > max_height){
scale = height / max_height;
width /= scale;
height /= scale;
resized = true;
}
if(resized){
resize(src, src, cv::Size(round(width), round(height)));
}
}
textblock Txtbin::detect_text_block(bool test_output){
cv::Mat img = src.clone();
// downsample image and use it for processing
int multiplier = 2;
pyrDown(img, img);
textblock block;
block.left = img.cols;
block.top = img.rows;
int
rect_bottom,
rect_right;
// morphological gradient
cv::Mat grad;
cv::Mat morphKernel = getStructuringElement(cv::MORPH_ELLIPSE, cv::Size(3, 3));
morphologyEx(img, grad, cv::MORPH_GRADIENT, morphKernel);
// binarize
cv::Mat bw;
threshold(grad, bw, 0.0, 255.0, cv::THRESH_BINARY | cv::THRESH_OTSU);
// connect horizontally oriented regions
cv::Mat connected;
morphKernel = getStructuringElement(cv::MORPH_RECT, cv::Size(9, 1));
morphologyEx(bw, connected, cv::MORPH_CLOSE, morphKernel);
// find contours
cv::Mat mask = cv::Mat::zeros(bw.size(), CV_8UC1);
std::vector<std::vector<cv::Point> > contours;
std::vector<cv::Vec4i> hierarchy;
findContours(connected, contours, hierarchy, CV_RETR_CCOMP, CV_CHAIN_APPROX_SIMPLE, cv::Point(0, 0));
cv::Scalar color = cv::Scalar(0, 255, 0);
cv::Scalar color2 = cv::Scalar(0, 0, 255);
int thickness = 2;
if(test_output){
cv::cvtColor(img, img, CV_GRAY2BGR);
}
// filter contours
if(!hierarchy.empty()){
for(int idx = 0; idx >= 0; idx = hierarchy[idx][0]){
cv::Rect rect = boundingRect(contours[idx]);
cv::Mat maskROI(mask, rect);
maskROI = cv::Scalar(0, 0, 0);
// fill the contour
drawContours(mask, contours, idx, cv::Scalar(255, 255, 255), CV_FILLED);
// ratio of non-zero pixels in the filled region
double r = (double)countNonZero(maskROI) / (rect.width * rect.height);
// assume at least 25% of the area is filled if it contains text
if (r > 0.25 &&
(rect.height > 8 && rect.width > 8) // constraints on region size
// these two conditions alone are not very robust. better to use something
//like the number of significant peaks in a horizontal projection as a third condition
){
if(test_output){
rectangle(img, rect, color, thickness);
}
//rectangle(src, cv::Rect(rect.x * multiplier, rect.y * multiplier, rect.width * multiplier, rect.height * multiplier), color, thickness);
if(rect.y < block.top){
block.top = rect.y;
}
rect_bottom = rect.y + rect.height;
if(rect_bottom > block.bottom){
block.bottom = rect_bottom;
}
if(rect.x < block.left){
block.left = rect.x;
}
rect_right = rect.x + rect.width;
if(rect_right > block.right){
block.right = rect_right;
}
}
}
}
if(test_output){
rectangle(img, cv::Point(block.left, block.top), cv::Point(block.right, block.bottom), color2, thickness);
imwrite("test_text_contours.jpg", img);
}
//rectangle(src, cv::Point(block.left * multiplier, block.top * multiplier), cv::Point(block.right * multiplier, block.bottom * multiplier), color2, thickness);
block.left *= multiplier;
block.top *= multiplier;
block.right *= multiplier;
block.bottom *= multiplier;
return block;
}
cv::Mat Txtbin::calc_block_mean_variance(cv::Mat& img){
cv::Mat res;
cv::Mat I;
img.convertTo(I, CV_32FC1);
res = cv::Mat::zeros(img.rows / blockside, img.cols / blockside, CV_32FC1);
cv::Mat inpaintmask;
cv::Mat patch;
cv::Mat small_img;
cv::Scalar m, s;
for(int i = 0; i < img.rows - blockside; i += blockside){
for(int j = 0; j < img.cols - blockside; j += blockside){
patch = I(cv::Range(i, i + blockside + 1), cv::Range(j, j + blockside + 1));
meanStdDev(patch, m, s);
if(s[0] > contrast){
res.at<float>(i / blockside, j / blockside) = m[0];
}
else{
res.at<float>(i / blockside, j / blockside) = 0;
}
}
}
resize(I, small_img, res.size());
threshold(res, inpaintmask, 0.02, 1.0, cv::THRESH_BINARY);
cv::Mat inpainted;
small_img.convertTo(small_img, CV_8UC1, 255);
inpaintmask.convertTo(inpaintmask, CV_8UC1);
inpaint(small_img, inpaintmask, inpainted, 5, cv::INPAINT_TELEA);
resize(inpainted, res, img.size());
res.convertTo(res, CV_32FC1, 1.0 / 255.0);
res = 1.0 - res;
res = img + res;
threshold(res, res, thresholding, 1, cv::THRESH_BINARY);
return res;
}
void Txtbin::run(){
// Return error if input file is not defined
if(input == ""){
error("Input file not defined");
}
// Return error if input file is not found
else{
std::ifstream stream(input.c_str());
if(!stream.good()){
error("Input file not found");
}
}
// Return error if output file is not defined
if(output == ""){
error("Output file not defined");
}
// Return error if output file is not PNG
else{
std::string output_lc = output;
boost::to_lower(output_lc);
if(output_lc.substr(output_lc.find_last_of(".") + 1) != "png"){
error("Output file must be PNG");
}
}
bool test_output = false;
auto start = std::chrono::high_resolution_clock::now();
src = cv::imread(input, CV_LOAD_IMAGE_GRAYSCALE);
if(is_crop || is_verbose){
textblock coords = detect_text_block(test_output);
if(is_verbose){
std::cout << "Image dimensions: " << src.cols << " x " << src.rows << std::endl;
std::cout << "Text block coordinates: Left|Top " << coords.left << "," << coords.top << " Right|Bottom " << coords.right << "," << coords.bottom << std::endl;
}
if(is_crop){
crop(coords);
}
}
if(margin){
int border = src.cols * margin / 100;
copyMakeBorder(src, src, border, border, border, border, cv::BORDER_CONSTANT, cv::Scalar(255, 255, 255));
}
downsize();
binarize();
auto elapsed = std::chrono::high_resolution_clock::now() - start;
if(is_verbose){
std::cout << "Execution time: " << (std::chrono::duration_cast<std::chrono::microseconds>(elapsed).count() / 1000) << " ms" << std::endl;
}
}
backtrace.h
#include <stdio.h>
#include <execinfo.h>
#include <stdlib.h>
class Backtrace {
public:
Backtrace();
void trace(int sig);
};
Backtrace::Backtrace(){}
void Backtrace::trace(int sig){
void *trace[10];
char **messages = (char **)NULL;
int i, trace_size = 0;
trace_size = backtrace(trace, 10);
messages = backtrace_symbols(trace, trace_size);
fprintf(stderr, "Error: signal %d:\n", sig);
for(i=1; i<trace_size; ++i){
fprintf(stderr, "#%d %s\n", i, messages[i]);
}
exit(1);
}
The segfault does not occur with the following implementation of Txtbin::binarize.
void Txtbin::binarize(){
src.convertTo(src, CV_32FC1, 1.0 / 255.0);
cv::Mat res = calc_block_mean_variance(src) * 255;
imwrite(output, res);
}
With the prior definition of Txtbin::binarize, cv::imwrite sees the type as _InputArray::EXPR rather than as _InputArray::MAT, and passes an uninitialized value of img_vec to cv::imwrite_. The segfault results from an attempt to dereference that uninitialized value. The _InputArray::EXPR value comes from the return value of MatExpr operator * (a cv::MatExpr) being passed to _InputArray::_InputArray.
With the modified definition of Txtbin::binarize the return value of MatExpr operator * is passed to MatExpr::operator Mat() const, which passes it to MatOp_AddEx::assign, and the value passed to _InputArray::_InputArray is a cv::Mat rather than a cv::MatExpr. In short, it seems the assignment of the return value of MatExpr operator * prevents an incorrect type conversion (cv::imwrite expects a cv::InputArray, not a cv::Mat, for its second parameter).
It appears that newer versions of opencv are not susceptible to this crash. Prior to this commit, cv::imwrite called the getMat method, and this commit restored that call as the default behavior (the else clause), as a replacement for an overly specific conditional. I confirmed that recompiling opencv with that change to cv::imwrite prevents the crash even with the prior version of Txtbin::binarize (the one in the question), and that the return value of MatExpr operator * is passed to MatExpr::operator Mat() const, as in the modified definition of Txtbin::binarize due to the assignment.
In short, you can work around issue 15545, or you can install its fix.
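To make the distinction concrete, here is a minimal standalone sketch (not taken from the program above) of the two call shapes discussed; the only difference is whether the multiplication result is materialized into a cv::Mat before reaching cv::imwrite:
#include <opencv2/opencv.hpp>
int main()
{
    cv::Mat res(100, 100, CV_8UC1, cv::Scalar(100));
    // Shape 1: the cv::MatExpr produced by operator* is handed directly to
    // imwrite's InputArray parameter (the shape that crashed on OpenCV 3.4.1).
    cv::imwrite("direct.png", res * 2);
    // Shape 2: assigning to a cv::Mat first forces MatExpr::operator Mat(),
    // so imwrite receives a plain cv::Mat (the workaround described above).
    cv::Mat materialized = res * 2;
    cv::imwrite("materialized.png", materialized);
    return 0;
}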
EDIT: I found the cause of the problem: the fisheye::undistortImage() function was not working correctly. I replaced it with estimateNewCameraMatrixForUndistortRectify(), initUndistortRectifyMap(), and remap(), as in the original calibrate-camera example. Not perfect yet, but going in the right direction. Output image: http://imgur.com/a/Xm5vq
Mat output;
Mat newK;
Mat view, map1, map2;
Size newSize(1200, 1200);
Mat rview(newSize, frame.type());
//resize(rview, rview, newSize);
fisheye::estimateNewCameraMatrixForUndistortRectify(K, D, frame.size(), Matx33d::eye(), newK, 1);
fisheye::initUndistortRectifyMap(K, D, Matx33d::eye(), newK, frame.size(), CV_16SC2, map1, map2);
//fisheye::undistortImage(frame, output, K, D, identity);
remap(frame, rview, map1, map2, INTER_LINEAR);
imshow("Image View", rview);
imshow(window_name, frame);
if (waitKey(50) == 27) {
break;
}
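For what it's worth, a likely explanation (my assumption, not confirmed in the original post) is the fifth argument of the original fisheye::undistortImage() call: that parameter is the new camera matrix Knew, not a rectification rotation, so passing an identity matrix gives a unit focal length and a principal point at (0, 0), which squeezes the remapped image toward the top-left corner, matching the symptom described below. A hedged sketch of the same call with an explicit new camera matrix (reusing K, D, and frame from the code above; newK2 and undistorted are names introduced here):
// Estimate a new camera matrix for the undistorted view, then pass it to
// fisheye::undistortImage as Knew instead of an identity matrix.
Mat newK2;
fisheye::estimateNewCameraMatrixForUndistortRectify(K, D, frame.size(), Matx33d::eye(), newK2, 1);
Mat undistorted;
fisheye::undistortImage(frame, undistorted, K, D, newK2, frame.size());
imshow("Image View", undistorted);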
Original post:
I'm trying to calibrate and undistort an image coming from a 180-degree fisheye USB camera. Most of this code is from existing examples that claim to be functional.
The code runs fine until fisheye::undistortImage, where the output image is very distorted and centered around the top-left corner of the window.
Screen shot of the "undistorted" chess board and calibration matrix outputs -
http://imgur.com/a/RTIoT
What am I missing here?
int main(int argc, char** argv) {
VideoCapture camera;
camera.open(1);
if (!camera.isOpened()) {
cout << "Failed to open camera." << std::endl;
return -1;
}
double fWidth = camera.get(CAP_PROP_FRAME_WIDTH);
double fHeight = camera.get(CAP_PROP_FRAME_HEIGHT);
cout << fWidth << std::endl;
cout << fHeight << std::endl;
/*
640 320
480 240
*/
const char* window_name = "output";
namedWindow(window_name, WINDOW_NORMAL);
Mat frame;
Size boardSize;
boardSize.width = 9;
boardSize.height = 6;
int remaining_frames = 30;
Mat K;// = Mat(3, 3, CV_64F, vK);
Mat D;
Mat identity = Mat::eye(3, 3, CV_64F);
vector<vector<Point2f> > img_points;
vector<vector<Point3f> > obj_points(1);
int sq_sz = 25;
for (int i = 0; i < boardSize.height; i++) {
for (int j = 0; j < boardSize.width; j++) {
obj_points[0].push_back(Point3f(float(j * sq_sz), float(i * sq_sz), 0));
}
}
obj_points.resize(remaining_frames, obj_points[0]);
bool found = false;
clock_t prevTimestamp = 0;
int delay = 500;
while (1) {
frame = nextFrame(camera);
bool blinkOutput = false;
if (remaining_frames > 0) {
vector<Point2f> corners;
int chessBoardFlags = CALIB_CB_ADAPTIVE_THRESH + CALIB_CB_NORMALIZE_IMAGE;
found = findChessboardCorners(frame, boardSize, corners, chessBoardFlags);
if (found) {
drawChessboardCorners(frame, boardSize, corners, found);
if (clock() - prevTimestamp > delay*1e-3*CLOCKS_PER_SEC) {
Mat viewGray;
cvtColor(frame, viewGray, COLOR_BGR2GRAY);
cornerSubPix(viewGray, corners, Size(11, 11), Size(-1, -1), TermCriteria(TermCriteria::EPS + TermCriteria::COUNT, 30, 0.1));
img_points.push_back(corners);
remaining_frames--;
cout << remaining_frames << " frames to calibration." << endl;
blinkOutput = true;
prevTimestamp = clock();
}
if (remaining_frames == 0) {
cout << "Computing distortion" << endl;
int flags = 0;
flags |= cv::fisheye::CALIB_RECOMPUTE_EXTRINSIC;
flags |= cv::fisheye::CALIB_CHECK_COND;
flags |= cv::fisheye::CALIB_FIX_SKEW;
fisheye::calibrate(obj_points, img_points, frame.size(), K, D, noArray(), noArray(), flags);
cout << "Finished computing distortion" << endl;
cout << K << endl;
cout << D << endl;
}
}
if (blinkOutput) { bitwise_not(frame, frame); }
cv::imshow(window_name, frame);
if (waitKey(50) == 27) {
break;
}
}
else {
Mat output;
fisheye::undistortImage(frame, output, K, D, identity);
cv::imshow(window_name, output);
if (waitKey(50) == 27) {
break;
}
}
}
return 0;
}