OpenCV Error: Camera Calibration: Assertion failed in matrix_wrap.cpp - c++

The Error:
OpenCV(3.4.1) Error: Assertion failed (i < 0) in cv::debug_build_guard::_InputArray::getMat_,
file C:\build\master_winpack-build-win64-vc15\opencv\modules\core\src\matrix_wrap.cpp, line 50
The Code inside "matrix_wrap.cpp" that triggers the Error:
if( k == STD_VECTOR )
{
    CV_Assert( i < 0 );
    int t = CV_MAT_TYPE(flags);
    const std::vector<uchar>& v = *(const std::vector<uchar>*)obj;
    return !v.empty() ? Mat(size(), t, (void*)&v[0]) : Mat();
}
Entire Code:
#include <iostream>
#include <opencv2/opencv.hpp>

int main(int argc, char *argv[])
{
    cv::Mat img = cv::imread(argv[1], CV_LOAD_IMAGE_GRAYSCALE);
    cv::Size imageSize = cv::Size(img.size[0], img.size[1]);
    cv::Mat cameraMatrix, distCoeffs;
    double squareSize = 30;
    cv::Size boardSize = cv::Size(6, 11);
    std::vector<cv::Point2f> imagePoints;
    std::vector<cv::Point3f> objectPoints;
    for (int i = 0; i < boardSize.height; i++) {
        for (int j = 0; j < boardSize.width; j++) {
            objectPoints.push_back(
                cv::Point3f(float(j * squareSize), float(i * squareSize), 0));
        }
    }
    std::vector<cv::Mat> rvecs, tvecs;
    bool found = false;
    if (img.size[0] > 1)
    {
        bool found = findChessboardCorners(img, boardSize, imagePoints, cv::CALIB_CB_ADAPTIVE_THRESH);
        if (found)
        {
            drawChessboardCorners(img, boardSize, imagePoints, found);
        }
        objectPoints.resize(imagePoints.size(), objectPoints[0]);
        double rms = calibrateCamera(objectPoints, imagePoints, imageSize, cameraMatrix, distCoeffs, rvecs, tvecs);
        cv::namedWindow("My Image");
        cv::imshow("My Image", img);
        cv::waitKey(1000);
    }
    return 0;
}
I want to do a basic camera calibration with a straight chessboard.
Everything works fine until the call to "calibrateCamera", but I can't figure out why this error shows up.
"objectPoints" and "imagePoints" have the same size.
Thank you in advance.
P.S.:
I'm new to this forum and also to OpenCV ;)

As per the OpenCV source (calibration.cpp), lines 3139 and 3142:
CV_Error(CV_StsUnsupportedFormat, "objectPoints should contain vector of vectors of points of type Point3f");
CV_Error(CV_StsUnsupportedFormat, "imagePoints1 should contain vector of vectors of points of type Point2f");
objectPoints and imagePoints must therefore be of type std::vector<std::vector<cv::Point3f>> and std::vector<std::vector<cv::Point2f>> respectively, with one inner vector per calibration view.
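A minimal sketch of the fix, reusing the variables from the question above (a single view, so each outer vector holds exactly one element):

std::vector<std::vector<cv::Point3f>> objectPointsAll{ objectPoints }; // one inner vector per view
std::vector<std::vector<cv::Point2f>> imagePointsAll{ imagePoints };
double rms = cv::calibrateCamera(objectPointsAll, imagePointsAll, imageSize,
                                 cameraMatrix, distCoeffs, rvecs, tvecs);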

Related

Wrong pose estimate when using Charuco on image with higher resolution

I'm working on a project where a high-quality pose estimate is needed, so I am trying to obtain it using an OpenCV ChArUco board. Previously I was using an ArUco board of size 2x2, but the pose estimate was not sufficient.
I have made the ChArUco estimation work using a RealSense D415 camera at a resolution of 640x480. However, when I change the resolution to 1280x720, the coordinate system I draw on the board starts jumping around completely at random.
The code for estimating the charuco board is here:
void ReconstructionSystem::detect_charuco_markers(cv::Mat& image, cv::Matx33f& matrix, cv::Vec<float, 5>& coef, int& centerPix_x, int& centerPix_y, cv::Vec3d& rotation, bool& arucoFound)
{
    cv::Ptr<cv::aruco::Dictionary> dictionary = cv::aruco::getPredefinedDictionary(cv::aruco::DICT_4X4_50);
    cv::Ptr<cv::aruco::CharucoBoard> board = cv::aruco::CharucoBoard::create(3, 3, 0.04f, 0.02f, dictionary);
    cv::Ptr<cv::aruco::DetectorParameters> params = cv::aruco::DetectorParameters::create();
    //params->cornerRefinementMethod = cv::aruco::CORNER_REFINE_NONE;
    std::vector<int> markerIds;
    std::vector<std::vector<cv::Point2f>> markerCorners;
    cv::Mat copyImage;
    image.copyTo(copyImage);
    cv::Mat gray;
    cv::cvtColor(copyImage, gray, cv::COLOR_RGB2GRAY);
    cv::aruco::detectMarkers(gray, board->getDictionary(), markerCorners, markerIds, params);
    // if enough markers detected (more than 3)
    if (markerIds.size() > 3) {
        cv::aruco::drawDetectedMarkers(image, markerCorners, markerIds);
        std::vector<cv::Point2f> charucoCorners;
        std::vector<int> charucoIds;
        cv::aruco::interpolateCornersCharuco(markerCorners, markerIds, gray, board, charucoCorners, charucoIds, matrix, coef);
        // if enough charuco corners detected (more than 3)
        if (charucoIds.size() > 3) {
            cv::Scalar color = cv::Scalar(255, 0, 0);
            cv::aruco::drawDetectedCornersCharuco(image, charucoCorners, charucoIds, color);
            cv::Vec3d rvec, tvec;
            bool valid = cv::aruco::estimatePoseCharucoBoard(charucoCorners, charucoIds, board, matrix, coef, rvec, tvec);
            // if charuco pose is valid
            if (valid) {
                cv::drawFrameAxes(image, matrix, coef, rvec, tvec, 0.1f);
                arucoFound = true;
            }
            else
            {
                arucoFound = false;
            }
        }
        else
        {
            arucoFound = false;
        }
    }
    else
    {
        arucoFound = false;
    }
    board = NULL;
    dictionary = NULL;
    copyImage.release();
    gray.release();
}
The function above is called within this while loop:
//Variables for transformation matrices
int centerPix_x = 0, centerPix_y = 0;
cv::Vec3d rotationVec;
cv::Matx33f rotation;
bool arucoWasFound = false;
std::vector<float> final_x, final_y, final_z;
std::vector<float> rotation_x, rotation_y, rotation_z;
cv::Matx33f matrix = get_cameraMatrix(path);
cv::Vec<float, 5> coef = get_distCoeffs(path);
const auto window_name = "Validation image";
cv::namedWindow(window_name, cv::WINDOW_AUTOSIZE);
// TODO Also add here that if we have iterated through X frames and not found Aruco, exit with failure
while (cv::waitKey(1) < 0 && cv::getWindowProperty(window_name, cv::WND_PROP_AUTOSIZE) >= 0 && counter < 60) {
    rs2::frame f = sensorPtr->color_data.wait_for_frame();
    // Query frame size (width and height)
    const int w = f.as<rs2::video_frame>().get_width();
    const int h = f.as<rs2::video_frame>().get_height();
    cv::Mat image(cv::Size(w, h), CV_8UC3, (void*)f.get_data(), cv::Mat::AUTO_STEP);
    cv::cvtColor(image, image, cv::COLOR_RGB2BGR);
    //detect_aruco_markers(image, matrix, coef, centerPix_x, centerPix_y, rotationVec, arucoWasFound);
    detect_charuco_markers(image, matrix, coef, centerPix_x, centerPix_y, rotationVec, arucoWasFound);
    if (arucoWasFound)
    {
        rs2::depth_frame depth = sensorPtr->depth_data.wait_for_frame();
        rs2_intrinsics intrinsic = rs2::video_stream_profile(depth.get_profile()).get_intrinsics();
        float pixel_distance_in_meters = depth.get_distance(centerPix_x, centerPix_y);
        float InputPixelAsFloat[2];
        InputPixelAsFloat[0] = centerPix_x;
        InputPixelAsFloat[1] = centerPix_y;
        float finalDepthPoint[3];
        rs2_deproject_pixel_to_point(finalDepthPoint, &intrinsic, InputPixelAsFloat, pixel_distance_in_meters);
        // Position //
        final_x.push_back(finalDepthPoint[0]);
        final_y.push_back(finalDepthPoint[1]);
        final_z.push_back(finalDepthPoint[2]);
        // Rotation //
        rotation_x.push_back(rotationVec[0]);
        rotation_y.push_back(rotationVec[1]);
        rotation_z.push_back(rotationVec[2]);
        counter++;
    }
    cv::imshow(window_name, image);
}
cv::destroyWindow(window_name);
Furthermore, here is an image of the detection using a resolution of 1280x720.
And here is an image of the detection with a resolution of 640x480.
If anybody knows why this is happening please let me know :D
As pointed out, the problem was that the calibration of the cameras had been done at the wrong resolution, in my case 640x480 instead of 1280x720. Below is the code I used to compute the calibration matrix and coefficients. The faulty line was: cv::Size frameSize(_imageWidth, _imageWidth); (the height was set from the width; see the corrected line after the listing).
void ReconstructionSystem::camera_calibration()
{
    std::string folder_501("\\Users\\Mikke\\Desktop\\Calibration\\501_images\\*.png");
    std::string folder_309("\\Users\\Mikke\\Desktop\\Calibration\\309_images\\*.png");
    for (int x = 0; x < 2; x++)
    {
        std::vector<cv::String> filenames;
        std::string currentCam;
        if (x == 0) currentCam = folder_501;
        if (x == 1) currentCam = folder_309;
        cv::glob(currentCam, filenames, false);
        for (const std::string& var : filenames)
        {
            printf("file: %s\n", var.c_str());
        }
        cv::Size patterSize(9, 6);
        std::vector<std::vector<cv::Point2f>> q(filenames.size());
        std::vector<std::vector<cv::Point3f>> Q;
        int checkerboard[2] = { 10, 7 }; // size of checkerboard
        int square_size = 27; // 2.7 cm == 27 mm
        std::vector<cv::Point3f> objp;
        for (int i = 1; i < checkerboard[1]; i++) {
            for (int j = 1; j < checkerboard[0]; j++) {
                objp.push_back(cv::Point3f(j * square_size, i * square_size, 0));
            }
        }
        std::vector<cv::Point2f> imgPoint;
        std::size_t i = 0;
        for (auto const& f : filenames) {
            std::cout << std::string(f) << std::endl;
            cv::Mat img = cv::imread(filenames[i]);
            cv::Mat gray;
            cv::cvtColor(img, gray, cv::COLOR_RGB2GRAY);
            bool patternFound = cv::findChessboardCorners(gray, patterSize, q[i],
                cv::CALIB_CB_ADAPTIVE_THRESH + cv::CALIB_CB_NORMALIZE_IMAGE + cv::CALIB_CB_FAST_CHECK);
            if (patternFound) {
                cv::cornerSubPix(gray, q[i], cv::Size(11, 11), cv::Size(-1, -1),
                    cv::TermCriteria(cv::TermCriteria::EPS + cv::TermCriteria::MAX_ITER, 30, 0.001));
                Q.push_back(objp);
            }
            // Display
            cv::drawChessboardCorners(img, patterSize, q[i], patternFound);
            cv::imshow("chessboard detection", img);
            cv::waitKey(0);
            i++;
        }
        cv::Matx33f K(cv::Matx33f::eye());
        cv::Vec<float, 5> k(0, 0, 0, 0, 0);
        std::vector<cv::Mat> rvecs, tvecs;
        std::vector<double> stdIntrinsics, stdExtrinsics, perViewErrors;
        int flags = cv::CALIB_FIX_ASPECT_RATIO + cv::CALIB_FIX_K3 + cv::CALIB_ZERO_TANGENT_DIST + cv::CALIB_FIX_PRINCIPAL_POINT;
        cv::Size frameSize(_imageWidth, _imageWidth); // BUG: height is set from the width
        std::cout << "calibrating..." << std::endl;
        float error = cv::calibrateCamera(Q, q, frameSize, K, k, rvecs, tvecs, flags);
        std::cout << "reprojection error = " << error << "\nK = \n" << K << "\nk=\n" << k << std::endl;
        if (x == 0) {
            std::string path_mat = "\\Users\\Mikke\\Desktop\\Calibration\\104122061501\\calibration_Mat_new.yml";
            std::string path_coe = "\\Users\\Mikke\\Desktop\\Calibration\\104122061501\\calibration_coef_new.yml";
            saveData_mat(path_mat, K);
            saveData_coef(path_coe, k);
        }
        if (x == 1) {
            std::string path_mat = "\\Users\\Mikke\\Desktop\\Calibration\\102122061309\\calibration_Mat_new.yml";
            std::string path_coe = "\\Users\\Mikke\\Desktop\\Calibration\\102122061309\\calibration_coef_new.yml";
            saveData_mat(path_mat, K);
            saveData_coef(path_coe, k);
        }
    }
}
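The one-line fix is to build frameSize from both dimensions. A minimal sketch, assuming a member _imageHeight exists alongside _imageWidth (that name is hypothetical):

cv::Size frameSize(_imageWidth, _imageHeight); // width x height, e.g. 1280x720; _imageHeight is a hypothetical member name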

OpenCV calibrateCamera() returns too high reprojection error (C++)

I tried to calibrate an iPhone 11 Pro camera, and OpenCV's calibrateCamera() function returns a reprojection error above 1.2 (same for computeReprojectionErrors() below). I want it to be lower than 0.5, because afterwards I am building a disparity map and I need it to be smooth. How can I minimize it? I took 90 pictures of the board from different angles, stored in ../chess_image. The code is here: github.com/markilebyak/opencv.git
Also here: 4shared.com/folder/PSDaeTcI/opencv.html are 36 of the 90 images I used, plus 34 grayscaled versions with detected lines from those 36.
void createKnownBoardPosition(Size &boardSize, float squareEdgeLength, vector<Point3f>& corners) {
    for (int i = 0; i < boardSize.height; i++) {
        for (int j = 0; j < boardSize.width; j++) {
            corners.emplace_back((float)j * squareEdgeLength, (float)i * squareEdgeLength, 0.0f);
        }
    }
}

void process_images(std::string &path_to_directory, Size &chessboard_size, vector<vector<Point2f>>& allFoundCorners) {
    // Function to iterate through images
    // taken for camera calibration
    vector<String> filenames;
    cv::glob(path_to_directory, filenames);
    vector<Point2f> pointBuffer;
    int counter = 0;
    for (const auto & filename : filenames) {
        if (filename != "../chess_images/.DS_Store") {
            Mat im = imread(filename);
            std::cout << filename << std::endl;
            Mat gray_im;
            cvtColor(im, gray_im, COLOR_BGR2GRAY);
            bool found = findChessboardCorners(gray_im, chessboard_size, pointBuffer,
                                               CALIB_CB_ADAPTIVE_THRESH | CALIB_CB_NORMALIZE_IMAGE);
            if (found) {
                counter++;
                TermCriteria criteria = TermCriteria(TermCriteria::EPS + TermCriteria::MAX_ITER,
                                                     30, 0.0001);
                Size winSize = Size(11, 11);
                Size zeroZone = Size(-1, -1);
                // cornerSubPix is the algorithm focused on relocating the points. It receives the image, the corners,
                // a window size, zeroZone and the actual criteria. The window size is the search area.
                cornerSubPix(gray_im, pointBuffer, winSize, zeroZone, criteria);
                drawChessboardCorners(gray_im, chessboard_size, Mat(pointBuffer), found);
                string nametext = "../drawn_chessboard/" + to_string(counter) + ".jpg";
                imwrite(nametext, gray_im);
                allFoundCorners.push_back(pointBuffer);
            }
        }
    }
    std::cout << "Images used: " << counter << std::endl;
}

double computeReprojectionErrors(const vector<vector<Point3f>>& objectPoints,
                                 const vector<vector<Point2f>>& imagePoints,
                                 const vector<Mat>& rvecs, const vector<Mat>& tvecs,
                                 const Mat& cameraMatrix, const Mat& distCoeffs,
                                 vector<float>& perViewErrors, bool fisheye) {
    vector<Point2f> imagePoints2;
    size_t totalPoints = 0;
    double totalErr = 0, err;
    perViewErrors.resize(objectPoints.size());
    for (size_t i = 0; i < objectPoints.size(); ++i)
    {
        if (fisheye)
        {
            fisheye::projectPoints(objectPoints[i], imagePoints2, rvecs[i], tvecs[i], cameraMatrix,
                                   distCoeffs);
        }
        else
        {
            projectPoints(objectPoints[i], rvecs[i], tvecs[i], cameraMatrix, distCoeffs, imagePoints2);
        }
        err = norm(imagePoints[i], imagePoints2, NORM_L2);
        size_t n = objectPoints[i].size();
        perViewErrors[i] = (float) std::sqrt(err*err/n);
        totalErr += err*err;
        totalPoints += n;
    }
    return std::sqrt(totalErr/totalPoints);
}
void calibration_process(Size &boardSize, float &squareEdgeLength, Mat& cameraMatrix, Mat& distortionCoefficients,
                         std::string &path_to_directory) {
    vector<vector<Point2f>> checkerboardImageSpacePoints;
    process_images(path_to_directory, boardSize, checkerboardImageSpacePoints);
    vector<vector<Point3f>> worldSpaceCornerPoints(1);
    createKnownBoardPosition(boardSize, squareEdgeLength, worldSpaceCornerPoints[0]);
    worldSpaceCornerPoints.resize(checkerboardImageSpacePoints.size(), worldSpaceCornerPoints[0]);
    // rotation and translation vectors (rVectors, tVectors)
    vector<Mat> rVectors, tVectors;
    distortionCoefficients = Mat::zeros(8, 1, CV_64F);
    calibrateCamera(worldSpaceCornerPoints, checkerboardImageSpacePoints, boardSize, cameraMatrix,
                    distortionCoefficients, rVectors, tVectors, CALIB_FIX_K5 + CALIB_FIX_INTRINSIC);
    vector<float> perViewErrors;
    double rep_error = computeReprojectionErrors(worldSpaceCornerPoints, checkerboardImageSpacePoints,
                                                 rVectors, tVectors, cameraMatrix,
                                                 distortionCoefficients, perViewErrors, false);
    std::cout << "Reprojection Error: " << rep_error << std::endl;
}

int main() {
    Mat cameraMatrix, distortionCoefficient;
    Size chessboardDimensions = Size(6, 9);
    float calibrationSquareDimension = 0.03f;
    std::string path = "../chess_images";
    calibration_process(chessboardDimensions, calibrationSquareDimension, cameraMatrix,
                        distortionCoefficient, path);
    return 0;
}
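One thing stands out in the listing above: the third argument of calibrateCamera is the image size in pixels, but boardSize (the 6x9 pattern size) is passed instead, and CALIB_FIX_INTRINSIC is a stereoCalibrate flag with no effect in calibrateCamera. A hedged sketch of the corrected call (the pixel size shown is a hypothetical value; take it from the loaded calibration images instead):

Size imageSize(4032, 3024); // hypothetical photo resolution; use the actual size of your images
calibrateCamera(worldSpaceCornerPoints, checkerboardImageSpacePoints, imageSize,
                cameraMatrix, distortionCoefficients, rVectors, tVectors, CALIB_FIX_K5);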

How to use findChessboardCorners and calibrateCamera?

I want to use findChessboardCorners with calibrateCamera, but calibrateCamera fails and the output is not very helpful:
OpenCV Error: Assertion failed (i < 0) in cv::_OutputArray::create
static const Size patterSize(8, 6);
auto image = imread("x.jpg", IMREAD_GRAYSCALE);
Mat corners;
auto found = findChessboardCorners(image, patterSize, corners);

// constructing objectPoints
vector<vector<Vec3f>> objectPoints;
objectPoints.push_back(vector<Vec3f>());
for (int row = 0; row < patterSize.height; row++) {
    for (int col = 0; col < patterSize.width; col++) {
        objectPoints.at(0).push_back(Vec3f(row, col, 0));
    }
}

vector<vector<Vec2f>> imagePoints;
imagePoints.push_back(vector<Vec2f>());
for (int row = 0; row < patterSize.height; row++) {
    for (int col = 0; col < patterSize.width; col++) {
        int num = row*patterSize.width + col;
        imagePoints.at(0).push_back(corners.at<Vec2f>(num, 0));
    }
}

Mat cameraMatrix, distMatrix, rvecs, tvecs;
calibrateCamera(objectPoints, imagePoints, Size(image.size().width, image.size().height), cameraMatrix, distMatrix, rvecs, tvecs);
The declaration of rvecs and tvecs should be vector<Mat> rather than Mat, and then everything will be fine. For more information, see 3calibration.cpp.
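A minimal sketch of the corrected declarations, with everything else as in the question:

Mat cameraMatrix, distMatrix;
vector<Mat> rvecs, tvecs; // one rotation/translation pair per calibration view, so vector<Mat>, not Mat
calibrateCamera(objectPoints, imagePoints, Size(image.size().width, image.size().height), cameraMatrix, distMatrix, rvecs, tvecs);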

OpenCV Camera Calibration, Assertion Failed <ni > 0 && ni==ni1>

I am trying to perform camera calibration using OpenCV. On the example data from the OpenCV samples this works great, but my own source generates an error:
Assertion failed (ni > 0 && ni == ni1) ... collectCalibrationData ... calibration.cpp line 3193
objectPoints and imagePoints are the same size, and changing the other inputs doesn't affect the error.
Can anyone help me deal with it? The code follows:
void main() // enter code here
{
    // initial operations, declarations etc.
    int *Asize; float *APoints;
    float AAPoints[] = {/* My Array */};
    int AAsize[] = {4,54,2}; // my array size
    Asize = AAsize; APoints = AAPoints; // will be called as a dll, which is why this weird assignment
    int page = *(Asize); int row = *(Asize+1); int col = *(Asize+2);
    Point3f pointBuf;
    vector<vector<Point3f>> imagePoints;
    vector<Point3f> vectBuf;
    Size boardSize;
    boardSize.height = 6; boardSize.width = 9;
    Mat cameraMatrix, distCoeffs;
    float squareSize = 50;
    // change 1d array to vector<vector<Point3f>>
    for (int i = 0; i < page; i++)
    {
        for (int j = 0; j < row; j++)
        {
            pointBuf.x = *(APoints + (i*(row*col)) + j*2);
            pointBuf.y = *(APoints + (i*(row*col)) + j*2 + 1);
            pointBuf.z = 0;
            vectBuf.push_back(pointBuf);
        }
        imagePoints.push_back(vectBuf);
        vectBuf.clear();
    }
    // create objectPoints vector<vector<Point3f>>
    vector<vector<Point3f>> objectPoints;
    vectBuf.clear();
    for( int i = 0; i < boardSize.height; ++i )
    {
        for( int j = 0; j < boardSize.width; ++j )
            vectBuf.push_back(Point3f(float( j*squareSize ), float( i*squareSize ), 0));
    }
    objectPoints.resize(imagePoints.size(), vectBuf);
    // initialize starting variables for calibration
    cameraMatrix = Mat::eye(3, 3, CV_64F);
    distCoeffs = Mat::zeros(1, 1, CV_64F);
    cameraMatrix.at<double>(0,0) = 1.0;
    vector<Mat> rvecs, tvecs;
    Size imageSize; imageSize.width = 2040; imageSize.height = 2040;
    double rms = calibrateCamera(objectPoints, imagePoints, imageSize, cameraMatrix, distCoeffs, rvecs, tvecs);
}
There is a problem with the use of cv::calibrateCamera(): it requires std::vector<std::vector<cv::Point3f>> as the first argument and std::vector<std::vector<cv::Point2f>> as the second, e.g.:
cv::Size imageSize(2040, 2040);
cv::Mat cameraMatrix, distCoeffs;
std::vector<cv::Mat> rvecs, tvecs;
std::vector<std::vector<cv::Point2f> > imagePoints;
std::vector<std::vector<cv::Point3f> > objectPoints;
// Fill imagePoints and objectPoints
...
cv::calibrateCamera(objectPoints, imagePoints, imageSize, cameraMatrix, distCoeffs, rvecs, tvecs);
I ran into the same issue. In my case, there was a bug converting vector<vector<Point3f>> to InputArrayOfArrays.
You can run the following sample code to check for it on your system:
#include <iostream>
#include <opencv2/opencv.hpp>

using namespace std;
using namespace cv;

/** @function main */
int main( void )
{
    vector<Point3f> objectPoints_tmp;
    objectPoints_tmp.reserve(54);
    for( int i = 0; i < 9; ++i )
        for( int j = 0; j < 6; ++j )
            objectPoints_tmp.push_back(Point3f(j*50, i*50, 0.0f));
    vector<vector<Point3f> > objectPoints(7, objectPoints_tmp);
    InputArrayOfArrays OBJPOINT = objectPoints;
    std::cout << (int) OBJPOINT.total() << std::endl;
    for( int i = 0; i < 7; ++i )
        std::cout << OBJPOINT.getMat(i).checkVector(3, CV_32F) << std::endl;
    waitKey(0);
    return 0;
}
Normally, it should print out
7
54
54
54
54
54
54
54
In my failing case, it prints out random numbers.
My guess was that the prebuilt OpenCV library did not match my system (Win7 x64 + VS2012). After rebuilding OpenCV myself, it works now. You may google the keywords "Win7x64 VS2012 OpenCV CMake TBB".

Assertion Failed when trying to run projectPoints

I keep getting an assertion failure (0 <= i && i < (int)vv.size()) in cv::_InputArray::getMat, in file matrix.cpp, when trying to run projectPoints. I have checked everything I can think of, but I can't work out what's wrong. My guess is that something went wrong with calibrateCamera, but I don't know how to find out what exactly. Here is my code, thanks.
using namespace cv;
using namespace std;

std::vector<cv::Point3f> Generate3DPoints()
{
    std::vector<cv::Point3f> points;
    float x, y, z;
    x = .5; y = .5; z = .5;
    points.push_back(cv::Point3f(x,y,z));
    x = 0; y = 0; z = 0;
    points.push_back(cv::Point3f(x,y,z));
    x = -0; y = 0; z = .5;
    points.push_back(cv::Point3f(x,y,z));
    x = 0; y = .5; z = .5;
    points.push_back(cv::Point3f(x,y,z));
    x = 0; y = -.5; z = 0;
    points.push_back(cv::Point3f(x,y,z));
    x = .5; y = 0; z = .5;
    points.push_back(cv::Point3f(x,y,z));
    x = .5; y = 0; z = 0;
    points.push_back(cv::Point3f(x,y,z));
    /*
    for(unsigned int i = 0; i < points.size(); ++i)
    {
        std::cout << points[i] << std::endl;
    }
    */
    return points;
}

int main()
{
    int numBoards = 4;
    int numCornersHor = 6;
    int numCornersVer = 9;
    int numSquares = numCornersHor * numCornersVer;
    Size board_sz = Size(numCornersHor, numCornersVer);
    VideoCapture capture = VideoCapture(0);
    vector<vector<Point3f>> object_points;
    vector<vector<Point2f>> imagePoints;
    vector<vector<Point2f>> image_points;
    vector<Point3f> objectPoints1 = Generate3DPoints();
    vector<Point2f> corners;
    int successes = 0;
    Mat image;
    Mat gray_image;
    //capture >> image;
    Sleep(1000);
    capture.read(image);
    imshow("Welcome", image);
    vector<Point3f> obj;
    for(int j = 0; j < numSquares; j++)
        obj.push_back(Point3f(j/numCornersHor, j%numCornersHor, 0.0f));
    while(successes < numBoards)
    {
        cvtColor(image, gray_image, CV_BGR2GRAY);
        bool found = findChessboardCorners(image, board_sz, corners, CV_CALIB_CB_ADAPTIVE_THRESH | CV_CALIB_CB_FILTER_QUADS);
        if(found)
        {
            cornerSubPix(gray_image, corners, Size(11, 11), Size(-1, -1), TermCriteria(CV_TERMCRIT_EPS | CV_TERMCRIT_ITER, 30, 0.1));
            drawChessboardCorners(gray_image, board_sz, corners, found);
            cout << "SUCCESS\n";
        }
        else
            cout << "FAIL\n";
        imshow("win1", image);
        imshow("win2", gray_image);
        capture.read(image);
        image_points.push_back(corners);
        object_points.push_back(obj);
        printf("Snap stored!\n");
        char continues;
        cout << "Press c to continue\n";
        cin >> continues;
        successes++;
        //if(successes >= numBoards)
        //    break;
    }
    Mat intrinsic = Mat(3, 3, CV_32FC1);
    Mat distCoeffs;
    vector<Mat> rvecs;
    vector<Mat> tvecs;
    intrinsic.ptr<float>(0)[0] = 1;
    intrinsic.ptr<float>(1)[1] = 1;
    calibrateCamera(object_points, image_points, image.size(), intrinsic, distCoeffs, rvecs, tvecs);
    cout << "Done\n";
    projectPoints(objectPoints1, rvecs, tvecs, intrinsic, distCoeffs, imagePoints);
    cout << "done2";
    return 0;
}
You are trying to pass rvecs and tvecs to the function projectPoints, which expects a single Mat each, not vector<Mat>.
Basically, the calibrateCamera function estimates the intrinsic matrix and distortion coefficients of the camera using several images of a chessboard. It also returns one set of extrinsic parameters (rotation + translation) per image you used; these are stored in rvecs and tvecs.
However, you want to project the 3D points into only one of these images, so you should choose the set of extrinsic parameters you want (i.e. rvecs[i] and tvecs[i], with i corresponding to the image into which you want to project your object) and call projectPoints with those extrinsic parameters only, not the whole vector<Mat>.
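For the code in the question, a minimal sketch of the corrected call, projecting into the first view (note that the output is a flat vector<Point2f>, not vector<vector<Point2f>>):

vector<Point2f> projectedPoints; // 2D output points for view 0
projectPoints(objectPoints1, rvecs[0], tvecs[0], intrinsic, distCoeffs, projectedPoints);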