OpenCV run-time error while calibrating camera at the calibrateCamera line - C++

I wrote calibration code as below:
int numBoards = 20;
int numCornersHor=6;
int numCornersVer=9;
int numSquares = numCornersHor * numCornersVer;
cv::Size board_sz = cv::Size(numCornersHor, numCornersVer);
std::vector<std::vector<cv::Point3f> > object_points;
std::vector<std::vector<cv::Point2f> > image_points;
std::vector<cv::Point2f> corners;
std::vector<cv::Point3f> obj;
for(int j=0;j<numSquares;j++)
obj.push_back(cv::Point3f(j/numCornersHor, j%numCornersHor, 0.0f));
int successes=0;
After initializing these variables, I grab frames from the webcam and store them in a buffer.
while(successes<numBoards)
{
unsigned char* buffer=eyeCamera->getFrame();
cv::Mat rawImg=cv::Mat(cv::Size(widthCam,heightCam),CV_8UC4, buffer,cv::Mat::AUTO_STEP);
cv::Mat grayImg;
cv::cvtColor(rawImg,grayImg,CV_BGR2GRAY);
bool found = findChessboardCorners(rawImg, board_sz, corners,
CV_CALIB_CB_ADAPTIVE_THRESH | CV_CALIB_CB_FILTER_QUADS);
if(found)
{
cv::cornerSubPix(grayImg, corners, cv::Size(11, 11), cv::Size(-1, -1),
cv::TermCriteria(CV_TERMCRIT_EPS | CV_TERMCRIT_ITER, 30, 1.1));
cv::drawChessboardCorners(grayImg, board_sz, corners, found);
}
cv::imshow("win2", grayImg);
Everything is OK so far: I show grayImg and the chessboard corners are drawn.
int key = cv::waitKey(1);
if(key==27)
return;
if(key==' ' && found!=0)
{
image_points.push_back(corners);
object_points.push_back(obj);
successes++;
if(successes>=numBoards)
break;
}
}
cv::Mat intrinsic = cv::Mat(3, 3, CV_64F);
cv::Mat distCoeffs= cv::Mat(8, 1, CV_64F);
std::vector<cv::Mat> rvecs;
std::vector<cv::Mat> tvecs;
intrinsic.at<double>(0,0) = 1.0;
int widthCam=640;
int heightCam=480;
object_points and image_points are filled; each view contains 54 points (9 * 6 corners).
cv::calibrateCamera(object_points, image_points, cv::Size(widthCam,heightCam), intrinsic, distCoeffs, rvecs, tvecs);
I'm using Qt Creator. I always get a run-time error at the last line, the call to calibrateCamera().
Edit: I tried the same code with cvCalibrateCamera2 and again got the same error. Here is the OpenCV exception:
OpenCV Error: Bad argument (the output array of translation vectors must be 3-channel
1xn or nx1 array or 1-channel nx3 array, where n is the number of views) in
cvCalibrateCamera2, file F:\OpenCV\opencv\modules\calib3d\src\calibration.cpp, line 1506
terminate called after throwing an instance of 'cv::Exception'
I am using 10 snapshots, and my rvec and tvec are defined as follows:
CvMat* rvec = cvCreateMat(10,3,CV_32FC1);
CvMat* tvec = cvCreateMat(10,3,CV_32FC1);
Can anyone help me solve this error?
Thanks.

Okay, I tried this locally on my system; it turns out that the definition of rVecs and tVecs needs to be different from what I expected.
CvMat* rVecs = cvCreateMat( 1, 1, CV_32FC3 );
CvMat* tVecs = cvCreateMat( 1, 1, CV_32FC3 );
did the job for me.
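For what it's worth, with the C++ API used in the original snippet the output containers do not need to be pre-allocated at all: calibrateCamera resizes them and returns one 3x1 rotation and one 3x1 translation Mat per view. A minimal sketch, assuming object_points and image_points are already filled as above:
std::vector<cv::Mat> rvecs, tvecs;               // left empty, filled by calibrateCamera
cv::Mat intrinsic = cv::Mat::eye(3, 3, CV_64F);
cv::Mat distCoeffs;
double rms = cv::calibrateCamera(object_points, image_points,
                                 cv::Size(widthCam, heightCam),
                                 intrinsic, distCoeffs, rvecs, tvecs);
// rms is the overall RMS reprojection error reported by the solver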

I wonder if
CvMat* tvec = cvCreateMat(3,10,CV_32FC1);
will help.
Also, try
CvMat* tvec = cvCreateMat(1,10,CV_32FC3);

Related

OpenCV: "draw" image on another image

I have 2 images with transparency. The images have the same format and size.
How can I copy pixels from the second image to the first one using C++ OpenCV?
The idea is to draw the 2nd image on the 1st image.
Thanks
Here is the code from the link in the comment above (modified for my case).
L. Scott Johnson, thanks again!
void alphaBlend(Mat& foreground, Mat& background, Mat& alpha, Mat& outImage)
{
// Total number of float elements to process (pixels * channels).
int numberOfPixels = foreground.rows * foreground.cols * foreground.channels();
// Get floating point pointers to the data matrices
float* fptr = reinterpret_cast<float*>(foreground.data);
float* bptr = reinterpret_cast<float*>(background.data);
float* aptr = reinterpret_cast<float*>(alpha.data);
float* outImagePtr = reinterpret_cast<float*>(outImage.data);
// Loop over all pixels ONCE
for (
int i = 0;
i < numberOfPixels;
i++, outImagePtr++, fptr++/*, aptr++*/, bptr++
)
{
if (i != 0 && (i % 3) == 0)
aptr++; // alpha is single-channel, so advance it once per 3-channel pixel
*outImagePtr = (*fptr) * (*aptr) + (*bptr) * (1 - *aptr);
}
}
void Mix()
{
Mat layer = imread("images\\leyer.png", IMREAD_UNCHANGED);
Mat image = imread("images\\bg.jpg");
std::vector<cv::Mat> bgra_planes;
cv::split(layer, bgra_planes);
Mat alpha = bgra_planes[3];
bgra_planes.pop_back();
cv::merge(bgra_planes, layer);
alpha.convertTo(alpha, CV_32FC3, 1.0 / 255);
layer.convertTo(layer, CV_32FC3);
image.convertTo(image, CV_32FC3);
Mat result(layer.size(), CV_32FC3);
alphaBlend(layer, image, alpha, result);
result.convertTo(result, CV_8UC3);
// previous tries
//cv::copyTo(layer, image, );
//cv::addWeighted(image, 1, layer, 1, 0.5, result);
String windowName = "alpha blending";
namedWindow(windowName, WINDOW_NORMAL);
imshow(windowName, result);
waitKey(0);
destroyWindow(windowName);
}
Here's what you can try:
Load your first image:
cv::Mat img = cv::imread("img.jpeg");
Find your smaller image - here I'm just resizing the same image:
cv::Mat img_resize;
cv::resize(img, img_resize, cv::Size(), 0.3, 0.3);
Choose the x/y origin location:
const cv::Point origin(100, 100);
Create a Region of Interest:
cv::Rect roi(origin, img_resize.size());
Copy the matrix data in:
img_resize.copyTo(img(roi));
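If the pasted image has an alpha channel (as in the question above), one option is to split off the alpha plane and use it as a mask for copyTo when pasting into the img loaded above, so only the opaque pixels are copied. A minimal sketch, assuming a hypothetical 4-channel overlay.png:
cv::Mat overlay = cv::imread("overlay.png", cv::IMREAD_UNCHANGED);            // 4-channel BGRA
std::vector<cv::Mat> planes;
cv::split(overlay, planes);
cv::Mat mask = planes[3];                                                      // alpha plane used as copy mask
cv::Mat overlayBgr;
cv::merge(std::vector<cv::Mat>(planes.begin(), planes.begin() + 3), overlayBgr);
cv::Rect roi(cv::Point(100, 100), overlay.size());
overlayBgr.copyTo(img(roi), mask);                                             // copy only where alpha is non-zero
Note this is a hard cut-out rather than a true blend; for smooth edges, the alphaBlend approach above is the better fit.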

OpenCV cornerSubPix exception while converting Python code to C++

I am trying to port this response to C++, but I am not able to get past this cryptic exception (see image below). I am not sure what the limiting factor is. I imagine it is the image color format or the corners parameter, but nothing seems to be working. If it is related to converting the color format, please provide a small code snippet.
The Python code provided by Anubhav Singh works great; however, I would like to develop in C++. Any help would be greatly appreciated.
I am using OpenCV 4.2.0.
void CornerDetection(){
std::string image_path = samples::findFile("../wing.png");
Mat img = imread(image_path);
Mat greyMat;
Mat dst;
cv::cvtColor(img, greyMat, COLOR_BGR2GRAY);
threshold(greyMat, greyMat, 0, 255, THRESH_BINARY | THRESH_OTSU);
cornerHarris(greyMat, dst, 9, 5, 0.04);
dilate(dst, dst,NULL);
Mat img_thresh;
threshold(dst, img_thresh, 0.32 * 255, 255, 0);
img_thresh.convertTo(img_thresh, CV_8UC1);
Mat labels = Mat();
Mat stats = Mat();
Mat centroids = Mat();
cv::connectedComponentsWithStats(img_thresh, labels, stats, centroids, 8, CV_32S);
TermCriteria criteria = TermCriteria(TermCriteria::EPS + TermCriteria::MAX_ITER, 30, 0.001);
std::vector<Point2f> corners = std::vector<Point2f>();
Size winSize = Size(5, 5);
Size zeroZone = Size(-1, -1);
cornerSubPix(greyMat, corners, winSize, zeroZone, criteria);
for (int i = 0; i < corners.size(); i++)
{
circle(img, Point(corners[i].x, corners[i].y), 5, Scalar(0, 255, 0), 2);
}
imshow("img", img);
waitKey();
destroyAllWindows();
}
The solution was to iterate over the centroids to build the corners vector before passing the corners variable to the cornerSubPix(...) function.
std::vector<Point2f> corners = std::vector<Point2f>();
for (int i = 0; i < centroids.rows; i++)
{
double x = centroids.at<double>(i, 0);
double y = centroids.at<double>(i, 1);
corners.push_back(Point2f(x, y));
}
The output of the solution is still not exactly the same as the Python output; regardless, it fixes this question in case anyone else runs across this issue.
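One more detail worth noting: connectedComponentsWithStats reports the background as label 0, so its centroid ends up in row 0 of centroids. If that point is not wanted as a corner, the loop can simply start at i = 1 (a small adjustment to the sketch above):
for (int i = 1; i < centroids.rows; i++)   // skip row 0, the background component
{
    corners.push_back(Point2f((float)centroids.at<double>(i, 0),
                              (float)centroids.at<double>(i, 1)));
}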

How do you make a face symmetric using OpenCV and C++?

I was looking at this tutorial, and it said "You can make a symmetric face, by averaging a face and its mirror reflection." There was an example of Obama's face being made symmetrical. I tried doing the same with OpenCV and C++, but these are the results I'm getting using the following code:
Mat3b getMean(const vector<Mat3b>& images) {
Mat m(images[0].rows, images[0].cols, CV_64FC3); // Create a 0 initialized image to use as accumulator
m.setTo(Scalar(0, 0, 0, 0)); //set all image elements to 0
Mat temp; // Use a temp image to hold the conversion of each input image to CV_64FC3
for (int i = 0; i < images.size(); ++i) { //loop through the images
images[i].convertTo(temp, CV_64FC3); // Convert the input images to CV_64FC3...
m += temp; //...so you can accumulate
}
m.convertTo(m, CV_8U, 1. / images.size()); // Convert back to CV_8UC3 type, applying the division to get the actual mean
return m;
}
int main() {
Mat img1 = imread("E:/barack-obama.jpg"), img2, img4;
resize(img1, img1, Size(0.4 * img1.cols, 0.4 * img1.rows), 1, 1, INTER_LINEAR);
flip(img1, img2, +1);
vector<Mat3b> imgs;
imgs.push_back(img1);
imgs.push_back(img2);
Mat3b img3 = getMean(imgs); // Compute the mean
//img3 = (img1 + img2)*0.5;
double alpha = 0.5, beta;
beta = (1.0 - alpha);
addWeighted(img1, alpha, img2, beta, 0.0, img4);
imshow("Original", img1);
imshow("getMean", img3);
imshow("AddWeighted", img4);
waitKey(0);
}

OpenCV error with projectPoints

I am trying to quantify the accuracy of my camera calibration using OpenCV. In my program I am reading an image of a chessboard pattern and calling the calibrateCamera function to get an initial guess of my camera intrinsics and extrinsics. I am aware that using only one image does not yield a perfect calibration and that calibrateCamera returns the reprojection error. Nevertheless, I want to use the projectPoints function to get the image points of my detected corners on the calibration board for further processing. I am using the code below for the calibration, but when it tries to run the projectPoints function, the program crashes at runtime. If I remove the function call, the code works just fine.
Mat image_;
Mat gray_image_;
Size chessboard_size_;
vector<Point2f> corners_;
vector< vector< Point2f> > imagePoints_;
vector< Point2f> imagePointsProjected_;
vector< vector< Point3f> > objectPoints_;
bool corners_found;
float measure_ = 35;
chessboard_size_ = Size(CHESSBOARD_INTERSECTIONS_HORIZONTAL, CHESSBOARD_INTERSECTIONS_VERTICAL);
// image of type CV_8UC3 is read, with 8 bit & 3 channels
image_ = imread("/home/fes1rng/left.png");
if(!image_.data )
{
printf( "No image data \n" );
return;
}
// image is converted to grayscale, afterwards it is of type CV_8UC1
cvtColor(image_, gray_image_, CV_RGB2GRAY);
// detect corners and draw them
corners_found = findChessboardCorners(gray_image_, Size(CHESSBOARD_INTERSECTIONS_HORIZONTAL, CHESSBOARD_INTERSECTIONS_VERTICAL), corners_);
if (corners_found)
{
cornerSubPix(gray_image_, corners_, Size(11, 11), Size(-1, -1), TermCriteria(CV_TERMCRIT_EPS + CV_TERMCRIT_ITER, 30, 0.1));
drawChessboardCorners(image_, Size(CHESSBOARD_INTERSECTIONS_HORIZONTAL, CHESSBOARD_INTERSECTIONS_VERTICAL), corners_, corners_found);
}
vector< Point2f> v_tImgPT;
vector< Point3f> v_tObjPT;
//save 2d coordinate and world coordinate
for(int j=0; j< corners_.size(); ++j)
{
Point2d tImgPT;
Point3d tObjPT;
tImgPT.x = corners_[j].x;
tImgPT.y = corners_[j].y;
tObjPT.x = j%CHESSBOARD_INTERSECTIONS_HORIZONTAL*measure_;
tObjPT.y = j/CHESSBOARD_INTERSECTIONS_HORIZONTAL*measure_;
tObjPT.z = 0;
v_tImgPT.push_back(tImgPT);
v_tObjPT.push_back(tObjPT);
}
imagePoints_.push_back(v_tImgPT);
objectPoints_.push_back(v_tObjPT);
Mat rvec(3,1, CV_64FC1);
Mat tvec(3,1, CV_64FC1);
vector<Mat> rvecs;
vector<Mat> tvecs;
rvecs.push_back(rvec);
tvecs.push_back(tvec);
Mat intrinsic_Matrix(3,3, CV_64FC1);
Mat distortion_coeffs(8,1, CV_64FC1);
calibrateCamera(objectPoints_, imagePoints_, image_.size(), intrinsic_Matrix, distortion_coeffs, rvecs, tvecs);
projectPoints(objectPoints_, rvecs, tvecs, intrinsic_Matrix, distortion_coeffs, imagePointsProjected_);
cv::namedWindow( "Display Image", CV_WINDOW_AUTOSIZE );
cv::imshow( "Display Image", image_ );
waitKey(0);
The error message is:
OpenCV Error: Assertion failed (0 <= i && i < (int)vv.size()) in getMat, file /build/buildd/opencv-2.4.8+dfsg1/modules/core/src/matrix.cpp, line 977
terminate called after throwing an instance of 'cv::Exception'
what(): /build/buildd/opencv-2.4.8+dfsg1/modules/core/src/matrix.cpp:977: error: (-215) 0 <= i && i < (int)vv.size() in function getMat
As the error occurs at runtime and in a subfunction call, I assume that it is caused by wrong data types of the matrices. But since the projectPoints function is used internally by calibrateCamera, I am confused as to why a single function call with the same parameters causes the error.
As its first parameter, projectPoints expects a std::vector<cv::Point3f> and not a std::vector<std::vector<cv::Point3f>>.
Using the following expression solved the issue!
projectPoints(objectPoints_.front(), rvecs.front(), tvecs.front(), intrinsic_Matrix, distortion_coeffs, imagePointsProjected_);
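Since the stated goal is to quantify calibration accuracy, the projected points can then be compared against the detected corners. A minimal sketch, reusing the variables above:
double err = norm(imagePoints_.front(), imagePointsProjected_, NORM_L2);   // distance between detected and reprojected corners
double rmsError = std::sqrt(err * err / imagePoints_.front().size());
std::cout << "per-view RMS reprojection error: " << rmsError << std::endl;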

DFT to spatial domain in OpenCV is not working

I have created the DFT of an image, and after some adjustments with filters I want to convert it back to the real image, but every time I do that I get a wrong result; it seems like it is not converting back.
ForierTransform and createGaussianHighPassFilter are my own functions; the rest of the code I am using for the inversion back to the real image is shown below.
Mat fft = ForierTransform(HeightPadded,WidthPadded);
Mat ghpf = createGaussianHighPassFilter(Size(WidthPadded, HeightPadded), db);
Mat res;
cv::multiply(fft,ghpf,res);
imshow("fftXhighpass1", res);
idft(res,res,DFT_INVERSE,res.rows);
cv::Mat croped = res(cv::Rect(0, 0, img.cols,img.rows));
//res.convertTo(res,CV_32S);
imshow("fftXhighpass", res);
Even if I don't apply the filter, I am unable to reverse the DFT result.
Here is my DFT code; I could not find any sample showing how to reverse a DFT back to a normal image.
Mat ForierTransform(int M,int N)
{
Mat img = imread("thumb1-small-test.jpg", CV_LOAD_IMAGE_GRAYSCALE);
Mat padded;
copyMakeBorder(img, padded, 0, M - img.rows, 0, N - img.cols, BORDER_CONSTANT, Scalar::all(0));
Mat planes[] = {Mat_<float>(padded), Mat::zeros(padded.size(), CV_32F)};
Mat complexImg;
merge(planes, 2, complexImg);
dft(complexImg, complexImg);
split(complexImg, planes);
magnitude(planes[0], planes[1], planes[0]);
Mat mag = planes[0];
mag += Scalar::all(1);
log(mag, mag);
// crop the spectrum, if it has an odd number of rows or columns
mag = mag(Rect(0, 0, mag.cols & -2, mag.rows & -2));
normalize(mag, mag, 0, 1, CV_MINMAX);
return mag;
}
Kindly help.
[EDIT: I found the solution with the help of mevatron; below is the corrected code]
Mat ForierTransform(int M,int N)
{
Mat img = imread("thumb1-small-test.jpg", CV_LOAD_IMAGE_GRAYSCALE);
Mat padded;
copyMakeBorder(img, padded, 0, M - img.rows, 0, N - img.cols, BORDER_CONSTANT, Scalar::all(0));
Mat planes[] = {Mat_<float>(padded), Mat::zeros(padded.size(), CV_32F)};
Mat complexImg;
merge(planes, 2, complexImg);
dft(complexImg, complexImg);
return complexImg;
}
Mat img = imread("thumb1-small-test.jpg",CV_LOAD_IMAGE_GRAYSCALE);
int WidthPadded=0,HeightPadded=0;
WidthPadded=img.cols*2;
HeightPadded=img.rows*2;
int M = getOptimalDFTSize( img.rows );
//Create a Gaussian Highpass filter 5% the height of the Fourier transform
double db = 0.05 * HeightPadded;
Mat fft = ForierTransform(HeightPadded,WidthPadded);
Mat ghpf = createGaussianHighPassFilter(Size(WidthPadded, HeightPadded), db);
Mat res;
cv::mulSpectrums(fft,ghpf,res,DFT_COMPLEX_OUTPUT);
idft(res,res,DFT_COMPLEX_OUTPUT,img.rows);
Mat padded;
copyMakeBorder(img, padded, 0, img.rows, 0, img.cols, BORDER_CONSTANT, Scalar::all(0));
Mat planes[] = {Mat_<float>(padded), Mat::zeros(padded.size(), CV_32F)};
split(res, planes);
magnitude(planes[0], planes[1], planes[0]);
Mat mag = planes[0];
mag += Scalar::all(1);
log(mag, mag);
// crop the spectrum, if it has an odd number of rows or columns
mag = mag(Rect(0, 0, mag.cols & -2, mag.rows & -2));
int cx = mag.cols/2;
int cy = mag.rows/2;
normalize(mag, mag, 1, 0, CV_MINMAX);
cv::Mat croped = mag(cv::Rect(cx, cy, img.cols,img.rows));
cv::threshold(croped , croped , 0.56, 1, cv::THRESH_BINARY);
imshow("fftPLUShpf", mag);
imshow("cropedBinary", croped);
It is now able to display the ridges and valleys of the finger, and it can be optimized further with respect to the threshold as well.
I see a few problems going on here.
First, you need to use the mulSpectrums function to convolve two FFTs, and not multiply.
Second, createGaussianHighPassFilter is only outputting a single-channel, non-complex filter. You'll probably need to set the imaginary channel to Mat::zeros, like you did for your input image.
Third, don't convert the output of the FFT to log-magnitude spectrum. It will not combine correctly with the filter, and you won't get the same thing when performing the inverse. So, just return complexImg right after the DFT is executed. Log-magnitude spectrum is useful for a human to look at the data, but not for what you are trying to do.
Finally, make sure you pay attention to the difference between the full-complex output of dft and the Complex Conjugate Symmetric (CCS) packed output. Intel has a good page on how this data is formatted here. In your case, for simplicity, I would keep everything in full-complex mode to make your life easier.
Hope that helps!
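As an illustration of the second point, one way to give the filter a zero-valued imaginary channel so it matches the two-channel DFT output is to merge it with a zero plane. A rough sketch, assuming ghpf is the single-channel CV_32F filter and fft is the full-complex DFT from above:
Mat filterPlanes[] = { ghpf, Mat::zeros(ghpf.size(), CV_32F) };
Mat complexFilter;
merge(filterPlanes, 2, complexFilter);          // 2-channel (real, imaginary) filter
Mat res;
mulSpectrums(fft, complexFilter, res, 0);       // per-element complex multiplication
idft(res, res, DFT_SCALE | DFT_REAL_OUTPUT);    // back to a single-channel spatial image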