I have been working on camera calibration for about a week now. The examples I saw in online articles and blogs use a web camera to capture the images.
In my scenario, however, I am using a digital camera, a Casio Exilim EX-Z77. I add the images to the program arguments and access them individually in a for loop; in this manner I was able to imitate how a web camera works.
Is it possible for me to get the correct distortion coefficients and intrinsics this way?
Correct me if I am wrong or misunderstanding something.
Here is the article my code is based on; the code below is what I was able to put together.
#include <cv.h>
#include <highgui.h>

int n_boards = 0;
int board_w;
int board_h;

using namespace std;

int main( int argc, char *argv[] )
{
    board_w  = 5;  // Board width in squares
    board_h  = 8;  // Board height
    n_boards = 16; // Number of boards
    int board_n = board_w * board_h;
    CvSize board_sz = cvSize( board_w, board_h );

    CvMat* image_points      = cvCreateMat( n_boards*board_n, 2, CV_32FC1 );
    CvMat* object_points     = cvCreateMat( n_boards*board_n, 3, CV_32FC1 );
    CvMat* point_counts      = cvCreateMat( n_boards, 1, CV_32SC1 );
    CvMat* intrinsic_matrix  = cvCreateMat( 3, 3, CV_32FC1 );
    CvMat* distortion_coeffs = cvCreateMat( 5, 1, CV_32FC1 );

    CvPoint2D32f* corners = new CvPoint2D32f[ board_n ];
    int corner_count;
    int successes = 0;
    int step;

    // Iterate over the images passed as program arguments, one per board view
    for( int a = 1; a <= n_boards; a++ ){
        IplImage *image      = cvLoadImage( argv[a] );
        IplImage *gray_image = cvCreateImage( cvGetSize( image ), 8, 1 );

        int found = cvFindChessboardCorners( image, board_sz, corners,
            &corner_count, CV_CALIB_CB_ADAPTIVE_THRESH | CV_CALIB_CB_FILTER_QUADS );

        // Get subpixel accuracy on those corners
        cvCvtColor( image, gray_image, CV_BGR2GRAY );
        cvFindCornerSubPix( gray_image, corners, corner_count, cvSize( 11, 11 ),
            cvSize( -1, -1 ), cvTermCriteria( CV_TERMCRIT_EPS+CV_TERMCRIT_ITER, 30, 0.1 ));

        // Draw it
        cvDrawChessboardCorners( image, board_sz, corners, corner_count, found );
        //cvShowImage( "Calibration", image );

        // If we got a good board, add it to our data
        if( corner_count == board_n ){
            step = successes*board_n;
            for( int i = step, j = 0; j < board_n; ++i, ++j ){
                CV_MAT_ELEM( *image_points,  float, i, 0 ) = corners[j].x;
                CV_MAT_ELEM( *image_points,  float, i, 1 ) = corners[j].y;
                CV_MAT_ELEM( *object_points, float, i, 0 ) = (float)( j / board_w );
                CV_MAT_ELEM( *object_points, float, i, 1 ) = (float)( j % board_w );
                CV_MAT_ELEM( *object_points, float, i, 2 ) = 0.0f;
            }
            CV_MAT_ELEM( *point_counts, int, successes, 0 ) = board_n;
            successes++;
        }

        cvReleaseImage( &gray_image );
        cvReleaseImage( &image );
    }

    IplImage *image1 = cvLoadImage( argv[1] );

    // Transfer the points into matrices sized to the number of successful views
    CvMat* object_points2 = cvCreateMat( successes*board_n, 3, CV_32FC1 );
    CvMat* image_points2  = cvCreateMat( successes*board_n, 2, CV_32FC1 );
    CvMat* point_counts2  = cvCreateMat( successes, 1, CV_32SC1 );
    for( int i = 0; i < successes*board_n; ++i ){
        CV_MAT_ELEM( *image_points2,  float, i, 0 ) = CV_MAT_ELEM( *image_points,  float, i, 0 );
        CV_MAT_ELEM( *image_points2,  float, i, 1 ) = CV_MAT_ELEM( *image_points,  float, i, 1 );
        CV_MAT_ELEM( *object_points2, float, i, 0 ) = CV_MAT_ELEM( *object_points, float, i, 0 );
        CV_MAT_ELEM( *object_points2, float, i, 1 ) = CV_MAT_ELEM( *object_points, float, i, 1 );
        CV_MAT_ELEM( *object_points2, float, i, 2 ) = CV_MAT_ELEM( *object_points, float, i, 2 );
    }
    for( int i = 0; i < successes; ++i ){
        CV_MAT_ELEM( *point_counts2, int, i, 0 ) = CV_MAT_ELEM( *point_counts, int, i, 0 );
    }
    cvReleaseMat( &object_points );
    cvReleaseMat( &image_points );
    cvReleaseMat( &point_counts );

    // Initialize the focal lengths with a 1:1 aspect ratio
    CV_MAT_ELEM( *intrinsic_matrix, float, 0, 0 ) = 1.0f;
    CV_MAT_ELEM( *intrinsic_matrix, float, 1, 1 ) = 1.0f;

    cvCalibrateCamera2( object_points2, image_points2, point_counts2, cvGetSize( image1 ),
        intrinsic_matrix, distortion_coeffs, NULL, NULL, CV_CALIB_FIX_ASPECT_RATIO );

    cvSave( "Intrinsics.xml", intrinsic_matrix );
    cvSave( "Distortion.xml", distortion_coeffs );

    // Example of loading these matrices back in
    CvMat *intrinsic  = (CvMat*)cvLoad( "Intrinsics.xml" );
    CvMat *distortion = (CvMat*)cvLoad( "Distortion.xml" );

    // Build the undistortion maps once, then remap the image
    IplImage* mapx = cvCreateImage( cvGetSize( image1 ), IPL_DEPTH_32F, 1 );
    IplImage* mapy = cvCreateImage( cvGetSize( image1 ), IPL_DEPTH_32F, 1 );
    cvInitUndistortMap( intrinsic, distortion, mapx, mapy );

    cvNamedWindow( "Calibration" );
    cvNamedWindow( "Undistort" );
    IplImage *t = cvCloneImage( image1 );
    cvShowImage( "Calibration", t );    // Show raw image
    cvRemap( t, image1, mapx, mapy );   // Undistort image
    cvShowImage( "Undistort", image1 ); // Show corrected image
    cvWaitKey( 0 );
    cvReleaseImage( &t );

    return 0;
}
I am using Code::Blocks 10.05, OpenCV 2.3.0, and the MinGW GNU GCC compiler.
Digital cameras such as the Casio Exilim EX-Z77 usually perform a certain amount of image correction in-camera.
I believe the images you get from this camera are already corrected for lens distortion, but I couldn't find a reference to back up this claim.
As for the multiple images you are using: in practice, you only need one view to estimate the distortion. For more information on this procedure using OpenCV, check this answer.
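For completeness: once Intrinsics.xml and Distortion.xml have been saved as in your code, undistorting any single photo from the camera is a short, self-contained step. A minimal sketch using the same C API (the input file name is a placeholder):

#include <cv.h>
#include <highgui.h>

int main()
{
    // Load the calibration results saved by the calibration program
    CvMat* intrinsic  = (CvMat*)cvLoad( "Intrinsics.xml" );
    CvMat* distortion = (CvMat*)cvLoad( "Distortion.xml" );

    // "photo.jpg" is a placeholder for any image taken with the same camera
    IplImage* src = cvLoadImage( "photo.jpg" );
    IplImage* dst = cvCloneImage( src );

    cvUndistort2( src, dst, intrinsic, distortion ); // remove lens distortion
    cvSaveImage( "photo_undistorted.jpg", dst );
    return 0;
}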
EDIT:
Since you mentioned image stitching: OpenCV started to support this feature in version 2.2:
"OpenCV 2.2 is released! Teasers already far along AFTER this release: Panoramic Stitching"
On this subject, this interesting post shares some source code.
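If you can move to a newer OpenCV (2.4 or later), the high-level Stitcher API wraps the whole pipeline. A minimal sketch, with placeholder file names:

#include <opencv2/opencv.hpp>
#include <opencv2/stitching/stitcher.hpp>
#include <iostream>
#include <vector>

int main()
{
    // Load the images to be stitched ("left.jpg"/"right.jpg" are placeholders)
    std::vector<cv::Mat> imgs;
    imgs.push_back( cv::imread("left.jpg") );
    imgs.push_back( cv::imread("right.jpg") );

    cv::Mat pano;
    cv::Stitcher stitcher = cv::Stitcher::createDefault();
    cv::Stitcher::Status status = stitcher.stitch( imgs, pano );
    if( status != cv::Stitcher::OK ) {
        std::cerr << "Stitching failed, status = " << status << std::endl;
        return 1;
    }
    cv::imwrite( "pano.jpg", pano );
    return 0;
}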
I am new to OpenCV, so could you help me find the coordinates of the points detected by Harris, using the following code?
The source image is img, and I want to store the coordinates of the corner points in the matrix S.
Mat S;
Mat dst, dst_norm, dst_norm_scaled;
int thresh = 200; // example threshold on the normalized response
dst = Mat::zeros( img.size(), CV_32FC1 );
cornerHarris( img, dst, 7, 5, 0.0001, BORDER_DEFAULT );
// Normalizing
normalize( dst, dst_norm, 0, 255, NORM_MINMAX, CV_32FC1, Mat() );
convertScaleAbs( dst_norm, dst_norm_scaled );
for( int j = 0; j < dst_norm.rows ; j++ ) {
for( int i = 0; i < dst_norm.cols; i++ ) {
if( (int) dst_norm.at<float>(j,i) > thresh ) {
S.at<int >(j,i)= (int) dst_norm.at<int>(j,i);
}
}
}
You can store the point coordinates in an Nx2 matrix, where the first column is the x coordinate and the second column is the y coordinate.
You can declare S as an empty CV_32SC1 matrix like:
Mat S(0, 2, CV_32SC1);
(or you can even leave Mat S;, since the type is determined by the first push_back).
Then you can append the coordinates. Inside the if statement, add:
// Create a matrix for the point
Mat pt(1,2,CV_32SC1);
pt.at<int>(0, 0) = i;
pt.at<int>(0, 1) = j;
// Add the point to S
S.push_back(pt);
Note that it can be more straightforward to use std::vector<Point> to store points. In this case, you can declare Svec as:
std::vector<cv::Point> Svec;
and in your if statement you'll have:
Svec.push_back(Point(i,j));
or
Svec.emplace_back(i,j);
If needed, you can convert the vector<Point> to a Mat like:
Mat Z(Svec); // Creates a 2 channels matrix, Nx1
Z = Z.reshape(1); // Creates a 1 channel matrix, Nx2
(The i and j pushed here are simply the values of your loop counters at the moment the response passes the threshold; those are exactly the coordinates of the Harris corner points you need.)
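Putting the pieces together, a minimal sketch of the whole collection loop, assuming dst_norm and thresh are set up as in your question:

// Collect all Harris corners whose normalized response exceeds the threshold
std::vector<cv::Point> Svec;
for( int j = 0; j < dst_norm.rows; j++ ) {
    for( int i = 0; i < dst_norm.cols; i++ ) {
        if( (int)dst_norm.at<float>(j, i) > thresh ) {
            Svec.push_back( cv::Point(i, j) ); // x = column i, y = row j
        }
    }
}
// Optional: convert to an Nx2 single-channel matrix
cv::Mat Z(Svec);  // Nx1, 2 channels
Z = Z.reshape(1); // Nx2, 1 channel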
I have the following output from a red-only filtration, produced by the following algorithm:
cv::Mat findColor(const cv::Mat & inputBGRimage, int rng=20)
{
// Make sure that your input image uses the channel order B, G, R (check not implemented).
cv::Mat mt1, mt2;
cv::Mat input = inputBGRimage.clone();
cv::Mat imageHSV; //(input.rows, input.cols, CV_8UC3);
cv::Mat imgThreshold, imgThreshold0, imgThreshold1; //(input.rows, input.cols, CV_8UC1);
assert( ! input.empty() );
// blur image
cv::blur( input, input, cv::Size(11, 11) );
// convert input-image to HSV-image
cv::cvtColor( input, imageHSV, cv::COLOR_BGR2HSV );
// In the HSV color space the color 'red' is located around the H-value 0 and also around the
// H-value 180. That is why you need to threshold your image twice and then combine the results.
cv::inRange( imageHSV, cv::Scalar( H_MIN, S_MIN, V_MIN ), cv::Scalar( H_MAX, S_MAX, V_MAX ), imgThreshold0 );
if ( rng > 0 )
{
// cv::inRange(imageHSV, cv::Scalar(180-rng, 53, 185, 0), cv::Scalar(180, 255, 255, 0), imgThreshold1);
// cv::bitwise_or( imgThreshold0, imgThreshold1, imgThreshold );
}
else
{
imgThreshold = imgThreshold0;
}
// cv::dilate( imgThreshold0, mt1, Mat() );
// cv::erode( mt1, mt2, Mat() );
return imgThreshold0;
}
And here is the output:
I want to detect the four corner coordinates of the rectangle. As you can see, the output is not perfect; I used cv::findContours in conjunction with cv::approxPolyDP before, but it is no longer working well.
Is there any filter I can apply to the input image (besides blur, dilate, and erode) to make the image easier to process?
Any suggestions?
Updated:
When I use findContours like this:
std::vector<std::vector<cv::Point> > contours, largestContours;
std::vector<cv::Vec4i> hierarchy;
std::vector<cv::Point> approx;
findContours( src, contours, hierarchy, CV_RETR_EXTERNAL, CV_CHAIN_APPROX_SIMPLE );
double largest_area = 0;
for( size_t i = 0; i < contours.size(); i++ ) { // get the largest contour
    double area = fabs( contourArea( contours[i] ) );
    if( area >= largest_area ) {
        largest_area = area;
        largestContours.clear();
        largestContours.push_back( contours[i] );
    }
}
if( largest_area > 5000 ) {
    cv::approxPolyDP( cv::Mat(largestContours[0]), approx, 100, true );
    cout << approx.size() << endl; /* ALWAYS RETURNS 2 ?!? */
}
The approxPolyDP is not working as expected.
I think your result is quite good. Maybe you could select the contour with the greatest area (using image moments or contourArea) and then find the minimal rotated rectangle of that contour:
std::vector<cv::RotatedRect> minRect( contours.size() );
for( size_t i = 0; i < contours.size(); i++ )
{
    minRect[i] = cv::minAreaRect( cv::Mat(contours[i]) );
}
The RotatedRect class can give you the four corners as Point2f via its points() method:
RotatedRect rRect = RotatedRect(Point2f(100,100), Size2f(100,50), 30);
Point2f vertices[4];
rRect.points(vertices);
for(int i = 0; i < 4; i++){
std::cout << vertices[i] << " ";
}
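Applied to your case, a minimal sketch that boxes the largest contour (assuming `contours` was filled by findContours on your binary image `src` as above):

// Find the largest contour by area and fit the minimal rotated rectangle
size_t largest = 0;
double largestArea = 0;
for( size_t i = 0; i < contours.size(); i++ ) {
    double a = fabs( contourArea( contours[i] ) );
    if( a > largestArea ) { largestArea = a; largest = i; }
}
cv::RotatedRect box = cv::minAreaRect( cv::Mat(contours[largest]) );

// Draw the four corners on a color canvas built from the binary image
cv::Mat drawing;
cv::cvtColor( src, drawing, CV_GRAY2BGR );
cv::Point2f vertices[4];
box.points( vertices );
for( int i = 0; i < 4; i++ )
    cv::line( drawing, vertices[i], vertices[(i+1) % 4], cv::Scalar(0,255,0), 2 );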
I have a really strange issue, and I think I might be doing something wrong. I have an OpenCV 1 implementation of pyramidal Lucas-Kanade and an OpenCV 2 implementation. The difference is that the OpenCV 2 version takes MUCH longer to run (in particular the goodFeaturesToTrack function). In addition, including the OpenCV 2 libs and headers in the OpenCV 1 implementation makes that one extremely slow as well (we're talking about 0.002 s per image pair vs. 1 second per image pair). Am I doing something wrong?
Windows 7, 64-bit. Here is the OpenCV 2 code that runs really slowly, at about 1 frame per second. As I said, taking the OpenCV 1 implementation and switching the library version causes the same slowdown by a factor of 10 or more. I think this is very weird, and Google came up with no information. Thanks!
#include <opencv2/opencv.hpp>
#include <iostream>
#include <vector>
#include <cmath>
using namespace cv;
using namespace std;
int64 now, then;
double elapsed_seconds, tickspersecond=cvGetTickFrequency() * 1.0e6;
int main(int argc, char** argv)
{
// Load two images and allocate other structures
Mat imgA = imread("0000.png", CV_LOAD_IMAGE_GRAYSCALE);
Mat imgB = imread("0001.png", CV_LOAD_IMAGE_GRAYSCALE);
Size img_sz = imgA.size();
Mat imgC = Mat::zeros( img_sz, CV_8UC3 ); // canvas for drawing the flow vectors
int win_size = 15;
int maxCorners = 100;
double qualityLevel = 0.05;
double minDistance = 2.0;
int blockSize = 3;
double k = 0.04;
std::vector<cv::Point2f> cornersA;
cornersA.reserve(maxCorners);
std::vector<cv::Point2f> cornersB;
cornersB.reserve(maxCorners);
then = cvGetTickCount();
goodFeaturesToTrack( imgA,cornersA,maxCorners,qualityLevel,minDistance,cv::Mat(),blockSize,true);
goodFeaturesToTrack( imgB,cornersB,maxCorners,qualityLevel,minDistance,cv::Mat(),blockSize,true);
now = cvGetTickCount();
cout << (double)(now - then) / tickspersecond;
cornerSubPix( imgA, cornersA, Size( win_size, win_size ), Size( -1, -1 ),
TermCriteria( CV_TERMCRIT_ITER | CV_TERMCRIT_EPS, 20, 0.03 ) );
cornerSubPix( imgB, cornersB, Size( win_size, win_size ), Size( -1, -1 ),
TermCriteria( CV_TERMCRIT_ITER | CV_TERMCRIT_EPS, 20, 0.03 ) );
// Call the pyramidal Lucas-Kanade algorithm (the C-API pyramid buffer size is no longer needed)
std::vector<uchar> features_found;
features_found.reserve(maxCorners);
std::vector<float> feature_errors;
feature_errors.reserve(maxCorners);
calcOpticalFlowPyrLK( imgA, imgB, cornersA, cornersB, features_found, feature_errors,
    Size( win_size, win_size ), 5,
    TermCriteria( CV_TERMCRIT_ITER | CV_TERMCRIT_EPS, 20, 0.3 ) );
// Make an image of the results
for( size_t i = 0; i < features_found.size(); i++ ){
    if( !features_found[i] ) // skip features the tracker lost
        continue;
    Point p0( ceil( cornersA[i].x ), ceil( cornersA[i].y ) );
    Point p1( ceil( cornersB[i].x ), ceil( cornersB[i].y ) );
    line( imgC, p0, p1, CV_RGB(255,255,255), 2 );
}
namedWindow( "ImageA", 0 );
namedWindow( "ImageB", 0 );
namedWindow( "LKpyr_OpticalFlow", 0 );
imshow( "ImageA", imgA );
imshow( "ImageB", imgB );
imshow( "LKpyr_OpticalFlow", imgC );
waitKey(0);
return 0;
}
You're probably using the debug libraries (*d.lib) instead of the release ones. I had this same problem, with ~1-2 s per call to goodFeaturesToTrack(), and switching to release solved it.
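For reference, on Windows the debug builds differ from the release builds only by a 'd' suffix in the library name. For OpenCV 2.3.0 the linker input would look roughly like this (a sketch; adjust the module list to what you actually use):

Release: opencv_core230.lib  opencv_imgproc230.lib  opencv_video230.lib  opencv_highgui230.lib
Debug:   opencv_core230d.lib opencv_imgproc230d.lib opencv_video230d.lib opencv_highgui230d.lib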
Why are you calling goodFeaturesToTrack twice?
Call it once to get cornersA, and then use LK to identify the same corners/features in imgB.
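A minimal sketch of that pattern, reusing the names from your code:

// Detect features only in the first image...
goodFeaturesToTrack( imgA, cornersA, maxCorners, qualityLevel, minDistance );

// ...and let pyramidal LK locate the matching positions in the second image
std::vector<uchar> features_found;
std::vector<float> feature_errors;
calcOpticalFlowPyrLK( imgA, imgB, cornersA, cornersB, features_found, feature_errors,
    Size( win_size, win_size ), 5,
    TermCriteria( CV_TERMCRIT_ITER | CV_TERMCRIT_EPS, 20, 0.3 ) );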
I am trying to use the cvCamShift function in OpenCV. I have written a class for doing this; here is its code:
#include <cv.h>
#include <highgui.h>
using namespace std;
class Tracker{
// File-level variables
IplImage * pHSVImg; // the input image converted to HSV color mode
IplImage * pHueImg; // the Hue channel of the HSV image
IplImage * pMask; // this image is used for masking pixels
IplImage * pProbImg; // the face probability estimates for each pixel
CvHistogram * pHist; // histogram of hue in the original face image
CvRect prevFaceRect; // location of face in previous frame
CvBox2D faceBox; // current face-location estimate
int vmin;
int vmax;
int smin;
void updateHueImage(IplImage* pImg);
public:
Tracker(IplImage * pImg, CvRect pFaceRect);
~Tracker();
CvBox2D track(IplImage* pImg);
};
Tracker::Tracker(IplImage * pImg, CvRect pFaceRect){
// File-level variables
int nHistBins = 30; // number of histogram bins
float rangesArr[] = {0,180}; // histogram range
vmin = 10;
vmax = 256;
smin = 55;
float * pRanges = rangesArr;
pHSVImg = cvCreateImage( cvGetSize(pImg), 8, 3 );
pHueImg = cvCreateImage( cvGetSize(pImg), 8, 1 );
pMask = cvCreateImage( cvGetSize(pImg), 8, 1 );
pProbImg = cvCreateImage( cvGetSize(pImg), 8, 1 );
pHist = cvCreateHist( 1, &nHistBins, CV_HIST_ARRAY, &pRanges, 1 );
float maxVal = 0.f;
// Create a new hue image
updateHueImage(pImg);
// Create a histogram representation for the face
cvSetImageROI( pHueImg, pFaceRect );
cvSetImageROI( pMask, pFaceRect );
cvCalcHist( &pHueImg, pHist, 0, pMask );
cvGetMinMaxHistValue( pHist, 0, &maxVal, 0, 0 );
cvConvertScale( pHist->bins, pHist->bins, maxVal? 255.0/maxVal : 0, 0 );
cvResetImageROI( pHueImg );
cvResetImageROI( pMask );
// Store the previous face location
prevFaceRect = pFaceRect;
}
Tracker::~Tracker(){
cvReleaseImage( &pHSVImg );
cvReleaseImage( &pHueImg );
cvReleaseImage( &pMask );
cvReleaseImage( &pProbImg );
cvReleaseHist( &pHist );
}
void Tracker::updateHueImage(IplImage * pImg)
{
// Convert to HSV color model
cvCvtColor( pImg, pHSVImg, CV_BGR2HSV );
// Mask out-of-range values
cvInRangeS( pHSVImg, cvScalar(0, smin, MIN(vmin,vmax), 0),
cvScalar(180, 256, MAX(vmin,vmax) ,0), pMask );
// Extract the hue channel
cvSplit( pHSVImg, pHueImg, 0, 0, 0 );
}
CvBox2D Tracker::track(IplImage * pImg)
{
CvConnectedComp components;
updateHueImage(pImg);
cvCalcBackProject( &pHueImg, pProbImg, pHist );
cvAnd( pProbImg, pMask, pProbImg, 0 );
cvCamShift( pProbImg, prevFaceRect,
cvTermCriteria( CV_TERMCRIT_EPS | CV_TERMCRIT_ITER, 10, 1 ),
&components, &faceBox );
prevFaceRect = components.rect;
faceBox.angle = -faceBox.angle;
return faceBox;
}
It works well, but when the tracked object goes out of the frame or something comes in front of it, I get a runtime error. A friend of mine has code that is very similar to mine, and it does not produce any runtime error even in the worst situations. Please guide me: how can I debug this?
I think that when the tracked object goes out of the frame or something comes in front of it, the parameter prevFaceRect becomes empty or invalid (e.g. non-positive width or height), and that may be why it throws an error. So before calling this function
cvCamShift( pProbImg, prevFaceRect,
cvTermCriteria( CV_TERMCRIT_EPS | CV_TERMCRIT_ITER, 10, 1 ),
&components, &faceBox );
add a check to verify that prevFaceRect is still a valid rectangle inside the image.
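A minimal sketch of such a guard, reusing the member names from the class above (resetting to the whole frame is just one possible recovery strategy):

// Reset the search window if the previous rect collapsed or left the frame
if( prevFaceRect.width <= 0 || prevFaceRect.height <= 0 ||
    prevFaceRect.x < 0 || prevFaceRect.y < 0 ||
    prevFaceRect.x + prevFaceRect.width  > pProbImg->width ||
    prevFaceRect.y + prevFaceRect.height > pProbImg->height )
{
    prevFaceRect = cvRect( 0, 0, pProbImg->width, pProbImg->height );
}
cvCamShift( pProbImg, prevFaceRect,
            cvTermCriteria( CV_TERMCRIT_EPS | CV_TERMCRIT_ITER, 10, 1 ),
            &components, &faceBox );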
I am trying to calibrate a camera (OpenCV 2.3.1, VS 2010 and Windows 7), but when I compile my program there is an OpenCV error:
"Assertion failed: (src.size == dst.size && src.channels() == dst.channels()) in unknown function, file ...\modules\core\src\convert.cpp, line 1277"
Here is a part of the code:
CvMat* image_points = cvCreateMat(n_boards*board_total,2,CV_32FC1);
CvMat* object_points = cvCreateMat(n_boards*board_total,3,CV_32FC1);
CvMat* point_counts = cvCreateMat(n_boards,1,CV_32SC1);
CvMat* intrinsic_matrix = cvCreateMat(3,3,CV_32FC1);
CvMat* distortion_coeffs = cvCreateMat(4,1,CV_32FC1);
for ( int ig = 0; ig< n_boards; ig++ )
{
image= cvLoadImage(names[ig],CV_LOAD_IMAGE_COLOR); // load image
cvNamedWindow("imageessai", 1);
cvShowImage("imageessai", image);
cvWaitKey(0);
cvDestroyWindow("imageessai");
for (int ik=0; ik<n_boards; ik++)
{
int found = cvFindChessboardCorners(image, board_sz, corners, &corner_count,CV_CALIB_CB_ADAPTIVE_THRESH | CV_CALIB_CB_FILTER_QUADS );
cvFindCornerSubPix(gray_image, corners, corner_count, cvSize(11,11),cvSize(-1,-1), cvTermCriteria( CV_TERMCRIT_ITER+CV_TERMCRIT_EPS, 30, 0.1 ));
cvDrawChessboardCorners(image, board_sz, corners, corner_count, found);
if( corner_count == board_total )
{
step= ig*board_total;
for( int i=step,j=0 ; j<board_total; j++, i++)
{
CV_MAT_ELEM(*image_points, float,i,0) = corners[j].x;
CV_MAT_ELEM(*image_points, float,i,1) = corners[j].y;
CV_MAT_ELEM(*object_points,float,i,0) = (float) (j/board_w);
CV_MAT_ELEM(*object_points,float,i,1) = (float) (j%board_w);
CV_MAT_ELEM(*object_points,float,i,2)=0.0f;
}
CV_MAT_ELEM (*point_counts, int,ig,0) = board_total;
printf("\n %d successful Snapshots out of %d collected.\n",ig+1,n_boards);
}
}
CvMat* object_points2 = cvCreateMat(n_boards*board_total,3,CV_32FC1);
CvMat* image_points2 = cvCreateMat(n_boards*board_total,2,CV_32FC1);
CvMat* point_counts2 = cvCreateMat(n_boards,1,CV_32SC1);
for(int i = 0; i<n_boards*board_total; ++i)
{
CV_MAT_ELEM( *image_points2,  float, i, 0 ) = CV_MAT_ELEM( *image_points,  float, i, 0 );
CV_MAT_ELEM( *image_points2,  float, i, 1 ) = CV_MAT_ELEM( *image_points,  float, i, 1 );
CV_MAT_ELEM( *object_points2, float, i, 0 ) = CV_MAT_ELEM( *object_points, float, i, 0 );
CV_MAT_ELEM( *object_points2, float, i, 1 ) = CV_MAT_ELEM( *object_points, float, i, 1 );
CV_MAT_ELEM( *object_points2, float, i, 2 ) = CV_MAT_ELEM( *object_points, float, i, 2 );
}
for(int i=0; i<n_boards; ++i)
{
CV_MAT_ELEM( *point_counts2, int, i, 0)=CV_MAT_ELEM(*point_counts,int,i,0);
}
cvReleaseMat(&object_points);
cvReleaseMat(&image_points);
cvReleaseMat(&point_counts);
CV_MAT_ELEM( *intrinsic_matrix, float, 0, 0 ) = 1.0f;
CV_MAT_ELEM( *intrinsic_matrix, float, 1, 1 ) = 1.0f;
CvMat* rvec = cvCreateMat(n_boards,3,CV_32F); // rotation vectors (one per view)
CvMat* tvec = cvCreateMat(n_boards,3,CV_32F); // translation vectors (one per view)
CvMat* H = cvCreateMat(1,3,CV_32FC1);
cvCalibrateCamera2(object_points2, image_points2, point_counts2, sizeim, intrinsic_matrix, distortion_coeffs, rvec, tvec,0 );
cvFindHomography(object_points2,image_points2,H,0,3,NULL);
I have tried to resolve this problem many times. Please, I need help.
(I'm opening my comment as an answer since Nabiha might not be able to answer to a comment with 1 rep.)
It seems you're passing an image of the wrong type (wrong number of channels) to one of the cv functions. From your description it's hard to say, but I guess it happens in cvCalibrateCamera2 or cvFindHomography.
An assertion failure also sounds like a runtime error rather than a compile-time error.
If it is a runtime error, you'd have to check whether the images you're passing to those functions have the correct setup (channels, same size, etc.). This is something the compiler can't check for you, since these parameters are dynamic.
You can methodically comment out some of the function calls (to debug your code) and see if your program still works, in case you have trouble finding out where exactly the code breaks with a debugger. Once you isolate the call, check the documentation for what it expects as parameters, and you should be on the right track to solve this.
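For example, a few sanity checks of this kind could be dropped in right before the suspect calls (a sketch; the names match the code in the question):

// Print the dynamic properties the assertion complains about
printf( "image: %dx%d, %d channel(s)\n", image->width, image->height, image->nChannels );
printf( "object_points2: %dx%d, image_points2: %dx%d\n",
        object_points2->rows, object_points2->cols,
        image_points2->rows,  image_points2->cols );

// cvFindChessboardCorners wants a 3-channel (or grayscale) image...
assert( image != NULL && image->nChannels == 3 );
// ...while cvFindCornerSubPix expects a single-channel image, so make sure
// gray_image was actually created and filled via cvCvtColor beforehand
assert( gray_image != NULL && gray_image->nChannels == 1 );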
Calibration is a multi-step process (grabbing images, extracting features, computing the homography, computing the intrinsic and extrinsic camera parameters). It is also tricky to understand (math-wise), so don't give up. The OpenCV calibration functions usually work pretty well.
The debug button is not active and the rescan button of the solution project is not active, so I cannot compile or debug the sample program samples\cpp\calibration.cpp.