I have a really strange issue and I think I might be doing something wrong. I have an OpenCV 1 implementation of pyramidal Lucas-Kanade and an OpenCV 2 implementation. The difference is that the OpenCV 2 version takes MUCH longer to run (in particular the goodFeaturesToTrack function) than the OpenCV 1 version. In addition, including the OpenCV 2 libs and headers in the OpenCV 1 implementation makes that one extremely slow as well (we're talking about 0.002 s per image pair vs. 1 second per image pair). Am I doing something wrong?
Windows 7, 64 bit. Here is the OpenCV 2 code that runs really slowly, at about 1 frame per second. As I said, taking the OpenCV 1 implementation and switching library versions causes the same slowdown by a factor of 10 or more. This seems very weird to me, and Google turned up no information. Thanks!
#include <opencv2/opencv.hpp>
#include <iostream>
#include <vector>
#include <cmath>
using namespace cv;
using namespace std;
int64 now, then;
double elapsed_seconds, tickspersecond=cvGetTickFrequency() * 1.0e6;
int main(int argc, char** argv)
{
// Load two images and allocate other structures
Mat imgA = imread("0000.png", CV_LOAD_IMAGE_GRAYSCALE);
Mat imgB = imread("0001.png", CV_LOAD_IMAGE_GRAYSCALE);
Size img_sz = imgA.size();
Mat imgC = Mat::zeros(img_sz, CV_8UC1); // output canvas for the flow vectors
int win_size = 15;
int maxCorners = 100;
double qualityLevel = 0.05;
double minDistance = 2.0;
int blockSize = 3;
double k = 0.04;
std::vector<cv::Point2f> cornersA;
cornersA.reserve(maxCorners);
std::vector<cv::Point2f> cornersB;
cornersB.reserve(maxCorners);
then = cvGetTickCount();
goodFeaturesToTrack( imgA, cornersA, maxCorners, qualityLevel, minDistance, cv::Mat(), blockSize, true );
goodFeaturesToTrack( imgB, cornersB, maxCorners, qualityLevel, minDistance, cv::Mat(), blockSize, true );
now = cvGetTickCount();
cout << "goodFeaturesToTrack took " << (double)(now - then) / tickspersecond << " s" << endl;
cornerSubPix( imgA, cornersA, Size( win_size, win_size ), Size( -1, -1 ),
TermCriteria( CV_TERMCRIT_ITER | CV_TERMCRIT_EPS, 20, 0.03 ) );
cornerSubPix( imgB, cornersB, Size( win_size, win_size ), Size( -1, -1 ),
TermCriteria( CV_TERMCRIT_ITER | CV_TERMCRIT_EPS, 20, 0.03 ) );
// Call Lucas Kanade algorithm
// Note: no pyramid buffer is needed here; unlike the old cvCalcOpticalFlowPyrLK,
// the C++ calcOpticalFlowPyrLK allocates its image pyramids internally.
std::vector<uchar> features_found;
features_found.reserve(maxCorners);
std::vector<float> feature_errors;
feature_errors.reserve(maxCorners);
calcOpticalFlowPyrLK( imgA, imgB, cornersA, cornersB, features_found, feature_errors ,
Size( win_size, win_size ), 5,
cvTermCriteria( CV_TERMCRIT_ITER | CV_TERMCRIT_EPS, 20, 0.3 ), 0 );
// Make an image of the results
for( size_t i = 0; i < features_found.size(); i++ ){
if( !features_found[i] ) continue; // skip points LK could not track
Point p0( ceil( cornersA[i].x ), ceil( cornersA[i].y ) );
Point p1( ceil( cornersB[i].x ), ceil( cornersB[i].y ) );
line( imgC, p0, p1, CV_RGB(255,255,255), 2 );
}
namedWindow( "ImageA", 0 );
namedWindow( "ImageB", 0 );
namedWindow( "LKpyr_OpticalFlow", 0 );
imshow( "ImageA", imgA );
imshow( "ImageB", imgB );
imshow( "LKpyr_OpticalFlow", imgC );
waitKey(0);
return 0;
}
You're probably linking the debug libraries (*d.lib) instead of the release ones. I had this same problem, ~1-2 s per call to goodFeaturesToTrack(), and switching to Release solved it.
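If you want to double-check which configuration a binary was actually built as, a quick sanity check (assuming MSVC, which defines _DEBUG in Debug builds; this is only a diagnostic, the fix is linking opencv_*.lib rather than opencv_*d.lib in Release):

#include <iostream>

int main()
{
    // _DEBUG is set by MSVC for Debug builds; linking Debug OpenCV libs
    // typically explains a large slowdown in functions like goodFeaturesToTrack.
#ifdef _DEBUG
    std::cout << "Debug build -- expect OpenCV to run much slower." << std::endl;
#else
    std::cout << "Release build." << std::endl;
#endif
    return 0;
}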
Why are you calling goodFeaturesToTrack twice?
Call it once to get cornersA, then let LK identify the same corners / features in imgB, as sketched below.
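Roughly like this (a sketch reusing the variable names from the question's code; cornersB is filled in by the tracker rather than detected separately):

// Detect features only in the first frame...
std::vector<cv::Point2f> cornersA, cornersB;
cv::goodFeaturesToTrack( imgA, cornersA, maxCorners, qualityLevel, minDistance,
                         cv::Mat(), blockSize, true );
cv::cornerSubPix( imgA, cornersA, cv::Size( win_size, win_size ), cv::Size( -1, -1 ),
                  cv::TermCriteria( CV_TERMCRIT_ITER | CV_TERMCRIT_EPS, 20, 0.03 ) );

// ...and let pyramidal LK find where those features moved in the second frame.
std::vector<uchar> status;
std::vector<float> err;
cv::calcOpticalFlowPyrLK( imgA, imgB, cornersA, cornersB, status, err,
                          cv::Size( win_size, win_size ), 5,
                          cv::TermCriteria( CV_TERMCRIT_ITER | CV_TERMCRIT_EPS, 20, 0.3 ) );
// cornersB[i] is now the tracked position of cornersA[i];
// status[i] says whether the point was actually found.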
I have to perform eye detection on each of the faces in the famous Oscar selfie image. I tried using Haar cascades on the faces, since most of them are near-frontal, but the eye detection is totally random and no eyes are recognized at all.
I have tried the same Haar cascade XML file for eye detection on images with single faces and it worked fine.
What steps could I take to correctly detect the eyes?
The image I used for eye detection can be downloaded from here:
https://drive.google.com/file/d/0B3jt6sHgpxO-d1plUjg5eU5udW8/view?usp=sharing
Below is the code I have written for face and eye detection. The basic idea is that I first detect the face using the Viola-Jones algorithm and, within each face, I try to detect the eyes.
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/objdetect/objdetect.hpp>
#include <iostream>
#include <vector>
using namespace cv;
using namespace std;
int x,y,w,h;
int main(int argc, const char** argv)
{
Mat image = imread("oscarSelfie.jpg", CV_LOAD_IMAGE_COLOR); //force 3 channels so CV_BGR2GRAY below is valid
Mat gray_img;
cvtColor(image, gray_img, CV_BGR2GRAY);
string faceCascade_file = "haarcascade_frontalface_alt2.xml";
string eyeCascade_file = "haarcascade_eye.xml";
CascadeClassifier faceCascade;
CascadeClassifier eyeCascade;
//Cascade classifier is a class which has a method to load the classifier from file
if( !faceCascade.load( faceCascade_file ) )
{ cout<<"--(!)Error loading\n"; return -1; };
//If it returns false, an error has occurred while loading the classifier
if( !eyeCascade.load( eyeCascade_file ) )
{ cout<<"--(!)Error loading\n"; return -1; };
equalizeHist(gray_img, gray_img);
//Increases contrast and makes the image more distinguishable
/***** Detecting Faces in Image *******/
vector<Rect> faces;
vector<Rect> eyes;
//Rect is a class handling the rectangle datatypes
faceCascade.detectMultiScale(gray_img, faces, 1.1, 1, 0|CV_HAAR_SCALE_IMAGE, Size(30, 30) );
//faces.size()-it will return number of faces detected
for( int i = 0; i < faces.size(); i++ )
{
x = faces[i].x;
y = faces[i].y;
w = faces[i].width;
h = faces[i].height;
//Point center( faces[i].x + faces[i].width*0.5, faces[i].y + faces[i].height*0.5 );
//ellipse( image, center, Size( faces[i].width*0.5, faces[i].height*0.5), 0, 0, 360, Scalar( 255, 0, 255 ), 4, 8, 0 );
rectangle(image, cvPoint(x,y), cvPoint(x+w,y+h), CV_RGB(0,255,0), 2, 8 );
/******** Detecting eyes ***********/
//Search for eyes inside the detected face region only; the eye coordinates
//returned are then relative to the face, matching the offsets used below
Mat faceROI = gray_img( faces[i] );
eyeCascade.detectMultiScale(faceROI, eyes, 1.1, 3, 0|CV_HAAR_SCALE_IMAGE, Size(30, 30) );
for(int j=0; j < eyes.size(); j++)
{
Point center( faces[i].x + eyes[j].x + eyes[j].width*0.5, faces[i].y + eyes[j].y + eyes[j].height*0.5 );
int radius = cvRound( (eyes[j].width + eyes[j].height)*0.25 );
circle( image, center, radius, Scalar( 255, 0, 0 ), 4, 8, 0 );
}
}
namedWindow("oscarSelfie :)", CV_WINDOW_AUTOSIZE);
imshow("oscarSelfie :)", image);
waitKey(0);
destroyWindow("pic");
return 0;
}
I get the following result with facedetect.cpp (which uses haarcascade_eye_tree_eyeglasses.xml);
don't expect it to find all faces and eyes.
I also tried dlib's face_landmark_detection_ex.cpp to compare results.
dlib has an extra feature that gives you aligned faces, as seen below.
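For reference, the core of dlib's example boils down to something like this (a sketch only; it assumes you have downloaded the shape_predictor_68_face_landmarks.dat model that dlib's examples use):

#include <dlib/image_processing/frontal_face_detector.h>
#include <dlib/image_processing.h>
#include <dlib/image_transforms.h>
#include <dlib/image_io.h>
#include <vector>

int main()
{
    dlib::frontal_face_detector detector = dlib::get_frontal_face_detector();
    dlib::shape_predictor sp;
    dlib::deserialize("shape_predictor_68_face_landmarks.dat") >> sp;

    dlib::array2d<dlib::rgb_pixel> img;
    dlib::load_image(img, "oscarSelfie.jpg");

    // Detect faces, then fit 68 landmarks (eyes, brows, nose, mouth, jaw) to each.
    std::vector<dlib::rectangle> dets = detector(img);
    std::vector<dlib::full_object_detection> shapes;
    for (unsigned long i = 0; i < dets.size(); ++i)
        shapes.push_back(sp(img, dets[i]));

    // The "aligned faces" come from extracting normalized chips around each shape.
    dlib::array<dlib::array2d<dlib::rgb_pixel> > face_chips;
    dlib::extract_image_chips(img, dlib::get_face_chip_details(shapes), face_chips);
    return 0;
}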
You may want to use the CLM-framework for face landmark detection. In my experience, its performance is satisfactory.
Some examples of the system in action: http://youtu.be/V7rV0uy7heQ
I'm writing a program using C++ and OpenCV. It's actually my first, so what I'm asking about is probably something very basic I've overlooked. Much of it is copied (not copy+pasted, mind you, but copied by hand, going line by line and understanding what each line does) from some of OpenCV's tutorials. I'll paste the code below.
The problem I'm encountering is that as soon as the webcam starts trying to run face detection, everything just SLOWS. DOWN. As I understand it, it's because the .exe is trying to read from two massive .xml files on every frame update, but I don't have any idea how to fix it. It was worse before I constrained the height, width, and frame rate of the video.
If anyone has any ideas at this point, I'd love to hear them. I'm very new to software programming - until now I've mostly done web development, so I'm not used to worrying about system memory and other factors.
Thanks in advance!
EDIT: Here are my system specs: Mac, OSX 10.9.4, 2.5 GHz Intel Core i5, 4 GB 1600 MHz DDR3 RAM.
#include "opencv2/objdetect.hpp"
#include "opencv2/videoio.hpp"
#include "opencv2/highgui.hpp"
#include "opencv2/imgproc.hpp"
#include <iostream>
#include <stdio.h>
using namespace std;
using namespace cv;
/** Function Headers */
void detectAndDisplay( Mat frame );
/** Global variables */
String face_cascade_name = "haarcascade_frontalface_alt.xml";
String eyes_cascade_name = "haarcascade_eye_tree_eyeglasses.xml";
CascadeClassifier face_cascade;
CascadeClassifier eyes_cascade;
String window_name = "Capture - Face detection";
/** @function main */
int main( void )
{
cv::VideoCapture capture;
Mat frame;
//-- 1. Load the cascades
if( !face_cascade.load( face_cascade_name ) ){ printf("--(!)Error loading face cascade\n"); return -1; };
if( !eyes_cascade.load( eyes_cascade_name ) ){ printf("--(!)Error loading eyes cascade\n"); return -1; };
//-- 2. Read the video stream
capture.open( -1 );
if ( ! capture.isOpened() ) { printf("--(!)Error opening video capture\n"); return -1; }
capture.set(CV_CAP_PROP_FRAME_WIDTH,640);
capture.set(CV_CAP_PROP_FRAME_HEIGHT,480);
capture.set(CV_CAP_PROP_FPS, 15);
while ( capture.read(frame) )
{
if( frame.empty() )
{
printf(" --(!) No captured frame -- Break!");
break;
}
//-- 3. Apply the classifier to the frame
detectAndDisplay( frame );
int c = waitKey(10);
if( (char)c == 27 ) { break; } // escape
}
return 0;
}
/** @function detectAndDisplay */
void detectAndDisplay( Mat frame )
{
std::vector<Rect> faces;
Mat frame_gray;
cvtColor( frame, frame_gray, COLOR_BGR2GRAY );
equalizeHist( frame_gray, frame_gray );
//-- Detect faces
face_cascade.detectMultiScale( frame_gray, faces, 1.1, 2, 0|CASCADE_SCALE_IMAGE, Size(30, 30) );
for ( size_t i = 0; i < faces.size(); i++ )
{
Point center( faces[i].x + faces[i].width/2, faces[i].y + faces[i].height/2 );
ellipse( frame, center, Size( faces[i].width/2, faces[i].height/2 ), 0, 0, 360, Scalar( 255, 0, 255 ), 4, 8, 0 );
Mat faceROI = frame_gray( faces[i] );
std::vector<Rect> eyes;
//-- In each face, detect eyes
eyes_cascade.detectMultiScale( faceROI, eyes, 1.1, 2, 0 |CASCADE_SCALE_IMAGE, Size(30, 30) );
for ( size_t j = 0; j < eyes.size(); j++ )
{
Point eye_center( faces[i].x + eyes[j].x + eyes[j].width/2, faces[i].y + eyes[j].y + eyes[j].height/2 );
int radius = cvRound( (eyes[j].width + eyes[j].height)*0.25 );
circle( frame, eye_center, radius, Scalar( 255, 0, 0 ), 4, 8, 0 );
}
}
//-- Show what you got
imshow( window_name, frame );
}
A quick solution would be to replace:
eyes_cascade.detectMultiScale( faceROI, eyes, 1.1, 2, 0 |CASCADE_SCALE_IMAGE, Size(30, 30) );
by
eyes_cascade.detectMultiScale( faceROI, eyes, 1.3, 2, 0 |CASCADE_SCALE_IMAGE, Size(60, 60), Size(350, 350) );
1.3 is the scale factor, Size(60, 60) the minimum window size and Size(350, 350) the maximum one. Basically, it starts searching for 60x60 regions and then grows the search window by a factor of 1.3 until it reaches 350x350. The assumption is that what you're looking for is at least 60x60 and at most 350x350 pixels.
You can tune it further depending on what you want. minSize has the biggest impact on performance, as does the scale factor (but 1.3 is already high). maxSize has less impact.
After this change your program should be about twice as fast, or its CPU usage should drop by half. However, I'm still surprised that you have performance problems with your current settings and your computer.
Give us feedback if it works.
I'm using OpenCV and C++. I want to check if an image is part of another image and have found a function called matchTemplate, which works. But what if the template image is slightly different? Is there a function or approach like matchTemplate that checks if a template is part of a source image, but with tolerance for position, angle, size, and maybe even deformation? Or do I need a completely different approach than template matching?
Here's my code so far, which finds a template image in a source image, but without (or almost without) tolerance.
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <iostream>
#include <stdio.h>
using namespace cv;
using namespace std;
/// Global Variables
Mat img; Mat templ; Mat result;
const char* image_window = "Source Image";
const char* result_window = "Result window";
int match_method;
int max_Trackbar = 5;
/// Function Headers
void MatchingMethod( int, void* );
/**
* @function main
*/
int main( int, char** argv )
{
/// Load image and template
img = imread( "a1.jpg", 1 );
templ = imread( "a2.jpg", 1 );
/// Create windows
namedWindow( image_window, WINDOW_AUTOSIZE );
namedWindow( result_window, WINDOW_AUTOSIZE );
/// Create Trackbar
const char* trackbar_label = "Method: \n 0: SQDIFF \n 1: SQDIFF NORMED \n 2: TM CCORR \n 3: TM CCORR NORMED \n 4: TM COEFF \n 5: TM COEFF NORMED";
createTrackbar( trackbar_label, image_window, &match_method, max_Trackbar, MatchingMethod );
MatchingMethod( 0, 0 );
waitKey(0);
return 0;
}
/**
* @function MatchingMethod
* @brief Trackbar callback
*/
void MatchingMethod( int, void* )
{
/// Source image to display
Mat img_display;
img.copyTo( img_display );
/// Create the result matrix
int result_cols = img.cols - templ.cols + 1;
int result_rows = img.rows - templ.rows + 1;
result.create( result_rows, result_cols, CV_32FC1 ); // rows first, then cols
/// Do the Matching and Normalize
matchTemplate( img, templ, result, match_method );
normalize( result, result, 0, 1, NORM_MINMAX, -1, Mat() );
/// Localizing the best match with minMaxLoc
double minVal; double maxVal; Point minLoc; Point maxLoc;
Point matchLoc;
minMaxLoc( result, &minVal, &maxVal, &minLoc, &maxLoc, Mat() );
/// For SQDIFF and SQDIFF_NORMED, the best matches are lower values. For all the other methods, the higher the better
if( match_method == TM_SQDIFF || match_method == TM_SQDIFF_NORMED )
{ matchLoc = minLoc; }
else
{ matchLoc = maxLoc; }
/// Show me what you got
rectangle( img_display, matchLoc, Point( matchLoc.x + templ.cols , matchLoc.y + templ.rows ), Scalar::all(0), 2, 8, 0 );
rectangle( result, matchLoc, Point( matchLoc.x + templ.cols , matchLoc.y + templ.rows ), Scalar::all(0), 2, 8, 0 );
imshow( image_window, img_display );
imshow( result_window, result );
return;
}
The images I'm using in my code:
You've identified the major limitation of template matching: it's very fragile to any deformation of the image. Template matching works by sliding a template-sized box around the image and checking the similarity between the template and the region inside the box, using a pixel-by-pixel comparison method such as normalized cross-correlation. If you want to allow different sizes and rotations, you'll need to write a loop that scales the original template up or down, or rotates it, and that gets inefficient quickly.
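To make the scale loop concrete, a brute-force sketch might look like this (untuned; rotation would need a similar loop with warpAffine):

#include <opencv2/core/core.hpp>
#include <opencv2/imgproc/imgproc.hpp>

// Brute-force search over template scales; returns the best match location.
cv::Point matchOverScales(const cv::Mat& img, const cv::Mat& templ,
                          double minScale, double maxScale, double step)
{
    double bestScore = -1.0;
    cv::Point bestLoc;
    for (double s = minScale; s <= maxScale; s += step)
    {
        cv::Mat scaled;
        cv::resize(templ, scaled, cv::Size(), s, s);
        if (scaled.cols > img.cols || scaled.rows > img.rows) break;

        cv::Mat result;
        cv::matchTemplate(img, scaled, result, cv::TM_CCOEFF_NORMED);

        // Keep the location with the highest normalized correlation score.
        double maxVal; cv::Point maxLoc;
        cv::minMaxLoc(result, 0, &maxVal, 0, &maxLoc);
        if (maxVal > bestScore) { bestScore = maxVal; bestLoc = maxLoc; }
    }
    return bestLoc;
}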
If you want to allow deformation, and also do a more efficient search at different scales and rotations, the standard method is SURF. It's very efficient, and quite accurate if your images have good resolution, which yours do. You can google tutorials and find sample code for finding objects using SURF. Basically SURF identifies keypoints (distinctive image regions) in the template and the image. Then, you find the region in the image with the largest number of keypoints which match the template. (If you're already doing this, and it's what you meant by "feature matching," then I think you're on the right track.)
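A minimal SURF matching sketch might look like this (assuming OpenCV 2.4, where SURF lives in the nonfree module; the Hessian threshold of 400 is just a typical starting value):

#include <opencv2/core/core.hpp>
#include <opencv2/features2d/features2d.hpp>
#include <opencv2/nonfree/features2d.hpp> // SURF lives in nonfree in OpenCV 2.4
#include <opencv2/calib3d/calib3d.hpp>    // findHomography
#include <vector>

// Match SURF keypoints between template and image, then estimate where the
// template sits in the image with a homography (handles scale, rotation,
// and some deformation).
void surfMatchSketch(const cv::Mat& templ, const cv::Mat& img)
{
    cv::SurfFeatureDetector detector(400); // Hessian threshold
    std::vector<cv::KeyPoint> kpT, kpI;
    detector.detect(templ, kpT);
    detector.detect(img, kpI);

    cv::SurfDescriptorExtractor extractor;
    cv::Mat descT, descI;
    extractor.compute(templ, kpT, descT);
    extractor.compute(img, kpI, descI);

    cv::FlannBasedMatcher matcher;
    std::vector<cv::DMatch> matches;
    matcher.match(descT, descI, matches);

    // Collect matched point pairs and fit a homography with RANSAC.
    std::vector<cv::Point2f> ptsT, ptsI;
    for (size_t i = 0; i < matches.size(); i++) {
        ptsT.push_back(kpT[matches[i].queryIdx].pt);
        ptsI.push_back(kpI[matches[i].trainIdx].pt);
    }
    if (ptsT.size() >= 4) {
        cv::Mat H = cv::findHomography(ptsT, ptsI, CV_RANSAC);
        // Project the template corners through H to locate the object in img.
    }
}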
I am trying to use the cvCamShift function in OpenCV. I have written a class for doing this. This is its code:
#include <cv.h>
#include <highgui.h>
using namespace std;
class Tracker{
// File-level variables
IplImage * pHSVImg; // the input image converted to HSV color mode
IplImage * pHueImg; // the Hue channel of the HSV image
IplImage * pMask; // this image is used for masking pixels
IplImage * pProbImg; // the face probability estimates for each pixel
CvHistogram * pHist; // histogram of hue in the original face image
CvRect prevFaceRect; // location of face in previous frame
CvBox2D faceBox; // current face-location estimate
int vmin;
int vmax;
int smin;
void updateHueImage(IplImage* pImg);
public:
Tracker(IplImage * pImg, CvRect pFaceRect);
~Tracker();
CvBox2D track(IplImage* pImg);
};
Tracker::Tracker(IplImage * pImg, CvRect pFaceRect){
// File-level variables
int nHistBins = 30; // number of histogram bins
float rangesArr[] = {0,180}; // histogram range
vmin = 10;
vmax = 256;
smin = 55;
float * pRanges = rangesArr;
pHSVImg = cvCreateImage( cvGetSize(pImg), 8, 3 );
pHueImg = cvCreateImage( cvGetSize(pImg), 8, 1 );
pMask = cvCreateImage( cvGetSize(pImg), 8, 1 );
pProbImg = cvCreateImage( cvGetSize(pImg), 8, 1 );
pHist = cvCreateHist( 1, &nHistBins, CV_HIST_ARRAY, &pRanges, 1 );
float maxVal = 0.f;
// Create a new hue image
updateHueImage(pImg);
// Create a histogram representation for the face
cvSetImageROI( pHueImg, pFaceRect );
cvSetImageROI( pMask, pFaceRect );
cvCalcHist( &pHueImg, pHist, 0, pMask );
cvGetMinMaxHistValue( pHist, 0, &maxVal, 0, 0 );
cvConvertScale( pHist->bins, pHist->bins, maxVal? 255.0/maxVal : 0, 0 );
cvResetImageROI( pHueImg );
cvResetImageROI( pMask );
// Store the previous face location
prevFaceRect = pFaceRect;
}
Tracker::~Tracker(){
cvReleaseImage( &pHSVImg );
cvReleaseImage( &pHueImg );
cvReleaseImage( &pMask );
cvReleaseImage( &pProbImg );
cvReleaseHist( &pHist );
}
void Tracker::updateHueImage(IplImage * pImg)
{
// Convert to HSV color model
cvCvtColor( pImg, pHSVImg, CV_BGR2HSV );
// Mask out-of-range values
cvInRangeS( pHSVImg, cvScalar(0, smin, MIN(vmin,vmax), 0),
cvScalar(180, 256, MAX(vmin,vmax) ,0), pMask );
// Extract the hue channel
cvSplit( pHSVImg, pHueImg, 0, 0, 0 );
}
CvBox2D Tracker::track(IplImage * pImg)
{
CvConnectedComp components;
updateHueImage(pImg);
cvCalcBackProject( &pHueImg, pProbImg, pHist );
cvAnd( pProbImg, pMask, pProbImg, 0 );
cvCamShift( pProbImg, prevFaceRect,
cvTermCriteria( CV_TERMCRIT_EPS | CV_TERMCRIT_ITER, 10, 1 ),
&components, &faceBox );
prevFaceRect = components.rect;
faceBox.angle = -faceBox.angle;
return faceBox;
}
It works well, but when the tracked object goes out of the frame or something comes in front of it, I get a runtime error. One of my friends has code that is very similar to mine, and it doesn't produce any runtime error even in the worst situations. Please guide me on how I can debug this.
I think that when the tracked object goes out of the frame or something comes in front of it, the search window prevFaceRect becomes empty or invalid (a CvRect can't literally be null), and cvCamShift fails on an invalid window. So before calling this function
cvCamShift( pProbImg, prevFaceRect,
cvTermCriteria( CV_TERMCRIT_EPS | CV_TERMCRIT_ITER, 10, 1 ),
&components, &faceBox );
add a check that prevFaceRect is still a usable rectangle.
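Something along these lines (a sketch; the names match the Tracker::track method above, and the recovery strategy in the else branch is just a suggestion):

// Only run CamShift while the previous search window is still valid.
bool windowIsUsable = prevFaceRect.width > 0 && prevFaceRect.height > 0 &&
                      prevFaceRect.x >= 0 && prevFaceRect.y >= 0 &&
                      prevFaceRect.x + prevFaceRect.width  <= pImg->width &&
                      prevFaceRect.y + prevFaceRect.height <= pImg->height;
if( windowIsUsable ){
    cvCamShift( pProbImg, prevFaceRect,
                cvTermCriteria( CV_TERMCRIT_EPS | CV_TERMCRIT_ITER, 10, 1 ),
                &components, &faceBox );
    prevFaceRect = components.rect;
}
else {
    // Lost the target: fall back to re-detecting the face before tracking again.
}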
I have been working on camera calibration for about a week now. The examples I saw in online articles and blogs use a web camera to capture the images.
But in my scenario I am using a digital camera, a Casio Exilim EX-Z77. I add the images to the program arguments and access them individually in a for loop. In this manner I was able to imitate how a web camera works.
Is it possible for me to get the correct distortion coefficients and intrinsics this way?
Correct me if I am wrong or having a misunderstanding.
Here is the article I based my code on. The code below is what I was able to make.
int n_boards = 0;
int board_w;
int board_h;
using namespace std;
int main( int argc, char *argv[] )
{
board_w = 5; // Board width in squares
board_h = 8; // Board height
n_boards = 16; // Number of boards
int board_n = board_w * board_h;
CvSize board_sz = cvSize( board_w, board_h );
CvMat* image_points = cvCreateMat( n_boards*board_n, 2, CV_32FC1 );
CvMat* object_points = cvCreateMat( n_boards*board_n, 3, CV_32FC1 );
CvMat* point_counts = cvCreateMat( n_boards, 1, CV_32SC1 );
CvMat* intrinsic_matrix = cvCreateMat( 3, 3, CV_32FC1 );
CvMat* distortion_coeffs = cvCreateMat( 5, 1, CV_32FC1 );
CvPoint2D32f* corners = new CvPoint2D32f[ board_n ];
int corner_count;
int successes = 0;
int step;
int a;
// Process the input images one by one until we have enough good boards
for( a = 1; a <= n_boards && successes < n_boards; a++ ){
IplImage *image = cvLoadImage(argv[a]);
IplImage *gray_image = cvCreateImage( cvGetSize( image ), 8, 1 );
int found = cvFindChessboardCorners( image, board_sz, corners,
&corner_count, CV_CALIB_CB_ADAPTIVE_THRESH | CV_CALIB_CB_FILTER_QUADS );
// Get subpixel accuracy on those corners
cvCvtColor( image, gray_image, CV_BGR2GRAY );
cvFindCornerSubPix( gray_image, corners, corner_count, cvSize( 11, 11 ),
cvSize( -1, -1 ), cvTermCriteria( CV_TERMCRIT_EPS+CV_TERMCRIT_ITER, 30, 0.1 ));
// Draw it
cvDrawChessboardCorners( image, board_sz, corners, corner_count, found );
//cvShowImage( "Calibration", image );
// If we got a good board, add it to our data
if( corner_count == board_n ){
step = successes*board_n;
for( int i=step, j=0; j < board_n; ++i, ++j ){
CV_MAT_ELEM( *image_points, float, i, 0 ) = corners[j].x;
CV_MAT_ELEM( *image_points, float, i, 1 ) = corners[j].y;
CV_MAT_ELEM( *object_points, float, i, 0 ) = j/board_w;
CV_MAT_ELEM( *object_points, float, i, 1 ) = j%board_w;
CV_MAT_ELEM( *object_points, float, i, 2 ) = 0.0f;
}
CV_MAT_ELEM( *point_counts, int, successes, 0 ) = board_n;
successes++;
}
cvReleaseImage( &gray_image );
cvReleaseImage( &image );
}
IplImage *image1 = cvLoadImage(argv[1]);
CvMat* object_points2 = cvCreateMat( successes*board_n, 3, CV_32FC1 );
CvMat* image_points2 = cvCreateMat( successes*board_n, 2, CV_32FC1 );
CvMat* point_counts2 = cvCreateMat( successes, 1, CV_32SC1 );
// Transfer the points into the correct size matrices
for( int i = 0; i < successes*board_n; ++i ){
CV_MAT_ELEM( *image_points2, float, i, 0) = CV_MAT_ELEM( *image_points, float, i, 0 );
CV_MAT_ELEM( *image_points2, float, i, 1) = CV_MAT_ELEM( *image_points, float, i, 1 );
CV_MAT_ELEM( *object_points2, float, i, 0) = CV_MAT_ELEM( *object_points, float, i, 0 );
CV_MAT_ELEM( *object_points2, float, i, 1) = CV_MAT_ELEM( *object_points, float, i, 1 );
CV_MAT_ELEM( *object_points2, float, i, 2) = CV_MAT_ELEM( *object_points, float, i, 2 );
}
for( int i=0; i < successes; ++i ){
CV_MAT_ELEM( *point_counts2, int, i, 0 ) = CV_MAT_ELEM( *point_counts, int, i, 0 );
}
cvReleaseMat( &object_points );
cvReleaseMat( &image_points );
cvReleaseMat( &point_counts );
CV_MAT_ELEM( *intrinsic_matrix, float, 0, 0 ) = 1.0;
CV_MAT_ELEM( *intrinsic_matrix, float, 1, 1 ) = 1.0;
cvCalibrateCamera2( object_points2, image_points2, point_counts2, cvGetSize( image1 ),
intrinsic_matrix, distortion_coeffs, NULL, NULL, CV_CALIB_FIX_ASPECT_RATIO );
cvSave( "Intrinsics.xml", intrinsic_matrix );
cvSave( "Distortion.xml", distortion_coeffs );
// Example of loading these matrices back in
CvMat *intrinsic = (CvMat*)cvLoad( "Intrinsics.xml" );
CvMat *distortion = (CvMat*)cvLoad( "Distortion.xml" );
IplImage* mapx = cvCreateImage( cvGetSize( image1 ), IPL_DEPTH_32F, 1 );
IplImage* mapy = cvCreateImage( cvGetSize( image1 ), IPL_DEPTH_32F, 1 );
cvInitUndistortMap( intrinsic, distortion, mapx, mapy );
cvNamedWindow( "Undistort" );
IplImage *t = cvCloneImage( image1 );
cvShowImage( "Calibration", image1 ); // Show the raw image
cvRemap( t, image1, mapx, mapy ); // Undistort it
cvReleaseImage( &t );
cvShowImage( "Undistort", image1 ); // Show the corrected image
cvWaitKey(0);
return 0;
}
I am using Code::Blocks 10.05 and OpenCV 2.3.0 with the MinGW GNU GCC compiler.
Digital cameras such as the Casio Exilim EX-Z77 usually perform a certain amount of image correction in-camera.
I believe the images you get from this camera are already corrected for lens distortion, but I couldn't find a reference to back up this claim.
As for the multiple images you are using: in practice you only need one to find the distortion. For more information on this procedure using OpenCV, check this answer.
EDIT:
Since you mentioned image stitching, OpenCV started to support this feature on version 2.2:
OpenCV 2.2 is released! Teasers already far along AFTER this release: Panoramic Stitching
On this subject, this interesting post shares some source code.
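If you'd rather use the library directly, the high-level cv::Stitcher class that arrived a bit later in the 2.x series (2.4) reduces the whole pipeline to a few calls. A minimal sketch, with placeholder file names:

#include <opencv2/opencv.hpp>
#include <opencv2/stitching/stitcher.hpp> // cv::Stitcher (OpenCV 2.4.x)
#include <vector>

int main()
{
    // Load the overlapping input photos (file names are placeholders).
    std::vector<cv::Mat> imgs;
    imgs.push_back(cv::imread("pano1.jpg"));
    imgs.push_back(cv::imread("pano2.jpg"));

    // Run the default stitching pipeline: feature matching, camera
    // estimation, warping, seam finding, and blending.
    cv::Mat pano;
    cv::Stitcher stitcher = cv::Stitcher::createDefault();
    cv::Stitcher::Status status = stitcher.stitch(imgs, pano);
    if (status != cv::Stitcher::OK)
        return -1;

    cv::imwrite("pano.jpg", pano);
    return 0;
}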