I have a few cameras in my system. I initialise them this way:
cap1 = cvCreateCameraCapture(0);
cap2 = cvCreateCameraCapture(1); // or -1
But after each execution their behaviour is different: sometimes they work together, sometimes neither works, and sometimes one captures fine while the other shows a green screen. And sometimes the system shows me a dialogue box for choosing a device.
Here is the relevant part of the source code:
CvCapture* cap2;
CvCapture* cap1;
printf("- Searching first cam : \n");
for (i; i < LASTCAM; i++)
{
cap1 = cvCreateCameraCapture(i);
if (!cap1)
{
printf("-- Camera %d is empty \n", i);
}
else
{
printf("-- Camera %d is OK \n", i);
i++;
break;
}
}
printf("- Searching second cam : \n");
for (; i < LASTCAM; i++)
{
cap2 = cvCreateCameraCapture(i);
if (!cap2)
{
printf("-- Camera %d is empty \n", i);
}
else
{
printf("-- Camera %d is OK \n", i);
break;
}
} printf("Frame propeties:\n");
double width = cvGetCaptureProperty(cap1, CV_CAP_PROP_FRAME_WIDTH);
double height = cvGetCaptureProperty(cap1, CV_CAP_PROP_FRAME_HEIGHT);
printf("First cam : %.0f x %.0f\n", width, height );
double width2 = cvGetCaptureProperty(cap2, CV_CAP_PROP_FRAME_WIDTH);
double height2 = cvGetCaptureProperty(cap2, CV_CAP_PROP_FRAME_HEIGHT);
printf("Second cam : %.0f x %.0f\n\n", width2, height2 );
IplImage* frame1=0;
IplImage* frame2=0;
cvNamedWindow("cam1", CV_WINDOW_AUTOSIZE);
cvNamedWindow("cam2", CV_WINDOW_AUTOSIZE);
int counter=0;
char filename[512];
while(true){
frame1 = cvQueryFrame( cap1 );
frame2 = cvQueryFrame( cap2 );
cvShowImage("cam1", frame1);
cvShowImage("cam2", frame2);
...
What's wrong with it?
Cameras 1-9 come up empty; 10 is the first cam; 11 onwards return cams that show "green screens".
Thanks in advance.
Have you looked at the stereo mode? It looks like it's required if you want to run multiple cameras.
USB cameras (at least through DirectShow on Windows) can be a little difficult.
Some things to try:
// A small delay between the captures
cap1 = cvCreateCameraCapture(0);
Sleep(100);
cap2 = cvCreateCameraCapture(1);
or
// call all the setup functions for camera0 before capturing camera1
cap1 = cvCreateCameraCapture(0);
cvGetCaptureProperty(cap1,......)
cap2 = cvCreateCameraCapture(1);
cvGetCaptureProperty(cap2,......)
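Putting the two together, a minimal sketch of the setup order meant above (the 100 ms delay and the 640x480 sizes are assumptions to tune per system; Sleep() is the Windows call, use usleep() elsewhere):
#include <cv.h>
#include <highgui.h>
#include <windows.h> // Sleep(); use usleep() on other platforms
#include <stdio.h>
int main()
{
    // Open and fully configure the first camera before touching the second.
    CvCapture* cap1 = cvCreateCameraCapture(0);
    if (!cap1) { printf("Camera 0 failed\n"); return -1; }
    cvSetCaptureProperty(cap1, CV_CAP_PROP_FRAME_WIDTH, 640);
    cvSetCaptureProperty(cap1, CV_CAP_PROP_FRAME_HEIGHT, 480);
    Sleep(100); // assumed settling delay between driver initialisations
    CvCapture* cap2 = cvCreateCameraCapture(1);
    if (!cap2) { printf("Camera 1 failed\n"); cvReleaseCapture(&cap1); return -1; }
    cvSetCaptureProperty(cap2, CV_CAP_PROP_FRAME_WIDTH, 640);
    cvSetCaptureProperty(cap2, CV_CAP_PROP_FRAME_HEIGHT, 480);
    // ... query frames from cap1/cap2 as in the question ...
    cvReleaseCapture(&cap1);
    cvReleaseCapture(&cap2);
    return 0;
}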
I am detecting shapes in real time with the help of OpenCV in the C++ programming language. I found code that reads from a folder and detects shapes, but in my case the camera should detect in real time. How can I use raspicam::RaspiCam_Cv capture; instead of CvCapture *capture = cvCaptureFromAVI("a.avi"); in C++?
#include <cv.h>
#include <highgui.h>
using namespace std;
IplImage* imgTracking=0;
int lastX1 = -1;
int lastY1 = -1;
int lastX2 = -1;
int lastY2 = -1;
void trackObject(IplImage* imgThresh){
CvSeq* contour; //hold the pointer to a contour
CvSeq* result; //hold sequence of points of a contour
CvMemStorage *storage = cvCreateMemStorage(0); //storage area for all contours
//finding all contours in the image
cvFindContours(imgThresh, storage, &contour, sizeof(CvContour), CV_RETR_LIST, CV_CHAIN_APPROX_SIMPLE, cvPoint(0,0));
//iterating through each contour
while(contour)
{
//obtain a sequence of points of the contour, pointed to by the variable 'contour'
result = cvApproxPoly(contour, sizeof(CvContour), storage, CV_POLY_APPROX_DP, cvContourPerimeter(contour)*0.02, 0);
//if there are 3 vertices in the contour and the area of the triangle is more than 100 pixels
if(result->total==3 && fabs(cvContourArea(result, CV_WHOLE_SEQ))>100 )
{
//iterating through each point
CvPoint *pt[3];
for(int i=0;i<3;i++){
pt[i] = (CvPoint*)cvGetSeqElem(result, i);
}
int posX=( pt[0]->x + pt[1]->x + pt[2]->x )/3;
int posY=( pt[0]->y + pt[1]->y + pt[2]->y )/3;
if(posX > 360 ){
if(lastX1>=0 && lastY1>=0 && posX>=0 && posY>=0){
// Draw a red line from the previous point to the current point
cvLine(imgTracking, cvPoint(posX, posY), cvPoint(lastX1, lastY1), cvScalar(0,0,255), 4);
}
lastX1 = posX;
lastY1 = posY;
}
else{
if(lastX2>=0 && lastY2>=0 && posX>=0 && posY>=0){
// Draw a blue line from the previous point to the current point
cvLine(imgTracking, cvPoint(posX, posY), cvPoint(lastX2, lastY2), cvScalar(255,0,0), 4);
}
lastX2 = posX;
lastY2 = posY;
}
}
//obtain the next contour
contour = contour->h_next;
}
cvReleaseMemStorage(&storage);
}
int main(){
//load the video file to the memory
CvCapture *capture = cvCaptureFromAVI("a.avi");
if(!capture){
printf("Capture failure\n");
return -1;
}
IplImage* frame=0;
frame = cvQueryFrame(capture);
if(!frame) return -1;
//create a blank image, assigned to 'imgTracking', with the same size as the original video
imgTracking=cvCreateImage(cvGetSize(frame),IPL_DEPTH_8U, 3);
cvZero(imgTracking); //convert the image 'imgTracking' to black
cvNamedWindow("Video");
//iterate through each frame of the video
while(true){
frame = cvQueryFrame(capture);
if(!frame) break;
frame=cvCloneImage(frame);
//smooth the original image using Gaussian kernel
cvSmooth(frame, frame, CV_GAUSSIAN,3,3);
//converting the original image into grayscale
IplImage* imgGrayScale = cvCreateImage(cvGetSize(frame), 8, 1);
cvCvtColor(frame,imgGrayScale,CV_BGR2GRAY);
//thresholding the grayscale image to get better results
cvThreshold(imgGrayScale,imgGrayScale,100,255,CV_THRESH_BINARY_INV);
//track the position of the ball
trackObject(imgGrayScale);
// Add the tracking image and the frame
cvAdd(frame, imgTracking, frame);
cvShowImage("Video", frame);
//Clean up used images
cvReleaseImage(&imgGrayScale);
cvReleaseImage(&frame);
//Wait 10mS
int c = cvWaitKey(10);
//If 'ESC' is pressed, break the loop
if((char)c==27 ) break;
}
cvDestroyAllWindows();
cvReleaseImage(&imgTracking);
cvReleaseCapture(&capture);
return 0;
}
I cannot use raspicam::RaspiCam_Cv capture; in place of CvCapture *capture = cvCaptureFromAVI(); in this code. I need to detect shapes in real time, for example calling some function when a triangle appears. Please help me.
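For what it's worth, a sketch of how the raspicam-cv API is typically used in place of CvCapture (assuming its open()/grab()/retrieve() interface and OpenCV 2.x, where assigning a cv::Mat to an IplImage makes a shallow header, not a copy):
#include <raspicam/raspicam_cv.h>
#include <cv.h>
#include <highgui.h>
#include <stdio.h>
int main(){
    raspicam::RaspiCam_Cv capture;            // replaces CvCapture*
    capture.set(CV_CAP_PROP_FORMAT, CV_8UC3); // ask for 3-channel BGR frames
    if(!capture.open()){
        printf("Capture failure\n");
        return -1;
    }
    cv::Mat matFrame;
    while(true){
        capture.grab();
        capture.retrieve(matFrame);           // one camera frame into a cv::Mat
        if(matFrame.empty()) break;
        IplImage iplFrame = matFrame;         // shallow view, OpenCV 2.x only
        IplImage* frame = &iplFrame;
        // ... run the smooth/threshold/trackObject pipeline on 'frame' as above,
        // but do NOT call cvReleaseImage(&frame): the pixels belong to matFrame ...
        if((char)cvWaitKey(10) == 27) break;  // ESC quits
    }
    capture.release();
    return 0;
}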
I am writing OpenCV code for playing video. I want to add a trackbar to adjust the video speed, but the trackbar doesn't move and I can't focus the video window. This is my OpenCV C++ code. What should I do?
void onTrackbarSlide(int pos, void *){
if (pos > 0)     // guard against division by zero at slider position 0
    sec = 1.0 / pos;
printf("pos = %d\n",pos);
}
int main(void)
{
strcat(InputFile, FileName); // input video file
//VideoCapture cap(0); // Create an object and open the default(0) camera. C++ Syntax: VideoCapture::VideoCapture(int device)
VideoCapture cap; // Class for video capturing from video files or cameras.
cap.open(InputFile); // Open video file or a capturing device for video capturing
if (!cap.isOpened()) { // Check if the file is opened successfully.
printf("\nFail to open a video file");
return -1;
}
// When querying a property that is not supported by the backend used by the VideoCapture class, value 0 is returned.
double fps = cap.get(CV_CAP_PROP_FPS); printf("\nCV_CAP_PROP_FPS = %f", fps);
double ToTalFrames = cap.get(CV_CAP_PROP_FRAME_COUNT); printf("\nCV_CAP_PROP_FRAME_COUNT = %f", ToTalFrames);
CvSize FrameSize;
FrameSize.width = (int)cap.get(CV_CAP_PROP_FRAME_WIDTH);
FrameSize.height = (int)cap.get(CV_CAP_PROP_FRAME_HEIGHT);
printf("\nWidth * Height = %d * %d\n", FrameSize.width, FrameSize.height);
VideoWriter wrt;
#define CODEC -1
namedWindow("original", 1);
int slider_position = 0;
int slider_max = 255;
createTrackbar("video speed", "original", &slider_position, slider_max, onTrackbarSlide);
wrt.open(OutputFile, CODEC, fps, FrameSize);
Mat frame, dst1;
Mat dst2;
for (int i = 1; i< ToTalFrames; i++)
{
cap.read(frame);
if (frame.data == NULL) { printf("\nNo image found!\n"); return(0); }
imshow("original", frame);
if( waitKey(sec /fps) == 0x1b ) break; // Break if key input is escape code.
}
return 0;
}
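One way to make the slider actually control the playback speed is to have the callback write the waitKey() delay, never letting it reach zero (waitKey(0) blocks forever). A minimal sketch, with assumed names and an assumed input file:
#include <opencv2/highgui/highgui.hpp>
using namespace cv;
int g_delay_ms = 33; // inter-frame delay in ms, written by the callback
void onTrackbarSlide(int pos, void*)
{
    g_delay_ms = (pos > 0) ? pos : 1; // never pass 0 to waitKey()
}
int main()
{
    VideoCapture cap("input.avi"); // assumed input file
    if (!cap.isOpened()) return -1;
    namedWindow("original", 1);
    int slider_position = 33;
    createTrackbar("video speed", "original", &slider_position, 200, onTrackbarSlide);
    Mat frame;
    while (cap.read(frame)) {
        imshow("original", frame);
        if (waitKey(g_delay_ms) == 0x1b) break; // ESC quits
    }
    return 0;
}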
I read a Logitech C200 webcam on the USB port with this code in C++ with OpenCV:
Mat result;
IplImage* frame;
int hell=0;   // count of bright pixels ("hell" = bright)
int dunkel=0; // count of dark pixels ("dunkel" = dark)
CvCapture* capture;
capture = 0;
capture = cvCaptureFromCAM( CV_CAP_ANY );
cvSetCaptureProperty(capture, CV_CAP_PROP_FRAME_WIDTH, 320);
cvSetCaptureProperty(capture, CV_CAP_PROP_FRAME_HEIGHT, 240);
frame = cvQueryFrame( capture );
if( !frame ) break;
result = frame;
flip(result , result ,-1);
cvReleaseCapture( &capture );
for(int i = 0; i < 240; ++i){
for(int j = 0; j < 320; ++j){
if((result.at<Vec3b>(i,j)[1] > 230) && (result.at<Vec3b>(i,j)[0] > 230))
{ hell++;}
else
{dunkel++;}
}
}
How can I get the alpha channel, in this case the fourth element of a pixel in the Mat matrix, in OpenCV?
Thanks for the help.
There is no alpha channel in images from a webcam.
Also, please use OpenCV's C++ API; the venerable C one is a dead end.
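For reference, the same capture in the C++ API might look like this (a sketch, assuming the default camera at index 0):
#include <opencv2/opencv.hpp>
using namespace cv;
int main()
{
    VideoCapture capture(0); // default webcam
    if (!capture.isOpened()) return -1;
    capture.set(CV_CAP_PROP_FRAME_WIDTH, 320);
    capture.set(CV_CAP_PROP_FRAME_HEIGHT, 240);
    Mat result;
    capture >> result; // one BGR frame: 3 channels, no alpha
    if (result.empty()) return -1;
    flip(result, result, -1);
    return 0;
}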
Is there a possibility to read the alpha channel from a webcam?
Something like the HSV colour model:
http://en.wikipedia.org/wiki/HSL_and_HSV
I mean the hue (German: Farbton).
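If it's the hue you want, convert the BGR frame to HSV and read channel 0; a minimal sketch, continuing from the 'result' Mat in the question's code:
#include <opencv2/imgproc/imgproc.hpp>
// 'result' is the 320x240 BGR frame captured above.
cv::Mat hsv;
cv::cvtColor(result, hsv, CV_BGR2HSV);
// Channel 0 = hue (Farbton), 0..179 in OpenCV's 8-bit HSV; 1 = saturation; 2 = value.
uchar hue = hsv.at<cv::Vec3b>(120, 160)[0]; // hue of the centre pixel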
I'm trying to write a simple C++ program using OpenCV to detect faces and save one facial square box for each new face showing up in the camera. The frame capture should be right, but I'm struggling with the pointers, especially total, around CvSeq. Could someone help me? Whenever it gets to the line cvSeqPush(faces_new, r);, faces_new shows up with 60 or 70 in faces_new->total, without ever having been assigned anything. Very frustrated; I would really appreciate some help.
// memory allocation
static CvMemStorage* storage = 0;
storage = cvCreateMemStorage(0);
static CvMemStorage* storage2 = 0;
storage2 = cvCreateMemStorage(0);
static CvMemStorage* storage3 = 0;
storage3 = cvCreateMemStorage(0);
// Create a new named window with title: result
cvNamedWindow("Window"); // create a window to display in
CvCapture* capture = cvCaptureFromCAM(-1); // capture from video device (MacBook iSight)
cvSetCaptureProperty( capture, CV_CAP_PROP_FRAME_WIDTH, 1000 ); // set capture width to 1000
cvSetCaptureProperty( capture, CV_CAP_PROP_FRAME_HEIGHT, 600 ); // set capture height to 600
// Declare images
IplImage *imgCamera; // captured from camera
IplImage *imgCamera_last; // last camera image
IplImage *imgDrawn; // image with drawing (rect containing faces)
IplImage *imgFace; // face picture extracted from the camera
CvRect *r; // rects containing faces
CvSeq *faces = cvCreateSeq(0, sizeof(CvSeq), sizeof(CvRect), storage); // sequence of faces in the camera image - CURRENT
CvSeq *faces_last = cvCreateSeq(0, sizeof(CvSeq), sizeof(CvRect), storage2); // sequence of faces in the camera image - LAST FRAME
CvSeq *faces_new = cvCreateSeq(0, sizeof(CvSeq), sizeof(CvRect), storage3); // sequence of faces in the camera image - NEW FACES
float scale = 1.0/5; // how far do we want to scale down the haar detect objects images for speed
// Create a new Haar classifier
static CvHaarClassifierCascade* cascade = 0;
cascade = (CvHaarClassifierCascade*)cvLoad( cascade_name, 0, 0, 0 );
// file name where to save the file
std::stringstream filename;
int counter = 1;
int filecounter = 1;
while(1) {
//*************************************************************************************/
//Step 1: stream video. Video to images
//*************************************************************************************/
// capture a frame from the video and turn it into a single image, imgCamera
capture_frame(capture, imgCamera);
// allocate an image to be used later
imgDrawn = cvCreateImage(cvGetSize(imgCamera), imgCamera->depth, imgCamera->nChannels);
imgFace = cvCreateImage(cvSize(600, 600), imgCamera->depth, imgCamera->nChannels);
cvCopy(imgCamera, imgDrawn);
if (counter == 10) { // take action for every 10 frames
counter = 1;
//*************************************************************************************/
//Step 2: Detection
//*************************************************************************************/
find_faces(imgCamera, storage, cascade, faces, scale);
//printf("Last faces seq had %d faces detected. \n",faces_last->total);
//*************************************************************************************/
//Step 4: Draw every face in the picture
//*************************************************************************************/
// for each face found in the image
for(int i = 0; i < (faces ? faces->total : 0); i++ ){
// get the rect from the sequence
r = (CvRect*)cvGetSeqElem(faces, i);
// draw the rectange around the face on the imgDrawn
draw_rect(imgDrawn, r, scale);
}
cvShowImage("Window", imgDrawn);
// press escape to quit
if( cvWaitKey(33) == 27 ) break;
//*************************************************************************************/
//Step 3: Recognize the new faces
//*************************************************************************************/
//TO DO: Combine the following into a function: match_faces(faces_new, faces, faces_last, lastspotted, currentspotted, imgCamera);
for(int i = 0; i < (faces ? faces->total : 0); i++ ){
cout << "faces_new"<< faces_new->total<< "\n";
// get the rect from the sequence
r = (CvRect*)cvGetSeqElem(faces, i);
if (faces_last->total == 0) {
//cout << "New PERSON!!";
cvSeqPush(faces_new, r);
}
else {
for(int k = 0; k < (faces_last ? faces_last->total : 0); k++ ){
CvRect *r_last = (CvRect*)cvGetSeqElem(faces_last, k);
if (!same_face(r, r_last, imgCamera, imgCamera_last, i, k)) {
cvSeqPush(faces_new, r);
//cout << "faces_new"<< faces_new->total<< "\n";
}
}
}
}
//*************************************************************************************/
//Step 3: Process faces - save new faces, report new faces
//*************************************************************************************/
if ((faces_new->total)>0) {
// To change to save only faces_new
save_faces(faces_new, imgCamera, imgFace, scale, filecounter);
// report_faces(filecounter, faces_new->total, model); // report new faces stored starting from filecounter
filecounter = filecounter+(faces_new-> total);}
cvClearMemStorage(storage2);
cvSeqPush(faces_last, faces);
//cout << "face_last:" << faces_last->total << "\n";}
cvClearMemStorage(storage);
cvClearMemStorage(storage3);
}
counter++;
imgCamera_last = imgCamera;
}
cvReleaseCapture( &capture );
cvReleaseImage(&imgCamera);
cvReleaseImage(&imgDrawn);
cvReleaseImage(&imgFace);
cvDestroyWindow("window");
Finally got it to work. The key changes: save new faces directly instead of collecting them in faces_new, push the individual CvRects into faces_last (rather than pushing the whole faces sequence as a single element), and empty faces_last by popping its elements instead of clearing the storage behind it.
// memory allocation
static CvMemStorage* storage = 0;
storage = cvCreateMemStorage(0);
static CvMemStorage* storage2 = 0;
storage2 = cvCreateMemStorage(0);
// Create a new named window with title: result
cvNamedWindow("Window"); // create a window to display in
CvCapture* capture = cvCaptureFromCAM(-1); // capture from video device (MacBook iSight)
cvSetCaptureProperty( capture, CV_CAP_PROP_FRAME_WIDTH, 1000 ); // set capture width to 1000
cvSetCaptureProperty( capture, CV_CAP_PROP_FRAME_HEIGHT, 600 ); // set capture height to 600
// Declare images
IplImage *imgCamera; // captured from camera
IplImage *imgCamera_last; // last camera image
IplImage *imgDrawn; // image with drawing (rect containing faces)
IplImage *imgFace; // face picture extracted from the camera
CvRect *r; // rects containing faces
CvSeq *faces = cvCreateSeq(0, sizeof(CvSeq), sizeof(CvRect), storage); // sequence of faces in the camera image - CURRENT
CvSeq *faces_last = cvCreateSeq(0, sizeof(CvSeq), sizeof(CvRect), storage2); // sequence of faces in the camera image - LAST FRAME
float scale = 1.0/5; // how far do we want to scale down the haar detect objects images for speed
// Create a new Haar classifier
static CvHaarClassifierCascade* cascade = 0;
cascade = (CvHaarClassifierCascade*)cvLoad( cascade_name, 0, 0, 0 );
// file name where to save the file
std::stringstream filename;
int counter = 1;
int filecounter = 1;
while(1) {
//*************************************************************************************/
//Step 1: stream video. Video to images
//*************************************************************************************/
// capture a frame from the video and turn it into a single image, imgCamera
capture_frame(capture, imgCamera);
// allocate an image to be used later
imgDrawn = cvCreateImage(cvGetSize(imgCamera), imgCamera->depth, imgCamera->nChannels);
imgFace = cvCreateImage(cvSize(600, 600), imgCamera->depth, imgCamera->nChannels);
cvCopy(imgCamera, imgDrawn);
if (counter == 10) { // take action for every 10 frames
counter = 1;
//*************************************************************************************/
//Step 2: Detection
//*************************************************************************************/
find_faces(imgCamera, storage, cascade, faces, scale);
//printf("Last faces seq had %d faces detected. \n",faces_last->total);
//*************************************************************************************/
//Step 4: Draw every face in the picture
//*************************************************************************************/
// for each face found in the image
for(int i = 0; i < (faces ? faces->total : 0); i++ ){
// get the rect from the sequence
r = (CvRect*)cvGetSeqElem(faces, i);
// draw the rectange around the face on the imgDrawn
draw_rect(imgDrawn, r, scale);
}
cvShowImage("Window", imgDrawn);
// press escape to quit
if( cvWaitKey(33) == 27 ) break;
//*************************************************************************************/
//Step 3: Recognize the new faces
//*************************************************************************************/
//TO DO: Combine the following into a function: match_faces(faces_new, faces, faces_last, lastspotted, currentspotted, imgCamera);
for(int i = 0; i < (faces ? faces->total : 0); i++ ){
// get the rect from the sequence
r = (CvRect*)cvGetSeqElem(faces, i);
if (faces_last->total == 0) {
cout<<"a face appeared: "<<"there are total faces of "<<faces_last->total<<"\n";
save_face(r, imgCamera, imgFace, scale, filecounter);
filecounter++;
//report_faces(filecounter, faces_new->total, model); // report new faces stored starting from filecounter
}
else {
for(int k = 0; k < (faces_last ? faces_last->total : 0); k++ ){
CvRect *r_last = (CvRect*)cvGetSeqElem(faces_last, k);
if (!same_face(r, r_last, imgCamera, imgCamera_last, i, k)) {
save_face(r, imgCamera, imgFace, scale, filecounter);
filecounter++;
//report_faces(filecounter, faces_new->total, model); // report new faces stored starting from filecounter
}
}
}
}
//cvClearMemStorage(storage2);
while (faces_last->total >0) {
cvSeqPop(faces_last);}
for(int i = 0; i < (faces ? faces->total : 0); i++ ){
// get the rect from the sequence
r = (CvRect*)cvGetSeqElem(faces, i);
cvSeqPush(faces_last, r);
}
//cout << "face_last:" << faces_last->total << "\n";}
cvClearMemStorage(storage);
}
counter++;
imgCamera_last = imgCamera;
}
cvReleaseCapture( &capture );
cvReleaseImage(&imgCamera);
cvReleaseImage(&imgDrawn);
cvReleaseImage(&imgFace);
cvDestroyWindow("window");
// return 0 to indicate successful execution of the program
return 0;
Hey,
I'm doing a project to stabilize a video sequence using the optical flow method.
I have the optical flow working so far, but I have two branches in front of me to work on:
1 - after getting the optical flow, I found the average of the image displacement and then subtracted the average from the features of the second frame; my question is what to do next?
2 - or I could use the OpenCV functions to stabilize the image: I calculated the transformation matrix and then used cvPerspectiveTransform and cvWarpPerspective, but I'm getting an error, which is "bad flag".
You can see the code below. What do I do to stabilize the image? I'd appreciate any solution you can provide.
#include <stdio.h>
#include <stdlib.h>
//#include "/usr/include/opencv/cv.h"
#include <cv.h>
#include <cvaux.h>
#include <highgui.h>
#include <math.h>
#include <iostream>
#define PI 3.1415926535898
double rads(double degs)
{
return (PI/180 * degs);
}
CvCapture *cap;
IplImage *img;
IplImage *frame;
IplImage *frame1;
IplImage *frame3;
IplImage *frame2;
IplImage *temp_image1;
IplImage *temp_image2;
IplImage *frame1_1C;
IplImage *frame2_1C;
IplImage *eig_image;
IplImage *temp_image;
IplImage *pyramid1 = NULL;
IplImage *pyramid2 = NULL;
char * mapx;
char * mapy;
int h;
int corner_count;
CvMat* M = cvCreateMat(3,3,CV_32FC1);
CvPoint p,q,l,s;
double hypotenuse;
double angle;
int line_thickness = 1, line_valid = 1, pos = 0;
CvScalar line_color;
CvScalar target_color[4] = { // in BGR order
{{ 0, 0, 255, 0 }}, // red
{{ 0, 255, 0, 0 }}, // green
{{ 255, 0, 0, 0 }}, // blue
{{ 0, 255, 255, 0 }} // yellow
};
inline static double square(int a)
{
return a * a;
}
char* IntToChar(int num){return NULL;}
/*{
char* retstr = static_cast<char*>(calloc(12, sizeof(char)));
if (sprintf(retstr, "%i", num) > 0)
{
return retstr;
}
else
{
return NULL;
}
}*/
inline static void allocateOnDemand( IplImage **img, CvSize size, int depth, int channels )
{
if ( *img != NULL )
return;
*img = cvCreateImage( size, depth, channels );
if ( *img == NULL )
{
fprintf(stderr, "Error: Couldn't allocate image. Out of memory?\n");
exit(-1);
}
}
void clearImage (IplImage *img)
{
for (int i=0; i<img->imageSize; i++)
img->imageData[i] = (char) 0;
}
int main()
{
cap = cvCaptureFromCAM(0);
//cap = cvCaptureFromAVI("/home/saif/Desktop/NAO.. the project/jj/Test3.avi");
CvSize frame_size;
// Reading the video's frame size
frame_size.height = (int) cvGetCaptureProperty( cap, CV_CAP_PROP_FRAME_HEIGHT );
frame_size.width = (int) cvGetCaptureProperty( cap, CV_CAP_PROP_FRAME_WIDTH );
cvNamedWindow("Optical Flow", CV_WINDOW_AUTOSIZE);
while(true)
{
frame = cvQueryFrame( cap );
if (frame == NULL)
{
fprintf(stderr, "Error: Hmm. The end came sooner than we thought.\n");
return -1;
}
// Allocating another image if it is not allocated already.
allocateOnDemand( &frame1_1C, frame_size, IPL_DEPTH_8U, 1 );
cvConvertImage(frame, frame1_1C, 0);
allocateOnDemand( &frame1, frame_size, IPL_DEPTH_8U, 3 );
cvConvertImage(frame, frame1, 0);
//Get the second frame of video.
frame = cvQueryFrame( cap );
if (frame == NULL)
{
fprintf(stderr, "Error: Hmm. The end came sooner than we thought.\n");
return -1;
}
if(!frame)
{
printf("bad video \n");
exit(0);
}
allocateOnDemand( &frame2_1C, frame_size, IPL_DEPTH_8U, 1 );
cvConvertImage(frame, frame2_1C, 0);
allocateOnDemand( &frame2, frame_size, IPL_DEPTH_8U, 3 );
cvConvertImage(frame, frame2, 0);
CvSize optical_flow_window = cvSize(5,5);
eig_image = cvCreateImage( frame_size, IPL_DEPTH_32F, 1 );
temp_image = cvCreateImage( frame_size, IPL_DEPTH_32F, 1 );
CvTermCriteria optical_flow_termination_criteria = cvTermCriteria( CV_TERMCRIT_ITER | CV_TERMCRIT_EPS, 20, .3 );
// Feature tracking
CvPoint2D32f frame1_features[4];
CvPoint2D32f frame2_features[4];
//cvCornerEigenValsAndVecs(eig_image, temp_image, 1 );
corner_count = 4;
cvGoodFeaturesToTrack(frame1_1C,eig_image , temp_image, frame1_features, &corner_count, 0.1, .01, NULL, 5, 1);
cvFindCornerSubPix( frame1_1C, frame1_features, corner_count,cvSize(5, 5) ,optical_flow_window , optical_flow_termination_criteria);
if ( corner_count <= 0 )
printf( "\nNo features detected.\n" );
else
printf( "\nNumber of features found = %d\n", corner_count );
//Lucas-Kanade method.
char optical_flow_found_feature[20];
float optical_flow_feature_error[20];
allocateOnDemand( &pyramid1, frame_size, IPL_DEPTH_8U, 1 );
allocateOnDemand( &pyramid2, frame_size, IPL_DEPTH_8U, 1 );
cvCalcOpticalFlowPyrLK(frame1_1C, frame2_1C, pyramid1, pyramid2, frame1_features, frame2_features, corner_count, optical_flow_window, 5, optical_flow_found_feature, NULL, optical_flow_termination_criteria, NULL);
/*
double sumOfDistancesX = 0;
double sumOfDistancesY = 0;
int debug = 0;
CvFont font1, font2;
CvScalar red, green, blue;
IplImage* seg_in = NULL;
IplImage *seg_out = NULL;
allocateOnDemand( &seg_in, frame_size, IPL_DEPTH_8U, 3 );
allocateOnDemand( &seg_out, frame_size, IPL_DEPTH_8U, 3 );
clearImage(seg_in);
clearImage(seg_in);
for( int i=0; i <corner_count; i++ )
{
if ( optical_flow_found_feature[i] == 0 )
continue;
p.x = (int) frame1_features[i].x;
p.y = (int) frame1_features[i].y;
q.x = (int) frame2_features[i].x;
q.y = (int) frame2_features[i].y;
angle = atan2( (double) p.y - q.y, (double) p.x - q.x );
sumOfDistancesX += q.x - p.x;
sumOfDistancesY += q.y - p.y;
//cvRemap(frame2,frame1,averageDistanceX , averageDistanceY,CV_INTER_LINEAR | CV_WARP_FILL_OUTLIERS, cvScalarAll(0));
}
*/
/*
int averageDistanceX = sumOfDistancesX / corner_count;
int averageDistanceY = sumOfDistancesY / corner_count;
l.x = averageDistanceX - q.x;
s.y = averageDistanceY - q.y;
*/
#define cvWarpPerspectiveQMatrix cvGetPerspectiveTransform
//CvMat* N = cvCreateMat(3,3,CV_32FC1);
cvGetPerspectiveTransform(frame2_features, frame1_features, M);
cvPerspectiveTransform(frame1_features, frame2_features, M);
cvWarpPerspective( frame2_features, frame1_features, M,CV_INTER_LINEAR+CV_WARP_FILL_OUTLIERS,cvScalarAll(0) );
cvShowImage("Optical Flow", frame1);
cvWaitKey(50);
}
cvReleaseCapture(&cap);
cvReleaseMat(&M);
return 0;
}
You don't want to subtract the average displacement from the second image, you want to transform (move) the second image by the average displacement so that it "matches" the first. The "displacement" you use depends on your situation.
If your camera is shaking but otherwise stationary, you want to use the average displacement between two consecutive frames as the transformation vector for the second frame. With each new frame you compute the displacement between the transformed first frame and the new frame, and transform the new frame.
If your camera moves and shakes (i.e. helmet mounted camera on a mountainbiker) you want to first find an average displacement between frames over a few frames and then transform the individual frames in a sequence by the difference between that average displacement and the displacement between it and the previous frame.
EDIT
What you basically need to do for option 2 is calculate the average of the frame-to-frame movement over the last few frames. You could do this in any number of ways, but I'd suggest something like a Kalman filter. Then, for a new frame, you calculate the movement between it and the (corrected) previous frame. From that movement you subtract the average movement up to that point, and you move the new frame by the difference.
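As a concrete sketch of the first case, reusing the names from the question's code (corner_count, frame1_features, frame2_features, optical_flow_found_feature, frame3, allocateOnDemand); the pure-translation model here is an assumption:
// Average the flow of the successfully tracked features, then shift the
// second frame back by that average so it lines up with the first.
double sumX = 0, sumY = 0;
int tracked = 0;
for (int i = 0; i < corner_count; i++)
{
    if (optical_flow_found_feature[i] == 0) continue;
    sumX += frame2_features[i].x - frame1_features[i].x;
    sumY += frame2_features[i].y - frame1_features[i].y;
    tracked++;
}
if (tracked > 0)
{
    float dx = (float)(sumX / tracked);
    float dy = (float)(sumY / tracked);
    // 2x3 affine matrix for a pure translation by (-dx, -dy).
    float tdata[6] = { 1, 0, -dx, 0, 1, -dy };
    CvMat T = cvMat(2, 3, CV_32FC1, tdata);
    allocateOnDemand(&frame3, frame_size, IPL_DEPTH_8U, 3);
    cvWarpAffine(frame2, frame3, &T, CV_INTER_LINEAR + CV_WARP_FILL_OUTLIERS, cvScalarAll(0));
    cvShowImage("Optical Flow", frame3); // the stabilised second frame
}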