VideoCapture rotation with Bilinear Interpolation using OpenCV 2.4.3 - C++

I'm implementing video capture rotation with bilinear interpolation, like warpAffine() does in the OpenCV library, but so far I have run into some problems:
1. I'm getting some artifacts during rotation. Here are samples of the border, 90-degree and 360-degree rotation artifacts:
https://www.dropbox.com/sh/oe51ty0cy695i3o/hcAzwmAk6z
2. I can't change the resolution of my capture using
capture.set(CV_CAP_PROP_FRAME_WIDTH, 1280 )
capture.set(CV_CAP_PROP_FRAME_HEIGHT, 720 )
Both calls return false.
I use a LifeCam Cinema.
Here is my code:
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <cmath>
#include <ctime>
#include <iostream>
#include <cstdlib>
using namespace cv;
using namespace std;
const double M_PI = 3.14159265359;
void print_help( const char* prg );
Mat rotate( Mat& in, int angle , Point2f rotationCenter );
inline uchar int2uchar( int color ) {
return (uchar)( color < 0 ? 0 : color > 255 ? 255 : color );
}
void print_help( const char* prg ) {
cout << "Report:" << endl;
cout << "Application : " << prg << endl;
cout << "Can't access capture device" << endl;
}
// rotation with bilinear interpolation
Mat rotate( Mat& in, int angle , Point2f rotationCenter ) {
// Note: initialize with Scalar(0) so unused pixels stay black
Mat out( in.size(), in.type(), Scalar(0) );
float in_radians = (float)( angle * M_PI / 180 );
float sinAngle = (float)( sin( in_radians ) );
float cosAngle = (float)( cos( in_radians ) );
for ( int col(0); col < in.cols; ++col ) {
for ( int row(0); row < in.rows; ++row ) {
// rotate (col, row) around rotationCenter
// x' = x * cos(angle) - y * sin(angle)
float temp_columns( ( col - rotationCenter.x ) * (cosAngle) -
( row - rotationCenter.y ) * (sinAngle) +
rotationCenter.x );
// y' = x * sin(angle) + y * cos(angle)
float temp_rows ( ( col - rotationCenter.x ) * (sinAngle) +
( row - rotationCenter.y ) * (cosAngle) +
rotationCenter.y );
float max_col( ceil (temp_columns) );
float min_col( floor(temp_columns) );
float max_row( ceil (temp_rows) );
float min_row( floor(temp_rows) );
// clip all irrelevant parts
if ( max_col >= in.cols || max_row >= in.rows ||
min_col < 0 || min_row < 0 ) {
// don't draw
continue;
}
float deltaCol( temp_columns - min_col );
float deltaRow( temp_rows - min_row );
// left top, right top, left bottom and right bottom
Vec3b q12( in.at < Vec3b >( (int)min_row, (int)min_col ) );
Vec3b q22( in.at < Vec3b >( (int)min_row, (int)max_col ) );
Vec3b q11( in.at < Vec3b >( (int)max_row, (int)min_col ) );
Vec3b q21( in.at < Vec3b >( (int)max_row, (int)max_col ) );
// R1 - linear interpolation between the bottom neighbors
double blueR1 ( ( 1 - deltaCol ) * q11[0] + deltaCol * q21[0] );
double greenR1 ( ( 1 - deltaCol ) * q11[1] + deltaCol * q21[1] );
double redR1 ( ( 1 - deltaCol ) * q11[2] + deltaCol * q21[2] );
// R2 - linear interpolation between the top neighbors
double blueR2 ( ( 1 - deltaCol ) * q12[0] + deltaCol * q22[0] );
double greenR2 ( ( 1 - deltaCol ) * q12[1] + deltaCol * q22[1] );
double redR2 ( ( 1 - deltaCol ) * q12[2] + deltaCol * q22[2] );
// P - linear interpolation of R1 and R2
int blue ( (int)ceil( ( 1 - deltaRow ) * blueR2 + deltaRow * blueR1 ) );
int green( (int)ceil( ( 1 - deltaRow ) * greenR2 + deltaRow * greenR1 ) );
int red ( (int)ceil( ( 1 - deltaRow ) * redR2 + deltaRow * redR1 ) );
// Vec3b is a 3-channel value; each channel is a byte
out.at < Vec3b >( row, col )[ 0 ] = int2uchar(blue);
out.at < Vec3b >( row, col )[ 1 ] = int2uchar(green);
out.at < Vec3b >( row, col )[ 2 ] = int2uchar(red);
}
}
return out;
}
int main( int ac, char ** av ) {
if ( ac < 2 ) {
print_help( av[ 0 ] );
return -1;
}
// In degrees
int step = 1, angle = 90;
VideoCapture capture;
// doesn't work properly
if ( capture.set(CV_CAP_PROP_FRAME_WIDTH, 1280 ) &&
capture.set(CV_CAP_PROP_FRAME_HEIGHT, 720 ) ) {
cout << "Resolution : "
<< capture.get(CV_CAP_PROP_FRAME_WIDTH )
<< " x "
<< capture.get(CV_CAP_PROP_FRAME_HEIGHT )
<< endl;
} else {
cout << "There's some problem with VideoCapture::set()" << endl;
}
capture.open( atoi( av[ 1 ] ) );
while ( !capture.isOpened( ) ) {
print_help( av[ 0 ] );
cout << "Capture device " << atoi( av[ 1 ] ) << " failed to open!" << endl;
cout << "Connect capture device to PC\a" << endl;
system("pause");
cout << endl;
capture.open( atoi( av[ 1 ] ) );
}
cout << "Device " << atoi( av[ 1 ] ) << " is connected" << endl;
string original("Original");
string withInterpolation("With Bilinear Interpolation");
namedWindow( original, CV_WINDOW_AUTOSIZE );
namedWindow( withInterpolation, CV_WINDOW_AUTOSIZE);
Mat frame;
for ( ;; ) {
capture >> frame;
if ( frame.empty( ) )
break;
createTrackbar("Rotate", withInterpolation, &angle, 360, 0);
imshow( original, frame );
char key = ( char ) waitKey( 2 );
switch ( key ) {
case '+':
angle += step;
break;
case '-':
angle -= step;
break;
case 27:
case 'q':
return 0;
break;
}
Mat result;
Point2f rotationCenter( (float)( frame.cols / 2.0 ),
(float)( frame.rows / 2.0 ) );
result = rotate( frame, angle, rotationCenter );
// Note: mirror effect; flipCode = 1 flips the frame horizontally
flip(result,result, 1);
imshow( withInterpolation, result );
// test to compare my bilinear interpolation against OpenCV's
Mat temp;
warpAffine( frame, temp,
getRotationMatrix2D( rotationCenter, angle, (double)(1.0) ),
frame.size(), 1, 0 );
string openCVInterpolation("OpenCV Bilinear Interpolation");
namedWindow( openCVInterpolation, CV_WINDOW_AUTOSIZE );
createTrackbar("Rotate", openCVInterpolation, &angle, 360, 0);
flip(temp,temp, 1);
imshow( openCVInterpolation, temp );
}
return 0;
}

Addressing your second issue - setting Lifecam resolution using OpenCV
I found that the Lifecam dashboard software was apparently interfering with the OpenCV VideoCapture calls. If you uninstall Lifecam using Programs & Features in the Control Panel, the calls
capture.set(CV_CAP_PROP_FRAME_WIDTH, 1280)
capture.set(CV_CAP_PROP_FRAME_HEIGHT, 720)
will work fine.
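One more thing worth checking, an observation of mine rather than part of the answer above: in the question's main(), set() is called before open(), and VideoCapture::set() returns false while no device is opened. A minimal sketch of the reordering (device index 0 assumed):
VideoCapture capture;
capture.open( 0 );  // open the device first
if ( capture.isOpened( ) ) {
    // with the device open, the backend can actually apply the property change
    bool okWidth  = capture.set( CV_CAP_PROP_FRAME_WIDTH, 1280 );
    bool okHeight = capture.set( CV_CAP_PROP_FRAME_HEIGHT, 720 );
    cout << "set() succeeded: " << ( okWidth && okHeight ) << endl;
}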

Related

Not able to create a vector with objects inside

I'm trying to write some code that takes the colors of one picture and transfers them to another. For this I'm using OpenCV for segmentation and treatment of the pictures.
I created a class named Shape that contains the contour of one shape (found with OpenCV's findContours(), which produces a std::vector<std::vector<cv::Point>>), a random string that serves as an id for the object, and a cv::Vec3b for the color of the shape.
But when I iterate over the contours found by OpenCV, create an object (of the Shape class) for each one, and add it to a list, it adds only some of them and then throws a segmentation fault on the console.
Here is my code:
#include <random>
#include <string>
#include <algorithm>
#include <opencv2/opencv.hpp>
using contourType = std::vector < std::vector < cv::Point > >;
std::string randomString ( )
{
auto randchar = [ ]( ) -> char {
const char charset[ ] =
"0123456789"
"ABCDEFGHIJKLMNOPQRSTUVWXYZ"
"abcdefghijklmnopqrstuvwxyz";
const size_t max_index = ( sizeof ( charset ) - 1);
return charset[ rand ( ) % max_index ];
};
std::string str ( 14, 0 );
std::generate_n ( str.begin ( ), 14, randchar );
return str;
};
class Shape {
public:
std::string idShape;
std::vector < cv::Point > contour;
cv::Vec3b color;
Shape () {
};
void setShape ( std::vector < cv::Point > cnt, cv::Vec3b clr ) {
idShape = randomString ( );
contour = cnt;
color = clr;
};
cv::Point centroid ( bool showImage ) {
cv::Moments m = cv::moments ( contour, true );
cv::Point centroid ( m.m10 / m.m00, m.m01 / m.m00 );
return centroid;
};
};
class Frame {
public:
std::string idFrame;
cv::Size size;
int type;
std::vector < Shape > shapes;
Frame ( cv::Size imSize, int tp ) {
idFrame = randomString ( );
size = imSize;
type = tp;
};
int addShape ( Shape shape ) {
shapes.push_back ( shape );
return 0;
};
};
contourType findShapes ( cv::Mat img, bool showImage ) {
//Threshold
cv::adaptiveThreshold ( img, img, 255, cv::ADAPTIVE_THRESH_MEAN_C, CV_THRESH_BINARY, 5, 1 );
//Erotion
cv::Mat kernel = cv::getStructuringElement ( cv::MORPH_RECT, cv::Size ( 2, 2 ), cv::Point ( 0, 0 ) );
cv::erode ( img, img, kernel );
//Find contours
contourType contours;
cv::Mat contourImg = img.clone ( );
cv::findContours ( contourImg, contours, CV_RETR_LIST, CV_CHAIN_APPROX_NONE );
//Show image if true 2nd. argument
if ( showImage == true ) {
cv::Mat contourImage ( img.size ( ), CV_8UC3, cv::Scalar ( 0, 0, 0 ) );
cv::Scalar colors[ 3 ];
colors[ 0 ] = cv::Scalar ( 255, 0, 0 );
colors[ 1 ] = cv::Scalar ( 0, 255, 0 );
colors[ 2 ] = cv::Scalar ( 0, 0, 255 );
for ( size_t idx = 0; idx < contours.size ( ); idx++ ) {
cv::drawContours ( contourImage, contours, idx, colors[ idx % 3 ], CV_FILLED /*thick*/ );
std::cout << idx;
};
cv::imshow ( "Contour Image", contourImage );
cv::waitKey ( 0 );
};
return contours;
};
cv::Vec3b findColor ( cv::Mat img, std::vector < cv::Point > contour, bool print ) {
//Read pixel intensity
cv::Vec3b intensity;
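// note: the next line assumes every contour has at least two points; contour[1] on a shorter contour is out of bounds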
cv::Point coordinate = contour[ 1 ];
intensity = img.at < cv::Vec3b > ( coordinate );
//Print pixel values
if ( print == true ) {
std::cout << intensity << std::endl;
};
return intensity;
};
int main ( int argc, char** argv ) {
std::string url = argv[1];
int type = 0;
cv::Mat img;
img = cv::imread ( url, 0 );
Frame frame ( img.size ( ), type );
contourType shapes;
shapes = findShapes ( img, false );
cv::Mat imColor;
imColor = cv::imread ( url, 1 );
std::list<Shape> te;
for ( size_t i = 0; i < shapes.size ( ); i++ ) {
Shape shape;
shape.setShape ( shapes[i], findColor ( imColor, shapes[i], false ) );
te.push_back( shape ); // Here is where it fails after adding a few objects; running out of memory? The idea is to store each Shape inside a Frame's shapes member, but I'm testing like this first, just adding them to a list.
};
return 0;
}
It adds only 29 shapes to the list before "segmentation fault (core dumped)" appears on the console, but the image contains about 67 shapes.

What does AND operator do when operated between two objects in C++?

In the OpenCV CamShift implementation there is a line of code (just before the switch statement in the onMouse() function in the code below) which goes like this:
selection &= Rect(0, 0, image.cols, image.rows);
where selection is a Rect of user-defined dimensions (selected by the mouse callback) and image is a frame from a video. The dimensions of selection and image do not match. What I don't understand is the purpose of this &= operator. I commented this line out and couldn't see any difference; the code compiles just fine. I am not sure what goes on inside when two objects are AND-ed, and I could not find any resource that addresses this particular issue.
This is the original code:
#include "opencv2/video/tracking.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/highgui/highgui.hpp"
#include <iostream>
#include <ctype.h>
using namespace cv;
using namespace std;
Mat image;
bool backprojMode = false;
bool selectObject = false;
int trackObject = 0;
bool showHist = true;
Point origin;
Rect selection;
int vmin = 10, vmax = 256, smin = 30;
static void onMouse( int event, int x, int y, int, void* )
{
if( selectObject )
{
selection.x = MIN(x, origin.x);
selection.y = MIN(y, origin.y);
selection.width = std::abs(x - origin.x);
selection.height = std::abs(y - origin.y);
selection &= Rect(0, 0, image.cols, image.rows);
}
switch( event )
{
case CV_EVENT_LBUTTONDOWN:
origin = Point(x,y);
selection = Rect(x,y,0,0);
selectObject = true;
break;
case CV_EVENT_LBUTTONUP:
selectObject = false;
if( selection.width > 0 && selection.height > 0 )
trackObject = -1;
break;
}
}
static void help()
{
cout << "\nThis is a demo that shows mean-shift based tracking\n"
"You select a color objects such as your face and it tracks it.\n"
"This reads from video camera (0 by default, or the camera number the user enters\n"
"Usage: \n"
" ./camshiftdemo [camera number]\n";
cout << "\n\nHot keys: \n"
"\tESC - quit the program\n"
"\tc - stop the tracking\n"
"\tb - switch to/from backprojection view\n"
"\th - show/hide object histogram\n"
"\tp - pause video\n"
"To initialize tracking, select the object with mouse\n";
}
const char* keys =
{
"{1| | 0 | camera number}"
};
int main( int argc, const char* argv[] )
{
help();
VideoCapture cap;
Rect trackWindow;
int hsize = 16;
float hranges[] = {0,180};
const float* phranges = hranges;
CommandLineParser parser(argc, argv, keys);
int camNum = parser.get<int>("1");
cap.open(argv[1]);
/* if( !cap.isOpened() )
{
help();
cout << "***Could not initialize capturing...***\n";
cout << "Current parameter's value: \n";
parser.printParams();
return -1;
}*/
namedWindow( "Histogram", 0 );
namedWindow( "CamShift Demo", 0 );
namedWindow( "ROI", 0 );
setMouseCallback( "CamShift Demo", onMouse, 0 );
createTrackbar( "Vmin", "CamShift Demo", &vmin, 256, 0 );
createTrackbar( "Vmax", "CamShift Demo", &vmax, 256, 0 );
createTrackbar( "Smin", "CamShift Demo", &smin, 256, 0 );
Mat frame, hsv, hue, mask, hist, histimg = Mat::zeros(200, 320, CV_8UC3), backproj;
bool paused = false;
for(;;)
{
if( !paused )
{
cap >> frame;
if( frame.empty() )
break;
}
frame.copyTo(image);
if( !paused )
{
cvtColor(image, hsv, COLOR_BGR2HSV);
if( trackObject )
{
int _vmin = vmin, _vmax = vmax;
inRange(hsv, Scalar(0, smin, MIN(_vmin,_vmax)),
Scalar(180, 256, MAX(_vmin, _vmax)), mask);
int ch[] = {0, 0};
hue.create(hsv.size(), hsv.depth());
mixChannels(&hsv, 1, &hue, 1, ch, 1);
if( trackObject < 0 )
{
Mat roi(hue, selection), maskroi(mask, selection);
calcHist(&roi, 1, 0, maskroi, hist, 1, &hsize, &phranges);
normalize(hist, hist, 0, 255, CV_MINMAX);
trackWindow = selection;
trackObject = 1;
histimg = Scalar::all(0);
int binW = histimg.cols / hsize;
Mat buf(1, hsize, CV_8UC3);
for( int i = 0; i < hsize; i++ )
buf.at<Vec3b>(i) = Vec3b(saturate_cast<uchar>(i*180./hsize), 255, 255);
cvtColor(buf, buf, CV_HSV2BGR);
for( int i = 0; i < hsize; i++ )
{
int val = saturate_cast<int>(hist.at<float>(i)*histimg.rows/255);
rectangle( histimg, Point(i*binW,histimg.rows),
Point((i+1)*binW,histimg.rows - val),
Scalar(buf.at<Vec3b>(i)), -1, 8 );
}
imshow("ROI",roi);
}
calcBackProject(&hue, 1, 0, hist, backproj, &phranges);
backproj &= mask;
RotatedRect trackBox = CamShift(backproj, trackWindow,
TermCriteria( CV_TERMCRIT_EPS | CV_TERMCRIT_ITER, 10, 1 ));
cout<<trackWindow.x<<" "<<trackWindow.y<<endl;
if( trackWindow.area() <= 1 )
{
int cols = backproj.cols, rows = backproj.rows, r = (MIN(cols, rows) + 5)/6;
trackWindow = Rect(trackWindow.x - r, trackWindow.y - r,
trackWindow.x + r, trackWindow.y + r) &
Rect(0, 0, cols, rows);
}
if( backprojMode )
cvtColor( backproj, image, COLOR_GRAY2BGR );
ellipse( image, trackBox, Scalar(0,0,255), 3, CV_AA );
}
}
else if( trackObject < 0 )
paused = false;
if( selectObject && selection.width > 0 && selection.height > 0 )
{
Mat roi(image, selection);
bitwise_not(roi, roi);
}
imshow( "CamShift Demo", image );
//imshow( "Histogram", histimg );
char c = (char)waitKey(50);
if( c == 27 )
break;
switch(c)
{
case 'b':
backprojMode = !backprojMode;
break;
case 'c':
trackObject = 0;
histimg = Scalar::all(0);
break;
case 'h':
showHist = !showHist;
if( !showHist )
destroyWindow( "Histogram" );
else
namedWindow( "Histogram", 1 );
break;
case 'p':
paused = !paused;
break;
default:
int a=1;
//continue;
}
}
return 0;
}
Any comments or suggestions will be appreciated.
The & operator applied to two Rect objects results in a new Rect object, which is their intersection. Here they intersect the selection with the entire image frame. I suspect that this is merely a precaution, done so that selection does not accidentally exceed the boundaries of the image.
The & and | operators are overloaded for cv::Rect: & is the intersection and | the union of two Rects.
See for yourself:
Rect a(2,2,5,5);
Rect b(4,4,5,5);
Rect c = a & b;
Rect d = a | b;
cerr << a << endl << b << endl << c << endl << d << endl ;
[5 x 5 from (2, 2)]
[5 x 5 from (4, 4)]
[3 x 3 from (4, 4)] // intersection
[7 x 7 from (2, 2)] // union
In your code example above,
selection &= Rect(0, 0, image.cols, image.rows);
'selection' gets cropped to the valid image borders.
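For intuition, the intersection that & computes behaves like the following sketch (my own illustration of the logic, not the actual OpenCV source):
Rect intersect( const Rect& a, const Rect& b )
{
    // the intersection spans the overlapping x and y ranges
    int x1 = max( a.x, b.x );
    int y1 = max( a.y, b.y );
    int x2 = min( a.x + a.width, b.x + b.width );
    int y2 = min( a.y + a.height, b.y + b.height );
    if ( x2 <= x1 || y2 <= y1 )
        return Rect();  // empty Rect when the inputs don't overlap
    return Rect( x1, y1, x2 - x1, y2 - y1 );
}
So after selection &= Rect(0, 0, image.cols, image.rows), any part of selection lying outside the image is clipped away, and Mat roi(image, selection) can never index outside the frame.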

Kalman Filter with acceleration

I am trying to implement Kalman-filter-based mouse tracking (as a first test) using a velocity-acceleration model.
I want to try out this simple model; my state transition equations are:
X(k) = [x(k), y(k)]' (Position)
V(k) = [vx(k), vy(k)]' (Velocity)
X(k) = X(k-1) + dt*V(k-1) + 0.5*dt*dt*a(k-1)
V(k) = V(k-1) + dt*a(k-1)
a(k) = a(k-1)
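Written as a single transition matrix over the state [x, y, vx, vy, a]' (the same layout the code below loads into KF.transitionMatrix), the model is:
    [ 1  0  dt  0   dt*dt/2 ]
    [ 0  1  0   dt  dt*dt/2 ]
A = [ 0  0  1   0   dt      ]
    [ 0  0  0   1   dt      ]
    [ 0  0  0   0   1       ]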
Using this, I wrote the following piece of code:
#include <iostream>
#include <vector>
#include <cstdio>
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/video/tracking.hpp>
using namespace cv;
using namespace std;
struct mouse_info_struct { int x,y; };
struct mouse_info_struct mouse_info = {-1,-1}, last_mouse;
void on_mouse(int event, int x, int y, int flags, void* param)
{
//if (event == CV_EVENT_LBUTTONUP)
{
last_mouse = mouse_info;
mouse_info.x = x;
mouse_info.y = y;
}
}
void printmat(const cv::Mat &__mat, std::string __str)
{
std::cout << "--------" << __str << "----------\n";
for (int i=0 ; i<__mat.rows ; ++i)
{
for (int j=0 ; j<__mat.cols ; ++j)
std::cout << __mat.at<double>(i,j) << " ";
std::cout << std::endl;
}
std::cout << "-------------------------------------\n";
}
int main (int argc, char * const argv[])
{
int nStates = 5, nMeasurements = 2, nInputs = 1;
Mat img(500, 900, CV_8UC3);
KalmanFilter KF(nStates, nMeasurements, nInputs, CV_64F);
Mat state(nStates, 1, CV_64F); /* (x, y, Vx, Vy, a) */
Mat measurement(nMeasurements,1,CV_64F); measurement.setTo(Scalar(0));
Mat prevMeasurement(nMeasurements,1,CV_64F); prevMeasurement.setTo(Scalar(0));
int key = -1, dt=100, T=1000;
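// note: dt and T are both int, so every (dt/T) below is integer division (100/1000 evaluates to 0)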
float /*a=100, acclErrMag = 0.05,*/ measurementErrVar = 100, noiseVal=0.001, covNoiseVal=0.9e-4;
namedWindow("Mouse-Kalman");
setMouseCallback("Mouse-Kalman", on_mouse, 0);
//while ( (char)(key=cv::waitKey(100)) != 'q' )
{
/// A
KF.transitionMatrix.at<double>(0,0) = 1;
KF.transitionMatrix.at<double>(0,1) = 0;
KF.transitionMatrix.at<double>(0,2) = (dt/T);
KF.transitionMatrix.at<double>(0,3) = 0;
KF.transitionMatrix.at<double>(0,4) = 0.5*(dt/T)*(dt/T);
KF.transitionMatrix.at<double>(1,0) = 0;
KF.transitionMatrix.at<double>(1,1) = 1;
KF.transitionMatrix.at<double>(1,2) = 0;
KF.transitionMatrix.at<double>(1,3) = (dt/T);
KF.transitionMatrix.at<double>(1,4) = 0.5*(dt/T)*(dt/T);
KF.transitionMatrix.at<double>(2,0) = 0;
KF.transitionMatrix.at<double>(2,1) = 0;
KF.transitionMatrix.at<double>(2,2) = 1;
KF.transitionMatrix.at<double>(2,3) = 0;
KF.transitionMatrix.at<double>(2,4) = (dt/T);
KF.transitionMatrix.at<double>(3,0) = 0;
KF.transitionMatrix.at<double>(3,1) = 0;
KF.transitionMatrix.at<double>(3,2) = 0;
KF.transitionMatrix.at<double>(3,3) = 1;
KF.transitionMatrix.at<double>(3,4) = (dt/T);
KF.transitionMatrix.at<double>(4,0) = 0;
KF.transitionMatrix.at<double>(4,1) = 0;
KF.transitionMatrix.at<double>(4,2) = 0;
KF.transitionMatrix.at<double>(4,3) = 0;
KF.transitionMatrix.at<double>(4,4) = 1;
/// Initial estimate of state variables
KF.statePost = cv::Mat::zeros(nStates, 1,CV_64F);
KF.statePost.at<double>(0) = mouse_info.x;
KF.statePost.at<double>(1) = mouse_info.y;
KF.statePost.at<double>(2) = 0;
KF.statePost.at<double>(3) = 0;
KF.statePost.at<double>(4) = 0;
KF.statePre = KF.statePost;
/// Ex or Q
setIdentity(KF.processNoiseCov, Scalar::all(noiseVal));
/// Initial covariance estimate Sigma_bar(t) or P'(k)
setIdentity(KF.errorCovPre, Scalar::all(1000));
/// Sigma(t) or P(k)
setIdentity(KF.errorCovPost, Scalar::all(1000));
/// B
KF.controlMatrix = cv::Mat(nStates, nInputs,CV_64F);
KF.controlMatrix.at<double>(0,0) = 0;
KF.controlMatrix.at<double>(1,0) = 0;
KF.controlMatrix.at<double>(2,0) = 0;
KF.controlMatrix.at<double>(3,0) = 0;
KF.controlMatrix.at<double>(4,0) = 1;
/// H
KF.measurementMatrix = cv::Mat::eye(nMeasurements, nStates, CV_64F);
/// Ez or R
setIdentity(KF.measurementNoiseCov, Scalar::all(measurementErrVar*measurementErrVar));
printmat(KF.controlMatrix, "KF.controlMatrix");
printmat(KF.transitionMatrix, "KF.transitionMatrix");
printmat(KF.statePre,"KF.statePre");
printmat(KF.processNoiseCov, "KF.processNoiseCov");
printmat(KF.measurementMatrix, "KF.measurementMatrix");
printmat(KF.measurementNoiseCov, "KF.measurementNoiseCov");
printmat(KF.errorCovPost,"KF.errorCovPost");
printmat(KF.errorCovPre,"KF.errorCovPre");
printmat(KF.statePost,"KF.statePost");
while (mouse_info.x < 0 || mouse_info.y < 0)
{
imshow("Mouse-Kalman", img);
waitKey(30);
continue;
}
while ( (char)key != 's' )
{
/// MAKE A MEASUREMENT
measurement.at<double>(0) = mouse_info.x;
measurement.at<double>(1) = mouse_info.y;
/// MEASUREMENT UPDATE
Mat estimated = KF.correct(measurement);
/// STATE UPDATE
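// note: predict() is called without a control vector, so the u computed below never enters the filter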
Mat prediction = KF.predict();
cv::Mat u(nInputs,1,CV_64F);
u.at<double>(0,0) = 0.0 * sqrt(pow((prevMeasurement.at<double>(0) - measurement.at<double>(0)),2)
+ pow((prevMeasurement.at<double>(1) - measurement.at<double>(1)),2));
/// STORE ALL DATA
Point predictPt(prediction.at<double>(0),prediction.at<double>(1));
Point estimatedPt(estimated.at<double>(0),estimated.at<double>(1));
Point measuredPt(measurement.at<double>(0),measurement.at<double>(1));
/// PLOT POINTS
#define drawCross( center, color, d ) \
line( img, Point( center.x - d, center.y - d ), \
Point( center.x + d, center.y + d ), color, 2, CV_AA, 0); \
line( img, Point( center.x + d, center.y - d ), \
Point( center.x - d, center.y + d ), color, 2, CV_AA, 0 )
/// DRAW ALL ON IMAGE
img = Scalar::all(0);
drawCross( predictPt, Scalar(255,255,255), 9 ); //WHITE
drawCross( estimatedPt, Scalar(0,0,255), 6 ); //RED
drawCross( measuredPt, Scalar(0,255,0), 3 ); //GREEN
line( img, estimatedPt, measuredPt, Scalar(100,255,255), 3, CV_AA, 0 );
line( img, estimatedPt, predictPt, Scalar(0,255,255), 3, CV_AA, 0 );
prevMeasurement = measurement;
imshow( "Mouse-Kalman", img );
key=cv::waitKey(10);
}
}
return 0;
}
Here is the output of the code: http://www.youtube.com/watch?v=9_xd4HSz8_g
As you can see, the tracking is very, very slow. I don't understand what is wrong with the model or why the estimation is so slow. I don't expect there to be any control input.
Can anyone explain this?
I have modified my code, and I am posting it for those who want to tweak it and play around further. The main problem was the choice of covariances.
#include <iostream>
#include <vector>
#include <cstdio>
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/video/tracking.hpp>
using namespace cv;
using namespace std;
struct mouse_info_struct { int x,y; };
struct mouse_info_struct mouse_info = {-1,-1}, last_mouse;
vector<Point> mousev,kalmanv;
int trackbarProcessNoiseCov = 10, trackbarMeasurementNoiseCov = 10, trackbarStateEstimationErrorCov = 10;
float processNoiseCov=10, measurementNoiseCov = 1000, stateEstimationErrorCov = 50;
int trackbarProcessNoiseCovMax=10000, trackbarMeasurementNoiseCovMax = 10000,
trackbarStateEstimationErrorCovMax = 5000;
float processNoiseCovMin=0, measurementNoiseCovMin = 0,
stateEstimationErrorCovMin = 0;
float processNoiseCovMax=100, measurementNoiseCovMax = 5000,
stateEstimationErrorCovMax = 5000;
int nStates = 5, nMeasurements = 2, nInputs = 1;
KalmanFilter KF(nStates, nMeasurements, nInputs, CV_64F);
void on_mouse(int event, int x, int y, int flags, void* param)
{
last_mouse = mouse_info;
mouse_info.x = x;
mouse_info.y = y;
}
void on_trackbarProcessNoiseCov( int, void* )
{
processNoiseCov = processNoiseCovMin +
(trackbarProcessNoiseCov * (processNoiseCovMax-processNoiseCovMin)/trackbarProcessNoiseCovMax);
setIdentity(KF.processNoiseCov, Scalar::all(processNoiseCov));
std::cout << "\nProcess Noise Cov: " << processNoiseCov;
std::cout << "\nMeasurement Noise Cov: " << measurementNoiseCov << std::endl;
}
void on_trackbarMeasurementNoiseCov( int, void* )
{
measurementNoiseCov = measurementNoiseCovMin +
(trackbarMeasurementNoiseCov * (measurementNoiseCovMax-measurementNoiseCovMin)/trackbarMeasurementNoiseCovMax);
setIdentity(KF.measurementNoiseCov, Scalar::all(measurementNoiseCov));
std::cout << "\nProcess Noise Cov: " << processNoiseCov;
std::cout << "\nMeasurement Noise Cov: " << measurementNoiseCov << std::endl;
}
int main (int argc, char * const argv[])
{
Mat img(500, 1000, CV_8UC3);
Mat state(nStates, 1, CV_64F);/* (x, y, Vx, Vy, a) */
Mat measurementNoise(nMeasurements, 1, CV_64F), processNoise(nStates, 1, CV_64F);
Mat measurement(nMeasurements,1,CV_64F); measurement.setTo(Scalar(0.0));
Mat noisyMeasurement(nMeasurements,1,CV_64F); noisyMeasurement.setTo(Scalar(0.0));
Mat prevMeasurement(nMeasurements,1,CV_64F); prevMeasurement.setTo(Scalar(0.0));
Mat prevMeasurement2(nMeasurements,1,CV_64F); prevMeasurement2.setTo(Scalar(0.0));
int key = -1, dt=50, T=1000;
namedWindow("Mouse-Kalman");
setMouseCallback("Mouse-Kalman", on_mouse, 0);
createTrackbar( "Process Noise Cov", "Mouse-Kalman", &trackbarProcessNoiseCov,
trackbarProcessNoiseCovMax, on_trackbarProcessNoiseCov );
createTrackbar( "Measurement Noise Cov", "Mouse-Kalman", &trackbarMeasurementNoiseCov,
trackbarMeasurementNoiseCovMax, on_trackbarMeasurementNoiseCov );
on_trackbarProcessNoiseCov( trackbarProcessNoiseCov, 0 );
on_trackbarMeasurementNoiseCov( trackbarMeasurementNoiseCov, 0 );
//while ( (char)(key=cv::waitKey(100)) != 'q' )
{
/// A (TRANSITION MATRIX INCLUDING VELOCITY AND ACCELERATION MODEL)
KF.transitionMatrix.at<double>(0,0) = 1;
KF.transitionMatrix.at<double>(0,1) = 0;
KF.transitionMatrix.at<double>(0,2) = (dt/T);
KF.transitionMatrix.at<double>(0,3) = 0;
KF.transitionMatrix.at<double>(0,4) = 0.5*(dt/T)*(dt/T);
KF.transitionMatrix.at<double>(1,0) = 0;
KF.transitionMatrix.at<double>(1,1) = 1;
KF.transitionMatrix.at<double>(1,2) = 0;
KF.transitionMatrix.at<double>(1,3) = (dt/T);
KF.transitionMatrix.at<double>(1,4) = 0.5*(dt/T)*(dt/T);
KF.transitionMatrix.at<double>(2,0) = 0;
KF.transitionMatrix.at<double>(2,1) = 0;
KF.transitionMatrix.at<double>(2,2) = 1;
KF.transitionMatrix.at<double>(2,3) = 0;
KF.transitionMatrix.at<double>(2,4) = (dt/T);
KF.transitionMatrix.at<double>(3,0) = 0;
KF.transitionMatrix.at<double>(3,1) = 0;
KF.transitionMatrix.at<double>(3,2) = 0;
KF.transitionMatrix.at<double>(3,3) = 1;
KF.transitionMatrix.at<double>(3,4) = (dt/T);
KF.transitionMatrix.at<double>(4,0) = 0;
KF.transitionMatrix.at<double>(4,1) = 0;
KF.transitionMatrix.at<double>(4,2) = 0;
KF.transitionMatrix.at<double>(4,3) = 0;
KF.transitionMatrix.at<double>(4,4) = 1;
/// Initial estimate of state variables
KF.statePost = cv::Mat::zeros(nStates, 1,CV_64F);
KF.statePost.at<double>(0) = mouse_info.x;
KF.statePost.at<double>(1) = mouse_info.y;
KF.statePost.at<double>(2) = 0.1;
KF.statePost.at<double>(3) = 0.1;
KF.statePost.at<double>(4) = 0.1;
KF.statePre = KF.statePost;
state = KF.statePost;
/// Ex or Q (PROCESS NOISE COVARIANCE)
setIdentity(KF.processNoiseCov, Scalar::all(processNoiseCov));
/// Initial covariance estimate Sigma_bar(t) or P'(k)
setIdentity(KF.errorCovPre, Scalar::all(stateEstimationErrorCov));
/// Sigma(t) or P(k) (STATE ESTIMATION ERROR COVARIANCE)
setIdentity(KF.errorCovPost, Scalar::all(stateEstimationErrorCov));
/// B (CONTROL MATRIX)
KF.controlMatrix = cv::Mat(nStates, nInputs,CV_64F);
KF.controlMatrix.at<double>(0,0) = /*0.5*(dt/T)*(dt/T);//*/0;
KF.controlMatrix.at<double>(1,0) = /*0.5*(dt/T)*(dt/T);//*/0;
KF.controlMatrix.at<double>(2,0) = 0;
KF.controlMatrix.at<double>(3,0) = 0;
KF.controlMatrix.at<double>(4,0) = 1;
/// H (MEASUREMENT MATRIX)
KF.measurementMatrix = cv::Mat::eye(nMeasurements, nStates, CV_64F);
/// Ez or R (MEASUREMENT NOISE COVARIANCE)
setIdentity(KF.measurementNoiseCov, Scalar::all(measurementNoiseCov));
while (mouse_info.x < 0 || mouse_info.y < 0)
{
imshow("Mouse-Kalman", img);
waitKey(30);
continue;
}
prevMeasurement.at<double>(0,0) = 0;
prevMeasurement.at<double>(1,0) = 0;
prevMeasurement2 = prevMeasurement;
while ( (char)key != 's' )
{
/// STATE UPDATE
Mat prediction = KF.predict();
/// MAKE A MEASUREMENT
measurement.at<double>(0) = mouse_info.x;
measurement.at<double>(1) = mouse_info.y;
/// MEASUREMENT NOISE SIMULATION
randn( measurementNoise, Scalar(0),
Scalar::all(sqrtf(measurementNoiseCov)));
noisyMeasurement = measurement + measurementNoise;
/// MEASUREMENT UPDATE
Mat estimated = KF.correct(noisyMeasurement);
cv::Mat u(nInputs,1,CV_64F);
u.at<double>(0,0) = 0.0 * sqrtf(pow((prevMeasurement.at<double>(0) - measurement.at<double>(0)),2)
+ pow((prevMeasurement.at<double>(1) - measurement.at<double>(1)),2));
/// STORE ALL DATA
Point noisyPt(noisyMeasurement.at<double>(0),noisyMeasurement.at<double>(1));
Point estimatedPt(estimated.at<double>(0),estimated.at<double>(1));
Point measuredPt(measurement.at<double>(0),measurement.at<double>(1));
/// PLOT POINTS
#define drawCross( center, color, d ) \
line( img, Point( center.x - d, center.y - d ), \
Point( center.x + d, center.y + d ), color, 2, CV_AA, 0); \
line( img, Point( center.x + d, center.y - d ), \
Point( center.x - d, center.y + d ), color, 2, CV_AA, 0 )
/// DRAW ALL ON IMAGE
img = Scalar::all(0);
drawCross( noisyPt, Scalar(255,255,255), 9 ); //WHITE
drawCross( estimatedPt, Scalar(0,0,255), 6 ); //RED
drawCross( measuredPt, Scalar(0,255,0), 3 ); //GREEN
line( img, estimatedPt, measuredPt, Scalar(100,255,255), 3, CV_AA, 0 );
line( img, estimatedPt, noisyPt, Scalar(0,255,255), 3, CV_AA, 0 );
imshow( "Mouse-Kalman", img );
key=cv::waitKey(dt);
prevMeasurement = measurement;
}
}
return 0;
}

Normals for height map data

I want to find normals for height-map data. I am using GL_TRIANGLES in my code for the indices. How would I find normals for this?
Given a triangle (vert1, vert2, vert3) its normal is ((vert2 - vert1).cross(vert3 - vert1)).normalize().
For smooth, per-vertex normals: for each vertex, sum the face normals of every triangle that vertex is part of, then normalize the sum.
EDIT: Example:
#include <GL/glut.h>
#include <vector>
#include <cmath>
#include <Eigen/Core>
#include <Eigen/Geometry>
using namespace std;
using namespace Eigen;
typedef Matrix< Vector3f, Dynamic, Dynamic > VecMat;
// given a matrix of heights returns a matrix of vertices
VecMat GetVerts( const MatrixXf& hm )
{
VecMat verts( hm.rows(), hm.cols() );
for( int col = 0; col < hm.cols(); ++col )
for( int row = 0; row < hm.rows(); ++row )
verts( row, col ) = Vector3f( col, row, hm( row, col ) );
return verts;
}
VecMat GetNormals( const VecMat& hm )
{
VecMat normals( hm );
for( int col = 0; col < hm.cols(); ++col )
for( int row = 0; row < hm.rows(); ++row )
{
Vector3f sum( Vector3f::Zero() );
const Vector3f& cur = hm( row, col );
if( row+1 < hm.rows() && col+1 < hm.cols() )
sum += ( hm( row+0, col+1 ) - cur ).cross( hm( row+1, col+0 ) - cur ).normalized();
if( row+1 < hm.rows() && col > 0 )
sum += ( hm( row+1, col+0 ) - cur ).cross( hm( row+0, col-1 ) - cur ).normalized();
if( row > 0 && col > 0 )
sum += ( hm( row+0, col-1 ) - cur ).cross( hm( row-1, col+0 ) - cur ).normalized();
if( row > 0 && col+1 < hm.cols() )
sum += ( hm( row-1, col+0 ) - cur ).cross( hm( row+0, col+1 ) - cur ).normalized();
normals( row, col ) = sum.normalized();
}
return normals;
}
// returns an index array for a GL_TRIANGLES heightmap
vector< unsigned int > GetIndices( int rows, int cols )
{
vector< unsigned int > indices;
for( int col = 1; col < cols; ++col )
for( int row = 1; row < rows; ++row )
{
// Eigen default storage order is column-major
// lower triangle
indices.push_back( (col-1) * rows + (row-1) );
indices.push_back( (col-0) * rows + (row-1) );
indices.push_back( (col-1) * rows + (row-0) );
// upper triangle
indices.push_back( (col-1) * rows + (row-0) );
indices.push_back( (col-0) * rows + (row-1) );
indices.push_back( (col-0) * rows + (row-0) );
}
return indices;
}
VecMat heightmap;
VecMat normals;
vector< unsigned int > indices;
void init()
{
// wavy heightmap
MatrixXf hm( 64, 64 );
for( int col = 1; col < hm.cols(); ++col )
for( int row = 1; row < hm.rows(); ++row )
{
float x = ( col - ( hm.cols() / 2.0f ) ) / 2.0f;
float y = ( row - ( hm.rows() / 2.0f ) ) / 2.0f;
hm( row, col ) = cos( sqrt( x * x + y * y ) );
}
heightmap = GetVerts( hm );
heightmap.array() -= Vector3f( hm.cols() / 2.0f, hm.rows() / 2.0f, 0 );
for( int col = 0; col < hm.cols(); ++col )
for( int row = 0; row < hm.rows(); ++row )
heightmap( row, col ).array() *= Vector3f( 1 / 4.0f, 1 / 4.0f, 1.0f ).array();
normals = GetNormals( heightmap );
indices = GetIndices( heightmap.rows(), heightmap.cols() );
}
void display()
{
glEnable( GL_DEPTH_TEST );
glEnable( GL_CULL_FACE );
glShadeModel( GL_SMOOTH );
glEnable( GL_LIGHTING );
GLfloat global_ambient[] = { 0.0, 0.0, 0.0, 1.0 };
glLightModelfv( GL_LIGHT_MODEL_AMBIENT, global_ambient );
glEnable( GL_COLOR_MATERIAL );
glColorMaterial( GL_FRONT, GL_AMBIENT_AND_DIFFUSE );
glClear( GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT );
glMatrixMode( GL_PROJECTION );
glLoadIdentity();
double w = glutGet( GLUT_WINDOW_WIDTH );
double h = glutGet( GLUT_WINDOW_HEIGHT );
gluPerspective( 60, w / h, 1, 100 );
glMatrixMode( GL_MODELVIEW );
glLoadIdentity();
gluLookAt( 8, 8, 8, 0, 0, 0, 0, 0, 1 );
// spinning light
glEnable( GL_LIGHT0 );
float angle = 20 * ( glutGet( GLUT_ELAPSED_TIME ) / 1000.0f ) * (3.14159f / 180.0f);
float x = cos( -angle ) * 6;
float y = sin( -angle ) * 6;
GLfloat light_position[] = { x, y, 2, 1.0 };
glLightfv( GL_LIGHT0, GL_POSITION, light_position );
glDisable( GL_LIGHTING );
glPointSize( 5 );
glBegin(GL_POINTS);
glColor3ub( 255, 255, 255 );
glVertex3fv( light_position );
glEnd();
glEnable( GL_LIGHTING );
glColor3ub(255,0,0);
glEnableClientState( GL_VERTEX_ARRAY );
glEnableClientState( GL_NORMAL_ARRAY );
glVertexPointer( 3, GL_FLOAT, sizeof( Vector3f ), heightmap(0,0).data() );
glNormalPointer( GL_FLOAT, sizeof( Vector3f ), normals(0,0).data() );
glDrawElements( GL_TRIANGLES, indices.size(), GL_UNSIGNED_INT, &indices[0] );
glDisableClientState( GL_VERTEX_ARRAY );
glDisableClientState( GL_NORMAL_ARRAY );
glutSwapBuffers();
}
void timer( int extra )
{
glutPostRedisplay();
glutTimerFunc( 16, timer, 0 );
}
int main( int argc, char **argv )
{
glutInit( &argc, argv );
glutInitDisplayMode( GLUT_RGBA | GLUT_DEPTH | GLUT_DOUBLE );
glutInitWindowSize( 640, 480 );
glutCreateWindow( "Heightmap" );
init();
glutDisplayFunc( display );
glutTimerFunc( 0, timer, 0 );
glutMainLoop();
return 0;
}

C++ algorithm for running a command after detecting a square

I have OpenCV code for detecting squares, and now I want the code to run another command after it detects a square.
Here is the code:
#include "cv.h"
#include "cxcore.h"
#include "highgui.h"
#include "math.h"
#include <iostream>
#include <stdio.h>
#include <string.h>
#include <sstream>
using namespace std;
double angle( CvPoint* pt1, CvPoint* pt2, CvPoint* pt0 )
{
double dx1 = pt1->x - pt0->x;
double dy1 = pt1->y - pt0->y;
double dx2 = pt2->x - pt0->x;
double dy2 = pt2->y - pt0->y;
return (dx1*dx2 + dy1*dy2)/sqrt((dx1*dx1 + dy1*dy1)*(dx2*dx2 + dy2*dy2) + 1e-10);
} // angle
IplImage* findSquares4( IplImage* img, CvMemStorage* storage )
{
double s = 0, t = 0;
int sdetect =0, sqdetect = 0,sqt = 0;
CvSeq* contours;
CvSeq* result;
CvSeq* squares = cvCreateSeq( 0, sizeof( CvSeq), sizeof( CvPoint), storage );
IplImage* cny = cvCreateImage(cvGetSize(img), 8, 1);
cvCanny(img, cny, 5, 100, 3);
cvNamedWindow("canny",CV_WINDOW_AUTOSIZE);
cvShowImage("canny",cny);
cvFindContours( cny, storage, &contours, sizeof( CvContour), CV_RETR_LIST, CV_CHAIN_APPROX_SIMPLE, cvPoint( 0, 0) );
while( contours )
{
result = cvApproxPoly( contours,
sizeof( CvContour),
storage,
CV_POLY_APPROX_DP,
cvContourPerimeter( contours)*0.02, 0 );
if( result->total == 4 &&
fabs( cvContourArea( result, CV_WHOLE_SEQ)) > 1000 &&
cvCheckContourConvexity( result) )
{
s = 0;
for( int i = 2; i < 5; i++ )
{
{
t = fabs( angle(
( CvPoint*)cvGetSeqElem( result, i ),
( CvPoint*)cvGetSeqElem( result, i-2 ),
( CvPoint*)cvGetSeqElem( result, i-1 )));
s = s > t ? s : t;
cout << "s = " << s<< endl;
cout << "t = " << t<< endl;
}
} // for
if( s < 0.3 )
for( int i = 0; i < 4; i++ )
cvSeqPush( squares,
( CvPoint*)cvGetSeqElem( result, i ));
} // if
contours = contours->h_next;
} // while
if ((squares->total/4) == 1)
{
sdetect = 1;
} // if
else
{
sdetect = 2;
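// note: if this is POSIX sleep(), the argument is whole seconds, so sleep(0.5) truncates to sleep(0)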
sleep(0.5);
} // else
if (sqdetect != sdetect)
{
sqdetect=sdetect;
switch(sqdetect)
{
case 0 : system(NULL) ; break;
case 2 : cout<< "no "<< endl; break;
case 1 : system("./ambil1.sh"); break;
} // switch
} // if
sdetect = 0;
cout<<"Persegi : "<< squares->total/4 <<endl;
cvReleaseMemStorage(&storage);
cvClearSeq(squares);
} // findSquares4
void drawSquares(IplImage *img, CvSeq* squares )
{
CvFont font;
cvInitFont( &font, CV_FONT_HERSHEY_SIMPLEX, 0.4f, 0.4f, 0,1, 8 );
int i,j,sdetect,sqdetect = 0;
CvSeqReader reader;
cvStartReadSeq( squares, &reader, 0 );
for( i = 0; i < squares->total; i += 4 )
{
j++;
CvPoint pt[4], *rect = pt;
int count = 4;
// read 4 vertices
memcpy( pt, reader.ptr, squares->elem_size );
CV_NEXT_SEQ_ELEM( squares->elem_size, reader );
memcpy( pt + 1, reader.ptr, squares->elem_size );
CV_NEXT_SEQ_ELEM( squares->elem_size, reader );
memcpy( pt + 2, reader.ptr, squares->elem_size );
CV_NEXT_SEQ_ELEM( squares->elem_size, reader );
memcpy( pt + 3, reader.ptr, squares->elem_size );
CV_NEXT_SEQ_ELEM( squares->elem_size, reader );
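// note: the next line indexes pt with i, the sequence index, which runs past pt[0..3] after the first square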
cvPutText( img, "SQUARE", pt[i], &font, CV_RGB(20,255,0));
cvPolyLine( img, &rect, &count, 1, 1, CV_RGB(200,0,0), 4, CV_AA, 0 );
} // for
cvClearSeq(squares);
} // drawSquares
But the program becomes laggy. What is the right way to structure the condition for executing system("./ambil1.sh")?
system() waits until the subordinate command finishes, and during this time your program will not respond.
If that's what you need, system is adequate.
If you want to fire and forget, you can either use the fork/exec pair or just use the shell's & operator to run the command in the background (see the sketch below).
If you need to fire the command, change your program's behaviour, and then change it back when the subordinate command finishes, you pretty much have to use the fork/exec/SIGCHLD/waitpid combination.
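For the fire-and-forget case, a minimal POSIX sketch (my own illustration; it assumes ./ambil1.sh is the script from the question) could look like this:
#include <unistd.h>
#include <csignal>
// launch the script without blocking the detection loop
void runDetectedCommand( )
{
    signal( SIGCHLD, SIG_IGN );  // don't leave zombie children behind
    pid_t pid = fork( );
    if ( pid == 0 )
    {
        // child: replace this process image with the script
        execl( "./ambil1.sh", "ambil1.sh", (char*)NULL );
        _exit( 1 );  // only reached if execl fails
    }
    // parent (pid > 0) returns immediately and keeps processing frames;
    // pid < 0 means fork failed and nothing was launched
}
The shell-background alternative is even shorter: system("./ambil1.sh &") returns as soon as the shell has launched the script.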