I have a simple model of a spacecraft built with the GLUT libraries, and I need the model to rotate by angles that are computed at runtime. I've watched a lot of tutorials, but I can't get the animation to work: the program simply closes when I compile and run it with the animation code. Please help.
namespace Runge_Kuta_1._6
{
public partial class Form3 : Form
{
public Form3()
{
InitializeComponent();
AnT1.InitializeContexts();
Glut.glutTimerFunc(50, Timer, 0);
}
void Timer(int value)
{
Display();
Gl.glRotated(1, 0, 0, 1);
Glut.glutPostRedisplay();
}
private void AnT1_Load(object sender, EventArgs e)
{
// initialize GLUT
Glut.glutInit();
Glut.glutInitDisplayMode(Glut.GLUT_RGB | Glut.GLUT_DOUBLE | Glut.GLUT_DEPTH);
// clear the window
Gl.glClearColor(255, 255, 255, 1);
// set the viewport to match the dimensions of the AnT1 control
Gl.glViewport(0, 0, AnT1.Width, AnT1.Height);
// set up the projection
Gl.glMatrixMode(Gl.GL_PROJECTION);
Gl.glLoadIdentity();
Glu.gluPerspective(45, (float)AnT1.Width / (float)AnT1.Height, 0.1, 200);
Gl.glMatrixMode(Gl.GL_MODELVIEW);
Gl.glLoadIdentity();
// configure OpenGL state for rendering
Gl.glEnable(Gl.GL_DEPTH_TEST);
Gl.glClear(Gl.GL_COLOR_BUFFER_BIT | Gl.GL_DEPTH_BUFFER_BIT);
Draw();
}
void Draw()
{
//base
Gl.glLoadIdentity();
Gl.glColor3f(0.502f, 0.502f, 0.502f);
Gl.glPushMatrix();
Gl.glTranslated(-1, 0, -6);
Gl.glRotated(95, 1, 0, 0);
Glut.glutSolidCylinder(0.7, 2, 60, 60);
//base wireframe
Gl.glLoadIdentity();
Gl.glColor3f(0, 0, 0);
Gl.glPushMatrix();
Gl.glTranslated(-1, 0, -6);
Gl.glRotated(95, 1, 0, 0);
Glut.glutWireCylinder(0.7, 2, 20, 20);
//joint #1
Gl.glLoadIdentity();
Gl.glColor3f(0.502f, 0.502f, 0.502f);
Gl.glPushMatrix();
Gl.glTranslated(-1, 1, -6);
Gl.glRotated(95, 1, 0, 0);
Glut.glutSolidCylinder(0.02, 1, 40, 40);
//joint #1 wireframe
Gl.glLoadIdentity();
Gl.glColor3f(0, 0, 0);
Gl.glPushMatrix();
Gl.glTranslated(-1, 1, -6);
Gl.glRotated(95, 1, 0, 0);
Glut.glutWireCylinder(0.02, 1, 5, 5);
//antenna
Gl.glLoadIdentity();
Gl.glColor3f(0.502f, 0.502f, 0.502f);
Gl.glPushMatrix();
Gl.glTranslated(-1, 1, -6);
Gl.glRotated(170, 1, 0, 0);
Glut.glutSolidCylinder(0.4, 0.05, 40, 40);
//antenna wireframe
Gl.glLoadIdentity();
Gl.glColor3f(0, 0, 0);
Gl.glPushMatrix();
Gl.glTranslated(-1, 1, -6);
Gl.glRotated(170, 1, 0, 0);
Glut.glutWireCylinder(0.4, 0.05, 20, 20);
//joint #2
Gl.glLoadIdentity();
Gl.glColor3f(0.502f, 0.502f, 0.502f);
Gl.glPushMatrix();
Gl.glTranslated(-0.5, 0.025, -6);
Gl.glRotated(178, 1, 0, 1);
Glut.glutSolidCylinder(0.02, 0.8, 40, 40);
//joint #2 wireframe
Gl.glLoadIdentity();
Gl.glColor3f(0, 0, 0);
Gl.glPushMatrix();
Gl.glTranslated(-0.5, 0.025, -6);
Gl.glRotated(178, 1, 0, 1);
Glut.glutWireCylinder(0.02, 0.8, 2, 2);
//antenna #2
Gl.glLoadIdentity();
Gl.glColor3f(0.502f, 0.502f, 0.502f);
Gl.glPushMatrix();
Gl.glTranslated(0.35, 0.025, -6.5);
Gl.glRotated(178, 1, 0, 1);
Glut.glutSolidCylinder(0.4, 0.05, 20, 20);
//antenna #2 wireframe
Gl.glLoadIdentity();
Gl.glColor3f(0, 0, 0);
Gl.glPushMatrix();
Gl.glTranslated(0.35, 0.025, -6.5);
Gl.glRotated(178, 1, 0, 1);
Glut.glutWireCylinder(0.4, 0.05, 10, 10);
//joint #3
Gl.glLoadIdentity();
Gl.glColor3f(0.502f, 0.502f, 0.502f);
Gl.glPushMatrix();
Gl.glTranslated(-1, 0.2, -4.5);
Gl.glRotated(95, 1, 0, 0);
Glut.glutSolidCylinder(0.02, 0.2, 40, 40);
//joint #3 wireframe
Gl.glLoadIdentity();
Gl.glColor3f(0, 0, 0);
Gl.glPushMatrix();
Gl.glTranslated(-1, 0.2, -4.5);
Gl.glRotated(95, 1, 0, 0);
Glut.glutWireCylinder(0.02, 0.2, 5, 5);
//antenna #3
Gl.glLoadIdentity();
Gl.glColor3f(0.502f, 0.502f, 0.502f);
Gl.glPushMatrix();
Gl.glTranslated(-1, 0.3, -4.48);
Gl.glRotated(180, 1, 0, 0);
Glut.glutSolidCylinder(0.25, 0.05, 20, 20);
//antenna #3 wireframe
Gl.glLoadIdentity();
Gl.glColor3f(0, 0, 0);
Gl.glPushMatrix();
Gl.glTranslated(-1, 0.3, -4.48);
Gl.glRotated(180, 1, 0, 0);
Glut.glutWireCylinder(0.25, 0.05, 20, 20);
//joint #4
Gl.glLoadIdentity();
Gl.glColor3f(0.502f, 0.502f, 0.502f);
Gl.glPushMatrix();
Gl.glTranslated(-0.45, 0.4, -5.5);
Gl.glRotated(95, 1, 0, 0);
Glut.glutSolidCylinder(0.02, 0.4, 40, 40);
//joint #4 wireframe
Gl.glLoadIdentity();
Gl.glColor3f(0, 0, 0);
Gl.glPushMatrix();
Gl.glTranslated(-0.45, 0.4, -5.5);
Gl.glRotated(95, 1, 0, 0);
Glut.glutWireCylinder(0.02, 0.4, 10, 10);
//antenna #4
Gl.glLoadIdentity();
Gl.glColor3f(0.502f, 0.502f, 0.502f);
Gl.glPushMatrix();
Gl.glTranslated(-0.45, 0.55, -5.48);
Gl.glRotated(180, 1, 0, 0);
Glut.glutSolidCylinder(0.45, 0.05, 20, 20);
//antenna #4 wireframe
Gl.glLoadIdentity();
Gl.glColor3f(0, 0, 0);
Gl.glPushMatrix();
Gl.glTranslated(-0.45, 0.55, -5.48);
Gl.glRotated(180, 1, 0, 0);
Glut.glutWireCylinder(0.45, 0.05, 20, 20);
Gl.glPopMatrix();
Gl.glFlush();
AnT1.Invalidate();
}
void Display()
{
Gl.glClearColor(255, 255, 255, 1);
x[0] = x0; y[0] = y0; z[0] = z0;
Gamma[0] = Gamma0; Psi[0] = Psi0; Phi[0] = Phi0;
while (t0 <= T)
{
Gl.glPushMatrix();
Label:
for (int i = 0; i <= 3; i++)
{
fx = -y[i] - z[i];
fy = x[i] + 0.2 * y[i];
fz = 0.2 + (x[i] - 5.7) * z[i];
Kx[i] = h * fx;
Ky[i] = h * fy;
Kz[i] = h * fz;
x[1] = x[0] + 0.5 * Kx[0];
x[2] = x[0] + 0.5 * Kx[1];
x[3] = x[0] + Kx[2];
y[1] = y[0] + 0.5 * Ky[0];
y[2] = y[0] + 0.5 * Ky[1];
y[3] = y[0] + Ky[2];
z[1] = z[0] + 0.5 * Kz[0];
z[2] = z[0] + 0.5 * Kz[1];
z[3] = z[0] + Kz[2];
}
Ex = Math.Abs(2 * (Kx[0] - Kx[1] - Kx[2] + Kx[3]) * divBy3);
Ey = Math.Abs(2 * (Ky[0] - Ky[1] - Ky[2] + Ky[3]) * divBy3);
Ez = Math.Abs(2 * (Kz[0] - Kz[1] - Kz[2] + Kz[3]) * divBy3);
Eps1 = Eps * divBy8;
if (Ex > Eps | Ey > Eps | Ez > Eps)
{
h /= 2;
goto Label;
}
for (int i = 0; i <= 3; i++)
{
fGamma = x[0] * System.Math.Sin(Phi[0]) + y[0] * System.Math.Cos(Phi[0]);
fPsi = 1 / (System.Math.Cos(Gamma[0])) * (x[0] * System.Math.Cos(Phi[0]) - y[0] * System.Math.Sin(Phi[0]));
fPhi = z[0] - (System.Math.Sin(Gamma[0]) / (System.Math.Cos(Gamma[0]))) * (x[0] * System.Math.Cos(Phi[0]) - y[0] * System.Math.Sin(Phi[0]));
KGamma[i] = h * fGamma;
KPsi[i] = h * fPsi;
KPhi[i] = h * fPhi;
Gamma[1] = Gamma[0] + 0.5 * KGamma[0];
Gamma[2] = Gamma[0] + 0.5 * KGamma[1];
Gamma[3] = Gamma[0] + KGamma[2];
Psi[1] = Psi[0] + 0.5 * KPsi[0];
Psi[2] = Psi[0] + 0.5 * KPsi[1];
Psi[3] = Psi[0] + KPsi[2];
Phi[1] = Phi[0] + 0.5 * KPhi[0];
Phi[2] = Phi[0] + 0.5 * KPhi[1];
Phi[3] = Phi[0] + KPhi[2];
}
xx = x[0] + (Kx[0] + 2 * Kx[1] + 2 * Kx[2] + Kx[3]) * divBy6;
yy = y[0] + (Ky[0] + 2 * Ky[1] + 2 * Ky[2] + Ky[3]) * divBy6;
zz = z[0] + (Kz[0] + 2 * Kz[1] + 2 * Kz[2] + Kz[3]) * divBy6;
GGamma = Gamma[0] + (KGamma[0] + 2 * KGamma[1] + 2 * KGamma[2] + KGamma[3]) * divBy6;
PPsi = Psi[0] + (KPsi[0] + 2 * KPsi[1] + 2 * KPsi[2] + KPsi[3]) * divBy6;
PPhi = Phi[0] + (KPhi[0] + 2 * KPhi[1] + 2 * KPhi[2] + KPhi[3]) * divBy6;
Gl.glRotated(Gamma[0], 1, 0, 0);
Gl.glRotated(Psi[0], 0, 1, 0);
Gl.glRotated(Phi[0], 0, 0, 1);
Gl.glPopMatrix();
x[0] = xx;
y[0] = yy;
z[0] = zz;
Gamma[0] = GGamma;
Psi[0] = PPsi;
Phi[0] = PPhi;
t0 += h;
Glut.glutMainLoop();
}
}
private void button1_Click(object sender, EventArgs e)
{
Application.Exit();
}
}
}
I see two problems from a quick scan through the code:
Your glPushMatrix() and glPopMatrix() calls are unbalanced. As the names of these functions suggest, they manage a matrix stack. You need to pop as many stack entries as you push in your Draw() method; otherwise the stack will gradually grow and eventually overflow.
You added a glutMainLoop() call after my suggestion in the comment, but it doesn't look like it's in the right place. I'm not really familiar with C# program structure, but it's typically called after you have finished your initialization and registered all your GLUT callbacks. Based on everything else in there, that might be at the end of AnT1_Load(). But you may want to look for some C# OpenGL examples to see where they normally place these calls.
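For illustration, here is a minimal sketch of the balanced pattern each part of Draw() should follow (plain C/C++ OpenGL; the Tao wrapper you use exposes the same calls as Gl.glPushMatrix()/Gl.glPopMatrix()):
#include <GL/freeglut.h>  // FreeGLUT provides glutSolidCylinder
// Each part is drawn inside its own push/pop pair, so the modelview stack
// depth is the same before and after the call and never grows frame to frame.
static void drawPart(double tx, double ty, double tz, double angle)
{
    glPushMatrix();                       // save the current modelview matrix
    glTranslated(tx, ty, tz);
    glRotated(angle, 1.0, 0.0, 0.0);
    glutSolidCylinder(0.7, 2.0, 60, 60);
    glPopMatrix();                        // restore it: exactly one pop per push
}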
I wrote a Kalman filter implementation using the Eigen library in C++, and I am also using the implementation at this link to test my filter. My prediction step looks like this:
void KalmanFilter::Predict()
{
// state Estimate = state transition matrix * previous state
// No control input present.
x = A * x;
// State Covariance Matrix = (State Transition Matrix * Previous State
// Covariance Matrix * (State Transition Matrix)^T) + Process Noise
P = A * P * A.transpose() + Q;
}
while my update step is:
void KalmanFilter::Update(VectorXd z)
{
//Kalman Gain = (State Covariance Matrix * Measurement matrix.transpose) * (H*P*H^T + Measurement Noise)^-1
K = (P * H.transpose()) * (H * P * H.transpose()+ R).inverse();
//Estimated Stated = Estimated state + Kalman Gain (Measurement Innovation)
x = x + K*(z - H * x);
//State Covariance matrix = (Identity Matrix of the size of x.size * x.size) - K* H * P;
long x_size = x.size();
MatrixXd I = MatrixXd::Identity(x_size, x_size);
P = (I - K * H) * P ;
}
My initial values are:
pos_x = 0.0;
pos_y = 0.0;
pos_z = 1.0;
vel_x = 10.0;
vel_y = 0.0;
vel_z = 0.0;
acc_x = 0.0;
acc_y = 0.0;
acc_z = -9.81;
and I'm generating "fake data" by doing the following in a loop:
double c = 0.1; // Drag resistance coefficient
double damping = 0.9 ; // Damping
double sigma_position = 0.1 ; // position_noise
// Create simulated position data
for (int i = 0; i < N; i ++)
{
acc_x = -c * pow(vel_x, 2); // calculate acceleration ( Drag Resistance)
vel_x += acc_x * dt; // Integrate acceleration to give you velocity in the x axis.
pos_x += vel_x * dt; // Integrate velocity to return the position in the x axis
acc_z = -9.806 + c * pow(vel_z, 2); // Gravitation + Drag
vel_z += acc_z * dt; // z axis velocity
pos_z += vel_z * dt; // position in z axis
// generate y position here later.
if(pos_z < 0.01)
{
vel_z = -vel_z * damping;
pos_z += vel_z * dt;
}
if (vel_x < 0.1)
{
acc_x = 0.0;
acc_z = 0.0;
}
// add some noise
pos_x = pos_x + sigma_position * process_noise(generator);
pos_y = pos_y + sigma_position * process_noise(generator);
pos_z = pos_z + sigma_position * process_noise(generator);
I then run my prediction and update step by:
// Prediction Step
kalmanFilter.Predict();
// Correction Step
kalmanFilter.Update(z);
where z is a 3 x 1 vector containing pos_x, pos_y and pos_z
My State Transition Matrix A looks like this:
A << 1, 0, 0, dt, 0, 0, dt_squared, 0 , 0,
0, 1, 0, 0, dt, 0, 0, dt_squared, 0,
0, 0, 1, 0, 0, dt, 0, 0, dt_squared,
0, 0, 0, 1, 0, 0, dt, 0, 0,
0, 0, 0, 0, 1, 0, 0 , dt, 0,
0, 0, 0, 0, 0, 1, 0, 0, dt,
0, 0, 0, 0, 0, 0, 1, 0, 0,
0, 0, 0, 0, 0, 0, 0, 1, 0,
0, 0, 0, 0, 0, 0, 0, 0, 1;
where dt_squared is (dt * dt) /2;
P is
P<< 100, 0, 0, 0, 0, 0, 0, 0, 0,
0, 100, 0, 0, 0, 0, 0, 0, 0,
0, 0, 100, 0, 0, 0, 0, 0, 0,
0, 0, 0, 100, 0, 0, 0, 0, 0,
0, 0, 0, 0, 100, 0, 0, 0, 0,
0, 0, 0, 0, 0, 100, 0, 0, 0,
0, 0, 0, 0, 0, 0, 100, 0, 0,
0, 0, 0, 0, 0, 0, 0, 100, 0,
0, 0, 0, 0, 0, 0, 0, 0, 100;
and
R << 1, 0, 0,
0, 1, 0,
0, 0, 1;
and
Q = G * G.transpose()* a * a;
where G is a 9 x 1 Matrix of
G << dt_squared, dt_squared, dt_squared, dt, dt, dt, 1, 1, 1;
a = 0.1; // acceleration process noise
My issue is that my estimated positions for y and z are off and diverge from the "real" positions. If you look at the following graphs:
This is what pos_x looks like:
This is what pos_y looks like:
And finally Z:
This is my first foray with Kalman filters and I'm not sure what I'm doing wrong here. My final goal would be to use this to estimate the position of a drone. Additionally, I have the following questions:
In a real-life situation, for a drone for example, how do you go about choosing your process noise if you can't directly observe the process? Do you simply pick arbitrary values?
My apologies for the long post. Any help is appreciated.
I am not sure if it's a code related issue, an algorithm implementation issue, or an expectation issue.
You do realize that a filter like this will NOT reproduce truth data, or even anything close to it, if the fake data has too much maneuvering in it.
Also, your graphs are not present.
I know my response doesn't follow community standards but I cannot comment or I'd do that.
Until you provide the plots and check the curvature of the path against the update rate I would not attempt to go into detail. Also filters need to be "tuned" to a specific system. You may need to play with noise parameters to tune it better. For maneuvering tracks one may need to go to higher order filters, Singer, or Jerk filters... The filter needs to model the system well enough. Based on your update matrix it looks like you have a parabolic (second order) estimate. You may also want to ask about this in other forums that are not s/w or code specific.
Every system has variances. Let's say the filter has a variance of ±1% and the real value has ±5%. When you predict a value, you have to decide, in the update, how much to trust the predicted value versus the measured value, depending on which one you believe more.
Otherwise your filter will always evolve based only on its own values...
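To make that trade-off concrete, here is a minimal, hypothetical 1-D sketch (not from the original post) showing how the Kalman gain weights the prediction against the measurement depending on the assumed variances:
#include <iostream>

int main()
{
    double x_pred = 10.0;  // predicted state
    double P = 1.0;        // prediction variance (trust in the filter)
    double R = 25.0;       // measurement variance (trust in the sensor)
    double z = 12.0;       // measurement

    double K = P / (P + R);                    // Kalman gain, between 0 and 1
    double x_est = x_pred + K * (z - x_pred);  // blended estimate
    double P_est = (1.0 - K) * P;              // reduced uncertainty

    // With R >> P the gain is small and the estimate stays near the prediction;
    // with R << P it moves almost all the way to the measurement.
    std::cout << "K = " << K << ", x_est = " << x_est << ", P_est = " << P_est << "\n";
    return 0;
}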
I am new to Visual Studio. I created a simple console application and then selected an empty C++ project. Then I pasted the following code:
#include "stdafx.h"
#include <cstdio>
#include <iostream>
#include <fstream>
#include "opencv2/core/core.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/calib3d/calib3d.hpp"
#include "opencv2/highgui/highgui.hpp"
#include <vector>
#include <sstream>
#include "GL/glut.h"
#include "GL/glu.h"
#include "GL/gl.h"
#include <math.h>
#include <time.h>
using namespace std;
using namespace cv;
const float zNear = 0.05;
const float zFar = 500.0;
int width, height;
int draw = 0;
Point FIX_X(0, 0), FIX_Y(0, 0), FIX_Z(0, 0);
float skew_x, skew_y, skew_z;
VideoCapture cap(0);
Mat tmp, test;
Mat intrinsic_Matrix(3, 3, CV_64F);
Mat distortion_coeffs(8, 1, CV_64F);
Mat Projection(4, 4, CV_64FC1);
double largest_area;
int largest_contour_index;
int n = 0;
int no_of_fingers = 0;
vector<vector<pair<float, Point>>> position;
vector<int> finger_count;
Point first, second, third;
float size_of_pot = 10;
int rot_angle = 10;
float distanceP2P(Point a, Point b) {
float d = sqrt(fabs(pow(a.x - b.x, 2) + pow(a.y - b.y, 2)));
return d;
}
float getAngle(Point s, Point f, Point e) {
float l1 = distanceP2P(f, s);
float l2 = distanceP2P(f, e);
float dot = (s.x - f.x)*(e.x - f.x) + (s.y - f.y)*(e.y - f.y);
float angle = acos(dot / (l1*l2));
angle = angle * 180 / 3.147;
return angle;
}
String intToString(int number) {
stringstream ss;
ss << number;
string str = ss.str();
return str;
}
bool pairCompare(const pair<float, Point>&i, const pair<float, Point>&j) {
return i.first <j.first;
}
GLfloat* convertMatrixType(const cv::Mat& m)
{
typedef double precision;
Size s = m.size();
GLfloat* mGL = new GLfloat[s.width*s.height];
for (int ix = 0; ix < s.width; ix++)
{
for (int iy = 0; iy < s.height; iy++)
{
mGL[ix*s.height + iy] = m.at<precision>(iy, ix);
}
}
return mGL;
}
void generateProjectionModelview(const cv::Mat& calibration, const cv::Mat& rotation, const cv::Mat& translation, cv::Mat& projection, cv::Mat& modelview)
{
typedef double precision;
projection.at<precision>(0, 0) = 2 * calibration.at<precision>(0, 0) / width;
projection.at<precision>(1, 0) = 0;
projection.at<precision>(2, 0) = 0;
projection.at<precision>(3, 0) = 0;
projection.at<precision>(0, 1) = 0;
projection.at<precision>(1, 1) = 2 * calibration.at<precision>(1, 1) / height;
projection.at<precision>(2, 1) = 0;
projection.at<precision>(3, 1) = 0;
projection.at<precision>(0, 2) = 1 - 2 * calibration.at<precision>(0, 2) / width;
projection.at<precision>(1, 2) = -1 + (2 * calibration.at<precision>(1, 2) + 2) / height;
projection.at<precision>(2, 2) = (zNear + zFar) / (zNear - zFar);
projection.at<precision>(3, 2) = -1;
projection.at<precision>(0, 3) = 0;
projection.at<precision>(1, 3) = 0;
projection.at<precision>(2, 3) = 2 * zNear*zFar / (zNear - zFar);
projection.at<precision>(3, 3) = 0;
modelview.at<precision>(0, 0) = rotation.at<precision>(0, 0);
modelview.at<precision>(1, 0) = rotation.at<precision>(1, 0);
modelview.at<precision>(2, 0) = rotation.at<precision>(2, 0);
modelview.at<precision>(3, 0) = 0;
modelview.at<precision>(0, 1) = rotation.at<precision>(0, 1);
modelview.at<precision>(1, 1) = rotation.at<precision>(1, 1);
modelview.at<precision>(2, 1) = rotation.at<precision>(2, 1);
modelview.at<precision>(3, 1) = 0;
modelview.at<precision>(0, 2) = rotation.at<precision>(0, 2);
modelview.at<precision>(1, 2) = rotation.at<precision>(1, 2);
modelview.at<precision>(2, 2) = rotation.at<precision>(2, 2);
modelview.at<precision>(3, 2) = 0;
modelview.at<precision>(0, 3) = translation.at<precision>(0, 0);
modelview.at<precision>(1, 3) = translation.at<precision>(1, 0);
modelview.at<precision>(2, 3) = translation.at<precision>(2, 0);
modelview.at<precision>(3, 3) = 1;
// This matrix corresponds to the change of coordinate systems.
static double changeCoordArray[4][4] = { { 1, 0, 0, 0 },{ 0, -1, 0, 0 },{ 0, 0, -1, 0 },{ 0, 0, 0, 1 } };
static Mat changeCoord(4, 4, CV_64FC1, changeCoordArray);
modelview = changeCoord*modelview;
}
void calibrate(Mat &intrinsic_Matrix, Mat &distortion_coeffs)
{
vector< vector< Point2f> > AllimagePoints;
vector< vector< Point3f> > AllobjectPoints;
char str[100];
stringstream st;
int no_of_images = 1;
Size imagesize;
Mat gray;
while (no_of_images <= 14)
{
st << "E:/SelectedImages/Selected" << ++no_of_images << ".jpg";
String strcopy3 = st.str();
st.str("");
Mat img = imread(strcopy3, 1);
if (!img.data)
break;
imagesize = Size(img.rows, img.cols);
cvtColor(img, gray, CV_RGB2GRAY);
vector< Point2f> corners;
bool sCorner = false;
sCorner = findChessboardCorners(gray, Size(7, 7), corners);
if (sCorner)
{
cornerSubPix(gray, corners, Size(11, 11), Size(-1, -1), TermCriteria(CV_TERMCRIT_EPS + CV_TERMCRIT_ITER, 30, 0.1));
drawChessboardCorners(img, Size(7, 7), corners, sCorner);
if (corners.size() == 7 * 7)
{
vector< Point2f> v_tImgPT;
vector< Point3f> v_tObjPT;
for (int j = 0; j< corners.size(); ++j)
{
Point2f tImgPT;
Point3f tObjPT;
tImgPT.x = corners[j].x;
tImgPT.y = corners[j].y;
tObjPT.x = j % 7 * 3;
tObjPT.y = j / 7 * 3;
tObjPT.z = 0;
v_tImgPT.push_back(tImgPT);
v_tObjPT.push_back(tObjPT);
}
AllimagePoints.push_back(v_tImgPT);
AllobjectPoints.push_back(v_tObjPT);
}
}
st << "E:/DetectedImages/Detected" << no_of_images + 1 << ".jpg";
String strcopy1 = st.str();
st.str("");
imwrite(strcopy1, img);
//imshow("pattern",img);
//cvWaitKey(30);
}
vector< Mat> rvecs, tvecs;
if (AllimagePoints.size()>0)
{
calibrateCamera(AllobjectPoints, AllimagePoints, imagesize, intrinsic_Matrix, distortion_coeffs, rvecs, tvecs);
}
}
void renderBackgroundGL(const cv::Mat& image)
{
GLint polygonMode[2];
glGetIntegerv(GL_POLYGON_MODE, polygonMode);
glPolygonMode(GL_FRONT, GL_FILL);
glPolygonMode(GL_BACK, GL_FILL);
glLoadIdentity();
gluOrtho2D(0.0, 1.0, 0.0, 1.0);
glMatrixMode(GL_MODELVIEW);
glLoadIdentity();
static bool textureGenerated = false;
static GLuint textureId;
if (!textureGenerated)
{
glGenTextures(1, &textureId);
glBindTexture(GL_TEXTURE_2D, textureId);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
textureGenerated = true;
}
// Copy the image to the texture.
glBindTexture(GL_TEXTURE_2D, textureId);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, image.size().width, image.size().height, 0, GL_BGR_EXT, GL_UNSIGNED_BYTE, image.data);
// Draw the image.
glEnable(GL_TEXTURE_2D);
glBegin(GL_TRIANGLES);
glNormal3f(0.0, 0.0, 1.0);
glTexCoord2f(0.0, 1.0);
glVertex3f(0.0, 0.0, 0.0);
glTexCoord2f(0.0, 0.0);
glVertex3f(0.0, 1.0, 0.0);
glTexCoord2f(1.0, 1.0);
glVertex3f(1.0, 0.0, 0.0);
glTexCoord2f(1.0, 1.0);
glVertex3f(1.0, 0.0, 0.0);
glTexCoord2f(0.0, 0.0);
glVertex3f(0.0, 1.0, 0.0);
glTexCoord2f(1.0, 0.0);
glVertex3f(1.0, 1.0, 0.0);
glEnd();
glDisable(GL_TEXTURE_2D);
// Clear the depth buffer so the texture forms the background.
glClear(GL_DEPTH_BUFFER_BIT);
// Restore the polygon mode state.
glPolygonMode(GL_FRONT, polygonMode[0]);
glPolygonMode(GL_BACK, polygonMode[1]);
}
void display(void)
{
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
namedWindow("live", 1);
Mat gray1, test, modelview, dis_img, thresh, img1;
Mat rvec(3, 1, DataType<double>::type);
Mat tvec(3, 1, DataType<double>::type);
modelview.create(4, 4, CV_64FC1);
//Projection.create(4, 4, CV_64FC1);
vector< Point2f> corners1;
vector< Point2f> imagePoints1;
vector< Point3f> objectPoints1;
largest_area = 0;
largest_contour_index = 0;
clock_t clock_1 = clock();
cap >> dis_img;
//resize(dis_img,dis_img,Size(180,180),0,0);
if (!dis_img.data)
{
exit(3);
}
img1 = dis_img.clone();
dis_img.copyTo(img1);
//resize(img1,img1,Size(180,180),0,0);
cvtColor(dis_img, dis_img, COLOR_BGR2YCrCb);
inRange(dis_img, Scalar(0, 133, 77), Scalar(255, 173, 127), thresh);
clock_t clock_2 = clock();
cout << "threshold(Skin Color Segmentation) time is :" << (double)(clock_2 - clock_1) << endl;
dilate(thresh, thresh, Mat());
blur(thresh, thresh, Size(5, 5), Point(-1, -1), BORDER_DEFAULT);
vector<vector<Point>> contours;
vector<Point> FingerTips;
vector<Vec4i> hierachy;
vector<Vec4i> defects;
vector<Point> defect_circle;
vector<vector<Point>> hull(1);
Point2f center;
float radius;
clock_t clock_3 = clock();
cout << "image filtering (smoothing) time is :" << (double)(clock_3 - clock_2) << endl;
findContours(thresh, contours, hierachy, CV_RETR_EXTERNAL, CV_CHAIN_APPROX_NONE);
//cout<<"contour"<<endl;
int cont_size = contours.size();
for (int i = 0; i<cont_size; i++)
{
double a = contourArea(contours[i], false);
if (a>largest_area)
{
largest_area = a;
largest_contour_index = i;
}
}
vector<int> hull_index;
Rect brect;
if (largest_area>0 && contours[largest_contour_index].size()>5)
{
approxPolyDP(contours[largest_contour_index], contours[largest_contour_index], 8, true);
//cout<<"approx_poly"<<endl;
convexHull(Mat(contours[largest_contour_index]), hull[0], false, true);
// cout<<"convex_hull"<<endl;
brect = boundingRect(contours[largest_contour_index]);
//cout<<"bounding_rect"<<endl;
convexHull(Mat(contours[largest_contour_index]), hull_index, true);
//cout<<"convex_hull2"<<endl;
convexityDefects(contours[largest_contour_index], hull_index, defects);
//cout<<"convexity defect"<<endl;
// Moments mom=moments(contours[largest_contour_index]);
// draw mass center
// circle(img,Point(mom.m10/mom.m00,mom.m01/mom.m00),2,cv::Scalar(0),2);
Scalar colorw = Scalar(0, 255, 0);
Scalar color1 = Scalar(0, 0, 255);
//drawContours(img,contours,largest_contour_index,color,2, 8, hierachy);
//drawContours(timg,contours,largest_contour_index,color,1, 8, hierachy);
//drawContours(timg, hull, 0, color1, 1, 8, vector<Vec4i>(), 0, Point() );
// drawContours(img, hull, 0, color1, 2, 8, vector<Vec4i>(), 0, Point() );
int defc_size = defects.size();
Point ptStart;
Point ptEnd;
Point ptStart2;
Point ptEnd2;
Point ptFar;
int count = 1;
int startidx2;
int endidx2;
int tolerance = brect.height / 5;
float angleTol = 95;
for (int in = 0; in<defc_size; in++)
{
//Vec4i& v=(*d); d++;
int startidx = defects[in].val[0]; ptStart = contours[largest_contour_index].at(startidx);
int endidx = defects[in].val[1]; ptEnd = contours[largest_contour_index].at(endidx);
int faridx = defects[in].val[2]; ptFar = contours[largest_contour_index].at(faridx);
if (in + 1<defc_size)
startidx2 = defects[in + 1].val[0]; ptStart = contours[largest_contour_index].at(startidx);
endidx2 = defects[in + 1].val[1]; ptEnd = contours[largest_contour_index].at(endidx);
if (distanceP2P(ptStart, ptFar) > tolerance && distanceP2P(ptEnd, ptFar) > tolerance && getAngle(ptStart, ptFar, ptEnd) < angleTol) {
{
if (in + 1<defc_size)
{
if (distanceP2P(ptStart, ptEnd2) < tolerance)
contours[largest_contour_index][startidx] = ptEnd2;
else {
if (distanceP2P(ptEnd, ptStart2) < tolerance)
contours[largest_contour_index][startidx2] = ptEnd;
}
}
defect_circle.push_back(ptFar);
// cout<<"ptfar"<<ptFar.x<<"&&"<<ptFar.y<<endl;
if (count == 1)
{
FingerTips.push_back(ptStart);
cv::circle(img1, ptStart, 2, Scalar(0, 255, 0), 2);
putText(img1, intToString(count), ptStart - Point(0, 30), FONT_HERSHEY_PLAIN, 1.2f, Scalar(255, 0, 0), 2);
}
FingerTips.push_back(ptEnd);
count++;
putText(img1, intToString(count), ptEnd - Point(0, 30), FONT_HERSHEY_PLAIN, 1.2f, Scalar(255, 0, 0), 2);
cv::circle(img1, ptEnd, 2, Scalar(0, 255, 0), 2);
//cv::circle( img, ptFar, 2, Scalar(255,255,255 ), 2 );
}
}
}
// circle(img, ptStart,2,Scalar(0xFF,0x60,0x02 ), 2, 8, 0 );
//cv::circle( img, ptEnd, 4, Scalar( 0xFF,0x60,0x02 ), 2 );
clock_t clock_4 = clock();
cout << "fingerTip detection time is :" << (double)(clock_4 - clock_3) << endl;
// cout<<"hii"<<endl;
bool two_fn = false;
bool five_fn = false;
if (defect_circle.size() == 1)
{
two_fn = true;
Point fn = FingerTips.back();
FingerTips.pop_back();
Point ln = FingerTips.back();
FingerTips.pop_back();
Point defect_point = defect_circle.back();
float curr = getAngle(fn, defect_point, ln);
curr = curr / 10;
curr = 10 - curr;
renderBackgroundGL(img1);
objectPoints1.push_back(Point3d(9, 6, 0));
imagePoints1.push_back(defect_point);
objectPoints1.push_back(Point3d(9, 6, 0));
imagePoints1.push_back(defect_point);
objectPoints1.push_back(Point3d(19, 6, 0));
imagePoints1.push_back(fn);
objectPoints1.push_back(Point3d(9, 18, 0));
imagePoints1.push_back(ln);
// cout<<width<<" &"<<height<<endl;
// cout<<"solvepnp"<<endl;
solvePnP(Mat(objectPoints1), Mat(imagePoints1), intrinsic_Matrix, distortion_coeffs, rvec, tvec);
cv::Mat rotation;
cv::Rodrigues(rvec, rotation);
double offsetA[3][1] = { 9,6,6 };
Mat offset(3, 1, CV_64FC1, offsetA);
tvec = tvec + rotation*offset;
generateProjectionModelview(intrinsic_Matrix, rotation, tvec, Projection, modelview);
glMatrixMode(GL_PROJECTION);
GLfloat* projection = convertMatrixType(Projection);
glLoadMatrixf(projection);
delete[] projection;
glMatrixMode(GL_MODELVIEW);
GLfloat* modelView = convertMatrixType(modelview);
glLoadMatrixf(modelView);
delete[] modelView;
//glTranslatef(0.0f,0.0f,-5.0f);
glPushMatrix();
glColor3f(1.0, 0.0, 0.0);
glutWireTeapot(10.0 / curr);
glPopMatrix();
glColor3f(1.0, 1.0, 1.0);
}
//Rotation Module
if (defect_circle.size() == 4)
{
five_fn = true;
minEnclosingCircle(defect_circle, center, radius);
//circle(img, center, (int)radius,Scalar(255,255,255), 2, 8, 0 );
circle(img1, center, 2, Scalar(0), 2, 8, 0);
vector<pair<float, Point>> pos;
for (int in = 0; in<FingerTips.size(); in++)
{
Point p = FingerTips.back();
FingerTips.pop_back();
//if(in==0)
//{
pos.push_back(make_pair(distanceP2P(center, p), p));
//position.push_back(pos);
}
// }
//else
// {
// cout<<"size is"<<position.size()<<endl;
// position[n].push_back(make_pair(distanceP2P(center,p),p));
//}
sort(pos.begin(), pos.end(), pairCompare);
// vector<pair<float,Point>> now=position[i].back();
first = pos.back().second;
pos.pop_back();
//cout<<"new value :"<<new1.x<<" && "<<new1.y<<endl;
second = pos.back().second;
pos.pop_back();
third = pos.back().second;
pos.pop_back();
if (third.y<second.y&&second.y<first.y)
{
// cout<<"vertical pose"<<endl;
FIX_X.x = center.x + 40;
FIX_X.y = center.y;
FIX_Y.x = center.x;
FIX_Y.y = center.y - 40;
}
skew_x = getAngle(first, center, FIX_X);
skew_y = getAngle(third, center, FIX_Y);
cout << skew_x << "&" << skew_y << endl;
if (first.x<img1.cols)
line(img1, center, first, Scalar(200, 200, 200), 2, 8, 0);
line(img1, center, FIX_X, Scalar(200, 200, 200), 2, 8, 0);
if (second.x<img1.cols)
line(img1, center, second, Scalar(0, 255, 0), 2, 8, 0);
if (third.x<img1.cols)
line(img1, center, third, Scalar(0, 0, 255), 2, 8, 0);
line(img1, center, FIX_Y, Scalar(0, 0, 255), 2, 8, 0);
// line(img1,center,first,Scalar(255,255,255),2,8,0);
// line(img1,center,second,Scalar(0,255,255),2,8,0);
// line(img1,center,third,Scalar(0,0,255),2,8,0);
renderBackgroundGL(img1);
/* cvtColor(test, gray1, CV_RGB2GRAY);
bool sCorner1=findChessboardCorners(gray1, Size(7, 7), corners1);
imshow("live",test);
if(sCorner1)
{
cornerSubPix(gray1, corners1, Size(11,11), Size(-1,-1), TermCriteria(CV_TERMCRIT_EPS+CV_TERMCRIT_ITER, 30, 0.1));
if(corners1.size() == 7*7)
{
for(int j=0; j< corners1.size(); ++j)
{
Point2f tImgPT;
Point3f tObjPT;
tImgPT.x = corners1[j].x;
tImgPT.y = corners1[j].y;
tObjPT.x = j%7*3;
tObjPT.y = j/7*3;
tObjPT.z = 0;
imagePoints1.push_back(tImgPT);
objectPoints1.push_back(tObjPT);
}
vector<Point2f> projectedPoints;
vector<Point3f> axis;
axis.push_back(Point3f(6,0,0));
axis.push_back(Point3f(0,6,0));
axis.push_back(Point3f(0,0,6)); */
objectPoints1.push_back(Point3d(9, 6, 0));
imagePoints1.push_back(center);
objectPoints1.push_back(Point3d(9, 18, 0));
imagePoints1.push_back(first);
objectPoints1.push_back(Point3d(19, 6, 0));
imagePoints1.push_back(third);
objectPoints1.push_back(Point3d(15, 15, 0));
imagePoints1.push_back(second);
// cout<<width<<" &"<<height<<endl;
// cout<<"solvepnp"<<endl;
solvePnP(Mat(objectPoints1), Mat(imagePoints1), intrinsic_Matrix, distortion_coeffs, rvec, tvec);
cv::Mat rotation;
cv::Rodrigues(rvec, rotation);
double offsetA[3][1] = { 9,6,0 };
Mat offset(3, 1, CV_64FC1, offsetA);
tvec = tvec + rotation*offset;
generateProjectionModelview(intrinsic_Matrix, rotation, tvec, Projection, modelview);
/* double offsetA[3][1] = {{(7-1.0)/2.0}, {(7-1.0)/2.0}, {0}};
Mat offset(3, 1, CV_64FC1, offsetA);
tvec = tvec + rotation*offset;
for(unsigned int row=0; row<3; ++row)
{
for(unsigned int col=0; col<3; ++col)
{
modelview.at<float>(row, col) = rotation.at<float>(row, col);
cout<<modelview.at<float>(row,col)<<endl;
}
modelview.at<float>(row, 3) = tvec.at<float>(row, 0);
}
modelview.at<float>(3, 3) = 1.0f;
cout<<endl;
static float changeCoordArray[4][4] = {{-1, 0, 0, 0}, {0, 0, 0, 0}, {0, 0, 0, 0}, {0, 0, 0, 1}};
static Mat changeCoord(4, 4, CV_64FC1, changeCoordArray);
modelview = changeCoord*modelview;
cv::Mat glmodelview = cv::Mat::zeros(4, 4, CV_64F);
transpose(modelview , glmodelview);
gluLookAt(0.0,2.0,-50.0,0.0,0.5,0.0,0.0,1.0,0.0);
/* glMatrixMode(GL_PROJECTION);
glLoadIdentity();
float fx=intrinsic_Matrix.at<float>(0,0);
float fy=intrinsic_Matrix.at<float>(1,1);
float cf=(2*atanf(0.5*height/fy)*180/3.14);
float aspect=(width*fy)/(height*fx);
//gluPerspective(cf,1.0, zNear, zFar);
glMatrixMode(GL_MODELVIEW);
glLoadIdentity();
glLoadMatrixf(&glmodelview.at<float>(0,0)); */
glMatrixMode(GL_PROJECTION);
GLfloat* projection = convertMatrixType(Projection);
glLoadMatrixf(projection);
delete[] projection;
glMatrixMode(GL_MODELVIEW);
GLfloat* modelView = convertMatrixType(modelview);
glLoadMatrixf(modelView);
delete[] modelView;
//glTranslatef(0.0f,0.0f,-5.0f);
glPushMatrix();
glColor3f(1.0, 0.0, 0.0);
glRotatef(skew_x, 1.0, 0.0, 0.0);
glRotatef(skew_y, 0.0, 1.0, 0.0);
glutWireTeapot(10.0);
glPopMatrix();
glColor3f(1.0, 1.0, 1.0);
clock_t clock_5 = clock();
cout << "interaction time is :" << (double)(clock_5 - clock_4) << endl;
}
imshow("live", img1);
cout << "----------------------------------------------" << endl;
glFlush();
glutSwapBuffers();
}
waitKey(27);
glutPostRedisplay();
}
void reshape(int x, int y)
{
width = x; height = y;
glViewport(0, 0, width, height);
glMatrixMode(GL_PROJECTION);
glLoadIdentity();
/*Projection.at<float>(0,0) = 2*intrinsic_Matrix.at<float>(0,0)/width;
Projection.at<float>(1,0) = 0;
Projection.at<float>(2,0) = 0;
Projection.at<float>(3,0) = 0;
Projection.at<float>(0,1) = 0;
Projection.at<float>(1,1) = 2*intrinsic_Matrix.at<float>(1,1)/height;
Projection.at<float>(2,1) = 0;
Projection.at<float>(3,1) = 0;
Projection.at<float>(0,2) = 1-2*intrinsic_Matrix.at<float>(0,2)/width;
Projection.at<float>(1,2) = -1+(2*intrinsic_Matrix.at<float>(1,2)+2)/height;
Projection.at<float>(2,2) = (zNear+zFar)/(zNear - zFar);
Projection.at<float>(3,2) = -1;
Projection.at<float>(0,3) = 0;
Projection.at<float>(1,3) = 0;
Projection.at<float>(2,3) = 2*zNear*zFar/(zNear - zFar);
Projection.at<float>(3,3) = 0;
cv::Mat projection = cv::Mat::zeros(4, 4, CV_64F);
transpose(Projection ,projection);
glLoadMatrixf(&projection.at<float>(0,0)); */
// gluPerspective(60, (GLfloat)width / (GLfloat)height, 1.0, 100.0);
/* float fx=intrinsic_Matrix.at<float>(0,0);
float fy=intrinsic_Matrix.at<float>(1,1);
float cf=(2*atanf(0.5*height/fy)*180/3.14);
cout<<fx<<" "<<fy<<endl;
float aspect=(width*fy)/(height*fx); */
//gluPerspective(cf,CALIB_FIX_ASPECT_RATIO, zNear, zFar);
//glMatrixMode(GL_MODELVIEW);
// gluPerspective(60,width/height, zNear, zFar);
//glOrtho(-100,100,-100.0,100,zNear, zFar);
}
void init()
{
glClearColor(0.0f, 0.0f, 0.0f, 0.0f);
}
void main()
{
if (!cap.isOpened())
{
exit(-1);
}
cap >> test;
if (!test.data)
{
exit(-1);
}
// resize(test,test,Size(180,180),0,0);
width = test.cols;
height = test.rows;
cout << width << endl;
calibrate(intrinsic_Matrix, distortion_coeffs);
glutInitDisplayMode(GLUT_RGB | GLUT_DOUBLE | GLUT_DEPTH);
glutInitWindowSize(width, height);
glutCreateWindow("code4change");
init();
glutDisplayFunc(display);
glutReshapeFunc(reshape);
glutMainLoop();
}
But when I compile the code, I always get the error message "Cannot open include file: 'stdafx.h'". What should I do? As far as I know, stdafx.h is a C++ header file.
Check if you have the file in your project. If you don't (you may have removed it by mistake), just create a new temporary project and copy it from there.
I figured out that it's not necessary to include the "stdafx.h" header file in your program. It's used by Microsoft Visual Studio for precompiled headers, i.e. to let the compiler know which files have already been compiled once and don't need to be compiled from scratch every time.
For a better understanding you can refer to
http://www.cplusplus.com/articles/1TUq5Di1/
Error C1083 is showing because you have included "stdafx.h" (#include "stdafx.h") but you don't have the file in your project. Add stdafx.h and stdafx.cpp (Project -> Add -> Existing Item) from another solution if the present solution doesn't have them.
Enable precompiled headers in all configurations for all *.cpp files. It can be done on the "Precompiled Header" tab:
Set the value "Use (/Yu)" for the "Precompiled Header" option.
Set "stdafx.h" for the "Precompiled Header File" option.
Set "$(IntDir)$(TargetName).pch" for the "Precompiled Header Output File"
option.
Create an stdafx.h file, and add it into the project.
Create an stdafx.cpp file, and add it into the project. This file has only one line: #include "stdafx.h".
Change the settings for the stdafx.cpp file in all configurations; set the value "Create (/Yc)" for the "Precompiled Header" option.
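As a minimal sketch (the included headers are just placeholders; list whatever your project actually needs), the two files could look like this:
// stdafx.h - rarely-changing headers you want precompiled
#pragma once
#include <cstdio>
#include <iostream>
#include <vector>

// stdafx.cpp - compiled with /Yc to produce the .pch file; its only line is:
#include "stdafx.h"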
Reference: https://www.viva64.com/en/b/0265/
A well-explained article about this precompilation error. It also sums up how precompiled headers are used, the common errors you may face, and their solutions. Hope this is helpful.
I'm trying to draw this color map in OpenGL:
[color map image]
When using QUADS, the result looks fine:
int colorArr[] = { 255, 224, 192, 160, 128, 96, 64, 32 };
gluOrtho2D(-2, 2, -2, 2);
glPolygonMode(GL_FRONT, GL_FILL);
glDisable(GL_COLOR_LOGIC_OP);
glClear(GL_COLOR_BUFFER_BIT);
glBegin(GL_QUADS);
for (int i = 0; i < 8; i++) {
glColor3ub(colorArr[7 - i], 0, 0);
glVertex2f(-4, i - 4);
glVertex2f(4, i - 4);
glVertex2f(4, i - 4 + 1);
glVertex2f(-4, i - 4 + 1);
}
glEnd();
glEnable(GL_COLOR_LOGIC_OP);
glLogicOp(GL_OR);
glBegin(GL_QUADS);
for (int i = 0; i < 8; i++) {
glColor3ub(0, colorArr[i], 0);
glVertex2f(i - 4, 4);
glVertex2f(i - 4, -4);
glVertex2f(i - 4 + 1, -4);
glVertex2f(i - 4 + 1, 4);
}
glEnd();
glFlush();
But if I use QUAD_STRIP to draw the polygons, the colors get smoothly blended between the rows:
int colorArr[] = { 255, 224, 192, 160, 128, 96, 64, 32 };
gluOrtho2D(-2, 2, -2, 2);
glPolygonMode(GL_FRONT, GL_FILL);
glDisable(GL_COLOR_LOGIC_OP);
glClear(GL_COLOR_BUFFER_BIT);
glBegin(GL_QUAD_STRIP);
for (int i = 0; i < 8; i++) {
glColor3ub(colorArr[7 - i], 0, 0);
glVertex2f(-4, i - 4);
glVertex2f(4, i - 4);
}
glVertex2f(-4, 4);
glVertex2f(4, 4);
glEnd();
glEnable(GL_COLOR_LOGIC_OP);
glLogicOp(GL_OR);
glBegin(GL_QUAD_STRIP);
for (int i = 0; i < 8; i++) {
glColor3ub(0, colorArr[i], 0);
glVertex2f(i - 4, 4);
glVertex2f(i - 4, -4);
}
glVertex2f(4, 4);
glVertex2f(4, -4);
glEnd();
glFlush();
Do you have any idea why that is?
The quad strip you're drawing is equivalent to this:
glBegin(GL_QUADS);
for (int i = 0; i < 7; i++) {
glColor3ub(colorArr[7 - i], 0, 0);
glVertex2f(-4, i - 4);
glVertex2f(4, i - 4);
glColor3ub(colorArr[7 - i+1], 0, 0);
glVertex2f(4, i - 4 + 1);
glVertex2f(-4, i - 4 + 1);
}
You are changing the color only once every two vertices, and the color is interpolated as a vertex attribute towards the previous/next vertices.
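If you want the hard bands while still using a strip, one option (a sketch, untested against your exact setup) is to switch to flat shading, so each quad takes a single color from its provoking vertex instead of interpolating. For a quad strip the provoking vertex lies in the later row, so the color is issued with the upper row of each band:
glShadeModel(GL_FLAT);            // one color per quad, taken from its provoking vertex
glBegin(GL_QUAD_STRIP);
glVertex2f(-4, -4);               // first row; its color is never used under flat shading
glVertex2f(4, -4);
for (int i = 0; i < 8; i++) {
    glColor3ub(colorArr[7 - i], 0, 0);   // color of the band below this row
    glVertex2f(-4, i - 3);
    glVertex2f(4, i - 3);
}
glEnd();
glShadeModel(GL_SMOOTH);          // restore the default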
I am using Visual Studio 15 and working with OpenCV 3.0. I am getting an access violation error in my code, and this function does not even work with the sample code given in OpenCV.
#include"stdafx.h"
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/highgui/highgui.hpp"
#include <math.h>
#include <iostream>
using namespace cv;
using namespace std;
static void help()
{
cout
<< "\nThis program illustrates the use of findContours and drawContours\n"
<< "The original image is put up along with the image of drawn contours\n"
<< "Usage:\n"
<< "./contours2\n"
<< "\nA trackbar is put up which controls the contour level from -3 to 3\n"
<< endl;
}
const int w = 500;
int levels = 3;
vector<vector<Point> > contours;
vector<Vec4i> hierarchy;
static void on_trackbar(int, void*)
{
Mat cnt_img = Mat::zeros(w, w, CV_8UC3);
int _levels = levels - 3;
drawContours(cnt_img, contours, _levels <= 0 ? 3 : -1, Scalar(128, 255, 255),
3, LINE_AA, hierarchy, std::abs(_levels));
imshow("contours", cnt_img);
}
int main(int argc, char**)
{
Mat img = Mat::zeros(w, w, CV_8UC1);
if (argc > 1)
{
help();
return -1;
}
//Draw 6 faces
for (int i = 0; i < 6; i++)
{
int dx = (i % 2) * 250 - 30;
int dy = (i / 2) * 150;
const Scalar white = Scalar(255);
const Scalar black = Scalar(0);
if (i == 0)
{
for (int j = 0; j <= 10; j++)
{
double angle = (j + 5)*CV_PI / 21;
line(img, Point(cvRound(dx + 100 + j * 10 - 80 * cos(angle)),
cvRound(dy + 100 - 90 * sin(angle))),
Point(cvRound(dx + 100 + j * 10 - 30 * cos(angle)),
cvRound(dy + 100 - 30 * sin(angle))), white, 1, 8, 0);
}
}
ellipse(img, Point(dx + 150, dy + 100), Size(100, 70), 0, 0, 360, white, -1, 8, 0);
ellipse(img, Point(dx + 115, dy + 70), Size(30, 20), 0, 0, 360, black, -1, 8, 0);
ellipse(img, Point(dx + 185, dy + 70), Size(30, 20), 0, 0, 360, black, -1, 8, 0);
ellipse(img, Point(dx + 115, dy + 70), Size(15, 15), 0, 0, 360, white, -1, 8, 0);
ellipse(img, Point(dx + 185, dy + 70), Size(15, 15), 0, 0, 360, white, -1, 8, 0);
ellipse(img, Point(dx + 115, dy + 70), Size(5, 5), 0, 0, 360, black, -1, 8, 0);
ellipse(img, Point(dx + 185, dy + 70), Size(5, 5), 0, 0, 360, black, -1, 8, 0);
ellipse(img, Point(dx + 150, dy + 100), Size(10, 5), 0, 0, 360, black, -1, 8, 0);
ellipse(img, Point(dx + 150, dy + 150), Size(40, 10), 0, 0, 360, black, -1, 8, 0);
ellipse(img, Point(dx + 27, dy + 100), Size(20, 35), 0, 0, 360, white, -1, 8, 0);
ellipse(img, Point(dx + 273, dy + 100), Size(20, 35), 0, 0, 360, white, -1, 8, 0);
}
//show the faces
namedWindow("image", 1);
imshow("image", img);
//Extract the contours so that
//vector<vector<Point> > contours0;
vector<cv::Mat> coutours;
findContours(img, contours, hierarchy, RETR_TREE, CHAIN_APPROX_SIMPLE);
contours.resize(contours.size());
for (size_t k = 0; k < contours.size(); k++)
approxPolyDP(Mat(contours[k]), contours[k], 3, true);
namedWindow("contours", 1);
createTrackbar("levels+3", "contours", &levels, 7, on_trackbar);
on_trackbar(0, 0);
waitKey();
return 0;
}
I am using the x64 architecture and have linked all the libraries (.lib) along with the debug libraries (d.lib).
I think the problem comes from your "contours" variable. You're declaring it as a vector<cv::Mat>, but the contours are not represented as a matrix, but rather as a series of points.
Look at this example : http://docs.opencv.org/2.4/doc/tutorials/imgproc/shapedescriptors/find_contours/find_contours.html
They declare the contours as vector<vector<Point> > contours;
Look also at the declaration of the function (http://docs.opencv.org/2.4/modules/imgproc/doc/structural_analysis_and_shape_descriptors.html?highlight=findcontours#findcontours); the parameter contours is defined as: contours – Detected contours. Each contour is stored as a vector of points.
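As a minimal sketch of the expected types (a hypothetical helper, using the same flags as your code):
#include "opencv2/imgproc/imgproc.hpp"

// Contours are returned as lists of points, not matrices:
// one std::vector<cv::Point> per detected contour.
void extractContours(cv::Mat binaryImg)   // findContours may modify its input in OpenCV 3.0
{
    std::vector<std::vector<cv::Point> > contours;
    std::vector<cv::Vec4i> hierarchy;
    cv::findContours(binaryImg, contours, hierarchy,
                     cv::RETR_TREE, cv::CHAIN_APPROX_SIMPLE);
}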
I am trying to rotate four points (a rectangle) around their middle point.
But the result looks strange.
I use the formula found here and on Wikipedia:
p.x' = p.x*cos(alpha) + p.y*sin(alpha);
p.y' = -p.x*sin(alpha) + p.y*cos(alpha);
Is this formula applicable to my purpose (rotating a rectangle)?
Here is the source; I try to rotate 33 degrees counter-clockwise.
Thank you very much.
Mat img(480, 800, CV_8UC4, Scalar(255,255,255,255));
Mat dst(480, 800, CV_8UC4, Scalar(255,255,255,255));
Point p1, p2, p3, p4;
Point center;
p1.x=501; p1.y=247;
p2.x=429; p2.y=291;
p3.x=388; p3.y=222;
p4.x=451; p4.y=186;
circle(img, p1, 3, Scalar(255, 0, 0, 255), 3, 8, 0 );
circle(img, p2, 3, Scalar(0, 255, 0, 255), 3, 8, 0 );
circle(img, p3, 3, Scalar(0, 0, 255, 255), 3, 8, 0 );
circle(img, p4, 3, Scalar(255, 255, 0, 255), 3, 8, 0 );
center.x = (p1.x+p2.x+p3.x+p4.x)/4;
center.y = (p1.y+p2.y+p3.y+p4.y)/4;
double alpha = -33 * 0.0174532925;
double s = sin(alpha);
double c = cos(alpha);
p1.x = (+c*(p1.x-center.x)* + s*(p1.y-center.y)) + center.x;
p1.y = (-s*(p1.x-center.x) + c*(p1.y-center.y)) + center.y;
p2.x = (+c*(p2.x-center.x)* + s*(p2.y-center.y)) + center.x;
p2.y = (-s*(p2.x-center.x) + c*(p2.y-center.y)) + center.y;
p3.x = (+c*(p3.x-center.x)* + s*(p3.y-center.y)) + center.x;
p3.y = (-s*(p3.x-center.x) + c*(p3.y-center.y)) + center.y;
p4.x = (+c*(p4.x-center.x)* + s*(p4.y-center.y)) + center.x;
p4.y = (-s*(p4.x-center.x) + c*(p4.y-center.y)) + center.y;
circle(dst, p1, 3, Scalar(255, 0, 0, 255), 3, 8, 0 );
circle(dst, p2, 3, Scalar(0, 255, 0, 255), 3, 8, 0 );
circle(dst, p3, 3, Scalar(0, 0, 255, 255), 3, 8, 0 );
circle(dst, p4, 3, Scalar(255, 255, 0, 255), 3, 8, 0 );
imshow("src", img);
imshow("dst", dst);
cvMoveWindow("dst", 0, img.cols+50);
waitKey(0);
return 0;
Your formula is correct. Your math looks fine. Your problem is probably just the extra * symbol:
p1.x = (+c*(p1.x-center.x)* + s*(p1.y-center.y)) + center.x;
^
This gets evaluated as:
p1.x = ((+c*(p1.x-center.x))* (+ s*(p1.y-center.y))) + center.x;
Just remove the *
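A sketch of the corrected pair of lines (also using temporaries so the original x is not overwritten before it is used for y, which the follow-up below fixes as well):
double ox = p1.x - center.x, oy = p1.y - center.y;  // offsets from the center
p1.x = (int)( c * ox + s * oy) + center.x;          // the stray '*' removed
p1.y = (int)(-s * ox + c * oy) + center.y;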
So, I found the solution.
Like a noob, I was overwriting the previous x value with the newly computed value before using it to compute y.
This is the solution:
for(int i = 0; i<360; i++)
{
Mat img(480, 800, CV_8UC4, Scalar(255,255,255,255));
Mat dst(480, 800, CV_8UC4, Scalar(255,255,255,255));
Point p1, p2, p3, p4;
Point center;
p1.x=501; p1.y=247;
p2.x=429; p2.y=291;
p3.x=388; p3.y=222;
p4.x=451; p4.y=186;
circle(img, p1, 3, Scalar(255, 0, 0, 255), 3, 8, 0 );
circle(img, p2, 3, Scalar(0, 255, 0, 255), 3, 8, 0 );
circle(img, p3, 3, Scalar(0, 0, 255, 255), 3, 8, 0 );
circle(img, p4, 3, Scalar(255, 255, 0, 255), 3, 8, 0 );
center.x = (p1.x+p2.x+p3.x+p4.x)/4;
center.y = (p1.y+p2.y+p3.y+p4.y)/4;
double alpha = -i * 0.0174532925;
double s = sin(alpha);
double c = cos(alpha);
int x = 0, y = 0;
x=p1.x; y=p1.y;
p1.x = (c*(x-center.x) - s*(y-center.y)) + center.x;
p1.y = (s*(x-center.x) + c*(y-center.y)) + center.y;
x=p2.x; y=p2.y;
p2.x = (c*(x-center.x) - s*(y-center.y)) + center.x;
p2.y = (s*(x-center.x) + c*(y-center.y)) + center.y;
x=p3.x; y=p3.y;
p3.x = (c*(x-center.x) - s*(y-center.y)) + center.x;
p3.y = (s*(x-center.x) + c*(y-center.y)) + center.y;
x=p4.x; y=p4.y;
p4.x = (c*(x-center.x) - s*(y-center.y)) + center.x;
p4.y = (s*(x-center.x) + c*(y-center.y)) + center.y;
circle(dst, p1, 3, Scalar(255, 0, 0, 255), 3, 8, 0 );
circle(dst, p2, 3, Scalar(0, 255, 0, 255), 3, 8, 0 );
circle(dst, p3, 3, Scalar(0, 0, 255, 255), 3, 8, 0 );
circle(dst, p4, 3, Scalar(255, 255, 0, 255), 3, 8, 0 );
imshow("src", img);
imshow("dst", dst);
cvMoveWindow("dst", 0, img.cols+50);
waitKey(20);