OpenGL Quaternion class usage - c++

For a coding project I'm doing, I was given a Quaternion class to use to make rotating my camera easier and to avoid gimbal lock.
I'm not that well versed in quaternions, so I was wondering how I would integrate it into my camera class.
Currently, to rotate the camera, I'm using the built-in glRotatef function.
Camera functions
void Camera::Pitch(float aAngle)
{
m_fPitchAngle += aAngle;
}
void Camera::Roll(float aAngle)
{
//aAngle = 5.0f;
m_fRollAngle += aAngle;
}
void Camera::Yaw(float aAngle)
{
m_fYawAngle += aAngle;
}
void Camera::MoveForward(float aDistance)
{
m_vPosition.z += aDistance;
}
void Camera::Strafe(float aDistance)
{
m_vPosition.x += aDistance;
}
These variables are being used inside the camera's render function.
Inside Camera's Render function
// Yaw
glRotatef(m_fYawAngle, m_vUp.x, m_vUp.y, m_vUp.z);
// Pitch
glRotatef(m_fPitchAngle, m_vRight.x, m_vRight.y, m_vRight.z);
//Roll
glRotatef(m_fRollAngle, m_vFacing.x, m_vFacing.y, m_vFacing.z);
//angleBetween = cosf(m_fYawAngle) + m_vPosition.z;
// Move Forward
glTranslatef(m_vPosition.x, m_vPosition.y, m_vPosition.z);
These are driven from the camera's update function, inside a switch statement.
Camera Update function
case SDLK_a:
Yaw(-kAngleToTurn);
break;
case SDLK_d:
Yaw(kAngleToTurn);
break;
And so on for the other variables. Here's the basic Quaternion header file I was given.
Quaternion.h
struct Quaternion
{
float w;
Vector3D vec;
Quaternion()
{
w = 1.0f; // identity rotation
vec.x = 0.0f;
vec.y = 0.0f;
vec.z = 0.0f;
}
Quaternion(float startW, Vector3D startVec)
{
w = startW;
vec.x = startVec.x;
vec.y = startVec.y;
vec.z = startVec.z;
}
};
class QuaternionMath
{
public:
~QuaternionMath();
static QuaternionMath* Instance();
void QuaternionToMatrix(Quaternion* q, float m[4][4]);
void MatrixToQuaternion(float m[4][4], Quaternion* q);
void EulerToQuaternion(float roll, float pitch, float yaw, Quaternion* q);
void Multiply(Quaternion* q1, Quaternion* q2, Quaternion* resultingQuaternion);
void RotateVector(Quaternion* q, Vector3D* v, Vector3D* resultingVector);
private:
QuaternionMath();
private:
static QuaternionMath* mInstance;
};

Instead of using a chain of glRotatef calls, build the camera's orientation as a quaternion, convert it to a 4×4 matrix with QuaternionToMatrix, and multiply that onto the matrix on top of the stack with glMultMatrixf.
As a further step, you should get rid of any code that uses the OpenGL fixed-function matrix stack and use something like GLM or similar instead.
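For illustration, here is a minimal sketch of what the render path could look like with the supplied class. It assumes EulerToQuaternion expects radians and that QuaternionToMatrix fills the 4×4 array in the column-major layout glMultMatrixf expects; check both against your implementation.
// Sketch only: replaces the glRotatef chain inside Camera::Render.
void Camera::Render()
{
    // Build one orientation quaternion from the accumulated angles.
    Quaternion orientation;
    QuaternionMath::Instance()->EulerToQuaternion(m_fRollAngle, m_fPitchAngle, m_fYawAngle, &orientation);

    // Convert it to a 4x4 matrix and multiply it onto the current matrix.
    float rotation[4][4];
    QuaternionMath::Instance()->QuaternionToMatrix(&orientation, rotation);
    glMultMatrixf(&rotation[0][0]);

    // Then apply the translation, as before.
    glTranslatef(m_vPosition.x, m_vPosition.y, m_vPosition.z);
}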

Related

how to parent object to another object and affect its position through rotation (make object rotate around other object)

For context, I'm making a top-down shooter game where the player always rotates to face the mouse cursor. That's easily done, but now I'm stuck positioning the weapon the player holds (I keep the weapon entity and the player entity separate because I want the player to be able to switch weapons). I also have to make the weapon rotate to the same angle as the player (again easily done by getting the player's rotation angle and applying it to the weapon). The part where I'm really stuck is positioning the weapon so that it revolves around the player (with a slight offset).
With no further ado, here's the code:
class Player
{
public:
Player(string skin)
{
this->skin.loadFromFile("gfx/skins/" + skin + ".png");
player.setTexture(this->skin);
player.setOrigin(Vector2f(7, 6.5f));
}
void SetScale(float x, float y)
{
player.setScale(x, y);
}
void SetPosition(float x, float y)
{
x_pos = x;
y_pos = y;
}
Vector2f GetScale()
{
return player.getScale();
}
Vector2f GetPosition()
{
return Vector2f(x_pos, y_pos);
}
float GetRotation()
{
return rotate_angle;
}
void Update(float delta_time, Vector2f mouse_pos)
{
if (Keyboard::isKeyPressed(Keyboard::A) || Keyboard::isKeyPressed(Keyboard::D))
{
if (Keyboard::isKeyPressed(Keyboard::A))
{
vel_x = smoothMotion(-185.f, vel_x, delta_time);
}
if (Keyboard::isKeyPressed(Keyboard::D))
{
vel_x = smoothMotion(185.f, vel_x, delta_time);
}
}
else
vel_x = smoothMotion(0.f, vel_x, delta_time);
if (Keyboard::isKeyPressed(Keyboard::W) || Keyboard::isKeyPressed(Keyboard::S))
{
if (Keyboard::isKeyPressed(Keyboard::W))
{
vel_y = smoothMotion(-185.f, vel_y, delta_time);
}
if (Keyboard::isKeyPressed(Keyboard::S))
{
vel_y = smoothMotion(185.f, vel_y, delta_time);
}
}
else
vel_y = smoothMotion(0.f, vel_y, delta_time);
x_pos += vel_x * delta_time;
y_pos += vel_y * delta_time;
player.setPosition(x_pos, y_pos);
player_mouse_distance = Vector2f(mouse_pos.x - x_pos, mouse_pos.y - y_pos);
rotate_angle = radToDeg(atan2(player_mouse_distance.y, player_mouse_distance.x));
player.setRotation(rotate_angle);
}
void Draw(RenderWindow& window)
{
window.draw(player);
}
public:
Vector2f player_mouse_distance;
private:
Sprite player;
Texture skin;
float x_pos, y_pos;
float vel_x = 0.f, vel_y = 0.f;
float rotate_angle;
};
class Weapon
{
public:
Weapon(string weapon_name)
{
weapon_texture.loadFromFile("gfx/weapons/" + weapon_name + ".png");
weapon.setTexture(weapon_texture);
}
void SetScale(float x, float y)
{
weapon.setScale(x, y);
}
void SetPosition(float x, float y)
{
x_pos = x;
y_pos = y;
}
void Update(Player player, float delta_time)
{
SetPosition((player.GetScale().x * (9 - 7)) /* <- offset */ * cos(player.GetRotation()) + player.GetPosition().x, (player.GetScale().y * (6.5 - 5)) * sin(player.GetRotation()) + player.GetPosition().y);
weapon.setPosition(x_pos, y_pos);
weapon.setRotation(player.GetRotation());
}
void Draw(RenderWindow& window)
{
window.draw(weapon);
}
private:
Sprite weapon;
Texture weapon_texture;
float x_pos, y_pos;
float vel_x = 0.f, vel_y = 0.f;
float rotate_angle;
};
I'm using C++ and SFML 2.5.1 by the way, but any answer using other language or other graphics library (like Pygame, etc) can be accepted too (since the physics uses the same math formulas anyways).
I watched tutorials about this, but most of them use game engines like Unity and Godot. They simply parent the weapon entity to the player entity so that the weapon also moves when the player rotates.
I figured that the cosine and sine functions must be the key to implementing this, but if I'm wrong please correct me.
Any help is appreciated :]
First, in Player.Update(), the rotation angle should stay as atan2(y, x); do not convert it to degrees, since sin and cos take radians as input.
If other parts of your project rely on Player.rotate_angle being in degrees, you should convert it back to radians in Weapon.Update(). However, I recommend using radians throughout, as all of the standard C++ trig functions take radians.
In Weapon.Update(), you are applying different offset multipliers to the x and y arguments of SetPosition: (9 - 7) for the x coordinate and (6.5 - 5) for the y coordinate. These should be single constants rather than expressions like that, and they have to be equal unless you want the weapon to have an elliptical orbit. Replace those expressions with a constant defined somewhere in the Weapon class.
Additionally, player.GetScale() could have different x and y values, so you could replace player.GetScale().x and player.GetScale().y with some new method like Player.GetScaleMagnitude() that returns the length of the player.GetScale() vector as a float. However, player.GetScale() contributing to an elliptical orbit could be visually beneficial, depending on how you want the game to look.
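To make that concrete, here is a minimal sketch of Weapon.Update() with those changes, assuming Player.GetRotation() now returns radians and using a hypothetical constant kOffset (the distance from the player's centre to the weapon) defined in the Weapon class:
// Sketch only: kOffset is a made-up constant; GetRotation() is assumed to be in radians.
void Update(Player player, float delta_time)
{
    const float angle = player.GetRotation();                 // radians
    SetPosition(player.GetPosition().x + kOffset * cos(angle),
                player.GetPosition().y + kOffset * sin(angle));
    weapon.setPosition(x_pos, y_pos);
    weapon.setRotation(radToDeg(angle)); // SFML's setRotation expects degrees
}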
I totally agree with Pablo's answer, but I would go a step further:
Implement a parenting system!
Once you implement his solution, you will already be applying one transformation on top of another: the weapon's final transformation will be a composition of its own transformation (the offset from the player) and the player's transformation (its position and orientation).
I won't describe the exact formulas involved in composing the transformations; Pablo already gave a good answer on that. I'll describe here the architecture of a parenting system:
class TransformationNode
{
public :
TransformationNode(TransformationNode* _parent = nullptr)
: parent(_parent)
{
}
void SetPosition(const float x, const float y)
{
localX = x;
localY = y;
}
void SetAngle(const float angle)
{
localAngle = angle;
}
void computeGlobalCoords()
{
if (parent)
{
globalX = transformFormulaHere(parent->GetGlobalPosition(), parent->GetGlobalAngle());
globalY = transformFormulaHere(parent->GetGlobalPosition(), parent->GetGlobalAngle());
globalAngle = localAngle + parent->GetGlobalAngle();
}
else
{
globalX = localX;
globalY = localY;
globalAngle = localAngle;
}
}
private :
float localX, localY, localAngle;
float globalX, globalY, globalAngle;
TransformationNode* parent;
};
And then you'll have both Player and Weapon inherit from TransformationNode. I haven't compiled the code; it's just to get the idea across.
By the way, I strongly recommend looking at transformation matrices. They are better to work with than individual positions and angles.
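For completeness, here is a sketch of how the placeholder formulas in computeGlobalCoords() could be filled in, assuming angles are in radians and <cmath> is available; parents must have their global coordinates computed before their children:
// Sketch only: rotate the local offset by the parent's global angle,
// then translate by the parent's global position.
void computeGlobalCoords()
{
    if (parent)
    {
        const float pa = parent->globalAngle;
        globalX = parent->globalX + localX * std::cos(pa) - localY * std::sin(pa);
        globalY = parent->globalY + localX * std::sin(pa) + localY * std::cos(pa);
        globalAngle = localAngle + pa;
    }
    else
    {
        globalX = localX;
        globalY = localY;
        globalAngle = localAngle;
    }
}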

How to correctly implement a quaternion camera in modern opengl?

I am trying to create a UVN quaternion-based camera in OpenGL, having used a variety of tutorials (listed below) and having read up on quaternions and axis-angle rotation, and I am left with a peculiar bug which I cannot seem to fix.
Basically the camera seems to work fine until it has been rotated roughly 45 degrees from +z; at that point, tilting the camera up or down seems to tilt it around its target axis, turning the up vector.
By the time the camera faces along -z, tilting up or down gives the illusion of the opposite: up tilts down and down tilts up.
I have seen other implementations suggesting the use of a non-UVN system where quaternions are accumulated into one that describes the current orientation as a delta from some arbitrary start angle. This sounds great, however I can't seem to work out exactly how I would implement it, specifically the conversion from such a quaternion to a view matrix.
Elsewhere on SO I read about splitting the rotation into two quaternions that represent yaw and pitch separately, but I'm not convinced that this is the cause of the problem since, correct me if I am wrong, my understanding is that in this context the order in which you apply the two rotations does not matter.
Relevant Source Code Snippets:
Quaternion Operations
Quaternion<TValue> conjugate() const{
return Quaternion({ { -m_values[X], -m_values[Y], -m_values[Z], m_values[W] } });
};
Quaternion<TValue>& operator*=(const Quaternion<TValue>& rhs) {
TValue x, y, z, w;
w = rhs[W] * m_values[W] - rhs[X] * m_values[X] - rhs[Y] * m_values[Y] - rhs[Z] * m_values[Z];
x = rhs[W] * m_values[X] + rhs[X] * m_values[W] - rhs[Y] * m_values[Z] + rhs[Z] * m_values[Y];
y = rhs[W] * m_values[Y] + rhs[X] * m_values[Z] + rhs[Y] * m_values[W] - rhs[Z] * m_values[X];
z = rhs[W] * m_values[Z] - rhs[X] * m_values[Y] + rhs[Y] * m_values[X] + rhs[Z] * m_values[W];
m_values[X] = x;
m_values[Y] = y;
m_values[Z] = z;
m_values[W] = w;
return *this;
};
static Quaternion<TValue> rotation(Vector<3, TValue> axis, TValue angle){
TValue halfTheta = angle / 2.0f;
TValue sinHalfTheta = sin(halfTheta);
return Quaternion<TValue>({ { axis[X] * sinHalfTheta, axis[Y] * sinHalfTheta, axis[Z] * sinHalfTheta, cos(halfTheta) } });
};
Vector Rotation Operation
Vector<dimensions, TValue> rotate(const Vector<3, TValue> axis, float angle){
Quaternion<TValue> R = Quaternion<TValue>::rotation(axis, angle);
Quaternion<TValue> V = (*this);
Vector<dimensions, TValue> result = R * V * R.conjugate();
return result;
}
Camera Methods
Camera::Camera(Vector<2, int> windowSize, float fov, float near, float far):
m_uvn(Matrix<4, float>::identity()),
m_translation(Matrix<4, float>::identity()),
m_ar(windowSize[Dimensions::X] / (float)windowSize[Dimensions::Y]),
m_fov(fov),
m_near(near),
m_far(far),
m_position(),
m_forward({ { 0, 0, 1 } }),
m_up({ { 0, 1, 0 } })
{
setViewMatrix(Matrix<4, float>::identity());
setProjectionMatrix(Matrix<4, float>::perspective(m_ar, m_near, m_far, m_fov));
};
Matrix<4, float> Camera::getVPMatrix() const{
return m_vp;
};
const Vector<3, float> Camera::globalY = Vector<3, float>({ { 0, 1, 0 } });
void Camera::setProjectionMatrix(const Matrix<4, float> p){
m_projection = p;
m_vp = m_projection * m_view;
};
void Camera::setViewMatrix(const Matrix<4, float> v){
m_view = v;
m_vp = m_projection * m_view;
};
void Camera::setTranslationMatrix(const Matrix<4, float> t){
m_translation = t;
setViewMatrix(m_uvn * m_translation);
}
void Camera::setPosition(Vector<3, float> position){
if (position != m_position){
m_position = position;
setTranslationMatrix(Matrix<4, float>::translation(-position));
}
};
void Camera::moveForward(float ammount){
setPosition(m_position + (m_forward * ammount));
}
void Camera::moveRight(float ammount){
setPosition(m_position + (getRight() * ammount));
}
void Camera::moveUp(float ammount){
setPosition(m_position + (m_up * ammount));
}
void Camera::setLookAt(Vector<3, float> target, Vector<3, float> up){
Vector<3, float> newUp = up.normalize();
Vector<3, float> newForward = target.normalize();
if (newUp != m_up || newForward != m_forward){
m_up = newUp;
m_forward = newForward;
Vector<3, float> newLeft = getLeft();
m_up = newLeft * m_forward;
m_uvn = generateUVN();
setViewMatrix(m_uvn * m_translation);
}
};
void Camera::rotateX(float angle){
Vector<3, float> hAxis = (globalY * m_forward).normalize();
m_forward = m_forward.rotate(hAxis, angle).normalize();
m_up = (m_forward * hAxis).normalize();
m_uvn = generateUVN();
setViewMatrix(m_translation * m_uvn);
}
void Camera::rotateY(float angle){
Vector<3, float> hAxis = (globalY * m_forward).normalize();
m_forward = m_forward.rotate(globalY, angle).normalize();
m_up = (m_forward * hAxis).normalize();
m_uvn = generateUVN();
setViewMatrix(m_translation * m_uvn);
}
Vector<3, float> Camera::getRight(){
return (m_forward * m_up).normalize();
}
Vector <3, float> Camera::getLeft(){
return (m_up * m_forward).normalize();
}
};
I am guessing that the problem is either in my implementation of the quaternion or in the way I am using it, but due to the complex nature of the system I cannot pin it down any further than that. Given the weird bugs being experienced, is there simply something wrong with the way I am trying to implement the camera?
Tutorials
https://www.youtube.com/watch?v=1Aw1PDu33PI
http://www.gamedev.net/page/resources/_/technical/math-and-physics/a-simple-quaternion-based-camera-r1997
Quaternion/Vector Math
http://mathworld.wolfram.com/Quaternion.html
https://en.wikipedia.org/wiki/Cross_product
http://ogldev.atspace.co.uk/www/tutorial13/tutorial13.html
http://www.euclideanspace.com/maths/algebra/realNormedAlgebra/quaternions/index.htm
Old question, but still a relevant topic, so I'll give some pointers. The thing to remember is that a quaternion encodes a 3D rotation. Unit quaternions (and, to a lesser degree, pure quaternions) provide a consistent way to say "this rotation is this value". The benefit over Euler angles is that Euler angles try to reconstruct an orientation from successive rotations about fixed axes, whereas a quaternion corresponds directly to the orientation and can avoid gimbal lock.
Specifically for a quaternion camera, let Q = {1, 0, 0, 0} where w = 1; the matrix corresponding to this quaternion is the identity matrix. Any valid unit quaternion, when decomposed into a 3x3 matrix, gives you the (usually world-space) rotation of the camera. However, you don't even need that, because you can define your camera space with, say, X = {1, 0, 0}, Y = {0, 1, 0}, Z = {0, 0, -1}, then rotate these unit axes by your camera's orientation quaternion; the resulting vectors are the transformed unit axes. These become your right, up and front vectors, which can be used to build the 3x3 rotation of the view transform.
Moving the camera should be relatively straightforward at that point. The linear movement vectors can easily be reconstructed and applied to the camera position, and angular movement can be achieved by multiplying the camera quaternion with a rotation about the normal of the plane in which the rotation happens. For example, turning left and right is a rotation in the XZ plane, so a rotation quaternion built around the Y axis (a unit quaternion, not a pure quaternion) is multiplied with the camera quaternion, producing the desired rotational effect.
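To illustrate the accumulated-quaternion approach, here is a minimal sketch using GLM rather than the question's own math classes (glm::angleAxis, glm::normalize and glm::lookAt come from the GLM headers shown):
#include <glm/glm.hpp>
#include <glm/gtc/quaternion.hpp>
#include <glm/gtc/matrix_transform.hpp>

struct QuatCamera
{
    glm::vec3 position{0.0f};
    glm::quat orientation{1.0f, 0.0f, 0.0f, 0.0f}; // w, x, y, z: identity

    // Yaw about the world up axis, pitch about the camera's current right axis.
    void rotate(float yaw, float pitch)
    {
        glm::quat qYaw   = glm::angleAxis(yaw, glm::vec3(0.0f, 1.0f, 0.0f));
        glm::quat qPitch = glm::angleAxis(pitch, orientation * glm::vec3(1.0f, 0.0f, 0.0f));
        orientation = glm::normalize(qPitch * qYaw * orientation);
    }

    // Derive the basis vectors from the single accumulated quaternion each frame.
    glm::mat4 view() const
    {
        glm::vec3 forward = orientation * glm::vec3(0.0f, 0.0f, -1.0f);
        glm::vec3 up      = orientation * glm::vec3(0.0f, 1.0f, 0.0f);
        return glm::lookAt(position, position + forward, up);
    }
};
Because the orientation lives in one quaternion, pitching never has to re-derive the up vector from a cross product against the world up, which is where flips near ±90 degrees tend to creep in.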

Physics in video game, Apply torque on angular acceleration

I'm working on a game project, a top-down 2D car game. I want to manage all the physics myself. I'm working with this book to implement the physics: http://www.amazon.fr/Game-Physics-Engine-Development-Commercial-Grade/dp/0123819768
For now my physics engine can handle forces on the different axes, but I have some issues implementing a correct simulation of rotation. I'm trying to apply torque to obtain an angular acceleration, so I implemented an inertia tensor matrix:
setMass(400.f);
Matrix3 it;
it.setBlockInertiaTensor(Vector3(2, 1, 1), 400);
setInertiaTensor(it);
void setBlockInertiaTensor(const Vector3 &halfSizes, float mass)
{
Vector3 squares = halfSizes.componentProduct(halfSizes);
setInertiaTensorCoeffs(0.3f*mass*(squares.y + squares.z),
0.3f*mass*(squares.x + squares.z),
0.3f*mass*(squares.x + squares.y));
}
To apply torque, I apply a force at a body point of my car and find the torque with a cross product:
player->addForceAtBodyPoint(Vector3(-2000, 1000, 0), Vector3(0, 100, 0));
void AObject::addForceAtBodyPoint(const Vector3 &force, const Vector3 &point)
{
Vector3 pt = getPointInWorldSpace(point);
addForceAtPoint(force, pt);
}
void AObject::addForceAtPoint(const Vector3 &force,
const Vector3 &point)
{
// Convert to coordinates relative to center of mass.
Vector3 pt = point;
pt -= _position;
_forceAccumulate += force;
_torqueAccumulate += pt % force;
//std::cout << "torque x " << pt.x << " y " << pt.y << " z "<< pt.z << std::endl;
}
Vector3 Vector3::operator%(const Vector3 &vector) const
{
return Vector3(y*vector.z - z*vector.y,
z*vector.x - x*vector.z,
x*vector.y - y*vector.x);
}
(The % operator is the cross product.)
And finally I integrate all the data:
void Player::integrate(float deltaTime)
{
addForce(_velocity * -150.0f);
// Calculate linear acceleration from force inputs.
_lastFrameAcceleration = _acceleration;
_lastFrameAcceleration.addScaledVector(_forceAccumulate, _inverseMass);
// Calculate angular acceleration from torque inputs.
Vector3 angularAcceleration = _inverseInertiaTensorWorld.transform(_torqueAccumulate);
// Update linear velocity from acceleration .
_velocity.addScaledVector(_lastFrameAcceleration, deltaTime);
// Update angular velocity from acceleration .
_rotation.addScaledVector(angularAcceleration, deltaTime);
// Impose drag.
_velocity *= pow(_linearDamping, deltaTime);
_rotation *= pow(_angularDamping, deltaTime);
// Update linear position.
_position.addScaledVector(_velocity, deltaTime);
_position.z = 0;
// Update angular position
_orientation.addScaledVector(_rotation, deltaTime);
// Normalise the orientation, and update the matrices
calculateWorldLocalData();
// Clear accumulators.
clearAccumulator();
}
And the orientation is not working at all. I'm not that good with physics, so I think I'm misunderstanding how torque is implemented with the inertia tensor...
If your game is top-down 2D then you can only have rotation about the Z axis, i.e. into and out of the screen. Thus you can simplify your problem and avoid 3D tensors. In that case I would give your car class scalar angle and torque members, e.g.
private:
double angle;
double torque;
public:
void updateTorque(/* some way of passing the forces in */)
{
double total_t = 0;
for (/* each force */)
{
// use the cosine and the length to the point to generate a torque
double t = /* ... */;
total_t = total_t + t;
}
torque = total_t;
}
void updateAngle(); // place your integration routine here and call it once per loop
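To make the 2D version concrete, here is a small self-contained sketch of accumulating torque from forces applied at points on the body and integrating the angle, assuming a box-shaped car (the Force2D and Body2D types are made up for the example):
#include <vector>

struct Vec2 { float x, y; };

struct Force2D
{
    Vec2 point; // application point relative to the centre of mass
    Vec2 force;
};

struct Body2D
{
    float mass = 400.0f;
    float halfWidth = 2.0f, halfHeight = 1.0f;
    float angle = 0.0f, angularVelocity = 0.0f;

    // Moment of inertia of a rectangle about its centre: m * (w^2 + h^2) / 12.
    float inertia() const
    {
        const float w = 2.0f * halfWidth, h = 2.0f * halfHeight;
        return mass * (w * w + h * h) / 12.0f;
    }

    void integrate(const std::vector<Force2D>& forces, float dt)
    {
        float torque = 0.0f;
        for (const Force2D& f : forces)
            torque += f.point.x * f.force.y - f.point.y * f.force.x; // 2D cross product

        const float angularAcceleration = torque / inertia();
        angularVelocity += angularAcceleration * dt;
        angle += angularVelocity * dt;
    }
};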

std::vector memory, vector of unwanted 0's

My code works in my pure GLUT implementation, but I am trying to get it to work in Qt.
I have a vector of masspoints for a wire mesh system:
std::vector<masspoint> m_particles;
The problem is that in my Qt version none of what I write really sticks, and I am left with an array of zeros. Basically I am confused as to why the GLUT version has correct values but the Qt one does not, given that it is basically identical code. What is wrong with the Qt code?
Yes, I only see zeros when using qDebug. When I call my drawing function in the Qt version, all vertex points turn out to be 0 in all components, so nothing is drawn.
int myboog = 1;
int county = 0;
// Constructors
Cloth::Cloth(float width, float height, int particles_in_width, int particles_in_height):
m_width(particles_in_width),
m_height(particles_in_height),
m_dimensionWidth(width),
m_dimensionHeight(height),
m_distanceX(width/(float)particles_in_width),
m_distanceY(height/(float)particles_in_height)
{
//Set the particle array to the given size
//Height by width
//mparticles is the name of our vector
m_particles.resize(m_width*m_height);
qDebug() << m_particles.size();
// Create the point masses to simulate the cloth
for (int x = 0; x < m_width; ++x)
{
for (int y=0; y < m_height; ++y)
{
// Place the pointmass of the cloth, lift the edges to give the wind more effect as the cloth falls
Vector3f position = Vector3f(m_dimensionWidth * (x / (float)m_width),
((x==0)||(x==m_width-1)||(y==0)||(y==m_height-1)) ? m_distanceY/2.0f:0,
m_dimensionHeight * (y / (float)m_height));
// The gravity effect is applied to new pmasspoints
m_particles[y * m_width + x] = masspoint(position,Vector3f(0,-0.06,0));
}
}
int num = (int)m_particles.size();
for (int i=0; i<num; ++i)
{
masspoint* p = &m_particles[i];
if(myboog)
{
qDebug() << "test " << *p->getPosition().getXLocation() << county;
county++;
}
}
myboog = 0;
// Calculate the normals for the first time so the initial draw is correctly lit
calculateClothNormals();
}
Code for masspoint involved in the constructor for Cloth
#ifndef MASSPOINT_H
#define MASSPOINT_H
#include <QGLWidget>
#include "vector3f.h"
class masspoint
{
private:
Vector3f m_position; // Current Location of the pointmass
Vector3f m_velocity; // Direction and speed the pointmass is traveling in
Vector3f m_acceleration; // Speed at which the pointmass is accelerating (used for gravity)
Vector3f m_forceAccumulated; // Force that has been accumulated since the last update
Vector3f m_normal; // Normal of this pointmass, used to light the cloth when drawing
float m_damping; // Amount of velocity lost per update
bool m_stationary; // Whether this pointmass is currently capible of movement
public:
masspoint& operator= (const masspoint& particle);
//Some constructors
masspoint();
masspoint(const masspoint& particle);
masspoint(Vector3f position, Vector3f acceleration);
//Like Euler integration
void integrate(float duration);
// Accessor functions
//Get the position of the point mass
inline Vector3f getPosition() const {return m_position;}
Vector code involved in the constructor for Cloth
#ifndef VECTOR3F_H
#define VECTOR3F_H
#include <math.h>
// Vector library to be used
class Vector3f
{
private:
float m_x, m_y, m_z;
public:
const float* getXLocation() const { return &m_x; }

OpenGL Camera vectors

I have a very rudimentary camera which generates 3 vectors for use with gluLookAt(...). The problem is I'm not sure if this is correct; I adapted the code from something my lecturer showed us (I think he got it from somewhere).
This actually works until you spin the mouse round in circles, then the camera starts to rotate around the z-axis. This shouldn't happen, as the mouse coords are only attached to the pitch and yaw, not the roll.
Camera
// Camera.hpp
#ifndef MOOT_CAMERA_INCLUDE_HPP
#define MOOT_CAMERA_INCLUDE_HPP
#include <GL/gl.h>
#include <GL/glu.h>
#include <boost/utility.hpp>
#include <Moot/Platform.hpp>
#include <Moot/Vector3D.hpp>
namespace Moot
{
class Camera : public boost::noncopyable
{
protected:
Vec3f m_position, m_up, m_right, m_forward, m_viewPoint;
uint16_t m_height, m_width;
public:
Camera()
{
m_forward = Vec3f(0.0f, 0.0f, -1.0f);
m_right = Vec3f(1.0f, 0.0f, 0.0f);
m_up = Vec3f(0.0f, 1.0f, 0.0f);
}
void setup(uint16_t setHeight, uint16_t setWidth)
{
m_height = setHeight;
m_width = setWidth;
}
void move(float distance)
{
m_position += (m_forward * distance);
}
void addPitch(float setPitch)
{
m_forward = (m_forward * cos(setPitch) + (m_up * sin(setPitch)));
m_forward.setNormal();
// Cross Product
m_up = (m_forward / m_right) * -1;
}
void addYaw(float setYaw)
{
m_forward = ((m_forward * cos(setYaw)) - (m_right * sin(setYaw)));
m_forward.setNormal();
// Cross Product
m_right = m_forward / m_up;
}
void addRoll(float setRoll)
{
m_right = (m_right * cos(setRoll) + (m_up * sin(setRoll)));
m_right.setNormal();
// Cross Product
m_up = (m_forward / m_right) * -1;
}
virtual void apply() = 0;
}; // Camera
} // Moot
#endif
Snippet from update cycle
// Mouse movement
m_camera.addPitch((float)input().mouseDeltaY() * 0.001);
m_camera.addYaw((float)input().mouseDeltaX() * 0.001);
apply() is pure virtual in the camera class and defined in a derived class; it is called from the draw function of the game loop.
void apply()
{
glMatrixMode(GL_PROJECTION);
glLoadIdentity();
gluPerspective(40.0,(GLdouble)m_width/(GLdouble)m_height,0.5,20.0);
m_viewPoint = m_position + m_forward;
gluLookAt( m_position.getX(), m_position.getY(), m_position.getZ(),
m_viewPoint.getX(), m_viewPoint.getY(), m_viewPoint.getZ(),
m_up.getX(), m_up.getY(), m_up.getZ());
}
Don't accumulate the transforms in your vectors; store the angles and generate the vectors on the fly (see the sketch after the output below).
EDIT: Floating-point stability. Compare the output of a and b:
#include <iostream>
using namespace std;
int main()
{
const float small = 0.00001;
const unsigned int times = 100000;
float a = 0.0f;
for( unsigned int i = 0; i < times; ++i )
{
a += small;
}
cout << a << endl;
float b = 0.0f;
b = small * times;
cout << b << endl;
return 0;
}
Output:
1.00099
1
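For illustration, here is a minimal sketch of the "store the angles" approach applied to this Camera class. The m_pitch and m_yaw members and the updateVectors() helper are made up for the example; the basis vectors are regenerated from the angles each frame instead of being mutated incrementally:
// Sketch only: recompute the camera basis from the stored angles every frame.
void Camera::updateVectors()
{
    m_forward = Vec3f(cos(m_pitch) * sin(m_yaw),
                      sin(m_pitch),
                      -cos(m_pitch) * cos(m_yaw));
    m_forward.setNormal();
    m_right = m_forward / Vec3f(0.0f, 1.0f, 0.0f); // cross product with world up
    m_right.setNormal();
    m_up = (m_forward / m_right) * -1;             // cross product, as in addPitch
    m_up.setNormal();
}

void Camera::addPitch(float setPitch) { m_pitch += setPitch; updateVectors(); }
void Camera::addYaw(float setYaw)     { m_yaw   += setYaw;   updateVectors(); }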
I am not sure where to start, as you are posting only small snippets, not enough to fully reproduce the problem.
In your methods you update all of the parameters, and each parameter depends on previous values. I am not sure exactly what you call, because you posted that you call only these two:
m_camera.addPitch((float)input().mouseDeltaY() * 0.001);
m_camera.addYaw((float)input().mouseDeltaX() * 0.001);
You should somehow break that circle by adding new parameters, and the output should depend on the input (for example, m_position shouldn't depend on m_forward).
You should also initialize all variables in the constructor, and I see you are initializing only m_forward, m_right and m_up (by the way, use an initialization list).
You might want to reconsider your approach in favor of using quaternion rotations as described in this paper. This has the advantage of representing all of your accumulated rotations as a single rotation about a single vector (you only need to keep track of a single quaternion), which you can apply to the canonical orientation vectors (up, norm and right) describing the camera orientation. Furthermore, since you're using C++, you can use the Boost quaternion class to handle most of the math.
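For example, a minimal sketch of that idea with boost::math::quaternion (this shows the general technique only, not the paper's exact code):
#include <boost/math/quaternion.hpp>
#include <cmath>

using quat = boost::math::quaternion<float>;

// Unit quaternion for a rotation of 'angle' radians about a unit axis (ax, ay, az).
quat fromAxisAngle(float angle, float ax, float ay, float az)
{
    const float s = std::sin(angle * 0.5f);
    return quat(std::cos(angle * 0.5f), ax * s, ay * s, az * s);
}

// Rotate the vector (vx, vy, vz) by the unit quaternion q: q * v * conj(q).
void rotateVector(const quat& q, float& vx, float& vy, float& vz)
{
    const quat v(0.0f, vx, vy, vz);
    const quat r = q * v * boost::math::conj(q);
    vx = r.R_component_2();
    vy = r.R_component_3();
    vz = r.R_component_4();
}
Each frame you would multiply small yaw/pitch quaternions (from fromAxisAngle) into one accumulated camera quaternion, then rotate the canonical right/up/forward axes with rotateVector and feed the results to gluLookAt.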