std::vector memory, vector of unwanted 0's - C++

My code works in my pure GLUT implementation, but I am trying to get it to work in Qt.
I have a vector of mass points for a wire-mesh system:
std::vector<masspoint> m_particles;
The problem is that in my Qt version nothing I write really sticks, and I am left with an array of zeros. Basically, I am confused why the GLUT version has correct values but the Qt one does not, given that it is basically identical code. What is wrong with the Qt code?
Yes, I only see zeros when using qDebug. When I call my drawing function in the Qt version, all vertex points turn out to be 0 in all components, so nothing is seen.
int myboog = 1;
int county = 0;

// Constructors
Cloth::Cloth(float width, float height, int particles_in_width, int particles_in_height):
    m_width(particles_in_width),
    m_height(particles_in_height),
    m_dimensionWidth(width),
    m_dimensionHeight(height),
    m_distanceX(width/(float)particles_in_width),
    m_distanceY(height/(float)particles_in_height)
{
    // Set the particle array to the given size
    // Height by width
    // m_particles is the name of our vector
    m_particles.resize(m_width*m_height);
    qDebug() << m_particles.size();

    // Create the point masses to simulate the cloth
    for (int x = 0; x < m_width; ++x)
    {
        for (int y = 0; y < m_height; ++y)
        {
            // Place the point mass of the cloth; lift the edges to give the wind more effect as the cloth falls
            Vector3f position = Vector3f(m_dimensionWidth * (x / (float)m_width),
                ((x==0)||(x==m_width-1)||(y==0)||(y==m_height-1)) ? m_distanceY/2.0f : 0,
                m_dimensionHeight * (y / (float)m_height));
            // The gravity effect is applied to new mass points
            m_particles[y * m_width + x] = masspoint(position, Vector3f(0, -0.06, 0));
        }
    }

    int num = (int)m_particles.size();
    for (int i = 0; i < num; ++i)
    {
        masspoint* p = &m_particles[i];
        if (myboog)
        {
            qDebug() << "test " << *p->getPosition().getXLocation() << county;
            county++;
        }
    }
    myboog = 0;

    // Calculate the normals for the first time so the initial draw is correctly lit
    calculateClothNormals();
}
Code for masspoint involved in the constructor for Cloth:
#ifndef MASSPOINT_H
#define MASSPOINT_H

#include <QGLWidget>
#include "vector3f.h"

class masspoint
{
private:
    Vector3f m_position;         // Current location of the point mass
    Vector3f m_velocity;         // Direction and speed the point mass is traveling in
    Vector3f m_acceleration;     // Rate at which the point mass is accelerating (used for gravity)
    Vector3f m_forceAccumulated; // Force that has been accumulated since the last update
    Vector3f m_normal;           // Normal of this point mass, used to light the cloth when drawing
    float m_damping;             // Amount of velocity lost per update
    bool m_stationary;           // Whether this point mass is currently capable of movement
public:
    masspoint& operator= (const masspoint& particle);

    // Some constructors
    masspoint();
    masspoint(const masspoint& particle);
    masspoint(Vector3f position, Vector3f acceleration);

    // Euler-like integration
    void integrate(float duration);

    // Accessor functions
    // Get the position of the point mass
    inline Vector3f getPosition() const { return m_position; }
Vector code involved in the constructor for Cloth:
#ifndef VECTOR3F_H
#define VECTOR3F_H

#include <math.h>

// Vector library to be used
class Vector3f
{
private:
    float m_x, m_y, m_z;
public:
    const float* getXLocation() const { return &m_x; }
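A side note on these accessors (not necessarily the cause of the zeros): getPosition() returns a Vector3f by value, so the pointer returned by getXLocation() points into a temporary and is only safe to dereference within the same full expression, as the constructor's qDebug() line does. A by-value accessor avoids the trap entirely. A minimal self-contained sketch; the getX() accessor is a hypothetical addition, not part of the original class:

#include <iostream>

// Sketch of the two accessor styles (simplified stand-ins for the
// question's Vector3f and masspoint classes).
class Vector3f
{
    float m_x = 1.5f, m_y = 0.0f, m_z = 0.0f;
public:
    float getX() const { return m_x; }                  // safe: returns a copy
    const float* getXLocation() const { return &m_x; }  // fragile through temporaries
};

class masspoint
{
    Vector3f m_position;
public:
    Vector3f getPosition() const { return m_position; } // returns a temporary copy
};

int main()
{
    masspoint p;
    // OK: the temporary Vector3f lives until the end of this full expression.
    std::cout << *p.getPosition().getXLocation() << "\n";
    // Dangling if stored: const float* x = p.getPosition().getXLocation();
    std::cout << p.getPosition().getX() << "\n";        // always safe
    return 0;
}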


How to parent an object to another object and affect its position through rotation (make an object rotate around another object)

For context, I'm making a top-down shooter game where the player always rotates/faces the mouse cursor. That can be done easily, but now I'm stuck on positioning the weapon the player holds (I separate the weapon entity and the player entity because I want the player to be able to switch weapons). I have to make the weapon rotate to the same angle as the player (which is also easily done by just getting the player's rotation angle and applying it to the weapon as well). The part where I'm really stuck is positioning the weapon so that it always revolves around the player (with a bit of an offset).
Without further ado, here's the code:
class Player
{
public:
    Player(string skin)
    {
        this->skin.loadFromFile("gfx/skins/" + skin + ".png");
        player.setTexture(this->skin);
        player.setOrigin(Vector2f(7, 6.5f));
    }
    void SetScale(float x, float y)
    {
        player.setScale(x, y);
    }
    void SetPosition(float x, float y)
    {
        x_pos = x;
        y_pos = y;
    }
    Vector2f GetScale()
    {
        return player.getScale();
    }
    Vector2f GetPosition()
    {
        return Vector2f(x_pos, y_pos);
    }
    float GetRotation()
    {
        return rotate_angle;
    }
    void Update(float delta_time, Vector2f mouse_pos)
    {
        if (Keyboard::isKeyPressed(Keyboard::A) || Keyboard::isKeyPressed(Keyboard::D))
        {
            if (Keyboard::isKeyPressed(Keyboard::A))
            {
                vel_x = smoothMotion(-185.f, vel_x, delta_time);
            }
            if (Keyboard::isKeyPressed(Keyboard::D))
            {
                vel_x = smoothMotion(185.f, vel_x, delta_time);
            }
        }
        else
            vel_x = smoothMotion(0.f, vel_x, delta_time);
        if (Keyboard::isKeyPressed(Keyboard::W) || Keyboard::isKeyPressed(Keyboard::S))
        {
            if (Keyboard::isKeyPressed(Keyboard::W))
            {
                vel_y = smoothMotion(-185.f, vel_y, delta_time);
            }
            if (Keyboard::isKeyPressed(Keyboard::S))
            {
                vel_y = smoothMotion(185.f, vel_y, delta_time);
            }
        }
        else
            vel_y = smoothMotion(0.f, vel_y, delta_time);
        x_pos += vel_x * delta_time;
        y_pos += vel_y * delta_time;
        player.setPosition(x_pos, y_pos);
        player_mouse_distance = Vector2f(mouse_pos.x - x_pos, mouse_pos.y - y_pos);
        rotate_angle = radToDeg(atan2(player_mouse_distance.y, player_mouse_distance.x));
        player.setRotation(rotate_angle);
    }
    void Draw(RenderWindow& window)
    {
        window.draw(player);
    }
public:
    Vector2f player_mouse_distance;
private:
    Sprite player;
    Texture skin;
    float x_pos, y_pos;
    float vel_x = 0.f, vel_y = 0.f;
    float rotate_angle;
};
class Weapon
{
public:
    Weapon(string weapon_name)
    {
        weapon_texture.loadFromFile("gfx/weapons/" + weapon_name + ".png");
        weapon.setTexture(weapon_texture);
    }
    void SetScale(float x, float y)
    {
        weapon.setScale(x, y);
    }
    void SetPosition(float x, float y)
    {
        x_pos = x;
        y_pos = y;
    }
    void Update(Player player, float delta_time)
    {
        SetPosition((player.GetScale().x * (9 - 7)) /* <- offset */ * cos(player.GetRotation()) + player.GetPosition().x,
                    (player.GetScale().y * (6.5 - 5)) * sin(player.GetRotation()) + player.GetPosition().y);
        weapon.setPosition(x_pos, y_pos);
        weapon.setRotation(player.GetRotation());
    }
    void Draw(RenderWindow& window)
    {
        window.draw(weapon);
    }
private:
    Sprite weapon;
    Texture weapon_texture;
    float x_pos, y_pos;
    float vel_x = 0.f, vel_y = 0.f;
    float rotate_angle;
};
I'm using C++ and SFML 2.5.1, by the way, but any answer using another language or graphics library (like Pygame, etc.) is accepted too, since the physics uses the same math formulas anyway.
I watched tutorials about this, but most of them use game engines like Unity and Godot. They simply parent the weapon entity to the player entity so that the weapon also changes position when the player rotates.
I figured out that the cosine and sine functions must be the key to implementing this, but if I'm wrong please correct me.
Any help is appreciated :]
First, in Player::Update(), the formula for the rotation angle should be atan2(y, x); do not convert it to degrees, as sin and cos take radians as input.
If other parts of your project rely on Player::rotate_angle being in degrees, you should convert it back to radians in Weapon::Update(). However, I recommend using radians throughout, as all of the C++ base trig functions take radians as input.
In Weapon::Update(), you are applying different offset multipliers to the x and y arguments of SetPosition(): (9 - 7) to the x coordinate and (6.5 - 5) to the y coordinate. These should be single named constants instead of expressions like that, and they have to be equal unless you want the weapon to have an elliptical orbit. Replace those expressions with a constant defined somewhere in the Weapon class.
Additionally, player.GetScale() could have different x and y values, so you could replace player.GetScale().x and player.GetScale().y with some new method like Player::GetScaleMagnitude() that returns the length of the vector from player.GetScale() as a float. However, player.GetScale() contributing to an elliptical orbit could be visually beneficial, depending on how you want the game to look.
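Putting those points together, a minimal sketch of what Weapon::Update() might look like with radians kept internally and a single named offset (the weapon_offset value is a hypothetical placeholder, and Player::GetRotation() is assumed to now return radians):

void Update(Player player, float delta_time)
{
    const float weapon_offset = 2.f;    // hypothetical distance from the player's origin
    float angle = player.GetRotation(); // radians
    SetPosition(weapon_offset * cos(angle) + player.GetPosition().x,
                weapon_offset * sin(angle) + player.GetPosition().y);
    weapon.setPosition(x_pos, y_pos);
    // sf::Transformable::setRotation() expects degrees, so convert only at this boundary.
    weapon.setRotation(angle * 180.f / 3.14159265f);
}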
I totally agree with Pablo's answer, but I would go a step further:
Implement a parenting system!
Once you implement his solution, you will already be adding one transformation on top of another: the weapon's final transformation will be a composition of its own transformation (its offset from the player) and the player's transformation (its position + orientation).
I won't describe the exact formulas involved in composing the transformations; Pablo already gave a good answer on that. I'll describe here the architecture of a parentable system:
class TransformationNode
{
public:
    TransformationNode(TransformationNode* _parent = nullptr)
        : parent(_parent)
    {
    }
    void SetPosition(const float x, const float y)
    {
        localX = x;
        localY = y;
    }
    void SetAngle(const float angle)
    {
        localAngle = angle;
    }
    void computeGlobalCoords()
    {
        if (parent)
        {
            globalX = transformFormulaHere(parent->GetGlobalPosition(), parent->GetGlobalAngle());
            globalY = transformFormulaHere(parent->GetGlobalPosition(), parent->GetGlobalAngle());
            globalAngle = localAngle + parent->GetGlobalAngle();
        }
        else
        {
            globalX = localX;
            globalY = localY;
            globalAngle = localAngle;
        }
    }
private:
    float localX, localY, localAngle;
    float globalX, globalY, globalAngle;
    TransformationNode* parent;
};
And then you'll have both Player and Weapon inherit from TransformationNode. I haven't compiled the code; it's just to get the idea across.
By the way, I strongly recommend that you look at transformation matrices. They are better to work with than individual positions and angles.
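If it helps, here is one way the placeholder formulas could be filled in, using the standard 2D rotation; this is a sketch under the assumptions that angles are in radians and that a parent's global coordinates are computed before its children's:

#include <cmath>

// Sketch: rotate the local offset by the parent's global angle, then translate.
void computeGlobalCoords()
{
    if (parent)
    {
        // Private access to another instance of the same class is allowed.
        const float pa = parent->globalAngle;
        globalX = parent->globalX + localX * std::cos(pa) - localY * std::sin(pa);
        globalY = parent->globalY + localX * std::sin(pa) + localY * std::cos(pa);
        globalAngle = localAngle + pa;
    }
    else
    {
        globalX = localX;
        globalY = localY;
        globalAngle = localAngle;
    }
}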

Tiles being drawn in the wrong location

I've finally managed to get my tiles drawn on the screen somewhat correctly, although the location is a bit off and I can't seem to figure out why...
I'm using SFML for drawing.
Tile.hpp:
#ifndef TILE_HPP
#define TILE_HPP

#include <SFML/Graphics.hpp>
#include <SFML/System.hpp>
#include "textureManager.hpp"

class Tile {
public:
    Tile();
    Tile(sf::Vector2i coord, int biome);
    ~Tile();
    sf::Vector2i getCoord() const { return coord; };
    int getBiome() const { return biome; };
    void setCoord(sf::Vector2i coord) { this->coord = coord; };
    void setBiome(int biome) { this->biome = biome; };
    void draw(int x, int y, sf::RenderWindow* rw);
    void update(sf::Texture& texture);
private:
    sf::Vector2i coord;
    int biome;
    sf::Sprite sprite;
};
#endif
Tile.cpp:
#include <SFML/Graphics.hpp>
#include <SFML/System.hpp>
#include "textureManager.hpp"
#include "tile.hpp"

Tile::Tile()
{}

Tile::Tile(sf::Vector2i coord, int biome) {
    this->biome = biome;
    this->coord = coord;
}

Tile::~Tile(){}

void Tile::draw(int x, int y, sf::RenderWindow* rw)
{
    sprite.setPosition(x, y);
    rw->draw(sprite);
}

void Tile::update(sf::Texture& texture)
{
    switch (biome)
    {
        // Not important here
    }
}
Now the more relevant part: the drawing
void StatePlay::draw(const float dt)
{
    game->window.setView(view);
    game->window.clear(sf::Color::Black);
    sf::Vector2f offset = camera.getLocation();
    int newX = (offset.x / map.getTileSize()) - (map.chunkSize / 2);
    int newY = (offset.y / map.getTileSize()) - (map.chunkSize / 2);
    for (int x = 0; x < map.chunkSize; x++)
    {
        for (int y = 0; y < map.chunkSize; y++)
        {
            Tile tile = map.getTile(newX + x, newY + y);
            tile.draw((newX + x) * map.getTileSize(), (newY + y) * map.getTileSize(), &game->window);
        }
    }
    return;
}

StatePlay::StatePlay(Game* game)
{
    this->game = game;
    sf::Vector2f pos = sf::Vector2f(game->window.getSize()); // 1366x768
    view.setSize(pos);
    pos *= 0.5f; // 683x384
    view.setCenter(pos);
    // Initialize map
    map.init(game->gameTime, game->textureManager.getImage("tileset.png"));
    float w = (float) map.getWidth();  // 500
    float h = (float) map.getHeight(); // 500
    w *= 0.5f; // 250
    h *= 0.5f; // 250
    w *= map.getTileSize(); // 250 * 32 = 8000
    h *= map.getTileSize(); // 250 * 32 = 8000
    // Move camera
    // Uses sf::View::move to move the view by w and h
    // Also sets the camera's private members to w and h, returned by Camera::getLocation()
    camera.setLocation(&view, sf::Vector2f(w, h));
}
The result is that I only see roughly 10 tiles squared, in the bottom-left corner of my screen, covering about 3/4 of it.
The correct tiles are chosen, but the draw location is wrong... It should draw the center 64x64 tiles (32 px each), or as many as fit on the screen.
I have fixed the problem. It was a very stupid mistake...
At first, before drawing anything, it is normal to center the view on 0.5f * sf::View::getSize() to get the view centered in your window. So the center was already at half my window size. When using Camera::setLocation(), I used sf::View::move() to move the view accordingly. So when trying to center it on the map, it added the x and y correctly, but also half of my window size. This resulted in an incorrect offset. Subtracting those values (or leaving them out) fixed this stupid problem.
Thank you for the help.
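In other words, a minimal sketch of the fix, assuming Camera::setLocation() forwards to the view:

// Before (incorrect): the view was already centred at half the window size,
// so moving by the map-centre coordinates added both offsets together.
// view.move(sf::Vector2f(w, h));

// After (correct): either set the centre absolutely...
view.setCenter(sf::Vector2f(w, h));
// ...or compensate for the initial half-window centring when moving:
// view.move(sf::Vector2f(w, h) - 0.5f * view.getSize());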

Fast Ray and Polygon Intersection

I'm working on programming my own little game, which should have a visibility effect as described here. My world consists of polygons, each of which has a list of edges (sorted CW). I now want (as described in the article) to cast rays towards the edges of the polygons, find the intersections and retrieve a polygon that defines the visible area.
So I wrote classes for vectors, points, edges and polygons, and adjusted the intersection algorithm so it works with my code.
I then tested it and everything worked fine, but as I ran the intersection algorithm in a for loop to simulate a large number of edges being processed (starting with 100, up to 1000), the fps dropped drastically: with 100 edges "only" 300 fps (3000 before), and with 300 it dropped below 60, I think. That seems like far too big a drop to me, as I want to reuse this code for my light sources; I think I would then quickly end up processing far more than 300 edges, and it should run fast on much less powerful processors than mine (I have a Xeon E3-1230 v3).
I figured out that the program runs many times faster when only calling EdgeIntersection, but I definitely need to loop through the edges in my polygons, so this is not an option.
My source code:
Vector.h/.cpp: Basic vector class with two floats (X, Y), getters & setters, rotation
Vertex.h/.cpp: Basic point class with a position vector, getters & setters, and a boolean that indicates whether it is an intersection vertex
Edge.h/.cpp: Basic edge class with start/end vertices, getters & setters, and a rotation function (uses Vector::rotate())
Polygon.h:
#pragma once
#include <vector>
#include "Edge.h"

namespace geo
{
    class Polygon
    {
    private:
        std::vector<Edge> edges;
    public:
        Polygon();
        Polygon(std::vector<Edge> edges);
        ~Polygon();
        std::vector<Edge> getEdges();
        Edge getEdge(int index);
        int getEdgeCount();
        void setEdges(std::vector<Edge> edges);
        void setEdge(Edge e, int index);
        void addEdge(Edge e);
        void removeEdge(int index);
    };
}
Ray.h:
#pragma once
#include "Vertex.h"

class Ray
{
private:
    geo::Vertex origin;
    geo::Vector dir;
public:
    Ray();
    Ray(geo::Vertex origin, geo::Vector dir);
    ~Ray();
    geo::Vertex getOrigin();
    geo::Vector getDirection();
    void setOrigin(geo::Vertex origin);
    void setDirection(geo::Vector dir);
};
LightModule.h:
#pragma once
#include "Polygon.h"
#include "Ray.h"

class LightModule
{
private:
    // List of blocking polygons
    std::vector<geo::Polygon>* blockingPolygons;
    std::vector<Ray> rays;
    geo::Polygon bounds;
    geo::Polygon visible;
    /*geo::Polygon blocked;*/
    // Hit-detection class later
    geo::Vertex getIntersection(Ray r, geo::Edge* e);
    geo::Vertex getClosestIntersection(Ray r, geo::Polygon* p);
public:
    LightModule();
    LightModule(std::vector<geo::Polygon>* blockingPolygons);
    ~LightModule();
    // Set the blocking polygons
    void setBlockingPolygons(std::vector<geo::Polygon>* blockingPolygons);
    geo::Vertex callCI(Ray r, geo::Polygon* p);
    geo::Vertex callI(Ray r, geo::Edge* e);
    // Cast rays towards vertices and store them in rays
    void updateRays();
    // Update the visibility polygon
    void updateVisible();
    // Return the visibility polygon
    geo::Polygon* getVisible();
};
LightModule.cpp:
#include "LightModule.h"
LightModule::LightModule()
{
rays.clear();
}
LightModule::LightModule(std::vector<geo::Polygon>* blockingPolygons)
{
this->blockingPolygons = blockingPolygons;
rays.clear();
}
LightModule::~LightModule()
{
}
void LightModule::setBlockingPolygons(std::vector<geo::Polygon>* blockingPolygons)
{
this->blockingPolygons = blockingPolygons;
}
//Test-cast a Ray (will follow mouse in the Test)
void LightModule::updateRays()
{
Ray r(geo::Vertex(geo::Vector(200, 100)), geo::Vector(-100, 0));
rays.push_back(r);
}
void LightModule::updateVisible()
{
}
//Both for Testing will later be part of a seperate class
geo::Vertex LightModule::callCI(Ray r, geo::Polygon *p)
{
return this->getClosestIntersection(r, p);
}
geo::Vertex LightModule::callI(Ray r, geo::Edge* e)
{
return this->getIntersection(r, e);
}
//TEST
geo::Vertex LightModule::getIntersection(Ray r, geo::Edge* e)
{
geo::Vertex v;
v.setIntersectVert(false);
float r_px = r.getOrigin().getPosition().getX();
float r_py = r.getOrigin().getPosition().getY();
float r_dx = r.getDirection().getX();
float r_dy = r.getDirection().getY();
float s_px = e->getOrigin().getPosition().getX();
float s_py = e->getOrigin().getPosition().getY();
float s_dx = e->getDirection().getX();
float s_dy = e->getDirection().getY();
float r_mag = sqrt(r_dx*r_dx + r_dy*r_dy);
float s_mag = sqrt(s_dx*s_dx + s_dy*s_dy);
if (r_dx / r_mag == s_dx / s_mag && r_dy / r_mag == s_dy / s_mag)
{
return v;
}
float T2 = (r_dx*(s_py - r_py) + r_dy*(r_px - s_px)) / (s_dx*r_dy - s_dy*r_dx);
float T1 = (s_px + s_dx*T2 - r_px) / r_dx;
if (T1 < 0 /*|| T1 > 1 For Lines*/)
{
return v;
}
if (T2 < 0 || T2 > 1)
{
return v;
}
v.setIntersectVert(true);
v.setPosition(geo::Vector(r_px + r_dx*T1, r_py + r_dy*T1));
return v;
}
geo::Vertex LightModule::getClosestIntersection(Ray r, geo::Polygon *p)
{
geo::Vertex v;
v.setIntersectVert(false);
geo::Vertex v_nearest(geo::Vector(0, 0));
v_nearest.setIntersectVert(false);
geo::Vector h1;
geo::Vector h2;
for (int i = 0; i < p->getEdges().size(); i++)
{
v = this->getIntersection(r, &p->getEdges().at(i));
h1.setX(v.getPosition().getX() - r.getOrigin().getPosition().getX());
h1.setY(v.getPosition().getY() - r.getOrigin().getPosition().getY());
h2.setX(v_nearest.getPosition().getX() - r.getOrigin().getPosition().getX());
h2.setY(v_nearest.getPosition().getY() - r.getOrigin().getPosition().getY());
if (i < 1)
v_nearest = v;
else if (v.isIntersectVert() == true && h1.getLength() < h2.getLength())
{
v_nearest = v;
}
}
return v_nearest;
}
For the testing I create a polygon and a LightModule, call updateRays(), and then call the helper function callCI().
I know my code gets pretty messy where I have to cascade my getters and setters; I'll have to fix that. For the rest, I hope everything is understandable, and if not, feel free to ask. And just to have mentioned it: I test-draw my objects with vertex arrays, but I don't need graphical output of the intersection process, I just need the visible polygon.
Just to point it out again: I need a faster way of finding the intersection point between a ray and a polygon. As I didn't know whether I had done something wrong in my code, I posted it all here so someone can maybe help me make my code more efficient or show me a different method to solve my problem.
Have a nice day and thank you for your answers :)
Paul
EDIT: Would it be meaningfully faster to first triangulate my polygons and then do a ray-triangle intersection test?
I can't speak to the algorithm (which is possibly what you need), but here are some immediate thoughts on speeding up what you have.
First off, you can define all your getters and setters inline (put them in the class in the header, not in the separate source file) so the compiler can optimize the function calls away.
Then these changes might buy you a few frames:
// make sure your getters and setters are inline so the compiler
// can optimize them away
geo::Vertex LightModule::getClosestIntersection(Ray r, geo::Polygon* p)
{
    geo::Vertex v;
    v.setIntersectVert(false);
    geo::Vector h1;
    geo::Vector h2;
    // cache this (don't call the getters each time)
    geo::Vector ray_position = r.getOrigin().getPosition();
    geo::Vertex v_nearest(geo::Vector(0, 0));
    v_nearest.setIntersectVert(false);
    // cache the size (don't recompute it each iteration)
    size_t size = p->getEdges().size();
    // avoid access violation
    if (!size)
        return v_nearest;
    // preset item 0
    v_nearest = this->getIntersection(r, &p->getEdges()[0]);
    // start from 1, not 0
    for (size_t i = 1; i < size; i++)
    {
        // don't use at(); it's slower
        // v = this->getIntersection(r, &p->getEdges().at(i));
        v = this->getIntersection(r, &p->getEdges()[i]);
        // use the cached ray position rather than calling functions
        h1.setX(v.getPosition().getX() - ray_position.getX());
        h1.setY(v.getPosition().getY() - ray_position.getY());
        h2.setX(v_nearest.getPosition().getX() - ray_position.getX());
        h2.setY(v_nearest.getPosition().getY() - ray_position.getY());
        // this if is not needed because item 0 is preset
        //if (i < 1)
        //    v_nearest = v;
        if (v.isIntersectVert() == true && h1.getLength() < h2.getLength())
        {
            v_nearest = v;
        }
    }
    return v_nearest;
}
I removed one of the if statements by calculating item 0 before the loop and starting the loop from 1; the rest is just caching a heavily used value and avoiding at(), which is slower because it does bounds checking.
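One further micro-optimization, assuming getLength() takes a square root: since sqrt is monotonic, comparing squared lengths gives the same ordering and skips two sqrt calls per edge. A self-contained sketch; getLengthSquared() would be a hypothetical addition to the Vector class:

#include <cmath>
#include <iostream>

struct Vec
{
    float x, y;
    float getLength() const { return std::sqrt(x * x + y * y); }
    float getLengthSquared() const { return x * x + y * y; } // no sqrt
};

int main()
{
    Vec h1{3, 4}, h2{6, 8};
    // Both comparisons agree, but the second avoids the square roots.
    std::cout << (h1.getLength() < h2.getLength()) << "\n";               // 1
    std::cout << (h1.getLengthSquared() < h2.getLengthSquared()) << "\n"; // 1
    return 0;
}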

OpenGL Camera vectors

I have a very rudimentary camera which generates three vectors for use with gluLookAt(...). The problem is I'm not sure if this is correct; I adapted the code from something my lecturer showed us (I think he got it from somewhere).
This actually works until you spin the mouse around in circles, then the camera starts to rotate around the z-axis. That shouldn't happen, as the mouse coords are only attached to the pitch and yaw, not the roll.
Camera
// Camera.hpp
#ifndef MOOT_CAMERA_INCLUDE_HPP
#define MOOT_CAMERA_INCLUDE_HPP

#include <GL/gl.h>
#include <GL/glu.h>
#include <boost/utility.hpp>
#include <Moot/Platform.hpp>
#include <Moot/Vector3D.hpp>

namespace Moot
{
    class Camera : public boost::noncopyable
    {
    protected:
        Vec3f m_position, m_up, m_right, m_forward, m_viewPoint;
        uint16_t m_height, m_width;
    public:
        Camera()
        {
            m_forward = Vec3f(0.0f, 0.0f, -1.0f);
            m_right = Vec3f(1.0f, 0.0f, 0.0f);
            m_up = Vec3f(0.0f, 1.0f, 0.0f);
        }

        void setup(uint16_t setHeight, uint16_t setWidth)
        {
            m_height = setHeight;
            m_width = setWidth;
        }

        void move(float distance)
        {
            m_position += (m_forward * distance);
        }

        void addPitch(float setPitch)
        {
            m_forward = (m_forward * cos(setPitch)) + (m_up * sin(setPitch));
            m_forward.setNormal();
            // Cross product
            m_up = (m_forward / m_right) * -1;
        }

        void addYaw(float setYaw)
        {
            m_forward = (m_forward * cos(setYaw)) - (m_right * sin(setYaw));
            m_forward.setNormal();
            // Cross product
            m_right = m_forward / m_up;
        }

        void addRoll(float setRoll)
        {
            m_right = (m_right * cos(setRoll)) + (m_up * sin(setRoll));
            m_right.setNormal();
            // Cross product
            m_up = (m_forward / m_right) * -1;
        }

        virtual void apply() = 0;
    }; // Camera
} // Moot

#endif
Snippet from the update cycle:
// Mouse movement
m_camera.addPitch((float)input().mouseDeltaY() * 0.001);
m_camera.addYaw((float)input().mouseDeltaX() * 0.001);
apply() is defined in a class that inherits from Camera and is called from the draw function of the game loop.
void apply()
{
    glMatrixMode(GL_PROJECTION);
    glLoadIdentity();
    gluPerspective(40.0, (GLdouble)m_width/(GLdouble)m_height, 0.5, 20.0);
    m_viewPoint = m_position + m_forward;
    gluLookAt(m_position.getX(), m_position.getY(), m_position.getZ(),
              m_viewPoint.getX(), m_viewPoint.getY(), m_viewPoint.getZ(),
              m_up.getX(), m_up.getY(), m_up.getZ());
}
Don't accumulate the transforms in your vectors; store the angles and generate the vectors on the fly.
EDIT: The issue is floating-point stability. Compare the output of a and b:
#include <iostream>
using namespace std;

int main()
{
    const float small = 0.00001;
    const unsigned int times = 100000;

    float a = 0.0f;
    for (unsigned int i = 0; i < times; ++i)
    {
        a += small;
    }
    cout << a << endl;

    float b = 0.0f;
    b = small * times;
    cout << b << endl;

    return 0;
}
Output:
1.00099
1
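To make the first suggestion concrete, here is a minimal sketch that stores yaw and pitch as the source of truth and rebuilds the orthonormal basis every frame, so rounding errors cannot accumulate (names are illustrative, not the question's actual API):

#include <cmath>

struct Vec3 { float x, y, z; };

// With yaw = pitch = 0 this reproduces the question's initial basis:
// forward (0,0,-1), right (1,0,0), up (0,1,0).
void basisFromAngles(float yaw, float pitch, Vec3& forward, Vec3& right, Vec3& up)
{
    forward = { std::cos(pitch) * std::sin(yaw),
                std::sin(pitch),
                -std::cos(pitch) * std::cos(yaw) };
    right   = { std::cos(yaw), 0.0f, std::sin(yaw) };
    // up = right x forward (cross product); unit length since the others are.
    up      = { right.y * forward.z - right.z * forward.y,
                right.z * forward.x - right.x * forward.z,
                right.x * forward.y - right.y * forward.x };
}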
I am not sure where to start, as you are posting only small snippets, not enough to fully reproduce the problem.
In your methods you update all the parameters, and your parameters depend on previous values. I am not sure what exactly you call, because you posted that you call only these two:
m_camera.addPitch((float)input().mouseDeltaY() * 0.001);
m_camera.addYaw((float)input().mouseDeltaX() * 0.001);
You should somehow break that cycle by adding new parameters, so that the output depends only on the input (for example, m_position shouldn't depend on m_forward).
You should also initialize all member variables in the constructor; I see you are initializing only m_forward, m_right and m_up (by the way, use an initialization list).
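For example, a constructor with an initialization list covering every member might look like this (a sketch assuming Vec3f has a three-float constructor, as its usage above suggests):

Camera()
    : m_position(0.0f, 0.0f, 0.0f),
      m_up(0.0f, 1.0f, 0.0f),
      m_right(1.0f, 0.0f, 0.0f),
      m_forward(0.0f, 0.0f, -1.0f),
      m_viewPoint(0.0f, 0.0f, 0.0f),
      m_height(0),
      m_width(0)
{
}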
You might want to reconsider your approach in favor of quaternion rotations, as described in this paper. This has the advantage of representing all of your accumulated rotations as a single rotation about a single axis (you only need to keep track of a single quaternion), which you can apply to the canonical orientation vectors (up, norm and right) describing the camera orientation. Furthermore, since you're using C++, you can use the Boost quaternion class to handle most of the math.
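A small self-contained sketch of the idea using boost::math::quaternion: build the rotation from an axis and angle, then rotate a vector with v' = q v q*. The fromAxisAngle() helper is my own, not part of Boost:

#include <boost/math/quaternion.hpp>
#include <cmath>
#include <iostream>

using quat = boost::math::quaternion<float>;

// Unit quaternion for a rotation of 'angle' radians about a unit axis.
quat fromAxisAngle(float ax, float ay, float az, float angle)
{
    float s = std::sin(angle * 0.5f);
    return quat(std::cos(angle * 0.5f), ax * s, ay * s, az * s);
}

int main()
{
    quat q = fromAxisAngle(0, 1, 0, 3.14159265f / 2); // 90 degrees about +Y
    quat v(0, 0, 0, -1);                              // forward vector as a pure quaternion
    quat r = q * v * conj(q);                         // rotated vector in components 2..4
    std::cout << r.R_component_2() << " "             // expect roughly -1 0 0
              << r.R_component_3() << " "
              << r.R_component_4() << "\n";
    return 0;
}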

OpenGL draw circle, weird bugs

I'm no mathematician, but I need to draw a filled-in circle.
My approach was to use someone else's math to get all the points on the circumference of a circle and turn them into a triangle fan.
I need the vertices in a vertex array, no immediate mode.
The circle does appear. However, when I try to overlay circles, strange things happen. They appear for only a second and then disappear. When I move my mouse out of the window, a triangle sticks out from nowhere.
Here's the class:
class circle
{
    // every coordinate will have an X and Y
private:
    GLfloat *_vertices;
    static const float DEG2RAD = 3.14159/180;
    GLfloat _scalex, _scaley, _scalez;
    int _cachearraysize;
public:
    circle(float scalex, float scaley, float scalez, float radius, int numdegrees)
    {
        // 360 degrees, 2 per coordinate, 2 coordinates for center and end of triangle fan
        _cachearraysize = (numdegrees * 2) + 4;
        _vertices = new GLfloat[_cachearraysize];
        for (int x = 2; x < (_cachearraysize - 2); x = x + 2)
        {
            float degreeinRadians = x*DEG2RAD;
            _vertices[x] = cos(degreeinRadians)*radius;
            _vertices[x + 1] = sin(degreeinRadians)*radius;
        }
        // get the X as X of 0 and X of 180 degrees, subtract to get diameter, divide
        // by 2 for radius and add back to X of 180
        _vertices[0] = ((_vertices[2] - _vertices[362])/2) + _vertices[362];
        // same idea for Y
        _vertices[1] = ((_vertices[183] - _vertices[543])/2) + _vertices[543];
        // close off the triangle fan at the same point as the start
        _vertices[_cachearraysize - 1] = _vertices[0];
        _vertices[_cachearraysize] = _vertices[1];
        _scalex = scalex;
        _scaley = scaley;
        _scalez = scalez;
    }
    ~circle()
    {
        delete[] _vertices;
    }
    void draw()
    {
        glScalef(_scalex, _scaley, _scalez);
        glVertexPointer(2, GL_FLOAT, 0, _vertices);
        glDrawArrays(GL_TRIANGLE_FAN, 0, _cachearraysize);
    }
};
That's some ugly code, I'd say - lots of magic numbers et cetera.
Try something like:
struct Point {
    Point(float x, float y) : x(x), y(y) {}
    float x, y;
};

std::vector<Point> points;
const float step = 0.1;
const float radius = 2;

// centre of the fan
points.push_back(Point(0, 0));
// iterate over the angles
for (float a = 0; a < 2*M_PI; a += step) {
    points.push_back(Point(cos(a)*radius, sin(a)*radius));
}
// duplicate the first circumference vertex (after the centre) to close the fan
points.push_back(points.at(1));

// rendering:
glEnableClientState(GL_VERTEX_ARRAY);
glVertexPointer(2, GL_FLOAT, 0, &points[0]);
glDrawArrays(GL_TRIANGLE_FAN, 0, points.size());
It's up to you to rewrite this as a class, as you prefer. The math behind it is really simple; don't be afraid to try to understand it.
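One detail in the original class worth checking, since it matches the stray-triangle symptom: glDrawArrays() takes a vertex count, but _cachearraysize counts floats (two per vertex), so the draw call reads twice as many vertices as were written. The corrected call would presumably be:

// _cachearraysize counts GLfloats; with two floats per vertex,
// the number of vertices to draw is half of it.
glDrawArrays(GL_TRIANGLE_FAN, 0, _cachearraysize / 2);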