How to fix weird camera rotation while moving the camera with SDL and OpenGL in C++

I have a camera object, put together from reading around on the net, that handles moving forward and backward, strafing left and right, and even looking around with the mouse. But when I move in any direction while also trying to look around, the view jumps all over the place; when I stand still and look around, it's fine.
I'm hoping someone can help me work out why I can't move and look around at the same time.
main.h
#include "SDL/SDL.h"
#include "SDL/SDL_opengl.h"
#include <cmath>
#define CAMERASPEED 0.03f // The Camera Speed
struct tVector3 // Extended 3D Vector Struct
{
tVector3() {} // Struct Constructor
tVector3 (float new_x, float new_y, float new_z) // Init Constructor
{ x = new_x; y = new_y; z = new_z; }
// overload + operator
tVector3 operator+(tVector3 vVector) {return tVector3(vVector.x+x, vVector.y+y, vVector.z+z);}
// overload - operator
tVector3 operator-(tVector3 vVector) {return tVector3(x-vVector.x, y-vVector.y, z-vVector.z);}
// overload * operator
tVector3 operator*(float number) {return tVector3(x*number, y*number, z*number);}
// overload / operator
tVector3 operator/(float number) {return tVector3(x/number, y/number, z/number);}
float x, y, z; // 3D vector coordinates
};
class CCamera
{
public:
tVector3 mPos;
tVector3 mView;
tVector3 mUp;
void Strafe_Camera(float speed);
void Move_Camera(float speed);
void Rotate_View(float speed);
void Position_Camera(float pos_x, float pos_y,float pos_z,
float view_x, float view_y, float view_z,
float up_x, float up_y, float up_z);
};
void Draw_Grid();
camera.cpp
#include "main.h"
void CCamera::Position_Camera(float pos_x, float pos_y, float pos_z,
float view_x, float view_y, float view_z,
float up_x, float up_y, float up_z)
{
mPos = tVector3(pos_x, pos_y, pos_z);
mView = tVector3(view_x, view_y, view_z);
mUp = tVector3(up_x, up_y, up_z);
}
void CCamera::Move_Camera(float speed)
{
tVector3 vVector = mView - mPos;
mPos.x = mPos.x + vVector.x * speed;
mPos.z = mPos.z + vVector.z * speed;
mView.x = mView.x + vVector.x * speed;
mView.z = mView.z + vVector.z * speed;
}
void CCamera::Strafe_Camera(float speed)
{
tVector3 vVector = mView - mPos;
tVector3 vOrthoVector;
vOrthoVector.x = -vVector.z;
vOrthoVector.z = vVector.x;
mPos.x = mPos.x + vOrthoVector.x * speed;
mPos.z = mPos.z + vOrthoVector.z * speed;
mView.x = mView.x + vOrthoVector.x * speed;
mView.z = mView.z + vOrthoVector.z * speed;
}
void CCamera::Rotate_View(float speed)
{
tVector3 vVector = mView - mPos;
tVector3 vOrthoVector;
vOrthoVector.x = -vVector.z;
vOrthoVector.z = vVector.x;
mView.z = (float)(mPos.z + sin(speed)*vVector.x + cos(speed)*vVector.z);
mView.x = (float)(mPos.x + cos(speed)*vVector.x - sin(speed)*vVector.z);
}
and the mouse motion code:
void processEvents()
{
int mid_x = screen_width >> 1;
int mid_y = screen_height >> 1;
int mpx = event.motion.x;
int mpy = event.motion.y;
float angle_y = 0.0f;
float angle_z = 0.0f;
while(SDL_PollEvent(&event))
{
switch(event.type)
{
case SDL_MOUSEMOTION:
if( (mpx == mid_x) && (mpy == mid_y) ) return;
// Get the direction from the mouse cursor, set a reasonable maneuvering speed
angle_y = (float)( (mid_x - mpx) ) / 1000; //1000
angle_z = (float)( (mid_y - mpy) ) / 1000; //1000
// The higher the value is the faster the camera looks around.
objCamera.mView.y += angle_z * 2;
// limit the rotation around the x-axis
if((objCamera.mView.y - objCamera.mPos.y) > 8) objCamera.mView.y = objCamera.mPos.y + 8;
if((objCamera.mView.y - objCamera.mPos.y) <-8) objCamera.mView.y = objCamera.mPos.y - 8;
objCamera.Rotate_View(-angle_y);
SDL_WarpMouse(mid_x, mid_y);
break;
case SDL_KEYUP:
objKeyb.handleKeyboardEvent(event,true);
break;
case SDL_KEYDOWN:
objKeyb.handleKeyboardEvent(event,false);
break;
case SDL_QUIT:
quit = true;
break;
case SDL_VIDEORESIZE:
screen = SDL_SetVideoMode( event.resize.w, event.resize.h, screen_bpp, SDL_OPENGL | SDL_HWSURFACE | SDL_RESIZABLE | SDL_GL_DOUBLEBUFFER | SDL_HWPALETTE );
screen_width = event.resize.w;
screen_height = event.resize.h;
init_opengl();
std::cout << "Resized to width: " << event.resize.w << " height: " << event.resize.h << std::endl;
break;
default:
break;
}
}
}

I'm not entirely sure what you are doing above.
Personally I would just use a simple 4x4 matrix. Any implementation will do. To rotate, you simply need to use the change in mouse x and y as Euler inputs for rotation around the y and x axes. There is lots of code available all over the internet that will do this for you.
Some matrix libraries won't provide you with a "MoveForward()" function. If that's the case it's OK; moving forward is pretty easy. The third column (or row, if you are using row-major matrices) is your forward vector. Extract it. Normalise it (it really should be normalised already, so this step may not be needed). Multiply it by how far you wish to move forward and then add it to the position (the 4th column/row).
Now here is the odd part. A view matrix is a special type of matrix. The matrix above defines the camera's space. If you multiply your current model matrix by this matrix you will not get the answer you expect, because you want to transform everything such that the camera ends up at the origin. So you need to, effectively, undo the camera transformation to re-orient things into the view defined above. To do this you multiply your model matrix by the inverse of the camera matrix (which is exactly what a view matrix is).
You now have an object defined in the correct view space.
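To make that concrete, here is a minimal sketch of both ideas, using a hypothetical row-major Mat4 with a plain float m[4][4] member (the type and the Inverse() call are placeholders, not from any particular library):

#include <cmath>

// Hypothetical row-major 4x4: rows 0-2 are the right/up/forward basis
// vectors, row 3 is the position (translation).
struct Mat4 { float m[4][4]; };

// Move the camera along its own forward axis by 'step' units.
void MoveForward(Mat4& camera, float step)
{
    float fx = camera.m[2][0], fy = camera.m[2][1], fz = camera.m[2][2];
    float len = std::sqrt(fx*fx + fy*fy + fz*fz);    // should already be ~1
    if (len > 0.0f) { fx /= len; fy /= len; fz /= len; }
    camera.m[3][0] += fx * step;                     // add onto the position row
    camera.m[3][1] += fy * step;
    camera.m[3][2] += fz * step;
}

// The view matrix is the inverse of the camera matrix; each model is then
// rendered with view * model (or model * view in row-vector conventions).
// Mat4 view = Inverse(camera);   // Inverse() assumed to exist elsewhere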
This is my very simple camera class. It does not handle the functionality you describe, but hopefully it will give you a few ideas on how to set up the class (be warned, I use row-major, i.e. DirectX-style, matrices).
BaseCamera.h:
#ifndef BASE_CAMERA_H_
#define BASE_CAMERA_H_
/*+----+----+----+----+----+----+----+----+----+----+----+----+----+----+----+----+----+----+*/
#include "Maths/Vector4.h"
#include "Maths/Matrix4x4.h"
/*+----+----+----+----+----+----+----+----+----+----+----+----+----+----+----+----+----+----+*/
class BaseCamera
{
protected:
bool mDirty;
MathsLib::Matrix4x4 mCameraMat;
MathsLib::Matrix4x4 mViewMat;
public:
BaseCamera();
BaseCamera( const BaseCamera& camera );
BaseCamera( const MathsLib::Vector4& vPos, const MathsLib::Vector4& vLookAt );
BaseCamera( const MathsLib::Matrix4x4& matCamera );
bool IsDirty() const;
void SetDirty();
MathsLib::Matrix4x4& GetOrientationMatrix();
const MathsLib::Matrix4x4& GetOrientationMatrix() const;
MathsLib::Matrix4x4& GetViewMatrix();
};
/*+----+----+----+----+----+----+----+----+----+----+----+----+----+----+----+----+----+----+*/
inline MathsLib::Matrix4x4& BaseCamera::GetOrientationMatrix()
{
return mCameraMat;
}
/*+----+----+----+----+----+----+----+----+----+----+----+----+----+----+----+----+----+----+*/
inline const MathsLib::Matrix4x4& BaseCamera::GetOrientationMatrix() const
{
return mCameraMat;
}
/*+----+----+----+----+----+----+----+----+----+----+----+----+----+----+----+----+----+----+*/
inline bool BaseCamera::IsDirty() const
{
return mDirty;
}
/*+----+----+----+----+----+----+----+----+----+----+----+----+----+----+----+----+----+----+*/
inline void BaseCamera::SetDirty()
{
mDirty = true;
}
/*+----+----+----+----+----+----+----+----+----+----+----+----+----+----+----+----+----+----+*/
#endif
BaseCamera.cpp:
#include "Render/stdafx.h"
#include "BaseCamera.h"
/*+----+----+----+----+----+----+----+----+----+----+----+----+----+----+----+----+----+----+*/
BaseCamera::BaseCamera() :
mDirty( true )
{
}
/*+----+----+----+----+----+----+----+----+----+----+----+----+----+----+----+----+----+----+*/
BaseCamera::BaseCamera( const BaseCamera& camera ) :
mDirty( camera.mDirty ),
mCameraMat( camera.mCameraMat ),
mViewMat( camera.mViewMat )
{
}
/*+----+----+----+----+----+----+----+----+----+----+----+----+----+----+----+----+----+----+*/
BaseCamera::BaseCamera( const MathsLib::Vector4& vPos, const MathsLib::Vector4& vLookAt ) :
mDirty( true )
{
MathsLib::Vector4 vDir = (vLookAt - vPos).Normalise();
MathsLib::Vector4 vLat = MathsLib::CrossProduct( MathsLib::Vector4( 0.0f, 1.0f, 0.0f ), vDir ).Normalise();
MathsLib::Vector4 vUp = MathsLib::CrossProduct( vDir, vLat );//.Normalise();
mCameraMat.Set( vLat, vUp, vDir, vPos );
}
/*+----+----+----+----+----+----+----+----+----+----+----+----+----+----+----+----+----+----+*/
BaseCamera::BaseCamera( const MathsLib::Matrix4x4& matCamera ) :
mDirty( true ),
mCameraMat( matCamera )
{
}
/*+----+----+----+----+----+----+----+----+----+----+----+----+----+----+----+----+----+----+*/
MathsLib::Matrix4x4& BaseCamera::GetViewMatrix()
{
if ( IsDirty() )
{
mViewMat = mCameraMat.Inverse();
mDirty = false;
}
return mViewMat;
}
/*+----+----+----+----+----+----+----+----+----+----+----+----+----+----+----+----+----+----+*/
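To show how it is meant to be used, here is a short sketch (assuming MathsLib::Vector4 can be constructed from four floats; everything else uses only the members shown above):

// Build a camera 10 units back along -z, looking at the origin.
BaseCamera camera( MathsLib::Vector4( 0.0f, 2.0f, -10.0f, 1.0f ),
                   MathsLib::Vector4( 0.0f, 0.0f,   0.0f, 1.0f ) );

// After editing the orientation matrix (moving/rotating the camera),
// mark the camera dirty so the cached view matrix is rebuilt.
MathsLib::Matrix4x4& orient = camera.GetOrientationMatrix(); // edit in place
camera.SetDirty();

// The view matrix (inverse of the camera matrix) is what you feed to the
// renderer each frame.
const MathsLib::Matrix4x4& view = camera.GetViewMatrix();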

I agree with Goz. You need to use homogeneous 4x4 matrices if you want to represent affine transformations such as rotate + translate.
Assuming a row-major representation, then if there is no scaling or shearing your 4x4 matrix represents the following:
Rows 0 to 2: the three basis vectors of your local co-ordinate system (i.e. x, y, z)
Row 3: the current translation from the origin
So to move along your local x vector, as Goz says, because you can assume it's a unit vector if there is no scale/shear, you just multiply it by the move step (+ve or -ve) and then add the resultant vector onto row 3 of the matrix.
So, taking the simple example of starting at the origin with your local frame aligned to the world frame, your matrix would look something like this:
1 0 0 0 <--- x unit vector
0 1 0 0 <--- y unit vector
0 0 1 0 <--- z unit vector
0 0 0 1 <--- translation vector
In terms of the way most game cameras work, the axes map like this:
x axis <=> Camera Pan Left/Right
y axis <=> Camera Pan Up/Down
z axis <=> Camera Zoom In/Out
So if you rotate your entire frame of reference, say to look at a new point LookAt, then, as Goz does in his BaseCamera overloaded constructor, you construct a new local co-ordinate system and set it into your matrix (all mCameraMat.Set( vLat, vUp, vDir, vPos ) typically does is set those four rows of the matrix, i.e. vLat becomes row 0, vUp row 1, vDir row 2 and vPos row 3).
Then zooming in/out just becomes row 3 += row 2 * stepval.
Again, as Goz rightly points out, you then need to transform everything back into view space for rendering, and this is done by multiplying by the inverse of this camera matrix (i.e. the view matrix).
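As a tiny illustration with the matrix stored as a plain float[4][4] (purely illustrative, not tied to any particular maths library):

// Row-major 4x4: rows 0-2 = x/y/z basis vectors, row 3 = translation.
float mat[4][4] = {
    { 1, 0, 0, 0 },   // x unit vector (pan left/right axis)
    { 0, 1, 0, 0 },   // y unit vector (pan up/down axis)
    { 0, 0, 1, 0 },   // z unit vector (zoom in/out axis)
    { 0, 0, 0, 1 },   // translation
};

// Zoom in/out by stepval: row 3 += row 2 * stepval.
void MoveAlongLocalZ(float m[4][4], float stepval)
{
    for (int i = 0; i < 3; ++i)
        m[3][i] += m[2][i] * stepval;
}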

Related

How to parent an object to another object and affect its position through rotation (make an object rotate around another object)

For context, I'm making a top-down shooter game where the player always rotates to face the mouse cursor. That is easily done, but now I'm stuck on positioning the weapon that the player holds (I separate the weapon entity and the player entity because I want the player to be able to switch weapons). I have to make the weapon rotate to the same angle as the player (also easily done by getting the player's rotation angle and applying it to the weapon as well). The part where I'm really stuck is always positioning the weapon so it revolves around the player (with a small offset).
Without further ado, here's the code:
class Player
{
public:
Player(string skin)
{
this->skin.loadFromFile("gfx/skins/" + skin + ".png");
player.setTexture(this->skin);
player.setOrigin(Vector2f(7, 6.5f));
}
void SetScale(float x, float y)
{
player.setScale(x, y);
}
void SetPosition(float x, float y)
{
x_pos = x;
y_pos = y;
}
Vector2f GetScale()
{
return player.getScale();
}
Vector2f GetPosition()
{
return Vector2f(x_pos, y_pos);
}
float GetRotation()
{
return rotate_angle;
}
void Update(float delta_time, Vector2f mouse_pos)
{
if (Keyboard::isKeyPressed(Keyboard::A) || Keyboard::isKeyPressed(Keyboard::D))
{
if (Keyboard::isKeyPressed(Keyboard::A))
{
vel_x = smoothMotion(-185.f, vel_x, delta_time);
}
if (Keyboard::isKeyPressed(Keyboard::D))
{
vel_x = smoothMotion(185.f, vel_x, delta_time);
}
}
else
vel_x = smoothMotion(0.f, vel_x, delta_time);
if (Keyboard::isKeyPressed(Keyboard::W) || Keyboard::isKeyPressed(Keyboard::S))
{
if (Keyboard::isKeyPressed(Keyboard::W))
{
vel_y = smoothMotion(-185.f, vel_y, delta_time);
}
if (Keyboard::isKeyPressed(Keyboard::S))
{
vel_y = smoothMotion(185.f, vel_y, delta_time);
}
}
else
vel_y = smoothMotion(0.f, vel_y, delta_time);
x_pos += vel_x * delta_time;
y_pos += vel_y * delta_time;
player.setPosition(x_pos, y_pos);
player_mouse_distance = Vector2f(mouse_pos.x - x_pos, mouse_pos.y - y_pos);
rotate_angle = radToDeg(atan2(player_mouse_distance.y, player_mouse_distance.x));
player.setRotation(rotate_angle);
}
void Draw(RenderWindow& window)
{
window.draw(player);
}
public:
Vector2f player_mouse_distance;
private:
Sprite player;
Texture skin;
float x_pos, y_pos;
float vel_x = 0.f, vel_y = 0.f;
float rotate_angle;
};
class Weapon
{
public:
Weapon(string weapon_name)
{
weapon_texture.loadFromFile("gfx/weapons/" + weapon_name + ".png");
weapon.setTexture(weapon_texture);
}
void SetScale(float x, float y)
{
weapon.setScale(x, y);
}
void SetPosition(float x, float y)
{
x_pos = x;
y_pos = y;
}
void Update(Player player, float delta_time)
{
SetPosition((player.GetScale().x * (9 - 7)) /* <- offset */ * cos(player.GetRotation()) + player.GetPosition().x, (player.GetScale().y * (6.5 - 5)) * sin(player.GetRotation()) + player.GetPosition().y);
weapon.setPosition(x_pos, y_pos);
weapon.setRotation(player.GetRotation());
}
void Draw(RenderWindow& window)
{
window.draw(weapon);
}
private:
Sprite weapon;
Texture weapon_texture;
float x_pos, y_pos;
float vel_x = 0.f, vel_y = 0.f;
float rotate_angle;
};
I'm using C++ and SFML 2.5.1 by the way, but any answer using another language or graphics library (like Pygame, etc.) is acceptable too (since the physics uses the same math formulas anyway).
I watched tutorials about this, but most of them use game engines like Unity and Godot. They simply parent the weapon entity to the player entity so that the weapon also changes position when the player rotates.
I figure the cosine and sine functions must be the key to implementing that, but if I'm wrong please correct me.
Any help is appreciated :]
First, in Player::Update(), the rotation angle should be atan2(y, x); do not convert it to degrees, as sin and cos take radians as input.
If other parts of your project rely on Player::rotate_angle being in degrees, you should convert it back to radians in Weapon::Update(). However, I recommend using radians, as all of the standard C++ trig functions take radians as input.
In Weapon::Update(), you are applying different offset multipliers to the x and y arguments of SetPosition: (9 - 7) to the x coordinate and (6.5 - 5) to the y coordinate. These should be single constants instead of expressions like that, and they have to be equal unless you want the weapon to have an elliptical orbit. Replace those expressions with a constant defined somewhere in the Weapon class.
Additionally, player.GetScale() could have different x and y values, so you can replace player.GetScale().x and player.GetScale().y with a new method like Player::GetScaleMagnitude() that returns the length of the vector from player.GetScale() as a float. However, player.GetScale() contributing to an elliptical orbit could be visually desirable, depending on how you want the game to look.
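Putting that together, Weapon::Update() might look roughly like this (a sketch only: OFFSET is a hypothetical constant added to the Weapon class, and degToRad is assumed to exist alongside the question's radToDeg):

void Update(Player player, float delta_time)
{
    // GetRotation() is in degrees (SFML convention), so convert back to
    // radians before feeding it to cos/sin.
    float angle = degToRad(player.GetRotation());
    // One offset constant on both axes gives a circular orbit around the player.
    SetPosition(OFFSET * cos(angle) + player.GetPosition().x,
                OFFSET * sin(angle) + player.GetPosition().y);
    weapon.setPosition(x_pos, y_pos);
    weapon.setRotation(player.GetRotation()); // setRotation expects degrees
}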
I totally agree with Pablo's answer, but I would go a step further:
Implement a parenting system!
Once you implement his solution, you will already be applying one transformation on top of another: the weapon's final transformation will be a composition of its own transformation (the offset from the player) and the player's transformation (its position + orientation).
I won't go into the exact formulas involved in composing the transformations (Pablo already gave a good answer on that; the standard 2D composition is filled in below). Here is the architecture of a parentable system:
class TransformationNode
{
public :
TransformationNode(TransformationNode* _parent = nullptr)
: parent(_parent)
{
}
void SetPosition(const float x, const float y)
{
localX = x;
localY = y;
}
void SetAngle(const float angle)
{
localAngle = angle;
}
void computeGlobalCoords()
{
if (parent)
{
// Standard 2D composition: rotate the local offset by the parent's global
// angle, then translate by the parent's global position (needs <cmath>).
const float c = cos(parent->globalAngle);
const float s = sin(parent->globalAngle);
globalX = parent->globalX + localX * c - localY * s;
globalY = parent->globalY + localX * s + localY * c;
globalAngle = localAngle + parent->globalAngle;
}
else
{
globalX = localX;
globalY = localY;
globalAngle = localAngle;
}
}
private :
float localX, localY, localAngle;
float globalX, globalY, globalAngle;
TransformationNode* parent;
};
And then you'll have both Player and Weapon inherit from TransformationNode. I haven't compiled the code; it's just to get the idea across.
By the way, I strongly recommend looking at transformation matrices. They are better to work with than individual positions and angles.
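For instance, usage could look roughly like this (illustrative only; the constructors are simplified compared to the question's Player and Weapon):

Player player;                    // no parent, so global == local
Weapon weapon(&player);           // parented to the player

weapon.SetPosition(12.0f, 0.0f);  // constant offset in the player's local frame
weapon.SetAngle(0.0f);            // aligned with the player's facing

// Each frame, after moving/rotating the player:
player.computeGlobalCoords();
weapon.computeGlobalCoords();     // weapon now follows and rotates with the player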

Tiles being drawn in the wrong location

I've finally managed to get my tiles drawn on the screen more or less correctly, although the location is a bit off and I can't seem to figure out why...
I'm using SFML for drawing.
Tile.hpp:
#ifndef TILE_HPP
#define TILE_HPP
#include <SFML/Graphics.hpp>
#include <SFML/System.hpp>
#include "textureManager.hpp"
class Tile {
public:
Tile();
Tile(sf::Vector2i coord, int biome);
~Tile();
sf::Vector2i getCoord() const { return coord; };
int getBiome() const { return biome; };
void setCoord(sf::Vector2i coord) { this->coord = coord; };
void setBiome(int biome) { this->biome = biome; };
void draw(int x, int y, sf::RenderWindow* rw);
void update(sf::Texture& texture);
private:
sf::Vector2i coord;
int biome;
sf::Sprite sprite;
};
#endif
Tile.cpp
#include <SFML/Graphics.hpp>
#include <SFML/System.hpp>
#include "textureManager.hpp"
#include "tile.hpp"
Tile::Tile()
{}
Tile::Tile(sf::Vector2i coord, int biome) {
this->biome = biome;
this->coord = coord;
}
Tile::~Tile(){}
void Tile::draw(int x, int y, sf::RenderWindow* rw)
{
sprite.setPosition(x, y);
rw->draw(sprite);
}
void Tile::update(sf::Texture& texture)
{
switch (biome)
{
// Not important here
}
}
Now the more relevant part: the drawing
void StatePlay::draw(const float dt)
{
game->window.setView(view);
game->window.clear(sf::Color::Black);
sf::Vector2f offset = camera.getLocation();
int newX = (offset.x / map.getTileSize()) - (map.chunkSize / 2);
int newY = (offset.y / map.getTileSize()) - (map.chunkSize / 2);
for (int x = 0; x < map.chunkSize; x++)
{
for (int y = 0; y < map.chunkSize; y++)
{
Tile tile = map.getTile(newX + x, newY + y);
tile.draw((newX + x) * map.getTileSize(), (newY + y) * map.getTileSize(), &game->window);
}
}
return;
}
StatePlay::StatePlay(Game* game)
{
this->game = game;
sf::Vector2f pos = sf::Vector2f(game->window.getSize()); // 1366x768
view.setSize(pos);
pos *= 0.5f; // 688x384
view.setCenter(pos);
// Initialize map
map.init(game->gameTime, game->textureManager.getImage("tileset.png"));
float w = (float) map.getWidth(); // 500
float h = (float) map.getHeight(); // 500
w *= 0.5f; // 250
h *= 0.5f; // 250
w *= map.getTileSize(); // 250 * 32 = 8000
h *= map.getTileSize(); // 250 * 32 = 8000
// Move camera
// Uses view::move from sfml to move the view with w and h
// Also sets camera private to w and h values, return with camera::getLocation()
camera.setLocation(&view, sf::Vector2f(w, h));
}
The result is that I only see roughly 10x10 tiles, in the bottom-left corner of my screen, covering about 3/4 of it.
The correct tiles are chosen, but the draw location is wrong... It should draw the center of the 64x64 tile area (32 px per tile), or as much of it as fits on the screen.
I have fixed the problem. It was a very stupid mistake...
Before drawing anything, it is normal to center the view on 0.5f * sf::View::getSize() so that the view is centered in your window, so the center was already at half of my window size. In Camera::setLocation() I then used sf::View::move() to move the view, so when trying to center it on the map it added the map x and y correctly, but on top of that half-window offset. This resulted in an incorrect offset. Subtracting those values (or leaving them out entirely) fixed this stupid problem.
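In code terms, the fix inside Camera::setLocation() boils down to something like this (a sketch of the idea, not the actual class):

// view: sf::View*, target: sf::Vector2f
// Before (wrong): the view was already centred at 0.5f * window size,
// so moving it adds the map offset on top of that half-window offset.
// view->move(target);

// After (right): set the centre absolutely, so nothing extra is added.
view->setCenter(target);
location = target;   // stored so getLocation() can return it later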
Thank you for the help.

World to screen space coordinates in OpenSceneGraph

So I've got a class Label that inherits from osg::Geode, which I draw in world space in OpenSceneGraph. After displaying each frame, I then want to read the screen-space coordinates of each Label, so I can find out how much they overlap in screen space. To this end, I created a class ScreenSpace which should calculate this (the interesting function is calc_screen_coords).
I wrote a small subroutine that dumps each frame with some extra information, including the ScreenSpace box which represents what the program thinks the screen space coordinates are:
Now in the above picture, there seems to be no problem; but if I rotate it to the other side (with my mouse), then it looks quite different:
And that is what I don't understand.
Is my world to screen space calculation wrong?
Or am I getting the wrong BoundingBox from the Drawable?
Or maybe it has something to do with the setAutoRotateToScreen(true) directive that I give the osgText::Text object?
Is there a better way to do this? Should I try to use a Billboard instead? How would I do that though? (I tried and it totally didn't work for me — I must be missing something...)
Here is the source code for calculating the screen space coordinates of a Label:
struct Pixel {
// elided methods...
int x;
int y;
};
// Forward declarations:
pair<Pixel, Pixel> calc_screen_coords(const osg::BoundingBox& box, const osg::Camera* cam);
void rearrange(Pixel& left, Pixel& right);
class ScreenSpace {
public:
ScreenSpace(const Label* label, const osg::Camera* cam)
{
BoundingBox box = label->getDrawable(0)->computeBound();
tie(bottom_left_, upper_right_) = calc_screen_coords(box, cam);
rearrange(bottom_left_, upper_right_);
}
// elided methods...
private:
Pixel bottom_left_;
Pixel upper_right_;
};
pair<Pixel, Pixel> calc_screen_coords(const osg::BoundingBox& box, const osg::Camera* cam)
{
Vec4d vec (box.xMin(), box.yMin(), box.zMin(), 1.0);
Vec4d veq (box.xMax(), box.yMax(), box.zMax(), 1.0);
Matrixd transmat
= cam->getViewMatrix()
* cam->getProjectionMatrix()
* cam->getViewport()->computeWindowMatrix();
vec = vec * transmat;
vec = vec / vec.w();
veq = veq * transmat;
veq = veq / veq.w();
return make_pair(
Pixel(static_cast<int>(vec.x()), static_cast<int>(vec.y())),
Pixel(static_cast<int>(veq.x()), static_cast<int>(veq.y()))
);
}
inline void swap(int& v, int& w)
{
int temp = v;
v = w;
w = temp;
}
inline void rearrange(Pixel& left, Pixel& right)
{
if (left.x > right.x) {
swap(left.x, right.x);
}
if (left.y > right.y) {
swap(left.y, right.y);
}
}
And here is the construction of Label (I tried to abridge it a little):
// Forward declaration:
Geometry* createLeader(straph::Point pos, double height, Color color);
class Label : public osg::Geode {
public:
Label(font, fontSize, text, color, position, height, margin, bgcolor, leaderColor)
{
osgText::Text* txt = new osgText::Text;
txt->setFont(font);
txt->setColor(color.vec4());
txt->setCharacterSize(fontSize);
txt->setText(text);
// Set display properties and height
txt->setAlignment(osgText::TextBase::CENTER_BOTTOM);
txt->setAutoRotateToScreen(true);
txt->setPosition(toVec3(position, height));
// Create bounding box and leader
typedef osgText::TextBase::DrawModeMask DMM;
unsigned drawMode = DMM::TEXT | DMM::BOUNDINGBOX;
drawMode |= DMM::FILLEDBOUNDINGBOX;
txt->setBoundingBoxColor(bgcolor.vec4());
txt->setBoundingBoxMargin(margin);
txt->setDrawMode(drawMode);
this->addDrawable(txt);
Geometry* leader = createLeader(position, height, leaderColor);
this->addDrawable(leader);
}
// elided methods and data members...
};
Geometry* createLeader(straph::Point pos, double height, Color color)
{
Geometry* leader = new Geometry();
Vec3Array* array = new Vec3Array();
array->push_back(Vec3(pos.x, pos.y, height));
array->push_back(Vec3(pos.x, pos.y, 0.0f));
Vec4Array* colors = new Vec4Array(1);
(*colors)[0] = color.vec4();
leader->setColorArray(colors);
leader->setColorBinding(Geometry::BIND_OVERALL);
leader->setVertexArray(array);
leader->addPrimitiveSet(new DrawArrays(PrimitiveSet::LINES, 0, 2));
LineWidth* lineWidth = new osg::LineWidth();
lineWidth->setWidth(2.0f);
leader->getOrCreateStateSet()->setAttributeAndModes(lineWidth, osg::StateAttribute::ON);
return leader;
}
Any pointers or help?
I found a solution that works for me, but it is also unsatisfying, so if you have a better solution, I'm all ears.
Basically, I take different points of the Label whose locations I know, and combine them to calculate the screen space. For the left and right sides I take the bounds of the regular bounding box, and for the top and bottom I calculate it from the center of the bounding box and the position of the label.
ScreenSpace::ScreenSpace(const Label* label, const osg::Camera* cam)
{
const Matrixd transmat
= cam->getViewMatrix()
* cam->getProjectionMatrix()
* cam->getViewport()->computeWindowMatrix();
auto topixel = [&](Vec3 v) -> Pixel {
Vec4 vec(v.x(), v.y(), v.z(), 1.0);
vec = vec * transmat;
vec = vec / vec.w();
return Pixel(static_cast<int>(vec.x()), static_cast<int>(vec.y()));
};
// Get left right coordinates
vector<int> xs; xs.reserve(8);
vector<int> ys; ys.reserve(8);
BoundingBox box = label->getDrawable(0)->computeBound();
for (int i=0; i < 8; i++) {
Pixel p = topixel(box.corner(i));
xs.push_back(p.x);
ys.push_back(p.y);
};
int xmin = *min_element(xs.begin(), xs.end());
int xmax = *max_element(xs.begin(), xs.end());
// Get up-down coordinates
int ymin = topixel(dynamic_cast<const osgText::Text*>(label->getDrawable(0))->getPosition()).y;
int center = topixel(box.center()).y;
int ymax = center + (center - ymin);
bottom_left_ = Pixel(xmin, ymin);
upper_right_ = Pixel(xmax, ymax);
z_ = distance_from_camera(label, cam);
}

How to convert mouse coordinate on screen to 3D coordinate

I'm creating a 3D application using GLUT in C++.
Now, I want to implement a method similar to this:
Vector3* MyClass::get3DObjectfromMouse(int mouseX, int mouseY);
How can I implement this method?
As Andon M. Coleman commented, one way you can achieve this is by doing a ray/object intersection test with unprojected screen coordinates. This technique is commonly known as picking.
Some pseudo-C++ code for picking:
Assume we have a 3D object type/class:
class Object3D { ... };
A 3D picking function would return a list of all objects that are intersected by a line going from the given 2D point in the near plane to the same point in the far plane.
struct LineSegment
{
Vector3 start;
Vector3 end;
};
Object3D[] Pick(float x, float y)
{
LineSegment lineSeg;
Object3D[] intersectedObjs;
// Do both un-projections for z-near (0) and z-far (1).
// This produces a line segment going from z-near to far.
UnProject(x, y, /* z = */ 0.0, modelViewMatrix, projectionMatrix, viewport, lineSeg.start);
UnProject(x, y, /* z = */ 1.0, modelViewMatrix, projectionMatrix, viewport, lineSeg.end);
// Iterate all object in the scene or in the current view:
for (Object3D obj : scene)
{
if (TestLineIntersection(obj, lineSeg))
{
// This object is crossed by the picking line.
intersectedObjs.Add(obj);
}
}
// Optionally you might want sort them from distance
// to the camera/viewer before returning the intersections.
return intersectedObjs;
}
And the UnProject() function would look like this:
bool UnProject(float winX, float winY, float winZ,
const Matrix4 & modelView, const Matrix4 & projection,
const ScreenRect viewport, Vector3 & worldCoordinates)
{
// Compute (projection x modelView) ^ -1:
const Matrix4 m = inverse(projection * modelView);
// Need to invert Y since screen Y-origin point down,
// while 3D Y-origin points up (this is an OpenGL only requirement):
winY = viewport.Height() - winY;
// Transformation of normalized coordinates between -1 and 1:
Vector4 in;
in[0] = (winX - viewport.X()) / viewport.Width() * 2.0 - 1.0;
in[1] = (winY - viewport.Y()) / viewport.Height() * 2.0 - 1.0;
in[2] = 2.0 * winZ - 1.0;
in[3] = 1.0;
// To world coordinates:
Vector4 out(m * in);
if (out[3] == 0.0) // Avoid a division by zero
{
worldCoordinates = Vector3Zero;
return false;
}
out[3] = 1.0 / out[3];
worldCoordinates[0] = out[0] * out[3];
worldCoordinates[1] = out[1] * out[3];
worldCoordinates[2] = out[2] * out[3];
return true;
}
To clarify, TestLineIntersection() does a line vs AABB intersection test. The bounding box should be transformed to world-space, since it is usually expressed as a set of points in local model-space.
bool TestLineIntersection(const Object3D & obj, const LineSegment & lineSeg)
{
AABB aabb = obj.GetAABB();
aabb.TransformBy(obj.modelMatrix);
return aabb.LineIntersection(lineSeg.start, lineSeg.end);
}
// AABB.cpp:
bool AABB::LineIntersection(const Vector3 & start, const Vector3 & end) const
{
const Vector3 center = (mins + maxs) * 0.5;
const Vector3 extents = maxs - center;
const Vector3 lineDir = 0.5 * (end - start);
const Vector3 lineCenter = start + lineDir;
const Vector3 dir = lineCenter - center;
const float ld0 = Mathf::Abs(lineDir[0]);
if (Mathf::Abs(dir[0]) > (extents[0] + ld0))
{
return false;
}
const float ld1 = Mathf::Abs(lineDir[1]);
if (Mathf::Abs(dir[1]) > (extents[1] + ld1))
{
return false;
}
const float ld2 = Mathf::Abs(lineDir[2]);
if (Mathf::Abs(dir[2]) > (extents[2] + ld2))
{
return false;
}
const Vector3 vCross = cross(lineDir, dir);
if (Mathf::Abs(vCross[0]) > (extents[1] * ld2 + extents[2] * ld1))
{
return false;
}
if (Mathf::Abs(vCross[1]) > (extents[0] * ld2 + extents[2] * ld0))
{
return false;
}
if (Mathf::Abs(vCross[2]) > (extents[0] * ld1 + extents[1] * ld0))
{
return false;
}
return true;
}
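For completeness, here is how this might be hooked into a GLUT mouse callback, continuing the pseudo-C++ style of the snippets above (onMouse and the selection handling are illustrative only):

// Registered once at start-up with: glutMouseFunc(onMouse);
void onMouse(int button, int state, int x, int y)
{
    if (button == GLUT_LEFT_BUTTON && state == GLUT_DOWN)
    {
        // Pick()/UnProject() flip Y internally, so raw window coordinates
        // can be passed straight through.
        Object3D[] hits = Pick((float)x, (float)y);
        // ... pick the closest object in 'hits', highlight it, etc.
    }
}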

OpenGL draw circle, weird bugs

I'm no mathematician, but I need to draw a filled-in circle.
My approach was to use someone else's math to get all the points on the circumference of a circle and turn them into a triangle fan.
I need the vertices in a vertex array, no immediate mode.
The circle does appear. However, when I try to overlay circles, strange things happen: they appear for only a second and then disappear, and when I move my mouse out of the window a triangle sticks out from nowhere.
Here's the class:
class circle
{
//every coordinate with have an X and Y
private:
GLfloat *_vertices;
static const float DEG2RAD = 3.14159/180;
GLfloat _scalex, _scaley, _scalez;
int _cachearraysize;
public:
circle(float scalex, float scaley, float scalez, float radius, int numdegrees)
{
//360 degrees, 2 per coordinate, 2 coordinates for center and end of triangle fan
_cachearraysize = (numdegrees * 2) + 4;
_vertices = new GLfloat[_cachearraysize];
for(int x= 2; x < (_cachearraysize-2); x = x + 2)
{
float degreeinRadians = x*DEG2RAD;
_vertices[x] = cos(degreeinRadians)*radius;
_vertices[x + 1] = sin(degreeinRadians)*radius;
}
//get the X as X of 0 and X of 180 degrees, subtract to get diameter. divide
//by 2 for radius and add back to X of 180
_vertices[0]= ((_vertices[2] - _vertices[362])/2) + _vertices[362];
//same idea for Y
_vertices[1]= ((_vertices[183] - _vertices[543])/2) + _vertices[543];
//close off the triangle fan at the same point as start
_vertices[_cachearraysize -1] = _vertices[0];
_vertices[_cachearraysize] = _vertices[1];
_scalex = scalex;
_scaley = scaley;
_scalez = scalez;
}
~circle()
{
delete[] _vertices;
}
void draw()
{
glScalef(_scalex, _scaley, _scalez);
glVertexPointer(2,GL_FLOAT, 0, _vertices);
glDrawArrays(GL_TRIANGLE_FAN, 0, _cachearraysize);
}
};
That's some ugly code, I'd say: lots of magic numbers, et cetera.
Try something like:
struct Point {
Point(float x, float y) : x(x), y(y) {}
float x, y;
};
std::vector<Point> points;
const float step = 0.1;
const float radius = 2;
points.push_back(Point(0,0));
// iterate over the angle array
for (float a=0; a<2*M_PI; a+=step) {
points.push_back(Point(cos(a)*radius, sin(a)*radius));
}
// duplicate the first vertex after the centre
points.push_back(points.at(1));
// rendering:
glEnableClientState(GL_VERTEX_ARRAY);
glVertexPointer(2,GL_FLOAT,0, &points[0]);
glDrawArrays(GL_TRIANGLE_FAN,0,points.size());
It's up to you to rewrite this as a class if you prefer. The math behind it is really simple; don't be afraid to try to understand it.
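For example, wrapped up as a small class it could look something like this (a sketch using the same fixed-function calls as above; the class name and members are made up):

#include <cmath>
#include <vector>
// plus your platform's OpenGL header

struct Point {
    Point(float x, float y) : x(x), y(y) {}
    float x, y;
};

class FilledCircle {
public:
    FilledCircle(float radius, float step = 0.1f) {
        points.push_back(Point(0.0f, 0.0f));                 // fan centre
        for (float a = 0.0f; a < 2.0f * 3.14159265f; a += step)
            points.push_back(Point(std::cos(a) * radius, std::sin(a) * radius));
        points.push_back(points.at(1));                      // close the fan
    }

    void draw() const {
        glEnableClientState(GL_VERTEX_ARRAY);
        glVertexPointer(2, GL_FLOAT, 0, &points[0]);
        glDrawArrays(GL_TRIANGLE_FAN, 0, (GLsizei)points.size());
        glDisableClientState(GL_VERTEX_ARRAY);
    }

private:
    std::vector<Point> points;
};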