Trying to implement Camera - c++

I am trying to implement a Camera class so I can walk and look on the world as follows:
#ifndef _CAMERA_H_
#define _CAMERA_H_
#include <glm\glm.hpp>
// Simple free-look camera: mouse deltas rotate the view direction,
// the Move*() helpers translate the position along the camera axes.
class Camera
{
public:
Camera();
~Camera();
// Feed the current mouse position; rotates viewDirection by the delta
// from the previous call.
void Update(const glm::vec2& newXY);
//if by = 0.0 it means, it will use the const Class speed to scale it
void MoveForward(const float by = 0.0f);
// NOTE(review): "MoveBackword"/"MoveLef" are misspelled (MoveBackward /
// MoveLeft), but renaming would break existing callers.
void MoveBackword(const float by = 0.0f);
void MoveLef(const float by = 0.0f);
void MoveRight(const float by = 0.0f);
void MoveUp(const float by = 0.0f);
void MoveDown(const float by = 0.0f);
// Replace the default movement speed used when by == 0.
void Speed(const float speed = 0.0f);
// Mutable accessors — callers can modify camera state directly.
glm::vec3& GetCurrentPosition();
glm::vec3& GetCurrentDirection();
// World -> view matrix built with glm::lookAt.
glm::mat4 GetWorldToView() const;
private:
glm::vec3 position, viewDirection, strafeDir;
// Previous mouse position (named YX but stores the XY pair as given).
glm::vec2 oldYX;
float speed;
const glm::vec3 up;
};
#endif
#include "Camera.h"
#include <glm\gtx\transform.hpp>
// Construct a camera at the origin looking down -Z.
// FIX: position, strafeDir and oldYX were previously left uninitialized —
// glm vectors are not guaranteed to be zero-initialized (depends on
// GLM_FORCE_CTOR_INIT), so the first Move/Update calls could act on garbage.
// Members are listed in declaration order (the order they initialize in).
Camera::Camera()
:position(0.0f, 0.0f, 0.0f),
viewDirection(0.0f, 0.0f, -1.0f),
strafeDir(1.0f, 0.0f, 0.0f), // cross(viewDirection, up) for the defaults
oldYX(0.0f, 0.0f),
speed(0.1f),
up(0.0f, 1.0f, 0.0f)
{
}
// No resources to release; defaulted for clarity.
Camera::~Camera() = default;
// Rotate the view direction from the mouse-movement delta (newXY - oldYX).
// FIX: removed the unused local `length` (the magnitude was computed twice).
void Camera::Update(const glm::vec2& newXY)
{
const glm::vec2 delta = newXY - oldYX;
// Ignore sudden large jumps (e.g. the cursor re-entering the window)
// so the camera does not snap around.
if (glm::length(delta) < 50.f)
{
// Horizontal mouse motion yaws around the world up axis, vertical
// motion pitches around the current strafe (right) axis.
strafeDir = glm::cross(viewDirection, up);
const glm::mat4 rotation = glm::rotate(-delta.x * speed, up) *
glm::rotate(-delta.y * speed, strafeDir);
viewDirection = glm::mat3(rotation) * viewDirection;
}
oldYX = newXY;
}
void Camera::Speed(const float speed)
{
this->speed = speed;
}
// Advance along the view direction; by == 0 selects the default speed.
void Camera::MoveForward(const float by)
{
const float step = (by == 0.0f) ? speed : by;
position += step * viewDirection;
}
// Step backwards along the view direction; by == 0 selects the default speed.
void Camera::MoveBackword(const float by)
{
const float step = (by == 0.0f) ? speed : by;
position -= step * viewDirection;
}
// Strafe left (opposite to strafeDir); by == 0 selects the default speed.
void Camera::MoveLef(const float by )
{
const float step = (by == 0.0f) ? speed : by;
position -= step * strafeDir;
}
// Strafe right along strafeDir; by == 0 selects the default speed.
// FIX: this previously did `position += -s * strafeDir`, byte-identical to
// MoveLef() — both bindings moved the camera to the left.
void Camera::MoveRight(const float by )
{
float s = by == 0.0f ? speed : by;
position += s * strafeDir;
}
// Raise the camera along the world up axis; by == 0 selects the default speed.
void Camera::MoveUp(const float by )
{
const float step = (by == 0.0f) ? speed : by;
position += step * up;
}
// Lower the camera along the world up axis; by == 0 selects the default speed.
void Camera::MoveDown(const float by )
{
const float step = (by == 0.0f) ? speed : by;
position -= step * up;
}
// Mutable reference to the world-space camera position.
glm::vec3& Camera::GetCurrentPosition()
{
return position;
}
// Mutable reference to the view direction.
glm::vec3& Camera::GetCurrentDirection()
{
return viewDirection;
}
// Build the world -> view matrix from the current position and direction.
// glm::lookAt already returns the inverse camera transform, so the result
// can be used directly as a view matrix.
glm::mat4 Camera::GetWorldToView() const
{
return glm::lookAt(position, position + viewDirection, up);
}
and I update and render as follow :
// Per-frame update: clears the framebuffer and uploads the camera's
// world-to-view matrix to the shader.
void Game::OnUpdate()
{
// NOTE(review): glLoadIdentity() is fixed-function state and has no effect
// on the shader pipeline used below — presumably leftover; confirm and remove.
glLoadIdentity();
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
// The pointer into the temporary mat4 stays valid for the duration of this
// full expression, so passing it straight to glUniformMatrix4fv is safe.
glUniformMatrix4fv(program->GetUniformLocation("modelToViewWorld"), 1, GL_FALSE, &cam.GetWorldToView()[0][0]);
}
// Draw the model; the camera uniform was already uploaded in OnUpdate().
void Game::OnRender()
{
model->Draw();
}
Where the vertex shader looks like:
#version 410
layout (location = 0) in vec3 inVertex;
layout (location = 1) in vec2 inTexture;
layout (location = 2) in vec3 inNormal;
uniform mat4 modelToViewWorld;
void main()
{
// FIX: use the full 4x4 matrix. Casting to mat3 discarded the fourth
// (translation) column, so camera movement had no effect and only the
// rotation reached the vertices — which made it look like the model was
// rotating around itself.
gl_Position = modelToViewWorld * vec4(inVertex, 1.0);
}
But I am moving/rotating the Model itself, not the camera around it. What am I doing wrong here?

I think the problem is that you are not inverting the view matrix. The model-view matrix is just a product of a model->world coordinate transformation and a world->view coordinate transformation. The first one takes coordinates in the local model space and transforms them to world space, and therefore needs no inversion. The second one, however, takes coordinates in world space and transforms them into the camera's local coordinate system; since it is the opposite of the camera's own world transform, it needs to be inverted.

You are not rotating the model, you are rotating the view direction.
viewDirection = glm::mat3(rotation) * viewDirection;
What you want to do is to rotate the center of the camera around the object and then set the direction of the camera towards the object.
For example:
position = vec3( radius * cos(t), radius * sin(t), 0);
direction = normalize(-position);

Related

How to set the pitch, yaw, roll of a quaternion

How to set the pitch, yaw, roll of a quaternion
What I need help with:
Hello! I have a quaternion that stores the orientation of my camera and what I would like to do is set the pitch, yaw and roll of the quaternion relative to the world.
Picture this...
What I mean by that is: imagine 4 walls around you, each with an arrow pointing toward the world's up direction. No matter the quaternion's current pitch, yaw and roll, if I'm looking at any of the arrows (any yaw) with a little roll applied (so the arrow points anywhere but up), then calling setRoll(0°) should make the camera see the arrow pointing straight up, and calling setRoll(90°) — no matter how many times it is called — should set the camera's roll to exactly 90°, so the arrow points to the left.
My code:
Here is my full camera class below. The only code that really matters are the setPitch, setYaw and setRoll functions.
Camera.hpp
#ifndef CAMERA_HPP
#define CAMERA_HPP
// GLAD
#include <glad/glad.h>
// GLM
#include <glm/glm.hpp>
#include <glm/gtc/matrix_transform.hpp>
#include <glm/gtc/type_ptr.hpp>
#include <glm/gtc/quaternion.hpp>
// Quaternion-based camera: stores position + orientation and caches the
// derived axis vectors and view/projection/view-projection matrices.
// NOTE(review): angle parameters throughout appear to be degrees
// (pitch()/yaw()/roll() convert with glm::radians) — confirm for set*().
class QuaternionCamera
{
public:
QuaternionCamera(glm::vec3 position, float zNear, float zFar);
QuaternionCamera(glm::vec3 position, glm::quat orientation, float zNear, float zFar);
QuaternionCamera(glm::vec3 position, glm::vec3 orientation, float zNear, float zFar);
// Set the position in world coords
void setPosition(glm::vec3 position);
// Set the orientation relative to the world
void setOrientation(glm::quat orientation);
// Set the orientation relative to the world
void setOrientation(glm::vec3 orientation);
// Set the pitch relative to the world
void setPitch(float amount);
// Set the yaw relative to the world
void setYaw(float amount);
// Set the roll relative to the world
void setRoll(float amount);
// Set the camera's near plane
void setZNear(float zNear);
// Set the camera's far plane
void setZFar(float zFar);
// Move the camera relative to it's current location
void move(glm::vec3 movement);
// Move the camera relative to it's current location on the world axis
void moveAxis(glm::vec3 translation);
// Rotate the camera relative to it's current location
void rotate(glm::quat rotation);
// Rotate the camera relative to it's current location
void rotate(glm::vec3 rotation);
// Pitch the camera relative to it's current location
void pitch(float amount);
// Yaw the camera relative to it's current location
void yaw(float amount);
// Roll the camera relative to it's current location
void roll(float amount);
// Get the camera's position in world coords
glm::vec3 getPosition() const;
// Get the camera's direction relative to the world
glm::vec3 getDirection() const;
// Get the camera's right vector
glm::vec3 getRight() const;
// Get the camera's up vector
glm::vec3 getUp() const;
// Get the camera's rotation relative to the world
glm::quat getOrientation() const;
// Get the camera's near plane
float getZNear() const;
// Get the camera's far plane
float getZFar() const;
// Get the camera's view matrix
glm::mat4 getViewMatrix() const;
// Get the camera's projection matrix
glm::mat4 getProjectionMatrix() const;
// Get the camera's view projection matrix
glm::mat4 getViewProjectionMatrix() const;
protected:
// updateProjectionMatrix is virtual because it will be defined in the two derived camera classes.
// This is because the camera class does not know if we want a perspective or orthographic viewport.
// So because the viewport is defined in a derived class, and because we change the projection matrix
// in the base camera class (zNear and zFar), we need a way to get an updated projection matrix!
virtual void updateProjectionMatrix() = 0;
void updateViewProjectionMatrix();
float zNear;
float zFar;
glm::mat4 projectionMatrix;
glm::mat4 viewProjectionMatrix;
private:
// Cache maintenance: recompute right/up/direction from `orientation`,
// and the view matrix from position + those vectors.
void updateCameraVectors();
void updateViewMatrix();
glm::mat4 viewMatrix;
glm::vec3 position;
glm::quat orientation;
glm::vec3 direction; // Camera Direction / Camera View Facing
glm::vec3 right;
glm::vec3 up;
};
#endif
Camera.cpp
// Construct with identity orientation, looking down -Z.
QuaternionCamera::QuaternionCamera(glm::vec3 position, float zNear, float zFar)
{
this->position = position;
this->orientation = glm::quat(1.0f, 0.0f, 0.0f, 0.0f); // glm::quat constructor is (w, x, y, z) but it is stored as (x, y, z, w)
right = glm::vec3( 1, 0, 0);
up = glm::vec3( 0, 1, 0);
direction = glm::vec3( 0, 0, -1);
this->zNear = zNear;
this->zFar = zFar;
// NOTE(review): updateViewProjectionMatrix() reads projectionMatrix, which
// the derived class has not built yet at this point — verify init order.
updateViewMatrix();
updateViewProjectionMatrix();
}
// Construct with an explicit orientation quaternion.
QuaternionCamera::QuaternionCamera(glm::vec3 position, glm::quat orientation, float zNear, float zFar)
{
this->position = position;
this->orientation = orientation;
right = glm::vec3( 1, 0, 0);
up = glm::vec3( 0, 1, 0);
direction = glm::vec3( 0, 0, -1);
this->zNear = zNear;
this->zFar = zFar;
// FIX: derive right/up/direction from the supplied orientation. Without
// this, the axis vectors kept their identity-orientation defaults for a
// rotated camera (the vec3-orientation constructor already did this).
updateCameraVectors();
updateViewMatrix();
updateViewProjectionMatrix();
}
// Construct from Euler angles given in degrees (pitch, yaw, roll).
QuaternionCamera::QuaternionCamera(glm::vec3 position, glm::vec3 orientation, float zNear, float zFar)
{
this->position = position;
this->orientation = glm::quat(glm::vec3(glm::radians(orientation.x), glm::radians(orientation.y), glm::radians(orientation.z)));
right = glm::vec3( 1, 0, 0);
up = glm::vec3( 0, 1, 0);
direction = glm::vec3( 0, 0, -1);
this->zNear = zNear;
this->zFar = zFar;
// Derive the cached axis vectors from the supplied rotation.
updateCameraVectors();
updateViewMatrix();
updateViewProjectionMatrix();
}
// Set the position in world coords.
void QuaternionCamera::setPosition(glm::vec3 position)
{
this->position = position;
// FIX: rebuild the cached matrices — previously getViewMatrix() kept
// returning the view from the old position (move()/moveAxis() already
// refresh the caches; this setter must too).
updateViewMatrix();
updateViewProjectionMatrix();
}
// Set the orientation relative to the world.
void QuaternionCamera::setOrientation(glm::quat orientation)
{
this->orientation = orientation;
// FIX: keep the cached axis vectors and matrices in sync with the new
// orientation, as rotate() does — previously they all went stale.
updateCameraVectors();
updateViewMatrix();
updateViewProjectionMatrix();
}
// Set the orientation relative to the world from Euler angles.
// NOTE(review): unlike the vec3 constructor this overload treats the
// components as radians (glm::quat(vec3) expects radians) — confirm whether
// degrees were intended here.
void QuaternionCamera::setOrientation(glm::vec3 orientation)
{
glm::quat orientationQuat = glm::quat(orientation);
this->orientation = orientationQuat;
// FIX: refresh the cached axis vectors and matrices (previously stale).
updateCameraVectors();
updateViewMatrix();
updateViewProjectionMatrix();
}
void QuaternionCamera::setPitch(float amount)
{
// TODO:
updateCameraVectors();
updateViewMatrix();
updateViewProjectionMatrix();
}
void QuaternionCamera::setYaw(float amount)
{
// TODO:
updateCameraVectors();
updateViewMatrix();
updateViewProjectionMatrix();
}
void QuaternionCamera::setRoll(float amount)
{
// TODO:
updateCameraVectors();
updateViewMatrix();
updateViewProjectionMatrix();
}
// Update the near clip plane and rebuild the projection.
// NOTE(review): viewProjectionMatrix is refreshed only if the derived
// updateProjectionMatrix() also calls updateViewProjectionMatrix();
// otherwise the combined matrix goes stale here — verify.
void QuaternionCamera::setZNear(float zNear)
{
this->zNear = zNear;
updateProjectionMatrix();
}
// Update the far clip plane and rebuild the projection (same caveat as above).
void QuaternionCamera::setZFar(float zFar)
{
this->zFar = zFar;
updateProjectionMatrix();
}
// Translate relative to the camera's own axes: movement.x strafes along the
// rotated +X, .y along the rotated +Y, .z along the rotated -Z ("forward").
void QuaternionCamera::move(glm::vec3 movement)
{
position += (orientation * glm::vec3(1, 0, 0)) * movement.x + (orientation * glm::vec3(0, 1, 0)) * movement.y + (orientation * glm::vec3(0, 0, -1)) * movement.z;
updateViewMatrix();
updateViewProjectionMatrix();
}
// Translate along the fixed world axes.
void QuaternionCamera::moveAxis(glm::vec3 translation)
{
position += translation;
updateViewMatrix();
updateViewProjectionMatrix();
}
// Apply an incremental rotation. Right-multiplying (orientation *= rotation)
// applies `rotation` in the camera's local frame.
void QuaternionCamera::rotate(glm::quat rotation)
{
orientation *= rotation;
updateCameraVectors();
updateViewMatrix();
updateViewProjectionMatrix();
}
// Incremental rotation from Euler angles (radians, per glm::quat(vec3)).
void QuaternionCamera::rotate(glm::vec3 rotation)
{
glm::quat rotationQuat = glm::quat(rotation);
orientation *= rotationQuat;
updateCameraVectors();
updateViewMatrix();
updateViewProjectionMatrix();
}
// Pitch by `amount` degrees about the local X axis.
void QuaternionCamera::pitch(float amount)
{
glm::quat rotation = glm::angleAxis(glm::radians(amount), glm::vec3(1, 0, 0));
orientation *= rotation;
updateCameraVectors();
updateViewMatrix();
updateViewProjectionMatrix();
}
// Yaw by `amount` degrees about the local Y axis.
// NOTE(review): the sign is negated here, unlike pitch()/roll() — presumably
// so positive yaw turns right; confirm the intended convention.
void QuaternionCamera::yaw(float amount)
{
glm::quat rotation = glm::angleAxis(glm::radians(-amount), glm::vec3(0, 1, 0));
orientation *= rotation;
updateCameraVectors();
updateViewMatrix();
updateViewProjectionMatrix();
}
// Roll by `amount` degrees about the local forward (-Z) axis.
void QuaternionCamera::roll(float amount)
{
glm::quat rotation = glm::angleAxis(glm::radians(amount), glm::vec3(0, 0, -1));
orientation *= rotation;
updateCameraVectors();
updateViewMatrix();
updateViewProjectionMatrix();
}
// World-space camera position.
glm::vec3 QuaternionCamera::getPosition() const
{
return position;
}
// Cached world-space forward vector (kept in sync by updateCameraVectors()).
glm::vec3 QuaternionCamera::getDirection() const
{
return direction;
}
// Cached world-space right vector.
glm::vec3 QuaternionCamera::getRight() const
{
return right;
}
// Cached world-space up vector.
glm::vec3 QuaternionCamera::getUp() const
{
return up;
}
// Orientation quaternion relative to the world.
glm::quat QuaternionCamera::getOrientation() const
{
return orientation;
}
float QuaternionCamera::getZNear() const
{
return zNear;
}
float QuaternionCamera::getZFar() const
{
return zFar;
}
// The three matrices below are caches, rebuilt by the update*() helpers
// whenever a mutator runs.
glm::mat4 QuaternionCamera::getViewMatrix() const
{
return viewMatrix;
}
glm::mat4 QuaternionCamera::getProjectionMatrix() const
{
return projectionMatrix;
}
glm::mat4 QuaternionCamera::getViewProjectionMatrix() const
{
return viewProjectionMatrix;
}
// Combine projection * view into the cached view-projection matrix.
void QuaternionCamera::updateViewProjectionMatrix()
{
viewProjectionMatrix = getProjectionMatrix() * getViewMatrix();
}
// Recompute the cached right/up/direction axes from the orientation quaternion.
void QuaternionCamera::updateCameraVectors()
{
right = glm::normalize(orientation * glm::vec3(1, 0, 0));
up = glm::normalize(orientation * glm::vec3(0, 1, 0));
direction = glm::normalize(orientation * glm::vec3(0, 0, -1));
// NOTE(review): for a unit quaternion the rotated axes always form a
// consistent right-handed basis, so this flip should be unreachable —
// confirm why it was added (possibly masking a non-normalized quaternion).
if (glm::dot(up, glm::cross(right, direction)) < 0)
{
up *= -1;
}
}
// Rebuild the view matrix from the position and the cached direction/up.
void QuaternionCamera::updateViewMatrix()
{
viewMatrix = glm::lookAt(position, position + direction, up/*glm::cross(right, direction)*/);
}
// Perspective camera: forwards to the QuaternionCamera base, then builds the
// projection from fov (degrees, vertical) and the aspect ratio.
PerspectiveCamera::PerspectiveCamera(glm::vec3 position, float fov, float aspectRatio, float zNear, float zFar) : QuaternionCamera(position, zNear, zFar)
{
this->fov = fov;
this->aspectRatio = aspectRatio;
updateProjectionMatrix();
}
PerspectiveCamera::PerspectiveCamera(glm::vec3 position, glm::quat rotation, float fov, float aspectRatio, float zNear, float zFar) : QuaternionCamera(position, rotation, zNear, zFar)
{
this->fov = fov;
this->aspectRatio = aspectRatio;
updateProjectionMatrix();
}
PerspectiveCamera::PerspectiveCamera(glm::vec3 position, glm::vec3 rotation, float fov, float aspectRatio, float zNear, float zFar) : QuaternionCamera(position, rotation, zNear, zFar)
{
this->fov = fov;
this->aspectRatio = aspectRatio;
updateProjectionMatrix();
}
// Set the vertical field of view, in degrees.
// NOTE(review): the cached view-projection is refreshed only if
// updateProjectionMatrix() does it — verify it does not go stale here.
void PerspectiveCamera::setFOV(float fov)
{
this->fov = fov;
updateProjectionMatrix();
}
// Set the aspect ratio directly (width / height).
void PerspectiveCamera::setAspectRatio(float aspectRatio)
{
this->aspectRatio = aspectRatio;
updateProjectionMatrix();
}
// Set the aspect ratio from window dimensions.
// FIX: aspect ratio is width / height (what glm::perspective and the
// single-float overload expect); it was inverted (height / width) here,
// which distorted the projection for any non-square window.
void PerspectiveCamera::setAspectRatio(float width, float height)
{
this->aspectRatio = width / height;
updateProjectionMatrix();
}
// Current vertical field of view, in degrees.
float PerspectiveCamera::getFOV() const
{
return fov;
}
// Current aspect ratio.
float PerspectiveCamera::getAspectRatio() const
{
return aspectRatio;
}
void PerspectiveCamera::updateProjectionMatrix()
{
projectionMatrix = glm::perspective(glm::radians(fov), aspectRatio, zNear, zFar);
}
In general, changing the pitch, yaw, or roll will change all the components of a quaternion. So I would just transform the quaternion into euler angles, set the appropriate angle, and transform that back into the quaternion. Something like this:
glm::vec3 eulerAngles = glm::eulerAngles(orientation);
eulerAngles.x = amount; // .y for yaw, .z for roll
orientation = glm::quat(eulerAngles);

Orbiting an object around another object, but it doesn't move

I'm trying to make a visual "simulation" of the solar system using OpenGL and am using this function to orbit a planet around the sun (in a circular orbit).
// Advance `thisPlanet` one step along a circular orbit around `otherPlanet`
// in the XZ plane, preserving its Y coordinate.
// FIX: the angle was recovered with acosf((x-ox)/r), which can only return
// values in [0, pi] — the quadrant/sign information carried by the z offset
// was lost (and an extra -pi offset was applied), so the reconstructed
// position did not advance consistently. atan2f recovers the angle
// unambiguously from both offsets.
glm::vec3 application::orbit(glm::vec3 thisPlanet, glm::vec3 otherPlanet, float rotSpeed, const time &dt)
{
float radius = glm::distance(thisPlanet, otherPlanet);
float angle = atan2f(thisPlanet.z - otherPlanet.z, thisPlanet.x - otherPlanet.x);
angle += dt.as_seconds() * rotSpeed;
// Keep the angle bounded (atan2f tolerates any value; this just avoids
// unbounded growth over long runs). 8*atan(1) == 2*pi.
const float twoPi = 8.0f * atanf(1.0f);
if (angle > twoPi)
angle -= twoPi;
float x = otherPlanet.x + cosf(angle) * radius;
float z = otherPlanet.z + sinf(angle) * radius;
return glm::vec3(x, thisPlanet.y, z);
}
The function is called every frame like this:
// Per-frame simulation step: input, orbit integration, model matrices, HUD.
void application::tick(const time &dt)
{
if (m_keyboard.key_released(GLFW_KEY_ESCAPE)) {
m_running = false;
}
m_controller.update(m_keyboard, m_mouse, dt);
m_cube_rotation += dt.as_seconds();
m_mercury_position = orbit(m_mercury_position, m_sun_position, 2.0f, dt);
// FIX: build each model matrix as T * R * S. The original called
// glm::translate(glm::scale(I, s), pos), which composes S * T and therefore
// scales the translation as well — the objects' world positions were
// distorted by their scale factors, hiding the orbital motion.
glm::mat4 sun = glm::translate(glm::mat4(1.0f), m_sun_position)
* glm::rotate(glm::mat4(1.0f), m_cube_rotation, glm::normalize(glm::vec3(1.0f, 1.0f, -1.0f)))
* glm::scale(glm::mat4(1.0f), glm::vec3(2.0f, 2.0f, 2.0f));
glm::mat4 mercury = glm::translate(glm::mat4(1.0f), m_mercury_position)
* glm::rotate(glm::mat4(1.0f), m_cube_rotation, glm::normalize(glm::vec3(1.0f, 1.0f, -1.0f)))
* glm::scale(glm::mat4(1.0f), glm::vec3(1.0f, 1.0f, 1.0f));
m_sun.set_transform(sun);
m_mercury.set_transform(mercury);
const int frames_per_second = int(1.0f / dt.as_seconds());
const int frame_timing_ms = int(dt.as_milliseconds());
m_overlay.pre_frame(m_width, m_height);
m_overlay.push_line("FPS: %d (%dms)", frames_per_second, frame_timing_ms);
}
Why doesn't the planet move?
This is how I would construct the model matrix:
mat4 model_mat;
const vec3 n_forward = dir.x;
const vec3 n_up = vec3(0, 1, 0);
const vec3 n_left = dir.z;
// construct a basis and a translation
model_mat[0] = normalize(vec4(n_left, 0.0f));
model_mat[1] = normalize(vec4(n_forward, 0.0f));
model_mat[2] = normalize(vec4(n_up, 0.0f));
model_mat[3] = vec4(p, 1.0f);
The position (e.g. p) is given by the parametric circle equation p = (r·cos(t), r·sin(t)), where t is the elapsed time (times the angular speed) and r is the circular orbit radius. Also see: https://www.mathopenref.com/coordparamcircle.html
Does this help at all?

OpenGL - Getting camera to move

I'm having trouble with my OpenGL game where I can't get the camera to move.
I am unable to use GLFW, GLUT and glulookat(). Here is my code, what's wrong?
P.S everything works except the camera movement meaning the game plays and works perfectly, just cant move the camera.
My Camera Code:
#include "SpriteRenderer.h"
#include <glm/glm.hpp>
#include <glm/gtc/matrix_transform.hpp>
// Follow-camera for the game: wraps a glm::lookAt view matrix and pushes it
// into the shader's "view" uniform on Draw().
class Camera
{
private:
Shader shader;
// NOTE(review): `angle` is never read or updated after initialization.
GLfloat angle = -90.f;
glm::vec3 cameraFront = glm::vec3(0.0f, 0.0f, -1.0f),
cameraPosition = glm::vec3(0.0f, 0.0f, 0.1f),
cameraUp = glm::vec3(0.0f, 1.0f, 0.0f);
glm::mat4 viewMatrix;
// recompute the view matrix from the camera variables
void updateMatrix()
{
viewMatrix = glm::lookAt(cameraPosition, cameraPosition + cameraFront, cameraUp);
}
// default constructor
void defaultNew()
{
cameraPosition = glm::vec3(0.0f, 0.0f, 0.1f);
cameraFront = glm::vec3(0.0f, 0.0f, -1.0f);
cameraUp = glm::vec3(0.0f, 1.0f, 0.0f);
updateMatrix();
}
public:
Camera() { defaultNew(); }
Camera(Shader &shader) { this->shader = shader; defaultNew(); }
glm::mat4 GetViewMatrix() const
{
// if your view matrix is always up-to-date, just return it directly
return viewMatrix;
}
// get functions
glm::vec3 GetCameraPosition() const { return cameraPosition; }
// .. same for Front and Up
// set functions
// call updateMatrix every time you update a variable
void SetCameraPosition(glm::vec3 pos)
{
cameraPosition = pos;
updateMatrix();
}
// .. same for Front and Up
// no need to use this-> all the time
virtual void Draw()
{
this->shader.Use();
this->shader.SetMatrix4("view", viewMatrix);
}
};
My Shader Code:
// Bind this shader program; returns *this so calls can be chained.
Shader &Use(){ glUseProgram(this->ID); return *this; }
// Upload a mat4 uniform by name; optionally binds the program first.
// NOTE(review): glGetUniformLocation on every call is wasteful — consider
// caching the location.
void SetMatrix4(const GLchar *name, const glm::mat4 &matrix, GLboolean useShader = false)
{ if (useShader)this->Use(); glUniformMatrix4fv(glGetUniformLocation(this->ID, name), 1, GL_FALSE, glm::value_ptr(matrix)); }
My Game Code:
Camera *View;
// FIX: the aspect ratio must be width / height; the original divided Width
// by Width, which always yields 1.0 and distorts any non-square viewport.
projection2 = glm::perspective(glm::radians(44.0f), (float)this->Width / (float)this->Height, 0.1f, 100.0f);
AssetController::LoadShader("../src/Shaders/Light.vert", "../src/Shaders/Light.frag", "light");
AssetController::GetShader("light").SetMatrix4("projection", projection2);
View = new Camera(AssetController::GetShader("light"));
(...)
GLfloat velocity = playerSpeed * deltaTime;
glm::vec3 camPosition;
// Update Players Position
if (movingLeft)
{
if (Player->Position.x >= 0)
{
Player->Position.x -= velocity;
if (Ball->Stuck)
Ball->Position.x -= velocity;
// Follow the player at half speed through the setter so the camera's
// view matrix is rebuilt (see Camera::SetCameraPosition).
camPosition = View->GetCameraPosition();
camPosition.x -= velocity / 2;
View->SetCameraPosition(camPosition);
}
}
else if (movingRight)
{
if (Player->Position.x <= this->Width - Player->Size.x)
{
Player->Position.x += velocity;
if (Ball->Stuck)
Ball->Position.x += velocity;
camPosition = View->GetCameraPosition();
camPosition.x += velocity / 2;
View->SetCameraPosition(camPosition);
}
}
(...)
GameOver->Draw(*Renderer);
// Uploads the current view matrix to the shader each frame.
View->Draw();
My Shaders:
.vert:
#version 440 core
layout (location = 0) in vec3 aPos;
layout (location = 1) in vec2 aTexCoord;
out vec2 TexCoord;
uniform mat4 model;
uniform mat4 view;
uniform mat4 projection;
void main()
{
// Full MVP chain — the "view" uniform participates here, so re-uploading
// it after every camera change is what makes the camera actually move.
gl_Position = projection * view * model * vec4(aPos, 1.0f);
TexCoord = vec2(aTexCoord.x, aTexCoord.y);
}
.frag:
#version 440 core
out vec4 FragColor;
in vec2 TexCoord;
// texture samplers
uniform sampler2D texture1;
uniform sampler2D texture2;
void main()
{
// linearly interpolate between both textures (80% container, 20% awesomeface)
// mix(a, b, 0.2) = 0.8*a + 0.2*b
FragColor = mix(texture(texture1, TexCoord), texture(texture2, TexCoord), 0.2);
}
The problem is that you only update local position variable cameraPosition, and not the view matrix, which is passed to OpenGL during rendering.
It is also a bad habit to make the camera variables and matrix public, as they can potentially be modified incorrectly or out-of-sync (as you are doing). Instead, you could write a pair of get/set functions:
// Suggested encapsulated camera: the view matrix is private and rebuilt by
// updateMatrix() inside every setter, so it can never go out of sync with
// the position/front/up variables.
class Camera
{
private:
Shader shader;
GLfloat angle = -90.f;
glm::vec3 cameraFront = glm::vec3(0.0f, 0.0f, -1.0f),
cameraPosition = glm::vec3(0.0f, 0.0f, 0.1f),
cameraUp = glm::vec3(0.0f, 1.0f, 0.0f);
glm::mat4 viewMatrix;
// recompute the view matrix from the camera variables
void updateMatrix()
{
viewMatrix = glm::lookAt(cameraPosition, cameraPosition + cameraFront, cameraUp);
}
// default constructor
void defaultNew()
{
cameraPosition = glm::vec3(0.0f, 0.0f, 0.1f);
cameraFront = glm::vec3(0.0f, 0.0f, -1.0f);
cameraUp = glm::vec3(0.0f, 1.0f, 0.0f);
updateMatrix();
}
public:
Camera() {
defaultNew();
}
Camera(Shader &shader) {
this->shader = shader;
defaultNew();
}
glm::mat4 GetViewMatrix() const
{
// if your view matrix is always up-to-date, just return it directly
return viewMatrix;
}
// get functions
glm::vec3 GetCameraPosition() const { return cameraPosition; }
// .. same for Front and Up
// set functions
// call updateMatrix every time you update a variable
void SetCameraPosition(glm::vec3 p)
{
cameraPosition = p;
updateMatrix();
}
// .. same for Front and Up
// no need to use this-> all the time
virtual void Draw()
{
shader.Use();
shader.SetMatrix4("view", viewMatrix);
}
};
And then when you update the camera position, simply use these functions instead of the exposed variables:
view->SetCameraPosition(view->GetCameraPosition() + velocity / 2.0f);
This will make sure that the draw calls always use the updated view matrix instead of the initial one (which was the case before and the source of your troubles).

Mouse picking miss

I did mouse picking with terrain for these lessons (but used c++)
https://www.youtube.com/watch?v=DLKN0jExRIM&index=29&listhLoLuZVfUksDP
http://antongerdelan.net/opengl/raycasting.html
The problem is that the position of the mouse does not correspond to the place where the ray intersects with the terrain:
There's a big blunder on the vertical and a little horizontal.
Do not look at the shadows, this is not a corrected map of normals.
What can be wrong? My code:
// Refresh the picking ray from the current camera/mouse state and, if it
// hits within RAY_RANGE, the terrain intersection point.
void MousePicker::update() {
view = cam->getViewMatrix();
currentRay = calculateMouseRay();
if (intersectionInRange(0, RAY_RANGE, currentRay)) {
currentTerrainPoint = binarySearch(0, 0, RAY_RANGE, currentRay);
}
else {
// No hit: reported as the zero vector (callers must treat vec3() as "miss").
currentTerrainPoint = vec3();
}
}
// Standard unproject chain: screen -> NDC -> clip -> eye -> world direction.
vec3 MousePicker::calculateMouseRay() {
glfwGetCursorPos(win, &mouseInfo.xPos, &mouseInfo.yPos);
vec2 normalizedCoords = getNormalizedCoords(mouseInfo.xPos, mouseInfo.yPos);
// z = -1 points the ray into the screen; w = 1 before unprojection.
vec4 clipCoords = vec4(normalizedCoords.x, normalizedCoords.y, -1.0f, 1.0f);
vec4 eyeCoords = toEyeCoords(clipCoords);
vec3 worldRay = toWorldCoords(eyeCoords);
return worldRay;
}
// Convert window-space mouse coordinates (pixels, origin top-left) to
// normalized device coordinates in [-1, 1] on both axes.
// Cleanup: removed the dead commented-out lines and replaced the convoluted
// but algebraically-equivalent expressions with the standard formulas.
vec2 MousePicker::getNormalizedCoords(double xPos, double yPos) {
GLint width, height;
glfwGetWindowSize(win, &width, &height);
GLfloat x = (2.0f * (GLfloat)xPos) / width - 1.0f;
// Window Y grows downward while NDC Y grows upward, hence the inversion.
GLfloat y = 1.0f - (2.0f * (GLfloat)yPos) / height;
mouseInfo.normalizedCoords = vec2(x, y);
return vec2(x, y);
}
// Transform clip-space coords back into eye space via the inverse projection.
vec4 MousePicker::toEyeCoords(vec4 clipCoords) {
vec4 invertedProjection = inverse(projection) * clipCoords;
//vec4 eyeCoords = translate(invertedProjection, clipCoords);
// Force z = -1 and w = 0: the result is treated as a direction pointing
// into the screen, not a point, so later translations won't affect it.
mouseInfo.eyeCoords = vec4(invertedProjection.x, invertedProjection.y, -1.0f, 0.0f);
return vec4(invertedProjection.x, invertedProjection.y, -1.0f, 0.0f);
}
// Rotate the eye-space ray direction into world space and normalize it.
// eyeCoords.w == 0, so the view matrix's translation does not affect it.
// FIX: removed the unused local `mouseRay` (assigned and never read).
vec3 MousePicker::toWorldCoords(vec4 eyeCoords) {
vec3 rayWorld = vec3(inverse(view) * eyeCoords);
rayWorld = normalize(rayWorld);
mouseInfo.worldRay = rayWorld;
return rayWorld;
}
//*********************************************************************************
// Walk `distance` units along the (unit) ray starting at the camera position.
vec3 MousePicker::getPointOnRay(vec3 ray, float distance) {
const vec3 origin = cam->getCameraPos();
return origin + ray * distance;
}
// Recursively bisect the ray interval [start, finish] until the terrain
// crossing is bracketed tightly enough (RECURSION_COUNT halvings), then
// return the midpoint of the final interval.
vec3 MousePicker::binarySearch(int count, float start, float finish, vec3 ray) {
float half = start + ((finish - start) / 2.0f);
if (count >= RECURSION_COUNT) {
vec3 endPoint = getPointOnRay(ray, half);
//Terrain* ter = &getTerrain(endPoint.x, endPoint.z);
if (terrain != NULL) {
return endPoint;
}
else {
// No terrain loaded: zero vector signals "no hit".
return vec3();
}
}
// Recurse into whichever half contains the above->below ground transition.
if (intersectionInRange(start, half, ray)) {
return binarySearch(count + 1, start, half, ray);
}
else {
return binarySearch(count + 1, half, finish, ray);
}
}
bool MousePicker::intersectionInRange(float start, float finish, vec3 ray) {
vec3 startPoint = getPointOnRay(ray, start);
vec3 endPoint = getPointOnRay(ray, finish);
if (!isUnderGround(startPoint) && isUnderGround(endPoint)) {
return true;
}
else {
return false;
}
}
// True when `testPoint` lies below the terrain surface at its (x, z).
// With no terrain loaded the ground height defaults to 0.
bool MousePicker::isUnderGround(vec3 testPoint) {
float height = 0;
if (terrain != NULL) {
height = terrain->getHeightPoint(testPoint.x, testPoint.z);
mouseInfo.height = height; // expose the sampled height for debugging
}
return testPoint.y < height;
}
// Placeholder for multi-chunk terrain lookup: the coordinates are currently
// ignored and the single terrain is returned (by value — note the copy).
Terrain MousePicker::getTerrain(float worldX, float worldZ) {
return *terrain;
}
In perspective projection, a ray from the eye position through a point on the screen can defined by 2 points. The first point is the eye (camera) position which is (0, 0, 0) in view space. The second point has to be calculated by the position on the screen.
The screen position has to be converted to normalized device coordinates in range from (-1,-1) to (1,1).
w = with of the viewport
h = height of the viewport
x = X position of the mouse
y = Y position ot the mouse
GLfloat ndc_x = 2.0 * x/w - 1.0;
GLfloat ndc_y = 1.0 - 2.0 * y/h; // invert Y axis
To calculate a point on the ray, which goes through the camera position and through the point on the screen, the field of view and the aspect ratio of the perspective projection has to be known:
fov_y = vertical field of view angle in radians
aspect = w / h
GLfloat tanFov = tan( fov_y * 0.5 );
glm::vec3 ray_P = vec3( ndc_x * aspect * tanFov, ndc_y * tanFov, -1.0 ) );
A ray from the camera position through a point on the screen can be defined by the following position (P0) and normalized direction (dir), in world space:
view = view matrix
glm::mat4 invView = glm::inverse( view );
glm::vec3 P0 = invView * glm::vec3(0.0f, 0.0f, 0.0f);
// = glm::vec3( view[3][0], view[3][1], view[3][2] );
glm::vec3 dir = glm::normalize( invView * ray_P - P0 );
In this case, the answers to the following questions will be interesting too:
How to recover view space position given view space depth value and ndc xy
Is it possble get which surface of cube will be click in OpenGL?
How to render depth linearly in modern OpenGL with gl_FragCoord.z in fragment shader?
GLSL spotlight projection volume
Applying to your code results in the following changes:
The Perspective Projection Matrix looks like this:
r = right, l = left, b = bottom, t = top, n = near, f = far
2*n/(r-l) 0 0 0
0 2*n/(t-b) 0 0
(r+l)/(r-l) (t+b)/(t-b) -(f+n)/(f-n) -1
0 0 -2*f*n/(f-n) 0
it follows:
aspect = w / h
tanFov = tan( fov_y * 0.5 );
p[0][0] = 2*n/(r-l) = 1.0 / (tanFov * aspect)
p[1][1] = 2*n/(t-b) = 1.0 / tanFov
Convert from screen (mouse) coordinates to normalized device coordinates:
// Convert from screen (mouse) coordinates to normalized device coordinates.
// FIX: the locals were declared w/h but glfwGetWindowSize was called with
// undeclared width/height, and both the stored value and the return used
// vec2(ndc_x, ndc_x) — the Y component was the X component repeated.
vec2 MousePicker::getNormalizedCoords(double x, double y) {
GLint w, h;
glfwGetWindowSize(win, &w, &h);
GLfloat ndc_x = 2.0 * x/w - 1.0;
GLfloat ndc_y = 1.0 - 2.0 * y/h; // invert Y axis
mouseInfo.normalizedCoords = vec2(ndc_x, ndc_y);
return vec2(ndc_x, ndc_y);
}
Calculate A ray from the camera position through a point on the screen (mouse position) in world space:
// Build a world-space ray direction from the camera through the mouse cursor.
// FIXES: declared ray_Px/ray_Py (previously undeclared), removed a stray
// extra ')' that made the vec3 construction a syntax error, and used a vec4
// for the view-space point so the mat4 * vector product type-checks in glm
// (mat4 * vec3 does not compile).
vec3 MousePicker::calculateMouseRay( void ) {
glfwGetCursorPos(win, &mouseInfo.xPos, &mouseInfo.yPos);
vec2 normalizedCoords = getNormalizedCoords(mouseInfo.xPos, mouseInfo.yPos);
// Undo the projection scaling:
// projection[0][0] == 1.0 / (tanFov * aspect), projection[1][1] == 1.0 / tanFov
float ray_Px = normalizedCoords.x / projection[0][0];
float ray_Py = normalizedCoords.y / projection[1][1];
// Point on the view plane, in view space (camera looks down -Z); w = 1
// makes it a point so the inverse view transform applies the translation.
glm::vec4 ray_P = glm::vec4( ray_Px, ray_Py, -1.0f, 1.0f );
vec3 camPos = cam->getCameraPos(); // == glm::vec3( view[3][0], view[3][1], view[3][2] );
glm::mat4 invView = glm::inverse( view );
glm::vec3 P0 = camPos;
// Direction from the camera position through the unprojected point.
glm::vec3 dir = glm::normalize( glm::vec3( invView * ray_P ) - P0 );
return dir;
}

X Axis seems inverted in OpenTK

Edit: okay, I've written the code totally intuitive now and this is the result:
http://i.imgur.com/x5arJE9.jpg
The Cube is at 0,0,0
As you can see, the camera position is negative on the z axis, suggesting that I'm viewing along the positive z axis, which does not match up. (fw is negative)
Also the cube colors suggest that I'm on the positive z axis, looking in the negative direction. Also the positive x-axis is to the right (in modelspace)
The angles are calculated like this:
// Camera basis vectors derived from the orientation quaternion.
// Right: local +X rotated into world space.
public virtual Vector3 Right
{
get
{
return Vector3.Transform(Vector3.UnitX, Rotation);
}
}
// Forward: OpenGL convention — local -Z rotated into world space.
public virtual Vector3 Forward
{
get
{
return Vector3.Transform(-Vector3.UnitZ, Rotation);
}
}
// Up: local +Y rotated into world space.
public virtual Vector3 Up
{
get
{
return Vector3.Transform(Vector3.UnitY, Rotation);
}
}
Rotation is a Quaternion.
This is how the view and model matrices are creates:
// Compose translation * rotation (OpenTK row-vector convention: the left
// factor applies first, so this translates and then rotates).
// NOTE(review): when this object is the camera, the result is used directly
// as a view matrix without being inverted — a view matrix should be the
// inverse of the camera's world transform, which would explain the mirrored
// axes reported in the question. Verify.
public virtual Matrix4 GetMatrix()
{
Matrix4 translation = Matrix4.CreateTranslation(Position);
Matrix4 rotation = Matrix4.CreateFromQuaternion(Rotation);
return translation * rotation;
}
Projection:
// Rebuild the perspective projection; requires an attached GameObject to
// read the window dimensions from (silently does nothing otherwise).
private void SetupProjection()
{
if(GameObject != null)
{
AspectRatio = GameObject.App.Window.Width / (float)GameObject.App.Window.Height;
// Fov is in degrees; CreatePerspectiveFieldOfView expects radians.
projectionMatrix = Matrix4.CreatePerspectiveFieldOfView((float)((Math.PI * Fov) / 180), AspectRatio, ZNear, ZFar);
}
}
Matrix multiplication:
// Row-vector (OpenTK) order: model first, then camera transform, then
// projection — the reverse of the column-major order used in GLSL.
public Matrix4 GetModelViewProjectionMatrix(Transform model)
{
return model.GetMatrix()* Transform.GetMatrix() * projectionMatrix;
}
Shader:
[Shader vertex]
#version 150 core
in vec3 pos;
in vec4 color;
uniform float _time;
uniform mat4 _modelViewProjection;
out vec4 vColor;
void main() {
// The MVP was pre-multiplied on the CPU (row-vector order there), so a
// single matrix multiply is enough here.
gl_Position = _modelViewProjection * vec4(pos, 1);
vColor = color;
}
OpenTK matrices are transposed, thus the multiplication order.
Any idea why the axis / locations are all messed up ?
End of edit. Original Post:
Have a look at this image: http://i.imgur.com/Cjjr8jz.jpg
As you can see, while the forward vector ( of the camera ) is positive in the z-Axis and the red cube is on the negative x axis,
float[] points = {
// position (3) Color (3)
-s, s, z, 1.0f, 0.0f, 0.0f, // Red point
s, s, z, 0.0f, 1.0f, 0.0f, // Green point
s, -s, z, 0.0f, 0.0f, 1.0f, // Blue point
-s, -s, z, 1.0f, 1.0f, 0.0f, // Yellow point
};
(cubes are created in the geometry shader around those points)
the camera x position seems to be inverted. In other words, if I increase the camera position along its local x axis, it will move to the left, and vice versa.
I pass the transformation matrix like this:
// Upload the combined camera * projection matrix.
// NOTE(review): despite the "modelViewProjectionMatrix" name, no model
// matrix is included here, and Camera.GetMatrix() is not inverted before
// being used as the view part — verify.
if (DefaultAttributeLocations.TryGetValue("modelViewProjectionMatrix", out loc))
{
if (loc >= 0)
{
Matrix4 mvMatrix = Camera.GetMatrix() * projectionMatrix;
GL.UniformMatrix4(loc, false, ref mvMatrix);
}
}
The GetMatrix() method looks like this:
// Compose translation * rotation (OpenTK row-vector convention).
// NOTE(review): identical to the earlier GetMatrix(); same caveat applies —
// used as a view matrix it should be the inverse of the camera transform.
public virtual Matrix4 GetMatrix()
{
Matrix4 translation = Matrix4.CreateTranslation(Position);
Matrix4 rotation = Matrix4.CreateFromQuaternion(Rotation);
return translation * rotation;
}
And the projection matrix:
// Perspective projection from the window's current aspect ratio;
// Fov is in degrees and converted to radians inline.
private void SetupProjection()
{
AspectRatio = Window.Width / (float)Window.Height;
projectionMatrix = Matrix4.CreatePerspectiveFieldOfView((float)((Math.PI * Fov)/180), AspectRatio, ZNear, ZFar);
}
I don't see what I'm doing wrong :/
It's a little hard to tell from the code, but I believe this is because in OpenGL, the default forward vector of the camera is negative along the Z axis - yours is positive, which means you're looking at the model from the back. That would be why the X coordinate seems inverted.
Although this question is a few years old, I'd still like to give my input.
The reason you're experiencing this bug is because OpenTK's matrices are row major. All this really means is that you have to do all matrix multiplication in reverse order. For example, the transformation matrix will be multiplied like so:
// Scale, then rotate, then translate — written left-to-right because OpenTK
// composes with row vectors (v * M), the reverse of column-major GLSL math.
public static Matrix4 CreateTransformationMatrix(Vector3 position, Quaternion rotation, Vector3 scale)
{
return Matrix4.CreateScale(scale) *
Matrix4.CreateFromQuaternion(rotation) *
Matrix4.CreateTranslation(position);
}
This goes for any matrix, so if you're using Vector3's instead of Quaternion's for your rotation it would look like this:
// Same composition with per-axis Euler rotations: scale, rotate Z/Y/X,
// then translate (row-vector order, left factor applies first).
public static Matrix4 CreateTransformationMatrix(Vector3 position, Vector3 rotation, Vector3 scale)
{
return Matrix4.CreateScale(scale) *
Matrix4.CreateRotationZ(rotation.Z) *
Matrix4.CreateRotationY(rotation.Y) *
Matrix4.CreateRotationX(rotation.X) *
Matrix4.CreateTranslation(position);
}
Note that your vertex shader will still be multiplied like this:
void main()
{
// GLSL is column-major, so the multiplication order here stays
// projection * view * transform even though the CPU side composes reversed.
gl_Position = projection * view * transform * vec4(position, 1.0f);
}
I hope this helps!