I'm currently working on my own small game engine (I'm learning OpenGL). I built a camera mechanism that can rotate vectors with Euler angles, and now I'm working on rotations with quaternions. I'm stuck because my quaternion rotations behave very strangely (flipping objects, not rotating the camera as it should). Please help me find out what is wrong with my algorithm, and suggest fixes to my camera code. Below is my source code, with a few notes: the target I want to look at is at coordinates (0.0f, 0.0f, 0.0f), and the position I look at the target from should be glm::vec3 position = glm::vec3(0.0f, 0.0f, 3.0f).
My camera class:
class Camera {
private:
    bool eulerMode = false;
    float m_mouseSensitivity;
    float m_velocity;
    glm::vec3 m_rightAxis{};
    glm::vec3 m_upAxis;
    glm::vec3 m_position;
    glm::vec3 m_target{};
    glm::vec3 m_r{};
    glm::vec3 m_direction{};
    static glm::vec3 rotateVector(float angle, glm::vec3 rotationAxis, glm::vec3 vectorToRotate);
    static glm::quat quaternion(float angle, glm::vec3 vec);
public:
    static Camera *s_context;
    float m_yaw;
    float m_pitch;
    float m_mouseLastX;
    float m_mouseLastY;
    bool m_firstMouse;
    glm::vec3 m_frontAxis;
    Camera(float speed,
           int width,
           int height,
           glm::vec3 position = glm::vec3(0.0f, 0.0f, 3.0f),
           glm::vec3 up = glm::vec3(0.0f, 1.0f, 0.0f),
           glm::vec3 target = glm::vec3(0.0f, 0.0f, 0.0f)
    );
    glm::mat4 getLookAtMatrix();
    static void quaternionRotate(GLFWwindow *window, double x, double y);
};
Camera::Camera(
    float speed,
    int width,
    int height,
    glm::vec3 position,
    glm::vec3 up,
    glm::vec3 target
)
    : m_pitch(0.0f), m_yaw(-90.0f), m_mouseLastX((float) width / 2),
      m_mouseLastY((float) height / 2), m_mouseSensitivity(0.1f), m_upAxis(up), m_position(position),
      m_frontAxis(glm::vec3(0.0f, 0.0f, -1.0f)), m_firstMouse(true) {
    m_velocity = speed;
    m_direction = glm::normalize(position - target);
    m_rightAxis = glm::normalize(glm::cross(up, m_direction));
    m_upAxis = glm::cross(m_direction, m_rightAxis);
    s_context = this;
    glfwSetWindowUserPointer(g_Window->getOpenGLWindow(), this);
    if (eulerMode) {
        glfwSetCursorPosCallback(g_Window->getOpenGLWindow(), eulerRotate);
    } else {
        glfwSetCursorPosCallback(g_Window->getOpenGLWindow(), quaternionRotate);
    }
}
glm::mat4 Camera::getLookAtMatrix() {
    glm::mat4 view = glm::lookAt(m_position, m_r, m_upAxis);
    return view;
}
// for the sake of brevity, I skipped some class methods (such as eulerRotate) that are unnecessary for quaternion rotations
void Camera::quaternionRotate(GLFWwindow *window, double x, double y) {
    if (s_context->m_firstMouse) {
        s_context->m_mouseLastX = (float) x;
        s_context->m_mouseLastY = (float) y;
        s_context->m_firstMouse = false;
    }
    auto xoffset = (float) (x - s_context->m_mouseLastX);
    auto yoffset = (float) (s_context->m_mouseLastY - y);
    s_context->m_mouseLastX = (float) x;
    s_context->m_mouseLastY = (float) y;
    float sensitivity = 0.1f;
    xoffset *= sensitivity;
    yoffset *= sensitivity;
    s_context->m_yaw += xoffset;
    s_context->m_pitch += yoffset;
    glm::vec3 yAxis = glm::vec3(0, 1, 0);
    // Rotate the view vector by the horizontal angle around the vertical axis
    glm::vec3 view = s_context->m_direction;
    view = glm::normalize(rotateVector(s_context->m_yaw, yAxis, view));
    // Rotate the view vector by the vertical angle around the horizontal axis
    glm::vec3 xAxis = glm::normalize(glm::cross(yAxis, view));
    view = glm::normalize(rotateVector(s_context->m_pitch, xAxis, view));
    s_context->m_r = view;
    s_context->m_upAxis = glm::normalize(glm::cross(s_context->m_r, xAxis));
}
glm::vec3 Camera::rotateVector(float angle, const glm::vec3 rotationAxis, const glm::vec3 vectorToRotate) {
    glm::quat rotationQ = quaternion(angle, rotationAxis);
    glm::quat conjugateQ = glm::conjugate(rotationQ);
    glm::quat result = rotationQ * vectorToRotate * conjugateQ;
    return {result.x, result.y, result.z};
}
glm::quat Camera::quaternion(float angle, const glm::vec3 vec) {
    float HalfAngleInRadians = glm::radians(angle / 2);
    float SineHalfAngle = sinf(HalfAngleInRadians);
    float CosHalfAngle = cosf(HalfAngleInRadians);
    float xC = vec.x * SineHalfAngle;
    float yC = vec.y * SineHalfAngle;
    float zC = vec.z * SineHalfAngle;
    float wC = CosHalfAngle;
    return {wC, xC, yC, zC};
}
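As a point of comparison (not part of the original code), the same axis-angle rotation can be written with GLM's built-in quaternion helpers. Note that GLM's operator* between a quat and a vec3 already performs the q * v * conjugate(q) sandwich product internally and returns a vec3, so multiplying by the conjugate manually on top of that ends up rotating twice. A minimal sketch:
#include <glm/glm.hpp>
#include <glm/gtc/quaternion.hpp> // glm::quat, glm::angleAxis

// Rotate v by angleDegrees around rotationAxis; the axis is normalized here
// because glm::angleAxis expects a unit axis.
glm::vec3 rotateVectorGlm(float angleDegrees, glm::vec3 rotationAxis, glm::vec3 v) {
    glm::quat q = glm::angleAxis(glm::radians(angleDegrees), glm::normalize(rotationAxis));
    return q * v; // GLM applies q * v * conjugate(q) and returns the vector part
}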
I'm making a level editor for my game with OpenGL in C++. I'm trying to make an editor camera just like the Unity engine's 2D scene camera, but I have an issue when I try to implement mouse movement for the camera (camera panning). I'm converting the mouse position from screen space to world space.
ScreenToWorldSpace Method:
Vector3 Application::ScreenToWorldSpace(int mousex, int mousey)
{
    double x = 2.0 * mousex / viewportWidth - 1;
    double y = 2.0 * mousey / viewportHeight - 1;
    Vector4 screenPos = Vector4(x, -y, -1.0f, 1.0f);
    Matrix4 ProjectionViewMatrix = camera1->GetProjectionMatrix() * camera1->GetViewMatrix();
    Matrix4 InverseProjectionViewMatrix = glm::inverse(ProjectionViewMatrix);
    Vector4 worldPos = InverseProjectionViewMatrix * screenPos;
    return Vector3(worldPos);
}
The above method works correctly.
But I'm using the ScreenToWorldSpace coordinates to update the camera position.
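(A side note on the conversion itself: multiplying an NDC point by inverse(projection * view) yields homogeneous coordinates, so with a perspective projection the result would need a divide by w. With the orthographic projection used here w stays 1, which is why the method works unchanged. A sketch of the general form, reusing the names from the method above:)
Vector4 worldPos = InverseProjectionViewMatrix * screenPos;
worldPos /= worldPos.w; // no-op for orthographic (w == 1), required for perspective
return Vector3(worldPos);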
Render Method:
void Application::Render(float deltaTime)
{
    Vector3 pos = ScreenToWorldSpace(mousePosition.x, mousePosition.y);
    // This is the position of a tile, not the camera
    position = Vector3(0, 0, 0);
    Vector3 rotation = Vector3(0, 0, 0);
    Vector3 scale = Vector3(1);
    Matrix4 translationMatrix = glm::translate(Matrix4(1.0f), position);
    Matrix4 rotationMatrix = glm::eulerAngleYXZ(rotation.y, rotation.x, rotation.z);
    Matrix4 scaleMatrix = glm::scale(Matrix4(1.0f), scale);
    modelMatrix = translationMatrix * rotationMatrix * scaleMatrix;
    if (mouseButtonDown)
    {
        Console << pos.x << ", " << pos.y << Endl;
        camera1->position = Vector3(pos.x, pos.y, -10);
    }
    {
        glScissor(0, 0, 900, 600);
        glEnable(GL_SCISSOR_TEST);
        glClearColor(236 / 255.0f, 64 / 255.0f, 122 / 255.0f, 1.0f);
        glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
        glViewport(0, 0, 900, 600);
        basicShader->Use();
        dirt_grass_tex->Use();
        glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, ibo);
        camera1->SetZoom(zoomFactor);
        camera1->Update();
        Matrix4 mvp = camera1->GetProjectionMatrix() * camera1->GetViewMatrix() * modelMatrix;
        basicShader->SetUniformMat4("MVP", mvp);
        glDrawElements(GL_TRIANGLES, 6, GL_UNSIGNED_INT, 0);
        glDisable(GL_SCISSOR_TEST);
    }
}
Camera Class:
#include "camera.h"
Camera::Camera(int width, int height)
{
swidth = width;
sheight = height;
position = Vector3(0, 0, -10);
rotation = Vector3(0, 0, 0);
m_direction = Vector3(0, 0, -5);
m_up = Vector3(0, 1, 0);
m_right = Vector3(1, 0, 0);
m_offset = Vector3(-swidth / 2 * m_zoom, -sheight / 2 * m_zoom, 0);
m_projection = glm::ortho(0.0f * m_zoom, (float)swidth * m_zoom, 0.0f * m_zoom, (float)sheight * m_zoom, -1000.0f, 0.0f);
}
Camera::~Camera()
{
}
void Camera::Update()
{
Vector3 finalPos = position + m_offset;
m_up = glm::cross(m_right, m_direction);
m_viewMatrix = glm::lookAt(finalPos, finalPos + m_direction, m_up);
m_viewMatrix = glm::scale(m_viewMatrix, Vector3(100));
}
void Camera::SetZoom(float zoom)
{
m_zoom = zoom;
m_offset = Vector3(-swidth / 2 * m_zoom, -sheight / 2 * m_zoom, 0);
m_projection = glm::ortho(0.0f * m_zoom, (float)swidth * m_zoom, 0.0f * m_zoom, (float)sheight * m_zoom, -1000.0f, 0.0f);
}
When I try to move the camera with the mouse position converted from screen to world space, the movement is wrong. But if I instead use the converted position to move the tile:
if (mouseButtonDown)
{
    Console << pos.x << ", " << pos.y << Endl;
    position = Vector3(pos.x, pos.y, 0);
}
With that, the tile moves perfectly under the cursor.
What I'm trying to achieve is an editor scene camera like the one in Unity or Unreal Engine, for the level editor I'm currently working on. I tried looking into different resources, but I'm clueless. Help me understand how to move the camera with the mouse.
What I think is happening: converting the mouse position from screen to world space with the camera's projection-view matrix, and then feeding those world coordinates back into the camera position, creates a feedback loop. Whenever the camera moves, the projection-view matrix is updated, which in turn changes the mouse position relative to the view matrix, recursively.
I would appreciate some help.
Ordinarily, you wouldn't want to write the mouse position directly into the camera location (that is of limited use in practice: whenever you click on the screen, the camera would jump). What you probably want is something along these lines:
Vector3 g_lastPosition;

void onMousePressed(int x, int y) {
    // record starting position!
    g_lastPosition = ScreenToWorldSpace(x, y);
}

void onMouseMove(int x, int y) {
    // find the difference between new position and last, in world space
    Vector3 new_pos = ScreenToWorldSpace(x, y);
    Vector3 offset = new_pos - g_lastPosition;
    g_lastPosition = new_pos;
    // now move camera by offset
    camera->position += offset;
}
If you are in an orthographic view, then you really don't need to worry about the projection matrix at all:
int g_lastX;
int g_lastY;

void onMousePressed(int x, int y) {
    // store mouse pos
    g_lastX = x;
    g_lastY = y;
}

void onMouseMove(int x, int y) {
    // find the difference between new position and last, in pixels
    int offsetX = x - g_lastX;
    int offsetY = y - g_lastY;
    // update mouse pos
    g_lastX = x;
    g_lastY = y;
    // get as ratio +/- 1
    float dx = ((float) offsetX) / swidth;
    float dy = ((float) offsetY) / sheight;
    // now move camera by offset (might need to multiply by 2 here?)
    camera->position.x += camera->m_offset.x * dx;
    camera->position.y += camera->m_offset.y * dy;
}
But in general, for any mouse-based movement, you always want to think in terms of adding an offset rather than setting an exact position.
I'm trying to get the coordinates (x, y) of the grid (z = 0) using only the cursor coordinates. After a long search I found this way to do it using glm::unProject.
First I'm getting the cursor coordinates using the callback:
void cursorCallback(GLFWwindow *window, double x, double y)
{
    this->cursorCoordinate = glm::vec3(x, (this->windowHeight - y - 1.0f), 0.0f);
}
and then converting these coordinates:
glm::vec3 cursorCoordinatesToWorldCoordinates()
{
    glm::vec3 pointInitial = glm::unProject(
        glm::vec3(this->cursorCoordinate.x, this->cursorCoordinate.y, 0.0),
        this->modelMatrix * this->viewMatrix,
        this->projectionMatrix,
        this->viewPort
    );
    glm::vec3 pointFinal = glm::unProject(
        glm::vec3(this->cursorCoordinate.x, this->cursorCoordinate.y, 1.0),
        this->modelMatrix * this->viewMatrix,
        this->projectionMatrix,
        this->viewPort
    );
    glm::vec3 vectorDirector = pointFinal - pointInitial;
    double lambda = (-pointInitial.y) / vectorDirector.y;
    double x = pointInitial.x + lambda * vectorDirector.x;
    double y = pointInitial.z + lambda * vectorDirector.z;
    return glm::vec3(x, y, 0.0f);
}
I use an arcball camera to rotate the world around a specified axis, and this is how I generate the MVP matrices:
this->position = glm::vec3(0.0f, 10.0f, 5.0f);
this->up = glm::vec3(0.0f, 1.0f, 0.0f);
this->lookAt = glm::vec3(0.0f, 0.0f, 0.0f);
this->fieldView = 99.0f;
this->farDistance = 100.0f;
this->nearDistance = 0.1f;
this->modelMatrix = glm::mat4(1.0f);
this->viewMatrix = glm::lookAt(this->position, this->lookAt, this->up) * glm::rotate(glm::degrees(this->rotationAngle) * this->dragSpeed, this->rotationAxis);
this->projectionMatrix = glm::perspective(glm::radians(this->fieldView), 1.0f, this->nearDistance, this->farDistance);
But something is going wrong, because I'm not getting the right results. Look at this capture of the application:
Each square is 1 unit, and the cube is rendered at position (0, 0, 0). With rotationAngle = 0, when I put the cursor at (0,0), (1,1), (2,2), (3,3), (4,4), (5,5) I get (0, 5.7), (0.8, 6.4), (1.6, 6.9), (2.4, 7.6), (3.2, 8.2), (4.2, 8.8) respectively. That's not what I expected.
Why is y off by about 6 units?
It's necessary to rotate the result of cursorCoordinatesToWorldCoordinates based on rotationAngle, isn't it?
--
What I already checked:
Checked that the viewport matches glViewport - OK
Checked the OpenGL coordinates (Y is up, not Z) - OK
You want to intersect the ray from glm::vec3(this->cursorCoordinate.x, this->cursorCoordinate.y, 0.0) to glm::vec3(this->cursorCoordinate.x, this->cursorCoordinate.y, 1.0) with the grid in world space, rather than in model space (of the cuboid).
You have to skip this->modelMatrix:
glm::vec3 pointInitial = glm::unProject(
    glm::vec3(this->cursorCoordinate.x, this->cursorCoordinate.y, 0.0),
    this->viewMatrix,
    this->projectionMatrix,
    this->viewPort);
glm::vec3 pointFinal = glm::unProject(
    glm::vec3(this->cursorCoordinate.x, this->cursorCoordinate.y, 1.0),
    this->viewMatrix,
    this->projectionMatrix,
    this->viewPort);
In any case, this->modelMatrix * this->viewMatrix is incorrect. If you want to intersect the ray with an object in model space, then it has to be this->viewMatrix * this->modelMatrix. Matrix multiplication is not commutative.
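Putting the fix together, the whole conversion could look like the sketch below. It is a sketch only: it assumes the grid lies in the world-space plane y = 0 (which is what the lambda computation in the question implies) and returns the world-space intersection point directly:
glm::vec3 cursorCoordinatesToWorldCoordinates()
{
    // unproject with the view matrix only: the ray is needed in world space
    glm::vec3 pointInitial = glm::unProject(
        glm::vec3(this->cursorCoordinate.x, this->cursorCoordinate.y, 0.0),
        this->viewMatrix, this->projectionMatrix, this->viewPort);
    glm::vec3 pointFinal = glm::unProject(
        glm::vec3(this->cursorCoordinate.x, this->cursorCoordinate.y, 1.0),
        this->viewMatrix, this->projectionMatrix, this->viewPort);
    // intersect the ray with the plane y = 0 (assumes the ray is not parallel to it)
    glm::vec3 vectorDirector = pointFinal - pointInitial;
    float lambda = -pointInitial.y / vectorDirector.y;
    return pointInitial + lambda * vectorDirector;
}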
Using the glm::lookAt function to create the camera view matrix works well, but it does not help with storing the camera's Euler angles. Although the calculations seem correct, the view ends up with the wrong pitch value.
In the following code, if the Y values of the current camera position and the destination are equal, there is no problem. However, the view tilts further down or up as soon as the Y values of the camera position and the destination differ.
The question is: why does the camera not point correctly at the object when the Y values of the camera and object positions are not equal?
float wrapAngle(float angle)
{
    int break_after = 100;
    constexpr float full_rotation = 2.0 * glm::pi<float>();
    while (angle < 0.0f || angle >= full_rotation)
    {
        if (angle < 0.0f) angle = angle + full_rotation;
        if (angle >= full_rotation) angle = angle - full_rotation;
        if (--break_after == 0) break;
    }
    if (break_after == 0) angle = 0.0f;
    return angle;
}
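(For reference, and not part of the original code: the same wrapping can be done without a loop, e.g. with std::fmod and GLM's constants header. A small sketch:)
#include <cmath>                 // std::fmod
#include <glm/gtc/constants.hpp> // glm::two_pi

float wrapAngleFmod(float angle)
{
    // map into (-2*pi, 2*pi), then shift negatives into [0, 2*pi)
    angle = std::fmod(angle, glm::two_pi<float>());
    return angle < 0.0f ? angle + glm::two_pi<float>() : angle;
}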
void getLookAtAngle(const glm::vec3& position, const glm::vec3& destination, glm::vec3 &angle)
{
    //! Find vector of sight toward destination.
    glm::vec3 sight = destination - position;
    //! Find X, Y rotation against the axis (global).
    double yAngle = wrapAngle(std::atan2(sight.x, sight.z));
    glm::mat4 yModel(1);
    yModel = glm::rotate(yModel, static_cast<float>(-yAngle), glm::vec3(0.0f, 1, 0));
    sight = yModel * glm::vec4(sight, 1.0f);
    double xAngle = wrapAngle(-std::atan2(sight.y, sight.z));
    //! assign xAngle, yAngle to the parameter 'angle'
    angle.x = glm::degrees(static_cast<float>(xAngle));
    angle.y = glm::degrees(static_cast<float>(yAngle));
    angle.z = 0.0f;
}
void updateCamera(const glm::vec3& position, const glm::vec3& destination)
{
    glm::vec3 m_rotation(1);
    getLookAtAngle(position, destination, m_rotation);
    m_model = glm::mat4(1);
    m_model = glm::rotate(m_model, glm::radians(m_rotation[0]), glm::vec3(1.0f, 0, 0));
    m_model = glm::rotate(m_model, glm::radians(m_rotation[1]), glm::vec3(0.0f, 1, 0));
    m_model = glm::rotate(m_model, glm::radians(m_rotation[2]), glm::vec3(0.0f, 0, 1));
    m_front = glm::normalize(glm::vec3(m_model * glm::vec4(0, 0, 1.0f, 1.0f)));
    m_right = glm::normalize(glm::cross(m_front, glm::vec3(0, 1.0f, 0)));
    m_up = glm::normalize(glm::cross(m_right, m_front));
    m_view = glm::lookAt(position, position + m_front, m_up);
}
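(A hypothetical sanity check, not part of the code above: rebuilding the sight direction from the extracted angles should reproduce glm::normalize(destination - position). Since getLookAtAngle removes the yaw first, the reconstruction has to apply yaw before pitch; note that updateCamera composes its rotations in the opposite order, X before Y, which is worth comparing against this.)
// Sketch: reconstruct the front vector from the angles produced by
// getLookAtAngle (angle.x = pitch, angle.y = yaw, both in degrees).
glm::vec3 frontFromAngles(const glm::vec3& angle)
{
    glm::mat4 m(1.0f);
    m = glm::rotate(m, glm::radians(angle.y), glm::vec3(0.0f, 1.0f, 0.0f)); // yaw first
    m = glm::rotate(m, glm::radians(angle.x), glm::vec3(1.0f, 0.0f, 0.0f)); // then pitch
    return glm::normalize(glm::vec3(m * glm::vec4(0.0f, 0.0f, 1.0f, 0.0f)));
}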
I'm attempting to implement an arcball style camera. I use glm::lookAt to keep the camera pointed at a target, and then move it around the surface of a sphere using azimuth/inclination angles to rotate the view.
I'm running into an issue where the view gets flipped upside down when the azimuth approaches 90 degrees.
Here's the relevant code:
Get projection and view matrices. Runs in the main loop:
void Visual::updateModelViewProjection()
{
    model = glm::mat4();
    projection = glm::mat4();
    view = glm::mat4();
    projection = glm::perspective
    (
        (float)glm::radians(camera.Zoom),
        (float)width / height, // aspect ratio
        0.1f,                  // near clipping plane
        10000.0f               // far clipping plane
    );
    view = glm::lookAt(camera.Position, camera.Target, camera.Up);
}
Mouse move event, for camera rotation
void Visual::cursor_position_callback(GLFWwindow* window, double xpos, double ypos)
{
    if (leftMousePressed)
    {
        ...
    }
    if (rightMousePressed)
    {
        GLfloat xoffset = (xpos - cursorPrevX) / 4.0;
        GLfloat yoffset = (cursorPrevY - ypos) / 4.0;
        camera.inclination += yoffset;
        camera.azimuth -= xoffset;
        if (camera.inclination > 89.0f)
            camera.inclination = 89.0f;
        if (camera.inclination < 1.0f)
            camera.inclination = 1.0f;
        if (camera.azimuth > 359.0f)
            camera.azimuth = 359.0f;
        if (camera.azimuth < 1.0f)
            camera.azimuth = 1.0f;
        float radius = glm::distance(camera.Position, camera.Target);
        camera.Position[0] = camera.Target[0] + radius * cos(glm::radians(camera.azimuth)) * sin(glm::radians(camera.inclination));
        camera.Position[1] = camera.Target[1] + radius * sin(glm::radians(camera.azimuth)) * sin(glm::radians(camera.inclination));
        camera.Position[2] = camera.Target[2] + radius * cos(glm::radians(camera.inclination));
        camera.updateCameraVectors();
    }
    cursorPrevX = xpos;
    cursorPrevY = ypos;
}
Calculate camera orientation vectors
void updateCameraVectors()
{
    Front = glm::normalize(Target - Position);
    Right = glm::rotate(glm::normalize(glm::cross(Front, {0.0, 1.0, 0.0})), glm::radians(90.0f), Front);
    Up = glm::normalize(glm::cross(Front, Right));
}
I'm pretty sure it's related to the way I calculate my camera's right vector, but I cannot figure out how to compensate.
Has anyone run into this before? Any suggestions?
It's a common mistake to use lookAt for rotating the camera. You should not. The backward/right/up directions are the columns of your view matrix. If you already have them then you don't even need lookAt, which tries to redo some of your calculations. On the other hand, lookAt doesn't help you in finding those vectors in the first place.
Instead build the view matrix first as a composition of translations and rotations, and then extract those vectors from its columns:
void Visual::cursor_position_callback(GLFWwindow* window, double xpos, double ypos)
{
    ...
    if (rightMousePressed)
    {
        GLfloat xoffset = (xpos - cursorPrevX) / 4.0;
        GLfloat yoffset = (cursorPrevY - ypos) / 4.0;
        camera.inclination = std::clamp(camera.inclination + yoffset, -90.f, 90.f); // std::clamp needs <algorithm> (C++17)
        camera.azimuth = fmodf(camera.azimuth + xoffset, 360.f);
        view = glm::mat4();
        view = glm::translate(view, glm::vec3(0.f, 0.f, camera.radius)); // add camera.radius to control the distance-from-target
        view = glm::rotate(view, glm::radians(camera.inclination + 90.f), glm::vec3(1.f, 0.f, 0.f));
        view = glm::rotate(view, glm::radians(camera.azimuth), glm::vec3(0.f, 0.f, 1.f));
        view = glm::translate(view, camera.Target);
        // glm::column comes from <glm/gtc/matrix_access.hpp>; its vec4 results
        // are truncated to vec3 explicitly
        camera.Right = glm::vec3(glm::column(view, 0));
        camera.Up = glm::vec3(glm::column(view, 1));
        camera.Front = -glm::vec3(glm::column(view, 2)); // minus because OpenGL camera looks towards negative Z
        camera.Position = glm::vec3(glm::column(view, 3));
        view = glm::inverse(view);
    }
    ...
}
Then remove the code that calculates view and the direction vectors from updateModelViewProjection and updateCameraVectors.
Disclaimer: this code is untested. You might need to fix a minus sign somewhere, order of operations, or the conventions might mismatch (Z is up or Y is up, etc...).