Quaternion-based camera unwanted roll - c++

I created a camera based on quaternions, but when I turn the camera, an unwanted roll appears. I would not like to lose my freedom of movement using, for example, Euler angles, since there is a need to add roll from time to time. If I use Euler angles, then, as far as I know, I can get a gimbal lock.
Code:
// Free-flight camera: accumulates per-frame pitch/yaw/roll deltas and folds
// them into a persistent orientation quaternion in updateView().
struct FreeCamera : public BaseCamera {
// Per-frame incremental rotation angles (consumed and zeroed by updateView()).
float pitch = 0, yaw = 0, roll = 0;
void updateView();
private:
glm::quat qCamera; // cumulative camera orientation (world-to-view rotation)
};
// Feeds mouse movement into a FreeCamera as pitch/yaw/roll deltas.
struct FreeCameraController: public BaseCameraController {
// Rotation per unit of mouse movement on each axis.
float sensitivityPitch = 0.0025f, sensitivityYaw = 0.0025f, sensitivityRoll = 0.0025f;
void mouseMove(const float x, const float y, const float z = 0);
// Resets the reference point without generating a rotation delta.
inline void setMousePos(const float x, const float y, const float z = 0) {
lastMousePos = glm::vec3(x, y, z);
}
private:
glm::vec3 lastMousePos = glm::vec3(0.0f); // previous cursor position (z drives roll)
};
// Folds this frame's pitch/yaw/roll increments into the persistent orientation
// and rebuilds the view matrix (view = R * T(-pos)).
void FreeCamera::updateView() {
// temporary frame quaternion from pitch, yaw, roll
glm::quat qPYR = glm::quat(glm::vec3(pitch, yaw, roll));
// reset values
pitch = yaw = roll = 0;
// update qCamera
// NOTE(review): composing incremental pitch and yaw rotations like this is
// what produces the "unwanted roll" the question describes — pitch and yaw
// rotations do not commute, so their accumulated product acquires a roll term.
qCamera = qPYR * qCamera;
qCamera = glm::normalize(qCamera); // counter floating-point drift
glm::mat4 rotate = glm::mat4_cast(qCamera);
glm::mat4 translate = glm::mat4(1.0f);
translate = glm::translate(translate, -pos); // pos inherited from BaseCamera
view = rotate * translate;
}
void FreeCameraController::mouseMove(const float x, const float y, const float z) {
glm::vec3 dCoord = glm::vec3(x, y, z) - lastMousePos;
((FreeCamera*)camera)->yaw = dCoord.x * sensitivityYaw;
((FreeCamera*)camera)->pitch = dCoord.y * sensitivityPitch;
((FreeCamera*)camera)->roll = dCoord.z * sensitivityRoll;
lastMousePos = glm::vec3(x, y, z);
}
Is it possible to reset unwanted roll, "stabilize" the camera?

As you want to block roll (and, if it's a car, possibly yaw too, since otherwise you would make the car fly), you must lock one of the axes by concatenating the rotations. What you want to achieve is effectively a deliberate gimbal lock (when flying you use a single quaternion containing all the rotations specifically to get rid of it). So, assuming you can detect whether the vehicle is on the ground or not:
glm::mat4 rotationMatrix;
// When you want to get rid of any axis rotation, you must lock it
if(onGround)
{
// Yaw-only quaternion (rotation about the world Y axis).
glm::quat yawQ = glm::quat(glm::vec3(0.0f, yaw, 0.0f));
yawQ = glm::normalize(yawQ);
glm::mat4 yawMat = glm::mat4_cast(yawQ);
// Pitch-only quaternion (rotation about the X axis).
// Fixed: the original wrote `glm::quat pitch = glm::quat(glm::vec3(pitch, ...))`;
// the new `pitch` shadows the angle variable inside its own initializer, so the
// quaternion was built from an uninitialized value. Renamed to pitchQ.
glm::quat pitchQ = glm::quat(glm::vec3(pitch, 0.0f, 0.0f));
pitchQ = glm::normalize(pitchQ);
glm::mat4 pitchMat = glm::mat4_cast(pitchQ);
// Roll never enters the product, so it stays locked while grounded.
rotationMatrix = pitchMat * yawMat;
}
else
{
//Your computation
rotationMatrix = glm::mat4_cast(yourQuaternion);
}
viewMatrix = rotationMatrix * translationMatrix;
Note that it is not necessary to use quaternions to achieve the ground-control effect.

Related

Quaternion rotations strange behavior

I'm currently working on my own small game engine (I am learning OpenGL). I made a camera mechanism that can rotate vectors with Euler angles, and now I'm working on rotations with quaternions. Now I'm stuck because my quaternion rotations behave very strangely (flipping objects, not rotating the camera as it should). Please help me find out what is wrong with my algorithm. Could you suggest some fixes to my camera code? Below is my source code, and here are some notes about it: the target I want to look at is at coordinates (0.0f, 0.0f, 0.0f). I want the position from which I look at the target to be glm::vec3 position = glm::vec3(0.0f, 0.0f, 3.0f)
My camera class:
// Free-look camera for the learning engine; supports Euler-angle and
// quaternion-based rotation modes (callback selected in the constructor).
class Camera {
private:
bool eulerMode = false; // always false here, so quaternionRotate is installed
float m_mouseSensitivity;
float m_velocity;
glm::vec3 m_rightAxis{};
glm::vec3 m_upAxis;
glm::vec3 m_position;
glm::vec3 m_target{};
glm::vec3 m_r{}; // view direction computed by quaternionRotate (see notes there)
glm::vec3 m_direction{};
// Rotates vectorToRotate by `angle` degrees around rotationAxis via quaternions.
static glm::vec3 rotateVector(float angle, glm::vec3 rotationAxis, glm::vec3 vectorToRotate);
// Builds a unit quaternion from an angle (degrees) and an axis.
static glm::quat quaternion(float angle, glm::vec3 vec);
public:
// GLFW callbacks are plain functions, so the instance is exposed statically.
static Camera *s_context;
float m_yaw;
float m_pitch;
float m_mouseLastX;
float m_mouseLastY;
bool m_firstMouse;
glm::vec3 m_frontAxis;
Camera(float speed,
int width,
int height,
glm::vec3 position = glm::vec3(0.0f, 0.0f, 3.0f),
glm::vec3 up = glm::vec3(0.0f, 1.0f, 0.0f),
glm::vec3 target = glm::vec3(0.0f, 0.0f, 0.0f)
);
glm::mat4 getLookAtMatrix();
static void quaternionRotate(GLFWwindow *window, double x, double y);
};
// Initializes camera state and installs the GLFW cursor-position callback.
// NOTE(review): m_upAxis is set twice — once in the init list (from `up`) and
// again in the body from the recomputed basis; the init-list value is discarded.
Camera::Camera(
float speed,
int width,
int height,
glm::vec3 position,
glm::vec3 up,
glm::vec3 target
)
: m_pitch(0.0f), m_yaw(-90.0f), m_mouseLastX((float) width / 2),
m_mouseLastY((float) height / 2), m_mouseSensitivity(0.1f), m_upAxis(up), m_position(position),
m_frontAxis(glm::vec3(0.0f, 0.0f, -1.0f)), m_firstMouse(true) {
m_velocity = speed;
// Points from the target toward the camera, i.e. the camera's *backward* axis.
m_direction = glm::normalize(position - target);
m_rightAxis = glm::normalize(glm::cross(up, m_direction));
m_upAxis = glm::cross(m_direction, m_rightAxis);
s_context = this; // expose this instance to the static GLFW callbacks
glfwSetWindowUserPointer(g_Window->getOpenGLWindow(), this);
if (eulerMode) {
glfwSetCursorPosCallback(g_Window->getOpenGLWindow(), eulerRotate);
} else {
glfwSetCursorPosCallback(g_Window->getOpenGLWindow(), quaternionRotate);
}
}
// Builds the view matrix.
// NOTE(review): m_r is assigned a normalized *direction* in quaternionRotate(),
// but glm::lookAt expects a target *point* as its second argument — this
// mismatch is a likely cause of the reported strange behavior (the intended
// call is presumably lookAt(m_position, m_position + m_r, m_upAxis) — confirm).
glm::mat4 Camera::getLookAtMatrix() {
glm::mat4 view = glm::lookAt(m_position, m_r, m_upAxis);
return view;
}
// for the sake of brevity, I skipped some class methods that are unnecessary for quaternions rotations
// GLFW cursor callback (static): converts mouse deltas into accumulated
// yaw/pitch angles and rebuilds the view direction with two quaternion rotations.
void Camera::quaternionRotate(GLFWwindow *window, double x, double y) {
// First event: seed last-position so the initial delta is zero.
if (s_context->m_firstMouse) {
s_context->m_mouseLastX = (float) x;
s_context->m_mouseLastY = (float) y;
s_context->m_firstMouse = false;
}
auto xoffset = (float) (x - s_context->m_mouseLastX);
auto yoffset = (float) (s_context->m_mouseLastY - y); // inverted: screen Y grows downward
s_context->m_mouseLastX = (float) x;
s_context->m_mouseLastY = (float) y;
float sensitivity = 0.1f; // shadows m_mouseSensitivity; consider reusing the member
xoffset *= sensitivity;
yoffset *= sensitivity;
// Accumulated absolute angles (degrees).
s_context->m_yaw += xoffset;
s_context->m_pitch += yoffset;
glm::vec3 yAxis = glm::vec3(0, 1, 0);
// Rotate the view vector by the horizontal angle around the vertical axis
// NOTE(review): m_direction was initialized as normalize(position - target),
// which points *away* from the target, and is never updated — the total
// yaw/pitch is re-applied to that fixed vector on every event.
glm::vec3 view = s_context->m_direction;
view = glm::normalize(rotateVector(s_context->m_yaw, yAxis, view));
// Rotate the view vector by the vertical angle around the horizontal axis
glm::vec3 xAxis = glm::normalize(glm::cross(yAxis, view));
view = glm::normalize(rotateVector(s_context->m_pitch, xAxis, view));
// NOTE(review): m_r receives a *direction*, but getLookAtMatrix() passes it
// to glm::lookAt as a target *point* — see note there.
s_context->m_r = view;
s_context->m_upAxis = glm::normalize(glm::cross(s_context->m_r, xAxis));
}
// Rotates vectorToRotate by `angle` degrees about rotationAxis using the
// quaternion sandwich product q * p * q^-1, where p is the vector promoted to a
// pure quaternion (w == 0).
// Fixed: the original wrote `rotationQ * vectorToRotate * conjugateQ`. With
// glm's operator overloads, quat * vec3 already performs a full rotation and
// yields a vec3, so the expression does not compute the intended sandwich
// product — build an explicit pure quaternion instead.
glm::vec3 Camera::rotateVector(float angle, const glm::vec3 rotationAxis, const glm::vec3 vectorToRotate) {
glm::quat rotationQ = quaternion(angle, rotationAxis);
glm::quat pure(0.0f, vectorToRotate.x, vectorToRotate.y, vectorToRotate.z);
glm::quat result = rotationQ * pure * glm::conjugate(rotationQ);
return {result.x, result.y, result.z};
}
// Builds a unit quaternion for a rotation of `angle` degrees about `vec`
// (axis assumed normalized). Layout: glm::quat(w, x, y, z) with
// w = cos(angle/2) and xyz = axis * sin(angle/2).
glm::quat Camera::quaternion(float angle, const glm::vec3 vec) {
const float halfAngle = glm::radians(angle / 2);
const float s = sinf(halfAngle);
const float w = cosf(halfAngle);
return {w, vec.x * s, vec.y * s, vec.z * s};
}

Raycasting (Mouse Picking) while using an Perspective VS Orthographic Projection in OpenGL

I am struggling to understand how to change my algorithm to handle raycasting (utilized for MousePicking) using a Perspective projection and an Orthographic projection.
Currently I have a scene with 3D objects that have AxisAligned bounding boxes attached to them.
While rendering the scene using a perspective projection (created with glm::perspective) I can successfully use raycasting and my mouse to "pick" different objects in my scene. Here is a demonstration.
If I render the same scene, but using an Orthographic projection, and position the camera above the scene facing down (looking down the Y axis — imagine a level editor for a game), I am unable to correctly raycast from where the user clicks on the screen, so I cannot get MousePicking working while rendering using an Orthographic projection. Here is a demonstration of it not working.
My algorithm at a high level:
auto const coords = mouse.coords();
glm::vec2 const mouse_pos{coords.x, coords.y};
glm::vec3 ray_dir, ray_start;
if (perspective) { // This "works"
auto const ar = aspect_rate;
auto const fov = field_of_view;
glm::mat4 const proj_matrix = glm::perspective(fov, ar, f.near, f.far);
auto const& target_pos = camera.target.get_position();
// NOTE(review): eye and center are the same point here, which makes the view
// matrix degenerate — presumably eye should be the camera position; verify.
glm::mat4 const view_matrix = glm::lookAt(target_pos, target_pos, glm::vec3{0, -1, 0});
ray_dir = Raycast::calculate_ray_into_screen(mouse_pos, proj_matrix, view_matrix, view_rect);
ray_start = camera.world_position();
}
else if (orthographic) { // This "doesn't work"
glm::vec3 const POS = glm::vec3{50};
glm::vec3 const FORWARD = glm::vec3{0, -1, 0};
glm::vec3 const UP = glm::vec3{0, 0, -1};
// 1024, 768 with NEAR 0.001 and FAR 10000
//glm::mat4 proj_matrix = glm::ortho(0.0f, 1024.0f, 0.0f, 768.0f, 0.0001f, 10000.0f);
// Fixed: pass float literals throughout. The original mixed int (0, 1024, ...)
// and double (0.0001) arguments, which breaks glm::ortho's template deduction.
glm::mat4 proj_matrix = glm::ortho(0.0f, 1024.0f, 0.0f, 768.0f, 0.0001f, 100.0f);
// Look down at the scene from above
glm::mat4 view_matrix = glm::lookAt(POS, POS + FORWARD, UP);
// convert the mouse screen coordinates into world coordinates for the cube/ray test
auto const p0 = screen_to_world(mouse_pos, view_rect, proj_matrix, view_matrix, 0.0f);
auto const p1 = screen_to_world(mouse_pos, view_rect, proj_matrix, view_matrix, 1.0f);
ray_start = p0;
ray_dir = glm::normalize(p1 - p0);
}
bool const intersects = ray_intersects_cube(logger, ray_dir, ray_start,
eid, tr, cube, distances);
In perspective mode, we cast a ray into the scene and see if it intersects with the cube surrounding the object.
In orthographic mode, I'm casting two rays from the screen (one at z=0, the other at z=1) and creating a ray between those two points. I set the ray start point to where the mouse pointer is (with z=0) and use the ray direction just calculated as inputs into the same ray_cube_intersection algorithm.
My question is this
Since the MousePicking works using the Perspective projection, but not using an Orthographic projection:
Is it reasonable to assume the same ray_cube intersection algorithm can be used with a perspective/orthographic projection?
Is my thinking about setting the ray_start and ray_dir variables in the orthographic case correct?
Here is the source for the ray/cube collision algorithm in use.
glm::vec3
Raycast::calculate_ray_into_screen(glm::vec2 const& point, glm::mat4 const& proj,
glm::mat4 const& view, Rectangle const& view_rect)
{
// Mouse-picking rays point "into" the screen, i.e. toward z = -1 in NDC,
// so unproject the cursor at that depth.
return screen_to_world(point, view_rect, proj, view, -1.0f);
}
// Slab-based ray/AABB intersection (Williams et al., "An Efficient and Robust
// Ray-Box Intersection Algorithm"). The cube is scaled then translated into
// world space; r.invdir and r.sign are the precomputed reciprocal direction
// and per-axis sign indices. On hit, `distance` receives the parametric entry t.
bool
ray_cube_intersect(Ray const& r, Transform const& transform, Cube const& cube,
float& distance)
{
auto const& cubepos = transform.translation;
glm::vec3 const minpos = cube.min * transform.scale;
glm::vec3 const maxpos = cube.max * transform.scale;
std::array<glm::vec3, 2> const bounds{{minpos + cubepos, maxpos + cubepos}};
// Intersect the X and Y slabs and merge them into [txmin, txmax].
float txmin = (bounds[ r.sign[0]].x - r.orig.x) * r.invdir.x;
float txmax = (bounds[1 - r.sign[0]].x - r.orig.x) * r.invdir.x;
float tymin = (bounds[ r.sign[1]].y - r.orig.y) * r.invdir.y;
float tymax = (bounds[1 - r.sign[1]].y - r.orig.y) * r.invdir.y;
if ((txmin > tymax) || (tymin > txmax)) {
return false;
}
if (tymin > txmin) {
txmin = tymin;
}
if (tymax < txmax) {
txmax = tymax;
}
// Intersect the Z slab.
float tzmin = (bounds[ r.sign[2]].z - r.orig.z) * r.invdir.z;
float tzmax = (bounds[1 - r.sign[2]].z - r.orig.z) * r.invdir.z;
if ((txmin > tzmax) || (tzmin > txmax)) {
return false;
}
// Fixed: fold the Z slab into the running interval like X and Y. The original
// reported `distance = tzmin` unconditionally, which is wrong whenever the
// entry point lies on the X or Y slab (tzmin can even be behind the origin).
if (tzmin > txmin) {
txmin = tzmin;
}
if (tzmax < txmax) {
txmax = tzmax;
}
distance = txmin;
return true;
}
edit: The math space conversions functions I'm using:
namespace boomhs::math::space_conversions
{
inline glm::vec4
clip_to_eye(glm::vec4 const& clip, glm::mat4 const& proj_matrix, float const z)
{
// Unproject from clip space into eye space, then force the requested
// eye-space z and set w = 0 so the result behaves like a direction.
glm::vec4 const eye = glm::inverse(proj_matrix) * clip;
return glm::vec4{eye.x, eye.y, z, 0.0f};
}
inline glm::vec3
eye_to_world(glm::vec4 const& eye, glm::mat4 const& view_matrix)
{
// Take the eye-space vector into world space and return it normalized.
glm::vec4 const world = glm::inverse(view_matrix) * eye;
return glm::normalize(glm::vec3{world.x, world.y, world.z});
}
inline constexpr glm::vec2
screen_to_ndc(glm::vec2 const& scoords, Rectangle const& view_rect)
{
// Map pixel coordinates onto [-1, 1] NDC. Screen Y grows downward, hence
// the final flip of the y component.
float const nx = ((2.0f * scoords.x) / view_rect.right()) - 1.0f;
float const ny = ((2.0f * scoords.y) / view_rect.bottom()) - 1.0f;
assert(nx <= 1.0f);
assert(nx >= -1.0f);
assert(ny <= 1.0f);
assert(ny >= -1.0f);
return glm::vec2{nx, -ny};
}
inline glm::vec4
ndc_to_clip(glm::vec2 const& ndc, float const z)
{
// Homogenize: clip = (ndc.x, ndc.y, z, 1).
glm::vec4 const clip{ndc.x, ndc.y, z, 1.0f};
return clip;
}
inline glm::vec3
screen_to_world(glm::vec2 const& scoords, Rectangle const& view_rect, glm::mat4 const& proj_matrix,
glm::mat4 const& view_matrix, float const z)
{
// Pipeline: screen -> NDC -> clip -> eye -> world, threading `z` through.
auto const ndc = screen_to_ndc(scoords, view_rect);
return eye_to_world(clip_to_eye(ndc_to_clip(ndc, z), proj_matrix, z), view_matrix);
}
} // namespace boomhs::math::space_conversions
I worked on this for several days because I ran into the same problem.
The unproject methods that we are used to working with work 100% correctly here as well — even with orthographic projection. But with orthographic projection, the direction vector going from the camera position into the screen is always the same. So unprojecting the cursor in the same way does not work as intended in this case.
What you want to do is getting the camera direction vector as it is but in order to get the ray origin you need to shift the camera position according to the current mouse position on screen.
My approach (C#, but you'll get the idea):
// Orthographic mouse picking: the ray direction is always the camera's view
// direction; only the ray *origin* shifts with the cursor, along the camera's
// local right/up axes scaled to the ortho frustum extents.
Vector3 worldUpDirection = new Vector3(0, 1, 0); // if your world is y-up
// Get mouse coordinates (2d) relative to window position:
Vector2 mousePosRelativeToWindow = GetMouseCoordsRelativeToWindow(); // (0,0) would be top left window corner
// get camera direction vector:
Vector3 camDirection = Vector3.Normalize(cameraTarget - cameraPosition);
// get x and y coordinates relative to frustum width and height.
// glOrthoWidth and glOrthoHeight are the sizeX and sizeY values
// you created your projection matrix with. If your frustum has a width of 100,
// x would become -50 when the mouse is left and +50 when the mouse is right.
float x = +(2.0f * mousePosRelativeToWindow .X / viewportWidth - 1) * (glOrthoWidth / 2);
float y = -(2.0f * mousePosRelativeToWindow .Y / viewPortHeight - 1) * (glOrthoHeight / 2);
// Now, you want to calculate the camera's local right and up vectors
// (depending on the camera's current view direction):
Vector3 cameraRight = Vector3.Normalize(Vector3.Cross(camDirection, worldUpDirection));
Vector3 cameraUp = Vector3.Normalize(Vector3.Cross(cameraRight, camDirection));
// Finally, calculate the ray origin:
Vector3 rayOrigin = cameraPosition + cameraRight * x + cameraUp * y;
Vector3 rayDirection = camDirection;
Now you have the ray origin and the ray direction for your orthographic projection.
With these you can run any ray-plane/volume-intersections as usual.

How to move camera around and stick it to the player?

I'm trying to move camera around the player.
Right now I'm using camera class like this:
// Stores the initial pose and derives the basis vectors from yaw/pitch.
// (`this->` is required: parameter names shadow the members.)
Camera::Camera(glm::vec3 position, glm::vec3 up, GLfloat yaw, GLfloat pitch)
{
this->position = position;
this->m_WorldUp = up;
this->up = up;
this->m_Yaw = yaw;
this->m_Pitch = pitch;
this->UpdateCameraVectors();
}
// View matrix for a free-look camera: eye at `position`, looking along m_Front.
glm::mat4 Camera::getViewMatrix()
{
return glm::lookAt(position, position + m_Front, up);
}
// Moves the camera in the requested direction, scaled by speed and frame time.
void Camera::ProcessKeyboard(Camera_Movement direction, GLfloat deltaTime)
{
const float step = moveSpeed * deltaTime;
if (direction == FORWARD) {
position += m_Front * step;
} else if (direction == BACKWARD) {
position -= m_Front * step;
} else if (direction == LEFT) {
position -= m_Right * step;
} else if (direction == RIGHT) {
position += m_Right * step;
} else if (direction == UPWARDS) {
position += m_WorldUp * step;
} else if (direction == DOWNWARDS) {
position -= m_WorldUp * step;
}
}
// Applies mouse deltas to yaw/pitch; optionally clamps pitch so the view
// cannot flip over the poles. Rebuilds the basis vectors afterwards.
void Camera::ProcessMouseMovement(GLfloat xOffset, GLfloat yOffset, GLboolean constrainPitch)
{
m_Yaw += xOffset * sensitivity;
m_Pitch += yOffset * sensitivity;
if (constrainPitch) {
if (m_Pitch > 89.0f) {
m_Pitch = 89.0f;
}
if (m_Pitch < -89.0f) {
m_Pitch = -89.0f;
}
}
UpdateCameraVectors();
}
// Rebuilds Front/Right/Up from the Euler angles (degrees).
void Camera::UpdateCameraVectors()
{
glm::vec3 front;
front.x = cos(glm::radians(m_Yaw)) * cos(glm::radians(m_Pitch));
// NOTE(review): the minus sign inverts vertical look relative to the usual
// convention (front.y = sin(pitch)) — the answer code below flags this as a
// sign error; confirm which behavior is intended.
front.y = -sin(glm::radians(m_Pitch));
front.z = sin(glm::radians(m_Yaw)) * cos(glm::radians(m_Pitch));
m_Front = glm::normalize(front);
m_Right = glm::normalize(glm::cross(m_Front, m_WorldUp));
up = glm::normalize(glm::cross(m_Right, m_Front));
}
It allows me to free look and move around the world.
Player's update method at the moment:
glm::mat4 projection = glm::mat4(1.0f);
projection = glm::perspective(glm::radians(45.0f), 16.0f / 9.0f, 0.1f, 1000.0f);
glm::mat4 view = glm::mat4(1.0f);
view = camera->getViewMatrix();
glm::mat4 model = glm::mat4(1.0f); {
glm::mat4 translate = glm::translate(model, position);
glm::mat4 rotate = glm::rotate(model, glm::radians(180.0f), glm::vec3(0.0f, 1.0f, 0.0f));
glm::mat4 scale = glm::scale(model, glm::vec3(0.1f, 0.1f, 0.1f));
model = translate * rotate * scale;
}
glm::mat4 mvp = projection * view * model;
GLint u_mvp = shader.GetUniformLocation("u_mvp");
glUniformMatrix4fv(u_mvp, 1, GL_FALSE, glm::value_ptr(mvp));
I know that I have to change something with view matrix, but I have not got enough knowledge.
How can I upgrade my camera class that it can look, rotate, around a player, like in a circle, an MMO RPG style?
The camera class itself should not be receiving keyboard updates at all - that should be done in the player class. Every time the player moves, update the camera class with its new position. See comments in below code for more details.
// Orbit-camera constructor: `position` is the *player* position; the camera
// sits m_Dist away along the view direction derived from yaw/pitch.
Camera::Camera(glm::vec3 position, glm::vec3 up, GLfloat yaw, GLfloat pitch, GLfloat dist)
{
m_WorldUp = up;
// this->up = up; <- delete this variable; lookAt computes it for us
m_Pos = position; // this is the *player* position
m_Yaw = yaw;
m_Pitch = pitch;
m_Dist = dist; // distance from the player
UpdateViewMatrix(true);
}
// private method
// Rebuilds the view matrix; recomputes the view direction from yaw/pitch when
// computeDir is set. NOTE(review): prefer declaring the `= false` default on
// the in-class declaration only — a default on an out-of-line definition is
// fragile (visible only after this point in the translation unit).
void Camera::UpdateViewMatrix(bool computeDir = false)
{
// compute the new direction
if (computeDir)
{
// Unit radial vector pointing from the target out toward the camera.
glm::vec3 radial;
radial.x = cos(glm::radians(m_Yaw)) * cos(glm::radians(m_Pitch));
radial.y = sin(glm::radians(m_Pitch)); // there was a sign error here
radial.z = sin(glm::radians(m_Yaw)) * cos(glm::radians(m_Pitch));
m_Dir = -radial; // camera looks back along the radial, toward the player
}
glm::vec3 pos = m_Pos - m_Dist * m_Dir; // *camera* position
// Fixed: glm::lookAt takes (eye, center, up) — the eye must be the *camera*
// position and the center the player; the original had the two swapped,
// which makes the camera look away from the player.
m_View = glm::lookAt(pos, m_Pos, m_WorldUp);
}
// public method - call this every time the player moves.
// Tracks the player: store the new target position and refresh the view
// matrix; the direction is unchanged, so no recompute is requested.
void Camera::UpdateTargetPosition(glm::vec3 const & pos)
{
m_Pos = pos;
UpdateViewMatrix();
}
// Sets new orientation angles: clamps pitch away from the poles, wraps yaw
// into (-180, 180], then rebuilds the view direction and matrix.
void Camera::UpdateAngles(GLfloat yaw, GLfloat pitch, GLboolean constrainPitch)
{
if (constrainPitch && pitch > 89.0f) {
pitch = 89.0f;
} else if (constrainPitch && pitch < -89.0f) {
pitch = -89.0f;
}
// Shift yaw back into the conventional range when it has drifted outside.
if (yaw < -180.0f || yaw > 180.0f) {
yaw -= floor((yaw + 180.0f) / 360.0f) * 360.0f;
}
m_Yaw = yaw;
m_Pitch = pitch;
UpdateViewMatrix(true);
}
// Mouse handler: scales raw offsets by sensitivity and delegates to UpdateAngles.
void Camera::ProcessMouseMovement(GLfloat xOffset, GLfloat yOffset, GLboolean constrainPitch)
{
GLfloat const newYaw = m_Yaw + xOffset * sensitivity;
GLfloat const newPitch = m_Pitch + yOffset * sensitivity;
UpdateAngles(newYaw, newPitch, constrainPitch);
}
for mouse follow you need:
camera = inverse(player * camera_view_and_offset)
where player is your player direct matrix, camera_view_and_offset is the view offset and turn around matrix relative to your player coordinate system and camera is your camera inverse matrix you should use as part of modelview ....
for more info see:
Understanding 4x4 homogenous transform matrices

Arcball camera inverting at 90 deg azimuth

I'm attempting to implement an arcball style camera. I use glm::lookAt to keep the camera pointed at a target, and then move it around the surface of a sphere using azimuth/inclination angles to rotate the view.
I'm running into an issue where the view gets flipped upside down when the azimuth approaches 90 degrees.
Here's the relevant code:
Get projection and view martrices. Runs in the main loop
// Rebuilds model/view/projection each frame.
// NOTE(review): glm::mat4() value-initializes to identity only in older GLM or
// with GLM_FORCE_CTOR_INIT; prefer glm::mat4(1.0f) to be explicit — confirm
// which GLM version this targets.
void Visual::updateModelViewProjection()
{
model = glm::mat4();
projection = glm::mat4();
view = glm::mat4();
projection = glm::perspective
(
(float)glm::radians(camera.Zoom),
(float)width / height, // aspect ratio
0.1f, // near clipping plane
10000.0f // far clipping plane
);
view = glm::lookAt(camera.Position, camera.Target, camera.Up);
}
Mouse move event, for camera rotation
// Mouse-move handler: right-drag orbits the camera around Target using
// azimuth/inclination spherical coordinates.
void Visual::cursor_position_callback(GLFWwindow* window, double xpos, double ypos)
{
if (leftMousePressed)
{
...
}
if (rightMousePressed)
{
GLfloat xoffset = (xpos - cursorPrevX) / 4.0;
GLfloat yoffset = (cursorPrevY - ypos) / 4.0;
camera.inclination += yoffset;
camera.azimuth -= xoffset;
// Clamp inclination away from the poles and azimuth away from a full wrap.
if (camera.inclination > 89.0f)
camera.inclination = 89.0f;
if (camera.inclination < 1.0f)
camera.inclination = 1.0f;
if (camera.azimuth > 359.0f)
camera.azimuth = 359.0f;
if (camera.azimuth < 1.0f)
camera.azimuth = 1.0f;
// Spherical -> Cartesian with Z as the pole axis; note the azimuth /
// inclination roles here are swapped relative to the usual convention,
// which is related to the flip reported at 90 degrees azimuth.
float radius = glm::distance(camera.Position, camera.Target);
camera.Position[0] = camera.Target[0] + radius * cos(glm::radians(camera.azimuth)) * sin(glm::radians(camera.inclination));
camera.Position[1] = camera.Target[1] + radius * sin(glm::radians(camera.azimuth)) * sin(glm::radians(camera.inclination));
camera.Position[2] = camera.Target[2] + radius * cos(glm::radians(camera.inclination));
camera.updateCameraVectors();
}
cursorPrevX = xpos;
cursorPrevY = ypos;
}
Calculate camera orientation vectors
// Recomputes Front/Right/Up from Position and Target.
// NOTE(review): rotating the cross product an extra 90 degrees around Front is
// suspicious — Right is normally just normalize(cross(Front, worldUp)); this
// extra rotation is a likely contributor to the flip at 90-degree azimuth.
// Also, cross(Front, {0,1,0}) degenerates when Front is parallel to world up.
void updateCameraVectors()
{
Front = glm::normalize(Target-Position);
Right = glm::rotate(glm::normalize(glm::cross(Front, {0.0, 1.0, 0.0})), glm::radians(90.0f), Front);
Up = glm::normalize(glm::cross(Front, Right));
}
I'm pretty sure it's related to the way I calculate my camera's right vector, but I cannot figure out how to compensate.
Has anyone run into this before? Any suggestions?
It's a common mistake to use lookAt for rotating the camera. You should not. The backward/right/up directions are the columns of your view matrix. If you already have them then you don't even need lookAt, which tries to redo some of your calculations. On the other hand, lookAt doesn't help you in finding those vectors in the first place.
Instead build the view matrix first as a composition of translations and rotations, and then extract those vectors from its columns:
// Answer approach: build the camera-to-world transform directly from
// translations and rotations, read the basis vectors off its columns, then
// invert it to get the view matrix — no lookAt needed.
void Visual::cursor_position_callback(GLFWwindow* window, double xpos, double ypos)
{
...
if (rightMousePressed)
{
GLfloat xoffset = (xpos - cursorPrevX) / 4.0;
GLfloat yoffset = (cursorPrevY - ypos) / 4.0;
camera.inclination = std::clamp(camera.inclination + yoffset, -90.f, 90.f);
camera.azimuth = fmodf(camera.azimuth + xoffset, 360.f);
view = glm::mat4();
view = glm::translate(view, glm::vec3(0.f, 0.f, camera.radius)); // add camera.radius to control the distance-from-target
view = glm::rotate(view, glm::radians(camera.inclination + 90.f), glm::vec3(1.f,0.f,0.f));
view = glm::rotate(view, glm::radians(camera.azimuth), glm::vec3(0.f,0.f,1.f));
view = glm::translate(view, camera.Target);
// Columns of the camera-to-world matrix are the basis vectors + position.
camera.Right = glm::column(view, 0);
camera.Up = glm::column(view, 1);
camera.Front = -glm::column(view, 2); // minus because OpenGL camera looks towards negative Z.
camera.Position = glm::column(view, 3);
view = glm::inverse(view); // world-to-camera = inverse of camera-to-world
}
...
}
Then remove the code that calculates view and the direction vectors from updateModelViewProjection and updateCameraVectors.
Disclaimer: this code is untested. You might need to fix a minus sign somewhere, order of operations, or the conventions might mismatch (Z is up or Y is up, etc...).

Opengl Camera and multiplying matrixes

I am currently having a lot of problems with the camera I am making. The problem occurs with the matrix rotation I am doing, which this website says to use to avoid gimbal lock.
One of the first problems you will note is that the order you apply
these rotations matter. As previously stated, a rotation matrix is an
orientation transform. Each transform defines a new coordinate system,
and the next transform is based on an object in the new space. For
example, if we apply the roll first, we have now changed what the axis
for the subsequent yaw is.
And when I perform this — for example, if I want to pitch around the current x axis — the x axis also changes in my axis-angle rotation method, which is obviously wrong. I've looked around a lot and can't find any solution. I have tried a lot of different versions of the axis-angle rotation matrix.
// Pitch: rotate the orientation about its *current* local X axis.
void FrustumCamera::xAxisRotation(float angle)
{
Vector3<float> x = m_orientation.getXAxis();
Matrix4<float> matrix = m_orientation.axisAngleRotation(x,angle);
m_orientation = matrix*m_orientation;
// NOTE(review): if getXAxis()/getYAxis()/getZAxis() return copies, these
// normalise calls act on temporaries and do nothing to m_orientation —
// verify the Matrix4 API.
normalise(m_orientation.getXAxis());
normalise(m_orientation.getYAxis());
normalise(m_orientation.getZAxis());
}
// Yaw: rotate the orientation about its *current* local Y axis.
void FrustumCamera::yAxisRotation(float angle)
{
Vector3<float> y = m_orientation.getYAxis();
Matrix4<float> matrix = m_orientation.axisAngleRotation(y,angle);
m_orientation = matrix*m_orientation;
// NOTE(review): same caveat as xAxisRotation — if the getters return copies,
// these normalise calls have no effect on m_orientation.
normalise(m_orientation.getXAxis());
normalise(m_orientation.getYAxis());
normalise(m_orientation.getZAxis());
}
// Standard 4x4 row-by-column product: result(i,j) = sum_k this(i,k) * m(k,j).
// Fixed: added the `template <class Type>` header required for an out-of-class
// definition of a class-template member (present on axisAngleRotation but
// missing here), and removed a stray double semicolon.
template <class Type>
Matrix4<Type> Matrix4<Type>::operator*(Matrix4& matrix)
{
Matrix4<Type> temp(m_matrix); // initial contents are fully overwritten below
for(int i=0;i<4;i++)
{
for(int j=0;j<4;j++)
{
Type total = 0;
for(int k=0;k<4;k++)
{
total += m_matrix[i][k]*matrix.getAt(k,j);
}
temp.setAt(i,j,total);
}
}
return temp;
}
// Builds the Rodrigues axis-angle rotation matrix for `angle` degrees about
// `axis` (assumed unit length), with an identity bottom row/column.
template <class Type>
Matrix4<Type> Matrix4<Type>::axisAngleRotation(Vector3<Type> axis, const Type angle)
{
Type radians = angle * (double)degToRad;
float c = cosf(radians);
float s = sinf(radians);
float t = 1.0f - c; // "versine" term shared by every entry
float x = axis.x;
float y = axis.y;
float z = axis.z;
Matrix4<Type> rot;
// Row 0
rot.setAt(0,0, c + x*x*t);
rot.setAt(0,1, x*y*t - z*s);
rot.setAt(0,2, x*z*t + y*s);
rot.setAt(0,3, 0.0f);
// Row 1
rot.setAt(1,0, y*x*t + z*s);
rot.setAt(1,1, c + y*y*t);
rot.setAt(1,2, y*z*t - x*s);
rot.setAt(1,3, 0.0f);
// Row 2
rot.setAt(2,0, z*x*t - y*s);
rot.setAt(2,1, z*y*t + x*s);
rot.setAt(2,2, c + z*z*t);
rot.setAt(2,3, 0.0f);
// Row 3 (homogeneous)
rot.setAt(3,0, 0.0f);
rot.setAt(3,1, 0.0f);
rot.setAt(3,2, 0.0f);
rot.setAt(3,3, 1.0f);
return rot;
}
// Begins a frame: clears buffers and loads the camera transform onto the
// fixed-function modelview stack.
void OpenGLRenderer::startDraw(unsigned long mask)
{
//sortBuffer(); // sort draw queue
clearBuffers(mask); // clear buffers
loadIdentity();
// NOTE(review): translating by -T, multiplying the view matrix, then
// translating by +T conjugates the rotation about the camera position —
// verify this ordering is intended; it is an unusual way to build a view
// transform and interacts with the rotation-order issue in the question.
glTranslatef(-1*m_frustumCamera->getViewMatrix().getTranslationAxis().x,-1*m_frustumCamera->getViewMatrix().getTranslationAxis().y,-1*m_frustumCamera->getViewMatrix().getTranslationAxis().z);// load identity
glMultMatrixf(m_frustumCamera->getViewMatrix().getMatrix());
glTranslatef(m_frustumCamera->getViewMatrix().getTranslationAxis().x,m_frustumCamera->getViewMatrix().getTranslationAxis().y,m_frustumCamera->getViewMatrix().getTranslationAxis().z);
matrixStackPush();
}
I think order of multiplication can cause the problem, instead of
m_orientation = matrix*m_orientation;
try
m_orientation = m_orientation * matrix;