I have a function which rotates the camera around the player by yaw and pitch angles.
void Camera::updateVectors() {
    GLfloat radius = glm::length(center - position);
    position.x = cos(glm::radians(this->yaw)) * cos(glm::radians(this->pitch));
    position.y = sin(glm::radians(this->pitch));
    position.z = sin(glm::radians(this->yaw)) * cos(glm::radians(this->pitch));
    position *= radius;
    this->front = glm::normalize(center - position);
    this->right = glm::normalize(glm::cross(this->front, this->worldUp));
    this->up = glm::normalize(glm::cross(this->right, this->front));
    lookAt = glm::lookAt(this->position, this->position + this->front, this->up);
}
When I move the player, the camera should move with it, so I add a translation vector to both the center and the position of the camera:
void Camera::Transform(glm::vec3& t) {
    this->position += t;
    this->center += t;
}
Before moving the player, the camera rotation works fine, and the player movement also works fine. But once I try to rotate the camera after the player has moved, the camera starts changing position unexpectedly.
After some debugging, I noticed that the radius calculated on the first line (the distance between the center and position of the camera) comes out as something like 49.888889 or 50.000079, while from the initial values it should be exactly 50.0. This very small difference makes the result completely wrong.
So how can I deal with this float precision issue, or is there a bug in my code or calculations?
Edit:
Positioning the player depends on its yaw and pitch, and it updates the center of the camera:
GLfloat velocity = this->movementSpeed * deltaTime;
if (direction == FORWARD) {
    glm::vec3 t = glm::vec3(sin(glm::radians(yaw)), sin(glm::radians(pitch)), cos(glm::radians(yaw))) * velocity;
    matrix = glm::translate(matrix, t);
    for (GLuint i = 0; i < this->m_Entries.size(); i++) {
        this->m_Entries[i].setModelMatrix(matrix);
    }
    glm::vec3 f(matrix[2][0], matrix[2][1], matrix[2][2]);
    f *= velocity;
    scene->getDefCamera()->Transform(f);
}
if (direction == BACKWARD) {
    glm::vec3 t = glm::vec3(sin(glm::radians(yaw)), 0.0, cos(glm::radians(yaw))) * velocity;
    matrix = glm::translate(matrix, -t);
    for (GLuint i = 0; i < this->m_Entries.size(); i++) {
        this->m_Entries[i].setModelMatrix(matrix);
    }
    glm::vec3 f(matrix[2][0], matrix[2][1], matrix[2][2]);
    f *= velocity;
    f = -f;
    scene->getDefCamera()->Transform(f);
}
The main problem here is that you're rotating based on a position that is moving. But rotations are based on the origin of the coordinate system. So when you move the position, the rotation is still being done relative to the origin.
Instead of having Transform offset the position, it should only offset the center. Indeed, storing position makes no sense; you compute the camera's position based on its current center point, the radius, and the angles of rotation. The radius is a property that should be stored, not computed.
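A minimal sketch of that idea, reusing the member names from the question but storing a radius member instead of the position (the exact restructuring is only illustrative, not the asker's code):
void Camera::updateVectors() {
    // Direction from the orbit center towards the camera, derived from yaw/pitch.
    glm::vec3 offset;
    offset.x = cos(glm::radians(yaw)) * cos(glm::radians(pitch));
    offset.y = sin(glm::radians(pitch));
    offset.z = sin(glm::radians(yaw)) * cos(glm::radians(pitch));

    // The position is recomputed every frame, so the radius can never drift.
    glm::vec3 position = center + offset * radius;
    front  = glm::normalize(center - position);
    right  = glm::normalize(glm::cross(front, worldUp));
    up     = glm::normalize(glm::cross(right, front));
    lookAt = glm::lookAt(position, center, up);
}

void Camera::Transform(const glm::vec3& t) {
    center += t;        // move only the orbit center
    updateVectors();    // the position follows automatically
}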
The solution is simply to apply the transformations to the camera view matrix directly instead of building it with the lookAt function.
First, initialize the camera:
void Camera::initCamera(glm::vec3& pos, glm::vec3& center, GLfloat yaw, GLfloat pitch) {
    view = glm::translate(view, center - pos);
    view = glm::rotate(view, glm::radians(yaw), glm::vec3(0.0, 1.0, 0.0));
    view = glm::rotate(view, glm::radians(pitch), glm::vec3(1.0, 0.0, 0.0));
    view = glm::translate(view, pos - center);
}
Then the rotation function:
void Camera::Rotate(GLfloat xoffset, GLfloat yoffset, glm::vec3& c) {
    xoffset *= this->mouseSensitivity;
    yoffset *= this->mouseSensitivity;
    view = glm::translate(view, c); // c is the player position
    view = glm::rotate(view, glm::radians(xoffset), glm::vec3(0.0, 1.0, 0.0));
    view = glm::rotate(view, glm::radians(yoffset), glm::vec3(1.0, 0.0, 0.0));
    view = glm::translate(view, -c);
}
And the camera move function:
void Camera::Translate(glm::vec3& t) {
    view = glm::translate(view, -t);
}
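For completeness, the accumulated view matrix can then be handed to the shader each frame (a sketch only; getViewMatrix, shaderProgram and the uniform name are illustrative, not part of the original code):
const glm::mat4& Camera::getViewMatrix() const {
    return view; // already contains all rotations/translations applied above
}

// per frame, e.g. in the render loop (needs <glm/gtc/type_ptr.hpp>):
glUniformMatrix4fv(glGetUniformLocation(shaderProgram, "view"),
                   1, GL_FALSE, glm::value_ptr(scene->getDefCamera()->getViewMatrix()));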
And in the player class, when the player moves, it pushes the camera to move along with it using this code:
void Mesh::Move(Move_Directions direction, GLfloat deltaTime) {
    GLfloat velocity = 50.0f * this->movementSpeed * deltaTime;
    if (direction == FORWARD) {
        glm::vec3 t = glm::vec3(sin(glm::radians(yaw)), sin(glm::radians(pitch)), cos(glm::radians(yaw))) * velocity;
        matrix = glm::translate(matrix, t);
        for (GLuint i = 0; i < this->m_Entries.size(); i++) {
            this->m_Entries[i].setModelMatrix(matrix);
        }
        glm::vec3 f(matrix[2][0], matrix[2][1], matrix[2][2]);
        f *= velocity;
        scene->getDefCamera()->Translate(f);
    }
    if (direction == BACKWARD) {
        glm::vec3 t = glm::vec3(sin(glm::radians(yaw)), 0.0, cos(glm::radians(yaw))) * velocity;
        matrix = glm::translate(matrix, -t);
        for (GLuint i = 0; i < this->m_Entries.size(); i++) {
            this->m_Entries[i].setModelMatrix(matrix);
        }
        glm::vec3 f(matrix[2][0], matrix[2][1], matrix[2][2]);
        f *= velocity;
        f = -f;
        scene->getDefCamera()->Translate(f);
    }
    if (direction == RIGHT) {
        matrix = glm::rotate(matrix, (GLfloat) -M_PI * deltaTime, glm::vec3(0.0, 1.0, 0.0));
        for (GLuint i = 0; i < this->m_Entries.size(); i++) {
            this->m_Entries[i].setModelMatrix(matrix);
        }
    }
    if (direction == LEFT) {
        matrix = glm::rotate(matrix, (GLfloat) M_PI * deltaTime, glm::vec3(0.0, 1.0, 0.0));
        for (GLuint i = 0; i < this->m_Entries.size(); i++) {
            this->m_Entries[i].setModelMatrix(matrix);
        }
    }
}
Thanks to everybody who helped.
I have encountered a situation where passing a glm::vec3 to the glm::lookAt function appears to modify it.
The following code is about shadow frustum calculation in a C++ / OpenGL game engine. The problem arises in the glm::lookAt function, at the end.
void Shadows::updateFrustumBoundingBox()
{
// Here we convert main camera frustum coordinates in light view space
std::array<glm::vec3,8> points = {
// Near plane points
lightView * glm::vec4(cameraPtr->ntl, 1.0),
lightView * glm::vec4(cameraPtr->ntr, 1.0),
lightView * glm::vec4(cameraPtr->nbl, 1.0),
lightView * glm::vec4(cameraPtr->nbr, 1.0),
// Far plane points
lightView * glm::vec4(cameraPtr->ftl, 1.0),
lightView * glm::vec4(cameraPtr->ftr, 1.0),
lightView * glm::vec4(cameraPtr->fbl, 1.0),
lightView * glm::vec4(cameraPtr->fbr, 1.0)};
// Here we find the shadow bounding box dimensions
bool first = true;
for (int i=0; i<7; ++i)
{
glm::vec3* point = &points[i];
if (first)
{
minX = point->x;
maxX = point->x;
minY = point->y;
maxY = point->y;
minZ = point->z;
maxZ = point->z;
first = false;
}
if (point->x > maxX)
maxX = point->x;
else if (point->x < minX)
minX = point->x;
if (point->y > maxY)
maxY = point->y;
else if (point->y < minY)
minY = point->y;
if (point->z > maxZ)
maxZ = point->z;
else if (point->z < minZ)
minZ = point->z;
}
frustumWidth = maxX - minX;
frustumHeight = maxY - minY;
frustumLength = maxZ - minZ;
// Here we find the bounding box center, in light view space
float x = (minX + maxX) / 2.0f;
float y = (minY + maxY) / 2.0f;
float z = (minZ + maxZ) / 2.0f;
glm::vec4 frustumCenter = glm::vec4(x, y, z, 1.0f);
// Here we convert the bounding box center in world space
glm::mat4 invertedLight = glm::mat4(1.0f);
invertedLight = glm::inverse(lightView);
frustumCenter = invertedLight * frustumCenter;
// Here we define the light projection matrix (shadow frustum dimensions)
lightProjection = glm::ortho(
-frustumWidth/2.0f, // left
frustumWidth/2.0f, // right
-frustumHeight/2.0f, // down
frustumHeight/2.0f, // top
0.01f, // near
SHADOW_DISTANCE); // far
// Here we define the light view matrix (shadow frustum position and orientation)
lightDirection = glm::normalize(lightDirection);
target = glm::vec3(0.0f, 100.0f, 200.0f) + lightDirection;
lightView = glm::lookAt(
// Shadow box center
glm::vec3(0.0f, 100.0f, 200.0f), // THIS LINE
// glm::vec3(frustumCenter), // ALTERNATIVELY, THIS LINE. Here I convert it as a vec3 because it is a vec4
// Light orientation
target,
// Up vector
glm::vec3( 0.0f, 1.0f, 0.0f));
cout << "frustumCenter: " << frustumCenter.x << " " << frustumCenter.y << " " << frustumCenter.z << endl;
// Final matrix calculation
lightSpaceMatrix = lightProjection * lightView;
}
As is, the first glm::lookAt parameter is glm::vec3(0.0f, 100.0f, 200.0f), and it works correctly. The glm::vec4 frustumCenter variable isn't used by glm::lookAt, and outputs correct values each frame.
frustumCenter: 573.41 -93.2823 -133.848 1
But if I change the first glm::lookAt parameter to "glm::vec3(frustumCenter)":
frustumCenter: nan nan nan nan
How can it be?
"I have encountered a situation where passing a glm::vec3 to the glm::lookAt function appears to modify it."
I don't think so. You use frustumCenter to calculate lightView, but before you do that, you use lightView to calculate frustumCenter: frustumCenter = invertedLight * frustumCenter;
So my educated guess on what happens here is:
The lightView matrix is not properly initialized / initialized to a singular matrix (like all zeros). As such, the inverse will not be defined, resulting in frustumCenter becoming all NaN, which in turn results in lightView becoming all NaN.
But if you do not use frustumCenter in the first iteration, lightView will be properly initialized, and frustumCenter will be calculated to a sane value in the next iteration.
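A minimal way to guard against that (a sketch only; the firstFrame flag is purely illustrative and not part of the question's code):
// Make sure lightView is invertible before it is ever inverted.
glm::mat4 lightView = glm::mat4(1.0f);
bool firstFrame = true;

// ... at the end of updateFrustumBoundingBox():
glm::vec3 eye = firstFrame ? glm::vec3(0.0f, 100.0f, 200.0f)   // safe fixed point on frame 1
                           : glm::vec3(frustumCenter);          // derived center afterwards
lightView  = glm::lookAt(eye, eye + lightDirection, glm::vec3(0.0f, 1.0f, 0.0f));
firstFrame = false;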
I'm trying to implement a bullet, so I have this free-movement first person camera. I got this camera from learnopengl.com; this is the code:
// Default camera values
const float YAW = -90.0f;
const float PITCH = 0.0f;
const float SPEED = 2.5f;
const float SENSITIVITY = 0.1f;
const float ZOOM = 45.0f;
// An abstract camera class that processes input and calculates the corresponding Euler Angles, Vectors and Matrices for use in OpenGL
class Camera
{
public:
// Camera Attributes
glm::vec3 Position;
glm::vec3 Front;
glm::vec3 Up;
glm::vec3 Right;
glm::vec3 WorldUp;
// Euler Angles
float Yaw;
float Pitch;
// Camera options
float MovementSpeed;
float MouseSensitivity;
float Zoom;
// Constructor with vectors
Camera(glm::vec3 position = glm::vec3(0.0f, 0.0f, 0.0f), glm::vec3 up = glm::vec3(0.0f, 1.0f, 0.0f), float yaw = YAW, float pitch = PITCH) : Front(glm::vec3(0.0f, 0.0f, -1.0f)), MovementSpeed(SPEED), MouseSensitivity(SENSITIVITY), Zoom(ZOOM)
{
Position = position;
WorldUp = up;
Yaw = yaw;
Pitch = pitch;
updateCameraVectors();
}
// Constructor with scalar values
Camera(float posX, float posY, float posZ, float upX, float upY, float upZ, float yaw, float pitch) : Front(glm::vec3(0.0f, 0.0f, -1.0f)), MovementSpeed(SPEED), MouseSensitivity(SENSITIVITY), Zoom(ZOOM)
{
Position = glm::vec3(posX, posY, posZ);
WorldUp = glm::vec3(upX, upY, upZ);
Yaw = yaw;
Pitch = pitch;
updateCameraVectors();
}
// Returns the view matrix calculated using Euler Angles and the LookAt Matrix
glm::mat4 GetViewMatrix()
{
return glm::lookAt(Position, Position + Front, Up);
}
// Processes input received from any keyboard-like input system. Accepts input parameter in the form of camera defined ENUM (to abstract it from windowing systems)
void ProcessKeyboard(Camera_Movement direction, float deltaTime)
{
float velocity = MovementSpeed * deltaTime;
if (direction == FORWARD)
Position += Front * velocity;
if (direction == BACKWARD)
Position -= Front * velocity;
if (direction == LEFT)
Position -= Right * velocity;
if (direction == RIGHT)
Position += Right * velocity;
}
// Processes input received from a mouse input system. Expects the offset value in both the x and y direction.
void ProcessMouseMovement(float xoffset, float yoffset, GLboolean constrainPitch = true)
{
xoffset *= MouseSensitivity;
yoffset *= MouseSensitivity;
Yaw += xoffset;
Pitch += yoffset;
// Make sure that when pitch is out of bounds, screen doesn't get flipped
if (constrainPitch)
{
if (Pitch > 89.0f)
Pitch = 89.0f;
if (Pitch < -89.0f)
Pitch = -89.0f;
}
// Update Front, Right and Up Vectors using the updated Euler angles
updateCameraVectors();
}
// Processes input received from a mouse scroll-wheel event. Only requires input on the vertical wheel-axis
void ProcessMouseScroll(float yoffset)
{
if (Zoom >= 1.0f && Zoom <= 45.0f)
Zoom -= yoffset;
if (Zoom <= 1.0f)
Zoom = 1.0f;
if (Zoom >= 45.0f)
Zoom = 45.0f;
}
private:
// Calculates the front vector from the Camera's (updated) Euler Angles
void updateCameraVectors()
{
// Calculate the new Front vector
glm::vec3 front;
front.x = cos(glm::radians(Yaw)) * cos(glm::radians(Pitch));
front.y = sin(glm::radians(Pitch));
front.z = sin(glm::radians(Yaw)) * cos(glm::radians(Pitch));
Front = glm::normalize(front);
// Also re-calculate the Right and Up vector
Right = glm::normalize(glm::cross(Front, WorldUp)); // Normalize the vectors, because their length gets closer to 0 the more you look up or down which results in slower movement.
Up = glm::normalize(glm::cross(Right, Front));
}
};
So now I want to create a bullet that starts from
model = glm::translate(model, camara.Position+7.0f*camara.Front);
The issue is that as I try to move the camera, the object rotates with it. I know why, but I don't know how to fix it. I have tried something like this:
model = glm::rotate(model, glm::radians(camara.Pitch), glm::vec3(1.0f, 0.0f, 0.0f));
model = glm::rotate(model, -glm::radians(camara.Yaw), glm::vec3(0.0f, 1.0f, 0.0f));
trying to sync the rotations but it's not working.
I want to store the position because then I want the bullets to go straight no matter where I move. Thank you.
This is how I always want it to look:
This is how it rotates as I move:
I did mouse picking with terrain following these lessons (but used C++):
https://www.youtube.com/watch?v=DLKN0jExRIM&index=29&listhLoLuZVfUksDP
http://antongerdelan.net/opengl/raycasting.html
The problem is that the position of the mouse does not correspond to the place where the ray intersects the terrain:
The error is large vertically and slight horizontally.
(Ignore the shadows; the normal map has not been corrected yet.)
What could be wrong? My code:
void MousePicker::update() {
view = cam->getViewMatrix();
currentRay = calculateMouseRay();
if (intersectionInRange(0, RAY_RANGE, currentRay)) {
currentTerrainPoint = binarySearch(0, 0, RAY_RANGE, currentRay);
}
else {
currentTerrainPoint = vec3();
}
}
vec3 MousePicker::calculateMouseRay() {
glfwGetCursorPos(win, &mouseInfo.xPos, &mouseInfo.yPos);
vec2 normalizedCoords = getNormalizedCoords(mouseInfo.xPos, mouseInfo.yPos);
vec4 clipCoords = vec4(normalizedCoords.x, normalizedCoords.y, -1.0f, 1.0f);
vec4 eyeCoords = toEyeCoords(clipCoords);
vec3 worldRay = toWorldCoords(eyeCoords);
return worldRay;
}
vec2 MousePicker::getNormalizedCoords(double xPos, double yPos) {
GLint width, height;
glfwGetWindowSize(win, &width, &height);
//GLfloat x = (2.0 * xPos) / width - 1.0f;
GLfloat x = -((width - xPos) / width - 0.5f) * 2.0f;
//GLfloat y = 1.0f - (2.0f * yPos) / height;
GLfloat y = ((height - yPos) / height - 0.5f) * 2.0f;
//float z = 1.0f;
mouseInfo.normalizedCoords = vec2(x, y);
return vec2(x,y);
}
vec4 MousePicker::toEyeCoords(vec4 clipCoords) {
vec4 invertedProjection = inverse(projection) * clipCoords;
//vec4 eyeCoords = translate(invertedProjection, clipCoords);
mouseInfo.eyeCoords = vec4(invertedProjection.x, invertedProjection.y, -1.0f, 0.0f);
return vec4(invertedProjection.x, invertedProjection.y, -1.0f, 0.0f);
}
vec3 MousePicker::toWorldCoords(vec4 eyeCoords) {
vec3 rayWorld = vec3(inverse(view) * eyeCoords);
vec3 mouseRay = vec3(rayWorld.x, rayWorld.y, rayWorld.z);
rayWorld = normalize(rayWorld);
mouseInfo.worldRay = rayWorld;
return rayWorld;
}
//*********************************************************************************
vec3 MousePicker::getPointOnRay(vec3 ray, float distance) {
vec3 camPos = cam->getCameraPos();
vec3 start = vec3(camPos.x, camPos.y, camPos.z);
vec3 scaledRay = vec3(ray.x * distance, ray.y * distance, ray.z * distance);
return vec3(start + scaledRay);
}
vec3 MousePicker::binarySearch(int count, float start, float finish, vec3 ray) {
float half = start + ((finish - start) / 2.0f);
if (count >= RECURSION_COUNT) {
vec3 endPoint = getPointOnRay(ray, half);
//Terrain* ter = &getTerrain(endPoint.x, endPoint.z);
if (terrain != NULL) {
return endPoint;
}
else {
return vec3();
}
}
if (intersectionInRange(start, half, ray)) {
return binarySearch(count + 1, start, half, ray);
}
else {
return binarySearch(count + 1, half, finish, ray);
}
}
bool MousePicker::intersectionInRange(float start, float finish, vec3 ray) {
vec3 startPoint = getPointOnRay(ray, start);
vec3 endPoint = getPointOnRay(ray, finish);
if (!isUnderGround(startPoint) && isUnderGround(endPoint)) {
return true;
}
else {
return false;
}
}
bool MousePicker::isUnderGround(vec3 testPoint) {
//Terrain* ter = &getTerrain(testPoint.x, testPoint.z);
float height = 0;
if (terrain != NULL) {
height = terrain->getHeightPoint(testPoint.x, testPoint.z);
mouseInfo.height = height;
}
if (testPoint.y < height) {
return true;
}
else {
return false;
}
}
Terrain MousePicker::getTerrain(float worldX, float worldZ) {
return *terrain;
}
In perspective projection, a ray from the eye position through a point on the screen can be defined by 2 points. The first point is the eye (camera) position, which is (0, 0, 0) in view space. The second point has to be calculated from the position on the screen.
The screen position has to be converted to normalized device coordinates in range from (-1,-1) to (1,1).
w = width of the viewport
h = height of the viewport
x = X position of the mouse
y = Y position of the mouse
GLfloat ndc_x = 2.0 * x/w - 1.0;
GLfloat ndc_y = 1.0 - 2.0 * y/h; // invert Y axis
To calculate a point on the ray, which goes through the camera position and through the point on the screen, the field of view and the aspect ratio of the perspective projection have to be known:
fov_y = vertical field of view angle in radians
aspect = w / h
GLfloat tanFov = tan( fov_y * 0.5 );
glm::vec3 ray_P = vec3( ndc_x * aspect * tanFov, ndc_y * tanFov, -1.0 );
A ray from the camera position through a point on the screen can be defined by the following position (P0) and normalized direction (dir), in world space:
view = view matrix
glm::mat4 invView = glm::inverse( view );
glm::vec3 P0  = glm::vec3( invView * glm::vec4(0.0f, 0.0f, 0.0f, 1.0f) );
// = glm::vec3( invView[3][0], invView[3][1], invView[3][2] );
glm::vec3 dir = glm::normalize( glm::vec3( invView * glm::vec4(ray_P, 1.0f) ) - P0 );
In this case, the answers to the following questions will be interesting too:
How to recover view space position given view space depth value and ndc xy
Is it possble get which surface of cube will be click in OpenGL?
How to render depth linearly in modern OpenGL with gl_FragCoord.z in fragment shader?
GLSL spotlight projection volume
Applying this to your code results in the following changes:
The Perspective Projection Matrix looks like this:
r = right, l = left, b = bottom, t = top, n = near, f = far
2*n/(r-l)     0             0              0
0             2*n/(t-b)     0              0
(r+l)/(r-l)   (t+b)/(t-b)   -(f+n)/(f-n)  -1
0             0             -2*f*n/(f-n)   0
it follows:
aspect = w / h
tanFov = tan( fov_y * 0.5 );
p[0][0] = 2*n/(r-l) = 1.0 / (tanFov * aspect)
p[1][1] = 2*n/(t-b) = 1.0 / tanFov
Convert from screen (mouse) coordinates to normalized device coordinates:
vec2 MousePicker::getNormalizedCoords(double x, double y) {
    GLint w, h;
    glfwGetWindowSize(win, &w, &h);
    GLfloat ndc_x = 2.0 * x/w - 1.0;
    GLfloat ndc_y = 1.0 - 2.0 * y/h; // invert Y axis
    mouseInfo.normalizedCoords = vec2(ndc_x, ndc_y);
    return vec2(ndc_x, ndc_y);
}
Calculate a ray from the camera position through a point on the screen (mouse position), in world space:
vec3 MousePicker::calculateMouseRay( void ) {
    glfwGetCursorPos(win, &mouseInfo.xPos, &mouseInfo.yPos);
    vec2 normalizedCoords = getNormalizedCoords(mouseInfo.xPos, mouseInfo.yPos);

    float ray_Px = normalizedCoords.x / projection[0][0]; // projection[0][0] == 1.0 / (tanFov * aspect)
    float ray_Py = normalizedCoords.y / projection[1][1]; // projection[1][1] == 1.0 / tanFov
    glm::vec3 ray_P = vec3( ray_Px, ray_Py, -1.0f );

    vec3 camPos = cam->getCameraPos(); // == glm::vec3( invView[3][0], invView[3][1], invView[3][2] );
    glm::mat4 invView = glm::inverse( view );

    glm::vec3 P0  = camPos;
    glm::vec3 dir = glm::normalize( glm::vec3( invView * glm::vec4(ray_P, 1.0f) ) - P0 );
    return dir;
}
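As a quick sanity check of the returned direction (a sketch only; the flat plane at y = 0 is just a test assumption, the real terrain test remains the binary search from the question):
vec3 P0  = cam->getCameraPos();
vec3 dir = calculateMouseRay();
if (fabs(dir.y) > 1e-6f) {              // ray is not parallel to the plane
    float t = -P0.y / dir.y;            // distance along the ray to y == 0
    if (t >= 0.0f) {
        vec3 hit = P0 + dir * t;        // should lie under the mouse cursor
    }
}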
I have a question in regards to using quaternions for the rotation of my graphics object.
I have a Transform class which has the following constructor with default parameters:
Transform(const glm::vec3& pos = glm::vec3(0.0), const glm::quat& rot = glm::quat(1.0, 0.0, 0.0, 0.0),
const glm::vec3& scale = glm::vec3(1.0))
{
m_pos = pos;
m_rot = rot;
m_scale = scale;
}
In my Transform class calculate the MVP as follows:
glm::mat4 Transform::GetModelMatrix() const
{
glm::mat4 translate = glm::translate(glm::mat4(1.0), m_pos);
glm::mat4 rotate = glm::mat4_cast(m_rot);
glm::mat4 scale = glm::scale(glm::mat4(1.0), m_scale);
return translate * rotate * scale;
}
The issue I'm facing is that when I use const glm::quat& rot = glm::quat(1.0, 0.0, 0.0, 0.0) my object appears normal on screen. The following image shows it:
However if I try to use for example const glm::quat& rot = glm::quat(glm::radians(90.0f), 0.0, 1.0, 0.0) (rotating on y axis by 90 degrees) my object appears as if it has been scaled. The following image shows it:
I can't figure out what is causing it to become like this when I try to rotate it. Am I missing something important?
If it's of any relevance, the following is how I calculate my view matrix:
glm::mat4 Camera::GetView() const
{
glm::mat4 view = glm::lookAt(m_pos, m_pos + m_forward, m_up);
return view;
}
AFAIK you can init a glm::quat using:
glm::vec3 angles(degToRad(rotx), degToRad(roty), degToRad(rotz));
glm::quat rotation(angles);
Where rotx, roty and rotz are the rotation angles around the x, y and z axes, and degToRad converts degrees to radians. Therefore, for your case:
glm::vec3 angles(degToRad(0), degToRad(90), degToRad(0));
glm::quat rotation(angles);
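A small illustration of the difference (a sketch only; angleAxis is just another common way to build the same rotation):
// This constructor takes raw (w, x, y, z) components, so "90 degrees about Y"
// written this way produces a non-unit quaternion, and mat4_cast of it yields a
// skewed/scaled matrix, matching the distorted result in the question.
glm::quat wrong(glm::radians(90.0f), 0.0f, 1.0f, 0.0f);

// Build the intended rotation from Euler angles (x, y, z rotations, in radians) ...
glm::quat fromEuler(glm::vec3(0.0f, glm::radians(90.0f), 0.0f));
// ... or from an explicit angle and axis.
glm::quat fromAxis = glm::angleAxis(glm::radians(90.0f), glm::vec3(0.0f, 1.0f, 0.0f));

glm::mat4 rotate = glm::mat4_cast(fromEuler); // usable in GetModelMatrix()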
Basically, I need to change the eye and up vectors correctly when pressing the left key (turning the view to the right). My implementation is as follows, but it does not seem to pass the tests. Can anyone help?
// Transforms the camera left around the "crystal ball" interface
void Transform::left(float degrees, vec3& eye, vec3& up) {
    // YOUR CODE FOR HW1 HERE
    eye = rotate(degrees, vec3(0, 1, 0)) * eye;
    up = rotate(degrees, vec3(0, 1, 0)) * up;
}
The rotation function takes two arguments, degrees and axis, and returns the rotation matrix, which is a 3-by-3 matrix:
mat3 Transform::rotate(const float degrees, const vec3& axis) {
    // YOUR CODE FOR HW1 HERE
    mat3 rot, I(1.0);
    mat3 a_x;
    a_x[0][0] = 0;
    a_x[0][1] = -axis[2];
    a_x[0][2] = axis[1];
    a_x[1][0] = axis[2];
    a_x[1][1] = 0;
    a_x[1][2] = -axis[0];
    a_x[2][0] = -axis[1];
    a_x[2][1] = axis[0];
    a_x[2][2] = 0;
    float theta = degrees / 180 * pi;
    rot = I * cos(theta) + glm::outerProduct(axis, axis) * (1 - cos(theta)) + a_x * sin(theta);
    return rot;
}
Try whether something like this fixes it. GLM matrices are column-major, so a_x[c][r] addresses column c, row r; filling a_x with row-major indexing as above builds the transpose of the cross-product matrix, which rotates in the opposite direction:
glm::mat3 Transform::rotate(float angle, const glm::vec3& axis) {
    glm::mat3 a_x( 0.0f,    axis.z, -axis.y,
                  -axis.z,  0.0f,    axis.x,
                   axis.y, -axis.x,  0.0f);
    angle = glm::radians(angle);
    // glm::mat3(1.0f) is an explicit identity; a default-constructed mat3 is
    // not guaranteed to be the identity in newer GLM versions.
    return glm::mat3(1.0f) * cos(angle) + sin(angle) * a_x
           + (1.0f - cos(angle)) * glm::outerProduct(axis, axis);
}
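A quick sanity check for the fixed function (a sketch; it assumes rotate() can be called from test code, e.g. as the static member the homework skeleton declares):
glm::mat3 R = Transform::rotate(90.0f, glm::vec3(0.0f, 1.0f, 0.0f));
glm::vec3 v = R * glm::vec3(1.0f, 0.0f, 0.0f);  // expect approximately (0, 0, -1)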
I googled around and found a solution:
// Transforms the camera left around the "crystal ball" interface
void Transform::left(float degrees, vec3& eye, vec3& up) {
    // YOUR CODE FOR HW1 HERE
    eye = eye * rotate(degrees, up);
}
The rotation function is correct.