I can translate my 2D image to (0, 0) using the code below.
D3DXMATRIX worldMatrix, viewMatrix, orthoMatrix, rotation, movement;
// Get the world, view, and ortho matrices from the camera.
m_camera.GetViewMatrix(viewMatrix);
m_camera.GetWorldMatrix(worldMatrix);
m_camera.GetOrthoMatrix(orthoMatrix);
// Move the texture to the new position
D3DXMatrixTranslation(&movement, ((m_VerticeProperties->screenWidth / 2) * -1) + m_posX,
(m_VerticeProperties->screenHeight / 2) - m_posY, 0.0f);
worldMatrix = movement;
//float m_rotationZ = -90 * 0.0174532925f;
//D3DXMatrixRotationYawPitchRoll(&rotation, 0, 0, m_rotationZ);
//worldMatrix = rotation;
// Give the bitmap class what it needs to make source rect
m_bitmap->SetVerticeProperties(m_VerticeProperties->screenWidth, m_VerticeProperties->screenHeight,
m_VerticeProperties->frameWidth, m_VerticeProperties->frameHeight, m_VerticeProperties->U, m_VerticeProperties->V);
//Render the model (the vertices)
m_bitmap->Render(m_d3dManager.GetDeviceContext(), flipped);
//Render the shader
m_shader->Render(m_d3dManager.GetDeviceContext(), m_bitmap->GetIndexCount(), worldMatrix, viewMatrix,
orthoMatrix, m_bitmap->GetTexture(), m_textureTranslationU, m_VerticeProperties->translationPercentageV);
The result:
I can also rotate the image with this code:
D3DXMATRIX worldMatrix, viewMatrix, orthoMatrix, rotation, movement;
// Get the world, view, and ortho matrices from the camera.
m_camera.GetViewMatrix(viewMatrix);
m_camera.GetWorldMatrix(worldMatrix);
m_camera.GetOrthoMatrix(orthoMatrix);
//// Move the texture to the new position
//D3DXMatrixTranslation(&movement, ((m_VerticeProperties->screenWidth / 2) * -1) + m_posX,
// (m_VerticeProperties->screenHeight / 2) - m_posY, 0.0f);
//worldMatrix = movement;
float m_rotationZ = 90 * 0.0174532925f;
D3DXMatrixRotationYawPitchRoll(&rotation, 0, 0, m_rotationZ);
worldMatrix = rotation;
// Give the bitmap class what it needs to make source rect
m_bitmap->SetVerticeProperties(m_VerticeProperties->screenWidth, m_VerticeProperties->screenHeight,
m_VerticeProperties->frameWidth, m_VerticeProperties->frameHeight, m_VerticeProperties->U, m_VerticeProperties->V);
//Render the model (the vertices)
m_bitmap->Render(m_d3dManager.GetDeviceContext(), flipped);
//Render the shader
m_shader->Render(m_d3dManager.GetDeviceContext(), m_bitmap->GetIndexCount(), worldMatrix, viewMatrix,
orthoMatrix, m_bitmap->GetTexture(), m_textureTranslationU, m_VerticeProperties->translationPercentageV);
The result:
I thought multiplying the translation and rotation matrices and assigning the product to the world matrix would let me see both effects at once.
D3DXMatrixTranslation(&movement, ((m_VerticeProperties->screenWidth / 2) * -1) + m_posX,
(m_VerticeProperties->screenHeight / 2) - m_posY, 0.0f);
float m_rotationZ = 90 * 0.0174532925f;
D3DXMatrixRotationYawPitchRoll(&rotation, 0, 0, m_rotationZ);
worldMatrix = rotation * movement;
It doesn't. The image no longer appears on the screen.
Can anyone tell me what I'm doing wrong? Thanks.
Just do world = translate(-origin) * rotation * translate(+origin); it will make the rotation local (around that origin).
Here is my code, for example:
void Object::RotZ(float angle, Vec3 origin)
{
    Mat4 w, rz, tToOrigin, tBack;
    rz.RotZ(angle);
    tToOrigin.Translation(-origin.x, -origin.y, 0);   // move the pivot to the origin
    tBack.Translation(origin.x, origin.y, 0);         // move the pivot back afterwards
    w = tToOrigin * rz * tBack;
    Vec4 newPos;
    for (int i = 0; i < countV; i++)
    {
        Vec3 pos(vertex[i].x, vertex[i].y, 1);
        newPos.Transform(pos, w);
        vertex[i].x = newPos.x;
        vertex[i].y = newPos.y;
    }
    UpdateVertex(countV);
}
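Applied to the question above, the same translate-rotate-translate idea would look roughly like this with D3DX matrices. This is only a sketch: centerX/centerY stand for the bitmap's pivot point and are hypothetical (if the vertices are already built around the origin, the two extra translations can be dropped), and D3DX uses row vectors, so the left-most matrix is applied first.
D3DXMATRIX toOrigin, rotation, back, movement, worldMatrix;
// Move the pivot to the origin, rotate about Z, then move the pivot back.
D3DXMatrixTranslation(&toOrigin, -centerX, -centerY, 0.0f);
D3DXMatrixRotationZ(&rotation, m_rotationZ);
D3DXMatrixTranslation(&back, centerX, centerY, 0.0f);
// Finally move the bitmap to its screen position, as in the question.
D3DXMatrixTranslation(&movement, ((m_VerticeProperties->screenWidth / 2) * -1) + m_posX,
(m_VerticeProperties->screenHeight / 2) - m_posY, 0.0f);
// Row-vector convention: the left-most matrix is applied first.
worldMatrix = toOrigin * rotation * back * movement;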
I'm making a level editor for my game with OpenGL in C++. I'm trying to make an editor camera just like the Unity Engine's 2D scene camera, but I have an issue when I try to implement mouse movement for the camera (camera panning). I'm converting the mouse position from screen to world space.
ScreenToWorldSpace Method:
Vector3 Application::ScreenToWorldSpace(int mousex, int mousey)
{
double x = 2.0 * mousex / viewportWidth - 1;
double y = 2.0 * mousey / viewportHeight - 1;
Vector4 screenPos = Vector4(x, -y, -1.0f, 1.0f);
Matrix4 ProjectionViewMatrix = camera1->GetProjectionMatrix() * camera1->GetViewMatrix();
Matrix4 InverseProjectionViewMatrix = glm::inverse(ProjectionViewMatrix);
Vector4 worldPos = InverseProjectionViewMatrix * screenPos;
return Vector3(worldPos);
}
The above method works correctly.
But I'm using the ScreenToWorldSpace coordinates to update the camera position.
Render Method:
void Application::Render(float deltaTime)
{
Vector3 pos = ScreenToWorldSpace(mousePosition.x, mousePosition.y);
// This is the position of a tile not the camera
position = Vector3(0, 0, 0);
Vector3 rotation = Vector3(0, 0, 0);
Vector3 scale = Vector3(1);
Matrix4 translationMatrix = glm::translate(Matrix4(1.0f), position);
Matrix4 rotationMatrix = glm::eulerAngleYXZ(rotation.y, rotation.x, rotation.z);
Matrix4 scaleMatrix = glm::scale(Matrix4(1.0f), scale);
modelMatrix = translationMatrix * rotationMatrix * scaleMatrix;
if (mouseButtonDown)
{
Console << pos.x << ", " << pos.y << Endl;
camera1->position = Vector3(pos.x, pos.y, -10);
}
{
glScissor(0, 0, 900, 600);
glEnable(GL_SCISSOR_TEST);
glClearColor(236 / 255.0f, 64 / 255.0f, 122 / 255.0f, 1.0f);
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
glViewport(0, 0, 900, 600);
basicShader->Use();
dirt_grass_tex->Use();
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, ibo);
camera1->SetZoom(zoomFactor);
camera1->Update();
Matrix4 mvp = camera1->GetProjectionMatrix() * camera1->GetViewMatrix() * modelMatrix;
basicShader->SetUniformMat4("MVP", mvp);
glDrawElements(GL_TRIANGLES, 6, GL_UNSIGNED_INT, 0);
glDisable(GL_SCISSOR_TEST);
}
}
Camera Class:
#include "camera.h"
Camera::Camera(int width, int height)
{
swidth = width;
sheight = height;
position = Vector3(0, 0, -10);
rotation = Vector3(0, 0, 0);
m_direction = Vector3(0, 0, -5);
m_up = Vector3(0, 1, 0);
m_right = Vector3(1, 0, 0);
m_offset = Vector3(-swidth / 2 * m_zoom, -sheight / 2 * m_zoom, 0);
m_projection = glm::ortho(0.0f * m_zoom, (float)swidth * m_zoom, 0.0f * m_zoom, (float)sheight * m_zoom, -1000.0f, 0.0f);
}
Camera::~Camera()
{
}
void Camera::Update()
{
Vector3 finalPos = position + m_offset;
m_up = glm::cross(m_right, m_direction);
m_viewMatrix = glm::lookAt(finalPos, finalPos + m_direction, m_up);
m_viewMatrix = glm::scale(m_viewMatrix, Vector3(100));
}
void Camera::SetZoom(float zoom)
{
m_zoom = zoom;
m_offset = Vector3(-swidth / 2 * m_zoom, -sheight / 2 * m_zoom, 0);
m_projection = glm::ortho(0.0f * m_zoom, (float)swidth * m_zoom, 0.0f * m_zoom, (float)sheight * m_zoom, -1000.0f, 0.0f);
}
The following is the output I get when I try to move the camera with the mouse position converted from screen to world space:
if (mouseButtonDown)
{
Console << pos.x << ", " << pos.y << Endl;
position = Vector3(pos.x, pos.y, 0);
}
But if I instead use the mouse position converted from screen to world space to move the object (as in the snippet above), the object moves perfectly. Have a look at the following gif:
The following is what I'm trying to achieve:
I'm trying to make a game engine editor, and in it I want to implement an editor scene camera like the Unity/Unreal Engine scene camera. The following is the editor I'm currently working on:
I tried looking into different resources, but I'm clueless. Help me understand how to move the camera with the mouse.
What I think is happening:
Since I convert the mouse position from screen to world space using the camera's projection-view matrix, and then use those world coordinates to move the camera, every camera move updates the projection-view matrix, which in turn changes the mouse's world-space position relative to the view matrix, recursively.
I would appreciate some help.
Ordinarily, you wouldn't want to write the mouse position directly into the camera location (because that will be of limited use in practice - whenever you click on the screen, the camera would jump).
What you probably want is something along these lines:
Vector3 g_lastPosition;
void onMousePressed(int x, int y) {
// record starting position!
g_lastPosition = ScreenToWorldSpace(x, y);
}
void onMouseMove(int x, int y) {
// find the difference between new position, and last, in world space
Vector3 new_pos = ScreenToWorldSpace(x, y);
Vector3 offset = new_pos - g_lastPosition;
g_lastPosition = new_pos;
// now move camera by offset
camera->position += offset;
}
If you are in an orthographic view, then really you don't need to worry about the projection matrix at all.
int g_lastX;
int g_lastY;
void onMousePressed(int x, int y) {
// store mouse pos
g_lastX = x;
g_lastY = y;
}
void onMouseMove(int x, int y) {
// find the difference between new position, and last, in pixels
int offsetX = x - g_lastX;
int offsetY = y - g_lastY;
// update mouse pos
g_lastX = x;
g_lastY = y;
// get as ratio +/- 1
float dx = ((float) offsetX) / swidth;
float dy = ((float) offsetY) / sheight;
// now move camera by offset (might need to multiply by 2 here?)
camera->position.x += camera->m_offset.x * dx;
camera->position.y += camera->m_offset.y * dy;
}
But in general, for any mouse based movement, you always want to be thinking in terms of adding an offset, rather than setting an exact position.
I'm trying to set up a google maps style zoom-to-cursor control for my opengl camera. I'm using a similar method to the one suggested here. Basically, I get the position of the cursor, and calculate the width/height of my perspective view at that depth using some trigonometry. I then change the field of view, and calculate how to much I need to translate in order to keep the point under the cursor in the same apparent position on the screen. That part works pretty well.
The issue is that I want to limit the fov to be less than 90 degrees. When it ends up >90, I cut it in half and then translate everything away from the camera so that the resulting scene looks the same as with the larger fov. The equation to find that necessary translation isn't working, which is strange because it comes from pretty simple algebra. I can't find my mistake. Here's the relevant code.
void Visual::scroll_callback(GLFWwindow* window, double xoffset, double yoffset)
{
glm::mat4 modelview = view*model;
glm::vec4 viewport = { 0.0, 0.0, width, height };
float winX = cursorPrevX;
float winY = viewport[3] - cursorPrevY;
float winZ;
glReadPixels(winX, winY, 1, 1, GL_DEPTH_COMPONENT, GL_FLOAT, &winZ);
glm::vec3 screenCoords = { winX, winY, winZ };
glm::vec3 cursorPosition = glm::unProject(screenCoords, modelview, projection, viewport);
if (isinf(cursorPosition[2]) || isnan(cursorPosition[2])) {
cursorPosition[2] = 0.0;
}
float zoomFactor = 1.1;
// = zooming in
if (yoffset > 0.0)
zoomFactor = 1/1.1;
//the width and height of the perspective view, at the depth of the cursor position
glm::vec2 fovXY = camera.getFovXY(cursorPosition[2] - zTranslate, width / height);
camera.setZoomFromFov(fovXY.y * zoomFactor, cursorPosition[2] - zTranslate);
//don't want fov to be greater than 90, so cut it in half and move the world farther away from the camera to compensate
//not working...
if (camera.Zoom > 90.0 && zTranslate*2 > MAX_DEPTH) {
float prevZoom = camera.Zoom;
camera.Zoom *= .5;
//need increased distance between camera and world origin, so that view does not appear to change when fov is reduced
zTranslate = cursorPosition[2] - tan(glm::radians(prevZoom)) / tan(glm::radians(camera.Zoom) * (cursorPosition[2] - zTranslate));
}
else if (camera.Zoom > 90.0) {
camera.Zoom = 90.0;
}
glm::vec2 newFovXY = camera.getFovXY(cursorPosition[2] - zTranslate, width / height);
//translate so that position under the cursor does not appear to move.
xTranslate += (newFovXY.x - fovXY.x) * (winX / width - .5);
yTranslate += (newFovXY.y - fovXY.y) * (winY / height - .5);
updateView = true;
}
The definition of my view matrix, called every iteration of the main loop:
void Visual::setView() {
view = glm::mat4();
view = glm::translate(view, { xTranslate,yTranslate,zTranslate });
view = glm::rotate(view, glm::radians(camera.inclination), glm::vec3(1.f, 0.f, 0.f));
view = glm::rotate(view, glm::radians(camera.azimuth), glm::vec3(0.f, 0.f, 1.f));
camera.Right = glm::column(view, 0).xyz();
camera.Up = glm::column(view, 1).xyz();
camera.Front = -glm::column(view, 2).xyz(); // minus because OpenGL camera looks towards negative Z.
camera.Position = glm::column(view, 3).xyz();
updateView = false;
}
Field of view helper functions.
glm::vec2 getFovXY(float depth, float aspectRatio) {
float fovY = tan(glm::radians(Zoom / 2)) * depth;
float fovX = fovY * aspectRatio;
return glm::vec2{ 2*fovX , 2*fovY };
}
//you have a desired fov, and you want to set the zoom to achieve that.
//factor of 1/2 inside the atan because we actually need the half-fov. Keep full-fov as input for consistency
void setZoomFromFov(float fovY, float depth) {
Zoom = glm::degrees(2 * atan(fovY / (2 * depth)));
}
The equations I'm using can be found in the diagram here. Since I want the field-of-view dimensions to be the same before and after the angle change, I start with
fovY = tan(theta1) * d1 = tan(theta2) * d2
d2 = (tan(theta1) / tan(theta2)) * d1
d1 = distance between camera and cursor position, before fov change = cursorPosition[2] - zTranslate
d2 = distance after
theta1 = fov angle before
theta2 = fov angle after = theta1 * .5
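Transcribed literally into code (reusing the variable names from the snippets above, and noting that theta1/theta2 here are the half angles of the vertical fov, consistent with getFovXY), that algebra would read:
// Sketch only: theta1/theta2 are the HALF vertical fov angles in radians,
// d1 is the camera-to-cursor distance before the fov change.
float theta1 = glm::radians(prevZoom) / 2.0f;
float theta2 = glm::radians(camera.Zoom) / 2.0f;
float d1 = cursorPosition[2] - zTranslate;
float d2 = (tan(theta1) / tan(theta2)) * d1;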
Appreciate the help.
I'm drawing a 10x10 grid of squares at a depth of 0 and trying to highlight the one the mouse is over. I've tried following the tutorial here: http://antongerdelan.net/opengl/raycasting.html
but I don't know if I did it right. I end up with a vector at the end, but I'm not sure what to do with it.
Here's a screenshot of the squares (not sure how it helps..)
http://postimg.org/image/dau330qwt/2
/* Enable attribute index 1 as being used */
glEnableVertexAttribArray(1);
float camera_z = 50;
float camera_x = 0;
float camera_y = 0;
GLuint MatrixID = glGetUniformLocation(program, "MVP");
GLuint ColorID = glGetUniformLocation(program, "input_color");
int mouse_x;
int mouse_y;
while (1) {
int window_width;
int window_height;
SDL_GetWindowSize(win, &window_width, &window_height);
glm::mat4 Projection = glm::perspective(45.0f, ((float)window_width) / window_height, 0.1f, 100.0f);
// printf("Camera at %f %f\n", camera_x, camera_y);
glm::mat4 View = glm::lookAt(glm::vec3(camera_x,camera_y,camera_z), // camera position
glm::vec3(camera_x,camera_y,0), // looking at
glm::vec3(0,1,0)); // up
int map_width = map.width();
int map_height = map.height();
/* Make our background black */
glClearColor(0.0, 0.0, 0.0, 1.0);
glClear(GL_COLOR_BUFFER_BIT);
// go through my 10x10 map and
for (int i = 0; i < map_width; i++) {
for ( int j = 0; j < map_height; j++) {
glm::mat4 Model = glm::translate(glm::mat4(1.0f), glm::vec3(i, j, 0.0f));
glm::mat4 MVP = Projection * View * Model;
glm::vec3 color = random_color();
glUniformMatrix4fv(MatrixID, 1, GL_FALSE, &MVP[0][0]);
glUniform3fv(ColorID, 1, &color[0]);
glDrawArrays(GL_LINE_LOOP, 0, 4);
}
}
/* Swap our buffers to make our changes visible */
SDL_GL_SwapWindow(win);
// printf("Window dimensions %d x %d\n", window_width, window_height);
float normalized_mouse_x = (2.0f * mouse_x) / window_width - 1.0f;
float normalized_mouse_y = 1.0f - (2.0f * mouse_y) / window_height;
printf("Normalized mouse position %f x %f\n", normalized_mouse_x, normalized_mouse_y);
glm::vec3 normalized_mouse_vector = glm::vec3(normalized_mouse_x, normalized_mouse_y, 1.0f);
glm::vec4 ray_clip = glm::vec4 (normalized_mouse_vector.x, normalized_mouse_vector.y, -1.0, 1.0);
glm::vec4 ray_eye = glm::inverse(Projection) * ray_clip;
ray_eye = glm::vec4(ray_eye.xy(), -1.0, 0.0);
glm::vec3 ray_world = (glm::inverse(View) * ray_eye).xyz();
ray_world = glm::normalize(ray_world);
// this prints out values like: World ray: 0.000266, 0.000382, 1.000000
printf("World ray: %f, %f, %f\n", ray_world.x, ray_world.y, ray_world.z);
// l = -(camera_z / ray_world.z)
float l = -(camera_z / ray_world.z);
float mouse_world_x = camera_x + l * ray_world.x;
float mouse_world_y = camera_y + l * ray_world.y;
printf("mouse world %f, %f\n", mouse_world_x, mouse_world_y);
}
Updated with code from BDL's comment. The output I get now is:
Normalized mouse position 0.087500 x 0.145833
World ray: 0.065083, 0.081353, 499.000000
World ray: 0.000130, 0.000163, 1.000000
mouse world -0.006521, -0.008152
I'm expecting the "mouse world" line to have numbers in the 1-10 range, not in the 0.00x range, based on the screenshot above, which shows a grid of squares with x and y ranging from 0-10.
Thanks for looking.
The intersection of a given ray r, starting at point C (in this case the camera position), with the x/y plane at z = 0 can be calculated as follows:
C ... Camera position [cx,cy,cz]
r ... ray direction [rx,ry,rz]
We are searching for the point on the ray that has z=0
C + l*r = [x,y,0]
=>
cz + l*rz = 0
l * rz = -cz
l = -(cz / rz)
The xy-coordinates of the intersection are now:
x = cx + l * rx
y = cy + l * ry
What is left to do is to check which rectangle these (x, y) coordinates fall into.
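A minimal sketch of that in code (GLM types; the assumption that each square is 1 unit wide and anchored at integer coordinates (i, j) comes from the drawing loop in the question and may need adjusting):
#include <cmath>
#include <glm/glm.hpp>

// Intersect the ray (origin C, direction r) with the plane z = 0 and
// work out which grid square the hit point lands in.
bool pickGridCell(const glm::vec3& C, const glm::vec3& r,
                  int map_width, int map_height,
                  int& cell_x, int& cell_y)
{
    if (std::fabs(r.z) < 1e-6f)      // ray parallel to the plane
        return false;

    float l = -(C.z / r.z);          // l = -(cz / rz)
    if (l < 0.0f)                    // intersection is behind the camera
        return false;

    float x = C.x + l * r.x;
    float y = C.y + l * r.y;

    cell_x = (int)std::floor(x);     // squares assumed 1 unit wide at (i, j)
    cell_y = (int)std::floor(y);
    return cell_x >= 0 && cell_x < map_width &&
           cell_y >= 0 && cell_y < map_height;
}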
I'm trying to get object space coordinates from the mouse position. I have some standard rendering code, which works well.
The problem is with the mouse picking code. I have tried lots of things and gone through similar questions but I can't seem to understand why it's not working.
I expect the result to be x, y coordinates within [-1, 1], based on the position of the mouse over the object. I do get points within [-1, 1], but they are extremely skewed, such as (2.63813e-012, -1, 300).
Unproject code:
GLfloat z;
glReadPixels(mouse_pos_[0], int( navWidget->height() - mouse_pos_[1]), 1, 1, GL_DEPTH_COMPONENT, GL_FLOAT, &z);
glm::vec3 win(mouse_pos_[0], navWidget->height() - mouse_pos_[1], z);
glm::vec4 viewport(0, 0, navWidget->width(), navWidget->height());
auto result_vec3 = glm::unProject(win, view * model1, proj, viewport);
auto result = glm::normalize(glm::vec2(result_vec3.x, result_vec3.y)); // < -- I normalize here since that gave good results without the translate
bool left_image = true;
if (!(result.x <= length_per_side && result.x >= -length_per_side &&
result.y <= length_per_side && result.y >= -length_per_side)) {
// do stuff
}
}
Rendering code:
float fov = 2*(atan((camProjModule->camResY()/2*camProjModule->camPixSizeY()) /
camProjModule->camFocalLength()) / M_PI * 180.0);
float znear = 1.0f;
float zfar = 6000.0f;
//float aspect = 1024.f / 683.f;
float aspect = navWidget->width() / navWidget->height();
glm::mat4 proj = glm::perspective(fov, aspect, znear, zfar);
float required_height =(float)( znear * tan((fov / 2.f) * M_PI / 180.f));
float eye_distance = znear / required_height * ((float)(navWidget->height()) / 2.f);
eye_distance = 300.f;
glm::mat4 view = glm::lookAt(glm::vec3(0.f, 0.f, 1.f * eye_distance), glm::vec3(0.f, 0.f, 0.f), glm::vec3(0.f, 1.f, 0.f));
glUseProgram(correspond_shader_);
glBindVertexArray(quad_vao_);
glUniform3f(colorLoc, 1.0f, 1.0f, 1.0f);
// draw left
if (left_correspond_texture_) {
glEnable(GL_TEXTURE_2D);
glActiveTexture(GL_TEXTURE0 + 0);
glBindTexture(GL_TEXTURE_2D, left_correspond_texture_);
glUniform1i(drawTexLoc, left_correspond_texture_);
}
GLint proj_loc = glGetUniformLocation(correspond_shader_, "proj");
GLint view_loc = glGetUniformLocation(correspond_shader_, "view");
GLint draw_tex_loc = glGetUniformLocation(correspond_shader_, "drawTex");
glUniformMatrix4fv(proj_loc, 1, GL_FALSE, glm::value_ptr(proj));
glUniformMatrix4fv(view_loc, 1, GL_FALSE, glm::value_ptr(view));
float ratio = 1024.f / 683.f;
float height = navWidget->height() / 2.f;
float ratio_to_multiply = height / 2.f;
glm::vec3 translation_vector = glm::vec3(0.f, height / 2.f, 0.f); // < --- If I remove this translation I get results that seem to be correct, and can be used after normalizing the x and y
glm::mat4 left_model = glm::scale(glm::translate(glm::mat4(1.f), translation_vector), glm::vec3(ratio * ratio_to_multiply, ratio_to_multiply, 1.f));
glm::mat4 right_model = glm::scale(glm::translate(glm::mat4(1.f), -1.f * translation_vector), glm::vec3(ratio * ratio_to_multiply, ratio_to_multiply, 1.f));
glUniformMatrix4fv(glGetUniformLocation(correspond_shader_, "model"), 1, GL_FALSE, glm::value_ptr(left_model));
glDrawArrays(GL_TRIANGLES, 0, 6); //, GL_UNSIGNED_INT, NULL);
EDIT: I think my question needs to be improved. I'm drawing two quads and rendering separate textures to them. What I want is to get the mouse coordinates as normalized texture coordinates, depending on which quad the mouse is over.
I see that you are using the glm library. You can get the mouse position / ray direction using the unprojection method.
glm::vec2 screenPos(mousePos.x, mousePos.y);
screenPos.y = height - screenPos.y;
float aspect = width / height;
glm::vec4 viewport = glm::vec4(0.0f, 0.0f, width , height);
glm::mat4 proj = glm::perspective(75.0f, aspect, 0.1f, 10000.0f);
glm::vec3 a (screenPos.x, screenPos.y, 0);
glm::vec3 b (screenPos.x, screenPos.y, 1);
glm::vec3 result = glm::unProject(a, viewMatrix, proj, viewport);
glm::vec3 result2 = glm::unProject(b, viewMatrix, proj, viewport);
glm::vec3 pickingPos = result;
glm::vec3 pickingDir = result2 - result;
After that you can use the direction and position to check for collisions.
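As a rough follow-up sketch (assuming the un-scaled quad lies in the z = 0 plane of its model space and spans [-1, 1] in x and y, which may not match your actual vertex data), the picking ray can be moved into the quad's local space, intersected with that plane, and the hit point remapped to [0, 1] texture coordinates:
#include <cmath>
#include <glm/glm.hpp>

// Returns true and fills 'uv' if the world-space ray hits this quad.
bool pickQuadUV(const glm::vec3& pickingPos, const glm::vec3& pickingDir,
                const glm::mat4& model, glm::vec2& uv)
{
    // Transform the world-space ray into the quad's local space.
    glm::mat4 inv = glm::inverse(model);
    glm::vec3 localPos = glm::vec3(inv * glm::vec4(pickingPos, 1.0f));
    glm::vec3 localDir = glm::vec3(inv * glm::vec4(pickingDir, 0.0f));

    if (std::fabs(localDir.z) < 1e-6f)
        return false;                          // ray parallel to the quad

    float t = -localPos.z / localDir.z;
    glm::vec3 hit = localPos + t * localDir;

    if (hit.x < -1.0f || hit.x > 1.0f || hit.y < -1.0f || hit.y > 1.0f)
        return false;                          // missed this quad

    uv = glm::vec2((hit.x + 1.0f) * 0.5f,      // remap [-1, 1] to [0, 1]
                   (hit.y + 1.0f) * 0.5f);
    return true;
}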
I think CrSe's answer is right too. I have done this and I can pick any point on the model.
I shoot a ray through these two points (p1 and p2):
Glu.gluUnProject(tempx, viewport[3] - tempy, 0, modelMatrix, projMatrix, viewport, out x1, out y1, out z1);
p = new Point(x1, y1, z1);
Glu.gluUnProject(tempx, viewport[3] - tempy, 1, modelMatrix, projMatrix, viewport, out x1, out y1, out z1);
p1 = new Point(x1, y1, z1);
If the distance between this ray and a vertex is less than a threshold, I pick that point. I hope it is useful.
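For reference, that distance test can be sketched like this (GLM used for brevity; p1 is the ray origin and dir is the normalized direction from p1 towards p2):
#include <glm/glm.hpp>

// Distance from vertex v to the ray starting at p1 with normalized direction dir.
// Pick the vertex if this distance is below your threshold.
float distanceToRay(const glm::vec3& p1, const glm::vec3& dir, const glm::vec3& v)
{
    glm::vec3 toVertex = v - p1;
    float t = glm::dot(toVertex, dir);      // projection of the vertex onto the ray
    if (t < 0.0f) t = 0.0f;                 // clamp so we stay on the ray, not the full line
    glm::vec3 closest = p1 + t * dir;       // closest point on the ray to v
    return glm::length(v - closest);
}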
I am trying to create my own quaternion class and I get weird results. Either the cube I am trying to rotate is flickering like crazy, or it is getting warped.
This is my code:
void Quaternion::AddRotation(vec4 v)
{
Quaternion temp(v.x, v.y, v.z, v.w);
*this = temp * (*this);
}
mat4 Quaternion::GenerateMatrix(Quaternion &q)
{
q.Normalize();
//Row order
mat4 m( 1 - 2*q.y*q.y - 2*q.z*q.z, 2*q.x*q.y - 2*q.w*q.z, 2*q.x*q.z + 2*q.w*q.y, 0,
2*q.x*q.y + 2*q.w*q.z, 1 - 2*q.x*q.x - 2*q.z*q.z, 2*q.y*q.z + 2*q.w*q.x, 0,
2*q.x*q.z - 2*q.w*q.y, 2*q.y*q.z - 2*q.w*q.x, 1 - 2*q.x*q.x - 2*q.y*q.y, 0,
0, 0, 0, 1);
//Col order
// mat4 m( 1 - 2*q.y*q.y - 2*q.z*q.z,2*q.x*q.y + 2*q.w*q.z,2*q.x*q.z - 2*q.w*q.y,0,
// 2*q.x*q.y - 2*q.w*q.z,1 - 2*q.x*q.x - 2*q.z*q.z,2*q.y*q.z - 2*q.w*q.x,0,
// 2*q.x*q.z + 2*q.w*q.y,2*q.y*q.z + 2*q.w*q.x,1 - 2*q.x*q.x - 2*q.y*q.y,0,
// 0,0,0,1);
return m;
}
When I create the entity I give it a quaternion:
entity->Quat.AddRotation(vec4(1.0f, 1.0f, 0.0f, 45.f));
And each frame I try to rotate it additionally by a small amount:
for (int i = 0; i < Entities.size(); i++)
{
if (Entities[i] != NULL)
{
Entities[i]->Quat.AddRotation(vec4(0.5f, 0.2f, 1.0f, 0.000005f));
Entities[i]->DrawModel();
}
else
break;
}
And finally this is how I draw each cube:
void Entity::DrawModel()
{
glPushMatrix();
//Rotation
mat4 RotationMatrix;
RotationMatrix = this->Quat.GenerateMatrix(this->Quat);
//Position
mat4 TranslationMatrix = glm::translate(mat4(1.0f), this->Pos);
this->Trans = TranslationMatrix * RotationMatrix;
glMultMatrixf(value_ptr(this->Trans));
if (this->shape != NULL)
this->shape->DrawShape();
glPopMatrix();
}
EDIT: This is the tutorial I used to learn quaternions:
http://www.cprogramming.com/tutorial/3d/quaternions.html
Without studying your rotation matrix in detail, there are two possible bugs I can think of. The first is that your rotation matrix R is not orthogonal, i.e. the inverse of R is not equal to its transpose; this would cause warping of the object. The second place a bug could hide is in the multiplication of your quaternions.
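One way to sanity-check the first point is to multiply the generated matrix by its transpose and compare the result against the identity. A quick diagnostic sketch using GLM (which the question already links against):
#include <cmath>
#include <glm/glm.hpp>

// A pure rotation matrix R must satisfy R * transpose(R) == identity.
// If any entry of the difference is large, the matrix is not orthogonal
// and will shear/warp the cube instead of just rotating it.
bool IsOrthogonal(const glm::mat4& R, float eps = 1e-4f)
{
    glm::mat4 diff = R * glm::transpose(R) - glm::mat4(1.0f);
    for (int c = 0; c < 4; ++c)
        for (int r = 0; r < 4; ++r)
            if (std::fabs(diff[c][r]) > eps)
                return false;
    return true;
}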
There's a mistake in the rotation matrix. Try exchanging the element (2,3) with element (3,2).
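With that swap applied, the row-order matrix matches the standard quaternion-to-matrix form (reusing the mat4 constructor and the quaternion members from the question):
// Elements (2,3) and (3,2) exchanged relative to the code in the question.
mat4 m( 1 - 2*q.y*q.y - 2*q.z*q.z,  2*q.x*q.y - 2*q.w*q.z,      2*q.x*q.z + 2*q.w*q.y,      0,
        2*q.x*q.y + 2*q.w*q.z,      1 - 2*q.x*q.x - 2*q.z*q.z,  2*q.y*q.z - 2*q.w*q.x,      0,
        2*q.x*q.z - 2*q.w*q.y,      2*q.y*q.z + 2*q.w*q.x,      1 - 2*q.x*q.x - 2*q.y*q.y,  0,
        0,                          0,                          0,                          1);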