glm::mat4 yellow_bone_obj_mat = m_bone_animation->get_yellow_mat();
glUniformMatrix4fv(glGetUniformLocation(shader.program, "model"), 1, GL_FALSE, glm::value_ptr(yellow_bone_obj_mat));
bone_obj->obj_color = m_bone_animation->colors[1];
draw_object(shader, *bone_obj);
I created a cube using this code.
glm::vec3 scale = glm::vec3(1.f, 1.f, 1.f);
m_yellow_mat = glm::mat4(1.0f);
m_yellow_mat = glm::scale(m_yellow_mat, scale);
glm::vec3 pivot = glm::vec3(0.0f, 2.f, 0.0f);
glm::vec3 pos = root_position;
m_yellow_mat = glm::translate(m_yellow_mat, pos);
m_yellow_mat = glm::rotate(m_yellow_mat, glm::radians(angleZ), glm::vec3(0, 0, 1));
m_yellow_mat = glm::rotate(m_yellow_mat, glm::radians(angleY), glm::vec3(0, 1, 0));
m_yellow_mat = glm::rotate(m_yellow_mat, glm::radians(angleX), glm::vec3(1, 0, 0));
m_yellow_mat = glm::translate(m_yellow_mat, pivot);
m_yellow_mat = glm::scale(m_yellow_mat, scale_vector[1]);
// scale_vector[1] = { 0.5f, 4.f, 0.5f };
// root_position  = { 2.0f, 1.0f, 2.0f };
These are the transformations I applied.
This lets the cube rotate around its endpoint (the bottom part). I want to find the vector position of the cube's start point (the top part). How can I do that?
A 4x4 transformation matrix looks as follows:
column 0: Xx, Xy, Xz, 0
column 1: Yx, Yy, Yz, 0
column 2: Zx, Zy, Zz, 0
column 3: Tx, Ty, Tz, 1
The translation is stored in the 4th column of the column-major matrix.
That means the xyz components of the translation are m_yellow_mat[3][0], m_yellow_mat[3][1] and m_yellow_mat[3][2]:
glm::vec3 trans = glm::vec3(m_yellow_mat[3]);
If you want to know the world position of a vertex coordinate of the model, then you have to transform the model coordinate by the model matrix:
glm::vec3 vertex_coordinate;
glm::vec3 world_coordinate = glm::vec3(m_yellow_mat * glm::vec4(vertex_coordinate, 1.0f));
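Applied to the question above, a minimal sketch for the world position of the cuboid's top end, assuming the base cube mesh spans [-0.5, 0.5] on each axis so its top-center vertex is at (0, 0.5, 0) in model space (use your mesh's actual top vertex if it differs):
glm::vec3 local_top = glm::vec3(0.0f, 0.5f, 0.0f); // assumed top-center of the unscaled cube
glm::vec3 world_top = glm::vec3(m_yellow_mat * glm::vec4(local_top, 1.0f));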
I'm trying to get the coordinates (x, y) of the grid (z = 0) using only the cursor coordinates. After a long search I found this way to do it using glm::unProject.
First I'm getting the cursor coordinates using the callback:
void cursorCallback(GLFWwindow *window, double x, double y)
{
this->cursorCoordinate = glm::vec3(x, (this->windowHeight - y - 1.0f), 0.0f);
}
and then converting these coordinates:
glm::vec3 cursorCoordinatesToWorldCoordinates()
{
glm::vec3 pointInitial = glm::unProject(
glm::vec3(this->cursorCoordinate.x, this->cursorCoordinate.y, 0.0),
this->modelMatrix * this->viewMatrix,
this->projectionMatrix,
this->viewPort
);
glm::vec3 pointFinal = glm::unProject(
glm::vec3(this->cursorCoordinate.x, this->cursorCoordinate.y, 1.0),
this->modelMatrix * this->viewMatrix,
this->projectionMatrix,
this->viewPort
);
glm::vec3 vectorDirector = pointFinal - pointInitial;
double lambda = (-pointInitial.y) / vectorDirector.y;
double x = pointInitial.x + lambda * vectorDirector.x;
double y = pointInitial.z + lambda * vectorDirector.z;
return glm::vec3(x, y, 0.0f);
}
I use an ArcBall camera to rotate the world around a specified axis; this is how I generate the MVP matrices:
this->position = glm::vec3(0.0f, 10.0f, 5.0f);
this->up = glm::vec3(0.0f, 1.0f, 0.0f);
this->lookAt = glm::vec3(0.0f, 0.0f, 0.0f);
this->fieldView = 99.0f;
this->farDistance = 100.0f;
this->nearDistance = 0.1f;
this->modelMatrix = glm::mat4(1.0f);
this->viewMatrix = glm::lookAt(this->position, this->lookAt, this->up) * glm::rotate(glm::degrees(this->rotationAngle) * this->dragSpeed, this->rotationAxis);
this->projectionMatrix = glm::perspective(glm::radians(this->fieldView), 1.0f, this->nearDistance, this->farDistance);
But something is going wrong because I'm not getting the right results. Look at this screenshot of the application:
Each square is 1 unit, and the cube is rendered at position (0, 0, 0). With rotationAngle = 0, when I put the cursor at (0,0), (1,1), (2,2), (3,3), (4,4), (5,5) I get (0, 5.7), (0.8, 6.4), (1.6, 6.9), (2.4, 7.6), (3.2, 8.2), (4.2, 8.8) respectively. That's not what I expected.
Why is y offset by about 6 units?
Is it necessary to rotate the result of cursorCoordinatesToWorldCoordinates based on rotationAngle?
--
What I have already checked:
Checked that the viewport matches glViewport - OK
Checked the OpenGL coordinates (Y is up, not Z) - OK
You want to intersect the ray from glm::vec3(this->cursorCoordinate.x, this->cursorCoordinate.y, 0.0) to glm::vec3(this->cursorCoordinate.x, this->cursorCoordinate.y, 1.0) with the grid in world space, rather than model space (of the cuboid).
You have to skip this->modelMatrix:
glm::vec3 pointInitial = glm::unProject(
glm::vec3(this->cursorCoordinate.x, this->cursorCoordinate.y, 0.0),
this->viewMatrix,
this->projectionMatrix,
this->viewPort);
glm::vec3 pointFinal = glm::unProject(
glm::vec3(this->cursorCoordinate.x, this->cursorCoordinate.y, 1.0),
this->viewMatrix,
this->projectionMatrix,
this->viewPort);
In any case, this->modelMatrix * this->viewMatrix is incorrect. If you want to intersect the ray with an object in model space, then it has to be this->viewMatrix * this->modelMatrix. Matrix multiplication is not commutative.
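Putting it together, a minimal corrected sketch of the function from the question: unproject with the view matrix only, then intersect the ray with the grid plane in world space (assuming, as in the original code, that the grid is the world-space y = 0 plane):
glm::vec3 cursorCoordinatesToWorldCoordinates()
{
    glm::vec3 pointInitial = glm::unProject(
        glm::vec3(this->cursorCoordinate.x, this->cursorCoordinate.y, 0.0),
        this->viewMatrix, this->projectionMatrix, this->viewPort);
    glm::vec3 pointFinal = glm::unProject(
        glm::vec3(this->cursorCoordinate.x, this->cursorCoordinate.y, 1.0),
        this->viewMatrix, this->projectionMatrix, this->viewPort);

    // Intersect the ray with the y = 0 plane and return the grid coordinates,
    // using the same (x, z) -> (x, y) convention as the original function.
    glm::vec3 vectorDirector = pointFinal - pointInitial;
    double lambda = -pointInitial.y / vectorDirector.y;
    double x = pointInitial.x + lambda * vectorDirector.x;
    double y = pointInitial.z + lambda * vectorDirector.z;
    return glm::vec3(x, y, 0.0f);
}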
So, I drew a yellow cuboid using this:
glm::mat4 yellow_bone_obj_mat = m_bone_animation->get_yellow_mat();
glUniformMatrix4fv(glGetUniformLocation(shader.program, "model"), 1, GL_FALSE,
glm::value_ptr(yellow_bone_obj_mat));
bone_obj->obj_color = m_bone_animation->colors[1];
draw_object(shader, *bone_obj);
with the scale factor { 0.5f, 4.0f, 0.5f } and position { 2.0f, 3.0f, 2.0f }.
I want my yellow cuboid to rotate 90 degrees to the right, while the end of the yellow cuboid stays attached to the red cube.
It should look like this
I tried
m_yellow_mat = glm::translate(m_yellow_mat, glm::vec3(0.0, -0.5, 0.0)); // 0.5f is just a random number to check if the pivot changed
m_yellow_mat = glm::rotate(m_yellow_mat, glm::radians(angle), glm::vec3(0, 0, 1));
m_yellow_mat = glm::translate(m_yellow_mat, glm::vec3(0.0, 0.5, 0.0));
Got this as output
Next I tried:
m_yellow_mat = glm::translate(m_yellow_mat, glm::vec3(0.0, -0.5, 0.0)); // 0.5f is just a random number to check if the pivot changed
m_yellow_mat = glm::rotate(m_yellow_mat, glm::radians(angle), glm::vec3(1, 0, 0)); //changed axis
m_yellow_mat = glm::translate(m_yellow_mat, glm::vec3(0.0, 0.5, 0.0));
Got this as output. No matter what I do, the cuboid doesn't fall to the right side, and I am not sure why.
What you actually do is rotate a perfect cube and then scale the rotated cube. The cube is rotated, but the scale is applied afterwards, so it always appears to be oriented in the same direction.
You have to scale the cube first and then rotate the resulting cuboid:
m_yellow_mat = translate(pivot) * rotate * translate(-pivot) * scale
Note that operations like glm::rotate, glm::scale and glm::translate create a new matrix and multiply the current matrix by it, e.g.:
glm::vec3 scale = glm::vec3(0.5f, 4.0f, 0.5f);
glm::vec3 pivot = glm::vec3(0.0f, 0.5f, 0.0f);
m_yellow_mat = glm::mat4(1.0f);
m_yellow_mat = glm::translate(m_yellow_mat, pivot);
m_yellow_mat = glm::rotate(m_yellow_mat, glm::radians(angle), glm::vec3(0, 0, 1));
m_yellow_mat = glm::translate(m_yellow_mat, -pivot);
m_yellow_mat = glm::scale(m_yellow_mat, scale);
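If the cuboid also has to be placed at a world position, a translation can be applied on top of the pivoted matrix. A small sketch, assuming the position { 2.0f, 3.0f, 2.0f } mentioned in the question:
// Assumption: the cuboid should additionally be moved to the world position from the question.
glm::vec3 root_position = glm::vec3(2.0f, 3.0f, 2.0f);
m_yellow_mat = glm::translate(glm::mat4(1.0f), root_position) * m_yellow_mat;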
I'm trying to rotate my camera so that I see an object rotating around the camera, using a rotation matrix that I developed. The problem is that it doesn't work.
So I tried glm::rotate with these values:
m_View = glm::rotate(m_View, a * glm::radians(180.0f), glm::vec3(0.0f, 1.0f, 0.0f))
but it does not work either:
void CCam::setView()
{
Front = glm::normalize(Eye - At);
Right = glm::normalize(glm::cross(Up, Front));
up = glm::cross(Front, Right); // the true up vector
m_View = glm::lookAt(
Eye, // Camera Position
(Eye + Front), // Where the camera looks
up // This is another way to say camera is not rotated
);
newAt = glm::vec4(At, 1.0f);
//m_View = m_View * GLMatrixRotationY(a);
m_View = glm::rotate(m_View, a * glm::radians(180.0f), glm::vec3(0.0f, 1.0f, 0.0f));
}
glm::mat4 CCam::GLMatrixRotationX(float Angle)
{
matrizRotacionX = glm::mat4(
1, 0, 0, 0,
0, cos(Angle), -sin(Angle), 0,
0, sin(Angle), cos(Angle), 0,
0, 0, 0, 1
);
return matrizRotacionX;
}
I expect to see my mesh rotating around the camera, but I only get the camera rotating around the mesh.
I need to rotate an object in its local coordinate system, like you can in 3ds Max, Maya, etc.
My current code is:
ModelMatrix = glm::mat4(1.0f);
TransformMatrix = glm::mat4(1.0f);
ScaleMatrix = glm::mat4(1.0f);
RotateMatrix = glm::mat4(1.0f);
ScaleMatrix = glm::scale(ScaleMatrix, glm::vec3(scalex, scalez, scaley));
TransformMatrix = glm::translate(TransformMatrix, glm::vec3(x, z, y));
RotateMatrix = glm::rotate(RotateMatrix, anglex, glm::vec3(1, 0, 0));
RotateMatrix= glm::rotate(RotateMatrix, angley, glm::vec3(0, 0, 1));
RotateMatrix = glm::rotate(RotateMatrix, anglez, glm::vec3(0, 1, 0));
ModelMatrix = TransformMatrix * ScaleMatrix* RotateMatrix;
MVP = Projection * View * ModelMatrix ;
anglex, angley and anglez come from the keyboard.
Right now only the last axis works as a local axis (in my example it's glm::vec3(0, 1, 0), the Z axis). In this image I show what I need (2) and what I get (3). If I change anglez it always works as roll, but anglex and angley are in the world coordinate system.
My second attempt was to use quaternions:
quat MyQuaternion= glm::quat(cos(glm::radians(xangle / 2)), 0, sin(glm::radians(xangle / 2)), 0);
quat MyQuaternion2 = glm::quat(cos(glm::radians(yangle/ 2)), sin(glm::radians(yangle / 2)), 0, 0);
quat MyQuaternion3 = glm::quat(cos(glm::radians(zangle / 2)), 0,0,sin(glm::radians(zangle / 2)));
glm::mat4 RotationMatrix = toMat4(MyQuaternion*MyQuaternion2*MyQuaternion3);
But I get the same result.
You should modify the entire ModelMatrix instead of the angles. Initialize ModelMatrix to the identity matrix. Then, when you process keyboard input:
if(rotate about x-axis)
ModelMatrix = glm::rotate(ModelMatrix, angle, glm::vec3(1, 0, 0));
if(rotate about y-axis)
ModelMatrix = glm::rotate(ModelMatrix, angle, glm::vec3(0, 1, 0));
if(rotate about z-axis)
ModelMatrix = glm::rotate(ModelMatrix, angle, glm::vec3(0, 0, 1));
if(any rotation happened)
MVP = Projection * View * ModelMatrix ;
You can do this modification at any level. Either the MVP level, the ModelMatrix level (as shown here) or the RotateMatrix level.
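A minimal sketch of that idea, assuming GLFW key polling for the input (GLFW is an assumption here, as suggested by the cursorCallback earlier in this section; any input API works) and that ModelMatrix, Projection, View and MVP are accessible:
// Assumptions: GLFW is used for input; ModelMatrix, Projection, View and MVP
// are accessible here. The rotation is accumulated directly into ModelMatrix,
// so each new rotation is applied about the object's current local axes.
void processRotationInput(GLFWwindow* window, float angle)
{
    bool rotated = false;
    if (glfwGetKey(window, GLFW_KEY_X) == GLFW_PRESS) {
        ModelMatrix = glm::rotate(ModelMatrix, angle, glm::vec3(1, 0, 0));
        rotated = true;
    }
    if (glfwGetKey(window, GLFW_KEY_Y) == GLFW_PRESS) {
        ModelMatrix = glm::rotate(ModelMatrix, angle, glm::vec3(0, 1, 0));
        rotated = true;
    }
    if (glfwGetKey(window, GLFW_KEY_Z) == GLFW_PRESS) {
        ModelMatrix = glm::rotate(ModelMatrix, angle, glm::vec3(0, 0, 1));
        rotated = true;
    }
    if (rotated)
        MVP = Projection * View * ModelMatrix;
}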
I'm trying to get object space coordinates from the mouse position. I have some standard rendering code, which works well.
The problem is with the mouse picking code. I have tried lots of things and gone through similar questions but I can't seem to understand why it's not working.
I expect the result to be x, y coordinates within [-1, 1] based on the position of the mouse over the object. I do get points within [-1, 1], but they are extremely skewed, such as (2.63813e-012, -1, 300).
Unproject code:
float z; // the depth value is read as GL_FLOAT, so it must go into a float
glReadPixels(mouse_pos_[0], int( navWidget->height() - mouse_pos_[1]), 1, 1, GL_DEPTH_COMPONENT, GL_FLOAT, &z);
glm::vec3 win(mouse_pos_[0], navWidget->height() - mouse_pos_[1], z);
glm::vec4 viewport(0, 0, navWidget->width(), navWidget->height());
auto result_vec3 = glm::unProject(win, view * model1, proj, viewport);
auto result = glm::normalize(glm::vec2(result_vec3.x, result_vec3.y)); // < -- I normalize here since that gave good results without the translate
bool left_image = true;
if (!(result.x <= length_per_side && result.x >= -length_per_side &&
result.y <= length_per_side && result.y >= -length_per_side)) {
// do stuff
}
}
Rendering code:
float fov = 2*(atan((camProjModule->camResY()/2*camProjModule->camPixSizeY()) /
camProjModule->camFocalLength()) / M_PI * 180.0);
float znear = 1.0f;
float zfar = 6000.0f;
//float aspect = 1024.f / 683.f;
float aspect = navWidget->width() / navWidget->height();
glm::mat4 proj = glm::perspective(fov, aspect, znear, zfar);
float required_height =(float)( znear * tan((fov / 2.f) * M_PI / 180.f));
float eye_distance = znear / required_height * ((float)(navWidget->height()) / 2.f);
eye_distance = 300.f;
glm::mat4 view = glm::lookAt(glm::vec3(0.f, 0.f, 1.f * eye_distance), glm::vec3(0.f, 0.f, 0.f), glm::vec3(0.f, 1.f, 0.f));
glUseProgram(correspond_shader_);
glBindVertexArray(quad_vao_);
glUniform3f(colorLoc, 1.0f, 1.0f, 1.0f);
// draw left
if (left_correspond_texture_) {
glEnable(GL_TEXTURE_2D);
glActiveTexture(GL_TEXTURE0 + 0);
glBindTexture(GL_TEXTURE_2D, left_correspond_texture_);
glUniform1i(drawTexLoc, left_correspond_texture_);
}
GLint proj_loc = glGetUniformLocation(correspond_shader_, "proj");
GLint view_loc = glGetUniformLocation(correspond_shader_, "view");
GLint draw_tex_loc = glGetUniformLocation(correspond_shader_, "drawTex");
glUniformMatrix4fv(proj_loc, 1, GL_FALSE, glm::value_ptr(proj));
glUniformMatrix4fv(view_loc, 1, GL_FALSE, glm::value_ptr(view));
float ratio = 1024.f / 683.f;
float height = navWidget->height() / 2.f;
float ratio_to_multiply = height / 2.f;
glm::vec3 translation_vector = glm::vec3(0.f, height / 2.f, 0.f); // < --- If I remove this translation I get results that seem to be correct, and can be used after normalizing the x and y
glm::mat4 left_model = glm::scale(glm::translate(glm::mat4(1.f), translation_vector), glm::vec3(ratio * ratio_to_multiply, ratio_to_multiply, 1.f));
glm::mat4 right_model = glm::scale(glm::translate(glm::mat4(1.f), -1.f * translation_vector), glm::vec3(ratio * ratio_to_multiply, ratio_to_multiply, 1.f));
glUniformMatrix4fv(glGetUniformLocation(correspond_shader_, "model"), 1, GL_FALSE, glm::value_ptr(left_model));
glDrawArrays(GL_TRIANGLES, 0, 6); //, GL_UNSIGNED_INT, NULL);
EDIT: I think my question needs to be clarified. I'm drawing two quads and rendering a separate texture to each. What I want is to get the mouse coordinates as normalized texture coordinates, depending on which quad the mouse is over.
I see that you are using the glm library. You can get the mouse coordinates / ray direction using the unprojection method.
glm::vec2 screenPos(mousePos.x, mousePos.y);
screenPos.y = height - screenPos.y;
float aspect = width / height;
glm::vec4 viewport = glm::vec4(0.0f, 0.0f, width , height);
glm::mat4 proj = glm::perspective(75.0f, aspect, 0.1f, 10000.0f);
glm::vec3 a (screenPos.x, screenPos.y, 0);
glm::vec3 b (screenPos.x, screenPos.y, 1);
glm::vec3 result = glm::unProject(a, viewMatrix, proj, viewport);
glm::vec3 result2 = glm::unProject(b, viewMatrix, proj, viewport);
glm::vec3 pickingPos = result;
glm::vec3 pickingDir = result2 - result;
After that you can use the direction and position to check for collisions.
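For the two-quad setup from the question, a minimal follow-up sketch: intersect the picking ray with the plane the quads lie in and map the hit point into a quad's model space via the inverse of its model matrix (left_model is the matrix from the question; the z = 0 plane is an assumption based on the view matrix looking down the negative z axis):
float t = -pickingPos.z / pickingDir.z;       // ray parameter where the ray crosses z == 0
glm::vec3 hit = pickingPos + t * pickingDir;  // world-space intersection point
glm::vec3 local = glm::vec3(glm::inverse(left_model) * glm::vec4(hit, 1.0f));
// local.x and local.y are in the quad's model space and can be remapped to
// normalized texture coordinates.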
I think CrSe's answer is right too. I have done this and I can pick any point on the model.
I shoot a ray through these two points (p1 and p2):
Glu.gluUnProject(tempx, viewport[3] - tempy, 0, modelMatrix, projMatrix, viewport, out x1, out y1, out z1);
p = new Point(x1, y1, z1);
Glu.gluUnProject(tempx, viewport[3] - tempy, 1, modelMatrix, projMatrix, viewport, out x1, out y1, out z1);
p1 = new Point(x1, y1, z1);
If the distance between this ray and a vertex is less than a threshold, I pick that point. I hope it is useful.
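As a sketch of that distance test (a hypothetical helper written with glm, rather than the Glu-style code above):
// Distance from a vertex to the ray through p1 and p2 (the two unprojected points).
float distancePointToRay(const glm::vec3& vertex, const glm::vec3& p1, const glm::vec3& p2)
{
    glm::vec3 dir = glm::normalize(p2 - p1);
    glm::vec3 toVertex = vertex - p1;
    // Remove the component along the ray; what remains is the perpendicular offset.
    glm::vec3 perpendicular = toVertex - glm::dot(toVertex, dir) * dir;
    return glm::length(perpendicular);
}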