I create a cube as usual, using 8 vertex points that outline a cube, and use indices to draw each individual triangle. However, when I create my camera matrix and rotate it using GLM's lookAt function, the rotation appears to happen in screen space rather than in world space.
glm::mat4 Projection = glm::mat4(1);
Projection = glm::perspective(glm::radians(60.0f), (float)window_width / (float)window_height, 0.1f, 100.0f);
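// Orbit the camera around the origin on a circle of 'radius' in the xz-plane.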
const float radius = 10.0f;
float camX = sin(glfwGetTime()) * radius;
float camZ = cos(glfwGetTime()) * radius;
glm::mat4 View = glm::mat4(1);
View = glm::lookAt(
glm::vec3(camX, 0, camZ),
glm::vec3(0, 0, 0),
glm::vec3(0, 1, 0)
);
glm::mat4 Model = glm::mat4(1);
glm::mat4 mvp = Projection * View * Model;
Then in GLSL:
uniform mat4 camera_mat4;
void main()
{
vec4 pos = vec4(vertexPosition_modelspace, 1.0) * camera_mat4;
gl_Position.xyzw = pos;
}
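For reference: GLM lays its matrices out for column vectors, so when the matrix is uploaded without transposition (glUniformMatrix4fv with GL_FALSE), the conventional GLSL order is matrix * vector. Writing vector * matrix, as above, multiplies by the transpose instead, which can produce exactly this kind of apparent screen-space rotation. A minimal corrected shader, as a sketch (the #version line and input declaration are assumptions, since the snippet omits them):
#version 330 core
layout(location = 0) in vec3 vertexPosition_modelspace;
uniform mat4 camera_mat4; // holds Projection * View * Model from the C++ side
void main()
{
    // Matrix on the left: GLM matrices are built for column vectors.
    gl_Position = camera_mat4 * vec4(vertexPosition_modelspace, 1.0);
}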
Example: GLM rotating screen coordinates not cube
Related:
I need to use a perspective transformation, but I can't understand how to define the model coordinates of a sprite. With an orthographic projection I can define the coordinate of each vertex as a number of pixels on screen, but with a perspective projection I can't.
Orthographic projection:
glm::ortho<GLfloat>(0.0f, screen_width, screen_height, 0.0f, 1.0f, -1.0f);
Perspective:
glm::perspective(glm::radians(45.f), (float)screen_width / (float)screen_height, 0.1f, 100.f);
Vertex shader:
#version 330 core
layout (std140) uniform Matrices
{
mat4 ProjectionMatrix;
mat4 ViewMatrix;
mat4 ModelMatrix;
};
layout (location = 0) in vec2 position;
layout (location = 1) in vec2 inTexCoords;
out vec2 TextureCoords;
void main()
{
TextureCoords = inTexCoords;
gl_Position = ProjectionMatrix * ViewMatrix * ModelMatrix * vec4(position, 1.f, 1.0);
}
For example:
vertices[1] = 0.f;
vertices[8] = 0.f;
vertices[12] = 0.f;
vertices[13] = 0.f;
for (GLuint i = 0; i < m_totSprites; ++i) {
// Vertex pos
vertices[0] = m_clips[i].w;
vertices[4] = vertices[0];
vertices[5] = m_clips[i].h;
vertices[9] = vertices[5];
// Texture pos
vertices[2] = (m_clips[i].x + m_clips[i].w) / tw;
vertices[3] = (m_clips[i].y + m_clips[i].h) / th;
vertices[6] = (m_clips[i].x + m_clips[i].w) / tw;
vertices[7] = m_clips[i].y / th;
vertices[10] = m_clips[i].x / tw;
vertices[11] = m_clips[i].y / th;
vertices[14] = m_clips[i].x / tw;
vertices[15] = (m_clips[i].y + m_clips[i].h) / th;
}
It works well with an orthographic projection. How can I define vertex coordinates for perspective?
What is the difference between model coordinates under an orthographic projection and under a perspective one? Why is it easy to set vertex coordinates in pixel sizes in the first case, while in all perspective examples they are normalized between -0.5 and 0.5? Is that necessary?
Initially I misunderstood the difference between orthographic and perspective projections. As I understand it now, with a perspective projection the vertices are defined in arbitrary world units; they are moved, scaled, etc. with the model matrix, and the projection then maps the view frustum into NDC. Pixel-perfect rendering can only be achieved at some constant depth, or with an orthographic projection; it isn't really applicable to 3D with a perspective projection.
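To make the "constant depth" case concrete: with a vertical field of view fovY, the frustum is 2 * d * tan(fovY / 2) world units tall at distance d in front of the camera, so the scale that maps one pixel to world units follows directly. A minimal sketch under those assumptions (the helper name and the usage lines are illustrative, not from the original code):
#include <cmath>

// World units covered by one screen pixel on a plane 'depth' units in front
// of the camera, given the fovY passed to glm::perspective above.
float worldUnitsPerPixel(float fovY_radians, float depth, float screen_height)
{
    float frustumHeight = 2.0f * depth * std::tan(fovY_radians / 2.0f);
    return frustumHeight / screen_height;
}

// Usage sketch: scale a w x h pixel sprite so it renders pixel-perfect at 'depth'.
// float s = worldUnitsPerPixel(glm::radians(45.f), depth, (float)screen_height);
// ModelMatrix = glm::scale(glm::mat4(1.f), glm::vec3(w * s, h * s, 1.f));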
If you have a projection matrix you need a view matrix too; there's glm::lookAt(), for example. I usually use this combo:
glm::lookAt(glm::vec3(-1.2484, 0.483, 1.84384), glm::vec3(-0.3801, -0.4183, -3.15), glm::vec3(0.0, 0.2, 0.0))
glm::perspective(45., 1., 1.2, 300.)
glm::mat4(1.)
I'm developing an application in C++ using OpenGL. My problem is that when I render a cube with a rotation applied, it appears out of proportion (see the screenshots described below).
(The original post included screenshots of the cube rotated at 0, 45, 90, 180, and 360 degrees.)
The following code is where the matrix is calculated:
glm::mat4 DrawnEntity::getMatrix() const
{
glm::mat4 translate = glm::translate(glm::mat4(1.0f), position);
glm::mat4 rotation = glm::toMat4(glm::quat(glm::radians(360.0f), 0, 1, 0));
glm::mat4 matrix = translate * rotation;
return matrix;
}
When the entity is drawn, this is called:
void DrawnEntity::render()
{
if (enabled)
mesh->render(getMatrix());
}
Which subsequently calls this:
void Mesh::render(glm::mat4 matrix)
{
glUseProgram(shaderID);
vao->render(matrix);
}
vao is a VertexArrayObject, and this is the function:
void VertexArrayObject::render(glm::mat4 matrix)
{
GLuint uModel = glGetUniformLocation(shaderID, "uModel");
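// GL_TRUE transposes the matrix on upload, which pairs with the shader's vec4 * matrix order.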
glUniformMatrix4fv(uModel, 1, GL_TRUE, &matrix[0][0]);
glBindVertexArray(vao[0]);
glDrawArrays(GL_TRIANGLES, 0, mesh->vertexCount());
glBindVertexArray(0);
}
Ignoring the rotation, it appears all the vertices are being correctly loaded. For what it's worth, this is the class which generates the cube:
#include "CubeMesh.h"
#include "../VertexArrayObject.h"
CubeMesh::CubeMesh(GLuint shader)
{
shaderID = shader;
glUseProgram(shaderID);
generateFaces();
vao = new VertexArrayObject(this);
}
void CubeMesh::generateFaces()
{
// Front face
generateFace(glm::vec3(-0.5, 0.5, 0.5), glm::vec3(0.5, -0.5, 0.5));
// Left face
generateFace(glm::vec3(-0.5, -0.5, 0.5), glm::vec3(-0.5, 0.5, -0.5));
// Back face
generateFace2(glm::vec3(0.5, -0.5, -0.5), glm::vec3(-0.5, 0.5, -0.5));
// Right face
generateFace2(glm::vec3(0.5, -0.5, 0.5), glm::vec3(0.5, 0.5, -0.5));
}
void CubeMesh::generateFace(glm::vec3 point1, glm::vec3 point2)
{
glm::vec3 tl = point1;
glm::vec3 br = point2;
glm::vec3 tr = glm::vec3(br.x, tl.y, br.z);
glm::vec3 bl = glm::vec3(tl.x, br.y, tl.z);
Vertex f1v1(glm::vec3(tl.x, tl.y, tl.z));
Vertex f1v2(glm::vec3(bl.x, bl.y, bl.z));
Vertex f1v3(glm::vec3(br.x, br.y, br.z));
Triangle f1(f1v1, f1v2, f1v3);
Vertex f2v1(glm::vec3(tr.x, tr.y, tr.z));
Vertex f2v2(glm::vec3(tl.x, tl.y, tl.z));
Vertex f2v3(glm::vec3(br.x, br.y, br.z));
Triangle f2(f2v1, f2v2, f2v3);
addData(f1);
addData(f2);
}
void CubeMesh::generateFace2(glm::vec3 point1, glm::vec3 point2)
{
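// Same as generateFace, but with the opposite triangle winding, for the faces seen from the other side.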
glm::vec3 tl = point1;
glm::vec3 br = point2;
glm::vec3 tr = glm::vec3(br.x, tl.y, br.z);
glm::vec3 bl = glm::vec3(tl.x, br.y, tl.z);
Vertex f1v1(glm::vec3(tl.x, tl.y, tl.z));
Vertex f1v2(glm::vec3(br.x, br.y, br.z));
Vertex f1v3(glm::vec3(bl.x, bl.y, bl.z));
Triangle f1(f1v1, f1v2, f1v3);
Vertex f2v1(glm::vec3(tr.x, tr.y, tr.z));
Vertex f2v2(glm::vec3(br.x, br.y, br.z));
Vertex f2v3(glm::vec3(tl.x, tl.y, tl.z));
Triangle f2(f2v1, f2v2, f2v3);
addData(f1);
addData(f2);
}
The vertex shader is as follows:
#version 430 core
uniform mat4 uModel;
uniform mat4 uView;
uniform mat4 uProjection;
in vec3 vPosition;
in vec3 vNormal;
out vec4 oColour;
void main(void)
{
oColour = vec4(vPosition, 1);
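// Note: the vec4 * matrix order here relies on the GL_TRUE (transpose) uploads on the CPU side.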
gl_Position = vec4(vPosition, 1) * uModel * uView * uProjection;
}
uModel is the matrix representing the individual model's translation, rotation, etc.; uView represents the camera's position; and uProjection is the projection matrix. The first is fed to the shader in the VertexArrayObject shown previously, while the last two are fed to the shader in the camera object, as below:
void Camera::initialise()
{
glUseProgram(shaderID);
view = glm::translate(glm::mat4(1), position);
int uView = glGetUniformLocation(shaderID, "uView");
glUniformMatrix4fv(uView, 1, GL_TRUE, &view[0][0]);
int uProjection = glGetUniformLocation(shaderID, "uProjection");
glm::mat4 projection = glm::perspective(1.0, (double)1024 / (double)1024, 0.01, 10.0);
glUniformMatrix4fv(uProjection, 1, GL_TRUE, &projection[0][0]);
}
The position of the model is (0, 0, 0) and the position of the camera is (0, 0, -5). When the uModel position is changed, the cube is rendered where expected, but with the rotation applied it doesn't behave as it should.
Can anybody see anything I might be doing wrong? Is there any more code you need to see?
I found that changing the following line:
glm::mat4 rotation = glm::toMat4(glm::quat(glm::radians(360.0f), 0, 1, 0));
to this:
glm::mat4 rotation = glm::rotate(translate, (float)glm::radians(0.0), glm::vec3(0.0f, 1.0f, 0.0f));
solves the issue. I can't really offer an explanation for why the first one didn't work, as I don't know enough about quaternions to comment on it. If anyone can explain it better, please edit my answer. Regardless, this was the best fix I could find.
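A likely explanation, for reference: the glm::quat(w, x, y, z) constructor takes the four raw quaternion components, not an angle and an axis, so glm::quat(glm::radians(360.0f), 0, 1, 0) builds a non-unit quaternion, and glm::toMat4 of a non-unit quaternion yields a matrix with scale and shear in it, which matches the out-of-proportion cube. The angle-axis constructor is glm::angleAxis. A minimal sketch (the 45-degree angle is just an example value):
#include <glm/glm.hpp>
#include <glm/gtc/quaternion.hpp>   // glm::quat, glm::angleAxis
#include <glm/gtx/quaternion.hpp>   // glm::toMat4

// angleAxis builds a unit quaternion from an angle (in radians) and an axis.
glm::quat q = glm::angleAxis(glm::radians(45.0f), glm::vec3(0.0f, 1.0f, 0.0f));
glm::mat4 rotation = glm::toMat4(q);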
How can I rotate a camera around an axis? What matrix do I have to multiply by?
I am using glm::lookAt to construct the view matrix, but I tried multiplying it by a rotation matrix and nothing happened.
glm::mat4 GetViewMatrix()
{
return glm::lookAt(this->Position, this->Position + this->Front, glm::vec3(0.0f, 5.0f, 0.0f));
}
glm::mat4 ProjectionMatrix = glm::perspective(actual_camera->Zoom, (float)g_nWidth / (float)g_nHeight, 0.1f, 1000.0f);
glm::mat4 ViewMatrix = actual_camera->GetViewMatrix();
glm::mat4 ModelMatrix = glm::mat4(1.0);
glm::mat4 MVP = ProjectionMatrix * ViewMatrix * ModelMatrix;
Rotate the front and up vectors of your camera using glm::rotate:
glm::mat4 GetViewMatrix()
{
auto front = glm::rotate(this->Front, angle, axis);
auto up = glm::rotate(glm::vec3(0, 1, 0), angle, axis);
return glm::lookAt(this->Position, this->Position + front, up);
}
Alternatively, you can add a multiplication with your rotation matrix to your MVP construction:
glm::mat4 MVP = ProjectionMatrix * glm::transpose(Rotation) * ViewMatrix * ModelMatrix;
It is important that the rotation happens after the view matrix, so all objects are rotated relative to the camera's position. Furthermore, you have to use transpose(Rotation) (the inverse of a rotation matrix is its transpose), since rotating the camera clockwise, for example, is equivalent to rotating all objects counter-clockwise.
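For the alternative form, the rotation matrix itself can be built with glm::rotate. A short sketch, where the angle and axis are placeholder values standing in for whatever drives the camera:
#include <glm/glm.hpp>                   // glm::mat4, glm::transpose
#include <glm/gtc/matrix_transform.hpp>  // glm::rotate

float angle = glm::radians(30.0f);             // example value only
glm::vec3 axis = glm::vec3(0.0f, 1.0f, 0.0f);  // e.g. yaw around the y axis
glm::mat4 Rotation = glm::rotate(glm::mat4(1.0f), angle, axis);
glm::mat4 MVP = ProjectionMatrix * glm::transpose(Rotation) * ViewMatrix * ModelMatrix;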
I'm attempting to set up an orthographic projection in OpenGL, but can't work out why this triangle is not rendering (it isn't visible). I have used a perspective projection with the same code (apart from my vertex coordinates and projection matrix, of course) and it works fine. I construct the triangle vertices as:
Vertex vertices[] = { Vertex(glm::vec3(0, 600, 0.0), glm::vec2(0.0, 0.0)),
Vertex(glm::vec3(300, 0, 0.0), glm::vec2(0.5, 1.0)),
Vertex(glm::vec3(800 , 600, 0.0), glm::vec2(1.0, 0.0)) };
My camera constructor is:
Camera::Camera(const glm::vec3& pos, int width, int height) {
ortho = glm::ortho(0, width, height, 0, 0, 1000);
this->position = pos;
this->up = glm::vec3(0.0f, 1.0f, 0.0f);
this->forward = glm::vec3(0.0f, 0.0f, 1.0f);
}
I call this as:
camera = Camera(glm::vec3(0, 0, 2), window->getSize().x, window->getSize().y);
The window is 800 by 600 pixels. I upload a transform to the shader via this function:
void Shader::update(const Transform& transform, const Camera& camera) {
glm::mat4 model = camera.getProjection() * transform.getModel();
glUniformMatrix4fv(uniforms[TRANSFORM_U], 1, GL_FALSE, &model[0][0]);
}
In which camera.getProjection() is:
glm::mat4 Camera::getProjection() const {
return ortho * glm::lookAt(position, glm::vec3(0, 0, 0), up);
}
And transform.getModel() is:
glm::mat4 Transform::getModel() const {
glm::mat4 posMat = glm::translate(pos);
glm::quat rotQuat = glm::quat(glm::radians(rot));
glm::mat4 rotMat = glm::toMat4(rotQuat);
glm::mat4 scaleMat = glm::scale(scl);
return posMat * rotMat * scaleMat;
}
I suspect the problem lies in my setup of the orthographic projection rather than in my transforms, as this worked fine with a perspective projection. Can anyone see why the triangle rendered with these coordinates is not visible? I am binding my shader and uploading the projection matrix to it before rendering the mesh. If it helps, my vertex shader is:
#version 120
attribute vec3 position;
attribute vec2 texCoord;
varying vec2 texCoord0;
uniform mat4 transform;
void main()
{
gl_Position = transform * vec4(position, 1.0);
texCoord0 = texCoord;
}
For anyone interested in the issue, it was with:
ortho = glm::ortho(0, width, height, 0, 0, 1000);
where the arguments are supplied as integers, not floats. The function template was therefore instantiated for an integer type, and the integer division inside glm::ortho produced an incorrect orthographic projection matrix.
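The fix is to force the floating-point instantiation, for example with float literals or casts (assuming width and height stay int parameters):
ortho = glm::ortho(0.0f, (float)width, (float)height, 0.0f, 0.0f, 1000.0f);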
I'm trying to apply multiple rotations around the x, y, and z axes to an object using the glm::rotate method, but for some reason it only rotates around one axis and seems to ignore the other rotations completely.
Here is how I apply rotation:
glm::mat4 rotateTransform = glm::mat4(1.0f);
rotateTransform = glm::rotate(rotateTransform, this->rotation.x, glm::vec3(1, 0, 0));
rotateTransform = glm::rotate(rotateTransform, this->rotation.y, glm::vec3(0, 1, 0));
rotateTransform = glm::rotate(rotateTransform, this->rotation.z, glm::vec3(0, 0, 1));
return glm::translate(glm::mat4(1.0f), this->position) * rotateTransform * glm::scale(glm::mat4(1.0f), this->scale);
The method returns the model-to-world matrix, which I then pass to my vertex shader, where I perform the standard transformations on a vertex:
vec4 vertexPositionInModelSpace = vec4(Position, 1);
vec4 vertexInWorldSpace = gModelToWorldTransform * vertexPositionInModelSpace;
vec4 vertexInViewSpace = gWorldToViewTransform * vertexInWorldSpace;
vec4 vertexInHomogeneousClipSpace = gProjectionTransform * vertexInViewSpace;
gl_Position = vertexInHomogeneousClipSpace;
So how can you apply multiple rotations using glm::mat4 matrices?
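For what it's worth, the construction above does combine all three rotations: each glm::rotate call right-multiplies another axis rotation onto the accumulated matrix. One thing worth checking when an axis seems to be ignored is units, since current GLM expects angles in radians; if this->rotation stores degrees, it needs converting first, e.g.:
glm::mat4 rotateTransform = glm::mat4(1.0f);
// glm::rotate expects radians; convert if this->rotation is stored in degrees.
rotateTransform = glm::rotate(rotateTransform, glm::radians(this->rotation.x), glm::vec3(1, 0, 0));
rotateTransform = glm::rotate(rotateTransform, glm::radians(this->rotation.y), glm::vec3(0, 1, 0));
rotateTransform = glm::rotate(rotateTransform, glm::radians(this->rotation.z), glm::vec3(0, 0, 1));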