I am trying to rotate a quad in a 3D space. The following code shows the vertex shader utilized to draw the quad:
#version 330 core
layout (location = 0) in vec3 aPos;
layout (location = 1) in vec3 aColor;
out vec3 ourColor;
uniform mat4 transform;
uniform mat4 model;
uniform mat4 view;
uniform mat4 projection;
void main()
{
gl_Position = transform*(projection*view*model*vec4(aPos, 1.0f));
ourColor = aColor;
}
The quad is displayed when transform is not multiplied to projection*view*model*vec4(aPos,1.0f) but is not displayed when it is multiplied as above.
The code for transformation:
trans = glm::rotate(trans, (float)(glfwGetTime()), glm::vec3(0.0, 0.0, 1.0));
float scaleAmount = sin(j * 0.3);
j = j + 0.035;
trans = glm::scale(trans, glm::vec3(scaleAmount, scaleAmount, scaleAmount));
unsigned int transformLoc = glGetUniformLocation(shaderProgram, "transform");
glUniformMatrix4fv(transformLoc, 1, GL_FALSE, glm::value_ptr(trans));
glBindVertexArray(VAO);
glDrawElements(GL_TRIANGLES, 6, GL_UNSIGNED_INT, 0);
I have set the uniform in the vertex shader as well. Why is it not rotating and scaling, or even appearing, when I multiply transform with (projection*view*model*vec4(aPos,1.0f))?
Edit: I figured out that the problem is with scaling, since the code works with rotation only. The code does not work with scaling only.
Let's think only in 2D.
The quad is defined in "world" coordinates. To rotate it around some point, move the quad to that point, then rotate and scale it, and then move it back. Doing this with matrices is the same as transform * model, where transform is something like
transform = moveback * scale * rotate * movetopoint
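A minimal GLM sketch of that composition (the pivot vector, angle, and function name here are illustrative, not taken from the question's code):
#include <glm/glm.hpp>
#include <glm/gtc/matrix_transform.hpp>

// Build a transform that rotates and scales around 'pivot'.
// Read right to left: translate the pivot to the origin, rotate, scale, translate back.
glm::mat4 rotateAroundPoint(const glm::vec3 &pivot, float angle, float scaleAmount)
{
    glm::mat4 movetopoint = glm::translate(glm::mat4(1.0f), -pivot);
    glm::mat4 rotate      = glm::rotate(glm::mat4(1.0f), angle, glm::vec3(0.0f, 0.0f, 1.0f));
    glm::mat4 scale       = glm::scale(glm::mat4(1.0f), glm::vec3(scaleAmount));
    glm::mat4 moveback    = glm::translate(glm::mat4(1.0f), pivot);
    return moveback * scale * rotate * movetopoint;
}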
If scaleAmount == 0.0:
glm::mat4 trans( 1.0f );
float scaleAmount = 0.0f;
trans=glm::scale(trans,glm::vec3(scaleAmount,scaleAmount,scaleAmount));
then trans would be
{{0, 0, 0, 0}, {0, 0, 0, 0}, {0, 0, 0, 0}, {0, 0, 0, 1}}
Since sin(0.0) == 0.0, it has to be ensured that j is not equal to 0.0 when sin(j*0.3) is evaluated (and, more generally, that scaleAmount never becomes 0.0).
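If the goal is a pulsing quad that never disappears, one option (a sketch, not the tutorial's exact code) is to bias the scale away from zero:
// Keep the scale in a strictly positive range, here [0.1, 1.0],
// so the quad never collapses to zero size or flips through a negative scale.
float scaleAmount = 0.55f + 0.45f * (float)sin(j * 0.3);
j = j + 0.035;
trans = glm::scale(trans, glm::vec3(scaleAmount));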
What I want to achieve is to render many small quads with the OpenGL function glDrawArraysInstanced, with the same spacing between them. For example, please refer to the following image:
The code is as follow:
void OpenGLShowVideo::displayBySmallMatrix()
{
// Now use QOpenGLExtraFunctions instead of QOpenGLFunctions as we want to
// do more than what GL(ES) 2.0 offers.
QOpenGLExtraFunctions *f = QOpenGLContext::currentContext()->extraFunctions();
f->glClearColor(9.f/255.0f, 14.f/255.0f, 15.f/255.0f, 1);
glClear(GL_COLOR_BUFFER_BIT);
f->glViewport(0, 0, this->width(), this->height());
m_displayByMatrixProgram->bind();
f->glActiveTexture(GL_TEXTURE0 + m_acRenderToScreenTexUnit);
f->glBindTexture(GL_TEXTURE_2D, m_renderWithMaskFbo->texture());
if (m_uniformsDirty) {
m_uniformsDirty = false;
m_displayByMatrixProgram->setUniformValue(m_samplerLoc, m_acRenderToScreenTexUnit);
m_proj.setToIdentity();
m_proj.perspective(INIT_VERTICAL_ANGLE, float(this->width()) / float(this->height()), m_fNearPlane, m_fFarPlane);
m_displayByMatrixProgram->setUniformValue(m_projMatrixLoc, m_proj);
QMatrix4x4 camera;
camera.lookAt(m_eye, m_eye + m_target, QVector3D(0, 1, 0));
m_displayByMatrixProgram->setUniformValue(m_camMatrixLoc, camera);
m_world.setToIdentity();
float fOffsetZ = m_fVerticalAngle / INIT_VERTICAL_ANGLE;
m_world.translate(m_fMatrixOffsetX, m_fMatrixOffsetY, fOffsetZ);
m_proj.scale(MATRIX_INIT_SCALE_X, MATRIX_INIT_SCALE_Y, 1.0f);
m_world.rotate(180, 1, 0, 0);
QMatrix4x4 wm = m_world;
m_displayByMatrixProgram->setUniformValue(m_worldMatrixLoc, wm);
QMatrix4x4 mm;
mm.setToIdentity();
m_displayByMatrixProgram->setUniformValue(m_myMatrixLoc, mm);
m_displayByMatrixProgram->setUniformValue(m_lightPosLoc, QVector3D(0, 0, 70));
QSize tmpSize = QSize(m_viewPortWidth, m_viewPortHeight);
m_displayByMatrixProgram->setUniformValue(m_resolutionLoc, tmpSize);
int whRatioVal = m_viewPortWidth / m_viewPortHeight;
m_displayByMatrixProgram->setUniformValue(m_whRatioLoc, whRatioVal);
}
m_geometries->bindBufferForArraysInstancedDraw();
f->glDrawArraysInstanced(GL_TRIANGLE_STRIP, 0, 4, m_viewPortWidth * m_viewPortHeight);
}
And the vertex shader code is as follow:
#version 330
layout(location = 0) in vec4 vertex;
out vec3 color;
uniform mat4 mvp_matrix;
uniform mat4 projMatrix;
uniform mat4 camMatrix;
uniform mat4 worldMatrix;
uniform mat4 myMatrix;
uniform vec2 viewResolution;
uniform int whRatio;
uniform sampler2D sampler;
void main() {
int posX = gl_InstanceID % int(viewResolution.x);
int posY = gl_InstanceID / int(viewResolution.y);
if( posY % whRatio < whRatio) {
posY = gl_InstanceID / int(viewResolution.x);
}
ivec2 pos = ivec2(posX, posY);
vec2 t = vec2( pos.x * 3.0, pos.y * 3.0 );
mat4 wm = mat4(1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, t.x, t.y, 1, 1) * worldMatrix;
color = texelFetch(sampler,pos,0).rgb;
gl_Position = projMatrix * camMatrix * wm * vertex;
}
And the fragment shader is as follow:
#version 330 core
in vec3 color;
out vec4 fragColor;
void main() {
fragColor = vec4(color, 1.0);
}
However, when I move the camera farther from the screen (by changing the m_eye parameter value in camera.lookAt(m_eye, m_eye + m_target, QVector3D(0, 1, 0))), I get something like this:
The space between quads is different, and the size of the quad is also different. But when I move the camera closer to the screen, it looks much better.
I think what you're seeing there is the result of rounding the coordinates to the nearest integer pixel coordinate.
To get something that looks more even, you want to use some form of anti-aliasing. The options that spring to mind are:
Enable some sort of full screen anti-aliasing like MSAA. This is simple to enable, but can have a significant performance cost.
Put your pattern in a texture, and tile that texture over a single quad. Texture filtering and mip maps should take care of the anti-aliasing for you, and it will probably be faster to render that way as well because you only need a single quad.
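For the first option, with Qt the multisampling is typically requested on the surface format before the GL window/context is created; a minimal sketch (the sample count is illustrative):
#include <QSurfaceFormat>

// Call this before the QApplication / first GL window is created.
static void requestMsaa()
{
    QSurfaceFormat fmt = QSurfaceFormat::defaultFormat();
    fmt.setSamples(4);                     // 4x MSAA; 0 disables multisampling
    QSurfaceFormat::setDefaultFormat(fmt); // applies to windows created afterwards
}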
I'm trying to move a triangle based on time using a matrix. But it does some weird stuff:
What it should do:
move on the x-axis
What it does:
The top point of the triangle is fixed, and the other points seem to move around it in a circular motion and scale on the x and z axes (I'm still in 2D, so I don't have depth).
My C++ Code:
...
GLfloat timeValue = glfwGetTime();
GLfloat offset = (sin(timeValue * 4) / 2);
GLfloat matrix[16] = {
1, 0, 0, offset,
0, 1, 0, 0,
0, 0, 1, 0,
0, 0, 0, 1
};
GLuint uniform_m_transform = glGetUniformLocation(shader_program, "m_transform");
glUniformMatrix4fv(uniform_m_transform, 1, GL_FALSE, matrix);
...
My vertex shader:
#version 330 core
layout (location = 0) in vec3 position;
layout (location = 1) in vec3 color;
out vec3 ourColor;
uniform mat4 m_transform;
void main()
{
ourColor = color;
gl_Position = m_transform * vec4(position, 1.0);
}
I don't know what I did wrong; according to the tutorial, the matrix entry I've set to offset should change the x-translation.
Do you know what my mistake is?
You are providing a row-major matrix, so you need to specify the transpose:
glUniformMatrix4fv(uniform_m_transform, 1, GL_TRUE, matrix);
Reference: glUniform, check the transpose parameter.
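Equivalently, you could keep GL_FALSE and lay the matrix out in column-major order (the layout glUniformMatrix4fv expects by default), putting the translation in the last column:
// Column-major layout: each group of four values is one column of the matrix.
GLfloat matrix[16] = {
    1, 0, 0, 0,        // column 0
    0, 1, 0, 0,        // column 1
    0, 0, 1, 0,        // column 2
    offset, 0, 0, 1    // column 3: translation (x, y, z) and w
};
glUniformMatrix4fv(uniform_m_transform, 1, GL_FALSE, matrix);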
I'm using OpenGL 3.3 with GLFW.
The problem is that GL_LINE_STRIP and GL_LINE_LOOP give the same result.
Here is the array of 2D coordinates:
GLfloat vertices[] =
{
0, 0,
1, 1,
1, 2,
2, 2,
3, 1,
};
The attribute pointer:
// Position attribute 2D
glVertexAttribPointer(0, 2, GL_FLOAT, GL_FALSE, 2 * sizeof(GLfloat), (GLvoid*)0);
glEnableVertexAttribArray(0);
And finally:
glDrawArrays(GL_LINE_STRIP, 0, sizeof(vertices)/4);
Vertex shader:
#version 330 core
layout (location = 0) in vec2 position;
layout (location = 1) in vec3 color;
out vec3 ourColor;
uniform mat4 model;
uniform mat4 view;
uniform mat4 projection;
void main()
{
gl_Position = projection * view * model * vec4(position, 0.0f, 1.0f);
ourColor = color;
}
Fragment shader:
#version 330 core
in vec3 ourColor;
out vec3 color;
void main()
{
color = vec3(ourColor);
}
The color attribute is disabled (the lines are black and visible).
Any idea?
You have only 5 pairs of floats, so 5 vertices. The total size of your array is 10 floats times 4 bytes each, so 40 bytes.
Your equation for the count, 40/4, gives 10. sizeof(array) / (sizeof(array[0]) * dimensionality) would be the correct equation there.
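Applied to the snippet above, a sketch of the corrected call (the divisor 2 is the number of floats per 2D vertex):
// sizeof(vertices) = 40, sizeof(vertices[0]) = 4, 2 floats per vertex -> count = 5
glDrawArrays(GL_LINE_STRIP, 0, sizeof(vertices) / (sizeof(vertices[0]) * 2));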
As a test, I created a simple quad. Here are its attributes:
Vertex vertices[] =
{
// Positions Normals
{vec3(-1,-1, 0), vec3(-1,-1, 1)}, // v0
{vec3( 1,-1, 0), vec3( 1,-1, 1)}, // v1
{vec3(-1, 1, 0), vec3(-1, 1, 1)}, // v2
{vec3( 1, 1, 0), vec3( 1, 1, 1)}, // v3
};
And I put it in my world space at (0.0, 0.0, -9.5). Then I put my point light position at (0.0, 0.0, -8.0). My camera is at the origin (0.0, 0.0, 0.0). When I run my program, this works as expected:
But then, when I replace this quad with 9 scaled-down quads, all placed at -9.5 on Z (in other words, all parallel to each other on Z), my diffuse lighting gets a little weird.
It looks like the corners are showing too much lighting, breaking the nice diffuse circle that we see on a regular quad.
Here is my shader program:
precision mediump int;
precision mediump float;
varying vec3 v_position;
varying vec3 v_normal;
#if defined(VERTEX)
uniform mat4 u_mvpMatrix;
uniform mat4 u_mvMatrix;
uniform mat3 u_normalMatrix;
attribute vec4 a_position;
attribute vec3 a_normal;
void main()
{
vec4 position = u_mvMatrix * a_position;
v_position = position.xyz / position.w;
v_normal = normalize(u_normalMatrix * a_normal);
gl_Position = u_mvpMatrix * a_position;
}
#endif // VERTEX
#if defined(FRAGMENT)
uniform vec3 u_pointLightPosition;
void main()
{
vec3 viewDir = normalize(-v_position);
vec3 normal = normalize(v_normal);
vec3 lightPosition = u_pointLightPosition - v_position;
vec3 pointLightDir = normalize(lightPosition);
float distance = length(lightPosition);
float pointLightAttenuation = 1.0 / (1.0 + (0.25 * distance * distance));
float diffuseTerm = max(dot(pointLightDir, normal), 0.15);
gl_FragColor = vec4(diffuseTerm * pointLightAttenuation);
}
#endif // FRAGMENT
My uniforms are uploaded as followed (I'm using GLM):
const mat4 &view_matrix = getViewMatrix();
mat4 mv_matrix = view_matrix * getModelMatrix();
mat4 mvp_matrix = getProjectionMatrix() * mv_matrix;
mat3 normal_matrix = inverseTranspose(mat3(mv_matrix));
vec3 pointLightPos = vec3(view_matrix * vec4(getPointLightPos(), 1.0f));
glUniformMatrix4fv( mvpMatrixUniformID, 1, GL_FALSE, (GLfloat*)&mvp_matrix);
glUniformMatrix4fv( vpMatrixUniformID, 1, GL_FALSE, (GLfloat*)&mv_matrix);
glUniformMatrix3fv(normalMatrixUniformID, 1, GL_FALSE, (GLfloat*)&normal_matrix);
glUniform3f(pointLightPosUniformID, pointLightPos.x, pointLightPos.y, pointLightPos.z);
Am I doing anything wrong?
Thanks!
Without going too much into your code, I think everything is working just fine. I see a very similar result with a quick Blender setup:
The issue is the interpolation of the normal doesn't produce a spherical bump.
It ends up being a patch like this (I simply subdivided a smooth shaded cube)...
If you want a more spherical bump, you could generate the normals implicitly in a fragment shader (for example as is done here (bottom image)), use a normal map, or use more tessellated geometry such as an actual sphere.
I'm having an issue drawing multiple point lights in my scene. I am working on a simple maze-style game in OpenGL, where the maze is randomly generated. Each "room" in the maze is represented by a Room struct, like so:
struct Room
{
int x, z;
bool already_visited, n, s, e, w;
GLuint vertex_buffer, texture, uv_buffer, normal_buffer;
std::vector<glm::vec3>vertices, normals;
std::vector<glm::vec2>uvs;
glm::vec3 light_pos; //Stores the position of a light in the room
};
Each room has a light in it; the position of this light is stored in light_pos. This light is used in a simple per-vertex shader, like so:
layout(location = 0) in vec3 pos;
layout(location = 1) in vec2 uv_coords;
layout(location = 2) in vec3 normal;
uniform mat4 mvpMatrix;
uniform mat4 mvMatrix;
uniform vec3 lightpos;
out vec2 vs_uv;
out vec3 vs_normal;
out vec3 color;
void main()
{
gl_Position = mvpMatrix * vec4(pos,1.0);
vs_normal = normal;
vs_uv = uv_coords;
vec3 lightVector = normalize(lightpos - pos);
float diffuse = clamp(dot(normal,lightVector),0.0,1.0);
color = vec3(diffuse,diffuse,diffuse);
}
My fragment shader looks like this (ignore the "vs_normal", it is unused for now):
in vec2 vs_uv;
in vec3 vs_normal;
in vec3 color;
uniform sampler2D tex;
out vec4 frag_color;
void main()
{
frag_color = vec4(color,1.0) * texture(tex,vs_uv).rgba;
}
And my drawing code looks like this:
mat4 mvMatrix = view_matrix*model_matrix;
mat4 mvpMatrix = projection_matrix * mvMatrix;
glBindVertexArray(vertexBufferObjectID);
glUseProgram(shaderProgram);
glEnableVertexAttribArray(0);
glEnableVertexAttribArray(1);
glEnableVertexAttribArray(2);
for (int x = 0; x < NUM_ROOMS_X; x++)
{
for (int z = 0; z < NUM_ROOMS_Z; z++)
{
//int x = int(std::round(position.x / ROOM_SIZE_X_MAX));
//int z = int(std::round(position.z / ROOM_SIZE_Z_MAX));
Room rm = room_array[x][z];
glBindBuffer(GL_ARRAY_BUFFER, rm.vertex_buffer);
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 0, (void*) 0);
glBindBuffer(GL_ARRAY_BUFFER, rm.uv_buffer);
glVertexAttribPointer(1, 2, GL_FLOAT, GL_FALSE, 0, (void*) 0);
glBindBuffer(GL_ARRAY_BUFFER, rm.normal_buffer);
glVertexAttribPointer(2, 3, GL_FLOAT, GL_FALSE, 0, (void*) 0);
glUniformMatrix4fv(mvpMatrixID, 1, GL_FALSE, &mvpMatrix[0][0]);
glUniformMatrix4fv(mvMatrixID, 1, GL_FALSE, &mvMatrix[0][0]);
glUniform3fv(light_ID, 3, &rm.light_pos[0]); //Here is where I'm setting the new light position. It looks like this is ignored
glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_2D, rm.texture);
glUniform1i(texture_ID, 0);
glDrawArrays(GL_QUADS, 0, rm.vertices.size());
}
}
glUseProgram(0);
glBindVertexArray(0);
glDisableVertexAttribArray(0);
glDisableVertexAttribArray(1);
glDisableVertexAttribArray(2);
glBindTexture(GL_TEXTURE_2D, 0);
However, here is what the result looks like (I've modified my drawing code to draw a box where each light is located, and I've circled the room at position (0,0)):
http://imgur.com/w4uPMD6
As you can see, it looks like only the light at position (0,0) affects any of the rooms on the map, the other lights are simply ignored. I know that the lights are positioned correctly, because the boxes I use to show the positions are correct. I think even though I'm setting the new light_pos, it isn't going through for some reason. Any ideas?
One thing that you are doing, which is not very common, is to pass the light position as a vertex attribute. Optimally, you should pass it to the shader as a uniform variable, just as you do with the matrices. But I doubt that is the problem here.
I believe your problem is that you are doing the light calculations in different spaces. The vertexes of the surfaces that you draw are in object/model space, while I'm guessing, your light is located at a point defined in world space. Try multiplying your light position by the inverse of the model matrix you are applying to the vertexes. I'm not familiar with GLM, but I figure there must be an inverse() function in it:
vec3 light_pos_object_space = vec3(inverse(model_matrix) * vec4(rm.light_pos, 1.0f));
glVertexAttrib3fv(light_ID, &light_pos_object_space[0]);
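If the light position stays a uniform (as in the question's drawing code), the same idea would look roughly like this (a sketch, assuming the GLM headers are included):
// Transform the world-space light position into the room's object space,
// then upload it as a single vec3 uniform.
glm::vec3 light_pos_object_space =
    glm::vec3(glm::inverse(model_matrix) * glm::vec4(rm.light_pos, 1.0f));
glUniform3fv(light_ID, 1, &light_pos_object_space[0]);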
Figured out my problem. I was calling this function:
glUniform3fv(light_ID, 3, &rm.light_pos[0]);
When I should have been calling this:
glUniform3fv(light_ID, 1, &rm.light_pos[0]);
The second argument of glUniform3fv is the number of array elements to upload; for a single (non-array) vec3 uniform it must be 1. Passing 3 makes the call fail with GL_INVALID_OPERATION, so the light position uniform was never actually being updated.