I have this vertex shader, which applies the projection to the given position and passes the UV and color through to the fragment shader:
#version 330 core
layout (location = 0) in vec2 in_pos;
layout (location = 1) in vec2 in_texUV;
layout (location = 2) in vec4 in_color;
out vec2 ex_texUV;
out vec4 ex_color;
uniform mat4 projection;
void main()
{
gl_Position = vec4(in_pos, 0.0, 1.0) * projection;
ex_texUV = in_texUV;
ex_color = in_color;
}
Edit: The fragment shader is shown here, and all uniforms are properly set:
#version 330 core
in vec2 in_texUV;
in vec4 in_color;
out vec4 out_color;
uniform vec2 screenSize;
uniform vec3 transparentColour;
uniform sampler2D sprite;
uniform sampler2D palette;
uniform int paletteLines[0x100];
void main()
{
    if (in_color.a == 0.0) {
        vec4 coord = gl_FragCoord - 0.5;
        vec2 screenPos;
        screenPos.x = coord.x * screenSize.x;
        screenPos.y = coord.y * screenSize.y;
        int id = paletteLines[int(screenPos.y)];
        int index = int(texture2D(sprite, in_texUV).r * 255);
        if (index == 0)
            discard;
        vec2 palvec;
        palvec.x = index;
        palvec.y = id;
        out_color = texture(palette, palvec);
    }
}
(The projection uniform is properly set, as verified with NVIDIA Nsight.)
Both the vertex and fragment shaders have been reduced to simple passthroughs (even setting the fragment shader output to a constant vec4(1.0, 1.0, 1.0, 1.0)), but nothing is ever shown.
To set up for the shader, I first create the VAO and VBO to pass data from a list of DrawVertex:
glGenVertexArrays(1, &VAO);
glBindVertexArray(VAO);
glGenBuffers(2, &GFXVBO);
glBindBuffer(GL_ARRAY_BUFFER, GFXVBO);
glVertexAttribPointer(0, 2, GL_SHORT, GL_FALSE, sizeof(DrawVertex), 0);
glEnableVertexAttribArray(0);
glVertexAttribPointer(1, 2, GL_SHORT, GL_FALSE, sizeof(DrawVertex), (void *)(sizeof(short) * 2));
glEnableVertexAttribArray(1);
glVertexAttribPointer(2, 4, GL_UNSIGNED_BYTE, GL_FALSE, sizeof(DrawVertex), (void *)(sizeof(short) * 4));
glEnableVertexAttribArray(2);
and then draw using the code below (the VAO and VBO are ensured to be bound, and gfxShader is just a helper to use a program):
gfxShader.use();
// [setup the program, change uniforms as necessary]
// lastRenderCount is how many to render
// gfxPolyList is the list of DrawVertex
glBufferData(GL_ARRAY_BUFFER, lastRenderCount * sizeof(DrawVertex), gfxPolyList, GL_DYNAMIC_DRAW);
glDrawArrays(GL_TRIANGLES, 0, lastRenderCount);
gfxShader.stop();
However, although RenderDoc shows that the input is being passed through, the output shows nothing at all. On top of that, NVIDIA Nsight says that no fragments are being drawn. Where could I be going wrong?
For context, here is struct DrawVertex:
struct DrawVertex {
short x;
short y;
short u;
short v;
Color color = 0; //0xRRGGBBAA
};
gl_Position = vec4(in_pos, 0.0, 1.0) * projection;
This is wrong. Matrix multiplication is not commutative. Should be:
gl_Position = projection * vec4(in_pos, 0.0, 1.0);
If you don't believe me, try this:
glm::mat4 proj = {
1.0f, 0.0f, 3.0f, 5.0f,
0.0f, 1.0f, 3.0f, 6.0f,
0.0f, 0.0f, 1.0f, 0.0f,
0.0f, 0.0f, 3.0f, 1.0f
};
glm::vec4 vec = { 0.0f, 1.0f, 0.0f, 1.0f };
glm::vec4 result1 = proj * vec;
glm::vec4 result2 = vec * proj;
std::cout << "result1: " << result1.x << result1.y << result1.z << result1.w << '\n';
std::cout << "result2: " << result2.x << result2.y << result2.z << result2.w << '\n';
// Output:
result1: 0167
result2: 5701
https://learnopengl.com/Getting-started/Transformations
Edit: Your fragment shader only writes out_color when in_color.a == 0.0. Are you sure this is correct? Maybe you meant to use != instead.
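If that is what you meant, the change is just the comparison (a minimal sketch; whether != is right depends on what the alpha channel is supposed to signal in your vertex data):
if (in_color.a != 0.0) {
    // ... palette lookup as before ...
}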
I was wondering how I would go about programming point light shadows with deferred rendering?
The point light shadows just don't show up for me. I think it has to do with the following line: float shadow = calculate_shadows(FragPos); For directional shadows I multiply the frag position by a lightSpaceMatrix (lightView * lightProj) and that worked, but for point shadows I don't have a lightSpaceMatrix to use.
light fragment shader
#version 420 core
out vec4 FragColor;
in vec2 _texcoord;
uniform vec3 camera_pos;
uniform sampler2D gPosition;
uniform sampler2D gNormal;
uniform sampler2D gAlbedo;
uniform sampler2D gSpecular;
uniform sampler2D gMetalness;
uniform samplerCube gPointShadowmap;
uniform mat4 viewMatrix;
uniform vec3 lightPos;
vec3 FragPos;
vec3 Normal;
float calculate_shadows(vec3 light_space_pos)
{
// perform perspective divide
vec3 fragToLight = light_space_pos - vec3(0.0f, 0.0f, 0.0f);
// get closest depth value from light's perspective (using [0,1] range fragPosLight as coords)
float closestDepth = texture(gPointShadowmap, fragToLight).r;
// it is currently in linear range between [0,1], let's re-transform it back to original depth value
closestDepth *= 25.0f;
// now get current linear depth as the length between the fragment and light position
float currentDepth = length(fragToLight);
// test for shadows
float bias = 0.05; // we use a much larger bias since depth is now in [near_plane, far_plane] range
float shadow = currentDepth - bias > closestDepth ? 1.0 : 0.0;
//FragColor = vec4(vec3(closestDepth / 25.0f), 1.0);
return shadow;
}
void main(void)
{
FragPos = texture(gPosition, _texcoord).rgb;
Normal = texture(gNormal, _texcoord).rgb;
vec3 Diffuse = texture(gAlbedo, _texcoord).rgb;
float Emissive = texture(gAlbedo, _texcoord).a;
vec3 Specular = texture(gAlbedo, _texcoord).rgb;
vec3 Metalness = texture(gMetalness, _texcoord).rgb; // Reflection pass
float AmbientOcclusion = texture(gSsao, _texcoord).r;
vec3 lightColor = vec3(0.3);
// ambient
vec3 ambient = 0.3 * Diffuse;
// diffuse
vec3 lightDir = normalize(vec3(0.0, 0.0, 0.0) - FragPos);
float diff = max(dot(lightDir, Normal), 0.0);
vec3 diffuse = diff * lightColor;
// specular
vec3 viewDir = normalize(camera_pos - FragPos);
vec3 reflectDir = reflect(-lightDir, Normal);
float spec = 0.0;
vec3 halfwayDir = normalize(lightDir + viewDir);
spec = pow(max(dot(Normal, halfwayDir), 0.0), 64.0);
vec3 specular = spec * lightColor;
// calculate shadow
float shadow = calculate_shadows(FragPos);
vec3 lighting = (ambient + (1.0 - shadow) * (diffuse + specular));
FragColor = vec4(lighting, 1.0);
}
pointshadows vertex shader
#version 330 core
layout(location = 0) in vec3 position;
uniform mat4 model;
void main(void)
{
gl_Position = model * vec4(position, 1.0);
}
pointshadows fragment shader
#version 330 core
in vec4 FragPos;
void main(void)
{
float lightDistance = length(FragPos.xyz - vec3(0.0, 3.0, 0.0));
// map to [0;1] range by dividing by far_plane
lightDistance = lightDistance / 25.0;
// write this as modified depth
gl_FragDepth = lightDistance;
}
pointshadows geometry shader
#version 330 core
layout (triangles) in;
layout (triangle_strip, max_vertices = 18) out;
uniform mat4 shadowMatrices[6];
out vec4 FragPos;
void main(void)
{
    for(int face = 0; face < 6; ++face)
    {
        gl_Layer = face; // built-in variable that specifies to which face we render.
        for(int i = 0; i < 3; ++i) // for each triangle's vertices
        {
            FragPos = gl_in[i].gl_Position;
            gl_Position = shadowMatrices[face] * FragPos;
            EmitVertex();
        }
        EndPrimitive();
    }
}
Temp PointShadow class
#ifndef __POINTSHADOWPASS
#define __POINTSHADOWPASS
#include "Content.h"
class PointShadowPass
{
private:
static unsigned int _shadow_fbo;
public:
static unsigned int _shadow_texture;
static glm::vec3 lightPos;
static std::vector<glm::mat4> shadowTransforms;
PointShadowPass() {}
~PointShadowPass() {}
inline static void Initialise()
{
lightPos = glm::vec3(0.0f, 0.0f, 0.0f);
glGenFramebuffers(1, &_shadow_fbo);
glGenTextures(1, &_shadow_texture);
glBindTexture(GL_TEXTURE_2D, _shadow_texture);
for (unsigned int i = 0; i < 6; i++)
glTexImage2D(GL_TEXTURE_CUBE_MAP_POSITIVE_X + i, 0, GL_DEPTH_COMPONENT, 1024, 1024, 0, GL_DEPTH_COMPONENT, GL_FLOAT, NULL);
glTexParameteri(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_WRAP_R, GL_CLAMP_TO_EDGE);
glBindFramebuffer(GL_FRAMEBUFFER, _shadow_fbo);
glFramebufferTexture(GL_FRAMEBUFFER, GL_DEPTH_ATTACHMENT, _shadow_texture, 0);
glDrawBuffer(GL_NONE);
glReadBuffer(GL_NONE);
glBindFramebuffer(GL_FRAMEBUFFER, 0);
}
inline static void Render(unsigned int pointshadow_program, Camera* camera, std::vector<Actor*> _actors)
{
glDisable(GL_BLEND); // Disable blending for opaque materials
glEnable(GL_DEPTH_TEST); // Enable depth test
glm::mat4 model;
glm::mat4 shadowProj = glm::perspective(glm::radians(90.0f), (float)1024 / (float)1024, 1.0f, 25.0f);
shadowTransforms.push_back(shadowProj * glm::lookAt(lightPos, lightPos + glm::vec3(1.0f, 0.0f, 0.0f), glm::vec3(0.0f, -1.0f, 0.0f)));
shadowTransforms.push_back(shadowProj * glm::lookAt(lightPos, lightPos + glm::vec3(-1.0f, 0.0f, 0.0f), glm::vec3(0.0f, -1.0f, 0.0f)));
shadowTransforms.push_back(shadowProj * glm::lookAt(lightPos, lightPos + glm::vec3(0.0f, 1.0f, 0.0f), glm::vec3(0.0f, 0.0f, 1.0f)));
shadowTransforms.push_back(shadowProj * glm::lookAt(lightPos, lightPos + glm::vec3(0.0f, -1.0f, 0.0f), glm::vec3(0.0f, 0.0f, -1.0f)));
shadowTransforms.push_back(shadowProj * glm::lookAt(lightPos, lightPos + glm::vec3(0.0f, 0.0f, 1.0f), glm::vec3(0.0f, -1.0f, 0.0f)));
shadowTransforms.push_back(shadowProj * glm::lookAt(lightPos, lightPos + glm::vec3(0.0f, 0.0f, -1.0f), glm::vec3(0.0f, -1.0f, 0.0f)));
glViewport(0, 0, 1024, 1024);
glBindFramebuffer(GL_FRAMEBUFFER, _shadow_fbo);
glClear(GL_DEPTH_BUFFER_BIT);
glUseProgram(pointshadow_program);
for (unsigned int i = 0; i < 6; ++i)
glUniformMatrix4fv(glGetUniformLocation(pointshadow_program, ("shadowMatrices[" + std::to_string(i) + "]").c_str()), 1, GL_FALSE, glm::value_ptr(shadowTransforms[i]));
for (unsigned int i = 0; i < _actors.size(); i++)
{
model = _actors[i]->GetModelMatrix() * camera->GetViewMatrix();
glUniformMatrix4fv(glGetUniformLocation(pointshadow_program, "model"), 1, GL_FALSE, glm::value_ptr(model)); // set the model matrix uniform
_actors[i]->Render();
}
glBindFramebuffer(GL_FRAMEBUFFER, 0);
}
};
std::vector<glm::mat4> PointShadowPass::shadowTransforms;
unsigned int PointShadowPass::_shadow_fbo;
unsigned int PointShadowPass::_shadow_texture;
glm::vec3 PointShadowPass::lightPos;
#endif
I managed to get something showing (shadows move with camera rotation)
Reading your comments, it seems you have some misconceptions about what information you can have in deferred rendering.
You said that all the coordinates have to be in screen space, which isn't true. In deferred rendering you have a G-buffer, and you can put whatever kind of information you want into it. To get world-position information you have two choices: either you store a world-position buffer, so you know where each of your fragments is in the world, or you compute that information back from the depth buffer and the camera (view and projection) matrices.
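For the second option, here is a minimal sketch of what the reconstruction can look like (this is not from your code; it assumes a uniform invViewProj holding the inverse of projection * view, the screen-space texture coordinate, a depth value sampled from the depth buffer, and the default [0, 1] depth range):
vec3 worldPosFromDepth(vec2 texcoord, float depth, mat4 invViewProj)
{
    // back-project from NDC (x, y and depth remapped to [-1, 1]) into world space
    vec4 ndc = vec4(texcoord * 2.0 - 1.0, depth * 2.0 - 1.0, 1.0);
    vec4 world = invViewProj * ndc;
    return world.xyz / world.w; // perspective divide
}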
If you have a point-shadow calculation that works in forward rendering, you can do the same in deferred rendering: in the shader that does all the lighting calculations you need the shadow cubemap and the light position, and you do the calculation the way you used to.
EDIT:
Looking at your code for calculate_shadows(vec3 light_space_pos): in deferred rendering you don't pass it the position in light space, but the position in world space. So the function should be:
calculate_shadows(vec3 frag_world_pos)
For the first line you have vec3 fragToLight = light_space_pos - vec3(0.0f, 0.0f, 0.0f);
which should be vec3 fragToLight = frag_world_pos - lightPos;
Or you can do this calculation before calling the function. Either way, you need the position of your point light to calculate the direction and distance between your fragment and the light.
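Putting that together, a sketch of the adjusted function, reusing the lightPos uniform and the 25.0 far plane you already have:
float calculate_shadows(vec3 frag_world_pos)
{
    // direction and distance from the light to the fragment, both in world space
    vec3 fragToLight = frag_world_pos - lightPos;
    // closest depth stored in the cubemap, rescaled from [0, 1] back to world units
    float closestDepth = texture(gPointShadowmap, fragToLight).r * 25.0f;
    float currentDepth = length(fragToLight);
    float bias = 0.05;
    return currentDepth - bias > closestDepth ? 1.0 : 0.0;
}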
For some reason, the quad that I'm rendering doesn't show; it only renders a black screen. I've checked the code multiple times and couldn't find the problem, so maybe someone can see what I don't see!
The purpose is to have a quad that follows the camera. Right now I just want to show the quad with a single color, but all I get is a black screen. I am using QOpenGLWindow and QOpenGLFunctions.
void CSLFWindow::renderQuad()
{
float x0 = -(float)1.f, y0 = -(float)1.f;
float x1 = (float)1.f, y1 = (float)1.f;
const QVector3D vertices[4] = {
QVector3D( x0, y0, 0.0f),
QVector3D( x0, y1, 0.0f),
QVector3D( x1, y1, 0.0f),
QVector3D( x1, y0, 0.0f)
};
const QVector3D normals[4] = {
QVector3D(0.0f, 0.0f,1.0f),
QVector3D(0.0f, 0.0f,1.0f),
QVector3D(0.0f, 0.0f,1.0f),
QVector3D(0.0f, 0.0f,1.0f)
};
const QVector2D texcoords[4] = {
QVector2D(0.0f, 1.0f),
QVector2D(0.0f, 0.0f),
QVector2D(1.0f, 0.0f),
QVector2D(1.0f, 1.0f)
};
const unsigned int indices[4] = { 3, 2, 1, 0 };
m_shaderProgram.enableAttributeArray("vVertices");
m_shaderProgram.enableAttributeArray("vTexCoords");
m_shaderProgram.enableAttributeArray("vNormals");
m_shaderProgram.setAttributeArray("vVertices", vertices);
m_shaderProgram.setAttributeArray("vTexCoords", texcoords);
m_shaderProgram.setAttributeArray("vNormals", normals);
glDrawElements(GL_QUADS, 4, GL_UNSIGNED_INT, indices);
m_shaderProgram.disableAttributeArray("vVertices");
m_shaderProgram.disableAttributeArray("vTexCoords");
m_shaderProgram.disableAttributeArray("vNormals");
}
and the rendering:
void CSLFWindow::paintGL()
{
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
m_shaderProgram.bind();
m_model.setToIdentity();
m_view = m_camera.toMatrix();
QMatrix4x4 modelMatrix = m_model ;
QMatrix4x4 modelViewMatrix = m_view * modelMatrix;
QMatrix4x4 mvp = m_projection * modelViewMatrix;
m_shaderProgram.setUniformValue("MV", modelViewMatrix);
m_shaderProgram.setUniformValue("MVP", mvp);
m_shaderProgram.setUniformValue("P", m_projection);
renderQuad();
m_shaderProgram.release();
}
I'm setting the projection matrix as:
m_view.setToIdentity();
float aspect = h / w;
m_projection.setToIdentity();
m_projection.perspective(
m_fov,
aspect,
0.1f,
1000.0f);
here are my camera parameters:
m_cameraPos = QVector3D(0.0f, 0.0f, 3.0f);
m_cameraFront = QVector3D(0.0f, 0.0f, -1.0f);
m_cameraUp = QVector3D(0.0f, 1.0f, 0.0f);
QMatrix4x4 toMatrix()
{
QMatrix4x4 vMatrix;
vMatrix.setToIdentity();
vMatrix.lookAt(m_cameraPos, QVector3D(0.0f, 0.0f, 0.0f),
m_cameraUp);
return vMatrix;
}
and here is my vertex shader:
#version 330 core
layout (location = 0)in vec3 vVertices;
layout (location = 1)in vec2 vTexCoords;
layout (location = 2)in vec3 vNormals;
uniform mat4 MV;
uniform mat4 MVP;
uniform mat4 P;
out vec2 FragTexCoord;
out vec3 FragNormals;
void main()
{
FragTexCoord = vTexCoords;
FragNormals = vNormals;
gl_Position = MVP * vec4(vVertices,1);
}
and my fragment shader:
#version 330 core
out vec4 fragmentColor;
in vec2 FragTexCoord;
in vec3 FragNormals;
void main()
{
fragmentColor = vec4(1.0,1.0,1.0,1.0);
}
I found the problem was in setting the surface format! When I remove format.setProfile(QSurfaceFormat::CoreProfile); I see the quad, but I don't understand why that happens.
Change GL_QUADS to GL_TRIANGLE_FAN and it will work:
glDrawElements(GL_TRIANGLE_FAN, 4, GL_UNSIGNED_INT, indices);
GL_QUADS is deprecated and has been removed in the core profile.
See further: Legacy OpenGL - Removed functionality, OpenGL Context, and Forward compatibility.
Hey, I can't add a comment, but could you try changing the vertex shader like this:
void main()
{
FragTexCoord = vTexCoords;
FragNormals = vNormals;
gl_Position = vec4(vVertices,1);
}
and let me know if you see anything. If you don't see anything, try changing the order of the indices.
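For example (just a guess at what that would look like; reversing the winding only matters if face culling is enabled):
const unsigned int indices[4] = { 0, 1, 2, 3 };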
I am trying to incorporate both normal mapping and cube mapping into a single program, but I am having trouble getting them to render correctly. I am working on a simple exercise to help me with that before moving on to something more complex. I am trying to render both of these objects in one program. They both have different textures; the torus uses cube mapping while the wall uses normal mapping.
These are what they are supposed to look like individually:
Currently, this is what I've got. The torus renders correctly but the wall's textures don't appear.
I am using two separate shader programs for this, and it is my first time using more than one shader program in a single application. I suspect my issue could be with the initialising of shader variables, or something really obvious that I'm just not getting. I am using two different Vertex structs for the objects.
struct Vertex2
{
GLfloat position[3];
GLfloat normal[3];
GLfloat tangent[3];
GLfloat texCoord[2];
};
Vertex2 g_vertices[] = {
// Front: triangle 1
// vertex 1
-1.0f, 1.0f, 0.0f, // position
0.0f, 0.0f, 1.0f, // normal
1.0f, 0.0f, 0.0f, // tangent
0.0f, 1.0f, // texture coordinate
// vertex 2
-1.0f, -1.0f, 0.0f, // position
0.0f, 0.0f, 1.0f, // normal
1.0f, 0.0f, 0.0f, // tangent
0.0f, 0.0f, // texture coordinate
// vertex 3
1.0f, 1.0f, 0.0f, // position
0.0f, 0.0f, 1.0f, // normal
1.0f, 0.0f, 0.0f, // tangent
1.0f, 1.0f, // texture coordinate
// triangle 2
// vertex 1
1.0f, 1.0f, 0.0f, // position
0.0f, 0.0f, 1.0f, // normal
1.0f, 0.0f, 0.0f, // tangent
1.0f, 1.0f, // texture coordinate
// vertex 2
-1.0f, -1.0f, 0.0f, // position
0.0f, 0.0f, 1.0f, // normal
1.0f, 0.0f, 0.0f, // tangent
0.0f, 0.0f, // texture coordinate
// vertex 3
1.0f, -1.0f, 0.0f, // position
0.0f, 0.0f, 1.0f, // normal
1.0f, 0.0f, 0.0f, // tangent
1.0f, 0.0f, // texture coordinate
};
Main.cpp init function:
static void init(GLFWwindow* window)
{
glEnable(GL_DEPTH_TEST); // enable depth buffer test
glEnable(GL_TEXTURE_2D);
// read the image data
GLint imageWidth[5]; //image width info
GLint imageHeight[5]; //image height info
g_texImage[FRONT] = readBitmapRGBImage("images/cm_front.bmp", &imageWidth[0], &imageHeight[0]);
g_texImage[BACK] = readBitmapRGBImage("images/cm_back.bmp", &imageWidth[0], &imageHeight[0]);
g_texImage[LEFT] = readBitmapRGBImage("images/cm_left.bmp", &imageWidth[0], &imageHeight[0]);
g_texImage[RIGHT] = readBitmapRGBImage("images/cm_right.bmp", &imageWidth[0], &imageHeight[0]);
g_texImage[TOP] = readBitmapRGBImage("images/cm_top.bmp", &imageWidth[0], &imageHeight[0]);
g_texImage[BOTTOM] = readBitmapRGBImage("images/cm_bottom.bmp", &imageWidth[0], &imageHeight[0]);
g_texImage[6] = readBitmapRGBImage("images/Fieldstone.bmp", &imageWidth[1], &imageHeight[1]);
g_texImage[7] = readBitmapRGBImage("images/FieldstoneBumpDOT3.bmp", &imageWidth[2], &imageHeight[2]);
glGenTextures(10, g_textureID);
// ...
// create and compile our GLSL program from the shader files
g_shaderProgramID[0] = loadShaders("CubeEnvMapVS.vert", "CubeEnvMapFS.frag");
g_shaderProgramID[1] = loadShaders("NormalMappingVS.vert", "NormalMappingFS.frag");
// find the location of shader variables
for (int i = 0; i < 2; i++)
{
positionIndex[i] = glGetAttribLocation(g_shaderProgramID[i], "aPosition");
normalIndex[i] = glGetAttribLocation(g_shaderProgramID[i], "aNormal");
texCoordIndex[i] = glGetAttribLocation(g_shaderProgramID[i], "aTexCoord");
g_MVP_Index[i] = glGetUniformLocation(g_shaderProgramID[i], "uModelViewProjectionMatrix");
g_M_Index[i] = glGetUniformLocation(g_shaderProgramID[i], "uModelMatrix");
g_viewPointIndex[i] = glGetUniformLocation(g_shaderProgramID[i], "uViewPoint");
g_lightPositionIndex[i] = glGetUniformLocation(g_shaderProgramID[i], "uLightingProperties.position");
g_lightAmbientIndex[i] = glGetUniformLocation(g_shaderProgramID[i], "uLightingProperties.ambient");
g_lightDiffuseIndex[i] = glGetUniformLocation(g_shaderProgramID[i], "uLightingProperties.diffuse");
g_lightSpecularIndex[i] = glGetUniformLocation(g_shaderProgramID[i], "uLightingProperties.specular");
g_lightShininessIndex[i] = glGetUniformLocation(g_shaderProgramID[i], "uLightingProperties.shininess");
g_materialAmbientIndex[i] = glGetUniformLocation(g_shaderProgramID[i], "uMaterialProperties.ambient");
g_materialDiffuseIndex[i] = glGetUniformLocation(g_shaderProgramID[i], "uMaterialProperties.diffuse");
g_materialSpecularIndex[i] = glGetUniformLocation(g_shaderProgramID[i], "uMaterialProperties.specular");
}
g_envMapSamplerIndex = glGetUniformLocation(g_shaderProgramID[0], "uEnvironmentMap");
tangentIndex = glGetAttribLocation(g_shaderProgramID[1], "aTangent");
g_texSamplerIndex = glGetUniformLocation(g_shaderProgramID[1], "uTextureSampler");
g_normalSamplerIndex = glGetUniformLocation(g_shaderProgramID[1], "uNormalSampler");
// initialise model matrix to the identity matrix
g_mm_torus = glm::mat4(1.0f);
g_mm_wall = mat4(1.0f);
// ...
// load mesh
// load_mesh("models/sphere.obj");
load_mesh("models/torus.obj");
// ...
// generate identifier for VBOs and copy data to GPU
glGenBuffers(5, g_VBO);
glBindBuffer(GL_ARRAY_BUFFER, g_VBO[0]);
glBufferData(GL_ARRAY_BUFFER, sizeof(Vertex)*g_numberOfVertices, g_pMeshVertices, GL_STATIC_DRAW);
// generate identifier for IBO and copy data to GPU
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, g_VBO[1]);
glBufferData(GL_ELEMENT_ARRAY_BUFFER, sizeof(GLint) * 3 * g_numberOfFaces, g_pMeshIndices, GL_STATIC_DRAW);
// generate identifiers for VAO
glGenVertexArrays(5, g_VAO);
// create VAO and specify VBO data
glBindVertexArray(g_VAO[0]);
glBindBuffer(GL_ARRAY_BUFFER, g_VBO[0]);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, g_VBO[1]);
glVertexAttribPointer(positionIndex[0], 3, GL_FLOAT, GL_FALSE, sizeof(Vertex), reinterpret_cast<void*>(offsetof(Vertex, position)));
glVertexAttribPointer(normalIndex[0], 3, GL_FLOAT, GL_FALSE, sizeof(Vertex), reinterpret_cast<void*>(offsetof(Vertex, normal)));
glEnableVertexAttribArray(positionIndex[0]); // enable vertex attributes
glEnableVertexAttribArray(normalIndex[0]);
// generate identifier for VBOs and copy data to GPU
glBindBuffer(GL_ARRAY_BUFFER, g_VBO[2]);
glBufferData(GL_ARRAY_BUFFER, sizeof(g_vertices), g_vertices, GL_STATIC_DRAW);
// create VAO and specify VBO data
glBindVertexArray(g_VAO[1]);
glBindBuffer(GL_ARRAY_BUFFER, g_VBO[2]);
glVertexAttribPointer(positionIndex[1], 3, GL_FLOAT, GL_FALSE, sizeof(Vertex2), reinterpret_cast<void*>(offsetof(Vertex2, position)));
glVertexAttribPointer(normalIndex[1], 3, GL_FLOAT, GL_FALSE, sizeof(Vertex2), reinterpret_cast<void*>(offsetof(Vertex2, normal)));
glVertexAttribPointer(tangentIndex, 3, GL_FLOAT, GL_FALSE, sizeof(Vertex2), reinterpret_cast<void*>(offsetof(Vertex2, tangent)));
glVertexAttribPointer(texCoordIndex[0], 2, GL_FLOAT, GL_FALSE, sizeof(Vertex2), reinterpret_cast<void*>(offsetof(Vertex2, texCoord)));
// enable vertex attributes
glEnableVertexAttribArray(positionIndex[1]);
glEnableVertexAttribArray(normalIndex[1]);
glEnableVertexAttribArray(tangentIndex);
glEnableVertexAttribArray(texCoordIndex[0]);
}
Render scene function:
static void render_scene()
{
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT); // clear colour buffer and depth buffer
glUseProgram(g_shaderProgramID[0]); // use the shaders associated with the shader program
glBindVertexArray(g_VAO[0]); // make VAO active
// set uniform shader variables
glm::mat4 MVP = g_camera.getProjectionMatrix() * g_camera.getViewMatrix() * g_mm_torus;
glUniformMatrix4fv(g_MVP_Index[0], 1, GL_FALSE, &MVP[0][0]);
glUniformMatrix4fv(g_M_Index[0], 1, GL_FALSE, &g_mm_torus[0][0]);
glUniform3fv(g_viewPointIndex[0], 1, &g_camera.getPosition()[0]);
glUniform4fv(g_lightPositionIndex[0], 1, &g_lightProperties.position[0]);
glUniform4fv(g_lightAmbientIndex[0], 1, &g_lightProperties.ambient[0]);
glUniform4fv(g_lightDiffuseIndex[0], 1, &g_lightProperties.diffuse[0]);
glUniform4fv(g_lightSpecularIndex[0], 1, &g_lightProperties.specular[0]);
glUniform1fv(g_lightShininessIndex[0], 1, &g_lightProperties.shininess);
glUniform4fv(g_materialAmbientIndex[0], 1, &g_materialProperties.ambient[0]);
glUniform4fv(g_materialDiffuseIndex[0], 1, &g_materialProperties.diffuse[0]);
glUniform4fv(g_materialSpecularIndex[0], 1, &g_materialProperties.specular[0]);
glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_CUBE_MAP, g_textureID[0]);
glUniform1i(g_envMapSamplerIndex, 0);
glDrawElements(GL_TRIANGLES, g_numberOfFaces * 3, GL_UNSIGNED_INT, 0); // display the vertices based on their indices and primitive type
glUseProgram(g_shaderProgramID[1]); // use the shaders associated with the shader program
glBindVertexArray(g_VAO[1]); // make VAO active
// set uniform shader variables
glClear(GL_DEPTH_BUFFER_BIT);
MVP = g_camera.getProjectionMatrix() * g_camera.getViewMatrix() * g_mm_wall;
glUniformMatrix4fv(g_MVP_Index[1], 1, GL_FALSE, &MVP[0][0]);
glUniformMatrix4fv(g_M_Index[1], 1, GL_FALSE, &g_mm_wall[0][0]);
glUniform3fv(g_viewPointIndex[1], 1, &g_camera.getPosition()[0]);
glUniform4fv(g_lightPositionIndex[1], 1, &g_lightProperties.position[0]);
glUniform4fv(g_lightAmbientIndex[1], 1, &g_lightProperties.ambient[0]);
glUniform4fv(g_lightDiffuseIndex[1], 1, &g_lightProperties.diffuse[0]);
glUniform4fv(g_lightSpecularIndex[1], 1, &g_lightProperties.specular[0]);
glUniform1fv(g_lightShininessIndex[1], 1, &g_lightProperties.shininess);
glUniform4fv(g_materialAmbientIndex[1], 1, &g_materialProperties.ambient[0]);
glUniform4fv(g_materialDiffuseIndex[1], 1, &g_materialProperties.diffuse[0]);
glUniform4fv(g_materialSpecularIndex[1], 1, &g_materialProperties.specular[0]);
glActiveTexture(GL_TEXTURE1);
glBindTexture(GL_TEXTURE_2D, g_textureID[6]);
glActiveTexture(GL_TEXTURE2);
glBindTexture(GL_TEXTURE_2D, g_textureID[7]);
glUniform1i(g_texSamplerIndex, 1);
glUniform1i(g_normalSamplerIndex, 2);
glDrawArrays(GL_TRIANGLES, 0, 36);
glFlush(); // flush the pipeline
}
Vertex shader for torus:
#version 330 core
// input data (different for all executions of this shader)
in vec3 aPosition;
in vec3 aNormal;
// uniform input data
uniform mat4 uModelViewProjectionMatrix;
uniform mat4 uModelMatrix;
// output data (will be interpolated for each fragment)
out vec3 vNormal;
out vec3 vPosition;
void main()
{
// set vertex position
gl_Position = uModelViewProjectionMatrix * vec4(aPosition, 1.0);
// world space
vPosition = (uModelMatrix * vec4(aPosition, 1.0)).xyz;
vNormal = (uModelMatrix * vec4(aNormal, 0.0)).xyz;
}
Fragment shader for torus:
#version 330 core
// interpolated values from the vertex shaders
in vec3 vNormal;
in vec3 vPosition;
// uniform input data
struct LightProperties
{
vec4 position;
vec4 ambient;
vec4 diffuse;
vec4 specular;
float shininess;
};
struct MaterialProperties
{
vec4 ambient;
vec4 diffuse;
vec4 specular;
};
uniform LightProperties uLightingProperties;
uniform MaterialProperties uMaterialProperties;
uniform vec3 uViewPoint;
uniform samplerCube uEnvironmentMap;
// output data
out vec3 fColor;
void main()
{
vec3 N = normalize(vNormal);
vec3 L;
// determine whether the light is a point light source or directional light
if(uLightingProperties.position.w == 0.0f)
L = normalize((uLightingProperties.position).xyz);
else
L = normalize((uLightingProperties.position).xyz - vPosition);
vec3 V = normalize(uViewPoint - vPosition);
vec3 R = reflect(-L, N);
// calculate the ambient, diffuse and specular components
vec4 ambient = uLightingProperties.ambient * uMaterialProperties.ambient;
vec4 diffuse = uLightingProperties.diffuse * uMaterialProperties.diffuse * max(dot(L, N), 0.0);
vec4 specular = vec4(0.0f, 0.0f, 0.0f, 1.0f);
if(dot(L, N) > 0.0f)
{
specular = uLightingProperties.specular * uMaterialProperties.specular
* pow(max(dot(V, R), 0.0), uLightingProperties.shininess);
}
vec3 reflectEnvMap = reflect(-V, N);
// set output color
fColor = texture(uEnvironmentMap, reflectEnvMap).rgb;
fColor *= (diffuse + specular + ambient).rgb;
}
Vertex shader for wall:
#version 330 core
// input data (different for all executions of this shader)
in vec3 aPosition;
in vec3 aNormal;
in vec3 aTangent;
in vec2 aTexCoord;
// uniform input data
uniform mat4 uModelViewProjectionMatrix;
uniform mat4 uModelMatrix;
// output data (will be interpolated for each fragment)
out vec3 vPosition;
out vec3 vNormal;
out vec3 vTangent;
out vec2 vTexCoord;
void main()
{
// set vertex position
gl_Position = uModelViewProjectionMatrix * vec4(aPosition, 1.0);
// world space
vPosition = (uModelMatrix * vec4(aPosition, 1.0)).xyz;
vNormal = (uModelMatrix * vec4(aNormal, 0.0)).xyz;
vTangent = (uModelMatrix * vec4(aTangent, 0.0)).xyz;
vTexCoord = aTexCoord;
}
Fragment shader for wall:
#version 330 core
// interpolated values from the vertex shaders
in vec3 vPosition;
in vec3 vNormal;
in vec3 vTangent;
in vec2 vTexCoord;
// uniform input data
struct LightProperties
{
vec4 position;
vec4 ambient;
vec4 diffuse;
vec4 specular;
float shininess;
};
struct MaterialProperties
{
vec4 ambient;
vec4 diffuse;
vec4 specular;
};
uniform LightProperties uLightingProperties;
uniform MaterialProperties uMaterialProperties;
uniform vec3 uViewPoint;
uniform sampler2D uTextureSampler;
uniform sampler2D uNormalSampler;
// output data
out vec3 fColor;
void main()
{
// calculate normal map vectors
vec3 normal = normalize(vNormal);
vec3 tangent = normalize(vTangent);
vec3 biTangent = normalize(cross(tangent, normal));
vec3 normalMap = 2.0f * texture(uNormalSampler, vTexCoord).xyz - 1.0f;
// calculate vectors for lighting
vec3 N = normalize(mat3(tangent, biTangent, normal) * normalMap);
vec3 L;
// determine whether the light is a point light source or directional light
if(uLightingProperties.position.w == 0.0f)
L = normalize((uLightingProperties.position).xyz);
else
L = normalize((uLightingProperties.position).xyz - vPosition);
vec3 V = normalize(uViewPoint - vPosition);
vec3 R = reflect(-L, N);
// calculate Phong lighting
vec4 ambient = uLightingProperties.ambient * uMaterialProperties.ambient;
vec4 diffuse = uLightingProperties.diffuse * uMaterialProperties.diffuse * max(dot(L, N), 0.0);
vec4 specular = vec4(0.0f, 0.0f, 0.0f, 1.0f);
if(dot(L, N) > 0.0f)
{
specular = uLightingProperties.specular * uMaterialProperties.specular
* pow(max(dot(V, R), 0.0), uLightingProperties.shininess);
}
// set output color
fColor = (diffuse + specular + ambient).rgb;
fColor *= texture(uTextureSampler, vTexCoord).rgb;
}
PS: Sorry if I was a bit too irresponsible with my questions yesterday. Some of the advice I just didn't understand and thus didn't reply.
When you are drawing the 2nd part (the wall), you are binding the textures to texture units GL_TEXTURE1 and GL_TEXTURE2:
glActiveTexture(GL_TEXTURE1);
glBindTexture(GL_TEXTURE_2D, g_textureID[6]);
glActiveTexture(GL_TEXTURE2);
glBindTexture(GL_TEXTURE_2D, g_textureID[7]);
But you are assigning texture unit indices 0 and 1 to the texture sampler uniforms uTextureSampler and uNormalSampler:
glUniform1i(g_texSamplerIndex, 0);
glUniform1i(g_normalSamplerIndex, 1);
Adapt your code like this:
glUniform1i(g_texSamplerIndex, 1); // GL_TEXTURE1
glUniform1i(g_normalSamplerIndex, 2); // GL_TEXTURE2
Further, the attribute index of "aTexCoord" is stored in texCoordIndex[i] for g_shaderProgramID[i]:
for (int i = 0; i < 2; i++)
{
....
texCoordIndex[i] = glGetAttribLocation(g_shaderProgramID[i], "aTexCoord");
.....
}
You have to be aware of this when setting up the vertex attribute pointer and enabling the vertex attribute.
Change this:
glVertexAttribPointer(texCoordIndex[0], 2, GL_FLOAT, GL_FALSE, sizeof(Vertex2), reinterpret_cast<void*>(offsetof(Vertex2, texCoord)));
.....
glEnableVertexAttribArray(texCoordIndex[0]);
To this:
glVertexAttribPointer(texCoordIndex[1], 2, GL_FLOAT, GL_FALSE, sizeof(Vertex2), reinterpret_cast<void*>(offsetof(Vertex2, texCoord)));
.....
glEnableVertexAttribArray(texCoordIndex[1]);
I seem to have broken the shaders in my program, here is their code:
vertex shader
#version 330 core
uniform mat4 camera;
uniform mat4 model;
layout(location = 0) in vec3 vert;
layout(location = 1) in vec3 vertNormal;
out vec3 fragVert;
out vec3 fragNormal;
void main() {
// Pass some variables to the fragment shader
fragNormal = vertNormal;
fragVert = vert;
// Apply all matrix transformations to vert
gl_Position = camera * model * vec4(vert, 1);
}
fragment shader
#version 150 core
uniform mat4 model;
uniform vec3 cameraPosition;
// material settings
uniform float materialShininess;
uniform vec3 materialSpecularColor;
uniform vec3 materialColor;
uniform struct Light {
vec3 position;
vec3 intensities; //a.k.a the color of the light
float attenuation;
float ambientCoefficient;
} light;
in vec3 fragNormal;
in vec3 fragVert;
out vec4 finalColor;
void main() {
vec3 normal = normalize(transpose(inverse(mat3(model))) * fragNormal);
vec3 surfacePos = vec3(model * vec4(fragVert, 1));
vec4 surfaceColor = vec4(materialColor, 1);
vec3 surfaceToLight = normalize(light.position - surfacePos);
vec3 surfaceToCamera = normalize(cameraPosition - surfacePos);
//ambient
vec3 ambient = light.ambientCoefficient * surfaceColor.rgb * light.intensities;
//diffuse
float diffuseCoefficient = max(0.0, dot(normal, surfaceToLight));
vec3 diffuse = diffuseCoefficient * surfaceColor.rgb * light.intensities;
//specular
float specularCoefficient = 0.0;
if(diffuseCoefficient > 0.0)
specularCoefficient = pow(max(0.0, dot(surfaceToCamera, reflect(-surfaceToLight, normal))), materialShininess);
vec3 specular = specularCoefficient * materialSpecularColor * light.intensities;
//attenuation
float distanceToLight = length(light.position - surfacePos);
float attenuation = 1.0 / (1.0 + light.attenuation * pow(distanceToLight, 2));
//linear color (color before gamma correction)
vec3 linearColor = ambient + attenuation*(diffuse + specular);
//final color (after gamma correction)
vec3 gamma = vec3(1.0/2.2);
finalColor = vec4(pow(linearColor, gamma), surfaceColor.a);
}
I have an asset that I am loading from an obj file, then drawing like so:
void OpenGLView::run()
{
initializeAndSetupWindow(WINDOW_WIDTH, WINDOW_HEIGHT, "PhongBunny");
glClearColor(1.0f, 1.0f, 0.0f, 1.0f);
loadBunnyAsset();
AssetInstance bunny1;
bunny1.asset = bunny;
bunny1.position = glm::vec3(2.0f, 2.0f, 2.0f);
bunny1.scale = glm::vec3(1.0f, 1.0f, 1.0f);
do{
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
loadUniforms(bunny1);
glEnableVertexAttribArray(0);
glBindBuffer(GL_ARRAY_BUFFER, bunny.vertexBuffer);
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 0, nullptr);
glEnableVertexAttribArray(1);
glBindBuffer(GL_ARRAY_BUFFER, bunny.normalBuffer);
glVertexAttribPointer(1, 3, GL_FLOAT, GL_FALSE, 0, nullptr);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, bunny.elementBuffer);
glDrawElements(GL_TRIANGLES, bunny.elementsSize, GL_UNSIGNED_INT, (void*)0);
glDisableVertexAttribArray(0);
glDisableVertexAttribArray(1);
glfwSwapBuffers(window);
glfwPollEvents();
} while (!glfwWindowShouldClose(window));
glfwDestroyWindow(window);
glfwTerminate();
}
with this being the function to load uniforms:
void OpenGLView::loadUniforms(AssetInstance assetInstance)
{
Asset* asset = &assetInstance.asset;
glUseProgram(asset->shaderProgramID);
glm::mat4 Projection = glm::perspective(45.0f, 4.0f / 3.0f, 0.1f, 1000.0f);
glm::mat4 camera = Projection * getViewMatrix();
glm::mat4 model = translate(assetInstance.position) * scale(assetInstance.position);
GLuint cameraID = glGetUniformLocation(asset->shaderProgramID, "camera");
GLuint modelID = glGetUniformLocation(asset->shaderProgramID, "model");
GLuint cameraPositionID = glGetUniformLocation(asset->shaderProgramID, "cameraPosition");
GLuint lightPositionID = glGetUniformLocation(asset->shaderProgramID, "light.position");
GLuint lightIntensitiesID = glGetUniformLocation(asset->shaderProgramID, "light.intensities");
GLuint lightAttenuationID = glGetUniformLocation(asset->shaderProgramID, "light.attenuation");
GLuint lightAmbientCoefficientID = glGetUniformLocation(asset->shaderProgramID, "light.ambientCoefficient");
GLuint materialColorID = glGetUniformLocation(asset->shaderProgramID, "materialColor");
GLuint materialShininessID = glGetUniformLocation(asset->shaderProgramID, "materialShininess");
GLuint materialSpecularColorID = glGetUniformLocation(asset->shaderProgramID, "materialSpecularColor");
glUniformMatrix4fv(cameraID, 1, GL_FALSE, &camera[0][0]);
glUniformMatrix4fv(modelID, 1, GL_FALSE, &model[0][0]);
glUniform3fv(cameraPositionID, 1, &cameraPosition[0]);
glUniform3fv(lightPositionID, 1, &light.position[0]);
glUniform3fv(lightIntensitiesID, 1, &light.intensities[0]);
glUniform1f(lightAttenuationID, light.attenuation);
glUniform1f(lightAmbientCoefficientID, light.ambientCoefficient);
glUniform3fv(materialColorID, 1, &assetInstance.materialColor[0]);
glUniform1f(materialShininessID, assetInstance.materialShininess);
glUniform3fv(materialSpecularColorID, 1, &assetInstance.materialSpecularColor[0]);
}
and some setup being done here:
OpenGLView::OpenGLView()
{
light.position = glm::vec3(0.0f, 7.0f, 3.0f);
light.intensities = glm::vec3(0.3f, 0.3, 0.3f);
light.attenuation = 0.3f;
light.ambientCoefficient = 0.005f;
cameraPosition = glm::vec3(5.0f, 3.0f, 8.0f);
}
For a while I had bunny1's position set to (0, 0, 0), which caused it to not be drawn at all; I can't figure out why that is. Then, when I changed it to (1, 1, 1), it started to draw, but now my key_callback function (which rotates and scales the bunny) has stopped working. Also, here are my translate and scale functions:
glm::mat4 OpenGLView::translate(glm::vec3 position)
{
return glm::translate(glm::mat4(), position);
}
glm::mat4 OpenGLView::scale(glm::vec3 size)
{
return glm::scale(glm::mat4(), size);
}
I also can't figure out why changing bunny1.position seems to scale the bunny instead of translating it.
The reason your bunny's scale changes when you change bunny1.position is that you scale the bunny by bunny1.position:
glm::mat4 model = translate(assetInstance.position) * scale(assetInstance.position);
That might also be the reason the bunny disappears when you set its position to (0, 0, 0), since you then scale it by 0.
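A minimal sketch of the presumably intended line, assuming the scale member you already set on bunny1 is what should drive the scaling:
glm::mat4 model = translate(assetInstance.position) * scale(assetInstance.scale);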