Can't make blending work - c++

I have a 3 dimensional dataset where each value of the dataset is normalized to [0, 1]. I want to visualize this dataset by using texture, and blending.
However, it seems that I can't make it work.
Here is what I have done so far:
int main(){
...
//building an image for each rectangular slice of data
vector<Texture> myTextures;
for (GLint rowIndex = 0; rowIndex < ROW_NUM; rowIndex++)
{
auto pathToImage = "images/row" + to_string(rowIndex) + FILE_EXT;
FIBITMAP *img = FreeImage_Allocate(SLICE_DIMENSION, SLICE_NUM, 32); //32 = RGBA
for (GLint depthIndex = 0; depthIndex < DEPTH_NUM; depthIndex++)
{
for (GLint colIndex = 0; colIndex < COL_NUM; colIndex++)
{
auto value = my3DData[depthIndex][rowIndex][colIndex];
//transform tValue to a source color
glm::vec4 source = transformValueToColor(value);
//note that here I am also setting the opacity.
RGBQUAD linRgb = { source.b, source.g, source.r, source.a };
FreeImage_SetPixelColor(img, colIndex, depthIndex, &linRgb);
}
}
//Saving images. Saved images shows transparency.
FreeImage_Save(FIF_PNG, img, pathToImage.c_str());
myTextures.push_back(Texture(pathToImage.c_str(), GL_TEXTURE0));
}
//create VAO, VBO, EBO for a unit quad.
glEnable(GL_DEPTH_TEST);
glEnable(GL_BLEND);
glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA);
//game loop
while (!glfwWindowShouldClose(window))
{
...
for (int i = 0; i < myTextures.size(); i++)
{
GLint index = myTextures.size() - i - 1;
myTextures[index].bind(); //does glActiveTexture(...), and glBindTexture(...);
glm::mat4 model;
//translate
model = glm::translate(model, glm::vec3(0.0f, 0.0f, -index*0.003f));
//scale
model = glm::scale(model, glm::vec3(1.2f));
glUniformMatrix4fv(glGetUniformLocation(ourShader.Program, "model"), 1, GL_FALSE, glm::value_ptr(model));
glDrawElements(GL_TRIANGLES, 6, GL_UNSIGNED_INT, 0);
}
}
}
transformValueToColor for transforming a data value in [0,1] to a color value:
//All inputs >=0.6 is transformed to highly transparent white color.
// Maps a normalized data value in [0, 1] to a grayscale RGBA color with
// components in the 0-255 range. Values below 0.6 become a gray whose
// brightness tracks the value and whose opacity falls as the value rises;
// values at or above 0.6 become an almost fully transparent white.
glm::vec4 transformValueToColor(GLclampf tValue)
{
    if (tValue < 0.6f) {
        const auto intensity = round(255 * tValue);
        const auto opacity = round(255 * (1 - tValue));
        return glm::vec4(intensity, intensity, intensity, opacity);
    }
    // High-valued samples: white, alpha 10/255 (highly transparent).
    return glm::vec4(255, 255, 255, 10);
}
My vertex shader:
#version 330 core
// Vertex shader for a textured slice quad: applies the standard MVP transform
// and forwards a vertically flipped texture coordinate.
layout(location = 0) in vec3 position;
layout(location = 1) in vec2 texCoord;
// Texture coordinate interpolated to the fragment shader.
out vec2 TexCoord;
// Model/view/projection matrices supplied by the application each frame.
uniform mat4 model;
uniform mat4 view;
uniform mat4 projection;
void main()
{
gl_Position = projection * view * model * vec4(position, 1.0f);
// Flip t so the image is not rendered upside down.
TexCoord = vec2(texCoord.s, 1-texCoord.t);
}
My fragment shader:
#version 330 core
in vec2 TexCoord;
// Final fragment color (RGBA); alpha is preserved so GL blending can use it.
out vec4 color;
// 2D texture holding one slice of the volume data.
uniform sampler2D sliceTexture;
void main()
{
// Pass the sampled texel through unchanged, including its alpha channel.
vec4 texColor = texture(sliceTexture, TexCoord);
color = texColor;
}
I think this is the code needed for the blending to work. The images are generated correctly, and also applied as textures on the quads correctly. However, the quads in front appear completely opaque, even though the generated images (including the one appearing in front) show transparent areas.
I am not sure where I am going wrong. Requesting your suggestions.
Thank you.
Edit: details of Texture class (only the parts relevant to loading RGBA image and creating RGBA texture from that):
int width, height, channelCount;
unsigned char* image = SOIL_load_image(pathToImage, &width, &height, &channelCount, SOIL_LOAD_RGBA);
...
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, width, height, 0, GL_RGBA, GL_UNSIGNED_BYTE, image);
Edit2: Added details of camera class. Camera::getViewMatrix() provides view matrix.
// Builds a camera at `position` facing along the direction implied by
// `yaw`/`pitch` (degrees). The starting pose is duplicated into the
// original* members so it can be restored later. `window` is accepted but
// not used in this constructor body.
Camera::Camera(GLFWwindow* window, glm::vec3 position, glm::vec3 worldUpDirection, GLfloat yaw, GLfloat pitch)
:mouseSensitivity(0.25f), fov(45.0f), cameraSpeed(1.0f)
{
this->position = this->originalPosition = position;
this->worldUpDirection = worldUpDirection;
this->yaw = this->originalYaw = yaw;
this->pitch = this->originalPitch = pitch;
// Derive front/right/up basis vectors from the initial yaw and pitch.
updateCameraVectors();
}
// Recomputes frontDirection/rightDirection/upDirection from the current
// yaw and pitch angles (degrees).
void Camera::updateCameraVectors()
{
    // Start explicitly from the identity matrix: recent GLM versions leave a
    // default-constructed glm::mat4 UNINITIALIZED unless GLM_FORCE_CTOR_INIT
    // is defined, which silently corrupts the rotations below.
    glm::mat4 yawPitchRotMat(1.0f);
    yawPitchRotMat = glm::rotate(yawPitchRotMat, glm::radians(yaw), glm::vec3(0.0f, 1.0f, 0.0f)); //y-axis as yaw axis
    yawPitchRotMat = glm::rotate(yawPitchRotMat, glm::radians(pitch), glm::vec3(1.0f, 0.0f, 0.0f)); //x-axis as pitch axis
    // Column 2 of the rotation matrix is the local +Z axis; the camera looks down -Z.
    frontDirection = glm::normalize(-glm::vec3(yawPitchRotMat[2].x, yawPitchRotMat[2].y, yawPitchRotMat[2].z));
    rightDirection = glm::normalize(glm::cross(frontDirection, worldUpDirection));
    upDirection = glm::normalize(glm::cross(rightDirection, frontDirection));
}
// Returns the world-to-view transform for the camera's current pose.
glm::mat4 Camera::getViewMatrix()
{
    // Aim at a point one unit ahead of the camera along its facing direction.
    const glm::vec3 lookTarget = position + frontDirection;
    return glm::lookAt(position, lookTarget, upDirection);
}

myTextures.push_back(Texture(pathToImage.c_str(), GL_TEXTURE0));
This doesn't include any information about your Texture class and how it's parsing the image files. Even if the files themselves show transparency, it's possible that your texture loading code is discarding the alpha channel when you load the image.
model = glm::translate(model, glm::vec3(0.0f, 0.0f, -index*0.003f));
This makes it look as if you're rendering from front to back. If you want to render transparent objects on top of one another, you need to use an algorithm for order-independent transparency, or you need to render from back to front.
glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA);
You may also want / need to use glBlendFuncSeparate so that the mixing of the alpha channel is done differently. I'm not sure about this one though.
You may also want to consider simply populating a single GL_TEXTURE_3D object and rendering it as a cube, doing all the mixing in the fragment shader, rather than rendering a series of quads for each layer.

Related

How do I get this vector of objects to render in OpenGL?

My scene: (the video is blurry because I had to convert this to a GIF)
There are two other objects that should be rendered here!
I am writing a program with GLFW/OpenGL. Essentially what I am trying to do is to be able to render a bunch of independent objects, who all can move freely around. To do this, I create a shader, a VAO, a VBO, and a EBO for each model that I want to render. static_models is a vector of class Model, and class Model is just a way to organize my vertices, indices, colors, and normals.
First is creating the vector of Models: (I know this class works as it should, because I use the exact same class for different shaders and buffer objects and things render well)
std::vector<Model> static_models; // scale // color
Model plane("models/plane.ply", { 1.0f, 1.0f, 1.0f }, { 1.0f, 1.0f, 1.0f });
Model tetrahedron("models/tetrahedron.ply", { 1.0f, 1.0f, 1.0f }, { 0.2f, 1.0f, 1.0f });
static_models.emplace_back(plane);
static_models.emplace_back(tetrahedron);
The code for generating the shader objects, VAOS, VBOS, and EBOS:
for (int i = 0; i < static_models.size(); i++)
{
Shader tempShader("plane.vert", "plane.frag");
// create a shader program for each model (in case we need to rotate them or transform them in some way they will be independent)
static_model_shaders.emplace_back(tempShader);
VAOS_static.emplace_back();
VAOS_static.back().Bind();
VBO tempVBO(&static_models.at(i).vertices.front(), static_models.at(i).vertices.size() * sizeof(GLfloat));
EBO tempEBO(&static_models.at(i).indices.front(), static_models.at(i).indices.size() * sizeof(GLuint));
VAOS_static.back().LinkAttrib(tempVBO, 0, 3, GL_FLOAT, 11 * sizeof(float), (void*)0);
VAOS_static.back().LinkAttrib(tempVBO, 1, 3, GL_FLOAT, 11 * sizeof(float), (void*)(3 * sizeof(float)));
VAOS_static.back().LinkAttrib(tempVBO, 2, 2, GL_FLOAT, 11 * sizeof(float), (void*)(6 * sizeof(float)));
VAOS_static.back().LinkAttrib(tempVBO, 3, 3, GL_FLOAT, 11 * sizeof(float), (void*)(8 * sizeof(float)));
VAOS_static.back().Unbind();
tempVBO.Unbind();
tempEBO.Unbind();
}
Then the code to create the positions and mat4 matrixes for each model:
// static model vectors for position and matrix
std::vector<glm::vec3> staticModelPositions;
std::vector<glm::mat4> staticModels;
// initialize all static_model object positions
for (int i = 0; i < static_models.size(); i++)
{
staticModelPositions.emplace_back();
staticModelPositions.back() = glm::vec3(0.0f, 1.0f, 0.0f);
staticModels.emplace_back();
staticModels.back() = glm::translate(staticModels.back(), staticModelPositions.back());
}
Then I set some initial values for the uniforms:
std::vector<Texture> textures;
//static objects
for (int i = 0; i < static_models.size(); i++)
{
//activate first before setting uniforms
static_model_shaders.at(i).Activate();
// static model load model, then load lightColor, then load lightPos for each static_model
glUniformMatrix4fv(glGetUniformLocation(static_model_shaders.at(i).ID, "model"), 1, GL_FALSE, glm::value_ptr(staticModels.at(i)));
glUniform4f(glGetUniformLocation(static_model_shaders.at(i).ID, "lightColor"), lightColor.x, lightColor.y, lightColor.z, 1.0f);
glUniform3f(glGetUniformLocation(static_model_shaders.at(i).ID, "lightPos"), lightPos.x, lightPos.y, lightPos.z);
//create texture objects
textures.emplace_back(Texture("brick.png", GL_TEXTURE_2D, GL_TEXTURE0, GL_RGBA, GL_UNSIGNED_BYTE));
textures.back().texUnit(static_model_shaders.at(i), "tex0", 0);
}
Then drawing the models in the game loop: (game loop not shown this is a big program)
//draw all static models (each with a different shader and matrix)
for (int i = 0; i < static_model_shaders.size(); i++)
{
//activate shader for current model
// Tells OpenGL which Shader Program we want to use
static_model_shaders.at(i).Activate();
// Exports the camera Position to the Fragment Shader for specular lighting
glUniform3f(glGetUniformLocation(static_model_shaders.at(i).ID, "camPos"), camera.Position.x, camera.Position.y, camera.Position.z);
glUniformMatrix4fv(glGetUniformLocation(static_model_shaders.at(i).ID, "model"), 1, GL_FALSE, glm::value_ptr(staticModels.at(i)));
glUniform4f(glGetUniformLocation(static_model_shaders.at(i).ID, "lightColor"), lightColor.x, lightColor.y, lightColor.z, 1.0f);
// Export the camMatrix to the Vertex Shader of the pyramid
camera.Matrix(static_model_shaders.at(i), "camMatrix");
// Binds texture so that is appears in rendering
textures.at(i).Bind();
VAOS_static.at(i).Bind();
glDrawElements(GL_TRIANGLES, static_models.at(i).indices.size(), GL_UNSIGNED_INT, 0);
VAOS_static.at(i).Unbind();
}
My vertex shader:
#version 330 core
// Vertex shader for the static models: computes the world-space position and
// clip-space position, and forwards color/UV/normal to the fragment shader.
// Positions/Coordinates
layout (location = 0) in vec3 aPos;
// Colors
layout (location = 1) in vec3 aColor;
// Texture Coordinates
layout (location = 2) in vec2 aTex;
// Normals (not necessarily normalized)
layout (location = 3) in vec3 aNormal;
// Outputs the color for the Fragment Shader
out vec3 color;
// Outputs the texture coordinates to the Fragment Shader
out vec2 texCoord;
// Outputs the normal for the Fragment Shader
out vec3 Normal;
// Outputs the current position for the Fragment Shader
out vec3 crntPos;
// Imports the camera (view * projection) matrix from the main function
uniform mat4 camMatrix;
// Imports the model matrix from the main function
uniform mat4 model;
void main()
{
// calculates current position in world space
crntPos = vec3(model * vec4(aPos, 1.0f));
// Outputs the positions/coordinates of all vertices
gl_Position = camMatrix * vec4(crntPos, 1.0);
// Assigns the colors from the Vertex Data to "color"
color = aColor;
// Assigns the texture coordinates from the Vertex Data to "texCoord"
texCoord = aTex;
// NOTE(review): the normal is forwarded without applying the model's rotation;
// correct only while the model matrix is translation-only -- confirm for
// rotated/scaled models.
Normal = aNormal;
}
And fragment shader:
#version 330 core
// Phong-style lighting (ambient + diffuse + specular), modulated by the model
// texture and the light color.
// Outputs colors in RGBA
out vec4 FragColor;
// Imports the color from the Vertex Shader
in vec3 color;
// Imports the texture coordinates from the Vertex Shader
in vec2 texCoord;
// Imports the normal from the Vertex Shader
in vec3 Normal;
// Imports the current position from the Vertex Shader
in vec3 crntPos;
// Gets the Texture Unit from the main function
uniform sampler2D tex0;
// Gets the color of the light from the main function
uniform vec4 lightColor;
// Gets the position of the light from the main function
uniform vec3 lightPos;
// Gets the position of the camera from the main function
uniform vec3 camPos;
void main()
{
// ambient lighting: constant base illumination
float ambient = 0.40f;
// diffuse lighting: Lambert term, clamped so back-facing surfaces get 0
vec3 normal = normalize(Normal);
vec3 lightDirection = normalize(lightPos - crntPos);
float diffuse = max(dot(normal, lightDirection), 0.0f);
// specular lighting: Phong reflection with shininess exponent 8
float specularLight = 0.50f;
vec3 viewDirection = normalize(camPos - crntPos);
vec3 reflectionDirection = reflect(-lightDirection, normal);
float specAmount = pow(max(dot(viewDirection, reflectionDirection), 0.0f), 8);
float specular = specAmount * specularLight;
// outputs final color (note: vertex color `color` is received but not used here)
FragColor = texture(tex0, texCoord) * lightColor * (diffuse + ambient + specular);
}
I have other objects in the scene, and they render and update well. There are no errors in the code and everything runs fine, the objects in static_models are just not being rendered. Anyone have any ideas as to why it wouldn't be showing anything?
I fixed this after a very long time spent. The issue was this block of code:
// static model vectors for position and matrix
std::vector<glm::vec3> staticModelPositions;
std::vector<glm::mat4> staticModels;
// initialize all static_model object positions
for (int i = 0; i < static_models.size(); i++)
{
staticModelPositions.emplace_back();
staticModelPositions.back() = glm::vec3(0.0f, 1.0f, 0.0f);
staticModels.emplace_back();
staticModels.back() = glm::translate(staticModels.back(), staticModelPositions.back());
}
There is a line missing here. After doing staticModels.emplace_back(); we must create the identity matrix for the model. This code allows the program to function as intended:
// static model vectors for position and matrix
std::vector<glm::vec3> staticModelPositions;
std::vector<glm::mat4> staticModels;
// initialize all static_model object positions
for (int i = 0; i < static_models.size(); i++)
{
staticModelPositions.emplace_back();
staticModelPositions.back() = glm::vec3(0.0f, 1.0f, 0.0f);
staticModels.emplace_back();
staticModels.at(i) = glm::mat4(1.0f);
staticModels.back() = glm::translate(staticModels.back(), staticModelPositions.back());
}

openGL- Drawing Grid of Quads and Manually paint them

I'm doing a simple image processing app using OpenGL and C++.
However, there is one particular thing that I don't know how to do, which is:
I need to let my user to draw a Histogram Graph.
The way I thought to do this is by creating a grid of quads, one quad for each pixel intensity of my image. Example: if the image is 8 bits, I would need 256x256 quads in my grid. After drawing the grid I want my user to manually paint the quads in a quantized way (each quad), so that he could "draw" the histogram. The problem is that I don't know how to do any of these things...
Would anyone give me direction on how to draw the grid, and how to implement the painting?
If you're confused about "drawing a histogram", just consider it as a regular graph.
You don't have to draw a grid of quads. Just one quad is enough, and then use a shader to sample from the histogram stored in a 1d-texture. Here is what I get:
Vertex shader:
#version 450 core
// Generates one corner of the histogram rectangle per vertex; drawn as a
// 4-vertex triangle strip with no vertex buffer bound (gl_VertexID only).
layout(std140, binding = 0) uniform view_block {
vec2 scale, offset;
} VIEW;
layout(std140, binding = 1) uniform draw_block {
vec4 position;
float max_value;
} DRAW;
out gl_PerVertex {
vec4 gl_Position;
};
void main()
{
// Map gl_VertexID 0..3 to corner selectors (0,0), (1,0), (0,1), (1,1).
ivec2 id = ivec2(gl_VertexID&1, gl_VertexID>>1);
// Pick min or max corner coordinates from DRAW.position (xy = min, zw = max).
vec2 position = vec2(DRAW.position[id.x<<1], DRAW.position[(id.y<<1) + 1]);
// Pixel coordinates -> clip space via the view scale/offset.
gl_Position = vec4(fma(position, VIEW.scale, VIEW.offset), 0, 1);
}
Fragment shader:
#version 450 core
// Draws the histogram inside the rectangle DRAW.position
// (xy = lower-left corner, zw = upper-right corner, in pixels).
layout(std140, binding = 1) uniform draw_block {
    vec4 position;
    float max_value;
} DRAW;
// 1D texture with one bin per texel; R, G and B are independent histograms.
layout(binding = 0) uniform sampler1D hist;
layout(location = 0) out vec4 OUT;
void main()
{
    // Size of the drawing rectangle in pixels.
    const vec2 extent = DRAW.position.zw - DRAW.position.xy;
    // Normalized [0,1] position of this fragment inside the rectangle.
    // (Reuses `extent` instead of recomputing the same difference.)
    vec2 texcoord = (gl_FragCoord.xy - DRAW.position.xy)/extent;
    // A channel is lit (1.0) when the fragment lies below that bin's bar height.
    OUT.rgb = vec3(lessThan(texcoord.yyy*DRAW.max_value, texture(hist, texcoord.x).rgb));
    OUT.a = 1;
}
Histogram texture creation:
image hist(256, 1, 3, type_float);
// ... calculate the histogram ...
tex.reset(glCreateTextureSN(GL_TEXTURE_1D));
glTextureStorage1D(tex.get(), 1, GL_RGB32F, hist.w);
glTextureSubImage1D(tex.get(), 0, 0, hist.w, GL_RGB, GL_FLOAT, hist.c[0]);
glTextureParameteri(tex.get(), GL_TEXTURE_WRAP_S, GL_CLAMP_TO_BORDER);
Rendering routine:
const vec2i vs = { glutGet(GLUT_WINDOW_WIDTH), glutGet(GLUT_WINDOW_HEIGHT) };
glViewport(0, 0, vs[0], vs[1]);
glClear(GL_COLOR_BUFFER_BIT);
struct view_block {
vec2f scale, offset;
} VIEW = {
vec2f(2)/vec2f(vs), -vec2f(1)
};
GLbuffer view_buf(glCreateBufferStorageSN(sizeof(VIEW), &VIEW, 0));
glBindBufferBase(GL_UNIFORM_BUFFER, 0, view_buf.get());
struct draw_block {
box2f position;
float max_value;
} DRAW = {
box2f(0, 0, vs[0], vs[1]),
max_value
};
GLbuffer draw_buf(glCreateBufferStorageSN(sizeof(DRAW), &DRAW, 0));
glBindBufferBase(GL_UNIFORM_BUFFER, 1, draw_buf.get());
bind_textures(tex.get());
glBindProgramPipeline(pp.get());
glBindVertexArray(0);
glDrawArrays(GL_TRIANGLE_STRIP, 0, 4);
glutSwapBuffers();

OpenGL Projection Matrix showing Orthographic

I got an orthographic camera working however I wanted to try and implement a perspective camera so I can do some parallax effects later down the line. I am having some issues when trying to implement it. It seems like the depth is not working correctly. I am rotating a 2d image along the x-axis to simulate it laying somewhat down so I get see the projection matrix working. It is still showing as an orthographic perspective though.
Here is some of my code:
CameraPersp::CameraPersp() :
_camPos(0.0f,0.0f,0.0f), _modelMatrix(1.0f), _viewMatrix(1.0f), _projectionMatrix(1.0f)
Function called init to setup the matrix variables:
// Caches the viewport size and builds the initial model/view/projection
// matrices for the perspective camera.
void CameraPersp::init(int screenWidth, int screenHeight)
{
_screenHeight = screenHeight;
_screenWidth = screenWidth;
// Identity translation, then tilt the quad back 55 degrees around X so the
// perspective foreshortening is visible.
_modelMatrix = glm::translate(_modelMatrix, glm::vec3(0.0f, 0.0f, 0.0f));
_modelMatrix = glm::rotate(_modelMatrix, glm::radians(-55.0f), glm::vec3(1.0f, 0.0f, 0.0f));
// Push the scene 3 units away from the camera.
_viewMatrix = glm::translate(_viewMatrix, glm::vec3(0.0f, 0.0f, -3.0f));
// 45-degree vertical FOV, aspect from the window, near 0.1 / far 100.
_projectionMatrix = glm::perspective(glm::radians(45.0f), static_cast<float>(_screenWidth) / _screenHeight, 0.1f, 100.0f);
}
Initializing a texture to be loaded in with x,y,z,width,height,src
_sprites.back()->init(-0.5f, -0.5f, 0.0f, 1.0f, 1.0f, "src/content/sprites/DungeonCrawlStoneSoupFull/monster/deep_elf_death_mage.png");
Sending in the matrices to the vertexShader:
GLint mLocation = _colorProgram.getUniformLocation("M");
glm::mat4 mMatrix = _camera.getMMatrix();
//glUniformMatrix4fv(mLocation, 1, GL_FALSE, &(mMatrix[0][0]));
glUniformMatrix4fv(mLocation, 1, GL_FALSE, glm::value_ptr(mMatrix));
GLint vLocation = _colorProgram.getUniformLocation("V");
glm::mat4 vMatrix = _camera.getVMatrix();
//glUniformMatrix4fv(vLocation, 1, GL_FALSE, &(vMatrix[0][0]));
glUniformMatrix4fv(vLocation, 1, GL_FALSE, glm::value_ptr(vMatrix));
GLint pLocation = _colorProgram.getUniformLocation("P");
glm::mat4 pMatrix = _camera.getPMatrix();
//glUniformMatrix4fv(pLocation, 1, GL_FALSE, &(pMatrix[0][0]));
glUniformMatrix4fv(pLocation, 1, GL_FALSE, glm::value_ptr(pMatrix));
Here is my vertex shader:
#version 460
//The vertex shader operates on each vertex
//input data from VBO. Each vertex is 2 floats
in vec3 vertexPosition;
in vec4 vertexColor;
in vec2 vertexUV;
out vec3 fragPosition;
out vec4 fragColor;
out vec2 fragUV;
//uniform mat4 MVP;
uniform mat4 M;
uniform mat4 V;
uniform mat4 P;
void main() {
//Set the x,y position on the screen
//gl_Position.xy = vertexPosition;
gl_Position = M * V * P * vec4(vertexPosition, 1.0);
//the z position is zero since we are 2d
//gl_Position.z = 0.0;
//indicate that the coordinates are nomalized
gl_Position.w = 1.0;
fragPosition = vertexPosition;
fragColor = vertexColor;
// opengl needs to flip the coordinates
fragUV = vec2(vertexUV.x, 1.0 - vertexUV.y);
}
I can see the image "squish" a little because it is still rendering the perspective as orthographic. If I remove the rotation on the x-axis, it is no longer squished because it isn't laying down at all. Any thoughts on what I am doing wrong? I can supply more info upon request, but I think I put in most of the meat of things.
Picture:
You shouldn't modify gl_Position.w
gl_Position = M * V * P * vec4(vertexPosition, 1.0); // gl_Position is good
//indicate that the coordinates are nomalized < not true
gl_Position.w = 1.0; // Now perspective divisor is lost, projection isn't correct

Shadow Map: whole mesh is in shadow, there is no light where it should be according to depth map

First time trying to implement shadow map using openGL ang glsl shader language.
I think the first pass where I render to a texture is correct but when I compare the depth values it seems to shadow everything.
https://www.dropbox.com/s/myxenx9y41yz2fc/Screenshot%202014-12-09%2012.18.53.png?dl=0
My perspective projection matrix looks like this:
FOV = 90
Aspect = According to the programs window size. (I also tried to put different values here)
Near = 2;
Far= 10000;
Function to initialize the frame buffer
// Creates the shadow-map framebuffer: a 1024x1024 depth-only texture that
// pass 1 renders into; color draws/reads are disabled.
void OpenGLWin::initDepthMap()
{
//Framebuffer
m_glFunctions->glGenFramebuffers(1, &m_frameBuffer);
m_glFunctions->glBindFramebuffer(GL_FRAMEBUFFER, m_frameBuffer);
//////////////////////////////////////////////////////////////////////////
//Texture to render scene to
m_glFunctions->glGenTextures(1, &m_renderToTexture);
//Bind created texture to make it current
m_glFunctions->glBindTexture(GL_TEXTURE_2D, m_renderToTexture);
//Creates an empty 1024x1024 24-bit depth texture.
//m_glFunctions->glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, 1024, 768, 0, GL_RGB, GL_UNSIGNED_BYTE, 0);
m_glFunctions->glTexImage2D(GL_TEXTURE_2D, 0, GL_DEPTH_COMPONENT24, 1024, 1024, 0, GL_DEPTH_COMPONENT, GL_FLOAT, 0);
m_glFunctions->glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
m_glFunctions->glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
m_glFunctions->glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
m_glFunctions->glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
// NOTE(review): GL_COMPARE_R_TO_TEXTURE is only valid when sampled through a
// sampler2DShadow; the pass-2 shader uses a plain sampler2D, so this compare
// mode (and GL_LINEAR filtering on a depth texture) should be reconsidered.
m_glFunctions->glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_COMPARE_FUNC, GL_LEQUAL);
m_glFunctions->glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_COMPARE_MODE, GL_COMPARE_R_TO_TEXTURE);
// Depth-only pass: no color buffers drawn or read.
m_glFunctions->glDrawBuffer(GL_NONE);
m_glFunctions->glReadBuffer(GL_NONE);
// NOTE(review): no glFramebufferTexture* attachment call appears in this
// function -- presumably done elsewhere; without one the FBO has no depth
// attachment and cannot be complete. Confirm.
// Always check that our framebuffer is ok
if (m_glFunctions->glCheckFramebufferStatus(GL_FRAMEBUFFER) != GL_FRAMEBUFFER_COMPLETE){
qDebug() << "FrameBuffer not OK";
return;
}
m_glFunctions->glBindFramebuffer(GL_FRAMEBUFFER, 0);
}
Draw function for each mesh. Model matrix is passed as argument from a Transform class draw function
void Mesh::draw(const Matrix4x4& projection, const Matrix4x4& view, const Matrix4x4& model)
{
//Shadow map pass 1
if (m_shadowMapFirstpass){
//Pass 1 Shaders
m_glFunctions->glUseProgram(m_depthRTTShaderProgram);
//Light view matrix
m_depthMVP = projection*view*model;
//Get the location of the uniform name mvp
GLuint depthMVPLocation = m_glFunctions->glGetUniformLocation(m_depthRTTShaderProgram, "depthMVP");
m_glFunctions->glUniformMatrix4fv(depthMVPLocation, 1, GL_TRUE, &m_depthMVP[0][0]);
m_shadowMapFirstpass = false;
}
//Shadow map pass 2
else if(m_shadowMapFirstpass == false){
//Pass 2 Shader
m_glFunctions->glUseProgram(m_shaderProgram);
//Gets the model matrix which is then multiplied with view and projection to form the mvp matrix
Matrix4x4 mvp = projection * view * model;
//Get the location of the uniform name mvp
GLuint mvpLocation = m_glFunctions->glGetUniformLocation(m_shaderProgram, "mvp");
//Send the mvp matrix to the vertex shader
m_glFunctions->glUniformMatrix4fv(mvpLocation, 1, GL_TRUE, &mvp[0][0]);
Matrix4x4 depthBiasMVP = m_depthMVP;// biasMatrix*m_depthMVP;
GLuint depthBiasMVPLocation = m_glFunctions->glGetUniformLocation(m_shaderProgram, "depthBiasMVP");
m_glFunctions->glUniformMatrix4fv(depthBiasMVPLocation, 1, GL_TRUE, &depthBiasMVP[0][0]);
m_shadowMapFirstpass = true;
}
//Bind this mesh VAO
m_glFunctions->glBindVertexArray(m_vao);
//Draw the triangles using the index buffer(EBO)
glDrawElements(GL_TRIANGLES, m_indices.size(), GL_UNSIGNED_INT, 0);
//Unbind the VAO
m_glFunctions->glBindVertexArray(0);
/////////////////////////////////////////////////////////////////////////////////////////////////////
//Calls the childrens' update
if (!m_children.empty())
{
for (int i = 0; i < m_children.size(); i++)
{
if (m_children[i] != NULL)
{
m_children[i]->draw(frustumCheck, projection, view, bvScaleFactor, model);
}
}
}
}
My render loop
// Renders one frame: pass 1 draws the scene's depth into the shadow-map FBO
// from the light's view, then pass 2 draws the lit scene to the default
// framebuffer with the shadow map bound on texture unit 0.
void OpenGLWin::paintGL()
{
// m_glFunctions->glBindFramebuffer(GL_FRAMEBUFFER, m_frameBuffer);
m_glFunctions->glBindFramebuffer(GL_DRAW_FRAMEBUFFER, m_frameBuffer);
// Match the viewport to the 1024x1024 shadow-map texture.
glViewport(0, 0, 1024, 1024);
// Clear the buffer with the current clearing color
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
//Light View Matrix
// NOTE(review): eye (0,0,0) looking toward (0,0,-1) places the light at the
// origin looking down -Z; confirm this matches the intended light position.
Matrix4x4 lightView;
lightView.lookAt(Vector3(0, 0, 0), Vector3(0, 0, -1), Vector3(0, 1, 0));
//Draw scene to Texture
m_root->draw(m_projection, lightView);
///////////////////////////////////////////////////////////////////
//Draw to real scene
m_glFunctions->glBindFramebuffer(GL_FRAMEBUFFER, 0);
// m_glFunctions->glBindFramebuffer(GL_DRAW_FRAMEBUFFER, 0);
// Clear the screen
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
// NOTE(review): the viewport is not reset to the window size before the main
// pass, so it still renders at 1024x1024 -- verify against the window size.
//Bind Pass 2 shader
m_glFunctions->glUseProgram(m_shadowMapShaderProgram->getShaderProgramID());
GLuint shadowMapLocation = m_glFunctions->glGetUniformLocation(m_shadowMapShaderProgram->getShaderProgramID(), "shadowMap");
//Shadow Texture bound on unit 0 for the pass-2 fragment shader.
m_glFunctions->glActiveTexture(GL_TEXTURE0);
m_glFunctions->glBindTexture(GL_TEXTURE_2D, m_renderToTexture);
m_glFunctions->glUniform1i(shadowMapLocation, 0);
//Updates matrices and view matrix for player camera
m_root->update(m_view);
//Render scene to main frame buffer
m_root->draw(m_projection, m_view);
}
Pass 1 Vertex Shader
#version 330 core
//Passthrough vertex shader for the depth (shadow-map) pass: transforms the
//vertex into the light's clip space; depth is written by the fragment shader.
uniform mat4 depthMVP;
//Vertex received from the program
layout(location = 0) in vec3 vertexPosition_modelspace;
void main(void)
{
//Output position of vertex in the light's clip space
gl_Position = depthMVP * vec4(vertexPosition_modelspace, 1);
}
Pass 1 Fragment Shader
#version 330 core
//Render to texture: writes the fragment's window-space depth so the
//shadow map can be compared against in pass 2.
// Output data
layout(location = 0) out float depthValue;
void main(void)
{
depthValue = gl_FragCoord.z;
}
Pass 2 Vertex Shader
#version 330 core
layout(location = 0) in vec3 vertexPosition_modelspace;
// Vertex position as seen by the light (clip space); used for the shadow lookup.
out vec4 ShadowCoord;
// Values that stay constant for the whole mesh.
uniform mat4 mvp;
uniform mat4 depthBiasMVP;
void main(){
// Output position of the vertex, in clip space : MVP * position
gl_Position = mvp * vec4(vertexPosition_modelspace,1);
// Light-space position; perspective-divided and remapped in the fragment shader.
ShadowCoord = depthBiasMVP * vec4(vertexPosition_modelspace,1);
}
Pass 2 Fragment Shader
#version 330 core
in vec4 ShadowCoord;
// Output data
layout(location = 0) out vec3 color;
// Values that stay constant for the whole mesh.
uniform sampler2D shadowMap;
void main(){
float visibility=1.0;
// Perspective divide into the light's NDC [-1,1].
vec3 ProjCoords = ShadowCoord.xyz / ShadowCoord.w;
// Remap xy to [0,1] to address the shadow map, and z to [0,1] to compare depths.
vec2 UVCoords;
UVCoords.x = 0.5 * ProjCoords.x + 0.5;
UVCoords.y = 0.5 * ProjCoords.y + 0.5;
float z = 0.5 * ProjCoords.z + 0.5;
float Depth = texture(shadowMap, UVCoords).z;//or x
// NOTE(review): the bias is ADDED to the receiver depth, making the test
// stricter and shadowing more fragments; an acne bias usually subtracts
// (Depth < z - bias). Confirm the intended comparison direction.
if (Depth < (z + 0.00001)){
visibility = 0.1;
}
// Shadowed fragments are dimmed to 10%; base color is solid red for debugging.
color = visibility*vec3(1,0,0);
}
Disable texture comparison for one thing. That's only valid when used with sampler2DShadow and you clearly are not using that in your code because your texture coordinates are 2D.
This means replacing the following code:
m_glFunctions->glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_COMPARE_FUNC, GL_LEQUAL);
m_glFunctions->glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_COMPARE_MODE, GL_COMPARE_R_TO_TEXTURE);
With this instead:
m_glFunctions->glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_COMPARE_MODE, GL_NONE);
Likewise, using GL_LINEAR filtering on a non-sampler2DShadow texture is a bad idea. That is going to average the 4 nearest depth values and give you a single depth back. But that's not the proper way to anti-alias shadows; you actually want to average the result of 4 depth tests instead of doing a single test on the average of 4 depths.

Why can't access the G-Buffer from my lighting shader?

I implemented a new rendering pipeline in my engine and rendering is broken now. When I directly draw a texture of the G-Buffer to screen, it shows up correctly. So the G-Buffer is fine. But somehow the lighting pass makes trouble. Even if I don't use the resulting texture of it but try to display albedo from G-Buffer after the lighting pass, it shows a solid gray color.
I can't explain this behavior and the strange thing is that there are no OpenGL errors at any point.
Vertex Shader to draw a fullscreen quad.
#version 330
// Fullscreen-quad vertex shader: expects corner vertices in [0,1]^2 and
// expands them to clip space [-1,1]; the vertex xy doubles as the G-Buffer
// texture coordinate.
in vec4 vertex;
out vec2 coord;
void main()
{
coord = vertex.xy;
// Scales every component, including w; with w = 1 from the immediate-mode
// glVertex2i calls this yields w = 2 - 1 = 1, so the mapping holds.
gl_Position = vertex * 2.0 - 1.0;
}
Fragment Shader for lighting.
#version 330
// Deferred lighting pass: reads position and normal from the G-Buffer and
// accumulates one light's contribution (additive blending set up by the caller).
in vec2 coord;
out vec3 image;
// 0 = directional light, 1 = point light.
uniform int type = 0;
uniform sampler2D positions;
uniform sampler2D normals;
// Light direction (directional) or position (point), transformed by the caller.
uniform vec3 light;
uniform vec3 color;
uniform float radius;
uniform float intensity = 1.0;
void main()
{
if(type == 0) // directional light
{
vec3 normal = texture2D(normals, coord).xyz;
// Half-Lambert style wrap: dot remapped from [-1,1] to [0,1], then clamped.
float fraction = max(dot(normalize(light), normal) / 2.0 + 0.5, 0);
image = intensity * color * fraction;
}
else if(type == 1) // point light
{
vec3 pixel = texture2D(positions, coord).xyz;
vec3 normal = texture2D(normals, coord).xyz;
// Clamp the distance to avoid a singularity right at the light.
float dist = max(distance(pixel, light), 1);
// Inverse-square-style falloff scaled by the light radius.
float magnitude = 1 / pow(dist / radius + 1, 2);
float cutoff = 0.4;
// Rescale so attenuation reaches exactly 0 at the cutoff.
float attenuation = clamp((magnitude - cutoff) / (1 - cutoff), 0, 1);
float fraction = clamp(dot(normalize(light - pixel), normal), -1, 1);
// Floor of 0.2 keeps back-facing surfaces from going fully black.
image = intensity * color * attenuation * max(fraction, 0.2);
}
// NOTE(review): `image` is left unwritten for any other `type` value.
}
Targets and samplers for the lighting pass. Texture ids are mapped to attachment respectively shader location.
unordered_map<GLenum, GLuint> targets;
targets.insert(make_pair(GL_COLOR_ATTACHMENT2, ...)); // light
targets.insert(make_pair(GL_DEPTH_STENCIL_ATTACHMENT, ...)); // depth and stencil
unordered_map<string, GLuint> samplers;
samplers.insert(make_pair("positions", ...)); // positions from G-Buffer
samplers.insert(make_pair("normals", ...)); // normals from G-Buffer
Draw function for lighting pass.
// Lighting pass of the deferred renderer: binds the G-Buffer textures to the
// shader's samplers, then draws one additively-blended fullscreen quad per
// light. `Samplers` maps uniform names to G-Buffer texture ids; `Program` is
// the lighting shader.
void DrawLights(unordered_map<string, GLuint> Samplers, GLuint Program)
{
auto lis = Entity->Get<Light>();
glClear(GL_COLOR_BUFFER_BIT);
// Additive blending accumulates each light's contribution.
glEnable(GL_BLEND);
glBlendFunc(GL_ONE, GL_ONE);
glUseProgram(Program);
// Bind each G-Buffer texture to its own unit and point the matching sampler at it.
int n = 0; for(auto i : Samplers)
{
glActiveTexture(GL_TEXTURE0 + n);
glBindTexture(GL_TEXTURE_2D, i.second);
glUniform1i(glGetUniformLocation(Program, i.first.c_str()), n);
n++;
}
mat4 view = Entity->Get<Camera>(*Global->Get<unsigned int>("camera"))->View;
for(auto i : lis)
{
int type = i.second->Type == Light::DIRECTIONAL ? 0 : 1;
// Directional lights transform as directions (w = 0), point lights as positions (w = 1).
vec3 pos = vec3(view * vec4(Entity->Get<Form>(i.first)->Position(), !type ? 0 : 1));
glUniform1i(glGetUniformLocation(Program, "type"), type);
glUniform3f(glGetUniformLocation(Program, "light"), pos.x, pos.y, pos.z);
glUniform3f(glGetUniformLocation(Program, "color"), i.second->Color.x, i.second->Color.y, i.second->Color.z);
glUniform1f(glGetUniformLocation(Program, "radius"), i.second->Radius);
glUniform1f(glGetUniformLocation(Program, "intensity"), i.second->Intensity);
// Immediate-mode quad in [0,1]^2; the vertex shader expands it to clip space.
glBegin(GL_QUADS);
glVertex2i(0, 0);
glVertex2i(1, 0);
glVertex2i(1, 1);
glVertex2i(0, 1);
glEnd();
}
// Restore state so later passes are unaffected.
glDisable(GL_BLEND);
glActiveTexture(GL_TEXTURE0);
glBindBuffer(GL_ARRAY_BUFFER, 0);
glBindTexture(GL_TEXTURE_2D, 0);
}
I found the error and it was such a stupid one. The old rendering pipeline bound the correct framebuffer before calling the draw function of that pass. But the new one didn't so each draw function had to do that itself. Therefore I wanted to update all draw function, but I missed the draw function of the lighting pass.
Therefore the framebuffer of the G-Buffer was still bound and the lighting pass changed its targets.
Thanks to you guys anyway — you had no chance to find that error, since I hadn't posted my complete pipeline system.