I'm trying to implement ray-casting-based volume rendering, and for that I need to pass a float array to the fragment shader as a texture (sampler3D).
I've got a volume data structure containing all the voxels. Each voxel holds a density value, so for processing I stored the values in a float array.
//initialize glew, initialize glfw, create window, etc.
float* density;
density = new float[volume->size()];
for (int i = 0; i < volume->size(); i++){
density[i] = volume->voxel(i).getValue();
}
Then I tried creating and binding the texture.
glGenTextures(1, &textureHandle);
glBindTexture(GL_TEXTURE_3D, textureHandle);
glTexParameteri(GL_TEXTURE_3D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_3D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_3D, GL_TEXTURE_WRAP_S, GL_REPEAT);
glTexParameteri(GL_TEXTURE_3D, GL_TEXTURE_WRAP_T, GL_REPEAT);
glTexParameteri(GL_TEXTURE_3D, GL_TEXTURE_WRAP_R, GL_REPEAT);
glPixelStorei(GL_UNPACK_ALIGNMENT, 1);
glTexImage3D(GL_TEXTURE_3D, 0, GL_LUMINANCE, volume->width(),
volume->height(), volume->depth(), 0, GL_LUMINANCE, GL_FLOAT, density);
In my render loop I try to hook the texture up to the uniform sampler3D.
glClearColor(0.4f, 0.2f, 0.3f, 1.0f);
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
glActiveTexture(GL_TEXTURE0);
GLint gSampler = glGetUniformLocation(shader->shaderProgram, "volume");
glUniform1i(gSampler, 0);
cube->draw();
So the basic idea is to calculate the current position and ray direction for the ray casting in the vertex shader.
in vec3 position;
uniform mat4 model;
uniform mat4 view;
uniform mat4 projection;
uniform vec4 cameraPos;
out vec3 pos;
out vec3 dir;
void main(){
gl_Position = projection * view * model * vec4(position, 1.0);
pos = position;
dir = pos - (inverse(model) * cameraPos).xyz;
}
That seems to work; so far so good. The fragment shader looks like this: I take samples along the ray, and the largest density value found is used as the color for red, green, and blue.
#version 330 core
in vec3 pos;
in vec3 dir;
uniform sampler3D volume;
out vec4 color;
const float stepSize = 0.008;
const int iterations = 1000;
void main(){
vec3 rayDir = normalize(dir);
vec3 rayPos = pos;
float src;
float dst = 0;
float density = 0;
for(int i = 0; i < iterations; i++){
src = texture(volume, rayPos).r;
if(src > density){
density = src;
}
rayPos += rayDir * stepSize;
//check whether rays are within bounds. if not -> break.
}
color = vec4(density, density, density, 1.0f);
}
Now I've tried inserting a small debug check.
if(src != 0){
rayPos = vec3(1.0f);
break;
}
But src seems to be 0 at every iteration for every pixel, which leads me to the conclusion that the sampler isn't set up correctly. Debugging the C++ code, I get the correct values in the density array right before I pass it to the shader, so I guess there must be some OpenGL call missing. Thanks in advance!
glTexImage3D(GL_TEXTURE_3D, 0, GL_LUMINANCE, volume->width(), volume->height(), volume->depth(), 0, GL_LUMINANCE, GL_FLOAT, density);
Unless density is in the range [0, 1], this is almost certainly not doing what you intend.
GL_LUMINANCE, when used as an internal format (the third parameter to glTexImage3D), means that each pixel in OpenGL's texture data will contain a single normalized integer value. So if you want a floating-point value, you're kinda out of luck.
The proper way to do this is to explicitly declare the type and pixel size of the data. Luminance was removed from the core OpenGL profile back in 3.1, so the way to do that today is to use GL_R32F as your internal format. That declares that each pixel contains one value, and that value is a 32-bit float.
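Applied to the upload from the question, that would look something like this (a sketch; the names are taken from the question's code):
glTexImage3D(GL_TEXTURE_3D, 0, GL_R32F, volume->width(),
    volume->height(), volume->depth(), 0, GL_RED, GL_FLOAT, density);
The shader then reads the value from the red channel, which the fragment shader above already does with texture(volume, rayPos).r.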
If you really need to broadcast the value across the RGB channels, you can use texture swizzling to accomplish that. You can set a swizzle mask to broadcast the red component to any other channel you like.
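For example (a sketch; this makes .g, .b reads return the red value and .a read as one):
GLint swizzle[4] = { GL_RED, GL_RED, GL_RED, GL_ONE };
glTexParameteriv(GL_TEXTURE_3D, GL_TEXTURE_SWIZZLE_RGBA, swizzle);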
glActiveTexture(GL_TEXTURE0);
GLint gSampler = glGetUniformLocation(shader->shaderProgram, "volume");
glUniform1i(gSampler, 0);
I've heard that binding the texture is also a good idea. You know, if you actually want to read from it ;)
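That is, the render loop never calls glBindTexture after glActiveTexture, so texture unit 0 has nothing to sample from. Something along these lines should do it (textureHandle as in the question):
glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_3D, textureHandle);
glUniform1i(gSampler, 0);
cube->draw();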
Related
I have a scalar field of values which I have mapped to a 3D texture (image_texture). Then, given a plane gPlaneParams, I have to render the texture of the scalar field along it.
What I'm doing:
I send 4 points which span the window dimensions to the shaders as two triangles. I bind the texture using a sampler in the fragment shader. Below is the fragment shader code.
#version 330 core
uniform sampler3D text_sampler;
uniform vec4 gPlaneParams;
in vec4 inPos;
void main()
{
    vec4 Pos = inPos;
    // The position input is a square [-1,1]^2
    // and needs to be mapped to the plane ax+by+cz=d, where a,b,c,d are the plane parameters
    // and x,y,z belong to [0,1]^3.
    if (gPlaneParams.z != 0) {
        Pos.z = (gPlaneParams.w - gPlaneParams.x*Pos.x - gPlaneParams.y*Pos.y) / gPlaneParams.z;
    }
    else {
        if (gPlaneParams.x != 0) {
            Pos.z = Pos.x;
            Pos.x = (gPlaneParams.w - gPlaneParams.y*Pos.y - gPlaneParams.z*Pos.z) / gPlaneParams.x;
        }
        else if (gPlaneParams.y != 0) {
            Pos.z = Pos.y;
            Pos.y = (gPlaneParams.w - gPlaneParams.x*Pos.x - gPlaneParams.z*Pos.z) / gPlaneParams.y;
        }
    }
    gl_FragColor = vec4(1.0, 0, 0, 0) * texture3D(text_sampler, (Pos.xyz + 1) / 2);
}
In my C++ code, I bind the texture as follows:
glGenTextures(1, textureID);
glBindTexture(GL_TEXTURE_3D, textureID[0]);
glTexImage3D(GL_TEXTURE_3D, 0, GL_RGB, object_size[0], object_size[1], object_size[2], 0, GL_RGB, GL_UNSIGNED_INT, image_texture1);
glTexParameterf(GL_TEXTURE_3D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameterf(GL_TEXTURE_3D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameterf(GL_TEXTURE_3D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameterf(GL_TEXTURE_3D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
glTexParameterf(GL_TEXTURE_3D, GL_TEXTURE_WRAP_R, GL_CLAMP_TO_EDGE);
glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_3D, textureID[0]);
bool err = glIsTexture(textureID[0]);
cout << "Texture bound? " << err << endl;
Unfortunately, this does not render any output. Can someone help me figure out what I'm doing wrong?
I have done everything else correctly:
The 4 vertices and 2 triangles are properly bound (I can render them by giving them constant colors).
The image texture is contiguous in memory: image_texture = (unsigned int*) malloc(object_size[0] * object_size[1] * object_size[2] * 3 * sizeof(unsigned int));
All my inputs to the shader are successfully bound:
gSamplerLocation = glGetUniformLocation(ShaderProgram, "text_sampler");
gPLaneLoc = glGetUniformLocation(ShaderProgram, "gPlaneParams");
glUniform1i(gSamplerLocation, 0);
glUniform4f(gPLaneLoc,plane_params[0],plane_params[1],plane_params[2],plane_params[3]);
I've written this code to render a 2D map of square tiles:
#define TILE_NUM_INDICES 6
inline static u32 GetRandomIntBetween(u32 min, u32 max) {
return (u32)rand() % (max - min + 1) + min;
}
static void GetRandomTileMap(u32* map, u32 size) {
for (int i = 0; i < size; i++) {
u32 r = GetRandomIntBetween(0, 23);
map[i] = r;
}
}
NewRenderer::NewRenderer(const NewRendererInitialisationInfo& info)
:m_tileShader("shaders\\TilemapVert2.glsl", "shaders\\TilemapFrag2.glsl"),
m_worldMapSize(info.tilemapSizeX, info.tilemapSizeY),
m_tilemapChunkSize(info.chunkSizeX, info.chunkSizeY),
m_windowWidth(info.windowWidth),
m_windowHeight(info.windowHeight)
{
using namespace std;
const u32 mapsize = info.tilemapSizeX * info.tilemapSizeY;
m_worldTextureBytes = make_unique<u32[]>(mapsize);
GetRandomTileMap(m_worldTextureBytes.get(), mapsize);
glGenTextures(1, &m_worldTextureHandle);
glBindTexture(GL_TEXTURE_2D, m_worldTextureHandle);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST); // GL_NEAREST is the better filtering option for this game
glTexImage2D(GL_TEXTURE_2D, 0, GL_RED, info.tilemapSizeX, info.tilemapSizeY, 0, GL_RED, GL_UNSIGNED_INT, m_worldTextureBytes.get());
glGenerateMipmap(GL_TEXTURE_2D);
glGenVertexArrays(1, &m_vao);
}
void NewRenderer::DrawChunk(
const glm::ivec2& chunkWorldMapOffset,
const glm::vec2& pos,
const glm::vec2& scale,
float rotation,
ArrayTexture2DHandle texArray,
const Camera2D& cam
) const
{
m_tileShader.use();
glm::mat4 model = glm::mat4(1.0f);
model = glm::translate(model, glm::vec3(pos, 0.0f));
model = glm::rotate(model, glm::radians(rotation), glm::vec3(0.0f, 0.0f, 1.0f));
model = glm::scale(model, glm::vec3(scale, 1.0f));
m_tileShader.setMat4("vpMatrix", cam.GetProjectionMatrix(m_windowWidth, m_windowHeight));
m_tileShader.setMat4("modelMatrix", model);
m_tileShader.SetIVec2("chunkOffset", chunkWorldMapOffset);
m_tileShader.SetIVec2("chunkSize", m_tilemapChunkSize);
m_tileShader.setInt("masterTileTexture", 0);
m_tileShader.setInt("atlasSampler", 1);
glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_2D, m_worldTextureHandle);
glActiveTexture(GL_TEXTURE1);
glBindTexture(GL_TEXTURE_2D_ARRAY, texArray);
glBindVertexArray(m_vao);
glDrawArrays(GL_TRIANGLES, 0, m_tilemapChunkSize.x * m_tilemapChunkSize.y * TILE_NUM_INDICES);
}
(Vertex shader)
#version 440 core
/*
cpp setup:
create a big index buffer
*/
layout (location = 0) in vec2 pos;
layout (location = 1) in vec2 uv;
out vec3 TexCoords;
uniform mat4 vpMatrix;
uniform mat4 modelMatrix;
uniform ivec2 chunkOffset;
uniform ivec2 chunkSize;
uniform sampler2D masterTileTexture;
#define TILE_NUM_VERTS 4
#define NUM_TILE_INDICES 6
void main()
{
// vertices and indices that make up two triangles (a quad)
// ie one tile in the map
vec4 vertices[TILE_NUM_VERTS] = vec4[TILE_NUM_VERTS](
vec4(0.5f, 0.5f, 1.0f, 1.0f),
vec4(0.5f, -0.5f, 1.0f, 0.0f),
vec4(-0.5f, -0.5f, 0.0f, 0.0f),
vec4(-0.5f, 0.5f, 0.0f, 1.0f)
);
int indices[NUM_TILE_INDICES] = int[NUM_TILE_INDICES](
0, 1, 3, // first triangle
1, 2, 3 // second triangle
);
// cycle through indices
int index = indices[int(gl_VertexID % NUM_TILE_INDICES)];
// get base vertex
vec4 baseVertex = vertices[index];
// which tile in the map is being drawn?
int whichTile = gl_VertexID / NUM_TILE_INDICES;
// transform into x,y coords of the tile in the chunk
ivec2 tilexy = ivec2(int(whichTile / chunkSize.y), int(whichTile % chunkSize.y));
// translate base vertex by tilexy
baseVertex.xy += vec2(tilexy);
// set the z coord of the tex coords passed based on what tile is here
// in the master tile map.
// based on shader output all steps up to here are successful, a grid is drawn.
// The problem is the texelFetch is not working, it's always the same tile drawn.
TexCoords = vec3(
baseVertex.zw,
// changing this to different hard-coded values does change what tile is drawn, as expected, so the sampler2DArray is set up correctly
float(texelFetch(masterTileTexture, tilexy + chunkOffset, 0).r));
gl_Position = vpMatrix * modelMatrix * vec4(baseVertex.xy, 0.0, 1.0);
}
(Frag shader)
#version 440 core
uniform sampler2DArray atlasSampler;
in vec3 TexCoords;
out vec4 FragColor;
void main()
{
FragColor = texture(atlasSampler, TexCoords);
}
The idea is that it will be used to draw chunks of a large texture, each pixel of which represents a tile. The basic premise seems to work: a grid of tiles is drawn. However, the texelFetch line in the vertex shader does not seem to be working, or the texture containing the tile indices is not set up properly, as only the tile with index 0 is ever drawn.
To test it, I make a texture that contains random values for the tile indices; debugging the code, I can see that random values are inserted into the texture buffer.
I've used texelFetch in a shader before and it worked, and as far as I can tell I am using it correctly here.
Can anyone spot what is wrong with my code?
glTexImage2D(GL_TEXTURE_2D, 0, GL_RED, info.tilemapSizeX, info.tilemapSizeY, 0, GL_RED, GL_UNSIGNED_INT, m_worldTextureBytes.get());
This creates a texture in a normalized fixed-point format. When you read it in the shader (through texelFetch) the value is always going to be between 0 and 1, thus sampling the 0th layer from the array texture.
OpenGL 4.4 supports integer texture formats, which is what you should use here. Replace the first GL_RED with GL_R8UI, GL_R16UI or GL_R32UI, whichever is more appropriate, and the second GL_RED with GL_RED_INTEGER. E.g.:
glTexImage2D(GL_TEXTURE_2D, 0, GL_R32UI, //<---
info.tilemapSizeX, info.tilemapSizeY, 0, GL_RED_INTEGER, //<---
GL_UNSIGNED_INT, m_worldTextureBytes.get());
Additionally you have to change the sampler2D in the shader to a matching integer sampler type. For the above internal format, the matching sampler would be usampler2D:
uniform usampler2D masterTileTexture;
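With an integer sampler, texelFetch returns unnormalized unsigned integers, so the lookup in the vertex shader would become something like this (a sketch against the question's own code):
uint tile = texelFetch(masterTileTexture, tilexy + chunkOffset, 0).r;
TexCoords = vec3(baseVertex.zw, float(tile));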
EDIT: Also you have to set the MAG filter to GL_NEAREST, since it's the only one that's supported:
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
(MIN filter could also be GL_NEAREST_MIPMAP_NEAREST.)
I'm trying to implement deferred rendering in an engine I'm developing as a personal learning project, and I can't figure out what I'm doing wrong when I render all the textures in the GBuffer to check whether the implementation is okay.
The thing is that I currently have a framebuffer with 3 color attachments for the different textures of the GBuffer (color, normal and position), which I initialize as follows:
glCreateFramebuffers(1, &id);
glBindFramebuffer(GL_FRAMEBUFFER, id);
std::vector<uint> textures;
textures.resize(3);
glCreateTextures(GL_TEXTURE_2D, 3, textures.data());
for(size_t i = 0; i < 3; ++i)
{
glBindTexture(GL_TEXTURE_2D, textures[i]);
if(i == 0) // For Color Buffer
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA8, width, height, 0, GL_RGBA, GL_UNSIGNED_BYTE, nullptr);
else
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA16F, width, height, 0, GL_RGBA, GL_FLOAT, nullptr);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0 + i, GL_TEXTURE_2D, textures[i], 0);
}
GLenum color_buffers[3] = { GL_COLOR_ATTACHMENT0, GL_COLOR_ATTACHMENT1, GL_COLOR_ATTACHMENT2 };
glDrawBuffers((GLsizei)textures.size(), color_buffers);
uint depth_texture;
glCreateTextures(GL_TEXTURE_2D, 1, &depth_texture);
glBindTexture(GL_TEXTURE_2D, depth_texture);
glTexStorage2D(GL_TEXTURE_2D, 1, GL_DEPTH24_STENCIL8, width, height);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glFramebufferTexture2D(GL_FRAMEBUFFER, GL_DEPTH_STENCIL_ATTACHMENT, GL_TEXTURE_2D, depth_texture, 0);
bool fbo_status = glCheckFramebufferStatus(GL_FRAMEBUFFER) == GL_FRAMEBUFFER_COMPLETE;
ASSERT(fbo_status, "Framebuffer Incompleted!");
glBindFramebuffer(GL_FRAMEBUFFER, 0);
This is not reporting any errors, and it seems to work, since the framebuffer of the forward renderer renders properly. Then, when rendering, I run the following code after binding the framebuffer and clearing the color and depth buffers:
camera_buffer->Bind();
camera_buffer->SetData("ViewProjection", glm::value_ptr(viewproj_mat));
camera_buffer->SetData("CamPosition", glm::value_ptr(glm::vec4(view_position, 0.0f)));
camera_buffer->Unbind();
for(Entity& entity : scene_entities)
{
shader->Bind();
Texture* texture = entity.GetTexture();
BindTexture(0, texture);
shader->SetUniformMat4("u_Model", entity.transform);
shader->SetUniformInt("u_Albedo", 0);
shader->SetUniformVec4("u_Material.AlbedoColor", entity.AlbedoColor);
shader->SetUniformFloat("u_Material.Smoothness", entity.Smoothness);
glBindVertexArray(entity.VertexArray);
glDrawElements(GL_TRIANGLES, entity.VertexArray.index_buffer.count, GL_UNSIGNED_INT, nullptr);
// Shader, VArray and Textures Unbindings
}
So with this code I manage to render the 3 textures created, using the ImGui::Image function and switching the texture index between 0, 1 and 2, like this:
ImGui::Image((ImTextureID)(fbo->textures[0]), viewport_size, ImVec2(0, 1), ImVec2(1, 0));
Now, the color texture (at index 0) works perfectly, as the next image shows:
But when rendering the normal and position textures (indices 1 and 2), I get no result:
Does anybody sees what I'm doing wrong? Because I've been hours and hours with this and I cannot see it. I ran this on RenderDoc and I couldn't see anything wrong, the textures displayed in RenderDoc are the same than in the engine.
The vertex shader I use when rendering the entities is the following:
layout(location = 0) in vec3 a_Position;
layout(location = 1) in vec2 a_TexCoord;
layout(location = 2) in vec3 a_Normal;
out IBlock
{
vec2 TexCoord;
vec3 FragPos;
vec3 Normal;
} v_VertexData;
layout(std140, binding = 0) uniform ub_CameraData
{
mat4 ViewProjection;
vec3 CamPosition;
};
uniform mat4 u_ViewProjection = mat4(1.0);
uniform mat4 u_Model = mat4(1.0);
void main()
{
vec4 world_pos = u_Model * vec4(a_Position, 1.0);
v_VertexData.TexCoord = a_TexCoord;
v_VertexData.FragPos = world_pos.xyz;
v_VertexData.Normal = transpose(inverse(mat3(u_Model))) * a_Normal;
gl_Position = ViewProjection * u_Model * vec4(a_Position, 1.0);
}
And the fragment shader is the following; they are both pretty simple:
layout(location = 0) out vec4 gBuff_Color;
layout(location = 1) out vec3 gBuff_Normal;
layout(location = 2) out vec3 gBuff_Position;
in IBlock
{
vec2 TexCoord;
vec3 FragPos;
vec3 Normal;
} v_VertexData;
struct Material
{
float Smoothness;
vec4 AlbedoColor;
};
uniform Material u_Material = Material(1.0, vec4(1.0));
uniform sampler2D u_Albedo, u_Normal;
void main()
{
gBuff_Color = texture(u_Albedo, v_VertexData.TexCoord) * u_Material.AlbedoColor;
gBuff_Normal = normalize(v_VertexData.Normal);
gBuff_Position = v_VertexData.FragPos;
}
It is not clear from the question what exactly might be happening here, as lots of GL state - both at the time of rendering to the gbuffer, and at the time the gbuffer texture is rendered for visualization - is simply unknown. However, from the images given in the question, one cannot conclude that the actual color output for attachments 1 and 2 is not working.
One issue which comes to mind is alpha blending. The color values processed by the per-fragment operations after the fragment shader always carry RGBA values - although the value of the A channel only matters if you enabled blending and use a blend function which somehow depends on the source alpha.
If you declare a custom fragment shader output as float, vec2 or vec3, the remaining components stay undefined (an undefined value, not undefined behavior). This does not pose a problem unless some other operation depends on those values.
What we also have here is a GL_RGBA16F output format (which is the right choice, because none of the 3-component RGB formats are required to be color-renderable by the spec).
What might happen here is either:
Alpha blending is already turned on while rendering into the g-buffer. The fragment shader's alpha output happens to be zero, so the fragments appear 100% transparent and the contents of the texture are not changed.
Alpha blending is not used while rendering into the g-buffer, so the correct contents end up in the texture; the alpha channel just happens to end up all zeros. The texture might then be visualized with alpha blending enabled, ending up as a 100% transparent view.
If it is the first option, turn off blending when rendering into the g-buffer. It would not work with deferred shading anyway. You might still run into the second option then.
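For reference, that is a single state change before the geometry pass (sketch):
glDisable(GL_BLEND);  // no blending while filling the g-buffer
// ... draw the scene entities into the g-buffer ...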
If it is the second option, there is no issue at all: the lighting passes which follow will read the data they need (and ultimately, you will want to put useful information into the alpha channel so as not to waste it and to be able to reduce the number of attachments). It is just your visualization (which I assume is for debug purposes only) that is wrong. You can try to fix the visualization.
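One hedged way to fix the debug view without touching the visualizer is to write an explicit opaque alpha from the g-buffer shader, e.g. by widening the outputs to vec4 (a sketch against the fragment shader from the question):
layout(location = 1) out vec4 gBuff_Normal;
layout(location = 2) out vec4 gBuff_Position;
// in main():
gBuff_Normal = vec4(normalize(v_VertexData.Normal), 1.0);
gBuff_Position = vec4(v_VertexData.FragPos, 1.0);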
As a side note: storing the world-space position in the G-Buffer is a huge waste of bandwidth. All you need to reconstruct the world-space position is the depth value and the inverses of your view and projection matrices. Also, storing world-space position in GL_RGBA16F will very easily run into precision issues if you move your camera away from the world-space origin.
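A minimal GLSL sketch of that reconstruction, assuming a hypothetical uniform invViewProj (the inverse of projection * view), the screen-space UV and a sampled depth value:
vec3 reconstructWorldPos(vec2 uv, float depth, mat4 invViewProj)
{
    // back from [0,1] window coordinates to clip space
    vec4 clip = vec4(uv * 2.0 - 1.0, depth * 2.0 - 1.0, 1.0);
    vec4 world = invViewProj * clip;
    return world.xyz / world.w;  // perspective divide
}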
I am having problems getting the correct texture coordinate to sample my shadow map. Looking at my code, the problem appears to be from incorrect matrices. This is the fragment shader for the rendering pass where I do shadows:
in vec2 st;
uniform sampler2D colorTexture;
uniform sampler2D normalTexture;
uniform sampler2D depthTexture;
uniform sampler2D shadowmapTexture;
uniform mat4 invProj;
uniform mat4 lightProj;
uniform vec3 lightPosition;
out vec3 color;
void main () {
vec3 clipSpaceCoords;
clipSpaceCoords.xy = st.xy * 2.0 - 1.0;
clipSpaceCoords.z = texture(depthTexture, st).x * 2.0 - 1.0;
vec4 position = invProj * vec4(clipSpaceCoords,1.0);
position.xyz /= position.w;
//At this point, position.xyz seems to be what it should be, the world space coordinates of the pixel. I know this because it works for lighting calculations.
vec4 lightSpace = lightProj * vec4(position.xyz,1.0);
//This line above is where I think things go wrong.
lightSpace.xyz /= lightSpace.w;
lightSpace.xyz = lightSpace.xyz * 0.5 + 0.5;
float lightDepth = texture(shadowmapTexture, lightSpace.xy).x;
//Right here lightDepth seems to be incorrect. The only explanation I can think of for this is if there is a problem in the above calculations leading to lightSpace.xy.
float shadowFactor = 1.0;
if(lightSpace.z > lightDepth+0.0005) {
shadowFactor = 0.2;
}
color = vec3(lightDepth);
}
I have removed all the code irrelevant to shadowing from this shader (lighting, etc.). This is the code I use to render the final pass:
glCullFace(GL_BACK);
glBindFramebuffer(GL_FRAMEBUFFER, 0);
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
postShader->UseShader();
postShader->SetUniform1I("colorTexture", 0);
postShader->SetUniform1I("normalTexture", 1);
postShader->SetUniform1I("depthTexture", 2);
postShader->SetUniform1I("shadowmapTexture", 3);
//glm::vec3 cp = camera->GetPosition();
postShader->SetUniform4FV("invProj", glm::inverse(camera->GetCombinedProjectionView()));
postShader->SetUniform4FV("lightProj", lights[0].camera->GetCombinedProjectionView());
//Again, if I had to guess, these two lines above would be part of the problem.
postShader->SetUniform3F("lightPosition", lights[0].x, lights[0].y, lights[0].z);
glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_2D, frameBuffer->GetColor());
glActiveTexture(GL_TEXTURE1);
glBindTexture(GL_TEXTURE_2D, frameBuffer->GetNormals());
glActiveTexture(GL_TEXTURE2);
glBindTexture(GL_TEXTURE_2D, frameBuffer->GetDepth());
glActiveTexture(GL_TEXTURE3);
glBindTexture(GL_TEXTURE_2D, lights[0].shadowmap->GetDepth());
this->BindPPQuad();
glDrawArrays(GL_TRIANGLES, 0, 6);
In case it is relevant to my problem, here is how I generate the depth framebuffer attachments for the depth and shadow maps:
void FrameBuffer::Init(int textureWidth, int textureHeight) {
glGenFramebuffers(1, &fbo);
glGenTextures(1, &depth);
glBindTexture(GL_TEXTURE_2D, depth);
glTexImage2D(GL_TEXTURE_2D, 0, GL_DEPTH_COMPONENT24, textureWidth, textureHeight, 0, GL_DEPTH_COMPONENT, GL_UNSIGNED_BYTE, NULL);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP);
glFramebufferTexture2D(GL_FRAMEBUFFER, GL_DEPTH_ATTACHMENT, GL_TEXTURE_2D, depth, 0);
glBindFramebuffer(GL_FRAMEBUFFER, 0);
}
Where is the problem in my math or my code, and what can I do to fix it?
After some experimentation, I have found that my problem does not lie in my matrices but in my clamping. I get strange values when I use GL_CLAMP or GL_CLAMP_TO_EDGE, but almost correct values when I use GL_CLAMP_TO_BORDER. There are more problems, but they do not seem to be matrix-related as I thought.
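If GL_CLAMP_TO_BORDER is the fix, it usually pays to also set an explicit border color, so that samples falling outside the shadow map read as depth 1.0 and compare as "farther than any occluder", i.e. lit. A common setup (hedged; this is not taken from the question's code):
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_BORDER);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_BORDER);
GLfloat border[4] = { 1.0f, 1.0f, 1.0f, 1.0f };
glTexParameterfv(GL_TEXTURE_2D, GL_TEXTURE_BORDER_COLOR, border);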
I create a texture for use in a 2D sampler as a displacement map for a mesh of tessellated terrain. Using the passed-in vertex coordinates, I get a smoothly interpolated value between the patch corners for the generated vertices. But when I use the height value derived from the sampler, all I get is a flat plane. When I multiply that value by a hundred, the height of the plane increases by around a hundred, leading me to believe the alpha value is constantly one.
Here is the GLSL evaluation shader and the texture setup.
#version 430
layout(triangles, equal_spacing, ccw) in;
uniform mat4 camera;
uniform mat4 model;
uniform sampler2D terrain;
//uniform float lod_factor;
uniform float size;
in vec4 WorldPos_ES_in[];
in vec2 TexCoord_ES_in[];
in vec3 Normal_ES_in[];
out vec4 WorldPos_FS_in;
out vec2 TexCoord_FS_in;
out vec3 Normal_FS_in;
vec3 interpolate3D(vec3, vec3, vec3);
vec2 interpolate2D(vec2, vec2, vec2);
void main()
{
// Interpolate the attributes of the output vertex using the barycentric coordinates
TexCoord_FS_in = interpolate2D(TexCoord_ES_in[0], TexCoord_ES_in[1], TexCoord_ES_in[2]);
Normal_FS_in = interpolate3D(Normal_ES_in[0], Normal_ES_in[1], Normal_ES_in[2]);
Normal_FS_in = normalize(Normal_FS_in);
WorldPos_FS_in = vec4(interpolate3D(WorldPos_ES_in[0].xyz, WorldPos_ES_in[1].xyz, WorldPos_ES_in[2].xyz),1);
vec2 position=WorldPos_FS_in.xz;
float Displacement = texture(terrain, position/size).a;
//gl_Position = camera*model * WorldPos_FS_in;
gl_Position = camera*model * vec4(WorldPos_FS_in.x, Displacement,WorldPos_FS_in.z, 1.0);
}
vec2 interpolate2D(vec2 v0, vec2 v1, vec2 v2)
{
return vec2(gl_TessCoord.x) * v0 + vec2(gl_TessCoord.y) * v1 + vec2(gl_TessCoord.z) * v2;
}
vec3 interpolate3D(vec3 v0, vec3 v1, vec3 v2)
{
return vec3(gl_TessCoord.x) * v0 + vec3(gl_TessCoord.y) * v1 + vec3(gl_TessCoord.z) * v2;
}
and
glActiveTexture(GL_TEXTURE1);
GLuint tex2 = createTerrainMap();
glBindTexture(GL_TEXTURE_2D, tex2);
shader->setUniform("terrain", 1);
static GLuint createTerrainMap(){
GLuint texName;
glGenTextures(1, &texName);
glBindTexture(GL_TEXTURE_2D, texName);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA32F, size+1, size+1, 0, GL_RGBA, GL_FLOAT, terrainM);
return texName;
}
terrainM is declared as
GLfloat terrainM[size+1][size+1][4];
As mentioned, using the vertex coordinates yields the correct result, but that defeats the purpose of the displacement map. In addition, I use another texture from a file on GL_TEXTURE1, which is sampled with TexCoord_ES_in in the fragment shader and is applied correctly. Any ideas as to what is causing the flat plane instead of a displaced value?
Since the tessellation evaluation shader does not provide data about the position of the triangle beyond the barycentric coordinates inside it, using the interpolated world-space data on its own is useless. In my situation, where the world coordinates run from 0 to an arbitrary amount, another set of UV coordinates is needed to find the correct height.
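In practice, that means sampling the displacement with the interpolated per-vertex texture coordinates instead of the world-space XZ position, e.g. (a sketch using the shader's existing TexCoord_FS_in):
float Displacement = texture(terrain, TexCoord_FS_in).a;
gl_Position = camera * model * vec4(WorldPos_FS_in.x, Displacement, WorldPos_FS_in.z, 1.0);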