Odd OpenGL shadow mapping behaviour - C++
I am working on a 3D game in C++ and OpenGL 3.2 with SFML. I have been struggling to implement point light shadow mapping. What I have done so far seems to conform to what I have learnt and to the examples I have seen, but still, no shadows.
Below is a simplified listing of the code I use, in the exact order I use it. It is not the full source, only the relevant parts, because my project is split across several classes:
Omnidirectional shadow mapping
C++
- Initialization
-- Use shadow pass shader program
-- Generate + bind the shadow frame buffer
glGenFramebuffers(1, &shadowFrameBuffer);
glBindFramebuffer(GL_FRAMEBUFFER, shadowFrameBuffer);
-- Generate a texture
glGenTextures(1, &shadowMap);
-- Bind texture as cubemap
glBindTexture(GL_TEXTURE_CUBE_MAP, shadowMap);
-- Set texture parameters
glTexParameteri(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_WRAP_R, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
-- Generate an empty 1024 x 1024 depth image for every face of the cube
for (int face = 0; face < 6; face++)
    glTexImage2D(GL_TEXTURE_CUBE_MAP_POSITIVE_X + face, 0, GL_DEPTH_COMPONENT32F, 1024, 1024, 0, GL_DEPTH_COMPONENT, GL_FLOAT, NULL);
-- Attach the cubemap to the framebuffer
glFramebufferTexture(GL_FRAMEBUFFER, GL_DEPTH_ATTACHMENT, shadowMap, 0);
-- Only draw depth to framebuffer
glDrawBuffer(GL_NONE);
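(A sanity check that is not in the listing above: a depth-only framebuffer is usually verified for completeness before use, and the read buffer is often disabled as well. A minimal sketch, assuming the shadow framebuffer is still bound:)
glReadBuffer(GL_NONE); // no colour buffer to read from either
GLenum fboStatus = glCheckFramebufferStatus(GL_FRAMEBUFFER);
if (fboStatus != GL_FRAMEBUFFER_COMPLETE)
    fprintf(stderr, "Shadow framebuffer incomplete: 0x%x\n", fboStatus);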
- Every frame
-- Clear screen
glClearColor(0.0f, 0.0f, 0.0f, 1.0f);
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
-- Render shadow map
--- Bind shadow frame buffer
glBindFramebuffer(GL_FRAMEBUFFER, shadowFrameBuffer);
--- Set the viewport to the size of the shadow map
glViewport(0, 0, 1024, 1024);
-- Cull front faces
glCullFace(GL_FRONT);
-- Use shadow mapping program
--- Define projection matrix for rendering each face
glm::mat4 depthProjectionMatrix = glm::perspective(90.0f, 1.0f, 1.0f, 10.0f);
--- Define view matrices for all six faces
std::vector<glm::mat4> depthViewMatrices;
depthViewMatrices.push_back(glm::lookAt(lightInvDir, glm::vec3(1,0,0), glm::vec3(0,-1,0) )); // +X
depthViewMatrices.push_back(glm::lookAt(lightInvDir, glm::vec3(-1,0,0), glm::vec3(0,1,0) )); // -X
depthViewMatrices.push_back(glm::lookAt(lightInvDir, glm::vec3(0,1,0), glm::vec3(0,0,1) )); // +Y
depthViewMatrices.push_back(glm::lookAt(lightInvDir, glm::vec3(0,-1,0), glm::vec3(0,0,-1) )); // -Y
depthViewMatrices.push_back(glm::lookAt(lightInvDir, glm::vec3(0,0,1), glm::vec3(0,-1,0) )); // +Z
depthViewMatrices.push_back(glm::lookAt(lightInvDir, glm::vec3(0,0,-1), glm::vec3(0,1,0) )); // -Z
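(Two asides on the matrices above, not part of the original listing: depending on the GLM version and GLM_FORCE_RADIANS, glm::perspective may expect the field of view in radians rather than degrees; and because the light here sits at the origin, the lookAt targets can be plain axis directions, whereas a light positioned elsewhere would need each target offset by the light position. A hedged sketch, with lightPos as an assumed variable:)
glm::mat4 depthProjectionMatrix = glm::perspective(glm::radians(90.0f), 1.0f, 1.0f, 10.0f);
glm::vec3 lightPos(0.0f, 0.0f, 0.0f); // assumed; matches the light position hard-coded in the shaders below
depthViewMatrices.push_back(glm::lookAt(lightPos, lightPos + glm::vec3(1, 0, 0), glm::vec3(0, -1, 0))); // +X, and likewise for the other five faces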
--- For every object in the scene
---- Bind the VBO of the object
---- Define the model matrix for the object based on its position and orientation
---- For all six sides of the cube
----- Set the correct side to render to
glFramebufferTexture2D(GL_FRAMEBUFFER, GL_DEPTH_ATTACHMENT, GL_TEXTURE_CUBE_MAP_POSITIVE_X + i, shadowMap, 0);
----- Clear depth buffer
glClear(GL_DEPTH_BUFFER_BIT);
----- Send model, view and projection matrices to shadow mapping shader
glUniformMatrix4fv(glGetUniformLocation(shadowMapper, "lightModelMatrix"), 1, GL_FALSE, glm::value_ptr(depthModelMatrix));
glUniformMatrix4fv(glGetUniformLocation(shadowMapper, "lightViewMatrix"), 1, GL_FALSE, glm::value_ptr(depthViewMatrices[i]));
glUniformMatrix4fv(glGetUniformLocation(shadowMapper, "lightProjectionMatrix"), 1, GL_FALSE, glm::value_ptr(depthProjectionMatrix));
----- Draw the object
glDrawElements(....);
- END SHADOW MAP DRAW
-- Cull back faces
glCullFace(GL_BACK);
-- Use standard shader program
-- Bind default framebuffer
glBindFramebuffer(GL_FRAMEBUFFER, 0);
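(Not in the original listing, but worth noting: the viewport is still 1024 x 1024 from the shadow pass, so it would normally be restored to the window size about here. A sketch, with windowWidth / windowHeight as placeholders:)
glViewport(0, 0, windowWidth, windowHeight);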
-- Activate cubemap texture
glActiveTexture(GL_TEXTURE1);
-- Bind cubemap texture
glBindTexture(GL_TEXTURE_CUBE_MAP, shadowMap);
-- Tell shader that the shadow map sampler uses texture unit 1
glUniform1i(glGetUniformLocation(currentProgram->id, "shadowmap"), 1);
-- Send standard MVPs and draw objects
glDrawElements(...);
- END C++
=================================
GLSL
shadowpass vertex shader source
#version 150
in vec3 position;
out vec3 worldPosition;
uniform mat4 lightModelMatrix;
uniform mat4 lightViewMatrix;
uniform mat4 lightProjectionMatrix;
void main()
{
gl_Position = lightProjectionMatrix * lightViewMatrix * lightModelMatrix * vec4(position, 1.0);
worldPosition = (lightModelMatrix * vec4(position, 1.0)).xyz; // Send world position of vertex to fragment shader
}
shadowpass fragment shader source
#version 150
in vec3 worldPosition; // Vertex position in world space
out float distance; // Distance from vertex position to light position
vec3 lightWorldPosition = vec3(0.0, 0.0, 0.0); // Light position in world space
void main()
{
distance = length(worldPosition - lightWorldPosition); // Distance from point to light
// Distance will be written to the cubemap
}
standard vertex shader source
#version 150
in vec3 position;
in vec3 normal;
in vec2 texcoord;
uniform mat4 modelMatrix;
uniform mat4 viewMatrix;
uniform mat4 projectionMatrix;
uniform mat3 modelInverseTranspose; // needed by fragnormaldirection below
out vec3 fragnormal;
out vec3 fragnormaldirection;
out vec2 fragtexcoord;
out vec4 fragposition;
out vec4 fragshadowcoord;
void main()
{
fragposition = vec4(position, 1.0); // Position of vertex in object space
fragtexcoord = texcoord;
fragnormaldirection = normalize(modelInverseTranspose * normal);
fragnormal = normalize(normal);
gl_Position = projectionMatrix * viewMatrix * modelMatrix * vec4(position, 1.0);
}
standard fragment shader source
#version 150
out vec4 outColour;
in vec3 fragnormaldirection;
in vec2 fragtexcoord;
in vec3 fragnormal;
in vec4 fragposition;
uniform mat4 modelMatrix;
uniform mat4 viewMatrix;
uniform mat4 projectionMatrix;
uniform mat4 viewMatrixInversed;
uniform sampler2D tex;
uniform samplerCube shadowmap;
void main()
{
vec3 lightpos = vec3(0.0, 0.0, 0.0);
vec3 pointToLight = (fragposition * modelMatrix).xyz - lightpos; // Get vector between this point and the light
float dist = texture(shadowmap, pointToLight).x; // Get distance written in texture
float shadowfactor = 1.0;
if (length(pointToLight) > dist) // Is it occluded?
shadowfactor = 0.5;
outColour = texture(tex, fragtexcoord) * shadowfactor;
}
Here is a picture of what my code does now:
This is a strange effect, but it seems close to what I intended. Any surface exposed to the light at (0, 0, 0) has an unshadowed circle at the center of it, while everything else is shadowed.
One very useful way of debugging shadow maps is indeed to have a way to display their contents as quads on the screen, six quads in the case of cube shadow maps. That could be implemented as a debug easter egg where you display the full texture over the whole screen, with a 'go to next face' key combo so you can cycle through the six faces.
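A minimal sketch of such a debug view, written as a fragment shader drawn over a fullscreen quad (the uv input and the faceIndex uniform are assumptions, not names from the question's code):
#version 150
in vec2 uv;                    // fullscreen quad coordinate in [0, 1] (assumed)
out vec4 outColour;
uniform samplerCube shadowmap; // the shadow cubemap to inspect
uniform int faceIndex;         // assumed uniform: 0..5, cycled with a key
void main()
{
    vec2 c = uv * 2.0 - 1.0;   // remap to [-1, 1]
    vec3 dir;
    if      (faceIndex == 0) dir = vec3( 1.0, -c.y, -c.x); // +X
    else if (faceIndex == 1) dir = vec3(-1.0, -c.y,  c.x); // -X
    else if (faceIndex == 2) dir = vec3( c.x,  1.0,  c.y); // +Y
    else if (faceIndex == 3) dir = vec3( c.x, -1.0, -c.y); // -Y
    else if (faceIndex == 4) dir = vec3( c.x, -c.y,  1.0); // +Z
    else                     dir = vec3(-c.x, -c.y, -1.0); // -Z
    float d = texture(shadowmap, dir).x;
    outColour = vec4(vec3(d), 1.0); // greyscale view of the stored value
}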
Then, one of the most important things with cube shadow maps is the depth range. A point light doesn't have infinite range, so generally you want to scale your depth storage to match the light's range.
You can use a 16-bit floating point luminance (or red channel) texture to store a world-space depth (spherical, meaning the true length of the ray to the intersection, computed with a small calculation in the fragment shader).
Or you can use linear depth, the same kind that is stored in a classic Z-buffer: the depth of the normalized device coordinates, i.e. the depth after the projection matrix. In that case, to reconstruct the world position later in the lighting shader (the next pass), make sure to divide by w after multiplying by the inverse of the camera-cube-face view * projection matrix.
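A minimal sketch of the first option (storing world-space distance scaled by the light's range), written against the question's shadow-pass shader; the lightRange uniform is an assumption:
#version 150
in vec3 worldPosition;
out float normalizedDistance;
uniform vec3 lightWorldPosition; // light position in world space
uniform float lightRange;        // assumed uniform: the light's maximum range (e.g. the shadow far plane)
void main()
{
    // Spherical (radial) distance from the light, scaled into [0, 1] by the range
    normalizedDistance = length(worldPosition - lightWorldPosition) / lightRange;
}
On the lighting side, the comparison then becomes length(pointToLight) / lightRange > storedValue.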
The key to debugging shadow maps is all in shader twiddling. Start by using colors to visualize the depth stored in your shadow maps as perceived by the pixels of your world. It was the only way that helped me fix point shadow maps in my company's engine. You can make a color code using a combination of mix and clamp, for example blue from 0 to 0.3, red from 0.3 to 0.6, green from 0.6 to 1. If you store world-space distance it is easier, but it is still interesting to visualize it through color codes; just use the same function but divide the distance by your expected world range.
Using that visualization scheme you'll be able to see the shadowed zones right away, because they all bear the same color (since the 'ray' was intercepted by a closer surface). Once you get to that point, the rest will all go smoothly.
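A sketch of such a color code as a small GLSL helper; it uses mix and step to build the three bands described above (step stands in for the clamp in that description):
vec3 depthToColor(float d) // blue: 0.0-0.3, red: 0.3-0.6, green: 0.6-1.0
{
    d = clamp(d, 0.0, 1.0);
    vec3 c = vec3(0.0, 0.0, 1.0);
    c = mix(c, vec3(1.0, 0.0, 0.0), step(0.3, d));
    c = mix(c, vec3(0.0, 1.0, 0.0), step(0.6, d));
    return c;
}
// e.g. outColour = vec4(depthToColor(texture(shadowmap, pointToLight).x), 1.0);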
good luck :)
Related
Attempt to fix sprite sheet pixel bleeding in OpenGL 2D causing sprite distortion
While working on a project, I encountered the common problem of pixel bleeding when trying to draw subregions of my sprite sheet. This caused sort of "seams" to appear at the edges of my sprites. You can see the issue here, on the right and top of the sprite. Doing some searching, I found others with a similar problem, and a suggested solution (here, and here for example) was to offset my texture coordinates by a bit, such as 0.5. I tried this, and it seemed to work. But I have noticed that sometimes, depending on where the sprite or camera is, I get a bit of distortion on the sprites. Here, the left side appears to be cut off, and here, the bottom seems to have expanded. (I should note, the distortion happens on all sides, I just happened to take screenshots of it happening on the bottom and left.) It may be a little difficult to see in screenshots, but it is definitely noticeable in motion. For reference, here is the part of the sprite sheet that is being displayed here Does anybody have any idea what is going on here? I didn't actually notice this issue until recently. I originally set out to resolve the pixel bleeding when I saw it occurring between my tile sprites. This new issue does not occur with them using my current half-pixel offset solution (or if it does, it's not noticeable). Code: Texture parameters glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP); glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP); glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST); glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST); Texture coordinate calculation std::vector<glm::vec4> Texture2D::GetUVs(int w, int h) { std::vector<glm::vec4> uvs; int rows = Width/ w; int columns = Height / h; for(int c = 0; c < columns; c ++) { for(int i = 0; i < rows; i ++) { float offset = 0.5; uvs.emplace_back(glm::vec4(float(((i) * w + offset))/Width, float(((1 + i) * w - offset))/Width, float(((c) * h + offset))/Height, float(((1 + c) * h - offset))/Height)); } } return uvs; Where Width and Height are the dimensions of the sprite sheet, and w and h are the dimensions of the subregion, in this case 32 and 32. How I pass the uvs to the shader GLfloat verticies[] = { uv.x, uv.w, uv.y, uv.z, uv.x, uv.z, uv.x, uv.w, uv.y, uv.w, uv.y, uv.z }; this->shader.Use().SetVector2fv("uvs", 12, verticies); Where uv is the uv at an index in the uvs vector that was returned above in the GetUVs function. Vertex shader #version 330 core layout (location = 0) in vec2 vertex; out vec2 TextureCoordinates; uniform vec2 uvs[6]; uniform mat4 model; uniform mat4 projection; void main() { const vec2 position [6] = vec2[] ( vec2(0.0f, 1.0f), vec2(1.0f, 0.0f), vec2(0.0f, 0.0f), vec2(0.0f, 1.0f), vec2(1.0f, 1.0f), vec2(1.0f, 0.0f) ); TextureCoordinates = uvs[gl_VertexID]; gl_Position = projection * model * vec4(position[gl_VertexID], 0.0, 1.0); } Fragment shader #version 330 core in vec2 TextureCoordinates; out vec4 color; uniform sampler2D image; uniform vec4 spriteColor; void main() { color = vec4(spriteColor) * texture(image, TextureCoordinates); } Thanks for reading, any help is appreciated.
Sample from GL_TEXTURE_1D in fragment shader
Been trying to sample from a 1D texture (.png), got a model with the correct texture coordinates and all but I just can't get the texture to show up. The geometry is rendering just black, there must be something I have missunderstood about textures in OpenGL but can't see it. Any pointers? C++ // Setup GLint texCoordAttrib = glGetAttribLocation(batch_shader_program, "vTexCoord"); glVertexAttribPointer(texCoordAttrib, 2, GL_FLOAT, GL_FALSE, sizeof(Vertex<float>), (const void *)offsetof(Vertex<float>, texCoord)); glEnableVertexAttribArray(texCoordAttrib); // Loading GLuint load_1d_texture(std::string filepath) { SDL_Surface *image = IMG_Load(filepath.c_str()); int width = image->w; GLuint texture; glGenTextures(1, &texture); glBindTexture(GL_TEXTURE_1D, texture); glTexImage2D(GL_TEXTURE_1D, 0, GL_RGBA, width, 0, GL_RGBA, GL_UNSIGNED_BYTE, image->pixels); SDL_FreeSurface(image); return texture; } // Rendering glUseProgram(batch.gl_program); glBindTexture(GL_TEXTURE_1D, batch.mesh.texture.gl_texture_reference); glDraw*** Vertex Shader #version 330 core in vec3 position; in vec4 vColor; in vec3 normal; // Polygon normal in vec2 vTexCoord; // Model in mat4 model; out vec4 fColor; out vec3 fTexcoord; // View or a.k.a camera matrix uniform mat4 camera_view; // Projection or a.k.a perspective matrix uniform mat4 projection; void main() { gl_Position = projection * camera_view * model * vec4(position, 1.0); fTexcoord = vec3(vTexCoord, 1.0); } Fragment Shader #version 330 core in vec4 fColor; out vec4 outColor; in vec3 fTexcoord; // passthrough shading for interpolated textures uniform sampler1D sampler; void main() { outColor = texture(sampler, fTexcoord.x); }
glBindTexture(GL_TEXTURE_2D, texture); glBindTexture(GL_TEXTURE_1D, batch.mesh.texture.gl_texture_reference); Assuming that these two lines of code are talking about the same OpenGL object, you cannot do that. A texture that uses the 2D texture target is a 2D texture. It is not a 1D texture, nor is it a 2D array texture with one layer or a 3D texture with depth 1. It is a 2D texture. Once you bind a texture object after generating it, the texture's target is fixed. You can use view textures to create a view of the same storage with different targets, but the original texture object itself is unaffected by this. And you can't create a 1D view of a 2D texture. You should have gotten a GL_INVALID_OPERATION error when you tried to bind the 2D texture as if it were 1D. You should always check for errors when you run into OpenGL problems.
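A tiny sketch of such an error check (a hypothetical helper, not taken from the answer above):
GLenum err;
while ((err = glGetError()) != GL_NO_ERROR)
    fprintf(stderr, "GL error: 0x%x\n", err);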
In the end there was no problem, only a bug in the texture coordinate loading (it took the wrong indices from the vertices).
Cube mapping does not work correctly using OpenGL/GLSL
I have a strange behaviour with Cube Mapping technique: all my pixel shaders return the black color. So I have in result a black screen. Here's the situation: I have a scene only composed by a simple cube mesh (the skybox) and a track ball camera. Now let's examine the resources (the aspect of the 6 separate textures): Here's the image details : So as you can see these textures need to be loaded in GL_RGB format. Now, for the initialization part, let's take a look to the client C++ texture loading code (I use the 'DevIL' library to load my images): glGenTextures(1, &this->m_Handle); glBindTexture(this->m_Target, this->m_Handle); { glTexParameteri(this->m_Target, GL_TEXTURE_MIN_FILTER, GL_LINEAR); glTexParameteri(this->m_Target, GL_TEXTURE_MAG_FILTER, GL_LINEAR); glTexParameteri(this->m_Target, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE); glTexParameteri(this->m_Target, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE); glTexParameteri(this->m_Target, GL_TEXTURE_WRAP_R, GL_CLAMP_TO_EDGE); { for (uint32_t idx = 0; idx < this->m_SourceFileList.size(); idx++) { ilLoadImage((const wchar_t*)this->m_SourceFileList[idx].c_str()); //IMAGE LOADING { uint32_t width = ilGetInteger(IL_IMAGE_WIDTH); uint32_t height = ilGetInteger(IL_IMAGE_HEIGHT); uint32_t bpp = ilGetInteger(IL_IMAGE_BPP); { char *pPixelData = (char*)ilGetData(); glTexImage2D(this->m_TargetList[idx], 0, GL_RGB8, width, height, 0, GL_RGB, GL_UNSIGNED_BYTE, pPixelData); } } } } } glBindTexture(this->m_Target, 0); Concerning the main loop part, here's the informations I send to the shader program (texture and matrix data): //TEXTURE DATA scene::type::MaterialPtr pSkyBoxMaterial = pBatch->GetMaterial(); glActiveTexture(GL_TEXTURE1); glBindTexture(GL_TEXTURE_CUBE_MAP, pSkyBoxMaterial->GetDiffuseTexture()->GetHandle()); this->SetUniform("CubeMapSampler", 1); //MATRIX DATA this->SetUniform("ModelViewProjMatrix", pBatch->GetModelViewProjMatrix()); As you can see the cube map texture in bound to texture unit 1. Now here's the vertex shader: #version 400 /* ** Vertex attributes. */ layout (location = 0) in vec3 VertexPosition; /* ** Uniform matrix buffer. */ uniform mat4 ModelViewProjMatrix; /* ** Outputs. */ out vec3 TexCoords; /* ** Vertex shader entry point. */ void main(void) { TexCoords = VertexPosition; gl_Position = ModelViewProjMatrix * vec4(VertexPosition, 1.0f); } And finally the fragment shader: #version 400 /* ** Output color value. */ layout (location = 0) out vec4 FragColor; /* ** Vertex inputs. */ in vec3 TexCoords; /* ** Texture uniform. */ uniform samplerCube CubeMapSampler; /* ** Fragment shader entry point. */ void main(void) { vec4 finalColor = texture(CubeMapSampler, TexCoords); FragColor = finalColor; } So all the program compiled and executed show the following result : But I want to precise I use the NVIDIA NSight Debugger and I want to show you first that the cube map is correctly loaded on the GPU: As you can see as it's been written in the pieces of code above my texture is a RGB texture bound to unit texture 1 and it's a GL_TEXTURE_CUBE_MAP texture type. So until here all seems to be correct! And if I replace in the fragment shader the line: vec4 finalColor = texture(CubeMapSampler, TexCoords); By the line: vec4 finalColor = vec4(TexCoords, 1.0f); I have the following result (I render directly the vertex coordinates in model space as color) without wireframe: And the same result with wireframe: Plus I want to precise that the line: std::cout << glGetError() << std::endl; Always returns the '0' value so I have none error! 
So I think these last two pictures show that the matrix information is correct and the vertex coordinates are correct too (moreover, I use a trackball camera and when I move around in my scene I can recognize the cube architecture). So for me all the information I receive in my shader program is correct except for the cube sampler! I think the problem comes from this sampler. However, as you can see above, the cube map seems to be loaded correctly. I am really lost with this situation. I don't understand why all the pixel shaders return a #000000 color (I also tried using the RGBA format but the result is the same).
How do I get textures to work in OpenGL?
I'm using the tutorials on http://arcsynthesis.org/gltut/ to learn OpenGL, it's required, I have to use it. Mostly I want to apply the textures from Tutorial 15 onto objects in tutorial 7 (world with UBO). For now it seemed like the textures only work when mipmaps are turned on. This comes with a downside: The only mipmap used is the one with an index of zero, and that's the 1 colored 1x1 pixel one. I tried setting the minimum level of a mipmap higher or turning off mipmaps entirely, but even that doesn't fix thing, because then everything turns pitch black. Now I'll list the most important parts of my program EDIT: I guess I'll add more details... The vertex shader has something like this: #version 330 layout(location = 0) in vec4 position; layout(location = 1) in vec4 color; layout(location = 2) in vec3 normal; //Added these later layout(location = 5) in vec2 texCoord; out vec2 colorCoord; smooth out vec4 interpColor; out vec3 vertexNormal; out vec3 modelSpacePosition; out vec3 cameraSpacePosition; uniform mat4 worldToCameraMatrix; uniform mat4 modelToWorldMatrix; uniform mat3 normalModelToCameraMatrix; uniform vec3 dirToLight; uniform vec4 lightIntensity; uniform vec4 ambientIntensity; uniform vec4 baseColor; uniform mat4 cameraToClipMatrix; void main() { vertexNormal = normal; vec3 normCamSpace = normalize(normalModelToCameraMatrix * vertexNormal); cameraSpacePosition = normCamSpace; float cosAngIncidence = dot(normCamSpace, dirToLight); cosAngIncidence = clamp(cosAngIncidence, 0, 1); modelSpacePosition.x = position.x; modelSpacePosition.y = position.y; modelSpacePosition.z = position.z; vec4 temp = modelToWorldMatrix * position; temp = worldToCameraMatrix * temp; gl_Position = cameraToClipMatrix * temp; interpColor = ((lightIntensity * cosAngIncidence) + (ambientIntensity)) * baseColor; colorCoord= texCoord ; } The fragment shader like this: #version 330 in vec3 vertexNormal; in vec3 modelSpacePosition; smooth in vec4 interpColor; uniform vec3 modelSpaceLightPos; uniform vec4 lightIntensity2; uniform vec4 ambientIntensity2; out vec4 outputColor; //Added later in vec2 colorCoord; uniform sampler2D colorTexture; void main() { vec3 lightDir2 = normalize(modelSpacePosition - modelSpaceLightPos); float cosAngIncidence2 = dot(normalize(vertexNormal), lightDir2); cosAngIncidence2 = clamp(cosAngIncidence2, 0, 1); float light2DistanceSqr = dot(modelSpacePosition - modelSpaceLightPos, modelSpacePosition - modelSpaceLightPos); //added vec4 texture2 = texture(colorTexture, colorCoord); outputColor = ((ambientIntensity2 + (interpColor*2))/4) + ((((interpColor) * lightIntensity2/200 * cosAngIncidence2) + (ambientIntensity2* interpColor )) /( ( sqrt(light2DistanceSqr) + light2DistanceSqr)/200 )); //No outputColor for texture testing outputColor = texture2 ; } } Those were both shaders. And here are the parts added to the .cpp: #include <glimg/glimg.h> #include "../framework/directories.h" [...] 
const int g_colorTexUnit = 0; GLuint g_checkerTexture = 0; And here's the loader for the texture: void LoadCheckerTexture() { try { std::string filename(LOCAL_FILE_DIR); filename += "checker.dds"; std::auto_ptr<glimg::ImageSet> pImageSet(glimg::loaders::dds::LoadFromFile(filename.c_str())); glGenTextures(1, &g_checkerTexture); glBindTexture(GL_TEXTURE_2D, g_checkerTexture); glimg::SingleImage image = pImageSet->GetImage(0, 0, 0); glimg::Dimensions dims = image.GetDimensions(); glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB8, dims.width, dims.height, 0, GL_BGRA, GL_UNSIGNED_INT_8_8_8_8_REV, image.GetImageData()); glBindTexture(GL_TEXTURE_2D, 0); } catch(std::exception &e) { printf("%s\n", e.what()); throw; } } Naturally I've got this in void init(): LoadCheckerTexture(); And then when rendering the object: glActiveTexture(GL_TEXTURE0 + g_colorTexUnit); glBindTexture(GL_TEXTURE_2D,g_checkerTexture); g_pLeftMesh->Render(); glBindSampler(g_colorTexUnit, 0); glBindTexture(GL_TEXTURE_2D, 0); With all of this, I get put pitch black for everything, however when I change the outputColor equation into "texture + outputColor;", everything looks normal. I have no idea what I'm doing wrong here. A friend tried to help me, we removed some unnecessairy stuff, but we got nothing running.
Ok guys, I've worked on this whole thing, and did manage to somehow get it running. First off I had to add samplers: GLuint g_samplers; //Add Later void CreateSamplers() { glGenSamplers(1, &g_samplers); glSamplerParameteri(g_samplers, GL_TEXTURE_WRAP_S, GL_REPEAT); glSamplerParameteri(g_samplers, GL_TEXTURE_WRAP_T, GL_REPEAT); //Linear mipmap Nearest glSamplerParameteri(g_samplers, GL_TEXTURE_MAG_FILTER, GL_NEAREST); glSamplerParameteri(g_samplers, GL_TEXTURE_MIN_FILTER, GL_NEAREST); } I also added this to the file thing: glimg::OpenGLPixelTransferParams xfer = glimg::GetUploadFormatType(pImageSet->GetFormat(), 0); glimg::SingleImage image = pImageSet->GetImage(0, 0, 0); glimg::Dimensions dims = image.GetDimensions(); glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, dims.width, dims.height, 0, xfer.format, xfer.type, image.GetImageData()); The xfer variable does get the format and type adjusted to the dds. Also the render code got turned into this: //Added necessary glActiveTexture(GL_TEXTURE0 + g_colorTexUnit); glBindTexture(GL_TEXTURE_2D,g_checkerTexture); glBindSampler(g_colorTexUnit, g_samplers); g_pLeftMesh->Render(); glBindSampler(g_colorTexUnit, 0); glBindTexture(GL_TEXTURE_2D, 0); And of course at the end of init() I needed to add the CreateSamplers thing: //Added this later LoadCheckerTexture(); CreateSamplers(); I'm sorry for all the trouble with all this, but guess OpenGL really is just this confusing and it was just dumb luck that I got it right. Just posting this so that people know
Your fail to add textures may be caused by: Have you add texture coordinates to objects? (this is the most probable cause, because you are adding textures to non textured tutorial), add textures to VAO. Did you add uniform textureunit (Sampler2D)? (it must be uniform, else texturing will not work properly) Is your texture loaded,binded,enabled (GL_TEXTURE_2D) ? Is your active texture unit - 0? if not change layout/multitexture coords or set active texture 0 This two codes are simple texturing shaders (texture unit 0) no special things (like light,blend,bump,...): tm_l2g is transformation local obj space -> world space (Modelview) tm_g2s is transformation world space -> screen space (Projection) pos are vertex coordinates txt are texture coordinates col are colors Do not forget to change uniform names and layout locations to yours. Vertex: //------------------------------------------------------------------ #version 420 core //------------------------------------------------------------------ uniform mat4x4 tm_l2g; uniform mat4x4 tm_g2s; layout(location=0) in vec3 pos; layout(location=1) in vec4 col; layout(location=2) in vec2 txr; out smooth vec4 pixel_col; out smooth vec2 pixel_txr; //------------------------------------------------------------------ void main(void) { vec4 p; p.xyz=pos; p.w=1.0; p=tm_l2g*p; p=tm_g2s*p; gl_Position=p; pixel_col=col; pixel_txr=txr; } //------------------------------------------------------------------ fragment: //------------------------------------------------------------------ #version 420 core //------------------------------------------------------------------ in smooth vec4 pixel_col; in smooth vec2 pixel_txr; uniform sampler2D txr_texture0; out layout(location=0) vec4 frag_col; //------------------------------------------------------------------ void main(void) { vec4 col; col=texture(txr_texture0,pixel_txr.st); frag_col=col*pixel_col; } //------------------------------------------------------------------ [edit1] CPU old style OpenGL render code (initializations are not included its only render code they can be found here) //------------------------------------------------------------------ // set modelview,projection,textures,bind GLSL programs... GLfloat a=10.0,z=0.0; glColor3f(1.0,1.0,1.0); glBegin(GL_QUADS); // textured quad glTexCoord2f(0.0,0.0); glVertex3f(-a,-a,z); glTexCoord2f(0.0,1.0); glVertex3f(-a,+a,z); glTexCoord2f(1.0,1.0); glVertex3f(+a,+a,z); glTexCoord2f(1.0,0.0); glVertex3f(+a,-a,z); // reverse order quad to be shore that at least one passes by CULL_FACE glTexCoord2f(1.0,0.0); glVertex3f(+a,-a,z); glTexCoord2f(1.0,1.0); glVertex3f(+a,+a,z); glTexCoord2f(0.0,1.0); glVertex3f(-a,+a,z); glTexCoord2f(0.0,0.0); glVertex3f(-a,-a,z); glEnd(); //------------------------------------------------------------------ [edit2] ok here goes VAO/VBO render code,... //------------------------------------------------------------------------------ // enum of VBO locations (it is also your layout location) I use enums for simple in code changes enum _vbo_enum { _vbo_pos=0, // glVertex _vbo_col, // glColor _vbo_tan, // glNormal _vbo_unused0, // unused (at least i dont see anything at this location in your code) _vbo_unused1, // unused (at least i dont see anything at this location in your code) _vbo_txr, // glTexCoord _vbos }; //------------------------------------------------------------------------------ // 'global' names and size for OpenGL mesh in VAO/VBO ... 
similar ot texture names/handles GLuint vao[1],vbo[_vbos],num_pnt=0; //------------------------------------------------------------------------------ void VAO_init_cube() // call this before VAO use,...but after OpenGL init ! { //[1] first you need some model to render (mesh), here is a simple cube // size,position of cube - change it that it is visible in your scene const GLfloat a=1.0,x=0.0,y=0.0,z=0.0; // cube points 3f x,y,z GLfloat mesh_pos[]= { x-a,y-a,z-a,x-a,y+a,z-a,x+a,y+a,z-a,x+a,y-a,z-a, x-a,y-a,z+a,x-a,y+a,z+a,x+a,y+a,z+a,x+a,y-a,z+a, x-a,y-a,z-a,x-a,y-a,z+a,x+a,y-a,z+a,x+a,y-a,z-a, x-a,y+a,z-a,x-a,y+a,z+a,x+a,y+a,z+a,x+a,y+a,z-a, x-a,y-a,z-a,x-a,y+a,z-a,x-a,y+a,z+a,x-a,y-a,z+a, x+a,y-a,z-a,x+a,y+a,z-a,x+a,y+a,z+a,x+a,y-a,z+a, }; // cube colors 3f r,g,b GLfloat mesh_col[]= { 0.0,0.0,0.0,0.0,1.0,0.0,1.0,1.0,0.0,1.0,0.0,0.0, 0.0,0.0,1.0,0.0,1.0,1.0,1.0,1.0,1.0,1.0,0.0,1.0, 0.0,0.0,0.0,0.0,0.0,1.0,1.0,0.0,1.0,1.0,0.0,0.0, 0.0,1.0,0.0,0.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,0.0, 0.0,0.0,0.0,0.0,1.0,0.0,0.0,1.0,1.0,0.0,0.0,1.0, 1.0,0.0,0.0,1.0,1.0,0.0,1.0,1.0,1.0,1.0,0.0,1.0, }; // cube normals 3f x,y,z GLfloat mesh_tan[]= { -0.6,-0.6,-0.6,-0.6,+0.6,-0.6,+0.6,+0.6,-0.6,+0.6,-0.6,-0.6, -0.6,-0.6,+0.6,-0.6,+0.6,+0.6,+0.6,+0.6,+0.6,+0.6,-0.6,+0.6, -0.6,-0.6,-0.6,-0.6,-0.6,+0.6,+0.6,-0.6,+0.6,+0.6,-0.6,-0.6, -0.6,+0.6,-0.6,-0.6,+0.6,+0.6,+0.6,+0.6,+0.6,+0.6,+0.6,-0.6, -0.6,-0.6,-0.6,-0.6,+0.6,-0.6,-0.6,+0.6,+0.6,-0.6,-0.6,+0.6, +0.6,-0.6,-0.6,+0.6,+0.6,-0.6,+0.6,+0.6,+0.6,+0.6,-0.6,+0.6, }; // cube texture coords 2f s,t GLfloat mesh_txr[]= { 0.0,0.0,0.0,1.0,1.0,1.0,1.0,0.0, 0.0,0.0,0.0,1.0,1.0,1.0,1.0,0.0, 0.0,0.0,0.0,1.0,1.0,1.0,1.0,0.0, 0.0,0.0,0.0,1.0,1.0,1.0,1.0,0.0, 0.0,0.0,0.0,1.0,1.0,1.0,1.0,0.0, 0.0,0.0,0.0,1.0,1.0,1.0,1.0,0.0, }; // init VAO/VBO glGenVertexArrays(1,vao); // allocate 1 x VAO glGenBuffers(_vbos,vbo); // allocate _vbos x VBO // copy mesh to VAO/VBO ... after this you do not need the mesh anymore GLint i,sz,n; // n = number of numbers per 1 entry glBindVertexArray(vao[0]); num_pnt=sizeof(mesh_pos)/(sizeof(GLfloat)*3); // num of all points in mesh i=_OpenGLVAOgfx_pos; n=3; sz=sizeof(GLfloat)*n; glBindBuffer(GL_ARRAY_BUFFER,vbo[i]); glBufferData(GL_ARRAY_BUFFER,sz*num_pnt,mesh_pos,GL_STATIC_DRAW); glEnableVertexAttribArray(i); glVertexAttribPointer(i,n,GL_FLOAT,GL_FALSE,0,0); i=_OpenGLVAOgfx_col; n=3; sz=sizeof(GLfloat)*n; glBindBuffer(GL_ARRAY_BUFFER,vbo[i]); glBufferData(GL_ARRAY_BUFFER,sz*num_pnt,mesh_col,GL_STATIC_DRAW); glEnableVertexAttribArray(i); glVertexAttribPointer(i,n,GL_FLOAT,GL_FALSE,0,0); i=_OpenGLVAOgfx_tan; n=3; sz=sizeof(GLfloat)*n; glBindBuffer(GL_ARRAY_BUFFER,vbo[i]); glBufferData(GL_ARRAY_BUFFER,sz*num_pnt,mesh_tan,GL_STATIC_DRAW); glEnableVertexAttribArray(i); glVertexAttribPointer(i,n,GL_FLOAT,GL_FALSE,0,0); i=_OpenGLVAOgfx_txr; n=2; sz=sizeof(GLfloat)*n; glBindBuffer(GL_ARRAY_BUFFER,vbo[i]); glBufferData(GL_ARRAY_BUFFER,sz*num_pnt,mesh_txr,GL_STATIC_DRAW); glEnableVertexAttribArray(i); glVertexAttribPointer(i,n,GL_FLOAT,GL_FALSE,0,0); glBindVertexArray(0); } //------------------------------------------------------------------------------ void VAO_draw() // call this to draw your mesh,... need to enable and bind textures,... 
before use { glDisable(GL_CULL_FACE); glEnable(GL_DEPTH_TEST); glDepthFunc(GL_LEQUAL); glBindVertexArray(vao[0]); glEnableVertexAttribArray(_vbo_pos); glEnableVertexAttribArray(_vbo_col); glEnableVertexAttribArray(_vbo_tan); glDisableVertexAttribArray(_vbo_unused0); glEnableVertexAttribArray(_vbo_txr); glDrawArrays(GL_QUADS,0,num_pnt); glDisableVertexAttribArray(_vbo_pos); glDisableVertexAttribArray(_vbo_col); glDisableVertexAttribArray(_vbo_tan); glDisableVertexAttribArray(_vbo_unused0); glDisableVertexAttribArray(_vbo_unused1); glDisableVertexAttribArray(_vbo_txr); glBindVertexArray(0); } //------------------------------------------------------------------------------ void VAO_exit() // clean up ... call this when you do not need VAO/VBO anymore { glDisableVertexAttribArray(_vbo_pos); glDisableVertexAttribArray(_vbo_col); glDisableVertexAttribArray(_vbo_tan); glDisableVertexAttribArray(_vbo_unused0); glDisableVertexAttribArray(_vbo_unused1); glDisableVertexAttribArray(_vbo_txr); glBindVertexArray(0); glDeleteVertexArrays(1,vao); glDeleteBuffers(_vbos,vbo); } //------------------------------------------------------------------------------ [edit3] if you are win32/64 user you can try my IDE for GLSL It is very simple and easy to use, but cannot change texture/attrib locations. Press [F1] for help,... [F9] for run [F10] for return to normal OpenGL mode. Also txt-editor is little buggy sometimes but it is enough for my purpose. GLSL IDE
Pointers on modern OpenGL shadow cubemapping?
Background I am working on a 3D game using C++ and modern OpenGL (3.3). I am now working on the lighting and shadow rendering, and I've successfully implemented directional shadow mapping. After reading over the requirements for the game I have decided that I'd be needing point light shadow mapping. After doing some research, I discovered that to do omnidirectional shadow mapping I will do something similar to directional shadow mapping, but with a cubemap instead. I have no previous knowledge of cubemaps but my understanding of them is that a cubemap is six textures, seamlessly attached. I did some looking around but unfortunately I struggled to find a definitive "tutorial" on the subject for modern OpenGL. I look for tutorials first that explain it from start to finish because I seriously struggled to learn from snippets of source code or just concepts, but I tried. Current understandings Here is my general understanding of the idea, minus the technicalities. Please correct me. For each point light, a framebuffer is set up, like directional shadowmapping A single cubemap texture is then generated, and bound with glBindTexture(GL_TEXTURE_CUBE_MAP, shadowmap). The cubemap is set up with the following attributes: glTexParameteri(GL_TEXTURE_CUBE_MAP_ARB, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE); glTexParameteri(GL_TEXTURE_CUBE_MAP_ARB, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE); glTexParameteri(GL_TEXTURE_CUBE_MAP_ARB, GL_TEXTURE_WRAP_R, GL_CLAMP_TO_EDGE); glTexParameteri(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_MIN_FILTER, GL_LINEAR); glTexParameteri(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_MAG_FILTER, GL_LINEAR); (this is also similar to directional shadowmapping) Now glTexImage2D() is iterated through six times, once for each face. I do that like this: for (int face = 0; face < 6; face++) // Fill each face of the shadow cubemap glTexImage2D(GL_TEXTURE_CUBE_MAP_POSITIVE_X + face, 0, GL_DEPTH_COMPONENT32F , 1024, 1024, 0, GL_DEPTH_COMPONENT, GL_FLOAT, NULL); The texture is attached to the framebuffer with a call to glFramebufferTexture(GL_FRAMEBUFFER, GL_DEPTH_ATTACHMENT, shadowmap, 0); When the scene is to be rendered, it is rendered in two passes, like directional shadow mapping. First of all, the shadow framebuffer is bound, the viewport is adjusted to the size of the shadowmap (1024 by 1024 in this case). Culling is set to the front faces with glCullFace(GL_FRONT) The active shader program is switched to the vertex and fragment shadow shaders that I will provide the sources of further down The light view matrices for all six views are calculated. 
I do it by creating a vector of glm::mat4's and push_back() the matrices, like this: // Create the six view matrices for all six sides for (int i = 0; i < renderedObjects.size(); i++) // Iterate through all rendered objects { renderedObjects[i]->bindBuffers(); // Bind buffers for rendering with it glm::mat4 depthModelMatrix = renderedObjects[i]->getModelMatrix(); // Set up model matrix for (int i = 0; i < 6; i++) // Draw for each side of the light { glFramebufferTexture2D(GL_FRAMEBUFFER, GL_DEPTH_ATTACHMENT, GL_TEXTURE_CUBE_MAP_POSITIVE_X + i, shadowmap, 0); glClear(GL_DEPTH_BUFFER_BIT); // Clear depth buffer // Send MVP for shadow map glm::mat4 depthMVP = depthProjectionMatrix * depthViewMatrices[i] * depthModelMatrix; glUniformMatrix4fv(glGetUniformLocation(shadowMappingProgram, "depthMVP"), 1, GL_FALSE, glm::value_ptr(depthMVP)); glUniformMatrix4fv(glGetUniformLocation(shadowMappingProgram, "lightViewMatrix"), 1, GL_FALSE, glm::value_ptr(depthViewMatrices[i])); glUniformMatrix4fv(glGetUniformLocation(shadowMappingProgram, "lightProjectionMatrix"), 1, GL_FALSE, glm::value_ptr(depthProjectionMatrix)); glDrawElements(renderedObjects[i]->getDrawType(), renderedObjects[i]->getElementSize(), GL_UNSIGNED_INT, 0); } } The default framebuffer is bound, and the scene is drawn normally. Issue Now, to the shaders. This is where my understanding runs dry. I am completely unsure on what I should do, my research seems to conflict with eachother, because it's for different versions. I ended up blandly copying and pasting code from random sources, and hoping it'd achieve something other than a black screen. I know this is terrible, but there doesn't seem to be any clear definitions on what to do. What spaces do I work in? Do I even need a separate shadow shader, like I used in directional point lighting? What the hell do I use as the type for a shadow cubemap? samplerCube? samplerCubeShadow? How do I sample said cubemap properly? I hope that someone can clear it up for me and provide a nice explanation. My current understanding of the shader part is: - When the scene is being rendered into the cubemap, the vertex shader simply takes the depthMVP uniform I calculated in my C++ code and transforms the input vertices by them. - The fragment shader of the cubemap pass simply assigns the single out value to the gl_FragCoord.z. (This part is unchanged from when I implemented directional shadow mapping. I assumed it would be the same for cubemapping because the shaders don't even interact with the cubemap - OpenGL simply renders the output from them to the cubemap, right? Because it's a framebuffer?) The vertex shader for the normal rendering is unchanged. In the fragment shader for normal rendering, the vertex position is transformed into the light's space with the light's projection and view matrix. That's somehow used in the cubemap texture lookup. ??? Once the depth has been retrieved using magical means, it is compared to the distance of the light to the vertex, much like directional shadowmapping. If it's less, that point must be shadowed, and vice-versa. It's not much of an understanding. I go blank as to how the vertices are transformed and used to lookup the cubemap, so I'm going to paste the source for my shaders, in hope that people can clarify this. Please note that a lot of this code is blind copying and pasting, I haven't altered anything as to not jeopardise any understanding. 
Shadow vertex shader: #version 150 in vec3 position; uniform mat4 depthMVP; void main() { gl_Position = depthMVP * vec4(position, 1); } Shadow fragment shader: #version 150 out float fragmentDepth; void main() { fragmentDepth = gl_FragCoord.z; } Standard vertex shader: #version 150 in vec3 position; in vec3 normal; in vec2 texcoord; uniform mat3 modelInverseTranspose; uniform mat4 modelMatrix; uniform mat4 viewMatrix; uniform mat4 projectionMatrix; out vec3 fragnormal; out vec3 fragnormaldirection; out vec2 fragtexcoord; out vec4 fragposition; out vec4 fragshadowcoord; void main() { fragposition = modelMatrix * vec4(position, 1.0); fragtexcoord = texcoord; fragnormaldirection = normalize(modelInverseTranspose * normal); fragnormal = normalize(normal); fragshadowcoord = projectionMatrix * viewMatrix * modelMatrix * vec4(position, 1.0); gl_Position = projectionMatrix * viewMatrix * modelMatrix * vec4(position, 1.0); } Standard fragment shader: #version 150 out vec4 outColour; in vec3 fragnormaldirection; in vec2 fragtexcoord; in vec3 fragnormal; in vec4 fragposition; in vec4 fragshadowcoord; uniform mat4 modelMatrix; uniform mat4 viewMatrix; uniform mat4 projectionMatrix; uniform mat4 viewMatrixInversed; uniform mat4 lightViewMatrix; uniform mat4 lightProjectionMatrix; uniform sampler2D tex; uniform samplerCubeShadow shadowmap; float VectorToDepthValue(vec3 Vec) { vec3 AbsVec = abs(Vec); float LocalZcomp = max(AbsVec.x, max(AbsVec.y, AbsVec.z)); const float f = 2048.0; const float n = 1.0; float NormZComp = (f+n) / (f-n) - (2*f*n)/(f-n)/LocalZcomp; return (NormZComp + 1.0) * 0.5; } float ComputeShadowFactor(samplerCubeShadow ShadowCubeMap, vec3 VertToLightWS) { float ShadowVec = texture(ShadowCubeMap, vec4(VertToLightWS, 1.0)); if (ShadowVec + 0.0001 > VectorToDepthValue(VertToLightWS)) // To avoid self shadowing, I guess return 1.0; return 0.7; } void main() { vec3 light_position = vec3(0.0, 0.0, 0.0); vec3 VertToLightWS = light_position - fragposition.xyz; outColour = texture(tex, fragtexcoord) * ComputeShadowFactor(shadowmap, VertToLightWS); } I can't remember where the ComputerShadowFactor and VectorToDepthValue function code came from, because I was researching it on my laptop which I can't get to right now, but this is the result of those shaders: It is a small square of unshadowed space surrounded by shadowed space. I am obviously doing a lot wrong here, probably centered on my shaders, due to a lack of knowledge on the subject because I find it difficult to learn from anything but tutorials, and I am very sorry for that. I am at a loss it it would be wonderful if someone can shed light on this with a clear explanation on what I am doing wrong, why it's wrong, how I can fix it and maybe even some code. I think the issue may be because I am working in the wrong spaces.
I hope to provide an answer to some of your questions, but first some definitions are required: What is a cubemap? It is a map from a direction vector to a pair of [face, 2d coordinates on that face], obtained by projecting the direction vector on an hypothetical cube. What is an OpenGL cubemap texture? It is a set of six "images". What is a GLSL cubemap sampler? It is a sampler primitive from which cubemap sampling can be done. This mean that it is sampled using a direction vector instead of the usual texture coordinates. The hardware then project the direction vector on an hypothetical cube and use the resulting [face, 2d texture coordinate] pair to sample the right "image" at the right 2d position. What is a GLSL shadow sampler? It is a sampler primitive that is bounded to a texture containing NDC-space depth values and, when sampled using the shadow-specific sampling functions, return a "comparison" between a NDC-space depth (in the same space of the shadow map, obviously) and the NDC-space depth stored inside the bounded texture. The depth to compare against is specified as an additional element in the texture coordinates when calling the sampling function. Note that shadow samplers are provided for ease of use and speed, but it is always possible to do the comparison "manually" in the shader. Now, for your questions: OpenGL simply renders [...] to the cubemap, right? No, OpenGL render to a set of targets in the currently bounded framebuffer. In the case of cubemaps, the usual way to render in them is: to create them and attach each of their six "images" to the same framebuffer (at different attachment points, obviously) to enable only one of the target at a time (so, you render in each cubemap face individually) to render what you want in the cubemap face (possibly using face-specific "view" and "projection" matrices) Point-light shadow maps In addition to everything said about cubemaps, there are a number of problems in using them to implement point-light shadow mapping and so the hardware depth comparison is rarely used. Instead, what is common pratice is the following: instead of writing NDC-space depth, write radial distance from the point light when querying the shadow map (see sample code at bottom): do not use hardware depth comparisons (use samplerCube instead of samplerCubeShadow) transform the point to be tested in the "cube space" (that do not include projection at all) use the "cube-space" vector as the lookup direction to sample the cubemap compare the radial distance sampled from the cubemap with the radial distance of the tested point Sample code // sample radial distance from the cubemap float radial_dist = texture(my_cubemap, cube_space_vector).x; // compare against test point radial distance bool shadowed = length(cube_space_vector) > radial_dist;