How to get texture coordinates into GLSL version 150?

In GLSL version 110 I can get the texture coordinate from gl_TexCoord[], but it's deprecated in 150.
OpenGL code:
shader.setupShaderFromFile(GL_VERTEX_SHADER, "t.vert");
shader.setupShaderFromFile(GL_FRAGMENT_SHADER, "t.frag");
shader.linkProgram();
glGetFloatv(GL_MODELVIEW_MATRIX, modelview_mat_);
glGetFloatv(GL_PROJECTION_MATRIX, projectionview_mat_);
shader.begin();
glUniformMatrix4fv( glGetUniformLocation(shader.getProgram(), "MVMatrix"), 1, GL_FALSE, modelview_mat_);
glUniformMatrix4fv( glGetUniformLocation(shader.getProgram(), "MPMatrix"), 1, GL_FALSE, projectionview_mat_);
glBegin(GL_QUADS);
glTexCoord2f(0, 0);glVertex2f(0, 0);
glTexCoord2f(1, 0);glVertex2f(100, 0);
glTexCoord2f(1, 1);glVertex2f(100, 100);
glTexCoord2f(0, 1);glVertex2f(0, 100);
glEnd();
shader.end();
Shader Code:
VERTEX:
#version 150
in vec3 in_vertex;
in vec2 in_coord;
uniform mat4 MVMatrix;
uniform mat4 MPMatrix;
out vec2 tex_coord;
void main()
{
vec4 v = vec4( in_vertex, 1.0 );
tex_coord = in_coord;
gl_Position = MPMatrix * MVMatrix * v;
}
FRAGMENT:
#version 150
in vec2 tex_coord;
out vec4 frgcolor;
void main()
{
frgcolor = vec4( tex_coord, 0.0, 1.0);
}
The final result is a red quad.
How do I pass the texture coordinate to the shader, or is my code wrong?

If you don't want to use the deprecated functionality, you'll have to stop using glTexCoord2f and use a custom attribute instead.
If you want to stay with immediate mode, you'll use glVertexAttrib2f.
You pair this up with your new variable 'in_coord' by querying its index after the shader is linked:
int program = glCreateProgram();
//create shader as normal
int texcoord_index = glGetAttribLocation(program, "in_coord");
glBegin(GL_QUADS);
glVertexAttrib2f(texcoord_index, 0, 0); ...
If you want to use vertex arrays/VBOs instead, the functions to use are glEnableVertexAttribArray/glVertexAttribPointer.
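For example, here is a minimal sketch of the VBO path, reusing the program object and the in_vertex/in_coord attribute names from the question; the interleaved layout is an illustrative choice:
// Interleaved quad data: x, y, u, v per vertex (illustrative layout)
const float quad[] = {
      0.0f,   0.0f,  0.0f, 0.0f,
    100.0f,   0.0f,  1.0f, 0.0f,
    100.0f, 100.0f,  1.0f, 1.0f,
      0.0f, 100.0f,  0.0f, 1.0f,
};
GLuint vbo;
glGenBuffers(1, &vbo);
glBindBuffer(GL_ARRAY_BUFFER, vbo);
glBufferData(GL_ARRAY_BUFFER, sizeof(quad), quad, GL_STATIC_DRAW);
// Query the attribute locations after linking (a core context also needs a VAO bound)
GLint pos_index = glGetAttribLocation(program, "in_vertex");
GLint texcoord_index = glGetAttribLocation(program, "in_coord");
// in_vertex is a vec3 in the shader; an unspecified z component defaults to 0
glEnableVertexAttribArray(pos_index);
glVertexAttribPointer(pos_index, 2, GL_FLOAT, GL_FALSE,
                      4 * sizeof(float), (const void*)0);
glEnableVertexAttribArray(texcoord_index);
glVertexAttribPointer(texcoord_index, 2, GL_FLOAT, GL_FALSE,
                      4 * sizeof(float), (const void*)(2 * sizeof(float)));
// GL_QUADS is also deprecated in the core profile, so draw a fan instead
glDrawArrays(GL_TRIANGLE_FAN, 0, 4);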

In your case, just look into gl_TexCoord[0] in your vertex shader; this definitely works with GLSL 1.50 (compatibility profile).
in C++:
glActiveTexture(GL_TEXTURE0 );
glBindTexture(GL_TEXTURE_2D, id);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP);
glMatrixMode(GL_TEXTURE);
glLoadIdentity();
glBegin(GL_QUADS);
glTexCoord2f(0, 0);glVertex2f(0, 0);
glTexCoord2f(1, 0);glVertex2f(100, 0);
glTexCoord2f(1, 1);glVertex2f(100, 100);
glTexCoord2f(0, 1);glVertex2f(0, 100);
glEnd();
in vertex shader:
gl_TexCoord[0]=gl_MultiTexCoord0;
in fragment shader:
vec4 c = texture2D(texSampler, gl_TexCoord[0].st );
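Note that the texSampler uniform still has to refer to texture unit 0. Sampler uniforms default to 0, so this happens to work as-is, but setting it explicitly is clearer; a minimal sketch, assuming the same shader wrapper as in the question:
shader.begin();
glUniform1i(glGetUniformLocation(shader.getProgram(), "texSampler"), 0); // unit 0 = GL_TEXTURE0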


imageStore() doesn't work with integer types

I bind a texture to both a texture unit and an image unit, write to it via imageStore() in the compute shader, and sample it with a sampler2D in the fragment shader.
This works when the pixel format is floating-point, but stops working with integers. glGetError() yields nothing.
glew and glm are used; that should be irrelevant to the problem though.
main.cpp:
constexpr glm::vec2 TEX_DIM = { 2048.0f, 2048.0f / ASPECT_RATIO };
constexpr int LOCAL_WORKGROUP_SIZE = 32;
// After setting up vbo, etc //////////
// Texture
const unsigned int texSlot = 0;
unsigned int texId;
glGenTextures(1, &texId);
glBindTexture(GL_TEXTURE_2D, texId);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT);
glBindTexture(GL_TEXTURE_2D, 0);
glActiveTexture(GL_TEXTURE0 + texSlot);
glBindTexture(GL_TEXTURE_2D, texId);
glTexImage2D(GL_TEXTURE_2D, 0, GL_R8, int(TEX_DIM.x), int(TEX_DIM.y), 0, GL_RED, GL_UNSIGNED_BYTE, nullptr);
// Binding to image unit
const unsigned int imageSlot = 0;
glBindImageTexture(imageSlot, texId, 0, GL_FALSE, 0, GL_WRITE_ONLY, GL_R8);
// Shaders
unsigned int computeShader;
unsigned int graphicsShader;
// After creating shaders //////////
// Graphics shader
glUseProgram(graphicsShader);
glUniform1i(glGetUniformLocation(graphicsShader, "uTexture"), texSlot);
glUniformMatrix4fv(glGetUniformLocation(graphicsShader, "uMVP"), 1, GL_FALSE, &mvp);
auto mvp = glm::ortho(0.0f, (float)WINDOW_WIDTH, 0.0f, (float)WINDOW_HEIGHT, -1.0f, 1.0f);
// Compute shader
glUseProgram(computeShader);
glUniform1i(glGetUniformLocation(computeShader, "uImage"), imageSlot);
// After validating shaders //////////
while (true)
{
// Compute
glUseProgram(computeShader);
glDispatchCompute(TEX_DIM.x / LOCAL_WORKGROUP_SIZE, TEX_DIM.y / LOCAL_WORKGROUP_SIZE, 1);
glMemoryBarrier(GL_SHADER_IMAGE_ACCESS_BARRIER_BIT);
// Draw
glClear(GL_COLOR_BUFFER_BIT);
glUseProgram(graphicsShader);
glDrawElements(GL_TRIANGLES, 6, GL_UNSIGNED_INT, nullptr);
glfwSwapBuffers(window);
}
Compute Shader:
# version 430 core
layout(local_size_x = 32, local_size_y = 32) in;
layout(r8) uniform image2D uImage;
void main()
{
// Writing all to red, for testing purpose
imageStore(uImage, ivec2(gl_GlobalInvocationID.xy), vec4(1.0, 0.0, 0.0, 0.0));
}
Vertex Shader:
# version 430 core
layout(location = 0) in vec2 position;
layout(location = 1) in vec2 texCoord;
out vec2 vTexCoord;
uniform mat4 uMVP;
void main()
{
gl_Position = uMVP * vec4(position, 0.0, 1.0);
vTexCoord = texCoord;
}
Fragment Shader:
# version 430 core
in vec2 vTexCoord;
out vec4 color;
uniform sampler2D uTexture;
void main()
{
color = vec4(
texture(uTexture, vTexCoord).x,
0.0, 0.0, 1.0
);
}
Below is my attempt to convert the minimal program to use integers instead; it gives me a black screen but no errors otherwise.
main.cpp:
glTexImage2D(GL_TEXTURE_2D, 0, GL_R8UI, int(TEX_DIM.x), int(TEX_DIM.y), 0, GL_RED_INTEGER, GL_UNSIGNED_BYTE, nullptr);
glBindImageTexture(imageSlot, texId, 0, GL_FALSE, 0, GL_WRITE_ONLY, GL_R8UI);
Compute Shader:
layout(r8ui) uniform uimage2D uImage;
void main()
{
// Writing all to red, for testing purpose
imageStore(uImage, ivec2(gl_GlobalInvocationID.xy), uvec4(255, 0, 0, 0));
}
Fragment Shader:
uniform usampler2D uTexture;
void main()
{
color = vec4(
float(texture(uTexture, vTexCoord).x) / 256.0,
0.0, 0.0, 1.0
);
}
I've thought about GL_R8UI being incompatible, but the wiki says both GL_R8 and GL_R8UI are fine to use.

GLSL cannot get sampler3D value

I cannot get a sampler3D value in my GLSL fragment shader.
I am writing a shader in GLSL and want to take a sampler3D (3D texture) as volume data and do volume rendering. However, it seems I cannot bind the 3D texture to my shader's sampler3D: texture3D() always returns (0,0,0,0) on the shader side. But on the application side, I can use glGetTexImage to get the data.
Application Side (dS is a Shader class object):
dS.use();
dS.setInt("volumeData", 0);
uint texture;
glGenTextures(1, &texture);
glBindTexture(GL_TEXTURE_3D, texture);
float data[64];
for (int i = 0; i < 64; ++i) data[i] = 1.0f;
glTexImage3D(GL_TEXTURE_3D, 0, GL_R16F, 4, 4, 4, 0, GL_RED, GL_FLOAT, data);
glTexParameteri(GL_TEXTURE_3D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_3D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_3D, GL_TEXTURE_WRAP_R, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glBindTexture(GL_TEXTURE_3D, 0);
float read[64];
glBindTexture(GL_TEXTURE_3D, texture);
glGetTexImage(GL_TEXTURE_3D, 0, GL_RED, GL_FLOAT, read);
std::cout << read[0] << "," << read[63] << std::endl; // 1,1
glBindTexture(GL_TEXTURE_3D, 0);
// When drawing.
dS.use();
dS.setMat4("model", glm::mat4(1.0f));
dS.setMat4("view", view);
dS.setMat4("projection", projection);
glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_3D, texture);
cube.draw();
Vertex Shader
#version 330 core
// for snow rendering
layout (location = 0) in vec3 aPos;
uniform mat4 model;
uniform mat4 view;
uniform mat4 projection;
void main()
{
gl_Position = projection * view * model * vec4(aPos, 1.0);
}
Fragment Shader
#version 330
out vec4 FragColor;
uniform sampler3D volumeData;
void main()
{
ivec3 size = textureSize(volumeData, 0);
float c = texture3D(volumeData, vec3(0)).x;
if (size.x == 1)
FragColor = vec4(1.0, 0.0, c, 1.0);
else if (size.x > 2)
FragColor = vec4(0.0, 1.0, 0.0, 1.0);
else if (size.x == 0)
FragColor = vec4(0.0, 0.0, 1.0, 1.0);
else
FragColor = vec4(1.0, 1.0, 1.0, 1.0);
}
I only get a red cube, which means size.x == 1 and texture3D returns 0; I expected size.x == 4 and texture3D to return 1.0.
I used a similar method for a 2D texture and it worked, so I guess my Shader class is right. I also tried adding glEnable(GL_TEXTURE_3D) before doing any 3D texture operations.
And I am not sure if this helps: I used glfw-3.2.1 WIN64 and glad.
I solved it.
It's a really stupid typo.
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
should use GL_TEXTURE_3D instead of GL_TEXTURE_2D.
And since the default state of GL_TEXTURE_MIN_FILTER is GL_NEAREST_MIPMAP_LINEAR, leaving it unset on the 3D texture made that texture mipmap-incomplete, which is why sampling it returned zeros.
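With the typo fixed, the two parameter calls from the question become:
glTexParameteri(GL_TEXTURE_3D, GL_TEXTURE_MIN_FILTER, GL_LINEAR); // target now matches the 3D texture
glTexParameteri(GL_TEXTURE_3D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);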

OpenGL 3.3 - I can only bind to GL_TEXTURE0

I've been trying to follow an OpenGL 3.3 tutorial for some time, but it appears that I can only bind a texture to GL_TEXTURE0; otherwise I get a black square.
Example:
This works
glClearColor(0.2f, 0.3f, 0.3f, 1.0f);
glClear(GL_COLOR_BUFFER_BIT);
glActiveTexture(GL_TEXTURE0 + 0);
glBindTexture(GL_TEXTURE_2D, texture1);
glUniform1i(glGetUniformLocation(shader.program, "our_texture1"), 0);
shader.bind();
glBindVertexArray(vertex_array);
glDrawElements(GL_TRIANGLES, 6, GL_UNSIGNED_INT, 0);
glBindVertexArray(0);
This doesn't
glClearColor(0.2f, 0.3f, 0.3f, 1.0f);
glClear(GL_COLOR_BUFFER_BIT);
glActiveTexture(GL_TEXTURE0 + 1);
glBindTexture(GL_TEXTURE_2D, texture1);
glUniform1i(glGetUniformLocation(shader.program, "our_texture1"), 1);
shader.bind();
glBindVertexArray(vertex_array);
glDrawElements(GL_TRIANGLES, 6, GL_UNSIGNED_INT, 0);
glBindVertexArray(0);
Details:
shader.bind simply calls glUseProgram
Vertex Shader:
#version 330 core
layout (location = 0) in vec3 position;
layout (location = 1) in vec3 color;
layout (location = 2) in vec2 tex_coords;
out vec4 our_color;
out vec2 our_tex_coords;
void main()
{
gl_Position = vec4(position, 1.0f);
our_color = vec4(color, 1.0f);
our_tex_coords = tex_coords;
}
Fragment Shader:
#version 330 core
out vec4 color;
in vec4 our_color;
in vec2 our_tex_coords;
uniform sampler2D our_texture1;
void main()
{
color = texture(our_texture1, our_tex_coords);
}
Texture Creation:
GLuint texture1;
glGenTextures(1, &texture1);
glBindTexture(GL_TEXTURE_2D, texture1);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
sf::Image image1;
if (!image1.loadFromFile("../Assets/container.jpg"))
{
error();
}
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, image1.getSize().x, image1.getSize().y, 0, GL_RGBA, GL_UNSIGNED_BYTE, image1.getPixelsPtr());
glGenerateMipmap(GL_TEXTURE_2D);
glBindTexture(GL_TEXTURE_2D, 0);
From what I can inspect, the file is loaded correctly. Any idea what may be wrong?
The earlier answer tells you how to change the code to make it work, but gives the wrong explanation. In this sequence:
glActiveTexture(GL_TEXTURE0 + 1);
glBindTexture(GL_TEXTURE_2D, texture1);
glUniform1i(glGetUniformLocation(shader.program, "our_texture1"), 1);
shader.bind();
with shader.bind() calling glUseProgram(), there is indeed a problem with the sequence. But it has nothing to do with glActiveTexture(), which is completely unrelated to the currently bound program.
The glUniform1i() call sets a uniform value on the currently bound program. So the key is that you call glUseProgram() before glUniform*(). For example, the following sequence will work:
glActiveTexture(GL_TEXTURE0 + 1);
glBindTexture(GL_TEXTURE_2D, texture1);
shader.bind();
glUniform1i(glGetUniformLocation(shader.program, "our_texture1"), 1);
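As a related aside: on OpenGL 4.1+ (or with ARB_separate_shader_objects), glProgramUniform1i() takes the program object explicitly, so this ordering pitfall goes away entirely; a minimal sketch with the question's names:
// sets the uniform on shader.program regardless of which program is currently bound
glProgramUniform1i(shader.program, glGetUniformLocation(shader.program, "our_texture1"), 1);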
Your code looks good, but I think shader.bind() should be called before glActiveTexture in general.
A further hint: if you want your texture to be antialiased when minification kicks in, use GL_LINEAR_MIPMAP_LINEAR instead of GL_LINEAR. This should work out of the box since you already generate mipmaps.

OpenGL scene rendering as black with shadow mapping

I am trying to implement shadow mapping in my OpenGL engine using this tutorial: http://www.fabiensanglard.net/shadowmapping/index.php
I don't have any problems making the shadow map (I think), but when using it the whole scene is shadowed.
The way I render my scene is as follows:
Set up the depth FBO
GLuint sdepthtex;
GLuint sframebuffer;
// the framebuffer must be generated and bound before the depth texture is attached
glGenFramebuffers(1, &sframebuffer);
glBindFramebuffer(GL_FRAMEBUFFER, sframebuffer);
glGenTextures(1, &sdepthtex);
glBindTexture(GL_TEXTURE_2D, sdepthtex);
glTexImage2D(GL_TEXTURE_2D, 0,GL_DEPTH_COMPONENT16, 1024, 1024, 0,GL_DEPTH_COMPONENT, GL_FLOAT, 0);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_COMPARE_FUNC, GL_LEQUAL);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_COMPARE_MODE, GL_COMPARE_R_TO_TEXTURE);
glFramebufferTexture2D(GL_FRAMEBUFFER, GL_DEPTH_ATTACHMENT, GL_TEXTURE_2D, sdepthtex, 0);
glDrawBuffer(GL_NONE);
glReadBuffer(GL_NONE);
if(glCheckFramebufferStatus(GL_FRAMEBUFFER) != GL_FRAMEBUFFER_COMPLETE)
{
displayMessage("Error loading the Depth Framebuffer");
return;
}
Make the ModelViewProjection matrix from the light's perspective and store it in a shadowMatrix variable.
The function I use:
Matrix getMVPmatrix(vector3 position, vector3 lookat)
{
glPushMatrix();
double projection[16];
double modelView[16];
SDL_Surface*screen = SDL_GetVideoSurface();
glMatrixMode(GL_PROJECTION);
glLoadIdentity();
gluPerspective(FOVY, (double)screen->w / screen->h, NEAR, FAR); // cast avoids integer division in the aspect ratio
glMatrixMode(GL_MODELVIEW);
glEnable(GL_DEPTH_TEST);
glLoadIdentity();
gluLookAt(position.x,position.y,position.z,lookat.x,lookat.y,lookat.z,0,1,0);
glGetDoublev(GL_MODELVIEW_MATRIX, modelView);
glGetDoublev(GL_PROJECTION_MATRIX, projection);
Matrix m1(projection);
Matrix m2(modelView);
glPopMatrix();
return m1*m2;
}
Render the scene in the depth framebuffer. The shaders :
Vertex Shader:
uniform mat4 shadowMatrix;
void main()
{
gl_Position = shadowMatrix*gl_Vertex;
}
Fragment Shader:
void main(void)
{
gl_FragDepth = gl_FragCoord.z;
}
For my 3-box scene, the linearized depth looks like this:
http://www.2shared.com/photo/IExy9aUo/Depth.html
So I think the shadowMatrix and the depth rendering are right.
The last pass just draws the scene with the shadow map:
Vertex Shader:
varying vec4 ShadowCoord;
uniform mat4 shadowMatrix;
mat4 biasMatrix = mat4(
0.5, 0.0, 0.0, 0.0,
0.0, 0.5, 0.0, 0.0,
0.0, 0.0, 0.5, 0.0,
0.5, 0.5, 0.5, 1.0
);
void main (void)
{
gl_TexCoord[0] = gl_MultiTexCoord0;
gl_FrontColor = gl_Color;
gl_Position = ftransform();
ShadowCoord = biasMatrix*shadowMatrix *vec4(gl_Vertex.xyz,1.0);
}
Fragment Shader:
uniform sampler2D tex;
uniform sampler2D shadowtex; // the non-linearized depth texture we made in the 3rd step
varying vec4 ShadowCoord;
float getShadowFactor(void)
{
vec4 shadowCoordinateWdivide = ShadowCoord / ShadowCoord.w ;
shadowCoordinateWdivide.z += 0.0005;
float distanceFromLight = texture2D(shadowtex,shadowCoordinateWdivide.st).z;
float shadow = 1.0;
if (ShadowCoord.w > 0.0)
shadow = distanceFromLight < shadowCoordinateWdivide.z ? 0.5 :1.0 ;
return shadow ;
}
void main (void)
{
gl_FragColor =texture2D(tex, gl_TexCoord[0].st);
gl_FragColor.rgb *= getShadowFactor(); // apply the shadows here
}
The result? The whole scene is shadowed!
It looks like your code has an inconsistency between the texture setup and the shader. In the texture setup code, you have this:
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_COMPARE_FUNC, GL_LEQUAL);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_COMPARE_MODE, GL_COMPARE_R_TO_TEXTURE);
When GL_TEXTURE_COMPARE_MODE is not GL_NONE, you need to use a shadow texture sampler in your shader code. But in your fragment shader, you use a regular sampler for this texture:
uniform sampler2D shadowtex;
This needs to be changed to this to be compatible with the texture settings:
uniform sampler2DShadow shadowtex;
To match the type, shadow2D() is then used instead of texture2D() to sample the texture.
The other option is to keep GL_TEXTURE_COMPARE_MODE at its default value of GL_NONE. That is then consistent with using a sampler2D for sampling.
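For illustration, a minimal sketch of the question's getShadowFactor() rewritten for a shadow sampler, keeping the same bias and variable names:
uniform sampler2DShadow shadowtex; // depth texture with compare mode enabled
float getShadowFactor(void)
{
    if (ShadowCoord.w <= 0.0)
        return 1.0;
    vec4 shadowCoordinateWdivide = ShadowCoord / ShadowCoord.w;
    shadowCoordinateWdivide.z += 0.0005; // same bias as before, against acne
    // shadow2D() performs the configured depth comparison and returns
    // 1.0 where the fragment is lit and 0.0 where it is occluded
    float lit = shadow2D(shadowtex, shadowCoordinateWdivide.xyz).r;
    return mix(0.5, 1.0, lit); // 0.5 in shadow, 1.0 lit, as in the original
}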

OpenGL Shadow Map

I am trying to do a basic shadow map, but for some reason it doesn't render properly.
Video of the Problem
I render the house using a flat shader:
int shadowMapWidth = WINDOW_SIZE_X * (int)SHADOW_MAP_RATIO;
int shadowMapHeight = WINDOW_SIZE_Y * (int)SHADOW_MAP_RATIO;
// Rendering into the shadow texture.
glActiveTexture(GL_TEXTURE0);
CALL_GL(glBindTexture(GL_TEXTURE_2D, shadowTexture));
// Bind the framebuffer.
CALL_GL(glBindFramebuffer(GL_FRAMEBUFFER, shadowFBO));
//Clear it
CALL_GL(glClear(GL_DEPTH_BUFFER_BIT));
CALL_GL(glViewport(0, 0, shadowMapWidth, shadowMapHeight));
CALL_GL(glColorMask(GL_FALSE, GL_FALSE, GL_FALSE, GL_FALSE));
//Render stuff
flatShader.use();
flatShader["baseColor"] = glm::vec4(1.0f,1.0f,1.0f,1.0f);
flatShader["pvm"] = projectionMatrix*pointLight.viewMatrix*cursor.modelMatrix;
cursor.draw(); //binds the vao and draws
// Revert for the scene.
CALL_GL(glBindFramebuffer(GL_FRAMEBUFFER, 0));
CALL_GL(glColorMask(GL_TRUE, GL_TRUE, GL_TRUE, GL_TRUE));
CALL_GL(glViewport(0, 0, WINDOW_SIZE_X, WINDOW_SIZE_Y));
Notice that I only render the house. I don't render the floor in the depth-buffer pass.
Following this I render the quad that represents the floor using the following shader pair:
/* [VERT] */
#version 330
in vec3 in_Position;
in vec2 in_TexCoord;
uniform mat4 shadowMatrix;
uniform mat4 mvp;
out vec2 UV;
out vec4 shadowProj;
void main()
{
gl_Position = mvp*vec4(in_Position,1.0);
shadowProj = shadowMatrix*vec4(in_Position,1.0);
UV = in_TexCoord;
}
And the Fragment Shader:
/* [FRAG] */
#version 330
in vec2 UV;
in vec4 shadowProj;
out vec4 fragColor;
uniform sampler2D texturex;
uniform sampler2DShadow shadowMap;
void main()
{
fragColor = vec4(texture(texturex, UV).rgb,1);
float shadow = 1.0;
shadow = textureProj(shadowMap,shadowProj);
fragColor *= shadow;
}
I then draw the house again in color and ... the floor:
textureShader.use();
glUniform1i(baseImageLoc, 0); //Texture unit 0 is for base images.
glUniform1i(shadowMapLoc, 1); //Texture unit 1 is for shadow maps.
glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_2D, floorTexture);
glActiveTexture(GL_TEXTURE1);
glBindTexture(GL_TEXTURE_2D, shadowTexture);
textureShader["shadowMatrix"] = projectionMatrix*pointLight.viewMatrix*floorMatrix;
textureShader["mvp"] = projectionMatrix*viewMatrix*floorMatrix;
CALL_GL(glBindVertexArray(floorVAO));
CALL_GL(glDrawArrays(GL_TRIANGLES,0,18));
glfwSwapBuffers();
Has anybody seen this behavior before? Any idea what could be wrong? By the way, the light's coordinates place it directly on top of the house, so the shadow should be directly below the house on the floor (but it ends up sideways).
For reference here is how I generate the shadow FBO:
int shadowMapWidth = WINDOW_SIZE_X * (int)SHADOW_MAP_RATIO;
int shadowMapHeight = WINDOW_SIZE_Y * (int)SHADOW_MAP_RATIO;
glGenTextures(1, &shadowTexture);
glBindTexture(GL_TEXTURE_2D, shadowTexture);
glTexImage2D(GL_TEXTURE_2D, 0, GL_DEPTH_COMPONENT, shadowMapWidth, shadowMapHeight, 0, GL_DEPTH_COMPONENT, GL_UNSIGNED_BYTE, 0);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_COMPARE_MODE, GL_COMPARE_REF_TO_TEXTURE);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_COMPARE_FUNC, GL_LESS);
glTexImage2D(GL_TEXTURE_2D,0,GL_DEPTH_COMPONENT,shadowMapWidth,shadowMapHeight,0,GL_DEPTH_COMPONENT,GL_FLOAT,NULL);
glTexParameteri(GL_TEXTURE_2D,GL_TEXTURE_COMPARE_MODE,GL_COMPARE_R_TO_TEXTURE);
glBindTexture(GL_TEXTURE_2D, 0); //unbind the texture
glGenFramebuffers(1, &shadowFBO);
glBindFramebuffer(GL_FRAMEBUFFER, shadowFBO);
glFramebufferTexture2D(GL_FRAMEBUFFER, GL_DEPTH_ATTACHMENT, GL_TEXTURE_2D, shadowTexture, 0);
glDrawBuffer(GL_NONE);
glReadBuffer(GL_NONE);
if (glCheckFramebufferStatus(GL_FRAMEBUFFER) != GL_FRAMEBUFFER_COMPLETE)
{ printf("GL_FRAMEBUFFER_COMPLETE error 0x%x", glCheckFramebufferStatus(GL_FRAMEBUFFER)); }
glClearDepth(1.0f);
glEnable(GL_DEPTH_TEST);
// Needed when rendering the shadow map. This will avoid artifacts.
glPolygonOffset(1.0f, 0.0f);
glBindFramebuffer(GL_FRAMEBUFFER, 0);
// to convert clip-space coordinates from [-1, 1] to the [0, 1] texture range
GLfloat biasMatrixf[] = {
0.5f, 0.0f, 0.0f, 0.0f,
0.0f, 0.5f, 0.0f, 0.0f,
0.0f, 0.0f, 0.5f, 0.0f,
0.5f, 0.5f, 0.5f, 1.0f };
biasMatrix = glm::make_mat4(biasMatrixf);
It looks like you forgot to multiply your shadow matrix by the bias matrix.
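Concretely, with the biasMatrix that is built at the end of the FBO setup but never used, the shadow-matrix upload in the draw code would become something like:
// prepend the bias so the projected coordinates land in the [0, 1] texture range
textureShader["shadowMatrix"] = biasMatrix * projectionMatrix * pointLight.viewMatrix * floorMatrix;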