Write positions to texture OpenGL/GLSL - c++

I want to write the model-space vertex positions of a 3D mesh to a texture in OpenGL. Currently, to write to a texture, I bind it as the render target and draw a fullscreen quad in a separate pass (based on the tutorial seen here.)
The problem is that, as far as I understand, I cannot pass more than one vertex list to the shader: the vertex shader can only consume one vertex list at a time, and that slot is currently occupied by the screen-space quad.
Vertex Shader code:
layout(location = 0) in vec4 in_position;
out vec4 vs_position;
void main() {
    vs_position = in_position;
    gl_Position = vec4(in_position.xy, 0.0, 1.0);
}
Fragment Shader code:
in vec4 vs_position; // interpolated coordinate on the screen-space quad (name must match the vertex shader output)
out vec4 outColor;
void main() {
    vec2 uv = vec2(0.5, 0.5) * vs_position.xy + vec2(0.5, 0.5);
    outColor = ?? // Here I need my vertex position
}
Possible solution (?):
My idea was to introduce another shader pass before this one to output the positions as r, g, b, so that the position of the current texel can be retrieved from that texture (the only input format large enough to store many vertices).
Although not 100% accurate, this image might give you an idea of what I want to do:
Model space coordinate map
Is there a way to encode the positions to the texture without using a fullscreen quad on the GPU?
Please let me know if you want to see more code.

Instead of generating the quad on the CPU side, I would attach a geometry shader and create the quad there; that should free up the vertex-attribute slot so your model geometry can be passed in.
Geometry shader:
#version 330 core
layout(points) in;
layout(triangle_strip, max_vertices = 4) out;
out vec2 texcoord;
void main()
{
    gl_Position = vec4( 1.0, 1.0, 0.5, 1.0 );
    texcoord = vec2( 1.0, 1.0 );
    EmitVertex();
    gl_Position = vec4(-1.0, 1.0, 0.5, 1.0 );
    texcoord = vec2( 0.0, 1.0 );
    EmitVertex();
    gl_Position = vec4( 1.0,-1.0, 0.5, 1.0 );
    texcoord = vec2( 1.0, 0.0 );
    EmitVertex();
    gl_Position = vec4(-1.0,-1.0, 0.5, 1.0 );
    texcoord = vec2( 0.0, 0.0 );
    EmitVertex();
    EndPrimitive();
}
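On the application side, only a single point then needs to be submitted; the quad is synthesized on the GPU. A minimal sketch of the draw call (the quadProgram and emptyVAO names are placeholders, not from the original code):
glUseProgram(quadProgram);     // program with the geometry shader above attached
glBindVertexArray(emptyVAO);   // empty VAO; the core profile requires one to be bound
glDrawArrays(GL_POINTS, 0, 1); // one point in, a fullscreen triangle strip out
glBindVertexArray(0);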

Related

OpenGL texture transformation when two textures on same mesh

I have a situation where I have two textures on a single mesh. I want to transform these textures independently. I have base code in which I was able to load and transform one texture. Now I have code that loads two textures, but the issue is that when I try to transform the first texture, both of them get transformed, since we are modifying the shared texture coordinates.
The green one is the first texture and the star is the second texture.
I have no idea how to transform just the second texture. Any solution you have is appreciated.
You can do it in many ways; one of them would be to have two different texture matrices and then pass them to the vertex shader.
#version 400 compatibility
layout (location = 0) in vec3 aPos;
layout (location = 1) in vec3 aNormal;
layout (location = 2) in vec2 aTexCoord;
out vec2 TexCoord;
out vec2 TexCoord2;
out vec3 Normal;
out vec3 FragPos;
uniform mat4 textureMatrix;
uniform mat4 textureMatrix2;
uniform mat4 NormalMatrix;
uniform mat4 ubo_model;
uniform mat4 ubo_view;
uniform mat4 ubo_projection;
void main()
{
    // One matrix per texture: the shared UVs are transformed independently.
    vec4 mTex = textureMatrix * vec4( aTexCoord.x , aTexCoord.y , 0.0 , 1.0 );
    vec4 mTex2 = textureMatrix2 * vec4( aTexCoord.x , aTexCoord.y , 0.0 , 1.0 );
    TexCoord = vec2( mTex.x , mTex.y );
    TexCoord2 = vec2( mTex2.x , mTex2.y );
    Normal = mat3(NormalMatrix) * aNormal;
    FragPos = vec3( ubo_model * vec4( aPos, 1.0 ) );
    gl_Position = ubo_projection * ubo_view * vec4( FragPos, 1.0 );
}
This is how you can create a texture matrix (scaling and rotation are applied around the texture's center):
glm::mat4x4 GetTextureMatrix()
{
    glm::mat4x4 matrix = glm::mat4x4(1.0f);
    // Move the pivot to the center, scale/rotate, then move back and apply the translation.
    matrix = glm::translate(matrix, glm::vec3(-PositionX + 0.5, PositionY + 0.5, 0.0));
    matrix = glm::scale(matrix, glm::vec3(1.0 / ScalingX, 1.0 / ScalingY, 0.0));
    matrix = glm::rotate(matrix, glm::radians(RotationX), glm::vec3(1.0, 0.0, 0.0));
    matrix = glm::rotate(matrix, glm::radians(RotationY), glm::vec3(0.0, 1.0, 0.0));
    matrix = glm::rotate(matrix, glm::radians(-RotationZ), glm::vec3(0.0, 0.0, 1.0));
    matrix = glm::translate(matrix, glm::vec3(-PositionX - 0.5, -PositionY - 0.5, 0.0));
    matrix = glm::translate(matrix, glm::vec3(PositionX, PositionY, 0.0));
    return matrix;
}
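On the fragment side, each interpolated coordinate set then samples its own texture. A minimal sketch of that idea (the sampler names and the alpha blend are assumptions, not from the original code):
#version 400 compatibility
in vec2 TexCoord;
in vec2 TexCoord2;
out vec4 FragColor;
uniform sampler2D baseTexture; // the first (green) texture
uniform sampler2D starTexture; // the second (star) texture
void main()
{
    vec4 base = texture(baseTexture, TexCoord);
    vec4 star = texture(starTexture, TexCoord2);
    // Composite the star over the base using the star's alpha.
    FragColor = mix(base, star, star.a);
}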

OpenGL - Layered Rendering Cube Only Render the First Face [duplicate]

I'm trying to draw to a cubemap in a single pass using a geometry shader in OpenGL.
Basically I need to do this to copy the content of one cubemap into another cubemap, and the two may not have the same resolution and pixel layout.
I'm trying to achieve the result I want by feeding a single point to the vertex shader and then, from the geometry shader, selecting each layer (face of the cubemap) and emitting a quad with texture coordinates.
So far I've tried this method emitting only two of the cubemap faces (positive and negative X) to see if it could work, but it doesn't.
Using NSight I can see that there is something wrong.
This is the source cubemap:
And this is the result cubemap:
The only face that's being drawn to is the positive X and still it's not correct.
This is my geometry shader:
#version 330 core
layout(points) in;
layout(triangle_strip, max_vertices = 8) out;
in vec3 pos[];
out vec3 frag_textureCoord;
void main()
{
    const vec4 positions[4] = vec4[4]( vec4(-1.0, -1.0, 0.0, 0.0),
                                       vec4( 1.0, -1.0, 0.0, 0.0),
                                       vec4(-1.0,  1.0, 0.0, 0.0),
                                       vec4( 1.0,  1.0, 0.0, 0.0) );
    // Positive X
    gl_Layer = 0;
    gl_Position = positions[0];
    frag_textureCoord = vec3(1.0, -1.0, -1.0);
    EmitVertex();
    gl_Position = positions[1];
    frag_textureCoord = vec3(1.0, -1.0, 1.0);
    EmitVertex();
    gl_Position = positions[2];
    frag_textureCoord = vec3(1.0, 1.0, -1.0);
    EmitVertex();
    gl_Position = positions[3];
    frag_textureCoord = vec3(1.0, 1.0, 1.0);
    EmitVertex();
    EndPrimitive();
    // Negative X
    gl_Layer = 1;
    gl_Position = positions[0];
    frag_textureCoord = vec3(-1.0, -1.0, 1.0);
    EmitVertex();
    gl_Position = positions[1];
    frag_textureCoord = vec3(-1.0, -1.0, -1.0);
    EmitVertex();
    gl_Position = positions[2];
    frag_textureCoord = vec3(-1.0, 1.0, 1.0);
    EmitVertex();
    gl_Position = positions[3];
    frag_textureCoord = vec3(-1.0, 1.0, -1.0);
    EmitVertex();
    EndPrimitive();
}
And this is my fragment shader:
#version 150 core
uniform samplerCube AtmosphereMap;
in vec3 frag_textureCoord;
out vec4 FragColor;
void main()
{
FragColor = texture(AtmosphereMap, frag_textureCoord) * 1.0f;
}
UPDATE
Further debugging with NSight shows that for the positive X face every fragment gets a frag_textureCoord value of vec3(~1.0, ~0.0, ~0.0) (I've used ~ since the values are not exactly those, but approximately). The negative X face instead never reaches the fragment shader stage.
UPDATE
Changing the definition of my vertex positions from vec4(x, y, z, 0.0) to vec4(x, y, z, 1.0) makes my shader render the positive X face correctly, but the negative one is still wrong: even though, debugging the fragment shader, I see that the right color is selected and applied, it then becomes black.
gl_Layer = 0;
This is a geometry shader output. Calling EmitVertex causes the value of all output variables to become undefined. Therefore, you must set every output that applies to a vertex, gl_Layer included, before each EmitVertex call.
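Applied to the shader above, that means re-emitting gl_Layer before every EmitVertex call; a sketch of the positive X face (with the vec4(x, y, z, 1.0) fix from the update also applied):
// Positive X
gl_Layer = 0;
gl_Position = vec4(-1.0, -1.0, 0.0, 1.0);
frag_textureCoord = vec3(1.0, -1.0, -1.0);
EmitVertex();
gl_Layer = 0; // re-set: EmitVertex made all outputs undefined
gl_Position = vec4( 1.0, -1.0, 0.0, 1.0);
frag_textureCoord = vec3(1.0, -1.0, 1.0);
EmitVertex();
// ... same pattern for the remaining two vertices, then likewise with gl_Layer = 1 for negative X ...
EndPrimitive();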

OpenGL shadow mapping - shadow map texture doesn't get sampled at all?

I'm currently working on an OpenGL project and I'm trying to get shadow mapping to work properly. I got to a point where the shadow map gets rendered into a texture, but it doesn't seem to get applied to the scenery when rendered. Here are the most important bits of my code:
The shadow map vertex shader, basically a simple pass-through shader (it also does some additional stuff like normals, but that shouldn't distract you); it just transforms the vertices so they're seen from the perspective of the light (it's a directional light, but since we need to assume a position, it's basically a point far away):
#version 430 core
layout(location = 0) in vec3 v_position;
layout(location = 1) in vec3 v_normal;
layout(location = 2) in vec3 v_texture;
layout(location = 3) in vec4 v_color;
out vec3 f_texture;
out vec3 f_normal;
out vec4 f_color;
uniform mat4 modelMatrix;
uniform mat4 depthViewMatrix;
uniform mat4 depthProjectionMatrix;
// Shadow map vertex shader.
void main() {
    mat4 mvp = depthProjectionMatrix * depthViewMatrix * modelMatrix;
    gl_Position = mvp * vec4(v_position, 1.0);
    // Passing attributes on to the fragment shader
    f_texture = v_texture;
    f_normal = (transpose(inverse(modelMatrix)) * vec4(v_normal, 1.0)).xyz;
    f_color = v_color;
}
The shadow map fragment shader that writes the depth value to a texture:
#version 430 core
layout(location = 0) out float fragmentDepth;
in vec3 f_texture;
in vec3 f_normal;
in vec4 f_color;
uniform vec3 lightDirection;
uniform sampler2DArray texSampler;
// Shadow map fragment shader.
void main() {
    fragmentDepth = gl_FragCoord.z;
}
The vertex shader that actually renders the scene. It also calculates the position of the current vertex from the light's point of view (shadowCoord) to compare against the depth texture, and applies a bias matrix, since the coordinates aren't in the correct [0, 1] interval for sampling:
#version 430 core
layout(location = 0) in vec3 v_position;
layout(location = 1) in vec3 v_normal;
layout(location = 2) in vec3 v_texture;
layout(location = 3) in vec4 v_color;
out vec3 f_texture;
out vec3 f_normal;
out vec4 f_color;
out vec3 f_shadowCoord;
uniform mat4 modelMatrix;
uniform mat4 viewMatrix;
uniform mat4 projectionMatrix;
uniform mat4 depthViewMatrix;
uniform mat4 depthProjectionMatrix;
// Simple vertex shader.
void main() {
    mat4 mvp = projectionMatrix * viewMatrix * modelMatrix;
    gl_Position = mvp * vec4(v_position, 1.0);
    // This bias matrix adjusts the projection of a given vertex on a texture to be within 0 and 1 for proper sampling
    mat4 depthBias = mat4(0.5, 0.0, 0.0, 0.5,
                          0.0, 0.5, 0.0, 0.5,
                          0.0, 0.0, 0.5, 0.5,
                          0.0, 0.0, 0.0, 1.0);
    mat4 depthMVP = depthProjectionMatrix * depthViewMatrix * modelMatrix;
    mat4 biasedDMVP = depthBias * depthMVP;
    // Passing attributes on to the fragment shader
    f_shadowCoord = (biasedDMVP * vec4(v_position, 1.0)).xyz;
    f_texture = v_texture;
    f_normal = (transpose(inverse(modelMatrix)) * vec4(v_normal, 1.0)).xyz;
    f_color = v_color;
}
The fragment shader that applies textures from a texture array, receives the depth texture (uniform sampler2D shadowMap), and checks whether a fragment is behind something:
#version 430 core
in vec3 f_texture;
in vec3 f_normal;
in vec4 f_color;
in vec3 f_shadowCoord;
out vec4 color;
uniform vec3 lightDirection;
uniform sampler2D shadowMap;
uniform sampler2DArray tileTextureArray;
// Very basic fragment shader.
void main() {
    float visibility = 1.0;
    if (texture(shadowMap, f_shadowCoord.xy).z < f_shadowCoord.z) {
        visibility = 0.5;
    }
    color = texture(tileTextureArray, f_texture) * visibility;
}
And finally: the function that renders multiple chunks to generate the shadow map and then renders the scene with the shadow map applied:
// Generating the shadow map
glBindFramebuffer(GL_FRAMEBUFFER, m_framebuffer);
glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_2D, m_depthTexture);
m_shadowShader->bind();
glViewport(0, 0, 1024, 1024);
glDisable(GL_CULL_FACE);
glm::vec3 lightDir = glm::vec3(1.0f, -0.5f, 1.0f);
glm::vec3 sunPosition = FPSCamera::getPosition() - lightDir * 64.0f;
glm::mat4 depthViewMatrix = glm::lookAt(sunPosition, FPSCamera::getPosition(), glm::vec3(0, 1, 0));
glm::mat4 depthProjectionMatrix = glm::ortho<float>(-100.0f, 100.0f, -100.0f, 100.0f, 0.1f, 800.0f);
m_shadowShader->setUniformMatrix("depthViewMatrix", depthViewMatrix);
m_shadowShader->setUniformMatrix("depthProjectionMatrix", depthProjectionMatrix);
for (Chunk *chunk : m_chunks) {
    m_shadowShader->setUniformMatrix("modelMatrix", chunk->getModelMatrix());
    chunk->drawElements();
}
m_shadowShader->unbind();
glBindFramebuffer(GL_FRAMEBUFFER, 0);
// Normal draw call
m_chunkShader->bind();
glEnable(GL_CULL_FACE);
glViewport(0, 0, Window::getWidth(), Window::getHeight());
glm::mat4 viewMatrix = FPSCamera::getViewMatrix();
glm::mat4 projectionMatrix = FPSCamera::getProjectionMatrix();
glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_2D, m_depthTexture);
glActiveTexture(GL_TEXTURE1);
m_textures->bind();
m_chunkShader->setUniformMatrix("depthViewMatrix", depthViewMatrix);
m_chunkShader->setUniformMatrix("depthProjectionMatrix", depthProjectionMatrix);
m_chunkShader->setUniformMatrix("viewMatrix", viewMatrix);
m_chunkShader->setUniformMatrix("projectionMatrix", projectionMatrix);
m_chunkShader->setUniformVec3("lightDirection", lightDir);
m_chunkShader->setUniformInteger("shadowMap", 0);
m_chunkShader->setUniformInteger("tileTextureArray", 1);
for (Chunk *chunk : m_chunks) {
    m_chunkShader->setUniformMatrix("modelMatrix", chunk->getModelMatrix());
    chunk->drawElements();
}
Most of the code should be self-explanatory: I bind an FBO with a texture attached, do a normal rendering call into the framebuffer so the scene gets rendered into the texture, and then try to pass that texture to the shader for normal rendering. I've tested whether the texture gets properly generated, and it does: see the generated shadow map here.
However, when rendering the scene, all I see is this.
No shadows applied, visibility is 1.0 everywhere. I also use a debug context, which works properly and logs errors when there are any, but everything seems to be completely fine, no warnings or errors, so I'm the one doing something terribly wrong here. I'm on OpenGL 4.3, by the way.
Hopefully one of you can help me out on this; I've never got shadow maps to work before, this is the closest I've ever come, lol. Thanks in advance.
Commonly a mat4 OpenGL transformation matrix looks like this:
( X-axis.x, X-axis.y, X-axis.z, 0 )
( Y-axis.x, Y-axis.y, Y-axis.z, 0 )
( Z-axis.x, Z-axis.y, Z-axis.z, 0 )
( trans.x, trans.y, trans.z, 1 )
So your depthBias matrix, which you use to convert from normalized device coordinates (in range [-1, 1]) to texture coordinates (in range [0, 1]), should look like this:
mat4 depthBias = mat4(0.5, 0.0, 0.0, 0.0,
                      0.0, 0.5, 0.0, 0.0,
                      0.0, 0.0, 0.5, 0.0,
                      0.5, 0.5, 0.5, 1.0);
or this:
mat4 depthBias = mat4( vec4( 0.5, 0.0, 0.0, 0.0 ),
                       vec4( 0.0, 0.5, 0.0, 0.0 ),
                       vec4( 0.0, 0.0, 0.5, 0.0 ),
                       vec4( 0.5, 0.5, 0.5, 1.0 ) );
After you have transformed a vertex position by the model, view, and projection matrices, the vertex position is in clip space (homogeneous coordinates). You have to convert from clip space to normalized device coordinates (Cartesian coordinates in range [-1, 1]). This is done by dividing by the w component of the homogeneous coordinate:
mat4 depthMVP = depthProjectionMatrix * depthViewMatrix * modelMatrix;
vec4 clipPos = depthMVP * vec4(v_position, 1.0);
vec4 ndcPos = vec4(clipPos.xyz / clipPos.w, 1.0);
f_shadowCoord = (depthBias * ndcPos).xyz;
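On the host side, note that glm's mat4 constructor is column-major just like GLSL's, so the same bias can be baked into the matrix before uploading; this is valid here because the light uses an orthographic projection, where w stays 1. A minimal sketch (the depthBiasMVP uniform name is an assumption, not from the original code):
// Column-major, matching the GLSL constructors above.
glm::mat4 depthBias(0.5f, 0.0f, 0.0f, 0.0f,  // column 0
                    0.0f, 0.5f, 0.0f, 0.0f,  // column 1
                    0.0f, 0.0f, 0.5f, 0.0f,  // column 2
                    0.5f, 0.5f, 0.5f, 1.0f); // column 3: the translation
glm::mat4 depthBiasMVP = depthBias * depthProjectionMatrix * depthViewMatrix * modelMatrix;
glUniformMatrix4fv(glGetUniformLocation(program, "depthBiasMVP"), 1, GL_FALSE, &depthBiasMVP[0][0]);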
A depth texture has one channel only. If you read data from the depth texture, then the data is contained in the x (or r) component of the vector.
Adapt the fragment shader code like this:
if (texture(shadowMap, f_shadowCoord.xy).x < f_shadowCoord.z)
    visibility = 0.5;
The Image Format specification of the Khronos Group says:
Image formats do not have to store each component. When the shader samples such a texture, it will still resolve to a 4-value RGBA vector. The components not stored by the image format are filled in automatically. Zeros are used if R, G, or B is missing, while a missing Alpha always resolves to 1.
see further:
Data Type (GLSL)
GLSL Programming/Vector and Matrix Operations
Transform the modelMatrix
How to render depth linearly in modern OpenGL with gl_FragCoord.z in fragment shader?
OpenGL Shadow map problems
Addition to the solution:
This was an important part of the solution, but another step was needed to render the shadow map properly. The second mistake was using the wrong component of the texture to compare against f_shadowCoord.z: it should have been
texture(shadowMap, f_shadowCoord.xy).r
instead of
texture(shadowMap, f_shadowCoord.xy).z

OpenGL layered rendering interferes with layer 0

I am using gl_Layer = gl_InvocationID; in a geometry shader to render into a framebuffer with a 3D texture attached.
This mostly works fine. Except every invocation of the shader also renders into layer 0, as well as the layer that I specify.
How can I avoid this? Is there something vital I'm missing with setting up the framebuffer? Perhaps with glFramebufferTexture?
Geometry Shader
#version 400
layout(invocations = 32) in;
layout(points) in;
layout(triangle_strip, max_vertices = 3) out;
out vec3 raster_color;
float blue;
void main()
{
    gl_Layer = gl_InvocationID;
    blue = float(gl_InvocationID) / 31.0;
    gl_Position = vec4( -1.0, -1.0, 0.0, 1.0 );
    raster_color = vec3( 0.0, 0.0, blue );
    EmitVertex();
    gl_Position = vec4( 1.0, -1.0, 0.0, 1.0 );
    raster_color = vec3( 1.0, 0.0, blue );
    EmitVertex();
    gl_Position = vec4( 0.0, 1.0, 0.0, 1.0 );
    raster_color = vec3( 1.0, 1.0, blue );
    EmitVertex();
    EndPrimitive();
}
Fragment Shader
#version 400
in vec3 raster_color;
out vec4 fragment_color;
void main()
{
    fragment_color = vec4( raster_color, 1.0 );
}
EmitVertex invalidates all per-vertex outputs after it returns. The most obvious per-vertex outputs in this shader are:
raster_color
gl_Position
But you may not have realized that gl_Layer is also per-vertex, or which vertex it needs to be set for.
gl_Layer will be undefined for every vertex after the first in this shader. Some drivers will leave it untouched and it will simply work; others will do anything they want with it, and you cannot make any assumptions about gl_Layer after EmitVertex (...). You are playing with fire, because it may not be the first vertex that defines a primitive's layer (more on this later).
To fix this, re-write your geometry shader this way:
#version 400
layout(invocations = 32) in;
layout(points) in;
layout(triangle_strip, max_vertices = 3) out;
out vec3 raster_color;
float blue;
void main()
{
    blue = float(gl_InvocationID) / 31.0;
    gl_Position = vec4( -1.0, -1.0, 0.0, 1.0 );
    raster_color = vec3( 0.0, 0.0, blue );
    gl_Layer = gl_InvocationID; // Handle case where First Vertex is Layer Provoking
    EmitVertex();
    gl_Position = vec4( 1.0, -1.0, 0.0, 1.0 );
    raster_color = vec3( 1.0, 0.0, blue );
    gl_Layer = gl_InvocationID; // Handle case where Layer Provoking vertex is Undefined
    EmitVertex();
    gl_Position = vec4( 0.0, 1.0, 0.0, 1.0 );
    raster_color = vec3( 1.0, 1.0, blue );
    gl_Layer = gl_InvocationID; // Handle case where Last Vertex is Layer Provoking
    EmitVertex();
    EndPrimitive();
}
I would like to take this opportunity to point out that only 1 vertex in a primitive needs to have gl_Layer set; this vertex is called the Layer Provoking Vertex. Your shader assumes that the first vertex is the layer provoking vertex, but this is implementation-specific. When in doubt, the best solution is to cover all bases (set gl_Layer for all vertices).
You need to check GL_LAYER_PROVOKING_VERTEX at run-time to figure out which vertex defines your layer. If you do not want to do that, you can write your shader the way I described above. Provoking vertex conventions are usually first or last, but the way geometry shaders work leaves the possibility that any arbitrary vertex could define the layer (GL_UNDEFINED_VERTEX, and this is the case you should assume).
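A minimal C++ sketch of that run-time check (the query is available since OpenGL 4.1):
GLint layerProvoking = 0;
glGetIntegerv(GL_LAYER_PROVOKING_VERTEX, &layerProvoking);
if (layerProvoking == GL_FIRST_VERTEX_CONVENTION) {
    // the first vertex's gl_Layer selects the layer
} else if (layerProvoking == GL_LAST_VERTEX_CONVENTION) {
    // the last vertex's gl_Layer selects the layer
} else if (layerProvoking == GL_UNDEFINED_VERTEX) {
    // any vertex may be used; set gl_Layer on all of them
}
// (GL_PROVOKING_VERTEX is also possible: the glProvokingVertex convention applies.)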
Turned out it was not a problem with gl_Layer. It was simply an error in my glTexParameter call that was causing my resulting 3D texture to repeat rather than clamp to edges.
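For reference, the wrap-mode setup that fix amounts to would look something like this (a sketch; texture3D is a placeholder handle):
glBindTexture(GL_TEXTURE_3D, texture3D);
glTexParameteri(GL_TEXTURE_3D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_3D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_3D, GL_TEXTURE_WRAP_R, GL_CLAMP_TO_EDGE);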

Sun shader not working

I'm trying to get a sun shader to work, but I can't.
What I currently get is a quarter of a circle/ellipse in the lower left of my screen that is stuck to the screen (if I move the camera, it moves along).
All I do is render two triangles to form a screen-covering quad, with the screen width and height as uniforms.
Vertex Shader
#version 430 core
void main(void) {
    const vec4 vertices[6] = vec4[](
        vec4(-1.0, -1.0, 1.0, 1.0),
        vec4(-1.0,  1.0, 1.0, 1.0),
        vec4( 1.0,  1.0, 1.0, 1.0),
        vec4( 1.0,  1.0, 1.0, 1.0),
        vec4( 1.0, -1.0, 1.0, 1.0),
        vec4(-1.0, -1.0, 1.0, 1.0)
    );
    gl_Position = vertices[gl_VertexID];
}
Fragment Shader
#version 430 core
layout(location = 7) uniform int screen_width;
layout(location = 8) uniform int screen_height;
layout(location = 1) uniform mat4 view_matrix;
layout(location = 2) uniform mat4 proj_matrix;
out vec4 color;
uniform vec3 light_pos = vec3(-20.0, 7.5, -20.0);
void main(void) {
    // calculate the light position in screen space and take the x, y components
    vec2 screen_space_light_pos = (proj_matrix * view_matrix * vec4(light_pos, 1.0)).xy;
    // calculate the fragment position in screen space
    vec2 screen_space_fragment_pos = vec2(gl_FragCoord.x / screen_width, gl_FragCoord.y / screen_height);
    // check if it is within the radius of the sun
    if (length(screen_space_light_pos - screen_space_fragment_pos) < 0.1) {
        color = vec4(1.0, 1.0, 0.0, 1.0);
    }
    else {
        discard;
    }
}
What I think it does:
Get the position of the sun (light_pos) in screen space.
Get the fragment position in screen space.
If the distance between them is below a certain value, draw fragment with yellow color;
Else discard.
screen_space_light_pos is still in clip space, not in normalized device coordinates: you've missed the perspective division:
vec3 before_division = (proj_matrix * view_matrix * vec4(light_pos, 1.0)).xyw;
vec2 screen_space_light_pos = before_division.xy / before_division.z; // .z here holds the clip-space w
With common proj_matrix configurations, screen_space_light_pos will now be in [-1,1]x[-1,1]. To match the [0,1] range of screen_space_fragment_pos, you also need to remap it:
screen_space_light_pos = screen_space_light_pos * 0.5 + 0.5;
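Putting both fixes together, the fragment shader's main would look roughly like this (a sketch; the 0.1 radius threshold is kept from the original and is still measured in the non-square [0,1] screen space, so the sun will appear stretched by the aspect ratio):
void main(void) {
    // clip space -> NDC (perspective division) -> [0, 1] range
    vec4 clip_pos = proj_matrix * view_matrix * vec4(light_pos, 1.0);
    vec2 screen_space_light_pos = (clip_pos.xy / clip_pos.w) * 0.5 + 0.5;
    vec2 screen_space_fragment_pos = vec2(gl_FragCoord.x / screen_width, gl_FragCoord.y / screen_height);
    if (length(screen_space_light_pos - screen_space_fragment_pos) < 0.1) {
        color = vec4(1.0, 1.0, 0.0, 1.0); // inside the sun's disc
    } else {
        discard;
    }
}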