As per my previous question here, what if I want to rotate a sampler2D texture inside the fragment shader?
In that question I rotated the texture inside the vertex shader:
#version 120
attribute vec3 a_position;
attribute vec2 a_texCoord;
varying vec2 v_texCoord;
void main()
{
const float w = 1.57;
mat3 A = mat3(cos(w), -sin(w), 0.0,
sin(w), cos(w), 0.0,
0.0, 0.0, 1.0);
gl_Position = vec4(A * a_position, 1.0);
v_texCoord = a_texCoord;
}
but my fragment shader applies a heavy modification that was designed for a clockwise-rotated texture, so with the rotation done in the vertex shader, a horizontal effect ends up being applied to vertical coordinates by the fragment shader.
Is it possible to rotate a sampler2D before applying the modification?
You cannot rotate a sampler2D, but you can rotate the texture coordinates:
#version 120
attribute vec3 a_position;
attribute vec2 a_texCoord;
varying vec2 v_texCoord;
void main()
{
const float w = 1.57;
mat2 uvRotate = mat2(cos(w), -sin(w),
sin(w), cos(w));
gl_Position = vec4(a_position, 1.0);
v_texCoord = uvRotate * a_texCoord;
}
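If the rotation really has to happen in the fragment shader (for instance because the heavy modification is computed there), you can apply the same 2×2 rotation to the interpolated texture coordinates right before sampling. A minimal sketch, assuming the texture uniform is named u_texture and rotating around the texture center rather than the (0,0) corner:
#version 120
varying vec2 v_texCoord;
uniform sampler2D u_texture; // assumed name for the texture uniform
void main()
{
    const float w = 1.57;
    mat2 uvRotate = mat2(cos(w), -sin(w),
                         sin(w),  cos(w));
    // Rotate the UVs around the center of the texture, then sample.
    vec2 uv = uvRotate * (v_texCoord - vec2(0.5)) + vec2(0.5);
    gl_FragColor = texture2D(u_texture, uv);
}
The heavy modification can then operate on uv (or on the fetched texel) exactly as it would on a texture that had been rotated up front.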
I'm currently working on an OpenGL project and I'm trying to get shadow mapping to work properly. I got to the point where the shadow map is rendered into a texture, but it doesn't seem to get applied to the scenery when it is rendered. Here are the most important bits of my code:
The shadow map vertex shader is essentially a simple pass-through shader (it also forwards some additional attributes like normals, but that shouldn't distract you); it just transforms the vertices so they're seen from the perspective of the light (it's a directional light, but since we need to assume a position, it's treated as a point far away):
#version 430 core
layout(location = 0) in vec3 v_position;
layout(location = 1) in vec3 v_normal;
layout(location = 2) in vec3 v_texture;
layout(location = 3) in vec4 v_color;
out vec3 f_texture;
out vec3 f_normal;
out vec4 f_color;
uniform mat4 modelMatrix;
uniform mat4 depthViewMatrix;
uniform mat4 depthProjectionMatrix;
// Shadow map vertex shader.
void main() {
mat4 mvp = depthProjectionMatrix * depthViewMatrix * modelMatrix;
gl_Position = mvp * vec4(v_position, 1.0);
// Passing attributes on to the fragment shader
f_texture = v_texture;
f_normal = (transpose(inverse(modelMatrix)) * vec4(v_normal, 1.0)).xyz;
f_color = v_color;
}
The shadow map fragment shader that writes the depth value to a texture:
#version 430 core
layout(location = 0) out float fragmentDepth;
in vec3 f_texture;
in vec3 f_normal;
in vec4 f_color;
uniform vec3 lightDirection;
uniform sampler2DArray texSampler;
// Shadow map fragment shader.
void main() {
fragmentDepth = gl_FragCoord.z;
}
The vertex shader that actually renders the scene; it also calculates the position of the current vertex from the light's point of view (shadowCoord) to compare against the depth texture, and applies a bias matrix, since the coordinates aren't in the correct [0, 1] interval for sampling:
#version 430 core
layout(location = 0) in vec3 v_position;
layout(location = 1) in vec3 v_normal;
layout(location = 2) in vec3 v_texture;
layout(location = 3) in vec4 v_color;
out vec3 f_texture;
out vec3 f_normal;
out vec4 f_color;
out vec3 f_shadowCoord;
uniform mat4 modelMatrix;
uniform mat4 viewMatrix;
uniform mat4 projectionMatrix;
uniform mat4 depthViewMatrix;
uniform mat4 depthProjectionMatrix;
// Simple vertex shader.
void main() {
mat4 mvp = projectionMatrix * viewMatrix * modelMatrix;
gl_Position = mvp * vec4(v_position, 1.0);
// This bias matrix adjusts the projection of a given vertex on a texture to be within 0 and 1 for proper sampling
mat4 depthBias = mat4(0.5, 0.0, 0.0, 0.5,
0.0, 0.5, 0.0, 0.5,
0.0, 0.0, 0.5, 0.5,
0.0, 0.0, 0.0, 1.0);
mat4 depthMVP = depthProjectionMatrix * depthViewMatrix * modelMatrix;
mat4 biasedDMVP = depthBias * depthMVP;
// Passing attributes on to the fragment shader
f_shadowCoord = (biasedDMVP * vec4(v_position, 1.0)).xyz;
f_texture = v_texture;
f_normal = (transpose(inverse(modelMatrix)) * vec4(v_normal, 1.0)).xyz;
f_color = v_color;
}
The fragment shader that applies textures from a texture array, receives the depth texture (uniform sampler2D shadowMap), and checks whether a fragment is behind something:
#version 430 core
in vec3 f_texture;
in vec3 f_normal;
in vec4 f_color;
in vec3 f_shadowCoord;
out vec4 color;
uniform vec3 lightDirection;
uniform sampler2D shadowMap;
uniform sampler2DArray tileTextureArray;
// Very basic fragment shader.
void main() {
float visibility = 1.0;
if (texture(shadowMap, f_shadowCoord.xy).z < f_shadowCoord.z) {
visibility = 0.5;
}
color = texture(tileTextureArray, f_texture) * visibility;
}
And finally: the function that renders multiple chunks to generate the shadow map and then renders the scene with the shadow map applied:
// Generating the shadow map
glBindFramebuffer(GL_FRAMEBUFFER, m_framebuffer);
glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_2D, m_depthTexture);
m_shadowShader->bind();
glViewport(0, 0, 1024, 1024);
glDisable(GL_CULL_FACE);
glm::vec3 lightDir = glm::vec3(1.0f, -0.5f, 1.0f);
glm::vec3 sunPosition = FPSCamera::getPosition() - lightDir * 64.0f;
glm::mat4 depthViewMatrix = glm::lookAt(sunPosition, FPSCamera::getPosition(), glm::vec3(0, 1, 0));
glm::mat4 depthProjectionMatrix = glm::ortho<float>(-100.0f, 100.0f, -100.0f, 100.0f, 0.1f, 800.0f);
m_shadowShader->setUniformMatrix("depthViewMatrix", depthViewMatrix);
m_shadowShader->setUniformMatrix("depthProjectionMatrix", depthProjectionMatrix);
for (Chunk *chunk : m_chunks) {
m_shadowShader->setUniformMatrix("modelMatrix", chunk->getModelMatrix());
chunk->drawElements();
}
m_shadowShader->unbind();
glBindFramebuffer(GL_FRAMEBUFFER, 0);
// Normal draw call
m_chunkShader->bind();
glEnable(GL_CULL_FACE);
glViewport(0, 0, Window::getWidth(), Window::getHeight());
glm::mat4 viewMatrix = FPSCamera::getViewMatrix();
glm::mat4 projectionMatrix = FPSCamera::getProjectionMatrix();
glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_2D, m_depthTexture);
glActiveTexture(GL_TEXTURE1);
m_textures->bind();
m_chunkShader->setUniformMatrix("depthViewMatrix", depthViewMatrix);
m_chunkShader->setUniformMatrix("depthProjectionMatrix", depthProjectionMatrix);
m_chunkShader->setUniformMatrix("viewMatrix", viewMatrix);
m_chunkShader->setUniformMatrix("projectionMatrix", projectionMatrix);
m_chunkShader->setUniformVec3("lightDirection", lightDir);
m_chunkShader->setUniformInteger("shadowMap", 0);
m_chunkShader->setUniformInteger("tileTextureArray", 1);
for (Chunk *chunk : m_chunks) {
m_chunkShader->setUniformMatrix("modelMatrix", chunk->getModelMatrix());
chunk->drawElements();
}
Most of the code should be self-explanatory: I bind an FBO with a texture attached, do a normal render call into that framebuffer so the result ends up in the texture, and then try to pass that texture into the shader used for normal rendering. I've tested whether the texture gets generated properly, and it does: see the generated shadow map here
However, when rendering the scene, all I see is this.
No shadows are applied; visibility is 1.0 everywhere. I also use a debug context, which works properly and logs errors when there are any, but here it stays completely silent, no warnings or errors, so I'm the one doing something terribly wrong. I'm on OpenGL 4.3, by the way.
Hopefully one of you can help me out on this, I've never got shadow maps to work before, this is the closest I've ever come, lol. Thanks in advance.
Commonly a mat4 OpenGL transformation matrix looks like this (each line below is one column of the matrix, in the order the mat4 constructor expects them):
( X-axis.x, X-axis.y, X-axis.z, 0 )
( Y-axis.x, Y-axis.y, Y-axis.z, 0 )
( Z-axis.x, Z-axis.y, Z-axis.z, 0 )
( trans.x, trans.y, trans.z, 1 )
So your depthBias matrix, which you use to convert from normalized device coordinates (in range [-1, 1]) to texture coordinates (in range [0, 1]), should look like this:
mat4 depthBias = mat4(0.5, 0.0, 0.0, 0.0,
0.0, 0.5, 0.0, 0.0,
0.0, 0.0, 0.5, 0.0,
0.5, 0.5, 0.5, 1.0);
or this:
mat4 depthBias = mat4(
vec4( 0.5, 0.0, 0.0, 0.0 ),
vec4( 0.0, 0.5, 0.0, 0.0 ),
vec4( 0.0, 0.0, 0.5, 0.0 ),
vec4( 0.5, 0.5, 0.5, 1.0 ) );
After you have transformed a vertex position by the model matrix, the view matrix and the projection matrix, the vertex position is in clip space (homogeneous coordinates). You have to convert from clip space to normalized device coordinates (Cartesian coordinates in range [-1, 1]). This is done by dividing by the w component of the homogeneous coordinate (the perspective divide):
mat4 depthMVP = depthProjectionMatrix * depthViewMatrix * modelMatrix;
vec4 clipPos = depthMVP * vec4(v_position, 1.0);
vec4 ndcPos = vec4(clipPos.xyz / clipPos.w, 1.0);
f_shadowCoord = (depthBias * ndcPos).xyz;
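As a side note (not part of the original answer): since the bias matrix is constant, it can also be built once on the CPU with glm, which uses the same column-major constructor order, and folded into the depth MVP before uploading it as a single (hypothetical) depthBiasMVP uniform. A rough sketch, reusing the matrices from the render function above:
// Column-major, matching the GLSL constructor above: the last column holds the translation.
glm::mat4 depthBias(
    0.5f, 0.0f, 0.0f, 0.0f,
    0.0f, 0.5f, 0.0f, 0.0f,
    0.0f, 0.0f, 0.5f, 0.0f,
    0.5f, 0.5f, 0.5f, 1.0f);
// The bias leaves w untouched, and the light uses an orthographic projection (w == 1),
// so the combined matrix can be applied directly in the vertex shader.
glm::mat4 depthBiasMVP = depthBias * depthProjectionMatrix * depthViewMatrix * chunk->getModelMatrix();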
A depth texture has one channel only. If you read data from the depth texture, then the data is contained in the x (or r) component of the vector.
Adapt the fragment shader code like this:
if ( texture(shadowMap, f_shadowCoord.xy).x < f_shadowCoord.z)
visibility = 0.5;
The Khronos Group's Image Format specification says:
Image formats do not have to store each component. When the shader
samples such a texture, it will still resolve to a 4-value RGBA
vector. The components not stored by the image format are filled in
automatically. Zeros are used if R, G, or B is missing, while a
missing Alpha always resolves to 1.
see further:
Data Type (GLSL)
GLSL Programming/Vector and Matrix Operations
Transform the modelMatrix
How to render depth linearly in modern OpenGL with gl_FragCoord.z in fragment shader?
OpenGL Shadow map problems
Addition to the solution:
This was an important part of the solution, but another step was needed to render the shadow map properly. The second mistake was using the wrong component of the texture for the comparison against f_shadowCoord.z: it should have been
texture(shadowMap, f_shadowCoord.xy).r
instead of
texture(shadowMap, f_shadowCoord.xy).z
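For completeness, a sketch of the scene fragment shader's main() with both fixes applied (nothing new beyond what is shown above):
void main() {
    float visibility = 1.0;
    // A depth texture has a single channel; read .r (equivalently .x).
    if (texture(shadowMap, f_shadowCoord.xy).r < f_shadowCoord.z) {
        visibility = 0.5;
    }
    color = texture(tileTextureArray, f_texture) * visibility;
}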
I'm using OpenGL to draw a large array of 2D points with their colors. Each point (vertex) also has its alpha channel defined in the MX.c array. I'd like to be able to increase or decrease the alpha value of the whole array (of every vertex displayed). Is there a clever way to do it using OpenGL functions? Here's my drawing method:
void PointsMX::drawMX()
{
glEnableClientState(GL_VERTEX_ARRAY);
glEnableClientState(GL_COLOR_ARRAY);
glColorPointer(4, GL_UNSIGNED_BYTE, 0, MX.c);
glVertexPointer(2, GL_DOUBLE, 0, MX.p);
glPushMatrix();
glTranslated(position[X], position[Y], 0.0);
glScaled(scale, scale, 1.0);
glDrawArrays(GL_POINTS, 0, MX.size);
glPopMatrix();
glDisableClientState(GL_VERTEX_ARRAY);
glDisableClientState(GL_COLOR_ARRAY);
}
As datenwolf points out in his comments, you can do this pretty simply using a shader, but not using the fixed function pipeline (which is what you're using if you never call glUseProgram()).
If you're not using lighting, reproducing the fixed function shaders isn't very hard, and a little googling will help you get up to that point.
The key here is that you want to change something that is normally a vertex attribute (the alpha channel of the color) to a configurable value for the entire drawing operation. In shader terms this means overriding the vertex attribute with a uniform. A uniform is simply a value you pass into an OpenGL program which then has the same value for every vertex or fragment processed (depending on whether you put it into the vertex or fragment shader).
Here's an example of a very basic vertex shader:
#version 330
uniform mat4 Projection = mat4(1);
uniform mat4 ModelView = mat4(1);
layout(location = 0) in vec3 Position;
layout(location = 3) in vec4 Color;
out vec4 vColor;
void main() {
gl_Position = Projection * ModelView * vec4(Position, 1);
vColor = Color;
}
And a corresponding fragment shader
#version 330
in vec4 vColor;
out vec4 FragColor;
void main()
{
FragColor = vColor;
}
In order to accomplish what you're trying to do, you'd want to change the vertex shader to add an additional uniform representing your alpha override:
#version 330
uniform mat4 Projection = mat4(1);
uniform mat4 ModelView = mat4(1);
uniform float AlphaOverride = -1.0;
layout(location = 0) in vec3 Position;
layout(location = 3) in vec4 Color;
out vec4 vColor;
void main() {
gl_Position = Projection * ModelView * vec4(Position, 1);
vColor = Color;
if (AlphaOverride > 0.0) {
vColor.a = AlphaOverride;
}
}
If you never set the AlphaOverride uniform it will stay at -1 and will therefore be ignored by the vertex shader. But if you set it to a value between 0 and 1, it will be applied to the alpha channel of every vertex.
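On the C++ side, setting the override is just a uniform update; a minimal sketch, assuming the linked program object is called program:
glUseProgram(program);
GLint alphaLoc = glGetUniformLocation(program, "AlphaOverride");
glUniform1f(alphaLoc, 0.25f);   // force every vertex to 25% opacity
// ... issue the draw call(s) ...
glUniform1f(alphaLoc, -1.0f);   // restore the per-vertex alpha from MX.c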
I've set up an OpenGL environment with deferred shading following this tutorial, but I can't make the second shader write to my final buffer.
I can see that the first shader (the one that doesn't use lights) is working properly, because with gDEBugger I can see that the output buffers are correct, but the second shader really can't display anything. I've also tried to make the second shader output a single color for the whole scene just to see if it was displaying anything, but nothing is visible (the screen should be completely red but it isn't).
The first-pass shader (the one I use to create the buffers for the GBuffer) is working, so I'm not adding its code or how I created and implemented my GBuffer, but if you need them I'll add them, just tell me.
I think the problem is where I tell OpenGL to output to framebuffer 0 (my screen).
This is how I enable OpenGL to write to framebuffer 0:
glEnable(GL_BLEND);
m_MotoreGrafico->glBlendEquation(GL_FUNC_ADD);
glBlendFunc(GL_ONE, GL_ONE);
// Enable writing to the final buffer
m_MotoreGrafico->glBindFramebuffer(GL_DRAW_FRAMEBUFFER, 0);
m_gBuffer.BindForReading();
glClear(GL_COLOR_BUFFER_BIT);
// Set the shader matrices
SetUpOGLProjectionViewMatrix(1);
// Pass the GBuffer textures to the shader
pActiveShader->setUniform1i(_T("gPositionMap"), m_gBuffer.GetPositionTexture());
pActiveShader->setUniform1i(_T("gColorMap"), m_gBuffer.GetDiffuseTexture());
pActiveShader->setUniform1i(_T("gNormalMap"), m_gBuffer.GetNormalTexture());
// Pass other variables needed by the shader
float dimensioneFinestra[2], posizioneCamera[3];
dimensioneFinestra[0] = m_nLarghezzaFinestra;
dimensioneFinestra[1] = m_nAltezzaFinestra;
m_MotoreGrafico->GetActiveCameraPosition(posizioneCamera);
pActiveShader->setUniform2f(_T("gScreenSize"), dimensioneFinestra);
pActiveShader->setUniform3f(_T("gCameraPos"), posizioneCamera);
pActiveShader->setUniform1i(_T("gUsaLuci"), 0);
// Draw the lights
float coloreLuce[3], posizioneLuce[3], direzioneLuce[3], vUpLuce[3], vRightLuce[3], intensita;
for(int i = 0; i < GetDocument()->m_RTL.GetNLights(); i++)
{
CRTLuce* pRTLuce = GetDocument()->m_RTL.GetRTLightAt(i);
...
m_MotoreGrafico->glBindVertexArray(pRTLuce->GetRTLuce()->GetVBO()->getVBAIndex());
glDrawArrays(GL_TRIANGLES, 0, pRTLuce->GetRTLuce()->GetNVertPerShader());
}
The function m_gBuffer.BindForReading() looks like this (but I think it doesn't matter for my problem):
for (unsigned int i = 0 ; i < ARRAY_SIZE_IN_ELEMENTS(m_textures); i++)
{
m_pMotoreGrafico->glActiveTexture(GL_TEXTURE0 + i);
glBindTexture(GL_TEXTURE_2D, m_textures[GBUFFER_TEXTURE_TYPE_POSITION + i]);
}
So far my GBuffer is working (it creates the textures) and my first shader is also working (it's drawing the textures of my GBuffer).
The problem, then, is that I can't get OpenGL to draw to my screen again.
The first 4 textures are the ones created with the first-pass shader.
This is my back buffer (after the second-pass shader)
And this is my front buffer (after the second-pass shader)
This is my second-pass fragment shader code (it outputs only red)
out vec4 outputColor;
void main()
{
outputColor = vec4(1.0, 0.0, 0.0, 1.0);
}
Does anyone have an idea of what I'm doing wrong?
Second-pass vertex shader code:
#version 330
uniform struct Matrici
{
mat4 projectionMatrix;
mat4 modelMatrix;
mat4 viewMatrix;
} matrices;
layout (location = 0) in vec3 inPosition;
void main()
{
vec4 vEyeSpacePosVertex = matrices.viewMatrix * matrices.modelMatrix * vec4(inPosition, 1.0);
gl_Position = matrices.projectionMatrix * vEyeSpacePosVertex;
}
Second-pass fragment shader code:
#version 330
uniform struct MDLight
{
vec3 vColor;
vec3 vPosition;
vec3 vDirection;
float fAmbientIntensity;
float fStrength;
int bOn;
float fConeCosine;
float fAltezza;
float fLarghezza;
vec3 vUp;
vec3 vRight;
} gLuce;
uniform float gSpecularIntensity;
uniform float gSpecularPower;
uniform sampler2D gPositionMap;
uniform sampler2D gColorMap;
uniform sampler2D gNormalMap;
uniform vec3 gCameraPos;
uniform vec2 gScreenSize;
uniform int gLightType;
uniform int gUsaLuci;
vec2 CalcTexCoord()
{
return gl_FragCoord.xy / gScreenSize;
}
out vec4 outputColor;
void main()
{
vec2 TexCoord = CalcTexCoord();
vec4 Color = texture(gColorMap, TexCoord);
outputColor = vec4(1.0, 0.0, 0.0, 1.0);
}
I'd like to display a simple UV sphere (exported from Blender) and generate lines along the normals, using a single geometry shader.
First, I wrote a simple geometry shader which simply passes the input vertex information on to the fragment shader. For the sake of simplicity (for this example) I removed the lighting calculations in the fragment shader.
Vertex shader:
#version 400
layout (location = 0) in vec3 VertexPosition;
layout (location = 1) in vec3 VertexNormal;
uniform mat4 MVP;
out vec3 VPosition;
out vec3 VNormal;
void main(void)
{
VNormal = VertexNormal;
gl_Position = vec4(VertexPosition, 1.0f);
}
Geometry shader:
#version 400
layout(points) in;
layout(line_strip, max_vertices = 2) out;
uniform mat4 MVP;
in vec3 VNormal[];
out vec3 fcolor;
void main(void)
{
float size = 2.5f;
fcolor = vec3(0.0f, 0.0f, 1.0f);
gl_Position = MVP * gl_in[0].gl_Position;
EmitVertex();
fcolor = vec3(1.0f, 1.0f, 0.0f);
gl_Position = MVP * vec4(gl_in[0].gl_Position.xyz + vec3(
VNormal[0].x * size, VNormal[0].y * size, VNormal[0].z * size), 1.0f);
EmitVertex();
EndPrimitive();
}
And the fragment shader:
#version 400
in vec3 Position;
in vec3 Normal;
in vec2 TexCoords;
out vec4 FragColor;
in vec3 fcolor;
void main(void)
{
FragColor = vec4(fcolor, 1.0f);
}
Now in the C++ code the primitive type to draw (here triangles):
glDrawArrays(GL_TRIANGLES, 0, meshList[idx]->getVertexBuffer()->getBufferSize());
And finally, the output:
Up to this point everything is OK.
Now I want to generate strands on the sphere along the normals. To get this done I wrote the following geometry shader (the vertex and fragment shaders are the same).
#version 400
layout(points) in;
layout(line_strip, max_vertices = 2) out;
uniform mat4 MVP;
in vec3 VNormal[];
out vec3 fcolor;
void main(void)
{
float size = 1.0f;
fcolor = vec3(0.0f, 0.0f, 1.0f);
gl_Position = MVP * gl_in[0].gl_Position;
EmitVertex();
fcolor = vec3(1.0f, 1.0f, 0.0f);
gl_Position = MVP * vec4(gl_in[0].gl_Position.xyz + vec3(
VNormal[0].x * size, VNormal[0].y * size, VNormal[0].z * size), 1.0f);
EmitVertex();
EndPrimitive();
}
The input primitive type now being points, I modified the C++ code that draws the scene:
glDrawArrays(GL_POINTS, 0, meshList[idx]->getVertexBuffer()->getBufferSize());
And the output:
Finally, if I want triangles as the input primitive and a line_strip as the output primitive of the geometry shader, I have the following shader:
#version 400
layout(triangles, invocations = 3) in;
layout(line_strip, max_vertices = 6) out;
uniform mat4 MVP;
in vec3 VNormal[];
out vec3 fcolor;
void main(void)
{
float size = 1.0f;
for (int i = 0; i < 3; i++)
{
fcolor = vec3(0.0f, 0.0f, 1.0f);
gl_Position = MVP * gl_in[i].gl_Position;
EmitVertex();
fcolor = vec3(1.0f, 1.0f, 0.0f);
gl_Position = MVP * vec4(gl_in[0].gl_Position.xyz + vec3(
VNormal[0].x * size, VNormal[0].y * size, VNormal[0].z * size), 1.0f);
EmitVertex();
EndPrimitive();
}
}
And the output is the following:
But my goal is to display the whole scene (sphere + strands) in one pass using the same geometry shader. I'd like to know if that's possible. I don't think so, because a geometry shader must have exactly one input primitive type and one output primitive type, not several, but I want to be sure.
Who knows, maybe one day there'll be an extension to emit multiple primitive types from a geometry shader, but as you say it can't currently be done.
One alternative might be to draw the normal lines with triangles instead.
Another, but completely useless in this case, might be to use the transform feedback extension to save the vertex shader results and reuse that data with two separate geometry shaders. I only mention this as it's the closest thing I could think of to emit multiple primitive types after the vertex stage.
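For the first alternative (drawing the normal lines with triangles), here's a rough, untested sketch of one geometry shader that takes triangles in, passes the mesh triangle through, and emits a thin screen-space quad per vertex for its normal; normalLength and lineWidth are assumed uniforms that aren't in your original code:
#version 400
layout(triangles) in;
layout(triangle_strip, max_vertices = 15) out;  // 3 for the triangle + 3 * 4 for the quads
uniform mat4 MVP;
uniform float normalLength = 1.0;  // assumed uniform
uniform float lineWidth = 0.01;    // assumed clip-space half-width
in vec3 VNormal[];
out vec3 fcolor;
void emitLineVertex(vec4 clipPos, vec3 color)
{
    fcolor = color;
    gl_Position = clipPos;
    EmitVertex();
}
void main(void)
{
    // Pass the mesh triangle through unchanged (blue, as in the original shaders).
    for (int i = 0; i < 3; i++)
        emitLineVertex(MVP * gl_in[i].gl_Position, vec3(0.0, 0.0, 1.0));
    EndPrimitive();
    // One thin screen-space quad per vertex, drawn along its normal (yellow).
    for (int i = 0; i < 3; i++)
    {
        vec4 a = MVP * gl_in[i].gl_Position;
        vec4 b = MVP * vec4(gl_in[i].gl_Position.xyz + VNormal[i] * normalLength, 1.0);
        vec2 dir = normalize(b.xy / b.w - a.xy / a.w);      // projected line direction
        vec4 off = vec4(-dir.y, dir.x, 0.0, 0.0) * lineWidth; // perpendicular, used as width
        emitLineVertex(a + off * a.w, vec3(1.0, 1.0, 0.0));
        emitLineVertex(a - off * a.w, vec3(1.0, 1.0, 0.0));
        emitLineVertex(b + off * b.w, vec3(1.0, 1.0, 0.0));
        emitLineVertex(b - off * b.w, vec3(1.0, 1.0, 0.0));
        EndPrimitive();
    }
}
Since everything is emitted as triangle_strip output, a single glDrawArrays(GL_TRIANGLES, ...) call then draws both the sphere and the strands.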
EDIT
The two geometry shaders for drawing normals confuse me. In the second one, max_vertices = 3, which should be 6 for 3 separate lines, and EndPrimitive should also be inside the for-loop so the 3 lines aren't connected. But you've already sorted this out by drawing GL_POINTS in the previous one. Is this intended to be structured for multiple-primitive output, if it were supported? (fixed)
Given that your geometry reuses many vertices, indexed drawing with glDrawElements would be more efficient. You'd still want to use glDrawArrays for the normal lines, though, to avoid drawing duplicate normals for vertices that the index array references more than once.
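A rough illustration of that suggestion (sphereVAO, indexCount and vertexCount are placeholder names, not from your code):
// Sphere: indexed, so shared vertices are stored once and reused via the element buffer.
glBindVertexArray(sphereVAO);
glDrawElements(GL_TRIANGLES, indexCount, GL_UNSIGNED_INT, nullptr);
// Normal strands: non-indexed points, one per stored vertex, so no duplicates are drawn.
glDrawArrays(GL_POINTS, 0, vertexCount);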