GLSL shaders going black/transparent - OpenGL

I am trying to add some shaders to my GLUT scene objects. For now I am implementing "hello world" shaders, but when I use the default vertex shader, my objects disappear.
Shaders:
#define GLSL(version, shader) "#version " #version " core\n" #shader
const char* vert = GLSL
(
330,
layout (std140) uniform Matrices {
mat4 pvm;
} ;
in vec4 position;
out vec4 color;
void main()
{
color = position;
gl_Position = pvm * position ;
}
);
const char* frag = GLSL
(
330,
in vec4 color;
out vec4 outputF;
void main()
{
outputF = vec4(1.0, 0.5, 0.25, 1.0);
}
);
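For context: with a core-profile shader like this, the pvm matrix has to be supplied by the application through a uniform buffer bound to the Matrices block. A minimal sketch of that setup, assuming program holds the linked program id and pvmMatrix points to 16 floats (both names are illustrative):
GLuint ubo;
glGenBuffers(1, &ubo);
glBindBuffer(GL_UNIFORM_BUFFER, ubo);
glBufferData(GL_UNIFORM_BUFFER, 16 * sizeof(GLfloat), pvmMatrix, GL_DYNAMIC_DRAW);
// Tie the Matrices block to binding point 0, then bind the buffer there
GLuint blockIndex = glGetUniformBlockIndex(program, "Matrices");
glUniformBlockBinding(program, blockIndex, 0);
glBindBufferBase(GL_UNIFORM_BUFFER, 0, ubo);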
Compilation shows no errors:
Compiling shader : vertex shader
VERTEX STATUS:1
Compiling shader : fragment shader
FRAGMENT STATUS:1
Linking program
PROGRAM STATUS:1
PROGRAM ID : 3
(Screenshots: the scene before calling glUseProgram; after calling glUseProgram; and after calling glUseProgram without the vertex shader attached.)
Code for rendering:
int opengl_draw_path_gl(rendered_path_t *p) {
    unsigned int num_vertices, j;
    unsigned int face_size;
    unsigned long i, num_elems;
    vect_t *a, *b;
    num_elems = p->num_prisms;
    num_vertices = p->prism_faces;
    face_size = num_vertices * 2;
    a = p->data + 2; // skip the center point of the first face
    b = a + face_size;
    glColor4fv(p->color);
    // draw the start cap
    _opengl_draw_path_terminator(num_vertices, p->data, a);
    // draw all the prisms
    glBegin(GL_TRIANGLE_STRIP);
    for (i = 0; i < num_elems; i++) {
        for (j = 0; j < num_vertices; j++) {
            glNormal3fv((GLfloat *)(a + j * 2));
            glVertex3fv((GLfloat *)(a + j * 2 + 1));
            glNormal3fv((GLfloat *)(b + j * 2));
            glVertex3fv((GLfloat *)(b + j * 2 + 1));
        }
        glNormal3fv((GLfloat *)(a));
        glVertex3fv((GLfloat *)(a + 1));
        glNormal3fv((GLfloat *)(b));
        glVertex3fv((GLfloat *)(b + 1));
        a += face_size;
        b += face_size;
    }
    glEnd();
    // draw the end cap
    _opengl_draw_path_terminator(num_vertices, b, a);
    return 0;
}

First of all, I recommend reading a tutorial about vertex array objects.
But since you are drawing with glBegin and glEnd, which is deprecated, you have to use compatibility-mode shaders. You have to use the deprecated built-in attributes gl_Vertex and gl_Normal, which correspond to the glVertex3fv and glNormal3fv commands.
Adapt your code somewhat like this:
#define GLSL(version, shader) "#version " #version "\n" #shader
Vertex shader:
const char* vert = GLSL
(
110,
varying vec4 position;
varying vec3 normal;
void main()
{
position = gl_ModelViewMatrix * gl_Vertex;
normal = normalize( gl_NormalMatrix * gl_Normal.xyz );
gl_Position = gl_ModelViewProjectionMatrix * gl_Vertex;
}
);
Fragment shader:
const char* frag = GLSL
(
110,
varying vec4 position;
varying vec3 normal;
void main()
{
gl_FragColor = vec4(1.0, 0.5, 0.25, 1.0);
}
);
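With the compatibility-profile shaders above, the existing immediate-mode drawing code can stay as it is; the program just has to be installed before drawing. A minimal sketch, assuming program is the linked id reported in the log and path is your rendered_path_t pointer (both names are illustrative):
glUseProgram(program);     // gl_Vertex/gl_Normal now feed the shader's built-ins
opengl_draw_path_gl(path); // the glBegin/glEnd code from the question
glUseProgram(0);           // back to the fixed-function pipeline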

Related

gl_VertexID always zero in any useful geometry shader

I am using indexed rendering and a geometry shader. If I pass gl_VertexID to the geometry shader, it works fine as long as I do not emit any vertices; if I emit one or more vertices, gl_VertexID (passed under any name) is zero. Why?
Using the shaders below, the geometry shader will put the correct indices into my feedback buffer if, and only if, I comment out both EmitVertex calls. What am I missing?
(I can work around it, but it is bugging the hell out of me!)
VERTEX SHADER
#version 440
in vec4 position;
out VSOUT{
vec4 gl_Position;
int index;
} vsout;
uniform mat4 gl_ModelViewMatrix;
void main(){
gl_Position = gl_ModelViewMatrix * position;
vsout.index = gl_VertexID;
vsout.gl_Position = gl_Position;
}
GEOMETRY SHADER
#version 440
#extension GL_ARB_shader_storage_buffer_object : enable
layout (lines) in;
layout (line_strip, max_vertices = 2) out;
in VSOUT{
vec4 gl_Position;
int index;
} vdata[];
layout (std430, binding=0) buffer FeedbackBuffer{
vec2 fb[];
};
void main(){
int i = vdata[0].index;
int j = vdata[1].index;
fb[gl_PrimitiveIDIn][0] = vdata[0].index;
fb[gl_PrimitiveIDIn][1] = vdata[1].index;
gl_Position = gl_in[0].gl_Position;
EmitVertex();
gl_Position = gl_in[1].gl_Position;
EmitVertex();
}
FRAGMENT SHADER
#version 430
out vec4 outputColor;
void main(){
outputColor = vec4(.5,.5,.5,.5);
}
So this looks like an NVIDIA implementation issue. If I run these shaders on a GeForce GTX 580, the behaviour is as described above. Using an AMD FirePro V5900, it behaves as I'd expect, with the correct values in the feedback buffer whether or not I emit vertices.
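For anyone comparing the two implementations, one way to inspect the feedback buffer on the CPU after the draw call is a plain read-back. A minimal sketch, assuming fbo is the buffer object bound to binding point 0 and numPrimitives is the number of lines drawn (both names are illustrative):
glMemoryBarrier(GL_BUFFER_UPDATE_BARRIER_BIT); // make shader writes visible to glGetBufferSubData
glBindBuffer(GL_SHADER_STORAGE_BUFFER, fbo);
std::vector<GLfloat> data(2 * numPrimitives);  // one vec2 per primitive
glGetBufferSubData(GL_SHADER_STORAGE_BUFFER, 0,
                   data.size() * sizeof(GLfloat), data.data());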

OpenGL - Adding Tessellation Control Shader yields black screen

When I add my tessellation control shader to my rendering program, the viewport turns black. Without the TCS, the vertex and fragment shaders work fine. I also checked for compile errors, but none occur.
Vertex shader:
#version 410 core
layout (location = 0) in vec4 offset;
layout (location = 1) in vec4 color;
out VS_OUT {
vec4 color;
} vs_out;
void main(void) {
const vec4 vertices[3] = vec4[3]
(
vec4( 0.25, -0.25, 0.5, 1.0),
vec4(-0.25, -0.25, 0.5, 1.0),
vec4( 0.25, 0.25, 0.5, 1.0)
);
// Add "offset" to our hard-coded vertex position
gl_Position = vertices[gl_VertexID] + offset;
// Output the color from input attrib
vs_out.color = color;
}
Tessellation control shader:
#version 410 core
layout (vertices = 3) out;
void main(void) {
if (gl_InvocationID == 0) {
gl_TessLevelInner[0] = 5.0;
gl_TessLevelOuter[0] = 5.0;
gl_TessLevelOuter[1] = 5.0;
gl_TessLevelOuter[2] = 5.0;
}
gl_out[gl_InvocationID].gl_Position = gl_in[gl_InvocationID].gl_Position;
}
Tessellation evaluation shader:
#version 410 core
layout (triangles, equal_spacing, cw) in;
void main(void) {
gl_Position = (gl_TessCoord.x * gl_in[0].gl_Position +
gl_TessCoord.y * gl_in[1].gl_Position +
gl_TessCoord.z * gl_in[2].gl_Position);
}
Fragment shader:
#version 410 core
in VS_OUT {
vec4 color;
} fs_in;
out vec4 color;
void main(void) {
color = fs_in.color;
}
I forgot to check for shader linking errors. And this is what I get:
WARNING: Output of vertex shader '<out VS_OUT.color>' not read by tessellation control shader
ERROR: Input of fragment shader '<in VS_OUT.color>' not written by tessellation evaluation shader
How can I fix this?
Without the code of the other shaders it's hard to help you.
Make sure your tessellation evaluation shader is correct too. A default one should look like this:
#version 410 core
layout(triangles, equal_spacing, ccw) in;
layout(packed) uniform MatrixBlock
{
mat4 projmat;
mat4 viewmat;
} matTransform;
void main ()
{
vec4 pos = gl_TessCoord.x * gl_in[0].gl_Position
+ gl_TessCoord.y * gl_in[1].gl_Position
+ gl_TessCoord.z * gl_in[2].gl_Position;
gl_Position = matTransform.projmat * matTransform.viewmat * pos;
}
The important part is the interpolation using the barycentric coordinates on the patch triangle. Also, if the transformations are done in your vertex shader instead of the tessellation evaluation shader, you may get strange results too.
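One host-side detail that also produces a black screen (an assumption here, since the draw code isn't shown): once a tessellation control shader is part of the program, the geometry must be submitted as patches, otherwise the draw call errors out. A minimal sketch:
glPatchParameteri(GL_PATCH_VERTICES, 3); // 3 control points per patch, matching layout (vertices = 3)
glDrawArrays(GL_PATCHES, 0, 3);          // drawing GL_TRIANGLES here would raise GL_INVALID_OPERATION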
Edit:
Now that you have added tessellation stages, you can't pass varying data directly from the vertex shader to the fragment shader. Indeed, there are new triangles inside the original patch triangle, so you have to set the color for all these new triangles too. In practice, when you use tessellation stages, the vertex shader and the tessellation control shader usually forward their vertex inputs to the tessellation evaluation shader.
So your tessellation control shader should look like this:
#version 410 core
layout (vertices = 3) out;
in VS_OUT { vec4 color; } tcs_in[]; /* new */
out TCS_OUT { vec4 color; } tcs_out[]; /* new */
void main(void) {
if (gl_InvocationID == 0) {
gl_TessLevelInner[0] = 5.0;
gl_TessLevelOuter[0] = 5.0;
gl_TessLevelOuter[1] = 5.0;
gl_TessLevelOuter[2] = 5.0;
}
gl_out[gl_InvocationID].gl_Position = gl_in[gl_InvocationID].gl_Position;
tcs_out[gl_InvocationID].color = tcs_in[gl_InvocationID].color; /* forward the data */
}
And your tessellation evaluation shader must also interpolate the color:
#version 410 core
layout (triangles, equal_spacing, cw) in;
in TCS_OUT { vec4 color; } tes_in[]; /* new */
out TES_OUT { vec4 color; } tes_out; /* new */
void main(void) {
tes_out.color = (gl_TessCoord.x * tes_in[0].color + /* Interpolation */
gl_TessCoord.y * tes_in[1].color +
gl_TessCoord.z * tes_in[2].color );
gl_Position = (gl_TessCoord.x * gl_in[0].gl_Position +
gl_TessCoord.y * gl_in[1].gl_Position +
gl_TessCoord.z * gl_in[2].gl_Position);
}
And of course, in your fragment shader, you now have a TES_OUT block instead of VS_OUT.
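A minimal sketch of that adjusted fragment shader:
#version 410 core
in TES_OUT { vec4 color; } fs_in; /* block name now matches the tess eval output */
out vec4 color;
void main(void) {
    color = fs_in.color;
}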
I know this question is two years old now, but this might help people in the future who experience the same issue and find this question.
After many hours of trying, I figured out the problem. It seems as though the gl_in[].gl_Position inputs of the tessellation control shader are not written by the vertex shader. I suspect this must be a driver bug (maybe in the NVIDIA drivers?), because I cannot think of any reason this shouldn't work.
Solution:
Instead of relying on the gl_in[].gl_Position inputs of the tessellation control shader, pass the positions yourself through a custom output/input pair.
This can be done by (roughly) adding the following lines to the respective shaders:
// vertex shader
// ...
out vec4 vVertexOut;
void main() {
    // ...
    vVertexOut = uMVPMatrix * inVertex; // output your transformed vertex
}
// tessellation control shader
// ...
in vec4 vVertexOut[];
out vec4 tVertexOut[];
void main() {
    // ...
    tVertexOut[gl_InvocationID] = vVertexOut[gl_InvocationID];
}
// tessellation evaluation shader
// ...
in vec4 tVertexOut[];
void main() {
    // ...
    gl_Position = (tVertexOut[0] * gl_TessCoord[0]) +
                  (tVertexOut[1] * gl_TessCoord[1]) +
                  (tVertexOut[2] * gl_TessCoord[2]);
}

OpenGL 3.3 deferred shading not working

I've set up an OpenGL environment with deferred shading following this tutorial, but I can't make the second shader output to my final buffer.
I can see that the first shader (the one that doesn't use lights) is working properly, because with gDEBugger I can see that the output buffers are correct, but the second shader really can't display anything. I've also tried to make the second shader output a single color for the whole scene just to see if it was displaying anything, but nothing is visible (the screen should be completely red, but it isn't).
The first-pass shader (the one I use to create the buffers for the GBuffer) is working, so I'm not adding its code or how I created and implemented my GBuffer, but if you need them I'll add them; just tell me.
I think the problem is when I tell OpenGL to output to framebuffer 0 (my screen).
This is how I enable OpenGL to write to framebuffer 0:
glEnable(GL_BLEND);
m_MotoreGrafico->glBlendEquation(GL_FUNC_ADD);
glBlendFunc(GL_ONE, GL_ONE);
// Enable writing to the final buffer
m_MotoreGrafico->glBindFramebuffer(GL_DRAW_FRAMEBUFFER, 0);
m_gBuffer.BindForReading();
glClear(GL_COLOR_BUFFER_BIT);
// Set the shader matrices
SetUpOGLProjectionViewMatrix(1);
// Pass the GBuffer textures to the shader
pActiveShader->setUniform1i(_T("gPositionMap"), m_gBuffer.GetPositionTexture());
pActiveShader->setUniform1i(_T("gColorMap"), m_gBuffer.GetDiffuseTexture());
pActiveShader->setUniform1i(_T("gNormalMap"), m_gBuffer.GetNormalTexture());
// Pass the variables the shader needs
float dimensioneFinestra[2], posizioneCamera[3];
dimensioneFinestra[0] = m_nLarghezzaFinestra;
dimensioneFinestra[1] = m_nAltezzaFinestra;
m_MotoreGrafico->GetActiveCameraPosition(posizioneCamera);
pActiveShader->setUniform2f(_T("gScreenSize"), dimensioneFinestra);
pActiveShader->setUniform3f(_T("gCameraPos"), posizioneCamera);
pActiveShader->setUniform1i(_T("gUsaLuci"), 0);
// Draw the lights
float coloreLuce[3], posizioneLuce[3], direzioneLuce[3], vUpLuce[3], vRightLuce[3], intensita;
for (int i = 0; i < GetDocument()->m_RTL.GetNLights(); i++)
{
    CRTLuce* pRTLuce = GetDocument()->m_RTL.GetRTLightAt(i);
    ...
    m_MotoreGrafico->glBindVertexArray(pRTLuce->GetRTLuce()->GetVBO()->getVBAIndex());
    glDrawArrays(GL_TRIANGLES, 0, pRTLuce->GetRTLuce()->GetNVertPerShader());
}
The function m_gBuffer.BindForReading() looks like this (but I think it doesn't matter for my problem):
for (unsigned int i = 0; i < ARRAY_SIZE_IN_ELEMENTS(m_textures); i++)
{
    m_pMotoreGrafico->glActiveTexture(GL_TEXTURE0 + i);
    glBindTexture(GL_TEXTURE_2D, m_textures[GBUFFER_TEXTURE_TYPE_POSITION + i]);
}
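One thing worth double-checking here (a general note, not something visible in the snippets): a sampler uniform is set to the index of the texture unit, not to the texture object's name. Since BindForReading() binds m_textures[GBUFFER_TEXTURE_TYPE_POSITION + i] to unit i, the corresponding uniforms would take the unit indices, along these lines (assuming the enum orders position, diffuse, normal):
// Units 0..2 were populated by BindForReading(), in enum order
pActiveShader->setUniform1i(_T("gPositionMap"), 0);
pActiveShader->setUniform1i(_T("gColorMap"), 1);
pActiveShader->setUniform1i(_T("gNormalMap"), 2);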
So far my GBuffer is working (it creates the textures) and my first shader is also working (it's drawing the textures of my GBuffer).
The problem then is that I can't get OpenGL to draw to the default framebuffer (my screen) again.
(Screenshots: the first four textures, created by the first-pass shader; the back buffer after the second-pass shader; and the front buffer after the second-pass shader.)
This is my second-pass fragment shader code (it outputs only red):
out vec4 outputColor;
void main()
{
outputColor = vec4(1.0, 0.0, 0.0, 1.0);
}
Does anyone have an idea of what I'm doing wrong?
Second-pass vertex shader code:
#version 330
uniform struct Matrici
{
mat4 projectionMatrix;
mat4 modelMatrix;
mat4 viewMatrix;
} matrices;
layout (location = 0) in vec3 inPosition;
void main()
{
vec4 vEyeSpacePosVertex = matrices.viewMatrix * matrices.modelMatrix * vec4(inPosition, 1.0);
gl_Position = matrices.projectionMatrix * vEyeSpacePosVertex;
}
Second-pass fragment shader code:
#version 330
uniform struct MDLight
{
vec3 vColor;
vec3 vPosition;
vec3 vDirection;
float fAmbientIntensity;
float fStrength;
int bOn;
float fConeCosine;
float fAltezza;
float fLarghezza;
vec3 vUp;
vec3 vRight;
} gLuce;
uniform float gSpecularIntensity;
uniform float gSpecularPower;
uniform sampler2D gPositionMap;
uniform sampler2D gColorMap;
uniform sampler2D gNormalMap;
uniform vec3 gCameraPos;
uniform vec2 gScreenSize;
uniform int gLightType;
uniform int gUsaLuci;
vec2 CalcTexCoord()
{
return gl_FragCoord.xy / gScreenSize;
}
out vec4 outputColor;
void main()
{
vec2 TexCoord = CalcTexCoord();
vec4 Color = texture(gColorMap, TexCoord);
outputColor = vec4(1.0, 0.0, 0.0, 1.0);
}

GLSL geometry shader requires glProgramParameteriEXT regardless of layout

I created a basic quad drawing shader using a single point and a geometry shader.
I've read many posts and articles suggesting that I would not need to use glProgramParameteriEXT and could use the layout keyword so long as I was using a shader #version 150 or higher. Some suggested #version 400 or #version 420. My computer will not support #version 420 or higher.
If I use only layout and #version 150 or higher, nothing draws. If I remove layout (or even keep it; it does not seem to care because it will compile) and use glProgramParameteriEXT, it renders.
In code, this does nothing:
layout (points) in;
layout (triangle_strip, max_vertices=4) out;
This is the only code that works:
glProgramParameteriEXT( id, GL_GEOMETRY_INPUT_TYPE_EXT, GL_POINTS );
glProgramParameteriEXT( id, GL_GEOMETRY_OUTPUT_TYPE_EXT, GL_TRIANGLE_STRIP );
glProgramParameteriEXT( id, GL_GEOMETRY_VERTICES_OUT_EXT, 4 );
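One detail about these calls, worth noting for readers: the EXT_geometry_shader4 parameters are consumed when the program is linked, so they must be issued before glLinkProgram. A minimal sketch of the required ordering, with id being the program object as above:
glProgramParameteriEXT(id, GL_GEOMETRY_INPUT_TYPE_EXT, GL_POINTS);
glProgramParameteriEXT(id, GL_GEOMETRY_OUTPUT_TYPE_EXT, GL_TRIANGLE_STRIP);
glProgramParameteriEXT(id, GL_GEOMETRY_VERTICES_OUT_EXT, 4);
glLinkProgram(id); // the parameters take effect here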
The alternative is to write a parser that extracts these parameters from the shader source.
Source for quad rendering via geometry shader:
#version 330
#ifdef VERTEX_SHADER
in vec4 aTexture0;
in vec4 aColor;
in mat4 aMatrix;
out vec4 gvTex0;
out vec4 gvColor;
out mat4 gvMatrix;
void main()
{
// Texture color
gvTex0 = aTexture0;
// Vertex color
gvColor = aColor;
// Matrix
gvMatrix = aMatrix;
}
#endif
#ifdef GEOMETRY_SHADER
layout (points) in;
layout (triangle_strip, max_vertices=4) out;
in vec4 gvTex0[1];
in vec4 gvColor[1];
in mat4 gvMatrix[1];
out vec2 vTex0;
out vec4 vColor;
void main()
{
vColor = gvColor[0];
// Top right.
//
gl_Position = gvMatrix[0] * vec4(1, 1, 0, 1);
vTex0 = vec2(gvTex0[0].z, gvTex0[0].y);
EmitVertex();
// Top left.
//
gl_Position = gvMatrix[0] * vec4(-1, 1, 0, 1);
vTex0 = vec2(gvTex0[0].x, gvTex0[0].y);
EmitVertex();
// Bottom right.
//
gl_Position = gvMatrix[0] * vec4(1, -1, 0, 1);
vTex0 = vec2(gvTex0[0].z, gvTex0[0].w);
EmitVertex();
// Bottom left.
//
gl_Position = gvMatrix[0] * vec4(-1, -1, 0, 1);
vTex0 = vec2(gvTex0[0].x, gvTex0[0].w);
EmitVertex();
EndPrimitive();
}
#endif
#ifdef FRAGMENT_SHADER
uniform sampler2D tex0;
in vec2 vTex0;
in vec4 vColor;
out vec4 vFragColor;
void main()
{
vFragColor = clamp(texture2D(tex0, vTex0) * vColor, 0.0, 1.0);
}
#endif
I am looking for suggestions as to why something like this might happen.
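For reference, a combined source like this is typically compiled by injecting the stage define between the version line and the body. A minimal sketch, assuming the #version directive is stripped from the file and prepended at compile time instead (otherwise the #define would land before #version, which is invalid; names are illustrative):
GLuint CompileStage(GLenum type, const char* define, const char* body)
{
    const char* parts[3] = { "#version 330\n", define, body };
    GLuint s = glCreateShader(type);
    glShaderSource(s, 3, parts, NULL); // the strings are concatenated in order
    glCompileShader(s);
    return s;
}
// e.g. CompileStage(GL_VERTEX_SHADER, "#define VERTEX_SHADER\n", body);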

Passing variables from a geometry shader to a fragment shader

I have a GLSL geometry shader that looks like the following:
#version 150
uniform mat4 p;
uniform mat4 mv;
uniform mat3 nm;
layout(points) in;
layout(triangle_strip, max_vertices = 200) out;
out vec4 test;
void main() {
for (int i = 0; i < gl_in.length(); i++) {
....
gl_Position = p * mv * gl_in[i].gl_Position;
test = vec4(1.0, 0.0, 0.0, 0.0);
EmitVertex();
....
EndPrimitive();
}
}
However, when I try to access "test" in my fragment shader, my application crashes. Here is my fragment shader:
#version 150
out vec4 fColor;
in vec4 test;
void main(void) {
fColor = vec4(test.x, 1.0, 0.4, 0);
}
Can someone help me pass a variable from the geometry shader to the fragment shader? varying is deprecated in #version 150.
You need to declare test as an input in your fragment shader (each shader still compiles on its own with a mismatched interface; the mismatch only surfaces when the program is linked):
in vec4 test;
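A sketch of an alternative (not from the original answer) that avoids name-matching slips in #version 150 is an interface block, where only the block name has to agree across stages:
// geometry shader
out GS_OUT { vec4 test; } gs_out;
// ... inside main(): gs_out.test = vec4(1.0, 0.0, 0.0, 0.0);
// fragment shader
in GS_OUT { vec4 test; } fs_in;
// ... inside main(): fColor = vec4(fs_in.test.x, 1.0, 0.4, 0.0);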