GLSL geometry shader requires glProgramParameteriEXT regardless of layout

I created a basic quad drawing shader using a single point and a geometry shader.
I've read many posts and articles suggesting that I would not need glProgramParameteriEXT and could use the layout keyword instead, so long as the shader was #version 150 or higher. Some suggested #version 400 or #version 420. My GPU does not support #version 420 or higher.
If I rely only on the layout qualifiers with #version 150 or higher, nothing draws. If I instead use glProgramParameteriEXT (with or without the layout qualifiers; the shader compiles either way), it renders.
In code, this does nothing:
layout (points) in;
layout (triangle_strip, max_vertices=4) out;
This is the only code that works:
glProgramParameteriEXT( id, GL_GEOMETRY_INPUT_TYPE_EXT, GL_POINTS );
glProgramParameteriEXT( id, GL_GEOMETRY_OUTPUT_TYPE_EXT, GL_TRIANGLE_STRIP );
glProgramParameteriEXT( id, GL_GEOMETRY_VERTICES_OUT_EXT, 4 );
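Note that these EXT program parameters are only consumed when the program is linked, so they must be set before glLinkProgram. A minimal sketch of the call order (vs, gs and fs stand for already-compiled shader objects; this is illustrative, not the asker's exact code):
GLuint id = glCreateProgram();
glAttachShader( id, vs );
glAttachShader( id, gs );
glAttachShader( id, fs );
// Set the geometry parameters *before* linking; they take effect at link time.
glProgramParameteriEXT( id, GL_GEOMETRY_INPUT_TYPE_EXT, GL_POINTS );
glProgramParameteriEXT( id, GL_GEOMETRY_OUTPUT_TYPE_EXT, GL_TRIANGLE_STRIP );
glProgramParameteriEXT( id, GL_GEOMETRY_VERTICES_OUT_EXT, 4 );
glLinkProgram( id );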
The only alternative I can see is to write a parser that extracts these parameters from the shader source.
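A rough sketch of that parser, assuming a GL loader header is already included (the helper name and the deliberately naive string matching are mine):
#include <cstdlib>
#include <string>
// Sketch: recover the geometry layout from the shader source and replay it
// through the EXT entry points. A real parser would tolerate whitespace
// variations and handle every primitive type, not just the two used here.
void applyGeometryLayout( GLuint id, const std::string& src )
{
    if ( src.find("layout (points) in") != std::string::npos )
        glProgramParameteriEXT( id, GL_GEOMETRY_INPUT_TYPE_EXT, GL_POINTS );
    if ( src.find("triangle_strip") != std::string::npos )
        glProgramParameteriEXT( id, GL_GEOMETRY_OUTPUT_TYPE_EXT, GL_TRIANGLE_STRIP );
    const std::size_t p = src.find("max_vertices=");
    if ( p != std::string::npos )
        glProgramParameteriEXT( id, GL_GEOMETRY_VERTICES_OUT_EXT,
                                std::atoi(src.c_str() + p + 13) );
}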
Source for quad rendering via geometry shader:
#version 330
#ifdef VERTEX_SHADER
in vec4 aTexture0;
in vec4 aColor;
in mat4 aMatrix;
out vec4 gvTex0;
out vec4 gvColor;
out mat4 gvMatrix;
void main()
{
// Texture color
gvTex0 = aTexture0;
// Vertex color
gvColor = aColor;
// Matrix
gvMatrix = aMatrix;
}
#endif
#ifdef GEOMETRY_SHADER
layout (points) in;
layout (triangle_strip, max_vertices=4) out;
in vec4 gvTex0[1];
in vec4 gvColor[1];
in mat4 gvMatrix[1];
out vec2 vTex0;
out vec4 vColor;
void main()
{
vColor = gvColor[0];
// Top right.
//
gl_Position = gvMatrix[0] * vec4(1, 1, 0, 1);
vTex0 = vec2(gvTex0[0].z, gvTex0[0].y);
EmitVertex();
// Top left.
//
gl_Position = gvMatrix[0] * vec4(-1, 1, 0, 1);
vTex0 = vec2(gvTex0[0].x, gvTex0[0].y);
EmitVertex();
// Bottom right.
//
gl_Position = gvMatrix[0] * vec4(1, -1, 0, 1);
vTex0 = vec2(gvTex0[0].z, gvTex0[0].w);
EmitVertex();
// Bottom left.
//
gl_Position = gvMatrix[0] * vec4(-1, -1, 0, 1);
vTex0 = vec2(gvTex0[0].x, gvTex0[0].w);
EmitVertex();
EndPrimitive();
}
#endif
#ifdef FRAGMENT_SHADER
uniform sampler2D tex0;
in vec2 vTex0;
in vec4 vColor;
out vec4 vFragColor;
void main()
{
vFragColor = clamp(texture(tex0, vTex0) * vColor, 0.0, 1.0);
}
#endif
I am looking for suggestions as to why something like this might happen.

Related

Why output from vertex shader is being corrupted when an unused output is not passed (OpenGL/GLSL)

I'm receiving three attributes in the vertex shader and passing them to the fragment shader. If I omit one particular channel that is not used in the fragment shader at all, the fragment shader produces invalid output.
I reduced the code to the following simple examples:
A. (correct)
//Vertex Shader GLSL
#version 140
in vec3 a_Position;
in uvec4 a_Joint0;
in vec4 a_Weight0;
// it doesn't matter if flat is specified or not for the joint0 (apparently)
// out uvec4 o_Joint0;
flat out uvec4 o_Joint0;
flat out vec4 o_Weight0;
layout (std140) uniform WorldParams
{
mat4 ModelMatrix;
};
void main()
{
o_Joint0=a_Joint0;
o_Weight0=a_Weight0;
vec4 pos = ModelMatrix * vec4(a_Position, 1.0);
gl_Position = pos;
}
//Fragment Shader GLSL
#version 140
flat in uvec4 o_Joint0;
flat in vec4 o_Weight0;
out vec4 f_FinalColor;
void main()
{
f_FinalColor=vec4(0,0,0,1);
f_FinalColor.rgb += (o_Weight0.xyz + 1.0) / 4.0 + (o_Weight0.z + 1.0) / 4.0;
}
The VS sends the attributes o_Joint0 and o_Weight0 down to the FS, and the fragment shader produces the correct output.
B. (incorrect)
//Vertex Shader GLSL
#version 140
in vec3 a_Position;
in uvec4 a_Joint0;
in vec4 a_Weight0;
flat out vec4 o_Weight0;
layout (std140) uniform WorldParams
{
mat4 ModelMatrix;
};
void main()
{
o_Weight0=a_Weight0;
vec4 pos = ModelMatrix * vec4(a_Position, 1.0);
gl_Position = pos;
}
//Fragment Shader GLSL
#version 140
flat in vec4 o_Weight0;
out vec4 f_FinalColor;
void main()
{
f_FinalColor=vec4(0,0,0,1);
f_FinalColor.rgb += (o_Weight0.xyz + 1.0) / 4.0 + (o_Weight0.z + 1.0) / 4.0;
}
The VS sends only the attribute o_Weight0 down to the FS; the only thing omitted in both shaders is o_Joint0, yet the fragment shader now produces incorrect output.
First, try completely omitting the a_Joint0 variable from the vertex shader: do not feed it to the vertex shader at all, not even as a buffer, as sketched below.
If this does not work, revert your code to before you omitted the variable, confirm it works again, and then narrow down how the omission is actually affecting the fragment shader.
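In client code, completely omitting the attribute also means removing its setup calls, not just the GLSL declaration. A sketch of the state that has to go with it (jointLoc, stride and offset are illustrative placeholders; note the integer entry point, since a_Joint0 is a uvec4):
// All of this should be removed together with the a_Joint0 declaration:
GLint jointLoc = glGetAttribLocation(program, "a_Joint0"); // -1 once it is gone
if (jointLoc != -1)
{
    glEnableVertexAttribArray(jointLoc);
    // uvec4 attributes must be fed with glVertexAttribIPointer;
    // glVertexAttribPointer would give them undefined values.
    glVertexAttribIPointer(jointLoc, 4, GL_UNSIGNED_INT, stride, offset);
}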

LWJGL shader normal's alias

I'm writing my app with LWJGL, and on some graphics cards (the AMD Radeon series) I cannot change the normal's name in the vertex shader: something goes wrong and only one big triangle appears on the screen. Here is my code:
#version 150
//our attributes
in vec3 a_position;
in vec2 a_textureCoords;
in vec3 a_normal; //Here is an error
//send the color out to the fragment shader
out vec2 vTextureCoords;
void main(void)
{
a_normal; // reference the attribute so it is not optimized out
gl_Position = vec4(a_position, 1.0);
vTextureCoords = a_textureCoords;
}
if I change shader to this:
#version 150
//our attributes
in vec3 a_position;
in vec2 a_textureCoords;
in vec3 normal;
//send the color out to the fragment shader
out vec2 vTextureCoords;
void main(void)
{
normal; // reference the attribute so it is not optimized out
gl_Position = vec4(a_position, 1.0);
vTextureCoords = a_textureCoords;
}
Everything works fine and the mesh appears as it should. Is that normal?
And my bind attribute function:
//Before
GL20.glBindAttribLocation(s_programID, 2, "a_normal");
//After
GL20.glBindAttribLocation(s_programID, 2, "normal");
Edit:
Attribute locations in the shader:
//Before
a_normal 0
a_position 1
a_textureCoords 2
//After
a_position 0
a_textureCoords 1
normal 2
I'd suggest you write something like this:
#version 150
#define POSITION 0
#define TEX_COORD 1
#define NORMAL 2
//our attributes
layout (location = POSITION) in vec3 a_position;
layout (location = TEX_COORD) in vec2 a_textureCoords;
layout (location = NORMAL) in vec3 a_normal;
//send the color out to the fragment shader
out vec2 vTextureCoords;
void main(void)
{
a_normal; // reference the attribute so it is not optimized out (see the P.S.)
gl_Position = vec4(a_position, 1.0);
vTextureCoords = a_textureCoords;
}
And then in Java you have something like:
interface Semantic {
interface Attr {
int POSITION = 0;
int TEX_COORD = 1;
int NORMAL = 2;
}
}
that you will use in your glVertexAttribPointer and glEnableVertexAttribArray calls.
Otherwise, remember to call glBindAttribLocation before linking the program, or glGetAttribLocation after, as in the sketch below.
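For reference, the two options look like this in C-style GL calls (LWJGL's GL20 methods mirror them one-to-one; NORMAL is the constant defined above):
// Option 1: pin the location yourself; this must happen *before* linking.
glBindAttribLocation(s_programID, NORMAL, "a_normal");
glLinkProgram(s_programID);
// Option 2: let the linker choose, then query *after* linking.
glLinkProgram(s_programID);
GLint normalLoc = glGetAttribLocation(s_programID, "a_normal");
// normalLoc == -1 means the attribute was optimized out (see the P.S. below).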
P.S.: remember to do something useful with that a_normal, otherwise the GLSL compiler will optimize it out completely.

gl_VertexID always zero in any useful geometry shader

I am using indexed rendering and a geometry shader. If I pass gl_VertexID to the geometry shader, it works fine as long as I do not emit any vertices; if I emit one or more vertices, gl_VertexID (passed as any name) is zero. Why?
Using the shaders below, the geometry shader will put the correct indices into my feedback buffer, if and only if I comment out both EmitVertex calls. What am I missing?
(I can work around it, but it is bugging the hell out of me!)
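For context, the FeedbackBuffer block in the geometry shader below is declared with binding=0, so the client side binds it roughly like this (a sketch; numPrimitives is a hypothetical count of input line primitives):
GLuint feedbackBuf;
glGenBuffers(1, &feedbackBuf);
glBindBuffer(GL_SHADER_STORAGE_BUFFER, feedbackBuf);
// One vec2 (two floats) per input line primitive.
glBufferData(GL_SHADER_STORAGE_BUFFER, numPrimitives * 2 * sizeof(float),
             nullptr, GL_DYNAMIC_READ);
// Matches "layout (std430, binding=0)" in the geometry shader.
glBindBufferBase(GL_SHADER_STORAGE_BUFFER, 0, feedbackBuf);
// ... draw ...
// Make the shader writes visible before reading the buffer back on the CPU:
glMemoryBarrier(GL_BUFFER_UPDATE_BARRIER_BIT);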
VERTEX SHADER
#version 440
in vec4 position;
out VSOUT{
vec4 gl_Position;
int index;
} vsout;
uniform mat4 gl_ModelViewMatrix;
void main(){
gl_Position = gl_ModelViewMatrix * position;
vsout.index = gl_VertexID;
vsout.gl_Position = gl_Position;
}
GEOMETRY SHADER
#version 440
#extension GL_ARB_shader_storage_buffer_object : enable
layout (lines) in;
layout (line_strip, max_vertices=2) out;
in VSOUT{
vec4 gl_Position;
int index;
} vdata[];
layout (std430, binding=0) buffer FeedbackBuffer{
vec2 fb[];
};
void main(){
int i = vdata[0].index;
int j = vdata[1].index;
fb[gl_PrimitiveIDIn][0] = vdata[0].index;
fb[gl_PrimitiveIDIn][1] = vdata[1].index;
gl_Position = gl_in[0].gl_Position;
EmitVertex();
gl_Position = gl_in[1].gl_Position;
EmitVertex();
}
FRAGMENT SHADER
#version 430
out vec4 outputColor;
void main(){
outputColor = vec4(.5,.5,.5,.5);
}
So this looks like an nVidia implementation thing. If I run these shaders on a GeForce GTX580, behaviour is as described above. Using an AMD FirePro V5900, it behaves as I'd expect, with the correct values in the feedback buffer whether or not I emit vertices.

OpenGL error thrown by fragment shader

I'm writing a 2D game in OpenTK, using OpenGL 4.4. Using colour and texture UV coordinates and a matrix, I can successfully draw textures between vertices with this vertex shader:
public const string vertexShaderDefaultSrc =
#"
#version 330
uniform mat4 MVPMatrix;
layout (location = 0) in vec2 Position;
layout (location = 1) in vec2 Texture;
layout (location = 2) in vec4 Colour;
out vec2 InTexture;
out vec4 OutColour;
void main()
{
gl_Position = MVPMatrix * vec4(Position, 0, 1);
InTexture = Texture;
OutColour = Colour;
}";
and fragment shader:
public const string fragmentShaderDefaultSrc =
#"
#version 330
uniform sampler2D Sampler;
in vec2 InTexture;
in vec4 OutColour;
out vec4 OutFragColor;
void main()
{
OutFragColor = texture(Sampler, InTexture) * OutColour;
//Alpha test
if(OutFragColor.a <= 0)
discard;
}
";
BUT if I want to draw just a solid colour rather than a texture, I use this shader (with the same vertices, passing UV coords that won't be used):
public const string fragmentShaderSolidColourSrc =
#"
#version 330
uniform sampler2D Sampler;
in vec2 InTexture;
in vec4 OutColour;
out vec4 OutFragColor;
void main()
{
OutFragColor = OutColour;
//Alpha test
if(OutFragColor.a <= 0)
discard;
}
";
Now this works beautifully, but OpenGL reports an error, GL_INVALID_VALUE. It draws fine and everything seems to work, but ideally I would like OpenGL to be error-free in that situation so I can catch real errors. I would appreciate any help, and can share more detail of how the shader is compiled or used if that is helpful. What I don't understand is how the default shader can work while the solid-colour one doesn't.
I have tracked down the exact source of the errors in my render call (the shader builds with no problems):
GL.EnableVertexAttribArray(shader.LocationPosition);
GL.VertexAttribPointer(shader.LocationPosition, 2, VertexAttribPointerType.Float, false, Stride, 0);
//-----everything up to here is fine
//this line throws an error
GL.EnableVertexAttribArray(shader.LocationTexture);
//as does this line
GL.VertexAttribPointer(shader.LocationTexture, 2, VertexAttribPointerType.Float, false, Stride, 8);
//this is all ok
GL.EnableVertexAttribArray(shader.LocationColour);
GL.VertexAttribPointer(shader.LocationColour, 4, VertexAttribPointerType.UnsignedByte, true, Stride, 16);
//ok
GL.BindBuffer(BufferTarget.ElementArrayBuffer, indexBuffer);
GL.DrawArrays(DrawType, 0, Vertices.Length);
//ok
GL.DisableVertexAttribArray(shader.LocationPosition);
//this line throws error
GL.DisableVertexAttribArray(shader.LocationTexture);
//this is ok
GL.DisableVertexAttribArray(shader.LocationColour);
After some tests it appears to me (it would be nice to have this verified) that if a variable such as the texture coordinates is not used by the shader, the compiler optimizes it out, so a call to get its location returns -1. Simply checking whether LocationTexture was -1 and, if so, skipping the enable/pointer/disable calls for it resolved my issues.
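A sketch of that guard in C-style GL calls (the OpenTK methods above map to them directly; Stride and the offsets are as in the snippet):
// Skip any attribute the linker has removed (location == -1).
GLint locTexture = glGetAttribLocation(program, "Texture");
if (locTexture != -1)
{
    glEnableVertexAttribArray(locTexture);
    glVertexAttribPointer(locTexture, 2, GL_FLOAT, GL_FALSE, Stride, (const void*)8);
}
// ... draw ...
if (locTexture != -1)
    glDisableVertexAttribArray(locTexture);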

OpenGL color transform

I'm using OpenGL to draw a large array of 2D points with their colors. Each point (vertex) also has its alpha channel defined in the MX.c array. I'd like to be able to increase or decrease the alpha value of the whole array (of every vertex displayed). Is there a clever way to do it using OpenGL functions? Here's my drawing method:
void PointsMX::drawMX()
{
glEnableClientState(GL_VERTEX_ARRAY);
glEnableClientState(GL_COLOR_ARRAY);
glColorPointer(4, GL_UNSIGNED_BYTE, 0, MX.c);
glVertexPointer(2, GL_DOUBLE, 0, MX.p);
glPushMatrix();
glTranslated(position[X], position[Y], 0.0);
glScaled(scale, scale, 1.0);
glDrawArrays(GL_POINTS, 0, MX.size);
glPopMatrix();
glDisableClientState(GL_VERTEX_ARRAY);
glDisableClientState(GL_COLOR_ARRAY);
}
As datenwolf points out in his comments, you can do this pretty simply using a shader, but not using the fixed-function pipeline (which is what you're using if you never call glUseProgram()).
If you're not using lighting, reproducing the fixed-function shaders isn't very hard, and a little googling will help you get to that point.
The key here is that you want to change something that is normally a vertex attribute (the alpha channel of the color) to a configurable value for the entire drawing operation. In shader terms this means overriding the vertex attribute with a uniform. A uniform is simply a value you pass into an OpenGL program which then has the same value for every vertex or fragment processed (depending on whether you put it into the vertex or fragment shader).
Here's an example of a very basic vertex shader:
#version 330
uniform mat4 Projection = mat4(1);
uniform mat4 ModelView = mat4(1);
layout(location = 0) in vec3 Position;
layout(location = 3) in vec4 Color;
out vec4 vColor;
void main() {
gl_Position = Projection * ModelView * vec4(Position, 1);
vColor = Color;
}
And a corresponding fragment shader
#version 330
in vec4 vColor;
out vec4 FragColor;
void main()
{
FragColor = vColor;
}
In order to accomplish what you're trying to do, you'd want to change the vertex shader to add an additional uniform representing your alpha override:
#version 330
uniform mat4 Projection = mat4(1);
uniform mat4 ModelView = mat4(1);
uniform float AlphaOverride = -1.0;
layout(location = 0) in vec3 Position;
layout(location = 3) in vec4 Color;
out vec4 vColor;
void main() {
gl_Position = Projection * ModelView * vec4(Position, 1);
vColor = Color;
if (AlphaOverride > 0.0) {
vColor.a = AlphaOverride;
}
}
If you never set the AlphaOverride uniform, it keeps its default of -1.0 and is therefore ignored by the vertex shader. But if you set it to a value between 0 and 1, that value is applied to the alpha channel of every vertex.
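On the client side, driving the override then looks something like this (program is assumed to be the linked program containing the shader above):
GLint loc = glGetUniformLocation(program, "AlphaOverride");
glUseProgram(program);
glUniform1f(loc, 0.5f);  // force alpha to 0.5 for every vertex in this draw
// ... draw the point array ...
glUniform1f(loc, -1.0f); // back to the sentinel, so per-vertex alpha is used again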