I'm writing my app with LWJGL, and on some graphics cards (AMD Radeon series) I cannot change the normal attribute's name in the vertex shader: something goes wrong and only one big triangle appears on the screen. Here is my code:
#version 150
//our attributes
in vec3 a_position;
in vec2 a_textureCoords;
in vec3 a_normal; //Here is an error
//send the color out to the fragment shader
out vec2 vTextureCoords;
void main(void)
{
    a_normal;    // referenced here only so the attribute is not optimized away
    gl_Position = vec4(a_position, 1.0);
    vTextureCoords = a_textureCoords;
}
If I change the shader to this:
#version 150
//our attributes
in vec3 a_position;
in vec2 a_textureCoords;
in vec3 normal;
//send the color out to the fragment shader
out vec2 vTextureCoords;
void main(void)
{
    normal;
    gl_Position = vec4(a_position, 1.0);
    vTextureCoords = a_textureCoords;
}
Everything works fine and the mesh appears as it should. Is that normal?
And here is my attribute-binding code:
//Before
GL20.glBindAttribLocation(s_programID, 2, "a_normal");
//After
GL20.glBindAttribLocation(s_programID, 2, "normal");
Edit:
Attribute locations in the shader:
//Before
a_normal 0
a_position 1
a_textureCoords 2
//After
a_position 0
a_textureCoords 1
normal 2
I'd suggest you write something like this:
#version 150
#extension GL_ARB_explicit_attrib_location : require // layout(location = ...) needs this before GLSL 3.30
#define POSITION 0
#define TEX_COORD 1
#define NORMAL 2
//our attributes
layout (location = POSITION) in vec3 a_position;
layout (location = TEX_COORD) in vec2 a_textureCoords;
layout (location = NORMAL) in vec3 a_normal;
//send the color out to the fragment shader
out vec2 vTextureCoords;
void main(void)
{
    a_normal;
    gl_Position = vec4(a_position, 1.0);
    vTextureCoords = a_textureCoords;
}
And then on the Java side you have something like:
interface Semantic {
    interface Attr {
        int POSITION = 0;
        int TEX_COORD = 1;
        int NORMAL = 2;
    }
}
that you will use in your glVertexAttribPointer and glEnableVertexAttribArray calls.
Otherwise, remember to call glBindAttribLocation before linking the program, or glGetAttribLocation after linking it (see the sketch below).
PS: remember to do something useful with that a_normal, otherwise the GLSL compiler will optimize it out by removing it completely.
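For completeness, a minimal LWJGL sketch of the bind-before-link path, reusing the Semantic constants above (s_programID is the program handle from the question; shader creation and compilation are omitted):
import org.lwjgl.opengl.GL20;

// Bind the attribute locations *before* linking so they take effect.
GL20.glBindAttribLocation(s_programID, Semantic.Attr.POSITION, "a_position");
GL20.glBindAttribLocation(s_programID, Semantic.Attr.TEX_COORD, "a_textureCoords");
GL20.glBindAttribLocation(s_programID, Semantic.Attr.NORMAL, "a_normal");
GL20.glLinkProgram(s_programID);

// Alternatively, skip the pre-binding and query the driver-assigned
// locations *after* linking; -1 means the attribute is inactive.
int normalLocation = GL20.glGetAttribLocation(s_programID, "a_normal");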
Related
I'm receiving three attributes in the vertex shader and passing them to the fragment shader. If I omit one particular channel, one that is not used in the fragment shader at all, the fragment shader produces invalid output.
I reduced the code to the following simple examples:
A. (correct)
//Vertex Shader GLSL
#version 140
in vec3 a_Position;
in uvec4 a_Joint0;
in vec4 a_Weight0;
// it doesn't matter if flat is specified or not for the joint0 (apparently)
// out uvec4 o_Joint0;
flat out vec4 o_Joint0;
flat out vec4 o_Weight0;
layout (std140) uniform WorldParams
{
    mat4 ModelMatrix;
};
void main()
{
    o_Joint0 = a_Joint0;
    o_Weight0 = a_Weight0;
    vec4 pos = ModelMatrix * vec4(a_Position, 1.0);
    gl_Position = pos;
}
//Fragment Shader GLSL
#version 140
flat in uvec4 o_Joint0;
flat in vec4 o_Weight0;
out vec4 f_FinalColor;
void main()
{
    f_FinalColor = vec4(0, 0, 0, 1);
    f_FinalColor.rgb += (o_Weight0.xyz + 1.0) / 4.0 + (o_Weight0.z + 1.0) / 4.0;
}
The VS sends the attributes o_Joint0 and o_Weight0 down to the FS, and the fragment shader produces the correct output.
B. (incorrect)
//Vertex Shader GLSL
#version 140
in vec3 a_Position;
in uvec4 a_Joint0;
in vec4 a_Weight0;
flat out vec4 o_Weight0;
layout (std140) uniform WorldParams
{
    mat4 ModelMatrix;
};
void main()
{
    o_Weight0 = a_Weight0;
    vec4 pos = ModelMatrix * vec4(a_Position, 1.0);
    gl_Position = pos;
}
//Fragment Shader GLSL
#version 140
flat in vec4 o_Weight0;
out vec4 f_FinalColor;
void main()
{
    f_FinalColor = vec4(0, 0, 0, 1);
    f_FinalColor.rgb += (o_Weight0.xyz + 1.0) / 4.0 + (o_Weight0.z + 1.0) / 4.0;
}
The VS sends only the attribute o_Weight0 down to the FS; as you can see, the only thing omitted in both shaders was o_Joint0, yet the fragment shader produces incorrect output.
First, try completely omitting the a_Joint0 variable from the vertex shader (do not load it into the vertex shader at all, not even as a buffer).
If this does not work, revert your code to the state before you omitted the variable, check that it works again, and then try to find out how the variable is actually affecting the fragment shader.
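In host-code terms, "not even as a buffer" means dropping the pointer setup for that attribute slot entirely. A minimal sketch (in LWJGL/Java for consistency with the earlier example; the locations and the packed position+weight layout are hypothetical):
import static org.lwjgl.opengl.GL11.GL_FLOAT;
import static org.lwjgl.opengl.GL20.*;

// Hypothetical interleaved layout: 3 position floats + 4 weight floats.
static void setupReducedAttributes(int positionLoc, int weightLoc) {
    int stride = (3 + 4) * Float.BYTES;
    glEnableVertexAttribArray(positionLoc);
    glVertexAttribPointer(positionLoc, 3, GL_FLOAT, false, stride, 0);
    glEnableVertexAttribArray(weightLoc);
    glVertexAttribPointer(weightLoc, 4, GL_FLOAT, false, stride, 3L * Float.BYTES);
    // Deliberately no glEnableVertexAttribArray/glVertexAttribPointer calls
    // for a_Joint0: the attribute is gone from the shader entirely.
}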
I have code that tries to upload data into an OpenGL shader, but when I call glGetAttribLocation() for the attribute array I am looking for, it always returns -1 as the location (thus not found). I have no idea how to debug this issue in the first place, since the variables are in the code (albeit the vertex shader only passes them on to the geometry shader).
Can someone help me figure out why glGetAttribLocation returns not found? Other items, like worldMatrix for example, work just fine when using glGetUniformLocation().
C++ Code trying to get the attribute id:
for (unsigned int i = 0; i < _nNumCameras; ++i) {
    const auto glName = "texcoords[" + std::to_string(i) + "]";
    const auto location = glGetAttribLocation(id(), glName.c_str());
    if (location == -1) {
        continue;
    }
Vertex Shader:
#version 430
#define NUM_CAMERAS 3
uniform mat4 worldMatrix;
uniform mat4 viewProjMatrix;
layout(location = 0)in vec3 position;
layout(location = 1)in vec3 normal;
layout(location = 2)in float radius;
in vec2 texcoords[NUM_CAMERAS];
in uint cameraIds[NUM_CAMERAS];
out vec4 gl_Position;
out VS_OUT {
    vec2 v_texCoords[NUM_CAMERAS];
    vec3 v_normal;
    uint cameraIDs[NUM_CAMERAS];
} vs_out;
void main()
{
    gl_Position.xyz = position.xyz;
    gl_Position.w = 1;
    gl_Position = worldMatrix * gl_Position;
    gl_Position = viewProjMatrix * gl_Position;
    vs_out.v_texCoords = texcoords;
    vs_out.cameraIDs = cameraIds;
    vs_out.v_normal = normal;
}
Geometry Shader:
#version 430
#define NUM_CAMERAS 3
layout(triangles) in;
layout(triangle_strip, max_vertices = 3) out;
in VS_OUT {
    vec2 v_texCoords[NUM_CAMERAS];
    vec3 v_normal;
    uint cameraIDs[NUM_CAMERAS];
} gs_in[];
out GS_OUT {
    vec2 v_texcoord;
} gs_out;
flat out uint camera_id_unique;
void main() {
    // Code selecting best camera texture
    ...
    ///
    gl_Position = gl_in[0].gl_Position;
    gs_out.v_texcoord = gs_in[0].v_texCoords[camera_id_unique];
    EmitVertex();
    gl_Position = gl_in[1].gl_Position;
    gs_out.v_texcoord = gs_in[1].v_texCoords[camera_id_unique];
    EmitVertex();
    gl_Position = gl_in[2].gl_Position;
    gs_out.v_texcoord = gs_in[2].v_texCoords[camera_id_unique];
    EmitVertex();
    EndPrimitive();
}
Fragment Shader:
#version 430
#define NUM_CAMERAS 3
uniform sampler2D colorTextures[NUM_CAMERAS];
in GS_OUT {
    vec2 v_texcoord;
} fs_in;
flat in uint camera_id_unique;
out vec4 color;
void main() {
    color = texture(colorTextures[camera_id_unique], fs_in.v_texcoord);
}
Arrayed program resources work in different ways depending on whether they are arrays of basic types or arrays of structs (or arrays of arrays). Resources that are arrays of basic types expose only the entire array as a single resource, whose name is "name[0]" and which has an explicit array size (if you query that property). Other arrayed resources expose separate names for each array element.
Since texcoords is an array of basic types, there is no "texcoords[2]" or "texcoords[1]"; there is only "texcoords[0]". Arrayed attributes are always assigned contiguous locations, so the locations for indices 1 and 2 will simply be 1 or 2 plus the location for index 0.
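So query the base name once and derive the remaining locations. A minimal sketch (in LWJGL/Java to match the earlier examples; the C++ calls are identical apart from the class prefix):
import org.lwjgl.opengl.GL20;

static void enableTexcoordArrays(int program, int numCameras) {
    // Only "texcoords[0]" exists as a resource name; element i lives at base + i.
    int base = GL20.glGetAttribLocation(program, "texcoords[0]");
    if (base == -1) {
        return; // inactive: optimized out, or the name is misspelled
    }
    for (int i = 0; i < numCameras; i++) {
        GL20.glEnableVertexAttribArray(base + i); // locations are contiguous
    }
}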
I'm trying to draw two objects in OpenGL with two different textures, and one of them should move.
I created two shader programs and assigned them to different index arrays. The shader programs' parameters look fine, but the program draws only one object (the one I use later). Is it correct to give the .frag and .vert files the same structure and the same in/out variable names, changing only the texture and removing the transformation from the static object?
// vertex shader of the moving object
#version 330 core
layout (location = 0) in vec3 position;
layout (location = 1) in vec3 color;
layout (location = 2) in vec2 texCoord;
out vec3 vecColor;
out vec2 TexCoord;
uniform mat4 transform;
uniform mat4 view;
uniform mat4 projection;
void main()
{
    gl_Position = projection * view * transform * vec4(position, 1.0f);
    vecColor = color;
    TexCoord = texCoord;
}
// vertex shader of the static object
#version 330 core
layout (location = 0) in vec3 position;
layout (location = 1) in vec3 color;
layout (location = 2) in vec2 texCoord;
out vec3 vecColor;
out vec2 TexCoord;
uniform mat4 view;
uniform mat4 projection;
void main()
{
    gl_Position = projection * view * vec4(position, 1.0f);
    vecColor = color;
    TexCoord = texCoord;
}
// code of Use()
void Use() const
{
    glUseProgram(get_programID());
}
// Drawing elements in the main event loop while()
// Draw first object
theProgram_stelaz.Use();
glBindVertexArray(VAO_stelaz);
glDrawElements(GL_TRIANGLES, _countof(indices_stelaz), GL_UNSIGNED_INT, 0);
glBindVertexArray(0);
// Draw second object
theProgram.Use();
glBindVertexArray(VAO_wings);
glDrawElements(GL_TRIANGLES, _countof(indices_wings), GL_UNSIGNED_INT, 0);
glBindVertexArray(0);
Right now the program draws only the second object, from indices_wings. I want to draw both objects, and only the object from indices_wings should move.
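One thing worth double-checking in a two-program setup like this: uniform values are per-program state, so view, projection, and transform have to be uploaded separately for each program while that program is bound. A minimal per-frame sketch of that flow (in LWJGL 3/Java to stay consistent with the earlier examples; all handles, counts, and uniform locations are hypothetical parameters):
import org.lwjgl.opengl.GL11;
import org.lwjgl.opengl.GL20;
import org.lwjgl.opengl.GL30;

static void drawFrame(int programStelaz, int vaoStelaz, int stelazCount, int sViewLoc, int sProjLoc,
                      int programWings, int vaoWings, int wingsCount, int wViewLoc, int wProjLoc, int wTransformLoc,
                      float[] view, float[] proj, float[] transform) {
    // Static object: this program needs its own copies of view/projection.
    GL20.glUseProgram(programStelaz);
    GL20.glUniformMatrix4fv(sViewLoc, false, view);
    GL20.glUniformMatrix4fv(sProjLoc, false, proj);
    GL30.glBindVertexArray(vaoStelaz);
    GL11.glDrawElements(GL11.GL_TRIANGLES, stelazCount, GL11.GL_UNSIGNED_INT, 0);

    // Moving object: a separate program with separate uniform state.
    GL20.glUseProgram(programWings);
    GL20.glUniformMatrix4fv(wViewLoc, false, view);
    GL20.glUniformMatrix4fv(wProjLoc, false, proj);
    GL20.glUniformMatrix4fv(wTransformLoc, false, transform);
    GL30.glBindVertexArray(vaoWings);
    GL11.glDrawElements(GL11.GL_TRIANGLES, wingsCount, GL11.GL_UNSIGNED_INT, 0);

    GL30.glBindVertexArray(0);
}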
I searched a lot for this problem. I found the question Passing data into different shaders, but that problem is not mine. I get the error message "The fragment shader uses varying "normal", but previous shader does not write to it."
My vertex shader code:
#version 430
in layout(location=0) vec3 position;
in layout(location=1) vec3 normal;
out vec3 norm;
uniform mat4 transformation;
void main()
{
    gl_Position = transformation * vec4(position, 1.0);
    norm = (transformation * vec4(normal, 0.0)).xyz;
}
And my fragment shader code:
#version 430
in vec3 normal;
out vec4 colour;
vec3 lightPos = vec3(0,50,0);
vec3 lightColor = vec3(0.5, 0, 0);
vec3 materialColor = vec3(0, 1.0, 0);
void main() {
    float cosTheta = dot(-lightPos, normalize(normal));
    vec3 temp = materialColor * lightColor * cosTheta;
    colour = vec4(temp, 1.0);
}
What is the main problem? I don't understand this message: my vertex shader uses the normal vector and passes it on to the fragment shader. I don't see the difference between the linked code and mine. Please give me some idea. :\
The names of a vertex shader output and the corresponding fragment shader input have to match: your vertex shader writes norm, but your fragment shader reads normal, so the linker sees that nothing writes to normal. The simplest fix is to use the same name in both stages. If you want to use different variable names for some reason, you can instead specify a location to match the in- and output variables.
For example, in your case:
.vert:
out layout(location = 7) vec3 norm;
.frag:
in layout(location = 7) vec3 normal;
I created a basic quad drawing shader using a single point and a geometry shader.
I've read many posts and articles suggesting that I would not need to use glProgramParameteriEXT and could use the layout keyword so long as I was using a shader #version 150 or higher. Some suggested #version 400 or #version 420. My computer will not support #version 420 or higher.
If I use only layout and #version 150 or higher, nothing draws. If I remove layout (or even keep it; the compiler does not seem to care, since it compiles either way) and use glProgramParameteriEXT, it renders.
In code, this does nothing:
layout (points) in;
layout (triangle_strip, max_vertices=4) out;
This is the only code that works:
glProgramParameteriEXT( id, GL_GEOMETRY_INPUT_TYPE_EXT, GL_POINTS );
glProgramParameteriEXT( id, GL_GEOMETRY_OUTPUT_TYPE_EXT, GL_TRIANGLE_STRIP );
glProgramParameteriEXT( id, GL_GEOMETRY_VERTICES_OUT_EXT, 4 );
The alternative is to write a parser that derives these parameters from the shader source.
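That parser does not need to be fancy. A rough sketch of the idea (hypothetical, in Java/LWJGL to match the earlier examples; it only handles the declarations this particular shader uses, and it must run before the program is linked):
import java.util.regex.Matcher;
import java.util.regex.Pattern;

import static org.lwjgl.opengl.EXTGeometryShader4.*;
import static org.lwjgl.opengl.GL11.*;

public final class GeometryLayoutParser {
    /** Scans the geometry shader source for its layout declarations and
     *  applies the equivalent glProgramParameteriEXT values. */
    public static void apply(int programId, String source) {
        if (Pattern.compile("layout\\s*\\(\\s*points\\s*\\)\\s*in").matcher(source).find()) {
            glProgramParameteriEXT(programId, GL_GEOMETRY_INPUT_TYPE_EXT, GL_POINTS);
        }
        if (source.contains("triangle_strip")) {
            glProgramParameteriEXT(programId, GL_GEOMETRY_OUTPUT_TYPE_EXT, GL_TRIANGLE_STRIP);
        }
        Matcher m = Pattern.compile("max_vertices\\s*=\\s*(\\d+)").matcher(source);
        if (m.find()) {
            glProgramParameteriEXT(programId, GL_GEOMETRY_VERTICES_OUT_EXT, Integer.parseInt(m.group(1)));
        }
    }
}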
Source for quad rendering via geometry shader:
#version 330
#ifdef VERTEX_SHADER
in vec4 aTexture0;
in vec4 aColor;
in mat4 aMatrix;
out vec4 gvTex0;
out vec4 gvColor;
out mat4 gvMatrix;
void main()
{
    // Texture color
    gvTex0 = aTexture0;
    // Vertex color
    gvColor = aColor;
    // Matrix
    gvMatrix = aMatrix;
}
#endif
#ifdef GEOMETRY_SHADER
layout (points) in;
layout (triangle_strip, max_vertices=4) out;
in vec4 gvTex0[1];
in vec4 gvColor[1];
in mat4 gvMatrix[1];
out vec2 vTex0;
out vec4 vColor;
void main()
{
    vColor = gvColor[0];
    // Top right.
    gl_Position = gvMatrix[0] * vec4(1, 1, 0, 1);
    vTex0 = vec2(gvTex0[0].z, gvTex0[0].y);
    EmitVertex();
    // Top left.
    gl_Position = gvMatrix[0] * vec4(-1, 1, 0, 1);
    vTex0 = vec2(gvTex0[0].x, gvTex0[0].y);
    EmitVertex();
    // Bottom right.
    gl_Position = gvMatrix[0] * vec4(1, -1, 0, 1);
    vTex0 = vec2(gvTex0[0].z, gvTex0[0].w);
    EmitVertex();
    // Bottom left.
    gl_Position = gvMatrix[0] * vec4(-1, -1, 0, 1);
    vTex0 = vec2(gvTex0[0].x, gvTex0[0].w);
    EmitVertex();
    EndPrimitive();
}
#endif
#ifdef FRAGMENT_SHADER
uniform sampler2D tex0;
in vec2 vTex0;
in vec4 vColor;
out vec4 vFragColor;
void main()
{
    vFragColor = clamp(texture2D(tex0, vTex0) * vColor, 0.0, 1.0);
}
#endif
I am looking for suggestions as to why something like this might happen.