Is it possible for one shader to interfere with this lighting? - c++

I'm working on OpenGL ES and I have two shaders interfering with each other. That is, if I use the second shader, the lighting shader stops working as it normally does, as if the normals in the VBO were getting corrupted. It only happens when I add this shader:
"#ifdef GL_ES \n" //00
"precision mediump float; \n" //01
"#else \n" //02
"#version 100 \n" //03
"precision mediump float; \n" //04
"#endif
\n" //05
"//ProgressBarShader\n"
"uniform float iGlobalTime; \n" //06
"varying vec34 v_position; \n"
"varying vec2 v_texCoord; \n" //07
"uniform sampler2D s_texture; \n" //11
"void main() {\n" //1
" float igt=((iGlobalTime*15.0)+(sin(iGlobalTime)*25.0)); \n"
" if (gl_FragCoord.x>(1024.0-igt)) \n"
" gl_FragColor = texture2D( s_texture, v_texCoord ); "
"}\n";
The other one is a per-fragment lighting shader taken from this site: http://www.lighthouse3d.com/tutorials/glsl-tutorial/point-light-per-pixel/
but modified for OpenGL ES 2.0 (and my inputs):
"#ifdef GL_ES \n"
"precision mediump float; \n"
"#else \n"
"#version 100 \n"
"precision mediump float; \n"
"#endif \n"
"uniform float iGlobalTime; \n"
"varying vec2 v_texCoord; \n"
"varying vec4 v_normal; \n"
"varying vec4 v_position; \n"
"uniform sampler2D s_texture; \n"
"uniform mat4 MVPMat; \n"
"uniform mat3 iMVPMat; \n"
"uniform vec2 iResolution; \n"
"uniform vec3 iMouse; \n"
"uniform vec4 objectMaterialEmission; // Ecm \n"
"uniform vec4 objectMaterialAmbient; // Acm \n"
"uniform vec4 objectMaterialDiffuse; // Dcm \n"
"uniform vec4 objectMaterialSpecular; // Scm \n"
"uniform float objectMaterialGlossiness; // Gcm \n"
"uniform float objectMaterialShininess; // Srm \n"
"uniform float lightStrength; \n"
"uniform vec4 lightAmbient; \n"
"uniform vec4 lightDiffuse; \n"
"uniform vec4 lightSpecular; \n"
"uniform vec3 lightPosition; \n"
"uniform vec3 lightSpotDirection; \n"
"uniform float lightSpotExponent; \n"
"uniform float lightSpotCutoff; // (range: [0.0,90.0], 180.0)\n"
"uniform float lightSpotCosCutoff; // (range: [1.0,0.0],-1.0)\n"
"uniform float lightConstantAttenuation; \n"
"uniform float lightLinearAttenuation; \n"
"uniform float lightQuadraticAttenuation;\n"
"uniform bool lightOn; \n"
"varying vec3 N;\n"
"varying vec3 v;\n"
"void main (void) \n"
"{ \n"
" vec3 L = normalize(-lightPosition.xyz - v); \n"
" vec3 E = normalize(-v); // we are in Eye Coordinates, so EyePos is (0,0,0) \n"
" vec3 R = normalize(-reflect(L,N)); \n"
" //calculate Ambient Term: \n"
" vec4 Iamb = lightAmbient; \n"
" // write Total Color: \n"
" vec4 textureColor = texture2D( s_texture, v_texCoord ); \n"
" vec4 color =textureColor; \n"
" //calculate Diffuse Term: \n"
" vec4 Idiff = lightDiffuse * max(dot(N,L), 0.0);\n"
" Idiff = clamp(Idiff, 0.0, 1.0); \n"
" float distance = length(lightPosition-v); \n"
" float attenuation = lightStrength/(lightConstantAttenuation+(distance*lightLinearAttenuation));\n"
" gl_FragColor = clamp( color *(Idiff+Iamb)*attenuation,0.0,1.0); \n"
" gl_FragColor.a = 1.0;"
"}\n";
If I use the progress bar shader (the first one), my lighting disappears and only some faces get lit. Any other shader works fine; the lighting only gets disrupted when I add the progress bar shader.
I searched the web but never found anything about shaders interfering with each other. Is that possible? What should I do?

In the end, it was a failure in the data binding: setting the uniforms required the right shader program to be active (glUseProgram). The first time around the binding was correct, but when the lighting shader came along, the active program was the wrong one and the bindings went to the other shader.
Sorry for the misleading question.
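To illustrate the failure mode described above, here is a minimal sketch (the program handles, uniform names and values are placeholders, not the original code): glUniform* writes to whichever program is currently bound, so glUseProgram has to be called before updating each program's uniforms.
#include <GLES2/gl2.h>

// Sketch only: 'progressProg' and 'lightingProg' are assumed to be valid,
// linked programs. glUniform* affects the program bound by the most recent
// glUseProgram call, which is what went wrong in the original code.
void updatePerProgramUniforms(GLuint progressProg, GLuint lightingProg, float time)
{
    glUseProgram(progressProg);                                   // progress bar shader is current
    glUniform1f(glGetUniformLocation(progressProg, "iGlobalTime"), time);

    glUseProgram(lightingProg);                                   // switch before touching its uniforms
    glUniform1f(glGetUniformLocation(lightingProg, "lightStrength"), 1.0f);
}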

Related

why some triangles are becoming black after I add lighting? [closed]

I am trying to add specular lighting to my OpenGL ES program, which loads a 3D model. It works normally, but whenever I add lighting, some triangles become black and some stay white.
Here is my vertex and fragment shader code:
"attribute vec4 position;\n"
"attribute vec4 normal;\n"
"attribute vec4 color;\n"
"attribute vec2 texCord;\n"
"varying vec4 vcolor;\n"
"varying vec2 vtexCord;\n"
"varying vec3 s_normal;\n"
"varying vec3 toLightv;\n"
"varying vec3 toCameraV;\n"
"uniform vec3 light_pos;\n"
"uniform mat4 MVP;\n"
"uniform mat4 view;"
"uniform mat4 transform;\n"
"void main()\n"
"{\n"
"gl_Position = MVP * vec4(position.xyz, 1.0);\n"
"vcolor = color;\n"
"vtexCord = texCord;\n"
"s_normal = (transform * vec4(normal.xyz,0.0)).xyz;\n"
"toLightv = light_pos - (MVP * vec4(position.xyz, 1.0)).xyz;\n"
"toCameraV = (view * vec4(0.0,0.0,0.0,1.0)).xyz - (MVP * vec4(position.xyz, 1.0)).xyz;\n"
"}";
"precision mediump float;\n"
"varying vec4 vcolor;\n"
"varying vec2 vtexCord;\n"
"varying vec3 s_normal;\n"
"varying vec3 toLightv;\n"
"varying vec3 toCameraV;\n"
"uniform sampler2D s_texr;\n"
"uniform vec3 light_col;\n"
"void main()\n"
"{\n"
// "gl_FragColor = vec4(1.0,0.0,1.0,1.0);\n"
//"gl_FragColor = vec4 (vcolor.xyz,1.0);\n"
"vec3 unitCV = normalize(toCameraV);\n"
"vec3 unitNL = normalize(s_normal);\n"
"vec3 unitLV = normalize(toLightv);\n"
"vec3 lightComing = -unitLV;\n"
"vec3 reflectedL = reflect(lightComing,unitNL);\n"
"float specularFactor = dot(reflectedL,toCameraV);\n"
"specularFactor = max(specularFactor,0.0);\n"
"float dampFactor = pow(specularFactor,1.0);\n"
"vec3 Specular= dampFactor * vec3(1.0,1.0,1.0);\n"
"float nDotl = dot(unitNL,unitLV);"
"vec3 diffuse =max(nDotl,0.1) * vec3(1.0,1.0,1.0);"
// diffuse = diffuse * (1.0 / (1.0 + (0.00000025 * distance * distance)));
"gl_FragColor =vec4(diffuse.xyz,1.0)* texture2D(s_texr, vtexCord)+vec4(Specular.xyz,1.0);"
"};"
I enabled depth testing and the problem was solved:
glEnable(GL_DEPTH_TEST);
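As a sketch of what that fix involves in context (assuming a plain GLES2 setup whose surface was created with a depth buffer), the depth buffer also has to be cleared every frame:
#include <GLES2/gl2.h>

// One-time state: reject fragments that lie behind already-drawn geometry.
void initDepthTest()
{
    glEnable(GL_DEPTH_TEST);
    glDepthFunc(GL_LESS);   // default comparison, shown here for clarity
}

// Per-frame: clear the depth buffer together with the color buffer.
void beginFrame()
{
    glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
}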

Sending multiple textures using one at a time using multiple sampler2D?

So I made a mesh in Maya and wrote some code to import it into the vertex buffer, and so far so good, but the mesh has multiple components with different textures assigned. I figured that was easy: I'd put a key in the vertex buffer, run a switch statement (and from what I can gather from this post that's not such a bad thing), send the required textures for the whole mesh through uniforms, and have them selected through the key. But as I was implementing it I noticed it would require multiple sampler2Ds, and I was wondering whether this would entail loading multiple, mostly unused textures for every pixel, and whether that would be a waste.
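For context, a minimal sketch of the C++ side of that idea: each texture is bound to its own texture unit and each sampler2D uniform stores the index of its unit. The function name, texture array and loader are assumptions; the u_Texture0..u_Texture3 uniform names match the updated shader further down.
// Assumes the GL entry points are already loaded (e.g. via GLEW or glad) and
// 'program' is the linked shader program; 'tex' holds the four texture objects.
void bindMeshTextures(GLuint program, const GLuint tex[4])
{
    glUseProgram(program);
    for (int unit = 0; unit < 4; ++unit)
    {
        glActiveTexture(GL_TEXTURE0 + unit);      // select the texture unit
        glBindTexture(GL_TEXTURE_2D, tex[unit]);  // attach this component's texture
    }
    // A sampler uniform simply stores the index of the unit it samples from.
    glUniform1i(glGetUniformLocation(program, "u_Texture0"), 0);
    glUniform1i(glGetUniformLocation(program, "u_Texture1"), 1);
    glUniform1i(glGetUniformLocation(program, "u_Texture2"), 2);
    glUniform1i(glGetUniformLocation(program, "u_Texture3"), 3);
}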
GLchar* MeshFragmentShader = //also known as pixel shader
"#version 330 core\n" //version 3, not 440 >> 450
"\n" //GLSL 3, core = non-depricated
"layout(location = 0) out vec4 color;\n"
"\n"
"in vec2 v_TexCoord;\n"
"in vec4 v_color;\n"
"in float v_lightval;\n"
"\n"
"uniform sampler2D u_Texture;\n"//<<multiple samplers?
"\n"
"void main()\n"
"{\n"
" int i = 0;\n"//<<texture/color key (location = 4) Vertbuffer
" vec4 texColor; \n"
" switch (i) {\n"
" case 0:\n"
" texColor = texture(u_Texture, v_TexCoord);\n"
" break;\n"
" case 20:\n"
" texColor = v_color;\n"//<<override
" break;\n"
" } \n"
//" texColor = texture(u_Texture, v_TexCoord); \n"
" \n"//simple code certainly somthing i would like to improve
" vec3 Kolor = texColor.rgb * (v_lightval + 0.4f);\n"//change 0.4f to gamma
" color = vec4(Kolor, 1.0f);\n"
//WORKING
//" vec4 texColor = texture(u_Texture, v_TexCoord);\n"
//" texColor.rgb *= v_lightval;\n"
//" color = texColor;\n"
"}\n";
UPDATE:
For anyone who comes across this, and to elaborate a bit more: I went from the above shader to the one below, and I added the vertex shader (and an image) for reference.
GLchar* MeshVertexShader =
"#version 400 core\n" //version 3, not 440 >> 450
"\n" //GLSL 3, core = non-depricated
"layout(location = 0) in vec4 position;\n"
"layout(location = 1) in vec2 texCoord;\n"
"layout(location = 2) in vec3 normal;\n"
"layout(location = 3) in vec4 color;\n"
"layout(location = 4) in float matid;\n"
"\n"
"out vec2 v_TexCoord;\n"
"out vec4 v_color;\n"
"out float v_matid;\n"
"out vec3 norm;\n"
"\n"
"uniform mat4 u_MVP;\n"//model matrix projection matrix
"uniform mat3 u_normalmatrix;\n"
//"uniform bool u_LightingFlag;\n" //had problems passing this
"void main()\n"
"{\n"
" vec4 postposition = u_MVP * position;\n"//3 line down
" norm = normalize( u_normalmatrix * normalize(normal) );\n"
" \n"
" gl_Position = postposition;\n"
" v_TexCoord = texCoord;\n"
" v_color = color;\n"
" \n"
" v_matid = matid;\n"
"}\n";
GLchar* MeshFragmentShader = //also known as pixel shader
"#version 400 core\n" //version 4.0, not 440 >> 450
"\n" //GLSL 4, core = non-depricated
"layout(location = 0) out vec4 color;\n"
"\n"
"in vec2 v_TexCoord;\n"
"in vec4 v_color;\n"
//"in float v_lightval;\n"
"in float v_matid;\n"
"in vec3 norm;\n"
"\n"
"uniform sampler2D u_Texture0;\n"
"uniform sampler2D u_Texture1;\n"
"uniform sampler2D u_Texture2;\n"
"uniform sampler2D u_Texture3;\n"
"uniform bool u_LightingFlag;\n"
"\n"
"void main()\n"
"{\n"
" float lightval;\n"
" if (u_LightingFlag == true){\n"
" vec3 light1 = normalize( vec3(-10.0f, 9.0f, -11.0f) );\n"
" lightval = max(dot(norm,light1),0.0f);\n"//Lambert Lighting
" } else {\n"
" lightval = 0.6f;\n"
" }\n"
" vec4 texColor;\n"
" for (int i = 0; i < 1; i++) {\n" //not a loop, a goto
" if (v_matid < 0.1f) { texColor = texture(u_Texture0, v_TexCoord);
break; }\n"
" if (v_matid < 1.1f) { texColor = texture(u_Texture1, v_TexCoord);
break; }\n"
" if (v_matid < 2.1f) { texColor = texture(u_Texture2, v_TexCoord);
break; }\n"
" if (v_matid < 3.1f) { texColor = texture(u_Texture3, v_TexCoord);
break; }\n"
" texColor = v_color;\n"//override
" }\n"
" vec3 Kolor = texColor.rgb * (lightval + 0.4f);\n"
" color = vec4(Kolor, 1.0f);\n"
//" color = v_color;\n"
"}\n";
I benchmarked it by comparing it to a version that used only one texture, forcing VSYNC off in the NVIDIA control panel and monitoring GPU usage, and the effect on the metrics was really minimal, so so far so good. If anyone knows whether I'm doing anything wrong, or could be doing anything better, please do tell. The one-iteration for-loop could probably use some work; I tried using a switch but couldn't get it to work, partly because of type-casting, and I'm thinking of passing the cases as variables.
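On the type-casting point: a switch needs an integer selector, so one option is to round the float material id to an int first. Below is an untested sketch of the fragment shader's selection logic rewritten that way (lighting stripped out for brevity); it is only an illustration, not a drop-in replacement.
GLchar* MeshFragmentSwitchSketch =
"#version 400 core\n"
"layout(location = 0) out vec4 color;\n"
"in vec2 v_TexCoord;\n"
"in vec4 v_color;\n"
"in float v_matid;\n"
"uniform sampler2D u_Texture0;\n"
"uniform sampler2D u_Texture1;\n"
"uniform sampler2D u_Texture2;\n"
"uniform sampler2D u_Texture3;\n"
"void main()\n"
"{\n"
" vec4 texColor;\n"
" switch (int(v_matid + 0.5)) {\n" // round the float key to an integer case label
" case 0: texColor = texture(u_Texture0, v_TexCoord); break;\n"
" case 1: texColor = texture(u_Texture1, v_TexCoord); break;\n"
" case 2: texColor = texture(u_Texture2, v_TexCoord); break;\n"
" case 3: texColor = texture(u_Texture3, v_TexCoord); break;\n"
" default: texColor = v_color; break;\n" // override, as in the original
" }\n"
" color = texColor;\n"
"}\n";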

How to convert these OpenGL Shaders to OpenGL ES shaders for GLES3 for android NDK?

The following shaders fail with a return of -1 when I try
col_attr = glGetAttribLocation(prog, "v_col");
I tried different settings, including switching gl_FragColor to outColor and #version 300 es to #version 150 core, and many more, before I realized I'm completely lost and there are so many variables I don't know. I just need these simple shaders converted to something that works with GLESv3 for the Android NDK in C++. All help is highly appreciated. Thank you.
Original Vertex Shader
#version 150 core
in vec3 v_pos;
in vec4 v_col;
out vec4 color;
uniform mat4 projection;
uniform mat4 view;
void main(){
color = v_col;
gl_Position = projection * view * vec4(v_pos, 1.0);
}
Original Fragment Shader
#version 150 core
in vec4 color;
void main(){
gl_FragColor = color;
}
Update: I found that only the fragment shader fails to compile.
New Vertex Shader - Compiles!
return "#version 300 es \n"
"in vec3 v_pos; \n"
"in vec4 v_col; \n"
"out vec4 color; \n"
"uniform mat4 projection; \n"
"uniform mat4 view; \n"
"void main() \n"
"{ \n"
" color = v_col; \n"
" gl_Position = projection * view * vec4(v_pos, 1.0); \n"
"} \n";
New Fragment Shader - Doesn't Compile!
return "#version 300 es \n"
"in vec4 color; \n"
"out vec4 outColor; \n"
"void main() \n"
"{ \n"
" outColor = color; \n"
"} \n";
New Fragment Shader - Compiles!
return "#version 300 es \n"
"precision mediump float; \n"
"in vec4 color; \n"
"out vec4 outColor; \n"
"void main() \n"
"{ \n"
" outColor = color; \n"
"} \n";
You have to declare the fragment shader output variable out vec4 outColor.
Further, you have to add a precision qualifier.
A valid GLSL ES 3.00 fragment shader would be:
#version 300 es
precision mediump float;
in vec4 color;
out vec4 outColor;
void main(){
outColor = color;
}
The version information (#version 300 es) has to be changed in the vertex shader, too.
See OpenGL ES Shading Language 3.00 Specification - 4.3.6 Output Variables page 42:
Fragment outputs are declared as in the following examples:
out vec4 FragmentColor;
out uint Luminosity;
See OpenGL ES Shading Language 3.00 Specification - 4.5.4 Default Precision Qualifiers page 56:
The fragment language has no default precision qualifier for floating point types. Hence for float, floating point vector and matrix variable declarations, either the declaration must include a precision qualifier or the default float precision must have been previously declared.
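In cases like this, the compiler's info log usually names the missing precision qualifier directly. A minimal sketch of retrieving it with the standard GL calls (the function name and buffer size are arbitrary choices for this example):
#include <GLES3/gl3.h>
#include <cstdio>

// Compiles one shader stage and prints the driver's info log if it fails.
GLuint compileShader(GLenum type, const char* source)
{
    GLuint shader = glCreateShader(type);
    glShaderSource(shader, 1, &source, nullptr);
    glCompileShader(shader);

    GLint ok = GL_FALSE;
    glGetShaderiv(shader, GL_COMPILE_STATUS, &ok);
    if (ok != GL_TRUE)
    {
        char log[1024];   // arbitrary size for this sketch
        glGetShaderInfoLog(shader, sizeof(log), nullptr, log);
        std::printf("shader compile error: %s\n", log);
    }
    return shader;
}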

QOpenGLWidget cannot use more than one sampler2D

I've created a very basic shader:
static const char *frag_showImage =
"#version 150\n"
"uniform sampler2D textureSampler;\n"
"in mediump vec2 texc;\n"
"out highp vec4 fragColor;\n"
"void main() {\n"
" fragColor = texture2D(textureSampler, texc.st);\n"
"}\n";
And it works as expected. Now, a slightly more complex one:
"#version 150\n"
"uniform sampler2D textureSampler;\n"
"uniform sampler2D maskSampler;\n"
"in mediump vec2 texc;\n"
"out highp vec4 fragColor;\n"
"void main() {\n"
" fragColor = texture2D(textureSampler, texc.st);\n"
" fragColor.x = texture2D(maskSampler, texc.st).x;\n"
" fragColor.y =0;\n"
"}\n";
It doesn't work, but there are no warnings or errors.
In both cases I bind the first texture as:
QOpenGLFunctions *f =this->context()->functions();
f->glActiveTexture(GL_TEXTURE0);
glBaseTexture->bind();
m_program->setUniformValue("textureSampler", 0);
and the second texture is bound as:
f->glActiveTexture(GL_TEXTURE1);
glMaskTexture->bind();
m_program->setUniformValue("maskSampler", 1);
Notice that if I bind glMaskTexture for the first shader it works fine, so the problem is not with that QOpenGLTexture.
Any idea? Thank you in advance!
Goblins: renaming maskSampler to another name makes it work. I have no idea why this is happening, since "maskSampler" is not used in any other part of the code.
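For reference, a sketch of binding both samplers inside the widget's paintGL(); the key detail is that QOpenGLShaderProgram::setUniformValue() only affects the currently bound program, so m_program->bind() must come first. The class name is a placeholder; m_program, glBaseTexture and glMaskTexture are the objects from the question.
#include <QOpenGLWidget>
#include <QOpenGLFunctions>
#include <QOpenGLShaderProgram>
#include <QOpenGLTexture>

// Sketch of the binding order inside QOpenGLWidget::paintGL(); 'MyGLWidget'
// is a placeholder class name wrapping the members used in the question.
void MyGLWidget::bindBothTextures()
{
    QOpenGLFunctions *f = context()->functions();

    m_program->bind();                         // uniforms only apply to the bound program

    f->glActiveTexture(GL_TEXTURE0);
    glBaseTexture->bind();                     // QOpenGLTexture::bind() uses the active unit
    m_program->setUniformValue("textureSampler", 0);

    f->glActiveTexture(GL_TEXTURE1);
    glMaskTexture->bind();
    m_program->setUniformValue("maskSampler", 1);
}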

Passing data through tessellation shaders to the fragment shader

I'm a bit confused about how the shader pipeline works with regard to passing data through each stage.
What I'm trying to do is pass color data, loaded in the vertex stage using glVertexAttrib4fv(), through the tessellation control shader and then the tessellation evaluation shader so that it can be used in the fragment shader. I'm not sure if I've made some sort of conceptual mistake (quite possible, since I'm still trying to get my head around this after the fixed-function pipeline), but either way, as soon as I try to pass anything through the tessellation shaders, my primitives refuse to render at all. Before that, my primitive renders, but only in black. My shaders are as follows:
Vertex Shader:
static const GLchar* vss[] =
{
"#version 430 core \n"
" \n"
"layout (location = 0) in vec4 offset; \n"
"layout (location = 1) in vec4 color; \n"
" \n"
"out vec4 vs_color; \n"
" \n"
"void main(void) \n"
"{ \n"
" const vec4 vertices[] = vec4[](vec4( 0.25, -0.25, -0.5, 1.0), \n"
" vec4(-0.25, -0.25, -0.5, 1.0), \n"
" vec4( 0.25, 0.25, -0.5, 1.0)); \n"
" \n"
" gl_Position = vertices[gl_VertexID] + offset; \n"
" vs_color = color; \n"
"} \n"
};
Tessellation control shader:
static const GLchar* tc_ss[] =
{
"#version 430 core \n"
"layout (vertices = 3) out; \n"
"in vec4 vs_color; \n"
"out vec4 tcs_color; \n"
"void main(void) \n"
"{ \n"
" if (gl_InvocationID == 0) \n"
" { \n"
" gl_TessLevelInner[0] = 10.0; \n"
" gl_TessLevelOuter[0] = 10.0; \n"
" gl_TessLevelOuter[1] = 10.0; \n"
" gl_TessLevelOuter[2] = 10.0; \n"
" } \n"
" gl_out[gl_InvocationID].gl_Position = gl_in[gl_InvocationID].gl_Position; \n"
" tcs_color = vs_color; \n"
"}"
};
Tessellation Evaluation shader:
static const GLchar* te_ss[] =
{
"#version 430 core \n"
"in vec4 tcs_color; \n"
"out vec4 tes_color; \n"
"layout (triangles, equal_spacing, cw) in; \n"
"void main(void) \n"
"{ \n"
" gl_Position = (gl_TessCoord.x * gl_in[0].gl_Position + \n"
" gl_TessCoord.y * gl_in[1].gl_Position + \n"
" gl_TessCoord.z * gl_in[2].gl_Position); \n"
" tes_color = tcs_color; \n"
"}"
};
Fragment shader:
static const GLchar* fss[] =
{
"#version 430 core \n"
"in vec4 tes_color; \n"
"out vec4 color; \n"
" \n"
"void main(void) \n"
"{ \n"
" color = tes_color; \n"
"} \n"
};
This is not surprising; TCS inputs/outputs must be in the form:
in vec4 vs_color [];
out vec4 tcs_color [];
or in input/output blocks that also take the form of unbounded arrays:
in CustomVertex {
vec4 color;
} custom_vs [];
out CustomVertex {
vec4 color;
} custom_tcs [];
For a little bit of context, this is what a TCS / geometry shader sees as the output from vertex shaders:
in gl_PerVertex
{
vec4 gl_Position;
float gl_PointSize;
float gl_ClipDistance [];
} gl_in [];
To keep things as simple as possible, I will avoid using interface blocks.
Instead, I will introduce the concept of per-patch inputs and outputs, because they will further simplify your shaders considering the color is constant across the entire tessellated surface...
Modified Tessellation Control Shader:
in vec4 vs_color [];
patch out vec4 patch_color;
...
patch_color = vs_color [gl_InvocationID];
Modified Tessellation Evaluation Shader:
patch in vec4 patch_color;
out vec4 tes_color;
...
tes_color = patch_color;
With these changes, you should have a working pass-through and a slightly better understanding of how the TCS and TES stages work.
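As a recap, here is a sketch of the question's two tessellation shaders with those per-patch changes folded in; only the color plumbing differs from the originals.
static const GLchar* tc_ss_fixed[] =
{
    "#version 430 core                                               \n"
    "layout (vertices = 3) out;                                      \n"
    "in vec4 vs_color [];            // per-vertex inputs are arrays \n"
    "patch out vec4 patch_color;     // one value per patch          \n"
    "void main(void)                                                 \n"
    "{                                                               \n"
    "    if (gl_InvocationID == 0)                                   \n"
    "    {                                                           \n"
    "        gl_TessLevelInner[0] = 10.0;                            \n"
    "        gl_TessLevelOuter[0] = 10.0;                            \n"
    "        gl_TessLevelOuter[1] = 10.0;                            \n"
    "        gl_TessLevelOuter[2] = 10.0;                            \n"
    "    }                                                           \n"
    "    gl_out[gl_InvocationID].gl_Position =                       \n"
    "        gl_in[gl_InvocationID].gl_Position;                     \n"
    "    patch_color = vs_color[gl_InvocationID];                    \n"
    "}                                                               \n"
};

static const GLchar* te_ss_fixed[] =
{
    "#version 430 core                                               \n"
    "layout (triangles, equal_spacing, cw) in;                       \n"
    "patch in vec4 patch_color;                                      \n"
    "out vec4 tes_color;                                             \n"
    "void main(void)                                                 \n"
    "{                                                               \n"
    "    gl_Position = (gl_TessCoord.x * gl_in[0].gl_Position +      \n"
    "                   gl_TessCoord.y * gl_in[1].gl_Position +      \n"
    "                   gl_TessCoord.z * gl_in[2].gl_Position);      \n"
    "    tes_color = patch_color;                                    \n"
    "}                                                               \n"
};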