So I made a mesh in Maya, wrote some code to import it into the vertex buffer and so far so good but the mesh has multiple components that have different textures assigned and I figured thats easy ill put a key in the vertbuffer and run a switch statement (and from what i can gather from this post thats not such a bad thing) and send the required textures for the whole mesh through uniforms and have them selected through the key but as i was implementing it i noticed it would require multiple sampler2D's and was wondering if this would entail loading multiple mostly unused textures every pixel and would this be a waste?
GLchar* MeshFragmentShader = //also known as pixel shader
"#version 330 core\n" //version 3, not 440 >> 450
"\n" //GLSL 3, core = non-depricated
"layout(location = 0) out vec4 color;\n"
"\n"
"in vec2 v_TexCoord;\n"
"in vec4 v_color;\n"
"in float v_lightval;\n"
"\n"
"uniform sampler2D u_Texture;\n"//<<multiple samplers?
"\n"
"void main()\n"
"{\n"
" int i = 0;\n"//<<texture/color key (location = 4) Vertbuffer
" vec4 texColor; \n"
" switch (i) {\n"
" case 0:\n"
" texColor = texture(u_Texture, v_TexCoord);\n"
" break;\n"
" case 20:\n"
" texColor = v_color;\n"//<<override
" break;\n"
" } \n"
//" texColor = texture(u_Texture, v_TexCoord); \n"
" \n"//simple code certainly somthing i would like to improve
" vec3 Kolor = texColor.rgb * (v_lightval + 0.4f);\n"//change 0.4f to gamma
" color = vec4(Kolor, 1.0f);\n"
//WORKING
//" vec4 texColor = texture(u_Texture, v_TexCoord);\n"
//" texColor.rgb *= v_lightval;\n"
//" color = texColor;\n"
"}\n";
UPDATE:
For anyone who comes across this and maybe to elaborate a bit more i went from the above shader to this and I added the vertex shader (and an image) for reference;
GLchar* MeshVertexShader =
"#version 400 core\n" //version 3, not 440 >> 450
"\n" //GLSL 3, core = non-depricated
"layout(location = 0) in vec4 position;\n"
"layout(location = 1) in vec2 texCoord;\n"
"layout(location = 2) in vec3 normal;\n"
"layout(location = 3) in vec4 color;\n"
"layout(location = 4) in float matid;\n"
"\n"
"out vec2 v_TexCoord;\n"
"out vec4 v_color;\n"
"out float v_matid;\n"
"out vec3 norm;\n"
"\n"
"uniform mat4 u_MVP;\n"//model matrix projection matrix
"uniform mat3 u_normalmatrix;\n"
//"uniform bool u_LightingFlag;\n" //had problems passing this
"void main()\n"
"{\n"
" vec4 postposition = u_MVP * position;\n"//3 line down
" norm = normalize( u_normalmatrix * normalize(normal) );\n"
" \n"
" gl_Position = postposition;\n"
" v_TexCoord = texCoord;\n"
" v_color = color;\n"
" \n"
" v_matid = matid;\n"
"}\n";
GLchar* MeshFragmentShader = //also known as pixel shader
"#version 400 core\n" //version 4.0, not 440 >> 450
"\n" //GLSL 4, core = non-depricated
"layout(location = 0) out vec4 color;\n"
"\n"
"in vec2 v_TexCoord;\n"
"in vec4 v_color;\n"
//"in float v_lightval;\n"
"in float v_matid;\n"
"in vec3 norm;\n"
"\n"
"uniform sampler2D u_Texture0;\n"
"uniform sampler2D u_Texture1;\n"
"uniform sampler2D u_Texture2;\n"
"uniform sampler2D u_Texture3;\n"
"uniform bool u_LightingFlag;\n"
"\n"
"void main()\n"
"{\n"
" float lightval;\n"
" if (u_LightingFlag == true){\n"
" vec3 light1 = normalize( vec3(-10.0f, 9.0f, -11.0f) );\n"
" lightval = max(dot(norm,light1),0.0f);\n"//Lambert Lighting
" } else {\n"
" lightval = 0.6f;\n"
" }\n"
" vec4 texColor;\n"
" for (int i = 0; i < 1; i++) {\n" //not a loop, a goto
" if (v_matid < 0.1f) { texColor = texture(u_Texture0, v_TexCoord);
break; }\n"
" if (v_matid < 1.1f) { texColor = texture(u_Texture1, v_TexCoord);
break; }\n"
" if (v_matid < 2.1f) { texColor = texture(u_Texture2, v_TexCoord);
break; }\n"
" if (v_matid < 3.1f) { texColor = texture(u_Texture3, v_TexCoord);
break; }\n"
" texColor = v_color;\n"//override
" }\n"
" vec3 Kolor = texColor.rgb * (lightval + 0.4f);\n"
" color = vec4(Kolor, 1.0f);\n"
//" color = v_color;\n"
"}\n";
I benchmarked it by comparing that to a version that used only one texture, by forcing VSYNC off in nvidia control panel and monitoring gpu usage and the effects and metrics were really minimal, so so far so good but if anyone knows if im doing anything wrong or could be doing anything better than please do tell the one loop for-loop could possibly use some work I tried using a switch but couldnt get it to work partly cause of type-casting and Im thinking of passing the cases as variables
Related
Trying to implement SSAO in my OpenGL app. But came up with some strange artifact. How could i solve this blackish stuff?
Here is the fragment shader for SSAO
"int KERNEL_MAX = 128;"
"uniform vec3 kernel[128];"
"uniform mat4 P;"
"uniform sampler2D tex;"
"uniform sampler2D texPos;"
"uniform sampler2D texNormal;"
"uniform sampler2D texDepth;"
"uniform vec3 camPos;"
"void main () {"
" vec4 pixel_color = texture2D(tex, v_uv);"
" vec3 pixel_pos = texture2D(texPos, v_uv).rgb;"
" vec3 pixel_normal = texture2D(texNormal, v_uv).rgb;"
" float pixel_depth = texture2D(texDepth, v_uv).r;"
" float occlusion = 0.0;"
" for (int i = 0; i < KERNEL_MAX; ++i) {"
" vec3 sample_ray = pixel_pos + kernel[i] * sign(dot(pixel_normal, kernel[i])) * 0.1;"
" vec4 sample_pos = (P * vec4(sample_ray, 1.0));"
" sample_pos.xy /= sample_pos.w;"
" sample_pos.xy = sample_pos.xy * 0.5 - 0.5;"
" float sample_depth = texture2D(texDepth, sample_pos.xy).r;"
" if (sample_depth < pixel_depth) { occlusion += 1.0; }"
" }"
In general a bias is used, in Screen Space Ambient Occlusion algorithms:
const float bias = 0.0001;
if (sample_depth < pixel_depth - bias) { occlusion += 1.0; }
Im working on Opengl ES I got 2 shader interfering with each other. I mean, if I use the second shader, the lightning shader stops working as it normally does, as if the normals on the vbo gets corrupted. It only happens when I add this shader:
"#ifdef GL_ES \n" //00
"precision mediump float; \n" //01
"#else \n" //02
"#version 100 \n" //03
"precision mediump float; \n" //04
"#endif
\n" //05
"//ProgressBarShader\n"
"uniform float iGlobalTime; \n" //06
"varying vec34 v_position; \n"
"varying vec2 v_texCoord; \n" //07
"uniform sampler2D s_texture; \n" //11
"void main() {\n" //1
" float igt=((iGlobalTime*15.0)+(sin(iGlobalTime)*25.0)); \n"
" if (gl_FragCoord.x>(1024.0-igt)) \n"
" gl_FragColor = texture2D( s_texture, v_texCoord ); "
"}\n";
The other one is a Per Fragment Shader took from this web : http://www.lighthouse3d.com/tutorials/glsl-tutorial/point-light-per-pixel/
but modified for opengl es 2.0 (and my inputs)
"#ifdef GL_ES \n"
"precision mediump float; \n"
"#else \n"
"#version 100 \n"
"precision mediump float; \n"
"#endif \n"
"uniform float iGlobalTime; \n"
"varying vec2 v_texCoord; \n"
"varying vec4 v_normal; \n"
"varying vec4 v_position; \n"
"uniform sampler2D s_texture; \n"
"uniform mat4 MVPMat; \n"
"uniform mat3 iMVPMat; \n"
"uniform vec2 iResolution; \n"
"uniform vec3 iMouse; \n"
"uniform vec4 objectMaterialEmission; // Ecm \n"
"uniform vec4 objectMaterialAmbient; // Acm \n"
"uniform vec4 objectMaterialDiffuse; // Dcm \n"
"uniform vec4 objectMaterialSpecular; // Scm \n"
"uniform float objectMaterialGlossiness; // Gcm \n"
"uniform float objectMaterialShininess; // Srm \n"
"uniform float lightStrength; \n"
"uniform vec4 lightAmbient; \n"
"uniform vec4 lightDiffuse; \n"
"uniform vec4 lightSpecular; \n"
"uniform vec3 lightPosition; \n"
"uniform vec3 lightSpotDirection; \n"
"uniform float lightSpotExponent; \n"
"uniform float lightSpotCutoff; // (range: [0.0,90.0], 180.0)\n"
"uniform float lightSpotCosCutoff; // (range: [1.0,0.0],-1.0)\n"
"uniform float lightConstantAttenuation; \n"
"uniform float lightLinearAttenuation; \n"
"uniform float lightQuadraticAttenuation;\n"
"uniform bool lightOn; \n"
"varying vec3 N;\n"
"varying vec3 v;\n"
"void main (void) \n"
"{ \n"
" vec3 L = normalize(-lightPosition.xyz - v); \n"
" vec3 E = normalize(-v); // we are in Eye Coordinates, so EyePos is (0,0,0) \n"
" vec3 R = normalize(-reflect(L,N)); \n"
" //calculate Ambient Term: \n"
" vec4 Iamb = lightAmbient; \n"
" // write Total Color: \n"
" vec4 textureColor = texture2D( s_texture, v_texCoord ); \n"
" vec4 color =textureColor; \n"
" //calculate Diffuse Term: \n"
" vec4 Idiff = lightDiffuse * max(dot(N,L), 0.0);\n"
" Idiff = clamp(Idiff, 0.0, 1.0); \n"
" float distance = length(lightPosition-v); \n"
" float attenuation = lightStrength/(lightConstantAttenuation+(distance*lightLinearAttenuation));\n"
" gl_FragColor = clamp( color *(Idiff+Iamb)*attenuation,0.0,1.0); \n"
" gl_FragColor.a = 1.0;"
"}\n";
If I use the progress bar shader (the first one), my lightnings dissapears and only some faces gets enlighted. Any other shader works fine, it only gets disrupted when I add the progress bar shader.
I researched the web, but never seen something about shader interfering each other. Is it possible? What should I do?
At the end, it was a fail on the data binding, the binding required the shader to be active (glUseProgram) the first time, the binding was correct, but when the lighting shader came, the program active was the wrong one, and the binding when to the other one.
Sorry for the misleading question.
When I try adding a geometry shader between working vertex and fragment shaders I get a link error:
Fragment shader(s) failed to link, vertex shader(s) failed to link.
ERROR: error(#280) Not all shaders have valid object code
ERROR: error(#280) Not all shaders have valid object code
All three shaders compile without errors. I guess the in and outs doesn't fit in the information flow pipeline. The built-in ins and outs confuse me so I can't spot the error.
Source for the shaders:
vertex_source =
"#version 330\n"
"in vec3 Position;\n"
"in vec2 TexCoord;\n"
"out vec3 oColor;\n"
"out vec2 oTexcoord;\n"
"void main() {\n"
" oTexcoord = TexCoord;\n"
" gl_Position = gl_ModelViewProjectionMatrix*vec4(Position, 1.0);\n"
"}\n";
geometry_source =
"#version 330\n";
"layout (triangles) in;\n";
"layout (triangle_strip, max_vertices=3) out;\n";
"in vec3 Color;\n";
"in vec2 TexCoord;\n";
"out vec3 oColor;\n";
"out vec2 oTexCoord;\n";
"void main() {\n";
" oColor = Color;\n";
" oTexCoord = TexCoord;\n";
" gl_Position = gl_in[0].gl_Position;\n";
" EmitVertex();\n";
" gl_Position = gl_in[1].gl_Position;\n";
" EmitVertex();\n";
" gl_Position = gl_in[2].gl_Position;\n";
" EmitVertex();\n";
" EndPrimitive();\n";
"}\n";
fragment_source =
"#version 330\n"
"in vec2 oTexcoord;\n"
"out vec4 oColor;\n"
"uniform sampler2D tex;\n"
"uniform sampler2D tex_norm;\n"
"uniform sampler2D tex_spec;\n"
"void main() {\n"
" vec4 lightpos = normalize(-gl_ModelViewProjectionMatrix*vec4(1.0, -1.0, -1.5, 1.0));\n"
" vec3 tmpNorm = normalize(texture2D(tex_norm, oTexcoord).rgb * 2.0 - 1.0);\n"
" float a = dot(tmpNorm, lightpos.xyz);\n"
" float difuse = max(a, 0.1);\n"
" float spec = texture2D(tex_spec, oTexcoord).r * pow(a, 2.0);\n"
" vec3 tmpcolor = difuse * texture2D(tex, oTexcoord).rgb;\n"
" oColor = vec4(tmpcolor+tmpcolor*spec, 1.0);\n"
"}\n";
What am I doing wrong in the geometry shader?
I have tried skipping the unused oColor out and changed to an array in geometry shader like this:
#define GLSL(src) "#version 330 core\n" #src
vertex_source = GLSL(
in vec3 Position;
in vec2 TexCoord;
out vec2 oTexcoord;
void main() {
gl_Position = gl_ModelViewProjectionMatrix*vec4(Position, 1.0);
oTexcoord = TexCoord;
}
);
geometry_source = GLSL(
layout (triangles) in;
layout (triangle_strip, max_vertices=3) out;
in vec2 gsTexCoord[];
out vec2 gsoTexCoord;
void main() {
gsoTexCoord = gsTexCoord[0];
gl_Position = gl_in[0].gl_Position;
EmitVertex();
gsoTexCoord = gsTexCoord[1];
gl_Position = gl_in[1].gl_Position;
EmitVertex();
gsoTexCoord = gsTexCoord[2];
gl_Position = gl_in[2].gl_Position;
EmitVertex();
EndPrimitive();
}
);
fragment_source = GLSL(
in vec2 oTexcoord;
out vec4 oColor;
uniform sampler2D tex;
uniform sampler2D tex_norm;
uniform sampler2D tex_spec;
void main() {
vec4 lightpos = normalize(-gl_ModelViewProjectionMatrix*vec4(1.0, -1.0, -1.5, 1.0));
vec3 tmpNorm = normalize(texture2D(tex_norm, oTexcoord).rgb * 2.0 - 1.0);
float a = dot(tmpNorm, lightpos.xyz);
float difuse = max(a, 0.1);
float spec = texture2D(tex_spec, oTexcoord).r * pow(a, 2.0);
vec3 tmpcolor = difuse * texture2D(tex, oTexcoord).rgb;
oColor = vec4(tmpcolor+tmpcolor*spec, 1.0);
}
);
That gives me the following link error:
Fragment shader(s) failed to link, vertex shader(s) failed to link.
ERROR: error(#277) Symbol 'gsTexCoord[0]' usage doesn't match between two stages
ERROR: error(#277) Symbol 'oTexcoord' usage doesn't match between two stages
ERROR: error(#277) Symbol 'gsTexCoord[0]' usage doesn't match between two stages
ERROR: error(#277) Symbol 'oTexcoord' usage doesn't match between two stages
Your geometry shader inputs need to be array-valued and match the names in your vertex shader:
// existing vertex shader outputs:
out vec3 oColor;
out vec2 oTexcoord;
// wrong geometry shader inputs:
in vec3 Color;
in vec2 TexCoord;
// correct geometry shader inputs:
in vec3 oColor[];
in vec2 oTexCoord[];
I'm a bit confused about how the shader pipeline works with regards to passing data through each stage.
What I'm trying to do is pass color data that is loaded in the vertex stage using glVertexAttrib4fv() through the tessellation control shader, and then the tessellation evaluation shader, so that it can be used in the fragment shader. I'm not sure if I've made some sort of conceptual mistake (quite possible, since I'm still trying to get my head around this over fixed functions), but either way, as soon as I try and pass anything through the tessellation shaders, my primitives refuse to render at all. Before that, my primitive renders, but it only renders in black. My shaders are as follows:
Vertex Shader:
static const GLchar* vss[] =
{
"#version 430 core \n"
" \n"
"layout (location = 0) in vec4 offset; \n"
"layout (location = 1) in vec4 color; \n"
" \n"
"out vec4 vs_color; \n"
" \n"
"void main(void) \n"
"{ \n"
" const vec4 vertices[] = vec4[](vec4( 0.25, -0.25, -0.5, 1.0), \n"
" vec4(-0.25, -0.25, -0.5, 1.0), \n"
" vec4( 0.25, 0.25, -0.5, 1.0)); \n"
" \n"
" gl_Position = vertices[gl_VertexID] + offset; \n"
" vs_color = color; \n"
"} \n"
};
Tessellation control shader:
static const GLchar* tc_ss[] =
{
"#version 430 core \n"
"layout (vertices = 3) out; \n"
"in vec4 vs_color; \n"
"out vec4 tcs_color; \n"
"void main(void) \n"
"{ \n"
" if (gl_InvocationID == 0) \n"
" { \n"
" gl_TessLevelInner[0] = 10.0; \n"
" gl_TessLevelOuter[0] = 10.0; \n"
" gl_TessLevelOuter[1] = 10.0; \n"
" gl_TessLevelOuter[2] = 10.0; \n"
" } \n"
" gl_out[gl_InvocationID].gl_Position = gl_in[gl_InvocationID].gl_Position; \n"
" tcs_color = vs_color; \n"
"}"
};
Tessellation Evaluation shader:
static const GLchar* te_ss[] =
{
"#version 430 core \n"
"in vec4 tcs_color; \n"
"out vec4 tes_color; \n"
"layout (triangles, equal_spacing, cw) in; \n"
"void main(void) \n"
"{ \n"
" gl_Position = (gl_TessCoord.x * gl_in[0].gl_Position + \n"
" gl_TessCoord.y * gl_in[1].gl_Position + \n"
" gl_TessCoord.z * gl_in[2].gl_Position); \n"
" tes_color = tcs_color; \n"
"}"
};
Fragment shader:
static const GLchar* fss[] =
{
"#version 430 core \n"
"in vec4 tes_color; \n"
"out vec4 color; \n"
" \n"
"void main(void) \n"
"{ \n"
" color = tes_color; \n"
"} \n"
};
This is not surprising, TCS inputs/outputs must be in the form:
in vec4 vs_color [];
out vec4 tcs_color [];
or in input/output blocks that also take the form of unbounded arrays:
in CustomVertex {
vec4 color;
} custom_vs [];
out CustomVertex {
vec4 color;
} custom_tcs [];
For a little bit of context, this is what a TCS / geometry shader sees as the output from vertex shaders:
in gl_PerVertex
{
vec4 gl_Position;
float gl_PointSize;
float gl_ClipDistance [];
} gl_in [];
To keep things as simple as possible, I will avoid using interface blocks.
Instead, I will introduce the concept of per-patch inputs and outputs, because they will further simplify your shaders considering the color is constant across the entire tessellated surface...
Modified Tessellation Control Shader:
in vec4 vs_color [];
patch out vec4 patch_color;
...
patch_color = vs_color [gl_InvocationID];
Modified Tessellation Evaluation Shader:
patch in vec4 patch_color;
out vec4 tes_color;
...
tes_color = patch_color;
With these changes, you should have a working pass-through and a slightly better understanding of how the TCS and TES stages work.
I'm trying to add an uniform var to my shader , this is my shader:
String vertexShader = "attribute vec4 " + ShaderProgram.POSITION_ATTRIBUTE + ";\n" //
+ "attribute vec4 " + ShaderProgram.COLOR_ATTRIBUTE + ";\n" //
+ "attribute vec2 " + ShaderProgram.TEXCOORD_ATTRIBUTE + "0;\n" //
+ "uniform mat4 u_projTrans;\n" //
+ "varying vec4 v_color;\n" //
+ "varying vec2 v_texCoords;\n" //
+ "\n" //
+ "void main()\n" //
+ "{\n" //
+ " v_texCoords = " + ShaderProgram.TEXCOORD_ATTRIBUTE + "0;\n" //
+ " gl_Position = u_projTrans * " + ShaderProgram.POSITION_ATTRIBUTE + ";\n" //
+ "}\n";
String fragmentShader = "#ifdef GL_ES\n" //
+ "#define LOWP lowp\n" //
+ "precision mediump float;\n" //
+ "#else\n" //
+ "#define LOWP \n" //
+ "#endif\n" //
+ "varying vec2 v_texCoords;\n" //
+ "uniform sampler2D u_texture;\n" //
+ "uniform sampler2D u_texturePalette;\n" //
+ "void main()\n"//
+ "{\n" //
+ " vec4 textureColor = texture2D(u_texture, v_texCoords).rgba;\n"
+ " vec4 paletteColor = texture2D(u_texturePalette, vec2(textureColor.b,0)).rgba;\n"
+ " gl_FragColor = paletteColor;\n" //
+ "}";
The problem comes with
uniform sampler2D u_texturePalette;
ShaderProgram doesn't recognize the new uniform in the shader. Also i have done this :
shader = new ShaderProgram(vertexShader, fragmentShader);
shader.setUniformi("u_texturePalette", 0);
Texture text = new Texture(Gdx.files.internal("./data/palette.png"));
text.bind(shader.getUniformLocation("u_texturePalette"));
batch = new SpriteBatch();
batch.setShader(shader);
When i call function shader.hasUniform("u_texturePalette") , it resolves to true , but when i go to see shader.getUniformLocation("u_texturePalette") it resolves to -1
Is this a bug? . Any idea of what i'm doing bad?
Shader code looks fine.
Generally you do it like this:
shader = new ShaderProgram(vertexShader, fragmentShader);
Texture text = new Texture(Gdx.files.internal("./data/palette.png"));
text.bind(0);
shader.setUniformi("u_texturePalette", 0);
So you bind the texture to an active texture unit and then pass that unit via setUniformi().
getUniformLocation() does actually something different: https://www.khronos.org/opengles/sdk/docs/man/xhtml/glGetUniformLocation.xml
However you will run into some more issues here, since SpriteBatch internally does some very specific stuff with the custom shader. I'd recommend you take a look at the code:
https://github.com/libgdx/libgdx/blob/master/gdx/src/com/badlogic/gdx/graphics/g2d/SpriteBatch.java
Specifically it will expect the default texture to always be at unit 0.