I am trying to make a fragment shader for a 2D art style I'm working on.
But to make that work I must make a texture able to overlap itself multiple times.
The result I want and the two current results:
Edit 1: In my art style I have one texture per plane. To create the illusion of each plane having thickness, I need it to draw itself two times, offset by one pixel.
Illustration:
If a green plane is drawn and then a blue plane, then yes, I would like to replace the green area with blue where they overlap.
I have attempted to subtract the sample at the next texture location to cancel out the blending, but that also added negative values in empty areas.
```
varying vec2 v_vTexcoord;
varying vec4 v_vColour;
uniform float shxx;
uniform float shyy;
void main()
{
    vec2 Coord1 = v_vTexcoord + vec2(0,0);
    vec2 Coord2 = v_vTexcoord + vec2(shxx,shyy);
    vec2 Coord3 = v_vTexcoord + vec2(shxx+shxx,shyy+shyy);
    vec2 Coord4 = v_vTexcoord + vec2(shxx+shxx+shxx,shyy+shyy+shyy);
    gl_FragColor = v_vColour * texture2D( gm_BaseTexture, Coord1)
                 - v_vColour * texture2D( gm_BaseTexture, Coord2)
                 + v_vColour * texture2D( gm_BaseTexture, Coord2)
                 - v_vColour * texture2D( gm_BaseTexture, Coord3)
                 + v_vColour * texture2D( gm_BaseTexture, Coord3)
                 - v_vColour * texture2D( gm_BaseTexture, Coord4)
                 + v_vColour * texture2D( gm_BaseTexture, Coord4);
}
```
Found a solution: every time I subtract the next texture position, I set all fragments with an alpha value less than 0.9 to black. This way the next texture gets drawn on top of no existing colors, thus avoiding the textures blending into white.
Final Result = https://i.imgur.com/aexqIts.png
```
varying vec2 v_vTexcoord;
varying vec4 v_vColour;
uniform float shxx;
uniform float shyy;
void main()
{
    vec2 Coord1 = v_vTexcoord + vec2(0,0);
    vec2 Coord2 = v_vTexcoord + vec2(shxx,shyy);
    vec2 Coord3 = v_vTexcoord + vec2(shxx+shxx,shyy+shyy);
    vec2 Coord4 = v_vTexcoord + vec2(shxx+shxx+shxx,shyy+shyy+shyy);
    vec2 Coord5 = v_vTexcoord + vec2(shxx+shxx+shxx+shxx,shyy+shyy+shyy+shyy);

    gl_FragColor = v_vColour * texture2D( gm_BaseTexture, Coord1)
                 - texture2D( gm_BaseTexture, Coord2) * 0.9;
    if( gl_FragColor.a < 0.9 ) gl_FragColor = vec4(0.0,0.0,0.0,0.0);

    gl_FragColor += v_vColour * texture2D( gm_BaseTexture, Coord2)
                  - texture2D( gm_BaseTexture, Coord3) * 0.9;
    if( gl_FragColor.a < 0.9 ) gl_FragColor = vec4(0.0,0.0,0.0,0.0);

    gl_FragColor += v_vColour * texture2D( gm_BaseTexture, Coord3)
                  - texture2D( gm_BaseTexture, Coord4) * 0.9;
    if( gl_FragColor.a < 0.9 ) gl_FragColor = vec4(0.0,0.0,0.0,0.0);

    gl_FragColor += v_vColour * texture2D( gm_BaseTexture, Coord4)
                  - texture2D( gm_BaseTexture, Coord5) * 0.9;
    if( gl_FragColor.a < 0.9 ) gl_FragColor = vec4(0.0,0.0,0.0,0.0);

    gl_FragColor += v_vColour * texture2D( gm_BaseTexture, Coord5);
}
```
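The same idea can also be written as a loop, which makes it easier to change the number of layers. This is only a minimal sketch of the approach above (untested in GameMaker): it assumes a constant layer count, since GLSL ES wants loop bounds known at compile time, and reuses the original shxx/shyy uniforms as the per-layer offset.
```
varying vec2 v_vTexcoord;
varying vec4 v_vColour;
uniform float shxx;
uniform float shyy;

const int LAYERS = 5; // number of stacked copies of the texture

void main()
{
    vec2 layerOffset = vec2(shxx, shyy);
    vec4 result = vec4(0.0, 0.0, 0.0, 0.0);

    // Sample the layers in order of increasing offset. A layer that is
    // solid enough simply overwrites whatever was accumulated before it,
    // so the copies never blend into white.
    for (int i = 0; i < LAYERS; i++)
    {
        // gm_BaseTexture is GameMaker's built-in sampler for the current texture.
        vec4 layerColor = v_vColour * texture2D(gm_BaseTexture, v_vTexcoord + layerOffset * float(i));
        if (layerColor.a >= 0.9) result = layerColor;
    }

    gl_FragColor = result;
}
```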
I've been following along with the OpenGL 4 Shading Language Cookbook and have gotten a teapot rendering with Bézier surfaces. The next step I'm attempting is to draw a wireframe over the surfaces using a geometry shader. The directions can be found here on pages 228-230. Following the code that is given, I've gotten the wireframe to display; however, I also have multiple fragments that flicker in different shades of my material color.
An image of this can be seen here.
I have narrowed the possible issues down and discovered that, for some reason, my triangle height calculations are producing varying side lengths: if I hard-code the edge-distance values for each vertex of the triangle within the geometry shader, the teapot no longer flickers, but no wireframe is displayed either (variables ha, hb, hc in the geometry shader below).
I was wondering if anyone has run into this issue before or is aware of a workaround.
Below are some sections of my code:
Geometry Shader:
```
/*
 * Geometry Shader
 *
 * CSCI 499, Computer Graphics, Colorado School of Mines
 */
#version 410 core

layout( triangles ) in;
layout( triangle_strip, max_vertices = 3 ) out;

out vec3 GNormal;
out vec3 GPosition;
out vec3 ghalfwayVec;
out vec3 GLight;
noperspective out vec3 GEdgeDistance;

in vec4 TENormal[];
in vec4 TEPosition[];
in vec3 halfwayVec[];
in vec3 TELight[];

uniform mat4 ViewportMatrix;

void main() {
    // Transform each vertex into viewport space
    vec3 p0 = vec3(ViewportMatrix * (gl_in[0].gl_Position / gl_in[0].gl_Position.w));
    vec3 p1 = vec3(ViewportMatrix * (gl_in[1].gl_Position / gl_in[1].gl_Position.w));
    vec3 p2 = vec3(ViewportMatrix * (gl_in[2].gl_Position / gl_in[2].gl_Position.w));

    // Find the altitudes (ha, hb and hc)
    float a = length(p1 - p2);
    float b = length(p2 - p0);
    float c = length(p1 - p0);
    float alpha = acos( (b*b + c*c - a*a) / (2.0*b*c) );
    float beta = acos( (a*a + c*c - b*b) / (2.0*a*c) );
    float ha = abs( c * sin( beta ) );
    float hb = abs( c * sin( alpha ) );
    float hc = abs( b * sin( alpha ) );

    // Send the triangle along with the edge distances
    GEdgeDistance = vec3( ha, 0, 0 );
    GNormal = vec3(TENormal[0]);
    GPosition = vec3(TEPosition[0]);
    gl_Position = gl_in[0].gl_Position;
    EmitVertex();

    GEdgeDistance = vec3( 0, hb, 0 );
    GNormal = vec3(TENormal[1]);
    GPosition = vec3(TEPosition[1]);
    gl_Position = gl_in[1].gl_Position;
    EmitVertex();

    GEdgeDistance = vec3( 0, 0, hc );
    GNormal = vec3(TENormal[2]);
    GPosition = vec3(TEPosition[2]);
    gl_Position = gl_in[2].gl_Position;
    EmitVertex();

    EndPrimitive();

    ghalfwayVec = halfwayVec[0];
    GLight = TELight[0];
}
```
Fragment Shader:
```
/*
 * Fragment Shader
 *
 * CSCI 441, Computer Graphics, Colorado School of Mines
 */
#version 410 core

in vec3 ghalfwayVec;
in vec3 GLight;
in vec3 GNormal;
in vec3 GPosition;
noperspective in vec3 GEdgeDistance;

layout( location = 0 ) out vec4 FragColor;

uniform vec3 mDiff, mAmb, mSpec;
uniform float shininess;

uniform light {
    vec3 lAmb, lDiff, lSpec, lPos;
};

// The mesh line settings
uniform struct LineInfo {
    float Width;
    vec4 Color;
} Line;

vec3 phongModel( vec3 pos, vec3 norm ) {
    vec3 lightVec2 = normalize(GLight);
    vec3 normalVec2 = -normalize(GNormal);
    vec3 halfwayVec2 = normalize(ghalfwayVec);

    float sDotN = max( dot(lightVec2, normalVec2), 0.0 );
    vec4 diffuse = vec4(lDiff * mDiff * sDotN, 1);
    vec4 specular = vec4(0.0);
    if( sDotN > 0.0 ) {
        specular = vec4(lSpec * mSpec * pow( max( 0.0, dot( halfwayVec2, normalVec2 ) ), shininess ),1);
    }
    vec4 ambient = vec4(lAmb * mAmb, 1);

    vec3 fragColorOut = vec3(diffuse + specular + ambient);
    // vec4 fragColorOut = vec4(0.0,0.0,0.0,0.0);
    return fragColorOut;
}

void main() {
    // /*****************************************/
    // /******* Final Color Calculations ********/
    // /*****************************************/

    // The shaded surface color.
    vec4 color = vec4(phongModel(GPosition, GNormal), 1.0);

    // Find the smallest distance
    float d = min( GEdgeDistance.x, GEdgeDistance.y );
    d = min( d, GEdgeDistance.z );

    // Determine the mix factor with the line color
    float mixVal = smoothstep( Line.Width - 1, Line.Width + 1, d );
    // float mixVal = 1;

    // Mix the surface color with the line color
    FragColor = vec4(mix( Line.Color, color, mixVal ));
    FragColor.a = 1;
}
```
I ended up stumbling across the solution to my issue. In the geometry shader, I was assigning the halfway vector and the light vector after ending the primitive, so the values of those vectors were never being correctly sent to the fragment shader. Since no data was given to the fragment shader, garbage values were used and the Phong shading model computed the fragment color from random values. Moving the two lines that came after EndPrimitive() to the top of the main function in the geometry shader resolved the issue.
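For reference, the relevant part of the corrected geometry shader looks roughly like this (just a fragment of the shader above showing the new ordering; nothing else changes):
```
void main() {
    // These must be written before the EmitVertex() calls, otherwise the
    // emitted vertices carry undefined values into the fragment shader.
    ghalfwayVec = halfwayVec[0];
    GLight = TELight[0];

    // ... altitude calculations and the three EmitVertex() blocks exactly as before ...

    EndPrimitive();
}
```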
Let's say that I want to downsample a 4x4 texture to 2x2 texels, do some fancy stuff, and upsample it again from 2x2 to 4x4. How do I calculate the correct neighbor texel offsets? I can't use bilinear or nearest filtering. I need to pick 4 samples for each fragment execution and take the maximum one before downsampling. The same holds for the upsampling pass, i.e., I need to pick 4 samples for each fragment execution.
Have I calculated the neighbor offsets correctly (I'm using a fullscreen quad)?
```
//Downsample: 1.0 / 2.0, Upsample: 1.0 / 4.0.
vec2 texelSize = vec2(1.0 / textureWidth, 1.0 / textureHeight);

const vec2 DOWNSAMPLE_OFFSETS[4] = vec2[]
(
    vec2(-0.5, -0.5) * texelSize,
    vec2(-0.5,  0.5) * texelSize,
    vec2( 0.5, -0.5) * texelSize,
    vec2( 0.5,  0.5) * texelSize
);

const vec2 UPSAMPLE_OFFSETS[4] = vec2[]
(
    vec2(-1.0, -1.0) * texelSize,
    vec2(-1.0,  1.0) * texelSize,
    vec2( 1.0, -1.0) * texelSize,
    vec2( 1.0,  1.0) * texelSize
);

//Fragment shader.
#version 400 core

uniform sampler2D mainTexture;

in vec2 texCoord;
out vec4 fragColor;

void main(void)
{
#if defined(DOWNSAMPLE)
    vec2 uv0 = texCoord + DOWNSAMPLE_OFFSETS[0];
    vec2 uv1 = texCoord + DOWNSAMPLE_OFFSETS[1];
    vec2 uv2 = texCoord + DOWNSAMPLE_OFFSETS[2];
    vec2 uv3 = texCoord + DOWNSAMPLE_OFFSETS[3];
#else
    vec2 uv0 = texCoord + UPSAMPLE_OFFSETS[0];
    vec2 uv1 = texCoord + UPSAMPLE_OFFSETS[1];
    vec2 uv2 = texCoord + UPSAMPLE_OFFSETS[2];
    vec2 uv3 = texCoord + UPSAMPLE_OFFSETS[3];
#endif

    float val0 = texture(mainTexture, uv0).r;
    float val1 = texture(mainTexture, uv1).r;
    float val2 = texture(mainTexture, uv2).r;
    float val3 = texture(mainTexture, uv3).r;

    //Do some stuff...
    fragColor = ...;
}
```
The offsets look correct, assuming texelSize is in both cases the texel size of the render target, that is, twice as big for the downsampling pass as for the upsampling pass. In the case of upsampling, you are not hitting the source texel centers exactly, but you come close enough that nearest-neighbor filtering snaps them to the intended result.
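Concretely, for this 4x4/2x2 example that means (a small sketch restating the comment in the question):
```
// Downsampling pass, rendering into the 2x2 target:
vec2 texelSize = vec2(1.0 / 2.0);

// Upsampling pass, rendering into the 4x4 target:
vec2 texelSize = vec2(1.0 / 4.0);
```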
A more efficient option is to use the textureGather instruction, specified in the ARB_texture_gather extension. When used to sample a texture, it returns the same four texels that would be used for bilinear filtering. It only returns a single component of each texel to produce a vec4, but given that you only care about the red component, it's an ideal solution if the extension is available. The code would then be the same for both downsampling and upsampling:
```
#define GATHER_RED_COMPONENT 0

vec4 vals = textureGather(mainTexture, texCoord, GATHER_RED_COMPONENT);

// Output the maximum value (splatted into the vec4 output)
fragColor = vec4(max(max(vals.x, vals.y), max(vals.z, vals.w)));
```
I cannot get the following shader to work.
Vertex Shader
```
#version 430

in vec4 vPosition;
in vec4 vColor;
in vec2 vTexCoord;
in vec3 vNormal;
in vec3 vTangent;
in vec3 vBitangent;

//out vec4 color;
out vec2 texCoord;

uniform mat4 Model;
uniform mat4 View;
uniform mat4 Projection;

out vec3 LightDirection_cameraspace;
out vec3 Position_worldspace;

uniform vec3 LightPosition;

out vec3 EyeDirection_cameraspace;
out vec3 LightDirection_tangentspace;
out vec3 EyeDirection_tangentspace;

void main()
{
    gl_Position = Projection * View * Model * vPosition;
    Position_worldspace = (Model * vPosition).xyz;

    vec3 vertexPosition_cameraspace = (View * Model * vPosition).xyz;
    EyeDirection_cameraspace = vec3(0, 0, 0) - vertexPosition_cameraspace;

    vec3 LightPosition_cameraspace = (View * vec4(LightPosition, 1)).xyz;
    LightDirection_cameraspace = LightPosition_cameraspace + EyeDirection_cameraspace;

    texCoord = vTexCoord;

    mat4 ModelView = View * Model;
    mat3 MV3x3 = mat3(ModelView);

    vec3 vertexTangent_cameraspace = MV3x3 * vTangent;
    vec3 vertexBitangent_cameraspace = MV3x3 * vBitangent;
    vec3 vertexNormal_cameraspace = MV3x3 * vNormal;

    mat3 TBN = transpose(mat3(
        vertexTangent_cameraspace,
        vertexBitangent_cameraspace,
        vertexNormal_cameraspace
    ));

    LightDirection_tangentspace = TBN * LightDirection_cameraspace;
    EyeDirection_tangentspace = TBN * EyeDirection_cameraspace;
}
```
Fragment Shader
```
#version 430

in vec4 vColor;
in vec2 texCoord;
out vec4 fColor;

uniform sampler2D tex;

in vec3 Position_worldspace;
in vec3 EyeDirection_cameraspace;

uniform vec3 LightPosition;

//uniform sampler2D DiffuseTex; pretty much just tex
uniform sampler2D NormalTex;
uniform sampler2D SpecularTex;

in vec3 LightDirection_tangentspace;
in vec3 EyeDirection_tangentspace;

void main()
{
    vec3 LightColor = vec3(1, 1, 1);
    float LightPower = 50.0f;

    // Editable values, next 3 lines:
    //vec3 MaterialDiffuseColor = texture2D(tex, texCoord).rgb;
    //vec3 MaterialAmbientColor = vec3(0.1, 0.1, 0.1) * MaterialDiffuseColor;
    //vec3 MaterialSpecularColor = texture2D(SpecularTex, texCoord).rgb * 0.3;
    vec3 MaterialDiffuseColor = vec3(0.5, 0.5, 0.5);
    vec3 MaterialAmbientColor = vec3(0.1, 0.1, 0.1);
    vec3 MaterialSpecularColor = vec3(1, 1, 1);

    // Edit this line if the normal map appears upside down.
    vec3 TextureNormal_tangentspace = normalize(texture2D(NormalTex, vec2(texCoord.x, texCoord.y)).rgb * 2.0 - 1.0);

    float distance = length(LightPosition - Position_worldspace);

    vec3 n = TextureNormal_tangentspace;
    vec3 l = normalize(LightDirection_tangentspace);
    float cosTheta = clamp(dot(n, l), 0, 1);

    vec3 E = normalize(EyeDirection_tangentspace);
    vec3 R = reflect(-l, n);
    float cosAlpha = clamp(dot(E, R), 0, 1);

    fColor = vec4(MaterialAmbientColor
                  + MaterialDiffuseColor * LightColor * LightPower * cosTheta / (distance*distance)
                  + MaterialSpecularColor * LightColor * LightPower * pow(cosAlpha, 5) / (distance*distance),
                  1.0);
}
```
Here's an imgur album with photos including the diffuse, specular, and normal maps as well as the image the program produces. http://imgur.com/a/8MMU1
P.S. The shader DOES compile, it just does not produce the desired image.
Firstly, nice code! I've seen shaders far more difficult to read than that.
You can simplify the projection/view/model transformations so there are no duplicates...
```
vec4 worldPosition  = Model * vPosition;
vec4 cameraPosition = View * worldPosition;
vec4 clipPosition   = Projection * cameraPosition;
gl_Position = clipPosition;
```
To transform the normal, you need a normal matrix (inverse transpose modelview), otherwise scales can mess things up.
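For example, a minimal sketch (assuming a ModelView uniform is available; the inverse could equally be computed once on the CPU and passed in):
```
// Normal matrix: inverse transpose of the upper-left 3x3 of the modelview matrix.
mat3 normalMatrix = transpose(inverse(mat3(ModelView)));
vec3 vertexNormal_cameraspace = normalize(normalMatrix * vNormal);
```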
It's pretty common to compute LightPosition_cameraspace outside the shader, since it doesn't change. You also don't need Position_worldspace as distance can be computed in camera/eye space. View * Model is pretty expensive, and can be computed once and passed in. Now that View isn't needed, you can simply replace both View and Model with the single ModelView matrix.
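A sketch of what that could look like in the vertex shader; ModelView and LightPosition_cameraspace here are assumed uniforms filled in once per draw call on the CPU:
```
#version 430

in vec4 vPosition;

uniform mat4 ModelView;                  // View * Model, computed once on the CPU
uniform mat4 Projection;
uniform vec3 LightPosition_cameraspace;  // (View * vec4(LightPosition, 1)).xyz, computed on the CPU

out vec3 EyeDirection_cameraspace;
out vec3 LightDirection_cameraspace;
out float lightDistance;                 // replaces Position_worldspace for attenuation

void main()
{
    vec4 positionCameraspace = ModelView * vPosition;
    gl_Position = Projection * positionCameraspace;

    EyeDirection_cameraspace = -positionCameraspace.xyz;
    LightDirection_cameraspace = LightPosition_cameraspace + EyeDirection_cameraspace;

    // Distance to the light measured in eye space, so Position_worldspace is not needed.
    lightDistance = length(LightPosition_cameraspace - positionCameraspace.xyz);
}
```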
The binormal can be computed implicitly from the normal and tangent via a cross product. This saves a fair bit of storage and memory bandwidth. If your normal/tangent are not perpendicular it may be necessary to use two cross products to ensure all three are orthonormal.
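Roughly, a sketch (assuming vNormal and vTangent are unit length, this replaces the vBitangent attribute in the shader above):
```
vec3 bitangent = cross(vNormal, vTangent);
// If the normal and tangent are not guaranteed perpendicular, a second
// cross product re-orthogonalises the tangent:
// vec3 tangent = cross(bitangent, vNormal);
```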
The main problem seems to be in your last line...
```
fColor = vec4(
    MaterialAmbientColor
    + (
        MaterialDiffuseColor * LightColor * LightPower * cosTheta
        / (distance*distance)
    )
    + (
        MaterialSpecularColor * LightColor * LightPower * pow(cosAlpha,5)
        / (distance*distance)
    )
    , 1.0);
```
LightPower is scaling your diffuse and specular components. Change it to the following:
```
float attenuation = 1.0 / (distance * distance); // just for readability; note the parentheses
fColor = vec4(
    MaterialAmbientColor
    + (
        MaterialDiffuseColor * LightColor * cosTheta * attenuation
    )
    + (
        MaterialSpecularColor * LightColor * pow(cosAlpha, LightPower) * attenuation
    )
    , 1.0);
```
If after this you're still not seeing the right result, you'll have to start debugging each variable to narrow down the troublemaker. For example, as #user1118321 suggested, check that the colours aren't just black or something. Check your LightDirection_tangentspace with fColor = vec4(l, 1) - you might look for (0,0,1)/blue in the parts that face the light directly. Make sure the tangents/normals/light position are non-zero, that the normal texture is bound, etc. (fColor = vec4(n, 1)).
I am looking to create a waving flag effect in a vertex shader, and here is what I have so far:
```
#version 330

layout(location = 0) in vec3 in_position;
layout(location = 1) in vec3 in_color;

uniform mat4 model_matrix, view_matrix, projection_matrix;
uniform vec3 culoare;
uniform float currentAngle;

out vec3 vertex_to_fragment_color;

void main(){
    vertex_to_fragment_color = culoare;

    vec4 v = vec4( in_position.x, in_position.y, in_position.z, 1.0 );
    v.y  = sin( in_position.x + currentAngle );
    v.y += sin( in_position.z + currentAngle );
    v.y *= in_position.x * 0.08;

    gl_Position = projection_matrix*view_matrix*model_matrix*v;
}
```
currentAngle is a variable that I'm sending to the shader, and it is updated roughly like this:
```
if ( currentAngle > 360.0f ) currentAngle -= 360.0f;
if ( currentAngle < 0.0f ) currentAngle += 360.0f;
```
I am new to this, so I could really use some help getting this right.
GLSL's sin() and cos() take their arguments in radians, not degrees.
You can use the GLSL function radians() to convert degrees to radians.
You'll also have to subdivide your flag rectangle to get a convincing effect.
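For instance, a minimal sketch of how the displacement lines in the vertex shader above could apply that conversion (everything else unchanged):
```
float angle = radians( currentAngle ); // degrees -> radians

v.y  = sin( in_position.x + angle );
v.y += sin( in_position.z + angle );
v.y *= in_position.x * 0.08;
```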
I want to have two light sources: a directional one and a spotlight. I cannot seem to figure out what I am doing wrong -- probably not understanding how shaders work! I get the first light fine, but there is no sign of the effects of the second one (the spotlight). Here is the fragment shader that I came up with:
```
varying vec4 diffuse, ambientGlobal, ambient;
varying vec3 normal, lightDir, halfVector;
varying float dist;

void main()
{
    vec3 n, halfV, viewV, ldir;
    float NdotL, NdotHV;
    vec4 color = ambientGlobal;
    float att, spotEffect;

    n = normalize(normal);

    NdotL = max(dot(n, normalize(lightDir)), 0.0);
    if (NdotL > 0.0) {
        att = 1.0 / (gl_LightSource[0].constantAttenuation +
                     gl_LightSource[0].linearAttenuation * dist +
                     gl_LightSource[0].quadraticAttenuation * dist * dist);
        color += att * (diffuse * NdotL + ambient);

        halfV = normalize(halfVector);
        NdotHV = max(dot(n, halfV), 0.0);
        color += att * gl_FrontMaterial.specular * gl_LightSource[0].specular * pow(NdotHV, gl_FrontMaterial.shininess);

        spotEffect = dot(normalize(gl_LightSource[1].spotDirection), normalize(-lightDir));
        if (spotEffect > gl_LightSource[1].spotCosCutoff) {
            spotEffect = pow(spotEffect, gl_LightSource[1].spotExponent);
            att = spotEffect / (gl_LightSource[1].constantAttenuation +
                                gl_LightSource[1].linearAttenuation * dist +
                                gl_LightSource[1].quadraticAttenuation * dist * dist);
            color += att * (diffuse * NdotL + ambient);

            halfV = normalize(halfVector);
            NdotHV = max(dot(n, halfV), 0.0);
            color += att * gl_FrontMaterial.specular * gl_LightSource[1].specular * pow(NdotHV, gl_FrontMaterial.shininess);
        }
    }

    gl_FragColor = color;
}
```
PS: Surely this is a problem that has been solved.... Anyone?
Here is what I came up with:
The vertex shader:
```
varying vec3 N;
varying vec3 v;

void main(void)
{
    v = vec3(gl_ModelViewMatrix * gl_Vertex);
    N = normalize(gl_NormalMatrix * gl_Normal);
    gl_Position = gl_ModelViewProjectionMatrix * gl_Vertex;
}
```
And the fragment shader:
```
varying vec3 N;
varying vec3 v;

#define MAX_LIGHTS 2

void main (void)
{
    // Initialise to zero before accumulating the per-light contributions.
    vec4 finalColour = vec4(0.0);

    for (int i = 0; i < MAX_LIGHTS; i++)
    {
        vec3 L = normalize(gl_LightSource[i].position.xyz - v);
        vec3 E = normalize(-v);
        vec3 R = normalize(-reflect(L, N));

        vec4 Iamb = gl_FrontLightProduct[i].ambient;

        vec4 Idiff = gl_FrontLightProduct[i].diffuse * max(dot(N, L), 0.0);
        Idiff = clamp(Idiff, 0.0, 1.0);

        vec4 Ispec = gl_FrontLightProduct[i].specular
                     * pow(max(dot(R, E), 0.0), 0.3 * gl_FrontMaterial.shininess);
        Ispec = clamp(Ispec, 0.0, 1.0);

        finalColour += Iamb + Idiff + Ispec;
    }

    gl_FragColor = gl_FrontLightModelProduct.sceneColor + finalColour;
}
```
Which gives this image: