Optimizing pixel-swapping shader - opengl

I'm using a shader that swaps colors/palettes on a texture. The shader checks a pixel for transparency and then replaces it if it is not transparent. Is there an efficient way to ignore zero-alpha pixels other than a potential branch? In this case, the branch where I set pixel = newPixel:
uniform bool alternate;
uniform sampler2D texture; // palette lookup texture

void main()
{
    // 'bitmap' and 'openfl_TextureCoordv' are provided by OpenFL
    vec4 pixel = texture2D(bitmap, openfl_TextureCoordv);
    if (alternate)
    {
        // look up the replacement color using the source pixel's channels
        vec4 newPixel = texture2D(texture, vec2(pixel.r, pixel.b));
        if (newPixel.a != 0.0)
            pixel = newPixel;
    }
    gl_FragColor = pixel;
}

You can use mix and step:
void main()
{
    vec4 pixel    = texture2D(bitmap, openfl_TextureCoordv);
    vec4 newPixel = texture2D(texture, vec2(pixel.r, pixel.b));
    gl_FragColor  = mix(pixel, newPixel,
                        float(alternate) * (1.0 - step(newPixel.a, 0.0)));
}
You may want to make a smooth transition depending on the alpha channel. In this case you only need mix:
gl_FragColor = mix(pixel, newPixel, float(alternate) * newPixel.a);

I would look into the step function. You can express the if statement as a weighted sum of the two outcomes, using step to build the 0/1 weight.
For example:
if (a >= 0.0) {
    b = c;
}
is equivalent to
b = step(0.0, a) * c + (1.0 - step(0.0, a)) * b;
since step(edge, x) returns 1.0 when x >= edge and 0.0 otherwise.
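The same selection is often written with mix, which takes the 0/1 result of step directly as the blend factor; a minimal sketch of the generic pattern:
// b = (a >= 0.0) ? c : b, written without a branch
float cond = step(0.0, a);   // 1.0 when a >= 0.0, otherwise 0.0
b = mix(b, c, cond);         // picks c when cond is 1.0, keeps b when it is 0.0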

Related

'variable' : is not available in current GLSL version gl_TexCoord

I have coded a fragment shader in the Vizard IDE and it's not working. The code is free of compilation errors except for one, which says: "ERROR: 0:? : 'variable' : is not available in current GLSL version gl_TexCoord."
FYI, gl_TexCoord is the output of the vertex shader, which is built into Vizard. Can someone help me fix it? Here is the code:
#version 440
// All uniforms as provided by Vizard
uniform sampler2D vizpp_InputDepthTex; // Depth texture
uniform sampler2D vizpp_InputTex; // Color texture
uniform ivec2 vizpp_InputSize; // Render size of screen in pixels
uniform ivec2 vizpp_InputPixelSize; // Pixel size (1.0/vizpp_InputSize)
uniform mat4 osg_ViewMatrix; // View matrix of camera
uniform mat4 osg_ViewMatrixInverse; // Inverse of view matrix
// Your own uniforms
//uniform sampler2D u_texture;
//uniform sampler2D u_normalTexture;
uniform sampler2D g_FinalSSAO;
const bool onlyAO = false; //Only show AO pass for debugging
const bool externalBlur = false; //Store AO in alpha slot for a later blur
struct ASSAOConstants
{
vec2 ViewportPixelSize; // .zw == 1.0 / ViewportSize.xy
vec2 HalfViewportPixelSize; // .zw == 1.0 / ViewportHalfSize.xy
vec2 DepthUnpackConsts;
vec2 CameraTanHalfFOV;
vec2 NDCToViewMul;
vec2 NDCToViewAdd;
ivec2 PerPassFullResCoordOffset;
vec2 PerPassFullResUVOffset;
vec2 Viewport2xPixelSize;
vec2 Viewport2xPixelSize_x_025; // Viewport2xPixelSize * 0.25 (for fusing add+mul into mad)
float EffectRadius; // world (viewspace) maximum size of the shadow
float EffectShadowStrength; // global strength of the effect (0 - 5)
float EffectShadowPow;
float EffectShadowClamp;
float EffectFadeOutMul; // effect fade out from distance (ex. 25)
float EffectFadeOutAdd; // effect fade out to distance (ex. 100)
float EffectHorizonAngleThreshold; // limit errors on slopes and caused by insufficient geometry tessellation (0.05 to 0.5)
float EffectSamplingRadiusNearLimitRec; // if viewspace pixel closer than this, don't enlarge shadow sampling radius anymore (makes no sense to grow beyond some distance, not enough samples to cover everything, so just limit the shadow growth; could be SSAOSettingsFadeOutFrom * 0.1 or less)
float DepthPrecisionOffsetMod;
float NegRecEffectRadius; // -1.0 / EffectRadius
float LoadCounterAvgDiv; // 1.0 / ( halfDepthMip[SSAO_DEPTH_MIP_LEVELS-1].sizeX * halfDepthMip[SSAO_DEPTH_MIP_LEVELS-1].sizeY )
float AdaptiveSampleCountLimit;
float InvSharpness;
int PassIndex;
vec2 QuarterResPixelSize; // used for importance map only
vec4 PatternRotScaleMatrices[5];
float NormalsUnpackMul;
float NormalsUnpackAdd;
float DetailAOStrength;
float Dummy0;
mat4 NormalsWorldToViewspaceMatrix;
};
uniform ASSAOConstants g_ASSAOConsts;
float PSApply( in vec4 inPos, in vec2 inUV)
{ //inPos = gl_FragCoord;
float ao;
uvec2 pixPos = uvec2(inPos.xy);
uvec2 pixPosHalf = pixPos / uvec2(2, 2);
// calculate index in the four deinterleaved source array texture
int mx = int (pixPos.x % 2);
int my = int (pixPos.y % 2);
int ic = mx + my * 2; // center index
int ih = (1-mx) + my * 2; // neighbouring, horizontal
int iv = mx + (1-my) * 2; // neighbouring, vertical
int id = (1-mx) + (1-my)*2; // diagonal
vec2 centerVal = texelFetchOffset( g_FinalSSAO, ivec2(pixPosHalf), 0, ivec2(ic, 0 ) ).xy;
ao = centerVal.x;
if (true){ // change to 0 if you want to disable last pass high-res blur (for debugging purposes, etc.)
vec4 edgesLRTB = UnpackEdges( centerVal.y );
// convert index shifts to sampling offsets
float fmx = mx;
float fmy = my;
// in case of an edge, push sampling offsets away from the edge (towards pixel center)
float fmxe = (edgesLRTB.y - edgesLRTB.x);
float fmye = (edgesLRTB.w - edgesLRTB.z);
// calculate final sampling offsets and sample using bilinear filter
vec2 uvH = (inPos.xy + vec2( fmx + fmxe - 0.5, 0.5 - fmy ) ) * 0.5 * g_ASSAOConsts.HalfViewportPixelSize;
float aoH = textureLodOffset( g_FinalSSAO, uvH, 0, ivec2(ih , 0) ).x;
vec2 uvV = (inPos.xy + vec2( 0.5 - fmx, fmy - 0.5 + fmye ) ) * 0.5 * g_ASSAOConsts.HalfViewportPixelSize;
float aoV = textureLodOffset( g_FinalSSAO, uvV, 0, ivec2( iv , 0) ).x;
vec2 uvD = (inPos.xy + vec2( fmx - 0.5 + fmxe, fmy - 0.5 + fmye ) ) * 0.5 * g_ASSAOConsts.HalfViewportPixelSize;
float aoD = textureLodOffset( g_FinalSSAO, uvD, 0, ivec2( id , 0) ).x;
// reduce weight for samples near edge - if the edge is on both sides, weight goes to 0
vec4 blendWeights;
blendWeights.x = 1.0;
blendWeights.y = (edgesLRTB.x + edgesLRTB.y) * 0.5;
blendWeights.z = (edgesLRTB.z + edgesLRTB.w) * 0.5;
blendWeights.w = (blendWeights.y + blendWeights.z) * 0.5;
// calculate weighted average
float blendWeightsSum = dot( blendWeights, vec4( 1.0, 1.0, 1.0, 1.0 ) );
ao = dot( vec4( ao, aoH, aoV, aoD ), blendWeights ) / blendWeightsSum;
}
return ao;
}
void main(void)
{
// Get base values
vec2 texCoord = gl_TexCoord[0].st;
vec4 color = texture2D(vizpp_InputTex,texCoord);
float depth = texture2D(vizpp_InputDepthTex,texCoord).x;
// Do not calculate if nothing visible (for VR for instance)
if (depth>=1.0)
{
gl_FragColor = color;
return;
}
float ao = PSApply(gl_FragCoord, texCoord);
// Output the result
if(externalBlur) {
gl_FragColor.rgb = color.rgb;
gl_FragColor.a = ao;
}
else if(onlyAO) {
gl_FragColor.rgb = vec3(ao,ao,ao);
gl_FragColor.a = 1.0;
}
else {
gl_FragColor.rgb = ao*color.rgb;
gl_FragColor.a = 1.0;
}
}
gl_TexCoord is a deprecated Compatibility Profile built-in language variable and was removed after GLSL version 1.20.
If you want to use gl_TexCoord, then you have to use GLSL version 1.20 (#version 120).
But you don't need the deprecated compatibility-profile built-in variable at all. Define a vertex shader output texCoord and use that output rather than gl_TexCoord:
#version 440

out vec2 texCoord;

void main()
{
    texCoord = ...;
    // [...]
}
Specify a corresponding input in the fragment shader:
#version 440

in vec2 texCoord;

void main()
{
    vec4 color = texture2D(vizpp_InputTex, texCoord.st);
    // [...]
}
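If you also control the vertex stage, the output just needs to be written there. Vizard normally supplies its own post-process vertex shader, so treat the following as a sketch only; the attribute names a_position and a_texCoord are placeholders, not Vizard's actual names:
#version 440

in vec2 a_position;   // hypothetical attribute: corner of a full-screen quad in NDC
in vec2 a_texCoord;   // hypothetical attribute: matching texture coordinate

out vec2 texCoord;    // replaces gl_TexCoord[0]

void main()
{
    texCoord    = a_texCoord;
    gl_Position = vec4(a_position, 0.0, 1.0);
}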

GLSL shader to convert pixel colors as per the usecase

In the code below, I want to accept two arguments, colors and activeColor. The colors array contains the list of colors allowed to be drawn in the image; activeColor is the selected color which has not yet been painted onto the image. I am using an HTML5 canvas to paint on top of the image and a WebGL shader to convert pixel colors for this use case.
While drawing (painting on the canvas) a color, my use case is to change any other colors present to activeColor.
I've written a basic attempt below, but it's not syntactically right. Help needed.
colors: dynamic Float32Array of colors, e.g. [0,0,0,1, 1,1,1,1] represents a black and white color array
activeColor: dynamic Float32Array of one color, e.g. [0,0,0,1] represents black
uniform sampler2D texture;
varying float colors;
varying float activeColor;
varying vec2 texCoord;
void main() {
vec4 color = texture2D(texture, texCoord);
for (int i = 0; i < colors.length; i+=3){
vec4 c = vec4(colors[i], colors[i+1], colors[i+2], colors[i+3]);
if(color.a > 0 && color.rgb != c) {
color.rgb = vec4(activeColor[0], activeColor[1], activeColor[2], activeColor[3]);
}
}
gl_FragColor = color;
}
The code doesn't make a lot of sense
varying float colors;
varying float activeColor;
Those are of type float, so they each hold only one value. They are not colors (vec4s), nor are they arrays.
colors.length
There is no such thing as someArray.length in GLSL. You can't pass variable-sized arrays in GLSL; they must be a fixed size. Similarly, you can't pass arrays as varyings.
It's not clear what you're trying to do.
Your code appears to be trying to draw activeColor everywhere the image does not contain the colors in colors.
You could do something like this:
#define MAX_COLORS 10

uniform sampler2D texture;
uniform vec4 colors[MAX_COLORS];
uniform int numColors;
uniform vec4 activeColor;
varying vec2 texCoord;

void main() {
    vec4 color = texture2D(texture, texCoord);
    for (int i = 0; i < MAX_COLORS; ++i) {
        if (i >= numColors) {
            break;
        }
        vec4 c = colors[i];
        if (color.a > 0.0 && color.rgb != c.rgb) {
            color.rgb = activeColor.rgb;
            break;
        }
    }
    gl_FragColor = color;
}
There's a limit on the number of uniform vec4s you can have. It's between 29 and 4096 depending on the GPU, though looking at the stats you probably want to stay under 221.
It's more common to pass in arrays of data as textures.
#define MAX_COLORS 10

uniform sampler2D texture;
uniform sampler2D colors;     // texture holding the color list
uniform vec2 colorsSize;      // size of the colors texture in pixels
uniform int numColors;
uniform vec4 activeColor;
varying vec2 texCoord;

vec4 getColor(int i) {
    vec2 pixelCoord = vec2(mod(float(i), colorsSize.x),
                           floor(float(i) / colorsSize.x));
    // sample the center of the texel
    return texture2D(colors, (pixelCoord + 0.5) / colorsSize);
}

void main() {
    vec4 color = texture2D(texture, texCoord);
    for (int i = 0; i < MAX_COLORS; ++i) {
        if (i >= numColors) {
            break;
        }
        vec4 c = getColor(i);
        if (color.a > 0.0 && color.rgb != c.rgb) {
            color.rgb = activeColor.rgb;
            break;
        }
    }
    gl_FragColor = color;
}
Now you can pass in the colors as a texture.
You might find tutorials on passing data to shaders through textures useful. A semi-related technique, replacing colors via a palette lookup texture, uses the same approach.

Discarding specific faces in shaders

I am trying to hide a specific face (assuming face 0) by using the discard keyword.
Vertex Shader:
uniform float v_triangleId;
varying vec3 normal;
void main() {
v_triangleId = floor(gl_VertexID / 3.0);
normal = normalize(gl_NormalMatrix * gl_Normal);
gl_Position = ftransform();
}
Pixel Shader:
uniform float v_triangleId;
void main(void)
{
float diff = abs(v_triangleId - 0); // I just want to hide face 0
if (diff < 0.01) {
discard;
}
}
However, in this case all the faces are hidden, instead of just a single face. I was wondering what went wrong.

OpenGL Deferred Pixelated Lighting

I'm working on a 3-pass deferred lighting system for a voxel game; however, I am having problems with pixelated lighting and ambient occlusion.
The first stage renders the color, position and normal of each pixel on the screen into separate textures. This part works correctly.
The second shader calculates an ambient occlusion value for each pixel on the screen and renders that to a texture. This part doesn't work correctly: the raw occlusion data is pixelated.
The third shader uses the color, position, normal and occlusion textures to render the game scene onto the screen. The lighting in this stage is also pixelated.
The SSAO (2nd pass) fragment shader comes from the www.LearnOpenGL.com tutorial for Screen Space Ambient Occlusion:
out float FragColor;
layout (binding = 0) uniform sampler2D gPosition; // World space position
layout (binding = 1) uniform sampler2D gNormal; // Normalised normal values
layout (binding = 2) uniform sampler2D texNoise;
uniform vec3 samples[64]; // 64 random precalculated vectors (-0.1 to 0.1 magnitude)
uniform mat4 projection;
float kernelSize = 64;
float radius = 1.5;
in vec2 TexCoords;
const vec2 noiseScale = vec2(1600.0/4.0, 900.0/4.0);
void main()
{
vec4 n = texture(gNormal, TexCoords);
// The alpha value of the normal is used to determine whether to apply SSAO to this pixel
if (int(n.a) > 0)
{
vec3 normal = normalize(n.rgb);
vec3 fragPos = texture(gPosition, TexCoords).xyz;
vec3 randomVec = normalize(texture(texNoise, TexCoords * noiseScale).xyz);
// Some maths. I don't understand this bit, it's from www.learnopengl.com
vec3 tangent = normalize(randomVec - normal * dot(randomVec, normal));
vec3 bitangent = cross(normal, tangent);
mat3 TBN = mat3(tangent, bitangent, normal);
float occlusion = 0.0;
// Test 64 points around the pixel
for (int i = 0; i < kernelSize; i++)
{
vec3 sam = fragPos + TBN * samples[i] * radius;
vec4 offset = projection * vec4(sam, 1.0);
offset.xyz = (offset.xyz / offset.w) * 0.5 + 0.5;
// If the normals are different, increase the occlusion value
float l = length(normal - texture(gNormal, offset.xy).rgb);
occlusion += l * 0.3;
}
occlusion = 1 - (occlusion / kernelSize);
FragColor = occlusion;
}
}
The lighting and final fragment shader:
out vec4 FragColor;
in vec2 texCoords;
layout (binding = 0) uniform sampler2D gColor; // Colour of each pixel
layout (binding = 1) uniform sampler2D gPosition; // World-space position of each pixel
layout (binding = 2) uniform sampler2D gNormal; // Normalised normal of each pixel
layout (binding = 3) uniform sampler2D gSSAO; // Red channel contains occlusion value of each pixel
// Each of these textures are 300 wide and 2 tall.
// The first row contains light positions. The second row contains light colours.
uniform sampler2D playerLightData; // Directional lights
uniform sampler2D mapLightData; // Spherical lights
uniform float worldBrightness;
// Amount of player and map lights
uniform float playerLights;
uniform float mapLights;
void main()
{
vec4 n = texture(gNormal, texCoords);
// BlockData: a = 4
// ModelData: a = 2
// SkyboxData: a = 0;
// Don't do lighting calculations on the skybox
if (int(n.a) > 0)
{
vec3 Normal = n.rgb;
vec3 FragPos = texture(gPosition, texCoords).rgb;
vec3 Albedo = texture(gColor, texCoords).rgb;
vec3 lighting = Albedo * worldBrightness * texture(gSSAO, texCoords).r;
for (int i = 0; i < playerLights; i++)
{
vec3 pos = texelFetch(playerLightData, ivec2(i, 0), 0).rgb;
vec3 direction = pos - FragPos;
float l = length(direction);
if (l < 40)
{
// Direction of the light to the position
vec3 spotDir = normalize(direction);
// Angle of the cone of the light
float angle = dot(spotDir, -normalize(texelFetch(playerLightData, ivec2(i, 1), 0).rgb));
// Crop the cone
if (angle >= 0.95)
{
float fade = (angle - 0.95) * 40;
lighting += (40.0 - l) / 40.0 * max(dot(Normal, spotDir), 0.0) * Albedo * fade;
}
}
}
for (int i = 0; i < mapLights; i++)
{
// Compare this pixel's position with the light's position
vec3 difference = texelFetch(mapLightData, ivec2(i, 0), 0).rgb - FragPos;
float l = length(difference);
if (l < 7.0)
{
lighting += (7.0 - l) / 7.0 * max(dot(Normal, normalize(difference)), 0.0) * Albedo * texelFetch(mapLightData, ivec2(i, 1), 0).rgb;
}
}
FragColor = vec4(lighting, 1.0);
}
else
{
FragColor = vec4(texture(gColor, texCoords).rgb, 1.0);
}
}
The size of each block face in the game is 1x1 (world-space size). I have tried splitting these faces up into smaller triangles, but there wasn't much visible difference.
How can I increase the resolution of the lighting and SSAO data to reduce these pixelated artifacts? Thank you in advance.
Good news! Thanks to some_rand over at the GameDev Stack Exchange, I was able to fix this by upgrading the precision of my position buffer from GL_RGBA16F to GL_RGBA32F.

OpenGL GLSL blend two textures by arbitrary shape

I have a full-screen quad with two textures.
I want to blend the two textures in an arbitrary shape according to user selection.
For example, the quad at first shows 100% texture0 while texture1 is transparent.
If the user selects a region, for example a circle, by dragging the mouse on the quad, then the circle region should display both texture0 and texture1 as translucent.
The region not enclosed by the circle should still show only texture0.
(In the example image, the textures are simplified to flat colors.)
For now, I have achieved blending the two textures on the quad, but the blending region can only be vertical slices because I use the step() function.
My frag shader:
uniform sampler2D Texture0;
uniform sampler2D Texture1;
uniform float alpha;
uniform float leftBlend;
uniform float rightBlend;
varying vec4 oColor;
varying vec2 oTexCoord;
void main()
{
vec4 first_sample = texture2D(Texture0, oTexCoord);
vec4 second_sample = texture2D(Texture1, oTexCoord);
float stepLeft = step(leftBlend, oTexCoord.x);
float stepRight = step(rightBlend, 1.0 - oTexCoord.x);
if(stepLeft == 1.0 && stepRight == 1.0)
gl_FragColor = oColor * first_sample;
else
gl_FragColor = oColor * (first_sample * alpha + second_sample * (1.0-alpha));
if (gl_FragColor.a < 0.4)
discard;
}
To achieve an arbitrary shape, I assume I need to create an alpha mask texture the same size as texture0 and texture1?
Then I pass that texture to the fragment shader and check its values: if the value is 0, show texture0; if the value is 1, blend texture0 and texture1.
Is my approach correct? Can you point me to any samples?
I want an effect like the one in "OpenGL - mask with multiple textures",
but I want to create the mask texture dynamically in my program, and I want to implement the blending in GLSL.
I have got blending working with a black-and-white mask texture:
uniform sampler2D TextureMask;
vec4 mask_sample = texture2D(TextureMask, oTexCoord);
if (mask_sample.r == 0.0)
    gl_FragColor = first_sample;
else
    gl_FragColor = first_sample * alpha + second_sample * (1.0 - alpha);
The mask texture is currently loaded statically from an image on disk; now I just need to create the mask texture dynamically in OpenGL.
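One refinement to the mask lookup above: feeding the mask value straight into mix avoids the exact equality test and gives soft edges for free when the mask is grayscale. A minimal sketch, reusing the sampler and varying names from the question:
uniform sampler2D Texture0;
uniform sampler2D Texture1;
uniform sampler2D TextureMask;   // r = 0.0 shows Texture0 only, r = 1.0 shows the full blend
uniform float alpha;
varying vec2 oTexCoord;

void main()
{
    vec4 first_sample  = texture2D(Texture0, oTexCoord);
    vec4 second_sample = texture2D(Texture1, oTexCoord);
    vec4 blended       = first_sample * alpha + second_sample * (1.0 - alpha);

    // interpolate between the unblended and blended results by the mask value
    float mask   = texture2D(TextureMask, oTexCoord).r;
    gl_FragColor = mix(first_sample, blended, mask);
}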
Here's one approach and sample.
Create a boolean test for whether you want to blend.
In my sample, I use an equation for a circle centered on the screen.
Then blend (I blended by a weighted addition of the two colors).
(Note: I didn't have texture coords to work with in this sample, so I used the screen resolution to determine the circle position.)
uniform vec2 resolution;
void main( void ) {
vec2 position = gl_FragCoord.xy / resolution;
// test if we're "in" or "out" of the blended region
// let's use a circle of radius 0.5, but you can make this more complex and/or pass this value in from the user
bool isBlended = (position.x - 0.5) * (position.x - 0.5) +
(position.y - 0.5) * (position.y - 0.5) > 0.25;
vec4 color1 = vec4(1,0,0,1); // this could come from texture 1
vec4 color2 = vec4(0,1,0,1); // this could come from texture 2
vec4 finalColor;
if (isBlended)
{
// blend
finalColor = color1 * 0.5 + color2 * 0.5;
}
else
{
// don't blend
finalColor = color1;
}
gl_FragColor = finalColor;
}
See the sample running here: http://glsl.heroku.com/e#18231.0
(I tried to post my sample image but I don't have enough rep.) Sorry :/
Update:
Here's another sample using mouse position to determine the position of the blended area.
To run, paste the code in this sandbox site: https://www.shadertoy.com/new
This one should work on objects of any shape, as long as you have the mouse data set up correctly.
void main(void)
{
vec2 position = gl_FragCoord.xy;
// test if we're "in" or "out" of the blended region
// let's use a circle of radius 10px, but you can make this more complex and/or pass this value in from the user
float diffX = position.x - iMouse.x;
float diffY = position.y - iMouse.y;
bool isBlended = (diffX * diffX) + (diffY * diffY) < 100.0;
vec4 color1 = vec4(1,0,0,1); // this could come from texture 1
vec4 color2 = vec4(0,1,0,1); // this could come from texture 2
vec4 finalColor;
if (isBlended)
{
// blend
finalColor = color1 * 0.5 + color2 * 0.5;
}
else
{
// don't blend
finalColor = color1;
}
gl_FragColor = finalColor;
}
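To tie the sample back to the original question, the same circle test can drive the two actual textures instead of the placeholder colors. A minimal sketch, where u_center and u_radius are assumed uniforms (in texture-coordinate space) fed from the mouse drag, and smoothstep softens the rim:
uniform sampler2D Texture0;
uniform sampler2D Texture1;
uniform float alpha;
uniform vec2  u_center;   // assumed uniform: circle center in texture coordinates
uniform float u_radius;   // assumed uniform: circle radius in texture coordinates
varying vec2 oTexCoord;

void main()
{
    vec4 first_sample  = texture2D(Texture0, oTexCoord);
    vec4 second_sample = texture2D(Texture1, oTexCoord);
    vec4 blended       = first_sample * alpha + second_sample * (1.0 - alpha);

    // 1.0 inside the circle, 0.0 outside, with a thin smoothed rim
    float d      = distance(oTexCoord, u_center);
    float inside = 1.0 - smoothstep(u_radius - 0.01, u_radius, d);

    gl_FragColor = mix(first_sample, blended, inside);
}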