GLSL loop wrong behaviour - OpenGL

We have the following piece of GLSL code:
#version 120
#define LIGHT_COUNT 2
...
varying vec3 v_light_tan[LIGHT_COUNT];
varying float v_attenuation[LIGHT_COUNT];
uniform vec3 light_position[LIGHT_COUNT];
void main(void) {
...
for ( int index = 0; index < LIGHT_COUNT; index++ )
{
float distance = length( light_position[index] - world_pos );
v_attenuation[index] = 1.0 / ( 1.0 + 0.05 * distance * distance);
vec3 light_loc = vec3(nv_m_model_inv * vec4 (light_position[index], 1.0) );
vec3 light_dir_loc = normalize( nv_position - light_loc );
v_light_tan[index] = vec3(
dot( nv_tangent.xyz, light_dir_loc ),
dot( bitangent, light_dir_loc ),
dot( nv_normal, light_dir_loc )
);
}
I've read a lot about problems with loops, so I went with a hardcoded loop. But even this simple code seems to fail. The above works for LIGHT_COUNT = 1, but for 2, the value of v_attenuation[0] gets corrupted. Surprisingly, changing the code to:
for ( int index = 0; index < LIGHT_COUNT; index++ )
{
float distance = length( light_position[index] - world_pos );
v_attenuation[index] = 1.0 / ( 1.0 + 0.05 * distance * distance);
}
for ( int index = 0; index < LIGHT_COUNT; index++ )
{
vec3 light_loc = vec3(nv_m_model_inv * vec4 (light_position[index], 1.0) );
vec3 light_dir_loc = normalize( nv_position - light_loc );
v_light_tan[index] = vec3(
dot( nv_tangent.xyz, light_dir_loc ),
dot( bitangent, light_dir_loc ),
dot( nv_normal, light_dir_loc )
);
}
... ergo two separate loops, works. Adding #pragma optionNV(unroll all) works as well.
Both of those seem hacky though, for such a simple loop. Is GLSL that broken, or did I miss some rule or guideline for writing GLSL loops?
If relevant:
OpenGL Vendor : NVIDIA Corporation
OpenGL Renderer : GeForce GTX 690/PCIe/SSE2
OpenGL Version : 4.4.0
OpenGL GLSL Version : 4.40 NVIDIA via Cg compiler
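For reference, a minimal sketch of the pragma workaround mentioned above; the NVIDIA-specific pragma is typically placed right after the version directive, and the rest of the shader stays unchanged:
#version 120
// NVIDIA-specific hint: ask the compiler to fully unroll loops.
#pragma optionNV(unroll all)
#define LIGHT_COUNT 2
// ... rest of the shader as above; the single combined loop then behaves like the split version.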

Related

How will the value be passed to the fragment shader

This is an extract from a geometry shader.
#version 460 core
layout(triangles) in;
layout(triangle_strip, max_vertices = 3) out;
noperspective out vec3 g_edge_distance;
in vec3 world_pos[];
in vec3 normal[];
void main()
{
// Calc triangle altitudes
float ha = abs( c * sin( beta ) );
float hb = abs( c * sin( alpha ) );
float hc = abs( b * sin( alpha ) );
g_edge_distance = vec3( ha, 0, 0 );
gl_Position = gl_in[0].gl_Position;
EmitVertex();
g_edge_distance = vec3( 0, hb, 0 );
gl_Position = gl_in[1].gl_Position;
EmitVertex();
g_edge_distance = vec3( 0, 0, hc );
gl_Position = gl_in[2].gl_Position;
EmitVertex();
EndPrimitive();
}
What I want to understand is how the value of g_edge_distance will be passed to the fragment shader.
You will get a value that is linearly interpolated in screen (window) space, because of the noperspective qualifier.
You can read more about type qualifiers on the official Khronos page.
To quickly see and understand the difference, have a look at the Geeks3D tutorial.
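To make that concrete, here is a minimal sketch of a fragment shader on the receiving end. It declares the same noperspective input and uses the screen-space interpolated edge distances for a wireframe overlay; the u_line_width uniform and the two colors are placeholders for illustration:
#version 460 core
noperspective in vec3 g_edge_distance; // interpolated per fragment in window space
out vec4 frag_color;
uniform float u_line_width; // hypothetical: desired wire width in pixels
void main()
{
// Distance from this fragment to the nearest triangle edge, in pixels.
float d = min( g_edge_distance.x, min( g_edge_distance.y, g_edge_distance.z ) );
// Blend a wire color over a fill color near the edges.
float wire = 1.0 - smoothstep( u_line_width - 1.0, u_line_width + 1.0, d );
frag_color = mix( vec4( 0.2, 0.2, 0.2, 1.0 ), vec4( 1.0 ), wire );
}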

Implementing RayPicking in a Fragment Shader

I am having trouble implementing ray picking in a fragment shader. I understand I must start from my mouse coordinates, but I am not sure what to multiply my origin with.
I have tried creating a 3-component vector, with x and y as my mouse coordinates divided by my resolution, and for z I have tried using my p (the point in space, calculated as rayOrigin + rayDirection * t), with no luck.
Here is a Shadertoy that tries what I am looking for.
float ray( vec3 ro, vec3 rd, out float d )
{
float t = 0.0; d = 0.0;
for( int i = 0; i < STEPS; ++i )
{
vec3 p = ro + rd * t;
d = map( p );
if( d < EPS || t > FAR ) break;
t += d;
}
return t;
}
vec3 shad( vec3 ro, vec3 rd, vec2 uv )
{
float t = 0.0, d = 0.0;
t = ray( ro, rd, d );
float x = ( 2.0 * iMouse.x ) / iResolution.x - 1.0;
float y = 1.0 - ( 2.0 * iMouse.y ) / iResolution.y;
float z = 1.0;
vec3 p = ro + rd * t;
vec3 n = nor( p );
vec3 lig = ( vec3( x, -y, z ) );
lig += ro + rd;
lig = normalize( lig );
vec3 ref = reflect( rd, n );
float amb = 0.5 + 0.5 * n.y;
float dif = max( 0.0, dot( n, lig ) );
float spe = pow( clamp( dot( ref, lig ), 0.0, 1.0 ), 16.0 );
vec3 col = vec3( 0 );
col += 0.1 * amb;
col += 0.2 * dif;
col += spe;
return col;
}
I expect to get a light that moves as if I was shooting a ray from my mouse coordinates to the SDF.
This is the correct code:
// Our sphere-tracing algorithm.
float ray( vec3 ro, vec3 rd, out float d )
{
float t = 0.0; d = 0.0;
for( int i = 0; i < STEPS; ++i )
{
vec3 p = ro + rd * t;
d = map( p );
if( d < EPS || t > FAR ) break;
t += d;
}
return t;
}
// Here we compute all our lighting calculations.
vec3 shad( vec3 ro, vec3 rd, vec2 uv )
{
float t = 0.0, d = 0.0;
t = ray( ro, rd, d );
vec3 p = ro + rd * t;
vec3 n = nor( p );
// The values of the variable lig are not random: they are the same position as our rayOrigin for the sphere-tracing algorithm, which goes in main's body.
vec3 lig = ( vec3( 0, 0, 2 ) );
// Here is where we "shoot" our ray from the mouse position. Our ray's origin.
vec2 uvl = ( -iResolution.xy + 2.0 * iMouse.xy ) / iResolution.y;
// This is our ray's direction.
vec3 lir = normalize( vec3( uvl, -1 ) );
// Here we get our SDF(dO) and our incrementing value(tO).
float dO = 0.0, tO = ray( lig, lir, dO );
// Now we update our vector with the direction and incrementing steps.
lig += lir * tO;
// We must normalize the light vector: it is just a direction, and its magnitude would skew the lighting calculations.
lig = normalize( lig );
vec3 ref = reflect( rd, n );
float amb = 0.5 + 0.5 * n.y;
float dif = max( 0.0, dot( n, lig ) );
float spe = pow( clamp( dot( ref, lig ), 0.0, 1.0 ), 16.0 );
vec3 col = vec3( 0 );
col += 0.1 * amb;
col += 0.2 * dif;
col += spe;
return col;
}
// Last step, here we create the origin and direction of our rays that we shoot against the SDF.
void mainImage( out vec4 fragColor, in vec2 fragCoord )
{
//Normalize the coordinates.
vec2 uv = ( -iResolution.xy + 2.0 * fragCoord.xy ) / iResolution.y;
// This is our ray's origin. We must use the same values for our lig's origin.
vec3 ro = vec3( 0, 0, 2 );
// This is our ray's direction.
vec3 rd = normalize( vec3( uv, -1 ) );
// Our SDF(d) and our incrementing steps(t), we only need our SDF(d) to bail the shading calculations according to our epsilon(EPS).
float t = 0.0, d = 0.0;
t = ray( ro, rd, d );
vec3 col = d < EPS ? shad( ro, rd, uv ) : vec3( 0 );
fragColor = vec4( col, 1 );
}

Recolor sprites on the fly

I need to replace colors of a sprite.
Some examples I found on Google
Here is a seemingly working solution I've found for Unity - [How to Use a Shader to Dynamically Swap a Sprite's Colors][2]
How do I port it to cocos2d-x? Can someone please help with code examples?
I'm looking for a cocos2d-x v3 code snippet. Really looking forward to some help.
The algorithm in the article How to Use a Shader to Dynamically Swap a Sprite's Colors is very simple. It is based on a one-dimensional lookup table with 256 entries. This means the algorithm can map at most 256 different colors.
In detail, the new colors (the replacement colors) are stored in a one-dimensional texture with 256 entries. When a color is read from the original texture, a key is used to find the new color in the one-dimensional swap texture. The key is the red channel of the original color, which means that all distinct colors in the original texture must also have distinct red values. This is another restriction.
The original document (How to Use a Shader to Dynamically Swap a Sprite's Colors) says:
Note that this may not work as expected if two or more colors on the sprite texture share the same red value! When using this method, it's important to keep the red values of the colors in the sprite texture different.
Further, the algorithm mixes the original color and the swap color by the alpha channel of the swap color. The swap color is drawn where it is completely opaque, the original color is drawn where the swap color is completely transparent, and in between the two are linearly interpolated.
A GLSL function with this algorithm is very short and looks something like this:
uniform sampler2D u_spriteTexture; // sprite texture
uniform sampler1D u_swapTexture; // lookup texture with swap colors
vec4 SwapColor( vec2 textureCoord )
{
vec4 originalColor = texture( u_spriteTexture, textureCoord.st );
vec4 swapColor = texture( u_swapTexture, originalColor.r );
vec3 finalColor = mix( originalColor.rgb, swapColor.rgb, swapColor.a );
return vec4( finalColor.rgb, originalColor.a );
}
Suggested Algorithm
Reading the suggested shader from the question, I came up with the following solution. That shader uses an algorithm to convert from RGB to hue, saturation, and value and back. I took this idea and added my own thoughts.
Performant conversion functions between RGB and HSV can be found at RGB to HSV/HSL/HCY/HCL in HLSL, which can easily be translated from HLSL to GLSL:
RGB to HSV
const float Epsilon = 1e-10;
vec3 RGBtoHCV( in vec3 RGB )
{
vec4 P = (RGB.g < RGB.b) ? vec4(RGB.bg, -1.0, 2.0/3.0) : vec4(RGB.gb, 0.0, -1.0/3.0);
vec4 Q = (RGB.r < P.x) ? vec4(P.xyw, RGB.r) : vec4(RGB.r, P.yzx);
float C = Q.x - min(Q.w, Q.y);
float H = abs((Q.w - Q.y) / (6.0 * C + Epsilon) + Q.z);
return vec3(H, C, Q.x);
}
vec3 RGBtoHSV(in vec3 RGB)
{
vec3 HCV = RGBtoHCV(RGB);
float S = HCV.y / (HCV.z + Epsilon);
return vec3(HCV.x, S, HCV.z);
}
HSV to RGB
vec3 HUEtoRGB(in float H)
{
float R = abs(H * 6.0 - 3.0) - 1.0;
float G = 2.0 - abs(H * 6.0 - 2.0);
float B = 2.0 - abs(H * 6.0 - 4.0);
return clamp( vec3(R,G,B), 0.0, 1.0 );
}
vec3 HSVtoRGB(in vec3 HSV)
{
vec3 RGB = HUEtoRGB(HSV.x);
return ((RGB - 1.0) * HSV.y + 1.0) * HSV.z;
}
As in the first algorithm of this answer, a one-dimensional lookup table is needed again. But the length of the lookup table does not have to be exactly 256; it is completely user dependent. The key is not the red channel but the hue value, which is a clear expression of the color and can easily be calculated as seen in RGBtoHCV and RGBtoHSV. The lookup table, however, has to contain a color assignment distributed linearly over the hue range from 0 to 1 of the original color.
The algorithm can be defined with the following steps:
Convert the original color to the original hue, saturation, and value
Use the original hue as key to find the swap color in the look up table
Convert the swap color to the swap hue, saturation, and value
Convert the hue of the swap color and the original saturation, and value to a new RGB color
Mix the original color and the new color by the alpha channel of the swap color
With this algorithm any RGB color can be swapped, by keeping the saturation and value of the original color. See the following short and clear GLSL function:
uniform sampler2D u_spriteTexture; // sprite texture
uniform sampler1D u_swapTexture; // lookup texture with swap colors
// the texture coordinate is the hue of the original color
vec4 SwapColor( vec2 textureCoord )
{
vec4 originalColor = texture( u_spriteTexture, textureCoord.st );
vec3 originalHSV = RGBtoHSV( originalColor.rgb );
vec4 lookUpColor = texture( u_swapTexture, originalHSV.x );
vec3 swapHSV = RGBtoHSV( lookUpColor.rgb );
vec3 swapColor = HSVtoRGB( vec3( swapHSV.x, originalHSV.y, originalHSV.z ) );
vec3 finalColor = mix( originalColor.rgb, swapColor.rgb, lookUpColor.a );
return vec4( finalColor.rgb, originalColor.a );
}
Apply to cocos2d-x v3.15
To apply the shader to cocos2d-x v3.15 I adapted the HelloWorldScene.h and HelloWorldScene.cpp in the project cpp-empty-test of the cocos2d-x v3.15 test projects.
The shader can be applied to any sprite and can swap up to 10 color tints, but this can easily be expanded. Note, the shader does not only change a single color; it matches all colors which are similar to a given color, even colors with a completely different saturation or brightness. Each color is swapped with a color that has the same saturation and brightness, but a new base color.
The information which controls the color swaps is stored in an array of vec3. The x component contains the hue of the original color, the y component contains the hue of the swap color, and the z component contains an epsilon value, which defines the color range.
The shader source files should be placed in the "resource/shader" subdirectory of the project directory.
Vertex shader shader/colorswap.vert
attribute vec4 a_position;
attribute vec2 a_texCoord;
attribute vec4 a_color;
varying vec4 cc_FragColor;
varying vec2 cc_FragTexCoord1;
void main()
{
gl_Position = CC_PMatrix * a_position;
cc_FragColor = a_color;
cc_FragTexCoord1 = a_texCoord;
}
Fragment shader shader/colorswap.frag
#ifdef GL_ES
precision mediump float;
#endif
varying vec4 cc_FragColor;
varying vec2 cc_FragTexCoord1;
const float Epsilon = 1e-10;
vec3 RGBtoHCV( in vec3 RGB )
{
vec4 P = (RGB.g < RGB.b) ? vec4(RGB.bg, -1.0, 2.0/3.0) : vec4(RGB.gb, 0.0, -1.0/3.0);
vec4 Q = (RGB.r < P.x) ? vec4(P.xyw, RGB.r) : vec4(RGB.r, P.yzx);
float C = Q.x - min(Q.w, Q.y);
float H = abs((Q.w - Q.y) / (6.0 * C + Epsilon) + Q.z);
return vec3(H, C, Q.x);
}
vec3 RGBtoHSV(in vec3 RGB)
{
vec3 HCV = RGBtoHCV(RGB);
float S = HCV.y / (HCV.z + Epsilon);
return vec3(HCV.x, S, HCV.z);
}
vec3 HUEtoRGB(in float H)
{
float R = abs(H * 6.0 - 3.0) - 1.0;
float G = 2.0 - abs(H * 6.0 - 2.0);
float B = 2.0 - abs(H * 6.0 - 4.0);
return clamp( vec3(R,G,B), 0.0, 1.0 );
}
vec3 HSVtoRGB(in vec3 HSV)
{
vec3 RGB = HUEtoRGB(HSV.x);
return ((RGB - 1.0) * HSV.y + 1.0) * HSV.z;
}
#define MAX_SWAP 10
uniform vec3 u_swap[MAX_SWAP];
uniform int u_noSwap;
void main()
{
vec4 originalColor = texture2D(CC_Texture0, cc_FragTexCoord1);
vec3 originalHSV = RGBtoHSV( originalColor.rgb );
vec4 swapColor = vec4( originalColor.rgb, 1.0 );
for ( int i = 0; i < 10 ; ++ i )
{
if ( i >= u_noSwap )
break;
if ( abs( originalHSV.x - u_swap[i].x ) < u_swap[i].z )
{
swapColor.rgb = HSVtoRGB( vec3( u_swap[i].y, originalHSV.y, originalHSV.z ) );
break;
}
}
vec3 finalColor = mix( originalColor.rgb, swapColor.rgb, swapColor.a );
gl_FragColor = vec4( finalColor.rgb, originalColor.a );
}
Header file HelloWorldScene.h:
#ifndef __HELLOWORLD_SCENE_H__
#define __HELLOWORLD_SCENE_H__
#include "cocos2d.h"
#define MAX_COLOR 10
class HelloWorld : public cocos2d::Scene
{
public:
virtual bool init() override;
static cocos2d::Scene* scene();
void menuCloseCallback(Ref* sender);
CREATE_FUNC(HelloWorld);
void InitSwapInfo( int i, const cocos2d::Color3B &sourceCol, const cocos2d::Color3B &swapCol, float deviation );
private:
cocos2d::GLProgram* mProgramExample;
cocos2d::Vec3 mSource[MAX_COLOR];
cocos2d::Vec3 mSwap[MAX_COLOR];
float mDeviation[MAX_COLOR];
cocos2d::Vec3 mSwapInfo[MAX_COLOR];
};
#endif // __HELLOWORLD_SCENE_H__
Source file HelloWorldScene.cpp:
Note that the C++ function RGBtoHue and the hue computation in the GLSL function RGBtoHCV should implement exactly the same algorithm.
The input to the function SwapInfo are RGB colors encoded as cocos2d::Vec3. If the source channels of the RGB colors are bytes (unsigned char), then they can easily be converted to cocos2d::Vec3 by cocos2d::Vec3( R / 255.0f, G / 255.0f, B / 255.0f ).
#include "HelloWorldScene.h"
#include "AppMacros.h"
USING_NS_CC;
float RGBtoHue( const cocos2d::Vec3 &RGB )
{
const float Epsilon = 1e-10f;
cocos2d::Vec4 P = (RGB.y < RGB.z) ?
cocos2d::Vec4(RGB.z, RGB.y, -1.0f, 2.0f/3.0f) :
cocos2d::Vec4(RGB.y, RGB.z, 0.0f, -1.0f/3.0f);
cocos2d::Vec4 Q = (RGB.x < P.x) ?
cocos2d::Vec4(P.x, P.y, P.w, RGB.x) :
cocos2d::Vec4(RGB.x, P.y, P.z, P.x);
float C = Q.x - (Q.w < Q.y ? Q.w : Q.y);
float H = fabs((Q.w - Q.y) / (6.0f * C + Epsilon) + Q.z);
return H;
}
cocos2d::Vec3 SwapInfo( const cocos2d::Vec3 &sourceCol, const cocos2d::Vec3 &swapCol, float epsi )
{
return cocos2d::Vec3( RGBtoHue( sourceCol ), RGBtoHue( swapCol ), epsi );
}
void HelloWorld::InitSwapInfo( int i, const cocos2d::Color3B &sourceCol, const cocos2d::Color3B &swapCol, float deviation )
{
mSource[i] = cocos2d::Vec3( sourceCol.r/255.0, sourceCol.g/255.0, sourceCol.b/255.0 );
mSwap[i] = cocos2d::Vec3( swapCol.r/255.0, swapCol.g/255.0, swapCol.b/255.0 );
mDeviation[i] = deviation;
mSwapInfo[i] = SwapInfo( mSource[i], mSwap[i], mDeviation[i] );
}
Scene* HelloWorld::scene()
{
return HelloWorld::create();
}
bool HelloWorld::init()
{
if ( !Scene::init() ) return false;
auto visibleSize = Director::getInstance()->getVisibleSize();
auto origin = Director::getInstance()->getVisibleOrigin();
auto closeItem = MenuItemImage::create(
"CloseNormal.png",
"CloseSelected.png",
CC_CALLBACK_1(HelloWorld::menuCloseCallback,this));
closeItem->setPosition(origin + Vec2(visibleSize) - Vec2(closeItem->getContentSize() / 2));
auto menu = Menu::create(closeItem, nullptr);
menu->setPosition(Vec2::ZERO);
this->addChild(menu, 1);
auto sprite = Sprite::create("HelloWorld.png");
sprite->setPosition(Vec2(visibleSize / 2) + origin);
mProgramExample = new GLProgram();
mProgramExample->initWithFilenames("shader/colorswap.vert", "shader/colorswap.frag");
mProgramExample->bindAttribLocation(GLProgram::ATTRIBUTE_NAME_POSITION, GLProgram::VERTEX_ATTRIB_POSITION);
mProgramExample->bindAttribLocation(GLProgram::ATTRIBUTE_NAME_COLOR, GLProgram::VERTEX_ATTRIB_COLOR);
mProgramExample->bindAttribLocation(GLProgram::ATTRIBUTE_NAME_TEX_COORD, GLProgram::VERTEX_ATTRIB_TEX_COORDS);
mProgramExample->link();
mProgramExample->updateUniforms();
mProgramExample->use();
GLProgramState* state = GLProgramState::getOrCreateWithGLProgram(mProgramExample);
sprite->setGLProgram(mProgramExample);
sprite->setGLProgramState(state);
InitSwapInfo( 0, cocos2d::Color3B( 41, 201, 226 ), cocos2d::Color3B( 255, 0, 0 ), 0.1f );
InitSwapInfo( 1, cocos2d::Color3B( 249, 6, 6 ), cocos2d::Color3B( 255, 255, 0 ), 0.1f );
int noOfColors = 2;
state->setUniformVec3v("u_swap", noOfColors, mSwapInfo);
state->setUniformInt("u_noSwap", noOfColors);
this->addChild(sprite);
return true;
}
void HelloWorld::menuCloseCallback(Ref* sender)
{
Director::getInstance()->end();
#if (CC_TARGET_PLATFORM == CC_PLATFORM_IOS)
exit(0);
#endif
}
Compare RGB values instead of Hue
A fragment shader which directly compares RGB colors would look like this:
#ifdef GL_ES
precision mediump float;
#endif
varying vec4 cc_FragColor;
varying vec2 cc_FragTexCoord1;
const float Epsilon = 1e-10;
vec3 RGBtoHCV( in vec3 RGB )
{
vec4 P = (RGB.g < RGB.b) ? vec4(RGB.bg, -1.0, 2.0/3.0) : vec4(RGB.gb, 0.0, -1.0/3.0);
vec4 Q = (RGB.r < P.x) ? vec4(P.xyw, RGB.r) : vec4(RGB.r, P.yzx);
float C = Q.x - min(Q.w, Q.y);
float H = abs((Q.w - Q.y) / (6.0 * C + Epsilon) + Q.z);
return vec3(H, C, Q.x);
}
vec3 RGBtoHSV(in vec3 RGB)
{
vec3 HCV = RGBtoHCV(RGB);
float S = HCV.y / (HCV.z + Epsilon);
return vec3(HCV.x, S, HCV.z);
}
vec3 HUEtoRGB(in float H)
{
float R = abs(H * 6.0 - 3.0) - 1.0;
float G = 2.0 - abs(H * 6.0 - 2.0);
float B = 2.0 - abs(H * 6.0 - 4.0);
return clamp( vec3(R,G,B), 0.0, 1.0 );
}
vec3 HSVtoRGB(in vec3 HSV)
{
vec3 RGB = HUEtoRGB(HSV.x);
return ((RGB - 1.0) * HSV.y + 1.0) * HSV.z;
}
#define MAX_SWAP 10
uniform vec3 u_orig[MAX_SWAP];
uniform vec3 u_swap[MAX_SWAP];
uniform float u_deviation[MAX_SWAP];
uniform int u_noSwap;
void main()
{
vec4 originalColor = texture2D(CC_Texture0, cc_FragTexCoord1);
vec3 originalHSV = RGBtoHSV( originalColor.rgb );
vec4 swapColor = vec4( originalColor.rgb, 1.0 );
for ( int i = 0; i < 10 ; ++ i )
{
if ( i >= u_noSwap )
break;
if ( all( lessThanEqual( abs(originalColor.rgb - u_orig[i]), vec3(u_deviation[i]) ) ) )
{
vec3 swapHSV = RGBtoHSV( u_swap[i].rgb );
swapColor.rgb = HSVtoRGB( vec3( swapHSV.x, originalHSV.y, originalHSV.z ) );
break;
}
}
vec3 finalColor = mix( originalColor.rgb, swapColor.rgb, swapColor.a );
gl_FragColor = vec4( finalColor.rgb, originalColor.a );
}
Note, the initialization of the uniforms has to be adapted:
int noOfColors = 2;
state->setUniformVec3v("u_orig", noOfColors, mSource);
state->setUniformVec3v("u_swap", noOfColors, mSwap);
state->setUniformFloatv("u_deviation", noOfColors, mDeviation);
state->setUniformInt("u_noSwap", noOfColors);
Extension to the answer
If only exactly specified colors should be exchanged, the shader can be simplified a lot. For this, the deviations u_deviation have to be kept small (e.g. deviation = 0.02).
#ifdef GL_ES
precision mediump float;
#endif
varying vec4 cc_FragColor;
varying vec2 cc_FragTexCoord1;
#define MAX_SWAP 11
uniform vec3 u_orig[MAX_SWAP];
uniform vec3 u_swap[MAX_SWAP];
uniform float u_deviation[MAX_SWAP];
uniform int u_noSwap;
void main()
{
vec4 originalColor = texture2D(CC_Texture0, cc_FragTexCoord1);
vec4 swapColor = vec4( originalColor.rgb, 1.0 );
for ( int i = 0; i < MAX_SWAP ; ++ i )
{
vec3 deltaCol = abs( originalColor.rgb - u_orig[i] );
float hit = step( deltaCol.x + deltaCol.y + deltaCol.z, u_deviation[i] * 3.0 );
swapColor.rgb = mix( swapColor.rgb, u_swap[i].rgb, hit );
}
gl_FragColor = vec4( swapColor.rgb, originalColor.a );
}
If each color in the source texture has an individual value in one color channel (meaning that value is only used for this particular color, e.g. in the red channel), then the shader code can be further simplified, because only that one channel has to be compared:
void main()
{
vec4 originalColor = texture2D(CC_Texture0, cc_FragTexCoord1);
vec4 swapColor = vec4( originalColor.rgb, 1.0 );
for ( int i = 0; i < MAX_SWAP ; ++ i )
{
float hit = step( abs( originalColor.r - u_orig[i].r ), u_deviation[i] );
swapColor.rgb = mix( swapColor.rgb, u_swap[i].rgb, hit );
}
gl_FragColor = vec4( swapColor.rgb, originalColor.a );
}
A further optimization brings us back to the first algorithm described in this answer. The big advantage of that algorithm is that each color is swapped (except where the alpha channel of the swap texture is 0), yet no expensive search of the lookup table has to be done in the shader.
Each color is swapped with the corresponding color according to its red color channel. As mentioned, if a color should not be swapped, the alpha channel of the swap texture has to be set to 0.
A new member mSwapTexture has to be added to the class:
cocos2d::Texture2D* mSwapTexture;
The texture can be easily created, and the uniform texture sampler can be set like this:
#include <array>
.....
std::array< unsigned char, 256 * 4 > swapPlane{ 0 };
for ( int c = 0; c < noOfColors; ++ c )
{
size_t i = (size_t)( mSource[c].x * 255.0 ) * 4;
swapPlane[i+0] = (unsigned char)(mSwap[c].x*255.0);
swapPlane[i+1] = (unsigned char)(mSwap[c].y*255.0);
swapPlane[i+2] = (unsigned char)(mSwap[c].z*255.0);
swapPlane[i+3] = 255;
}
mSwapTexture = new Texture2D();
mSwapTexture->setAliasTexParameters();
cocos2d::Size contentSize;
mSwapTexture->initWithData( swapPlane.data(), swapPlane.size(), Texture2D::PixelFormat::RGBA8888, 256, 1, contentSize );
state->setUniformTexture( "u_swapTexture", mSwapTexture );
The fragment shader would look like this:
#ifdef GL_ES
precision mediump float;
#endif
varying vec4 cc_FragColor;
varying vec2 cc_FragTexCoord1;
uniform sampler2D u_swapTexture; // lookup texture with 256 swap colors
void main()
{
vec4 originalColor = texture2D(CC_Texture0, cc_FragTexCoord1);
vec4 swapColor = texture2D(u_swapTexture, vec2(originalColor.r, 0.0));
vec3 finalColor = mix(originalColor.rgb, swapColor.rgb, swapColor.a);
gl_FragColor = vec4(finalColor.rgb, originalColor.a);
}
Of course, the lookup key does not always have to be the red channel; any other channel is also possible.
Even a combination of 2 color channels is possible, by using a larger two-dimensional lookup texture. See the following example, which demonstrates the use of a lookup texture with 1024 entries. The lookup table uses the full red channel (256 indices) in the X dimension and the green channel divided by 64 (4 indices) in the Y dimension.
Create a two dimensional look up table:
std::array< unsigned char, 1024 * 4 > swapPlane{ 0 };
for ( int c = 0; c < noOfColors; ++ c )
{
size_t ix = (size_t)( mSource[c].x * 255.0 );
size_t iy = (size_t)( mSource[c].y * 255.0 / 64.0 );
size_t i = ( iy * 256 + ix ) * 4;
swapPlane[i+0] = (unsigned char)(mSwap[c].x*255.0);
swapPlane[i+1] = (unsigned char)(mSwap[c].y*255.0);
swapPlane[i+2] = (unsigned char)(mSwap[c].z*255.0);
swapPlane[i+3] = 255;
}
mSwapTexture = new Texture2D();
mSwapTexture->setAliasTexParameters();
cocos2d::Size contentSize;
mSwapTexture->initWithData( swapPlane.data(), swapPlane.size(), Texture2D::PixelFormat::RGBA8888, 256, 4, contentSize );
And adapt the fragment shader:
void main()
{
vec4 originalColor = texture2D(CC_Texture0, cc_FragTexCoord1);
vec4 swapColor = texture2D(u_swapTexture, originalColor.rg);
vec3 finalColor = mix(originalColor.rgb, swapColor.rgb, swapColor.a);
gl_FragColor = vec4(finalColor.rgb, originalColor.a);
}
Interpolate the texture
Since it is not possible to use GL_LINEAR with the above approach, this has to be emulated if it is needed:
#ifdef GL_ES
precision mediump float;
#endif
varying vec4 cc_FragColor;
varying vec2 cc_FragTexCoord1;
uniform sampler2D u_swapTexture; // lookup texture with 256 swap colors
uniform vec2 u_spriteSize;
void main()
{
vec2 texS = 1.0 / u_spriteSize;
vec2 texF = fract( cc_FragTexCoord1 * u_spriteSize + 0.5 );
vec2 texC = (cc_FragTexCoord1 * u_spriteSize + 0.5 - texF) / u_spriteSize;
vec4 originalColor = texture2D(CC_Texture0, texC);
vec4 swapColor = texture2D(u_swapTexture, originalColor.rg);
vec3 finalColor00 = mix(originalColor.rgb, swapColor.rgb, swapColor.a);
originalColor = texture2D(CC_Texture0, texC+vec2(texS.x, 0.0));
swapColor = texture2D(u_swapTexture, originalColor.rg);
vec3 finalColor10 = mix(originalColor.rgb, swapColor.rgb, swapColor.a);
originalColor = texture2D(CC_Texture0, texC+vec2(0.0,texS.y));
swapColor = texture2D(u_swapTexture, originalColor.rg);
vec3 finalColor01 = mix(originalColor.rgb, swapColor.rgb, swapColor.a);
originalColor = texture2D(CC_Texture0, texC+texS.xy);
swapColor = texture2D(u_swapTexture, originalColor.rg);
vec3 finalColor11 = mix(originalColor.rgb, swapColor.rgb, swapColor.a);
vec3 finalColor0 = mix( finalColor00, finalColor10, texF.x );
vec3 finalColor1 = mix( finalColor01, finalColor11, texF.x );
vec3 finalColor = mix( finalColor0, finalColor1, texF.y );
gl_FragColor = vec4(finalColor.rgb, originalColor.a);
}
The new uniform variable u_spriteSize has to be set like this:
auto size = sprite->getTexture()->getContentSizeInPixels();
state->setUniformVec2( "u_spriteSize", Vec2( (float)size.width, (float)size.height ) );
Modify the texture on the CPU
Of course the texture can also be modified on the CPU, but then a separate texture has to be generated for each set of swap colors. The advantage is that no shader is needed at all.
The following code swaps the colors when the texture is loaded, so the shader can be skipped completely.
Sprite * sprite = nullptr;
std::string imageFile = ....;
std::string fullpath = FileUtils::getInstance()->fullPathForFilename(imageFile);
cocos2d::Image *img = !fullpath.empty() ? new Image() : nullptr;
if (img != nullptr && img->initWithImageFile(fullpath))
{
if ( img->getRenderFormat() == Texture2D::PixelFormat::RGBA8888 )
{
unsigned char *plane = img->getData();
for ( int y = 0; y < img->getHeight(); ++ y )
{
for ( int x = 0; x < img->getWidth(); ++ x )
{
size_t i = ( y * img->getWidth() + x ) * 4;
unsigned char t = plane[i];
for ( int c = 0; c < noOfColors; ++ c )
{
if ( fabs(mSource[c].x - plane[i+0]/255.0f) < mDeviation[c] &&
fabs(mSource[c].y - plane[i+1]/255.0f) < mDeviation[c] &&
fabs(mSource[c].z - plane[i+2]/255.0f) < mDeviation[c] )
{
plane[i+0] = (unsigned char)(mSwap[c].x*255.0);
plane[i+1] = (unsigned char)(mSwap[c].y*255.0);
plane[i+2] = (unsigned char)(mSwap[c].z*255.0);
}
}
}
}
}
std::string key = "my_swap_" + imageFile;
if ( Texture2D *texture = _director->getTextureCache()->addImage( img, key ) )
sprite = Sprite::createWithTexture( texture );
}
Combined approach on the CPU and GPU
This approach can be used if the same regions (colors) of the texture are always swapped. The advantage of this approach is that the original texture is modified only once, but every use of the texture can hold its own swap table.
For this approach the alpha channel is used to hold the index of the swap color. In the example code below, the value range from 1 to 11 inclusive is used to store the indices of the swap colors. 0 is reserved for full transparency.
Sprite * sprite = nullptr;
std::string imageFile = ....;
std::string key = "my_swap_" + imageFile;
Texture2D *texture = _director->getTextureCache()->getTextureForKey( key );
if (texture == nullptr)
{
std::string fullpath = FileUtils::getInstance()->fullPathForFilename(imageFile);
cocos2d::Image *img = !fullpath.empty() ? new Image() : nullptr;
if ( img->initWithImageFile(fullpath) &&
img->getRenderFormat() == Texture2D::PixelFormat::RGBA8888 )
{
unsigned char *plane = img->getData();
for ( int y = 0; y < img->getHeight(); ++ y )
{
for ( int x = 0; x < img->getWidth(); ++ x )
{
size_t i = ( y * img->getWidth() + x ) * 4;
unsigned char t = plane[i];
for ( int c = 0; c < noOfColors; ++ c )
{
if ( fabs(mSource[c].x - plane[i+0]/255.0f) < mDeviation[c] &&
fabs(mSource[c].y - plane[i+1]/255.0f) < mDeviation[c] &&
fabs(mSource[c].z - plane[i+2]/255.0f) < mDeviation[c] )
{
plane[i+3] = (unsigned char)(c+1);
}
}
}
}
texture = _director->getTextureCache()->addImage( img, key );
}
}
if ( texture != nullptr )
sprite = Sprite::createWithTexture( texture );
The fragment shader only needs the uniforms u_swap and u_noSwap and does not have to do an expensive search.
#ifdef GL_ES
precision mediump float;
#endif
varying vec4 cc_FragColor;
varying vec2 cc_FragTexCoord1;
#define MAX_SWAP 11
uniform vec3 u_swap[MAX_SWAP];
uniform int u_noSwap;
void main()
{
vec4 originalColor = texture2D(CC_Texture0, cc_FragTexCoord1);
float fIndex = originalColor.a * 255.0 - 0.5;
float maxIndex = float(u_noSwap) + 0.5;
int iIndex = int( clamp( fIndex, 0.0, maxIndex ) );
float isSwap = step( 0.0, fIndex ) * step( fIndex, maxIndex );
vec3 swapColor = mix( originalColor.rgb, u_swap[iIndex], isSwap );
gl_FragColor = vec4( swapColor.rgb, max(originalColor.a, isSwap) );
}
Change the hue, saturation, and value of your sprite using a shader.
Shader code example:
#ifdef GL_ES
precision mediump float;
#endif
varying vec2 v_texCoord;
////uniform sampler2D CC_Texture0;
uniform float u_dH;
uniform float u_dS;
uniform float u_dL;
//algorithm ref to: https://en.wikipedia.org/wiki/HSL_and_HSV
void main() {
vec4 texColor=texture2D(CC_Texture0, v_texCoord);
float r=texColor.r;
float g=texColor.g;
float b=texColor.b;
float a=texColor.a;
//convert rgb to hsl
float h;
float s;
float l;
{
float max=max(max(r,g),b);
float min=min(min(r,g),b);
//----h
if(max==min){
h=0.0;
}else if(max==r&&g>=b){
h=60.0*(g-b)/(max-min)+0.0;
}else if(max==r&&g<b){
h=60.0*(g-b)/(max-min)+360.0;
}else if(max==g){
h=60.0*(b-r)/(max-min)+120.0;
}else if(max==b){
h=60.0*(r-g)/(max-min)+240.0;
}
//----l
l=0.5*(max+min);
//----s
if(l==0.0||max==min){
s=0.0;
}else if(0.0<=l&&l<=0.5){
s=(max-min)/(2.0*l);
}else if(l>0.5){
s=(max-min)/(2.0-2.0*l);
}
}
//(h,s,l)+(dH,dS,dL) -> (h,s,l)
h=h+u_dH;
s=min(1.0,max(0.0,s+u_dS));
l=l;//do not use HSL model to adjust lightness, because the effect is not good
//convert (h,s,l) to rgb and got final color
vec4 finalColor;
{
float q;
if(l<0.5){
q=l*(1.0+s);
}else if(l>=0.5){
q=l+s-l*s;
}
float p=2.0*l-q;
float hk=h/360.0;
float t[3];
t[0]=hk+1.0/3.0;t[1]=hk;t[2]=hk-1.0/3.0;
for(int i=0;i<3;i++){
if(t[i]<0.0)t[i]+=1.0;
if(t[i]>1.0)t[i]-=1.0;
}//got t[i]
float c[3];
for(int i=0;i<3;i++){
if(t[i]<1.0/6.0){
c[i]=p+((q-p)*6.0*t[i]);
}else if(1.0/6.0<=t[i]&&t[i]<0.5){
c[i]=q;
}else if(0.5<=t[i]&&t[i]<2.0/3.0){
c[i]=p+((q-p)*6.0*(2.0/3.0-t[i]));
}else{
c[i]=p;
}
}
finalColor=vec4(c[0],c[1],c[2],a);
}
//actually, it is not final color. the lightness has not been adjusted
//adjust lightness use the simplest method
finalColor+=vec4(u_dL,u_dL,u_dL,0.0);
gl_FragColor=finalColor;
}

GLSL Vertex shader bilinear sampling heightmap

I am creating a geomip-mapped terrain. So far I have it working fairly well. The terrain tessellation near the camera is very high and gets lower the further out the geometry is. The geometry of the terrain essentially follows the camera and samples a heightmap texture based on the position of the vertices. Because the geometry tessellation is very high, you can at times see each pixel in the texture when it's sampled, which creates obvious pixel bumps. I figured I might be able to get around this by smoothing the sampling of the heightmap. However, I seem to have a weird problem related to some bilinear sampling code. I am rendering the terrain by displacing each vertex according to a heightmap texture. To get the height of a vertex at a given UV coordinate I can use:
vec2 worldToMapSpace( vec2 worldPosition ) {
return ( worldPosition / worldScale + 0.5 );
}
float getHeight( vec3 worldPosition )
{
#ifdef USE_HEIGHTFIELD
vec2 heightUv = worldToMapSpace(worldPosition.xz);
vec2 tHeightSize = vec2( HEIGHTFIELD_SIZE_WIDTH, HEIGHTFIELD_SIZE_HEIGHT ); //both 512
vec2 texel = vec2( 1.0 / tHeightSize );
//float coarseHeight = texture2DBilinear( heightfield, heightUv, texel, tHeightSize ).r;
float coarseHeight = texture2D( heightfield, vUv ).r;
return altitude * coarseHeight + heightOffset;
#else
return 0.0;
#endif
}
Which produces this (notice how you can see each pixel):
Here is a wireframe:
I wanted to make the terrain sampling smoother, so I figured I could use bilinear sampling instead of the standard texture2D function. Here is my bilinear sampling function:
vec4 texture2DBilinear( sampler2D textureSampler, vec2 uv, vec2 texelSize, vec2 textureSize )
{
vec4 tl = texture2D(textureSampler, uv);
vec4 tr = texture2D(textureSampler, uv + vec2( texelSize.x, 0.0 ));
vec4 bl = texture2D(textureSampler, uv + vec2( 0.0, texelSize.y ));
vec4 br = texture2D(textureSampler, uv + vec2( texelSize.x, texelSize.y ));
vec2 f = fract( uv.xy * textureSize ); // get the decimal part
vec4 tA = mix( tl, tr, f.x );
vec4 tB = mix( bl, br, f.x );
return mix( tA, tB, f.y );
}
The texelSize is calculated as 1 / heightmap size:
vec2 texel = vec2( 1.0 / tHeightSize );
and textureSize is the width and height of the heightmap. However, when I use this function I get this result:
float coarseHeight = texture2DBilinear( heightfield, heightUv, texel, tHeightSize ).r;
That now seems worse :( Any ideas what I might be doing wrong? Or how I can get a smoother terrain sampling?
EDIT
Here is a vertical screenshot looking down at the terrain. You can see the layers work fine. Notice, however, that the outer layers, which have less triangulation, look smoother, while the ones with higher tessellation show each pixel. I'm trying to find a way to smooth out the texture sampling.
I was able to find and implement a technique that uses Catmull-Rom interpolation. Code is below.
// catmullRom works by specifying 4 control points p0, p1, p2, p3 and a weight. The function is used to calculate a point n between p1 and p2 based
// on the weight. The weight is normalized, so a weight of 0 returns p1 and a weight of 1 returns p2.
float catmullRom( float p0, float p1, float p2, float p3, float weight ) {
float weight2 = weight * weight;
return 0.5 * (
p0 * weight * ( ( 2.0 - weight ) * weight - 1.0 ) +
p1 * ( weight2 * ( 3.0 * weight - 5.0 ) + 2.0 ) +
p2 * weight * ( ( 4.0 - 3.0 * weight ) * weight + 1.0 ) +
p3 * ( weight - 1.0 ) * weight2 );
}
// Performs a horizontal Catmull-Rom operation at a given V value.
float textureCubicU( sampler2D samp, vec2 uv00, float texel, float offsetV, float frac ) {
return catmullRom(
texture2DLod( samp, uv00 + vec2( -texel, offsetV ), 0.0 ).r,
texture2DLod( samp, uv00 + vec2( 0.0, offsetV ), 0.0 ).r,
texture2DLod( samp, uv00 + vec2( texel, offsetV ), 0.0 ).r,
texture2DLod( samp, uv00 + vec2( texel * 2.0, offsetV ), 0.0 ).r,
frac );
}
// Samples a texture using a bicubic sampling algorithm. This essentially queries neighbouring
// pixels to get an average value.
float textureBicubic( sampler2D samp, vec2 uv00, vec2 texel, vec2 frac ) {
return catmullRom(
textureCubicU( samp, uv00, texel.x, -texel.y, frac.x ),
textureCubicU( samp, uv00, texel.x, 0.0, frac.x ),
textureCubicU( samp, uv00, texel.x, texel.y, frac.x ),
textureCubicU( samp, uv00, texel.x, texel.y * 2.0, frac.x ),
frac.y );
}
// Gets the UV coordinates based on the world X Z position
vec2 worldToMapSpace( vec2 worldPosition ) {
return ( worldPosition / worldScale + 0.5 );
}
// Gets the height at a location p (world space)
float getHeight( vec3 worldPosition )
{
#ifdef USE_HEIGHTFIELD
vec2 heightUv = worldToMapSpace(worldPosition.xz);
vec2 tHeightSize = vec2( HEIGHTFIELD_WIDTH, HEIGHTFIELD_HEIGHT );
// If we increase the smoothness factor, the terrain becomes a lot smoother.
// This is because it has the effect of shrinking the texture size and increasing
// the texel size, which means the samples are taken from farther away - making
// it smoother. However this means the terrain looks less like the original heightmap and so
// terrain picking goes a bit off.
float smoothness = 1.1;
tHeightSize /= smoothness;
// The size of each texel
vec2 texel = vec2( 1.0 / tHeightSize );
// Find the top-left texel we need to sample.
vec2 heightUv00 = ( floor( heightUv * tHeightSize ) ) / tHeightSize;
// Determine the fraction across the 4-texel quad we need to compute.
vec2 frac = vec2( heightUv - heightUv00 ) * tHeightSize;
float coarseHeight = textureBicubic( heightfield, heightUv00, texel, frac );
return altitude * coarseHeight + heightOffset;
#else
return 0.0;
#endif
}
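As an aside, the manual bilinear fetch attempted in the question can also be made to work by snapping the UV to the centre of the top-left texel before taking the four samples. A minimal sketch, assuming the heightmap is bound with GL_NEAREST filtering (with GL_LINEAR the hardware already interpolates):
vec4 texture2DBilinearCentered( sampler2D samp, vec2 uv, vec2 texelSize, vec2 textureSize )
{
vec2 pixel = uv * textureSize - 0.5; // continuous position in texel units
vec2 f = fract( pixel ); // blend weights between neighbouring texels
vec2 uv00 = ( floor( pixel ) + 0.5 ) * texelSize; // centre of the top-left texel
vec4 tl = texture2D( samp, uv00 );
vec4 tr = texture2D( samp, uv00 + vec2( texelSize.x, 0.0 ) );
vec4 bl = texture2D( samp, uv00 + vec2( 0.0, texelSize.y ) );
vec4 br = texture2D( samp, uv00 + texelSize );
return mix( mix( tl, tr, f.x ), mix( bl, br, f.x ), f.y );
}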

GLSL Gif Dither Effect: Optimization

I've got a fragment shader that essentially reads the color alpha and translates it into a dithering effect across the pixels.
However, it's quite processor intensive with all the mods and if statements.
Does anyone have any recommendations on optimizing the code below?
varying vec2 the_uv;
varying vec4 color;
void main()
{
// The pixel color will correspond
// to the uv coords of the texture
// for the given vertice, retrieved
// by the Vertex shader through varying vec2 the_uv
gl_FragColor = vec4(0.0, 0.0, 0.0, 0.0);
vec4 tex = texture2D(_MainTex, the_uv);
tex = tex * color ;
float r = tex.a;
if ( r > 0.1 ) {
if ( ( mod(gl_FragCoord.x, 4.001) + mod(gl_FragCoord.y, 4.0) ) > 6.00 ) {
gl_FragColor = color;
}
}
if ( r > 0.5 ) {
if ( ( mod(gl_FragCoord.x + 2.0, 4.001) + mod(gl_FragCoord.y, 4.0) ) > 6.00 ) {
gl_FragColor = color;
}
}
if ( r > 0.7 ) {
if ( ( mod(gl_FragCoord.x, 4.001) + mod(gl_FragCoord.y + 2.0, 4.0) ) > 6.00 ) {
gl_FragColor = color;
}
}
if ( r > 0.9 ) {
if ( ( mod(gl_FragCoord.x + 1.0, 4.001) + mod(gl_FragCoord.y + 1.0, 4.0) ) > 6.00 ) {
gl_FragColor = color;
}
}
if ( r > 0.3 ) {
if ( ( mod(gl_FragCoord.x + 2.0, 4.001) + mod(gl_FragCoord.y + 2.0, 4.0) ) > 6.00 ) {
gl_FragColor = color;
}
}
}
Here is the solution based on the feedback:
varying vec2 the_uv;
varying vec4 color;
void main()
{
color = gl_Color;
gl_Position = gl_ModelViewProjectionMatrix * gl_Vertex;
the_uv = gl_MultiTexCoord0.st;
}
#endif
#ifdef FRAGMENT
uniform sampler2D _MainTex;
uniform sampler2D _GridTex;
varying vec2 the_uv;
varying vec4 color;
void main()
{
if (texture2D(_MainTex, the_uv).a * color.a > texture2D(_GridTex, vec2(gl_FragCoord.x, gl_FragCoord.y)*.25).a) gl_FragColor = color;
else gl_FragColor = vec4(0.0, 0.0, 0.0, 0.0);
}
What you're trying to do is to select whether each pixel should be lit, on a 4x4 grid, based on the source alpha. The simplest way to do that is to do just that.
First, initialize a 4x4 texture with the corresponding alpha each pixel requires in order to pass (I picked 1.0 as the alpha for 'never show' here):
1.0 0.5 1.0 0.1
1.0 1.0 0.9 1.0
1.0 0.3 1.0 0.7
1.0 1.0 1.0 1.0
Set up this texture with repeat wrapping to avoid the mod completely, and a nearest filter to avoid linear filtering.
Then use a sample of that texture to decide whether to light the pixel.
float dither = texture2D(_my4x4, gl_FragCoord.xy / 4.).a; // 4 is the size of the texture
if (r > dither) { gl_FragColor = color; }
This assumes that r is never 1. There are simple ways to work around that, e.g. divide the values in the 4x4 texture by 2, except the 1.0's, and then multiply dither by 2 before the if test, but after the fetch.
Some further potential optimizations:
you can avoid the if test altogether by using a texture with comparison (a shadow texture).
you can avoid the division by 4 by using the texelFetch instruction.
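For instance, on GLSL 1.30 or later the dither lookup can be written with texelFetch, which takes integer texel coordinates and a mip level, so neither the division by 4 nor any wrap mode is needed. A sketch, assuming the 4x4 threshold texture from above is bound to _my4x4:
ivec2 cell = ivec2( gl_FragCoord.xy ) % 4; // position inside the repeating 4x4 pattern
float dither = texelFetch( _my4x4, cell, 0 ).a; // third argument is the mip level
if ( r > dither ) { gl_FragColor = color; }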
First of all, your fragment color will be the same regardless of the results of the if statements since color is set at the beginning.
Secondly, using only if statements with no else if's makes your code run through each calculation no matter what.
A much better way to do this would be to have :
if ( r > 0.9 ) {
if ( ( mod(gl_FragCoord.x + 1.0, 4.001) + mod(gl_FragCoord.y + 1.0, 4.0) ) > 6.00) {
gl_FragColor = color;
}
}
else if ( r > 0.7 ) {
if ( ( mod(gl_FragCoord.x, 4.001) + mod(gl_FragCoord.y + 2.0, 4.0) ) > 6.00 ) {
gl_FragColor = color;
}
}
else if ( r > 0.5 ) {
if ( ( mod(gl_FragCoord.x + 2.0, 4.001) + mod(gl_FragCoord.y, 4.0) ) > 6.00 ) {
gl_FragColor = color;
}
}
else if ( r > 0.3 ) {
if ( ( mod(gl_FragCoord.x + 2.0, 4.001) + mod(gl_FragCoord.y + 2.0, 4.0) ) > 6.00 ) {
gl_FragColor = color;
}
}
else if ( r > 0.1 ) {
if ( ( mod(gl_FragCoord.x, 4.001) + mod(gl_FragCoord.y, 4.0) ) > 6.00 ) {
gl_FragColor = color;
}
}
else{
gl_FragColor = color;
}
Of course this won't help you change the output, since color never changes (as I mentioned previously). But this should make things run a bit faster.
One other thing to take into account is what cases get executed most often. I assumed that the cases with large r are more common, hence the ordering of the if statements. If small r's are more common, then reversing the order and having r < 0.1, r < 0.3, r < 0.5, r < 0.7, r < 0.9 would make more sense.
The whole point here is to make the code exit as fast as possible (i.e. have one of the if's return true). Once one of the if's returns true, the rest are ignored, saving you from calculating all the rest of the mod operations.