Alpha channel fade animation - GLSL

I'm pretty new to shaders and I've been attempting to create a shader that does an alpha reveal of a texture. I've gotten close, but I'm pretty sure there is a much better way.
This is what I have so far: https://codepen.io/tkmoney/pen/REYrpV
varying vec2 vUv;
precision highp float;
precision highp int;
uniform sampler2D texture;
uniform float mask_position;
uniform float fade_size;
void main(void) {
float mask_starting_point = (0.0 - fade_size);
float mask_ending_point = (1.0 - fade_size);
vec4 orig_color = texture2D(texture, vUv);
vec4 color = texture2D(texture, vUv);
float mask_p = smoothstep(mask_starting_point, mask_ending_point, mask_position);
//color.a *= (distance(vUv.x, split_center_point));
vec2 p = vUv;
if (p.x > (mask_p)){
color.a = 0.0;
}else{
color.a *= (smoothstep(mask_position, (mask_position - fade_size), p.x ));
}
gl_FragColor = color;
}
The fade-in does not completely reveal the entire image, as you can see. Any insight on a better way to tackle this would be great. Thanks!

What you want to do is to make the area to the left of mask_position visible, but hide the area to the right. This can be achieved with step:
color.a *= 1.0 - step(fade_size, p);
If you want a smooth transition from the visible to the invisible area, then you have to use smoothstep. The start of the fading is a certain amount before mask_position and the end a certain amount after mask_position:
float start_p = mask_position-fade_size;
float end_p = mask_position+fade_size;
color.a *= 1.0 - smoothstep(start_p, end_p, vUv.x);
This causes the start and the end of the image to never fade in completely. To compensate for this, vUv.x has to be mapped from the range [0.0, 1.0] to the range [fade_size, 1.0-fade_size]. This can easily be calculated with mix:
float p = vUv.x * (1.0-2.0*fade_size) + fade_size;
color.a *= 1.0 - smoothstep(start_p, end_p, p);
If the alpha channel of the final color is below a tiny threshold, the fragment can be discarded:
if ( color.a < 0.01 )
discard;
Final shader:
varying vec2 vUv;
precision highp float;
precision highp int;
uniform sampler2D texture;
uniform float mask_position;
uniform float fade_size;
void main(void) {
vec4 color = texture2D(texture, vUv);
float start_p = mask_position-fade_size;
float end_p = mask_position+fade_size;
float p = mix(fade_size, 1.0-fade_size, vUv.x);
color.a *= 1.0 - smoothstep(start_p, end_p, p);
if ( color.a < 0.01 )
discard;
gl_FragColor = color;
}
See the example:
var container;
var camera, scene, renderer;
var uniforms;
init();
animate();
function init() {
container = document.getElementById( 'container' );
camera = new THREE.Camera();
camera.position.z = 1;
scene = new THREE.Scene();
var geometry = new THREE.PlaneBufferGeometry( 2, 2 );
var texture = new THREE.TextureLoader().load( 'https://raw.githubusercontent.com/Rabbid76/graphics-snippets/master/resource/texture/background.jpg' );
uniforms = {
u_time: { type: "f", value: 1.0 },
u_resolution: { type: "v2", value: new THREE.Vector2() },
u_mouse: { type: "v2", value: new THREE.Vector2() },
texture: {type: 't', value: texture},
fade_size: { type: 'f', value: 0.2 },
mask_position: { type: 'f', value: 0 }
};
var material = new THREE.ShaderMaterial( {
uniforms: uniforms,
vertexShader: document.getElementById( 'vertexShader' ).textContent,
fragmentShader: document.getElementById( 'fragmentShader' ).textContent
} );
var mesh = new THREE.Mesh( geometry, material );
scene.add( mesh );
renderer = new THREE.WebGLRenderer( {alpha : true} );
renderer.setClearColor(0xffffff, 0.0);
renderer.setPixelRatio( window.devicePixelRatio );
container.appendChild( renderer.domElement );
onWindowResize();
window.addEventListener( 'resize', onWindowResize, false );
document.onmousemove = function(e){
uniforms.u_mouse.value.x = e.pageX
uniforms.u_mouse.value.y = e.pageY
}
}
function onWindowResize( event ) {
renderer.setSize( window.innerWidth, window.innerHeight );
uniforms.u_resolution.value.x = renderer.domElement.width;
uniforms.u_resolution.value.y = renderer.domElement.height;
}
function animate() {
requestAnimationFrame( animate );
render();
}
var mask_step = 0.01;
var mask_val = 0.0;
function render() {
if ( mask_val >= 1.0) { mask_val = 1.0; mask_step = -0.01; }
else if ( mask_val <= -0.0) { mask_val = 0.0; mask_step = 0.01; }
mask_val += mask_step;
uniforms.mask_position.value = mask_val;
uniforms.u_time.value += 0.05;
renderer.render( scene, camera );
}
<script id="vertexShader" type="x-shader/x-vertex">
void main() {
gl_Position = vec4( position, 1.0 );
}
</script>
<script id="fragmentShader" type="x-shader/x-fragment">
uniform vec2 u_resolution;
uniform float u_time;
uniform sampler2D texture;
uniform float fade_size;
uniform float mask_position;
void main() {
vec2 vUv = gl_FragCoord.xy/u_resolution.xy;
vec4 color = texture2D(texture, vUv);
float start_p = mask_position-fade_size;
float end_p = mask_position+fade_size;
float p = mix(fade_size, 1.0-fade_size, vUv.x);
color.rgba *= 1.0 - smoothstep(start_p, end_p, p);
if ( color.a < 0.01 )
discard;
gl_FragColor = color;
}
</script>
<div id="container"></div>
<script src="https://cdnjs.cloudflare.com/ajax/libs/three.js/100/three.min.js"></script>

Related

Blobs shader GLSL

I want to create a background similar to these images with a shader:
These are just blurred blobs with colors, distributed across the whole page:
Here's my current progress: https://codesandbox.io/s/lucid-bas-wvlzl9?file=/src/components/Background/Background.tsx
Vertex shader:
varying vec2 vUv;
void main() {
vUv = uv;
gl_Position = projectionMatrix * modelViewMatrix * vec4(position, 1.0);
}
Fragment shader:
precision highp float;
uniform float uTime;
uniform float uAmplitude;
uniform float uFrequency;
varying vec2 vUv;
uniform vec2 uResolution;
vec4 Sphere(vec2 position, float radius)
{
// float dist = radius / distance(vUv, position);
// float strength = 0.01 / distance(vUv, position);
float strength = 0.1 / distance(vec2(vUv.x, (vUv.y - 0.5) * 8. + 0.5), vec2(0.));
return vec4(strength * strength);
}
void main()
{
vec2 uv = vUv;
vec4 pixel = vec4(0.0, 0.0, 0.0, 0.0);
vec2 positions[4];
positions[0] = vec2(.5, .5);
// positions[1] = vec2(sin(uTime * 3.0) * 0.5, (cos(uTime * 1.3) * 0.6) + vUv.y);
// positions[2] = vec2(sin(uTime * 2.1) * 0.1, (cos(uTime * 1.9) * 0.8) + vUv.y);
// positions[3] = vec2(sin(uTime * 1.1) * 1.1, (cos(uTime * 2.6) * 0.7) + vUv.y);
for (int i = 0; i < 2; i++)
pixel += Sphere(positions[i], 0.22);
pixel = pixel * pixel;
gl_FragColor = pixel;
}
For each blob, you can multiply its color by a noise function and then by a 2D Gaussian curve centered at a random point. Then add all the blobs together. I only added the ones of the adjacent cells to make it scrollable, and the numbers in the for loops may be increased for bigger blobs.
Here is my code:
#ifdef GL_ES
precision mediump float;
#endif
uniform vec2 u_resolution;
uniform vec2 u_mouse;
uniform float u_time;
const float blobSize = 0.125;
const float cellSize = .75;
const float noiseScale = .375;
const float background = .125;
const float blobsLuminosity = .75;
const float blobsSaturation = .5;
vec2 random2(vec2 st){
st = vec2( dot(st,vec2(127.1,311.7)),
dot(st,vec2(269.5,183.3)) );
return -1.0 + 2.0*fract(sin(st)*43758.5453123);
}
// Gradient Noise by Inigo Quilez - iq/2013
// https://www.shadertoy.com/view/XdXGW8
float noise(vec2 st) {
vec2 i = floor(st);
vec2 f = fract(st);
vec2 u = f*f*(3.0-2.0*f);
return mix( mix( dot( random2(i + vec2(0.0,0.0) ), f - vec2(0.0,0.0) ),
dot( random2(i + vec2(1.0,0.0) ), f - vec2(1.0,0.0) ), u.x),
mix( dot( random2(i + vec2(0.0,1.0) ), f - vec2(0.0,1.0) ),
dot( random2(i + vec2(1.0,1.0) ), f - vec2(1.0,1.0) ), u.x), u.y)*.5+.5;
}
float gaussFunction(vec2 st, vec2 p, float r) {
return exp(-dot(st-p, st-p)/2./r/r);
}
// Function from Iñigo Quilez
// https://www.shadertoy.com/view/MsS3Wc
vec3 hsb2rgb( in vec3 c ){
vec3 rgb = clamp(abs(mod(c.x*6.0+vec3(0.0,4.0,2.0),
6.0)-3.0)-1.0,
0.0,
1.0 );
rgb = rgb*rgb*(3.0-2.0*rgb);
return c.z * mix( vec3(1.0), rgb, c.y);
}
vec3 hash32(vec2 p)
{
vec3 p3 = fract(vec3(p.xyx) * vec3(.1031, .1030, .0973));
p3 += dot(p3, p3.yxz+33.33);
return fract((p3.xxy+p3.yzz)*p3.zyx);
}
vec3 blobs(vec2 st){
vec2 i = floor(st/cellSize);
vec3 c = vec3(0.);
for(int x = -1; x <= 1; x++)
for(int y = -1; y <= 1; y++){
vec3 h = hash32(i+vec2(x, y));
c += hsb2rgb(vec3(h.z, blobsSaturation, blobsLuminosity)) * gaussFunction(st/cellSize, i + vec2(x, y) + h.xy, blobSize) * smoothstep(0., 1., noise(noiseScale*st/cellSize / blobSize));
//c += hsb2rgb(vec3(h.z, blobsSaturation, blobsLuminosity)) * gaussFunction(st/cellSize, i + vec2(x, y) + h.xy, blobSize) * noise(noiseScale*st/cellSize / blobSize);
}
return c + vec3(background);
}
float map(float x, float a, float b, float c, float d){
return (x-a)/(b-a)*(d-c)+c;
}
void main() {
vec2 st = gl_FragCoord.xy/u_resolution.xy;
st.x *= u_resolution.x/u_resolution.y;
vec3 color = vec3(0.0);
color = vec3(blobs(st - u_mouse/u_resolution.xy*4.));
gl_FragColor = vec4(color,1.0);
}
Made in this shader editor.

Fisheye Skybox Shader

I'm trying to write a panorama viewer. Mostly this involves mapping an image to a quad to simulate a skybox.
For a cubemap image it's pretty trivial: either copy the 6 parts of the image to the 6 faces of a cubemap, or else write a shader that does the cubemap math as specified in the OpenGL ES spec.
For an equirectangular image, like one from a Ricoh Theta, you can use this math:
// convert from direction (n) to texcoord (uv)
float latitude = acos(n.y);
float longitude = atan(n.z, n.x);
vec2 sphereCoords = vec2(longitude, latitude) * vec2(0.5 / PI, 1.0 / PI);
vec2 uv = fract(vec2(0.5,1.0) - sphereCoords);
const m4 = twgl.m4;
const gl = document.querySelector('canvas').getContext('webgl');
const vs = `
attribute vec4 position;
varying vec4 v_position;
void main() {
v_position = position;
gl_Position = position;
gl_Position.z = 1.0;
}
`;
const fs = `
precision highp float;
uniform sampler2D u_skybox;
uniform mat4 u_viewDirectionProjectionInverse;
varying vec4 v_position;
#define PI radians(180.0)
void main() {
vec4 t = u_viewDirectionProjectionInverse * v_position;
vec3 n = normalize(t.xyz / t.w);
// convert from direction (n) to texcoord (uv)
float latitude = acos(n.y);
float longitude = atan(n.z, n.x);
vec2 sphereCoords = vec2(longitude, latitude) * vec2(0.5 / PI, 1.0 / PI);
vec2 uv = fract(vec2(0.5,1.0) - sphereCoords);
// multiply u by 2 because we only have a 180degree view
gl_FragColor = texture2D(u_skybox, uv * vec2(-2, 1));
}
`;
const programInfo = twgl.createProgramInfo(gl, [vs, fs]);
const tex = twgl.createTexture(gl, {
src: 'https://i.imgur.com/ChIfXM0.jpg',
flipY: true,
});
const bufferInfo = twgl.primitives.createXYQuadBufferInfo(gl, 2);
function render(time) {
time *= 0.001;
twgl.resizeCanvasToDisplaySize(gl.canvas);
gl.viewport(0, 0, gl.canvas.width, gl.canvas.height);
const aspect = gl.canvas.clientWidth / gl.canvas.clientHeight;
const projectionMatrix = m4.perspective(45 * Math.PI / 180, aspect, 1, 20);
const cameraMatrix = m4.rotationY(time * 0.1);
m4.rotateX(cameraMatrix, Math.sin(time * 0.3) * 0.5, cameraMatrix);
const viewMatrix = m4.inverse(cameraMatrix);
viewMatrix[12] = 0;
viewMatrix[13] = 0;
viewMatrix[14] = 0;
const viewDirectionProjectionMatrix = m4.multiply(projectionMatrix, viewMatrix);
const viewDirectionProjectionInverseMatrix = m4.inverse(viewDirectionProjectionMatrix);
gl.useProgram(programInfo.program);
twgl.setBuffersAndAttributes(gl, programInfo, bufferInfo);
twgl.setUniforms(programInfo, {
u_viewDirectionProjectionInverse: viewDirectionProjectionInverseMatrix,
u_skybox: tex,
});
twgl.drawBufferInfo(gl, bufferInfo);
requestAnimationFrame(render);
}
requestAnimationFrame(render);
body { margin: 0; }
canvas { width: 100vw; height: 100vh; display: block; }
<script src="https://twgljs.org/dist/4.x/twgl-full.min.js"></script>
<canvas></canvas>
But some images are not equirectangular; they're fisheye.
I've been trying to figure out the math needed to do the same thing (map this to a quad-based skybox), but I've been having no luck.
As a reference I found this page with conversions from 3D to fisheye coords. It says:
// convert from direction (n) to texcoord (uv)
float r = 2.0 * atan(length(n.xy), n.z) / PI;
float theta = atan(n.y, n.x);
vec2 uv = vec2(cos(theta), sin(theta)) * r * 0.5 + 0.5;
const m4 = twgl.m4;
const gl = document.querySelector('canvas').getContext('webgl');
const vs = `
attribute vec4 position;
varying vec4 v_position;
void main() {
v_position = position;
gl_Position = position;
gl_Position.z = 1.0;
}
`;
const fs = `
precision highp float;
uniform sampler2D u_skybox;
uniform mat4 u_viewDirectionProjectionInverse;
varying vec4 v_position;
#define PI radians(180.0)
void main() {
vec4 t = u_viewDirectionProjectionInverse * v_position;
vec3 n = normalize(t.xyz / t.w);
// convert from direction (n) to texcoord (uv)
float r = 2.0 * atan(length(n.xy), n.z) / PI;
float theta = atan(n.y, n.x);
vec2 uv = vec2(cos(theta), sin(theta)) * r * 0.5 + 0.5;
#if 0
// Calculate fisheye angle and radius
float theta = atan(n.z, n.x);
float phi = atan(length(n.xz), n.y);
float r = phi / PI;
// Pixel in fisheye space
vec2 uv = vec2(0.5) + r * vec2(cos(theta), sin(theta));
#endif
// multiply u by 2 because we only have a 180degree view
gl_FragColor = texture2D(u_skybox, uv * vec2(-2, 1));
}
`;
const programInfo = twgl.createProgramInfo(gl, [vs, fs]);
const tex = twgl.createTexture(gl, {
src: 'https://i.imgur.com/dzXCQwM.jpg',
flipY: true,
});
const bufferInfo = twgl.primitives.createXYQuadBufferInfo(gl, 2);
function render(time) {
time *= 0.001;
twgl.resizeCanvasToDisplaySize(gl.canvas);
gl.viewport(0, 0, gl.canvas.width, gl.canvas.height);
const aspect = gl.canvas.clientWidth / gl.canvas.clientHeight;
const projectionMatrix = m4.perspective(45 * Math.PI / 180, aspect, 1, 20);
const cameraMatrix = m4.rotationY(time * 0.1);
m4.rotateX(cameraMatrix, 0.7 + Math.sin(time * 0.3) * .7, cameraMatrix);
const viewMatrix = m4.inverse(cameraMatrix);
viewMatrix[12] = 0;
viewMatrix[13] = 0;
viewMatrix[14] = 0;
const viewDirectionProjectionMatrix = m4.multiply(projectionMatrix, viewMatrix);
const viewDirectionProjectionInverseMatrix = m4.inverse(viewDirectionProjectionMatrix);
gl.useProgram(programInfo.program);
twgl.setBuffersAndAttributes(gl, programInfo, bufferInfo);
twgl.setUniforms(programInfo, {
u_viewDirectionProjectionInverse: viewDirectionProjectionInverseMatrix,
u_skybox: tex,
});
twgl.drawBufferInfo(gl, bufferInfo);
requestAnimationFrame(render);
}
requestAnimationFrame(render);
body { margin: 0; }
canvas { width: 100vw; height: 100vh; display: block; }
<script src="https://twgljs.org/dist/4.x/twgl-full.min.js"></script>
<canvas></canvas>
Clearly I'm missing something.
I believe the error lies in this logic:
// multiply u by 2 because we only have a 180degree view
gl_FragColor = texture2D(u_skybox, uv * vec2(-2, 1));
Although this works in the equirectangular case because the math works out such that the z-component only affects longitude, it is no longer valid in the fisheye case because n.z affects both axes.
You can account for the negative z-component in the formula by taking the absolute value of n.z and flipping n.x when the z is negative:
// convert from direction (n) to texcoord (uv)
float r = 2.0 * atan(length(n.xy), abs(n.z)) / PI;
float theta = atan(n.y, n.x * sign(n.z));
vec2 uv = vec2(cos(theta), sin(theta)) * r * 0.5 + vec2(0.5);
Here it is in action:
const m4 = twgl.m4;
const gl = document.querySelector('canvas').getContext('webgl');
const vs = `
attribute vec4 position;
varying vec4 v_position;
void main() {
v_position = position;
gl_Position = position;
gl_Position.z = 1.0;
}
`;
const fs = `
precision highp float;
uniform sampler2D u_skybox;
uniform mat4 u_viewDirectionProjectionInverse;
varying vec4 v_position;
#define PI radians(180.0)
void main() {
vec4 t = u_viewDirectionProjectionInverse * v_position;
vec3 n = normalize(t.xyz / t.w);
// convert from direction (n) to texcoord (uv)
float r = 2.0 * atan(length(n.xy), abs(n.z)) / PI;
float theta = atan(n.y, n.x * sign(n.z));
vec2 uv = vec2(cos(theta), sin(theta)) * r * 0.5 + vec2(0.5);
gl_FragColor = texture2D(u_skybox, uv * vec2(-1.0, 1.0));
}
`;
const programInfo = twgl.createProgramInfo(gl, [vs, fs]);
const tex = twgl.createTexture(gl, {
src: 'https://i.imgur.com/dzXCQwM.jpg',
flipY: true,
});
const bufferInfo = twgl.primitives.createXYQuadBufferInfo(gl, 2);
function render(time) {
time *= 0.001;
twgl.resizeCanvasToDisplaySize(gl.canvas);
gl.viewport(0, 0, gl.canvas.width, gl.canvas.height);
const aspect = gl.canvas.clientWidth / gl.canvas.clientHeight;
const projectionMatrix = m4.perspective(45 * Math.PI / 180, aspect, 1, 20);
const cameraMatrix = m4.rotationY(time * 0.1);
m4.rotateX(cameraMatrix, 0.7 + Math.sin(time * 0.3) * .7, cameraMatrix);
const viewMatrix = m4.inverse(cameraMatrix);
viewMatrix[12] = 0;
viewMatrix[13] = 0;
viewMatrix[14] = 0;
const viewDirectionProjectionMatrix = m4.multiply(projectionMatrix, viewMatrix);
const viewDirectionProjectionInverseMatrix = m4.inverse(viewDirectionProjectionMatrix);
gl.useProgram(programInfo.program);
twgl.setBuffersAndAttributes(gl, programInfo, bufferInfo);
twgl.setUniforms(programInfo, {
u_viewDirectionProjectionInverse: viewDirectionProjectionInverseMatrix,
u_skybox: tex,
});
twgl.drawBufferInfo(gl, bufferInfo);
requestAnimationFrame(render);
}
requestAnimationFrame(render);
body { margin: 0; }
canvas { width: 100vw; height: 100vh; display: block; }
<script src="https://twgljs.org/dist/4.x/twgl-full.min.js"></script>
<canvas></canvas>

Screen Space Reflections Artifacts

When I implemented SSR, I encountered a problem with artifacts. Below I present the code and screenshots.
Fragment SSR shader:
#version 330 core
uniform sampler2D normalMap; // in view space
uniform sampler2D colorMap;
uniform sampler2D reflectionStrengthMap;
uniform sampler2D positionMap; // in view space
uniform mat4 projection;
uniform vec3 skyColor = vec3(0.1, 0, 0.5);
in vec2 texCoord;
layout (location = 0) out vec4 fragColor;
const int binarySearchCount = 10;
const int rayMarchCount = 30;
const float step = 0.05;
const float LLimiter = 0.2;
const float minRayStep = 0.2;
vec3 getPosition(in vec2 texCoord) {
return texture(positionMap, texCoord).xyz;
}
vec2 binarySearch(inout vec3 dir, inout vec3 hitCoord, inout float dDepth) {
float depth;
vec4 projectedCoord;
for(int i = 0; i < binarySearchCount; i++) {
projectedCoord = projection * vec4(hitCoord, 1.0);
projectedCoord.xy /= projectedCoord.w;
projectedCoord.xy = projectedCoord.xy * 0.5 + 0.5;
depth = getPosition(projectedCoord.xy).z;
dDepth = hitCoord.z - depth;
dir *= 0.5;
if(dDepth > 0.0)
hitCoord += dir;
else
hitCoord -= dir;
}
projectedCoord = projection * vec4(hitCoord, 1.0);
projectedCoord.xy /= projectedCoord.w;
projectedCoord.xy = projectedCoord.xy * 0.5 + 0.5;
return vec2(projectedCoord.xy);
}
vec2 rayCast(vec3 dir, inout vec3 hitCoord, out float dDepth) {
dir *= step;
for (int i = 0; i < rayMarchCount; i++) {
hitCoord += dir;
vec4 projectedCoord = projection * vec4(hitCoord, 1.0);
projectedCoord.xy /= projectedCoord.w;
projectedCoord.xy = projectedCoord.xy * 0.5 + 0.5;
float depth = getPosition(projectedCoord.xy).z;
dDepth = hitCoord.z - depth;
if((dir.z - dDepth) < 1.2 && dDepth <= 0.0) return binarySearch(dir, hitCoord, dDepth);
}
return vec2(-1.0);
}
void main() {
float reflectionStrength = texture(reflectionStrengthMap, texCoord).r;
if (reflectionStrength == 0) {
fragColor = texture(colorMap, texCoord);
return;
}
vec3 normal = texture(normalMap, texCoord).xyz;
vec3 viewPos = getPosition(texCoord);
// Reflection vector
vec3 reflected = normalize(reflect(normalize(viewPos), normalize(normal)));
// Ray cast
vec3 hitPos = viewPos;
float dDepth;
vec2 coords = rayCast(reflected * max(-viewPos.z, minRayStep), hitPos, dDepth);
float L = length(getPosition(coords) - viewPos);
L = clamp(L * LLimiter, 0, 1);
float error = 1 - L;
vec3 color = texture(colorMap, coords.xy).rgb * error;
if (coords.xy != vec2(-1.0)) {
fragColor = mix(texture(colorMap, texCoord), vec4(color, 1.0), reflectionStrength);
return;
}
fragColor = mix(texture(colorMap, texCoord), vec4(skyColor, 1.0), reflectionStrength);
}
Result without blackout (without * error):
Result with blackout:
Note: the blue is filled in specifically so the artifacts can be seen.
And one more question: what is the best way to add Fresnel without harming the scene?
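For reference, a common approach is Schlick's approximation: derive a Fresnel factor from the angle between the view direction and the surface normal and use it to attenuate the reflection strength. The following is only a sketch (F0 is an assumed base reflectance at normal incidence, roughly 0.04 for dielectrics), not part of the shader above:
// Sketch: Schlick's approximation of the Fresnel factor, in view space
const float F0 = 0.04; // assumed base reflectance at normal incidence
float fresnelSchlick(vec3 viewPos, vec3 normal) {
vec3 V = normalize(-viewPos); // fragment-to-camera direction in view space
float cosTheta = clamp(dot(normalize(normal), V), 0.0, 1.0);
return F0 + (1.0 - F0) * pow(1.0 - cosTheta, 5.0);
}
// Possible use in main(): attenuate the reflection before mixing, for example
// float fresnel = fresnelSchlick(viewPos, normal);
// fragColor = mix(texture(colorMap, texCoord), vec4(color, 1.0), reflectionStrength * fresnel);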

Recolor sprites on the fly

I need to replace the colors of a sprite.
Some example found on Google:
Here is a solution for Unity I've found that looks like it works - [How to Use a Shader to Dynamically Swap a Sprite's Colors][2]
How to port it to cocos2d-x? Can someone please help with code examples?
I'm looking for cocos2d-x v3 code snippet. Really looking forward for some help.
The algorithm in the article How to Use a Shader to Dynamically Swap a Sprite's Colors is very simple. It is based on a one dimensional lookup table with 256 entries. This allows the algorithm to map only 256 different colors.
In detail, the new colors (the colors used to replace) are stored in a one-dimensional texture with 256 entries. When a color is read from the original texture, a key is used to find the new color in the one-dimensional swap texture. The key is the red channel of the original color, which means that all distinct colors in the original texture must also have different red values. This is another restriction.
The original document (How to Use a Shader to Dynamically Swap a Sprite's Colors) says:
Note that this may not work as expected if two or more colors on the sprite texture share the same red value! When using this method, it's important to keep the red values of the colors in the sprite texture different.
Further, the algorithm mixes the original color and the swap color by the alpha channel of the swap color. This means the swap color is drawn where the swap color is completely opaque, the original color is drawn where the swap color is completely transparent, and anything in between is linearly interpolated.
A GLSL function implementing this algorithm is very short and looks somewhat like this:
uniform sampler2D u_spriteTexture; // sprite texture
uniform sampler1D u_swapTexture; // lookup texture with swap colors
vec4 SwapColor( vec2 textureCoord )
{
vec4 originalColor = texture( u_spriteTexture, textureCoord.st );
vec4 swapColor = texture( u_swapTexture, originalColor.r );
vec3 finalColor = mix( originalColor.rgb, swapColor.rgb, swapColor.a );
return vec4( finalColor.rgb, originalColor.a );
}
Suggested Algorithm
Reading the suggested shader from the question, I came up with the following solution. The shader uses an algorithm to convert from RGB to hue, saturation, and value and back. I took this idea and added my own thoughts.
Performant conversion functions between RGB and HSV can be found at RGB to HSV/HSL/HCY/HCL in HLSL, which can easily be translated from HLSL to GLSL:
RGB to HSV
const float Epsilon = 1e-10;
vec3 RGBtoHCV( in vec3 RGB )
{
vec4 P = (RGB.g < RGB.b) ? vec4(RGB.bg, -1.0, 2.0/3.0) : vec4(RGB.gb, 0.0, -1.0/3.0);
vec4 Q = (RGB.r < P.x) ? vec4(P.xyw, RGB.r) : vec4(RGB.r, P.yzx);
float C = Q.x - min(Q.w, Q.y);
float H = abs((Q.w - Q.y) / (6.0 * C + Epsilon) + Q.z);
return vec3(H, C, Q.x);
}
vec3 RGBtoHSV(in vec3 RGB)
{
vec3 HCV = RGBtoHCV(RGB);
float S = HCV.y / (HCV.z + Epsilon);
return vec3(HCV.x, S, HCV.z);
}
HSV to RGB
vec3 HUEtoRGB(in float H)
{
float R = abs(H * 6.0 - 3.0) - 1.0;
float G = 2.0 - abs(H * 6.0 - 2.0);
float B = 2.0 - abs(H * 6.0 - 4.0);
return clamp( vec3(R,G,B), 0.0, 1.0 );
}
vec3 HSVtoRGB(in vec3 HSV)
{
vec3 RGB = HUEtoRGB(HSV.x);
return ((RGB - 1.0) * HSV.y + 1.0) * HSV.z;
}
As in the first algorithm of this answer, a one-dimensional lookup table is needed again. But the length of the lookup table does not have to be exactly 256; it is completely up to the user. The key is not the red channel, it is the hue value, which is a clear expression of the color and can easily be calculated as seen in RGBtoHCV and RGBtoHSV. The lookup table, however, has to contain a color assignment distributed linearly over the hue range from 0 to 1 of the original color.
The algorithm can be defined with the following steps:
Convert the original color to the original hue, saturation, and value
Use the original hue as key to find the swap color in the look up table
Convert the swap color to the swap hue, saturation, and value
Convert the hue of the swap color and the original saturation, and value to a new RGB color
Mix the original color and the new color by the alpha channel of the swap color
With this algorithm any RGB color can be swapped while keeping the saturation and value of the original color. See the following short and clear GLSL function:
uniform sampler2D u_spriteTexture; // sprite texture
uniform sampler1D u_swapTexture; // lookup texture with swap colors
// the texture coordinate is the hue of the original color
vec4 SwapColor( vec2 textureCoord )
{
vec4 originalColor = texture( u_spriteTexture, textureCoord.st );
vec3 originalHSV = RGBtoHSV( originalColor.rgb );
vec4 lookUpColor = texture( u_swapTexture, originalHSV.x );
vec3 swapHSV = RGBtoHSV( lookUpColor.rgb );
vec3 swapColor = HSVtoRGB( vec3( swapHSV.x, originalHSV.y, originalHSV.z ) );
vec3 finalColor = mix( originalColor.rgb, swapColor.rgb, lookUpColor.a );
return vec4( finalColor.rgb, originalColor.a );
}
Apply to cocos2d-x v3.15
To apply the shader to cocos2d-x v3.15 I adapted the HelloWorldScene.h and HelloWorldScene.cpp in the project cpp-empty-test of the cocos2d-x v3.15 test projects.
The shader can be applied to any sprite and can swap up to 10 color tints, but this can easily be expanded. Note, the shader does not only change a single color; it finds all colors which are similar to a given color, even colors with a completely different saturation or brightness. Each color is swapped with a color that has equal saturation and brightness, but a new base color.
The information which controls the color swap is stored in an array of vec3. The x component contains the hue of the original color, the y component contains the hue of the swap color, and the z component contains an epsilon value which defines the color range.
The shader source files should be placed in the "resource/shader" subdirectory of the project directory.
Vertex shader shader/colorswap.vert
attribute vec4 a_position;
attribute vec2 a_texCoord;
attribute vec4 a_color;
varying vec4 cc_FragColor;
varying vec2 cc_FragTexCoord1;
void main()
{
gl_Position = CC_PMatrix * a_position;
cc_FragColor = a_color;
cc_FragTexCoord1 = a_texCoord;
}
Fragment shader shader/colorswap.frag
#ifdef GL_ES
precision mediump float;
#endif
varying vec4 cc_FragColor;
varying vec2 cc_FragTexCoord1;
const float Epsilon = 1e-10;
vec3 RGBtoHCV( in vec3 RGB )
{
vec4 P = (RGB.g < RGB.b) ? vec4(RGB.bg, -1.0, 2.0/3.0) : vec4(RGB.gb, 0.0, -1.0/3.0);
vec4 Q = (RGB.r < P.x) ? vec4(P.xyw, RGB.r) : vec4(RGB.r, P.yzx);
float C = Q.x - min(Q.w, Q.y);
float H = abs((Q.w - Q.y) / (6.0 * C + Epsilon) + Q.z);
return vec3(H, C, Q.x);
}
vec3 RGBtoHSV(in vec3 RGB)
{
vec3 HCV = RGBtoHCV(RGB);
float S = HCV.y / (HCV.z + Epsilon);
return vec3(HCV.x, S, HCV.z);
}
vec3 HUEtoRGB(in float H)
{
float R = abs(H * 6.0 - 3.0) - 1.0;
float G = 2.0 - abs(H * 6.0 - 2.0);
float B = 2.0 - abs(H * 6.0 - 4.0);
return clamp( vec3(R,G,B), 0.0, 1.0 );
}
vec3 HSVtoRGB(in vec3 HSV)
{
vec3 RGB = HUEtoRGB(HSV.x);
return ((RGB - 1.0) * HSV.y + 1.0) * HSV.z;
}
#define MAX_SWAP 10
uniform vec3 u_swap[MAX_SWAP];
uniform int u_noSwap;
void main()
{
vec4 originalColor = texture2D(CC_Texture0, cc_FragTexCoord1);
vec3 originalHSV = RGBtoHSV( originalColor.rgb );
vec4 swapColor = vec4( originalColor.rgb, 1.0 );
for ( int i = 0; i < 10 ; ++ i )
{
if ( i >= u_noSwap )
break;
if ( abs( originalHSV.x - u_swap[i].x ) < u_swap[i].z )
{
swapColor.rgb = HSVtoRGB( vec3( u_swap[i].y, originalHSV.y, originalHSV.z ) );
break;
}
}
vec3 finalColor = mix( originalColor.rgb, swapColor.rgb, swapColor.a );
gl_FragColor = vec4( finalColor.rgb, originalColor.a );
}
Header file HelloWorldScene.h:
#ifndef __HELLOWORLD_SCENE_H__
#define __HELLOWORLD_SCENE_H__
#include "cocos2d.h"
#define MAX_COLOR 10
class HelloWorld : public cocos2d::Scene
{
public:
virtual bool init() override;
static cocos2d::Scene* scene();
void menuCloseCallback(Ref* sender);
CREATE_FUNC(HelloWorld);
void InitSwapInfo( int i, const cocos2d::Color3B &sourceCol, const cocos2d::Color3B &swapCol, float deviation );
private:
cocos2d::GLProgram* mProgramExample;
cocos2d::Vec3 mSource[MAX_COLOR];
cocos2d::Vec3 mSwap[MAX_COLOR];
float mDeviation[MAX_COLOR];
cocos2d::Vec3 mSwapInfo[MAX_COLOR];
};
#endif // __HELLOWORLD_SCENE_H__
Source file HelloWorldScene.cpp:
Note, the C++ function RGBtoHue and the hue calculation in the GLSL shader (RGBtoHCV) should implement exactly the same algorithm.
The inputs to the function SwapInfo are RGB colors encoded as cocos2d::Vec3. If the source channels of the RGB colors are bytes (unsigned char), then they can easily be converted to cocos2d::Vec3 by cocos2d::Vec3( R / 255.0f, G / 255.0f, B / 255.0f ).
#include "HelloWorldScene.h"
#include "AppMacros.h"
USING_NS_CC;
float RGBtoHue( const cocos2d::Vec3 &RGB )
{
const float Epsilon = 1e-10f;
cocos2d::Vec4 P = (RGB.y < RGB.z) ?
cocos2d::Vec4(RGB.y, RGB.z, -1.0f, 2.0f/3.0f) :
cocos2d::Vec4(RGB.y, RGB.z, 0.0f, -1.0f/3.0f);
cocos2d::Vec4 Q = (RGB.x < P.x) ?
cocos2d::Vec4(P.x, P.y, P.w, RGB.x) :
cocos2d::Vec4(RGB.x, P.y, P.z, P.x);
float C = Q.x - (Q.w < Q.y ? Q.w : Q.y);
float H = fabs((Q.w - Q.y) / (6.0f * C + Epsilon) + Q.z);
return H;
}
cocos2d::Vec3 SwapInfo( const cocos2d::Vec3 &sourceCol, const cocos2d::Vec3 &swapCol, float epsi )
{
return cocos2d::Vec3( RGBtoHue( sourceCol ), RGBtoHue( swapCol ), epsi );
}
void HelloWorld::InitSwapInfo( int i, const cocos2d::Color3B &sourceCol, const cocos2d::Color3B &swapCol, float deviation )
{
mSource[i] = cocos2d::Vec3( sourceCol.r/255.0, sourceCol.g/255.0, sourceCol.b/255.0 );
mSwap[i] = cocos2d::Vec3( swapCol.r/255.0, swapCol.g/255.0, swapCol.b/255.0 );
mDeviation[i] = deviation;
mSwapInfo[i] = SwapInfo( mSource[i], mSwap[i], mDeviation[i] );
}
Scene* HelloWorld::scene()
{
return HelloWorld::create();
}
bool HelloWorld::init()
{
if ( !Scene::init() ) return false;
auto visibleSize = Director::getInstance()->getVisibleSize();
auto origin = Director::getInstance()->getVisibleOrigin();
auto closeItem = MenuItemImage::create(
"CloseNormal.png",
"CloseSelected.png",
CC_CALLBACK_1(HelloWorld::menuCloseCallback,this));
closeItem->setPosition(origin + Vec2(visibleSize) - Vec2(closeItem->getContentSize() / 2));
auto menu = Menu::create(closeItem, nullptr);
menu->setPosition(Vec2::ZERO);
this->addChild(menu, 1);
auto sprite = Sprite::create("HelloWorld.png");
sprite->setPosition(Vec2(visibleSize / 2) + origin);
mProgramExample = new GLProgram();
mProgramExample->initWithFilenames("shader/colorswap.vert", "shader/colorswap.frag");
mProgramExample->bindAttribLocation(GLProgram::ATTRIBUTE_NAME_POSITION, GLProgram::VERTEX_ATTRIB_POSITION);
mProgramExample->bindAttribLocation(GLProgram::ATTRIBUTE_NAME_COLOR, GLProgram::VERTEX_ATTRIB_COLOR);
mProgramExample->bindAttribLocation(GLProgram::ATTRIBUTE_NAME_TEX_COORD, GLProgram::VERTEX_ATTRIB_TEX_COORDS);
mProgramExample->link();
mProgramExample->updateUniforms();
mProgramExample->use();
GLProgramState* state = GLProgramState::getOrCreateWithGLProgram(mProgramExample);
sprite->setGLProgram(mProgramExample);
sprite->setGLProgramState(state);
InitSwapInfo( 0, cocos2d::Color3B( 41, 201, 226 ), cocos2d::Color3B( 255, 0, 0 ), 0.1f );
InitSwapInfo( 1, cocos2d::Color3B( 249, 6, 6 ), cocos2d::Color3B( 255, 255, 0 ), 0.1f );
int noOfColors = 2;
state->setUniformVec3v("u_swap", noOfColors, mSwapInfo);
state->setUniformInt("u_noSwap", noOfColors);
this->addChild(sprite);
return true;
}
void HelloWorld::menuCloseCallback(Ref* sender)
{
Director::getInstance()->end();
#if (CC_TARGET_PLATFORM == CC_PLATFORM_IOS)
exit(0);
#endif
}
Compare RGB values instead of Hue
A fragment shader which directly compares RGB colors would look like this:
#ifdef GL_ES
precision mediump float;
#endif
varying vec4 cc_FragColor;
varying vec2 cc_FragTexCoord1;
const float Epsilon = 1e-10;
vec3 RGBtoHCV( in vec3 RGB )
{
vec4 P = (RGB.g < RGB.b) ? vec4(RGB.bg, -1.0, 2.0/3.0) : vec4(RGB.gb, 0.0, -1.0/3.0);
vec4 Q = (RGB.r < P.x) ? vec4(P.xyw, RGB.r) : vec4(RGB.r, P.yzx);
float C = Q.x - min(Q.w, Q.y);
float H = abs((Q.w - Q.y) / (6.0 * C + Epsilon) + Q.z);
return vec3(H, C, Q.x);
}
vec3 RGBtoHSV(in vec3 RGB)
{
vec3 HCV = RGBtoHCV(RGB);
float S = HCV.y / (HCV.z + Epsilon);
return vec3(HCV.x, S, HCV.z);
}
vec3 HUEtoRGB(in float H)
{
float R = abs(H * 6.0 - 3.0) - 1.0;
float G = 2.0 - abs(H * 6.0 - 2.0);
float B = 2.0 - abs(H * 6.0 - 4.0);
return clamp( vec3(R,G,B), 0.0, 1.0 );
}
vec3 HSVtoRGB(in vec3 HSV)
{
vec3 RGB = HUEtoRGB(HSV.x);
return ((RGB - 1.0) * HSV.y + 1.0) * HSV.z;
}
#define MAX_SWAP 10
uniform vec3 u_orig[MAX_SWAP];
uniform vec3 u_swap[MAX_SWAP];
uniform float u_deviation[MAX_SWAP];
uniform int u_noSwap;
void main()
{
vec4 originalColor = texture2D(CC_Texture0, cc_FragTexCoord1);
vec3 originalHSV = RGBtoHSV( originalColor.rgb );
vec4 swapColor = vec4( originalColor.rgb, 1.0 );
for ( int i = 0; i < 10 ; ++ i )
{
if ( i >= u_noSwap )
break;
if ( all( lessThanEqual( abs(originalColor.rgb - u_orig[i]), vec3(u_deviation[i]) ) ) )
{
vec3 swapHSV = RGBtoHSV( u_swap[i].rgb );
swapColor.rgb = HSVtoRGB( vec3( swapHSV.x, originalHSV.y, originalHSV.z ) );
break;
}
}
vec3 finalColor = mix( originalColor.rgb, swapColor.rgb, swapColor.a );
gl_FragColor = vec4( finalColor.rgb, originalColor.a );
}
Note, the initialization of the uniforms has to be adapted:
int noOfColors = 2;
state->setUniformVec3v("u_orig", noOfColors, mSource);
state->setUniformVec3v("u_swap", noOfColors, mSwap);
state->setUniformFloatv("u_deviation", noOfColors, mDeviation);
state->setUniformInt("u_noSwap", noOfColors);
Extension to the answer
If exactly specified colors should be exchanged, the shader can be simplified much further. For this, the deviations u_deviation have to be restricted (e.g. deviation = 0.02).
#ifdef GL_ES
precision mediump float;
#endif
varying vec4 cc_FragColor;
varying vec2 cc_FragTexCoord1;
#define MAX_SWAP 11
uniform vec3 u_orig[MAX_SWAP];
uniform vec3 u_swap[MAX_SWAP];
uniform float u_deviation[MAX_SWAP];
uniform int u_noSwap;
void main()
{
vec4 originalColor = texture2D(CC_Texture0, cc_FragTexCoord1);
vec4 swapColor = vec4( originalColor.rgb, 1.0 );
for ( int i = 0; i < MAX_SWAP ; ++ i )
{
vec3 deltaCol = abs( originalColor.rgb - u_orig[i] );
float hit = step( deltaCol.x + deltaCol.y + deltaCol.z, u_deviation[i] * 3.0 );
swapColor.rgb = mix( swapColor.rgb, u_swap[i].rgb, hit );
}
gl_FragColor = vec4( swapColor.rgb, originalColor.a );
}
If each color in the source texture has an individual color channel (this means the color value is only used for this particular color, e.g. the red color channel), then the shader code can be further simplified, because only that one channel has to be compared:
void main()
{
vec4 originalColor = texture2D(CC_Texture0, cc_FragTexCoord1);
vec4 swapColor = vec4( originalColor.rgb, 1.0 );
for ( int i = 0; i < MAX_SWAP ; ++ i )
{
float hit = step( abs( originalColor.r - u_orig[i].r ), u_deviation[i] );
swapColor.rgb = mix( swapColor.rgb, u_swap[i].rgb, hit );
}
gl_FragColor = vec4( swapColor.rgb, originalColor.a );
}
A further optimization brings us back to the first algorithm described in this answer. The big advantage of this algorithm is that each color is swapped (except where the alpha channel of the swap texture is 0), but no expensive searching in the lookup table has to be done in the shader.
Each color will be swapped with the corresponding color according to its red color channel. As mentioned, if a color should not be swapped, the alpha channel of the swap texture has to be set to 0.
A new member mSwapTexture has to be added to the class:
cocos2d::Texture2D* mSwapTexture;
The texture can be easily created, and the uniform texture sampler can be set like this:
#include <array>
.....
std::array< unsigned char, 256 * 4 > swapPlane{ 0 };
for ( int c = 0; c < noOfColors; ++ c )
{
size_t i = (size_t)( mSource[c].x * 255.0 ) * 4;
swapPlane[i+0] = (unsigned char)(mSwap[c].x*255.0);
swapPlane[i+1] = (unsigned char)(mSwap[c].y*255.0);
swapPlane[i+2] = (unsigned char)(mSwap[c].z*255.0);
swapPlane[i+3] = 255;
}
mSwapTexture = new Texture2D();
mSwapTexture->setAliasTexParameters();
cocos2d::Size contentSize;
mSwapTexture->initWithData( swapPlane.data(), swapPlane.size(), Texture2D::PixelFormat::RGBA8888, 256, 1, contentSize );
state->setUniformTexture( "u_swapTexture", mSwapTexture );
The fragment shader would look like this:
#ifdef GL_ES
precision mediump float;
#endif
varying vec4 cc_FragColor;
varying vec2 cc_FragTexCoord1;
uniform sampler2D u_swapTexture; // lookup texture with 256 swap colors
void main()
{
vec4 originalColor = texture2D(CC_Texture0, cc_FragTexCoord1);
vec4 swapColor = texture2D(u_swapTexture, vec2(originalColor.r, 0.0));
vec3 finalColor = mix(originalColor.rgb, swapColor.rgb, swapColor.a);
gl_FragColor = vec4(finalColor.rgb, originalColor.a);
}
Of course, the lookup key does not always have to be the red channel; any other channel is also possible.
Even a combination of 2 color channels would be possible by using a larger, two-dimensional lookup texture. See the following example, which demonstrates the use of a lookup texture with 1024 entries. The lookup table uses the full red channel (256 indices) in the X dimension and the green channel divided by 64 (4 indices) in the Y dimension.
Create a two-dimensional lookup table:
std::array< unsigned char, 1024 * 4 > swapPlane{ 0 };
for ( int c = 0; c < noOfColors; ++ c )
{
size_t ix = (size_t)( mSource[c].x * 255.0 );
size_t iy = (size_t)( mSource[c].y * 255.0 / 64.0 );
size_t i = ( iy * 256 + ix ) * 4;
swapPlane[i+0] = (unsigned char)(mSwap[c].x*255.0);
swapPlane[i+1] = (unsigned char)(mSwap[c].y*255.0);
swapPlane[i+2] = (unsigned char)(mSwap[c].z*255.0);
swapPlane[i+3] = 255;
}
mSwapTexture = new Texture2D();
mSwapTexture->setAliasTexParameters();
cocos2d::Size contentSize;
mSwapTexture->initWithData( swapPlane.data(), swapPlane.size(), Texture2D::PixelFormat::RGBA8888, 256, 4, contentSize );
And adapt the fragment shader:
void main()
{
vec4 originalColor = texture2D(CC_Texture0, cc_FragTexCoord1);
vec4 swapColor = texture2D(u_swapTexture, originalColor.rg);
vec3 finalColor = mix(originalColor.rgb, swapColor.rgb, swapColor.a);
gl_FragColor = vec4(finalColor.rgb, originalColor.a);
}
Interpolate the texture
Since it is not possible to use GL_LINEAR with the above approach, linear filtering has to be emulated, if it is needed:
#ifdef GL_ES
precision mediump float;
#endif
varying vec4 cc_FragColor;
varying vec2 cc_FragTexCoord1;
uniform sampler2D u_swapTexture; // lookup texture with 256 swap colors
uniform vec2 u_spriteSize;
void main()
{
vec2 texS = 1.0 / u_spriteSize;
vec2 texF = fract( cc_FragTexCoord1 * u_spriteSize + 0.5 );
vec2 texC = (cc_FragTexCoord1 * u_spriteSize + 0.5 - texF) / u_spriteSize;
vec4 originalColor = texture2D(CC_Texture0, texC);
vec4 swapColor = texture2D(u_swapTexture, originalColor.rg);
vec3 finalColor00 = mix(originalColor.rgb, swapColor.rgb, swapColor.a);
originalColor = texture2D(CC_Texture0, texC+vec2(texS.x, 0.0));
swapColor = texture2D(u_swapTexture, originalColor.rg);
vec3 finalColor10 = mix(originalColor.rgb, swapColor.rgb, swapColor.a);
originalColor = texture2D(CC_Texture0, texC+vec2(0.0,texS.y));
swapColor = texture2D(u_swapTexture, originalColor.rg);
vec3 finalColor01 = mix(originalColor.rgb, swapColor.rgb, swapColor.a);
originalColor = texture2D(CC_Texture0, texC+texS.xy);
swapColor = texture2D(u_swapTexture, originalColor.rg);
vec3 finalColor11 = mix(originalColor.rgb, swapColor.rgb, swapColor.a);
vec3 finalColor0 = mix( finalColor00, finalColor10, texF.x );
vec3 finalColor1 = mix( finalColor01, finalColor11, texF.x );
vec3 finalColor = mix( finalColor0, finalColor1, texF.y );
gl_FragColor = vec4(finalColor.rgb, originalColor.a);
}
The new uniform variable u_spriteSize has to be set like this:
auto size = sprite->getTexture()->getContentSizeInPixels();
state->setUniformVec2( "u_spriteSize", Vec2( (float)size.width, (float)size.height ) );
Modify the texture on the CPU
Of course, the texture can also be modified on the CPU, but then a separate texture has to be generated for each set of swap colors. The advantage would be that no shader is needed at all.
The following code swaps the colors when the texture is loaded. The shader can be skipped completely.
Sprite * sprite = nullptr;
std::string imageFile = ....;
std::string fullpath = FileUtils::getInstance()->fullPathForFilename(imageFile);
cocos2d::Image *img = !fullpath.empty() ? new Image() : nullptr;
if (img != nullptr && img->initWithImageFile(fullpath))
{
if ( img->getRenderFormat() == Texture2D::PixelFormat::RGBA8888 )
{
unsigned char *plane = img->getData();
for ( int y = 0; y < img->getHeight(); ++ y )
{
for ( int x = 0; x < img->getWidth(); ++ x )
{
size_t i = ( y * img->getWidth() + x ) * 4;
unsigned char t = plane[i];
for ( int c = 0; c < noOfColors; ++ c )
{
if ( fabs(mSource[c].x - plane[i+0]/255.0f) < mDeviation[c] &&
fabs(mSource[c].y - plane[i+1]/255.0f) < mDeviation[c] &&
fabs(mSource[c].z - plane[i+2]/255.0f) < mDeviation[c] )
{
plane[i+0] = (unsigned char)(mSwap[c].x*255.0);
plane[i+1] = (unsigned char)(mSwap[c].y*255.0);
plane[i+2] = (unsigned char)(mSwap[c].z*255.0);
}
}
}
}
}
std::string key = "my_swap_" + imageFile;
if ( Texture2D *texture = _director->getTextureCache()->addImage( img, key ) )
sprite = Sprite::createWithTexture( texture );
}
Combined approach on the CPU and GPU
This approach can be used if always the same regions (colors) of the texture are swapped. The advantage of this approach is that the original texture is modified only once, but every use of the texture can hold its own swap table.
For this approach, the alpha channel is used to hold the index of the swap color. In the example code below, the value range from 1 to 11 (inclusive) is used to store the indices of the swap colors; 0 is reserved for full transparency.
Sprite * sprite = nullptr;
std::string imageFile = ....;
std::string key = "my_swap_" + imageFile;
Texture2D *texture = _director->getTextureCache()->getTextureForKey( key );
if (texture == nullptr)
{
std::string fullpath = FileUtils::getInstance()->fullPathForFilename(imageFile);
cocos2d::Image *img = !fullpath.empty() ? new Image() : nullptr;
if ( img->initWithImageFile(fullpath) &&
img->getRenderFormat() == Texture2D::PixelFormat::RGBA8888 )
{
unsigned char *plane = img->getData();
for ( int y = 0; y < img->getHeight(); ++ y )
{
for ( int x = 0; x < img->getWidth(); ++ x )
{
size_t i = ( y * img->getWidth() + x ) * 4;
unsigned char t = plane[i];
for ( int c = 0; c < noOfColors; ++ c )
{
if ( fabs(mSource[c].x - plane[i+0]/255.0f) < mDeviation[c] &&
fabs(mSource[c].y - plane[i+1]/255.0f) < mDeviation[c] &&
fabs(mSource[c].z - plane[i+2]/255.0f) < mDeviation[c] )
{
plane[i+3] = (unsigned char)(c+1);
}
}
}
}
texture = _director->getTextureCache()->addImage( img, key );
}
}
if ( texture != nullptr )
sprite = Sprite::createWithTexture( texture );
The fragment shader needs only the uniforms u_swap and u_noSwap and does not have to do any expensive searching.
#ifdef GL_ES
precision mediump float;
#endif
varying vec4 cc_FragColor;
varying vec2 cc_FragTexCoord1;
#define MAX_SWAP 11
uniform vec3 u_swap[MAX_SWAP];
uniform int u_noSwap;
void main()
{
vec4 originalColor = texture2D(CC_Texture0, cc_FragTexCoord1);
float fIndex = originalColor.a * 255.0 - 0.5;
float maxIndex = float(u_noSwap) + 0.5;
int iIndex = int( clamp( fIndex, 0.0, maxIndex ) );
float isSwap = step( 0.0, fIndex ) * step( fIndex, maxIndex );
vec3 swapColor = mix( originalColor.rgb, u_swap[iIndex], isSwap );
gl_FragColor = vec4( swapColor.rgb, max(originalColor.a, isSwap) );
}
Change the hue, saturation, and value of your sprite using a shader.
Shader code example:
#ifdef GL_ES
precision mediump float;
#endif
varying vec2 v_texCoord;
////uniform sampler2D CC_Texture0;
uniform float u_dH;
uniform float u_dS;
uniform float u_dL;
//algorithm ref to: https://en.wikipedia.org/wiki/HSL_and_HSV
void main() {
vec4 texColor=texture2D(CC_Texture0, v_texCoord);
float r=texColor.r;
float g=texColor.g;
float b=texColor.b;
float a=texColor.a;
//convert rgb to hsl
float h;
float s;
float l;
{
float max=max(max(r,g),b);
float min=min(min(r,g),b);
//----h
if(max==min){
h=0.0;
}else if(max==r&&g>=b){
h=60.0*(g-b)/(max-min)+0.0;
}else if(max==r&&g<b){
h=60.0*(g-b)/(max-min)+360.0;
}else if(max==g){
h=60.0*(b-r)/(max-min)+120.0;
}else if(max==b){
h=60.0*(r-g)/(max-min)+240.0;
}
//----l
l=0.5*(max+min);
//----s
if(l==0.0||max==min){
s=0.0;
}else if(0.0<=l&&l<=0.5){
s=(max-min)/(2.0*l);
}else if(l>0.5){
s=(max-min)/(2.0-2.0*l);
}
}
//(h,s,l)+(dH,dS,dL) -> (h,s,l)
h=h+u_dH;
s=min(1.0,max(0.0,s+u_dS));
l=l;//do not use HSL model to adjust lightness, because the effect is not good
//convert (h,s,l) to rgb and got final color
vec4 finalColor;
{
float q;
if(l<0.5){
q=l*(1.0+s);
}else if(l>=0.5){
q=l+s-l*s;
}
float p=2.0*l-q;
float hk=h/360.0;
float t[3];
t[0]=hk+1.0/3.0;t[1]=hk;t[2]=hk-1.0/3.0;
for(int i=0;i<3;i++){
if(t[i]<0.0)t[i]+=1.0;
if(t[i]>1.0)t[i]-=1.0;
}//got t[i]
float c[3];
for(int i=0;i<3;i++){
if(t[i]<1.0/6.0){
c[i]=p+((q-p)*6.0*t[i]);
}else if(1.0/6.0<=t[i]&&t[i]<0.5){
c[i]=q;
}else if(0.5<=t[i]&&t[i]<2.0/3.0){
c[i]=p+((q-p)*6.0*(2.0/3.0-t[i]));
}else{
c[i]=p;
}
}
finalColor=vec4(c[0],c[1],c[2],a);
}
//actually, it is not final color. the lightness has not been adjusted
//adjust lightness use the simplest method
finalColor+=vec4(u_dL,u_dL,u_dL,0.0);
gl_FragColor=finalColor;
}

OpenGL - Water waves (with noise)

I am currently in the process of making water waves, so basically I am starting from the beginning. I have created a mesh which is basically a flat square, and I have animated it in the vertex shader (below is the code which achieves that):
vtx.y = (sin(2.0 * vtx.x + a_time/1000.0 ) * cos(1.5 * vtx.y + a_time/1000.0) * 0.2);
Basically I am just moving the y position based on a sin and cos function; the results of this can be observed here!
I then tried adding some Perlin noise (as per the Perlin noise functions by Ian McEwan, available at github.com/ashima/webgl-noise) as follows:
vtx.y = vtx.y + 0.1*cnoise((a_time/5000.0)*a_vertex.yz);
The results of this can be observed here!
As you can plainly observe, there is no real "random" effect like the one I was looking for (to simulate some basic random roughness of an ocean).
I was wondering how it would be possible for me to achieve this (also, any suggestions on how to improve either of the functions that change y would be appreciated).
The simplest solution is to use a texture that contains the needed noise. If the displacement is kept in a texture, then it is possible to apply the displacement in the vertex shader, so there is no need to modify the vertex buffer. To make the waves move, you may add an animated offset.
There are plenty of ways to fake, as you say, the "random" effect. You may take two samples from the texture, using offsets that change differently over time, and then simply add the two displacements.
For example, see the following vertex shader:
uniform sampler2D u_heightMap;
uniform float u_time;
uniform mat4 modelViewMatrix;
uniform mat4 projectionMatrix;
attribute vec3 position;
attribute vec2 uv;
void main()
{
vec3 pos = position;
vec2 offset1 = vec2(0.8, 0.4) * u_time * 0.1;
vec2 offset2 = vec2(0.6, 1.1) * u_time * 0.1;
float height1 = texture2D(u_heightMap, uv + offset1).r * 0.02;
float height2 = texture2D(u_heightMap, uv + offset2).r * 0.02;
pos.z += height1 + height2;
vec4 mvPosition = modelViewMatrix * vec4( pos, 1.0 );
gl_Position = projectionMatrix * mvPosition;
}
I've made a simple example using threejs:
var container;
var camera, scene, renderer;
var mesh;
var uniforms;
var clock = new THREE.Clock();
init();
animate();
function init() {
container = document.getElementById('container');
camera = new THREE.PerspectiveCamera(40, window.innerWidth / window.innerHeight, 0.1, 100);
camera.position.z = 0.6;
camera.position.y = 0.2;
camera.rotation.x = -0.45;
scene = new THREE.Scene();
var boxGeometry = new THREE.PlaneGeometry(0.75, 0.75, 100, 100);
var heightMap = THREE.ImageUtils.loadTexture("data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAEAAAABACAMAAACdt4HsAAAA81BMVEVZWVlRUVFdXV1EREQ8PDxBQUFWVlY3NzdKSkoyMjJqamphYWFOTk5mZmZHR0dxcXEtLS0qKip0dHRubm4aGhpTU1M+Pj4vLy8nJyeCgoJ3d3chISE0NDSKiop6eno5OTkBAQEjIyMeHh5/f38SEhKOjo6FhYUWFhaRkZELCwsPDw+bm5t8fHyfn59jY2OVlZUICAiwsLCHh4elpaWioqL///+qqqqYmJitra20tLS3t7fIyMi5ubnR0dHh4eHDw8P8/PzKysq9vb2np6fw8PDd3d3Ozs67u7vFxcX09PTY2Ni/v7/p6enV1dXT09Pl5eXt7e2kvPjWAAAMM0lEQVRYwxVW1ZbjOBAtsSwzsx1jOOlAc/f0MO7M7v9/zWr0kNg+xyVX6RIQXOhlUyIJNQwOLc/vL1ZiHuzATLhtIppQ1DzA0vd7S9nz9oBiExE/JXFo7zACFOzscM5l6yDfk/bs2vmTKhc3i8U6xIbfIGSlkUgXC5FGnft2N0kvrSQRWV+qu0sBYLu7g3t9VMicmjAPTJ74sjbWm5ubcb1YLKq84JH+X9MQW2l1UNl4s8wQmCJbRik0ISCLGKL4+OHty5dfs6qWSxNxbErO07L0Or8KV0G6Xqw3ozzvsFe1ka4cwdDEwr/pKMGgcJxg+/L6/iolQknps3A4YAwmlY4jTZsaVNJoHMvEXU2IGMnYlWy3u+YAhjDDAMwmbIrTdapdSc0d8bO1P20ftnf7cw2x4RNa9pgZWVa26PxpBZaJwh0eF34xTG5gOxJY2Ewr7ASgJ+uB2d14op22p6Obh0XBx3W/ZIbXZ2VmUFoZguV3B7JZjFFassASkkCJcte0XFl56/W6LSxAfuQLQnzq0E7G/lJiK3a4KNebLBPUi7dyUzIwyXgz9g7rgArn+OHj9wsA66N1imza+d5ybR0nZ1x6hA+fvv7a72sb4RDzUJG0XES4niblezFYqYSVGmSxYwdUdWPUdZIloifksH/5fE42y943XHW4m54en3ZStEHI/cWid39+yR1awf2+vofpEWddaRimSUVkYFwtE4SbOYjrQmyWCam6UrT3hD992CL59DzQzuPN+cizZWe4H8wYwr1LPU9YZeK0rTD6UiQOtUyOAbwUbBOzvo/asePzu8/P777lppNI7GCU9dGIODWhcMMGjzebxUYQ4J7HsMIi8x0RyfwwBNNRtuVmsSgttX/3/vVch6hbgxEZ41JA7x2PoLh7541RST20gtQXzJOp0abZJm2J0W0ialneuKFhuJ3P1/PPy1GXLzuNC0NQvP3yCPo8kV3n+/3L6U5q9N8Y9bUuWFkuS6Mro9TzWgrSygxNhfPzg54xW0bS5YT60sxdC8RyoxtRIW7qGnfjYt1b4XNAdakxivqR0l5YabpZLLPFgj/ysW9R1eNVAbL6S4sOCE2cmNlNiHmctE6/Xi7TIXfKm81yXZIqhkjIuKXdQq/eZkaC40oYohXM2tx41IAiABvTivIAIezixCCpwMgoe0OZeHu9uF5WalCi5WKTprpxSaw2FQTpIkJ0lgmMolwdYr+VVhi4EvKpgY3ejDoV4c3l363NgZLCZcRUjEVrQzq0L9OKAJaQ+AaIBI55aONs9ORQn1fb08usC9yMFTMhYTg2qMXB1LN3MC03m43gnHj6XBkn1Ep1ARKqmHApxCHYDXFrn8EV65v1jR8x22qFV/nJbOl3aBuDPy4W0VKifoyYSZhDIg8YSip/mWUonC5bRTUf1l2y7ERKMQKHVb4npMpDb7MsRdmLaLmOLMCBerzEAaIGAxckt6zVarLPH3dm3y83ZRQl2MR4hRh1uptlgucT3oyGqEoBiMhD0BThXBz328u0v0KtQLnHl+fvbwVaZlqN12vNV6NiBLY1OONms2T2Fja9Ly1GdK+4WM21DTaC1afH+xwIIvbd55c9kFHz31+ON4sxth0mHHUeBuJXZd8GuO/85KBsKh1zdVXD3cN1dw2GoKgo8ObudPf0NbCiyG/x9owT4blnzQrPiI2KKMvnOYrGzdLMV9cZIXUZWuIwf+wxB6UUbKdpMJvcFglthtDEF62Gz2+4186wiNDqEnIr3/IkExgVc1JW9lygm/X6RviRCNS0BYfFtrJsxVXeAG37ClZPL2fUZfqjYf969/j87eXnt0KFgCLfJ7LJtZM50Wi7iB+G6QAoZozELiBk0ZKApt3+muO/Kpzwu5eCBMXx3fvbr4UNO9iUfKpr5Q6BhbanUy454qA3PyRVGJiEbqKyz8ZEPYLTj55D1A6NN1WAv/337z+Px+2kCMWItn7mmMfT3bv/QsmMFg52UtEkPD4fbS2rN2sRfz9NceZ3bJ7jxU2bImybNsYuQmHhKq+PNlFCLGxyvksMCzQbQ3fIH75fAiQ8itzA3P/zFErDE6oxhKUcaUlbxa1RahG1jdIgRqZp2Qs+vbtO+xXUjTtd5nnlMhTM27v71fDz94dQwIFIpDe2Vyo0CRzizjFJXJhukA9BwJkF7n34+WO9Ah7s5ppbCQNrdfrw7c9//7wCKI4B47/yIGeXeZHk1CpQOVK3Pq12NkpbhuqZHziXMOXqfLFJi8xw//3Pc93MtuHxWZnFoFztzQSnUZU4+NuVk8TrYXCZWGqFs0BhCKfVX1kPVT27zbHev3tXMM/POmZpUEzHWg12DI4hUL67f/56zevaPXj6Vk+COUmCr48PGFoNeyPyK6i/fnzVTQU4tiSliDCiXO35gtkxTR3HaupZqYb3kSFISjiXzpCbioCnvZQ5whAymH7e3n6+B5+kBuNWlxqWqof6cm2cMtJ6pZSNB8W0coATK+UwSCoLg9Uv1x7nseNu/32vMXe7d/+y9uEcKowydv73z92ESRYRbB63zc6EYrsN5tx19UQSjALYKTFW5OCi/NfXwDu+//Hpvs7d/Y/feWyxg7tz3WBYHffDPP+lCqPu5elpfz/rpwE2HYjBA95VEuai2BeYGSiE+8un6XR7+zs/nn6/MXqwcazViwS29kgf3On0Whcu1qdFDMvKgApKWULAdiHYNlhWlcFEan/bPf73/vb91gJu+GnmM2kd6txzAqLVObCZPdT51vJKDxqKwKGG/m2mxmR9JHiFsMKE28+3ty/HXKub74G6P33+8mrTMvMMDjkedmE+BwRvoSA6hVCHgxsWw/0Rinx2z9PAqqq5PP+4vT2FE6Ya5pdfU/2xNmhMKFqtZGu0hM+rx6+gmDH6aDBtVyKz/vr99OFej5kIqzn+0pz9kJ8lsfwKx1QyrjCHWBI0x8A1+1tyvAODGilXZtjYRiRgdfy+Wj1NWkTBfTg1deAwD4Gkhu9nPkVAKqrpiQLdJKlSqS/0o86PMfINjqm53a+29vx6+2ffTA+PEGUMMx4eV670U89oK4tbrcdkDPevv1f2AasaQcGdlFJheL6Q3Aw/PQz17Zv55f2
P18KJqAWmO086SQaWkzexgzUKERKr28fhCvbOfXmAlY2AaUN0rJgSaYY/f/z4ZxbFz482zQgarvuAW1V6/PP0cMndMC9chM3w231uEeJA+PoCPNIaXbr1ZCETcOITrdSaj8Pp0NJWIkfZUre6DS+BaTLeFDYcuASpaFr1LQpruOmjxciITzPR7L982M+qSvvN0joNDInUaw2PplxrrkYTjRk16+GuoR3zNFctWq1v4KZtxwTEZpPZ725vb3/93m8VCpW1uuIYLODmsMLKBrGMKC+GsLh/Uq4rHXnY8n6hFzSxKNNy3UvzSb9/sdx9PX96e97vG7Oe9w/Pbw/bq465CUeWjKWggJZekxfXDy+usV6mJrxNiREtIw/uv+zvPuXuITyfVvn5wBzIn17efw544I06xBJBE+RQp/RoxFa/7pv648qepgfAwRzjsHH8kgnD2plMI2Jlsgo50jz9889b0S82y2WpByGEPRyhlUTGUMill/94fdg5sNSJULjsb44z5tj3urXROrIVndfKrU4t+zjTrE+9WO2K4/T8scCAcZxlXSQVMUQGSBuvUYmu07lkx6Wvw6Omn5+21DGyw+ntIRa4thGWxHE4cXI9QNTQ5XrZ6bxrMBOwY5rI1zhNLHgwZSVKnbA2elfVmKaN+OlyeSxahzBEkeWPKXA7v8PES9p+HXUsBoPX9zkhCHMA+2ozq+vWm5So+bRVyupb6e5mg2B+KAi3TFJVaD7PJlYPqutSwQxAcZryYfq+B+KN6GVozttd0+htmj9vCgh1uCPIbudCPQ2ftkbUd8Gx0OyfJ2XvsHRAYhsxmI4h0G45trtaw2CyqaCwP+0swqg8X0+fv15Q/e+7LzPnfsaGFdEmzbTLhZyCidydaWtqYG7YivkSm5yM6z6zTDwo5CRseL19Bs8zHx+vu0AVu3prAltvRPEtsHcNWFlcNIqbYMeQa1KmiRnwFoamkdFGYEYAn59A9yeYYlXkcQhWduBaGi/MtoMQGMP1dXBNAGkqXrUeWjWA5rsBmEcTy4pN4fDr/qx44iciIweQQTCbgAgjDA7ofwxocDxlnyf5AAAAAElFTkSuQmCC");
heightMap.wrapT = heightMap.wrapS = THREE.RepeatWrapping;
uniforms = {u_time: {type: "f", value: 0.0 }, u_heightMap: {type: "t",value:heightMap} };
var material = new THREE.ShaderMaterial({
uniforms: uniforms,
side: THREE.DoubleSide,
wireframe: true,
vertexShader: document.getElementById('vertexShader').textContent,
fragmentShader: document.getElementById('fragment_shader').textContent
});
mesh = new THREE.Mesh(boxGeometry, material);
mesh.rotation.x = 3.14 / 2.0;
scene.add(mesh);
renderer = new THREE.WebGLRenderer();
renderer.setClearColor( 0xffffff, 1 );
container.appendChild(renderer.domElement);
onWindowResize();
window.addEventListener('resize', onWindowResize, false);
}
function onWindowResize(event) {
camera.aspect = window.innerWidth / window.innerHeight;
camera.updateProjectionMatrix();
renderer.setSize(window.innerWidth, window.innerHeight);
}
function animate() {
requestAnimationFrame(animate);
render();
}
function render() {
var delta = clock.getDelta();
uniforms.u_time.value += delta;
mesh.rotation.z += delta * 0.5;
renderer.render(scene, camera);
}
body { margin: 0px; overflow: hidden; }
<script src="http://threejs.org/build/three.min.js"></script>
<div id="container"></div>
<script id="fragment_shader" type="x-shader/x-fragment">
void main( void )
{
gl_FragColor = vec4(vec3(0.0), 1.0);
}
</script>
<script id="vertexShader" type="x-shader/x-vertex">
uniform lowp sampler2D u_heightMap;
uniform float u_time;
void main()
{
vec3 pos = position;
vec2 offset1 = vec2(1.0, 0.5) * u_time * 0.1;
vec2 offset2 = vec2(0.5, 1.0) * u_time * 0.1;
float height1 = texture2D(u_heightMap, uv + offset1).r * 0.02;
float height2 = texture2D(u_heightMap, uv + offset2).r * 0.02;
pos.z += height1 + height2;
vec4 mvPosition = modelViewMatrix * vec4( pos, 1.0 );
gl_Position = projectionMatrix * mvPosition;
}
</script>
Using a better displacement texture, or even using two different textures for the two offsets, you may achieve better results.
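As a minimal sketch of that last suggestion (assuming a second sampler uniform, here called u_heightMap2, bound to a different noise texture; the offsets and scale factors are arbitrary), the vertex shader could sample each map with its own offset and add the two displacements:
uniform sampler2D u_heightMap; // first noise/height texture
uniform sampler2D u_heightMap2; // assumed second, different noise texture
uniform float u_time;
uniform mat4 modelViewMatrix;
uniform mat4 projectionMatrix;
attribute vec3 position;
attribute vec2 uv;
void main()
{
vec3 pos = position;
// two offsets drifting in different directions and at different speeds
vec2 offset1 = vec2(0.8, 0.4) * u_time * 0.1;
vec2 offset2 = vec2(0.6, 1.1) * u_time * 0.07;
// sample a different texture for each offset and add the displacements
float height1 = texture2D(u_heightMap, uv + offset1).r * 0.02;
float height2 = texture2D(u_heightMap2, uv + offset2).r * 0.02;
pos.z += height1 + height2;
gl_Position = projectionMatrix * modelViewMatrix * vec4(pos, 1.0);
}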