Fisheye Skybox Shader - glsl

I'm trying to write a panorama viewer. Mostly this involves mapping an image to a quad to simulate a skybox.
For a cubemap image it's pretty trivial: either copy the 6 parts of the image to the 6 faces of a cubemap, or else make a shader that does the cubemap math as specified in the OpenGL ES spec.
For an equirectangular image, like one from a Ricoh Theta, you can use this math:
// convert from direction (n) to texcoord (uv)
float latitude = acos(n.y);
float longitude = atan(n.z, n.x);
vec2 sphereCoords = vec2(longitude, latitude) * vec2(0.5 / PI, 1.0 / PI);
vec2 uv = fract(vec2(0.5,1.0) - sphereCoords);
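In other words: longitude ∈ [-π, π] scaled by 0.5/π lands in [-0.5, 0.5], latitude ∈ [0, π] scaled by 1/π lands in [0, 1], and the fract(vec2(0.5, 1.0) - sphereCoords) step shifts and wraps both into the [0, 1] texture range.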
const m4 = twgl.m4;
const gl = document.querySelector('canvas').getContext('webgl');
const vs = `
attribute vec4 position;
varying vec4 v_position;
void main() {
v_position = position;
gl_Position = position;
gl_Position.z = 1.0;
}
`;
const fs = `
precision highp float;
uniform sampler2D u_skybox;
uniform mat4 u_viewDirectionProjectionInverse;
varying vec4 v_position;
#define PI radians(180.0)
void main() {
vec4 t = u_viewDirectionProjectionInverse * v_position;
vec3 n = normalize(t.xyz / t.w);
// convert from direction (n) to texcoord (uv)
float latitude = acos(n.y);
float longitude = atan(n.z, n.x);
vec2 sphereCoords = vec2(longitude, latitude) * vec2(0.5 / PI, 1.0 / PI);
vec2 uv = fract(vec2(0.5,1.0) - sphereCoords);
// multiply u by 2 because we only have a 180degree view
gl_FragColor = texture2D(u_skybox, uv * vec2(-2, 1));
}
`;
const programInfo = twgl.createProgramInfo(gl, [vs, fs]);
const tex = twgl.createTexture(gl, {
src: 'https://i.imgur.com/ChIfXM0.jpg',
flipY: true,
});
const bufferInfo = twgl.primitives.createXYQuadBufferInfo(gl, 2);
function render(time) {
time *= 0.001;
twgl.resizeCanvasToDisplaySize(gl.canvas);
gl.viewport(0, 0, gl.canvas.width, gl.canvas.height);
const aspect = gl.canvas.clientWidth / gl.canvas.clientHeight;
const projectionMatrix = m4.perspective(45 * Math.PI / 180, aspect, 1, 20);
const cameraMatrix = m4.rotationY(time * 0.1);
m4.rotateX(cameraMatrix, Math.sin(time * 0.3) * 0.5, cameraMatrix);
const viewMatrix = m4.inverse(cameraMatrix);
viewMatrix[12] = 0;
viewMatrix[13] = 0;
viewMatrix[14] = 0;
const viewDirectionProjectionMatrix = m4.multiply(projectionMatrix, viewMatrix);
const viewDirectionProjectionInverseMatrix = m4.inverse(viewDirectionProjectionMatrix);
gl.useProgram(programInfo.program);
twgl.setBuffersAndAttributes(gl, programInfo, bufferInfo);
twgl.setUniforms(programInfo, {
u_viewDirectionProjectionInverse: viewDirectionProjectionInverseMatrix,
u_skybox: tex,
});
twgl.drawBufferInfo(gl, bufferInfo);
requestAnimationFrame(render);
}
requestAnimationFrame(render);
body { margin: 0; }
canvas { width: 100vw; height: 100vh; display: block; }
<script src="https://twgljs.org/dist/4.x/twgl-full.min.js"></script>
<canvas></canvas>
But some images are not equirectangular, they're fisheye.
I've been trying to figure out the math needed to do the same thing (map a fisheye image to a quad-based skybox) but I've been having no luck.
As a reference I found this page with conversions from 3D to fisheye coords. It says:
// convert from direction (n) to texcoord (uv)
float r = 2.0 * atan(length(n.xy), n.z) / PI;
float theta = atan(n.y, n.x);
vec2 uv = vec2(cos(theta), sin(theta)) * r * 0.5 + 0.5;
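Here atan(length(n.xy), n.z) is the angle between the direction and the optical axis, so for a 180° fisheye the rim of the image (90° off axis) lands at r = 1, theta is the angle around that axis, and the last line converts those polar coordinates into the unit texture square centered at (0.5, 0.5).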
const m4 = twgl.m4;
const gl = document.querySelector('canvas').getContext('webgl');
const vs = `
attribute vec4 position;
varying vec4 v_position;
void main() {
v_position = position;
gl_Position = position;
gl_Position.z = 1.0;
}
`;
const fs = `
precision highp float;
uniform sampler2D u_skybox;
uniform mat4 u_viewDirectionProjectionInverse;
varying vec4 v_position;
#define PI radians(180.0)
void main() {
vec4 t = u_viewDirectionProjectionInverse * v_position;
vec3 n = normalize(t.xyz / t.w);
// convert from direction (n) to texcoord (uv)
float r = 2.0 * atan(length(n.xy), n.z) / PI;
float theta = atan(n.y, n.x);
vec2 uv = vec2(cos(theta), sin(theta)) * r * 0.5 + 0.5;
#if 0
// Calculate fisheye angle and radius
float theta = atan(n.z, n.x);
float phi = atan(length(n.xz), n.y);
float r = phi / PI;
// Pixel in fisheye space
vec2 uv = vec2(0.5) + r * vec2(cos(theta), sin(theta));
#endif
// multiply u by 2 because we only have a 180degree view
gl_FragColor = texture2D(u_skybox, uv * vec2(-2, 1));
}
`;
const programInfo = twgl.createProgramInfo(gl, [vs, fs]);
const tex = twgl.createTexture(gl, {
src: 'https://i.imgur.com/dzXCQwM.jpg',
flipY: true,
});
const bufferInfo = twgl.primitives.createXYQuadBufferInfo(gl, 2);
function render(time) {
time *= 0.001;
twgl.resizeCanvasToDisplaySize(gl.canvas);
gl.viewport(0, 0, gl.canvas.width, gl.canvas.height);
const aspect = gl.canvas.clientWidth / gl.canvas.clientHeight;
const projectionMatrix = m4.perspective(45 * Math.PI / 180, aspect, 1, 20);
const cameraMatrix = m4.rotationY(time * 0.1);
m4.rotateX(cameraMatrix, 0.7 + Math.sin(time * 0.3) * .7, cameraMatrix);
const viewMatrix = m4.inverse(cameraMatrix);
viewMatrix[12] = 0;
viewMatrix[13] = 0;
viewMatrix[14] = 0;
const viewDirectionProjectionMatrix = m4.multiply(projectionMatrix, viewMatrix);
const viewDirectionProjectionInverseMatrix = m4.inverse(viewDirectionProjectionMatrix);
gl.useProgram(programInfo.program);
twgl.setBuffersAndAttributes(gl, programInfo, bufferInfo);
twgl.setUniforms(programInfo, {
u_viewDirectionProjectionInverse: viewDirectionProjectionInverseMatrix,
u_skybox: tex,
});
twgl.drawBufferInfo(gl, bufferInfo);
requestAnimationFrame(render);
}
requestAnimationFrame(render);
body { margin: 0; }
canvas { width: 100vw; height: 100vh; display: block; }
<script src="https://twgljs.org/dist/4.x/twgl-full.min.js"></script>
<canvas></canvas>
Clearly I'm missing something.

I believe the error lies in this logic:
// multiply u by 2 because we only have a 180degree view
gl_FragColor = texture2D(u_skybox, uv * vec2(-2, 1));
Although this works in the equirectangular case because the math works out such that the z-component only affects longitude, it is no longer valid in the fisheye case because n.z affects both axes.
You can account for the negative z-component in the formula by taking the absolute value of n.z and flipping n.x when the z is negative:
// convert from direction (n) to texcoord (uv)
float r = 2.0 * atan(length(n.xy), abs(n.z)) / PI;
float theta = atan(n.y, n.x * sign(n.z));
vec2 uv = vec2(cos(theta), sin(theta)) * r * 0.5 + vec2(0.5);
Here it is in action:
const m4 = twgl.m4;
const gl = document.querySelector('canvas').getContext('webgl');
const vs = `
attribute vec4 position;
varying vec4 v_position;
void main() {
v_position = position;
gl_Position = position;
gl_Position.z = 1.0;
}
`;
const fs = `
precision highp float;
uniform sampler2D u_skybox;
uniform mat4 u_viewDirectionProjectionInverse;
varying vec4 v_position;
#define PI radians(180.0)
void main() {
vec4 t = u_viewDirectionProjectionInverse * v_position;
vec3 n = normalize(t.xyz / t.w);
// convert from direction (n) to texcoord (uv)
float r = 2.0 * atan(length(n.xy), abs(n.z)) / PI;
float theta = atan(n.y, n.x * sign(n.z));
vec2 uv = vec2(cos(theta), sin(theta)) * r * 0.5 + vec2(0.5);
gl_FragColor = texture2D(u_skybox, uv * vec2(-1.0, 1.0));
}
`;
const programInfo = twgl.createProgramInfo(gl, [vs, fs]);
const tex = twgl.createTexture(gl, {
src: 'https://i.imgur.com/dzXCQwM.jpg',
flipY: true,
});
const bufferInfo = twgl.primitives.createXYQuadBufferInfo(gl, 2);
function render(time) {
time *= 0.001;
twgl.resizeCanvasToDisplaySize(gl.canvas);
gl.viewport(0, 0, gl.canvas.width, gl.canvas.height);
const aspect = gl.canvas.clientWidth / gl.canvas.clientHeight;
const projectionMatrix = m4.perspective(45 * Math.PI / 180, aspect, 1, 20);
const cameraMatrix = m4.rotationY(time * 0.1);
m4.rotateX(cameraMatrix, 0.7 + Math.sin(time * 0.3) * .7, cameraMatrix);
const viewMatrix = m4.inverse(cameraMatrix);
viewMatrix[12] = 0;
viewMatrix[13] = 0;
viewMatrix[14] = 0;
const viewDirectionProjectionMatrix = m4.multiply(projectionMatrix, viewMatrix);
const viewDirectionProjectionInverseMatrix = m4.inverse(viewDirectionProjectionMatrix);
gl.useProgram(programInfo.program);
twgl.setBuffersAndAttributes(gl, programInfo, bufferInfo);
twgl.setUniforms(programInfo, {
u_viewDirectionProjectionInverse: viewDirectionProjectionInverseMatrix,
u_skybox: tex,
});
twgl.drawBufferInfo(gl, bufferInfo);
requestAnimationFrame(render);
}
requestAnimationFrame(render);
body { margin: 0; }
canvas { width: 100vw; height: 100vh; display: block; }
<script src="https://twgljs.org/dist/4.x/twgl-full.min.js"></script>
<canvas></canvas>

Related

Compute Normals After Vertex Deformation?

I am coding a vertex and a fragment shader trying to distort the surface of some water and then computing Blinn-Phong lighting on the surface. I am able to successfully compute the deformed vertex positions with a simple noise function, but how can I find the distorted normals? Since it isn't a linear transformation I am stuck; could anyone help?
Here are the relevant files:
vertex shader:
#version 150
uniform mat4 u_Model;
uniform mat4 u_ModelInvTr;
uniform mat4 u_ViewProj;
uniform vec4 u_Color;
uniform int u_Time;
in vec4 vs_Pos; // The array of vertex positions passed to the shader
in vec4 vs_Nor; // The array of vertex normals passed to the shader
in vec4 vs_Col; // The array of vertex colors passed to the shader.
in vec2 vs_UV; // UV coords for texture to pass thru to fragment shader
in float vs_Anim; // 0.f or 1.f To pass thru to fragment shader
in float vs_T2O;
out vec4 fs_Pos;
out vec4 fs_Nor;
out vec4 fs_LightVec;
out vec4 fs_Col;
out vec2 fs_UVs;
out float fs_Anim;
out float fs_dimVal;
out float fs_T2O;
uniform vec4 u_CamPos;
out vec4 fs_CamPos;
const vec4 lightDir = normalize(vec4(0.0, 1.f, 0.0, 0));
mat4 rotationMatrix(vec3 axis, float angle) {
axis = normalize(axis);
float s = sin(angle);
float c = cos(angle);
float oc = 1.0 - c;
return mat4(oc * axis.x * axis.x + c, oc * axis.x * axis.y - axis.z * s, oc * axis.z * axis.x + axis.y * s, 0.0, oc * axis.x * axis.y + axis.z * s, oc * axis.y * axis.y + c, oc * axis.y * axis.z - axis.x * s, 0.0,oc * axis.z * axis.x - axis.y * s, oc * axis.y * axis.z + axis.x * s, oc * axis.z * axis.z + c, 0.0, 0.0, 0.0, 0.0, 1.0);
}
vec4 rotateLightVec(float deg, vec4 LV) {
mat4 R = rotationMatrix(vec3(0,0,1), deg);
return R * LV;
}
float random1(vec3 p) {
return fract(sin(dot(p, vec3(127.1, 311.7, 191.999)))*43758.5453);
}
vec3 random2( vec3 p ) {
return fract( sin( vec3(dot(p, vec3(127.1, 311.7, 58.24)), dot(p, vec3(269.5, 183.3, 657.3)), dot(p, vec3(420.69, 69.420, 469.20))) ) * 43758.5453);
}
void main()
{
fs_Col = vs_Col;
fs_UVs = vs_UV;
fs_Anim = vs_Anim;
fs_T2O = vs_T2O;
mat3 invTranspose = mat3(u_ModelInvTr);
fs_Nor = vec4(invTranspose * vec3(vs_Nor), 0);
vec4 modelposition = u_Model * vs_Pos;
if (vs_Anim != 0) { // if we want to animate this surface
// check region in texture to decide which animatable type is drawn
bool lava = fs_UVs.x >= 13.f/16.f && fs_UVs.y < 2.f/16.f;
bool water = !lava && fs_UVs.x >= 13.f/16.f && fs_UVs.y <= 4.f/16.f;
if (water) {
// define an oscillating time so that model can transition back and forth
float t = (cos(u_Time * 0.05) + 1)/2; // u_Time increments by 1 every frame. Domain [0,1]
vec3 temp = random2(vec3(modelposition.x, modelposition.y, modelposition.z)); // range [0, 1]
temp = (temp - 0.5)/25; // [0, 1/scalar]
modelposition.x = mix(modelposition.x - temp.x, modelposition.x + temp.x, t);
modelposition.y = mix(modelposition.y - temp.y, modelposition.y + 3*temp.y, t);
modelposition.z = mix(modelposition.z - temp.z, modelposition.z + temp.z, t);
} else if (lava) {
// define an oscillating time so that model can transition back and forth
float t = (cos(u_Time * 0.01) + 1)/2; // u_Time increments by 1 every frame. Domain [0,1]
vec3 temp = random2(vec3(modelposition.x, modelposition.y, modelposition.z)); // range [0, 1]
temp = (temp - 0.5)/25; // [0, 1/scalar]
modelposition.x = mix(modelposition.x - temp.x, modelposition.x + temp.x, t);
modelposition.y = mix(modelposition.y - temp.y, modelposition.y + 3*temp.y, t);
modelposition.z = mix(modelposition.z - temp.z, modelposition.z + temp.z, t);
}
}
fs_dimVal = random1(modelposition.xyz/100.f);
fs_LightVec = rotateLightVec(0.001 * u_Time, lightDir); // Compute the direction in which the light source lies
fs_CamPos = u_CamPos; // uniform handle for the camera position instead of the inverse
fs_Pos = modelposition;
gl_Position = u_ViewProj * modelposition;// gl_Position is a built-in variable of OpenGL which is
// used to render the final positions of the geometry's vertices
}
fragment shader:
#version 330
uniform vec4 u_Color; // The color with which to render this instance of geometry.
uniform sampler2D textureSampler;
uniform int u_Time;
uniform mat4 u_ViewProj;
uniform mat4 u_Model;
in vec4 fs_Pos;
in vec4 fs_Nor;
in vec4 fs_LightVec;
in vec4 fs_Col;
in vec2 fs_UVs;
in float fs_Anim;
in float fs_T2O;
in float fs_dimVal;
out vec4 out_Col;
in vec4 fs_CamPos;
float random1(vec3 p) {
return fract(sin(dot(p,vec3(127.1, 311.7, 191.999)))
*43758.5453);
}
float random1b(vec3 p) {
return fract(sin(dot(p,vec3(169.1, 355.7, 195.999)))
*95751.5453);
}
float mySmoothStep(float a, float b, float t) {
t = smoothstep(0, 1, t);
return mix(a, b, t);
}
float cubicTriMix(vec3 p) {
vec3 pFract = fract(p);
float llb = random1(floor(p) + vec3(0,0,0));
float lrb = random1(floor(p) + vec3(1,0,0));
float ulb = random1(floor(p) + vec3(0,1,0));
float urb = random1(floor(p) + vec3(1,1,0));
float llf = random1(floor(p) + vec3(0,0,1));
float lrf = random1(floor(p) + vec3(1,0,1));
float ulf = random1(floor(p) + vec3(0,1,1));
float urf = random1(floor(p) + vec3(1,1,1));
float mixLoBack = mySmoothStep(llb, lrb, pFract.x);
float mixHiBack = mySmoothStep(ulb, urb, pFract.x);
float mixLoFront = mySmoothStep(llf, lrf, pFract.x);
float mixHiFront = mySmoothStep(ulf, urf, pFract.x);
float mixLo = mySmoothStep(mixLoBack, mixLoFront, pFract.z);
float mixHi = mySmoothStep(mixHiBack, mixHiFront, pFract.z);
return mySmoothStep(mixLo, mixHi, pFract.y);
}
float fbm(vec3 p) {
float amp = 0.5;
float freq = 4.0;
float sum = 0.0;
for(int i = 0; i < 8; i++) {
sum += cubicTriMix(p * freq) * amp;
amp *= 0.5;
freq *= 2.0;
}
return sum;
}
void main()
{
vec4 diffuseColor = texture(textureSampler, fs_UVs);
bool apply_lambert = true;
float specularIntensity = 0;
if (fs_Anim != 0) {
// check region in texture to decide which animatable type is drawn
bool lava = fs_UVs.x >= 13.f/16.f && fs_UVs.y < 2.f/16.f;
bool water = !lava && fs_UVs.x >= 13.f/16.f && fs_UVs.y < 4.f/16.f;
if (lava) {
// slowly gyrate texture and lighten and darken with random dimVal from vert shader
vec2 movingUVs = vec2(fs_UVs.x + fs_Anim * 0.065/16 * sin(0.01*u_Time),
fs_UVs.y - fs_Anim * 0.065/16 * sin(0.01*u_Time + 3.14159/2));
diffuseColor = texture(textureSampler, movingUVs);
vec4 warmerColor = diffuseColor + vec4(0.3, 0.3, 0, 0);
vec4 coolerColor = diffuseColor - vec4(0.1, 0.1, 0, 0);
diffuseColor = mix(warmerColor, coolerColor, 0.5 + fs_dimVal * 0.65*sin(0.02*u_Time));
apply_lambert = false;
} else if (water) {
// blend between 3 different points in texture to create a wavy subtle change over time
vec2 offsetUVs = vec2(fs_UVs.x - 0.5f/16.f, fs_UVs.y - 0.5f/16.f);
diffuseColor = texture(textureSampler, fs_UVs);
vec4 altColor = texture(textureSampler, offsetUVs);
altColor.x += fs_dimVal * pow(altColor.x+.15, 5);
altColor.y += fs_dimVal * pow(altColor.y+.15, 5);
altColor.z += 0.5 * fs_dimVal * pow(altColor.z+.15, 5);
diffuseColor = mix(diffuseColor, altColor, 0.5 + 0.35*sin(0.05*u_Time));
offsetUVs -= 0.25f/16.f;
vec4 newColor = texture(textureSampler, offsetUVs);
diffuseColor = mix(diffuseColor, newColor, 0.5 + 0.5*sin(0.025*u_Time)) + fs_dimVal * vec4(0.025);
diffuseColor.a = 0.7;
// ----------------------------------------------------
// Blinn-Phong Shading
// ----------------------------------------------------
vec4 lightDir = normalize(fs_LightVec - fs_Pos);
vec4 viewDir = normalize(fs_CamPos - fs_Pos);
vec4 halfVec = normalize(lightDir + viewDir);
float shininess = 400.f;
float specularIntensity = max(pow(dot(halfVec, normalize(fs_Nor)), shininess), 0);
}
}
// Calculate the diffuse term for Lambert shading
float diffuseTerm = dot(normalize(fs_Nor), normalize(fs_LightVec));
// Avoid negative lighting values
diffuseTerm = clamp(diffuseTerm, 0, 1);
float ambientTerm = 0.3;
float lightIntensity = diffuseTerm + ambientTerm; //Add a small float value to the color multiplier
//to simulate ambient lighting. This ensures that faces that are not
//lit by our point light are not completely black.
vec3 col = diffuseColor.rgb;
// Compute final shaded color
if (apply_lambert) {
col = col * lightIntensity + col * specularIntensity;
}
// & Check the rare, special case where we draw face between two diff transparent blocks as opaque
if (fs_T2O != 0) {
out_Col = vec4(col, 1.f);
} else {
out_Col = vec4(col, diffuseColor.a);
}
// distance fog!
vec4 fogColor = vec4(0.6, 0.75, 0.9, 1.0);
float FC = gl_FragCoord.z / gl_FragCoord.w / 124.f;
float falloff = clamp(1.05 - exp(-1.05f * (FC - 0.9f)), 0.f, 1.f);
out_Col = mix(out_Col, fogColor, falloff);
}
I tried implementing Blinn-Phong in the fragment shader, but I think it is wrong simply from having the wrong normals. I think this can be done with some sort of tangent and cross product solution, but how can I know the tangent of the surface given we only know the vertex position?
I am not using Unity, just bare C++, and most of the answers I am finding online are for Java or Unity, which I do not understand.
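For what it's worth, here is a minimal sketch of the tangent/cross-product idea mentioned in the question. It assumes the displacement can be written as a smooth height function of the undisplaced position (the hash-based random2() jitter above is not smooth, so a noise or height function would have to stand in for it); the names here are made up for illustration:
// Sketch only: recompute the normal from two finite-difference tangents.
float waterHeight(vec2 xz, float time) {
// stand-in for whatever smooth displacement is applied to the surface
return 0.1 * sin(xz.x * 3.0 + time) * cos(xz.y * 2.0 + time);
}
vec3 deformedNormal(vec2 xz, float time) {
float eps = 0.01;
float h = waterHeight(xz, time);
float hx = waterHeight(xz + vec2(eps, 0.0), time);
float hz = waterHeight(xz + vec2(0.0, eps), time);
vec3 tangentX = vec3(eps, hx - h, 0.0); // tangent along x
vec3 tangentZ = vec3(0.0, hz - h, eps); // tangent along z
// the normal is perpendicular to both tangents; this order makes it point up (+y)
return normalize(cross(tangentZ, tangentX));
}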

Blobs shader GLSL

I want to create a similar background with a shader to these images:
These are just blurred blobs with colors, distributed across the whole page:
Here's my current progress: https://codesandbox.io/s/lucid-bas-wvlzl9?file=/src/components/Background/Background.tsx
Vertex shader:
varying vec2 vUv;
void main() {
vUv = uv;
gl_Position = projectionMatrix * modelViewMatrix * vec4(position, 1.0);
}
Fragment shader:
precision highp float;
uniform float uTime;
uniform float uAmplitude;
uniform float uFrequency;
varying vec2 vUv;
uniform vec2 uResolution;
vec4 Sphere(vec2 position, float radius)
{
// float dist = radius / distance(vUv, position);
// float strength = 0.01 / distance(vUv, position);
float strength = 0.1 / distance(vec2(vUv.x, (vUv.y - 0.5) * 8. + 0.5), vec2(0.));
return vec4(strength * strength);
}
void main()
{
vec2 uv = vUv;
vec4 pixel = vec4(0.0, 0.0, 0.0, 0.0);
vec2 positions[4];
positions[0] = vec2(.5, .5);
// positions[1] = vec2(sin(uTime * 3.0) * 0.5, (cos(uTime * 1.3) * 0.6) + vUv.y);
// positions[2] = vec2(sin(uTime * 2.1) * 0.1, (cos(uTime * 1.9) * 0.8) + vUv.y);
// positions[3] = vec2(sin(uTime * 1.1) * 1.1, (cos(uTime * 2.6) * 0.7) + vUv.y);
for (int i = 0; i < 2; i++)
pixel += Sphere(positions[i], 0.22);
pixel = pixel * pixel;
gl_FragColor = pixel;
}
For each blob, you can multiply its color by a noise function and then by a 2D Gaussian curve centered at a random point, then add all the blobs together. I only added the blobs of the adjacent cells to make it scrollable, and the bounds of the for loops might need to be increased for bigger blobs.
Here is my code:
#ifdef GL_ES
precision mediump float;
#endif
uniform vec2 u_resolution;
uniform vec2 u_mouse;
uniform float u_time;
const float blobSize = 0.125;
const float cellSize = .75;
const float noiseScale = .375;
const float background = .125;
const float blobsLuminosity = .75;
const float blobsSaturation = .5;
vec2 random2(vec2 st){
st = vec2( dot(st,vec2(127.1,311.7)),
dot(st,vec2(269.5,183.3)) );
return -1.0 + 2.0*fract(sin(st)*43758.5453123);
}
// Gradient Noise by Inigo Quilez - iq/2013
// https://www.shadertoy.com/view/XdXGW8
float noise(vec2 st) {
vec2 i = floor(st);
vec2 f = fract(st);
vec2 u = f*f*(3.0-2.0*f);
return mix( mix( dot( random2(i + vec2(0.0,0.0) ), f - vec2(0.0,0.0) ),
dot( random2(i + vec2(1.0,0.0) ), f - vec2(1.0,0.0) ), u.x),
mix( dot( random2(i + vec2(0.0,1.0) ), f - vec2(0.0,1.0) ),
dot( random2(i + vec2(1.0,1.0) ), f - vec2(1.0,1.0) ), u.x), u.y)*.5+.5;
}
float gaussFunction(vec2 st, vec2 p, float r) {
return exp(-dot(st-p, st-p)/2./r/r);
}
// Function from Iñigo Quilez
// https://www.shadertoy.com/view/MsS3Wc
vec3 hsb2rgb( in vec3 c ){
vec3 rgb = clamp(abs(mod(c.x*6.0+vec3(0.0,4.0,2.0),
6.0)-3.0)-1.0,
0.0,
1.0 );
rgb = rgb*rgb*(3.0-2.0*rgb);
return c.z * mix( vec3(1.0), rgb, c.y);
}
vec3 hash32(vec2 p)
{
vec3 p3 = fract(vec3(p.xyx) * vec3(.1031, .1030, .0973));
p3 += dot(p3, p3.yxz+33.33);
return fract((p3.xxy+p3.yzz)*p3.zyx);
}
vec3 blobs(vec2 st){
vec2 i = floor(st/cellSize);
vec3 c = vec3(0.);
for(int x = -1; x <= 1; x++)
for(int y = -1; y <= 1; y++){
vec3 h = hash32(i+vec2(x, y));
c += hsb2rgb(vec3(h.z, blobsSaturation, blobsLuminosity)) * gaussFunction(st/cellSize, i + vec2(x, y) + h.xy, blobSize) * smoothstep(0., 1., noise(noiseScale*st/cellSize / blobSize));
//c += hsb2rgb(vec3(h.z, blobsSaturation, blobsLuminosity)) * gaussFunction(st/cellSize, i + vec2(x, y) + h.xy, blobSize) * noise(noiseScale*st/cellSize / blobSize);
}
return c + vec3(background);
}
float map(float x, float a, float b, float c, float d){
return (x-a)/(b-a)*(d-c)+c;
}
void main() {
vec2 st = gl_FragCoord.xy/u_resolution.xy;
st.x *= u_resolution.x/u_resolution.y;
vec3 color = vec3(0.0);
color = vec3(blobs(st - u_mouse/u_resolution.xy*4.));
gl_FragColor = vec4(color,1.0);
}
made in this shader editor.

webgl: fastest approach to drawing many circles

I'm currently drawing thousands of circles, instancing a circle geometry (many triangles).
Alternatively, I could simply instance a quad (2 triangles) but cut out a circle in the fragment shader, using a distance function and discard.
Which approach would be faster? Is drawing many triangles more expensive than the calculations done in the fragment shader?
The fastest way might depend on the GPU and lots of other factors like how you're drawing the circles, 2D, 3D, whether you're blending them, whether you're using the z-buffer, etc... but in general, fewer triangles is faster than more, and fewer pixels is faster than more. So, all we can really do is try.
First let's just draw textured quads with no blending. First off, I always seem to get inconsistent perf from WebGL, but in my tests on my GPU I get 20k-30k quads at 60fps in this 300x150 canvas using instancing.
function main() {
const gl = document.querySelector('canvas').getContext('webgl');
const ext = gl.getExtension('ANGLE_instanced_arrays');
if (!ext) {
return alert('need ANGLE_instanced_arrays');
}
twgl.addExtensionsToContext(gl);
const vs = `
attribute float id;
attribute vec4 position;
attribute vec2 texcoord;
uniform float time;
varying vec2 v_texcoord;
varying vec4 v_color;
void main() {
float o = id + time;
gl_Position = position + vec4(
vec2(
fract(o * 0.1373),
fract(o * 0.5127)) * 2.0 - 1.0,
0, 0);
v_texcoord = texcoord;
v_color = vec4(fract(vec3(id) * vec3(0.127, 0.373, 0.513)), 1);
}`;
const fs = `
precision mediump float;
varying vec2 v_texcoord;
varying vec4 v_color;
uniform sampler2D tex;
void main() {
gl_FragColor = texture2D(tex, v_texcoord) * v_color;
}
`;
// compile shaders, link program, look up locations
const programInfo = twgl.createProgramInfo(gl, [vs, fs]);
const maxCount = 250000;
const ids = new Float32Array(maxCount);
for (let i = 0; i < ids.length; ++i) {
ids[i] = i;
}
const x = 16 / 300 * 2;
const y = 16 / 150 * 2;
const bufferInfo = twgl.createBufferInfoFromArrays(gl, {
position: {
numComponents: 2,
data: [
-x, -y,
x, -y,
-x, y,
-x, y,
x, -y,
x, y,
],
},
texcoord: [
0, 1,
1, 1,
0, 0,
0, 0,
1, 1,
1, 0,
],
id: {
numComponents: 1,
data: ids,
divisor: 1,
}
});
twgl.setBuffersAndAttributes(gl, programInfo, bufferInfo);
{
const ctx = document.createElement('canvas').getContext('2d');
ctx.canvas.width = 32;
ctx.canvas.height = 32;
ctx.fillStyle = 'white';
ctx.beginPath();
ctx.arc(16, 16, 15, 0, Math.PI * 2);
ctx.fill();
const tex = twgl.createTexture(gl, { src: ctx.canvas });
}
const fpsElem = document.querySelector('#fps');
const countElem = document.querySelector('#count');
let count;
function getCount() {
count = Math.min(maxCount, parseInt(countElem.value));
}
countElem.addEventListener('input', getCount);
getCount();
const maxHistory = 60;
const fpsHistory = new Array(maxHistory).fill(0);
let historyNdx = 0;
let historyTotal = 0;
let then = 0;
function render(now) {
const deltaTime = now - then;
then = now;
historyTotal += deltaTime - fpsHistory[historyNdx];
fpsHistory[historyNdx] = deltaTime;
historyNdx = (historyNdx + 1) % maxHistory;
fpsElem.textContent = (1000 / (historyTotal / maxHistory)).toFixed(1);
gl.useProgram(programInfo.program);
twgl.setUniforms(programInfo, {time: now * 0.001});
ext.drawArraysInstancedANGLE(gl.TRIANGLES, 0, 6, count);
requestAnimationFrame(render);
}
requestAnimationFrame(render);
}
main();
canvas { display: block; border: 1px solid black; }
<script src="https://twgljs.org/dist/4.x/twgl.min.js"></script>
<canvas></canvas>
<div>fps: <span id="fps"></span></div>
<div>count: <input type="number" id="count" min="0" max="1000000" value="25000"></div>
And I get the same perf at 60fps using repeated geometry instead of instancing. That's surprising to me because 7-8 years ago when I tested, repeated geometry was 20-30% faster than instancing. Whether that's because of having a better GPU now or a better driver or what, I have no idea.
function main() {
const gl = document.querySelector('canvas').getContext('webgl');
const vs = `
attribute float id;
attribute vec4 position;
attribute vec2 texcoord;
uniform float time;
varying vec2 v_texcoord;
varying vec4 v_color;
void main() {
float o = id + time;
gl_Position = position + vec4(
vec2(
fract(o * 0.1373),
fract(o * 0.5127)) * 2.0 - 1.0,
0, 0);
v_texcoord = texcoord;
v_color = vec4(fract(vec3(id) * vec3(0.127, 0.373, 0.513)), 1);
}`;
const fs = `
precision mediump float;
varying vec2 v_texcoord;
varying vec4 v_color;
uniform sampler2D tex;
void main() {
gl_FragColor = texture2D(tex, v_texcoord) * v_color;
}
`;
// compile shaders, link program, look up locations
const programInfo = twgl.createProgramInfo(gl, [vs, fs]);
const maxCount = 250000;
const x = 16 / 300 * 2;
const y = 16 / 150 * 2;
const quadPositions = [
-x, -y,
x, -y,
-x, y,
-x, y,
x, -y,
x, y,
];
const quadTexcoords = [
0, 1,
1, 1,
0, 0,
0, 0,
1, 1,
1, 0,
];
const positions = new Float32Array(maxCount * 2 * 6);
const texcoords = new Float32Array(maxCount * 2 * 6);
for (let i = 0; i < maxCount; ++i) {
const off = i * 2 * 6;
positions.set(quadPositions, off);
texcoords.set(quadTexcoords, off);
}
const ids = new Float32Array(maxCount * 6);
for (let i = 0; i < ids.length; ++i) {
ids[i] = i / 6 | 0;
}
const bufferInfo = twgl.createBufferInfoFromArrays(gl, {
position: {
numComponents: 2,
data: positions,
},
texcoord: texcoords,
id: {
numComponents: 1,
data: ids,
}
});
twgl.setBuffersAndAttributes(gl, programInfo, bufferInfo);
{
const ctx = document.createElement('canvas').getContext('2d');
ctx.canvas.width = 32;
ctx.canvas.height = 32;
ctx.fillStyle = 'white';
ctx.beginPath();
ctx.arc(16, 16, 15, 0, Math.PI * 2);
ctx.fill();
const tex = twgl.createTexture(gl, { src: ctx.canvas });
}
const fpsElem = document.querySelector('#fps');
const countElem = document.querySelector('#count');
let count;
function getCount() {
count = Math.min(maxCount, parseInt(countElem.value));
}
countElem.addEventListener('input', getCount);
getCount();
const maxHistory = 60;
const fpsHistory = new Array(maxHistory).fill(0);
let historyNdx = 0;
let historyTotal = 0;
let then = 0;
function render(now) {
const deltaTime = now - then;
then = now;
historyTotal += deltaTime - fpsHistory[historyNdx];
fpsHistory[historyNdx] = deltaTime;
historyNdx = (historyNdx + 1) % maxHistory;
fpsElem.textContent = (1000 / (historyTotal / maxHistory)).toFixed(1);
gl.useProgram(programInfo.program);
twgl.setUniforms(programInfo, {time: now * 0.001});
gl.drawArrays(gl.TRIANGLES, 0, 6 * count);
requestAnimationFrame(render);
}
requestAnimationFrame(render);
}
main();
canvas { display: block; border: 1px solid black; }
<script src="https://twgljs.org/dist/4.x/twgl.min.js"></script>
<canvas></canvas>
<div>fps: <span id="fps"></span></div>
<div>count: <input type="number" id="count" min="0" max="1000000" value="25000"></div>
The next thing to compare would be computing the circle in the fragment shader instead of using a texture.
function main() {
const gl = document.querySelector('canvas').getContext('webgl');
const ext = gl.getExtension('ANGLE_instanced_arrays');
if (!ext) {
return alert('need ANGLE_instanced_arrays');
}
twgl.addExtensionsToContext(gl);
const vs = `
attribute float id;
attribute vec4 position;
attribute vec2 texcoord;
uniform float time;
varying vec2 v_texcoord;
varying vec4 v_color;
void main() {
float o = id + time;
gl_Position = position + vec4(
vec2(
fract(o * 0.1373),
fract(o * 0.5127)) * 2.0 - 1.0,
0, 0);
v_texcoord = texcoord;
v_color = vec4(fract(vec3(id) * vec3(0.127, 0.373, 0.513)), 1);
}`;
const fs = `
precision mediump float;
varying vec2 v_texcoord;
varying vec4 v_color;
void main() {
gl_FragColor = mix(
v_color,
vec4(0),
step(1.0, length(v_texcoord.xy * 2. - 1.)));
}
`;
// compile shaders, link program, look up locations
const programInfo = twgl.createProgramInfo(gl, [vs, fs]);
const maxCount = 250000;
const ids = new Float32Array(maxCount);
for (let i = 0; i < ids.length; ++i) {
ids[i] = i;
}
const x = 16 / 300 * 2;
const y = 16 / 150 * 2;
const bufferInfo = twgl.createBufferInfoFromArrays(gl, {
position: {
numComponents: 2,
data: [
-x, -y,
x, -y,
-x, y,
-x, y,
x, -y,
x, y,
],
},
texcoord: [
0, 1,
1, 1,
0, 0,
0, 0,
1, 1,
1, 0,
],
id: {
numComponents: 1,
data: ids,
divisor: 1,
}
});
twgl.setBuffersAndAttributes(gl, programInfo, bufferInfo);
const fpsElem = document.querySelector('#fps');
const countElem = document.querySelector('#count');
let count;
function getCount() {
count = Math.min(maxCount, parseInt(countElem.value));
}
countElem.addEventListener('input', getCount);
getCount();
const maxHistory = 60;
const fpsHistory = new Array(maxHistory).fill(0);
let historyNdx = 0;
let historyTotal = 0;
let then = 0;
function render(now) {
const deltaTime = now - then;
then = now;
historyTotal += deltaTime - fpsHistory[historyNdx];
fpsHistory[historyNdx] = deltaTime;
historyNdx = (historyNdx + 1) % maxHistory;
fpsElem.textContent = (1000 / (historyTotal / maxHistory)).toFixed(1);
gl.useProgram(programInfo.program);
twgl.setUniforms(programInfo, {time: now * 0.001});
ext.drawArraysInstancedANGLE(gl.TRIANGLES, 0, 6, count);
requestAnimationFrame(render);
}
requestAnimationFrame(render);
}
main();
canvas { display: block; border: 1px solid black; }
<script src="https://twgljs.org/dist/4.x/twgl.min.js"></script>
<canvas></canvas>
<div>fps: <span id="fps"></span></div>
<div>count: <input type="number" id="count" min="0" max="1000000" value="25000"></div>
I get no measurable difference. Trying your circle function:
function main() {
const gl = document.querySelector('canvas').getContext('webgl');
const ext = gl.getExtension('ANGLE_instanced_arrays');
if (!ext) {
return alert('need ANGLE_instanced_arrays');
}
twgl.addExtensionsToContext(gl);
const vs = `
attribute float id;
attribute vec4 position;
attribute vec2 texcoord;
uniform float time;
varying vec2 v_texcoord;
varying vec4 v_color;
void main() {
float o = id + time;
gl_Position = position + vec4(
vec2(
fract(o * 0.1373),
fract(o * 0.5127)) * 2.0 - 1.0,
0, 0);
v_texcoord = texcoord;
v_color = vec4(fract(vec3(id) * vec3(0.127, 0.373, 0.513)), 1);
}`;
const fs = `
precision mediump float;
varying vec2 v_texcoord;
varying vec4 v_color;
float circle(in vec2 st, in float radius) {
vec2 dist = st - vec2(0.5);
return 1.0 - smoothstep(
radius - (radius * 0.01),
radius +(radius * 0.01),
dot(dist, dist) * 4.0);
}
void main() {
gl_FragColor = mix(
vec4(0),
v_color,
circle(v_texcoord, 1.0));
}
`;
// compile shaders, link program, look up locations
const programInfo = twgl.createProgramInfo(gl, [vs, fs]);
const maxCount = 250000;
const ids = new Float32Array(maxCount);
for (let i = 0; i < ids.length; ++i) {
ids[i] = i;
}
const x = 16 / 300 * 2;
const y = 16 / 150 * 2;
const bufferInfo = twgl.createBufferInfoFromArrays(gl, {
position: {
numComponents: 2,
data: [
-x, -y,
x, -y,
-x, y,
-x, y,
x, -y,
x, y,
],
},
texcoord: [
0, 1,
1, 1,
0, 0,
0, 0,
1, 1,
1, 0,
],
id: {
numComponents: 1,
data: ids,
divisor: 1,
}
});
twgl.setBuffersAndAttributes(gl, programInfo, bufferInfo);
const fpsElem = document.querySelector('#fps');
const countElem = document.querySelector('#count');
let count;
function getCount() {
count = Math.min(maxCount, parseInt(countElem.value));
}
countElem.addEventListener('input', getCount);
getCount();
const maxHistory = 60;
const fpsHistory = new Array(maxHistory).fill(0);
let historyNdx = 0;
let historyTotal = 0;
let then = 0;
function render(now) {
const deltaTime = now - then;
then = now;
historyTotal += deltaTime - fpsHistory[historyNdx];
fpsHistory[historyNdx] = deltaTime;
historyNdx = (historyNdx + 1) % maxHistory;
fpsElem.textContent = (1000 / (historyTotal / maxHistory)).toFixed(1);
gl.useProgram(programInfo.program);
twgl.setUniforms(programInfo, {time: now * 0.001});
ext.drawArraysInstancedANGLE(gl.TRIANGLES, 0, 6, count);
requestAnimationFrame(render);
}
requestAnimationFrame(render);
}
main();
canvas { display: block; border: 1px solid black; }
<script src="https://twgljs.org/dist/4.x/twgl.min.js"></script>
<canvas></canvas>
<div>fps: <span id="fps"></span></div>
<div>count: <input type="number" id="count" min="0" max="1000000" value="25000"></div>
I again get no measurable difference. Note: like I said above, I get wildly inconsistent results in WebGL. When I ran the first test I got 28k at 60fps. When I ran the second I got 23k. I was surprised since I expected the 2nd to be faster, so I ran the first again and only got 23k. The last one I got 29k and was again surprised, but then I went back and ran the previous one and got 29k. Basically that means testing timing in WebGL is nearly impossible. There are so many moving parts, given everything is multi-process, that getting consistent results seems impossible.
We could try discard:
function main() {
const gl = document.querySelector('canvas').getContext('webgl');
const ext = gl.getExtension('ANGLE_instanced_arrays');
if (!ext) {
return alert('need ANGLE_instanced_arrays');
}
twgl.addExtensionsToContext(gl);
const vs = `
attribute float id;
attribute vec4 position;
attribute vec2 texcoord;
uniform float time;
varying vec2 v_texcoord;
varying vec4 v_color;
void main() {
float o = id + time;
gl_Position = position + vec4(
vec2(
fract(o * 0.1373),
fract(o * 0.5127)) * 2.0 - 1.0,
0, 0);
v_texcoord = texcoord;
v_color = vec4(fract(vec3(id) * vec3(0.127, 0.373, 0.513)), 1);
}`;
const fs = `
precision mediump float;
varying vec2 v_texcoord;
varying vec4 v_color;
float circle(in vec2 st, in float radius) {
vec2 dist = st - vec2(0.5);
return 1.0 - smoothstep(
radius - (radius * 0.01),
radius +(radius * 0.01),
dot(dist, dist) * 4.0);
}
void main() {
if (circle(v_texcoord, 1.0) < 0.5) {
discard;
}
gl_FragColor = v_color;
}
`;
// compile shaders, link program, look up locations
const programInfo = twgl.createProgramInfo(gl, [vs, fs]);
const maxCount = 250000;
const ids = new Float32Array(maxCount);
for (let i = 0; i < ids.length; ++i) {
ids[i] = i;
}
const x = 16 / 300 * 2;
const y = 16 / 150 * 2;
const bufferInfo = twgl.createBufferInfoFromArrays(gl, {
position: {
numComponents: 2,
data: [
-x, -y,
x, -y,
-x, y,
-x, y,
x, -y,
x, y,
],
},
texcoord: [
0, 1,
1, 1,
0, 0,
0, 0,
1, 1,
1, 0,
],
id: {
numComponents: 1,
data: ids,
divisor: 1,
}
});
twgl.setBuffersAndAttributes(gl, programInfo, bufferInfo);
const fpsElem = document.querySelector('#fps');
const countElem = document.querySelector('#count');
let count;
function getCount() {
count = Math.min(maxCount, parseInt(countElem.value));
}
countElem.addEventListener('input', getCount);
getCount();
const maxHistory = 60;
const fpsHistory = new Array(maxHistory).fill(0);
let historyNdx = 0;
let historyTotal = 0;
let then = 0;
function render(now) {
const deltaTime = now - then;
then = now;
historyTotal += deltaTime - fpsHistory[historyNdx];
fpsHistory[historyNdx] = deltaTime;
historyNdx = (historyNdx + 1) % maxHistory;
fpsElem.textContent = (1000 / (historyTotal / maxHistory)).toFixed(1);
gl.useProgram(programInfo.program);
twgl.setUniforms(programInfo, {time: now * 0.001});
ext.drawArraysInstancedANGLE(gl.TRIANGLES, 0, 6, count);
requestAnimationFrame(render);
}
requestAnimationFrame(render);
}
main();
canvas { display: block; border: 1px solid black; }
<script src="https://twgljs.org/dist/4.x/twgl.min.js"></script>
<canvas></canvas>
<div>fps: <span id="fps"></span></div>
<div>count: <input type="number" id="count" min="0" max="1000000" value="25000"></div>
Given the inconsistent timing I can't be sure, but my impression is discard is slower. IIRC discard is slow because without discard the GPU knows even before it executes the fragment shader that it's going to update the z-buffer, whereas with discard it doesn't know until after the shader executes, and that difference means certain things can't be optimized as well.
I'm going to stop there because there's just too many combinations of things to try.
We could try turning blending on. Blending is also generally slower, since it has to blend (read the destination), but is it slower than discard? I don't know.
Do you have the depth test on? If so then draw order will be important.
Yet another thing to test is using non-quads like hexagons or octagons, as that would run fewer pixels through the fragment shader. I suspect you might need to make the circles bigger to see that, but if we have a 100x100 pixel quad, that's 10k pixels. If we have perfect circle geometry, that's about pi*r^2 or ~7853 pixels, 21% fewer. A hexagon would be ~8740 pixels, or 11% fewer, and an octagon somewhere in between. Drawing 11% to 21% fewer pixels is usually a win, but of course for a hexagon you'd be drawing 3x more triangles and for an octagon 4x more. You'd basically have to test all these cases.
That points out another issue: I believe you'd get different relative results with larger circles on a larger canvas, since there'd be more pixels per circle, so for any given number of circles drawn more of the time would be spent drawing pixels and less calculating vertices and/or less time restarting the GPU to draw the next circle.
Update
Testing on Chrome vs Firefox, I got 60k-66k in all cases in Chrome on the same machine. No idea why the difference is so vast, given that WebGL itself is doing almost nothing; all 4 tests only have a single draw call per frame. But whatever, at least as of 2019-10, Chrome is more than twice as fast for this particular case as Firefox.
One idea is that I have a dual GPU laptop. When you create the context you can tell WebGL what you're targeting by passing in the powerPreference context creation attribute, as in:
const gl = document.querySelector('canvas').getContext('webgl', {
powerPreference: 'high-performance',
});
The options are 'default', 'low-power', and 'high-performance'. 'default' means "let the browser decide", but ultimately all of them mean "let the browser decide" since they're only hints. In any case, setting that didn't change anything in Firefox for me.

How to implement textureCube using 6 sampler2D

Before, I used a samplerCube to render the cube.
This is my previous fragment shader code:
"uniform samplerCube tCubeTgt;",
"varying vec3 posTgt;",
"void main() {",
"vec4 reflectedColorTgt = textureCube( tCubeTgt, vec3( -posTgt.x, posTgt.yz ) );",
"gl_FragColor = reflectedColorTgt ;",
"}"
Now I want to use 6 sampler2D to render a cube.
What should I do?
Why?
In any case, looking up how cube mapping works, there's a function and a table in the OpenGL ES 2.0 spec:
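The function maps the two remaining coordinates onto the selected face (it's the same computation as the st line in the shader below):
s = 1/2 * (sc / |ma| + 1)
t = 1/2 * (tc / |ma| + 1)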
where sc, tc, and ma come from this table
Major Axis Direction| Target |sc |tc |ma |
--------------------+---------------------------+---+---+---+
+rx |TEXTURE_CUBE_MAP_POSITIVE_X|−rz|−ry| rx|
−rx |TEXTURE_CUBE_MAP_NEGATIVE_X| rz|−ry| rx|
+ry |TEXTURE_CUBE_MAP_POSITIVE_Y| rx| rz| ry|
−ry |TEXTURE_CUBE_MAP_NEGATIVE_Y| rx|−rz| ry|
+rz |TEXTURE_CUBE_MAP_POSITIVE_Z| rx|−ry| rz|
−rz |TEXTURE_CUBE_MAP_NEGATIVE_Z|−rx|−ry| rz|
--------------------+---------------------------+---+---+---+
Table 3.21: Selection of cube map images based on major axis direction of texture coordinates
Using that you can make functions that will apply the same logic using 6 2d textures instead of one cubemap
"use strict";
/* global document, twgl, requestAnimationFrame */
const vs = `
uniform mat4 u_model;
uniform mat4 u_view;
uniform mat4 u_projection;
uniform vec3 u_camera;
attribute vec4 position;
attribute vec3 normal;
varying vec3 v_normal;
varying vec3 v_eyeToSurface;
void main() {
vec4 world = u_model * position;
gl_Position = u_projection * u_view * world;
v_eyeToSurface = world.xyz - u_camera;
v_normal = (u_model * vec4(normal, 0)).xyz;
}
`;
const fs = `
precision mediump float;
varying vec3 v_eyeToSurface;
varying vec3 v_normal;
uniform sampler2D u_textures[6];
void cubemap(vec3 r, out float texId, out vec2 st) {
vec3 uvw;
vec3 absr = abs(r);
if (absr.x > absr.y && absr.x > absr.z) {
// x major
float negx = step(r.x, 0.0);
uvw = vec3(r.zy, absr.x) * vec3(mix(-1.0, 1.0, negx), -1, 1);
texId = negx;
} else if (absr.y > absr.z) {
// y major
float negy = step(r.y, 0.0);
uvw = vec3(r.xz, absr.y) * vec3(1.0, mix(1.0, -1.0, negy), 1.0);
texId = 2.0 + negy;
} else {
// z major
float negz = step(r.z, 0.0);
uvw = vec3(r.xy, absr.z) * vec3(mix(1.0, -1.0, negz), -1, 1);
texId = 4.0 + negz;
}
st = vec2(uvw.xy / uvw.z + 1.) * .5;
}
vec4 texCubemap(vec3 uvw) {
float texId;
vec2 st;
cubemap(uvw, texId, st);
vec4 color = vec4(0);
for (int i = 0; i < 6; ++i) {
vec4 side = texture2D(u_textures[i], st);
float select = step(float(i) - 0.5, texId) *
step(texId, float(i) + .5);
color = mix(color, side, select);
}
return color;
}
void main() {
vec3 normal = normalize(v_normal);
vec3 eyeToSurface = normalize(v_eyeToSurface);
gl_FragColor = texCubemap(reflect(eyeToSurface, normal));
}
`;
const m4 = twgl.m4;
const gl = document.getElementById("c").getContext("webgl");
// compile shaders, look up locations
const programInfo = twgl.createProgramInfo(gl, [vs, fs]);
// create buffers
const models = [
twgl.primitives.createSphereBufferInfo(gl, 1, 12, 8),
twgl.primitives.createCubeBufferInfo(gl, 1.5),
twgl.primitives.createTorusBufferInfo(gl, .7, .5, 12, 8),
];
const textures = twgl.createTextures(gl, {
posx: { minMag: gl.LINEAR, wrap: gl.CLAMP_TO_EDGE, crossOrigin: "anonymous", src:'https://twgljs.org/examples/images/yokohama/posx.jpg', },
negx: { minMag: gl.LINEAR, wrap: gl.CLAMP_TO_EDGE, crossOrigin: "anonymous", src:'https://twgljs.org/examples/images/yokohama/negx.jpg', },
posy: { minMag: gl.LINEAR, wrap: gl.CLAMP_TO_EDGE, crossOrigin: "anonymous", src:'https://twgljs.org/examples/images/yokohama/posy.jpg', },
negy: { minMag: gl.LINEAR, wrap: gl.CLAMP_TO_EDGE, crossOrigin: "anonymous", src:'https://twgljs.org/examples/images/yokohama/negy.jpg', },
posz: { minMag: gl.LINEAR, wrap: gl.CLAMP_TO_EDGE, crossOrigin: "anonymous", src:'https://twgljs.org/examples/images/yokohama/posz.jpg', },
negz: { minMag: gl.LINEAR, wrap: gl.CLAMP_TO_EDGE, crossOrigin: "anonymous", src:'https://twgljs.org/examples/images/yokohama/negz.jpg', },
});
const uniforms = {
u_textures: [
textures.posx,
textures.negx,
textures.posy,
textures.negy,
textures.posz,
textures.negz,
],
};
function render(time) {
time *= 0.001;
twgl.resizeCanvasToDisplaySize(gl.canvas);
gl.viewport(0, 0, gl.canvas.width, gl.canvas.height);
gl.enable(gl.DEPTH_TEST);
gl.enable(gl.CULL_FACE);
gl.clear(gl.COLOR_BUFFER_BIT | gl.DEPTH_BUFFER_BIT);
const fov = 30 * Math.PI / 180;
const aspect = gl.canvas.clientWidth / gl.canvas.clientHeight;
const zNear = 0.5;
const zFar = 20;
const projection = m4.perspective(fov, aspect, zNear, zFar);
const eye = [
Math.sin(time) * 7,
Math.sin(time * .5) * 3,
Math.cos(time) * 7,
];
const target = [0, 0, 0];
const up = [0, 1, 0];
const camera = m4.lookAt(eye, target, up);
const view = m4.inverse(camera);
uniforms.u_camera = eye;
uniforms.u_projection = projection;
uniforms.u_view = view;
gl.useProgram(programInfo.program);
models.forEach((bufferInfo, ndx) => {
let u = ndx / (models.length - 1) * 2 - 1;
let model = m4.translation([u * (models.length - 1), 0, 0]);
model = m4.rotateY(model, time * (ndx + 1) * 0.7);
uniforms.u_model = m4.rotateX(model, time * (ndx + 1) * 0.2);
twgl.setBuffersAndAttributes(gl, programInfo, bufferInfo);
twgl.setUniforms(programInfo, uniforms);
gl.drawElements(gl.TRIANGLES, bufferInfo.numElements, gl.UNSIGNED_SHORT, 0);
});
requestAnimationFrame(render);
}
requestAnimationFrame(render);
body {
margin: 0;
}
canvas {
display: block;
width: 100vw;
height: 100vh;
}
<canvas id="c"></canvas>
<script src="https://twgljs.org/dist/4.x/twgl-full.min.js"></script>
But you'll see there are lots of issues. One issue is what to do at the edges, where filtering across neighbouring faces would be needed; we'd have to add something to handle that ourselves. Also, we can't use random access for samplers, nor can we conditionally access samplers, which means doing it with 6 2D textures instead of one cubemap is going to be much, much slower.

OpenGL - Water waves (with noise)

I am currently in the process of making water waves, so basically I am starting from the beginning. I have created a mesh which is basically a flat square and have animated it in the vertex shader (below is the code which achieves that)
vtx.y = (sin(2.0 * vtx.x + a_time/1000.0 ) * cos(1.5 * vtx.y + a_time/1000.0) * 0.2);
Basically just moving the y position based on a sin and cos function, the results of this can be observed here!
I then tried adding some Perlin noise (as per the Perlin noise functions by Ian McEwan, available here github.com/ashima/webgl-noise) as follows
vtx.y = vtx.y + 0.1*cnoise((a_time/5000.0)*a_vertex.yz);
the results of this can be observed here!
As you can plainly observe, there is no real "random" effect that I was looking for (to simulate some basic random roughness of an ocean).
I was wondering how it would be possible for me to achieve this (also any suggestions on how to improve either of the functions that change y would also be appreciated).
The simplest solution is to use a texture that contains the needed noise. If the displacement is kept in the texture, then it is possible to apply the displacement in the vertex shader, so there is no need to modify the vertex buffer. To make the waves move, you may add some animated offset.
There are plenty of ways to fake, as you say, the "random" effect. You may take two samples from the texture, using offsets that change differently over time, and then simply add the two displacements.
For example, see the following vertex shader:
uniform sampler2D u_heightMap;
uniform float u_time;
uniform mat4 modelViewMatrix;
uniform mat4 projectionMatrix;
attribute vec3 position;
attribute vec2 uv;
void main()
{
vec3 pos = position;
vec2 offset1 = vec2(0.8, 0.4) * u_time * 0.1;
vec2 offset2 = vec2(0.6, 1.1) * u_time * 0.1;
float height1 = texture2D(u_heightMap, uv + offset1).r * 0.02;
float height2 = texture2D(u_heightMap, uv + offset2).r * 0.02;
pos.z += height1 + height2;
vec4 mvPosition = modelViewMatrix * vec4( pos, 1.0 );
gl_Position = projectionMatrix * mvPosition;
}
I've made a simple example using threejs:
var container;
var camera, scene, renderer;
var mesh;
var uniforms;
var clock = new THREE.Clock();
init();
animate();
function init() {
container = document.getElementById('container');
camera = new THREE.PerspectiveCamera(40, window.innerWidth / window.innerHeight, 0.1, 100);
camera.position.z = 0.6;
camera.position.y = 0.2;
camera.rotation.x = -0.45;
scene = new THREE.Scene();
var boxGeometry = new THREE.PlaneGeometry(0.75, 0.75, 100, 100);
var heightMap = THREE.ImageUtils.loadTexture("data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAEAAAABACAMAAACdt4HsAAAA81BMVEVZWVlRUVFdXV1EREQ8PDxBQUFWVlY3NzdKSkoyMjJqamphYWFOTk5mZmZHR0dxcXEtLS0qKip0dHRubm4aGhpTU1M+Pj4vLy8nJyeCgoJ3d3chISE0NDSKiop6eno5OTkBAQEjIyMeHh5/f38SEhKOjo6FhYUWFhaRkZELCwsPDw+bm5t8fHyfn59jY2OVlZUICAiwsLCHh4elpaWioqL///+qqqqYmJitra20tLS3t7fIyMi5ubnR0dHh4eHDw8P8/PzKysq9vb2np6fw8PDd3d3Ozs67u7vFxcX09PTY2Ni/v7/p6enV1dXT09Pl5eXt7e2kvPjWAAAMM0lEQVRYwxVW1ZbjOBAtsSwzsx1jOOlAc/f0MO7M7v9/zWr0kNg+xyVX6RIQXOhlUyIJNQwOLc/vL1ZiHuzATLhtIppQ1DzA0vd7S9nz9oBiExE/JXFo7zACFOzscM5l6yDfk/bs2vmTKhc3i8U6xIbfIGSlkUgXC5FGnft2N0kvrSQRWV+qu0sBYLu7g3t9VMicmjAPTJ74sjbWm5ubcb1YLKq84JH+X9MQW2l1UNl4s8wQmCJbRik0ISCLGKL4+OHty5dfs6qWSxNxbErO07L0Or8KV0G6Xqw3ozzvsFe1ka4cwdDEwr/pKMGgcJxg+/L6/iolQknps3A4YAwmlY4jTZsaVNJoHMvEXU2IGMnYlWy3u+YAhjDDAMwmbIrTdapdSc0d8bO1P20ftnf7cw2x4RNa9pgZWVa26PxpBZaJwh0eF34xTG5gOxJY2Ewr7ASgJ+uB2d14op22p6Obh0XBx3W/ZIbXZ2VmUFoZguV3B7JZjFFassASkkCJcte0XFl56/W6LSxAfuQLQnzq0E7G/lJiK3a4KNebLBPUi7dyUzIwyXgz9g7rgArn+OHj9wsA66N1imza+d5ybR0nZ1x6hA+fvv7a72sb4RDzUJG0XES4niblezFYqYSVGmSxYwdUdWPUdZIloifksH/5fE42y943XHW4m54en3ZStEHI/cWid39+yR1awf2+vofpEWddaRimSUVkYFwtE4SbOYjrQmyWCam6UrT3hD992CL59DzQzuPN+cizZWe4H8wYwr1LPU9YZeK0rTD6UiQOtUyOAbwUbBOzvo/asePzu8/P777lppNI7GCU9dGIODWhcMMGjzebxUYQ4J7HsMIi8x0RyfwwBNNRtuVmsSgttX/3/vVch6hbgxEZ41JA7x2PoLh7541RST20gtQXzJOp0abZJm2J0W0ialneuKFhuJ3P1/PPy1GXLzuNC0NQvP3yCPo8kV3n+/3L6U5q9N8Y9bUuWFkuS6Mro9TzWgrSygxNhfPzg54xW0bS5YT60sxdC8RyoxtRIW7qGnfjYt1b4XNAdakxivqR0l5YabpZLLPFgj/ysW9R1eNVAbL6S4sOCE2cmNlNiHmctE6/Xi7TIXfKm81yXZIqhkjIuKXdQq/eZkaC40oYohXM2tx41IAiABvTivIAIezixCCpwMgoe0OZeHu9uF5WalCi5WKTprpxSaw2FQTpIkJ0lgmMolwdYr+VVhi4EvKpgY3ejDoV4c3l363NgZLCZcRUjEVrQzq0L9OKAJaQ+AaIBI55aONs9ORQn1fb08usC9yMFTMhYTg2qMXB1LN3MC03m43gnHj6XBkn1Ep1ARKqmHApxCHYDXFrn8EV65v1jR8x22qFV/nJbOl3aBuDPy4W0VKifoyYSZhDIg8YSip/mWUonC5bRTUf1l2y7ERKMQKHVb4npMpDb7MsRdmLaLmOLMCBerzEAaIGAxckt6zVarLPH3dm3y83ZRQl2MR4hRh1uptlgucT3oyGqEoBiMhD0BThXBz328u0v0KtQLnHl+fvbwVaZlqN12vNV6NiBLY1OONms2T2Fja9Ly1GdK+4WM21DTaC1afH+xwIIvbd55c9kFHz31+ON4sxth0mHHUeBuJXZd8GuO/85KBsKh1zdVXD3cN1dw2GoKgo8ObudPf0NbCiyG/x9owT4blnzQrPiI2KKMvnOYrGzdLMV9cZIXUZWuIwf+wxB6UUbKdpMJvcFglthtDEF62Gz2+4186wiNDqEnIr3/IkExgVc1JW9lygm/X6RviRCNS0BYfFtrJsxVXeAG37ClZPL2fUZfqjYf969/j87eXnt0KFgCLfJ7LJtZM50Wi7iB+G6QAoZozELiBk0ZKApt3+muO/Kpzwu5eCBMXx3fvbr4UNO9iUfKpr5Q6BhbanUy454qA3PyRVGJiEbqKyz8ZEPYLTj55D1A6NN1WAv/337z+Px+2kCMWItn7mmMfT3bv/QsmMFg52UtEkPD4fbS2rN2sRfz9NceZ3bJ7jxU2bImybNsYuQmHhKq+PNlFCLGxyvksMCzQbQ3fIH75fAiQ8itzA3P/zFErDE6oxhKUcaUlbxa1RahG1jdIgRqZp2Qs+vbtO+xXUjTtd5nnlMhTM27v71fDz94dQwIFIpDe2Vyo0CRzizjFJXJhukA9BwJkF7n34+WO9Ah7s5ppbCQNrdfrw7c9//7wCKI4B47/yIGeXeZHk1CpQOVK3Pq12NkpbhuqZHziXMOXqfLFJi8xw//3Pc93MtuHxWZnFoFztzQSnUZU4+NuVk8TrYXCZWGqFs0BhCKfVX1kPVT27zbHev3tXMM/POmZpUEzHWg12DI4hUL67f/56zevaPXj6Vk+COUmCr48PGFoNeyPyK6i/fnzVTQU4tiSliDCiXO35gtkxTR3HaupZqYb3kSFISjiXzpCbioCnvZQ5whAymH7e3n6+B5+kBuNWlxqWqof6cm2cMtJ6pZSNB8W0coATK+UwSCoLg9Uv1x7nseNu/32vMXe7d/+y9uEcKowydv73z92ESRYRbB63zc6EYrsN5tx19UQSjALYKTFW5OCi/NfXwDu+//Hpvs7d/Y/feWyxg7tz3WBYHffDPP+lCqPu5elpfz/rpwE2HYjBA95VEuai2BeYGSiE+8un6XR7+zs/nn6/MXqwcazViwS29kgf3On0Whcu1qdFDMvKgApKWULAdiHYNlhWlcFEan/bPf73/vb91gJu+GnmM2kd6txzAqLVObCZPdT51vJKDxqKwKGG/m2mxmR9JHiFsMKE28+3ty/HXKub74G6P33+8mrTMvMMDjkedmE+BwRvoSA6hVCHgxsWw/0Rinx2z9PAqqq5PP+4vT2FE6Ya5pdfU/2xNmhMKFqtZGu0hM+rx6+gmDH6aDBtVyKz/vr99OFej5kIqzn+0pz9kJ8lsfwKx1QyrjCHWBI0x8A1+1tyvAODGilXZtjYRiRgdfy+Wj1NWkTBfTg1deAwD4Gkhu9nPkVAKqrpiQLdJKlSqS/0o86PMfINjqm53a+29vx6+2ffTA+PEGUMMx4eV670U89oK4tbrcdkDPevv1f2AasaQcGdlFJheL6Q3Aw/PQz17Zv55f2
P18KJqAWmO086SQaWkzexgzUKERKr28fhCvbOfXmAlY2AaUN0rJgSaYY/f/z4ZxbFz482zQgarvuAW1V6/PP0cMndMC9chM3w231uEeJA+PoCPNIaXbr1ZCETcOITrdSaj8Pp0NJWIkfZUre6DS+BaTLeFDYcuASpaFr1LQpruOmjxciITzPR7L982M+qSvvN0joNDInUaw2PplxrrkYTjRk16+GuoR3zNFctWq1v4KZtxwTEZpPZ725vb3/93m8VCpW1uuIYLODmsMLKBrGMKC+GsLh/Uq4rHXnY8n6hFzSxKNNy3UvzSb9/sdx9PX96e97vG7Oe9w/Pbw/bq465CUeWjKWggJZekxfXDy+usV6mJrxNiREtIw/uv+zvPuXuITyfVvn5wBzIn17efw544I06xBJBE+RQp/RoxFa/7pv648qepgfAwRzjsHH8kgnD2plMI2Jlsgo50jz9889b0S82y2WpByGEPRyhlUTGUMill/94fdg5sNSJULjsb44z5tj3urXROrIVndfKrU4t+zjTrE+9WO2K4/T8scCAcZxlXSQVMUQGSBuvUYmu07lkx6Wvw6Omn5+21DGyw+ntIRa4thGWxHE4cXI9QNTQ5XrZ6bxrMBOwY5rI1zhNLHgwZSVKnbA2elfVmKaN+OlyeSxahzBEkeWPKXA7v8PES9p+HXUsBoPX9zkhCHMA+2ozq+vWm5So+bRVyupb6e5mg2B+KAi3TFJVaD7PJlYPqutSwQxAcZryYfq+B+KN6GVozttd0+htmj9vCgh1uCPIbudCPQ2ftkbUd8Gx0OyfJ2XvsHRAYhsxmI4h0G45trtaw2CyqaCwP+0swqg8X0+fv15Q/e+7LzPnfsaGFdEmzbTLhZyCidydaWtqYG7YivkSm5yM6z6zTDwo5CRseL19Bs8zHx+vu0AVu3prAltvRPEtsHcNWFlcNIqbYMeQa1KmiRnwFoamkdFGYEYAn59A9yeYYlXkcQhWduBaGi/MtoMQGMP1dXBNAGkqXrUeWjWA5rsBmEcTy4pN4fDr/qx44iciIweQQTCbgAgjDA7ofwxocDxlnyf5AAAAAElFTkSuQmCC");
heightMap.wrapT = heightMap.wrapS = THREE.RepeatWrapping;
uniforms = {u_time: {type: "f", value: 0.0 }, u_heightMap: {type: "t",value:heightMap} };
var material = new THREE.ShaderMaterial({
uniforms: uniforms,
side: THREE.DoubleSide,
wireframe: true,
vertexShader: document.getElementById('vertexShader').textContent,
fragmentShader: document.getElementById('fragment_shader').textContent
});
mesh = new THREE.Mesh(boxGeometry, material);
mesh.rotation.x = 3.14 / 2.0;
scene.add(mesh);
renderer = new THREE.WebGLRenderer();
renderer.setClearColor( 0xffffff, 1 );
container.appendChild(renderer.domElement);
onWindowResize();
window.addEventListener('resize', onWindowResize, false);
}
function onWindowResize(event) {
camera.aspect = window.innerWidth / window.innerHeight;
camera.updateProjectionMatrix();
renderer.setSize(window.innerWidth, window.innerHeight);
}
function animate() {
requestAnimationFrame(animate);
render();
}
function render() {
var delta = clock.getDelta();
uniforms.u_time.value += delta;
mesh.rotation.z += delta * 0.5;
renderer.render(scene, camera);
}
body { margin: 0px; overflow: hidden; }
<script src="http://threejs.org/build/three.min.js"></script>
<div id="container"></div>
<script id="fragment_shader" type="x-shader/x-fragment">
void main( void )
{
gl_FragColor = vec4(vec3(0.0), 1.0);
}
</script>
<script id="vertexShader" type="x-shader/x-vertex">
uniform lowp sampler2D u_heightMap;
uniform float u_time;
void main()
{
vec3 pos = position;
vec2 offset1 = vec2(1.0, 0.5) * u_time * 0.1;
vec2 offset2 = vec2(0.5, 1.0) * u_time * 0.1;
float hight1 = texture2D(u_heightMap, uv + offset1).r * 0.02;
float hight2 = texture2D(u_heightMap, uv + offset2).r * 0.02;
pos.z += hight1 + hight2;
vec4 mvPosition = modelViewMatrix * vec4( pos, 1.0 );
gl_Position = projectionMatrix * mvPosition;
}
</script>
Using a better displacement texture, or even using two different textures for the two offsets, you may achieve better results.
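A minimal sketch of that two-texture variant, written against the same three.js setup as above (three.js injects position, uv, and the matrices; u_heightMap2 is an assumed second sampler bound to a different noise texture, not something defined in the example):
uniform sampler2D u_heightMap;
uniform sampler2D u_heightMap2; // assumed second displacement texture
uniform float u_time;
void main()
{
vec3 pos = position;
vec2 offset1 = vec2(0.8, 0.4) * u_time * 0.1;
vec2 offset2 = vec2(0.6, 1.1) * u_time * 0.1;
// two independent displacements scrolled in different directions
float height1 = texture2D(u_heightMap, uv + offset1).r * 0.02;
float height2 = texture2D(u_heightMap2, uv + offset2).r * 0.02;
pos.z += height1 + height2;
gl_Position = projectionMatrix * modelViewMatrix * vec4(pos, 1.0);
}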