OpenGL - strange SSAO artifact
I followed the tutorial at Learn OpenGL to implement screen-space ambient occlusion. Things are mostly looking okay besides a strange artifact at the top and bottom of the window.
The problem is more obvious when moving the camera, when it appears as if the top part of the image is imprinted on the bottom and vice versa, as shown in this video.
The artifact worsens when standing close to a wall and looking up and down, so perhaps the Znear value is contributing? The scale of my scene does seem small compared to other demos: Znear and Zfar are 0.01f and 1000.0f, and the width of the shown hallway is around 1.2f.
I've read up on the common SSAO artifacts and haven't found anything resembling this.
#version 330 core
in vec2 TexCoords;
layout (location = 0) out vec3 FragColor;
uniform sampler2D MyTexture0; // Position
uniform sampler2D MyTexture1; // Normal
uniform sampler2D MyTexture2; // TexNoise
const int samples = 64;
const float radius = 0.25;
const float bias = 0.025;
uniform mat4 projectionMatrix;
uniform float screenWidth;
uniform float screenHeight;
void main()
{
//tile noise texture over screen based on screen dimensions divided by noise size
vec2 noiseScale = vec2(screenWidth/4.0, screenHeight/4.0);
vec3 sample_sphere[64];
sample_sphere[0] = vec3(0.04977, -0.04471, 0.04996);
sample_sphere[1] = vec3(0.01457, 0.01653, 0.00224);
sample_sphere[2] = vec3(-0.04065, -0.01937, 0.03193);
sample_sphere[3] = vec3(0.01378, -0.09158, 0.04092);
sample_sphere[4] = vec3(0.05599, 0.05979, 0.05766);
sample_sphere[5] = vec3(0.09227, 0.04428, 0.01545);
sample_sphere[6] = vec3(-0.00204, -0.0544, 0.06674);
sample_sphere[7] = vec3(-0.00033, -0.00019, 0.00037);
sample_sphere[8] = vec3(0.05004, -0.04665, 0.02538);
sample_sphere[9] = vec3(0.03813, 0.0314, 0.03287);
sample_sphere[10] = vec3(-0.03188, 0.02046, 0.02251);
sample_sphere[11] = vec3(0.0557, -0.03697, 0.05449);
sample_sphere[12] = vec3(0.05737, -0.02254, 0.07554);
sample_sphere[13] = vec3(-0.01609, -0.00377, 0.05547);
sample_sphere[14] = vec3(-0.02503, -0.02483, 0.02495);
sample_sphere[15] = vec3(-0.03369, 0.02139, 0.0254);
sample_sphere[16] = vec3(-0.01753, 0.01439, 0.00535);
sample_sphere[17] = vec3(0.07336, 0.11205, 0.01101);
sample_sphere[18] = vec3(-0.04406, -0.09028, 0.08368);
sample_sphere[19] = vec3(-0.08328, -0.00168, 0.08499);
sample_sphere[20] = vec3(-0.01041, -0.03287, 0.01927);
sample_sphere[21] = vec3(0.00321, -0.00488, 0.00416);
sample_sphere[22] = vec3(-0.00738, -0.06583, 0.0674);
sample_sphere[23] = vec3(0.09414, -0.008, 0.14335);
sample_sphere[24] = vec3(0.07683, 0.12697, 0.107);
sample_sphere[25] = vec3(0.00039, 0.00045, 0.0003);
sample_sphere[26] = vec3(-0.10479, 0.06544, 0.10174);
sample_sphere[27] = vec3(-0.00445, -0.11964, 0.1619);
sample_sphere[28] = vec3(-0.07455, 0.03445, 0.22414);
sample_sphere[29] = vec3(-0.00276, 0.00308, 0.00292);
sample_sphere[30] = vec3(-0.10851, 0.14234, 0.16644);
sample_sphere[31] = vec3(0.04688, 0.10364, 0.05958);
sample_sphere[32] = vec3(0.13457, -0.02251, 0.13051);
sample_sphere[33] = vec3(-0.16449, -0.15564, 0.12454);
sample_sphere[34] = vec3(-0.18767, -0.20883, 0.05777);
sample_sphere[35] = vec3(-0.04372, 0.08693, 0.0748);
sample_sphere[36] = vec3(-0.00256, -0.002, 0.00407);
sample_sphere[37] = vec3(-0.0967, -0.18226, 0.29949);
sample_sphere[38] = vec3(-0.22577, 0.31606, 0.08916);
sample_sphere[39] = vec3(-0.02751, 0.28719, 0.31718);
sample_sphere[40] = vec3(0.20722, -0.27084, 0.11013);
sample_sphere[41] = vec3(0.0549, 0.10434, 0.32311);
sample_sphere[42] = vec3(-0.13086, 0.11929, 0.28022);
sample_sphere[43] = vec3(0.15404, -0.06537, 0.22984);
sample_sphere[44] = vec3(0.05294, -0.22787, 0.14848);
sample_sphere[45] = vec3(-0.18731, -0.04022, 0.01593);
sample_sphere[46] = vec3(0.14184, 0.04716, 0.13485);
sample_sphere[47] = vec3(-0.04427, 0.05562, 0.05586);
sample_sphere[48] = vec3(-0.02358, -0.08097, 0.21913);
sample_sphere[49] = vec3(-0.14215, 0.19807, 0.00519);
sample_sphere[50] = vec3(0.15865, 0.23046, 0.04372);
sample_sphere[51] = vec3(0.03004, 0.38183, 0.16383);
sample_sphere[52] = vec3(0.08301, -0.30966, 0.06741);
sample_sphere[53] = vec3(0.22695, -0.23535, 0.19367);
sample_sphere[54] = vec3(0.38129, 0.33204, 0.52949);
sample_sphere[55] = vec3(-0.55627, 0.29472, 0.3011);
sample_sphere[56] = vec3(0.42449, 0.00565, 0.11758);
sample_sphere[57] = vec3(0.3665, 0.00359, 0.0857);
sample_sphere[58] = vec3(0.32902, 0.0309, 0.1785);
sample_sphere[59] = vec3(-0.08294, 0.51285, 0.05656);
sample_sphere[60] = vec3(0.86736, -0.00273, 0.10014);
sample_sphere[61] = vec3(0.45574, -0.77201, 0.00384);
sample_sphere[62] = vec3(0.41729, -0.15485, 0.46251);
sample_sphere[63] = vec3 (-0.44272, -0.67928, 0.1865);
// get input for SSAO algorithm
vec3 fragPos = texture(MyTexture0, TexCoords).xyz;
vec3 normal = normalize(texture(MyTexture1, TexCoords).rgb);
vec3 randomVec = normalize(texture(MyTexture2, TexCoords * noiseScale).xyz);
// create TBN change-of-basis matrix: from tangent-space to view-space
vec3 tangent = normalize(randomVec - normal * dot(randomVec, normal));
vec3 bitangent = cross(normal, tangent);
mat3 TBN = mat3(tangent, bitangent, normal);
// iterate over the sample kernel and calculate occlusion factor
float occlusion = 0.0;
for(int i = 0; i < samples; ++i)
{
// get sample position
vec3 samplePos = TBN * sample_sphere[i]; // from tangent to view-space ("sample" is a reserved word in newer GLSL versions, so avoid it as an identifier)
samplePos = fragPos + samplePos * radius;
// project sample position (to sample texture) (to get position on screen/texture)
vec4 offset = vec4(samplePos, 1.0);
offset = projectionMatrix * offset; // from view to clip-space
offset.xyz /= offset.w; // perspective divide
offset.xyz = offset.xyz * 0.5 + 0.5; // transform to range 0.0 - 1.0
// get sample depth
float sampleDepth = texture(MyTexture0, offset.xy).z;
// range check & accumulate
float rangeCheck = smoothstep(0.0, 1.0, radius / abs(fragPos.z - sampleDepth));
occlusion += (sampleDepth >= samplePos.z + bias ? 1.0 : 0.0) * rangeCheck;
}
occlusion = 1.0 - (occlusion / samples);
FragColor = vec3(occlusion);
}
As Rabbid76 suggested, the artifacts were caused by sampling outside of the screen borders. I added a check to prevent this and things are looking much better.
vec4 clipSpacePos = projectionMatrix * vec4(samplePos, 1.0); // from view to clip-space
vec3 ndcSpacePos = clipSpacePos.xyz / clipSpacePos.w; // perspective divide
vec2 windowSpacePos = ((ndcSpacePos.xy + 1.0) / 2.0) * vec2(screenWidth, screenHeight);
if ((windowSpacePos.y > 0.0) && (windowSpacePos.y < screenHeight))
if ((windowSpacePos.x > 0.0) && (windowSpacePos.x < screenWidth))
// THEN APPLY AMBIENT OCCLUSION
It hasn't entirely fixed the issue though, as areas close to the window's edge now appear lighter than they should because fewer samples are tested there. Perhaps somebody can suggest an approach that moves the sample area to an appropriate location?
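One way to address the edge brightening (a sketch only, untested in this project, reusing the kernel, uniforms and samplers from the shader above): count how many samples actually land on screen and normalize by that count instead of the full kernel size, so edge pixels are not biased towards white.

float occlusion = 0.0;
int validSamples = 0;
for (int i = 0; i < samples; ++i)
{
    vec3 samplePos = fragPos + (TBN * sample_sphere[i]) * radius;
    vec4 offset = projectionMatrix * vec4(samplePos, 1.0);
    offset.xyz /= offset.w;              // perspective divide
    offset.xyz = offset.xyz * 0.5 + 0.5; // to range 0.0 - 1.0
    // only evaluate samples whose projection lands inside the screen
    if (offset.x > 0.0 && offset.x < 1.0 && offset.y > 0.0 && offset.y < 1.0)
    {
        float sampleDepth = texture(MyTexture0, offset.xy).z;
        float rangeCheck = smoothstep(0.0, 1.0, radius / abs(fragPos.z - sampleDepth));
        occlusion += (sampleDepth >= samplePos.z + bias ? 1.0 : 0.0) * rangeCheck;
        ++validSamples;
    }
}
// divide by the number of samples actually tested, not the kernel size
occlusion = 1.0 - (occlusion / max(float(validSamples), 1.0));

An alternative is to clamp offset.xy to the screen instead of skipping the sample, but that reuses border depth values and can introduce its own bias.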
Related
glsl strange behavior with frag shader and player movement
I'm creating a 2D top-down game in SFML where I would like the player to only be able to see things within their FOV of 45 degrees. Currently my fragment shader looks as follows:

uniform sampler2D texture;
uniform vec2 pos;
uniform vec2 screenSize;
uniform float in_angle;

void main()
{
    vec2 fc = gl_FragCoord.xy / screenSize;
    vec2 ndcCoords = vec2(0.0);
    float fov = radians(45);

    ndcCoords = (pos + (screenSize / 2)) / screenSize;
    ndcCoords.y = abs(ndcCoords.y - 1.0);

    float angle = radians(-in_angle + 90 + 45);
    float coT;
    float siT;
    vec2 adj = vec2(0.0);
    coT = cos(angle);
    siT = sin(angle);
    adj.x = coT * ndcCoords.x - siT * ndcCoords.y;
    adj.y = siT * ndcCoords.x + coT * ndcCoords.y;

    vec2 diff = normalize(ndcCoords - fc);
    float dist = acos(dot(diff, normalize(adj)));

    vec3 color = vec3(0.0f);
    if(dist < fov / 2)
    {
        color = vec3(1.0);
    }
    gl_FragColor = vec4(color, 1.0) * texture2D(texture, gl_TexCoord[0].xy);
}

What this is doing is adjusting and rotating the playerPos vec2 so I can determine which fragcoords are within the player's FOV. However, when I move down without moving the mouse from directly above the player, the FOV shifts to the left/right without the player rotating at all. I've tried every solution I can think of, but I can't seem to stop it, nor can I find a solution to this online. Any suggestions would be appreciated.
A solution has arisen: instead of trying to rotate the object to get its normalised vec2 direction, a simpler method is to calculate the angle between a fragment and the player position, then form a difference by subtracting the player's rotation in radians, as shown below. This can then be adjusted for the coordinate space and compared to the player's FOV:

void main()
{
    float fov = radians(45);
    float pi = radians(180);
    vec3 color = vec3(0.2);
    vec2 st = gl_FragCoord.xy / screenSize.xy;
    pos = (pos + (screenSize / 2)) / screenSize;

    // GLSL has no atan2; the two-argument atan overload is its equivalent
    float angleToObj = atan(st.y - pos.y, st.x - pos.x);
    float angleDiff = angleToObj - radians(-angle);

    if(angleDiff > pi)
        angleDiff -= 2.0 * pi;
    if(angleDiff < -pi)
        angleDiff += 2.0 * pi;

    if(abs(angleDiff) < fov / 2)
        color = vec3(1.0);

    gl_FragColor = vec4(color, 1.0) * texture2D(texture, gl_TexCoord[0].xy);
}
Billboarding using Qt3D 2.0
I am looking for the best way to create a billboard in Qt3D. I would like a plane which faces the camera wherever it is and does not change size when the camera dollies forward or back. I have read how to do this using GLSL vertex and geometry shaders, but I am looking for the Qt3D way, unless custom shaders are the most efficient and best way of billboarding. I have looked, and it appears I can set the matrix on a QTransform via properties, but it isn't clear to me how I would manipulate the matrix, or perhaps there is a better way? I am using the C++ API, but a QML answer would do; I could port it to C++.
If you want to draw just one billboard, you can add a plane and rotate it whenever the camera moves. However, if you want to do this efficiently with thousands or millions of billboards, I recommend using custom shaders. We did this to draw impostor spheres in Qt3D.

We didn't use a geometry shader, because we were targeting systems that don't support geometry shaders. Instead, we used only the vertex shader by placing four vertices at the origin and moving them in the shader. To create many copies, we used instanced drawing. We moved each set of four vertices according to the positions of the spheres. Finally, we moved each of the four vertices of each sphere such that they result in a billboard that always faces the camera.

Start out by subclassing QGeometry and creating a buffer functor that creates four points, all at the origin (see spherespointgeometry.cpp). Give each point an ID that we can use later. If you use geometry shaders, the ID is not needed and you can get away with creating only one vertex.

class SpheresPointVertexDataFunctor : public Qt3DRender::QBufferDataGenerator
{
public:
    SpheresPointVertexDataFunctor()
    {
    }

    QByteArray operator ()() Q_DECL_OVERRIDE
    {
        const int verticesCount = 4;
        // vec3 pos plus a float vertex ID
        const quint32 vertexSize = (3+1) * sizeof(float);

        QByteArray verticesData;
        verticesData.resize(vertexSize*verticesCount);
        float *verticesPtr = reinterpret_cast<float*>(verticesData.data());

        // Vertex 1
        *verticesPtr++ = 0.0;
        *verticesPtr++ = 0.0;
        *verticesPtr++ = 0.0;
        // VertexID 1
        *verticesPtr++ = 0.0;
        // Vertex 2
        *verticesPtr++ = 0.0;
        *verticesPtr++ = 0.0;
        *verticesPtr++ = 0.0;
        // VertexID 2
        *verticesPtr++ = 1.0;
        // Vertex 3
        *verticesPtr++ = 0.0;
        *verticesPtr++ = 0.0;
        *verticesPtr++ = 0.0;
        // VertexID 3
        *verticesPtr++ = 2.0;
        // Vertex 4
        *verticesPtr++ = 0.0;
        *verticesPtr++ = 0.0;
        *verticesPtr++ = 0.0;
        // VertexID 4
        *verticesPtr++ = 3.0;

        return verticesData;
    }

    bool operator ==(const QBufferDataGenerator &other) const Q_DECL_OVERRIDE
    {
        Q_UNUSED(other);
        return true;
    }

    QT3D_FUNCTOR(SpheresPointVertexDataFunctor)
};

For the real positions, we used a separate QBuffer. We also set color and scale, but I have omitted those here (see spheredata.cpp):

void SphereData::setPositions(QVector<QVector3D> positions, QVector3D color, float scale)
{
    QByteArray ba;
    ba.resize(positions.size() * sizeof(QVector3D));
    QVector3D *vboData = reinterpret_cast<QVector3D *>(ba.data());
    for(int i=0; i<positions.size(); i++) {
        QVector3D &position = vboData[i];
        position = positions[i];
    }
    m_buffer->setData(ba);
    m_count = positions.count();
}

Then, in QML, we connected the geometry with the buffer in a QGeometryRenderer. This can also be done in C++, if you prefer (see Spheres.qml):

GeometryRenderer {
    id: spheresMeshInstanced
    primitiveType: GeometryRenderer.TriangleStrip
    enabled: instanceCount != 0
    instanceCount: sphereData.count
    geometry: SpheresPointGeometry {
        attributes: [
            Attribute {
                name: "pos"
                attributeType: Attribute.VertexAttribute
                vertexBaseType: Attribute.Float
                vertexSize: 3
                byteOffset: 0
                byteStride: (3 + 3 + 1) * 4
                divisor: 1
                buffer: sphereData ? sphereData.buffer : null
            }
        ]
    }
}

Finally, we created custom shaders to draw the billboards. Note that because we were drawing impostor spheres, the billboard size was increased to handle raytracing in the fragment shader from awkward angles. You likely do not need the 2.0*0.6 factor in general.
Vertex shader:

#version 330

in vec3 vertexPosition;
in float vertexId;
in vec3 pos;
in vec3 col;
in float scale;

uniform vec3 eyePosition = vec3(0.0, 0.0, 0.0);
uniform mat4 modelMatrix;
uniform mat4 mvp;

out vec3 modelSpherePosition;
out vec3 modelPosition;
out vec3 color;
out vec2 planePosition;
out float radius;

vec3 makePerpendicular(vec3 v)
{
    if(v.x == 0.0 && v.y == 0.0) {
        if(v.z == 0.0) {
            return vec3(0.0, 0.0, 0.0);
        }
        return vec3(0.0, 1.0, 0.0);
    }
    return vec3(-v.y, v.x, 0.0);
}

void main()
{
    vec3 position = vertexPosition + pos;
    color = col;
    radius = scale;
    modelSpherePosition = (modelMatrix * vec4(position, 1.0)).xyz;

    vec3 view = normalize(position - eyePosition);
    vec3 right = normalize(makePerpendicular(view));
    vec3 up = cross(right, view);

    float texCoordX = 1.0 - 2.0*(float(vertexId==0.0) + float(vertexId==2.0));
    float texCoordY = 1.0 - 2.0*(float(vertexId==0.0) + float(vertexId==1.0));
    planePosition = vec2(texCoordX, texCoordY);

    position += 2.0*0.6*(-up - right)*(scale*float(vertexId==0.0));
    position += 2.0*0.6*(-up + right)*(scale*float(vertexId==1.0));
    position += 2.0*0.6*(up - right)*(scale*float(vertexId==2.0));
    position += 2.0*0.6*(up + right)*(scale*float(vertexId==3.0));

    vec4 modelPositionTmp = modelMatrix * vec4(position, 1.0);
    modelPosition = modelPositionTmp.xyz;
    gl_Position = mvp*vec4(position, 1.0);
}

Fragment shader:

#version 330

in vec3 modelPosition;
in vec3 modelSpherePosition;
in vec3 color;
in vec2 planePosition;
in float radius;

out vec4 fragColor;

uniform mat4 modelView;
uniform mat4 inverseModelView;
uniform mat4 inverseViewMatrix;
uniform vec3 eyePosition;
uniform vec3 viewVector;

void main(void)
{
    vec3 rayDirection = eyePosition - modelPosition;
    vec3 rayOrigin = modelPosition - modelSpherePosition;
    vec3 E = rayOrigin;
    vec3 D = rayDirection;

    // Sphere equation: x^2 + y^2 + z^2 = r^2
    // Ray equation: P(t) = E + t*D
    // We substitute the ray into the sphere equation to get
    // (Ex + Dx*t)^2 + (Ey + Dy*t)^2 + (Ez + Dz*t)^2 = r^2
    float r2 = radius*radius;
    float a = D.x*D.x + D.y*D.y + D.z*D.z;
    float b = 2.0*E.x*D.x + 2.0*E.y*D.y + 2.0*E.z*D.z;
    float c = E.x*E.x + E.y*E.y + E.z*E.z - r2;

    // discriminant of the sphere equation
    float d = b*b - 4.0*a*c;
    if(d < 0.0) {
        discard;
    }

    float t = (-b + sqrt(d))/(2.0*a);
    vec3 sphereIntersection = rayOrigin + t * rayDirection;
    vec3 normal = normalize(sphereIntersection);
    vec3 normalDotCamera = color*dot(normal, normalize(rayDirection));

    float pi = 3.1415926535897932384626433832795;
    vec3 position = modelSpherePosition + sphereIntersection;

    // flat red
    fragColor = vec4(1.0, 0.0, 0.0, 1.0);
}

It has been some time since we first implemented this, and there might be easier ways to do it now, but this should give you an idea of the pieces you need.
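If you do not need the impostor raytracing, a plain camera-facing quad can be drawn from the same instanced geometry with a much shorter vertex shader. Below is a minimal sketch, not the implementation above: it assumes a viewMatrix uniform is bound alongside mvp, and reuses the per-instance pos and scale attributes.

#version 330

in vec3 vertexPosition; // the four origin vertices from the geometry above
in float vertexId;
in vec3 pos;    // per-instance billboard centre
in float scale; // per-instance size

uniform mat4 viewMatrix; // assumed to be provided by the framework
uniform mat4 mvp;

out vec2 planePosition;

void main()
{
    // the rows of the view matrix's rotation block are the camera axes in world space
    vec3 right = vec3(viewMatrix[0][0], viewMatrix[1][0], viewMatrix[2][0]);
    vec3 up    = vec3(viewMatrix[0][1], viewMatrix[1][1], viewMatrix[2][1]);

    // map the vertex ID to a corner in [-1,1]^2, the same trick as above
    float cornerX = 1.0 - 2.0*(float(vertexId==0.0) + float(vertexId==2.0));
    float cornerY = 1.0 - 2.0*(float(vertexId==0.0) + float(vertexId==1.0));
    planePosition = vec2(cornerX, cornerY);

    vec3 position = vertexPosition + pos + (right*cornerX + up*cornerY) * scale;
    gl_Position = mvp * vec4(position, 1.0);
}

To also keep the on-screen size constant while the camera dollies, as the question asks, you would additionally scale the corner offset by the eye-space distance to pos.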
WebGL Normal calculations from position texture
I am trying to create a procedural water puddle in WebGL with "water ripples" by vertex displacement. The problem I'm having is that I get a noise I can't explain. Below is the first-pass vertex shader, where I calculate the vertex positions that I later render to a texture that I then use in the second pass.

void main() {
    float damping = 0.5;
    vNormal = normal;

    // wave radius
    float timemod = 0.55;
    float ttime = mod(time, timemod);
    float frequency = 2.0*PI/waveWidth;
    float phase = frequency * 0.21;
    vec4 v = vec4(position, 1.0);

    // Loop through array of start positions
    for(int i = 0; i < 200; i++) {
        float cCenterX = ripplePos[i].x;
        float cCenterY = ripplePos[i].y;
        vec2 center = vec2(cCenterX, cCenterY);
        if(center.x == 0.0 && center.y == 0.0)
            center = normalize(center);

        // wave width
        float tolerance = 0.005;
        radius = sqrt(pow(uv.x - center.x, 2.0) + pow(uv.y - center.y, 2.0));

        // Creating a ripple
        float w_height = (tolerance - (min(tolerance, pow(ripplePos[i].z - radius*10.0, 2.0)))) * (1.0 - ripplePos[i].z/timemod) * 5.82;
        // -2.07 in the end to keep plane at right height. Trial and error solution
        v.z += waveHeight*(1.0 + w_height/tolerance) / 2.0 - 2.07;
        vNormal = normal + v.z;
    }
    vPosition = v.xyz;
    gl_Position = projectionMatrix * modelViewMatrix * v;
}

And the first-pass fragment shader that writes to the texture:

void main() {
    vec3 p = normalize(vPosition);
    p.x = (p.x + 1.0)*0.5;
    p.y = (p.y + 1.0)*0.5;
    gl_FragColor = vec4(normalize(p), 1.0);
}

The second vertex shader is a standard pass-through. The second-pass fragment shader is where I try to calculate the normals to be used for light calculations.

void main() {
    float w = 1.0 / 200.0;
    float h = 1.0 / 200.0;

    // Nearest neighbours
    vec3 p0 = texture2D(rttTexture, vUV).xyz;
    vec3 p1 = texture2D(rttTexture, vUV + vec2(-w, 0)).xyz;
    vec3 p2 = texture2D(rttTexture, vUV + vec2( w, 0)).xyz;
    vec3 p3 = texture2D(rttTexture, vUV + vec2( 0, h)).xyz;
    vec3 p4 = texture2D(rttTexture, vUV + vec2( 0, -h)).xyz;

    vec3 nVec1 = p2 - p0;
    vec3 nVec2 = p3 - p0;
    vec3 vNormal = cross(nVec1, nVec2);
    vec3 N = normalize(vNormal);

    float theZ = texture2D(rttTexture, vUV).r;
    //gl_FragColor = vec4(1., .0, 1., 1.);
    //gl_FragColor = texture2D(tDiffuse, vUV);
    gl_FragColor = vec4(vec3(N), 1.0);
}

The result is this: the image displays the normal map, and the noise I'm referring to is the inconsistency of the blue. Here is a live demonstration: http://oskarhavsvik.se/jonasgerling_water_ripple/waterRTT-clean.html I appreciate any tips and pointers, not only fixes for this problem but on the code in general; I'm here to learn.
After a brief look, it seems like your problem is in storing x/y positions:

gl_FragColor = vec4(vec3(p0*0.5+0.5), 1.0);

You don't need to store them anyway, because the texel position implicitly gives the x/y value. Just change your normal points to something like this:

vec3 p2 = vec3(1, 0, texture2D(rttTexture, vUV + vec2(w, 0)).z);

Rather than 1, 0 you will want to use a scale appropriate to the size of your displayed quad relative to the wave height. Anyway, the result now looks like this. The height/z seems to be scaled by distance from the centre, so I went looking for a normalize() and removed it:

vec3 p = vPosition;
gl_FragColor = vec4(p*0.5+0.5, 1.0);

The normals now look like this...
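Pulling the answer's pieces together, the second-pass fragment shader might end up along these lines (a sketch under the answer's assumptions; heightScale is a hypothetical uniform standing in for the quad-size-to-wave-height scale mentioned above):

uniform sampler2D rttTexture;
uniform float heightScale; // hypothetical tuning factor, see the note above
varying vec2 vUV;

void main() {
    float w = 1.0 / 200.0;
    float h = 1.0 / 200.0;
    // x/y come from the texel offsets themselves; only the height is read from the texture
    vec3 p0 = vec3(0.0, 0.0, texture2D(rttTexture, vUV).z);
    vec3 p2 = vec3(heightScale, 0.0, texture2D(rttTexture, vUV + vec2(w, 0.0)).z);
    vec3 p3 = vec3(0.0, heightScale, texture2D(rttTexture, vUV + vec2(0.0, h)).z);
    vec3 N = normalize(cross(p2 - p0, p3 - p0));
    gl_FragColor = vec4(N * 0.5 + 0.5, 1.0);
}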
Shader - Simple SSS lighting issue
I am trying to create a simple subsurface scattering effect using a shader, but I am facing a small issue. Look at those screenshots. The three images represent three lighting states (above surface, really close to surface, subsurface) with various lighting colors (red and blue) and always the same subsurface color (red). As you might notice, when the light is above the surface and really close to it, its influence appears to diminish, which is the expected behavior. But the problem is that it behaves the same for the subsurface part. This is normal according to my shader code, but in my opinion the subsurface light influence should be higher when going close to the surface. I suggest you look at the screenshot for the expected result. How can I do that? Here is the simplified shader code:

half ndotl = max(0.0f, dot(normalWorld, lightDir));
half inversendotl = max(0.0f, dot(normalWorld, -lightDir));
half3 lightColor = _LightColor0.rgb * ndotl; // This is the normal light color calculation
half3 subsurfacecolor = translucencyColor.rgb * inversendotl; // This is the subsurface color
half3 topsubsurfacecolor = translucencyColor.rgb; // This is used for adding subsurface color to top surface
final = subsurfacecolor + lerp(lightColor, topsubsurfacecolor * 0.5, 1 - ndotl - inversendotl);
The way you have implemented the subsurface scattering effect is very rough, and it is hard to achieve a nice result with so simple an approach. Staying within the selected approach, I would recommend the following things:

First, take into account the distance to the light source according to the inverse square law. This applies to both components, direct light and subsurface.

Second, once the light is behind the surface, it is better to ignore the dot product of the inner normal and the direction to the light, because you never know how the light travels through the object. One more reason is that the law of refraction (assuming that the refraction coefficient of the object is higher than that of air) makes this dot product less influential. You may just use a step function to turn on the subsurface component once the light source is behind the surface.

So, the modified version of your shader would be as follows:

half3 toLightVector = u_lightPos - v_fragmentPos;
half lightDistanceSQ = dot(toLightVector, toLightVector);
half3 lightDir = normalize(toLightVector);
half ndotl = max(0.0, dot(v_normal, lightDir));
half inversendotl = step(0.0, dot(v_normal, -lightDir));
half3 lightColor = _LightColor0.rgb * ndotl / lightDistanceSQ * _LightIntensity0;
half3 subsurfacecolor = translucencyColor.rgb * inversendotl / lightDistanceSQ * _LightIntensity0;
half3 final = subsurfacecolor + lightColor;

where u_lightPos is a uniform that contains the position of the light source, and v_fragmentPos is a varying that contains the position of the fragment.

Here is an example in GLSL using three.js:

var container;
var camera, scene, renderer;
var sssMesh;
var lightSourceMesh;
var sssUniforms;
var clock = new THREE.Clock();

init();
animate();

function init() {
    container = document.getElementById('container');
    camera = new THREE.PerspectiveCamera(40, window.innerWidth / window.innerHeight, 1, 3000);
    camera.position.z = 4;
    camera.position.y = 2;
    camera.rotation.x = -0.45;
    scene = new THREE.Scene();
    var boxGeometry = new THREE.CubeGeometry(0.75, 0.75, 0.75);
    var lightSourceGeometry = new THREE.CubeGeometry(0.1, 0.1, 0.1);
    sssUniforms = {
        u_lightPos: { type: "v3", value: new THREE.Vector3() }
    };
    var sssMaterial = new THREE.ShaderMaterial({
        uniforms: sssUniforms,
        vertexShader: document.getElementById('vertexShader').textContent,
        fragmentShader: document.getElementById('fragment_shader').textContent
    });
    var lightSourceMaterial = new THREE.MeshBasicMaterial();
    sssMesh = new THREE.Mesh(boxGeometry, sssMaterial);
    sssMesh.position.x = 0;
    sssMesh.position.y = 0;
    scene.add(sssMesh);
    lightSourceMesh = new THREE.Mesh(lightSourceGeometry, lightSourceMaterial);
    lightSourceMesh.position.x = 0;
    lightSourceMesh.position.y = 0;
    scene.add(lightSourceMesh);
    renderer = new THREE.WebGLRenderer();
    container.appendChild(renderer.domElement);
    onWindowResize();
    window.addEventListener('resize', onWindowResize, false);
}

function onWindowResize(event) {
    camera.aspect = window.innerWidth / window.innerHeight;
    camera.updateProjectionMatrix();
    renderer.setSize(window.innerWidth, window.innerHeight);
}

function animate() {
    requestAnimationFrame(animate);
    render();
}

function render() {
    var delta = clock.getDelta();
    var lightHeight = Math.sin(clock.elapsedTime * 1.0) * 0.5 + 0.7;
    lightSourceMesh.position.y = lightHeight;
    sssUniforms.u_lightPos.value.y = lightHeight;
    sssMesh.rotation.y += delta * 0.5;
    renderer.render(scene, camera);
}

body {
    color: #ffffff;
    background-color: #050505;
    margin: 0px;
    overflow: hidden;
}

<script src="http://threejs.org/build/three.min.js"></script>
<div id="container"></div>

<script id="fragment_shader" type="x-shader/x-fragment">
varying vec3 v_fragmentPos;
varying vec3 v_normal;
uniform vec3 u_lightPos;

void main(void)
{
    vec3 _LightColor0 = vec3(1.0, 0.5, 0.5);
    float _LightIntensity0 = 0.2;
    vec3 translucencyColor = vec3(0.8, 0.2, 0.2);
    vec3 toLightVector = u_lightPos - v_fragmentPos;
    float lightDistanceSQ = dot(toLightVector, toLightVector);
    vec3 lightDir = normalize(toLightVector);
    float ndotl = max(0.0, dot(v_normal, lightDir));
    float inversendotl = step(0.0, dot(v_normal, -lightDir));
    vec3 lightColor = _LightColor0.rgb * ndotl / lightDistanceSQ * _LightIntensity0;
    vec3 subsurfacecolor = translucencyColor.rgb * inversendotl / lightDistanceSQ * _LightIntensity0;
    vec3 final = subsurfacecolor + lightColor;
    gl_FragColor = vec4(final, 1.0);
}
</script>

<script id="vertexShader" type="x-shader/x-vertex">
varying vec3 v_fragmentPos;
varying vec3 v_normal;

void main()
{
    vec4 mvPosition = modelViewMatrix * vec4(position, 1.0);
    v_fragmentPos = (modelMatrix * vec4(position, 1.0)).xyz;
    v_normal = (modelMatrix * vec4(normal, 0.0)).xyz;
    gl_Position = projectionMatrix * mvPosition;
}
</script>

There are a large number of different techniques for simulating SSS. Texture-space diffusion and shadowmap-based translucency are the most frequently used techniques. Check this article from GPU Gems, which describes the mentioned techniques. You may also find this presentation from EA interesting; it mentions an approach very close to yours for rendering plants. Spherical harmonics also work well for static geometry, but that approach is very complicated and needs precomputed radiance transfer. Check this article, which shows the use of spherical harmonics to approximate SSS.
OpenGL screen space motion blur
I am trying to implement post-process, motion-vector-based motion blur. It kinda "works"... I am experiencing two major issues with it:

If neither the object nor the camera moves, the drawing produces an empty screen.
There are distortions of the object along the borders of the screen.

This is a two-pass approach: pass #1 renders the object into an FBO texture; pass #2 renders a full-screen quad and applies MB to the previous pass's output.

Vertex shader for the second pass:

void main(void)
{
    // transform previous and current position to eye space
    vec4 P = MODEL_VIEW_MATRIX * position;
    vec4 Pprev = prevModelView * position;
    vec3 motionVector = P.xyz - Pprev.xyz;

    // calculate window space motion vector
    P = PROJ * MODEL_VIEW_MATRIX * position;
    Pprev = PROJ * prevModelView * position;
    Pprev = mix(P, Pprev, blurScale);

    // choose previous or current position based on dot product
    // between motion vector and normal
    vec3 N = mat3(MODEL_VIEW_MATRIX) * normal;
    bool flag = dot(motionVector, N) > 0.0;
    vec4 Pstretch = flag ? P : Pprev;
    gl_Position = position;

    // do divide by W -> NDC coordinates
    P.xyz = P.xyz / P.w;
    Pprev.xyz = Pprev.xyz / Pprev.w;

    // calculate window space velocity
    vec3 dP = (P.xyz - Pprev.xyz) * halfWindowSize.xyz;
    float len = length(dP) / (halfWindowSize[0]*2);
    len = clamp(len, 0., 1.);
    dP = normalize(dP);
    dP.z = len;
    out_velocity = dP;
}

Fragment shader for the second pass:

void main(void)
{
    float w = 1.0 / samples;
    vec4 a = vec4(0.);
    vec2 velocity = out_velocity.xy * blurScale * .05 * out_velocity.z;
    ivec2 tsize = textureSize(COLOR_MAP_0, 0);
    vec2 screntc = gl_FragCoord.xy * (1.0 / vec2(tsize));

    // sample the scene texture along the motion vector
    for(float i = 0; i < samples; i += 1)
    {
        float t = i / (samples - 1);
        a = a + texture(COLOR_MAP_0, vec2(screntc) + velocity * t) * w;
    }
    colorOut = a;
}

What is wrong in these shaders?