How to compute vertex normals for a triangle mesh in OpenGL? - c++

To give some background, I'm currently generating a surface of revolution with its center of mass at (0,0,0) in the WCS. The surface of revolution is y = x^2 for 0 <= x <= 1.
I have converted this surface of revolution into a vertex buffer object and can render it successfully on the screen. However, I cannot seem to get Blinn-Phong shading to work on the object. I'm fairly sure that the problem lies in my normal calculation.
This is the stub that creates the object and calculates normals:
GLfloat vp[49 * 49 * 18]; // array of vertex points
int _i = 50;
int _j = 50;
float vertices[50][50][3];
for (int i = 0; i < _i; i++) {
    float fT = (float) i / (_i - 1);
    float fY = fT;
    float fZ = sqrt(fT);
    for (int j = 0; j < _j; j++) {
        float fS = 2 * M_PI * (float) j / (_j - 1);
        vertices[i][j][0] = fZ * cos(fS);
        vertices[i][j][1] = fY - 0.5; // offset by 0.5 to make center of mass the center
        vertices[i][j][2] = fZ * sin(fS);
    }
}
int curr = 0;
for (int i = 0; i < _i - 1; i++) {
    for (int j = 0; j < _j - 1; j++) {
        vp[curr++] = vertices[i][j][0];
        vp[curr++] = vertices[i][j][1];
        vp[curr++] = vertices[i][j][2];
        vp[curr++] = vertices[i+1][j][0];
        vp[curr++] = vertices[i+1][j][1];
        vp[curr++] = vertices[i+1][j][2];
        vp[curr++] = vertices[i][j+1][0];
        vp[curr++] = vertices[i][j+1][1];
        vp[curr++] = vertices[i][j+1][2];
        vp[curr++] = vertices[i+1][j][0];
        vp[curr++] = vertices[i+1][j][1];
        vp[curr++] = vertices[i+1][j][2];
        vp[curr++] = vertices[i+1][j+1][0];
        vp[curr++] = vertices[i+1][j+1][1];
        vp[curr++] = vertices[i+1][j+1][2];
        vp[curr++] = vertices[i][j+1][0];
        vp[curr++] = vertices[i][j+1][1];
        vp[curr++] = vertices[i][j+1][2];
    }
}
GLuint vao;
glGenVertexArrays (1, &vao); // generating and binding is common pattern in OpenGL
glBindVertexArray (vao); // basically setting up memory and associating it
GLuint points_vbo;
glGenBuffers(1, &points_vbo);
glBindBuffer(GL_ARRAY_BUFFER, points_vbo);
glBufferData(GL_ARRAY_BUFFER, 49 * 49 * 18 * sizeof (GLfloat), vp, GL_STATIC_DRAW);
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 0, NULL);
glEnableVertexAttribArray(0);
GLfloat normals[49 * 49 * 18 / 3];
curr = 0;
for (int i = 0; i < 49 * 49 * 18; i += 9){
    int Ux = vp[i+3] - vp[i];
    int Uy = vp[i+4] - vp[i+1];
    int Uz = vp[i+5] - vp[i+2];
    int Vx = vp[i+6] - vp[i];
    int Vy = vp[i+7] - vp[i+1];
    int Vz = vp[i+8] - vp[i+2];
    normals[curr++] = Uy * Vz - Uz * Vy;
    normals[curr++] = Uz * Vx - Ux * Vz;
    normals[curr++] = Ux * Vy - Uy * Vx;
}
GLuint normals_vbo;
glGenBuffers(1, &normals_vbo);
glBindBuffer(GL_ARRAY_BUFFER, normals_vbo);
glBufferData(GL_ARRAY_BUFFER, 49 * 49 * 18 / 3 * sizeof(GLfloat), normals, GL_STATIC_DRAW);
glVertexAttribPointer(1, 3, GL_FLOAT, GL_FALSE, 0, NULL);
glEnableVertexAttribArray(1);
This is my vertex shader:
#version 410
layout (location = 0) in vec3 vtxPosition;
layout (location = 1) in vec3 normal;
uniform mat4 proj_mat, view_mat, model_mat;
out vec3 Normal;
out vec3 fpos;
void main () {
    gl_Position = proj_mat * view_mat * model_mat * vec4(vtxPosition, 1.0);
    fpos = vec3(model_mat * vec4(vtxPosition, 1.0));
    Normal = normal;
}
And lastly my fragment shader:
#version 410
// Define INPUTS from fragment shader
//uniform mat4 view_mat;
in vec3 Normal;
in vec3 fpos;
// These come from the VAO for texture coordinates.
in vec2 texture_coords;
// And from the uniform outputs for the textures setup in main.cpp.
uniform sampler2D texture00;
uniform sampler2D texture01;
out vec4 fragment_color; //RGBA color
const vec3 lightPos = vec3(0.0,0.0,5.0);
const vec3 diffColor = vec3(1.0,0.5,0.0);
const vec3 specColor = vec3(1.0,1.0,1.0);
void main () {
    vec3 normal = normalize(Normal);
    vec3 lightDir = normalize(lightPos - fpos);
    float lamb = max(dot(lightDir, normal), 0.0);
    float spec = 0.0;
    if (lamb > 0.0) {
        vec3 refDir = reflect(-lightDir, normal);
        vec3 viewDir = normalize(-fpos);
        float specAngle = max(dot(refDir, viewDir), 0.0);
        spec = pow(specAngle, 4.0);
    }
    fragment_color = vec4(lamb * diffColor + spec * specColor, 1.0);
}
This is the current rendering of the object:

You have to specify one normal attribute for each vertex coordinate; a vertex coordinate and its attributes form a tuple.
Furthermore, you have to use the data type float rather than int when computing the normal vectors:
GLfloat normals[49 * 49 * 18];
curr = 0;
for (int i = 0; i < 49 * 49 * 18; i += 9){
    float Ux = vp[i+3] - vp[i];
    float Uy = vp[i+4] - vp[i+1];
    float Uz = vp[i+5] - vp[i+2];
    float Vx = vp[i+6] - vp[i];
    float Vy = vp[i+7] - vp[i+1];
    float Vz = vp[i+8] - vp[i+2];
    float nx = Uy * Vz - Uz * Vy;
    float ny = Uz * Vx - Ux * Vz;
    float nz = Ux * Vy - Uy * Vx;
    for (int j = 0; j < 3; ++j) {
        normals[curr++] = nx;
        normals[curr++] = ny;
        normals[curr++] = nz;
    }
}
glBufferData(GL_ARRAY_BUFFER, 49 * 49 * 18 * sizeof(GLfloat), normals, GL_STATIC_DRAW);
I recommend inverting the normal vector of the back faces for a double-sided light model:
vec3 normal = normalize(Normal);
vec3 viewDir = normalize(-fpos);
if (dot(normal, viewDir) < 0.0)
    normal *= -1.0;
Fragment shader:
#version 410
// Define INPUTS from fragment shader
//uniform mat4 view_mat;
in vec3 Normal;
in vec3 fpos;
// These come from the VAO for texture coordinates.
in vec2 texture_coords;
// And from the uniform outputs for the textures setup in main.cpp.
uniform sampler2D texture00;
uniform sampler2D texture01;
out vec4 fragment_color; //RGBA color
const vec3 lightPos = vec3(0.0,0.0,5.0);
const vec3 diffColor = vec3(1.0,0.5,0.0);
const vec3 specColor = vec3(1.0,1.0,1.0);
void main () {
    vec3 normal = normalize(Normal);
    vec3 viewDir = normalize(-fpos);
    if (dot(normal, viewDir) < 0.0)
        normal *= -1.0;
    vec3 lightDir = normalize(lightPos - fpos);
    float lamb = max(dot(lightDir, normal), 0.0);
    float spec = 0.0;
    if (lamb > 0.0) {
        vec3 refDir = reflect(-lightDir, normal);
        float specAngle = max(dot(refDir, viewDir), 0.0);
        spec = pow(specAngle, 4.0);
    }
    fragment_color = vec4(lamb * diffColor + spec * specColor, 1.0);
}
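Since the surface is analytic (before the 0.5 offset it is the paraboloid x^2 + z^2 = y, and with the offset x^2 + z^2 = y + 0.5), you could instead derive exact, smooth per-vertex normals from the gradient of F(x,y,z) = x^2 + z^2 - y, which is (2x, -1, 2z); the constant offset does not change the gradient. A minimal sketch of this alternative in the vertex shader, assuming model_mat contains no non-uniform scaling (otherwise transform the normal by the inverse transpose):
#version 410
layout (location = 0) in vec3 vtxPosition;
uniform mat4 proj_mat, view_mat, model_mat;
out vec3 Normal;
out vec3 fpos;
void main () {
    gl_Position = proj_mat * view_mat * model_mat * vec4(vtxPosition, 1.0);
    fpos = vec3(model_mat * vec4(vtxPosition, 1.0));
    // gradient of x^2 + z^2 - y, evaluated at the vertex; flip the sign
    // if the lit side comes out inverted (or keep the double-sided test)
    Normal = normalize(vec3(2.0 * vtxPosition.x, -1.0, 2.0 * vtxPosition.z));
}
This removes the faceted look of the duplicated face normals, and the normal attribute and its VBO can be dropped entirely.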

Related

Why is my OpenGL Compute Shader so slow?

I have been building an OpenGL compute shader that implements ray tracing. Currently it just computes the pixel color by casting a ray against an array of triangles.
#version 430 core
struct Triangle {
    vec3 vertex1;
    vec3 vertex2;
    vec3 vertex3;
    vec3 color1;
    vec3 color2;
    vec3 color3;
    vec3 normal1;
    vec3 normal2;
    vec3 normal3;
    vec3 edge1;
    vec3 edge2;
};
layout (std430, binding = 0) readonly buffer TriangleBuffer {
    int numTriangles;
    Triangle triangles[];
};
layout (std430, binding = 1, column_major) buffer CameraBuffer {
    vec3 cameraPosition;
    mat4 view;
    mat4 projection;
    mat4 inverseViewProjection;
};
layout (rgba8, binding = 2) writeonly uniform image2D outputImage;
layout (local_size_x = 1, local_size_y = 1, local_size_z = 1) in;
vec3 getBarycentricCoords(int triangleIndex, vec3 closestIntersectionPoint) {
    vec3 v0 = triangles[triangleIndex].vertex2 - triangles[triangleIndex].vertex1;
    vec3 v1 = triangles[triangleIndex].vertex3 - triangles[triangleIndex].vertex1;
    vec3 v2 = closestIntersectionPoint - triangles[triangleIndex].vertex1;
    float d00 = dot(v0, v0);
    float d01 = dot(v0, v1);
    float d11 = dot(v1, v1);
    float d20 = dot(v2, v0);
    float d21 = dot(v2, v1);
    float denom = d00 * d11 - d01 * d01;
    float b1 = (d11 * d20 - d01 * d21) / denom;
    float b2 = (d00 * d21 - d01 * d20) / denom;
    float b0 = 1.0f - b1 - b2;
    return vec3(b0, b1, b2);
}
vec3 getTriangleColor(int triangleIndex, vec3 closestIntersectionPoint) {
    vec3 barycentric = getBarycentricCoords(triangleIndex, closestIntersectionPoint);
    vec3 triangleColor = barycentric.x * triangles[triangleIndex].color1 + barycentric.y * triangles[triangleIndex].color2 + barycentric.z * triangles[triangleIndex].color3;
    return triangleColor;
}
bool rayTriangleIntersection(vec3 rayOrigin, vec3 rayDirection, int triangleIndex, out vec3 intersectionPoint) {
    vec3 h = cross(rayDirection, triangles[triangleIndex].edge2);
    float a = dot(triangles[triangleIndex].edge1, h);
    if (a > -0.00001 && a < 0.00001) {
        return false;
    }
    float f = 1.0 / a;
    vec3 s = rayOrigin - triangles[triangleIndex].vertex1;
    float u = f * dot(s, h);
    if (u < 0.0 || u > 1.0) {
        return false;
    }
    vec3 q = cross(s, triangles[triangleIndex].edge1);
    float v = f * dot(rayDirection, q);
    if (v < 0.0 || u + v > 1.0) {
        return false;
    }
    float t = f * dot(triangles[triangleIndex].edge2, q);
    if (t > 0.00001) {
        intersectionPoint = rayOrigin + rayDirection * t;
        return true;
    }
    return false;
}
vec3 unProject(vec3 win, mat4 model, mat4 proj, vec4 viewport) {
    vec4 tmp = vec4(win, 1);
    tmp.x = (tmp.x - viewport[0]) / viewport[2];
    tmp.y = (tmp.y - viewport[1]) / viewport[3];
    tmp.x = tmp.x * 2 - 1;
    tmp.y = tmp.y * 2 - 1;
    vec4 obj = inverseViewProjection * tmp;
    obj /= obj.w;
    return obj.xyz;
}
void main() {
    ivec2 pixelCoord = ivec2(gl_GlobalInvocationID.xy);
    vec4 viewport = vec4(0, 0, vec2(imageSize(outputImage)).xy);
    vec3 near = vec3(pixelCoord.x, pixelCoord.y, -1);
    vec3 far = vec3(pixelCoord.x, pixelCoord.y, 0.9518f);
    vec3 rayOrigin = unProject(near, view, projection, viewport);
    vec3 rayWorldFar = unProject(far, view, projection, viewport);
    vec3 rayDirection = normalize(rayWorldFar - rayOrigin);
    vec3 intersectionPoint;
    vec3 closestIntersectionPoint = vec3(0,0,0);
    float closestIntersectionDistance = 999999999.0f;
    vec3 finalColor = vec3(0,0,0);
    bool intersectionFound = false;
    for (int triangleIndex = 0; triangleIndex < numTriangles; triangleIndex++) {
        if (rayTriangleIntersection(rayOrigin, rayDirection, triangleIndex, intersectionPoint)) {
            float intersectionDistance = distance(intersectionPoint, rayOrigin);
            if (intersectionDistance < closestIntersectionDistance) {
                closestIntersectionDistance = intersectionDistance;
                closestIntersectionPoint = intersectionPoint;
                finalColor = getTriangleColor(triangleIndex, closestIntersectionPoint);
                intersectionFound = true;
            }
        }
    }
    if (intersectionFound) {
        imageStore(outputImage, pixelCoord, vec4(finalColor, 1.0f));
    } else {
        imageStore(outputImage, pixelCoord, vec4(0));
    }
}
However, when running the shader I only get 30 fps, even though the scene contains only 20 triangles, so there must be a significant bottleneck in the code.
What optimizations can I make to increase the performance? Why is there a bottleneck?
I managed to more than double my framerate by making the following modifications:
Change the layout to a higher local size
For this I queried GL_MAX_COMPUTE_WORK_GROUP_INVOCATIONS:
GLint glMaxComputeWorkGroupInvocations = 0;
glGetIntegerv(GL_MAX_COMPUTE_WORK_GROUP_INVOCATIONS, &glMaxComputeWorkGroupInvocations);
LIGHTING_SHADER_LOCAL_SIZE_Y = LIGHTING_SHADER_LOCAL_SIZE_X = sqrt(glMaxComputeWorkGroupInvocations);
and update the layout sizes:
layout (local_size_x = ${LIGHTING_SHADER_LOCAL_SIZE_X}, local_size_y = ${LIGHTING_SHADER_LOCAL_SIZE_Y}, local_size_z = 1) in;
Compute pixelCoord from the work group ID and local invocation ID
ivec3 groupId = ivec3(gl_WorkGroupID);
ivec3 localId = ivec3(gl_LocalInvocationID);
ivec3 globalId = ivec3(gl_GlobalInvocationID);
ivec3 coords = groupId * ivec3(gl_WorkGroupSize) + localId;
ivec2 pixelCoord = ivec2(coords.xy);
Update the glDispatchCompute call
glDispatchCompute(windowWidth / LIGHTING_SHADER_LOCAL_SIZE_X, windowHeight / LIGHTING_SHADER_LOCAL_SIZE_Y, 1);
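One caveat worth adding: the dispatch above truncates when windowWidth or windowHeight is not a multiple of the local size. Rounding the group counts up, e.g. (windowWidth + LIGHTING_SHADER_LOCAL_SIZE_X - 1) / LIGHTING_SHADER_LOCAL_SIZE_X, covers the whole image, but then the shader needs a guard so the extra invocations do not write out of bounds. A minimal sketch, assuming the outputImage declaration from the question:
void main() {
    ivec2 pixelCoord = ivec2(gl_GlobalInvocationID.xy);
    // discard invocations that fall outside the image when the
    // dispatch size was rounded up to a multiple of the local size
    if (any(greaterThanEqual(pixelCoord, imageSize(outputImage)))) {
        return;
    }
    // ... ray setup and triangle loop as before ...
}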

Compute Normals After Vertex Deformation?

I am writing a vertex and a fragment shader that distort the surface of some water and then compute Blinn-Phong lighting on the surface. I can successfully compute the deformed positions with a simple noise function, but how can I find the distorted normals? Since it isn't a linear transformation I am stuck. Could anyone help?
Here are the relevant files:
vertex shader:
#version 150
uniform mat4 u_Model;
uniform mat4 u_ModelInvTr;
uniform mat4 u_ViewProj;
uniform vec4 u_Color;
uniform int u_Time;
in vec4 vs_Pos; // The array of vertex positions passed to the shader
in vec4 vs_Nor; // The array of vertex normals passed to the shader
in vec4 vs_Col; // The array of vertex colors passed to the shader.
in vec2 vs_UV; // UV coords for texture to pass thru to fragment shader
in float vs_Anim; // 0.f or 1.f To pass thru to fragment shader
in float vs_T2O;
out vec4 fs_Pos;
out vec4 fs_Nor;
out vec4 fs_LightVec;
out vec4 fs_Col;
out vec2 fs_UVs;
out float fs_Anim;
out float fs_dimVal;
out float fs_T2O;
uniform vec4 u_CamPos;
out vec4 fs_CamPos;
const vec4 lightDir = normalize(vec4(0.0, 1.f, 0.0, 0));
mat4 rotationMatrix(vec3 axis, float angle) {
    axis = normalize(axis);
    float s = sin(angle);
    float c = cos(angle);
    float oc = 1.0 - c;
    return mat4(oc * axis.x * axis.x + c,          oc * axis.x * axis.y - axis.z * s, oc * axis.z * axis.x + axis.y * s, 0.0,
                oc * axis.x * axis.y + axis.z * s, oc * axis.y * axis.y + c,          oc * axis.y * axis.z - axis.x * s, 0.0,
                oc * axis.z * axis.x - axis.y * s, oc * axis.y * axis.z + axis.x * s, oc * axis.z * axis.z + c,          0.0,
                0.0,                               0.0,                               0.0,                               1.0);
}
vec4 rotateLightVec(float deg, vec4 LV) {
    mat4 R = rotationMatrix(vec3(0,0,1), deg);
    return R * LV;
}
float random1(vec3 p) {
    return fract(sin(dot(p, vec3(127.1, 311.7, 191.999))) * 43758.5453);
}
vec3 random2( vec3 p ) {
    return fract( sin( vec3(dot(p, vec3(127.1, 311.7, 58.24)), dot(p, vec3(269.5, 183.3, 657.3)), dot(p, vec3(420.69, 69.420, 469.20))) ) * 43758.5453);
}
void main()
{
    fs_Col = vs_Col;
    fs_UVs = vs_UV;
    fs_Anim = vs_Anim;
    fs_T2O = vs_T2O;
    mat3 invTranspose = mat3(u_ModelInvTr);
    fs_Nor = vec4(invTranspose * vec3(vs_Nor), 0);
    vec4 modelposition = u_Model * vs_Pos;
    if (vs_Anim != 0) { // if we want to animate this surface
        // check region in texture to decide which animatable type is drawn
        bool lava = fs_UVs.x >= 13.f/16.f && fs_UVs.y < 2.f/16.f;
        bool water = !lava && fs_UVs.x >= 13.f/16.f && fs_UVs.y <= 4.f/16.f;
        if (water) {
            // define an oscillating time so that model can transition back and forth
            float t = (cos(u_Time * 0.05) + 1)/2; // u_Time increments by 1 every frame. Domain [0,1]
            vec3 temp = random2(vec3(modelposition.x, modelposition.y, modelposition.z)); // range [0, 1]
            temp = (temp - 0.5)/25; // [0, 1/scalar]
            modelposition.x = mix(modelposition.x - temp.x, modelposition.x + temp.x, t);
            modelposition.y = mix(modelposition.y - temp.y, modelposition.y + 3*temp.y, t);
            modelposition.z = mix(modelposition.z - temp.z, modelposition.z + temp.z, t);
        } else if (lava) {
            // define an oscillating time so that model can transition back and forth
            float t = (cos(u_Time * 0.01) + 1)/2; // u_Time increments by 1 every frame. Domain [0,1]
            vec3 temp = random2(vec3(modelposition.x, modelposition.y, modelposition.z)); // range [0, 1]
            temp = (temp - 0.5)/25; // [0, 1/scalar]
            modelposition.x = mix(modelposition.x - temp.x, modelposition.x + temp.x, t);
            modelposition.y = mix(modelposition.y - temp.y, modelposition.y + 3*temp.y, t);
            modelposition.z = mix(modelposition.z - temp.z, modelposition.z + temp.z, t);
        }
    }
    fs_dimVal = random1(modelposition.xyz/100.f);
    fs_LightVec = rotateLightVec(0.001 * u_Time, lightDir); // Compute the direction in which the light source lies
    fs_CamPos = u_CamPos; // uniform handle for the camera position instead of the inverse
    fs_Pos = modelposition;
    gl_Position = u_ViewProj * modelposition; // gl_Position is a built-in variable of OpenGL which is
                                              // used to render the final positions of the geometry's vertices
}
fragment shader:
#version 330
uniform vec4 u_Color; // The color with which to render this instance of geometry.
uniform sampler2D textureSampler;
uniform int u_Time;
uniform mat4 u_ViewProj;
uniform mat4 u_Model;
in vec4 fs_Pos;
in vec4 fs_Nor;
in vec4 fs_LightVec;
in vec4 fs_Col;
in vec2 fs_UVs;
in float fs_Anim;
in float fs_T2O;
in float fs_dimVal;
out vec4 out_Col;
in vec4 fs_CamPos;
float random1(vec3 p) {
    return fract(sin(dot(p, vec3(127.1, 311.7, 191.999))) * 43758.5453);
}
float random1b(vec3 p) {
    return fract(sin(dot(p, vec3(169.1, 355.7, 195.999))) * 95751.5453);
}
float mySmoothStep(float a, float b, float t) {
    t = smoothstep(0, 1, t);
    return mix(a, b, t);
}
float cubicTriMix(vec3 p) {
    vec3 pFract = fract(p);
    float llb = random1(floor(p) + vec3(0,0,0));
    float lrb = random1(floor(p) + vec3(1,0,0));
    float ulb = random1(floor(p) + vec3(0,1,0));
    float urb = random1(floor(p) + vec3(1,1,0));
    float llf = random1(floor(p) + vec3(0,0,1));
    float lrf = random1(floor(p) + vec3(1,0,1));
    float ulf = random1(floor(p) + vec3(0,1,1));
    float urf = random1(floor(p) + vec3(1,1,1));
    float mixLoBack = mySmoothStep(llb, lrb, pFract.x);
    float mixHiBack = mySmoothStep(ulb, urb, pFract.x);
    float mixLoFront = mySmoothStep(llf, lrf, pFract.x);
    float mixHiFront = mySmoothStep(ulf, urf, pFract.x);
    float mixLo = mySmoothStep(mixLoBack, mixLoFront, pFract.z);
    float mixHi = mySmoothStep(mixHiBack, mixHiFront, pFract.z);
    return mySmoothStep(mixLo, mixHi, pFract.y);
}
float fbm(vec3 p) {
    float amp = 0.5;
    float freq = 4.0;
    float sum = 0.0;
    for(int i = 0; i < 8; i++) {
        sum += cubicTriMix(p * freq) * amp;
        amp *= 0.5;
        freq *= 2.0;
    }
    return sum;
}
void main()
{
    vec4 diffuseColor = texture(textureSampler, fs_UVs);
    bool apply_lambert = true;
    float specularIntensity = 0;
    if (fs_Anim != 0) {
        // check region in texture to decide which animatable type is drawn
        bool lava = fs_UVs.x >= 13.f/16.f && fs_UVs.y < 2.f/16.f;
        bool water = !lava && fs_UVs.x >= 13.f/16.f && fs_UVs.y < 4.f/16.f;
        if (lava) {
            // slowly gyrate texture and lighten and darken with random dimVal from vert shader
            vec2 movingUVs = vec2(fs_UVs.x + fs_Anim * 0.065/16 * sin(0.01*u_Time),
                                  fs_UVs.y - fs_Anim * 0.065/16 * sin(0.01*u_Time + 3.14159/2));
            diffuseColor = texture(textureSampler, movingUVs);
            vec4 warmerColor = diffuseColor + vec4(0.3, 0.3, 0, 0);
            vec4 coolerColor = diffuseColor - vec4(0.1, 0.1, 0, 0);
            diffuseColor = mix(warmerColor, coolerColor, 0.5 + fs_dimVal * 0.65*sin(0.02*u_Time));
            apply_lambert = false;
        } else if (water) {
            // blend between 3 different points in texture to create a wavy subtle change over time
            vec2 offsetUVs = vec2(fs_UVs.x - 0.5f/16.f, fs_UVs.y - 0.5f/16.f);
            diffuseColor = texture(textureSampler, fs_UVs);
            vec4 altColor = texture(textureSampler, offsetUVs);
            altColor.x += fs_dimVal * pow(altColor.x+.15, 5);
            altColor.y += fs_dimVal * pow(altColor.y+.15, 5);
            altColor.z += 0.5 * fs_dimVal * pow(altColor.z+.15, 5);
            diffuseColor = mix(diffuseColor, altColor, 0.5 + 0.35*sin(0.05*u_Time));
            offsetUVs -= 0.25f/16.f;
            vec4 newColor = texture(textureSampler, offsetUVs);
            diffuseColor = mix(diffuseColor, newColor, 0.5 + 0.5*sin(0.025*u_Time)) + fs_dimVal * vec4(0.025);
            diffuseColor.a = 0.7;
            // ----------------------------------------------------
            // Blinn-Phong Shading
            // ----------------------------------------------------
            vec4 lightDir = normalize(fs_LightVec - fs_Pos);
            vec4 viewDir = normalize(fs_CamPos - fs_Pos);
            vec4 halfVec = normalize(lightDir + viewDir);
            float shininess = 400.f;
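            // note: the 'float' on the next line declares a new specularIntensity
            // that shadows the one at the top of main(), so the outer value stays 0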
            float specularIntensity = max(pow(dot(halfVec, normalize(fs_Nor)), shininess), 0);
        }
    }
    // Calculate the diffuse term for Lambert shading
    float diffuseTerm = dot(normalize(fs_Nor), normalize(fs_LightVec));
    // Avoid negative lighting values
    diffuseTerm = clamp(diffuseTerm, 0, 1);
    float ambientTerm = 0.3;
    float lightIntensity = diffuseTerm + ambientTerm; // Add a small float value to the color multiplier
                                                      // to simulate ambient lighting. This ensures that faces that
                                                      // are not lit by our point light are not completely black.
    vec3 col = diffuseColor.rgb;
    // Compute final shaded color
    if (apply_lambert) {
        col = col * lightIntensity + col * specularIntensity;
    }
    // Check the rare, special case where we draw a face between two different transparent blocks as opaque
    if (fs_T2O != 0) {
        out_Col = vec4(col, 1.f);
    } else {
        out_Col = vec4(col, diffuseColor.a);
    }
    // distance fog!
    vec4 fogColor = vec4(0.6, 0.75, 0.9, 1.0);
    float FC = gl_FragCoord.z / gl_FragCoord.w / 124.f;
    float falloff = clamp(1.05 - exp(-1.05f * (FC - 0.9f)), 0.f, 1.f);
    out_Col = mix(out_Col, fogColor, falloff);
}
I tried implementing Blinn-Phong in the fragment shader, but I think it is wrong simply because of the wrong normals. I think this can be done with some sort of tangent and cross-product solution, but how can I know the tangent of the surface given that we only know the vertex position?
I am not using Unity, just bare C++, and most of the answers I am finding online are for Java or Unity, which I do not understand.
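One way to prototype the tangent/cross-product idea from the question: if the displacement is a smooth, pure function of position, it can be re-evaluated at two nearby points offset along surface tangents, and the two difference vectors crossed to get the deformed normal. A minimal sketch; displace() here is a hypothetical stand-in built on the fbm noise from the fragment shader (the hash-based random2 is not smooth, so its finite differences would just be noise):
vec3 displace(vec3 p) {
    float h = fbm(p);                              // smooth scalar noise in [0, 1]
    return p + vec3(0.0, (h - 0.5) / 25.0, 0.0);   // vertical water offset
}
vec3 deformedNormal(vec3 p, vec3 n) {
    // two tangents perpendicular to the undeformed normal
    vec3 t = normalize(cross(n, abs(n.y) < 0.99 ? vec3(0, 1, 0) : vec3(1, 0, 0)));
    vec3 b = cross(n, t);
    float eps = 0.01;
    vec3 p0 = displace(p);
    vec3 du = displace(p + eps * t) - p0;
    vec3 dv = displace(p + eps * b) - p0;
    vec3 nn = normalize(cross(du, dv));
    return dot(nn, n) < 0.0 ? -nn : nn; // keep orientation consistent with the original
}
In the vertex shader this could feed the animated branch, e.g. fs_Nor = vec4(invTranspose * deformedNormal(vs_Pos.xyz, normalize(vec3(vs_Nor))), 0), with fbm copied over from the fragment shader.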

How to implement Screen Space Reflection with DDA

I am trying to implement screen space reflection with DDA, following
http://casual-effects.blogspot.jp/2014/08/screen-space-ray-tracing.html
but it is not working well.
Below are my shader codes.
This is the vertex shader code:
layout(location = 0) in vec4 position;
layout(location = 1) in vec4 color_0;
layout(location = 2) in vec3 normal;
uniform mat4 mtxL2W; // Local to World space.
uniform mat4 mtxW2C; // World to Clip space.
out vec4 varColor;
out vec3 varNormal;
void main()
{
    gl_Position = mtxW2C * mtxL2W * position;
    varColor = color_0;
    varNormal = normalize(mtxL2W * vec4(normal, 0)).xyz;
}
This is the fragment shader code:
in vec4 varColor;
in vec3 varNormal;
layout(location = 0) out vec4 outColor;
uniform sampler2D s0; // color
uniform sampler2D s1; // linear depth.
uniform mat4 mtxW2V; // World to View(Camera) space.
uniform mat4 mtxV2C; // View(Camera) to Clip space.
uniform mat4 mtxC2V; // Clip to View(Camera) space.
uniform mat4 mtxV2W; // View(Camera) to World space.
uniform vec4 camPos; // Camera position (World space).
uniform float nearPlaneZ;
uniform float maxDistance;
uniform float zThickness;
uniform int maxSteps;
uniform float stride;
float squaredLength(vec2 a, vec2 b)
{
    a -= b;
    return dot(a, a);
}
bool intersectsDepthBuffer(float z, float minZ, float maxZ)
{
    z += zThickness;
    return (maxZ >= z) && (minZ - zThickness <= z);
}
bool traceScreenSpaceRay(
    vec3 csOrig,
    vec3 csDir,
    out vec2 hitPixel,
    out vec3 hitPoint)
{
    // Clip to the near plane.
    float rayLength = (csOrig.z + csDir.z * maxDistance) < nearPlaneZ
        ? (nearPlaneZ - csOrig.z) / csDir.z
        : maxDistance;
    vec3 csEndPoint = csOrig + csDir * rayLength;
    // Project into homogeneous clip space.
    vec4 H0 = mtxV2C * vec4(csOrig, 1);
    vec4 H1 = mtxV2C * vec4(csEndPoint, 1);
    float k0 = 1.0 / H0.w;
    float k1 = 1.0 / H1.w;
    // The interpolated homogeneous version of the camera-space points.
    vec3 Q0 = csOrig * k0;
    vec3 Q1 = csEndPoint * k1;
    // Screen space point.
    vec2 P0 = H0.xy * k0;
    vec2 P1 = H1.xy * k1;
    // [-1, 1] -> [0, 1]
    P0 = P0 * 0.5 + 0.5;
    P1 = P1 * 0.5 + 0.5;
    ivec2 texsize = textureSize(s0, 0);
    P0 *= vec2(texsize.xy);
    P1 *= vec2(texsize.xy);
    P1.x = min(max(P1.x, 0), texsize.x);
    P1.y = min(max(P1.y, 0), texsize.y);
    // If the line is degenerate, make it cover at least one pixel to avoid handling zero-pixel extent as a special case later.
    P1 += squaredLength(P0, P1) < 0.0001
        ? vec2(0.01, 0.01)
        : vec2(0.0);
    vec2 delta = P1 - P0;
    // Permute so that the primary iteration is in x to collapse all quadrant-specific DDA cases later.
    bool permute = false;
    if (abs(delta.x) < abs(delta.y))
    {
        permute = true;
        delta = delta.yx;
        P0 = P0.yx;
        P1 = P1.yx;
    }
    float stepDir = sign(delta.x);
    float invdx = stepDir / delta.x;
    // Track the derivatives of Q and k.
    vec3 dQ = (Q1 - Q0) / invdx;
    float dk = (k1 - k0) / invdx;
    // y is slope.
    // slope = (y1 - y0) / (x1 - x0)
    vec2 dP = vec2(stepDir, delta.y / invdx);
    // Adjust end condition for iteration direction
    float end = P1.x * stepDir;
    int stepCount = 0;
    float prevZMaxEstimate = csOrig.z;
    float rayZMin = prevZMaxEstimate;
    float rayZMax = prevZMaxEstimate;
    float sceneZMax = rayZMax + 100.0f;
    dP *= stride;
    dQ *= stride;
    dk *= stride;
    vec4 PQk = vec4(P0, Q0.z, k0);
    vec4 dPQk = vec4(dP, dQ.z, dk);
    vec3 Q = Q0;
    for (;
        ((PQk.x * stepDir) <= end)
        && (stepCount < maxSteps)
        && !intersectsDepthBuffer(sceneZMax, rayZMin, rayZMax)
        && (sceneZMax != 0.0);
        ++stepCount)
    {
        rayZMin = prevZMaxEstimate;
        rayZMax = (PQk.z + dPQk.z * 0.5) / (PQk.w + dPQk.w * 0.5);
        prevZMaxEstimate = rayZMax;
        if (rayZMin > rayZMax) {
            float tmp = rayZMin;
            rayZMin = rayZMax;
            rayZMax = tmp;
        }
        hitPixel = permute ? PQk.yx : PQk.xy;
        //hitPixel.y = texsize.y - hitPixel.y;
        sceneZMax = texelFetch(s1, ivec2(hitPixel), 0).r;
        PQk += dPQk;
    }
    // Advance Q based on the number of steps
    Q.xy += dQ.xy * stepCount;
    hitPoint = Q * (1.0f / PQk.w);
    hitPoint = vec3(sceneZMax, rayZMin, rayZMax);
    return intersectsDepthBuffer(sceneZMax, rayZMin, rayZMax);
}
void main()
{
    vec3 normal = normalize(varNormal);
    float linearDepth = texelFetch(s1, ivec2(gl_FragCoord.xy), 0).r;
    ivec2 texsize = textureSize(s0, 0);
    // Ray origin is camera origin.
    vec3 rayOrg = camPos.xyz;
    // Screen coordinate.
    vec4 pos = vec4(gl_FragCoord.xy / texsize, 0, 1);
    // [0, 1] -> [-1, 1]
    pos.xy = pos.xy * 2.0 - 1.0;
    // Screen-space -> Clip-space
    pos.xy *= linearDepth;
    // Clip-space -> View-space
    pos = mtxC2V * pos;
    pos.z = linearDepth;
    // View-space -> World-space.
    vec3 worldPos = (mtxV2W * vec4(pos.xyz, 1)).xyz;
    // Compute ray direction: from ray origin to world position.
    vec3 rayDir = normalize(worldPos - rayOrg);
    // Compute reflection vector.
    vec3 refDir = reflect(rayDir, normal);
    // Reflection vector origin is world position.
    vec3 refOrg = worldPos;
    // Transform to view coordinates.
    refOrg = (mtxW2V * vec4(refOrg, 1)).xyz;
    refDir = (mtxW2V * vec4(refDir, 0)).xyz;
    vec2 hitPixel = vec2(0, 0);
    vec3 hitPoint = vec3(0, 0, 0);
    // Trace screen space ray.
    bool isIntersect = traceScreenSpaceRay(refOrg, refDir, hitPixel, hitPoint);
    vec2 uv = hitPixel / texsize.xy;
    if (uv.x > 1.0 || uv.x < 0.0 || uv.y > 1.0 || uv.y < 0.0) {
        isIntersect = false;
    }
    if (isIntersect) {
        outColor = varColor * texture(s0, uv);
    } else {
        outColor = vec4(1, 1, 1, 1);
    }
}
I think Q0.z and Q1.z are always 1.0, so dQ.z should always be 0.0, and dk is always negative.
What is wrong?

OpenGL GLSL - Projection matrix not working

I've got a basic OpenGL application and I want to use my projection matrix.
This is my matrix:
WorldCoordinates.m[0][0] = 2.0f / Width - 1.0f; WorldCoordinates.m[0][1] = 0; WorldCoordinates.m[0][2] = 0, WorldCoordinates.m[0][3] = 0;
WorldCoordinates.m[1][0] = 0; WorldCoordinates.m[1][1] = 2.0f / Height - 1.0f; WorldCoordinates.m[1][2] = 0, WorldCoordinates.m[1][3] = 0;
WorldCoordinates.m[2][0] = 0; WorldCoordinates.m[2][1] = 0; WorldCoordinates.m[2][2] = 0, WorldCoordinates.m[2][3] = 0;
WorldCoordinates.m[3][0] = 0; WorldCoordinates.m[3][1] = 0; WorldCoordinates.m[3][2] = 0, WorldCoordinates.m[3][3] = 0;
(WorldCoordinates is a Matrix4 struct that contains a single member m, a float[4][4]; Width and Height are two ints.)
I then pass this matrix to my vertex shader like this:
shader.Bind();
glUniformMatrix4fv(glGetUniformLocation(shader.GetProgramID(), "worldCoordinates"), 1, GL_TRUE, &WorldCoordinates.m[0][0]);
(Shader is a class whose Bind() method simply calls glUseProgram.)
This is my vertex shader GLSL:
#version 330 core
layout (location = 0) in vec3 position;
layout (location = 1) in vec3 color;
layout (location = 2) in vec2 texCoord;
out vec3 Color;
out vec2 TexCoord;
uniform mat4 worldCoordinates;
void main()
{
    gl_Position = worldCoordinates * vec4(position, 1.0f);
    Color = color;
    TexCoord = texCoord;
}
Using this, it doesn't work. But changing the gl_Position call to this:
gl_Position = vec4(vec3(position.x * 1/400 -1, position.y * 1/300 -1, 1.0f), 1.0f);
it renders as expected. Why is that?
This is how you build an orthographic projection matrix:
static void
mat4_ortho(mat4_t m, float left, float right, float bottom, float top, float near, float far)
{
    float rl = right - left;
    float tb = top - bottom;
    float fn = far - near;
    mat4_zero(m);
    m[ 0] = 2.0f / rl;
    m[ 5] = 2.0f / tb;
    m[10] = -2.0f / fn;
    m[12] = -(left + right) / rl;
    m[13] = -( top + bottom) / tb;
    m[14] = -( far + near) / fn;
    m[15] = 1.0f;
}
For your case, you'd set left=0, right=width, bottom=0, top=height; near and far don't matter much, just set them to -1.0 and 1.0, for instance.
With such a matrix, the vertex coordinates you use for drawing will map 1:1 with the pixels on screen.
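As a worked example of why the original matrix fails: with left=0, right=Width, bottom=0, top=Height, near=-1, far=1, the function above yields

[ 2/Width   0          0   -1 ]
[ 0         2/Height   0   -1 ]
[ 0         0         -1    0 ]
[ 0         0          0    1 ]

so the -1 offsets live in the translation column (m[12] and m[13] of the column-major array), whereas 2.0f / Width - 1.0f parses as (2.0f / Width) - 1.0f and bakes the offset into the scale factor itself. The original matrix also leaves m[2][2] and m[3][3] at 0, so gl_Position.w comes out 0 for every vertex.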

OpenGL ES - texturing sphere

I have a sphere and can map a texture onto it, but currently the texture is on the outside of the sphere and I need it on the inside. My user sits inside the sphere and views it from within (rotating and zooming), so it's like a sky dome, but a sphere. Do I need to fix the UV texture coordinates or enable something?
Here code for generating sphere:
class Sphere : public ParametricSurface {
public:
    Sphere(float radius) : m_radius(radius)
    {
        ParametricInterval interval = { ivec2(20, 20), vec2(Pi, TwoPi), vec2(8, 14) };
        SetInterval(interval);
    }
    vec3 Evaluate(const vec2& domain) const
    {
        float u = domain.x, v = domain.y;
        float x = m_radius * sin(u) * cos(v);
        float y = m_radius * cos(u);
        float z = m_radius * -sin(u) * sin(v);
        return vec3(x, y, z);
    }
private:
    float m_radius;
};
vec2 ParametricSurface::ComputeDomain(float x, float y) const
{
    return vec2(x * m_upperBound.x / m_slices.x, y * m_upperBound.y / m_slices.y);
}
void ParametricSurface::GenerateVertices(float * vertices) const
{
    float* attribute = vertices;
    for (int j = 0; j < m_divisions.y; j++) {
        for (int i = 0; i < m_divisions.x; i++) {
            // Compute Position
            vec2 domain = ComputeDomain(i, j);
            vec3 range = Evaluate(domain);
            attribute = range.Write(attribute);
            // Compute Normal
            if (m_vertexFlags & VertexFlagsNormals) {
                float s = i, t = j;
                // Nudge the point if the normal is indeterminate.
                if (i == 0) s += 0.01f;
                if (i == m_divisions.x - 1) s -= 0.01f;
                if (j == 0) t += 0.01f;
                if (j == m_divisions.y - 1) t -= 0.01f;
                // Compute the tangents and their cross product.
                vec3 p = Evaluate(ComputeDomain(s, t));
                vec3 u = Evaluate(ComputeDomain(s + 0.01f, t)) - p;
                vec3 v = Evaluate(ComputeDomain(s, t + 0.01f)) - p;
                vec3 normal = u.Cross(v).Normalized();
                if (InvertNormal(domain))
                    normal = -normal;
                attribute = normal.Write(attribute);
            }
            // Compute Texture Coordinates
            if (m_vertexFlags & VertexFlagsTexCoords) {
                float s = m_textureCount.x * i / m_slices.x;
                float t = m_textureCount.y * j / m_slices.y;
                attribute = vec2(s, t).Write(attribute);
            }
        }
    }
}
void ParametricSurface::GenerateLineIndices(unsigned short * indices) const
{
    unsigned short * index = indices;
    for (int j = 0, vertex = 0; j < m_slices.y; j++) {
        for (int i = 0; i < m_slices.x; i++) {
            int next = (i + 1) % m_divisions.x;
            *index++ = vertex + i;
            *index++ = vertex + next;
            *index++ = vertex + i;
            *index++ = vertex + i + m_divisions.x;
        }
        vertex += m_divisions.x;
    }
}
void ParametricSurface::GenerateTriangleIndices(unsigned short * indices) const
{
    unsigned short * index = indices;
    for (int j = 0, vertex = 0; j < m_slices.y; j++) {
        for (int i = 0; i < m_slices.x; i++) {
            int next = (i + 1) % m_divisions.x;
            *index++ = vertex + i;
            *index++ = vertex + next;
            *index++ = vertex + i + m_divisions.x;
            *index++ = vertex + next;
            *index++ = vertex + next + m_divisions.x;
            *index++ = vertex + i + m_divisions.x;
        }
        vertex += m_divisions.x;
    }
}
And here is VBO creation:
+ (DrawableVBO *)createVBO:(SurfaceType)surfaceType
{
    ISurface * surface = [self createSurface:surfaceType]; // just Sphere type
    surface->SetVertexFlags(VertexFlagsNormals | VertexFlagsTexCoords); // which vertex attributes I need
    // Get vertices from the surface.
    int vertexSize = surface->GetVertexSize();
    int vBufSize = surface->GetVertexCount() * vertexSize;
    GLfloat * vbuf = new GLfloat[vBufSize];
    surface->GenerateVertices(vbuf);
    // Get triangle indices from the surface.
    int triangleIndexCount = surface->GetTriangleIndexCount();
    unsigned short * triangleBuf = new unsigned short[triangleIndexCount];
    surface->GenerateTriangleIndices(triangleBuf);
    // Create the VBO for the vertices.
    GLuint vertexBuffer;
    glGenBuffers(1, &vertexBuffer);
    glBindBuffer(GL_ARRAY_BUFFER, vertexBuffer);
    glBufferData(GL_ARRAY_BUFFER, vBufSize * sizeof(GLfloat), vbuf, GL_STATIC_DRAW);
    // Create the VBO for the triangle indices.
    GLuint triangleIndexBuffer;
    glGenBuffers(1, &triangleIndexBuffer);
    glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, triangleIndexBuffer);
    glBufferData(GL_ELEMENT_ARRAY_BUFFER, triangleIndexCount * sizeof(GLushort), triangleBuf, GL_STATIC_DRAW);
    delete [] vbuf;
    delete [] triangleBuf;
    delete surface;
    DrawableVBO * vbo = [[DrawableVBO alloc] init];
    vbo.vertexBuffer = vertexBuffer;
    vbo.triangleIndexBuffer = triangleIndexBuffer;
    vbo.vertexSize = vertexSize;
    vbo.triangleIndexCount = triangleIndexCount;
    return vbo;
}
Here is my light setup:
- (void)setupLights
{
    // Set up some default material parameters.
    glUniform3f(_ambientSlot, 0.04f, 0.04f, 0.04f);
    glUniform3f(_specularSlot, 0.5, 0.5, 0.5);
    glUniform1f(_shininessSlot, 50);
    // Initialize various state.
    glEnableVertexAttribArray(_positionSlot);
    glEnableVertexAttribArray(_normalSlot);
    glUniform3f(_lightPositionSlot, 1.0, 1.0, 5.0);
    glVertexAttrib3f(_diffuseSlot, 0.8, 0.8, 0.8);
}
And finally the shaders.
Fragment:
precision mediump float;
varying vec4 vDestinationColor;
varying vec2 vTextureCoordOut;
uniform sampler2D Sampler;
void main()
{
    gl_FragColor = texture2D(Sampler, vTextureCoordOut) * vDestinationColor;
}
Vertex:
uniform mat4 projection;
uniform mat4 modelView;
attribute vec4 vPosition;
attribute vec2 vTextureCoord;
uniform mat3 normalMatrix;
uniform vec3 vLightPosition;
uniform vec3 vAmbientMaterial;
uniform vec3 vSpecularMaterial;
uniform float shininess;
attribute vec3 vNormal;
attribute vec3 vDiffuseMaterial;
varying vec4 vDestinationColor;
varying vec2 vTextureCoordOut;
void main(void)
{
    gl_Position = projection * modelView * vPosition;
    vec3 N = normalMatrix * vNormal;
    vec3 L = normalize(vLightPosition);
    vec3 E = vec3(0, 0, 1);
    vec3 H = normalize(L + E);
    float df = max(0.0, dot(N, L));
    float sf = max(0.0, dot(N, H));
    sf = pow(sf, shininess);
    vec3 color = vAmbientMaterial + df * vDiffuseMaterial + sf * vSpecularMaterial;
    vDestinationColor = vec4(color, 1);
    vTextureCoordOut = vTextureCoord;
}
The code was a bit of a mess, but I fixed it. First, enable culling and skip rendering of the front faces:
glEnable(GL_CULL_FACE);
glCullFace(GL_FRONT);
Then I changed the position of the light source:
glUniform3f(_lightPositionSlot, 1.0, 1.0, -2.5);
(I don't actually need the light, so as a next step I will disable it entirely.) But finally I have a sphere; the user is inside it, can rotate it, zoom in and out, and see the texture!
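A side note, as an assumption rather than part of the fix above: the generated sphere normals point outward, so if lighting is ever needed on the inside of the sphere, the normal would also have to be flipped in the vertex shader, e.g.:
vec3 N = -(normalMatrix * vNormal); // negate the outward normal when viewing from inside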