Bad lighting using the Phong method - C++

I'm trying to make a cube that is irregularly triangulated, but whose faces are still effectively planar, shade correctly.
Here is the current result I have:
With wireframe:
Normals calculated in my program:
Normals calculated by meshlabjs.net:
The lighting works properly when I use regularly sized triangles for the cube. As you can see, I'm duplicating vertices and using angle weighting.
lighting.frag
vec4 scene_ambient = vec4(1, 1, 1, 1.0);
struct material
{
vec4 ambient;
vec4 diffuse;
vec4 specular;
float shininess;
};
material frontMaterial = material(
vec4(0.25, 0.25, 0.25, 1.0),
vec4(0.4, 0.4, 0.4, 1.0),
vec4(0.774597, 0.774597, 0.774597, 1.0),
76
);
struct lightSource
{
vec4 position;
vec4 diffuse;
vec4 specular;
float constantAttenuation, linearAttenuation, quadraticAttenuation;
float spotCutoff, spotExponent;
vec3 spotDirection;
};
lightSource light0 = lightSource(
vec4(0.0, 0.0, 0.0, 1.0),
vec4(100.0, 100.0, 100.0, 100.0),
vec4(100.0, 100.0, 100.0, 100.0),
0.1, 1, 0.01,
180.0, 0.0,
vec3(0.0, 0.0, 0.0)
);
vec4 light(lightSource ls, vec3 norm, vec3 deviation, vec3 position)
{
vec3 viewDirection = normalize(vec3(1.0 * vec4(0, 0, 0, 1.0) - vec4(position, 1)));
vec3 lightDirection;
float attenuation;
//ls.position.xyz = cameraPos;
ls.position.z += 50;
if (0.0 == ls.position.w) // directional light?
{
attenuation = 1.0; // no attenuation
lightDirection = normalize(vec3(ls.position));
}
else // point light or spotlight (or other kind of light)
{
vec3 positionToLightSource = vec3(ls.position - vec4(position, 1.0));
float distance = length(positionToLightSource);
lightDirection = normalize(positionToLightSource);
attenuation = 1.0 / (ls.constantAttenuation
+ ls.linearAttenuation * distance
+ ls.quadraticAttenuation * distance * distance);
if (ls.spotCutoff <= 90.0) // spotlight?
{
float clampedCosine = max(0.0, dot(-lightDirection, ls.spotDirection));
if (clampedCosine < cos(radians(ls.spotCutoff))) // outside of spotlight cone?
{
attenuation = 0.0;
}
else
{
attenuation = attenuation * pow(clampedCosine, ls.spotExponent);
}
}
}
vec3 ambientLighting = vec3(scene_ambient) * vec3(frontMaterial.ambient);
vec3 diffuseReflection = attenuation
* vec3(ls.diffuse) * vec3(frontMaterial.diffuse)
* max(0.0, dot(norm, lightDirection));
vec3 specularReflection;
if (dot(norm, lightDirection) < 0.0) // light source on the wrong side?
{
specularReflection = vec3(0.0, 0.0, 0.0); // no specular reflection
}
else // light source on the right side
{
specularReflection = attenuation * vec3(ls.specular) * vec3(frontMaterial.specular)
* pow(max(0.0, dot(reflect(lightDirection, norm), viewDirection)), frontMaterial.shininess);
}
return vec4(ambientLighting + diffuseReflection + specularReflection, 1.0);
}
vec4 generateGlobalLighting(vec3 norm, vec3 position)
{
return light(light0, norm, vec3(2,0,0), position);
}
mainmesh.frag
#version 430
in vec3 f_color;
in vec3 f_normal;
in vec3 f_position;
in float f_opacity;
out vec4 fragColor;
vec4 generateGlobalLighting(vec3 norm, vec3 position);
void main()
{
vec3 norm = normalize(f_normal);
vec4 l0 = generateGlobalLighting(norm, f_position);
fragColor = vec4(f_color, f_opacity) * l0;
}
Here is the code that generates the vertices, normals and faces for the painter.
m_vertices_buf.resize(m_mesh.num_faces() * 3, 3);
m_normals_buf.resize(m_mesh.num_faces() * 3, 3);
m_faces_buf.resize(m_mesh.num_faces(), 3);
std::map<vertex_descriptor, std::list<Vector3d>> map;
GLDebugging* deb = GLDebugging::getInstance();
auto getAngle = [](Vector3d a, Vector3d b) {
double angle = 0.0;
angle = std::atan2(a.cross(b).norm(), a.dot(b));
return angle;
};
for (const auto& f : m_mesh.faces()) {
auto f_hh = m_mesh.halfedge(f);
//auto n = PMP::compute_face_normal(f, m_mesh);
vertex_descriptor vs[3];
Vector3d ps[3];
int i = 0;
for (const auto& v : m_mesh.vertices_around_face(f_hh)) {
auto p = m_mesh.point(v);
ps[i] = Vector3d(p.x(), p.y(), p.z());
vs[i++] = v;
}
auto n = (ps[1] - ps[0]).cross(ps[2] - ps[0]).normalized();
auto a1 = getAngle((ps[1] - ps[0]).normalized(), (ps[2] - ps[0]).normalized());
auto a2 = getAngle((ps[2] - ps[1]).normalized(), (ps[0] - ps[1]).normalized());
auto a3 = getAngle((ps[0] - ps[2]).normalized(), (ps[1] - ps[2]).normalized());
auto area = PMP::face_area(f, m_mesh);
map[vs[0]].push_back(n * a1);
map[vs[1]].push_back(n * a2);
map[vs[2]].push_back(n * a3);
auto p = m_mesh.point(vs[0]);
deb->drawLine(Vector3d(p.x(), p.y(), p.z()), Vector3d(p.x(), p.y(), p.z()) + Vector3d(n.x(), n.y(), n.z()) * 4);
p = m_mesh.point(vs[1]);
deb->drawLine(Vector3d(p.x(), p.y(), p.z()), Vector3d(p.x(), p.y(), p.z()) + Vector3d(n.x(), n.y(), n.z()) * 4);
p = m_mesh.point(vs[2]);
deb->drawLine(Vector3d(p.x(), p.y(), p.z()), Vector3d(p.x(), p.y(), p.z()) + Vector3d(n.x(), n.y(), n.z()) * 4);
}
int j = 0;
int i = 0;
for (const auto& f : m_mesh.faces()) {
auto f_hh = m_mesh.halfedge(f);
for (const auto& v : m_mesh.vertices_around_face(f_hh)) {
const auto& p = m_mesh.point(v);
m_vertices_buf.row(i) = RowVector3d(p.x(), p.y(), p.z());
Vector3d n(0, 0, 0);
//auto n = PMP::compute_face_normal(f, m_mesh);
Vector3d norm = Vector3d(n.x(), n.y(), n.z());
for (auto val : map[v]) {
norm += val;
}
norm.normalize();
deb->drawLine(Vector3d(p.x(), p.y(), p.z()), Vector3d(p.x(), p.y(), p.z()) + Vector3d(norm.x(), norm.y(), norm.z()) * 3,
Vector3d(1.0, 0, 0));
m_normals_buf.row(i++) = RowVector3d(norm.x(), norm.y(), norm.z());
}
m_faces_buf.row(j++) = RowVector3i(i - 3, i - 2, i - 1);
}
Here is the painter code:
m_vertexAttrLoc = program.attributeLocation("v_vertex");
m_colorAttrLoc = program.attributeLocation("v_color");
m_normalAttrLoc = program.attributeLocation("v_normal");
m_mvMatrixLoc = program.uniformLocation("v_modelViewMatrix");
m_projMatrixLoc = program.uniformLocation("v_projectionMatrix");
m_normalMatrixLoc = program.uniformLocation("v_normalMatrix");
//m_relativePosLoc = program.uniformLocation("v_relativePos");
m_opacityLoc = program.uniformLocation("v_opacity");
m_colorMaskLoc = program.uniformLocation("v_colorMask");
//bool for unmapping depth color
m_useDepthMap = program.uniformLocation("v_useDepthMap");
program.setUniformValue(m_mvMatrixLoc, modelView);
//uniform used for Color map to regular model switch
program.setUniformValue(m_useDepthMap, (m_showColorMap &&
(m_showProblemAreas || m_showPrepMap || m_showDepthMap || m_showMockupMap)));
QMatrix3x3 normalMatrix = modelView.normalMatrix();
program.setUniformValue(m_normalMatrixLoc, normalMatrix);
program.setUniformValue(m_projMatrixLoc, projection);
//program.setUniformValue(m_relativePosLoc, m_relativePos);
program.setUniformValue(m_opacityLoc, m_opacity);
program.setUniformValue(m_colorMaskLoc, m_colorMask);
glEnableVertexAttribArray(m_vertexAttrLoc);
m_vertices.bind();
glVertexAttribPointer(m_vertexAttrLoc, 3, GL_DOUBLE, false, 3 * sizeof(GLdouble), NULL);
m_vertices.release();
glEnableVertexAttribArray(m_normalAttrLoc);
m_normals.bind();
glVertexAttribPointer(m_normalAttrLoc, 3, GL_DOUBLE, false, 0, NULL);
m_normals.release();
glEnableVertexAttribArray(m_colorAttrLoc);
if (m_showProblemAreas) {
m_problemColorMap.bind();
glVertexAttribPointer(m_colorAttrLoc, 3, GL_DOUBLE, false, 0, NULL);
m_problemColorMap.release();
}
else if (m_showPrepMap) {
m_prepColorMap.bind();
glVertexAttribPointer(m_colorAttrLoc, 3, GL_DOUBLE, false, 0, NULL);
m_prepColorMap.release();
}
else if (m_showMockupMap) {
m_mokupColorMap.bind();
glVertexAttribPointer(m_colorAttrLoc, 3, GL_DOUBLE, false, 0, NULL);
m_mokupColorMap.release();
}
else {
//m_colors.bind();
//glVertexAttribPointer(m_colorAttrLoc, 3, GL_DOUBLE, false, 0, NULL);
//m_colors.release();
}
m_indices.bind();
glDrawElements(GL_TRIANGLES, m_indices.size() / sizeof(int), GL_UNSIGNED_INT, NULL);
m_indices.release();
glDisableVertexAttribArray(m_vertexAttrLoc);
glDisableVertexAttribArray(m_normalAttrLoc);
glDisableVertexAttribArray(m_colorAttrLoc);
EDIT: Sorry for not being clear enough. The cube is merely an example. My requirement is that the shading works for any kind of mesh: those with very sharp edges, and those that are very organic (like humans or animals).

The issue is clearly explained by the image "Normals calculated in my program" from your question. The normal vectors at the corners and edges of the cube are not perpendicular to the faces:
For a proper specular reflection on plane faces, the normal vectors have to be perpendicular to the sides of the cube.
The vertex coordinate and its normal vector form a tuple with 6 components (x, y, z, nx, ny, nz).
A vertex coordinate on an edge of the cube is adjacent to 2 sides of the cube and 2 (face) normal vectors. The 8 vertex coordinates on the 8 corners of the cube are adjacent to 3 sides (3 normal vectors) each.
To define the vertex attributes with face normal vectors (perpendicular to a side) you have to define multiple tuples with the same vertex coordinate but different normal vectors. You have to use the different attribute tuples to form the triangle primitives on the different sides of the cube.
e.g. If you have defined a cube with the left, front, bottom coordinate of (-1, -1, -1) and the right, back, top coordinate of (1, 1, 1), then the vertex coordinate (-1, -1, -1) is adjacent to the left, front and bottom side of the cube:
         x   y   z   nx  ny  nz
left:   -1  -1  -1   -1   0   0
front:  -1  -1  -1    0  -1   0
bottom: -1  -1  -1    0   0  -1
Use the left attribute tuple to form the triangle primitives on the left side, the front tuple for the triangles on the front side, and the bottom tuple for the triangles on the bottom, as sketched below.
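As an illustration (a hedged sketch, not the questioner's actual buffer layout; the struct name is made up), the corner (-1, -1, -1) would appear three times in the vertex buffer, once per adjacent face, each copy carrying that face's normal:
struct VertexNormal { float x, y, z, nx, ny, nz; };
// The same corner position stored three times, once per adjacent face normal.
const VertexNormal corner[3] = {
    { -1.f, -1.f, -1.f, -1.f,  0.f,  0.f }, // referenced by the triangles of the left side
    { -1.f, -1.f, -1.f,  0.f, -1.f,  0.f }, // referenced by the triangles of the front side
    { -1.f, -1.f, -1.f,  0.f,  0.f, -1.f }  // referenced by the triangles of the bottom side
};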
In general you have to decide what you want. There is no general approach for all meshes.
Either you have a finely granulated mesh and you want a smooth appearance (e.g. a sphere). In that case your approach is fine: it will generate a smooth light transition at the edges between the primitives.
Or you have a mesh with hard edges like a cube. In that case you have to "duplicate" vertices. If 2 (or even more) triangles share a vertex coordinate but their face normal vectors are different, then you have to create a separate tuple for each combination of the vertex coordinate and a face normal vector.
For a general "smooth" solution you would have to interpolate the normal vectors of the vertex coordinates which are in the middle of plane surfaces, according to the surrounding geometry. That means that if a bunch of triangle primitives form a plane, then all the normal vectors of the vertices have to be computed depending on their position on the plane. At the centroid the normal vector is equal to the face normal vector. For all other points the normal vector has to be interpolated with the normal vectors of the surrounding faces.
Anyway, this seems to be an XY problem. Why is there a "vertex" somewhere in the middle of a plane? Probably the plane is tessellated. But if the plane is tessellated, why are the normal vectors not interpolated too during the tessellation process?

As mentioned in the other answers, the problem is your mesh normals.
Computing an average normal, like you are doing currently, is what you would want to do for a smooth object like a sphere; CGAL has a function for that, CGAL::Polygon_mesh_processing::compute_vertex_normal. For a cube, what you want is normals perpendicular to the faces; CGAL has a function for that too, CGAL::Polygon_mesh_processing::compute_face_normal.
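For reference, a minimal sketch of how the bulk variants of those functions (compute_face_normals / compute_vertex_normals, which fill a whole property map) are typically called on a CGAL::Surface_mesh; the property-map names follow the CGAL examples:
#include <CGAL/Exact_predicates_inexact_constructions_kernel.h>
#include <CGAL/Surface_mesh.h>
#include <CGAL/Polygon_mesh_processing/compute_normal.h>
namespace PMP = CGAL::Polygon_mesh_processing;
typedef CGAL::Exact_predicates_inexact_constructions_kernel K;
typedef CGAL::Surface_mesh<K::Point_3> Mesh;
void computeNormals(Mesh& mesh)
{
    // Flat shading (cube-like meshes): one normal per face.
    auto fnormals = mesh.add_property_map<Mesh::Face_index, K::Vector_3>("f:normals", CGAL::NULL_VECTOR).first;
    PMP::compute_face_normals(mesh, fnormals);
    // Smooth shading (sphere-like meshes): one averaged normal per vertex.
    auto vnormals = mesh.add_property_map<Mesh::Vertex_index, K::Vector_3>("v:normals", CGAL::NULL_VECTOR).first;
    PMP::compute_vertex_normals(mesh, vnormals);
}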
To debug the normals you can just set fragColor = vec4(norm, 1); in mainmesh.frag. Here the cubes on the left have averaged (smooth) normals and the ones on the right have face (flat) normals:
And shaded they look like this:
shading has to work for any kind of mesh (a cube or any organic mesh)
For that you can use something like per_corner_normals (see the sketch below), which:
Implements a simple scheme which computes corner normals as averages
of normals of faces incident on the corresponding vertex which do not
deviate by more than a specified dihedral angle (e.g. 20°)
And this is what it looks like with an angle of 1°, 20°, 100°:
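A minimal sketch of such a call, assuming libigl's igl::per_corner_normals and that the mesh is already in the usual Eigen V/F form (20.0 is the dihedral-angle threshold in degrees):
#include <igl/per_corner_normals.h>
#include <Eigen/Core>
void computeCornerNormals(const Eigen::MatrixXd& V, const Eigen::MatrixXi& F, Eigen::MatrixXd& CN)
{
    // CN ends up #F*3 x 3: one normal per face corner, averaged over incident faces
    // whose normals deviate by less than 20 degrees, otherwise kept flat (hard edge).
    igl::per_corner_normals(V, F, 20.0, CN);
}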

In your image, we can see that the inner triangle (the one that doesn't have a point on the cube's edges, in the top-left quarter) has a homogeneous color.
My interpretation is that triangles that have points on the edge/corner of the cube share the same vertex and therefore the same normal, and somehow the normals are averaged. So they are not perpendicular to the faces.
To debug this, you should create a simple cube geometry with 6 faces and 2 triangles per face; that makes 12 triangles.
Two options:
If you have 8 vertices in the geometry, the corners are shared between triangles of different faces and the issue comes from the geometry generator.
If you have 6×4=24 vertices in the geometry, the truth lies elsewhere.
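As a hedged illustration of the 24-vertex variant (a sketch with a made-up struct name), one face of such a debug cube contributes 4 of the 24 vertices, and all of them carry the same face normal:
// The +Z face of the debug cube: 4 vertices, 2 triangles, one shared face normal (0, 0, 1).
struct FaceVertex { float x, y, z, nx, ny, nz; };
const FaceVertex frontFace[4] = {
    { -1.f, -1.f, 1.f, 0.f, 0.f, 1.f },
    {  1.f, -1.f, 1.f, 0.f, 0.f, 1.f },
    {  1.f,  1.f, 1.f, 0.f, 0.f, 1.f },
    { -1.f,  1.f, 1.f, 0.f, 0.f, 1.f }
};
const unsigned frontIndices[6] = { 0, 1, 2, 0, 2, 3 }; // the 2 triangles of this face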

Related

How do I align the raytraced spheres from my fragment shader with GL_POINTS?

I have a very simple shader program that takes in a bunch of position data as GL_POINTS, which generate screen-aligned squares of fragments as usual, with a size depending on depth. In the fragment shader I wanted to draw a very simple ray-traced sphere for each one, with just the shadow on the side of the sphere opposite the light. I went to this shadertoy to try to figure it out on my own. I used the sphIntersect function for ray-sphere intersection, and sphNormal to get the normal vectors on the sphere for lighting. The problem is that the spheres do not align with the squares of fragments, causing them to be cut off. This is because I am not sure how to match the projections of the spheres and the vertex positions so that they line up. Can I have an explanation of how to do this?
Here is a picture for reference.
Here are my vertex and fragment shaders for reference:
//vertex shader:
#version 460
layout(location = 0) in vec4 position; // position of each point in space
layout(location = 1) in vec4 color; //color of each point in space
layout(location = 2) uniform mat4 view_matrix; // projection * camera matrix
layout(location = 6) uniform mat4 cam_matrix; //just the camera matrix
out vec4 col; // color of vertex
out vec4 posi; // position of vertex
void main() {
vec4 p = view_matrix * vec4(position.xyz, 1.0);
gl_PointSize = clamp(1024.0 * position.w / p.z, 0.0, 4000.0);
gl_Position = p;
col = color;
posi = cam_matrix * position;
}
//fragment shader:
#version 460
in vec4 col; // color of vertex associated with this fragment
in vec4 posi; // position of the vertex associated with this fragment relative to camera
out vec4 f_color;
layout (depth_less) out float gl_FragDepth;
float sphIntersect( in vec3 ro, in vec3 rd, in vec4 sph )
{
vec3 oc = ro - sph.xyz;
float b = dot( oc, rd );
float c = dot( oc, oc ) - sph.w*sph.w;
float h = b*b - c;
if( h<0.0 ) return -1.0;
return -b - sqrt( h );
}
vec3 sphNormal( in vec3 pos, in vec4 sph )
{
return normalize(pos-sph.xyz);
}
void main() {
vec4 c = clamp(col, 0.0, 1.0);
vec2 p = ((2.0*gl_FragCoord.xy)-vec2(1920.0, 1080.0)) / 2.0;
vec3 ro = vec3(0.0, 0.0, -960.0 );
vec3 rd = normalize(vec3(p.x, p.y,960.0));
vec3 lig = normalize(vec3(0.6,0.3,0.1));
vec4 k = vec4(posi.x, posi.y, -posi.z, 2.0*posi.w);
float t = sphIntersect(ro, rd, k);
vec3 ps = ro + (t * rd);
vec3 nor = sphNormal(ps, k);
if(t < 0.0) c = vec4(1.0);
else c.xyz *= clamp(dot(nor,lig), 0.0, 1.0);
f_color = c;
gl_FragDepth = t * 0.0001;
}
Looks like you have many spheres so I would do this:
Input data
I would have a VBO containing x, y, z, r describing your spheres (a buffer-setup sketch follows the steps below). You will also need your view transform (uniform) so that you can create a ray direction and start position for each fragment. Something like my vertex shader in here:
Reflection and refraction impossible without recursive ray tracing?
Create BBOX in Geometry shader and convert your POINT to QUAD or POLYGON
Note that you have to account for perspective. If you are not familiar with geometry shaders see:
rendering cubics in GLSL
where I emit a sequence of OBBs from input lines...
In the fragment shader, raytrace the sphere
You have to compute the intersection between the sphere and the ray, choose the closer intersection and compute its depth and normal (for lighting). In case of no intersection you have to discard; the fragment!!!
From what I can see in your images, your QUADs do not correspond to your spheres, hence the clipping. You also do not discard; fragments with no intersection, so you overwrite already-rendered stuff around the last rendered sphere with the background color, which is why only a single sphere is left per QUAD regardless of how many spheres are really there ...
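A minimal sketch of the "Input data" step above, assuming plain OpenGL buffer calls (the struct layout is only an illustration; attribute location 0 matches the question's layout(location = 0) in vec4 position):
#include <vector>
// Assumes an OpenGL loader (GLEW/GLAD/...) is already included and a context is current.
struct Sphere { float x, y, z, r; }; // xyz = center, r = radius, one vec4 attribute per sphere
void uploadSpheres(const std::vector<Sphere>& spheres, GLuint& vbo)
{
    glGenBuffers(1, &vbo);
    glBindBuffer(GL_ARRAY_BUFFER, vbo);
    glBufferData(GL_ARRAY_BUFFER, spheres.size() * sizeof(Sphere), spheres.data(), GL_STATIC_DRAW);
    glEnableVertexAttribArray(0);
    glVertexAttribPointer(0, 4, GL_FLOAT, GL_FALSE, sizeof(Sphere), nullptr);
}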
To create a ray direction that matches a perspective matrix from screen space, the following ray direction formula can be used:
vec3 rd = normalize(vec3(((2.0 / screenWidth) * gl_FragCoord.xy) - vec2(aspectRatio, 1.0), -proj_matrix[1][1]));
The value of 2.0 / screenWidth can be pre-computed, or the OpenGL built-in uniform structs can be used.
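For example (a hedged sketch; screenWidth, program and the uniform name are placeholders), the constant can be uploaded once per resize instead of being computed per fragment:
float invHalfWidth = 2.0f / static_cast<float>(screenWidth); // 2.0 / screenWidth, computed once on the CPU
glUniform1f(glGetUniformLocation(program, "invHalfWidth"), invHalfWidth);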
To get a bounding box or other shape for your spheres, it is very important to use camera-facing shapes, and not camera-plane-facing shapes. Use the following process where position is the incoming VBO position data, and the w-component of position is the radius:
vec4 p = vec4((cam_matrix * vec4(position.xyz, 1.0)).xyz, position.w);
o.vpos = p;
float l2 = dot(p.xyz, p.xyz);
float r2 = p.w * p.w;
float k = 1.0 - (r2/l2);
float radius = p.w * sqrt(k);
if(l2 < r2) {
p = vec4(0.0, 0.0, -p.w * 0.49, p.w);
radius = p.w;
k = 0.0;
}
vec3 hx = radius * normalize(vec3(-p.z, 0.0, p.x));
vec3 hy = radius * normalize(vec3(-p.x * p.y, p.z * p.z + p.x * p.x, -p.z * p.y));
p.xyz *= k;
Then use hx and hy as basis vectors for whatever 2D shape you want the billboard's vertices to form. Don't forget to later multiply each vertex by a perspective matrix to get its final position. Here is a visualization of the billboarding on Desmos using a hexagon shape: https://www.desmos.com/calculator/yeeew6tqwx
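A hedged CPU-side illustration with glm of those last two steps (in practice this expansion would live in a geometry shader; p, hx, hy and proj_matrix are the values from the snippet above, shown here for a simple quad):
#include <glm/glm.hpp>
void expandBillboard(const glm::vec4& p, const glm::vec3& hx, const glm::vec3& hy,
                     const glm::mat4& proj_matrix, glm::vec4 corners[4])
{
    // Span a camera-facing quad around the shifted center p.xyz using hx and hy,
    // then project each corner with the perspective matrix.
    const glm::vec3 center(p);
    const glm::vec3 offsets[4] = { -hx - hy, hx - hy, hx + hy, -hx + hy };
    for (int i = 0; i < 4; ++i)
        corners[i] = proj_matrix * glm::vec4(center + offsets[i], 1.0f);
}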

How to generate camera rays for ray casting

I am trying to make a simple voxel engine with OpenGL and C++. My first step is to send out rays from the camera and detect if a ray intersects with something (for testing purposes it's just two planes). I have got it working without camera rotation by creating a full-screen quad and programming the fragment shader to send out a ray for every fragment (for now I'm just assuming a fragment is a pixel) in the direction (texCoord.x, texCoord.y, -1). Now I am trying to implement camera rotation.
I have tried to generate a rotation matrix on the CPU and send it to the shader, which multiplies it with every ray. However, when I rotate the camera, the planes start to stretch in a way which I can only describe with this video.
https://www.youtube.com/watch?v=6NScMwnPe8c
Here is the code that creates the matrix and is run every frame:
float pi = 3.141592;
// camRotX and Y are defined elsewhere and can be controlled from the keyboard during runtime.
glm::vec3 camEulerAngles = glm::vec3(camRotX, camRotY, 0);
std::cout << "X: " << camEulerAngles.x << " Y: " << camEulerAngles.y << "\n";
// Convert to radians
camEulerAngles.x = camEulerAngles.x * pi / 180;
camEulerAngles.y = camEulerAngles.y * pi / 180;
camEulerAngles.z = camEulerAngles.z * pi / 180;
// Generate quaternion
glm::quat camRotation;
camRotation = glm::quat(camEulerAngles);
// Generate rotation matrix from quaternion
glm::mat4 camToWorldMatrix = glm::toMat4(camRotation);
// No transformation matrix is created because the rays should be relative to 0,0,0
// Send the rotation matrix to the shader
int camTransformMatrixID = glGetUniformLocation(shader, "cameraTransformationMatrix");
glUniformMatrix4fv(camTransformMatrixID, 1, GL_FALSE, glm::value_ptr(camToWorldMatrix));
And the fragment shader:
#version 330 core
in vec4 texCoord;
layout(location = 0) out vec4 color;
uniform vec3 cameraPosition;
uniform vec3 cameraTR;
uniform vec3 cameraTL;
uniform vec3 cameraBR;
uniform vec3 cameraBL;
uniform mat4 cameraTransformationMatrix;
uniform float fov;
uniform float aspectRatio;
float pi = 3.141592;
int RayHitCell(vec3 origin, vec3 direction, vec3 cellPosition, float cellSize)
{
if(direction.z != 0)
{
float multiplicationFactorFront = cellPosition.z - origin.z;
if(multiplicationFactorFront > 0){
vec2 interceptFront = vec2(direction.x * multiplicationFactorFront + origin.x,
direction.y * multiplicationFactorFront + origin.y);
if(interceptFront.x > cellPosition.x && interceptFront.x < cellPosition.x + cellSize &&
interceptFront.y > cellPosition.y && interceptFront.y < cellPosition.y + cellSize)
{
return 1;
}
}
float multiplicationFactorBack = cellPosition.z + cellSize - origin.z;
if(multiplicationFactorBack > 0){
vec2 interceptBack = vec2(direction.x * multiplicationFactorBack + origin.x,
direction.y * multiplicationFactorBack + origin.y);
if(interceptBack.x > cellPosition.x && interceptBack.x < cellPosition.x + cellSize &&
interceptBack.y > cellPosition.y && interceptBack.y < cellPosition.y + cellSize)
{
return 2;
}
}
}
return 0;
}
void main()
{
// For now I'm not accounting for FOV and aspect ratio because I want to get the rotation working first
vec4 beforeRotateRayDirection = vec4(texCoord.x,texCoord.y,-1,0);
// Apply the rotation matrix that was generated on the cpu
vec3 rayDirection = vec3(cameraTransformationMatrix * beforeRotateRayDirection);
int t = RayHitCell(cameraPosition, rayDirection, vec3(0,0,5), 1);
if(t == 1)
{
// Hit front plane
color = vec4(0, 0, 1, 0);
}else if(t == 2)
{
// Hit back plane
color = vec4(0, 0, 0.5, 0);
}else{
// background color
color = vec4(0, 1, 0, 0);
}
}
Okay. It's really hard to know what is wrong, but I will try nonetheless.
Here are a few tips and notes:
1) You can debug directions by mapping them to RGB color. Keep in mind you should normalize the vectors and map from (-1, 1) to (0, 1). Just do the dir*0.5+0.5 type of thing. Example:
color = vec4(normalize(rayDirection) * 0.5 + 0.5, 1.0);
2) You can get the rotation matrix in a more straightforward manner. The quaternion is initialized from a look rotation: it will first rotate around the Y axis (horizontal look) and then, and only then, around the X axis (vertical look). Keep in mind that the rotation order is implementation dependent if you initialize from Euler angles. Use mat4_cast to avoid the experimental glm extension (gtx) whenever possible. Example:
// Define rotation quaternion starting from look rotation
glm::quat camRotation = glm::vec3(0, 0, 0);
camRotation = glm::rotate(camRotation, glm::radians(camRotY), glm::vec3(0, 1, 0));
camRotation = glm::rotate(camRotation, glm::radians(camRotX), glm::vec3(1, 0, 0));
glm::mat4 camToWorldMatrix = glm::mat4_cast(camRotation);
3) Your beforeRotateRayDirection is a vector that (probably) ranges from (-1,-1,-1) all the way to (1,1,-1), which is not normalized; the length of (1,1,1) is √3 ≈ 1.7320508075688772... Be sure you have taken that into account in your collision math, or just normalize the vector.
My partial answer so far...
Your collision test is a bit weird... It appears you want to cast the ray into the Z plane for the given cell position (but twice, once for the front and once for the back). I have reviewed your code logic and it makes some sense, but without the vertex program, and thus without knowing what the texCoord range values are, it is not possible to be sure. You might want to rethink your logic to something like this:
int RayHitCell(vec3 origin, vec3 direction, vec3 cellPosition, float cellSize)
{
//Get triangle side vectors
vec3 tu = vec3(cellSize,0,0); //Triangle U component
vec3 tv = vec3(0,cellSize,0); //Triangle V component
//Determinant for inverse matrix
vec3 q = cross(direction, tv);
float det = dot(tu, q);
//if(abs(det) < 0.0000001) //If too close to zero
// return;
float invdet = 1.0/det;
//Solve component parameters
vec3 s = origin - cellPosition;
float u = dot(s, q) * invdet;
if(u < 0.0 || u > 1.0)
return 0;
vec3 r = cross(s, tu);
float v = dot(direction, r) * invdet;
if(v < 0.0 || v > 1.0)
return 0;
float t = dot(tv, r) * invdet;
if(t <= 0.0)
return 0;
return 1;
}
void main()
{
// For now I'm not accounting for FOV and aspect ratio because I want to get the
// rotation working first
vec4 beforeRotateRayDirection = vec4(texCoord.x, texCoord.y, -1, 0);
// Apply the rotation matrix that was generated on the cpu
vec3 rayDirection = vec3(cameraTransformationMatrix * beforeRotateRayDirection);
int t = RayHitCell(cameraPosition, normalize(rayDirection), vec3(0,0,5), 1);
if (t == 1)
{
// Hit front plane
color = vec4(0, 0, 1, 0);
}
else
{
// background color
color = vec4(0, 1, 0, 0);
}
}
This should give you a plane; let me know if it works. A cube will be very easy to do.
PS.: u and v can be used for texture mapping.

X-Y-Distance from camera to object in vertex shader

I want to show some fog / aerial view in my application. But I only want to use the x,y world distance from camera to the model to determine the appearance.
I already managed to get the signed z-distance from camera to the models with this calculation.
The red objects have a positive z-distance to the camera, the blue ones a negative one, in contrast to this implementation, where all values seem positive.
Vertex shader:
uniform mat4 u_mvp; // Model-View-Projection-Matrix
uniform mat4 u_mv; // Model-View-Matrix
uniform vec4 u_color; // Object color
attribute vec4 a_pos; // Vertex position
varying vec4 color; // Out color
// Fog
const float density = 0.007;
const float gradient = 1.5;
void main() {
gl_Position = u_mvp * a_pos;
// Fog
float distance = -(u_mv * a_pos).z; // Direct distance from camera
// 4000 is some invented constant to bring distance to ~[-1,1].
float visibility = clamp((distance / 4000.0), 0.0, 1.0);
color = mix(vec4(1.0, 0.0, 0.0, 1.0), u_color, visibility);
if(distance < 0){
color = vec4(0.0, 0.0, 1.0, 1.0);
}
}
Fragment shader:
varying vec4 color;
void main() {
gl_FragColor = color;
}
Why can there be a negative z-value? Or is it common?
How can I calculate the x,y world distance to the camera?
If you want to get the distance to the camera in the range [-1, 1], then you can use the clip space coordinate. The clip space coordinate can be transformed to a normalized device coordinate by the perspective divide. The normalized device coordinates (x, y and z) are in the range [-1, 1] and can be transformed to the range [0, 1] with ease:
gl_Position = u_mvp * a_pos; // clip space
vec3 ndc = gl_Position.xyz / gl_Position.w; // NDC in [-1, 1] (by perspective divide)
float depth = ndc.z * 0.5 + 0.5; // depth in [0, 1]

GLSL calculating normal on a sphere mesh in vertex shader using noise function by sampling is producing strange graphical errors

I'm trying to calculate the normals on the surface of a sphere in the vertex shader, because I defer my noise calculation to the vertex shader. The normal results are good when my theta (sampling angle) is close to 1, but for more detailed terrain and a smaller theta my normals become very incorrect. Here's what I mean:
Accurate normals
More detailed noise with a higher theta
Zoomed in onto surface of last image. Red indicates ridges, blue indicates incorrect shadows
The code I use to calculate normals is:
vec3 calcNormal(vec3 pos)
{
float theta = .1; //The closer this is to zero the less accurate it gets
vec3 vecTangent = normalize(cross(pos, vec3(1.0, 0.0, 0.0))
+ cross(pos, vec3(0.0, 1.0, 0.0)));
vec3 vecBitangent = normalize(cross(vecTangent, pos));
vec3 ptTangentSample = getPos(pos + theta * normalize(vecTangent));
vec3 ptBitangentSample = getPos(pos + theta * normalize(vecBitangent));
return normalize(cross(ptTangentSample - pos, ptBitangentSample - pos));
}
I call calcNormal with
calcNormal(getPos(position))
where getPos is the 3D noise function that takes and returns a vec3 and position is the original position on the sphere.
Thanks to @NicoSchertler, the correct version of calcNormal is:
vec3 calcNormal(vec3 pos)
{
float theta = .00001;
vec3 vecTangent = normalize(cross(pos, vec3(1.0, 0.0, 0.0))
+ cross(pos, vec3(0.0, 1.0, 0.0)));
vec3 vecBitangent = normalize(cross(vecTangent, pos));
vec3 ptTangentSample = getPos(normalize(pos + theta * normalize(vecTangent)));
vec3 ptBitangentSample = getPos(normalize(pos + theta * normalize(vecBitangent)));
return normalize(cross(ptTangentSample - pos, ptBitangentSample - pos));
}

What is wrong with my normalized orthographic coordinates in OpenGL?

I am trying to create a triangle in OpenGL that won't stretch when I resize the window. I pass this orthographic matrix to my vertex shader.
void resize(int w, int h) {
float orthMat[4][4];
orthMat[0][0] = 2.0/w;
orthMat[1][1] = -2.0/h;
orthMat[3][3] = 1.0;
orthMat[3][3] = 1.0;
orthMat[3][0] = -1.0;
orthMat[3][1] = -1.0;
orthMat[0][1] = 0.0;
orthMat[0][2] = 0.0;
orthMat[0][3] = 0.0;
orthMat[1][0] = 0.0;
orthMat[1][3] = 0.0;
orthMat[1][2] = 0.0;
orthMat[2][0] = 0.0;
orthMat[2][1] = 0.0;
orthMat[2][2] = 0.0;
orthMat[2][3] = 0.0;
orthMat[3][2] = 0.0;
glUniformMatrix4fv(uniformLocationIndex, 1, GL_TRUE, &orthMat[0][0]);
glViewport(0,0,w,h);
}
That uniformLocationIndex points to "projMat" in my vertex shader.
#version 330
in vec2 position;
in mat4 projMat;
void main() {
//vec4 contains normalized x and y coordinates
gl_Position = projMat * vec4(((-1.0) + position.x*(2.0/(2.0/projMat[0][0]))),
(1.0 - position.y*(2.0/(-2.0/projMat[1][1]))),
0.0, 1.0);
}
No matter where I put the points, they all go to the center and won't scale correctly when I resize the window. It is supposed to keep the aspect ratio and scale down.
I did some math, and I think the problem is your shader calculation.
I created a projection matrix (a glm sketch follows the list below) with:
Left clipping plane = -1
Right clipping plane = 1
Top clipping plane = 1
Bottom clipping plane = -1
Near clipping plane = 0.001
Far clipping plane = 2
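For reference, a matrix with those clipping planes can be built with glm (a hedged sketch, assuming glm is available; the values match the list above):
#include <glm/glm.hpp>
#include <glm/gtc/matrix_transform.hpp>
// left, right, bottom, top, near, far as listed above.
glm::mat4 proj = glm::ortho(-1.0f, 1.0f, -1.0f, 1.0f, 0.001f, 2.0f);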
Comparing this matrix I created to yours, there are significant differences in rows 2 and 3, but since it looks like you don't use these, I ignored that.
I then did the math involved in your gl_Position calculation, and unless I got it wrong, I got:
Point (1, 1) transforms to (0, 0)
Point (1, -1) transforms to (0, 2) (outside the field of view)
Point (-1, 1) transforms to (-2, 0) (outside the field of view)
Point (-1, -1) transforms to (-2, 2) (outside the field of view)
I think you don't have rows 2 and 3 right in your matrix (0 and 1 look OK), but again since you don't use them you should be able to get by if in your shader you say something like:
vec4 Position = projMat * vec4(position, 0.0, 1.0);
gl_Position = vec4(Position.x, Position.y, 0.0, 1.0);