Calculating normals in a triangle mesh - c++

I have drawn a triangle mesh with 10000 vertices (100x100) and it will be a grass ground. I used glDrawElements() for it. I have looked all day and still can't understand how to calculate the normals for this. Does each vertex have its own normals or does each triangle have its own normals? Can someone point me in the right direction on how to edit my code to incorporate normals?
struct vertices {
    GLfloat x;
    GLfloat y;
    GLfloat z;
} vertices[10000];
GLuint indices[60000];
/*
99..9999
98..9998
........
01..9901
00..9900
*/
void CreateEnvironment() {
    int count=0;
    for (float x=0;x<10.0;x+=.1) {
        for (float z=0;z<10.0;z+=.1) {
            vertices[count].x=x;
            vertices[count].y=0;
            vertices[count].z=z;
            count++;
        }
    }
    count=0;
    for (GLuint a=0;a<99;a++){
        for (GLuint b=0;b<99;b++){
            GLuint v1=(a*100)+b;indices[count]=v1;count++;
            GLuint v2=(a*100)+b+1;indices[count]=v2;count++;
            GLuint v3=(a*100)+b+100;indices[count]=v3;count++;
        }
    }
    count=30000;
    for (GLuint a=0;a<99;a++){
        for (GLuint b=0;b<99;b++){
            indices[count]=(a*100)+b+100;count++;//9998
            indices[count]=(a*100)+b+1;count++;//9899
            indices[count]=(a*100)+b+101;count++;//9999
        }
    }
}
void ShowEnvironment(){
    //ground
    glPushMatrix();
    GLfloat GroundAmbient[]={0.0,0.5,0.0,1.0};
    glMaterialfv(GL_FRONT,GL_AMBIENT,GroundAmbient);
    glEnableClientState(GL_VERTEX_ARRAY);
    glIndexPointer( GL_UNSIGNED_BYTE, 0, indices );
    glVertexPointer(3,GL_FLOAT,0,vertices);
    glDrawElements(GL_TRIANGLES,60000,GL_UNSIGNED_INT,indices);
    glDisableClientState(GL_VERTEX_ARRAY);
    glPopMatrix();
}
EDIT 1
Here is the code I have written out. I just used arrays instead of vectors, and I stored all of the normals in the struct called normals. It still doesn't work, however; I get an unhandled exception at *indices.
struct Normals {
    GLfloat x;
    GLfloat y;
    GLfloat z;
} normals[20000];
Normals* normal = normals;
//***************************************ENVIRONMENT*************************************************************************
struct vertices {
    GLfloat x;
    GLfloat y;
    GLfloat z;
} vertices[10000];
GLuint indices[59403];
/*
99..9999
98..9998
........
01..9901
00..9900
*/
void CreateEnvironment() {
    int count=0;
    for (float x=0;x<10.0;x+=.1) {
        for (float z=0;z<10.0;z+=.1) {
            vertices[count].x=x;
            vertices[count].y=rand()%2-2;
            vertices[count].z=z;
            count++;
        }
    }
    //calculate normals
    GLfloat vector1[3];//XYZ
    GLfloat vector2[3];//XYZ
    count=0;
    for (int x=0;x<9900;x+=100){
        for (int z=0;z<99;z++){
            vector1[0]= vertices[x+z].x-vertices[x+z+1].x;//vector1x
            vector1[1]= vertices[x+z].y-vertices[x+z+1].y;//vector1y
            vector1[2]= vertices[x+z].z-vertices[x+z+1].z;//vector1z
            vector2[0]= vertices[x+z+1].x-vertices[x+z+100].x;//vector2x
            vector2[1]= vertices[x+z+1].y-vertices[x+z+100].y;//vector2y
            vector2[2]= vertices[x+z+1].z-vertices[x+z+100].z;//vector2z
            normals[count].x= vector1[1]*vector2[2]-vector1[2]*vector2[1];
            normals[count].y= vector1[2]*vector2[0]-vector1[0]*vector2[2];
            normals[count].z= vector1[0]*vector2[1]-vector1[1]*vector2[0];
            count++;
        }
    }
    count=10000;
    for (int x=100;x<10000;x+=100){
        for (int z=0;z<99;z++){
            vector1[0]= vertices[x+z].x-vertices[x+z+1].x;//vector1x -- JUST ARRAYS
            vector1[1]= vertices[x+z].y-vertices[x+z+1].y;//vector1y
            vector1[2]= vertices[x+z].z-vertices[x+z+1].z;//vector1z
            vector2[0]= vertices[x+z+1].x-vertices[x+z-100].x;//vector2x
            vector2[1]= vertices[x+z+1].y-vertices[x+z-100].y;//vector2y
            vector2[2]= vertices[x+z+1].z-vertices[x+z-100].z;//vector2z
            normals[count].x= vector1[1]*vector2[2]-vector1[2]*vector2[1];
            normals[count].y= vector1[2]*vector2[0]-vector1[0]*vector2[2];
            normals[count].z= vector1[0]*vector2[1]-vector1[1]*vector2[0];
            count++;
        }
    }
    count=0;
    for (GLuint a=0;a<99;a++){
        for (GLuint b=0;b<99;b++){
            GLuint v1=(a*100)+b;indices[count]=v1;count++;
            GLuint v2=(a*100)+b+1;indices[count]=v2;count++;
            GLuint v3=(a*100)+b+100;indices[count]=v3;count++;
        }
    }
    count=30000;
    for (GLuint a=0;a<99;a++){
        for (GLuint b=0;b<99;b++){
            indices[count]=(a*100)+b+100;count++;//9998
            indices[count]=(a*100)+b+1;count++;//9899
            indices[count]=(a*100)+b+101;count++;//9999
        }
    }
}
void ShowEnvironment(){
    //ground
    glPushMatrix();
    GLfloat GroundAmbient[]={0.0,0.5,0.0,1.0};
    GLfloat GroundDiffuse[]={1.0,0.0,0.0,1.0};
    glMaterialfv(GL_FRONT,GL_AMBIENT,GroundAmbient);
    glMaterialfv(GL_FRONT,GL_DIFFUSE,GroundDiffuse);
    glEnableClientState(GL_VERTEX_ARRAY);
    glEnableClientState(GL_NORMAL_ARRAY);
    glNormalPointer( GL_FLOAT, 0, normal);
    glVertexPointer(3,GL_FLOAT,0,vertices);
    glDrawElements(GL_TRIANGLES,60000,GL_UNSIGNED_INT,indices);
    glDisableClientState(GL_VERTEX_ARRAY);
    glDisableClientState(GL_NORMAL_ARRAY);
    glPopMatrix();
}
//***************************************************************************************************************************

Does each vertex have its own normals or does each triangle have its own normals?
Like so often the answer is: "It depends". Since a normal is defined as being the vector perpendicular to all vectors within a given plane (in N dimensions), you need a plane to calculate a normal. A vertex position is just a point and thus singular, so you actually need a face to calculate the normal. Thus, naively, one could assume that normals are per face, as the first step in normal calculation is determining the face normals by evaluating the cross product of the face's edges.
Say you have a triangle with points A, B, C, then these points have position vectors ↑A, ↑B, ↑C and the edges have vectors ↑B - ↑A and ↑C - ↑A so the face normal vector is ↑Nf = (↑B - ↑A) × (↑C - ↑A)
Note that the magnitude of ↑Nf as it's stated above is directly proportional to the face's area.
In smooth surfaces vertices are shared between faces (or you could say those faces share a vertex). In that case the normal at the vertex is not one of the face normals of the faces it is part of, but a linear combination of them:
↑Nv = ∑ p ↑Nf ; where p is a weighting for each face.
One could assume an equal weighting between the participating face normals, but it makes more sense to assume that the larger a face is, the more it contributes to the normal.
Now recall that you normalize a vector ↑v by scaling it with its reciprocal length: ↑v̂ = ↑v/|↑v|. But as already mentioned, the length of a face normal already depends on the face's area. So the weighting factor p given above is already contained in the vector itself: its length, a.k.a. magnitude. So we can get the vertex normal vector by simply summing up all the face normals.
In lighting calculations the normal vector must be unit length, i.e. normalized, to be usable. So after summing up, we normalize the newly found vertex normal and use that.
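As a rough illustration of that accumulate-then-normalize idea applied to the question's vertices/indices arrays (the vertexNormals array, the index count and the <cmath> usage are my additions, not part of the original code):
#include <cmath>

// One normal per vertex, zero-initialized; face normals get accumulated into it.
GLfloat vertexNormals[10000][3] = { { 0 } };

void ComputeVertexNormals() {
    for (int i = 0; i < 60000; i += 3) {          // loop over the filled indices
        GLuint i0 = indices[i], i1 = indices[i + 1], i2 = indices[i + 2];
        // two edges of the triangle
        GLfloat e1[3] = { vertices[i1].x - vertices[i0].x,
                          vertices[i1].y - vertices[i0].y,
                          vertices[i1].z - vertices[i0].z };
        GLfloat e2[3] = { vertices[i2].x - vertices[i0].x,
                          vertices[i2].y - vertices[i0].y,
                          vertices[i2].z - vertices[i0].z };
        // face normal = e1 x e2; its length is proportional to the face area
        GLfloat n[3] = { e1[1] * e2[2] - e1[2] * e2[1],
                         e1[2] * e2[0] - e1[0] * e2[2],
                         e1[0] * e2[1] - e1[1] * e2[0] };
        GLuint corners[3] = { i0, i1, i2 };
        for (int k = 0; k < 3; ++k) {             // add it to all three corner vertices
            vertexNormals[corners[k]][0] += n[0];
            vertexNormals[corners[k]][1] += n[1];
            vertexNormals[corners[k]][2] += n[2];
        }
    }
    for (int v = 0; v < 10000; ++v) {             // normalize the sums
        GLfloat len = sqrtf(vertexNormals[v][0] * vertexNormals[v][0] +
                            vertexNormals[v][1] * vertexNormals[v][1] +
                            vertexNormals[v][2] * vertexNormals[v][2]);
        if (len > 0.0f) {
            vertexNormals[v][0] /= len;
            vertexNormals[v][1] /= len;
            vertexNormals[v][2] /= len;
        }
    }
}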
The careful reader may have noticed that I specifically said smooth surfaces share vertices. And in fact, if you have creases / hard edges in your geometry, then the faces on either side don't share vertices. In OpenGL a vertex is the whole combination of
position
normal
(colour)
N texture coordinates
M further attributes
You change one of these and you've got a completely different vertex. Now some 3D modelers see a vertex only as a point's position and store the rest of those attributes per face (Blender is such a modeler). This saves some memory (or considerable memory, depending on the number of attributes). But OpenGL needs the whole thing, so if working with such a mixed-paradigm file you will have to decompose it into OpenGL-compatible data first. Have a look at one of Blender's export scripts, like the PLY exporter, to see how it's done.
Now to cover some other thing. In your code you have this:
glIndexPointer( GL_UNSIGNED_BYTE, 0, indices );
The index pointer has nothing to do with vertex array indices! It is an anachronism from the days when graphics still used palettes instead of true colour. A pixel's colour wasn't set by giving its RGB values, but by a single number offsetting into a limited palette of colours. Palette colours can still be found in several graphics file formats, but no decent piece of hardware uses them anymore.
Please erase glIndexPointer (and glIndex) from your memory and your code; they don't do what you think they do. The whole indexed colour mode is arcane to use, and frankly I don't know of any hardware built after 1998 that still supports it.
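For completeness, a sketch of how the draw call might look once glIndexPointer is gone and a per-vertex normal array is supplied (here I assume one normal per vertex, e.g. the vertexNormals array from the sketch above, which is not part of the original code):
void ShowEnvironment() {
    glPushMatrix();
    GLfloat GroundAmbient[] = { 0.0f, 0.5f, 0.0f, 1.0f };
    glMaterialfv(GL_FRONT, GL_AMBIENT, GroundAmbient);
    glEnableClientState(GL_VERTEX_ARRAY);
    glEnableClientState(GL_NORMAL_ARRAY);
    glVertexPointer(3, GL_FLOAT, 0, vertices);
    glNormalPointer(GL_FLOAT, 0, vertexNormals);   // one normal per vertex; no glIndexPointer
    glDrawElements(GL_TRIANGLES, 60000, GL_UNSIGNED_INT, indices);
    glDisableClientState(GL_NORMAL_ARRAY);
    glDisableClientState(GL_VERTEX_ARRAY);
    glPopMatrix();
}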

Thumbs up for datenwolf! I completely agree with his approach. Adding the normal vectors of the adjacent triangles for each vertex and then normalising is the way to go. I just want to push the answer a little bit and have a closer look at the particular but quite common case of a rectangular, smooth mesh that has a constant x/y step. In other words, a rectangular x/y grid with a variable height at each point.
Such a mesh is created by looping over x and y and setting a value for z and can represent things like the surface of a hill. So each point of the mesh is represented by a vector
P = (x, y, f(x,y))
where f(x,y) is a function giving the z of each point on the grid.
Usually to draw such a mesh we use a TriangleStrip or a TriangleFan, but any technique should give a similar topology for the resulting triangles.
|/ |/ |/ |/
...--+----U----UR---+--...
/| /| 2 /| /| Y
/ | / | / | / | ^
| / | / | / | / |
|/ 1 |/ 3 |/ |/ |
...--L----P----R----+--... +-----> X
/| 6 /| 4 /| /|
/ | / | / | / |
| /5 | / | / | /
|/ |/ |/ |/
...--DL---D----+----+--...
/| /| /| /|
For a triangleStrip each vertex P=(x0, y0, z0) has 6 adjacent vertices denoted
up = (x0 , y0 + ay, Zup)
upright = (x0 + ax, y0 + ay, Zupright)
right = (x0 + ax, y0 , Zright)
down = (x0 , y0 - ay, Zdown)
downleft = (x0 - ax, y0 - ay, Zdownleft)
left = (x0 - ax, y0 , Zleft)
where ax/ay is the constant grid step on the x/y axis respectively. On a square grid ax = ay.
ax = width / (nColumns - 1)
ay = height / (nRows - 1)
Thus each vertex has 6 adjacent triangles, each one with its own normal vector (denoted N1 to N6). These can be calculated using the cross product of the two vectors defining the sides of the triangle, being careful about the order in which we do the cross product. If the normal vector points in the Z direction towards you:
N1 = up x left =
= (Yup*Zleft - Yleft*Zup, Xleft*Zup - Xup*Zleft, Xup*Yleft - Yup*Xleft)
=( (y0 + ay)*Zleft - y0*Zup,
(x0 - ax)*Zup - x0*Zleft,
x0*y0 - (y0 + ay)*(x0 - ax) )
N2 = upright x up
N3 = right x upright
N4 = down x right
N5 = downleft x down
N6 = left x downleft
And the resulting normal vector for each point P is the sum of N1 to N6. We normalise after summing. It's very easy to create a loop, calculate the values of each normal vector, add them and then normalise. However, as pointed out by Mr. Shickadance, this can take quite a while, especially for large meshes and/or on embedded devices.
If we have a closer look and perform the calculations by hand, we will find out that most of the terms cancel out each other, leaving us with a very elegant and easy to calculate final solution for the resulting vector N. The point here is to speed up calculations by avoiding calculating the coordinates of N1 to N6, doing 6 cross-products and 6 additions for each point. Algebra helps us to jump straight to the solution, use less memory and less CPU time.
I will not show the details of the calculations, as they are long but straightforward, and will jump to the final expression of the normal vector for any point on the grid. Only N1 is decomposed for the sake of clarity; the other vectors look alike. After summing we obtain N, which is not yet normalized:
N = N1 + N2 + ... + N6
= .... (long but easy algebra) ...
= ( (2*(Zleft - Zright) - Zupright + Zdownleft + Zup - Zdown) / ax,
(2*(Zdown - Zup) - Zupright + Zdownleft + Zright - Zleft) / ay,
6 )
There you go! Just normalise this vector and you have the normal vector for any point on the grid, provided you know the Z values of its surrounding points and the horizontal/vertical step of your grid.
Note that this is the weighted average of the surrounding triangles' normal vectors. The weight is the area of the triangles and is already included in the cross product.
You can even simplify it more by only taking into account the Z values of four surrounding points (up,down,left and right). In that case you get :
| \|/ |
N = N1 + N2 + N3 + N4 ..--+----U----+--..
= ( (Zleft - Zright) / ax, | /|\ |
(Zdown - Zup ) / ay, | / | \ |
2 ) \ | / 1|2 \ | /
\|/ | \|/
..--L----P----R--...
/|\ | /|\
/ | \ 4|3 / | \
| \ | / |
| \|/ |
..--+----D----+--..
| /|\ |
which is even more elegant and even faster to calculate.
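A minimal C++ sketch of that four-neighbour version (the row-major heights array, the border clamping and the Vec3 type are assumptions on my part, not something given in the question):
#include <algorithm>
#include <cmath>

struct Vec3 { float x, y, z; };

// heights: row-major array of nColumns * nRows Z values;
// ax, ay: the constant grid steps described above.
Vec3 gridVertexNormal(const float* heights, int nColumns, int nRows,
                      int i, int j, float ax, float ay) {
    // clamp the neighbour lookups at the borders of the grid
    auto z = [&](int c, int r) {
        c = std::max(0, std::min(c, nColumns - 1));
        r = std::max(0, std::min(r, nRows - 1));
        return heights[r * nColumns + c];
    };
    Vec3 n = { (z(i - 1, j) - z(i + 1, j)) / ax,   // (Zleft - Zright) / ax
               (z(i, j - 1) - z(i, j + 1)) / ay,   // (Zdown - Zup)   / ay
               2.0f };
    float len = std::sqrt(n.x * n.x + n.y * n.y + n.z * n.z);
    n.x /= len; n.y /= len; n.z /= len;            // normalize after summing
    return n;
}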
Hope this will make some meshes faster.
Cheers

Per-vertex.
Use cross-products to calculate the face normals for the triangles surrounding a given vertex, add them together, and normalize.

As simple as it may seem, calculating the normal of a triangle is only part of the problem. The cross product of 2 sides of the polygon is sufficient in triangular cases, unless the triangle is collapsed onto itself and degenerate; in that case there is no one valid normal, so you can select one to your liking.
So why is the normalized cross product only part of the problem? The winding order of the vertices in that polygon defines the direction of the normal, i.e. if one pair of vertices is swapped in place, the normal will point in the opposite direction. So in fact this can be problematic if the mesh itself contains inconsistencies in that regard, i.e. parts of it assume one ordering, while other parts assume different orderings. One famous example is the original Stanford Bunny model, where some parts of the surface point inwards while others point outwards. The reason is that the model was constructed from scanner data, and no care was taken to produce triangles with a consistent winding. (Obviously, clean versions of the bunny also exist.)
The winding problem is even more prominent if polygons can have multiple vertices, because in that case you would be averaging partial normals of the semi-triangulation of that polygon. Consider the case where partial normals are pointing in opposite directions, resulting in normal vectors of length 0 when taking the mean!
In the same sense, disconnected polygon soups and point clouds present challenges for accurate reconstruction due to the ill-defined winding number.
One potential strategy that is often used to solve this problem is to shoot random rays from outside towards the center of each semi-triangulation (i.e. ray-stabbing). But one cannot assume that the triangulation is valid if polygons can contain multiple vertices, so rays may miss that particular sub-triangle. If a ray hits, the normal facing against the ray direction, i.e. the one with dot(ray, n) < 0, can be used as the normal for the entire polygon. Obviously this is rather expensive and scales with the number of vertices per polygon.
Thankfully, there's great new work that describes an alternative method that is not only faster (for large and complex meshes) but also generalizes the 'winding order' concept for constructions beyond polygon meshes, such as point clouds and polygon soups, iso-surfaces, and point-set surfaces, where connectivity may not even be defined!
As outlined in the paper, the method constructs a hierarchical splitting-tree representation that is refined progressively, taking the parent 'dipole' orientation into account at every split operation. A polygon normal would then simply be an integration (mean) over all dipoles (i.e. point + normal pairs) of the polygon.
For people who are dealing with unclean mesh/point-cloud data from lidar scanners or other sources, this could definitely be a game-changer.

For those like me who came across this question, your answer might be this:
// Compute Vertex Normals
std::vector<sf::Glsl::Vec3> verticesNormal;
verticesNormal.resize(verticesCount);
for (std::size_t i = 0; i < indices.size(); i += 3)
{
    // Get the face normal
    auto vector1 = verticesPos[indices[i + 1]] - verticesPos[indices[i]];
    auto vector2 = verticesPos[indices[i + 2]] - verticesPos[indices[i]];
    auto faceNormal = sf::VectorCross(vector1, vector2);
    sf::Normalize(faceNormal);
    // Add the face normal to the 3 vertices normal touching this face
    verticesNormal[indices[i]] += faceNormal;
    verticesNormal[indices[i + 1]] += faceNormal;
    verticesNormal[indices[i + 2]] += faceNormal;
}
// Normalize vertices normal
for (std::size_t i = 0; i < verticesNormal.size(); i++)
    sf::Normalize(verticesNormal[i]);

The easy way is to translate one of the triangle's points (p1, p2, p3), say p1, to (0,0,0), which means (x2,y2,z2)->(x2-x1,y2-y1,z2-z1) and (x3,y3,z3)->(x3-x1,y3-y1,z3-z1). Then you perform a dot product on the transformed points to obtain the planar slope, or a cross product to obtain the outward normal.
See:
https://en.wikipedia.org/wiki/Cross_product#/media/File:Cross_product_vector.svg
for a simple visual representation of the difference between cross product and dot product.
Moving one of the points to the origin is basically equivalent to generating vectors along p1p2 and p1p3.
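A minimal sketch of that computation (the Vec3 type is mine; the normal's direction still depends on the winding order, as discussed in the answers above):
#include <cmath>

struct Vec3 { float x, y, z; };

// Unit normal of triangle (p1, p2, p3); flips sign if the winding is reversed.
Vec3 triangleNormal(const Vec3& p1, const Vec3& p2, const Vec3& p3) {
    Vec3 a = { p2.x - p1.x, p2.y - p1.y, p2.z - p1.z };   // vector along p1p2
    Vec3 b = { p3.x - p1.x, p3.y - p1.y, p3.z - p1.z };   // vector along p1p3
    Vec3 n = { a.y * b.z - a.z * b.y,                     // cross product a x b
               a.z * b.x - a.x * b.z,
               a.x * b.y - a.y * b.x };
    float len = std::sqrt(n.x * n.x + n.y * n.y + n.z * n.z);
    if (len > 0.0f) { n.x /= len; n.y /= len; n.z /= len; }   // degenerate-safe
    return n;
}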

Related

Clipping triangles in screen space per pixel

As I understand it, in OpenGL polygons are usually clipped in clip space, and only those triangles (or parts of triangles, if the clipping process splits them) that survive the comparison against ±w are rasterized. This then requires implementing a polygon clipping algorithm such as Sutherland-Hodgman.
I am implementing my own CPU rasterizer and for now would like to avoid doing that. I have the NDC coordinates of vertices available (not really normalized since I did not clip anything so the positions may not be in range [-1, 1]). I would like to interpolate these values for all pixels and only draw pixels the NDC coordinates of which fall within [-1, 1] in the x, y and z dimensions. I would then additionally perform the depth test.
Would this work? If yes, what would the interpolation look like? Can I use the OpenGL spec (page 427, equation 14.9) formula for attribute interpolation as described here? Alternatively, should I use formula 14.10, which is used for depth (z) interpolation, for all 3 coordinates (I don't really understand why a different one is used there)?
Update:
I have tried interpolating the NDC values per pixel by two methods:
w0, w1, w2 are the barycentric weights of the vertices.
1)
float x_ndc = w0 * v0_NDC.x + w1 * v1_NDC.x + w2 * v2_NDC.x;
float y_ndc = w0 * v0_NDC.y + w1 * v1_NDC.y + w2 * v2_NDC.y;
float z_ndc = w0 * v0_NDC.z + w1 * v1_NDC.z + w2 * v2_NDC.z;
2)
float x_ndc = (w0*v0_NDC.x/v0_NDC.w + w1*v1_NDC.x/v1_NDC.w + w2*v2_NDC.x/v2_NDC.w) /
              (w0/v0_NDC.w + w1/v1_NDC.w + w2/v2_NDC.w);
float y_ndc = (w0*v0_NDC.y/v0_NDC.w + w1*v1_NDC.y/v1_NDC.w + w2*v2_NDC.y/v2_NDC.w) /
              (w0/v0_NDC.w + w1/v1_NDC.w + w2/v2_NDC.w);
float z_ndc = w0 * v0_NDC.z + w1 * v1_NDC.z + w2 * v2_NDC.z;
The clipping + depth test always looks like this:
if (-1.0f < z_ndc && z_ndc < 1.0f && z_ndc < currentDepth &&
    -1.0f < y_ndc && y_ndc < 1.0f &&
    -1.0f < x_ndc && x_ndc < 1.0f)
Case 1) corresponds to using equation 14.10 for their interpolation. Case 2) corresponds to using equation 14.9 for interpolation.
Results documented in gifs on imgur.
1) Strange things happen when the second cube is behind the camera or when I go into a cube.
2) Strange artifacts are not visible, but as the camera approaches vertices, they start disappearing. And since this is the perspective-correct interpolation of attributes, vertices nearer to the camera have greater weight, so as soon as a vertex gets clipped this information is interpolated with a strong weight across the triangle's pixels.
Is all of this expected or have I done something wrong?
Clipping against the near plane is not strictly necessary, unless the triangle goes to or past 0 in the camera-space Z. Once that happens, the homogeneous coordinate math gets weird.
Most hardware only bothers to clip triangles if they extend more than a screen's width outside the clip space or if they cross the camera-Z of zero. This kind of clipping is called "guard-band clipping", and it saves a lot of performance, since clipping isn't cheap.
So yes, the math can work fine. The main thing you have to do, when setting up your scan lines, is figure out where each of them start/end on screen. The interpolation math is the same either way.
I don't see any reason why this wouldn't work. But it will be way slower than traditional clipping. Note that you might get into trouble with triangles close to the projection center, since they will be vanishingly small and might cause problems in the barycentric coordinate calculation.
The difference between equations 14.9 and 14.10 is that depth is basically z/w (and remapped to [0, 1]). Since the perspective divide has already happened for it, it has to be left out during interpolation.
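To make the distinction concrete, a small sketch (w0..w2 are the screen-space barycentric weights from the question; the attribute values a0..a2 and the clip-space w values vw0..vw2 are names I made up):
// Depth is already z/w, so it interpolates linearly in screen space (eq. 14.10 style).
float interpolateDepth(float w0, float w1, float w2,
                       float z0, float z1, float z2) {
    return w0 * z0 + w1 * z1 + w2 * z2;
}

// Any other attribute must be divided by w per vertex and rescaled per pixel (eq. 14.9 style).
float interpolateAttribute(float w0, float w1, float w2,
                           float a0, float a1, float a2,
                           float vw0, float vw1, float vw2) {
    float invW = w0 / vw0 + w1 / vw1 + w2 / vw2;
    return (w0 * a0 / vw0 + w1 * a1 / vw1 + w2 * a2 / vw2) / invW;
}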

Algorithm for coloring a triangle by vertex color

I'm working on a toy raytracer using vertex based triangles, similar to OpenGL. Each vertex has its own color and the coloring of a triangle at each point should be based on a weighted average of the colors of the vertex, weighted by how close the point is to each vertex.
I can't figure out how to calculate the weight of each color at a given point on the triangle to mimic the color shading done by OpenGL, as shown by many examples here. I have several thoughts, but I'm not sure which one is correct (V is a vertex, U and W are the other two vertices, P is the point to color, C is the centroid of the triangle, and |PQ| is the distance from point P to point Q):
Have weight equal to 1-(|VP|/|VC|), but this would leave black at the centroid (all colors are weighted 0), which is not correct.
Weight is equal to 1-(|VP|/max(|VU|,|VW|)), so V has non-zero weight at the closer of the two vertices, which I don't think is correct.
Weight is equal to 1-(|VP|/min(|VU|,|VW|)), so V has zero weight at the closer of the two vertices, and negative weight (which would saturate to 0) at the further of the two. I'm not sure if this is right or not.
Line segment L extends from V through P to the opposite side of the triangle (UW): weight is the ratio of |VP| to |L|. So the weight of V would be 0 all along the opposite side.
The last one seems like the most likely, but I'm having trouble implementing it, so I'm not sure if it's correct.
OpenGL uses barycentric coordinates (linear interpolation, precisely, although you can change that using interpolation qualifiers such as centroid or noperspective in recent versions).
In case you don't know, barycentric coordinates work like this:
For a location P in a triangle made of vertices V1, V2 and V3, whose respective coefficients are C1, C2, C3 such that C1+C2+C3=1 (those coefficients refer to the influence of each vertex on the color of P), OpenGL must calculate those coefficients such that the result is equivalent to
C1 = (AreaOfTriangle PV2V3) / (AreaOfTriangle V1V2V3)
C2 = (AreaOfTriangle PV3V1) / (AreaOfTriangle V1V2V3)
C3 = (AreaOfTriangle PV1V2) / (AreaOfTriangle V1V2V3)
and the area of a triangle can be calculated as half the length of the cross product of two vectors defining its sides (in direct order), for example AreaOfTriangle V1V2V3 = length(cross(V2-V1, V3-V1)) / 2. We then have something like:
float areaOfTriangle = length(cross(V2-V1, V3-V1)); //Two times the area of the triangle
float C1 = length(cross(V2-P, V3-P)) / areaOfTriangle; //Because A1*2/A*2 = A1/A
float C2 = length(cross(V3-P, V1-P)) / areaOfTriangle; //Because A2*2/A*2 = A2/A
float C3 = 1.0f - C1 - C2; //Because C1 + C2 + C3 = 1
But after some math (and a little bit of web research :D), the most efficient way of doing this I found was:
YOURVECTYPE sideVec1 = V2 - V1, sideVec2 = V3 - V1, sideVec3 = P - V1;
float dot11 = dot(sideVec1, sideVec1);
float dot12 = dot(sideVec1, sideVec2);
float dot22 = dot(sideVec2, sideVec2);
float dot31 = dot(sideVec3, sideVec1);
float dot32 = dot(sideVec3, sideVec2);
float denom = dot11 * dot22 - dot12 * dot12;
float C2 = (dot22 * dot31 - dot12 * dot32) / denom; //weight of V2
float C3 = (dot11 * dot32 - dot12 * dot31) / denom; //weight of V3
float C1 = 1.0f - C2 - C3; //Because C1 + C2 + C3 = 1
Then, to interpolate things like colors, color1, color2 and color3 being the colors of your vertices, you do:
float color = C1*color1 + C2*color2 + C3*color3;
But beware that this doesn't work properly if you're using perspective transformations (or any transformation of vertices involving the w component), so in that case you'll have to use:
float color = (C1*color1/w1 + C2*color2/w2 + C3*color3/w3)/(C1/w1 + C2/w2 + C3/w3);
w1, w2, and w3 are respectively the fourth components of the original vertices that made V1, V2 and V3.
V1, V2 and V3 in the first calculation must be 3-dimensional because of the cross product, but in the second (most efficient) one they can be 2-dimensional as well as 3-dimensional; the results will be the same (and you probably guessed that 2D is faster in the second calculation). In both cases, don't forget to divide them by the fourth component of their original vector if you're doing perspective transformations, and to use the second formula for interpolation in that case. (And in case you didn't understand: none of the vectors in those calculations should include a fourth component!)
And one last thing: I strongly advise you to use OpenGL by just rendering a big quad on the screen and putting all your code in the shaders (although you'll need very strong knowledge about OpenGL for advanced use), because you'll benefit from parallelism (even from a s#!+ video card), unless you're writing this on a 30-year-old computer or are just doing it to see how it works.
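For reference, a compact, self-contained C++ sketch of the same dot-product method (the Vec2 type, the helper and the degenerate-triangle guard are my additions), with the weights lined up so that C1 belongs to V1:
#include <cmath>

struct Vec2 { float x, y; };
static float dot2(Vec2 a, Vec2 b) { return a.x * b.x + a.y * b.y; }

// Computes C1, C2, C3 of P with respect to V1, V2, V3 (C1 + C2 + C3 = 1).
// Returns false for a degenerate (zero-area) triangle.
bool barycentric(Vec2 P, Vec2 V1, Vec2 V2, Vec2 V3,
                 float& C1, float& C2, float& C3) {
    Vec2 s1 = { V2.x - V1.x, V2.y - V1.y };
    Vec2 s2 = { V3.x - V1.x, V3.y - V1.y };
    Vec2 s3 = { P.x - V1.x,  P.y - V1.y };
    float d11 = dot2(s1, s1), d12 = dot2(s1, s2), d22 = dot2(s2, s2);
    float d31 = dot2(s3, s1), d32 = dot2(s3, s2);
    float denom = d11 * d22 - d12 * d12;
    if (std::fabs(denom) < 1e-12f) return false;
    C2 = (d22 * d31 - d12 * d32) / denom;   // weight of V2
    C3 = (d11 * d32 - d12 * d31) / denom;   // weight of V3
    C1 = 1.0f - C2 - C3;                    // weight of V1
    return true;
}
// Affine interpolation of a per-vertex value (use the /w variant above under perspective):
// float color = C1 * color1 + C2 * color2 + C3 * color3;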
IIRC, for this you don't really need to do anything in GLSL -- the interpolated color will already be the input color to your fragment shader if you just pass on the vertex color in the vertex shader.
Edit: Yes, this doesn't answer the question -- the correct answer is in the first answer above already: use barycentric coordinates (which is what GL does).

How to add variability to my 3d curve - spherical coords in openGL / C++

Using spherical coordinates, I am drawing an arc around the surface of a sphere. Here is my code:
int goose1a_egg1_step = 0; // THIS IS INCREMENTED EACH FRAME
float goose1a_egg1_theta=7.5; // START THETA
float goose1a_egg1_phi=4; // START PHI
float goose1a_egg1_theta_increment = 1.5/goose1a_egg1_divider; // END THETA = 6
float goose1a_egg1_phi_increment = 3/goose1a_egg1_divider; // END PHI = 1
float goose1a_egg1_theta_math1 = (goose1a_egg1_theta-(goose1a_egg1_theta_increment* r_goose1a_egg1_step))/10.0*M_PI;
float goose1a_egg1_phi_math1 = (goose1a_egg1_phi-(goose1a_egg1_phi_increment* r_goose1a_egg1_step))/10.0*2*M_PI;
r_goose1a_egg1_x = radius_egg_pos * sin(goose1a_egg1_theta_math1) * cos(goose1a_egg1_phi_math1);
r_goose1a_egg1_y = radius_egg_pos * sin(goose1a_egg1_theta_math1) * sin(goose1a_egg1_phi_math1);
r_goose1a_egg1_z = radius_egg_pos * cos(goose1a_egg1_theta_math1);
glPushMatrix();
glTranslatef(r_goose1a_egg1_x,r_goose1a_egg1_y,r_goose1a_egg1_z);
glColor3f (1, 1, .8);
glutSolidSphere (0.02,5,5);
glEnd();
glPopMatrix();
I would like to draw an additional arc, that has the same START and END positions. The catch is, rather than having the second arc follow the exact same trajectory as the first, can I tune my math so that there is variability in the arc? For instance, the second arc would travel a slightly divergent path from the first. Like two flights leaving DFW for JFK, but taking slightly different routes.
The code for the second arc is as follows (note: the only difference between the two is "goose1a" and "goose1b" - I don't want to add a bunch of cruft here in which I was randomly multiplying random variables by random integers :/ )
int goose1b_egg1_step = 0; // THIS IS INCREMENTED EACH FRAME
float goose1b_egg1_theta=7.5; // START THETA
float goose1b_egg1_phi=4; // START PHI
float goose1b_egg1_theta_increment = 1.5/goose1b_egg1_divider; // END THETA = 6
float goose1b_egg1_phi_increment = 3/goose1b_egg1_divider; // END PHI = 1
float goose1b_egg1_theta_math1 = (goose1b_egg1_theta-(goose1b_egg1_theta_increment* r_goose1b_egg1_step))/10.0*M_PI;
float goose1b_egg1_phi_math1 = (goose1b_egg1_phi-(goose1b_egg1_phi_increment* r_goose1b_egg1_step))/10.0*2*M_PI;
r_goose1b_egg1_x = radius_egg_pos * sin(goose1b_egg1_theta_math1) * cos(goose1b_egg1_phi_math1);
r_goose1b_egg1_y = radius_egg_pos * sin(goose1b_egg1_theta_math1) * sin(goose1b_egg1_phi_math1);
r_goose1b_egg1_z = radius_egg_pos * cos(goose1b_egg1_theta_math1);
glPushMatrix();
glTranslatef(r_goose1b_egg1_x,r_goose1b_egg1_y,r_goose1b_egg1_z);
glColor3f (1, 1, .8);
glutSolidSphere (0.02,5,5);
glEnd();
glPopMatrix();
Because my understanding of 3D math is so poor, I'm not even sure if there's a way to do this? And if not, I'll look for an alternative solution to my design. But if it is possible to draw two different arcs that employ the same START and END positions using spherical coordinates, your advice and guidance will be greatly appreciated.
You could try computing a B-spline in spherical co-ordinates. You would position two knots at each end-point and have one knot in the middle that you could push around (perpendicular to the shortest arc) to adjust the variability.
I believe you are computing the shortest arc from A to B, and unless the points are exactly opposite, there is only one shortest arc. So you need a curve in spherical space instead of a line.
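A rough sketch of that 'curve instead of a line' idea, assuming a quadratic Bezier through a displaced middle control point in (theta, phi) space (the names and the displacement scheme are my own, and the angle convention matches the question's x = r*sin(theta)*cos(phi); bulge = 0 reproduces the original arc):
#include <cmath>

struct Vec3 { float x, y, z; };

// t runs from 0 to 1 along the arc; (theta0, phi0) and (theta1, phi1) are the
// fixed start/end angles; bulge pushes the middle control point sideways in
// angle space, so different bulge values give divergent arcs with the same ends.
Vec3 arcPoint(float t, float radius,
              float theta0, float phi0, float theta1, float phi1, float bulge) {
    float midTheta = 0.5f * (theta0 + theta1) + bulge * (phi1 - phi0);
    float midPhi   = 0.5f * (phi0 + phi1)   - bulge * (theta1 - theta0);
    // quadratic Bezier in (theta, phi); the endpoints stay exactly at the given angles
    float u = 1.0f - t;
    float theta = u * u * theta0 + 2.0f * u * t * midTheta + t * t * theta1;
    float phi   = u * u * phi0   + 2.0f * u * t * midPhi   + t * t * phi1;
    // back to Cartesian, same convention as the question's code
    return { radius * std::sin(theta) * std::cos(phi),
             radius * std::sin(theta) * std::sin(phi),
             radius * std::cos(theta) };
}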
Taking a different approach, note that a direct path joining two points on the surface of a sphere is the intersection between a sphere and a plane passing through those two points. If you can rotate that plane around the axis formed between those two points, the intersection will give you alternative arcs.
The trick is to calculate them. Read up on plane-sphere intersection. The result is a circular section of the sphere, so it might not be too bad. Then you don't need spherical co-ordinates.

Generating a normal map from a height map?

I'm working on procedurally generating patches of dirt using randomized fractals for a video game. I've already generated a height map using the midpoint displacement algorithm and saved it to a texture. I have some ideas for how to turn that into a texture of normals, but some feedback would be much appreciated.
My height texture is currently a 257 x 257 gray-scale image (height values are scaled for visibility purposes):
My thinking is that each pixel of the image represents a lattice coordinate in a 256 x 256 grid (hence, why there are 257 x 257 heights). That would mean that the normal at coordinate (i, j) is determined by the heights at (i, j), (i, j + 1), (i + 1, j), and (i + 1, j + 1) (call those A, B, C, and D, respectively).
So given the 3D coordinates of A, B, C, and D, would it make sense to:
split the four into two triangles: ABC and BCD
calculate the normals of those two faces via cross product
split into two triangles: ACD and ABD
calculate the normals of those two faces
average the four normals
...or is there a much easier method that I'm missing?
Example GLSL code from my water surface rendering shader:
#version 130
uniform sampler2D unit_wave;
noperspective in vec2 tex_coord;
const vec2 size = vec2(2.0,0.0);
const ivec3 off = ivec3(-1,0,1);
vec4 wave = texture(unit_wave, tex_coord);
float s11 = wave.x;
float s01 = textureOffset(unit_wave, tex_coord, off.xy).x;
float s21 = textureOffset(unit_wave, tex_coord, off.zy).x;
float s10 = textureOffset(unit_wave, tex_coord, off.yx).x;
float s12 = textureOffset(unit_wave, tex_coord, off.yz).x;
vec3 va = normalize(vec3(size.xy,s21-s01));
vec3 vb = normalize(vec3(size.yx,s12-s10));
vec4 bump = vec4( cross(va,vb), s11 );
The result is a bump vector: xyz=normal, a=height
My thinking is that each pixel of the image represents a lattice coordinate in a 256 x 256 grid (hence, why there are 257 x 257 heights). That would mean that the normal at coordinate (i, j) is determined by the heights at (i, j), (i, j + 1), (i + 1, j), and (i + 1, j + 1) (call those A, B, C, and D, respectively).
No. Each pixel of the image represents a vertex of the grid, so intuitively, from symmetry, its normal is determined by heights of neighboring pixels (i-1,j), (i+1,j), (i,j-1), (i,j+1).
Given a function f : ℝ² → ℝ that describes a surface in ℝ³, a unit normal at (x,y) is given by
v = (−∂f/∂x, −∂f/∂y, 1) and n = v/|v|.
It can be proven that the best approximation to ∂f/∂x by two samples is achieved by:
∂f/∂x(x,y) = (f(x+ε,y) − f(x−ε,y))/(2ε)
To get a better approximation you need to use at least four points, thus adding a third point (i.e. (x,y)) doesn't improve the result.
Your heightmap is a sampling of some function f on a regular grid. Taking ε=1 you get:
2v = (f(x−1,y) − f(x+1,y), f(x,y−1) − f(x,y+1), 2)
Putting it into code would look like:
// sample the height map:
float fx0 = f(x-1,y), fx1 = f(x+1,y);
float fy0 = f(x,y-1), fy1 = f(x,y+1);
// the spacing of the grid in same units as the height map
float eps = ... ;
// plug into the formulae above:
vec3 n = normalize(vec3((fx0 - fx1)/(2*eps), (fy0 - fy1)/(2*eps), 1));
A common method is using a Sobel filter for a weighted/smooth derivative in each direction.
Start by sampling a 3x3 area of heights around each texel (here, [4] is the pixel we want the normal for).
[6][7][8]
[3][4][5]
[0][1][2]
Then,
//float s[9] contains above samples
vec3 n;
n.x = scale * -(s[2]-s[0]+2*(s[5]-s[3])+s[8]-s[6]);
n.y = scale * -(s[6]-s[0]+2*(s[7]-s[1])+s[8]-s[2]);
n.z = 1.0;
n = normalize(n);
Where scale can be adjusted to match the heightmap real world depth relative to its size.
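If you want to bake the normal map on the CPU instead, the same Sobel sampling might look roughly like this in C++ (the row-major height array, the border clamping and the assumption that increasing j maps to the diagram's upper row are mine):
#include <algorithm>
#include <cmath>

struct Vec3 { float x, y, z; };

// heights: row-major array of width*height samples; scale as described above.
Vec3 sobelNormal(const float* heights, int width, int height,
                 int x, int y, float scale) {
    // clamped lookup so border texels reuse their nearest neighbour
    auto h = [&](int i, int j) {
        i = std::max(0, std::min(i, width - 1));
        j = std::max(0, std::min(j, height - 1));
        return heights[j * width + i];
    };
    // 3x3 neighbourhood, laid out as in the diagram above (row y-1 = [0][1][2])
    float s0 = h(x - 1, y - 1), s1 = h(x, y - 1), s2 = h(x + 1, y - 1);
    float s3 = h(x - 1, y),                       s5 = h(x + 1, y);
    float s6 = h(x - 1, y + 1), s7 = h(x, y + 1), s8 = h(x + 1, y + 1);
    Vec3 n;
    n.x = scale * -(s2 - s0 + 2.0f * (s5 - s3) + s8 - s6);
    n.y = scale * -(s6 - s0 + 2.0f * (s7 - s1) + s8 - s2);
    n.z = 1.0f;
    float len = std::sqrt(n.x * n.x + n.y * n.y + n.z * n.z);
    n.x /= len; n.y /= len; n.z /= len;
    return n;   // remap to 0.5f * n + 0.5f per channel when writing an RGB texture
}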
If you think of each pixel as a vertex rather than a face, you can generate a simple triangular mesh.
+--+--+
|\ |\ |
| \| \|
+--+--+
|\ |\ |
| \| \|
+--+--+
Each vertex has an x and y coordinate corresponding to the x and y of the pixel in the map. The z coordinate is based on the value in the map at that location. Triangles can be generated explicitly or implicitly by their position in the grid.
What you need is the normal at each vertex.
A vertex normal can be computed by taking an area-weighted average of the surface normals for each of the triangles that meet at that point.
If you have a triangle with vertices v0, v1, v2, then you can use a vector cross product (of two vectors that lie on two of the sides of the triangle) to compute a vector in the direction of the normal and scaled proportionally to the area of the triangle.
Vector3 contribution = Cross(v1 - v0, v2 - v1);
Each of your vertices that aren't on the edge will be shared by six triangles. You can loop through those triangles, summing up the contributions, and then normalize the vector sum.
Note: You have to compute the cross products in a consistent way to make sure the normals are all pointing in the same direction. Always pick two sides in the same order (clockwise or counterclockwise). If you mix some of them up, those contributions will be pointing in the opposite direction.
For vertices on the edge, you end up with a shorter loop and a lot of special cases. It's probably easier to create a border around your grid of fake vertices and then compute the normals for the interior ones and discard the fake borders.
for each interior vertex V {
Vector3 sum(0.0, 0.0, 0.0);
for each of the six triangles T that share V {
const Vector3 side1 = T.v1 - T.v0;
const Vector3 side2 = T.v2 - T.v1;
const Vector3 contribution = Cross(side1, side2);
sum += contribution;
}
sum.Normalize();
V.normal = sum;
}
If you need the normal at a particular point on a triangle (other than one of the vertices), you can interpolate by weighing the normals of the three vertices by the barycentric coordinates of your point. This is how graphics rasterizers treat the normal for shading. It allows a triangle mesh to appear like smooth, curved surface rather than a bunch of adjacent flat triangles.
Tip: For your first test, use a perfectly flat grid and make sure all of the computed normals are pointing straight up.

Perspective correct texture mapping; z distance calculation might be wrong

I'm making a software rasterizer, and I've run into a bit of a snag: I can't seem to get perspective-correct texture mapping to work.
My algorithm is to first sort the coordinates to plot by y. This returns a highest, lowest and center point. I then walk across the scanlines using the deltas:
// ordering by y is put here
order[0] = &a_Triangle.p[v_order[0]];
order[1] = &a_Triangle.p[v_order[1]];
order[2] = &a_Triangle.p[v_order[2]];
float height1, height2, height3;
height1 = (float)((int)(order[2]->y + 1) - (int)(order[0]->y));
height2 = (float)((int)(order[1]->y + 1) - (int)(order[0]->y));
height3 = (float)((int)(order[2]->y + 1) - (int)(order[1]->y));
// x
float x_start, x_end;
float x[3];
float x_delta[3];
x_delta[0] = (order[2]->x - order[0]->x) / height1;
x_delta[1] = (order[1]->x - order[0]->x) / height2;
x_delta[2] = (order[2]->x - order[1]->x) / height3;
x[0] = order[0]->x;
x[1] = order[0]->x;
x[2] = order[1]->x;
And then we render from order[0]->y to order[2]->y, increasing x_start and x_end by a delta. When rendering the top part, the deltas are x_delta[0] and x_delta[1]. When rendering the bottom part, the deltas are x_delta[0] and x_delta[2]. Then we linearly interpolate between x_start and x_end on our scanline. UV coordinates are interpolated in the same way, ordered by y, starting at begin and end, to which deltas are applied each step.
This works fine except when I try to do perspective correct UV mapping. The basic algorithm is to take UV/z and 1/z for each vertex and interpolate between them. For each pixel, the UV coordinate becomes UV_current * z_current. However, this is the result:
The inverted part shows you where the deltas are flipped. As you can see, the two triangles both seem to be going towards different points in the horizon.
Here's what I use to calculate the Z at a point in space:
float GetZToPoint(Vec3 a_Point)
{
Vec3 projected = m_Rotation * (a_Point - m_Position);
// #define FOV_ANGLE 60.f
// static const float FOCAL_LENGTH = 1 / tanf(_RadToDeg(FOV_ANGLE) / 2);
// static const float DEPTH = HALFHEIGHT * FOCAL_LENGTH;
float zcamera = DEPTH / projected.z;
return zcamera;
}
Am I right, is it a z buffer issue?
The Z-buffer has nothing to do with it.
The Z-buffer is only useful when triangles are overlapping and you want to make sure that they are drawn correctly (e.g. correctly ordered in Z). The Z-buffer will, for every pixel of the triangle, determine if a previously placed pixel is nearer to the camera, and if so, not draw the pixel of your triangle.
Since you are drawing 2 triangles which don't overlap, this cannot be the issue.
I've made a software rasterizer in fixed point once (for a mobile phone), but I don't have the sources on my laptop. So let me check tonight how I did it. In essence what you've got is not bad! A thing like this could be caused by a very small error.
A general tip for debugging this is to have a few test triangles (slope left-side, slope right-side, 90-degree angles, etc.) and step through it with the debugger to see how your logic deals with the cases.
EDIT:
Pseudocode of my rasterizer (only U, V and Z are taken into account; if you also want to do Gouraud shading you have to do everything for R, G and B similar to what you are doing for U, V and Z):
The idea is that a triangle can be broken down into 2 parts. The top part and the bottom part. The top is from y[0] to y[1] and the bottom part is from y[1] to y[2]. For both sets you need to calculate the step variables with which you are interpolating. The below example shows you how to do the top part. If needed I can supply the bottom part too.
Please note that I already calculate the needed interpolation offsets for the bottom part in the below 'pseudocode' fragment.
first order the coords(x,y,z,u,v) in the order so that coord[0].y < coord[1].y < coord[2].y
next check if any 2 sets of coordinates are identical (only check x and y). If so don't draw
exception: does the triangle have a flat top? if so, the first slope will be infinite
exception2: does the triangle have a flat bottom (yes triangles can have these too ;^) ) then the last slope too will be infinite
calculate 2 slopes (left side and right side)
leftDeltaX = (x[1] - x[0]) / (y[1]-y[0]) and rightDeltaX = (x[2] - x[0]) / (y[2]-y[0])
the second part of the triangle is calculated depending on whether the left side of the triangle really is on the left side (or needs swapping)
code fragment:
if (leftDeltaX < rightDeltaX)
{
leftDeltaX2 = (x[2]-x[1]) / (y[2]-y[1])
rightDeltaX2 = rightDeltaX
leftDeltaU = (u[1]-u[0]) / (y[1]-y[0]) //for texture mapping
leftDeltaU2 = (u[2]-u[1]) / (y[2]-y[1])
leftDeltaV = (v[1]-v[0]) / (y[1]-y[0]) //for texture mapping
leftDeltaV2 = (v[2]-v[1]) / (y[2]-y[1])
leftDeltaZ = (z[1]-z[0]) / (y[1]-y[0]) //for texture mapping
leftDeltaZ2 = (z[2]-z[1]) / (y[2]-y[1])
}
else
{
swap(leftDeltaX, rightDeltaX);
leftDeltaX2 = leftDeltaX;
rightDeltaX2 = (x[2]-x[1]) / (y[2]-y[1])
leftDeltaU = (u[2]-u[0]) / (y[2]-y[0]) //for texture mapping
leftDeltaU2 = leftDeltaU
leftDeltaV = (v[2]-v[0]) / (y[2]-y[0]) //for texture mapping
leftDeltaV2 = leftDeltaV
leftDeltaZ = (z[2]-z[0]) / (y[2]-y[0]) //for texture mapping
leftDeltaZ2 = leftDeltaZ
}
set the currentLeftX and currentRightX both on x[0]
set currentLeftU on leftDeltaU, currentLeftV on leftDeltaV and currentLeftZ on leftDeltaZ
calc start and endpoint for first Y range: startY = ceil(y[0]); endY = ceil(y[1])
prestep x,u,v and z for the fractional part of y for subpixel accuracy (I guess this is also needed for floats)
For my fixed-point algorithms this was needed to make the lines and textures give the illusion of moving in much finer steps than the resolution of the display.
calculate where x should be at y[1]: halfwayX = (x[2]-x[0]) * (y[1]-y[0]) / (y[2]-y[0]) + x[0]
and same for U and V and z: halfwayU = (u[2]-u[0]) * (y[1]-y[0]) / (y[2]-y[0]) + u[0]
and using the halfwayX calculate the stepper for the U and V and z:
if(halfwayX - x[1] == 0){ slopeU=0, slopeV=0, slopeZ=0 } else { slopeU = (halfwayU - U[1]) / (halfwayX - x[1])} //(and same for v and z)
do clipping for the Y top (so calculate where we are going to start to draw in case the top of the triangle is off screen (or off the clipping rectangle))
for y=startY; y < endY; y++)
{
is Y past bottom of screen? stop rendering!
calc startX and endX for the first horizontal line
leftCurX = ceil(startx); leftCurY = ceil(endy);
clip the line to be drawn to the left horizontal border of the screen (or clipping region)
prepare a pointer to the destination buffer (doing it through array indexes everytime is too slow)
unsigned int *buf = destbuf + (y * pitch) + startX; (unsigned int in case you are doing 24-bit or 32-bit rendering)
also prepare your ZBuffer pointer here (if you are using this)
for(x=startX; x < endX; x++)
{
now for perspective texture mapping (using no bilinear interpolation) you do the following:
code fragment:
float tv = startV / startZ;
float tu = startU / startZ;
tv %= texturePitch; //make sure the texture coordinates stay on the texture if they are too wide/high
tu %= texturePitch; //I'm assuming square textures here. With fixed point you could have used &=
unsigned int *textPtr = textureBuf+tu + (tv*texturePitch); //in case of fixedpoints one could have shifted the tv. Now we have to multiply everytime.
int destColTm = *(textPtr); //this is the color (if we only use texture mapping) we'll be needing for the pixel
optional: check the zbuffer if the previously plotted pixel at this coordinate is higher or lower then ours.
plot the pixel
startZ += slopeZ; startU+=slopeU; startV += slopeV; //update all interpolators
} end of x loop
leftCurX += leftDeltaX; rightCurX += rightDeltaX; leftCurU += leftDeltaU; leftCurV += leftDeltaV; leftCurZ += leftDeltaZ; //update Y interpolators
} end of y loop
//this is the end of the first part. We now have drawn half the triangle. from the top, to the middle Y coordinate.
// we now basically do the exact same thing but now for the bottom half of the triangle (using the other set of interpolators)
let me know if this helps you solve the problem you are facing!
I don't know that I can help with your question, but one of the best books on software rendering that I had read at the time is available online: Graphics Programming Black Book by Michael Abrash.
If you are interpolating 1/z, you need to multiply UV/z by z, not 1/z. Assuming you have this:
UV = UV_current * z_current
and z_current is interpolating 1/z, you should change it to:
UV = UV_current / z_current
And then you might want to rename z_current to something like one_over_z_current.
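Put differently, a per-scanline sketch of that correction (all names here are mine): interpolate u/z, v/z and 1/z linearly across the span and divide back per pixel:
// Per-vertex setup, done once before walking the edges:
//   uOverZ = u / z;   vOverZ = v / z;   oneOverZ = 1.0f / z;
// These three quantities interpolate linearly in screen space, so per span:
void drawSpan(int xStart, int xEnd,
              float uOverZ, float vOverZ, float oneOverZ,
              float duOverZ, float dvOverZ, float dOneOverZ) {
    for (int x = xStart; x < xEnd; ++x) {
        float z = 1.0f / oneOverZ;   // recover z for this pixel
        float u = uOverZ * z;        // perspective-correct u
        float v = vOverZ * z;        // perspective-correct v
        // ... sample the texture at (u, v) and plot the pixel at (x, y) ...
        uOverZ   += duOverZ;
        vOverZ   += dvOverZ;
        oneOverZ += dOneOverZ;
    }
}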