How to model an ellipsoid in OpenGL using polygons - C++

I want to model an ellipsoid using triangles and subdivision.
The code below, referenced from the OpenGL Programming Guide, models a sphere, but I don't know how to modify it to model an ellipsoid.
#define X .525731112119133606
#define Z .850650808352039932
static GLfloat vdata[12][3] = {
  { -X, 0.0, Z }, { X, 0.0, Z }, { -X, 0.0, -Z }, { X, 0.0, -Z },
  { 0.0, Z, X }, { 0.0, Z, -X }, { 0.0, -Z, X }, { 0.0, -Z, -X },
  { Z, X, 0.0 }, { -Z, X, 0.0 }, { Z, -X, 0.0 }, { -Z, -X, 0.0 }
};
static GLuint tindices[20][3] = {
{ 1, 4, 0 }, { 4, 9, 0 }, { 4, 5, 9 }, { 8, 5, 4 }, { 1, 8, 4 },
 { 1, 10, 8 }, { 10, 3, 8 }, { 8, 3, 5 }, { 3, 2, 5 }, { 3, 7, 2 },
 { 3, 10, 7 }, { 10, 6, 7 }, { 6, 11, 7 }, { 6, 0, 11 }, { 6, 1, 0 },
 { 10, 1, 6 }, { 11, 0, 9 }, { 2, 11, 9 }, { 5, 2, 9 }, { 11, 2, 7 },
};
//draws triangle at the specified coordinate
void drawtriangle(float *v1, float *v2, float *v3){
printf("v1 = %f, v3 = %f,v3 = %f\n", *v1, *v2, *v3);
glBegin(GL_TRIANGLES);
glNormal3fv(v1);
glVertex3fv(v1);
glNormal3fv(v2);
glVertex3fv(v2);
glNormal3fv(v3);
glVertex3fv(v3);
glEnd();
}
void normalize(float v[3]){
GLfloat d = sqrt(v[0] * v[0] + v[1] * v[1] + v[2] * v[2]);
if (d == 0.0){
printf("zero length vector\n");
return;
}
v[0] /= d;
v[1] /= d;
v[2] /= d;
}
void subdivide(float *v1, float *v2, float *v3, long depth){
GLfloat v12[3], v23[3], v31[3];
GLint i;
//end recursion
if (depth == 0){
drawtriangle(v1, v2, v3);
return;
}
for (i = 0; i < 3; i++){
v12[i] = (v1[i] + v2[i]) / 2.0;
v23[i] = (v2[i] + v3[i]) / 2.0;
v31[i] = (v3[i] + v1[i]) / 2.0;
}
normalize(v12);
normalize(v23);
normalize(v31);
subdivide(v1, v12, v31, depth - 1);
subdivide(v2, v23, v12, depth - 1);
subdivide(v3, v31, v23, depth - 1);
subdivide(v12, v23, v31, depth - 1);
}
void display(void){
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
glShadeModel(GL_FLAT);
glRotatef(300.0, 0.5, 1.0, 0.5);
for (int i = 0; i < 20; i++){
subdivide(&vdata[tindices[i][0]][0], &vdata[tindices[i][1]][0], &vdata[tindices[i][2]][0], 1);
}
glFlush();
}

As long as the ellipsoid is axis-aligned, it's not much more difficult than the sphere. The code you have calculates vertices on a unit sphere. For a sphere with radius r, you multiply those unit-sphere points (vx, vy, vz) by the radius:
sx = r * vx
sy = r * vy
sz = r * vz
The ellipsoid is a generalization, where the radii in the 3 coordinate directions can be different. With the 3 radii rx, ry, and rz, the points are then calculated as:
sx = rx * vx
sy = ry * vy
sz = rz * vz
It gets slightly more interesting with the normals. Spheres have the convenient property that the position and normal vectors are identical. This does not apply to ellipsoids. For the normals, you have to divide by the corresponding radius instead (see the derivation of the normal matrix for non-uniform scaling for the mathematical background). So the normals for the ellipsoid are calculated as:
nx = vx / rx
ny = vy / ry
nz = vz / rz
To fit this into your code with minimal changes, you can change the drawtriangle() function to:
glBegin(GL_TRIANGLES);
glNormal3f(v1[0] / rx, v1[1] / ry, v1[2] / rz);
glVertex3f(v1[0] * rx, v1[1] * ry, v1[2] * rz);
glNormal3f(v2[0] / rx, v2[1] / ry, v2[2] / rz);
glVertex3f(v2[0] * rx, v2[1] * ry, v2[2] * rz);
glNormal3f(v3[0] / rx, v3[1] / ry, v3[2] / rz);
glVertex3f(v3[0] * rx, v3[1] * ry, v3[2] * rz);
glEnd();
With these calculations, the normal vectors will generally not be normalized anymore. You can ask OpenGL to normalize them for you by adding this call to your initialization code:
glEnable(GL_NORMALIZE);
If you care about performance at all, calculating the points each time you want to render a sphere will be highly inefficient. You will want to calculate them once, and store them away for rendering. And while you're at it, you can store them in a vertex buffer, and get rid of the immediate mode rendering.
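As a minimal sketch of that idea (this is not from the Programming Guide; rx, ry and rz are the ellipsoid radii from above, and normalize() is the helper from the question), the subdivision can be run once at startup to fill plain arrays that can later be uploaded to a vertex buffer:
#include <cmath>
#include <vector>

// append one scaled position and its (unit length) ellipsoid normal
static void appendVertex(std::vector<GLfloat>& pos, std::vector<GLfloat>& nrm,
                         const float* v, float rx, float ry, float rz)
{
    pos.push_back(v[0] * rx); pos.push_back(v[1] * ry); pos.push_back(v[2] * rz);
    float n[3] = { v[0] / rx, v[1] / ry, v[2] / rz };
    float d = std::sqrt(n[0] * n[0] + n[1] * n[1] + n[2] * n[2]);
    nrm.push_back(n[0] / d); nrm.push_back(n[1] / d); nrm.push_back(n[2] / d);
}

// same recursion as subdivide(), but it stores the triangles instead of drawing them
static void buildTriangles(const float* v1, const float* v2, const float* v3, int depth,
                           std::vector<GLfloat>& pos, std::vector<GLfloat>& nrm,
                           float rx, float ry, float rz)
{
    if (depth == 0) {
        appendVertex(pos, nrm, v1, rx, ry, rz);
        appendVertex(pos, nrm, v2, rx, ry, rz);
        appendVertex(pos, nrm, v3, rx, ry, rz);
        return;
    }
    float v12[3], v23[3], v31[3];
    for (int i = 0; i < 3; i++) {
        v12[i] = (v1[i] + v2[i]) / 2.0f;
        v23[i] = (v2[i] + v3[i]) / 2.0f;
        v31[i] = (v3[i] + v1[i]) / 2.0f;
    }
    normalize(v12); normalize(v23); normalize(v31); // back onto the unit sphere
    buildTriangles(v1, v12, v31, depth - 1, pos, nrm, rx, ry, rz);
    buildTriangles(v2, v23, v12, depth - 1, pos, nrm, rx, ry, rz);
    buildTriangles(v3, v31, v23, depth - 1, pos, nrm, rx, ry, rz);
    buildTriangles(v12, v23, v31, depth - 1, pos, nrm, rx, ry, rz);
}
// Call buildTriangles() once for all 20 icosahedron faces, then upload pos/nrm with
// glBufferData() (or draw them with glVertexPointer()/glNormalPointer() and glDrawArrays()).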

Related

Creating multiple Bezier curves using GL_MAP1_VERTEX_3 function in OpenGL

So I am trying to create an arbitrary curved shape using OpenGL, and currently my code is only able to produce one curve between the specified control points. Below is my OpenGL code:
#include <GL/glut.h>
#include <stdlib.h>
GLfloat controlPoints[18][3] =
{
{0.0, 8.0, 0.0},
{ -1.5, 3.0, 0.0}, //2
{-5.5, 4.0, 0.0},
{-5.5, 4.0, 0.0},
{-2.5, 0.0, 0.0}, //4
{-6.0, -4.0, 0.0},
{-6.0, -4.0, 0.0},
{-1.5, -3.0, 0.0}, //6
{0.0, -8.0, 0.0},
{0.0, -8.0, 0.0},
{1.0, -3.0, 0.0}, //8
{6.0, -5.0, 0.0},
{6.0, -5.0, 0.0},
{3.0, 0.0, 0.0}, //10
{6.5, 4.5, 0.0},
{6.5, 4.5, 0.0},
{1.5, 3.0, 0.0}, //12
{0.0, 8.0, 0.0}
};
void init(void)
{
glClearColor(0.0, 0.0, 0.0, 0.0);
glShadeModel(GL_FLAT);
for (int i = 0; (i + 3) < 3; i += 3)
{
glMap1f(GL_MAP1_VERTEX_3, 0.0, 1.0, 3, 4, &controlPoints[i][0]);
}
//glMap1f(GL_MAP1_VERTEX_3, 0.0, 1.0, 3, 4, &controlPoints2[0][0]);
glEnable(GL_MAP1_VERTEX_3);
// The evaluator with a stride of 3 and an order of 4
}
void display(void)
{
int i;
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
glColor3f(1.0, 1.0, 1.0);
//draw(controlPoints);
//draw(controlPoints2);
glBegin(GL_LINE_STRIP);
{
for (int i = 0; i <= 18; i++)
{
glEvalCoord1f((GLfloat)i / 18.0);
}
}
glEnd();
glBegin(GL_LINE_STRIP);
{
for (i = 0; i < 18; i++)
{
glVertex3fv(&controlPoints[i][0]);
}
}
glEnd();
glPointSize(6.0);
glColor3f(0.0, 0.0, 1.0);
glBegin(GL_POINTS);
{
for (i = 0; i < 18; i++)
{
glVertex3fv(&controlPoints[i][0]);
}
}
glEnd();
}
void reshape(int w, int h)
{
glViewport(0, 0, (GLsizei)w, (GLsizei)h);
glMatrixMode(GL_PROJECTION);
glLoadIdentity();
if (w <= h)
{
glOrtho(-10.0, 10.0, -10.0 * (GLfloat)h / (GLfloat)w, 10.0 * (GLfloat)h / (GLfloat)w, -10.0, 10.0);
}
else
{
glOrtho(-10.0 * (GLfloat)h / (GLfloat)w, 10.0 * (GLfloat)h / (GLfloat)w, -10.0, 10.0, -10.0, 10.0);
}
glMatrixMode(GL_MODELVIEW);
glLoadIdentity();
}
void keyboard(unsigned char key, int x, int y)
{
switch (key)
{
case 27:
exit(0);
break;
}
}
int main(int argc, char** argv)
{
glutInit(&argc, argv);
glutInitDisplayMode(GLUT_SINGLE | GLUT_RGB);
glutInitWindowSize(500, 500);
glutInitWindowPosition(100, 100);
glutCreateWindow(argv[0]);
init();
glutDisplayFunc(display);
glutReshapeFunc(reshape);
glutKeyboardFunc(keyboard);
glutMainLoop();
return 0;
}
How do I modify the init portion of my code so that it produces 6 curves, each from three control points, using all 18 points? And if that's not possible, is there a way I can do it using GL_LINE_STRIP?
Below is what my current output looks like:
My advice - avoid OpenGL evaluators completely!
Aside from some SGI machines back in the 90's, no GPU vendor has ever added hardware support for them, so it falls back to a fairly inefficient software implementation.
Anyhow, there are a few problems in your code...
glMap1f(GL_MAP1_VERTEX_3, 0.0, 1.0, 3,
4, ///< this says you want 4 control points per curve
&controlPoints[i][0]);
However, there is something wrong here in the control points:
GLfloat controlPoints[18][3] =
{
{0.0, 8.0, 0.0},
{ -1.5, 3.0, 0.0}, //2
{-5.5, 4.0, 0.0}, ///< I'm assuming this is the last control point you want?
{-5.5, 4.0, 0.0}, ///< however this is duplicated here?
It looks as though you want a quadratic curve? (i.e. 3 control points per curve?)
// enable evaluators
glEnable(GL_MAP1_VERTEX_3);
// step through each triplet of CV's
for(int cv = 0; cv < 18; cv += 3) {
// specify the control point array
glMap1f(GL_MAP1_VERTEX_3, 0.0, 1.0,
3, ///< each vertex has 3 floats.
3, ///< I assume you want 3? (as in 3x CV per curve)
&controlPoints[cv][0]);
// render this curve segment
glBegin(GL_LINE_STRIP);
{
// choose how many divisions you want
int NUM_DIVISIONS = 32;
for (int i = 0; i <= NUM_DIVISIONS; i++)
{
glEvalCoord1f((GLfloat)i / (GLfloat) NUM_DIVISIONS);
}
}
glEnd();
}
glDisable(GL_MAP1_VERTEX_3);
However, as I said above, GL evaluators are terrible.
It's actually just a lot easier to simply write the code yourself.
One option would be to simply tessellate each curve, and then render it (this would work with your current control point layout):
void render_quadratic_curves(
GLfloat controlPoints[][3],
int num_curves,
int num_divisions) {
int out_size_of_each_curve = (num_divisions + 1) * 3;
// allocate enough memory to store a single curve
GLfloat* temp = new GLfloat[out_size_of_each_curve];
// re-render from the same vertex array.
glVertexPointer(3, GL_FLOAT, sizeof(float) * 3, temp);
glEnableClientState(GL_VERTEX_ARRAY);
for(int curve = 0; curve < num_curves; ++curve) {
// pointers to the control points for this curve
const GLfloat* P0 = controlPoints[3 * curve + 0];
const GLfloat* P1 = controlPoints[3 * curve + 1];
const GLfloat* P2 = controlPoints[3 * curve + 2];
for(int division = 0; division <= num_divisions; ++division) {
GLfloat t = (GLfloat) division / (GLfloat) num_divisions;
GLfloat inv_t = (1.0f - t);
// compute bezier coefficients for quadratic curve
GLfloat B0 = inv_t * inv_t;
GLfloat B1 = 2.0f * inv_t * t;
GLfloat B2 = t * t;
// compute XYZ coordinates
GLfloat x = P0[0] * B0 +
P1[0] * B1 +
P2[0] * B2;
GLfloat y = P0[1] * B0 +
P1[1] * B1 +
P2[1] * B2;
GLfloat z = P0[2] * B0 +
P1[2] * B1 +
P2[2] * B2;
// insert into the buffer for rendering
temp[3 * division + 0] = x;
temp[3 * division + 1] = y;
temp[3 * division + 2] = z;
}
// render this curve in one go as a strip
glDrawArrays(GL_LINE_STRIP, 0, num_divisions + 1);
}
// cleanup
glDisableClientState(GL_VERTEX_ARRAY);
delete [] temp;
}
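For completeness, with the original 18-entry controlPoints array (which already stores 3 CVs per curve, duplicates included), a call to this version might look like:
// 6 quadratic curves, each tessellated into 32 divisions
render_quadratic_curves(controlPoints, 6, 32);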
However, in your case above you effectively have a closed loop, so this can be done in one go with GL_LINE_LOOP instead (this approach would also fit nicely into a VBO):
void render_quadratic_curves_as_loop(
GLfloat controlPoints[][3],
int num_curves,
int num_divisions) {
// curves are 1 vertex smaller in size than previously,
// since the start vertex of one curve, is shared with the
// last vertex of the previous curve
int out_size_of_each_curve = num_divisions * 3;
// allocate enough memory to store all of the curves
GLfloat* temp = new GLfloat[out_size_of_each_curve * num_curves];
for(int curve = 0; curve < num_curves; ++curve) {
GLfloat* this_curve = temp + curve * out_size_of_each_curve;
// pointers to the control points for this curve
// 2 CVs per curve in the de-duplicated array; the end point wraps around to the next curve
const GLfloat* P0 = controlPoints[2 * curve];
const GLfloat* P1 = controlPoints[2 * curve + 1];
const GLfloat* P2 = controlPoints[(2 * curve + 2) % (2 * num_curves)];
// note! I am using less than here!
// the last vertex of each curve is simply the first
// vertex of the next one...
for(int division = 0; division < num_divisions; ++division) {
GLfloat t = (GLfloat) division / (GLfloat) num_divisions;
GLfloat inv_t = (1.0f - t);
// compute bezier coefficients for quadratic curve
GLfloat B0 = inv_t * inv_t;
GLfloat B1 = 2.0f * inv_t * t;
GLfloat B2 = t * t;
// compute XYZ coordinates
GLfloat x = P0[0] * B0 +
P1[0] * B1 +
P2[0] * B2;
GLfloat y = P0[1] * B0 +
P1[1] * B1 +
P2[1] * B2;
GLfloat z = P0[2] * B0 +
P1[2] * B1 +
P2[2] * B2;
// insert into the buffer for rendering
this_curve[3 * division + 0] = x;
this_curve[3 * division + 1] = y;
this_curve[3 * division + 2] = z;
}
}
// re-render from the same vertex array.
// This *could* be replaced with a VBO.
glVertexPointer(3, GL_FLOAT, sizeof(float) * 3, temp);
glEnableClientState(GL_VERTEX_ARRAY);
// render all of the curves in one go.
glDrawArrays(GL_LINE_LOOP, 0, num_divisions * num_curves);
// cleanup
glDisableClientState(GL_VERTEX_ARRAY);
delete [] temp;
}
// You'll now need to remove the duplicate CV's from your array
GLfloat controlPoints[12][3] =
{
{0.0, 8.0, 0.0},
{ -1.5, 3.0, 0.0}, //2
{-5.5, 4.0, 0.0},
{-2.5, 0.0, 0.0}, //4
{-6.0, -4.0, 0.0},
{-1.5, -3.0, 0.0}, //6
{0.0, -8.0, 0.0},
{1.0, -3.0, 0.0}, //8
{6.0, -5.0, 0.0},
{3.0, 0.0, 0.0}, //10
{6.5, 4.5, 0.0},
{1.5, 3.0, 0.0}, //12
};
render_quadratic_curves_as_loop(controlPoints, 6, 32);
If you actually want 4 CV's per curve, then you can easily extend this into a cubic bezier.
// obviously each curve will now need an additional CV
void render_cubic_curves_as_loop(
GLfloat controlPoints[][3],
int num_curves,
int num_divisions) {
// curves are 1 vertex smaller in size than previously,
// since the start vertex of one curve, is shared with the
// last vertex of the previous curve
int out_size_of_each_curve = num_divisions * 3;
// allocate enough memory to store all of the curves
GLfloat* temp = new GLfloat[out_size_of_each_curve * num_curves];
for(int curve = 0; curve < num_curves; ++curve) {
GLfloat* this_curve = temp + curve * out_size_of_each_curve;
// pointers to the control points for this curve
const GLfloat* P0 = controlPoints[4 * curve + 0];
const GLfloat* P1 = controlPoints[4 * curve + 1];
const GLfloat* P2 = controlPoints[4 * curve + 2];
const GLfloat* P3 = controlPoints[4 * curve + 3];
// note! I am using less than here!
// the last vertex of each curve is simply the first
// vertex of the next one...
for(int division = 0; division < num_divisions; ++division) {
GLfloat t = (GLfloat) division / (GLfloat) num_divisions;
GLfloat inv_t = (1.0f - t);
// compute bezier coefficients for cubic curve
GLfloat B0 = inv_t * inv_t * inv_t;
GLfloat B1 = 3.0f * inv_t * inv_t * t;
GLfloat B2 = 3.0f * inv_t * t * t;
GLfloat B3 = t * t * t;
// compute XYZ coordinates
GLfloat x = P0[0] * B0 +
P1[0] * B1 +
P2[0] * B2 +
P3[0] * B3;
GLfloat y = P0[1] * B0 +
P1[1] * B1 +
P2[1] * B2 +
P3[1] * B3;
GLfloat z = P0[2] * B0 +
P1[2] * B1 +
P2[2] * B2 +
P3[2] * B3;
// insert into the buffer for rendering
this_curve[3 * division + 0] = x;
this_curve[3 * division + 1] = y;
this_curve[3 * division + 2] = z;
}
}
// re-render from the same vertex array.
// This *could* be replaced with a VBO.
glVertexPointer(3, GL_FLOAT, sizeof(float) * 3, temp);
glEnableClientState(GL_VERTEX_ARRAY);
// render all of the curves in one go.
glDrawArrays(GL_LINE_LOOP, 0, num_divisions * num_curves);
// cleanup
glDisableClientState(GL_VERTEX_ARRAY);
delete [] temp;
}
NOTE: on modern hardware, if you have tessellation shaders available, that's usually the best option. Failing that, if you have hardware instancing, you can specify the basis coefficients as a shared vertex buffer, and the control points can be specified per instance.
Generate a VBO to store the blending coefficients, and give that attribute a vertex divisor of 0:
void populate_shared_vertex_data_for_VBO(float* out, int NUM_DIVISIONS) {
for(int i = 0; i <= NUM_DIVISIONS; ++i) {
GLfloat t = (GLfloat) i / (GLfloat) NUM_DIVISIONS;
GLfloat inv_t = (1.0f - t);
// compute bezier coefficients for cubic curve
GLfloat B0 = inv_t * inv_t * inv_t;
GLfloat B1 = 3.0f * inv_t * inv_t * t;
GLfloat B2 = 3.0f * inv_t * t * t;
GLfloat B3 = t * t * t;
out[0] = B0;
out[1] = B1;
out[2] = B2;
out[3] = B3;
out += 4;
}
}
Load the control points for all curves into a single BIG VBO, set up the 4 per-instance attributes (i.e. specify 4 varying shader inputs, one for each CV, set each stride to sizeof(Cubic_Curve_CVS), and set the divisor to 1).
struct Cubic_Curve_CVS {
float P0[3];
float P1[3];
float P2[3];
float P3[3];
};
Cubic_Curve_CVS VBO_DATA[NUM_CURVES]; ///< load this
The vertex shader ends up being pretty simple to implement:
#version 450
uniform mat4 vs_mvp;
// share this buffer between all indices,
// i.e. glVertexAttribDivisor(0, 0);
layout(location = 0) in vec4 vs_coeffs;
// make these per-instance attributes
// i.e. :
// glVertexAttribDivisor(1, 1);
// glVertexAttribDivisor(2, 1);
// glVertexAttribDivisor(3, 1);
// glVertexAttribDivisor(4, 1);
layout(location = 1) in vec4 vs_CV0;
layout(location = 2) in vec4 vs_CV1;
layout(location = 3) in vec4 vs_CV2;
layout(location = 4) in vec4 vs_CV3;
void main()
{
float B0 = vs_coeffs.x;
float B1 = vs_coeffs.y;
float B2 = vs_coeffs.z;
float B3 = vs_coeffs.w;
vec4 V = vs_CV0 * B0 +
vs_CV1 * B1 +
vs_CV2 * B2 +
vs_CV3 * B3;
gl_Position = vs_mvp * V;
}
and then just render the whole lot in one go with glDrawArraysInstanced.
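For reference, here is a rough sketch of the client-side setup for that instanced approach (the buffer names coeff_vbo and cv_vbo are placeholders; it assumes coeff_vbo was filled by populate_shared_vertex_data_for_VBO and cv_vbo holds the Cubic_Curve_CVS array above):
// shared basis-coefficient buffer: one vec4 of weights per strip vertex
glBindBuffer(GL_ARRAY_BUFFER, coeff_vbo);
glEnableVertexAttribArray(0);
glVertexAttribPointer(0, 4, GL_FLOAT, GL_FALSE, sizeof(float) * 4, (void*)0);
glVertexAttribDivisor(0, 0);           // advances once per vertex

// per-instance control points: four vec3 attributes that step once per curve
glBindBuffer(GL_ARRAY_BUFFER, cv_vbo);
for (int i = 0; i < 4; ++i) {
    glEnableVertexAttribArray(1 + i);
    glVertexAttribPointer(1 + i, 3, GL_FLOAT, GL_FALSE,
                          sizeof(Cubic_Curve_CVS),
                          (void*)(sizeof(float) * 3 * i));
    glVertexAttribDivisor(1 + i, 1);   // advances once per instance (per curve)
}

// one call renders every curve: NUM_DIVISIONS + 1 strip vertices, NUM_CURVES instances
glDrawArraysInstanced(GL_LINE_STRIP, 0, NUM_DIVISIONS + 1, NUM_CURVES);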

Incorrect render of a cube mesh in DirectX 11

I am practicing DirectX 11 following Frank Luna's book.
I have implemented a demo that renders a cube, but the result is not correct.
https://i.imgur.com/2uSkEiq.gif
As I hope you can see from the image (I apologize for the low quality), it seems like the camera is "trapped" inside the cube even when I move it away. There is also a camera frustum clipping problem.
I think the problem is therefore in the definition of the projection matrix.
Here is the cube vertices definition.
std::vector<Vertex> vertices =
{
{XMFLOAT3(-1, -1, -1), XMFLOAT4(1, 1, 1, 1)},
{XMFLOAT3(-1, +1, -1), XMFLOAT4(0, 0, 0, 1)},
{XMFLOAT3(+1, +1, -1), XMFLOAT4(1, 0, 0, 1)},
{XMFLOAT3(+1, -1, -1), XMFLOAT4(0, 1, 0, 1)},
{XMFLOAT3(-1, -1, +1), XMFLOAT4(0, 0, 1, 1)},
{XMFLOAT3(-1, +1, +1), XMFLOAT4(1, 1, 0, 1)},
{XMFLOAT3(+1, +1, +1), XMFLOAT4(0, 1, 1, 1)},
{XMFLOAT3(+1, -1, +1), XMFLOAT4(1, 0, 1, 1)},
};
Here is how I calculate the view and projection matrices.
void TestApp::OnResize()
{
D3DApp::OnResize();
mProj = XMMatrixPerspectiveFovLH(XM_PIDIV4, AspectRatio(), 1, 1000);
}
void TestApp::UpdateScene(float dt)
{
float x = mRadius * std::sin(mPhi) * std::cos(mTheta);
float y = mRadius * std::cos(mPhi);
float z = mRadius * std::sin(mPhi) * std::sin(mTheta);
XMVECTOR EyePosition = XMVectorSet(x, y, z, 1);
XMVECTOR FocusPosition = XMVectorZero();
XMVECTOR UpDirection = XMVectorSet(0, 1, 0, 0);
mView = XMMatrixLookAtLH(EyePosition, FocusPosition, UpDirection);
}
And here is how I update the camera position on mouse move.
glfwSetCursorPosCallback(mMainWindow, [](GLFWwindow* window, double xpos, double ypos)
{
TestApp* app = reinterpret_cast<TestApp*>(glfwGetWindowUserPointer(window));
if (glfwGetMouseButton(window, GLFW_MOUSE_BUTTON_LEFT) == GLFW_PRESS)
{
float dx = 0.25f * XMConvertToRadians(xpos - app->mLastMousePos.x);
float dy = 0.25f * XMConvertToRadians(ypos - app->mLastMousePos.y);
app->mTheta += dx;
app->mPhi += dy;
app->mPhi = std::clamp(app->mPhi, 0.1f, XM_PI - 0.1f);
}
else if (glfwGetMouseButton(window, GLFW_MOUSE_BUTTON_RIGHT) == GLFW_PRESS)
{
float dx = 0.05f * XMConvertToRadians(xpos - app->mLastMousePos.x);
float dy = 0.05f * XMConvertToRadians(ypos - app->mLastMousePos.y);
app->mRadius += (dx - dy);
app->mRadius = std::clamp(app->mRadius, 3.f, 15.f);
}
app->mLastMousePos = XMFLOAT2(xpos, ypos);
});
Thanks.
The root problem here was in how the constant buffer is updated from the CPU.
HLSL defaults to column-major matrix definitions per Microsoft Docs. DirectXMath uses row-major matrices, so you have to transpose while updating the Constant Buffer.
Alternatively, you can declare the HLSL matrix with the row_major keyword, #pragma pack_matrix, or the /Zpr compiler switch.
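As a hedged illustration of the first option (the struct, member and buffer names below are placeholders, not code from Luna's framework):
// mirror of the HLSL cbuffer; HLSL reads these matrices as column-major by default
struct ObjectConstants
{
    XMFLOAT4X4 WorldViewProj;
};

void TestApp::UpdateConstantBuffer(ID3D11DeviceContext* context)
{
    XMMATRIX world = XMMatrixIdentity();
    XMMATRIX worldViewProj = world * mView * mProj;

    // transpose the row-major DirectXMath matrix before copying it to the GPU
    ObjectConstants cb;
    XMStoreFloat4x4(&cb.WorldViewProj, XMMatrixTranspose(worldViewProj));

    // mObjectCB is assumed to be the ID3D11Buffer* backing the cbuffer
    context->UpdateSubresource(mObjectCB, 0, nullptr, &cb, 0, 0);
}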

How to solve problem with lookat matrix on OpenGL/GLSL

I have the following code for my own look-at matrix (multiplication of matrices and the cross product of vectors work perfectly; I checked them):
template<typename Type>
void setLookAt(Matrix4x4<Type>& matrix, const Vector3<Type> eye, const Vector3<Type> center, const Vector3<Type> up) noexcept
{
Math::Vector3f right = Math::cross(center, up).normalize();
Matrix4x4f lookAt({
right.getX(), right.getY(), right.getZ(), 0.0,
up.getX(), up.getY(), up.getZ(), 0.0,
center.getX(), center.getY(), center.getZ(), 0.0,
0.0, 0.0, 0.0, 1.0
});
Matrix4x4f additionalMatrix({
0.0, 0.0, 0.0, -(eye.getX()),
0.0, 0.0, 0.0, -(eye.getY()),
0.0, 0.0, 0.0, -(eye.getZ()),
0.0, 0.0, 0.0, 1.0
});
lookAt.mul(additionalMatrix);
matrix = lookAt;
}
template<typename Type>
void setPerspectiveMatrix(Matrix4x4<Type>& matrix, Type fov, Type aspect, Type znear, Type zfar) noexcept
{
const Type yScale = static_cast<Type>(1.0 / tan(RADIANS_PER_DEGREE * fov / 2));
const Type xScale = yScale / aspect;
const Type difference = znear - zfar;
matrix = {
xScale, 0, 0, 0,
0, yScale, 0, 0,
0, 0, (zfar + znear) / difference, 2 * zfar * znear / difference,
0, 0, -1, 0
};
}
Matrix multiplication implementation:
// static const std::uint8_t ROW_SIZE = 4;
// static const std::uint8_t MATRIX_SIZE = ROW_SIZE * ROW_SIZE;
// static const std::uint8_t FIRST_ROW = 0;
// static const std::uint8_t SECOND_ROW = ROW_SIZE;
// static const std::uint8_t THIRD_ROW = ROW_SIZE + ROW_SIZE;
// static const std::uint8_t FOURTH_ROW = ROW_SIZE + ROW_SIZE + ROW_SIZE;
template<class Type>
void Matrix4x4<Type>::mul(const Matrix4x4& anotherMatrix) noexcept
{
Type currentElements[MATRIX_SIZE];
std::copy(std::begin(mElements), std::end(mElements), currentElements);
const Type* otherElements = anotherMatrix.mElements;
for (std::uint8_t i = 0; i < MATRIX_SIZE; i += ROW_SIZE)
{
mElements[i] = currentElements[i] * otherElements[FIRST_ROW] +
currentElements[i + 1] * otherElements[SECOND_ROW] +
currentElements[i + 2] * otherElements[THIRD_ROW] +
currentElements[i + 3] * otherElements[FOURTH_ROW];
mElements[i + 1] = currentElements[i] * otherElements[FIRST_ROW + 1] +
currentElements[i + 1] * otherElements[SECOND_ROW + 1] +
currentElements[i + 2] * otherElements[THIRD_ROW + 1] +
currentElements[i + 3] * otherElements[FOURTH_ROW + 1];
mElements[i + 2] = currentElements[i] * otherElements[FIRST_ROW + 2] +
currentElements[i + 1] * otherElements[SECOND_ROW + 2] +
currentElements[i + 2] * otherElements[THIRD_ROW + 2] +
currentElements[i + 3] * otherElements[FOURTH_ROW + 2];
mElements[i + 3] = currentElements[i] * otherElements[FIRST_ROW + 3] +
currentElements[i + 1] * otherElements[SECOND_ROW + 3] +
currentElements[i + 2] * otherElements[THIRD_ROW + 3] +
currentElements[i + 3] * otherElements[FOURTH_ROW + 3];
}
}
Cross product implementation:
template<typename Type>
Math::Vector3<Type> cross(Vector3<Type> vector, Vector3<Type> anotherVector) noexcept
{
const Type x = vector.getY()*anotherVector.getZ() - vector.getZ()*anotherVector.getY();
const Type y = -(vector.getX()*anotherVector.getZ() - vector.getZ()*anotherVector.getX());
const Type z = vector.getX()*anotherVector.getY() - vector.getY()*anotherVector.getX();
return { x, y, z };
}
Using it:
// OpenGL
glUseProgram(mProgramID);
Matrix4x4f lookAt;
setLookAt(lookAt, { 0.0f, 0.0f, 3.0f }, { 0.0f, 0.0f, -1.0f }, { 0.0f, 1.0f, 0.0f });
glUniformMatrix4fv(glGetAttribLocation(mProgramID, "viewMatrix"), 1, GL_TRUE, lookAt);
Matrix4x4f projection;
setPerspectiveMatrix(projection, 45.0f, width / height, -0.1, 100.0f);
glUniformMatrix4fv(glGetAttribLocation(mProgramID, "projectionMatrix "), 1, GL_TRUE, projection);
// GLSL
layout (location = 0) in vec3 position;
uniform mat4 viewMatrix;
uniform mat4 projectionMatrix;
void main()
{
gl_Position = projectionMatrix * viewMatrix * vec4(position, 1.0f);
}
After using this code, I get a blank screen, although it should draw a cube. The problem is in the matrix itself, since other matrices work fine (offset, rotation, ...), but I can't tell exactly where. Can you tell me what the problem could be?
"projectionMatrix" and "viewMatrix" are uniform variables. The uniform location can be get by glGetUniformLocation rather than glGetAttribLocation, which would return the attribute index of an active attribute:
GLint projLoc = glGetUniformLocation( mProgramID, "projectionMatrix" );
GLint viewLoc = glGetUniformLocation( mProgramID, "viewMatrix" );
At Perspective Projection the projection matrix describes the mapping from 3D points in the world, as they are seen from a pinhole camera, to 2D points of the viewport.
The eye space coordinates in the camera frustum (a truncated pyramid) are mapped to a cube (the normalized device coordinates).
At perspective projection the view space (volume) is defined by a frustum (a truncated pyramid), where the top of the pyramid is the viewer's position.
The direction of view (line of sight) and the near and far distances define the planes which truncate the pyramid to a frustum (the direction of view is the normal vector of these planes).
This means both values, the distance to the near plane and the distance to the far plane have to be positive values:
Matrix4x4f lookAt;
setLookAt(lookAt, { 0.0f, 0.0f, 3.0f }, { 0.0f, 0.0f, -1.0f }, { 0.0f, 1.0f, 0.0f });
glUniformMatrix4fv(viewLoc, 1, GL_TRUE, lookAt);
Matrix4x4f projection;
setPerspectiveMatrix(projection, 45.0f, width / height, 0.1f, 100.0f); // 0.1f instead of -0.1f
glUniformMatrix4fv(projLoc, 1, GL_TRUE, projection);
The view space is the local system which is defined by the point of view onto the scene.
The position of the view, the line of sight and the upwards direction of the view, define a coordinate system relative to the world coordinate system.
The view matrix has to transform from world space to view space, so the view matrix is the inverse matrix of the view coordinate system.
If the coordinate system of the view space is a right-handed system, where the X-axis points to the right and the Y-axis points up, then the Z-axis points out of the view (note that in a right-handed system the Z-axis is the cross product of the X-axis and the Y-axis).
The Z-axis therefore points opposite to the line of sight; it is the vector from the target center to the point of view eye:
template<typename Type>
void setLookAt(Matrix4x4<Type>& matrix, const Vector3<Type> eye, const Vector3<Type> center, const Vector3<Type> up) noexcept
{
Vector3f mz( { eye.getX()-center.getX(), eye.getY()-center.getY(), eye.getZ()-center.getZ() } );
mz = mz.normalize();
Vector3f my = up.normalize();
Vector3f mx = cross(my, mz).normalize();
Type tx = -dot( mx, eye );
Type ty = -dot( my, eye );
Type tz = -dot( mz, eye );
matrix = {
mx.getX(), mx.getY(), mx.getZ(), tx,
my.getX(), my.getY(), my.getZ(), ty,
mz.getX(), mz.getY(), mz.getZ(), tz,
0.0, 0.0, 0.0, 1.0
};
}
template<typename Type>
Vector3<Type> cross(Vector3<Type> vector, Vector3<Type> anotherVector) noexcept
{
const Type x = vector.getY()*anotherVector.getZ() - vector.getZ()*anotherVector.getY();
const Type y = -(vector.getX()*anotherVector.getZ() - vector.getZ()*anotherVector.getX());
const Type z = vector.getX()*anotherVector.getY() - vector.getY()*anotherVector.getX();
return { x, y, z };
}
template<typename Type>
Vector3<Type> Vector3<Type>::normalize(void) const
{
Type len = std::sqrt(mV[0]*mV[0] + mV[1]*mV[1] + mV[2]*mV[2]);
return { mV[0] / len, mV[1] / len, mV[2] / len };
}
template<typename Type>
Type dot(Vector3<Type> vector, Vector3<Type> anotherVector) noexcept
{
Type ax = vector.getX(), ay = vector.getY(), az = vector.getZ();
Type bx = anotherVector.getX(), by = anotherVector.getY(), bz = anotherVector.getZ();
return ax*bx + ay*by + az*bz;
}
A perspective projection matrix can be defined by a frustum.
The distances left, right, bottom and top, are the distances from the center of the view to the side faces of the frustum, on the near plane. near and far specify the distances to the near and far plane on the frustum.
r = right, l = left, b = bottom, t = top, n = near, f = far
x            y            z               t
2*n/(r-l)    0            (r+l)/(r-l)     0
0            2*n/(t-b)    (t+b)/(t-b)     0
0            0            -(f+n)/(f-n)    -2*f*n/(f-n)
0            0            -1              0
If the projection is symmetric, where the line of sight is the axis of symmetry of the view frustum, then the matrix can be simplified:
x           y         z               t
1/(ta*a)    0         0               0
0           1/ta      0               0
0           0         -(f+n)/(f-n)    -2*f*n/(f-n)
0           0         -1              0
where:
a = w / h
ta = tan( fov_y / 2 );
2 * n / (r-l) = 1 / (ta * a)
2 * n / (t-b) = 1 / ta
Further, the projection matrix switches from a right-handed system to a left-handed system, because the z-axis is flipped.
template<typename Type>
void setPerspectiveMatrix(Matrix4x4<Type>& matrix, Type fov, Type aspect, Type znear, Type zfar) noexcept
{
const Type yScale = static_cast<Type>(1.0 / tan(RADIANS_PER_DEGREE * fov / 2));
const Type xScale = yScale / aspect;
const Type difference = zfar - znear;
matrix = {
xScale, 0, 0, 0,
0, yScale, 0, 0,
0, 0, -(zfar + znear) / difference, -2 * zfar * znear / difference,
0, 0, -1, 0
};
}

Algorithm for a geodesic sphere

I have to make a sphere out of smaller uniformly distributed balls. I think the optimal way is to build a triangle-based geodesic sphere and use the vertices as the middle points of my balls. But I fail to write an algorithm generating the vertices.
An answer in C++ or pseudo-code would be best.
Example of a geodesic sphere: http://i.stack.imgur.com/iNQfP.png
Using the link #Muckle_ewe gave me, I was able to code the following algorithm:
Outside the main()
class Vector3d { // this is a pretty standard vector class
public:
double x, y, z;
...
}
void subdivide(const Vector3d &v1, const Vector3d &v2, const Vector3d &v3, vector<Vector3d> &sphere_points, const unsigned int depth) {
if(depth == 0) {
sphere_points.push_back(v1);
sphere_points.push_back(v2);
sphere_points.push_back(v3);
return;
}
const Vector3d v12 = (v1 + v2).norm();
const Vector3d v23 = (v2 + v3).norm();
const Vector3d v31 = (v3 + v1).norm();
subdivide(v1, v12, v31, sphere_points, depth - 1);
subdivide(v2, v23, v12, sphere_points, depth - 1);
subdivide(v3, v31, v23, sphere_points, depth - 1);
subdivide(v12, v23, v31, sphere_points, depth - 1);
}
void initialize_sphere(vector<Vector3d> &sphere_points, const unsigned int depth) {
const double X = 0.525731112119133606;
const double Z = 0.850650808352039932;
const Vector3d vdata[12] = {
{-X, 0.0, Z}, { X, 0.0, Z }, { -X, 0.0, -Z }, { X, 0.0, -Z },
{ 0.0, Z, X }, { 0.0, Z, -X }, { 0.0, -Z, X }, { 0.0, -Z, -X },
{ Z, X, 0.0 }, { -Z, X, 0.0 }, { Z, -X, 0.0 }, { -Z, -X, 0.0 }
};
int tindices[20][3] = {
{0, 4, 1}, { 0, 9, 4 }, { 9, 5, 4 }, { 4, 5, 8 }, { 4, 8, 1 },
{ 8, 10, 1 }, { 8, 3, 10 }, { 5, 3, 8 }, { 5, 2, 3 }, { 2, 7, 3 },
{ 7, 10, 3 }, { 7, 6, 10 }, { 7, 11, 6 }, { 11, 0, 6 }, { 0, 1, 6 },
{ 6, 1, 10 }, { 9, 0, 11 }, { 9, 11, 2 }, { 9, 2, 5 }, { 7, 2, 11 }
};
for(int i = 0; i < 20; i++)
subdivide(vdata[tindices[i][0]], vdata[tindices[i][1]], vdata[tindices[i][2]], sphere_points, depth);
}
Then in the main():
vector<Vector3d> sphere_points;
initialize_sphere(sphere_points, DEPTH); // where DEPTH should be the subdivision depth
for(const Vector3d &point : sphere_points)
const Vector3d point_tmp = point * RADIUS + CENTER; // Then for the sphere I want to draw, I iterate over all the precomputed sphere points and with a linear function translate the sphere to its CENTER and chose the proper RADIUS
You actually only need to use initialize_sphere() once and use the result for every sphere you want to draw.
I've done this before for a graphics project; the algorithm I used is detailed on this website:
http://www.opengl.org.ru/docs/pg/0208.html
Just ignore any OpenGL drawing calls and only code up the parts that deal with creating the actual vertices.
There are well known algorithms to triangulate surfaces. You should be able to use the GNU Triangulated Surface Library to generate a suitable mesh if you don't want to code one of them up yourself.
It depends on the number of triangles you want the sphere to have. You can potentially have infinite resolution.
First focus on creating a dome, you can double it later by taking the negative coordinates of your upper dome. You will generate the sphere by interlocking rows of triangles.
Your triangles are equilateral, so decide on a length.
Divide 2(pi)r by the number of triangles you want to be on the bottom row of the dome.
This will be the length of each side of each triangle.
Next you need to create a concentric circle that intersects the surface of the sphere.
Between this circle and the base of the dome will be your first row.
You will need to find the angle that each triangle is tilted. (I will post later when I figure that out)
Repeat the process for each concentric circle (generating a row) until the height of a row times the number of rows approximately equals the 2(pi)r that you started with.
I will try to program it later if I get a chance. You could also try posting in the Math forum.
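In the meantime, here is a rough sketch of the ring-by-ring idea described above (the parameters num_rows and balls_on_equator are my own assumptions, not part of the original description):
// Place ball centers on latitude rings of the upper dome, then mirror the
// y coordinate for the lower half (skip the equator row to avoid duplicates).
#include <cmath>
#include <vector>

struct Point3 { double x, y, z; };

std::vector<Point3> dome_points(double r, int num_rows, int balls_on_equator)
{
    const double pi = 3.14159265358979323846;
    std::vector<Point3> points;
    for (int row = 0; row <= num_rows; ++row) {
        double lat    = (pi / 2.0) * row / num_rows;  // 0 = equator, pi/2 = pole
        double ring_r = r * std::cos(lat);            // radius of this ring
        double y      = r * std::sin(lat);            // height of this ring
        // keep the spacing between balls roughly constant on each ring
        int count = (int)std::ceil(balls_on_equator * std::cos(lat));
        if (count < 1) count = 1;
        for (int i = 0; i < count; ++i) {
            double a = 2.0 * pi * i / count;
            points.push_back({ ring_r * std::cos(a), y, ring_r * std::sin(a) });
        }
    }
    return points;
}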

gluDisk rotation for mapping

I'm trying to create a sub-cursor for terrain mapping.
Basic result of the code (old image, but the rotation is the same):
image http://www.sdilej.eu/pics/274a90360f9c46e2eaf94e095e0b6223.png
This is when I tested changing the glRotate ax value to my own numbers:
image2 http://www.sdilej.eu/pics/146bda9dc51708da54b9249706f874fc.png
What I want:
image3 http://www.sdilej.eu/pics/69721aa237608b423b635945d430e561.png
My code:
void renderDisk(float x1, float y1, float z1, float x2, float y2, float z2, float radius, int subdivisions, GLUquadricObj* quadric)
{
float vx = x2 - x1;
float vy = y2 - y1;
float vz = z2 - z1;
//handle the degenerate case of z1 == z2 with an approximation
if( vz == 0.0f )
vz = .0001f;
float v = sqrt( vx*vx + vy*vy + vz*vz );
float ax = 57.2957795f * acos( vz/v );
if(vz < 0.0f)
ax = -ax;
float rx = -vy * vz;
float ry = vx * vz;
glPushMatrix();
glTranslatef(x1, y1, z1);
glRotatef(ax, rx, ry, 0.0);
gluQuadricOrientation(quadric, GLU_OUTSIDE);
gluDisk(quadric, radius - 0.25, radius + 5.0, subdivisions, 5);
glPopMatrix();
}
void renderDisk_convenient(float x, float y, float z, float radius, int subdivisions)
{
// Mouse opacity
glColor4f( 0.0f, 7.5f, 0.0f, 0.5f );
GLUquadricObj* quadric = gluNewQuadric();
gluQuadricDrawStyle(quadric, GLU_LINE);
gluQuadricNormals(quadric, GLU_SMOOTH);
gluQuadricTexture(quadric, GL_TRUE);
renderDisk(x, y, z, x, y, z, radius, subdivisions, quadric);
gluDeleteQuadric(quadric);
}
renderDisk_convenient(posX, posY, posZ, radius, 20);
This is a simple one. In your call to renderDisk() you supply bad arguments. Looks like you copied the function from some tutorial without understanding how it works. The first three parameters control the center position, and the other three parameters control rotation using a second position which the disk is always facing. If the two positions are equal (which is your case), this line is executed:
//handle the degenerate case of z1 == z2 with an approximation
if( vz == 0.0f )
vz = .0001f;
And setting vz to nonzero makes the disc perpendicular to the XZ plane, which is also the horizontal plane of your terrain. So, to make it work, you need to modify your function like this:
void renderDisk_convenient(float x, float y, float z, float radius, int subdivisions)
{
// Mouse opacity
glColor4f( 0.0f, 7.5f, 0.0f, 0.5f );
GLUquadricObj* quadric = gluNewQuadric();
gluQuadricDrawStyle(quadric, GLU_LINE);
gluQuadricNormals(quadric, GLU_SMOOTH);
gluQuadricTexture(quadric, GL_TRUE);
float upX = 0, upY = 1, upZ = 0; // up vector (does not need to be normalized)
renderDisk(x, y, z, x + upX, y + upY, z + upZ, radius, subdivisions, quadric);
gluDeleteQuadric(quadric);
}
This should put the disc into the XZ plane, so it will be okay if the terrain is flat. But in other places you actually need to modify the normal direction (the (upX, upY, upZ) vector). If your terrain is generated from a heightmap, then the normal can be calculated using code such as this:
const char *p_s_heightmap16 = "ps_height_1k.png";
const float f_terrain_height = 50; // terrain is 50 units high
const float f_terrain_scale = 1000; // the longer edge of terrain is 1000 units long
TBmp *p_heightmap;
if(!(p_heightmap = p_LoadHeightmap_HiLo(p_s_heightmap16))) {
fprintf(stderr, "error: failed to load heightmap (%s)\n", p_s_heightmap16);
return false;
}
// load heightmap
TBmp *p_normalmap = TBmp::p_Alloc(p_heightmap->n_width, p_heightmap->n_height);
// alloc normalmap
const float f_width_scale = f_terrain_scale / max(p_heightmap->n_width, p_heightmap->n_height);
// calculate the scaling factor
for(int y = 0, hl = p_normalmap->n_height, hh = p_heightmap->n_height; y < hl; ++ y) {
for(int x = 0, wl = p_normalmap->n_width, wh = p_heightmap->n_width; x < wl; ++ x) {
Vector3f v_normal(0, 0, 0);
{
Vector3f v_pos[9];
for(int yy = -1; yy < 2; ++ yy) {
for(int xx = -1; xx < 2; ++ xx) {
int sx = xx + x;
int sy = yy + y;
float f_height;
if(sx >= 0 && sy >= 0 && sx < wh && sy < hh)
f_height = ((const uint16_t*)p_heightmap->p_buffer)[sx + sy * wh] / 65535.0f * f_terrain_height;
else
f_height = 0;
v_pos[(xx + 1) + 3 * (yy + 1)] = Vector3f(xx * f_width_scale, f_height, yy * f_width_scale);
}
}
// read nine-neighbourhood
/*
0 1 2
+----------+----------+
|\ | /|
| \ | / |
| \ | / |
| \ | / |
3|_________\|/_________|5
| 4/|\ |
| / | \ |
| / | \ |
| / | \ |
|/ | \|
+----------+----------+
6 7 8
*/
const int p_indices[] = {
0, 1, //4,
1, 2, //4,
2, 5, //4,
5, 8, //4,
8, 7, //4,
7, 6, //4,
6, 3, //4,
3, 0 //, 4
};
for(int i = 0; i < 8; ++ i) {
Vector3f a = v_pos[p_indices[i * 2]];
Vector3f b = v_pos[p_indices[i * 2 + 1]];
Vector3f c = v_pos[4];
// triangle
Vector3f v_tri_normal = (a - c).v_Cross(b - c);
v_tri_normal.Normalize();
// calculate normals
v_normal += v_tri_normal;
}
v_normal.Normalize();
}
// calculate normal from the heightmap (by averaging the normals of eight triangles that share the current point)
uint32_t n_normalmap =
0xff000000U |
(max(0, min(255, int(v_normal.z * 127 + 128))) << 16) |
(max(0, min(255, int(v_normal.y * 127 + 128))) << 8) |
max(0, min(255, int(-v_normal.x * 127 + 128)));
// calculate normalmap color
p_normalmap->p_buffer[x + wl * y] = n_normalmap;
// use the lightmap bitmap to store the results
}
}
(note this contains some structures and functions that are not included here so you won't be able to use this code directly, but the basic concept is there)
Once you have the normals, you need to sample the normal under the location (x, z) and use that in your function. This will still make the disc intersect the terrain where a steep slope meets a flat surface (where the second derivative is high). To cope with that, you can either lift the cursor up a bit (along the normal) or disable depth testing.
If your terrain is polygonal, you could use vertex normals just as well: take the triangle that is below (x, y, z) and interpolate its vertex normals to get the normal for the disc.
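A possible sketch of that interpolation (the Vec3 type and function below are illustrative stand-ins, not part of the code above):
#include <cmath>

// Interpolate the vertex normals of the triangle under (x, z) using barycentric
// weights computed in the horizontal (XZ) plane.
struct Vec3 { float x, y, z; };

Vec3 InterpolateNormal(const Vec3 &a, const Vec3 &b, const Vec3 &c,     // triangle positions
                       const Vec3 &na, const Vec3 &nb, const Vec3 &nc,  // vertex normals
                       float x, float z)
{
    // barycentric coordinates of (x, z) in the triangle projected onto XZ
    float d  = (b.z - c.z) * (a.x - c.x) + (c.x - b.x) * (a.z - c.z);
    float w0 = ((b.z - c.z) * (x - c.x) + (c.x - b.x) * (z - c.z)) / d;
    float w1 = ((c.z - a.z) * (x - c.x) + (a.x - c.x) * (z - c.z)) / d;
    float w2 = 1.0f - w0 - w1;

    Vec3 n = {
        na.x * w0 + nb.x * w1 + nc.x * w2,
        na.y * w0 + nb.y * w1 + nc.y * w2,
        na.z * w0 + nb.z * w1 + nc.z * w2
    };

    // renormalize and use the result as the (upX, upY, upZ) vector for renderDisk()
    float len = std::sqrt(n.x * n.x + n.y * n.y + n.z * n.z);
    n.x /= len; n.y /= len; n.z /= len;
    return n;
}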
I hope this helps, feel free to comment if you need further advice ...