Rotate point sprite - opengl

I can already rotate a point sprite by 0, 90, 180, or 270 degrees.
Fragment shader
precision lowp float;

uniform sampler2D us_tex;
uniform mat3 um_tex;

void main()
{
    vec2 tex_coords = (um_tex * vec3(gl_PointCoord, 1.0)).xy;
    gl_FragColor = texture2D(us_tex, tex_coords);
}
3*3 matrix operations (I know about GLM - it's great, but for academic purposes I handle matrices on my own):
typedef GLfloat m3[9]; // 3*3 matrix

#define DEG_TO_RAD(x) (x * M_PI / 180.0f)

void ident_m3(m3 res)
{
    memset(res, 0, sizeof(m3));
    res[0] = res[4] = res[8] = 1.0f;
}

void trans_m3(m3 res, const p2* pos)
{
    ident_m3(res);
    res[7] = pos->x;
    res[8] = pos->y;
}
void mult_m3(m3 res, const m3 m1, const m3 m2)
{
    res[0] = m1[0] * m2[0] + m1[3] * m2[1] + m1[6] * m2[2];
    res[1] = m1[1] * m2[0] + m1[4] * m2[1] + m1[7] * m2[2];
    res[2] = m1[2] * m2[0] + m1[5] * m2[1] + m1[8] * m2[2];
    res[3] = m1[0] * m2[3] + m1[3] * m2[4] + m1[6] * m2[5];
    res[4] = m1[1] * m2[3] + m1[4] * m2[4] + m1[7] * m2[5];
    res[5] = m1[2] * m2[3] + m1[5] * m2[4] + m1[8] * m2[5];
    res[6] = m1[0] * m2[6] + m1[3] * m2[7] + m1[6] * m2[8];
    res[7] = m1[1] * m2[6] + m1[4] * m2[7] + m1[7] * m2[8];
    res[8] = m1[2] * m2[6] + m1[5] * m2[7] + m1[8] * m2[8];
}
In ParticlesDraw():
m3 r;
rot_m3(r, 90.0f);
...
glUniformMatrix3fv(/*um_tex uniform*/, 1, GL_FALSE, r);
glDrawArrays(GL_POINTS, 0, /*particles count*/);
...
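The rot_m3 helper is not shown above; presumably it is a plain 2D rotation embedded in the same column-major 3*3 layout as the other helpers. A minimal sketch, assuming that layout:
/* Hypothetical sketch: a standard 2D rotation in the upper-left 2x2 block
   of a column-major 3x3 matrix, matching ident_m3/mult_m3 above. */
void rot_m3(m3 res, float deg)
{
    float rad = DEG_TO_RAD(deg);
    float c = cosf(rad);
    float s = sinf(rad);
    ident_m3(res);
    res[0] =  c;  /* column 0 */
    res[1] =  s;
    res[3] = -s;  /* column 1 */
    res[4] =  c;
}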
I also know how to rotate an ordinary sprite around pos(x,y,z):
Translate to pos(-x,-y,-z)
Rotate
Translate to pos(x,y,z)
Result Matrix = (Rot Matrix * Translate Matrix) * Anti-Translate Matrix.
I want to rotate the point sprite by an arbitrary angle, e.g. 45, 32, 64, or 72 degrees, but right now it does not rotate correctly (the last frame is at 45 degrees).
In this case I can translate to the center of the texture, (0.5, 0.5), but what should the anti-translate be - (0.0, 0.0)?
I tried something like that, but it does not work, for example for a 30 or 45 degree rotation. Also, if my texture is 64*64, do I need to set gl_PointSize to 64.0 for the rotation?

This:
Translate to pos(-x,-y,-z)
Rotate
Translate to pos(x,y,z)
Is not the same thing as this:
Result Matrix = (Rot Matrix * Translate Matrix) * Anti-Translate Matrix.
If you wish to rotate around the point (x,y,z), then you need to do this:
Matrix T1 = Translate(x, y, z);
Matrix R1 = Rotate();
Matrix T2 = Translate(-x, -y, -z);
Which is the same thing as:
Result Matrix = T1 * R1 * T2
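Applied to the point sprite, the rotation center is the texture center, so x = y = 0.5 and the anti-translate T2 is (-0.5, -0.5) rather than (0.0, 0.0). A sketch using the question's helpers, assuming p2 is a plain {x, y} struct, um_tex_location is the uniform location, and a trans_m3 that writes tx and ty into res[6] and res[7] (the third column of the column-major layout used by mult_m3):
/* Sketch: build um_tex = T(0.5, 0.5) * R(angle) * T(-0.5, -0.5) so that
   gl_PointCoord is rotated around the texture center. */
m3 back, rot, to_origin, tmp, tex;
p2 center      = {  0.5f,  0.5f };
p2 anti_center = { -0.5f, -0.5f };

trans_m3(back, &center);           /* T1: move the center back           */
rot_m3(rot, 45.0f);                /* R1: any angle works                */
trans_m3(to_origin, &anti_center); /* T2: move the center to the origin  */

mult_m3(tmp, rot, to_origin);      /* R1 * T2        */
mult_m3(tex, back, tmp);           /* T1 * (R1 * T2) */

glUniformMatrix3fv(um_tex_location, 1, GL_FALSE, tex);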

Related

How to do frustum culling in OpenGL with the view and projection matrix?

I'm trying to implement frustum culling in my voxel engine: I render chunks and I want to cull every chunk that is outside the camera's frustum. I have tried a lot of different approaches and code that I found on the web, but I still can't get it to work. The algorithm has two parts:
• First I extract the frustum planes from the projection*view matrix.
• Then I check, for each chunk, whether it is inside or intersecting the frustum.
The behavior is generally the same: when looking from the origin toward the positive direction it seems to work, but when looking toward the negative direction it doesn't, and when I move away from the origin it starts breaking and producing nonsense. The culling is also odd when looking up and down.
Here is my frustum plane extraction:
public static Plane[] frustumPlanes(Matrix4f mat, boolean normalize)
{
    Plane[] p = new Plane[6];
    p[0] = normalizePlane(mat.m30 + mat.m00, mat.m31 + mat.m01, mat.m32 + mat.m02, mat.m33 + mat.m03); // left
    p[1] = normalizePlane(mat.m30 - mat.m00, mat.m31 - mat.m01, mat.m32 - mat.m02, mat.m33 - mat.m03); // right
    p[2] = normalizePlane(mat.m30 - mat.m10, mat.m31 - mat.m11, mat.m32 - mat.m12, mat.m33 - mat.m13); // top
    p[3] = normalizePlane(mat.m30 + mat.m10, mat.m31 + mat.m11, mat.m32 + mat.m12, mat.m33 + mat.m13); // bottom
    p[4] = normalizePlane(mat.m30 + mat.m20, mat.m31 + mat.m21, mat.m32 + mat.m22, mat.m33 + mat.m23); // near
    p[5] = normalizePlane(mat.m30 - mat.m20, mat.m31 - mat.m21, mat.m32 - mat.m22, mat.m33 - mat.m23); // far
    return p;
}

public static Plane normalizePlane(float A, float B, float C, float D) {
    float nf = 1.0f / (float) Math.sqrt(A * A + B * B + C * C);
    return new Plane(new Vector3f(nf * A, nf * B, nf * C), nf * D);
}
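As a cross-check, here is a minimal C-style sketch of the Gribb & Hartmann extraction written against a plain column-major float[16] (OpenGL memory order), where row i of the matrix is (m[i], m[4+i], m[8+i], m[12+i]). Whether mat.mXY above means column X, row Y or row X, column Y is exactly what this kind of code is sensitive to, so it is worth verifying against the convention of your Matrix4f class:
// Sketch: extract the six frustum planes (a*x + b*y + c*z + d = 0)
// from a column-major 4x4 clip matrix m = projection * view.
#include <cmath>

struct PlaneEq { float a, b, c, d; };

static PlaneEq normalizePlane(float a, float b, float c, float d) {
    float inv = 1.0f / std::sqrt(a * a + b * b + c * c);
    return { a * inv, b * inv, c * inv, d * inv };
}

void extractFrustumPlanes(const float m[16], PlaneEq out[6]) {
    // row(i), component j is m[4 * j + i]
    for (int i = 0; i < 3; ++i) {
        // plane pairs: row3 + row_i (left/bottom/near), row3 - row_i (right/top/far)
        out[2 * i]     = normalizePlane(m[3] + m[i],      m[7] + m[4 + i],
                                        m[11] + m[8 + i], m[15] + m[12 + i]);
        out[2 * i + 1] = normalizePlane(m[3] - m[i],      m[7] - m[4 + i],
                                        m[11] - m[8 + i], m[15] - m[12 + i]);
    }
}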
mat is the projection*view matrix; here is the projection matrix:
private void createProjectionMatrix() {
    float aspectRatio = (float) DisplayManager.WIDTH / (float) DisplayManager.HEIGHT;
    float y_scale = (float) ((1f / Math.tan(Math.toRadians(FOV / 2f))));
    float x_scale = y_scale / aspectRatio;
    float frustum_length = FAR_PLANE - NEAR_PLANE;

    projectionMatrix = new Matrix4f();
    projectionMatrix.m00 = x_scale;
    projectionMatrix.m11 = y_scale;
    projectionMatrix.m22 = -((FAR_PLANE + NEAR_PLANE) / frustum_length);
    projectionMatrix.m23 = -1;
    projectionMatrix.m32 = -((2 * NEAR_PLANE * FAR_PLANE) / frustum_length);
    projectionMatrix.m33 = 0;
}
Here is the view matrix:
public static Matrix4f createViewMatrix(Camera camera) {
    Matrix4f viewMatrix = new Matrix4f();
    viewMatrix.setIdentity();
    Matrix4f.rotate((float) Math.toRadians(camera.getRotation().x), new Vector3f(1, 0, 0), viewMatrix, viewMatrix);
    Matrix4f.rotate((float) Math.toRadians(camera.getRotation().y), new Vector3f(0, 1, 0), viewMatrix, viewMatrix);
    Matrix4f.rotate((float) Math.toRadians(camera.getRotation().z), new Vector3f(0, 0, 1), viewMatrix, viewMatrix);
    Vector3f cameraPos = camera.getPosition();
    Vector3f negativeCameraPos = new Vector3f(-cameraPos.x, -cameraPos.y, -cameraPos.z);
    Matrix4f.translate(negativeCameraPos, viewMatrix, viewMatrix);
    return viewMatrix;
}
Here is the collision detection code (AABB vs. plane):
public static int boxToPlaneCollision(Plane plane, Vector3f[] minMax)
{
    int result = 2; // Inside
    // planes have unit-length normal, offset = -dot(normal, point on plane)
    int nx = plane.normal.x > 0 ? 1 : 0;
    int ny = plane.normal.y > 0 ? 1 : 0;
    int nz = plane.normal.z > 0 ? 1 : 0;
    // getMinMax(): 0 = return min coordinate. 1 = return max.
    float dot = (plane.normal.x * minMax[nx].x) + (plane.normal.y * minMax[nx].y) + (plane.normal.z * minMax[nx].z);
    if (dot < -plane.offset)
        return 0; // Outside
    float dot2 = (plane.normal.x * minMax[1-nx].x) + (plane.normal.y * minMax[1-nx].y) + (plane.normal.z * minMax[1-nx].z);
    if (dot2 <= -plane.offset)
        result = 1; // Intersect
    return result;
}
And finally here is where everything is called:
public boolean chunkInsideFrustum(Vector3f chunkPos) {
    Vector3f chunkPosMax = new Vector3f(chunkPos.x + Terrain.CHUNK_SIZE, Terrain.CHUNK_HEIGHT, chunkPos.z + Terrain.CHUNK_SIZE);
    for (int i = 0; i < 6; i++) {
        if (Collider.boxToPlaneCollision(frustumPlanes[i], new Vector3f[] {chunkPos, chunkPosMax}) == 0)
            return false;
    }
    return true;
}
I'm using OpenGL with LWJGL 2 (Java).
My questions are:
Where is the problem? In the frustum plane extraction code? In the collision detection?
And: I saw people calculating the frustum with the projection and modelview matrices instead; what about that technique? Is it better?
Thank you very much for your help!
EDIT:
For the second question, I saw here, in Extracting View Frustum Planes (Gribb & Hartmann method), that someone posted this:
The missing part:
comboMatrix = projection_matrix * Matrix4_Transpose(modelview_matrix)
He then applied the exact same algorithm that I did to extract the planes, but what is modelview_matrix? What model should I use?

Custom matrices & OpenGL shaders.

I am trying to make a simple 4x4 matrix class.
The data (float) is a single-dimensional array, and I use this code to store numbers as if it were a grid.
const inline int ind1(short x, short y) { // Convert coords to spot on linear array, uses SIZE
    return x * (SIZE >> 2) + y;
}
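For concreteness, a hypothetical check of the scheme (assuming SIZE == 16, so SIZE >> 2 == 4): the second argument varies fastest, so four consecutive floats hold (x, 0) through (x, 3).
// Hypothetical check, assuming SIZE == 16 as for a 4x4 matrix of floats.
constexpr int SIZE_CHECK = 16;
constexpr int ind1_check(int x, int y) { return x * (SIZE_CHECK >> 2) + y; }

static_assert(ind1_check(0, 3) == 3,  "(0,3) lands within the first four floats");
static_assert(ind1_check(3, 0) == 12, "(3,0) starts the last four floats");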
This part is in the .h file
float *data;
These are in the .cpp
Mat::Mat() {
    define();
    diagDefine(1.0f);
}

void Mat::define() {
    data = new float[SIZE];
    for (int x = 0; x < SIZE >> 2; x++) {
        for (int y = 0; y < SIZE >> 2; y++) {
            data[ind1(x, y)] = 0;
        }
    }
}

void Mat::diagDefine(float nval) {
    data[ind1(0, 0)] = nval;
    data[ind1(1, 1)] = nval;
    data[ind1(2, 2)] = nval;
    data[ind1(3, 3)] = nval;
}
The problem is that when I multiply my position by the matrix in the vertex shader, the triangle (or whatever I am drawing) disappears.
My class has orthographic, perspective, translation, rotation, and scaling.
Mat Mat::getOrthographic(float left, float right, float top, float bottom, float near, float far) {
    Mat newmat;
    newmat.data[ind1(0, 0)] = 2.0f / (right - left);
    newmat.data[ind1(1, 1)] = 2.0f / (top - bottom);
    newmat.data[ind1(2, 2)] = 2.0f / (near - far);
    newmat.data[ind1(0, 3)] = (left + right) / (left - right);
    newmat.data[ind1(1, 3)] = (bottom + top) / (bottom - top);
    newmat.data[ind1(2, 3)] = (far + near) / (far - near);
    return newmat;
}

Mat Mat::getPerspective(float fov, float aspectratio, float near, float far) {
    Mat newmat;
    newmat.data[ind1(0, 0)] = (1.0f / tan((0.5f * fov) * (3.141519 / 180.0f))) / aspectratio;
    newmat.data[ind1(1, 1)] = 1.0f / tan((0.5f * fov) * (3.141519 / 180.0f));
    newmat.data[ind1(2, 2)] = (near + far) / (near - far);
    newmat.data[ind1(3, 2)] = -1.0f;
    newmat.data[ind1(2, 3)] = (2.0f * near * far) / (near - far);
    return newmat;
}

Mat Mat::getTranslation(Vec3f &vec) {
    Mat newmat;
    newmat.data[ind1(0, 3)] = vec.x;
    newmat.data[ind1(1, 3)] = vec.y;
    newmat.data[ind1(2, 3)] = vec.z;
    return newmat;
}

Mat Mat::getRotation(double angle, Vec3f &vec) {
    Mat newmat;
    float s = sin(angle);
    float c = cos(angle);
    newmat.data[ind1(0, 0)] = vec.x * (1.0f - c) + c;
    newmat.data[ind1(1, 0)] = vec.y * vec.x * (1.0f - c) + vec.z * s;
    newmat.data[ind1(2, 0)] = vec.x * vec.z * (1.0f - c) - vec.y * s;
    newmat.data[ind1(0, 1)] = vec.x * vec.y * (1.0f - c) - vec.z * s;
    newmat.data[ind1(1, 1)] = vec.y * (1.0f - c) + c;
    newmat.data[ind1(2, 1)] = vec.y * vec.z * (1.0f - c) + vec.x * s;
    newmat.data[ind1(0, 2)] = vec.x * vec.z * (1.0f - c) + vec.y * s;
    newmat.data[ind1(1, 2)] = vec.y * vec.z * (1.0f - c) - vec.x * s;
    newmat.data[ind1(2, 2)] = vec.z * (1.0f - c) + c;
    return newmat;
}

Mat Mat::getScale(Vec3f &vec) {
    Mat newmat;
    newmat.data[ind1(0, 0)] = vec.x;
    newmat.data[ind1(1, 1)] = vec.y;
    newmat.data[ind1(2, 2)] = vec.z;
    return newmat;
}
Vertex shader code:
#version 330

layout(location = 0) in vec3 pos;

uniform mat4 view_mat;

void main() {
    gl_Position = view_mat * vec4(pos, 1.0);
}
Finally, here is how I send the data to the shader.
// In the matrix file
float *getRawDataAsArray() { return data; }

// In the shader.h file
void Shader::GL_SET_UNIFORM_MATRIX(const char *name, Mat matrix) {
    GLint location = glGetUniformLocation(program, name);
    if (location != -1) {
        glUniformMatrix4x2fv(location, 1, GL_FALSE, matrix.getRawDataAsArray());
    }
}

// In main.cpp (sh is the shader object that contains GL_SET_UNIFORM_MATRIX)
sh.GL_SET_UNIFORM_MATRIX("view_mat", sod2::Mat::getRotation(3.141519 / 2, 0, 0, 1));
sh.GL_SET_UNIFORM_4f("color", 0.0, 1.0, 0.0, 1.0);
Final note: my shaders do compile perfectly, and when I run without anything matrix-related (just dealing with color or modifying position) it works perfectly.
Thanks
There are 2 issues in your code:
You use the wrong glUniform* function to set the view_mat uniform in your function Shader::GL_SET_UNIFORM_MATRIX. While glUniformMatrix4fv sets 16 floats for a 4*4 matrix, glUniformMatrix4x2fv sets 8 floats for a 4*2 matrix. See glUniform.
Further, see The OpenGL Shading Language 4.6, section 5.4.2 Vector and Matrix Constructors, page 101:
To initialize a matrix by specifying vectors or scalars, the components are assigned to the matrix elements in column-major order.
mat4(float, float, float, float, // first column
float, float, float, float, // second column
float, float, float, float, // third column
float, float, float, float); // fourth column
But your matrix is set up in row-major order:
const inline int ind1(short x, short y) {
    return x * (SIZE >> 2) + y;
}
Either the ind1 function has to be changed to fix this issue:
const inline int ind1(short x, short y) {
    return y * (SIZE >> 2) + x;
}
Or the matrix has to be transposed when it is set to the uniform variable:
glUniformMatrix4fv(
    location,
    1,
    GL_TRUE,  // <----------------- transpose
    matrix.getRawDataAsArray());
Or the vector has to be multiplied by the matrix from the left:
gl_Position = vec4(pos, 1.0) * view_mat;
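Putting the first fix together with the transpose option, a minimal corrected version of the upload function might look like this (GL_SET_UNIFORM_MATRIX and program are the names from the question, the row-major ind1 is kept and the matrix is transposed on upload):
// Sketch of the corrected uniform upload: glUniformMatrix4fv (not 4x2fv),
// with GL_TRUE so the row-major ind1 layout is transposed to column-major.
void Shader::GL_SET_UNIFORM_MATRIX(const char *name, Mat matrix) {
    GLint location = glGetUniformLocation(program, name);
    if (location != -1) {
        glUniformMatrix4fv(location, 1, GL_TRUE, matrix.getRawDataAsArray());
    }
}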

Convert a 2D screen coordinate to a 3D world coordinate

Right now I can transform a 3D world coordinate to a 2D screen coordinate.
I can achieve that with this:
D3DXMATRIX viewMatrix = renderer->viewMatrix;
D3DXMATRIX projectionMatrix = renderer->projectionMatrix;
D3DXMATRIX viewProjectionMatrix = D3DXMATRIX();
D3DXMatrixMultiply(&viewProjectionMatrix, &viewMatrix, &projectionMatrix);
D3DXVECTOR3 pScreen = D3DXVECTOR3(0.0f, 0.0f, 0.0f);
D3DXVECTOR3 pWorld(world->getX(), world->getY(), world->getZ());
D3DXVec3TransformCoordImpl(&pScreen, &pWorld, &viewProjectionMatrix);
Now pScreen contains the 2D screen coordinate.
How could I reverse this to get a 3D world coordinate out of a 2D screen coordinate? I'm using this implementation of the D3DXVec3TransformCoord function:
D3DXVECTOR3* WINAPI D3DXVec3TransformCoordImpl(D3DXVECTOR3* pout, CONST D3DXVECTOR3* pv, CONST D3DXMATRIX* pm)
{
    FLOAT norm = pm->m[0][3] * pv->x + pm->m[1][3] * pv->y + pm->m[2][3] * pv->z + pm->m[3][3];
    if (norm)
    {
        pout->x = (pm->m[0][0] * pv->x + pm->m[1][0] * pv->y + pm->m[2][0] * pv->z + pm->m[3][0]) / norm;
        pout->y = (pm->m[0][1] * pv->x + pm->m[1][1] * pv->y + pm->m[2][1] * pv->z + pm->m[3][1]) / norm;
        pout->z = (pm->m[0][2] * pv->x + pm->m[1][2] * pv->y + pm->m[2][2] * pv->z + pm->m[3][2]) / norm;
    }
    else
    {
        pout->x = 0.0f;
        pout->y = 0.0f;
        pout->z = 0.0f;
    }
    return pout;
}
You could use OpenCV's solvePnP like this:
double max_d = std::max(img.rows, img.cols);
Mat camMatrix = (Mat_<double>(3,3) <<
    max_d, 0,     img.cols/2.0,
    0,     max_d, img.rows/2.0,
    0,     0,     1.0);
// 2d -> 3d correspondences
vector<Point2d> pts2d = ...
vector<Point3d> pts3d = ...
Mat rvec, tvec;
solvePnP(pts3d, pts2d, camMatrix, Mat(1,4,CV_64F,0.0), rvec, tvec, false, SOLVEPNP_EPNP);
// get the 3d rotation matrix
Mat rotM(3, 3, CV_64F);
Rodrigues(rvec, rotM);
// push tvec onto the transposed Mat
Mat rotMT = rotM.t();
rotMT.push_back(tvec.reshape(1, 1));
// transpose back, and multiply: camMatrix * [R|t]
Mat projMat = camMatrix * rotMT.t();
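The snippet above recovers the camera pose from 2D/3D correspondences. If you only want to invert the transform shown in the question, a minimal D3DX-style sketch (assuming you pick a depth value zNdc for the screen point, since a 2D point alone only defines a ray, and that pScreen is already in normalized device coordinates) could look like this:
// Sketch: map a screen-space point back to world space by inverting the
// view-projection matrix; zNdc selects the depth along the ray.
D3DXMATRIX invViewProj;
D3DXMatrixInverse(&invViewProj, NULL, &viewProjectionMatrix);

D3DXVECTOR3 pNdc(pScreen.x, pScreen.y, zNdc);
D3DXVECTOR3 pWorld;
D3DXVec3TransformCoordImpl(&pWorld, &pNdc, &invViewProj);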

Camera in opengl not working

The problem is that when I face my camera down the z axis, for example, and pitch, everything works fine; however, after I have finished the pitch and then try to yaw on this new axis, the camera begins to roll for some unknown reason.
void FrustumCamera::xAxisRotation(float angle)
{
    // angle = angle * (double)degToRad;
    Vector3<float> x = m_orientation.getXAxis();
    Vector3<float> y = m_orientation.getYAxis();
    Vector3<float> z = m_orientation.getZAxis();
    y.rotateAroundAxis(x, angle);
    x = m_orientation.getXAxis();
    z.rotateAroundAxis(x, angle);
    m_orientation.setYAxis(y);
    m_orientation.setZAxis(z);
}

void FrustumCamera::yAxisRotation(float angle)
{
    // angle = angle * (double)degToRad;
    Vector3<float> x = m_orientation.getXAxis();
    Vector3<float> y = m_orientation.getYAxis();
    Vector3<float> z = m_orientation.getZAxis();
    x.rotateAroundAxis(y, angle);
    y = m_orientation.getYAxis();
    z.rotateAroundAxis(y, angle);
    m_orientation.setXAxis(x);
    m_orientation.setZAxis(z);
}

void FrustumCamera::zAxisRotation(float angle)
{
    Vector3<float> x = m_orientation.getXAxis();
    Vector3<float> y = m_orientation.getYAxis();
    Vector3<float> z = m_orientation.getZAxis();
    x.rotateAroundAxis(z, angle);
    z = m_orientation.getYAxis();
    y.rotateAroundAxis(z, angle);
    m_orientation.setXAxis(x);
    m_orientation.setYAxis(y);
}
template <class Type>
void Vector3<Type>::rotateAroundAxis(Vector3<Type> axis, const float angle)
{
    float radians = static_cast<Type>(angle * degToRad);
    Type sinAngle = static_cast<Type>(sin(radians));
    Type cosAngle = 0.0;
    if (angle == 90 || angle == -90)
        cosAngle = 0.0;
    else
        cosAngle = cos(radians);
    normalise(axis); // normalise the axis
    Type oneMinusCos = 1 - cosAngle; // (1 - cos(theta))
    // construct the rotation matrix
    Type tempMatrix[3][3];
    tempMatrix[0][0] = (axis.x * axis.x) * oneMinusCos + cosAngle;
    tempMatrix[0][1] = (axis.x * axis.y) * oneMinusCos + axis.z * sinAngle;
    tempMatrix[0][2] = (axis.x * axis.z) * oneMinusCos - axis.y * sinAngle;
    tempMatrix[1][0] = (axis.x * axis.y) * oneMinusCos - axis.z * sinAngle;
    tempMatrix[1][1] = (axis.y * axis.y) * oneMinusCos + cosAngle;
    tempMatrix[1][2] = (axis.y * axis.z) * oneMinusCos + axis.x * sinAngle;
    tempMatrix[2][0] = (axis.x * axis.z) * oneMinusCos + axis.y * sinAngle;
    tempMatrix[2][1] = (axis.y * axis.z) * oneMinusCos - axis.x * sinAngle;
    tempMatrix[2][2] = (axis.z * axis.z) * oneMinusCos + cosAngle;
    Vector3<Type> temp(*this);
    Vector3<Type> result;
    result.x = (temp.x * tempMatrix[0][0]) + (temp.y * tempMatrix[1][0]) + (temp.z * tempMatrix[2][0]);
    result.y = (temp.x * tempMatrix[0][1]) + (temp.y * tempMatrix[1][1]) + (temp.z * tempMatrix[2][1]);
    result.z = (temp.x * tempMatrix[0][2]) + (temp.y * tempMatrix[1][2]) + (temp.z * tempMatrix[2][2]);
    *this = result;
}
void OpenGLRenderer::startDraw(unsigned long mask)
{
    //sortBuffer(); // sort draw queue
    clearBuffers(mask); // clear buffers
    loadIdentity();
    glTranslatef(-1*m_frustumCamera->getViewMatrix().getTranslationAxis().x, -1*m_frustumCamera->getViewMatrix().getTranslationAxis().y, -1*m_frustumCamera->getViewMatrix().getTranslationAxis().z);
    glMultMatrixf(m_frustumCamera->getViewMatrix().getMatrix());
    glTranslatef(m_frustumCamera->getViewMatrix().getTranslationAxis().x, m_frustumCamera->getViewMatrix().getTranslationAxis().y, m_frustumCamera->getViewMatrix().getTranslationAxis().z); // load identity
    //
    // push matrix stack
    matrixStackPush();
}
You might be experiencing gimbal lock; this can happen if you pitch all the way up or down so that your look vector becomes parallel with your up vector, in which case a yaw will be the same as a roll.
This can be a downside of constructing rotations piecemeal via Euler angles. You may want to look into quaternions. (Note that you can't rotate with Euler angles directly; they are just a representation of a rotation, and you need to convert them to a matrix or quaternions, but the way you are tackling it is very much an 'Euler angle' way of thinking about it.)
The strength of matrix multiplication is that any sequence of multiple rotations can be represented (and concatenated) as a single rotation matrix. What you need to be doing is something like this:
void Transformable::yaw(float angle)
{
    float4x4 rot;                        // temp rotation matrix
    float3 translate(&_transform._41);   // save our translation
    float3 up(&_transform._21);          // y axis

    // build the rotation matrix for rotation around y
    MatrixRotationAxis(&rot, &up, angle);

    // multiply our transform by the rotation matrix
    // note that order of multiplication matters and depends on
    // if your matrices are column-major or row-major
    MatrixMultiply(&_transform, &_transform, &rot);

    // write back our original translation
    memcpy(&_transform._41, &translate, sizeof(float3));

    // might want to reorthogonalise every now and then
    // to make sure basis vectors are orthonormal
    // or you will probably get matrix creep after a few operations
}
instead of trying to rotate one basis vector at a time. In this case _transform would be a 4x4 homogeneous matrix representing the transformation (rotation and translation). The top-left 3x3 submatrix is simply the basis vectors of the orientation space.
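On the re-orthogonalisation comment above, a minimal Gram-Schmidt-style sketch over the three basis vectors of that top-left 3x3 submatrix, written in the same style as the yaw() snippet (float3, normalize, cross and the _11/_21/_31 field layout are assumed helpers and conventions, not shown here):
// Sketch: re-orthonormalise the orientation basis to counter numeric drift.
// Keeps the forward (z) axis and rebuilds the others from cross products.
void Transformable::reorthogonalise()
{
    float3 right(&_transform._11);   // x axis
    float3 up(&_transform._21);      // y axis
    float3 forward(&_transform._31); // z axis

    forward = normalize(forward);
    right   = normalize(cross(up, forward));
    up      = cross(forward, right); // already unit length

    memcpy(&_transform._11, &right,   sizeof(float3));
    memcpy(&_transform._21, &up,      sizeof(float3));
    memcpy(&_transform._31, &forward, sizeof(float3));
}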

glm::perspective explanation

I am trying to understand what the following code does:
glm::mat4 Projection = glm::perspective(35.0f, 1.0f, 0.1f, 100.0f);
Does it create a projection matrix? Does it clip off anything that is not in the user's view?
I wasn't able to find anything on the API page, and the only thing I could find in the PDF on their website was this:
gluPerspective:
glm::mat4 perspective(float fovy, float aspect, float zNear, float zFar);
glm::dmat4 perspective(double fovy, double aspect, double zNear, double zFar);
From GLM_GTC_matrix_transform extension: <glm/gtc/matrix_transform.hpp>
But it doesn't explain the parameters. Maybe I missed something.
It creates a projection matrix, i.e. the matrix that describes the set of linear equations that transforms vectors from eye space into clip space. Matrices really are not black magic. In the case of OpenGL they happen to be a 4-by-4 arrangement of numbers:
X_x Y_x Z_x T_x
X_y Y_y Z_y T_y
X_z Y_z Z_z T_z
X_w Y_w Z_w W_w
You can multiply a 4-vector by a 4×4 matrix:
v' = M * v
v'_x = M_xx * v_x + M_yx * v_y + M_zx * v_z + M_tx * v_w
v'_y = M_xy * v_x + M_yy * v_y + M_zy * v_z + M_ty * v_w
v'_z = M_xz * v_x + M_yz * v_y + M_zz * v_z + M_tz * v_w
v'_w = M_xw * v_x + M_yw * v_y + M_zw * v_z + M_tw * v_w
After reaching clip space (i.e. after the projection step), the primitives are clipped. The vertices resulting from the clipping are then undergoing the perspective divide, i.e.
v'_x = v_x / v_w
v'_y = v_y / v_w
v'_z = v_z / v_w
( v_w = 1 = v_w / v_w )
And that's it. There's really nothing more going on in all those transformation steps than ordinary matrix-vector multiplication.
Now the cool thing about this is that matrices can be used to describe the relative alignment of a coordinate system within another coordinate system. What the perspective transform does is let the vertices' z values "slip" into their projected w values as well. Through the perspective divide, a non-unity w then "distorts" the vertex coordinates: vertices with small z are divided by a small w, so their coordinates "blow up", whereas vertices with large z get "squeezed", which is what causes the perspective effect.
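A quick worked example: with the usual OpenGL convention, the projection matrix sets the clip-space w to -z (eye space). A vertex at eye-space z = -2 therefore gets w = 2 and the divide halves its x and y (v'_x = v_x / 2), while a vertex at z = -10 gets w = 10 and shrinks to a tenth (v'_x = v_x / 10), so the farther vertex ends up smaller on screen.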
Here is a standalone C version of the same function, roughly a copy-paste of the original.
#include <math.h>
#include <stdlib.h>
#include <string.h>

typedef struct s_mat {
    float *array;
    int   width;
    int   height;
} t_mat;

t_mat *mat_new(int width, int height)
{
    t_mat *to_return;

    to_return = (t_mat*)malloc(sizeof(t_mat));
    to_return->array = malloc(width * height * sizeof(float));
    to_return->width = width;
    to_return->height = height;
    return (to_return);
}

void mat_zero(t_mat *dest)
{
    bzero(dest->array, dest->width * dest->height * sizeof(float));
}

void mat_set(t_mat *m, int x, int y, float val)
{
    if (m == NULL || x > m->width || y > m->height)
        return ;
    m->array[m->width * (y - 1) + (x - 1)] = val;
}

t_mat *mat_perspective(float angle, float ratio,
    float near, float far)
{
    t_mat *to_return;
    float tan_half_angle;

    to_return = mat_new(4, 4);
    mat_zero(to_return);
    tan_half_angle = tan(angle / 2);
    mat_set(to_return, 1, 1, 1 / (ratio * tan_half_angle));
    mat_set(to_return, 2, 2, 1 / (tan_half_angle));
    mat_set(to_return, 3, 3, -(far + near) / (far - near));
    mat_set(to_return, 4, 3, -1);
    mat_set(to_return, 3, 4, -(2 * far * near) / (far - near));
    return (to_return);
}
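A hypothetical usage sketch: the angle is expected in radians (tan(angle / 2) is applied directly), and the 1-based (row, column)-style mat_set calls appear to fill the array in OpenGL's column-major memory order, so it can be handed to glUniformMatrix4fv without transposing. um_proj_location is an assumed uniform location, not part of the code above.
/* Hypothetical usage: 35 degree vertical FOV, square aspect ratio. */
t_mat *proj = mat_perspective(35.0f * (float)M_PI / 180.0f, 1.0f, 0.1f, 100.0f);
glUniformMatrix4fv(um_proj_location, 1, GL_FALSE, proj->array);
free(proj->array);
free(proj);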