I am new to the perspective division phenomenon. I am rendering a simple square in 3D space using the following code:
void MatrixPersp(Mat4& matrix, const float fovy, const float aspect, const float zNear, const float zFar)
{
    float sine, cotangent, deltaZ;
    float radians;

    for (int i = 0; i < 4; i++)
        for (int j = 0; j < 4; j++)
        {
            if (i == j)
                matrix[i][j] = 1.0;
            else
                matrix[i][j] = 0.0;
        }

    radians = fovy / 2 * GLES_Pi / 180;
    deltaZ = zFar - zNear;
    sine = (float) sin(radians);
    cotangent = (float) cos(radians) / sine;

    matrix[0][0] = cotangent / aspect;
    matrix[1][1] = cotangent;
    matrix[2][2] = -(zFar + zNear) / deltaZ;
    matrix[2][3] = -1;
    matrix[3][2] = -2 * zNear * zFar / deltaZ;
    matrix[3][3] = 0;
    return;
}
void Render()
{
    GLfloat vertices[] =
    {
        -0.8, 0.6, 1.0, 1.0,
        -0.8, 0.2, 1.0, 1.0,
         0.2, 0.2, 1.0, 1.0,
         0.2, 0.6, 1.0, 1.0
    };
    MatrixPersp(perspective, 90.0, aspect, 2.0, 100.0);
    glUniformMatrix4fv(glGetUniformLocation(program_object, "MVPMatrix"), 1, GL_FALSE, (GLfloat*)perspective);
    glClearDepth(1.0f);
    glClear(GL_DEPTH_BUFFER_BIT);
    glEnable(GL_DEPTH_TEST);
    glDepthFunc(GL_LEQUAL);
    glDrawElements();
}
The vertex shader is:
#version 150
in vec4 position;
uniform mat4 MVPMatrix;
void main()
{
gl_Position = position*MVPMatrix;
}
The problem is that when all four vertices have the same z-value, nothing is rendered at all. On the other hand, if two of the vertices have -1 as their z-coordinate, the projection matrix works fine.
I am not sure what is going wrong here.
glUniformMatrix4fv(glGetUniformLocation(program_object,"MVPMatrix"), 1, GL_FALSE, (GLfloat*)perspective);
This line suggests that your matrix is in column-major order. This is confirmed by the way your MatrixPersp function computes its matrix, depending on exactly how Mat4 is defined.
gl_Position = position*MVPMatrix;
If MVPMatrix is properly column-major, then this multiplication is backwards. The position goes on the right.
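For example, assuming MVPMatrix really is supplied in column-major order (GL_FALSE in glUniformMatrix4fv), a minimal corrected shader would be:

#version 150
in vec4 position;
uniform mat4 MVPMatrix;
void main()
{
    // column-major matrix: the matrix goes on the left, the vector on the right
    gl_Position = MVPMatrix * position;
}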
Right now I have the ability to scale, rotate, and translate points by using a matrix.
// I use a left to right multiplying style (scale, rotate, then translate)
Matrix model = Matrix::Scale(0.4f) * Matrix::Rotation(45.0f, Vector3(0.0f, 0.0f, 1.0f)) * Matrix::Translation(Vector3(0.0f, 0.5f)).Transposed();
// vertex shader code
#version 460 core
layout (location = 0) in vec3 vertexPosition;
uniform mat4 model;
void main() {
gl_Position = model * vec4(vertexPosition, 1.0);
}
The main problem I'm having is creating a perspective projection matrix.
static Matrix Projection(float verticalFoV, float aspectRatio, float zNear, float zFar) {
// is this even correct?
float yScale = (1.0f / tan(verticalFoV / 2.0f)) * aspectRatio;
float xScale = yScale / aspectRatio;
float frustumLength = zFar - zNear;
return Matrix({
xScale, 0, 0, 0,
0, yScale, 0, 0,
0, 0, -((zFar + zNear) / frustumLength), -((2.0f * zNear * zFar) / frustumLength),
0, 0, -1.0f, 0
});
}
Which would then be used like this.
Matrix projection = Matrix::Projection(70.0f * DegreesToRadians, screenWidth / screenHeight, 0.1f, 100.0f);
I send over the matrices without transposing them.
glUniformMatrix4fv(glGetUniformLocation(shaderProgram, "model"), 1, false, &model[0][0]);
glUniformMatrix4fv(glGetUniformLocation(shaderProgram, "projection"), 1, false, &projection[0][0]);
And I want to be able to multiply them left to right in the vertex shader.
#version 460 core
layout (location = 0) in vec3 vertexPosition;
uniform mat4 model;
uniform mat4 projection;
void main() {
// I'm eventually gonna add view so it'd look like this
// gl_Position = model * view * projection * vec4(vertexPosition, 1.0);
gl_Position = model * projection * vec4(vertexPosition, 1.0);
}
P.S: I want to use a left handed coordinate system. (Right = +X, Up = +Y, Forward = +Z)
OpenGL matrices are stored in column-major order, but your matrices are stored in row-major order. Hence, you have to multiply the matrices to the vector from the right-hand side (the vector goes on the left). Change

gl_Position = model * projection * vec4(vertexPosition, 1.0);

to

gl_Position = vec4(vertexPosition, 1.0) * model * projection;
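Alternatively, if you prefer to keep matrix-on-the-left multiplications in the shader, you could let OpenGL transpose the row-major data on upload by passing GL_TRUE as the transpose argument. A minimal sketch of that option, reusing your upload calls:

glUniformMatrix4fv(glGetUniformLocation(shaderProgram, "model"), 1, GL_TRUE, &model[0][0]);
glUniformMatrix4fv(glGetUniformLocation(shaderProgram, "projection"), 1, GL_TRUE, &projection[0][0]);
// with the matrices transposed on upload, the shader uses the usual order:
// gl_Position = projection * model * vec4(vertexPosition, 1.0);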
I am trying to make a simple voxel engine with OpenGL and C++. My first step is to send out rays from the camera and detect whether a ray intersects with something (for testing purposes it is just two planes). I got it working without camera rotation by creating a full-screen quad and programming the fragment shader to send out a ray for every fragment (for now I'm assuming a fragment is a pixel) in the direction (texCoord.x, texCoord.y, -1). Now I am trying to implement camera rotation.
I have tried generating a rotation matrix on the CPU and sending it to the shader, which multiplies it with every ray. However, when I rotate the camera, the planes start to stretch in a way I can only describe with this video.
https://www.youtube.com/watch?v=6NScMwnPe8c
Here is the code that creates the matrix and is run every frame:
float pi = 3.141592;
// camRotX and Y are defined elsewhere and can be controlled from the keyboard during runtime.
glm::vec3 camEulerAngles = glm::vec3(camRotX, camRotY, 0);
std::cout << "X: " << camEulerAngles.x << " Y: " << camEulerAngles.y << "\n";
// Convert to radians
camEulerAngles.x = camEulerAngles.x * pi / 180;
camEulerAngles.y = camEulerAngles.y * pi / 180;
camEulerAngles.z = camEulerAngles.z * pi / 180;
// Generate Quaternion
glm::quat camRotation;
camRotation = glm::quat(camEulerAngles);
// Generate rotation matrix from quaternion
glm::mat4 camToWorldMatrix = glm::toMat4(camRotation);
// No transformation matrix is created because the rays should be relative to 0,0,0
// Send the rotation matrix to the shader
int camTransformMatrixID = glGetUniformLocation(shader, "cameraTransformationMatrix");
glUniformMatrix4fv(camTransformMatrixID, 1, GL_FALSE, glm::value_ptr(camToWorldMatrix));
And the fragment shader:
#version 330 core
in vec4 texCoord;
layout(location = 0) out vec4 color;
uniform vec3 cameraPosition;
uniform vec3 cameraTR;
uniform vec3 cameraTL;
uniform vec3 cameraBR;
uniform vec3 cameraBL;
uniform mat4 cameraTransformationMatrix;
uniform float fov;
uniform float aspectRatio;
float pi = 3.141592;
int RayHitCell(vec3 origin, vec3 direction, vec3 cellPosition, float cellSize)
{
if(direction.z != 0)
{
float multiplicationFactorFront = cellPosition.z - origin.z;
if(multiplicationFactorFront > 0){
vec2 interceptFront = vec2(direction.x * multiplicationFactorFront + origin.x,
direction.y * multiplicationFactorFront + origin.y);
if(interceptFront.x > cellPosition.x && interceptFront.x < cellPosition.x + cellSize &&
interceptFront.y > cellPosition.y && interceptFront.y < cellPosition.y + cellSize)
{
return 1;
}
}
float multiplicationFactorBack = cellPosition.z + cellSize - origin.z;
if(multiplicationFactorBack > 0){
vec2 interceptBack = vec2(direction.x * multiplicationFactorBack + origin.x,
direction.y * multiplicationFactorBack + origin.y);
if(interceptBack.x > cellPosition.x && interceptBack.x < cellPosition.x + cellSize &&
interceptBack.y > cellPosition.y && interceptBack.y < cellPosition.y + cellSize)
{
return 2;
}
}
}
return 0;
}
void main()
{
// For now I'm not accounting for FOV and aspect ratio because I want to get the rotation working first
vec4 beforeRotateRayDirection = vec4(texCoord.x,texCoord.y,-1,0);
// Apply the rotation matrix that was generated on the cpu
vec3 rayDirection = vec3(cameraTransformationMatrix * beforeRotateRayDirection);
int t = RayHitCell(cameraPosition, rayDirection, vec3(0,0,5), 1);
if(t == 1)
{
// Hit front plane
color = vec4(0, 0, 1, 0);
}else if(t == 2)
{
// Hit back plane
color = vec4(0, 0, 0.5, 0);
}else{
// background color
color = vec4(0, 1, 0, 0);
}
}
Okay. It's really hard to know what is wrong, but I will try nonetheless.
Here are a few tips and notes:
1) You can debug directions by mapping them to RGB colors. Keep in mind you should normalize the vectors and map them from (-1, 1) to (0, 1). Just do the dir * 0.5 + 0.5 type of thing. Example:
color = vec4(normalize(rayDirection) * 0.5 + 0.5, 1.0);
2) You can get the rotation matrix in a more straightforward manner. The quaternion below starts from a zero (identity) rotation; it is then rotated first around the Y axis (horizontal look) and only then around the X axis (vertical look). Keep in mind that the rotation order is implementation-dependent if you initialize from Euler angles. Use mat4_cast to avoid the experimental glm extensions (gtx) whenever possible. Example:
// Define rotation quaternion starting from look rotation
glm::quat camRotation = glm::vec3(0, 0, 0);
camRotation = glm::rotate(camRotation, glm::radians(camRotY), glm::vec3(0, 1, 0));
camRotation = glm::rotate(camRotation, glm::radians(camRotX), glm::vec3(1, 0, 0));
glm::mat4 camToWorldMatrix = glm::mat4_cast(camRotation);
3) Your beforeRotateRayDirection is a vector that (probably) ranges from (-1, -1, -1) all the way to (1, 1, -1), which is not normalized; the length of (1, 1, 1) is √3 ≈ 1.732. Be sure you have taken that into account in your collision math, or just normalize the vector.
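For example, a one-line sketch reusing the names from your fragment shader:

vec3 rayDirection = normalize(vec3(cameraTransformationMatrix * beforeRotateRayDirection));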
My partial answer so far...
Your collision test is a bit weird... It appears you want to cast the ray against the Z planes of the given cell position (twice: once for the front face and once for the back face). I have reviewed your code logic and it makes some sense, but without the vertex program, and thus without knowing the range of the texCoord values, it is not possible to be sure. You might want to rethink your logic along these lines:
int RayHitCell(vec3 origin, vec3 direction, vec3 cellPosition, float cellSize)
{
//Get triangle side vectors
vec3 tu = vec3(cellSize,0,0); //Triangle U component
vec3 tv = vec3(0,cellSize,0); //Triangle V component
//Determinant for inverse matrix
vec3 q = cross(direction, tv);
float det = dot(tu, q);
//if(abs(det) < 0.0000001) //If too close to zero
// return;
float invdet = 1.0/det;
//Solve component parameters
vec3 s = origin - cellPosition;
float u = dot(s, q) * invdet;
if(u < 0.0 || u > 1.0)
return 0;
vec3 r = cross(s, tu);
float v = dot(direction, r) * invdet;
if(v < 0.0 || v > 1.0)
return 0;
float t = dot(tv, r) * invdet;
if(t <= 0.0)
return 0;
return 1;
}
void main()
{
// For now I'm not accounting for FOV and aspect ratio because I want to get the
// rotation working first
vec4 beforeRotateRayDirection = vec4(texCoord.x, texCoord.y, -1, 0);
// Apply the rotation matrix that was generated on the cpu
vec3 rayDirection = vec3(cameraTransformationMatrix * beforeRotateRayDirection);
int t = RayHitCell(cameraPosition, normalize(rayDirection), vec3(0,0,5), 1);
if (t == 1)
{
// Hit front plane
color = vec4(0, 0, 1, 0);
}
else
{
// background color
color = vec4(0, 1, 0, 0);
}
}
This should give you a plane; let me know if it works. A cube will be very easy to do.
P.S.: u and v can be used for texture mapping.
I am trying to replicate the Sascha Willems SSAO example while using the LearnOpenGL SSAO tutorial as a resource. But my SSAO code is only partially covering models at certain angles/distances, and there is also a very strong self-occlusion effect when close to an object.
On the left is my renderer, and on the right side is the Sascha Willems SSAO Example:
Center: Wrong | Correct
Window: Wrong | Correct
Stairs: Wrong | Correct
EDIT: There is some strange artifacting on the Correct images from RenderDoc. Sorry about that.
Some notes about my renderer variables:
Position+Depth image is using VK_FORMAT_R32G32B32A32_SFLOAT format and looks correct in RenderDoc. [1] [2]
Normal image is using VK_FORMAT_R8G8B8A8_UNORM format and looks correct in RenderDoc. [1]
Position+Depth and Normal images are using a VkSampler with VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE.
SSAO image is VK_FORMAT_R8_UNORM and is being written correctly by the shader. [1]
SSAO Noise image is using VK_FORMAT_R32G32B32A32_SFLOAT format and looks correct in RenderDoc. [1]
SSAO Noise image is using a VkSampler with VK_SAMPLER_ADDRESS_MODE_REPEAT.
SSAO Noise
// Random Generator
std::default_random_engine rndEngine(static_cast<unsigned>(glfwGetTime()));
std::uniform_real_distribution<float> rndDist(0.0f, 1.0f);
// SSAO random noise
std::vector<glm::vec4> ssaoNoise(SSAO_NOISE_DIM * SSAO_NOISE_DIM);
for (uint32_t i = 0; i < static_cast<uint32_t>(ssaoNoise.size()); i++)
{
ssaoNoise[i] = glm::vec4(rndDist(rndEngine) * 2.0f - 1.0f, rndDist(rndEngine) * 2.0f - 1.0f, 0.0f, 0.0f);
}
SSAO Kernels
// Function for SSAOKernel generation
float lerp(float a, float b, float f)
{
return a + f * (b - a);
}
// SSAO sample kernel
std::vector<glm::vec4> ssaoKernel(SSAO_KERNEL_SIZE);
for (uint32_t i = 0; i < SSAO_KERNEL_SIZE; i++)
{
glm::vec3 sample(rndDist(rndEngine) * 2.0 - 1.0, rndDist(rndEngine) * 2.0 - 1.0, rndDist(rndEngine));
sample = glm::normalize(sample);
sample *= rndDist(rndEngine);
float scale = float(i) / float(SSAO_KERNEL_SIZE);
scale = lerp(0.1f, 1.0f, scale * scale);
ssaoKernel[i] = glm::vec4(sample * scale, 0.0f);
}
SSAO Kernel XY values are between -1.0 and 1.0, and Z values are between 0.0 and 1.0:
ssaoKernel XYZ[0]: X: -0.0428458 Y: 0.0578492 Z: 0.0569087
ssaoKernel XYZ[1]: X: 0.0191572 Y: 0.0442375 Z: 0.00108795
ssaoKernel XYZ[2]: X: 0.00155709 Y: 0.0287552 Z: 0.024916
ssaoKernel XYZ[3]: X: -0.0169349 Y: -0.0298343 Z: 0.0272303
ssaoKernel XYZ[4]: X: 0.0469432 Y: 0.0348599 Z: 0.0573885
(...)
ssaoKernel XYZ[31]: X: -0.104106 Y: -0.434528 Z: 0.321963
GLSL shaders
model.vert
mat3 normalMatrix = transpose(inverse(mat3(ubo.view * ubo.model)));
outNormalViewSpace = normalMatrix * inNormal;
outPositionViewSpace = vec3(ubo.view * ubo.model * vec4(inPosition, 1.0));
model.frag
// These are identical to the camera
float near = 0.1;
float far = 100.0;
float LinearizeDepth(float depth)
{
float z = depth * 2.0 - 1.0;
return (2.0 * near * far) / (far + near - z * (far - near));
}
(...)
outNormalViewSpace = vec4(normalize(inNormalViewSpace) * 0.5 + 0.5, 1.0);
outPositionDepth = vec4(inPositionViewSpace, LinearizeDepth(gl_FragCoord.z));
fullscreen.vert
outUV = vec2((gl_VertexIndex << 1) & 2, gl_VertexIndex & 2);
gl_Position = vec4(outUV * 2.0f - 1.0f, 0.0f, 1.0f);
ssao.frag
#version 450
layout (location = 0) in vec2 inUV;
layout (constant_id = 1) const int SSAO_KERNEL_SIZE = 32;
layout (constant_id = 2) const float SSAO_RADIUS = 0.5;
layout (binding = 0) uniform sampler2D samplerPositionDepth;
layout (binding = 1) uniform sampler2D samplerNormal;
layout (binding = 2) uniform sampler2D samplerSSAONoise;
layout (binding = 3) uniform SSAOKernel
{
vec4 samples[SSAO_KERNEL_SIZE];
} ssaoKernel;
layout( push_constant ) uniform UniformBufferObject {
mat4 projection;
} ubo;
layout (location = 0) out float outSSAO;
void main()
{
//
// SSAO Post Processing (Pre-Blur)
//
// Get a random vector using a noise lookup
ivec2 texDim = textureSize(samplerPositionDepth, 0);
ivec2 noiseDim = textureSize(samplerSSAONoise, 0);
const vec2 noiseUV = vec2(float(texDim.x) / float(noiseDim.x), float(texDim.y) / (noiseDim.y)) * inUV;
vec3 randomVec = texture(samplerSSAONoise, noiseUV).xyz * 2.0 - 1.0;
// Get G-Buffer values
vec3 fragPos = texture(samplerPositionDepth, inUV).rgb;
vec3 normal = normalize(texture(samplerNormal, inUV).rgb * 2.0 - 1.0);
// Create TBN matrix
vec3 tangent = normalize(randomVec - normal * dot(randomVec, normal));
vec3 bitangent = cross(tangent, normal);
mat3 TBN = mat3(tangent, bitangent, normal);
// Calculate occlusion value
float occlusion = 0.0f;
for(int i = 0; i < SSAO_KERNEL_SIZE; i++)
{
vec3 samplePos = TBN * ssaoKernel.samples[i].xyz;
samplePos = fragPos + samplePos * SSAO_RADIUS;
// project
vec4 offset = vec4(samplePos, 1.0f);
offset = ubo.projection * offset;
offset.xyz /= offset.w;
offset.xyz = offset.xyz * 0.5f + 0.5f;
float sampleDepth = -texture(samplerPositionDepth, offset.xy).w;
// Range check
float rangeCheck = smoothstep(0.0f, 1.0f, SSAO_RADIUS / abs(fragPos.z - sampleDepth));
occlusion += (sampleDepth >= samplePos.z ? 1.0f : 0.0f) * rangeCheck;
}
occlusion = 1.0 - (occlusion / float(SSAO_KERNEL_SIZE));
outSSAO = occlusion;
}
There has to be a wrong setting or improper calculation somewhere, but I can't quite put my finger on it. Feel free to request additional code snippets if something pertinent is missing.
Any help is greatly appreciated, thank you!
Credit goes to mlkn for pointing out in the comments that the LinearizeDepth function did not look right. He was correct, there was an extra unnecessary "* 2.0 - 1.0" step that did not belong. Thank you mlkn! :)
This was the original, incorrect LinearizeDepth function:
float LinearizeDepth(float depth)
{
float z = depth * 2.0 - 1.0;
return (2.0 * near * far) / (far + near - z * (far - near));
}
By removing the first line (in Vulkan the NDC depth range is already [0, 1], so there is no remap from [0, 1] to [-1, 1] as there would be for an OpenGL-style depth range) and changing it to this:
float LinearizeDepth(float depth)
{
return (2.0 * near * far) / (far + near - depth * (far - near));
}
My output immediately changed to this, which appears to be correct:
I've got a basic OpenGL application and I want to use my projection matrix.
This is my matrix:
WorldCoordinates.m[0][0] = 2.0f / Width - 1.0f; WorldCoordinates.m[0][1] = 0; WorldCoordinates.m[0][2] = 0, WorldCoordinates.m[0][3] = 0;
WorldCoordinates.m[1][0] = 0; WorldCoordinates.m[1][1] = 2.0f / Height - 1.0f; WorldCoordinates.m[1][2] = 0, WorldCoordinates.m[1][3] = 0;
WorldCoordinates.m[2][0] = 0; WorldCoordinates.m[2][1] = 0; WorldCoordinates.m[2][2] = 0, WorldCoordinates.m[2][3] = 0;
WorldCoordinates.m[3][0] = 0; WorldCoordinates.m[3][1] = 0; WorldCoordinates.m[3][2] = 0, WorldCoordinates.m[3][3] = 0;
(WorldCoordinates is a Matrix4 struct that contains just one member called m, a float[4][4]; Width and Height are two ints.)
I then pass this matrix to my vertex shader using this:
shader.Bind();
glUniformMatrix4fv(glGetUniformLocation(shader.GetProgramID(), "worldCoordinates"), 1, GL_TRUE, &WorldCoordinates.m[0][0]);
(Shader is a class with a Bind() method that just calls glUseProgram.)
This is my vertex shader (GLSL):
#version 330 core
layout (location = 0) in vec3 position;
layout (location = 1) in vec3 color;
layout (location = 2) in vec2 texCoord;
out vec3 Color;
out vec2 TexCoord;
uniform mat4 worldCoordinates;
void main()
{
gl_Position = worldCoordinates * vec4(position, 1.0f);
Color = color;
TexCoord = texCoord;
}
Using this, it doesn't work. But changing the gl_Position call to this:
gl_Position = vec4(vec3(position.x * 1/400 -1, position.y * 1/300 -1, 1.0f), 1.0f);
it renders as expected. Why is that?
This is how you build an orthographic projection matrix:
static void
mat4_ortho(mat4_t m, float left, float right, float bottom, float top, float near, float far)
{
float rl = right - left;
float tb = top - bottom;
float fn = far - near;
mat4_zero(m);
m[ 0] = 2.0f / rl;
m[ 5] = 2.0f / tb;
m[10] = -2.0f / fn;
m[12] = -(left + right) / rl;
m[13] = -( top + bottom) / tb;
m[14] = -( far + near) / fn;
m[15] = 1.0f;
}
For your case, you'd set left=0, right=width, bottom=0, top=height; near and far don't matter much, just set -1.0 and 1.0 for instance.
With such a matrix, the vertex coordinates you use for drawing will map 1:1 with the pixels on screen.
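A minimal usage sketch for your case (assuming mat4_t is a plain float[16]; GL_FALSE is used because mat4_ortho already fills the matrix in column-major order):

mat4_t proj;
mat4_ortho(proj, 0.0f, (float)Width, 0.0f, (float)Height, -1.0f, 1.0f);
shader.Bind();
glUniformMatrix4fv(glGetUniformLocation(shader.GetProgramID(), "worldCoordinates"), 1, GL_FALSE, proj);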
Currently I am learning 3D rendering theory with the book "Learning Modern 3D Graphics Programming" and am right now stuck in one of the "Further Study" activities in the review of chapter four, specifically the last one.
The third activity was answered in this question; I understood it with no problem. However, this last activity asks me to do all of that, this time using only matrices.
I have a solution that partially works, but it feels like quite a hack to me, and it is probably not the correct way to do it.
My solution to the third question involved oscillating the 3D vector E's x, y, and z components within an arbitrary range, which produced a zooming-in-out cube (growing from the bottom-left, per OpenGL's origin point). I wanted to do this again using matrices; it looked like this:
However, I get these results with matrices (ignoring the background color change):
Now to the code...
The matrix is a float[16] called theMatrix that represents a 4x4 matrix with the data written in column-major order with everything but the following elements initialized to zero:
float fFrustumScale = 1.0f; float fzNear = 1.0f; float fzFar = 3.0f;
theMatrix[0] = fFrustumScale;
theMatrix[5] = fFrustumScale;
theMatrix[10] = (fzFar + fzNear) / (fzNear - fzFar);
theMatrix[14] = (2 * fzFar * fzNear) / (fzNear - fzFar);
theMatrix[11] = -1.0f;
Then the rest of the code stays the same as in the matrixPerspective tutorial lesson until we get to the void display() function:
//Hacked-up variables pretending to be a single vector (E)
float x = 0.0f, y = 0.0f, z = -1.0f;
//variables used for the oscillating zoom-in-out
int counter = 0;
float increment = -0.005f;
int steps = 250;
void display()
{
    glClearColor(0.15f, 0.15f, 0.2f, 0.0f);
    glClear(GL_COLOR_BUFFER_BIT);
    glUseProgram(theProgram);

    //Oscillating values
    while (counter <= steps)
    {
        x += increment;
        y += increment;
        z += increment;
        counter++;
        if (counter >= steps)
        {
            counter = 0;
            increment *= -1.0f;
        }
        break;
    }

    //Introduce the new data to the array before sending it as a 4x4 matrix to the shader
    theMatrix[0] = -x * -z;
    theMatrix[5] = -y * -z;
    //Update the matrix with the new values after processing with E
    glUniformMatrix4fv(perspectiveMatrixUniform, 1, GL_FALSE, theMatrix);

    /*
    cube rendering code omitted for simplification
    */

    glutSwapBuffers();
    glutPostRedisplay();
}
And here is the vertex shader code that uses the matrix:
#version 330
layout(location = 0) in vec4 position;
layout(location = 1) in vec4 color;
smooth out vec4 theColor;
uniform vec2 offset;
uniform mat4 perspectiveMatrix;
void main()
{
vec4 cameraPos = position + vec4(offset.x, offset.y, 0.0, 0.0);
gl_Position = perspectiveMatrix * cameraPos;
theColor = color;
}
What am I doing wrong, or what am I confusing? Thanks for taking the time to read all of this.
In OpenGL there are three major matrices that you need to be aware of:
The Model Matrix D: Maps vertices from an object's local coordinate system into the world's coordinate system.
The View Matrix V: Maps vertices from the world's coordinate system to the camera's coordinate system.
The Projection Matrix P: Maps (or, more suitably, projects) vertices from the camera's space onto the screen.
Multiplied together, the model and the view matrix give us the so-called model-view matrix M, which maps the vertices from the object's local coordinates to the camera's coordinate system.
Altering specific elements of the model-view matrix results in certain affine transformations of the camera.
For example, the 3 matrix elements of the rightmost column are for the translation transformation. The diagonal elements are for the scaling transformation. Appropriately altering the elements of the upper-left 3×3 sub-matrix performs the rotation transformations about the camera's X, Y and Z axes.
The above transformations in C++ code are quite simple and are displayed below:
void translate(GLfloat const dx, GLfloat const dy, GLfloat dz, GLfloat *M)
{
M[12] = dx; M[13] = dy; M[14] = dz;
}
void scale(GLfloat const sx, GLfloat sy, GLfloat sz, GLfloat *M)
{
M[0] = sx; M[5] = sy; M[10] = sz;
}
void rotateX(GLfloat const radians, GLfloat *M)
{
M[5] = std::cosf(radians); M[6] = -std::sinf(radians);
M[9] = -M[6]; M[10] = M[5];
}
void rotateY(GLfloat const radians, GLfloat *M)
{
M[0] = std::cosf(radians); M[2] = std::sinf(radians);
M[8] = -M[2]; M[10] = M[0];
}
void rotateZ(GLfloat const radians, GLfloat *M)
{
M[0] = std::cosf(radians); M[1] = std::sinf(radians);
M[4] = -M[1]; M[5] = M[0];
}
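A minimal usage sketch (note that these helpers only overwrite individual elements, so start from an identity matrix; properly composing several rotations would still require a full matrix multiply):

GLfloat M[16] = { 1, 0, 0, 0,
                  0, 1, 0, 0,
                  0, 0, 1, 0,
                  0, 0, 0, 1 };
rotateY(0.5f, M);                // rotate the scene around the Y axis
translate(0.0f, 0.0f, -5.0f, M); // then move everything 5 units along -Z, in front of the camera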
Now you have to define the projection matrix P.
Orthographic projection:
// These parameters are lens properties.
// The "near" and "far" create the Depth of Field.
// The "left", "right", "bottom" and "top" represent the rectangle formed
// by the near area, this rectangle will also be the size of the visible area.
GLfloat near = 0.001, far = 100.0;
GLfloat left = 0.0, right = 320.0;
GLfloat bottom = 480.0, top = 0.0;
// First Column
P[0] = 2.0 / (right - left);
P[1] = 0.0;
P[2] = 0.0;
P[3] = 0.0;
// Second Column
P[4] = 0.0;
P[5] = 2.0 / (top - bottom);
P[6] = 0.0;
P[7] = 0.0;
// Third Column
P[8] = 0.0;
P[9] = 0.0;
P[10] = -2.0 / (far - near);
P[11] = 0.0;
// Fourth Column
P[12] = -(right + left) / (right - left);
P[13] = -(top + bottom) / (top - bottom);
P[14] = -(far + near) / (far - near);
P[15] = 1;
Perspective Projection:
// These parameters are about lens properties.
// The "near" and "far" create the Depth of Field.
// The "angleOfView", as the name suggests, is the angle of view.
// The "aspectRatio" is the cool thing about this matrix. OpenGL doesn't
// have any information about the screen you are rendering for, so the
// results could seem stretched. But this variable puts the thing onto the
// right path. The aspect ratio is your device screen (or desired area) width
// divided by its height. This will give you a number < 1.0 if the area
// has more vertical space and a number > 1.0 if the area has more horizontal
// space. An aspect ratio of 1.0 represents a square area.
GLfloat near = 0.001;
GLfloat far = 100.0;
GLfloat angleOfView = 0.25 * 3.1415;
GLfloat aspectRatio = 0.75;
// Some calculus before the formula.
GLfloat size = near * std::tanf(0.5 * angleOfView);
GLfloat left = -size;
GLfloat right = size;
GLfloat bottom = -size / aspectRatio;
GLfloat top = size / aspectRatio;
// First Column
P[0] = 2.0 * near / (right - left);
P[1] = 0.0;
P[2] = 0.0;
P[3] = 0.0;
// Second Column
P[4] = 0.0;
P[5] = 2.0 * near / (top - bottom);
P[6] = 0.0;
P[7] = 0.0;
// Third Column
P[8] = (right + left) / (right - left);
P[9] = (top + bottom) / (top - bottom);
P[10] = -(far + near) / (far - near);
P[11] = -1.0;
// Fourth Column
P[12] = 0.0;
P[13] = 0.0;
P[14] = -(2.0 * far * near) / (far - near);
P[15] = 0.0;
Then your shader will become:
#version 330
layout(location = 0) in vec4 position;
layout(location = 1) in vec4 color;
smooth out vec4 theColor;
uniform mat4 modelViewMatrix;
uniform mat4 projectionMatrix;
void main()
{
gl_Position = projectionMatrix * modelViewMatrix * position;
theColor = color;
}
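A minimal sketch of the matching uniform uploads (assuming M and P are the float[16] model-view and projection arrays built above, already in column-major order, and theProgram is the linked program object from your code):

glUseProgram(theProgram);
glUniformMatrix4fv(glGetUniformLocation(theProgram, "modelViewMatrix"), 1, GL_FALSE, M);
glUniformMatrix4fv(glGetUniformLocation(theProgram, "projectionMatrix"), 1, GL_FALSE, P);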
Bibliography:
http://blog.db-in.com/cameras-on-opengl-es-2-x/
http://www.songho.ca/opengl/gl_transform.html