Related
I have the following code for my own look-at matrix (multiplication of matrices and the cross product of vectors work perfectly; I checked them):
// Builds a right-handed look-at (view) matrix in row-major storage.
//   eye    - camera position in world space.
//   center - world-space point the camera looks at.
//   up     - approximate up direction; must not be parallel to the line of sight.
// The view matrix is the inverse of the camera frame: its rotation rows are the
// camera basis vectors and its translation column is -R * eye. The original
// built the basis from cross(center, up) and put the translation into a matrix
// with a zero diagonal, so the product destroyed the rotation part entirely.
template<typename Type>
void setLookAt(Matrix4x4<Type>& matrix, const Vector3<Type> eye, const Vector3<Type> center, const Vector3<Type> up) noexcept
{
// Back vector (+Z of view space): from the target towards the eye, so the
// camera looks down its negative Z axis (right-handed convention).
Vector3<Type> zAxis( { eye.getX() - center.getX(), eye.getY() - center.getY(), eye.getZ() - center.getZ() } );
zAxis = zAxis.normalize();
// Right vector (+X) and re-orthogonalized up vector (+Y).
Vector3<Type> xAxis = Math::cross(up, zAxis).normalize();
Vector3<Type> yAxis = Math::cross(zAxis, xAxis);
matrix = {
xAxis.getX(), xAxis.getY(), xAxis.getZ(), -Math::dot(xAxis, eye),
yAxis.getX(), yAxis.getY(), yAxis.getZ(), -Math::dot(yAxis, eye),
zAxis.getX(), zAxis.getY(), zAxis.getZ(), -Math::dot(zAxis, eye),
0.0, 0.0, 0.0, 1.0
};
}
// Builds a symmetric perspective projection matrix (row-major storage).
// fov is the vertical field of view in degrees; aspect = width / height.
// NOTE(review): znear and zfar must both be positive distances to the clip
// planes — the usage snippet below passes -0.1 for znear, which breaks the
// projection even though this matrix itself is correct.
template<typename Type>
void setPerspectiveMatrix(Matrix4x4<Type>& matrix, Type fov, Type aspect, Type znear, Type zfar) noexcept
{
// Cotangent of half the vertical field of view (fov converted to radians).
const Type yScale = static_cast<Type>(1.0 / tan(RADIANS_PER_DEGREE * fov / 2));
const Type xScale = yScale / aspect;
// difference = znear - zfar, so the two depth terms below are algebraically
// identical to the canonical -(f + n) / (f - n) and -2 f n / (f - n).
const Type difference = znear - zfar;
matrix = {
xScale, 0, 0, 0,
0, yScale, 0, 0,
0, 0, (zfar + znear) / difference, 2 * zfar * znear / difference,
0, 0, -1, 0
};
}
Matrix multiplication implementation:
// static const std::uint8_t ROW_SIZE = 4;
// static const std::uint8_t MATRIX_SIZE = ROW_SIZE * ROW_SIZE;
// static const std::uint8_t FIRST_ROW = 0;
// static const std::uint8_t SECOND_ROW = ROW_SIZE;
// static const std::uint8_t THIRD_ROW = ROW_SIZE + ROW_SIZE;
// static const std::uint8_t FOURTH_ROW = ROW_SIZE + ROW_SIZE + ROW_SIZE;
// In-place row-major matrix product: this = this * anotherMatrix.
// out[r][c] = sum over k of this[r][k] * anotherMatrix[k][c].
template<class Type>
void Matrix4x4<Type>::mul(const Matrix4x4& anotherMatrix) noexcept
{
// Snapshot the current elements first — the product is written back in place.
Type lhs[MATRIX_SIZE];
std::copy(std::begin(mElements), std::end(mElements), lhs);
const Type* rhs = anotherMatrix.mElements;
for (std::uint8_t row = 0; row < MATRIX_SIZE; row += ROW_SIZE)
{
for (std::uint8_t col = 0; col < ROW_SIZE; ++col)
{
// Accumulate in the same k = 0..3 order as the unrolled original.
Type sum = lhs[row] * rhs[col];
for (std::uint8_t k = 1; k < ROW_SIZE; ++k)
{
sum += lhs[row + k] * rhs[k * ROW_SIZE + col];
}
mElements[row + col] = sum;
}
}
}
Cross product implementation:
// Right-handed cross product of two 3D vectors; the result is orthogonal to
// both inputs and oriented by the right-hand rule.
template<typename Type>
Math::Vector3<Type> cross(Vector3<Type> vector, Vector3<Type> anotherVector) noexcept
{
const Type ax = vector.getX();
const Type ay = vector.getY();
const Type az = vector.getZ();
const Type bx = anotherVector.getX();
const Type by = anotherVector.getY();
const Type bz = anotherVector.getZ();
return { ay * bz - az * by, az * bx - ax * bz, ax * by - ay * bx };
}
Using it:
// OpenGL
// NOTE(review): uniform locations must be queried with glGetUniformLocation;
// glGetAttribLocation returns vertex-attribute indices and is wrong here.
// Also "projectionMatrix " below contains a trailing space, and znear must be
// a positive distance — -0.1 breaks the perspective matrix.
glUseProgram(mProgramID);
Matrix4x4f lookAt;
setLookAt(lookAt, { 0.0f, 0.0f, 3.0f }, { 0.0f, 0.0f, -1.0f }, { 0.0f, 1.0f, 0.0f });
// GL_TRUE transposes the row-major matrix into GL's column-major order.
glUniformMatrix4fv(glGetAttribLocation(mProgramID, "viewMatrix"), 1, GL_TRUE, lookAt);
Matrix4x4f projection;
setPerspectiveMatrix(projection, 45.0f, width / height, -0.1, 100.0f);
glUniformMatrix4fv(glGetAttribLocation(mProgramID, "projectionMatrix "), 1, GL_TRUE, projection);
// GLSL
// Vertex shader: transforms each model-space vertex by the view matrix and
// then the projection matrix into clip space.
layout (location = 0) in vec3 position;
uniform mat4 viewMatrix;
uniform mat4 projectionMatrix;
void main()
{
// w = 1.0 marks the input as a position (affected by translation).
gl_Position = projectionMatrix * viewMatrix * vec4(position, 1.0f);
}
After using this code, I get a blank screen, although a cube should be drawn. The problem is in the matrix itself — other matrices work fine (offset, rotation, ...) — but I can't understand exactly where it is. Can you tell me what could be the problem?
"projectionMatrix" and "viewMatrix" are uniform variables. The uniform location can be get by glGetUniformLocation rather than glGetAttribLocation, which would return the attribute index of an active attribute:
GLint projLoc = glGetUniformLocation( mProgramID, "projectionMatrix" );
GLint viewLoc = glGetUniformLocation( mProgramID, "viewMatrix" );
At Perspective Projection the projection matrix describes the mapping from 3D points in the world as they are seen from of a pinhole camera, to 2D points of the viewport.
The eye space coordinates in the camera frustum (a truncated pyramid) are mapped to a cube (the normalized device coordinates).
At perspective projection the view space (volume) is defined by a frustum (a truncated pyramid), where the top of the pyramid is the viewer's position.
The direction of view (line of sight) and the near and the far distance define the planes which truncated the pyramid to a frustum (the direction of view is the normal vector of this planes).
This means both values, the distance to the near plane and the distance to the far plane have to be positive values:
Matrix4x4f lookAt;
setLookAt(lookAt, { 0.0f, 0.0f, 3.0f }, { 0.0f, 0.0f, -1.0f }, { 0.0f, 1.0f, 0.0f });
glUniformMatrix4fv(viewLoc, 1, GL_TRUE, lookAt);
Matrix4x4f projection;
setPerspectiveMatrix(projection, 45.0f, width / height, 0.1f, 100.0f); // 0.1f instead of -0.1f
glUniformMatrix4fv(projLoc, 1, GL_TRUE, projection);
The view space is the local system which is defined by the point of view onto the scene.
The position of the view, the line of sight and the upwards direction of the view, define a coordinate system relative to the world coordinate system.
The view matrix has to transform from world space to view space, so the view matrix is the inverse matrix of the view coordinate system.
If the coordinate system of the view space is a Right-handed system, where the X-axis points to the left and the Y-axis points up, then the Z-axis points out of the view (Note in a right hand system the Z-Axis is the cross product of the X-Axis and the Y-Axis).
The line of sight (z-axis) is the vector from the point of view eye to the target center:
// Builds a right-handed look-at view matrix (row-major, translation in the
// last column). The view matrix is the inverse of the camera frame: rotation
// rows are the camera basis vectors, translation is -R * eye.
template<typename Type>
void setLookAt(Matrix4x4<Type>& matrix, const Vector3<Type> eye, const Vector3<Type> center, const Vector3<Type> up) noexcept
{
// mz (+Z of view space) points from the target to the eye: a right-handed
// camera looks down its negative Z axis.
Vector3f mz( { eye.getX()-center.getX(), eye.getY()-center.getY(), eye.getZ()-center.getZ() } );
mz = mz.normalize();
Vector3f my = up.normalize();
Vector3f mx = cross(my, mz).normalize();
// Translation column is -R * eye: ALL three components must be negated.
// (The previous version negated only tz, which misplaces the camera.)
Type tx = -dot( mx, eye );
Type ty = -dot( my, eye );
Type tz = -dot( mz, eye );
matrix = {
mx.getX(), mx.getY(), mx.getZ(), tx,
my.getX(), my.getY(), my.getZ(), ty,
mz.getX(), mz.getY(), mz.getZ(), tz,
0.0, 0.0, 0.0, 1.0
};
}
// Cross product (right-hand rule): returns a vector perpendicular to both
// operands. Component i is the 2x2 determinant of the remaining components.
template<typename Type>
Vector3<Type> cross(Vector3<Type> vector, Vector3<Type> anotherVector) noexcept
{
const Type cx = vector.getY() * anotherVector.getZ() - vector.getZ() * anotherVector.getY();
const Type cy = vector.getZ() * anotherVector.getX() - vector.getX() * anotherVector.getZ();
const Type cz = vector.getX() * anotherVector.getY() - vector.getY() * anotherVector.getX();
return { cx, cy, cz };
}
// Returns a unit-length copy of this vector.
// Guards the degenerate zero-length case: dividing by a zero length would
// produce NaN/inf components, so the zero vector is returned unchanged.
template<typename Type>
Vector3<Type> Vector3<Type>::normalize(void) const
{
const Type len = std::sqrt(mV[0]*mV[0] + mV[1]*mV[1] + mV[2]*mV[2]);
if (len == static_cast<Type>(0))
return { mV[0], mV[1], mV[2] };
return { mV[0] / len, mV[1] / len, mV[2] / len };
}
// Scalar (dot) product: the sum of the component-wise products.
template<typename Type>
Type dot(Vector3<Type> vector, Vector3<Type> anotherVector) noexcept
{
return vector.getX() * anotherVector.getX()
+ vector.getY() * anotherVector.getY()
+ vector.getZ() * anotherVector.getZ();
}
A perspective projection matrix can be defined by a frustum.
The distances left, right, bottom and top, are the distances from the center of the view to the side faces of the frustum, on the near plane. near and far specify the distances to the near and far plane on the frustum.
r = right, l = left, b = bottom, t = top, n = near, f = far
x y z t
2*n/(r-l) 0 (r+l)/(r-l) 0
0 2*n/(t-b) (t+b)/(t-b) 0
0 0 -(f+n)/(f-n) -2*f*n/(f-n)
0 0 -1 0
If the projection is symmetric, where the line of sight is axis of symmetry of the view frustum, then the matrix can be simplified:
x y z t
1/(ta*a) 0 0 0
0 1/ta 0 0
0 0 -(f+n)/(f-n) -2*f*n/(f-n)
0 0 -1 0
where:
a = w / h
ta = tan( fov_y / 2 );
2 * n / (r-l) = 1 / (ta * a)
2 * n / (t-b) = 1 / ta
Further the projection matrix switches from an right-handed system to an left-handed system, because the z axis is turned.
// Symmetric perspective projection (row-major). fov is the vertical field of
// view in degrees, aspect = width / height; znear and zfar are both positive
// distances to the clip planes.
template<typename Type>
void setPerspectiveMatrix(Matrix4x4<Type>& matrix, Type fov, Type aspect, Type znear, Type zfar) noexcept
{
// Cotangent of half the vertical field of view (fov given in degrees).
const Type focal = static_cast<Type>(1.0 / tan(RADIANS_PER_DEGREE * fov / 2));
const Type depth = zfar - znear;
// Depth row maps [znear, zfar] onto [-1, 1] and copies -z into w.
const Type zz = -(zfar + znear) / depth;
const Type zw = -2 * zfar * znear / depth;
matrix = {
focal / aspect, 0, 0, 0,
0, focal, 0, 0,
0, 0, zz, zw,
0, 0, -1, 0
};
}
I can translate my 2d image to 0, 0 using the below code.
// D3DX uses the row-vector convention: v' = v * world * view * projection.
D3DXMATRIX worldMatrix, viewMatrix, orthoMatrix, rotation, movement;
// Get the world, view, and ortho matrices from the camera.
m_camera.GetViewMatrix(viewMatrix);
m_camera.GetWorldMatrix(worldMatrix);
m_camera.GetOrthoMatrix(orthoMatrix);
// Move the texture to the new position
// (re-centres screen coordinates so (m_posX, m_posY) is measured from the
// top-left corner of the window).
D3DXMatrixTranslation(&movement, ((m_VerticeProperties->screenWidth / 2) * -1) + m_posX,
(m_VerticeProperties->screenHeight / 2) - m_posY, 0.0f);
worldMatrix = movement;
//float m_rotationZ = -90 * 0.0174532925f;
//D3DXMatrixRotationYawPitchRoll(&rotation, 0, 0, m_rotationZ);
//worldMatrix = rotation;
// Give the bitmap class what it needs to make source rect
m_bitmap->SetVerticeProperties(m_VerticeProperties->screenWidth, m_VerticeProperties->screenHeight,
m_VerticeProperties->frameWidth, m_VerticeProperties->frameHeight, m_VerticeProperties->U, m_VerticeProperties->V);
//Render the model (the vertices)
m_bitmap->Render(m_d3dManager.GetDeviceContext(), flipped);
//Render the shader
m_shader->Render(m_d3dManager.GetDeviceContext(), m_bitmap->GetIndexCount(), worldMatrix, viewMatrix,
orthoMatrix, m_bitmap->GetTexture(), m_textureTranslationU, m_VerticeProperties->translationPercentageV);
The result:
I can also rotate the image with this code:
// Rotation-only variant: the translation step is commented out, so the quad
// rotates about the view origin instead of being repositioned.
D3DXMATRIX worldMatrix, viewMatrix, orthoMatrix, rotation, movement;
// Get the world, view, and ortho matrices from the camera.
m_camera.GetViewMatrix(viewMatrix);
m_camera.GetWorldMatrix(worldMatrix);
m_camera.GetOrthoMatrix(orthoMatrix);
//// Move the texture to the new position
//D3DXMatrixTranslation(&movement, ((m_VerticeProperties->screenWidth / 2) * -1) + m_posX,
// (m_VerticeProperties->screenHeight / 2) - m_posY, 0.0f);
//worldMatrix = movement;
// 0.0174532925f == pi / 180: converts 90 degrees to radians.
float m_rotationZ = 90 * 0.0174532925f;
D3DXMatrixRotationYawPitchRoll(&rotation, 0, 0, m_rotationZ);
worldMatrix = rotation;
// Give the bitmap class what it needs to make source rect
m_bitmap->SetVerticeProperties(m_VerticeProperties->screenWidth, m_VerticeProperties->screenHeight,
m_VerticeProperties->frameWidth, m_VerticeProperties->frameHeight, m_VerticeProperties->U, m_VerticeProperties->V);
//Render the model (the vertices)
m_bitmap->Render(m_d3dManager.GetDeviceContext(), flipped);
//Render the shader
m_shader->Render(m_d3dManager.GetDeviceContext(), m_bitmap->GetIndexCount(), worldMatrix, viewMatrix,
orthoMatrix, m_bitmap->GetTexture(), m_textureTranslationU, m_VerticeProperties->translationPercentageV);
The result:
I thought multiplying the translation and rotation matrices and setting them = to the world matrix would allow me to see both effects at once.
// Compose translation and rotation into a single world matrix.
D3DXMatrixTranslation(&movement, ((m_VerticeProperties->screenWidth / 2) * -1) + m_posX,
(m_VerticeProperties->screenHeight / 2) - m_posY, 0.0f);
// 0.0174532925f == pi / 180 (degrees to radians).
float m_rotationZ = 90 * 0.0174532925f;
D3DXMatrixRotationYawPitchRoll(&rotation, 0, 0, m_rotationZ);
// With D3DX's row-vector convention, rotation * movement rotates first and
// translates second. NOTE(review): if the image disappears, confirm both
// operands are initialized and that the operand order matches the intent.
worldMatrix = rotation * movement;
It doesn't. The image no longer appears on the screen.
Can anyone tell me what im doing wrong? Thanks.
Just do world * -translate * rotation * translate; it will make the rotation local.
here my code for example
// Rotates this object's 2D vertices by `angle` around `origin`, writing the
// transformed coordinates back into the vertex array (CPU-side transform).
void Ojbect::RotZ(float angle, Vec3 origin)
{
Mat4 w, rz, t;
rz.RotZ(angle);
t.Translation(-origin.x, -origin.y, 0);
// Intended composition: translate the pivot to the origin, rotate, translate
// back. NOTE(review): `t * rz * -1 * t` scales the second translation by -1
// rather than inverting it; the usual form is T(-origin) * R * T(origin) —
// confirm against the Mat4 operator semantics.
w = t * rz * -1 * t;
Vec4 newPos;
for (int i = 0; i < countV; i++)
{
// Homogeneous coordinate 1 makes the 2D point a position (translatable).
Vec3 pos(vertex[i].x, vertex[i].y, 1);
newPos.Transform(pos, w);
vertex[i].x = newPos.x;
vertex[i].y = newPos.y;
}
UpdateVertex(countV);
}
When I render my app, I'm expecting to see a number of rectangles surrounding the edges of the window. Instead I'm seeing this ..
All objects will be at z == 0.0f. If I don't render my scene using shaders, all objects show fine, so I'm thinking it must be a matrix calculation issue.
Anyone know where I might be going wrong with my matrix setups?
matrices is a custom class which contains the 3 matrices ..
public class MatrixUtils {
    /* Model, view and projection matrices shared by the renderer. */
    private Matrix4f modelMatrix = new Matrix4f();
    private Matrix4f viewMatrix = new Matrix4f();
    private Matrix4f projectionMatrix = new Matrix4f();

    public MatrixUtils() {
        loadIdentity(modelMatrix);
    }

    /** Overwrites the given matrix with the 4x4 identity. */
    public void loadIdentity(Matrix4f matrix) {
        float[][] identity = new float[4][4];
        for (int diagonal = 0; diagonal < 4; diagonal++) {
            identity[diagonal][diagonal] = 1;
        }
        matrix.load(identity);
    }
}
Inside my GLEventListener, I setup the matrices with initial values. Called on reshape, setup projection, model and view matrices ..
/* (non-Javadoc)
* #see javax.media.opengl.GLEventListener#reshape(javax.media.opengl.GLAutoDrawable, int, int, int, int)
*/
// Called whenever the drawable is resized: rebuild the camera and ortho
// projection matrices for the new dimensions (znear = 0.1, zfar = 100).
public void reshape(GLAutoDrawable gLDrawable, int x, int y, int width, int height) {
setupOrtho(width, height, 0.1f, 100.0f);
}
Model and View matrices are set to identity initially. Projection uses an ortho matrix.
// Resets model/view matrices, positions the camera and builds the ortho
// projection.
// NOTE(review): ortho(...) is declared as (left, right, top, bottom, zfar,
// znear), so this call passes znear into the zfar slot (and vice versa) and
// height into `bottom` — verify the intended argument order.
private void setupOrtho(float width, float height, float znear, float zfar) {
matrices.loadIdentity(matrices.getModelMatrix());
matrices.loadIdentity(matrices.getViewMatrix());
// Camera at z = +25 looking at the origin with +Y up.
matrices.setViewMatrix(
setupViewMatrix(
new Vec3(0.0f, 0.0f, 25.0f),
new Vec3(0.0f, 0.0f, 0.0f),
new Vec3(0.0f, 1.0f, 0.0f)));
matrices.setProjectionMatrix(ortho(0, width, 0, height, znear, zfar));
}
Calculate orthographic Projection matrix ..
// Row-major orthographic projection matrix (translation in the last column).
// NOTE(review): the parameter list is (left, right, top, bottom, zfar, znear)
// — top before bottom and zfar before znear — which differs from the usual
// glOrtho order; callers must match it positionally.
// Being row-major, it must be uploaded with transpose = true.
public Matrix4f ortho(float left, float right, float top, float bottom, float zfar, float znear) {
return new Matrix4f(new float[][] {
new float[] { 2 / (right - left), 0, 0, -((right + left) / (right - left)) },
new float[] { 0, 2 / (top - bottom), 0, -((top + bottom) / (top - bottom)) },
new float[] { 0, 0, -2 / (zfar - znear), -((zfar + znear) / (zfar - znear)) },
new float[] { 0, 0, 0, 1 },
});
}
Calculate View matrix ..
// Builds a right-handed look-at view matrix in row-major layout: the rows are
// the camera basis (side, up, -forward) and the last column is the
// translation -R * position. Upload with transpose = true.
public Matrix4f setupViewMatrix(Vec3 position, Vec3 target, Vec3 up) {
// f: unit line of sight; s: side vector; u: re-orthogonalized up.
Vec3f f = (new Vec3f(target.sub(position))).normalize();
Vec3f s = (new Vec3f(Vec3.cross(f, up))).normalize();
Vec3f u = (new Vec3f(Vec3.cross(s, f)));
return new Matrix4f(
new float[] {
s.x, s.y, s.z, -Vec3.dot(s, position),
u.x, u.y, u.z, -Vec3.dot(u, position),
-f.x, -f.y, -f.z, Vec3.dot(f, position),
0.0f, 0.0f, 0.0f, 1.0f});
}
Then inside my display() loop, I pass all 3 matrices into each object's draw() function.
// Renders every registered object with the shared matrix set.
// Fixes the original loop, which iterated over customObjects.size() (an int)
// instead of the collection itself and therefore did not compile.
public void display(GLAutoDrawable gLDrawable) {
for (CustomObject obj : customObjects) {
obj.draw(gl2, matrices, getShaderProgram(), obj.getPosition(), 0.0f);
}
}
This is how my custom objects setup vertexBuffer ..
int COORDS_PER_VERTEX = 3;
int vertexStride = COORDS_PER_VERTEX * 4; // 4 bytes per vertex
ShortBuffer drawListBuffer;
short drawOrder[] = { 0, 1, 2, 0, 2, 3 }; // order to draw vertices
// Quad corners centred on the origin; getP2M() scales pixels to meters.
float squareCoordsTemp[] = {
-(getWidth() / 2 * getP2M()), (getHeight() / 2 * getP2M()), 0.0f, // top left
-(getWidth() / 2 * getP2M()), -(getHeight() / 2 * getP2M()), 0.0f, // bottom left
(getWidth() / 2 * getP2M()), -(getHeight() / 2 * getP2M()), 0.0f, // bottom right
(getWidth() / 2 * getP2M()), (getHeight() / 2 * getP2M()), 0.0f }; // top right
squareCoords = squareCoordsTemp;
// Native-order direct buffers are required for JOGL vertex data.
ByteBuffer bb = ByteBuffer.allocateDirect(squareCoords.length * 4); // # of coordinate values * 4 bytes per float
bb.order(ByteOrder.nativeOrder());
vertexBuffer = bb.asFloatBuffer();
vertexBuffer.put(squareCoords);
vertexBuffer.position(0);
// initialize byte buffer for the draw list
ByteBuffer dlb = ByteBuffer.allocateDirect(drawOrder.length * 2); // # of coordinate values * 2 bytes per short
dlb.order(ByteOrder.nativeOrder());
drawListBuffer = dlb.asShortBuffer();
drawListBuffer.put(drawOrder);
drawListBuffer.position(0);
This is how my CustomObject draws ..
// Draws this object's quad: binds the shader, uploads color, vertex data and
// the three transform matrices, then issues the indexed draw call.
public void draw(final GL2 gl2, MatrixUtils matrices, int shaderProgram, final Vec3 position, final float bodyAngle){
gl2.glUseProgram(shaderProgram);
// enable alpha
gl2.glEnable(GL.GL_BLEND);
gl2.glBlendFunc(GL.GL_SRC_ALPHA, GL.GL_ONE_MINUS_SRC_ALPHA);
// Set color for drawing
setmColorHandle(gl2.glGetUniformLocation(shaderProgram, "vColor"));
gl2.glUniform4fv(getmColorHandle(), 1, getColorArray(), 0);
// get handle to vertex shader's vPosition member
mPositionHandle = gl2.glGetAttribLocation(shaderProgram, "vPosition");
// Enable a handle to the triangle vertices
gl2.glEnableVertexAttribArray(mPositionHandle);
// Prepare the triangle coordinate data
gl2.glVertexAttribPointer(
mPositionHandle, COORDS_PER_VERTEX,
GL2.GL_FLOAT, false,
vertexStride, vertexBuffer);
// get handle to shape's transformation matrix
mProj = gl2.glGetUniformLocation(shaderProgram, "mProj");
mView = gl2.glGetUniformLocation(shaderProgram, "mView");
mModel = gl2.glGetUniformLocation(shaderProgram, "mModel");
// Apply the projection and view transformation
// getP2M() == 60.0f .. pixels to meters for box2d
// Model matrix: translate to the body position, then rotate about Z.
matrices.loadIdentity(matrices.getModelMatrix());
matrices.setModelMatrix(matrices.translate(matrices.getModelMatrix(), new Vec3(position.x * getP2M(), position.y * getP2M(), position.z * getP2M())));
matrices.setModelMatrix(matrices.rotate(matrices.getModelMatrix(), bodyAngle, 0, 0, 1));
// transpose = true: the matrices are built row-major, GL expects
// column-major storage.
gl2.glUniformMatrix4fv(mProj, 1, true, matrices.getProjectionMatrix().getValues(), 0);
gl2.glUniformMatrix4fv(mView, 1, true, matrices.getViewMatrix().getValues(), 0);
gl2.glUniformMatrix4fv(mModel, 1, true, matrices.getModelMatrix().getValues(), 0);
// Draw the square
gl2.glDrawElements(
GL2.GL_TRIANGLES, drawOrder.length,
GL2.GL_UNSIGNED_SHORT, drawListBuffer);
// Disable vertex array
gl2.glDisableVertexAttribArray(mPositionHandle);
gl2.glDisable(GL.GL_BLEND);
gl2.glUseProgram(0);
}
Vertex shader ..
#version 120
// Vertex position; fed with 3 floats per vertex — the driver is expected to
// pad the missing w component with 1.0 (confirm for the target GL version).
attribute vec4 vPosition;
uniform mat4 mProj;
uniform mat4 mView;
uniform mat4 mModel;
void main() {
// Classic MVP transform: model -> world -> view -> clip space.
gl_Position = mProj * mView * mModel * vPosition;
}
fragment shader ..
#version 120
// Flat fill color for the whole primitive, supplied by the application.
uniform vec4 vColor;
void main() {
gl_FragColor = vColor;
}
Definition of Matrix4f ..
public class Matrix4f {
    /** 16 elements in row-major order: values[row * 4 + column]. */
    public float[] values;

    public Matrix4f() {
        this.values = new float[16];
    }

    /**
     * @param values 16 elements in row-major order.
     */
    public Matrix4f(float[] values) {
        this.values = values;
    }

    /**
     * @param values four rows of four columns.
     */
    public Matrix4f(float[][] values) {
        load(values);
    }

    /**
     * Flattens a 4x4 row array into the internal row-major layout.
     * Fixes the original indexing, which read columns 4..13 of each row
     * (e.g. values[0][4], values[3][13]) and was guaranteed to throw
     * ArrayIndexOutOfBoundsException for 4-column input.
     *
     * @param values four rows of four columns.
     */
    public void load(float[][] values) {
        this.values = new float[] {
            values[0][0], values[0][1], values[0][2], values[0][3],
            values[1][0], values[1][1], values[1][2], values[1][3],
            values[2][0], values[2][1], values[2][2], values[2][3],
            values[3][0], values[3][1], values[3][2], values[3][3]
        };
    }

    /**
     * Get the values of matrix
     *
     * @return values
     */
    public float[] getValues() {
        return this.values;
    }
}
Matrix functions ..
// Post-multiplies `matrix` by a row-major translation whose offsets sit in
// the last column; neither argument is modified.
public Matrix4f translate(Matrix4f matrix, Vec3 vector) {
    float[][] rows = new float[4][4];
    for (int i = 0; i < 4; i++) {
        rows[i][i] = 1;
    }
    rows[0][3] = vector.x;
    rows[1][3] = vector.y;
    rows[2][3] = vector.z;
    return multiply(matrix, new Matrix4f(rows));
}
/**
 * Post-multiplies {@code matrix} by a rotation of {@code angle} radians about
 * the axis selected by the x/y/z flags (exactly one should be 1).
 * Fixes the original behavior where an unmatched axis left the rotation
 * matrix zero-filled — silently wiping the whole result — and generalizes it
 * to the x and y axes; the z == 1 path is unchanged.
 */
public Matrix4f rotate(Matrix4f matrix, float angle, int x, int y, int z) {
    Matrix4f transform = new Matrix4f();
    float cos = (float) Math.cos(angle);
    float sin = (float) Math.sin(angle);
    if (z == 1) {
        transform.load(new float[][] {
            new float[] { cos, -sin, 0, 0 },
            new float[] { sin, cos, 0, 0 },
            new float[] { 0, 0, 1, 0 },
            new float[] { 0, 0, 0, 1 },
        });
    } else if (y == 1) {
        transform.load(new float[][] {
            new float[] { cos, 0, sin, 0 },
            new float[] { 0, 1, 0, 0 },
            new float[] { -sin, 0, cos, 0 },
            new float[] { 0, 0, 0, 1 },
        });
    } else if (x == 1) {
        transform.load(new float[][] {
            new float[] { 1, 0, 0, 0 },
            new float[] { 0, cos, -sin, 0 },
            new float[] { 0, sin, cos, 0 },
            new float[] { 0, 0, 0, 1 },
        });
    } else {
        // No axis selected: rotate by nothing (identity), not by zero.
        transform.load(new float[][] {
            new float[] { 1, 0, 0, 0 },
            new float[] { 0, 1, 0, 0 },
            new float[] { 0, 0, 1, 0 },
            new float[] { 0, 0, 0, 1 },
        });
    }
    //Add onto the matrix and return the result
    return multiply(matrix, transform);
}
/** Component-wise sum of two 4x4 matrices; the operands are not modified. */
public Matrix4f add(Matrix4f matrixA, Matrix4f matrixB) {
    Matrix4f result = new Matrix4f();
    int count = result.values.length;
    for (int index = 0; index < count; index++) {
        result.values[index] = matrixA.values[index] + matrixB.values[index];
    }
    return result;
}
/**
 * Row-major 4x4 matrix product: result[r][c] = sum over k of
 * matrixA[r][k] * matrixB[k][c]. Neither operand is modified, and the
 * accumulation order per element matches the original unrolled version.
 */
public Matrix4f multiply(Matrix4f matrixA, Matrix4f matrixB) {
    float[] a = matrixA.values;
    float[] b = matrixB.values;
    float[][] product = new float[4][4];
    for (int row = 0; row < 4; row++) {
        for (int col = 0; col < 4; col++) {
            float sum = a[row * 4] * b[col];
            for (int k = 1; k < 4; k++) {
                sum += a[row * 4 + k] * b[k * 4 + col];
            }
            product[row][col] = sum;
        }
    }
    return new Matrix4f(product);
}
EDIT:
I've set the uniforms to transpose my matrices, but the squares are still not centered. They should form a square around the screen, instead they show like this and also they dont seem to rotate correctly? ..
EDIT:
I've changed my rotate and translation functions to multiply the matrices, which fixed the rotated issue. My last issue is that I dont seem to be looking at the center of my scene or my objects are not drawn at the center of my field of view. The squares should form a box around the edge of the screen, with a diamond like shape at center of screen.
Is there something wrong with how I position my camera? ..
The View Matrix does not need to be transposed (it's in column-major order) whereas the Projection matrix is in row-major order and does need to be transposed into GL's column-major order.
You can use the appropriate transpose flag as mentioned in the other answers.
The reason you are getting these strange triangles is because the last row of the resulting MVP matrix is not the last row of the identity matrix and thus there is an unintended perspective distortion.
Also, I am not quite sure the view matrix is set up correctly; it should be as follows:
f = normalize(target - position);
s = normalize(f x u);
u = s x f;
_ _
| s.x s.y s.z (-s dot pos) |
| u.x u.y u.z (-u dot pos) |
| -f.x -f.y -f.z (f dot pos) |
| 0 0 0 1 |
_ -
(which needs to be transposed)
It is not clear from your question, but it seems that your Matrix4f is row-major. Generally, there are two ways to store matrices — row-major and column-major — however, there is one important issue:
Historically, IRIS GL used the row-vector convention, then OpenGL (which was based on IRIS GL) switched to column vectors in its specification (to make it match up better with standard mathematical practice) but at the same time switched storage layout from row-major to column-major to make sure that existing IRIS GL code didn’t break. That’s a somewhat unfortunate legacy, since C defaults to row-major storage, so you would normally expect a C library to use that too.
Taken from here
Let's have a look at your view matrix: you have the translation component in its last row. Assuming that the matrix is row-major, you have built it in a transposed way already. When you pass it to the shader with false in glUniformMatrix4fv, due to the different layout you eventually get a correct matrix. So you do not need to transpose that matrix. However, you should be aware of the different order of matrix multiplication when matrices are transposed. Transposed matrices should be multiplied in reverse order (this does not apply to your case, because you multiply the matrices in the vertex shader):
See this for more details.
On the other side, your projection matrix needs to be transposed. More over, there are some issues with signs of elements, check this.
Your code should be as follows:
// Row-major orthographic projection with corrected element signs.
// The unconventional parameter order (left, right, top, bottom, zfar, znear)
// mirrors the caller in the question; upload with transpose = true since the
// translation sits in the last column of a row-major layout.
public Matrix4f ortho(float left, float right, float top, float bottom, float zfar, float znear) {
return new Matrix4f(new float[][] {
new float[] { 2 / (right - left), 0, 0, -(right + left) / (right - left) },
new float[] { 0, 2 / (top - bottom), 0, -(top + bottom) / (top - bottom) },
new float[] { 0, 0, -2 / (zfar - znear), -(zfar + znear) / (zfar - znear) },
new float[] { 0, 0, 0, 1 },
});
}
Try to pass the projection matrix with true in glUniformMatrix4fv:
gl2.glUniformMatrix4fv(mProj, 1, true, matrices.getProjectionMatrix().getValues(), 0);
I can only guess how your model matrix is created, so it would be better if you make it just identity for the first time.
view matrix is:
f = normalize(pos-target);
u = normalize(cross(up,f));
s = normalize(cross(u, f));
|s.x s.y s.z dot(s,-pos)|
|u.x u.y u.z dot(u,-pos)|
|f.x f.y f.z dot(f,-pos)|
|0 0 0 1 |
in row-major format = {s.x,s.y,s.z,dot(s,-pos),u.x,u.y,u.z,dot(u,-pos),f.x,f.y,f.z,dot(f,-pos),0,0,0,1}
in column-major format = {s.x,u.x,f.x,0,s.y,u.y,f.y,0,s.z,u.z,f.z,0,dot(s,-pos),dot(u,-pos),dot(f,-pos),1}
projection(ortho) matrix:-
|2/(r-l) 0 0 -(r+l)/(r-l)|
|0 2/(t-b) 0 -(t+b)/(t-b) |
|0 0 -2/(f-n) -(f+n)/(f-n)|
|0 0 0 1 |
in row-major = {2/(r-l),0,0,-(r+l)/(r-l),0,2/(t-b),0,-(t+b)/(t-b),0,0,-2/(f-n),-(f+n)/(f-n),0,0,0,1}
in column major = {2/(r-l),0,0,0,0,2/(t-b),0,0,0,0,-2/(f-n),0,-(r+l)/(r-l),-(t+b)/(t-b),-(f+n)/(f-n),1}
use this for column major,
gl2.glUniformMatrix4fv(mProj, 1, false, matrices.getProjectionMatrix().getValues(), 0);
gl2.glUniformMatrix4fv(mView, 1, false, matrices.getViewMatrix().getValues(), 0);
gl2.glUniformMatrix4fv(mModel, 1, false, matrices.getModelMatrix().getValues(), 0);
and this for row-major,
gl2.glUniformMatrix4fv(mProj, 1, true, matrices.getProjectionMatrix().getValues(), 0);
gl2.glUniformMatrix4fv(mView, 1, true, matrices.getViewMatrix().getValues(), 0);
gl2.glUniformMatrix4fv(mModel, 1, true, matrices.getModelMatrix().getValues(), 0);
pick either row or column major matrices and stick to it.
if that does not work then try copying your vertex data to opengl yourself with glGenBuffer()/glBindBuffer()/glBufferData etc...
return new Matrix4f(
new float[] {
s.x, s.y, s.z, -Vec3.dot(s, position),
u.x, u.y, u.z, -Vec3.dot(s, position),
-f.x, -f.y, -f.z, Vec3.dot(f, position),
0.0f, 0.0f, 0.0f, 1.0f});
}
should be,
return new Matrix4f(
new float[] {
s.x, s.y, s.z, -Vec3.dot(s, position),
u.x, u.y, u.z, -Vec3.dot(u, position),
-f.x, -f.y, -f.z, Vec3.dot(f, position),
0.0f, 0.0f, 0.0f, 1.0f});
}
also check your vertex shader, you have attribute vec4 vPosition, but you are passing in vec3 data.
i think it should be attribute vec3 vPosition and do,
gl_Position = mProj * mView * mModel * vec4(vPosition,1);
EDIT:
your vertex buffer is:-
int COORDS_PER_VERTEX = 3;
int vertexStride = COORDS_PER_VERTEX * 4; // 4 bytes per vertex
which are vec3's.
EDIT 2.
can i see your customObjects data that draw uses such as position and bodyangle. could you also print out the proj, view and model matrices before they are passed to your shader program.
I'm encountering a problem trying to replicate the OpenGL behaviour in an ambient without OpenGL.
Basically I need to create an SVG file from a list of lines my program creates. These lines are created using an orthographic projection.
I'm sure that these lines are calculated correctly because if I try to use them with a OpenGL context with orthographic projection and save the result into an image, the image is correct.
The problem raises when I use the exactly same lines without OpenGL.
I've replicated the OpenGL projection and view matrices and I process every line point like this:
3D_output_point = projection_matrix * view_matrix * 3D_input_point
and then I calculate it's screen (SVG file) position like this:
2D_point_x = (windowWidth / 2) * 3D_point_x + (windowWidth / 2)
2D_point_y = (windowHeight / 2) * 3D_point_y + (windowHeight / 2)
I calculate the orthographic projection matrix like this:
// Orthographic projection built element by element with SetValore(row, col).
// The translation terms are written into row 3, i.e. the layout follows the
// row-vector convention — confirm against the matrix class conventions.
float range = 700.0f;
float l, t, r, b, n, f;
l = -range;
r = range;
b = -range;
t = range;
n = -6000;
f = 8000;
matProj.SetValore(0, 0, 2.0f / (r - l));
matProj.SetValore(0, 1, 0.0f);
matProj.SetValore(0, 2, 0.0f);
matProj.SetValore(0, 3, 0.0f);
matProj.SetValore(1, 0, 0.0f);
matProj.SetValore(1, 1, 2.0f / (t - b));
matProj.SetValore(1, 2, 0.0f);
matProj.SetValore(1, 3, 0.0f);
matProj.SetValore(2, 0, 0.0f);
matProj.SetValore(2, 1, 0.0f);
// NOTE(review): -1/(f - n) with -n/(f - n) below is a 0..1 depth mapping
// (D3D style), not GL's -2/(f - n) with -(f + n)/(f - n) — verify this is
// what the replicated OpenGL pipeline actually uses.
matProj.SetValore(2, 2, (-1.0f) / (f - n));
matProj.SetValore(2, 3, 0.0f);
matProj.SetValore(3, 0, -(r + l) / (r - l));
matProj.SetValore(3, 1, -(t + b) / (t - b));
matProj.SetValore(3, 2, -n / (f - n));
matProj.SetValore(3, 3, 1.0f);
and the view matrix this way:
// Hand-built look-at view matrix: basis vectors in the columns of rows 0-2,
// translation (the negated dot products) in row 3 — row-vector convention.
CVettore position, lookAt, up;
position.AssegnaCoordinate(rtRay->m_pCam->Vp.x, rtRay->m_pCam->Vp.y, rtRay->m_pCam->Vp.z);
lookAt.AssegnaCoordinate(rtRay->m_pCam->Lp.x, rtRay->m_pCam->Lp.y, rtRay->m_pCam->Lp.z);
up.AssegnaCoordinate(rtRay->m_pCam->Up.x, rtRay->m_pCam->Up.y, rtRay->m_pCam->Up.z);
// NOTE(review): the up vector is flipped here; combined with
// zAxis = lookAt - position below, this yields a left-handed basis — verify
// this is intentional when replicating OpenGL's right-handed view.
up[0] = -up[0];
up[1] = -up[1];
up[2] = -up[2];
CVettore zAxis, xAxis, yAxis;
float length, result1, result2, result3;
// zAxis = normal(lookAt - position)
zAxis[0] = lookAt[0] - position[0];
zAxis[1] = lookAt[1] - position[1];
zAxis[2] = lookAt[2] - position[2];
length = sqrt((zAxis[0] * zAxis[0]) + (zAxis[1] * zAxis[1]) + (zAxis[2] * zAxis[2]));
zAxis[0] = zAxis[0] / length;
zAxis[1] = zAxis[1] / length;
zAxis[2] = zAxis[2] / length;
// xAxis = normal(cross(up, zAxis))
xAxis[0] = (up[1] * zAxis[2]) - (up[2] * zAxis[1]);
xAxis[1] = (up[2] * zAxis[0]) - (up[0] * zAxis[2]);
xAxis[2] = (up[0] * zAxis[1]) - (up[1] * zAxis[0]);
length = sqrt((xAxis[0] * xAxis[0]) + (xAxis[1] * xAxis[1]) + (xAxis[2] * xAxis[2]));
xAxis[0] = xAxis[0] / length;
xAxis[1] = xAxis[1] / length;
xAxis[2] = xAxis[2] / length;
// yAxis = cross(zAxis, xAxis)
yAxis[0] = (zAxis[1] * xAxis[2]) - (zAxis[2] * xAxis[1]);
yAxis[1] = (zAxis[2] * xAxis[0]) - (zAxis[0] * xAxis[2]);
yAxis[2] = (zAxis[0] * xAxis[1]) - (zAxis[1] * xAxis[0]);
// Translation components: -basis . eye, one per axis.
// -dot(xAxis, position)
result1 = ((xAxis[0] * position[0]) + (xAxis[1] * position[1]) + (xAxis[2] * position[2])) * -1.0f;
// -dot(yaxis, eye)
result2 = ((yAxis[0] * position[0]) + (yAxis[1] * position[1]) + (yAxis[2] * position[2])) * -1.0f;
// -dot(zaxis, eye)
result3 = ((zAxis[0] * position[0]) + (zAxis[1] * position[1]) + (zAxis[2] * position[2])) * -1.0f;
// Set the computed values in the view matrix.
matView.SetValore(0, 0, xAxis[0]);
matView.SetValore(0, 1, yAxis[0]);
matView.SetValore(0, 2, zAxis[0]);
matView.SetValore(0, 3, 0.0f);
matView.SetValore(1, 0, xAxis[1]);
matView.SetValore(1, 1, yAxis[1]);
matView.SetValore(1, 2, zAxis[1]);
matView.SetValore(1, 3, 0.0f);
matView.SetValore(2, 0, xAxis[2]);
matView.SetValore(2, 1, yAxis[2]);
matView.SetValore(2, 2, zAxis[2]);
matView.SetValore(2, 3, 0.0f);
matView.SetValore(3, 0, result1);
matView.SetValore(3, 1, result2);
matView.SetValore(3, 2, result3);
matView.SetValore(3, 3, 1.0f);
The results I get from OpenGL and from the SVG output are quite different, but in two days I couldn't come up with a solution.
This is the OpenGL output
And this is my SVG output
As you can see, its rotation isn't correct.
Any idea why? The line points are the same and the matrices too, hopefully.
Passing the matrices I was creating didn't work. I mean, the matrices were wrong, I think, because OpenGL didn't show anything.
So I tried the opposite: I created the matrices in OpenGL and used them with my code. The result is better, but not perfect yet.
Now I think I'm doing something wrong when mapping the 3D points into 2D screen points, because the points I get are inverted in Y and I still have some lines not perfectly matching.
This is what I get using the OpenGL matrices and my previous approach to map 3D points to 2D screen space (this is the SVG, not OpenGL render):
Ok this is the content of the view matrix I get from OpenGL:
This is the projection matrix I get from OpenGL:
And this is the result I get with those matrices and by changing my 2D point Y coordinate calculation like bofjas said:
It looks like some rotations are missing. My camera has a rotation of 30° on both the X and Y axis, and it looks like they're not computed correctly.
Now I'm using the same matrices OpenGL does. So I think that I'm doing some wrong calculations when I map the 3D point into 2D screen coordinates.
Rather than debugging your own code, you can use transform feedback to compute the projections of your lines using the OpenGL pipeline. Rather than rasterizing them on the screen you can capture them in a memory buffer and save directly to the SVG afterwards. Setting this up is a bit involved and depends on the exact setup of your OpenGL codepath, but it might be a simpler solution.
As per your own code, it looks like you either mixed x and y coordinates somewhere, or row-major and column-major matrices.
I've solved this problem in a really simple way. Since when I draw using OpenGL it's working, I've just created the matrices in OpenGL and then retrieved them with glGet(). Using those matrices everything is ok.
You're looking for a specialized version of orthographic (oblique) projections called isometric projections. The math is really simple if you want to know what's inside the matrix. Have a look on Wikipedia
OpenGL loads matrices in column-major order (the opposite of C++). For example, this matrix:
[1 ,2 ,3 ,4 ,
5 ,6 ,7 ,8 ,
9 ,10,11,12,
13,14,15,16]
loads this way in memory:
|_1 _|
|_5 _|
|_9 _|
|_13_|
|_2 _|
.
.
.
So I suppose you should transpose those matrices from OpenGL (if you're doing it row-major).
I am trying to reconstruct position from depth texture in Vertex Shader. Usually, this is done in Pixel Shader, but for some reason I need it in VS to transform some geometry.
So my approach.
1) I calculate the View Frustum corners in View Space.
I use these input NDC points. Those values are transformed via Inverse(view * proj) to put them into World Space and then transformed via the view matrix.
//GL - Left Handed - need to "swap" front and back Z coordinate
// The eight corners of the NDC cube as homogeneous points (w = 1), intended
// to be un-projected via Inverse(view * proj) into world space. Per the
// comment above, Z is deliberately flipped relative to OpenGL's NDC (the
// "front" face is written at z = +1) — NOTE(review): confirm this matches
// the handedness assumed by the inverse transform that consumes these.
MyMath::Vector4 cornersVector4[] =
{
//front
MyMath::Vector4(-1, -1, 1, 1), //A  (left,  bottom, front)
MyMath::Vector4( 1, -1, 1, 1), //B  (right, bottom, front)
MyMath::Vector4( 1, 1, 1, 1), //C  (right, top,    front)
MyMath::Vector4(-1, 1, 1, 1), //D  (left,  top,    front)
//back
MyMath::Vector4(-1, -1, -1, 1), //E (left,  bottom, back)
MyMath::Vector4( 1, -1, -1, 1), //F (right, bottom, back)
MyMath::Vector4( 1, 1, -1, 1), //G (right, top,    back)
MyMath::Vector4(-1, 1, -1, 1), //H (left,  top,    back)
};
If I print debug output, it seems correct (camera pos is at dist zNear from near plane and far is far enough)
2) post values to shader
3) In shader I do this:
// Rotate the four frustum-corner directions using the inverse-view rotation.
// NOTE(review): v * mat3(M) multiplies v as a ROW vector, i.e. it applies
// transpose(mat3(viewInv)) — for an orthonormal rotation that is the inverse
// of viewInv, equivalent to applying mat3(view). Confirm this is intended
// and not a row/column-major mix-up.
vec3 _cornerPos0 = cornerPos0.xyz * mat3(viewInv);
vec3 _cornerPos1 = cornerPos1.xyz * mat3(viewInv);
vec3 _cornerPos2 = cornerPos2.xyz * mat3(viewInv);
vec3 _cornerPos3 = cornerPos3.xyz * mat3(viewInv);
// Normalize the per-vertex grid coordinate into [0, 1].
float x = (TEXCOORD1.x / 100.0); //TEXCOORD1.x = <0, 100>
float y = (TEXCOORD1.y / 100.0); //TEXCOORD1.y = <0, 100>
// Bilinearly interpolate between the four corner rays.
vec3 ray = mix(mix(_cornerPos0, _cornerPos1, x),
mix(_cornerPos2, _cornerPos3, x),
y);
// NOTE(review): texture2D returns a vec4; assigning it to a float is a
// compile error in strict GLSL — use texture2D(...).r. Also, implicit-LOD
// sampling in a VERTEX shader requires texture2DLod / textureLod on some
// GLSL versions.
float depth = texture2D(depthTexture, vec2(x, y));
//depth is created in draw pass before with depth = vertexViewPos.z / farClipPlane;
// Reconstruct the world-space position: camera origin plus depth-scaled ray.
vec3 reconstructed_posWS = camPos + (depth * ray);
But if I do this and translate my geometry from [0,0,0] to reconstructed_posWS, only part of the screen is covered. What can be incorrect?
PS: some calculations are useless (transform to space and after that transform back), but speed is not concern atm.