My diffuse lighting doesn't seem to be working properly.
Fragment Shader:
#version 330 core
out vec4 fragColor; // user-defined output (names starting with gl_ are reserved in GLSL 3.30 core)
in vec4 vertexColor;
in vec2 texelCoord;
in vec3 Normal;
struct DirectionalLight
{
vec3 color;
float ambientIntensity;
vec3 direction;
float diffuseIntensity;
};
uniform sampler2D textureSampler;
uniform DirectionalLight directionalLight;
void main()
{
vec4 ambientColor = vec4(directionalLight.color, 1.0f) * directionalLight.ambientIntensity;
float diffuseFactor = max(dot(normalize(Normal), normalize(directionalLight.direction)), 0.0f);
vec4 diffuseColor = vec4(directionalLight.color, 1.0f) * directionalLight.diffuseIntensity * diffuseFactor;
fragColor = texture(textureSampler, texelCoord) * (ambientColor + diffuseColor);
}
Vertex Shader:
#version 330 core
layout (location = 0) in vec3 position;
layout (location = 1) in vec2 texCoord;
layout (location = 2) in vec3 normal;
out vec4 vertexColor;
out vec2 texelCoord;
out vec3 Normal;
uniform mat4 transformation;
uniform mat4 projection;
uniform mat4 view;
void main()
{
gl_Position = projection * view * transformation * vec4( position, 1.0f );
vertexColor = vec4(clamp(position, 0.0f, 1.0f), 1.0f);
texelCoord = texCoord;
Normal = mat3(transpose(inverse(transformation))) * normal;
}
How I create Meshes:
void CalcAverageNormals( unsigned int* indices , unsigned int indicesCount , float* vertices , unsigned int verticesCount , unsigned int vertexLength , unsigned int normalOffset )
{
    for ( unsigned int i = 0; i < indicesCount; i += 3 )
    {
        unsigned int v1 = indices[i] * vertexLength;
        unsigned int v2 = indices[ i + 1 ] * vertexLength;
        unsigned int v3 = indices[ i + 2 ] * vertexLength;
        glm::vec3 line1( vertices[ v2 ] - vertices[ v1 ] , vertices[ v2 + 1 ] - vertices[ v1 + 1 ] , vertices[ v2 + 2 ] - vertices[ v1 + 2 ] );
        glm::vec3 line2( vertices[ v3 ] - vertices[ v1 ] , vertices[ v3 + 1 ] - vertices[ v1 + 1 ] , vertices[ v3 + 2 ] - vertices[ v1 + 2 ] );
        glm::vec3 normal = glm::normalize( glm::cross( line1 , line2 ) );
        v1 += normalOffset;
        v2 += normalOffset;
        v3 += normalOffset;
        vertices[ v1 ] += normal.x; vertices[ v1 + 1 ] += normal.y; vertices[ v1 + 2 ] += normal.z;
        vertices[ v2 ] += normal.x; vertices[ v2 + 1 ] += normal.y; vertices[ v2 + 2 ] += normal.z;
        vertices[ v3 ] += normal.x; vertices[ v3 + 1 ] += normal.y; vertices[ v3 + 2 ] += normal.z;
    }
    for ( unsigned int j = 0; j < verticesCount / vertexLength; j++ )
    {
        unsigned int offset = j * vertexLength + normalOffset;
        glm::vec3 normalVertex( vertices[ offset ] , vertices[ offset + 1 ] , vertices[ offset + 2 ] );
        normalVertex = glm::normalize( normalVertex );
        vertices[ offset ] = normalVertex.x;
        vertices[ offset + 1 ] = normalVertex.y;
        vertices[ offset + 2 ] = normalVertex.z;
    }
}
void CreateTriangle() {
float vertices[] {
-0.5f,-0.5f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, // Left
0.5f,-0.5f, 0.0f, 1.0f, 0.0f, 0.0f, 0.0f, 0.0f, // Right
0.0f, 0.5f, 0.0f, 0.5f, 1.0f, 0.0f, 0.0f, 0.0f, // Top
0.0f,-0.5f, 0.5f, 0.5f, 0.0f, 0.0f, 0.0f, 0.0f // Back Z
};
unsigned int indices[]{
0, 1, 2, // Front
3, 2, 1, // Right
3, 2, 0, // Left
3, 0, 1 // Bottom
};
CalcAverageNormals( indices , 12 , vertices , 32 , 8 , 5 );
for ( int i = 0; i < 1; i++ )
{
Mesh* obj = new Mesh();
obj->CreateMesh( vertices , 32 , indices , 12 );
meshlist.push_back( obj );
}
}
CreateMesh()
void Mesh::CreateMesh( float* vertices , unsigned int numVertices , unsigned int* indices , unsigned int numIndices )
{
uIndices = numIndices;
glGenVertexArrays( 1 , &vao );
glBindVertexArray( vao );
/*Create Buffers*/
glGenBuffers( 1 , &ibo );
glBindBuffer( GL_ELEMENT_ARRAY_BUFFER , ibo );
glBufferData( GL_ELEMENT_ARRAY_BUFFER , numIndices * sizeof(unsigned) , indices , GL_STATIC_DRAW );
glGenBuffers( 1 , &vbo );
glBindBuffer( GL_ARRAY_BUFFER , vbo );
glBufferData( GL_ARRAY_BUFFER , numVertices * sizeof(float) , vertices , GL_STATIC_DRAW );
glVertexAttribPointer( 0 , 3 , GL_FLOAT , GL_FALSE , sizeof( vertices[ 0 ] ) * 8 , 0 );
glEnableVertexAttribArray( 0 );
glVertexAttribPointer( 1 , 2 , GL_FLOAT , GL_FALSE , sizeof( vertices[ 0 ] ) * 8 , ( void* )( sizeof( vertices[ 0 ] ) * 3 ) );
glEnableVertexAttribArray( 1 );
glVertexAttribPointer( 2 , 3 , GL_FLOAT , GL_FALSE , sizeof( vertices[ 0 ] ) * 8 , ( void* )( sizeof( vertices[ 0 ] ) * 5 ) );
glEnableVertexAttribArray( 2 );
/*Unbind Objects*/
glBindBuffer( GL_ARRAY_BUFFER , 0 );
glBindVertexArray( 0 );
glBindBuffer( GL_ELEMENT_ARRAY_BUFFER , 0 );
}
I only get some sort of lighting if I rotate the mesh:
Normal (no lighting):
I've been trying to figure this out for days but I'm not sure what I did wrong. If you could help me that would be great.
It turns out the problem was my order of indices: the triangle winding was wrong. I fixed it by listing the indices in counter-clockwise order, because of how my diffuse factor is calculated.
unsigned int indices[]{
0, 1, 2, // Front
3, 1, 2, // Right
3, 0, 2, // Left
3, 1, 0 // Bottom
};
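Not part of the original fix, but a quick way to sanity-check the winding of a closed mesh like this tetrahedron (a sketch using GLM, which the mesh code above already uses): a counter-clockwise face should have its cross-product normal pointing away from the mesh centroid.

#include <glm/glm.hpp>

// For a closed convex mesh: returns true if the triangle (a, b, c) is wound
// counter-clockwise as seen from outside, i.e. its face normal (same cross
// product as in CalcAverageNormals) points away from the mesh centroid.
bool FaceWindingLooksCorrect( const glm::vec3& a, const glm::vec3& b, const glm::vec3& c, const glm::vec3& centroid )
{
    glm::vec3 normal  = glm::cross( b - a, c - a );
    glm::vec3 outward = ( a + b + c ) / 3.0f - centroid;
    return glm::dot( normal, outward ) > 0.0f;
}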
The transpose of the inverse is needed to transform normals correctly when the model matrix contains a non-uniform scale.
this:
Normal = mat3(transpose(inverse(transformation))) * normal;
looks suspicious. The normal itself should be transformed into the same coordinate system that the light direction vector is in. As normal is a vector, its w should be zero, so I would expect either
Normal = (view * transformation * vec4( normal, 0.0f )).xyz;
if the light direction is in camera coordinates or:
Normal = (transformation * vec4( normal, 0.0f )).xyz;
if the light direction is in global world coordinates (which is more likely the case)...
Also, setting a shader output variable like gl_Position should be the last line of your shader, or you risk that the GL implementation optimizes out the code after it on some implementations.
btw. IIRC mat3(transpose(inverse(transformation))) is the same as mat3(transformation) in case the transformation contains no non-uniform scale, but as Rabbid76 pointed out it has its purpose...
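For illustration, here is a small CPU-side sketch (GLM, as the question already uses) of why the inverse transpose matters once a non-uniform scale is involved: a surface tangent and its normal stop being perpendicular if both are transformed by the plain matrix, but stay perpendicular with transpose(inverse(M)).

#include <glm/glm.hpp>
#include <glm/gtc/matrix_transform.hpp>
#include <cmath>

int main()
{
    glm::mat4 model = glm::scale( glm::mat4( 1.0f ), glm::vec3( 2.0f, 1.0f, 1.0f ) ); // non-uniform scale
    glm::vec3 tangent(  1.0f, 1.0f, 0.0f ); // lies in the surface
    glm::vec3 normal ( -1.0f, 1.0f, 0.0f ); // perpendicular to the tangent

    glm::vec3 t     = glm::mat3( model ) * tangent;
    glm::vec3 nBad  = glm::mat3( model ) * normal;                                   // dot( t, nBad )  != 0
    glm::vec3 nGood = glm::transpose( glm::inverse( glm::mat3( model ) ) ) * normal; // dot( t, nGood ) == 0

    return std::fabs( glm::dot( t, nGood ) ) < 1e-6f ? 0 : 1; // exits with 0: still perpendicular
}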
One last thing I can think of is a wrong normal direction (opposite or inconsistent); in such a case I would try exchanging:
max(dot(normalize(Normal), normalize(directionalLight.direction)), 0.0f);
with:
abs(dot(normalize(Normal), normalize(directionalLight.direction)));
If that helps, you have to check your normals, or just negate the dot result...
For more info see:
OpenGL - vertex normals in OBJ ... however, there I use w=1, as the matrix used has no translation offset, so it does not matter
Understanding 4x4 homogenous transform matrices
I'm working on this shader that I modified,
but I wish to simply draw a line instead of this blur / bloom effect.
I understood that it is the float d that is used as a modifier, but how do I get a simple line instead?
I based my research on this shader
Will appreciate any help
Zoltan
#ifdef GL_ES
precision mediump float;
#endif
mat4 mat = mat4 (
vec4 ( Scale * SizeTpDwn , 0.0 , 0.0 , 0.0 ),
vec4 ( 0.0 , Scale * SizeLftRght , 0.0 , 0.0 ),
vec4 ( 0.0 , 0.0 , Scale , 0.0 ),
vec4 ( 0.0 , 0.0 , 0.0 , Scale ) );
vec2 pos;
vec4 linecol = vec4 (0.5 , 0.5 , 0.7 , 0.5);
vec4 col = vec4 ( 0.0, 0.0, 0.0, 1.0 );
void Line4 ( vec4 a, vec4 b );
void Line2 ( vec2 a, vec2 b );
void main( void ) {
pos = gl_FragCoord.xy / RENDERSIZE.xy;
pos -= .5;
//Line
Line4 ( vec4 ( LengthTX, MoveTX, .2 ,-.2), vec4 (LengthTX2, MoveTX2, .2, -.2 ) );
//Line4 ( vec4 ( MoveRX, LengthRY, .2 ,-.2 ),vec4 ( MoveRX2,LengthRY2, .2, -.2 ) );
//Line4 ( vec4 (MoveLX, LengthLY, .2 ,-.2 ),vec4 (MoveLX2,LengthLY2, .2, -.2 ) );
//Line4 ( vec4 ( LengthDX,MoveDX, .2 ,-.2), vec4 (LengthDX2,MoveDX2, .2, -.2 ) );
gl_FragColor = vec4( col.xyz, 1.0 );
}
void Line4 ( vec4 a, vec4 b )
{
a = mat * a;
//a.xyz /= 1.5 + a.w * 2.;
b = mat * b;
//b.xyz /= 1.5 + b.w * 2.;
Line2 ( a.xy , b.xy );
}
void Line2 ( vec2 a, vec2 b )
{
float dtc = (distance ( pos , a ) + distance ( pos , b ) - distance ( a , b )); //+ 1e-5);
//linecol = vec4 (0.5 , 0.5 , 0.7 , 0.5);
col += max ( 1. - pow ( dtc * 14. , 0.10 ) , -.10 );
}
What you have to do is find the distance of the current fragment to the line. If this distance is smaller than half the line thickness, then the fragment is on the line.
To create a line with sharp edges, I recommend using the step function, which returns 0.0 if a value is smaller than a reference value, and 1.0 otherwise.
To draw a line which is not endless, you have to check whether the point on the endless line that is closest to the current position lies between the start and the end of the line:
void Line2 (vec2 L1, vec2 L2)
{
    vec2 P = pos;
    vec2 O = L1;
    vec2 D = normalize(L2-L1);
    float d = dot(P-O, D);
    vec2 X = L1 + D * d;
    float dtc;
    if (d < 0.0)
        dtc = distance(L1, P);            // d < 0.0 -> X is "before" L1
    else if (d > distance(L1, L2))
        dtc = distance(L2, P);            // d > distance(L1, L2) -> X is "after" L2
    else
        dtc = distance(pos, X);
    col += 1.0 - step(0.01, dtc);
}
Preview
Explanation:
Let's assume that the line is defined by a point O and a unit vector D, which gives the direction of the line. Note that the length of a unit vector is 1.
Further, you have a point P, and you want to find the closest point X on the line (O, D) to P.
First calculate a vector V from O to P:
V = P - O;
The distance d from O to the intersection point X can be calculated using the dot product. Note that since D is a unit vector, the dot product of V and D is equal to the cosine of the angle between the line (O, D) and the vector V, multiplied by the amount (length) of V:
d = dot(V, D);
The intersection point X, can be calculated by shifting the point O along the line (D) by the distance d:
X = O + D * d;
So the formula for the intersection point is:
O ... any point on the line
D ... unit vector which points in the direction of the line
P ... the "Point"
X = O + D * dot(P-O, D);
Note, if the line is defined by 2 points, L1 and L2, then the unit vector D can be calculated as follows:
D = normalize(L2-L1);
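The same closest-point logic, written host-side for reference (a sketch using GLM; the function name is mine, not part of the shader above). Clamping d to [0, distance(L1, L2)] is equivalent to the three-way if/else in the shader.

#include <glm/glm.hpp>

// Distance from point P to the segment L1-L2: project P onto the infinite
// line, clamp the foot point to the segment, then measure the distance.
float DistancePointSegment( const glm::vec2& P, const glm::vec2& L1, const glm::vec2& L2 )
{
    glm::vec2 D = glm::normalize( L2 - L1 );             // unit direction of the line
    float d = glm::dot( P - L1, D );                     // signed distance of the foot point along D
    d = glm::clamp( d, 0.0f, glm::distance( L1, L2 ) );  // keep the foot point on the segment
    glm::vec2 X = L1 + D * d;                            // closest point on the segment
    return glm::distance( P, X );
}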
I want to light a flat surface at vertex level from the direction of the camera. I expect the lighting to not change when I rotate the view but it is brightest when I look slightly away. Is mvp[3] not a camera coordinate like I think it is?
#version 450
in vec3 vertex;
uniform mat4 mvp;
out vec4 color;
void main()
{
gl_Position = mvp * vec4(vertex,1.);
vec3 n = vec3(0.,0.,1.);
vec3 v = normalize( vec3(mvp[3])-vertex );
//I tried the other direction in the mat4
//vec3(mvp[0][3],mvp[1][3],mvp[2][3]);
color = vec4( dot(v,n) );
}
Is the 4th column of the model view projection matrix the viewing position?
No, it is not. The 4th column of the view matrix would contain the camera position, but the model view projection matrix is the combination of the model matrix, the view matrix and the projection matrix.
A view matrix usually looks like this:
mat4 view;
view[0] : ( X-axis.x, X-axis.y, X-axis.z, 0 )
view[1] : ( Y-axis.x, Y-axis.y, Y-axis.z, 0 )
view[2] : ( Z-axis.x, Z-axis.y, Z-axis.z, 0 )
view[3] : ( trans.x, trans.y, trans.z, 1 )
A perspective projection matrix may look like this:
r = right, l = left, b = bottom, t = top, n = near, f = far
mat4 projection;
projection[0] : 2*n/(r-l) 0 0 0
projection[1] : 0 2*n/(t-b) 0 0
projection[2] : (r+l)/(r-l) (t+b)/(t-b) -(f+n)/(f-n) -1
projection[3] : 0 0 -2*f*n/(f-n) 0
A matrix multiplication works like this:
mat4 matA;
mat4 matB;
mat4 matC;
for ( int i0 = 0; i0 < 4; ++ i0 )
    for ( int i1 = 0; i1 < 4; ++ i1 )
        matC[i0][i1] = matB[i0][0] * matA[0][i1] + matB[i0][1] * matA[1][i1] + matB[i0][2] * matA[2][i1] + matB[i0][3] * matA[3][i1];
It follows that the 4th column of the view projection matrix contains the following:
mv[3][0] = trans.x * 2*n/(r-l) + trans.z * (r+l)/(r-l);
mv[3][1] = trans.y * 2*n/(t-b) + trans.z * (t+b)/(t-b);
mv[3][2] = -trans.z * (f+n)/(f-n) - 2*f*n/(f-n);
mv[3][3] = -trans.z;
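For completeness, a sketch of reading the camera position back out host-side (GLM assumed; this uses the usual world-to-eye convention, in which the position has to be recovered from the inverse of the view matrix):

#include <glm/glm.hpp>

glm::vec3 CameraPositionFromView( const glm::mat4& view )
{
    // The translation column of the inverse view matrix is the eye position in world space.
    return glm::vec3( glm::inverse( view )[3] );
}

glm::vec3 CameraPositionFromViewFast( const glm::mat4& view )
{
    // Equivalent without a full inverse: for view = [ R | t ], eye = -transpose(R) * t.
    glm::mat3 R = glm::mat3( view );
    glm::vec3 t = glm::vec3( view[3] );
    return -( glm::transpose( R ) * t );
}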
I am developing a new video game and I've been blocked for about 5 weeks on skeletal animation. I believe I've narrowed down the problem, but can't figure out what I'm actually doing wrong.
I have a simple 12-vertex rectangular object with four bones inside. This image shows what the object looks like in its bind pose, and what the object should look like with the top bone rotated ~90 degrees about the Y-axis. To test bone weights in my application, this is the simple example I'm using. In my application, I programmatically turn the top bone ~90 degrees and have the shader render it.
Unfortunately, my application does not produce the same result. The bind pose displays properly, but when the top bone transform is applied, the transform is exaggerated and the top part of the rectangle simply stretches in the direction I rotate the top bone.
I have verified the following:
Bones are sent to the shader uniform as relative transforms. This means that when I rotate the top bone by 90, bones 1-3 are all identity, and bone 4 is a matrix that only rotates ~90 degrees along the Y-axis.
Bones are weighted properly for any given vertex (or at least, they are weighted identically in my application to what Blender has reported them as).
So I've reduced my problem to this single sanity check. Referring to the first screenshot above, I've chosen one vertex to transform using my bone method. One little vertex: -0.5, -0.5, 4.0. Assuming I apply everything properly, the bones should transform this vertex to -0.95638, -0.5, 2.63086. To make debugging easier, I've taken my vertex shader...
#version 330 core
layout (location = 0) in vec3 position; // The position variable has attribute position 0
layout (location = 1) in vec3 normal; // This is currently unused
layout (location = 2) in vec2 texture;
layout (location = 3) in ivec4 boneIDs;
layout (location = 4) in vec4 boneWeights;
out vec2 fragTexture;
uniform mat4 model;
uniform mat4 view;
uniform mat4 projection;
uniform mat4 bones[ 16 ];
void main()
{
mat4 boneTransform =
( bones[ boneIDs[ 0 ] ] * boneWeights[ 0 ] ) +
( bones[ boneIDs[ 1 ] ] * boneWeights[ 1 ] ) +
( bones[ boneIDs[ 2 ] ] * boneWeights[ 2 ] ) +
( bones[ boneIDs[ 3 ] ] * boneWeights[ 3 ] );
mat4 mvp = projection * view * model;
gl_Position = mvp * boneTransform * vec4( position, 1.0f );
fragTexture = texture;
}
...and put it into this simple unit test-style function below, made just to transform my test vertex.
glm::mat4 id( 1.0f ); // ID 0
glm::mat4 bone( 1.0f ); // ID 1
glm::mat4 bone002( 1.0f ); // ID 2
glm::mat4 bone003( 1.0f ); // ID 3
// Keyframe is set to rotate bone003 -89.113 degrees along Y
bone003 *= glm::toMat4( glm::angleAxis( (float)glm::radians( -89.113 ), glm::vec3( 0.0f, 1.0f, 0.0f ) ) );
glm::mat4 xform =
( bone002 * 0.087f ) +
( bone003 * 0.911f ) +
( id * 0 ) +
( id * 0 );
glm::vec4 point = xform * glm::vec4( glm::vec3( -0.5f, -0.5f, 4.0f ), 1.0f );
This code simulates the state of my vertex shader, where the four mat4s above are bones[0] through bones[3]. bone003 is what is sent to my shader after transforming Bone.003 and removing its inverse bind. Despite being exactly in line with my current understanding of skeletal animation, and matching all relevant weights/values from Blender, the vertex (-0.5, -0.5, 4.0) is transformed to the nonsense value of (-3.694115, -0.499000, -0.051035). The math is right, the values match up, but the answer is all wrong.
So, here is where I come to my actual question: What am I doing wrong in transforming my mesh vertices by influence of bone transforms? Where is my understanding of skeletal animation incorrect here?
This seems wrong to me:
mat4 boneTransform =
( bones[ boneIDs[ 0 ] ] * boneWeights[ 0 ] ) +
( bones[ boneIDs[ 1 ] ] * boneWeights[ 1 ] ) +
( bones[ boneIDs[ 2 ] ] * boneWeights[ 2 ] ) +
( bones[ boneIDs[ 3 ] ] * boneWeights[ 3 ] );
You should multiply the vertex (in bone space) with every bone matrix, and add the resulting vectors together taking care of weights, like so:
vec4 temp = vec4(0.0f, 0.0f, 0.0f, 0.0f);
vec4 v = vec4(position, 1.0f);
temp += (bones[boneIDs[0]] * v) * boneWeights[0];
temp += (bones[boneIDs[1]] * v) * boneWeights[1];
temp += (bones[boneIDs[2]] * v) * boneWeights[2];
temp += (bones[boneIDs[3]] * v) * boneWeights[3];
// temp is now the vector in local space that you
// transform with MVP to clip space, or whatever
Let me know if this works!
EDIT: I guess not then. Alright:
the vertex (-0.5, -0.5, 4.0) is transformed to the nonsense value of (-3.694115, -0.499000, -0.051035)
Is that really nonsense? Rotating clockwise ~90 degrees around the Y-axis gives about that value if I just eye-ball it. Your test is "correct". At this point I'm starting to think that there's a problem with the bone hierarchy, or a problem with the interpolation of the keyframes.
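For what it's worth, the two blending formulations are mathematically identical, since matrix-vector multiplication is linear: (w0*B0 + w1*B1) * v == w0*(B0*v) + w1*(B1*v). A quick sketch demonstrating this with the numbers from the question (GLM; the function is mine, for illustration only):

#include <glm/glm.hpp>
#include <glm/gtc/matrix_transform.hpp>

bool BlendsAreEquivalent()
{
    glm::mat4 b0 = glm::rotate( glm::mat4( 1.0f ), glm::radians( -89.113f ), glm::vec3( 0.0f, 1.0f, 0.0f ) );
    glm::mat4 b1 = glm::mat4( 1.0f );
    float w0 = 0.911f, w1 = 0.087f;
    glm::vec4 v( -0.5f, -0.5f, 4.0f, 1.0f );

    glm::vec4 blendMatricesFirst = ( b0 * w0 + b1 * w1 ) * v;          // blend matrices, then transform
    glm::vec4 blendVectorsAfter  = ( b0 * v ) * w0 + ( b1 * v ) * w1;  // transform, then blend results
    return glm::length( blendMatricesFirst - blendVectorsAfter ) < 1e-5f;
}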
I am currently working on some raycasting in GLSL, which works fine. Anyway, I want to go from an orthographic projection to a perspective projection now, but I am not sure how to do so properly.
Are there any good links on how to use a projection Matrix with raycasting?
I am not even sure what I have to apply the matrix to (probably to the ray direction somehow?). Right now I do it like this (pseudocode):
vec3 rayDir = vec3(0.0, 0.0, -1.0); // down the negative z axis, in parallel;
but now I would like to use a projection matrix that works similarly to the gluPerspective function, so that I can simply define an aspect ratio, a field of view, and near and far planes.
So basically, can anybody provide me with a chunk of code to set up a projection matrix similar to what gluPerspective does?
And secondly, is it correct to multiply it with the ray direction?
For raytracing in the same scene as a standard render, I have found that the following works for getting a scene-space ray from screen coordinates: (e.g. render a full-screen quad from [-1,-1] to [1,1], or some sub-area within that range)
Vertex Shader
uniform mat4 invprojview;
uniform float near;
uniform float far;
attribute vec2 pos; // from [-1,-1] to [1,1]
varying lowp vec3 origin;
varying lowp vec3 ray;
void main() {
gl_Position = vec4(pos, 0.0, 1.0);
origin = (invprojview * vec4(pos, -1.0, 1.0) * near).xyz;
ray = (invprojview * vec4(pos * (far - near), far + near, far - near)).xyz;
// equivalent calculation:
// ray = (invprojview * (vec4(pos, 1.0, 1.0) * far - vec4(pos, -1.0, 1.0) * near)).xyz
}
Fragment Shader
varying lowp vec3 origin;
varying lowp vec3 ray;
void main() {
lowp vec3 rayDir = normalize(ray);
// Do raytracing from origin in direction rayDir
}
Note that you need to provide the inverted projection-view matrix, as well as the near and far clipping distances. I'm sure there's a way to get those clipping distances from the matrix, but I haven't figured out how.
This will define a ray which starts at the near plane, not the camera's position. This gives the advantage of clipping at the same position that OpenGL will clip triangles, making your ray-traced object match the scene. Since the ray variable will be the correct length to reach the far plane, you can also clip there too.
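As an aside, the near and far distances can be recovered from a standard OpenGL perspective matrix (the form given below): with A = -(f+n)/(f-n) and B = -2fn/(f-n), one gets n = B/(A-1) and f = B/(A+1). A small sketch, assuming GLM's column-major indexing:

#include <glm/glm.hpp>

void NearFarFromProjection( const glm::mat4& proj, float& nearOut, float& farOut )
{
    float A = proj[2][2]; // -(f+n)/(f-n)
    float B = proj[3][2]; // -2fn/(f-n)
    nearOut = B / ( A - 1.0f );
    farOut  = B / ( A + 1.0f );
}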
As for getting a perspective matrix in the first place (and understanding the mathematics behind it), I always use this reference page:
http://www.songho.ca/opengl/gl_projectionmatrix.html
I recommend looking through the derivation on that site, but in case it becomes unavailable here is the final projection matrix definition:
2n/(r-l) 0 (r+l)/(r-l) 0
0 2n/(t-b) (t+b)/(t-b) 0
0 0 -(f+n)/(f-n) -2fn/(f-n)
0 0 -1 0
To shoot rays out into the scene, you want to start by putting yourself (mentally) into the world after the projection matrix has been applied. This means that the view frustum is now a 2x2x1 box - this is known as the canonical view volume. (The opposing corners of the box are (-1, -1, 0) and (1, 1, -1).) The rays you generate will (in the post-projection transformed world) start at the origin and hit the rear clipping plane (located at z=-1). The "destination" of your first ray should be (-1, 1, -1) - the upper-left-hand corner of the far clipping plane. (Subsequent rays' "destinations" are calculated based on the resolution of your viewport.)
Now that you have this ray in the canonical view volume, you need to get it into standard world coordinates. How do you do this? Simple - just multiply by the inverse of the projection matrix, often called the viewing transformation. This will put your rays into the same coordinate system as the objects in your scene, making ray collision testing nice and easy.
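A host-side sketch of that unprojection step (GLM assumed, using OpenGL's convention that NDC z runs from -1 at the near plane to +1 at the far plane; the divide by w is needed because a perspective projection is not affine):

#include <glm/glm.hpp>

glm::vec3 UnprojectNdc( const glm::vec3& ndc, const glm::mat4& projView )
{
    glm::vec4 world = glm::inverse( projView ) * glm::vec4( ndc, 1.0f );
    return glm::vec3( world ) / world.w; // perspective divide
}

void RayFromNdc( float x, float y, const glm::mat4& projView, glm::vec3& origin, glm::vec3& dir )
{
    origin         = UnprojectNdc( glm::vec3( x, y, -1.0f ), projView ); // point on the near plane
    glm::vec3 farP = UnprojectNdc( glm::vec3( x, y,  1.0f ), projView ); // point on the far plane
    dir            = glm::normalize( farP - origin );
}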
With a perspective projection, the projection matrix describes the mapping from 3D points in the world, as they are seen from a pinhole camera, to 2D points of the viewport. The eye space coordinates in the camera frustum (a truncated pyramid) are mapped to a cube (the normalized device coordinates).
The Perspective Projection Matrix looks like this:
r = right, l = left, b = bottom, t = top, n = near, f = far
2*n/(r-l) 0 0 0
0 2*n/(t-b) 0 0
(r+l)/(r-l) (t+b)/(t-b) -(f+n)/(f-n) -1
0 0 -2*f*n/(f-n) 0
where:
a = w / h ... the viewport aspect ratio
t = tan( fov_y / 2 );
2 * n / (r-l) = 1 / (t * a)
2 * n / (t-b) = 1 / t
If the projection is symmetric, where the line of sight is in the center of the view port and the field of view is not displaced, then the matrix can be simplified:
1/(t*a) 0 0 0
0 1/t 0 0
0 0 -(f+n)/(f-n) -1
0 0 -2*f*n/(f-n) 0
The following function will calculate the same projection matrix as gluPerspective does:
#include <array>
#include <cmath>

const float cPI = 3.14159265f;
float ToRad( float deg ) { return deg * cPI / 180.0f; }

using TVec4  = std::array< float, 4 >;
using TMat44 = std::array< TVec4, 4 >;

TMat44 Perspective( float fov_y, float aspect, float near, float far )
{
    float fn  = far + near;
    float f_n = far - near;
    float r   = aspect;
    float t   = 1.0f / std::tan( ToRad( fov_y ) / 2.0f );
    return TMat44{
        TVec4{ t / r, 0.0f,  0.0f,                     0.0f },
        TVec4{ 0.0f,  t,     0.0f,                     0.0f },
        TVec4{ 0.0f,  0.0f, -fn / f_n,                -1.0f },
        TVec4{ 0.0f,  0.0f, -2.0f * far * near / f_n,  0.0f }
    };
}
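For example, a call equivalent to gluPerspective( 90.0, 800.0 / 600.0, 0.5, 100.0 ) would be (the matrix is stored column by column, as OpenGL expects):

TMat44 proj = Perspective( 90.0f, 800.0f / 600.0f, 0.5f, 100.0f );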
See further:
Perspective projection and view matrix: Both depth buffer and triangle face orientation are reversed in OpenGL
How to render depth linearly in modern OpenGL with gl_FragCoord.z in fragment shader?
WebGL example:
<script type="text/javascript">
camera_vert =
"precision mediump float; \n" +
"attribute vec3 inPos; \n" +
"attribute vec3 inCol; \n" +
"varying vec3 vertCol;" +
"uniform mat4 u_projectionMat44;" +
"uniform mat4 u_viewMat44;" +
"uniform mat4 u_modelMat44;" +
"void main()" +
"{" +
" vertCol = inCol;" +
" vec4 modolPos = u_modelMat44 * vec4( inPos, 1.0 );" +
" vec4 viewPos = u_viewMat44 * modolPos;" +
" gl_Position = u_projectionMat44 * viewPos;" +
"}";
camera_frag =
"precision mediump float; \n" +
"varying vec3 vertCol;" +
"void main()" +
"{" +
" gl_FragColor = vec4( vertCol, 1.0 );" +
"}";
glArrayType = typeof Float32Array !="undefined" ? Float32Array : ( typeof WebGLFloatArray != "undefined" ? WebGLFloatArray : Array );
function IdentityMat44() {
var a=new glArrayType(16);
a[0]=1;a[1]=0;a[2]=0;a[3]=0;a[4]=0;a[5]=1;a[6]=0;a[7]=0;a[8]=0;a[9]=0;a[10]=1;a[11]=0;a[12]=0;a[13]=0;a[14]=0;a[15]=1;
return a;
};
function Cross( a, b ) { return [ a[1] * b[2] - a[2] * b[1], a[2] * b[0] - a[0] * b[2], a[0] * b[1] - a[1] * b[0], 0.0 ]; }
function Dot( a, b ) { return a[0]*b[0] + a[1]*b[1] + a[2]*b[2]; }
function Normalize( v ) {
var len = Math.sqrt( v[0] * v[0] + v[1] * v[1] + v[2] * v[2] );
return [ v[0] / len, v[1] / len, v[2] / len ];
}
var Camera = {};
Camera.create = function() {
this.pos = [0, 8, 0.5];
this.target = [0, 0, 0];
this.up = [0, 0, 1];
this.fov_y = 90;
this.vp = [800, 600];
this.near = 0.5;
this.far = 100.0;
}
Camera.Perspective = function() {
var fn = this.far + this.near;
var f_n = this.far - this.near;
var r = this.vp[0] / this.vp[1];
var t = 1 / Math.tan( Math.PI * this.fov_y / 360 );
var m = IdentityMat44();
m[0] = t/r; m[1] = 0; m[2] = 0; m[3] = 0;
m[4] = 0; m[5] = t; m[6] = 0; m[7] = 0;
m[8] = 0; m[9] = 0; m[10] = -fn / f_n; m[11] = -1;
m[12] = 0; m[13] = 0; m[14] = -2 * this.far * this.near / f_n; m[15] = 0;
return m;
}
function ToVP( v ) { return [ v[1], v[2], -v[0] ] }
Camera.LookAt = function() {
var p = ToVP( this.pos ), t = ToVP( this.target ), u = ToVP( this.up );
var mx = Normalize( [ t[0]-p[0], t[1]-p[1], t[2]-p[2] ] );
var my = Normalize( Cross( u, mx ) );
var mz = Normalize( Cross( mx, my ) );
var eyeInv = [ -this.pos[0], -this.pos[1], -this.pos[2] ];
var tx = Dot( eyeInv, [mx[0], my[0], mz[0]] );
var ty = Dot( eyeInv, [mx[1], my[1], mz[1]] );
var tz = Dot( eyeInv, [mx[2], my[2], mz[2]] );
var m = IdentityMat44();
m[0] = mx[0]; m[1] = mx[1]; m[2] = mx[2]; m[3] = 0;
m[4] = my[0]; m[5] = my[1]; m[6] = my[2]; m[7] = 0;
m[8] = mz[0]; m[9] = mz[1]; m[10] = mz[2]; m[11] = 0;
m[12] = tx; m[13] = ty; m[14] = tz; m[15] = 1;
return m;
}
// shader program object
var ShaderProgram = {};
ShaderProgram.Create = function( shaderList, uniformNames ) {
var shaderObjs = [];
for ( var i_sh = 0; i_sh < shaderList.length; ++ i_sh ) {
var shderObj = this.CompileShader( shaderList[i_sh].source, shaderList[i_sh].stage );
if ( shderObj == 0 )
return 0;
shaderObjs.push( shderObj );
}
if ( !this.LinkProgram( shaderObjs ) )
return 0;
this.unifomLocation = {};
for ( var i_n = 0; i_n < uniformNames.length; ++ i_n ) {
var name = uniformNames[i_n];
this.unifomLocation[name] = gl.getUniformLocation( this.prog, name );
}
return this.prog;
}
ShaderProgram.Use = function() { gl.useProgram( this.prog ); }
ShaderProgram.SetUniformMat44 = function( name, mat ) { gl.uniformMatrix4fv( this.unifomLocation[name], false, mat ); }
ShaderProgram.CompileShader = function( source, shaderStage ) {
var shaderObj = gl.createShader( shaderStage );
gl.shaderSource( shaderObj, source );
gl.compileShader( shaderObj );
return gl.getShaderParameter( shaderObj, gl.COMPILE_STATUS ) ? shaderObj : 0;
}
ShaderProgram.LinkProgram = function( shaderObjs ) {
this.prog = gl.createProgram();
for ( var i_sh = 0; i_sh < shaderObjs.length; ++ i_sh )
gl.attachShader( this.prog, shaderObjs[i_sh] );
gl.linkProgram( this.prog );
return gl.getProgramParameter( this.prog, gl.LINK_STATUS ) ? true : false;
}
function drawScene(){
var canvas = document.getElementById( "camera-canvas" );
Camera.create();
Camera.vp = [canvas.width, canvas.height];
var currentTime = Date.now();
var deltaMS = currentTime - startTime;
Camera.pos = EllipticalPosition( 7, 4, CalcAng( currentTime, 10.0 ) );
gl.viewport( 0, 0, canvas.width, canvas.height );
gl.enable( gl.DEPTH_TEST );
gl.clearColor( 0.0, 0.0, 0.0, 1.0 );
gl.clear( gl.COLOR_BUFFER_BIT | gl.DEPTH_BUFFER_BIT );
ShaderProgram.Use();
ShaderProgram.SetUniformMat44( "u_projectionMat44", Camera.Perspective() );
ShaderProgram.SetUniformMat44( "u_viewMat44", Camera.LookAt() );
ShaderProgram.SetUniformMat44( "u_modelMat44", IdentityMat44() );
gl.enableVertexAttribArray( prog.inPos );
gl.bindBuffer( gl.ARRAY_BUFFER, buf.pos );
gl.vertexAttribPointer( prog.inPos, 3, gl.FLOAT, false, 0, 0 );
gl.enableVertexAttribArray( prog.inCol );
gl.bindBuffer( gl.ARRAY_BUFFER, buf.col );
gl.vertexAttribPointer( prog.inCol, 3, gl.FLOAT, false, 0, 0 );
gl.bindBuffer( gl.ELEMENT_ARRAY_BUFFER, buf.inx );
gl.drawElements( gl.TRIANGLES, 12, gl.UNSIGNED_SHORT, 0 );
gl.disableVertexAttribArray( prog.inPos );
gl.disableVertexAttribArray( prog.inCol );
}
var startTime;
function Fract( val ) {
return val - Math.trunc( val );
}
function CalcAng( currentTime, intervall ) {
return Fract( (currentTime - startTime) / (1000*intervall) ) * 2.0 * Math.PI;
}
function CalcMove( currentTime, intervall, range ) {
var pos = Fract( (currentTime - startTime) / (1000*intervall) ) * 2.0;
pos = pos < 1.0 ? pos : (2.0 - pos);
return range[0] + (range[1] - range[0]) * pos;
}
function EllipticalPosition( a, b, angRag ) {
var a_b = a * a - b * b
var ea = (a_b <= 0) ? 0 : Math.sqrt( a_b );
var eb = (a_b >= 0) ? 0 : Math.sqrt( -a_b );
return [ a * Math.sin( angRag ) - ea, b * Math.cos( angRag ) - eb, 0 ];
}
var gl;
var prog;
var buf = {};
function cameraStart() {
var canvas = document.getElementById( "camera-canvas");
gl = canvas.getContext( "experimental-webgl" );
if ( !gl )
return;
prog = ShaderProgram.Create(
[ { source : camera_vert, stage : gl.VERTEX_SHADER },
{ source : camera_frag, stage : gl.FRAGMENT_SHADER }
],
[ "u_projectionMat44", "u_viewMat44", "u_modelMat44"] );
if ( prog == 0 )
return;
prog.inPos = gl.getAttribLocation( prog, "inPos" );
prog.inCol = gl.getAttribLocation( prog, "inCol" );
var sin120 = 0.8660254
var pos = [ 0.0, 0.0, 1.0, 0.0, -sin120, -0.5, sin120 * sin120, 0.5 * sin120, -0.5, -sin120 * sin120, 0.5 * sin120, -0.5 ];
var col = [ 1.0, 0.0, 0.0, 1.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 1.0, 0.0 ];
var inx = [ 0, 1, 2, 0, 2, 3, 0, 3, 1, 1, 3, 2 ];
buf.pos = gl.createBuffer();
gl.bindBuffer( gl.ARRAY_BUFFER, buf.pos );
gl.bufferData( gl.ARRAY_BUFFER, new Float32Array( pos ), gl.STATIC_DRAW );
buf.col = gl.createBuffer();
gl.bindBuffer( gl.ARRAY_BUFFER, buf.col );
gl.bufferData( gl.ARRAY_BUFFER, new Float32Array( col ), gl.STATIC_DRAW );
buf.inx = gl.createBuffer();
gl.bindBuffer( gl.ELEMENT_ARRAY_BUFFER, buf.inx );
gl.bufferData( gl.ELEMENT_ARRAY_BUFFER, new Uint16Array( inx ), gl.STATIC_DRAW );
startTime = Date.now();
setInterval(drawScene, 50);
}
</script>
<body onload="cameraStart();">
<canvas id="camera-canvas" style="border: none;" width="512" height="256"></canvas>
</body>
Don't try to modify your rays. Instead, do this:
a) create matrix using the location/rotation of your camera.
b) invert the matrix
c) apply it to all the models in the scene
d) render it using your normal methods.
This is actually the way OpenGL does it as well. Rotating the camera to the right is the same as rotating the world to the left.
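A minimal sketch of that idea (GLM; the helper is mine): build the camera's placement matrix from its position and rotation, invert it, and that inverse is what gets applied to everything else in the scene.

#include <glm/glm.hpp>
#include <glm/gtc/matrix_transform.hpp>

glm::mat4 ViewFromCamera( const glm::vec3& camPos, float yawRadians )
{
    glm::mat4 camera = glm::translate( glm::mat4( 1.0f ), camPos );             // a) camera location...
    camera = glm::rotate( camera, yawRadians, glm::vec3( 0.0f, 1.0f, 0.0f ) );  //    ...and rotation
    return glm::inverse( camera );                                              // b) invert; c) apply to all models
}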
I answer this after arriving here from a Google search.
The existing answers seem to miss the lack of understanding in the original question.
The idea of needing to apply a projection matrix when raycasting is nonsense.
We create orthographic raycasts by starting from the view plane and raytracing the same direction for each pixel; the origin of the ray changes per pixel.
We create perspective raycasts by starting at the eye position, behind the view plane, and raytracing a unique direction for each pixel; i.e. the origin of the ray is fixed and the same for every pixel.
Understand that the projection matrices themselves, and the process they are usually involved in, are derived from raycasting. The perspective matrix encodes a raycast of the kind I described.
Projecting a point onto the screen is casting a ray from the eye/view plane to the point and finding the intersection with the view plane...
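A short sketch of that pinhole-camera ray generation (my own helper, GLM assumed): the origin is the eye for every pixel, and the direction comes from the pixel position, the vertical field of view and the aspect ratio.

#include <glm/glm.hpp>
#include <cmath>

// Perspective ray for pixel (px, py) in an image of width x height,
// with the camera at the origin looking down -Z in its own space.
void PixelRay( int px, int py, int width, int height, float fovYRadians, glm::vec3& origin, glm::vec3& dir )
{
    float aspect = float( width ) / float( height );
    float halfH  = std::tan( fovYRadians * 0.5f );
    float halfW  = halfH * aspect;

    // Map the pixel centre to [-1, 1] in both axes.
    float x = ( ( px + 0.5f ) / width ) * 2.0f - 1.0f;
    float y = 1.0f - ( ( py + 0.5f ) / height ) * 2.0f;

    origin = glm::vec3( 0.0f );                                           // the eye: the same for every pixel
    dir    = glm::normalize( glm::vec3( x * halfW, y * halfH, -1.0f ) );  // unique direction per pixel
}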