I'm using OpenGL ES.
I have two ways of calculating the "dir" vector; which code is fastest?
attribute vec2 order;
code1:
if( abs(sinA) < 0.2 ) {
if(order.x == 1.0){
dir = sNormalPrev;
} else {
dir = sNormalNext;
}
} else {
dir *= order.x / sinA;
}
code2:
float k = step(0.2, abs(sinA));
dir = k * dir * order.x / sinA - (k-1.0) * (step(1.0, order.x + 1.0) * sNormalPrev + step(1.0, -order.x + 1.0) * sNormalNext);
Writing a test, I don't see much of a difference:
var iterationsPerTiming = 40;
var gl = document.createElement("canvas").getContext("webgl");
gl.canvas.width = 1;
gl.canvas.height = 1;
var programInfo1 = twgl.createProgramInfo(gl, ["vs1", "fs"])
var programInfo2 = twgl.createProgramInfo(gl, ["vs2", "fs"]);
var count = new Float32Array(1000000);
for (var i = 0; i < count.length; ++i) {
count[i] = i % 3 / 2;
}
var arrays = {
vertexId: {
data: count, numComponents: 1,
},
};
var bufferInfo = twgl.createBufferInfoFromArrays(gl, arrays);
iterateTest(programInfo1, 10) // prime this path
.then(function() { return iterateTest(programInfo2, 10)}) // prime this path
.then(function() { return iterateTest(programInfo1, 20)})
.then(log)
.then(function() { return iterateTest(programInfo2, 20)})
.then(log);
function iterateTest(programInfo, times) {
return new Promise(function(resolve, reject) {
var timings = [];
var totalTime = 0;
function runNextIteration() {
if (times) {
--times;
timings.push(test(programInfo, iterationsPerTiming));
setTimeout(runNextIteration, 1);
} else {
var totalTime = 0;
var msgs = timings.map(function(timing, ndx) {
totalTime += timing;
return "" + ndx + ": " + timing.toFixed(3);
});
msgs.push("average timing: " + (totalTime / timings.length).toFixed(3));
resolve(msgs.join("\n"));
}
}
runNextIteration();
});
}
function test(programInfo, iterations) {
gl.useProgram(programInfo.program);
twgl.setBuffersAndAttributes(gl, programInfo, bufferInfo);
var startTime = performance.now();
for (var i = 0; i < iterations; ++i) {
twgl.drawBufferInfo(gl, gl.TRIANGLES, bufferInfo, count.length);
}
// this effectively does a gl.finish. It's not useful for real timing
// because it stalls the pipeline, but it should be useful for
// comparing times since the stalling is included in both
var temp = new Uint8Array(4);
gl.readPixels(0, 0, 1, 1, gl.RGBA, gl.UNSIGNED_BYTE, temp);
return performance.now() - startTime;
}
function log(msg) {
var div = document.createElement("pre");
div.appendChild(document.createTextNode(msg));
document.body.appendChild(div);
return Promise.resolve();
}
html, body { font-family: monospace; }
<script src="https://twgljs.org/dist/twgl.min.js"></script>
<script id="vs1" type="notjs">
attribute float vertexId;
void main() {
vec2 order = vec2(vertexId, 0);
float sinA = vertexId;
vec3 dir = vec3(0);
vec3 sNormalPrev = vec3(1);
vec3 sNormalNext = vec3(-1);
if( abs(sinA) < 0.2 ) {
if(order.x == 1.0){
dir = sNormalPrev;
} else {
dir = sNormalNext;
}
} else {
dir *= order.x / sinA;
}
gl_Position = vec4(dir, 1.0); // have to use dir
gl_PointSize = 1.0;
}
</script>
<script id="vs2" type="notjs">
attribute float vertexId;
void main() {
vec2 order = vec2(vertexId, 0);
float sinA = vertexId;
vec3 dir = vec3(0);
vec3 sNormalPrev = vec3(1);
vec3 sNormalNext = vec3(-1);
float k = step(0.2, abs(sinA));
dir = k * dir * order.x / sinA - (k-1.0) * (step(1.0, order.x + 1.0) * sNormalPrev + step(1.0, -order.x + 1.0) * sNormalNext);
gl_Position = vec4(dir, 1.0); // have to use dir
gl_PointSize = 1.0;
}
</script>
<script id="fs" type="notjs">
precision mediump float;
void main() {
gl_FragColor = vec4(1);
}
</script>
Maybe my test is bad. Tested on an early 2015 MacBook Pro and an iPhone 6s+.
GPU cores are mostly wide SIMD units, and they handle if-statements via masking. Depending on the GPU architecture, the shader compiler converts control statements into masking operations in much the same way you did by hand in your code.
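As a rough illustration of that lowering (not the exact code any particular compiler emits), code1's branch can be rewritten as masked selects by hand. This sketch reuses the question's variables (dir, order, sinA, sNormalPrev, sNormalNext), with mix() driven by a 0/1 mask standing in for the hardware execution mask:
float mask = step(0.2, abs(sinA));                                 // 1.0 when abs(sinA) >= 0.2
vec3 nearZero = mix(sNormalNext, sNormalPrev, step(1.0, order.x)); // inner if on order.x
vec3 general = dir * (order.x / sinA);                             // else branch
dir = mix(nearZero, general, mask);                                // select without a branch
Note that, unlike a real branch, both sides are always evaluated here, so the division by sinA still happens even when the near-zero side is selected; that is the same trade-off code2 makes.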
On PCs the GPU driver has enough processing power to optimize shaders properly, so your optimization makes no difference. According to this blog post from 2010, your optimization would have made sense on mobile platforms. I assume this is no longer the case with today's smartphones, as they have enough processing power to optimize shaders properly and the drivers have matured over time.
You can also try out the GLSL optimizer tool mentioned in that blog post. Some GPU vendors also provide tools for profiling shaders.
Related
So I'm making a raytracer in OpenGL, fully shader based, and I'm struggling to figure out where the problem is with my shadow rays. If I multiply the radiance of the object by the shadow ray output, it seems like only the "edge" of the sphere is lit up.
I have checked the code multiple times without finding where the problem comes from.
This is what I get:
vec3 TraceShadowRay(vec3 hitPoint, vec3 normal, Object objects[3])
{
Light pointLight;
pointLight.position = vec3(0, 80, 0);
pointLight.intensity = 2;
Ray ShadowRay;
ShadowRay.origin = hitPoint + normal * 1e-4;
ShadowRay.dir = normalize(pointLight.position - ShadowRay.origin);
ShadowRay.t = 100000;
//ShadowRay.dir = vec3(0, 1, 0);
for(int i = 0; i < 3; ++i)
{
if(objects[i].type == 0)
{
if(interectSphere(objects[i].position, objects[i].radius, ShadowRay))
{
return vec3(0);
}
}
if(objects[i].type == 1)
{
if(intersectPlane(objects[i].normal, objects[i].position, ShadowRay))
{
return vec3(0);
}
}
}
float AngleNormalShadow = dot(ShadowRay.dir, normal);
clamp(AngleNormalShadow, 0, 1);
return GetLight(ShadowRay.origin, pointLight);// * AngleNormalShadow;
}
The GetLight function:
vec3 GetLight(vec3 origin, Light light)
{
return vec3(1, 1, 1) * light.intensity;
//float dist = sqrt( ((origin.x - light.position.x) * (origin.x - light.position.x)) + ((origin.y - light.position.y) * (origin.y - light.position.y)));
//return (vec3(1, 1, 1) * light.intensity) / (4 * M_PI * ((origin - light.position).length * (origin - light.position).length));
}
The intersectSphere function:
bool interectSphere(const vec3 center, float radius, inout Ray r)
{
vec3 o = r.origin;
vec3 d = r.dir;
vec3 v = o - center;
float b = 2 * dot(v, d);
float c = dot(v, v) - radius*radius;
float delta = b*b - 4 * c;
if(delta < 1e-4)
return false;
float t1 = (-b - sqrt(delta))/2;
float t2 = (-b + sqrt(delta))/2;
if(t1 < t2)
{
r.t = t1;
r.t2 = t2;
}
else if(t2 < t1)
{
r.t = t2;
r.t2 = t1;
}
r.reflectionNormal = normalize((r.origin + r.dir * r.t) - center);
return true;
}
The expected result is a nicely shaded sphere with the light coming from the top of the spheres.
Could it be a missing negation? It looks like interectSphere() returns true when there is a collision, but the calling code in TraceShadowRay() bails out when it returns true.
old:
if(interectSphere(objects[i].position, objects[i].radius, ShadowRay))
{
return vec3(0);
}
new:
if(!interectSphere(objects[i].position, objects[i].radius, ShadowRay))
{
return vec3(0);
}
I'm trying to implement skeletal animation using Assimp.net and OpenTK and have been following this tutorial but I cannot get it to work.
The model appears fine with identity matrices but is terribly garbled when using the transforms I generate from Assimp.
I suspect the issue is the way I am combining all of the matrices, or that there is a difference in OpenTK that I am not realising. I have made similar adjustments to the tutorial as suggested here: Matrix calculations for gpu skinning
but it is still garbled, just differently. I have also tried converting all Assimp matrices to OpenTK matrices before performing any multiplication. These are the areas of the code related to the matrices; I can provide more if needed:
Matrix Conversion
public static OpenTK.Matrix4 TKMatrix(Assimp.Matrix4x4 input)
{
return new OpenTK.Matrix4(input.A1, input.B1, input.C1, input.D1,
input.A2, input.B2, input.C2, input.D2,
input.A3, input.B3, input.C3, input.D3,
input.A4, input.B4, input.C4, input.D4);
}
Storing the Global Inverse
public class LoaderMesh
{
public Scene mScene;
public Mesh mMesh;
public OpenTK.Matrix4 GlobalInverseTransform { get; set; }
public LoaderMesh(Scene aiScene, Mesh aiMesh)
{
mScene = aiScene;
mMesh = aiMesh;
GlobalInverseTransform = Util.TKMatrix(mScene.RootNode.Transform);
GlobalInverseTransform.Invert();
}
Loading the bones
public void LoadBones(List<VBO.Vtx_BoneWeight.Vtx> boneData)
{
for (uint iBone = 0; iBone < mMesh.BoneCount; ++iBone)
{
uint boneIndex = 0;
String bonename = mMesh.Bones[iBone].Name;
if (!BoneMapping.ContainsKey(bonename))
{
boneIndex = (uint)NumBones;
NumBones++;
BoneInfo bi = new BoneInfo();
BoneInfos.Add(bi);
}
else
{
boneIndex = BoneMapping[bonename];
}
BoneMapping[bonename] = boneIndex;
BoneInfos[(int)boneIndex].OffsetMatrix = Util.TKMatrix(mMesh.Bones[iBone].OffsetMatrix);
for (uint iWeight = 0; iWeight < mMesh.Bones[iBone].VertexWeightCount; iWeight++)
{
uint VertexID = /*m_Entries[MeshIndex].BaseVertex*/ mMesh.Bones[iBone].VertexWeights[iWeight].VertexID;
float Weight = mMesh.Bones[iBone].VertexWeights[iWeight].Weight;
VBO.Vtx_BoneWeight.Vtx vtx = boneData[(int)VertexID];
VBO.Vtx_BoneWeight.AddWeight(ref vtx, boneIndex, Weight);
boneData[(int)VertexID] = vtx;
}
}
}
Calculating the Transforms
public void ReadNodeHierarchy(float animationTime, Node aiNode, ref OpenTK.Matrix4 parentTransform)
{
String NodeName = aiNode.Name;
Animation animation = mScene.Animations[0];
OpenTK.Matrix4 NodeTransformation = Util.TKMatrix(aiNode.Transform);
NodeAnimationChannel nodeAnim = FindNodeAnim(animation, NodeName);
OpenTK.Matrix4 localTransform = OpenTK.Matrix4.Identity;
if (nodeAnim != null)
{
// Interpolate scaling and generate scaling transformation matrix
Vector3D Scaling = new Vector3D();
CalcInterpolatedScaling(ref Scaling, animationTime, nodeAnim);
Console.WriteLine("Scaling: " + Scaling.ToString());
OpenTK.Matrix4 ScalingM = Util.TKMatrix(Matrix4x4.FromScaling(Scaling));
// Interpolate rotation and generate rotation transformation matrix
Quaternion RotationQ = new Quaternion();
CalcInterpolatedRotation(ref RotationQ, animationTime, nodeAnim);
Console.WriteLine("Rotation: " + RotationQ.ToString());
OpenTK.Matrix4 RotationM = Util.TKMatrix(RotationQ.GetMatrix());
// Interpolate translation and generate translation transformation matrix
Vector3D Translation = new Vector3D();
CalcInterpolatedPosition(ref Translation, animationTime, nodeAnim);
Console.WriteLine("Transform: " + Translation.ToString());
OpenTK.Matrix4 TranslationM = Util.TKMatrix(Matrix4x4.FromTranslation(Translation));
// Combine the above transformations
NodeTransformation = TranslationM * RotationM * ScalingM;
localTransform = TranslationM * RotationM * ScalingM;
}
OpenTK.Matrix4 GlobalTransformation = parentTransform * NodeTransformation;
OpenTK.Matrix4 parentPass = OpenTK.Matrix4.Identity;
if (BoneMapping.ContainsKey(NodeName) == true)
{
uint BoneIndex = BoneMapping[NodeName];
//BoneInfos[(int)BoneIndex].FinalTransformation = GlobalInverseTransform * BoneInfos[(int)BoneIndex].OffsetMatrix * GlobalTransformation;
BoneInfos[(int)BoneIndex].NodeTransformation = parentTransform * Util.TKMatrix(aiNode.Transform) * localTransform;
parentPass = BoneInfos[(int)BoneIndex].NodeTransformation;
BoneInfos[(int)BoneIndex].FinalTransformation = GlobalInverseTransform * BoneInfos[(int)BoneIndex].NodeTransformation * BoneInfos[(int)BoneIndex].OffsetMatrix;
}
for (uint i = 0; i < aiNode.ChildCount; i++)
{
ReadNodeHierarchy(animationTime, aiNode.Children[i], ref parentPass);
}
}
And this is the vertex shader code
#version 400
layout(location = 0)in vec4 vert;
layout(location = 1)in vec4 normal;
layout(location = 2)in vec4 texCoord;
layout(location = 3)in vec4 tanCoord;
layout(location = 4)in ivec4 boneIDs;
layout(location = 5)in vec4 boneWeights;
uniform mat4 projectionMtx;
uniform mat4 viewMtx;
uniform mat4 modelMtx;
const int MAX_BONES = 100;
uniform mat4 bones[MAX_BONES];
out vec3 positionFrg_CS;
out vec3 normalFrg_CS;
out vec3 tanCoordFrg_CS;
out vec3 bitCoordFrg_CS;
out vec4 texCoordFrg;
void main()
{
mat4 BoneTransform = bones[boneIDs[0]] * boneWeights[0];
BoneTransform += bones[boneIDs[1]] * boneWeights[1];
BoneTransform += bones[boneIDs[2]] * boneWeights[2];
BoneTransform += bones[boneIDs[3]] * boneWeights[3];
gl_Position = projectionMtx * viewMtx * modelMtx * BoneTransform * vert;
}
Is there anything I am doing wrong multiplying the matrices together?
In reply to livin_amuk: I have got this working, at least well enough for my needs, however I fixed this 6 months ago and my memory is vague...
If I remember correctly, my main issue was the bone/vertex indices; I think I messed up the BaseVertex because I was in a rush. Here is my current working LoadBones function.
public void LoadBones(List<VBO.Vtx_BoneWeight.Vtx> boneData, SubMesh mesh)
{
for (int iBone = 0; iBone < mesh.mMesh.BoneCount; ++iBone)
{
uint boneIndex = 0;
String bonename = mesh.mMesh.Bones[iBone].Name;
if (!BoneMapping.ContainsKey(bonename))
{
boneIndex = (uint)NumBones;
NumBones++;
BoneInfo bi = new BoneInfo();
BoneInfos.Add(bi);
//Note, I have these two lines included inside the if statement, the original tut does not. Not sure if it makes a difference.
BoneMapping[bonename] = boneIndex;
BoneInfos[(int)boneIndex].OffsetMatrix = AssimpToOpenTK.TKMatrix(mesh.mMesh.Bones[iBone].OffsetMatrix);
}
else
{
boneIndex = BoneMapping[bonename];
}
for (int iWeight = 0; iWeight < mesh.mMesh.Bones[iBone].VertexWeightCount; iWeight++)
{
//My question has the mesh.BaseVertex commented out. It is important!
long VertexID = mesh.BaseVertex + mesh.mMesh.Bones[iBone].VertexWeights[iWeight].VertexID;
float Weight = mesh.mMesh.Bones[iBone].VertexWeights[iWeight].Weight;
VBO.Vtx_BoneWeight.Vtx vtx = boneData[(int)VertexID];
VBO.Vtx_BoneWeight.AddWeight(ref vtx, boneIndex, Weight);
boneData[(int)VertexID] = vtx;
}
}
}
I also had the transforms backwards. Here is the ReadNodeHierarchy function:
public void ReadNodeHierarchy(float animationTime, Node aiNode, ref OpenTK.Matrix4 parentTransform)
{
String NodeName = aiNode.Name;
Animation animation = mScene.Animations[0];
OpenTK.Matrix4 NodeTransformation = AssimpToOpenTK.TKMatrix(aiNode.Transform);
NodeAnimationChannel nodeAnim = FindNodeAnim(animation, NodeName);
if (nodeAnim != null)
{
// Interpolate scaling and generate scaling transformation matrix
Vector3D Scaling = new Vector3D();
CalcInterpolatedScaling(ref Scaling, animationTime, nodeAnim);
OpenTK.Matrix4 ScalingM = AssimpToOpenTK.TKMatrix(Matrix4x4.FromScaling(Scaling));
// Interpolate rotation and generate rotation transformation matrix
Quaternion RotationQ = new Quaternion();
CalcInterpolatedRotation(ref RotationQ, animationTime, nodeAnim);
OpenTK.Matrix4 RotationM = AssimpToOpenTK.TKMatrix(RotationQ.GetMatrix());
// Interpolate translation and generate translation transformation matrix
Vector3D Translation = new Vector3D();
CalcInterpolatedPosition(ref Translation, animationTime, nodeAnim);
OpenTK.Matrix4 TranslationM = AssimpToOpenTK.TKMatrix(Matrix4x4.FromTranslation(Translation));
// Combine the above transformations
//All that local transform stuff is gone. The order of the transforms is reversed from my question AND the original tut.
NodeTransformation = ScalingM * RotationM * TranslationM;
}
//Also reversed.
OpenTK.Matrix4 GlobalTransformation = NodeTransformation * parentTransform;
//GlobalTransformation = OpenTK.Matrix4.Identity;
if (BoneMapping.ContainsKey(NodeName) == true)
{
uint BoneIndex = BoneMapping[NodeName];
//Also, Also, reversed.
BoneInfos[(int)BoneIndex].FinalTransformation = BoneInfos[(int)BoneIndex].OffsetMatrix * GlobalTransformation * GlobalInverseTransform;
}
for (int i = 0; i < aiNode.ChildCount; i++)
{
ReadNodeHierarchy(animationTime, aiNode.Children[i], ref GlobalTransformation);
}
}
The Matrix conversion at the top is also correct, as is the Shader code.
I have the following compute shader code for computing depth of field. However, very unusually, the loop executes just once, even if g_rayCount is 10. Please have a look at the RaycasterCS function below, where the for loop is.
//--------------------------------------------------------------------------------------
// Compute Shader
//-------------------------------------------------------------------------------------
SamplerState SSLinear
{
Filter = Min_Mag_Linear_Mip_Point;
AddressU = Border;
AddressV = Border;
AddressW = Border;
};
float3 CalculateDoF(uint seedIndex, uint2 fragPos)
{
;
}
[numthreads(RAYCASTER_THREAD_BLOCK_SIZE, RAYCASTER_THREAD_BLOCK_SIZE, 1)]
void RaycasterCS(in uint3 threadID: SV_GroupThreadID, in uint3 groupID: SV_GroupID, in uint3 dispatchThreadID :SV_DispatchThreadID)
{
uint2 fragPos = groupID.xy * RAYCASTER_THREAD_BLOCK_SIZE + threadID.xy;
float4 dstColor = g_texFinal[fragPos];
uint seedIndex = dispatchThreadID.x * dispatchThreadID.y;
float3 final = float3(0, 0, 0);
float color = 0;
[loop][allow_uav_condition]
for (int i = 0; i < g_rayCount; ++i);
{
float3 dof = CalculateDoF(seedIndex, fragPos);
final += dof;
}
final *= 1.0f / ((float) g_rayCount);
g_texFinalRW[fragPos] = float4(final, 1);
}
//--------------------------------------------------------------------------------------
technique10 Raycaster
{
pass RaycastDefault
{
SetVertexShader(NULL);
SetGeometryShader(NULL);
SetPixelShader(NULL);
SetComputeShader(CompileShader(cs_5_0, RaycasterCS()));
}
}
Remove the semicolon at the end of the for statement:
for (int i = 0; i < g_rayCount; ++i) // removed semicolon
{
float3 dof = CalculateDoF(seedIndex, fragPos);
final += dof;
}
As I guess you know, the semicolon was just running an empty for loop; the code in braces was then executed only once afterwards.
I'm trying to write a program for skeletal animation in DirectX 9. I have used the LoadMeshFromHierarchy function to load an animated mesh... now I would like to bypass the animController so that I can dictate the animation by reading keyframes from the animated mesh file (e.g. tiny.x) and looping through those keys at will.
Here is what I have so far... at this point I have already parsed the .x file successfully and stored each animation and animation key for the sole animation set within a class (Anim). When I run this update function the animated mesh is disfigured, and I can't figure out why... I assume it is the process by which I update the transformation matrix for each frame... here is my code:
void cAnimationCollection::Update(DWORD AnimSetIndex, DWORD time)
{
D3DXFRAME_EXTENDED *currentFrame = (D3DXFRAME_EXTENDED*)m_entity->m_frameRoot;
cAnimationSet *AnimSet = m_AnimationSets;
assert(AnimSetIndex <= index);
while(AnimSet != NULL)
{
if(AnimSet->m_index == AnimSetIndex)
{
cAnimation *Anim = AnimSet->m_Animations;
while(Anim != NULL)
{
D3DXMatrixIdentity(&Anim->m_Frame->TransformationMatrix);
if(Anim->m_NumScaleKeys && Anim->m_ScaleKeys)
{
DWORD ScaleKey=0, ScaleKey2=0;
for(DWORD i = 0; i < Anim->m_NumScaleKeys; i++)
{
if(time >= Anim->m_ScaleKeys[i].m_Time)
ScaleKey = i;
}
ScaleKey2 = (ScaleKey>=(Anim->m_NumScaleKeys-1))?ScaleKey:ScaleKey+1;
float TimeDiff = Anim->m_ScaleKeys[ScaleKey2].m_Time - Anim->m_ScaleKeys[ScaleKey].m_Time;
if(!TimeDiff)
TimeDiff = 1;
float Scalar = ((float)time - Anim->m_ScaleKeys[ScaleKey].m_Time) / (float)TimeDiff;
D3DXVECTOR3 vecScale = Anim->m_ScaleKeys[ScaleKey2].m_VecKey - Anim->m_ScaleKeys[ScaleKey].m_VecKey;
vecScale *= Scalar;
vecScale += Anim->m_ScaleKeys[ScaleKey].m_VecKey;
D3DXMATRIX matScale;
D3DXMatrixScaling(&matScale, vecScale.x, vecScale.y, vecScale.z);
Anim->m_Frame->TransformationMatrix *= matScale;
}
if(Anim->m_NumRotationKeys && Anim->m_RotationKeys)
{
DWORD RotKey=0, RotKey2=0;
for(DWORD i = 0; i < Anim->m_NumRotationKeys; i++)
{
if(time >= Anim->m_RotationKeys[i].m_Time)
RotKey = i;
}
RotKey2 = (RotKey>=(Anim->m_NumRotationKeys-1))?RotKey:RotKey+1;
float TimeDiff = Anim->m_RotationKeys[RotKey2].m_Time - Anim->m_RotationKeys[RotKey].m_Time;
if(!TimeDiff)
TimeDiff = 1;
float Scalar = ((float)time - Anim->m_RotationKeys[RotKey].m_Time) / (float)TimeDiff;
D3DXQUATERNION quatRotation;
D3DXQuaternionSlerp(&quatRotation,
&Anim->m_RotationKeys[RotKey].m_QuatKey,
&Anim->m_RotationKeys[RotKey2].m_QuatKey,
Scalar);
D3DXMATRIX matRotation;
D3DXMatrixRotationQuaternion(&matRotation, &quatRotation);
Anim->m_Frame->TransformationMatrix *= matRotation;
}
if(Anim->m_NumTranslationKeys && Anim->m_TranslationKeys)
{
DWORD PosKey=0, PosKey2=0;
for(DWORD i = 0; i < Anim->m_NumTranslationKeys; i++)
{
if(time >= Anim->m_TranslationKeys[i].m_Time)
PosKey = i;
}
PosKey2 = (PosKey>=(Anim->m_NumTranslationKeys-1))?PosKey:PosKey+1;
float TimeDiff = Anim->m_TranslationKeys[PosKey2].m_Time - Anim->m_TranslationKeys[PosKey].m_Time;
if(!TimeDiff)
TimeDiff = 1;
float Scalar = ((float)time - Anim->m_TranslationKeys[PosKey].m_Time) / (float)TimeDiff;
D3DXVECTOR3 vecPos = Anim->m_TranslationKeys[PosKey2].m_VecKey - Anim->m_TranslationKeys[PosKey].m_VecKey;
vecPos *= Scalar;
vecPos += Anim->m_TranslationKeys[PosKey].m_VecKey;
D3DXMATRIX matTranslation;
D3DXMatrixTranslation(&matTranslation, vecPos.x, vecPos.y, vecPos.z);
Anim->m_Frame->TransformationMatrix *= matTranslation;
}
if(Anim->m_NumMatrixKeys && Anim->m_MatrixKeys)
{
DWORD Key1 = 0, Key2 = 0;
for(DWORD i=0;i<Anim->m_NumMatrixKeys;i++)
{
if(time >= Anim->m_MatrixKeys[i].m_Time)
Key1 = i;
}
Key2 = (Key1>=(Anim->m_NumMatrixKeys-1))?Key1:Key1+1;
float TimeDiff = Anim->m_MatrixKeys[Key2].m_Time - Anim->m_MatrixKeys[Key1].m_Time;
if(!TimeDiff)
TimeDiff = 1;
float Scalar = ((float)time - Anim->m_MatrixKeys[Key1].m_Time) / (float)TimeDiff;
D3DXMATRIX matDiff = Anim->m_MatrixKeys[Key2].m_MatKey - Anim->m_MatrixKeys[Key1].m_MatKey;
matDiff *= Scalar;
matDiff += Anim->m_MatrixKeys[Key1].m_MatKey;
Anim->m_Frame->TransformationMatrix *= matDiff;
}
Anim = Anim->m_Next;
}
}
AnimSet = AnimSet->m_Next;
}
m_entity->UpdateFrameMatrices(m_entity->m_frameRoot, 0);
m_entity->UpdateSkinnedMesh(m_entity->m_frameRoot);
if(AnimSet == NULL)
return;
}
Is my method correct? The first thing I do for each frame is reset the transformation matrix to identity; then I calculate an interpolated value for each key type (translation, scale, rotation, and matrix) and apply it to the transformation matrix; then I update the frame matrices, and finally the skinned mesh.
Any ideas?
I have written some code to perform 3D picking that for some reason doesn't work entirely correctly. (I'm using LWJGL, just so you know.)
This is what the code looks like:
if(Mouse.getEventButton() == 1) {
if (!Mouse.getEventButtonState()) {
Camera.get().generateViewMatrix();
float screenSpaceX = ((Mouse.getX()/800f/2f)-1.0f)*Camera.get().getAspectRatio();
float screenSpaceY = 1.0f-(2*((600-Mouse.getY())/600f));
float displacementRate = (float)Math.tan(Camera.get().getFovy()/2);
screenSpaceX *= displacementRate;
screenSpaceY *= displacementRate;
Vector4f cameraSpaceNear = new Vector4f((float) (screenSpaceX * Camera.get().getNear()), (float) (screenSpaceY * Camera.get().getNear()), (float) (-Camera.get().getNear()), 1);
Vector4f cameraSpaceFar = new Vector4f((float) (screenSpaceX * Camera.get().getFar()), (float) (screenSpaceY * Camera.get().getFar()), (float) (-Camera.get().getFar()), 1);
Matrix4f tmpView = new Matrix4f();
Camera.get().getViewMatrix().transpose(tmpView);
Matrix4f invertedViewMatrix = (Matrix4f)tmpView.invert();
Vector4f worldSpaceNear = new Vector4f();
Matrix4f.transform(invertedViewMatrix, cameraSpaceNear, worldSpaceNear);
Vector4f worldSpaceFar = new Vector4f();
Matrix4f.transform(invertedViewMatrix, cameraSpaceFar, worldSpaceFar);
Vector3f rayPosition = new Vector3f(worldSpaceNear.x, worldSpaceNear.y, worldSpaceNear.z);
Vector3f rayDirection = new Vector3f(worldSpaceFar.x - worldSpaceNear.x, worldSpaceFar.y - worldSpaceNear.y, worldSpaceFar.z - worldSpaceNear.z);
rayDirection.normalise();
Ray clickRay = new Ray(rayPosition, rayDirection);
Vector tMin = new Vector(), tMax = new Vector(), tempPoint;
float largestEnteringValue, smallestExitingValue, temp, closestEnteringValue = Camera.get().getFar()+0.1f;
Drawable closestDrawableHit = null;
for(Drawable d : this.worldModel.getDrawableThings()) {
// Calculate AABB for each object... needs to be moved later...
firstVertex = true;
for(Surface surface : d.getSurfaces()) {
for(Vertex v : surface.getVertices()) {
worldPosition.x = (v.x+d.getPosition().x)*d.getScale().x;
worldPosition.y = (v.y+d.getPosition().y)*d.getScale().y;
worldPosition.z = (v.z+d.getPosition().z)*d.getScale().z;
worldPosition = worldPosition.rotate(d.getRotation());
if (firstVertex) {
maxX = worldPosition.x; maxY = worldPosition.y; maxZ = worldPosition.z;
minX = worldPosition.x; minY = worldPosition.y; minZ = worldPosition.z;
firstVertex = false;
} else {
if (worldPosition.x > maxX) {
maxX = worldPosition.x;
}
if (worldPosition.x < minX) {
minX = worldPosition.x;
}
if (worldPosition.y > maxY) {
maxY = worldPosition.y;
}
if (worldPosition.y < minY) {
minY = worldPosition.y;
}
if (worldPosition.z > maxZ) {
maxZ = worldPosition.z;
}
if (worldPosition.z < minZ) {
minZ = worldPosition.z;
}
}
}
}
// ray/slabs intersection test...
// clickRay.getOrigin().x + clickRay.getDirection().x * f = minX
// clickRay.getOrigin().x - minX = -clickRay.getDirection().x * f
// clickRay.getOrigin().x/-clickRay.getDirection().x - minX/-clickRay.getDirection().x = f
// -clickRay.getOrigin().x/clickRay.getDirection().x + minX/clickRay.getDirection().x = f
largestEnteringValue = -clickRay.getOrigin().x/clickRay.getDirection().x + minX/clickRay.getDirection().x;
temp = -clickRay.getOrigin().y/clickRay.getDirection().y + minY/clickRay.getDirection().y;
if(largestEnteringValue < temp) {
largestEnteringValue = temp;
}
temp = -clickRay.getOrigin().z/clickRay.getDirection().z + minZ/clickRay.getDirection().z;
if(largestEnteringValue < temp) {
largestEnteringValue = temp;
}
smallestExitingValue = -clickRay.getOrigin().x/clickRay.getDirection().x + maxX/clickRay.getDirection().x;
temp = -clickRay.getOrigin().y/clickRay.getDirection().y + maxY/clickRay.getDirection().y;
if(smallestExitingValue > temp) {
smallestExitingValue = temp;
}
temp = -clickRay.getOrigin().z/clickRay.getDirection().z + maxZ/clickRay.getDirection().z;
if(smallestExitingValue < temp) {
smallestExitingValue = temp;
}
if(largestEnteringValue > smallestExitingValue) {
//System.out.println("Miss!");
} else {
if (largestEnteringValue < closestEnteringValue) {
closestEnteringValue = largestEnteringValue;
closestDrawableHit = d;
}
}
}
if(closestDrawableHit != null) {
System.out.println("Hit at: (" + clickRay.setDistance(closestEnteringValue).x + ", " + clickRay.getCurrentPosition().y + ", " + clickRay.getCurrentPosition().z);
this.worldModel.removeDrawableThing(closestDrawableHit);
}
}
}
I just don't understand what's wrong: the ray is being shot and I do hit stuff that gets removed, but the result is very strange. Sometimes it removes the thing I'm clicking at, sometimes it removes things that aren't even close to what I'm clicking at, and sometimes it removes nothing at all.
Edit:
Okay, so I have continued searching for errors, and by debugging the ray (by painting small dots where it travels) I can now see that there is something obviously wrong with the ray I'm sending out... it has its origin near the world center and always shoots to the same position no matter where I point my camera...
My initial thought is that there might be some error in the way I calculate my view matrix (since it's not possible to get the view matrix from the gluLookAt method in LWJGL, I have to build it myself, and I guess that's where the problem is)...
Edit 2:
This is how I calculate it currently:
private double[][] viewMatrixDouble = {{0,0,0,0}, {0,0,0,0}, {0,0,0,0}, {0,0,0,1}};
public Vector getCameraDirectionVector() {
Vector actualEye = this.getActualEyePosition();
return new Vector(lookAt.x-actualEye.x, lookAt.y-actualEye.y, lookAt.z-actualEye.z);
}
public Vector getActualEyePosition() {
return eye.rotate(this.getRotation());
}
public void generateViewMatrix() {
Vector cameraDirectionVector = getCameraDirectionVector().normalize();
Vector side = Vector.cross(cameraDirectionVector, this.upVector).normalize();
Vector up = Vector.cross(side, cameraDirectionVector);
viewMatrixDouble[0][0] = side.x; viewMatrixDouble[0][1] = up.x; viewMatrixDouble[0][2] = -cameraDirectionVector.x;
viewMatrixDouble[1][0] = side.y; viewMatrixDouble[1][1] = up.y; viewMatrixDouble[1][2] = -cameraDirectionVector.y;
viewMatrixDouble[2][0] = side.z; viewMatrixDouble[2][1] = up.z; viewMatrixDouble[2][2] = -cameraDirectionVector.z;
/*
Vector actualEyePosition = this.getActualEyePosition();
Vector zaxis = new Vector(this.lookAt.x - actualEyePosition.x, this.lookAt.y - actualEyePosition.y, this.lookAt.z - actualEyePosition.z).normalize();
Vector xaxis = Vector.cross(upVector, zaxis).normalize();
Vector yaxis = Vector.cross(zaxis, xaxis);
viewMatrixDouble[0][0] = xaxis.x; viewMatrixDouble[0][1] = yaxis.x; viewMatrixDouble[0][2] = zaxis.x;
viewMatrixDouble[1][0] = xaxis.y; viewMatrixDouble[1][1] = yaxis.y; viewMatrixDouble[1][2] = zaxis.y;
viewMatrixDouble[2][0] = xaxis.z; viewMatrixDouble[2][1] = yaxis.z; viewMatrixDouble[2][2] = zaxis.z;
viewMatrixDouble[3][0] = -Vector.dot(xaxis, actualEyePosition); viewMatrixDouble[3][1] =-Vector.dot(yaxis, actualEyePosition); viewMatrixDouble[3][2] = -Vector.dot(zaxis, actualEyePosition);
*/
viewMatrix = new Matrix4f();
viewMatrix.load(getViewMatrixAsFloatBuffer());
}
I would be very grateful if anyone could verify whether this is right or wrong, and if it's wrong, supply me with the right way of doing it...
I have read a lot of threads and documentation about this, but I can't seem to wrap my head around it...
I just don't understand what's wrong: the ray is being shot and I do hit stuff that gets removed, but things are not disappearing where I press on the screen.
OpenGL is not a scene graph, it's a drawing library, so after removing something from your internal representation you must redraw the scene. Your code is missing a call to a function that triggers a redraw.
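For reference, here is a minimal LWJGL 2 style render loop sketch that redraws the whole scene every frame; handleInput() and renderWorld() are hypothetical stand-ins for your existing picking and drawing code, so anything removed from the world model during input handling simply stops being drawn on the next frame:
import org.lwjgl.LWJGLException;
import org.lwjgl.opengl.Display;
import org.lwjgl.opengl.DisplayMode;
import org.lwjgl.opengl.GL11;

public class RedrawLoop {
    public static void main(String[] args) throws LWJGLException {
        Display.setDisplayMode(new DisplayMode(800, 600));
        Display.create();
        while (!Display.isCloseRequested()) {
            handleInput();    // picking happens here and may remove a Drawable
            GL11.glClear(GL11.GL_COLOR_BUFFER_BIT | GL11.GL_DEPTH_BUFFER_BIT);
            renderWorld();    // draw whatever is currently left in the world model
            Display.update(); // swap buffers so the new frame becomes visible
            Display.sync(60);
        }
        Display.destroy();
    }
    private static void handleInput() { /* the picking code from the question */ }
    private static void renderWorld() { /* issue draw calls for every remaining object */ }
}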
Okay, so I finally solved it with help from the guys at gamedev and a friend. Here is a link to the answer where I have posted the code!