Mesh animation in DirectX - C++

In my game project I'm using MD5 model files, but I feel I'm doing something wrong...
At every frame I update around 30~40 animated meshes (updating each joint and its respective vertices), but doing it like this I'm always using 25% of the CPU and my FPS stays at 70~80 (when I should have 200~300).
I know that maybe I should use instancing, but I don't know how to do that with animated meshes.
And even if I did, as far as I know instancing only works with identical meshes, but I need around 30 different meshes per scene (and those would be the ones repeated using instancing).
What I do every frame is: build the new skeleton for every animated mesh, put every joint at its new position (if the joint needs an update) and update all vertices that should be updated.
My video card is fine; here is the update code:
bool AnimationModelClass::UpdateMD5Model(float deltaTime, int animation)
{
    MD5Model.m_animations[animation].currAnimTime += deltaTime; // Update the current animation time

    if(MD5Model.m_animations[animation].currAnimTime > MD5Model.m_animations[animation].totalAnimTime)
        MD5Model.m_animations[animation].currAnimTime = 0.0f;

    // Which frame are we on
    float currentFrame = MD5Model.m_animations[animation].currAnimTime * MD5Model.m_animations[animation].frameRate;
    int frame0 = (int)floorf( currentFrame );
    int frame1 = frame0 + 1;

    // Make sure we don't go over the number of frames
    if(frame0 == MD5Model.m_animations[animation].numFrames-1)
        frame1 = 0;

    float interpolation = currentFrame - frame0; // Get the remainder (in time) between frame0 and frame1 to use as interpolation factor

    std::vector<Joint> interpolatedSkeleton; // Create a frame skeleton to store the interpolated skeletons in

    // Compute the interpolated skeleton
    for( int i = 0; i < MD5Model.m_animations[animation].numJoints; i++)
    {
        Joint tempJoint;
        Joint joint0 = MD5Model.m_animations[animation].frameSkeleton[frame0][i]; // Get the i'th joint of frame0's skeleton
        Joint joint1 = MD5Model.m_animations[animation].frameSkeleton[frame1][i]; // Get the i'th joint of frame1's skeleton

        tempJoint.parentID = joint0.parentID; // Set the tempJoint's parent id

        // Turn the two orientations into D3DXQUATERNIONs for easy computations
        D3DXQUATERNION joint0Orient = D3DXQUATERNION(joint0.orientation.x, joint0.orientation.y, joint0.orientation.z, joint0.orientation.w);
        D3DXQUATERNION joint1Orient = D3DXQUATERNION(joint1.orientation.x, joint1.orientation.y, joint1.orientation.z, joint1.orientation.w);

        // Interpolate positions
        tempJoint.pos.x = joint0.pos.x + (interpolation * (joint1.pos.x - joint0.pos.x));
        tempJoint.pos.y = joint0.pos.y + (interpolation * (joint1.pos.y - joint0.pos.y));
        tempJoint.pos.z = joint0.pos.z + (interpolation * (joint1.pos.z - joint0.pos.z));

        // Interpolate orientations using spherical interpolation (Slerp)
        D3DXQUATERNION qtemp;
        D3DXQuaternionSlerp(&qtemp, &joint0Orient, &joint1Orient, interpolation);

        tempJoint.orientation.x = qtemp.x;
        tempJoint.orientation.y = qtemp.y;
        tempJoint.orientation.z = qtemp.z;
        tempJoint.orientation.w = qtemp.w;

        // Push the joint back into our interpolated skeleton
        interpolatedSkeleton.push_back(tempJoint);
    }
    for ( int k = 0; k < MD5Model.numSubsets; k++)
    {
        for ( int i = 0; i < MD5Model.m_subsets[k].numVertices; ++i )
        {
            Vertex tempVert = MD5Model.m_subsets[k].m_vertices[i];

            // Make sure the vertex's pos is cleared first
            tempVert.x = 0;
            tempVert.y = 0;
            tempVert.z = 0;

            // Clear the vertex's normal
            tempVert.nx = 0;
            tempVert.ny = 0;
            tempVert.nz = 0;

            // Sum up the joints and weights information to get the vertex's position and normal
            for ( int j = 0; j < tempVert.WeightCount; ++j )
            {
                Weight tempWeight = MD5Model.m_subsets[k].m_weights[tempVert.StartWeight + j];
                Joint tempJoint = interpolatedSkeleton[tempWeight.jointID];

                // Convert joint orientation and weight pos to quaternions for easier computation
                D3DXQUATERNION tempJointOrientation = D3DXQUATERNION(tempJoint.orientation.x, tempJoint.orientation.y, tempJoint.orientation.z, tempJoint.orientation.w);
                D3DXQUATERNION tempWeightPos = D3DXQUATERNION(tempWeight.pos.x, tempWeight.pos.y, tempWeight.pos.z, 0.0f);

                // We will need to use the conjugate of the joint orientation quaternion
                D3DXQUATERNION tempJointOrientationConjugate;
                D3DXQuaternionInverse(&tempJointOrientationConjugate, &tempJointOrientation);

                // Calculate the vertex position (in joint space, i.e. rotate the point around (0,0,0)) for this weight using the joint orientation quaternion and its conjugate
                // We can rotate a point using a quaternion with the equation "rotatedPoint = quaternion * point * quaternionConjugate"
                D3DXVECTOR3 rotatedPoint;
                D3DXQUATERNION qqtemp;
                D3DXQuaternionMultiply(&qqtemp, &tempJointOrientation, &tempWeightPos);
                D3DXQuaternionMultiply(&qqtemp, &qqtemp, &tempJointOrientationConjugate);

                rotatedPoint.x = qqtemp.x;
                rotatedPoint.y = qqtemp.y;
                rotatedPoint.z = qqtemp.z;

                // Now move the vertex's position from joint space (0,0,0) to the joint's position in world space, taking the weight's bias into account
                tempVert.x += ( tempJoint.pos.x + rotatedPoint.x ) * tempWeight.bias;
                tempVert.y += ( tempJoint.pos.y + rotatedPoint.y ) * tempWeight.bias;
                tempVert.z += ( tempJoint.pos.z + rotatedPoint.z ) * tempWeight.bias;

                // Compute the normals for this frame's skeleton using the weight normals from before
                // We can compute the normals the same way we compute the vertex positions, only we don't have to translate them (just rotate)
                D3DXQUATERNION tempWeightNormal = D3DXQUATERNION(tempWeight.normal.x, tempWeight.normal.y, tempWeight.normal.z, 0.0f);

                D3DXQuaternionMultiply(&qqtemp, &tempJointOrientation, &tempWeightNormal);
                D3DXQuaternionMultiply(&qqtemp, &qqtemp, &tempJointOrientationConjugate);

                // Rotate the normal
                rotatedPoint.x = qqtemp.x;
                rotatedPoint.y = qqtemp.y;
                rotatedPoint.z = qqtemp.z;

                // Add to the vertex's normal, taking the weight bias into account
                tempVert.nx -= rotatedPoint.x * tempWeight.bias;
                tempVert.ny -= rotatedPoint.y * tempWeight.bias;
                tempVert.nz -= rotatedPoint.z * tempWeight.bias;
            }

            // Store the vertex's position in the position vector instead of straight into the vertex vector
            MD5Model.m_subsets[k].m_positions[i].x = tempVert.x;
            MD5Model.m_subsets[k].m_positions[i].y = tempVert.y;
            MD5Model.m_subsets[k].m_positions[i].z = tempVert.z;

            // Store the vertex's normal
            MD5Model.m_subsets[k].m_vertices[i].nx = tempVert.nx;
            MD5Model.m_subsets[k].m_vertices[i].ny = tempVert.ny;
            MD5Model.m_subsets[k].m_vertices[i].nz = tempVert.nz;

            // Create a temp D3DXVECTOR3 for normalization
            D3DXVECTOR3 dtemp = D3DXVECTOR3(0,0,0);
            dtemp.x = MD5Model.m_subsets[k].m_vertices[i].nx;
            dtemp.y = MD5Model.m_subsets[k].m_vertices[i].ny;
            dtemp.z = MD5Model.m_subsets[k].m_vertices[i].nz;
            D3DXVec3Normalize(&dtemp, &dtemp);
            MD5Model.m_subsets[k].m_vertices[i].nx = dtemp.x;
            MD5Model.m_subsets[k].m_vertices[i].ny = dtemp.y;
            MD5Model.m_subsets[k].m_vertices[i].nz = dtemp.z;

            // Put the positions into the vertices for this subset
            MD5Model.m_subsets[k].m_vertices[i].x = MD5Model.m_subsets[k].m_positions[i].x;
            MD5Model.m_subsets[k].m_vertices[i].y = MD5Model.m_subsets[k].m_positions[i].y;
            MD5Model.m_subsets[k].m_vertices[i].z = MD5Model.m_subsets[k].m_positions[i].z;
        }

        // Update the subset's vertex buffer
        // First map the buffer
        void* mappedVertBuff;
        HRESULT result;
        result = MD5Model.m_subsets[k].vertBuff->Map(D3D10_MAP_WRITE_DISCARD, 0, &mappedVertBuff);
        if(FAILED(result))
        {
            return false;
        }

        // Copy the data into the vertex buffer.
        memcpy(mappedVertBuff, &MD5Model.m_subsets[k].m_vertices[0], (sizeof(Vertex) * MD5Model.m_subsets[k].numVertices));

        MD5Model.m_subsets[k].vertBuff->Unmap();
    }

    return true;
}
Maybe I can fix some things in that code, but I wonder if I'm doing it right...
I also wonder if there are better ways to do this, or whether other animation formats would be better (something other than the .x extension).
Thanks, and sorry for my bad English :D
Would doing the bone transformations in shaders be a good solution? (like this)

Are all of the meshes in the viewing frustum at the same time? If not, you should only be updating the animations of the objects which are on screen and which you can see. If you're updating all the meshes in the scene regardless of whether they are in view, you are wasting a lot of cycles. It sounds to me like you are not doing any frustum culling at all; that is probably the best place to start.
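To make that concrete, here is a minimal sketch of a sphere-vs-frustum test using the same D3DX math already in the question. The scene list, the bounding-sphere accessors and UpdateVisibleModels are made-up names for illustration; the idea is simply that only models which pass the test get their skeleton and vertices rebuilt.
#include <d3dx10.h>
#include <vector>

struct Frustum
{
    D3DXPLANE planes[6];

    // Extract the six planes from a combined view*projection matrix (Gribb/Hartmann method,
    // row-vector convention, D3D clip space with 0 <= z <= w).
    void Build(const D3DXMATRIX& vp)
    {
        planes[0] = D3DXPLANE(vp._14 + vp._11, vp._24 + vp._21, vp._34 + vp._31, vp._44 + vp._41); // left
        planes[1] = D3DXPLANE(vp._14 - vp._11, vp._24 - vp._21, vp._34 - vp._31, vp._44 - vp._41); // right
        planes[2] = D3DXPLANE(vp._14 + vp._12, vp._24 + vp._22, vp._34 + vp._32, vp._44 + vp._42); // bottom
        planes[3] = D3DXPLANE(vp._14 - vp._12, vp._24 - vp._22, vp._34 - vp._32, vp._44 - vp._42); // top
        planes[4] = D3DXPLANE(vp._13, vp._23, vp._33, vp._43);                                     // near
        planes[5] = D3DXPLANE(vp._14 - vp._13, vp._24 - vp._23, vp._34 - vp._33, vp._44 - vp._43); // far
        for (int i = 0; i < 6; ++i)
            D3DXPlaneNormalize(&planes[i], &planes[i]);
    }

    // True if a bounding sphere is at least partially inside the frustum.
    bool SphereVisible(const D3DXVECTOR3& center, float radius) const
    {
        for (int i = 0; i < 6; ++i)
            if (D3DXPlaneDotCoord(&planes[i], &center) < -radius)
                return false;
        return true;
    }
};

// Per frame: skin only the models whose bounding sphere intersects the view frustum.
// BoundingSphereCenter()/BoundingSphereRadius() are assumed accessors, not part of the original class.
void UpdateVisibleModels(std::vector<AnimationModelClass*>& models,
                         const D3DXMATRIX& view, const D3DXMATRIX& proj,
                         float deltaTime)
{
    Frustum frustum;
    frustum.Build(view * proj);

    for (size_t i = 0; i < models.size(); ++i)
    {
        if (frustum.SphereVisible(models[i]->BoundingSphereCenter(), models[i]->BoundingSphereRadius()))
            models[i]->UpdateMD5Model(deltaTime, 0); // 0 = whichever animation is current
    }
}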

Related

Trying to load animations in OpenGL from an MD5 file using Assimp and GLM

I'm trying to follow the tutorial here (at ogldev) mentioned in this answer.
I am, however, facing a few issues which I believe are related to the row-major order of Assimp vs. the column-major order of GLM, although I'm not quite sure.
I've tried a few variations and orderings to see if any of them work, but to no avail.
Here (Gist) is the class which I use to load the complete MD5 file, and the current result I have.
And this is the part where I think it is going wrong, when I try to update the bone transformation matrices.
void SkeletalModel::ReadNodeHierarchyAnimation(float _animationTime, const aiNode* _node,
    const glm::mat4& _parentTransform)
{
    std::string node_name = _node->mName.data;

    const aiAnimation * p_animation = scene->mAnimations[0];

    glm::mat4 node_transformation(1.0f);
    convert_aimatrix_to_glm(node_transformation, _node->mTransformation);
    // Transpose it.
    node_transformation = glm::transpose(node_transformation);

    const aiNodeAnim * node_anim = FindNodeAnim(p_animation, node_name);

    if (node_anim) {
        //glm::mat4 transformation_matrix(1.0f);
        glm::mat4 translation_matrix(1.0f);
        glm::mat4 rotation_matrix(1.0f);
        glm::mat4 scaling_matrix(1.0f);

        aiVector3D translation;
        CalcInterpolatedPosition(translation, _animationTime, node_anim);
        translation_matrix = glm::translate(translation_matrix, glm::vec3(translation.x, translation.y, translation.z));

        aiQuaternion rotation;
        CalcInterpolatedRotation(rotation, _animationTime, node_anim);
        // Transpose the matrix after this.
        convert_aimatrix_to_glm(rotation_matrix, rotation.GetMatrix());
        //rotation_matrix = glm::transpose(rotation_matrix);

        aiVector3D scaling;
        CalcInterpolatedScaling(scaling, _animationTime, node_anim);
        scaling_matrix = glm::scale(scaling_matrix, glm::vec3(scaling.x, scaling.y, scaling.z));

        node_transformation = scaling_matrix * rotation_matrix * translation_matrix;
        //node_transformation = translation_matrix * rotation_matrix * scaling_matrix;
    }

    glm::mat4 global_transformation = node_transformation * _parentTransform;

    if (boneMapping.find(node_name) != boneMapping.end()) {
        // Update the Global Transformation.
        auto bone_index = boneMapping[node_name];
        //boneInfoData[bone_index].finalTransformation = globalInverseTransform * global_transformation * boneInfoData[bone_index].boneOffset;
        boneInfoData[bone_index].finalTransformation = boneInfoData[bone_index].boneOffset * global_transformation * globalInverseTransform;
        //boneInfoData[bone_index].finalTransformation = globalInverseTransform;
    }

    for (auto i = 0; i < _node->mNumChildren; i++) {
        ReadNodeHierarchyAnimation(_animationTime, _node->mChildren[i], global_transformation);
    }
}
My Current Output:
I tried going through each matrix used in the code to check whether I should transpose it or not, and whether I should change the matrix multiplication order or not, but I could not find my issue.
If anyone can point out my mistakes here or direct me to a different tutorial that would help me load animations, that would be great.
Also, I see suggestions to use a basic model in the initial stages of learning this, but I was told the OBJ format doesn't support animations, and I had only been using OBJ before this. Can I use any other formats that Blender exports in a manner similar to the MD5 shown in this tutorial?
I built an animated scene a few years ago using the Assimp library, basically following these tutorials: http://ogldev.atspace.co.uk/www/tutorial38/tutorial38.html and http://sourceforge.net/projects/assimp/forums/forum/817654/topic/3880745
While I was using the old X format (Blender can work with X, using an extension), I can definitely confirm you need to transpose the Assimp animation matrices for use with GLM.
Regarding other formats, you can use whatever you like provided it is supported by Blender (import, editing, export) and by Assimp. Be prepared for a fair bit of trial and error when changing formats!
Rather than me trying to understand your code, I will post the relevant fragments from my working system, which show the calculation of the bone matrices. Hopefully this will help you, as I remember having the same problem you describe and taking some time to track it down. The code is plain C.
You can see where the transposition takes place at the end of the code.
// calculateAnimPose() calculates the bone transformations for a mesh at a particular time in an animation (in scene)
// Each bone transformation is relative to the rest pose.
// calculateAnimPose() calculates the bone transformations for a mesh at a particular time in an animation (in scene)
// Each bone transformation is relative to the rest pose.
void calculateAnimPose(aiMesh* mesh, const aiScene* scene, int animNum, float poseTime, mat4 *boneTransforms) {
    if(mesh->mNumBones == 0 || animNum < 0) {   // animNum = -1 for no animation
        boneTransforms[0] = mat4(1.0);          // so, just return a single identity matrix
        return;
    }
    if(scene->mNumAnimations <= (unsigned int)animNum)
        failInt("No animation with number:", animNum);

    aiAnimation *anim = scene->mAnimations[animNum]; // animNum = 0 for the first animation

    // Set transforms from bone channels
    for(unsigned int chanID=0; chanID < anim->mNumChannels; chanID++) {
        aiNodeAnim *channel = anim->mChannels[chanID];
        aiVector3D curPosition;
        aiQuaternion curRotation; // interpolation of scaling purposefully left out for simplicity.

        // find the node which the channel affects
        aiNode* targetNode = scene->mRootNode->FindNode( channel->mNodeName );

        // find current positionKey
        size_t posIndex = 0;
        for(posIndex=0; posIndex+1 < channel->mNumPositionKeys; posIndex++)
            if( channel->mPositionKeys[posIndex + 1].mTime > poseTime )
                break; // the next key lies in the future - so use the current key

        // This assumes that there is at least one key
        if(posIndex+1 == channel->mNumPositionKeys)
            curPosition = channel->mPositionKeys[posIndex].mValue;
        else {
            float t0 = channel->mPositionKeys[posIndex].mTime;   // Interpolate position/translation
            float t1 = channel->mPositionKeys[posIndex+1].mTime;
            float weight1 = (poseTime-t0)/(t1-t0);
            curPosition = channel->mPositionKeys[posIndex].mValue * (1.0f - weight1) +
                          channel->mPositionKeys[posIndex+1].mValue * weight1;
        }

        // find current rotationKey
        size_t rotIndex = 0;
        for(rotIndex=0; rotIndex+1 < channel->mNumRotationKeys; rotIndex++)
            if( channel->mRotationKeys[rotIndex + 1].mTime > poseTime )
                break; // the next key lies in the future - so use the current key

        if(rotIndex+1 == channel->mNumRotationKeys)
            curRotation = channel->mRotationKeys[rotIndex].mValue;
        else {
            float t0 = channel->mRotationKeys[rotIndex].mTime;   // Interpolate using quaternions
            float t1 = channel->mRotationKeys[rotIndex+1].mTime;
            float weight1 = (poseTime-t0)/(t1-t0);
            aiQuaternion::Interpolate(curRotation, channel->mRotationKeys[rotIndex].mValue,
                                      channel->mRotationKeys[rotIndex+1].mValue, weight1);
            curRotation = curRotation.Normalize();
        }

        aiMatrix4x4 trafo = aiMatrix4x4(curRotation.GetMatrix()); // now build a rotation matrix
        trafo.a4 = curPosition.x; trafo.b4 = curPosition.y; trafo.c4 = curPosition.z; // add the translation
        targetNode->mTransformation = trafo; // assign this transformation to the node
    }

    // Calculate the total transformation for each bone relative to the rest pose
    for(unsigned int a=0; a<mesh->mNumBones; a++) {
        const aiBone* bone = mesh->mBones[a];
        aiMatrix4x4 bTrans = bone->mOffsetMatrix; // start with mesh-to-bone matrix to subtract rest pose
        // Find the bone, then loop through the nodes/bones on the path up to the root.
        for(aiNode* node = scene->mRootNode->FindNode(bone->mName); node!=NULL; node=node->mParent)
            bTrans = node->mTransformation * bTrans; // add each bone's current relative transformation
        boneTransforms[a] = mat4(vec4(bTrans.a1, bTrans.a2, bTrans.a3, bTrans.a4),
                                 vec4(bTrans.b1, bTrans.b2, bTrans.b3, bTrans.b4),
                                 vec4(bTrans.c1, bTrans.c2, bTrans.c3, bTrans.c4),
                                 vec4(bTrans.d1, bTrans.d2, bTrans.d3, bTrans.d4)); // Convert to mat4
    }
}

Optimizing a Ray Tracer

I'm tasked with optimizing the following ray tracer:
void Scene::RayTrace()
{
    for (int v = 0; v < fb->h; v++) // all vertical pixels in framebuffer
    {
        calculateFPS(); // calculates the current fps and prints it
        for (int u = 0; u < fb->w; u++) // all horizontal pixels in framebuffer
        {
            fb->Set(u, v, 0xFFAAAAAA); // background color
            fb->SetZ(u, v, FLT_MAX); // sets the Z values to all be maximum at beginning
            V3 ray = (ppc->c + ppc->a*((float)u + .5f) + ppc->b*((float)v + .5f)).UnitVector(); // gets the camera ray
            for (int tmi = 0; tmi < tmeshesN; tmi++) // iterates over all triangle meshes
            {
                if (!tmeshes[tmi]->enabled) // doesn't render a tmesh if it's not set to be enabled
                    continue;
                for (int tri = 0; tri < tmeshes[tmi]->trisN; tri++) // iterates over all triangles in the mesh
                {
                    V3 Vs[3]; // triangle vertices
                    Vs[0] = tmeshes[tmi]->verts[tmeshes[tmi]->tris[3 * tri + 0]];
                    Vs[1] = tmeshes[tmi]->verts[tmeshes[tmi]->tris[3 * tri + 1]];
                    Vs[2] = tmeshes[tmi]->verts[tmeshes[tmi]->tris[3 * tri + 2]];
                    V3 bgt = ppc->C.IntersectRayWithTriangleWithThisOrigin(ray, Vs); // I don't entirely understand what this does
                    if (bgt[2] < 0.0f || bgt[0] < 0.0f || bgt[1] < 0.0f || bgt[0] + bgt[1] > 1.0f)
                        continue;
                    if (fb->zb[(fb->h - 1 - v)*fb->w + u] < bgt[2])
                        continue;
                    fb->SetZ(u, v, bgt[2]);
                    float alpha = 1.0f - bgt[0] - bgt[1];
                    float beta = bgt[0];
                    float gamma = bgt[1];
                    V3 Cs[3]; // triangle vertex colors
                    Cs[0] = tmeshes[tmi]->cols[tmeshes[tmi]->tris[3 * tri + 0]];
                    Cs[1] = tmeshes[tmi]->cols[tmeshes[tmi]->tris[3 * tri + 1]];
                    Cs[2] = tmeshes[tmi]->cols[tmeshes[tmi]->tris[3 * tri + 2]];
                    V3 color = Cs[0] * alpha + Cs[1] * beta + Cs[2] * gamma;
                    fb->Set(u, v, color.GetColor()); // sets this pixel accordingly
                }
            }
        }
        fb->redraw();
        Fl::check();
    }
}
Two things:
I don't entirely understand what ppc->C.IntersectRayWithTriangleWithThisOrigin(ray, Vs); does. Can anyone explain this, in terms of ray-tracing, to me? Here is the function inside my "Planar Pinhole Camera" class (this function was given to me):
V3 V3::IntersectRayWithTriangleWithThisOrigin(V3 r, V3 Vs[3])
{
    M33 m; // 3x3 matrix class
    m.SetColumn(0, Vs[1] - Vs[0]);
    m.SetColumn(1, Vs[2] - Vs[0]);
    m.SetColumn(2, r*-1.0f);
    V3 ret; // Vector3 class
    V3 &C = *this;
    ret = m.Inverse() * (C - Vs[0]);
    return ret;
}
The basic steps of this are apparent, I just don't see what it's actually doing.
How would I go about optimizing this ray tracer from here? I've found something online about "kd-trees", but I'm unsure how complex they are. Does anyone have good resources on simple solutions for optimizing this? I've had some difficulty deciphering what's out there.
Thanks!
Probably the largest optimisation by far would be to use some sort of bounding volume hierarchy. Right now the code intersects all rays with all triangles of all objects. With a BVH, we instead ask: "given this ray, which triangles does it intersect?" This means that for each ray, you generally only need to test for intersection with a handful of primitives and triangles, rather than every single triangle in the scene.
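A full BVH takes some setup; a cheap first step in the same spirit is to give every mesh a precomputed axis-aligned bounding box and skip its whole triangle loop when the ray misses the box. A sketch, under the assumption that V3 exposes operator[] (as bgt[2] above suggests) and that aabbMin/aabbMax are new members added to the mesh class:
#include <cfloat>
#include <cmath>

// Slab test: returns true if the ray starting at orig with direction dir hits the box [bmin, bmax].
// V3 is passed by value to avoid assuming anything about const-correctness of its operator[].
bool RayHitsAABB(V3 orig, V3 dir, V3 bmin, V3 bmax)
{
    float tmin = 0.0f, tmax = FLT_MAX;
    for (int axis = 0; axis < 3; ++axis)
    {
        float d = dir[axis];
        if (fabsf(d) < 1e-8f)
        {
            // Ray is parallel to this slab: miss if the origin lies outside it.
            if (orig[axis] < bmin[axis] || orig[axis] > bmax[axis])
                return false;
        }
        else
        {
            float t0 = (bmin[axis] - orig[axis]) / d;
            float t1 = (bmax[axis] - orig[axis]) / d;
            if (t0 > t1) { float tmp = t0; t0 = t1; t1 = tmp; }
            tmin = (t0 > tmin) ? t0 : tmin;
            tmax = (t1 < tmax) ? t1 : tmax;
            if (tmin > tmax)
                return false;
        }
    }
    return true;
}

// Inside the mesh loop, before iterating over the triangles:
// if (!RayHitsAABB(ppc->C, ray, tmeshes[tmi]->aabbMin, tmeshes[tmi]->aabbMax))
//     continue;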
IntersectRayWithTriangleWithThisOrigin
From the look of it, it creates an inverse transform matrix from the triangle edges (the triangle basis vectors are X and Y).
I don't get the Z axis, though: I would expect the ray direction there, not the position of the pixel (the ray origin). But I may be misinterpreting something.
Anyway, the inverse matrix computation is the biggest problem: you are computing it for each triangle, per pixel, and that is a lot.
It would be faster to compute the inverse transform matrix of each triangle once, before ray tracing, where X and Y are the basis vectors and Z is perpendicular to both of them, always facing the same direction relative to the camera. Then you just transform your ray into it and check the limits of the intersection; that is just a matrix*vector multiply and a few ifs instead of an inverse matrix computation.
Another way would be to algebraically solve the ray vs. plane intersection. That should lead to a much simpler equation than matrix inversion; after that it is just a matter of basis vector bound checking.
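Expanding on that last point: the Moller-Trumbore test solves the same barycentric system algebraically, with no matrix inversion per triangle. A sketch using plain floats so it does not depend on the V3 interface; beta, gamma and t correspond to bgt[0], bgt[1] and bgt[2] in the loop above.
struct Hit { float beta, gamma, t; bool valid; };

// Ray/triangle intersection (Moller-Trumbore). orig/dir are the ray, v0/v1/v2 the triangle vertices.
static Hit IntersectRayTriangle(const float orig[3], const float dir[3],
                                const float v0[3], const float v1[3], const float v2[3])
{
    float e1[3], e2[3], s[3], p[3], q[3];
    for (int i = 0; i < 3; ++i) { e1[i] = v1[i] - v0[i]; e2[i] = v2[i] - v0[i]; s[i] = orig[i] - v0[i]; }

    // p = dir x e2, q = s x e1
    p[0] = dir[1]*e2[2] - dir[2]*e2[1]; p[1] = dir[2]*e2[0] - dir[0]*e2[2]; p[2] = dir[0]*e2[1] - dir[1]*e2[0];
    q[0] = s[1]*e1[2] - s[2]*e1[1];     q[1] = s[2]*e1[0] - s[0]*e1[2];     q[2] = s[0]*e1[1] - s[1]*e1[0];

    float det = p[0]*e1[0] + p[1]*e1[1] + p[2]*e1[2];
    Hit h = { 0.0f, 0.0f, 0.0f, false };
    if (det > -1e-8f && det < 1e-8f) // ray is parallel to the triangle plane
        return h;

    float invDet = 1.0f / det;
    h.beta  = (p[0]*s[0] + p[1]*s[1] + p[2]*s[2]) * invDet;       // barycentric coordinate for v1
    h.gamma = (q[0]*dir[0] + q[1]*dir[1] + q[2]*dir[2]) * invDet; // barycentric coordinate for v2
    h.t     = (q[0]*e2[0] + q[1]*e2[1] + q[2]*e2[2]) * invDet;    // distance along the ray
    h.valid = h.beta >= 0.0f && h.gamma >= 0.0f && h.beta + h.gamma <= 1.0f && h.t >= 0.0f;
    return h;
}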

3D projection to 2D screen coordinates

Current Code:
// calculate the view-projection matrix
abfw::Matrix44 view_proj_matrix_ = camera_.GetMatrix(PROJ) * camera_.GetMatrix(VIEW);
//3D position
abfw::Vector3 ball_position_ = balls_[look_at_index_].GetPosition();
abfw::Vector3 sprite_position;
// transform world position to the range -1 to 1
ball_position_ = ball_position_.Transform(view_proj_matrix_);
//move to range 0 - 2
ball_position_.x += 1;
//scale to range 0 -1
ball_position_.x /= 2;
//scale to screen size
ball_position_.x *= platform_.width();
// same as above
ball_position_.y -= 1;
ball_position_.y /= -2;
ball_position_.y *= platform_.height();
//set sprite position
sprite_position = ball_position_;
// z is discounted
sprite_position.z = 0;
I'm getting x and y values that are way out of range of the screen, but I'm not seeing why. I have broken the algorithm down as much as possible to try and see where I'm going wrong, but I can't find it.
Edit: when the camera is at (0,0,7), switching between balls with the same y value gives the same sprite position.
GetMatrix() uses this code:
view_matrix_.LookAt(camera_eye_, camera_lookat_, camera_up_);
projection_matrix_ = platform_.PerspectiveProjectionFov(camera_fov_, (float)platform_.width() / (float)platform_.height(), near_plane_, far_plane_);
which is what I'm also using to draw with, so I'm assuming it is right.
It uses row vectors, I believe.
Transform Code:
Vector3 result = Vector3(0.0f, 0.0f, 0.0f);
result.x = x*_mat.m[0][0]+y*_mat.m[1][0]+z*_mat.m[2][0]+_mat.m[3][0];
result.y = x*_mat.m[0][1]+y*_mat.m[1][1]+z*_mat.m[2][1]+_mat.m[3][1];
result.z = x*_mat.m[0][2]+y*_mat.m[1][2]+z*_mat.m[2][2]+_mat.m[3][2];
I have no idea what the namespace abfw stands for, but if it handles vectors and matrices in the same way as DirectX, you have to swap the projection and view transformations:
abfw::Matrix44 view_proj_matrix_ = camera_.GetMatrix(VIEW) * camera_.GetMatrix(PROJ);
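For reference, a minimal sketch of the full world-to-screen path with that ordering, reusing the names from the question (and assuming Matrix44 exposes its m[4][4] array the way the Transform code above does). Note that it keeps the w component: after multiplying by view * projection the point is in clip space and still needs the perspective divide before the viewport mapping, which the 3-component Transform above drops.
abfw::Matrix44 view_proj = camera_.GetMatrix(VIEW) * camera_.GetMatrix(PROJ);

abfw::Vector3 p = balls_[look_at_index_].GetPosition();

// Row-vector transform into clip space, keeping w.
float clip_x = p.x*view_proj.m[0][0] + p.y*view_proj.m[1][0] + p.z*view_proj.m[2][0] + view_proj.m[3][0];
float clip_y = p.x*view_proj.m[0][1] + p.y*view_proj.m[1][1] + p.z*view_proj.m[2][1] + view_proj.m[3][1];
float clip_w = p.x*view_proj.m[0][3] + p.y*view_proj.m[1][3] + p.z*view_proj.m[2][3] + view_proj.m[3][3];

// Perspective divide to normalized device coordinates (-1..1).
float ndc_x = clip_x / clip_w;
float ndc_y = clip_y / clip_w;

// Viewport mapping, same convention as the question (y flipped).
sprite_position.x = (ndc_x + 1.0f) * 0.5f * platform_.width();
sprite_position.y = (1.0f - ndc_y) * 0.5f * platform_.height();
sprite_position.z = 0.0f;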

My shadow volumes don't move with my light

I'm currently trying to implement shadow volumes in my OpenGL world. Right now I'm just focusing on getting the volumes calculated correctly.
Right now I have a teapot that's rendered, and I can get it to generate some shadow volumes; however, they always point directly to the left of the teapot. No matter where I move my light (and I can tell that I'm actually moving the light, because the teapot is lit with diffuse lighting), the shadow volumes always go straight left.
The method I'm using to create the volumes is:
1. Find the silhouette edges by looking at every triangle in the object. If the triangle isn't lit (tested with the dot product), skip it. If it is lit, check all of its edges: if an edge is already in the list of silhouette edges, remove it; otherwise add it.
2. Once I have all the silhouette edges, I go through each edge creating a quad, with one vertex at each vertex of the edge and the other two extruded away from the light.
Here is my code that does it all:
void getSilhoueteEdges(Model model, vector<Edge> &edges, Vector3f lightPos) {
    //for every triangle
    //    if triangle is not facing the light then skip
    //    for every edge
    //        if edge is already in the list
    //            remove
    //        else
    //            add

    vector<Face> faces = model.faces;

    //for every triangle
    for ( unsigned int i = 0; i < faces.size(); i++ ) {
        Face currentFace = faces.at(i);

        //if triangle is not facing the light
        //for this i'll just use the normal of any vertex, it should be the same for all of them
        Vector3f v1 = model.vertices[currentFace.vertices[0] - 1];
        Vector3f n1 = model.normals[currentFace.normals[0] - 1];
        Vector3f dirToLight = lightPos - v1;
        dirToLight.normalize();

        float dot = n1.dot(dirToLight);
        if ( dot <= 0.0f )
            continue; //then skip

        //lets get the edges
        //v1,v2; v2,v3; v3,v1
        Vector3f v2 = model.vertices[currentFace.vertices[1] - 1];
        Vector3f v3 = model.vertices[currentFace.vertices[2] - 1];

        Edge e[3];
        e[0] = Edge(v1, v2);
        e[1] = Edge(v2, v3);
        e[2] = Edge(v3, v1);

        //for every edge
        //triangles only have 3 edges so loop 3 times
        for ( int j = 0; j < 3; j++ ) {
            if ( edges.size() == 0 ) {
                edges.push_back(e[j]);
                continue;
            }

            bool wasRemoved = false;
            //if edge is in the list
            for ( unsigned int k = 0; k < edges.size(); k++ ) {
                Edge tempEdge = edges.at(k);
                if ( tempEdge == e[j] ) {
                    edges.erase(edges.begin() + k);
                    wasRemoved = true;
                    break;
                }
            }

            if ( ! wasRemoved )
                edges.push_back(e[j]);
        }
    }
}

void extendEdges(vector<Edge> edges, Vector3f lightPos, GLBatch &batch) {
    float extrudeSize = 100.0f;

    batch.Begin(GL_QUADS, edges.size() * 4);
    for ( unsigned int i = 0; i < edges.size(); i++ ) {
        Edge edge = edges.at(i);

        batch.Vertex3f(edge.v1.x, edge.v1.y, edge.v1.z);
        batch.Vertex3f(edge.v2.x, edge.v2.y, edge.v2.z);

        Vector3f temp = edge.v2 + (( edge.v2 - lightPos ) * extrudeSize);
        batch.Vertex3f(temp.x, temp.y, temp.z);

        temp = edge.v1 + ((edge.v1 - lightPos) * extrudeSize);
        batch.Vertex3f(temp.x, temp.y, temp.z);
    }
    batch.End();
}

void createShadowVolumesLM(Vector3f lightPos, Model model) {
    getSilhoueteEdges(model, silhoueteEdges, lightPos);
    extendEdges(silhoueteEdges, lightPos, boxShadow);
}
My light is defined as follows, and the main shadow-volume generation method is called like this:
Vector3f vLightPos = Vector3f(-5.0f,0.0f,2.0f);
createShadowVolumesLM(vLightPos, boxModel);
All of my code seems self-documenting in the places where I don't have comments, but if there are any confusing parts, let me know.
I have a feeling it's just a simple mistake I overlooked. Here is what it looks like with and without the shadow volumes being rendered.
It would seem you aren't transforming the shadow volumes. You either need to set the model-view matrix on them so they get transformed the same as the rest of the geometry, or you need to transform all the vertices (by hand) into view space and then do the silhouetting and transformation in view space.
Obviously the first method will use less CPU time and would be, IMO, preferable.
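A minimal sketch of the first option, assuming the usual GLTools plumbing that GLBatch implies (a GLMatrixStack named modelViewMatrix, a GLGeometryTransform named transformPipeline and a GLShaderManager named shaderManager; adjust to whatever your frame code actually uses). The point is only that the volume is drawn under the same model-view transform as the teapot.
// Draw the shadow volume with the same object transform applied to the teapot,
// so the extruded quads follow the object instead of staying in model space.
modelViewMatrix.PushMatrix();

// Apply exactly the same object transform used when drawing the teapot...
modelViewMatrix.Translate(objectX, objectY, objectZ); // hypothetical object position

// ...then draw the extruded quads with that transform applied.
GLfloat vGrey[] = { 0.5f, 0.5f, 0.5f, 1.0f };
shaderManager.UseStockShader(GLT_SHADER_FLAT, transformPipeline.GetModelViewProjectionMatrix(), vGrey);
boxShadow.Draw();

modelViewMatrix.PopMatrix();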

OpenGL skeleton animation

I am trying to add animation to my program.
I have a human model created in Blender with skeletal animation, and I can skip through the keyframes to see the model walking.
Now I've exported the model to an XML (Ogre3D) format, and in this XML file I can see the rotation, translation and scale assigned to each bone at specific times (t=0.00000, t=0.00040, ... etc.).
What I've done is find which vertices are assigned to each bone. Now I'm assuming all I need to do is apply the transformations defined for the bone to each one of these vertices. Is this the correct approach?
In my OpenGL draw() function (rough pseudo-code):
for (Bone b : bones){
    gl.glLoadIdentity();
    List<Vertex> v = b.getVertices();
    rotation = b.getRotation();
    translation = b.getTranslation();
    scale = b.getScale();
    gl.glTranslatef(translation);
    gl.glRotatef(rotation);
    gl.glScalef(scale);
    gl.glDrawElements(v);
}
Vertices are usually affected by more than one bone -- it sounds like you're after linear blend skinning. My code's in C++ unfortunately, but hopefully it'll give you the idea:
void Submesh::skin(const Skeleton_CPtr& skeleton)
{
    /*
    Linear Blend Skinning Algorithm:

    P = (\sum_i w_i * M_i * M_{0,i}^{-1}) * P_0 / (\sum_i w_i)

    Each M_{0,i}^{-1} matrix gets P_0 (the rest vertex) into its corresponding bone's coordinate frame.
    We construct matrices M_n * M_{0,n}^-1 for each n in advance to avoid repeating calculations.
    I refer to these in the code as the 'skinning matrices'.
    */
    BoneHierarchy_CPtr boneHierarchy = skeleton->bone_hierarchy();
    ConfiguredPose_CPtr pose = skeleton->get_pose();
    int boneCount = boneHierarchy->bone_count();

    // Construct the skinning matrices.
    std::vector<RBTMatrix_CPtr> skinningMatrices(boneCount);
    for(int i=0; i<boneCount; ++i)
    {
        skinningMatrices[i] = pose->bones(i)->absolute_matrix() * skeleton->to_bone_matrix(i);
    }

    // Build the vertex array.
    RBTMatrix_Ptr m = RBTMatrix::zeros(); // used as an accumulator for \sum_i w_i * M_i * M_{0,i}^{-1}

    int vertCount = static_cast<int>(m_vertices.size());
    for(int i=0, offset=0; i<vertCount; ++i, offset+=3)
    {
        const Vector3d& p0 = m_vertices[i].position();
        const std::vector<BoneWeight>& boneWeights = m_vertices[i].bone_weights();
        int boneWeightCount = static_cast<int>(boneWeights.size());

        Vector3d p;
        if(boneWeightCount != 0)
        {
            double boneWeightSum = 0;

            for(int j=0; j<boneWeightCount; ++j)
            {
                int boneIndex = boneWeights[j].bone_index();
                double boneWeight = boneWeights[j].weight();
                boneWeightSum += boneWeight;
                m->add_scaled(skinningMatrices[boneIndex], boneWeight);
            }

            // Note: This is effectively p = m*p0 (if we think of p0 as (p0.x, p0.y, p0.z, 1)).
            p = m->apply_to_point(p0);
            p /= boneWeightSum;

            // Reset the accumulator matrix ready for the next vertex.
            m->reset_to_zeros();
        }
        else
        {
            // If this vertex is unaffected by the armature (i.e. no bone weights have been assigned to it),
            // use its rest position as its real position (it's the best we can do).
            p = p0;
        }

        m_vertArray[offset] = p.x;
        m_vertArray[offset+1] = p.y;
        m_vertArray[offset+2] = p.z;
    }
}
void Submesh::render() const
{
    glPushClientAttrib(GL_CLIENT_VERTEX_ARRAY_BIT);
    glPushAttrib(GL_ENABLE_BIT | GL_POLYGON_BIT);

    glEnableClientState(GL_VERTEX_ARRAY);
    glVertexPointer(3, GL_DOUBLE, 0, &m_vertArray[0]);

    if(m_material->uses_texcoords())
    {
        glEnableClientState(GL_TEXTURE_COORD_ARRAY);
        glTexCoordPointer(2, GL_DOUBLE, 0, &m_texCoordArray[0]);
    }

    m_material->apply();

    glDrawElements(GL_TRIANGLES, static_cast<GLsizei>(m_vertIndices.size()), GL_UNSIGNED_INT, &m_vertIndices[0]);

    glPopAttrib();
    glPopClientAttrib();
}
Note in passing that, to the best of my knowledge, real-world implementations usually do this sort of thing on the GPU.
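If you do go the GPU route later, the CPU side mostly reduces to uploading the skinning matrices and letting a vertex shader do the weighted blend described in the comment block above. A rough sketch (the uniform name and the GLEW loader are assumptions, not part of the code above):
#include <GL/glew.h> // or whatever GL loader you use
#include <vector>

// Uploads one column-major 4x4 matrix per bone to a hypothetical "mat4 boneMatrices[MAX_BONES]" uniform.
// The vertex shader would then compute sum_i w_i * boneMatrices[i] * restPosition per vertex.
void uploadSkinningMatrices(GLuint program, const std::vector<float>& matrices /* 16 floats per bone */)
{
    glUseProgram(program);
    GLint location = glGetUniformLocation(program, "boneMatrices[0]");
    GLsizei boneCount = static_cast<GLsizei>(matrices.size() / 16);
    glUniformMatrix4fv(location, boneCount, GL_FALSE, matrices.data());
}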
Your code assumes that each bone has an independent transformation matrix (you reset your matrix at the start of each loop iteration). But in reality, bones form a hierarchical structure that you must preserve when you do your rendering. Consider that when your upper arm rotates, your forearm rotates along with it, because it is attached. The forearm may have its own rotation, but that is applied after it is rotated along with the upper arm.
The rendering of the skeleton is then done recursively. Here is some pseudo-code:
function renderBone(Bone b) {
    setupTransformMatrix(b);
    draw(b);
    foreach c in b.getChildren()
        renderBone(c);
}

main() {
    gl.glLoadIdentity();
    renderBone(rootBone);
}
I hope this helps.
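One detail worth adding to the pseudo-code above: if setupTransformMatrix() multiplies the bone's local transform onto the current matrix (rather than loading an absolute one), each recursion level should push and pop the matrix stack so sibling bones don't inherit each other's transforms. A sketch of that variant, assuming a Bone interface like the one in the question:
// Recursive bone rendering with explicit push/pop around each subtree.
void renderBone(const Bone& b)
{
    glPushMatrix();
    setupTransformMatrix(b); // glTranslatef / glRotatef / glScalef for this bone, relative to its parent
    draw(b);                 // draw the vertices weighted to this bone

    for (const Bone& child : b.getChildren())
        renderBone(child);

    glPopMatrix(); // restore the parent's matrix before moving on to the next sibling
}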