Screenshot: http://1drv.ms/1C9fgyl
Please note that the normal buffer looks very strange. I compute tangents using the method from this link: http://www.terathon.com/code/tangent.html
The fabric tangents are very different between the left half and the right half, and I don't know what is happening.
I tested another Sponza scene and gave it to other people who use the same method to calculate the TBN, and they got the same issue.
What is the bug in my algorithm?
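For reference, the linked page derives the tangent T and bitangent B per triangle from the position edges $E_1, E_2$ and the corresponding UV deltas $(s_1, t_1)$ and $(s_2, t_2)$:

$$E_1 = s_1 T + t_1 B, \qquad E_2 = s_2 T + t_2 B$$

$$\begin{pmatrix} T \\ B \end{pmatrix} = \frac{1}{s_1 t_2 - s_2 t_1} \begin{pmatrix} t_2 & -t_1 \\ -s_2 & s_1 \end{pmatrix} \begin{pmatrix} E_1 \\ E_2 \end{pmatrix}$$

The code below accumulates these per-face directions per vertex and then orthonormalizes against the normal: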
bool OBJModel_Loader::BuildTangent(std::vector<OBJModel*>& Objects)
{
    for (OBJModel* actor : Objects)
    {
        PracEngLogf("BuildTangent For Model: %s! \n", actor->Name.c_str());
        int cnt = actor->VertexBuffer.size();
        // tan1/tan2 accumulate the per-face tangent and bitangent directions per vertex
        PEVector3* tan1 = new PEVector3[cnt * 2];
        PEVector3* tan2 = tan1 + cnt;
        ZeroMemory(tan1, cnt * sizeof(PEVector3) * 2);
        for (size_t i = 0; i < actor->FaceInfos.size(); i += 3)
        {
            int Idx1 = actor->FaceInfos[i].VertexIdx;
            int Idx2 = actor->FaceInfos[i + 1].VertexIdx;
            int Idx3 = actor->FaceInfos[i + 2].VertexIdx;
            const PEFloat3& v1 = actor->VertexBuffer[Idx1];
            const PEFloat3& v2 = actor->VertexBuffer[Idx2];
            const PEFloat3& v3 = actor->VertexBuffer[Idx3];
            const PEFloat2& w1 = actor->UVs[actor->FaceInfos[i].UVIdx];
            const PEFloat2& w2 = actor->UVs[actor->FaceInfos[i + 1].UVIdx];
            const PEFloat2& w3 = actor->UVs[actor->FaceInfos[i + 2].UVIdx];
            // position edges and UV deltas of this triangle
            PEVector3 E1(v2.X - v1.X, v2.Y - v1.Y, v2.Z - v1.Z);
            PEVector3 E2(v3.X - v1.X, v3.Y - v1.Y, v3.Z - v1.Z);
            PEFloat2 St1(w2.X - w1.X, w2.Y - w1.Y);
            PEFloat2 St2(w3.X - w1.X, w3.Y - w1.Y);
            PEVector3 sDir;
            PEVector3 tDir;
            // r is the determinant of the 2x2 UV matrix; degenerate UVs fall back to the axes
            float r = St1.X * St2.Y - St2.X * St1.Y;
            if (fabs(r) < 1e-6f)
            {
                sDir = PEVector3(1.0f, .0f, .0f);
                tDir = PEVector3(.0f, 1.0f, .0f);
            }
            else
            {
                r = 1.0f / r;
                sDir = PEVector3((St2.Y * E1.x - St1.Y * E2.x) * r, (St2.Y * E1.y - St1.Y * E2.y) * r, (St2.Y * E1.z - St1.Y * E2.z) * r);
                tDir = PEVector3((St1.X * E2.x - St2.X * E1.x) * r, (St1.X * E2.y - St2.X * E1.y) * r, (St1.X * E2.z - St2.X * E1.z) * r);
            }
            tan1[Idx1] += sDir;
            tan1[Idx2] += sDir;
            tan1[Idx3] += sDir;
            tan2[Idx1] += tDir;
            tan2[Idx2] += tDir;
            tan2[Idx3] += tDir;
        }
        for (size_t i = 0; i < actor->FaceInfos.size(); i++)
        {
            int Idx = actor->FaceInfos[i].VertexIdx;
            const PEVector3& N = actor->VertexNormal[actor->FaceInfos[i].NormalIdx];
            const PEVector3& T = tan1[Idx];
            // Gram-Schmidt: make the tangent orthogonal to the normal
            PEVector3 adjTangent = T - N * PEVector3::Dot(N, T);
            adjTangent.Normalize();
            actor->VertexTangent[Idx] = adjTangent;
            // handedness: -1 where the UVs are mirrored
            actor->VertexTangent[Idx].w = PEVector3::Dot(N.Cross(T), tan2[Idx]) < .0f ? -1.0f : 1.0f;
        }
        delete[] tan1;
    }
    return true;
}
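In the shader, the bitangent is then rebuilt from the stored handedness, as in the same article:

$$B = w\,(N \times T)$$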
So I'm making a raytracer in OpenGL, fully shader-based, and I'm struggling to find the problem with my shadow rays. If I multiply the radiance of the object by the shadow-ray output, it seems like only the "edge" of the sphere is lit up.
I have checked the code multiple times without finding where the problem comes from.
This is what I get:
vec3 TraceShadowRay(vec3 hitPoint, vec3 normal, Object objects[3])
{
    Light pointLight;
    pointLight.position = vec3(0, 80, 0);
    pointLight.intensity = 2;
    Ray ShadowRay;
    // offset the origin along the normal to avoid self-intersection ("shadow acne")
    ShadowRay.origin = hitPoint + normal * 1e-4;
    ShadowRay.dir = normalize(pointLight.position - ShadowRay.origin);
    ShadowRay.t = 100000;
    //ShadowRay.dir = vec3(0, 1, 0);
    for(int i = 0; i < 3; ++i)
    {
        if(objects[i].type == 0)
        {
            if(interectSphere(objects[i].position, objects[i].radius, ShadowRay))
            {
                return vec3(0);
            }
        }
        if(objects[i].type == 1)
        {
            if(intersectPlane(objects[i].normal, objects[i].position, ShadowRay))
            {
                return vec3(0);
            }
        }
    }
    float AngleNormalShadow = dot(ShadowRay.dir, normal);
    clamp(AngleNormalShadow, 0, 1); // note: clamp() returns the clamped value; this result is discarded
    return GetLight(ShadowRay.origin, pointLight);// * AngleNormalShadow;
}
The GetLight function:
vec3 GetLight(vec3 origin, Light light)
{
    return vec3(1, 1, 1) * light.intensity;
    //float dist = sqrt( ((origin.x - light.position.x) * (origin.x - light.position.x)) + ((origin.y - light.position.y) * (origin.y - light.position.y)));
    //return (vec3(1, 1, 1) * light.intensity) / (4 * M_PI * ((origin - light.position).length * (origin - light.position).length));
}
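The commented-out lines are aiming at the usual inverse-square falloff for a point light of power $P$ at distance $d$:

$$L = \frac{P}{4 \pi d^2}$$

(As written they would also need the distance in all three components, and GLSL's length() function; vec3 has no .length member.)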
The intersectSphere function:
bool interectSphere(const vec3 center, float radius, inout Ray r)
{
    vec3 o = r.origin;
    vec3 d = r.dir;
    vec3 v = o - center;
    float b = 2 * dot(v, d);
    float c = dot(v, v) - radius * radius;
    float delta = b * b - 4 * c;
    if(delta < 1e-4)
        return false;
    float t1 = (-b - sqrt(delta)) / 2;
    float t2 = (-b + sqrt(delta)) / 2;
    if(t1 < t2)
    {
        r.t = t1;
        r.t2 = t2;
    }
    else if(t2 < t1)
    {
        r.t = t2;
        r.t2 = t1;
    }
    r.reflectionNormal = normalize((r.origin + r.dir * r.t) - center);
    return true;
}
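For context, this test assumes r.dir is normalized, so with $v = o - c$ the quadratic from $\lVert o + t\,d - c \rVert^2 = r^2$ has a leading coefficient of 1:

$$t^2 + 2(v \cdot d)\,t + (v \cdot v - r^2) = 0$$

which is where b = 2 * dot(v, d), c = dot(v, v) - radius*radius, and delta = b*b - 4*c come from.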
The expected result is a nicely shaded sphere, with light coming from the top of the spheres.
Could it be a missing negation? It looks like interectSphere() returns true when there is a collision, but the calling code in TraceShadowRay() bails out when it returns true.
old:
if(interectSphere(objects[i].position, objects[i].radius, ShadowRay))
{
    return vec3(0);
}
new:
if(!interectSphere(objects[i].position, objects[i].radius, ShadowRay))
{
    return vec3(0);
}
I'm having an issue that I cannot seem to resolve.
I'm importing OBJ files with TinyObjLoader and copying the vertex and index buffers to GPU memory to draw the model. There are only triangles in this model: no polygons with four or more sides, and no negative indices.
The only problem is that I cannot draw the full model; only 1/2 to 2/3 of the vertices are drawn, though with correct positions, correct normals, and correct textures.
I split the code into two methods with #defines to illustrate the issue.
Note that a heavy amount of trial and error went into figuring this out, as tutorials and educational material on DirectX 11 are scarce. If you see any issues with the way the code is structured, please feel free to comment.
Method 1 is the failing code. Here I duplicate vertices in order to have different normals; this is essential in my model because each surface needs different shading. Note: I understand I can make this rendering more optimized with std::unordered_map for duplicate vertices whose normals point in the same direction.
#ifdef DUPVERTICES
std::vector<float> vertex_buffer;
std::vector<uint32_t> index_buffer;
struct T_Vertex
{
    float vX;
    float vY;
    float vZ;
    float nX;
    float nY;
    float nZ;
    float tX;
    float tY;
    uint32_t vXIndex;
    uint32_t vYIndex;
    uint32_t vZIndex;
    uint32_t nXIndex;
    uint32_t nYIndex;
    uint32_t nZIndex;
    uint32_t tXIndex;
    uint32_t tYIndex;
};
std::vector<T_Vertex> temp_vertices;
size_t index_offset = 0;
for (size_t f = 0; f < shapes[0].mesh.num_face_vertices.size(); f++) {
    int fv = shapes[0].mesh.num_face_vertices[f];
    // Loop over vertices in the face.
    for (size_t v = 0; v < fv; v++) {
        // access to vertex
        tinyobj::index_t idx = shapes[0].mesh.indices[index_offset + v];
        if (idx.vertex_index < 0 || idx.normal_index < 0 || idx.texcoord_index < 0)
            continue;
        T_Vertex temp_vertex;
        temp_vertex.vX = idx.normal_index > 0 ? attrib.vertices[3 * idx.vertex_index + 0] : 0;
        temp_vertex.vY = idx.normal_index > 0 ? attrib.vertices[3 * idx.vertex_index + 1] : 0;
        temp_vertex.vZ = idx.normal_index > 0 ? attrib.vertices[3 * idx.vertex_index + 2] : 0;
        temp_vertex.nX = idx.normal_index > 0 ? attrib.normals[3 * idx.normal_index + 0] : 0;
        temp_vertex.nY = idx.normal_index > 0 ? attrib.normals[3 * idx.normal_index + 1] : 0;
        temp_vertex.nZ = idx.normal_index > 0 ? attrib.normals[3 * idx.normal_index + 2] : 0;
        temp_vertex.tX = idx.texcoord_index > 0 ? attrib.texcoords[2 * idx.texcoord_index + 0] : 0;
        temp_vertex.tY = idx.texcoord_index > 0 ? attrib.texcoords[2 * idx.texcoord_index + 1] : 0;
        temp_vertex.vXIndex = 3 * idx.vertex_index + 0;
        temp_vertex.vYIndex = 3 * idx.vertex_index + 1;
        temp_vertex.vZIndex = 3 * idx.vertex_index + 2;
        temp_vertex.nXIndex = 3 * idx.normal_index + 0;
        temp_vertex.nYIndex = 3 * idx.normal_index + 1;
        temp_vertex.nZIndex = 3 * idx.normal_index + 2;
        temp_vertex.tXIndex = 2 * idx.texcoord_index + 0;
        temp_vertex.tYIndex = 2 * idx.texcoord_index + 1;
        temp_vertices.push_back(temp_vertex);
    }
    index_offset += fv;
}
for (auto& temp_vertex : temp_vertices)
{
    vertex_buffer.push_back(temp_vertex.vX);
    vertex_buffer.push_back(temp_vertex.vY);
    vertex_buffer.push_back(temp_vertex.vZ);
    vertex_buffer.push_back(temp_vertex.nX);
    vertex_buffer.push_back(temp_vertex.nY);
    vertex_buffer.push_back(temp_vertex.nZ);
    vertex_buffer.push_back(temp_vertex.tX); //Set to 0 for no texture
    vertex_buffer.push_back(temp_vertex.tY); //Set to 0 for no texture
    vertex_buffer.push_back(0.0F);
    index_buffer.push_back(temp_vertex.vXIndex);
    index_buffer.push_back(temp_vertex.vYIndex);
    index_buffer.push_back(temp_vertex.vZIndex);
    index_buffer.push_back(temp_vertex.nXIndex);
    index_buffer.push_back(temp_vertex.nYIndex);
    index_buffer.push_back(temp_vertex.nZIndex);
    index_buffer.push_back(0);
    index_buffer.push_back(0);
    index_buffer.push_back(0);
}
Method 2 works (minus textures), but there are no duplicate vertices, so the normal orientation is not good for rendering. However, in this configuration every single vertex is drawn in the correct place. Notice that the stride and the vertex and index buffer structures are the same in both versions; it is the size of the buffers that differs.
#else
std::vector<float> vertex_buffer;
for (int i = 0, j = 0; i < attrib.vertices.size(); i += 3, j += 2)
{
    vertex_buffer.push_back(attrib.vertices[i + 0]);
    vertex_buffer.push_back(attrib.vertices[i + 1]);
    vertex_buffer.push_back(attrib.vertices[i + 2]);
    vertex_buffer.push_back(attrib.normals[i + 0]);
    vertex_buffer.push_back(attrib.normals[i + 1]);
    vertex_buffer.push_back(attrib.normals[i + 2]);
    vertex_buffer.push_back(0);//attrib.texcoords[j + 0]);
    vertex_buffer.push_back(0);//attrib.texcoords[j + 1]);
    vertex_buffer.push_back(0.0F);
}
std::vector<UINT> index_buffer;
for (int i = 0, j = 0; i < shapes[0].mesh.indices.size(); i += 3, j += 2)
{
    index_buffer.push_back(shapes[0].mesh.indices[i + 0].vertex_index);
    index_buffer.push_back(shapes[0].mesh.indices[i + 1].vertex_index);
    index_buffer.push_back(shapes[0].mesh.indices[i + 2].vertex_index);
    index_buffer.push_back(shapes[0].mesh.indices[i + 0].normal_index);
    index_buffer.push_back(shapes[0].mesh.indices[i + 1].normal_index);
    index_buffer.push_back(shapes[0].mesh.indices[i + 2].normal_index);
    index_buffer.push_back(0);
    index_buffer.push_back(0);
    index_buffer.push_back(0);
}
uint32_t vertexes_size = vertex_buffer.size() * sizeof(float);
uint32_t indexes_size = index_buffer.size() * sizeof(uint32_t);
int stride_bytes = 36;
#endif
This is where I create the buffers; it is the same code for both methods:
//Set Vertex Buffer Array
g_Mesh11.m_pMeshArray = new SDKMESH_MESH;
g_Mesh11.m_pVertexBufferArray = new SDKMESH_VERTEX_BUFFER_HEADER;
int t = g_Mesh11.m_pMeshArray[0].VertexBuffers[0];
g_Mesh11.m_pMeshArray[0].VertexBuffers[0] = 0;
D3D11_BUFFER_DESC vertex_buf_desc;
vertex_buf_desc.ByteWidth = vertexes_size;
vertex_buf_desc.Usage = D3D11_USAGE_DEFAULT;
vertex_buf_desc.BindFlags = D3D11_BIND_VERTEX_BUFFER;
vertex_buf_desc.CPUAccessFlags = 0;
vertex_buf_desc.MiscFlags = 0;
D3D11_SUBRESOURCE_DATA init_vertex_data;
init_vertex_data.pSysMem = &vertex_buffer[0];
dxCtr->m_pDevice->CreateBuffer(&vertex_buf_desc, &init_vertex_data, &g_Mesh11.m_pVertexBufferArray[g_Mesh11.m_pMeshArray[0].VertexBuffers[0]].pVB11);
g_Mesh11.m_pVertexBufferArray[g_Mesh11.m_pMeshArray[0].VertexBuffers[0]].StrideBytes = stride_bytes;
g_Mesh11.m_pVertexBufferArray[g_Mesh11.m_pMeshArray[0].VertexBuffers[0]].SizeBytes = vertexes_size;
//Set Index Buffer array
g_Mesh11.m_pMeshArray[0].IndexBuffer = 0;
g_Mesh11.m_pIndexBufferArray = new SDKMESH_INDEX_BUFFER_HEADER;
g_Mesh11.m_pIndexBufferArray[g_Mesh11.m_pMeshArray[0].IndexBuffer].IndexType = IT_32BIT;
D3D11_BUFFER_DESC index_buf_desc;
index_buf_desc.ByteWidth = indexes_size;
index_buf_desc.Usage = D3D11_USAGE_DEFAULT;
index_buf_desc.BindFlags = D3D11_BIND_INDEX_BUFFER;
index_buf_desc.CPUAccessFlags = 0;
index_buf_desc.MiscFlags = 0;
D3D11_SUBRESOURCE_DATA init_index_data;
init_index_data.pSysMem = &index_buffer[0];
dxCtr->m_pDevice->CreateBuffer(&index_buf_desc, &init_index_data, &g_Mesh11.m_pIndexBufferArray[g_Mesh11.m_pMeshArray[0].IndexBuffer].pIB11);
g_Mesh11.m_pIndexBufferArray[g_Mesh11.m_pMeshArray[0].IndexBuffer].SizeBytes = indexes_size;
//Set subset
SDKMESH_SUBSET v_subset;
v_subset.MaterialID = 0;
v_subset.PrimitiveType = PT_TRIANGLE_LIST;
v_subset.IndexCount = index_buffer.size();
v_subset.VertexCount = vertex_buffer.size();
v_subset.VertexStart = 0;
v_subset.IndexStart = 0;
g_Mesh11.m_pMeshArray[0].pSubsets = new uint32_t;
g_Mesh11.m_pMeshArray[0].pSubsets[0] = 0;
g_Mesh11.m_pMeshArray[0].NumSubsets = 1;
g_Mesh11.m_pSubsetArray = new SDKMESH_SUBSET;
g_Mesh11.m_pSubsetArray[g_Mesh11.m_pMeshArray[0].pSubsets[0]] = v_subset;
Additional information:
I tried lower-poly models and the issue is still there, so I am not hitting some vertex-count limit.
Here are the sizes of the arrays
attrib.vertices.size() = 150201
attrib.normals.size() = 173712
attrib.texcoords.size() = 135956
shapes[0].mesh.indices.size() = 300978
In Method 1 in the above example
temp_vertices.size() = 300978 (matches index size, so I'm not missing any vertices in temp_vertices)
vertex_buffer.size() = index_buffer.size() = 2708802
In Method 2 in the above example
vertex_buffer.size() = index_buffer.size() = 450603
Here is the input layout
// Create our vertex input layout
const D3D11_INPUT_ELEMENT_DESC layout[] =
{
    { "POSITION", 0, DXGI_FORMAT_R32G32B32_FLOAT, 0, 0,  D3D11_INPUT_PER_VERTEX_DATA, 0 },
    { "NORMAL",   0, DXGI_FORMAT_R32G32B32_FLOAT, 0, 12, D3D11_INPUT_PER_VERTEX_DATA, 0 },
    { "TEXCOORD", 0, DXGI_FORMAT_R32G32_FLOAT,    0, 24, D3D11_INPUT_PER_VERTEX_DATA, 0 },
};
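For reference, the three elements above cover 12 + 12 + 8 = 32 bytes, while stride_bytes is 36 (9 floats × 4 bytes), so the input assembler reads the first 32 bytes of each vertex and the final padding float is simply skipped by the stride.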
Here is the Vertex Shader
cbuffer cbPerObject : register( b0 )
{
    matrix g_mWorldViewProjection : packoffset( c0 );
    matrix g_mWorld : packoffset( c4 );
};
//--------------------------------------------------------------------------------------
// Input / Output structures
//--------------------------------------------------------------------------------------
struct VS_INPUT
{
    float4 vPosition : POSITION;
    float3 vNormal : NORMAL;
    float2 vTexcoord : TEXCOORD0;
};
struct VS_OUTPUT
{
    float3 vNormal : NORMAL;
    float2 vTexcoord : TEXCOORD0;
    float4 vPosition : SV_POSITION;
};
//--------------------------------------------------------------------------------------
// Vertex Shader
//--------------------------------------------------------------------------------------
VS_OUTPUT VSMain( VS_INPUT Input )
{
    VS_OUTPUT Output;
    Output.vPosition = mul( Input.vPosition, g_mWorldViewProjection );
    Output.vNormal = mul( Input.vNormal, (float3x3)g_mWorld );
    Output.vTexcoord = Input.vTexcoord;
    return Output;
}
I realize there are inconsistencies between the input layout, the shader, and my vertex_buffer vector. Mainly, the input layout covers 32 bytes, my buffer stride is 36 bytes with three floats in the texcoord slot, and the shader input is 36 bytes, with a four-float position and a two-float texcoord.
The shader and initialization come from DXUT, and I did not mess with those. They draw the supplied Tiny.sdkmesh model correctly, which incidentally has a stride of 32 bytes, matching that of the input layout.
If somebody can help explain why the shader's VS_INPUT differs from the input layout, that would greatly help as well.
Changing the vertex shader to match the input layout resulted in compilation errors. Changing the input layout to give the texcoord an extra float did not really make a difference.
Note: I tried removing the vertex_buffer.push_back(0.0F); and index_buffer.push_back(0); at the end and changing stride_bytes to 32, but it no longer drew the vertices correctly.
I've run out of trial-and-error methods to test, and I'm looking for help figuring out what I'm doing wrong.
Thank you,
After dissecting the working tiny.sdkmesh file, I found what I was doing wrong.
The index buffer needs only one entry per vertex (and each vertex is 32 bytes).
I had tried this before, but not successfully; here is the working code.
I'm still looking for an explanation of why the input layout differs from the vertex shader in the number of bytes.
std::vector<float> vertex_buffer;
std::vector<uint32_t> index_buffer;
struct T_Vertex
{
    float vX;
    float vY;
    float vZ;
    float nX;
    float nY;
    float nZ;
    float tX;
    float tY;
    uint32_t vIndex;
};
std::vector<T_Vertex> temp_vertices;
size_t index_offset = 0;
int ind = 0;
for (size_t f = 0; f < shapes[0].mesh.num_face_vertices.size(); f++) {
    int fv = shapes[0].mesh.num_face_vertices[f];
    // Loop over vertices in the face.
    for (size_t v = 0; v < fv; v++) {
        // access to vertex
        tinyobj::index_t idx = shapes[0].mesh.indices[index_offset + v];
        if (idx.vertex_index < 0 || idx.normal_index < 0 || idx.texcoord_index < 0)
            continue;
        T_Vertex temp_vertex;
        temp_vertex.vX = idx.normal_index > 0 ? attrib.vertices[3 * idx.vertex_index + 0] : 0;
        temp_vertex.vY = idx.normal_index > 0 ? attrib.vertices[3 * idx.vertex_index + 1] : 0;
        temp_vertex.vZ = idx.normal_index > 0 ? attrib.vertices[3 * idx.vertex_index + 2] : 0;
        temp_vertex.nX = idx.normal_index > 0 ? attrib.normals[3 * idx.normal_index + 0] : 0;
        temp_vertex.nY = idx.normal_index > 0 ? attrib.normals[3 * idx.normal_index + 1] : 0;
        temp_vertex.nZ = idx.normal_index > 0 ? attrib.normals[3 * idx.normal_index + 2] : 0;
        temp_vertex.tX = idx.texcoord_index > 0 ? attrib.texcoords[2 * idx.texcoord_index + 0] : 0;
        temp_vertex.tY = idx.texcoord_index > 0 ? attrib.texcoords[2 * idx.texcoord_index + 1] : 0;
        temp_vertex.vIndex = ind++; // one 0-based index per duplicated vertex, assigned before the push
        temp_vertices.push_back(temp_vertex);
    }
    index_offset += fv;
}
for (auto& temp_vertex : temp_vertices)
{
    vertex_buffer.push_back(temp_vertex.vX);
    vertex_buffer.push_back(temp_vertex.vY);
    vertex_buffer.push_back(temp_vertex.vZ);
    vertex_buffer.push_back(temp_vertex.nX);
    vertex_buffer.push_back(temp_vertex.nY);
    vertex_buffer.push_back(temp_vertex.nZ);
    vertex_buffer.push_back(temp_vertex.tX); //Set to 0 for no texture
    vertex_buffer.push_back(temp_vertex.tY); //Set to 0 for no texture
    index_buffer.push_back(temp_vertex.vIndex);
}
uint32_t vertexes_size = vertex_buffer.size() * sizeof(float);
uint32_t indexes_size = index_buffer.size() * sizeof(uint32_t);
int stride_bytes = 32;
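As a follow-up to the std::unordered_map idea mentioned earlier, here is a minimal sketch of the deduplication, assuming the same tinyobj structures as above (I use std::map because std::tuple has no ready-made hash for std::unordered_map):

#include <map>
#include <tuple>

std::map<std::tuple<int, int, int>, uint32_t> remap;
for (const tinyobj::index_t& idx : shapes[0].mesh.indices)
{
    // a vertex is unique per (position, normal, texcoord) index combination
    auto key = std::make_tuple(idx.vertex_index, idx.normal_index, idx.texcoord_index);
    auto it = remap.find(key);
    if (it == remap.end())
    {
        // first occurrence: append a new 8-float vertex and remember its index
        it = remap.emplace(key, (uint32_t)(vertex_buffer.size() / 8)).first;
        vertex_buffer.push_back(attrib.vertices[3 * idx.vertex_index + 0]);
        vertex_buffer.push_back(attrib.vertices[3 * idx.vertex_index + 1]);
        vertex_buffer.push_back(attrib.vertices[3 * idx.vertex_index + 2]);
        vertex_buffer.push_back(attrib.normals[3 * idx.normal_index + 0]);
        vertex_buffer.push_back(attrib.normals[3 * idx.normal_index + 1]);
        vertex_buffer.push_back(attrib.normals[3 * idx.normal_index + 2]);
        vertex_buffer.push_back(attrib.texcoords[2 * idx.texcoord_index + 0]);
        vertex_buffer.push_back(attrib.texcoords[2 * idx.texcoord_index + 1]);
    }
    index_buffer.push_back(it->second);
}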
I currently have an OpenGL sprite-drawing class that buffers up a bunch of sprite data and then dumps it with glDrawElements. The problem is that creating the sprites that go into the buffer is cumbersome, as I have loads of parameters to pass into the buffer, with even more redundancy for the shaders. I was wondering if I could reduce CPU load by filling the buffer with only the essentials (location, orientation, texture coordinates, etc.) and then letting a geometry shader turn that into quads for the fragment shader.
In case there's a different answer, I've added the offending method so you can see what I mean:
void Machine::draw(key num, BoundingBox loc, float angle){
    SpriteSyncData* props;
    VertexAttribArray* vdata;
    GLushort* idata;
    SpriteProperties* sprite_props;
    int sliceW;
    int sliceH;
    sprite_props = &spriteList[num];
    props = &spriteToSync[sprite_props->atlas];
    props->size++;
    if(props->size > props->capacity){
        props->capacity += COARSE_MEM_SCALE;
        props->data = (VertexAttribArray*) realloc((void*) props->data, (sizeof(VertexAttribArray)*4) * props->capacity);
        props->i_data = (GLushort*) realloc((void*) props->i_data, (sizeof(GLushort)*4) * props->capacity);
    }
    vdata = props->data + (props->size - 1) * 4;
    idata = props->i_data + (props->size - 1) * 4;
    sliceW = sprite_props->location.x1 - sprite_props->location.x0;
    sliceH = sprite_props->location.y1 - sprite_props->location.y0;
    if(sprite_props->flags & DRAW_TILED){
        vdata[0].p = QVector3D(loc.x1, loc.y0, UNIFORM_DEPTH);
        vdata[1].p = QVector3D(loc.x0, loc.y0, UNIFORM_DEPTH);
        vdata[2].p = QVector3D(loc.x0, loc.y1, UNIFORM_DEPTH);
        vdata[3].p = QVector3D(loc.x1, loc.y1, UNIFORM_DEPTH);
        vdata[0].s = QVector2D(((float) (loc.x1 - loc.x0)) / sliceW,
                               ((float) (loc.y1 - loc.y0)) / sliceH);
        vdata[0].r = QVector2D(0, 0);
        vdata[1].r = vdata[0].r;
        vdata[2].r = vdata[0].r;
        vdata[3].r = vdata[0].r;
    }
    else{
        vdata[0].p = QVector3D(loc.x0 + sliceW, loc.y0, UNIFORM_DEPTH);
        vdata[1].p = QVector3D(loc.x0, loc.y0, UNIFORM_DEPTH);
        vdata[2].p = QVector3D(loc.x0, loc.y0 + sliceH, UNIFORM_DEPTH);
        vdata[3].p = QVector3D(loc.x0 + sliceW, loc.y0 + sliceH, UNIFORM_DEPTH);
        vdata[0].s = QVector2D(1, 1);
        vdata[0].r = QVector2D(sliceW, sliceH);
        vdata[1].r = vdata[0].r;
        vdata[2].r = vdata[0].r;
        vdata[3].r = vdata[0].r;
    }
    vdata[0].t = QVector2D(sprite_props->texCoords[2], sprite_props->texCoords[1]);
    vdata[1].t = QVector2D(sprite_props->texCoords[0], sprite_props->texCoords[1]);
    vdata[2].t = QVector2D(sprite_props->texCoords[0], sprite_props->texCoords[3]);
    vdata[3].t = QVector2D(sprite_props->texCoords[2], sprite_props->texCoords[3]);
    vdata[1].s = vdata[0].s;
    vdata[2].s = vdata[0].s;
    vdata[3].s = vdata[0].s;
    vdata[0].s_lo = QVector2D(sprite_props->texCoords[0], sprite_props->texCoords[1]);
    vdata[0].s_hi = QVector2D(sprite_props->texCoords[2] - sprite_props->texCoords[0],
                              sprite_props->texCoords[3] - sprite_props->texCoords[1]);
    vdata[1].s_lo = vdata[0].s_lo;
    vdata[1].s_hi = vdata[0].s_hi;
    vdata[2].s_lo = vdata[0].s_lo;
    vdata[2].s_hi = vdata[0].s_hi;
    vdata[3].s_lo = vdata[0].s_lo;
    vdata[3].s_hi = vdata[0].s_hi;
    vdata[0].o = (vdata[1].p + vdata[3].p) * 0.5;
    vdata[1].o = vdata[0].o;
    vdata[2].o = vdata[0].o;
    vdata[3].o = vdata[0].o;
    vdata[0].a = angle;
    vdata[1].a = angle;
    vdata[2].a = angle;
    vdata[3].a = angle;
    idata[0] = (props->size - 1) * 4;
    idata[1] = idata[0] + 1;
    idata[2] = idata[0] + 2;
    idata[3] = idata[0] + 3;
}
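For what it's worth, the geometry-shader route the question asks about would shrink the CPU side of this method to filling one compact record per sprite, something like the sketch below (the struct and field names are made up). The per-corner replication of s, r, s_lo, s_hi, o, and a above disappears, because the geometry shader expands each point into the four corners itself:

// Sketch: one record per sprite, drawn as GL_POINTS and expanded to a
// 4-vertex triangle strip in the geometry shader; no index buffer needed.
struct SpritePoint {
    QVector3D o;        // quad center, with UNIFORM_DEPTH in z
    QVector2D halfExt;  // half width / half height of the quad
    QVector2D s_lo;     // atlas rectangle origin (as above)
    QVector2D s_hi;     // atlas rectangle extent (as above)
    float a;            // rotation angle, applied during expansion
};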
I wrote a little program to test how projected shadows work.
In particular, I wanted to check the case where the point to project (it could be the vertex of a triangle) is not between the light source and the plane but behind the light itself, that is, where the light is between the point and the plane.
The problem is that my little program does not work even in the case where the point is between the light and the plane. I have checked the calculations tens of times, so I guess the error must be in the logic, but I can't find it.
Here is the code:
public class test {
    int x = 0;
    int y = 1;
    int z = 2;
    int w = 3;
    float floor[][] = {
        {-100.0f, -100.0f, 0.0f},
        {100.0f, -100.0f, 0.0f},
        {100.0f, 100.0f, 0.0f},
        {-100.0f, 100.0f, 0.0f}};
    private float shadow_floor[] = new float[16];
    float light_position[] = {0.0f, 0.0f, 10.0f, 1.0f};

    public test() {
        // Find the floor plane based on three known points
        float plane_floor[] = calculatePlane(floor[1], floor[2], floor[3]);
        // store shadowMatrix for floor
        shadow_floor = shadowMatrix(plane_floor, light_position);
        float[] point = new float[]{1.0f, 0.0f, 5.0f, 1.0f};
        float[] projectedPoint = pointFmatrixF(point, shadow_floor);
        System.out.println("point: (" + point[x] + ", " + point[y] + ", " + point[z] + ", "
                + point[w] + ")");
        System.out.println("projectedPoint: (" + projectedPoint[x] + ", " + projectedPoint[y]
                + ", " + projectedPoint[z] + ", " + projectedPoint[w] + ")");
    }

    public static void main(String args[]) {
        test test = new test();
    }

    // make shadow matrix
    public float[] shadowMatrix(float plane[], float light_pos[]) {
        float shadow_mat[] = new float[16];
        float dot;
        dot = plane[x] * light_pos[x] + plane[y] * light_pos[y]
                + plane[z] * light_pos[z] + plane[w] * light_pos[w];
        shadow_mat[0] = dot - light_pos[x] * plane[x];
        shadow_mat[4] = -light_pos[x] * plane[y];
        shadow_mat[8] = -light_pos[x] * plane[z];
        shadow_mat[12] = -light_pos[x] * plane[3];
        shadow_mat[1] = -light_pos[y] * plane[x];
        shadow_mat[5] = dot - light_pos[y] * plane[y];
        shadow_mat[9] = -light_pos[y] * plane[z];
        shadow_mat[13] = -light_pos[y] * plane[w];
        shadow_mat[2] = -light_pos[z] * plane[x];
        shadow_mat[6] = -light_pos[z] * plane[y];
        shadow_mat[10] = dot - light_pos[z] * plane[z];
        shadow_mat[14] = -light_pos[z] * plane[w];
        shadow_mat[3] = -light_pos[w] * plane[x];
        shadow_mat[7] = -light_pos[w] * plane[y];
        shadow_mat[11] = -light_pos[w] * plane[z];
        shadow_mat[15] = dot - light_pos[w] * plane[w];
        return shadow_mat;
    }

    public float[] calculatePlane(float p1[], float p2[], float p3[]) {
        // Array for the plane equation
        float plane[] = new float[4];
        // Given two vectors (three points) in the plane, the normal can be computed
        // We want absolute values
        plane[x] = Math.abs(((p2[y] - p1[y]) * (p3[z] - p1[z])) - ((p2[z] - p1[z])
                * (p3[y] - p1[y])));
        plane[y] = Math.abs(((p2[z] - p1[z]) * (p3[x] - p1[x])) - ((p2[x] - p1[x])
                * (p3[z] - p1[z])));
        plane[z] = Math.abs(((p2[x] - p1[x]) * (p3[y] - p1[y])) - ((p2[y] - p1[y])
                * (p3[x] - p1[x])));
        plane[w] = -(plane[x] * p1[x] + plane[y] * p1[y] + plane[z] * p1[z]);
        return plane;
    }

    public float[] pointFmatrixF(float[] point, float[] matrix) {
        int x = 0;
        int y = 1;
        int z = 2;
        float[] transformedPoint = new float[4];
        transformedPoint[x] =
                matrix[0] * point[x]
                + matrix[4] * point[y]
                + matrix[8] * point[z]
                + matrix[12];
        transformedPoint[y] =
                matrix[1] * point[x]
                + matrix[5] * point[y]
                + matrix[9] * point[z]
                + matrix[13];
        transformedPoint[z] =
                matrix[2] * point[x]
                + matrix[6] * point[y]
                + matrix[10] * point[z]
                + matrix[14];
        transformedPoint[w] = 1;
        return transformedPoint;
    }
}
If the plane is the xy plane, the light is at (0, 0, 10) and the point at (1, 0, 5), then the projected point on the plane should be (2, 0, 0), but the program is returning (400000.0, 0.0, 0.0, 1.0).
Solved: I was incorrectly assuming that the last coordinate of the projected point was 1, but it isn't.
https://math.stackexchange.com/questions/320527/projecting-a-point-on-a-plane-through-a-matrix
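For the record, the numbers work out once the divide is added. calculatePlane returns the floor normal (0, 0, 40000) (the cross product of two 200-unit edges), and the shadow matrix maps the test point to the homogeneous result

$$M p = (400000,\ 0,\ 0,\ 200000)^T, \qquad \frac{M p}{w} = (2,\ 0,\ 0,\ 1)^T$$

so the 400000.0 printed above is the correct projected point before dividing by w = 200000.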
I'm trying to write a program for skeletal animation in DirectX 9. I have used the LoadMeshFromHierarchy function to load an animated mesh. Now I would like to bypass the animation controller so that I can drive the animation myself, reading the keyframes from the animated mesh file (e.g. tiny.x) and looping through those keys at will.
Here is what I have so far. At this point I have already parsed the .x file successfully and stored each animation, and every animation key for the sole animation set, in a class (Anim). When I run this update function, the animated mesh is disfigured, and I can't figure out why. I assume the problem is in the process by which I update the transformation matrix for each frame. Here is my code:
void cAnimationCollection::Update(DWORD AnimSetIndex, DWORD time)
{
    D3DXFRAME_EXTENDED *currentFrame = (D3DXFRAME_EXTENDED*)m_entity->m_frameRoot;
    cAnimationSet *AnimSet = m_AnimationSets;
    assert(AnimSetIndex <= index);
    while(AnimSet != NULL)
    {
        if(AnimSet->m_index == AnimSetIndex)
        {
            cAnimation *Anim = AnimSet->m_Animations;
            while(Anim != NULL)
            {
                // start from identity, then append scale, rotation, translation, and matrix keys
                D3DXMatrixIdentity(&Anim->m_Frame->TransformationMatrix);
                if(Anim->m_NumScaleKeys && Anim->m_ScaleKeys)
                {
                    // find the last scale key at or before 'time' and the key after it
                    DWORD ScaleKey = 0, ScaleKey2 = 0;
                    for(DWORD i = 0; i < Anim->m_NumScaleKeys; i++)
                    {
                        if(time >= Anim->m_ScaleKeys[i].m_Time)
                            ScaleKey = i;
                    }
                    ScaleKey2 = (ScaleKey >= (Anim->m_NumScaleKeys - 1)) ? ScaleKey : ScaleKey + 1;
                    float TimeDiff = Anim->m_ScaleKeys[ScaleKey2].m_Time - Anim->m_ScaleKeys[ScaleKey].m_Time;
                    if(!TimeDiff)
                        TimeDiff = 1;
                    float Scalar = ((float)time - Anim->m_ScaleKeys[ScaleKey].m_Time) / (float)TimeDiff;
                    D3DXVECTOR3 vecScale = Anim->m_ScaleKeys[ScaleKey2].m_VecKey - Anim->m_ScaleKeys[ScaleKey].m_VecKey;
                    vecScale *= Scalar;
                    vecScale += Anim->m_ScaleKeys[ScaleKey].m_VecKey;
                    D3DXMATRIX matScale;
                    D3DXMatrixScaling(&matScale, vecScale.x, vecScale.y, vecScale.z);
                    Anim->m_Frame->TransformationMatrix *= matScale;
                }
                if(Anim->m_NumRotationKeys && Anim->m_RotationKeys)
                {
                    DWORD RotKey = 0, RotKey2 = 0;
                    for(DWORD i = 0; i < Anim->m_NumRotationKeys; i++)
                    {
                        if(time >= Anim->m_RotationKeys[i].m_Time)
                            RotKey = i;
                    }
                    RotKey2 = (RotKey >= (Anim->m_NumRotationKeys - 1)) ? RotKey : RotKey + 1;
                    float TimeDiff = Anim->m_RotationKeys[RotKey2].m_Time - Anim->m_RotationKeys[RotKey].m_Time;
                    if(!TimeDiff)
                        TimeDiff = 1;
                    float Scalar = ((float)time - Anim->m_RotationKeys[RotKey].m_Time) / (float)TimeDiff;
                    // rotations are interpolated with a quaternion slerp rather than a lerp
                    D3DXQUATERNION quatRotation;
                    D3DXQuaternionSlerp(&quatRotation,
                                        &Anim->m_RotationKeys[RotKey].m_QuatKey,
                                        &Anim->m_RotationKeys[RotKey2].m_QuatKey,
                                        Scalar);
                    D3DXMATRIX matRotation;
                    D3DXMatrixRotationQuaternion(&matRotation, &quatRotation);
                    Anim->m_Frame->TransformationMatrix *= matRotation;
                }
                if(Anim->m_NumTranslationKeys && Anim->m_TranslationKeys)
                {
                    DWORD PosKey = 0, PosKey2 = 0;
                    for(DWORD i = 0; i < Anim->m_NumTranslationKeys; i++)
                    {
                        if(time >= Anim->m_TranslationKeys[i].m_Time)
                            PosKey = i;
                    }
                    PosKey2 = (PosKey >= (Anim->m_NumTranslationKeys - 1)) ? PosKey : PosKey + 1;
                    float TimeDiff = Anim->m_TranslationKeys[PosKey2].m_Time - Anim->m_TranslationKeys[PosKey].m_Time;
                    if(!TimeDiff)
                        TimeDiff = 1;
                    float Scalar = ((float)time - Anim->m_TranslationKeys[PosKey].m_Time) / (float)TimeDiff;
                    D3DXVECTOR3 vecPos = Anim->m_TranslationKeys[PosKey2].m_VecKey - Anim->m_TranslationKeys[PosKey].m_VecKey;
                    vecPos *= Scalar;
                    vecPos += Anim->m_TranslationKeys[PosKey].m_VecKey;
                    D3DXMATRIX matTranslation;
                    D3DXMatrixTranslation(&matTranslation, vecPos.x, vecPos.y, vecPos.z);
                    Anim->m_Frame->TransformationMatrix *= matTranslation;
                }
                if(Anim->m_NumMatrixKeys && Anim->m_MatrixKeys)
                {
                    DWORD Key1 = 0, Key2 = 0;
                    for(DWORD i = 0; i < Anim->m_NumMatrixKeys; i++)
                    {
                        if(time >= Anim->m_MatrixKeys[i].m_Time)
                            Key1 = i;
                    }
                    Key2 = (Key1 >= (Anim->m_NumMatrixKeys - 1)) ? Key1 : Key1 + 1;
                    float TimeDiff = Anim->m_MatrixKeys[Key2].m_Time - Anim->m_MatrixKeys[Key1].m_Time;
                    if(!TimeDiff)
                        TimeDiff = 1;
                    float Scalar = ((float)time - Anim->m_MatrixKeys[Key1].m_Time) / (float)TimeDiff;
                    // matrix keys are interpolated element-wise
                    D3DXMATRIX matDiff = Anim->m_MatrixKeys[Key2].m_MatKey - Anim->m_MatrixKeys[Key1].m_MatKey;
                    matDiff *= Scalar;
                    matDiff += Anim->m_MatrixKeys[Key1].m_MatKey;
                    Anim->m_Frame->TransformationMatrix *= matDiff;
                }
                Anim = Anim->m_Next;
            }
        }
        AnimSet = AnimSet->m_Next;
    }
    m_entity->UpdateFrameMatrices(m_entity->m_frameRoot, 0);
    m_entity->UpdateSkinnedMesh(m_entity->m_frameRoot);
    if(AnimSet == NULL)
        return;
}
Is my method correct? The first thing I do for each frame is reset the transformation matrix to identity; then I calculate an interpolated value for each key type (translation, scale, rotation, and matrix) and apply it to the transformation matrix; then I update the frame matrices and, finally, the skinned mesh.
Any ideas?
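As an aside, the four key searches in Update() all follow the same pattern, so they could be centralized in a small helper; here is a sketch with made-up names, assuming each key type exposes an m_Time member as above:

// Finds the two keys bracketing 'time' and returns the blend factor between them.
template <typename TKey>
float FindKeyPair(const TKey* keys, DWORD numKeys, DWORD time, DWORD& key1, DWORD& key2)
{
    key1 = 0;
    for (DWORD i = 0; i < numKeys; i++)
    {
        if (time >= keys[i].m_Time)
            key1 = i;
    }
    key2 = (key1 >= numKeys - 1) ? key1 : key1 + 1;
    float timeDiff = (float)(keys[key2].m_Time - keys[key1].m_Time);
    if (timeDiff == 0.0f)
        timeDiff = 1.0f; // both keys identical; any blend factor works
    return ((float)time - (float)keys[key1].m_Time) / timeDiff;
}

Each block then reduces to one call, e.g. float Scalar = FindKeyPair(Anim->m_ScaleKeys, Anim->m_NumScaleKeys, time, ScaleKey, ScaleKey2);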