I am creating a UV sphere (similar to an Earth globe divided into lines of latitude). I am doing this by:
Calculating all of the vertices around each parallel latitude circle (e.g. 72 points per circle)
Using GL_TRIANGLE_STRIP to fill in each "slice" between each of the latitude circles.
Unfortunately I keep seeing dots on my otherwise perfect sphere.
What would cause this and how do I get rid of it?
void CSphere2::AddVertices( void )
{
// Build a UV sphere out of horizontal slices; each slice between two adjacent
// latitude circles becomes one GL_TRIANGLE_STRIP face.
#define SPHERE2_RES 72
// Create sphere using horizontal slices/circles
int nPointsPerCircle = SPHERE2_RES;
int nStackedCircles = SPHERE2_RES;
GLfloat r = m_Size;
GLfloat yAngle = - (PI / 2.0f); // Start at -90deg and work up to +90deg (south to north pole)
GLfloat yAngleStep = PI / nStackedCircles;
// Sweep angle is zero initially for pointing towards me (-Z direction)
GLfloat horizSweepAngle = 0;
GLfloat horizSweepStep = ( 2 * PI ) / nPointsPerCircle;
// Each time we have a slice, the top and bottom radii vary..
GLfloat sweepRadiusTop;
GLfloat sweepRadiusBottom;
GLfloat xBottomPoint;
GLfloat zBottomPoint;
GLfloat xTopPoint;
GLfloat zTopPoint;
for( int c = 0; c < nStackedCircles; c ++ )
{
// Draw a circle - note that this always uses two circles - a top and bottom circle.
GLfloat yBottomCircle;
GLfloat yTopCircle;
yTopCircle = r * sin( yAngle + yAngleStep );
yBottomCircle = r * sin( yAngle );
std::vector<GLfloat> vBottom_x;
std::vector<GLfloat> vBottom_z;
std::vector<GLfloat> vTop_x;
std::vector<GLfloat> vTop_z;
sweepRadiusTop = r * cos( yAngle + yAngleStep );
sweepRadiusBottom = r * cos( yAngle );
// Add 1 face - a triangle strip per slice..
AddFace();
m_Faces[ c ].m_DrawType = GL_TRIANGLE_STRIP;
// Now work out the position of the points around each circle - bottom points will always be the
// same as the last top circle points.. but I'm not going to try optimising yet..
for( int s = 0; s < nPointsPerCircle; s ++ )
{
// NOTE(review): the top ring is sampled at (horizSweepAngle + horizSweepStep)
// while the bottom ring is sampled at horizSweepAngle, and horizSweepAngle
// keeps accumulating across the whole build.  A vertex that should be shared
// by the top of one slice and the bottom of the next is therefore computed
// from two slightly different float angle values; the rounding difference
// opens pinhole gaps - the bright dots discussed in the surrounding text.
GLfloat xBottomPoint = sweepRadiusBottom * sin( horizSweepAngle );
GLfloat zBottomPoint = sweepRadiusBottom * cos( horizSweepAngle );
GLfloat xTopPoint = sweepRadiusTop * sin( horizSweepAngle + horizSweepStep );
GLfloat zTopPoint = sweepRadiusTop * cos( horizSweepAngle + horizSweepStep );
vBottom_x.push_back( xBottomPoint );
vBottom_z.push_back( zBottomPoint );
vTop_x.push_back( xTopPoint );
vTop_z.push_back( zTopPoint );
horizSweepAngle += horizSweepStep;
}
// OPTIMISE THIS!!
// Emit the strip as (bottom, top) vertex pairs; the last two iterations wrap
// back to the first points to close the ring.
for( int s = 1; s <= nPointsPerCircle + 1; s ++ )
{
if( s == nPointsPerCircle + 1 )
{
// Join the last bottom point with the very first top point - go one more to fully close and leave no vertical gap
xTopPoint = vTop_x[ 1 ];
zTopPoint = vTop_z[ 1 ];
xBottomPoint = vBottom_x[ 0 ];
zBottomPoint = vBottom_z[ 0 ];
}
else
if( s == nPointsPerCircle )
{
// Join the last bottom point with the very first top point
xTopPoint = vTop_x[ 0 ];
zTopPoint = vTop_z[ 0 ];
xBottomPoint = vBottom_x[ s - 1 ];
zBottomPoint = vBottom_z[ s - 1 ];
}
else
{
xTopPoint = vTop_x[ s ];
zTopPoint = vTop_z[ s ];
xBottomPoint = vBottom_x[ s - 1 ];
zBottomPoint = vBottom_z[ s - 1 ];
}
// Calculate and add the Normal for each vertex.. Normal for a point on surface of a Sphere2 should be the unit vector going from centre
// of the Sphere2 to the surface (x,y,z).
//
// If centre of Sphere2 is 0,0,0 then N = | {x,y,z} - {0,0,0} | = | {x,y,z} |
glm::vec3 vNormalBottom = glm::vec3( xBottomPoint, yBottomCircle, zBottomPoint );
vNormalBottom = glm::normalize( vNormalBottom );
glm::vec3 vNormalTop = glm::vec3( xTopPoint, yTopCircle, zTopPoint );
vNormalTop = glm::normalize( vNormalTop );
// Add bottom of slice vertex..
m_Faces[ c ].AddVertexWithNormal( xBottomPoint, yBottomCircle, zBottomPoint, vNormalBottom.x, vNormalBottom.y, vNormalBottom.z );
// Add top of slice vertex, next step position..
m_Faces[ c ].AddVertexWithNormal( xTopPoint, yTopCircle, zTopPoint, vNormalTop.x, vNormalTop.y, vNormalTop.z );
}
int nVertexCount = m_Faces[ c ].m_Vertices.size();
m_Faces[ c ].m_SideCount = nVertexCount;
// Face colouring colours the vertices so they need to be created first..
m_Faces[ c ].SetRGB( m_RGBA.r, m_RGBA.g, m_RGBA.b );
yAngle += yAngleStep;
}
}
void CSphere2::Create( GLfloat fSize )
{
// Generate the sphere geometry and upload every face's vertices into one VBO.
m_Size = fSize;
// Must add vertices first..
AddVertices();
glGenBuffers( 1, &m_VBO );
glBindBuffer( GL_ARRAY_BUFFER, m_VBO );
int nFaces = m_Faces.size();
int nVertexCount = 0;
for( int f = 0; f < nFaces; f ++ )
{
nVertexCount += m_Faces[ f ].m_Vertices.size();
// NOTE(review): this stores the CUMULATIVE vertex count, overwriting the
// per-face count set in AddVertices() - presumably a running end offset
// used when drawing; confirm against the draw code.
m_Faces[ f ].m_SideCount = nVertexCount;
}
// Define the size of the buffer..
glBufferData( GL_ARRAY_BUFFER, sizeof( COLVERTEX ) * nVertexCount, NULL, GL_STATIC_DRAW );
int nOffset = 0;
for( int f = 0; f < nFaces; f ++ )
{
// Copy in each vertice's data..
// NOTE(review): one glBufferSubData call per vertex is expensive for large
// meshes; a single upload per face (or for the whole buffer) would suffice.
for( int v = 0; v < (int) m_Faces[ f ].m_Vertices.size(); v ++ )
{
glBufferSubData( GL_ARRAY_BUFFER, nOffset, sizeof( COLVERTEX ), &m_Faces[ f ].m_Vertices[ v ].m_VertexData );
nOffset += sizeof( COLVERTEX );
}
}
glBindBuffer( GL_ARRAY_BUFFER, 0 );
}
I had the same problem with other examples that I'd copied from elsewhere so I sat down, did the math myself and I still have the same problem.
Vertex shader:
char *vs3DShader =
"#version 140\n"
"#extension GL_ARB_explicit_attrib_location : enable\n"
"layout (location = 0) in vec3 Position;"
"layout (location = 1) in vec4 color;"
"layout (location = 2) in vec3 aNormal;"
"out vec4 frag_color;"
"out vec3 Normal;"
"out vec3 FragPos;"
"uniform mat4 model;"
"uniform mat4 view;"
"uniform mat4 projection;"
"void main()"
"{"
" FragPos = vec3(model * vec4(Position, 1.0));"
" gl_Position = projection * view * vec4(FragPos, 1.0);"
// Rotate normals with respect to current Model matrix (object rotation).
" Normal = mat3( transpose( inverse( model ) ) ) * aNormal; "
" // Pass vertex color to fragment shader.. \n"
" frag_color = color;"
"}"
;
Fragment shader:
char *fs3DShader =
"#version 140\n"
"in vec4 frag_color;"
"in vec3 Normal;"
"in vec3 FragPos;"
"out vec4 FragColor;"
"uniform vec3 lightPos; "
"uniform vec3 lightColor; "
"void main()"
"{"
" // ambient\n"
" float ambientStrength = 0.1;"
" vec3 ambient = ambientStrength * lightColor;"
" // diffuse \n"
" vec3 norm = normalize(Normal);"
" vec3 lightDir = normalize(lightPos - FragPos);"
" float diff = max(dot(norm, lightDir), 0.0);"
" vec3 diffuse = diff * lightColor;"
" vec3 result = (ambient + diffuse) * frag_color;"
" FragColor = vec4(result, 1.0);"
"}"
;
Am I missing some sort of smoothing option? I have tried moving my viewpoint to both sides of the sphere and the dots are happening all around - so it isn't where the triangle strip band "closes" that's the problem - its all over the sphere.
See bright dots below:
Update: I just wanted to prove that the wrapping back to zero degrees isn't the problem. Below is an image when only a quarter of each circle is swept through 90 degrees. The dots are still appear in the mid regions.
Floating point accuracy is not infinite, when working with transcendental numbers you will inevitably accumulate errors.
Here is an example program that does the same loop that your program does, except it just prints out the final angle:
#include <cmath>
#include <cstdio>
int main() {
    // Accumulate N angle steps and show how far the running sum drifts
    // from an exact 2*pi - a demonstration of float error accumulation.
    const int kSteps = 72;
    const float twoPi = std::atan(1.0f) * 8;
    const float step = twoPi / kSteps;

    float angle = 0.0f;
    for (int i = 0; i < kSteps; ++i) {
        angle += step;
    }

    std::printf("x - 2pi = %f\n", angle - twoPi);
    return 0;
}
On my system, it prints out -0.000001. Close to zero, but not zero.
If you want two points in your mesh to line up, don't give them different values. Otherwise you get small seams like this.
A typical approach to this problem is to just generate a circle like this:
#include <cmath>
#include <cstdio>
#include <vector>
struct vec2 { float x, y; };

int main() {
    // Generate one ring of N points, deriving each angle directly from the
    // index (a = i * step) instead of accumulating, so circle[0] is exact.
    const int kPoints = 72;
    const float step = std::atan(1.0f) * 8 / kPoints;

    std::vector<vec2> circle;
    circle.reserve(kPoints);
    for (int i = 0; i < kPoints; ++i) {
        const float a = i * step;
        circle.push_back({ std::cos(a), std::sin(a) });
    }
    return 0;
}
At every point in the circle, circle[i], the next point is now just circle[(i+1)%N]. This ensures that the point after circle[N-1] will always be exactly the same as circle[0].
I found a couple of problems with the vertex calculation in the question. Since I was calculating both bottom and top vertices every time I was sweeping around a horizontal slice there was rounding/precision error produced. A point on the top of the current slice should be the same as the bottom point on the next slice up - but I was calculating this top and bottom after incrementing as Dietrich Epp suggested. This resulted in different values. My solution was to re-use the previous top circle vertices as the bottom vertices of the next slice up.
I also hadn't calculated the x/z positions for top and bottom circles using the same sweep angle - I'd incremented the angle which I shouldn't have done.
So fundamentally, problem was caused by 2 overlapping vertices that should have had identical coordinates but were ever so slightly different.
Here's the working solution:
void CSphere2::AddVertices( void )
{
// Build a UV sphere from horizontal slices.  Fix for the stray-dot artefact:
// each latitude circle is computed exactly once (sweep restarted from the same
// base angle), and the previous circle's values are reused verbatim as the
// bottom ring of the next slice, so shared vertices are bit-identical and no
// floating-point seam can open up.
#define SPHERE2_RES 72
// Create sphere using horizontal slices/circles
int nPointsPerCircle = SPHERE2_RES;
int nStackedCircles = SPHERE2_RES;
GLfloat r = m_Size;
GLfloat yAngle = - (PI / 2.0f); // Start at -90deg and work up to +90deg (south to north pole)
GLfloat yAngleStep = PI / nStackedCircles;
// Sweep angle is zero initially for pointing towards me (-Z direction)
GLfloat horizSweepAngle = 0;
GLfloat horizSweepStep = ( 2 * PI ) / nPointsPerCircle;
// Each time we have a slice, the top and bottom radii vary..
GLfloat sweepRadiusTop;
GLfloat sweepRadiusBottom;
GLfloat xBottomPoint;
GLfloat zBottomPoint;
GLfloat xTopPoint;
GLfloat zTopPoint;
// Current circle's ring of x/z values, and the previous (lower) circle's ring.
std::vector<GLfloat> vCircle_x;
std::vector<GLfloat> vCircle_z;
std::vector<GLfloat> vLastCircle_x;
std::vector<GLfloat> vLastCircle_z;
int nFace = 0;
// NOTE(review): the loop runs nStackedCircles + 2 times with the first pass
// skipped below, i.e. nStackedCircles + 1 faces - one more slice than the
// original version produced.  Verify the intended slice count at the poles.
for( int c = 0; c <= nStackedCircles + 1; c ++ )
{
// Draw a circle - note that this always uses two circles - a top and bottom circle.
GLfloat yBottomCircle;
GLfloat yTopCircle;
yTopCircle = r * sin( yAngle + yAngleStep );
yBottomCircle = r * sin( yAngle );
// Radius of the circle at the CURRENT latitude; reused next pass as the bottom ring.
sweepRadiusTop = r * cos( yAngle );
GLfloat xCirclePoint;
GLfloat zCirclePoint;
// Restart the sweep from the same base angle for every circle, so the s-th
// point of adjacent circles is computed from identical angle values.
horizSweepAngle = 0;
vCircle_x.clear();
vCircle_z.clear();
// Now work out the position of the points around each circle - bottom points will always be the
// same as the last top circle points..
for( int s = 0; s < nPointsPerCircle; s ++ )
{
zCirclePoint = sweepRadiusTop * sin( horizSweepAngle );
xCirclePoint = sweepRadiusTop * cos( horizSweepAngle );
vCircle_x.push_back( xCirclePoint );
vCircle_z.push_back( zCirclePoint );
horizSweepAngle += horizSweepStep;
}
if( c == 0 )
{
// First time around there is no last circle, so just use the same points..
vLastCircle_x = vCircle_x;
vLastCircle_z = vCircle_z;
// And don't add vertices until next time..
// NOTE(review): yAngle is NOT advanced on this skipped pass, so the next
// pass recomputes the same starting circle - confirm this is intentional.
continue;
}
// Add 1 face - a triangle strip per slice..
AddFace();
m_Faces[ nFace ].m_DrawType = GL_TRIANGLE_STRIP;
// Emit the strip as (bottom, top) vertex pairs; the final two iterations wrap
// the strip back onto the first points so the ring closes with shared values.
for( int s = 1; s <= nPointsPerCircle + 1; s ++ )
{
if( s == nPointsPerCircle + 1 )
{
// Join the last bottom point with the very first top point
xTopPoint = vCircle_x[ 1 ];
zTopPoint = vCircle_z[ 1 ];
xBottomPoint = vLastCircle_x[ 0 ];
zBottomPoint = vLastCircle_z[ 0 ];
}
else
if( s == nPointsPerCircle )
{
// Join the last bottom point with the very first top point
xTopPoint = vCircle_x[ 0 ];
zTopPoint = vCircle_z[ 0 ];
xBottomPoint = vLastCircle_x[ s - 1 ];
zBottomPoint = vLastCircle_z[ s - 1 ];
}
else
{
xTopPoint = vCircle_x[ s ];
zTopPoint = vCircle_z[ s ];
xBottomPoint = vLastCircle_x[ s - 1 ];
zBottomPoint = vLastCircle_z[ s - 1 ];
}
// Calculate and add the Normal for each vertex.. Normal for a point on surface of a Sphere2 should be the unit vector going from centre
// of the Sphere2 to the surface (x,y,z).
//
// If centre of Sphere2 is 0,0,0 then N = | {x,y,z} - {0,0,0} | = | {x,y,z} |
glm::vec3 vNormalBottom = glm::vec3( xBottomPoint, yBottomCircle, zBottomPoint );
vNormalBottom = glm::normalize( vNormalBottom );
glm::vec3 vNormalTop = glm::vec3( xTopPoint, yTopCircle, zTopPoint );
vNormalTop = glm::normalize( vNormalTop );
// Add bottom of slice vertex..
m_Faces[ nFace ].AddVertexWithNormal( xBottomPoint, yBottomCircle, zBottomPoint, vNormalBottom.x, vNormalBottom.y, vNormalBottom.z );
// Add top of slice vertex, next step position..
m_Faces[ nFace ].AddVertexWithNormal( xTopPoint, yTopCircle, zTopPoint, vNormalTop.x, vNormalTop.y, vNormalTop.z );
}
// Now copy the current circle x/y positions as the last circle positions (bottom circle)..
vLastCircle_x = vCircle_x;
vLastCircle_z = vCircle_z;
int nVertexCount = m_Faces[ nFace ].m_Vertices.size();
m_Faces[ nFace ].m_SideCount = nVertexCount;
// Face colouring colours the vertices so they need to be created first..
m_Faces[ nFace ].SetRGB( m_RGBA.r, m_RGBA.g, m_RGBA.b );
yAngle += yAngleStep;
nFace ++;
}
}
Related
I am working on a simple raytracer in c++. I am currently implementing an intersection function but have encountered some issues.
For some reason, the collision detection only works for a tiny rectangle in my image. In the image below you can see that it draws the room quite fine for a small part of the screen but fails to do so for the rest of the scene. Only a small section gets drawn correctly.
Why does my intersection detection not work? I have included the code for the intersection and draw function below.
LoadTestModel(m_Model);
m_Light.position = glm::vec3(0.0f, -1.0f, 0.0);
m_Light.color = glm::vec3(0.f, 0.f, 0.f);
m_Light.ambient = glm::vec3(0.5f, 0.5f, 0.5f);
m_Camera.position = glm::vec3(0.0, 0.0, -2.0);
m_Camera.yaw = 0.0f;
}
void Lab2Scene::Draw(Window& window)
{
    // Raytrace one frame, casting one primary ray per pixel; skipped entirely
    // unless a render has been requested.
    if (!m_RenderNext) return;
    m_RenderNext = false;

    for (uint32_t row = 0; row < window.GetHeight(); ++row)
    {
        for (uint32_t col = 0; col < window.GetWidth(); ++col)
        {
            // Build the primary ray through this pixel (pinhole camera whose
            // focal length equals half the window height).
            glm::vec3 dir(col - (window.GetWidth() / 2), row - (window.GetHeight() / 2), (window.GetHeight() / 2));
            Ray ray = {};
            ray.direction = glm::normalize(dir) * m_Camera.GetRotationY();
            ray.start = m_Camera.position;

            // Shade with direct light + ambient on a hit, background colour otherwise.
            Intersection hit = {};
            if (ClosestIntersection(ray, m_Model, hit))
            {
                window.PutPixel(col, row, DirectLight(m_Light, hit, m_Model) + m_Model[hit.triangleIndex].color * m_Light.ambient);
            }
            else
            {
                window.PutPixel(col, row, m_Light.color);
            }
        }
    }
}
bool Lab2Scene::ClosestIntersection(const Ray& ray, const std::vector<Triangle>& triangles, Intersection& intersection)
{
    // Finds the nearest triangle hit by `ray` by solving
    //   start + t*dir = v0 + u*e1 + v*e2
    // with Cramer's rule (matrix inverse).  Returns true and fills
    // `intersection` with the closest hit, false if nothing is hit.
    intersection.distance = std::numeric_limits<float>::max();
    bool inters = false;
    for (int i = 0; i < (int)triangles.size(); ++i) {
        // A zero dot product means the ray is parallel to the triangle's plane,
        // which would make the system below singular - skip those.
        float dot = glm::dot(ray.direction, triangles[i].normal);
        if (dot != 0) {
            using glm::vec3;
            using glm::mat3;
            vec3 v0 = triangles[i].v0;
            vec3 v1 = triangles[i].v1;
            vec3 v2 = triangles[i].v2;
            vec3 e1 = v1 - v0;
            vec3 e2 = v2 - v0;
            vec3 b = ray.start - v0;
            mat3 A(-ray.direction, e1, e2);
            // x = (t, u, v): ray parameter and barycentric coordinates.
            vec3 x = glm::inverse(A) * b;
            // Inside the triangle (u, v >= 0, u + v <= 1) and in front of the ray.
            if (x[1] >= 0 && x[2] >= 0 && x[1] + x[2] <= 1 && x[0] >= 0) {
                vec3 intersect = ray.start + (x[0] * ray.direction);
                // Compute the distance once (the original called glm::distance
                // twice per candidate hit) and keep only the nearest.
                float dist = glm::distance(ray.start, intersect);
                if (dist <= intersection.distance) {
                    intersection.position = intersect;
                    intersection.distance = dist;
                    intersection.triangleIndex = i;
                    inters = true;
                }
            }
        }
    }
    return inters;
}
I'm trying to calculate a per tile frustum by getting the screen space coordinates and then using a cross product to get the view frustum planes. However when I check which tiles are affected by a light they are in the opposite direction, as in, they're moving in the opposite direction of the camera as well as being behind it. I've tried changing the cross product order but it doesn't appear to be working either way. Here's the code that generates the frustums and checks if a light intersects it:
//Start by getting the corners in screen space
uint minX = MAX_WORK_GROUP_SIZE * gl_WorkGroupID.x;
uint minY = MAX_WORK_GROUP_SIZE * gl_WorkGroupID.y;
uint maxX = MAX_WORK_GROUP_SIZE * (gl_WorkGroupID.x + 1);
uint maxY = MAX_WORK_GROUP_SIZE * (gl_WorkGroupID.y + 1);
//Convert these corners into NDC and then convert them to view space
vec4 tileCorners[4];
tileCorners[0] = unProject(vec4( (float(minX)/SCREEN_WIDTH) * 2.0f - 1.0f, (float(minY)/SCREEN_HEIGHT) * 2.0f - 1.0f, 1.0f, 1.0f));
tileCorners[1] = unProject(vec4( (float(maxX)/SCREEN_WIDTH) * 2.0f - 1.0f, (float(minY)/SCREEN_HEIGHT) * 2.0f - 1.0f, 1.0f, 1.0f));
tileCorners[2] = unProject(vec4( (float(maxX)/SCREEN_WIDTH) * 2.0f - 1.0f, (float(maxY)/SCREEN_HEIGHT) * 2.0f - 1.0f, 1.0f, 1.0f));
tileCorners[3] = unProject(vec4( (float(minX)/SCREEN_WIDTH) * 2.0f - 1.0f, (float(maxY)/SCREEN_HEIGHT) * 2.0f - 1.0f, 1.0f, 1.0f));
//Create the frustum planes by using the cross product between these points
frustum[0] = CreatePlane(tileCorners[0], tileCorners[1]); //bot
frustum[1] = CreatePlane(tileCorners[1], tileCorners[2]); //right
frustum[2] = CreatePlane(tileCorners[2], tileCorners[3]); //top
frustum[3] = CreatePlane(tileCorners[3], tileCorners[0]); //left
and the functions:
vec4 unProject(vec4 v)
{
    // Clip space -> view space: apply the inverse projection, then undo the
    // perspective divide.
    vec4 viewPos = inverseProjectionMatrix * v;
    viewPos /= viewPos.w;
    return viewPos;
}
vec4 CreatePlane( vec4 b, vec4 c )
{
    // Plane through the eye (view-space origin) containing points b and c.
    // Since the plane passes through the origin, w is 0.
    vec4 plane;
    plane.xyz = normalize( cross( b.xyz, c.xyz ) );
    plane.w = 0;
    return plane;
}
float GetSignedDistanceFromPlane( vec4 p, vec4 eqn )
{
    // The plane passes through the origin (w == 0), so the signed distance
    // reduces to the dot product with the plane normal.
    vec3 n = eqn.xyz;
    return dot( n, p.xyz );
}
And how I check for lights
int threadsPerTile = MAX_WORK_GROUP_SIZE*MAX_WORK_GROUP_SIZE;
for (uint i = 0; i < NUM_OF_LIGHTS; i+= threadsPerTile)
{
uint il = gl_LocalInvocationIndex + i;
if (il < NUM_OF_LIGHTS)
{
PointLight p = pointLights[il];
vec4 viewPos = viewMatrix * vec4(p.position.xyz, 1.0f);
float r = p.radius;
// if (viewPos.z + minDepthZ < r && viewPos.z - maxDepthZ < r)
// {
if( ( GetSignedDistanceFromPlane( viewPos, frustum[0] ) < r ) &&
( GetSignedDistanceFromPlane( viewPos, frustum[1] ) < r ) &&
( GetSignedDistanceFromPlane( viewPos, frustum[2] ) < r ) &&
( GetSignedDistanceFromPlane( viewPos, frustum[3] ) < r) )
{
uint id = atomicAdd(pointLightCount, 1);
pointLightIndex[id] = il;
}
// }
}
}
I've commented out the z part just for debugging. The frustums are completely reversed or I'm doing something very wrong, in this picture I'm looking behind me and up, so tiles are affected which are in the complete opposite direction of the scene, and when I move the camera the tiles move in the opposite directions as well
Apparently the frustum was calculated correctly but something about the ARB extensions (which I thought was unrelated) made everything explode. I used
#extension GL_ARB_compute_variable_group_size : enable
layout( local_size_variable ) in;
Which didn't work at all so I just changed it to
layout(local_size_x = MAX_WORK_GROUP_SIZE, local_size_y = MAX_WORK_GROUP_SIZE) in;
And on the CPU:
glDispatchCompute((1280 / 16), (720 / 16), 1);
//glDispatchComputeGroupSizeARB((1280 / 16), (720 / 16), 1, 16, 16, 1);
Which works fine, so I guess there's something about the ARB method that doesn't initialize the amount of work threads properly
Here is what I need:
Given a point(x,y,z) in 3d space, and a mesh compose of some vertices(x,y,z), to calculate and return the close point coordinate on that mesh.
The function probably like this:
bool closePointOnMesh(const Point& queryPoint, const Mesh& myMesh, float maxDistance);
I have done some searching, and probably I will choose octree to reduce the calculation.
But there are still many details that I can't understand:
1: How should an octree node be subdivided so that each node ends up containing zero or more triangles? It seems easier to subdivide a cell further based on vertices and just store the vertices directly.
2: How the octree structure helps to reduce the calculation, I know if the cell is empty I will just disregard it. But do I need to get all the closest point within each triangle face in a octree cell to the queryPoint, so I finally get the most closest point of all? that sound still heavy. Beside it will be more easier if I just iter through all the triangles, get the closest point from them, which means no need for the octree???
3: Is there a fast way to get the closest point to a point within a triangle face?
4: how the maxDistance limit helps to reduce the calculation?
For #3, here's some code on how to get the closest point of a triangle. It projects the point onto the triangle's plane, and then clamps the barycentric coordinates to [0,1], and uses those values computes the closest point.
Copied below:
vector3 closesPointOnTriangle( const vector3 *triangle, const vector3 &sourcePosition )
{
// Returns the point on the given triangle closest to sourcePosition.
// Quadratic-minimisation approach (cf. Eberly, "Distance Between Point and
// Triangle in 3D"): parameterise the triangle as T(s,t) = V0 + s*edge0 +
// t*edge1 with s >= 0, t >= 0, s + t <= 1, minimise the squared distance,
// then clamp (s,t) back into the valid region depending on where the
// unconstrained minimum fell.
vector3 edge0 = triangle[1] - triangle[0];
vector3 edge1 = triangle[2] - triangle[0];
vector3 v0 = triangle[0] - sourcePosition;
// Coefficients of the squared-distance quadratic in (s,t).
float a = edge0.dot( edge0 );
float b = edge0.dot( edge1 );
float c = edge1.dot( edge1 );
float d = edge0.dot( v0 );
float e = edge1.dot( v0 );
float det = a*c - b*b;
// Unnormalised minimiser; divide by det to get the true (s,t).
float s = b*e - c*d;
float t = b*d - a*e;
if ( s + t < det )
{
if ( s < 0.f )
{
if ( t < 0.f )
{
// Closest feature is vertex 0 or one of its two adjacent edges.
if ( d < 0.f )
{
s = clamp( -d/a, 0.f, 1.f );
t = 0.f;
}
else
{
s = 0.f;
t = clamp( -e/c, 0.f, 1.f );
}
}
else
{
// Closest feature is the edge s = 0.
s = 0.f;
t = clamp( -e/c, 0.f, 1.f );
}
}
else if ( t < 0.f )
{
// Closest feature is the edge t = 0.
s = clamp( -d/a, 0.f, 1.f );
t = 0.f;
}
else
{
// Unconstrained minimum lies inside the triangle: just normalise.
float invDet = 1.f / det;
s *= invDet;
t *= invDet;
}
}
else
{
// Minimum lies beyond the hypotenuse edge s + t = 1.
if ( s < 0.f )
{
float tmp0 = b+d;
float tmp1 = c+e;
if ( tmp1 > tmp0 )
{
// Clamp onto the edge s + t = 1.
float numer = tmp1 - tmp0;
float denom = a-2*b+c;
s = clamp( numer/denom, 0.f, 1.f );
t = 1-s;
}
else
{
// Clamp onto the edge s = 0.
t = clamp( -e/c, 0.f, 1.f );
s = 0.f;
}
}
else if ( t < 0.f )
{
if ( a+d > b+e )
{
// Clamp onto the edge s + t = 1.
float numer = c+e-b-d;
float denom = a-2*b+c;
s = clamp( numer/denom, 0.f, 1.f );
t = 1-s;
}
else
{
// NOTE(review): the reference derivation clamps s via -d/a on this
// branch; this variant uses -e/c - verify against the original paper
// if exactness at this edge case matters.
s = clamp( -e/c, 0.f, 1.f );
t = 0.f;
}
}
else
{
// Clamp onto the edge s + t = 1.
float numer = c+e-b-d;
float denom = a-2*b+c;
s = clamp( numer/denom, 0.f, 1.f );
t = 1.f - s;
}
}
return triangle[0] + s * edge0 + t * edge1;
}
I want to create a page roll effect in a shader. So i have a XZ plane points with y=0. Now i assume a cylender with R radius and Inf. height is lied down on the plane with certain angle rotated in Y axis. See the image:
I want a equation so that paper can rolled over the sphere in the given XZ direction.
what I am doing is:
// Page-roll vertex snippet: wraps a flat sheet (XZ plane, y = 0) around a
// cylinder of radius cylRadius lying on the plane, rolling in direction normDir.
float2 currPoint = gl_Vertex.xz;
float2 normDir = normalize(-1, 0); //direction at which paper will start rolling out.
float cylRadius = 1.f;
// NOTE(review): this is not the distance of the vertex along normDir - the
// projection would be dot(currPoint, normDir).  Mixing normDir components
// with squared vi components like this only coincides with the projection for
// normDir = (-1, 0), which matches the reported symptom that other directions
// fail.  (vi vs currPoint and N vs normDir also look like the same quantities
// under two names - confirm.)
float dist = sqrt(normDir.x *vi.x * vi.x + normDir.y *vi.y * vi.y);
// Arc angle swept when `dist` of the sheet wraps around the cylinder.
float beta = dist / cylRadius;
float3 outPos = 0;
outPos.x = currPoint.x + N.x * cylRadius * sin(beta);
outPos.z = cylRadius * (1 -cos(beta));
outPos.y = currPoint.y + N.y * cylRadius * sin(beta);
but it only works in the case of normDir = normalize(-1, 0), in other cases result not as expected.
I got this.My implementation is based on Pawel's page Flip implimentation ( http://nomtek.com/page-flip-3d/ )
Here is the code in HLSL.
float DistToLine(float2 pt1, float2 pt2, float2 testPt)
{
    // Signed distance from testPt to the infinite line through pt1 -> pt2;
    // the sign tells which side of the line the point lies on.
    float2 along = pt2 - pt1;
    float2 perp = float2(along.y, -along.x);   // perpendicular to the line
    float2 toPt1 = pt1 - testPt;
    return dot(normalize(perp), toPt1);
}
float3 Roll(float2 pos ) //per vertex
{
// Page-flip deformation (after Pawel's page-flip): vertices on the rolling
// side of a moving fold line P1-P2 are wrapped around a cylinder of radius R;
// vertices on the other side stay flat.
float time = param1.z ;
float t = (time);
// A/B and C/D are the end positions of the fold line's two endpoints; the
// line sweeps from B->A and C->D as `time` goes 0 -> 1.
float2 A = float2( 0 , 1 ); //tweak these 4 variables for the direction of Roll
float2 B = float2( 5.f , 1 ); //
float2 C = float2( 1 , 0 ); //
float2 D = float2( 0 , 0 ); //
float2 P1 = lerp( B , A , time ) ;
float2 P2 = lerp( C , D , time ) ; ;
// Unit normal of the fold line, pointing towards the rolled-up side.
float2 N = normalize( float2(-(P2-P1).y , (P2-P1).x ) );
float dist = DistToLine(P1 , P2 , float2(pos.x , pos.y) );
float3 vOut;
if (dist > 0 )
{
// Vary the roll radius with distance from the C-B edge so the curl
// loosens slightly across the page (magic constants .1/.13 are tuned).
float distFromEnd = DistToLine(C , B , float2(pos.x , pos.y) ) ;
float R = lerp( .1 , .13 , distFromEnd );
// Project the vertex onto the fold line, then wrap the overshoot `dist`
// around the cylinder: alpha is the wrapped arc angle.
float2 p = pos - N * dist;
float alpha = dist / R;
float sinAlpha = R * sin(alpha);
vOut.x = p.x + N.x * sinAlpha;
vOut.y = p.y + N.y * sinAlpha;
vOut.z = (1 - cos(alpha)) * R;
}
else
{
// Flat region: pass the vertex through unchanged.
vOut.x = pos.x;
vOut.y = pos.y;
vOut.z = 0;
}
return vOut;
}
I'm trying to find the mouse position in world coordinates but am having trouble finding the right code. At the moment I use this to determine the ray:
float pointX, pointY;
D3DXMATRIX projectionMatrix, viewMatrix, inverseViewMatrix, worldMatrix, translateMatrix, inverseWorldMatrix;
D3DXVECTOR3 direction, origin, rayOrigin, rayDirection;
bool intersect, result;
// Move the mouse cursor coordinates into the -1 to +1 range.
pointX = ((2.0f * (float)mouseX) / (float)m_screenWidth) - 1.0f;
pointY = (((2.0f * (float)mouseY) / (float)m_screenHeight) - 1.0f) * -1.0f;
// Adjust the points using the projection matrix to account for the aspect ratio of the viewport.
m_Direct3D->GetProjectionMatrix(projectionMatrix);
pointX = pointX / projectionMatrix._11;
pointY = pointY / projectionMatrix._22;
// Get the inverse of the view matrix.
m_Camera->GetViewMatrix(viewMatrix);
D3DXMatrixInverse(&inverseViewMatrix, NULL, &viewMatrix);
// Calculate the direction of the picking ray in view space.
direction.x = (pointX * inverseViewMatrix._11) + (pointY * inverseViewMatrix._21) + inverseViewMatrix._31;
direction.y = (pointX * inverseViewMatrix._12) + (pointY * inverseViewMatrix._22) + inverseViewMatrix._32;
direction.z = (pointX * inverseViewMatrix._13) + (pointY * inverseViewMatrix._23) + inverseViewMatrix._33;
// Get the origin of the picking ray which is the position of the camera.
origin = m_Camera->GetPosition();
This gives me the origin and direction of the ray.
But...
I use a custom mesh (not the one from directX) with a heightmap, separated into quadtrees and I don't know if my logic is correct, I tried using the frustum to determine which nodes in the quadtree are visible and so do the checking intersection of triangles only on those nodes, here is this code:
Note* m_mousepos is a vector.
bool QuadTreeClass::getTriangleRay(NodeType* node, FrustumClass* frustum, ID3D10Device* device, D3DXVECTOR3 vPickRayDir, D3DXVECTOR3 vPickRayOrig){
    // Recursively walk the quadtree, visiting only frustum-visible nodes, and
    // ray-test the triangles of each visible leaf.  On a hit, stores the
    // approximate centre of the hit triangle in m_mousepos and returns true.
    // Cull this node (and its whole subtree) if it is outside the frustum.
    if(!frustum->CheckCube(node->positionX, 0.0f, node->positionZ, (node->width / 2.0f)))
    {
        return false;
    }
    // Recurse into any children - triangles live only in leaf nodes.
    int count = 0;
    bool childHit = false;
    for(int i=0; i<4; i++)
    {
        if(node->nodes[i] != 0)
        {
            count++;
            // BUGFIX: the direction/origin arguments were passed in swapped
            // order here, so every recursive level tested a garbage ray.
            if(getTriangleRay(node->nodes[i], frustum, device, vPickRayDir, vPickRayOrig))
            {
                childHit = true;
            }
        }
    }
    // If there were children, this node holds no triangles of its own.
    if(count != 0)
    {
        // BUGFIX: propagate whether any child found a hit (was always false).
        return childHit;
    }
    // Leaf node: intersect each of its triangles with the pick ray.
    for(int i=0; i<node->triangleCount; i++)
    {
        int j = i * 3;
        D3DXVECTOR3 v0( node->vertexArray[j].x, node->vertexArray[j].y, node->vertexArray[j].z);
        D3DXVECTOR3 v1( node->vertexArray[j+1].x, node->vertexArray[j+1].y, node->vertexArray[j+1].z);
        D3DXVECTOR3 v2( node->vertexArray[j+2].x, node->vertexArray[j+2].y, node->vertexArray[j+2].z);
        float fDist, fBary1, fBary2;
        if(IntersectTriangle( vPickRayOrig, vPickRayDir, v0, v1, v2, &fDist, &fBary1, &fBary2))
        {
            // Intersection found: report the triangle's centroid as the pick point.
            // BUGFIX: the x component previously averaged v0.x three times.
            float p1 = (v0.x + v1.x + v2.x)/3;
            float p2 = (v0.y + v1.y + v2.y)/3;
            float p3 = (v0.z + v1.z + v2.z)/3;
            m_mousepos = D3DXVECTOR3(p1, p2, p3);
            return true;
        }
    }
    // BUGFIX: the original fell off the end without returning a value
    // (undefined behaviour); a leaf with no hit reports false.
    return false;
}
bool QuadTreeClass::IntersectTriangle( const D3DXVECTOR3& orig, const D3DXVECTOR3& dir,D3DXVECTOR3& v0, D3DXVECTOR3& v1, D3DXVECTOR3& v2, FLOAT* t, FLOAT* u, FLOAT* v ){
// Moller-Trumbore-style ray/triangle intersection.  Returns TRUE on a hit and
// writes the ray parameter *t and barycentric coordinates *u, *v of the hit.
// Find vectors for two edges sharing vert0
D3DXVECTOR3 edge1 = v1 - v0;
D3DXVECTOR3 edge2 = v2 - v0;
// Begin calculating determinant - also used to calculate U parameter
D3DXVECTOR3 pvec;
D3DXVec3Cross( &pvec, &dir, &edge2 );
// If determinant is near zero, ray lies in plane of triangle
FLOAT det = D3DXVec3Dot( &edge1, &pvec );
D3DXVECTOR3 tvec;
// Fold the determinant's sign into tvec so the positive-det comparisons below
// handle both triangle windings.
if( det > 0 )
{
tvec = orig - v0;
}
else
{
tvec = v0 - orig;
det = -det;
}
// Reject near-parallel rays: a tiny |det| would make the division unstable.
if( det < 0.0001f )
return FALSE;
// Calculate U parameter and test bounds
*u = D3DXVec3Dot( &tvec, &pvec );
if( *u < 0.0f || *u > det )
return FALSE;
// Prepare to test V parameter
D3DXVECTOR3 qvec;
D3DXVec3Cross( &qvec, &tvec, &edge1 );
// Calculate V parameter and test bounds
*v = D3DXVec3Dot( &dir, &qvec );
if( *v < 0.0f || *u + *v > det )
return FALSE;
// Calculate t, scale parameters, ray intersects triangle
// NOTE(review): *t is never checked against 0, so hits behind the ray origin
// are also reported - confirm callers expect that.
*t = D3DXVec3Dot( &edge2, &qvec );
FLOAT fInvDet = 1.0f / det;
*t *= fInvDet;
*u *= fInvDet;
*v *= fInvDet;
return TRUE;
}
Please is this code right? If it is then my problem must be related to the quadtree.
Thanks!
Iterating over all visible triangles to find the intersection is very expensive. Additionally, the cost will rise as your heightmap gets finer.
For my heightmap I use a different approach:
I do a step-by-step search along the click ray, starting at its origin. At every step the current position is moved along the ray and tested against the height of the heightmap (so you need a height function). If the current position falls below the heightmap, the last interval is searched again with an additional, finer iteration to refine the hit position. This works as long as the heightmap's height values don't vary at too high a frequency relative to the step size (otherwise you could jump over a peak).