I want to find when a collision between a static and a moving ball occurs, but the algorithm I came up with sometimes doesn't detect a collision, and the moving ball goes through the static one. The moving ball is affected by gravity and the static one is not.
Here's my collision detection code:
// Returns the time (seconds, within the current physics step) at which the
// two spheres first touch, or -1.0f when no collision occurs this frame.
GLfloat whenSpheresCollide(const sphere2d &firstSphere, const sphere2d &secondSphere)
{
// Work in the frame of secondSphere: P = relative position, V = relative velocity.
Vector2f relativePosition = subtractVectors(firstSphere.vPosition, secondSphere.vPosition);
Vector2f relativeVelocity = subtractVectors(firstSphere.vVelocity, secondSphere.vVelocity);
GLfloat radiusSum = firstSphere.radius + secondSphere.radius;
//We'll find the time when objects collide if a collision takes place
//
//Relative motion: r(t) = P + t * V, collision when |r(t)| = R (R = radius sum)
//
//|P + t*V|^2 = R^2 expands to the quadratic
//
//  (V.V) * t^2 + 2t( P . V ) + (P.P - R^2) = 0
//
//Reduced discriminant (the factor of 4 cancels):
//
//  delta = ( P . V )^2 - (V.V) * (P.P - R^2)
//
//We are interested in the lowest t (first moment of contact):
//
//  t = ( -( P . V ) - sqrt(delta) ) / (V.V)
//
GLfloat speedSquared = squarev(relativeVelocity);
// BUGFIX: if there is no relative motion the distance never changes, so no
// new collision can occur -- and dividing by V.V below would be a /0.
if (speedSquared <= 0.0f)
{
return -1.0f;
}
GLfloat equationDelta = squaref( dotProduct(relativePosition, relativeVelocity) ) - speedSquared * ( squarev( relativePosition ) - squaref(radiusSum) );
if (equationDelta >= 0.0f)
{
// Smaller root = the instant the spheres first touch.
GLfloat collisionTime = ( - dotProduct(relativePosition, relativeVelocity) - sqrtf(equationDelta) ) / speedSquared;
// Only accept impacts inside the current step; a negative root means the
// contact happened in the past (e.g. the spheres already overlap).
if (collisionTime >= 0.0f && collisionTime <= 1.0f / GAME_FPS)
{
return collisionTime;
}
}
return -1.0f; // no contact within this frame
}
And here is the updating function that calls collision detection:
void GamePhysicsManager::updateBallPhysics()
{
//
//Update velocity
vVelocity->y -= constG / GAME_FPS; //v = a * t = g * 1 sec / (updates per second)
shouldApplyForcesToBall = TRUE;
vPosition->x += vVelocity->x / GAME_FPS;
vPosition->y += vVelocity->y / GAME_FPS;
if ( distanceBetweenVectors( *pBall->getPositionVector(), *pBasket->getPositionVector() ) <= pBasket->getRadius() + vectorLength(*vVelocity) / GAME_FPS )
{
//Ball sphere
sphere2d ballSphere;
ballSphere.radius = pBall->getRadius();
ballSphere.mass = 1.0f;
ballSphere.vPosition = *( pBall->getPositionVector() );
ballSphere.vVelocity = *( pBall->getVelocityVector() );
sphere2d ringSphereRight;
ringSphereRight.radius = 0.05f;
ringSphereRight.mass = -1.0f;
ringSphereRight.vPosition = *( pBasket->getPositionVector() );
//ringSphereRight.vPosition.x += pBasket->getRadius();
ringSphereRight.vPosition.x += (pBasket->getRadius() - ringSphereRight.radius);
ringSphereRight.vVelocity = zeroVector();
GLfloat collisionTime = whenSpheresCollide(ballSphere, ringSphereRight);
if ( collisionTime >= 0.0f )
{
DebugLog("collision");
respondToCollision(&ballSphere, &ringSphereRight, collisionTime, pBall->getRestitution() * 0.75f );
}
//
//Implement selection of the results that are first to collide collision
vVelocity->x = ballSphere.vVelocity.x;
vVelocity->y = ballSphere.vVelocity.y;
vPosition->x = ballSphere.vPosition.x;
vPosition->y = ballSphere.vPosition.y;
}
Why isn't the collision being detected in 100% of cases? It's being detected only in 70% of cases.
Thanks.
UPDATE: Problem seems to be solved when I change FPS from 30 to 10. How does FPS affect my collision detection?
delta = ( P[0] . V[0] )^2 - V[0]^2 * (P[0]^2 - R^2)
Shouldn't that be delta = b^2 - 4ac?
[Edit] Oh I see, you factored the 4 out. In that case, are you sure you're considering both solutions for t?
t = ( -( P[0] . V[0] ) - sqrt(delta) ) / V[0]^2
and
t = ( -( P[0] . V[0] ) + sqrt(delta) ) / V[0]^2
How large are the spheres and how fast are they moving? Can a sphere "jump" over the second one during a frame (i.e., is its velocity vector longer than its width)?
Along those lines, what happens if you remove the upper limit here:
if (collisionTime >= 0.0f && collisionTime <= 1.0f / GAME_FPS)
{
return collisionTime;
}
If the sphere was moving too fast, maybe your algorithm is detecting a collision that happened more than one frame ago .. (?)
Related
I want to move my carrom coins when the striker hits them. I am using a simple setVelocity, but the coins look like they are floating in the air above the carrom board, so please help me move them smoothly.
carrom board velocity of carroms
// Fires the striker: converts the drag offset between the touch start point
// and the striker's current location into an initial physics velocity.
void GameScene::adjustStrikerVelocity() {
// Let the physics engine drive the striker from now on.
strikerSprite->getPhysicsBody()->setDynamic(true);
// Horizontal drag offset, measured from the touch start to the striker.
float x = start.x - ( origin.x + strikerSprite->getPositionX() );
// Vertical drag offset; the board's position and half-height convert the
// striker's board-local Y into the same space as the touch point.
float y = start.y - ( origin.y + boardSprite->getPositionY( ) - boardSprite->getContentSize().height/2 + strikerSprite->getPositionY( ) );
// Scale the drag into a launch velocity.
strikerSprite->getPhysicsBody()->setVelocity( Vec2( x * powerBooster, y * powerBooster ) );
// NOTE(review): this adds a constant diagonal push on every shot on top of
// the velocity above -- presumably a tuning hack; confirm it is intentional.
strikerSprite->getPhysicsBody()->applyForce(Vec2(100,100),Vec2::ZERO);
// Clamp top speed and let the striker decelerate over time.
strikerSprite->getPhysicsBody()->setVelocityLimit(400);
strikerSprite->getPhysicsBody()->setLinearDamping(0.5);
// Poll every frame until the striker (puck) comes to rest.
this->schedule(schedule_selector(GameScene::checkPuck));
}
So I'm trying to rotate a point about another point in a window, drawing it with DirectX. My issue is that the rotation is in a weird shape:
http://prntscr.com/iynh5f
What I'm doing is just rotating a point around the center of a window and drawing lines between the points.
vec2_t vecCenter1 { gui.iWindowSize[ 0 ] / 2.f, gui.iWindowSize[ 1 ] / 2.f };
for ( float i { 0.f }; i < 360.f; i += 2.f )
{
vec2_t vecLocation { vecCenter1.x, vecCenter1.y - 100.f };
static vec2_t vecOldLocation = vecLocation;
vecLocation.Rotate( i, vecCenter1 );
if ( i > 0.f )
Line( vecOldLocation, vecLocation, 2, true, D3DCOLOR_ARGB( 255, 255, 255, 255 ) );
vecOldLocation = vecLocation;
}
Here is my rotation:
void vec2_t::Rotate( float flDegrees, vec2_t vecSubtractVector )
{
flDegrees = ToRadian( flDegrees );
float flSin = sin( flDegrees );
float flCos = cos( flDegrees );
*this -= vecSubtractVector;
x = x * flCos - y * flSin;
y = x * flSin + y * flCos;
*this += vecSubtractVector;
}
I've tried a few different methods of rotation and none of them seem to work. If anyone could tell my what I'm doing wrong, I'd appreciate it.
Key lines:
x = x * flCos - y * flSin;
y = x * flSin + y * flCos; << problem
The second line is using the modified value of x, whereas it should be using the original. You must cache both coordinates (or at least x) before updating:
void vec2_t::Rotate( float flDegrees, vec2_t vecSubtractVector )
{
float flRadians = ToRadian( flDegrees );
float flSin = sin( flRadians );
float flCos = cos( flRadians );
// cache both values + pre-subtract
float xOld = x - vecSubtractVector.x;
float yOld = y - vecSubtractVector.y;
// perform the rotation and add back
x = xOld * flCos - yOld * flSin + vecSubtractVector.x;
y = xOld * flSin + yOld * flCos + vecSubtractVector.y;
}
To get rid of the if-statement in your for-loop, just compute the first point outside the loop, and start from the delta value instead of zero
Don't use static because it might cause thread safety issues (although not important in your case) - just declare it outside the loop
You seem to be missing a line segment - the condition needs to be <= 360.f (ideally plus an epsilon)
vec2_t vecCenter1 = { gui.iWindowSize[ 0 ] / 2.f, gui.iWindowSize[ 1 ] / 2.f };
const float delta_angle = 2.f;
vec2_t vecOldLocation = { vecCenter1.x, vecCenter1.y - 100.f };
for ( float i = delta_angle; i <= 360.f; i += delta_angle ) // complete cycle
{
vec2_t vecLocation = { vecCenter1.x, vecCenter1.y - 100.f };
vecLocation.Rotate( i, vecCenter1 );
Line( vecOldLocation, vecLocation, 2, true, // no if statement
D3DCOLOR_ARGB( 255, 255, 255, 255 ) );
vecOldLocation = vecLocation;
}
I took someone's advice but it did not work as I intended:
M=inverse(inverse(M)*rotation_matrix);
This is the code for my update:
void TestApp::Update(float dt) {
DirectX::SimpleMath::Matrix rotation =
Matrix::CreateFromYawPitchRoll(rot.y, rot.x, 0.0f); //Rotation Matrix
DirectX::SimpleMath::Matrix position =
Matrix::CreateTranslation(pos); //Postion Matrix
m_view =
DirectX::XMMatrixInverse(nullptr, DirectX::XMMatrixMultiply(
DirectX::XMMatrixInverse(nullptr, position), rotation)); //This uses the advice
//m_view is the Camera/View Matrix
for (int i = 0; i < 256; ++i) {
if (GetAsyncKeyState(i)) {
if (i == 87) { // W
pos.z += dt * playerSpeed; //Move Forward
continue;
}
else if (i == 68) { //D
pos.x -= dt * playerSpeed; //Move Right
continue;
}
else if(i == 83){//S
pos.z -= dt * playerSpeed; //Move Backwards
continue;
}
else if (i == 65) { // A
pos.x += dt * playerSpeed; //Move Left
continue;
}
else if (i == VK_NUMPAD8) {
rot.x -= dt;
continue;
}
else if (i == VK_NUMPAD4) {
rot.y += dt;
}
else if (i == VK_NUMPAD5) {
rot.x += dt;
continue;
}
else if (i == VK_NUMPAD6) {
rot.y -= dt;
}
}
}
The movement works perfectly fine but the rotation is iffy. It rotates around the world origin not like an FPS camera. Any help?
I am using DirectX 11 with DirectX Tool Kit. The model rendered fine, movement forward, backwards, left, right works like an FPS camera but it is rotation the around the world origin(0, 0).
Here is a snippet of an older Game Engine using OpenGL instead of Direct X. You may have to adjust for the handedness of the coordinate system but the basic principles still apply. When working with movement in a 3D environment; the movement that either the camera, player or world objects experience should be done through a switch statement instead of a bunch of if else statements.
Take a look at this snippet for rotational motion done within an OpenGL Game Engine.
// Applies one movement/look action to the player.  fDeltaTime scales the
// motion: the physics time step for key-driven actions, or the raw mouse
// delta for the look actions (see Scene::playerAction).
void Player::move( Action action, float fDeltaTime ) {
// Direction the camera currently faces, from the eye position to the look target.
v3LookDirection = m_v3LookCenter - m_v3Position;
switch( action ) {
// NOTE(review): the four movement cases are elided here; as shown they
// would fall through into each other -- each must end with its own break.
case MOVING_FORWARD: {
// ... code here ...
}
case MOVING_BACK: {
// ... code here ...
}
case MOVING_RIGHT: {
// ... code here ...
}
case MOVING_LEFT: {
// ... code here ...
}
case LOOKING_LEFT: {
// First-person variant (kept for reference): swings the look target
// around a fixed eye position instead.
/*float fSin = -sin( fDeltaTime * m_fAngularSpeed );
float fCos = cos( fDeltaTime * m_fAngularSpeed );
m_v3LookCenter.m_fX = m_v3Position.m_fX + (-fSin * v3LookDirection.m_fZ + fCos * v3LookDirection.m_fX );
m_v3LookCenter.m_fZ = m_v3Position.m_fZ + ( fCos * v3LookDirection.m_fZ + fSin * v3LookDirection.m_fX );
break;*/
// Third Person
// Orbit the eye position around the look target in the XZ plane by
// rotating the look direction; Y (height) is left untouched.
float fSin = sin( fDeltaTime * m_fAngularSpeed );
float fCos = -cos( fDeltaTime * m_fAngularSpeed );
m_v3Position.m_fX = m_v3LookCenter.m_fX + (-fSin * v3LookDirection.m_fZ + fCos * v3LookDirection.m_fX );
m_v3Position.m_fZ = m_v3LookCenter.m_fZ + ( fCos * v3LookDirection.m_fZ + fSin * v3LookDirection.m_fX );
break;
}
case LOOKING_RIGHT: {
// First-person variant (kept for reference).
/*float fSin = sin( fDeltaTime * m_fAngularSpeed );
float fCos = cos( fDeltaTime * m_fAngularSpeed );
m_v3LookCenter.m_fX = m_v3Position.m_fX + (-fSin * v3LookDirection.m_fZ + fCos * v3LookDirection.m_fX );
m_v3LookCenter.m_fZ = m_v3Position.m_fZ + ( fCos * v3LookDirection.m_fZ + fSin * v3LookDirection.m_fX );
break;*/
// Third Person
// Same orbit as LOOKING_LEFT with the sign of the rotation reversed.
float fSin = -sin( fDeltaTime * m_fAngularSpeed );
float fCos = -cos( fDeltaTime * m_fAngularSpeed );
m_v3Position.m_fX = m_v3LookCenter.m_fX + (-fSin * v3LookDirection.m_fZ + fCos * v3LookDirection.m_fX );
m_v3Position.m_fZ = m_v3LookCenter.m_fZ + ( fCos * v3LookDirection.m_fZ + fSin * v3LookDirection.m_fX );
break;
}
case LOOKING_UP: {
// Raise/lower the look target (m_MouseLookState presumably flips the
// sign for inverted mouse -- confirm), then clamp the pitch.
m_v3LookCenter.m_fY -= fDeltaTime * m_fAngularSpeed * m_MouseLookState;
// Check Maximum Values
if ( m_v3LookCenter.m_fY > (m_v3Position.m_fY + m_fMaxUp ) ) {
m_v3LookCenter.m_fY = m_v3Position.m_fY + m_fMaxUp;
} else if ( m_v3LookCenter.m_fY < (m_v3Position.m_fY - m_fMaxDown) ) {
m_v3LookCenter.m_fY = m_v3Position.m_fY - m_fMaxDown;
}
break;
}
} // switch
}
Where all of the declared local and member variables that begin with m_v3... are Vector3 objects. Vector3 objects has a x,y,z components and all of the available math that can be done to vectors and Action is an enumerated type.
And this function is called within my Scene class.
// Translates raw mouse deltas into look actions for the player.  The delta
// itself is forwarded as the "time" argument so the turn speed scales with
// how far the mouse moved.
void Scene::playerAction( float fMouseXDelta, float fMouseYDelta ) {
const bool bTurn = ( fMouseXDelta != 0.0f );
const bool bPitch = ( fMouseYDelta != 0.0f );
// Horizontal motion yaws the view; the sign of the delta picks the side.
if ( bTurn ) {
m_player.move( LOOKING_RIGHT, fMouseXDelta );
}
// Vertical motion pitches the view up or down.
if ( bPitch ) {
m_player.move( LOOKING_UP, fMouseYDelta );
}
}
And also in Scene::update()
void Scene::update() {
UserSettings* pUserSettings = UserSettings::get();
AudioManager* pAudio = AudioManager::getAudio();
bool bPlayerMoving = false;
// Movement
if ( pUserSettings->isAction( MOVING_FORWARD ) ) {
m_player.move( MOVING_FORWARD, GameOGL::getPhysicsTimeStep() );
bPlayerMoving = true;
}
if ( pUserSettings->isAction( MOVING_BACK ) ) {
m_player.move( MOVING_BACK, GameOGL::getPhysicsTimeStep() );
bPlayerMoving = true;
}
if ( pUserSettings->isAction( MOVING_LEFT ) ) {
m_player.move( MOVING_LEFT, GameOGL::getPhysicsTimeStep() );
bPlayerMoving = true;
}
if ( pUserSettings->isAction( MOVING_RIGHT ) ) {
m_player.move( MOVING_RIGHT, GameOGL::getPhysicsTimeStep() );
bPlayerMoving = true;
}
if ( bPlayerMoving && !m_bPlayerWalking ) {
pAudio->setLooping( AUDIO_FOOTSTEPS, true );
pAudio->play( AUDIO_FOOTSTEPS );
m_bPlayerWalking = true;
}
else if ( !bPlayerMoving && m_bPlayerWalking ) {
pAudio->stop( AUDIO_FOOTSTEPS );
m_bPlayerWalking = false;
}
// Bunch more code here.
}
This is also tied into the GameOGL class that works with the messageHandler() that I'm not going to show here. This is coming from a medium to large scale project that consists of nearly 50k lines of code. It is to large to display every working piece here so please don't ask because everything in this engine is integrated together. I just was showing the basic math that is used for doing rotational movement, either if it is invoked by a key press or mouse movement.
Now you have to remember this because it is important. The actual calculations that you see coming from the Player class that does the rotations you may not be able to use directly. If the handedness of the coordinate system is different than the one used here; you will have to use the appropriate trig functions to the appropriate coordinate axis members with the proper signs for the calculations to be correct. When the handedness changes so does the axis of rotation that is being implied as well as the initial direction of the rotation. Isn't 3D Math Fun?
EDIT
Oh I also noticed that you are using DirectX's ::CreateFromYawPitchRoll() to create your rotation matrix; this is okay but you need to be careful with rotations that use standard Euler Angles. If you begin to do rotations in more than one degree of motion at the same time; you will end up experiencing Gimbal Lock. To avoid using Gimbal Lock Problems within 3D rotations; it is better to use Quaternions The math to them is a little harder to take in, the concepts of what they are isn't too hard to understand, but using them is actually quite easy and is also very efficient on calculations. Many math libraries contain them; DirectX's math library should and so does the Open Source GLM math library that is used widely with OpenGL & GLSL. If you are unsure of Gimbal Lock and Quaternions you can do a Google search to look up those topics; there is plenty of information out there about them. Isn't Advanced 3D... eh hem... 4D Math Fun?
You say it rotates from the world origin point of view, like "if you stay on the edge of a carousel and you were looking at the center"
I think you want your object to rotate from it's own center.
The solution is that you should rotate your object and then apply your position matrix.
this is responsable
m_view =
DirectX::XMMatrixInverse(nullptr, DirectX::XMMatrixMultiply(
DirectX::XMMatrixInverse(nullptr, position), rotation))
the fix I think should be to apply the position after the rotation
In OpenGl you would apply the rotation on the Model matrice
glm::mat4 MVPmatrix = projection * view * model;
you can rotate the view or the model matrices and obtain 2 differents results.
I am not aware of your code and DirectX in general but maybe you should invert the 2
m_view =
DirectX::XMMatrixInverse(nullptr, DirectX::XMMatrixMultiply(
DirectX::XMMatrixInverse(nullptr, rotation), position))
have a look http://www.opengl-tutorial.org/beginners-tutorials/tutorial-3-matrices/
I spent quite some time to get this working, but my Sphere just won't display.
Used the following code to make my function:
Creating a 3D sphere in Opengl using Visual C++
And the rest is simple OSG with osg::Geometry.
(Note: Not ShapeDrawable, as you can't implement custom shapes using that.)
Added the vertices, normals, texcoords into VecArrays.
For one, I suspect something misbehaving, as my saved object is half empty.
Is there a way to convert the existing description into OSG?
Reason? I want to understand how to create objects later on.
Indeed, it is linked with a later assignment, but currently I'm just prepairing beforehand.
Sidenote: Since I have to make it without indices, I left them out.
But my cylinder displays just fine without them.
Caveat: I'm not an OSG expert. But, I did do some research.
OSG requires all of the faces to be defined in counter-clockwise order, so that backface culling can reject faces that are "facing away". The code you're using to generate the sphere does not generate all the faces in counter-clockwise order.
You can approach this a couple ways:
Adjust how the code generates the faces, by inserting the faces CCW order.
Double up your model and insert each face twice, once with the vertices on each face in their current order and once with the vertices in reverse order.
Option 1 above will limit your total polygon count to what's needed. Option 2 will give you a sphere that's visible from outside the sphere as well as within.
To implement Option 2, you merely need to modify this loop from the code you linked to:
indices.resize(rings * sectors * 4);
std::vector<GLushort>::iterator i = indices.begin();
for(r = 0; r < rings-1; r++)
for(s = 0; s < sectors-1; s++) {
*i++ = r * sectors + s;
*i++ = r * sectors + (s+1);
*i++ = (r+1) * sectors + (s+1);
*i++ = (r+1) * sectors + s;
}
Double up the set of quads like so:
indices.resize(rings * sectors * 8);
std::vector<GLushort>::iterator i = indices.begin();
for(r = 0; r < rings-1; r++)
for(s = 0; s < sectors-1; s++) {
*i++ = r * sectors + s;
*i++ = r * sectors + (s+1);
*i++ = (r+1) * sectors + (s+1);
*i++ = (r+1) * sectors + s;
*i++ = (r+1) * sectors + s;
*i++ = (r+1) * sectors + (s+1);
*i++ = r * sectors + (s+1);
*i++ = r * sectors + s;
}
That really is the "bigger hammer" solution, though.
Personally, I'm having a hard time figuring out why the original loop isn't sufficient; intuiting my way through the geometry, it feels like it's already generating CCW faces, because each successive ring is above the previous, and each successive sector is CCW around the surface of the sphere from the previous. So, the original order itself should be CCW with respect to the face nearest the viewer.
EDIT Using the OpenGL code you linked before and the OSG tutorial you linked today, I put together what I think is a correct program to generate the osg::Geometry / osg::Geode for the sphere. I have no way to test the following code, but desk-checking it, it looks correct or at least largely correct.
#include <vector>
class SolidSphere
{
protected:
osg::Geode sphereGeode;
osg::Geometry sphereGeometry;
osg::Vec3Array sphereVertices;
osg::Vec3Array sphereNormals;
osg::Vec2Array sphereTexCoords;
std::vector<osg::DrawElementsUInt> spherePrimitiveSets;
public:
SolidSphere(float radius, unsigned int rings, unsigned int sectors)
{
float const R = 1./(float)(rings-1);
float const S = 1./(float)(sectors-1);
int r, s;
sphereGeode.addDrawable( &sphereGeometry );
// Establish texture coordinates, vertex list, and normals
for(r = 0; r < rings; r++)
for(s = 0; s < sectors; s++)
{
float const y = sin( -M_PI_2 + M_PI * r * R );
float const x = cos(2*M_PI * s * S) * sin( M_PI * r * R );
float const z = sin(2*M_PI * s * S) * sin( M_PI * r * R );
sphereTexCoords.push_back( osg::Vec2(s*R, r*R) );
sphereVertices.push_back ( osg::Vec3(x * radius,
y * radius,
z * radius) );
sphereNormals.push_back ( osg::Vec3(x, y, z) );
}
sphereGeometry.setVertexArray ( &spehreVertices );
sphereGeometry.setTexCoordArray( &sphereTexCoords );
// Generate quads for each face.
for(r = 0; r < rings-1; r++)
for(s = 0; s < sectors-1; s++)
{
spherePrimitiveSets.push_back(
DrawElementUint( osg::PrimitiveSet::QUADS, 0 )
);
osg::DrawElementsUInt& face = spherePrimitiveSets.back();
// Corners of quads should be in CCW order.
face.push_back( (r + 0) * sectors + (s + 0) );
face.push_back( (r + 0) * sectors + (s + 1) );
face.push_back( (r + 1) * sectors + (s + 1) );
face.push_back( (r + 1) * sectors + (s + 0) );
sphereGeometry.addPrimitveSet( &face );
}
}
osg::Geode *getGeode() const { return &sphereGeode; }
osg::Geometry *getGeometry() const { return &sphereGeometry; }
osg::Vec3Array *getVertices() const { return &sphereVertices; }
osg::Vec3Array *getNormals() const { return &sphereNormals; }
osg::Vec2Array *getTexCoords() const { return &sphereTexCoords; }
};
You can use the getXXX methods to get the various pieces. I didn't see how to hook the surface normals to anything, but I do store them in a Vec3Array. If you have a use for them, they're computed and stored and waiting to be hooked to something.
That code calls glutSolidSphere() to draw a sphere, but it doesn't make sense to call it if your application is not using GLUT to display a window with 3D context.
There is another way to draw a sphere easily, which is by invoking gluSphere() (you probably have GLU installed):
void gluSphere(GLUquadric* quad,
GLdouble radius,
GLint slices,
GLint stacks);
Parameters
quad - Specifies the quadrics object (created with gluNewQuadric).
radius - Specifies the radius of the sphere.
slices - Specifies the number of subdivisions around the z axis (similar
to lines of longitude).
stacks - Specifies the number of subdivisions along the z axis (similar
to lines of latitude).
Usage:
// If you also need to include glew.h, do it before glu.h
#include <glu.h>
GLUquadric* _quadratic = gluNewQuadric();
if (_quadratic == NULL)
{
std::cerr << "!!! Failed gluNewQuadric" << std::endl;
return;
}
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
glMatrixMode(GL_MODELVIEW);
glLoadIdentity();
glTranslatef(0.0, 0.0, -5.0);
glColor3ub(255, 97, 3);
gluSphere(_quadratic, 1.4f, 64, 64);
glFlush();
gluDeleteQuadric(_quadratic);
It's probably wiser to move the gluNewQuadric() call to the constructor of your class since it needs to be allocated only once, and move the call to gluDeleteQuadric() to the destructor of the class.
#JoeZ's answer is excellent, but the OSG code has some errors/bad practices. Here's the updated code. It's been tested and it shows a very nice sphere.
// Builds a UV sphere of the given radius, tessellated into `rings` latitude
// rows and `sectors` longitude columns, and returns it as an osg::Geode.
osg::ref_ptr<osg::Geode> buildSphere( const double radius,
const unsigned int rings,
const unsigned int sectors )
{
osg::ref_ptr<osg::Geode> sphereGeode = new osg::Geode;
osg::ref_ptr<osg::Geometry> sphereGeometry = new osg::Geometry;
osg::ref_ptr<osg::Vec3Array> sphereVertices = new osg::Vec3Array;
osg::ref_ptr<osg::Vec3Array> sphereNormals = new osg::Vec3Array;
osg::ref_ptr<osg::Vec2Array> sphereTexCoords = new osg::Vec2Array;
float const R = 1. / static_cast<float>( rings - 1 );
float const S = 1. / static_cast<float>( sectors - 1 );
sphereGeode->addDrawable( sphereGeometry );
// Establish texture coordinates, vertex list, and normals
for( unsigned int r( 0 ); r < rings; ++r ) {
for( unsigned int s( 0) ; s < sectors; ++s ) {
float const y = sin( -M_PI_2 + M_PI * r * R );
float const x = cos( 2 * M_PI * s * S) * sin( M_PI * r * R );
float const z = sin( 2 * M_PI * s * S) * sin( M_PI * r * R );
// BUGFIX: the u coordinate must advance by the sector step S, not the
// ring step R (they only coincide when rings == sectors).
sphereTexCoords->push_back( osg::Vec2( s * S, r * R ) );
sphereVertices->push_back ( osg::Vec3( x * radius,
y * radius,
z * radius) )
;
sphereNormals->push_back ( osg::Vec3( x, y, z ) );
}
}
sphereGeometry->setVertexArray ( sphereVertices );
sphereGeometry->setTexCoordArray( 0, sphereTexCoords );
// Attach the per-vertex normals so lighting works (they were computed but
// never hooked up before).  Newer OSG also offers the two-argument
// setNormalArray( array, osg::Array::BIND_PER_VERTEX ) overload.
sphereGeometry->setNormalArray ( sphereNormals );
sphereGeometry->setNormalBinding( osg::Geometry::BIND_PER_VERTEX );
// Generate quads for each face.
for( unsigned int r( 0 ); r < rings - 1; ++r ) {
for( unsigned int s( 0 ); s < sectors - 1; ++s ) {
// BUGFIX: constructing DrawElementsUInt with a count of 4 pre-creates
// four zero-valued indices, and the push_backs below APPEND four more
// (eight indices total, the first quad degenerate at vertex 0).
// Start the element list empty instead.
osg::ref_ptr<osg::DrawElementsUInt> face =
new osg::DrawElementsUInt( osg::PrimitiveSet::QUADS,
0 )
;
// Corners of quads should be in CCW order.
face->push_back( ( r + 0 ) * sectors + ( s + 0 ) );
face->push_back( ( r + 0 ) * sectors + ( s + 1 ) );
face->push_back( ( r + 1 ) * sectors + ( s + 1 ) );
face->push_back( ( r + 1 ) * sectors + ( s + 0 ) );
sphereGeometry->addPrimitiveSet( face );
}
}
return sphereGeode;
}
Changes:
The OSG elements used in the code now are smart pointers1. Moreover, classes like Geode and Geometry have their destructors protected, so the only way to instantiate them are via dynamic allocation.
Removed spherePrimitiveSets as it isn't needed in the current version of the code.
I put the code in a free function, as I don't need a Sphere class in my code. I omitted the getters and the protected attributes. They aren't needed: if you need to access, say, the geometry, you can get it via: sphereGeode->getDrawable(...). The same goes for the rest of the attributes.
[1] See Rule of thumb #1 here. It's a bit old but the advice maintains.
I am trying to perform a realtime painting to the object texture. Using Irrlicht for now, but that does not really matter.
So far, i've got the right UV coordinates using this algorithm:
find out which object's triangle user selected (raycasting, nothing
really difficult)
find out the UV (baricentric) coordinates of intersection point on
that triangle
find out the UV (texture) coordinates of each triangle vertex
find out the UV (texture) coordinates of intersection point
calculate the texture image coordinates for intersection point
But somehow, when i am drawing in the point i got in the 5th step on texture image, i get totally wrong results. So, when drawing a rectangle in cursor point, the X (or Z) coordinate of its is inverted:
Here's the code i am using to fetch texture coordinates:
// Maps the picked 3D point `p` on triangle `tri` to texture-image pixel
// coordinates, paints a rectangle there, and returns those pixel coordinates.
// NOTE(review): `tri` presumably comes from a collision test in WORLD space,
// while the vertex positions compared below are in MODEL space -- the lookup
// only works while the node's transform is identity; confirm (this is the
// likely cause of the mirrored/inverted painting).
core::vector2df getPointUV(core::triangle3df tri, core::vector3df p)
{
// Edge vectors for the barycentric solve, all relative to corner A:
// v0 = A->C, v1 = A->B, v2 = A->p.
core::vector3df
v0 = tri.pointC - tri.pointA,
v1 = tri.pointB - tri.pointA,
v2 = p - tri.pointA;
float dot00 = v0.dotProduct(v0),
dot01 = v0.dotProduct(v1),
dot02 = v0.dotProduct(v2),
dot11 = v1.dotProduct(v1),
dot12 = v1.dotProduct(v2);
// Barycentric weights: u along A->C, v along A->B.
float invDenom = 1.f / ((dot00 * dot11) - (dot01 * dot01)),
u = (dot11 * dot02 - dot01 * dot12) * invDenom,
v = (dot00 * dot12 - dot01 * dot02) * invDenom;
// Scan every mesh buffer for S3DVertex records whose positions match the
// triangle corners, to recover their texture coordinates.
scene::IMesh* m = Mesh->getMesh(((scene::IAnimatedMeshSceneNode*)Model)->getFrameNr());
core::array<video::S3DVertex> VA, VB, VC;
video::SMaterial Material;
for (unsigned int i = 0; i < m->getMeshBufferCount(); i++)
{
scene::IMeshBuffer* mb = m->getMeshBuffer(i);
video::S3DVertex* vertices = (video::S3DVertex*) mb->getVertices();
// CAUTION: this loop variable `v` shadows the barycentric float `v`
// declared above (the shadow ends with the loop, so the later use of
// `v` still refers to the float).
for (unsigned long long v = 0; v < mb->getVertexCount(); v++)
{
if (vertices[v].Pos == tri.pointA)
VA.push_back(vertices[v]); else
if (vertices[v].Pos == tri.pointB)
VB.push_back(vertices[v]); else
if (vertices[v].Pos == tri.pointC)
VC.push_back(vertices[v]);
if (vertices[v].Pos == tri.pointA || vertices[v].Pos == tri.pointB || vertices[v].Pos == tri.pointC)
Material = mb->getMaterial();
if (VA.size() > 0 && VB.size() > 0 && VC.size() > 0)
break;
}
if (VA.size() > 0 && VB.size() > 0 && VC.size() > 0)
break;
}
// Only the FIRST matching vertex per corner is used; if the mesh shares a
// position between vertices with different UVs, this may pick the wrong one.
core::vector2df
A = VA[0].TCoords,
B = VB[0].TCoords,
C = VC[0].TCoords;
// Interpolate the texture coordinates: u moves along A->C, v along A->B.
core::vector2df P(A + (u * (C - A)) + (v * (B - A)));
core::dimension2du Size = Material.getTexture(0)->getSize();
CursorOnModel = core::vector2di(Size.Width * P.X, Size.Height * P.Y);
int X = Size.Width * P.X, Y = Size.Height * P.Y;
// DRAWING SOME RECTANGLE
// NOTE(review): the texture is locked AND simultaneously set as a render
// target here -- verify this combination is valid for your driver.
Material.getTexture(0)->lock(true);
Device->getVideoDriver()->setRenderTarget(Material.getTexture(0), true, true, 0);
Device->getVideoDriver()->draw2DRectangle(video::SColor(255, 0, 100, 75), core::rect<s32>((X - 10), (Y - 10),
(X + 10), (Y + 10)));
Device->getVideoDriver()->setRenderTarget(0, true, true, 0);
Material.getTexture(0)->unlock();
return core::vector2df(X, Y);
}
I just wanna make my object paintable in realtime. My current problems are: wrong texture coordinate calculation and non-unique vertex UV coordinates (so, drawing something on the one side of the dwarfe's axe would draw the same on the other side of that axe).
How should i do this?
I was able to use your codebase and get it to work for me.
Re your second problem "non-unique vertex UV coordinates":
Well, you are absolutely right: you need unique vertex UVs to get this working, which means you have to unwrap your models and avoid shared UV space for e.g. mirrored elements and the like (e.g. the left/right boot — if they share UV space, you'll automatically paint on both where you want one to be red and the other to be green). You can check out "UVLayout" (a tool) or the UV-unwrap modifier in 3ds Max.
Re the first and more important problem: "**wrong texture coordinate calculation":
The calculation of your barycentric coordinates is correct, but I suppose your input data is wrong. I assume you get the triangle and the collision point by using Irrlicht's CollisionManager and TriangleSelector. The problem is that the positions of the triangle's vertices (which you get as a return value from the collision test) are in world coordinates. But you need them in model coordinates for the calculation, so here's what you need to do:
pseudocode:
add the node which contains the mesh of the hit triangle as parameter to getPointUV()
get the inverse absoluteTransformation-Matrix by calling node->getAbsoluteTransformation() [inverse]
transform the vertices of the triangle by this inverse Matrix and use those values for the rest of the method.
Below you'll find my optimized method wich does it for a very simple mesh (one mesh, only one meshbuffer).
Code:
// Returns the texture-space UV coordinates of point p on triangle tri for a
// simple mesh (single mesh buffer).  The triangle delivered by the collision
// manager is in world space, so it is first mapped back into model space
// before its corners are matched against the mesh's vertex data.
irr::core::vector2df getPointUV(irr::core::triangle3df tri, irr::core::vector3df p, irr::scene::IMeshSceneNode* pMeshNode, irr::video::IVideoDriver* pDriver)
{
// Undo the node's absolute transform so the triangle corners line up with
// the (untransformed) vertex positions stored in the mesh buffer.
irr::core::matrix4 toModelSpace(
pMeshNode->getAbsoluteTransformation(),
irr::core::matrix4::EM4CONST_INVERSE);
toModelSpace.transformVect(tri.pointA);
toModelSpace.transformVect(tri.pointB);
toModelSpace.transformVect(tri.pointC);
// Barycentric setup, everything measured from corner A.
irr::core::vector3df
v0 = tri.pointC - tri.pointA,
v1 = tri.pointB - tri.pointA,
v2 = p - tri.pointA;
float dot00 = v0.dotProduct(v0);
float dot01 = v0.dotProduct(v1);
float dot02 = v0.dotProduct(v2);
float dot11 = v1.dotProduct(v1);
float dot12 = v1.dotProduct(v2);
// u weights the A->C edge, v weights the A->B edge.
float invDenom = 1.f / ((dot00 * dot11) - (dot01 * dot01));
float u = (dot11 * dot02 - dot01 * dot12) * invDenom;
float v = (dot00 * dot12 - dot01 * dot02) * invDenom;
// Find the vertex record for each triangle corner to read its UVs.
irr::video::S3DVertex A, B, C;
irr::video::S3DVertex* pVertexData = static_cast<irr::video::S3DVertex*>(
pMeshNode->getMesh()->getMeshBuffer(0)->getVertices());
const unsigned int uVertexCount =
pMeshNode->getMesh()->getMeshBuffer(0)->getVertexCount();
for(unsigned int idx = 0; idx < uVertexCount; ++idx)
{
if( pVertexData[idx].Pos == tri.pointA)
{
A = pVertexData[idx];
}
else if( pVertexData[idx].Pos == tri.pointB)
{
B = pVertexData[idx];
}
else if( pVertexData[idx].Pos == tri.pointC)
{
C = pVertexData[idx];
}
}
// Interpolate the corner UVs with the barycentric weights.
irr::core::vector2df t2 = B.TCoords - A.TCoords;
irr::core::vector2df t1 = C.TCoords - A.TCoords;
irr::core::vector2df uvCoords = A.TCoords + t1*u + t2*v;
return uvCoords;
}