I try to implement the idea behind the FP camera to move the player object so I have 4 vectors position, right, up and front, I use the following method to update vectors and populate the matrix to send to the shader:
void Mesh::updateVectors() {
glm::vec3 f;
f.x = cos(glm::radians(this->yaw)) * cos(glm::radians(this->pitch));
f.y = sin(glm::radians(this->pitch));
f.z = sin(glm::radians(this->yaw)) * cos(glm::radians(this->pitch));
this->front = glm::normalize(f);
this->right = glm::normalize(glm::cross(this->front, this->worldUp));
this->up = glm::normalize(glm::cross(this->right, this->front));
matrix = glm::lookAt(this->position, this->position + this->front, this->up);
glm::vec3 s(scale);
matrix = glm::scale(matrix, s);
for (GLuint i = 0; i < this->m_Entries.size(); i++) {
this->m_Entries[i].setModelMatrix(matrix);
}
}
and these methods to receive position and rotation:
void Mesh::ProcessKeyboard(Move_Directions direction, GLfloat deltaTime) {
std::cout << this->front.x << " / " << this->front.z << std::endl;
GLfloat velocity = this->movementSpeed * deltaTime;
if (direction == FORWARD)
this->position += this->front * velocity;
if (direction == BACKWARD)
this->position -= this->front * velocity;
updateVectors();
}
void Mesh::Turn(GLfloat y) {
this->yaw += y;
this->updateVectors();
}
the object can rotate correctly but always move along one axis (1,0,0) not at the front (Direction) vector.
this method runs successfully with camera which can move in any direction I point to.
glm::vec3 front;
position.x = cos(glm::radians(this->yaw)) * cos(glm::radians(this->pitch));
position.y = sin(glm::radians(this->pitch));
position.z = sin(glm::radians(this->yaw)) * cos(glm::radians(this->pitch));
this->front = glm::normalize(front);
You are normalising a default constructed glm::vec3 front. That's undefined behaviour. Apart from the code above, I don't see where else you modify Mesh::front. You are not touching it in Mesh::turn(), and updating Mesh::yaw doesn't seem to affect Mesh::front in any way.
Related
I keep getting nan (not a number) as a result for the various floats and vector components throughout my program. I am almost 100% certain is has to do with collision because this was never an issue before I messed with the collision code.
Even the sum in the magnitude method is showing up as NaN when I debug it, which leads me to believe a vector is breaking before being passed into the function, but I cannot find which one it is.
Here are the methods that I believe are relevant to the problem
void Simplex::PhysicsInfo::Collision(PhysicsInfo info)
{
vector3 oldVel = velocity;
if (magnitude(oldVel) == 0.0f)
{
return;
}
vector3 nextVelDirect = glm::normalize(info.position - position);
//make all ball to ball collisions elastic
float angle = acosf(glm::dot(oldVel, nextVelDirect)
/ (magnitude(glm::normalize(oldVel)) * magnitude(nextVelDirect)));
angle = sinf(angle);
if (angle < 0)
angle *= -1;
float nextVecMag;
if (magnitude(info.velocity) == 0 && angle != 1)
{
//This next line is not correct, use if actual line isn't working and you absolutely need something
//info.velocity = 0.5f * oldVel.length * nextVelDirect;
//actual line
info.velocity = angle * magnitude(oldVel) * nextVelDirect;
vector3 nextVec = (magnitude(oldVel) * oldVel)
- (magnitude(info.velocity) * info.velocity);
nextVecMag = magnitude(nextVec);
if (nextVecMag < 0)
{
nextVecMag *= -1;
}
nextVecMag = sqrt(nextVecMag);
velocity = nextVecMag * glm::normalize(nextVec);
}
else if (magnitude(info.velocity) == 0)
{
info.velocity = oldVel;
velocity = vector3(0.0f);
}
if (isnan(velocity.x) || isnan(velocity.y) || isnan(velocity.z))
{
std::cout << "-" << std::endl;
}
}
PhysicsInfo::PhysicsInfo(float mss, vector3 pos, vector3 cent, vector3 limit)
{
velocity = vector3(0.1f);
acceleration = vector3(0.0f);
mass = mss;
position = pos;
center = cent;
limits = limit;
frictionMagnitude = 0.005f;
}
vector3 PhysicsInfo::normalize(const vector3 &v)
{
float sum = (v.x * v.x) + (v.y * v.y) + (v.z * v.z);
if (sum < 0)
{
sum *= -1;
}
float length_of_v = sqrt(sum);
return vector3(v.x / length_of_v, v.y / length_of_v, v.z / length_of_v);
}
float PhysicsInfo::magnitude(const vector3 &v)
{
float sum = (v.x * v.x) + (v.y * v.y) + (v.z * v.z);
if (sum < 0)
{
sum *= -1;
}
float length_of_v = sqrt(sum);
if (isnan(length_of_v))
{
throw ExceptionCollidedUnwind;
}
return length_of_v;
}
Sorry for the formatting. I am not used to posting here. Any help would be greatly appreciated.
I have not fixed the issue yet, but I have deciphered that the cause was the parameter to the acosf() function located the the Collision method. The dot method sometimes returns outside of the required range of [-1, 1], the required range of floats for the parameter to the acosf() method. This was caused because I did not normalize both vectors before getting the dot product.
I am trying to implement an omni-directional light source (a.k.a., point light source) in my raytracing program in C++. I am not getting the expected results, but I can't figure out the problem. Maybe someone can see what I am doing wrong.
I have included the two functions that are responsible for raytracing and the light. The ClosestIntersection function finds the closest intersection and a triangle. That is used later in the DirectLight function.
I would really appreciate any help.
#include <iostream>
#include <glm/glm.hpp>
#include <SDL.h>
#include "SDLauxiliary.h"
#include "TestModel.h"
#include "math.h"
using namespace std;
using glm::vec3;
using glm::mat3;
// ----------------------------------------------------------------------------
// GLOBAL VARIABLES
const int SCREEN_WIDTH = 500;
const int SCREEN_HEIGHT = 500;
SDL_Surface* screen;
int t;
vector<Triangle> triangles;
float focalLength = 900;
vec3 cameraPos(0, 0, -4.5);
vec3 lightPos(0.5, 0.5, 0);
vec3 lightColor = 14.f * vec3(1,1,1);
// Translate camera
float translation = 0.1; // use this to set translation increment
// Rotate camera
float yaw;
vec3 trueCameraPos;
const float PI = 3.1415927;
// ----------------------------------------------------------------------------
// CLASSES
class Intersection;
// ----------------------------------------------------------------------------
// FUNCTIONS
void Update();
void Draw();
bool ClosestIntersection(vec3 start, vec3 dir, const vector<Triangle>& triangles,
Intersection& closestIntersection);
vec3 DirectLight(const Intersection& i);
// ----------------------------------------------------------------------------
// STRUCTURES
struct Intersection
{
vec3 position;
float distance;
int triangleIndex;
};
float m = std::numeric_limits<float>::max();
int main(int argc, char* argv[])
{
LoadTestModel(triangles);
screen = InitializeSDL(SCREEN_WIDTH, SCREEN_HEIGHT);
t = SDL_GetTicks(); // Set start value for timer.
while (NoQuitMessageSDL())
{
Update();
Draw();
}
SDL_SaveBMP(screen, "screenshot.bmp");
return 0;
}
void Update()
{
// Compute frame time:
int t2 = SDL_GetTicks();
float dt = float(t2 - t);
t = t2;
cout << "Render time: " << dt << " ms." << endl;
}
}
void Draw()
{
if (SDL_MUSTLOCK(screen))
SDL_LockSurface(screen);
for (int y = 0; y<SCREEN_HEIGHT; ++y)
{
for (int x = 0; x < SCREEN_WIDTH; ++x)
{
vec3 start = cameraPos;
vec3 dir(x - SCREEN_WIDTH / 2, y - SCREEN_HEIGHT / 2, focalLength);
Intersection intersection;
if (ClosestIntersection(start, dir, triangles, intersection))
{
//vec3 theColor = triangles[intersection.triangleIndex].color;
vec3 theColor = DirectLight(intersection);
PutPixelSDL(screen, x, y, theColor);
}
else
{
vec3 color(0, 0, 0);
PutPixelSDL(screen, x, y, color);
}
}
}
if (SDL_MUSTLOCK(screen))
SDL_UnlockSurface(screen);
SDL_UpdateRect(screen, 0, 0, 0, 0);
}
bool ClosestIntersection(vec3 s, vec3 d,
const vector<Triangle>& triangles, Intersection& closestIntersection)
{
closestIntersection.distance = m;
for (size_t i = 0; i < triangles.size(); i++)
{
vec3 v0 = triangles[i].v0;
vec3 v1 = triangles[i].v1;
vec3 v2 = triangles[i].v2;
vec3 u = v1 - v0;
vec3 v = v2 - v0;
vec3 b = s - v0;
vec3 x;
// Determinant of A = [-d u v]
float det = -d.x * ((u.y * v.z) - (v.y * u.z)) -
u.x * ((-d.y * v.z) - (v.y * -d.z)) +
v.x * ((-d.y * u.z) - (u.y * -d.z));
// Cramer'r Rule for t = x.x
x.x = (b.x * ((u.y * v.z) - (v.y * u.z)) -
u.x * ((b.y * v.z) - (v.y * b.z)) +
v.x * ((b.y * u.z) - (u.y * b.z))) / det;
if (x.x >= 0)
{
// Cramer'r Rule for u = x.y
x.y = (-d.x * ((b.y * v.z) - (v.y * b.z)) -
b.x * ((-d.y * v.z) - (v.y * -d.z)) +
v.x * ((-d.y * b.z) - (b.y * -d.z))) / det;
// Cramer'r Rule for v = x.z
x.z = (-d.x * ((u.y * b.z) - (b.y * u.z)) -
u.x * ((-d.y * b.z) - (b.y * -d.z)) +
b.x * ((-d.y * u.z) - (u.y * -d.z))) / det;
if (x.y >= 0 && x.z >= 0 && x.y + x.z <= 1 && x.x < closestIntersection.distance)
{
closestIntersection.position = x;
closestIntersection.distance = x.x;
closestIntersection.triangleIndex = i;
}
}
}
//end of for loop
if (closestIntersection.distance != m)
{
return true;
}
else
{
return false;
}
}
vec3 DirectLight(const Intersection& i)
{
vec3 n = triangles[i.triangleIndex].normal;
vec3 r = lightPos - i.position;
float R2 = r.x * r.x + r.y * r.y + r.z * r.z;
vec3 D = (lightColor * fmaxf((glm::dot(glm::normalize(r), n)), 0)) / (4 * PI * R2);
return D;
}
If I'm understanding the code in ClosestIntersection correctly, here's what it's doing for each triangle:
Let u,v be the vectors from one vertex of the triangle to the other two vertices. Let d be (the reverse of) the direction of the ray we're considering.
And let b be the vector from that vertex of the triangle to the camera.
Find p,q,r so that b = pd+qu+rv (p,q,r are what your code calls x.x, x.y, x.z).
Now the ray meets the triangle if p>0, q>=0, r>=0, q+r<=1 and the distance to the intersection point is p.
So, the conditions on q,r make sense; the idea is that b-qu-rv is the vector from the camera to the relevant point in the triangle and it's in direction d. Your distances aren't really distances, but along a single ray they're the same multiple of the actual distance, which means that this works fine for determining which triangle you've hit, and that's all you use them for. So far, so good.
But then you say closestIntersection.position = x; and surely that's all wrong, because this x isn't in the same coordinate system as your camera location, triangle vertices, etc. It's in this funny "how much of d, how much of u, how much of v" coordinate system which isn't even the same from one triangle to the next. (Which is why you are getting discontinuities at triangle boundaries even within a single face, I think.)
Try setting it to v0+x.y*(v1-v0)+x.z*(v2-v0) instead (I think this is right; it's meant to be the actual point where the ray crosses the triangle, in the same coordinates as all your other points) and see what it does.
This isn't a super-great answer, but I managed to make your code work without the strange shading discontinuities. The problem happens in ClosestIntersection and maybe Gareth's answer covers it. I need to stop looking at this now, but I wanted to show you what I have before I leave, and I need an Answer to post some code.
// This starts with some vec3 helper functions which make things
// easier to look at
float Dot(const vec3& a, const vec3& b) {
return a.x * b.x + a.y * b.y + a.z * b.z;
}
vec3 Cross(const vec3& a, const vec3& b) {
return vec3(a.y*b.z - a.z*b.y, a.z*b.x - a.x*b.z, a.x*b.y - a.y*b.x);
}
float L2(const vec3& v) { return v.x*v.x + v.y*v.y + v.z*v.z; }
float Abs(const vec3& v) { return std::sqrt(L2(v)); }
// Here is the replacement version of ClosestIntersection
bool ClosestIntersection(vec3 cam, vec3 dir,
const vector<Triangle>& triangles, Intersection& closestIntersection)
{
closestIntersection.distance = m;
vec3 P0 = cam;
vec3 P1 = cam + dir;
for (size_t i = 0; i < triangles.size(); ++i) {
vec3 v0 = triangles[i].v0;
vec3 v1 = triangles[i].v1;
vec3 v2 = triangles[i].v2;
// Dan Sunday
// http://geomalgorithms.com/a06-_intersect-2.html
vec3 u = v1 - v0;
vec3 v = v2 - v0;
// w = P-v0, solve w = su +tv (s, t are parametric scalars)
vec3 n = Cross(u, v);
float ri = Dot(n, (v0 - P0)) / Dot(n, (P1 - P0));
vec3 Pi = P0 + ri * (P1- P0);
vec3 w = Pi - v0;
// s = w . (n x v) / (u . (n x v))
// t = w . (n x u) / (v . (n x u))
float s = Dot(w, Cross(n, v)) / Dot(u, Cross(n, v));
float t = Dot(w, Cross(n, u)) / Dot(v, Cross(n, u));
if(s >= 0 && t >= 0 && s+t <= 1) {
float dist = Abs(cam - Pi);
if(dist < closestIntersection.distance) {
closestIntersection.position = Pi;
closestIntersection.distance = dist;
closestIntersection.triangleIndex = int(i);
}
}
}
return closestIntersection.distance != m;
}
Good luck.
I have a function called getWorldPosition that is supposed to return a vec3 representing the current position of any VisualObject (a super class I defined).
glm::vec3 VisualObject::getWorldPosition()
{
glm::mat4 totalTransformation = getParentModelMatrix() * modelMatrix;
return totalTransformation[3].xyz;
} // end getWorldPosition
I am trying to use the getWorldPosition function to calculate the distance between two objects in the world.
for (int i = enemiesOnBoard.size() - 1; i >= 0; i--){
EnemySphere* s = (EnemySphere*)enemiesOnBoard.at(i);
glm::vec3 sPosition = s->getWorldPosition();
cout << sPosition[0] << endl;
for (int j = cannonBallsOnBoard.size() - 1; j >= 0; j--){
CannonBall* cb = (CannonBall*)cannonBallsOnBoard.at(i);
glm::vec3 cbPosition = cb->getWorldPosition();
GLfloat radiiSum = s->sRadius + cb->sRadius;
GLfloat distance = calcDistance(sPosition, cbPosition);
//cout << distance << endl;
if (distance < radiiSum){
//cout << "COLLISION BABY!" << endl;
}
}
}
The problem is that every getWorldPosition is returning a vec3 with 0 for the x,y,z coordinate.
One of the spheres is defined as such,
EnemySphere* s = new EnemySphere();
enemiesOnBoard.push_back(s);
s->setShader(glutObjectShaderProgram);
s->addController(new EnemySphereController(rand() % 8 - 3.5, 1.0));
s->initialize();
addChild(s);
The relevant controller is this:
EnemySphereController::EnemySphereController(GLfloat x, GLfloat r, GLfloat t)
: Controller(), startX(x), rate(r), translation(t)
{ }
void EnemySphereController::update(float elapsedTimeSec){
if (translation < 3.9f){
translation += elapsedTimeSec * rate;
}
else {
target->getParent()->removeChild(target->getObjectSerialNumber());
}
glm::mat4 t4;
t4 = glm::translate(glm::mat4(1.0f), glm::vec3(startX, -2.50f, translation)); //add 0.5 because the sphere is calculated from the center
target->fixedTransformation = t4;
}
I know this is a complicated problem, but do you guys have any ideas on where I can start?
As I know, the world position resides in glm::vec3(mat[3], mat[7], mat[11]), so you should change your function to:
glm::vec3 VisualObject::getWorldPosition()
{
glm::mat4 totalTransformation = getParentModelMatrix() * modelMatrix;
return glm::vec3(totalTransformation[3], totalTransformation[7], totalTransformation[11]);
} // end getWorldPosition
The part which you were using as translation is used for projection.
So, I am trying to create a class in OpenGL that will make objects very easy to create, delete and orient. The interface would have the standard functions move(), translate(), rotate(), scale(), but also has a few methods related to parenting. I want an object to be able to set a joint to its parent and rotate, scale and transform along with its parents. This would allow me to create essentially a human skeleton from smaller components.
Assuming I have the vertex drawing working (which I do), but I am having a little bit of trouble constructing the MVP for the model here was my thought process. If I just knew how to combine rotation matrices I would be okay.. but I don't think that's possible without quaternions. Anyone know of how to fix or a better way to solve it?
glm::vec3 scaling; //Scale x, y, z
glm::vec3 rotation; //Rotation x, y, z
glm::vec3 position; //Position x, y, z
glm::vec3 offset; //Offset of the connection between the parent and this
Drawable* parent; //Parent connection joint
std::set<Drawable* const> children; //All child connections
inline glm::mat4 getModelMatrixCopy() const
{
glm::mat4 mm(1.0f);
glm::mat4 s(1.0f);
s = glm::scale(s, scaling);
mm = glm::translate(mm, position);
mm = glm::rotate(mm, glm::radians(rotation.x), glm::vec3(1.0, 0.0, 0.0f));
mm = glm::rotate(mm, glm::radians(rotation.y), glm::vec3(0.0, 1.0, 0.0f));
mm = glm::rotate(mm, glm::radians(rotation.z), glm::vec3(0.0, 0.0, 1.0f));
return mm * s;
}
inline void translate(float x, float y, float z)
{
position.x += x;
position.y += y;
position.z += z;
for (auto it = children.begin(); it != children.end(); it++)
{
(*it)->translate(x, y, z);
}
}
inline void move(float x, float y, float z)
{
for (auto it = children.begin(); it != children.end(); it++)
{
auto child = (*it);
float xOffset = (position.x - child->position.x);
float yOffset = (position.y - child->position.y);
float zOffset = (position.z - child->position.z);
child->move(x + xOffset, y + yOffset, z + zOffset);
}
position.x = x;
position.y = y;
position.z = z;
}
inline void rotate(float xDeg, float yDeg, float zDeg)
{
//???? This just seems wrong, need to rotate based on the addition of all previous children
rotation.x += xDeg;
rotation.y += yDeg;
rotation.z += zDeg;
for (auto it = children.begin(); it != children.end(); it++)
{
auto child = (*it);
child->rotate(xDeg, yDeg, zDeg);
}
}
inline void scale(float x, float y, float z)
{
scaling.x *= x;
scaling.y *= y;
scaling.z *= z;
for (auto it = children.begin(); it != children.end(); it++)
{
auto child = (*it);
child->offset.x *= x;
child->offset.y *= y;
child->offset.z *= z;
child->scale(x, y, z);
}
}
I have been trying to use vectors to move objects at angles and I did get it working, however, when I try to move an object to a specific point it gets there and then disappears. In my code I test if within the next step if it will reach it's destination and if it will, I snap it to the destination.
void Dot::moveToVector(Vector& vec)
{
float dx;
float dy;
dx = vec.X - position.X;
dy = vec.Y - position.Y;
Vector distanceVec(dx, dy);
float distance = distanceVec.Length();
float scale;
scale = speed / distance;
velocity.X = dx * scale;
velocity.Y = dy * scale;
if(velocity.X < scale || velocity.Y < scale)
{
velocity.X = 0;
velocity.Y = 0;
position.X = vec.X;
position.Y = vec.Y;
}
move();
}
When I debugged it, one frame after it snaps into position, the x and y values of the position = -nan(0x400000).
scale = speed / distance;
If distance == 0 what do you think will happen?
When your object reaches the target position, distance becomes zero. Then you are dividing by distance. I suspect that is why your object disappears!
Here is a more straightforward way to set it up:
void Dot::moveToVector(Vector& vec)
{
Vector distanceVec = vec - position;
float distance = distanceVec.Length();
if(distance <= speed)
{
velocity.X = 0;
velocity.Y = 0;
position.X = vec.X;
position.Y = vec.Y;
}
else
{
Vector direction = (distanceVec / distance);
velocity = direction * speed;
}
move();
}