Creating a Unity-Like DrawableObject in OpenGL - C++

So, I am trying to create a class in OpenGL that will make objects very easy to create, delete and orient. The interface would have the standard functions move(), translate(), rotate() and scale(), but also a few methods related to parenting. I want an object to be able to set a joint to its parent and rotate, scale and translate along with its parent. This would let me build, essentially, a human skeleton from smaller components.
Assume the vertex drawing works (it does); what I am having trouble with is constructing the MVP for the model. Here is my thought process. If I just knew how to combine rotation matrices I would be okay, but I don't think that's possible without quaternions. Does anyone know how to fix this, or a better way to solve it?
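For what it's worth, rotation matrices can be combined without quaternions: multiplying two rotation matrices yields their composition (the usual caveat with Euler angles is gimbal lock, not the combination itself). A minimal GLM sketch; the helper name eulerRotationMatrix is mine:

#include <glm/glm.hpp>
#include <glm/gtc/matrix_transform.hpp>

// Hypothetical helper: build a single rotation matrix from Euler angles (degrees).
// Matrix multiplication composes rotations; the rightmost factor applies first
// (here: X, then Y, then Z).
glm::mat4 eulerRotationMatrix(const glm::vec3& rotationDeg)
{
    glm::mat4 r(1.0f);
    r = glm::rotate(r, glm::radians(rotationDeg.z), glm::vec3(0.0f, 0.0f, 1.0f));
    r = glm::rotate(r, glm::radians(rotationDeg.y), glm::vec3(0.0f, 1.0f, 0.0f));
    r = glm::rotate(r, glm::radians(rotationDeg.x), glm::vec3(1.0f, 0.0f, 0.0f));
    return r;
}

For reference, here is the current state of the class: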
glm::vec3 scaling;  //Scale x, y, z
glm::vec3 rotation; //Rotation x, y, z
glm::vec3 position; //Position x, y, z
glm::vec3 offset;   //Offset of the connection between the parent and this
Drawable* parent;             //Parent connection joint
std::set<Drawable*> children; //All child connections

inline glm::mat4 getModelMatrixCopy() const
{
    glm::mat4 mm(1.0f);
    glm::mat4 s(1.0f);
    s = glm::scale(s, scaling);
    mm = glm::translate(mm, position);
    mm = glm::rotate(mm, glm::radians(rotation.x), glm::vec3(1.0f, 0.0f, 0.0f));
    mm = glm::rotate(mm, glm::radians(rotation.y), glm::vec3(0.0f, 1.0f, 0.0f));
    mm = glm::rotate(mm, glm::radians(rotation.z), glm::vec3(0.0f, 0.0f, 1.0f));
    return mm * s;
}

inline void translate(float x, float y, float z)
{
    position.x += x;
    position.y += y;
    position.z += z;
    for (auto it = children.begin(); it != children.end(); it++)
    {
        (*it)->translate(x, y, z);
    }
}

inline void move(float x, float y, float z)
{
    for (auto it = children.begin(); it != children.end(); it++)
    {
        auto child = (*it);
        float xOffset = (position.x - child->position.x);
        float yOffset = (position.y - child->position.y);
        float zOffset = (position.z - child->position.z);
        child->move(x + xOffset, y + yOffset, z + zOffset);
    }
    position.x = x;
    position.y = y;
    position.z = z;
}

inline void rotate(float xDeg, float yDeg, float zDeg)
{
    //???? This just seems wrong, need to rotate based on the addition of all previous children
    rotation.x += xDeg;
    rotation.y += yDeg;
    rotation.z += zDeg;
    for (auto it = children.begin(); it != children.end(); it++)
    {
        auto child = (*it);
        child->rotate(xDeg, yDeg, zDeg);
    }
}

inline void scale(float x, float y, float z)
{
    scaling.x *= x;
    scaling.y *= y;
    scaling.z *= z;
    for (auto it = children.begin(); it != children.end(); it++)
    {
        auto child = (*it);
        child->offset.x *= x;
        child->offset.y *= y;
        child->offset.z *= z;
        child->scale(x, y, z);
    }
}
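One way to get the parenting behaviour is to stop pushing every translate/rotate/scale down to the children eagerly and instead combine matrices at draw time: a node's world matrix is its parent's world matrix times its own local matrix. A sketch building on the members above; getWorldMatrix is a name I'm introducing, and it assumes offset is the joint position expressed in the parent's local space:

inline glm::mat4 getWorldMatrix() const
{
    // Local transform: move to the joint, rotate, then scale, so the
    // parent's rotation and scale carry over to all descendants.
    glm::mat4 local(1.0f);
    local = glm::translate(local, position + offset);
    local = glm::rotate(local, glm::radians(rotation.x), glm::vec3(1.0f, 0.0f, 0.0f));
    local = glm::rotate(local, glm::radians(rotation.y), glm::vec3(0.0f, 1.0f, 0.0f));
    local = glm::rotate(local, glm::radians(rotation.z), glm::vec3(0.0f, 0.0f, 1.0f));
    local = glm::scale(local, scaling);
    // Recurse up the parent chain; the root simply returns its local matrix.
    return parent ? parent->getWorldMatrix() * local : local;
}

With this layout, rotate() only has to change the node's own rotation; every descendant picks the change up through the matrix product, which sidesteps the "addition of all previous children" worry in the comment above.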

Related

Translating Assimp 3D models also rotates them

I have a 3D model in OpenGL (C++), loaded using Assimp.
I need to move this model around the screen (translate) so that it always faces the camera in the X and Y axes (no Z axis).
It would be like moving the model as if it were 2D only (though of course if I rotate it, the Z axis would show as well).
My render function is:
camX = camY = 0;
camZ = 5;
lookatX = lookatY = lookatZ = 0;

void C3DModel::render(void)
{
    static float step = 0.0f;
    setCamera(camX, camY, camZ, lookatX, lookatY, lookatZ);
    translate(-3, 1, 0); // here is the issue
    scale(scaleFactor, scaleFactor, scaleFactor);
    rotate(step, 0.0f, 1.0f, 0.0f);
}
void C3DModel::translate(float x, float y, float z)
{
    float aux[16];
    setTranslationMatrix(aux, x, y, z);
    multMatrix(modelMatrix, aux);
    setModelMatrix();
}

void C3DModel::setTranslationMatrix(float *mat, float x, float y, float z)
{
    setIdentityMatrix(mat, 4);
    mat[12] = x;
    mat[13] = y;
    mat[14] = z;
}

void C3DModel::setScaleMatrix(float *mat, float sx, float sy, float sz)
{
    setIdentityMatrix(mat, 4);
    mat[0] = sx;
    mat[5] = sy;
    mat[10] = sz;
}

void C3DModel::setRotationMatrix(float *mat, float angle, float x, float y, float z)
{
    float radAngle = DegToRad(angle);
    float co = cos(radAngle);
    float si = sin(radAngle);
    float x2 = x * x;
    float y2 = y * y;
    float z2 = z * z;
    mat[0] = x2 + (y2 + z2) * co;
    mat[4] = x * y * (1 - co) - z * si;
    mat[8] = x * z * (1 - co) + y * si;
    mat[12] = 0.0f;
    mat[1] = x * y * (1 - co) + z * si;
    mat[5] = y2 + (x2 + z2) * co;
    mat[9] = y * z * (1 - co) - x * si;
    mat[13] = 0.0f;
    mat[2] = x * z * (1 - co) - y * si;
    mat[6] = y * z * (1 - co) + x * si;
    mat[10] = z2 + (x2 + y2) * co;
    mat[14] = 0.0f;
    mat[3] = 0.0f;
    mat[7] = 0.0f;
    mat[11] = 0.0f;
    mat[15] = 1.0f;
}

void C3DModel::rotate(float angle, float x, float y, float z)
{
    float aux[16];
    setRotationMatrix(aux, angle, x, y, z);
    multMatrix(modelMatrix, aux);
    setModelMatrix();
}

void C3DModel::scale(float x, float y, float z)
{
    float aux[16];
    setScaleMatrix(aux, x, y, z);
    multMatrix(modelMatrix, aux);
    setModelMatrix();
}

void C3DModel::setIdentityMatrix(float *mat, int size)
{
    // fill matrix with 0s
    for (int i = 0; i < size * size; ++i)
        mat[i] = 0.0f;
    // fill diagonal with 1s
    for (int i = 0; i < size; ++i)
        mat[i + i * size] = 1.0f;
}

void C3DModel::multMatrix(float *a, float *b)
{
    float res[16];
    for (int i = 0; i < 4; ++i)
    {
        for (int j = 0; j < 4; ++j)
        {
            res[j * 4 + i] = 0.0f;
            for (int k = 0; k < 4; ++k)
            {
                res[j * 4 + i] += a[k * 4 + i] * b[j * 4 + k];
            }
        }
    }
    memcpy(a, res, 16 * sizeof(float));
}

void C3DModel::setModelMatrix()
{
    glBindBuffer(GL_UNIFORM_BUFFER, matricesUniBuffer);
    glBufferSubData(GL_UNIFORM_BUFFER, ModelMatrixOffset, MatrixSize, modelMatrix);
    glBindBuffer(GL_UNIFORM_BUFFER, 0);
}

void C3DModel::crossProduct(float *a, float *b, float *res)
{
    res[0] = a[1] * b[2] - b[1] * a[2];
    res[1] = a[2] * b[0] - b[2] * a[0];
    res[2] = a[0] * b[1] - b[0] * a[1];
}

// Normalize a vec3
void C3DModel::normalize(float *a)
{
    float mag = sqrt(a[0] * a[0] + a[1] * a[1] + a[2] * a[2]);
    a[0] /= mag;
    a[1] /= mag;
    a[2] /= mag;
}

void C3DModel::setCamera(float posX, float posY, float posZ, float lookAtX, float lookAtY, float lookAtZ)
{
    float dir[3], right[3], up[3];
    up[0] = 0.0f; up[1] = 1.0f; up[2] = 0.0f;
    dir[0] = (lookAtX - posX);
    dir[1] = (lookAtY - posY);
    dir[2] = (lookAtZ - posZ);
    normalize(dir);
    crossProduct(dir, up, right);
    normalize(right);
    crossProduct(right, dir, up);
    normalize(up);
    float viewMatrix[16], aux[16];
    viewMatrix[0] = right[0];
    viewMatrix[4] = right[1];
    viewMatrix[8] = right[2];
    viewMatrix[12] = 0.0f;
    viewMatrix[1] = up[0];
    viewMatrix[5] = up[1];
    viewMatrix[9] = up[2];
    viewMatrix[13] = 0.0f;
    viewMatrix[2] = -dir[0];
    viewMatrix[6] = -dir[1];
    viewMatrix[10] = -dir[2];
    viewMatrix[14] = 0.0f;
    viewMatrix[3] = 0.0f;
    viewMatrix[7] = 0.0f;
    viewMatrix[11] = 0.0f;
    viewMatrix[15] = 1.0f;
    setTranslationMatrix(aux, -posX, -posY, -posZ);
    multMatrix(viewMatrix, aux);
    glBindBuffer(GL_UNIFORM_BUFFER, matricesUniBuffer);
    glBufferSubData(GL_UNIFORM_BUFFER, ViewMatrixOffset, MatrixSize, viewMatrix);
    glBindBuffer(GL_UNIFORM_BUFFER, 0);
}
What I would try is to separate the rotation of your object and the translation requested for your screen position into two different matrices.
At each frame, I would compute the rotation matrix with the code inside your C3DModel::setRotationMatrix and the translation with C3DModel::setTranslationMatrix, combine them into a fresh new model matrix and apply it to your object. Keep in mind that the order matters: if you rotate first, the object will turn around the origin in your obj file; if you rotate after the translation, it will rotate around the world origin (like a planet around the sun, where the sun is the origin).
In the end, it would look like:
void C3DModel::render(void)
{
    float* rotation = createRotation(angle, x, y, z);
    float* translation = createTranslation(x, y, z);
    float* updatedModel = mul(rotation, translation); // order matters
    setModel(updatedModel);
}
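A concrete version of that sketch using the question's own helpers, rebuilding the model matrix from identity each frame so transforms never accumulate (scale omitted for brevity; note that multMatrix(a, b) stores a * b back into a):

void C3DModel::render(void)
{
    static float step = 0.0f;
    setCamera(camX, camY, camZ, lookatX, lookatY, lookatZ);

    float rotation[16], translation[16];
    setRotationMatrix(rotation, step, 0.0f, 1.0f, 0.0f);
    setTranslationMatrix(translation, -3.0f, 1.0f, 0.0f);

    // T * R: rotate about the model's own origin first, then place it.
    multMatrix(translation, rotation);
    memcpy(modelMatrix, translation, 16 * sizeof(float));
    setModelMatrix();
}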

OpenGL move player along the direction vector

I am trying to implement the idea behind an FPS camera to move the player object, so I have 4 vectors: position, right, up and front. I use the following method to update the vectors and populate the matrix that is sent to the shader:
void Mesh::updateVectors() {
    glm::vec3 f;
    f.x = cos(glm::radians(this->yaw)) * cos(glm::radians(this->pitch));
    f.y = sin(glm::radians(this->pitch));
    f.z = sin(glm::radians(this->yaw)) * cos(glm::radians(this->pitch));
    this->front = glm::normalize(f);
    this->right = glm::normalize(glm::cross(this->front, this->worldUp));
    this->up = glm::normalize(glm::cross(this->right, this->front));
    matrix = glm::lookAt(this->position, this->position + this->front, this->up);
    glm::vec3 s(scale);
    matrix = glm::scale(matrix, s);
    for (GLuint i = 0; i < this->m_Entries.size(); i++) {
        this->m_Entries[i].setModelMatrix(matrix);
    }
}
and these methods to apply movement and rotation:
void Mesh::ProcessKeyboard(Move_Directions direction, GLfloat deltaTime) {
    std::cout << this->front.x << " / " << this->front.z << std::endl;
    GLfloat velocity = this->movementSpeed * deltaTime;
    if (direction == FORWARD)
        this->position += this->front * velocity;
    if (direction == BACKWARD)
        this->position -= this->front * velocity;
    updateVectors();
}

void Mesh::Turn(GLfloat y) {
    this->yaw += y;
    this->updateVectors();
}
The object rotates correctly but always moves along one axis (1,0,0), not along the front (direction) vector.
The same method works fine for the camera, which can move in any direction I point it. Here is the relevant snippet:
glm::vec3 front;
position.x = cos(glm::radians(this->yaw)) * cos(glm::radians(this->pitch));
position.y = sin(glm::radians(this->pitch));
position.z = sin(glm::radians(this->yaw)) * cos(glm::radians(this->pitch));
this->front = glm::normalize(front);
You are normalising a default-constructed glm::vec3 front. That's undefined behaviour. Apart from the code above, I don't see where else you modify Mesh::front. You are not touching it in Mesh::Turn(), and updating Mesh::yaw doesn't seem to affect Mesh::front in any way.
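A sketch of the fix the answer is pointing at: compute the yaw/pitch components into the vector that actually gets normalized, then assign the result to front.

glm::vec3 f;
f.x = cos(glm::radians(this->yaw)) * cos(glm::radians(this->pitch));
f.y = sin(glm::radians(this->pitch));
f.z = sin(glm::radians(this->yaw)) * cos(glm::radians(this->pitch));
// Normalize the computed vector, not a default-constructed one.
this->front = glm::normalize(f);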

Omni-directional light in raytracing program gives wrong render (C++)

I am trying to implement an omni-directional light source (a.k.a. point light source) in my raytracing program in C++. I am not getting the expected results and I can't figure out the problem. Maybe someone can see what I am doing wrong.
I have included the two functions responsible for the raytracing and the light. The ClosestIntersection function finds the closest intersection between a ray and a triangle; that is used later in the DirectLight function.
I would really appreciate any help.
#include <iostream>
#include <vector>   // needed for std::vector<Triangle> below
#include <limits>   // needed for std::numeric_limits
#include <glm/glm.hpp>
#include <SDL.h>
#include "SDLauxiliary.h"
#include "TestModel.h"
#include "math.h"
using namespace std;
using glm::vec3;
using glm::mat3;
// ----------------------------------------------------------------------------
// GLOBAL VARIABLES
const int SCREEN_WIDTH = 500;
const int SCREEN_HEIGHT = 500;
SDL_Surface* screen;
int t;
vector<Triangle> triangles;
float focalLength = 900;
vec3 cameraPos(0, 0, -4.5);
vec3 lightPos(0.5, 0.5, 0);
vec3 lightColor = 14.f * vec3(1,1,1);
// Translate camera
float translation = 0.1; // use this to set translation increment
// Rotate camera
float yaw;
vec3 trueCameraPos;
const float PI = 3.1415927;
// ----------------------------------------------------------------------------
// CLASSES
struct Intersection; // forward declaration, matching the definition below
// ----------------------------------------------------------------------------
// FUNCTIONS
void Update();
void Draw();
bool ClosestIntersection(vec3 start, vec3 dir, const vector<Triangle>& triangles,
Intersection& closestIntersection);
vec3 DirectLight(const Intersection& i);
// ----------------------------------------------------------------------------
// STRUCTURES
struct Intersection
{
    vec3 position;
    float distance;
    int triangleIndex;
};
float m = std::numeric_limits<float>::max();
int main(int argc, char* argv[])
{
    LoadTestModel(triangles);
    screen = InitializeSDL(SCREEN_WIDTH, SCREEN_HEIGHT);
    t = SDL_GetTicks(); // Set start value for timer.
    while (NoQuitMessageSDL())
    {
        Update();
        Draw();
    }
    SDL_SaveBMP(screen, "screenshot.bmp");
    return 0;
}
void Update()
{
    // Compute frame time:
    int t2 = SDL_GetTicks();
    float dt = float(t2 - t);
    t = t2;
    cout << "Render time: " << dt << " ms." << endl;
}
void Draw()
{
    if (SDL_MUSTLOCK(screen))
        SDL_LockSurface(screen);
    for (int y = 0; y < SCREEN_HEIGHT; ++y)
    {
        for (int x = 0; x < SCREEN_WIDTH; ++x)
        {
            vec3 start = cameraPos;
            vec3 dir(x - SCREEN_WIDTH / 2, y - SCREEN_HEIGHT / 2, focalLength);
            Intersection intersection;
            if (ClosestIntersection(start, dir, triangles, intersection))
            {
                //vec3 theColor = triangles[intersection.triangleIndex].color;
                vec3 theColor = DirectLight(intersection);
                PutPixelSDL(screen, x, y, theColor);
            }
            else
            {
                vec3 color(0, 0, 0);
                PutPixelSDL(screen, x, y, color);
            }
        }
    }
    if (SDL_MUSTLOCK(screen))
        SDL_UnlockSurface(screen);
    SDL_UpdateRect(screen, 0, 0, 0, 0);
}
bool ClosestIntersection(vec3 s, vec3 d,
    const vector<Triangle>& triangles, Intersection& closestIntersection)
{
    closestIntersection.distance = m;
    for (size_t i = 0; i < triangles.size(); i++)
    {
        vec3 v0 = triangles[i].v0;
        vec3 v1 = triangles[i].v1;
        vec3 v2 = triangles[i].v2;
        vec3 u = v1 - v0;
        vec3 v = v2 - v0;
        vec3 b = s - v0;
        vec3 x;
        // Determinant of A = [-d u v]
        float det = -d.x * ((u.y * v.z) - (v.y * u.z)) -
                     u.x * ((-d.y * v.z) - (v.y * -d.z)) +
                     v.x * ((-d.y * u.z) - (u.y * -d.z));
        // Cramer's Rule for t = x.x
        x.x = (b.x * ((u.y * v.z) - (v.y * u.z)) -
               u.x * ((b.y * v.z) - (v.y * b.z)) +
               v.x * ((b.y * u.z) - (u.y * b.z))) / det;
        if (x.x >= 0)
        {
            // Cramer's Rule for u = x.y
            x.y = (-d.x * ((b.y * v.z) - (v.y * b.z)) -
                    b.x * ((-d.y * v.z) - (v.y * -d.z)) +
                    v.x * ((-d.y * b.z) - (b.y * -d.z))) / det;
            // Cramer's Rule for v = x.z
            x.z = (-d.x * ((u.y * b.z) - (b.y * u.z)) -
                    u.x * ((-d.y * b.z) - (b.y * -d.z)) +
                    b.x * ((-d.y * u.z) - (u.y * -d.z))) / det;
            if (x.y >= 0 && x.z >= 0 && x.y + x.z <= 1 && x.x < closestIntersection.distance)
            {
                closestIntersection.position = x;
                closestIntersection.distance = x.x;
                closestIntersection.triangleIndex = i;
            }
        }
    }
    // end of for loop
    if (closestIntersection.distance != m)
    {
        return true;
    }
    else
    {
        return false;
    }
}
vec3 DirectLight(const Intersection& i)
{
    vec3 n = triangles[i.triangleIndex].normal;
    vec3 r = lightPos - i.position;
    float R2 = r.x * r.x + r.y * r.y + r.z * r.z;
    vec3 D = (lightColor * fmaxf((glm::dot(glm::normalize(r), n)), 0)) / (4 * PI * R2);
    return D;
}
If I'm understanding the code in ClosestIntersection correctly, here's what it's doing for each triangle:
Let u,v be the vectors from one vertex of the triangle to the other two vertices. Let d be (the reverse of) the direction of the ray we're considering.
And let b be the vector from that vertex of the triangle to the camera.
Find p,q,r so that b = pd+qu+rv (p,q,r are what your code calls x.x, x.y, x.z).
Now the ray meets the triangle if p>0, q>=0, r>=0, q+r<=1 and the distance to the intersection point is p.
So, the conditions on q,r make sense; the idea is that b-qu-rv is the vector from the camera to the relevant point in the triangle and it's in direction d. Your distances aren't really distances, but along a single ray they're the same multiple of the actual distance, which means that this works fine for determining which triangle you've hit, and that's all you use them for. So far, so good.
But then you say closestIntersection.position = x; and surely that's all wrong, because this x isn't in the same coordinate system as your camera location, triangle vertices, etc. It's in this funny "how much of d, how much of u, how much of v" coordinate system which isn't even the same from one triangle to the next. (Which is why you are getting discontinuities at triangle boundaries even within a single face, I think.)
Try setting it to v0+x.y*(v1-v0)+x.z*(v2-v0) instead (I think this is right; it's meant to be the actual point where the ray crosses the triangle, in the same coordinates as all your other points) and see what it does.
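In the question's own terms, that suggestion is a one-line change inside the innermost if:

// Convert the (t, u, v) solution back into world coordinates before storing it.
closestIntersection.position = v0 + x.y * (v1 - v0) + x.z * (v2 - v0);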
This isn't a super-great answer, but I managed to make your code work without the strange shading discontinuities. The problem happens in ClosestIntersection and maybe Gareth's answer covers it. I need to stop looking at this now, but I wanted to show you what I have before I leave, and I need an Answer to post some code.
// This starts with some vec3 helper functions which make things
// easier to look at
float Dot(const vec3& a, const vec3& b) {
    return a.x * b.x + a.y * b.y + a.z * b.z;
}
vec3 Cross(const vec3& a, const vec3& b) {
    return vec3(a.y*b.z - a.z*b.y, a.z*b.x - a.x*b.z, a.x*b.y - a.y*b.x);
}
float L2(const vec3& v) { return v.x*v.x + v.y*v.y + v.z*v.z; }
float Abs(const vec3& v) { return std::sqrt(L2(v)); }

// Here is the replacement version of ClosestIntersection
bool ClosestIntersection(vec3 cam, vec3 dir,
    const vector<Triangle>& triangles, Intersection& closestIntersection)
{
    closestIntersection.distance = m;
    vec3 P0 = cam;
    vec3 P1 = cam + dir;
    for (size_t i = 0; i < triangles.size(); ++i) {
        vec3 v0 = triangles[i].v0;
        vec3 v1 = triangles[i].v1;
        vec3 v2 = triangles[i].v2;
        // Dan Sunday
        // http://geomalgorithms.com/a06-_intersect-2.html
        vec3 u = v1 - v0;
        vec3 v = v2 - v0;
        // w = P-v0, solve w = su + tv (s, t are parametric scalars)
        vec3 n = Cross(u, v);
        float ri = Dot(n, (v0 - P0)) / Dot(n, (P1 - P0));
        vec3 Pi = P0 + ri * (P1 - P0);
        vec3 w = Pi - v0;
        // s = w . (n x v) / (u . (n x v))
        // t = w . (n x u) / (v . (n x u))
        float s = Dot(w, Cross(n, v)) / Dot(u, Cross(n, v));
        float t = Dot(w, Cross(n, u)) / Dot(v, Cross(n, u));
        if (s >= 0 && t >= 0 && s + t <= 1) {
            float dist = Abs(cam - Pi);
            if (dist < closestIntersection.distance) {
                closestIntersection.position = Pi;
                closestIntersection.distance = dist;
                closestIntersection.triangleIndex = int(i);
            }
        }
    }
    return closestIntersection.distance != m;
}
Good luck.

Line defined as start and length + orientation differs from start and end point definition - wrong orientation calculation?

I am trying to write some position/orientation methods for my small & simple 3D-space calculation library, but I'm stuck on the following problem.
I store a 3D line as start and end points. However, it should be possible to store it as a start point plus the line's length and orientation as well (it's just a good example to test whether the orientation calculations work).
By orientation I mean the rotation from the initial "0" orientation (which places the end at start + [0,length,0]). So I first rotate [0,length,0] by the orientation and then add start to it to get the end point.
The problem is, my orientation calculation fails somewhere: after computing the orientation I get a different end point.
I use a left-handed coordinate system with the Y-axis pointing up, but I don't think it's important here.
Here's the code (I've tried to name the methods so you can check whether the steps are OK; here's the full source code if you want to compile it yourself):
Point3D start = { 5.0f, 4.0f, 7.0f };
Point3D end = { 15.0f, 6.0f, 14.0f };
Point3D direction = (end - start);
std::wcout << L"Direction: "; direction.output();
float angle = Point3D(0.0f, 1.0f, 0.0f).getAngleToAnotherVectorInRadians(direction);
Point3D axis = direction.getCrossProduct(Point3D(0.0f, 1.0f, 0.0f)).getNormalized();
Quaternion o = Quaternion(AxisAngle(axis, angle));
std::wcout << L"\nAxisAngle: "; AxisAngle(axis, angle).output();
std::wcout << L"\nOrientation: "; o.output();
//test - end2 should be equal to end
Point3D offset(0.0f, (end - start).getLengthAsVector(), 0.0f);
offset = o.rotatePoint(offset);
std::wcout << L"\nOffset: "; offset.output();
Point3D end2 = start + offset;
std::wcout << L"\nEnd2: "; end2.output();
The code produces the following output (without the comments, of course):
Direction: {10, 2, 7} //looks ok
AxisAngle: {{-0.573462, 0, 0.819232}, 1.40839}
Orientation: {-0.371272, 0, 0.530388, 0.762132}
Offset: {-10, 2, -7} //Almost! It should be {10, 2, 7}
End2: {-5, 6, -9.53674e-07} //Wrong! It should be { 15, 6, 14 }
In case all the steps are OK but there are some mistakes in the methods' implementations, I post the important code for the classes here (so you can reproduce the problem): Point3D, AxisAngle, Quaternion.
I strongly suspect the problem lies in my main steps or in the AxisAngle calculation. I think the AxisAngle-to-Quaternion transformation is OK (but I may be passing the wrong AxisAngle to the Quaternion constructor).
The Point3D:
struct Point3D {
protected:
    float x, y, z;
public:
    Point3D() : x(0.0f), y(0.0f), z(0.0f) {}
    Point3D(float x, float y, float z) : x(x), y(y), z(z) {}
    void output() { std::wcout << L"{" << x << L", " << y << L", " << z << L"}"; }
    Point3D operator-(const Point3D &point) const {
        Point3D temp;
        temp.setX(getX() - point.getX());
        temp.setY(getY() - point.getY());
        temp.setZ(getZ() - point.getZ());
        return temp;
    }
    Point3D operator+(const Point3D &value) const {
        Point3D temp;
        temp.setX(getX() + value.getX());
        temp.setY(getY() + value.getY());
        temp.setZ(getZ() + value.getZ());
        return temp;
    }
    inline float getX() const { return x; }
    inline float getY() const { return y; }
    inline float getZ() const { return z; }
    inline void setX(float x) { this->x = x; }
    inline void setY(float y) { this->y = y; }
    inline void setZ(float z) { this->z = z; }
    inline float getLengthAsVector() const {
        return sqrt(x*x + y*y + z*z);
    }
    inline Point3D getCrossProduct(const Point3D &anotherVector) const {
        //based on: http://www.sciencehq.com/physics/vector-product-multiplying-vectors.html
        return Point3D(
            y * anotherVector.z - anotherVector.y * z,
            z * anotherVector.x - anotherVector.z * x,
            x * anotherVector.y - anotherVector.x * y
        );
    }
    inline float getDotProduct(const Point3D &anotherVector) const {
        //based on: https://www.ltcconline.net/greenl/courses/107/Vectors/DOTCROS.HTM
        return x * anotherVector.x + y * anotherVector.y + z * anotherVector.z;
    }
    inline float getAngleToAnotherVectorInRadians(const Point3D &anotherVector) const {
        //based on: http://math.stackexchange.com/questions/974178/how-to-calculate-the-angle-between-2-vectors-in-3d-space-given-a-preset-function
        return acos(getDotProduct(anotherVector) / (getLengthAsVector() * anotherVector.getLengthAsVector()));
    }
    Point3D getNormalized() const {
        float length = std::abs(sqrt(x*x + y*y + z*z));
        Point3D result(x / length, y / length, z / length);
        return result;
    }
};
The AxisAngle:
class AxisAngle {
protected:
    Point3D axis;
    float angleInRadians;
public:
    AxisAngle(const AxisAngle &other) { axis = other.axis; angleInRadians = other.angleInRadians; }
    AxisAngle(float x, float y, float z, float angleInRadians) {
        this->axis = Point3D(x, y, z);
        this->angleInRadians = angleInRadians;
    }
    AxisAngle(const Point3D &axis, float angleInRadians) {
        this->axis = axis;
        this->angleInRadians = angleInRadians;
    }
    Point3D getAxis() const { return axis; }
    float getAngleInRadians() const { return angleInRadians; }
    void output() { std::wcout << L"{"; axis.output(); std::wcout << L", " << angleInRadians << L"}"; }
};
And last but not least, Quaternion:
class Quaternion {
protected:
    float x; float y; float z; float w;
public:
    Quaternion() { x = 0.0f; y = 0.0f; z = 0.0f; w = 1.0f; }
    Quaternion(const Quaternion &other) { x = other.x; y = other.y; z = other.z; w = other.w; }
    Quaternion(float x, float y, float z, float w) { this->x = x; this->y = y; this->z = z; this->w = w; }
    Quaternion(const AxisAngle &axisAngle) {
        Point3D axis = axisAngle.getAxis();
        float angleInRadians = axisAngle.getAngleInRadians();
        x = sin(angleInRadians / 2) * axis.getX();
        y = sin(angleInRadians / 2) * axis.getY();
        z = sin(angleInRadians / 2) * axis.getZ();
        w = cos(angleInRadians / 2);
        normalizeIt();
    }
    float getLength() const {
        return sqrt(x*x + y*y + z*z + w*w);
    }
    void normalizeIt() {
        float length = getLength();
        x = x / length;
        y = y / length;
        z = z / length;
        w = w / length;
    }
    Quaternion getConjugated() const {
        return Quaternion(-x, -y, -z, w);
    }
    Quaternion multiply(Quaternion by) const {
        //"R" for result
        float wR = w * by.getW() - x * by.getX() - y * by.getY() - z * by.getZ();
        float xR = x * by.getW() + w * by.getX() + y * by.getZ() - z * by.getY();
        float yR = y * by.getW() + w * by.getY() + z * by.getX() - x * by.getZ();
        float zR = z * by.getW() + w * by.getZ() + x * by.getY() - y * by.getX();
        return Quaternion(xR, yR, zR, wR);
    }
    //rotate Point3D p around [0,0,0] with this Quaternion
    Point3D rotatePoint(Point3D p) const {
        Quaternion temp = multiply(p).multiply(getConjugated());
        return Point3D(temp.getX(), temp.getY(), temp.getZ());
        //G: P' = Q(P-G)Q' + G <- to rotate P around G with Quaternion
    }
    Quaternion multiply(Point3D r) const {
        float wR = -x * r.getX() - y * r.getY() - z * r.getZ();
        float xR = w * r.getX() + y * r.getZ() - z * r.getY();
        float yR = w * r.getY() + z * r.getX() - x * r.getZ();
        float zR = w * r.getZ() + x * r.getY() - y * r.getX();
        return Quaternion(xR, yR, zR, wR);
    }
    inline float getX() const { return x; } inline void setX(float x) { this->x = x; }
    inline float getY() const { return y; } inline void setY(float y) { this->y = y; }
    inline float getZ() const { return z; } inline void setZ(float z) { this->z = z; }
    inline float getW() const { return w; } inline void setW(float w) { this->w = w; }
    void output() { std::wcout << L"{" << x << L", " << y << L", " << z << L", " << w << L"}"; }
};
In case somebody asks: I do want to use quaternions. They may not look 100% necessary here, but storing a 3D object's orientation as a quaternion has many benefits in more complex computations (and most game engines / 3D software use it "under the hood" as well).
Your axis has the wrong orientation. It should be:
Point3D axis = Point3D(0.0f, 1.0f, 0.0f).getCrossProduct(direction).getNormalized();
Use the left-hand rule (once for each operand order) to figure out the correct order.
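As a quick check of that fix against the test case in the question (classes as defined above; expected values are the question's own):

// Cross the reference "up" vector with the direction, not the other way round;
// swapping the operands of a cross product negates the resulting axis.
Point3D axis = Point3D(0.0f, 1.0f, 0.0f).getCrossProduct(direction).getNormalized();
Quaternion o = Quaternion(AxisAngle(axis, angle));
Point3D offset = o.rotatePoint(Point3D(0.0f, direction.getLengthAsVector(), 0.0f));
// Offset should now come out as roughly {10, 2, 7}, and start + offset as {15, 6, 14}.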

Draw Point or filled in circle

In OpenGL, when I want to draw a filled circle, I'd do:
void DrawPoint(float X, float Y, float Z, float Radius) const
{
    glRasterPos2f(X, Y);
    glPointSize(Radius);
    glBegin(GL_POINTS);
    glVertex3f(X, Y, Z);
    glEnd();
    glPointSize(this->PointSize);
    glFlush();
}
However, I could not find any equivalent for glPointSize in DirectX. So I tried:
#include <vector>

struct Vector3
{
    double X, Y, Z;
};

void DrawCircle1(float X, float Y, DWORD Color)
{
    const int sides = 20;
    std::vector<D3DXVECTOR3> points;
    for (int i = 0; i < sides; ++i)
    {
        double angle = D3DX_PI * 2 / sides * i;
        points.emplace_back(D3DXVECTOR3(sin(angle), cos(angle), 0));
    }
    device->DrawPrimitiveUP(D3DPT_TRIANGLEFAN, sides, &points[0], sizeof(D3DXVECTOR3));
}

void DrawCircle2(float CenterX, float CenterY, float Radius, int Rotations)
{
    std::vector<D3DXVECTOR3> Points;
    float Theta = 2 * 3.1415926535897932384626433832795 / float(Rotations);
    float Cos = cosf(Theta);
    float Sine = sinf(Theta);
    float X = Radius, Y = 0, Temp = 0;
    for (int I = 0; I < Rotations; ++I)
    {
        Points.push_back(D3DXVECTOR3(X + CenterX, Y + CenterY, 0));
        Temp = X;
        X = Cos * X - Sine * Y;
        Y = Sine * Temp + Cos * Y;
    }
    device->DrawPrimitiveUP(D3DPT_TRIANGLEFAN, Points.size(), &Points[0], sizeof(D3DXVECTOR3));
}
But neither of these works, and I cannot figure out why. The first one draws a gigantic black circle and the second one draws a long triangle.
Any ideas how I can draw a filled circle, or a point of a given size and colour, in DirectX?
static const int CIRCLE_RESOLUTION = 64;

struct VERTEX_2D_DIF { // transformed colorized
    float x, y, z, rhw;
    D3DCOLOR color;
    static const DWORD FVF = D3DFVF_XYZRHW | D3DFVF_DIFFUSE;
};

void DrawCircleFilled(float mx, float my, float r, D3DCOLOR color)
{
    VERTEX_2D_DIF verts[CIRCLE_RESOLUTION + 1];
    for (int i = 0; i < CIRCLE_RESOLUTION + 1; i++)
    {
        verts[i].x = mx + r * cos(D3DX_PI * (i / (CIRCLE_RESOLUTION / 2.0f)));
        verts[i].y = my + r * sin(D3DX_PI * (i / (CIRCLE_RESOLUTION / 2.0f)));
        verts[i].z = 0;
        verts[i].rhw = 1;
        verts[i].color = color;
    }
    m_pDevice->SetFVF(VERTEX_2D_DIF::FVF);
    m_pDevice->DrawPrimitiveUP(D3DPT_TRIANGLEFAN, CIRCLE_RESOLUTION - 1, verts, sizeof(VERTEX_2D_DIF));
}
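A call site would look something like this, assuming m_pDevice is a valid IDirect3DDevice9* and the call happens between BeginScene and EndScene:

// Filled red circle of radius 50 pixels centered at (200, 200).
// D3DFVF_XYZRHW vertices are pre-transformed, so coordinates are in screen space.
DrawCircleFilled(200.0f, 200.0f, 50.0f, D3DCOLOR_XRGB(255, 0, 0));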