I'm writing simple renderer in C++. It uses convention similar to OpenGL, but it does not use OpenGL nor DirectX. float3, float4, float4x4 are my own custom structures.
The problem is, when I set the eye somewhere other than (0, 0, 0), I get strange results: triangles appear where I would not expect to see them.
I guess it's because of wrong matrix multiplication formula, wrong multiplication order, normalization, or wrong formula of lookAt/setPerspective. But I'm stuck at it and I cannot find the mistake.
I will upload some illustrations/screens later, as I don't have access to them now.
I use column-notation for matrices (matrix[column][row]), like OpenGL does.
Here is the matrix multiplication code:
class float4x4 { //[column][row], OpenGL-style column-major storage
	float4 columns[4];
public:
	// Matrix product c = (*this) * b.
	// In column-major storage c[col][row] = sum_k (*this)[k][row] * b[col][k],
	// i.e. each result column is *this applied to the corresponding column of b.
	float4x4 multiplyBy(float4x4 &b){
		float4x4 c = float4x4();
		for (unsigned col = 0; col < 4; col++) {
			c.columns[col] = float4(
				columns[0].x * b.columns[col].x + columns[1].x * b.columns[col].y + columns[2].x * b.columns[col].z + columns[3].x * b.columns[col].w,
				columns[0].y * b.columns[col].x + columns[1].y * b.columns[col].y + columns[2].y * b.columns[col].z + columns[3].y * b.columns[col].w,
				columns[0].z * b.columns[col].x + columns[1].z * b.columns[col].y + columns[2].z * b.columns[col].z + columns[3].z * b.columns[col].w,
				columns[0].w * b.columns[col].x + columns[1].w * b.columns[col].y + columns[2].w * b.columns[col].z + columns[3].w * b.columns[col].w
			);
		}
		return c;
	}
	// Matrix-vector product (*this) * b for a column vector b.
	// BUG FIX: the previous version took the dot product of b with each COLUMN,
	// which computes transpose(*this) * b. In column-major storage each result
	// component must be the dot product of a ROW with b, so we transpose first.
	float4 multiplyBy(const float4 &b){
		float4x4 rows = getTransposed(); // rows.columns[r] now holds row r of *this
		return float4(
			dotProduct(rows.columns[0], b),
			dotProduct(rows.columns[1], b),
			dotProduct(rows.columns[2], b),
			dotProduct(rows.columns[3], b)
		);
	}
	// Returns the transpose (rows become columns and vice versa).
	inline float4x4 getTransposed() {
		float4x4 transposed;
		for (unsigned i = 0; i < 4; i++) {
			for (unsigned j = 0; j < 4; j++) {
				transposed.columns[i][j] = columns[j][i];
			}
		}
		return transposed;
	}
	// Column access, so the matrix can be indexed as matrix[column][row];
	// the rest of the file (setPerspective, setLookat) already relies on this.
	// NOTE(review): the 4-argument constructor used elsewhere in the file is
	// not shown in the posted class — it presumably exists in the full source.
	float4 &operator[](unsigned index){
		return columns[index];
	}
};
Where #define dotProduct(a, b) a.getDotProduct(b) and:
inline float getDotProduct(const float4 &anotherVector) const {
	// 4-component dot product; accumulated left-to-right in the same
	// order as a single chained expression, so results are bit-identical.
	float sum = x * anotherVector.x;
	sum += y * anotherVector.y;
	sum += z * anotherVector.z;
	sum += w * anotherVector.w;
	return sum;
}
My VertexProcessor:
class VertexProcessor {
float4x4 obj2world;
float4x4 world2view;
float4x4 view2proj;
float4x4 obj2proj;
public:
inline float3 tr(const float3 & v) { //in object space
float4 r = obj2proj.multiplyBy(float4(v.x, v.y, v.z, 1.0f/*v.w*/));
return float3(r.x / r.w, r.y / r.w, r.z / r.w); //we get vector in unified cube from -1,-1,-1 to 1,1,1
}
inline void transform() {
obj2proj = obj2world.multiplyBy(world2view);
obj2proj = obj2proj.multiplyBy(view2proj);
}
inline void setIdentity() {
obj2world = float4x4(
float4(1.0f, 0.0f, 0.0f, 0.0f),
float4(0.0f, 1.0f, 0.0f, 0.0f),
float4(0.0f, 0.0f, 1.0f, 0.0f),
float4(0.0f, 0.0f, 0.0f, 1.0f)
);
}
inline void setPerspective(float fovy, float aspect, float nearP, float farP) {
fovy *= PI / 360.0f;
float fValue = cos(fovy) / sin(fovy);
view2proj[0] = float4(fValue/aspect, 0.0f, 0.f, 0.0f);
view2proj[1] = float4(0.0f, fValue, 0.0f, 0.0f);
view2proj[2] = float4(0.0f, 0.0f, (farP + nearP) / (nearP - farP), -1.0f);
view2proj[3] = float4(0.0f, 0.0f, 2.0f * farP * nearP / (nearP - farP), 0.0f);
}
inline void setLookat(float3 eye, float3 center, float3 up) {
float3 f = center - eye;
f.normalizeIt();
up.normalizeIt();
float3 s = f.getCrossProduct(up);
float3 u = s.getCrossProduct(f);
world2view[0] = float4(s.x, u.x, -f.x, 0.0f);
world2view[1] = float4(s.y, u.y, -f.y, 0.0f);
world2view[2] = float4(s.z, u.z, -f.z, 0.0f);
world2view[3] = float4(eye/*.getNormalized() ???*/ * -1.0f, 1.0f);
}
inline void multByTranslation(float3 v) {
float4x4 m(
float4(1.0f, 0.0f, 0.0f, 0.0f),
float4(0.0f, 1.0f, 0.0f, 0.0f),
float4(0.0f, 0.0f, 1.0f, 0.0f),
float4(v.x, v.y, v.z, 1.0f)
);
world2view = m.multiplyBy(world2view);
}
inline void multByScale(float3 v) {
float4x4 m(
float4(v.x, 0.0f, 0.0f, 0.0f),
float4(0.0f, v.y, 0.0f, 0.0f),
float4(0.0f, 0.0f, v.z, 0.0f),
float4(0.0f, 0.0f, 0.0f, 1.0f)
);
world2view = m.multiplyBy(world2view);
}
inline void multByRotation(float a, float3 v) {
float s = sin(a*PI / 180.0f), c = cos(a*PI / 180.0f);
v.normalizeIt();
float4x4 m(
float4(v.x*v.x*(1-c)+c, v.y*v.x*(1 - c) + v.z*s, v.x*v.z*(1-c)-v.y*s, 0.0f),
float4(v.x*v.y*(1-c)-v.z*s, v.y*v.y*(1-c)+c, v.y*v.z*(1-c)+v.x*s, 0.0f),
float4(v.x*v.z*(1-c)+v.y*s, v.y*v.z*(1-c)-v.x*s, v.z*v.z*(1-c)+c, 0.0f),
float4(0.0f, 0.0f, 0.0f, 1.0f)
);
world2view = m.multiplyBy(world2view);
}
};
And the Rasterizer:
// Fills triangles (given in normalized device coordinates) into the target
// color/depth buffer using barycentric edge functions.
class Rasterizer final {
Buffer * buffer = nullptr; // non-owning render target
// Maps NDC x in [-1, 1] to pixel x in [0, width].
inline float toScreenSpaceX(float x) { return (x + 1) * buffer->getWidth() * 0.5f; }
// Maps NDC y in [-1, 1] to pixel y in [0, height].
inline float toScreenSpaceY(float y) { return (y + 1) * buffer->getHeight() * 0.5f; }
// Twice the signed area of triangle (a, b, c); the sign tells on which side of
// the edge a->b the point c lies.
// NOTE(review): the operands are floats but the return type is int, so the
// value is truncated — confirm that is intended.
inline int orient2d(float ax, float ay, float bx, float by, const float2& c) {
return (bx - ax)*(c.y - ay) - (by - ay)*(c.x - ax);
}
public:
Rasterizer(Buffer * buffer) : buffer(buffer) {}
//v - position in NDC/screen space (x, y in [-1, 1] before the conversion below, z in [-1, 1])
void triangle(
float3 v0, float3 v1, float3 v2,
float3 n0, float3 n1, float3 n2,
float2 uv0, float2 uv1, float2 uv2,
Light * light0, Light * light1,
float3 camera, Texture * texture
) {
// Convert x/y to pixel coordinates; z keeps its depth value.
v0.x = toScreenSpaceX(v0.x);
v0.y = toScreenSpaceY(v0.y);
v1.x = toScreenSpaceX(v1.x);
v1.y = toScreenSpaceY(v1.y);
v2.x = toScreenSpaceX(v2.x);
v2.y = toScreenSpaceY(v2.y);
//based on: https://fgiesen.wordpress.com/2013/02/08/triangle-rasterization-in-practice/
//compute triangle bounding box
int minX = MIN3(v0.x, v1.x, v2.x);
int minY = MIN3(v0.y, v1.y, v2.y);
int maxX = MAX3(v0.x, v1.x, v2.x);
int maxY = MAX3(v0.y, v1.y, v2.y);
//clip against screen bounds
minX = MAX(minX, 0);
minY = MAX(minY, 0);
maxX = MIN(maxX, buffer->getWidth() - 1);
maxY = MIN(maxY, buffer->getHeight() - 1);
//rasterize every pixel of the bounding box
// NOTE(review): the loop counters are float members of float2; integer pixel
// loops would avoid float-comparison pitfalls — confirm float2 holds floats.
float2 p(0.0f, 0.0f);
for (p.y = minY; p.y <= maxY; p.y++) {
for (p.x = minX; p.x <= maxX; p.x++) {
// Determine barycentric coordinates
//int w0 = orient2d(v1.x, v1.y, v2.x, v2.y, p);
//int w1 = orient2d(v2.x, v2.y, v0.x, v0.y, p);
//int w2 = orient2d(v0.x, v0.y, v1.x, v1.y, p);
// w0/w1 are the barycentric weights of p w.r.t. v0/v1, each normalized by the
// full triangle's doubled area; w2 is the remainder so the three sum to 1.
float w0 = (v1.y - v2.y)*(p.x - v2.x) + (v2.x - v1.x)*(p.y - v2.y);
w0 /= (v1.y - v2.y)*(v0.x - v2.x) + (v2.x - v1.x)*(v0.y - v2.y);
float w1 = (v2.y - v0.y)*(p.x - v2.x) + (v0.x - v2.x)*(p.y - v2.y);
w1 /= (v2.y - v0.y)*(v1.x - v2.x) + (v0.x - v2.x)*(v1.y - v2.y);
float w2 = 1 - w0 - w1;
// If p is on or inside all edges, render pixel.
if (w0 >= 0 && w1 >= 0 && w2 >= 0) {
// Interpolate depth barycentrically; smaller depth wins (closer to camera).
float depth = w0 * v0.z + w1 * v1.z + w2 * v2.z;
if (depth < buffer->getDepthForPixel(p.x, p.y)) {
//... (lighting/texturing elided in the original post; 'diffuse' is computed there)
buffer->setPixel(p.x, p.y, diffuse.r, diffuse.g, diffuse.b, ALPHA_VISIBLE, depth);
}
}
}
}
}
};
I strongly believe that the Rasterizer itself works well, because when I test it with this code (instead of the main loop):
// Rasterizer-only sanity check: vertices are fed in NDC directly, bypassing
// the VertexProcessor, so any error here would be in the Rasterizer itself.
float3 v0{ 0, 0, 0.1f };
float3 v1{ 0.5, 0, 0.1f };
float3 v2{ 1, 1, 0.1f };
//Rasterizer test (without VertexProcessor)
rasterizer->triangle(v0, v1, v2, n0, n1, n2, uv0, uv1, uv2, light0, light1, eye, defaultTexture);
I get the right image, with triangle that has one corner at the middle of the screen ([0, 0] in unified space), one at bottom-right corner ([1, 1]) and one at [0.5, 0].
The float3 structure:
// 3-component float vector with position (x,y,z), color (r,g,b) and
// index-style (p[i]) access aliases over the same storage.
class float3 {
public:
union {
struct { float x, y, z; }; // position access
struct { float r, g, b; }; // color access
float p[3];                // index access, see operator[]
};
float3() = delete; // force explicit initialization; no zero-default
float3(const float3 &other) : x(other.x), y(other.y), z(other.z) {}
float3(float x, float y, float z) : x(x), y(y), z(z) {}
// Bounds-checked component access (0 = x, 1 = y, 2 = z).
float &operator[](unsigned index){
ERROR_HANDLE(index < 3, L"The float3 index out of bounds (0-2 range, " + C::toWString(index) + L" given).");
return p[index];
}
// Euclidean length.
// NOTE(review): std::abs is redundant — sqrt of a non-negative sum is never negative.
float getLength() const { return std::abs(sqrt(x*x + y*y + z*z)); }
// Scales this vector to unit length in place (defined elsewhere).
void normalizeIt();
// Returns a unit-length copy, leaving *this unchanged.
inline float3 getNormalized() const {
float3 result(*this);
result.normalizeIt();
return result;
}
// Right-handed cross product: a vector perpendicular to both operands.
inline float3 getCrossProduct(const float3 &anotherVector) const {
//based on: http://www.sciencehq.com/physics/vector-product-multiplying-vectors.html
return float3(
y * anotherVector.z - anotherVector.y * z,
z * anotherVector.x - anotherVector.z * x,
x * anotherVector.y - anotherVector.x * y
);
}
// Component-wise dot product.
inline float getDotProduct(const float3 &anotherVector) const {
//based on: https://www.ltcconline.net/greenl/courses/107/Vectors/DOTCROS.HTM
return x * anotherVector.x + y * anotherVector.y + z * anotherVector.z;
}
... // (remaining members elided in the original post)
};
The main loop:
VertexProcessor vp;
// NOTE(review): raw owning pointers, never deleted — acceptable for a demo.
DirectionalLight * light0 = new DirectionalLight({ 0.3f, 0.3f, 0.3f }, { 0.0f, -1.0f, 0.0f });
DirectionalLight * light1 = new DirectionalLight({ 0.4f, 0.4f, 0.4f }, { 0.0f, -1.0f, 0.5f });
while(!my_window.is_closed()) {
	tgaBuffer.clearDepth(10.0f); //anything >= 1.0f works; a pixel is drawn when its depth < stored depth
	tgaBuffer.clearColor(0, 0, 255, ALPHA_VISIBLE);
	// BUG FIX: getWidth() / getHeight() was integer division, so every
	// non-square buffer got a truncated aspect ratio; divide in floating point.
	vp.setPerspective(75.0f, (float)tgaBuffer.getWidth() / (float)tgaBuffer.getHeight(), 10.0f, 2000.0f);
	float3 eye = { 10.0f, 10.0f - frameTotal / 10.0f, 10.0f }; //animate eye
	// BUG FIX: the second argument of setLookat is the POINT looked at, not a
	// direction, and getNormalized() on the zero vector divides by zero;
	// pass the origin directly.
	vp.setLookat(eye, float3{ 0.0f, 0.0f, 0.0f }, { 0.0f, 1.0f, 0.0f });
	vp.setIdentity();
	//we could call e.g. vp.multByRotation(...) here, but we won't to keep it simple
	vp.transform();
	//bottom
	drawTriangle(0, 1, 2);
	drawTriangle(2, 3, 0);
	drawTriangle(3, 2, 7);
	drawTriangle(7, 2, 6);
	drawTriangle(5, 1, 0);
	drawTriangle(0, 5, 4);
	drawTriangle(4, 5, 6);
	drawTriangle(6, 7, 4);
	frameTotal++;
}
Where drawTriangle(...) stands for:
// Projects three indexed cube vertices through the VertexProcessor and hands them
// to the rasterizer together with per-vertex attributes.
// NOTE(review): the argument list (positions + object-space positions + n0..n2 + uv0..uv2
// + lights + eye + texture) appears longer than the Rasterizer::triangle signature shown
// above — confirm the intended overload. The macro also ends with ';', so call sites
// followed by ';' produce a harmless extra empty statement.
#define drawTriangle(i0, i1, i2) rasterizer->triangle(vp.tr(v[i0]), vp.tr(v[i1]), vp.tr(v[i2]), v[i0], v[i1], v[i2], n0, n1, n2, uv0, uv1, uv2, light0, light1, eye, defaultTexture);
And here is the initialization of triangles' data:
// Eight corners of an axis-aligned cube with side length 20, centered at 'offset':
// indices 0-3 are the bottom face (y = -10), 4-7 the top face (y = +10).
float3 offset{ 0.0f, 0.0f, 0.0f };
v.push_back(offset + float3{ -10, -10, -10 });
v.push_back(offset + float3{ +10, -10, -10 });
v.push_back(offset + float3{ +10, -10, +10 });
v.push_back(offset + float3{ -10, -10, +10 });
v.push_back(offset + float3{ -10, +10, -10 });
v.push_back(offset + float3{ +10, +10, -10 });
v.push_back(offset + float3{ +10, +10, +10 });
v.push_back(offset + float3{ -10, +10, +10 });
I've created a little c-library for opengl long time ago. It was generally for learning purpose during my studies of computer graphics. I've looked up my sources and my implementation of perspective projection and orientation very much differs.
pbm_Mat4 pbm_mat4_projection_perspective(PBfloat fov, PBfloat ratio, PBfloat near, PBfloat far) {
    /* Symmetric view frustum derived from the vertical field of view:
     * top/bottom at the near plane, left/right scaled by the aspect ratio. */
    PBfloat top = near * tanf(fov / 2.0f);
    PBfloat bottom = -top;
    PBfloat right = ratio * top, left = ratio * bottom;
    /* Standard OpenGL-style perspective matrix, built column by column. */
    return pbm_mat4_create(pbm_vec4_create(2.0f * near / (right - left), 0, 0, 0),
                           pbm_vec4_create(0, 2.0f * near / (top - bottom), 0, 0),
                           pbm_vec4_create((right + left) / (right - left), (top + bottom) / (top - bottom), -(far + near) / (far - near), -1.0f),
                           pbm_vec4_create(0, 0, -2.0f * far * near / (far - near), 0));
}
pbm_Mat4 pbm_mat4_orientation_lookAt(pbm_Vec3 pos, pbm_Vec3 target, pbm_Vec3 up) {
    /* Build an orthonormal camera basis: forward towards the target,
     * side = forward x up, then re-derive up so all axes are perpendicular. */
    pbm_Vec3 fwd = pbm_vec3_normalize(pbm_vec3_sub(target, pos));
    pbm_Vec3 side = pbm_vec3_normalize(pbm_vec3_cross(fwd, up));
    pbm_Vec3 camUp = pbm_vec3_normalize(pbm_vec3_cross(side, fwd));
    /* Cameras look down -z, so the matrix stores the negated forward axis. */
    pbm_Vec3 back = pbm_vec3_scalar(fwd, -1);
    /* Translation column: the eye position expressed in the camera basis, -R * pos. */
    pbm_Vec3 negPos = pbm_vec3_scalar(pos, -1);
    pbm_Vec3 trans = pbm_vec3_create(pbm_vec3_dot(side, negPos),
                                     pbm_vec3_dot(camUp, negPos),
                                     pbm_vec3_dot(back, negPos));
    return pbm_mat4_create(pbm_vec4_create_vec3(side),
                           pbm_vec4_create_vec3(camUp),
                           pbm_vec4_create_vec3(back),
                           pbm_vec4_create_vec3_w(trans, 1));
}
These methods are tested and you may want to test against them. If you want, the full sources are available here. Furthermore, you could revisit frustums and projection matrices online. Unfortunately, I cannot share the material from my university with you :(
I'm trying to implement functionality that can convert an Euler angle into a Quaternion and back, using the "YXZ" convention, with Eigen. Later this should let the user provide Euler angles, rotate around as a Quaternion, and convert back for the user. In fact I am really bad at math but tried my best. I have no idea if these matrices are correct or anything. The code works, but my results are way too far off, I suppose. Any idea where I take the wrong turn? This is what my Quat.cpp looks like:
#include "Quat.h"
#include <Eigen/Geometry>
#include <Eigen/Dense>
#include <cmath>
#include <iostream>
using namespace Eigen;
// Converts Euler angles (YPR = [pitch about X, roll about Y, yaw about Z], in
// degrees) to a rotation matrix, then to a quaternion, and back to Euler angles
// via toYawPitchRoll — a round-trip self-test.
Vector3f Quat::MyRotation(const Vector3f YPR)
{
Matrix3f matYaw(3, 3), matRoll(3, 3), matPitch(3, 3), matRotation(3, 3);
// Degrees -> radians; note the component mapping: YPR[0]=pitch, YPR[1]=roll, YPR[2]=yaw.
const auto yaw = YPR[2]*M_PI / 180;
const auto pitch = YPR[0]*M_PI / 180;
const auto roll = YPR[1]*M_PI / 180;
// NOTE(review): these matrices are the TRANSPOSES of the usual counter-clockwise
// (active) rotation matrices. Moreover, 'matPitch' has the shape of a rotation
// about the Y axis and 'matRoll' that of a rotation about the X axis, although
// they are labeled X and Y respectively — likely the axis mix-up behind the
// unexpected round-trip results; confirm the intended convention.
matYaw << cos(yaw), sin(yaw), 0.0f,
-sin(yaw), cos(yaw), 0.0f, //z
0.0f, 0.0f, 1.0f;
matPitch << cos(pitch), 0.0f, -sin(pitch),
0.0f, 1.0f, 0.0f, // X
sin(pitch), 0.0f, cos(pitch);
matRoll << 1.0f, 0.0f, 0.0f,
0.0f, cos(roll), sin(roll), // Y
0.0f, -sin(roll), cos(roll);
// Composition order: yaw * pitch * roll (rightmost factor applied first).
matRotation = matYaw*matPitch*matRoll;
Quaternionf quatFromRot(matRotation);
// Defensive: a quaternion built from an orthonormal matrix should already be
// unit length; normalize() only corrects rounding error here.
quatFromRot.normalize(); //Do i need to do this?
return Quat::toYawPitchRoll(quatFromRot);
}
// Extracts Euler angles (in degrees) from a quaternion.
// NOTE(review): the quaternion components are deliberately remapped
// (x <- q.y, y <- q.z, z <- q.x) before a standard Tait-Bryan extraction, and
// the middle angle is negated below — together this encodes a specific axis
// convention; confirm it matches the one used by MyRotation.
Vector3f Quat::toYawPitchRoll(const Eigen::Quaternionf& q)
{
Vector3f retVector;
const auto x = q.y();
const auto y = q.z();
const auto z = q.x();
const auto w = q.w();
retVector[2] = atan2(2.0 * (y * z + w * x), w * w - x * x - y * y + z * z);
retVector[1] = asin(-2.0 * (x * z - w * y));
retVector[0] = atan2(2.0 * (x * y + w * z), w * w + x * x - y * y - z * z);
#if 1
// Radians -> degrees; the middle (pitch-like) angle is additionally negated.
retVector[0] = (retVector[0] * (180 / M_PI));
retVector[1] = (retVector[1] * (180 / M_PI))*-1;
retVector[2] = retVector[2] * (180 / M_PI);
#endif
return retVector;
}
Input: x = 55.0, y = 80.0, z = 12.0
Quaternion: w:0.872274, x: -0.140211, y:0.447012, z:-0.140211
Return Value: x:-55.5925, y: -6.84901, z:-21.8771
The X value seems about right, disregarding the sign, but Y and Z are off.
From Euler to Quaternion:
using namespace Eigen;
//Roll pitch and yaw in Radians
float roll = 1.5707, pitch = 0, yaw = 0.707;
Quaternionf q;
// Compose axis-angle rotations: q = Rx(roll) * Ry(pitch) * Rz(yaw),
// the rightmost factor being applied first.
q = AngleAxisf(roll, Vector3f::UnitX())
* AngleAxisf(pitch, Vector3f::UnitY())
* AngleAxisf(yaw, Vector3f::UnitZ());
// Eigen stores and prints quaternion coefficients in (x, y, z, w) order.
std::cout << "Quaternion" << std::endl << q.coeffs() << std::endl;
From Quaternion to Euler:
auto euler = q.toRotationMatrix().eulerAngles(0, 1, 2);
std::cout << "Euler from quaternion in roll, pitch, yaw"<< std::endl << euler << std::endl;
Taken from https://eigen.tuxfamily.org/dox/classEigen_1_1AngleAxis.html
Here's one approach (not tested):
Vector3d euler = quaternion.toRotationMatrix().eulerAngles(2, 1, 0);
yaw = euler[0]; pitch = euler[1]; roll = euler[2];
The Quaternion-to-Euler solution didn't work for me, so I researched and modified the code; now it works for my purpose:
Vector3f ToEulerAngles(const Eigen::Quaternionf& q) {
    // Returns [yaw, pitch, roll] in radians (rotations about Z, Y, X),
    // using the standard quaternion -> Euler-angle conversion.
    Vector3f angles;
    const auto qx = q.x();
    const auto qy = q.y();
    const auto qz = q.z();
    const auto qw = q.w();

    // roll (rotation about the X axis)
    const double sinr_cosp = 2 * (qw * qx + qy * qz);
    const double cosr_cosp = 1 - 2 * (qx * qx + qy * qy);
    angles[2] = std::atan2(sinr_cosp, cosr_cosp);

    // pitch (rotation about the Y axis); clamp to +/-90 degrees at the
    // gimbal-lock singularity where |sinp| reaches 1
    const double sinp = 2 * (qw * qy - qz * qx);
    angles[1] = (std::abs(sinp) >= 1) ? std::copysign(M_PI / 2, sinp)
                                      : std::asin(sinp);

    // yaw (rotation about the Z axis)
    const double siny_cosp = 2 * (qw * qz + qx * qy);
    const double cosy_cosp = 1 - 2 * (qy * qy + qz * qz);
    angles[0] = std::atan2(siny_cosp, cosy_cosp);
    return angles;
}
I was inspired by this wiki entry and did some benchmarking with the solution presented here.
Checkout the wiki:
https://en.wikipedia.org/wiki/Conversion_between_quaternions_and_Euler_angles
When I use
auto euler = q.toRotationMatrix().eulerAngles(0, 1, 2)
It cannot work perfectly all the time: the Euler angle regularly jumps, with the calculated value deviating from the actual value by ±π.
For example, read and show yaw angle by rqt
picture.
I have no idea about this, but I find ros tf::getYaw() also can achieve "Quaternion to Euler" (because I just need yaw angle).
Without Eigen (just in case), I did:
tf2::Matrix3x3 ( quat ) . getEulerYPR( &roll, &pitch, &yaw );
// and
tf2::Matrix3x3 ( quat ) . getRPY( &roll, &pitch, &yaw );
Though, these can give only two of the 24 configurations possible.
Say I have a cube. Say the coordinate values are like this. (1 unit an arm)
// Unit cube centered at the origin: A-D form the z = +0.5 face,
// E-H the z = -0.5 face (each edge is 1 unit long).
GLfloat vertA[3] = { 0.5, 0.5, 0.5};
GLfloat vertB[3] = {-0.5, 0.5, 0.5};
GLfloat vertC[3] = {-0.5,-0.5, 0.5};
GLfloat vertD[3] = { 0.5,-0.5, 0.5};
GLfloat vertE[3] = { 0.5, 0.5,-0.5};
GLfloat vertF[3] = {-0.5, 0.5,-0.5};
GLfloat vertG[3] = {-0.5,-0.5,-0.5};
GLfloat vertH[3] = { 0.5,-0.5,-0.5};
If I translate it like
glTranslatef(1,2,3);
then 1,2 and 3 will be added to x,y and z coordinates respectively. and those are the new coordinate values of the translated cube. But if I rotate it some degree (with or without a translation)
glRotatef(25,0,0,1);
what is the coordinates of the rotated cube now?
I am working new in opengl. I am using c++ on windows.
You should make yourself familiar with linear algebra and transformation matrices.
What glRotate will do is generating a rotation matrix and post-multiplying it to the current matrix. You should be aware of some things here: the glTranslate will not directly add anything to the vertex coordinates, and the glRotate will also not change the coordinates. All what these do is changing a single matrix. This matrix will accumulate the composition of all the transformations, and will be applied once to all the vertices during the draw call.
In your case, a rotation of 25 degrees around the z axis is desired, so the z coordinates will not be changed. The rotation matrix will look like this
| cos(25°) -sin(25°) 0 0 |
| sin(25°) cos(25°) 0 0 |
| 0 0 1 0 |
| 0 0 0 1 |
To apply this matrix to a vector (x,y,z,w)^T, we just multiply the matrix by the vector.
Following the rules of that multiplication, we get a new vector with
x' = cos(25°)*x -sin(25°)*y
y' = sin(25°)*x +cos(25°)*y
z' = z
w' = w
This is just the rotation alone, not considering the translation. But you can put in the values of your vertex and you will get the transformed result back.
Here you are rotating the current matrix 25 degrees around the z axis. This is the code for glm::rotate, which does the same.
// Builds a rotation of 'angle' around axis v and post-multiplies it onto m,
// i.e. returns m * R (units of 'angle' depend on the GLM version — older
// releases took degrees; confirm for the version in use).
template <typename T, precision P>
GLM_FUNC_QUALIFIER detail::tmat4x4<T, P> rotate
(
	detail::tmat4x4<T, P> const & m,
	T const & angle,
	detail::tvec3<T, P> const & v
)
{
	// BUG FIX: the snippet referred to an undeclared 'a'; the parameter is 'angle'.
	T const a = angle;
	T c = cos(a);
	T s = sin(a);
	detail::tvec3<T, P> axis(normalize(v));
	detail::tvec3<T, P> temp((T(1) - c) * axis);
	// Axis-angle (Rodrigues) rotation matrix, written out column by column.
	detail::tmat4x4<T, P> Rotate(detail::tmat4x4<T, P>::_null);
	Rotate[0][0] = c + temp[0] * axis[0];
	Rotate[0][1] = 0 + temp[0] * axis[1] + s * axis[2];
	Rotate[0][2] = 0 + temp[0] * axis[2] - s * axis[1];
	Rotate[1][0] = 0 + temp[1] * axis[0] - s * axis[2];
	Rotate[1][1] = c + temp[1] * axis[1];
	Rotate[1][2] = 0 + temp[1] * axis[2] + s * axis[0];
	Rotate[2][0] = 0 + temp[2] * axis[0] + s * axis[1];
	Rotate[2][1] = 0 + temp[2] * axis[1] - s * axis[0];
	Rotate[2][2] = c + temp[2] * axis[2];
	// Result = m * Rotate, expanded column by column; the translation column
	// of m is carried over unchanged.
	detail::tmat4x4<T, P> Result(detail::tmat4x4<T, P>::_null);
	Result[0] = m[0] * Rotate[0][0] + m[1] * Rotate[0][1] + m[2] * Rotate[0][2];
	Result[1] = m[0] * Rotate[1][0] + m[1] * Rotate[1][1] + m[2] * Rotate[1][2];
	Result[2] = m[0] * Rotate[2][0] + m[1] * Rotate[2][1] + m[2] * Rotate[2][2];
	Result[3] = m[3];
	return Result;
}