OpenGL View Transformation Matrix Rotation - opengl

Recently I implemented a simple OpenGL program that renders a scene of objects. I have applied most of the transformation and projection matrices, so I can rotate, translate and scale objects, move my camera along the z and x axes, and apply a perspective projection. However, when it comes to camera rotation things get weird. My camera's rotation matrix simply rotates the whole world uniformly, so when I rotate the world to look up (+y) and then move forward, the camera does not advance in the direction it is looking, as it would in an FPS game; instead it moves relative to world space. I know I am missing the vectors that specify the camera's direction along the x, y and z axes, but I cannot work out how to incorporate them into my camera (view transformation) matrix. Most tutorials on the internet either describe this with a block diagram or use the conventional gluLookAt() function. I would really appreciate a brief explanation of view transformations, specifically camera rotation, and how to implement it in my matrices. My final matrix is as follows:
resultTransform = perspectiveTrans * cameraTrans * modelTrans;
where:
perspectiveTrans applies only the perspective projection transformation,
cameraTrans is the combination of rotation and translation matrices that affects every object in the scene,
modelTrans is the transformation applied to the models.
Matrix4X4.cpp file:
#include "Matrix4X4.h"
using namespace std;
////////////////////////////////// Constructor Declarations ////////////////////////////////
Matrix4X4::Matrix4X4()
{
setIdentity();
}
Matrix4X4::Matrix4X4(float value)
{
for(int i = 0 ; i < 4; i++)
for ( int j = 0; j < 4; j++)
Matrix[i][j] = value;
}
/////////////////////////////////////////////////////////////////////////////////
////////////////////////////// Destructor Declaration //////////////////////////////
Matrix4X4::~Matrix4X4()
{
}
///////////////////////////////////////////////////////////////////////////////////
/////////////////////// Set Identity Matrix /////////////////////////////////////////
void Matrix4X4::setIdentity()
{
Matrix[0][0] =1; Matrix[0][1] = 0; Matrix[0][2] = 0; Matrix[0][3] = 0;
Matrix[1][0] =0; Matrix[1][1] = 1; Matrix[1][2] = 0; Matrix[1][3] = 0;
Matrix[2][0] =0; Matrix[2][1] = 0; Matrix[2][2] = 1; Matrix[2][3] = 0;
Matrix[3][0] =0; Matrix[3][1] = 0; Matrix[3][2] = 0; Matrix[3][3] = 1;
}
///////////////////////////////////////////////////////////////////////////////////
///////////////////////// Set Translation Matrix //////////////////////////////////
Matrix4X4 Matrix4X4::setTranslation(float x,float y,float z)
{
Matrix[0][0] =1; Matrix[0][1] = 0; Matrix[0][2] = 0; Matrix[0][3] = x;
Matrix[1][0] =0; Matrix[1][1] = 1; Matrix[1][2] = 0; Matrix[1][3] = y;
Matrix[2][0] =0; Matrix[2][1] = 0; Matrix[2][2] = 1; Matrix[2][3] = z;
Matrix[3][0] =0; Matrix[3][1] = 0; Matrix[3][2] = 0; Matrix[3][3] = 1;
return *this;
}
/////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////// Set Rotation Matrix ///////////////////////////////////////////
Matrix4X4 Matrix4X4::setRotation(float x,float y,float z)
{
Matrix4X4 xRot;
Matrix4X4 yRot;
Matrix4X4 zRot;
x = (float)x * 3.14/ 180.0;
y = (float)y * 3.14/ 180.0;
z = (float)z * 3.14/ 180.0;
xRot.Matrix[0][0] =1; xRot.Matrix[0][1] = 0; xRot.Matrix[0][2] = 0; xRot.Matrix[0][3] = 0;
xRot.Matrix[1][0] =0; xRot.Matrix[1][1] = cosf(x); xRot.Matrix[1][2] = -sinf(x); xRot.Matrix[1][3] = 0;
xRot.Matrix[2][0] =0; xRot.Matrix[2][1] = sinf(x); xRot.Matrix[2][2] = cosf(x); xRot.Matrix[2][3] = 0;
xRot.Matrix[3][0] =0; xRot.Matrix[3][1] = 0; xRot.Matrix[3][2] = 0; xRot.Matrix[3][3] = 1;
yRot.Matrix[0][0] = cosf(y); yRot.Matrix[0][1] = 0; yRot.Matrix[0][2] = -sinf(y); yRot.Matrix[0][3] = 0;
yRot.Matrix[1][0] =0; yRot.Matrix[1][1] = 1; yRot.Matrix[1][2] = 0; yRot.Matrix[1][3] = 0;
yRot.Matrix[2][0] = sinf(y); yRot.Matrix[2][1] = 0; yRot.Matrix[2][2] = cosf(y); yRot.Matrix[2][3] = 0;
yRot.Matrix[3][0] =0; yRot.Matrix[3][1] = 0; yRot.Matrix[3][2] = 0; yRot.Matrix[3][3] = 1;
zRot.Matrix[0][0] = cosf(z); zRot.Matrix[0][1] = -sinf(z); zRot.Matrix[0][2] = 0; zRot.Matrix[0][3] = 0;
zRot.Matrix[1][0] = sinf(z); zRot.Matrix[1][1] = cosf(z); zRot.Matrix[1][2] = 0; zRot.Matrix[1][3] = 0;
zRot.Matrix[2][0] =0; zRot.Matrix[2][1] = 0; zRot.Matrix[2][2] = 1; zRot.Matrix[2][3] = 0;
zRot.Matrix[3][0] =0; zRot.Matrix[3][1] = 0; zRot.Matrix[3][2] = 0; zRot.Matrix[3][3] = 1;
return (zRot * yRot * xRot) ;
}
////////////////////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////// Set Scale Matrix //////////////////////////////////////////
Matrix4X4 Matrix4X4::setScale(float x,float y,float z)
{
Matrix[0][0] =x; Matrix[0][1] = 0; Matrix[0][2] = 0; Matrix[0][3] = 0;
Matrix[1][0] =0; Matrix[1][1] = y; Matrix[1][2] = 0; Matrix[1][3] = 0;
Matrix[2][0] =0; Matrix[2][1] = 0; Matrix[2][2] = z; Matrix[2][3] = 0;
Matrix[3][0] =0; Matrix[3][1] = 0; Matrix[3][2] = 0; Matrix[3][3] = 1;
return *this;
}
/////////////////////////////////////////////////////////////////////////////////////////////////////
///////////////////////////////// Set Perspective Projection ///////////////////////////////////////
void Matrix4X4::setPerspective(float fov,float aRatio,float zNear,float zFar)
{
fov = (fov/2) * 3.14 / 180.0;
float tanHalfFOV = tanf(fov);
float zRange = zNear - zFar;
Matrix[0][0] =1.0f / (tanHalfFOV * aRatio); Matrix[0][1] = 0; Matrix[0][2] = 0; Matrix[0][3] = 0;
Matrix[1][0] =0; Matrix[1][1] = 1.0f / tanHalfFOV; Matrix[1][2] = 0; Matrix[1][3] = 0;
Matrix[2][0] =0; Matrix[2][1] = 0; Matrix[2][2] = (-zNear - zFar)/ zRange; Matrix[2][3] = 2* zFar * zNear / zRange;
Matrix[3][0] =0; Matrix[3][1] = 0; Matrix[3][2] = 1; Matrix[3][3] = 0;
}
/////////////////////////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////// Getters & Setters ////////////////////////////////////////////
float * Matrix4X4::getMat()
{
return (float *) Matrix;
}
float Matrix4X4::getMember(int x, int y) const
{
return Matrix[x][y];
}
void Matrix4X4::setMat(int row,int col,float value)
{
Matrix[row][col] = value;
}
/////////////////////////////////////////////////////////////////////////////////////////////////////
/////////////////////////////////////// (*) Operator Overload //////////////////////////////////////
Matrix4X4 operator * (const Matrix4X4 & lhs,const Matrix4X4 & rhs)
{
Matrix4X4 result;
for(int i = 0 ; i < 4; i++)
for ( int j = 0; j < 4; j++)
result.setMat(i, j, lhs.getMember(i,0) * rhs.getMember(0, j) +
lhs.getMember(i,1) * rhs.getMember(1, j) +
lhs.getMember(i,2) * rhs.getMember(2, j) +
lhs.getMember(i,3) * rhs.getMember(3, j));
return result;
}
//////////////////////////////////////////////////////////////////////////////////////////////////
The transformation code I use in my main block:
SDL_PumpEvents();
for (int x = 0; x< 256; x++)
{
if (state[x] == 1 )
{
if(x == 26)
tranForward -= 0.001;
if (x == 22)
tranForward += 0.001;
if (x == 4)
tranRight += 0.0009;
if (x == 7)
tranRight -= 0.0009;
if (x == 82)
lookUp += 0.02;
if (x == 81)
lookUp -= 0.02;
if (x == 80)
lookRight -= 0.02;
if (x == 79)
lookRight += 0.02;
}
}
modelTrans = Translation.setTranslation(0, 0, 5) * Scale.setScale(0.5, 0.5, 0.5);
camTrans = Rotation.setRotation(lookUp, lookRight, 0) * Translation.setTranslation(tranRight, 0, tranForward);
Projection.setPerspective(70, win.getWidth()/win.getHeight(), 0.1, 1000);
result = Projection * camTrans * modelTrans;
glUniformMatrix4fv(uniformloc, 1, GL_TRUE, result.getMat());

Matrix multiplication does not follow the same rules as scalar multiplication, and in your case A*B does NOT equal B*A. If the rest of the code is good, your solution might simply be turning
result = Projection * camTrans * modelTrans;
into
result = Projection * (modelTrans * camTrans);
Always watch out for both multiplication order and parentheses when dealing with anything but scalar values.
In general, when you combine translation and rotation matrices you need to think in the object's own coordinate space, which means thinking like you are playing an FPS:
Multiplying rotation*translation means the object is rotated first and then translated, so the resulting position depends on the rotation already being applied; a 180-degree rotation will translate the object backwards from a third-person point of view.
Multiplying translation*rotation means the object is translated first and then rotated, so it moves in the same direction no matter the rotation; only the direction the object is facing is changed by the rotation matrix.
Just a nice example: if you want to represent the movement of the Earth around the Sun (the Earth circles the Sun at some radius while rotating around its own axis):
Matrix4X4 orbitRotation; //rotation matrix for where in orbit the object is
Matrix4X4 objectRotation; //object rotation around its own axis
Matrix4X4 orbitRadius; //object orbit radius
Matrix4X4 result = (orbitRotation*orbitRadius)*objectRotation;
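The practical consequence for the FPS-style camera in the question is that "move forward" has to follow the camera's own forward axis rather than the world z axis. A minimal sketch, assuming the view matrix is built as rotation * translation and reusing the question's Matrix4X4 class (the Vec3 struct and the per-frame pseudocode are illustrative, not part of the original code):
// Sketch only: if the upper-left 3x3 of the view matrix is the rotation that maps
// world space into camera space, the camera's axes expressed in world space are
// the ROWS of that 3x3 (the columns of its transpose).
struct Vec3 { float x, y, z; };                     // hypothetical helper type
Vec3 cameraForward(const Matrix4X4 &viewRotation)   // row 2 = look axis (+z here)
{
    return { viewRotation.getMember(2, 0),
             viewRotation.getMember(2, 1),
             viewRotation.getMember(2, 2) };
}
Vec3 cameraRight(const Matrix4X4 &viewRotation)     // row 0 = right axis
{
    return { viewRotation.getMember(0, 0),
             viewRotation.getMember(0, 1),
             viewRotation.getMember(0, 2) };
}
// Per frame (pseudocode): move the eye along the current forward/right vectors,
// then rebuild the view matrix from the rotation and the negated eye position:
//   eye += cameraForward(rotOnly) * moveSpeed;
//   camTrans = rotOnly * Translation.setTranslation(-eye.x, -eye.y, -eye.z);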

My code seemed to ignore the previous matrix calculation and recalculated the transformations with respect to my scene's initial state. The desired world rotation and translation is achieved by using a fixed per-frame value for rotation and translation; the modified code blocks are as follows:
for (int x = 0; x< 256; x++)
{
if (state[x] == 1 )
{
if(x == 26)
tranForward = -0.001;
if (x == 22)
tranForward = 0.001;
if (x == 4)
tranRight = 0.0009;
if (x == 7)
tranRight = -0.0009;
if (x == 82)
lookUp = 0.02;
if (x == 81)
lookUp = -0.02;
if (x == 80)
lookRight = -0.02;
if (x == 79)
lookRight = 0.02;
}
}
camTrans = Rotation.setRotation(lookUp, lookRight, 0) * Translation.setTranslation(tranRight, 0, tranForward);
result = camTrans * result;
modelTrans = Projection * result;
tranForward = 0.0;
tranRight = 0.0;
lookUp = 0.0;
lookRight = 0.0;
glUniformMatrix4fv(uniformloc, 1, GL_TRUE, modelTrans.getMat());
Note that the result matrix keeps track of the previous state, and the current frame's transformations are applied with respect to it.

Related

Can I get rotation from this matrix algorithm

Can I get the rotation (yaw, pitch, roll) back from this matrix algorithm?
Thanks.
matrix[0][0] = cos(pitch)*cos(yaw);
matrix[0][2] = cos(pitch)*sin(yaw);
matrix[0][1] = sin(pitch);
matrix[0][3] = 0;
matrix[1][0] = -cos(roll)*sin(pitch)*cos(yaw)+sin(roll)*sin(yaw);
matrix[1][1] = cos(roll)*cos(pitch);
matrix[1][2] = -cos(roll)*sin(pitch)*sin(yaw)-sin(roll)*cos(yaw);
matrix[1][3] = 0;
matrix[2][0] = -sin(roll)*sin(pitch)*cos(yaw)-cos(roll)*sin(yaw);
matrix[2][1] = sin(roll)*cos(pitch);
matrix[2][2] = cos(roll)*cos(yaw)-sin(roll)*sin(pitch)*sin(yaw);
matrix[2][3] = 0;
matrix[3][0] = 0;
matrix[3][1] = 0;
matrix[3][2] = 0;
matrix[3][3] = 1;
Edit: I used this code but it is not working:
D3DXMATRIX matrix2 = D3DXMATRIX(matrix);
yaw = atan2(matrix2._13, matrix2._11);
pitch = asin(matrix2._12);
roll = atan2(matrix2._32, matrix2._22);
Edit 3:
This is my old function; it works, but not 100%:
VECTOR getAnglefromMatrix(float* m)
{
float pitch, yaw, roll;
D3DXMATRIX matrix = D3DXMATRIX(m);
if (matrix._11 == 1.0f)
{
yaw = todegree(atan2f(matrix._13, matrix._34));
pitch = 0;
roll = 0;
}
else if (matrix._11 == -1.0f)
{
yaw = todegree(atan2f(matrix._13, matrix._34));
pitch = 0;
roll = 0;
}
else
{
yaw = todegree(atan2(-matrix._31,matrix._11));
pitch = todegree(asin(matrix._21));
roll = todegree(atan2(-matrix._23,matrix._22));
}
return vector3d(yaw,pitch,roll);
}
Did I do something wrong?
We can go through the entries and find some that are nice for back-calculations:
matrix[0][0] = cos(pitch)*cos(yaw);
matrix[0][2] = cos(pitch)*sin(yaw);
This gives us:
yaw = atan2(matrix[0][2], matrix[0][0])
For the pitch:
matrix[0][1] = sin(pitch);
This gives us:
pitch = asin(matrix[0][1]) //this assumes pitch is between -pi/2 and +pi/2
Then
matrix[1][1] = cos(roll)*cos(pitch);
matrix[2][1] = sin(roll)*cos(pitch);
This gives us:
roll = atan2(matrix[2][1], matrix[1][1])
I omitted the cases where the arguments to atan2 become zero. I am confident you can figure them out. In these cases, the Euler angles are not uniquely defined.
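Putting those together, a compact sketch (the function name and the near-±90° handling are mine, not taken from the question; the angle order follows the question's matrix layout):
#include <math.h>
// Recovers yaw/pitch/roll (in radians) from the rotation matrix defined above.
void anglesFromMatrix(const float m[4][4], float &yaw, float &pitch, float &roll)
{
    if (fabsf(m[0][1]) < 0.9999f)              // regular case
    {
        yaw   = atan2f(m[0][2], m[0][0]);
        pitch = asinf(m[0][1]);                // assumes pitch in (-pi/2, +pi/2)
        roll  = atan2f(m[2][1], m[1][1]);
    }
    else                                       // cos(pitch) ~ 0: yaw and roll are
    {                                          // not uniquely defined (gimbal lock)
        pitch = (m[0][1] > 0.0f) ? 1.5707963f : -1.5707963f;
        roll  = 0.0f;                          // pick one of the infinite solutions
        yaw   = atan2f(-m[2][0], m[2][2]);
    }
}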

Exponential Smoothing of Newton Fractal

I'm writing myself a Newton Fractal Generator. The images all looked like this:
But I would actually like it to look a bit smoother. I've done some research and came across http://www.hiddendimension.com/FractalMath/Convergent_Fractals_Main.html, which looks mostly correct, except that there are some issues at the edges of the basins.
This is my generation loop:
while (i < 6000 && fabs(z.r) < 10000 && !found){
f = computeFunction(z, params, paramc[0]);
d = computeFunction(z, paramsD, paramc[1]);
iterexp = iterexp + exp(-fabs(z.r) - 0.5 / (fabs(subComplex(zo, z).r)));
zo = z;
z = subComplex(z, divComplex(f, d));
i++;
for (int j = 0; j < paramc[0] - 1; j++){
if (compComplex(z, zeros[j], RESOLUTION)){
resType[x + xRes * y] = j;
result[x + xRes * y] = iterexp;
found = true;
break;
}
}
if (compComplex(z, zo, RESOLUTION/100)){
resType[x + xRes * y] = 12;
break;
}
}
The coloration:
const int xRes = res[0];
const int yRes = res[1];
for (int y = 0; y < fraktal->getHeight(); y++){
for (int x = 0; x < fraktal->getWidth(); x++){
int type, it;
double conDiv;
if (genCL && genCL->err == CL_SUCCESS){
conDiv = genCL->result[x + y * xRes];
type = genCL->typeRes[x + y * xRes];
it = genCL->iterations[x + y * xRes];
} else {
type = 3;
conDiv = runNewton(std::complex<double>((double)((x - (double)(xRes / 2)) / zoom[0]), (double)((y - (double)(yRes / 2)) / zoom[1])), type);
}
if (type < 15){
Color col;
col.setColorHexRGB(colors[type]);
col.setColorHSV(col.getHue(), col.getSaturation(), 1-conDiv);
fraktal->setPixel(x, y, col);
} else {
fraktal->setPixel(x, y, conDiv, conDiv, conDiv, 1);
}
}
}
I appreciate any help to actually smooth this ;-)
Thanks,
- fodinabor

How to match SketchUp Camera in Processing/OpenGL?

I'm trying to match a scene from SketchUp in Processing/OpenGL but can't seem to get the measurements right.
I'm using these simple commands in the Ruby Console in SketchUp:
model = Sketchup.active_model
cam = model.active_view.camera
print cam.eye, cam.direction, cam.fov
Which prints these values for my file:
(1668.854717mm, -1723.414322mm, 131.550996mm)(-0.688802494154077, 0.649067164730165, 0.322897723306109)63.6653435710446nil
The FOV seems to work, but I don't think I've figured out the camera position (units) yet.
Here's my attempt:
float eyeScale = 1.0f;
float camEyeX = 1668.854717f * eyeScale;
float camEyeY = -1723.414322f * eyeScale;
float camEyeZ = 131.550996f * eyeScale;
float camTargetX = -0.688802494154077f;
float camTargetY = 0.649067164730165f;
float camTargetZ = 0.322897723306109f;
float camFOV = 63.665f;
float div = 10;
void setup(){
size(1280,720,P3D);
}
void draw(){
background(255);
perspective(radians(camFOV), width/height, camEyeZ * 0.1f, camEyeZ * 10);
camera(camEyeX/div, camEyeY/div, camEyeZ/div, camTargetX, camTargetY, camTargetZ, 1, 0, 0);
drawGrid(20,10,10,0);
drawGrid(20,10,10,1);
drawGrid(20,10,10,2);
}
void keyPressed(){
if(keyCode == UP) div++;
if(keyCode == DOWN) div--;
}
void drawGrid(int size,int w,int h,int plane){
pushStyle();
noFill();
if(plane == 0) stroke(255,0,0);
if(plane == 1) stroke(0,255,0);
if(plane == 2) stroke(0,0,255);
int total = w * h;
int tw = w * size;
int th = h * size;
beginShape(LINES);
for(int i = 0 ; i < total; i++){
int x = (i % w) * size;
int y = (i / w) * size;
if(plane == 0){
vertex(0,x,0);vertex(0,x,th);
vertex(0,0,y);vertex(0,tw,y);
}
if(plane == 1){
vertex(x,0,0);vertex(x,0,th);
vertex(0,0,y);vertex(tw,0,y);
}
if(plane == 2){
vertex(x,0,0);vertex(x,th,0);
vertex(0,y,0);vertex(tw,y,0);
}
}
endShape();
popStyle();
}
The grids look OK above using the perspective() call, but if I add the camera() call back in, the scene disappears.
Any hints or tips will help. Processing code is ok, but I don't mind raw GL calls/matrices either.
Update
Based on @Majlik's notes, it's worth pointing out the differences between the SketchUp camera API and Processing's camera API, and on top of that the difference in coordinate systems (SketchUp uses Z up, while Processing uses Y up).
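For the axis difference specifically, one common Z-up to Y-up conversion is a -90 degree rotation about X. A C++-style sketch (the exact mapping depends on handedness conventions, so treat it as something to experiment with rather than the definitive SketchUp-to-Processing rule):
struct Vec3 { float x, y, z; };   // illustrative helper type
// Assumed convention: a right-handed Z-up point (SketchUp-style) becomes a
// right-handed Y-up point (OpenGL-style) by rotating -90 degrees about X.
Vec3 zUpToYUp(const Vec3 &v)
{
    return { v.x, v.z, -v.y };    // the old Z axis becomes the new up axis
}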
I've tried playing with the coordinate system a bit using keys, but I don't understand the conversion from SketchUp to Processing/OpenGL:
float eyeScale = 1.0f;
float camEyeX = 1668.854717f * eyeScale;
float camEyeY = -1723.414322f * eyeScale;
float camEyeZ = 131.550996f * eyeScale;
float camTargetX = -0.688802494154077f;
float camTargetY = 0.649067164730165f;
float camTargetZ = 0.322897723306109f;
float camFOV = 63.665f;
float div = 10;
PImage bg;
int axis = 0;
boolean flipAxis = false;
void setup(){
size(1280,720,P3D);
bg = loadImage("SketchupCam.png");
}
void draw(){
background(bg);
perspective(radians(camFOV), width/height, camEyeZ * 0.1f, camEyeZ * 10);
camera(camEyeX/div, camEyeY/div, camEyeZ/div,
camTargetX+camEyeX/div, camTargetY+camEyeY/div, camTargetZ+camEyeZ/div,
axis == 0 ? (flipAxis ? -1 : 1) : 0,
axis == 1 ? (flipAxis ? -1 : 1) : 0,
axis == 2 ? (flipAxis ? -1 : 1) : 0);
drawGrid(20,10,10,0);
drawGrid(20,10,10,1);
drawGrid(20,10,10,2);
}
void keyPressed(){
if(keyCode == UP) div++;
if(keyCode == DOWN) div--;
println(div);
if(key == 'x') axis = 0;
if(key == 'y') axis = 1;
if(key == 'z') axis = 2;
if(key == ' ') flipAxis = !flipAxis;
}
void drawGrid(int size,int w,int h,int plane){
pushStyle();
noFill();
if(plane == 0) stroke(255,0,0);
if(plane == 1) stroke(0,255,0);
if(plane == 2) stroke(0,0,255);
int total = w * h;
int tw = w * size;
int th = h * size;
beginShape(LINES);
for(int i = 0 ; i < total; i++){
int x = (i % w) * size;
int y = (i / w) * size;
if(plane == 0){
vertex(0,x,0);vertex(0,x,th);
vertex(0,0,y);vertex(0,tw,y);
}
if(plane == 1){
vertex(x,0,0);vertex(x,0,th);
vertex(0,0,y);vertex(tw,0,y);
}
if(plane == 2){
vertex(x,0,0);vertex(x,th,0);
vertex(0,y,0);vertex(tw,y,0);
}
}
endShape();
popStyle();
}
And here is a SketchUp screenshot I'm trying to match in Processing:
The smaller box is 1000mm in SketchUp.
How could I match the view from SketchUp in Processing/OpenGL?

C++ triangle rasterization

I'm trying to fix this triangle rasterizer, but cannot make it work correctly. For some reason it only draws half of the triangles.
void DrawTriangle(Point2D p0, Point2D p1, Point2D p2)
{
Point2D Top, Middle, Bottom;
bool MiddleIsLeft;
if (p0.y < p1.y) // case: 1, 2, 5
{
if (p0.y < p2.y) // case: 1, 2
{
if (p1.y < p2.y) // case: 1
{
Top = p0;
Middle = p1;
Bottom = p2;
MiddleIsLeft = true;
}
else // case: 2
{
Top = p0;
Middle = p2;
Bottom = p1;
MiddleIsLeft = false;
}
}
else // case: 5
{
Top = p2;
Middle = p0;
Bottom = p1;
MiddleIsLeft = true;
}
}
else // case: 3, 4, 6
{
if (p0.y < p2.y) // case: 4
{
Top = p1;
Middle = p0;
Bottom = p2;
MiddleIsLeft = false;
}
else // case: 3, 6
{
if (p1.y < p2.y) // case: 3
{
Top = p1;
Middle = p2;
Bottom = p0;
MiddleIsLeft = true;
}
else // case 6
{
Top = p2;
Middle = p1;
Bottom = p0;
MiddleIsLeft = false;
}
}
}
float xLeft, xRight;
xLeft = xRight = Top.x;
float mLeft, mRight;
// Region 1
if(MiddleIsLeft)
{
mLeft = (Top.x - Middle.x) / (Top.y - Middle.y);
mRight = (Top.x - Bottom.x) / (Top.y - Bottom.y);
}
else
{
mLeft = (Top.x - Bottom.x) / (Top.y - Bottom.y);
mRight = (Middle.x - Top.x) / (Middle.y - Top.y);
}
int finalY;
float Tleft, Tright;
for (int y = ceil(Top.y); y < (int)Middle.y; y++)
{
Tleft=float(Top.y-y)/(Top.y-Middle.y);
Tright=float(Top.y-y)/(Top.y-Bottom.y);
for (int x = ceil(xLeft); x <= ceil(xRight) - 1 ; x++)
{
FrameBuffer::SetPixel(x, y, p0.r,p0.g,p0.b);
}
xLeft += mLeft;
xRight += mRight;
finalY = y;
}
// Region 2
if (MiddleIsLeft)
{
mLeft = (Bottom.x - Middle.x) / (Bottom.y - Middle.y);
}
else
{
mRight = (Middle.x - Bottom.x) / (Middle.y - Bottom.y);
}
for (int y = Middle.y; y <= ceil(Bottom.y) - 1; y++)
{
Tleft=float(Bottom.y-y)/(Bottom.y-Middle.y);
Tright=float(Top.y-y)/(Top.y-Bottom.y);
for (int x = ceil(xLeft); x <= ceil(xRight) - 1; x++)
{
FrameBuffer::SetPixel(x, y, p0.r,p0.g,p0.b);
}
xLeft += mLeft;
xRight += mRight;
}
}
Here is what happens when I use it to draw shapes.
When I disable the second region, all those weird triangles disappear.
The wireframe mode works perfectly, which rules out everything other than the triangle rasterizer.
I kind of got lost in your implementation, but here's what I do (I have a slightly more complex version for arbitrary convex polygons, not just triangles). I think that, apart from Bresenham's line algorithm, it's very simple (actually the algorithm is simple too):
#include <stddef.h>
#include <limits.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <time.h>
#define SCREEN_HEIGHT 22
#define SCREEN_WIDTH 78
// Simulated frame buffer
char Screen[SCREEN_HEIGHT][SCREEN_WIDTH];
void SetPixel(long x, long y, char color)
{
if ((x < 0) || (x >= SCREEN_WIDTH) ||
(y < 0) || (y >= SCREEN_HEIGHT))
{
return;
}
Screen[y][x] = color;
}
void Visualize(void)
{
long x, y;
for (y = 0; y < SCREEN_HEIGHT; y++)
{
for (x = 0; x < SCREEN_WIDTH; x++)
{
printf("%c", Screen[y][x]);
}
printf("\n");
}
}
typedef struct
{
long x, y;
unsigned char color;
} Point2D;
// min X and max X for every horizontal line within the triangle
long ContourX[SCREEN_HEIGHT][2];
#define ABS(x) (((x) >= 0) ? (x) : -(x))
// Scans a side of a triangle setting min X and max X in ContourX[][]
// (using the Bresenham's line drawing algorithm).
void ScanLine(long x1, long y1, long x2, long y2)
{
long sx, sy, dx1, dy1, dx2, dy2, x, y, m, n, k, cnt;
sx = x2 - x1;
sy = y2 - y1;
if (sx > 0) dx1 = 1;
else if (sx < 0) dx1 = -1;
else dx1 = 0;
if (sy > 0) dy1 = 1;
else if (sy < 0) dy1 = -1;
else dy1 = 0;
m = ABS(sx);
n = ABS(sy);
dx2 = dx1;
dy2 = 0;
if (m < n)
{
m = ABS(sy);
n = ABS(sx);
dx2 = 0;
dy2 = dy1;
}
x = x1; y = y1;
cnt = m + 1;
k = n / 2;
while (cnt--)
{
if ((y >= 0) && (y < SCREEN_HEIGHT))
{
if (x < ContourX[y][0]) ContourX[y][0] = x;
if (x > ContourX[y][1]) ContourX[y][1] = x;
}
k += n;
if (k < m)
{
x += dx2;
y += dy2;
}
else
{
k -= m;
x += dx1;
y += dy1;
}
}
}
void DrawTriangle(Point2D p0, Point2D p1, Point2D p2)
{
int y;
for (y = 0; y < SCREEN_HEIGHT; y++)
{
ContourX[y][0] = LONG_MAX; // min X
ContourX[y][1] = LONG_MIN; // max X
}
ScanLine(p0.x, p0.y, p1.x, p1.y);
ScanLine(p1.x, p1.y, p2.x, p2.y);
ScanLine(p2.x, p2.y, p0.x, p0.y);
for (y = 0; y < SCREEN_HEIGHT; y++)
{
if (ContourX[y][1] >= ContourX[y][0])
{
long x = ContourX[y][0];
long len = 1 + ContourX[y][1] - ContourX[y][0];
// Can draw a horizontal line instead of individual pixels here
while (len--)
{
SetPixel(x++, y, p0.color);
}
}
}
}
int main(void)
{
Point2D p0, p1, p2;
// clear the screen
memset(Screen, ' ', sizeof(Screen));
// generate random triangle coordinates
srand((unsigned)time(NULL));
p0.x = rand() % SCREEN_WIDTH;
p0.y = rand() % SCREEN_HEIGHT;
p1.x = rand() % SCREEN_WIDTH;
p1.y = rand() % SCREEN_HEIGHT;
p2.x = rand() % SCREEN_WIDTH;
p2.y = rand() % SCREEN_HEIGHT;
// draw the triangle
p0.color = '1';
DrawTriangle(p0, p1, p2);
// also draw the triangle's vertices
SetPixel(p0.x, p0.y, '*');
SetPixel(p1.x, p1.y, '*');
SetPixel(p2.x, p2.y, '*');
Visualize();
return 0;
}
Output:
*111111
1111111111111
111111111111111111
1111111111111111111111
111111111111111111111111111
11111111111111111111111111111111
111111111111111111111111111111111111
11111111111111111111111111111111111111111
111111111111111111111111111111111111111*
11111111111111111111111111111111111
1111111111111111111111111111111
111111111111111111111111111
11111111111111111111111
1111111111111111111
11111111111111
11111111111
1111111
1*
The original code will only work properly with triangles that have counter-clockwise winding, because of the if-else statements at the top that determine whether Middle is on the left or the right. It could be that the triangles which aren't drawing have the wrong winding.
This Stack Overflow question shows how to do it: Determine winding of a 2D triangles after triangulation
The original code is fast because it doesn't save the points of the line in a temporary memory buffer. It seems a bit over-complicated even given that, but that's another problem.
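As an illustration of the winding test from that link, a hypothetical helper (it assumes the question's Point2D exposes float x/y members; the sign convention flips if y grows downward in screen space):
// The sign of the 2D cross product of two edge vectors gives the winding.
bool IsCounterClockwise(const Point2D &p0, const Point2D &p1, const Point2D &p2)
{
    float cross = (p1.x - p0.x) * (p2.y - p0.y) -
                  (p1.y - p0.y) * (p2.x - p0.x);
    return cross > 0.0f;
}
// A caller could swap two vertices whenever this returns false, so DrawTriangle
// always receives the winding it expects.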
The following code is in your implementation:
if (p0.y < p1.y) // case: 1, 2, 5
{
if (p0.y < p2.y) // case: 1, 2
{
if (p1.y < p2.y) // case: 1
{
Top = p0;
Middle = p1;
Bottom = p2;
MiddleIsLeft = true;
}
else // case: 2
{
Top = p0;
Middle = p2;
Bottom = p1;
MiddleIsLeft = false;
}
}
This else branch means that p2.y (Middle) can equal p1.y (Bottom). If that is true, then when region 2 runs
if (MiddleIsLeft)
{
mLeft = (Bottom.x - Middle.x) / (Bottom.y - Middle.y);
}
else
{
mRight = (Middle.x - Bottom.x) / (Middle.y - Bottom.y);
}
That else branch will divide by zero, because Middle.y - Bottom.y is zero.
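A minimal guard, as a sketch only: skip region 2 when Middle and Bottom land on (nearly) the same scanline, so the slope is never computed from a zero height difference:
// Guard for the flat-bottom case (Middle.y == Bottom.y): region 2 has no
// scanlines left to fill, so it can be skipped entirely.
if (fabsf(Bottom.y - Middle.y) > 1e-4f)
{
    if (MiddleIsLeft)
        mLeft = (Bottom.x - Middle.x) / (Bottom.y - Middle.y);
    else
        mRight = (Middle.x - Bottom.x) / (Middle.y - Bottom.y);
    // ... region 2 scanline loop as in the original code ...
}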

Creating a 3d plane using Frank Luna's technique: what are sinf and cosf?

I am creating a 3D plane that lies in the x-z plane and has hills that extend along the y axis. The bulk of the code looks like this:
float PeaksAndValleys::getHeight(float x, float z)const
{
return 0.3f*( z*sinf(0.1f*x) + x*cosf(0.1f*z) );
}
void PeaksAndValleys::init(ID3D10Device* device, DWORD m, DWORD n, float dx)
{
md3dDevice = device;
mNumRows = m;
mNumCols = n;
mNumVertices = m*n;
mNumFaces = (m-1)*(n-1)*2;
// Create the geometry and fill the vertex buffer.
std::vector<Vertex> vertices(mNumVertices);
float halfWidth = (n-1)*dx*0.5f;
float halfDepth = (m-1)*dx*0.5f;
for(DWORD i = 0; i < m; ++i)
{
float z = halfDepth - i*dx;
for(DWORD j = 0; j < n; ++j)
{
float x = -halfWidth + j*dx;
// Graph of this function looks like a mountain range.
float y = getHeight(x,z);
vertices[i*n+j].pos = D3DXVECTOR3(x, y, z);
// Color the vertex based on its height.
if( y < -10.0f )
vertices[i*n+j].color = BEACH_SAND;
else if( y < 5.0f )
vertices[i*n+j].color = LIGHT_YELLOW_GREEN;
else if( y < 12.0f )
vertices[i*n+j].color = DARK_YELLOW_GREEN;
else if( y < 20.0f )
vertices[i*n+j].color = DARKBROWN;
else
vertices[i*n+j].color = WHITE;
}
}
D3D10_BUFFER_DESC vbd;
vbd.Usage = D3D10_USAGE_IMMUTABLE;
vbd.ByteWidth = sizeof(Vertex) * mNumVertices;
vbd.BindFlags = D3D10_BIND_VERTEX_BUFFER;
vbd.CPUAccessFlags = 0;
vbd.MiscFlags = 0;
D3D10_SUBRESOURCE_DATA vinitData;
vinitData.pSysMem = &vertices[0];
HR(md3dDevice->CreateBuffer(&vbd, &vinitData, &mVB));
// Create the index buffer. The index buffer is fixed, so we only
// need to create and set once.
std::vector<DWORD> indices(mNumFaces*3); // 3 indices per face
// Iterate over each quad and compute indices.
int k = 0;
for(DWORD i = 0; i < m-1; ++i)
{
for(DWORD j = 0; j < n-1; ++j)
{
indices[k] = i*n+j;
indices[k+1] = i*n+j+1;
indices[k+2] = (i+1)*n+j;
indices[k+3] = (i+1)*n+j;
indices[k+4] = i*n+j+1;
indices[k+5] = (i+1)*n+j+1;
k += 6; // next quad
}
}
D3D10_BUFFER_DESC ibd;
ibd.Usage = D3D10_USAGE_IMMUTABLE;
ibd.ByteWidth = sizeof(DWORD) * mNumFaces*3;
ibd.BindFlags = D3D10_BIND_INDEX_BUFFER;
ibd.CPUAccessFlags = 0;
ibd.MiscFlags = 0;
D3D10_SUBRESOURCE_DATA iinitData;
iinitData.pSysMem = &indices[0];
HR(md3dDevice->CreateBuffer(&ibd, &iinitData, &mIB));
}
My question pertains to cosf and sinf. I am familiar with trigonometry and I understand sine, cosine, and tangent, but I am not familiar with cosf and sinf and what they do. From looking at this example, they have a lot to do with finding a y value.
cosf and sinf are simply the float versions of cos and sin. The normal cos and sin functions take and return double values instead of floats. Note that all these functions work in radians, not degrees.
Combined in the way above, they give a landscape that looks somewhat like a mountain range, as in this plot.
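As a tiny standalone illustration (not from the question's code), both variants expect radians, and the f-suffixed versions work entirely in float:
#include <math.h>
#include <stdio.h>
int main(void)
{
    float degrees = 90.0f;
    float radians = degrees * 3.14159265f / 180.0f;  // conversion is up to the caller
    float  s  = sinf(radians);   // float in, float out
    double sd = sin(radians);    // double version; the float argument is promoted
    printf("sinf: %f   sin: %f\n", s, sd);
    // The height function from the question, evaluated at a single point:
    float x = 10.0f, z = 20.0f;
    float y = 0.3f * (z * sinf(0.1f * x) + x * cosf(0.1f * z));
    printf("getHeight(10, 20) = %f\n", y);
    return 0;
}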