I want to implement collision detection for 6 toruses that are randomly distributed in the game area. It is a simple first-person 3D space game using a perspective view. I saw a Stack Overflow answer suggesting to compute the distance from the player to the torus cell and, if it is bigger than half or the whole cell size, you are colliding, give or take your coordinate system and map topology tweaks. But if we take just that distance, doesn't that mean we're only considering the z coordinate? So if the camera moves to that distance (without considering the x, y coordinates) it is always treated as a collision, which is wrong, right?
I'm hoping to do this using the AABB algorithm. Is it OK to treat the camera position and the torus position as two boxes and check box-to-box collision, or should I treat the camera as a point and the torus as a box (point-to-box)? Or can somebody suggest the best way to do this?
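For clarity, this is roughly what I mean by the point-to-box variant (just a sketch of the idea, checking all three axes, not my working code):
// camera position vs. an axis-aligned box of half size halfSide around one torus center
bool pointInAABB(const glm::vec3 &camPos, const glm::vec3 &torusCenter, float halfSide)
{
    return (camPos.x >= torusCenter.x - halfSide) && (camPos.x <= torusCenter.x + halfSide)
        && (camPos.y >= torusCenter.y - halfSide) && (camPos.y <= torusCenter.y + halfSide)
        && (camPos.z >= torusCenter.z - halfSide) && (camPos.z <= torusCenter.z + halfSide);
}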
Below is the code that I've tried so far.
float im[16], m[16], znear = 0.1, zfar = 100.0, fovx = 45.0 * M_PI / 180.0;
glm::vec3 p0, p1, p2, p3, o, u, v;
//p0, p1, p2, p3 holds your znear camera screen corners in world coordinates
void ChangeSize(int w, int h)
{
GLfloat fAspect;
// Prevent a divide by zero
if(h == 0)
h = 1;
// Set Viewport to window dimensions
glViewport(0, 0, w, h);
// Calculate aspect ratio of the window
fAspect = (GLfloat)w*1.0/(GLfloat)h;
// Set the perspective coordinate system
glMatrixMode(GL_PROJECTION);
glLoadIdentity();
// field of view of 45 degrees, near and far planes 0.1 and 300
// znear and zfar should typically have a ratio of about 1000:1 to make sorting out z depth easier for the GPU
gluPerspective(45.0f, fAspect, 0.1f, 300.0f); // may need to make the far plane larger depending on the project
// Modelview matrix reset
glMatrixMode(GL_MODELVIEW);
glLoadIdentity();
// get camera matrix (must be in right place in code before model transformations)
glGetFloatv(GL_MODELVIEW_MATRIX, im); // get camera inverse matrix
matrix_inv(m, im); // m = inverse(im)
u = glm::vec3(m[0], m[1], m[2]); // x axis
v = glm::vec3(m[4], m[5], m[6]); // y axis
o = glm::vec3(m[12], m[13], m[14]); // origin
o -= glm::vec3(m[8], m[9], m[10]) * znear; // z axis offset
// scale by FOV
u *= znear * tan(0.5 * fovx);
v *= znear * tan(0.5 * fovx / fAspect);
// get rectangle corners
p0 = o - u - v;
p1 = o + u - v;
p2 = o + u + v;
p3 = o - u + v;
}
void matrix_inv(float* a, float* b) // a[16] = Inverse(b[16])
{
float x, y, z;
// transpose of rotation matrix
a[0] = b[0];
a[5] = b[5];
a[10] = b[10];
x = b[1]; a[1] = b[4]; a[4] = x;
x = b[2]; a[2] = b[8]; a[8] = x;
x = b[6]; a[6] = b[9]; a[9] = x;
// copy projection part
a[3] = b[3];
a[7] = b[7];
a[11] = b[11];
a[15] = b[15];
// convert origin: new_pos = - new_rotation_matrix * old_pos
x = (a[0] * b[12]) + (a[4] * b[13]) + (a[8] * b[14]);
y = (a[1] * b[12]) + (a[5] * b[13]) + (a[9] * b[14]);
z = (a[2] * b[12]) + (a[6] * b[13]) + (a[10] * b[14]);
a[12] = -x;
a[13] = -y;
a[14] = -z;
}
//Store torus coordinates
std::vector<std::vector<GLfloat>> translateTorus = { { 0.0, 1.0, -10.0, 1 }, { 0.0, 4.0, -6.0, 1 } , { -1.0, 0.0, -4.0, 1 },
{ 3.0, 1.0, -6.0, 1 }, { 1.0, -1.0, -9.0, 1 } , { 4.0, 1.0, -4.0, 1 } };
GLfloat xpos, ypos, zpos, flagToDisplayCrystal;
//Looping through 6 Torus
for (int i = 0; i < translateTorus.size(); i++) {
//Get the torus coordinates
xpos = translateTorus[i][0];
ypos = translateTorus[i][1];
zpos = translateTorus[i][2];
//This variable works as a flag to display the crystal after a collision
flagToDisplayCrystal = translateTorus[i][3];
//p0 min, p2 max
//Creating a square using the torus coordinates and radius
double halfside = 1.0 / 2;
//(xpos +/- halfside) and (ypos +/- halfside) are built from the torus position and radius
float d1x = p0[0] - (xpos + halfside);
float d1y = p0[1] - (ypos + halfside);
float d2x = (xpos - halfside) - p2[0];
float d2y = (ypos - halfside) - p2[1];
//Collision check
//The square's min z and max z are checked for equality with the camera rectangle's min z and max z
if ((d1x > 0.0f || d1y > 0.0f || d2x > 0.0f || d2y > 0.0f) && p2[2] == zpos && p0[2] == zpos) {
//If there is collision update the variable as 0
translateTorus[i][3] = 0;
}
else {
if (flagToDisplayCrystal == 1) {
glPushMatrix();
glEnable(GL_TEXTURE_2D);
glTranslatef(xpos, ypos, zpos);
glRotatef(fPlanetRot, 0.0f, -1.0f, 0.0f);
glColor3f(0.0, 0.0, 0.0);
// Select the texture object
glBindTexture(GL_TEXTURE_2D, textures[3]);
glutSolidTorus(0.1, 1.0, 30, 30);
glDisable(GL_TEXTURE_2D);
glPopMatrix();
}
}
}
As I mentioned in the comments, you have 2 options: either use OpenGL rendering or compute it entirely on the CPU side without it. Let's start with rendering first:
1. render your scene
but instead of the colors of the torus and stuff use integer indexes (for example 0 empty space, 1 obstacle, 2 torus, ...). You can even have separate indexes for each object in the world so you know exactly which one is hit, etc.
So: clear the screen with the empty color, render your scene (using indexes instead of colors with glColor??(???)) without lighting or shading or whatever. But do not swap buffers!!! as that would show the stuff on screen and cause flickering.
2. read the rendered screen and depth buffers
You simply use glReadPixels to copy your screen and depth buffers into CPU-side memory (1D arrays), let's call them scr[], zed[].
3. scan scr[] for colors matching the torus indexes
Simply loop through all pixels and if a torus pixel is found check its depth. If it is close enough to the camera you found your collision.
4. render normally
Now clear the screen again and render your scene with colors and lighting... now you can swap buffers too.
Beware the depth buffer will be non-linear, which requires linearization to obtain the original depth in world units. For more about it and an example of reading both scr and zed see:
depth buffer got by glReadPixels is always 1
OpenGL 3D-raypicking with high poly meshes
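Here is a rough sketch of that readback and the depth linearization, assuming an RGBA8 color buffer, a viewport of xs x ys pixels and the standard perspective depth buffer; torus_index and collision_distance are placeholder names:
GLubyte *scr = new GLubyte[xs * ys * 4];   // rendered index "colors"
GLfloat *zed = new GLfloat[xs * ys];       // non-linear depth in <0,1>
glReadPixels(0, 0, xs, ys, GL_RGBA, GL_UNSIGNED_BYTE, scr);
glReadPixels(0, 0, xs, ys, GL_DEPTH_COMPONENT, GL_FLOAT, zed);
bool hit = false;
for (int i = 0, j = 0; i < xs * ys; i++, j += 4)
    if (scr[j] == torus_index)              // red channel holds the object index
    {
        // linearize depth back to world units (standard perspective depth buffer)
        float z = (2.0f * znear * zfar) / (zfar + znear - (2.0f * zed[i] - 1.0f) * (zfar - znear));
        if (z <= collision_distance) { hit = true; break; }
    }
delete[] scr;
delete[] zed;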
The other approach is much faster in case you do not have too many toruses. You simply compute the intersection between the camera's znear plane and each torus, which boils down to either AABB vs. rectangle intersection or cylinder vs. rectangle intersection.
However, if you are not familiar with 3D vector math you might get lost quickly.
Let's assume the torus is described by an AABB. Then the intersection between that and the rectangle boils down to checking the intersection between each edge of the AABB (a line) and the rectangle: simply find the intersection between the plane and the line and check if the point is inside the rectangle.
If our rectangle is defined by its vertices in CW or CCW order (p0,p1,p2,p3) and the line by its endpoints q0,q1 then:
n  = normalize(cross(p1-p0,p2-p1))      // rectangle normal
dq = normalize(q1-q0)                   // line direction
q  = q0 + dq*(dot(n,p0-q0)/dot(n,dq))   // plane/line intersection
Now just check if q is inside the rectangle. There are 2 ways: either test that all cross products between (q - edge_start) and (edge_end - edge_start) point in the same direction, or test that all dot products between each edge normal and (q - edge_point) have the same sign or are zero.
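For illustration, a sketch of the second (dot product) variant, assuming GLM-style vectors (<glm/glm.hpp> and <cmath> available); q is the already computed plane/line intersection:
// true if q (already on the rectangle's plane) lies inside the convex rectangle p0..p3
bool point_in_rect(const glm::vec3 &q, const glm::vec3 p[4])
{
    glm::vec3 n = glm::cross(p[1] - p[0], p[2] - p[1]);     // rectangle normal
    float sign = 0.0f;
    for (int i = 0; i < 4; i++)
    {
        glm::vec3 e = p[(i + 1) & 3] - p[i];                // edge direction
        float d = glm::dot(glm::cross(n, e), q - p[i]);     // edge_normal . (q - edge_point)
        if (fabs(d) < 1e-10f) continue;                     // on the edge counts as inside
        if (sign == 0.0f) sign = (d > 0.0f) ? +1.0f : -1.0f;
        else if (d * sign < 0.0f) return false;             // sign flipped -> outside
    }
    return true;
}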
The problem is that both the AABB and the rectangle must be in the same coordinate system, so either transform the AABB into camera coordinates by using the modelview matrix, or transform the rectangle into world coordinates using the inverse of the modelview. The latter is better as you do it just once instead of transforming each torus's AABB...
For more info about math side see:
Cone to box collision
Understanding 4x4 homogenous transform matrices
The rectangle itself is just extracted from your camera matrix (part of the modelview): the position and the x,y basis vectors give you the center and axes of your rectangle... The size must be derived from the perspective matrix (or the parameters you passed to it, especially the aspect ratio, FOV and znear).
Well, first you need to obtain the camera (view) matrix. GL_MODELVIEW usually holds:
GL_MODELVIEW = Inverse(Camera)*Rendered_Object
so you need to find the place in your code where GL_MODELVIEW holds just the Inverse(Camera) transformation and put this there:
float aspect=float(xs)/float(ys); // aspect from OpenGL window resolution
float im[16],m[16],znear=0.1,zfar=100.0,fovx=60.0*M_PI/180.0;
vec3 p0,p1,p2,p3,o,u,v; // 3D vectors
// this is how my perspective is set
// glMatrixMode(GL_PROJECTION);
// glLoadIdentity();
// gluPerspective(fovx*180.0/(M_PI*aspect),aspect,znear,zfar);
// get camera matrix (must be in right place in code before model transformations)
glGetFloatv(GL_MODELVIEW_MATRIX,im); // get camera inverse matrix
matrix_inv(m,im); // m = inverse(im)
u =vec3(m[ 0],m[ 1],m[ 2]); // x axis
v =vec3(m[ 4],m[ 5],m[ 6]); // y axis
o =vec3(m[12],m[13],m[14]); // origin
o-=vec3(m[ 8],m[ 9],m[10])*znear; // z axis offset
// scale by FOV
u*=znear*tan(0.5*fovx);
v*=znear*tan(0.5*fovx/aspect);
// get rectangle corners
p0=o-u-v;
p1=o+u-v;
p2=o+u+v;
p3=o-u+v;
// render it for debug
glColor3f(1.0,1.0,0.0);
glBegin(GL_QUADS);
glColor3f(1.0,0.0,0.0); glVertex3fv(p0.dat);
glColor3f(0.0,0.0,0.0); glVertex3fv(p1.dat);
glColor3f(0.0,0.0,1.0); glVertex3fv(p2.dat);
glColor3f(1.0,1.0,1.0); glVertex3fv(p3.dat);
glEnd();
This basically loads the matrix into CPU-side variables and inverts it like this:
void matrix_inv(float *a,float *b) // a[16] = Inverse(b[16])
{
float x,y,z;
// transpose of rotation matrix
a[ 0]=b[ 0];
a[ 5]=b[ 5];
a[10]=b[10];
x=b[1]; a[1]=b[4]; a[4]=x;
x=b[2]; a[2]=b[8]; a[8]=x;
x=b[6]; a[6]=b[9]; a[9]=x;
// copy projection part
a[ 3]=b[ 3];
a[ 7]=b[ 7];
a[11]=b[11];
a[15]=b[15];
// convert origin: new_pos = - new_rotation_matrix * old_pos
x=(a[ 0]*b[12])+(a[ 4]*b[13])+(a[ 8]*b[14]);
y=(a[ 1]*b[12])+(a[ 5]*b[13])+(a[ 9]*b[14]);
z=(a[ 2]*b[12])+(a[ 6]*b[13])+(a[10]*b[14]);
a[12]=-x;
a[13]=-y;
a[14]=-z;
}
And compute the corners with perspective in mind as described above...
I used a GLSL-like vec3, but you can use any 3D math, even your own like float p0[3],.... You just need +, - and multiplication by a constant.
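Just for completeness, a minimal sketch of such a type (only the operations the snippet above needs, plus a dat member so glVertex3fv(p0.dat) works):
struct vec3
{
    float dat[3];                                   // raw storage so glVertex3fv(p.dat) works
    vec3()                        { dat[0]=0.0f; dat[1]=0.0f; dat[2]=0.0f; }
    vec3(float x,float y,float z) { dat[0]=x; dat[1]=y; dat[2]=z; }
    vec3 operator + (const vec3 &v) const { return vec3(dat[0]+v.dat[0],dat[1]+v.dat[1],dat[2]+v.dat[2]); }
    vec3 operator - (const vec3 &v) const { return vec3(dat[0]-v.dat[0],dat[1]-v.dat[1],dat[2]-v.dat[2]); }
    vec3 operator * (float c)       const { return vec3(dat[0]*c,dat[1]*c,dat[2]*c); }
    vec3& operator -= (const vec3 &v) { *this = *this - v; return *this; }
    vec3& operator *= (float c)       { *this = *this * c; return *this; }
};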
Now p0,p1,p2,p3 hold your znear camera screen corners in world coordinates.
[Edit1] example
I managed to put together a simple example for this. Here are the support functions used first:
//---------------------------------------------------------------------------
void glutSolidTorus(float r,float R,int na,int nb) // render torus(r,R)
{
float *pnt=new float[(na+1)*(nb+1)*3*2]; if (pnt==NULL) return;
float *nor=pnt+((na+1)*(nb+1)*3);
float ca,sa,cb,sb,a,b,da,db,x,y,z,nx,ny,nz;
int ia,ib,i,j;
da=2.0*M_PI/float(na);
db=2.0*M_PI/float(nb);
glBegin(GL_LINES);
for (i=0,a=0.0,ia=0;ia<=na;ia++,a+=da){ ca=cos(a); sa=sin(a);
for ( b=0.0,ib=0;ib<=nb;ib++,b+=db){ cb=cos(b); sb=sin(b);
z=r*ca;
x=(R+z)*cb; nx=(x-(R*cb))/r;
y=(R+z)*sb; ny=(y-(R*sb))/r;
z=r*sa; nz=sa;
pnt[i]=x; nor[i]=nx; i++;
pnt[i]=y; nor[i]=ny; i++;
pnt[i]=z; nor[i]=nz; i++;
}}
glEnd();
for (ia=0;ia<na;ia++)
{
i=(ia+0)*(nb+1)*3;
j=(ia+1)*(nb+1)*3;
glBegin(GL_QUAD_STRIP);
for (ib=0;ib<=nb;ib++)
{
glNormal3fv(nor+i); glVertex3fv(pnt+i); i+=3;
glNormal3fv(nor+j); glVertex3fv(pnt+j); j+=3;
}
glEnd();
}
delete[] pnt;
}
//---------------------------------------------------------------------------
const int AABB_lin[]= // AABB lines
{
0,1,
1,2,
2,3,
3,0,
4,5,
5,6,
6,7,
7,4,
0,4,
1,5,
2,6,
3,7,
-1
};
const int AABB_fac[]= // AABB quads
{
3,2,1,0,
4,5,6,7,
0,1,5,4,
1,2,6,5,
2,3,7,6,
3,0,4,7,
-1
};
void AABBSolidTorus(vec3 *aabb,float r,float R) // aabb[8] = AABB of torus(r,R)
{
R+=r;
aabb[0]=vec3(-R,-R,-r);
aabb[1]=vec3(+R,-R,-r);
aabb[2]=vec3(+R,+R,-r);
aabb[3]=vec3(-R,+R,-r);
aabb[4]=vec3(-R,-R,+r);
aabb[5]=vec3(+R,-R,+r);
aabb[6]=vec3(+R,+R,+r);
aabb[7]=vec3(-R,+R,+r);
}
//---------------------------------------------------------------------------
void matrix_inv(float *a,float *b) // a[16] = Inverse(b[16])
{
float x,y,z;
// transpose of rotation matrix
a[ 0]=b[ 0];
a[ 5]=b[ 5];
a[10]=b[10];
x=b[1]; a[1]=b[4]; a[4]=x;
x=b[2]; a[2]=b[8]; a[8]=x;
x=b[6]; a[6]=b[9]; a[9]=x;
// copy projection part
a[ 3]=b[ 3];
a[ 7]=b[ 7];
a[11]=b[11];
a[15]=b[15];
// convert origin: new_pos = - new_rotation_matrix * old_pos
x=(a[ 0]*b[12])+(a[ 4]*b[13])+(a[ 8]*b[14]);
y=(a[ 1]*b[12])+(a[ 5]*b[13])+(a[ 9]*b[14]);
z=(a[ 2]*b[12])+(a[ 6]*b[13])+(a[10]*b[14]);
a[12]=-x;
a[13]=-y;
a[14]=-z;
}
//---------------------------------------------------------------------------
const int QUAD_lin[]= // quad lines
{
0,1,
1,2,
2,3,
3,0,
-1
};
const int QUAD_fac[]= // quad quads
{
0,1,2,3,
-1
};
void get_perspective_znear(vec3 *quad) // quad[4] = world coordinates of 4 corners of screen at znear distance from camera
{
vec3 o,u,v; // 3D vectors
float im[16],m[16],znear,zfar,aspect,fovx;
// get stuff from perspective
glGetFloatv(GL_PROJECTION_MATRIX,m); // get perspective projection matrix
zfar =0.5*m[14]*(1.0-((m[10]-1.0)/(m[10]+1.0)));// compute zfar from perspective matrix
znear=zfar*(m[10]+1.0)/(m[10]-1.0); // compute znear from perspective matrix
aspect=m[5]/m[0];
fovx=2.0*atan(1.0/m[5])*aspect;
// get stuff from camera matrix (must be in right place in code before model transformations)
glGetFloatv(GL_MODELVIEW_MATRIX,im); // get camera inverse matrix
matrix_inv(m,im); // m = inverse(im)
u =vec3(m[ 0],m[ 1],m[ 2]); // x axis
v =vec3(m[ 4],m[ 5],m[ 6]); // y axis
o =vec3(m[12],m[13],m[14]); // origin
o-=vec3(m[ 8],m[ 9],m[10])*znear; // z axis offset
// scale by FOV
u*=znear*tan(0.5*fovx);
v*=znear*tan(0.5*fovx/aspect);
// get rectangle corners
quad[0]=o-u-v;
quad[1]=o+u-v;
quad[2]=o+u+v;
quad[3]=o-u+v;
}
//---------------------------------------------------------------------------
bool collideLineQuad(vec3 *lin,vec3 *quad) // return if lin[2] is colliding quad[4]
{
float t,l,u,v;
vec3 p,p0,p1,dp;
vec3 U,V,W;
// quad (rectangle) basis vectors
U=quad[1]-quad[0]; u=length(U); u*=u;
V=quad[3]-quad[0]; v=length(V); v*=v;
W=normalize(cross(U,V));
// convert line from world coordinates to quad local ones
p0=lin[0]-quad[0]; p0=vec3(dot(p0,U)/u,dot(p0,V)/v,dot(p0,W));
p1=lin[1]-quad[0]; p1=vec3(dot(p1,U)/u,dot(p1,V)/v,dot(p1,W));
dp=p1-p0;
// test if crossing the plane
if (fabs(dp.z)<1e-10) return false;
t=-p0.z/dp.z;
p=p0+(t*dp);
// test inside 2D quad (rectangle)
if ((p.x<0.0)||(p.x>1.0)) return false;
if ((p.y<0.0)||(p.y>1.0)) return false;
// inside line
if ((t<0.0)||(t>1.0)) return false;
return true;
}
//---------------------------------------------------------------------------
bool collideQuadQuad(vec3 *quad0,vec3 *quad1) // return if quad0[4] is colliding quad1[4]
{
int i;
vec3 l[2];
// lines vs. quads
for (i=0;QUAD_lin[i]>=0;)
{
l[0]=quad0[QUAD_lin[i]]; i++;
l[1]=quad0[QUAD_lin[i]]; i++;
if (collideLineQuad(l,quad1)) return true;
}
for (i=0;QUAD_lin[i]>=0;)
{
l[0]=quad1[QUAD_lin[i]]; i++;
l[1]=quad1[QUAD_lin[i]]; i++;
if (collideLineQuad(l,quad0)) return true;
}
// ToDo coplanar quads tests (not needed for AABB test)
return false;
}
//---------------------------------------------------------------------------
bool collideAABBQuad(vec3 *aabb,vec3 *quad) // return if aabb[8] is colliding quad[4]
{
int i;
vec3 q[4],n,p;
// test all AABB faces (rectangle) for intersection with quad (rectangle)
for (i=0;AABB_fac[i]>=0;)
{
q[0]=aabb[AABB_fac[i]]; i++;
q[1]=aabb[AABB_fac[i]]; i++;
q[2]=aabb[AABB_fac[i]]; i++;
q[3]=aabb[AABB_fac[i]]; i++;
if (collideQuadQuad(q,quad)) return true;
}
// test if one point of quad is fully inside AABB
for (i=0;AABB_fac[i]>=0;i+=4)
{
n=cross(aabb[AABB_fac[i+1]]-aabb[AABB_fac[i+0]],
aabb[AABB_fac[i+2]]-aabb[AABB_fac[i+1]]);
if (dot(n,quad[0]-aabb[AABB_fac[i+0]])>0.0) return false;
}
return true;
}
//---------------------------------------------------------------------------
And here is the usage (during rendering):
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
int i;
float m[16];
mat4 m0,m1;
vec4 v4;
float aspect=float(xs)/float(ys);
glMatrixMode(GL_PROJECTION);
glLoadIdentity();
gluPerspective(60.0/aspect,aspect,0.1,20.0);
glMatrixMode(GL_TEXTURE);
glLoadIdentity();
glMatrixMode(GL_MODELVIEW);
glLoadIdentity();
static float anim=180.0; anim+=0.1; if (anim>=360.0) anim-=360.0;
glEnable(GL_DEPTH_TEST);
glDisable(GL_CULL_FACE);
vec3 line[2],quad[4],aabb[8]; // 3D vectors
get_perspective_znear(quad);
// store view matrix for latter
glMatrixMode(GL_MODELVIEW);
glGetFloatv(GL_MODELVIEW_MATRIX,m);
m0=mat4(m[0],m[1],m[2],m[3],m[4],m[5],m[6],m[7],m[8],m[9],m[10],m[11],m[12],m[13],m[14],m[15]);
m0=inverse(m0);
// <<-- here should be for start that loop through your toruses
glMatrixMode(GL_MODELVIEW);
glPushMatrix();
// set/animate torus position
glTranslatef(0.3,0.3,3.5*(-1.0-cos(anim)));
glRotatef(+75.0,0.5,0.5,0.0);
// get actual matrix and convert it to the change
glGetFloatv(GL_MODELVIEW_MATRIX,m);
m1=m0*mat4(m[0],m[1],m[2],m[3],m[4],m[5],m[6],m[7],m[8],m[9],m[10],m[11],m[12],m[13],m[14],m[15]);
// render torus and compute its AABB
glEnable(GL_LIGHTING);
glEnable(GL_LIGHT0);
glColor3f(1.0,1.0,1.0);
glutSolidTorus(0.1,0.5,36,36);
AABBSolidTorus(aabb,0.1,0.5);
glDisable(GL_LIGHT0);
glDisable(GL_LIGHTING);
// convert AABB to the same coordinates as quad
for (i=0;i<8;i++) aabb[i]=(m1*vec4(aabb[i],1.0)).xyz;
// restore original view matrix
glPopMatrix();
// render wireframe AABB
glColor3f(0.0,1.0,0.0);
glBegin(GL_LINES);
for (i=0;AABB_lin[i]>=0;i++)
glVertex3fv(aabb[AABB_lin[i]].dat);
glEnd();
/*
// render filled AABB for debug
glBegin(GL_QUADS);
for (i=0;AABB_fac[i]>=0;i++)
glVertex3fv(aabb[AABB_fac[i]].dat);
glEnd();
// render quad for debug
glBegin(GL_QUADS);
glColor3f(1.0,1.0,1.0);
for (i=0;QUAD_fac[i]>=0;i++)
glVertex3fv(quad[QUAD_fac[i]].dat);
glEnd();
*/
// render X on colision
if (collideAABBQuad(aabb,quad))
{
glColor3f(1.0,0.0,0.0);
glBegin(GL_LINES);
glVertex3fv(quad[0].dat);
glVertex3fv(quad[2].dat);
glVertex3fv(quad[1].dat);
glVertex3fv(quad[3].dat);
glEnd();
}
// <<-- here should be end of the for that loop through your toruses
glFlush();
SwapBuffers(hdc);
Just ignore the GLUT solid torus function as you already have it... Here is a preview:
The red cross indicates a collision with the screen...
I'm trying to get the coordinates (x,y) of the grid (z = 0) using only the cursor coordinates. After a long search I found this way to do it using glm::unProject.
First I'm getting the cursor coordinates using the callback:
void cursorCallback(GLFWwindow *window, double x, double y)
{
this->cursorCoordinate = glm::vec3(x, (this->windowHeight - y - 1.0f), 0.0f);
}
and then converting these coordinates:
glm::vec3 cursorCoordinatesToWorldCoordinates()
{
glm::vec3 pointInitial = glm::unProject(
glm::vec3(this->cursorCoordinate.x, this->cursorCoordinate.y, 0.0),
this->modelMatrix * this->viewMatrix,
this->projectionMatrix,
this->viewPort
);
glm::vec3 pointFinal = glm::unProject(
glm::vec3(this->cursorCoordinate.x, this->cursorCoordinate.y, 1.0),
this->modelMatrix * this->viewMatrix,
this->projectionMatrix,
this->viewPort
);
glm::vec3 vectorDirector = pointFinal - pointInitial;
double lambda = (-pointInitial.y) / vectorDirector.y;
double x = pointInitial.x + lambda * vectorDirector.x;
double y = pointInitial.z + lambda * vectorDirector.z;
return glm::vec3(x, y, 0.0f);
}
I use an ArcBall camera to rotate the world around a specified axis, so this is how I generate the MVP matrices:
this->position = glm::vec3(0.0f, 10.0f, 5.0f);
this->up = glm::vec3(0.0f, 1.0f, 0.0f);
this->lookAt = glm::vec3(0.0f, 0.0f, 0.0f);
this->fieldView = 99.0f;
this->farDistance = 100.0f;
this->nearDistance = 0.1f;
this->modelMatrix = glm::mat4(1.0f);
this->viewMatrix = glm::lookAt(this->position, this->lookAt, this->up) * glm::rotate(glm::degrees(this->rotationAngle) * this->dragSpeed, this->rotationAxis);
this->projectionMatrix = glm::perspective(glm::radians(this->fieldView), 1.0f, this->nearDistance, this->farDistance);
But something is going wrong because I'm not getting the right results. Look at this screenshot of the application:
Each square is 1 unit, and the cube is rendered at position (0, 0, 0). With rotationAngle = 0, when I put the cursor at (0,0), (1,1), (2,2), (3,3), (4,4), (5,5) I get (0, 5.7), (0.8, 6.4), (1.6, 6.9), (2.4, 7.6), (3.2, 8.2), (4.2, 8.8) respectively. That's not expected.
Why is y offset by 6 units?
Is it necessary to rotate the result of cursorCoordinatesToWorldCoordinates based on rotationAngle?
--
What I have already checked:
Checked that the viewport matches glViewport - OK
Checked the OpenGL coordinates (Y is up, not Z) - OK
You want to intersect the ray from glm::vec3(this->cursorCoordinate.x, this->cursorCoordinate.y, 0.0) to glm::vec3(this->cursorCoordinate.x, this->cursorCoordinate.y, 1.0) with the grid in world space, rather than in the model space of the cuboid.
You have to skip this->modelMatrix:
glm::vec3 pointInitial = glm::unProject(
glm::vec3(this->cursorCoordinate.x, this->cursorCoordinate.y, 0.0),
this->viewMatrix,
this->projectionMatrix,
this->viewPort);
glm::vec3 pointFinal = glm::unProject(
glm::vec3(this->cursorCoordinate.x, this->cursorCoordinate.y, 1.0),
this->viewMatrix,
this->projectionMatrix,
this->viewPort);
In any case, this->modelMatrix * this->viewMatrix is incorrect. If you want to intersect the ray with an object in model space, then it has to be this->viewMatrix * this->modelMatrix. Matrix multiplication is not commutative.
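Putting it together, a minimal sketch of the corrected function (assuming, as in your original code, that the grid lies in the world-space plane y = 0):
glm::vec3 cursorCoordinatesToWorldCoordinates()
{
    // unproject the cursor ray endpoints into world space (view matrix only, no model matrix)
    glm::vec3 pointInitial = glm::unProject(
        glm::vec3(this->cursorCoordinate.x, this->cursorCoordinate.y, 0.0),
        this->viewMatrix, this->projectionMatrix, this->viewPort);
    glm::vec3 pointFinal = glm::unProject(
        glm::vec3(this->cursorCoordinate.x, this->cursorCoordinate.y, 1.0),
        this->viewMatrix, this->projectionMatrix, this->viewPort);
    // intersect the ray with the grid plane y = 0
    glm::vec3 vectorDirector = pointFinal - pointInitial;
    double lambda = (-pointInitial.y) / vectorDirector.y; // assumes the ray is not parallel to the grid
    double x = pointInitial.x + lambda * vectorDirector.x;
    double y = pointInitial.z + lambda * vectorDirector.z;
    return glm::vec3(x, y, 0.0f);
}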
I have been searching around for a simple solution, but I have not found anything. Currently I am loading a texture from a file and rendering it into the buffer using C++ 2012 Express and DirectX 9. But what I want to do is be able to copy parts of the buffer and use the copied part as the texture, instead of the loaded texture.
I want to be able to copy/select like a map editor would do.
EDIT: Problem solved :) It was just dumb mistakes.
You can use the StretchRect function (see documentation).
You should copy a subset of the source buffer into the whole destination buffer (which is the new texture's buffer in your case). Something like this:
LPDIRECT3DTEXTURE9 pTexSrc, // source texture
pTexDst; // new texture (a subset of the source texture)
// create the textures
// ...
LPDIRECT3DSURFACE9 pSrc, pDst;
pTexSrc->GetSurfaceLevel(0, &pSrc);
pTexDst->GetSurfaceLevel(0, &pDst);
RECT rect; // (x0, y0, x1, y1) - coordinates of the subset to copy
rect.left = x0;
rect.right = x1;
rect.top = y0;
rect.bottom = y1;
pd3dDevice->StretchRect(pSrc, &rect, pDst, NULL, D3DTEXF_NONE);
// the last parameter could be also D3DTEXF_POINT or D3DTEXF_LINEAR
pSrc->Release();
pDst->Release(); // remember to release the surfaces when done !!!
EDIT:
OK, I've just gone through the tons of your code and I think the best solution would be to use UV coordinates instead of copying subsets of the palette texture. You should calculate the appropriate UV coordinates for a given tile in game_class::game_gui_add_current_graphic and use them in the CUSTOMVERTEX structure:
float width; // the width of the palette texture
float height; // the height of the palette texture
float tex_x, tex_y; // the coordinates of the upper left corner
// of the palette texture's subset to use for
// the current tile texturing
float tex_w, tex_h; // the width and height of the above mentioned subset
float u0, u1, v0, v1;
u0 = tex_x / width;
v0 = tex_y / height;
u1 = u0 + tex_w / width;
v1 = v0 + tex_h / height;
// create the vertices using the CUSTOMVERTEX struct
CUSTOMVERTEX vertices[] = {
{ 0.0f, 32.0f, 1.0f, u0, v1, D3DCOLOR_XRGB(255, 0, 0), },
{ 0.0f, 0.0f, 1.0f, u0, v0, D3DCOLOR_XRGB(255, 0, 0), },
{ 32.0f, 32.0f, 1.0f, u1, v1, D3DCOLOR_XRGB(0, 0, 255), },
{ 32.0f, 0.0f, 1.0f, u1, v0, D3DCOLOR_XRGB(0, 255, 0), } };
Example: Your palette consists of 3 rows and 4 columns with the 12 possible cell textures. Each texture is 32 x 32. So tex_w = tex_h = 32;, width = 4 * tex_w; and height = 3 * tex_h;. Suppose you want to calculate uv coordinates for a tile which should be textured with the image in the second row and the third column of the palette. Then tex_x = (3-1)*tex_w; and tex_y = (2-1)*tex_h;. Finally, you calculate the UVs as in the code above (in this example you'll get {u0,v0,u1,v1} = {(3-1)/4, (2-1)/3, 3/4, 2/3} = {0.5, 0.33, 0.75, 0.66}).
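As a small usage sketch, a hypothetical helper that computes those UVs for a given palette row and column (1-based, as in the example above):
// hypothetical helper: UVs of the tile at (row, col) in a (rows x cols) palette
void palette_uv(int row, int col, int rows, int cols,
                float &u0, float &v0, float &u1, float &v1)
{
    u0 = float(col - 1) / float(cols);
    v0 = float(row - 1) / float(rows);
    u1 = float(col) / float(cols);
    v1 = float(row) / float(rows);
}
// e.g. palette_uv(2, 3, 3, 4, u0, v0, u1, v1); // -> {0.5, 0.33, 0.75, 0.66} as above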
For a simple 2D game I'm making, I'm trying to rotate sprites around the z axis using matrices. I'm clearly doing something wrong, as when I attempt to rotate my sprite it looks like it's being rotated around the screen origin (bottom left) and not the sprite origin. I'm confused, as my quad is already at the origin, so I didn't think I needed to translate -> rotate -> translate back. Here's a code snippet and a small video of the erroneous transformation.
void MatrixMultiply(
MATRIX &mOut,
const MATRIX &mA,
const MATRIX &mB);
/*!***************************************************************************
#Function TransTransformArray
#Output pTransformedVertex Destination for transformed vectors
#Input pV Input vector array
#Input nNumberOfVertices Number of vectors to transform
#Input pMatrix Matrix to transform the vectors of input vector (e.g. use 1 for position, 0 for normal)
#Description Transform all vertices in pVertex by pMatrix and store them in
pTransformedVertex
- pTransformedVertex is the pointer that will receive transformed vertices.
- pVertex is the pointer to untransformed object vertices.
- nNumberOfVertices is the number of vertices of the object.
- pMatrix is the matrix used to transform the object.
*****************************************************************************/
void TransTransformArray(
VECTOR3 * const pTransformedVertex,
const VECTOR3 * const pV,
const int nNumberOfVertices,
const MATRIX * const pMatrix);
RenderQuad CreateRenderQuad(
const Texture2D & texture,
float x,
float y,
float scaleX,
float scaleY,
float rotateRadians,
int zIndex,
const Color & color,
const Quad2 & textureCoord,
const char * name
) {
MATRIX mT;
MATRIX mS;
MATRIX concat;
MATRIX mR;
MatrixTranslation(mT, x, y, 0.0f);
MatrixRotationZ(mR, rotateRadians);
MatrixScaling(mS, scaleX, scaleY, 1.0f);
VECTOR3 quad[] = {
{-0.5f, 0.5f, 0.f}, //tl
{0.5f, 0.5f, 0.f}, //tr
{-0.5, -0.5f, 0.0f}, //bl
{0.5f, -0.5f, 0.0f}, //br
};
MatrixMultiply(concat, mR, mT);
MatrixMultiply(concat, concat, mS);
// apply to all the points in the quad
TransTransformArray(quad, quad, 4, &concat);
Update:
I'm using the matrix class from the oolongengine: code.google.com/p/oolongengine/source/browse/trunk/Oolong%20Engine2/Math/Matrix.cpp
I transform all the quads and then later render them using OpenGL. Here are my data structs and render code:
typedef struct _RenderData {
VECTOR3 vertex;
RenderColor3D color;
RenderTextureCoord textureCoord;
float zIndex;
GLuint textureId;
} RenderData;
typedef struct _RenderQuad {
//! top left
RenderData tl;
//! top right
RenderData tr;
//! bottom left
RenderData bl;
//! bottom right
RenderData br;
float zIndex;
Texture2D * texture; // render quad draws a source rect from here
ESpriteBlendMode blendMode;
} RenderQuad ;
/// Draw
class QuadBatch {
GLushort * m_indices;
const Texture2D * m_texture;
GLuint m_vbos[2];
RenderData * m_vertices;
};
QuadBatch::Draw () {
int offset = (int)&m_vertices[startIndex];
// vertex
int diff = offsetof( RenderData, vertex);
glVertexPointer(3, GL_FLOAT, kRenderDataSize, (void*) (offset + diff) );
// color
diff = offsetof( RenderData, color);
glColorPointer(4, GL_FLOAT, kRenderDataSize, (void*)(offset + diff));
// tex coords
diff = offsetof( RenderData, textureCoord);
glTexCoordPointer(2, GL_FLOAT, kRenderDataSize, (void*)(offset + diff));
// each quad has 6 indices
glDrawElements(GL_TRIANGLES, vertexCount * elementMultiplier, GL_UNSIGNED_SHORT, m_indices);
'Rotation', by definition, is around the origin (0,0,0). If you want to rotate around a different point, you have to apply a translation component. Say you want to apply a rotation R around a point a. The transformation to apply to an arbitrary vector x is:
x --> a + R(x - a) = Rx + (a - Ra)
(This might take some staring to digest). So, after applying your rotation - which, as you observed, rotates around the origin - you have to add the constant vector (a - Ra).
[Edit:] This answer is language and platform agnostic - the math is the same wherever you look. Specific libraries contain different structures and API to apply transformations. Both DirectX and OpenGL, for example, maintain 4x4 matrix transforms, to unify rotations and translations into a single matrix multiplication (via an apparatus called homogeneous coordinates).
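For example, with the MatrixTranslation / MatrixRotationZ / MatrixMultiply helpers from your snippet, rotating around an arbitrary pivot (ax, ay) (a name made up here) is the usual translate / rotate / translate-back composition; roughly:
MATRIX mToPivot, mR, mBack, concat;
MatrixTranslation(mToPivot, -ax, -ay, 0.0f);  // move the pivot to the origin
MatrixRotationZ(mR, rotateRadians);           // rotate around the origin
MatrixTranslation(mBack, ax, ay, 0.0f);       // move the pivot back
// a vertex should see: to-pivot first, then the rotation, then the back-translation
// (swap the argument order if MatrixMultiply(out, A, B) composes the other way in your library)
MatrixMultiply(concat, mToPivot, mR);
MatrixMultiply(concat, concat, mBack);
TransTransformArray(quad, quad, 4, &concat);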