Incorrect right, up and forward after rotation - opengl

I am working on adding a matrix handler in my game using OpenGL, and so far most of it works, but I seem to not understand how to properly extract the right, up and forward vectors out of the rotation matrix. I've got the following code:
// Builds the combined rotation matrix from Euler angles (in degrees) and
// caches the right/up/forward basis vectors extracted from it.
// NOTE(review): glm matrices are indexed m[column][row]. The extractions
// below fix the ROW index and vary the column, i.e. they read the matrix
// ROWS. Under glm's column-vector convention the basis axes of a rotation
// matrix are its COLUMNS (right = m[0], up = m[1], forward = m[2]), so this
// yields the transposed (inverse-rotation) basis -- confirm intent.
glm::mat4 clsMatrixHandler::SetRotation( float a_Pitch, float a_Yaw, float a_Roll )
{
glm::mat4 l_Rotx;
glm::mat4 l_Roty;
glm::mat4 l_Rotz;
// Convert the three Euler angles into three single-axis rotation matrices.
PitchYawRollToXYZMatrices( a_Pitch, a_Yaw, a_Roll, l_Rotx, l_Roty, l_Rotz );
// Only the most recent pitch/yaw/roll triple is retained.
m_PitchYawRolls.clear( );
m_PitchYawRolls.push_back( glm::vec3( a_Pitch, a_Yaw, a_Roll ) );
m_RotationMatrix = l_Rotx * l_Roty * l_Rotz;
// Row extraction (transposed basis) -- see NOTE above.
m_Right = glm::vec3( m_RotationMatrix[ 0 ][ 0 ], m_RotationMatrix[ 1 ][ 0 ], m_RotationMatrix[ 2 ][ 0 ] );
m_Up = glm::vec3( m_RotationMatrix[ 0 ][ 1 ], m_RotationMatrix[ 1 ][ 1 ], m_RotationMatrix[ 2 ][ 1 ] );
m_Forward = glm::vec3( m_RotationMatrix[ 0 ][ 2 ], m_RotationMatrix[ 1 ][ 2 ], m_RotationMatrix[ 2 ][ 2 ] );
return m_RotationMatrix;
}
// Produces one 4x4 rotation matrix per Euler angle (input in degrees).
// NOTE(review): the axes are mixed up, as the output in this question shows:
// a_rX below leaves z untouched and mixes x/y, i.e. it rotates about the
// Z axis (roll), while a_rZ leaves x untouched and mixes y/z, i.e. it
// rotates about the X axis (pitch). The pitch and roll matrices are swapped.
// NOTE(review): glm::mat4 brace-initialization fills COLUMN-major storage,
// so each four-value row written below actually becomes a column -- the
// intended matrix ends up transposed (rotation direction inverted).
void clsMatrixHandler::PitchYawRollToXYZMatrices( float a_Pitch, float a_Yaw, float a_Roll, glm::mat4& a_rX, glm::mat4& a_rY, glm::mat4& a_rZ )
{
float l_cPitch = glm::cos( glm::radians( a_Pitch ) );
float l_sPitch = glm::sin( glm::radians( a_Pitch ) );
float l_cYaw = glm::cos( glm::radians( a_Yaw ) );
float l_sYaw = glm::sin( glm::radians( a_Yaw ) );
float l_cRoll = glm::cos( glm::radians( a_Roll ) );
float l_sRoll = glm::sin( glm::radians( a_Roll ) );
// Intended as the pitch (X-axis) matrix, but this is a Z-axis rotation.
a_rX = {
l_cPitch, -l_sPitch, 0.0f, 0.0f,
l_sPitch, l_cPitch, 0.0f, 0.0f,
0.0f, 0.0f, 1.0f, 0.0f,
0.0f, 0.0f, 0.0f, 1.0f
};
// Yaw (Y-axis) rotation -- this one does act on the intended axis.
a_rY = {
l_cYaw, 0.0f, -l_sYaw, 0.0f,
0.0f, 1.0f, 0.0f, 0.0f,
l_sYaw, 0.0f, l_cYaw, 0.0f,
0.0f, 0.0f, 0.0f, 1.0f
};
// Intended as the roll (Z-axis) matrix, but this is an X-axis rotation.
a_rZ = {
1.0f, 0.0f, 0.0f, 0.0f,
0.0f, l_cRoll, -l_sRoll, 0.0f,
0.0f, l_sRoll, l_cRoll, 0.0f,
0.0f, 0.0f, 0.0f, 1.0f
};
}
// Smoke test: applies a -90 degree pitch, then dumps the stored angles, the
// upper-left 3x3 of the rotation matrix, and a transformed point to the
// debugger output (Windows OutputDebugStringA).
// NOTE(review): calling m_MatrixHandler from inside a clsMatrixHandler
// method looks odd -- presumably Test() belongs to an owning class; confirm.
void clsMatrixHandler::Test()
{
float l_Pitch = -90.0f;
float l_Yaw = 0.0f;
float l_Roll = 0.0f;
glm::mat4 l_RotationMatrix = m_MatrixHandler.SetRotation( l_Pitch, l_Yaw, l_Roll );
std::vector<glm::vec3> l_PYRs = m_MatrixHandler.GetPitchYawRolls( );
OutputDebugStringA( ( "Pitch: " + std::to_string( l_PYRs[ l_PYRs.size( ) - 1 ].x ) + "\n" +
"Yaw: " + std::to_string( l_PYRs[ l_PYRs.size( ) - 1 ].y ) + "\n" +
"Roll: " + std::to_string( l_PYRs[ l_PYRs.size( ) - 1 ].z ) + "\n" ).c_str( ) );
// w = 0: the vector is treated as a direction (any translation is ignored).
glm::vec4 l_Point = glm::vec4( 10, 0, 0, 0 );
glm::vec4 l_PointInLocalSpace = l_RotationMatrix * glm::vec4( l_Point.x, l_Point.y, l_Point.z, 0 );
// These lines fix the row index into m[col][row], so each printed line is a
// matrix ROW (a transposed view of glm's column-major storage).
OutputDebugStringA( ( "New Transformation Matrix: \n" +
std::to_string( l_RotationMatrix[ 0 ][ 0 ] ) + ", " + std::to_string( l_RotationMatrix[ 1 ][ 0 ] ) + ", " + std::to_string( l_RotationMatrix[ 2 ][ 0 ] ) + "\n" +
std::to_string( l_RotationMatrix[ 0 ][ 1 ] ) + ", " + std::to_string( l_RotationMatrix[ 1 ][ 1 ] ) + ", " + std::to_string( l_RotationMatrix[ 2 ][ 1 ] ) + "\n" +
std::to_string( l_RotationMatrix[ 0 ][ 2 ] ) + ", " + std::to_string( l_RotationMatrix[ 1 ][ 2 ] ) + ", " + std::to_string( l_RotationMatrix[ 2 ][ 2 ] ) + "\n"
).c_str( ) );
OutputDebugStringA( ( "New Point Position: \n" + std::to_string( l_PointInLocalSpace.x ) + ", " + std::to_string( l_PointInLocalSpace.y ) + ", " + std::to_string( l_PointInLocalSpace.z ) + "\n" ).c_str( ) );
}
Output:
Pitch: -90.000000
Yaw: 0.000000
Roll: 0.000000
New Transformation Matrix:
-0.000000, -1.000000, 0.000000
1.000000, -0.000000, 0.000000
0.000000, 0.000000, 1.000000
New Point Position:
-0.000000, 10.000000, 0.000000
As far as I know the first, second and third row in the matrix are the right, up and forward, so:
Right: 0.0, -1.0, 0.0
Up: 1.0, 0.0, 0.0
Forward: 0.0, 0.0, 1.0
Also, a -90 pitch should point the forward in (0.0, 1.0, 0.0) now.
How is it possible I just rolled and not pitched?
Why does multiplying the matrix with the point give me the correct result?

In your code a_rX calculates a rotation around the z-axis, and a_rZ a rotation around the x-axis.
To solve your issue, pitch has to be:
a_rX = {
1.0f, 0.0f, 0.0f, 0.0f,
0.0f, l_cPitch, -l_sPitch, 0.0f,
0.0f, l_sPitch, l_cPitch, 0.0f,
0.0f, 0.0f, 0.0f, 1.0f
};
and roll has to be:
a_rZ = {
l_cRoll, -l_sRoll, 0.0f, 0.0f,
l_sRoll, l_cRoll, 0.0f, 0.0f,
0.0f, 0.0f, 1.0f, 0.0f,
0.0f, 0.0f, 0.0f, 1.0f
};

Related

C++ OpenGL texture not rendering as it should

I've successfully got some triangles to show up on screen together with some textures and a couple of event listeners. However, my texture is not rendering properly: pixels between 0 and 255 in brightness get distorted, while completely black/white pixels render as they should. <- This might not be the best description but I will provide an example below.
This is the texture I'm trying to map onto my two triangles at the moment:
Here is the result after compiling and running the program:
What I think is that it may have something to do with the shaders, at the moment I do not have any shaders activated, neither vertex nor fragment shaders. I don't know if this is the problem but it might be a clue to it all.
Below is my main.cpp:
#include <iostream>
#include <X11/Xlib.h>
#include <X11/Xutil.h>
#include <X11/keysymdef.h>
#include <GL/glx.h>
#include <GL/gl.h>
// #include <GL/glut.h>
// #include <GL/glu.h>
#include <sys/time.h>
#include <unistd.h>
#define GLX_CONTEXT_MAJOR_VERSION_ARB 0x2091
#define GLX_CONTEXT_MINOR_VERSION_ARB 0x2092
typedef GLXContext (*GLXCREATECONTEXTATTRIBSARBPROC)(Display*, GLXFBConfig, GLXContext, Bool, const int*);
#define WINDOW_WIDTH 800
#define WINDOW_HEIGHT 800
#define FPS 30
uint8_t shutdown = 0;
#define SKIP_TICKS ( 1000 / FPS )
// macros
void Render( void );
void HandleEvents( XEvent ev );
void Resize( int w, int h );
void Shutdown( void );
void fill_triangle_buffer( void );
// A single flat-colored triangle: one RGB color plus three xyz vertices.
struct triangle {
float color[ 3 ];
float p1[ 3 ];
float p2[ 3 ];
float p3[ 3 ];
};
// 12 triangles = 6 cube faces x 2 triangles each; filled by fill_triangle_buffer().
struct triangle triangles[ 12 ];
// Returns the current wall-clock time in milliseconds (sub-millisecond
// precision comes from the tv_usec field). Used for frame timing in the loop.
static double GetMilliseconds() {
    // Fix: the timeval was declared 'static', needlessly sharing one buffer
    // across all calls (a latent data race if ever called off-thread).
    timeval tv;
    gettimeofday(&tv, NULL);
    double ms = tv.tv_sec * 1000.0; // sec to ms
    ms += tv.tv_usec / 1000.0;      // us to ms
    return ms;
}
// Loads a 24-bit BMP file and uploads it as an OpenGL texture.
// Returns the GL texture object id, or 0 on failure.
GLuint loadBMP_custom( const char * imagepath ) {
    // Data read from the header of the BMP file
    unsigned char header[54]; // Each BMP file begins with a 54-byte header
    unsigned int dataPos;     // Position in the file where the pixel data begins
    unsigned int width, height;
    unsigned int imageSize;   // = width*height*3
    // Actual BGR pixel data
    unsigned char * data;
    // Open the file
    FILE * file = fopen(imagepath,"rb");
    if (!file) {
        printf("Image could not be opened\n");
        return 0;
    }
    if ( fread(header, 1, 54, file)!=54 ){ // If not 54 bytes read : problem
        printf("Not a correct BMP file\n");
        fclose(file); // fix: file handle was leaked on this path
        return 0;     // fix: was 'return false' (same value, wrong type for GLuint)
    }
    if ( header[0]!='B' || header[1]!='M' ){
        printf("Not a correct BMP file\n");
        fclose(file); // fix: file handle was leaked on this path
        return 0;
    }
    // Read ints from the byte array
    dataPos = *(int*)&(header[0x0A]);
    imageSize = *(int*)&(header[0x22]);
    width = *(int*)&(header[0x12]);
    height = *(int*)&(header[0x16]);
    // Some BMP files are misformatted, guess missing information
    if (imageSize==0)
        imageSize=width*height*3; // 3 : one byte for each Red, Green and Blue component
    if (dataPos==0)
        dataPos=54; // The BMP header is done that way
    // Fix: seek to the recorded pixel-data offset. The old code read from
    // wherever the header read left off, which is wrong whenever dataPos != 54
    // (e.g. BMPs with extended headers or a palette).
    fseek(file, (long)dataPos, SEEK_SET);
    // Create a buffer and read the actual pixel data
    data = new unsigned char [imageSize];
    fread(data,1,imageSize,file);
    // Everything is in memory now, the file can be closed
    fclose(file);
    // Create one OpenGL texture
    GLuint textureID;
    glGenTextures(1, &textureID);
    // "Bind" the newly created texture : all future texture functions will modify this texture
    glBindTexture(GL_TEXTURE_2D, textureID);
    // Give the image to OpenGL (BMP stores pixels in BGR order, hence GL_BGR)
    glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, width, height, 0, GL_BGR, GL_UNSIGNED_BYTE, data);
    glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_MIRRORED_REPEAT);
    glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_MIRRORED_REPEAT);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
    // Fix: the pixel buffer was leaked. glTexImage2D has copied the data into
    // GL's own storage, so the client-side copy can be freed now.
    delete[] data;
    return textureID;
}
// Entry point: creates an X11 window with a GLX OpenGL context, loads the
// texture, then runs a fixed-FPS loop that polls X events and draws two
// textured triangles with immediate-mode GL.
int main (int argc, char ** argv){
Display *dpy = XOpenDisplay(0);
Window win;
XEvent ev;
int nelements;
// NOTE(review): attrib_list is 0 (match-everything); the first returned
// config is not guaranteed to be RGBA or double-buffered -- it is not the
// same visual that glXChooseVisual selects below. Confirm fbc[0] is usable.
GLXFBConfig *fbc = glXChooseFBConfig(dpy, DefaultScreen(dpy), 0, &nelements);
static int attributeList[] = { GLX_RGBA, GLX_DOUBLEBUFFER, GLX_RED_SIZE, 1, GLX_GREEN_SIZE, 1, GLX_BLUE_SIZE, 1, None };
XVisualInfo *vi = glXChooseVisual(dpy, DefaultScreen(dpy),attributeList);
// Set Window attributes
XSetWindowAttributes swa;
swa.colormap = XCreateColormap(dpy, RootWindow(dpy, vi->screen), vi->visual, AllocNone);
swa.border_pixel = 0;
swa.event_mask = StructureNotifyMask;
win = XCreateWindow(dpy, RootWindow(dpy, vi->screen), 0, 0, WINDOW_WIDTH, WINDOW_HEIGHT, 0, vi->depth, InputOutput, vi->visual, CWBorderPixel|CWColormap|CWEventMask, &swa);
// Select window inputs to be triggered in the eventlistener
XSelectInput( dpy, win, PointerMotionMask | ButtonPressMask | ButtonReleaseMask | KeyPressMask );
XMapWindow (dpy, win);
//oldstyle context:
// GLXContext ctx = glXCreateContext(dpy, vi, 0, GL_TRUE);
std::cout << "glXCreateContextAttribsARB " << (void*) glXGetProcAddress((const GLubyte*)"glXCreateContextAttribsARB") << std::endl;
GLXCREATECONTEXTATTRIBSARBPROC glXCreateContextAttribsARB = (GLXCREATECONTEXTATTRIBSARBPROC) glXGetProcAddress((const GLubyte*)"glXCreateContextAttribsARB");
// Request an OpenGL 3.0 context.
int attribs[] = {
GLX_CONTEXT_MAJOR_VERSION_ARB, 3,
GLX_CONTEXT_MINOR_VERSION_ARB, 0,
0};
// Redirect Close
Atom atomWmDeleteWindow = XInternAtom(dpy, "WM_DELETE_WINDOW", False);
XSetWMProtocols(dpy, win, &atomWmDeleteWindow, 1);
// Create GLX OpenGL context
GLXContext ctx = glXCreateContextAttribsARB(dpy, *fbc, 0, true, attribs);
glXMakeCurrent( dpy, win, ctx );
// NOTE(review): the matrix mode stays GL_PROJECTION for the rest of the
// program, so the glRotatef calls in HandleEvents() modify the projection
// matrix, not a modelview matrix.
glMatrixMode( GL_PROJECTION );
glEnable( GL_CULL_FACE );
glCullFace( GL_FRONT );
// NOTE(review): glFrustum requires 0 < near < far; a near plane of -1 is
// invalid (GL_INVALID_VALUE) -- verify against the glFrustum reference page.
glFrustum( -1, 1, 1, -1, -1, 1 );
glClearColor (0, 0.5, 1, 1);
glClear (GL_COLOR_BUFFER_BIT);
glXSwapBuffers (dpy, win);
fill_triangle_buffer();
// Load and bind texture
glEnable(GL_TEXTURE_2D);
// NOTE(review): the returned texture id is discarded, so the texture can
// never be re-bound or deleted later.
loadBMP_custom("textures/texture.bmp");
// prepare gameloop
double prevTime = GetMilliseconds();
double currentTime = GetMilliseconds();
double deltaTime = 0.0;
timeval time;
long sleepTime = 0;
gettimeofday(&time, NULL);
long nextGameTick = (time.tv_sec * 1000) + (time.tv_usec / 1000);
while ( shutdown != 1 ) {
// Get events if there is any
if (XPending(dpy) > 0) {
XNextEvent(dpy, &ev);
if (ev.type == Expose) {
XWindowAttributes attribs;
XGetWindowAttributes(dpy, win, &attribs);
Resize(attribs.width, attribs.height);
}
if (ev.type == ClientMessage) {
if (ev.xclient.data.l[0] == atomWmDeleteWindow) {
break;
}
}
else if (ev.type == DestroyNotify) {
break;
}
}
// Framelimit calculations, before heavy load
currentTime = GetMilliseconds();
deltaTime = double( currentTime - prevTime ) * 0.001;
prevTime = currentTime;
// Do work
// NOTE(review): ev is passed every frame even when no new event was read
// this iteration, so a stale (or, on the first frames, uninitialized)
// event gets re-handled.
HandleEvents( ev );
//Render();
glClear( GL_COLOR_BUFFER_BIT );
glBegin(GL_TRIANGLES);
// glColor3f clamps to [0,1], so (255,255,255) is plain white; with the
// default GL_MODULATE texture environment that leaves texel colors intact.
glColor3f( 255, 255, 255 );
/*
glTexCoord2f( 0.0f, 1.0f );
glVertex3f( -0.5f, 0.5f, 0.0f );
glTexCoord2f( 1.0f, 1.0f );
glVertex3f( 0.5f, 0.5f, 0.0f );
glTexCoord2f( 0.0f, 0.0f );
glVertex3f( -0.5f, -0.5f, 0.0f );
glVertex3f( 0.5f, 0.5f, 0.0f );
glTexCoord2f( 1.0f, 0.0f );
glVertex3f( 0.5f, -0.5f, 0.0f );
glVertex3f( -0.5f, -0.5f, 0.0f );
*/
// first triangle, bottom left half
glTexCoord2f( 0.0f, 0.0f ); glVertex3f( -0.5f, -0.5f, 0 );
glTexCoord2f( 0.0f, 1.0f ); glVertex3f( -0.5f, 0.5f, 0 );
glTexCoord2f( 1.0f, 0.0f ); glVertex3f( 0.5f, -0.5f, 0 );
// second triangle, top right half
glTexCoord2f( 1.0f, 0.0f ); glVertex3f( 0.5f, -0.5f, 0 );
glTexCoord2f( 0.0f, 1.0f ); glVertex3f( -0.5f, 0.5f, 0 );
glTexCoord2f( 1.0f, 1.0f ); glVertex3f( 0.5f, 0.5f, 0 );
glEnd();
glFlush();
glXSwapBuffers( dpy, win );
// Limit Framerate
gettimeofday( &time, NULL );
nextGameTick += SKIP_TICKS;
sleepTime = nextGameTick - ( (time.tv_sec * 1000) + (time.tv_usec / 1000) );
// NOTE(review): sleepTime is in MILLIseconds but usleep() takes
// MICROseconds -- this should be sleepTime * 1000, not / 1000, and a
// negative sleepTime underflows the unsigned cast. As written the loop
// barely sleeps at all.
usleep((unsigned int)(sleepTime/1000));
}
ctx = glXGetCurrentContext();
glXDestroyContext(dpy, ctx);
}
// Fills the global 'triangles' array with the 12 triangles (6 faces x 2) of a
// unit cube centered on the origin (all corners at +/-0.5).
void fill_triangle_buffer( void ){
// back face (z = -0.5)
triangles[ 0 ] = {
{ 1.0f, 0.0f, 0.0f },
{ -0.5f, -0.5f, -0.5f },
{ 0.5f, 0.5f, -0.5f },
{ 0.5f, -0.5f, -0.5f } };
triangles[ 1 ] = {
{ 0.0f, 0.0f, 1.0f },
{ -0.5f, -0.5f, -0.5f },
{ -0.5f, 0.5f, -0.5f },
{ 0.5f, 0.5f, -0.5f } };
// right face (x = 0.5)
triangles[ 2 ] = {
{ 0.0f, 0.0f, 1.0f },
{ 0.5f, 0.5f, -0.5f },
{ 0.5f, -0.5f, 0.5f },
{ 0.5f, -0.5f, -0.5f } };
triangles[ 3 ] = {
{ 1.0f, 1.0f, 0.0f },
{ 0.5f, 0.5f, -0.5f },
{ 0.5f, 0.5f, 0.5f },
{ 0.5f, -0.5f, 0.5f } };
// front face (z = 0.5)
triangles[ 4 ] = {
{ 1.0f, 0.0f, 0.0f },
{ -0.5f, -0.5f, 0.5f },
{ 0.5f, -0.5f, 0.5f },
{ -0.5f, 0.5f, 0.5f } };
triangles[ 5 ] = {
{ 0.0f, 0.0f, 1.0f },
{ 0.5f, -0.5f, 0.5f },
{ 0.5f, 0.5f, 0.5f },
{ -0.5f, 0.5f, 0.5f } };
// left face (x = -0.5)
triangles[ 6 ] = {
{ 0.0f, 0.0f, 1.0f },
{ -0.5f, 0.5f, -0.5f },
{ -0.5f, -0.5f, 0.5f },
{ -0.5f, 0.5f, 0.5f } };
triangles[ 7 ] = {
{ 1.0f, 1.0f, 0.0f },
{ -0.5f, 0.5f, -0.5f },
{ -0.5f, -0.5f, -0.5f },
{ -0.5f, -0.5f, 0.5f } };
// top face (y = 0.5)
triangles[ 8 ] = {
{ 1.0f, 0.0f, 0.0f },
{ -0.5f, 0.5f, -0.5f },
{ -0.5f, 0.5f, 0.5f },
{ 0.5f, 0.5f, 0.5f } };
triangles[ 9 ] = {
{ 0.0f, 0.0f, 1.0f },
{ -0.5f, 0.5f, -0.5f },
{ 0.5f, 0.5f, 0.5f },
{ 0.5f, 0.5f, -0.5f } };
// bottom face (y = -0.5)
triangles[ 10 ] = {
{ 0.0f, 0.0f, 1.0f },
{ 0.5f, -0.5f, -0.5f },
{ 0.5f, -0.5f, 0.5f },
{ -0.5f, -0.5f, 0.5f } };
triangles[ 11 ] = {
{ 1.0f, 1.0f, 0.0f },
{ 0.5f, -0.5f, -0.5f },
{ -0.5f, -0.5f, 0.5f },
{ -0.5f, -0.5f, -0.5f } };
}
// Matches the GL viewport to the new window size (called on Expose events).
void Resize(int w, int h) {
glViewport(0, 0, w, h);
}
// Flags the main loop to exit (checked as the while-condition in main()).
void Shutdown() {
shutdown = 1;
}
// Draws every triangle in the global 'triangles' buffer with immediate-mode GL.
// (Currently unused: the call site in main() is commented out.)
void Render( void ) {
glClearColor ( 1.0f, 1.0f, 1.0f, 1 );
// Fix: clear once per frame. The old code also called glClear inside an
// already-open glBegin block, which raises GL_INVALID_OPERATION.
glClear( GL_COLOR_BUFFER_BIT );
int numTriangles = sizeof(triangles)/sizeof(triangles[0]);
// Fix: exactly one glBegin/glEnd pair around the whole batch. Previously
// glBegin was issued once before the loop AND once per iteration, with only
// a single glEnd after the loop -- mismatched Begin/End nesting.
glBegin(GL_TRIANGLES);
for ( int i = 0; i < numTriangles; i++ ) {
struct triangle tr = triangles[ i ];
glColor3f( tr.color[ 0 ], tr.color[ 1 ], tr.color[ 2 ] );
glVertex3f( tr.p1[ 0 ], tr.p1[ 1 ], tr.p1[ 2 ] );
glVertex3f( tr.p2[ 0 ], tr.p2[ 1 ], tr.p2[ 2 ] );
glVertex3f( tr.p3[ 0 ], tr.p3[ 1 ], tr.p3[ 2 ] );
}
glEnd();
glFlush();
}
// Mouse-drag state shared with HandleEvents().
float dx, dy;             // cursor delta since the previous MotionNotify
float prevx, prevy;       // previous cursor position
uint8_t prev_defined = 0; // 1 once prevx/prevy hold a valid position
uint8_t key_down = 0;     // 1 while the left mouse button is held
// Processes a single X11 event: dragging with the left button rotates the
// scene, ESC (keycode 9) quits.
// NOTE(review): the matrix mode was left at GL_PROJECTION in main(), so the
// glRotatef calls below rotate the projection matrix -- confirm intent.
void HandleEvents( XEvent ev ) {
int x, y;
switch ( ev.type ) {
case ButtonPress:
if ( ev.xbutton.button == 1 ) {
std::cout << "Left mouse down \n";
// glRotatef( 0.01, 0.0, 0.0, 1.0 );
// Starting a new drag: invalidate the previous cursor position so the
// first motion event does not produce a huge delta.
if ( key_down == 0 )
prev_defined = 0;
key_down = 1;
}
break;
case ButtonRelease:
if ( ev.xbutton.button == 1 ) {
std::cout << "Left mouse up \n";
key_down = 0;
}
break;
case KeyPress:
if ( ev.xkey.keycode == 9 ) { // ESC
Shutdown();
}
break;
case MotionNotify:
x = ev.xmotion.x;
y = ev.xmotion.y;
// Ignore motion unless the left button is held.
if ( key_down == 0 )
break;
// First motion of a drag: just record the anchor position.
if ( !prev_defined ) {
prevx = x;
prevy = y;
prev_defined = 1;
break;
}
dx = x - prevx;
dy = y - prevy;
prevx = x;
prevy = y;
// Accumulate the drag as rotations (degrees), damped by a factor of 10.
glRotatef( -dy/10, 1.0f, 0.0f, 0.0f );
glRotatef( -dx/10, 0.0f, 1.0f, 0.0f );
//std::cout << "Mouse X:" << x << ", Y: " << y << "\n";
break;
}
}
I compile with:
g++ -g -Wall -o _build/main main.cpp -I/opt/x11/include -L/usr/x11/lib -lglfw -lGLEW -lGL -lX11
OS:
Linux kali 5.9.0-kali4-amd64 #1 SMP Debian 5.9.11-1kali1 (2020-12-01) x86_64 GNU/Linux
Does anyone know how to resolve this issue so that the texture above is shown correctly on my triangles?

Why does glm give a wrong result when transforming a 3D vector?

glm::vec4 x = glm::vec4(0.0f, 0.0f, 1.0f, 0.0f);
glm::vec4 y = glm::vec4(0.0f, 1.0f, 0.0f, 0.0f);
glm::vec4 z = glm::vec4(-1.0f, 0.0f, 0.0f, 0.0f);
glm::vec4 t(0.0f, 0.0f, 0.0f, 1.0f);
glm::mat4 rot(x, y, z, t);
glm::vec4 test = rot * glm::vec4(10.0f, 0.0f, 0.0f, 1.0f);
The "test" variable should be (0.0f, 0.0f, -10.0f, 1.0f) but the returned result is (0.0f, 0.0f, 10.0f, 1.0f). Is this a bug in glm?
I suppose you are assuming that the four vectors x, y, z, and t are "row vectors", but it appears that they are "column vectors". Therefore, the matrix rot is most likely:
x y z t
__________
0 0 -1 0
0 1 0 0
1 0 0 0
0 0 0 1
Which when multiplied with column vector:
10
0
0
1
Gives column vector:
0
0
10
1
EDIT: Fixed inverted terminology
Wouldn't the actual result be (0, 0, 10, 2)? Essentially, matrix times a vector means a linear combination of matrix columns with coefficients from a vector. So, in your example
rot * glm::vec4(10.0f, 0.0f, 0.0f, 1.0f)
is equivalent to
10.0f * x + 0.0f * y + 0.0f * z + 1.0f * t

c++ opengl glm combine rotation

Let's say I want to rotate 30° on the x axis, then 60° to the y axis, how to do this with glm?
I tried this to combine them :
mat4 xRotation, yRotation, zRotation;
xRotation = rotate( mat4( 1.0f ), rotation.x, vec3( 1.0f, 0.0f, 0.0f ) );
yRotation = rotate( mat4( 1.0f ), rotation.y, vec3( 0.0f, 1.0f, 0.0f ) );
zRotation = rotate( mat4( 1.0f ), rotation.z, vec3( 0.0f, 0.0f, 1.0f ) );
return zRotation * yRotation * xRotation;
But the resulted rotation is weird.

What's the difference between vector and array for vertex buffers

I have the following code that renders a square beautifully:
static const VertexPositionColor cubeVertices[] =
{
{ XMFLOAT3( 1.0f, -1.0f, 0.0f ), XMFLOAT3( 1.0f, 0.0f, 0.0f ) },
{ XMFLOAT3( -1.0f, -1.0f, 0.0f ), XMFLOAT3( 1.0f, 0.0f, 0.0f ) },
{ XMFLOAT3( 1.0f, 1.0f, 0.0f ), XMFLOAT3( 1.0f, 0.0f, 0.0f ) },
{ XMFLOAT3( -1.0f, 1.0f, 0.0f ), XMFLOAT3( 1.0f, 0.0f, 0.0f ) },
};
D3D11_SUBRESOURCE_DATA vertexBufferData = { 0 };
vertexBufferData.pSysMem = &cubeVertices;
vertexBufferData.SysMemPitch = 0;
vertexBufferData.SysMemSlicePitch = 0;
CD3D11_BUFFER_DESC vertexBufferDesc( sizeof( cubeVertices ), D3D11_BIND_VERTEX_BUFFER );
DX::ThrowIfFailed(
m_deviceResources->GetD3DDevice()->CreateBuffer(
&vertexBufferDesc,
&vertexBufferData,
&m_vertexBuffer[ a ]
)
);
Now, take this code... which is exact replica with exact same sizeof (96)... but this one uses a vector... why does it render nothing?
FormatCollada* colladaObj = new FormatCollada();
static const vector<VertexPositionColor> cubeVertices = colladaObj->Format( *geometryData->Collada->LibraryGeometries->Geometry[ a ] );
D3D11_SUBRESOURCE_DATA vertexBufferData = { 0 };
vertexBufferData.pSysMem = &cubeVertices;
vertexBufferData.SysMemPitch = 0;
vertexBufferData.SysMemSlicePitch = 0;
CD3D11_BUFFER_DESC vertexBufferDesc( sizeof( VertexPositionColor ) * cubeVertices.size(), D3D11_BIND_VERTEX_BUFFER );
DX::ThrowIfFailed(
m_deviceResources->GetD3DDevice()->CreateBuffer(
&vertexBufferDesc,
&vertexBufferData,
&m_vertexBuffer[ a ]
)
);
&cubeVertices does not give you the address of the first element of the vector, it gives you the address of the vector object itself. You probably wanted cubeVertices.data().

how to convert world coordinates to screen coordinates

I am creating a game that will have 2d pictures inside a 3d world.
I originally started off by not caring about my images been stretched to a square while I learnt more about how game mechanics work... but it's now time to get my textures to display in the correct ratio and... size.
Just a side note, I have played with orthographic left hand projections but I noticed that you cannot do 3d in that... (I guess that makes sense... but I could be wrong, I tried it and when I rotated my image, it went all stretchy and weirdosss).
the nature of my game is as follows:
In the image it says -1.0 to 1.0... i'm not fussed if the coordinates are:
topleft = 0,0,0
bottom right = 1920, 1200, 0
But if that's the solution, then whatever... (p.s the game is not currently set up so that -1.0 and 1.0 is left and right of screen. infact i'm not sure how i'm going to make the screen edges the boundaries (but that's a question for another day)
Question:
The issue I am having is that my image for my player (2d) is 128 x 64 pixels. After world matrix multiplication (I think that's what it is) the vertices I put in scale my texture hugely... which makes sense but it looks butt ugly and I don't want to just whack a massive scaling matrix into the mix because it'll be difficult to work out how to make the texture 1:1 to my screen pixels (although maybe you will tell me it's actually how you do it but you need to do a clever formula to work out what the scaling should be).
But basically, I want the vertices to hold a 1:1 pixel size of my image, unstretched...
So I assume I need to convert my world coords to screen coords before outputting my textures and vertices??? I'm not sure how it works...
Anyways, here are my vertices.. you may notice what I've done:
// Vertex layout for the D3D11 vertex buffer: position, normal, UV.
// Must match the input-layout declaration fed to the vertex shader.
struct VERTEX
{
float X, Y, Z;
//float R, G, B, A;
float NX, NY, NZ;
float U, V; // texture coordinates
};
// Two triangles sharing vertices 1 and 2 form the quad.
const unsigned short SquareVertices::indices[ 6 ] = {
0, 1, 2, // side 1
2, 1, 3
};
// Quad sized in raw pixel units (128x64 image, centered on the origin).
// NOTE(review): the last vertex is ( 64, 64 ) while the pattern of the other
// three suggests ( 64, 32 ) -- this would skew the top-right corner; confirm.
const VERTEX SquareVertices::vertices[ 4 ] = {
//{ -1.0f, -1.0f, 0.0f, 0.0f, 0.0f, -1.0f, 0.0f, 0.0f }, // side 1
//{ 1.0f, -1.0f, 0.0f, 0.0f, 0.0f, -1.0f, 1.0f, 0.0f },
//{ -1.0f, 1.0f, 0.0f, 0.0f, 0.0f, -1.0f, 0.0f, 1.0f },
//{ 1.0f, 1.0f, 0.0f, 0.0f, 0.0f, -1.0f, 1.0f, 1.0f }
{ -64.0f, -32.0f, 0.0f, 0.0f, 0.0f, -1.0f, 0.0f, 0.0f }, // side 1
{ 64.0f, -32.0f, 0.0f, 0.0f, 0.0f, -1.0f, 1.0f, 0.0f },
{ -64.0f, 32.0f, 0.0f, 0.0f, 0.0f, -1.0f, 0.0f, 1.0f },
{ 64.0f, 64.0f, 0.0f, 0.0f, 0.0f, -1.0f, 1.0f, 1.0f }
};
(128 pixels / 2 = 64 ), ( 64 / 2 = 32 ) because the centre is 0.0... but what do I need to do to projections, world transdoobifications and what nots to get the worlds to screens?
My current setups look like this:
// called 1st
// Builds the left-handed view matrix: camera at z = -20 looking at the
// origin with +Y up.
void Game::SetUpViewTransformations( )
{
XMVECTOR vecCamPosition = XMVectorSet( 0.0f, 0.0f, -20.0f, 0 );
XMVECTOR vecCamLookAt = XMVectorSet( 0, 0, 0, 0 );
XMVECTOR vecCamUp = XMVectorSet( 0, 1, 0, 0 );
matView = XMMatrixLookAtLH( vecCamPosition, vecCamLookAt, vecCamUp );
}
// called 2nd
// Builds the left-handed perspective projection (45 degree vertical FOV).
// NOTE(review): if windowWidth/windowHeight are integral types this divide
// truncates to an integer aspect ratio -- confirm they are floats or cast.
void Game::SetUpMatProjection( )
{
matProjection = XMMatrixPerspectiveFovLH(
XMConvertToRadians( 45 ), // the field of view
windowWidth / windowHeight, // aspect ratio
1, // the near view-plane
100 ); // the far view-plan
}
and here is a sneaky look at my update and render methods:
// called 3rd
// Per-frame simulation step: advances world/player state and rebuilds the
// world rotation and the player's translation matrix.
void Game::Update( )
{
world->Update();
worldRotation = XMMatrixRotationY( world->rotation );
player->Update( );
XMMATRIX matTranslate = XMMatrixTranslation( player->x, player->y, 0.0f );
//XMMATRIX matTranslate = XMMatrixTranslation( 0.0f, 0.0f, 1.0f );
matWorld[ 0 ] = matTranslate;
}
// called 4th
// Clears the render target, fills the per-frame constant buffer and draws
// the player quad.
// NOTE(review): cBuffer.Final is computed from matView/matProjection BEFORE
// SetUpViewTransformations()/SetUpMatProjection() are called below, so on the
// first frame it uses whatever those matrices held previously -- confirm the
// intended ordering.
void Game::Render( )
{
// set our new render target object as the active render target
d3dDeviceContext->OMSetRenderTargets( 1, rendertarget.GetAddressOf( ), zbuffer.Get( ) );
// clear the back buffer to a deep blue
float color[ 4 ] = { 0.0f, 0.2f, 0.4f, 1.0f };
d3dDeviceContext->ClearRenderTargetView( rendertarget.Get( ), color );
d3dDeviceContext->ClearDepthStencilView( zbuffer.Get( ), D3D11_CLEAR_DEPTH, 1.0f, 0 ); // clear the depth buffer
CBUFFER cBuffer;
cBuffer.DiffuseVector = XMVectorSet( 0.0f, 0.0f, 1.0f, 0.0f );
cBuffer.DiffuseColor = XMVectorSet( 0.5f, 0.5f, 0.5f, 1.0f );
cBuffer.AmbientColor = XMVectorSet( 0.2f, 0.2f, 0.2f, 1.0f );
//cBuffer.Final = worldRotation * matWorld[ 0 ] * matView * matProjection;
cBuffer.Final = worldRotation * matWorld[ 0 ] * matView * matProjection;
cBuffer.Rotation = XMMatrixRotationY( world->rotation );
// calculate the view transformation
SetUpViewTransformations();
SetUpMatProjection( );
//matFinal[ 0 ] = matWorld[0] * matView * matProjection;
UINT stride = sizeof( VERTEX );
UINT offset = 0;
d3dDeviceContext->PSSetShaderResources( 0, 1, player->texture.GetAddressOf( ) ); // Set up texture
d3dDeviceContext->IASetVertexBuffers( 0, 1, player->vertexbuffer.GetAddressOf( ), &stride, &offset ); // Set up vertex buffer
d3dDeviceContext->IASetPrimitiveTopology( D3D11_PRIMITIVE_TOPOLOGY_TRIANGLELIST ); // How the vertices be drawn
d3dDeviceContext->IASetIndexBuffer( player->indexbuffer.Get( ), DXGI_FORMAT_R16_UINT, 0 ); // Set up index buffer
d3dDeviceContext->UpdateSubresource( constantbuffer.Get( ), 0, 0, &cBuffer, 0, 0 ); // set the new values for the constant buffer
d3dDeviceContext->OMSetBlendState( blendstate.Get( ), 0, 0xffffffff ); // DONT FORGET IF YOU DISABLE THIS AND YOU WANT COLOUR, * BY Color.a!!!
d3dDeviceContext->DrawIndexed( ARRAYSIZE( player->indices ), 0, 0 ); // draw
swapchain->Present( 1, 0 );
}
Just to clarify, if I make my vertices use 2 and 1 respective of the fact my image is 128 x 64.. I get a normal looking size image.. and yet at 0,0,0 it's not at 1:1 size... wadduuuppp buddyyyy
{ -2.0f, -1.0f, 0.0f, 0.0f, 0.0f, -1.0f, 0.0f, 0.0f }, // side 1
{ 2.0f, -1.0f, 0.0f, 0.0f, 0.0f, -1.0f, 1.0f, 0.0f },
{ -2.0f, 1.0f, 0.0f, 0.0f, 0.0f, -1.0f, 0.0f, 1.0f },
{ 2.0f, 2.0f, 0.0f, 0.0f, 0.0f, -1.0f, 1.0f, 1.0f }
Desired outcome 2 teh max:
Cool picture isn't it :D ?
Comment help:
I'm not familiar with DirectX, but as far as I can see the issue with your image is that screen coordinates are [-1...+1] on x and y. So the total length on both axes equals 2, and your image is scaled by a factor of 2. Try accounting for this scale in the camera matrix.