C++ header array initialization in constructor

I am working with OpenGL and I am trying to make a simple ground class that draws a rectangle.
I have a class called Vertex:
class Vertex
{
public:
Vertex(
glm::vec3 _position,
glm::vec3 _color = glm::vec3(0.0, 0.0, 0.0),
glm::vec3 _normal = glm::vec3(0.0, 1.0, 0.0),
glm::vec3 _texture = glm::vec3(0.0, 0.0, 0.0));
void setNormal(glm::vec3 _normal);
void setTexture(glm::vec3 _texture);
virtual ~Vertex();
protected:
private:
glm::vec3 position;
glm::vec3 color;
glm::vec3 normal;
glm::vec3 texture;
};
And this is my Ground class:
class Ground
{
private:
double width;
double length;
double y;
int* indexes;
Vertex* vertices;
public:
Ground(double _width, double _length, double y);
};
And here is what I want to do in the Ground constructor:
this->indexes = {0, 3, 1, 1, 3, 2};
this->vertices = {
Vertex(glm::vec3(0 - _width/2, y, 0 - _length/2)),
Vertex(glm::vec3(0 - _width/2, y, 0 + _length/2)),
Vertex(glm::vec3(0 + _width/2, y, 0 + _length/2)),
Vertex(glm::vec3(0 + _width/2, y, 0 - _length/2))
};
I am getting this error when compiling:
error: cannot convert ‘<brace-enclosed initializer list>’ to ‘Vertex*’
Now I know I could initialize every element by hand, but I am looking for a general way to declare a ClassX* array in the header and initialize it in the constructor.

vertices is a Vertex pointer, rather than an array. You can either make the member variable an array of fixed size or create an array of type Vertex on the heap.
For the first option, something like this:
const int NUM_VERTS = 4;
class Ground{
    // ...
    Vertex vertices[NUM_VERTS];
    // ...
};
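A fixed-size member array can also be initialized from the constructor's initializer list; a minimal sketch, assuming C++11 or later (brace-initializing the array avoids the need for a default Vertex constructor):
const int NUM_VERTS = 4;

class Ground
{
public:
    Ground(double _width, double _length, double y);
private:
    Vertex vertices[NUM_VERTS];
};

Ground::Ground(double _width, double _length, double y)
    : vertices{ Vertex(glm::vec3(0 - _width/2, y, 0 - _length/2)),
                Vertex(glm::vec3(0 - _width/2, y, 0 + _length/2)),
                Vertex(glm::vec3(0 + _width/2, y, 0 + _length/2)),
                Vertex(glm::vec3(0 + _width/2, y, 0 - _length/2)) }
{
}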
Or, if you want several different Ground objects with varying numbers of vertices, you can use templates; since each instantiation is a separate class, this causes code bloat and a larger executable. Not a good idea, but it is possible:
template<std::size_t n>
class Ground{
    // ...
    Vertex vertices[n];
    // ...
};
Otherwise, of course, allocate on the heap:
vertices = new Vertex[4]; // note: plain new[] requires Vertex to be default-constructible
vertices[0] = Vertex(glm::vec3(0 - _width/2, y, 0 - _length/2));
// etc.
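If you take the heap route, the object owns the allocation, so pair the new[] with a delete[] in the destructor. A minimal sketch, again assuming C++11 so the array new can be brace-initialized (sidestepping the default-constructor requirement):
Ground::Ground(double _width, double _length, double y)
{
    vertices = new Vertex[4] {
        Vertex(glm::vec3(0 - _width/2, y, 0 - _length/2)),
        Vertex(glm::vec3(0 - _width/2, y, 0 + _length/2)),
        Vertex(glm::vec3(0 + _width/2, y, 0 + _length/2)),
        Vertex(glm::vec3(0 + _width/2, y, 0 - _length/2))
    };
}

Ground::~Ground()
{
    delete[] vertices; // the array would otherwise leak
}
With a raw owning pointer you should also define or delete the copy constructor and copy assignment operator (rule of three), or use std::unique_ptr<Vertex[]> instead.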

Change indexes (indices) and vertices to be std::vector<int> and std::vector<Vertex> respectively. Then you can write:
Ground::Ground(double _width, double _length, double y)
: indexes{0, 3, 1, 1, 3, 2}
, vertices{
Vertex(glm::vec3(0 - _width/2, y, 0 - _length/2)),
Vertex(glm::vec3(0 - _width/2, y, 0 + _length/2)),
Vertex(glm::vec3(0 + _width/2, y, 0 + _length/2)),
Vertex(glm::vec3(0 + _width/2, y, 0 - _length/2))
}
{
}
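The corresponding header declarations then become (a sketch of what this answer implies):
#include <vector>

class Ground
{
private:
    double width;
    double length;
    double y;
    std::vector<int> indexes;
    std::vector<Vertex> vertices;
public:
    Ground(double _width, double _length, double y);
};
The vectors manage their own storage, so no destructor or delete[] is needed.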

Related

How to use freetypegl with {0, w, h, 0} ortho projection?

freetypegl is written to work with a {0, w, 0, h} projection, and almost all demos included with the library use that projection. I prefer {0, w, h, 0} and I do not know how to change the code to work with that ortho. I tried reversing the UVs, but then the text is aligned to the upper bound and does not look good.
How can I change the code to work with {0, w, h, 0} projection?
void AddGlyph(Vector2& position, const char c, uint color, texture_font_t* ftFont)
{
texture_glyph_t* glyph = texture_font_get_glyph(ftFont, &c);
if (c) {
if (string) {
position.x += texture_glyph_get_kerning(glyph, &c - 1) * scale;
}
//One glyph is a quad; a quad has 4 vertices and 6 indices
//(this part is not the source of the problem)
MapBuffer(4, 6);
float x0 = position.x + static_cast<float>(glyph->offset_x),
y0 = position.y + static_cast<float>(glyph->offset_y),
x1 = x0 + static_cast<float>(glyph->width),
y1 = y0 - static_cast<float>(glyph->height),
u0 = glyph->s0,
v0 = glyph->t0,
u1 = glyph->s1,
v1 = glyph->t1;
//vertex struct is position, uv, color
AddVertexData(Vector2(x0, y0), Vector2(u0, v0), color);
AddVertexData(Vector2(x0, y1), Vector2(u0, v1), color);
AddVertexData(Vector2(x1, y1), Vector2(u1, v1), color);
AddVertexData(Vector2(x1, y0), Vector2(u1, v0), color);
//the default rectangle (quad) element indices
AddRectElements();
position.x += glyph->advance_x * scale;
}
}
void DrawString(const std::string& text, Vector2& position, uint color, const Font& font)
{
using namespace ftgl;
texture_font_t* ftFont = font.getFTFont();
for (const auto& c : text) {
AddGlyph(position, c, color, ftFont);
}
}
Maybe I should change library code? If so, what should I change?
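One way to adapt it: with a {0, w, h, 0} projection the y axis grows downward, so the top of the glyph lies at position.y - offset_y instead of position.y + offset_y, and the bottom at top + height. The texture coordinates can stay as they are, since t0 already maps to the top of the glyph. A sketch of the changed lines in AddGlyph (untested against the linked code, so treat the sign conventions as an assumption):
float x0 = position.x + static_cast<float>(glyph->offset_x),
      y0 = position.y - static_cast<float>(glyph->offset_y), // top edge: above the baseline
      x1 = x0 + static_cast<float>(glyph->width),
      y1 = y0 + static_cast<float>(glyph->height);           // bottom edge: below the top
// u0, v0, u1, v1 and the four AddVertexData calls are unchanged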

Finding backup winZ for glm::unProject

I have an OpenGL program that basically just renders a bunch of points. I need to find the world-space coordinates from an arbitrary cursor position, which I do like this:
glm::mat4 modelview = graphics.view*graphics.model;
glm::vec4 viewport = { 0.0, 0.0, windowWidth, windowHeight };
float winX = cursorX;
float winY = viewport[3] - cursorY;
float winZ;
glReadPixels(winX, winY, 1, 1, GL_DEPTH_COMPONENT, GL_FLOAT, &winZ);
glm::vec3 screenCoords = { winX, winY, winZ };
glm::vec3 cursorPosition = glm::unProject(screenCoords, modelview, graphics.projection, viewport);
This works fine if the cursor is on an object, but if it's on an empty part of the screen (i.e. most of it), it's assumed to be on the far clipping plane (winZ = 1), and glm::unProject returns inf values. Ideally, I'd like to pass it a different winZ corresponding to the xy plane at z=0 in world space coordinates, but I can't figure out how to get that value.
Just as there is a glm::unProject function, there is a glm::project function too.
If you want to know the depth of the plane that is parallel to the view plane and passes through z = 0 in world space, then you have to glm::project a point on this plane. It can be any point on the plane, because you are only interested in the z coordinate of the result. You can use the world origin for this:
glm::vec3 world_origin{ 0.0f, 0.0f, 0.0f };
glm::vec3 origin_ndc = glm::project(world_origin, view, graphics.projection, viewport);
float depth0 = origin_ndc[2];
where view is the view matrix, which transforms from world space to camera space (e.g. obtained from glm::lookAt).
So the implementation may look like this:
glm::mat4 modelview = graphics.view * graphics.model;
glm::vec4 viewport = { 0.0, 0.0, windowWidth, windowHeight };
float winX = cursorX;
float winY = viewport[3] - cursorY;
float depth;
glReadPixels(winX, winY, 1, 1, GL_DEPTH_COMPONENT, GL_FLOAT, &depth);
const float epsi = 0.00001f;
if ( depth > 1.0f-epsi )
{
glm::vec3 world_origin{ 0.0f, 0.0f, 0.0f };
glm::vec3 origin_ndc = glm::project(world_origin, graphics.view, graphics.projection, viewport);
depth = origin_ndc[2];
}
glm::vec3 screenCoords{ winX, winY, depth };
glm::vec3 cursorPosition = glm::unProject(screenCoords, modelview, graphics.projection, viewport);
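Wrapped into a small helper (the function name and the Graphics parameter type are illustrative; view, model, and projection are the matrices from the question):
glm::vec3 cursorToWorld(float cursorX, float cursorY, const Graphics& graphics,
                        int windowWidth, int windowHeight)
{
    glm::vec4 viewport = { 0.0f, 0.0f, (float)windowWidth, (float)windowHeight };
    float winX = cursorX;
    float winY = viewport[3] - cursorY;

    float depth;
    glReadPixels((GLint)winX, (GLint)winY, 1, 1, GL_DEPTH_COMPONENT, GL_FLOAT, &depth);

    // cursor over empty space: fall back to the depth of the world-space z = 0 plane
    if (depth > 1.0f - 0.00001f) {
        glm::vec3 world_origin{ 0.0f, 0.0f, 0.0f };
        depth = glm::project(world_origin, graphics.view, graphics.projection, viewport)[2];
    }

    glm::mat4 modelview = graphics.view * graphics.model;
    return glm::unProject(glm::vec3(winX, winY, depth), modelview, graphics.projection, viewport);
}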

How to ripple on a sphere

I'm trying to implement a program that turns a cube into a sphere based on key presses, and ripples whenever it's clicked. I managed to implement the cube-to-sphere-and-back part, but I have no idea where to start on the rippling. I've looked at tons of sources online and I get the math, but I have no idea how to implement it in my vertex shader. Can anyone help me with my dilemma? Thank you!
Here's my cpp, vsh, and fsh: https://drive.google.com/file/d/0B4hkcF9foOTgbUozMjZmSHJhQWM/view?usp=sharing
I'm using GLSL, OpenGL 4.4.0
Here's my code for the vertex shader:
#version 120
attribute vec3 pos;
varying vec4 out_color;
uniform float t;
float PI = 3.14159265359;
int factor = 2; //for determining colors
int num_colors; // = factor * 3 (because RGB)
float currang = 0;
float angfac;
vec4 calculate( float a )
{
//this just calculates the color
}
void main() {
num_colors = factor*3;
angfac = 2*PI/num_colors;
float ang = atan( pos.z, pos.x )+PI;
out_color = calculate(ang);
//rotation
mat3 rotateX = mat3(
vec3( 1, 0, 0),
vec3( 0, cos(t), sin(t)),
vec3( 0, -sin(t), cos(t))
);
mat3 rotateY = mat3(
vec3( cos(t), 0, -sin(t)),
vec3( 0, 1, 0),
vec3( sin(t), 0, cos(t))
);
mat3 rotateZ = mat3(
vec3( cos(t), sin(t), 0),
vec3(-sin(t), cos(t), 0),
vec3( 0, 0, 1) // a rotation about z leaves z unchanged, so this entry is 1
);
gl_Position = gl_ModelViewProjectionMatrix * vec4((pos.xyz*rotateY*rotateX) , 1.0 );
}
and here's parts of my cpp file:
//usual include statements
using namespace std;
enum { ATTRIB_POS };
GLuint mainProgram = 0;
// I use this to indicate the position of the vertices
struct Vtx {
GLfloat x, y, z;
};
const GLfloat PI = 3.14159265359;
const int sideLength = 10;
const size_t nVertices = (sideLength*sideLength*sideLength)-((sideLength-2)*(sideLength-2)*(sideLength-2));
Vtx cube[nVertices];
Vtx sphere[nVertices];
Vtx diff[nVertices];
const double TIME_SPEED = 0.01;
int mI = 4*(sideLength-1);
const int sLCubed = sideLength*sideLength*sideLength;
int indices[nVertices*nVertices];
GLfloat originX = 0.0f; //offset
GLfloat originY = 0.0f; //offset
bool loadShaderSource(GLuint shader, const char *path) {...}
void checkShaderStatus(GLuint shader) {...}
bool initShader() {...}
//in this part of the code, I instantiate an array of indices to be used by glDrawElements()
void transform(int fac)
{
//move from cube to sphere and back by adding/subtracting values and updating cube[].xyz
//moveSpeed = diff[]/speedFac
//fac is to determine direction (going to sphere or going to cube; going to sphere is plus, going back to cube is minus)
for( int i = 0; i<nVertices; i++ )
{
cube[i].x += fac*diff[i].x;
cube[i].y += fac*diff[i].y;
cube[i].z += fac*diff[i].z;
}
}
void initCube() {...} //computation for the vertices of the cube depending on sideLength
void initSphere() {...} //computation for the vertices of the sphere based on the vertices of the cube
void toSphere() {...} //changes the values of the array of vertices of the cube to those of the sphere
void initDiff() {...} //computes for the difference of the values of the vertices of the sphere and the vertices of the cube for the slow transformation
int main() {
//error checking (GLEW, OpenGL versions, etc)
glfwSetWindowTitle("CS177 Final Project");
glfwEnable( GLFW_STICKY_KEYS );
glfwSwapInterval( 1 );
glClearColor(0,0,0,0);
if ( !initShader() ) {
return -1;
}
glEnableVertexAttribArray(ATTRIB_POS);
glVertexAttribPointer(ATTRIB_POS, 3, GL_FLOAT, GL_FALSE, sizeof(Vtx), cube);
initCube();
initIndices();
initSphere();
initDiff();
glUseProgram(mainProgram);
GLuint UNIF_T = glGetUniformLocation(mainProgram, "t");
glMatrixMode(GL_PROJECTION);
glLoadIdentity();
glMatrixMode(GL_MODELVIEW);
glLoadIdentity();
float t = 0;
glUniform1f(UNIF_T, t);
glEnable(GL_CULL_FACE);
glCullFace(GL_BACK);
glPointSize(2.0);
glEnable(GL_POINT_SMOOTH);
glEnable(GL_BLEND);
glBlendFunc(GL_ONE, GL_ONE);
glfwOpenWindowHint(GLFW_FSAA_SAMPLES,16);
glEnable(GL_MULTISAMPLE);
do {
int width, height;
glfwGetWindowSize( &width, &height );
glViewport( 0, 0, width, height );
glClear( GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
t += TIME_SPEED;
glUniform1f(UNIF_T, t);
if (glfwGetKey(GLFW_KEY_DEL)) transform(-1);
if (glfwGetKey(GLFW_KEY_INSERT)) transform( 1 );
if (glfwGetKey(GLFW_KEY_HOME)) initCube();
if (glfwGetKey(GLFW_KEY_END)) toSphere();
glDrawElements( GL_TRIANGLES, nVertices*nVertices, GL_UNSIGNED_INT, indices);
glfwSwapBuffers();
} while ( glfwGetKey(GLFW_KEY_ESC) != GLFW_PRESS &&
glfwGetWindowParam(GLFW_OPENED) );
glDeleteProgram(mainProgram);
glfwTerminate();
return 0;
}
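One common approach to the ripple: displace each vertex along its radial direction by a damped sinusoid whose phase depends on the distance from the clicked point, so the wave spreads outward as t advances. A minimal sketch against the vertex shader above; the rippleCenter and clickTime uniforms (which the application would set on mouse click) and the constants are illustrative assumptions:
uniform vec3 rippleCenter; // point on the surface where the click landed
uniform float clickTime;   // value of t when the click happened

vec3 ripple(vec3 p)
{
    float amplitude = 0.05;
    float frequency = 10.0;
    float speed = 5.0;
    float d = distance(p, rippleCenter);
    // outward-travelling wave, damped with distance so it fades out
    float offset = amplitude * sin(frequency * d - speed * (t - clickTime)) * exp(-2.0 * d);
    return p + normalize(p) * offset;
}

// in main(), displace before rotating:
// gl_Position = gl_ModelViewProjectionMatrix * vec4(ripple(pos) * rotateY * rotateX, 1.0);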

Finding center of image for rotation in opengl

So I have this piece of code, which pretty much draws various 2D textures on the screen, though there are multiple sprites that have to be 'dissected' from the texture (spritesheet). The problem is that rotation is not working properly; while it rotates, it does not rotate on the center of the texture, which is what I am trying to do. I have narrowed it down to the translation being incorrect:
glTranslatef(x + sr->x/2 - sr->w/2,
y + sr->y/2 - sr->h/2,0);
glRotatef(ang,0,0,1.f);
glTranslatef(-x + -sr->x/2 - -sr->w/2,
-y + -sr->y/2 - -sr->h/2,0);
X and Y are the position it is being drawn to; the sheet rect struct contains the X and Y position of the sprite within the texture, along with w and h, the width and height of the 'sprite'. I've tried various other formulas, such as:
glTranslatef(x, y, 0);
and the three below, also with the negative signs switched to positive (x - y to x + y):
glTranslatef(sr->x/2 - sr->w/2, sr->y/2 - sr->h/2, 0 );
glTranslatef(sr->x - sr->w/2, sr->y - sr->h/2, 0 );
glTranslatef(sr->x - sr->w, sr->y - sr->w, 0 );
glTranslatef(.5,.5,0);
It might also be helpful to say that:
glOrtho(0,screen_width,screen_height,0,-2,10);
is in use.
I've tried reading various tutorials, going through various forums, and asking various people, but there doesn't seem to be a solution that works, nor can I find any useful resources that explain how to find the center of the image in order to translate it to (0,0). I'm pretty new to OpenGL, so a lot of this stuff takes a while for me to digest.
Here's the entire function:
void Apply_Surface( float x, float y, Sheet_Container* source, Sheet_Rect* sr , float ang = 0, bool flipx = 0, bool flipy = 0, int e_x = -1, int e_y = -1 ) {
float imgwi,imghi;
glLoadIdentity();
glEnable(GL_TEXTURE_2D);
glBindTexture(GL_TEXTURE_2D,source->rt());
// rotation
imghi = source->rh();
imgwi = source->rw();
Sheet_Rect t_shtrct(0,0,imgwi,imghi);
if ( sr == NULL ) // if no sheet rect is provided, assume the full texture:
                  // its width and height, with an x/y of 0/0
sr = &t_shtrct;
glPushMatrix();
//
int wid, hei;
glGetTexLevelParameteriv(GL_TEXTURE_2D,0,GL_TEXTURE_WIDTH,&wid);
glGetTexLevelParameteriv(GL_TEXTURE_2D,0,GL_TEXTURE_HEIGHT,&hei);
glTranslatef(-sr->x + -sr->w,
-sr->y + -sr->h,0);
glRotatef(ang,0,0,1.f);
glTranslatef(sr->x + sr->w,
sr->y + sr->h,0);
// Yeah, outdated way of drawing to the screen, but it works for now.
GLfloat tex[] = {
(sr->x+sr->w * flipx) /imgwi, 1 - (sr->y+sr->h *!flipy )/imghi,
(sr->x+sr->w * flipx) /imgwi, 1 - (sr->y+sr->h * flipy)/imghi,
(sr->x+sr->w * !flipx) /imgwi, 1 - (sr->y+sr->h * flipy)/imghi,
(sr->x+sr->w * !flipx) /imgwi, 1 - (sr->y+sr->h *!flipy)/imghi
};
GLfloat vertices[] = { // vertices to put on screen
x, (y + sr->h),
x, y,
(x +sr->w), y,
(x +sr->w),(y +sr->h)
};
// index array
GLubyte index[6] = { 0,1,2, 2,3,0 };
float fx = (x/(float)screen_width)-(float)sr->w/2/(float)imgwi;
float fy = (y/(float)screen_height)-(float)sr->h/2/(float)imghi;
// activate arrays
glEnableClientState(GL_VERTEX_ARRAY);
glEnableClientState(GL_TEXTURE_COORD_ARRAY);
// pass vertices and texture information
glVertexPointer(2, GL_FLOAT, 0, vertices);
glTexCoordPointer(2, GL_FLOAT, 0, tex);
glDrawElements(GL_TRIANGLES, 6, GL_UNSIGNED_BYTE, index);
glDisableClientState(GL_VERTEX_ARRAY);
glDisableClientState(GL_TEXTURE_COORD_ARRAY);
glPopMatrix();
glDisable(GL_TEXTURE_2D);
}
Sheet container class:
class Sheet_Container {
GLuint texture;
int width, height;
public:
Sheet_Container();
Sheet_Container(GLuint, int = -1,int = -1);
void Load(GLuint,int = -1,int = -1);
float rw();
float rh();
GLuint rt();
};
Sheet rect class:
struct Sheet_Rect {
float x, y, w, h;
Sheet_Rect();
Sheet_Rect(int xx,int yy,int ww,int hh);
};
Image loading function:
Sheet_Container Game_Info::Load_Image(const char* fil) {
ILuint t_id;
ilGenImages(1, &t_id);
ilBindImage(t_id);
ilLoadImage(const_cast<char*>(fil));
int width = ilGetInteger(IL_IMAGE_WIDTH), height = ilGetInteger(IL_IMAGE_HEIGHT);
return Sheet_Container(ilutGLLoadImage(const_cast<char*>(fil)),width,height);
}
Your quad (two triangles) is centered at:
( x + sr->w / 2, y + sr->h / 2 )
You need to move that point to the origin, rotate, and then move it back:
glTranslatef ( (x + sr->w / 2.0f), (y + sr->h / 2.0f), 0.0f); // 3rd
glRotatef (0,0,0,1.f); // 2nd
glTranslatef (-(x + sr->w / 2.0f), -(y + sr->h / 2.0f), 0.0f); // 1st
Here is where I think you are getting tripped up. People naturally assume that OpenGL applies transformations in the order they appear (top-to-bottom), but that is not the case. OpenGL effectively swaps the operands every time it multiplies two matrices:
M1 x M2 x M3
\_____/
  (1)
\__________/
    (2)

(1) M2 * M1
(2) M3 * (M2 * M1) --> M3 * M2 * M1 (row-major / textbook math notation)
The technical term for this is post-multiplication; it has to do with the way matrices are implemented in OpenGL (column-major). Suffice it to say, you should generally read glTranslatef, glRotatef, glScalef, etc. calls from bottom to top.
With that out of the way, your current rotation does not make any sense.
You are telling GL to rotate 0 degrees around an axis: <0,0,1> (the z-axis in other words). The axis is correct, but a 0 degree rotation is not going to do anything ;)
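Dropped into Apply_Surface, where the quad spans (x, y) to (x + sr->w, y + sr->h), the corrected sequence would look like this sketch (ang is the angle parameter already passed in):
glPushMatrix();
glTranslatef(  x + sr->w / 2.0f,    y + sr->h / 2.0f,  0.0f); // 3rd: move the center back
glRotatef(ang, 0.0f, 0.0f, 1.0f);                             // 2nd: rotate about the origin
glTranslatef(-(x + sr->w / 2.0f), -(y + sr->h / 2.0f), 0.0f); // 1st: move the center to the origin
// ... build the tex/vertices arrays and draw as before ...
glPopMatrix();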

Broken line if using glTranslate

I'm trying to draw a line strip in an OpenGL project.
If I apply glTranslatef to the transformation matrix, the magenta line strip is drawn broken, as shown in the attached figure.
And moving the view, the line strip is broken in different points, or drawn correctly, or not drawn at all.
If I translate manually the points, the line strip is always displayed correctly.
The other lines (red ones: GL_LINE_LOOP, cyan ones: GL_LINES) are manually translated and work properly.
Here is the code with glTranslate:
glMatrixMode(GL_MODELVIEW);
glPushMatrix();
glTranslatef( offs_x, offs_y, 0);
glLineWidth(2.0f);
glColor3f(1.0f, 0.0f, 1.0f);
glVertexPointer( 3, GL_FLOAT, 0, trailPoints );
glDrawArrays(GL_LINE_STRIP,0,numTrailPoints);
glPopMatrix();
and here the working code with manual translation:
for (i=0; i< numTrailPoints; i++)
{
translatedTrailPoints[i].x = trailPoints[i].x + offs_x;
translatedTrailPoints[i].y = trailPoints[i].y + offs_y;
translatedTrailPoints[i].z = trailPoints[i].z;
}
glLineWidth(2.0f);
glColor3f(1.0f, 0.0f, 1.0f);
glVertexPointer( 3, GL_FLOAT, 0, translatedTrailPoints);
glDrawArrays(GL_LINE_STRIP,0,numTrailPoints);
What am I missing here?
EDIT :
To complete the question, here are the data structures (in inverted declaration order for better readability):
vec3 translatedTrailPoints[C_MAX_NUM_OF_TRAIL_POINTS];
vec3 trailPoints[C_MAX_NUM_OF_TRAIL_POINTS];
typedef union
{
float array[3];
struct { float x,y,z; };
struct { float r,g,b; };
struct { float s,t,p; };
struct { vec2 xy; float zz; };
struct { vec2 rg; float bb; };
struct { vec2 st; float pp; };
struct { float xx; vec2 yz; };
struct { float rr; vec2 gb; };
struct { float ss; vec2 tp; };
struct { float theta, phi, radius; };
struct { float width, height, depth; };
struct { float longitude, latitude, altitude; };
struct { float pitch, yaw, roll; };
} vec3;
typedef union
{
float array[2];
struct { float x,y; };
struct { float s,t; };
} vec2;
I tried datenwolf's suggestion, but with no success: I tried #pragma pack(1 | 2 | 4) before the vec2 and vec3 declarations, and compiling with /Zp1 | /Zp2 | /Zp4 (I'm on Visual Studio 2008), but the broken lines/points still persist.
EDIT2 :
Same problems with textured quads:
vec3 point;
point.x = lon;
point.y = lat;
point.z = 500;
glTranslatef( offs_x, offs_y, 0);
glBindTexture(GL_TEXTURE_2D, iconTextures[0]);
glBegin(GL_QUADS);
glTexCoord2f(0.0f, 0.0f); glVertex3f(point.x-C_ICON_WORLD_SIZE, point.y-C_ICON_WORLD_SIZE, point.z);
glTexCoord2f(1.0f, 0.0f); glVertex3f(point.x+C_ICON_WORLD_SIZE, point.y-C_ICON_WORLD_SIZE, point.z);
glTexCoord2f(1.0f, 1.0f); glVertex3f(point.x+C_ICON_WORLD_SIZE, point.y+C_ICON_WORLD_SIZE, point.z);
glTexCoord2f(0.0f, 1.0f); glVertex3f(point.x-C_ICON_WORLD_SIZE, point.y+C_ICON_WORLD_SIZE, point.z);
glEnd();
Results when changing the view (linked screenshots): correctly drawn, Bad 1, Bad 2.
EDIT3 :
I was able to correct the textured quads case by translating by (point.x + offs_x, point.y + offs_y, point.z) and removing the point coordinates in the glVertex definitions. The behaviour in the previous mode still puzzles me.
Try using glLoadIdentity() between the glPushMatrix() and glPopMatrix() calls, since it resets the coordinate system so the translation is applied to a fresh matrix.
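Applied to the first snippet from the question, that suggestion looks like the sketch below. Note that glLoadIdentity() also discards any view transform already on the modelview stack, so this only fits if the translation is meant to be the entire modelview transform:
glMatrixMode(GL_MODELVIEW);
glPushMatrix();
glLoadIdentity();                 // start from a fresh matrix
glTranslatef(offs_x, offs_y, 0);  // then apply the translation
glLineWidth(2.0f);
glColor3f(1.0f, 0.0f, 1.0f);
glVertexPointer(3, GL_FLOAT, 0, trailPoints);
glDrawArrays(GL_LINE_STRIP, 0, numTrailPoints);
glPopMatrix();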