I'm trying to texture a 3D pool table with two different textures: one for the table top and one for the legs and walls. The table top is basically a 2D rectangle to which I applied a green texture, and everything worked fine. However, when I try to texture the cubes that make up the legs and walls, I run into what looks like a sampling or mapping problem.
This is my code:
Creating the VAOs and mapping the data:
// Cube vertex data
float cubeVertices[CUBE_NUM_VERTICES * VALS_PER_VERT] = {
    -1.0, -1.0,  1.0,
     1.0, -1.0,  1.0,
     1.0,  1.0,  1.0,
    -1.0,  1.0,  1.0,
    -1.0, -1.0, -1.0,
     1.0, -1.0, -1.0,
     1.0,  1.0, -1.0,
    -1.0,  1.0, -1.0
};

// 12 triangles, 2 per face of the cube
unsigned int legWallIndices[CUBE_NUM_TRIS * 3] = {
    0,1,2, 2,3,0,
    1,5,6, 6,2,1,
    5,4,7, 7,6,5,
    4,0,3, 3,7,4,
    3,2,6, 6,7,3,
    4,5,1, 1,0,4
};
float tableTopVertices[4 * VALS_PER_VERT] = {
    -2.5f, 0.26f, -5.0f,
     2.5f, 0.26f, -5.0f,
     2.5f, 0.26f,  5.0f,
    -2.5f, 0.26f,  5.0f,
};

unsigned int tableTopIndices[2 * 3] = {
    0,1,2, 2,3,0,
};

float tabletTopTexCoord[4 * VALS_PER_TEX] = {
    0.0f, 0.0f,
    1.0f, 0.0f,
    1.0f, 1.0f,
    0.0f, 1.0f
};

float legWallTexCoord[8 * VALS_PER_TEX] = {
    1.0f, 0.0f,
    0.0f, 0.0f,
    0.0f, 1.0f,
    1.0f, 1.0f,
    // not working properly, I don't know why
    1.0f, 0.0f,
    0.0f, 0.0f,
    0.0f, 1.0f,
    1.0f, 1.0f,
};
Vertex shader:
in vec2 green_tex_coord;
in vec2 wood_tex_coord;

out vec2 st1;
out vec2 st2;

void main(void)
{
    // (the gl_Position setup is omitted in the question)
    st1 = green_tex_coord;
    st2 = wood_tex_coord;
}
Fragment shader:
in vec2 st1;
in vec2 st2;

uniform sampler2D greenTexMap;
uniform sampler2D woodTexMap;

// (the fragColour output and the predicate are declared elsewhere; omitted in the question)
void main(void)
{
    vec4 topClr  = vec4(0.04, 0.42, 0.1, 1.0);
    vec4 sideClr = vec4(0.5, 0.35, 0.05, 1.0);
    if (predicate) {
        fragColour = topClr * texture(greenTexMap, st1);
    } else {
        fragColour = sideClr * texture(woodTexMap, st2);
    }
}
This is the result (the green texture is working, but the wood one is not):
If I remove * texture(woodTexMap, st2), the colour changes (it shows sideClr), so the texture lookup does reach the fragment shader, but the texture is badly sampled or mapped. If I instead remove sideClr *, the colour changes again to a flat colour rather than a texture (it actually looks like it samples the bottom-left corner of my picture).
Am I mapping it the wrong way or is it a sampling problem?
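For what it's worth, a common cause of this exact symptom is sharing the 8 cube corners between all six faces: with indexed drawing, each vertex carries exactly one texture coordinate, so at most a few faces can receive a sensible (0,0) to (1,1) mapping, and the remaining faces sample along a degenerate path (often a single edge or corner of the image). Below is a minimal sketch of the usual workaround, giving every face its own four vertices; the names are illustrative, not taken from the code above:

float frontFaceVertices[4 * 3] = {
    -1.0f, -1.0f, 1.0f,
     1.0f, -1.0f, 1.0f,
     1.0f,  1.0f, 1.0f,
    -1.0f,  1.0f, 1.0f,
};
float frontFaceTexCoord[4 * 2] = {
    0.0f, 0.0f,
    1.0f, 0.0f,
    1.0f, 1.0f,
    0.0f, 1.0f,
};
unsigned int frontFaceIndices[6] = { 0, 1, 2, 2, 3, 0 };
// ...and likewise for the other five faces (24 vertices in total),
// offsetting the indices by 4 for each face.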
I am initializing an array:
float verticesRect[] = {
    // Positions           // Normal Coords   // Texture Coords
    width,  height, 0.0f,  0.0, 0.0, 1.0,     1.0f, 0.0f, // Top Right
    width, -height, 0.0f,  0.0, 0.0, 1.0,     1.0f, 1.0f, // Bottom Right
   -width, -height, 0.0f,  0.0, 0.0, 1.0,     0.0f, 1.0f, // Bottom Left
   -width,  height, 0.0f,  0.0, 0.0, 1.0,     0.0f, 0.0f  // Top Left
};
After initializing it, I can write individual values using the array index operator []:

verticesRect[0] = 3;

But to change all the values I would have to go through every index. Can I do something like this?
verticesRect = {
    // Positions           // Normal Coords   // Texture Coords
     0.0,   height, 0.0f,  0.0, 0.0, 1.0,     1.0f, 0.0f, // Top Right
     0.0,  -height, 0.0f,  0.0, 0.0, 1.0,     1.0f, 1.0f, // Bottom Right
   -10.0,  -height, 0.0f,  0.0, 0.0, 1.0,     0.0f, 1.0f, // Bottom Left
   -10.0,   height, 0.0f,  0.0, 0.0, 1.0,     0.0f, 0.0f  // Top Left
};
You can use std::vector for this.
std::vector<float> verticesRect = {
    // Positions           // Normal Coords   // Texture Coords
    width,  height, 0.0f,  0.0, 0.0, 1.0,     1.0f, 0.0f, // Top Right
    width, -height, 0.0f,  0.0, 0.0, 1.0,     1.0f, 1.0f, // Bottom Right
   -width, -height, 0.0f,  0.0, 0.0, 1.0,     0.0f, 1.0f, // Bottom Left
   -width,  height, 0.0f,  0.0, 0.0, 1.0,     0.0f, 0.0f  // Top Left
};

verticesRect = {
    // Positions           // Normal Coords   // Texture Coords
    width,  height, 0.0f,  0.0, 0.0, 1.0,     1.0f, 0.0f, // Top Right
    width, -height, 0.0f,  0.0, 0.0, 1.0,     1.0f, 1.0f, // Bottom Right
   -width, -height, 0.0f,  0.0, 0.0, 1.0,     0.0f, 1.0f, // Bottom Left
   -width,  height, 0.0f,  0.0, 0.0, 1.0,     0.0f, 0.0f  // Top Left
};
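Since this data is evidently destined for OpenGL, note that the vector's contiguous storage can be handed to the API directly. A usage sketch, assuming a buffer is already bound to GL_ARRAY_BUFFER:

glBufferData(GL_ARRAY_BUFFER,
             verticesRect.size() * sizeof(float), // size in bytes, not element count
             verticesRect.data(),                 // pointer to the contiguous float storage
             GL_STATIC_DRAW);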
In C++ you should essentially always use a std::vector, or, if you know the size at compile time, a std::array (a sketch of that variant follows below). But if you want to stick with C-style arrays, you can do this using pointers and dynamic allocation instead:
float* verticesRect = new float[32]{
    // Positions           // Normal Coords   // Texture Coords
    width,  height, 0.0f,  0.0, 0.0, 1.0,     1.0f, 0.0f, // Top Right
    width, -height, 0.0f,  0.0, 0.0, 1.0,     1.0f, 1.0f, // Bottom Right
   -width, -height, 0.0f,  0.0, 0.0, 1.0,     0.0f, 1.0f, // Bottom Left
   -width,  height, 0.0f,  0.0, 0.0, 1.0,     0.0f, 0.0f  // Top Left
};
delete[] verticesRect; // note: array allocations need delete[], not plain delete
verticesRect = new float[32]{
    // Positions           // Normal Coords   // Texture Coords
     0.0,   height, 0.0f,  0.0, 0.0, 1.0,     1.0f, 0.0f, // Top Right
     0.0,  -height, 0.0f,  0.0, 0.0, 1.0,     1.0f, 1.0f, // Bottom Right
   -10.0,  -height, 0.0f,  0.0, 0.0, 1.0,     0.0f, 1.0f, // Bottom Left
   -10.0,   height, 0.0f,  0.0, 0.0, 1.0,     0.0f, 0.0f  // Top Left
};
// (this allocation needs a matching delete[] as well, once you are done with it)
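Since std::array was recommended above but not shown, here is a minimal sketch of that variant; it keeps the data on the stack and still allows wholesale reassignment:

#include <array>

std::array<float, 32> verticesRect = {
    // Positions           // Normal Coords   // Texture Coords
    width,  height, 0.0f,  0.0, 0.0, 1.0,     1.0f, 0.0f, // Top Right
    width, -height, 0.0f,  0.0, 0.0, 1.0,     1.0f, 1.0f, // Bottom Right
   -width, -height, 0.0f,  0.0, 0.0, 1.0,     0.0f, 1.0f, // Bottom Left
   -width,  height, 0.0f,  0.0, 0.0, 1.0,     0.0f, 0.0f  // Top Left
};

// Reassigning all values at once works just like with std::vector:
verticesRect = {
     0.0f,  height, 0.0f,  0.0, 0.0, 1.0,     1.0f, 0.0f, // Top Right
     0.0f, -height, 0.0f,  0.0, 0.0, 1.0,     1.0f, 1.0f, // Bottom Right
   -10.0f, -height, 0.0f,  0.0, 0.0, 1.0,     0.0f, 1.0f, // Bottom Left
   -10.0f,  height, 0.0f,  0.0, 0.0, 1.0,     0.0f, 0.0f  // Top Left
};

// And verticesRect.data() gives the contiguous float* that OpenGL expects.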
I have a program (a game) with a fixed camera and an orthographic projection. It runs smoothly until I change the camera position from (0, 0, 1) to (0, 0, -1).
In a nutshell, I have 2 textures:
{ // texture 1
     960.0f,    0.0f, -5.0f,  0.0f, 0.0f,
     960.0f, 1080.0f, -5.0f,  1.0f, 0.0f,
    1920.0f,    0.0f, -5.0f,  0.0f, 1.0f,
    1920.0f, 1080.0f, -5.0f,  1.0f, 1.0f
}

{ // texture 2
    1290.0f, 390.0f, -7.0f,  0.0f, 0.0f,
    1290.0f, 690.0f, -7.0f,  1.0f, 0.0f,
    1590.0f, 390.0f, -7.0f,  0.0f, 1.0f,
    1590.0f, 690.0f, -7.0f,  1.0f, 1.0f
}
the transformation matrices:
view = glm::lookAt(
    glm::vec3(0.0f, 0.0f, 1.0f),
    glm::vec3(0.0f, 0.0f, 0.0f),
    glm::vec3(0.0f, 1.0f, 0.0f)
);

projection = glm::ortho(
    0.0f,
    1920.0f,
    0.0f,
    1080.0f,
    1.0f,  // zNear
    10.0f  // zFar
);
the vertex shader:
#version 330 core
layout (location = 0) in vec3 aPos;
layout (location = 1) in vec2 aTexCoord;
out vec2 TexCoord;
uniform mat4 model;
uniform mat4 view;
uniform mat4 projection;
void main()
{
    gl_Position = projection * view * model * vec4(aPos, 1.0);
    TexCoord = vec2(aTexCoord.x, aTexCoord.y);
}
If I run this code, it properly displays both textures, does depth testing, and so on.
However, if I change the camera position to (0, 0, -1), flip the textures' z-coordinates to their inverses, +5 and +7, and keep the same look-at target (0, 0, 0), no texture is displayed (rendered). Shouldn't it display the same as before the changes?
The issue is related to the orthographic projection matrix, because it is not centered. When the z-axis of the view is inverted, the x-axis is inverted too. Note that the right-hand rule still has to be fulfilled: the x-axis is the cross product of the y-axis and the z-axis.
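To see why, here is roughly what glm::lookAt does when it builds the camera basis (a sketch of the principle, not the actual GLM source):

// With the eye moved to (0, 0, -1) and the target at the origin:
glm::vec3 eye(0.0f, 0.0f, -1.0f);
glm::vec3 center(0.0f, 0.0f, 0.0f);
glm::vec3 up(0.0f, 1.0f, 0.0f);

glm::vec3 z = glm::normalize(eye - center);      // (0, 0, -1): the forward axis is inverted
glm::vec3 x = glm::normalize(glm::cross(up, z)); // (-1, 0, 0): so the x-axis flips as well
glm::vec3 y = glm::cross(z, x);                  // (0, 1, 0): up stays the same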
When the geometry is at z = -5 and the view and projection matrices are as follows:

view = glm::lookAt(
    glm::vec3(0.0f, 0.0f, 1.0f),
    glm::vec3(0.0f, 0.0f, 0.0f),
    glm::vec3(0.0f, 1.0f, 0.0f)
);
projection = glm::ortho(0.0f, 1920.0f, 0.0f, 1080.0f, 1.0f, 10.0f);
then the object is projected to the viewport:
But if you switch the z position of the geometry and the view, then you get the following situation:
view = glm::lookAt(
    glm::vec3(0.0f, 0.0f, -1.0f),
    glm::vec3(0.0f, 0.0f, 0.0f),
    glm::vec3(0.0f, 1.0f, 0.0f)
);
then the object is beside the viewport:
Shift the orthographic projection along the x-axis to solve the issue:
projection = glm::ortho(-1920.0f, 0.0f, 0.0f, 1080.0f, 1.0f, 10.0f);
How can I change the following code so that it actually draws the triangle?
First come the shaders, then the implementation of the Glwidget class, which is derived from QOpenGLWidget.
// shaders here
static const char* vertexShaderSource =
    "#version 330 core\n"
    "in vec3 posAttr;\n"
    //"attribute lowp vec3 colAttr;\n"
    //"varying lowp vec4 col;\n"
    //"uniform highp mat4 matrix;\n"
    "void main() {\n"
    //"    col = colAttr;\n"
    "    gl_Position = vec4(posAttr, 1);\n"
    "}\n";

// fragment shader
static const char* fragmentShaderSource =
    "#version 330 core\n"
    //"varying lowp vec4 col;\n"
    "void main() {\n"
    "    gl_FragColor = vec4(1.0, 0.0, 1.0, 0.0);\n"
    "}\n";
Glwidget::Glwidget(QWidget* parent) : QOpenGLWidget(parent)
{
    //
}

Glwidget::~Glwidget()
{
    cleanup();
}

void Glwidget::initializeGL()
{
    connect(context(), &QOpenGLContext::aboutToBeDestroyed, this, &Glwidget::cleanup);
    initializeOpenGLFunctions();
    glClearColor(.0f, .0f, .0f, 1.0f);

    shader = new QOpenGLShaderProgram(this);
    shader->addShaderFromSourceCode(QOpenGLShader::Vertex, vertexShaderSource);
    shader->addShaderFromSourceCode(QOpenGLShader::Fragment, fragmentShaderSource);
    //posAttribute = shader->attributeLocation("posAttr");
    //colAttribute = shader->attributeLocation("colAttr");
    //matrixAttribute = shader->uniformLocation("matrix");
    Q_ASSERT(shader->link());
    Q_ASSERT(shader->bind());
    //shader->release();

    glEnable(GL_DEPTH_TEST);
    glEnable(GL_CULL_FACE);
}

void Glwidget::paintGL()
{
    glClear(GL_COLOR_BUFFER_BIT);
    makeCurrent();

    matrix.perspective(60.0, 4.0f/3.0f, 0.1f, 10.0f);
    matrix.translate(0, 0, -2);
    matrix.rotate(100.0f, 0, 1, 0);
    //shader->setUniformValue(matrixAttribute, matrix);
    //shader->bind();

    GLfloat vertices[] = {
         0.0f,  0.707f, 1.0f,
        -0.5f, -0.5f,   1.0f,
         0.5f, -0.5f,   1.0f
    };
    shader->setAttributeArray(posAttribute, vertices, 3);

    GLfloat colors[] = {
        1.0f, 0.0f, 0.0f,
        0.0f, 1.0f, 0.0f,
        0.0f, 0.0f, 1.0f
    };

    glVertexAttribPointer(posAttribute, 3, GL_FLOAT, GL_FALSE, 0, vertices);
    //glVertexAttribPointer(colAttribute, 3, GL_FLOAT, GL_FALSE, 0, colors);
    glEnableVertexAttribArray(posAttribute);
    //glEnableVertexAttribArray(colAttribute);

    glDrawArrays(GL_TRIANGLES, 0, 1);

    glDisableVertexAttribArray(posAttribute);
    //glDisableVertexAttribArray(colAttribute);
    //shader->release();
}

void Glwidget::resizeGL(int w, int h)
{
    matrix.setToIdentity();
    matrix.perspective(45.0f, w / float(h), 0.01f, 1000.0f);
    //glViewport(0, 0, w, h);
}

void Glwidget::mousePressEvent(QMouseEvent *event)
{
    Q_UNUSED(event);
}

void Glwidget::mouseMoveEvent(QMouseEvent *event)
{
    Q_UNUSED(event);
}

void Glwidget::cleanup()
{
    if (shader == nullptr)
        return;
    makeCurrent();
    delete shader;
    shader = 0;
    doneCurrent();
}
You have to determine the attribute index of posAttr after the program has been linked:
Q_ASSERT(shader->link());
posAttribute = shader->attributeLocation("posAttr");
Since the depth test is enabled, you have to clear the depth buffer. See glClear:
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
You do not use any model, view, or projection matrix, so the coordinates have to be given in normalized device space. This means all the coordinates have to be in the range [-1.0, 1.0]; in particular, the near and far planes at -1.0 and 1.0 have to be considered. By default the depth test function is GL_LESS, so your triangle is clipped by the far plane, because a z-coordinate of 1.0 is not less than the far-plane value of 1.0. Use a z-coordinate of 0.0 for the vertices (of course, something like 0.99 would work, too):
GLfloat vertices[] = { 0.0f, 0.707f, 0.0f, -0.5f, -0.5f, 0.0f, 0.5f, -0.5f, 0.0f };
shader->setAttributeArray(posAttribute,vertices, 3);
The third parameter of glDrawArrays has to be the number of vertices, not the number of primitives:
glDrawArrays(GL_TRIANGLES, 0, 3);
The relevant code parts may look like this:
void Glwidget::initializeGL()
{
    connect(context(), &QOpenGLContext::aboutToBeDestroyed, this, &Glwidget::cleanup);
    initializeOpenGLFunctions();
    glClearColor(.0f, .0f, .0f, 1.0f);

    shader = new QOpenGLShaderProgram(this);
    shader->addShaderFromSourceCode(QOpenGLShader::Vertex, vertexShaderSource);
    shader->addShaderFromSourceCode(QOpenGLShader::Fragment, fragmentShaderSource);
    Q_ASSERT(shader->link());
    posAttribute = shader->attributeLocation("posAttr");
    Q_ASSERT(shader->bind());

    glEnable(GL_DEPTH_TEST);
    glEnable(GL_CULL_FACE);
}

void Glwidget::paintGL()
{
    glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);

    GLfloat vertices[] = { 0.0f, 0.707f, 0.0f, -0.5f, -0.5f, 0.0f, 0.5f, -0.5f, 0.0f };
    shader->setAttributeArray(posAttribute, vertices, 3);
    glEnableVertexAttribArray(posAttribute);
    glDrawArrays(GL_TRIANGLES, 0, 3);
    glDisableVertexAttribArray(posAttribute);
}
Note, since face culling is enabled, you have to respect the winding order of the primitives (which is the case in your code).
Furthermore, as mentioned by @derhass in the comments, you have to change the fragment shader: you have to define an output variable.
(Side note: use raw string literals):
static const char* fragmentShaderSource = R"(
#version 330 core
out vec4 fragColor;
void main() {
fragColor = vec4(1.0, 0.0, 1.0, 0.0);
}
)";
If you want to draw lines instead of filled polygons, you can do this with GL_LINE_LOOP. See Line primitives.
glDrawArrays(GL_LINE_LOOP, 0, 3);
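As an aside (an alternative, not required here): you can also keep GL_TRIANGLES and render outlines by switching the polygon rasterization mode:

glPolygonMode(GL_FRONT_AND_BACK, GL_LINE); // rasterize triangle edges only
glDrawArrays(GL_TRIANGLES, 0, 3);
glPolygonMode(GL_FRONT_AND_BACK, GL_FILL); // restore the default fill mode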
I want to render two triangles in blue and a "windmill" in red. To do this I have created 2 shaders, which are the same except for the color. The two triangles are a lot bigger than the "windmill". The problem I'm facing is that if I switch between shaders, ONLY the last object will be rendered. If I switch to using only 1 shader, both objects will be drawn, but I can barely see the "windmill" because of the same color. So my question is how to draw both objects with two shaders? (I know I can just pass a color to the fragment shader, but I don't want to do that).
Render loop:
GLint index, index2;
index = glGetUniformLocation(shaders[LINE], "projectionMatrix");
index2 = glGetUniformLocation(shaders[TRIANGLE], "projectionMatrix");
glUniformMatrix3fv(index, 1, true, value_ptr(projectionMatrix));
glUniformMatrix3fv(index2, 1, true, value_ptr(projectionMatrix));
glClear(GL_COLOR_BUFFER_BIT);
glUseProgram(shaders[TRIANGLE]);
glBindVertexArray(vaos[TRIANGLE]);
glDrawArrays(GL_TRIANGLES, 0, tbufindex/sizeof(glm::vec3));
glUseProgram(shaders[LINE]); // If I comment out this line both objects will be drawn
glBindVertexArray(vaos[LINE]);
glDrawArrays(GL_LINE_STRIP, 0, sizeof(vertices_position)/sizeof(glm::vec3));
Line/Triangle.vert:
#version 450
layout (location = 0) in vec3 vPosition;
uniform mat3 projectionMatrix;
void main()
{
    vec3 tmp = projectionMatrix * vPosition;
    gl_Position = vec4(tmp, 1.0f);
}
Line/triangle.frag:
#version 450
in vec4 gl_FragCoord;
out vec4 fColor;
void main()
{
    fColor = vec4(0.0, 0.0, 1.0, 1.0);
}
Also note that I don't have GL_DEPTH_TEST enabled; I'm using 2D coordinates.
Edit: the positions:
triangles[2] = { { vec3(-0.90f, -0.90f, 1.0f), vec3(0.85f, -0.90f, 1.0f), vec3(-0.90f, 0.85f, 1.0f) },
{ vec3(0.90f, -0.85f, 1.0f), vec3(0.90f, 0.90f, 1.0f), vec3(-0.85f, 0.90f, 1.0f) } };
lines[39] = {
0.0f, 0.0f, 1.0f,
0.5f, 0.0f, 1.0f,
0.5f, 0.5f, 1.0f,
0.0f, 0.0f, 1.0f,
0.0f, 0.5f, 1.0f,
-0.5f, 0.5f, 1.0f,
0.0f, 0.0f, 1.0f,
-0.5f, 0.0f, 1.0f,
-0.5f, -0.5f, 1.0f,
0.0f, 0.0f, 1.0f,
0.0f, -0.5f, 1.0f,
0.5f, -0.5f, 1.0f,
0.0f, 0.0f, 1.0f
};
glUniform*() sets a uniform on the currently bound program, so it must be called after glUseProgram(). The following should work:
glClear(GL_COLOR_BUFFER_BIT);
glUseProgram(shaders[TRIANGLE]);
glUniformMatrix3fv(index2, 1, true, value_ptr(projectionMatrix));
glBindVertexArray(vaos[TRIANGLE]);
glDrawArrays(GL_TRIANGLES, 0, tbufindex/sizeof(glm::vec3));
glUseProgram(shaders[LINE]); // bind the second program before setting its uniform
glUniformMatrix3fv(index, 1, true, value_ptr(projectionMatrix));
glBindVertexArray(vaos[LINE]);
glDrawArrays(GL_LINE_STRIP, 0, sizeof(vertices_position)/sizeof(glm::vec3));
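Alternatively, since the shaders already declare #version 450, the context is new enough for the glProgramUniform* family (available since OpenGL 4.1), which names the program explicitly and does not depend on which program is currently bound; a sketch under that assumption:

glProgramUniformMatrix3fv(shaders[TRIANGLE], index2, 1, GL_TRUE, value_ptr(projectionMatrix));
glProgramUniformMatrix3fv(shaders[LINE], index, 1, GL_TRUE, value_ptr(projectionMatrix));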
I had some trouble with animation in my OpenGL project (the window became non-responsive after a few seconds or when clicked, and animations were getting stuck and looping at seemingly random points), so I was advised to add an idle callback and then make my Display function render only the "current" frame each time it is called.
However, I don't know how to make my Display function render only the "current" frame.
Here's my Display function:
// GLUT display callback function
void Display(void)
{
    glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
    glLoadIdentity(); // transformations are represented by matrices

    // for placing camera
    gluLookAt(0, 1, 50, 0, 0, 0, 0, 1, 0);

    // for mouse movement
    glTranslatef(g_fTransX, g_fTransY, g_fZoom);
    glRotatef(g_fSpinX, 1.0f, 0.0f, 0.0f);
    glRotatef(g_fSpinY, 0.0f, 1.0f, 0.0f);

    glLightfv(GL_LIGHT0, GL_POSITION, lpos);

    // xyz axes
    glDisable(GL_LIGHTING);
    glLineWidth(2.0);
    glBegin(GL_LINES);
    glColor3f(1.0, 0.0, 0.0);
    glVertex3f(0.0, 0.0, 0.0);
    glVertex3f(20.0, 0.0, 0.0);
    glColor3f(0.0, 1.0, 0.0);
    glVertex3f(0.0, 0.0, 0.0);
    glVertex3f(0.0, 20.0, 0.0);
    glColor3f(0.0, 0.0, 1.0);
    glVertex3f(0.0, 0.0, 0.0);
    glVertex3f(0.0, 0.0, 20.0);
    glEnd();
    glEnable(GL_LIGHTING);

    float Ambient_m[] = { 0.0f, 1.0f, 0.0f, 0.0f };
    glMaterialfv(GL_FRONT, GL_AMBIENT, Ambient_m);
    float Ambient_l[] = { 0.2f, 0.2f, 0.2f, 0.0f };
    glLightfv(GL_LIGHT0, GL_AMBIENT, Ambient_l);

    // implementing our custom cylinder function
    //draw_cylinder(g_translate_x, g_translate_y, g_dof3_angle);

    // our csg object with 4 DOF
    GLUquadricObj* qobj = gluNewQuadric();
    glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
    //draw_lamp(g_dof3_angle);
    //glutSwapBuffers();

    /* animate(time_from, time_to, dof1_from, dof1_to,
               dof2_from, dof2_to, dof3_from, dof3_to,
               dof4_from, dof4_to, dof5_from, dof5_to,
               dof6_from, dof6_to, dof7_from, dof7_to) */
    animate_lamp(0.0f, 5.0f, 0.0f, 0.0f,
                 0.0f, 0.0f, 0.0f, 0.0f,
                 0.0f, 45.0f, 0.0f, -45.0f,
                 0.0f, 0.0f, 0.0f, 0.0f);
    animate_lamp(5.0f, 7.0f, 0.0f, 5.0f,
                 0.0f, 5.0f, 0.0f, 0.0f,
                 45.0f, -45.0f, -45.0f, 45.0f,
                 0.0f, 0.0f, 0.0f, 0.0f);
    animate_lamp(7.0f, 10.0f, 5.0f, 10.0f,
                 5.0f, 0.0f, 0.0f, 0.0f,
                 45.0f, -45.0f, -45.0f, 45.0f,
                 0.0f, 0.0f, 0.0f, 0.0f);

    //glutSwapBuffers(); // swap back buffer to front buffer (which is drawn on screen)
}
It's simple: store the current state of the animation somewhere (since you're using C++, you can create an object for it), and then, in your render function, query the state from that object and render it.
Here's a simplified example:
class AnimationData
{
    float pos = 0.0f; // current x offset of the triangle
public:
    void step() { pos += 0.1f; }
    void render() const
    {
        glBegin(GL_TRIANGLES);
        glVertex3f(pos, 1.f, 1.f);
        glVertex3f(pos, -1.f, 1.f);
        glVertex3f(pos + 1.f, 0.f, 1.f);
        glEnd();
    }
};
AnimationData myAnimation;

void Display()
{
    glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
    glLoadIdentity(); // transformations are represented by matrices

    // for placing camera
    gluLookAt(0, 1, 50, 0, 0, 0, 0, 1, 0);

    // for mouse movement
    glTranslatef(g_fTransX, g_fTransY, g_fZoom);
    glRotatef(g_fSpinX, 1.0f, 0.0f, 0.0f);
    glRotatef(g_fSpinY, 0.0f, 1.0f, 0.0f);

    glLightfv(GL_LIGHT0, GL_POSITION, lpos);

    // And any other setup
    myAnimation.render();

    glutSwapBuffers();
}

void timerCallbackFunction()
{
    myAnimation.step();
    glutPostRedisplay();
}
This will animate a triangle by moving it a bit to the right each time timerCallbackFunction() is called. The display function simply renders the triangle at its current position.
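The snippet above does not show how the callback gets registered; here is a minimal sketch, assuming classic GLUT. glutIdleFunc(timerCallbackFunction) would call it as often as possible; for a fixed rate, use a GLUT timer instead. GLUT timer callbacks take an int tag and fire only once, so the callback must re-arm itself (timerTick is a hypothetical name):

void timerTick(int value)
{
    myAnimation.step();
    glutPostRedisplay();
    glutTimerFunc(16, timerTick, 0); // re-arm: roughly 60 steps per second
}

// in main(), after the other glut* setup calls:
//     glutTimerFunc(16, timerTick, 0);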