I'm trying to get the depth map of a shape using the depth buffer (and an orthographic projection), but somehow the y-axis is flipped.
I use an OpenCV Mat to display the depth map.
Here is my code:
#include <GL/glew.h>
#include <GLFW/glfw3.h>
#include <opencv2/core.hpp>
#include <opencv2/highgui.hpp>
int main(void)
{
GLFWwindow *window;
int screenWidth = 450, screenHeight = 350;
GLfloat color[] =
{
255, 0, 0,
0, 255, 0,
0, 0, 255
};
GLfloat vertices[] =
{
-200.0f, -50.0f, -100.0f,
250.0f, 150.0f, 0.0f,
100.0f, 300.0f, 0.0f
};
if (!glfwInit())
{
return -1;
}
// Create windows
window = glfwCreateWindow(screenWidth, screenHeight, "Test", NULL, NULL);
cv::namedWindow("Depthmap", cv::WindowFlags::WINDOW_AUTOSIZE);
if (!window)
{
glfwTerminate();
return -1;
}
glfwMakeContextCurrent(window);
glEnable(GL_DEPTH_TEST);
glMatrixMode(GL_PROJECTION);
glLoadIdentity();
glOrtho(-200.0f, 250.0f, -50.0f, 300.0f, 0, 100.0f);
glMatrixMode(GL_MODELVIEW);
glLoadIdentity();
while (true)
{
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
glPushMatrix();
glPolygonMode(GL_FRONT_AND_BACK, GL_FILL);
glEnableClientState(GL_VERTEX_ARRAY);
glEnableClientState(GL_COLOR_ARRAY);
glVertexPointer(3, GL_FLOAT, 0, vertices);
glColorPointer(3, GL_FLOAT, 0, color);
glDrawArrays(GL_TRIANGLES, 0, 3);
glDisableClientState(GL_COLOR_ARRAY);
glDisableClientState(GL_VERTEX_ARRAY);
glPopMatrix();
//To opencv mat
cv::Mat depth = cv::Mat_<float>(screenHeight, screenWidth);
glReadPixels(0, 0, depth.cols, depth.rows, GL_DEPTH_COMPONENT, GL_FLOAT, depth.data);
cv::imshow("Depthmap", depth);
glfwSwapBuffers(window);
glfwPollEvents();
}
return 0;
}
And here is the result:
The actual depth seems to be correct (the red point is 1.0 and the green and blue points are 0.0) but the y-axis is inverted. Why is this and how can I fix it properly?
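The usual cause: glReadPixels() fills the buffer starting at the bottom row, because OpenGL window coordinates have their origin in the bottom-left corner, while a cv::Mat stores its first row at the top. A minimal sketch of one way to handle it, flipping the Mat vertically after the read (the rest of the loop as posted):
//To opencv mat
cv::Mat depth = cv::Mat_<float>(screenHeight, screenWidth);
glReadPixels(0, 0, depth.cols, depth.rows, GL_DEPTH_COMPONENT, GL_FLOAT, depth.data);
cv::flip(depth, depth, 0); // flipCode 0 flips around the x-axis, i.e. a vertical flip
cv::imshow("Depthmap", depth);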
In the code below, I don't understand why some faces have their normals reversed.
The triangles look ordered in the anti-clockwise direction, but some faces remain black.
When I modify the fragment shader with color = -vnormal; the two black faces are rendered correctly, but not the others, obviously.
Thanks for any help.
// minimalist but functional code using glew, glfw, glm
#include <GL/glew.h>
#include <GLFW/glfw3.h>
#include <glm/vec3.hpp>
#include <glm/vec4.hpp>
#include <glm/mat4x4.hpp>
#include <glm/gtc/matrix_transform.hpp>
#include <glm/gtc/type_ptr.hpp>
#include "shaders.h"
GLuint myVAO;
void createCube() {
// v6----- v5
// /| /|
// v1------v0|
// | | | |
// | |v7---|-|v4
// |/ |/
// v2------v3
const GLfloat cube_vertices[] = {
1, 1, 1, -1, 1, 1, -1,-1, 1, // v0-v1-v2 (front)
-1,-1, 1, 1,-1, 1, 1, 1, 1, // v2-v3-v0
1, 1, 1, 1,-1, 1, 1,-1,-1, // v0-v3-v4 (right)
1,-1,-1, 1, 1,-1, 1, 1, 1, // v4-v5-v0
1, 1, 1, 1, 1,-1, -1, 1,-1, // v0-v5-v6 (top)
-1, 1,-1, -1, 1, 1, 1, 1, 1, // v6-v1-v0
-1, 1, 1, -1, 1,-1, -1,-1,-1, // v1-v6-v7 (left)
-1,-1,-1, -1,-1, 1, -1, 1, 1, // v7-v2-v1
-1,-1,-1, 1,-1,-1, 1,-1, 1, // v7-v4-v3 (bottom)
1,-1, 1, -1,-1, 1, -1,-1,-1, // v3-v2-v7
1,-1,-1, -1,-1,-1, -1, 1,-1, // v4-v7-v6 (back)
-1, 1,-1, 1, 1,-1, 1,-1,-1 }; // v6-v5-v4
// normal array
const GLfloat cube_normalsI[] = {
0, 0, 1, 0, 0, 1, 0, 0, 1, // v0-v1-v2 (front)
0, 0, 1, 0, 0, 1, 0, 0, 1, // v2-v3-v0
1, 0, 0, 1, 0, 0, 1, 0, 0, // v0-v3-v4 (right)
1, 0, 0, 1, 0, 0, 1, 0, 0, // v4-v5-v0
0, 1, 0, 0, 1, 0, 0, 1, 0, // v0-v5-v6 (top)
0, 1, 0, 0, 1, 0, 0, 1, 0, // v6-v1-v0
-1, 0, 0, -1, 0, 0, -1, 0, 0, // v1-v6-v7 (left)
-1, 0, 0, -1, 0, 0, -1, 0, 0, // v7-v2-v1
0,-1, 0, 0,-1, 0, 0,-1, 0, // v7-v4-v3 (bottom)
0,-1, 0, 0,-1, 0, 0,-1, 0, // v3-v2-v7
0, 0,-1, 0, 0,-1, 0, 0,-1, // v4-v7-v6 (back)
0, 0,-1, 0, 0,-1, 0, 0,-1 }; // v6-v5-v4
// Upload per-vertex positions
GLuint positionVBO = 0;
glGenBuffers(1, &positionVBO);
glBindBuffer(GL_ARRAY_BUFFER, positionVBO);
glBufferData(GL_ARRAY_BUFFER, sizeof(cube_vertices), cube_vertices, GL_STATIC_DRAW); // sizeof(cube_vertices) is already in bytes
glBindBuffer(GL_ARRAY_BUFFER, 0);
// Upload per-vertex normals
GLuint normalVBO = 0;
glGenBuffers(1, &normalVBO);
glBindBuffer(GL_ARRAY_BUFFER, normalVBO);
glBufferData(GL_ARRAY_BUFFER, sizeof(cube_normalsI), cube_normalsI, GL_STATIC_DRAW); // sizeof(cube_normalsI) is already in bytes
glBindBuffer(GL_ARRAY_BUFFER, 0);
// Hook up vertex/normals buffers to a "vertex array object" (VAO)
glGenVertexArrays(1, &myVAO);
glBindVertexArray(myVAO);
// Attach position buffer as attribute 0
glBindBuffer(GL_ARRAY_BUFFER, positionVBO);
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, sizeof(float) * 3, 0);
glEnableVertexAttribArray(0);
glBindBuffer(GL_ARRAY_BUFFER, 0);
// Attach normal buffer as attribute 1
glBindBuffer(GL_ARRAY_BUFFER, normalVBO);
glVertexAttribPointer(1, 3, GL_FLOAT, GL_FALSE, sizeof(float) * 3, 0);
glEnableVertexAttribArray(1);
glBindBuffer(GL_ARRAY_BUFFER, 0);
glBindVertexArray(0);
}
int main(int argc, char** argv) {
glfwInit();
GLFWwindow* window = glfwCreateWindow(768, 768, "", NULL, NULL);
glfwMakeContextCurrent(window);
glewInit();
glEnable(GL_DEPTH_TEST);
glEnable(GL_CULL_FACE); // same problem with glEnable(GL_FRONT_AND_BACK);
glClearColor(0.8f, 0.7f, 0.5f, 1.0f);
unsigned int program = shaders::CreateShader("simple.vert", "simple.frag");
createCube();
while (glfwWindowShouldClose(window) == GL_FALSE) {
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
glm::mat4 model = glm::translate(glm::mat4(1.0f), glm::vec3(0.0, 0.0, -4.0));
glm::mat4 view = glm::lookAt(glm::vec3(-2.0, -2.0, 0.0), glm::vec3(0.0, 0.0, -4.0), glm::vec3(0.0, 1.0, 0.0));
glm::mat4 projection = glm::perspective(45.0f, 1.0f, 0.1f, 10.0f);
glm::mat4 mvp = projection * view * model;
glUseProgram(program);
GLuint imvp = glGetUniformLocation(program, "mvp");
glUniformMatrix4fv(imvp, 1, false, glm::value_ptr(mvp));
glBindVertexArray(myVAO);
glDrawArrays(GL_TRIANGLES, 0, 36);
glBindVertexArray(0);
glUseProgram(0);
glfwSwapBuffers(window);
glfwPollEvents(); // process window events so glfwWindowShouldClose() can ever become true
}
}
The vertex shader:
#version 330 core
layout (location = 0) in vec3 in_position;
layout (location = 1) in vec3 in_normal;
uniform mat4 mvp;
out vec3 vnormal;
void main() {
vnormal = in_normal;
gl_Position = mvp * vec4(in_position,1);
}
The fragment shader:
#version 330 core
in vec3 vnormal;
out vec3 color;
void main() {
color= vnormal;
}
Output colors written to the default (fixed-point) framebuffer are clamped to the 0.0-1.0 range.
So faces whose normals have negative components, like (-1, 0, 0), end up as RGB(0, 0, 0), i.e. black, in the color buffer.
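If the goal is just to visualize the normals, a common trick (a sketch, not from the original post; it reuses the posted in/out names) is to remap them from the [-1, 1] range into [0, 1] in the fragment shader:
#version 330 core
in vec3 vnormal;
out vec3 color;
void main() {
    // Remap each component from [-1, 1] to [0, 1] so negative normals stay visible.
    color = vnormal * 0.5 + 0.5;
}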
I'm having a problem with my OpenGL application. When I don't have GL_DEPTH_TEST enabled it draws fine (except for the wrong draw order), but if I enable it, everything starts to flicker. OpenGL doesn't give any errors, and I also didn't forget the glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT) call. I'm pretty clueless as to what causes the problem. Any help would be appreciated.
main.cpp
#include <iostream>
#include <SDL2\SDL.h>
#include <GL\glew.h>
#include <glm\glm.hpp>
#include <glm\gtc\matrix_transform.hpp>
#include <fstream>
#include <string>
#include "vertexdata.h"
#define WIDTH 800
#define HEIGHT 600
std::string filetobuf(const char*);
int main(int, char**) {
// Init the window with an OpenGL context
SDL_Window *window;
SDL_Init(SDL_INIT_EVERYTHING);
window = SDL_CreateWindow("SDL window", SDL_WINDOWPOS_CENTERED, SDL_WINDOWPOS_CENTERED, WIDTH, HEIGHT, SDL_WINDOW_OPENGL);
SDL_GLContext context = SDL_GL_CreateContext(window);
glewInit();
// Create the shaders
std::string vertexsourceString = filetobuf("tutorial2.vert");
std::string fragmentsourceString = filetobuf("tutorial2.frag");
const char* vertexsource = vertexsourceString.c_str();
const char* fragmentsource = fragmentsourceString.c_str();
GLuint vertexshader = glCreateShader(GL_VERTEX_SHADER);
glShaderSource(vertexshader, 1, (const GLchar**)&vertexsource, 0);
glCompileShader(vertexshader);
GLuint fragmentshader = glCreateShader(GL_FRAGMENT_SHADER);
glShaderSource(fragmentshader, 1, (const GLchar**)&fragmentsource, 0);
glCompileShader(fragmentshader);
GLuint shaderprogram = glCreateProgram();
glAttachShader(shaderprogram, vertexshader);
glAttachShader(shaderprogram, fragmentshader);
glBindAttribLocation(shaderprogram, 0, "in_Position");
glBindAttribLocation(shaderprogram, 1, "in_Color");
glLinkProgram(shaderprogram);
glUseProgram(shaderprogram);
// Setup rectangles
GLuint vao;
glGenVertexArrays(1, &vao);
glBindVertexArray(vao);
GLuint vbo[3];
glGenBuffers(3, vbo);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, vbo[0]);
glBufferData(GL_ELEMENT_ARRAY_BUFFER, INDICES_SIZE, indices, GL_STATIC_DRAW);
glBindBuffer(GL_ARRAY_BUFFER, vbo[1]);
glBufferData(GL_ARRAY_BUFFER, VERTEX_BUFFER_SIZE, vertices, GL_STATIC_DRAW);
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 0, 0);
glEnableVertexAttribArray(0);
glBindBuffer(GL_ARRAY_BUFFER, vbo[2]);
glBufferData(GL_ARRAY_BUFFER, COLOR_BUFFER_SIZE, colors, GL_STATIC_DRAW);
glVertexAttribPointer(1, 3, GL_FLOAT, GL_FALSE, 0, 0);
glEnableVertexAttribArray(1);
GLint projectionMatrixLocation = glGetUniformLocation(shaderprogram, "projectionMatrix");
glm::mat4 projectionMatrix = glm::perspective(70.0f, (float)WIDTH / (float)HEIGHT, 0.0f, 100.0f);
glUniformMatrix4fv(projectionMatrixLocation, 1, GL_FALSE, &projectionMatrix[0][0]);
GLint modelMatrixLocation = glGetUniformLocation(shaderprogram, "modelMatrix");
glClearColor(0, 0, 1, 1);
glEnable(GL_DEPTH_TEST);
bool running = true;
while (running) {
SDL_Event e;
while (SDL_PollEvent(&e)) {
if (e.type == SDL_QUIT) {
running = false;
}
}
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
glm::mat4 modelMatrix(1);
modelMatrix = glm::translate(modelMatrix, glm::vec3(0, 0, -5));
modelMatrix = glm::rotate(modelMatrix, SDL_GetTicks() / 500.0f, glm::vec3(0, 1, 0));
glUniformMatrix4fv(modelMatrixLocation, 1, GL_FALSE, &modelMatrix[0][0]);
glDrawElements(GL_TRIANGLES, INDICES_SIZE, GL_UNSIGNED_BYTE, 0);
SDL_GL_SwapWindow(window);
}
SDL_GL_DeleteContext(context);
SDL_DestroyWindow(window);
SDL_Quit();
return 0;
}
std::string filetobuf(const char* path)
{
if (path == nullptr) {
std::cout << "No file specified!" << std::endl;
return ""; // returning 0 would construct a std::string from a null pointer
}
std::ifstream file(path);
if (!file.is_open()) {
std::cout << "File not found! '" << path << "'" << std::endl;
return ""; // returning 0 would construct a std::string from a null pointer
}
std::string source;
std::string line;
while (std::getline(file, line)) {
source += line + "\n";
}
file.close();
return source;
}
vertexdata.h
#pragma once
#include <GL\GL.h>
#define INDICES_SIZE 12
#define COLOR_VECTOR_SIZE 3
#define VERTEX_VECTOR_SIZE 3
#define VERTEX_COUNT 8
#define COLOR_BUFFER_SIZE (COLOR_VECTOR_SIZE * VERTEX_COUNT * sizeof(GLfloat))
#define VERTEX_BUFFER_SIZE (VERTEX_VECTOR_SIZE * VERTEX_COUNT * sizeof(GLfloat))
static GLubyte indices[] = {
0, 1, 2,
2, 3, 0,
4, 5, 6,
6, 7, 4,
};
static GLfloat colors[] = {
1, 0, 0,
1, 0, 0,
1, 0, 0,
1, 0, 0,
0, 1, 0,
0, 1, 0,
0, 1, 0,
0, 1, 0,
};
static GLfloat vertices[] = {
1, 1, 1,
1, -1, 1,
-1, -1, 1,
-1, 1, 1,
1, 1, -1,
1, -1, -1,
-1, -1, -1,
-1, 1, -1,
};
Note: I removed the error checking from the main.cpp file to decrease the file size.
You can't set the near plane to zero in a perspective projection. With zNear = 0 the projection matrix degenerates: every vertex ends up with (almost) exactly the same depth after the perspective divide, so the depth test comes down to floating-point noise and the whole scene z-fights, which is the flickering you see. Put something reasonable, like 0.1f, instead:
glm::mat4 projectionMatrix = glm::perspective(70.0f, (float)WIDTH / (float)HEIGHT, 0.1f, 100.0f);
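For illustration, here is a small standalone check (my own sketch; it assumes a GLM version whose glm::perspective takes radians). With zNear = 0 the terms that map eye-space z into clip space become -(far+near)/(far-near) = -1 and -2*far*near/(far-near) = 0, so z_clip equals w_clip and every point lands at NDC depth 1.0, which is why the depth test can no longer order the fragments:
#include <glm/glm.hpp>
#include <glm/gtc/matrix_transform.hpp>
#include <cstdio>
#include <initializer_list>
int main() {
    glm::mat4 broken = glm::perspective(glm::radians(70.0f), 800.0f / 600.0f, 0.0f, 100.0f);
    glm::mat4 usable = glm::perspective(glm::radians(70.0f), 800.0f / 600.0f, 0.1f, 100.0f);
    for (float z : {-1.0f, -10.0f, -50.0f}) {
        glm::vec4 a = broken * glm::vec4(0.0f, 0.0f, z, 1.0f);
        glm::vec4 b = usable * glm::vec4(0.0f, 0.0f, z, 1.0f);
        // zNear = 0 gives depth 1.0 for every z; zNear = 0.1 gives distinct depths.
        std::printf("eye z = %5.1f   zNear=0 depth = %f   zNear=0.1 depth = %f\n",
                    z, a.z / a.w, b.z / b.w);
    }
}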
I'm trying to move my code over to modern OpenGL, but am having trouble. Right now my code will draw a cube and put a texture on it, but it only applies the first texture to all of my faces. I am also using SOIL to load my textures into my program. What am I doing wrong?
This is my code:
class Rectangle
{
public:
Rectangle();
Rectangle(float x, float y, float z, float width, float height, float depth, string frontFace, string backFace, string leftFace,
string RightFace, string topFace, string bottomFace);
void Draw();
private:
GLuint m_Textures[6];
string m_TextureNames[6];
GLfloat m_Vertices[72]; // v0,v1,v2,v3 (front)
// normal array
GLfloat m_Normals[72]; // v0,v1,v2,v3 (front)
// color array
GLfloat m_Colours[72]; // v0,v1,v2,v3 (front)
// index array of vertex array for glDrawElements() & glDrawRangeElement()
GLubyte m_Indices[36]; // front
GLfloat m_Texcoords[48];
};
Rectangle::Rectangle(float x, float y, float z, float width, float height, float depth, string topFace, string bottomFace, string frontFace,
string backFace, string leftFace, string rightFace)
{
m_CenterX = x;
m_CenterY = y;
m_CenterZ = z;
m_Width = width;
m_Height = height;
m_Depth = depth;
m_TextureNames[0] = topFace;
m_TextureNames[1] = bottomFace;
m_TextureNames[2] = frontFace;
m_TextureNames[3] = backFace;
m_TextureNames[4] = leftFace;
m_TextureNames[5] = rightFace;
m_ObjectType = Textured;
GLfloat tempVert[] = { // front
-1.0, -1.0, 1.0,
1.0, -1.0, 1.0,
1.0, 1.0, 1.0,
-1.0, 1.0, 1.0,
// top
-1.0, 1.0, 1.0,
1.0, 1.0, 1.0,
1.0, 1.0, -1.0,
-1.0, 1.0, -1.0,
// back
1.0, -1.0, -1.0,
-1.0, -1.0, -1.0,
-1.0, 1.0, -1.0,
1.0, 1.0, -1.0,
// bottom
-1.0, -1.0, -1.0,
1.0, -1.0, -1.0,
1.0, -1.0, 1.0,
-1.0, -1.0, 1.0,
// left
-1.0, -1.0, -1.0,
-1.0, -1.0, 1.0,
-1.0, 1.0, 1.0,
-1.0, 1.0, -1.0,
// right
1.0, -1.0, 1.0,
1.0, -1.0, -1.0,
1.0, 1.0, -1.0,
1.0, 1.0, 1.0,
};
// normal array
GLfloat tempNormals[] = { 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, // v0,v1,v2,v3 (front)
1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, // v0,v3,v4,v5 (right)
0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, // v0,v5,v6,v1 (top)
-1, 0, 0, -1, 0, 0, -1, 0, 0, -1, 0, 0, // v1,v6,v7,v2 (left)
0,-1, 0, 0,-1, 0, 0,-1, 0, 0,-1, 0, // v7,v4,v3,v2 (bottom)
0, 0,-1, 0, 0,-1, 0, 0,-1, 0, 0,-1 }; // v4,v7,v6,v5 (back)
// color array
GLfloat tempColors[] = { 1, 1, 1, 1, 1, 0, 1, 0, 0, 1, 0, 1, // v0,v1,v2,v3 (front)
1, 1, 1, 1, 0, 1, 0, 0, 1, 0, 1, 1, // v0,v3,v4,v5 (right)
1, 1, 1, 0, 1, 1, 0, 1, 0, 1, 1, 0, // v0,v5,v6,v1 (top)
1, 1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, // v1,v6,v7,v2 (left)
0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 0, 0, // v7,v4,v3,v2 (bottom)
0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 1, 1 }; // v4,v7,v6,v5 (back)
// index array of vertex array for glDrawElements() & glDrawRangeElement()
GLubyte tempIndices[] = { 0, 1, 2, 2, 3, 0, // front
4, 5, 6, 6, 7, 4, // right
8, 9,10, 10,11, 8, // top
12,13,14, 14,15,12, // left
16,17,18, 18,19,16, // bottom
20,21,22, 22,23,20 }; // back
GLfloat tempTexcoords[2*4*6] = {
// front
0.0, 0.0,
1.0, 0.0,
1.0, 1.0,
0.0, 1.0,
};
for (int i = 1; i < 6; i++)
memcpy(&tempTexcoords[i*4*2], &tempTexcoords[0], 2*4*sizeof(GLfloat));
copy(tempVert, tempVert + 72, m_Vertices);
copy(tempNormals, tempNormals + 72, m_Normals);
copy(tempColors, tempColors + 72, m_Colours);
copy(tempIndices, tempIndices + 36, m_Indices);
std::copy(tempTexcoords, tempTexcoords + 48, m_Texcoords);
LoadTexture(m_TextureNames);
}
void Rectangle::LoadTexture(string TextureName[6])
{
// Create texture index array.
glGenTextures(6, m_Textures);
for(int i = 0 ; i < 1 ; i++)
{
glBindTexture(GL_TEXTURE_2D, m_Textures[i]);
// Set our texture parameters
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT);
// Set texture filtering
glTexParameteri(GL_TEXTURE_2D,GL_TEXTURE_MAG_FILTER,GL_NEAREST); // NOTE the GL_NEAREST Here!
glTexParameteri(GL_TEXTURE_2D,GL_TEXTURE_MIN_FILTER,GL_NEAREST); // NOTE the GL_NEAREST Here!
std::string fileType;
fileType.append(m_TextureNames[i], m_TextureNames[i].size()-3,3);
if(fileType == "jpg")
{
m_Textures[i] = SOIL_load_OGL_texture // load an image file directly as a new OpenGL texture
(
m_TextureNames[i].c_str(),
SOIL_LOAD_AUTO,
SOIL_CREATE_NEW_ID,
SOIL_FLAG_MIPMAPS | SOIL_FLAG_INVERT_Y | SOIL_FLAG_NTSC_SAFE_RGB | SOIL_FLAG_COMPRESS_TO_DXT
);
// allocate a texture name
}
else
{
m_Textures[i] = SOIL_load_OGL_texture // load an image file directly as a new OpenGL texture
(
m_TextureNames[i].c_str(),
SOIL_LOAD_AUTO,
SOIL_CREATE_NEW_ID,
SOIL_FLAG_MIPMAPS | SOIL_FLAG_NTSC_SAFE_RGB | SOIL_FLAG_COMPRESS_TO_DXT
);
// allocate a texture name
}
}
}
// Function to draw the Rectangle.
void Rectangle::Draw()
{
// enable and specify pointers to vertex arrays
glEnableClientState(GL_NORMAL_ARRAY);
glEnableClientState(GL_TEXTURE_COORD_ARRAY);
glEnableClientState(GL_COLOR_ARRAY);
glEnableClientState(GL_VERTEX_ARRAY);
glNormalPointer(GL_FLOAT, 0, m_Normals);
glTexCoordPointer(2, GL_FLOAT, 0, m_Texcoords);
glColorPointer(3, GL_FLOAT, 0, m_Colours);
glVertexPointer(3, GL_FLOAT, 0, m_Vertices);
for (int i=0;i<6;i++)
{
glPushMatrix();
glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_2D, m_Textures[i]);
glDrawElements(GL_TRIANGLES, 36, GL_UNSIGNED_BYTE, m_Indices);
glPopMatrix();
}
glDisableClientState(GL_VERTEX_ARRAY); // disable vertex arrays
glDisableClientState(GL_TEXTURE_COORD_ARRAY);
glDisableClientState(GL_COLOR_ARRAY);
glDisableClientState(GL_NORMAL_ARRAY);
}
Rectangle testRect;
// Drawing routine.
void drawScene(void)
{
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
glLoadIdentity();
testRect.Draw();
glutSwapBuffers();
}
// Initialization routine.
void setup(void)
{
glClearColor(0.0, 0.0, 0.0, 0.0);
testRect = Rectangle(2, 0.0, 0.0, 1, 2, 1, "grass.bmp","grass.bmp", "grass.bmp", "launch.png", "launch.png", "launch.png");
// Turn on OpenGL texturing.
glEnable(GL_TEXTURE_2D);
glShadeModel (GL_SMOOTH);
}
The posted code has a few issues:
It's loading only one texture:
glGenTextures(6, m_Textures);
for(int i = 0 ; i < 1 ; i++)
{
glBindTexture(GL_TEXTURE_2D, m_Textures[i]);
...
If you want to load 6 textures, like the rest of the code suggests, you'll have to use 6 for the end value of the loop:
for(int i = 0 ; i < 6 ; i++)
It's creating texture ids twice, and it sets the parameters with the wrong texture bound. At the start of LoadTexture(), it generates 6 texture ids:
glGenTextures(6, m_Textures);
and then binds them and calls glTexParameteri() to set various parameters on them. But then it calls SOIL_load_OGL_texture() with a flag asking it to create a new id again, and stores that new id away:
m_Textures[i] = SOIL_load_OGL_texture(
m_TextureNames[i].c_str(),
SOIL_LOAD_AUTO,
SOIL_CREATE_NEW_ID,
SOIL_FLAG_MIPMAPS | SOIL_FLAG_INVERT_Y | SOIL_FLAG_NTSC_SAFE_RGB | SOIL_FLAG_COMPRESS_TO_DXT
);
To fix this, you can omit the glGenTextures() call, and move the code that binds the texture and calls glTexParameteri() to after the SOIL_load_OGL_texture() call (see the sketch below). Also, this uses a flag to generate mipmaps, but sets the texture filters to not use mipmapping.
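Put together, LoadTexture() could look roughly like this (a sketch only: it drops the jpg/png branch for brevity, keeps the posted SOIL flags, and switches GL_TEXTURE_MIN_FILTER to a mipmapping filter since SOIL_FLAG_MIPMAPS is requested):
void Rectangle::LoadTexture(string TextureName[6])
{
    for (int i = 0; i < 6; i++)
    {
        // Let SOIL create the texture id; no separate glGenTextures() call needed.
        m_Textures[i] = SOIL_load_OGL_texture(
            m_TextureNames[i].c_str(),
            SOIL_LOAD_AUTO,
            SOIL_CREATE_NEW_ID,
            SOIL_FLAG_MIPMAPS | SOIL_FLAG_INVERT_Y | SOIL_FLAG_NTSC_SAFE_RGB | SOIL_FLAG_COMPRESS_TO_DXT);
        // Now bind the texture SOIL just created and set its parameters.
        glBindTexture(GL_TEXTURE_2D, m_Textures[i]);
        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT);
        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT);
        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST_MIPMAP_NEAREST); // use the mipmaps SOIL generated
    }
}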
In the draw function, it loops over the 6 faces, but then draws the entire cube each time:
for (int i=0;i<6;i++)
{
glPushMatrix();
glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_2D, m_Textures[i]);
glDrawElements(GL_TRIANGLES, 36, GL_UNSIGNED_BYTE, m_Indices);
glPopMatrix();
}
The second argument to glDrawElements() specifies the number of vertices to be rendered. With the value 36, all vertices will be used (6 sides of the cube, with 2 triangles each, with 3 vertices each).
Also, the glPushMatrix()/glPopMatrix() pair serves no purpose here. The draw loop should look something like this:
glActiveTexture(GL_TEXTURE0);
for (int i=0;i<6;i++)
{
glBindTexture(GL_TEXTURE_2D, m_Textures[i]);
glDrawElements(GL_TRIANGLES, 6, GL_UNSIGNED_BYTE, m_Indices + i * 6);
}
I don't see the depth test being enabled anywhere. Add this to setup():
glEnable(GL_DEPTH_TEST);
I do it a little differently and it works, so maybe the issue is in the difference.
I would bind each texture to a different texture unit (GL_TEXTURE0, GL_TEXTURE1, ...) so that each texture has its own data. I don't know how SOIL works, but in my case, for the first texture, after:
glTexParameteri(GL_TEXTURE_2D,GL_TEXTURE_MIN_FILTER,GL_NEAREST);
I would include a call:
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, texture1Width, texture1Height, 0, GL_RGBA, GL_UNSIGNED_BYTE, bitmapData);
glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_2D, m_Textures[0]);
And I would repeat this process for each of the 6 textures.
Then I would draw each face:
// First face
glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_2D, m_Textures[0]);
glDrawElements(GL_TRIANGLES, 36, GL_UNSIGNED_BYTE, m_Indices);
// Second face
glActiveTexture(GL_TEXTURE1);
glBindTexture(GL_TEXTURE_2D, m_Textures[1]);
glDrawElements(GL_TRIANGLES, 36, GL_UNSIGNED_BYTE, m_Indices);
And so on for each face.
EDIT:
I did some checking about SOIL, and it looks to me like (using SOIL) you would:
GLuint m_Textures[6];
int img_width, img_height;
glGenTextures(6, m_Textures);
// For each texture
unsigned char* img = SOIL_load_image(m_TextureNames[0].c_str(), &img_width, &img_height, NULL, 0); // or m_TextureNames[1].c_str() ...
glBindTexture(GL_TEXTURE_2D, m_Textures[0]); // or m_textures[1]...
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT);
// Set texture filtering
glTexParameteri(GL_TEXTURE_2D,GL_TEXTURE_MAG_FILTER,GL_NEAREST); // NOTE the GL_NEAREST Here!
glTexParameteri(GL_TEXTURE_2D,GL_TEXTURE_MIN_FILTER,GL_NEAREST); // NOTE the GL_NEAREST Here!
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, img_width, img_height, 0, GL_RGB, GL_UNSIGNED_BYTE, img);
SOIL_free_image_data(img); // the pixels are now in the texture, so the CPU-side copy can be freed
glActiveTexture(GL_TEXTURE0); // or GL_TEXTURE1....
glBindTexture(GL_TEXTURE_2D, m_Textures[0]); // or m_Textures[1]...
I have an Object class that keeps track of the object's scale, translation, and everything else, and I want it to set those transform matrices when it draws (obviously), but for some reason, even though I set the transforms and everything, it does not work.
---------- IN OBJECT------------------
vertices = new Vertex[vertexCount];
scaleX = 100.0f;
scaleY = 100.0f;
scaleZ = 100.0f;
vertices[0] = { 100.0f, 0.0f, 0.5f, 1.0f, D3DCOLOR_XRGB(255, 255, 255), 1, 0, };
vertices[1] = { 100.0f, 100.0f, 0.5f, 1.0f, D3DCOLOR_XRGB(255, 255, 255), 1, 1, };
vertices[2] = { 0.0f, 100.0f, 0.5f, 1.0f, D3DCOLOR_XRGB(255, 255, 255), 0, 1, };
vertices[3] = { 0.0f, 0.0f, 0.5f, 1.0f, D3DCOLOR_XRGB(255, 255, 255), 0, 0, };
v_buffer = NULL;
i_buffer = NULL;
texture = 0;
d3ddev->CreateVertexBuffer(vertexCount * sizeof(Vertex),
0,
CUSTOMFVF,
D3DPOOL_MANAGED,
&v_buffer,
NULL);
short indices[] =
{
0, 1, 2, // side 1
2, 3, 0,
};
// create an index buffer interface called i_buffer
d3ddev->CreateIndexBuffer(6 * sizeof(short),
0,
D3DFMT_INDEX16,
D3DPOOL_MANAGED,
&i_buffer,
NULL);
// lock i_buffer and load the indices into it
i_buffer->Lock(0, 0, (void**)&pVoid, 0);
memcpy(pVoid, indices, sizeof(indices));
i_buffer->Unlock();
// lock v_buffer and load the vertices into it
v_buffer->Lock(0, 0, (void**)&pVoid, 0);
memcpy(pVoid, vertices, sizeof(Vertex)* vertexCount);
v_buffer->Unlock();
-----------DRAW FUNCTION --------------
D3DXVECTOR3 pos = { obj.GetX(), obj.GetY(), obj.GetZ() };
d3ddev->SetFVF(CUSTOMFVF);
// select the vertex buffer to display
d3ddev->SetStreamSource(0, obj.GetVBuffer(), 0, sizeof(Vertex));
d3ddev->SetIndices(obj.GetIBuffer());
D3DXMatrixRotationYawPitchRoll(&obj.rotationTransform, obj.GetRotationX(), obj.GetRotationY(), obj.GetRotationZ());
D3DXMatrixTranslation(&obj.translationTransform, obj.GetX(), obj.GetY(), obj.GetZ());
D3DXMatrixScaling(&obj.scalingTransform, obj.GetScaleX(), obj.GetScaleY(), obj.GetScaleZ());
D3DXMatrixMultiply(&obj.worldTransform, &obj.scalingTransform, &obj.translationTransform);
D3DXMatrixMultiply(&obj.worldTransform, &obj.rotationTransform, &obj.worldTransform);
//obj.worldTransform = obj.rotationTransform * obj.scalingTransform * obj.translationTransform;
d3ddev->SetTransform(D3DTS_WORLD, &obj.worldTransform);
D3DXMatrixLookAtRH(&obj.viewTransform, &D3DXVECTOR3(0, 0, 10), &D3DXVECTOR3(0, 0, 0), &D3DXVECTOR3(0, 0, 0));
d3ddev->SetTransform(D3DTS_VIEW, &obj.viewTransform);
D3DXMatrixPerspectiveFovRH(&obj.projectionTransform, D3DXToRadian(90), (float)SCREEN_WIDTH / (float)SCREEN_HEIGHT, 1.0f, 10.0f);
d3ddev->SetTransform(D3DTS_PROJECTION, &obj.projectionTransform);
// copy the vertex buffer to the back buffer
d3ddev->DrawIndexedPrimitive(D3DPT_TRIANGLELIST, 0, 0, 4, 0, 2);
Okay, you are multiplying the matrices in the wrong order. Matrix multiplication is not commutative, that is, A*B != B*A. Change the order you multiply them in from rotation*scale*translate to scale*rotation*translate.
Hope that helps.
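In terms of the posted D3DX calls, that order would look something like this (a sketch; D3DX uses row vectors, so the leftmost matrix is the one applied to the vertex first):
D3DXMATRIX scaleRot;
D3DXMatrixMultiply(&scaleRot, &obj.scalingTransform, &obj.rotationTransform);  // scale, then rotate
D3DXMatrixMultiply(&obj.worldTransform, &scaleRot, &obj.translationTransform); // then translate
d3ddev->SetTransform(D3DTS_WORLD, &obj.worldTransform);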