I need to create a small game using OpenGL and C++ . I am using this tutorial to get started(my aim isn't actually to create the game but to use the code I eventually create).
I finished video 8 (the one linked) but I ran into a problem. My code crashed on the line
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, imageData.Width, imageData.Height, 0, GL_RGBA, GL_UNSIGNED_BYTE, imageData.Data);
Wen I change the loadAndBufferImage method parameters to something random (hgsjrkbgfdkj works fine) the code doesn't crash but obviously it doesn't load any image. I'm not sure what I am getting wrong. My IDE throws a warning on the glfwReadImage line since it doesn't like variable 3 being NULL (although it runs that line perfectly).
glfwReadImage(fileName, &imageData, NULL);
I'm not sure what I missed out/am doing wrong. Can it be the image? I used one converted via the link provided in the description of the video. The only hing I didn't do in the video was the small image importing part at around 7.40. I am using NetBeans not XCode, and I just imported the rocket.tga file in my resources folder (right click o resources folder, Add Existing Item, added image).
Here is a full copy of my GameWindow.cpp code up till now
#define GLEW_STATIC
#include "GameWindow.h"
#define GL_GLEXT_PROTOTYPES
#include <iostream>
typedef struct {
GLfloat positionCoordinates[3];
GLfloat textureCoordinates[2];
} VertexData;
#define Square_Size 100
VertexData vertices[] = {
{{0.0f, 0.0f, 0.0f},{0.0f,0.0f}},
{{Square_Size, 0.0f, 0.0f}, {1.0f,0.0f}},
{{Square_Size, Square_Size, 0.0f}, {1.0f,1.0f}},
{{0.0f, Square_Size, 0.0f}, {0.0f,1.0f}}
};
void GameWindow::setRunning(bool newRunning) {
_running = newRunning;
}
bool GameWindow::getRunning() {
return _running;
}
GLuint GameWindow::loadAndBufferImage(const char *fileName){
GLFWimage imageData;
glfwReadImage(fileName, &imageData, NULL);
GLuint textureBufferID;
glGenTextures(1, &textureBufferID);
glBindTexture(GL_TEXTURE_2D, textureBufferID);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, imageData.Width, imageData.Height, 0, GL_RGBA, GL_UNSIGNED_BYTE, imageData.Data);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
glfwFreeImage(&imageData);
return textureBufferID;
}
GameWindow::GameWindow(bool running):_running(running), _height(800), _width(800*16/9) {
glClearColor(1.0f,1.0f,1.0f,1.0f);
glViewport(0.0f, 0.0f, _width, _height);
glEnable(GL_TEXTURE_2D);
glEnable(GL_BLEND);
glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA);
glMatrixMode(GL_PROJECTION);
gluOrtho2D(0,_width,0,_height);
glMatrixMode(GL_MODELVIEW);
glGenBuffers(1, &_vertexBufferID);
glBindBuffer(GL_ARRAY_BUFFER, _vertexBufferID);
glBufferData(GL_ARRAY_BUFFER, sizeof(vertices), vertices, GL_STATIC_DRAW);
glEnableClientState(GL_VERTEX_ARRAY);
glVertexPointer(3, GL_FLOAT, sizeof(VertexData), (GLvoid *) offsetof(VertexData,positionCoordinates));
glEnableClientState(GL_TEXTURE_COORD_ARRAY);
glTexCoordPointer(2, GL_FLOAT, sizeof(VertexData), (GLvoid *) offsetof(VertexData, textureCoordinates));
_textureBufferID = loadAndBufferImage("rocket.tga");
}
void GameWindow::render() {
glClear(GL_COLOR_BUFFER_BIT);
glDrawArrays(GL_QUADS, 0, 4);
glfwSwapBuffers();
}
void GameWindow::update() {
}
I am using an oldish version of GLFW I think. Around version 2.7 because I couldn't get the new ones to work. Once again, not really relevant though I think.
Use imageData.Format for format in glTexImage2D():
glTexImage2D
(
GL_TEXTURE_2D,
0,
GL_RGBA,
imageData.Width,
imageData.Height,
0,
imageData.Format, // instead of GL_RGBA
GL_UNSIGNED_BYTE,
imageData.Data
);
Otherwise if imageData.Format == GL_RGB and you lie and say it's GL_RGBA instead then OpenGL will happily read right off the end of imageData.Data looking for RGBA tuples that aren't there.
Related
Closed. This question needs debugging details. It is not currently accepting answers.
Edit the question to include desired behavior, a specific problem or error, and the shortest code necessary to reproduce the problem. This will help others answer the question.
Closed 3 months ago.
Improve this question
I use opengl in qt. I have several pictures with a size of 30000 * 20000, including single channel, three channel and four channel pictures. The function I want to achieve is to switch and display different pictures through the keyboard, and support moving and zooming. I have implemented this function now, but I encountered some problems.
I want to bind them to different texture units and switch them when painting. But it seems to have no effect.
void MyOpenGLWidget::initializeGL()
{
initializeOpenGLFunctions();
glGenVertexArrays(2, VAO);
glGenVertexArrays(2, VBO);
glEnable(GL_DEPTH_TEST);
{
if (!bindShaderProgram(m_shaderProgram, vertexSourceBG, fragmentSourceBG))
return;
glBindVertexArray(VAO[0]);
glBindBuffer(GL_ARRAY_BUFFER, VBO[0]);
glPixelStorei(GL_PACK_ALIGNMENT, 1);
glBufferData(GL_ARRAY_BUFFER, m_vBGVertices.size() * sizeof(GLfloat), m_vBGVertices.data(), GL_STREAM_DRAW);
readImage();
glGenTextures(IMAGE_NUM, texture);
for (int i = 0; i < IMAGE_NUM; i++) {
glActiveTexture(GL_TEXTURE0+i);
glBindTexture(GL_TEXTURE_2D, texture[i]);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
int channel = m_vImage[m_imageIdx].channels();
int internalFormat = GL_RGB, format = GL_RGB;
getImageFormat(channel, internalFormat, format);
glPixelStorei(GL_UNPACK_ALIGNMENT, 1);
glTexImage2D(GL_TEXTURE_2D, 0, internalFormat, m_imageWidth, m_imageHeight, 0, format, GL_UNSIGNED_BYTE, m_vImage[m_imageIdx].data);
glBindTexture(GL_TEXTURE_2D, texture[i]);
}
m_shaderProgram.setAttributeBuffer("aPos", GL_FLOAT, 0, 3, sizeof(GLfloat) * 5);//8
m_shaderProgram.enableAttributeArray("aPos");
m_shaderProgram.setAttributeBuffer("aTexCoord", GL_FLOAT, sizeof(GLfloat) * 3, 2, sizeof(GLfloat) * 5);
m_shaderProgram.enableAttributeArray("aTexCoord");
m_shaderProgram.release();
glBindBuffer(GL_ARRAY_BUFFER, VBO[0]);
glBindVertexArray(VAO[0]);
}
}
void MyOpenGLWidget::paintGL()
{
glClearColor(0.1f, 0.5f, 0.7f, 1.0f);
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
{
glBindVertexArray(VAO[0]);
m_shaderProgram.bind();
createCameraTransformMatrix();
m_shaderProgram.setUniformValue("model", m_mModel);
m_shaderProgram.setUniformValue("view", m_mView);
m_shaderProgram.setUniformValue("projection", m_mProjection);
glActiveTexture(GL_TEXTURE0+m_imageIdx);
glBindTexture(GL_TEXTURE_2D, texture[m_imageIdx]);
m_shaderProgram.setUniformValue("ourTexture", GL_TEXTURE0+m_imageIdx);
m_shaderProgram.setUniformValue("imgWidth", m_imageWidth);
m_shaderProgram.setUniformValue("imgHeight", m_imageHeight);
glDrawArrays(GL_POLYGON, 0, 4);
m_shaderProgram.release();
glBindTexture(GL_TEXTURE_2D, texture[m_imageIdx]);
glBindVertexArray(VAO[0]);
}
}
Switch the currently displayed texture by switching m_imageIdx. But no response.
I bound them to the same texture unit, and then loaded them when refreshing. Now they can be loaded, but the speed is too slow to accept.
void MyOpenGLWidget::paintGL()
{
glClearColor(0.1f, 0.5f, 0.7f, 1.0f);
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
{
glBindVertexArray(VAO[0]);
m_shaderProgram.bind();
createCameraTransformMatrix();
m_shaderProgram.setUniformValue("model", m_mModel);
m_shaderProgram.setUniformValue("view", m_mView);
m_shaderProgram.setUniformValue("projection", m_mProjection);
glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_2D, texture[m_imageIdx]);
updateImage();
if (!m_vImage[m_imageIdx].empty()) {
int channel = m_vImage[m_imageIdx].channels();
int internalFormat = GL_RGB, format = GL_RGB;
getImageFormat(channel, internalFormat, format);
glPixelStorei(GL_UNPACK_ALIGNMENT, 1);
glTexImage2D(GL_TEXTURE_2D, 0, internalFormat, m_imageWidth, m_imageHeight, 0, format, GL_UNSIGNED_BYTE, m_vImage[m_imageIdx].data);
}
m_shaderProgram.setUniformValue("imgWidth", m_imageWidth);
m_shaderProgram.setUniformValue("imgHeight", m_imageHeight);
glDrawArrays(GL_POLYGON, 0, 4);
m_shaderProgram.release();
glBindTexture(GL_TEXTURE_2D, texture[m_imageIdx]);
glBindVertexArray(VAO[0]);
}
I referred to some similar problems and tried glTexSubImage2D, but I needed to refresh the whole picture, so the efficiency was not improved.
I can't reduce the texture precision because the texture display needs high precision.
I hope someone can give me some advice.
Available code snippets.
//init
for (int i = 0; i < IMAGE_NUM; i++) {
glActiveTexture(GL_TEXTURE0 + i);
glBindTexture(GL_TEXTURE_2D, texture[i]);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
int channel = m_vImage[m_imageIdx].channels();
int internalFormat = GL_RGB, format = GL_RGB;
getImageFormat(channel, internalFormat, format);
glPixelStorei(GL_UNPACK_ALIGNMENT, 1);
glTexImage2D(GL_TEXTURE_2D, 0, internalFormat, m_imageWidth, m_imageHeight, 0, format, GL_UNSIGNED_BYTE, m_vImage[i].data);
glBindTexture(GL_TEXTURE_2D, texture[i]);
}
//paintGL
glBindTexture(GL_TEXTURE_2D, texture[m_imageIdx]);
m_shaderProgram.setUniformValue("ourTexture", m_imageIdx);
m_shaderProgram.setUniformValue("imgWidth", m_imageWidth);
m_shaderProgram.setUniformValue("imgHeight", m_imageHeight);
glDrawArrays(GL_POLYGON, 0, 4);
m_shaderProgram.release();
glBindTexture(GL_TEXTURE_2D, texture[m_imageIdx]);
You definitely do not need to upload them every frame. What you did forget was to set the correct value for sampler2D uniform variable in the fragment shader (which you did not post).
m_shaderProgram.setUniformValue("ourTexture", m_imageIdx); must be called in the draw loop
with correctly bound shader
Do not use GL_TEXTURE0 here, as pointed by #HolyBlackCat .
By default, it uses data from GL_TEXTURE0. You can imagine GL_TEXTURE as an array of pointers to the actual textures. glActiveTexture sets which pointer is used for the subsequent GL calls and glBindTexture assigns to this active pointer.
The uniform sampler acts exactly the same, the index you assign to it is the array element it will work with.
This also means that you need to just bind all textures to separate units only once, e.g. how you do (needlessly) during initialization and then vary only the uniform sampler, no binds, not activates needing.
I am currently experiencing an issue with rendering multiple textures at once with OpenGL 3.3 Core. I load 2 separate images through SOIL ( an image loading library ) but only one of the images appears in the final program. This is my current code:
#include <iostream>
#include <GL\glew.h>
#include <GL\GL.h>
#include <GLFW\glfw3.h>
#include <SOIL.h>
#include "Shader.h"
#include "texture2D.h"
using namespace std;
void processInput(GLFWwindow *window) {
if (glfwGetKey(window, GLFW_KEY_ESCAPE) == GLFW_PRESS) {
glfwSetWindowShouldClose(window, true);
}
}
int main() {
glfwInit();
glfwWindowHint(GLFW_CONTEXT_VERSION_MAJOR, 3);
glfwWindowHint(GLFW_CONTEXT_VERSION_MINOR, 3);
glfwWindowHint(GLFW_OPENGL_PROFILE, GLFW_OPENGL_CORE_PROFILE);
GLFWwindow *window = glfwCreateWindow(800, 600, "OpenGL", NULL, NULL);
if (window == NULL) {
cout << "GLFW WINDOW CREATION FAILED! " << endl;
glfwTerminate();
return -1;
}
glfwMakeContextCurrent(window);
cout << "Made By Rial Seebran " << endl;
cout << glfwGetVersionString() << endl;
glewInit();
if (glewInit() != GLEW_OK) {
cout << "GLEW INITIALIZATION FAILED! " << endl;
glfwTerminate();
return -1;
}
float positions[] = {
-0.5f, -0.5f, 0.0f, 0.0f, 0.0f,
-0.5f, 0.5f, 0.0f, 0.0f, 1.0f,
0.5f, -0.5f, 0.0f, 1.0f, 0.0f,
0.5f, 0.5f, 0.0f, 1.0f, 1.0f
};
unsigned int indices[] = {
0, 1, 2,
3, 2, 1
};
unsigned int VAO;
unsigned int VBO;
unsigned int EBO;
glGenVertexArrays(1, &VAO);
glGenBuffers(1, &VBO);
glGenBuffers(1, &EBO);
glBindVertexArray(VAO);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, EBO);
glBindBuffer(GL_ARRAY_BUFFER, VBO);
glBufferData(GL_ELEMENT_ARRAY_BUFFER, sizeof(indices), indices, GL_STATIC_DRAW);
glBufferData(GL_ARRAY_BUFFER, sizeof(positions), positions, GL_STATIC_DRAW);
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, sizeof(float) * 5, ((GLvoid*)(0)));
glEnableVertexAttribArray(0);
glVertexAttribPointer(1, 2, GL_FLOAT, GL_FALSE, sizeof(float) * 5, ((GLvoid*)(sizeof(float) * 3)));
glEnableVertexAttribArray(1);
glBindBuffer(GL_ARRAY_BUFFER, 0);
glBindVertexArray(0);
Shader shader("shader.vs", "shader.fs");
Texture2D texture1;
texture1.LoadTexture("container.jpg");
Texture2D texture2;
texture2.LoadTexture("awesomeface.png");
shader.Use();
while (!glfwWindowShouldClose(window)) {
glClearColor(0.1f, 0.15f, 0.2f, 1.0f);
glClear(GL_COLOR_BUFFER_BIT);
texture1.BindTexture(0);
texture2.BindTexture(1);
shader.Uniform1I("myTexture1", 0);
shader.Uniform1I("myTexture2", 1);
glBindVertexArray(VAO);
glDrawElements(GL_TRIANGLES, 6, GL_UNSIGNED_INT, 0);
glBindVertexArray(0);
processInput(window);
glfwPollEvents();
glfwSwapBuffers(window);
}
glDeleteVertexArrays(1, &VAO);
glDeleteBuffers(1, &VBO);
glDeleteBuffers(1, &EBO);
glfwTerminate();
return 0;
}
And this is the texture class (all in a .h file):
#ifndef TEXTURE2D
#define TEXTURE2D
#include <GL\glew.h>
#include <GL\GL.h>
#include <SOIL.h>
#include <iostream>
using namespace std;
class Texture2D {
public:
Texture2D();
~Texture2D();
void LoadTexture(const char* texPath);
void BindTexture(unsigned int texUnit);
unsigned int texture_M;
};
Texture2D::Texture2D() {
}
Texture2D::~Texture2D() {
glDeleteTextures(1, &texture_M);
}
void Texture2D::LoadTexture(const char* texPath) {
int width;
int height;
unsigned char *image = SOIL_load_image(texPath, &width, &height, 0, SOIL_LOAD_RGBA);
if (image == NULL) {
cout << "Failed to load Image! " << endl;
}
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_MIRRORED_REPEAT);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_MIRRORED_REPEAT);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR_MIPMAP_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glBindTexture(GL_TEXTURE_2D, texture_M);
if (image != NULL) {
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, width, height, 0, GL_RGBA, GL_UNSIGNED_BYTE, image);
glGenerateMipmap(GL_TEXTURE_2D);
}
SOIL_free_image_data(image);
glBindTexture(GL_TEXTURE_2D, 0);
}
void Texture2D::BindTexture(unsigned int texUnit) {
glActiveTexture(GL_TEXTURE0 + texUnit);
glBindTexture(GL_TEXTURE_2D, texture_M);
}
#endif
In the fragment shader, I use the mix() function which linearly interpolates the 2 textures, yet the "awesomeface.png" image is the only texture which shows up in the final program. What should be fixed, for the program to display both textures?
You have to add the following line at the beggining of Texture::LoadTexture() to generate the texture ID:
glGenTextures(1, &texture_M);
This will reserve a valid ID used to identify the texture object (that will later have to be initialized by a call to glBindTexture()). It can be used to reference the texture in subsequent OpenGL calls (including deleting the texture using glDeleteTextures()).
When a unused name is bound to a texture target by glBindTexture, then a texture object is generated.
But a new unused texture name has to be reserved by glGenTextures.
The texture parameters which set by glTexParameteri are set to the texture object which is bound to the specified target.
Reserve the unused texture name, then create the texture object. After that you can set the parameters and specify the two-dimensional texture image.
// 1. reserve texture name
glGenTextures(1, &texture_M);
// 2. generate texture object
glBindTexture(GL_TEXTURE_2D, texture_M);
// set parameters
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_MIRRORED_REPEAT);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_MIRRORED_REPEAT);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR_MIPMAP_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
// generate texture image
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, width, height, 0, GL_RGBA, GL_UNSIGNED_BYTE, image);
glGenerateMipmap(GL_TEXTURE_2D);
I did not call glGenTextures() in the texture2D class. Thanks to stackOverflow user #Rabbid76 for pointing that out
I'm working on a 3D PBR engine along with some colleagues (OpenGL 3.3 + GLFW3 and ImGui), and we have a peculiar error : the application enters its drawing loop normally and gives no OpenGL error whatsoever (as shown by extensive use of glCheckError()), but the screen is completely blank. The peculiar part is that this only happens on my colleagues' computer that uses an Nvidia GeForce GTX 1060 as its graphic cards (2 of my friends have the same exact computer).
After some investigation, it turns out that this error is triggered by a function that is called a single time before the main loop. Long story short, it renders a quad to a texture bound via a framebuffer using a couple shaders. I narrowed down the problem to the glDrawElements call ; on those computers, not changing anything except commenting out this line fixes the display (at the cost of the texture displaying garbage instead of the expected result of the draw call), whereas all of the other computers run the app fine even with the line uncommented.
The culprit function is as follows :
/**
* Returns a texture identifier filled with a precomputed irradiance map calculated
* from a provided, already set up-environment map
*/
GLuint precomputeIrradianceMap(GLuint envMapId)
{
std::cerr << "Precomputing irradiance map ... " << std::endl;
GLuint irradianceVAO, irradianceVBO, irradianceMap,
irradianceVertShader, irradianceFragShader, irradianceProgram, captureFBO,
captureRBO;
GLint aPos_location, env_map;
glGenVertexArrays(1, &irradianceVAO);
glBindVertexArray(irradianceVAO);
glGenBuffers(1, &irradianceVBO);
glBindBuffer(GL_ARRAY_BUFFER, irradianceVBO);
glBufferData(GL_ARRAY_BUFFER, sizeof(quad_vertices), quad_vertices, GL_STATIC_DRAW);
glGenFramebuffers(1, &captureFBO);
glGenRenderbuffers(1, &captureRBO);
glBindFramebuffer(GL_FRAMEBUFFER, captureFBO);
glBindRenderbuffer(GL_RENDERBUFFER, captureRBO);
glRenderbufferStorage(GL_RENDERBUFFER, GL_DEPTH_COMPONENT24, 320, 320);
glFramebufferRenderbuffer(GL_FRAMEBUFFER, GL_DEPTH_ATTACHMENT, GL_RENDERBUFFER, captureRBO);
checkGLerror();
glGenTextures(1, &irradianceMap);
glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_2D, irradianceMap);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA16F, 320, 320, 0, GL_RGBA, GL_FLOAT, NULL);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT);
glBindTexture(GL_TEXTURE_2D, envMapId);
checkGLerror();
irradianceVertShader = createShaderFromSource(GL_VERTEX_SHADER, "shaders/irradiance_vert.glsl");
irradianceFragShader = createShaderFromSource(GL_FRAGMENT_SHADER, "shaders/irradiance_frag.glsl");
irradianceProgram = glCreateProgram();
glAttachShader(irradianceProgram, irradianceVertShader);
glAttachShader(irradianceProgram, irradianceFragShader);
printShaderLog(irradianceVertShader);
printShaderLog(irradianceFragShader);
glLinkProgram(irradianceProgram);
glUseProgram(irradianceProgram);
checkGLerror();
env_map = glGetUniformLocation(irradianceProgram, "environmentMap");
aPos_location = glGetAttribLocation(irradianceProgram, "aPos");
glEnableVertexAttribArray(aPos_location);
checkGLerror();
glVertexAttribPointer(aPos_location, 3, GL_FLOAT, GL_FALSE, 0, (void *) 0);
glUniform1i(env_map, 0);
checkGLerror();
glViewport(0, 0, 320, 320);
glBindFramebuffer(GL_FRAMEBUFFER, captureFBO);
glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, irradianceMap, 0);
checkGLerror();
glClearDepth(1.f);
glClearColor(0.f, 0.f, 0.f, 1.f);
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
checkGLerror();
// This is the culprit
// glDrawElements(GL_TRIANGLES, 6, GL_UNSIGNED_INT, quad_indices);
glBindFramebuffer(GL_FRAMEBUFFER, 0);
glBindRenderbuffer(GL_RENDERBUFFER, 0);
glBindBuffer(GL_ARRAY_BUFFER, 0);
glBindVertexArray(0);
checkGLerror();
// glDisableVertexAttribArray(aPos_location);
glDeleteShader(irradianceVertShader);
glDeleteShader(irradianceFragShader);
glDeleteProgram(irradianceProgram);
glDeleteFramebuffers(1, &captureFBO);
glDeleteRenderbuffers(1, &captureRBO);
glDeleteBuffers(1, &irradianceVBO);
glDeleteVertexArrays(1, &irradianceVAO);
checkGLerror();
std::cerr << "... done " << std::endl;
return irradianceMap;
}
You can safely replace checkGLerror() by std::cerr << glCheckError() << std::endl. As I said before, there is no OpenGL error whatsoever, all the shaders compile fine, and this only breaks on computers equipped with an Nvidia GeForce GTX 1060.
The rest of the code is mostly setting up VAOs, VBOs and the like, and the render loop is as follows :
while (!glfwWindowShouldClose(window))
{
glfwGetFramebufferSize(window, &display_w, &display_h);
float newRatio = (float)display_w / display_h;
if(ratio != newRatio)
{
ratio = newRatio;
setAspectRatio(p, ratio);
invP = p.inverse();
glViewport(0, 0, display_w, display_h);
}
ImGui_ImplGlfwGL3_NewFrame();
// Rendering + OpenGL rendering
// Draw the skybox, then the model
ImGui::Begin("Physical parameters", NULL, ImGuiWindowFlags_AlwaysAutoResize);
ImGui::SliderFloat("Dielectric specularity", &ds, 0.f, 1.f, "%.3f");
ImGui::SliderFloat("Light intensity", &l0, 0.f, 10.f, "%.2f");
ImGui::Checkbox("Use irradiance map as skybox", &skyOrIrradiance);
ImGui::Checkbox("Debug draw irradiance map", &debugDraw);
glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_2D, skyOrIrradiance ? irradianceTexture : skybox_texture);
ImGui::End();
Matrix4f v = camera->getMatrix(), invV = v.inverse();
// glClearColor(0.45f, 0.55f, 0.60f, 1.00f);
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
glBindVertexArray(vertex_array_objs[0]);
glUseProgram(skybox_program);
glUniformMatrix4fv(skybox_v_location, 1, GL_FALSE, (const GLfloat *) invV.data());
glUniformMatrix4fv(skybox_p_location, 1, GL_FALSE, (const GLfloat *) invP.data());
glDisable(GL_DEPTH_TEST);
glDrawElements(GL_TRIANGLES, 6, GL_UNSIGNED_INT, quad_indices);
glBindVertexArray(vertex_array_objs[1]);
glUseProgram(program);
glUniformMatrix4fv(mv_location, 1, GL_FALSE, (const GLfloat *) v.data());
glUniformMatrix4fv(p_location, 1, GL_FALSE, (const GLfloat *)p.data());
glUniform1f(uDS_location, ds);
glUniform1f(L0_location, l0);
glActiveTexture(GL_TEXTURE0 + model.activeTexturesCount());
glBindTexture(GL_TEXTURE_2D, irradianceTexture);
glUniform1i(irradiance_location, model.activeTexturesCount());
glEnable(GL_DEPTH_TEST);
model.render();
if(debugDraw)
{
displayTexture(irradianceTexture);
displayTexture(skybox_texture, 0.f, -1.f);
}
camera->drawControls();
// Draw basic interface
basicInterface();
ImGui::Render();
ImGui_ImplGlfwGL3_RenderDrawData(ImGui::GetDrawData());
glfwSwapBuffers(window);
glfwPollEvents();
camera->update(window);
}
model.render() is as follows :
void GltfModel::render() const
{
for(unsigned int i = 0; i < _activeTextures.size(); i++)
{
if(_textureLocations[i] > -1)
{
int j = _activeTextures[i];
glActiveTexture(GL_TEXTURE0 + i);
glBindTexture(GL_TEXTURE_2D, _textureIds[j]);
glUniform1i(_textureLocations[i], i);
}
}
glBindBuffer(GL_ARRAY_BUFFER, _buffers[ARRAY_BUFFER]);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, _buffers[ELEMENT_ARRAY_BUFFER]);
glDrawElements(_drawingMode, _indicesCount, _indicesType, NULL);
}
Thanks by advance for your time !
EDIT : putting a glClear(GL_COLOR_BUFFER_BIT) call right before glfwSwapBuffers(window) still displays a white screen with the culprit line uncommented even though the clear color has been set to light blue. Commenting the culprit line indeed displays a light blue screen, so this makes me think it's a framebuffer issue, but I can't say for sure.
I making an OpenGL video game using GLFW version 2. I am getting an error I do not understand
The following code is:
//
// GameWindow.cpp
// RocketGame
//
// Created by Vaibhav Malhotra on 12/5/17.
// Copyright © 2017 Vaibhav Malhotra. All rights reserved.
//
#include "GameWindow.hpp"
typedef struct
{
GLfloat positionCoordinates[3];
GLfloat textureCoordinates[2];
} vertexData;
#define Square_Size 100
vertexData vertices[] = {
{{0.0f,0.0f,0.0f}, {0.0f,0.0f}},
{{Square_Size,0.0f,0.0f},{1.0f,0.0f}},
{{Square_Size,Square_Size,0.0f},{1.0f,1.0f}},
{{0.0f,Square_Size,0.0f},{0.0f,1.0f}}
};
void GameWindow::setRunning(bool newRunning)
{
_running = newRunning;
}
bool GameWindow::getRunning()
{
return _running;
}
GLuint GameWindow::loadAndBufferImage(const char *filename)
{
GLFWimage imageData;
glfwReadImage(filename, &imageData, NULL);
GLuint textureBufferID;
glGenTextures(1, &textureBufferID);
glBindTexture(GL_TEXTURE_2D, textureBufferID);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, imageData.Width, imageData.Height, 0, GL_RGBA, GL_UNSIGNED_BYTE, imageData.Data);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
glfwFreeImage(&imageData);
return textureBufferID;
}
GameWindow::GameWindow(bool running):_running(running),_height(800),_width(800*16/9),_vertexBufferID(0)
{
glClearColor(1.0f, 1.0f, 1.0f, 1.0f);
glViewport(0.0f, 0.0f, _width, _height);
glEnable(GL_TEXTURE_2D);
glEnable(GL_BLEND);
glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA);
glMatrixMode(GL_PROJECTION);
gluOrtho2D(0, _width, 0, _height);
glMatrixMode(GL_MODELVIEW);
glGenBuffers(1, &_vertexBufferID);
glBindBuffer(GL_ARRAY_BUFFER, _vertexBufferID);
glBufferData(GL_ARRAY_BUFFER, sizeof(vertices), vertices, GL_STATIC_DRAW);
glEnableClientState(GL_VERTEX_ARRAY);
glVertexPointer(3, GL_FLOAT, sizeof(vertexData), (GLvoid *) offsetof(vertexData, positionCoordinates));
glEnableClientState(GL_TEXTURE_COORD_ARRAY);
glVertexPointer(2, GL_FLOAT, sizeof(vertexData), (GLvoid *) offsetof(vertexData, textureCoordinates));
_textureBufferID = loadAndBufferImage("rocket.tga");
}
void GameWindow::render()
{
glClear(GL_COLOR_BUFFER_BIT);
//glColor3f(1.0f, 0.0f, 0.0f);
glDrawArrays(GL_QUADS, 0, 4);
glfwSwapBuffers();
}
void GameWindow::update()
{
}
Under the render function the code glDrawArrays(GL_QUADS, 0, 4); is returning a runtime error:
Thread 1: EXC_BAD_ACCESS (code=1, address=0x0).
For output I am just getting a black screen.
Why is this error is coming for me?
Under the render function the code glDrawArrays(GL_QUADS, 0, 4); is returning a runtime error Thread 1: EXC_BAD_ACCESS (code=1, address=0x0).
A memory access violation on glDrawArrays generally hints at some sort of problem with VBOs or similar storage objects.
From what I can tell, there are three issues with your code:
You are only calling glEnableClientState and its corresponding gl*Pointer calls once, when they need to be called every frame before glDrawArrays. If you'd like to only call them once, look into Vertex Array Objects (VAOs), although these are only available since OpenGL 3. Bear in mind, that when you move the glEnableClientState calls, you will have to ensure that your buffer is bound again with glBindBuffer.
Each client state has a corresponding gl*Pointer call. You correctly use glVertexPointer for GL_VERTEX_ARRAY, but for GL_TEXTURE_COORD_ARRAY, you need glTexCoordPointer.
Your glEnableClientState calls are missing their corresponding glDisableClientState calls, which need to be called in reverse order of the enabling calls, after your glDrawArrays call.
Just as a side note, the GLU library is deprecated. Your gluOrtho2D call appears to be the only instance where you depend on it, and it can be easily replaced with:
glMatrixMode(GL_PROJECTION);
glLoadIdentity();
glOrtho(0, _width, 0, _height, -1.0f, 1.0f);
You are getting an exception in glDrawArrays because it tries to read memory at address 0, which is what your code actually specifies.
GL's texture coordinate array pointer is initializez to NULL and no VBO. Since you use legacy GL, this will be interpreted as a client memory address. Now you enable GL_TEXCOORD_ARRAY but never actually set glTexCoordPointer, so it stays NULL:
glEnableClientState(GL_TEXTURE_COORD_ARRAY);
glVertexPointer(2, GL_FLOAT, sizeof(vertexData), (GLvoid *) offsetof(vertexData, textureCoordinates));
The quick solution is of course to not overwrite your vertex array pointer state by having it point to the the texture coords, but to specify the texcoord array pointer via glTexCoordPointer.
The real solution is to give up on legacy GL which is deprecated0 since a decade by now, and switch to "modern" OpenGL using a GL 3.2 core profile or above.
Okay still having a few problems with it, this is what I have so far:
Bitmap Display::m_HeightMap;
unsigned int Display:: textures;
My initialise method:
glEnable(GL_TEXTURE_2D);
Bitmap image[2];
GLuint *textures = new GLuint[2];
glGenTextures(1, textures);
glGenTextures(2, textures);
image[0].loadBMP("myTexture.bmp");
image[1].loadBMP("myOtherTexture.bmp");
glTexEnvf(GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE , GL_MODULATE);
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR_MIPMAP_NEAREST);
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT);
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT);
gluBuild2DMipmaps(GL_TEXTURE_2D, 3, image.width, image.height, GL_RGB, GL_UNSIGNED_BYTE, image.data);
The above line gives an error: left of .data must have class/struct/ union
glDisable(GL_TEXTURE_2D);
Draw Method:
void Draw()
{
glEnable(GL_TEXTURE_2D);
glBegin(GL_QUADS); //TOP
glBindTexture(GL_TEXTURE_2D, textures[0]);
glNormal3f(0,1,0);
glColor4f(1,1,1,0);
//glColor3d(0.5,0.40,0.05);
glTexCoord2f(0.0f,0.0f);
glVertex3f(-4.5, 0.3, 2);//bottom left
glTexCoord2f(1.0f,0.0f);
glVertex3f(-4.5, 0.3, 2.5);//bottom right
glTexCoord2f(1.0f,1.0f);
glVertex3f(4.5, 0.3, 2.5);//top right
glTexCoord2f(0.0f,1.0f);
glVertex3f(4.5, 0.3, 2);//top left
glEnd();
glDisable(GL_TEXTURE_2D);
}
Only problem here is that texture is undefined.
One last thing hopefully!
void loadTexture(GLuint texture, const char* filename)
{
Bitmap image;
Bitmap image[2];
image[0].loadBMP("myTexture.bmp"); <=== error
image[1].loadBMP("myTexture2.bmp"); <=== error
glBindTexture(GL_TEXTURE_2D, texture);
glTexEnvf(GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE , GL_MODULATE);
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR_MIPMAP_NEAREST);
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT);
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT);
gluBuild2DMipmaps(GL_TEXTURE_2D, 3, image.width, image.height, GL_RGB, GL_UNSIGNED_BYTE,
image.data);
}
When I try and load multiple bitmaps I get 7 errors,
error C2040: image:'Bitmap [2]' differs in level of indirection of 'Bitmap'
error C2088: '[' illegal for class (twice)
error C2228: left of .BMP must have class/struct/union (twice)
no operator "[]" matches these operands (twice)
So. glGenTextures takes two parameters. An int, and a GLuint*. The int tells GL how many textures to generate, and the GLuint* is an array of GLuints (where to generate the textures). The reason you do the following...
GLuint m_TextureID
glGenTextures(1, &m_TextureID)
is because you only have one texture. If you have more than one, you would do this:
// Substitute 'n' for some const number
GLuint *textures = new GLuint[n];
glGenTextures(n, textures);
This way, you are telling GL I want to generate n textures, and here's an array with allocated space for at least that many textures.
Say you want to use both of them in your draw loop, you would implement that like this:
void draw()
{
glBindTexture(GL_TEXTURE_2D, textures[0]); // Tell GL to use the first texture
// Any drawing here will use first texture
glBindTexture(GL_TEXTURE_2D, textures[1]); // Tell GL to use the second textures
// Any drawing here will use second texture
glBindTexture(GL_TEXTURE_2D, 0); // Set the GL texture to NULL, standard cleanup
}
Make sure to delete textures; at the end of your program to properly clean up after allocating that space.
There are also ways to not have to bind separate textures. You can use what's called a "texture atlas". Basically, this is one bitmap that contains multiple sub-images. So you just generate and bind one bitmap, and use separate parts of it.
To deal with multiple bitmaps, do this:
Bitmap image[2];
image[0].loadBMP("myTexture.bmp");
image[1].loadBMP("myOtherTexture.bmp");
Then follow the process to generate one bitmap for both bitmaps.
Everything below this line is responding to your updated question.
This should pretty much do what you're trying to do.
// Global variable
GLuint textures[2];
// init function
void init()
{
textures = new GLuint[2];
glGenTextures(2, textures);
loadTexture(textures[0], "texture1.bmp");
loadTexture(textures[1], "texture2.bmp");
}
void loadTexture(GLuint texture, const char* filename)
{
Bitmap image;
image.loadBMP(filename);
glBindTexture(GL_TEXTURE_2D, texture);
glTexEnvf(GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE , GL_MODULATE);
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR_MIPMAP_NEAREST);
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT);
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT);
gluBuild2DMipmaps(GL_TEXTURE_2D, 3, image.width, image.height, GL_RGB, GL_UNSIGNED_BYTE, image.data);
}
// draw function
void draw()
{
glBegin(GL_QUADS); //TOP
glBindTexture(GL_TEXTURE_2D, textures[0]);
glNormal3f(0,1,0);
glColor4f(1,1,1,0);
glTexCoord2f(0.0f,0.0f); glVertex3f(-4.5, 0.3, 2); //bottom left
glTexCoord2f(1.0f,0.0f); glVertex3f(-4.5, 0.3, 2.5); //bottom right
glTexCoord2f(1.0f,1.0f); glVertex3f(4.5, 0.3, 2.5); //top right
glTexCoord2f(0.0f,1.0f); glVertex3f(4.5, 0.3, 2); //top left
glEnd();
}
// cleanup function
void cleanup()
{
delete textures;
}
Some of the issues you are pointing out aren't exactly OpenGL related, they are more C/C++ related. I know this isn't the answer you're looking for, but it would probably help you out a lot to learn about C functions, pointers, arrays, etc, and spend a solid month working closely with functions/pointers/arrays before moving on to something like OpenGL, which requires a pretty moderate understanding of C.
const GLsizei n = (number of textures here);
GLuint textureIDs = new GLuint[ n ];
glGenTextures( n, textureIDs );
I assume you mean you want to generate multiple textures and store the id's in an array? You can do that like this:
GLsizei num_textures = 5;
GLuint textures[num_textures];
glGenTextures(num_textures, textures);
Once you've done that, just loop through your textures and for each one, bind it, set parameters, load in image data, etc.
You may want to create your texture array on the heap so you can access the texture IDs later:
GLuint* textures = new GLuint[num_textures];
Just make sure to delete the array later:
delete textures;