I am making an OpenGL video game using GLFW version 2, and I am getting an error I do not understand.
Here is the code:
//
// GameWindow.cpp
// RocketGame
//
// Created by Vaibhav Malhotra on 12/5/17.
// Copyright © 2017 Vaibhav Malhotra. All rights reserved.
//
#include "GameWindow.hpp"
typedef struct
{
GLfloat positionCoordinates[3];
GLfloat textureCoordinates[2];
} vertexData;
#define Square_Size 100
vertexData vertices[] = {
{{0.0f,0.0f,0.0f}, {0.0f,0.0f}},
{{Square_Size,0.0f,0.0f},{1.0f,0.0f}},
{{Square_Size,Square_Size,0.0f},{1.0f,1.0f}},
{{0.0f,Square_Size,0.0f},{0.0f,1.0f}}
};
void GameWindow::setRunning(bool newRunning)
{
_running = newRunning;
}
bool GameWindow::getRunning()
{
return _running;
}
GLuint GameWindow::loadAndBufferImage(const char *filename)
{
GLFWimage imageData;
glfwReadImage(filename, &imageData, NULL);
GLuint textureBufferID;
glGenTextures(1, &textureBufferID);
glBindTexture(GL_TEXTURE_2D, textureBufferID);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, imageData.Width, imageData.Height, 0, GL_RGBA, GL_UNSIGNED_BYTE, imageData.Data);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
glfwFreeImage(&imageData);
return textureBufferID;
}
GameWindow::GameWindow(bool running):_running(running),_height(800),_width(800*16/9),_vertexBufferID(0)
{
glClearColor(1.0f, 1.0f, 1.0f, 1.0f);
glViewport(0.0f, 0.0f, _width, _height);
glEnable(GL_TEXTURE_2D);
glEnable(GL_BLEND);
glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA);
glMatrixMode(GL_PROJECTION);
gluOrtho2D(0, _width, 0, _height);
glMatrixMode(GL_MODELVIEW);
glGenBuffers(1, &_vertexBufferID);
glBindBuffer(GL_ARRAY_BUFFER, _vertexBufferID);
glBufferData(GL_ARRAY_BUFFER, sizeof(vertices), vertices, GL_STATIC_DRAW);
glEnableClientState(GL_VERTEX_ARRAY);
glVertexPointer(3, GL_FLOAT, sizeof(vertexData), (GLvoid *) offsetof(vertexData, positionCoordinates));
glEnableClientState(GL_TEXTURE_COORD_ARRAY);
glVertexPointer(2, GL_FLOAT, sizeof(vertexData), (GLvoid *) offsetof(vertexData, textureCoordinates));
_textureBufferID = loadAndBufferImage("rocket.tga");
}
void GameWindow::render()
{
glClear(GL_COLOR_BUFFER_BIT);
//glColor3f(1.0f, 0.0f, 0.0f);
glDrawArrays(GL_QUADS, 0, 4);
glfwSwapBuffers();
}
void GameWindow::update()
{
}
In the render function, the call glDrawArrays(GL_QUADS, 0, 4); raises a runtime error:
Thread 1: EXC_BAD_ACCESS (code=1, address=0x0).
For output I am just getting a black screen.
Why am I getting this error?
A memory access violation on glDrawArrays generally hints at some sort of problem with VBOs or similar storage objects.
From what I can tell, there are three issues with your code:
You are only calling glEnableClientState and its corresponding gl*Pointer calls once, when they need to be called every frame before glDrawArrays. If you'd like to call them only once, look into Vertex Array Objects (VAOs), although these are only available since OpenGL 3. Bear in mind that when you move the glEnableClientState calls, you will have to ensure that your buffer is bound again with glBindBuffer.
Each client state has a corresponding gl*Pointer call. You correctly use glVertexPointer for GL_VERTEX_ARRAY, but for GL_TEXTURE_COORD_ARRAY you need glTexCoordPointer.
Your glEnableClientState calls are missing their corresponding glDisableClientState calls, which need to be made in reverse order of the enabling calls, after your glDrawArrays call (see the sketch below).
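To make those three points concrete, here is a minimal sketch of render() with all of them applied, reusing the names from the code in the question; treat it as an illustration rather than a drop-in replacement:
void GameWindow::render()
{
    glClear(GL_COLOR_BUFFER_BIT);

    // Re-bind the VBO, then set up the client state for this frame.
    glBindBuffer(GL_ARRAY_BUFFER, _vertexBufferID);

    glEnableClientState(GL_VERTEX_ARRAY);
    glVertexPointer(3, GL_FLOAT, sizeof(vertexData),
                    (GLvoid *) offsetof(vertexData, positionCoordinates));

    glEnableClientState(GL_TEXTURE_COORD_ARRAY);
    // Texture coordinates get their own pointer call:
    glTexCoordPointer(2, GL_FLOAT, sizeof(vertexData),
                      (GLvoid *) offsetof(vertexData, textureCoordinates));

    glDrawArrays(GL_QUADS, 0, 4);

    // Disable in reverse order of enabling.
    glDisableClientState(GL_TEXTURE_COORD_ARRAY);
    glDisableClientState(GL_VERTEX_ARRAY);

    glfwSwapBuffers();
}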
Just as a side note, the GLU library is deprecated. Your gluOrtho2D call appears to be the only instance where you depend on it, and it can be easily replaced with:
glMatrixMode(GL_PROJECTION);
glLoadIdentity();
glOrtho(0, _width, 0, _height, -1.0f, 1.0f);
You are getting an exception in glDrawArrays because it tries to read memory at address 0, which is what your code actually specifies.
GL's texture coordinate array pointer is initialized to NULL, with no VBO bound. Since you use legacy GL, this will be interpreted as a client memory address. Now you enable GL_TEXTURE_COORD_ARRAY but never actually set glTexCoordPointer, so it stays NULL:
glEnableClientState(GL_TEXTURE_COORD_ARRAY);
glVertexPointer(2, GL_FLOAT, sizeof(vertexData), (GLvoid *) offsetof(vertexData, textureCoordinates));
The quick solution is of course to not overwrite your vertex array pointer state by having it point to the texture coords, but to specify the texcoord array pointer via glTexCoordPointer.
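In code, that quick fix in the constructor would look roughly like this (same struct and offsets as in the question):
glEnableClientState(GL_TEXTURE_COORD_ARRAY);
glTexCoordPointer(2, GL_FLOAT, sizeof(vertexData), (GLvoid *) offsetof(vertexData, textureCoordinates));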
The real solution is to give up on legacy GL, which has been deprecated for a decade now, and switch to "modern" OpenGL using a GL 3.2 core profile or above.
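As a rough illustration only: if the project also moves from GLFW 2 to GLFW 3, requesting such a core profile context looks something like this (GLFW 3 API, window size taken from the question's constructor):
// After glfwInit(), ask for a 3.2 core profile before creating the window.
glfwWindowHint(GLFW_CONTEXT_VERSION_MAJOR, 3);
glfwWindowHint(GLFW_CONTEXT_VERSION_MINOR, 2);
glfwWindowHint(GLFW_OPENGL_PROFILE, GLFW_OPENGL_CORE_PROFILE);
glfwWindowHint(GLFW_OPENGL_FORWARD_COMPAT, GLFW_TRUE);   // needed on macOS
GLFWwindow *window = glfwCreateWindow(800 * 16 / 9, 800, "RocketGame", NULL, NULL);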
Related
I'm trying to implement shadows in my custom renderer through dynamic shadow mapping and forward rendering (deferred rendering will be implemented later). Everything renders correctly to the framebuffer used to generate the shadow map. However, when using the default framebuffer to render the scene normally, only the skybox gets rendered (which suggests the default framebuffer is in fact being used). My only hypothesis is that the problem is related to the depth buffer, since disabling the call to DrawActors(...) (in ForwardRenderShadows) appears to solve the problem, but then I can't generate my depth map. Any suggestions on the matter?
Code:
void Scene::DrawActors(const graphics::Shader& shader)
{
for(const auto& actor : actors_)
actor->Draw(shader);
}
template <typename T>
void ForwardRenderShadows(const graphics::Shader& shadow_shader, const std::vector<T>& lights)
{
for(const auto& light : lights)
{
if(light->Shadow())
{
light->DrawShadows(shadow_shader);
DrawActors(shadow_shader); //removing this line "solves the problem"
glBindFramebuffer(GL_FRAMEBUFFER, 0);
}
}
}
/*
shadow mapping is only implemented on directional lights for the moment, and that is the
relevant code that gets called when the process starts, more code details at the end of code
snippet.
*/
void DirectionalLight::SetupShadows()
{
glGenFramebuffers(1, &framebuffer_);
glGenTextures(1, &shadow_map_);
glBindTexture(GL_TEXTURE_2D, shadow_map_);
glTexImage2D(GL_TEXTURE_2D, 0, GL_DEPTH_COMPONENT, constants::SHADOW_WIDTH, constants::SHADOW_HEIGHT, 0, GL_DEPTH_COMPONENT, GL_FLOAT, NULL);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT);
glBindFramebuffer(GL_FRAMEBUFFER, framebuffer_);
glFramebufferTexture2D(GL_FRAMEBUFFER, GL_DEPTH_ATTACHMENT, GL_TEXTURE_2D, shadow_map_, 0);
glDrawBuffer(GL_NONE);
glReadBuffer(GL_NONE);
if(glCheckFramebufferStatus(GL_FRAMEBUFFER) != GL_FRAMEBUFFER_COMPLETE)
throw std::runtime_error("Directional light framebuffer is not complete \n");
glBindFramebuffer(GL_FRAMEBUFFER, 0);
ShadowSetup(true);
}
void DirectionalLight::DrawShadows(const graphics::Shader& shader)
{
if(!ShadowSetup())
SetupShadows();
glViewport(0, 0, constants::SHADOW_WIDTH, constants::SHADOW_HEIGHT);
glBindFramebuffer(GL_FRAMEBUFFER, framebuffer_);
glClear(GL_DEPTH_BUFFER_BIT);
shader.Use();
projection_ = clutch::Orthopraphic(-10.0f, 10.0f, -10.0f, 10.0f, 1.0f, 100.0f);
transform_ = clutch::LookAt(direction_ * -1.0f, {0.0f, 0.0f, 0.0f, 0.0f}, {0.0f, 1.0f, 0.0f, 0.0f});
shader.SetUniformMat4("light_transform", projection_ * transform_);
}
void DirectionalLight::Draw(const graphics::Shader& shader)
{
shader.SetUniform4f("light_dir", direction_);
shader.SetUniform4f("light_color", color_);
shader.SetUniformMat4("light_transform", transform_);
shader.SetUniformMat4("light_projection", projection_);
shader.SetUniformInt("cast_shadow", shadows_);
glActiveTexture(GL_TEXTURE12);
shader.SetUniformInt("shadow_map", 12);
glBindTexture(GL_TEXTURE_2D, shadow_map_);
}
Code repo: https://github.com/rxwp5657/Nitro
Relevant Files for the problem:
include/core/scene.hpp
include/core/directional_light.hpp
include/core/light_shadow.hpp
include/graphics/mesh.hpp
src/core/scene.cpp
src/core/directional_light.cpp
src/core/light_shadow.cpp
src/graphics/mesh.cpp
Finally, what I have tried so far is:
Deactivating depth testing with glDepthMask(GL_FALSE) and glDisable(GL_DEPTH_TEST) (same problem).
Changing the depth function to glDepthFunc(GL_ALWAYS) (no change).
If you have an NVIDIA graphics card, you could have a look at Nsight. You can capture a frame and view all the GL calls that occurred.
Then you can select an event, for instance event 22 in my example, and inspect all textures, the color buffer, the depth buffer and the stencil buffer.
Furthermore, you can look at all GL state parameters at any given event.
OK, after using apitrace I found out that the VBO binding changes when switching from a custom framebuffer to the default one. Because of this, the solution to the problem is to set the VBO again after switching to the default framebuffer.
Based on the code of the project, the solution is to call the Setup function again after switching to the default framebuffer (a sketch of this follows the Setup code below).
Setup function of the Mesh class:
void Mesh::Setup(const Shader& shader)
{
    glGenVertexArrays(1, &vao_);
    glBindVertexArray(vao_);

    glGenBuffers(1, &elementbuffer_);
    glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, elementbuffer_);
    glBufferData(GL_ELEMENT_ARRAY_BUFFER, indices_.size() * sizeof(unsigned int), &indices_[0], GL_STATIC_DRAW);

    glGenBuffers(1, &vbo_);
    glBindBuffer(GL_ARRAY_BUFFER, vbo_);
    glBufferData(GL_ARRAY_BUFFER, vertices_.size() * sizeof(Vertex), &vertices_[0], GL_STATIC_DRAW);

    shader.PosAttrib("aPosition",  3, GL_FLOAT, sizeof(Vertex), 0);
    shader.PosAttrib("aNormal",    3, GL_FLOAT, sizeof(Vertex), offsetof(Vertex, normal));
    shader.PosAttrib("aTexCoord",  2, GL_FLOAT, sizeof(Vertex), offsetof(Vertex, tex_coord));
    shader.PosAttrib("aTangent",   3, GL_FLOAT, sizeof(Vertex), offsetof(Vertex, tangent));
    shader.PosAttrib("aBitangent", 3, GL_FLOAT, sizeof(Vertex), offsetof(Vertex, bitangent));

    glBindBuffer(GL_ARRAY_BUFFER, 0);
    glBindVertexArray(0);
    loaded_ = true;
}
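A hedged sketch of how that workaround could be wired into the ForwardRenderShadows loop from the question; GetMesh() is a hypothetical accessor used purely for illustration, and re-running Setup every frame re-creates the buffers, so this mirrors the posted workaround rather than a final fix:
template <typename T>
void ForwardRenderShadows(const graphics::Shader& shadow_shader, const std::vector<T>& lights)
{
    for(const auto& light : lights)
    {
        if(light->Shadow())
        {
            light->DrawShadows(shadow_shader);
            DrawActors(shadow_shader);
            glBindFramebuffer(GL_FRAMEBUFFER, 0);      // back on the default framebuffer
            // Re-establish the vertex state that apitrace showed being lost
            // (whether to pass the shadow shader or the scene shader here depends on the engine):
            for(const auto& actor : actors_)
                actor->GetMesh().Setup(shadow_shader); // hypothetical accessor; re-runs the Setup shown above
        }
    }
}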
Screenshot: vertices buffer after switching to the default framebuffer.
Screenshot: vertices buffer with no framebuffer switching (no shadow map generated).
I'm working on a 3D PBR engine along with some colleagues (OpenGL 3.3 + GLFW3 and ImGui), and we have a peculiar error: the application enters its drawing loop normally and gives no OpenGL error whatsoever (as shown by extensive use of glCheckError()), but the screen is completely blank. The peculiar part is that this only happens on my colleagues' computers, which use an Nvidia GeForce GTX 1060 as their graphics card (two of my friends have the same exact computer).
After some investigation, it turns out that this error is triggered by a function that is called a single time before the main loop. Long story short, it renders a quad to a texture bound via a framebuffer using a couple of shaders. I narrowed the problem down to the glDrawElements call; on those computers, not changing anything except commenting out this line fixes the display (at the cost of the texture displaying garbage instead of the expected result of the draw call), whereas all of the other computers run the app fine even with the line uncommented.
The culprit function is as follows:
/**
* Returns a texture identifier filled with a precomputed irradiance map calculated
* from a provided, already set up-environment map
*/
GLuint precomputeIrradianceMap(GLuint envMapId)
{
std::cerr << "Precomputing irradiance map ... " << std::endl;
GLuint irradianceVAO, irradianceVBO, irradianceMap,
irradianceVertShader, irradianceFragShader, irradianceProgram, captureFBO,
captureRBO;
GLint aPos_location, env_map;
glGenVertexArrays(1, &irradianceVAO);
glBindVertexArray(irradianceVAO);
glGenBuffers(1, &irradianceVBO);
glBindBuffer(GL_ARRAY_BUFFER, irradianceVBO);
glBufferData(GL_ARRAY_BUFFER, sizeof(quad_vertices), quad_vertices, GL_STATIC_DRAW);
glGenFramebuffers(1, &captureFBO);
glGenRenderbuffers(1, &captureRBO);
glBindFramebuffer(GL_FRAMEBUFFER, captureFBO);
glBindRenderbuffer(GL_RENDERBUFFER, captureRBO);
glRenderbufferStorage(GL_RENDERBUFFER, GL_DEPTH_COMPONENT24, 320, 320);
glFramebufferRenderbuffer(GL_FRAMEBUFFER, GL_DEPTH_ATTACHMENT, GL_RENDERBUFFER, captureRBO);
checkGLerror();
glGenTextures(1, &irradianceMap);
glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_2D, irradianceMap);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA16F, 320, 320, 0, GL_RGBA, GL_FLOAT, NULL);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT);
glBindTexture(GL_TEXTURE_2D, envMapId);
checkGLerror();
irradianceVertShader = createShaderFromSource(GL_VERTEX_SHADER, "shaders/irradiance_vert.glsl");
irradianceFragShader = createShaderFromSource(GL_FRAGMENT_SHADER, "shaders/irradiance_frag.glsl");
irradianceProgram = glCreateProgram();
glAttachShader(irradianceProgram, irradianceVertShader);
glAttachShader(irradianceProgram, irradianceFragShader);
printShaderLog(irradianceVertShader);
printShaderLog(irradianceFragShader);
glLinkProgram(irradianceProgram);
glUseProgram(irradianceProgram);
checkGLerror();
env_map = glGetUniformLocation(irradianceProgram, "environmentMap");
aPos_location = glGetAttribLocation(irradianceProgram, "aPos");
glEnableVertexAttribArray(aPos_location);
checkGLerror();
glVertexAttribPointer(aPos_location, 3, GL_FLOAT, GL_FALSE, 0, (void *) 0);
glUniform1i(env_map, 0);
checkGLerror();
glViewport(0, 0, 320, 320);
glBindFramebuffer(GL_FRAMEBUFFER, captureFBO);
glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, irradianceMap, 0);
checkGLerror();
glClearDepth(1.f);
glClearColor(0.f, 0.f, 0.f, 1.f);
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
checkGLerror();
// This is the culprit
// glDrawElements(GL_TRIANGLES, 6, GL_UNSIGNED_INT, quad_indices);
glBindFramebuffer(GL_FRAMEBUFFER, 0);
glBindRenderbuffer(GL_RENDERBUFFER, 0);
glBindBuffer(GL_ARRAY_BUFFER, 0);
glBindVertexArray(0);
checkGLerror();
// glDisableVertexAttribArray(aPos_location);
glDeleteShader(irradianceVertShader);
glDeleteShader(irradianceFragShader);
glDeleteProgram(irradianceProgram);
glDeleteFramebuffers(1, &captureFBO);
glDeleteRenderbuffers(1, &captureRBO);
glDeleteBuffers(1, &irradianceVBO);
glDeleteVertexArrays(1, &irradianceVAO);
checkGLerror();
std::cerr << "... done " << std::endl;
return irradianceMap;
}
You can safely replace checkGLerror() with std::cerr << glCheckError() << std::endl. As I said before, there is no OpenGL error whatsoever, all the shaders compile fine, and this only breaks on computers equipped with an Nvidia GeForce GTX 1060.
The rest of the code mostly sets up VAOs, VBOs and the like, and the render loop is as follows:
while (!glfwWindowShouldClose(window))
{
glfwGetFramebufferSize(window, &display_w, &display_h);
float newRatio = (float)display_w / display_h;
if(ratio != newRatio)
{
ratio = newRatio;
setAspectRatio(p, ratio);
invP = p.inverse();
glViewport(0, 0, display_w, display_h);
}
ImGui_ImplGlfwGL3_NewFrame();
// Rendering + OpenGL rendering
// Draw the skybox, then the model
ImGui::Begin("Physical parameters", NULL, ImGuiWindowFlags_AlwaysAutoResize);
ImGui::SliderFloat("Dielectric specularity", &ds, 0.f, 1.f, "%.3f");
ImGui::SliderFloat("Light intensity", &l0, 0.f, 10.f, "%.2f");
ImGui::Checkbox("Use irradiance map as skybox", &skyOrIrradiance);
ImGui::Checkbox("Debug draw irradiance map", &debugDraw);
glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_2D, skyOrIrradiance ? irradianceTexture : skybox_texture);
ImGui::End();
Matrix4f v = camera->getMatrix(), invV = v.inverse();
// glClearColor(0.45f, 0.55f, 0.60f, 1.00f);
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
glBindVertexArray(vertex_array_objs[0]);
glUseProgram(skybox_program);
glUniformMatrix4fv(skybox_v_location, 1, GL_FALSE, (const GLfloat *) invV.data());
glUniformMatrix4fv(skybox_p_location, 1, GL_FALSE, (const GLfloat *) invP.data());
glDisable(GL_DEPTH_TEST);
glDrawElements(GL_TRIANGLES, 6, GL_UNSIGNED_INT, quad_indices);
glBindVertexArray(vertex_array_objs[1]);
glUseProgram(program);
glUniformMatrix4fv(mv_location, 1, GL_FALSE, (const GLfloat *) v.data());
glUniformMatrix4fv(p_location, 1, GL_FALSE, (const GLfloat *)p.data());
glUniform1f(uDS_location, ds);
glUniform1f(L0_location, l0);
glActiveTexture(GL_TEXTURE0 + model.activeTexturesCount());
glBindTexture(GL_TEXTURE_2D, irradianceTexture);
glUniform1i(irradiance_location, model.activeTexturesCount());
glEnable(GL_DEPTH_TEST);
model.render();
if(debugDraw)
{
displayTexture(irradianceTexture);
displayTexture(skybox_texture, 0.f, -1.f);
}
camera->drawControls();
// Draw basic interface
basicInterface();
ImGui::Render();
ImGui_ImplGlfwGL3_RenderDrawData(ImGui::GetDrawData());
glfwSwapBuffers(window);
glfwPollEvents();
camera->update(window);
}
model.render() is as follows :
void GltfModel::render() const
{
for(unsigned int i = 0; i < _activeTextures.size(); i++)
{
if(_textureLocations[i] > -1)
{
int j = _activeTextures[i];
glActiveTexture(GL_TEXTURE0 + i);
glBindTexture(GL_TEXTURE_2D, _textureIds[j]);
glUniform1i(_textureLocations[i], i);
}
}
glBindBuffer(GL_ARRAY_BUFFER, _buffers[ARRAY_BUFFER]);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, _buffers[ELEMENT_ARRAY_BUFFER]);
glDrawElements(_drawingMode, _indicesCount, _indicesType, NULL);
}
Thanks in advance for your time!
EDIT: putting a glClear(GL_COLOR_BUFFER_BIT) call right before glfwSwapBuffers(window) still gives a white screen with the culprit line uncommented, even though the clear color has been set to light blue. Commenting out the culprit line does display a light blue screen, so this makes me think it's a framebuffer issue, but I can't say for sure.
I need to create a small game using OpenGL and C++. I am using this tutorial to get started (my aim isn't actually to create the game but to use the code I eventually create).
I finished video 8 (the one linked) but I ran into a problem. My code crashes on the line
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, imageData.Width, imageData.Height, 0, GL_RGBA, GL_UNSIGNED_BYTE, imageData.Data);
When I change the loadAndBufferImage method parameter to something random ("hgsjrkbgfdkj" works fine) the code doesn't crash, but obviously it doesn't load any image either. I'm not sure what I am getting wrong. My IDE throws a warning on the glfwReadImage line since it doesn't like parameter 3 being NULL (although it runs that line perfectly well).
glfwReadImage(fileName, &imageData, NULL);
I'm not sure what I missed or am doing wrong. Could it be the image? I used one converted via the link provided in the description of the video. The only thing I didn't do from the video was the small image-importing part at around 7:40. I am using NetBeans, not Xcode, and I just imported the rocket.tga file into my resources folder (right click on the resources folder, Add Existing Item, added the image).
Here is a full copy of my GameWindow.cpp code so far:
#define GLEW_STATIC
#include "GameWindow.h"
#define GL_GLEXT_PROTOTYPES
#include <iostream>
typedef struct {
GLfloat positionCoordinates[3];
GLfloat textureCoordinates[2];
} VertexData;
#define Square_Size 100
VertexData vertices[] = {
{{0.0f, 0.0f, 0.0f},{0.0f,0.0f}},
{{Square_Size, 0.0f, 0.0f}, {1.0f,0.0f}},
{{Square_Size, Square_Size, 0.0f}, {1.0f,1.0f}},
{{0.0f, Square_Size, 0.0f}, {0.0f,1.0f}}
};
void GameWindow::setRunning(bool newRunning) {
_running = newRunning;
}
bool GameWindow::getRunning() {
return _running;
}
GLuint GameWindow::loadAndBufferImage(const char *fileName){
GLFWimage imageData;
glfwReadImage(fileName, &imageData, NULL);
GLuint textureBufferID;
glGenTextures(1, &textureBufferID);
glBindTexture(GL_TEXTURE_2D, textureBufferID);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, imageData.Width, imageData.Height, 0, GL_RGBA, GL_UNSIGNED_BYTE, imageData.Data);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
glfwFreeImage(&imageData);
return textureBufferID;
}
GameWindow::GameWindow(bool running):_running(running), _height(800), _width(800*16/9) {
glClearColor(1.0f,1.0f,1.0f,1.0f);
glViewport(0.0f, 0.0f, _width, _height);
glEnable(GL_TEXTURE_2D);
glEnable(GL_BLEND);
glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA);
glMatrixMode(GL_PROJECTION);
gluOrtho2D(0,_width,0,_height);
glMatrixMode(GL_MODELVIEW);
glGenBuffers(1, &_vertexBufferID);
glBindBuffer(GL_ARRAY_BUFFER, _vertexBufferID);
glBufferData(GL_ARRAY_BUFFER, sizeof(vertices), vertices, GL_STATIC_DRAW);
glEnableClientState(GL_VERTEX_ARRAY);
glVertexPointer(3, GL_FLOAT, sizeof(VertexData), (GLvoid *) offsetof(VertexData,positionCoordinates));
glEnableClientState(GL_TEXTURE_COORD_ARRAY);
glTexCoordPointer(2, GL_FLOAT, sizeof(VertexData), (GLvoid *) offsetof(VertexData, textureCoordinates));
_textureBufferID = loadAndBufferImage("rocket.tga");
}
void GameWindow::render() {
glClear(GL_COLOR_BUFFER_BIT);
glDrawArrays(GL_QUADS, 0, 4);
glfwSwapBuffers();
}
void GameWindow::update() {
}
I am using an oldish version of GLFW, I think around version 2.7, because I couldn't get the newer ones to work. Once again, I don't think that's really relevant.
Use imageData.Format for format in glTexImage2D():
glTexImage2D
(
GL_TEXTURE_2D,
0,
GL_RGBA,
imageData.Width,
imageData.Height,
0,
imageData.Format, // instead of GL_RGBA
GL_UNSIGNED_BYTE,
imageData.Data
);
Otherwise, if imageData.Format == GL_RGB and you lie and say it's GL_RGBA instead, then OpenGL will happily read right off the end of imageData.Data looking for RGBA tuples that aren't there.
I am currently working on an application with (not yet implemented) post effects. The 3D scene gets rendered into a buffer, and then another shader renders that buffer to the screen.
My problem is that in the second pass the program uses my basic shader for the 3D scene instead of the post-effect shader. Do you have an idea what could be causing this?
My approach: I have a class which has two major functions:
- bindBuffer() sets the render target to a buffer
- renderSSAO() renders the buffer to the screen and hopefully one day will add an ambient occlusion effect. :)
BIND BUFFER FUNCTION
void SSAOShader::bindBuffer() {
glBindFramebuffer(GL_FRAMEBUFFER, framebuffer);
glViewport(0,0,windowWidth,windowHeight);
}
RENDER FUNCTION
void SSAOShader::renderSSAO(GLuint currentShader) {
// set shader
glUseProgram(shader);
//draw on Screen
glBindFramebuffer(GL_FRAMEBUFFER, 0);
glViewport(0,0,windowWidth,windowHeight);
// clear screen
glClearColor(0.4, 0.4, 0.4, 1.0);
glClear(GL_COLOR_BUFFER_BIT|GL_DEPTH_BUFFER_BIT);
// bind texture, pass to shader
glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_2D, renderTexture);
glUniform1i(textureID, 0);
glBindVertexArray(vertexarray);
glDrawArrays(GL_TRIANGLE_STRIP, 0, 4);
glBindVertexArray(0);
glUseProgram(currentShader);
}
Render loop (the functions are called on the ssaoShader object)
while (!glfwWindowShouldClose(mainWindow)) {
ssaoShader->bindBuffer();
// clear the screen
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
// draw cool stuff
ssaoShader->renderSSAO(programID);
// Swap buffers
glfwSwapBuffers(mainWindow);
glUseProgram(programID);
}
So basically, the render target gets set to my buffer with bindBuffer(), then I draw objects with my basic (Phong etc.) shader, and the renderSSAO function changes the target back to the screen. This function also switches to my post-effects shader and afterwards changes back to whatever shader it was given. programID is the GLuint of that shader.
I know the problem is which shader gets used, since the end result is just a black screen with a quad at the world origin. I can also move around as normal.
CONSTRUCTOR
SSAOShader::SSAOShader(float windowWidth, float windowHeight, GLuint currentShader) {
this->windowWidth = windowWidth;
this->windowHeight = windowHeight;
//shader
shader = LoadShaders("Passthrough.vertexshader", "SSAO.fragmentshader");
glUseProgram(shader);
textureID = glGetUniformLocation(shader, "renderTexture");
int status;
glGetProgramiv(shader, GL_COMPILE_STATUS, &status);
std::cout << (status == GL_TRUE) << std::endl;
GLfloat planeVertices[] = {
-1, -1,
1, -1,
-1, 1,
1, 1
};
// create vertex array
glGenVertexArrays(1, &vertexarray);
glBindVertexArray(vertexarray);
// create "uv" (vertex) buffer
glGenBuffers(1, &vertexbuffer);
glBindBuffer(GL_ARRAY_BUFFER, vertexbuffer);
glBufferData(GL_ARRAY_BUFFER, sizeof(planeVertices), planeVertices, GL_STATIC_DRAW);
// add vertecies to as attributes
glEnableVertexAttribArray(0);
glVertexAttribPointer(0, 2, GL_FLOAT, GL_FALSE, 0, (void*)0);
glBindVertexArray(0);
// ---- set up framebuffer ----
glGenFramebuffers(1, &framebuffer);
glBindFramebuffer(GL_FRAMEBUFFER, framebuffer);
glGenTextures(1, &renderTexture);
glBindTexture(GL_TEXTURE_2D, renderTexture);
glTexImage2D(GL_TEXTURE_2D, 0,GL_RGB, windowWidth, windowHeight, 0,GL_RGB, GL_UNSIGNED_BYTE, 0);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
glGenRenderbuffers(1, &depthrenderbuffer);
glBindRenderbuffer(GL_RENDERBUFFER, depthrenderbuffer);
glRenderbufferStorage(GL_RENDERBUFFER, GL_DEPTH_COMPONENT, windowWidth, windowHeight);
glFramebufferRenderbuffer(GL_FRAMEBUFFER, GL_DEPTH_ATTACHMENT, GL_RENDERBUFFER, depthrenderbuffer);
glFramebufferTexture(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, renderTexture, 0);
DrawBuffers[1] = GL_COLOR_ATTACHMENT0;
glDrawBuffers(1, DrawBuffers);
if(glCheckFramebufferStatus(GL_FRAMEBUFFER) != GL_FRAMEBUFFER_COMPLETE){
exit(0); // bad luck
}
glBindFramebuffer(GL_FRAMEBUFFER, 0);
//glUseProgram(currentShader);
}
VS2012 Project
https://studi.f4.htw-berlin.de/~s0539750/SSAOTest.zip
For whoever is mad enough to look at my code. ;)
I found the problem.
glFramebufferTexture(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, renderTexture, 0);
DrawBuffers[1] = GL_COLOR_ATTACHMENT0;
glDrawBuffers(1, DrawBuffers);
This part in the constructor changed my GLuint shader;. I don't know exactly why, but the reason is that DrawBuffers is a fixed array of size 1. Obviously, accessing DrawBuffers[1] isn't the best idea of the day. ;)
My guess would be that either DrawBuffers[1] was used for my "shader" GLuint, or the problem was that glDrawBuffers never really attached GL_COLOR_ATTACHMENT0 to my framebuffer. Which is the case?
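Writing one element past the end of a one-element array is undefined behavior in C++, which is consistent with it overwriting the adjacent GLuint shader member. For reference, an in-bounds version of that snippet (assuming DrawBuffers is declared as a one-element GLenum array) would be:
glFramebufferTexture(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, renderTexture, 0);
GLenum DrawBuffers[1] = { GL_COLOR_ATTACHMENT0 };  // index 0, not 1
glDrawBuffers(1, DrawBuffers);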
Thanks for all the help by the way, it helped a lot. :)
I have an animation project which requires multiple textures. I intend to see all of them, yet only one texture covers my model; I can't see all the TGA files on the object.
GLuint LoadTexture(char *TexName)
{
TGAImg Img; // Image loader
// Load our Texture
if(Img.Load(TexName)!=IMG_OK)
return -1;
glGenTextures(1,textures); // Allocate space for texture
glBindTexture(GL_TEXTURE_2D,textures[0]); // Set our Tex handle as current
// Create the texture
if(Img.GetBPP()==24)
glTexImage2D(GL_TEXTURE_2D,0,3,Img.GetWidth(),Img.GetHeight(),0,GL_RGB,GL_UNSIGNED_BYTE,Img.GetImg());
else if(Img.GetBPP()==32)
glTexImage2D(GL_TEXTURE_2D,0,4,Img.GetWidth(),Img.GetHeight(),0,GL_RGBA,GL_UNSIGNED_BYTE,Img.GetImg());
else
return -1;
// Specify filtering and edge actions
glTexParameteri(GL_TEXTURE_2D,GL_TEXTURE_MIN_FILTER,GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D,GL_TEXTURE_MAG_FILTER,GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D,GL_TEXTURE_WRAP_S,GL_CLAMP);
glTexParameteri(GL_TEXTURE_2D,GL_TEXTURE_WRAP_T,GL_CLAMP);
return textures[0];
}
void Draw()
{
glEnable(GL_TEXTURE_2D);
glGenTextures(17,textures);
for (int i = 0; i<17; i++){
textures[i] = LoadTexture(textureNames[i]);
glBindTexture(GL_TEXTURE_2D, textures[i]);
}
glEnableClientState(GL_VERTEX_ARRAY);
glEnableClientState(GL_NORMAL_ARRAY);
glVertexPointer(3,GL_FLOAT,0,triangleArr);
glNormalPointer(GL_FLOAT, 0, normals);
glDrawArrays(GL_TRIANGLES, 0, TotalConnectedTriangles);
glDisableClientState(GL_VERTEX_ARRAY);
glDisableClientState(GL_NORMAL_ARRAY);
}
void initialize ()
{
glViewport(0, 0, 500, 500);
glMatrixMode(GL_PROJECTION);
glLoadIdentity();
gluPerspective(45.0, 1, 0.1, 1000.0);
glMatrixMode(GL_MODELVIEW);
glClearColor( 0.0f, 0.0f, 0.0f, 0.0f );
glShadeModel( GL_SMOOTH );
glEnable( GL_DEPTH_TEST );
glEnable(GL_TEXTURE_2D);
textures[textureIndex] = LoadTexture(textureNames[textureIndex]);
glBindTexture(GL_TEXTURE_2D, textures[textureIndex]);
glHint(GL_PERSPECTIVE_CORRECTION_HINT, GL_NICEST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
glEnableClientState(GL_TEXTURE_COORD_ARRAY);
glTexCoordPointer(2, GL_FLOAT, 0, textureArr);
}
I am not sure whether I need to post my OBJ file.
Look at the docs of glActiveTexture. Calling glBindTexture in a loop doesn't do what you think it does.
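In other words, each texture that should be active at the same time has to be bound to its own texture unit; a minimal sketch of what the docs describe, using the arrays from the question, would be:
for (int i = 0; i < 17; i++)
{
    glActiveTexture(GL_TEXTURE0 + i);           // select texture unit i ...
    glBindTexture(GL_TEXTURE_2D, textures[i]);  // ... and bind this texture to that unit
}
// Note: the fixed-function pipeline only has a limited number of units (query GL_MAX_TEXTURE_UNITS),
// and each enabled unit also needs its own texture coordinates (glClientActiveTexture + glTexCoordPointer).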
Your code is wrong for many reasons:
If the Draw() function is your rendering callback, then you are creating a set of textures and loading data into them every frame. That is wrong, since you need to do it only once.
LoadTexture() creates a new texture and loads it with data from a file. This is wrong, since you already created 17 textures in the Draw() function.
Then you are binding each texture in a loop, so only the last one remains bound.
Then you dump all vertices from a VBO to be rendered. I do not see where you initialized them, but they are going to use only the last bound texture.
You have another call to LoadTexture() in initialize(). Actually, all textures should be generated and loaded there (see the sketch below).
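Putting those points together, a rough sketch of the suggested split (generate and load everything once in initialize(), and only bind in Draw()) might look like this; currentTextureIndex is a hypothetical per-part index, and LoadTexture would need to generate and return a fresh id instead of always writing textures[0]:
// Called once from initialize(): create and load all 17 textures up front.
void loadAllTextures()
{
    for (int i = 0; i < 17; i++)
        textures[i] = LoadTexture(textureNames[i]);
}

// Called every frame: no glGenTextures / LoadTexture here, just bind what this draw needs.
void Draw()
{
    glEnable(GL_TEXTURE_2D);
    glBindTexture(GL_TEXTURE_2D, textures[currentTextureIndex]);  // hypothetical: the texture this part of the model uses

    glEnableClientState(GL_VERTEX_ARRAY);
    glEnableClientState(GL_NORMAL_ARRAY);
    glVertexPointer(3, GL_FLOAT, 0, triangleArr);
    glNormalPointer(GL_FLOAT, 0, normals);
    glDrawArrays(GL_TRIANGLES, 0, TotalConnectedTriangles);
    glDisableClientState(GL_NORMAL_ARRAY);
    glDisableClientState(GL_VERTEX_ARRAY);
}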