Framebuffer Texture rendered to screen is stretched at certain points - C++

I'm currently trying to test out rendering to a framebuffer for various uses, but whenever I have an object (say, a square) at a certain y-value, it appears "stretched"; past a certain y-value or x-value it seems to "thin out" and disappear. I have determined the x- and y-values at which it disappears, but the coordinates seem to have no rhyme or reason.
When I remove the framebuffer binding and render directly to the screen it draws the square perfectly fine, no matter the x or y-value.
Drawing a basic square (using immediate mode to rule out possible errors) with a wide x-value looks like this:
Code here:
Window window("Framebuffer Testing", 1600, 900); //1600x900 right now
int fbowidth = 800, fboheight = 600;
mat4 ortho = mat4::orthographic(0, width, 0, height, -1.0f, 1.0f);
//trimmed out some code from shader creation that is bugless and unnecessary to include
Shader shader("basic"); shader.setUniform("pr_matrix", ortho);
Shader drawFromFBO("fbotest"); shader.setUniform("pr_matrix", ortho);
GLfloat screenVertices[] = {
0, 0, 0,
0, height, 0,
width, height, 0,
width, 0, 0 };
GLushort indices[] = {
0, 1, 2,
2, 3, 0 };
GLfloat texcoords[] = { //texcoords sent to the drawFromFBO shader
0, 0,
0, 1,
1, 1,
1, 1,
1, 0,
0, 0 };
IndexBuffer ibo(indices, 6);
VertexArray vao;
vao.addBuffer(new Buffer(screenVertices, 4 * 3, 3), 0);
vao.addBuffer(new Buffer(texcoords, 2 * 6, 2), 1);
GLuint fbo;
glGenFramebuffers(1, &fbo);
glBindFramebuffer(GL_FRAMEBUFFER, fbo);
GLuint texture;
glGenTextures(1, &texture);
glBindTexture(GL_TEXTURE_2D, texture);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, fbowidth, fboheight, 0, GL_RGB, GL_UNSIGNED_BYTE, NULL);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
glBindTexture(GL_TEXTURE_2D, 0);
glFramebufferTexture(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, texture, 0);
if(glCheckFramebufferStatus(GL_FRAMEBUFFER) != GL_FRAMEBUFFER_COMPLETE)
std::cout << "false" << std::endl;
glEnable(GL_TEXTURE_2D);
//the x-values mess up at ~783 thru 800 and the y-values at 0 thru ~313
while(!window.closed()) {
glClearColor(0.2f, 0.2f, 0.2f, 1.0f); //grey
window.clear(); //calls glClear for the depth and color buffers
//bind framebuffer and shader
shader.enable(); //literally just calls glUseProgram(id) with the compiled shader id
glViewport(0, 0, fbowidth, fboheight);
glBindFramebuffer(GL_FRAMEBUFFER, fbo); //bind the fbo
glClearColor(1.0f, 0.0f, 1.0f, 1.0f); //set clear color to pink
glClear(GL_COLOR_BUFFER_BIT);
//render a red square to the framebuffer texture
glBegin(GL_QUADS); {
glColor3f(1.0f, 0.0f, 0.0f); //set the color to red
glVertex3f(700, 400, 0);
glVertex3f(700, 450, 0);
glVertex3f(750, 450, 0);
glVertex3f(750, 400, 0);
} glEnd();
shader.disable();
glBindFramebuffer(GL_FRAMEBUFFER, 0); //set framebuffer to the default
//render from framebuffer to screen
glViewport(0, 0, width, height);
drawFromFBO.enable();
glActiveTexture(GL_TEXTURE0);
drawFromFBO.setUniform1i("texfbo0", 0);
glBindTexture(GL_TEXTURE_2D, texture);
vao.bind();
ibo.bind();
glDrawElements(GL_TRIANGLES, ibo.getCount(), GL_UNSIGNED_SHORT, NULL);
ibo.unbind();
vao.unbind();
drawFromFBO.disable();
window.update();
}
If you want to see anything extra, the full file is on my GitHub.
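One thing worth noting in the snippet above: screenVertices holds four vertices (drawn through six indices), but texcoords holds six pairs, laid out as if for non-indexed drawing. With the index buffer, each vertex reads the texcoord pair at its own index, so the second triangle (indices 2, 3, 0) samples (1,1), (1,1), (0,0), which can produce exactly this kind of stretching. A minimal sketch of a matching four-pair layout:
GLfloat texcoords[] = { // one pair per vertex, in screenVertices order
0, 0,
0, 1,
1, 1,
1, 0 };
vao.addBuffer(new Buffer(texcoords, 2 * 4, 2), 1);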

Related

Why the texture created from the framebuffer is not mapping correctly

I am creating a custom framebuffer of size 1920 x 1080 and then mapping its texture to a full-screen rectangle in the default framebuffer of size 800 x 600.
I drew a rectangle in the center of the screen in the custom framebuffer, and after mapping the texture I expected the rectangle to appear in the center.
But the rectangle appeared in the lower-left corner.
When I draw a full-screen rectangle in the custom framebuffer and map it to the full-screen rectangle in the default framebuffer of size 800 x 600, instead of appearing full-screen it covers only the lower-left corner.
SCR_WIDTH = 800;
SCR_HEIGHT = 600;
int main(int argc, char *argv[])
{
QApplication a(argc, argv);
cont.SetName("RootItem");
TreeModel* model = new TreeModel("RootElement", &cont);
WavefrontRenderer w(model);
w.show();
glfwInit();
int return_code;
glfwWindowHint(GLFW_CONTEXT_VERSION_MAJOR, 3);
glfwWindowHint(GLFW_CONTEXT_VERSION_MINOR, 3);
glfwWindowHint(GLFW_OPENGL_PROFILE, GLFW_OPENGL_CORE_PROFILE);
glfwWindowHint(GLFW_RESIZABLE, GL_FALSE);
GLFWwindow* window = glfwCreateWindow(SCR_WIDTH, SCR_HEIGHT, "Renderer", nullptr, nullptr); // Create the render window
glfwSetWindowPos(window, 1120, 480);
glfwFocusWindow(window);
glfwMakeContextCurrent(window);
GLenum GlewInitResult;
glewExperimental = GL_TRUE;
GlewInitResult = glewInit();
glEnable(GL_MULTISAMPLE);
glEnable(GL_DEPTH_TEST);
glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA);
ResourceManager::LoadShader("C:\\Shaders\\Test\\Vert.txt", "C:\\Shaders\\Test\\Frag.txt", nullptr, "ScreenShader");
//create a texture object
glGenTextures(1, &textureId);
glBindTexture(GL_TEXTURE_2D, textureId);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, 1920, 1080, 0, GL_RGBA, GL_UNSIGNED_BYTE, 0);
glBindTexture(GL_TEXTURE_2D, 0);
// create a renderbuffer object for depthbuffer
glGenRenderbuffers(1, &rboDepthId);
glBindRenderbuffer(GL_RENDERBUFFER, rboDepthId);
glRenderbufferStorage(GL_RENDERBUFFER, GL_DEPTH_COMPONENT, 1920, 1080);
glBindRenderbuffer(GL_RENDERBUFFER, 0);
// create a framebuffer
glGenFramebuffers(1, &fboMsaaId);
glBindFramebuffer(GL_FRAMEBUFFER, fboMsaaId);
// attach colorbuffer image to FBO
glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, textureId , 0);
// attach depthbuffer image to FBO
glFramebufferRenderbuffer(GL_FRAMEBUFFER, GL_DEPTH_ATTACHMENT, GL_RENDERBUFFER, rboDepthId);
GLenum status = glCheckFramebufferStatus(GL_FRAMEBUFFER);
glBindFramebuffer(GL_FRAMEBUFFER, 0);
while (!glfwWindowShouldClose(window))
{
glBindFramebuffer(GL_FRAMEBUFFER, fboMsaaId);
glClearColor(1.0, 0.0, 0.0, 1.0);
glClear( GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT );
w.render(); // Do rendering here
ResourceManager::GetShader("ScreenShader").Use();
glBindFramebuffer(GL_FRAMEBUFFER, 0);
glDisable(GL_DEPTH_TEST);
glClearColor(0.0, 0.0, 0.0, 0.0);
glClear(GL_COLOR_BUFFER_BIT);
ResourceManager::GetShader("ScreenShader").Use();
glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_2D, textureId);
renderQuad();
glfwPollEvents();
glfwSwapBuffers(window);
}
glfwTerminate();
return a.exec();
}
/////////////////////////////////////////////////////////////////////////////////////////////////// Definition of the renderQuad function
unsigned int quadVAO = 0;
unsigned int quadVBO;
void renderQuad()
{
if (quadVAO == 0)
{
float quadVertices[] = { // vertex attributes for a quad that fills the entire screen in Normalized Device Coordinates.
// positions // texCoords
-1.0f, 1.0f, 0.0f, 1.0f,
-1.0f, -1.0f, 0.0f, 0.0f,
1.0f, -1.0f, 1.0f, 0.0f,
-1.0f, 1.0f, 0.0f, 1.0f,
1.0f, -1.0f, 1.0f, 0.0f,
1.0f, 1.0f, 1.0f, 1.0f
};
// VAO
glGenVertexArrays(1, &quadVAO);
glGenBuffers(1, &quadVBO);
glBindVertexArray(quadVAO);
glBindBuffer(GL_ARRAY_BUFFER, quadVBO);
glBufferData(GL_ARRAY_BUFFER, sizeof(quadVertices), &quadVertices, GL_STATIC_DRAW);
glEnableVertexAttribArray(0);
glVertexAttribPointer(0, 2, GL_FLOAT, GL_FALSE, 4 * sizeof(float), (void*)0);
glEnableVertexAttribArray(1);
glVertexAttribPointer(1, 2, GL_FLOAT, GL_FALSE, 4 * sizeof(float), (void*)(2 * sizeof(float)));
}
glBindVertexArray(quadVAO);
glDrawArrays(GL_TRIANGLE_STRIP, 0, 6);
glBindVertexArray(0);
}
You have to adjust the viewport to the new size when you switch between framebuffers of different sizes. Use glViewport to set the viewport. The size of the default framebuffer can be retrieved with glfwGetFramebufferSize (the size of the window's framebuffer changes when the window is resized).
Furthermore, OpenGL is a state engine. States are persistent until they are changed again, even across frames. If the first pass uses the depth test but the second pass does not, then the depth test has to be switched on and off inside the loop:
int main(int argc, char *argv[])
{
// [...]
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, 1920, 1080, ....);
// [...]
while (!glfwWindowShouldClose(window))
{
int sizex, sizey;
glfwGetFramebufferSize(window, &sizex, &sizey);
glBindFramebuffer(GL_FRAMEBUFFER, fboMsaaId);
glViewport(0, 0, 1920, 1080);
glClearColor(1.0, 0.0, 0.0, 1.0);
glClear( GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT );
glEnable(GL_DEPTH_TEST);
// [...]
glBindFramebuffer(GL_FRAMEBUFFER, 0);
glViewport(0, 0, sizex, sizey);
glClearColor(0.0, 0.0, 0.0, 0.0);
glClear(GL_COLOR_BUFFER_BIT);
glDisable(GL_DEPTH_TEST);
// [...]
}
}

OpenGL Deferred rendering static?

I'm trying to implement deferred rendering in my engine, but I'm having some problems.
It seems that there is some static in the triangles being drawn.
Here is the drawing code:
// GEOMETRY PASS
gbuffer->BindForWriting(); // glBind(GL_DRAW_FRAMEBUFFER, fbo);
window->Clear(); // glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
glEnable(GL_DEPTH_TEST); // Before it was glDisable(GL_DEPTH_TEST); (I was testing)
... // Render all objects (they are rendering well when not using deferred rendering)
// LIGHTING PASS
glBindFramebuffer(GL_FRAMEBUFFER, 0);
window->Clear(); // Clear buffer again (same thing above)
gbuffer->BindForReading(); // glBind(GL_READ_FRAMEBUFFER, fbo);
GLsizei HalfWidth = (GLsizei)(window->GetSize().x / 2.0f);
GLsizei HalfHeight = (GLsizei)(window->GetSize().y / 2.0f); // Get half the size of the window
gbuffer->SetReadBuffer(GBUFFER_POSITION); // glReadBuffer(GL_COLOR_ATTACHMENT0);
glBlitFramebuffer(0, 0, window->GetSize().x, window->GetSize().y, 0, 0, HalfWidth, HalfHeight, GL_COLOR_BUFFER_BIT, GL_LINEAR);
gbuffer->SetReadBuffer(GBUFFER_DIFFUSE); // glReadBuffer(GL_COLOR_ATTACHMENT1);
glBlitFramebuffer(0, 0, window->GetSize().x, window->GetSize().y, 0, HalfHeight, HalfWidth, window->GetSize().y, GL_COLOR_BUFFER_BIT, GL_LINEAR);
gbuffer->SetReadBuffer(GBUFFER_NORMAL); // glReadBuffer(GL_COLOR_ATTACHMENT2);
glBlitFramebuffer(0, 0, window->GetSize().x, window->GetSize().y, HalfWidth, HalfHeight, window->GetSize().x, window->GetSize().y, GL_COLOR_BUFFER_BIT, GL_LINEAR);
gbuffer->SetReadBuffer(GBUFFER_TEXCOORD); // glReadBuffer(GL_COLOR_ATTACHMENT3);
glBlitFramebuffer(0, 0, window->GetSize().x, window->GetSize().y, HalfWidth, 0, window->GetSize().x, HalfHeight, GL_COLOR_BUFFER_BIT, GL_LINEAR);
(top-left is diffuse, top-right is normals, bottom-left is position and bottom-right is texture coords)
EDIT:
New screenshot after I enabled GL_DEPTH_TEST:
EDIT 2
Here is the GBuffer setup:
glGenFramebuffers(1, &fbo);
glBindFramebuffer(GL_DRAW_FRAMEBUFFER, fbo);
glGenTextures(4, textures);
glGenTextures(1, &depthTexture);
for (int i = 0; i < 4; i++) {
glBindTexture(GL_TEXTURE_2D, textures[i]);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB32F, windowSize.x, windowSize.y, 0, GL_RGB, GL_FLOAT, NULL);
glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0 + i, GL_TEXTURE_2D, textures[i], 0);
}
glBindTexture(GL_TEXTURE_2D, depthTexture);
glTexImage2D(GL_TEXTURE_2D, 0, GL_DEPTH_COMPONENT32F, windowSize.x, windowSize.y, 0, GL_DEPTH_COMPONENT, GL_FLOAT, NULL);
glFramebufferTexture2D(GL_FRAMEBUFFER, GL_DEPTH_ATTACHMENT, GL_TEXTURE_2D, depthTexture, 0);
GLenum drawBuffers[] = {
GL_COLOR_ATTACHMENT0,
GL_COLOR_ATTACHMENT1,
GL_COLOR_ATTACHMENT2,
GL_COLOR_ATTACHMENT3
};
glDrawBuffers(4, drawBuffers);
GLenum status = glCheckFramebufferStatus(GL_FRAMEBUFFER);
if (status != GL_FRAMEBUFFER_COMPLETE) {
// PRINT ERROR TO LOG ( No errors here i've already checked)
return;
}
glBindFramebuffer(GL_DRAW_FRAMEBUFFER, 0);
EDIT 3: I'm following this tutorial, if that helps.
I forgot to disable GL_BLEND before drawing the objects in the buffer.
Now it's working fine.
glDisable(GL_BLEND);
(Deferred rendering...)
glEnable(GL_BLEND);
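In context, the fixed geometry pass would look roughly like this (a sketch assembled from the snippets above):
glDisable(GL_BLEND); // G-buffer attachments must receive raw values, not blended ones
gbuffer->BindForWriting(); // glBindFramebuffer(GL_DRAW_FRAMEBUFFER, fbo);
window->Clear(); // glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
glEnable(GL_DEPTH_TEST);
// ... render all objects into the G-buffer ...
glEnable(GL_BLEND); // restore blending for the passes that need it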

Deferred shader textures from FBO display as black

I am trying to use deferred shading to implement SSAO and I have problems to access my textures in the deferred fragment shader. The code is in C++/Qt5 and makes use of Coin3D to generate the rest of the UI (but this shouldn't really matter here).
The fragment shader of the deferred pass is:
#version 150 compatibility
uniform sampler2D color;
uniform sampler2D position;
uniform sampler2D normal;
uniform vec3 dim;
uniform vec3 camPos;
uniform vec3 camDir;
void main()
{
// screen position
vec2 t = gl_TexCoord[0].st;
// the color
vec4 c = texture2D(color, t);
gl_FragColor = c + vec4(1.0, t.x, t.y, 1.0);
}
The code for running the deferred pass is
_geometryBuffer.Unbind();
// push state
{
glMatrixMode(GL_MODELVIEW);
glPushMatrix();
glLoadIdentity();
glMatrixMode(GL_PROJECTION);
glPushMatrix();
glLoadIdentity();
glPushAttrib(GL_DEPTH_BUFFER_BIT |
GL_COLOR_BUFFER_BIT |
GL_LIGHTING_BIT |
GL_SCISSOR_BIT |
GL_POLYGON_BIT |
GL_CURRENT_BIT);
glDisable(GL_DEPTH_TEST);
glDisable(GL_ALPHA_TEST);
glDisable(GL_LIGHTING);
glDisable(GL_COLOR_MATERIAL);
glDisable(GL_SCISSOR_TEST);
glDisable(GL_CULL_FACE);
}
// bind shader
// /!\ IMPORTANT to do before specifying locations
_deferredShader->bind();
_CheckGLErrors("deferred");
// specify positions
_deferredShader->setUniformValue("camPos", ...);
_deferredShader->setUniformValue("camDir", ...);
_geometryBuffer.Bind(GBuffer::TEXTURE_TYPE_NORMAL, 2);
_deferredShader->setUniformValue("normal", GLint(2));
_geometryBuffer.Bind(GBuffer::TEXTURE_TYPE_POSITION, 1);
_deferredShader->setUniformValue("position", GLint(1));
_geometryBuffer.Bind(GBuffer::TEXTURE_TYPE_DIFFUSE, 0);
_deferredShader->setUniformValue("color", GLint(0));
_CheckGLErrors("bind");
// draw screen quad
{
glBegin(GL_QUADS);
glTexCoord2f(0, 0);
glColor3f(0, 0, 0);
glVertex2f(-1, -1);
glTexCoord2f(1, 0);
glColor3f(0, 0, 0);
glVertex2f( 1, -1);
glTexCoord2f(1, 1);
glColor3f(0, 0, 0);
glVertex2f( 1, 1);
glTexCoord2f(0, 1);
glColor3f(0, 0, 0);
glVertex2f(-1, 1);
glEnd();
}
_deferredShader->release();
// for debug
_geometryBuffer.Unbind(2);
_geometryBuffer.Unbind(1);
_geometryBuffer.Unbind(0);
_geometryBuffer.DeferredPassBegin();
_geometryBuffer.DeferredPassDebug();
// pop state
{
glPopAttrib();
glMatrixMode(GL_PROJECTION);
glPopMatrix();
glMatrixMode(GL_MODELVIEW);
glPopMatrix();
}
I know that the textures have been correctly processed in the geometry buffer creation because I can dump them into files and get the expected result.
The deferred pass doesn't work. The shader compiled correctly and I get the following result on screen:
The last part of my code (DeferredPassBegin/Debug) draws the FBO to the screen (as shown in the screenshot) as proof that the GBuffer is correct.
The current result seems to mean that the textures are not correctly bound to their respective uniform, but I know that the content is valid as I dumped the textures to files and got the same results as shown above.
My binding functions in GBuffer are:
void GBuffer::Unbind()
{
glBindFramebuffer(GL_FRAMEBUFFER, 0);
glBindFramebuffer(GL_DRAW_FRAMEBUFFER, 0);
glBindFramebuffer(GL_READ_FRAMEBUFFER, 0);
}
void GBuffer::Bind(TextureType type, uint32_t idx)
{
glActiveTexture(GL_TEXTURE0 + idx);
glEnable(GL_TEXTURE_2D);
glBindTexture(GL_TEXTURE_2D, _textures[static_cast<uint32_t>(type)]);
}
void GBuffer::Unbind(uint32_t idx)
{
glActiveTexture(GL_TEXTURE0 + idx);
glDisable(GL_TEXTURE_2D);
glBindTexture(GL_TEXTURE_2D, 0);
}
Finally, the textures are 512/512, and I created them in my GBuffer with:
WindowWidth = WindowHeight = 512;
// Create the FBO
glGenFramebuffers(1, &_fbo);
glBindFramebuffer(GL_FRAMEBUFFER, _fbo);
const uint32_t NUM = static_cast<uint32_t>(NUM_TEXTURES);
// Create the gbuffer textures
glGenTextures(NUM, _textures);
glGenTextures(1, &_depthTexture);
for (unsigned int i = 0 ; i < NUM; i++) {
glBindTexture(GL_TEXTURE_2D, _textures[i]);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA32F, WindowWidth, WindowHeight, 0, GL_RGBA, GL_FLOAT, NULL);
glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0 + i + _firstIndex, GL_TEXTURE_2D, _textures[i], 0);
}
// depth
glBindTexture(GL_TEXTURE_2D, _depthTexture);
glTexImage2D(GL_TEXTURE_2D, 0, GL_DEPTH_COMPONENT32F, WindowWidth, WindowHeight, 0, GL_DEPTH_COMPONENT, GL_FLOAT, NULL);
glFramebufferTexture2D(GL_FRAMEBUFFER, GL_DEPTH_ATTACHMENT, GL_TEXTURE_2D, _depthTexture, 0);
GLenum buffers[NUM];
for(uint32_t i = 0; i < NUM; ++i){
buffers[i] = GLenum(GL_COLOR_ATTACHMENT0 + i + _firstIndex);
}
glDrawBuffers(NUM, buffers);
GLenum status = glCheckFramebufferStatus(GL_FRAMEBUFFER);
if (status != GL_FRAMEBUFFER_COMPLETE) {
printf("FB error, status: 0x%x\n", status);
return _valid = false;
}
// unbind textures
glBindTexture(GL_TEXTURE_2D, 0);
// restore default FBO
glBindFramebuffer(GL_FRAMEBUFFER, 0);
How can I debug further at this stage?
I know that the texture data is valid, but I can't seem to bind it to the shader correctly (but I have other shaders that use textures loaded from files and which work fine).
--- Edit 1 ---
As asked, the code for DeferredPassBegin/Debug (mostly coming from this tutorial):
void GBuffer::DeferredPassBegin() {
glBindFramebuffer(GL_FRAMEBUFFER, 0);
glBindFramebuffer(GL_READ_FRAMEBUFFER, _fbo);
}
void GBuffer::DeferredPassDebug() {
GLsizei HalfWidth = GLsizei(_texWidth / 2.0f);
GLsizei HalfHeight = GLsizei(_texHeight / 2.0f);
SetReadBuffer(TEXTURE_TYPE_POSITION);
glBlitFramebuffer(0, 0, _texWidth, _texHeight,
0, 0, HalfWidth, HalfHeight, GL_COLOR_BUFFER_BIT, GL_LINEAR);
SetReadBuffer(TEXTURE_TYPE_DIFFUSE);
glBlitFramebuffer(0, 0, _texWidth, _texHeight,
0, HalfHeight, HalfWidth, _texHeight, GL_COLOR_BUFFER_BIT, GL_LINEAR);
SetReadBuffer(TEXTURE_TYPE_NORMAL);
glBlitFramebuffer(0, 0, _texWidth, _texHeight,
HalfWidth, HalfHeight, _texWidth, _texHeight, GL_COLOR_BUFFER_BIT, GL_LINEAR);
}
Arghk!!!
I had assumed that texture parameters were not mandatory, but after looking at some other code I tried specifying them. When generating the FBO textures, I now use:
for (unsigned int i = 0 ; i < NUM; i++) {
glBindTexture(GL_TEXTURE_2D, _textures[i]);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA32F, WindowWidth, WindowHeight, 0, GL_RGBA, GL_FLOAT, NULL);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0 + i + _firstIndex, GL_TEXTURE_2D, _textures[i], 0);
}
And with this change, I get the expected result (outputting only c in the fragment shader, with similarly correct results if I switch to visualizing the normal / position).
Conclusion: one must specify the texture parameters for deferred shading to work (at least with the graphics setup of my application / machine).

Correctly use stencil_texturing in OpenGL

I am trying to implement the stencil_texturing extension of OpenGL as a proof of concept. My video card supports up to GL 4.3, so stencil_texturing is available to me. If more clarification is necessary, here is the spec: http://www.opengl.org/registry/specs/ARB/stencil_texturing.txt.
So the goal of my test is to render my color buffer to a texture in frame 0, then the depth buffer in frame 1, and finally the stencil buffer in frame 2. The easy part is done, and my color and depth buffer textures render fine. My issue lies with the stencil buffer, and I believe it comes from either my lack of understanding of stencil buffers (which could very well be the case) or my misuse of stencil_texturing. I tried to find some info online, but there is very little available.
To give you an idea of what I am rendering, here are my current frame captures:
Color buffer, Depth buffer, Stencil buffer
So my vision for the stencil buffer is to just stencil out the middle triangle, so everything in the middle triangle has a value of 1 and every other part of the texture has a value of 0. I am not sure how this will come out when rendering, but I imagine the areas with a stencil value of 1 will look different from those with 0.
Here is my code below. It is just a test class that I throw into a framework I made for these tests. I believe the only thing not defined is GLERR(), which basically calls glGetError() to make sure everything is correct.
typedef struct
{
GLuint program;
GLuint vshader;
GLuint fshader;
} StencilTexturingState;
class TestStencilTexturing : public TestInfo
{
public:
TestStencilTexturing(TestConfig& config, int argc, char** argv)
:width(config.windowWidth), height(config.windowHeight)
{
state = (StencilTexturingState*) malloc(sizeof(StencilTexturingState));
}
~TestStencilTexturing()
{
destroyTestStencilTexturing();
}
void loadFBOShaders()
{
const char* vshader = "assets/stencil_texturing/fbo_vert.vs";
const char* fshader = "assets/stencil_texturing/fbo_frag.fs";
state->vshader = LoadShader(vshader, GL_VERTEX_SHADER);
GLERR();
state->fshader = LoadShader(fshader, GL_FRAGMENT_SHADER);
GLERR();
state->program = Link(state->vshader, state->fshader, 1, "inPosition");
GLERR();
glUseProgram(state->program);
}
void loadTextureShaders()
{
const char* vshader = "assets/stencil_texturing/tex_vert.vs";
const char* fshader = "assets/stencil_texturing/tex_frag.fs";
state->vshader = LoadShader(vshader, GL_VERTEX_SHADER);
GLERR();
state->fshader = LoadShader(fshader, GL_FRAGMENT_SHADER);
GLERR();
state->program = Link(state->vshader, state->fshader, 1, "inPosition");
GLERR();
glUseProgram(state->program);
}
void destroyTestStencilTexturing()
{
glUseProgram(0);
glDeleteShader(state->vshader);
glDeleteShader(state->fshader);
glDeleteProgram(state->program);
free(state);
}
void RenderToTexture(GLuint renderedTexture, int frame)
{
GLint posId, colId;
GLuint fboId, depth_stencil_rb;
const float vertexFBOPositions[] =
{
-0.7f, -0.7f, 0.5f, 1.0f,
0.7f, -0.7f, 0.5f, 1.0f,
0.6f, 0.7f, 0.5f, 1.0f,
};
const float vertexFBOColors[] =
{
1.0f, 1.0f, 0.0f, 1.0f,
1.0f, 1.0f, 1.0f, 1.0f,
1.0f, 0.0f, 0.0f, 1.0f,
};
// Load shaders for the FBO
loadFBOShaders();
// Setup the FBO
glGenFramebuffers(1, &fboId);
glBindFramebuffer(GL_FRAMEBUFFER, fboId);
glViewport(0, 0, width, height);
// Set up renderbuffer for depth_stencil formats.
glGenRenderbuffers(1, &depth_stencil_rb);
glBindRenderbuffer(GL_RENDERBUFFER, depth_stencil_rb);
glRenderbufferStorage(GL_RENDERBUFFER, GL_DEPTH24_STENCIL8, width, height);
glFramebufferRenderbuffer(GL_FRAMEBUFFER, GL_DEPTH_STENCIL_ATTACHMENT,
GL_RENDERBUFFER, depth_stencil_rb);
// Depending on the frame bind the 2D texture differently.
// Frame 0 - Color, Frame 1 - Depth, Frame 2 - Stencil
glBindTexture(GL_TEXTURE_2D, renderedTexture);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
// Create our RGBA texture to render our color buffer into.
if (frame == 0)
{
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, width, height, 0, GL_RGBA, GL_UNSIGNED_BYTE, NULL);
glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, renderedTexture, 0);
}
// Create our Depth24_Stencil8 texture to render our depth buffer into.
if (frame == 1)
{
glEnable(GL_DEPTH_TEST);
glTexImage2D(GL_TEXTURE_2D, 0, GL_DEPTH24_STENCIL8, width, height, 0, GL_DEPTH_STENCIL, GL_UNSIGNED_INT_24_8, NULL);
glFramebufferTexture2D(GL_FRAMEBUFFER, GL_DEPTH_STENCIL_ATTACHMENT, GL_TEXTURE_2D, renderedTexture, 0);
}
// Create our Depth24_Stencil8 texture and change depth_stencil_texture mode
// to render our stencil buffer into.
if (frame == 2)
{
glEnable(GL_DEPTH_TEST | GL_STENCIL_TEST);
glTexImage2D(GL_TEXTURE_2D, 0, GL_DEPTH24_STENCIL8, width, height, 0, GL_DEPTH_STENCIL, GL_UNSIGNED_INT_24_8, NULL);
glFramebufferTexture2D(GL_FRAMEBUFFER, GL_DEPTH_STENCIL_ATTACHMENT, GL_TEXTURE_2D, renderedTexture, 0);
glTexParameteri(GL_TEXTURE_2D, GL_DEPTH_STENCIL_TEXTURE_MODE, GL_STENCIL_INDEX);
}
GLERR();
GLenum status = glCheckFramebufferStatus(GL_FRAMEBUFFER);
if (status != GL_FRAMEBUFFER_COMPLETE)
{
printf("There is an error with the Framebuffer, fix it!\n");
}
GLERR();
// Give the values of the position and color of our triangle to the shaders.
posId = glGetAttribLocation(state->program, "position");
colId = glGetAttribLocation(state->program, "color");
GLERR();
glVertexAttribPointer(posId, 4, GL_FLOAT, 0, 0, vertexFBOPositions);
glEnableVertexAttribArray(posId);
glVertexAttribPointer(colId, 4, GL_FLOAT, 0, 0, vertexFBOColors);
glEnableVertexAttribArray(colId);
// Clear the depth buffer back to 1.0f to draw our RGB stripes far back.
glClearDepth(1.0f);
glClear(GL_DEPTH_BUFFER_BIT);
if (frame == 2)
{
glStencilFunc(GL_NEVER, 1, 0xFF); // never pass stencil test
glStencilOp(GL_REPLACE, GL_KEEP, GL_KEEP); // replace stencil buffer values to ref=1
glStencilMask(0xFF); // stencil buffer free to write
glClear(GL_STENCIL_BUFFER_BIT); // first clear stencil buffer by writing default stencil value (0) to all of stencil buffer.
glDrawArrays(GL_TRIANGLES, 0, 3); // at stencil shape pixel locations in stencil buffer replace stencil buffer values to ref = 1
// no more modifying of stencil buffer on stencil and depth pass.
glStencilMask(0x00);
// can also be achieved by glStencilOp(GL_KEEP, GL_KEEP, GL_KEEP);
// stencil test: only pass stencil test at stencilValue == 1 (Assuming depth test would pass.) and write actual content to depth and color buffer only at stencil shape locations.
glStencilFunc(GL_EQUAL, 1, 0xFF);
}
// Use the Scissors to clear the FBO with a RGB stripped pattern.
glEnable(GL_SCISSOR_TEST);
glScissor(width * 0/3, 0, width * 1/3, height);
glClearColor(0.54321f, 0.0f, 0.0f, 0.54321f); // Red
glClear(GL_COLOR_BUFFER_BIT);
glScissor(width * 1/3, 0, width * 2/3, height);
glClearColor(0.0f, 0.65432f, 0.0f, 0.65432f); // Green
glClear(GL_COLOR_BUFFER_BIT);
glScissor(width * 2/3, 0, width * 3/3, height);
glClearColor(0.0f, 0.0f, 0.98765f, 0.98765f); // Blue
glClear(GL_COLOR_BUFFER_BIT);
glDisable(GL_SCISSOR_TEST);
GLERR();
glDrawArrays(GL_TRIANGLES, 0, 3);
glDisable(GL_DEPTH_TEST);
GLERR();
// Remove FBO and shaders and return to original viewport.
glUseProgram(0);
glBindFramebuffer(GL_FRAMEBUFFER, 0);
glDeleteShader(state->vshader);
glDeleteShader(state->fshader);
glDeleteProgram(state->program);
glDeleteFramebuffers(1, &fboId);
glViewport(0, 0, width, height);
GLERR();
}
void drawFrameTestStencilTexturing(int frame)
{
GLint posLoc, texLoc;
GLuint renderedTexture;
const GLubyte indxBuf[] = {0, 1, 2, 1, 3, 2};
const float positions[] =
{
-0.8f, -0.8f,
-0.8f, 0.8f,
0.8f, -0.8f,
0.8f, 0.8f,
};
const float texCoords[] =
{
0.0f, 0.0f,
0.0f, 1.0f,
1.0f, 0.0f,
1.0f, 1.0f
};
// Allocate and initialize the texture that will be rendered to, and then
// textured onto a quad on the default framebuffer.
glGenTextures(1, &renderedTexture);
// Render to the texture using FBO.
RenderToTexture(renderedTexture, frame);
// Create and load shaders to draw the texture.
loadTextureShaders();
// Draw texture to the window.
glClearColor(0.25f, 0.25f, 0.25f, 1.0f);
glClear(GL_COLOR_BUFFER_BIT);
posLoc = glGetAttribLocation(state->program, "position");
texLoc = glGetAttribLocation(state->program, "a_texCoords");
glVertexAttribPointer(posLoc, 2, GL_FLOAT, 0, 0, positions);
glEnableVertexAttribArray(posLoc);
glVertexAttribPointer(texLoc, 2, GL_FLOAT, 0, 0, texCoords);
glEnableVertexAttribArray(texLoc);
// Draw our generated texture onto a quad.
glDrawElements(GL_TRIANGLES, 6, GL_UNSIGNED_BYTE, indxBuf);
glFlush();
glDeleteTextures(1, &renderedTexture);
GLERR();
}
void renderTest(int frame)
{
drawFrameTestStencilTexturing(frame);
}
private:
StencilTexturingState* state;
const int height, width;
};
RUN_TEST(StencilTexturing, "stencil_texturing", 2);
The line
glEnable(GL_DEPTH_TEST | GL_STENCIL_TEST);
is not going to work: the GL enable enums are NOT bit flags, just plain enum values, so ORing them together may enable something else entirely, or just raise a GL_INVALID_ENUM error; either way, the stencil test is not enabled here.
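Each capability needs its own glEnable call; a minimal sketch of the fix:
glEnable(GL_DEPTH_TEST);
glEnable(GL_STENCIL_TEST);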

OpenGL ES render bitmap glyph as texture positioning issues

I'm just testing this stuff out, so I don't need an alternate approach (no GL extensions). I'm just hoping someone sees an obvious mistake in my usage of GLES.
I want to take a bitmap of a glyph that is smaller than 32x32 (width and height are not necessarily powers of 2) and put it into a texture so I can render it. I first create an empty 32x32 texture, then copy the glyph's pixels into that larger texture:
GLuint texture;
glGenTextures(1, &texture);
glBindTexture(GL_TEXTURE_2D, texture);
glPixelStorei(GL_UNPACK_ALIGNMENT, 1);
glTexImage2D(GL_TEXTURE_2D, 0, GL_ALPHA, 32, 32, 0,
GL_ALPHA, GL_UNSIGNED_BYTE, NULL);
glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, bitmap.width(), bitmap.height(),
GL_ALPHA, GL_UNSIGNED_BYTE, bitmap.pixels());
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
Then I try to draw only the bitmap portion of the texture using the texture coordinates:
const GLfloat vertices[] = {
x + bitmap.width(), y + bitmap.height(),
x, y + bitmap.height(),
x, y,
x + bitmap.width(), y
};
const GLfloat texCoords[] = {
0, bitmap.height() / 32,
bitmap.width() / 32, bitmap.height() / 32,
0, 0,
bitmap.width() / 32, 0
};
const GLushort indices[] = { 0, 1, 2, 0, 2, 3 };
glEnableClientState(GL_VERTEX_ARRAY);
glEnableClientState(GL_TEXTURE_COORD_ARRAY);
glVertexPointer(2, GL_FLOAT, 0, vertices);
glTexCoordPointer(2, GL_FLOAT, 0, texCoords);
glDrawElements(GL_TRIANGLES, 6, GL_UNSIGNED_SHORT, indices);
Now if all were well in the world, the square created by the vertices would be the same size as the bitmap portion of the texture, and it would draw my bitmap exactly.
Let's say, for example, that my glyph was 16x16; then it should take up the bottom-left quadrant of the 32x32 texture, and the texCoords would seem to be correct with (0, 0.5), (0.5, 0.5), (0, 0) and (0.5, 0).
However, my 12x12 'T' glyph looks like this:
Anyone know why?
BTW, I start by setting up the projection matrix for 2D work like so:
glMatrixMode(GL_PROJECTION);
glLoadIdentity();
glOrtho(0.0f, 480, 800, 0.0f, 0.0f, 1.0f);
glDisable(GL_DEPTH_TEST);
glMatrixMode(GL_MODELVIEW);
glLoadIdentity();
glTranslatef(0.375f, 0.375f, 0.0f); // for exact pixelization
glEnable(GL_BLEND);
glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA);
glEnable(GL_TEXTURE_2D);
glClearColor(1.0f, 1.0f, 1.0f, 1.0f);
glClear(GL_COLOR_BUFFER_BIT);
The mapping between vertex coordinates and texture coordinates seems to be mixed up. Try changing your vertex coordinates to:
const GLfloat vertices[] = {
x, y + bitmap.height(),
x + bitmap.width(), y + bitmap.height(),
x, y,
x + bitmap.width(), y
};
As an aside:
I don't think you need to go the vertex-index route in your case. Easier would be a single call to glDrawArrays:
glDrawArrays(GL_TRIANGLE_STRIP, 0, 4);
(as you have already set up your glVertexPointer and glTexCoordPointer).
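One more thing worth double-checking, assuming bitmap.width() and bitmap.height() return integer types: expressions like bitmap.width() / 32 are integer divisions that truncate to 0 for any glyph smaller than 32 pixels. Compute the texture coordinates in floating point, for example:
const GLfloat texCoords[] = {
0.0f, bitmap.height() / 32.0f,
bitmap.width() / 32.0f, bitmap.height() / 32.0f,
0.0f, 0.0f,
bitmap.width() / 32.0f, 0.0f
};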