Rendering to texture with OpenGL C++ - c++

I am trying to render some polygons to a texture, and then render the texture to the screen.
I'm not sure how to debug my code, since that would require probing the internal state of OpenGL, so I would appreciate tips on how to debug this myself even more than having the error pointed out.
Anyway, I commented the code I wrote explaining what I expect each line to do.
Here is a description of what the code is supposed to do.
Basically, I made a vertex shader that passes the position, UV and color to the fragment shader. The fragment shader has a uniform to enable texture sampling; otherwise it just outputs the input color. In both cases, the color is multiplied by a uniform color. First I create a texture and fill it with red and green raw pixel data as a test. This texture is correctly rendered to the screen (I see the red and green halves exactly as I initialized them). Then I try to do the actual rendering to the texture: I try to render a small blue square in the middle of it (sampler disabled in the fragment shader, color uniform set to blue), but I can't get this blue square to appear on the rendered texture.
#include <GL/glew.h>
#include <GLFW/glfw3.h>
#include "utils.h"
#include <glm/glm.hpp>
#include <glm/gtc/type_ptr.hpp>
#include <glm/gtc/matrix_transform.hpp>
#include <iostream>
using namespace std;
#define numVAOs 1
#define numVBOs 1
GLuint shaderProgram;
GLuint unifUseTexture, unifInTexture, unifTMat, unifDrawColor;
GLuint texture;
GLuint textureFrameBuffer;
GLuint vao[numVAOs];
GLuint vbo[numVBOs];
void drawRectangle() {
}
void init() {
// Compile the shaderProgram
shaderProgram = createShaderProgram("vertex.glsl","fragment.glsl");
// Retrieve the uniform location
unifUseTexture = glGetUniformLocation(shaderProgram,"useTexture");
unifInTexture = glGetUniformLocation(shaderProgram,"inTexture");
unifTMat = glGetUniformLocation(shaderProgram,"tMat");
unifDrawColor = glGetUniformLocation(shaderProgram,"drawColor");
// Create vertex array object and vertex buffer object
glGenVertexArrays(numVAOs,vao);
glBindVertexArray(vao[0]);
float xyzuvrgbaSquare[54] = {
/* C */ 1.0,-1.0,0.0, 1.0,0.0, 1.0,1.0,1.0,1.0,
/* A */ -1.0,1.0,0.0, 0.0,1.0, 1.0,1.0,1.0,1.0,
/* B */ 1.0,1.0,0.0, 1.0,1.0, 1.0,1.0,1.0,1.0,
/* A */ -1.0,1.0,0.0, 0.0,1.0, 1.0,1.0,1.0,1.0,
/* C */ 1.0,-1.0,0.0, 1.0,0.0, 1.0,1.0,1.0,1.0,
/* D */-1.0,-1.0,0.0, 0.0,0.0, 1.0,1.0,1.0,1.0
};
glGenBuffers(numVBOs,vbo);
glBindBuffer(GL_ARRAY_BUFFER,vbo[0]);
glBufferData(GL_ARRAY_BUFFER, 4*54,xyzuvrgbaSquare,GL_STATIC_DRAW);
// Associate vbo with the correct vertex attribute to display the rectangle
glBindBuffer(GL_ARRAY_BUFFER,vbo[0]);
glVertexAttribPointer(0,3,GL_FLOAT,GL_FALSE,36,0); // inPosition
glVertexAttribPointer(1,4,GL_FLOAT,GL_FALSE,36,(void*)20); // inColor
glVertexAttribPointer(2,2,GL_FLOAT,GL_FALSE,36,(void*)12); // inUV
glEnableVertexAttribArray(0); // location=0 in the shader
glEnableVertexAttribArray(1);
glEnableVertexAttribArray(2);
// Generate a small 128x128 texture. I followed the tutorial
// over http://www.opengl-tutorial.org/intermediate-tutorials/tutorial-14-render-to-texture/
// generate a frameBuffer to contain the texture
glGenFramebuffers(1,&textureFrameBuffer);
// Bind it, so when I will generate the texture it will be associated with it
glBindFramebuffer(GL_FRAMEBUFFER, textureFrameBuffer);
glGenTextures(1,&texture);
glBindTexture(GL_TEXTURE_2D,texture);
// Put some raw data inside of it for testing purposes. I will fill it
// half with green, half with red
unsigned char* imageRaw = new unsigned char[4*128*128];
for(int i=0; i<4*128*64; i+=4) {
imageRaw[i] = 255;
imageRaw[i+1] = 0;
imageRaw[i+2] = 0;
imageRaw[i+3] = 255;
imageRaw[4*128*64+i] = 0;
imageRaw[4*128*64+i+1] = 255;
imageRaw[4*128*64+i+2] = 0;
imageRaw[4*128*64+i+3] = 255;
}
glTexImage2D(GL_TEXTURE_2D,0,GL_RGBA,128,128,0,GL_RGBA,GL_UNSIGNED_BYTE,imageRaw);
// Setup some required parameters
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
// Draw a small blue square on the texture
// So, activate the previously compiled shader program and setup the uniforms
glUseProgram(shaderProgram);
// First, create a transform matrix to make the square smaller (20% of texture)
glm::mat4 tMat = glm::scale(glm::mat4(1.0f),glm::vec3(0.2,0.2,0));
glUniformMatrix4fv(unifTMat,1,GL_FALSE,glm::value_ptr(tMat));
// do not use a texture (ignore sampler2D in fragment shader)
glUniform1i(unifUseTexture,0);
// use the color BLUE for the rectangle
glUniform4f(unifDrawColor,0.0,0.0,1.0,1.0);
// Bind the textureFrameBuffer to render on the texture instead of the screen
glBindFramebuffer(GL_FRAMEBUFFER,textureFrameBuffer);
glFramebufferTexture(GL_FRAMEBUFFER,GL_COLOR_ATTACHMENT0,texture,0);
GLenum drawBuffers[1] = {GL_COLOR_ATTACHMENT0};
glDrawBuffers(1, drawBuffers);
GLenum status = glCheckFramebufferStatus(GL_DRAW_FRAMEBUFFER);
if( status != GL_FRAMEBUFFER_COMPLETE ) {
cout << "framebuffer status: " << status << endl;
}
// the vertex buffer and vertex attribute pointers have already been
// set up, so I'll just do the draw call here
glDrawArrays(GL_TRIANGLES,0,6);
// Display the texture on screen
// Bind the screen framebuffer (0) so the following rendering will occur on screen
glBindFramebuffer(GL_FRAMEBUFFER,0);
// Put a white background color
glClearColor(1.0,1.0,1.0,1.0);
glClear(GL_COLOR_BUFFER_BIT);
// Change properly the shader uniforms
glUniform4f(unifDrawColor,1.0,1.0,1.0,1.0); // multiply by white, no changes
glUniform1i(unifUseTexture,1); // set useTexture to True
// Create a transform matrix to scale the rectangle so that it uses up only half screen
tMat = glm::scale(glm::mat4(1.0f),glm::vec3(.5,.5,.0));
glUniformMatrix4fv(unifTMat,1,GL_FALSE,glm::value_ptr(tMat));
// Put the sampler2D
glActiveTexture(GL_TEXTURE0); // Work on texture0
// 0 because of (binding = 0) on the fragment shader
glBindTexture(GL_TEXTURE_2D,texture);
glDrawArrays(GL_TRIANGLES,0,6); // 6 vertices
}
int main(int argc, char** argv) {
// Build the window
if (!glfwInit()) exit(EXIT_FAILURE);
glfwWindowHint(GLFW_CONTEXT_VERSION_MAJOR,4);
glfwWindowHint(GLFW_CONTEXT_VERSION_MINOR,3);
GLFWwindow* window = glfwCreateWindow(600,600,"Dashboard",NULL,NULL);
glfwMakeContextCurrent(window);
if(glewInit() != GLEW_OK) exit(EXIT_FAILURE);
glfwSwapInterval(1);
init();
while(!glfwWindowShouldClose(window)) {
//display(window,glfwGetTime());
glfwSwapBuffers(window);
glfwPollEvents();
}
glfwDestroyWindow(window);
glfwTerminate();
exit(EXIT_SUCCESS);
}
edit: I forgot to put the shader code here, though the problem is not within the shader, because it works when used to render the texture to the screen.
vertex.glsl:
#version 430
layout (location=0) in vec3 inPosition;
layout (location=1) in vec4 inColor;
layout (location=2) in vec2 inUV;
uniform mat4 tMat;
uniform vec4 drawColor;
out vec4 varyingColor;
out vec2 varyingUV;
void main(void) {
gl_Position = tMat * vec4(inPosition,1.0);
varyingColor = inColor*drawColor;
varyingUV = inUV;
}
fragment.glsl:
#version 430
in vec4 varyingColor;
in vec2 varyingUV;
layout(location = 0) out vec4 color;
layout (binding=0) uniform sampler2D inTexture;
uniform bool useTexture;
void main(void) {
if( useTexture )
color = vec4(texture(inTexture,varyingUV).rgb,1.0) * varyingColor;
else
color = varyingColor;
}

The texture which is attached to the framebuffer has a different size than the window. Hence you have to adjust the viewport rectangle (glViewport) to the size of the currently bound framebuffer before drawing the geometry:
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, 128, 128, 0, GL_RGBA, GL_UNSIGNED_BYTE, imageRaw);
// [...]
glBindFramebuffer(GL_FRAMEBUFFER, textureFrameBuffer);
glFramebufferTexture(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, texture,0);
glViewport(0, 0, 128, 128);
// [...]
glDrawArrays(GL_TRIANGLES, 0, 6);
// [...]
glBindFramebuffer(GL_FRAMEBUFFER, 0);
glViewport(0, 0, WIDTH, HEIGHT);
// [...]
glDrawArrays(GL_TRIANGLES, 0, 6);
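As for the request for debugging tips in the question: a minimal sketch of enabling OpenGL debug output (available on a 4.3+ context or via GL_KHR_debug; the function and callback names below are illustrative, not part of the question's code). It reports problems such as incomplete framebuffers or invalid calls right where they happen:
void GLAPIENTRY debugCallback(GLenum source, GLenum type, GLuint id, GLenum severity,
                              GLsizei length, const GLchar* message, const void* userParam) {
    cout << "GL debug: " << message << endl;
}
void enableDebugOutput() { // call once after glewInit()
    glEnable(GL_DEBUG_OUTPUT);
    glEnable(GL_DEBUG_OUTPUT_SYNCHRONOUS); // deliver messages on the offending call
    glDebugMessageCallback(debugCallback, nullptr);
}
Depending on the driver you may also need to request a debug context, e.g. glfwWindowHint(GLFW_OPENGL_DEBUG_CONTEXT, GLFW_TRUE), for messages to show up.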

Related

Why is there no output to the framebuffer's textures?

Here is the code:
int main(){
//init gl environment
//...
//create textures for pass 1
GLuint normal_color_output;
glCreateTextures(GL_TEXTURE_2D_MULTISAMPLE, 1, &normal_color_output);
glTextureStorage2DMultisample(normal_color_output, 8, GL_RGBA32F, 1000, 800, GL_TRUE);
GLuint high_color_output;
glCreateTextures(GL_TEXTURE_2D_MULTISAMPLE, 1, &high_color_output);
glTextureStorage2DMultisample(high_color_output,8, GL_R11F_G11F_B10F, 1000, 800,GL_TRUE);
//init framebuffer
GLuint render_buffer;
glCreateRenderbuffers(1, &render_buffer);
glNamedRenderbufferStorageMultisample(render_buffer, 8, GL_DEPTH24_STENCIL8, 1000, 800);
GLuint framebuffer;
glCreateFramebuffers(1, &framebuffer);
glNamedFramebufferTexture(framebuffer, GL_COLOR_ATTACHMENT0, normal_color_output,0);
glNamedFramebufferTexture(framebuffer, GL_COLOR_ATTACHMENT1, high_color_output, 0);
glNamedFramebufferRenderbuffer(framebuffer, GL_DEPTH_STENCIL_ATTACHMENT, GL_RENDERBUFFER, render_buffer);
const GLenum drawbuffers[] = {GL_COLOR_ATTACHMENT0,GL_COLOR_ATTACHMENT1};
glNamedFramebufferDrawBuffers(framebuffer, 2, drawbuffers);
glBindFramebuffer(GL_FRAMEBUFFER, framebuffer);
//init another framebuffer
//What I want to do is implement my own MSAA color resolve solution.
GLuint mix_framebuffer;
glCreateFramebuffers(1, &mix_framebuffer);
GLuint mix_renderbuffer;
glCreateRenderbuffers(1, &mix_renderbuffer);
glNamedRenderbufferStorage(mix_renderbuffer, GL_DEPTH24_STENCIL8, 1000, 800);
GLuint normal_antialiasing_texture, hdr_antialiasing_texture;
glCreateTextures(GL_TEXTURE_2D, 1, &normal_antialiasing_texture);
glTextureStorage2D(normal_antialiasing_texture, 1, GL_RGBA32F, 1000, 800);
glCreateTextures(GL_TEXTURE_2D, 1, &hdr_antialiasing_texture);
glTextureStorage2D(hdr_antialiasing_texture, 1, GL_RGBA32F, 1000, 800);
glNamedFramebufferTexture(mix_framebuffer, GL_COLOR_ATTACHMENT0, normal_antialiasing_texture, 0);
glNamedFramebufferTexture(mix_framebuffer, GL_COLOR_ATTACHMENT1, hdr_antialiasing_texture, 0);
glNamedFramebufferDrawBuffers(mix_framebuffer,2, drawbuffers);
glNamedFramebufferRenderbuffer(mix_framebuffer, GL_DEPTH_STENCIL_ATTACHMENT, GL_RENDERBUFFER, mix_renderbuffer);
glBindFramebuffer(GL_FRAMEBUFFER, mix_framebuffer);
//....
//draw commands
while (!glfwWindowShouldClose(window)) {
// pass 1
glBindFramebuffer(GL_FRAMEBUFFER, framebuffer);
glClear(GL_COLOR_BUFFER_BIT|GL_DEPTH_BUFFER_BIT);
glUseProgram(program);
glUniformMatrix4fv(3, 1, GL_FALSE, glm::value_ptr(camera.GetViewMat()));
model.Render(program);
glPointSize(20.f);
glUseProgram(light_shader);// I draw a point to show the light's position
glUniformMatrix4fv(0, 1, GL_FALSE, glm::value_ptr(camera.GetViewMat()));
glDrawArrays(GL_POINTS, 0, 1);
//pass 2
glBindFramebuffer(GL_FRAMEBUFFER, mix_framebuffer);
glUseProgram(mix_program);
glBindTextureUnit(0, normal_color_output);
glBindTextureUnit(1, high_color_output);
glClear(GL_COLOR_BUFFER_BIT);
glNamedFramebufferDrawBuffers(mix_framebuffer, 2, drawbuffers);
glDrawArrays(GL_POINTS, 0, 1);
//...
}
}
I use a geometry shader to model a square; here is the code:
//mix_gs.glsl
#version 450 core
layout(points) in;
layout(triangle_strip) out;
layout(max_vertices = 4) out;
void main(){
gl_Position = vec4(-1,1,-1,1);
EmitVertex();
gl_Position = vec4(-1,-1,-1,1);
EmitVertex();
gl_Position = vec4(1,1,-1,1);
EmitVertex();
gl_Position = vec4(1,-1,-1,1);
EmitVertex();
EndPrimitive();
}
Here is mix_fs.glsl:
#version 450 core
layout(location = 0)out vec4 color;
layout(location = 1)out vec4 hdr_color;
layout(binding = 0) uniform sampler2DMS color_sdms;
layout(binding = 1) uniform sampler2DMS hdr_sdms;
void main(){
/*
for(int i=0;i<8;i++){
color += texelFetch(color_sdms,ivec2(gl_FragCoord.xy),i);
hdr_color += vec4(texelFetch(hdr_sdms,ivec2(gl_FragCoord.xy),i).xyz,1);
}
*/
color = vec4(1,0,0,1);//I just output a color
hdr_color = vec4(0,1,0,1);
}
I've encountered a problem: during draw pass 2, GL does not output any color to the textures bound to mix_framebuffer.
Here is the debugging info in RenderDoc (screenshots): draw pass 1 texture output, draw pass 2's geometry output, draw pass 2 texture input, and draw pass 2 texture output.
You can see that draw pass 1's output was passed to draw pass 2's pipeline successfully, but there is no output to draw pass 2's textures. I don't know why.
If you don't see even the plain color, the first thing I'd recommend is to check how it was discarded. There are not many options:
glColorMask. Highly likely it's not your case, since pass 1 works;
Wrong face culling and polygon winding order (CW, CCW). By your geometry shader, it looks like CW;
Blending options;
Depth-stencil state. I see you use glNamedRenderbufferStorage(mix_renderbuffer, GL_DEPTH24_STENCIL8, 1000, 800); what are your depth-stencil settings?
If everything above looks good, are there any glGetError messages? If that's OK, try removing MRT for debugging purposes and output only a single color in the second pass. If that works, there is probably some error in the MRT + depth buffer setup.
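For reference, a minimal debugging sketch along those lines, using the names from the question's code (it only neutralizes state that could discard fragments; it is not a guaranteed fix):
// Just before the pass-2 draw call:
glDisable(GL_CULL_FACE);   // rule out winding/culling of the geometry-shader quad
glDisable(GL_BLEND);       // rule out blending the color away
glDisable(GL_DEPTH_TEST);  // rule out depth rejection against mix_renderbuffer
glColorMask(GL_TRUE, GL_TRUE, GL_TRUE, GL_TRUE);
for (GLenum err; (err = glGetError()) != GL_NO_ERROR; )
    std::cout << "GL error: 0x" << std::hex << err << std::endl;
glDrawArrays(GL_POINTS, 0, 1);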

How to set up shaders in OpenGL

I'm working on developing code in OpenGL, and I was completing one of the tutorials for a lesson. However, the code that I completed did not color the triangle. Based on the tutorial, my triangle should come out as green, but it keeps turning out white. I think there is an error in the code for my shaders, but I can't seem to find it.
I tried altering the code a few times, and I even moved on to the next tutorial, which shades each vertex. However, my triangle is still coming out as white.
#include <iostream> //Includes C++ i/o stream
#include <GL/glew.h> //Includes glew header
#include <GL/freeglut.h> //Includes freeglut header
using namespace std; //Uses the standard namespace
#define WINDOW_TITLE "Modern OpenGL" //Macro for window title
//Vertex and Fragment Shader Source Macro
#ifndef GLSL
#define GLSL(Version, Source) "#version " #Version "\n" #Source
#endif
//Variables for window width and height
int WindowWidth = 800, WindowHeight = 600;
/* User-defined Function prototypes to:
* initialize the program, set the window size,
* redraw graphics on the window when resized,
* and render graphics on the screen
* */
void UInitialize(int, char*[]);
void UInitWindow(int, char*[]);
void UResizeWindow(int, int);
void URenderGraphics(void);
void UCreateVBO(void); //This step is missing from Tutorial 3-3
void UCreateShaders(void);
/*Vertex Shader Program Source Code*/
const GLchar * VertexShader = GLSL(440,
in layout(location=0) vec4 vertex_Position; //Receive vertex coordinates from attribute 0. i.e. 2
void main(){
gl_Position = vertex_Position; //Sends vertex positions to gl_position vec 4
}
);
/*Fragment Shader Program Source Code*/
const GLchar * FragmentShader = GLSL(440,
void main(){
gl_FragColor = vec4(0.0, 1.0, 0.0, 1.0); //Sets the pixels / fragments of the triangle to green
}
);
//main function. Entry point to the OpenGL Program
int main(int argc, char* argv[])
{
UInitialize(argc, argv); //Initialize the OpenGL program
glutMainLoop(); // Starts the Open GL loop in the background
exit(EXIT_SUCCESS); //Terminates the program successfully
}
//Implements the UInitialize function
void UInitialize(int argc, char* argv[])
{
//glew status variable
GLenum GlewInitResult;
UInitWindow(argc, argv); //Creates the window
//Checks glew status
GlewInitResult = glewInit();
if(GLEW_OK != GlewInitResult)
{
fprintf(stderr, "Error: %s\n", glewGetErrorString(GlewInitResult));
exit(EXIT_FAILURE);
}
//Displays GPU OpenGL version
fprintf(stdout, "INFO: OpenGL Version: %s\n", glGetString(GL_VERSION));
UCreateVBO(); //Calls the function to create the Vertex Buffer Object
UCreateShaders(); //Calls the function to create the Shader Program
//Sets the background color of the window to black. Optional
glClearColor(0.0f, 0.0f, 0.0f, 1.0f);
}
//Implements the UInitWindow function
void UInitWindow(int argc, char* argv[])
{
//Initializes freeglut
glutInit(&argc, argv);
//Sets the window size
glutInitWindowSize(WindowWidth, WindowHeight);
//Memory buffer setup for display
glutInitDisplayMode(GLUT_DEPTH | GLUT_DOUBLE | GLUT_RGBA);
//Creates a window with the macro placeholder title
glutCreateWindow(WINDOW_TITLE);
glutReshapeFunc(UResizeWindow); //Called when the window is resized
glutDisplayFunc(URenderGraphics); //Renders graphics on the screen
}
//Implements the UResizeWindow function
void UResizeWindow(int Width, int Height)
{
glViewport(0,0, Width, Height);
}
//Implements the URenderGraphics function
void URenderGraphics(void)
{
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT); //Clears the screen
/*Creates the triangle*/
GLuint totalVertices = 3; //Specifies the number of vertices for the triangle i.e. 3
glDrawArrays(GL_TRIANGLES, 0, totalVertices); //Draws the triangle
glutSwapBuffers(); //Flips the back buffer with the front buffer every frame. Similar to GL Flush
}
//Implements the CreateVBO function
void UCreateVBO(void)
{
//Specifies coordinates for triangle vertices on x and y
GLfloat verts[] =
{
0.0f, 1.0f, //top-center of the screen
-1.0f, -1.0f, //bottom-left of the screen
1.0f, -1.0f //bottom-right of the screen
};
//Stores the size of the verts array / number of the coordinates needed for the triangle i.e. 6
float numVertices = sizeof(verts);
GLuint myBufferID; //Variable for vertex buffer object id
glGenBuffers(1, &myBufferID); //Creates 1 buffer
glBindBuffer(GL_ARRAY_BUFFER, myBufferID); //Activates the buffer
glBufferData(GL_ARRAY_BUFFER, numVertices, verts, GL_STATIC_DRAW); //Sends vertex or coordinate data to GPU
/*Creates the Vertex Attribute Pointer*/
GLuint floatsPerVertex = 2; //Number of coordinates per vertex
glEnableVertexAttribArray(0); //Specifies the initial position of the coordinates in the buffer
/*Instructs the GPU on how to handle the vertex buffer object data.
* Parameters: attribPointerPosition | coordinates per vertex | data type | deactivate normalization | 0 strides | 0 offset
*/
glVertexAttribPointer(0, floatsPerVertex, GL_FLOAT, GL_FALSE, 0, 0);
}
//Implements the UCreateShaders function
void UCreateShaders(void)
{
//Create a shader program object
GLuint ProgramId = glCreateProgram();
GLuint vertexShaderId = glCreateShader(GL_VERTEX_SHADER); //Create a Vertex Shader Object
GLuint fragmentShaderId = glCreateShader(GL_FRAGMENT_SHADER); //Create a Fragment Shader Object
glShaderSource(vertexShaderId, 1, &VertexShader, NULL); //Retrieves the vertex shader source code
glShaderSource(fragmentShaderId, 1, &FragmentShader, NULL); //Retrieves the fragment shader source code
glCompileShader(vertexShaderId); //Compile the vertex shader
glCompileShader(fragmentShaderId); //Compile the fragment shader
//Attaches the vertex and fragment shaders to the shader program
glAttachShader(ProgramId, vertexShaderId);
glAttachShader(ProgramId, fragmentShaderId);
glLinkProgram(ProgramId); //Links the shader program
glUseProgram(ProgramId); //Uses the shader program
}
When completed correctly, the code should result in a solid green triangle.
The variable gl_FragColor is not available in the GLSL 4.4 core profile, since it was deprecated and removed from core. Because you don't specify a compatibility profile, core is assumed by default. Either use
#version 440 compatibility
for your shaders, or, even better, use the GLSL 4.4 onwards approach:
#version 440 core
layout(location = 0) out vec4 OUT;
void main(){
OUT = vec4(0.0, 1.0, 0.0, 1.0);
}
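Incidentally, a fragment shader that fails to compile is likely why the triangle shows up white here, and UCreateShaders() never checks the compile or link status, so the error goes unnoticed. A minimal sketch of such a check (checkShader is a hypothetical helper, not part of the tutorial code):
void checkShader(GLuint shaderId, const char* label)
{
    GLint status = GL_FALSE;
    glGetShaderiv(shaderId, GL_COMPILE_STATUS, &status);
    if (status != GL_TRUE)
    {
        GLchar log[1024];
        glGetShaderInfoLog(shaderId, sizeof(log), NULL, log);
        fprintf(stderr, "%s compile error: %s\n", label, log);
    }
}
//In UCreateShaders(), right after the two glCompileShader calls:
//checkShader(vertexShaderId, "Vertex shader");
//checkShader(fragmentShaderId, "Fragment shader");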

Memory barrier problems when writing and reading an image in OpenGL

I'm having a problem trying to read an image from a fragment shader. First I write into the image in shader program A (I'm just painting blue on the image), then I read from it in another shader program B to display the image, but the reading part is not getting the right color; I'm getting a black image.
Unexpected result
This is my application code:
void GLAPIENTRY MessageCallback(GLenum source, GLenum type, GLuint id, GLenum severity, GLsizei length, const GLchar* message, const void* userParam)
{
std::cout << "GL CALLBACK: type = " << std::hex << type << ", severity = " << std::hex << severity << ", message = " << message << "\n"
<< (type == GL_DEBUG_TYPE_ERROR ? "** GL ERROR **" : "") << std::endl;
}
class ImgRW
: public Core
{
public:
ImgRW()
: Core(512, 512, "JFAD")
{}
virtual void Start() override
{
glEnable(GL_DEBUG_OUTPUT);
glDebugMessageCallback(MessageCallback, nullptr);
shader_w = new Shader("w_img.vert", "w_img.frag");
shader_r = new Shader("r_img.vert", "r_img.frag");
glGenTextures(1, &space);
glBindTexture(GL_TEXTURE_2D, space);
glTexStorage2D(GL_TEXTURE_2D, 1, GL_RGBA32F, 512, 512);
glBindImageTexture(0, space, 0, GL_FALSE, 0, GL_READ_WRITE, GL_RGBA32F);
glGenVertexArrays(1, &vertex_array);
glBindVertexArray(vertex_array);
}
virtual void Update() override
{
shader_w->use(); // writing shader
glDrawArrays(GL_TRIANGLE_STRIP, 0, 4);
glMemoryBarrier(GL_TEXTURE_FETCH_BARRIER_BIT | GL_SHADER_IMAGE_ACCESS_BARRIER_BIT);
shader_r->use(); // reading shader
glDrawArrays(GL_TRIANGLE_STRIP, 0, 4);
}
virtual void End() override
{
delete shader_w;
delete shader_r;
glDeleteTextures(1, &space);
glDeleteVertexArrays(1, &vertex_array);
}
private:
Shader* shader_w;
Shader* shader_r;
GLuint vertex_array;
GLuint space;
};
#if 1
CORE_MAIN(ImgRW)
#endif
and these are my fragment shaders:
Writing to image
Code glsl:
#version 430 core
layout (binding = 0, rgba32f) uniform image2D img;
out vec4 out_color;
void main()
{
imageStore(img, ivec2(gl_FragCoord.xy), vec4(0.0f, 0.0f, 1.0f, 1.0f));
}
Reading from image
Code glsl:
#version 430 core
layout (binding = 0, rgba32f) uniform image2D img;
out vec4 out_color;
void main()
{
vec4 color = imageLoad(img, ivec2(gl_FragCoord.xy));
out_color = color;
}
The only way I get the correct result is if I change the order of the drawing commands, and then I don't even need the memory barriers, like this (in the Update function above):
shader_r->use(); // reading shader
glDrawArrays(GL_TRIANGLE_STRIP, 0, 4);
shader_w->use(); // writing shader
glDrawArrays(GL_TRIANGLE_STRIP, 0, 4);
I don't know if the problem is the graphics card or the drivers, or if I'm missing some kind of flag that enables memory barriers, or if I used the wrong barrier bits, or if I placed the barriers in the wrong part of the code.
The vertex shader for both shader programs is the following:
#version 430 core
void main()
{
vec2 v[4] = vec2[4]
(
vec2(-1.0, -1.0),
vec2( 1.0, -1.0),
vec2(-1.0, 1.0),
vec2( 1.0, 1.0)
);
vec4 p = vec4(v[gl_VertexID], 0.0, 1.0);
gl_Position = p;
}
and this is my init function:
void Window::init()
{
glfwInit();
window = glfwCreateWindow(getWidth(), getHeight(), name, nullptr, nullptr);
glfwMakeContextCurrent(window);
glfwSetFramebufferSizeCallback(window, framebufferSizeCallback);
glfwSetCursorPosCallback(window, cursorPosCallback);
//glfwSetInputMode(window, GLFW_CURSOR, GLFW_CURSOR_DISABLED);
assert(gladLoadGLLoader((GLADloadproc)glfwGetProcAddress) && "Couldn't initilaize OpenGL");
glEnable(GL_DEPTH_TEST);
}
and in my Run function I call my Start, Update and End functions:
void Core::Run()
{
std::cout << glGetString(GL_VERSION) << std::endl;
Start();
float lastFrame{ 0.0f };
while (!window.close())
{
float currentFrame = static_cast<float>(glfwGetTime());
Time::deltaTime = currentFrame - lastFrame;
lastFrame = currentFrame;
glViewport(0, 0, getWidth(), getHeight());
glClearBufferfv(GL_COLOR, 0, &color[0]);
glClearBufferfi(GL_DEPTH_STENCIL, 0, 1.0f, 0);
Update();
glfwSwapBuffers(window);
glfwPollEvents();
}
End();
}
glEnable(GL_DEPTH_TEST);
As I suspected.
Just because a fragment shader doesn't write a color output doesn't mean that those fragments will not affect the depth buffer. If the fragment passes the depth test and the depth write mask is on (assuming no other state is involved), it will update the depth buffer with the current fragment's depth (and the color buffer with uninitialized values, but that's a different matter).
Since you're drawing the same geometry both times, the second rendering's fragments will get the same depth values as the corresponding fragments from the first rendering. But the default depth function is GL_LESS. Since any value is not less than itself, this means that all fragments from the second rendering fail the depth test.
And therefore, they don't get rendered.
So just turn off the depth test. And while you're at it, turn off color writes for your "writing" rendering pass, since you're not writing to the color buffers.
Now, you do properly need the memory barrier between the two draw calls. But you only need the GL_SHADER_IMAGE_ACCESS_BARRIER_BIT, since that's how you're reading the data (via image load/store, not samplers).
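Put together, a sketch of what Update() might look like under that advice (exactly where the state toggles belong is an assumption on my part, not tested code):
virtual void Update() override
{
    glDisable(GL_DEPTH_TEST); // both passes draw the same quad; don't let pass 1's depth reject pass 2
    glColorMask(GL_FALSE, GL_FALSE, GL_FALSE, GL_FALSE); // writing pass outputs only via imageStore
    shader_w->use(); // writing shader
    glDrawArrays(GL_TRIANGLE_STRIP, 0, 4);
    glColorMask(GL_TRUE, GL_TRUE, GL_TRUE, GL_TRUE);
    glMemoryBarrier(GL_SHADER_IMAGE_ACCESS_BARRIER_BIT); // make the image writes visible to imageLoad
    shader_r->use(); // reading shader
    glDrawArrays(GL_TRIANGLE_STRIP, 0, 4);
}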

GLSL Render to Texture not working

I'm trying to do a compute pass where I render to a texture that will be used in a draw pass later on. My initial implementation was based on shader storage buffer objects and was working nicely. But I want to apply a computation method that takes advantage of the GPU's blend hardware, so I started porting the SSBO implementation to an RTT one. Unfortunately the code has stopped working: when I read back the texture, it contains wrong values.
Here is my texture and frame buffer setup code:
glGenFramebuffers(1, &m_fbo);
glBindFramebuffer(GL_FRAMEBUFFER, m_fbo);
// Create render textures
glGenTextures(NUM_TEX_OUTPUTS, m_renderTexs);
m_texSize = square_approximation(m_numVertices);
cout << "Textures size: " << glm::to_string(m_texSize) << endl;
GLenum drawBuffers[NUM_TEX_OUTPUTS];
for (int i = 0 ; i < NUM_TEX_OUTPUTS; ++i)
{
glBindTexture(GL_TEXTURE_2D, m_renderTexs[i]);
// 1st 0: level, 2nd 0: no border, 3rd 0: no initial data
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, m_texSize.x, m_texSize.y, 0, GL_RGBA, GL_FLOAT, 0);
// XXX: do we need this?
// Poor filtering. Needed !
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
glBindTexture(GL_TEXTURE_2D, 0);
// 0: level
glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0 + i, GL_TEXTURE_2D, m_renderTexs[i], 0);
drawBuffers[i] = GL_COLOR_ATTACHMENT0 + i;
}
glDrawBuffers(NUM_TEX_OUTPUTS, drawBuffers);
if (glCheckFramebufferStatus(GL_FRAMEBUFFER) != GL_FRAMEBUFFER_COMPLETE)
{
cout << "Error when setting frame buffer" << endl;
// throw exception?
}
glBindFramebuffer(GL_FRAMEBUFFER, 0);
And this is the code to start the compute pass:
m_shaderProgram.use();
// setup openGL
glPolygonMode(GL_FRONT_AND_BACK, GL_LINE);
glDisable(GL_CULL_FACE);
glDisable(GL_DEPTH_TEST);
glViewport(0, 0, m_texSize.x, m_texSize.y); // setup viewport (equal to textures size)
// make a single patch have the vertex, the bases and the neighbours
glPatchParameteri(GL_PATCH_VERTICES, m_maxNeighbours + 5);
// Wait all writes to shader storage to finish
glMemoryBarrier(GL_SHADER_STORAGE_BARRIER_BIT);
glUniform1i(m_shaderProgram.getUniformLocation("curvTex"), m_renderTexs[2]);
glUniform2i(m_shaderProgram.getUniformLocation("size"), m_texSize.x, m_texSize.y);
glUniform2f(m_shaderProgram.getUniformLocation("vertexStep"), (umax - umin)/divisoes,
(vmax-vmin)/divisoes);
// Bind buffers
glBindFramebuffer(GL_FRAMEBUFFER, m_fbo);
glBindBuffer(GL_ARRAY_BUFFER, m_vbo);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, m_ibo);
glBindBufferBase(GL_UNIFORM_BUFFER, m_mvp_location, m_mvp_ubo);
// Make textures active
for (int i = 0; i < NUM_TEX_OUTPUTS; ++i)
{
glActiveTexture(GL_TEXTURE0 + i);
glBindTexture(GL_TEXTURE_2D, m_renderTexs[i]);
}
// no need to pass index array 'cause ibo is bound already
glDrawElements(GL_PATCHES, m_numElements, GL_UNSIGNED_INT, 0);
I then read back the textures using the following:
bool readTex(GLuint tex, void *dest)
{
glBindTexture(GL_TEXTURE_2D, tex);
glGetTexImage(GL_TEXTURE_2D, 0, GL_RGBA, GL_FLOAT, dest);
glBindTexture(GL_TEXTURE_2D, 0);
// TODO: check glGetTexImage return values for error
return true;
}
for (int i = 0; i < NUM_TEX_OUTPUTS; ++i)
{
if (m_tensors[i] == NULL) {
m_tensors[i] = new glm::vec4[m_texSize.x*m_texSize.y];
}
memset(m_tensors[i], 0, m_texSize.x*m_texSize.y*sizeof(glm::vec4));
readTex(m_renderTexs[i], m_tensors[i]);
}
Finally, the fragment shader code is:
#version 430
#extension GL_ARB_shader_storage_buffer_object: require
layout(pixel_center_integer) in vec4 gl_FragCoord;
layout(std140, binding=6) buffer EvalBuffer {
vec4 evalDebug[];
};
uniform ivec2 size;
in TEData {
vec4 _a;
vec4 _b;
vec4 _c;
vec4 _d;
vec4 _e;
};
layout(location = 0) out vec4 a;
layout(location = 1) out vec4 b;
layout(location = 2) out vec4 c;
layout(location = 3) out vec4 d;
layout(location = 4) out vec4 e;
void main()
{
a= _a;
b= _b;
c= _c;
d= _d;
e= _e;
evalDebug[gl_PrimitiveID] = gl_FragCoord;
}
The fragment coordinates are correct (each fragment points to an x,y coordinate in the texture), and so are all the input values (_a to _e), but I do not see them written correctly to the textures when reading back. I also tried accessing the texture in the shader to see if it was only a read-back error, but my debug SSBO returned all zeroes.
Am I missing some setup step?
I've tested on both Linux and Windows (Titan and 540M GeForces) and I'm using OpenGL 4.3.
As derhass pointed out in the comments above, the problem was with the texture format. I assumed that by passing GL_FLOAT as the data type it would use 32-bit floats for each of the RGBA channels. That was not so.
As derhass said, the data type parameter here does not change the texture format. I had to change the internalFormat parameter to what I wanted (GL_RGBA32F) so that it would work as expected.
So, after changing glTexImage2D call to:
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA32F, m_texSize.x, m_texSize.y, 0, GL_RGBA, GL_FLOAT, 0);
I was able to correctly render the results to the texture and read it back. :)
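An alternative that makes this mistake harder is immutable texture storage, where the sized internal format is the only format you ever specify (a sketch, assuming GL 4.2+ or ARB_texture_storage):
// Allocates one mip level of GL_RGBA32F; no pixel data is uploaded here.
glBindTexture(GL_TEXTURE_2D, m_renderTexs[i]);
glTexStorage2D(GL_TEXTURE_2D, 1, GL_RGBA32F, m_texSize.x, m_texSize.y);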

COLOR_ATTACHMENTs - How to render to multiple textures as color attachments inside a Framebuffer Object?

I am trying to render to multiple textures as COLOR_ATTACHMENTs without success. All I get from displaying them is a black screen (with a red clear fill) meaning my texture is read but is 'empty'.
My pseudo code is: attach 3 textures to an FBO with texture indices 1, 2 and 3 and color attachments 0, 1 and 2 respectively. As a test case, I tried to render my scene to all 3 color attachments, so they are supposed to hold exactly the same data. Then I read one of those textures in shader pass 2 (with a sampler2D) and display it on a quad.
My original intent for those 2 extra color attachments is to use them as random data buffers using the GPU ping-pong technique. So far I just use them as texture clones for testing purposes.
When trying to read from GL_TEXTURE1 (COLOR_ATTACHMENT0) things go fine, but not from the other 2 (black screen).
The code :
// Texture indices - inside a 'myGlut' struct
GLenum skyboxTextureIndex = GL_TEXTURE0;
GLenum colorTextureIndex = GL_TEXTURE1;
unsigned int colorTextureIndexInt = 1;
GLenum depthTexture1Index = GL_TEXTURE2;
unsigned int depthTexture1IndexInt = 2;
GLenum depthTexture2Index = GL_TEXTURE3;
unsigned int depthTexture2IndexInt = 3;
//** Below is inside 'main()' **//
// Create frame buffer
myGlut.frameBuffer = glutils::createFrameBuffer();
// Create texture to hold color buffer
glActiveTexture(myGlut.colorTextureIndex);
glBindTexture(GL_TEXTURE_2D, myGlut.colorTexture);
myGlut.colorTexture = glutils::createTextureAttachment(myGlut.camera -> getRenderResizedWidthPx(), myGlut.camera -> getRenderResizedHeightPx());
glutils::bindTextureAttachment(GL_COLOR_ATTACHMENT0, myGlut.colorTexture);
// Create 1st texture to hold depth buffer wannabe :>
glActiveTexture(myGlut.depthTexture1Index);
glBindTexture(GL_TEXTURE_2D, myGlut.depthTexture1);
myGlut.depthTexture1 = glutils::createTextureAttachment(myGlut.camera -> getRenderResizedWidthPx(), myGlut.camera -> getRenderResizedHeightPx());
glutils::bindTextureAttachment(GL_COLOR_ATTACHMENT1, myGlut.depthTexture1);
// Create 2nd texture to hold depth buffer wannabe :>
glActiveTexture(myGlut.depthTexture2Index);
glBindTexture(GL_TEXTURE_2D, myGlut.depthTexture2);
myGlut.depthTexture2 = glutils::createTextureAttachment(myGlut.camera -> getRenderResizedWidthPx(), myGlut.camera -> getRenderResizedHeightPx());
glutils::bindTextureAttachment(GL_COLOR_ATTACHMENT2, myGlut.depthTexture2);
// Check FBO
if (!glutils::checkFBOStatus()) return 0;
With glutils:: functions
// Clear screen
void glutils::clearScreen (float r, float g, float b, float a) {
glClearColor(r, g, b, a);
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
}
// Bind select framebuffer
void glutils::bindFrameBuffer(int frameBuffer, int width, int height) {
glBindFramebuffer(GL_FRAMEBUFFER, frameBuffer);
glViewport(0, 0, width, height);
}
// Create frame buffer
GLuint glutils::createFrameBuffer() {
GLuint frameBuffer;
glGenFramebuffers(1, &frameBuffer);
glBindFramebuffer(GL_FRAMEBUFFER, frameBuffer);
return frameBuffer;
}
// Create a texture attachment
GLuint glutils::createTextureAttachment(int width, int height) {
GLuint texture;
glGenTextures(1, &texture);
glBindTexture(GL_TEXTURE_2D, texture);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, width, height, 0, GL_RGB, GL_UNSIGNED_BYTE, NULL);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
return texture;
}
// Bind a texture attachment to select framebuffer
void glutils::bindTextureAttachment (GLenum colorAttachment, GLuint texture) {
glFramebufferTexture2D(GL_FRAMEBUFFER, colorAttachment, GL_TEXTURE_2D, texture, 0);
}
// Check current frame buffer status
bool glutils::checkFBOStatus () {
if (glCheckFramebufferStatus(GL_FRAMEBUFFER) != GL_FRAMEBUFFER_COMPLETE) {
std::cerr << "##### ERROR : Frambuffer not complete... #####" << std::endl;
return false;
}
else return true;
}
Then the glut display func :
// Clear screen
glutils::clearScreen(1.f, 0.f, 0.f, 1.f);
// Bind to custom framebuffer
glutils::bindFrameBuffer(myGlut.frameBuffer, myGlut.camera -> getScreenWidthPx(), myGlut.camera -> getScreenHeightPx());
// Set draw context
GLuint drawBuffers[2];
drawBuffers[0] = GL_COLOR_ATTACHMENT0;
if (myGlut.depthTextureSwitch) {
    drawBuffers[1] = GL_COLOR_ATTACHMENT2;
} else {
    drawBuffers[1] = GL_COLOR_ATTACHMENT1;
}
glDrawBuffers(2, drawBuffers);
// Use main program and bind uniforms
glUseProgram(myGlut.theProgram);
myGlut.refreshUniformsPass_1();
// Draw quad to sample
glutils::drawQuad();
// Unbind custom framebuffer -> use default (screen)
glutils::unbindCurrentFrameBuffer(myGlut.camera -> getScreenWidthPx(), myGlut.camera -> getScreenHeightPx());
// Use secondary program and bind uniforms
glUseProgram(myGlut.theProgram2);
myGlut.refreshUniformsPass_2();
// Draw quad to apply texture to
glutils::drawQuad();
// Switch
myGlut.depthTextureSwitch = !myGlut.depthTextureSwitch;
// Display & loop
glutSwapBuffers();
glutPostRedisplay();
Relevant uniform bindings -> pass 1
glUniform1i(glGetUniformLocation(myGlut.theProgram, "depthTexture"), !myGlut.depthTextureSwitch ? myGlut.depthTexture2IndexInt : myGlut.depthTexture1IndexInt);
Relevant shader code -> Pass 1
layout (location = 0) out vec4 outputColor;
layout (location = 1) out vec4 outputDepth1;
layout (location = 2) out vec4 outputDepth2;
uniform sampler2D depthTexture;
void main() {
// ...
outputColor = someColor;
outputDepth1 = someColor;
outputDepth2 = someColor;
}
Relevant uniform bindings -> pass 2
glUniform1i(glGetUniformLocation(myGlut.theProgram2, "texFramebuffer"), myGlut.depthTextureSwitch ? myGlut.depthTexture1IndexInt : myGlut.depthTexture2IndexInt);
With relevant shader code -> pass 2
uniform sampler2D texFramebuffer;
out vec4 outputColor;
// ...
void main() {
outputColor = texture(texFramebuffer, vec2(gl_FragCoord.x / screenWidthPx * resRatio, gl_FragCoord.y / screenHeightPx * resRatio));
}
In a nutshell: my GL_TEXTURE0 holds the scene while GL_TEXTURE1 and GL_TEXTURE2 are black. Why?
I finally found the culprit. Because I bind the framebuffer inside the looped display() function, I need to bind the texture attachments as well after binding the FBO. Changing to
// Bind to custom framebuffer
glutils::bindFrameBuffer(myGlut.frameBuffer, myGlut.camera -> getScreenWidthPx(), myGlut.camera -> getScreenHeightPx());
// Bind to select attachments
glutils::bindTextureAttachment(GL_COLOR_ATTACHMENT0, myGlut.colorTexture);
if (!myGlut.depthTextureSwitch) glutils::bindTextureAttachment(GL_COLOR_ATTACHMENT1, myGlut.depthTexture1);
else glutils::bindTextureAttachment(GL_COLOR_ATTACHMENT1, myGlut.depthTexture2);
allowed me to render to all needed color attachments.