I am using OpenGL for the first time with GLFW, GLEW, and GLM. I've been more or less following a tutorial, but I have also diverged from it to focus on the aspects I'm interested in at the moment.
Currently, I'm trying to draw pixels to a texture and then display that on the screen as a fullscreen quad. However, the texture is always displaying as black and I'm unsure where I went wrong.
Initializing the texture here (as well as the fullscreen quad):
int InitializeRenderTarget(GameManager* gameManager)
{
// Vertex Array Object
GLuint vertexArrayID;
glGenVertexArrays(1, &vertexArrayID);
glBindVertexArray(vertexArrayID);
programID = LoadShaders("Source/Graphics/SimpleVertexShader.vert", "Source/Graphics/RenderTextureFragmentShader.frag");
// The texture we're going to render to
glGenTextures(1, &renderedTexture);
glBindTexture(GL_TEXTURE_2D, renderedTexture);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB,
gameManager->GRID_SIZE, gameManager->GRID_SIZE,
0, GL_RGB, GL_UNSIGNED_BYTE, 0);
glGenerateMipmap(GL_TEXTURE_2D);
// The fullscreen quad's VBO
static const GLfloat g_quad_vertex_buffer_data[] = {
-1.0f, -1.0f, 0.0f, //0.0f, 0.0f,
1.0f, -1.0f, 0.0f, //1.0f, 0.0f,
-1.0f, 1.0f, 0.0f, //0.0f, 1.0f,
-1.0f, 1.0f, 0.0f, //0.0f, 1.0f,
1.0f, -1.0f, 0.0f, //1.0f, 0.0f,
1.0f, 1.0f, 0.0f, //1.0f, 1.0f
};
glGenBuffers(1, &quad_vertexbuffer);
glBindBuffer(GL_ARRAY_BUFFER, quad_vertexbuffer);
glBufferData(GL_ARRAY_BUFFER, sizeof(g_quad_vertex_buffer_data), g_quad_vertex_buffer_data, GL_STATIC_DRAW);
texID = glGetUniformLocation(programID, "renderedTexture");
glEnable(GL_TEXTURE_2D);
return 0;
}
Drawing onto the texture here:
void RenderToTexture(GameManager* gameManager)
{
glBindTexture(GL_TEXTURE_2D, renderedTexture);
float* particleColors = gameManager->getParticleColors();
glTexSubImage2D(GL_TEXTURE_2D, 0, GL_RGBA,
gameManager->GRID_SIZE, gameManager->GRID_SIZE,
0, GL_RGB, GL_FLOAT, (GLvoid*)particleColors);
glGenerateMipmap(GL_TEXTURE_2D);
// Poor filtering
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR_MIPMAP_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
}
Drawing to screen here:
void RenderToScreen()
{
// Render to the screen
glBindFramebuffer(GL_FRAMEBUFFER, 0);
// Render on the whole framebuffer, complete from the lower left corner to the upper right
glViewport(100, 100, screenWidth-200, screenHeight-200);
// Clear the screen
glClearColor(0.0f, 0.0f, 0.4f, 0.0f);
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
// Use our shader
glUseProgram(programID);
// Bind our texture in Texture Unit 0
glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_2D, renderedTexture);
// Set our "renderedTexture" sampler to use Texture Unit 0
glUniform1i(texID, 0);
// 1st attribute buffer : vertices
glEnableVertexAttribArray(0);
glBindBuffer(GL_ARRAY_BUFFER, quad_vertexbuffer);
glVertexAttribPointer(
0, // attribute 0. No particular reason for 0, but must match the layout in the shader.
3, // size
GL_FLOAT, // type
GL_FALSE, // normalized?
0, // stride
(void*)0 // array buffer offset
);
// Draw the triangles !
glDrawArrays(GL_TRIANGLES, 0, 6); // 2*3 indices starting at 0 -> 2 triangles
glDisableVertexAttribArray(0);
// Swap buffers
glfwSwapBuffers(window);
glfwPollEvents();
}
The main loop:
int main(void)
{
if (InitializeWindow() != 0) {
fprintf(stderr, "Failed to open GLFW window.\n");
getchar();
glfwTerminate();
return -1;
}
GameManager* gameManager = gameManager->getInstance();
if (InitializeRenderTarget(gameManager) != 0) {
fprintf(stderr, "Failed to initialize render target.\n");
getchar();
glfwTerminate();
return -1;
}
do {
gameManager->Update();
RenderToTexture(gameManager);
RenderToScreen();
} // Check if the ESC key was pressed or the window was closed
while (glfwGetKey(window, GLFW_KEY_ESCAPE) != GLFW_PRESS &&
glfwWindowShouldClose(window) == 0);
CleanUp();
return 0;
}
Vertex shader:
#version 460 core
// Input vertex data, different for all executions of this shader.
layout(location = 0) in vec3 vertexPosition_modelspace;
// Output data ; will be interpolated for each fragment.
out vec2 UV;
void main(){
gl_Position = vec4(vertexPosition_modelspace, 1);
UV = (vertexPosition_modelspace.xy+vec2(1,1))/2.0;
}
and finally fragment shader:
#version 460 core
in vec2 UV;
out vec4 color;
uniform sampler2D renderedTexture;
void main(){
color = texture(renderedTexture, UV);
//color = vec4(UV.x, UV.y, 0, 1);
}
I'm fairly certain that I am drawing the quad to the screen correctly as I used the commented out line in the fragment shader to make a nice colorful gradient effect.
However, I might not be binding the texture correctly.
I confirmed that the particleColors array does contain float values correctly. I've tried setting all the reds to 1.0f, and all the values to 1.0f, as well as all values to 255.0f (just in case).
I tried adding lines like:
glEnable(GL_TEXTURE_2D);
glGenerateMipmap(GL_TEXTURE_2D);
// Poor filtering
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR_MIPMAP_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
based on some other posts I saw but I have yet to see any visible effect.
I also tried changing how the vertex and fragment shaders calculate and use the UVs, but that only made things worse: the gradient effect wouldn't even appear at that point.
Hopefully you can figure out what I've missed. Thank you for any help.
You're using glTexSubImage2D incorrectly. The 3rd and 4th arguments are the offsets (xoffset, yoffset) and the 5th and 6th arguments are the size (width, height). Your call
glTexSubImage2D(GL_TEXTURE_2D, 0, GL_RGBA, gameManager->GRID_SIZE, gameManager->GRID_SIZE, 0, GL_RGB, GL_FLOAT, (GLvoid*)particleColors);
needs to be:
glTexSubImage2D(
GL_TEXTURE_2D, 0,
0, 0, gameManager->GRID_SIZE, gameManager->GRID_SIZE,
GL_RGB, GL_FLOAT, (GLvoid*)particleColors);
glEnable(GL_TEXTURE_2D); only has meaning when using the fixed function pipeline and no shader program.
I have figured it out. The correction by Rabbid76 was a big first step.
Afterwards I experimented with my pixel array, and after making it smaller I found that I did have a small line of pixels at the bottom of my texture. More poking around revealed that I had actually filled my pixel data incorrectly. Fixing the logic error in my loop filled the texture properly.
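For reference, here is a minimal sketch of how a tightly packed RGB float array for a GRID_SIZE x GRID_SIZE texture is typically filled; the index math is the part my loop got wrong, and the names here are illustrative rather than my exact code:
#include <vector>
// 3 floats (R, G, B) per pixel, rows stored one after another (row-major).
std::vector<float> particleColors(GRID_SIZE * GRID_SIZE * 3);
for (int y = 0; y < GRID_SIZE; ++y) {
    for (int x = 0; x < GRID_SIZE; ++x) {
        int base = (y * GRID_SIZE + x) * 3; // pixel index times 3 channels
        particleColors[base + 0] = 1.0f;    // red
        particleColors[base + 1] = 0.0f;    // green
        particleColors[base + 2] = 0.0f;    // blue
    }
}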
I'm trying to map a 2D texture onto a triangle, but currently I can only get a triangle with a gradient colour and no texture on it. This means my texture function in GLSL always returns vec4(1,1,1,1), even though my textCoords are working. How should I fix it? Any suggestions would be helpful!
By the way, I have been working on this for 3 days.
In my Texture class:
Constructor:
Texture::Texture(const std::string& fileName){
int width, height, numComponents;
//float* imageData = stbi_loadf(fileName.c_str(), &width, &height, &numComponents, 4);
unsigned char* imageData = stbi_load(fileName.c_str(), &width, &height, &numComponents, 4);
if(imageData == NULL){
cerr << "Texture loading failed for "<<fileName<< endl;
}
glGenTextures(1, &m_texture);
glBindTexture(GL_TEXTURE_2D, m_texture);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexImage2D(GL_TEXTURE_2D,0,GL_RGBA8,width, height, 0, GL_RGBA, GL_UNSIGNED_BYTE, imageData);
stbi_image_free(imageData);
}
Texture binding function:
void Texture::Bind(){
glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_2D, m_texture);
}
In my main.cpp:
m_shader.generateProgramObject();
m_shader.attachVertexShader( getAssetFilePath("VertexShader.vs").c_str() );
m_shader.attachFragmentShader( getAssetFilePath("FragmentShader.fs").c_str() );
m_shader.link();
// texture created here
texture = Texture("Assets/bricks.jpg");
// enable vertex attribute indices
glGenVertexArrays(1, &m_vao_triangle);
glBindVertexArray(m_vao_triangle);
// Enable the attribute index location for "position" when rendering.
GLint positionAttribLocation = m_shader.getAttribLocation( "position" );
glEnableVertexAttribArray(positionAttribLocation);
GLint textCoordLocation = m_shader.getAttribLocation( "atextCoord" );
glEnableVertexAttribArray(textCoordLocation);
// Restore defaults
glBindVertexArray(0);
CHECK_GL_ERRORS;
Upload triangle data to buffer:
vec3 triangleVertices[] = {
// Construct equilateral triangle
vec3(0.0f, 0.0f, 0.0f),
vec3(0.25f, 1.0f, 0.0),
vec3(0.5f, 0.0f, 0.0f)
};
vec2 textCoords[] = {
vec2(1.0f, 0.0f),
vec2(0.25f, 1.0f),
vec2(0.5f, 0.0f)};
// Generate a vertex buffer object to hold the triangle's vertex data.
glGenBuffers(1, &m_vbo_triangle);
//-- Upload triangle vertex data to the vertex buffer:
glBindBuffer(GL_ARRAY_BUFFER, m_vbo_triangle);
glBufferData(GL_ARRAY_BUFFER, sizeof(triangleVertices), triangleVertices,
GL_STATIC_DRAW);
glBindBuffer(GL_ARRAY_BUFFER, 0);
CHECK_GL_ERRORS;
//====generate buffer for holding texture coordinates====
glGenBuffers(1, &m_uv_triangle);
glBindBuffer(GL_ARRAY_BUFFER, m_uv_triangle);
glBufferData(GL_ARRAY_BUFFER, sizeof(textCoords), textCoords,
GL_STATIC_DRAW);
// Unbind the target GL_ARRAY_BUFFER, now that we are finished using it.
glBindBuffer(GL_ARRAY_BUFFER, 0);
CHECK_GL_ERRORS;
Map buffer data to shader:
glBindVertexArray(m_vao_triangle);
glBindBuffer(GL_ARRAY_BUFFER, m_vbo_triangle);
GLint positionAttribLocation = m_shader.getAttribLocation( "position" );
glVertexAttribPointer(positionAttribLocation, 3, GL_FLOAT, GL_FALSE, 0, nullptr);
glBindBuffer(GL_ARRAY_BUFFER, m_uv_triangle);
GLint textCoordLocation = m_shader.getAttribLocation( "atextCoord" );
glVertexAttribPointer(textCoordLocation,2, GL_FLOAT, GL_FALSE, 0, nullptr);
//-- Unbind target, and restore default values:
glBindBuffer(GL_ARRAY_BUFFER, 0);
glBindVertexArray(0);
CHECK_GL_ERRORS;
Upload uniforms to shader:
m_shader.enable();
...
GLint uniformLocation_diffuse = m_shader.getUniformLocation("diffuse");
glUniform1i(uniformLocation_diffuse, 0);
CHECK_GL_ERRORS;
m_shader.disable();
CHECK_GL_ERRORS;
And in my draw function:
glBindVertexArray(m_vao_triangle);
// below I tried, but didn't work
// glClear(GL_STENCIL_BUFFER_BIT);
// glEnable(GL_BLEND);
// glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA);
// glEnable(GL_DEPTH_TEST);
texture.Bind();
// do these below:
// glActiveTexture(GL_TEXTURE0);
// glBindTexture(GL_TEXTURE_2D, texture.m_texture);
m_shader.enable();
glDrawArrays(GL_TRIANGLES, 0, 3);
m_shader.disable();
// Restore defaults
glBindVertexArray(0);
CHECK_GL_ERRORS;
Here I will also attach my shaders
vertex shader:
#version 330
in vec3 position;
in vec2 atextCoord;
uniform mat4 transform;
out vec2 textCoord;
void main() {
gl_Position = transform * vec4(position, 1.0);
textCoord = atextCoord;
}
And my fragment shader:
#version 330
uniform sampler2D diffuse;
out vec4 fragColor;
in vec2 textCoord;
void main() {
fragColor = vec4(texture(diffuse, textCoord).rgb,1.0) * vec4(textCoord,0.0,1.0);
// texture(...) shows vec4(1,1,1,1)
// the gradient colour only proves my textCoord is working
// fragColor = texture(diffuse, textCoord); <- only shows a default texture
}
Here is the running result:
Here is the texture image:
I found one way to make it work.
I copied every line of the Texture constructor and pasted it into draw(), instead of calling texture.Bind().
It looks like creating the texture right before drawing the geometry works.
But I would still like to know why it behaves like that; for coding style, I would still like to keep this code in the Texture class. Could you explain what was happening before?
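For reference, this is roughly what the workaround looks like: the constructor code inlined into draw(). Reloading the image and recreating the texture every frame is obviously wasteful (and leaks a texture per frame as written); it is only a sketch to show that creating the texture at draw time works:
void draw() { // sketch: whatever class owns m_vao_triangle and m_shader
    glBindVertexArray(m_vao_triangle);

    // Same code as the Texture constructor, pasted here.
    int width, height, numComponents;
    unsigned char* imageData = stbi_load("Assets/bricks.jpg", &width, &height, &numComponents, 4);
    GLuint tex;
    glGenTextures(1, &tex);
    glBindTexture(GL_TEXTURE_2D, tex);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
    glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA8, width, height, 0, GL_RGBA, GL_UNSIGNED_BYTE, imageData);
    stbi_image_free(imageData);

    m_shader.enable();
    glDrawArrays(GL_TRIANGLES, 0, 3);
    m_shader.disable();

    glBindVertexArray(0);
    CHECK_GL_ERRORS;
}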
It looks like this right now:
I am really having a nightmare trying to achieve what I need in OpenGL ES 2.0.
Before posting the code, let me explain what I need.
I have a 2D texture rendered with a fragment shader. On top of the texture I want to draw a red line. I am able to draw the line, but coloring it red is not working.
Shader declaration:
static const char s_v_shader[] =
"attribute vec4 vPosition; \n"
"attribute vec2 my_Texcoor; \n"
"uniform mat4 u_TransMatrix; \n"
"varying vec2 vTexcoor; \n"
"void main() \n"
"{ \n"
" vTexcoor = my_Texcoor; \n"
" gl_Position = u_TransMatrix*vPosition; \n"
"} \n";
static const char s_f_shader[] =
"precision mediump float;\n"
"uniform sampler2D my_Sampler; \n"
"varying vec2 vTexcoor; \n"
"void main() \n"
"{ \n"
" vec4 tex = texture2D(my_Sampler, vTexcoor); \n"
" gl_FragColor = tex; \n"
"} \n";
Onto the texture I am rendering video frames from a camera in an infinite loop.
Before rendering the video, I set up the coordinates of the 2D texture with the code below.
Now I will explain my code, starting from the main function.
main()
{
const GLfloat vertices[][2] = {
{ -1.0f, -1.0f},
{ 1.0f, -1.0f},
{ -1.0f, 1.0f},
{ 1.0f, 1.0f}
};
const GLfloat texcoords[][2] = {
{ 0.0f, 1.0f},
{ 1.0f, 1.0f},
{ 0.0f, 0.0f},
{ 1.0f, 0.0f}
};
GLfloat transformMatrix[16] =
{
1.0f, 0.0f, 0.0f, 0.0f,
0.0f, 1.0f, 0.0f, 0.0f,
0.0f, 0.0f, 1.0f, 0.0f,
0.0f, 0.0f, 0.0f, 1.0f
};
// setup OpenGl environment......
Setup_coordinates()
}
Setup_coordinates()
{
LoadShaders(s_v_shader, s_f_shader);
-- Complete function defined below
// By now I should be using shader program.
// Grab location of shader attributes.
GLint locVertices = glGetAttribLocation(programHandle, "vPosition");
GLint locTexcoord = glGetAttribLocation(programHandle, "my_Texcoor");
// Transform Matrix is uniform for all vertices here.
GLint locTransformMat = glGetUniformLocation(programHandle, "u_TransMatrix");
GLint locSampler = glGetUniformLocation(programHandle, "my_Sampler");
/* Create the texture. */
glGenTextures(1, &gTexObj);
glBindTexture(GL_TEXTURE_2D, gTexObj);
if (gTexObj == 0)
{
printf("Could not load the texture \n");
return -1;
}
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
glUniformMatrix4fv(locTransformMat, 1, GL_FALSE, transformMatrix);
glUniform1i(locSampler, 0);
glClearColor(0.0f, 0.5f, 0.0f, 0.0f);
glClear(GL_COLOR_BUFFER_BIT);
while(1) -- Infinite loop to render video frames on 2D texture and draw red color line.
{
// enable vertex arrays to push the data.
glEnableVertexAttribArray(locVertices);
glEnableVertexAttribArray(locTexcoord);
// set data in the arrays.
glVertexAttribPointer(locVertices, 2, GL_FLOAT, GL_FALSE, 0, &vertices[0][0]);
glVertexAttribPointer(locTexcoord, 2, GL_FLOAT, GL_FALSE, 0, &texcoords[0][0]);
Render video frames logic goes here...................................
Each frame of video is absolutely rendering fine.
glDrawArrays(GL_TRIANGLE_STRIP, 0, 4);
Now comes the tricky part: drawing the line and coloring it red.
float red_left_1[] =
{
-0.85f, -0.9f, -0.6f, -0.5f,
};
glVertexAttribPointer ( 1, 2, GL_FLOAT, GL_FALSE, 0, red_left_1 );
glEnableVertexAttribArray (1 );
glDrawArrays ( GL_LINES , 0, 2 );
glLineWidth( width_test );
}
}
void LoadShaders(const char * vShader, const char * pShader)
{
vertShaderNum = glCreateShader(GL_VERTEX_SHADER);
pixelShaderNum = glCreateShader(GL_FRAGMENT_SHADER);
if (CompileShader(vShader, vertShaderNum) == 0)
{
printf("%d: PS compile failed.\n", __LINE__);
return;
}
if (CompileShader(pShader, pixelShaderNum) == 0)
{
printf("%d: VS compile failed.\n", __LINE__);
return;
}
programHandle = glCreateProgram();
glAttachShader(programHandle, vertShaderNum);
glAttachShader(programHandle, pixelShaderNum);
// Bind vPosition to attribute 0
glBindAttribLocation ( programHandle, 0, "vPosition" );
glLinkProgram(programHandle);
// Check if linking succeeded.
GLint linked = 0;
glGetProgramiv(programHandle, GL_LINK_STATUS, &linked);
if (!linked)
{
printf("%d: Link failed.\n", __LINE__);
// Retrieve error buffer size.
GLint errorBufSize, errorLength;
glGetProgramiv(programHandle, GL_INFO_LOG_LENGTH, &errorBufSize);
char * infoLog = (char*)malloc(errorBufSize * sizeof (char) + 1);
if (infoLog)
{
// Retrieve error.
glGetProgramInfoLog(programHandle, errorBufSize, &errorLength, infoLog);
infoLog[errorBufSize] = '\0';
fprintf(stderr, "%s", infoLog);
free(infoLog);
}
return;
}
glClearColor(0.0f, 0.0f, 0.0f, 0.0f);
glUseProgram(programHandle);
}
Most people suggested declaring one more fragment shader like the one above, but replacing uniform sampler2D my_Sampler with uniform vec4 color:
void main()
{
gl_FragColor = color;
}
Then switch between these two shader programs with glUseProgram: one while showing the texture, the other while drawing the colored lines.
I tried it and have absolutely given up, as switching shader programs for drawing the lines is not working for me.
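For reference, this is roughly how that suggested switching is usually wired up (a sketch only: colorProgramHandle is an assumed second program linked from the same vertex shader and the color fragment shader above, and it is exactly this part that is not working for me). Note that uniforms are per-program state, so u_TransMatrix has to be set again for the second program after glUseProgram:
// Draw the video frame with the texturing program.
glUseProgram(programHandle);
glVertexAttribPointer(locVertices, 2, GL_FLOAT, GL_FALSE, 0, &vertices[0][0]);
glVertexAttribPointer(locTexcoord, 2, GL_FLOAT, GL_FALSE, 0, &texcoords[0][0]);
glEnableVertexAttribArray(locVertices);
glEnableVertexAttribArray(locTexcoord);
glDrawArrays(GL_TRIANGLE_STRIP, 0, 4);

// Switch to the solid-color program for the red line.
glUseProgram(colorProgramHandle);                                 // assumed second program
GLint locLinePos   = glGetAttribLocation(colorProgramHandle, "vPosition");
GLint locLineTrans = glGetUniformLocation(colorProgramHandle, "u_TransMatrix");
GLint locColor     = glGetUniformLocation(colorProgramHandle, "color");
glUniformMatrix4fv(locLineTrans, 1, GL_FALSE, transformMatrix);   // per-program uniform
glUniform4f(locColor, 1.0f, 0.0f, 0.0f, 1.0f);                    // red
glVertexAttribPointer(locLinePos, 2, GL_FLOAT, GL_FALSE, 0, red_left_1);
glEnableVertexAttribArray(locLinePos);
glDrawArrays(GL_LINES, 0, 2);

// Switch back before rendering the next video frame.
glUseProgram(programHandle);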
Here is code for generating a colored 1x1 texture that you can use for your line (goes in your main or Setup_coordinates). With this solution you won't need another shader.
GLuint lineTexture;
glGenTextures(1, &lineTexture);
unsigned char red[4] = { 255, 0, 0, 255};
glBindTexture(GL_TEXTURE_2D, lineTexture);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, 1, 1, 0, GL_RGBA, GL_UNSIGNED_BYTE, red);
glBindTexture(GL_TEXTURE_2D, 0);
Before calling glDrawArrays, use this to switch to the correct texture.
glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_2D, <lineTexture or gTexObj>);
glUniform1i(locSampler, 0);
A more general solution (that I personally implement in my OpenGL projects) is to create a white texture, add a color uniform to your shader and multiply the uniform with the texture2D return value. With this method you can create different colored lines and graphics from the same white texture, only changing the color uniform. For the video frames, you would send in a white color and the pixels will remain unchanged. This will require very few changes to your code, I'm sure you can figure it out if you think it sounds better. :)
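A sketch of what that modified fragment shader could look like, written in the same string-literal style as s_f_shader above (the uniform name u_Color is an assumption, pick whatever fits your code):
static const char s_f_shader_tinted[] =
    "precision mediump float;                           \n"
    "uniform sampler2D my_Sampler;                      \n"
    "uniform vec4 u_Color;                              \n"
    "varying vec2 vTexcoor;                             \n"
    "void main()                                        \n"
    "{                                                  \n"
    "    // (1,1,1,1) leaves the video frame unchanged; \n"
    "    // (1,0,0,1) over the white texture gives red. \n"
    "    gl_FragColor = texture2D(my_Sampler, vTexcoor) * u_Color; \n"
    "}                                                  \n";
Set u_Color to (1,1,1,1) when drawing the video quad and to (1,0,0,1) when drawing the line over the 1x1 white texture.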
I have an HDR radiance environment map as a LatLong 2D texture image that I want to convert to a cubemap. I do this by loading the HDR map as a 2D float texture, projecting it onto a cube and then rendering the scene inside this cube from 6 different directions, directly filling a cubemap by passing the respective cubemap faces as the texture target of glFramebufferTexture2D.
The generated cubemap is a floating point cubemap generated as follows:
glTexParameteri(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_WRAP_R, GL_CLAMP_TO_EDGE);
for (unsigned int i = 0; i < 6; ++i)
{
glTexImage2D(GL_TEXTURE_CUBE_MAP_POSITIVE_X + i, 0, GL_RGB, width, height, 0, GL_RGB, GL_FLOAT, NULL);
}
if (mipmap)
glGenerateMipmap(GL_TEXTURE_CUBE_MAP);
Note that the type parameter is GL_FLOAT so it should properly accept HDR values. The HDR image is loaded using stb_image.h as follows:
if (stbi_is_hdr(path.c_str()))
{
stbi_set_flip_vertically_on_load(true);
int width, height, nrComponents;
float *data = stbi_loadf(path.c_str(), &width, &height, &nrComponents, 0);
if (data)
{
GLenum format;
if (nrComponents == 3)
format = GL_RGB;
else if (nrComponents == 4)
format = GL_RGBA;
Bind();
glTexImage2D(GL_TEXTURE_2D, 0, format, width, height, 0, format, GL_FLOAT, data);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
if (Mipmapping)
glGenerateMipmap(GL_TEXTURE_2D);
Unbind();
stbi_image_free(data);
}
}
I also tried iterating over this array and retrieving the max float value to see if the HDR data loaded correctly; the highest float value of my current HDR map is 288, which is far above 1.0, as I would expect.
Here's where things get tricky: given the input texture (an HDR float map) and the output cubemap (also float), I'd expect the cubemap faces to be treated as floating-point textures and to receive the HDR values directly. However, the cubemap appears to be LDR: the moment I add tonemapping (with a variable exposure) I get quite a lot of banding and I'm clearly missing the precision of HDR, as the following image shows (with an exposure of ~7.5).
I'm not sure whether there's something I'm missing, and I couldn't find much in OpenGL's docs about rendering directly to floating-point framebuffers; I assume it is possible, as it wouldn't make sense if it weren't.
For completeness' sake, here is the relevant code that generates the cubemap from the LatLong image (with renderCustomCommand rendering the cube with proper samplers set):
glGenFramebuffers(1, &m_FramebufferCubemap);
glGenRenderbuffers(1, &m_CubemapDepthRBO);
Camera faceCameras[6] = {
Camera(position, vec3( 1.0f, 0.0f, 0.0f), vec3(0.0f, -1.0f, 0.0f)),
Camera(position, vec3(-1.0f, 0.0f, 0.0f), vec3(0.0f, -1.0f, 0.0f)),
Camera(position, vec3( 0.0f, 1.0f, 0.0f), vec3(0.0f, 0.0f, 1.0f)),
Camera(position, vec3( 0.0f, -1.0f, 0.0f), vec3(0.0f, 0.0f,- 1.0f)),
Camera(position, vec3( 0.0f, 0.0f, 1.0f), vec3(0.0f, -1.0f, 0.0f)),
Camera(position, vec3( 0.0f, 0.0f, -1.0f), vec3(0.0f, -1.0f, 0.0f))
};
glBindFramebuffer(GL_FRAMEBUFFER, m_FramebufferCubemap);
glBindRenderbuffer(GL_RENDERBUFFER, m_CubemapDepthRBO);
glRenderbufferStorage(GL_RENDERBUFFER, GL_DEPTH_COMPONENT24, width, height);
glFramebufferRenderbuffer(GL_FRAMEBUFFER, GL_DEPTH_ATTACHMENT, GL_RENDERBUFFER, m_CubemapDepthRBO);
glViewport(0, 0, width, height);
glBindFramebuffer(GL_FRAMEBUFFER, m_FramebufferCubemap);
for (unsigned int i = 0; i < 6; ++i)
{
Camera *camera = &faceCameras[i];
camera->SetPerspective(90.0f, width/height, 0.1f, 100.0f);
glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_CUBE_MAP_POSITIVE_X + i, cubeTarget->ID, 0);
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
for (unsigned int i = 0; i < renderCommands.size(); ++i)
{
renderCustomCommand(&renderCommands[i], camera);
}
}
glBindFramebuffer(GL_FRAMEBUFFER, 0);
glViewport(0, 0, m_RenderSize.x, m_RenderSize.y);
And here's the code for sampling the LatLong 2D image -> cube:
#version 330 core
out vec4 FragColor;
in vec3 wPos;
#include sample.glsl
uniform sampler2D environment;
void main()
{
vec2 uv = SampleLatLong(normalize(wPos));
vec3 color = texture(environment, uv).rgb;
FragColor = vec4(color, 1.0);
}
Note that the LatLong-to-cubemap conversion itself goes well: the 2D environment is properly rendered onto the cubemap, but it is clamped to the [0,1] range the moment it's rendered as a skybox, as if it lost its floating-point data somewhere along the way.
I've been stuck on this problem for a while now and was hoping one of you could shed some light on it (is it even possible to render directly to float cubemaps like this?). Thank you.
EDIT: Here is the same picture with a high exposure applied in Photoshop; as you can see, a lot of detail emerges that I've lost in the renderer.
The third parameter of your glTexImage2D call needs to be GL_RGB16F or GL_RGB32F.
It specifies the internal format.
The two parameters GL_RGB and GL_FLOAT at the end only describe the memory layout of the optional data pointer; they do not influence the internal format.
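Applied to the cubemap allocation in the question, that change would look something like this (GL_RGB16F shown; use GL_RGB32F if you need full float precision):
for (unsigned int i = 0; i < 6; ++i)
{
    // GL_RGB16F as the internal format keeps the HDR values; the trailing
    // GL_RGB / GL_FLOAT only describe the layout of the (null) source data.
    glTexImage2D(GL_TEXTURE_CUBE_MAP_POSITIVE_X + i, 0, GL_RGB16F, width, height, 0, GL_RGB, GL_FLOAT, NULL);
}
The same likely applies to the 2D LatLong upload earlier, which also passes format (GL_RGB or GL_RGBA) as its internal format.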
I am trying to do a basic shadow map, but for some reason it doesn't render properly.
Video of the Problem
I render the house using a flat shader:
int shadowMapWidth = WINDOW_SIZE_X * (int)SHADOW_MAP_RATIO;
int shadowMapHeight = WINDOW_SIZE_Y * (int)SHADOW_MAP_RATIO;
// Rendering into the shadow texture.
glActiveTexture(GL_TEXTURE0);
CALL_GL(glBindTexture(GL_TEXTURE_2D, shadowTexture));
// Bind the framebuffer.
CALL_GL(glBindFramebuffer(GL_FRAMEBUFFER, shadowFBO));
//Clear it
CALL_GL(glClear(GL_DEPTH_BUFFER_BIT));
CALL_GL(glViewport(0, 0, shadowMapWidth, shadowMapHeight));
CALL_GL(glColorMask(GL_FALSE, GL_FALSE, GL_FALSE, GL_FALSE));
//Render stuff
flatShader.use();
flatShader["baseColor"] = glm::vec4(1.0f,1.0f,1.0f,1.0f);
flatShader["pvm"] = projectionMatrix*pointLight.viewMatrix*cursor.modelMatrix;
cursor.draw(); //binds the vao and draws
// Revert for the scene.
CALL_GL(glBindFramebuffer(GL_FRAMEBUFFER, 0));
CALL_GL(glColorMask(GL_TRUE, GL_TRUE, GL_TRUE, GL_TRUE));
CALL_GL(glViewport(0, 0, WINDOW_SIZE_X, WINDOW_SIZE_Y));
Notice that I only render the house. I don't render the floor in the depth-buffer pass.
Following this I render the quad that represents the floor using the following shader pair:
/* [VERT] */
#version 330
in vec3 in_Position;
in vec2 in_TexCoord;
uniform mat4 shadowMatrix;
uniform mat4 mvp;
out vec2 UV;
out vec4 shadowProj;
void main()
{
gl_Position = mvp*vec4(in_Position,1.0);
shadowProj = shadowMatrix*vec4(in_Position,1.0);
UV = in_TexCoord;
}
And the Fragment Shader:
/* [FRAG] */
#version 330
in vec2 UV;
in vec4 shadowProj;
out vec4 fragColor;
uniform sampler2D texturex;
uniform sampler2DShadow shadowMap;
void main()
{
fragColor = vec4(texture(texturex, UV).rgb,1);
float shadow = 1.0;
shadow = textureProj(shadowMap,shadowProj);
fragColor *= shadow;
}
I then draw the house again in color and ... the floor:
textureShader.use();
glUniform1i(baseImageLoc, 0); //Texture unit 0 is for base images.
glUniform1i(shadowMapLoc, 1); //Texture unit 1 is for shadow maps.
glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_2D, floorTexture);
glActiveTexture(GL_TEXTURE1);
glBindTexture(GL_TEXTURE_2D, shadowTexture);
textureShader["shadowMatrix"] = projectionMatrix*pointLight.viewMatrix*floorMatrix;
textureShader["mvp"] = projectionMatrix*viewMatrix*floorMatrix;
CALL_GL(glBindVertexArray(floorVAO));
CALL_GL(glDrawArrays(GL_TRIANGLES,0,18));
glfwSwapBuffers();
Has anybody seen this behavior before? Any idea what could be wrong? By the way, the light's coordinates place it directly on top of the house so the shadow should be directly below the house on the floor (but it ends up sideways).
For reference here is how I generate the shadow FBO:
int shadowMapWidth = WINDOW_SIZE_X * (int)SHADOW_MAP_RATIO;
int shadowMapHeight = WINDOW_SIZE_Y * (int)SHADOW_MAP_RATIO;
glGenTextures(1, &shadowTexture);
glBindTexture(GL_TEXTURE_2D, shadowTexture);
glTexImage2D(GL_TEXTURE_2D, 0, GL_DEPTH_COMPONENT, shadowMapWidth, shadowMapHeight, 0, GL_DEPTH_COMPONENT, GL_UNSIGNED_BYTE, 0);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_COMPARE_MODE, GL_COMPARE_REF_TO_TEXTURE);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_COMPARE_FUNC, GL_LESS);
glTexImage2D(GL_TEXTURE_2D,0,GL_DEPTH_COMPONENT,shadowMapWidth,shadowMapHeight,0,GL_DEPTH_COMPONENT,GL_FLOAT,NULL);
glTexParameteri(GL_TEXTURE_2D,GL_TEXTURE_COMPARE_MODE,GL_COMPARE_R_TO_TEXTURE);
glBindTexture(GL_TEXTURE_2D, 0); //unbind the texture
glGenFramebuffers(1, &shadowFBO);
glBindFramebuffer(GL_FRAMEBUFFER, shadowFBO);
glFramebufferTexture2D(GL_FRAMEBUFFER, GL_DEPTH_ATTACHMENT, GL_TEXTURE_2D, shadowTexture, 0);
glDrawBuffer(GL_NONE);
glReadBuffer(GL_NONE);
if (glCheckFramebufferStatus(GL_FRAMEBUFFER) != GL_FRAMEBUFFER_COMPLETE)
{ printf("GL_FRAMEBUFFER_COMPLETE error 0x%x", glCheckFramebufferStatus(GL_FRAMEBUFFER)); }
glClearDepth(1.0f); glEnable(GL_DEPTH_TEST);
// Needed when rendering the shadow map. This will avoid artifacts.
glPolygonOffset(1.0f, 0.0f); glBindFramebuffer(GL_FRAMEBUFFER, 0);
//to convert clip-space coordinates from -1 ~ 1 to texture coordinates in 0 ~ 1
GLfloat biasMatrixf[] = {
0.5f, 0.0f, 0.0f, 0.0f,
0.0f, 0.5f, 0.0f, 0.0f,
0.0f, 0.0f, 0.5f, 0.0f,
0.5f, 0.5f, 0.5f, 1.0f };
biasMatrix = glm::make_mat4(biasMatrixf);
It looks like you forgot to multiply your shadow matrix by the bias matrix.
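A minimal sketch of where that multiplication would go in the floor-drawing code above, using the same variable names as in the question (biasMatrix being the glm matrix built from biasMatrixf):
// Bias the light-space projection so the projected coordinates land in [0, 1],
// which is what textureProj with a sampler2DShadow expects for the lookup.
textureShader["shadowMatrix"] = biasMatrix * projectionMatrix * pointLight.viewMatrix * floorMatrix;
Without the bias, shadowProj stays in the [-1, 1] clip-space range, so the depth comparison samples the wrong part of the shadow map.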