I have an HDR radiance environment map as a LatLong 2D texture image that I want to convert to a cubemap. I do this by loading the HDR map as a 2D float texture, projecting it onto a cube, and then rendering the scene inside this cube from 6 different directions, directly filling a cubemap by passing the respective cubemap faces as the texture target to glFramebufferTexture2D.
The cubemap is meant to be a floating point cubemap and is set up as follows:
glTexParameteri(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_WRAP_R, GL_CLAMP_TO_EDGE);
for (unsigned int i = 0; i < 6; ++i)
{
glTexImage2D(GL_TEXTURE_CUBE_MAP_POSITIVE_X + i, 0, GL_RGB, width, height, 0, GL_RGB, GL_FLOAT, NULL);
}
if (mipmap)
glGenerateMipmap(GL_TEXTURE_CUBE_MAP);
Note that the type parameter is GL_FLOAT so it should properly accept HDR values. The HDR image is loaded using stb_image.h as follows:
if (stbi_is_hdr(path.c_str()))
{
stbi_set_flip_vertically_on_load(true);
int width, height, nrComponents;
float *data = stbi_loadf(path.c_str(), &width, &height, &nrComponents, 0);
if (data)
{
GLenum format = GL_RGB; // sensible default in case nrComponents is 1 or 2
if (nrComponents == 3)
format = GL_RGB;
else if (nrComponents == 4)
format = GL_RGBA;
Bind();
glTexImage2D(GL_TEXTURE_2D, 0, format, width, height, 0, format, GL_FLOAT, data);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
if (Mipmapping)
glGenerateMipmap(GL_TEXTURE_2D);
Unbind();
stbi_image_free(data);
}
}
I also tried iterating over this array and retrieving the maximum float value, to verify that the HDR image loaded correctly; the highest value in my current HDR map is 288, which is far above 1.0, exactly as I would expect.
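In essence, the check looked like this (a reconstructed sketch based on the variables in the loading code above, not the exact original code):
float maxValue = 0.0f;
for (int i = 0; i < width * height * nrComponents; ++i)
{
    if (data[i] > maxValue)
        maxValue = data[i];
}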
Here's where things get tricky. Given the input texture (an HDR float map) and the output cubemap (also float), I'd expect the cubemap faces to be treated as floating point textures and to receive the HDR values directly. However, the cubemap appears LDR: the moment I add tonemapping (with a variable exposure) I get quite a lot of banding, and I'm clearly missing the precision of HDR, as the following image shows (with an exposure of ~7.5).
I'm not sure whether there's something I'm missing, and I couldn't find much in OpenGL's documentation about rendering directly to floating point framebuffers; I assume this is possible, as it wouldn't make sense if it weren't.
For completeness' sake, here is the relevant code that generates the cubemap from the LatLong image (with renderCustomCommand rendering the cube with proper samplers set):
glGenFramebuffers(1, &m_FramebufferCubemap);
glGenRenderbuffers(1, &m_CubemapDepthRBO);
Camera faceCameras[6] = {
Camera(position, vec3( 1.0f, 0.0f, 0.0f), vec3(0.0f, -1.0f, 0.0f)),
Camera(position, vec3(-1.0f, 0.0f, 0.0f), vec3(0.0f, -1.0f, 0.0f)),
Camera(position, vec3( 0.0f, 1.0f, 0.0f), vec3(0.0f, 0.0f, 1.0f)),
Camera(position, vec3( 0.0f, -1.0f, 0.0f), vec3(0.0f, 0.0f, -1.0f)),
Camera(position, vec3( 0.0f, 0.0f, 1.0f), vec3(0.0f, -1.0f, 0.0f)),
Camera(position, vec3( 0.0f, 0.0f, -1.0f), vec3(0.0f, -1.0f, 0.0f))
};
glBindFramebuffer(GL_FRAMEBUFFER, m_FramebufferCubemap);
glBindRenderbuffer(GL_RENDERBUFFER, m_CubemapDepthRBO);
glRenderbufferStorage(GL_RENDERBUFFER, GL_DEPTH_COMPONENT24, width, height);
glFramebufferRenderbuffer(GL_FRAMEBUFFER, GL_DEPTH_ATTACHMENT, GL_RENDERBUFFER, m_CubemapDepthRBO);
glViewport(0, 0, width, height);
glBindFramebuffer(GL_FRAMEBUFFER, m_FramebufferCubemap);
for (unsigned int i = 0; i < 6; ++i)
{
Camera *camera = &faceCameras[i];
camera->SetPerspective(90.0f, (float)width / (float)height, 0.1f, 100.0f);
glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_CUBE_MAP_POSITIVE_X + i, cubeTarget->ID, 0);
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
for (unsigned int j = 0; j < renderCommands.size(); ++j)
{
renderCustomCommand(&renderCommands[j], camera);
}
}
glBindFramebuffer(GL_FRAMEBUFFER, 0);
glViewport(0, 0, m_RenderSize.x, m_RenderSize.y);
And here's the code for sampling the LatLong 2D image -> cube:
#version 330 core
out vec4 FragColor;
in vec3 wPos;
#include sample.glsl
uniform sampler2D environment;
void main()
{
vec2 uv = SampleLatLong(normalize(wPos));
vec3 color = texture(environment, uv).rgb;
FragColor = vec4(color, 1.0);
}
Note that the LatLong to cubemap conversion itself goes well: the 2D environment is properly rendered onto the cubemap, but it is simply clamped to the [0, 1] range the moment it's rendered as a skybox, as if somewhere along the process it lost its floating point data.
I've been stuck on this problem for a while now and was hoping any one of you could shed some insight (is it even possible to render directly to float cubemaps like this?). Thank you.
EDIT: Here is the same picture with a high exposure set in Photoshop; as you can see, a lot of detail emerges that has been lost in my renderer.
The third parameter of your glTexImage2D call needs to be GL_RGB16F or GL_RGB32F.
It specifies the internal format.
The two parameters GL_RGB and GL_FLOAT at the end only describe the memory layout of the optional data pointer; they do not influence the internal format.
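Applied to the allocation loop from the question, that would be (a sketch; GL_RGB16F is usually enough for HDR environment maps, while GL_RGB32F trades memory for extra precision):
for (unsigned int i = 0; i < 6; ++i)
{
    // GL_RGB16F as the internal format makes the faces true float textures.
    glTexImage2D(GL_TEXTURE_CUBE_MAP_POSITIVE_X + i, 0, GL_RGB16F, width, height, 0, GL_RGB, GL_FLOAT, NULL);
}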
Related
I am using OpenGL for the first time, with GLFW, GLEW, and GLM. I've been more or less following a tutorial, but have also diverged from it to focus on the aspects I'm interested in at the moment.
Currently, I'm trying to draw pixels to a texture and then display that on the screen as a fullscreen quad. However, the texture always displays as black, and I'm unsure where I went wrong.
Initializing the texture here (as well as the fullscreen quad):
int InitializeRenderTarget(GameManager* gameManager)
{
// Vertex Array Object
GLuint vertexArrayID;
glGenVertexArrays(1, &vertexArrayID);
glBindVertexArray(vertexArrayID);
programID = LoadShaders("Source/Graphics/SimpleVertexShader.vert", "Source/Graphics/RenderTextureFragmentShader.frag");
// The texture we're going to render to
glGenTextures(1, &renderedTexture);
glBindTexture(GL_TEXTURE_2D, renderedTexture);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB,
gameManager->GRID_SIZE, gameManager->GRID_SIZE,
0, GL_RGB, GL_UNSIGNED_BYTE, 0);
glGenerateMipmap(GL_TEXTURE_2D);
// The fullscreen quad's FBO
static const GLfloat g_quad_vertex_buffer_data[] = {
-1.0f, -1.0f, 0.0f, //0.0f, 0.0f,
1.0f, -1.0f, 0.0f, //1.0f, 0.0f,
-1.0f, 1.0f, 0.0f, //0.0f, 1.0f,
-1.0f, 1.0f, 0.0f, //0.0f, 1.0f,
1.0f, -1.0f, 0.0f, //1.0f, 0.0f,
1.0f, 1.0f, 0.0f, //1.0f, 1.0f
};
glGenBuffers(1, &quad_vertexbuffer);
glBindBuffer(GL_ARRAY_BUFFER, quad_vertexbuffer);
glBufferData(GL_ARRAY_BUFFER, sizeof(g_quad_vertex_buffer_data), g_quad_vertex_buffer_data, GL_STATIC_DRAW);
texID = glGetUniformLocation(programID, "renderedTexture");
glEnable(GL_TEXTURE_2D);
return 0;
}
Drawing onto the texture here:
void RenderToTexture(GameManager* gameManager)
{
glBindTexture(GL_TEXTURE_2D, renderedTexture);
float* particleColors = gameManager->getParticleColors();
glTexSubImage2D(GL_TEXTURE_2D, 0, GL_RGBA,
gameManager->GRID_SIZE, gameManager->GRID_SIZE,
0, GL_RGB, GL_FLOAT, (GLvoid*)particleColors);
glGenerateMipmap(GL_TEXTURE_2D);
// Poor filtering
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR_MIPMAP_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
}
Drawing to screen here:
void RenderToScreen()
{
// Render to the screen
glBindFramebuffer(GL_FRAMEBUFFER, 0);
// Render with a 100-pixel margin on each side of the framebuffer
glViewport(100, 100, screenWidth-200, screenHeight-200);
// Clear the screen
glClearColor(0.0f, 0.0f, 0.4f, 0.0f);
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
// Use our shader
glUseProgram(programID);
// Bind our texture in Texture Unit 0
glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_2D, renderedTexture);
// Set our "renderedTexture" sampler to use Texture Unit 0
glUniform1i(texID, 0);
// 1st attribute buffer : vertices
glEnableVertexAttribArray(0);
glBindBuffer(GL_ARRAY_BUFFER, quad_vertexbuffer);
glVertexAttribPointer(
0, // attribute 0. No particular reason for 0, but must match the layout in the shader.
3, // size
GL_FLOAT, // type
GL_FALSE, // normalized?
0, // stride
(void*)0 // array buffer offset
);
// Draw the triangles !
glDrawArrays(GL_TRIANGLES, 0, 6); // 2*3 indices starting at 0 -> 2 triangles
glDisableVertexAttribArray(0);
// Swap buffers
glfwSwapBuffers(window);
glfwPollEvents();
}
The main loop:
int main(void)
{
if (InitializeWindow() != 0) {
fprintf(stderr, "Failed to open GLFW window.\n");
getchar();
glfwTerminate();
return -1;
}
GameManager* gameManager = GameManager::getInstance();
if (InitializeRenderTarget(gameManager) != 0) {
fprintf(stderr, "Failed to initialize render target.\n");
getchar();
glfwTerminate();
return -1;
}
do {
gameManager->Update();
RenderToTexture(gameManager);
RenderToScreen();
} // Check if the ESC key was pressed or the window was closed
while (glfwGetKey(window, GLFW_KEY_ESCAPE) != GLFW_PRESS &&
glfwWindowShouldClose(window) == 0);
CleanUp();
return 0;
}
Vertex shader:
#version 460 core
// Input vertex data, different for all executions of this shader.
layout(location = 0) in vec3 vertexPosition_modelspace;
// Output data ; will be interpolated for each fragment.
out vec2 UV;
void main(){
gl_Position = vec4(vertexPosition_modelspace, 1);
UV = (vertexPosition_modelspace.xy+vec2(1,1))/2.0;
}
and finally fragment shader:
#version 460 core
in vec2 UV;
out vec4 color;
uniform sampler2D renderedTexture;
void main(){
color = texture(renderedTexture, UV);
//color = vec4(UV.x, UV.y, 0, 1);
}
I'm fairly certain that I am drawing the quad to the screen correctly, as I used the commented-out line in the fragment shader to produce a nice colorful gradient effect.
However, I might not be binding the texture correctly.
I confirmed that the particleColors array does contain the float values correctly. I've tried setting all the reds to 1.0f, all the values to 1.0f, and all the values to 255.0f (just in case).
I tried adding lines like:
glEnable(GL_TEXTURE_2D);
glGenerateMipmap(GL_TEXTURE_2D);
// Poor filtering
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR_MIPMAP_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
based on some other posts I saw, but I have yet to see any visible effect.
I also tried changing how the vertex and fragment shaders calculate and use the UVs, but that only made things worse: the gradient effect wouldn't even appear at that point.
Hopefully you can figure out what I've missed. Thank you for any help.
You're using glTexSubImage2D completely wrong. The 2nd and 3rd arguments are the offsets, and the 4th and 5th arguments are the sizes:
glTexSubImage2D(GL_TEXTURE_2D, 0, GL_RGBA, gameManager->GRID_SIZE, gameManager->GRID_SIZE, 0, GL_RGB, GL_FLOAT, (GLvoid*)particleColors);
It has to be:
glTexSubImage2D(
GL_TEXTURE_2D, 0,
0, 0, gameManager->GRID_SIZE, gameManager->GRID_SIZE,
GL_RGB, GL_FLOAT, (GLvoid*)particleColors);
glEnable(GL_TEXTURE_2D) only has meaning when using the fixed function pipeline with no shader program bound.
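With a shader program in use, it is enough to bind the texture to a unit and point the sampler uniform at that unit, which RenderToScreen() already does:
glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_2D, renderedTexture);
glUniform1i(texID, 0); // the "renderedTexture" sampler reads from texture unit 0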
I have figured it out. The correction by Rabbid76 was a big first step.
Afterwards I played around with my pixel array, and after making it smaller I found that I did have a small line of pixels at the bottom of my texture. More poking around showed that I had actually filled my pixel data wrong. Fixing the logic error in my loop filled the texture properly.
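For anyone hitting the same issue: the index has to advance by the number of channels per pixel. A minimal sketch of a correct fill loop (GRID_SIZE and the all-white color are placeholders, not the original code):
// Three floats (RGB) per pixel, so step the index by 3.
float* particleColors = new float[GRID_SIZE * GRID_SIZE * 3];
for (int i = 0; i < GRID_SIZE * GRID_SIZE * 3; i += 3)
{
    particleColors[i + 0] = 1.0f; // R
    particleColors[i + 1] = 1.0f; // G
    particleColors[i + 2] = 1.0f; // B
}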
I have a texture in a game that clamps to the edge instead of linearly scaling up. These are the parameters I'm providing OpenGL with:
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
How should I change them in order to make the texture scale up?
These are the vertices I'm giving OpenGL:
typedef struct{
float positionX;
float positionY;
float textureX ;
float textureY ;
} textureVertex;
textureVertex vertices[4] = {
{-3.5f, 3.5f, -1.0f, 1.0f},
{-3.5f, -3.5f, -1.0f, -1.0f},
{ 3.5f, -3.5f, 1.0f, -1.0f},
{ 3.5f, 3.5f, 1.0f, 1.0f}
};
You should specify texture coordinates for your vertices. CLAMP just tells OpenGL what to do when the coordinates go outside the [0, 1] range; it does not say where that range is.
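With the question's textureVertex layout, that would look something like this (a sketch mapping the quad corners onto the full [0, 1] texture range):
textureVertex vertices[4] = {
    {-3.5f,  3.5f, 0.0f, 1.0f},
    {-3.5f, -3.5f, 0.0f, 0.0f},
    { 3.5f, -3.5f, 1.0f, 0.0f},
    { 3.5f,  3.5f, 1.0f, 1.0f}
};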
I'm trying to add a texture to a simple square for more than 5h now but I still don't manage to do so.
Here's the code I'm using in paintGL():
glEnable(GL_TEXTURE_2D);
GLuint id;
glGenTextures(1, &id);
glBindTexture(GL_TEXTURE_2D, id);
float pixels[] = {
0.0f, 0.0f, 0.0f, 1.0f, 1.0f, 1.0f,
1.0f, 1.0f, 1.0f, 0.0f, 0.0f, 0.0f
};
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, 2, 2, 0, GL_RGB, GL_FLOAT, pixels);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glColor4f(1.0f, 1.0f, 1.0f, 1.0f);
glBegin(GL_QUADS);
glTexCoord2f(0.0f, 0.0f); glVertex3f(0,0,0);
glTexCoord2f(1.0f, 0.0f); glVertex3f(1,0,0);
glTexCoord2f(1.0f, 1.0f); glVertex3f(1,1,0);
glTexCoord2f(0.0f, 1.0f); glVertex3f(0,1,0);
glEnd();
GLenum error = glGetError(); // each call pops the error flag, so read it only once
if (error != 0)
{
qDebug() << error;
}
There are no errors, not even OpenGL ones. I initialized the clear color to grey.
When I try to change the color with glColor3f(...), it works.
I read all the tutorials I could find through Google, but none of them helped.
SOLUTION:
I never put
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
in the right place: just after the glTexImage2D call! The code above has been edited and now works like a charm.
You HAVE to specify filtering for your texture:
glTexParameteri(GL_TEXTURE_2D,GL_TEXTURE_MIN_FILTER,GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D,GL_TEXTURE_MAG_FILTER,GL_LINEAR);
The default minification filter uses mipmaps, but you never generate them, so the texture is incomplete.
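Alternatively, if your context provides glGenerateMipmap (OpenGL 3.0+ or ARB_framebuffer_object), you could keep the default filter and build the mip chain right after the upload, roughly:
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, 2, 2, 0, GL_RGB, GL_FLOAT, pixels);
glGenerateMipmap(GL_TEXTURE_2D); // makes the default mipmap-based filter usable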
It looks to me like you're only using one quarter of your texture. It's only 4 pixels, and you've set the texture coordinates to be just the first pixel. If that pixel is white and your texture environment is set to multiply the quad's color by the texture, and you're set to use nearest neighbor sampling, then you'll get just the color. Try changing the texture coordinates to be (0,0) to (1,1) instead of (0,0) to (0.5,0.5) and see if that gives you the expected result. You can also try setting the various texture parameters and environments differently to see how that affects your drawing.
I have been trying to implement shadow mapping. While I think I am now close, I have become stuck on a strange effect (illustrated below):
As you can see, the shadow region appears too small. There is also an unusual effect on the cube itself.
The geometry being rendered is a cube of dimensions 1.0 on a square plane of dimensions 100.0. The scene contains a single spotlight with an angle (from one side to the other) of 0.5 radians and a range of 100.0. This spotlight orbits about the y-axis and adjusts its rotation to look at the origin.
I set up the framebuffer and depth texture (512 x 512) as follows:
// Create and configure the depth texture.
glGenTextures(1, &m_depthTexture);
glBindTexture(GL_TEXTURE_2D, m_depthTexture);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_BORDER);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_BORDER);
GLfloat border[] = { 1.0f, 1.0f, 1.0f, 1.0f };
glTexParameterfv(GL_TEXTURE_2D, GL_TEXTURE_BORDER_COLOR, border);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_COMPARE_MODE, GL_NONE);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_COMPARE_FUNC, GL_LEQUAL);
glTexImage2D(GL_TEXTURE_2D, 0, GL_DEPTH_COMPONENT32, width, height, 0, GL_DEPTH_COMPONENT, GL_FLOAT, (void*)0);
// Assign the depth texture to texture channel 0.
glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_2D, m_depthTexture);
// Create and configure the framebuffer.
glGenFramebuffers(1, &m_framebuffer);
glBindFramebuffer(GL_FRAMEBUFFER, m_framebuffer);
glFramebufferTexture2D(GL_FRAMEBUFFER, GL_DEPTH_ATTACHMENT, GL_TEXTURE_2D, m_depthTexture, 0);
GLenum drawBuffers[] = { GL_NONE };
glDrawBuffers(1, drawBuffers);
I then render the scene to the shadow map framebuffer from the perspective of the spotlight. This seems to be working. Inspecting the depth texture using an OpenGL debugging tool revealed the following:
The scene is rendered a second time, where I set the uniforms for the depth texture and shadow matrix:
glActiveTexture(GL_TEXTURE0 + 1);
glBindTexture(GL_TEXTURE_2D, shadowMap.depthTexture());
program->uniform("shadowMap", 1);
const M3D::Matrix4 lightViewMatrix = lightTransformComponent->transformationMatrix().inverse();
const float invTanHalfFov = 1.0f / std::tan(coneAngle * 0.5f);
const float nearClipPlane = 0.3f;
const float farClipPlane = lightLightComponent->range();
const float zRange = nearClipPlane - farClipPlane;
const Matrix4 lightProjectionMatrix(
invTanHalfFov, 0.0f, 0.0f, 0.0f,
0.0f, invTanHalfFov, 0.0f, 0.0f,
0.0f, 0.0f, -(nearClipPlane + farClipPlane) / zRange, 2.0f * nearClipPlane * farClipPlane / zRange,
0.0f, 0.0f, 1.0f, 0.0f
);
const Matrix4 shadowMatrix = lightProjectionMatrix * lightViewMatrix * modelMatrix;
program->uniform("shadowMatrix", shadowMatrix);
I compute the shadow coordinate in the vertex shader:
f_shadowCoordinate = shadowMatrix * vec4(v_position, 1.0f);
Then, in the fragment shader, I project this coordinate and bias it into the [0, 1] range.
vec2 projectedShadowCoordinates = (f_shadowCoordinate.xy / f_shadowCoordinate.w) * 0.5f + vec2(0.5f, 0.5f);
float shadowDistance = texture(shadowMap, projectedShadowCoordinates).x;
return vec4(1.0f) * shadowDistance;
The problem was caused by mistakenly setting the projection matrix uniform to the camera's projection matrix (instead of the light's projection matrix) when rendering to the shadow framebuffer.
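In other words, the depth pass must be driven entirely by the light's matrices. A minimal sketch of the intended setup (depthProgram, renderScene, and the uniform names here are illustrative, not from the original code; the uniform() call style follows the snippets above):
// Shadow pass: render depth from the spotlight's point of view,
// using the light's view/projection rather than the camera's.
glBindFramebuffer(GL_FRAMEBUFFER, m_framebuffer);
glViewport(0, 0, 512, 512);
glClear(GL_DEPTH_BUFFER_BIT);
depthProgram->uniform("projectionMatrix", lightProjectionMatrix); // not the camera's!
depthProgram->uniform("viewMatrix", lightViewMatrix);
renderScene(depthProgram);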
I create the texture like this:
GLuint PingPongShader::GenerateTexture()
{
float* pixels = new float[width*height * 4];
for(int i = 0; i < width * height * 4; i += 4) // 4 floats (RGBA) per pixel
{
pixels[i] = 1.0;
pixels[i+1] = 1.0;
pixels[i+2] = 1.0;
pixels[i+3] = 1.0;
}
GLuint t;
glGenTextures(1, &t);
glBindTexture(GL_TEXTURE_2D, t);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_BASE_LEVEL, 0);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAX_LEVEL, 0);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA32F, width, height, 0, GL_RGBA, GL_FLOAT, pixels);
delete[] pixels;
LogGLError();
return t;
}
My display function looks like this:
void display()
{
glClearColor(0.0f, 0.0f, 0.0f, 1.0f);
glClear(GL_COLOR_BUFFER_BIT);
glEnable(GL_TEXTURE_2D);
shader.BindCurrentTexture();
shader.EnableShader();
glBegin(GL_QUADS);
{
glTexCoord2f(0.0f, 0.0f);
glVertex2f(-1.0f, -1.0f);
glTexCoord2f(1.0f, 0.0f);
glVertex2f(1.0f, -1.0f);
glTexCoord2f(1.0f, 1.0f);
glVertex2f(1.0f, 1.0f);
glTexCoord2f(0.0f, 1.0f);
glVertex2f(-1.0f, 1.0f);
}
glEnd();
shader.DisableShader();
glDisable(GL_TEXTURE_2D);
glutSwapBuffers();
}
The shader looks like this:
Vertex:
varying vec2 texture_coordinate;
void main()
{
// Transforming The Vertex
gl_Position = gl_ModelViewProjectionMatrix * gl_Vertex;
// Passing The Texture Coordinate Of Texture Unit 0 To The Fragment Shader
texture_coordinate = vec2(gl_MultiTexCoord0);
}
Fragment:
varying vec2 texture_coordinate;
uniform sampler2D my_color_texture;
void main()
{
// Sampling The Texture And Passing It To The Frame Buffer
gl_FragColor = texture2D(my_color_texture, texture_coordinate);
}
Somehow I'm only getting a black screen, even though the pixels are all 1.0 (white). I don't see where the problem is :/
You are writing code using deprecated functions. In an OpenGL 4.4 core profile you can't use glBegin() and friends. Please use VBOs for vertex and texture coordinate data; you can look this up in any modern OpenGL tutorial. Alternatively, you can request an older OpenGL context using glutInitContextVersion(). Make sure to use OpenGL 2.1, or enable the compatibility profile for versions > 3.0.
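As a rough illustration, the immediate-mode quad could be replaced with a VBO like this (a sketch; the shaders would also need to read explicit in attributes at locations 0 and 1 instead of gl_Vertex and gl_MultiTexCoord0):
// One-time setup: interleaved x, y, u, v for two triangles covering the screen.
GLfloat quad[] = {
    -1.0f, -1.0f, 0.0f, 0.0f,
     1.0f, -1.0f, 1.0f, 0.0f,
     1.0f,  1.0f, 1.0f, 1.0f,
    -1.0f, -1.0f, 0.0f, 0.0f,
     1.0f,  1.0f, 1.0f, 1.0f,
    -1.0f,  1.0f, 0.0f, 1.0f
};
GLuint vao, vbo;
glGenVertexArrays(1, &vao);
glBindVertexArray(vao);
glGenBuffers(1, &vbo);
glBindBuffer(GL_ARRAY_BUFFER, vbo);
glBufferData(GL_ARRAY_BUFFER, sizeof(quad), quad, GL_STATIC_DRAW);
glEnableVertexAttribArray(0); // position
glVertexAttribPointer(0, 2, GL_FLOAT, GL_FALSE, 4 * sizeof(GLfloat), (void*)0);
glEnableVertexAttribArray(1); // texture coordinate
glVertexAttribPointer(1, 2, GL_FLOAT, GL_FALSE, 4 * sizeof(GLfloat), (void*)(2 * sizeof(GLfloat)));

// Per frame, in display(): draw with the shader enabled instead of glBegin()/glEnd().
glBindVertexArray(vao);
glDrawArrays(GL_TRIANGLES, 0, 6);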