I have a texture in a game that clamps to the edge instead of linearly scaling up. These are the parameters I'm providing OpenGL with:
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
How should I change them in order to make the texture scale up?
These are the vertices I'm giving OpenGL:
typedef struct {
float positionX;
float positionY;
float textureX;
float textureY;
} textureVertex;
textureVertex vertices[4] = {
{-3.5f, 3.5f, -1.0f, 1.0f},
{-3.5f, -3.5f, -1.0f, -1.0f},
{ 3.5f, -3.5f, 1.0f, -1.0f},
{ 3.5f, 3.5f, 1.0f, 1.0f}
};
You need to specify texture coordinates in the 0 → 1 range for your vertices. CLAMP only tells OpenGL what to do when the coordinates go outside that range; it does not move where the range is. Your coordinates run from -1 to 1, so everything below 0 gets clamped to the edge texels.
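A corrected vertex array, keeping the same textureVertex layout and changing only the texture coordinates, might look like this:

textureVertex vertices[4] = {
    {-3.5f,  3.5f, 0.0f, 1.0f},  /* top-left     -> texture (0, 1) */
    {-3.5f, -3.5f, 0.0f, 0.0f},  /* bottom-left  -> texture (0, 0) */
    { 3.5f, -3.5f, 1.0f, 0.0f},  /* bottom-right -> texture (1, 0) */
    { 3.5f,  3.5f, 1.0f, 1.0f}   /* top-right    -> texture (1, 1) */
};

With these coordinates the whole texture is stretched linearly across the quad instead of the outer half being clamped to the edge.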
I have an HDR radiance environment map as a LatLong 2D texture image that I want to convert to a cubemap. I do this by loading the HDR map as a 2D float texture, projecting it onto a cube, and then rendering the scene inside this cube from 6 different directions, directly filling a cubemap via glFramebufferTexture2D with the respective cubemap faces as the function's texture target.
The generated cubemap is a floating point cubemap generated as follows:
glTexParameteri(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_WRAP_R, GL_CLAMP_TO_EDGE);
for (unsigned int i = 0; i < 6; ++i)
{
glTexImage2D(GL_TEXTURE_CUBE_MAP_POSITIVE_X + i, 0, GL_RGB, width, height, 0, GL_RGB, GL_FLOAT, NULL);
}
if (mipmap)
glGenerateMipmap(GL_TEXTURE_CUBE_MAP);
Note that the type parameter is GL_FLOAT so it should properly accept HDR values. The HDR image is loaded using stb_image.h as follows:
if (stbi_is_hdr(path.c_str()))
{
stbi_set_flip_vertically_on_load(true);
int width, height, nrComponents;
float *data = stbi_loadf(path.c_str(), &width, &height, &nrComponents, 0);
if (data)
{
GLenum format;
if (nrComponents == 3)
format = GL_RGB;
else if (nrComponents == 4)
format = GL_RGBA;
Bind();
glTexImage2D(GL_TEXTURE_2D, 0, format, width, height, 0, format, GL_FLOAT, data);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
if (Mipmapping)
glGenerateMipmap(GL_TEXTURE_2D);
Unbind();
stbi_image_free(data);
}
}
To check that the HDR data loaded correctly, I also iterated over the array and retrieved the maximum float value: the highest value in my current HDR map is 288, far above 1.0, as I would expect.
Here's where things get tricky: given the input texture (an HDR float map) and the output cubemap (also float), I'd expect the cubemap faces to be treated as proper floating-point textures and to receive the HDR values directly. However, the cubemap appears to be LDR: the moment I add tonemapping (with a variable exposure) I get quite a lot of banding, and I'm clearly missing the precision of HDR, as the following image shows (with an exposure of ~7.5).
I'm not sure what I'm missing, and I couldn't find much in OpenGL's docs about rendering directly to floating-point framebuffers; I assume this is possible, as it wouldn't make sense if it weren't.
For completeness' sake, here is the relevant code that generates the cubemap from the LatLong image (with renderCustomCommand rendering the cube with proper samplers set):
glGenFramebuffers(1, &m_FramebufferCubemap);
glGenRenderbuffers(1, &m_CubemapDepthRBO);
Camera faceCameras[6] = {
Camera(position, vec3( 1.0f, 0.0f, 0.0f), vec3(0.0f, -1.0f, 0.0f)),
Camera(position, vec3(-1.0f, 0.0f, 0.0f), vec3(0.0f, -1.0f, 0.0f)),
Camera(position, vec3( 0.0f, 1.0f, 0.0f), vec3(0.0f, 0.0f, 1.0f)),
Camera(position, vec3( 0.0f, -1.0f, 0.0f), vec3(0.0f, 0.0f, -1.0f)),
Camera(position, vec3( 0.0f, 0.0f, 1.0f), vec3(0.0f, -1.0f, 0.0f)),
Camera(position, vec3( 0.0f, 0.0f, -1.0f), vec3(0.0f, -1.0f, 0.0f))
};
glBindFramebuffer(GL_FRAMEBUFFER, m_FramebufferCubemap);
glBindRenderbuffer(GL_RENDERBUFFER, m_CubemapDepthRBO);
glRenderbufferStorage(GL_RENDERBUFFER, GL_DEPTH_COMPONENT24, width, height);
glFramebufferRenderbuffer(GL_FRAMEBUFFER, GL_DEPTH_ATTACHMENT, GL_RENDERBUFFER, m_CubemapDepthRBO);
glViewport(0, 0, width, height);
glBindFramebuffer(GL_FRAMEBUFFER, m_FramebufferCubemap);
for (unsigned int i = 0; i < 6; ++i)
{
Camera *camera = &faceCameras[i];
camera->SetPerspective(90.0f, width/height, 0.1f, 100.0f);
glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_CUBE_MAP_POSITIVE_X + i, cubeTarget->ID, 0);
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
for (unsigned int j = 0; j < renderCommands.size(); ++j) // j: avoid shadowing the face loop's i
{
renderCustomCommand(&renderCommands[j], camera);
}
}
glBindFramebuffer(GL_FRAMEBUFFER, 0);
glViewport(0, 0, m_RenderSize.x, m_RenderSize.y);
And here's the code for sampling the LatLong 2D image -> cube:
#version 330 core
out vec4 FragColor;
in vec3 wPos;
#include sample.glsl
uniform sampler2D environment;
void main()
{
vec2 uv = SampleLatLong(normalize(wPos));
vec3 color = texture(environment, uv).rgb;
FragColor = vec4(color, 1.0);
}
Note that the LatLong-to-cubemap conversion itself goes well: the 2D environment is properly rendered onto the cubemap, but it's clamped to the [0,1] range the moment it's rendered as a skybox, as if somewhere along the process it lost its floating-point data.
I've been stuck on this problem for a while now and was hoping one of you could shed some light on it (is it even possible to render directly to float cubemaps like this?). Thank you.
EDIT: Here is the same picture with a high exposure set in Photoshop; as you can see, a lot of detail emerges that I've lost in the renderer.
The third parameter of your glTexImage2D call needs to be GL_RGB16F or GL_RGB32F.
It specifies the internal format: the format the texture is actually stored in on the GPU.
The two parameters GL_RGB and GL_FLOAT at the end only describe the memory layout of the optional data pointer. They do not influence the internal format, so with unsized GL_RGB you get a normalized fixed-point texture and everything written to it is clamped to [0,1].
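With that fix, the allocation loop from the question becomes (a minimal sketch; GL_RGB16F is usually enough precision for radiance maps and halves the memory cost of GL_RGB32F):

for (unsigned int i = 0; i < 6; ++i)
{
    // GL_RGB16F makes each face a real floating-point texture;
    // GL_RGB/GL_FLOAT still only describe the (here absent) source data.
    glTexImage2D(GL_TEXTURE_CUBE_MAP_POSITIVE_X + i, 0, GL_RGB16F,
                 width, height, 0, GL_RGB, GL_FLOAT, NULL);
}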
I've been trying to add a texture to a simple square for more than five hours now, but I still can't manage it.
Here's the code I'm using in paintGL():
glEnable(GL_TEXTURE_2D);
GLuint id;
glGenTextures(1, &id);
glBindTexture(GL_TEXTURE_2D, id);
float pixels[] = {
0.0f, 0.0f, 0.0f, 1.0f, 1.0f, 1.0f,
1.0f, 1.0f, 1.0f, 0.0f, 0.0f, 0.0f
};
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, 2, 2, 0, GL_RGB, GL_FLOAT, pixels);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glColor4f(1.0f, 1.0f, 1.0f, 1.0f);
glBegin(GL_QUADS);
glTexCoord2f(0.0f, 0.0f); glVertex3f(0,0,0);
glTexCoord2f(1.0f, 0.0f); glVertex3f(1,0,0);
glTexCoord2f(1.0f, 1.0f); glVertex3f(1,1,0);
glTexCoord2f(0.0f, 1.0f); glVertex3f(0,1,0);
glEnd();
GLenum err = glGetError(); // store it: glGetError() clears the flag, so calling it twice would report the next error instead
if (err != GL_NO_ERROR)
{
qDebug() << err;
}
There are no errors, not even OpenGL ones. I initialized the clear color to grey.
When I try to change the color with glColor3f(...), it works.
I read all the tutorials I could find on Google, but none of them helped.
SOLUTION:
I never put
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
at the right place: just after the glTexImage2D! The code above is edited and now works like a charm.
You HAVE to specify filtering for your texture:
glTexParameteri(GL_TEXTURE_2D,GL_TEXTURE_MIN_FILTER,GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D,GL_TEXTURE_MAG_FILTER,GL_LINEAR);
Because the default minification filter (GL_NEAREST_MIPMAP_LINEAR) uses mipmaps, but you don't generate them, which leaves the texture incomplete.
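Alternatively, you can keep the mipmap-based default filter and generate the mipmaps after the upload instead (a sketch, assuming OpenGL 3.0+ where glGenerateMipmap is available):

glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, 2, 2, 0, GL_RGB, GL_FLOAT, pixels);
glGenerateMipmap(GL_TEXTURE_2D); // completes the texture for GL_NEAREST_MIPMAP_LINEAR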
It looks to me like you're only using one quarter of your texture. It's only 4 pixels, and you've set the texture coordinates to be just the first pixel. If that pixel is white and your texture environment is set to multiply the quad's color by the texture, and you're set to use nearest neighbor sampling, then you'll get just the color. Try changing the texture coordinates to be (0,0) to (1,1) instead of (0,0) to (0.5,0.5) and see if that gives you the expected result. You can also try setting the various texture parameters and environments differently to see how that affects your drawing.
Here is what I have so far:
Rectangle::Rectangle(int x, int y)
{
sizeX_ = x * CASE;
sizeY_ = y * CASE;
texture_ = gdl::Image::load("./ressources/floor.jpg");
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT);
}
void Rectangle::draw()
{
texture_.bind();
glBegin(GL_QUADS);
glTexCoord2f(0.0f, 0.0f);
glVertex3f(0.0f, 0.0f, 0.0f);
glTexCoord2f(sizeX_ / CASE, 0.0f);
glVertex3f(sizeX_, 0.0f, 0.0f);
glTexCoord2f(0.0f, sizeY_ / CASE);
glVertex3f(sizeX_, 0.0f, sizeY_);
glTexCoord2f(sizeX_ / CASE, sizeY_ / CASE);
glVertex3f(0.0f, 0.0f, sizeY_);
glEnd();
}
The constructor takes the size of the map in cases, for example (10, 10); the real size will be (10 * CASE, 10 * CASE), where CASE = 400.
But the texture is not repeating correctly. It seems to be shrunk (good) but packed from the top-left corner to the bottom-right corner.
Am I doing something wrong?
glTexParameter works on the currently bound texture. It doesn't globally set parameters. This means that you need to bind a texture before calling this function:
glBindTexture(GL_TEXTURE_2D, ...);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT);
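In your constructor, that would look something like this (a sketch, assuming gdl::Image exposes the same bind() you already use in draw()):

Rectangle::Rectangle(int x, int y)
{
    sizeX_ = x * CASE;
    sizeY_ = y * CASE;
    texture_ = gdl::Image::load("./ressources/floor.jpg");
    texture_.bind(); // bind first, so the wrap parameters apply to this texture
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT);
}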
I'm pretty new to OpenGL ES 2.0, and I'm trying to draw a single texture.
Here is my code:
data: a pointer to the texture data (it works; I was already able to test it with ES 1.0)
w: texture width
h: texture height
The texture is 256x256 pixels.
glGetError returns GL_NO_ERROR on every line…
Hope someone can help! All I can see is a black screen!
Thanks,
Wise
{
{
enum {
ATTRIB_VERTEX=0,
ATTRIB_TEXTURE_POSITION,
NUM_ATTRIB
};
glClearColor (0.0f, 0.0f, 0.0f, 0.0f);
glClear (GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
const GLfloat vertices[] = { -0.5f, -0.5f, 0.5f, -0.5f, -0.5f, 0.5f, 0.5f, 0.5f };
const GLfloat texture_coord[] = { 0.0f, 1.0f,0.0f, 0.0f, 1.0f, 1.0f, 1.0f, 0.0f };
GLuint texName;
glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_2D, texName);
glEnableVertexAttribArray(ATTRIB_VERTEX);
glVertexAttribPointer(ATTRIB_VERTEX, 2, GL_FLOAT, 0, 0, vertices);
glEnableVertexAttribArray(ATTRIB_TEXTURE_POSITION);
glVertexAttribPointer(ATTRIB_TEXTURE_POSITION, 2, GL_FLOAT, 0, 0, texture_coord);
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
glTexImage2D (GL_TEXTURE_2D, 0, GL_RGBA,w,h,
0, GL_RGBA,
GL_UNSIGNED_BYTE, data);
glDrawArrays (GL_TRIANGLES, 0, 4);
}
}
Is that the full source? If so, you need to do the following, but you must set up your shaders before any of this.
Setting the handles:
ATTRIB_VERTEX = glGetAttribLocation(m_paintProgram, "position");
ATTRIB_TEXTURE_POSITION = glGetAttribLocation(m_paintProgram, "inputTextureCoordinate");
maintextureHandle = glGetUniformLocation(m_paintProgram, "texture0");
Giving the values through the handles:
glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_2D, texName);
glUniform1i(maintextureHandle, 0); // add
Also, you don't have to call this code every time; just call it once:
glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_2D, texName);
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
glTexImage2D (GL_TEXTURE_2D, 0, GL_RGBA,w,h,0, GL_RGBA,GL_UNSIGNED_BYTE, data);
Actually, your code is missing a lot of required pieces, such as shader loading, shader compilation, glUseProgram(), glBindFramebuffer(), glViewport(), glBindRenderbuffer(), and things like that. I can't cover all of them here, so please refer to this link to learn more about OpenGL ES 2.0:
http://examples.oreilly.com/9780596804831/readme.html#WireframeSkeleton
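For reference, a minimal ES 2.0 shader pair matching the attribute and uniform names used above might look like this (a sketch; the compile/link code that produces m_paintProgram is omitted):

// Vertex shader: pass the position through and forward the texture coordinate.
static const char *vertexSrc =
    "attribute vec2 position;\n"
    "attribute vec2 inputTextureCoordinate;\n"
    "varying vec2 texCoord;\n"
    "void main() {\n"
    "    texCoord = inputTextureCoordinate;\n"
    "    gl_Position = vec4(position, 0.0, 1.0);\n"
    "}\n";

// Fragment shader: sample texture0 at the interpolated coordinate.
static const char *fragmentSrc =
    "precision mediump float;\n"
    "varying vec2 texCoord;\n"
    "uniform sampler2D texture0;\n"
    "void main() {\n"
    "    gl_FragColor = texture2D(texture0, texCoord);\n"
    "}\n";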
How can I convert a .png image into an OpenGL texture with SDL? Here's what I have now:
typedef GLuint texture;
texture load_texture(std::string fname){
SDL_Surface *tex_surf = IMG_Load(fname.c_str());
if(!tex_surf){
return 0;
}
texture ret;
glGenTextures(1, &ret);
glBindTexture(GL_TEXTURE_2D, ret);
glTexImage2D(GL_TEXTURE_2D, 0, 3, tex_surf->w, tex_surf->h, 0, GL_RGB, GL_UNSIGNED_BYTE, tex_surf->pixels);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
SDL_FreeSurface(tex_surf);
return ret;
}
and my code to draw the thing:
glEnable(GL_TEXTURE_2D);
glBindTexture(GL_TEXTURE_2D, tex);
//Use blurry texture mapping (replace GL_LINEAR with GL_NEAREST for blocky)
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glColor4f( 1.0, 1.0, 1.0, 1.0 ); //Don't use special coloring
glBegin(GL_QUADS);
glTexCoord2f(0.0f, 0.0f);
glVertex3f(0.0f, 0.0f, 0.0f);
glTexCoord2f(1.0f, 0.0f);
glVertex3f(128.0f, 0.0f, 0.0f);
glTexCoord2f(1.0f, 1.0f);
glVertex3f(128.0f, 128.0f, 0.0f);
glTexCoord2f(0.0f, 1.0f);
glVertex3f(0.0f, 128.0f, 0.0f);
glEnd();
glDisable(GL_TEXTURE_2D);
The problem is that it only works with .bmp files, and they turn bluish, so what is wrong?
Also, when I try to load a .png, it shows up really weird.
Wrong colors can be caused by getting the channel order wrong. The code I have lying around for loading .bmp files uses GL_BGR instead of GL_RGB, so I think that will solve your problem with bmps.
The problem with your png image is more likely caused by the png being 32 bits per pixel. Probably the best solution for you is to inspect the format field of the SDL surface to determine the appropriate flags/values to pass to glTexImage2D.
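A sketch of that inspection using the real SDL_PixelFormat fields (it assumes a tightly packed, little-endian surface; GL_BGR/GL_BGRA require OpenGL 1.2+):

GLenum format;
GLint internal;
switch (tex_surf->format->BytesPerPixel) {
case 4: // 32-bit pixels, e.g. pngs with alpha
    internal = GL_RGBA8;
    format = (tex_surf->format->Rmask == 0x000000ff) ? GL_RGBA : GL_BGRA;
    break;
case 3: // 24-bit pixels, e.g. bmps, which usually come in as BGR
    internal = GL_RGB8;
    format = (tex_surf->format->Rmask == 0x000000ff) ? GL_RGB : GL_BGR;
    break;
default:
    SDL_FreeSurface(tex_surf);
    return 0; // unhandled layout; convert the surface first
}
glTexImage2D(GL_TEXTURE_2D, 0, internal, tex_surf->w, tex_surf->h, 0, format, GL_UNSIGNED_BYTE, tex_surf->pixels);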