I am trying to create a normal map in OpenGL that I can load into the shader and change dynamically, but currently I am stuck on how to create the texture.
I currently have this:
glActiveTexture(GL_TEXTURE7);
glGenTextures(1, &normals);
glBindTexture(GL_TEXTURE_2D, normals);
texels = new Vector3f*[256];
for(int i = 0; i < 256; ++i){
    texels[i] = new Vector3f[256];
}
this->setup_normals();
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexImage2D(GL_TEXTURE_2D, 0, 3, 256, 256, 0, GL_RGB, GL_FLOAT, texels);
...
void setup_normals(){
    for(int i = 0; i < 256; ++i){
        for(int j = 0; j < 256; ++j){
            texels[i][j][0] = 0.0f;
            texels[i][j][1] = 1.0f;
            texels[i][j][2] = 0.0f;
        }
    }
}
where Vector3f is: typedef float Vector3f[3];
and texels is: Vector3f** texels;
When I draw this texture to a screen quad using an orthographic matrix (which works for textures loaded from files), I get a quad that is only partly green, with black streaks through it.
I am unsure why it does not appear fully green and also what is causing the black streaks to appear within it. Any help appreciated.
Your array needs to be contiguous, since glTexImage2D() doesn't take any stride or row-mapping parameters:
texels = new Vector3f[256*256];
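A minimal sketch of the contiguous version, with setup_normals() adapted to row-major indexing (internalformat 3 is the legacy spelling of GL_RGB, so that is used here):

// One contiguous block of 256*256 RGB texels (sketch; Vector3f as in the question).
Vector3f* texels = new Vector3f[256 * 256];

void setup_normals(){
    for(int i = 0; i < 256; ++i){         // row
        for(int j = 0; j < 256; ++j){     // column
            texels[i * 256 + j][0] = 0.0f;
            texels[i * 256 + j][1] = 1.0f;
            texels[i * 256 + j][2] = 0.0f;
        }
    }
}

// Upload as before:
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, 256, 256, 0, GL_RGB, GL_FLOAT, texels);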
I'm trying to generate a cube map array and store depth values etc. in it. glCheckFramebufferStatus confirms that the framebuffer is complete, but when I actually render to it, nothing is stored.
Simplified code is shown below.
// Initialize
glGenTextures(1, &CubeMap);
glBindTexture(GL_TEXTURE_CUBE_MAP_ARRAY, CubeMap);
for (int j = 0; j < 6; j++) {
    glTexImage3D(GL_TEXTURE_CUBE_MAP_POSITIVE_X + j, 0, GL_RGBA16F, 512, 512, ArrayNum, 0, GL_RGBA, GL_UNSIGNED_BYTE, 0);
}
glTexParameteri(GL_TEXTURE_CUBE_MAP_ARRAY, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_CUBE_MAP_ARRAY, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_CUBE_MAP_ARRAY, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_CUBE_MAP_ARRAY, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_CUBE_MAP_ARRAY, GL_TEXTURE_WRAP_R, GL_CLAMP_TO_EDGE);
glGenRenderbuffers(1, &CubeMapRBO);
glBindRenderbuffer(GL_RENDERBUFFER, CubeMapRBO);
glRenderbufferStorage(GL_RENDERBUFFER, GL_DEPTH_COMPONENT16, 512, 512);
glBindRenderbuffer(GL_RENDERBUFFER, 0);
// Render to CubeMap Array
for(int i = 0; i < ArrayNum; i++) {
    for(int j = 0; j < 6; j++) {
        glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_CUBE_MAP_POSITIVE_X + j, CubeMap, i);
        glFramebufferRenderbuffer(GL_FRAMEBUFFER, GL_DEPTH_ATTACHMENT, GL_RENDERBUFFER, CubeMapRBO);
    }
}
For a 2D texture array one apparently uses glFramebufferTextureLayer instead of glFramebufferTexture2D, but when I applied that to the cube map array, an error was returned.
If anyone knows anything, please comment.
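For reference, a sketch of the usual pattern for cube map arrays, assuming GL 4.0+ (or ARB_texture_cube_map_array) and reusing CubeMap, ArrayNum, i and j from above: storage is allocated with a single glTexImage3D call on the array target whose depth counts layer-faces, and individual faces are attached with glFramebufferTextureLayer.

// Allocate once; depth is ArrayNum * 6 layer-faces, with no per-face
// GL_TEXTURE_CUBE_MAP_POSITIVE_X + j calls.
glBindTexture(GL_TEXTURE_CUBE_MAP_ARRAY, CubeMap);
glTexImage3D(GL_TEXTURE_CUBE_MAP_ARRAY, 0, GL_RGBA16F, 512, 512, ArrayNum * 6, 0, GL_RGBA, GL_FLOAT, 0);

// Attach face j of cube i as a single layer of the array:
glFramebufferTextureLayer(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, CubeMap, 0, i * 6 + j);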
I create a terrain grid with 1024x1024 points, generate a normal map from it, and try to use it in my vertex/fragment shaders. I get strange results when I draw normals from the normal map compared to normals passed from the vertex shader. I tried to improve quality by increasing the texture resolution up to 8192, but that doesn't remove the strange aliasing effect.
The same happens when I derive the normal from neighbouring pixels in a heightmap.
How can I fix this?
normalmap texture creation:
uint16_t channels = 3;
GLuint Texture = m_frameBuffers;
float* pData = new float[width * height * channels];
glGenTextures(1, &Texture);
glBindTexture(GL_TEXTURE_2D, Texture);
for (uint16_t y = 0; y < height; ++y)
{
    for (uint16_t x = 0; x < width; ++x)
    {
        uint32_t index = y * width + x;
        pData[index * channels + 0] = (normals[index].x) * 0.5 + 0.5;
        pData[index * channels + 1] = (normals[index].y) * 0.5 + 0.5;
        pData[index * channels + 2] = (normals[index].z) * 0.5 + 0.5;
    }
}
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, width, height, 0, GL_RGB, GL_FLOAT, pData);
glGenerateMipmap(GL_TEXTURE_2D);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR_MIPMAP_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_R, GL_REPEAT);
delete[] pData;
++m_frameBuffers;
return Texture;
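One note on the upload above: with the unsized GL_RGB internal format the driver typically stores 8 bits per channel, which quantizes the normals even though they are uploaded as floats. A sized float format (the RGB16F the edit below compares against) preserves the uploaded precision, e.g.:

glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB16F, width, height, 0, GL_RGB, GL_FLOAT, pData);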
fragment shader:
vec3 normal = texture(normalMap, VertexIn.texcoord).rgb;
normal = normalize(normal * 2.0 - 1.0);
normal = normalize(vec3(normalmatrix*vec4(normal,1.0)));
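A side note on the last line: if normalmatrix is a full 4x4 matrix, w = 1.0 lets its translation column leak into the direction. A direction is normally transformed with w = 0.0, or with the upper-left 3x3 only:

normal = normalize(vec3(normalmatrix * vec4(normal, 0.0)));
// or, equivalently:
normal = normalize(mat3(normalmatrix) * normal);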
Edit: I uploaded better pictures to compare. They show the same area of a 1024x1024 grid; the first uses vertex normals, the second uses the normal map with RGB16F.
I am generating a bitmap (1 byte per pixel) and attempting to use it for alpha blending. I am successfully using the bitmap, but it appears that the texture does not wrap lines as I expect.
When I use the following code, it wraps where I would expect, given the input image. I get the set of Xs that I expect.
std::ofstream file{ R"(FileName.txt)" };
file << "width: " << gs.width() << "\theight: " << gs.height() << "\n";
for (int i = 0; i < gs.height(); ++i)
{
    for (int j = 0; j < gs.width(); ++j)
    {
        file << ((gs.alpha()[j + i * gs.width()]) ? 'X' : ' ');
    }
    file << "\n";
}
When I load the texture it appears that the width of the texture does not match gs.width(), since it wraps oddly.
This is the code that I use to create the texture and load it with the bitmap.
GLuint tex;
glGenTextures(1, &tex);
glBindTexture(GL_TEXTURE_2D, tex);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexImage2D(GL_TEXTURE_2D, 0, GL_DEPTH_COMPONENT, gs.width(), gs.height(), 0, GL_DEPTH_COMPONENT, GL_UNSIGNED_BYTE, gs.alpha());
Can anyone suggest what I might be doing wrong?
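One likely cause, offered as a guess since the loading code isn't shown in full: OpenGL assumes 4-byte row alignment by default (GL_UNPACK_ALIGNMENT), so a tightly packed 1-byte-per-pixel image whose width is not a multiple of 4 is read with phantom padding at the end of each row and appears to wrap at the wrong width. A minimal sketch of the fix:

// The bitmap rows are tightly packed (1 byte per pixel), so disable
// the default 4-byte row alignment before uploading.
glPixelStorei(GL_UNPACK_ALIGNMENT, 1);
glTexImage2D(GL_TEXTURE_2D, 0, GL_DEPTH_COMPONENT, gs.width(), gs.height(), 0, GL_DEPTH_COMPONENT, GL_UNSIGNED_BYTE, gs.alpha());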
I'm trying to generate textures like so:
#define checkImageWidth 64
#define checkImageHeight 64
static GLubyte checkImage[checkImageHeight][checkImageWidth][4];
static GLubyte otherImage[checkImageHeight][checkImageWidth][4];
static GLuint texName[2];
void makeCheckImages(void)
{
    int i, j, c;
    for (i = 0; i < checkImageHeight; i++) {
        for (j = 0; j < checkImageWidth; j++) {
            c = (((i & 0x8) == 0) ^ ((j & 0x8) == 0)) * 255;
            checkImage[i][j][0] = (GLubyte) c;
            checkImage[i][j][1] = (GLubyte) c;
            checkImage[i][j][2] = (GLubyte) c;
            checkImage[i][j][3] = (GLubyte) 255;
            c = (((i & 0x10) == 0) ^ ((j & 0x10) == 0)) * 255;
            otherImage[i][j][0] = (GLubyte) c;
            otherImage[i][j][1] = (GLubyte) 0;
            otherImage[i][j][2] = (GLubyte) 0;
            otherImage[i][j][3] = (GLubyte) 255;
        }
    }
}
void init(void)
{
    glClearColor(1.0, 0.0, 0.0, 0.0);
    glShadeModel(GL_FLAT);
    glEnable(GL_DEPTH_TEST);
    makeCheckImages();
    glPixelStorei(GL_UNPACK_ALIGNMENT, 1);
    glGenTextures(2, texName);
    glBindTexture(GL_TEXTURE_2D, texName[0]);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
    glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, checkImageWidth, checkImageHeight, 0, GL_RGBA, GL_UNSIGNED_BYTE, checkImage);
    glBindTexture(GL_TEXTURE_2D, texName[1]);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
    glTexEnvf(GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE, GL_DECAL);
    glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, checkImageWidth, checkImageHeight, 0, GL_RGBA, GL_UNSIGNED_BYTE, otherImage);
    glEnable(GL_TEXTURE_2D);
    engineGL.current.tex = texName[1];
}
But when I check the values of texName[0] and texName[1], they are both 0. I do not understand why; what am I doing wrong? Thanks.
You probably are calling glGenTextures before creating the OpenGL context, and that will generate a GL error. Don't try to create textures before you've initialized OpenGL.
I had this problem, and glGetError() was returning 0.
In my case it was caused by calling glGenTextures(...) on a different thread to the one the GL context was created on (because I was loading the textures asynchronously). Calling it from the main thread after the async load made glGenTextures(...) start working again.
Try calling glGetError. It should tell you in more detail what went wrong. In general, if an OpenGL function fails, the first thing you should do is ask OpenGL why it failed; it knows, because it just tried to execute the function.
It's much harder for us to guess at what might have gone wrong.
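As a sketch, a loop like this placed right after the suspect call drains GL's error queue (several errors can be pending at once):

glGenTextures(2, texName);
for (GLenum err = glGetError(); err != GL_NO_ERROR; err = glGetError()) {
    fprintf(stderr, "GL error after glGenTextures: 0x%04X\n", err);  // needs <cstdio>
}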
In my case, I was "lazy loading" my texture, so the glGenTextures call was inside a glBegin/glEnd block!
Before calling glGenTextures, your OpenGL context must be created and made current (wglMakeCurrent, glXMakeCurrent, etc., depending on platform).
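A minimal sketch of that ordering, using GLFW purely for illustration (any context-creation API follows the same pattern):

#include <GLFW/glfw3.h>

int main(void) {
    glfwInit();
    GLFWwindow* window = glfwCreateWindow(640, 480, "textures", NULL, NULL);
    glfwMakeContextCurrent(window);  // context now exists and is current

    GLuint texName[2];
    glGenTextures(2, texName);       // safe: called after MakeCurrent

    // ... texture setup and render loop ...

    glfwTerminate();
    return 0;
}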
According to ARB_geometry_shader4 it is possible to render a scene onto the 6 faces of a cube map with a geometry shader and the cube map attached to a framebuffer object. I want to create a shadow map this way. However, there seems to be a conflict that I can't resolve:
- I can only attach a texture with GL_DEPTH_COMPONENT as internal type to GL_DEPTH_ATTACHMENT_EXT.
- A depth texture can only be 1D or 2D.
- If I want to attach a cube map, all other attached textures must be cube maps as well.
So it looks like I can't use any depth testing when I want to render to a cube map. Or what exactly am I missing here?
EDIT: It looks like newer Nvidia drivers (180.48) support depth cube maps.
Ok, to answer some other questions here:
Of course it is possible to use 6 FBOs, one for each face, or to use one FBO and attach each face in turn before you draw to it. In both cases the cube map face is treated like any other 2D texture, and you can use it together with normal 2D textures or renderbuffers. There's probably not much of a difference between the possible approaches (if the hardware supports them); the per-face attachment is sketched below.
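A sketch of that per-face attachment, assuming a depth cube map like the tDepthCubeMap created below and a face index face in 0..5:

// Attach one face of the cube map as the depth target, then render
// that face's view before moving on to the next face.
glFramebufferTexture2DEXT(GL_FRAMEBUFFER_EXT, GL_DEPTH_ATTACHMENT_EXT, GL_TEXTURE_CUBE_MAP_POSITIVE_X + face, tDepthCubeMap, 0);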
However, it's also possible to draw everything in one step, and since I was curious how that is done, I did some research.
To create an FBO with all faces of a cube map attached to a single attachment point, I used this code (written in D):
// depth cube map
glGenTextures(1, &tDepthCubeMap);
glBindTexture(GL_TEXTURE_CUBE_MAP, tDepthCubeMap);
glTexParameterf(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameterf(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameterf(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameterf(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
glTexParameterf(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_WRAP_R, GL_CLAMP_TO_EDGE);
for (uint face = 0; face < 6; face++) {
    glTexImage2D(GL_TEXTURE_CUBE_MAP_POSITIVE_X + face, 0, GL_DEPTH_COMPONENT24,
                 width, height, 0, GL_DEPTH_COMPONENT, GL_FLOAT, null);
}
// color cube map
glGenTextures(1, &tColorCubeMap);
glBindTexture(GL_TEXTURE_CUBE_MAP, tColorCubeMap);
glTexParameterf(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameterf(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameterf(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameterf(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
glTexParameterf(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_WRAP_R, GL_CLAMP_TO_EDGE);
for (uint face = 0; face < 6; face++) {
    glTexImage2D(GL_TEXTURE_CUBE_MAP_POSITIVE_X + face, 0, GL_RGBA,
                 width, height, 0, GL_RGBA, GL_FLOAT, null);
}
// framebuffer object
glGenFramebuffersEXT(1, &fbo);
glBindFramebufferEXT(GL_FRAMEBUFFER_EXT, fbo);
glFramebufferTextureARB(GL_FRAMEBUFFER_EXT, GL_DEPTH_ATTACHMENT_EXT, tDepthCubeMap, 0);
glFramebufferTextureARB(GL_FRAMEBUFFER_EXT, GL_COLOR_ATTACHMENT0_EXT, tColorCubeMap, 0);
glDrawBuffer(GL_COLOR_ATTACHMENT0_EXT);
if (!isValidFBO()) {
    glDeleteFramebuffersEXT(1, &fbo);
    fbo = 0;
}
A few notes:
- If you want only a depth map, change glDrawBuffer(GL_COLOR_ATTACHMENT0_EXT); to glDrawBuffer(GL_NONE); before validating the FBO (and before drawing to it).
- MIN and MAG filters must be set to something valid; the default MIN filter, GL_NEAREST_MIPMAP_LINEAR, requires mipmaps and would leave the texture incomplete here.
- Width and height of all attached textures must be the same.
To render to the faces of a cube map you need a geometry shader. The following shader is missing some rotations, but it should be clear what it does: gl_Layer directs each primitive to the corresponding face (0 = +X, 1 = -X, ...).
#version 120
#extension GL_EXT_geometry_shader4 : enable
void main(void) {
    int i, layer;
    // Emit the incoming triangle once for every cube map face.
    for (layer = 0; layer < 6; layer++) {
        gl_Layer = layer;               // route this copy to face 'layer'
        for (i = 0; i < 3; i++) {
            gl_Position = gl_PositionIn[i];
            EmitVertex();
        }
        EndPrimitive();
    }
}
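The missing rotations would normally come from a per-face view-projection matrix, e.g. a uniform array indexed by the layer. A hypothetical variant of the position assignment (faceViewProj is an assumed name, not part of the original code):

uniform mat4 faceViewProj[6];  // hypothetical: one view-projection matrix per face

// inside the inner loop:
gl_Position = faceViewProj[layer] * gl_PositionIn[i];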
}