OpenGL texture corruption

I am rendering a simple pixel buffer in OpenGL. First I create a quad, then I create a texture. It works correctly as long as the buffer doesn't change. When I change my buffer and upload the new data into the texture with glTexSubImage2D or glTexImage2D, the top section of my texture gets corrupted, as in the image.
I create my buffer like this:
int length = console->width * console->height * 3;
GLubyte buf[length];
for (int i = 0; i < length; i += 3) {
    buf[i]     = 0;
    buf[i + 1] = 0;
    buf[i + 2] = 0;
}
console->buffer = buf;
I create the texture like this:
glGenTextures(1, &console->textureID);
glBindTexture(GL_TEXTURE_2D, console->textureID);
glPixelStorei(GL_UNPACK_ALIGNMENT, 1);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, console->width, console->height, 0, GL_RGB, GL_UNSIGNED_BYTE, console->buffer);
tpUseShader(console); // -> calls glUseProgram(console->programID);
glUniform1i(glGetUniformLocation(console->programID, "texture"), 0);
I update the texture like this:
glBindTexture(GL_TEXTURE_2D, console->textureID);
glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, console->width, console->height, GL_RGB, GL_UNSIGNED_BYTE, console->buffer);
For testing, I change my buffer like this in the render function:
if (console->buffer[6] == 255) {
    console->buffer[6] = 0;    // 6 is second pixel's red value
    console->buffer[10] = 255; // 10 is third pixel's green value
} else {
    console->buffer[6] = 255;
    console->buffer[10] = 0;
}
Then I call tpUseShader and render my quad.
How can I fix this problem?
I changed my console size to 10x10 and ran it again; this time I got the same results, but in the image you can see that the 3rd pixel from the bottom left is dark blue. When I print printf("3rd pixel: %d - %d - %d\n", console->buffer[12], console->buffer[13], console->buffer[14]); I get red: 0, green: 0, blue: 0. That means my buffer is normal.

I got the solution. As pleluron said in the comments of the question, buf was a local array on the stack, so console->buffer was left pointing at dead memory once the initialization function returned. I changed buf into a heap-allocated console->buffer, and it worked! Now my buffer initialization code is like this:
console->buffer = malloc(sizeof(GLubyte) * length);
for (int i = 0; i < length; i += 3) {
    console->buffer[i]     = 0;
    console->buffer[i + 1] = 0;
    console->buffer[i + 2] = 0;
}
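Since the buffer now lives on the heap, it should also be freed when the console is torn down. A minimal cleanup sketch; tpDestroyConsole and the struct name are hypothetical, only console->buffer and console->textureID come from the code above:

void tpDestroyConsole(struct tpConsole* console) {
    glDeleteTextures(1, &console->textureID); // delete the GL texture object
    free(console->buffer);                    // release the heap-allocated pixel buffer
    console->buffer = NULL;
}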

Related

C++ OpenGL Texture not loading

void OGLRectangle::LoadTexture(const char* filename)
{
    unsigned int texture;
    int width, height;
    BYTE* data;
    FILE* file;

    file = fopen(filename, "rb");
    width = 1920;
    height = 1080;
    data = new BYTE[height * width * 3];
    fread(data, width * height * 3, 1, file);
    fclose(file);

    glGenTextures(1.0, &texture);
    glBindTexture(GL_TEXTURE_2D, texture);
    tex = texture;
    glTexEnvf(GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE, GL_MODULATE);
    glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR_MIPMAP_NEAREST);
    glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
    glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP);
    glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP);
    glTexImage2D(GL_TEXTURE_2D, 0, 2, width, height, 0, GL_RGB, GL_UNSIGNED_BYTE, data);
    delete [] data;
}
I have this code to load an image; the method is called with:
LoadTexture("C:\\Users\Rhys\Documents\Hills.bmp");
The file exists.
Then I'm trying to render it to the OpenGL window using:
glEnable(GL_TEXTURE_2D);
glBindTexture(GL_TEXTURE_2D, tex);
glBegin(GL_QUADS);
glTexCoord2d(0.0, 0.0); glVertex2d(0.0, 0.0);
glTexCoord2d(1.0, 0.0); glVertex2d(100.0, 0.0);
glTexCoord2d(1.0, 1.0); glVertex2d(100.0, 100.0);
glTexCoord2d(0.0, 1.0); glVertex2d(0.0, 100.0);
glEnd();
glDisable(GL_TEXTURE_2D);
However, all I'm getting on screen is a darkish blue box, with no texture rendered in it.
I have searched for tutorials on how to do this, and even asked my lecturer, but I still cannot seem to find out why it's not working.
Any help will be greatly appreciated.
Loading a .bmp file takes a bit more work than a raw read.
This code simply loads a BMP file into memory (m_pcbData); it does not handle compression or indexed colors.
bool CBMPImage::LoadFromFile(const CString& FileName)
{
    BITMAPINFOHEADER BitmapInfo;
    ZeroMemory(&BitmapInfo, sizeof(BITMAPINFOHEADER));
    BITMAPFILEHEADER BitmapFile;
    ZeroMemory(&BitmapFile, sizeof(BITMAPFILEHEADER));

    std::ifstream FileStream(FileName, std::ios::binary | std::ios::in);
    if (!FileStream.good())
        return false;

    // Read bitmap file header
    FileStream.read(reinterpret_cast<char*>(&BitmapFile), sizeof(BITMAPFILEHEADER));
    // Read bitmap info header
    FileStream.read(reinterpret_cast<char*>(&BitmapInfo), sizeof(BITMAPINFOHEADER));
    // A proper bitmap file supports only 1 plane
    if (BitmapInfo.biPlanes != 1)
        return false;

    m_cbAlphaBits = 0;
    m_cbRedBits = 0;
    m_cbGreenBits = 0;
    m_cbBlueBits = 0;

    // Retrieve bits-per-pixel info
    m_cbBitsPerPel = (BMPbyte)BitmapInfo.biBitCount;
    // Width and height of the image
    m_nWidth = BitmapInfo.biWidth;
    m_nHeight = BitmapInfo.biHeight;
    // Compute image data size (rows are padded to 4-byte boundaries)
    m_nSize = 4 * ((m_nWidth * m_cbBitsPerPel + 31) / 32) * m_nHeight;

    // Less important info
    m_nPixelWidthPerMeter = BitmapInfo.biXPelsPerMeter;
    m_nPixelHeightPerMeter = BitmapInfo.biYPelsPerMeter;
    // Palette info, not important in our case
    m_nClrCount = BitmapInfo.biClrUsed;
    m_nClrImportant = BitmapInfo.biClrImportant;
    // COMPRESSION MUST BE BI_RGB
    m_Compression = (BMPCompression)BitmapInfo.biCompression;

    delete [] m_pcbData;
    m_pcbData = NULL;
    // Allocate proper data size
    m_pcbData = new BMPbyte[m_nSize];

    // Read actual image data, seeking to the offset given in the file header
    FileStream.seekg(BitmapFile.bfOffBits);
    FileStream.read(reinterpret_cast<char*>(m_pcbData), m_nSize);
    FileStream.close();
    return true;
}
Then load the BMP texture data into OpenGL:
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, Image.GetWidth(), Image.GetHeight(), 0, GL_BGR_EXT, GL_UNSIGNED_BYTE, (GLvoid*)Image.GetImageData());
GL_BGR_EXT is important because BMP stores its pixel data in BGR byte order.
Second, you must set your material color to white, because with the texture environment GL_TEXTURE_ENV_MODE set to GL_MODULATE the texture is multiplied by the current color.
And as @Reto Koradi mentioned, you must provide mipmaps: either call glGenerateMipmap after uploading the texture image, or set the legacy GL_GENERATE_MIPMAP parameter before it. Use one of these calls:
glGenerateMipmap(GL_TEXTURE_2D);
or
glTexParameteri(GL_TEXTURE_2D, GL_GENERATE_MIPMAP, GL_TRUE);
Also, since you are using non-power-of-two dimensions (width = 1920, height = 1080), the texture may not work at all on older hardware without NPOT support.
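NPOT textures are core since OpenGL 2.0; on older contexts you can probe for the extension at runtime. A sketch, assuming a legacy (non-core-profile) context where glGetString(GL_EXTENSIONS) is still valid:

#include <string.h>
const char* ext = (const char*)glGetString(GL_EXTENSIONS);
int npotOK = (ext != NULL) && (strstr(ext, "GL_ARB_texture_non_power_of_two") != NULL);
// if npotOK is 0, pad the image up to a power-of-two size before uploading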
You're setting the attribute to sample with mipmaps:
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR_MIPMAP_NEAREST);
You should only set that if your texture actually has mipmaps. To generate mipmaps, you can call:
glGenerateMipmap(GL_TEXTURE_2D);
after the glTexImage2D() call. Or you can simply set the sampler attribute to not use mipmaps:
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
As has already been pointed out: If your image file is indeed a BMP, and not just a raw image file, your image loading code will also need work.
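Putting the two answers together, a corrected upload path might look like the sketch below. It still treats the file as raw pixel data, on the assumption of an uncompressed 24-bit BMP whose pixel array starts at the usual 54-byte offset (BGR order, bottom-up rows):

FILE* file = fopen(filename, "rb");
fseek(file, 54, SEEK_SET);                  // assumption: pixel data starts right after the two headers
BYTE* data = new BYTE[height * width * 3];
fread(data, width * height * 3, 1, file);
fclose(file);

glGenTextures(1, &texture);                 // 1, not 1.0: the count parameter is a GLsizei
glBindTexture(GL_TEXTURE_2D, texture);
tex = texture;
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR); // no mipmaps, so no *_MIPMAP_* filter
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, width, height, 0, GL_BGR_EXT, GL_UNSIGNED_BYTE, data); // GL_RGB, not the legacy component count 2
delete [] data;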

OpenGL changing color of generated texture

I'm creating a sheet of characters and symbols from a font file, which works fine, except on the generated sheet all the pixels are black (with varying alpha). I would prefer them to be white so I can apply color multiplication and have different colored text. I realize that I can simply invert the color in the fragment shader, but I want to reuse the same shader for all my GUI elements.
I'm following this tutorial: http://en.wikibooks.org/wiki/OpenGL_Programming/Modern_OpenGL_Tutorial_Text_Rendering_02
Here's a snippet:
// Create map texture
glActiveTexture(GL_TEXTURE0);
glGenTextures(1, &map);
glBindTexture(GL_TEXTURE_2D, map);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, mapWidth, mapHeight, 0, GL_RGBA, GL_UNSIGNED_BYTE, 0);
glPixelStorei(GL_UNPACK_ALIGNMENT, 1);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);

// Draw bitmaps onto map
for (uint i = start; i < end; i++) {
    charInfo curChar = character[i];
    if (FT_Load_Char(face, i, FT_LOAD_RENDER)) {
        cout << "Loading character " << (char)i << " failed!" << endl;
        continue;
    }
    glTexSubImage2D(GL_TEXTURE_2D, 0, curChar.mapX, 0, curChar.width, curChar.height,
                    GL_ALPHA, GL_UNSIGNED_BYTE, glyph->bitmap.buffer);
}
The buffer of each glyph contains values of 0-255 for the alpha of the pixels. My question is, how do I generate white colored pixels instead of black? Is there a setting for this? (I've tried some blend modes but without success)
Since you create the texture with
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, mapWidth, mapHeight, 0, GL_RGBA, GL_UNSIGNED_BYTE, 0);
you can either change the GL_RGBA to GL_RED (or GL_LUMINANCE for pre-3.0 OpenGL) or you can create the RGBA buffer and copy the glyph data there.
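For the first option, on OpenGL 3.3 or newer a texture swizzle can expand the single channel into white-with-alpha without touching the glyph data. A sketch, not part of the original answer:

glTexImage2D(GL_TEXTURE_2D, 0, GL_R8, mapWidth, mapHeight, 0, GL_RED, GL_UNSIGNED_BYTE, 0);
GLint swizzle[4] = { GL_ONE, GL_ONE, GL_ONE, GL_RED }; // RGB read as constant 1.0, alpha from the red channel
glTexParameteriv(GL_TEXTURE_2D, GL_TEXTURE_SWIZZLE_RGBA, swizzle);
// ...and upload each glyph with GL_RED instead of GL_ALPHA in glTexSubImage2D.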
For the second option: given glyph->bitmap.buffer, you build an RGBA staging buffer:
unsigned char* glyphRGBA = new unsigned char[curChar.width * curChar.height * 4];
for (int j = 0; j < curChar.height; j++)
    for (int i = 0; i < curChar.width; i++)
    {
        int src = j * curChar.width + i; // index into the one-byte-per-pixel glyph bitmap
        int dst = 4 * src;               // index into the four-byte-per-pixel RGBA buffer
        for (int k = 0; k < 3; k++)
            glyphRGBA[dst + k] = YourTextColor[k];
        // set alpha from the glyph coverage
        glyphRGBA[dst + 3] = glyph->bitmap.buffer[src];
    }
In the code above, YourTextColor is an unsigned char[3] array holding the RGB components of the text color. The glyphRGBA array can then be fed to glTexSubImage2D.
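The upload stays almost the same as in the question, only with GL_RGBA as the pixel transfer format. A sketch:

glTexSubImage2D(GL_TEXTURE_2D, 0, curChar.mapX, 0, curChar.width, curChar.height,
                GL_RGBA, GL_UNSIGNED_BYTE, glyphRGBA);
delete [] glyphRGBA; // OpenGL copies the data, so the staging buffer can be released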

Can't load image into OpenGL texture if it is larger than 256x128

I am trying to load a jpeg image into a texture which I will use for volume rendering. However, the image fails to load (only a white rectangle is shown) whenever I try to load a jpeg larger than 256x128 pixels.
I am using OpenCV to convert the jpeg into raw values. This sounds like overkill, but I had OpenCV already; I am open to using another library.
My code may seem strange, but that is because I am using luminance values together with an alpha value. As a result, I am copying the luminance value across all four RGBA channels for now.
This code worked when I used raw luminance data. But now I am just trying to load a single jpeg image. (It works when my image is 256x128, but fails if it is bigger.)
My texture loading code:
unsigned char* chRGBABuffer = new unsigned char[IMAGEWIDTH * IMAGEHEIGHT * IMAGECOUNT * 4];

// Only create one 3D texture for now
glGenTextures(1, (GLuint*)&textureID3D);

// Set the properties of the texture
glBindTexture(GL_TEXTURE_3D, textureID3D);
glTexEnvi(GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE, GL_REPLACE);
glTexParameteri(GL_TEXTURE_3D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_BORDER);
glTexParameteri(GL_TEXTURE_3D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_BORDER);
glTexParameteri(GL_TEXTURE_3D, GL_TEXTURE_WRAP_R, GL_CLAMP_TO_BORDER);
glTexParameteri(GL_TEXTURE_3D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_3D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);

// Convert the data to RGBA data.
// Here we are simply putting the same value into the R, G, B and A channels.
// This can be changed depending on the source data.
// Usually for raw data, the alpha value will be constructed
// from a threshold value given by the user.
for (int i = 0; i < IMAGECOUNT; ++i)
{
    cv::Mat image;
    image = cv::imread(filename, CV_LOAD_IMAGE_GRAYSCALE);
    unsigned char* chBuffer = image.data;
    if (!image.data) // Check for invalid input
    {
        fprintf(stderr, "Could not open or find image\n");
        return -1;
    }
    for (int nIndx = 0; nIndx < IMAGEWIDTH * IMAGEHEIGHT; ++nIndx)
    {
        chRGBABuffer[nIndx * 4]     = chBuffer[nIndx];
        chRGBABuffer[nIndx * 4 + 1] = chBuffer[nIndx];
        chRGBABuffer[nIndx * 4 + 2] = chBuffer[nIndx];
        chRGBABuffer[nIndx * 4 + 3] = chBuffer[nIndx];
    }
}

glTexImage3D(GL_TEXTURE_3D, 0, GL_RGBA, IMAGEWIDTH, IMAGEHEIGHT, IMAGECOUNT, 0,
             GL_RGBA, GL_UNSIGNED_BYTE, (GLvoid*)chRGBABuffer);
glBindTexture(GL_TEXTURE_3D, 0);
Probably you're simply running out of texture memory. Volumetric images are memory hogs, and only a few OpenGL implementations are capable of swapping in portions of a 3D texture on demand (at a significant performance penalty).
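You can ask the implementation up front whether a 3D texture of a given size and format is feasible. A sketch using the 3D size limit and a proxy texture, both standard GL:

GLint maxSize = 0, probedWidth = 0;
glGetIntegerv(GL_MAX_3D_TEXTURE_SIZE, &maxSize);  // per-dimension limit; often small on older GPUs
glTexImage3D(GL_PROXY_TEXTURE_3D, 0, GL_RGBA, IMAGEWIDTH, IMAGEHEIGHT, IMAGECOUNT, 0,
             GL_RGBA, GL_UNSIGNED_BYTE, NULL);    // proxy target: no data, just a feasibility check
glGetTexLevelParameteriv(GL_PROXY_TEXTURE_3D, 0, GL_TEXTURE_WIDTH, &probedWidth);
if (probedWidth == 0)
    fprintf(stderr, "A %dx%dx%d RGBA texture won't fit\n", IMAGEWIDTH, IMAGEHEIGHT, IMAGECOUNT);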

Texture formats in glTexImage3D

I am using glTexImage3D with GL_TEXTURE_3D and GL_TEXTURE_2D_ARRAY as targets.
I am creating 4 layers of colors and applying them to a sphere, so I expect the 4 layers to be applied equally.
But with GL_TEXTURE_3D it repeats all the layers 2 times, whereas with GL_TEXTURE_2D_ARRAY it applies the layers only once, as expected.
int w = 4, h = 4, d = 4;
size_t size = w * h * d;
*format = GL_RGBA;
GLubyte* dataRGBA = new GLubyte[4 * size];

// First quarter of the texels (layer 0): red
for (int i = 0; i < size / 4; i++)
{
    dataRGBA[4 * i]     = 200;
    dataRGBA[4 * i + 1] = 0;
    dataRGBA[4 * i + 2] = 0;
    dataRGBA[4 * i + 3] = 255;
}
// Second quarter (layer 1): green
for (int i = size / 4; i < size / 2; i++)
{
    dataRGBA[4 * i]     = 0;
    dataRGBA[4 * i + 1] = 255;
    dataRGBA[4 * i + 2] = 0;
    dataRGBA[4 * i + 3] = 255;
}
// Third quarter (layer 2): blue
for (int i = size / 2; i < (3 * size) / 4; i++)
{
    dataRGBA[4 * i]     = 0;
    dataRGBA[4 * i + 1] = 0;
    dataRGBA[4 * i + 2] = 255;
    dataRGBA[4 * i + 3] = 255;
}
// Last quarter (layer 3): magenta
for (int i = (3 * size) / 4; i < size; i++)
{
    dataRGBA[4 * i]     = 255;
    dataRGBA[4 * i + 1] = 0;
    dataRGBA[4 * i + 2] = 255;
    dataRGBA[4 * i + 3] = 255;
}

glGenTextures(1, &id);
glBindTexture(*target11, id);
glTexParameteri(*target11, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
// when this texture needs to be magnified to fit on a big polygon, use the nearest texel to determine the color
glTexParameteri(*target11, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
// we want the texture to repeat over the S axis, so if we specify coordinates out of range we still get textured
glTexParameteri(*target11, GL_TEXTURE_WRAP_S, GL_REPEAT);
// same as above for the T axis
glTexParameteri(*target11, GL_TEXTURE_WRAP_T, GL_REPEAT);
// same as above for the R axis
glTexParameteri(*target11, GL_TEXTURE_WRAP_R, GL_REPEAT);
glTexImage3D(*target11, 0, *format, w, h, d, 0, GL_RGBA, GL_UNSIGNED_BYTE, dataRGBA);
I believe the issue is that a 2D texture array uses unnormalized integer layer indices, while a 3D texture normalizes the R axis to [0, 1], just like the S and T axes.
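A rough sketch of the two lookup rules, paraphrased from the spec (not code from the question):

#include <math.h>
// GL_TEXTURE_3D: r is normalized, so r in [0, 1] spans all depth layers and is filtered like s and t.
float texelCoord3D(float r, int depth) { return r * depth - 0.5f; }
// GL_TEXTURE_2D_ARRAY: the layer is an unnormalized index, rounded and clamped, never filtered across layers.
int layerFor2DArray(float r, int depth) {
    int layer = (int)floorf(r + 0.5f);
    if (layer < 0) layer = 0;
    if (layer >= depth) layer = depth - 1;
    return layer;
}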

Odd results using glTexImage2D

I've been trying to figure out how glTexImage2D works and am seeing some odd results from some pretty clear-cut code. My code simply draws a rough circle into a 256*256 array of unsigned ints and then sends that data out to become a texture. The displayed texture, however, turns out as variations of red and orange no matter what combinations I select inside my image creation loop:
unsigned* data = new unsigned[256*256];
for (int y = 0; y < 256; ++y)
    for (int x = 0; x < 256; ++x)
        if ((x - 100)*(x - 100) + (y - 156)*(y - 156) < 75*75)
            data[256*y + x] = ((156 << 24) | (256 << 16) | (156 << 8) | (200 << 0));
        else
            data[256*y + x] = 0; // I'd expect this to be transparent and the above to be slightly transparent and green, but it's red somehow.

glBindTexture(GL_TEXTURE_2D, texid);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA8, 256, 256, 0, GL_RGBA, GL_UNSIGNED_BYTE, (GLvoid*)data);
OpenGL options:
glEnable(GL_TEXTURE_2D);
glShadeModel(GL_SMOOTH);
glClearColor(0.0f, 0.0f, 0.0f, 0.5f);
glClearDepth(1.0f);
glEnable(GL_DEPTH_TEST);
glDepthFunc(GL_LEQUAL);
glHint(GL_PERSPECTIVE_CORRECTION_HINT, GL_NICEST);
//glBlendFunc(GL_SRC_ALPHA, GL_ONE);
//glEnable(GL_BLEND);
//glDisable(GL_CULL_FACE);
glGenTextures(1, &leaf[0]);
createLeaf(leaf[0]); // createLeaf(GLuint& texid) is posted entirely above
The rest of the code does nothing but display the texture on a single quad in a window. (x64 win7)
Edit: I tried Rickard's solution exactly and I'm still getting a purple circle.
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA8, 256, 256, 0, GL_RGBA, GL_UNSIGNED_BYTE, (GLvoid*)data);
First the positive things. You use a sized internal format (GL_RGBA8, rather than GL_RGBA). This is very good; keep doing that. You have a clear understanding of the difference between the internal format (GL_RGBA8) and the pixel transfer format (GL_RGBA). This is also good.
The problem is this. You told OpenGL that your data was a stream of unsigned bytes. But it's not a stream of unsigned bytes; it's a stream of unsigned integers. That's how you declared data, that's how you filled data. So why are you lying to OpenGL?
The problem is with your colors. This is one of your color values:
((156 << 24) | (256 << 16) | (156 << 8) | (200 << 0))
First, 256 is not a valid color. 256 in hex is 0x100, which is two bytes, not one.
The unsigned integer you would get from this is:
0x9D009CC8
If these are intended to be RGBA colors in that order, then the red is 0x9D, green is 0x00, blue is 0x9C, and alpha is 0xC8.
Now, because you're probably working on a little-endian computer, those 4 bytes are stored flipped, like this:
0xC89C009D
When you tell OpenGL to pretend that this is a byte array (which it is not), you are losing the little-endian conversion. So OpenGL sees the byte array starting with 0xC8, so that is the red value. And so on.
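You can watch the flip happen with a couple of lines. A small sketch, assuming a little-endian machine:

#include <stdio.h>
int main(void) {
    unsigned int v = 0x9D009CC8u;
    unsigned char* b = (unsigned char*)&v;   // reinterpret the int as a byte array
    printf("%02X %02X %02X %02X\n", b[0], b[1], b[2], b[3]); // prints C8 9C 00 9D on little-endian
    return 0;
}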
You need to tell OpenGL what you're actually doing: you're storing four 8-bit unsigned values in a single unsigned 32-bit integer. To do this, use the following:
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA8, 256, 256, 0, GL_RGBA, GL_UNSIGNED_INT_8_8_8_8, (GLvoid*)data);
The GL_UNSIGNED_INT_8_8_8_8 says that you're feeding OpenGL an array of unsigned 32-bit integers (which you are). The first 8 bits of each 32-bit integer are red, the second 8 are green, the third blue, and the fourth alpha.
So, to completely fix your code, you need this:
GLuint* data = new GLuint[256*256]; // Use OpenGL's types
for (int y = 0; y < 256; ++y)
    for (int x = 0; x < 256; ++x)
        if ((x - 100)*(x - 100) + (y - 156)*(y - 156) < 75*75)
            data[256*y + x] = ((0x9C << 24) | (0xFF << 16) | (0x9C << 8) | (0xC8 << 0));
        else
            data[256*y + x] = 0;

glBindTexture(GL_TEXTURE_2D, texid);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_BASE_LEVEL, 0); // Always set the base and max mipmap levels of a texture.
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAX_LEVEL, 0);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA8, 256, 256, 0, GL_RGBA, GL_UNSIGNED_INT_8_8_8_8, (GLvoid*)data);
// I'd expect this to be transparent and the above to be slightly transparent and green, but it's red somehow.
Alpha doesn't mean transparent; it means nothing at all unless you give it a meaning. Alpha only represents transparency if you use blending and set up a blending mode that causes a low alpha to make things transparent. Otherwise, it means nothing at all.
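If you do want alpha to mean transparency here, the standard "over" setup is close to the lines already commented out in the question's init code (which used additive GL_ONE instead):

glEnable(GL_BLEND);
glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA); // source alpha blends the texture over the framebuffer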
If I were to do the same thing as you, I would use an array of unsigned chars instead of unsigned ints, with 4 times the length.
unsigned char* data = new unsigned char[256*256*4];
for (int y = 0; y < 256; ++y)
    for (int x = 0; x < 256; ++x)
        if ((x - 100)*(x - 100) + (y - 156)*(y - 156) < 75*75) {
            data[(256*y + x)*4+0] = 156;
            data[(256*y + x)*4+1] = 255; // 255, not 256: 256 overflows an unsigned char to 0
            data[(256*y + x)*4+2] = 156;
            data[(256*y + x)*4+3] = 200;
        } else {
            data[(256*y + x)*4+0] = 0;
            data[(256*y + x)*4+1] = 0;
            data[(256*y + x)*4+2] = 0;
            data[(256*y + x)*4+3] = 0;
        }

glBindTexture(GL_TEXTURE_2D, texid);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA8, 256, 256, 0, GL_RGBA, GL_UNSIGNED_BYTE, (GLvoid*)data);
But your code looks right to me, and I'm not sure the code above will change anything. If it gives the same result, try changing GL_RGBA8 to just GL_RGBA. Also, what is the variable type of texid? I always call glBindTexture with a GLuint passed by reference (&texid), but if your texid is a pointer to a GLuint (GLuint* texid;) then I guess that part is OK. (Edit: I just realized I was wrong on the last part; I was thinking of glGenTextures, not glBindTexture.)