OpenGL changing color of generated texture - c++

I'm creating a sheet of characters and symbols from a font file, which works fine, except on the generated sheet all the pixels are black (with varying alpha). I would prefer them to be white so I can apply color multiplication and have different colored text. I realize that I can simply invert the color in the fragment shader, but I want to reuse the same shader for all my GUI elements.
I'm following this tutorial: http://en.wikibooks.org/wiki/OpenGL_Programming/Modern_OpenGL_Tutorial_Text_Rendering_02
Here's a snippet:
// Create map texture
glActiveTexture(GL_TEXTURE0);
glGenTextures(1, &map);
glBindTexture(GL_TEXTURE_2D, map);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, mapWidth, mapHeight, 0, GL_RGBA, GL_UNSIGNED_BYTE, 0);
glPixelStorei(GL_UNPACK_ALIGNMENT, 1);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
// Draw bitmaps onto map
for (uint i = start; i < end; i++) {
    charInfo curChar = character[i];
    if (FT_Load_Char(face, i, FT_LOAD_RENDER)) {
        cout << "Loading character " << (char)i << " failed!" << endl;
        continue;
    }
    glTexSubImage2D(GL_TEXTURE_2D, 0, curChar.mapX, 0, curChar.width, curChar.height, GL_ALPHA, GL_UNSIGNED_BYTE, glyph->bitmap.buffer);
}
The buffer of each glyph contains values of 0-255 for the alpha of the pixels. My question is, how do I generate white colored pixels instead of black? Is there a setting for this? (I've tried some blend modes but without success)

Since you create the texture with
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, mapWidth, mapHeight, 0, GL_RGBA, GL_UNSIGNED_BYTE, 0);
you can either change the internal format from GL_RGBA to GL_RED (or GL_LUMINANCE on pre-3.0 OpenGL), or you can create an RGBA buffer yourself and copy the glyph data into it.
I.e., you have
glyph->bitmap.buffer
then you do
unsigned char* glyphRGBA = new unsigned char[curChar.width * curChar.height * 4];
for(int j = 0; j < curChar.height; j++)
    for(int i = 0; i < curChar.width; i++)
    {
        int src = j * curChar.width + i;   // index into the 8-bit glyph bitmap
        int dst = src * 4;                 // index into the 4-byte-per-pixel RGBA buffer
        for(int k = 0; k < 3; k++)
            glyphRGBA[dst + k] = YourTextColor[k];
        // set alpha from the glyph coverage
        glyphRGBA[dst + 3] = glyph->bitmap.buffer[src];
    }
In the code above, YourTextColor is an unsigned char[3] array holding the RGB components of the text color. The glyphRGBA array can then be fed to glTexSubImage2D with GL_RGBA as the format.
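One way to make the GL_RED option work with the same GUI shader (this is not from the answer above, just a sketch assuming an OpenGL 3.3+ context or ARB_texture_swizzle) is to store only the glyph coverage and let a texture swizzle return white RGB with the coverage in alpha:
// Sketch: single-channel atlas plus swizzle, reusing the names from the question.
glTexImage2D(GL_TEXTURE_2D, 0, GL_R8, mapWidth, mapHeight, 0, GL_RED, GL_UNSIGNED_BYTE, 0);
GLint swizzle[4] = { GL_ONE, GL_ONE, GL_ONE, GL_RED };  // sample as (1, 1, 1, coverage)
glTexParameteriv(GL_TEXTURE_2D, GL_TEXTURE_SWIZZLE_RGBA, swizzle);
// Upload each glyph with GL_RED instead of GL_ALPHA:
glTexSubImage2D(GL_TEXTURE_2D, 0, curChar.mapX, 0, curChar.width, curChar.height, GL_RED, GL_UNSIGNED_BYTE, glyph->bitmap.buffer);
The shader's color multiplication then tints the text without needing a per-glyph RGBA buffer.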

Related

Copy vector<GLubyte> buffer to framebuffer

I am trying to avoid using glDrawPixels in my code, so I'm looking for an alternative.
Below is the code I'm using to read the framebuffer contents into a vector<GLubyte>. Now I need code to transfer the vector contents back to the framebuffer. I've tried a dozen different attempts, but no luck.
glReadBuffer(GL_COLOR_ATTACHMENT0);
GLuint copy_tex = 0;
glGenTextures(1, &copy_tex);
vector<GLubyte> tex_buf(4 * win_x * win_y, 0);
glActiveTexture(GL_TEXTURE2);
glBindTexture(GL_TEXTURE_2D, copy_tex);
glCopyTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, 0, 0, win_x, win_y, 0);
glGetTexImage(GL_TEXTURE_2D, 0, GL_RGBA, GL_UNSIGNED_BYTE, &tex_buf[0]);
for (size_t i = 0; i < win_x; i++)
{
    for (size_t j = 0; j < win_y; j++)
    {
        size_t index = 4 * (i*win_y + j);
        tex_buf[index + 0] = 255;
        tex_buf[index + 1] = 127;
        tex_buf[index + 2] = 0;
    }
}
glDrawPixels(win_x, win_y, GL_RGBA, GL_UNSIGNED_BYTE, &tex_buf[0]);
glDeleteTextures(1, &copy_tex);
glGetTexImage returns a texture image. If you want to write the image back to the texture then you can use glTexSubImage2D:
glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, win_x, win_y, GL_RGBA, GL_UNSIGNED_BYTE, &tex_buf[0]);
If you want to copy a block of pixels from one framebuffer object to another, you can use glBlitFramebuffer. For instance, to copy from a named framebuffer to the default framebuffer:
glBindFramebuffer(GL_READ_FRAMEBUFFER, my_fbo);
glBindFramebuffer(GL_DRAW_FRAMEBUFFER, 0);
glBlitFramebuffer(0, 0, win_x, win_y, 0, 0, window_w, window_h, GL_COLOR_BUFFER_BIT, GL_LINEAR);
window_w and window_h can differ from win_x and win_y; they are the size of the default framebuffer.
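Putting both suggestions together, here is a rough sketch (assuming a GL 3.0+ context; blit_fbo is a temporary name introduced here) of replacing the final glDrawPixels call:
// Upload the modified pixels back into the copied texture.
glBindTexture(GL_TEXTURE_2D, copy_tex);
glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, win_x, win_y, GL_RGBA, GL_UNSIGNED_BYTE, &tex_buf[0]);
// Attach the texture to a temporary read framebuffer and blit it to the default framebuffer.
GLuint blit_fbo = 0;
glGenFramebuffers(1, &blit_fbo);
glBindFramebuffer(GL_READ_FRAMEBUFFER, blit_fbo);
glFramebufferTexture2D(GL_READ_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, copy_tex, 0);
glBindFramebuffer(GL_DRAW_FRAMEBUFFER, 0);
glBlitFramebuffer(0, 0, win_x, win_y, 0, 0, win_x, win_y, GL_COLOR_BUFFER_BIT, GL_NEAREST);
glBindFramebuffer(GL_READ_FRAMEBUFFER, 0);
glDeleteFramebuffers(1, &blit_fbo);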

OpenGL Texture corruption

I am rendering a simple pixel buffer in OpenGL. First I create a quad, then I create a texture. It works correctly as long as the buffer never changes. When I change my buffer and upload the new data into the texture with glTexSubImage2D or glTexImage2D, the top section of my texture gets corrupted, as in the image.
I create my buffer like this.
int length = console->width * console->height * 3;
GLubyte buf[length];
for(int i = 0; i < length; i += 3) {
buf[i] = 0;
buf[i + 1] = 0;
buf[i + 2] = 0;
}
console->buffer = buf;
I create the texture like this:
glGenTextures(1, &console->textureID);
glBindTexture(GL_TEXTURE_2D, console->textureID);
glPixelStorei(GL_UNPACK_ALIGNMENT, 1);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, console->width, console->height, 0, GL_RGB, GL_UNSIGNED_BYTE, console->buffer);
tpUseShader(console); // -> calls glUseProgram(console->programID);
glUniform1i(glGetUniformLocation(console->programID, "texture"), 0);
I update the texture like this:
glBindTexture(GL_TEXTURE_2D, console->textureID);
glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, console->width, console->height, GL_RGB, GL_UNSIGNED_BYTE, console->buffer);
For testing, I change my buffer like this in the render function:
if(console->buffer[6] == 255) {
console->buffer[6] = 0; // 6 is second pixel's red value.
console->buffer[10] = 255; // 10 is third pixel's green value
} else {
console->buffer[6] = 255;
console->buffer[10] = 0;
}
Then I call tpUseShader and render my quad.
How can I fix this problem?
I changed my console size to 10x10 and ran it again; this time I got the same result, but in the image you can see that the 3rd pixel from the bottom left is dark blue. When I print it with printf("3rd pixel: %d - %d - %d\n", console->buffer[12], console->buffer[13], console->buffer[14]); I get red: 0, green: 0, blue: 0. So my buffer itself is fine.
I found the solution. As pleluron said in the comments on the question, buf was a local stack array, so console->buffer was left pointing at memory that no longer existed when the texture was later updated. I changed buf into a heap-allocated console->buffer, and it worked! Now my buffer initialization code looks like this:
console->buffer = malloc(sizeof(GLubyte) * length);
for(int i = 0; i < length; i += 3) {
console->buffer[i] = 0;
console->buffer[i + 1] = 0;
console->buffer[i + 2] = 0;
}

Loading images (using their RGB(A) pixel data) into OpenGL textures

In the main function:
img = cvLoadImage("test.JPG");
//openCV functions to load and create matrix
CvMat *mat = cvCreateMat(img->height,img->width,CV_8SC3 );
cvGetMat( img, mat,0,1);
//creating the 3-dimensional array during runtime
data = new float**[img->height];
for(int i=0;i<img->height;i++){
data[i] = new float*[img->width];
}
for(int i=0;i<img->height;i++){
for(int j=0;j<img->width;j++)
data[i][j] = new float[3];
}
//setting RGB values
for(int i=0;i<img->height;i++)
{
for(int j=0;j<img->width;j++)
{
CvScalar scal = cvGet2D( mat,i,j);
data[i][j][0] = scal.val[0];
data[i][j][1] = scal.val[1];
data[i][j][2] = scal.val[2];
}
}
I am using OpenCV to get the image pixel data and storing it in the dynamically created matrix "data". Now I generate and bind the texture:
glGenTextures(1,&texName);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S,GL_REPEAT);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T,GL_REPEAT);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER,GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER,GL_NEAREST);
glTexEnvf(GL_TEXTURE_ENV,GL_TEXTURE_ENV_MODE,GL_MODULATE);
glTexImage2D(GL_TEXTURE_2D, 0 ,GL_RGB, img->width,img->height,0,GL_RGB,GL_FLOAT,data);
glBindTexture(GL_TEXTURE_2D, texName);
glBegin(GL_QUADS);
glTexCoord2f(0,0);
glVertex3f(-1,-1,0);
glTexCoord2f(0,1);
glVertex3f(-1,1,0);
glTexCoord2f(1,1);
glVertex3f(1,1,0);
glTexCoord2f(1,0);
glVertex3f(1,-1,0);
glEnd();
There are no compilation errors, but at runtime the window displays the square I made, not the image I tried to turn into a texture.
How do I load an arbitrary image into a texture using the pixel data I extract with OpenCV? I have printed the RGB values and they seem legitimate, and the number of triplets printed is as expected.
glBindTexture should be called before the glTexParameteri/glTexImage2D calls, so OpenGL knows which texture you're setting up:
glEnable(GL_TEXTURE_2D);
glGenTextures(1,&texName);
glBindTexture(GL_TEXTURE_2D, texName);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S,GL_REPEAT);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T,GL_REPEAT);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER,GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER,GL_NEAREST);
glTexEnvf(GL_TEXTURE_ENV,GL_TEXTURE_ENV_MODE,GL_MODULATE);
glTexImage2D(GL_TEXTURE_2D, 0 ,GL_RGB, img->width,img->height,0,GL_RGB,GL_FLOAT,data);
More importantly, you are not setting up your data variable correctly:
float* data = new float[img->height * img->width * 3];
for (int i = 0; i < img->height; i++)
{
    for (int j = 0; j < img->width; j++)
    {
        CvScalar scal = cvGet2D(mat, i, j);
        data[(i * img->width + j) * 3 + 0] = scal.val[0];
        data[(i * img->width + j) * 3 + 1] = scal.val[1];
        data[(i * img->width + j) * 3 + 2] = scal.val[2];
    }
}
Also, you will need to swap the order of the color components (OpenCV loads images in BGR order) and scale them to the 0..1 range, since GL_FLOAT pixel data for a normalized internal format like GL_RGB is clamped to [0, 1].
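As a rough sketch of both adjustments (assuming cvLoadImage's default 8-bit BGR data), the fill loop could become:
float* data = new float[img->height * img->width * 3];
for (int i = 0; i < img->height; i++)
{
    for (int j = 0; j < img->width; j++)
    {
        CvScalar scal = cvGet2D(mat, i, j);           // val[0]=B, val[1]=G, val[2]=R for BGR images
        int base = (i * img->width + j) * 3;
        data[base + 0] = (float)scal.val[2] / 255.0f; // R
        data[base + 1] = (float)scal.val[1] / 255.0f; // G
        data[base + 2] = (float)scal.val[0] / 255.0f; // B
    }
}
Alternatively, keep the original channel order and pass GL_BGR as the pixel transfer format so OpenGL does the swap for you.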

c++ tga parsing: incorrect color/distortion with some resolutions

I'd like to get some help with an issue in my .tga file parsing. I have this code, which I have been using for a long time:
int fileLength = Input.tellg();
vector<char> tempData;
tempData.resize(fileLength);
Input.seekg(0);
Input.read(&tempData[0], fileLength);
Input.close();
// Load information about the tga, aka the header.
// Seek to the width.
w = byteToUnsignedShort(tempData[12], tempData[13]);
// Seek to the height.
h = byteToUnsignedShort(tempData[14], tempData[15]);
// Seek to the depth.
depth = unsigned(tempData[16]);
// Mode = components per pixel.
md = depth / 8;
// Total bytes = h * w * md.
t = h * w * md;
//Delete previously allocated data, if needed
clear();
//Allocate new storage
data.resize(t);
// Copy image data.
for(unsigned i = 0, s = 18; s < t + 18; s++, i++)
    data[i] = (unsigned char)tempData[s];
// Mode 3 = RGB, Mode 4 = RGBA
// TGA stores RGB(A) as BGR(A) so
// we need to swap red and blue.
if(md > 2)
{
char aux;
for(unsigned i = 0; i < t; i+= md)
{
aux = data[i];
data[i] = data[i + 2];
data[i + 2] = aux;
}
}
But it keeps failing occasionally for some image resolutions (mostly odd and non-POT resolutions), resulting in a distorted image (with diagonal patterns) or wrong colors. The last time I encountered it was with a 9x9 24bpp image showing weird colors.
I'm on Windows (so little-endian), rendering with OpenGL (I take the presence of an alpha channel into account when passing image data to glTexImage2D). I save my images with Photoshop without setting the RLE flag. This code always reads the correct image resolution and color depth.
Example of an image causing trouble:
http://pastie.org/private/p81wbh5sb6coldspln6mw
After loading the problematic image, this code:
for(unsigned f = 0; f < imageData.w * imageData.h * imageData.depth; f += imageData.depth)
{
if(f % (imageData.w * imageData.depth) == 0)
writeLog << endl;
writeLog << "[" << unsigned(imageData.data[f]) << "," << unsigned(imageData.data[f + 1]) << "," << unsigned(imageData.data[f + 2]) << "]" << flush;
}
outputs this:
[37,40,40][37,40,40][37,40,40][37,40,40][37,40,40][37,40,40][37,40,40][37,40,40][37,40,40]
[37,40,40][173,166,164][93,90,88][93,90,88][93,90,88][93,90,88][93,90,88][88,85,83][37,40,40]
[37,40,40][228,221,219][221,212,209][221,212,209][221,212,209][221,212,209][221,212,209][140,134,132][37,40,40]
[37,40,40][228,221,219][221,212,209][221,212,209][221,212,209][221,212,209][221,212,209][140,134,132][37,40,40]
[37,40,40][228,221,219][221,212,209][221,212,209][221,212,209][221,212,209][221,212,209][140,134,132][37,40,40]
[37,40,40][228,221,219][221,212,209][221,212,209][221,212,209][221,212,209][221,212,209][140,134,132][37,40,40]
[37,40,40][228,221,219][221,212,209][221,212,209][221,212,209][221,212,209][221,212,209][140,134,132][37,40,40]
[37,40,40][237,232,230][235,229,228][235,229,228][235,229,228][235,229,228][235,229,228][223,214,212][37,40,40]
[37,40,40][37,40,40][37,40,40][37,40,40][37,40,40][37,40,40][37,40,40][37,40,40][37,40,40]
So I guess it does read the data correctly.
That brings us to OpenGL:
glGenTextures(1, &textureObject);
glBindTexture(GL_TEXTURE_2D, textureObject);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT);
GLenum in_tex_mode, tex_mode;
if(linear) //false for that image
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
else
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
//i don't use 1 or 2 - channel textures, so it's always 24 or 32bpp
if(imageData.depth == 24)
{
in_tex_mode = GL_RGB8;
tex_mode = GL_RGB;
}
else
{
in_tex_mode = GL_RGBA8;
tex_mode = GL_RGBA;
}
glTexImage2D(GL_TEXTURE_2D, 0, in_tex_mode, imageData.w, imageData.h, 0, tex_mode, GL_UNSIGNED_BYTE, &imageData.data[0]);
glBindTexture(GL_TEXTURE_2D, 0);
Texture compression code is omitted because it's not active for that texture.
This is probably a padding/alignment issue.
You're loading a TGA, which has no row-padding, but passing it to GL which by default expects rows of pixels to be padded to a multiple of 4 bytes.
Possible fixes for this are:
Tell GL how your texture is packed, for example with glPixelStorei(GL_UNPACK_ALIGNMENT, 1); a sketch follows after this list.
Change the dimensions of your texture, such that there will be no padding.
Change the loading of your texture, such that the padding is consistent with what GL expects
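A minimal sketch of the first fix, using the variables from the question's upload code:
// Rows of a 24bpp TGA are tightly packed, so drop GL's default 4-byte row alignment
// before uploading (this state affects all subsequent pixel uploads):
glPixelStorei(GL_UNPACK_ALIGNMENT, 1);
glTexImage2D(GL_TEXTURE_2D, 0, in_tex_mode, imageData.w, imageData.h, 0, tex_mode, GL_UNSIGNED_BYTE, &imageData.data[0]);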
Most image formats save image data aligned, commonly to 4 bytes. For example, with a resolution of 1 row by 1 column, each row has one pixel, so with RGB each row takes 3 bytes, and it gets extended to 4 bytes for alignment because that is what the CPU prefers.

Odd results using glTexImage2D

I've been trying to figure out how glTexImage2D works and am seeing some odd results from some pretty clear-cut code. My code simply draws a rough circle into a 256*256-element unsigned array and then sends that data out to become a texture. The displayed texture, however, turns out as variations of red and orange no matter what combinations I select inside my image creation loop:
unsigned* data = new unsigned[256*256];
for (int y = 0; y < 256; ++y)
for (int x = 0; x < 256; ++x)
if ((x - 100)*(x - 100) + (y - 156)*(y - 156) < 75*75)
data[256*y + x] = ((156 << 24) | (256 << 16) | (156 << 8) | (200 << 0));
else
data[256*y + x] = 0; // I'd expect this to be transparent and the above to be slightly transparent and green, but it's red somehow.
glBindTexture(GL_TEXTURE_2D, texid);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA8, 256, 256, 0, GL_RGBA, GL_UNSIGNED_BYTE, (GLvoid*)data);
OpenGL options:
glEnable(GL_TEXTURE_2D);
glShadeModel(GL_SMOOTH);
glClearColor(0.0f, 0.0f, 0.0f, 0.5f);
glClearDepth(1.0f);
glEnable(GL_DEPTH_TEST);
glDepthFunc(GL_LEQUAL);
glHint(GL_PERSPECTIVE_CORRECTION_HINT, GL_NICEST);
//glBlendFunc(GL_SRC_ALPHA, GL_ONE);
//glEnable(GL_BLEND);
//glDisable(GL_CULL_FACE);
glGenTextures(1, &leaf[0]);
createLeaf(leaf[0]); // createLeaf(GLuint& texid) is posted entirely above
The rest of the code does nothing but display the texture on a single quad in a window. (x64 win7)
Edit: I tried Rickard's solution exactly and I'm still getting a purple circle.
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA8, 256, 256, 0, GL_RGBA, GL_UNSIGNED_BYTE, (GLvoid*)data);
First the positive things. You use a sized internal format (GL_RGBA8, rather than GL_RGBA). This is very good; keep doing that. You have a clear understanding of the difference between the internal format (GL_RGBA8) and the pixel transfer format (GL_RGBA). This is also good.
The problem is this. You told OpenGL that your data was a stream of unsigned bytes. But it's not a stream of unsigned bytes; it's a stream of unsigned integers. That's how you declared data, that's how you filled data. So why are you lying to OpenGL?
The problem is with your colors. This is one of your color values:
((156 << 24) | (256 << 16) | (156 << 8) | (200 << 0))
First, 256 is not a valid color. 256 in hex is 0x100, which is two bytes, not one.
The unsigned integer you would get from this is:
0x9D009CC8
If these are intended to be RGBA colors in that order, then the red is 0x9D, green is 0x00, blue is 0x9C, and alpha is 0xC8.
Now, because you're probably working on a little-endian computer, those 4 bytes are stored flipped, like this:
0xC89C009D
When you tell OpenGL to pretend that this is a byte array (which it is not), you are losing the little-endian conversion. So OpenGL sees the byte array starting with 0xC8, so that is the red value. And so on.
You need to tell OpenGL what you're actually doing: you're storing four 8-bit unsigned values in a single unsigned 32-bit integer. To do this, use the following:
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA8, 256, 256, 0, GL_RGBA, GL_UNSIGNED_INT_8_8_8_8, (GLvoid*)data);
The GL_UNSIGNED_INT_8_8_8_8 says that you're feeding OpenGL an array of unsigned 32-bit integers (which you are). The first 8-bits of the 32-bit integer is red, the second is green, the third is blue, and the fourth is alpha.
So, to completely fix your code, you need this:
GLuint* data = new GLuint[256*256]; //Use OpenGL's types
for (int y = 0; y < 256; ++y)
for (int x = 0; x < 256; ++x)
if ((x - 100)*(x - 100) + (y - 156)*(y - 156) < 75*75)
data[256*y + x] = ((0x9C << 24) | (0xFF << 16) | (0x9C << 8) | (0xC8 << 0));
else
data[256*y + x] = 0; // I'd expect this to be transparent and the above to be slightly transparent and green, but it's red somehow.
glBindTexture(GL_TEXTURE_2D, texid);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_BASE_LEVEL, 0); //Always set the base and max mipmap levels of a texture.
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAX_LEVEL, 0);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA8, 256, 256, 0, GL_RGBA, GL_UNSIGNED_INT_8_8_8_8, (GLvoid*)data);
// I'd expect this to be transparent and the above to be slightly transparent and green, but it's red somehow.
Alpha doesn't mean transparent; it means nothing at all unless you give it a meaning. Alpha only represents transparency if you use blending and set up a blending mode that causes a low alpha to make things transparent. Otherwise, it means nothing at all.
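For reference (not part of the original answer), one standard setup that makes low alpha read as transparency is ordinary "source over" blending; note that the commented-out glBlendFunc(GL_SRC_ALPHA, GL_ONE) in the question would give additive blending instead:
glEnable(GL_BLEND);
glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA);
// With this, alpha 0xC8 blends the circle about 78% over the background, and alpha 0 leaves it untouched.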
If I were to do the same thing as you, I would use an array of unsigned chars instead of unsigned ints, with 4 times the length.
unsigned char* data = new unsigned char[256*256*4];
for (int y = 0; y < 256; ++y)
    for (int x = 0; x < 256; ++x)
        if ((x - 100)*(x - 100) + (y - 156)*(y - 156) < 75*75){
            data[(256*y + x)*4+0] = 156;
            data[(256*y + x)*4+1] = 255; // 256 doesn't fit in an unsigned char (it would wrap to 0)
            data[(256*y + x)*4+2] = 156;
            data[(256*y + x)*4+3] = 200;
        }else{
            data[(256*y + x)*4+0] = 0;
            data[(256*y + x)*4+1] = 0;
            data[(256*y + x)*4+2] = 0;
            data[(256*y + x)*4+3] = 0;
        }
glBindTexture(GL_TEXTURE_2D, texid);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA8, 256, 256, 0, GL_RGBA, GL_UNSIGNED_BYTE, (GLvoid*)data);
But your code looks right to me, and I'm not sure the code above will change anything. If you get the same result, try changing GL_RGBA8 to just GL_RGBA. Also, what is the variable type of texid? I always call glBindTexture with a GLuint by reference (&texid), but if your texid is a pointer to a GLuint (GLuint *texid;) then I guess that part is OK. (Edit: I just realized I'm wrong about the last part; I was thinking of glGenTextures, not glBindTexture.)