Loading images (using their RGB(A) pixel data) into OpenGL textures - C++

In the main function:
img = cvLoadImage("test.JPG");
// OpenCV functions to load the image and create a matrix
CvMat *mat = cvCreateMat(img->height, img->width, CV_8SC3);
cvGetMat(img, mat, 0, 1);

// creating the 3-dimensional array at runtime
data = new float**[img->height];
for (int i = 0; i < img->height; i++) {
    data[i] = new float*[img->width];
}
for (int i = 0; i < img->height; i++) {
    for (int j = 0; j < img->width; j++)
        data[i][j] = new float[3];
}

// setting RGB values
for (int i = 0; i < img->height; i++)
{
    for (int j = 0; j < img->width; j++)
    {
        CvScalar scal = cvGet2D(mat, i, j);
        data[i][j][0] = scal.val[0];
        data[i][j][1] = scal.val[1];
        data[i][j][2] = scal.val[2];
    }
}
I am using OpenCV to get the image pixel data, storing it in the dynamically allocated array "data". Now I generate the texture and bind it:
glGenTextures(1, &texName);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
glTexEnvf(GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE, GL_MODULATE);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, img->width, img->height, 0, GL_RGB, GL_FLOAT, data);
glBindTexture(GL_TEXTURE_2D, texName);

glBegin(GL_QUADS);
glTexCoord2f(0, 0);
glVertex3f(-1, -1, 0);
glTexCoord2f(0, 1);
glVertex3f(-1, 1, 0);
glTexCoord2f(1, 1);
glVertex3f(1, 1, 0);
glTexCoord2f(1, 0);
glVertex3f(1, -1, 0);
glEnd();
There are no compilation errors, but at runtime the window displays the square I made, not the image I tried to convert into a texture.
How do I load an arbitrary image into a texture using the pixel data I extract with OpenCV? I have printed the RGB values and they seem legitimate, and the number of triplets printed is as expected.

glBindTexture should be called before the glTexParameteri/glTexImage2D calls, so OpenGL knows which texture you're setting up:
glEnable(GL_TEXTURE_2D);
glGenTextures(1, &texName);
glBindTexture(GL_TEXTURE_2D, texName);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
glTexEnvf(GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE, GL_MODULATE);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, img->width, img->height, 0, GL_RGB, GL_FLOAT, data);
More importantly, you are not setting up your data variable correctly. glTexImage2D expects a single contiguous block of pixel data; a pointer-to-pointer-to-pointer array hands it a list of pointers instead:
float* data = new float[img->height * img->width * 3];
for (int i = 0; i < img->height; i++)
{
    for (int j = 0; j < img->width; j++)
    {
        CvScalar scal = cvGet2D(mat, i, j);
        data[(i * img->width + j) * 3 + 0] = scal.val[0];
        data[(i * img->width + j) * 3 + 1] = scal.val[1];
        data[(i * img->width + j) * 3 + 2] = scal.val[2];
    }
}
Also, you will need to swap the order of the color components and convert them to the 0..1 range: OpenCV loads images as BGR with 8-bit channels (0..255), while the glTexImage2D call above expects RGB floats in 0..1.
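For reference, here is a combined sketch of the allocation and fill loop with both of those fixes applied, assuming cvLoadImage's default 8-bit BGR output (verify against your OpenCV build):
float* data = new float[img->height * img->width * 3];
for (int i = 0; i < img->height; i++)
{
    for (int j = 0; j < img->width; j++)
    {
        CvScalar scal = cvGet2D(mat, i, j);
        int base = (i * img->width + j) * 3;
        data[base + 0] = (float)(scal.val[2] / 255.0); // R (OpenCV stores blue first)
        data[base + 1] = (float)(scal.val[1] / 255.0); // G
        data[base + 2] = (float)(scal.val[0] / 255.0); // B
    }
}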

Related

Do I pass the wrong data to glTexImage2D?

I'm trying to make an OpenGL texture by populating a pixel buffer with data from a baked font. I'm taking each value from the font array and essentially making a bitmap.
The problem is that when I display the full texture I get noise; however, an 8x8 texture of a single glyph is displayed correctly.
The pixel buffer is 8-bit monochrome, so I pass GL_ALPHA as the buffer format.
I tried using 32bpp GL_RGBA format as well and it yields the same result.
DebugFont
LoadBakedFont(void)
{
    glEnable(GL_BLEND);
    glEnable(GL_TEXTURE_2D);
    glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA);

    unsigned char baked_font[128][8] = {}; // In my source code this is not empty :)
    unsigned char *pixels = (unsigned char*)malloc(sizeof(unsigned char) * 128 * 8 * 8);
    memset(pixels, 0, sizeof(unsigned char) * 128 * 8 * 8);

    int counter = 0;
    for(int i = 0; i < 128; ++i)
    {
        for(int j = 0; j < 8; ++j)
        {
            for(int k = 0; k < 8; ++k)
            {
                unsigned char val = (baked_font[i][j] >> k) & 1;
                pixels[counter++] = val == 1 ? 0xff : 0x00;
            }
        }
    }

    // Renders the exclamation mark perfectly
    for(int y = 0; y < 8; ++y)
    {
        for(int x = 0; x < 8; ++x)
        {
            unsigned char *test = pixels + (0x21 * 64);
            if(test[y * 8 + x])
                printf("#");
            else
                printf(".");
        }
        printf("\n");
    }

    // POD struct
    DebugFont font;
    glGenTextures(1, &font.tex);
    glBindTexture(GL_TEXTURE_2D, font.tex);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
    glTexImage2D(GL_TEXTURE_2D, 0, GL_ALPHA, 8 * 128, 8, 0, GL_ALPHA, GL_UNSIGNED_BYTE, pixels);
    glBindTexture(GL_TEXTURE_2D, 0);
    free(pixels);
    return font;
}
void
DrawTexture(DebugFont font)
{
    glBindTexture(GL_TEXTURE_2D, font.tex);
    glBegin(GL_QUADS);
    glTexCoord2f(0.0f, 0.0f); glVertex2f(0, 0);
    glTexCoord2f(1.0f, 0.0f); glVertex2f(8 * 128, 0);
    glTexCoord2f(1.0f, 1.0f); glVertex2f(8 * 128, 8);
    glTexCoord2f(0.0f, 1.0f); glVertex2f(0, 8);
    glEnd();
    glBindTexture(GL_TEXTURE_2D, 0);
}
(Screenshots in the original post: the full texture renders as random noise, while the single-glyph texture shows the exclamation mark correctly.)
The way you arrange the data makes sense for a tall 8x1024 image where each 8x8 makes up a character.
But you load it as a 1024x8 image instead, putting all the pixels in the wrong places.
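A minimal sketch of a fill loop that matches the 1024x8 layout declared by the glTexImage2D call, so glyph i occupies columns i*8 through i*8+7 (variable names taken from the question):
for(int i = 0; i < 128; ++i)          // glyph index
{
    for(int j = 0; j < 8; ++j)        // row within the glyph (y)
    {
        for(int k = 0; k < 8; ++k)    // column within the glyph (x)
        {
            unsigned char val = (baked_font[i][j] >> k) & 1;
            // row-major index into the 1024-pixel-wide atlas
            pixels[j * (8 * 128) + i * 8 + k] = val ? 0xff : 0x00;
        }
    }
}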

Calculating texture coordinates from a heightmap

I am currently building a height map terrain generator using OpenGL. It's a simple program that loads a height map image, iterates over the image data and generates vertices, indices and normals. In its current state it can render a height map with a single colour based on the normals.
My problem is generating correct UV coordinates for the diffuse map. It just comes out wrong:
This is the diffuse map I am trying to load:
Here is what I currently have:
Generate Vertices, Normals and Indices
// Generate vertices and texture coordinates
for (int row = 0; row <= this->imageHeight; row++)
{
    for (int column = 0; column <= this->imageWidth; column++)
    {
        float x = (float)column / (float)this->imageWidth;
        float y = (float)row / (float)this->imageHeight;
        float pixel = this->imageData[this->imageWidth * row + column];
        float z;
        if (row == this->imageHeight || column == this->imageWidth || row == 0 || column == 0)
        {
            z = 0.0f;
        }
        else
        {
            z = float(pixel / 256.0) * this->scale;
        }
        MeshV3 mesh;
        mesh.position = glm::vec3(x, y, z);
        mesh.normal = glm::vec3(0.0, 0.0, 0.0);
        mesh.texture = glm::vec2(x, y);
        this->mesh.push_back(mesh);
    }
}

// Generate indices
for (int row = 0; row < this->imageHeight; row++)
{
    for (int column = 0; column < this->imageWidth; column++)
    {
        int row1 = row * (this->imageWidth + 1);
        int row2 = (row + 1) * (this->imageWidth + 1);
        // triangle 1
        this->indices.push_back(glm::uvec3(row1 + column, row1 + column + 1, row2 + column + 1));
        // triangle 2
        this->indices.push_back(glm::uvec3(row1 + column, row2 + column + 1, row2 + column));
    }
}

// Generate normals
for (int i = 0; i < this->indices.size(); i++)
{
    glm::vec3 v1 = this->mesh[this->indices[i].x].position;
    glm::vec3 v2 = this->mesh[this->indices[i].y].position;
    glm::vec3 v3 = this->mesh[this->indices[i].z].position;
    glm::vec3 edge1 = v1 - v2;
    glm::vec3 edge2 = v1 - v3;
    glm::vec3 normal = glm::normalize(glm::cross(edge1, edge2));
    this->mesh[this->indices[i].x].normal += normal;
    this->mesh[this->indices[i].y].normal += normal;
    this->mesh[this->indices[i].z].normal += normal;
}
I load the diffuse map with the following method
void Terrein::getDIffuseMap()
{
    glGenTextures(1, &this->texture);
    glBindTexture(GL_TEXTURE_2D, this->texture); // all upcoming GL_TEXTURE_2D operations now have effect on this texture object
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);

    int width, height, nrChannels;
    std::string path = "assets/diffuse.jpg";
    this->diffuseData = stbi_load(path.c_str(), &width, &height, &nrChannels, 0);
    if (this->diffuseData)
    {
        glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, width, height, 0, GL_RGB, GL_UNSIGNED_BYTE, this->diffuseData);
        glGenerateMipmap(GL_TEXTURE_2D);
    }
    else
    {
        std::cout << "Failed to load diffuse texture" << std::endl;
    }
}
I can't seem to figure out what might be wrong here. Is there an issue with how I am loading the image, or am I not calculating the texture coordinates correctly? Please let me know if there is anything else I should provide. I have been stuck on this for a few days now. Thanks!
By default OpenGL assumes that the start of each row of an image is aligned to 4 bytes.
This is because the GL_UNPACK_ALIGNMENT parameter by default is 4.
Since the image has 3 color channels (GL_RGB) and is tightly packed, the size of a row of the image may not be aligned to 4 bytes.
When an RGB image with 3 color channels is loaded into a texture object, GL_UNPACK_ALIGNMENT therefore has to be set to 1:
glPixelStorei(GL_UNPACK_ALIGNMENT, 1);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, width, height, 0,
GL_RGB, GL_UNSIGNED_BYTE, this->diffuseData);
The diffuse image in the question has dimensions of 390x390, so each row of the image has a size of 390 * 3 = 1170 bytes.
Since 1170 is not divisible by 4 (1170 / 4 = 292.5), the start of a row is not aligned to 4 bytes.
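To illustrate the arithmetic, here is a small sketch of the row stride GL assumes for a given unpack alignment (variable names are illustrative only):
int rowBytes = width * 3;  // tightly packed RGB row, e.g. 390 * 3 = 1170
int stride   = (rowBytes + alignment - 1) / alignment * alignment;
// with alignment = 4, GL reads 1172 bytes per row, so a tightly packed
// buffer drifts by 2 bytes per row and the texture appears sheared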
Related question: Failing to map a simple unsigned byte rgb texture to a quad

OpenGL Texture corruption

I am rendering a simple pixel buffer in OpenGL. First I create a quad, then I create a texture. It works correctly as long as there are no changes to the buffer. When I change my buffer and upload the new data to the texture with glTexSubImage2D or glTexImage2D, the top section of my texture gets corrupted, as in the image.
I create my buffer like this:
int length = console->width * console->height * 3;
GLubyte buf[length];
for(int i = 0; i < length; i += 3) {
    buf[i] = 0;
    buf[i + 1] = 0;
    buf[i + 2] = 0;
}
console->buffer = buf;
I create the texture like this:
glGenTextures(1, &console->textureID);
glBindTexture(GL_TEXTURE_2D, console->textureID);
glPixelStorei(GL_UNPACK_ALIGNMENT, 1);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, console->width, console->height, 0, GL_RGB, GL_UNSIGNED_BYTE, console->buffer);
tpUseShader(console); // -> calls glUseProgram(console->programID);
glUniform1i(glGetUniformLocation(console->programID, "texture"), 0);
I update the texture like this:
glBindTexture(GL_TEXTURE_2D, console->textureID);
glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, console->width, console->height, GL_RGB, GL_UNSIGNED_BYTE, console->buffer);
For testing, I change my buffer like this in the render function:
if(console->buffer[6] == 255) {
    console->buffer[6] = 0;    // 6 is the second pixel's red value
    console->buffer[10] = 255; // 10 is the third pixel's green value
} else {
    console->buffer[6] = 255;
    console->buffer[10] = 0;
}
Then I call tpUseShader and render my quad.
How can I fix this problem?
I changed my console size to 10x10 and ran it again; this time I got the same results, but in the image you can see that the 3rd pixel from the bottom left is dark blue. When I print the value with printf("3rd pixel: %d - %d - %d\n", console->buffer[12], console->buffer[13], console->buffer[14]); I get red: 0, green: 0, blue: 0. That means my buffer is normal.
I got the solution. As pleluron said in the comments of the question, buf was a local stack array that went out of scope, leaving console->buffer dangling. I changed buf into a heap allocation owned by console->buffer, and it worked! Now my buffer initialization code looks like this:
console->buffer = malloc(sizeof(GLubyte) * length);
for(int i = 0; i < length; i += 3) {
    console->buffer[i] = 0;
    console->buffer[i + 1] = 0;
    console->buffer[i + 2] = 0;
}
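As a side note (not from the original post), the zeroing loop can be folded into the allocation itself:
console->buffer = calloc(length, sizeof(GLubyte)); // calloc returns zero-filled memory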

OpenGL changing color of generated texture

I'm creating a sheet of characters and symbols from a font file, which works fine, except on the generated sheet all the pixels are black (with varying alpha). I would prefer them to be white so I can apply color multiplication and have different colored text. I realize that I can simply invert the color in the fragment shader, but I want to reuse the same shader for all my GUI elements.
I'm following this tutorial: http://en.wikibooks.org/wiki/OpenGL_Programming/Modern_OpenGL_Tutorial_Text_Rendering_02
Here's a snippet:
// Create map texture
glActiveTexture(GL_TEXTURE0);
glGenTextures(1, &map);
glBindTexture(GL_TEXTURE_2D, map);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, mapWidth, mapHeight, 0, GL_RGBA, GL_UNSIGNED_BYTE, 0);
glPixelStorei(GL_UNPACK_ALIGNMENT, 1);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);

// Draw bitmaps onto map
for (uint i = start; i < end; i++) {
    charInfo curChar = character[i];
    if (FT_Load_Char(face, i, FT_LOAD_RENDER)) {
        cout << "Loading character " << (char)i << " failed!" << endl;
        continue;
    }
    glTexSubImage2D(GL_TEXTURE_2D, 0, curChar.mapX, 0, curChar.width, curChar.height, GL_ALPHA, GL_UNSIGNED_BYTE, glyph->bitmap.buffer);
}
The buffer of each glyph contains values of 0-255 for the alpha of the pixels. My question is: how do I generate white pixels instead of black? Is there a setting for this? (I've tried some blend modes, but without success.)
Since you create the texture with
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, mapWidth, mapHeight, 0, GL_RGBA, GL_UNSIGNED_BYTE, 0);
you can either change the GL_RGBA to GL_RED (or GL_LUMINANCE for pre-3.0 OpenGL) or you can create the RGBA buffer and copy the glyph data there.
I.e., you have
glyph->bitmap.buffer
then you do
unsigned char* glyphRGBA = new unsigned char[curChar.width * curChar.height * 4];
for(int j = 0; j < curChar.height; j++)
    for(int i = 0; i < curChar.width; i++)
    {
        int ofs = j * curChar.width + i;
        for(int k = 0; k < 3; k++)
            glyphRGBA[ofs * 4 + k] = YourTextColor[k];
        // set alpha
        glyphRGBA[ofs * 4 + 3] = glyph->bitmap.buffer[ofs];
    }
In the code above, YourTextColor is an unsigned char[3] array with the RGB components of the text color. The glyphRGBA array can then be fed to glTexSubImage2D with format GL_RGBA.
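For instance, the upload inside the question's loop might then look like this (a sketch; it assumes the map texture is still bound and that glyphRGBA is released after the call):
glTexSubImage2D(GL_TEXTURE_2D, 0, curChar.mapX, 0, curChar.width, curChar.height,
                GL_RGBA, GL_UNSIGNED_BYTE, glyphRGBA);
delete[] glyphRGBA;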

Can't load image into OpenGL texture if it is larger than 256x128

I am trying to load a JPEG image into a texture which I will use for volume rendering. However, the image fails to load (only a white rectangle is shown) whenever I try to load a JPEG larger than 256x128 pixels.
I am using OpenCV to convert the JPEG into raw values. This sounds like overkill, but I had OpenCV already. I am open to using another library.
My code may seem strange, but that is because I am using luminance values while also using an alpha value. As a result, I am taking the luminance value and using it across all RGBA channels for now.
This code worked when I used raw luminance data. But now I am just trying to load a single JPEG image. (It works when my image is 256x128, but fails if it is bigger.)
My texture loading code:
unsigned char* chRGBABuffer = new unsigned char[IMAGEWIDTH * IMAGEHEIGHT * IMAGECOUNT * 4];

// Only create 1 3D texture now
glGenTextures(1, (GLuint*)&textureID3D);

// Set the properties of the texture.
glBindTexture(GL_TEXTURE_3D, textureID3D);
glTexEnvi(GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE, GL_REPLACE);
glTexParameteri(GL_TEXTURE_3D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_BORDER);
glTexParameteri(GL_TEXTURE_3D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_BORDER);
glTexParameteri(GL_TEXTURE_3D, GL_TEXTURE_WRAP_R, GL_CLAMP_TO_BORDER);
glTexParameteri(GL_TEXTURE_3D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_3D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);

// Convert the data to RGBA data.
// Here we are simply putting the same value into the R, G, B and A channels.
// This can be changed depending on the source data.
// Usually for raw data, the alpha value will
// be constructed by a threshold value given by the user.
for (int i = 0; i < IMAGECOUNT; ++i)
{
    cv::Mat image;
    image = cv::imread(filename, CV_LOAD_IMAGE_GRAYSCALE);
    unsigned char * chBuffer = image.data;
    if (!image.data) // Check for invalid input
    {
        fprintf(stderr, "Could not open or find image\n");
        return -1;
    }
    for (int nIndx = 0; nIndx < IMAGEWIDTH * IMAGEHEIGHT; ++nIndx)
    {
        chRGBABuffer[nIndx * 4] = chBuffer[nIndx];
        chRGBABuffer[nIndx * 4 + 1] = chBuffer[nIndx];
        chRGBABuffer[nIndx * 4 + 2] = chBuffer[nIndx];
        chRGBABuffer[nIndx * 4 + 3] = chBuffer[nIndx];
    }
}

glTexImage3D(GL_TEXTURE_3D, 0, GL_RGBA, IMAGEWIDTH, IMAGEHEIGHT, IMAGECOUNT, 0,
             GL_RGBA, GL_UNSIGNED_BYTE, (GLvoid *)chRGBABuffer);
glBindTexture(GL_TEXTURE_3D, 0);
Probably you're simply running out of texture memory. Volumetric images are memory hogs, and only a few OpenGL implementations are capable of swapping in portions of a 3D texture on demand (at a significant performance penalty).
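One way to test that hypothesis is to query the implementation's 3D texture limit and do a proxy-texture dry run before the real upload (a sketch, not part of the original answer):
GLint maxSize = 0;
glGetIntegerv(GL_MAX_3D_TEXTURE_SIZE, &maxSize); // per-dimension limit

// Proxy upload: allocates nothing, but the level's width reads back
// as 0 if the implementation cannot support a texture of this shape.
glTexImage3D(GL_PROXY_TEXTURE_3D, 0, GL_RGBA, IMAGEWIDTH, IMAGEHEIGHT, IMAGECOUNT, 0,
             GL_RGBA, GL_UNSIGNED_BYTE, NULL);
GLint testWidth = 0;
glGetTexLevelParameteriv(GL_PROXY_TEXTURE_3D, 0, GL_TEXTURE_WIDTH, &testWidth);
if (testWidth == 0)
    fprintf(stderr, "3D RGBA texture of %dx%dx%d is not supported\n",
            IMAGEWIDTH, IMAGEHEIGHT, IMAGECOUNT);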