Can load a single texture but having problems loading into an array - C++

I am having trouble loading textures into an array of GLuints, but the problem doesn't occur when loading a single texture. When I debug, I see that groundTextures is never assigned any values and holds only 0s even after setTexture(...) is called,
yet the same approach works for the single texture loaded into textureId. Either I'm missing something obvious or my understanding of OpenGL or C++ is lacking.
Relevant variables and functions within this context
GLuint groundTextures[3];
GLuint textureId;
Ground Constructor
Ground::Ground(void) :
textureId(0),groundTextures()
{
setAllTexture();
}
draw: Ground function that draws... well, the ground
void Ground::draw()
{
glPushMatrix();
//Ground
glPushMatrix();
glTranslatef(0,-1,0); //all buildings and ground
//ground
glPushMatrix();
glTranslatef(-5,0,-20);
glScalef(40,0.2,40);
setTexture("Textures/rue2.bmp", textureId, true, true);
drawRectangle(1.0f);
glPopMatrix();
glPopMatrix();
}
setTexture: sets texture state and binds the texture
void Ground::setTexture(const char* textureName, GLuint& textId, bool stretchX, bool stretchZ)
{
if (textId == 0)
{
textId = (GLuint)createTexture(textureName);
}
if (textId != 0)
{
//enable texture coordinate generation
glEnable(GL_NORMALIZE);
glEnable(GL_DEPTH_TEST);
glEnable(GL_COLOR_MATERIAL);
glEnable(GL_TEXTURE_2D);
glEnable(GL_BLEND); //Enable alpha blending
glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA); //Set the blend function
glBindTexture(GL_TEXTURE_2D, textId);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, stretchX ? GL_REPEAT : GL_CLAMP);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, stretchZ ? GL_REPEAT : GL_CLAMP);
glTexEnvi(GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE, GL_MODULATE);
}
}
setAllTexture: loads textures into the array of GLuints
int Ground::setAllTexture()
{
setTexture("Textures/asphalt.bmp",groundTextures[0],false, false );
//set up textures for all of them and bind them to ground textures.
glBindTexture(GL_TEXTURE_2D, groundTextures[0]);
setTexture("Textures/concreteFloor.bmp",groundTextures[1],false, false);
//set up textures for all of them and bind them to ground textures.
glBindTexture(GL_TEXTURE_2D, groundTextures[1]);
setTexture("Textures/dirtyGrass.bmp",groundTextures[2],false, false);
//set up textures for all of them and bind them to ground texture
glBindTexture(GL_TEXTURE_2D, groundTextures[2]);
//check to see if all textures loaded properly
for (unsigned int i = 0; i < 3; i++)
{
if (groundTextures[i] == 0)
return false;
}
//if you're here, every texture was loaded correctly
return true;
}
loadBMP: loads a 24-bit RGB bitmap
Image* loadBMP(const char* filename) {
ifstream input;
input.open(filename, ifstream::binary);
assert(!input.fail() || !"Could not find file");
char buffer[2];
input.read(buffer, 2);
assert(buffer[0] == 'B' && buffer[1] == 'M' || !"Not a bitmap file");
input.ignore(8);
int dataOffset = readInt(input);
//Read the header
int headerSize = readInt(input);
int width;
int height;
switch(headerSize) {
case 40:
//V3
width = readInt(input);
height = readInt(input);
input.ignore(2);
assert(readShort(input) == 24 || !"Image is not 24 bits per pixel");
assert(readShort(input) == 0 || !"Image is compressed");
break;
case 12:
//OS/2 V1
width = readShort(input);
height = readShort(input);
input.ignore(2);
assert(readShort(input) == 24 || !"Image is not 24 bits per pixel");
break;
case 64:
//OS/2 V2
assert(!"Can't load OS/2 V2 bitmaps");
break;
case 108:
//Windows V4
assert(!"Can't load Windows V4 bitmaps");
break;
case 124:
//Windows V5
assert(!"Can't load Windows V5 bitmaps");
break;
default:
assert(!"Unknown bitmap format");
}
//Read the data
int bytesPerRow = ((width * 3 + 3) / 4) * 4; //BMP rows are padded up to a multiple of 4 bytes
int size = bytesPerRow * height;
auto_array<char> pixels(new char[size]);
input.seekg(dataOffset, ios_base::beg);
input.read(pixels.get(), size);
//Get the data into the right format
auto_array<char> pixels2(new char[width * height * 3]);
for(int y = 0; y < height; y++) {
for(int x = 0; x < width; x++) {
for(int c = 0; c < 3; c++) {
pixels2[3 * (width * y + x) + c] =
pixels[bytesPerRow * y + 3 * x + (2 - c)];
}
}
}
input.close();
return new Image(pixels2.release(), width, height);
}
createTexture: creates a texture and returns a GLuint
unsigned int createTexture( const char* imageName )
{
Image* image = loadBMP(imageName);
GLuint textureId = 0;
glGenTextures(1, &textureId);
glBindTexture(GL_TEXTURE_2D, textureId);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, image->width, image->height, 0, GL_RGB, GL_UNSIGNED_BYTE, image->pixels);
delete image;
return (unsigned int) textureId;
}

Related

Do I pass the wrong data to glTexImage2D?

I'm trying to make an OpenGL texture by populating a pixel buffer with data from a baked font. I'm taking each value from the font array and essentially making a bitmap.
The problem is that when I display the full texture I get noise. However, an 8x8 texture created from a single glyph displays correctly.
The pixel buffer is 8-bit monochrome, so I pass GL_ALPHA as the buffer format.
I tried using 32bpp GL_RGBA format as well and it yields the same result.
DebugFont
LoadBakedFont(void)
{
glEnable(GL_BLEND);
glEnable(GL_TEXTURE_2D);
glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA);
unsigned char baked_font[128][8] = {}; //In my source code this is not empty :)
unsigned char *pixels = (unsigned char*)malloc(sizeof(unsigned char) * 128 * 8 * 8);
memset(pixels, 0, sizeof(unsigned char) * 128 * 8 * 8);
int counter = 0;
for(int i = 0; i < 128; ++i)
{
for(int j = 0; j < 8; ++j)
{
for(int k = 0; k < 8; ++k)
{
unsigned char val = (baked_font[i][j] >> k & 1);
pixels[counter++] = val == 1 ? 0xff : 0x00;
}
}
}
//Renders the exclamation mark perfectly
for(int y = 0; y < 8; ++y)
{
for(int x = 0; x < 8; ++x)
{
unsigned char *test = pixels + (0x21 * 64);
if(test[y * 8 + x])
printf("#");
else
printf(".");
}
printf("\n");
}
//POD struct
DebugFont font;
glGenTextures(1, &font.tex);
glBindTexture(GL_TEXTURE_2D, font.tex);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
glTexImage2D(GL_TEXTURE_2D, 0, GL_ALPHA, 8 * 128, 8, 0, GL_ALPHA, GL_UNSIGNED_BYTE, pixels);
glBindTexture(GL_TEXTURE_2D, 0);
free(pixels);
return font;
}
void
DrawTexture(DebugFont font)
{
glBindTexture(GL_TEXTURE_2D, font.tex);
glBegin(GL_QUADS);
glTexCoord2f(0.0f, 0.0f); glVertex2f(0,0);
glTexCoord2f(1.0f, 0.0f); glVertex2f(8 * 128,0);
glTexCoord2f(1.0f, 1.0f); glVertex2f(8 * 128, 8);
glTexCoord2f(0.0f, 1.0f); glVertex2f(0, 8);
glEnd();
glBindTexture(GL_TEXTURE_2D, 0);
}
(Screenshots: the full texture renders as random noise, while the single-glyph texture shows the exclamation mark correctly.)
The way you arrange the data makes sense for a tall 8x1024 image where each 8x8 makes up a character.
But you load it as a 1024x8 image instead, putting all the pixels in the wrong places.
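A minimal sketch of one possible repacking, reusing the question's variable names: write each glyph's rows into a 1024x8 layout (128 glyphs side by side) so the memory order matches the width and height passed to glTexImage2D.
const int atlas_w = 8 * 128; //1024 texels wide, 8 tall
for (int i = 0; i < 128; ++i) //glyph index
{
for (int j = 0; j < 8; ++j) //row within the glyph
{
for (int k = 0; k < 8; ++k) //column within the glyph
{
unsigned char val = (baked_font[i][j] >> k) & 1;
pixels[j * atlas_w + (i * 8) + k] = val ? 0xff : 0x00;
}
}
}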

SDL_TTF rendering garbage to texture

I am attempting to use OpenGL and SDL, using SDL_ttf to render text to a texture, but the code is rendering garbage.
My "Render to texture code":
GLuint textToTexture(std::string & text, TTF_Font* font, glm::vec4 textColour, glm::vec4 bgColour)
{
if (!TTF_WasInit())
{
if (TTF_Init() == -1)
exit(6);
}
SDL_Color colour = { (Uint8)(textColour.r*255), (Uint8)(textColour.g*255), (Uint8)(textColour.b*255), (Uint8)(textColour.a*255) };
SDL_Color bg = { (Uint8)(bgColour.r*255), (Uint8)(bgColour.g*255), (Uint8)(bgColour.b*255), (Uint8)(bgColour.a*255) };
SDL_Surface *stringImage = NULL;
stringImage = TTF_RenderText_Blended(font, text.c_str(), colour);
if (stringImage == NULL)
{
exit(5);
}
GLuint trueH = powerofTwo(stringImage->h);
GLuint trueW = powerofTwo(stringImage->w);
unsigned char* pixels = NULL;
GLuint w = stringImage->w;
GLuint h = stringImage->h;
GLuint colours = stringImage->format->BytesPerPixel;
pixels = padTexture((unsigned char*)stringImage->pixels, w, h, pixels, trueW, trueH, colours);
GLuint format, internalFormat;
if (colours == 4) {
if (stringImage->format->Rmask == 0x000000ff)
format = GL_RGBA;
else
format = GL_BGRA;
}
else {
// no alpha
if (stringImage->format->Rmask == 0x000000ff)
format = GL_RGB;
else
format = GL_BGR;
}
internalFormat = (colours == 4) ? GL_RGBA : GL_RGB;
GLuint texId = 0;
//GLuint texture;
glGenTextures(1, &texId);
glBindTexture(GL_TEXTURE_2D, texId);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexImage2D(GL_TEXTURE_2D, 0, internalFormat, trueW, trueH, 0,format, GL_UNSIGNED_BYTE, pixels);
// SDL surface was used to generate the texture but is no longer
// required. Release it to free memory
SDL_FreeSurface(stringImage);
free(pixels);
return texId;
}
The code for computing the correct padded dimensions, rounding up to the next power of two (e.g. 138 becomes 256, 25 becomes 32):
int powerofTwo(int num)
{
if (num != 0)
{
num--;
num |= num >> 1; // Divide by 2^k for consecutive doublings of k up to 32,
num |= num >> 2; // and then or the results.
num |= num >> 4;
num |= num >> 8;
num |= num >> 16;
num++;
}
return num;
}
and finally, the code that copies the bytes to a texture of the correct dimensions:
unsigned char* padTexture(unsigned char * src, int srcW, int srcH, unsigned char * dest, int width, int height, int bpp)
{
dest = (unsigned char*)calloc(1, width*height*bpp);
for (int i = 0; i < srcH; i++)
{
memcpy(dest + (width*i*bpp),src + (srcW*i*bpp), srcW*bpp);
}
return dest;
}
The result of this code is as follows: (screenshot: a garbled texture)
I have confirmed and error checked that SDL_TTF is properly initialized elsewhere in the codebase, and that the font is also being loaded.
I have tested with three different ttf fonts, with the same results.
Also, if I use any other TTF render function (Shaded, Solid, etc.), a solid quad is rendered, and the "colours" variable in the textToTexture function ends up as 1.
Additional:
As I previously stated, I tested with three ttf fonts:
MavenPro-Regular,
HelveticaNeueLTStd-Th
and another I found off the internet.
I was trying to render the string "Select Scenario".
The pre-padding image dimensions are 138x25 pixels.
The post-padding image dimensions are 256x32 pixels.
Update 1:
After fixing the bpp issue the new texture is as follows:
This image changes every time I run the program.
Update 2:
After fixing the additional errors spotted in the padding code, and setting the padded pixel data on the texture itself: when I use TTF_RenderText_Blended all I get is a black quad, and when I use TTF_RenderText_Shaded I get:
Update 3:
I used SDL_SaveBMP immediately before calling the GL code and after calling TTF_RenderText_Blended; the result was a completely white image (given a white text colour).
When I do the same using TTF_RenderText_Solid, the saved image is as it should be, but it is rendered by OpenGL like the images you see above.
SDL_TTF initialized fine, the fonts load without error, and the text rendering returns no errors, so I can't think what to do next.
Update 4:
I have since refactored all the TTF code into a single function and removed the padding code (as modern OpenGL doesn't seem to care about it). However, despite all project settings and code now being identical to a test project that is known to work on the same hardware, the problem persists.
GLuint textToTexture(const char * text, const char * font, glm::vec4 textColour, glm::vec4 bgColour, unsigned int & texID)
{
if (!TTF_WasInit()) {
if (TTF_Init() == -1)
exit(6);
}
SDL_Color colour = { (Uint8)(textColour.r * 255), (Uint8)(textColour.g * 255), (Uint8)(textColour.b * 255),(Uint8)(textColour.a * 255) };
SDL_Color bg = { (Uint8)(bgColour.r * 255), (Uint8)(bgColour.g * 255), (Uint8)(bgColour.b * 255),255 };
TTF_Font* fontObj = TTF_OpenFont(font, 24);
if (!fontObj)
{
SDL_ShowSimpleMessageBox(SDL_MESSAGEBOX_ERROR,
"Texture Error",
"Cannot load font to create texture.",
NULL);
return 0;
}
SDL_Surface *image = NULL;
image = TTF_RenderText_Blended(fontObj, text, colour);
if (image == NULL)
{
exit(5);
//exitFatalError("String surface not created.");
std::cout << "String surface not created." << std::endl;
}
unsigned char* pixels = NULL;
GLuint w = image->w;
GLuint h = image->h;
GLuint colours = image->format->BytesPerPixel;
GLuint externalFormat, internalFormat;
SDL_PixelFormat *format = image->format;
if (colours == 4) {
if (image->format->Rmask == 0x000000ff)
externalFormat = GL_RGBA;
else
externalFormat = GL_BGRA;
}
else {
// no alpha
if (image->format->Rmask == 0x000000ff)
externalFormat = GL_RGB;
else
externalFormat = GL_BGR;
}
internalFormat = (colours == 4) ? GL_RGBA : GL_RGB;
GLuint texId = 0;
//GLuint texture;
glGenTextures(1, &texID);
glBindTexture(GL_TEXTURE_2D, texID);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexImage2D(GL_TEXTURE_2D, 0, internalFormat, w, h, 0, externalFormat, GL_UNSIGNED_BYTE, image->pixels);
//glTexImage2D(GL_TEXTURE_2D, 0, internalFormat, trueW, trueH, 0, externalFormat, GL_UNSIGNED_BYTE, pixels);
glGenerateMipmap(GL_TEXTURE_2D);
//// SDL surface was used to generate the texture but is no longer
//// required. Release it to free memory
SDL_FreeSurface(image);
TTF_CloseFont(fontObj);
return texID;
}
I have a workaround that saves the image to a bmp, then reloads it and creates a texture, but it only works when I use TTF_RenderText_Shaded. If I use TTF_RenderText_Blended, I get a single-colour image corresponding to the text colour.
glTexImage2D(GL_TEXTURE_2D, 0, internalFormat, trueH, trueW, 0,format, GL_UNSIGNED_BYTE, pixels);
trueH and trueW are in the wrong order; glTexImage2D takes width first, then height.
memcpy(src + (srcW*i*bpp), dest + (width*i*bpp), srcW*bpp);
The source and destination arguments are reversed; memcpy copies into its first argument.
dest = (unsigned char*)calloc(0, width*height*bpp);
This allocates 0 elements of size width*height*bpp, which is 0 bytes. The first argument should be 1 instead of 0.
Here is a complete example:
#include <SDL2/SDL.h>
#include <GL/gl.h>
#include <SDL2/SDL_ttf.h>
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
static unsigned char* padTexture(unsigned char * src, int srcW, int srcH, unsigned char * dest, int width, int height, int bpp, const SDL_Palette *palette)
{
int dst_bpp = (bpp == 1) ? 4 : bpp;
dest = (unsigned char*)calloc(1, width*height*dst_bpp);
if(bpp != 1) {
for (int i = 0; i < srcH; i++)
{
memcpy(dest + (width*i*bpp), src + (srcW*i*bpp), srcW*bpp);
}
} else {
/* indexed - read colours from palette */
for(int i = 0; i < srcH; i++) {
for(int j = 0; j < srcW; j++) {
memcpy(dest + (width*i+j)*dst_bpp,
&palette->colors[src[srcW*i+j]], sizeof(SDL_Color));
}
}
}
return dest;
}
static int powerofTwo(int num) {
if (num != 0)
{
num--;
num |= num >> 1; // Divide by 2^k for consecutive doublings of k up to 32,
num |= num >> 2; // and then or the results.
num |= num >> 4;
num |= num >> 8;
num |= num >> 16;
num++;
}
return num;
}
static GLuint textToTexture(const char *text, TTF_Font* font) {
if (!TTF_WasInit()) {
if (TTF_Init() == -1)
exit(6);
}
SDL_Color colour = { 255, 255, 255, 255 };
SDL_Color bg = { 0, 0, 0, 255 };
SDL_Surface *stringImage = NULL;
// stringImage = TTF_RenderText_Blended(font, text, colour);
stringImage = TTF_RenderText_Shaded(font, text, colour, bg);
if (stringImage == NULL) {
exit(5);
}
GLuint trueH = powerofTwo(stringImage->h);
GLuint trueW = powerofTwo(stringImage->w);
unsigned char* pixels = NULL;
GLuint w = stringImage->w;
GLuint h = stringImage->h;
GLuint colours = stringImage->format->BytesPerPixel;
pixels = padTexture((unsigned char*)stringImage->pixels, w, h, pixels, trueW, trueH,
colours, stringImage->format->palette);
GLuint format, internalFormat;
/* If indexed, want resulting image to be 32bit */
if(colours == 1) {
colours = 4;
}
if (colours == 4) {
if (stringImage->format->Rmask == 0x000000ff)
format = GL_RGBA;
else
format = GL_BGRA;
}
else {
// no alpha
if (stringImage->format->Rmask == 0x000000ff)
format = GL_RGB;
else
format = GL_BGR;
}
internalFormat = (colours == 4) ? GL_RGBA : GL_RGB;
GLuint texId = 0;
//GLuint texture;
glGenTextures(1, &texId);
glBindTexture(GL_TEXTURE_2D, texId);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexImage2D(GL_TEXTURE_2D, 0, internalFormat, trueW, trueH, 0,format, GL_UNSIGNED_BYTE, pixels);
// SDL surface was used to generate the texture but is no longer
// required. Release it to free memory
SDL_FreeSurface(stringImage);
free(pixels);
return texId;
}
int main(int argc, char* argv[])
{
SDL_Init(SDL_INIT_VIDEO);
TTF_Init();
SDL_Window *window = SDL_CreateWindow("SDL2 Example", SDL_WINDOWPOS_UNDEFINED, SDL_WINDOWPOS_UNDEFINED, 600, 400, SDL_WINDOW_OPENGL);
SDL_GLContext gl_ctx = SDL_GL_CreateContext(window);
TTF_Font *font = TTF_OpenFont(".fonts/tahoma.ttf", 16);
if(font) {
printf("font loaded\n");
textToTexture("Select Scenario", font);
TTF_CloseFont(font);
}
int quit = 0;
while(!quit) {
SDL_Event ev;
while(SDL_PollEvent(&ev)) {
if(ev.type == SDL_QUIT || ev.type == SDL_KEYUP) {
quit = 1;
}
}
glClearColor(0.1f, 0.1f, 0.1f, 1.0f);
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
glLoadIdentity();
glEnable(GL_BLEND);
glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA);
glEnable(GL_TEXTURE_2D);
glColor3f(1.0f, 1.0f, 1.0f);
glBegin(GL_QUADS);
glTexCoord2f(0, 1);
glVertex2f(-0.5, -0.5);
glTexCoord2f(0, 0);
glVertex2f(-0.5, 0.5);
glTexCoord2f(1, 0);
glVertex2f(0.5, 0.5);
glTexCoord2f(1, 1);
glVertex2f(0.5, -0.5);
glEnd();
glFlush();
SDL_GL_SwapWindow(window);
}
SDL_GL_DeleteContext(gl_ctx);
SDL_DestroyWindow(window);
TTF_Quit();
SDL_Quit();
return 0;
}

Load image with GDAL Libraries (VC++)

I have a problem when I try to load an image with the GDAL libraries and display it in an OpenGL control. The problem is the colour, as you can see in the picture.
And this is the function that generates a texture from the image:
GLuint COpenGLControl::ReadGDALData(CString filename)
{
BYTE* tempReturn;
GLuint texture;
GDALDataset *poDataset;
GDALAllRegister();
poDataset = (GDALDataset *) GDALOpen((const char *)(CStringA)filename, GA_ReadOnly);
int Height = poDataset->GetRasterXSize(), Width = poDataset->GetRasterYSize();
LONG LineBytes = (Width*8+31)/32*4;
BYTE * pData = (BYTE *)new char[ LineBytes * Height * 3];
if (poDataset == NULL)
{
AfxMessageBox("Couldn't open selected file!");
return NULL;
}
nBands = poDataset->GetRasterCount();
GDALRasterBand **poBand;
poBand = new GDALRasterBand *[nBands];
if (poBand == NULL)
{
AfxMessageBox("Couldn't open the bands!", MB_ICONWARNING);
return NULL;
}
for (int i=0; i<nBands; i++)
{
poBand[i] = poDataset->GetRasterBand(i+1);
if (poBand[i] == NULL)
{
AfxMessageBox("Couldn't open selected bands", MB_ICONWARNING);
return NULL;
}
}
int BandChoice = 2;
nXsize = poBand[BandChoice]->GetXSize();
nYsize = poBand[BandChoice]->GetYSize();
if (BandChoice == 1)
{
poBandBlock_Gray = (BYTE*)CPLMalloc(sizeof(BYTE)*(nXsize*nYsize));
poBand[BandChoice]->RasterIO(GF_Read, 0, 0, nXsize, nYsize, poBandBlock_Gray, nXsize, nYsize, poBand[BandChoice]->GetRasterDataType(), 0, 0);
}
else
{
int nXsize_R, nXsize_G, nXsize_B;
int nYsize_R, nYsize_G, nYsize_B;
int BandChoiceR = 0;
int BandChoiceG = 1;
int BandChoiceB = 2;
nXsize_R = poBand[BandChoiceR]->GetXSize();
nXsize_G = poBand[BandChoiceG]->GetXSize();
nXsize_B = poBand[BandChoiceB]->GetXSize();
nYsize_R = poBand[BandChoiceR]->GetYSize();
nYsize_G = poBand[BandChoiceG]->GetYSize();
nYsize_B = poBand[BandChoiceB]->GetYSize();
nXsize = nXsize_R;
nYsize = nYsize_R;
poBandBlock_R = (BYTE*)CPLMalloc(sizeof(BYTE)*(nXsize_R*nYsize_R));
poBandBlock_G = (BYTE*)CPLMalloc(sizeof(BYTE)*(nXsize_G*nYsize_G));
poBandBlock_B = (BYTE*)CPLMalloc(sizeof(BYTE)*(nXsize_B*nYsize_B));
poBand[BandChoiceR]->RasterIO(GF_Read, 0, 0, nXsize_R, nYsize_R, poBandBlock_R, nXsize_R, nYsize_R, poBand[BandChoiceR]->GetRasterDataType(), 0, 0);
poBand[BandChoiceG]->RasterIO(GF_Read, 0, 0, nXsize_G, nYsize_G, poBandBlock_G, nXsize_G, nYsize_G, poBand[BandChoiceG]->GetRasterDataType(), 0, 0);
poBand[BandChoiceB]->RasterIO(GF_Read, 0, 0, nXsize_B, nYsize_B, poBandBlock_B, nXsize_B, nYsize_B, poBand[BandChoiceB]->GetRasterDataType(), 0, 0);
delete poDataset;
}
if (BandChoice == 1)
{
for ( int i=0; i < Height; i++)
{
for ( int j=0; j < Width; j++)
{
pData[(Height-i-1) * LineBytes + j] = poBandBlock_Gray[i*Width + j];
}
}
CPLFree(poBandBlock_Gray);
}
else
{
int j2 ;
for ( int i=0; i<Height; i++)
{
for ( int j=0, j2=0; j < Width, j2 < 3 * Width; j++, j2+=3)
{
pData[(Height-i-1)*LineBytes + j2+2] = poBandBlock_R[i*Width + j];
pData[(Height-i-1)*LineBytes + j2+1] = poBandBlock_G[i*Width + j];
pData[(Height-i-1)*LineBytes + j2] = poBandBlock_B[i*Width + j];
}
}
CPLFree(poBandBlock_B);
CPLFree(poBandBlock_R);
CPLFree(poBandBlock_G);
}
// allocate a texture name
glGenTextures( 1, &texture );
// select our current texture
glBindTexture( GL_TEXTURE_2D, texture );
// select modulate to mix texture with color for shading
glTexEnvf( GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE, GL_MODULATE );
// when texture area is small, bilinear filter the closest mipmap
glTexParameterf( GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER,
GL_LINEAR_MIPMAP_NEAREST );
// when texture area is large, bilinear filter the first mipmap
glTexParameterf( GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR );
// if wrap is true, the texture wraps over at the edges (repeat)
// ... false, the texture ends at the edges (clamp)
glTexParameterf( GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, FALSE );
glTexParameterf( GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, FALSE );
// build our texture mipmaps
gluBuild2DMipmaps( GL_TEXTURE_2D, 3, Width, Height, GL_RGB, GL_UNSIGNED_BYTE, pData );
// free buffer
delete [] pData; //pData was allocated with new[], so use delete[] rather than free()
return texture;
}
This is the Draw function:
void COpenGLControl::OnDraw(CDC *pDC)
{
// TODO: Camera controls
wglMakeCurrent(hdc,hrc);
// Set color to use when clearing the background.
glClearColor(0.0f, 0.0f, 0.0f, 1.0f);
glClearDepth(1.0f);
// Turn on backface culling
glFrontFace(GL_CCW);
glCullFace(GL_FRONT_AND_BACK);
// Turn on depth testing
glEnable(GL_DEPTH_TEST);
glDepthFunc(GL_LEQUAL);
glClear (GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT); // Clear all objects
glEnable( GL_TEXTURE_2D ); // enable texture for 2 dimensions
glPushMatrix();
if (filename.IsEmpty() == false)
{
imgData = ReadGDALData( filename );
glBindTexture( GL_TEXTURE_2D, imgData );
glClear (GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT); // Clear all objects
glLoadIdentity ();
gluLookAt (0,0,1,0,0,0,0,1,0);
glTranslatef (m_fPosX, m_fPosY, 0.0f);
glScalef (m_fZoom,m_fZoom,1.0);
glBegin( GL_QUADS ); // apply loaded texture to viewport
glTexCoord2d(0.0,0.0); glVertex2d(-1.0,-1.0);
glTexCoord2d(1.0,0.0); glVertex2d(+1.0,-1.0);
glTexCoord2d(1.0,1.0); glVertex2d(+1.0,+1.0);
glTexCoord2d(0.0,1.0); glVertex2d(-1.0,+1.0);
glEnd();
}
glPopMatrix();
glDisable( GL_TEXTURE_2D );
glFlush();
// Swap buffers
SwapBuffers(hdc);
wglMakeCurrent(NULL, NULL);
}
The problem is not so much the colour, but (from what I can tell from the sample) the way your data is packed. Look into what byte ordering / row padding / colour packing your OpenGL buffer expects, and what your GDAL loader provides. Just a hunch here, but it seems like your OpenGL setup expects a 4th (alpha) component in your RGB structs, while your GDAL code doesn't supply one. Also, your GDAL loader aligns rows on 32-bit boundaries; check whether your OpenGL texture calls expect that too. Did you copy/paste the GDAL loader from a sample where somebody uses it to draw with BitBlt()? It looks that way.
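A minimal sketch of the row-padding check, assuming pData is meant to hold tightly packed 3-byte RGB rows (an assumption, not the author's confirmed layout):
//glTexImage2D and gluBuild2DMipmaps default to 4-byte row alignment
//(GL_UNPACK_ALIGNMENT == 4). If each row of pData is exactly Width*3 bytes,
//tell GL the rows are tightly packed before uploading:
glPixelStorei(GL_UNPACK_ALIGNMENT, 1);
gluBuild2DMipmaps(GL_TEXTURE_2D, 3, Width, Height, GL_RGB, GL_UNSIGNED_BYTE, pData);
//Conversely, if you keep the DWORD-aligned LineBytes stride from the loader,
//every uploaded row must really be that long, or the image will shear.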

android native app opengl es white textures

I'm writing a native app that should only display a little triangle with a texture.
But unfortunately it only ever displays a white triangle.
My code is very simple.
First to load a tga Image:
static const GLenum gl_format[4] = { GL_LUMINANCE, GL_LUMINANCE_ALPHA, GL_RGB, GL_RGBA };
unsigned int LoadTGATextureFromFile(const char* filename)
{
unsigned int handle;
unsigned char hdr[18];
unsigned char file_id[256 + 1];
int file;
file = open(filename, O_RDONLY);
if(file < 0)
{
Log(LOGLEVEL_ERROR, APPNAME, "Error: Failed to open tga file '%s' for read ing\n", filename);
return 0;
}
if(read(file, hdr, 18) != 18 || read(file, file_id, hdr[0]) != hdr[0])
{
Log(LOGLEVEL_ERROR, APPNAME, "Error: Unexpected EOF while reading header of '%s'\n", filename);
close(file);
return 0;
}
file_id[hdr[0]] = 0;
if(hdr[1] != 0 || (hdr[2] != 2 && hdr[2] != 3) || (hdr[16] != 8 && hdr[16] != 16 && hdr[16] != 24 && hdr[16] != 32))
{
Log(LOGLEVEL_ERROR, APPNAME, "Error: File '%s' has invalid format\n", filename);
close(file);
return 0;
}
int width = *(short*)(hdr + 12);
int height = *(short*)(hdr + 14);
if((width & (width - 1)) != 0 || (height & (height - 1)) != 0)
{
Log(LOGLEVEL_ERROR, APPNAME, "Error: File '%s' has invalid resolution %dx%d\n", filename, width, height);
close(file);
return 0;
}
int components = hdr[16] / 8;
unsigned char* data = new unsigned char [width * height * components];
if (read(file, data, width * height * components) != width * height * components)
{
Log(LOGLEVEL_ERROR, APPNAME, "Error: Unexpected EOF while reading image data of '%s'\n", filename);
close(file);
return 0;
}
char dummy;
if(read(file, &dummy, 1) == 1)
Log(LOGLEVEL_ERROR, APPNAME, "Warning: TGA file '%s' has overlength\n", filename);
close(file); //must stay open until after the trailing-data check
switch (components - 1)
{
char tmp;
case 2:
for (int i = 0; i < width * height; i += 3)
{
tmp = data[i];
data[i] = data[i + 2];
data[i + 2] = tmp;
}
break;
case 3:
for (int i = 0; i < width * height; i += 4)
{
tmp = data[i];
data[i] = data[i + 2];
data[i + 2] = tmp;
}
break;
default:
break;
}
glGenTextures(1, &handle);
glBindTexture(GL_TEXTURE_2D, handle);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, width, height, 0, GL_RGBA, GL_UNSIGNED_BYTE, data);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR_MIPMAP_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT);
glBindTexture(GL_TEXTURE_2D, 0);
delete [] data;
Log(LOGLEVEL_ERROR, APPNAME, "'%s' successfully loaded [handle = %d, FILE_ID = \"%s\", width = %d, height = %d, depth = %d] :)\n", filename, handle, file_id, width, height, components * 8);
return handle;
}
Loading the texture:
unsigned int texture = LoadTGATextureFromFile("/sdcard/test.tga");
Then to draw:
float tricoords[6] = { 0.0, 0.0, 1.0, 1.0, 0.0, 1.0 };
float texcoords[6] = { 0.0, 0.0, 1.0, 1.0, 0.0, 1.0 };
glEnable(GL_TEXTURE_2D);
glBindTexture(GL_TEXTURE_2D, texture);
glEnableClientState(GL_VERTEX_ARRAY);
glEnableClientState(GL_TEXTURE_COORD_ARRAY);
glVertexPointer(2, GL_FLOAT, 0, tricoords);
glTexCoordPointer(2, GL_FLOAT, 0, texcoords);
glDrawArrays(GL_TRIANGLES, 0, 3);
glDisableClientState(GL_TEXTURE_COORD_ARRAY);
glDisableClientState(GL_VERTEX_ARRAY);
I know that this code isn't optimized, but it's only for debugging.
The logcat of my app prints:
successfully loaded tga [handle = 1, FILE_ID = "", width = 64, height = 128, depth = 32] :)
But the texture stays white.
Just found the mistake: texture mipmapping was enabled for the loaded texture, but the mipmaps were never created.
Changing this line:
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR_MIPMAP_LINEAR);
to this one will disable mipmapping for the texture:
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
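Alternatively (a sketch assuming OpenGL ES 1.1 or desktop GL 1.4+, not part of the original fix), you can keep the mipmapped filter and let the driver build the mipmap chain automatically, as long as the parameter is set before the upload:
glTexParameteri(GL_TEXTURE_2D, GL_GENERATE_MIPMAP, GL_TRUE); //must be set before glTexImage2D
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, width, height, 0, GL_RGBA, GL_UNSIGNED_BYTE, data);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR_MIPMAP_LINEAR);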

OpenGL texture shifted somewhat to the left when applied to a quad

I'm a bit new to OpenGL and I've been having a problem with using textures. The texture seems to load fine, but when I run the program the texture displays shifted a couple of pixels to the left, with the section cut off by the shift appearing on the right side. I don't know if the problem is in my TGA loader or in the way I'm applying the texture to the quad.
Here is the loader:
#include "texture.h"
#include <iostream>
GLubyte uncompressedheader[12] = {0,0, 2,0,0,0,0,0,0,0,0,0};
GLubyte compressedheader[12] = {0,0,10,0,0,0,0,0,0,0,0,0};
TGA::TGA()
{
}
//Private loading function called by loadTGA. Loads compressed TGA files (not yet implemented)
//Returns: TRUE on success, FALSE on failure
bool TGA::LoadCompressedTGA(char *filename,ifstream &texturestream)
{
return false;
}
bool TGA::LoadUncompressedTGA(char *filename,ifstream &texturestream)
{
cout << "G position status:" << texturestream.tellg() << endl;
texturestream.read((char*)header, sizeof(header)); //read 6 bytes into the file to get the tga header
width = (GLuint)header[1] * 256 + (GLuint)header[0]; //read and calculate width and save
height = (GLuint)header[3] * 256 + (GLuint)header[2]; //read and calculate height and save
bpp = (GLuint)header[4]; //read bpp and save
cout << bpp << endl;
if((width <= 0) || (height <= 0) || ((bpp != 24) && (bpp !=32))) //check to make sure the height, width, and bpp are valid
{
return false;
}
if(bpp == 24)
{
type = GL_RGB;
}
else
{
type = GL_RGBA;
}
imagesize = ((bpp/8) * width * height); //determine size in bytes of the image
cout << imagesize << endl;
imagedata = new GLubyte[imagesize]; //allocate memory for our imagedata variable
texturestream.read((char*)imagedata,imagesize); //read according the the size of the image and save into imagedata
for(GLuint cswap = 0; cswap < (GLuint)imagesize; cswap += (bpp/8)) //loop through and reverse the tga's BGR format to RGB
{
imagedata[cswap] ^= imagedata[cswap+2] ^= //1st Byte XOR 3rd Byte XOR 1st Byte XOR 3rd Byte
imagedata[cswap] ^= imagedata[cswap+2];
}
texturestream.close(); //close ifstream because we're done with it
cout << "image loaded" << endl;
glGenTextures(1, &texID); // Generate OpenGL texture IDs
glBindTexture(GL_TEXTURE_2D, texID); // Bind Our Texture
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR); // Linear Filtered
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexImage2D(GL_TEXTURE_2D, 0, type, width, height, 0, type, GL_UNSIGNED_BYTE, imagedata);
delete [] imagedata; //allocated with new[], so use delete[]
return true;
}
//Public loading function for TGA images. Opens TGA file and determines
//its type, if any, then loads it and calls the appropriate function.
//Returns: TRUE on success, FALSE on failure
bool TGA::loadTGA(char *filename)
{
cout << width << endl;
ifstream texturestream;
texturestream.open(filename,ios::binary);
texturestream.read((char*)header,sizeof(header)); //read 6 bytes into the file, it's the header. If it matches the uncompressed header's first 6 bytes, load it as uncompressed
LoadUncompressedTGA(filename,texturestream);
return true;
}
GLubyte* TGA::getImageData()
{
return imagedata;
}
GLuint& TGA::getTexID()
{
return texID;
}
And here's the quad:
void Square::show()
{
glEnable(GL_TEXTURE_2D);
glBindTexture(GL_TEXTURE_2D, texture.texID);
//Move to offset
glTranslatef( x, y, 0 );
//Start quad
glBegin( GL_QUADS );
//Set color to white
glColor4f( 1.0, 1.0, 1.0, 1.0 );
//Draw square
glTexCoord2f(0.0f, 0.0f); glVertex3f( 0, 0, 0 );
glTexCoord2f(1.0f, 0.0f); glVertex3f( SQUARE_WIDTH, 0, 0 );
glTexCoord2f(1.0f, 1.0f); glVertex3f( SQUARE_WIDTH, SQUARE_HEIGHT, 0 );
glTexCoord2f(0.0f, 1.0f); glVertex3f( 0, SQUARE_HEIGHT, 0 );
//End quad
glEnd();
//Reset
glLoadIdentity();
}
A screenshot would be very helpful.
My first guess is that your rows are not 4-byte aligned. If so, change the unpack alignment to 1 byte with glPixelStorei(GL_UNPACK_ALIGNMENT, 1); before calling glTexImage2D().
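A minimal sketch of that fix, using the names from the loader above and placed just before the upload in LoadUncompressedTGA:
glPixelStorei(GL_UNPACK_ALIGNMENT, 1); //24-bit rows are often not a multiple of 4 bytes
glTexImage2D(GL_TEXTURE_2D, 0, type, width, height, 0, type, GL_UNSIGNED_BYTE, imagedata);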
You want to set the texture parameters so that the texture scales to fit the quad. You do that with the following lines of code:
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
These will have textures scale linearly up or down to fit the quad.
You should probably also clamp the texture with the following calls:
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);