I have sample code which draws 8-bit rasters to a window using quads (glTexImage2D with GL_LUMINANCE for both the pixel format and the internal format).
PFD was initialized like this:
PIXELFORMATDESCRIPTOR pfd;
int iFormat;
hDC = GetDC( hWnd );
ZeroMemory( &pfd, sizeof( pfd ) );
pfd.nSize = sizeof( pfd );
pfd.nVersion = 1;
pfd.dwFlags = PFD_DRAW_TO_WINDOW | PFD_SUPPORT_OPENGL | PFD_DOUBLEBUFFER;
pfd.iPixelType = PFD_TYPE_RGBA;
pfd.cColorBits = 8;
pfd.iLayerType = PFD_MAIN_PLANE;
iFormat = ChoosePixelFormat( hDC, &pfd );
SetPixelFormat( hDC, iFormat, &pfd );
And the texture was initialized like this:
glPixelStorei (GL_UNPACK_ALIGNMENT, 1);
glTexParameteri (GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT);
glTexParameteri (GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT);
glTexParameteri (GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameteri (GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexEnvf (GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE, GL_MODULATE);
glTexImage2D (GL_TEXTURE_2D, 0, GL_LUMINANCE, width, height, 0, GL_LUMINANCE, GL_UNSIGNED_BYTE, data);
The machine has one Matrox card, which is connected to one regular color monitor and two monochrome monitors.
The problem is that the application draws correctly into any part of the window that lies on the color monitor, but shows a blank on the monochrome monitors. These are configured as 8-bit linear grayscale in the control panel, which works fine otherwise.
I'd be glad to hear your ideas on this...
I have read this tutorial: Array Texture,
but I don't want to use glTexStorage3D(), since it requires OpenGL 4.2. First of all, can someone check whether I have implemented this code properly? (I'm using glTexImage3D instead of glTexStorage3D.)
unsigned int nrTextures = 6;
GLsizei width = 256;
GLsizei height = 256;
GLuint arrayTextureID;
std::vector<unsigned char*> textures(nrTextures);
//textures: Load textures here...
glGenTextures(1, &arrayTextureID);
glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_2D_ARRAY, arrayTextureID);
//Gamma to linear color space for each texture.
glTexImage3D(GL_TEXTURE_2D_ARRAY, 0, GL_SRGB, width, height, nrTextures, 0, GL_RGB, GL_UNSIGNED_BYTE, NULL);
for(unsigned int i = 0; i < nrTextures; i++)
glTexSubImage3D(GL_TEXTURE_2D_ARRAY, 0, 0, 0, i, width, height, 1, GL_RGB, GL_UNSIGNED_BYTE, textures[i]);
/*glTexParameteri(GL_TEXTURE_2D_ARRAY, GL_TEXTURE_MIN_FILTER, GL_LINEAR_MIPMAP_LINEAR);*/
glTexParameteri(GL_TEXTURE_2D_ARRAY, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D_ARRAY, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D_ARRAY, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_2D_ARRAY, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
/*glGenerateMipmap(GL_TEXTURE_2D_ARRAY);*/
glBindTexture(GL_TEXTURE_2D_ARRAY, 0);
How do I implement mipmapping with this implementation?
This mostly looks ok. The only problem I can see is that GL_SRGB is not a valid internal texture format. So the glTexImage3D() call needs to use GL_SRGB8 instead:
glTexImage3D(GL_TEXTURE_2D_ARRAY, 0, GL_SRGB8, width, height, nrTextures, 0,
GL_RGB, GL_UNSIGNED_BYTE, NULL);
For generating mipmaps, you can call:
glGenerateMipmap(GL_TEXTURE_2D_ARRAY);
after you have filled the texture with data via the glTexSubImage3D() calls.
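For completeness, here is a minimal sketch of the whole sequence with mipmapping, using the same variables as in the question (layer count, sizes and pixel data are assumed to be set up as above):
glBindTexture(GL_TEXTURE_2D_ARRAY, arrayTextureID);
// Allocate level 0 with the sized sRGB internal format.
glTexImage3D(GL_TEXTURE_2D_ARRAY, 0, GL_SRGB8, width, height, nrTextures, 0, GL_RGB, GL_UNSIGNED_BYTE, NULL);
// Upload each layer into level 0.
for (unsigned int i = 0; i < nrTextures; i++)
    glTexSubImage3D(GL_TEXTURE_2D_ARRAY, 0, 0, 0, i, width, height, 1, GL_RGB, GL_UNSIGNED_BYTE, textures[i]);
// Build the full mip chain from level 0, then use a mipmapped minification filter.
glGenerateMipmap(GL_TEXTURE_2D_ARRAY);
glTexParameteri(GL_TEXTURE_2D_ARRAY, GL_TEXTURE_MIN_FILTER, GL_LINEAR_MIPMAP_LINEAR);
glTexParameteri(GL_TEXTURE_2D_ARRAY, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glBindTexture(GL_TEXTURE_2D_ARRAY, 0);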
I have decided to use SOIL to load images for use with OpenGL in my project. I have this method, which loads an image and returns a GLTexture, a struct that holds a GLuint texture id and two ints, width and height:
GLTexture loadTexture(const char *filePath) {
GLTexture texture = {};
int width;
int height;
unsigned char *data;
//Load Image File Directly into an OpenGL Texture
texture.id = SOIL_load_OGL_texture
(
filePath,
SOIL_LOAD_AUTO,
SOIL_CREATE_NEW_ID,
SOIL_FLAG_MIPMAPS | SOIL_FLAG_INVERT_Y | SOIL_FLAG_NTSC_SAFE_RGB | SOIL_FLAG_COMPRESS_TO_DXT
);
//Error Checking (Load Process)
if (texture.id == 0) {
fatalError("SOIL Loading Error!");
}
//Generate and Bind Texture
glGenTextures(1, &(texture.id));
glBindTexture(GL_TEXTURE_2D, texture.id);
//Get Width, Height and Data of Image
glGetTexLevelParameteriv(GL_TEXTURE_2D, 0, GL_TEXTURE_WIDTH, &width);
glGetTexLevelParameteriv(GL_TEXTURE_2D, 0, GL_TEXTURE_HEIGHT, &height);
glGetTexImage(GL_TEXTURE_2D, 0, GL_RGBA, GL_UNSIGNED_BYTE, &data);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, width, height, 0, GL_RGBA, GL_UNSIGNED_BYTE, &data);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR_MIPMAP_LINEAR);
glGenerateMipmap(GL_TEXTURE_2D);
//Unbind Texture
glBindTexture(GL_TEXTURE_2D, 0);
//Return Texture
texture.width = width;
texture.height = height;
return texture;
}
As far as I know, glGetTexLevelParameteriv() should return the width and height of the bound texture into width and height, but whenever I load an image, it returns 0.
Should I fill in width and height as parameters for the method or is it possible to get them via OpenGL?
The texture id generated by SOIL_load_OGL_texture is overwritten in the
glGenTextures(1, &(texture.id));
line (glGenTextures creates a new texture and stores its id in texture.id). All operations afterwards work on the newly created texture. Since this new texture is empty, width and height are 0.
I'm not sure what you want to achieve here, but if you only want to load the texture, then this code might work:
texture.id = SOIL_load_OGL_texture (...);
//Error Checking (Load Process)
if (texture.id == 0) {
fatalError("SOIL Loading Error!");
}
//Just bind and do not create a new texture
glBindTexture(GL_TEXTURE_2D, texture.id);
glGetTexLevelParameteriv(GL_TEXTURE_2D, 0, GL_TEXTURE_WIDTH, &width);
glGetTexLevelParameteriv(GL_TEXTURE_2D, 0, GL_TEXTURE_HEIGHT, &height);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR_MIPMAP_LINEAR);
glGenerateMipmap(GL_TEXTURE_2D);
//Unbind Texture
glBindTexture(GL_TEXTURE_2D, 0);
//Return Texture
texture.width = width;
texture.height = height;
return texture;
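As an alternative (and this is just a sketch, not part of the original code): SOIL can also decode the file to raw pixels and report the dimensions directly via SOIL_load_image, and you then create and fill the texture yourself. Error handling is omitted and the filters/mipmaps would be set up as above:
int width, height, channels;
// Decode the file; SOIL fills in the image dimensions for us.
unsigned char *pixels = SOIL_load_image(filePath, &width, &height, &channels, SOIL_LOAD_RGBA);
glGenTextures(1, &(texture.id));
glBindTexture(GL_TEXTURE_2D, texture.id);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, width, height, 0, GL_RGBA, GL_UNSIGNED_BYTE, pixels);
glGenerateMipmap(GL_TEXTURE_2D);
glBindTexture(GL_TEXTURE_2D, 0);
SOIL_free_image_data(pixels);
texture.width = width;
texture.height = height;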
I can't get my cube's texture to work when I use glGenerateMipmap.
I load the BMP texture from the resources (resource.h).
My load function is called on WM_CREATE and looks like this:
void LoadTEX()
{
GLuint texture;
HBITMAP GLtex;
BITMAP tex;
byte Texture=TEX;
GLtex= (HBITMAP)LoadImage(GetModuleHandle(NULL),MAKEINTRESOURCE(Texture), IMAGE_BITMAP, 0, 0, LR_CREATEDIBSECTION);
GetObject(GLtex,sizeof(tex), &tex);
glPixelStorei(GL_UNPACK_ALIGNMENT,sizeof(Texture));
glEnable(GL_TEXTURE_2D);
glGenTextures(sizeof(Texture), &texture);
glBindTexture( GL_TEXTURE_2D, texture );
glTexEnvf(GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE, GL_DECAL);
glTexParameteri(GL_TEXTURE_2D, GL_GENERATE_MIPMAP, GL_TRUE);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER,GL_LINEAR_MIPMAP_NEAREST );
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR );
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT);
glTexImage2D( GL_TEXTURE_2D,
0,
GL_RGB,
tex.bmWidth,
tex.bmHeight,
0,
GL_RGB,GL_UNSIGNED_BYTE,
tex.bmBits);
glDeleteTextures(sizeof(Texture), &texture);
}
On WM_PAINT:
glColor3f(1.0f,1.0f,1.0f);
glEnable(GL_TEXTURE_2D);
glBindTexture( GL_TEXTURE_2D, texture );
// glActiveTexture(GL_TEXTURE0);
// glGenerateMipmap(GL_TEXTURE_2D);
gluBuild2DMipmaps(GL_TEXTURE_2D, 3, tex.bmWidth, tex.bmHeight, GL_RGB, GL_UNSIGNED_BYTE, tex.bmBits);
The above code works fine (the texture is in place), but the problem is that it gives me 40 FPS, which is a very poor result (without the texture bound I get around 300 FPS).
Second, I don't want to use gluBuild2DMipmaps because it's a bit old; instead I want to use glGenerateMipmap. The problem is that if I comment out gluBuild2DMipmaps and uncomment glActiveTexture(GL_TEXTURE0); and glGenerateMipmap(GL_TEXTURE_2D);, nothing is shown.
My PIXELFORMATDESCRIPTOR looks like:
PIXELFORMATDESCRIPTOR pfd;
int format;
// get the device context (DC)
*hdc = GetDC( hwnd );
// set the pixel format for the DC
ZeroMemory( &pfd, sizeof( pfd ) );
pfd.nSize = sizeof( pfd );
pfd.nVersion = 1;
pfd.dwFlags = PFD_DRAW_TO_WINDOW | PFD_SUPPORT_OPENGL | PFD_DOUBLEBUFFER;
pfd.iPixelType = PFD_TYPE_RGBA;
pfd.cColorBits = 32;
pfd.cDepthBits = 32;
pfd.iLayerType = PFD_MAIN_PLANE;
format = ChoosePixelFormat( *hdc, &pfd );
SetPixelFormat( *hdc, format, &pfd );
All of this is running on an ATI card with OpenGL 3.1 support. I do call glEnable(GL_TEXTURE_2D), so this should work with ATI cards; the problem must be elsewhere.
In LoadTEX, don't delete the texture straight after loading it. (Also, it should be glGenTextures(1, &texture) and glDeleteTextures(1, &texture) to generate and delete one texture handle.)
Then call glGenerateMipmap after loading the image with glTexImage2D, unless the texture changes and you need to regenerate the mipmaps each frame, in which case leave the call where it is.
glActiveTexture(GL_TEXTURE0) is the default anyway. If you were binding other textures at the same time you'd use glActiveTexture(GL_TEXTURE1) or glActiveTexture(GL_TEXTURE0 + textureIndex) etc.
I'm not sure about the specifics of Windows bitmap loading, but double-check the glPixelStorei line; given the other sizeof(Texture) uses, I have to assume this may be incorrect too. To be safe, just set the unpack alignment to 1.
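Putting that together, a minimal sketch of a corrected LoadTEX might look like the following. This assumes texture is a GLuint visible to the WM_PAINT handler as well, and that the DIB section's rows are DWORD-padded (hence the alignment of 4); Windows DIBs also normally store pixels in BGR order, so GL_BGR_EXT may be the right source format instead of GL_RGB:
void LoadTEX()
{
    HBITMAP GLtex;
    BITMAP tex;
    GLtex = (HBITMAP)LoadImage(GetModuleHandle(NULL), MAKEINTRESOURCE(TEX), IMAGE_BITMAP, 0, 0, LR_CREATEDIBSECTION);
    GetObject(GLtex, sizeof(tex), &tex);
    glPixelStorei(GL_UNPACK_ALIGNMENT, 4);   // DIB rows are padded to 4-byte boundaries
    glEnable(GL_TEXTURE_2D);
    glGenTextures(1, &texture);              // generate exactly one texture handle
    glBindTexture(GL_TEXTURE_2D, texture);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR_MIPMAP_NEAREST);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT);
    glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, tex.bmWidth, tex.bmHeight, 0, GL_RGB, GL_UNSIGNED_BYTE, tex.bmBits);
    glGenerateMipmap(GL_TEXTURE_2D);         // build the mip chain once, right after the upload
    // no glDeleteTextures here - keep the texture alive until shutdown
}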
I use the following order during loading of texture:
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR_MIPMAP_LINEAR);
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR); // mag filter can only be GL_NEAREST or GL_LINEAR
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, n2width, n2height, 0, GL_RGBA, GL_UNSIGNED_BYTE, pixmap);
glHint(GL_GENERATE_MIPMAP_HINT, GL_FASTEST);
glGenerateMipmap(GL_TEXTURE_2D); // Generate mip mapping
And don't rebuild the mipmaps on every frame.
I'm having some trouble loading a 32-bit .bmp image in my OpenGL game. So far I can load and display 24-bit images perfectly. I now want to load a bitmap with portions of its texture transparent or invisible.
This function has no problems with 24-bit images, but 32-bit .bmp files with an alpha channel seem to distort the colors and cause transparency in unintended places.
Texture LoadTexture( const char * filename, int width, int height, bool alpha)
{
GLuint texture;
GLuint* data;
FILE* file;
fopen_s(&file, filename, "rb");
if(!file)
{
std::cout << filename <<": Load Texture Fail!\n";
exit(0);
}
data = new GLuint[width * height];
fread( data, width * height * sizeof(GLuint), 1, file );
fclose(file);
glGenTextures( 1, &texture );
glBindTexture( GL_TEXTURE_2D, texture);
glTexEnvf( GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE, GL_MODULATE);
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT);
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT);
if(alpha) //for .bmp 32 bit
{
glPixelStorei(GL_UNPACK_ALIGNMENT, 1);
glTexImage2D(GL_TEXTURE_2D, 0, 4, width, height, 0, GL_RGBA, GL_UNSIGNED_BYTE, data);
}
else //for .bmp 24 bit
{
glPixelStorei(GL_UNPACK_ALIGNMENT, 4);
glTexImage2D(GL_TEXTURE_2D, 0, 3, width, height, 0, GL_BGR, GL_UNSIGNED_BYTE, data);
}
std::cout<<"Texture: "<<filename<< " loaded"<<std::endl;
delete[] data;
return Texture(texture);
}
In Game Texture, drawn on a flat plane
This might look like it's working, but the 0xff00ff color is the one that should be transparent, and if I reverse the alpha channel in Photoshop the result is the same: the inner sphere is always transparent.
I also enabled:
glEnable(GL_BLEND);
glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA);
There are no problems with transparency; the problem seems to be with loading the bitmap with an alpha channel. Also, all bitmaps that I load seem to be shifted a bit to the right. Is there a reason for this?
I'm going to answer this on the assumption that your file "parsing" is in fact correct, i.e. that the file data is just the image part of a .BMP without any of the header information.
glPixelStorei(GL_UNPACK_ALIGNMENT, 1);
glTexImage2D(GL_TEXTURE_2D, 0, 4, width, height, 0, GL_RGBA, GL_UNSIGNED_BYTE, data);
glPixelStorei(GL_UNPACK_ALIGNMENT, 4);
glTexImage2D(GL_TEXTURE_2D, 0, 3, width, height, 0, GL_BGR, GL_UNSIGNED_BYTE, data);
I find it curious that your 3-component data is in BGR order, yet your 4-component data is in RGBA order. Especially if this data comes from a .BMP file (though they don't allow alpha channels). Are you sure that your data isn't in a different ordering? For example, perhaps ABGR order?
Also, stop using numbers for image formats. You should use a real, sized internal format for your textures, not "3" or "4".
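For example, a quick sketch with sized internal formats (GL_RGBA8 / GL_RGB8). Whether the 32-bit data needs GL_RGBA or GL_BGRA as the source format depends on how the file actually stores its bytes, which is exactly the ordering question above:
if (alpha) // 32-bit data
{
    glPixelStorei(GL_UNPACK_ALIGNMENT, 1);
    // GL_RGBA8 is the sized internal format; swap GL_RGBA for GL_BGRA if the file stores blue first.
    glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA8, width, height, 0, GL_RGBA, GL_UNSIGNED_BYTE, data);
}
else // 24-bit data
{
    glPixelStorei(GL_UNPACK_ALIGNMENT, 4);
    glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB8, width, height, 0, GL_BGR, GL_UNSIGNED_BYTE, data);
}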
Here is code that works 100% for me, compiled with Visual C++ 2012:
glBindTexture(GL_TEXTURE_2D, texture_id);
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT);
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT);
glTexEnvf( GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE, GL_MODULATE); //This is very important!
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, ptr->image->width, ptr->image->height, 0, GL_RGBA, GL_UNSIGNED_BYTE, imag_ptr);
and then in the render code I use:
glPushMatrix();
glTranslatef(0,0,0);
glEnable(GL_TEXTURE_2D);
glBindTexture(GL_TEXTURE_2D, 1);
glEnable(GL_BLEND);
glColor4ub(255,255,255,255); //This is very important! (try playing with it and you'll see)
glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA);
glBegin(GL_QUADS);
glTexCoord3d(1, 1, 0);
glVertex2f(8,8);
glTexCoord3d(0, 1, 0);
glVertex2f(-8,8);
glTexCoord3d(0, 0, 0);
glVertex2f(-8,-8);
glTexCoord3d(1, 0, 0);
glVertex2f(8,-8);
glEnd();
glDisable(GL_TEXTURE_2D);
glPopMatrix();
For example, I used the Android PNG icon; I tried to post another image too, but I don't have the reputation for that, so if you want I can send it to you.
All of this applies to PNG format,
but for BMP and TGA you must swap the colors from ARGB to RGBA; without this it won't work:
for( x=0;x<bmp_in_fheader.width;x++)
{
t=pixel[i];
t1=pixel[i+1];
t2=pixel[i+2];
t3=pixel[i+3];
pixel_temp[j]=t2;
pixel_temp[j+1]=t1;
pixel_temp[j+2]=t;
pixel_temp[j+3]=t3;
i+=4;
j+=4;
}
==Next==
To create the textures in Photoshop, you must delete your background, draw on a new layer, and then add an alpha channel under Channels. Remember, this is very important: in the alpha channel, black represents transparency, and the parts of your image you want visible must be white only.
We were happily using glTexSubImage2D to update a texture every few frames; the texture had been initialised with glTexImage2D in our GL initialisation. After adding a particle system, with each particle textured itself, our quad showing the glTexSubImage2D texture no longer displays.
The particles' textures are PNGs, so we use SDL to load each PNG into an SDL_Surface, and then glTexImage2D is used to upload it into a texture.
If we change the quad's glTexSubImage2D call to a glTexImage2D call, the texture shows, but this is extremely inefficient and cuts the framerate at least in half, so we would rather use glTexSubImage2D (as it worked before).
Does anyone have any idea why we now can't use glTexSubImage2D?
Below is relevant pieces of code for the initialisation and binding of textures:
Loading in the particle texture
//Load smoke texture
SDL_Surface *surface;
SDL_Surface *alpha_image;
if( (surface = IMG_Load("smoke_particle.png")))
{
SDL_PixelFormat *pixf = SDL_GetVideoSurface()->format;
alpha_image = SDL_CreateRGBSurface( SDL_SWSURFACE, surface->w, surface->h, 32, pixf->Bmask, pixf->Gmask, pixf->Rmask, pixf->Amask );
SDL_SetAlpha(surface,0,0);
SDL_BlitSurface( surface, NULL, alpha_image, NULL );
glGenTextures( 1, &texture );
glBindTexture( GL_TEXTURE_2D, texture );
glTexParameteri( GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR );
glTexParameteri( GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR );
glTexImage2D( GL_TEXTURE_2D, 0, GL_RGBA, surface->w, surface->h, 0,
GL_RGBA, GL_UNSIGNED_BYTE, surface->pixels );
}
Setting up the quad's texture:
glEnable(GL_TEXTURE_2D);
glGenTextures(1, &texVid);
glBindTexture(GL_TEXTURE_2D, texVid);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexImage2D(GL_TEXTURE_2D, 0, 3, VIDEO_WIDTH, VIDEO_HEIGHT, 0, GL_RGB, GL_UNSIGNED_BYTE, NULL);
glPixelStorei(GL_UNPACK_ALIGNMENT, 2);
The only calls in initGL() are enabling GL_TEXTURE_2D and GL_BLEND, setting up glBlendFunc(), and setting up the quad's texture as above.
Any ideas?
Stupidly we had VIDEO_WIDTH set to the height of the texture and VIDEO_HEIGHT to the width of the texture.
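For reference, once the constants hold the real dimensions, the per-frame update is just the following (frameData here is a placeholder for whatever buffer holds the new frame):
glBindTexture(GL_TEXTURE_2D, texVid);
// The updated region must match how the texture was allocated with glTexImage2D.
glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, VIDEO_WIDTH, VIDEO_HEIGHT, GL_RGB, GL_UNSIGNED_BYTE, frameData);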
Sorry if we wasted anyone's time.
Can anyone lock this or delete this or anything?
Thanks,
Infinitifizz