I can't get my texture to work through glGenerateMipmap on my cube.
I load the BMP texture from the resource.h file.
My load function is called on WM_CREATE and looks like this:
void LoadTEX()
{
GLuint texture;
HBITMAP GLtex;
BITMAP tex;
byte Texture=TEX;
GLtex= (HBITMAP)LoadImage(GetModuleHandle(NULL),MAKEINTRESOURCE(Texture), IMAGE_BITMAP, 0, 0, LR_CREATEDIBSECTION);
GetObject(GLtex,sizeof(tex), &tex);
glPixelStorei(GL_UNPACK_ALIGNMENT,sizeof(Texture));
glEnable(GL_TEXTURE_2D);
glGenTextures(sizeof(Texture), &texture);
glBindTexture( GL_TEXTURE_2D, texture );
glTexEnvf(GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE, GL_DECAL);
glTexParameteri(GL_TEXTURE_2D, GL_GENERATE_MIPMAP, GL_TRUE);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER,GL_LINEAR_MIPMAP_NEAREST );
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR );
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT);
glTexImage2D( GL_TEXTURE_2D,
0,
GL_RGB,
tex.bmWidth,
tex.bmHeight,
0,
GL_RGB,GL_UNSIGNED_BYTE,
tex.bmBits);
glDeleteTextures(sizeof(Texture), &texture);
}
On WM_PAINT:
glColor3f(1.0f,1.0f,1.0f);
glEnable(GL_TEXTURE_2D);
glBindTexture( GL_TEXTURE_2D, texture );
// glActiveTexture(GL_TEXTURE0);
// glGenerateMipmap(GL_TEXTURE_2D);
gluBuild2DMipmaps(GL_TEXTURE_2D, 3, tex.bmWidth, tex.bmHeight, GL_RGB, GL_UNSIGNED_BYTE, tex.bmBits);
The above code works fine (the texture is in place), but the problem is that it gives me 40 FPS, which is a very poor result (without the texture being bound I get around 300 FPS).
Second, I don't want to use gluBuild2DMipmaps because it's a bit old; instead I want to use glGenerateMipmap. The problem is that if I comment out gluBuild2DMipmaps and uncomment glActiveTexture(GL_TEXTURE0); and glGenerateMipmap(GL_TEXTURE_2D);, nothing is shown.
My PIXELFORMATDESCRIPTOR looks like:
PIXELFORMATDESCRIPTOR pfd;
int format;
// get the device context (DC)
*hdc = GetDC( hwnd );
// set the pixel format for the DC
ZeroMemory( &pfd, sizeof( pfd ) );
pfd.nSize = sizeof( pfd );
pfd.nVersion = 1;
pfd.dwFlags = PFD_DRAW_TO_WINDOW | PFD_SUPPORT_OPENGL | PFD_DOUBLEBUFFER;
pfd.iPixelType = PFD_TYPE_RGBA;
pfd.cColorBits = 32;
pfd.cDepthBits = 32;
pfd.iLayerType = PFD_MAIN_PLANE;
format = ChoosePixelFormat( *hdc, &pfd );
SetPixelFormat( *hdc, format, &pfd );
All of this is running on an ATI card with OpenGL 3.1 support. I do call glEnable(GL_TEXTURE_2D), so this should work with ATI cards; the problem must be elsewhere.
In LoadTEX, don't delete the texture straight after loading it. (Also, it should be glGenTextures(1, &texture) and glDeleteTextures(1, &texture) to generate and delete one texture handle.)
Then call glGenerateMipmap after loading it with glTexImage2D. Unless the texture changes and you need to regenerate the mipmaps each frame, in which case leave the call where it is.
glActiveTexture(GL_TEXTURE0) is the default anyway. If you were binding other textures at the same time you'd use glActiveTexture(GL_TEXTURE1) or glActiveTexture(GL_TEXTURE0 + textureIndex) etc.
I'm not sure of the specifics of Windows bitmap loading, but double-check the glPixelStorei line; given the other sizeof(Texture) uses, I have to assume this may be incorrect too. To be safe, just set the unpack alignment to 1 byte.
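Putting those fixes together, a corrected LoadTEX might look roughly like this (an untested sketch; I'm assuming texture and tex are globals so WM_PAINT can reuse them, and that the DIB section is BGR-ordered, which is typical for Windows bitmaps):

GLuint texture; // global, so WM_PAINT can bind it
BITMAP tex;     // global, if you still need the dimensions elsewhere

void LoadTEX()
{
    HBITMAP GLtex = (HBITMAP)LoadImage(GetModuleHandle(NULL), MAKEINTRESOURCE(TEX),
                                       IMAGE_BITMAP, 0, 0, LR_CREATEDIBSECTION);
    GetObject(GLtex, sizeof(tex), &tex);

    glPixelStorei(GL_UNPACK_ALIGNMENT, 1);   // 1 byte, not sizeof(Texture)
    glEnable(GL_TEXTURE_2D);

    glGenTextures(1, &texture);              // one texture handle
    glBindTexture(GL_TEXTURE_2D, texture);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR_MIPMAP_NEAREST);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT);

    // GL_BGR is an assumption about the DIB layout; use GL_RGB if your data really is RGB.
    glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, tex.bmWidth, tex.bmHeight, 0,
                 GL_BGR, GL_UNSIGNED_BYTE, tex.bmBits);
    glGenerateMipmap(GL_TEXTURE_2D);         // build the mip chain once, at load time

    // No glDeleteTextures here; delete the texture at shutdown instead.
}

Then WM_PAINT only needs glBindTexture(GL_TEXTURE_2D, texture); with no gluBuild2DMipmaps call at all, which should also fix the frame rate.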
I use the following order when loading a texture:
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR_MIPMAP_LINEAR);
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR); // the MAG filter only accepts GL_NEAREST or GL_LINEAR
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, n2width, n2height, 0, GL_RGBA, GL_UNSIGNED_BYTE, pixmap);
glHint(GL_GENERATE_MIPMAP_HINT, GL_FASTEST);
glGenerateMipmap(GL_TEXTURE_2D); // Generate mip mapping
And don't rebuild the mipmaps every frame.
The main steps for depth testing from my understanding:
1) enable depth testing and how we want to depth test
2) create the frame buffer object and make sure it has a depth attached to it
3) bind our frame buffer object ( make sure to clear it before rendering )
4) draw stuff
And that should be it, no? Our frame buffer's depth attachment should have depth data in it. But I always get straight 1's, the default depth clear value.
step 1:
glEnable(GL_DEPTH_TEST);
glDepthFunc( GL_LEQUAL );
step 2:
//create the frame buffer object
glGenFramebuffers(1, &m_uifboHandle);
// Initialize FBO
glBindFramebuffer(GL_FRAMEBUFFER, m_uifboHandle);
//create 2 texture handles 1 for diffuse, 1 for depth
unsigned int m_uiTextureHandle[2];
glGenTextures( 2, m_uiTextureHandle );
//create the diffuse texture
glBindTexture( GL_TEXTURE_2D, m_uiTextureHandle[0]);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, uiWidth, uiHeight, 0, GL_RGB, GL_UNSIGNED_BYTE, NULL);
glFramebufferTexture2D(GL_DRAW_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, m_uiTextureHandle[0], 0);
//create the depth buffer
glBindTexture(GL_TEXTURE_2D, m_uiTextureHandle[1]);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
glTexParameterf( GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP );
glTexParameterf( GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP );
glTexImage2D(GL_TEXTURE_2D, 0, GL_DEPTH_COMPONENT, uiWidth, uiHeight, 0, GL_DEPTH_COMPONENT, GL_UNSIGNED_BYTE, 0);
glFramebufferTexture2D(GL_FRAMEBUFFER, GL_DEPTH_ATTACHMENT, GL_TEXTURE_2D, m_uiTextureHandle[1], 0);
//go back to default binding
glBindFramebuffer(GL_FRAMEBUFFER, 0);
step 3:
//bind the frame buffer object
glBindFramebuffer( GL_FRAMEBUFFER, m_uifboHandle );
//clear it
glClear( GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT );
step 4:
//draw things
Are these not the steps?
Am I missing something?
I've tried a few different tutorials.
I can't get any depth to render to a texture.
I keep getting straight 1's over and over.
The framebuffer is probably not complete; try checking it for completeness. Moreover, your code was:
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, uiWidth, uiHeight, 0, GL_RGB, GL_UNSIGNED_BYTE, NULL);
However, it should be (note the RGB vs. RGBA):
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, uiWidth, uiHeight, 0, GL_RGBA, GL_UNSIGNED_BYTE, NULL);
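As a quick sketch of the completeness check, right after attaching both textures and before unbinding the FBO (printf here is just a placeholder for whatever logging you already use):

GLenum status = glCheckFramebufferStatus(GL_FRAMEBUFFER);
if (status != GL_FRAMEBUFFER_COMPLETE)
{
    // e.g. GL_FRAMEBUFFER_INCOMPLETE_ATTACHMENT, GL_FRAMEBUFFER_UNSUPPORTED, ...
    printf("FBO incomplete: 0x%x\n", status);
}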
I'm having some trouble loading a 32-bit .bmp image in my OpenGL game. So far I can load and display 24-bit images perfectly. I now want to load a bitmap with portions of its texture being transparent or invisible.
This function has no problems with 24-bit images, but 32-bit .bmp files with an alpha channel seem to distort the colors and cause transparency in unintended places.
Texture LoadTexture( const char * filename, int width, int height, bool alpha)
{
GLuint texture;
GLuint* data;
FILE* file;
fopen_s(&file, filename, "rb");
if(!file)
{
std::cout << filename <<": Load Texture Fail!\n";
exit(0);
}
data = new GLuint[width * height];
fread( data, width * height * sizeof(GLuint), 1, file );
fclose(file);
glGenTextures( 1, &texture );
glBindTexture( GL_TEXTURE_2D, texture);
glTexEnvf( GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE, GL_MODULATE);
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT);
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT);
if(alpha) //for .bmp 32 bit
{
glPixelStorei(GL_UNPACK_ALIGNMENT, 1);
glTexImage2D(GL_TEXTURE_2D, 0, 4, width, height, 0, GL_RGBA, GL_UNSIGNED_BYTE, data);
}
else //for .bmp 24 bit
{
glPixelStorei(GL_UNPACK_ALIGNMENT, 4);
glTexImage2D(GL_TEXTURE_2D, 0, 3, width, height, 0, GL_BGR, GL_UNSIGNED_BYTE, data);
}
std::cout<<"Texture: "<<filename<< " loaded"<<std::endl;
delete[] data;
return Texture(texture);
}
In-game texture, drawn on a flat plane.
This might look like it's working, but the 0xff00ff color is the one that should be transparent, and if I reverse the alpha channel in Photoshop the result is the same: the inner sphere is always transparent.
I also enabled:
glEnable(GL_BLEND);
glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA);
There is no problem with transparency; the problem seems to be with loading the bitmap with an alpha channel. Also, all bitmaps that I load seem to be shifted a bit to the right. Just wondering if there is a reason for this?
I'm going to answer this on the assumption that your file "parsing" is in fact correct: that the file data is just the image part of a .BMP, without any of the header information.
glPixelStorei(GL_UNPACK_ALIGNMENT, 1);
glTexImage2D(GL_TEXTURE_2D, 0, 4, width, height, 0, GL_RGBA, GL_UNSIGNED_BYTE, data);
glPixelStorei(GL_UNPACK_ALIGNMENT, 4);
glTexImage2D(GL_TEXTURE_2D, 0, 3, width, height, 0, GL_BGR, GL_UNSIGNED_BYTE, data);
I find it curious that your 3-component data is in BGR order, yet your 4-component data is in RGBA order. Especially if this data comes from a .BMP file (though they don't allow alpha channels). Are you sure that your data isn't in a different ordering? For example, perhaps ABGR order?
Also, stop using numbers for image formats. You should use a real, sized internal format for your textures, not "3" or "4".
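As a hedged example of what I mean (the exact external format depends on how your file actually lays out its bytes; GL_BGRA for the 32-bit path is only a guess based on how Windows bitmaps usually store their pixels):

if (alpha) // 32-bit data, likely B, G, R, A byte order in a BMP-style layout (assumption)
{
    glPixelStorei(GL_UNPACK_ALIGNMENT, 1);
    glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA8, width, height, 0,
                 GL_BGRA, GL_UNSIGNED_BYTE, data);
}
else // 24-bit BGR data
{
    glPixelStorei(GL_UNPACK_ALIGNMENT, 4);
    glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB8, width, height, 0,
                 GL_BGR, GL_UNSIGNED_BYTE, data);
}

GL_RGBA8 and GL_RGB8 are the sized equivalents of "4" and "3".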
Here is code that works 100% for me, compiled with Visual C++ 2012:
glBindTexture(GL_TEXTURE_2D, texture_id);
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT);
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT);
glTexEnvf( GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE, GL_MODULATE); // This is very important!
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, ptr->image->width, ptr->image->height, 0, GL_RGBA, GL_UNSIGNED_BYTE, imag_ptr);
And then in the render I use:
glPushMatrix();
glTranslatef(0,0,0);
glEnable(GL_TEXTURE_2D);
glBindTexture(GL_TEXTURE_2D, 1);
glEnable(GL_BLEND);
glColor4ub(255,255,255,255); // This is very important! (try playing with it and you will see)
glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA);
glBegin(GL_QUADS);
glTexCoord3d(1, 1, 0);
glVertex2f(8,8);
glTexCoord3d(0, 1, 0);
glVertex2f(-8,8);
glTexCoord3d(0, 0, 0);
glVertex2f(-8,-8);
glTexCoord3d(1, 0, 0);
glVertex2f(8,-8);
glEnd();
glDisable(GL_TEXTURE_2D);
glPopMatrix();
I use, for example, the Android PNG icon; I tried to post another image, but I don't have the reputation for that, so if you want I can send it to you.
All of this is for the PNG format.
For the BMP and TGA formats you must swap the colors from ARGB to RGBA; without this it does not work:
for( x=0;x<bmp_in_fheader.width;x++)
{
t=pixel[i];
t1=pixel[i+1];
t2=pixel[i+2];
t3=pixel[i+3];
pixel_temp[j]=t2;
pixel_temp[j+1]=t1;
pixel_temp[j+2]=t;
pixel_temp[j+3]=t3;
i+=4;
j+=4;
}
==Next==
To create these in Photoshop you must delete your background, draw on a new layer, and then add an alpha channel under Channels. Remember, this is very important: in the alpha channel, black represents transparency, and your image must be covered by white only.
I have sample code which draws 8-bit rasters to a window using quads (glTexImage2D with GL_LUMINANCE for both the pixel format and the internal format).
The PFD was initialized like this:
PIXELFORMATDESCRIPTOR pfd;
int iFormat;
hDC = GetDC( hWnd );
ZeroMemory( &pfd, sizeof( pfd ) );
pfd.nSize = sizeof( pfd );
pfd.nVersion = 1;
pfd.dwFlags = PFD_DRAW_TO_WINDOW | PFD_SUPPORT_OPENGL | PFD_DOUBLEBUFFER;
pfd.iPixelType = PFD_TYPE_RGBA;
pfd.cColorBits = 8;
pfd.iLayerType = PFD_MAIN_PLANE;
iFormat = ChoosePixelFormat( hDC, &pfd );
SetPixelFormat( hDC, iFormat, &pfd );
And the texture was initialized like this:
glPixelStorei (GL_UNPACK_ALIGNMENT, 1);
glTexParameteri (GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT);
glTexParameteri (GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT);
glTexParameteri (GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameteri (GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexEnvf (GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE, GL_MODULATE);
glTexImage2D (GL_TEXTURE_2D, 0, GL_LUMINANCE, width, height, 0, GL_LUMINANCE, GL_UNSIGNED_BYTE, data);
The machine has one Matrox card, which is connected to one regular color monitor and two monochrome monitors.
The problem is that the application draws into any part of the window that is on the color monitor, but is blank on the monochrome monitors. These are configured as 8-bit linear grayscale in the control panel, which works fine otherwise.
I'd be glad to hear your ideas on this...
I'm working with OpenGL 2.1 and OpenGL ES 2.0. I want to implement a depth-of-field effect for the whole scene. I know how to do this for a single model with a shader, but that would require all models to have a shader containing the same code. Is there a way to retrieve the final scene depth and color buffers and store them in textures so I could use them again with a DOF shader? Or is it a bad idea?
[EDIT]
Finally got it working.
The initialization code looks like this:
glGenFramebuffers(1, &frameBuffer_);
glGenTextures(1, &colorBuffer_);
glGenTextures(1, &depthBuffer_);
//glGenRenderbuffers(1, &depthBuffer_);
glBindFramebuffer(GL_FRAMEBUFFER, frameBuffer_);
glBindTexture(GL_TEXTURE_2D, colorBuffer_);
glTexImage2D(
GL_TEXTURE_2D,
0,
GL_RGBA,
width,
height,
0,
GL_RGBA,
GL_UNSIGNED_BYTE,
NULL);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glFramebufferTexture2D(
GL_FRAMEBUFFER,
GL_COLOR_ATTACHMENT0,
GL_TEXTURE_2D, colorBuffer_, 0);
glBindTexture(GL_TEXTURE_2D, depthBuffer_);
glTexImage2D(
GL_TEXTURE_2D,
0,
GL_DEPTH24_STENCIL8,
width,
height,
0,
GL_DEPTH_STENCIL,
GL_UNSIGNED_INT_24_8,
NULL);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glFramebufferTexture2D(
GL_FRAMEBUFFER,
GL_DEPTH_ATTACHMENT,
GL_TEXTURE_2D, depthBuffer_, 0);
//glBindRenderbuffer(GL_RENDERBUFFER, depthBuffer_);
//glRenderbufferStorage(
// GL_RENDERBUFFER,
// GL_DEPTH_COMPONENT24,
// width,
// height);
//glFramebufferRenderbuffer(
// GL_FRAMEBUFFER,
// GL_DEPTH_ATTACHMENT,
// GL_RENDERBUFFER, depthBuffer_);
GLenum status;
status = glCheckFramebufferStatus(GL_DRAW_FRAMEBUFFER);
switch(status) {
case GL_FRAMEBUFFER_COMPLETE:
// Success.
break;
case GL_FRAMEBUFFER_UNSUPPORTED:
LOGE("Frame buffer format not supported.");
break;
default:
LOGE("Framebuffer Error.");
}
And later, when rendering, first call:
glBindFramebuffer(GL_FRAMEBUFFER, frameBuffer_);
Render the scene, then call:
glBindFramebuffer(GL_FRAMEBUFFER, 0);
And now anything can be rendered here using depthBuffer_ or colorBuffer_ as OpenGL texture handles.
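For reference, a minimal sketch of how the second pass might use them, assuming a DOF shader with samplers named colorTex and depthTex and a helper that draws a fullscreen quad (dofProgram_ and drawFullscreenQuad() are placeholders of mine, not part of any particular library):

glBindFramebuffer(GL_FRAMEBUFFER, 0);      // back to the default framebuffer
glUseProgram(dofProgram_);                 // the DOF post-process shader

glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_2D, colorBuffer_);
glUniform1i(glGetUniformLocation(dofProgram_, "colorTex"), 0);

glActiveTexture(GL_TEXTURE1);
glBindTexture(GL_TEXTURE_2D, depthBuffer_);
glUniform1i(glGetUniformLocation(dofProgram_, "depthTex"), 1);

drawFullscreenQuad();                      // sample both textures in the fragment shader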
Use a framebuffer object. They're part of OpenGL ES 2 and are available as a (well supported) extension to OpenGL 2.1.
See
https://github.com/datenwolf/codesamples/tree/master/samples/OpenGL/minimalfbo
for a minimal working example using extensions on desktop OpenGL-2.x
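If you are on a plain 2.1 context where the core entry points aren't available, the EXT_framebuffer_object variants do the same job; a rough sketch using GLEW (using GLEW here is my assumption, not something taken from the linked sample):

if (GLEW_EXT_framebuffer_object)
{
    GLuint fbo;
    glGenFramebuffersEXT(1, &fbo);
    glBindFramebufferEXT(GL_FRAMEBUFFER_EXT, fbo);
    // attach color/depth textures with glFramebufferTexture2DEXT(...),
    // then check glCheckFramebufferStatusEXT(GL_FRAMEBUFFER_EXT) for completeness
}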
We were happily using glTexSubImage2D to update, every few frames, a texture which had been initialised with glTexImage2D in our GL initialisation. After adding a particle system in which each particle is itself textured, our quad showing the glTexSubImage2D texture no longer displays.
The particles' textures are PNGs, so we use SDL to load each PNG into an SDL surface, and then glTexImage2D is used to upload the PNG into a texture.
If we change the quad's glTexSubImage2D call to a glTexImage2D call, the texture shows, but this is extremely inefficient and cuts the framerate at least in half, so we would rather use glTexSubImage2D (as it worked before).
Does anyone have any idea why we now can't use glTexSubImage2D?
Below are the relevant pieces of code for the initialisation and binding of textures:
Loading in the particle texture
//Load smoke texture
SDL_Surface *surface;
SDL_Surface *alpha_image;
if( (surface = IMG_Load("smoke_particle.png")))
{
SDL_PixelFormat *pixf = SDL_GetVideoSurface()->format;
alpha_image = SDL_CreateRGBSurface( SDL_SWSURFACE, surface->w, surface->h, 32, pixf->Bmask, pixf->Gmask, pixf->Rmask, pixf->Amask );
SDL_SetAlpha(surface,0,0);
SDL_BlitSurface( surface, NULL, alpha_image, NULL );
glGenTextures( 1, &texture );
glBindTexture( GL_TEXTURE_2D, texture );
glTexParameteri( GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR );
glTexParameteri( GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR );
glTexImage2D( GL_TEXTURE_2D, 0, GL_RGBA, surface->w, surface->h, 0,
GL_RGBA, GL_UNSIGNED_BYTE, surface->pixels );
}
Setting up the quad's texture:
glEnable(GL_TEXTURE_2D);
glGenTextures(1, &texVid);
glBindTexture(GL_TEXTURE_2D, texVid);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexImage2D(GL_TEXTURE_2D, 0, 3, VIDEO_WIDTH, VIDEO_HEIGHT, 0, GL_RGB, GL_UNSIGNED_BYTE, NULL);
glPixelStorei(GL_UNPACK_ALIGNMENT, 2);
The only calls in initGL() are enabling GL_TEXTURE_2D and GL_BLEND, setting up glBlendFunc(), and setting up the quad's texture as above.
Any ideas?
Stupidly we had VIDEO_WIDTH set to the height of the texture and VIDEO_HEIGHT to the width of the texture.
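In other words, the allocation and per-frame update just needed the dimensions the right way round, roughly like this (a sketch; frameData stands in for whatever buffer we actually upload):

// allocate once at init, with width and height in the right order
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, VIDEO_WIDTH, VIDEO_HEIGHT, 0,
             GL_RGB, GL_UNSIGNED_BYTE, NULL);

// then update the same storage every few frames
glBindTexture(GL_TEXTURE_2D, texVid);
glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, VIDEO_WIDTH, VIDEO_HEIGHT,
                GL_RGB, GL_UNSIGNED_BYTE, frameData);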
Sorry if we wasted anyone's time.
Can anyone lock this or delete this or anything?
Thanks,
Infinitifizz