Alpha channel being rendered black. How to make it transparent? - c++

I'm loading a 32-bit PNG texture with some transparent regions. I have set up this code in my initialization function:
// OpenGL
glEnable(GL_DEPTH_TEST);
glShadeModel(GL_SMOOTH);
glEnable( GL_ALPHA_TEST );
glEnable( GL_BLEND );
glBlendFunc( GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA );
glClearColor( 0.0, 0.0, 0.0, 0.0 );
This is how I load a texture:
// Load textures
glGenTextures( 1, &this->texture );
int width, height;
unsigned char* image;
glActiveTexture( GL_TEXTURE0 );
glBindTexture( GL_TEXTURE_2D, this->texture );
std::string path = "../assets/textures/" + name;
image = SOIL_load_image( path.c_str(), &width, &height, 0, SOIL_LOAD_RGBA );
glTexImage2D( GL_TEXTURE_2D, 0, GL_RGBA, width, height, 0, GL_RGBA, GL_UNSIGNED_BYTE, image );
SOIL_free_image_data( image );
glUniform1i( glGetUniformLocation( shader->shader, "materialTex" ), 0 );
glTexParameteri( GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE );
glTexParameteri( GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE );
glTexParameteri( GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR );
glTexParameteri( GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR );
This is the effect I get:
I'm trying to make the grass background go from black to transparent.
Can you help me?

There are a couple of problems here, but they all boil down to the fact that alpha transparency is order dependent.
It is not clear why you are using a depth buffer in the first place. The application appears to be 2D, and furthermore you seem to be drawing every one of your 2D layers at the same depth. Nevertheless, the red background you are trying to draw needs to be drawn first.
If you drew the background with depth testing enabled, it would write its depth values to the depth buffer, and the sprites you then tried to draw on top of it would fail the depth test, because the default comparison is GL_LESS (which rejects fragments at the same depth). Disabling depth testing while you draw the background also disables depth writes, which is one possible way to fix your problem.
Realistically, you could just eliminate depth testing altogether and follow the golden rule for alpha blending:
Draw opaque geometry first, then sort translucent objects by depth and draw them back-to-front.
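A minimal sketch of that draw order (drawBackground() and drawSprites() are hypothetical placeholders for your own draw calls):
// Sketch only: draw order for order-dependent alpha blending.
glClear( GL_COLOR_BUFFER_BIT );
glDisable( GL_DEPTH_TEST );                          // no depth test, no depth writes
drawBackground();                                    // opaque background first
glEnable( GL_BLEND );
glBlendFunc( GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA );
drawSprites();                                       // translucent sprites last, back-to-front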

Related

Slow stencil texture on AMD

I'm trying to add soft shadows to a modified Doom3 engine using an FBO with a stencil texture attachment, which I bind and use in the light interaction fragment shader.
It works well enough, but there's a serious performance problem on a Radeon 460 (I don't have other AMD GPUs, but I suspect it's the same or worse on them, since this one is relatively new).
I'm on the latest drivers.
The FPS drop is so bad that it's actually faster to copy the stencil to another texture with qglCopyTexImage2D (for each light!) than to bind the stencil texture used in the FBO.
Another problem is that when I try to optimize the qglCopyTexImage2D with qglCopyTexSubImage2D, it starts to flicker.
Any real-world advice on stencil textures from fellow programmers?
Both nVidia and Intel appear to perform well here, speed-wise.
globalImages->currentRenderImage->Bind();
globalImages->currentRenderImage->uploadWidth = curWidth; // used as a shader param
globalImages->currentRenderImage->uploadHeight = curHeight;
qglTexParameterf( GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR );
qglTexParameterf( GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR );
qglTexParameterf( GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE );
qglTexParameterf( GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE );
qglTexImage2D( GL_TEXTURE_2D, 0, r_fboColorBits.GetInteger() == 15 ? GL_RGB5_A1 : GL_RGBA, curWidth, curHeight, 0, GL_BGRA, GL_UNSIGNED_BYTE, NULL ); //NULL means reserve texture memory, but texels are undefined
globalImages->currentRenderFbo->Bind();
qglTexParameterf( GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR );
qglTexParameterf( GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR );
qglTexParameterf( GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE );
qglTexParameterf( GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE );
qglTexImage2D( GL_TEXTURE_2D, 0, r_fboColorBits.GetInteger() == 15 ? GL_RGB5_A1 : GL_RGBA, curWidth, curHeight, 0, GL_BGRA, GL_UNSIGNED_BYTE, NULL ); //NULL means reserve texture memory, but texels are undefined
if ( glConfig.vendor != glvAny ) {
globalImages->currentStencilFbo->Bind();
globalImages->currentStencilFbo->uploadWidth = curWidth;
globalImages->currentStencilFbo->uploadHeight = curHeight;
qglTexParameterf( GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST );
qglTexParameterf( GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST );
qglTexParameterf( GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE );
qglTexParameterf( GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE );
qglTexImage2D( GL_TEXTURE_2D, 0, GL_STENCIL_INDEX8, curWidth, curHeight, 0, GL_STENCIL_INDEX, GL_UNSIGNED_BYTE, 0 );
}
globalImages->currentDepthImage->Bind();
globalImages->currentDepthImage->uploadWidth = curWidth; // used as a shader param
globalImages->currentDepthImage->uploadHeight = curHeight;
qglTexParameterf( GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST );
qglTexParameterf( GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST );
qglTexParameterf( GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE );
qglTexParameterf( GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE );
if ( glConfig.vendor == glvIntel ) { // FIXME allow 24-bit depth for low-res monitors
qglTexImage2D( GL_TEXTURE_2D, 0, GL_DEPTH_COMPONENT16, curWidth, curHeight, 0, GL_DEPTH_COMPONENT, GL_FLOAT, 0 );
} else {
qglTexImage2D( GL_TEXTURE_2D, 0, GL_DEPTH_STENCIL, curWidth, curHeight, 0, GL_DEPTH_STENCIL, GL_UNSIGNED_INT_24_8, 0 );
}
}
// (re-)attach textures to FBO
if ( !fboId || r_fboSharedColor.IsModified() || r_fboSharedDepth.IsModified() ) {
// create a framebuffer object; it must be deleted when the program exits
if ( !fboId )
qglGenFramebuffers( 1, &fboId );
qglBindFramebuffer( GL_FRAMEBUFFER, fboId );
// attach a texture to the FBO color attachment point
qglFramebufferTexture2D( GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, globalImages->currentRenderImage->texnum, 0 );
// attach a texture to the FBO depth attachment point
GLuint depthTex = r_fboSharedDepth.GetBool() ? globalImages->currentDepthImage->texnum : globalImages->currentDepthFbo->texnum;
qglFramebufferTexture2D( GL_FRAMEBUFFER, GL_DEPTH_ATTACHMENT, GL_TEXTURE_2D, depthTex, 0 );
if ( glConfig.vendor == glvIntel ) // separate stencil, thank God
qglFramebufferTexture2D( GL_FRAMEBUFFER, GL_STENCIL_ATTACHMENT, GL_TEXTURE_2D, globalImages->currentStencilFbo->texnum, 0 );
else
qglFramebufferTexture2D( GL_FRAMEBUFFER, GL_STENCIL_ATTACHMENT, GL_TEXTURE_2D, depthTex, 0 );
int status = qglCheckFramebufferStatus( GL_FRAMEBUFFER );
if ( GL_FRAMEBUFFER_COMPLETE != status ) { // something went wrong, fall back to default
common->Printf( "glCheckFramebufferStatus %d\n", status );
qglDeleteFramebuffers( 1, &fboId );
fboId = 0; // try from scratch next time
r_useFbo.SetBool( false );
}
qglBindFramebuffer( GL_FRAMEBUFFER, 0 ); // not obvious, but let it be
}
qglBindFramebuffer( GL_FRAMEBUFFER, fboId );
qglClear( GL_COLOR_BUFFER_BIT ); // otherwise transparent skybox blends with previous frame
fboUsed = true;
GL_CheckErrors();
}
/*
Soft shadows vendor specific implementation
Intel: separate stencil, direct access, fastest
nVidia: combined stencil & depth, direct access, fast
AMD: combined stencil & depth, direct access very slow, resorting to stencil copy
*/
void FB_CopyStencil() { // duzenko: why, AMD? WHY??
if ( glConfig.vendor != glvAMD || !r_softShadows.GetBool() )
return;
globalImages->currentStencilFbo->Bind();
qglCopyTexImage2D( GL_TEXTURE_2D, 0, GL_DEPTH_STENCIL, 0, 0, glConfig.vidWidth, glConfig.vidHeight, 0 );
/*globalImages->currentDepthFbo->Bind();
idScreenRect& r = backEnd.currentScissor;
//qglCopyTexSubImage2D( GL_TEXTURE_2D, 0, r.x1, r.y1, r.x1, r.y1, r.x2 - r.x1 + 1, r.y2 - r.y1 + 1 );*/
GL_CheckErrors();
}
void FB_BindStencilTexture() {
const GLenum GL_DEPTH_STENCIL_TEXTURE_MODE = 0x90EA;
idImage* stencil = glConfig.vendor != glvAny ? globalImages->currentStencilFbo : globalImages->currentDepthImage;
stencil->Bind();
if ( glConfig.vendor != glvIntel )
glTexParameteri( GL_TEXTURE_2D, GL_DEPTH_STENCIL_TEXTURE_MODE, GL_STENCIL_INDEX );
}
I ended up with two framebuffers: one for shadows only and the other for everything else.
The shadow texture is an FBO attachment in the former and is bound as an ordinary 2D texture in the latter.
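Roughly, the per-frame flow ends up like this (fboShadow and fboScene are hypothetical handles; FB_BindStencilTexture is the function shown above):
// Sketch of the two-framebuffer arrangement described above.
// Pass 1: draw shadow volumes into the shadows-only FBO; the shadow
// texture is attached to it at GL_STENCIL_ATTACHMENT.
qglBindFramebuffer( GL_FRAMEBUFFER, fboShadow );
qglClear( GL_STENCIL_BUFFER_BIT );
// ... render shadow volumes ...
// Pass 2: switch to the main FBO and sample the shadow texture as an
// ordinary 2D texture in the light interaction shader.
qglBindFramebuffer( GL_FRAMEBUFFER, fboScene );
FB_BindStencilTexture();
// ... render light interaction passes ...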

Alpha mask of overlapping objects in OpenGL

What is the best way of getting the alpha mask of overlapping objects in OpenGL? In the first picture below, I have three grids overlapping with an ellipsoid; depth testing is enabled. My goal is to get a result similar to the second image, where white represents the alpha. Below are the depth-testing flags that I am using.
glEnable(GL_DEPTH_TEST);
glDepthMask(GL_TRUE);
glDepthFunc(GL_LEQUAL);
Before you draw your red, yellow and blue grids, you should enable the stencil test and set the stencil operation with glStencilOp like this:
glEnable( GL_STENCIL_TEST );
glStencilOp( GL_KEEP, GL_KEEP, GL_INCR );
glStencilFunc( GL_ALWAYS, 0, 1 ); // these are also the default parameters
This means the stencil buffer is kept as it is if the depth test fails, and incremented if the depth test passes.
After drawing, the stencil buffer contains the mask, where 0 means black and > 0 means white in your case.
Ensure the stencil buffer is cleared before drawing ( glClear( GL_STENCIL_BUFFER_BIT ) ).
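If you only need the mask on the CPU, you can read the stencil buffer back directly after drawing; a minimal sketch (width and height are the viewport size):
#include <vector>
// Read the stencil mask back after drawing (0 = black, > 0 = white).
std::vector<GLubyte> mask( width * height );
glReadPixels( 0, 0, width, height, GL_STENCIL_INDEX, GL_UNSIGNED_BYTE, mask.data() );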
If you need this mask in a texture, you have to attach a depth-stencil texture to a framebuffer object:
GLint width = ...;
GLint height = ...;
GLuint depthAndStencilMap;
glGenTextures( 1, &depthAndStencilMap );
glBindTexture( GL_TEXTURE_2D, depthAndStencilMap );
glTexImage2D( GL_TEXTURE_2D, 0, GL_DEPTH24_STENCIL8, width, height, 0, GL_DEPTH_STENCIL, GL_UNSIGNED_INT_24_8, NULL );
glTexParameteri( GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST );
glTexParameteri( GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST );
glTexParameteri( GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT );
glTexParameteri( GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT );
GLuint frameBuffer;
glGenFramebuffers( 1, &frameBuffer );
glBindFramebuffer( GL_FRAMEBUFFER, frameBuffer );
glFramebufferTexture2D( GL_FRAMEBUFFER, GL_DEPTH_STENCIL_ATTACHMENT, GL_TEXTURE_2D, depthAndStencilMap, 0 );
glBindFramebuffer( GL_FRAMEBUFFER, 0);
Before drawing you have to bind and clear the frame buffer:
glBindFramebuffer( GL_FRAMEBUFFER, frameBuffer );
glClear( GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT | GL_STENCIL_BUFFER_BIT );
After drawing, the texture depthAndStencilMap contains the depth buffer in the red channel and the stencil buffer, which is your mask, in the blue channel.
Note that the scene is drawn to the framebuffer and not to the default framebuffer (the viewport).
"best" is a four letter word ;)
You could either:
- use the stencil buffer:
  - clear it to 0/false for "not your-grids"
  - stencil in 1/true for "is your-grids"
- or draw a second pass (sketched below):
  - only include the objects you want in the mask
  - don't clear the depth buffer
  - use a depth == (GL_EQUAL) test
  - use a different fragment shader for "is your-grids"
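A sketch of the second-pass variant (renderScene(), renderMaskObjects() and whiteMaskShader are hypothetical placeholders):
// Pass 1: normal render, filling the depth buffer.
glDepthFunc( GL_LEQUAL );
renderScene();
// Pass 2: keep the depth buffer, redraw only the mask objects with a
// depth-equality test and a trivial "output white" fragment shader.
glDepthFunc( GL_EQUAL );
glDepthMask( GL_FALSE );         // don't disturb the existing depth values
glUseProgram( whiteMaskShader ); // hypothetical all-white shader
renderMaskObjects();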

Using single channel texture (OpenGL 2)?

Short story:
When I render anything using a texture loaded like this:
glTexImage2D ( GL_TEXTURE_2D, 0, GL_R8, width, height, 0, GL_RED, GL_UNSIGNED_BYTE, pixels );
I get only black.
Long story:
This code works; I get an RGBA texture with an alpha channel (e.g. text with a transparent background):
// === load
#define GL_ABGR 0x8000
SDL_Surface * surf = SDL_LoadBMP( "common_resources/dejvu_sans_mono_RGBA.bmp" );
glGenTextures ( 1, &itex );
glBindTexture ( GL_TEXTURE_2D, itex );
glTexImage2D ( GL_TEXTURE_2D, 0, GL_RGBA, surf->w, surf->h, 0, GL_ABGR, GL_UNSIGNED_BYTE, surf->pixels );
glTexParameteri( GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR );
glTexParameteri( GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR );
// ....
// === render
glEnable( GL_TEXTURE_2D );
glBindTexture( GL_TEXTURE_2D, itex );
glColor3f(1.0f,1.0f,1.0f);
glEnable(GL_BLEND);
glEnable(GL_ALPHA_TEST);
glBlendFunc(GL_SRC_ALPHA,GL_ONE_MINUS_SRC_ALPHA);
drawString ( caption, xmin, ymin+12, 6 );
It renders like this:
But I'm trying to use single-channel (8-bit grayscale) images/textures instead of RGBA. I cannot get these to render at all, with or without transparency; whatever I do, I get only a black image.
This code doesn't work:
// === load
#define GL_ABGR 0x8000
SDL_Surface * surf = SDL_LoadBMP( "common_resources/dejvu_sans_mono_Alpha.bmp" );
glGenTextures ( 1, &itex );
glBindTexture ( GL_TEXTURE_2D, itex );
glTexImage2D ( GL_TEXTURE_2D, 0, GL_R8, surf->w, surf->h, 0, GL_RED, GL_UNSIGNED_BYTE, surf->pixels );
glTexParameteri( GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR );
glTexParameteri( GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR );
// ....
// === render
glEnable( GL_TEXTURE_2D );
glBindTexture( GL_TEXTURE_2D, itex );
glColor3f(1.0f,1.0f,1.0f);
//glEnable(GL_BLEND);
//glEnable(GL_ALPHA_TEST);
//glBlendFunc(GL_SRC_ALPHA,GL_ONE_MINUS_SRC_ALPHA);
drawString ( caption, xmin, ymin+12, 6 );
It renders like this:
Notes:
I know that I should eventually use glTexEnv as described e.g. here, but my main problem is that the monochrome texture does not render at all.
I also tried GL_LUMINANCE and GL_INTENSITY instead of GL_RED in glTexImage2D, with no difference.
There are other questions like here and here, but mostly about OpenGL > 3.0 and fragment shaders.
Also, is it possible that my graphics card or driver does not support this? I'm on Ubuntu 16.04:
GL_VENDOR: Intel Open Source Technology Center
GL_RENDERER: Mesa DRI Intel(R) HD Graphics 530 (Skylake GT2)
GL_VERSION: 3.0 Mesa 11.2.0
For completeness (although it is not important), drawString looks like this:
void drawString( const char* str, float x, float y, float sz ){ // signature inferred from the call above
const int nchars = 95;
float persprite = 1.0f/nchars;
glBegin(GL_QUADS);
for(int i=0; i<65536; i++){
if( str[i] == 0 ) break; // 0-terminated string
int isprite = str[i] - 33; // 33 is the offset of meaningful ASCII characters
float offset = isprite*persprite+(persprite*0.57);
float xi = i*sz + x;
glTexCoord2f( offset , 1.0f ); glVertex3f( xi, y, 3.0f );
glTexCoord2f( offset+persprite, 1.0f ); glVertex3f( xi+sz, y, 3.0f );
glTexCoord2f( offset+persprite, 0.0f ); glVertex3f( xi+sz, y+sz*2, 3.0f );
glTexCoord2f( offset , 0.0f ); glVertex3f( xi, y+sz*2, 3.0f );
}
glEnd();
}
I want to try to help you. In my projects I use these arguments to generate textures from grayscale source images:
glTexImage2D( GL_TEXTURE_2D, 0, 1, width, height, 0, GL_RED, GL_UNSIGNED_BYTE, pixels );
As written in the documentation, the third argument is the number of color components (1 in our case), not a sized internal format; check the integer value that GL_R8 resolves to, or pass the component count explicitly as above.
GL_RED means the gray values end up in the red channel only (they are not replicated into the red, green and blue channels the way you might expect for a grayscale image).
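Put together, a load path following this suggestion might look like the sketch below (same SDL surface as in the question; the GL_UNPACK_ALIGNMENT line is an extra precaution, since 8-bit rows are often not 4-byte aligned):
SDL_Surface * surf = SDL_LoadBMP( "common_resources/dejvu_sans_mono_Alpha.bmp" );
glGenTextures ( 1, &itex );
glBindTexture ( GL_TEXTURE_2D, itex );
glPixelStorei ( GL_UNPACK_ALIGNMENT, 1 ); // single-byte rows need 1-byte alignment
glTexImage2D ( GL_TEXTURE_2D, 0, 1, surf->w, surf->h, 0,
               GL_RED, GL_UNSIGNED_BYTE, surf->pixels );
glTexParameteri( GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR );
glTexParameteri( GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR );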

nVidia openGL fails to display simple COLOR_INDEX texture

I have my first simple program to display 2D images using OpenGL. I'm using an index-based image, calling glTexImage2D( ..., GL_RGB, ..., GL_COLOR_INDEX, ... ).
This is working as expected on an ATI card.
Having swapped to an nVidia card, I see a black quad instead of my image. Given that it works on the ATI card, I guess the code is basically correct, but maybe I have missed a setting, or maybe the card doesn't support what I'm doing (?!)
First the Setup code (I'm using Qt btw, so there's probably some context calls I'm missing):-
glClearColor( 0.1, 0.1, 0.25, 0); // background color
glEnable(GL_DEPTH_TEST);
glDepthFunc(GL_LEQUAL);
glPixelStorei(GL_UNPACK_ALIGNMENT, 4); // 4-byte pixel alignment
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR );
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR );
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP);
glTexEnvf( GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE, GL_DECAL );
Here's the code to set the texture :-
GLfloat Greys[256];
GLfloat Ones[256];
for( int I(0); I < 256; ++I )
{
Greys[I] = (GLfloat)I/256;
Ones[I] = 1.0;
}
makeCurrent();
glPixelMapfv( GL_PIXEL_MAP_I_TO_R, 256, Greys );
glPixelMapfv( GL_PIXEL_MAP_I_TO_G, 256, Greys );
glPixelMapfv( GL_PIXEL_MAP_I_TO_A, 256, Ones );
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, m_pImage->size().width(), m_pImage->size().height(), 0, GL_COLOR_INDEX, GL_UNSIGNED_BYTE, m_pImage->bits() );
Here's the display code
glLoadIdentity();
// Get the camera in the right place
glRotatef( 180, 1, 0, 0 );
// Apply the Pan(Offset), and Zoom
glTranslatef( m_Offset.x(), m_Offset.y(), 0);
glScalef( m_Zoom, m_Zoom, 1 );
// Display the image texture mapped to a rectangle
glColor3f( 1,1,0 );
glEnable(GL_TEXTURE_2D);
glBegin(GL_QUADS);
glTexCoord2f( 0, 0 ); glVertex3f( 0, 0, 10 );
glTexCoord2f( 1, 0 ); glVertex3f( ImSize.width(), 0, 10 );
glTexCoord2f( 1, 1 ); glVertex3f( ImSize.width(), ImSize.height(), 10 );
glTexCoord2f( 0, 1 ); glVertex3f( 0, ImSize.height(), 10 );
glEnd();
glDisable(GL_TEXTURE_2D);
I also display the same image in full colour, in a separate window, using a straight RGB-to-RGB call to glTexImage2D, so I'm confident the dimensions are acceptable.
If I remove the call to glTexImage2D then I get a yellow quad as expected. Thus I suspect I have a problem with my calls to set the colour LUTs.
Board is an ASUS GeForce 210 silent
Windows XP 32 bit.
nVidia Drivers 6.14.13.681 (9-23-2012), R306.81 (branch: r304_70-122)
Did you test for OpenGL error codes? You may use this code: https://gist.github.com/4144988
Regarding color index formats: I wouldn't be surprised if they simply weren't supported by the driver; nobody uses color index formats these days. If you want to draw a paletted texture, upload the palette into a 1D RGB texture and the color-indexed image into a single-channel (GL_RED or GL_LUMINANCE, depending on the OpenGL version) 2D texture, and use the sampled value as an index into the palette texture.
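A sketch of that palette-lookup approach (paletteTex, indexTex, paletteRGB and indices are hypothetical names; the shader is GLSL 1.20 style):
// Upload the 256-entry palette as a 1D RGB texture.
glBindTexture( GL_TEXTURE_1D, paletteTex );
glTexImage1D( GL_TEXTURE_1D, 0, GL_RGB, 256, 0, GL_RGB, GL_UNSIGNED_BYTE, paletteRGB );
glTexParameteri( GL_TEXTURE_1D, GL_TEXTURE_MIN_FILTER, GL_NEAREST );
glTexParameteri( GL_TEXTURE_1D, GL_TEXTURE_MAG_FILTER, GL_NEAREST );
// Upload the color-indexed image as a single-channel 2D texture.
glBindTexture( GL_TEXTURE_2D, indexTex );
glTexImage2D( GL_TEXTURE_2D, 0, GL_LUMINANCE, width, height, 0,
              GL_LUMINANCE, GL_UNSIGNED_BYTE, indices );
// Fragment shader (GLSL 1.20) doing the palette lookup:
// uniform sampler2D indexImage; // the color-indexed image
// uniform sampler1D palette;    // the 256-entry palette
// void main() {
//     float i = texture2D( indexImage, gl_TexCoord[0].st ).r; // 0..1
//     i = i * (255.0 / 256.0) + (0.5 / 256.0);                // hit texel centers
//     gl_FragColor = texture1D( palette, i );
// }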

glTexSubImage2d not working after adding in a particle system (with textures on each particle)

We were happily using glTexSubImage2D to update a texture (initialised with glTexImage2D during GL initialisation) every few frames. After adding a particle system in which each particle is itself textured, the quad showing the glTexSubImage2D texture no longer displays.
The particles' textures are PNGs, so we use SDL to load each PNG into an SDL surface, and then glTexImage2D is used to bind the PNG to a texture.
If we change the quad's glTexSubImage2D call to a glTexImage2D call, the texture shows, but this is extremely inefficient and at least halves the framerate, so we would rather use glTexSubImage2D (as it worked before).
Does anyone have any idea why we now can't use glTexSubImage2D?
Below are the relevant pieces of code for the initialisation and binding of textures:
Loading in the particle texture
//Load smoke texture
SDL_Surface *surface;
SDL_Surface *alpha_image;
if( (surface = IMG_Load("smoke_particle.png")))
{
SDL_PixelFormat *pixf = SDL_GetVideoSurface()->format;
alpha_image = SDL_CreateRGBSurface( SDL_SWSURFACE, surface->w, surface->h, 32, pixf->Bmask, pixf->Gmask, pixf->Rmask, pixf->Amask );
SDL_SetAlpha(surface,0,0);
SDL_BlitSurface( surface, NULL, alpha_image, NULL );
glGenTextures( 1, &texture );
glBindTexture( GL_TEXTURE_2D, texture );
glTexParameteri( GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR );
glTexParameteri( GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR );
glTexImage2D( GL_TEXTURE_2D, 0, GL_RGBA, surface->w, surface->h, 0,
GL_RGBA, GL_UNSIGNED_BYTE, surface->pixels );
}
Setting up the quad's texture:
glEnable(GL_TEXTURE_2D);
glGenTextures(1, &texVid);
glBindTexture(GL_TEXTURE_2D, texVid);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexImage2D(GL_TEXTURE_2D, 0, 3, VIDEO_WIDTH, VIDEO_HEIGHT, 0, GL_RGB, GL_UNSIGNED_BYTE, NULL);
glPixelStorei(GL_UNPACK_ALIGNMENT, 2);
The only calls in initGL() are enabling GL_TEXTURE_2D and GL_BLEND, setting up glBlendFunc(), and setting up the quad's texture as above.
Any ideas?
Stupidly, we had VIDEO_WIDTH set to the height of the texture and VIDEO_HEIGHT set to the width of the texture.
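For anyone hitting the same symptom: the subimage rectangle passed to glTexSubImage2D has to fit within the dimensions the texture was allocated with, so the allocation and the per-frame update must agree (framePixels here is a hypothetical pointer to the new frame's data):
// Allocate once (width and height must not be swapped!) ...
glTexImage2D( GL_TEXTURE_2D, 0, 3, VIDEO_WIDTH, VIDEO_HEIGHT, 0,
              GL_RGB, GL_UNSIGNED_BYTE, NULL );
// ... then update every few frames:
glTexSubImage2D( GL_TEXTURE_2D, 0, 0, 0, VIDEO_WIDTH, VIDEO_HEIGHT,
                 GL_RGB, GL_UNSIGNED_BYTE, framePixels );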
Sorry if we wasted anyone's time.
Can anyone lock this or delete this or anything?
Thanks,
Infinitifizz