OpenGL drawing textured quad as a solid colour

I would like to generate a texture every update loop and display it on a quad. I've looked at numerous tutorials and can only find segments of code instead of complete examples.
First I define the texture and create the data.
glGenTextures(1, &textureID);
auto *data = new unsigned char[CHUNK_SIZE * CHUNK_SIZE * 4];
// Element* elements[4096];
for (int i = 0; i < CHUNK_SIZE * CHUNK_SIZE * 4; i += 4) {
    Element *element = elements[i / 4];
    if (element != nullptr) {
        data[i] = element->r * 255;
        data[i + 1] = element->g * 255;
        data[i + 2] = element->b * 255;
        data[i + 3] = element->a * 255;
    } else {
        data[i] = 0;
        data[i + 1] = 0;
        data[i + 2] = 0;
        data[i + 3] = 0;
    }
}
Then I draw it.
glPushMatrix();
glTranslatef(0.5f + chunkX * CHUNK_SIZE, 0.5f + chunkY * CHUNK_SIZE, 0);
glEnable(GL_TEXTURE);
glBindTexture(GL_TEXTURE_2D, textureID);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA8, CHUNK_SIZE, CHUNK_SIZE, 0, GL_RGBA, GL_UNSIGNED_BYTE, data);
glBegin(GL_QUADS);
glTexCoord2d(0, 0);
glVertex2d(0, 0);
glTexCoord2d(1, 0);
glVertex2d(CHUNK_SIZE, 0);
glTexCoord2d(1, 1);
glVertex2d(CHUNK_SIZE, CHUNK_SIZE);
glTexCoord2d(0, 1);
glVertex2d(0, CHUNK_SIZE);
glEnd();
glPopMatrix();
The quad renders in the correct position, but it is solid red.
What is the correct way of generating a texture at runtime and drawing it?

The texture wasn't displaying because I was enabling GL_TEXTURE instead of GL_TEXTURE_2D.
glEnable(GL_TEXTURE_2D);
This made the textures render, but they were faded and seemed to be affecting other textures. Adding glColor3f(1,1,1); before rendering the quads fixed this.
glPushMatrix();
glTranslatef(0.5f + chunkX * CHUNK_SIZE, 0.5f + chunkY * CHUNK_SIZE, 0);
glEnable(GL_TEXTURE_2D);
glBindTexture(GL_TEXTURE_2D, whiteTextureID);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA8, CHUNK_SIZE, CHUNK_SIZE, 0, GL_RGBA, GL_UNSIGNED_BYTE, data);
glColor3f(1,1,1);
glBegin(GL_QUADS);
glTexCoord2d(0, 0);
glVertex2d(0, 0);
glTexCoord2d(1, 0);
glVertex2d(CHUNK_SIZE, 0);
glTexCoord2d(1, 1);
glVertex2d(CHUNK_SIZE, CHUNK_SIZE);
glTexCoord2d(0, 1);
glVertex2d(0, CHUNK_SIZE);
glEnd();
glPopMatrix();
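Since the goal is to regenerate the texture every update loop, one further refinement worth mentioning (a hedged sketch of mine, not part of the original answer): allocate the texture storage once with glTexImage2D, then overwrite it each frame with glTexSubImage2D instead of recreating it. The names below (textureID, CHUNK_SIZE, data) are taken from the question; the split into setup and per-frame code is my assumption.
// One-time setup: allocate storage and set filtering.
glBindTexture(GL_TEXTURE_2D, textureID);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA8, CHUNK_SIZE, CHUNK_SIZE, 0,
             GL_RGBA, GL_UNSIGNED_BYTE, nullptr);

// Every update loop: upload the freshly generated pixels into the existing storage.
glBindTexture(GL_TEXTURE_2D, textureID);
glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, CHUNK_SIZE, CHUNK_SIZE,
                GL_RGBA, GL_UNSIGNED_BYTE, data);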

Related

How to create bitmap font texture in OpenGL?

So I was trying to learn OpenGL from learnopengl.com and I'm currently on the topic of text rendering. I thought about combining the scalable font textures in my program into one big texture, but for some reason I get an exception because of glCopyImageSubData(...).
First I try to measure how big the texture should be, then copy the textures I already created into one big texture. I've been playing with this function for quite a while now, but I can't find a solution.
Here is the original code, with a separate texture used for each face.
I tried to create an FBO and attach a texture to it, but after some research I found this function, which was far clearer to me, so I decided to use it instead.
So I added xoffset to the Character structure:
struct Character {
    GLuint textureID;
    glm::ivec2 size;
    glm::ivec2 bearing;
    GLuint advance;
    GLuint xoffset;
};
I add this offset to each face in the first for loop:
Character character = {
    texture,
    glm::ivec2(face->glyph->bitmap.width, face->glyph->bitmap.rows),
    glm::ivec2(face->glyph->bitmap_left, face->glyph->bitmap_top),
    face->glyph->advance.x,
    font_texture_width
};
font_texture_width += character.size.x;
characters.insert(std::pair<GLchar, Character>(c, character));
And then I try to paste each face texture into fontTexture:
glGenTextures(1, &fontTexture);
glBindTexture(GL_TEXTURE_2D, fontTexture);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RED,
             font_texture_width, 100, 0, GL_RED, GL_UNSIGNED_BYTE, NULL);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
std::cout << glGetError();
for (GLubyte c = 0; c < 128; ++c)
    glCopyImageSubData(characters[c].textureID, GL_TEXTURE_2D, 0, 0, 0, 0,
                       fontTexture, GL_TEXTURE_2D, 0, characters[c].xoffset, 0, 0,
                       characters[c].size.x, characters[c].size.y, 0);
Here is the modified renderText function:
void renderText(Shader &s, std::string text, GLfloat x, GLfloat y, GLfloat scale, glm::vec3 color)
{
    s.use();
    s.setVec3("textColor", color);
    glActiveTexture(GL_TEXTURE0);
    glBindVertexArray(VAO);
    std::string::const_iterator c;
    glBindTexture(GL_TEXTURE_2D, fontTexture);
    for (c = text.begin(); c != text.end(); ++c)
    {
        Character ch = characters[*c];
        GLfloat xpos = x + ch.bearing.x * scale;
        GLfloat ypos = y - (ch.size.y - ch.bearing.y) * scale;
        GLfloat w = ch.size.x * scale;
        GLfloat h = ch.size.y * scale;
        GLfloat vertices[6][4] = {
            { xpos + characters[*c].xoffset, ypos + h, 0.0, 0.0 },
            { xpos + characters[*c].xoffset, ypos, 0.0, 1.0 },
            { xpos + w + characters[*c].xoffset, ypos, 1.0, 1.0 },
            { xpos + characters[*c].xoffset, ypos + h, 0.0, 0.0 },
            { xpos + w + characters[*c].xoffset, ypos, 1.0, 1.0 },
            { xpos + w + characters[*c].xoffset, ypos + h, 1.0, 0.0 }
        };
        glBindBuffer(GL_ARRAY_BUFFER, VBO);
        glBufferSubData(GL_ARRAY_BUFFER, 0, sizeof(vertices), vertices);
        glBindBuffer(GL_ARRAY_BUFFER, 0);
        glDrawArrays(GL_TRIANGLES, 0, 6);
        x += (ch.advance >> 6) * scale;
    }
    glBindVertexArray(0);
    glBindTexture(GL_TEXTURE_2D, 0);
}
An exception is thrown where I call glCopyImageSubData.
Here is my whole program: https://pastebin.com/vAeeX3Xh
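A couple of hedged notes of my own before the edit below: glCopyImageSubData is only available on OpenGL 4.3 contexts (or via the ARB/NV copy_image extensions), so if the context or function loader doesn't provide it, the call goes through a null function pointer and crashes rather than reporting a GL error. Also, for 2D textures the depth of the copied region is normally 1 (a single layer), not 0. A sketch of the loop under those assumptions:
// Assumes a GL 4.3+ context; the final argument is 1 so that one layer is actually copied.
for (GLubyte c = 0; c < 128; ++c)
    glCopyImageSubData(characters[c].textureID, GL_TEXTURE_2D, 0, 0, 0, 0,
                       fontTexture, GL_TEXTURE_2D, 0, characters[c].xoffset, 0, 0,
                       characters[c].size.x, characters[c].size.y, 1);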
EDIT:
Now I've figured out that it would be better to use glTexSubImage2D instead, so this is how it looks (replacing the block of code that used glCopyImageSubData):
glGenTextures(1, &fontTexture);
glBindTexture(GL_TEXTURE_2D, fontTexture);
glTexImage2D(
    GL_TEXTURE_2D,
    0,
    GL_RED,
    face->glyph->bitmap.width * 250,
    face->glyph->bitmap.rows * 250,
    0,
    GL_RED,
    GL_UNSIGNED_BYTE,
    NULL
);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
for (GLubyte c = 0; c < 128; ++c)
{
    if (FT_Load_Char(face, c, FT_LOAD_RENDER))
    {
        std::cout << "ERROR::FREETYPE: Failed to load Glyph" << std::endl;
        continue;
    }
    glTexSubImage2D(GL_TEXTURE_2D, 0, characters[c].xoffset, 0,
                    characters[c].size.x, characters[c].size.y,
                    GL_RED, GL_UNSIGNED_BYTE, face->glyph->bitmap.buffer);
}
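One detail worth checking with this approach (hedged, but the learnopengl.com text-rendering code does the same before uploading glyphs): FreeType glyph bitmaps are single-byte-per-pixel rows with no padding, so glyphs whose width is not a multiple of 4 break OpenGL's default 4-byte unpack alignment and can skew the upload. Setting the alignment to 1 before the glTexSubImage2D loop avoids that:
glPixelStorei(GL_UNPACK_ALIGNMENT, 1); // glyph rows are tightly packed, 1 byte per pixel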
Now when I render this texture it looks like this:
https://imgur.com/a/A0gDy6T

How to multisample FBOs

Note: I am using LWJGL.
Here is my code for creating a new FBO:
/**
 * Creates a new FBO.
 * @param width The width of the FBO to create.
 * @param height The height of the FBO to create.
 * @return an int[] array containing the buffer IDs in the
 *         following order: {frameBufferID, colorBufferID (texture), depthBufferID}.
 */
public static int[] newFBO(int width, int height) {
    int[] out = new int[3];
    out[0] = glGenFramebuffersEXT();
    out[1] = glGenTextures();
    out[2] = glGenRenderbuffersEXT();
    glBindFramebufferEXT(GL_FRAMEBUFFER_EXT, out[0]);
    glBindTexture(GL_TEXTURE_2D, out[1]);
    glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
    glTexParameteri(GL_TEXTURE_2D, org.lwjgl.opengl.GL12.GL_TEXTURE_MAX_LEVEL, 20);
    glTexParameteri(GL_TEXTURE_2D, GL14.GL_GENERATE_MIPMAP, GL_TRUE);
    glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA8, width, height, 0, GL_RGBA, GL_INT, (java.nio.ByteBuffer) null);
    glFramebufferTexture2DEXT(GL_FRAMEBUFFER_EXT, GL_COLOR_ATTACHMENT0_EXT, GL_TEXTURE_2D, out[1], 0);
    glBindRenderbufferEXT(GL_RENDERBUFFER_EXT, out[2]);
    glRenderbufferStorageEXT(GL_RENDERBUFFER_EXT, GL14.GL_DEPTH_COMPONENT24, width, height);
    glFramebufferRenderbufferEXT(GL_FRAMEBUFFER_EXT, GL_DEPTH_ATTACHMENT_EXT, GL_RENDERBUFFER_EXT, out[2]);
    glBindFramebufferEXT(GL_FRAMEBUFFER_EXT, 0);
    return out;
}
Here is how I draw the FBO to the screen:
public static void rectOnScreen(int tex) {
    glBindTexture(GL_TEXTURE_2D, tex);
    glDisable(GL_DEPTH_TEST);
    glEnable(GL_TEXTURE_2D);
    glLoadIdentity();
    glBegin(GL_QUADS);
    glTexCoord2f(0, 0);
    glVertex2f(-1, -1);
    glTexCoord2f(0, 1);
    glVertex2f(-1, 1);
    glTexCoord2f(1, 1);
    glVertex2f(1, 1);
    glTexCoord2f(1, 0);
    glVertex2f(1, -1);
    glEnd();
    glDisable(GL_TEXTURE_2D);
    glEnable(GL_DEPTH_TEST);
}
Basically, I use out[2] as the argument for that function.
Now, how do I apply multisampling here? I don't really like the jaggedish look of the results I'm getting. I want to take multiple samples of the FBO when I draw it. I would be very happy to get the code written out, but I guess a link to a tutorial or something is fine too.
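No answer was recorded here, but as a hedged sketch of the usual approach (requires GL 3.0+ or EXT_framebuffer_multisample; LWJGL exposes the same functions, e.g. in GL30): render into a second FBO whose colour and depth attachments are multisampled renderbuffers, then resolve it into the single-sample FBO created by newFBO() with glBlitFramebuffer before drawing that texture to the screen. The names msFBO, msColor, msDepth and singleSampleFBO below are mine.
// Create a 4x multisampled FBO.
int samples = 4;
GLuint msFBO, msColor, msDepth;
glGenFramebuffers(1, &msFBO);
glGenRenderbuffers(1, &msColor);
glGenRenderbuffers(1, &msDepth);
glBindFramebuffer(GL_FRAMEBUFFER, msFBO);
glBindRenderbuffer(GL_RENDERBUFFER, msColor);
glRenderbufferStorageMultisample(GL_RENDERBUFFER, samples, GL_RGBA8, width, height);
glFramebufferRenderbuffer(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_RENDERBUFFER, msColor);
glBindRenderbuffer(GL_RENDERBUFFER, msDepth);
glRenderbufferStorageMultisample(GL_RENDERBUFFER, samples, GL_DEPTH_COMPONENT24, width, height);
glFramebufferRenderbuffer(GL_FRAMEBUFFER, GL_DEPTH_ATTACHMENT, GL_RENDERBUFFER, msDepth);

// ... render the scene into msFBO instead of the single-sample FBO ...

// Resolve the samples into the single-sample FBO (out[0]) so its texture can be drawn as before.
glBindFramebuffer(GL_READ_FRAMEBUFFER, msFBO);
glBindFramebuffer(GL_DRAW_FRAMEBUFFER, singleSampleFBO);
glBlitFramebuffer(0, 0, width, height, 0, 0, width, height, GL_COLOR_BUFFER_BIT, GL_NEAREST);
glBindFramebuffer(GL_FRAMEBUFFER, 0);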

Loading texture using OpenGL and OpenCV

I'm having a problem trying to get my texture showing correctly on some geometry. Basically, I have some triangles which describe the shape of a person's face, and a PNG which I have to load with OpenCV.
I have the following code:
void Geometry::loadTex(const char* fileName){
    RenderUtils *u = new RenderUtils();
    IplImage* img = u->getImg(fileName);
    GLuint texture;
    // allocate a texture name
    glGenTextures(1, &texture);
    // select our current texture
    glBindTexture(GL_TEXTURE_2D, texture);
    // select modulate to mix texture with color for shading
    glTexEnvf(GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE, GL_MODULATE);
    glPixelStorei(GL_UNPACK_ALIGNMENT, 1);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
    glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, texWidth, texHeight, 0, GL_RGBA, GL_UNSIGNED_BYTE, img->imageData);
    glEnable(GL_TEXTURE_2D);
    glTexEnvi(GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE, GL_REPLACE);
    //delete [] t;
}
IplImage* RenderUtils::getImg(const char *fileName){
    IplImage* img = 0;
    img = cvLoadImage(fileName, CV_LOAD_IMAGE_COLOR);
    if(!img) cout << "Couldn't load image " << fileName << endl;
    return img;
}
void Geometry::display(){
    glPushMatrix();
    glTranslatef(20.0f, 20.0f, -40.f);
    glRotatef(180.f, 0.0, 0.0, 1.0);
    for(int i = 0; i < numTris; i+=3){
        glBegin(GL_TRIANGLES);
        glTexCoord3f(mean[tri[i]] / texWidth, ((mean[meanLength/2 + tri[i]] - maximumMu) * -1) / texHeight, 0);
        glVertex3f(mean[tri[i]], (mean[meanLength/2 + tri[i]] - maximumMu) * -1, 0);
        glTexCoord3f(mean[tri[i+1]] / texWidth, ((mean[meanLength/2 + tri[i+1]] - maximumMu) * -1) / texHeight, 0);
        glVertex3f(mean[tri[i+1]], (mean[meanLength/2 + tri[i+1]] - maximumMu) * -1, 0);
        glTexCoord3f(mean[tri[i+2]] / texWidth, ((mean[meanLength/2 + tri[i+2]] - maximumMu) * -1) / texHeight, 0);
        glVertex3f(mean[tri[i+2]], (mean[meanLength/2 + tri[i+2]] - maximumMu) * -1, 0);
        glEnd();
    }
    glPopMatrix();
    glutSwapBuffers();
    glFlush();
}
So loadTex is called first, which calls getImg, and then the display method is called from GLUT.
You might want to take a look at this. It does exactly what you want.
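One more thing that stands out in loadTex (a hedged observation of mine, separate from the answer above): cvLoadImage with CV_LOAD_IMAGE_COLOR returns 3-channel BGR pixel data, while the glTexImage2D call describes the data as GL_RGBA, so the channel count and order don't match what OpenGL is told to read. A sketch of an upload that matches the IplImage layout, assuming desktop OpenGL where GL_BGR is available and the usual 4-byte row alignment of IplImage:
// img->imageData is 3-channel BGR with 4-byte-aligned rows (the IplImage default).
glPixelStorei(GL_UNPACK_ALIGNMENT, 4);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, img->width, img->height, 0,
             GL_BGR, GL_UNSIGNED_BYTE, img->imageData);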

Trouble displaying texture in OpenGL ES with C++

I'm trying to write a small, portable 2D engine for iOS to learn C++ and OpenGL. Right now I'm trying to display a texture that I've loaded in. I was able to display a texture when loading it with the CoreGraphics libraries, but now I'm trying to load the file in C++ with fread and libpng, and all I see is a white box.
My texture is 64x64 so it's not a power of 2 problem. Also I have enabled GL_TEXTURE_2D.
The first block of code is used to load the png, convert the png to image data, and load the data into OpenGL.
void AssetManager::loadImage(const std::string &filename)
{
    Texture texture(filename);
    png_structp png_ptr = NULL;
    png_infop info_ptr = NULL;
    png_bytep *row_pointers = NULL;
    int bitDepth, colourType;
    FILE *pngFile = fopen(std::string(Game::environmentData.basePath + "/" + filename).c_str(), "rb");
    if(!pngFile)
        return;
    png_byte sig[8];
    fread(&sig, 8, sizeof(png_byte), pngFile);
    rewind(pngFile); //so when we init io it won't bitch
    if(!png_check_sig(sig, 8))
        return;
    png_ptr = png_create_read_struct(PNG_LIBPNG_VER_STRING, NULL, NULL, NULL);
    if(!png_ptr)
        return;
    if(setjmp(png_jmpbuf(png_ptr)))
        return;
    info_ptr = png_create_info_struct(png_ptr);
    if(!info_ptr)
        return;
    png_init_io(png_ptr, pngFile);
    png_read_info(png_ptr, info_ptr);
    bitDepth = png_get_bit_depth(png_ptr, info_ptr);
    colourType = png_get_color_type(png_ptr, info_ptr);
    if(colourType == PNG_COLOR_TYPE_PALETTE)
        png_set_palette_to_rgb(png_ptr);
    /*if(colourType == PNG_COLOR_TYPE_GRAY && bitDepth < 8)
        png_set_gray_1_2_4_to_8(png_ptr);*/
    if(png_get_valid(png_ptr, info_ptr, PNG_INFO_tRNS))
        png_set_tRNS_to_alpha(png_ptr);
    if(bitDepth == 16)
        png_set_strip_16(png_ptr);
    else if(bitDepth < 8)
        png_set_packing(png_ptr);
    png_read_update_info(png_ptr, info_ptr);
    png_get_IHDR(png_ptr, info_ptr, &texture.width, &texture.height,
                 &bitDepth, &colourType, NULL, NULL, NULL);
    int components = GetTextureInfo(colourType);
    if(components == -1)
    {
        if(png_ptr)
            png_destroy_read_struct(&png_ptr, &info_ptr, NULL);
        return;
    }
    GLubyte *pixels = (GLubyte *)malloc(sizeof(GLubyte) * (texture.width * texture.height * components));
    row_pointers = (png_bytep *)malloc(sizeof(png_bytep) * texture.height);
    for(int i = 0; i < texture.height; ++i)
        row_pointers[i] = (png_bytep)(pixels + (i * texture.width * components));
    png_read_image(png_ptr, row_pointers);
    png_read_end(png_ptr, NULL);
    // make it
    glGenTextures(1, &texture.name);
    // bind it
    glBindTexture(GL_TEXTURE_2D, texture.name);
    GLuint glcolours;
    switch (components) {
        case 1:
            glcolours = GL_LUMINANCE;
            break;
        case 2:
            glcolours = GL_LUMINANCE_ALPHA;
            break;
        case 3:
            glcolours = GL_RGB;
            break;
        case 4:
            glcolours = GL_RGBA;
            break;
    }
    glTexImage2D(GL_TEXTURE_2D, 0, components, texture.width, texture.height, 0, glcolours, GL_UNSIGNED_BYTE, pixels);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
    png_destroy_read_struct(&png_ptr, &info_ptr, NULL);
    fclose(pngFile);
    free(row_pointers);
    free(pixels);
    textures.push_back(texture);
}
Here is the code for my Texture class:
class Texture
{
public:
    Texture(const std::string &filename) { this->filename = filename; }
    unsigned int name;
    unsigned int size;
    unsigned int width;
    unsigned int height;
    std::string filename;
};
Here is how I setup my view:
void Renderer::Setup(Rectangle rect, CameraType cameraType)
{
    _viewRect = rect;
    glViewport(0, 0, _viewRect.width, _viewRect.height);
    if(cameraType == Renderer::Orthographic)
    {
        glMatrixMode(GL_PROJECTION);
        glLoadIdentity();
        glOrthof(_viewRect.x, _viewRect.width, _viewRect.y, _viewRect.height, kZNear, kZFar);
        glMatrixMode(GL_MODELVIEW);
    }
    else
    {
        GLfloat size = kZNear * tanf(DegreesToRadians(kFieldOfView) / 2.0);
        glEnable(GL_DEPTH_TEST);
        glMatrixMode(GL_PROJECTION);
        glLoadIdentity();
        glFrustumf(-size, size, -size / (_viewRect.width / _viewRect.height), size / (_viewRect.width / _viewRect.height), kZNear, kZFar);
        glMatrixMode(GL_MODELVIEW);
    }
    glEnable(GL_TEXTURE_2D);
    glBlendFunc(GL_ONE, GL_SRC_COLOR);
    glEnable(GL_BLEND);
}
Now here is where I draw the texture:
void Renderer::DrawTexture(int x, int y, Texture &texture)
{
    GLfloat vertices[] = {
        x, _viewRect.height - y, 0.0,
        x, _viewRect.height - (y + texture.height), 0.0,
        x + texture.width, _viewRect.height - y, 0.0,
        x + texture.width, _viewRect.height - (y + texture.height), 0.0
    };
    static const GLfloat normals[] = {
        0.0, 0.0, 1.0,
        0.0, 0.0, 1.0,
        0.0, 0.0, 1.0,
        0.0, 0.0, 1.0
    };
    GLfloat texCoords[] = {
        0.0, 1.0,
        0.0, 0.0,
        1.0, 1.0,
        1.0, 0.0
    };
    glEnableClientState(GL_VERTEX_ARRAY);
    glEnableClientState(GL_NORMAL_ARRAY);
    glEnableClientState(GL_TEXTURE_COORD_ARRAY);
    glLoadIdentity();
    glTranslatef(0.0, 0.0, -3.0);
    glBindTexture(GL_TEXTURE_2D, texture.name);
    glVertexPointer(3, GL_FLOAT, 0, vertices);
    glNormalPointer(GL_FLOAT, 0, normals);
    glTexCoordPointer(2, GL_FLOAT, 0, texCoords);
    glDrawArrays(GL_TRIANGLE_STRIP, 0, 4);
    glDisableClientState(GL_VERTEX_ARRAY);
    glDisableClientState(GL_NORMAL_ARRAY);
    glDisableClientState(GL_TEXTURE_COORD_ARRAY);
}
UPDATE:
I changed the function I use to generate a texture. It now creates a random test texture. I still see a white texture no matter what. Gonna keep digging into how I'm rendering.
Here's the new function:
void AssetManager::CreateNoisyTexture(const char *key)
{
    Texture texture(key, 64, 64);
    const unsigned int components = 4;
    GLubyte *pixels = (GLubyte *)malloc(sizeof(GLubyte) * (texture.width * texture.height * components));
    GLubyte *pitr1 = pixels;
    GLubyte *pitr2 = pixels + (texture.width * texture.height * components);
    while (pitr1 != pitr2) {
        *pitr1 = rand() * 0xFF;
        *(pitr1 + 1) = rand() * 0xFF;
        *(pitr1 + 2) = rand() * 0xFF;
        *(pitr1 + 3) = 0xFF;
        pitr1 += 4;
    }
    glGenTextures(1, &texture.name);
    glBindTexture(GL_TEXTURE_2D, texture.name);
    glTexImage2D(GL_TEXTURE_2D, 0, components, texture.width, texture.height, 0, GL_RGBA, GL_UNSIGNED_BYTE, pixels);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
    free(pixels);
    printf("Created texture with key: %s name: %d", texture.key, texture.name);
    textures.push_back(texture);
}
Ok I figured it out. The problem was that I was using the wrong internal pixel format.
glTexImage2D(GL_TEXTURE_2D, 0, 4, texture.width, texture.height, 0, GL_RGBA, GL_UNSIGNED_BYTE, pixels);
Should be:
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, texture.width, texture.height, 0, GL_RGBA, GL_UNSIGNED_BYTE, pixels);
I needed to pass in GL_RGBA instead of 4. In the docs I read this:
internalFormat - Specifies the number of color components in the texture. Must be 1, 2, 3, or 4, or one of the following symbolic constants:
I figured it didn't matter, but I guess it does. One other thing to note: I figured this out by using glGetError() to find where the error was occurring. When I called glTexImage2D(), the error was GL_INVALID_OPERATION.
Thanks for your help IronMensan!
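Since glGetError() is what pinpointed the failing call, a small helper can make that kind of check routine (a minimal sketch of mine, not from the original post):
// Drains and prints every pending GL error; call it after a suspect GL call.
static void checkGLError(const char *where)
{
    for (GLenum err = glGetError(); err != GL_NO_ERROR; err = glGetError())
        printf("GL error 0x%04x after %s\n", err, where);
}

// Usage:
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, texture.width, texture.height, 0,
             GL_RGBA, GL_UNSIGNED_BYTE, pixels);
checkGLError("glTexImage2D");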
UPDATE:
So the reason I couldn't pass 4 is that it is not allowed: in the OpenGL ES 1.1 spec, internalFormat and format need to be the same, and can only be GL_ALPHA, GL_RGB, GL_RGBA, GL_LUMINANCE, or GL_LUMINANCE_ALPHA. Lesson learned.

How to determine which error I'm receiving when calling glTexSubImage2D

I'm trying to draw a texture with CAOpenGLLayer, but I'm receiving GL_INVALID_OPERATION when I call glTexSubImage2D. According to this document: http://www.opengl.org/sdk/docs/man/xhtml/glTexSubImage2D.xml it's one of the errors described there, but it seems I'm not breaking any of the rules described there and I don't understand what I'm doing wrong. Here is the code I'm trying to run:
- (CGLContextObj)copyCGLContextForPixelFormat:(CGLPixelFormatObj)pixelFormat
{
    uint32_t plugin_width = 32, plugin_height = 32;
    texture_data = new uint8_t[plugin_width * plugin_height * 4];
    glMatrixMode(GL_PROJECTION);
    glLoadIdentity();
    glScalef(1.0f, -1.0f, 1.0f);
    glOrtho(0, plugin_width, 0, plugin_height, -1.0, 1.0);
    glActiveTexture(GL_TEXTURE0);
    glEnable(GL_TEXTURE_RECTANGLE_EXT);
    glGenTextures(1, &textureName);
    glBindTexture(GL_TEXTURE_RECTANGLE_EXT, textureName);
    glTextureRangeAPPLE(GL_TEXTURE_RECTANGLE_EXT, plugin_width * plugin_height * 4, texture_data);
    glTexParameteri(GL_TEXTURE_RECTANGLE_EXT, GL_TEXTURE_STORAGE_HINT_APPLE, GL_STORAGE_SHARED_APPLE);
    glTexParameteri(GL_TEXTURE_RECTANGLE_EXT, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
    glTexParameteri(GL_TEXTURE_RECTANGLE_EXT, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
    glTexParameteri(GL_TEXTURE_RECTANGLE_EXT, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
    glTexParameteri(GL_TEXTURE_RECTANGLE_EXT, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
    glTexEnvi(GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE, GL_REPLACE);
    glTexImage2D(GL_TEXTURE_RECTANGLE_EXT, 0, GL_RGBA, plugin_width, plugin_height, 0, GL_BGRA, GL_UNSIGNED_INT_8_8_8_8, texture_data);
    glBindTexture(GL_TEXTURE_RECTANGLE_EXT, 0);
    return [super copyCGLContextForPixelFormat:pixelFormat];
}

- (void)drawInCGLContext:(CGLContextObj)ctx pixelFormat:(CGLPixelFormatObj)pf forLayerTime:(CFTimeInterval)t displayTime:(const CVTimeStamp *)ts
{
    uint32_t plugin_width = 32, plugin_height = 32;
    uint32_t width = plugin_width;
    uint32_t height = plugin_height;
    srand(time(NULL));
    for (int i = 0; i < 32*32*4; i++)
        texture_data[i] = rand() % 255;
    glActiveTexture(GL_TEXTURE0);
    glBindTexture(GL_TEXTURE_RECTANGLE_EXT, textureName);
    glTexSubImage2D(GL_TEXTURE_RECTANGLE_EXT, 0, 0, 0, width, height, GL_BGRA, GL_UNSIGNED_INT_8_8_8_8, texture_data);
    assert(glGetError() == GL_NO_ERROR); // here i'm getting GL_INVALID_OPERATION
    glMatrixMode(GL_MODELVIEW);
    glLoadIdentity();
    glBegin(GL_QUADS);
    glTexCoord2f(0.0f, 0.0f);
    glVertex2f(0.0f, 0.0f);
    glTexCoord2f(1.0f, 0.0f);
    glVertex2f(width, 0);
    glTexCoord2f(0.0f, 1.0f);
    glVertex2f(0, height);
    glTexCoord2f(1.0f, 1.0f);
    glVertex2f(width, height);
    glEnd();
    [super drawInCGLContext:ctx pixelFormat:pf forLayerTime:t displayTime:ts];
}
Did you try to replace the external format GL_UNSIGNED_INT_8_8_8_8 with GL_UNSIGNED_BYTE in both calls (glTexImage2D and glTexSubImage2D)?
That might help.
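For clarity, a minimal sketch of that suggested change (the same two calls from the question with only the type argument swapped; untested on my side):
// In copyCGLContextForPixelFormat:
glTexImage2D(GL_TEXTURE_RECTANGLE_EXT, 0, GL_RGBA, plugin_width, plugin_height, 0,
             GL_BGRA, GL_UNSIGNED_BYTE, texture_data);

// In drawInCGLContext:
glTexSubImage2D(GL_TEXTURE_RECTANGLE_EXT, 0, 0, 0, width, height,
                GL_BGRA, GL_UNSIGNED_BYTE, texture_data);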