I'm just testing this stuff out, so I don't need an alternate approach (no GL extensions). Just hoping someone sees an obvious mistake in my usage of GLES.
I want to take a bitmap of a glyph that is smaller than 32x32 (width and height are not necessarily powers of 2) and put it into a texture so I can render it. I first create an empty 32x32 texture, then copy the pixels into the larger texture.
GLuint texture;
glGenTextures(1, &texture);
glBindTexture(GL_TEXTURE_2D, texture);
glPixelStorei(GL_UNPACK_ALIGNMENT, 1);
glTexImage2D(GL_TEXTURE_2D, 0, GL_ALPHA, 32, 32, 0,
GL_ALPHA, GL_UNSIGNED_BYTE, NULL);
glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, bitmap.width(), bitmap.height(),
GL_ALPHA, GL_UNSIGNED_BYTE, bitmap.pixels());
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
Then I try to draw only the bitmap portion of the texture using the texture coordinates:
const GLfloat vertices[] = {
x + bitmap.width(), y + bitmap.height(),
x, y + bitmap.height(),
x, y,
x + bitmap.width(), y
};
const GLfloat texCoords[] = {
0, bitmap.height() / 32,
bitmap.width() / 32, bitmap.height() / 32,
0, 0,
bitmap.width() / 32, 0
};
const GLushort indices[] = { 0, 1, 2, 0, 2, 3 };
glEnableClientState(GL_VERTEX_ARRAY);
glEnableClientState(GL_TEXTURE_COORD_ARRAY);
glVertexPointer(2, GL_FLOAT, 0, vertices);
glTexCoordPointer(2, GL_FLOAT, 0, texCoords);
glDrawElements(GL_TRIANGLES, 6, GL_UNSIGNED_SHORT, indices);
Now if all were well in the world, the square created by the vertices would be the same size as the bitmap portion of the texture, and it would draw my bitmap exactly.
Let's say, for example, that my glyph is 16x16; it should then take up the bottom-left quadrant of the 32x32 texture, so the texCoords would seem to be correct at (0, 0.5), (0.5, 0.5), (0, 0) and (0.5, 0).
However, my 12x12 'T' glyph looks like this:
Anyone know why?
BTW. I start by setting up the projection matrix for 2D work as such:
glMatrixMode(GL_PROJECTION);
glLoadIdentity();
glOrtho(0.0f, 480, 800, 0.0f, 0.0f, 1.0f);
glDisable(GL_DEPTH_TEST);
glMatrixMode(GL_MODELVIEW);
glLoadIdentity();
glTranslatef(0.375f, 0.375f, 0.0f); // for exact pixelization
glEnable(GL_BLEND);
glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA);
glEnable(GL_TEXTURE_2D);
glClearColor(1.0f, 1.0f, 1.0f, 1.0f);
glClear(GL_COLOR_BUFFER_BIT);
The mapping between vertex coordinates and texture coordinates seems to be mixed up. Try changing your vertex coordinates to:
const GLfloat vertices[] = {
x, y + bitmap.height(),
x + bitmap.width(), y + bitmap.height(),
x, y,
x + bitmap.width(), y
};
As an aside:
I don't think you need to go the route of vertex indices in your case. Easier would be a call to glDrawArrays:
glDrawArrays(GL_TRIANGLE_STRIP, 0, 4);
(as you have already set up your glVertexPointer and glTexCoordPointer).
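Putting both suggestions together, a minimal sketch of the draw (reusing x, y, bitmap, and the texCoords from the question):
const GLfloat vertices[] = {
    x,                  y + bitmap.height(),
    x + bitmap.width(), y + bitmap.height(),
    x,                  y,
    x + bitmap.width(), y
};
glVertexPointer(2, GL_FLOAT, 0, vertices);    // reordered vertices
glTexCoordPointer(2, GL_FLOAT, 0, texCoords); // texCoords unchanged
glDrawArrays(GL_TRIANGLE_STRIP, 0, 4);        // no index array needed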
Related
I've been trying to implement shadow mapping in my OpenGL engine (using SFML 2.2), and the shadows don't seem to render correctly. I believe I've narrowed the issue down to the ortho projection used to calculate the shadows.
/* before the main loop, creating the depth buffer for shadow mapping */
glm::vec3 lightPos(glm::vec3(-45.f, 45.f, -40.f));
const GLuint SHADOW_WIDTH = 1024, SHADOW_HEIGHT = 1024;
GLuint depthMapFBO;
glGenFramebuffers(1, &depthMapFBO);
GLuint depthMap;
glGenTextures(1, &depthMap);
glBindTexture(GL_TEXTURE_2D, depthMap);
glTexImage2D(GL_TEXTURE_2D, 0, GL_DEPTH_COMPONENT, SHADOW_WIDTH, SHADOW_HEIGHT, 0, GL_DEPTH_COMPONENT, GL_FLOAT, NULL);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_BORDER);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_BORDER);
GLfloat borderColor[] = { 1.0, 1.0, 1.0, 1.0 };
glTexParameterfv(GL_TEXTURE_2D, GL_TEXTURE_BORDER_COLOR, borderColor);
glBindFramebuffer(GL_FRAMEBUFFER, depthMapFBO);
glFramebufferTexture2D(GL_FRAMEBUFFER, GL_DEPTH_ATTACHMENT, GL_TEXTURE_2D, depthMap, 0);
glDrawBuffer(GL_NONE);
glReadBuffer(GL_NONE);
glBindFramebuffer(GL_FRAMEBUFFER, 0);
glClearColor(0.1f, 0.1f, 0.1f, 1.0f);
/* in the main loop, sending information to the appropriate shaders and setting viewports */
glm::mat4 lightProjection, lightView;
glm::mat4 lightSpaceMatrix;
GLfloat near_plane = 1.f, far_plane = 300.f;
lightProjection = glm::ortho(-10.f, 10.f, -10.f, 10.f, near_plane, far_plane);
lightView = glm::lookAt(lightPos, glm::vec3(0.0f), glm::vec3(0.0, 1.0, 0.0));
lightSpaceMatrix = lightProjection * lightView;
glUseProgram(simpleDepthShader);
glUniformMatrix4fv(glGetUniformLocation(simpleDepthShader, "lightSpaceMat"), 1, GL_FALSE, glm::value_ptr(lightSpaceMatrix));
glViewport(0, 0, SHADOW_WIDTH, SHADOW_HEIGHT);
glBindFramebuffer(GL_FRAMEBUFFER, depthMapFBO);
glClear(GL_DEPTH_BUFFER_BIT);
RenderScene();
glBindFramebuffer(GL_FRAMEBUFFER, 0);
// reset viewport, and display the scene as normal
glViewport(0, 0, window.getSize().x, window.getSize().y);
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
glUniform3fv(glGetUniformLocation(lightingShader, "lightPos_shade"), 1, &lightPos[0]);
glUniform3fv(glGetUniformLocation(lightingShader, "viewPos"), 1, &getPos()[0]);
glUniformMatrix4fv(glGetUniformLocation(lightingShader, "lightSpaceMat"), 1, GL_FALSE, glm::value_ptr(lightSpaceMatrix));
glActiveTexture(GL_TEXTURE1);
glBindTexture(GL_TEXTURE_2D, depthMap);
RenderScene();
I've applied the light's view and projection matrices to the camera to see what the light sees, and this was the result (along with the view from the depth buffer).
Here's what the scene looks like with the shadows (and for some reason the stall doesn't even cast a shadow onto itself).
It seems that your shader doesn't fetch your shadow texels correctly.
When you transform your vertices with your light matrix, the results are in the [-1, 1] range, but texture sampling works in the [0, 1] range.
http://www.opengl-tutorial.org/intermediate-tutorials/tutorial-16-shadow-mapping/
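The usual fix (shown in that tutorial) is to remap with a bias matrix before sampling. A minimal sketch with GLM, reusing lightSpaceMatrix from the question:
// bias matrix: scales and offsets clip-space [-1, 1] into texture-space [0, 1]
// (the GLM constructor takes values in column-major order)
glm::mat4 biasMatrix(
    0.5f, 0.0f, 0.0f, 0.0f,
    0.0f, 0.5f, 0.0f, 0.0f,
    0.0f, 0.0f, 0.5f, 0.0f,
    0.5f, 0.5f, 0.5f, 1.0f);
glm::mat4 depthBiasMatrix = biasMatrix * lightSpaceMatrix;
// use depthBiasMatrix where the shadow map is sampled (the lighting shader);
// the depth pass itself still uses lightSpaceMatrix. Equivalently, apply
// "coords * 0.5 + 0.5" to the projected coordinates in the fragment shader.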
I'm currently trying to test rendering to a framebuffer for various uses, but whenever I have an object (say, a square) at a certain y-value, it appears "stretched"; then past a certain y-value or x-value it seems to "thin out" and disappear. I have determined the x- and y-values at which it disappears, but the coordinates seem to have no rhyme or reason.
When I remove the framebuffer binding and render directly to the screen it draws the square perfectly fine, no matter the x or y-value.
Drawing a basic square (using immediate mode to rule out possible errors) with a wide x-value looks like this:
Code here:
Window window("Framebuffer Testing", 1600, 900); //1600x900 right now
int fbowidth = 800, fboheight = 600;
mat4 ortho = mat4::orthographic(0, width, 0, height, -1.0f, 1.0f);
//trimmed out some code from shader creation that is bug-free and unnecessary to include
Shader shader("basic"); shader.setUniform("pr_matrix", ortho);
Shader drawFromFBO("fbotest"); shader.setUniform("pr_matrix", ortho);
GLfloat screenVertices[] = {
0, 0, 0, 0, height, 0,
width, height, 0, width, 0, 0};
GLushort indices[] = {
0, 1, 2,
2, 3, 0 };
GLfloat texcoords[] = { //texcoords sent to the drawFromFBO shader
0, 0, 0, 1, 1, 1,
1, 1, 1, 0, 0, 0 };
IndexBuffer ibo(indices, 6);
VertexArray vao;
vao.addBuffer(new Buffer(screenVertices, 4 * 3, 3), 0);
vao.addBuffer(new Buffer(texcoords, 2 * 6, 2), 1);
GLuint fbo;
glGenFramebuffers(1, &fbo);
glBindFramebuffer(GL_FRAMEBUFFER, fbo);
GLuint texture;
glGenTextures(1, &texture);
glBindTexture(GL_TEXTURE_2D, texture);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, fbowidth, fboheight, 0, GL_RGB, GL_UNSIGNED_BYTE, NULL);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
glBindTexture(GL_TEXTURE_2D, 0);
glFramebufferTexture(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, texture, 0);
if(glCheckFramebufferStatus(GL_FRAMEBUFFER) != GL_FRAMEBUFFER_COMPLETE)
std::cout << "false" << std::endl;
glEnable(GL_TEXTURE_2D);
//the x-values mess up at ~783 thru 800 and the y-values at 0 thru ~313
while(!window.closed()) {
glClearColor(0.2f, 0.2f, 0.2f, 1.0f); //grey
window.clear(); //calls glClear for the depth and color buffers
//bind framebuffer and shader
shader.enable(); //literally just calls glUseProgram(id) with the compiled shader id
glViewport(0, 0, fbowidth, fboheight);
glBindFramebuffer(GL_FRAMEBUFFER, fbo); //bind the fbo
glClearColor(1.0f, 0.0f, 1.0f, 1.0f); //set clear color to pink
glClear(GL_COLOR_BUFFER_BIT);
//render a red square to the framebuffer texture
glBegin(GL_QUADS); {
glColor3f(1.0f, 0.0f, 0.0f); //set the color to red
glVertex3f(700, 400, 0);
glVertex3f(700, 450, 0);
glVertex3f(750, 450, 0);
glVertex3f(750, 400, 0);
} glEnd();
shader.disable();
glBindFramebuffer(GL_FRAMEBUFFER, 0); //set framebuffer to the default
//render from framebuffer to screen
glViewport(0, 0, width, height);
drawFromFBO.enable();
glActiveTexture(GL_TEXTURE0);
drawFromFBO.setUniform1i("texfbo0", 0);
glBindTexture(GL_TEXTURE_2D, texture);
vao.bind();
ibo.bind();
glDrawElements(GL_TRIANGLES, ibo.getCount(), GL_UNSIGNED_SHORT, NULL);
ibo.unbind();
vao.unbind();
drawFromFBO.disable();
window.update();
}
If you want to see anything extra, the file is located on my GitHub: here
I have a game that uses a pixel art style, upscaled so 1 pixel is equal to a 2x2 area on screen. However, when I rotate a sprite, OpenGL draws it using screen pixels, which breaks the illusion of a low-res style game:
How can I rotate the sprite and draw it using the larger pixels? Right now sprites are drawn using a Sprite class with a Draw method, and the code looks like this:
void Sprite::Draw(int x, int y, int w, int h, int tx = 0, int ty = 0, int tw = 1, int th = 1, int rotation = 0, int rx = 0, int ry = 0, int sr = 0, float r = 1, float g = 1, float b = 1) {
glEnable(GL_TEXTURE_2D);
glTranslatef(x+(w/2), y+(h/2), 0);
glRotatef((float) sr, 0.0f, 0.0f, 1.0f);
glTranslatef(-x-(w/2), -y-(h/2), 0);
glTranslatef(x+(w/2)+rx, y+(h/2)+ry, 0);
glRotatef((float) rotation, 0.0f, 0.0f, 1.0f);
glTranslatef(-(w/2)-rx, -(h/2)-ry, 0);
glColor3f(r, g, b);
glBindTexture(GL_TEXTURE_2D, texture);
const float verts[] = {
0, (float) h,
(float) w, (float) h,
0, 0,
(float) w, 0
};
const float tVerts[] = {
(float)tx/(float)width, ((float)ty+(float)th)/(float)height,
((float)tx+(float)tw)/(float)width, ((float)ty+(float)th)/(float)height,
(float)tx/(float)width, (float)ty/(float)height,
((float)tx+(float)tw)/(float)width, (float)ty/(float)height
};
glEnableClientState(GL_VERTEX_ARRAY);
glEnableClientState(GL_TEXTURE_COORD_ARRAY);
glVertexPointer(2, GL_FLOAT, 0, verts);
glTexCoordPointer(2, GL_FLOAT, 0, tVerts);
glDrawArrays(GL_TRIANGLE_STRIP, 0, 4);
glLoadIdentity();
glDisableClientState(GL_VERTEX_ARRAY);
glDisableClientState(GL_TEXTURE_COORD_ARRAY);
glDisable(GL_TEXTURE_2D);
}
Thanks!
EDIT: I guess I should mention I'm using SDL2 for window management.
You can render the whole scene at half the resolution and then scale it up. To do this, you use an FBO (Frame Buffer Object) for your primary rendering, then blit it to the default framebuffer.
Once, during setup, create your FBO and render target, and attach the render target to the FBO:
GLuint fboId = 0;
glGenFramebuffers(1, &fboId);
GLuint rbId = 0;
glGenRenderbuffers(1, &rbId);
// Need to bind this once so object is created.
glBindRenderbuffer(GL_RENDERBUFFER, rbId);
glBindRenderbuffer(GL_RENDERBUFFER, 0);
glBindFramebuffer(GL_DRAW_FRAMEBUFFER, fboId);
glFramebufferRenderbuffer(GL_DRAW_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_RENDERBUFFER, rbId);
glBindFramebuffer(GL_DRAW_FRAMEBUFFER, 0);
On every window resize, with the window resized to width x height, allocate the render target at half the window size. If your window is not resizable, you can combine this with the setup code:
glBindRenderbuffer(GL_RENDERBUFFER, rbId);
glRenderbufferStorage(GL_RENDERBUFFER, GL_RGBA8, width / 2, height / 2);
glBindRenderbuffer(GL_RENDERBUFFER, 0);
On every redraw, render to the FBO, and blit the result to the default framebuffer:
glBindFramebuffer(GL_DRAW_FRAMEBUFFER, fboId);
glViewport(0, 0, width / 2, height / 2);
// draw content
glBindFramebuffer(GL_DRAW_FRAMEBUFFER, 0);
glBindFramebuffer(GL_READ_FRAMEBUFFER, fboId);
glBlitFramebuffer(0, 0, width / 2, height / 2,
0, 0, width, height,
GL_COLOR_BUFFER_BIT, GL_NEAREST);
glBindFramebuffer(GL_READ_FRAMEBUFFER, 0);
The above assumes that width and height are even numbers. You may have to tweak it slightly if they can be odd.
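For example, one possible tweak (my own suggestion, not part of the recipe above) is to round the half size up, so odd window sizes stay fully covered:
// round up rather than truncating when width or height is odd
int halfWidth  = (width + 1) / 2;
int halfHeight = (height + 1) / 2;
glRenderbufferStorage(GL_RENDERBUFFER, GL_RGBA8, halfWidth, halfHeight);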
I'm trying to display some text using OpenGL with the FreeType library. It works, yet the text doesn't look smooth. The FreeType documentation says that some antialiasing happens to the bitmap during loading, but it doesn't look that way in my case.
This is what I'm doing:
FT_Init_FreeType(&m_fontLibrary);
FT_New_Face(m_fontLibrary, "src/VezusLight.OTF", 0, &m_BFont);
FT_Set_Pixel_Sizes(m_BFont, 0, 80);
m_glyph = m_BFont->glyph;
GLuint tex;
glActiveTexture(GL_TEXTURE1);
glGenTextures(1, &tex);
glBindTexture(GL_TEXTURE_2D, tex);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glPixelStorei(GL_UNPACK_ALIGNMENT, 1);
glUseProgram(m_textPipeline);
glUniform1i(m_texLocation, 1);
glUseProgram(0);
and then rendering:
glActiveTexture(GL_TEXTURE1);
glEnableVertexAttribArray(m_coordTex);
glBindBuffer(GL_ARRAY_BUFFER, m_VBO);
const char *p;
float x = x_i, y = y_i;
const char* result = text.c_str();
for (p = result; *p; p++)
{
if (FT_Load_Char(m_BFont, *p, FT_LOAD_RENDER))
continue;
glTexImage2D(
GL_TEXTURE_2D,
0,
GL_ALPHA,
m_glyph->bitmap.width,
m_glyph->bitmap.rows,
0,
GL_ALPHA,
GL_UNSIGNED_BYTE,
m_glyph->bitmap.buffer
);
float x2 = x - 1024 + m_glyph->bitmap_left;
float y2 = y - 600 - m_glyph->bitmap_top;
float w = m_glyph->bitmap.width;
float h = m_glyph->bitmap.rows;
GLfloat box[4][4] = {
{ x2, -y2 - h, 0, 1 },
{ x2 + w, -y2 - h, 1, 1 },
{ x2, -y2, 0, 0 },
{ x2 + w, -y2, 1, 0 },
};
glBufferData(GL_ARRAY_BUFFER, 16 * sizeof(GLfloat), box, GL_DYNAMIC_DRAW);
glVertexAttribPointer(m_coordTex, 4, GL_FLOAT, GL_FALSE, 4 * sizeof(GLfloat), NULL);
glDrawArrays(GL_TRIANGLE_STRIP, 0, 4);
x += (m_glyph->advance.x >> 6);
y += (m_glyph->advance.y >> 6);
}
glDisableVertexAttribArray(m_coordTex);
The result looks like this:
Can anyone spot a problem in my code?
Two issues with your code.
The first is a buffer overflow: the texture coordinates in your box structure are vec2, but you tell glVertexAttribPointer they are vec4 (the stride of 4*sizeof(float) is what matters, and the mismatched size parameter makes OpenGL read out of bounds, 2 elements past the end of the box array).
That your texture looks pixelated stems from the fact that texture coordinates 0 and 1 do not lie on pixel centers but on the edges of the texture. Either use texelFetch in the fragment shader to address pixels by their pixel coordinate, or remap the texture extents to the range [0…1] properly, as explained in https://stackoverflow.com/a/5879551/524368
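A minimal sketch of that remapping (my own helper, not from the linked answer), assuming a texture dimension of n texels:
// remap a normalized coordinate so 0 and 1 land on the centers of the first
// and last texels instead of on the texture edges (n = texture size in texels)
float remapToTexelCenters(float s, float n) {
    return (s * (n - 1.0f) + 0.5f) / n;
}
// e.g. remapToTexelCenters(0.0f, 32.0f) == 0.5f/32.0f, the first texel's center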
I think that to get transparent, smooth, anti-aliased glyphs, you must enable blending in OpenGL and may also need to disable depth testing (you can find out why and how by searching the internet).
Something like this:
glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA);
glEnable(GL_BLEND);
//and if it didn't work, then disable depth testing by uncommenting this:
//glDisable(GL_DEPTH_TEST);
hope it helps!
I'm trying to use a raw data texture with GL_TEXTURE_RECTANGLE_ARB:
void Display::tex(){
GLubyte Texture[16] =
{
0,0,0,0, 0xFF,0xFF,0xFF,0xFF,
0xFF,0xFF,0xFF,0xFF, 0,0,0,0
};
GLuint Nom;
glLoadIdentity(); // load identity matrix
glTranslatef(0.0f,0.0f,-4.0f); // move forward 4 units
glEnable(GL_DEPTH_TEST); // enable depth testing
glDisable(GL_CULL_FACE);
glEnable(GL_TEXTURE_RECTANGLE_ARB);
glPixelStorei(GL_UNPACK_ROW_LENGTH, 2);
glPixelStorei(GL_UNPACK_ALIGNMENT, 1);
glGenTextures(1, &Nom);
glBindTexture(GL_TEXTURE_RECTANGLE_ARB, Nom);
glTexParameteri(GL_TEXTURE_RECTANGLE_ARB,
GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexImage2D(GL_TEXTURE_RECTANGLE_ARB, 0, GL_RGBA8, 2, 2,
0, GL_BGRA_EXT, GL_UNSIGNED_INT_8_8_8_8_REV, Texture);
angle = 0.01 * glutGet ( GLUT_ELAPSED_TIME );
glRotatef(angle,0,1,1);
glBegin(GL_QUADS); // and here we go with the cube!
glTexCoord2i(0,0);glVertex3i(-1,-1,-1);
glTexCoord2i(1,0);glVertex3i(+1,-1,-1);
glTexCoord2i(1,1);glVertex3i(+1,+1,-1);
glTexCoord2i(0,1);glVertex3i(-1,+1,-1);
// face 1
glTexCoord2i(0,0);glVertex3i(-1,-1,+1);
glTexCoord2i(1,0);glVertex3i(+1,-1,+1);
glTexCoord2i(1,1);glVertex3i(+1,+1,+1);
glTexCoord2i(0,1);glVertex3i(-1,+1,+1);
// face 2
glTexCoord2i(0,0);glVertex3i(+1,-1,-1);
glTexCoord2i(1,0);glVertex3i(+1,-1,+1);
glTexCoord2i(1,1);glVertex3i(+1,+1,+1);
glTexCoord2i(0,1);glVertex3i(+1,+1,-1);
// face 3
glTexCoord2i(0,0);glVertex3i(-1,-1,-1);
glTexCoord2i(1,0);glVertex3i(-1,-1,+1);
glTexCoord2i(1,1);glVertex3i(-1,+1,+1);
glTexCoord2i(0,1);glVertex3i(-1,+1,-1);
// face 4
glTexCoord2i(1,0);glVertex3i(-1,+1,-1);
glTexCoord2i(1,1);glVertex3i(+1,+1,-1);
glTexCoord2i(0,1);glVertex3i(+1,+1,+1);
glTexCoord2i(0,0);glVertex3i(-1,+1,+1);
// face 5
glTexCoord2i(1,0);glVertex3i(-1,-1,+1);
glTexCoord2i(1,1);glVertex3i(+1,-1,+1);
glTexCoord2i(0,1);glVertex3i(+1,-1,-1);
glTexCoord2i(0,0);glVertex3i(-1,-1,-1);
// face 6
glEnd();
glFlush();
}
The result is not very good:
http://shareimage.ro/images/xdkyd12oty44c0qpuo1b.png
Each face of the cube should show a texture of 4 squares (2 black and 2 white).
I don't know where the error is...
The GL_ARB_texture_rectangle extension uses dimension-dependent texture coordinates: use the [0..W]x[0..H] range for texture coordinates instead of the normalized [0..1]x[0..1] range.
For example, to draw a full image on a quad:
glTexCoord2f(0, 0); glVertex3f(...); // top-left
glTexCoord2f(0, imageHeight); glVertex3f(...); // bottom-left
glTexCoord2f(imageWidth, imageHeight); glVertex3f(...); // bottom-right
glTexCoord2f(imageWidth, 0); glVertex3f(...); // top-right
Note that there are several limitations of the GL_ARB_texture_rectangle extension:
Mipmap filtering is not supported.
Texture borders are not supported.
The GL_REPEAT wrap mode is not supported.
Palette textures are not supported.
Texture coords are addressed as [0..w]x[0..h].
Luckily, OpenGL provides the GL_ARB_texture_non_power_of_two extension as well, which resolves the above limitations while still supporting NPOT (Non Power Of Two) textures.
The biggest advantages of GL_ARB_texture_non_power_of_two are:
It uses the conventional normalized texture coords, [0..1]x[0..1].
It does NOT require an additional texture target token, GL_TEXTURE_RECTANGLE_ARB, for glEnable(), glTexImage*D(), glBindTexture(), etc. That is, you can still use GL_TEXTURE_2D as usual for NPOT textures.
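For illustration, a minimal sketch of the same 2x2 upload through the ordinary GL_TEXTURE_2D path (reusing Nom and Texture from the question; with NPOT support the width and height would not need to be powers of two):
glEnable(GL_TEXTURE_2D);
glBindTexture(GL_TEXTURE_2D, Nom);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA8, 2, 2,
             0, GL_BGRA_EXT, GL_UNSIGNED_INT_8_8_8_8_REV, Texture);
// the quad then uses normalized coordinates:
glTexCoord2i(1, 1); // instead of glTexCoord2i(2, 2) with the rectangle target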
Using texture rectangles, the texture coordinates are absolute pixel positions. In your case your texture coordinates would be (0,0), (2,0), (2,2), (0,2).
It's working now, see below:
GLubyte Texture[16] =
{
0,0xFF,0,0xFF, 0xFF,0xFF,0xFF,0xFF,
0xFF,0xFF,0xFF,0xFF, 0,0,0,0
};
GLuint Nom;
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT); // clear the framebuffer and the depth buffer
glMatrixMode(GL_MODELVIEW); // a little gluLookAt()...
glLoadIdentity();//load identity matrix
glTranslatef(0.0f,0.0f,-4.0f);//move forward 4 units
angle = 0.1 * glutGet ( GLUT_ELAPSED_TIME );
glRotatef(angle,0,2,2);
glEnable(GL_TEXTURE_RECTANGLE_ARB);
glPixelStorei(GL_UNPACK_ROW_LENGTH, 2);
glPixelStorei(GL_UNPACK_ALIGNMENT, 1);
glGenTextures(1, &Nom); // generate a texture id
glBindTexture(GL_TEXTURE_RECTANGLE_ARB, Nom); // bind it to the rectangle target
glTexParameteri(GL_TEXTURE_RECTANGLE_ARB, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_RECTANGLE_ARB, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_RECTANGLE_ARB, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_RECTANGLE_ARB, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
glTexImage2D(GL_TEXTURE_RECTANGLE_ARB, 0, GL_RGBA8, 2, 2, 0, GL_BGRA_EXT, GL_UNSIGNED_INT_8_8_8_8_REV, Texture);
glBegin(GL_QUADS);
glTexCoord2i(0,0);glVertex3i(-1,-1,-1);
glTexCoord2i(2,0);glVertex3i(+1,-1,-1);
glTexCoord2i(2,2);glVertex3i(+1,+1,-1);
glTexCoord2i(0,2);glVertex3i(-1,+1,-1);
// face 1
glTexCoord2i(0,0);glVertex3i(-1,-1,+1);
glTexCoord2i(2,0);glVertex3i(+1,-1,+1);
glTexCoord2i(2,2);glVertex3i(+1,+1,+1);
glTexCoord2i(0,2);glVertex3i(-1,+1,+1);
// face 2
glTexCoord2i(0,0);glVertex3i(+1,-1,-1);
glTexCoord2i(2,0);glVertex3i(+1,-1,+1);
glTexCoord2i(2,2);glVertex3i(+1,+1,+1);
glTexCoord2i(0,2);glVertex3i(+1,+1,-1);
// face 3
glTexCoord2i(0,0);glVertex3i(-1,-1,-1);
glTexCoord2i(2,0);glVertex3i(-1,-1,+1);
glTexCoord2i(2,2);glVertex3i(-1,+1,+1);
glTexCoord2i(0,2);glVertex3i(-1,+1,-1);
glEnd();
glDisable(GL_TEXTURE_RECTANGLE_ARB);