I need to update an array of pixels to the screen every frame. It works initially, but when I resize the window it glitches and eventually throws EXC_BAD_ACCESS 1. I already checked that the buffer is reallocated to the correct size before every frame, but that does not seem to affect the result.
#include <stdio.h>
#include <stdlib.h>
#include <GLUT/GLUT.h>

unsigned char *buffer = NULL;
int width = 400, height = 400;
unsigned int screenTexture;

void Display()
{
    for (int y = 0; y < height; y += 4) {
        for (int x = 0; x < width; x++) {
            buffer[(x + y * width) * 3] = 255;
        }
    }

    glClear(GL_COLOR_BUFFER_BIT);
    glEnable(GL_TEXTURE_2D);

    // This function results in EXC_BAD_ACCESS 1, although the buffer is always correctly allocated
    glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, width, height, 0, GL_RGB, GL_UNSIGNED_BYTE, buffer);

    glViewport(0, 0, width, height);

    glMatrixMode(GL_PROJECTION);
    glLoadIdentity();
    glOrtho(0, width, height, 0, 0, 1);
    glMatrixMode(GL_MODELVIEW);

    glBegin(GL_QUADS);
    glTexCoord2f(0, 0); glVertex2i(0, 0);
    glTexCoord2f(1, 0); glVertex2i(width, 0);
    glTexCoord2f(1, 1); glVertex2i(width, height);
    glTexCoord2f(0, 1); glVertex2i(0, height);
    glEnd();

    glFlush();
    glutPostRedisplay();
}

void Resize(int w, int h)
{
    width = w;
    height = h;
    buffer = (unsigned char *)realloc(buffer, sizeof(unsigned char) * width * height * 3);
    if (!buffer) {
        printf("Error Reallocating buffer\n");
        exit(1);
    }
}

int main(int argc, char **argv)
{
    glutInit(&argc, argv);
    glutInitDisplayMode(GLUT_RGB | GLUT_SINGLE);
    glutInitWindowSize(width, height);
    glutCreateWindow("Rasterizer");
    glutDisplayFunc(Display);
    glutReshapeFunc(Resize);

    glGenTextures(1, &screenTexture);
    glBindTexture(GL_TEXTURE_2D, screenTexture);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_BASE_LEVEL, 0);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAX_LEVEL, 0);
    glDisable(GL_DEPTH_TEST);

    buffer = (unsigned char *)malloc(sizeof(unsigned char) * width * height * 3);

    glutMainLoop();
}
After resizing, the screen does not display properly either.
What is causing this problem? The code compiles and runs; you just have to link GLUT and OpenGL.
As @genpfault mentioned, OpenGL reads 4 bytes per pixel instead of the 3 your code assumes.
Instead of changing GL_UNPACK_ALIGNMENT, you can also change your code to the correct assumption of 4 bytes per pixel via a simple struct:
struct pixel {
    unsigned char r, g, b;
    unsigned char unused;
};
Then, instead of using the magic constant 3, you can use the much clearer sizeof(struct pixel). This makes the code easier to read and conveys its intent better, and it doesn't result in any extra code (the structure is "effectively" an array of 4 bytes).
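For illustration, a minimal sketch of how the question's allocation and write loop might look with this struct. Note the upload format becomes GL_RGBA so that OpenGL actually consumes 4 bytes per pixel:

struct pixel *pixels = (struct pixel *)malloc(sizeof(struct pixel) * width * height);

for (int y = 0; y < height; y++) {
    for (int x = 0; x < width; x++) {
        pixels[x + y * width].r = 255;   // per-field access instead of manual "* 3" math
    }
}

// With 4-byte pixels every row is a multiple of 4 bytes, so the default
// GL_UNPACK_ALIGNMENT of 4 is satisfied for any width.
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, width, height, 0, GL_RGBA, GL_UNSIGNED_BYTE, pixels);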
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, width, height, 0, GL_RGB, GL_UNSIGNED_BYTE, buffer);
^^^^^^
GL_UNPACK_ALIGNMENT defaults to 4, not 1. So OpenGL will read 4 bytes for every pixel, not the 3 that you're assuming.
Set GL_UNPACK_ALIGNMENT to 1 using glPixelStorei().
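For example, once during initialization:

glPixelStorei(GL_UNPACK_ALIGNMENT, 1);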
It sounds like you found something that works, but I don't think the problem was properly diagnosed. I believe the biggest issue is in the way you initialize your texture data here:
for (int y = 0; y < height; y += 4) {
    for (int x = 0; x < width; x++) {
        buffer[(x + y * width) * 3] = 255;
    }
}
This only sets data in every 4th row, and then only for every 3rd byte within those rows. To initialize all the data to white, you need to increment the row number (y) by 1 instead of 4, and set all 3 components inside the loop:
for (int y = 0; y < height; y++) {
    for (int x = 0; x < width; x++) {
        buffer[(x + y * width) * 3    ] = 255;
        buffer[(x + y * width) * 3 + 1] = 255;
        buffer[(x + y * width) * 3 + 2] = 255;
    }
}
You also need to set GL_UNPACK_ALIGNMENT to 1:
glPixelStorei(GL_UNPACK_ALIGNMENT, 1);
This controls the row alignment (not the pixel alignment, as suggested in a couple other answers). The default value for GL_UNPACK_ALIGNMENT is 4. But with 3 bytes per pixel in the GL_RGB format you are using, the size of a row is only a multiple of 4 bytes if the number of pixels is a multiple of 4. So for tightly packed rows with 3 bytes/pixel, the value needs to be set to 1.
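To make that concrete, here is the row-stride arithmetic OpenGL effectively applies (the example width is arbitrary):

int bytesPerPixel = 3;  // GL_RGB, GL_UNSIGNED_BYTE
int alignment = 4;      // default GL_UNPACK_ALIGNMENT
int rowStride = (width * bytesPerPixel + alignment - 1) / alignment * alignment;

// width = 401: a tightly packed row is 1203 bytes, but OpenGL skips to byte
// 1204 before reading the next row. Each row is therefore read from a
// progressively shifted offset, and the final rows run past the end of the
// buffer; that matches both the garbled display and the EXC_BAD_ACCESS on resize.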
Related
I'm trying to make an OpenGL texture by populating a pixel buffer with data from a baked font. Essentially, I'm taking each value from the font array and making a bitmap.
The problem is that when I display the full texture, I get noise. However, creating an 8x8 texture from a single glyph displays correctly.
The pixel buffer is 8-bit monochrome, so I pass GL_ALPHA as the buffer format.
I tried using a 32bpp GL_RGBA format as well, and it yields the same result.
DebugFont
LoadBakedFont(void)
{
    glEnable(GL_BLEND);
    glEnable(GL_TEXTURE_2D);
    glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA);

    unsigned char baked_font[128][8] = {}; // In my source code this is not empty :)
    unsigned char *pixels = (unsigned char*)malloc(sizeof(unsigned char) * 128 * 8 * 8);
    memset(pixels, 0, sizeof(unsigned char) * 128 * 8 * 8);

    int counter = 0;
    for(int i = 0; i < 128; ++i)
    {
        for(int j = 0; j < 8; ++j)
        {
            for(int k = 0; k < 8; ++k)
            {
                unsigned char val = (baked_font[i][j] >> k & 1);
                pixels[counter++] = val == 1 ? 0xff : 0x00;
            }
        }
    }

    // Renders the exclamation mark perfectly
    for(int y = 0; y < 8; ++y)
    {
        for(int x = 0; x < 8; ++x)
        {
            unsigned char *test = pixels + (0x21 * 64);
            if(test[y * 8 + x])
                printf("#");
            else
                printf(".");
        }
        printf("\n");
    }

    // POD struct
    DebugFont font;
    glGenTextures(1, &font.tex);
    glBindTexture(GL_TEXTURE_2D, font.tex);

    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);

    glTexImage2D(GL_TEXTURE_2D, 0, GL_ALPHA, 8 * 128, 8, 0, GL_ALPHA, GL_UNSIGNED_BYTE, pixels);

    glBindTexture(GL_TEXTURE_2D, 0);
    free(pixels);

    return font;
}
void
DrawTexture(DebugFont font)
{
    glBindTexture(GL_TEXTURE_2D, font.tex);

    glBegin(GL_QUADS);
    glTexCoord2f(0.0f, 0.0f); glVertex2f(0, 0);
    glTexCoord2f(1.0f, 0.0f); glVertex2f(8 * 128, 0);
    glTexCoord2f(1.0f, 1.0f); glVertex2f(8 * 128, 8);
    glTexCoord2f(0.0f, 1.0f); glVertex2f(0, 8);
    glEnd();

    glBindTexture(GL_TEXTURE_2D, 0);
}
(Screenshots: the full texture comes out as random noise; the single 8x8 exclamation-mark texture displays correctly.)
The way you arrange the data makes sense for a tall 8x1024 image where each 8x8 makes up a character.
But you load it as a 1024x8 image instead, putting all the pixels in the wrong places.
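One way to fix it, sketched with the question's own variable names: keep the 1024x8 upload but compute the destination index so each glyph's rows land side by side. (Alternatively, upload the existing buffer as an 8-wide, 1024-tall texture and adjust the texture coordinates instead.)

for(int i = 0; i < 128; ++i)         // glyph index
{
    for(int j = 0; j < 8; ++j)       // row within the glyph
    {
        for(int k = 0; k < 8; ++k)   // column (bit) within the row
        {
            unsigned char val = (baked_font[i][j] >> k & 1);
            // Row j of the 1024-pixel-wide image, columns i*8 .. i*8+7
            pixels[j * (8 * 128) + i * 8 + k] = val == 1 ? 0xff : 0x00;
        }
    }
}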
I'm working on a program that creates an SDL_Surface using the font at http://www.fontspace.com/work-ins-studio/variane-script. I set the background of the surface to be transparent, then extract the pixels from the surface and place them in part of an OpenGL texture.
It all works fine, except that the text ends up looking like this (it should read "testing").
My question: did I mess up the math somehow and do this to myself, or is this just the behaviour of SDL_TTF? And if it is just the behaviour of SDL_TTF, how do I work around it to get pixel data that I can use?
Here is the relevant code:
int main(int argc, char* args[]) {
    // other SDL and OpenGL overhead stuff here...

    TTF_Init();

    // shader setup here...

    TTF_Font *font;
    font = TTF_OpenFont("VarianeScript.ttf", 50);

    SDL_Surface* surface;
    SDL_Color color = { 255, 0, 0 };
    surface = TTF_RenderText_Solid(font, "testing", color);
    SDL_SetSurfaceAlphaMod(surface, 255);

    int surfaceWidth = surface->w;
    int surfaceHeight = surface->h;

    Uint8 red, green, blue, alpha;
    float* textImage = new float[(surfaceWidth * surfaceHeight) * 4];
    int countText = 0;

    SDL_LockSurface(surface);
    Uint8* p = (Uint8*)surface->pixels;
    for (int y = 0; y < surfaceHeight; ++y) {
        for (int x = 0; x < surfaceWidth; ++x) {
            Uint8 pixel = p[(y * surface->w) + x];
            SDL_GetRGBA(pixel, surface->format, &red, &green, &blue, &alpha);
            textImage[countText] = ((float)red / 255.0f);
            ++countText;
            textImage[countText] = ((float)green / 255.0f);
            ++countText;
            textImage[countText] = ((float)blue / 255.0f);
            ++countText;
            textImage[countText] = ((float)alpha / 255.0f);
            ++countText;
        }
    }
    SDL_UnlockSurface(surface);
    SDL_FreeSurface(surface);

    GLuint texture;
    float* image;
    int width = 1000, height = 1000;
    int textX = width - (int)(width / 1.5);
    int textY = height - (int)(height / 1.5);

    setupTexture(texture, shader, width, height, image, textImage, textX, textY, surfaceWidth, surfaceHeight);

    // etc...
Also, here is setupTexture (the important part starts around where I declare the startpos variables):
void setupTexture(GLuint &texture, Shader &shader, int &width, int &height, float* &image, float* text, int textX, int textY, int textW, int textH) {
    glGenTextures(1, &texture);

    image = new float[(width * height) * 3];
    for (int a = 0; a < (width * height) * 3; ) {
        if (a < ((width * height) * 3) / 2) {
            image[a] = 0.5f;
            ++a;
            image[a] = 1.0f;
            ++a;
            image[a] = 0.3f;
            ++a;
        }
        else {
            image[a] = 0.0f;
            ++a;
            image[a] = 0.5f;
            ++a;
            image[a] = 0.7f;
            ++a;
        }
    }

    int startpos1, startpos2;
    for(int y = 0; y < textH; ++y) {
        for(int x = 0; x < textW; ++x) {
            startpos1 = (((y + textY) * width) * 3) + ((x + textX) * 3);
            startpos2 = ((y * textW) * 4) + (x * 4);
            if (text[startpos2 + 3] != 0.0) {
                image[startpos1] = text[startpos2];
                image[startpos1 + 1] = text[startpos2 + 1];
                image[startpos1 + 2] = text[startpos2 + 2];
            }
        }
    }

    glActiveTexture(GL_TEXTURE0);
    glPixelStorei(GL_UNPACK_ALIGNMENT, 1);
    glBindTexture(GL_TEXTURE_2D, texture);
    glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, width, height, 0, GL_RGB, GL_FLOAT, image);
    glUniform1i(glGetUniformLocation(shader.shaderProgram, "texSampler"), 0);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
}
Your problem is in the way you extract pixels from the surface:
Uint8 pixel = p[(y * surface->w) + x];
You assume that each pixel takes one byte (which could be verified by inspecting surface->format->BytesPerPixel) and that each row is surface->w * 1 bytes long, but it isn't. Instead, each row is surface->pitch bytes long, so your code should be
Uint8 pixel = p[y * surface->pitch + x];
(That still assumes each pixel is 1 byte, but that's beside the point.)
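A sketch of the corrected extraction loop (still a 1-byte-per-pixel read, which matches the palettized surfaces TTF_RenderText_Solid produces):

SDL_LockSurface(surface);
Uint8* p = (Uint8*)surface->pixels;
int countText = 0;
for (int y = 0; y < surfaceHeight; ++y) {
    // Rows are surface->pitch bytes apart; pitch >= w * BytesPerPixel.
    Uint8* row = p + y * surface->pitch;
    for (int x = 0; x < surfaceWidth; ++x) {
        SDL_GetRGBA(row[x], surface->format, &red, &green, &blue, &alpha);
        textImage[countText++] = red / 255.0f;
        textImage[countText++] = green / 255.0f;
        textImage[countText++] = blue / 255.0f;
        textImage[countText++] = alpha / 255.0f;
    }
}
SDL_UnlockSurface(surface);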
It is quite weird that you use floats to represent the pixel data; it gains you nothing here aside from much slower loading.
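If you keep the data as bytes instead, the upload is the same call with GL_UNSIGNED_BYTE; a sketch (the fill loop is the one above, minus the divisions by 255):

unsigned char* textImage = new unsigned char[surfaceWidth * surfaceHeight * 4];
// ...fill with the red/green/blue/alpha bytes from SDL_GetRGBA directly...

glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, surfaceWidth, surfaceHeight, 0,
             GL_RGBA, GL_UNSIGNED_BYTE, textImage);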
I have a function that draws pixels one by one into a window, but I want to know how to draw the pixels in a color other than red. I've tried things like glSetColor and glColor3f to get the pixels to display in different colors, but nothing has worked so far. Thanks in advance.
#include <GL/glut.h>
#include <iostream>

using namespace std;

float *PixelBuffer;

void setPixel(int, int);
void display();

int size = 400 * 400 * 3;

int main(int argc, char *argv[])
{
    PixelBuffer = new float[400 * 400 * 3];

    glutInit(&argc, argv);
    glutInitDisplayMode(GLUT_SINGLE | GLUT_RGB);
    glutInitWindowSize(400, 400);
    glutInitWindowPosition(100, 100);
    glColor3f(0, 1.0, 0);

    int firstWindow = glutCreateWindow("First Color");

    glClearColor(0, 0, 0, 0); // clears the buffer of OpenGL

    for(int i = 0; i < 20; i++)
    {
        setPixel(i, 10);
    }

    glutDisplayFunc(display);
    glutMainLoop();

    return 0;
}

void display()
{
    glClear(GL_COLOR_BUFFER_BIT);
    glLoadIdentity();
    glDrawPixels(400, 400, GL_RGB, GL_FLOAT, PixelBuffer);
    glFlush();
}

void setPixel(int x, int y)
{
    int pixelLocation;
    int width = 400;

    pixelLocation = (y * width * 3) + (x * 3);

    PixelBuffer[pixelLocation] = 1;
}
You specify GL_RGB as format when calling glDrawPixels.
Then you calculate the correct position of the pixel in your buffer on the line:
pixelLocation = (y * width * 3) + (x * 3);
But then you only set the Red pixel intensity value on the next line.
You can access the other color values in your buffer like this:
PixelBuffer[pixelLocation + 0] = 1; // Red pixel intensity
PixelBuffer[pixelLocation + 1] = 1; // Green pixel intensity
PixelBuffer[pixelLocation + 2] = 1; // Blue pixel intensity
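Putting that together, a sketch of a color-aware setPixel (the signature is my own extension of your function, not an existing API):

void setPixel(int x, int y, float r, float g, float b)
{
    int width = 400;
    int pixelLocation = (y * width + x) * 3;

    PixelBuffer[pixelLocation + 0] = r; // red
    PixelBuffer[pixelLocation + 1] = g; // green
    PixelBuffer[pixelLocation + 2] = b; // blue
}

// e.g. a line of 20 blue pixels:
// for (int i = 0; i < 20; i++) setPixel(i, 10, 0.0f, 0.0f, 1.0f);

Note that glColor3f has no effect on glDrawPixels; the colors come entirely from the buffer contents.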
I'm trying to render a texture to a plane using:
unsigned char image[HEIGHT][WIDTH][3];
...
GLuint textureId;
glGenTextures(1, &textureId);
glBindTexture(GL_TEXTURE_2D, textureId);
glTexImage2D(GL_TEXTURE_2D,
             0,
             GL_RGB,
             WIDTH, HEIGHT,
             0,
             GL_RGB,
             GL_UNSIGNED_BYTE,
             image);
...
draw();
That code ran smoothly, but when I try to do the same with a dynamically allocated array, GLUT renders artifacts. Shortened code:
unsigned char ***image;
image = new unsigned char**[HEIGHT];
for (int i = 0; i < HEIGHT; i++)
{
    image[i] = new unsigned char*[WIDTH];
    for (int j = 0; j < WIDTH; j++)
    {
        image[i][j] = new unsigned char[3];
    }
}
...
GLuint textureId;
glGenTextures(1, &textureId);
glBindTexture(GL_TEXTURE_2D, textureId);
glTexImage2D(GL_TEXTURE_2D,
             0,
             GL_RGB,
             WIDTH, HEIGHT,
             0,
             GL_RGB,
             GL_UNSIGNED_BYTE,
             image);
...
draw();
Both arrays have identical content (checked bit by bit).
Full code:
main.cpp: http://pastebin.com/dzDbNgMa
TEXT_PLANE.hpp (kept in a header, to ensure inlining): http://pastebin.com/0HxcAnkW
I'm sorry for the mess in the code; it's only a testing site. I would be very grateful for any help.
What you're using as your texture is the WIDTH * HEIGHT * 3 bytes of memory starting at image.
For this, you need contiguous data like in the first example.
Your second example is not an array of arrays of arrays; it's an array of pointers to arrays of pointers. These pointers can point anywhere.
(An array is not a pointer, and a pointer is not an array.)
If you need dynamic allocation, use
unsigned char *image = new unsigned char[WIDTH * HEIGHT * 3];
and do your own indexing arithmetic; the components of the pixel at (row, column) would be
image[3 * (row * WIDTH + column)]
image[3 * (row * WIDTH + column) + 1]
image[3 * (row * WIDTH + column) + 2]
(or
image[3 * (column * HEIGHT + row)], etc.
Pick one.)
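A small helper keeps that arithmetic in one place (a sketch; texel is a made-up name, not part of any API, and WIDTH is assumed to be the compile-time constant from the question):

inline unsigned char& texel(unsigned char* img, int row, int column, int channel)
{
    // Contiguous row-major layout, 3 bytes per pixel
    return img[3 * (row * WIDTH + column) + channel];
}

// e.g. set pixel (10, 20) to red:
// texel(image, 10, 20, 0) = 255;
// texel(image, 10, 20, 1) = 0;
// texel(image, 10, 20, 2) = 0;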
I'm trying to draw to a renderbuffer (512x512) that's larger than the screen size (i.e., 320x480).
After doing a glReadPixels, the image looks correct, except once the dimensions of the image exceed the screen size (in this example, past 320 horizontal and 480 vertical). What causes this anomaly? Is there something I'm missing?
When the window size is >= the size of the renderbuffer, this code works absolutely fine.
Example image that was rendered to the buffer & glReadPixel'd:
http://img593.imageshack.us/img593/3220/rendertobroke.png
unsigned int canvasFrameBuffer;
bglGenFramebuffers(1, &canvasFrameBuffer);
bglBindFramebuffer(BGL_RENDERBUFFER, canvasFrameBuffer);
// Attach renderbuffer
unsigned int canvasRenderBuffer;
bglGenRenderbuffers(1, &canvasRenderBuffer);
bglBindRenderbuffer(BGL_RENDERBUFFER, canvasRenderBuffer);
bglRenderbufferStorage(BGL_RENDERBUFFER, BGL_RGBA4, width, height);
bglFramebufferRenderbuffer(BGL_FRAMEBUFFER, BGL_COLOR_ATTACHMENT0, BGL_RENDERBUFFER, canvasRenderBuffer);
bglViewport(0, 0, width, height);
Matrix::matrix_t identity, colorMatrix;
Matrix::LoadIdentity(&identity);
Matrix::LoadIdentity(&colorMatrix);
bglClearColor(1.0f, 1.0f, 1.0f, 1.0f);
bglClear(BGL_COLOR_BUFFER_BIT);
Vector::vector_t oldPos, oldScale;
Vector::Copy(&oldPos, &pos);
Vector::Mul(&pos, 0.0f);
Vector::Copy(&oldScale, &scale);
Vector::Load(&scale, 1, 1, 1);
int oldHAlign = halignment;
int oldVAlign = valignment;
halignment = Font::HALIGN_LEFT;
valignment = Font::VALIGN_BOTTOM;
float oldXRatio = vid.xratio;
float oldYRatio = vid.yratio;
vid.xratio = 1;
vid.yratio = 1;
Drawing::Set2D(this->size.x, this->size.y); // glOrtho and setup projection/modelview matrices
Draw(&identity, &colorMatrix);
Vector::Copy(&pos, &oldPos);
Vector::Copy(&scale, &oldScale);
halignment = oldHAlign;
valignment = oldVAlign;
vid.xratio = oldXRatio;
vid.yratio = oldYRatio;
byte *buffer = (byte*)Z_Malloc(width * height * 3, ZT_STATIC);
bglPixelStorei(BGL_PACK_ALIGNMENT, 1);
bglReadPixels(0, 0, width, height, BGL_RGB, BGL_UNSIGNED_BYTE, buffer);
byte *final = RGBtoLuminance(buffer, width, height);
SaveTGA("canvas.tga", final, width, height, 1);
Z_Free(buffer);
// unbind frame buffer
bglBindRenderbuffer(BGL_RENDERBUFFER, 0);
bglBindFramebuffer(BGL_FRAMEBUFFER, 0);
bglDeleteRenderbuffers(1, &canvasRenderBuffer);
bglDeleteFramebuffers(1, &canvasFrameBuffer);
bglViewport(0, 0, vid.width, vid.height);
Here's the answer. Change this line:
bglBindFramebuffer(BGL_RENDERBUFFER, canvasFrameBuffer);
to this:
bglBindFramebuffer(BGL_FRAMEBUFFER, canvasFrameBuffer);
BGL_RENDERBUFFER is not a valid target for binding a framebuffer, so the original bind fails and all subsequent drawing goes to the default window framebuffer, which is clipped to the 320x480 screen. That is exactly why the output was only correct within the window's dimensions.
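With the framebuffer actually bound, it's also worth checking completeness before drawing. A sketch using the standard GL names (assuming the bgl* wrappers map directly onto these):

glBindFramebuffer(GL_FRAMEBUFFER, canvasFrameBuffer);
if (glCheckFramebufferStatus(GL_FRAMEBUFFER) != GL_FRAMEBUFFER_COMPLETE)
{
    // The attachment combination (e.g. the RGBA4 renderbuffer at this size)
    // isn't supported; fall back or report the error here.
}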