Read pixels from SFML texture using PBOs - c++

I am creating a render texture in SFML (sf::RenderTexture for off-screen rendering), drawing to it and trying to read the pixels asynchronously using PBOs. Here is a minimal example of what I'm doing:
#include <SFML/Window.hpp>
#include <SFML/Graphics.hpp>
#define GL_SILENCE_DEPRECATION
#include <SFML/OpenGL.hpp>
#include <iostream>
int main()
{
    // create texture and circle to draw on texture
    int texSize = 200;
    sf::RenderTexture tex;
    tex.create(texSize, texSize);
    sf::CircleShape circle(50);
    circle.setPosition(0, 0);
    circle.setFillColor(sf::Color::Blue);

    // initialize PBOs
    int nPbos = 2;
    GLuint* pbos = new GLuint[nPbos];
    glGenBuffers(nPbos, pbos);
    for (int i = 0; i < nPbos; ++i) {
        glBindBuffer(GL_PIXEL_PACK_BUFFER, pbos[i]);
        glBufferData(GL_PIXEL_PACK_BUFFER, texSize * texSize * 4, NULL, GL_STREAM_READ);
    }
    glBindBuffer(GL_PIXEL_PACK_BUFFER, 0);

    int pboIdx = 0;
    for (int frame = 0; frame < 100; ++frame) {
        // draw stuff
        tex.clear(sf::Color::White);
        tex.draw(circle);
        tex.display();

        glReadBuffer(GL_COLOR_ATTACHMENT0);
        if (frame < nPbos) {
            // first frames: only start asynchronous reads
            glBindBuffer(GL_PIXEL_PACK_BUFFER, pbos[pboIdx]);
            glReadPixels(0, 0, texSize, texSize, GL_BGRA, GL_UNSIGNED_BYTE, 0);
        }
        else {
            // map the PBO filled nPbos frames ago, then start the next read
            glBindBuffer(GL_PIXEL_PACK_BUFFER, pbos[pboIdx]);
            unsigned char* ptr = (unsigned char*)glMapBuffer(GL_PIXEL_PACK_BUFFER, GL_READ_ONLY);
            if (ptr != nullptr) {
                std::cout << "OK" << std::endl;
                glUnmapBuffer(GL_PIXEL_PACK_BUFFER);
            }
            glReadPixels(0, 0, texSize, texSize, GL_BGRA, GL_UNSIGNED_BYTE, 0);
        }
        pboIdx = (pboIdx + 1) % nPbos;
        glBindBuffer(GL_PIXEL_PACK_BUFFER, 0);
    }
    return 0;
}
I am not getting any errors from glGetError(). However, I never enter the condition, i.e. glMapBuffer always returns a null pointer and the array of pixels stays empty. I can't figure out what is wrong with the code or why I am not getting the pixels from the texture. Am I missing a bind somewhere?
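One quick way to narrow this down is to check which framebuffer is actually bound for reading before the glReadPixels call; a diagnostic sketch (tex.setActive(true) is SFML's way of activating the render target's context, the rest is plain OpenGL):

tex.setActive(true); // make sure the RenderTexture's context is current
GLint readFbo = 0;
glGetIntegerv(GL_READ_FRAMEBUFFER_BINDING, &readFbo);
std::cout << "read framebuffer: " << readFbo << std::endl; // 0 would mean the window, not the texture's FBO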

This is an example where data is read from the color buffer. It uses GLFW and GLEW.
#include <GL/glew.h>
#include <GLFW/glfw3.h>
#include <iostream>

// assumed definitions (not shown in the original snippet)
const unsigned int SCR_WIDTH = 800;
const unsigned int SCR_HEIGHT = 600;
GLuint w_pbo[2];
int w_readIndex = 0;
int w_writeIndex = 1;

void framebuffer_size_callback(GLFWwindow* window, int width, int height)
{
    glViewport(0, 0, width, height);
}

int main()
{
    // glfw: initialize and configure
    // ------------------------------
    glfwInit();
    glfwWindowHint(GLFW_CONTEXT_VERSION_MAJOR, 4);
    glfwWindowHint(GLFW_CONTEXT_VERSION_MINOR, 0);
    glfwWindowHint(GLFW_OPENGL_PROFILE, GLFW_OPENGL_CORE_PROFILE);
    // glfw window creation
    // --------------------
    GLFWwindow* window = glfwCreateWindow(SCR_WIDTH, SCR_HEIGHT, "LearnOpenGL", NULL, NULL);
    if (window == NULL)
    {
        std::cout << "Failed to create GLFW window" << std::endl;
        glfwTerminate();
        return -1;
    }
    glfwMakeContextCurrent(window);
    glfwSetFramebufferSizeCallback(window, framebuffer_size_callback);
    glewInit();

    glGenBuffers(2, w_pbo);
    glBindBuffer(GL_PIXEL_PACK_BUFFER, w_pbo[0]);
    glBufferData(GL_PIXEL_PACK_BUFFER, SCR_WIDTH * SCR_HEIGHT * 4, 0, GL_STREAM_READ);
    glBindBuffer(GL_PIXEL_PACK_BUFFER, w_pbo[1]);
    glBufferData(GL_PIXEL_PACK_BUFFER, SCR_WIDTH * SCR_HEIGHT * 4, 0, GL_STREAM_READ);
    // unbind buffers for now
    glBindBuffer(GL_PIXEL_PACK_BUFFER, 0);

    while (!glfwWindowShouldClose(window))
    {
        glClearColor(0.1f, 0.1f, 0.1f, 1.0f);
        glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
        // Draw your objects here

        w_writeIndex = (w_writeIndex + 1) % 2;
        w_readIndex = (w_readIndex + 1) % 2;
        glBindBuffer(GL_PIXEL_PACK_BUFFER, w_pbo[w_writeIndex]);
        // copy from framebuffer to PBO asynchronously. it will be ready in the NEXT frame
        glReadPixels(0, 0, SCR_WIDTH, SCR_HEIGHT, GL_RGBA, GL_UNSIGNED_BYTE, nullptr);
        // now read the other PBO, which should already be in CPU-accessible memory
        glBindBuffer(GL_PIXEL_PACK_BUFFER, w_pbo[w_readIndex]);
        unsigned char* downsampleData = (unsigned char*)glMapBuffer(GL_PIXEL_PACK_BUFFER, GL_READ_ONLY);
        if (downsampleData) {
            std::cout << "Pointer is not NULL: " << static_cast<unsigned>(downsampleData[2]) << std::endl;
        }
        glUnmapBuffer(GL_PIXEL_PACK_BUFFER);
        glBindBuffer(GL_PIXEL_PACK_BUFFER, 0);

        glfwSwapBuffers(window);
        glfwPollEvents();
    }
}
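One caveat with this double-buffered pattern: mapping the other PBO one frame later usually avoids a stall, but the only guaranteed way to know the transfer has finished is a sync object. A minimal sketch (standard OpenGL 3.2+ sync calls, added here for illustration):

// issue the asynchronous read, then drop a fence right behind it
glBindBuffer(GL_PIXEL_PACK_BUFFER, w_pbo[w_writeIndex]);
glReadPixels(0, 0, SCR_WIDTH, SCR_HEIGHT, GL_RGBA, GL_UNSIGNED_BYTE, nullptr);
GLsync fence = glFenceSync(GL_SYNC_GPU_COMMANDS_COMPLETE, 0);

// ... later, before mapping that PBO ...
GLenum state = glClientWaitSync(fence, 0, 0); // timeout 0: just poll
if (state == GL_ALREADY_SIGNALED || state == GL_CONDITION_SATISFIED) {
    // the transfer is complete, mapping will not block
    unsigned char* ptr = (unsigned char*)glMapBuffer(GL_PIXEL_PACK_BUFFER, GL_READ_ONLY);
    if (ptr) { /* use the pixels */ }
    glUnmapBuffer(GL_PIXEL_PACK_BUFFER);
}
glDeleteSync(fence);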

Related

c++ cuda opengl not rendering vbo

I am trying to draw a bunch of points on the screen. I'm using CUDA to generate the data (position and color), and OpenGL to draw it. I am trying to get CUDA to update a VBO and then OpenGL to draw it, but I get a blank screen. I am not sure if CUDA is failing to update the buffer, or if the buffer is not being drawn properly. My GPU is a GTX 1080, and I'm trying to use OpenGL 4.0. Colors are specified by CUDA as well. If my problem is that I need a shader, how do I add that, while still specifying the color through CUDA?
UPDATE: the problem seems to be OpenGL. I updated the code to use a triangle, so a new question: why is my VBO not being rendered?
Here is the code:
GPUmain.cuh:
#include <cuda_runtime.h>
#include "device_launch_parameters.h"
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/remove.h>
#include <curand.h>
#include <GL/glew.h>
#include <SDL_opengl.h>
#include <cuda_gl_interop.h>
#define BUFFER_OFFSET(i) ((char *)NULL + (i))
//ver: x, y, z, r, g, b, a
struct ver {
    // x, y, z pos
    GLuint x, y, z;
    // r, g, b, a color
    GLubyte r, g, b, a;
};

class GPU {
public:
    static int nParticles;
    static GLuint vboid;
    static cudaGraphicsResource *CGR;
    // collection of vertices to be simulated and rendered
    static thrust::device_vector<ver> rverts;
    static void init(int w, int h);
    static void compute();
    static void render();
    static void GPUmain();
    static void free();
};
GPUmain.cu:
#include "GPUmain.cuh"
__global__ void uploadVerts(ver *vv, ver *vb) {
    int id = threadIdx.x + (blockDim.x * blockIdx.x);
    vb[id] = vv[id];
    vb[id].x = vv[id].x;
    vb[id].y = vv[id].y;
    vb[id].z = vv[id].z;
    vb[id].r = vv[id].r;
    vb[id].g = vv[id].g;
    vb[id].b = vv[id].b;
    vb[id].a = vv[id].a;
}

__global__ void genGrid(ver *v) {
    int i = threadIdx.x + (blockDim.x * blockIdx.x);
    float x = (float)(i % ((int)1080));
    float y = (float)(i / ((int)1920));
    v[i].x = x;
    v[i].y = y;
    v[i].z = 1;
    v[i].r = 255;
    v[i].g = 0;
    v[i].b = 0;
    v[i].a = 0;
}

int GPU::nParticles;
GLuint GPU::vboid;
cudaGraphicsResource *GPU::CGR;
// collection of vertices to be simulated and rendered
thrust::device_vector<ver> GPU::rverts;

void GPU::init(int w, int h)
{
    nParticles = w * h;
    /*rverts.resize(nParticles, ver{0,0,0,0,0,0,0});
    genGrid<<<nParticles/1024,1024>>>(thrust::raw_pointer_cast(&rverts[0]));*/
    ver e[3] = {
        ver{1024,200,2,255,0,0,255},
        ver{499,288,173,0,255,0,255},
        ver{462,1674,8,0,0,255,255}
    };
    glGenBuffers(1, &vboid);
    glBindBuffer(GL_ARRAY_BUFFER, vboid);
    glBufferData(GL_ARRAY_BUFFER, 3*sizeof(ver), e, GL_DYNAMIC_DRAW);
    glBindBuffer(GL_ARRAY_BUFFER, 0);
    /*cudaGraphicsGLRegisterBuffer(&CGR,vboid,cudaGraphicsMapFlagsWriteDiscard);*/
}

void GPU::compute()
{
}

void GPU::render()
{
    /*ver *verts;
    size_t size;
    cudaGraphicsMapResources(1, &CGR, 0);
    cudaGraphicsResourceGetMappedPointer((void**)&verts, &size, CGR);
    uploadVerts<<<nParticles/1024, 1024>>>(thrust::raw_pointer_cast(&rverts[0]), verts);
    cudaGraphicsUnmapResources(1, &CGR, 0);
    cudaDeviceSynchronize();*/
    glClearColor(0, 0, 0, 0); // we clear the screen with black (else, frames would overlay...)
    glClear(GL_COLOR_BUFFER_BIT); // clear the buffer
    glBindBuffer(GL_ARRAY_BUFFER, vboid);
    glEnableClientState(GL_VERTEX_ARRAY);
    glEnableClientState(GL_COLOR_ARRAY);
    glVertexPointer(3, GL_INT, 4 * sizeof(GLubyte), 0);
    glColorPointer(4, GL_BYTE, 3 * sizeof(GLuint), BUFFER_OFFSET(3 * sizeof(GLuint)));
    glDrawArrays(GL_TRIANGLES, 0, 3);
    glDisableClientState(GL_VERTEX_ARRAY);
    glDisableClientState(GL_COLOR_ARRAY);
    glBindBuffer(GL_ARRAY_BUFFER, 0);
}

void GPU::GPUmain()
{
    compute();
    render();
}

void GPU::free()
{
    cudaGraphicsUnregisterResource(CGR);
    glBindBuffer(GL_ARRAY_BUFFER, vboid);
    glDeleteBuffers(1, &vboid);
    glBindBuffer(GL_ARRAY_BUFFER, 0);
    rverts.clear();
    thrust::device_vector<ver>().swap(rverts);
}
The relevant (that contain OpenGL code) parts of window.cpp:
bool Window::init()
{
    // initialize SDL
    if (SDL_Init(SDL_INIT_EVERYTHING) != 0) {
        log << "Failed to initialize SDL!\n";
        return false;
    }
    // set window attributes
    SDL_GL_SetAttribute(SDL_GL_CONTEXT_PROFILE_MASK, SDL_GL_CONTEXT_PROFILE_CORE);
    SDL_GL_SetAttribute(SDL_GL_CONTEXT_MAJOR_VERSION, 4);
    SDL_GL_SetAttribute(SDL_GL_CONTEXT_MINOR_VERSION, 0);
    SDL_GL_SetAttribute(SDL_GL_STENCIL_SIZE, 8);
    SDL_GL_SetAttribute(SDL_GL_DOUBLEBUFFER, 1);
    // create window
    window = SDL_CreateWindow(
        name.c_str(),
        SDL_WINDOWPOS_CENTERED,
        SDL_WINDOWPOS_CENTERED,
        width,
        height,
        SDL_WINDOW_OPENGL
    );
    // create opengl context in the window
    glcontext = SDL_GL_CreateContext(window);
    SDL_GL_SetSwapInterval(1);
    // check if the window was created
    if (window == nullptr) {
        log << "Failed to create window!\n";
        return false;
    }
    // turn on experimental features
    glewExperimental = GL_TRUE;
    // initialize glew
    if (glewInit() != GLEW_OK) {
        log << "Failed to Init GLEW";
        return false;
    }
    // set drawing parameters
    glViewport(0, 0, width, height);
    glMatrixMode(GL_PROJECTION);
    glLoadIdentity();
    glOrtho(0, width, 0, height, 0, 255);
    glPointSize(1);
    glEnable(GL_BLEND); // Allow Transparency
    glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA); // how transparency acts
    std::cout << sizeof(ver);
    GPU::init(width, height);
    return true;
}

void Window::renderFrame()
{
    GPU::render();
    SDL_GL_SwapWindow(window); // swap buffers
}
If you use the fixed-function attributes and client-side capabilities, then you have to use a compatibility profile context.
See Fixed Function Pipeline and Legacy OpenGL.
(If you want to use a core profile, then you have to use a Vertex Array Object and shaders instead.) Change the requested profile from core to compatibility:

SDL_GL_SetAttribute(SDL_GL_CONTEXT_PROFILE_MASK, SDL_GL_CONTEXT_PROFILE_CORE);          // before
SDL_GL_SetAttribute(SDL_GL_CONTEXT_PROFILE_MASK, SDL_GL_CONTEXT_PROFILE_COMPATIBILITY); // after
The following geometry
ver e[3] = {
// x y z r g b a
ver{1024, 200, 2, 255, 0, 0, 255},
ver{ 499, 288, 173, 0, 255, 0, 255},
ver{462, 1674, 8, 0, 0, 255, 255}
};
is clipped by the near plane of the orthographic projection. Note that in view space the z-axis points out of the viewport, so glOrtho(..., 0, 255) covers view-space z in [0, -255], while the triangle's z coordinates (2, 173, 8) are all positive.
Change the orthographic projection (or invert the z coordinates of the geometry):
glOrtho(0, width, 0, height, 0, 255);   // before: visible view-space z range is [0, -255]
glOrtho(0, width, 0, height, -255, 0);  // after: visible view-space z range is [0, 255]
The stride parameter of glVertexPointer and glColorPointer is the byte offset between consecutive attributes of the same kind, so it has to be sizeof(ver).
The type of the color attributes is GL_UNSIGNED_BYTE rather than GL_BYTE:
glVertexPointer(3, GL_INT, 4 * sizeof(GLubyte), 0);                                  // before
glColorPointer(4, GL_BYTE, 3 * sizeof(GLuint), BUFFER_OFFSET(3 * sizeof(GLuint)));   // before
glVertexPointer(3, GL_INT, sizeof(ver), 0);                                          // after
glColorPointer(4, GL_UNSIGNED_BYTE, sizeof(ver), BUFFER_OFFSET(3 * sizeof(GLuint))); // after
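Putting both fixes together, the attribute setup in GPU::render() would look like this (a sketch; the rest of the function is unchanged):

glBindBuffer(GL_ARRAY_BUFFER, vboid);
glEnableClientState(GL_VERTEX_ARRAY);
glEnableClientState(GL_COLOR_ARRAY);
// stride is sizeof(ver) for both pointers; the colours start after the three GLuint coordinates
glVertexPointer(3, GL_INT, sizeof(ver), 0);
glColorPointer(4, GL_UNSIGNED_BYTE, sizeof(ver), BUFFER_OFFSET(3 * sizeof(GLuint)));
glDrawArrays(GL_TRIANGLES, 0, 3);
glDisableClientState(GL_COLOR_ARRAY);
glDisableClientState(GL_VERTEX_ARRAY);
glBindBuffer(GL_ARRAY_BUFFER, 0);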

SDL_TTF rendering garbage to texture

I am attempting to use OpenGL and SDL, using SDL_ttf to render text to a texture, but the code is rendering garbage.
My "Render to texture code":
GLuint textToTexture(std::string & text, TTF_Font* font, glm::vec4 textColour, glm::vec4 bgColour)
{
    if (!TTF_WasInit())
    {
        if (TTF_Init() == -1)
            exit(6);
    }
    SDL_Color colour = { (Uint8)(textColour.r*255), (Uint8)(textColour.g*255), (Uint8)(textColour.b*255), (Uint8)(textColour.a*255) };
    SDL_Color bg = { (Uint8)(bgColour.r*255), (Uint8)(bgColour.g*255), (Uint8)(bgColour.b*255), (Uint8)(bgColour.a*255) };
    SDL_Surface *stringImage = NULL;
    stringImage = TTF_RenderText_Blended(font, text.c_str(), colour);
    if (stringImage == NULL)
    {
        exit(5);
    }
    GLuint trueH = powerofTwo(stringImage->h);
    GLuint trueW = powerofTwo(stringImage->w);
    unsigned char* pixels = NULL;
    GLuint w = stringImage->w;
    GLuint h = stringImage->h;
    GLuint colours = stringImage->format->BytesPerPixel;
    pixels = padTexture((unsigned char*)stringImage->pixels, w, h, pixels, trueW, trueH, colours);
    GLuint format, internalFormat;
    if (colours == 4) {
        if (stringImage->format->Rmask == 0x000000ff)
            format = GL_RGBA;
        else
            format = GL_BGRA;
    }
    else {
        // no alpha
        if (stringImage->format->Rmask == 0x000000ff)
            format = GL_RGB;
        else
            format = GL_BGR;
    }
    internalFormat = (colours == 4) ? GL_RGBA : GL_RGB;
    GLuint texId = 0;
    //GLuint texture;
    glGenTextures(1, &texId);
    glBindTexture(GL_TEXTURE_2D, texId);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
    glTexImage2D(GL_TEXTURE_2D, 0, internalFormat, trueW, trueH, 0, format, GL_UNSIGNED_BYTE, pixels);
    // SDL surface was used to generate the texture but is no longer
    // required. Release it to free memory
    SDL_FreeSurface(stringImage);
    free(pixels);
    return texId;
}
The code for computing the correct dimensions for padding:
int powerofTwo(int num)
{
    if (num != 0)
    {
        num--;
        num |= num >> 1;  // Divide by 2^k for consecutive doublings of k up to 32,
        num |= num >> 2;  // and then or the results.
        num |= num >> 4;
        num |= num >> 8;
        num |= num >> 16;
        num++;
    }
    return num;
}
and finally, the code that copies the bytes to a texture of the correct dimensions:
unsigned char* padTexture(unsigned char * src, int srcW, int srcH, unsigned char * dest, int width, int height, int bpp)
{
    dest = (unsigned char*)calloc(1, width*height*bpp);
    for (int i = 0; i < srcH; i++)
    {
        memcpy(dest + (width*i*bpp), src + (srcW*i*bpp), srcW*bpp);
    }
    return dest;
}
The result of this code is as follows: [screenshot: garbled texture]
I have confirmed and error checked that SDL_TTF is properly initialized elsewhere in the codebase, and that the font is also being loaded.
I have tested with three different ttf fonts, with the same results.
Also, if I use any other TTF rendering function (Shaded, Solid, etc.), a solid quad is rendered, and the "colours" variable in the textToTexture function ends up as 1.
Additional:
As I previously stated, I tested with three ttf fonts:
MavenPro-Regular,
HelveticaNeueLTStd-Th
and another I found off the internet.
I was trying to render the string "Select Scenario".
The pre padded image dimensions are 138x25 pixels.
The post padded image dimensions are 256x32 pixels.
Update 1:
After fixing the bpp issue, the new texture is as follows: [screenshot omitted]
This image changes every time I run the program.
Update 2:
After fixing the additional errors spotted in the image padding, and setting the pixel data on the texture itself, when I use TTF_RenderText_Blended all I get is a black quad, and when I use TTF_RenderText_Shaded I get: [screenshot omitted]
Update 3:
I used SDL_SaveBMP immediately before calling the GL code and after calling TTF_RenderText_Blended; the result was a completely white image (given the white text colour).
When I do the same using TTF_RenderText_Solid, the saved image is as it should be, but is rendered by OpenGL like the images you see above.
SDL_TTF initialized fine, the fonts load without error, and the text rendering returns no errors, so I can't think what to do next.
Update 4:
I have since refactored all the TTF code into a single function and removed the padding code (as modern OpenGL doesn't seem to care about it). However, despite all project settings and code now being identical to a test project that is known to work on the same hardware, the problem persists.
GLuint textToTexture(const char * text, const char * font, glm::vec4 textColour, glm::vec4 bgColour, unsigned int & texID)
{
    if (!TTF_WasInit()) {
        if (TTF_Init() == -1)
            exit(6);
    }
    SDL_Color colour = { (Uint8)(textColour.r * 255), (Uint8)(textColour.g * 255), (Uint8)(textColour.b * 255), (Uint8)(textColour.a * 255) };
    SDL_Color bg = { (Uint8)(bgColour.r * 255), (Uint8)(bgColour.g * 255), (Uint8)(bgColour.b * 255), 255 };
    TTF_Font* fontObj = TTF_OpenFont(font, 24);
    if (!fontObj)
    {
        SDL_ShowSimpleMessageBox(SDL_MESSAGEBOX_ERROR,
            "Texture Error",
            "Cannot load font to create texture.",
            NULL);
        return 0;
    }
    SDL_Surface *image = NULL;
    image = TTF_RenderText_Blended(fontObj, text, colour);
    if (image == NULL)
    {
        exit(5);
        //exitFatalError("String surface not created.");
        std::cout << "String surface not created." << std::endl;
    }
    unsigned char* pixels = NULL;
    GLuint w = image->w;
    GLuint h = image->h;
    GLuint colours = image->format->BytesPerPixel;
    GLuint externalFormat, internalFormat;
    SDL_PixelFormat *format = image->format;
    if (colours == 4) {
        if (image->format->Rmask == 0x000000ff)
            externalFormat = GL_RGBA;
        else
            externalFormat = GL_BGRA;
    }
    else {
        // no alpha
        if (image->format->Rmask == 0x000000ff)
            externalFormat = GL_RGB;
        else
            externalFormat = GL_BGR;
    }
    internalFormat = (colours == 4) ? GL_RGBA : GL_RGB;
    GLuint texId = 0;
    //GLuint texture;
    glGenTextures(1, &texID);
    glBindTexture(GL_TEXTURE_2D, texID);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
    glTexImage2D(GL_TEXTURE_2D, 0, internalFormat, w, h, 0, externalFormat, GL_UNSIGNED_BYTE, image->pixels);
    //glTexImage2D(GL_TEXTURE_2D, 0, internalFormat, trueW, trueH, 0, externalFormat, GL_UNSIGNED_BYTE, pixels);
    glGenerateMipmap(GL_TEXTURE_2D);
    //// SDL surface was used to generate the texture but is no longer
    //// required. Release it to free memory
    SDL_FreeSurface(image);
    TTF_CloseFont(fontObj);
    return texID;
}
I have a workaround that saves the image to a BMP, then reloads it and creates a texture, but only when I use TTF_RenderText_Shaded. If I use TTF_RenderText_Blended, I get a single-colour image which corresponds to the text colour.
glTexImage2D(GL_TEXTURE_2D, 0, internalFormat, trueH, trueW, 0, format, GL_UNSIGNED_BYTE, pixels);

trueH and trueW are in reversed order (the width parameter comes first).

memcpy(src + (srcW*i*bpp), dest + (width*i*bpp), srcW*bpp);

Source and destination are in reversed order (memcpy copies into its first argument).

dest = (unsigned char*)calloc(0, width*height*bpp);

This allocates 0 elements of size width*height*bpp, which is 0 bytes. The first argument should be 1 instead of 0.
Here is a complete example:
#include <SDL2/SDL.h>
#include <GL/gl.h>
#include <SDL2/SDL_ttf.h>
#include <stdio.h>
#include <string.h>
#include <stdlib.h>

static unsigned char* padTexture(unsigned char * src, int srcW, int srcH, unsigned char * dest, int width, int height, int bpp, const SDL_Palette *palette)
{
    int dst_bpp = (bpp == 1) ? 4 : bpp;
    dest = (unsigned char*)calloc(1, width*height*dst_bpp);
    if (bpp != 1) {
        for (int i = 0; i < srcH; i++)
        {
            memcpy(dest + (width*i*bpp), src + (srcW*i*bpp), srcW*bpp);
        }
    } else {
        /* indexed - read colours from palette */
        for (int i = 0; i < srcH; i++) {
            for (int j = 0; j < srcW; j++) {
                memcpy(dest + (width*i+j)*dst_bpp,
                       &palette->colors[src[srcW*i+j]], sizeof(SDL_Color));
            }
        }
    }
    return dest;
}

static int powerofTwo(int num) {
    if (num != 0)
    {
        num--;
        num |= num >> 1;  // Divide by 2^k for consecutive doublings of k up to 32,
        num |= num >> 2;  // and then or the results.
        num |= num >> 4;
        num |= num >> 8;
        num |= num >> 16;
        num++;
    }
    return num;
}

static GLuint textToTexture(const char *text, TTF_Font* font) {
    if (!TTF_WasInit()) {
        if (TTF_Init() == -1)
            exit(6);
    }
    SDL_Color colour = { 255, 255, 255, 255 };
    SDL_Color bg = { 0, 0, 0, 255 };
    SDL_Surface *stringImage = NULL;
    // stringImage = TTF_RenderText_Blended(font, text, colour);
    stringImage = TTF_RenderText_Shaded(font, text, colour, bg);
    if (stringImage == NULL) {
        exit(5);
    }
    GLuint trueH = powerofTwo(stringImage->h);
    GLuint trueW = powerofTwo(stringImage->w);
    unsigned char* pixels = NULL;
    GLuint w = stringImage->w;
    GLuint h = stringImage->h;
    GLuint colours = stringImage->format->BytesPerPixel;
    pixels = padTexture((unsigned char*)stringImage->pixels, w, h, pixels, trueW, trueH,
                        colours, stringImage->format->palette);
    GLuint format, internalFormat;
    /* If indexed, want resulting image to be 32bit */
    if (colours == 1) {
        colours = 4;
    }
    if (colours == 4) {
        if (stringImage->format->Rmask == 0x000000ff)
            format = GL_RGBA;
        else
            format = GL_BGRA;
    }
    else {
        // no alpha
        if (stringImage->format->Rmask == 0x000000ff)
            format = GL_RGB;
        else
            format = GL_BGR;
    }
    internalFormat = (colours == 4) ? GL_RGBA : GL_RGB;
    GLuint texId = 0;
    //GLuint texture;
    glGenTextures(1, &texId);
    glBindTexture(GL_TEXTURE_2D, texId);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
    glTexImage2D(GL_TEXTURE_2D, 0, internalFormat, trueW, trueH, 0, format, GL_UNSIGNED_BYTE, pixels);
    // SDL surface was used to generate the texture but is no longer
    // required. Release it to free memory
    SDL_FreeSurface(stringImage);
    free(pixels);
    return texId;
}

int main(int argc, char* argv[])
{
    SDL_Init(SDL_INIT_VIDEO);
    TTF_Init();
    SDL_Window *window = SDL_CreateWindow("SDL2 Example", SDL_WINDOWPOS_UNDEFINED, SDL_WINDOWPOS_UNDEFINED, 600, 400, SDL_WINDOW_OPENGL);
    SDL_GLContext gl_ctx = SDL_GL_CreateContext(window);
    TTF_Font *font = TTF_OpenFont(".fonts/tahoma.ttf", 16);
    if (font) {
        printf("font loaded\n");
        textToTexture("Select Scenario", font);
        TTF_CloseFont(font);
    }
    int quit = 0;
    while (!quit) {
        SDL_Event ev;
        while (SDL_PollEvent(&ev)) {
            if (ev.type == SDL_QUIT || ev.type == SDL_KEYUP) {
                quit = 1;
            }
        }
        glClearColor(0.1f, 0.1f, 0.1f, 1.0f);
        glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
        glLoadIdentity();
        glEnable(GL_BLEND);
        glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA);
        glEnable(GL_TEXTURE_2D);
        glColor3f(1.0f, 1.0f, 1.0f);
        glBegin(GL_QUADS);
        glTexCoord2f(0, 1); glVertex2f(-0.5, -0.5);
        glTexCoord2f(0, 0); glVertex2f(-0.5, 0.5);
        glTexCoord2f(1, 0); glVertex2f(0.5, 0.5);
        glTexCoord2f(1, 1); glVertex2f(0.5, -0.5);
        glEnd();
        glFlush();
        SDL_GL_SwapWindow(window);
    }
    SDL_GL_DeleteContext(gl_ctx);
    SDL_DestroyWindow(window);
    TTF_Quit();
    SDL_Quit();
    return 0;
}
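One more gotcha worth checking when an SDL surface is uploaded directly, as in the Update 4 code above (an addition, not part of the original answer): SDL may pad each row, so image->pitch is not necessarily w * BytesPerPixel. Telling GL the real row length avoids skewed or garbled output:

// account for SDL surface row padding before uploading image->pixels directly
glPixelStorei(GL_UNPACK_ALIGNMENT, 1);
glPixelStorei(GL_UNPACK_ROW_LENGTH, image->pitch / image->format->BytesPerPixel);
glTexImage2D(GL_TEXTURE_2D, 0, internalFormat, w, h, 0, externalFormat, GL_UNSIGNED_BYTE, image->pixels);
glPixelStorei(GL_UNPACK_ROW_LENGTH, 0); // restore the default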

OpenGl glCreateShader makes window white and gives an error

When I try to create a shader with glCreateShader and run my program, the window I just made goes white and I get an error:
Exception thrown at 0x03D312F0 (atioglxx.dll) in OpenGl.exe: 0xC0000005: Access violation reading location 0x73553A43.
Does anyone know why this is happening?
Here's my code
#include <GL/glew.h>
#include <GLFW\glfw3.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
int main()
{
    float speed = 0.005;
    float edge = 3.0;
    float move = 0.0;
    float zamik = 0.1;
    GLFWwindow *window;
    // initialize GLFW
    if (!glfwInit())
    {
        return -1;
    }
    // create a windowed mode window and its OpenGL context
    window = glfwCreateWindow(640, 480, "Window", NULL, NULL);
    if (!window)
    {
        glfwTerminate();
        return -1;
    }
    // make the window's context current
    glfwMakeContextCurrent(window);
    glewInit();
    GLenum err = glewInit();
    if (GLEW_OK != err)
    {
        /* Problem: glewInit failed, something is seriously wrong. */
        fprintf(stderr, "Error: %s\n", glewGetErrorString(err));
    }
    GLfloat verts[] =
    {
        0.0f, 0.5f, 0.0f,
        -0.5f, -0.5f, 0.0f,
        0.5f, -0.5f, 0.0f
    };
    /*GLfloat color[] =
    {
        255, 0, 0,
        100, 255, 0,
        0, 0, 255,
        255, 255, 255
    };*/
    GLuint vertexShader;
    vertexShader = glCreateShader(GL_VERTEX_SHADER);
    glShaderSource(vertexShader, 1, "C:\Users\Ghost.corp\Documents\Visual Studio 2015\Projects\OpenGl\OpenGl\VertexShader.shdVertx", NULL);
    //glCompileShader(vertexShader);
    glfwWindowHint(GLFW_CONTEXT_VERSION_MAJOR, 3);
    glfwWindowHint(GLFW_CONTEXT_VERSION_MINOR, 3);
    glfwWindowHint(GLFW_OPENGL_PROFILE, GLFW_OPENGL_CORE_PROFILE);
    glfwWindowHint(GLFW_OPENGL_PROFILE, 0);
    glfwWindowHint(GLFW_RESIZABLE, GL_FALSE);
    glfwWindowHint(GLFW_SAMPLES, 4);
    // loop
    while (!glfwWindowShouldClose(window))
    {
        // clear our screen
        glClear(GL_COLOR_BUFFER_BIT);
        // render opengl content
        glEnableClientState(GL_VERTEX_ARRAY);
        //glEnableClientState(GL_COLOR_ARRAY);
        glVertexPointer(3, GL_FLOAT, 0, verts);
        glDrawArrays(GL_TRIANGLE_STRIP, 0, 3);
        //glDisableClientState(GL_COLOR_ARRAY);
        glDisableClientState(GL_VERTEX_ARRAY);
        // swap front and back buffers
        glfwSwapBuffers(window);
        glfwPollEvents();
        //glColorPointer(4, GL_FLOAT, 0, color);
    }
    glfwTerminate();
    return -1;
}
Concentrate on the glCreateShader call, in case I'm passing the parameters wrong. Otherwise I don't know.
The glCreateShader call looks fine, but for the next line you need to read the shader file first: glShaderSource expects the shader source code, not a path. An example would be:
char *vs_source =
    "attribute vec3 pos;\n"
    "void main() {\n"
    "    gl_Position = vec4(pos,1.0);\n"
    "}\n";
glShaderSource(vertexShader, 1, (const GLchar**)&vs_source, NULL);
So, first read the file and put the contents into a string, and then pass it to this function.
Loading a file in C is a bit involved, so you need a function like this one:
int load_file(const char *filename, char **result)
{
    int size = 0;
    FILE *f = fopen(filename, "rb");
    if (f == NULL)
    {
        *result = NULL;
        return -1; // -1 means file opening fail
    }
    fseek(f, 0, SEEK_END);
    size = ftell(f);
    fseek(f, 0, SEEK_SET);
    *result = (char *)malloc(size+1);
    if (size != fread(*result, sizeof(char), size, f))
    {
        free(*result);
        return -2; // -2 means file reading fail
    }
    fclose(f);
    (*result)[size] = 0;
    return size;
}
And then, you can use it in this way:
char *vs_source;
int size;
size = load_file("C:\\Users\\Ghost.corp\\Documents\\Visual Studio 2015\\Projects\\OpenGl\\OpenGl\\VertexShader.shdVertx", &vs_source);
glShaderSource(vertexShader, 1, (const GLchar**)&vs_source, NULL);
You may check the returned size to verify there was no problem loading the file. Also, as you are using C, keep in mind that freeing the newly allocated memory is on you; if you don't free it, you'll have a memory leak.
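After uploading the source you still have to compile it, and it is worth checking the result; a short sketch using the standard calls:

glCompileShader(vertexShader);
// check the compile status and print the info log on failure
GLint ok = GL_FALSE;
glGetShaderiv(vertexShader, GL_COMPILE_STATUS, &ok);
if (ok != GL_TRUE) {
    char log[1024];
    GLsizei len = 0;
    glGetShaderInfoLog(vertexShader, sizeof(log), &len, log);
    fprintf(stderr, "shader compile failed: %.*s\n", (int)len, log);
}
free(vs_source); // the buffer allocated by load_file is yours to release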
Resources:
Loading files function is taken from here.

Fast way to rasterize a grid of points/pixels

I want to fill the screen with a grid of points. My desired performance would be about the same speed as drawing that many pixels as a contiguous quad (or an equivalent triangle clipped with glViewport). Using GL_POINT primitives (positioned via gl_VertexID, not attribs) or glPolygonStipple are possibilities, but are still a little slower. Here's an example of what I want (though the black points drawn may be yet more sparse): [image omitted]
Are there any other methods to draw this grid? (in a similar time to a smaller quad of the same number of pixels)
Wouldn't it be great if the rasterizer was programmable!
The main point of this is to be able to write to both stencil and colour buffers in this grid pattern from a fragment shader.
EDIT
Some rendering times:
Full screen for me is 1680x1050, GTX670. Times are calculated drawing 10,000 times each frame, no depth test. I draw a quad with a big triangle and clip using glViewport.
Rendering a full screen quad and calling discard for coord%4>0: 0.112ms
Rendering a full screen quad, assigning const colour: 0.059ms
Rendering with glPolygonStipple creating %4 pattern: 0.009ms
Rendering quarter full screen quad: 0.003ms
Rendering a 1x1 quad: 0.002ms (binding VBO and shader, could prob be optimized)
The differences get larger with a more sparse grid, for example %16.
EDIT
OK, I've thrown together a small example. Requires glut and glew libraries:
#include <GL/glew.h>
#include <GL/gl.h>
#include <GL/glut.h>
#include <memory.h>
#include <assert.h>
#include <stdio.h>
#define RESOLUTION_X 1680
#define RESOLUTION_Y 1050
#define USE_32_BIT 0
#define TEST_LOOP 1000 //number of quads to draw per frame
#define WARMUP_MS 1000 //time between switching methods
#define TEST_MS 4000 //time to benchmark for
#define TESTS 6
#define DRAW_GRAPH 1
#define SCALE_MS 0.2f //for drawing the graph
GLuint fbo, colourTex, vbo, shader, shaderPoints, shaderDiscard;
int viewport[2];
int test = 0;
int results_time[TESTS];
int results_frames[TESTS];
float colours[TESTS][3] = {
{1,0,0},
{1,1,0},
{1,0,1},
{0,1,0},
{0,1,1},
{0,0,1},
};
const char* names[TESTS] = {
"full",
"full discard",
"full stipple",
"draw points",
"quarter",
"one"
};
float triangleVerts[9] = {-1,-1,0,-1,4,0,4,-1,0};
const char* vertexShaderSrc = "#version 150\nin vec4 v;\nvoid main() {gl_Position = v;}\n";
const char* vertexShaderPointsSrc = "#version 150\nuniform ivec2 s;\nvoid main() {ivec2 p = ivec2(gl_VertexID%(s.x/4),gl_VertexID/(s.x/4)); gl_Position = vec4(2.0*(p*4+0.5)/s-1.0, 0, 1);}\n";
const char* fragmentShaderSrc = "#version 150\nout vec4 c;\nvoid main() {c = vec4(1,0,0,1);}\n";
const char* fragmentShaderDiscardSrc = "#version 150\nout vec4 c;\nvoid main() {if (int(gl_FragCoord.x)%4>0||int(gl_FragCoord.y)%4>0) discard; c = vec4(1,0,0,1);}\n";
void setupDraw(GLuint program, int x, int y)
{
    glUseProgram(program);
    glViewport(0, 0, x, y);
    glBindBuffer(GL_ARRAY_BUFFER, vbo);
    GLuint loc = glGetAttribLocation(program, "v");
    glEnableVertexAttribArray(loc);
    glVertexAttribPointer(loc, 3, GL_FLOAT, GL_FALSE, 0, 0);
}
void polygonStippleGrid(int x, int y)
{
    unsigned char tilePattern[32*32];
    memset(tilePattern, 0, sizeof(tilePattern));
    for (int j = 0; j < 32; j += y)
    {
        for (int i = 0; i < 32; i += x)
        {
            int index = (j * 32 + i);
            tilePattern[index / 8] |= 1 << (index % 8);
        }
    }
    glPixelStorei(GL_UNPACK_ALIGNMENT, 1);
    glPolygonStipple(tilePattern);
}
void display()
{
    static int lastTime = -1;
    int elapsed = glutGet(GLUT_ELAPSED_TIME);
    if (lastTime == -1) lastTime = elapsed;
    int dt = elapsed - lastTime;
    lastTime = elapsed;
    static int warmup = WARMUP_MS + 2000;
    static int running = TEST_MS;
    warmup -= dt;
    if (warmup <= 0 && test < TESTS)
    {
        running -= dt;
        results_time[test] += dt;
        results_frames[test] += 1;
        if (running <= 0)
        {
            printf("%s %s %.6fms\n", names[test], USE_32_BIT?"rgba32":"rgba8", results_time[test]/(float)(results_frames[test] * TEST_LOOP));
            test += 1;
            warmup = WARMUP_MS;
            running = TEST_MS;
        }
    }
#if DRAW_GRAPH
    glBindFramebuffer(GL_FRAMEBUFFER, 0);
    glViewport(0, 0, viewport[0], viewport[1]);
    glClear(GL_COLOR_BUFFER_BIT);
    float s = 2.0f / TESTS;
    glBegin(GL_QUADS);
    for (int i = 0; i < TESTS; ++i)
    {
        if (!results_frames[i]) continue;
        glColor3fv(colours[i]);
        float x = -1.0f + 2.0f * i / (float)TESTS;
        float y = -1.0f + 2.0f * (results_time[i]/(float)(results_frames[i] * TEST_LOOP)) / SCALE_MS;
        glVertex2f(x, -1.0f); glVertex2f(x, y); glVertex2f(x + s, y); glVertex2f(x + s, -1.0f);
    }
    glEnd();
#endif
    glBindFramebuffer(GL_FRAMEBUFFER, fbo);
    switch (test)
    {
    case 0: // straight full screen quad
        setupDraw(shader, RESOLUTION_X, RESOLUTION_Y);
        for (int i = 0; i < TEST_LOOP; ++i)
            glDrawArrays(GL_TRIANGLES, 0, 3);
        break;
    case 1: // full screen quad, discarding pixels in the frag shader
        setupDraw(shaderDiscard, RESOLUTION_X, RESOLUTION_Y);
        for (int i = 0; i < TEST_LOOP; ++i)
            glDrawArrays(GL_TRIANGLES, 0, 3);
        break;
    case 2: // using polygon stipple to mask out fragments
        polygonStippleGrid(4, 4);
        glEnable(GL_POLYGON_STIPPLE);
        setupDraw(shader, RESOLUTION_X, RESOLUTION_Y);
        for (int i = 0; i < TEST_LOOP; ++i)
            glDrawArrays(GL_TRIANGLES, 0, 3);
        glDisable(GL_POLYGON_STIPPLE);
        break;
    case 3: // drawing points, but computing the position in the vertex shader
        glUseProgram(shaderPoints);
        glUniform2i(glGetUniformLocation(shaderPoints, "s"), RESOLUTION_X, RESOLUTION_Y);
        for (int i = 0; i < TEST_LOOP; ++i)
            glDrawArrays(GL_POINTS, 0, (RESOLUTION_X/4)*(RESOLUTION_Y/4));
        break;
    case 4: // a quad one quarter of the screen (as a speed comparison)
        setupDraw(shader, RESOLUTION_X / 4, RESOLUTION_Y / 4);
        for (int i = 0; i < TEST_LOOP; ++i)
            glDrawArrays(GL_TRIANGLES, 0, 3);
        break;
    case 5: // a 1x1 quad (as a speed comparison)
        setupDraw(shader, 1, 1);
        for (int i = 0; i < TEST_LOOP; ++i)
            glDrawArrays(GL_TRIANGLES, 0, 3);
        break;
    default: break;
    }
    glUseProgram(0);
    glDisableVertexAttribArray(0); //HACK: assumes location is always zero
    //printf("%i %i %i\n", test, warmup, running);
    glFinish();
    glutSwapBuffers();
    glutPostRedisplay();
    assert(glGetError() == GL_NO_ERROR);
}
void reshape(int x, int y)
{
    viewport[0] = x;
    viewport[1] = y;
}
int main(int argc, char **argv)
{
    memset(results_time, 0, sizeof(results_time));
    memset(results_frames, 0, sizeof(results_frames));
    // init glut
    glutInit(&argc, argv);
    glutInitDisplayMode(GLUT_DOUBLE | GLUT_RGBA);
    glutCreateWindow("quadtest");
    glutReshapeFunc(reshape);
    glutDisplayFunc(display);
    glewInit();
    // init gl stuff
    glGenTextures(1, &colourTex);
    glBindTexture(GL_TEXTURE_2D, colourTex);
#if USE_32_BIT
    glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA32F, RESOLUTION_X, RESOLUTION_Y, 0, GL_RGBA, GL_UNSIGNED_BYTE, NULL);
#else
    glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, RESOLUTION_X, RESOLUTION_Y, 0, GL_RGBA, GL_UNSIGNED_BYTE, NULL);
#endif
    /*
    GLuint stencilRB;
    glGenRenderbuffers(1, &stencilRB);
    glBindRenderbuffer(GL_RENDERBUFFER, stencilRB);
    glRenderbufferStorage(GL_RENDERBUFFER, GL_DEPTH_STENCIL, RESOLUTION_X, RESOLUTION_Y);
    */
    glGenFramebuffers(1, &fbo);
    glBindFramebuffer(GL_FRAMEBUFFER, fbo);
    glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, colourTex, 0);
    //glFramebufferRenderbuffer(GL_FRAMEBUFFER, GL_STENCIL_ATTACHMENT, GL_RENDERBUFFER, stencilRB);
    assert(glCheckFramebufferStatus(GL_FRAMEBUFFER) == GL_FRAMEBUFFER_COMPLETE);
    glGenBuffers(1, &vbo);
    glBindBuffer(GL_ARRAY_BUFFER, vbo);
    glBufferData(GL_ARRAY_BUFFER, sizeof(triangleVerts), triangleVerts, GL_STATIC_DRAW);
    GLuint v = glCreateShader(GL_VERTEX_SHADER);
    GLuint vp = glCreateShader(GL_VERTEX_SHADER);
    GLuint f = glCreateShader(GL_FRAGMENT_SHADER);
    GLuint fd = glCreateShader(GL_FRAGMENT_SHADER);
    glShaderSource(v, 1, &vertexShaderSrc, NULL);
    glShaderSource(vp, 1, &vertexShaderPointsSrc, NULL);
    glShaderSource(f, 1, &fragmentShaderSrc, NULL);
    glShaderSource(fd, 1, &fragmentShaderDiscardSrc, NULL);
    // compile the shaders before attaching and linking
    glCompileShader(v);
    glCompileShader(vp);
    glCompileShader(f);
    glCompileShader(fd);
    GLint ok = GL_TRUE;
    shader = glCreateProgram();
    glAttachShader(shader, v);
    glAttachShader(shader, f);
    glLinkProgram(shader);
    glGetProgramiv(shader, GL_LINK_STATUS, &ok);
    assert(ok == GL_TRUE);
    /*
    char log[512];
    int n;
    glGetShaderInfoLog(v, 512, &n, log);
    printf("%s\n", log);
    glGetProgramInfoLog(shader, 512, &n, log);
    printf("%s\n", log);
    */
    shaderPoints = glCreateProgram();
    glAttachShader(shaderPoints, vp);
    glAttachShader(shaderPoints, f);
    glLinkProgram(shaderPoints);
    glGetProgramiv(shaderPoints, GL_LINK_STATUS, &ok);
    assert(ok == GL_TRUE);
    shaderDiscard = glCreateProgram();
    glAttachShader(shaderDiscard, v);
    glAttachShader(shaderDiscard, fd);
    glLinkProgram(shaderDiscard);
    glGetProgramiv(shaderDiscard, GL_LINK_STATUS, &ok);
    assert(ok == GL_TRUE);
    glDisable(GL_DEPTH_TEST);
    assert(glGetError() == GL_NO_ERROR);
    glutMainLoop();
    return 0;
}
Interestingly, using GL_RGBA32F 32-bit colour impacts performance a fair bit, also bringing the overhead of the discard method back to approximately the same as a full screen quad. The glPolygonStipple method gives dramatic improvements in this case, more so than with 8 bit. There is a discrepancy with the previous glPolygonStipple result too; I can reproduce both and haven't narrowed down the difference yet.
output for GL_RGBA:
full rgba8 0.059ms
full discard rgba8 0.112ms
full stipple rgba8 0.050ms
draw points rgba8 0.079ms
quarter rgba8 0.004ms
one rgba8 <0.001ms
output for GL_RGBA32F:
full rgba32 0.240ms
full discard rgba32 0.241ms
full stipple rgba32 0.101ms
draw points rgba32 0.091ms
quarter rgba32 0.015ms
one rgba32 <0.001ms
Drawing points and positioning from gl_VertexID will beat glPolygonStipple for GL_RGBA32F. I'd assume this trend would carry on for more expensive shaders (or at least memory-intensive).
Are there any other methods to draw this grid?
Exactly this grid? Well, in that case your grid has a periodicity of 4 and an offset of -1 in the x and -2 in the y direction. So the fragment shader to produce it (discarding the "black" pixels) would be

void main()
{
    // gl_FragCoord holds floats, so cast to int before using the % operator
    if( ((int(gl_FragCoord.x) - 1) % 4) == 0 && ((int(gl_FragCoord.y) - 2) % 4) == 0 )
        discard;
    gl_FragColor = vec4(1,1,1,1);
}
Setting the stencil op to always replace the stencil value will set the stencil buffer to your ref value everywhere no pixels are discarded.
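For instance (a sketch; the ref value 1 is arbitrary):

// write 1 into the stencil buffer wherever a fragment survives the discard
glEnable(GL_STENCIL_TEST);
glStencilFunc(GL_ALWAYS, 1, 0xFF);         // always pass, ref = 1
glStencilOp(GL_KEEP, GL_KEEP, GL_REPLACE); // replace the stencil value on pass
// ... draw the full screen quad with the discarding shader ...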
If you can't express your grid by some kind of formula, well, use a texture instead.
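A sketch of that variant (GLSL 1.30-style; the sampler name and the tiling by the mask size are illustrative):

uniform sampler2D gridMask; // one texel per pixel of the tile, 0 where the grid is "off"
void main()
{
    ivec2 p = ivec2(gl_FragCoord.xy) % textureSize(gridMask, 0); // tile the mask
    if (texelFetch(gridMask, p, 0).r == 0.0)
        discard;
    gl_FragColor = vec4(1,1,1,1);
}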
The scattered memory writes of a sparse grid may simply mean more overhead that can't be avoided.
Draw GL_POINTs
Use glPolygonStipple
Initialize the stencil buffer with the pattern for masking a full screen quad
Whatever you do, do not use the discard method if the fragment shader is expensive[1]; it clogs the pipeline with many threads which don't do anything.
[1] Either takes a long time to execute or uses lots of registers or local memory

OpenGL repeated calls to glTexImage2D and alpha blending

This is more out of curiosity than for any practical purpose: is there anything in the OpenGL specification that suggests that calling glTexImage2D many times (e.g., once per frame) is illegal? I mean illegal as in 'it could produce wrong results', not just inefficient (suppose I don't care about the performance impact of not using glTexSubImage2D instead).
The reason I'm asking is that I noticed some very odd artifacts when drawing overlapping, texture-mapped primitives that use a partly-transparent texture which is loaded once per every frame using glTexImage2D (see the attached picture): after a few seconds (i.e., a few hundred frames), small rectangular black patches appear on the screen (they're actually flipping between black and normal between consecutive frames).
I'm attaching below the simplest example code I could write that exhibits the problem.
#include <stdio.h>
#ifndef __APPLE__
# include <SDL/SDL.h>
# include <SDL/SDL_opengl.h>
#else
# include <SDL.h>
# include <SDL_opengl.h>
#endif
/* some constants and variables that several functions use */
const int width = 640;
const int height = 480;
#define texSize 64
GLuint vbo;
GLuint tex;
/* forward declaration, creates a random texture; uses glTexSubImage2D if
update is non-zero (otherwise glTexImage2D) */
void createTexture(GLuint label, int update);
int init()
{
    /* SDL initialization */
    if (SDL_Init(SDL_INIT_VIDEO) < 0)
        return 0;
    SDL_GL_SetAttribute(SDL_GL_DOUBLEBUFFER, 1);
    if (!SDL_SetVideoMode(width, height, 0, SDL_OPENGL)) {
        fprintf(stderr, "Couldn't initialize OpenGL");
        return 0;
    }
    /* OpenGL initialization */
    glClearColor(0, 0, 0, 0);
    glEnable(GL_TEXTURE_2D);
    glEnable(GL_BLEND);
    glBlendFunc(GL_ONE, GL_ONE_MINUS_SRC_ALPHA);
    glViewport(0, 0, width, height);
    glMatrixMode(GL_PROJECTION);
    glLoadIdentity();
    glOrtho(0, width, height, 0, -1, 1);
    glMatrixMode(GL_MODELVIEW);
    /* creating the VBO and the textures */
    glGenBuffers(1, &vbo);
    glBindBuffer(GL_ARRAY_BUFFER, vbo);
    glBufferData(GL_ARRAY_BUFFER, 1024, 0, GL_DYNAMIC_DRAW);
    glGenTextures(1, &tex);
    createTexture(tex, 0);
    glEnableClientState(GL_VERTEX_ARRAY);
    glEnableClientState(GL_TEXTURE_COORD_ARRAY);
    return 1;
}
/* draw a triangle at the specified point */
void drawTriangle(GLfloat x, GLfloat y)
{
    GLfloat coords1[12] = {0, 0, 0, 0, /**/200, 0, 1, 0, /**/200, 150, 1, 1};
    glLoadIdentity();
    glTranslatef(x, y, 0);
    glBufferSubData(GL_ARRAY_BUFFER, 0, sizeof(coords1), coords1);
    glVertexPointer(2, GL_FLOAT, 4*sizeof(GLfloat), (void*)0);
    glTexCoordPointer(2, GL_FLOAT, 4*sizeof(GLfloat),
                      (char*)0 + 2*sizeof(GLfloat));
    glDrawArrays(GL_TRIANGLES, 0, 3);
}

void render()
{
    glClear(GL_COLOR_BUFFER_BIT);
    drawTriangle(250, 50);
    createTexture(tex, 0);
    drawTriangle(260, 120);
    SDL_GL_SwapBuffers();
}

void cleanup()
{
    glDeleteTextures(1, &tex);
    glDeleteBuffers(1, &vbo);
    SDL_Quit();
}
int main(int argc, char* argv[])
{
    SDL_Event event;
    if (!init()) return 1;
    while (1) {
        while (SDL_PollEvent(&event))
            if (event.type == SDL_QUIT)
                return 0;
        render();
    }
    cleanup();
    return 0;
}
void createTexture(GLuint label, int update)
{
    GLubyte data[texSize*texSize*4];
    GLubyte* p;
    int i, j;
    glBindTexture(GL_TEXTURE_2D, label);
    for (i = 0; i < texSize; ++i) {
        for (j = 0; j < texSize; ++j) {
            p = data + (i + j*texSize)*4;
            p[0] = ((i % 8) > 4 ? 255 : 0);
            p[1] = ((j % 8) > 4 ? 255 : 0);
            p[2] = ((i % 8) > 4 ? 255 : 0);
            p[3] = 255 - i*3;
        }
    }
    if (!update)
        glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA8, texSize, texSize, 0, GL_RGBA,
                     GL_UNSIGNED_BYTE, data);
    else
        glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, texSize, texSize, GL_RGBA,
                        GL_UNSIGNED_BYTE, data);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
}
Notes:
I'm using SDL, but I've seen the same happening in wxWidgets, so it's not an SDL-related problem.
If I use glTexSubImage2D instead for every frame (use update = 1 in createTexture), the artifacts disappear.
If I disable blending, there are no more artifacts.
I've been testing this on a late 2010 MacBook Air, though I doubt that's particularly relevant.
This is clearly an OpenGL implementation bug (just calling glTexImage2D in a loop should not cause this to happen).
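The practical workaround, as note 2 above already says, is to allocate the texture storage once and only overwrite it per frame; a minimal sketch using the names from the code above:

/* once, at init time: allocate storage (the data pointer may be NULL) */
glBindTexture(GL_TEXTURE_2D, tex);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA8, texSize, texSize, 0, GL_RGBA,
             GL_UNSIGNED_BYTE, NULL);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);

/* every frame: only replace the contents of the existing storage */
glBindTexture(GL_TEXTURE_2D, tex);
glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, texSize, texSize, GL_RGBA,
                GL_UNSIGNED_BYTE, data);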