OpenGL repeated calls to glTexImage2D and alpha blending

This is more out of curiosity than for any practical purpose: is there anything in the OpenGL specification that suggests that calling glTexImage2D many times (e.g., once per frame) is illegal? I mean illegal as in 'it could produce wrong results', not just inefficient (suppose I don't care about the performance impact of not using glTexSubImage2D instead).
The reason I'm asking is that I noticed some very odd artifacts when drawing overlapping, texture-mapped primitives that use a partly transparent texture which is reloaded every frame with glTexImage2D (see the attached picture): after a few seconds (i.e., a few hundred frames), small rectangular black patches appear on the screen (they actually flip between black and normal on consecutive frames).
I'm attaching below the simplest example code I could write that exhibits the problem.
#include <stdio.h>

#ifndef __APPLE__
# include <SDL/SDL.h>
# include <SDL/SDL_opengl.h>
#else
# include <SDL.h>
# include <SDL_opengl.h>
#endif

/* some constants and variables that several functions use */
const int width = 640;
const int height = 480;
#define texSize 64
GLuint vbo;
GLuint tex;

/* forward declaration, creates a random texture; uses glTexSubImage2D if
   update is non-zero (otherwise glTexImage2D) */
void createTexture(GLuint label, int update);

int init()
{
    /* SDL initialization */
    if (SDL_Init(SDL_INIT_VIDEO) < 0)
        return 0;
    SDL_GL_SetAttribute(SDL_GL_DOUBLEBUFFER, 1);
    if (!SDL_SetVideoMode(width, height, 0, SDL_OPENGL)) {
        fprintf(stderr, "Couldn't initialize OpenGL");
        return 0;
    }

    /* OpenGL initialization */
    glClearColor(0, 0, 0, 0);
    glEnable(GL_TEXTURE_2D);
    glEnable(GL_BLEND);
    glBlendFunc(GL_ONE, GL_ONE_MINUS_SRC_ALPHA);
    glViewport(0, 0, width, height);
    glMatrixMode(GL_PROJECTION);
    glLoadIdentity();
    glOrtho(0, width, height, 0, -1, 1);
    glMatrixMode(GL_MODELVIEW);

    /* creating the VBO and the textures */
    glGenBuffers(1, &vbo);
    glBindBuffer(GL_ARRAY_BUFFER, vbo);
    glBufferData(GL_ARRAY_BUFFER, 1024, 0, GL_DYNAMIC_DRAW);
    glGenTextures(1, &tex);
    createTexture(tex, 0);

    glEnableClientState(GL_VERTEX_ARRAY);
    glEnableClientState(GL_TEXTURE_COORD_ARRAY);
    return 1;
}

/* draw a triangle at the specified point */
void drawTriangle(GLfloat x, GLfloat y)
{
    GLfloat coords1[12] = {0, 0, 0, 0, /**/ 200, 0, 1, 0, /**/ 200, 150, 1, 1};
    glLoadIdentity();
    glTranslatef(x, y, 0);
    glBufferSubData(GL_ARRAY_BUFFER, 0, sizeof(coords1), coords1);
    glVertexPointer(2, GL_FLOAT, 4*sizeof(GLfloat), (void*)0);
    glTexCoordPointer(2, GL_FLOAT, 4*sizeof(GLfloat),
                      (char*)0 + 2*sizeof(GLfloat));
    glDrawArrays(GL_TRIANGLES, 0, 3);
}

void render()
{
    glClear(GL_COLOR_BUFFER_BIT);
    drawTriangle(250, 50);
    createTexture(tex, 0);
    drawTriangle(260, 120);
    SDL_GL_SwapBuffers();
}

void cleanup()
{
    glDeleteTextures(1, &tex);
    glDeleteBuffers(1, &vbo);
    SDL_Quit();
}

int main(int argc, char* argv[])
{
    SDL_Event event;
    if (!init()) return 1;
    while (1) {
        while (SDL_PollEvent(&event))
            if (event.type == SDL_QUIT)
                return 0;
        render();
    }
    cleanup();
    return 0;
}

void createTexture(GLuint label, int update)
{
    GLubyte data[texSize*texSize*4];
    GLubyte* p;
    int i, j;
    glBindTexture(GL_TEXTURE_2D, label);
    for (i = 0; i < texSize; ++i) {
        for (j = 0; j < texSize; ++j) {
            p = data + (i + j*texSize)*4;
            p[0] = ((i % 8) > 4 ? 255 : 0);
            p[1] = ((j % 8) > 4 ? 255 : 0);
            p[2] = ((i % 8) > 4 ? 255 : 0);
            p[3] = 255 - i*3;
        }
    }
    if (!update)
        glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA8, texSize, texSize, 0, GL_RGBA,
                     GL_UNSIGNED_BYTE, data);
    else
        glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, texSize, texSize, GL_RGBA,
                        GL_UNSIGNED_BYTE, data);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
}
Notes:
- I'm using SDL, but I've seen the same thing happen with wxWidgets, so it's not an SDL-related problem.
- If I use glTexSubImage2D every frame instead (update = 1 in createTexture), the artifacts disappear.
- If I disable blending, there are no more artifacts.
- I've been testing this on a late-2010 MacBook Air, though I doubt that's particularly relevant.

This is clearly an OpenGL implementation bug: merely calling glTexImage2D in a loop, however inefficient, should not produce incorrect results.
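For what it's worth, the workaround from the notes (the update = 1 path in createTexture) is also the usual pattern: allocate the texture storage once with glTexImage2D, then only overwrite the texels each frame with glTexSubImage2D. A minimal sketch, assuming the same tex, texSize and data as in the example:

/* at init time: allocate storage once; NULL means no initial data */
glBindTexture(GL_TEXTURE_2D, tex);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA8, texSize, texSize, 0,
             GL_RGBA, GL_UNSIGNED_BYTE, NULL);

/* every frame: replace the contents without reallocating */
glBindTexture(GL_TEXTURE_2D, tex);
glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, texSize, texSize,
                GL_RGBA, GL_UNSIGNED_BYTE, data);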

Related

Read pixels from SFML texture using PBOs

I am creating a render texture in SFML (sf::RenderTexture for off-screen rendering), drawing to it and trying to read the pixels asynchronously using PBOs. Here is a minimal example of what I'm doing:
#include <SFML/Window.hpp>
#include <SFML/Graphics.hpp>
#define GL_SILENCE_DEPRECATION
#include <SFML/OpenGL.hpp>
#include <iostream>

int main()
{
    // create texture and circle to draw on texture
    int texSize = 200;
    sf::RenderTexture tex;
    tex.create(texSize, texSize);
    sf::CircleShape circle(50);
    circle.setPosition(0, 0);
    circle.setFillColor(sf::Color::Blue);

    // initialize PBOs
    int nPbos = 2;
    GLuint* pbos = new GLuint[nPbos];
    glGenBuffers(nPbos, pbos);
    for (int i = 0; i < nPbos; ++i) {
        glBindBuffer(GL_PIXEL_PACK_BUFFER, pbos[i]);
        glBufferData(GL_PIXEL_PACK_BUFFER, texSize * texSize * 4, NULL, GL_STREAM_READ);
    }
    glBindBuffer(GL_PIXEL_PACK_BUFFER, 0);

    int pboIdx = 0;
    for (int frame = 0; frame < 100; ++frame) {
        // draw stuff
        tex.clear(sf::Color::White);
        tex.draw(circle);
        tex.display();

        glReadBuffer(GL_COLOR_ATTACHMENT0);
        if (frame < nPbos) {
            glBindBuffer(GL_PIXEL_PACK_BUFFER, pbos[pboIdx]);
            glReadPixels(0, 0, texSize, texSize, GL_BGRA, GL_UNSIGNED_BYTE, 0);
        }
        else {
            glBindBuffer(GL_PIXEL_PACK_BUFFER, pbos[pboIdx]);
            unsigned char* ptr = (unsigned char*)glMapBuffer(GL_PIXEL_PACK_BUFFER, GL_READ_ONLY);
            if (ptr != nullptr) {
                std::cout << "OK" << std::endl;
                glUnmapBuffer(GL_PIXEL_PACK_BUFFER);
            }
            glReadPixels(0, 0, texSize, texSize, GL_BGRA, GL_UNSIGNED_BYTE, 0);
        }
        pboIdx = (pboIdx + 1) % nPbos;
        glBindBuffer(GL_PIXEL_PACK_BUFFER, 0);
    }
    return 0;
}
I am not getting any errors from glGetError(). However, I never enter the if-condition, i.e., the mapped array of pixels is always empty. I can't figure out what is wrong with the code and why I am not getting the pixels from the texture; am I missing a bind somewhere?
Here is an example where data is read back from the color buffer.
The example uses GLFW and GLEW.
#include <GL/glew.h>
#include <GLFW/glfw3.h>
#include <iostream>

const unsigned int SCR_WIDTH = 800;
const unsigned int SCR_HEIGHT = 600;

GLuint w_pbo[2];
int w_readIndex = 0;
int w_writeIndex = 1;

int main()
{
    // glfw: initialize and configure
    // ------------------------------
    glfwInit();
    glfwWindowHint(GLFW_CONTEXT_VERSION_MAJOR, 4);
    glfwWindowHint(GLFW_CONTEXT_VERSION_MINOR, 0);
    glfwWindowHint(GLFW_OPENGL_PROFILE, GLFW_OPENGL_CORE_PROFILE);

    // glfw window creation
    // --------------------
    GLFWwindow* window = glfwCreateWindow(SCR_WIDTH, SCR_HEIGHT, "LearnOpenGL", NULL, NULL);
    if (window == NULL)
    {
        std::cout << "Failed to create GLFW window" << std::endl;
        glfwTerminate();
        return -1;
    }
    glfwMakeContextCurrent(window);
    glewInit();

    glGenBuffers(2, w_pbo);
    glBindBuffer(GL_PIXEL_PACK_BUFFER, w_pbo[0]);
    glBufferData(GL_PIXEL_PACK_BUFFER, SCR_WIDTH * SCR_HEIGHT * 4, 0, GL_STREAM_READ);
    glBindBuffer(GL_PIXEL_PACK_BUFFER, w_pbo[1]);
    glBufferData(GL_PIXEL_PACK_BUFFER, SCR_WIDTH * SCR_HEIGHT * 4, 0, GL_STREAM_READ);
    // unbind buffers for now
    glBindBuffer(GL_PIXEL_PACK_BUFFER, 0);

    while (!glfwWindowShouldClose(window))
    {
        glClearColor(0.1f, 0.1f, 0.1f, 1.0f);
        glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);

        // Draw your objects here

        w_writeIndex = (w_writeIndex + 1) % 2;
        w_readIndex = (w_readIndex + 1) % 2;

        glBindBuffer(GL_PIXEL_PACK_BUFFER, w_pbo[w_writeIndex]);
        // copy from framebuffer to PBO asynchronously; it will be ready in the NEXT frame
        glReadPixels(0, 0, SCR_WIDTH, SCR_HEIGHT, GL_RGBA, GL_UNSIGNED_BYTE, nullptr);

        // now read the other PBO, which should already be in CPU-accessible memory
        glBindBuffer(GL_PIXEL_PACK_BUFFER, w_pbo[w_readIndex]);
        unsigned char* downsampleData = (unsigned char*)glMapBuffer(GL_PIXEL_PACK_BUFFER, GL_READ_ONLY);
        if (downsampleData) {
            std::cout << "Pointer is not NULL: " << static_cast<unsigned>(downsampleData[2]) << std::endl;
            glUnmapBuffer(GL_PIXEL_PACK_BUFFER);
        }
        glBindBuffer(GL_PIXEL_PACK_BUFFER, 0);

        glfwSwapBuffers(window);
        glfwPollEvents();
    }
    glfwTerminate();
    return 0;
}
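Note the ping-pong in the loop above: each frame issues glReadPixels into one PBO (which completes asynchronously) while mapping the other PBO that was filled during the previous frame, so glMapBuffer never has to stall on an in-flight transfer.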

C++ CUDA OpenGL not rendering VBO

I am trying to draw a bunch of points on the screen. I'm using CUDA to generate the data (position and color), and OpenGL to draw it. I am trying to get CUDA to update a VBO and then OpenGL to draw it, but I get a blank screen. I am not sure if CUDA is not able to update the buffer, or that the buffer is not drawing properly. My GPU is a GTX 1080, and I'm trying to use OpenGL 4.0. Colors are specified by CUDA as well. If my problem is that I need a shader, how do I add that, but also still specify the color through CUDA?
UPDATE: the problem seems to be on the OpenGL side. I've updated the code to use a triangle. So a new question to add: why is my VBO not being rendered?
Here is the code:
GPUmain.cuh:
#include <cuda_runtime.h>
#include "device_launch_parameters.h"
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/remove.h>
#include <curand.h>
#include <GL/glew.h>
#include <SDL_opengl.h>
#include <cuda_gl_interop.h>

#define BUFFER_OFFSET(i) ((char *)NULL + (i))

// ver: x, y, z, r, g, b, a
struct ver {
    // x, y, z pos
    GLuint x, y, z;
    // r, g, b, a color
    GLubyte r, g, b, a;
};

class GPU {
public:
    static int nParticles;
    static GLuint vboid;
    static cudaGraphicsResource *CGR;
    // collection of vertices to be simulated and rendered
    static thrust::device_vector<ver> rverts;
    static void init(int w, int h);
    static void compute();
    static void render();
    static void GPUmain();
    static void free();
};
GPUmain.cu:
#include "GPUmain.cuh"
__global__ void uploadVerts(ver *vv, ver *vb) {
int id = threadIdx.x + (blockDim.x * blockIdx.x);
vb[id] = vv[id];
vb[id].x = vv[id].x;
vb[id].y = vv[id].y;
vb[id].z = vv[id].z;
vb[id].r = vv[id].r;
vb[id].g = vv[id].g;
vb[id].b = vv[id].b;
vb[id].a = vv[id].a;
}
__global__ void genGrid(ver *v) {
int i = threadIdx.x + (blockDim.x * blockIdx.x);
float x = (float)(i % ((int)1080));
float y = (float)(i / ((int)1920));
v[i].x = x;
v[i].y = y;
v[i].z = 1;
v[i].r = 255;
v[i].g = 0;
v[i].b = 0;
v[i].a = 0;
}
int GPU::nParticles;
GLuint GPU::vboid;
cudaGraphicsResource *GPU::CGR;
//collection of vertices to be simulated and rendered
thrust::device_vector<ver> GPU::rverts;
void GPU::init(int w, int h)
{
nParticles = w * h;
/*rverts.resize(nParticles, ver{0,0,0,0,0,0,0});
genGrid<<<nParticles/1024,1024>>>(thrust::raw_pointer_cast(&rverts[0]));*/
ver e[3] = {
ver{1024,200,2,255,0,0,255},
ver{499,288,173,0,255,0,255},
ver{462,1674,8,0,0,255,255}
};
glGenBuffers(1,&vboid);
glBindBuffer(GL_ARRAY_BUFFER,vboid);
glBufferData(GL_ARRAY_BUFFER,3*sizeof(ver),e,GL_DYNAMIC_DRAW);
glBindBuffer(GL_ARRAY_BUFFER, 0);
/*cudaGraphicsGLRegisterBuffer(&CGR,vboid,cudaGraphicsMapFlagsWriteDiscard);*/
}
void GPU::compute()
{
}
void GPU::render()
{
/*ver *verts;
size_t size;
cudaGraphicsMapResources(1, &CGR, 0);
cudaGraphicsResourceGetMappedPointer((void**)&verts, &size, CGR);
uploadVerts<<<nParticles/1024, 1024>>>(thrust::raw_pointer_cast(&rverts[0]), verts);
cudaGraphicsUnmapResources(1, &CGR, 0);
cudaDeviceSynchronize();*/
glClearColor(0, 0, 0, 0); // we clear the screen with black (else, frames would overlay...)
glClear(GL_COLOR_BUFFER_BIT); // clear the buffer
glBindBuffer(GL_ARRAY_BUFFER, vboid);
glEnableClientState(GL_VERTEX_ARRAY);
glEnableClientState(GL_COLOR_ARRAY);
glVertexPointer(3, GL_INT, 4 * sizeof(GLubyte), 0);
glColorPointer(4, GL_BYTE, 3 * sizeof(GLuint), BUFFER_OFFSET(3 * sizeof(GLuint)));
glDrawArrays(GL_TRIANGLES, 0, 3);
glDisableClientState(GL_VERTEX_ARRAY);
glDisableClientState(GL_COLOR_ARRAY);
glBindBuffer(GL_ARRAY_BUFFER, 0);
}
void GPU::GPUmain()
{
compute();
render();
}
void GPU::free()
{
cudaGraphicsUnregisterResource(CGR);
glBindBuffer(GL_ARRAY_BUFFER, vboid);
glDeleteBuffers(1, &vboid);
glBindBuffer(GL_ARRAY_BUFFER, 0);
rverts.clear();
thrust::device_vector<ver>().swap(rverts);
}
The relevant parts (those containing OpenGL code) of window.cpp:
bool Window::init()
{
    // initialize SDL
    if (SDL_Init(SDL_INIT_EVERYTHING) != 0) {
        log << "Failed to initialize SDL!\n";
        return false;
    }
    // set window attributes
    SDL_GL_SetAttribute(SDL_GL_CONTEXT_PROFILE_MASK, SDL_GL_CONTEXT_PROFILE_CORE);
    SDL_GL_SetAttribute(SDL_GL_CONTEXT_MAJOR_VERSION, 4);
    SDL_GL_SetAttribute(SDL_GL_CONTEXT_MINOR_VERSION, 0);
    SDL_GL_SetAttribute(SDL_GL_STENCIL_SIZE, 8);
    SDL_GL_SetAttribute(SDL_GL_DOUBLEBUFFER, 1);
    // create window
    window = SDL_CreateWindow(
        name.c_str(),
        SDL_WINDOWPOS_CENTERED,
        SDL_WINDOWPOS_CENTERED,
        width,
        height,
        SDL_WINDOW_OPENGL
    );
    // create opengl context in the window
    glcontext = SDL_GL_CreateContext(window);
    SDL_GL_SetSwapInterval(1);
    // check if the window was created
    if (window == nullptr) {
        log << "Failed to create window!\n";
        return false;
    }
    // turn on experimental features
    glewExperimental = GL_TRUE;
    // initialize GLEW
    if (glewInit() != GLEW_OK) {
        log << "Failed to Init GLEW";
        return false;
    }
    // set drawing parameters
    glViewport(0, 0, width, height);
    glMatrixMode(GL_PROJECTION);
    glLoadIdentity();
    glOrtho(0, width, 0, height, 0, 255);
    glPointSize(1);
    glEnable(GL_BLEND);                                // allow transparency
    glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA); // how transparency acts
    std::cout << sizeof(ver);
    GPU::init(width, height);
    return true;
}

void Window::renderFrame()
{
    GPU::render();
    SDL_GL_SwapWindow(window); // swap buffers
}
If you use the fixed-function attributes and client-side capabilities, then you have to use a compatibility profile context.
See Fixed Function Pipeline and Legacy OpenGL.
If you want to use a core profile, then you have to use a Vertex Array Object and a shader program. To keep the client-side vertex arrays as written, request a compatibility profile instead. Change

SDL_GL_SetAttribute(SDL_GL_CONTEXT_PROFILE_MASK, SDL_GL_CONTEXT_PROFILE_CORE);

to

SDL_GL_SetAttribute(SDL_GL_CONTEXT_PROFILE_MASK, SDL_GL_CONTEXT_PROFILE_COMPATIBILITY);
The following geometry
ver e[3] = {
    //   x     y    z    r    g    b    a
    ver{1024,  200,   2, 255,   0,   0, 255},
    ver{ 499,  288, 173,   0, 255,   0, 255},
    ver{ 462, 1674,   8,   0,   0, 255, 255}
};

is clipped by the near plane of the orthographic projection. Note that in view space the z-axis points out of the viewport.
Change the orthographic projection (or invert the z coordinates of the geometry) from

glOrtho(0, width, 0, height, 0, 255);

to

glOrtho(0, width, 0, height, -255, 0);
The stride parameter of glVertexPointer and glColorPointer is the byte offset between consecutive attributes, so it has to be sizeof(ver). Also, the type of the color components is GL_UNSIGNED_BYTE rather than GL_BYTE. Instead of

glVertexPointer(3, GL_INT, 4 * sizeof(GLubyte), 0);
glColorPointer(4, GL_BYTE, 3 * sizeof(GLuint), BUFFER_OFFSET(3 * sizeof(GLuint)));

it has to be

glVertexPointer(3, GL_INT, sizeof(ver), 0);
glColorPointer(4, GL_UNSIGNED_BYTE, sizeof(ver), BUFFER_OFFSET(3 * sizeof(GLuint)));
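Putting both corrections together, the attribute setup in GPU::render would look roughly like this (a sketch, assuming the ver struct and the BUFFER_OFFSET macro from the question):

glBindBuffer(GL_ARRAY_BUFFER, vboid);
glEnableClientState(GL_VERTEX_ARRAY);
glEnableClientState(GL_COLOR_ARRAY);
// stride = size of one complete vertex record
glVertexPointer(3, GL_INT, sizeof(ver), BUFFER_OFFSET(0));
// the four color bytes start right after the three GLuint positions
glColorPointer(4, GL_UNSIGNED_BYTE, sizeof(ver), BUFFER_OFFSET(3 * sizeof(GLuint)));
glDrawArrays(GL_TRIANGLES, 0, 3);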

Wrong texture is used when running on Windows (7 & 10 tested) without graphics drivers installed / in safe mode (OpenGL 1.1 is used): is it a bug?

I'm writing something that I want to work on as much hardware as possible, but I've found an issue where I don't believe the fault is with my code. Can anyone confirm?
UPDATE: this also happens on Windows 7, and when booting in safe mode to force no drivers. In both cases:
OpenGL Version: 1.1.0
Vendor: Microsoft Corporation
Renderer: GDI Generic
When running on Windows 10 with an older graphics card, Microsoft's OpenGL 1.1 driver is used. Yeah, I know that shaders are the way to go, and that older cards aren't actually supported on Windows 10, but since when did that stop anyone... Like I said, I want maximum compatibility. Code that demonstrates the issue is included:
//link library
//opengl32
//linker options
//-lmingw32 -lSDL2main -lSDL2
//include SDL2 dirs-
//compiler-
//SDL2-2.0.4\i686-w64-mingw32\include\SDL2
//linker-
//SDL2-2.0.4\i686-w64-mingw32\lib
#include <SDL.h>
#include <SDL_opengl.h>

void draw();

bool fix = false;
bool wire = false;
GLuint tex1 = 0;
GLuint tex2 = 0;

int main(int argc, char *argv[])
{
    SDL_Init(SDL_INIT_VIDEO | SDL_INIT_TIMER);
    SDL_Window* mywindow = SDL_CreateWindow("press q or w to toggle fixes...",
                                            100, 100, 640, 480,
                                            SDL_WINDOW_OPENGL | SDL_WINDOW_SHOWN);
    SDL_GL_CreateContext(mywindow);
    glEnable(GL_TEXTURE_2D);
    bool loop = true;
    glEnableClientState(GL_VERTEX_ARRAY);
    glEnableClientState(GL_TEXTURE_COORD_ARRAY);
    while (loop)
    {
        SDL_Event event;
        while (SDL_PollEvent(&event))
        {
            if (event.type == SDL_QUIT)
                loop = false;
            if (event.type == SDL_KEYDOWN)
            {
                switch (event.key.keysym.sym)
                {
                case SDLK_ESCAPE:
                    loop = false;
                    break;
                case SDLK_q:
                    fix = !fix;
                    break;
                case SDLK_w:
                    if (wire)
                    {
                        glPolygonMode(GL_FRONT, GL_FILL);
                    }
                    else
                    {
                        glPolygonMode(GL_FRONT, GL_LINE);
                    }
                    wire = !wire;
                    break;
                default:
                    break;
                }
            }
        }
        draw();
        SDL_GL_SwapWindow(mywindow);
    }
    glDisableClientState(GL_VERTEX_ARRAY);
    glDisableClientState(GL_TEXTURE_COORD_ARRAY);
    glDeleteTextures(1, &tex1);
    glDeleteTextures(1, &tex2);
    return 0;
}

void texturator(GLuint* tex, unsigned char r, unsigned char g, unsigned char b)
{
    if (!(*tex))
    {
        glGenTextures(1, tex);
        glBindTexture(GL_TEXTURE_2D, *tex);
        unsigned char text[4*4*4]; // 4 * w * h
        for (unsigned int x = 0; x < 4; x++)
        {
            for (unsigned int y = 0; y < 4; y++)
            {
                unsigned char* pix = &(text[((y*4)+x)*4]);
                pix[0] = x*r;
                pix[1] = y*g;
                pix[2] = x*b;
                pix[3] = 255;
            }
        }
        glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, 4, 4, 0, GL_RGBA, GL_UNSIGNED_BYTE, text);
        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
    }
    else
    {
        if (fix)
        {
            glBindTexture(GL_TEXTURE_2D, 0);
        }
        glBindTexture(GL_TEXTURE_2D, *tex);
    }
}

void draw()
{
    glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
    glMatrixMode(GL_PROJECTION);
    glLoadIdentity();
    GLfloat mat1[] = {1.0, 0, 0, 0, 0, 1.0, 0, 0, 0, 0, 1.0, 0, 0, 0, 0, 1};
    glLoadMatrixf(mat1);
    glMatrixMode(GL_MODELVIEW);
    glLoadIdentity();
    GLfloat verts[] = {0.0f,0.0f,0.0f, 1.0f,0.0f,0.0f, 1.0f,1.0f,0.0f, 0.0f,1.0f,0.0f};
    GLfloat uvs[] = {0.0f,0.0f, 1.0f,0.0f, 1.0f,1.0f, 0.0f,1.0f};
    glVertexPointer(3, GL_FLOAT, 0, verts);
    glTexCoordPointer(2, GL_FLOAT, 0, uvs);
    texturator(&tex1, 0, 60, 60);
    GLuint elem[] = {0,1,2, 2,3,0};
    glDrawElements(GL_TRIANGLES, 6, GL_UNSIGNED_INT, elem);
    glMatrixMode(GL_PROJECTION);
    glLoadIdentity();
    GLfloat mat2[] = {1, 0, 0, 0, -0, 1, 0, 0, 0, 0, 1, 0, -1, 0, 0, 1};
    glLoadMatrixf(mat2);
    glMatrixMode(GL_MODELVIEW);
    glLoadIdentity();
    texturator(&tex2, 60, 10, 0);
    glTexCoordPointer(2, GL_FLOAT, 0, uvs);
    glVertexPointer(3, GL_FLOAT, 0, verts);
    glDrawArrays(GL_TRIANGLE_FAN, 0, 4);
}
From the second frame on, tex2 continues to be displayed for both draws, even though gDEBugger says that tex1 is bound. Binding texture 0 before the new bind fixes this. Switching to wireframe instead of fill mode also, curiously, fixes it, while everything is still drawn as wireframe (use the q and w keys to toggle these).
Can anyone shed any light on this? Is it just Microsoft's implementation being..............
And is there a better way to include compatibility with older stuff?
cheers!!
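For reference, the workaround described above, isolated (a minimal sketch of what the q-key "fix" path in texturator does before re-binding):

/* the GDI Generic (OpenGL 1.1) renderer appears to drop the re-bind here,
   so force a state change by binding texture 0 first */
glBindTexture(GL_TEXTURE_2D, 0);
glBindTexture(GL_TEXTURE_2D, *tex);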

Using CUDA, SFML, and OpenGL: Texture Refuses to Appear on Quad

Using various tutorials/examples/documentation/forums online, I have typed out code to allow CUDA to manipulate OpenGL textures such that it can be output to the screen. My method of displaying is to use a PBO and an allocated texture image of a uchar4 array. Despite all my attempts at fixing the problem, the texture will not show up on the 2D surface. I cannot seem to pinpoint the problem.
These are all the things I have checked/done thus far: I have created a PBO and registered it with CUDA, called cudaGraphicsResourceGetMappedPointer and the unmapping equivalent before and after the GPU function calls, made sure that glEnable is called for GL_TEXTURE_2D, glDisable called for any unnecessary values, and unbound textures/buffers when not in need. I have also reset SFML's OpenGL states in case SFML was the cause. Square textures have also been employed. My OpenGL version and CUDA version work for all the function calls I use.
There did not seem to be any errors within the program when I checked cudaError values and OpenGL errors.
I am not sure if this has something to do with it, but when I call:
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
My quad does not seem to display.
I have mainly found inspiration from this website.
Thank you very much!
Here is my code:
Main.cpp
#include <GL/glew.h>
#include <windows.h>
#include <GL/GL.h>
#include <SFML/Window.hpp>
#include <SFML/OpenGL.hpp>
#include <SFML/System.hpp>
#include <SFML/Graphics/RenderWindow.hpp>
#include "GeneralTypedef.h"
#include "OpenGLTest.cuh"

int main()
{
    // create the window
    sf::RenderWindow window(sf::VideoMode(1024, 1024), "OpenGL");
    //window.setVerticalSyncEnabled(true);
    sf::Vector2u windowSize;
    windowSize = sf::Vector2u(window.getSize());
    bool running = true;
    glewInit();
    window.resetGLStates();
    std::printf("OpenGL: %s:", glGetString(GL_VERSION));
    // We will not be using SFML's gl states.
    OpenGLTest* test = new OpenGLTest(window.getSize());
    sf::Time time;
    while (running)
    {
        // handle events
        sf::Event event;
        while (window.pollEvent(event))
        {
            if (event.type == sf::Event::Closed)
            {
                // end the program
                running = false;
            }
            else if (event.type == sf::Event::Resized)
            {
                // adjust the viewport when the window is resized
                glViewport(0, 0, event.size.width, event.size.height);
                windowSize = window.getSize();
            }
        }
        // clear the buffers
        glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
        test->createFrame(time.asMicroseconds());
        test->drawFrame();
        window.display();
    }
    // release resources...
    delete test;
    return 0;
}
OpenGLTest.cuh
#ifndef OPENGLTEST_CUH
#define OPENGLTEST_CUH

#include <GL/glew.h>
#include <windows.h>
#include <GL/GL.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include <cuda_gl_interop.h>
#include <SFML/OpenGL.hpp>
#include <SFML/Graphics.hpp>
#include <SFML/System.hpp>
#include "GeneralTypedef.h"

class OpenGLTest
{
public:
    uchar4* image;
    GLuint gltexture;
    GLuint pbo;
    cudaGraphicsResource_t cudaPBO;
    uchar4* d_textureBufferData;
    sf::Vector2u windowSize;

    OpenGLTest(sf::Vector2u windowSize)
    {
        this->windowSize = sf::Vector2u(windowSize);
        this->setupOpenGL();
    };

    ~OpenGLTest()
    {
        delete[] image;
        image = nullptr;
        cudaFree(d_textureBufferData);
        d_textureBufferData = nullptr;
        glDeleteTextures(1, &gltexture);
    }

    void drawFrame();
    void createFrame(float time);

private:
    void setupOpenGL();
};

#endif // OPENGLTEST_CUH
OpenGLTest.cu
#include "OpenGLTest.cuh"
__global__ void createGPUTexture(uchar4* d_texture)
{
uint pixelID = blockIdx.x*blockDim.x + threadIdx.x;
d_texture[pixelID].x = 0;
d_texture[pixelID].y = 1;
d_texture[pixelID].z = 1;
d_texture[pixelID].w = 0;
}
__global__ void wow(uchar4* pos, unsigned int width, unsigned int height,
float time)
{
int index = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int x = index%width;
unsigned int y = index / width;
if (index < width*height) {
unsigned char r = (x + (int)time) & 0xff;
unsigned char g = (y + (int)time) & 0xff;
unsigned char b = ((x + y) + (int)time) & 0xff;
// Each thread writes one pixel location in the texture (textel)
pos[index].w = 0;
pos[index].x = r;
pos[index].y = g;
pos[index].z = b;
}
}
void OpenGLTest::drawFrame()
{
glColor3f(1.0f,1.0f,1.0f);
glBindTexture(GL_TEXTURE_2D, gltexture);
glBindBuffer(GL_PIXEL_UNPACK_BUFFER_ARB, pbo);
glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, windowSize.x, windowSize.y, GL_RGBA, GL_UNSIGNED_BYTE, 0);
glBegin(GL_QUADS);
glTexCoord2f(0.0f, 0.0f);
glVertex2f(0.0f, float(windowSize.y));
glTexCoord2f(1.0f, 0.0f);
glVertex2f(float(windowSize.x), float(windowSize.y));
glTexCoord2f(1.0f, 1.0f);
glVertex2f(float(windowSize.x), 0.0f);
glTexCoord2f(0.0f,1.0f);
glVertex2f(0.0f, 0.0f);
glEnd();
glFlush();
// Release
glBindBuffer(GL_PIXEL_UNPACK_BUFFER_ARB, 0);
glBindTexture(GL_TEXTURE_2D, 0);
// Test Triangle
/*
glBegin(GL_TRIANGLES);
glColor3f(0.1, 0.2, 0.3);
glVertex2f(0, 0);
glVertex2f(10, 0);
glVertex2f(0, 100);
glEnd();
*/
}
void OpenGLTest::createFrame(float time)
{
cudaGraphicsMapResources(1, &cudaPBO, 0);
size_t numBytes;
cudaGraphicsResourceGetMappedPointer((void**)&d_textureBufferData, &numBytes, cudaPBO);
int totalThreads = windowSize.x * windowSize.y;
int nBlocks = totalThreads/ 256;
// Run code here.
createGPUTexture << <nBlocks, 256>> >(d_textureBufferData);
//wow << <nBlocks, 256 >> >(d_textureBufferData, windowSize.x, windowSize.y, time);
// Unmap mapping to PBO so that OpenGL can access.
cudaGraphicsUnmapResources(1, &cudaPBO, 0);
}
void OpenGLTest::setupOpenGL()
{
image = new uchar4[1024*1024];
glViewport(0, 0, windowSize.x, windowSize.y);
glMatrixMode(GL_MODELVIEW);
glLoadIdentity();
glMatrixMode(GL_PROJECTION);
glLoadIdentity();
glOrtho(0.0, windowSize.x, windowSize.y, 0.0, -1.0, 1.0);
glEnable(GL_TEXTURE_2D);
glDisable(GL_LIGHTING);
glDisable(GL_DEPTH_TEST);
// Unbind any textures from previous.
glBindTexture(GL_TEXTURE_2D, 0);
glBindBuffer(GL_PIXEL_UNPACK_BUFFER_ARB, 0);
// Create new textures.
glGenTextures(1, &gltexture);
glBindTexture(GL_TEXTURE_2D, gltexture);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
// Create image with same resolution as window.
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, windowSize.x , windowSize.y, 0, GL_RGBA, GL_UNSIGNED_BYTE, image);
// Create pixel buffer boject.
glGenBuffers(1, &pbo);
glBindBuffer(GL_PIXEL_UNPACK_BUFFER_ARB, pbo);
glBufferData(GL_PIXEL_UNPACK_BUFFER_ARB, windowSize.x * windowSize.y * sizeof(uchar4), image, GL_STREAM_COPY);
cudaGraphicsGLRegisterBuffer(&cudaPBO, pbo, cudaGraphicsMapFlagsNone);
glBindBuffer(GL_PIXEL_UNPACK_BUFFER_ARB, 0);
glBindTexture(GL_TEXTURE_2D, 0);
}
GeneralTypedef.h:
#ifndef GENERALTYPEDEF_CUH
#define GENERALTYPEDEF_CUH
typedef unsigned int uint;
#endif // GENERALTYPEDEF_CUH
After rewriting the entire code and understanding it more, I figured out the reason: the color components of the uchar4 in the kernel are mapped from 0-255, and the w component is the transparency (alpha). As such, it should be set to 255 for the image to show. I hope this helps those who may have the same problem; some sites have this value set very low as well.
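In other words, only the alpha needs to change. A corrected version of the wow kernel above (hypothetical name wowFixed; the logic is otherwise identical to the question's code):

__global__ void wowFixed(uchar4* pos, unsigned int width, unsigned int height,
                         float time)
{
    int index = blockIdx.x * blockDim.x + threadIdx.x;
    if (index < width * height) {
        unsigned int x = index % width;
        unsigned int y = index / width;
        pos[index].x = (x + (int)time) & 0xff;        // red
        pos[index].y = (y + (int)time) & 0xff;        // green
        pos[index].z = ((x + y) + (int)time) & 0xff;  // blue
        pos[index].w = 255;                           // alpha: fully opaque (was 0)
    }
}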

texCoordPointer not working in lwjgl

I've been having problems storing texture coordinates in a VBO and then telling OpenGL to use it when it's time to render. In the code below, what I should be getting is a nice 16x16 texture on a square I am making using quads. However, what I actually get is just the top-left pixel of the image, which is red, so I get a big red square. Please tell me what I am doing wrong in great detail.
public void start() {
    try {
        Display.setDisplayMode(new DisplayMode(800, 600));
        Display.create();
    } catch (LWJGLException e) {
        e.printStackTrace();
        System.exit(0);
    }

    // init OpenGL
    GL11.glMatrixMode(GL11.GL_PROJECTION);
    GL11.glLoadIdentity();
    GL11.glOrtho(0, 800, 0, 600, 1, -1);
    GL11.glMatrixMode(GL11.GL_MODELVIEW);
    glEnable(GL_DEPTH_TEST);
    glEnable(GL_TEXTURE_2D);
    glLoadIdentity();

    //loadTextures();
    TextureManager.init();
    makeCube();

    // init OpenGL here
    while (!Display.isCloseRequested()) {
        GL11.glClear(GL11.GL_COLOR_BUFFER_BIT | GL11.GL_DEPTH_BUFFER_BIT);
        // render OpenGL here
        renderCube();
        Display.update();
    }
    Display.destroy();
}

public static void main(String[] argv) {
    Screen screen = new Screen();
    screen.start();
}

int cube;
int texture;

private void makeCube() {
    FloatBuffer cubeBuffer;
    FloatBuffer textureBuffer;

    // Tried using 0,0,16,0,16,16,0,16 for textureData; did not work.
    float[] textureData = new float[]{
        0, 0,
        1, 0,
        1, 1,
        0, 1};
    textureBuffer = BufferUtils.createFloatBuffer(textureData.length);
    textureBuffer.put(texture);
    textureBuffer.flip();
    texture = glGenBuffers();
    glBindBuffer(GL_ARRAY_BUFFER, texture);
    glBufferData(GL_ARRAY_BUFFER, textureBuffer, GL_STATIC_DRAW);
    glBindBuffer(GL_ARRAY_BUFFER, 0);

    float[] cubeData = new float[]{
        /* Front Face */
        100, 100,
        100 + 200, 100,
        100 + 200, 100 + 200,
        100, 100 + 200};
    cubeBuffer = BufferUtils.createFloatBuffer(cubeData.length);
    cubeBuffer.put(cubeData);
    cubeBuffer.flip();
    cube = glGenBuffers();
    glBindBuffer(GL_ARRAY_BUFFER, cube);
    glBufferData(GL_ARRAY_BUFFER, cubeBuffer, GL_STATIC_DRAW);
    glBindBuffer(GL_ARRAY_BUFFER, 0);
}

private void renderCube() {
    TextureManager.texture.bind();
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
    GL11.glClear(GL11.GL_COLOR_BUFFER_BIT | GL11.GL_DEPTH_BUFFER_BIT);
    glBindBuffer(GL_ARRAY_BUFFER, texture);
    glTexCoordPointer(2, GL_FLOAT, 0, 0);
    glBindBuffer(GL_ARRAY_BUFFER, cube);
    glVertexPointer(2, GL_FLOAT, 0, 0);
    glEnableClientState(GL_VERTEX_ARRAY);
    glEnableClientState(GL_TEXTURE_COORD_ARRAY);
    glDrawArrays(GL_QUADS, 0, 4);
    glDisableClientState(GL_VERTEX_ARRAY);
    glDisableClientState(GL_TEXTURE_COORD_ARRAY);
}
I believe your problem is in the argument to textureBuffer.put() in this code fragment:
textureBuffer = BufferUtils.createFloatBuffer(textureData.length);
textureBuffer.put(texture);
textureBuffer.flip();
texture is a variable of type int, which has not even been initialized yet. You later use it as a buffer name. The argument should be textureData instead:
textureBuffer.put(textureData);
I normally try to focus on functionality over style when answering questions here, but I can't help it this time: IMHO, texture is a very unfortunate name for a buffer name. It's not only a style and readability question. If you used descriptive names for the variables, you most likely would have spotted this problem immediately.
Say you named the variable for the buffer name bufferId (I call object identifiers "id", even though the official OpenGL terminology is "name"), and the buffer holding the texture coordinates textureCoordBuf. The statement in question would then become:
textureCoordBuf.put(bufferId);
which would jump out as highly suspicious from even a very superficial look at the code.