I wrote a simple parser for the ASCII STL format. When I try to render the triangles with the supplied normals, the resulting object is missing many faces:
This is how it should look:
What I already tried:
explicitly disabled backface culling (though it shouldn't have been active before)
ensured that the depth buffer is enabled
Here is a minimal sample program which reproduces the error:
#include <SDL2/SDL.h>
#include <SDL2/SDL_main.h>
#include <SDL2/SDL_render.h>
#include <SDL2/SDL_opengl.h>
#include <fstream>
#include <iostream>
#include "STLParser.h" // assumed header for the STLParser class (sources omitted here)

int main(int argc, char **argv) {
    SDL_Init(SDL_INIT_VIDEO);
    int screen_w = 1280, screen_h = 720;
    SDL_Window *win = SDL_CreateWindow("test", 20, 20, screen_w, screen_h,
                                       SDL_WINDOW_OPENGL);
    SDL_GLContext glcontext = SDL_GL_CreateContext(win);

    STLParser stlparser;
    std::ifstream file(".\\logo.stl");
    stlparser.parseAscii(file);
    const auto& ndata = stlparser.getNData();
    const auto& vdata = stlparser.getVData();
    std::cout << "number of facets: " << ndata.size() << std::endl;

    SDL_GL_SetAttribute(SDL_GL_DOUBLEBUFFER, 1);
    glMatrixMode(GL_PROJECTION | GL_MODELVIEW);
    glLoadIdentity();
    glScalef(1.f, -1.f, 1.f);
    glOrtho(0, screen_w, 0, screen_h, -screen_w, screen_w);
    glClearDepth(1.0f);
    glDepthFunc(GL_LEQUAL);
    glEnable(GL_DEPTH_TEST);
    glDisable(GL_CULL_FACE);
    glEnableClientState(GL_NORMAL_ARRAY);
    glEnableClientState(GL_VERTEX_ARRAY);
    glNormalPointer(GL_FLOAT, 0, ndata.data());
    glVertexPointer(3, GL_FLOAT, 0, vdata.data());

    SDL_Event event;
    bool quit = false;
    while (!quit) {
        while (SDL_PollEvent(&event)) {
            switch (event.type) {
            case SDL_QUIT: quit = true; break;
            }
        }

        // Drawing
        glClearColor(255, 255, 255, 255);
        glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
        glTranslatef(screen_w / 2, 0, 0);
        glRotatef(0.5, 0, 1, 0);
        glTranslatef(-screen_w / 2, 0, 0);
        glPushMatrix();
        glTranslatef(screen_w / 2, screen_h / 2, 0);
        glColor3f(0.5, 0.5, 0);
        glDrawArrays(GL_TRIANGLES, 0, vdata.size());
        glPopMatrix();
        SDL_GL_SwapWindow(win);
        SDL_Delay(10);
    }

    SDL_DestroyWindow(win);
    SDL_Quit();
    return 0;
}
The STLParser methods getNData() and getVData() have the following signatures:
const std::vector<std::array<float,3>>& getNData() const;
const std::vector<std::array<std::array<float,3>,3>>& getVData() const;
The STLParser output should be correct, but I can provide its sources as well if needed.
What am I doing wrong?
You should change
glDrawArrays(GL_TRIANGLES, 0, vdata.size());
to
glDrawArrays(GL_TRIANGLES, 0, 3 * vdata.size());
I.e. the count argument is the number of vertices to draw, not the number of triangles. With vdata.size() you were only submitting a third of your vertices, which is why so many faces were missing.
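Given the getVData() signature shown in the question, each vdata element is one triangle holding three vertices, so the corrected call could also be written explicitly like this (a small sketch, adding only a defensive cast):

// vdata.size() counts triangles; glDrawArrays expects a vertex count
glDrawArrays(GL_TRIANGLES, 0, static_cast<GLsizei>(3 * vdata.size()));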
Related
I am trying to draw a bunch of points on the screen. I'm using CUDA to generate the data (position and color), and OpenGL to draw it. I am trying to get CUDA to update a VBO and then OpenGL to draw it, but I get a blank screen. I am not sure if CUDA is failing to update the buffer, or if the buffer is not drawing properly. My GPU is a GTX 1080, and I'm trying to use OpenGL 4.0. Colors are specified by CUDA as well. If my problem is that I need a shader, how do I add that, but also still specify the color through CUDA?
UPDATE: the problem seems to be OpenGL. I updated the code to draw a triangle instead. So a new question to add: why is my VBO not being rendered?
Here is the code:
GPUmain.cuh:
#include <cuda_runtime.h>
#include "device_launch_parameters.h"
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/remove.h>
#include <curand.h>
#include <GL/glew.h>
#include <SDL_opengl.h>
#include <cuda_gl_interop.h>
#define BUFFER_OFFSET(i) ((char *)NULL + (i))
// ver: x, y, z, r, g, b, a
struct ver {
    // x, y, z pos
    GLuint x, y, z;
    // r, g, b, a color
    GLubyte r, g, b, a;
};

class GPU {
public:
    static int nParticles;
    static GLuint vboid;
    static cudaGraphicsResource *CGR;
    // collection of vertices to be simulated and rendered
    static thrust::device_vector<ver> rverts;
    static void init(int w, int h);
    static void compute();
    static void render();
    static void GPUmain();
    static void free();
};
GPUmain.cu:
#include "GPUmain.cuh"
__global__ void uploadVerts(ver *vv, ver *vb) {
    int id = threadIdx.x + (blockDim.x * blockIdx.x);
    vb[id] = vv[id];
    vb[id].x = vv[id].x;
    vb[id].y = vv[id].y;
    vb[id].z = vv[id].z;
    vb[id].r = vv[id].r;
    vb[id].g = vv[id].g;
    vb[id].b = vv[id].b;
    vb[id].a = vv[id].a;
}

__global__ void genGrid(ver *v) {
    int i = threadIdx.x + (blockDim.x * blockIdx.x);
    float x = (float)(i % ((int)1080));
    float y = (float)(i / ((int)1920));
    v[i].x = x;
    v[i].y = y;
    v[i].z = 1;
    v[i].r = 255;
    v[i].g = 0;
    v[i].b = 0;
    v[i].a = 0;
}

int GPU::nParticles;
GLuint GPU::vboid;
cudaGraphicsResource *GPU::CGR;
// collection of vertices to be simulated and rendered
thrust::device_vector<ver> GPU::rverts;

void GPU::init(int w, int h)
{
    nParticles = w * h;
    /*rverts.resize(nParticles, ver{0,0,0,0,0,0,0});
    genGrid<<<nParticles/1024,1024>>>(thrust::raw_pointer_cast(&rverts[0]));*/
    ver e[3] = {
        ver{1024, 200, 2, 255, 0, 0, 255},
        ver{499, 288, 173, 0, 255, 0, 255},
        ver{462, 1674, 8, 0, 0, 255, 255}
    };
    glGenBuffers(1, &vboid);
    glBindBuffer(GL_ARRAY_BUFFER, vboid);
    glBufferData(GL_ARRAY_BUFFER, 3 * sizeof(ver), e, GL_DYNAMIC_DRAW);
    glBindBuffer(GL_ARRAY_BUFFER, 0);
    /*cudaGraphicsGLRegisterBuffer(&CGR,vboid,cudaGraphicsMapFlagsWriteDiscard);*/
}

void GPU::compute()
{
}

void GPU::render()
{
    /*ver *verts;
    size_t size;
    cudaGraphicsMapResources(1, &CGR, 0);
    cudaGraphicsResourceGetMappedPointer((void**)&verts, &size, CGR);
    uploadVerts<<<nParticles/1024, 1024>>>(thrust::raw_pointer_cast(&rverts[0]), verts);
    cudaGraphicsUnmapResources(1, &CGR, 0);
    cudaDeviceSynchronize();*/
    glClearColor(0, 0, 0, 0); // we clear the screen with black (else, frames would overlay...)
    glClear(GL_COLOR_BUFFER_BIT); // clear the buffer
    glBindBuffer(GL_ARRAY_BUFFER, vboid);
    glEnableClientState(GL_VERTEX_ARRAY);
    glEnableClientState(GL_COLOR_ARRAY);
    glVertexPointer(3, GL_INT, 4 * sizeof(GLubyte), 0);
    glColorPointer(4, GL_BYTE, 3 * sizeof(GLuint), BUFFER_OFFSET(3 * sizeof(GLuint)));
    glDrawArrays(GL_TRIANGLES, 0, 3);
    glDisableClientState(GL_VERTEX_ARRAY);
    glDisableClientState(GL_COLOR_ARRAY);
    glBindBuffer(GL_ARRAY_BUFFER, 0);
}

void GPU::GPUmain()
{
    compute();
    render();
}

void GPU::free()
{
    cudaGraphicsUnregisterResource(CGR);
    glBindBuffer(GL_ARRAY_BUFFER, vboid);
    glDeleteBuffers(1, &vboid);
    glBindBuffer(GL_ARRAY_BUFFER, 0);
    rverts.clear();
    thrust::device_vector<ver>().swap(rverts);
}
The relevant parts of window.cpp (those that contain OpenGL code):
bool Window::init()
{
    // initialize SDL
    if (SDL_Init(SDL_INIT_EVERYTHING) != 0) {
        log << "Failed to initialize SDL!\n";
        return false;
    }
    // set window attributes
    SDL_GL_SetAttribute(SDL_GL_CONTEXT_PROFILE_MASK, SDL_GL_CONTEXT_PROFILE_CORE);
    SDL_GL_SetAttribute(SDL_GL_CONTEXT_MAJOR_VERSION, 4);
    SDL_GL_SetAttribute(SDL_GL_CONTEXT_MINOR_VERSION, 0);
    SDL_GL_SetAttribute(SDL_GL_STENCIL_SIZE, 8);
    SDL_GL_SetAttribute(SDL_GL_DOUBLEBUFFER, 1);
    // create window
    window = SDL_CreateWindow(
        name.c_str(),
        SDL_WINDOWPOS_CENTERED,
        SDL_WINDOWPOS_CENTERED,
        width,
        height,
        SDL_WINDOW_OPENGL
    );
    // create OpenGL context in the window
    glcontext = SDL_GL_CreateContext(window);
    SDL_GL_SetSwapInterval(1);
    // check if the window was created
    if (window == nullptr) {
        log << "Failed to create window!\n";
        return false;
    }
    // turn on experimental features
    glewExperimental = GL_TRUE;
    // initialize GLEW
    if (glewInit() != GLEW_OK) {
        log << "Failed to Init GLEW";
        return false;
    }
    // set drawing parameters
    glViewport(0, 0, width, height);
    glMatrixMode(GL_PROJECTION);
    glLoadIdentity();
    glOrtho(0, width, 0, height, 0, 255);
    glPointSize(1);
    glEnable(GL_BLEND); // allow transparency
    glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA); // how transparency acts
    std::cout << sizeof(ver);
    GPU::init(width, height);
    return true;
}

void Window::renderFrame()
{
    GPU::render();
    SDL_GL_SwapWindow(window); // swap buffers
}
If you use the fixed-function attributes and client-side capabilities, then you have to use a compatibility profile context.
See Fixed Function Pipeline and Legacy OpenGL.
So change

SDL_GL_SetAttribute(SDL_GL_CONTEXT_PROFILE_MASK, SDL_GL_CONTEXT_PROFILE_CORE);

to

SDL_GL_SetAttribute(SDL_GL_CONTEXT_PROFILE_MASK, SDL_GL_CONTEXT_PROFILE_COMPATIBILITY);

If you want to keep the core profile, then you have to use a Vertex Array Object and a Shader instead.
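A minimal sketch of that core-profile route follows. The shader sources and attribute locations here are illustrative assumptions, not the asker's code, and the projection that maps the pixel coordinates to clip space is omitted:

// hypothetical pass-through shaders (a projection uniform would still be needed)
const char* vsSrc =
    "#version 400 core\n"
    "layout(location = 0) in vec3 pos;\n"
    "layout(location = 1) in vec4 color;\n"
    "out vec4 vColor;\n"
    "void main() { vColor = color; gl_Position = vec4(pos, 1.0); }";
const char* fsSrc =
    "#version 400 core\n"
    "in vec4 vColor;\n"
    "out vec4 fragColor;\n"
    "void main() { fragColor = vColor; }";

GLuint vs = glCreateShader(GL_VERTEX_SHADER);
glShaderSource(vs, 1, &vsSrc, NULL);
glCompileShader(vs);
GLuint fs = glCreateShader(GL_FRAGMENT_SHADER);
glShaderSource(fs, 1, &fsSrc, NULL);
glCompileShader(fs);
GLuint prog = glCreateProgram();
glAttachShader(prog, vs);
glAttachShader(prog, fs);
glLinkProgram(prog);

// one-time setup: VAO capturing the vertex layout of the ver struct
GLuint vao;
glGenVertexArrays(1, &vao);
glBindVertexArray(vao);
glBindBuffer(GL_ARRAY_BUFFER, vboid);
// position: 3 ints per vertex, converted to float, record stride sizeof(ver)
glVertexAttribPointer(0, 3, GL_INT, GL_FALSE, sizeof(ver), BUFFER_OFFSET(0));
// color: 4 unsigned bytes, normalized to [0, 1]
glVertexAttribPointer(1, 4, GL_UNSIGNED_BYTE, GL_TRUE, sizeof(ver), BUFFER_OFFSET(3 * sizeof(GLuint)));
glEnableVertexAttribArray(0);
glEnableVertexAttribArray(1);

// per frame:
glUseProgram(prog);
glBindVertexArray(vao);
glDrawArrays(GL_TRIANGLES, 0, 3);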
The following geometry
ver e[3] = {
// x y z r g b a
ver{1024, 200, 2, 255, 0, 0, 255},
ver{ 499, 288, 173, 0, 255, 0, 255},
ver{462, 1674, 8, 0, 0, 255, 255}
};
is clipped by the near plane of the orthographic projection. Note, in view space the z-axis points out of the viewport, so with near = 0 and far = 255 only view-space z values in [-255, 0] are visible, and the triangle's positive z coordinates (2, 173, 8) fall outside that range.
Change the orthographic projection (or invert the z coordinates of the geometry) from

glOrtho(0, width, 0, height, 0, 255);

to

glOrtho(0, width, 0, height, -255, 0);
The stride parameter of glVertexPointer and glColorPointer is the byte offset between consecutive attributes, so it has to be sizeof(ver). Further, the type of the color attributes is GL_UNSIGNED_BYTE rather than GL_BYTE. Change

glVertexPointer(3, GL_INT, 4 * sizeof(GLubyte), 0);
glColorPointer(4, GL_BYTE, 3 * sizeof(GLuint), BUFFER_OFFSET(3 * sizeof(GLuint)));

to

glVertexPointer(3, GL_INT, sizeof(ver), 0);
glColorPointer(4, GL_UNSIGNED_BYTE, sizeof(ver), BUFFER_OFFSET(3 * sizeof(GLuint)));
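As an aside, a sketch of deriving those offsets from the struct itself with offsetof, instead of hand-counting component sizes (same ver struct and BUFFER_OFFSET macro as above):

#include <cstddef> // offsetof

glVertexPointer(3, GL_INT, sizeof(ver), BUFFER_OFFSET(offsetof(ver, x)));
glColorPointer(4, GL_UNSIGNED_BYTE, sizeof(ver), BUFFER_OFFSET(offsetof(ver, r)));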
I'm writing something that I want to work on as much hardware as possible, but I have found an issue where I don't believe the fault is with my code. Can anyone confirm?
UPDATE: this also happens on Windows 7, and when booting in safe mode to force no drivers. Both report:
OpenGL Version: 1.1.0,
Vendor: Microsoft Corporation,
Renderer: GDI Generic
When running on Windows 10 with an older graphics card, the Microsoft OpenGL 1.1 driver is used. Yeah, I know that shaders are the way to go, and that older cards aren't actually supported on Windows 10, but since when did that stop anyone... etc. But like I said, I want maximum compatibility. Code that demonstrates the issue is included:
//link library
//opengl32
//linker options
//-lmingw32 -lSDL2main -lSDL2
//include SDL2 dirs-
//compiler-
//SDL2-2.0.4\i686-w64-mingw32\include\SDL2
//linker-
//SDL2-2.0.4\i686-w64-mingw32\lib
#include <SDL.h>
#include <SDL_opengl.h>
void draw();
bool fix = false;
bool wire = false;
GLuint tex1 = 0;
GLuint tex2 = 0;
int main(int argc, char *argv[])
{
    SDL_Init(SDL_INIT_VIDEO | SDL_INIT_TIMER);
    SDL_Window* mywindow = SDL_CreateWindow("press q or w to toggle fixes...",
                                            100, 100, 640, 480,
                                            SDL_WINDOW_OPENGL | SDL_WINDOW_SHOWN);
    SDL_GL_CreateContext(mywindow);
    glEnable(GL_TEXTURE_2D);
    bool loop = true;
    glEnableClientState(GL_VERTEX_ARRAY);
    glEnableClientState(GL_TEXTURE_COORD_ARRAY);
    while (loop)
    {
        SDL_Event event;
        while (SDL_PollEvent(&event))
        {
            if (event.type == SDL_QUIT)
                loop = false;
            if (event.type == SDL_KEYDOWN)
            {
                switch (event.key.keysym.sym)
                {
                case SDLK_ESCAPE:
                    loop = false;
                    break;
                case SDLK_q:
                    fix = !fix;
                    break;
                case SDLK_w:
                    if (wire)
                    {
                        glPolygonMode(GL_FRONT, GL_FILL);
                    }
                    else
                    {
                        glPolygonMode(GL_FRONT, GL_LINE);
                    }
                    wire = !wire;
                    break;
                default:
                    break;
                }
            }
        }
        draw();
        SDL_GL_SwapWindow(mywindow);
    }
    glDisableClientState(GL_VERTEX_ARRAY);
    glDisableClientState(GL_TEXTURE_COORD_ARRAY);
    glDeleteTextures(1, &tex1);
    glDeleteTextures(1, &tex2);
    return 0;
}
void texturator(GLuint* tex, unsigned char r, unsigned char g, unsigned char b)
{
    if (!(*tex))
    {
        glGenTextures(1, tex);
        glBindTexture(GL_TEXTURE_2D, *tex);
        unsigned char text[4*4*4]; // 4*w*h
        for (unsigned int x = 0; x < 4; x++)
        {
            for (unsigned int y = 0; y < 4; y++)
            {
                unsigned char* pix = &(text[((y*4)+x)*4]);
                pix[0] = x*r;
                pix[1] = y*g;
                pix[2] = x*b;
                pix[3] = 255;
            }
        }
        glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, 4, 4, 0, GL_RGBA, GL_UNSIGNED_BYTE, text);
        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
    }
    else
    {
        if (fix)
        {
            glBindTexture(GL_TEXTURE_2D, 0);
        }
        glBindTexture(GL_TEXTURE_2D, *tex);
    }
}
void draw()
{
    glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
    glMatrixMode(GL_PROJECTION);
    glLoadIdentity();
    GLfloat mat1[] = {1.0, 0, 0, 0, 0, 1.0, 0, 0, 0, 0, 1.0, 0, 0, 0, 0, 1};
    glLoadMatrixf(mat1);
    glMatrixMode(GL_MODELVIEW);
    glLoadIdentity();
    GLfloat verts[] = {0.0f,0.0f,0.0f, 1.0f,0.0f,0.0f, 1.0f,1.0f,0.0f, 0.0f,1.0f,0.0f};
    GLfloat uvs[] = {0.0f,0.0f, 1.0f,0.0f, 1.0f,1.0f, 0.0f,1.0f};
    glVertexPointer(3, GL_FLOAT, 0, verts);
    glTexCoordPointer(2, GL_FLOAT, 0, uvs);
    texturator(&tex1, 0, 60, 60);
    GLuint elem[] = {0,1,2, 2,3,0};
    glDrawElements(GL_TRIANGLES, 6, GL_UNSIGNED_INT, elem);
    glMatrixMode(GL_PROJECTION);
    glLoadIdentity();
    GLfloat mat2[] = {1, 0, 0, 0, -0, 1, 0, 0, 0, 0, 1, 0, -1, 0, 0, 1};
    glLoadMatrixf(mat2);
    glMatrixMode(GL_MODELVIEW);
    glLoadIdentity();
    texturator(&tex2, 60, 10, 0);
    glTexCoordPointer(2, GL_FLOAT, 0, uvs);
    glVertexPointer(3, GL_FLOAT, 0, verts);
    glDrawArrays(GL_TRIANGLE_FAN, 0, 4);
}
From the second frame on, tex2 will continue to be displayed for both draws, even though gDEBugger says that tex1 is bound. Binding 0 before the new bind fixes this. Switching to wireframe instead of fill mode also, curiously, fixes it, while it is still drawn as wireframe. (Use the q and w keys to toggle these.)
Can anyone shed any light on this? Is it just Microsoft's implementation being...
And is there a better way to include compatibility with older stuff?
Cheers!!
Within my project, I've been having trouble getting a triangle to display within OnRender(); for some reason, nothing other than the background color (green) is visible.
int main(int argc, char **argv)
{
    if (!OnInit())
        return -1;
    SDL_Event Event;
    while (_isRunning)
    {
        while (SDL_PollEvent(&Event))
            OnEvent(&Event);
        OnRender();
        OnLoop();
        SDL_GL_SwapWindow(_screen);
    }
    OnCleanup();
    return 0;
}
void generalSetup()
{
    // Initialize SDL2
    if (SDL_Init(SDL_INIT_VIDEO) < 0)
        sdldie("Failed to initialize SDL2.");
    else
    {
        /* Request OpenGL 3.2 */
        SDL_GL_SetAttribute(SDL_GL_CONTEXT_MAJOR_VERSION, 3);
        SDL_GL_SetAttribute(SDL_GL_CONTEXT_MINOR_VERSION, 2);
        SDL_GL_SetAttribute(SDL_GL_CONTEXT_PROFILE_MASK, SDL_GL_CONTEXT_PROFILE_CORE);
        // Create window
        _screen = SDL_CreateWindow("Window", SDL_WINDOWPOS_CENTERED, SDL_WINDOWPOS_CENTERED,
                                   800, 600, SDL_WINDOW_OPENGL | SDL_WINDOW_SHOWN);
        // Create context
        _mainContext = SDL_GL_CreateContext(_screen);
        // Create surface
        _surface = SDL_GetWindowSurface(_screen);
        SDL_FillRect(_surface, NULL, SDL_MapRGB(_surface->format, 0xCC, 0x20, 0x20));
        SDL_UpdateWindowSurface(_screen);
        /* swap synchronized */
        SDL_GL_SetSwapInterval(1);
        // Initialize GLEW 1.10
        glewExperimental = GL_TRUE;
        GLenum error = glewInit();
        if (error != GLEW_OK)
            printf("Warning: Unable to set VSync! SDL Error: %s\n", SDL_GetError());
        else
            std::cout << "GLEW Initialized" << std::endl;
        glClearColor(0, 1, 0, 0);
        glViewport(0, 0, 800, 600);
        glMatrixMode(GL_PROJECTION);
        glLoadIdentity();
        gluPerspective(45, 800 / 600, 1, 1000);
        gluLookAt(0, 0, 20, 0, 0, 0, 0, 1, 0);
        glMatrixMode(GL_MODELVIEW);
        glLoadIdentity();
    }
}
bool OnInit()
{
    generalSetup();
    return true;
}

void OnEvent(SDL_Event* Event)
{
    if (Event->type == SDL_QUIT)
        _isRunning = false;
}

void OnLoop()
{
}

void OnRender()
{
    glClearColor(1.0, 0.0, 0.0, 1.0);
    glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
    glPushMatrix();
    glTranslatef(0.f, 0.f, -10.f);
    glBegin(GL_TRIANGLES);
    glColor3f(0.1, 0.2, 0.3);
    glVertex3f(0, 0, 0);
    glVertex3f(1, 0, 0);
    glVertex3f(0, 1, 0);
    glEnd();
    glPopMatrix();
}

void OnCleanup()
{
    SDL_GL_DeleteContext(_mainContext);
    SDL_DestroyWindow(_screen);
    SDL_Quit();
}
You requested a Core context. None of your immediate-mode (matrix stack, glBegin(), etc.) code will work.
Drop back to a compatibility context (SDL_GL_CONTEXT_PROFILE_COMPATIBILITY) or supply all the necessary shaders, vertex buffers, etc. that Core requires.
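For example, the smallest change would be swapping the profile request in generalSetup():

// request a compatibility profile so the immediate-mode calls remain available
SDL_GL_SetAttribute(SDL_GL_CONTEXT_PROFILE_MASK, SDL_GL_CONTEXT_PROFILE_COMPATIBILITY);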
You are trying to render through the immediate mode, but it is not supported in an OpenGL 3.2 core context. Try using version 2.0 or 2.1 instead, which support both shaders (if you intend to use them) and the immediate mode.
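For instance, requesting a 2.1 context with SDL could look like this (pre-3.0 versions have no core/compatibility split, so no profile mask is needed):

SDL_GL_SetAttribute(SDL_GL_CONTEXT_MAJOR_VERSION, 2);
SDL_GL_SetAttribute(SDL_GL_CONTEXT_MINOR_VERSION, 1);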
This is more out of curiosity than for any practical purpose: is there anything in the OpenGL specification that suggests that calling glTexImage2D many times (e.g., once per frame) is illegal? I mean illegal as in 'it could produce wrong results', not just inefficient (suppose I don't care about the performance impact of not using glTexSubImage2D instead).
The reason I'm asking is that I noticed some very odd artifacts when drawing overlapping, texture-mapped primitives that use a partly-transparent texture which is loaded once per every frame using glTexImage2D (see the attached picture): after a few seconds (i.e., a few hundred frames), small rectangular black patches appear on the screen (they're actually flipping between black and normal between consecutive frames).
I'm attaching below the simplest example code I could write that exhibits the problem.
#include <stdio.h>
#ifndef __APPLE__
# include <SDL/SDL.h>
# include <SDL/SDL_opengl.h>
#else
# include <SDL.h>
# include <SDL_opengl.h>
#endif
/* some constants and variables that several functions use */
const int width = 640;
const int height = 480;
#define texSize 64
GLuint vbo;
GLuint tex;
/* forward declaration, creates a random texture; uses glTexSubImage2D if
update is non-zero (otherwise glTexImage2D) */
void createTexture(GLuint label, int update);
int init()
{
    /* SDL initialization */
    if (SDL_Init(SDL_INIT_VIDEO) < 0)
        return 0;
    SDL_GL_SetAttribute(SDL_GL_DOUBLEBUFFER, 1);
    if (!SDL_SetVideoMode(width, height, 0, SDL_OPENGL)) {
        fprintf(stderr, "Couldn't initialize OpenGL");
        return 0;
    }
    /* OpenGL initialization */
    glClearColor(0, 0, 0, 0);
    glEnable(GL_TEXTURE_2D);
    glEnable(GL_BLEND);
    glBlendFunc(GL_ONE, GL_ONE_MINUS_SRC_ALPHA);
    glViewport(0, 0, width, height);
    glMatrixMode(GL_PROJECTION);
    glLoadIdentity();
    glOrtho(0, width, height, 0, -1, 1);
    glMatrixMode(GL_MODELVIEW);
    /* creating the VBO and the textures */
    glGenBuffers(1, &vbo);
    glBindBuffer(GL_ARRAY_BUFFER, vbo);
    glBufferData(GL_ARRAY_BUFFER, 1024, 0, GL_DYNAMIC_DRAW);
    glGenTextures(1, &tex);
    createTexture(tex, 0);
    glEnableClientState(GL_VERTEX_ARRAY);
    glEnableClientState(GL_TEXTURE_COORD_ARRAY);
    return 1;
}

/* draw a triangle at the specified point */
void drawTriangle(GLfloat x, GLfloat y)
{
    GLfloat coords1[12] = {0, 0, 0, 0, /**/200, 0, 1, 0, /**/200, 150, 1, 1};
    glLoadIdentity();
    glTranslatef(x, y, 0);
    glBufferSubData(GL_ARRAY_BUFFER, 0, sizeof(coords1), coords1);
    glVertexPointer(2, GL_FLOAT, 4*sizeof(GLfloat), (void*)0);
    glTexCoordPointer(2, GL_FLOAT, 4*sizeof(GLfloat),
                      (char*)0 + 2*sizeof(GLfloat));
    glDrawArrays(GL_TRIANGLES, 0, 3);
}

void render()
{
    glClear(GL_COLOR_BUFFER_BIT);
    drawTriangle(250, 50);
    createTexture(tex, 0);
    drawTriangle(260, 120);
    SDL_GL_SwapBuffers();
}

void cleanup()
{
    glDeleteTextures(1, &tex);
    glDeleteBuffers(1, &vbo);
    SDL_Quit();
}

int main(int argc, char* argv[])
{
    SDL_Event event;
    if (!init()) return 1;
    while (1) {
        while (SDL_PollEvent(&event))
            if (event.type == SDL_QUIT)
                return 0;
        render();
    }
    cleanup();
    return 0;
}

void createTexture(GLuint label, int update)
{
    GLubyte data[texSize*texSize*4];
    GLubyte* p;
    int i, j;
    glBindTexture(GL_TEXTURE_2D, label);
    for (i = 0; i < texSize; ++i) {
        for (j = 0; j < texSize; ++j) {
            p = data + (i + j*texSize)*4;
            p[0] = ((i % 8) > 4 ? 255 : 0);
            p[1] = ((j % 8) > 4 ? 255 : 0);
            p[2] = ((i % 8) > 4 ? 255 : 0);
            p[3] = 255 - i*3;
        }
    }
    if (!update)
        glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA8, texSize, texSize, 0, GL_RGBA,
                     GL_UNSIGNED_BYTE, data);
    else
        glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, texSize, texSize, GL_RGBA,
                        GL_UNSIGNED_BYTE, data);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
}
Notes:
I'm using SDL, but I've seen the same happening in wxWidgets, so it's not an SDL-related problem.
If I use glTexSubImage2D instead for every frame (use update = 1 in createTexture), the artifacts disappear.
If I disable blending, there are no more artifacts.
I've been testing this on a late 2010 MacBook Air, though I doubt that's particularly relevant.
This is clearly an OpenGL implementation bug (just calling glTexImage2D in a loop should not cause this to happen).
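Per the notes above, a sketch of the workaround using the update parameter createTexture() already takes: keep the glTexImage2D call for the one-time allocation in init(), and switch render() to the glTexSubImage2D path:

/* in init(), unchanged: glTexImage2D allocates the texture storage once */
createTexture(tex, 0);

/* in render(): re-uploads only the pixels, no per-frame re-specification */
createTexture(tex, 1);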
I am using Cygwin's SDL 1.2.15 with the latest Cygwin.
Here is my code using SDL and OpenGL:
#include <SDL/SDL.h>
#include <SDL/SDL_opengl.h>
#include <iostream>
size_t sx=600, sy=600, bpp=32;
void render(void) {
    glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
    glLoadIdentity(); // set location in front of camera
    //glTranslated(0, 0, -10);
    glBegin(GL_QUADS); // draw a square
    glColor3d(1, 0, 0);
    glVertex3d(-2, 2, 0);
    glVertex3d( 2, 2, 0);
    glVertex3d( 2, -2, 0);
    glVertex3d(-2, -2, 0);
    glEnd();
    glFlush();
    SDL_GL_SwapBuffers();
    GLenum e;
    while ((e = glGetError()) != GL_NO_ERROR)
        std::cout << "Error " << e << std::endl;
}

int input(void) {
    SDL_Event event;
    while (SDL_PollEvent(&event))
        if (event.type == SDL_QUIT || (event.type == SDL_KEYUP && event.key.keysym.sym == SDLK_ESCAPE))
            return 0;
    return 1;
}
And this is my main function:
int main(int argc, char *argv[]) {
    SDL_Surface *surf;
    if (SDL_Init(SDL_INIT_EVERYTHING) != 0) return 0;
    if (!(surf = SDL_SetVideoMode(sx, sy, bpp, SDL_OPENGL))) return 0;
    glViewport(0, 0, sx, sy);
    glMatrixMode(GL_PROJECTION);
    glLoadIdentity();
    gluPerspective(45.0, (float)sx / (float)sy, 1.0, 100.0);
    glMatrixMode(GL_MODELVIEW);
    glClearColor(0, 0, 0, 1);
    glClearDepth(1.0);
    glEnable(GL_DEPTH_TEST);
    GLenum e;
    while ((e = glGetError()) != GL_NO_ERROR)
        std::cout << "Error " << e << std::endl;
    for (;;) {
        if (!input()) break;
        render();
        SDL_Delay(10);
    }
    SDL_FreeSurface(surf);
    SDL_Quit();
    return 0;
}
It compiles with no errors, but when I run it, only the window shows up and no OpenGL rectangle.
You have set the near plane to one:
gluPerspective(45.0, (float)sx / (float)sy, 1.0/*near plane*/, 100.0);
Everything that is closer to the camera is clipped.
Your quad lies in plane z = 0. Try moving it a bit backward.
glBegin(GL_QUADS); // draw a square
glColor3d(1, 0, 0);
glVertex3d(-2, 2, 5);
glVertex3d( 2, 2, 5);
glVertex3d( 2, -2, 5);
glVertex3d(-2, -2, 5);
glEnd();
I don't remember if Z is facing the camera, so you might need a negative Z value.
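For what it's worth, in OpenGL's default eye space the camera looks down the negative z-axis, so a negative z is indeed what is needed here; uncommenting the translate that is already in the asker's render() would also move the quad into view:

glLoadIdentity();
glTranslated(0, 0, -10); // z = -10 lies between the near (1) and far (100) planes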
You also need to pay attention to face culling. It might be better to deactivate it to be sure (glDisable(GL_CULL_FACE)).
Try changing the black clear color of the SDL window. Sometimes the drawing is rendered in black as well... maybe this helps!