This code is supposed to draw a triangle, but all I get is a black screen. Why is nothing being drawn?
Secondly, in my array of vertices, if I put an 'f' after my coordinates like I always see in tutorials, I get an error about an invalid digit in an octal constant. Why can everyone else use 'f' after their numbers and not me?
I am using OpenGL 4.1 on OS X Yosemite.
#include <iostream>
//Using SDL and standard IO
#include <SDL2/SDL.h>
//#define GL_GLEXT_PROTOTYPES 1
//#include <SDL2/SDL_opengl.h>
#include <GLUT/glut.h>
#include <stdio.h>
#include <OpenGL/gl3.h>
#include <glm/glm.hpp>
#include <glm/gtc/matrix_transform.hpp>
#include <string.h>
using namespace std;
//Screen dimension constants
const int SCREEN_WIDTH = 640;
const int SCREEN_HEIGHT = 480;
bool SetOpenGLAttributes()
{
// Set our OpenGL version.
// SDL_GL_CONTEXT_CORE gives us only the newer version, deprecated functions are disabled
SDL_GL_SetAttribute(SDL_GL_CONTEXT_PROFILE_MASK, SDL_GL_CONTEXT_PROFILE_CORE);
SDL_GL_SetAttribute(SDL_GL_CONTEXT_MAJOR_VERSION, 4);
SDL_GL_SetAttribute(SDL_GL_CONTEXT_MINOR_VERSION, 1);
SDL_GL_SetAttribute(SDL_GL_DOUBLEBUFFER, 1);
return true;
}
int main( int argc, char* args[] )
{
//The window we'll be rendering to
SDL_Window* window = NULL;
//Initialize SDL
if( SDL_Init( SDL_INIT_VIDEO ) < 0 )
{
printf( "SDL could not initialize! SDL_Error: %s\n", SDL_GetError() );
}
else
{
SetOpenGLAttributes();
//Create window
window = SDL_CreateWindow( "SDL Tutorial", SDL_WINDOWPOS_UNDEFINED, SDL_WINDOWPOS_UNDEFINED, SCREEN_WIDTH, SCREEN_HEIGHT, SDL_WINDOW_OPENGL );
if( window == NULL )
{
printf( "Window could not be created! SDL_Error: %s\n", SDL_GetError() );
}
else
{
//creating new context
SDL_GL_CreateContext(window);
GLuint vertexArrayID;
glGenVertexArrays(1, &vertexArrayID);
glBindVertexArray(vertexArrayID);
printf("%s", "This is your version");
printf("%s\n", glGetString(GL_VERSION));
printf("%s", glGetString(GL_RENDERER));
SDL_GL_SetSwapInterval(1);
glEnable(GL_DEPTH_TEST);
float r = 0.5;
static const GLfloat cubeV[] = {
-.5, 0, 0,
.5, 0, 0,
0, .5, 0
};
GLuint vertexBuffer;
glGenBuffers(1, &vertexBuffer);
glBindBuffer(GL_ARRAY_BUFFER, vertexBuffer);
glBufferData(GL_ARRAY_BUFFER, sizeof(cubeV), cubeV, GL_STATIC_DRAW);
SDL_GL_SwapWindow(window);
bool running = true;
while(running){
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
glEnableVertexAttribArray(0);
glBindBuffer(GL_ARRAY_BUFFER, vertexBuffer);
glVertexAttribPointer(
0, // attribute 0. No particular reason for 0, but must match the layout in the shader.
3, // size
GL_FLOAT, // type
GL_FALSE, // normalized?
0, // stride
(void*)0 // array buffer offset
);
glDrawArrays(GL_TRIANGLES, 0, 3);
glDisableVertexAttribArray(0);
glFlush();
SDL_GL_SwapWindow(window);
SDL_Delay(17);
}
}
}
//Destroy window
//SDL_DestroyWindow( window );
//Quit SDL subsystems
//SDL_Quit();
return 0;
}
In my rendering application, if I run the render function in the main loop then everything works fine, but if I move the rendering to another thread then the destructors of my objects are not able to release their buffers.
When any object gets destroyed, its destructor is called, but it seems as if glDeleteBuffers is not able to release the buffer.
How I came to this conclusion:
1) When I run everything in the main loop and create an object, the VAO number for the object is 1.
2) After destroying the object, the next object's VAO is also assigned number 1.
But when the rendering part goes to a separate thread:
1) The VAO number keeps incrementing with every object.
2) System RAM usage also keeps increasing, and the memory is released only when I close the application.
3) The destructor for an object is definitely called when I delete it, but it seems the destructor has not been able to release the buffer.
//#define GLEW_STATIC
#include <gl\glew.h>
#include <glfw3.h>
#include "TreeModel.h"
#include "ui_WavefrontRenderer.h"
#include <QtWidgets/QApplication>
#include <QMessageBox>
#include <thread>
#define FPS_WANTED 60
const double limitFPS = 1.0 / 50.0;
Container cont;
const unsigned int SCR_WIDTH = 800;
const unsigned int SCR_HEIGHT = 600;
void framebuffer_size_callback(GLFWwindow* window, int width, int height);
void processInput(GLFWwindow *window);
GLFWwindow* window = nullptr;
void RenderThread(WavefrontRenderer* w)
{
glfwMakeContextCurrent(window);
GLenum GlewInitResult;
glewExperimental = GL_TRUE;
GlewInitResult = glewInit();
if (GLEW_OK != GlewInitResult) // Check if glew is initialized properly
{
QMessageBox msgBox;
msgBox.setText("Not able to Initialize Glew");
msgBox.exec();
glfwTerminate();
}
if (window == NULL)
{
QMessageBox msgBox;
msgBox.setText("Not able to create GL Window");
msgBox.exec();
glfwTerminate();
//return -1;
}
w->InitData();
glEnable(GL_MULTISAMPLE);
glEnable(GL_DEPTH_TEST);
glDepthFunc(GL_LESS);
glEnable(GL_BLEND);
glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA);
glBlendEquation(GL_FUNC_ADD);
while (!glfwWindowShouldClose(window))
{
// input
// -----
processInput(window);
// - Measure time
glClearColor(0.3, 0.3, 0.3, 0.0);
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT | GL_STENCIL_BUFFER_BIT);
w->render(); // DO the Rendering
glfwSwapBuffers(window);
glfwPollEvents();
}
glfwTerminate();
std::terminate();
}
int main(int argc, char *argv[])
{
QApplication a(argc, argv);
cont.SetName("RootItem");
TreeModel* model = new TreeModel("RootElement", &cont);
WavefrontRenderer w(model);
w.show();
glfwInit();
glfwWindowHint(GLFW_RESIZABLE, GL_TRUE);
glfwWindowHint(GLFW_SAMPLES, 4);
window = glfwCreateWindow(SCR_WIDTH, SCR_HEIGHT, "Renderer", nullptr, nullptr); // Create the render window
glfwMakeContextCurrent(0);
std::thread renderThread(RenderThread, &w);
renderThread.detach();
return a.exec();
return 0;
}
Class definition for an object:
The render function w->render() calls the draw() function of each object.
The base class has a virtual destructor.
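The Geometry base class itself is not shown in the question; for the destructor discussion it is assumed to look roughly like this (a hypothetical sketch, not the actual code):
#include <string>
class Geometry {
public:
    Geometry(const std::string& name, bool visible);
    // Virtual, so that deleting a Triangle through a Geometry* runs Triangle::~Triangle().
    virtual ~Geometry() {}
    virtual void draw() = 0;
};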
#include "Triangle.h"
#include "qdebug.h"
#include "qmessagebox.h"
float verticesTriangle[] = {
-50.0f, -50.0f, 0.0f, 0.0f , 0.0f,1.0f ,0.0f, 0.0f,
50.0f, -50.0f, 0.0f, 0.0f , 0.0f,1.0f ,1.0f, 0.0f,
0.0f, 50.0f, 0.0f, 0.0f, 0.0f,1.0f ,0.5f, 1.0f
};
Triangle::Triangle() : Geometry("TRIANGLE", true)
{
this->isInited = 0;
this->m_VBO = 0;
this->m_VAO = 0;
this->iNumsToDraw = 0;
this->isChanged = true;
}
Triangle::Triangle(const Triangle& triangle) : Geometry( triangle )
{
CleanUp();
this->isInited = 0;
this->m_VBO = 0;
this->m_VAO = 0;
this->iNumsToDraw = triangle.iNumsToDraw;
this->isChanged = true;
this->shader = ResourceManager::GetShader("BasicShader");
iEntries = 3;
}
Triangle& Triangle::operator=(const Triangle& triangle)
{
CleanUp();
Geometry::operator=(triangle);
this->isInited = 0;
this->m_VBO = 0;
this->m_VAO = 0;
this->iNumsToDraw = triangle.iNumsToDraw;
this->isChanged = true;
this->shader = ResourceManager::GetShader("BasicShader");
return (*this);
}
void Triangle::init()
{
glGenVertexArrays(1, &m_VAO);
glGenBuffers(1, &m_VBO);
glBindVertexArray(m_VAO);
glBindBuffer(GL_ARRAY_BUFFER, m_VBO);
glBufferData(GL_ARRAY_BUFFER, sizeof(verticesTriangle), verticesTriangle, GL_STATIC_DRAW);
glEnableVertexAttribArray(0);
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 8 * sizeof(float), (void*)0);
glEnableVertexAttribArray(0);
glEnableVertexAttribArray(1);
glVertexAttribPointer(1, 3, GL_FLOAT, GL_FALSE, 8 * sizeof(float), (void*)(3 * sizeof(float)));
glEnableVertexAttribArray(0);
glEnableVertexAttribArray(2);
glVertexAttribPointer(2, 2, GL_FLOAT, GL_FALSE, 8 * sizeof(float), (void*)(6 * sizeof(float)));
glBindBuffer(GL_ARRAY_BUFFER, 0);
glBindVertexArray(0);
isInited = true;
}
void Triangle::CleanUp()
{
if (!this->isInited)
{
return;
}
if (this->m_VAO)
glDeleteVertexArrays(1, &this->m_VAO);
if (this->m_VBO)
glDeleteBuffers(1, &this->m_VBO);
this->isInited = false;
}
void Triangle::draw()
{
if (isChanged)
{
init();
isChanged = false;
}
this->shader.Use();
glBindVertexArray(m_VAO);
glDrawArrays(GL_TRIANGLES, 0, 3);
glBindVertexArray(0);
}
Triangle::~Triangle()
{
if (this->m_VAO)
glDeleteVertexArrays(1, &this->m_VAO);
if (this->m_VBO)
glDeleteBuffers(1, &this->m_VBO);
this->isInited = false;
}
OpenGL contexts are thread-local state:
Every thread has exactly one or no OpenGL context active in it at any given time.
Each OpenGL context must be active in no thread, or in exactly one thread, at any given time.
OpenGL contexts are not automatically migrated between threads.
That is, if you don't explicitly make the OpenGL context in question not current on the thread where it is currently active, and subsequently make it current on the thread you're calling glDeleteBuffers on, the call will have no effect on the context you expected it to affect.
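A minimal sketch of the hand-off, assuming GLFW as in the question's code (m_VBO and m_VAO are the handles from the Triangle class above):
// On the render thread, before handing the context over:
glfwMakeContextCurrent(nullptr);   // detach the context from this thread

// On the thread running the destructors:
glfwMakeContextCurrent(window);    // make the same context current here
glDeleteBuffers(1, &m_VBO);        // now these calls target the intended context
glDeleteVertexArrays(1, &m_VAO);
glfwMakeContextCurrent(nullptr);   // detach again when done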
I am trying to draw a bunch of points on the screen. I'm using CUDA to generate the data (position and color) and OpenGL to draw it. I am trying to get CUDA to update a VBO and then have OpenGL draw it, but I get a blank screen. I am not sure whether CUDA is failing to update the buffer or the buffer is not drawing properly. My GPU is a GTX 1080, and I'm trying to use OpenGL 4.0. Colors are specified by CUDA as well. If my problem is that I need a shader, how do I add one while still specifying the color through CUDA?
UPDATE: The problem seems to be on the OpenGL side; I updated the code to use a triangle. So a new question to add: why is my VBO not being rendered?
Here is the code:
GPUmain.cuh:
#include <cuda_runtime.h>
#include "device_launch_parameters.h"
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/remove.h>
#include <curand.h>
#include <GL/glew.h>
#include <SDL_opengl.h>
#include <cuda_gl_interop.h>
#define BUFFER_OFFSET(i) ((char *)NULL + (i))
//ver: x, y, z, r, g, b, a
struct ver {
// x, y, z pos
GLuint x, y, z;
// r, g, b, a color
GLubyte r, g, b, a;
};
class GPU {
public:
static int nParticles;
static GLuint vboid;
static cudaGraphicsResource *CGR;
//collection of vertices to be simulated and rendered
static thrust::device_vector<ver> rverts;
static void init(int w, int h);
static void compute();
static void render();
static void GPUmain();
static void free();
};
GPUmain.cu:
#include "GPUmain.cuh"
__global__ void uploadVerts(ver *vv, ver *vb) {
int id = threadIdx.x + (blockDim.x * blockIdx.x);
vb[id] = vv[id];
vb[id].x = vv[id].x;
vb[id].y = vv[id].y;
vb[id].z = vv[id].z;
vb[id].r = vv[id].r;
vb[id].g = vv[id].g;
vb[id].b = vv[id].b;
vb[id].a = vv[id].a;
}
__global__ void genGrid(ver *v) {
int i = threadIdx.x + (blockDim.x * blockIdx.x);
float x = (float)(i % ((int)1080));
float y = (float)(i / ((int)1920));
v[i].x = x;
v[i].y = y;
v[i].z = 1;
v[i].r = 255;
v[i].g = 0;
v[i].b = 0;
v[i].a = 0;
}
int GPU::nParticles;
GLuint GPU::vboid;
cudaGraphicsResource *GPU::CGR;
//collection of vertices to be simulated and rendered
thrust::device_vector<ver> GPU::rverts;
void GPU::init(int w, int h)
{
nParticles = w * h;
/*rverts.resize(nParticles, ver{0,0,0,0,0,0,0});
genGrid<<<nParticles/1024,1024>>>(thrust::raw_pointer_cast(&rverts[0]));*/
ver e[3] = {
ver{1024,200,2,255,0,0,255},
ver{499,288,173,0,255,0,255},
ver{462,1674,8,0,0,255,255}
};
glGenBuffers(1,&vboid);
glBindBuffer(GL_ARRAY_BUFFER,vboid);
glBufferData(GL_ARRAY_BUFFER,3*sizeof(ver),e,GL_DYNAMIC_DRAW);
glBindBuffer(GL_ARRAY_BUFFER, 0);
/*cudaGraphicsGLRegisterBuffer(&CGR,vboid,cudaGraphicsMapFlagsWriteDiscard);*/
}
void GPU::compute()
{
}
void GPU::render()
{
/*ver *verts;
size_t size;
cudaGraphicsMapResources(1, &CGR, 0);
cudaGraphicsResourceGetMappedPointer((void**)&verts, &size, CGR);
uploadVerts<<<nParticles/1024, 1024>>>(thrust::raw_pointer_cast(&rverts[0]), verts);
cudaGraphicsUnmapResources(1, &CGR, 0);
cudaDeviceSynchronize();*/
glClearColor(0, 0, 0, 0); // we clear the screen with black (else, frames would overlay...)
glClear(GL_COLOR_BUFFER_BIT); // clear the buffer
glBindBuffer(GL_ARRAY_BUFFER, vboid);
glEnableClientState(GL_VERTEX_ARRAY);
glEnableClientState(GL_COLOR_ARRAY);
glVertexPointer(3, GL_INT, 4 * sizeof(GLubyte), 0);
glColorPointer(4, GL_BYTE, 3 * sizeof(GLuint), BUFFER_OFFSET(3 * sizeof(GLuint)));
glDrawArrays(GL_TRIANGLES, 0, 3);
glDisableClientState(GL_VERTEX_ARRAY);
glDisableClientState(GL_COLOR_ARRAY);
glBindBuffer(GL_ARRAY_BUFFER, 0);
}
void GPU::GPUmain()
{
compute();
render();
}
void GPU::free()
{
cudaGraphicsUnregisterResource(CGR);
glBindBuffer(GL_ARRAY_BUFFER, vboid);
glDeleteBuffers(1, &vboid);
glBindBuffer(GL_ARRAY_BUFFER, 0);
rverts.clear();
thrust::device_vector<ver>().swap(rverts);
}
The relevant (that contain OpenGL code) parts of window.cpp:
bool Window::init()
{
//initialize SDL
if (SDL_Init(SDL_INIT_EVERYTHING) != 0) {
log << "Failed to initialize SDL!\n";
return false;
}
//set window attributes
SDL_GL_SetAttribute(SDL_GL_CONTEXT_PROFILE_MASK, SDL_GL_CONTEXT_PROFILE_CORE);
SDL_GL_SetAttribute(SDL_GL_CONTEXT_MAJOR_VERSION, 4);
SDL_GL_SetAttribute(SDL_GL_CONTEXT_MINOR_VERSION, 0);
SDL_GL_SetAttribute(SDL_GL_STENCIL_SIZE, 8);
SDL_GL_SetAttribute(SDL_GL_DOUBLEBUFFER, 1);
//create window
window = SDL_CreateWindow(
name.c_str(),
SDL_WINDOWPOS_CENTERED,
SDL_WINDOWPOS_CENTERED,
width,
height,
SDL_WINDOW_OPENGL
);
//create opengl context in the window
glcontext = SDL_GL_CreateContext(window);
SDL_GL_SetSwapInterval(1);
//check if the window was created
if (window == nullptr) {
log << "Failed to create window!\n";
return false;
}
//turn on experimental features
glewExperimental = GL_TRUE;
//initiallize glew
if (glewInit() != GLEW_OK) {
log << "Failed to Init GLEW";
return false;
}
//set drawing parameters
glViewport(0, 0, width, height);
glMatrixMode(GL_PROJECTION);
glLoadIdentity();
glOrtho(0, width, 0, height, 0, 255);
glPointSize(1);
glEnable(GL_BLEND); // Allow Transparency
glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA); // how transparency acts
std::cout << sizeof(ver);
GPU::init(width, height);
return true;
}
void Window::renderFrame()
{
GPU::render();
SDL_GL_SwapWindow(window); //swap buffers
}
If you use the fixed-function attributes and client-side capabilities, then you have to use a compatibility profile context.
See Fixed Function Pipeline and Legacy OpenGL.
(If you want to use a core profile instead, then you have to use a Vertex Array Object and shaders.) Change
SDL_GL_SetAttribute(SDL_GL_CONTEXT_PROFILE_MASK, SDL_GL_CONTEXT_PROFILE_CORE);
to
SDL_GL_SetAttribute(SDL_GL_CONTEXT_PROFILE_MASK, SDL_GL_CONTEXT_PROFILE_COMPATIBILITY);
The following geometry
ver e[3] = {
// x y z r g b a
ver{1024, 200, 2, 255, 0, 0, 255},
ver{ 499, 288, 173, 0, 255, 0, 255},
ver{462, 1674, 8, 0, 0, 255, 255}
};
is clipped by the near plane of the orthographic projection. Note that in view space the z-axis points out of the viewport, so glOrtho(0, width, 0, height, 0, 255) keeps only view-space z values between 0 and -255, while the vertices above all have positive z.
Change the orthographic projection (or invert the z coordinates of the geometry):
glOrtho(0, width, 0, height, 0, 255);
to
glOrtho(0, width, 0, height, -255, 0);
The stride parameter of glVertexPointer and glColorPointer is the byte offset between consecutive attributes, so it has to be sizeof(ver).
The type of the color attributes is GL_UNSIGNED_BYTE rather than GL_BYTE. Change
glVertexPointer(3, GL_INT, 4 * sizeof(GLubyte), 0);
glColorPointer(4, GL_BYTE, 3 * sizeof(GLuint), BUFFER_OFFSET(3 * sizeof(GLuint)));
to
glVertexPointer(3, GL_INT, sizeof(ver), 0);
glColorPointer(4, GL_UNSIGNED_BYTE, sizeof(ver), BUFFER_OFFSET(3 * sizeof(GLuint)));
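Putting the three fixes together, GPU::render() would look roughly like this (a sketch only, assuming the compatibility profile context suggested above):
void GPU::render()
{
    glClearColor(0, 0, 0, 0);
    glClear(GL_COLOR_BUFFER_BIT);
    glBindBuffer(GL_ARRAY_BUFFER, vboid);
    glEnableClientState(GL_VERTEX_ARRAY);
    glEnableClientState(GL_COLOR_ARRAY);
    // stride is the size of one whole vertex record
    glVertexPointer(3, GL_INT, sizeof(ver), 0);
    // colors are unsigned bytes, offset past the three position components
    glColorPointer(4, GL_UNSIGNED_BYTE, sizeof(ver), BUFFER_OFFSET(3 * sizeof(GLuint)));
    glDrawArrays(GL_TRIANGLES, 0, 3);
    glDisableClientState(GL_VERTEX_ARRAY);
    glDisableClientState(GL_COLOR_ARRAY);
    glBindBuffer(GL_ARRAY_BUFFER, 0);
}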
I want to use OpenGL 3.1.
I'm using a MacBook Pro with two graphics cards: an NVIDIA GeForce GT 650M (1024 MB) and Intel HD Graphics 4000 (1536 MB). Both support up to OpenGL 4.1.
Previously I was able to draw a triangle; however, my program was using version 2.1. Therefore I added SDL_GL_SetAttribute(SDL_GL_CONTEXT_PROFILE_MASK, SDL_GL_CONTEXT_PROFILE_CORE);. However, now the triangle is no longer drawn.
#include <string>
#include <iostream>
#include <SDL2/SDL.h>
#define GL3_PROTOTYPES 1
#include "../include/GL3/gl3.h"
int main(int argc, const char *argv[]) {
// Initialize the SDL
if(SDL_Init(SDL_INIT_VIDEO) < 0) {
std::cout << "Failed to initialize the SDL: " << SDL_GetError() << std::endl;
SDL_Quit();
return -1;
}
// Configure the SDL to use OpenGL 3.1
SDL_GL_SetAttribute(SDL_GL_CONTEXT_MAJOR_VERSION, 3);
SDL_GL_SetAttribute(SDL_GL_CONTEXT_MINOR_VERSION, 1);
// ======= HERE =======
SDL_GL_SetAttribute(SDL_GL_CONTEXT_PROFILE_MASK, SDL_GL_CONTEXT_PROFILE_CORE);
// ====================
SDL_GL_SetAttribute(SDL_GL_DOUBLEBUFFER, 1);
SDL_GL_SetAttribute(SDL_GL_DEPTH_SIZE, 24);
SDL_Window* window = SDL_CreateWindow("Triangle Test", SDL_WINDOWPOS_CENTERED, SDL_WINDOWPOS_CENTERED, 800, 600, SDL_WINDOW_SHOWN | SDL_WINDOW_OPENGL);
if (window == 0) {
std::cout << "Error when creating the window: " << SDL_GetError() << std::endl;
SDL_Quit();
return -1;
}
// Create the OpenGL context
SDL_GLContext contextOpenGL = SDL_GL_CreateContext(window);
// Initialization may fail
if (contextOpenGL == 0) {
std::cout << SDL_GetError() << std::endl;
SDL_DestroyWindow(window);
SDL_Quit();
return -1;
}
SDL_Event events;
bool end = false;
// Define the vertices of our triangle
static const GLfloat vertices[] = {0.0, 1.0, // top point
-0.5, 0.0, // bottom-left point
0.5, 0.0}; // bottom-right point
const int TRIANGLE_IDX = 0;
while(!end) {
SDL_WaitEvent(&events);
if(events.window.event == SDL_WINDOWEVENT_CLOSE) {
end = true;
}
// Clear the screen
glClear(GL_COLOR_BUFFER_BIT);
// Send vertices to OpenGL
glVertexAttribPointer(TRIANGLE_IDX, 2, GL_FLOAT, GL_FALSE, 0, vertices);
// Activate our vertex array
glEnableVertexAttribArray(TRIANGLE_IDX);
// Draw the points passed previously
glDrawArrays(GL_TRIANGLES, 0, 3);
glDisableVertexAttribArray(TRIANGLE_IDX);
// Refresh the screen
SDL_GL_SwapWindow(window);
}
return 0;
}
I first tried to use glGenBuffers, glBindBuffer and glBufferData, but I could not manage to make it work.
The Fixed Function Pipeline has been removed from core OpenGL 3.1 and above.
You will have to use shaders instead. This site has a nice example of how to use them.
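For a rough idea of what that involves (a generic sketch, not the linked example's code), the loop above would need a compiled shader program and, in a core profile, a VAO and VBO in place of the client-side vertex array. TRIANGLE_IDX and vertices refer to the question's code; all other names are placeholders:
// GLSL 1.40 corresponds to OpenGL 3.1.
const char* vsrc =
    "#version 140\n"
    "in vec2 pos;\n"
    "void main() { gl_Position = vec4(pos, 0.0, 1.0); }\n";
const char* fsrc =
    "#version 140\n"
    "out vec4 color;\n"
    "void main() { color = vec4(1.0, 0.5, 0.0, 1.0); }\n";

GLuint vs = glCreateShader(GL_VERTEX_SHADER);
glShaderSource(vs, 1, &vsrc, NULL);
glCompileShader(vs);
GLuint fs = glCreateShader(GL_FRAGMENT_SHADER);
glShaderSource(fs, 1, &fsrc, NULL);
glCompileShader(fs);
GLuint prog = glCreateProgram();
glAttachShader(prog, vs);
glAttachShader(prog, fs);
glBindAttribLocation(prog, TRIANGLE_IDX, "pos"); // match attribute 0 before linking
glLinkProgram(prog);

// A core profile also requires a VAO and a VBO instead of a client-side array.
GLuint vao, vbo;
glGenVertexArrays(1, &vao);
glBindVertexArray(vao);
glGenBuffers(1, &vbo);
glBindBuffer(GL_ARRAY_BUFFER, vbo);
glBufferData(GL_ARRAY_BUFFER, sizeof(vertices), vertices, GL_STATIC_DRAW);
glVertexAttribPointer(TRIANGLE_IDX, 2, GL_FLOAT, GL_FALSE, 0, (void*)0);
glEnableVertexAttribArray(TRIANGLE_IDX);

// Then, inside the loop:
glUseProgram(prog);
glDrawArrays(GL_TRIANGLES, 0, 3);
Error checking (glGetShaderiv, glGetProgramiv) is omitted for brevity but strongly recommended in real code.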
I am trying to use OpenGL with SDL, and whenever I check my version during runtime it always reports that I am using OpenGL version 2.1. To my understanding, including gl3.h gives you the 3.2+ functionality of OpenGL. Beyond that, I am specifically asking for version 4.1 of OpenGL and still apparently running 2.1. Can somebody please tell me what I am doing wrong? I am running OS X Yosemite.
#include <iostream>
//Using SDL and standard IO
#include <SDL2/SDL.h>
#define GL_GLEXT_PROTOTYPES 1
//#include <SDL2/SDL_opengl.h>
#include <GLUT/glut.h>
#include <stdio.h>
#include <OpenGL/gl3.h>
#include <glm/glm.hpp>
#include <glm/gtc/matrix_transform.hpp>
#include <string.h>
#ifdef __APPLE__
#define glGenVertexArrays glGenVertexArraysAPPLE
#define glBindVertexArray glBindVertexArrayAPPLE
#define glDeleteVertexArrays glDeleteVertexArraysAPPLE
#endif
using namespace std;
//Screen dimension constants
const int SCREEN_WIDTH = 640;
const int SCREEN_HEIGHT = 480;
bool SetOpenGLAttributes()
{
// Set our OpenGL version.
// SDL_GL_CONTEXT_CORE gives us only the newer version, deprecated functions are disabled
SDL_GL_SetAttribute(SDL_GL_CONTEXT_PROFILE_MASK, SDL_GL_CONTEXT_PROFILE_CORE);
SDL_GL_SetAttribute(SDL_GL_CONTEXT_MAJOR_VERSION, 4);
SDL_GL_SetAttribute(SDL_GL_CONTEXT_MINOR_VERSION, 1);
SDL_GL_SetAttribute(SDL_GL_DOUBLEBUFFER, 1);
return true;
}
int main( int argc, char* args[] )
{
//The window we'll be rendering to
SDL_Window* window = NULL;
//Initialize SDL
if( SDL_Init( SDL_INIT_VIDEO ) < 0 )
{
printf( "SDL could not initialize! SDL_Error: %s\n", SDL_GetError() );
}
else
{
//Create window
window = SDL_CreateWindow( "SDL Tutorial", SDL_WINDOWPOS_UNDEFINED, SDL_WINDOWPOS_UNDEFINED, SCREEN_WIDTH, SCREEN_HEIGHT, SDL_WINDOW_OPENGL );
if( window == NULL )
{
printf( "Window could not be created! SDL_Error: %s\n", SDL_GetError() );
}
else
{
//creating new context
SDL_GL_CreateContext(window);
//GLuint vertexArrayID;
// glGenVertexArrays(1, &vertexArrayID);
SetOpenGLAttributes();
printf("%s", "This is your version");
printf("%s\n", glGetString(GL_VERSION));
SDL_GL_SetSwapInterval(1);
glEnable(GL_DEPTH_TEST);
SDL_GL_SwapWindow(window);
bool running = true;
while(running){
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
glFlush();
//drawCube(.5);
SDL_GL_SwapWindow(window);
SDL_Delay(17);
}
}
}
//Destroy window
//SDL_DestroyWindow( window );
//Quit SDL subsystems
//SDL_Quit();
return 0;
}
You are calling SetOpenGLAttributes after creating the context. Try calling it before SDL_GL_CreateContext(window);.
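In fact, the attributes should be set before the window is created as well; a sketch of the reordered calls from the question:
SetOpenGLAttributes(); // request a 4.1 core profile first
window = SDL_CreateWindow( "SDL Tutorial", SDL_WINDOWPOS_UNDEFINED, SDL_WINDOWPOS_UNDEFINED, SCREEN_WIDTH, SCREEN_HEIGHT, SDL_WINDOW_OPENGL );
SDL_GLContext context = SDL_GL_CreateContext(window); // the context now honors the requested version
printf("%s\n", glGetString(GL_VERSION)); // should now report 4.1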
I wrote a simple parser for the ASCII STL format. When I try to render the triangles with the supplied normals, the resulting object is missing many faces.
This is how it should look:
What I already tried:
explicitly disabled backface culling (though it shouldn't have been active before)
ensured that the depth buffer is enabled
Here is a minimal sample program which reproduces the error:
#include <SDL2/SDL.h>
#include <SDL2/SDL_main.h>
#include <SDL2/SDL_render.h>
#include <SDL2/SDL_opengl.h>
int main(int argc, char **argv) {
SDL_Init(SDL_INIT_VIDEO);
int screen_w=1280,screen_h=720;
SDL_Window * win = SDL_CreateWindow("test", 20, 20, screen_w, screen_h,
SDL_WINDOW_OPENGL);
SDL_GLContext glcontext = SDL_GL_CreateContext(win);
STLParser stlparser;
std::ifstream file(".\\logo.stl");
stlparser.parseAscii(file);
const auto& ndata = stlparser.getNData();
const auto& vdata = stlparser.getVData();
std::cout << "number of facets: " << ndata.size() << std::endl;
SDL_GL_SetAttribute(SDL_GL_DOUBLEBUFFER, 1);
glMatrixMode(GL_PROJECTION | GL_MODELVIEW);
glLoadIdentity();
glScalef(1.f, -1.f, 1.f);
glOrtho(0, screen_w, 0, screen_h, -screen_w, screen_w);
glClearDepth(1.0f);
glDepthFunc(GL_LEQUAL);
glEnable(GL_DEPTH_TEST);
glDisable(GL_CULL_FACE);
glEnableClientState(GL_NORMAL_ARRAY);
glEnableClientState(GL_VERTEX_ARRAY);
glNormalPointer(GL_FLOAT, 0, ndata.data());
glVertexPointer(3, GL_FLOAT, 0, vdata.data());
SDL_Event event;
bool quit = false;
while (!quit) {
while (SDL_PollEvent(&event))
switch(event.type) {
case SDL_QUIT: quit = true; break;
}
;
// Drawing
glClearColor(255,255,255,255);
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
glTranslatef(screen_w/2,0,0);
glRotatef(0.5,0,1,0);
glTranslatef(-screen_w/2,0,0);
glPushMatrix();
glTranslatef(screen_w/2,screen_h/2,0);
glColor3f(0.5,0.5,0);
glDrawArrays(GL_TRIANGLES, 0, vdata.size());
glPopMatrix();
SDL_GL_SwapWindow(win);
SDL_Delay(10);
}
SDL_DestroyWindow(win);
SDL_Quit();
return 0;
}
The STLParser methods getNData() and getVData() have the following signatures:
const std::vector<std::array<float,3>>& getNData() const;
const std::vector<std::array<std::array<float,3>,3>>& getVData() const;
STLParser output should be correct, but I can provide the sources as well if needed.
What am I doing wrong?
You should change
glDrawArrays(GL_TRIANGLES, 0, vdata.size());
to
glDrawArrays(GL_TRIANGLES, 0, 3 * vdata.size());
That is, count must be the vertex count, not the triangle count: each element of vdata is one facet holding three vertices, so the original call drew only a third of the geometry.