glDrawArrays causes out of memory - C++

I am building a Qt application with OpenGL, using a VAO and VBOs. I have a simple reference grid that I want to draw with the following code:
void ReferenceGrid::initialize()
{
    // Buffer allocation and initialization
    Float3Array vertices;
    for (float pos = -GridSide; pos <= GridSide; pos += 1.0) {
        // X line
        vertices.push_back(Float3(pos, -GridSide, 0.0f));
        vertices.push_back(Float3(pos,  GridSide, 0.0f));
        // Y line
        vertices.push_back(Float3(-GridSide, pos, 0.0f));
        vertices.push_back(Float3( GridSide, pos, 0.0f));
        LineCount += 2;
    }

    s_gridVao.create();
    s_gridVao.bind();

    s_gridBuffer.create();
    s_gridBuffer.setUsagePattern(QOpenGLBuffer::StaticDraw);
    s_gridBuffer.allocate(vertices.data(), vertices.memorySize());

    // Shader allocation and initialization
    s_gridShader.create();
    if (!s_gridShader.addShaderFromSourceFile(QOpenGLShader::Vertex, ":/shaders/Grid.vert")) {
        qWarning() << "Cannot add grid vertex shader";
    }
    if (!s_gridShader.addShaderFromSourceFile(QOpenGLShader::Fragment, ":/shaders/Grid.frag")) {
        qWarning() << "Cannot add grid fragment shader";
    }
    if (!s_gridShader.link()) {
        qWarning() << "Cannot link grid shader";
    }

    s_gridBuffer.bind();
    s_gridShader.enableAttributeArray("vertexPosition");
    s_gridShader.setAttributeBuffer("vertexPosition", GL_FLOAT, 0, 3);
    s_gridBuffer.release();

    s_gridShader.release();
    s_gridVao.release();
}

void ReferenceGrid::draw()
{
    s_gridVao.bind();
    s_gridShader.bind();
    s_gridBuffer.bind();

    glfuncs->glDrawArrays(GL_LINES, 0, LineCount);

    // Returns GL_OUT_OF_MEMORY
    assert(glfuncs->glGetError() == GL_NO_ERROR);

    s_gridBuffer.release();
    s_gridShader.release();
    s_gridVao.release();
}
The problem is that after the call to glDrawArrays, an error (GL_OUT_OF_MEMORY) is returned, and I cannot understand what is going on.
Has anyone encountered this problem before, and does anyone have a solution?

I forgot to bind the buffer before allocating it. I thought Qt did this automatically, but I was wrong: QOpenGLBuffer::allocate() assumes the buffer has already been bound to the current context, so without the bind the vertex data never reaches the buffer. The right thing to do is:
s_gridBuffer.create();
s_gridBuffer.setUsagePattern(QOpenGLBuffer::StaticDraw);
s_gridBuffer.bind();
s_gridBuffer.allocate(vertices.data(), vertices.memorySize());
I found a good tutorial on the topic, http://www.kdab.com/opengl-in-qt-5-1-part-2/

Related

OpenGL 3.3/GLSL & C++ error: "must write to gl_Position"

I'm currently trying to get a triangle to render using OpenGL 3.3 and C++ with the GLM, GLFW3 and GLEW libraries, but I get an error when trying to create my shader program.
Vertex info
(0) : error C5145: must write to gl_Position
I already tried to find out why this happens and asked on other forums, but no one knew the reason. There are three possible places where this error could originate: in my main.cpp, where I create the window, the context, the program, the VAO, etc. ...
#include <GL/glew.h>
#include <GLFW/glfw3.h>
#include <glm/glm.hpp>
#include <iostream>
#include <string>
#include "util/shaderutil.hpp"

#define WIDTH 800
#define HEIGHT 600

using namespace std;
using namespace glm;

GLuint vao;
GLuint shaderprogram;

void initialize() {
    glGenVertexArrays(1, &vao);
    glBindVertexArray(vao);
    glClearColor(0.5, 0.7, 0.9, 1.0);
    string vShaderPath = "shaders/shader.vert";
    string fShaderPath = "shaders/shader.frag";
    shaderprogram = ShaderUtil::createProgram(vShaderPath.c_str(), fShaderPath.c_str());
}

void render() {
    glClear(GL_COLOR_BUFFER_BIT);
    glUseProgram(shaderprogram);
    glDrawArrays(GL_TRIANGLES, 0, 3);
}

void clean() {
    glDeleteProgram(shaderprogram);
}

int main(int argc, char** argv) {
    if (!glfwInit()) {
        cerr << "GLFW ERROR!" << endl;
        return -1;
    }
    glfwWindowHint(GLFW_OPENGL_FORWARD_COMPAT, GL_TRUE);
    glfwWindowHint(GLFW_OPENGL_PROFILE, GLFW_OPENGL_CORE_PROFILE);
    glfwWindowHint(GLFW_CONTEXT_VERSION_MAJOR, 3);
    glfwWindowHint(GLFW_CONTEXT_VERSION_MINOR, 3);
    GLFWwindow* win = glfwCreateWindow(WIDTH, HEIGHT, "Rendering a triangle!", NULL, NULL);
    glfwMakeContextCurrent(win);

    glewExperimental = GL_TRUE;
    if (glewInit() != GLEW_OK) {
        cerr << "GLEW ERROR!" << endl;
        return -1;
    } else {
        // GLEW bug: glewInit() sets the error flag to GL_INVALID_ENUM; reset it
        glGetError();
    }

    initialize();
    while (!glfwWindowShouldClose(win)) {
        render();
        glfwPollEvents();
        glfwSwapBuffers(win);
    }
    clean();
    glfwDestroyWindow(win);
    glfwTerminate();
    return 0;
}
...the ShaderUtil class, where I read in the shader files, compile them, do error checking and return a final program...
#include "shaderutil.hpp"
#include <iostream>
#include <string>
#include <fstream>
#include <vector>
using namespace std;
GLuint ShaderUtil::createProgram(const char* vShaderPath, const char* fShaderPath) {
/*VARIABLES*/
GLuint vertexShader;
GLuint fragmentShader;
GLuint program;
ifstream vSStream(vShaderPath);
ifstream fSStream(fShaderPath);
string vSCode, fSCode;
/*CREATING THE SHADER AND PROGRAM OBJECTS*/
vertexShader = glCreateShader(GL_VERTEX_SHADER);
fragmentShader = glCreateShader(GL_FRAGMENT_SHADER);
program = glCreateProgram();
/*READING THE SHADERCODE*/
/*CONVERTING THE SHADERCODE TO CHAR POINTERS*/
while (vSStream.is_open()) {
string line = "";
while (getline(vSStream, line)) {
vSCode += "\n" + line;
}
vSStream.close();
}
const char* vSCodePointer = vSCode.c_str();
while (fSStream.is_open()) {
string line = "";
while (getline(fSStream, line)) {
fSCode += "\n" + line;
}
fSStream.close();
}
const char* fSCodePointer = fSCode.c_str();
/*COMPILING THE VERTEXSHADER*/
glShaderSource(vertexShader, 1, &vSCodePointer, NULL);
glCompileShader(vertexShader);
/*VERTEXSHADER ERROR CHECKING*/
GLint vInfoLogLength;
glGetShaderiv(vertexShader, GL_INFO_LOG_LENGTH, &vInfoLogLength);
if (vInfoLogLength > 0) {
vector<char> vInfoLog(vInfoLogLength + 1);
glGetShaderInfoLog(vertexShader, vInfoLogLength, &vInfoLogLength, &vInfoLog[0]);
for(int i = 0; i < vInfoLogLength; i++) {
cerr << vInfoLog[i];
}
}
/*COMPILING THE FRAGMENTSHADER*/
glShaderSource(fragmentShader, 1, &fSCodePointer, NULL);
glCompileShader(fragmentShader);
/*FRAGMENTSHADER ERROR CHECKING*/
GLint fInfoLogLength;
glGetShaderiv(fragmentShader, GL_INFO_LOG_LENGTH, &fInfoLogLength);
if (fInfoLogLength > 0) {
vector<char> fInfoLog(fInfoLogLength + 1);
glGetShaderInfoLog(fragmentShader, fInfoLogLength, &fInfoLogLength, &fInfoLog[0]);
for(int i = 0; i < fInfoLogLength; i++) {
cerr << fInfoLog[i];
}
}
/*LINKING THE PROGRAM*/
glAttachShader(program, vertexShader);
glAttachShader(program, fragmentShader);
glLinkProgram(program);
//glValidateProgram(program);
/*SHADERPROGRAM ERROR CHECKING*/
GLint programInfoLogLength;
glGetProgramiv(program, GL_INFO_LOG_LENGTH, &programInfoLogLength);
if (programInfoLogLength > 0) {
vector<char> programInfoLog(programInfoLogLength + 1);
glGetProgramInfoLog(program, programInfoLogLength, &programInfoLogLength, &programInfoLog[0]);
for(int i = 0; i < programInfoLogLength; i++) {
cerr << programInfoLog[i];
}
}
/*CLEANUP & RETURNING THE PROGRAM*/
glDeleteShader(vertexShader);
glDeleteShader(fragmentShader);
return program;
}
...and the vertex shader itself, which is nothing special. I just create an array of vertices and push them into gl_Position.
#version 330 core
void main() {
    const vec3 VERTICES[3] = vec3[3] {
        0.0, 0.5, 0.5,
        0.5,-0.5, 0.5,
        -0.5,-0.5, 0.5
    };
    gl_Position.xyz = VERTICES;
    gl_Position.w = 1.0;
}
The fragment shader just outputs a vec4 called color, which is set to (1.0, 0.0, 0.0, 1.0). The compiler doesn't show me any errors, but when I try to execute the program, I just get a window without the triangle, plus the error message shown above.
There are a few things I already tried to solve this problem, but none of them worked:
I tried creating the vertices inside my main.cpp and pushing them into the vertex shader via a vertex buffer object. I changed some code, inspired by opengl-tutorials.org, and finally got a triangle to show up, but the shaders weren't applied: only the vertices from my main.cpp showed up on the screen, and the "must write to gl_Position" problem remained.
I tried using glGetError() in different places and got two different error codes: 1280 (GL_INVALID_ENUM) and 1282 (GL_INVALID_OPERATION). The first one was caused by a bug inside GLEW, which changes the state from GL_NO_ERROR to GL_INVALID_ENUM during initialization; I was told to ignore it and just reset the state to GL_NO_ERROR by calling glGetError() once after initializing GLEW. The other error code appeared after calling glUseProgram() in the render function. I wanted to get some information out of it, but the gluErrorString() function is deprecated in OpenGL 3.3 and I couldn't find an alternative provided by any of my libraries.
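A minimal lookup of your own is enough to decode such codes in a core profile; here is a sketch (the function name glErrorToString is made up for illustration):
const char* glErrorToString(GLenum err) {
    // Error values defined by core OpenGL 3.3; extend as needed
    switch (err) {
        case GL_NO_ERROR:                      return "GL_NO_ERROR";
        case GL_INVALID_ENUM:                  return "GL_INVALID_ENUM";                  // 1280
        case GL_INVALID_VALUE:                 return "GL_INVALID_VALUE";                 // 1281
        case GL_INVALID_OPERATION:             return "GL_INVALID_OPERATION";             // 1282
        case GL_OUT_OF_MEMORY:                 return "GL_OUT_OF_MEMORY";                 // 1285
        case GL_INVALID_FRAMEBUFFER_OPERATION: return "GL_INVALID_FRAMEBUFFER_OPERATION"; // 1286
        default:                               return "unknown GL error";
    }
}
// Usage: cerr << glErrorToString(glGetError()) << endl;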
I tried validating my program via glValidateProgram() after linking it. When I did this, the gl_Position error message didn't show up anymore, but the triangle didn't either, so I assume this function just clears the info log to put in new information about the validation process.
So right now, I have no idea what causes this error.
The problem got solved! I printed the source that OpenGL was trying to compile and saw that the ifstream had not loaded any source at all. Things I had to change:
Change the "while (vVStream.is_open())" to "if (vVStream.is_open())".
Error check, if the condition I listed first is executed (add "else {cerr << "OH NOES!" << endl}
Add a second parameter to the ifstreams I'm creating: change "ifstream(path)" to "ifstream(path, ios::in)"
Change the path I'm passing from a relative path (e.g "../shaders/shader.vert") to an absolute path (e.g "/home/USERNAME/Desktop/project/src/shaders/shader.vert"); this somehow was necessary, because the relative path wasn't understood; using an absolute one isn't a permanent solution though, but it fixes the problem of not finding the shader.
Now it actually loads and compiles the shaders; there are still some errors to fix, but if someone has the same "must write to gl_Position" problem, double, no, triple-check whether the source you're trying to compile is actually loaded and whether the ifstream is actually open.
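Putting the first three fixes together, the vertex-shader reading block becomes something like this (a sketch of the changes listed above, not my exact final code):
ifstream vSStream(vShaderPath, ios::in);
if (vSStream.is_open()) {
    // Read the whole file, line by line, into vSCode
    string line;
    while (getline(vSStream, line)) {
        vSCode += "\n" + line;
    }
    vSStream.close();
} else {
    // Fail loudly instead of silently compiling an empty string
    cerr << "Could not open vertex shader file: " << vShaderPath << endl;
}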
I thank everyone who tried to help me, especially @MtRoad. This problem almost made me go bald.
Vertex shaders run on each vertex individually, so gl_Position is the output vertex after whatever transforms you wish to apply to the vertex currently being processed; trying to emit multiple vertices from a vertex shader doesn't make sense. Geometry shaders can emit additional geometry on the fly and can be used for that, for example to create motion blur.
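If you want to keep the constant-array approach from the question, one conventional fix is to index the array with gl_VertexID, so each invocation still writes exactly one vertex. A sketch (note that GLSL 330 array constructors take parentheses and vec3 elements, not braces and bare floats):
#version 330 core
void main() {
    // Each invocation writes one vertex, selected by gl_VertexID
    const vec3 VERTICES[3] = vec3[3](
        vec3( 0.0,  0.5, 0.5),
        vec3( 0.5, -0.5, 0.5),
        vec3(-0.5, -0.5, 0.5)
    );
    gl_Position = vec4(VERTICES[gl_VertexID], 1.0);
}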
For typical drawing, you bind a vertex array object like you did, but put the data into buffers called vertex buffer objects (VBOs) and tell OpenGL how to interpret the data's "attributes" using glVertexAttribPointer, which you can then read in your shaders.
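A minimal sketch of that setup, assuming the VAO from initialize() is still bound and the vertex shader declares an attribute named "position" (the attribute name is illustrative):
// Upload three vertices into a VBO
GLfloat vertices[] = {
     0.0f,  0.5f, 0.5f,
     0.5f, -0.5f, 0.5f,
    -0.5f, -0.5f, 0.5f,
};
GLuint vbo;
glGenBuffers(1, &vbo);
glBindBuffer(GL_ARRAY_BUFFER, vbo);
glBufferData(GL_ARRAY_BUFFER, sizeof(vertices), vertices, GL_STATIC_DRAW);

// Describe the attribute layout: 3 floats per vertex, tightly packed
GLint posLoc = glGetAttribLocation(shaderprogram, "position");
glEnableVertexAttribArray(posLoc);
glVertexAttribPointer(posLoc, 3, GL_FLOAT, GL_FALSE, 0, (void*)0);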
I recently encountered this issue and suspect the cause may be the same as yours.
I'm not familiar with g++; however, on VS one's build environment and the location your .exe is running from when you're debugging can have an impact on this. For example, one such setting:
Project Properties -> General -> Output directory ->
Visual Studios Express - change debug output directory
And another similar issue here "The system cannot find the file specified" when running C++ program
You need to make sure, if you've changed the build environment and you're debugging from a different output directory, that all the relevant files are reachable relative to where the .exe is being executed from.
This would explain why you had to resort to "if (vSStream.is_open())" (which I suspect fails) and then subsequently use the full file path of the shaders, as the originally referenced files are not relative to the executable.
My issue was exactly like yours, but only in release mode. Once I copied my shaders into the release folder, where the .exe could access them, the problem went away.

PhysX - simulate() never ends if GPU used

I'm still working on a physics system for simulating fluids. I rewrote my application to use PhysX 3.3.0 and to be more object-oriented, and now I have a problem that I haven't been able to resolve for a week or two.
This is my initialization of the PhysX context:
void PhysXSPH::initContext(void){
    static LogPxErrorCallback gLogPxErrorCallback;
    static PxDefaultAllocator gDefaultAllocatorCallback;
    mFoundation = PxCreateFoundation(PX_PHYSICS_VERSION, gDefaultAllocatorCallback, gLogPxErrorCallback);
    check(mFoundation, "PxFoundation creating failed!");

    static PxProfileZoneManager *mProfileZoneManager = &PxProfileZoneManager::createProfileZoneManager(mFoundation);
    check(mProfileZoneManager, "PxProfileZoneManager creation failed!");

    bool recordMemoryAllocations = true;
    mPhysics = PxCreateBasePhysics(PX_PHYSICS_VERSION, *mFoundation,
        PxTolerancesScale(), recordMemoryAllocations, mProfileZoneManager);
    check(mPhysics, "PxPhysics creating failed!");

    PxRegisterParticles(*mPhysics);
    if(!PxInitExtensions(*mPhysics)){
        check(NULL, "PxInitExtensions failed!");
    }

    static PxSimulationFilterShader gDefaultFilterShader = PxDefaultSimulationFilterShader;
    PxSceneDesc sceneDesc(mPhysics->getTolerancesScale());
    sceneDesc.gravity = PxVec3(0.0f, -9.81f, 0.0f);
    if(!sceneDesc.cpuDispatcher){
        mCpuDispatcher = PxDefaultCpuDispatcherCreate(4);
        check(mCpuDispatcher, "PxDefaultCpuDispatcherCreate failed!");
        sceneDesc.cpuDispatcher = mCpuDispatcher;
    }
    if(!sceneDesc.filterShader){
        sceneDesc.filterShader = gDefaultFilterShader;
    }

#ifdef PX_WINDOWS
    PxCudaContextManagerDesc cudaContextManagerDesc;
    mCudaContextManager = PxCreateCudaContextManager(*mFoundation, cudaContextManagerDesc, mProfileZoneManager);
    if(mCudaContextManager){
        if(!mCudaContextManager->contextIsValid()){
            mCudaContextManager->release();
            mCudaContextManager = NULL;
            CLOG(ERROR, "physX") << "Invalid CUDA context.";
            exit(EXIT_FAILURE);
        }
        if(!sceneDesc.gpuDispatcher){
            sceneDesc.gpuDispatcher = mCudaContextManager->getGpuDispatcher();
        }
        CLOG(INFO, "physX") << "CUDA context created.";
    } else {
        CLOG(ERROR, "physX") << "Creating CUDA context manager failed.";
        exit(EXIT_FAILURE);
    }
#endif

    mScene = mPhysics->createScene(sceneDesc);
    check(mScene, "createScene failed!");
    createScene(mScene);
}
...and this is the initialization of the PhysX scene, although the problem occurs even with an empty scene:
void PhysXSPH::createScene(PxScene *mScene){
    mScene->setVisualizationParameter(PxVisualizationParameter::eSCALE, 1.0);
    mScene->setVisualizationParameter(PxVisualizationParameter::eCOLLISION_SHAPES, 1.0f);
    createPlanes(mScene);
    createParticles(mScene);
    CLOG(INFO, "physX") << "PhysX scene created.";
}

void PhysXSPH::createPlanes(PxScene *mScene){
    PxMaterial* mMaterial = mPhysics->createMaterial(0.5, 0.5, 0.5);

    // Create actors
    // 1) Create ground plane
    PxReal d = 0.0f;
    PxTransform pose = PxTransform(PxVec3(0.0f, 0, 0.0f), PxQuat(PxHalfPi, PxVec3(0.0f, 0.0f, 1.0f)));
    PxRigidStatic* plane = mPhysics->createRigidStatic(pose);
    check(plane, "Creating plane failed!");

    // Create 4 more planes for the aquarium
    PxRigidStatic* plane2 = PxCreatePlane(*mPhysics, PxPlane(PxVec3(-4.0f, 0.0, 0.0), PxVec3(1.0, 0.0, 0.0)), *mMaterial);
    PxRigidStatic* plane3 = PxCreatePlane(*mPhysics, PxPlane(PxVec3(4.0f, 0.0, 0.0), PxVec3(-1.0, 0.0, 0.0)), *mMaterial);
    PxRigidStatic* plane4 = PxCreatePlane(*mPhysics, PxPlane(PxVec3(0.0f, 0.0, -4.0f), PxVec3(0.0, 0.0, 1.0)), *mMaterial);
    PxRigidStatic* plane5 = PxCreatePlane(*mPhysics, PxPlane(PxVec3(0.0f, 0.0, 4.0f), PxVec3(0.0, 0.0, -1.0)), *mMaterial);

    // Create shapes and add the actors to the scene
    PxShape* shape = plane->createShape(PxPlaneGeometry(), *mMaterial);
    check(shape, "Creating shape failed!");
    mScene->addActor(*plane);

    PxShape* shape2 = plane2->createShape(PxPlaneGeometry(), *mMaterial);
    check(shape2, "Creating shape failed!");
    mScene->addActor(*plane2);

    PxShape* shape3 = plane3->createShape(PxPlaneGeometry(), *mMaterial);
    check(shape3, "Creating shape failed!");
    mScene->addActor(*plane3);

    PxShape* shape4 = plane4->createShape(PxPlaneGeometry(), *mMaterial);
    check(shape4, "Creating shape failed!");
    mScene->addActor(*plane4);

    PxShape* shape5 = plane5->createShape(PxPlaneGeometry(), *mMaterial);
    check(shape5, "Creating shape failed!");
    mScene->addActor(*plane5);
}

void PhysXSPH::createParticles(PxScene *mScene){
    // Set immutable properties
    bool perParticleRestOffset = false;

    // Get data from the scene model
    int maxParticles = scene->getMaxParticles();
    int xDim = scene->getXDim();
    int yDim = scene->getYDim();
    int zDim = scene->getZDim();

    // Create the particle system in the PhysX SDK
    particleSystem = mPhysics->createParticleFluid(maxParticles, perParticleRestOffset);
    check(particleSystem, "Creating particle system failed!");
    particleSystem->setRestOffset(particleRadius);
    particleSystem->setRestParticleDistance(particleRadius);
    particleSystem->setParticleBaseFlag(PxParticleBaseFlag::eGPU, true);
    // TODO set fluid parameters

    // Add the particle system to the scene, in case creation was successful
    if (particleSystem)
        mScene->addActor(*particleSystem);

    indexes = new PxU32[maxParticles];
    particle_positions = new PxVec3[maxParticles];
    int index = 0;
    for(int x = 0; x < xDim; x++){
        for(int y = 0; y < yDim; y++){
            for(int z = 0; z < zDim; z++){
                indexes[index] = (PxU32)index;
                int v = 3 * index;
                particle_positions[index] = PxVec3((physx::PxReal)(scene->m_vPos[v]), (physx::PxReal)(scene->m_vPos[v+1]), (physx::PxReal)(scene->m_vPos[v+2]));
                //CLOG(INFO, "physX")<<index<<"["<<particle_positions[index].x<<"; "<<particle_positions[index].y<<"; "<<particle_positions[index].z<<"]";
                index++;
            }
        }
    }

    PxParticleCreationData particleCreationData;
    particleCreationData.numParticles = maxParticles;
    particleCreationData.indexBuffer = PxStrideIterator<const PxU32>(indexes);
    particleCreationData.positionBuffer = PxStrideIterator<const PxVec3>(particle_positions);

    // Create the particles in the PxParticleSystem
    bool success = particleSystem->createParticles(particleCreationData);
    if(!success){
        CLOG(ERROR, "physX") << "Creating particles failed.";
        exit(EXIT_FAILURE);
    }
}
If the code inside the #ifdef PX_WINDOWS block is commented out, everything works fine: the fluid flows like it should. But when I try to use my GPU, the application freezes on the first fetchResults() invocation (the simulate() method never finishes its job). I have no error log; it just freezes. It happens regardless of whether it is a Debug or Release build, 32-bit or 64-bit.
I have a GeForce 560 Ti and use PhysX SDK 3.3.0. I link (e.g. for a win64 Debug build) against:
opengl32.lib glew32.lib glfw3.lib PhysX3DEBUG_x64.lib
PhysX3CommonDEBUG_x64.lib PxTaskDEBUG.lib PhysX3ExtensionsDEBUG.lib
PhysXProfileSDKDEBUG.lib
using:
nvToolsExt64_1.dll PhysX3CHECKED_x64.dll PhysX3CommonCHECKED_x64.dll
PhysX3GpuCHECKED_x64.dll PhysX3GpuDEBUG_x64.dll
I tried different versions of the .libs and added whatever .dlls the application asked for, but every combination ended up freezing in fetchResults().
I have no idea where to look for the mistake. Everything looks fine. I'd be thankful for any help!
I know this is an old thread but I had the exact same problem when I switched from 3.2.5 to 3.3.0.
I did find a solution. The problem is that you initialize the extensions twice here: you are using PxCreateBasePhysics to create your SDK object, which does some extra work in the background; namely, it calls PxInitExtensions, if I'm not mistaken.
The fix is to just change the PxCreateBasePhysics call to the standard PxCreatePhysics call with the exact same arguments. That one doesn't do any additional setup behind the scenes. Just leaving out the PxInitExtensions call might work as well, but I have only tried the first idea.
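In the initContext() above, the suggested change would look roughly like this (a sketch reusing the question's own variables, not code from the PhysX samples):
// PxCreatePhysics does no extension setup of its own, so the explicit
// PxInitExtensions call below now runs exactly once
mPhysics = PxCreatePhysics(PX_PHYSICS_VERSION, *mFoundation,
                           PxTolerancesScale(), recordMemoryAllocations,
                           mProfileZoneManager);
check(mPhysics, "PxPhysics creating failed!");

PxRegisterParticles(*mPhysics);
if (!PxInitExtensions(*mPhysics)) {
    check(NULL, "PxInitExtensions failed!");
}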
It is odd that this only causes the freeze when the GPU is in use; maybe NVIDIA should look into it.

Load a .obj model with ASSIMP in DirectX9

This is my first time posting. I have an issue with the 3D model loading library ASSIMP: I am trying to integrate it into a sample Direct3D 9 app, and it is not going well. I am an experienced C++ programmer, so it shouldn't take too much hassle to help me :). I have made several D3D9 apps in the past and rendered manual primitives, but now I am trying to render an OBJ model loaded with ASSIMP. When I try to render it, NOTHING is rendered at all, not even one poly. This is very frustrating, as I have spent a week trying to fix this one problem, and searching on Google returns no useful results. You guys are honestly my last hope, lol. OK, here is my code; please take a look and help me understand what I am doing wrong. Also, if you know of a link to a DirectX 9 ASSIMP example, that would be appreciated, as Google only shows OpenGL :(. Any help will be much appreciated, thanks :)
bool Mesh::LoadMesh(const std::string& Filename)
{
    Assimp::Importer Importer;
    const aiScene *pScene = NULL;
    const aiMesh *pMesh = NULL;

    pScene = Importer.ReadFile(Filename.c_str(),
        aiProcess_Triangulate | aiProcess_ConvertToLeftHanded |
        aiProcess_ValidateDataStructure | aiProcess_FindInvalidData);
    if (!pScene)
    {
        printf("Error parsing '%s': '%s'\n", Filename.c_str(), Importer.GetErrorString());
        return false;
    }

    pMesh = pScene->mMeshes[0];
    if (!pMesh)
    {
        printf("Error Finding Model In file. Did you export an empty scene?");
        return false;
    }

    for (unsigned int i = 0; i < pMesh->mNumFaces; i++)
    {
        if (pMesh->mFaces[i].mNumIndices == 3)
        {
            m_NumIndices = m_NumIndices + 3;
        }
        else
        {
            printf("Error parsing Faces. Try to Re-Export model from 3d package!");
            return false;
        }
    }

    m_NumFaces = pMesh->mNumFaces;
    m_NumVertecies = pMesh->mNumVertices;

    ZeroMemory(&m_pVB, sizeof(m_pVB));
    m_pRenderDevice->CreateVertexBuffer(sizeof(Vertex) * m_NumVertecies, 0, VertexFVF, D3DPOOL_DEFAULT, &m_pVB, NULL);
    m_pVB->Lock(0, 0, (void**)&m_pVertecies, 0);

    for (int i = 0; i < pMesh->mNumVertices; i++)
    {
        Vertex *pvertex = new Vertex(
            D3DXVECTOR3(pMesh->mVertices[i].x, pMesh->mVertices[i].y, pMesh->mVertices[i].z),
            D3DXVECTOR2(pMesh->mTextureCoords[0][i].x, pMesh->mTextureCoords[0][i].y),
            D3DXVECTOR3(pMesh->mNormals[i].x, pMesh->mNormals[i].y, pMesh->mNormals[i].z));
        m_pVertecies[i] = pvertex;
    }
    m_pVB->Unlock();
    return true;
}

void Mesh::Render()
{
    m_pRenderDevice->SetStreamSource(0, m_pVB, 0, sizeof(Vertex));
    m_pRenderDevice->SetFVF(VertexFVF);
    m_pRenderDevice->DrawPrimitive(D3DPT_TRIANGLELIST, 0, m_NumFaces);
}

void Render()
{
    D3DCOLOR Color = D3DCOLOR_ARGB(255, 0, 0, 255);

    // Clear the Z and back buffers
    g_pRenderDevice->Clear(0, NULL, D3DCLEAR_TARGET | D3DCLEAR_ZBUFFER, Color, 1.0f, 0);
    g_pRenderDevice->BeginScene();

    InitializeViewMatrix();

    D3DXMATRIX Scale;
    D3DXMatrixScaling(&Scale, CameraScaleX, CameraScaleY, CameraScaleZ);
    D3DXMATRIX Rotation;
    CameraRotX += 0.025;
    D3DXMatrixRotationYawPitchRoll(&Rotation, CameraRotX, CameraRotY, CameraRotZ);
    g_pRenderDevice->SetTransform(D3DTS_WORLD, &D3DXMATRIX(Scale * Rotation));

    if (pMesh)
    {
        pMesh->Render();
    }

    g_pRenderDevice->EndScene();
    g_pRenderDevice->Present(NULL, NULL, NULL, NULL);
}
void Render()
{
D3DCOLOR Color = D3DCOLOR_ARGB(255, 0, 0, 255);
//Clear the Z and Back buffers
g_pRenderDevice->Clear(0, NULL, D3DCLEAR_TARGET | D3DCLEAR_ZBUFFER, Color, 1.0f, 0);
g_pRenderDevice->BeginScene();
InitializeViewMatrix();
D3DXMATRIX Scale;
D3DXMatrixScaling(&Scale, CameraScaleX, CameraScaleY, CameraScaleZ);
D3DXMATRIX Rotation;
CameraRotX += 0.025;
D3DXMatrixRotationYawPitchRoll(&Rotation, CameraRotX, CameraRotY, CameraRotZ);
g_pRenderDevice->SetTransform(D3DTS_WORLD, &D3DXMATRIX(Scale * Rotation));
if (pMesh)
{
pMesh->Render();
}
g_pRenderDevice->EndScene();
g_pRenderDevice->Present(NULL, NULL, NULL, NULL);
}
I might be getting old, but I can't find anything wrong in this code. Are you sure your pointers are all pointing where they should?
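One place worth double-checking along those lines is the lock-and-fill loop in LoadMesh: it news up a Vertex and stores the pointer in the locked buffer, while the buffer needs to contain the Vertex data itself. Whether that is the actual cause here is a guess, but a sketch of the conventional fill pattern (using a local Vertex* for the locked pointer) would be:
// Copy vertex data by value into the locked buffer; storing heap pointers
// in it would fill the vertex stream with garbage as far as D3D is concerned
Vertex* pVertices = NULL;
m_pVB->Lock(0, 0, (void**)&pVertices, 0);
for (unsigned int i = 0; i < pMesh->mNumVertices; i++)
{
    pVertices[i] = Vertex(
        D3DXVECTOR3(pMesh->mVertices[i].x, pMesh->mVertices[i].y, pMesh->mVertices[i].z),
        D3DXVECTOR2(pMesh->mTextureCoords[0][i].x, pMesh->mTextureCoords[0][i].y),
        D3DXVECTOR3(pMesh->mNormals[i].x, pMesh->mNormals[i].y, pMesh->mNormals[i].z));
}
m_pVB->Unlock();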

SIGSEGV error while using C++ on Linux with OpenGL and SDL

A few other guys and I are taking a crack at building a simple side-scroller type game. However, I cannot get hold of them to help answer my question, so I put it to you: the following code leaves me with a SIGSEGV error in the place noted below. If anyone can tell me why, I would really appreciate it. If you need any more info, I will be watching this closely.
Main.cpp
Vector2 dudeDim(60,60);
Vector2 dudePos(300, 300);
Entity *test = new Entity("img/images.jpg", dudeDim, dudePos, false);
leads to:
Entity.cpp
Entity::Entity(std::string filename, Vector2 size, Vector2 position, bool passable)
    : mTexture(filename)
{
    mTexture.load(false);
    mDimension2D = size;
    mPosition2D = position;
    mPassable = passable;
}
leads to:
Textures.cpp
void Texture::load(bool generateMipmaps)
{
    FREE_IMAGE_FORMAT imgFormat = FIF_UNKNOWN;
    FIBITMAP *dib(0);

    imgFormat = FreeImage_GetFileType(mFilename.c_str(), 0);
    //std::cout << "File format: " << imgFormat << std::endl;

    // Check if the plugin has reading capabilities and load the file
    if (FreeImage_FIFSupportsReading(imgFormat))
        dib = FreeImage_Load(imgFormat, mFilename.c_str());
    if (!dib)
        std::cout << "Error loading texture files!" << std::endl;

    BYTE* bDataPointer = FreeImage_GetBits(dib);   // Retrieve the image data
    mWidth = FreeImage_GetWidth(dib);              // Get the image width and height
    mHeight = FreeImage_GetHeight(dib);
    mBitsPerPixel = FreeImage_GetBPP(dib);

    if (!bDataPointer || !mWidth || !mHeight)
        std::cout << "Error loading texture files!" << std::endl;

    // Generate and bind ID for this texture
    glGenTextures(1, &mId);   // <-- !!!ERROR (SIGSEGV) HERE!!!
    glBindTexture(GL_TEXTURE_2D, mId);

    int format = mBitsPerPixel == 24 ? GL_BGR_EXT : mBitsPerPixel == 8 ? GL_LUMINANCE : 0;
    int iInternalFormat = mBitsPerPixel == 24 ? GL_RGB : GL_DEPTH_COMPONENT;

    if (generateMipmaps)
        glTexParameteri(GL_TEXTURE_2D, GL_GENERATE_MIPMAP, GL_TRUE);

    glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, mWidth, mHeight, 0, format, GL_UNSIGNED_BYTE, bDataPointer);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);  // Linear filtering
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);  // Linear filtering
    //std::cout << "texture generated " << mId << std::endl;

    FreeImage_Unload(dib);
}
After reading Peter's suggestion, I have changed my main.cpp file to:
#include <iostream>
#include <vector>
#include "Game.h"

using namespace std;

int main(int argc, char** argv)
{
    Game theGame;

    /* Initialize game control objects and resources */
    if (theGame.onInit() != false)
    {
        return theGame.onExecute();
    }
    else
    {
        return -1;
    }
}
...and it would seem the SIGSEGV error is gone; I'm now left with something not initializing. So thank you, Peter, you were correct; now I'm off to solve this issue.
OK, so this is obviously only a small amount of the code, but in order to save time and a bit of sanity, all the code is available at:
GitHub Repo
So after looking at your code, I can say that it's probably that you have not initialized your OpenGL context before executing that code.
You need to call your Game::onInit(), which also calls RenderEngine::initGraphics(), before making any calls to OpenGL, which you currently don't. Your current call chain is: main() -> Game ctor (calls the rendering engine ctor, but that ctor doesn't initialize SDL and OpenGL) -> Entity ctor -> load texture.
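In other words, create the Entity only after onInit() has run. A sketch using the names from the posted code (the Entity lifetime handling here is just illustrative):
int main(int argc, char** argv)
{
    Game theGame;

    // Set up SDL and create the OpenGL context first...
    if (theGame.onInit() == false)
    {
        return -1;
    }

    // ...and only then create entities that load textures, because
    // glGenTextures() needs a current OpenGL context to work with
    Entity* test = new Entity("img/images.jpg", Vector2(60, 60),
                              Vector2(300, 300), false);

    int result = theGame.onExecute();
    delete test;
    return result;
}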
For details, look at the OpenGL Wiki FAQ.

OpenGL adds unwanted vertex at origin when drawing triangle strips

I'm having an issue in my OpenGL application where OpenGL appears to add a vertex at the origin of some of my meshes. The issue has me befuddled because it seems to only affect meshes composed of triangle strips. The basic procedure used to create the screenshot at the end of the post is as follows:
Use simple math to generate vertices that lie on the unit sphere, as well as texture coordinates and element indices.
Load the data from step 1 into buffers bound to a VAO.
Every frame: bind the VAO and draw with glDrawElements()
Prior to posting, I checked the vertex data for (a simplified version of) my mesh at each stage in the process, and at no time is there a vertex at the origin (in model-space). However, if you look at the screenshot at the end of the post, there is obviously a vertex at the center of the mesh, and its presence is deforming my mesh and causing issues with texturing.
I'm pretty inexperienced with 3D graphics, and have never heard of an issue like this before, so I'm not sure what other useful information I can provide. If there's anything else, let me know in the comments.
(The same model of the Earth drawn as GL_POINTS, GL_LINE_LOOP, and GL_TRIANGLE_STRIP, respectively)
UPDATE: As requested, here's the code that sets up and draws my vertex arrays:
public void init(GL3 gl){
    //If there is no graphics data, this object isn't going to get rendered and we should just leave
    if(gData == null){
        return;
    }

    //Prepare the index data
    short[] indexData = gData.getIndexArray();
    indexCount = indexData.length;
    //This is required because interleaving the data
    //screws with how the data is laid out in memory
    for(int i = 0; i < indexCount; i++){
        indexData[i] *= gData.getVertexData().size();
        indexData[i] = indexData[i] < 0 ? -1 : indexData[i];
    }

    //Put the program together
    glProgram = gl.glCreateProgram();
    for(Shader shader : gData.getShaders()){
        shader.compile(gl);
        gl.glAttachShader(glProgram, shader.location);
    }
    gl.glLinkProgram(glProgram);
    int[] result = new int[1];
    gl.glGetProgramiv(glProgram, GL3.GL_LINK_STATUS, result, 0);
    if(result[0] != GL3.GL_TRUE){
        byte[] info = new byte[512];
        gl.glGetProgramInfoLog(glProgram, 512, result, 0, info, 0);
        Logger.log(new String(info), Logger.ERROR, "GameObject -- init -- Link Error");
    }

    //Interleave the per-vertex data
    float[] vertexData = interleave(gData.getVertexData().toArray(new VertexData[0]));

    //Generate buffers
    gl.glGenVertexArrays(1, glBuffer, 0);
    gl.glGenBuffers(2, glBuffer, 1);
    if(checkGLErrors(gl, "After generating VAO and buffers")){
        sHandler.Quit();
    }

    //Bind the VAO, program, and buffers. In that order.
    gl.glBindVertexArray(glBuffer[0]);
    if(checkGLErrors(gl, "After binding VAO")){
        sHandler.Quit();
    }
    gl.glUseProgram(glProgram);
    if(checkGLErrors(gl, "After binding program")){
        sHandler.Quit();
    }
    gl.glBindBuffer(GL3.GL_ARRAY_BUFFER, glBuffer[1]);
    gl.glBindBuffer(GL3.GL_ELEMENT_ARRAY_BUFFER, glBuffer[2]);
    if(checkGLErrors(gl, "After binding buffers")){
        sHandler.Quit();
    }

    //Send in the per-vertex data
    gl.glBufferData(
            GL3.GL_ARRAY_BUFFER,
            vertexData.length * SIZEOF_FLOAT,
            FloatBuffer.wrap(vertexData),
            GL3.GL_STATIC_DRAW);
    if(checkGLErrors(gl, "After loading data into the buffers")){
        sHandler.Quit();
    }

    //Set up pointers to the vertex attributes
    for(VertexData a : gData.getVertexData()){
        a.location = gl.glGetAttribLocation(glProgram, a.name);
        gl.glVertexAttribPointer(a.location, a.components, GL3.GL_FLOAT, false, a.stride * SIZEOF_FLOAT, a.offset * SIZEOF_FLOAT);
        gl.glEnableVertexAttribArray(a.location);
        if(checkGLErrors(gl, "After setting pointers to the vertex attributes")){
            sHandler.Quit();
        }
    }

    //Set up pointers to the uniform variables
    for(UniformData uniform : gData.getUniformData()){
        uniform.location = gl.glGetUniformLocation(glProgram, uniform.name);
        if(checkGLErrors(gl, "After setting pointers to the uniform variables")){
            sHandler.Quit();
        }
    }

    //Send in index data
    gl.glBufferData(
            GL3.GL_ELEMENT_ARRAY_BUFFER,
            indexData.length * SIZEOF_SHORT,
            ShortBuffer.wrap(indexData),
            GL3.GL_STATIC_DRAW);
    if(checkGLErrors(gl, "After loading in index data")){
        sHandler.Quit();
    }

    //Load textures
    for(int i = 0; i < glTextures.length; i++){
        glTextures[i] = gData.getTextureData().get(i).loadTexture(gl);
        if(checkGLErrors(gl, "After loading texture "+i)){
            sHandler.Quit();
        }
    }

    //Bind only the first texture....
    if(glTextures.length >= 1){
        gl.glBindTexture(GL3.GL_TEXTURE_2D, glTextures[0]);
        if(checkGLErrors(gl, "After binding textures")){
            sHandler.Quit();
        }
    }

    //-----BEGIN OPENGL STATE SETTINGS-----
    //Depth testing
    gl.glEnable(GL3.GL_DEPTH_TEST);
    gl.glDepthFunc(GL3.GL_LEQUAL);
    //Primitive restart
    gl.glEnable(GL3.GL_PRIMITIVE_RESTART);
    gl.glPrimitiveRestartIndex(-1);
    //Enlarge points
    gl.glPointSize(5.0f);
    //-----END OPENGL STATE SETTINGS-----

    //...And we're done here.
    gl.glBindVertexArray(0);
    if(checkGLErrors(gl, "After completing Object Initialization")){
        sHandler.Quit();
    }
}

public void draw(GL3 gl){
    //Check to see if this object actually needs to be drawn
    if(gData == null){
        return;
    }

    //Bind the vertex array
    gl.glBindVertexArray(glBuffer[0]);
    gl.glUseProgram(glProgram);
    checkGLErrors(gl, "After binding vertex array");

    //Load the uniform data
    for(UniformData uniform : gData.getUniformData()){
        uniform.loadData(gl);
    }
    checkGLErrors(gl, "After loading uniform data");

    //Draw
    gl.glDrawElements(gData.getPrimitive(), indexCount, GL3.GL_UNSIGNED_SHORT, 0);

    //Unbind
    gl.glBindVertexArray(0);
    checkGLErrors(gl, "After finishing draw");
}
gl.glPrimitiveRestartIndex(-1);
That doesn't look right: the parameter to glPrimitiveRestartIndex is unsigned, so passing -1 sets the restart index to 4294967295.
gl.glDrawElements(gData.getPrimitive(), indexCount, GL3.GL_UNSIGNED_SHORT, 0);
And here the indices are unsigned short. (GLuint)-1 != (GLushort)-1, so the "restart" entries in your index buffer aren't causing a restart; they cause index 65535 to be used, which, being outside the range of the data, is treated as (0,0,0).
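A sketch of the matching fix in the init() code above, assuming the index type stays GL_UNSIGNED_SHORT:
//With unsigned short indices, the restart marker stored in the buffer is
//(short)-1 == 0xFFFF, so the restart index must be the same 16-bit value
gl.glEnable(GL3.GL_PRIMITIVE_RESTART);
gl.glPrimitiveRestartIndex(0xFFFF);  //65535, not (GLuint)-1 == 4294967295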