Heap has been corrupted when using std::unique_ptr - c++

I've been working on a Quake 3 BSP Loader in OpenGL and C++.
And I've run into a problem: when I run my code in debug mode, it fails with "heap has been corrupted!". I have commented the line it corrupts on; my comment is 'gives me the error at this line "Heap has been corrupted"'.
#include <GL/glew.h>
#include <GLFW/glfw3.h>
#include <iostream>
#include <glm/glm.hpp>
#include <glm/gtc/type_ptr.hpp>
#include <glm/vec3.hpp>
#include <glm/gtc/matrix_transform.hpp>
#include <vector>
#include "map.h"
#include <fstream>
#include <memory>
#include "game_manager.h"
#include <thread>
bool KikoBSP::load_map(std::string file_name)
{
this->file.open(file_name.c_str(), std::ios::in | std::ios::binary);
if (this->file.is_open())
{
this->file.read(reinterpret_cast<char*>(&this->header), sizeof(this->header));
std::unique_ptr<BSPEntities> ents(new BSPEntities);
ents->ents_array = new char[this->header.lumps[BSPLUMPS::ENTITIES].length];
this->num_textures = this->header.lumps[BSPLUMPS::TEXTURES].length / sizeof(BSPTexture);
this->num_planes = this->header.lumps[BSPLUMPS::PLANES].length / sizeof(BSPPlane);
this->num_textures = this->header.lumps[BSPLUMPS::TEXTURES].length / sizeof(BSPTexture);
this->num_nodes = this->header.lumps[BSPLUMPS::NODES].length / sizeof(BSPNode);
this->num_leafs = this->header.lumps[BSPLUMPS::LEAFS].length / sizeof(BSPLeaf);
this->num_leaf_faces = this->header.lumps[BSPLUMPS::LEAF_FACES].length / sizeof(BSPLeafFace);
this->num_leaf_brushes = this->header.lumps[BSPLUMPS::LEAF_BRUSHES].length / sizeof(BSPLeafBrush);
this->num_models = this->header.lumps[BSPLUMPS::MODELS].length / sizeof(BSPModel);
this->num_brushes = this->header.lumps[BSPLUMPS::BRUSHES].length / sizeof(BSPBrush);
this->num_brush_sides = this->header.lumps[BSPLUMPS::BRUSHSIDES].length / sizeof(BSPBrushSides);
this->num_vertexes = this->header.lumps[BSPLUMPS::VERTEXES].length / sizeof(BSPVerts);
this->num_meshverts = this->header.lumps[BSPLUMPS::MESHVERTS].length / sizeof(BSPMeshVerts);
this->num_effects = this->header.lumps[BSPLUMPS::EFFECTS].length / sizeof(BSPEffects);
this->num_faces = this->header.lumps[BSPLUMPS::FACES].length / sizeof(BSPFaces);
std::unique_ptr<BSPTexture[]> textures(new BSPTexture[this->num_textures]);
std::unique_ptr<BSPPlane[]> planes(new BSPPlane[this->num_planes]);
std::unique_ptr<BSPNode[]> nodes(new BSPNode[this->num_nodes]);
std::unique_ptr<BSPLeaf[]> leafs(new BSPLeaf[this->num_leafs]);
std::unique_ptr<BSPLeafFace[]> leaf_faces(new BSPLeafFace[this->num_leaf_faces]);
std::unique_ptr<BSPLeafBrush[]> leaf_brushes(new BSPLeafBrush[this->num_leaf_brushes]);
std::unique_ptr<BSPModel[]> models(new BSPModel[this->num_models]);
std::unique_ptr<BSPBrush[]> brushes(new BSPBrush[this->num_brushes]);
std::unique_ptr<BSPBrushSides[]> brush_sides(new BSPBrushSides[this->num_brush_sides]);
std::unique_ptr<BSPVerts[]> vertexes(new BSPVerts[this->num_vertexes]);
std::unique_ptr<BSPMeshVerts[]> mesh_verts(new BSPMeshVerts[this->num_mesh_verts]);
std::unique_ptr<BSPEffects[]> effects(new BSPEffects[this->num_effects]);
std::unique_ptr<BSPFaces[]> faces(new BSPFaces[this->num_faces]);
this->file.seekg(this->header.lumps[BSPLUMPS::ENTITIES].offset);
this->file.read(reinterpret_cast<char*>(ents->ents_array), this->header.lumps[BSPLUMPS::ENTITIES].length);
this->file.seekg(this->header.lumps[BSPLUMPS::TEXTURES].offset);
this->file.read(reinterpret_cast<char*>(textures.get()), this->header.lumps[BSPLUMPS::TEXTURES].length);
this->file.seekg(this->header.lumps[BSPLUMPS::PLANES].offset);
this->file.read(reinterpret_cast<char*>(planes.get()), this->header.lumps[BSPLUMPS::PLANES].length);
this->file.seekg(this->header.lumps[BSPLUMPS::NODES].offset);
this->file.read(reinterpret_cast<char*>(nodes.get()), this->header.lumps[BSPLUMPS::NODES].length);
this->file.seekg(this->header.lumps[BSPLUMPS::LEAFS].offset);
this->file.read(reinterpret_cast<char*>(leafs.get()), this->header.lumps[BSPLUMPS::LEAFS].length);
this->file.seekg(this->header.lumps[BSPLUMPS::LEAF_FACES].offset);
this->file.read(reinterpret_cast<char*>(leaf_faces.get()), this->header.lumps[BSPLUMPS::LEAF_FACES].length);
this->file.seekg(this->header.lumps[BSPLUMPS::LEAF_BRUSHES].offset);
this->file.read(reinterpret_cast<char*>(leaf_brushes.get()), this->header.lumps[BSPLUMPS::LEAF_BRUSHES].length);
this->file.seekg(this->header.lumps[BSPLUMPS::MODELS].offset);
this->file.read(reinterpret_cast<char*>(models.get()), this->header.lumps[BSPLUMPS::MODELS].length);
this->file.seekg(this->header.lumps[BSPLUMPS::BRUSHES].offset);
this->file.read(reinterpret_cast<char*>(brushes.get()), this->header.lumps[BSPLUMPS::BRUSHES].length);
this->file.seekg(this->header.lumps[BSPLUMPS::BRUSHSIDES].offset);
this->file.read(reinterpret_cast<char*>(brush_sides.get()), this->header.lumps[BSPLUMPS::BRUSHSIDES].length);
this->file.seekg(this->header.lumps[BSPLUMPS::VERTEXES].offset);
this->file.read(reinterpret_cast<char*>(vertexes.get()), this->header.lumps[BSPLUMPS::VERTEXES].length);
this->file.seekg(this->header.lumps[BSPLUMPS::MESHVERTS].offset);
this->file.read(reinterpret_cast<char*>(mesh_verts.get()), this->header.lumps[BSPLUMPS::MESHVERTS].length);
this->file.seekg(this->header.lumps[BSPLUMPS::EFFECTS].offset);
this->file.read(reinterpret_cast<char*>(effects.get()), this->header.lumps[BSPLUMPS::EFFECTS].length);
this->file.seekg(this->header.lumps[BSPLUMPS::FACES].offset);
this->file.read(reinterpret_cast<char*>(faces.get()), this->header.lumps[BSPLUMPS::FACES].length);
std::printf("BSP VERSION: '%s'\n", this->header.magic);
if (std::strncmp(this->header.magic, "IBSP", 4) == 0)
{
std::printf("SUCCESS: VALID BSP FORMAT!\n");
}
else
{
std::printf("ERROR: INVALID BSP FORMAT!\n");
return false;
}
std::printf("this->num_of_verts == %i\n", this->num_vertexes);
for (int32_t x = 0; x <= this->num_vertexes; x++)
{
this->vertices.push_back(vertexes.get()[x].position.x);
this->vertices.push_back(vertexes.get()[x].position.y); /* gives me the error at this line "Heap has been corrupted" */
this->vertices.push_back(vertexes.get()[x].position.z);
this->colors.push_back((float)x); /* doesnt follow my code style (using C-style cast), sorry!! I copied this from my old project ;) */
}
std::printf("this->vertices.size() == %i\n", this->vertices.size());
this->shader.load_shader("bsp.vs", "bsp.fs");
glGenVertexArrays(1, &this->vao);
glBindVertexArray(this->vao);
glGenBuffers(1, &this->vbo);
glBindBuffer(GL_ARRAY_BUFFER, vbo);
glBufferData(GL_ARRAY_BUFFER, this->vertices.size() * sizeof(float), &this->vertices.front(), GL_STATIC_DRAW);
glGenBuffers(1, &this->color_vbo);
glBindBuffer(GL_ARRAY_BUFFER, this->color_vbo);
glBufferData(GL_ARRAY_BUFFER, this->colors.size() * sizeof(float), &this->colors.front(), GL_STATIC_DRAW);
this->coord3d = glGetAttribLocation(this->shader.program, "coord3d");
this->mvp = glGetUniformLocation(this->shader.program, "mvp");
this->attrib_color = glGetAttribLocation(this->shader.program, "v_color");
glBindBuffer(GL_ARRAY_BUFFER, this->vbo);
glVertexAttribPointer(this->coord3d, // attribute
3, // number of elements per vertex, here (R,G,B)
GL_FLOAT, // the currentBlock of each element
GL_FALSE, // take our values as-is
0, // no extra data between each position
nullptr // offset of first element
);
glBindBuffer(GL_ARRAY_BUFFER, this->color_vbo);
glVertexAttribPointer(this->attrib_color,
3,
GL_FLOAT,
GL_FALSE,
0,
nullptr
);
glBindVertexArray(0);
glVertexAttrib3fv(this->attrib_color, this->colors.data());
std::printf("size of vector = %i\n", this->vertices.size());
return true;
}
else
{
std::printf("ERROR: COULDN'T OPEN FILE!\n");
return false;
}
return false;
}
void KikoBSP::render(glm::vec3 position)
{
/* Draw the loaded map geometry translated to 'position', using the
 * projection/view matrices owned by game_manager. */
glBindVertexArray(this->vao);
glEnableVertexAttribArray(this->coord3d);
glEnableVertexAttribArray(this->attrib_color);
/* Model matrix is a pure translation to the requested world position. */
glm::mat4 model = glm::translate(glm::mat4(1.0f), glm::vec3(position.x, position.y, position.z));
glm::mat4 mvp = game_manager->projection * game_manager->view * model;
glUniformMatrix4fv(this->mvp, 1, GL_FALSE, glm::value_ptr(mvp));
/* NOTE(review): vertices.size() counts FLOATS, not vertices, while
 * glDrawArrays expects a vertex count -- this looks like it should be
 * vertices.size() / 3. Confirm intended draw range. */
glDrawArrays(GL_LINES, 0, this->vertices.size());
glDisableVertexAttribArray(this->coord3d);
glDisableVertexAttribArray(this->attrib_color);
glBindVertexArray(0);
}
void KikoBSP::cleanup_map()
{
/* Intentionally empty: lump buffers are local std::unique_ptrs inside
 * load_map() and free themselves when it returns.
 * NOTE(review): GL objects (vao/vbo/color_vbo) are never deleted --
 * consider glDeleteBuffers/glDeleteVertexArrays here if maps can be
 * reloaded at runtime. */
/* OUTDATED FUNCTION BACK WHEN I WAS MANUALLY MANAGING MEMORY */
}
However, the error goes away, when I take off these lines:
this->file.seekg(this->header.lumps[BSPLUMPS::EFFECTS].offset);
this->file.read(reinterpret_cast<char*>(effects.get()), this->header.lumps[BSPLUMPS::EFFECTS].length);
this->file.seekg(this->header.lumps[BSPLUMPS::FACES].offset);
this->file.read(reinterpret_cast<char*>(faces.get()), this->header.lumps[BSPLUMPS::FACES].length);
Which leads me to believe the Heap is overflowing with all the allocated memory.
I also believe this because I ran into the same problem when I was manually managing memory. So I switched to unique_ptrs, and I'm still getting the same problem! :(
Anyone here have any ideas? Thanks! :)

You've got incorrect loop condition:
for (int32_t x = 0; x <= this->num_vertexes; x++)
In its last iteration x == num_vertexes, which means you are trying to read the value beyond the array boundary. Check with the debugger - you'll see that x takes this value when the heap corruption happens. Also, I'm pretty sure it's not the line you've marked causing heap corruption, but the line before - many debuggers show the next line to be executed, not the one being executed.
BTW I'm not familiar with the classes you use and thus can't say for sure, but you're most likely abusing the use of std::unique_ptr. Just use std::vector instead of a unique pointer to a dynamic array - it's way simpler, easier to use, and should work exactly the way you expect.

Related

Vertex Buffer Abstraction in OpenGl [duplicate]

This question already has answers here:
OpenGL object in C++ RAII class no longer works
(2 answers)
Closed 1 year ago.
I have been trying to write some classes to abstract OpenGl. So far I have written a VertexArray class that uses template functions which are working fine so far. However I have encountered problems with my VertexBuffer class that I have not been capable of fixing.
C++ and OpenGl are still fairly new to me so the solution might be pretty simple.
Whenever I try to replace the Vertex Buffer creation code with the constructor of the VertexBuffer class things go wrong and nothing is displayed on the screen even though the code of the constructor and the one directly written in the Mesh class are (I believe) the same.
Here is the code:
VertexBuffer.h:
#ifndef VERTEX_BUFFER_H
#define VERTEX_BUFFER_H
/* Thin RAII-style wrapper around a GL vertex buffer object.
 * NOTE(review): the destructor deletes ID but the class declares no
 * copy/move operations (rule of three/five) -- copying or assigning
 * from a temporary shallow-copies ID and then deletes it when the
 * temporary dies, leaving the survivor holding a dead buffer name. */
class VertexBuffer {
private:
public:
/* GL buffer object name; uninitialized until the (data, size) ctor runs. */
unsigned int ID;
/* Create a buffer and upload 'size' bytes from 'data' (GL_STATIC_DRAW). */
VertexBuffer(const void* data, unsigned int size);
/* Default ctor: leaves ID uninitialized. */
VertexBuffer();
~VertexBuffer();
/* Bind/unbind this buffer on the GL_ARRAY_BUFFER target. */
void bind();
void unbind();
};
#endif
VertexBuffer.cpp:
#include "VertexBuffer.h"
#define GLEW_STATIC
#include <GL/glew.h>
#include <iostream>
/* Generate a GL buffer, bind it on GL_ARRAY_BUFFER, and upload 'size'
 * bytes from 'data' as static vertex data; logs ID/data/size for
 * debugging. */
VertexBuffer::VertexBuffer(const void* data, unsigned int size) {
glGenBuffers(1, &ID);
glBindBuffer(GL_ARRAY_BUFFER, ID);
glBufferData(GL_ARRAY_BUFFER, size, data, GL_STATIC_DRAW);
std::cout << "ID = " << ID << std::endl;
std::cout << "data = " << data << std::endl;
std::cout << "size = " << size << std::endl << std::endl;
}
/* Default ctor is deliberately empty, so ID stays uninitialized.
 * NOTE(review): the destructor will hand that garbage value to
 * glDeleteBuffers; initializing ID to 0 would make it a no-op. */
VertexBuffer::VertexBuffer() {}
/* Free the GL buffer. NOTE(review): this also runs for temporaries;
 * with no copy/move control, 'vb = VertexBuffer(...)' deletes the ID
 * that vb just received. */
VertexBuffer::~VertexBuffer() {
glDeleteBuffers(1, &ID);
}
/* Make this buffer the current GL_ARRAY_BUFFER. */
void VertexBuffer::bind() {
glBindBuffer(GL_ARRAY_BUFFER, ID);
}
/* Clear the GL_ARRAY_BUFFER binding (binds buffer 0). */
void VertexBuffer::unbind() {
glBindBuffer(GL_ARRAY_BUFFER, 0);
}
The Mesh constructor where I have been trying to implement it via the class:
/* Build GPU-side state for a mesh: a VAO, a VBO filled from
 * 'vertices', and a 9-float-stride attribute layout
 * (3 position + 3 color + 3 normal floats per vertex). */
Mesh::Mesh(std::vector<Vertex> vertices) {
model = glm::mat4(1.0f);
/* NOTE(review): this stores the VERTEX count, not an index count --
 * confirm the name matches how it is consumed at draw time. */
indicesSize = vertices.size();
// generate vertex array object
va = VertexArray();
va.bind();
//============================================================
//This code works:
/*
glGenBuffers(1, &vb.ID);
glBindBuffer(GL_ARRAY_BUFFER, vb.ID);
glBufferData(GL_ARRAY_BUFFER, sizeof(Vertex) * vertices.size(), &vertices[0], GL_STATIC_DRAW);
*/
//------------------------------------------------------------
//This code does not work:
/* NOTE(review): the temporary VertexBuffer on the right-hand side is
 * destroyed at the end of this full expression; its destructor calls
 * glDeleteBuffers on the ID that was shallow-copied into 'vb', so
 * 'vb' ends up holding a deleted buffer name. This is the bug the
 * accepted answer explains. */
vb = VertexBuffer(&vertices[0], sizeof(Vertex) * vertices.size());
//============================================================
// points
va.push<float>(3, 9, false);
// colors
va.push<float>(3, 9, false);
// normals
va.push<float>(3, 9, false);
};
I would be happy if anyone can help me. Thanks in advance!
You have to store vb in some kind of smart pointer or add move semantics to VertexBuffer.
vb = VertexBuffer(&vertices[0], sizeof(Vertex) * vertices.size());
^^^ [1]
in [1] temporary VertexBuffer is created, then by operator =, the member ID is copied in shallow way into vb and finally at the end of expression temporary VertexBuffer is destroyed with deleting generated ID (by destructor of temporary instance), which is still stored in vb, but it is not valid anymore.
Solution with move semantics could be like:
/* Sketch of a fix: owning the ID through unique_ptr makes the class
 * move-only, so the shallow-copy/double-delete cannot happen. */
class VertexBuffer {
std::unique_ptr<unsigned int> ID;
public:
/* Move-assign steals the ID; the moved-from pointer becomes null, so
 * its destructor skips glDeleteBuffers. */
VertexBuffer& operator=(VertexBuffer&& other) {
ID = std::move(other.ID);
return *this;
}
/* Delete the GL buffer only if this instance still owns an ID. */
~VertexBuffer() {
if (ID)
glDeleteBuffers(1, ID.get() );
}
};

Update SSBO in Compute shader

I am currently trying to update an SSBO linked/bound to a compute shader. Doing it this way, I only write the first 32 bytes into the out_picture, because I only memcpy that many (sizeof(pstruct)).
Computeshader:
#version 440 core
// Input payload: a flat RGB image plus a per-image scale factor,
// laid out according to std430 rules.
struct Pstruct{
float picture[1920*1080*3];
float factor;
};
// Output image buffer (binding 0): one float per invocation.
layout(std430, binding = 0) buffer Result{
float out_picture[];
};
// Input image + factor (binding 1).
layout(std430, binding = 1) buffer In_p1{
Pstruct in_p1;
};
layout(local_size_x = 1000) in;
// Accumulate the factor-scaled input sample into the output element
// addressed by this invocation's global index.
void main() {
out_picture[gl_GlobalInvocationID.x] = out_picture[gl_GlobalInvocationID.x] +
in_p1.picture[gl_GlobalInvocationID.x] * in_p1.factor;
}
C++ host code:
/* NOTE(review): this host-side struct does NOT match the shader's
 * Pstruct -- std::vector is a small handle object (pointer, size,
 * capacity), not inline float storage, so memcpy'ing it into a GL
 * buffer copies pointers rather than pixel data. This is the core
 * problem the answer addresses. */
struct Pstruct{
std::vector<float> picture;
float factor;
};
/* Fill the test payload: every pixel channel = 5.0, factor = 1.0. */
Pstruct tmp;
tmp.factor = 1.0f;
for(int i = 0; i < getNUM_PIX(); i++){
tmp.picture.push_back(5.0f);
}
/* Allocate the SSBO on binding point 1: NUM_PIX floats + 1 for factor. */
SSBO ssbo;
glGenBuffers(1, &ssbo.handle);
glBindBufferBase(GL_SHADER_STORAGE_BUFFER, 1, ssbo.handle);
glBufferData(GL_SHADER_STORAGE_BUFFER, (getNUM_PIX() + 1) * sizeof(float), NULL, GL_DYNAMIC_DRAW);
...
/* Map and copy -- NOTE(review): sizeof(pstruct) is the size of the
 * struct object (vector header + float), not the image, which is why
 * only the first few bytes arrive in the shader. */
glBindBuffer(GL_SHADER_STORAGE_BUFFER, ssbo.handle);
Pstruct* ptr = (Pstruct *) glMapBuffer(GL_SHADER_STORAGE_BUFFER, GL_WRITE_ONLY);
memcpy(ptr, &pstruct, sizeof(pstruct));
glUnmapBuffer(GL_SHADER_STORAGE_BUFFER);
...
/* Dispatch one invocation per pixel channel, then make writes visible. */
glUseProgram(program);
glDispatchCompute(getNUM_PIX() / getWORK_GROUP_SIZE(), 1, 1);
glMemoryBarrier(GL_SHADER_STORAGE_BARRIER_BIT);
How can I copy both my picture array and my float factor at the same time?
Do I have to split the memcpy call into array and float? and if yes how? I can copy the first part, but I am not allowed to add an offset to the ptr.
First of all,
float picture[1920*1080*3];
clearly should be either a texture (you're only reading from it anyway) or at least an image.
Second:
struct Pstruct{
std::vector<float> picture;
float factor;
};
This definition does not match the definition in your shader in any way. The std::vector object will just be a meta object internally managing the data storage used by the vector. memcpy that to a GL buffer and passing that to the GPU does not make sense at all.
The correct approach would be to either copy the contents of that vector separately into the appropriate places inside the buffer, or to just us a struct definition on your client side which actually matches the one you're using in the shader (and taking all the rules of std430 into account). But, as my first point already was, the correct solution here is most likely to use a texture or image object instead.

Why is std::vector so much slower than an array pointer? [closed]

Closed. This question is not reproducible or was caused by typos. It is not currently accepting answers.
This question was caused by a typo or a problem that can no longer be reproduced. While similar questions may be on-topic here, this one was resolved in a way less likely to help future readers.
Closed 4 years ago.
Improve this question
Im creating a Opengl font batch draw.
I want to know why my std::vector is so much slower than using an array pointer.
I have tried adding vector reserve and everything i can think of.
My FPS drops just over half when using vectors.
Vectors seem easier to manage and look nicer in code, but im really stuck on this.
Not sure if its something simple that i have missed?
Or is it better to just use array pointers?
/* Vertex storage for the batch: either a growable vector or a fixed
 * array walked with a cursor pointer, selected at compile time. */
#ifdef USE_VECTORS
std::vector<Vertex> m_vertices;
#else
Vertex *m_pVertex;
Vertex m_vertices[MAX_VERTICES];
#endif
drawing char function
/* Append one textured, colored quad (4 vertices) for glyph 'c' with
 * its upper-left corner at (x, y) to the current batch. */
void GLFont::drawChar(char c, int x, int y)
{
// 1------4
// | | 1 = (x, y)
// | | 2 = (x, y + charHeight)
// | | 3 = (x + charWidth, y + charHeight)
// | | 4 = (x + charWidth, y)
// | |
// | |
// 2------3
//
const Glyph &glyph = getChar(c);
int charWidth = glyph.width;
int charHeight = m_charHeight;
#ifdef USE_VECTORS
/* Aggregate-initialize the 4 corner vertices field by field.
 * NOTE(review): this assumes Vertex's member order is exactly
 * {x, y, s, t, r, g, b, a} -- confirm against the Vertex definition. */
Vertex vert[] = {
x, y,
glyph.upperLeft[0], glyph.upperLeft[1],
m_color[0], m_color[1], m_color[2], m_color[3],
x, y + charHeight,
glyph.lowerLeft[0], glyph.lowerLeft[1],
m_color[0], m_color[1], m_color[2], m_color[3],
x + charWidth, y + charHeight,
glyph.lowerRight[0], glyph.lowerRight[1],
m_color[0], m_color[1], m_color[2], m_color[3],
x + charWidth, y,
glyph.upperRight[0], glyph.upperRight[1],
m_color[0], m_color[1], m_color[2], m_color[3]
};
//unsigned dataArraySize = sizeof(vert) / sizeof(Vertex);
/* Append all four vertices; &vert[4] is the one-past-the-end iterator. */
m_vertices.insert(m_vertices.end(), &vert[0], &vert[4]);
/* NOTE(review): unlike the pointer path below, this branch never
 * flushes at MAX_CHARS_PER_BATCH -- the vector grows unboundedly
 * within a frame, which also skews the performance comparison. */
++m_numCharsToDraw;
#else
/* Fixed-array path: write each field through the cursor pointer. */
//1
m_pVertex->x = x;
m_pVertex->y = y;
m_pVertex->s = glyph.upperLeft[0];
m_pVertex->t = glyph.upperLeft[1];
m_pVertex->r = m_color[0];
m_pVertex->g = m_color[1];
m_pVertex->b = m_color[2];
m_pVertex->a = m_color[3];
++m_pVertex;
// 2
m_pVertex->x = x;
m_pVertex->y = y + charHeight;
m_pVertex->s = glyph.lowerLeft[0];
m_pVertex->t = glyph.lowerLeft[1];
m_pVertex->r = m_color[0];
m_pVertex->g = m_color[1];
m_pVertex->b = m_color[2];
m_pVertex->a = m_color[3];
++m_pVertex;
// 3
m_pVertex->x = x + charWidth;
m_pVertex->y = y + charHeight;
m_pVertex->s = glyph.lowerRight[0];
m_pVertex->t = glyph.lowerRight[1];
m_pVertex->r = m_color[0];
m_pVertex->g = m_color[1];
m_pVertex->b = m_color[2];
m_pVertex->a = m_color[3];
++m_pVertex;
// 4
m_pVertex->x = x + charWidth;
m_pVertex->y = y;
m_pVertex->s = glyph.upperRight[0];
m_pVertex->t = glyph.upperRight[1];
m_pVertex->r = m_color[0];
m_pVertex->g = m_color[1];
m_pVertex->b = m_color[2];
m_pVertex->a = m_color[3];
++m_pVertex;
/* Flush the batch when the fixed array is full, then restart it. */
if (++m_numCharsToDraw == MAX_CHARS_PER_BATCH)
{
drawTextEnd();
drawBatchOfChars();
drawTextBegin();
}
#endif
}
/* Submit the accumulated batch with legacy client-side vertex arrays:
 * interleaved position/texcoord/color with stride sizeof(Vertex). */
void GLFont::drawBatchOfChars()
{
glEnableClientState(GL_VERTEX_ARRAY);
glEnableClientState(GL_TEXTURE_COORD_ARRAY);
glEnableClientState(GL_COLOR_ARRAY);
#ifdef USE_VECTORS
/* NOTE(review): m_vertices[0] is undefined behavior when the vector
 * is empty -- guard on m_numCharsToDraw before dereferencing. */
glVertexPointer(2, GL_INT, sizeof(Vertex), &m_vertices[0].x);
glTexCoordPointer(2, GL_FLOAT, sizeof(Vertex), &m_vertices[0].s);
glColorPointer(4, GL_FLOAT, sizeof(Vertex), &m_vertices[0].r);
#else
glVertexPointer(2, GL_INT, sizeof(Vertex), &m_vertices->x);
glTexCoordPointer(2, GL_FLOAT, sizeof(Vertex), &m_vertices->s);
glColorPointer(4, GL_FLOAT, sizeof(Vertex), &m_vertices->r);
#endif
/* 4 vertices per character quad. */
glDrawArrays(GL_QUADS, 0, m_numCharsToDraw * 4);
glDisableClientState(GL_VERTEX_ARRAY);
glDisableClientState(GL_TEXTURE_COORD_ARRAY);
glDisableClientState(GL_COLOR_ARRAY);
}
You are comparing apples to oranges:
Containers contain pointers themselves, reducing locality of data.
Containers need to reallocate storage when changing size.
Microsoft's Visual Studio enables additional diagnostics in their standard library containers. For example, accessing via vec[123] causes undefined behaviour per standard if the index is out of range. This allows implementing this as simple indexing via the pointer to the array's payload. With additional diagnostics, the index is validated, which is just a small comparison and a branch, but in tight loops it makes a difference.
That said, your approach to prove something is flawed. You'd first have to implement equivalent code that is reduced as far as possible (in the spirit of an MCVE). Hooking an OpenGL backend onto it isn't going to make things reproducible.
The main reason for you to see a large performance difference is Debug mode.
In Debug mode MSVC by default inlines nothing, does not store any variable in registers, and always allocates and validates a stack frame, to name a few.
The extra abstractions when using a vector in Debug mode directly translate to more work for the CPU.

Multiple quads in one vbo

I am working on a minecraft-ish game, and I've been working a little more with vbos. However; when drawing multiple faces in a single vbo I seem to have a little bit of a issue.
Here is my vbo-generation code:
/* Build one VBO holding a 5x5 grid of terrain quads, laid out as
 * [positions | colors | texcoords] at float offsets 0, verts*3, verts*6. */
glGenBuffers(1, &VBO);
/* NOTE(review): 'verts' is a vertex COUNT, not a buffer name --
 * binding it here is almost certainly a mistake; the next two lines
 * undo it anyway. */
glBindBuffer(GL_ARRAY_BUFFER, verts);
glBindBuffer(GL_ARRAY_BUFFER, 0);
glBindBuffer(GL_ARRAY_BUFFER, VBO);
/* 9 floats per vertex (3 pos + 3 color + 3 texcoord), data filled via
 * mapping below. */
glBufferData(GL_ARRAY_BUFFER, verts * 9 * sizeof(GLfloat), NULL, GL_STATIC_DRAW);
void* ptr = glMapBuffer(GL_ARRAY_BUFFER, GL_READ_WRITE);
GLfloat*model = (GLfloat*)ptr;
GLfloat*tex = ((GLfloat*)ptr) + verts * 6;
GLfloat*color = ((GLfloat*)ptr) + verts * 3;
/* p = vertex index, k = float offset of that vertex (3 floats each). */
int p = 0;
int k = p * 3;
for (int mcy = 0; mcy < 5; mcy++) {
for (int mcx = 0; mcx < 5; mcx++) {
double addonX = mcx*32.0;
double addonY = mcy*32.0;
int addonx = mcx * 32;
int addony = mcy * 32;
/* Skip cells whose four corners all sit above the height threshold. */
if (!(hill.get(addonX, addonY)*400.0 > 100 && hill.get(32 + addonX, addonY)*400.0 > 100 && hill.get(addonX, 32 + addonY)*400.0 > 100 && hill.get(32 + addonX, 32 + addonY)*400.0 > 100)) {
draw = true;
int biome1 = BiomeToColor(GetBiome(x, y, addonX, addonY), hill.get(addonX, addonY)*400.0);
int biome2 = BiomeToColor(GetBiome(x, y, 32 + addonX, addonY), hill.get(32 + addonX, addonY)*400.0);
int biome3 = BiomeToColor(GetBiome(x, y, addonX, 32 + addonY), hill.get(addonX, 32 + addonY)*400.0);
/* NOTE(review): 'hill.get(32 + addonY, 32 + addonY)' repeats addonY
 * in the first argument -- the other corners suggest it should be
 * 32 + addonX. Confirm. */
int biome4 = BiomeToColor(GetBiome(x, y, 32 + addonX, 32 + addonY), hill.get(32 + addonY, 32 + addonY)*400.0);
model[k] = addonx+ 32;
model[k + 1] = addony;
model[k + 2] = hill.get(addonX + 32, addonY)*400.0;
color[k] = BiomeColors[biome2].r;
color[k + 1] = BiomeColors[biome2].g;
color[k + 2] = BiomeColors[biome2].b;
p++;
k = p * 3;
model[k] = addonx + 32;
model[k + 1] = addony + 32;
model[k + 2] = hill.get(addonX + 32, addonY + 32)*400.0;
color[k] = BiomeColors[biome4].r;
color[k + 1] = BiomeColors[biome4].g;
color[k + 2] = BiomeColors[biome4].b;
p++;
k = p * 3;
model[k] = addonx;
model[k + 1] = addony + 32;
model[k + 2] = hill.get(addonX, addonY + 32)*400.0;
color[k] = BiomeColors[biome3].r;
color[k + 1] = BiomeColors[biome3].g;
color[k + 2] = BiomeColors[biome3].b;
p++;
k = p * 3;
/* BUG (confirmed by the asker's EDIT below): this x-coordinate uses
 * 'addony'; it should be 'addonx'. */
model[k] = addony;
model[k + 1] = addony;
model[k + 2] = hill.get(addonX, addonY)*400.0;
color[k] = BiomeColors[biome1].r;
color[k + 1] = BiomeColors[biome1].g;
color[k + 2] = BiomeColors[biome1].b;
p++;
k = p * 3;
}
}
}
glUnmapBuffer(GL_ARRAY_BUFFER);
glBindBuffer(GL_ARRAY_BUFFER, 0);
And here's the code I use to draw the vbo:
/* Draw the terrain VBO with legacy fixed-function pointers. */
glBindBuffer(GL_ARRAY_BUFFER, VBO);
glVertexPointer(3, GL_FLOAT, 0, 0);
glTexCoordPointer(3, GL_FLOAT, 0, (char*)NULL + verts * 6 * sizeof(GLfloat));
glColorPointer(3, GL_FLOAT, 0, (char*)NULL + verts * 3 * sizeof(GLfloat));
/* BUG (explained in the answer below): the third argument must be the
 * vertex COUNT ('verts'), but the buffer NAME ('VBO') is passed. */
glDrawArrays(GL_QUADS, 0, VBO);
glBindBuffer(GL_ARRAY_BUFFER, 0);
Here's the result I want (using a single quad in every vbo):
unfortunatly I'm still new so you have to click this link :/
And here is the result I get with multiple quads in every vbo:
image
So why do I want to draw multiple quads in a single vbo?
One word: performance, if you compare the two images the thing that really pops out (well, except for the bug with the second image) is the framerate counter. I want to make this game into a big thing, so every fps matters to me.
EDIT:
Omg, I'm so stupid:
model[k] = addony;
A very simple mistake, but so devastating.
It just proves how such small things can break the game.
It all works now.
glDrawArrays(GL_QUADS, 0, VBO);
There are a few problems with this call:
the third parameter of glDrawArrays is the count of the things you are drawing so what you are actually saying is:
Draw Quads from my Buffer at 0 until VBO and then stop.
What you should be saying is:
Draw Quads from my Buffer at 0 until Buffer Length and then stop
so now it looks like this:
glDrawArrays(GL_QUADS, 0, verts);
'VBO' in your code is the ID of the Buffer that you want to use.
think about it like a pointer who's number you know or rather a user with an ID.
GL_QUADS is not good use GL_TRIANGLES there are many problems with GL_QUADS later especialy on mobile phones and on other platforms making your data in triangles is much much nicer.
You shouldn't be drawing in GL_QUADS for multiple reasons
Why are you not using VAO's? Are you using an older version of OpenGL that doesn't have VAO's? Otherwise I would suggest using VAO here instead of VBO so you dont need to bind pointers for each draw call.
glBindBuffer(GL_ARRAY_BUFFER, verts);
What you are trying to here is bind a VBO of id: 'verts' to be our current VBO.
'So why do I want to draw multiple quads in a single vbo? One word: performance'
Have you tried to draw multiple quads using instancing?
So sending a model matrix for each of the shapes so that you modify their positions and shapes in the shader and not in the buffer. This way you can draw one vbo over and over again just slightly transformed with a single draw call.
Here is a good tutorial on instancing:
http://learnopengl.com/#!Advanced-OpenGL/Instancing
Just out of curiosity but why did you decide to use:
glMapBuffer(GL_ARRAY_BUFFER, GL_READ_WRITE);
instead of buffering your data in the glBufferData call?
If you need to buffer the data later you can use glBufferSubData
Honestly though I think your performance problems stem from a range of factors.
I would personally use glBufferData instead of map data and when I need to do it during run time and not during loading I would use glBufferSubData.
I would upload the colors to the shader and draw multiples of the SAME VBO again and again with a different model matrix and colors allowing me to instance it.
However you shouldn't need to do that.
What I would recommend is making up the data in triangles and colors and drawing the whole ground as a mesh which you have seemed to tried to do. Your problem was most likely caused by glDrawArrays length being set to that of a VBO.
However in this case I would build a VBO using glBufferData with the size of a chunk then I would use glBufferSubData for each of the quads with colors etc. and once I am done I would draw that multiple times alongside different chunks.
I think it would be of use to you to do more theory of OpenGL.

QGLBuffer and VBO

I have a problem with QGLBuffer. I'm trying to implement a dynamic VBO with QT + Opengl.
In the .h file
/* A 2D vertex as stored in the mapped VBO. */
struct CVert {
float x;
float y;
};
...
/* NOTE(review): 'typedef struct X X;' is a C idiom; it is redundant
 * in C++. */
typedef struct CVert CVert;
/* Pointer into the mapped buffer; valid only between map()/unmap(). */
CVert* m_data;
QGLBuffer* m_bufferData;
/* Number of vertices the buffer was allocated for. */
int m_size;
in the .cpp
Constructor.
/* Create and allocate a dynamic vertex buffer, then map it for CPU
 * writes. NOTE(review): allocate() uses 'p_size' while m_size stores
 * 'numberOfVertex' -- confirm these are the same quantity.
 * NOTE(review): the buffer is mapped once here and never unmapped;
 * drawing from a mapped buffer is invalid -- the self-answer below
 * maps/unmaps around each edit instead. */
m_size = numberOfVertex;
m_bufferData = new QGLBuffer(QGLBuffer::VertexBuffer);
m_bufferData->create();
m_bufferData->bind();
m_bufferData->setUsagePattern(QGLBuffer::DynamicDraw);
m_bufferData->allocate(2*sizeof(float)* p_size);
m_data = (CVert*)m_bufferData->map (QGLBuffer::ReadWrite);
In the execution of the program I change some m_data values
m_data[pos].x = X1
m_data[pos].y = y1
In the draw method.
/* Draw m_size 2D vertices as lines from the VBO.
 * NOTE(review): glDisableClientState only runs when bind() succeeds,
 * so a failed bind leaks the enabled client state. */
glEnableClientState(GL_VERTEX_ARRAY);
if (m_bufferData->bind ()) {
glVertexPointer( 2, GL_FLOAT, 0, (char *) NULL );;
glDrawArrays( GL_LINES, 0,m_size );
glDisableClientState(GL_VERTEX_ARRAY);
}
But nothing is being drawn.
I've checked that m_data is not null, and m_bufferData->bind() returns true.
What am I doing wrong?
I think I've solved it. Every time I need to edit the VBO,
I have to:
/* Map, edit, and immediately unmap for each modification, so the
 * buffer is never mapped while GL draws from it.
 * NOTE(review): the third line is missing its trailing semicolon. */
m_data = (CVert*)data->map (QGLBuffer::ReadWrite);
m_data[pos].x = X1;
m_data[pos].y = y1
data->unmap ();
It doesn't work if I map only once in the constructor.