I am having an issue with OpenGL when drawing my sketched line to the screen: it seems to be adding an extra point at the origin that is not in the point list.
(0,0) is definitely not among the points in the array to be drawn; I just can't seem to reason it out. My guess is that maybe my array size is too big or something, but I can't see it.
Here is my code for populating the array and the draw call:
void PointArray::repopulateObjectBuffer()
{
//Rebind Appropriate VAO
glBindVertexArray( vertex_array_object_id );
//Recalculate Array Size
vertexArraySize = points.size()*sizeof(glm::vec2);
//Rebind Appropriate VBO
glBindBuffer( GL_ARRAY_BUFFER, buffer_object_id );
glBufferData( GL_ARRAY_BUFFER, vertexArraySize, NULL, GL_STATIC_DRAW );
glBufferSubData( GL_ARRAY_BUFFER, 0, vertexArraySize, (const GLvoid*)(&points[0]) );
//Set up Vertex Arrays
glEnableVertexAttribArray( 0 ); //SimpleShader attrib at position 0 = "vPosition"
glVertexAttribPointer( (GLuint)0, 2, GL_FLOAT, GL_FALSE, 0, BUFFER_OFFSET(0) );
//Unbind
glBindBuffer(GL_ARRAY_BUFFER, 0);
glBindVertexArray(0);
}
void PointArray::draw() {
glBindVertexArray(vertex_array_object_id);
glLineWidth(4);
glDrawArrays( GL_LINE_STRIP, 0, vertexArraySize );
glBindVertexArray(0);
}
And here is where I am adding points in the mouse callback:
void mouseMovement(int x, int y)
{
mouseDX = x - lastX ;
mouseDY = y - lastY ;
lastX = x;
lastY = y;
if(mouse_drag)
{
cam->RotateByMouse(glm::vec2(mouseDX*mouseSens, mouseDY * mouseSens));
}
if(sketching)
{
sketchLine.addPoint(glm::vec2((float)x,(float)y));
sketchLine.repopulateObjectBuffer();
updatePickray(x,y);
}
}
Finally, my simple ortho vertex shader:
in vec2 vPosition;
void main(){
const float right = 800.0;
const float bottom = 600.0;
const float left = 0.0;
const float top = 0.0;
const float far = 1.0;
const float near = -1.0;
mat4 orthoMat = mat4(
vec4(2.0 / (right - left), 0, 0, 0),
vec4(0, 2.0 / (top - bottom), 0, 0),
vec4(0, 0, -2.0 / (far - near), 0),
vec4(-(right + left) / (right - left), -(top + bottom) / (top - bottom), -(far + near) / (far - near), 1)
);
gl_Position = orthoMat * vec4(vPosition, 0.0, 1.0);
}
You seem to be passing vertexArraySize as the count parameter of glDrawArrays, which is not correct. The count for glDrawArrays should be the number of vertices, not the number of bytes of the vertex array.
In your case I guess it would be glDrawArrays( GL_LINE_STRIP, 0, points.size() );.
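A minimal sketch of the corrected draw(), assuming points is accessible from PointArray just as it is in repopulateObjectBuffer():
void PointArray::draw() {
    glBindVertexArray(vertex_array_object_id);
    glLineWidth(4);
    // the count argument is the number of vertices, not the byte size of the buffer
    glDrawArrays(GL_LINE_STRIP, 0, (GLsizei)points.size());
    glBindVertexArray(0);
}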
I have written a class that renders 2D objects based on Dear ImGui's DrawList, because it can draw many different variants of objects thanks to its dynamic index/vertex arrays while staying well optimized. Dear ImGui can render 30k unfilled rects at ~36 fps and ~70 MB in debug mode, without antialiasing (on my computer). My very limited version draws 30k unfilled rects at ~3 fps and ~130 MB in debug mode.
class Renderer
{
public:
Renderer();
~Renderer();
void Create();
void DrawRect(float x, float y, float w, float h, GLuint color, float thickness);
void Render(float w, float h);
void Clear();
void ReserveData(int numVertices, int numElements);
void CreatePolygon(const Vector2* vertices, const GLuint verticesCount, GLuint color, float thickness);
GLuint vao, vbo, ebo;
GLShader shader;
Vertex* mappedVertex = nullptr;
GLuint* mappedElement = nullptr,
currentVertexIndex = 0;
std::vector<Vertex> vertexBuffer;
std::vector<GLuint> elementBuffer;
std::vector<Vector2> vertices;
};
const char* vtx =
R"(
#version 460 core
layout(location = 0) in vec3 a_position;
layout(location = 1) in vec4 a_color;
out vec3 v_position;
out vec4 v_color;
uniform mat4 projection;
void main()
{
gl_Position = projection * vec4(a_position, 1.0);
v_color = a_color;
}
)";
const char* frag =
R"(
#version 460 core
layout (location = 0) out vec4 outColor;
in vec4 v_color;
void main()
{
outColor = v_color;
}
)";
void Renderer::Clear()
{
vertexBuffer.resize(0);
elementBuffer.resize(0);
vertices.resize(0);
mappedVertex = nullptr;
mappedElement = nullptr;
currentVertexIndex = 0;
}
void Renderer::Create()
{
glGenBuffers(1, &vbo);
glGenBuffers(1, &ebo);
shader.VtxFromFile(vtx);
shader.FragFromFile(frag);
}
void Renderer::DrawRect(float x, float y, float w, float h, GLuint color, float thickness)
{
// Add vertices
vertices.push_back({ x, y });
vertices.push_back(Vector2(x, y + w));
vertices.push_back(Vector2( x, y ) + Vector2(w, h));
vertices.push_back(Vector2(x + w, y));
// Create rect
CreatePolygon(vertices.data(), vertices.size(), color, thickness);
}
void Renderer::Render(float w, float h)
{
glEnable(GL_BLEND);
glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA);
shader.UseProgram();
shader.UniformMatrix4fv("projection", glm::ortho(0.0f, w, 0.0f, h));
GLuint elemCount = elementBuffer.size();
glGenVertexArrays(1, &vao);
glBindVertexArray(vao);
glBindBuffer(GL_ARRAY_BUFFER, vbo);
glEnableVertexAttribArray(0);
glEnableVertexAttribArray(1);
glVertexAttribPointer(0, 2, GL_FLOAT, GL_FALSE, sizeof(Vertex), (const void*)offsetof(Vertex, position));
glVertexAttribPointer(1, 4, GL_UNSIGNED_BYTE, GL_TRUE, sizeof(Vertex), (const void*)offsetof(Vertex, position));
glBufferData(GL_ARRAY_BUFFER, vertexBuffer.size() * sizeof(Vertex), vertexBuffer.data(), GL_STREAM_DRAW);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, ebo);
glBufferData(GL_ELEMENT_ARRAY_BUFFER, elementBuffer.size() * sizeof(GLuint), elementBuffer.data(), GL_STREAM_DRAW);
const unsigned short* idxBufferOffset = 0;
glDrawElements(GL_TRIANGLES, elemCount, GL_UNSIGNED_INT, idxBufferOffset);
idxBufferOffset += elemCount;
glDeleteVertexArrays(1, &vao);
glDisable(GL_BLEND);
}
void Renderer::CreatePolygon(const Vector2* vertices, const GLuint verticesCount, GLuint color, float thickness)
{
// To create for example unfilled rect, we have to draw 4 rects with small sizes
// So, unfilled rect is built from 4 rects and each rect contains 4 vertices ( * 4) and 6 indices ( *6)
ReserveData(verticesCount * 4, verticesCount * 6);
for (GLuint i = 0; i < verticesCount; ++i)
{
const int j = (i + 1) == verticesCount ? 0 : i + 1;
const Vector2& position1 = vertices[i];
const Vector2& position2 = vertices[j];
Vector2 difference = position2 - position1;
difference *= difference.Magnitude() > 0 ? 1.0f / difference.Magnitude() : 1.0f;
const float dx = difference.x * (thickness * 0.5f);
const float dy = difference.y * (thickness * 0.5f);
mappedVertex[0].position = Vector2(position1.x + dy, position1.y - dx);
mappedVertex[1].position = Vector2(position2.x + dy, position2.y - dx);
mappedVertex[2].position = Vector2(position2.x - dy, position2.y + dx);
mappedVertex[3].position = Vector2(position1.x - dy, position1.y + dx);
mappedVertex[0].color = color;
mappedVertex[1].color = color;
mappedVertex[2].color = color;
mappedVertex[3].color = color;
mappedVertex += 4;
mappedElement[0] = currentVertexIndex;
mappedElement[1] = currentVertexIndex + 1;
mappedElement[2] = currentVertexIndex + 2;
mappedElement[3] = currentVertexIndex + 2;
mappedElement[4] = currentVertexIndex + 3;
mappedElement[5] = currentVertexIndex;
mappedElement += 6;
currentVertexIndex += 4;
}
this->vertices.clear();
}
void Renderer::ReserveData(int numVertices, int numElements)
{
currentVertexIndex = vertexBuffer.size();
// Map vertex buffer
int oldVertexSize = vertexBuffer.size();
vertexBuffer.resize(oldVertexSize + numVertices);
mappedVertex = vertexBuffer.data() + oldVertexSize;
// Map element buffer
int oldIndexSize = elementBuffer.size();
elementBuffer.resize(oldIndexSize + numElements);
mappedElement = elementBuffer.data() + oldIndexSize;
}
int main()
{
//Create window, init opengl, etc.
Renderer renderer;
renderer.Create();
bool quit=false;
while(!quit) {
//Events
//Clear color bit
renderer.Clear();
for(int i = 0; i < 30000; ++i)
renderer.DrawRect(100.0f, 100.0f, 50.0f, 50.0f, 0xffff0000, 1.5f);
renderer.Render(windowW, windowH);
//swap buffers
}
return 0;
}
Why is it that much slower?
How can I make it faster and less memory-consuming?
The biggest bottleneck in that code looks like your allocations: they are never amortized across frames, since you are throwing away the buffers' capacity instead of reusing it, which leads to lots of reallocs/copies (probably log2(n) reallocs/copies per frame if your vector implementation grows by a factor of 2). Try changing your .clear() call to .resize(0), and keep a lazier, rarer call to .clear() for when things genuinely become unused.
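A minimal sketch of what that change could look like, assuming the rest of the renderer stays as posted; the ReleaseMemory helper is hypothetical and only shows where a rare, real deallocation could live:
void Renderer::CreatePolygon(const Vector2* vertices, const GLuint verticesCount, GLuint color, float thickness)
{
    // ... fill mappedVertex / mappedElement exactly as before ...
    this->vertices.resize(0);   // keeps the capacity alive for the next DrawRect call
}

// Hypothetical helper: only release the memory when it is genuinely unused.
void Renderer::ReleaseMemory()
{
    vertexBuffer.clear();   vertexBuffer.shrink_to_fit();
    elementBuffer.clear();  elementBuffer.shrink_to_fit();
    vertices.clear();       vertices.shrink_to_fit();
}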
In debug or in release mode? Vectors are terribly slow in debug due to memory checking. Profiling should always be done in Release.
Profiling should be done both in Release and in Debug/Unoptimized mode if you intend to ever use and work with your application in Debug/Unoptimized mode. The gross "zero-cost abstraction" lie of modern C++ is that it makes it a pain to work with a debugger, because large applications no longer run at a correct frame-rate in "Debug" mode. Ideally you should be able to run all your applications in Debug mode. Do yourself a productivity favour and ALSO do some profiling/optimization for your worst case.
Good luck with your learning quest! :)
Solution
I do not use std::vector anymore; I use ImVector instead (it may just as well be your own implementation).
I set the position directly through the Vector2's .x/.y members.
I tried to create a cylinder VBO by passing a std::vector to the vertex buffer instead of the usual static const GLfloat variableName[] = { some data };. However, nothing gets drawn in my window.
What's wrong with my code?
My VBO:
glGenBuffers(1, &vertexbuffer2);
glBindBuffer(GL_ARRAY_BUFFER, vertexbuffer2);
std::vector<float> bufferData = CreateCylinder();
glBufferData(
GL_ARRAY_BUFFER,
sizeof(bufferData),
&bufferData.front(),
GL_STATIC_DRAW);
CreateCylinder method:
std::vector<float> CreateCylinder(){
std::vector<float> outputArray;
int i, j, k;
for (j = 0; j <= 360; j += precision_deg){
outputArray.push_back(float(cos((pi / 180 * j))));
outputArray.push_back(1.0);
outputArray.push_back(float(sin((pi / 180 * j))));
outputArray.push_back(float(cos((pi / 180 * j))));
outputArray.push_back(-1.0);
outputArray.push_back(float(sin((pi / 180 * j))));
}
for (i = 1; i >= -1; i -= 2){
outputArray.push_back(0.0);
outputArray.push_back(float(i));
outputArray.push_back(0.0);
for (k = 0; k <= 360; k += precision_deg){
outputArray.push_back(i*float(cos((pi / 180 * k))));
outputArray.push_back(float(i));
outputArray.push_back(float(sin((pi / 180 * k))));
}
}
return outputArray;
}
Render method:
void RenderScene6(){
glDisable(GL_DEPTH_TEST);
Projection = glm::perspective(45.0f, 4.0f / 3.0f, 0.1f, 100.0f);
View = glm::lookAt(
glm::vec3(5.63757, 1.7351, -2.19067),
glm::vec3(0, 0, 0),
glm::vec3(0, 1, 0)
);
Model = glm::mat4(1.0f);
MVP = Projection * View * Model;
glUseProgram(programID_1);
glUniformMatrix4fv(MatrixID, 1, GL_FALSE, &MVP[0][0]);
glEnableVertexAttribArray(0);
glBindBuffer(GL_ARRAY_BUFFER, vertexbuffer2);
glVertexAttribPointer(
0,
3,
GL_FLOAT,
GL_FALSE,
0,
(void*)0
);
glDrawArrays(GL_QUAD_STRIP, 0, 437);
glDrawArrays(GL_TRIANGLE_FAN, 438, 888); //THESE ARE HARDCODED ATM.
glDisableVertexAttribArray(0);
}
The problem is the size you are passing to glBufferData: sizeof(bufferData) is the size of the std::vector<float> object itself and has nothing to do with the size of the vector's data.
What you have to do is pass the actual size of the data stored in the vector, as follows:
glBufferData(
GL_ARRAY_BUFFER,
bufferData.size() * sizeof(float),
&bufferData.front(),
GL_STATIC_DRAW);
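As an aside (my own suggestion, not part of the fix above): std::vector::data() can be used instead of &bufferData.front(), which also avoids undefined behaviour if the vector were ever empty:
glBufferData(
    GL_ARRAY_BUFFER,
    bufferData.size() * sizeof(float),
    bufferData.data(),   // valid even when the vector is empty (size would be 0)
    GL_STATIC_DRAW);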
I'm trying to manipulate some code to draw a circle instead of the triangle that is already being printed by the tutorial. I'm not super familiar with C++ or OpenGL, which is why I'm just trying it out.
Any suggestions or corrections to my code would be greatly appreciated.
I keep getting a breakpoint error in Xcode on this line:
glDrawArrays(GL_TRIANGLE_FAN, 0, numPoints); // draw the points and fill it in
and it says:
Thread 1: EXC_BAD_ACCESS(code=1, address=0x0)
Here is the original vertex buffer data for the triangle in the tutorial I'm following:
static const GLfloat g_vertex_buffer_data[] = {
-1.0f, -1.0f, 0.0f,
1.0f, -1.0f, 0.0f,
0.0f, 1.0f, 0.0f,
};
I'm pretty sure that my calculations are correct, but I don't know why it's not drawing a circle. Here is my manipulation:
// Make a circle
GLfloat x;
GLfloat y;
GLfloat z = 0.0f;
int theta = 0;
float radius = 50.0f;
int currentSize = 0;
int numPoints = 30;
GLfloat g_vertex_buffer_data[numPoints*3];
while (theta <= 360) {
x = (GLfloat) radius * cosf(theta);
y = (GLfloat) radius * sinf(theta);
g_vertex_buffer_data[currentSize++] = x;
g_vertex_buffer_data[currentSize++] = y;
g_vertex_buffer_data[currentSize++] = z;
/*
cout << "Theta: " << theta << endl;
for (int i = 0; i < currentSize; i++) {
cout << "g_vertex_buffer_data[" << g_vertex_buffer_data[i] << "]" << endl;
}
*/
theta = theta + (360/numPoints);
}
Here is the rest of the code in the .cpp file:
GLuint vertexbuffer;
glGenBuffers(1, &vertexbuffer);
glBindBuffer(GL_ARRAY_BUFFER, vertexbuffer);
glBufferData(GL_ARRAY_BUFFER, sizeof(g_vertex_buffer_data),g_vertex_buffer_data, GL_STATIC_DRAW);
do{
// Clear the screen
glClear( GL_COLOR_BUFFER_BIT );
// Use our shader
glUseProgram(programID);
// 1rst attribute buffer : vertices
glEnableVertexAttribArray(vertexPosition_modelspaceID);
glBindBuffer(GL_ARRAY_BUFFER, vertexbuffer);
glVertexAttribPointer(
vertexPosition_modelspaceID, // The attribute we want to configure
numPoints, // size
GL_FLOAT, // type
GL_FALSE, // normalized?
0, // stride
(void*)0 // array buffer offset
);
// Draw the circle!
glDrawArrays(GL_TRIANGLE_FAN, 0, numPoints); // draw the points and fill it in
glDisableVertexAttribArray(vertexPosition_modelspaceID);
// Swap buffers
glfwSwapBuffers(window);
glfwPollEvents();
} // Check if the ESC key was pressed or the window was closed
while( glfwGetKey(window, GLFW_KEY_ESCAPE ) != GLFW_PRESS &&
glfwWindowShouldClose(window) == 0 );
// Cleanup VBO
glDeleteBuffers(1, &vertexbuffer);
glDeleteProgram(programID);
// Close OpenGL window and terminate GLFW
glfwTerminate();
return 0;
There are a few problems in this code. The most severe one that probably causes the crash is here:
glVertexAttribPointer(
vertexPosition_modelspaceID, // The attribute we want to configure
numPoints, // size
GL_FLOAT, // type
GL_FALSE, // normalized?
0, // stride
(void*)0 // array buffer offset
);
The second argument to glVertexAttribPointer() is the number of components per vertex. Since you have 3 floats (x, y and z) per vertex, the correct value is 3. So the call should be:
glVertexAttribPointer(
vertexPosition_modelspaceID, // The attribute we want to configure
3, // size
GL_FLOAT, // type
GL_FALSE, // normalized?
0, // stride
(void*)0 // array buffer offset
);
There is also an off-by-one error where the points are created:
while (theta <= 360) {
If you include 360 in the range, you will effectively repeat the first vertex, and write one more vertex than the allocated space. This should be:
while (theta < 360) {
Also, the arguments to cosf() and sinf() are in radians. So you will have to convert the angles from degrees to radians for these functions:
x = (GLfloat) radius * cosf(theta * M_PI / 180.0f);
y = (GLfloat) radius * sinf(theta * M_PI / 180.0f);
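Putting the loop fixes together, the vertex-generation code could look roughly like this (a sketch based on the snippets above; the corrected glVertexAttribPointer call from earlier still applies unchanged):
int theta = 0;
int currentSize = 0;
GLfloat g_vertex_buffer_data[numPoints * 3];
while (theta < 360) {                                      // strictly less than 360, so the first vertex is not repeated
    x = (GLfloat) radius * cosf(theta * M_PI / 180.0f);    // cosf/sinf expect radians
    y = (GLfloat) radius * sinf(theta * M_PI / 180.0f);
    g_vertex_buffer_data[currentSize++] = x;
    g_vertex_buffer_data[currentSize++] = y;
    g_vertex_buffer_data[currentSize++] = z;
    theta = theta + (360 / numPoints);
}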
I'm trying to implement a program that turns a cube into a sphere based on key presses, and makes it ripple whenever it's clicked. I managed to implement the cube-to-sphere-and-back part, but I have no idea where to start on the rippling. I've looked at tons of sources online and I get the math, but I have no idea how to implement it in my vertex shader. Can anyone help me with my dilemma? Thank you!
Here's my cpp, vsh, and fsh: https://drive.google.com/file/d/0B4hkcF9foOTgbUozMjZmSHJhQWM/view?usp=sharing
I'm using GLSL, OpenGL 4.4.0
Here's my code for the vertex shader:
#version 120
attribute vec3 pos;
varying vec4 out_color;
uniform float t;
float PI = 3.14159265357;
int factor = 2; //for determining colors
int num_colors; // = factor * 3 (because RGB)
float currang = 0;
float angfac;
vec4 calculate( float a )
{
//this is just to calculate for the color
}
void main() {
num_colors = factor*3;
angfac = 2*PI/num_colors;
float ang = atan( pos.z, pos.x )+PI;
out_color = calculate(ang);
//rotation
mat3 rotateX = mat3(
vec3( 1, 0, 0),
vec3( 0, cos(t), sin(t)),
vec3( 0, -sin(t), cos(t))
);
mat3 rotateY = mat3(
vec3( cos(t), 0, -sin(t)),
vec3( 0, 1, 0),
vec3( sin(t), 0, cos(t))
);
mat3 rotateZ = mat3(
vec3( cos(t), sin(t), 0),
vec3(-sin(t), cos(t), 0),
vec3( 0, 0, cos(t))
);
gl_Position = gl_ModelViewProjectionMatrix * vec4((pos.xyz*rotateY*rotateX) , 1.0 );
}
and here are parts of my cpp file:
//usual include statements
using namespace std;
enum { ATTRIB_POS };
GLuint mainProgram = 0;
// I use this to indicate the position of the vertices
struct Vtx {
GLfloat x, y, z;
};
const GLfloat PI = 3.14159265357;
const int sideLength = 10;
const size_t nVertices = (sideLength*sideLength*sideLength)-((sideLength-2)*(sideLength-2)*(sideLength-2));
Vtx cube[nVertices];
Vtx sphere[nVertices];
Vtx diff[nVertices];
const double TIME_SPEED = 0.01;
int mI = 4*(sideLength-1);
const int sLCubed = sideLength*sideLength*sideLength;
int indices[nVertices*nVertices];
GLfloat originX = 0.0f; //offset
GLfloat originY = 0.0f; //offset
bool loadShaderSource(GLuint shader, const char *path) {...}
void checkShaderStatus(GLuint shader) {...}
bool initShader() {...}
//in this part of the code, I instantiate an array of indices to be used by glDrawElements()
void transform(int fac)
{
//move from cube to sphere and back by adding/subtracting values and updating cube[].xyz
//moveSpeed = diff[]/speedFac
//fac is to determine direction (going to sphere or going to cube; going to sphere is plus, going back to cube is minus)
for( int i = 0; i<nVertices; i++ )
{
cube[i].x += fac*diff[i].x;
cube[i].y += fac*diff[i].y;
cube[i].z += fac*diff[i].z;
}
}
void initCube() {...} //computation for the vertices of the cube depending on sideLength
void initSphere() {...} //computation for the vertices of the sphere based on the vertices of the cube
void toSphere() {...} //changes the values of the array of vertices of the cube to those of the sphere
void initDiff() {...} //computes for the difference of the values of the vertices of the sphere and the vertices of the cube for the slow transformation
int main() {
//error checking (GLEW, OpenGL versions, etc)
glfwSetWindowTitle("CS177 Final Project");
glfwEnable( GLFW_STICKY_KEYS );
glfwSwapInterval( 1 );
glClearColor(0,0,0,0);
if ( !initShader() ) {
return -1;
}
glEnableVertexAttribArray(ATTRIB_POS);
glVertexAttribPointer(ATTRIB_POS, 3, GL_FLOAT, GL_FALSE, sizeof(Vtx), cube);
initCube();
initIndices();
initSphere();
initDiff();
glUseProgram(mainProgram);
GLuint UNIF_T = glGetUniformLocation(mainProgram, "t");
glMatrixMode(GL_PROJECTION);
glLoadIdentity();
glMatrixMode(GL_MODELVIEW);
glLoadIdentity();
float t = 0;
glUniform1f(UNIF_T, t);
glEnable(GL_CULL_FACE);
glCullFace(GL_BACK);
glPointSize(2.0);
glEnable(GL_POINT_SMOOTH);
glEnable(GL_BLEND);
glBlendFunc(GL_ONE, GL_ONE);
glfwOpenWindowHint(GLFW_FSAA_SAMPLES,16);
glEnable(GL_MULTISAMPLE);
do {
int width, height;
glfwGetWindowSize( &width, &height );
glViewport( 0, 0, width, height );
glClear( GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
t += TIME_SPEED;
glUniform1f(UNIF_T, t);
if (glfwGetKey(GLFW_KEY_DEL)) transform(-1);
if (glfwGetKey(GLFW_KEY_INSERT)) transform( 1 );
if (glfwGetKey(GLFW_KEY_HOME)) initCube();
if (glfwGetKey(GLFW_KEY_END)) toSphere();
glDrawElements( GL_TRIANGLES, nVertices*nVertices, GL_UNSIGNED_INT, indices);
glfwSwapBuffers();
} while ( glfwGetKey(GLFW_KEY_ESC) != GLFW_PRESS &&
glfwGetWindowParam(GLFW_OPENED) );
glDeleteProgram(mainProgram);
glfwTerminate();
return 0;
}
I'm writing an OpenGL game but I'm running into some problems using OpenGL buffers.
My old code that works (but has high CPU consumption and low fps) looks like this:
void Terrain::drawObject(sf::RenderWindow* window)
{
float scale = 5.0f / max(width_ - 1, length_ - 1);
glScalef(scale, scale, scale);
glTranslatef(-(float) (width_ - 1) / 2, 0.0f, -(float) (length_ - 1) / 2);
bool texture = true;
for (int z = 0; z < width_ - 1; z++) {
//Makes OpenGL draw a triangle at every three consecutive vertices
if (getHeight(0, z) > 15)
{
glBindTexture(GL_TEXTURE_2D, textures_.find(Layer::High)->second);
}
else
{
glBindTexture(GL_TEXTURE_2D, textures_.find(Layer::Mid)->second);
}
glBegin(GL_TRIANGLE_STRIP);
for (int x = 0; x < width_; x++) {
sf::Vector3f normal = getNormal(x, z);
glNormal3f(normal.x, normal.y, normal.z);
if (texture)
{
glTexCoord2f(0, 0);
}
else
{
glTexCoord2f(1, 0);
}
glVertex3f((GLfloat) x, (GLfloat) getHeight(x, z), (GLfloat) z);
normal = getNormal(x, z + 1);
glNormal3f(normal.x, normal.y, normal.z);
if (texture)
{
glTexCoord2f(0, 1);
texture = !texture;
}
else
{
glTexCoord2f(1, 1);
texture = !texture;
}
glVertex3f((GLfloat) x, (GLfloat) getHeight(x, z + 1), (GLfloat) z + 1);
}
glEnd();
}
}
Now I have changed my code to get higher fps by using OpenGL buffers. But when I use them, everything on the screen is corrupted. I use the following source code now:
void Terrain::drawObject(sf::RenderWindow* window)
{
if (!buffersCreated_)
{
createBuffers();
buffersCreated_ = true;
}
float scale = 5.0f / max(width_ - 1, length_ - 1);
glScalef(scale, scale, scale);
glTranslatef(-(float) (width_ - 1) / 2, 0.0f, -(float) (length_ - 1) / 2);
glEnableClientState(GL_TEXTURE_COORD_ARRAY);
glEnableClientState(GL_NORMAL_ARRAY);
glEnableClientState(GL_VERTEX_ARRAY);
glEnable(GL_TEXTURE_2D);
glBindTexture(GL_TEXTURE_2D, textures_.find(Layer::Mid)->second);
glBindBuffer(GL_ARRAY_BUFFER, textCoordBuffer_);
glTexCoordPointer(2, GL_FLOAT, 0, (char *) NULL);
glEnableClientState(GL_NORMAL_ARRAY);
glBindBuffer(GL_ARRAY_BUFFER, normalBuffer_);
glNormalPointer(GL_FLOAT, 0, (char *) NULL);
glEnableClientState(GL_VERTEX_ARRAY);
glBindBuffer(GL_ARRAY_BUFFER, vertexBuffer_);
glVertexPointer(3, GL_FLOAT, 0, (char *) NULL);
glDrawArrays(GL_TRIANGLE_STRIP, 0, vhVertexCount);
glDisableClientState(GL_VERTEX_ARRAY);
glDisable(GL_TEXTURE_2D);
glDisableClientState(GL_TEXTURE_COORD_ARRAY);
glDisableClientState(GL_NORMAL_ARRAY);
glDisableClientState(GL_VERTEX_ARRAY);
}
void Terrain::createBuffers()
{
vhVertexCount = (int) (width_ * length_ * 6) / (1 * 1);
sf::Vector3f* vhVertices = new sf::Vector3f[vhVertexCount];
sf::Vector3f* vhNormal = new sf::Vector3f[vhVertexCount];
sf::Vector2i* vhTexCoords = new sf::Vector2i[vhVertexCount];
bool texture = true;
int nIndex = 0;
for (int z = 0; z < length_ - 1; z++) {
for (int x = 0; x < width_; x++) {
sf::Vector3f normal = getNormal(x, z);
if (texture)
{
vhTexCoords[nIndex] = sf::Vector2i(0, 0);
}
else
{
vhTexCoords[nIndex] = sf::Vector2i(1, 0);
}
vhVertices[nIndex] = sf::Vector3f((float) x, getHeight(x, z), (float) z);
vhNormal[nIndex] = sf::Vector3f(normal.x, normal.y, normal.z);
nIndex++;
normal = getNormal(x, z + 1);
if (texture)
{
vhTexCoords[nIndex] = sf::Vector2i(0, 1);
}
else
{
vhTexCoords[nIndex] = sf::Vector2i(1, 1);
}
vhVertices[nIndex] = sf::Vector3f((float) x, getHeight(x, z + 1), (float) z + 1);
vhNormal[nIndex] = sf::Vector3f(normal.x, normal.y, normal.z);
nIndex++;
}
}
glGenBuffers(1, &vertexBuffer_);
glBindBuffer(GL_ARRAY_BUFFER, vertexBuffer_);
glBufferData(GL_ARRAY_BUFFER, vhVertexCount * sizeof(sf::Vector3f), vhVertices, GL_STATIC_DRAW);
glGenBuffers(1, &normalBuffer_);
glBindBuffer(GL_ARRAY_BUFFER, normalBuffer_);
glBufferData(GL_ARRAY_BUFFER, vhVertexCount * sizeof(sf::Vector3f), vhNormal, GL_STATIC_DRAW);
glGenBuffers(1, &textCoordBuffer_);
glBindBuffer(GL_ARRAY_BUFFER, textCoordBuffer_);
glBufferData(GL_ARRAY_BUFFER, vhVertexCount * sizeof(sf::Vector2i), vhTexCoords, GL_STATIC_DRAW);
delete [] vhVertices;
vhVertices = nullptr;
delete [] vhNormal;
vhNormal = nullptr;
delete [] vhTexCoords;
vhTexCoords = nullptr;
}
I use SFML to create the window and render 2D stuff like the menu in the lower left corner.
The code that renders the SFML stuff together with the OpenGL stuff looks like this:
void GameEngine::gameDraw()
{
// Clear the depth buffer
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
glLoadIdentity();
if (camera_ != nullptr)
{
camera_->drawCamera();
}
openglObjectsMutex_.lock();
for (OpenglObject* openglObject : openglObjects_)
{
openglObject->drawObject(window_);
}
openglObjectsMutex_.unlock();
window_->pushGLStates();
sfmlObjectsMutex_.lock();
for (SfmlObject * gameObject : sfmlObjects_)
{
gameObject->drawObject(window_);
}
sfmlObjectsMutex_.unlock();
window_->popGLStates();
}
Can someone find any problems with the buffer code?
The first screenshot shows the correct rendering, but with low fps.
After changing the source to use buffers, I got the corrupted result shown in the second screenshot.
SFML can only save/restore OpenGL 2.x states; whatever 3.x+ state we enable ourselves, we must also disable ourselves.
It's fixed by adding something like this at the end of our own drawing:
glBindBuffer( GL_ARRAY_BUFFER, 0 );
glBindBuffer( GL_ELEMENT_ARRAY_BUFFER, 0 );
glBindTexture( GL_TEXTURE_2D, 0 );
glDisableVertexAttribArray( 0 );
glUseProgram( 0 );
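For example (just a sketch of where the reset could go in the code above), the calls can be placed right before window_->pushGLStates() in GameEngine::gameDraw(), so only state SFML knows how to save is still active:
    openglObjectsMutex_.unlock();
    // reset the state we set up ourselves before handing control back to SFML
    glBindBuffer(GL_ARRAY_BUFFER, 0);
    glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, 0);
    glBindTexture(GL_TEXTURE_2D, 0);
    glDisableVertexAttribArray(0);
    glUseProgram(0);
    window_->pushGLStates();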