I'm trying to create random terrain surrounded by a skybox, to which I will then apply a texture. I have two shader programs, used one after the other, but I can't seem to make the skybox appear on screen, whereas the terrain is rendered normally.
The terrain is essentially a grid on the x-z plane, and every vertex has a y value generated by a noise algorithm. The grid is square with sides of length SIDE, and I have centered the terrain by applying an offset of SIDE/2 to the x and z values.
The skybox is a cube that also has side SIDE, so it should fit perfectly around the grid and should be visible, since the y values of the terrain's vertices are small compared to SIDE, or even SIDE/2.
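The grid generation looks roughly like this (a simplified sketch; noise(x, z) stands in for my actual noise function, and HALF_SIDE is SIDE/2):
std::vector<GLfloat> terrainVertices;
for (int z = 0; z <= SIDE; ++z) {
    for (int x = 0; x <= SIDE; ++x) {
        terrainVertices.push_back((GLfloat)x - HALF_SIDE); // centered x
        terrainVertices.push_back(noise(x, z));            // small height from the noise algorithm
        terrainVertices.push_back((GLfloat)z - HALF_SIDE); // centered z
    }
}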
This is the skybox setup code:
GLfloat skyboxVertices[24] = {
-HALF_SIDE, HALF_SIDE, -HALF_SIDE, //A = 0
HALF_SIDE, HALF_SIDE, -HALF_SIDE, //B = 1
HALF_SIDE, -HALF_SIDE, -HALF_SIDE, //C = 2
-HALF_SIDE, -HALF_SIDE, -HALF_SIDE, //D = 3
-HALF_SIDE, HALF_SIDE, HALF_SIDE, //E = 4
HALF_SIDE, HALF_SIDE, HALF_SIDE, //F = 5
HALF_SIDE, -HALF_SIDE, HALF_SIDE, //G = 6
-HALF_SIDE, -HALF_SIDE, HALF_SIDE //H = 7
};
//generating & binding skybox VAO
glGenVertexArrays(1, &skyboxVAO);
glBindVertexArray(skyboxVAO);
//creating & binding VBO for skybox vertices
glGenBuffers(1, &skyboxVBO);
glBindBuffer(GL_ARRAY_BUFFER, skyboxVBO);
glBufferData(GL_ARRAY_BUFFER, sizeof(skyboxVertices),
skyboxVertices, GL_STATIC_DRAW);
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 0, NULL);
glEnableVertexAttribArray(0);
//3 indices per triangle, 2 triangles per side, 6 sides for a cube
//for a total of 36
totalSkyboxElements = 36;
GLuint skyboxElements[36] = {
0, 1, 3, 3, 2, 0, //front
4, 5, 1, 1, 0, 4, //up
4, 5, 6, 6, 7, 4, //back
7, 6, 3, 3, 2, 7, //bottom
1, 5, 6, 6, 3, 1, //right
0, 4, 7, 7, 2, 0 //left
};
//creating & binding buffer for skybox indices (element buffer)
glGenBuffers(1, &skyboxElementsVBO);
glBindBuffer(GL_ARRAY_BUFFER, skyboxElementsVBO);
glBufferData(GL_ARRAY_BUFFER, sizeof(skyboxElements),
skyboxElements, GL_STATIC_DRAW);
The vertex and fragment shaders are as follows; they compile and link without errors:
//vertex shader
#version 330 core
layout(location = 0) in vec3 position;
void main()
{
gl_Position = vec4(position, 1.0);
}
//fragment shader
#version 330 core
out vec4 color;
void main()
{
color = vec4(1.0, 1.0, 1.0, 1.0);
}
Finally, the code in the main loop is as follows; shaderProgramSB refers to the skybox and shaderProgram to the terrain:
glClear(GL_COLOR_BUFFER_BIT| GL_DEPTH_BUFFER_BIT);
glUseProgram(shaderProgramSB);
glBindVertexArray(skyboxVAO);
glDrawElements(GL_TRIANGLES, totalSkyboxElements, GL_UNSIGNED_INT, 0);
glUseProgram(shaderProgram);
glBindVertexArray(terrainVAO);
camera->update();
projectionMatrix = camera->projectionMatrix;
viewMatrix = camera->viewMatrix;
modelMatrix = glm::mat4(1.0);
MVP = projectionMatrix * viewMatrix * modelMatrix;
glUniformMatrix4fv(MVPLocation, 1, GL_FALSE, &MVP[0][0]);
glDrawElements(GL_TRIANGLES, totalElements, GL_UNSIGNED_INT, 0);
I have tried rendering the terrain first and the skybox after, but the result is always the same: the terrain is rendered normally, the skybox not at all. I've been searching and searching but can't locate the problem. Any help is greatly appreciated.
I am trying to apply texture mapping to my cubes but am unsure how to proceed. Currently I am using indices, to avoid having to repeat vec3s to make a cube, together with a vertex array of the points and their normals, like so:
// Cube data as our basic building block
unsigned int indices[] = {
10, 8, 0, 2, 10, 0, 12, 10, 2, 4, 12, 2,
14, 12, 4, 6, 14, 4, 8, 14, 6, 0, 8, 6,
12, 14, 8, 10, 12, 8, 2, 0, 6, 4, 2, 6
};
vec3 vertexArray[] = {
vec3(-0.5f, -0.5f, -0.5f), vec3(-0.408248, -0.816497, -0.408248),
vec3(0.5f, -0.5f, -0.5f), vec3(0.666667, -0.333333, -0.666667),
vec3(0.5f, 0.5f, -0.5f), vec3(0.408248, 0.816497, -0.408248),
vec3(-0.5f, 0.5f, -0.5f), vec3(-0.666667, 0.333333, -0.666667),
vec3(-0.5f, -0.5f, 0.5f), vec3(-0.666667, -0.333333, 0.666667),
vec3(0.5f, -0.5f, 0.5f), vec3(0.666667, -0.666667, 0.333333),
vec3(0.5f, 0.5f, 0.5f), vec3(0.408248, 0.408248, 0.816497),
vec3(-0.5f, 0.5f, 0.5f), vec3(-0.408248, 0.816497, 0.408248),
};
// convert arrays to vectors
std::vector<vec3> vertexArrayVector;
vertexArrayVector.insert(vertexArrayVector.begin(), std::begin(vertexArray), std::end(vertexArray));
std::vector<unsigned int> indicesVector;
indicesVector.insert(indicesVector.begin(), std::begin(indices), std::end(indices));
I now want to apply textures to the cube, but I am not sure how to add a vec2 for the UVs when using indices. Here is how I create my VBOs and VAO, if it helps:
GLuint vertexBufferObject;
GLuint indexBufferObject;
GLuint vertexArrayObject;
glGenVertexArrays(1, &vertexArrayObject);
glGenBuffers(1, &indexBufferObject);
glGenBuffers(1, &vertexBufferObject);
glBindVertexArray(vertexArrayObject);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, indexBufferObject);
glBufferData(GL_ELEMENT_ARRAY_BUFFER, sizeof(indicesVector[0]) * indicesVector.size(), &indicesVector[0], GL_STATIC_DRAW);
// Upload Vertex Buffer to the GPU, keep a reference to it (vertexBufferObject)
glBindBuffer(GL_ARRAY_BUFFER, vertexBufferObject);
glBufferData(GL_ARRAY_BUFFER, sizeof(vertexArrayVector[0]) * vertexArrayVector.size(), &vertexArrayVector[0], GL_STATIC_DRAW);
// Teach GPU how to read position data from vertexBufferObject
glVertexAttribPointer(0, // attribute 0 matches aPos in Vertex Shader
3, // size
GL_FLOAT, // type
GL_FALSE, // normalized?
0, // 0 stride
(void*)0 // array buffer offset
);
glEnableVertexAttribArray(0);
// Teach GPU how to read normals data from vertexBufferObject
glVertexAttribPointer(1, // attribute 1 matches normals in Vertex Shader
3,
GL_FLOAT,
GL_FALSE,
0,
(void*)sizeof(glm::vec3) // normal is offset by one vec3 (comes after position)
);
glEnableVertexAttribArray(1);
The vertex coordinates and the texture coordinates form a tuple with 5 components (x, y, z, u, v). If a vertex coordinate is shared by several faces but associated with different texture coordinates, you need to duplicate that vertex coordinate. You must specify one attribute tuple for each combination of vertex coordinate and texture coordinate required in your mesh.
It is not possible to specify different indices for the vertex coordinates and texture coordinates. See Rendering meshes with multiple indices and Why does OpenGL not support multiple index buffering?
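As a minimal sketch of what that looks like for one cube face, with interleaved (x, y, z, u, v) tuples (the attribute locations 0 and 1 are assumptions; the same corner positions would be repeated for the other faces, each time with the UVs that face needs):
GLfloat faceVertices[] = {
//    x      y      z     u     v
    -0.5f, -0.5f, -0.5f, 0.0f, 0.0f,
     0.5f, -0.5f, -0.5f, 1.0f, 0.0f,
     0.5f,  0.5f, -0.5f, 1.0f, 1.0f,
    -0.5f,  0.5f, -0.5f, 0.0f, 1.0f,
};
GLuint faceIndices[] = { 0, 1, 2, 2, 3, 0 };
// Positions: 3 floats out of a 5-float stride, starting at offset 0
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 5 * sizeof(GLfloat), (void*)0);
glEnableVertexAttribArray(0);
// UVs: 2 floats out of a 5-float stride, starting after the 3 position floats
glVertexAttribPointer(1, 2, GL_FLOAT, GL_FALSE, 5 * sizeof(GLfloat), (void*)(3 * sizeof(GLfloat)));
glEnableVertexAttribArray(1);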
I'm trying to draw a 3D cube's edges only using OpenGL (4.3 core profile). I know the glPolygonMode function, but I'd like not to draw the intermediate diagonal lines. I declare my vertices and indices like so:
struct Vertex {
glm::vec3 pos;
glm::vec3 color;
glm::vec3 normal;
glm::vec2 uv;
};
Vertex vertices[8];
// Front vertices
vertices[0].pos = glm::vec3(-0.5f, -0.5f, +0.5f);
vertices[1].pos = glm::vec3(+0.5f, -0.5f, +0.5f);
vertices[2].pos = glm::vec3(+0.5f, +0.5f, +0.5f);
vertices[3].pos = glm::vec3(-0.5f, +0.5f, +0.5f);
// Back vertices
vertices[4].pos = glm::vec3(-0.5f, -0.5f, -0.5f);
vertices[5].pos = glm::vec3(+0.5f, -0.5f, -0.5f);
vertices[6].pos = glm::vec3(+0.5f, +0.5f, -0.5f);
vertices[7].pos = glm::vec3(-0.5f, +0.5f, -0.5f);
GLuint indices[36] = {
0, 1, 2, 2, 3, 0, // Front
1, 5, 6, 6, 2, 1, // Right
7, 6, 5, 5, 4, 7, // Back
4, 0, 3, 3, 7, 4, // Left
4, 5, 1, 1, 0, 4, // Bottom
3, 2, 6, 6, 7, 3 // Top
};
My buffer is updated accordingly:
// Bind Vertex Array
glBindVertexArray(_VAO);
// Bind VBO to GL_ARRAY_BUFFER type so that all calls to GL_ARRAY_BUFFER use VBO
glBindBuffer(GL_ARRAY_BUFFER, _VBO);
// Upload vertices to VBO
glBufferData(GL_ARRAY_BUFFER, verticesNb * sizeof(Vertex), vertices, GL_STATIC_DRAW);
// Bind EBO to GL_ELEMENT_ARRAY_BUFFER type so that all calls to GL_ELEMENT_ARRAY_BUFFER use EBO
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, _EBO);
// Upload indices to EBO
glBufferData(GL_ELEMENT_ARRAY_BUFFER, indicesNb * sizeof(GLuint), indices, GL_STATIC_DRAW);
I'm using glDrawElements(GL_LINES, 36, GL_UNSIGNED_INT, 0); to draw my cube's edges, but for some reason it doesn't draw some of the edges, and I don't understand why.
If I use GL_TRIANGLES, it works well when I want to render my 3D cube in fill mode. Does anyone know what I'm missing here? Is it an issue with the indices?
(The "cube" below has a custom size of (1.0f, 2.0f, 3.0f))
Your indices form GL_TRIANGLES primitives rather than GL_LINES primitives. See GL_LINES:
Vertices 0 and 1 are considered a line. Vertices 2 and 3 are considered a line. And so on.
The indices define the primitives. Change the indices so that each pair of indices forms one edge:
GLuint indices[] = {
0, 1, 1, 2, 2, 3, 3, 0, // Front
4, 5, 5, 6, 6, 7, 7, 4, // Back
0, 4, 1, 5, 2, 6, 3, 7
};
glDrawElements(GL_LINES, 24, GL_UNSIGNED_INT, 0);
I have this code:
Upp::Vector<float> verticesTriangle{
1.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, -0.5f, -0.5f, 0.0f,
0.0f, 1.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.5f, -0.5f, 0.0f,
0.0f, 0.0f, 1.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.5f, 0.0f,
};
glGenVertexArrays(1, &VAO);
glBindVertexArray(VAO);
//Setting up the VAO Attribute format
glVertexArrayAttribFormat(VAO, 0, 3, GL_FLOAT, GL_FALSE, 0); //Will be colors (R G B in float)
glVertexArrayAttribFormat(VAO, 1, 2, GL_FLOAT, GL_FALSE, 3); //Will be texture coordinates
glVertexArrayAttribFormat(VAO, 2, 3, GL_FLOAT, GL_FALSE, 5); //Normals
glVertexArrayAttribFormat(VAO, 3, 3, GL_FLOAT, GL_FALSE, 8); //Will be my position
glEnableVertexArrayAttrib(VAO, 0);
glEnableVertexArrayAttrib(VAO, 1);
glEnableVertexArrayAttrib(VAO, 2);
glEnableVertexArrayAttrib(VAO, 3);
//Generating a VBO
glGenBuffers(1, &VBOCarre);
glBindBuffer(GL_ARRAY_BUFFER, VBOCarre);
glBufferStorage(GL_ARRAY_BUFFER, sizeof(float) * verticesTriangle.GetCount(), verticesTriangle, GL_MAP_READ_BIT | GL_MAP_WRITE_BIT);
//Binding the VBO to be read by VAO
glVertexArrayVertexBuffer(VAO, 0, VBOCarre, 0 * sizeof(float), 11 * sizeof(float));
glVertexArrayVertexBuffer(VAO, 1, VBOCarre, 3 * sizeof(float), 11 * sizeof(float));
glVertexArrayVertexBuffer(VAO, 2, VBOCarre, 5 * sizeof(float), 11 * sizeof(float));
glVertexArrayVertexBuffer(VAO, 3, VBOCarre, 8 * sizeof(float), 11 * sizeof(float));
//Bind VAO
glBindVertexArray(VAO);
I have no problem retrieving the first attribute in my shader; however, when I try to retrieve the others, it doesn't work. To test it, I have set up a float array and a simple shader program, and I try to retrieve my position to draw a triangle.
My data is ordered per vertex as 3 floats of color (R, G, B), 2 floats of texture coordinates, 3 floats of normal, and finally 3 floats of position, for 11 floats per vertex.
Here is my vertex shader:
#version 400
layout (location = 0) in vec3 colors;
layout (location = 1) in vec2 textCoords;
layout (location = 2) in vec3 normals;
layout (location = 3) in vec3 positions;
out vec3 fs_colors;
void main()
{
gl_Position = vec4(positions.x, positions.y, positions.z, 1.0);
// gl_Position = vec4(colors.x, colors.y, colors.z, 1.0); // This line works, proving that my
// first attribute is sent to the shader correctly
fs_colors = colors;
}
The problem is that, apart from the first attribute, none of the others seem to be sent to the shader. What am I missing?
You're putting stuff in the wrong place.
glVertexArrayAttribFormat(VAO, 1, 2, GL_FLOAT, GL_FALSE, 3); //Will be texture coordinates
The "3" here is being passed as a byte offset from the start of a vertex in the array to the particular data for that vertex in the attribute. Obviously, your texture coordinate is not 3 bytes from the start of your vertex; it's 3 * sizeof(float) bytes from the start of the vertex.
Similarly:
glVertexArrayVertexBuffer(VAO, 1, VBOCarre, 3 * sizeof(float), 11 * sizeof(float));
This makes no sense either. You're only using a single buffer, and all four attributes should read from the same binding, so you should bind the buffer to a single binding index only.
The offset ought to be 0, because that's where a vertex in the buffer starts. And the stride should be what you wrote.
You also never directly set the association between the attributes and the binding index with glVertexArrayAttribBinding. You probably got things to work by relying on the default, but you shouldn't be using the default here.
The correct code would be:
//Generating a VBO
glCreateBuffers(1, &VBOCarre);
//No need to call glBindBuffer(GL_ARRAY_BUFFER, VBOCarre);, since we're doing DSA.
glNamedBufferStorage(VBOCarre, sizeof(float) * verticesTriangle.GetCount(), verticesTriangle, GL_MAP_READ_BIT | GL_MAP_WRITE_BIT);
glCreateVertexArrays(1, &VAO);
//No need to glBindVertexArray(VAO);, since we're using DSA.
//Setting up the VAO Attribute format
glEnableVertexArrayAttrib(VAO, 0);
glVertexArrayAttribFormat(VAO, 0, 3, GL_FLOAT, GL_FALSE, 0); //Will be colors (R G B in float)
glEnableVertexArrayAttrib(VAO, 1);
glVertexArrayAttribFormat(VAO, 1, 2, GL_FLOAT, GL_FALSE, 3 * sizeof(float)); //Will be texture coordinates
glEnableVertexArrayAttrib(VAO, 2);
glVertexArrayAttribFormat(VAO, 2, 3, GL_FLOAT, GL_FALSE, 5 * sizeof(float)); //Normals
glEnableVertexArrayAttrib(VAO, 3);
glVertexArrayAttribFormat(VAO, 3, 3, GL_FLOAT, GL_FALSE, 8 * sizeof(float)); //Will be my position
//One buffer, one binding.
glVertexArrayVertexBuffer(VAO, 0, VBOCarre, 0, 11 * sizeof(float));
//Make all attributes read from the same buffer.
glVertexArrayAttribBinding(VAO, 0, 0);
glVertexArrayAttribBinding(VAO, 1, 0);
glVertexArrayAttribBinding(VAO, 2, 0);
glVertexArrayAttribBinding(VAO, 3, 0);
//We can glBindVertexArray(VAO); when we're about to use it, not just because we finished setting it up.
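With that setup, drawing the triangle reduces to this (assuming the shader program is already bound):
glBindVertexArray(VAO);
glDrawArrays(GL_TRIANGLES, 0, 3); // verticesTriangle holds 3 vertices of 11 floats each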
I'm working on a project where I'm using OpenMesh to read STL and OBJ files and draw them on the screen using OpenGL.
I've been doing the following:
#include <OpenMesh/Core/Mesh/TriMesh_ArrayKernelT.hh>
#include <OpenMesh/Core/IO/MeshIO.hh>
using Mesh = OpenMesh::TriMesh_ArrayKernelT<>;
Mesh mesh;
std::vector<Mesh::Point> vertices; // Mesh::Point is OpenMesh::Vec3f by default
std::vector<Mesh::Normal> normals;
void readMesh(std::string file)
{
OpenMesh::IO::read_mesh(mesh, file);
mesh.request_face_normals();
mesh.request_vertex_normals();
mesh.update_normals();
vertices.clear();
normals.clear();
for (auto face : mesh.faces())
{
for (auto vertex : mesh.fv_range(face))
{
auto point = mesh.point(vertex);
auto normal = mesh.normal(face);
vertices.push_back(point);
normals.push_back(normal);
}
}
mesh.release_face_normals();
mesh.release_vertex_normals();
}
When drawing, I just pass the vertices and normals vectors to the vertex shader like this:
void paint()
{
glSetAttributeArray(0, vertices.data()); // glSetAttributeArray is a wrapper in my code, not a core GL call
glSetAttributeArray(1, normals.data());
glDrawArrays(GL_TRIANGLES, 0, vertices.size());
}
where the vertex shader looks like this:
attribute vec3 position;
attribute vec3 normal;
uniform mat4 modelViewMatrix;
void main(void)
{
vec4 color = vec4(0.25, 0.25, 0.25, 1.0); // alpha must be non-zero: it scales the final color below
vec4 P = vec4(position, 1.0); // w = 1 for points, so translations apply
vec4 N = vec4(normal, 0.0); // w = 0 for directions
vec3 L = vec3(20, 20, 20) - position;
vec3 V = -position;
N = normalize(N);
L = normalize(L);
V = normalize(V);
vec3 R = reflect(-L, vec3(N));
vec3 ambient = vec3(0.1); // assumed constant ambient term; missing from the original snippet
vec3 diffuse = max(dot(vec3(N), L), 0.0) * color.rgb;
vec3 specular = pow(max(dot(R, V), 0.0), 0.2) * vec3(0.1, 0.1, 0.1);
color = vec4(color.a * (ambient + diffuse + specular), color.a);
color = clamp(color, 0.0, 1.0);
gl_FrontColor = color; // vertex-shader output; read as gl_Color in the fragment shader
gl_Position = modelViewMatrix * P;
}
and the fragment shader is:
void main(void)
{
gl_FragColor = gl_Color;
}
This produces pretty good results, but keeping another copy of the vertices and normals in separate containers just to be able to draw the mesh seems counter-intuitive.
I was wondering if I can use OpenGL buffers with OpenMesh to optimize this. I've been searching for anything on this topic for a while but have found nothing.
See Vertex Specification. You can create 2 Vertex Buffer Objects, one for the vertex coordinates and one for the normal vectors:
GLuint vbos[2];
glGenBuffers(2, vbos);
glBindBuffer(GL_ARRAY_BUFFER, vbos[0]);
glBufferData(GL_ARRAY_BUFFER, vertices.size() * sizeof(vertices[0]), vertices.data(), GL_STATIC_DRAW);
glBindBuffer(GL_ARRAY_BUFFER, vbos[1]);
glBufferData(GL_ARRAY_BUFFER, normals.size() * sizeof(normals[0]), normals.data(), GL_STATIC_DRAW);
If you use OpenGL 3.0 or later, you can create a Vertex Array Object and store the vertex specification in it:
GLuint vao;
glGenVertexArrays(1, &vao);
glBindVertexArray(vao);
glBindBuffer(GL_ARRAY_BUFFER, vbos[0]);
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 0, nullptr);
glBindBuffer(GL_ARRAY_BUFFER, vbos[1]);
glVertexAttribPointer(1, 3, GL_FLOAT, GL_FALSE, 0, nullptr);
glEnableVertexAttribArray(0);
glEnableVertexAttribArray(1);
When you want to draw the mesh, then it is sufficient to bind the VAO:
glBindVertexArray(vao);
glDrawArrays(GL_TRIANGLES, 0, vertices.size());
If you use OpenGL 2.0, you cannot create a VAO, so you have to specify the arrays of generic vertex attribute data before drawing the mesh:
glBindBuffer(GL_ARRAY_BUFFER, vbos[0]);
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 0, nullptr);
glBindBuffer(GL_ARRAY_BUFFER, vbos[1]);
glVertexAttribPointer(1, 3, GL_FLOAT, GL_FALSE, 0, nullptr);
glEnableVertexAttribArray(0);
glEnableVertexAttribArray(1);
glDrawArrays(GL_TRIANGLES, 0, vertices.size());
Furthermore, note that the attribute indices are not guaranteed to be 0 and 1; they can be arbitrary numbers.
With GLSL version 3.30 it would be possible to set the attribute indices in the shader code with a Layout Qualifier.
Alternatively, you can define the attribute indices with glBindAttribLocation before linking the program, or retrieve the attribute indices with glGetAttribLocation after linking the program.
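For example (a sketch; position and normal are the attribute names from the vertex shader above, and program is assumed to be your shader program object):
// Either fix the locations before linking ...
glBindAttribLocation(program, 0, "position");
glBindAttribLocation(program, 1, "normal");
glLinkProgram(program);
// ... or query the locations the linker chose afterwards
GLint posLoc = glGetAttribLocation(program, "position");
GLint normLoc = glGetAttribLocation(program, "normal");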
I am trying to draw a grid of velocity vectors; I expect the velocity at each grid point to be a line with a slope of 1, i.e. a slanted line, but I always end up with a vertical line. I'm not sure what I'm doing wrong. Is there something I'm overlooking?
Here is how my vertex buffer looks:
float vel_pos[6*(N+2)*(N+2)];
int index1 = 0;
for (int i=0;i<N+2;i++)
{
for (int j=0;j<N+2;j++)
{
vel_pos[index1] = float(i);
vel_pos[index1+1] = float(j);
vel_pos[index1+2] = 0.0f;
vel_pos[index1+3] = float(i) +0.5f;
vel_pos[index1+4] = float(j) + 0.5f;
vel_pos[index1+5] = 0.0f;
index1 += 6;
}
}
Here is how I am creating my VBO and VAO:
unsigned int VBO, VAO;
glGenVertexArrays(1, &VAO);
glGenBuffers(1, &VBO);
// Bind vertex array object first and then bind the vertex buffer objects
glBindVertexArray(VAO);
glBindBuffer(GL_ARRAY_BUFFER, VBO);
glBufferData(GL_ARRAY_BUFFER, sizeof(vel_pos), vel_pos, GL_STREAM_DRAW);
GLint velAttrib = glGetAttribLocation(ourShader.ID, "aPos");
// interpreting data from the buffer
glVertexAttribPointer(velAttrib, 3, GL_FLOAT, GL_FALSE, 6 * sizeof(float), (void*)0);
glEnableVertexAttribArray(velAttrib);
Here is my vertex shader:
out vec4 vertexColor;
layout (location = 0) in vec3 aPos;
layout (location = 1) in float densitySource; /* source of density */
uniform mat4 transform;
uniform mat4 projection;
void main()
{
gl_Position = projection*transform * vec4(aPos, 1.0);
vertexColor = vec4(1, 0.0, 0.0, 1.0);
}
And here's my drawing code:
ourShader.use();
glm::mat4 trans = glm::mat4(1.0f);
trans = glm::translate(trans, glm::vec3(-0.5f, -0.5f, 0.0f));
unsigned int transformMatrixLocation = glGetUniformLocation(ourShader.ID, "transform");
glUniformMatrix4fv(transformMatrixLocation, 1, GL_FALSE, glm::value_ptr(trans));
glm::mat4 projection = glm::ortho(-10.0f, 110.0f, -1.0f, 110.0f, -1.0f, 100.0f);
unsigned int projectionMatrixLocation = glGetUniformLocation(ourShader.ID, "projection");
glUniformMatrix4fv(projectionMatrixLocation, 1, GL_FALSE, glm::value_ptr(projection));
glBindVertexArray(VAO);
glLineWidth(1.0f);
glDrawArrays(GL_LINES, 0, (N+2)*(N+2));
This is the image I get: [resulting image: vertical lines at each grid point]
The 5th parameter (stride) of glVertexAttribPointer is the byte offset between two consecutive vertex coordinates, not between two primitives. Since your vertex coordinates have 3 components of type float, the stride has to be 3 * sizeof(float):
glVertexAttribPointer(velAttrib, 3, GL_FLOAT, GL_FALSE, 3 * sizeof(float), (void*)0);
Because you set a stride of 6 * sizeof(float), you skipped every 2nd coordinate and drew lines between the points of the grid themselves.
But note, if the stride is 0, the generic vertex attributes are understood to be tightly packed in the array. That is the case here, so you can also use a stride of 0:
glVertexAttribPointer(velAttrib, 3, GL_FLOAT, GL_FALSE, 0, (void*)0);