Setting up vertex attributes with OpenTK

I am having trouble setting the position, normal, and texture coordinate attributes in my shader. I am using Meshomatic to load OBJ files; here is how the attributes are added to a single VBO:
void LoadBuffers(MeshData m)
{
    float[] verts, norms, texcoords;
    uint[] indices;
    m.OpenGLArrays(out verts, out norms, out texcoords, out indices);
    GL.GenBuffers(1, out dataBuffer);
    GL.GenBuffers(1, out indexBuffer);

    // Set up data for the VBO. We're going to use one VBO for all geometry,
    // and stick it in in (VVVVNNNNCCCC) order. Non-interleaved.
    int buffersize = (verts.Length + norms.Length + texcoords.Length);
    float[] bufferdata = new float[buffersize];
    vertOffset = 0;
    normOffset = verts.Length;
    texcoordOffset = (verts.Length + norms.Length);
    verts.CopyTo(bufferdata, vertOffset);
    norms.CopyTo(bufferdata, normOffset);
    texcoords.CopyTo(bufferdata, texcoordOffset);

    // Flip the V component of every (U, V) pair (1 - V), presumably to
    // convert between the image origin and OpenGL's texture origin.
    bool v = false;
    for (int i = texcoordOffset; i < bufferdata.Length; i++)
    {
        if (v)
        {
            bufferdata[i] = 1 - bufferdata[i];
            v = false;
        }
        else
        {
            v = true;
        }
    }

    // Load geometry data
    GL.BindBuffer(BufferTarget.ArrayBuffer, dataBuffer);
    GL.BufferData<float>(BufferTarget.ArrayBuffer, (IntPtr)(buffersize * sizeof(float)), bufferdata,
        BufferUsageHint.StaticDraw);

    // Load index data
    GL.BindBuffer(BufferTarget.ElementArrayBuffer, indexBuffer);
    GL.BufferData<uint>(BufferTarget.ElementArrayBuffer,
        (IntPtr)(indices.Length * sizeof(uint)), indices, BufferUsageHint.StaticDraw);
}
And here is how I am drawing:
void DrawBuffer()
{
    // Push current Array Buffer state so we can restore it later
    GL.PushClientAttrib(ClientAttribMask.ClientVertexArrayBit);

    GL.ClientActiveTexture(TextureUnit.Texture0);
    GL.BindTexture(TextureTarget.Texture2D, diff);

    GL.EnableVertexAttribArray(positionIndex);
    GL.BindBuffer(BufferTarget.ArrayBuffer, dataBuffer);
    GL.VertexAttribPointer(positionIndex, 3, VertexAttribPointerType.Float, false, 0, vertOffset);

    GL.EnableVertexAttribArray(texcoordIndex);
    GL.BindBuffer(BufferTarget.ArrayBuffer, dataBuffer);
    GL.VertexAttribPointer(texcoordIndex, 2, VertexAttribPointerType.Float, false, 0, texcoordOffset);

    GL.EnableVertexAttribArray(normalIndex);
    GL.BindBuffer(BufferTarget.ArrayBuffer, dataBuffer);
    GL.VertexAttribPointer(normalIndex, 3, VertexAttribPointerType.Float, false, 0, normOffset);

    // Index array
    GL.BindBuffer(BufferTarget.ElementArrayBuffer, indexBuffer);
    GL.DrawElements(PrimitiveType.Triangles, m.Tris.Length * 3, DrawElementsType.UnsignedInt, IntPtr.Zero);

    // Restore the state
    GL.PopClientAttrib();
}
However, my texture coordinates are wrong: it looks as if a single pixel of the texture is stretched over the entire OBJ. I think I am using GL.VertexAttribPointer(...) incorrectly. What is the second argument, int size?
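For reference, the second argument ("size") of glVertexAttribPointer is the number of components per attribute (1-4, e.g. 3 for a position, 2 for a UV pair), not a byte count; the final argument, by contrast, is a byte offset into the currently bound VBO, so offsets counted in floats (as vertOffset, normOffset and texcoordOffset above appear to be) would need converting to bytes. A minimal sketch of that conversion, written in Java/LWJGL like the later threads on this page (the variable names mirror the C# ones above and are assumptions):

import static org.lwjgl.opengl.GL11.GL_FLOAT;
import static org.lwjgl.opengl.GL15.*;
import static org.lwjgl.opengl.GL20.glVertexAttribPointer;

public class AttribOffsetSketch {
    // For a non-interleaved (VVVV NNNN CCCC) buffer, stride can stay 0
    // (tightly packed), but each offset must be given in BYTES.
    static void pointAttributes(int dataBuffer, int positionIndex, int normalIndex,
                                int texcoordIndex, int vertOffset, int normOffset,
                                int texcoordOffset) {
        glBindBuffer(GL_ARRAY_BUFFER, dataBuffer);
        glVertexAttribPointer(positionIndex, 3, GL_FLOAT, false, 0, (long) vertOffset * Float.BYTES);
        glVertexAttribPointer(normalIndex, 3, GL_FLOAT, false, 0, (long) normOffset * Float.BYTES);
        glVertexAttribPointer(texcoordIndex, 2, GL_FLOAT, false, 0, (long) texcoordOffset * Float.BYTES);
    }
}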

OpenGL simple quad rendering

Do you have any idea why this isn't working?
The old immediate mode works, but I want to use a VAO and VBOs.
(PS: I know the VAO should only be created once; I build it all in this method just for the test and will move those lines afterwards.)
private void allocateIndexBuffer(GL2 graphics, int[] indices) {
    int[] id = new int[1];
    graphics.glGenBuffers(1, id, 0);
    int vboId = id[0];
    graphics.glBindBuffer(GL2.GL_ELEMENT_ARRAY_BUFFER, vboId);
    IntBuffer buffer = IntBuffer.allocate(indices.length);
    buffer.put(0, indices);
    buffer.flip();
    graphics.glBufferData(GL2.GL_ELEMENT_ARRAY_BUFFER, indices.length, buffer, GL2.GL_DYNAMIC_DRAW);
    //graphics.glDeleteBuffers(vboId, buffer); TODO: clean up on closing
}

private void allocateAttributeBuffer(GL2 graphics, int attribute, float[] data) {
    int[] id = new int[1];
    graphics.glGenBuffers(1, id, 0);
    int vboId = id[0];
    graphics.glBindBuffer(GL2.GL_ARRAY_BUFFER, vboId); // just fill vboId, or replace it each time?
    FloatBuffer buffer = FloatBuffer.allocate(data.length);
    buffer.put(0, data);
    buffer.flip();
    graphics.glBufferData(GL2.GL_ARRAY_BUFFER, data.length, buffer, GL2.GL_DYNAMIC_DRAW);
    graphics.glVertexAttribPointer(0, 2, GL2.GL_FLOAT, false, 0, 0); // once the buffer is bound
    graphics.glEnableVertexAttribArray(0);
    graphics.glBindBuffer(GL2.GL_ARRAY_BUFFER, 0);
    //graphics.glDeleteBuffers(vboId, buffer); TODO: clean up on closing
    //graphics.glDeleteVertexArrays(vboId, null); TODO: clean up VAOs
}
@Override
protected void draw(GL2 graphics) {
    String mode = "new";
    if (mode.equals("new")) {
        float[] vertices = {
            bounds.getX(), bounds.getY(),
            bounds.getX(), bounds.getY() + bounds.getHeight(),
            bounds.getX() + bounds.getWidth(), bounds.getY() + bounds.getHeight(),
            bounds.getX() + bounds.getWidth(), bounds.getY(),
        };
        int[] indices = { 0, 1, 2, 2, 1, 3 };

        // VAO creation
        int[] id = new int[1];
        graphics.glGenVertexArrays(1, id, 0);
        int vaoId = id[0];
        graphics.glBindVertexArray(vaoId);
        allocateIndexBuffer(graphics, indices);
        allocateAttributeBuffer(graphics, 0, vertices);
        graphics.glBindVertexArray(0);

        // Render
        graphics.glBindVertexArray(vaoId);
        graphics.glEnableVertexAttribArray(0);
        graphics.glDrawElements(GL2.GL_TRIANGLES, indices.length, GL2.GL_UNSIGNED_INT, 0);
        graphics.glDisableVertexAttribArray(0);
        graphics.glBindVertexArray(0);
        graphics.glFlush();
    } else if (mode.equals("old")) {
        graphics.glColor3f(255, 0, 0);
        graphics.glBegin(GL2.GL_QUADS);
        graphics.glVertex2f(bounds.getX(), bounds.getY());
        graphics.glVertex2f(bounds.getX() + bounds.getWidth(), bounds.getY());
        graphics.glVertex2f(bounds.getX() + bounds.getWidth(), bounds.getY() + bounds.getHeight());
        graphics.glVertex2f(bounds.getX(), bounds.getY() + bounds.getHeight());
        graphics.glEnd();
    }
}
The size of the buffer has to be specified in bytes (see glBufferData). So change

graphics.glBufferData(GL2.GL_ELEMENT_ARRAY_BUFFER, indices.length, buffer, GL2.GL_DYNAMIC_DRAW);

to

graphics.glBufferData(GL2.GL_ELEMENT_ARRAY_BUFFER, buffer.capacity() * 4, buffer, GL2.GL_DYNAMIC_DRAW);

and

graphics.glBufferData(GL2.GL_ARRAY_BUFFER, data.length, buffer, GL2.GL_DYNAMIC_DRAW);

to

graphics.glBufferData(GL2.GL_ARRAY_BUFFER, buffer.capacity() * 4, buffer, GL2.GL_DYNAMIC_DRAW);
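As a hedged variant of the same fix, the byte size can be computed with Integer.BYTES instead of a hard-coded 4, and JOGL's Buffers helper produces a ready-to-use direct buffer (the class name here is just for illustration):

import java.nio.IntBuffer;
import com.jogamp.common.nio.Buffers;
import com.jogamp.opengl.GL2;

class IndexBufferSketch {
    // glBufferData takes the size in BYTES, so multiply the element count
    // by Integer.BYTES; the direct buffer is returned rewound to position 0,
    // so no flip() is needed.
    static int allocateIndexBuffer(GL2 graphics, int[] indices) {
        int[] id = new int[1];
        graphics.glGenBuffers(1, id, 0);
        graphics.glBindBuffer(GL2.GL_ELEMENT_ARRAY_BUFFER, id[0]);
        IntBuffer buffer = Buffers.newDirectIntBuffer(indices);
        graphics.glBufferData(GL2.GL_ELEMENT_ARRAY_BUFFER,
                (long) indices.length * Integer.BYTES, buffer, GL2.GL_DYNAMIC_DRAW);
        return id[0];
    }
}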

OpenGL cannot delete buffer

I draw objects using glBufferData, and I want to replace the drawn data repeatedly: I delete the buffer data and then upload new data, but I cannot get it to work because the memory is never released. After a few iterations I get System.OutOfMemory errors. How can I delete the data? GL.DeleteBuffers does not seem to do anything.
My code is given below.
public void ReDraw()
{
    for (int xx = 0; xx < 500; xx++)
    {
        this.DeleteBuffers(0);
        this.LoadDrawDatas(0, Obje);
        this.openGLControl.Invalidate();
    }
}

private void DeleteBuffers(int data)
{
    if (this.texturesBuffer[data] != null)
    {
        GL.DeleteBuffers(this.texturesBuffer[data].Length, this.texturesBuffer[data]);
        GL.DeleteTextures(1, this.textures[data]);
    }
    if (this.vertex != null)
    {
        GL.DeleteBuffers(this.vertex[data].Length, this.vertex[data]);
        GL.DeleteVertexArrays(this.vertex[data].Length, this.vertex[data]);
        GL.DeleteFramebuffers(this.vertex[data].Length, this.vertex[data]);
        GL.DeleteRenderbuffers(this.vertex[data].Length, this.vertex[data]);
    }
    if (this.normal != null)
    {
        GL.DeleteBuffers(this.normal[data].Length, this.normal[data]);
        GL.DeleteFramebuffers(this.normal[data].Length, this.normal[data]);
        GL.DeleteRenderbuffers(this.normal[data].Length, this.normal[data]);
        GL.DeleteVertexArrays(this.normal[data].Length, this.normal[data]);
    }
}

private void LoadDrawDatas(int data, UcBoyutluNesneBilgisi obje)
{
    Rectangle r1 = new Rectangle(0, 0, obje.Texture.Width, obje.Texture.Height);
    GL.GenTextures(1, this.textures[data]);
    GL.ActiveTexture(TextureUnit.Texture0);
    GL.Enable(EnableCap.Texture2D);
    GL.BindTexture(TextureTarget.Texture2D, this.textures[data][0]);
    System.Drawing.Imaging.BitmapData bitmapData21 = obje.Texture.LockBits(r1, System.Drawing.Imaging.ImageLockMode.ReadOnly, System.Drawing.Imaging.PixelFormat.Format32bppArgb);
    GL.TexImage2D(TextureTarget.Texture2D, 0, PixelInternalFormat.Rgba, obje.Texture.Width, obje.Texture.Height, 0, PixelFormat.Bgra, PixelType.UnsignedByte, bitmapData21.Scan0);
    GL.TexParameter(TextureTarget.Texture2D, TextureParameterName.TextureMinFilter, (int)TextureMinFilter.Linear);
    obje.Texture.UnlockBits(bitmapData21);

    GL.GenBuffers(1, this.texturesBuffer[data]);
    GL.BindBuffer(BufferTarget.PixelPackBuffer, this.texturesBuffer[data][0]);
    GL.BufferData(BufferTarget.PixelPackBuffer, (IntPtr)(obje.texturesCoord.Count * sizeof(float)), obje.texturesCoord.ToArray(), BufferUsageHint.StreamDraw);

    GL.GenBuffers(1, this.normal[data]);
    GL.BindBuffer(BufferTarget.PixelPackBuffer, this.normal[data][0]);
    GL.BufferData(BufferTarget.PixelPackBuffer, (IntPtr)(obje.Normals.Count * sizeof(float)), obje.Normals.ToArray(), BufferUsageHint.StreamDraw);

    GL.GenBuffers(1, this.vertex[data]);
    GL.BindBuffer(BufferTarget.PixelPackBuffer, this.vertex[data][0]);
    GL.BufferData(BufferTarget.PixelPackBuffer, (IntPtr)(obje.VertexKoordinatlari.Count * sizeof(float)), IntPtr.Zero, BufferUsageHint.StreamDraw);
    GL.BufferSubData(BufferTarget.PixelPackBuffer, IntPtr.Zero, (IntPtr)(obje.Vertexs.Count * sizeof(float)), obje.VertexsToArray());

    GL.GenBuffers(1, this.indeks[data]);
    GL.BindBuffer(BufferTarget.PixelPackBuffer, this.indeks[data][0]);
    GL.BufferData(BufferTarget.PixelPackBuffer, (IntPtr)(this.indexsData[data].Length * sizeof(int)), this.indexsData[data], BufferUsageHint.StreamDraw);
}
private void openGLControl_Paint(object sender, PaintEventArgs e)
{
    GL.Clear(ClearBufferMask.ColorBufferBit | ClearBufferMask.DepthBufferBit);
    GL.MatrixMode(MatrixMode.Modelview);
    GL.LoadIdentity();
    GL.MatrixMode(MatrixMode.Projection);
    GL.LoadIdentity();
    GL.Rotate(X, 1, 0, 0);
    GL.Rotate(Y, 0, 1, 0);
    GL.Rotate(Z, 0, 0, 1);
    GL.Scale(this.zoom * 0.01f, this.zoom * 0.01f, this.zoom * 0.01f);
    GL.PushMatrix();

    GL.EnableClientState(ArrayCap.NormalArray);
    GL.BindBuffer(BufferTarget.ArrayBuffer, normals[0]);
    GL.NormalPointer(NormalPointerType.Float, 0, 0);

    GL.EnableClientState(ArrayCap.VertexArray);
    GL.BindBuffer(BufferTarget.ArrayBuffer, vertex[0]);
    GL.VertexPointer(3, VertexPointerType.Float, 0, IntPtr.Zero);

    if (texturesBuffer != null)
    {
        GL.EnableClientState(ArrayCap.TextureCoordArray);
        GL.BindBuffer(BufferTarget.ArrayBuffer, texturesBuffer[0]);
        GL.TexCoordPointer(2, TexCoordPointerType.Float, 0, IntPtr.Zero);
    }

    GL.BindBuffer(BufferTarget.ElementArrayBuffer, indeks[0]);
    GL.DrawElements(PrimitiveType.Triangles, indexsData.Length, DrawElementsType.UnsignedInt, IntPtr.Zero);

    GL.BindBuffer(BufferTarget.ArrayBuffer, 0);
    GL.BindBuffer(BufferTarget.ElementArrayBuffer, 0);
    GL.DisableClientState(ArrayCap.NormalArray);
    GL.DisableClientState(ArrayCap.VertexArray);
    GL.DisableClientState(ArrayCap.TextureCoordArray);
    GL.PopMatrix();
}
You don't need to call glGenBuffers every time you want to upload data to your VBO (the buffers created by glGenBuffers are what's called VBOs).
Once your VBOs are created, you can simply overwrite the data using glBufferData; OpenGL will reallocate the memory on the GPU by itself in the background.
Also, for better performance, if your VBOs keep the same size from one frame to the next, you can use glBufferSubData to update the contents of your buffers without the performance penalty of OpenGL reallocating the memory on the GPU.
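The question's code is C#/OpenTK, but the pattern is the same in any binding. A minimal sketch in Java/JOGL, matching the style of the earlier threads here (vboId and the same-size check are illustrative assumptions):

import java.nio.FloatBuffer;
import com.jogamp.common.nio.Buffers;
import com.jogamp.opengl.GL2;

class BufferReuseSketch {
    private int vboId; // generated once, reused for every upload

    // Re-uploads vertex data without leaking buffers: glBufferData
    // re-specifies the data store (the driver reallocates internally),
    // while glBufferSubData overwrites it in place when the size is unchanged.
    void update(GL2 gl, float[] data, boolean sameSizeAsLastFrame) {
        if (vboId == 0) {
            int[] id = new int[1];
            gl.glGenBuffers(1, id, 0);
            vboId = id[0];
        }
        FloatBuffer buf = Buffers.newDirectFloatBuffer(data);
        gl.glBindBuffer(GL2.GL_ARRAY_BUFFER, vboId);
        if (sameSizeAsLastFrame) {
            gl.glBufferSubData(GL2.GL_ARRAY_BUFFER, 0, (long) data.length * Float.BYTES, buf);
        } else {
            gl.glBufferData(GL2.GL_ARRAY_BUFFER, (long) data.length * Float.BYTES, buf, GL2.GL_DYNAMIC_DRAW);
        }
    }
}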

LWJGL texture with shaders produced skewed image

I'm trying to do 2D graphics with an orthographic projection. The code loads a picture of a cat and two simple shaders, which just pass through their input unmodified. I expect the program to display the picture of the cat (or at least part of it) in the middle of the screen, without any rotation or skew.
The program executes successfully, but I can't figure out why the result looks like this:
An OpenGL guru might spot the problem quickly, but I can't find it. I have the feeling that the problem might be at the "Create buffer for vertex and texture coordinates" part, but everything looked okay.
The cat image:
Vertex shader:
#version 150 core

in vec4 in_Position;
in vec2 in_TextureCoord;
out vec2 pass_TextureCoord;

void main(void) {
    gl_Position = in_Position;
    pass_TextureCoord = in_TextureCoord;
}
Pixel shader:
#version 150 core

uniform sampler2D texture_diffuse;
in vec2 pass_TextureCoord;
out vec4 out_Color;

void main(void) {
    out_Color = texture(texture_diffuse, pass_TextureCoord);
}
Java (LWJGL) code:
package lwjgl_test1;

import java.awt.image.BufferedImage;
import java.io.*;
import java.nio.*;
import javax.imageio.ImageIO;
import org.lwjgl.*;
import org.lwjgl.opengl.*;
import static org.lwjgl.glfw.GLFW.*;
import java.util.concurrent.TimeUnit;
import static org.lwjgl.opengl.GL11.*;

public class Main {

    public static void main(String[] args) {
        try {
            if (!glfwInit()) {
                throw (new Exception("Can't init glfw."));
            }

            /*
             * Create window
             */
            glfwWindowHint(GLFW_RESIZABLE, 0);
            long windowGlID = glfwCreateWindow(1024, 768, "Example OpenGL App", 0, 0);
            glfwSetWindowPos(windowGlID, 50, 50);
            glfwMakeContextCurrent(windowGlID);
            glfwShowWindow(windowGlID);

            /*
             * Initialize OpenGL
             */
            GL.createCapabilities();
            glMatrixMode(GL_PROJECTION);
            glLoadIdentity();
            glOrtho(0, 1024, 768, 0, 0, 1);
            glMatrixMode(GL_MODELVIEW);

            /*
             * Load texture
             */
            int cat = loadTexture("cat.png");

            /*
             * Load shaders
             */
            int vertexShader = loadShader("vertex_shader.txt", GL20.GL_VERTEX_SHADER);
            int pixelShader = loadShader("pixel_shader.txt", GL20.GL_FRAGMENT_SHADER);
            int pId = GL20.glCreateProgram();
            GL20.glAttachShader(pId, vertexShader);
            GL20.glAttachShader(pId, pixelShader);
            // Position information will be attribute 0
            GL20.glBindAttribLocation(pId, 0, "in_Position");
            // Texture information will be attribute 1
            GL20.glBindAttribLocation(pId, 1, "in_TextureCoord");
            GL20.glLinkProgram(pId);
            GL20.glValidateProgram(pId);
            exitOnGLError("Compiling shaders failed.");

            /*
             * Create buffer for vertex and texture coordinates
             */
            float size = 120.0f;
            FloatBuffer vertex_data = BufferUtils.createFloatBuffer(20);
            vertex_data.put(new float[] { -size, -size, 0f, 0f, 0f }); // (Vx, Vy, Vz, Tx, Ty)
            vertex_data.put(new float[] { size, -size, 0f, 0f, 1f });
            vertex_data.put(new float[] { size, size, 0f, 1f, 1f });
            vertex_data.put(new float[] { -size, size, 0f, 1f, 0f });
            vertex_data.flip();

            int vbo_vertex_handle = GL15.glGenBuffers();
            GL15.glBindBuffer(GL15.GL_ARRAY_BUFFER, vbo_vertex_handle);
            GL15.glBufferData(GL15.GL_ARRAY_BUFFER, vertex_data, GL15.GL_STATIC_DRAW);
            GL20.glVertexAttribPointer(0, 3, GL11.GL_FLOAT, false, 2 * 4, 0); // mark vertex coordinates
            GL20.glVertexAttribPointer(1, 2, GL11.GL_FLOAT, false, 3 * 4, 3 * 4); // mark texture coordinates
            GL15.glBindBuffer(GL15.GL_ARRAY_BUFFER, 0);
            GL30.glBindVertexArray(0);
            exitOnGLError("Creating buffers failed.");

            /*
             * Main rendering loop
             */
            while (true) {
                /*
                 * Clear screen
                 */
                glClearColor(0.0f, 1.0f, 1.0f, 0.0f);
                glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);

                /*
                 * Apply shader program
                 */
                GL20.glUseProgram(pId);

                // Bind the texture
                GL13.glActiveTexture(GL13.GL_TEXTURE0);
                GL11.glBindTexture(GL11.GL_TEXTURE_2D, cat);

                /*
                 * Draw (use buffers)
                 */
                GL20.glEnableVertexAttribArray(0);
                GL20.glEnableVertexAttribArray(1);
                GL15.glBindBuffer(GL15.GL_ARRAY_BUFFER, vbo_vertex_handle);
                GL11.glDrawArrays(GL11.GL_QUADS, 0, 4); // Draw an entity with 4 vertices
                GL15.glBindBuffer(GL15.GL_ARRAY_BUFFER, 0);
                exitOnGLError("Draw failed.");

                GL11.glBindTexture(GL11.GL_TEXTURE_2D, 0);
                GL20.glUseProgram(0); // deselect

                /*
                 * Swap buffers
                 */
                glfwSwapBuffers(windowGlID);

                /*
                 * Events
                 */
                glfwPollEvents();
                if (glfwWindowShouldClose(windowGlID)) {
                    break;
                }
                TimeUnit.MILLISECONDS.sleep(10);
            }
        } catch (Exception e) {
            e.printStackTrace();
        }
    }
    private static int loadTexture(String path) throws Exception {
        int[] pixels = null;
        BufferedImage image = null;
        image = ImageIO.read(new FileInputStream(path));
        int width = image.getWidth();
        int height = image.getHeight();
        pixels = new int[width * height];
        image.getRGB(0, 0, width, height, pixels, 0, width);

        // Repack ARGB pixels as ABGR ints (RGBA byte order on little-endian).
        int[] data = new int[width * height];
        for (int i = 0; i < width * height; i++) {
            int a = (pixels[i] & 0xff000000) >> 24;
            int r = (pixels[i] & 0xff0000) >> 16;
            int g = (pixels[i] & 0xff00) >> 8;
            int b = (pixels[i] & 0xff);
            data[i] = a << 24 | b << 16 | g << 8 | r;
        }

        IntBuffer intBuffer1 = ByteBuffer.allocateDirect(data.length << 2).order(ByteOrder.nativeOrder()).asIntBuffer();
        intBuffer1.put(data).flip();

        int result = glGenTextures();
        glBindTexture(GL_TEXTURE_2D, result);
        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
        glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, width, height, 0, GL_RGBA, GL_UNSIGNED_BYTE, intBuffer1);
        glBindTexture(GL_TEXTURE_2D, 0);
        exitOnGLError("Loading texture '" + path + "' failed.");
        return result;
    }
    private static int loadShader(String filename, int type) {
        StringBuilder shaderSource = new StringBuilder();
        int shaderID = 0;
        try {
            BufferedReader reader = new BufferedReader(new FileReader(filename));
            String line;
            while ((line = reader.readLine()) != null) {
                shaderSource.append(line).append("\n");
            }
            reader.close();
        } catch (IOException e) {
            System.err.println("Could not read file.");
            e.printStackTrace();
            System.exit(-1);
        }
        shaderID = GL20.glCreateShader(type);
        GL20.glShaderSource(shaderID, shaderSource);
        GL20.glCompileShader(shaderID);
        if (GL20.glGetShaderi(shaderID, GL20.GL_COMPILE_STATUS) == GL11.GL_FALSE) {
            System.err.println("Could not compile shader.");
            System.exit(-1);
        }
        return shaderID;
    }
    private static void exitOnGLError(String errorMessage) throws Exception {
        int errorValue = GL11.glGetError();
        if (errorValue != GL11.GL_NO_ERROR) {
            throw new Exception(errorMessage);
        }
    }
}
The problem lies in the stride parameter in these lines:

GL20.glVertexAttribPointer(0, 3, GL11.GL_FLOAT, false, 2 * 4, 0);
GL20.glVertexAttribPointer(1, 2, GL11.GL_FLOAT, false, 3 * 4, 3 * 4);

Stride tells OpenGL how many bytes apart the beginnings of two consecutive entries are. Since you are using 5 floats per vertex, it has to be 5 * 4 in both lines:

GL20.glVertexAttribPointer(0, 3, GL11.GL_FLOAT, false, 5 * 4, 0);
GL20.glVertexAttribPointer(1, 2, GL11.GL_FLOAT, false, 5 * 4, 3 * 4);
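One way to avoid this kind of mismatch is to derive the stride from the layout once and reuse it everywhere; a minimal sketch under the same 5-floats-per-vertex layout (the class and constant names are just for illustration):

import static org.lwjgl.opengl.GL11.GL_FLOAT;
import static org.lwjgl.opengl.GL20.glVertexAttribPointer;

class StrideSketch {
    // One interleaved vertex = 3 position floats + 2 texture floats.
    static final int FLOATS_PER_VERTEX = 3 + 2;
    static final int STRIDE = FLOATS_PER_VERTEX * Float.BYTES; // bytes between consecutive vertices

    static void pointAttributes() {
        // Attribute 0: position, starting at byte 0 of each vertex.
        glVertexAttribPointer(0, 3, GL_FLOAT, false, STRIDE, 0);
        // Attribute 1: texture coordinates, after the 3 position floats.
        glVertexAttribPointer(1, 2, GL_FLOAT, false, STRIDE, 3 * Float.BYTES);
    }
}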

I can't get a simple indexed array rendered properly

I am porting this sample (site) to JOGL, but I noticed something isn't right in the image: artifacts on the floor and shapes that aren't exactly square (don't mind the colors, they vary), and the floor doesn't look good:
Therefore I tried to render only the floor first (easy to try: switch SQRT_BUILDING_COUNT from 100 to 0). There I already hit the first problem: it is supposed to be a square made of two triangles, but I see only one of them.
My vertex structure:
public float[] position = new float[3];
public byte[] color = new byte[4];
public float[] attrib0 = new float[4];
public float[] attrib1 = new float[4];
public float[] attrib2 = new float[4];
public float[] attrib3 = new float[4];
public float[] attrib4 = new float[4];
public float[] attrib5 = new float[4];
public float[] attrib6 = new float[4];
attrib0-6 are unused at the moment
My VS inputs:
// Input attributes
layout(location=0) in vec4 iPos;
layout(location=1) in vec4 iColor;
layout(location=2) in PerMeshUniforms* bindlessPerMeshUniformsPtr;
layout(location=3) in vec4 iAttrib3;
layout(location=4) in vec4 iAttrib4;
layout(location=5) in vec4 iAttrib5;
layout(location=6) in vec4 iAttrib6;
layout(location=7) in vec4 iAttrib7;
I am declaring iPos as a vec3, so I guess it will be padded to vec4(iPos, 1) in the VS.
I transfer data to the GPU:
gl4.glNamedBufferData(vertexBuffer[0], Vertex.size() * vertices.size(),
        GLBuffers.newDirectFloatBuffer(verticesArray), GL4.GL_STATIC_DRAW);
gl4.glNamedBufferData(indexBuffer[0], GLBuffers.SIZEOF_SHORT * indices.size(),
        GLBuffers.newDirectShortBuffer(indicesArray), GL4.GL_STATIC_DRAW);
Then before I render I call:
gl4.glEnableVertexArrayAttrib(0, 0);
gl4.glEnableVertexArrayAttrib(0, 1);
Then I render; the original code is:
// Set up attribute 0 for the position (3 floats)
glVertexArrayVertexAttribOffsetEXT(0, m_vertexBuffer, 0, 3, GL_FLOAT, GL_FALSE, sizeof(Vertex), Vertex::PositionOffset);
// Set up attribute 1 for the color (4 unsigned bytes)
glVertexArrayVertexAttribOffsetEXT(0, m_vertexBuffer, 1, 4, GL_UNSIGNED_BYTE, GL_TRUE, sizeof(Vertex), Vertex::ColorOffset);
I substituted it with:
// Set up attribute 0 for the position (3 floats)
gl4.glVertexArrayVertexBuffer(0, 0, vertexBuffer[0], Vertex.positionOffset,
        Vertex.size());
gl4.glVertexArrayAttribFormat(0, 0, 3, GL4.GL_FLOAT, false, Vertex.size());
// Set up attribute 1 for the color (4 unsigned bytes)
gl4.glVertexArrayVertexBuffer(0, 1, vertexBuffer[0], Vertex.colorOffset,
        Vertex.size());
gl4.glVertexArrayAttribFormat(0, 1, 4, GL4.GL_UNSIGNED_BYTE, true, Vertex.size());
And then I finish the render:
// Reset state
gl4.glDisableVertexArrayAttrib(0, 0);
gl4.glDisableVertexArrayAttrib(0, 1);
I admit I have never used DSA before; I always used GL3 with the normal VBO, VAO and IBO, binding and unbinding.
Culling is off.
What's wrong then?
Solved: the problem was that I hadn't implemented DSA properly.
glEnableVertexArrayAttrib(vao, 0);
glEnableVertexArrayAttrib(vao, 1);
// Setup the formats
glVertexArrayAttribFormat(vao, 0, 3, GL_FLOAT, GL_FALSE, 0);
glVertexArrayAttribFormat(vao, 1, 2, GL_FLOAT, GL_FALSE, 0);
// Setup the buffer sources
glVertexArrayVertexBuffer(vao, 0, buffers[0], 0, 0); // Note: the 2nd argument here is a 'binding index', not the attribute index
glVertexArrayVertexBuffer(vao, 1, buffers[1], 0, 0);
// Link them up
glVertexArrayAttribBinding(vao, 0, 0); // Associate attrib 0 (first 0) with binding 0 (second 0).
glVertexArrayAttribBinding(vao, 1, 1);
plus glVertexArrayElementBuffer if you use indexed rendering.
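For completeness, a sketch of the same DSA calls for a single interleaved buffer matching the question's vertex layout (3 position floats followed by 4 normalized color bytes); vao, vbo and ebo are assumed to be names created with glCreateVertexArrays/glCreateBuffers, and the class name is illustrative:

import static org.lwjgl.opengl.GL11.GL_FLOAT;
import static org.lwjgl.opengl.GL11.GL_UNSIGNED_BYTE;
import static org.lwjgl.opengl.GL45.*;

class DsaInterleavedSketch {
    static void setup(int vao, int vbo, int ebo, int strideBytes) {
        glEnableVertexArrayAttrib(vao, 0);
        glEnableVertexArrayAttrib(vao, 1);
        // Formats: offsets are relative to the start of one vertex.
        glVertexArrayAttribFormat(vao, 0, 3, GL_FLOAT, false, 0);
        glVertexArrayAttribFormat(vao, 1, 4, GL_UNSIGNED_BYTE, true, 3 * Float.BYTES);
        // One buffer at binding index 0, advancing by the vertex stride.
        glVertexArrayVertexBuffer(vao, 0, vbo, 0, strideBytes);
        // Both attributes read from binding 0.
        glVertexArrayAttribBinding(vao, 0, 0);
        glVertexArrayAttribBinding(vao, 1, 0);
        // Index buffer for indexed rendering.
        glVertexArrayElementBuffer(vao, ebo);
    }
}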

VBO doesn't use UV coordinates

My render method currently looks like this:
void Renderer::render() {
    glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
    checkGlError("glClear");
    EntityCamera* camera = (EntityCamera*) resourceManager_->getResource(GHOST_CAMERA);
    mat4 proj;
    Matrix::projection3D(proj, 45.0f,
        (float) nScreenWidth_ / nScreenHeight_, GHOST_NEAR_DISTANCE, GHOST_FAR_DISTANCE);
    mat4 view;
    Matrix::multiply(proj, camera_->getMatrix(), view);
    camera->extractPlanes(view);
    for (vector<Node*>::const_iterator it = renderArray_.begin(); it != renderArray_.end();
            it++) {
        Node* node = *it;
        if (!node->isRenderable()) {
            continue;
        }
        if (node->hasBV() && node->getBV()->isInFrustum(camera, node) == BoundingVolume::OUTSIDE) {
            LOGI("Node %s is outside :O", node->getName().c_str());
            continue;
        }
        EntityModel* entity =
            static_cast<EntityModel*>(resourceManager_->getResource(
                (*it)->getEntity()));
        if (entity == 0 || entity->getVertices() == 0 || entity->getVertices()->size() == 0) {
            LOGI("Empty entity %s.", node->getName().c_str());
            continue;
        }
        Resource* resource = resourceManager_->getResource(node->getShader());
        Shader* shader = static_cast<Shader*>(resource);
        Resource* resource2 = resourceManager_->getResource(entity->getTexture());
        Image* image = static_cast<Image*>(resource2);
        mat4 res;
        Matrix::multiply(view, node->getMatrix(), res);
        // Select shader program to use.
        glUseProgram(shader->getId());
        checkGlError("glUseProgram");
        int matrix = glGetUniformLocation(shader->getId(), "uWVP");
        int texture = glGetUniformLocation(shader->getId(), "texture_0");
        checkGlError("glGetUniformLocation");
        int textureCoords = glGetAttribLocation(shader->getId(), "attrTexCoords");
        int vertices = glGetAttribLocation(shader->getId(), "attrPos");
        checkGlError("glGetAttribLocation");
        // Specify WVP matrix.
        glUniformMatrix4fv(matrix, 1, false, res);
        checkGlError("glUniformMatrix4fv");
        // Load vertex positions.
        if (!entity->isCompiled()) {
            //LOGI("Entity %s, not compiled.", entity->getName().c_str());
            continue;
        }
        glEnableVertexAttribArray(vertices);
        checkGlError("glEnableVertexAttribArray");
        //glVertexAttribPointer(vertices, 3, GL_FLOAT, GL_FALSE, 0,
        //        &(*entity->getVertices())[0]);
        //LOGI("%s vbo id: %d", node->getName().c_str(), entity->getVBO());
        glBindBuffer(GL_ARRAY_BUFFER, entity->getVBO());
        checkGlError("glBindBuffer");
        glVertexAttribPointer(vertices, 3, GL_FLOAT, GL_FALSE, 0, 0);
        checkGlError("glVertexAttribPointer");
        // Load UV coordinates.
        glEnableVertexAttribArray(textureCoords);
        checkGlError("glEnableVertexAttribArray");
        glVertexAttribPointer(textureCoords, 2, GL_FLOAT, GL_FALSE, 0,
            &(*entity->getTextureCoords())[0]);
        checkGlError("glVertexAttribPointer");
        // Bind the texture.
        glActiveTexture(GL_TEXTURE0);
        checkGlError("glActiveTexture");
        glBindTexture(GL_TEXTURE_2D, image->getId());
        checkGlError("glBindTexture");
        glUniform1i(texture, 0);
        checkGlError("glUniform1i");
        if (entity->hasIndices()) {
            vector<vector<GLushort>*>* indices = entity->getIndices();
            for (unsigned int i = 0; i < indices->size(); i++) {
                if (entity->hasBoundingVolumes()) {
                    BoundingVolume* volume = (*entity->getBoundingVolumes())[i];
                    if (volume->isInFrustum(camera, node) == BoundingVolume::OUTSIDE) {
                        continue;
                    }
                }
                vector<GLushort>* ind = (*indices)[i];
                glDrawElements(GL_TRIANGLES, ind->size(), GL_UNSIGNED_SHORT, &(*ind)[0]);
                checkGlError("glDrawElements");
            }
        } else {
            glDrawArrays(GL_TRIANGLES, 0, entity->getVertices()->size() / 3);
            checkGlError("glDrawArrays");
        }
        glBindBuffer(GL_ARRAY_BUFFER, 0);
        checkGlError("glBindBuffer");
    }
}
I just recently switched to a VBO; before, I was sending the vertex data directly and everything worked fine, textures were mapped correctly. Now, even though the geometry renders, no textures are applied; I only see a black object.
What might be wrong with my textures?
Also, why do I get disfigured objects when I swap the order of the glVertexAttribPointer(vertices, 3, GL_FLOAT, GL_FALSE, 0, 0); and glBindBuffer(GL_ARRAY_BUFFER, entity->getVBO()); calls? Is the call order I'm using correct?
You're sending your UV coordinates from plain memory, while you seem to send your vertex coordinates from a VBO. This is not very efficient; you should put both data sets in VBOs to profit from their advantages.
That being said, I think your problem is that you don't unbind your VBO before sending your UV coordinates. Your code should be:

glBindBuffer(GL_ARRAY_BUFFER, 0);
glVertexAttribPointer(textureCoords, 2, GL_FLOAT, GL_FALSE, 0,
    &(*entity->getTextureCoords())[0]);

as I suppose your getTextureCoords() does not return an offset into your VBO.
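To follow the first suggestion (both attribute arrays in buffer objects), here is a minimal sketch of uploading and pointing each attribute from its own VBO. It is written in Java/LWJGL like the earlier examples on this page rather than the question's C++, and the helper names are illustrative:

import java.nio.FloatBuffer;
import org.lwjgl.BufferUtils;
import static org.lwjgl.opengl.GL11.GL_FLOAT;
import static org.lwjgl.opengl.GL15.*;
import static org.lwjgl.opengl.GL20.*;

class TwoVboSketch {
    // Upload one float array (positions or UVs) into its own VBO.
    static int upload(float[] data) {
        FloatBuffer buf = BufferUtils.createFloatBuffer(data.length);
        buf.put(data).flip();
        int vbo = glGenBuffers();
        glBindBuffer(GL_ARRAY_BUFFER, vbo);
        glBufferData(GL_ARRAY_BUFFER, buf, GL_STATIC_DRAW);
        glBindBuffer(GL_ARRAY_BUFFER, 0);
        return vbo;
    }

    // Point an attribute at a VBO; the last argument is a byte offset
    // into the buffer, so no client-memory pointer is involved at draw time.
    static void point(int attrib, int vbo, int components) {
        glEnableVertexAttribArray(attrib);
        glBindBuffer(GL_ARRAY_BUFFER, vbo);
        glVertexAttribPointer(attrib, components, GL_FLOAT, false, 0, 0L);
    }
}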