GLSL: Rendering a 2D texture - C++

I was following LazyFoo's tutorial on GLSL 2D texturing (http://lazyfoo.net/tutorials/OpenGL/34_glsl_texturing/index.php), and I was able to get most parts working.
However, the program renders the texture zoomed in far too close. Is this an issue with the vertex positions or with the texture lookup? Below is the vertex shader I was using in my implementation:
texCoord = LTexCoord;
gl_Position = gl_ProjectionMatrix * gl_ModelViewMatrix * vec4( LVertexPos2D.x, LVertexPos2D.y, 0.0, 1.0 );
And below is the fragment shader I was using:
gl_FragColor = texture( textureID, texCoord );
As for the render function, I deviate from the tutorial by using OpenGL's fixed-function pipeline matrices (so there is no need to update matrix uniforms):
//If the texture exists
if( mTextureID != 0 )
{
    //Texture coordinates
    GLfloat texTop = 0.f;
    GLfloat texBottom = (GLfloat)mImageHeight / (GLfloat)mTextureHeight;
    GLfloat texLeft = 0.f;
    GLfloat texRight = (GLfloat)mImageWidth / (GLfloat)mTextureWidth;

    //Vertex coordinates
    GLfloat quadWidth = mImageWidth;
    GLfloat quadHeight = mImageHeight;

    //Set vertex data
    LVertexData2D vData[ 4 ];

    //Texture coordinates
    vData[ 0 ].texCoord.s = texLeft;  vData[ 0 ].texCoord.t = texTop;
    vData[ 1 ].texCoord.s = texRight; vData[ 1 ].texCoord.t = texTop;
    vData[ 2 ].texCoord.s = texRight; vData[ 2 ].texCoord.t = texBottom;
    vData[ 3 ].texCoord.s = texLeft;  vData[ 3 ].texCoord.t = texBottom;

    //Vertex positions
    vData[ 0 ].position.x = 0.f;       vData[ 0 ].position.y = 0.f;
    vData[ 1 ].position.x = quadWidth; vData[ 1 ].position.y = 0.f;
    vData[ 2 ].position.x = quadWidth; vData[ 2 ].position.y = quadHeight;
    vData[ 3 ].position.x = 0.f;       vData[ 3 ].position.y = quadHeight;

    glEnable( GL_TEXTURE_2D );
    glBindTexture( GL_TEXTURE_2D, mTextureID );

    glContext.textureShader->bind();
    glContext.textureShader->setTextureID( mTextureID );
    glContext.textureShader->enableVertexPointer();
    glContext.textureShader->enableTexCoordPointer();

    glBindBuffer( GL_ARRAY_BUFFER, mVBOID );
    glBufferSubData( GL_ARRAY_BUFFER, 0, 4 * sizeof(LVertexData2D), vData );

    glContext.textureShader->setTexCoordPointer( sizeof(LVertexData2D), (GLvoid*)offsetof( LVertexData2D, texCoord ) );
    glContext.textureShader->setVertexPointer( sizeof(LVertexData2D), (GLvoid*)offsetof( LVertexData2D, position ) );

    glBindBuffer( GL_ELEMENT_ARRAY_BUFFER, mIBOID );
    glDrawElements( GL_TRIANGLE_FAN, 4, GL_UNSIGNED_INT, NULL );

    glContext.textureShader->disableVertexPointer();
    glContext.textureShader->disableTexCoordPointer();
    glContext.textureShader->unbind();

    glBindBuffer( GL_ELEMENT_ARRAY_BUFFER, 0 );
    glBindTexture( GL_TEXTURE_2D, NULL );
    glDisable( GL_TEXTURE_2D ); // disable texture 2d
}
}
In response to Koradi, the vertex and texture coordinate pointers are set up as follows:
void TextureShader::setVertexPointer( GLsizei stride, const GLvoid* data )
{
    glVertexAttribPointer( mVertexPosLocation, 2, GL_FLOAT, GL_FALSE, stride, data );
}

void TextureShader::setTexCoordPointer( GLsizei stride, const GLvoid* data )
{
    glVertexAttribPointer( mTexCoordLocation, 2, GL_FLOAT, GL_FALSE, stride, data );
}
It is rendered in the main loop with the following code:
glPushMatrix();
glTranslatef( glContext.gFBOTexture->imageWidth() / -2.f, glContext.gFBOTexture->imageHeight() / -2.f, 0.f );
glContext.gFBOTexture->render();
glPopMatrix();
Is there something obvious that I am overlooking? I am new to GLSL.
Edit: Added more code

After mulling it over for a few days, I found that the issue was how the sampler2D uniform is passed to GLSL:
glBindTexture( GL_TEXTURE_2D, mTextureID );
glContext.textureShader->bind();
glContext.textureShader->setTextureID( mTextureID );
was corrected to:
glBindTexture( GL_TEXTURE_2D, mTextureID );
glContext.textureShader->bind();
glContext.textureShader->setTextureID( 0 );
setTextureID() sets the sampler2D uniform variable. Once the texture is bound, the sampler2D uniform should be set to the texture unit (0 here), not the texture object's ID.
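For reference, a minimal sketch of what setTextureID() is assumed to do; the member name mTextureIDLocation is hypothetical, and the uniform location is presumably cached with glGetUniformLocation when the shader program is loaded:

void TextureShader::setTextureID( GLuint unit )
{
    // 'unit' is the texture unit index (0 for GL_TEXTURE0), not the handle
    // returned by glGenTextures.
    glUniform1i( mTextureIDLocation, unit );
}

With the texture bound to GL_TEXTURE0 (the default active unit), passing 0 tells the sampler which unit to read from.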

Related

Relationship of offset in glMapBufferRange(...) and first in glDrawArraysInstanced(...)

I'm struggling to understand the relationship between the offset variables in the two functions and how the offset value affects gl_VertexID and gl_InstanceID variables in the shader.
From reading the documentation of both functions, I think glMapBufferRange expects offset to be the number of bytes from the start of the buffer, whereas glDrawArraysInstanced expects first to be the number of strides, as specified by glVertexAttribPointer.
However, that doesn't seem to be the case, as the code below doesn't work if offsetVerts has a value other than 0. With 0 it renders 3 squares on the screen, as I expect.
The other possible error source would be the value of gl_VertexID. I'd expect it to be 0,1,2,3 for the 4 vertex shader calls per instance, regardless of the offset value.
Just to make sure, I also tried using a first value that is a multiple of 4 and vertices[int(mod(gl_VertexID,4))] for the position lookup, without success.
How can I alter the code to make it work with offsets other than 0?
glGetError() calls are omitted here to shorten the code, it's 0 through the whole process. GL version is 3.3.
Init code:
GLuint buff_id, v_id;
GLint bytesPerVertex = 2*sizeof(GLfloat); //8
glGenBuffers( 1, &buff_id );
glBindBuffer( GL_ARRAY_BUFFER, buff_id );
glGenVertexArrays( 1, &v_id );
glBufferData( GL_ARRAY_BUFFER, 1024, NULL, GL_STREAM_DRAW );
glBindVertexArray( v_id );
glEnableVertexAttribArray( posLoc );
glVertexAttribPointer( posLoc, 2, GL_FLOAT, GL_FALSE, bytesPerVertex, (void *)0 );
glVertexAttribDivisor( posLoc, 1 );
glBindVertexArray( 0 );
glBindBuffer( GL_ARRAY_BUFFER, 0 );
float *data_ptr = nullptr;
int numVerts = 3;
int offsetVerts = 0;
Render code:
glBindBuffer( GL_ARRAY_BUFFER, buff_id );
data_ptr = (float *)glMapBufferRange( GL_ARRAY_BUFFER,
                                      bytesPerVertex * offsetVerts,
                                      bytesPerVertex * numVerts,
                                      GL_MAP_WRITE_BIT );
data_ptr[0] = 50;
data_ptr[1] = 50;
data_ptr[2] = 150;
data_ptr[3] = 50;
data_ptr[4] = 250;
data_ptr[5] = 50;
glUnmapBuffer( GL_ARRAY_BUFFER );
glBindBuffer( GL_ARRAY_BUFFER, 0 );
glBindVertexArray( v_id );
glDrawArraysInstanced( GL_TRIANGLE_STRIP, offsetVerts, 4, 3 );
glBindVertexArray( 0 );
Vertex shader:
#version 330

uniform mat4 proj;
in vec2 pos;

void main() {
    vec2 vertices[4] = vec2[4](
        vec2(pos.x,         pos.y),
        vec2(pos.x + 10.0f, pos.y),
        vec2(pos.x,         pos.y + 10.0f),
        vec2(pos.x + 10.0f, pos.y + 10.0f)
    );
    gl_Position = proj * vec4(vertices[gl_VertexID], 1, 1);
}
Fragment shader:
#version 330

out vec4 LFragment;

void main() {
    LFragment = vec4( 1.0f, 1.0f, 1.0f, 1.0f );
}
The other possible error source would be the value of gl_VertexID. I'd expect it to be 0,1,2,3 for the 4 vertex shader calls per instance, regardless of the offset value.
There is no offset value in glDrawArrays*
The base function for this is glDrawArrays(mode, first, count), and it simply generates primitives from a consecutive sub-array of the specified vertex attribute arrays, from index first to first+count-1. Hence, gl_VertexID will be in the range first to first+count-1.
You are actually not using any per-vertex attribute array: you turned your attribute into a per-instance attribute, and the first parameter does not introduce an offset into per-instance arrays. You can either adjust your attribute pointer to include the offset, or use glDrawArraysInstancedBaseInstance to specify the offset you need.
Note that gl_InstanceID will not reflect the base instance you set there; it still counts from 0 relative to the beginning of the draw call. But the actual instance values fetched from the array will use the offset.
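A minimal sketch of the two options, reusing the buff_id / v_id / posLoc / bytesPerVertex / offsetVerts names from the question:

// Option 1: bake the byte offset into the attribute pointer (works on GL 3.3).
// The pointer has to be re-specified whenever offsetVerts changes.
glBindVertexArray( v_id );
glBindBuffer( GL_ARRAY_BUFFER, buff_id );
glVertexAttribPointer( posLoc, 2, GL_FLOAT, GL_FALSE, bytesPerVertex,
                       (GLvoid *)(intptr_t)( bytesPerVertex * offsetVerts ) );
glDrawArraysInstanced( GL_TRIANGLE_STRIP, 0, 4, 3 );

// Option 2: keep the original pointer and pass the offset as a base instance
// (requires GL 4.2 or the ARB_base_instance extension).
glDrawArraysInstancedBaseInstance( GL_TRIANGLE_STRIP, 0, 4, 3, offsetVerts );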

OpenGL drawing meshes incorrectly

I'm attempting to make an OpenGL engine in C++, but cannot render meshes correctly. Meshes, when rendered, create faces that connect two random points on the mesh, or connect a random point on the mesh with the origin (0,0,0).
The problem can be seen here:
(I made it a wireframe to see the problem more clearly)
Code:
// Render all meshes (Graphics.cpp)
for( int curMesh = 0; curMesh < numMesh; curMesh++ ) {
    // Save pointer of buffer
    meshes[curMesh]->updatebuf();
    Buffer buffer = meshes[curMesh]->buffer;
    // Update model matrix
    glm::mat4 mvp = Proj*View*(meshes[curMesh]->model);
    // Initialize vertex array
    glBindBuffer( GL_ARRAY_BUFFER, vertbuffer );
    glBufferData( GL_ARRAY_BUFFER, sizeof(GLfloat)*buffer.numcoords*3, meshes[curMesh]->verts, GL_STATIC_DRAW );
    // Pass information to shader
    GLuint posID = glGetAttribLocation( shader, "s_vPosition" );
    glVertexAttribPointer( posID, 3, GL_FLOAT, GL_FALSE, 0, (void*)0 );
    glEnableVertexAttribArray( posID );
    // Check if texture applicable
    if( meshes[curMesh]->texID != NULL && meshes[curMesh]->uvs != NULL ) {
        // Initialize uv array
        glBindBuffer( GL_ARRAY_BUFFER, uvbuffer );
        glBufferData( GL_ARRAY_BUFFER, sizeof(GLfloat)*buffer.numcoords*2, meshes[curMesh]->uvs, GL_STATIC_DRAW );
        // Pass information to shader
        GLuint uvID = glGetAttribLocation( shader, "s_vUV" );
        glVertexAttribPointer( uvID, 2, GL_FLOAT, GL_FALSE, 0, (void*)(0) );
        glEnableVertexAttribArray( uvID );
        // Set mesh texture
        glActiveTexture( GL_TEXTURE0 );
        glBindTexture( GL_TEXTURE_2D, meshes[curMesh]->texID );
        GLuint texID = glGetUniformLocation( shader, "Sampler" );
        glUniform1i( texID, 0 );
    }
    // Activate shader
    glUseProgram( shader );
    // Set MVP matrix
    GLuint mvpID = glGetUniformLocation( shader, "MVP" );
    glUniformMatrix4fv( mvpID, 1, GL_FALSE, &mvp[0][0] );
    // Draw vertices on screen
    bool wireframe = true;
    if( wireframe )
        for( int i = 0; i < buffer.numcoords; i += 3 )
            glDrawArrays( GL_LINE_LOOP, i, 3 );
    else
        glDrawArrays( GL_TRIANGLES, 0, buffer.numcoords );
}
// Mesh Class (Graphics.h)
class mesh {
public:
mesh();
void updatebuf();
Buffer buffer;
GLuint texID;
bool updated;
GLfloat* verts;
GLfloat* uvs;
glm::mat4 model;
};
My Obj loading code is here: https://www.dropbox.com/s/tdcpg4vok11lf9d/ObjReader.txt (It's pretty crude and isn't organized, but should still work)
This looks like a primitive restart issue to me. It's hard to tell what exactly the problem is without seeing some code. It would help a lot to see roughly the 20 lines above and below the drawing calls that render the teapot, i.e. the 20 lines before the corresponding glDrawArrays, glDrawElements, or glBegin call and the 20 lines after.
Subtract 1 from the indices when you use them: OBJ files store 1-based indices, and you will almost certainly need 0-based indices.
This happens because your triangles are not connected, so the wireframe cannot look right. When triangles are not connected, you should construct an index buffer, as sketched below.
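A minimal sketch of both suggestions, assuming the loader collects the 1-based OBJ face indices into a std::vector<unsigned int> named objIndices (a hypothetical name) and that an element buffer object ibo has already been generated with glGenBuffers:

std::vector<GLuint> indices( objIndices.size() );
for( size_t i = 0; i < objIndices.size(); ++i )
    indices[i] = objIndices[i] - 1;   // OBJ indices are 1-based, OpenGL's are 0-based

glBindBuffer( GL_ELEMENT_ARRAY_BUFFER, ibo );
glBufferData( GL_ELEMENT_ARRAY_BUFFER, indices.size() * sizeof(GLuint),
              indices.data(), GL_STATIC_DRAW );

// Draw indexed triangles instead of glDrawArrays:
glDrawElements( GL_TRIANGLES, (GLsizei)indices.size(), GL_UNSIGNED_INT, (void*)0 );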

Drawing With a Shader Storage Object Not Working

For all of my objects that are to be rendered, I use glDrawElements. However, my venture into compute shaders has left me with a setup that uses glDrawArrays. As with many who are broaching the topic, I used this PDF as a basis. The problem is that when it is rendered, nothing appears.
#include "LogoTail.h"
LogoTail::LogoTail(int tag1) {
tag = tag1;
needLoad = false;
shader = LoadShaders("vertex-shader[LogoTail].txt","fragment-shader[LogoTail].txt");
shaderCompute = LoadShaders("compute-shader[LogoTail].txt");
for( int i = 0; i < NUM_PARTICLES; i++ )
{
points[ i ].x = 0.0f;
points[ i ].y = 0.0f;
points[ i ].z = 0.0f;
points[ i ].w = 1.0f;
}
glGenBuffers( 1, &posSSbo);
glBindBuffer( GL_SHADER_STORAGE_BUFFER, posSSbo );
glBufferData( GL_SHADER_STORAGE_BUFFER, sizeof(points), points, GL_STATIC_DRAW );
glUnmapBuffer(GL_SHADER_STORAGE_BUFFER);
for( int i = 0; i < NUM_PARTICLES; i++ )
{
times[ i ].x = 0.0f;
}
glGenBuffers( 1, &birthSSbo);
glBindBuffer( GL_SHADER_STORAGE_BUFFER, birthSSbo );
glBufferData( GL_SHADER_STORAGE_BUFFER, sizeof(times), times, GL_STATIC_DRAW );
glUnmapBuffer(GL_SHADER_STORAGE_BUFFER);
for( int i = 0; i < NUM_PARTICLES; i++ )
{
vels[ i ].vx = 0.0f;
vels[ i ].vy = 0.0f;
vels[ i ].vz = 0.0f;
vels[ i ].vw = 0.0f;
}
glGenBuffers( 1, &velSSbo );
glBindBuffer( GL_SHADER_STORAGE_BUFFER, velSSbo );
glBufferData( GL_SHADER_STORAGE_BUFFER, sizeof(vels), vels, GL_STATIC_DRAW );
glUnmapBuffer(GL_SHADER_STORAGE_BUFFER);
}
void LogoTail::Update(const double dt, float sunTime,glm::vec3 sunN) {
position=glm::translate(glm::mat4(), glm::vec3(4.5f,0,0));
}
void LogoTail::Draw(shading::Camera& camera){
shaderCompute->use();
glBindBufferBase( GL_SHADER_STORAGE_BUFFER, 4, posSSbo );
glBindBufferBase( GL_SHADER_STORAGE_BUFFER, 5, velSSbo );
glBindBufferBase( GL_SHADER_STORAGE_BUFFER, 6, birthSSbo );
glDispatchCompute( NUM_PARTICLES / WORK_GROUP_SIZE, 1, 1 );
glMemoryBarrier( GL_VERTEX_ATTRIB_ARRAY_BARRIER_BIT );
shaderCompute->stopUsing();
shader->use();
shader->setUniform("camera", camera.matrix());
shader->setUniform("model",position);
glBindBuffer( GL_ARRAY_BUFFER, posSSbo );
glVertexPointer( 4, GL_FLOAT, 0, (void *)0 );
glEnableClientState( GL_VERTEX_ARRAY );
glDrawArrays( GL_POINTS, 0, NUM_PARTICLES );
glDisableClientState( GL_VERTEX_ARRAY );
glBindBuffer( GL_ARRAY_BUFFER, 0 );
shader->stopUsing();
}
The header contains the needed structures and other variables so they do not fall out of scope for the specific object.
Here is the compute shader itself.
#version 430 core
#extension GL_ARB_compute_shader : enable
#extension GL_ARB_shader_storage_buffer_object : enable

layout( std140, binding=4 ) buffer Pos
{
    vec4 Positions[ ];    // array of vec4 structures
};
layout( std140, binding=5 ) buffer Vel
{
    vec4 Velocities[ ];   // array of vec4 structures
};
layout( std140, binding=6 ) buffer Tim
{
    float BirthTimes[ ];  // array of structures
};

layout( local_size_x = 128, local_size_y = 1, local_size_z = 1 ) in;

const vec3 G = vec3( 0., -0.2, 0. );
const float DT = 0.016666;

void main() {
    uint gid = gl_GlobalInvocationID.x; // the .y and .z are both 1

    vec3 p = Positions[ gid ].xyz;
    vec3 v = Velocities[ gid ].xyz;

    vec3 pp = p + v*DT + .5*DT*DT*G;
    vec3 vp = v + G*DT;

    Positions[ gid ].xyz = pp;
    Velocities[ gid ].xyz = vp;
}
For testing purposes I lowered the gravity.
I believe that nothing is out of scope, and no needed bind is missing, yet it eludes me why the particles are not drawing.
In addition, I also added a geometry shader that constructs a quad around each point, but it did not solve anything.
The last few lines of Draw() seem problematic to me:
glBindBuffer( GL_ARRAY_BUFFER, posSSbo );
glVertexPointer( 4, GL_FLOAT, 0, (void *)0 );
glEnableClientState( GL_VERTEX_ARRAY );
glDrawArrays( GL_POINTS, 0, NUM_PARTICLES );
glDisableClientState( GL_VERTEX_ARRAY );
glBindBuffer( GL_ARRAY_BUFFER, 0 );
My guess is that you are trying to use the old way of doing things in the programmable pipeline. I am not sure how it is stated in the OpenGL specs, but it seems that in the newer versions (GL 4.2) you are forced to bind your vertex buffers to a VAO (maybe that is a vendor-specific rule?). Once I needed to implement OIT and tried Cyril Crassin's demo, which was using buffers with an elements draw, just like you. I am using GL 4.2 and NVIDIA cards, and nothing was showing up. I then bound the buffers to a VAO and the issue was gone. So that is what I suggest you try.
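A sketch of that suggestion, assuming the vertex shader reads the particle position from a generic attribute at location 0 (an assumption; the original code uses the fixed-function glVertexPointer instead):

// One-time setup: wrap the position buffer in a VAO.
GLuint vao;
glGenVertexArrays( 1, &vao );
glBindVertexArray( vao );
glBindBuffer( GL_ARRAY_BUFFER, posSSbo );   // the SSBO can also be bound as a vertex buffer
glEnableVertexAttribArray( 0 );
glVertexAttribPointer( 0, 4, GL_FLOAT, GL_FALSE, 0, (void *)0 );
glBindVertexArray( 0 );

// In Draw(), after the compute dispatch and memory barrier:
glBindVertexArray( vao );
glDrawArrays( GL_POINTS, 0, NUM_PARTICLES );
glBindVertexArray( 0 );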

OpenGL Orthographic Sketching With GL_LINE_STRIP

I am having an issue with OpenGL when drawing my sketched line to the screen: it seems to add an extra point at the origin which is not in the point list.
(0,0) is definitely not among the points in the array to be drawn; I just can't seem to reason it out. I would guess that maybe my array size is too big or something, but I can't see it.
Here is my code for populating the array and the draw call
void PointArray::repopulateObjectBuffer()
{
    //Rebind Appropriate VAO
    glBindVertexArray( vertex_array_object_id );

    //Recalculate Array Size
    vertexArraySize = points.size()*sizeof(glm::vec2);

    //Rebind Appropriate VBO
    glBindBuffer( GL_ARRAY_BUFFER, buffer_object_id );
    glBufferData( GL_ARRAY_BUFFER, vertexArraySize, NULL, GL_STATIC_DRAW );
    glBufferSubData( GL_ARRAY_BUFFER, 0, vertexArraySize, (const GLvoid*)(&points[0]) );

    //Set up Vertex Arrays
    glEnableVertexAttribArray( 0 ); //SimpleShader attrib at position 0 = "vPosition"
    glVertexAttribPointer( (GLuint)0, 2, GL_FLOAT, GL_FALSE, 0, BUFFER_OFFSET(0) );

    //Unbind
    glBindBuffer( GL_ARRAY_BUFFER, 0 );
    glBindVertexArray( 0 );
}

void PointArray::draw() {
    glBindVertexArray( vertex_array_object_id );
    glLineWidth( 4 );
    glDrawArrays( GL_LINE_STRIP, 0, vertexArraySize );
    glBindVertexArray( 0 );
}
and here is where I am adding points in the mouse callback
void mouseMovement(int x, int y)
{
    mouseDX = x - lastX;
    mouseDY = y - lastY;
    lastX = x;
    lastY = y;

    if(mouse_drag)
    {
        cam->RotateByMouse(glm::vec2(mouseDX*mouseSens, mouseDY * mouseSens));
    }
    if(sketching)
    {
        sketchLine.addPoint(glm::vec2((float)x,(float)y));
        sketchLine.repopulateObjectBuffer();
        updatePickray(x,y);
    }
}
Finally my simple ortho vertex shader
in vec2 vPosition;

void main(){
    const float right  = 800.0;
    const float bottom = 600.0;
    const float left   = 0.0;
    const float top    = 0.0;
    const float far    = 1.0;
    const float near   = -1.0;

    mat4 orthoMat = mat4(
        vec4(2.0 / (right - left), 0, 0, 0),
        vec4(0, 2.0 / (top - bottom), 0, 0),
        vec4(0, 0, -2.0 / (far - near), 0),
        vec4(-(right + left) / (right - left), -(top + bottom) / (top - bottom), -(far + near) / (far - near), 1)
    );

    gl_Position = orthoMat * vec4(vPosition, 0.0, 1.0);
}
You seem to be using vertexArraySize as the count parameter to glDrawArrays, which is not correct. The count argument of glDrawArrays should be the number of vertices, not the number of bytes in the vertex array.
In your case I guess it would be glDrawArrays( GL_LINE_STRIP, 0, points.size());.
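For completeness, a corrected draw() along those lines, assuming points stays in sync with the buffer contents:

void PointArray::draw() {
    glBindVertexArray( vertex_array_object_id );
    glLineWidth( 4 );
    // Count is the number of vertices, not the byte size of the buffer.
    glDrawArrays( GL_LINE_STRIP, 0, (GLsizei)points.size() );
    glBindVertexArray( 0 );
}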

Code in main.cpp works but when I put it in a class it doesn't

I have some simple code that draws a 30x30 ball to the screen. It worked perfectly when it was all inside of main.cpp, but when I put it into its own separate class I got a very faint grey box taking up the top right of the screen instead. When I paste the exact code I have inside of my Ball class into main.cpp it works perfectly again.
When the code is in the Ball class:
When I paste all of the code from the Ball class into main (and how it was before the Ball class):
The code can be found here.
main.cpp
#include "Includes.hpp"
static unsigned int Width = 800;
static unsigned int Height = 600;
int main()
{
bool CursorEnabled = true;
bool PolygonMode = false;
//Camera.CameraInit(45.0f , Width , Height , Model); use ortho instead \/
Camera.CameraInit (Width, Height, glm::mat4 (1.0f));
GLuint vao;
glGenVertexArrays (1 , &vao);
glBindVertexArray(vao);
Ball ball;
float LastTime = glfwGetTime(); // (for deltatime)
while (true)
{
float DeltaTime = glfwGetTime() - LastTime; // Deltatime init
LastTime = glfwGetTime(); // update for deltatime
glClearColor (0.0f , 0.0f , 0.0f , 1.0f);
glClear (GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
ball.Draw();
glfwSwapBuffers();
if (glfwGetKey (GLFW_KEY_ESC) || !glfwGetWindowParam(GLFW_OPENED))
{
std::cout << "\nShutdown command. Exiting...\n" << std::flush;
return 0;
}
}
}
Ball.hpp
#ifndef BALL
#define BALL

class Ball
{
private:
    //Vertices and UV coordinates.
    std::vector <glm::vec2> ballVert;
    std::vector <glm::vec2> ballUV;

    //Buffers
    GLuint vertBuf;
    GLuint uvBuf;

    //Image
    GLuint ballTex;

    //IDs for the shader.
    GLuint shader;
    GLuint MVPid;
    GLuint sampler;
    GLuint coord;
    GLuint uv;

public:
    //Constructor
    Ball();
    void Draw();
};

#endif
Ball.cpp
#include "Includes.hpp"
Ball::Ball()
{
//Create ball vertices and UV coords
ballVert.push_back(glm::vec2(0 , 0)); /*left /\*/
ballVert.push_back(glm::vec2(0 , 30)); /*right / \*/
ballVert.push_back(glm::vec2(30 , 0)); /*top ------*/
ballVert.push_back(glm::vec2(0 , 30)); /*left /\*/
ballVert.push_back(glm::vec2(30 , 30)); /*right / \*/
ballVert.push_back(glm::vec2(30 , 0)); /*top ------*/
ballUV.push_back(glm::vec2(0 , 1));
ballUV.push_back(glm::vec2(0 , 0));
ballUV.push_back(glm::vec2(1 , 1));
ballUV.push_back(glm::vec2(0 , 0));
ballUV.push_back(glm::vec2(1 , 0));
ballUV.push_back(glm::vec2(1 , 1));
//Load the image
ballTex = SOIL_load_OGL_texture (
"img/ball.png",
SOIL_LOAD_AUTO,
SOIL_CREATE_NEW_ID, SOIL_FLAG_MIPMAPS | SOIL_FLAG_INVERT_Y | SOIL_FLAG_NTSC_SAFE_RGB | SOIL_FLAG_COMPRESS_TO_DXT);
//Generate buffers
glGenBuffers (1 , &vertBuf);
glBindBuffer (GL_ARRAY_BUFFER , vertBuf);
glBufferData (GL_ARRAY_BUFFER , ballVert.size() * sizeof(glm::vec2) , &ballVert[0] , GL_STATIC_DRAW);
glGenBuffers (1 , &uvBuf);
glBindBuffer (GL_ARRAY_BUFFER , uvBuf);
glBufferData (GL_ARRAY_BUFFER , ballUV.size() * sizeof (glm::vec2) , &ballUV[0] , GL_STATIC_DRAW);
shader = Game.LoadShaders ("src/glsl/ball.vert" , "src/glsl/ball.frag");
MVPid = glGetUniformLocation (shader , "MVP");
sampler = glGetUniformLocation (shader, "sampler");
coord = glGetAttribLocation(shader, "coord");
uv = glGetAttribLocation (shader, "uv");
}
void Ball::Draw()
{
glUseProgram (shader);
glUniformMatrix4fv(MVPid, 1, GL_FALSE, &Camera.GetMVP()[0][0]); // Send MVP to shader
glActiveTexture (GL_TEXTURE0); // bind the ball texture
glBindTexture (GL_TEXTURE_2D, ballTex);
glUniform1i (sampler , 0);
glEnableVertexAttribArray (coord); // sending the vertex data
glBindBuffer(GL_ARRAY_BUFFER, vertBuf);
glVertexAttribPointer(
coord, // The attribute we want to configure
2, // size
GL_FLOAT, // type
GL_FALSE, // normalized?
0, // stride
(void*)0 // array buffer offset
);
glEnableVertexAttribArray (uv);
glBindBuffer (GL_ARRAY_BUFFER , uvBuf);
glVertexAttribPointer (uv, 2, GL_FLOAT, GL_FALSE, 0, (void*)0);
// Draw the triangle !
glDrawArrays(GL_TRIANGLES, 0, ballVert.size());
glDisableVertexAttribArray(coord);
glDisableVertexAttribArray(uv);
}