With all of my objects that are to be rendered, I use glDrawElements. However, my venture into compute shaders has left me with a setup that uses glDrawArrays. As with many who are broaching the topic, I used this PDF as a basis. The problem is that when it is rendered, nothing appears.
#include "LogoTail.h"
LogoTail::LogoTail(int tag1) {
tag = tag1;
needLoad = false;
shader = LoadShaders("vertex-shader[LogoTail].txt","fragment-shader[LogoTail].txt");
shaderCompute = LoadShaders("compute-shader[LogoTail].txt");
for( int i = 0; i < NUM_PARTICLES; i++ )
{
points[ i ].x = 0.0f;
points[ i ].y = 0.0f;
points[ i ].z = 0.0f;
points[ i ].w = 1.0f;
}
glGenBuffers( 1, &posSSbo);
glBindBuffer( GL_SHADER_STORAGE_BUFFER, posSSbo );
glBufferData( GL_SHADER_STORAGE_BUFFER, sizeof(points), points, GL_STATIC_DRAW );
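// note: no buffer is mapped at this point, so the glUnmapBuffer calls here and below are spurious (they raise GL_INVALID_OPERATION)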
glUnmapBuffer(GL_SHADER_STORAGE_BUFFER);
for( int i = 0; i < NUM_PARTICLES; i++ )
{
times[ i ].x = 0.0f;
}
glGenBuffers( 1, &birthSSbo);
glBindBuffer( GL_SHADER_STORAGE_BUFFER, birthSSbo );
glBufferData( GL_SHADER_STORAGE_BUFFER, sizeof(times), times, GL_STATIC_DRAW );
glUnmapBuffer(GL_SHADER_STORAGE_BUFFER);
for( int i = 0; i < NUM_PARTICLES; i++ )
{
vels[ i ].vx = 0.0f;
vels[ i ].vy = 0.0f;
vels[ i ].vz = 0.0f;
vels[ i ].vw = 0.0f;
}
glGenBuffers( 1, &velSSbo );
glBindBuffer( GL_SHADER_STORAGE_BUFFER, velSSbo );
glBufferData( GL_SHADER_STORAGE_BUFFER, sizeof(vels), vels, GL_STATIC_DRAW );
glUnmapBuffer(GL_SHADER_STORAGE_BUFFER);
}
void LogoTail::Update(const double dt, float sunTime,glm::vec3 sunN) {
position=glm::translate(glm::mat4(), glm::vec3(4.5f,0,0));
}
void LogoTail::Draw(shading::Camera& camera){
shaderCompute->use();
glBindBufferBase( GL_SHADER_STORAGE_BUFFER, 4, posSSbo );
glBindBufferBase( GL_SHADER_STORAGE_BUFFER, 5, velSSbo );
glBindBufferBase( GL_SHADER_STORAGE_BUFFER, 6, birthSSbo );
glDispatchCompute( NUM_PARTICLES / WORK_GROUP_SIZE, 1, 1 );
glMemoryBarrier( GL_VERTEX_ATTRIB_ARRAY_BARRIER_BIT );
shaderCompute->stopUsing();
shader->use();
shader->setUniform("camera", camera.matrix());
shader->setUniform("model",position);
glBindBuffer( GL_ARRAY_BUFFER, posSSbo );
glVertexPointer( 4, GL_FLOAT, 0, (void *)0 );
glEnableClientState( GL_VERTEX_ARRAY );
glDrawArrays( GL_POINTS, 0, NUM_PARTICLES );
glDisableClientState( GL_VERTEX_ARRAY );
glBindBuffer( GL_ARRAY_BUFFER, 0 );
shader->stopUsing();
}
The header contains the needed structures and other variables so they do not fall out of scope for the specific object.
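For reference, here is a minimal sketch of what that header presumably declares; the struct and constant names are assumptions inferred from the usage above, and WORK_GROUP_SIZE must match the local_size_x declared in the compute shader:
struct pos { float x, y, z, w; };
struct vel { float vx, vy, vz, vw; };
struct tim { float x; }; // only .x is ever written above
static const int WORK_GROUP_SIZE = 128; // must equal local_size_x below
static const int NUM_PARTICLES = 128 * 1024; // assumed; any multiple of WORK_GROUP_SIZE
pos points[ NUM_PARTICLES ];
vel vels[ NUM_PARTICLES ];
tim times[ NUM_PARTICLES ];
GLuint posSSbo, velSSbo, birthSSbo;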
Here is the compute shader itself.
#version 430 core
#extension GL_ARB_compute_shader : enable
#extension GL_ARB_shader_storage_buffer_object : enable
layout( std140, binding=4 ) buffer Pos
{
vec4 Positions[ ]; // array of vec4 structures
};
layout( std140, binding=5 ) buffer Vel
{
vec4 Velocities[ ]; // array of vec4 structures
};
layout( std140, binding=6 ) buffer Tim
{
float BirthTimes[ ]; // array of structures
};
layout( local_size_x = 128, local_size_y = 1, local_size_z = 1 ) in;
const vec3 G = vec3( 0., -0.2, 0. );
const float DT = 0.016666;
void main() {
uint gid = gl_GlobalInvocationID.x; // the .y and .z are both 1
vec3 p = Positions[ gid ].xyz;
vec3 v = Velocities[ gid ].xyz;
vec3 pp = p + v*DT + .5*DT*DT*G;
vec3 vp = v + G*DT;
Positions[ gid ].xyz = pp;
Velocities[ gid ].xyz = vp;
}
For testing purposes I lowered the gravity.
I believe that nothing is out of scope, and no required bind is missing, yet it eludes me why the particles are not drawing.
In addition, I added a geometry shader that constructs a quad around each point, but it did not solve anything.
The last few lines look problematic to me:
glBindBuffer( GL_ARRAY_BUFFER, posSSbo );
glVertexPointer( 4, GL_FLOAT, 0, (void *)0 );
glEnableClientState( GL_VERTEX_ARRAY );
glDrawArrays( GL_POINTS, 0, NUM_PARTICLES );
glDisableClientState( GL_VERTEX_ARRAY );
glBindBuffer( GL_ARRAY_BUFFER, 0 );
My guess is that you are trying to use the old way of doing things in the programmable pipeline. I am not sure how it is stated in the OpenGL specs, but it seems that in the newer versions (GL 4.2) you are forced to bind your vertex buffers to a VAO (maybe that is a vendor-specific rule?). Once I needed to implement OIT and tried Cyril Crassin's demo, which was using buffers with an elements draw, just like you. I am using GL 4.2 and NVIDIA cards. Nothing was showing up. I then bound the buffers to a VAO and the issue was gone. So that is what I suggest you try.
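For illustration, here is a minimal sketch of that suggestion applied to the draw code above, using a generic vertex attribute instead of the deprecated glVertexPointer; it assumes the vertex shader reads the particle position at attribute location 0:
// One-time setup (e.g. in the constructor):
GLuint vao;
glGenVertexArrays( 1, &vao );
glBindVertexArray( vao );
glBindBuffer( GL_ARRAY_BUFFER, posSSbo ); // the SSBO doubles as the vertex buffer
glEnableVertexAttribArray( 0 ); // assumed location of the position attribute
glVertexAttribPointer( 0, 4, GL_FLOAT, GL_FALSE, 0, (void *)0 );
glBindVertexArray( 0 );
// Per frame, replacing the glVertexPointer/glEnableClientState block:
glBindVertexArray( vao );
glDrawArrays( GL_POINTS, 0, NUM_PARTICLES );
glBindVertexArray( 0 );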
I'm struggling to understand the relationship between the offset parameters of the two functions, and how the offset value affects the gl_VertexID and gl_InstanceID variables in the shader.
From reading the functions' documentation, I think glMapBufferRange expects offset to be the number of bytes from the start of the buffer, whereas glDrawArraysInstanced expects first to be the number of strides as specified by glVertexAttribPointer.
However, that doesn't seem to be the case, as the code below doesn't work if offsetVerts has a value different from 0. For 0 it renders 3 squares on the screen, as I expected.
The other possible error source would be the value of gl_VertexID. I'd expect it to be 0,1,2,3 for the 4 vertex shader calls per instance, regardless of the offset value.
Just to make sure, I also tried using a first value that is a multiple of 4 and vertices[int(mod(gl_VertexID,4))] for the position lookup, without success.
How can I alter the code to make it work with offsets other than 0?
glGetError() calls are omitted here to shorten the code; it returns 0 through the whole process. The GL version is 3.3.
Init code:
GLuint buff_id, v_id;
GLint bytesPerVertex = 2*sizeof(GLfloat); //8
glGenBuffers( 1, &buff_id );
glBindBuffer( GL_ARRAY_BUFFER, buff_id );
glGenVertexArrays( 1, &v_id );
glBufferData( GL_ARRAY_BUFFER, 1024, NULL, GL_STREAM_DRAW );
glBindVertexArray( v_id );
glEnableVertexAttribArray( posLoc );
glVertexAttribPointer( posLoc, 2, GL_FLOAT, GL_FALSE, bytesPerVertex, (void *)0 );
glVertexAttribDivisor( posLoc, 1 );
glBindVertexArray( 0 );
glBindBuffer( GL_ARRAY_BUFFER, 0 );
float *data_ptr = nullptr;
int numVerts = 3;
int offsetVerts = 0;
render code:
glBindBuffer( GL_ARRAY_BUFFER, buff_id );
data_ptr = (float *)glMapBufferRange( GL_ARRAY_BUFFER,
bytesPerVertex * offsetVerts,
bytesPerVertex * numVerts,
GL_MAP_WRITE_BIT );
data_ptr[0] = 50;
data_ptr[1] = 50;
data_ptr[2] = 150;
data_ptr[3] = 50;
data_ptr[4] = 250;
data_ptr[5] = 50;
glUnmapBuffer( GL_ARRAY_BUFFER );
glBindBuffer( GL_ARRAY_BUFFER, 0 );
glBindVertexArray( v_id );
glDrawArraysInstanced( GL_TRIANGLE_STRIP, offsetVerts, 4, 3 );
glBindVertexArray( 0 );
vertex shader:
#version 330
uniform mat4 proj;
in vec2 pos;
void main() {
vec2 vertices[4]= vec2[4](
vec2(pos.x, pos.y),
vec2(pos.x + 10.0f, pos.y),
vec2(pos.x, pos.y + 10.0f ),
vec2(pos.x + 10.0f, pos.y + 10.0f )
);
gl_Position = proj * vec4(vertices[gl_VertexID], 1, 1);
}
fragment shader:
#version 330
out vec4 LFragment;
void main() {
LFragment = vec4( 1.0f, 1.0f, 1.0f, 1.0f );
}
"The other possible error source would be the value of gl_VertexID. I'd expect it to be 0,1,2,3 for the 4 vertex shader calls per instance, regardless of the offset value."
There is no offset value in glDrawArrays*.
The base function for this is glDrawArrays(mode, first, count), and it will simply generate primitives from a consecutive sub-array of the specified vertex attribute arrays, from index first to first+count-1. Hence, gl_VertexID will be in the range first to first+count-1.
You are actually not using any per-vertex attribute array; you turned your attribute into a per-instance attribute, and the first parameter does not introduce an offset into instanced attributes. You can either adjust your attribute pointer to include the offset, or you can use glDrawArraysInstancedBaseInstance to specify the offset you need.
Note that gl_InstanceID will not reflect the base instance you set there; it will still count from 0 relative to the beginning of the draw call. But the actual instance values fetched from the array will use the offset.
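A hedged sketch of both options follows. glDrawArraysInstancedBaseInstance requires GL 4.2 or the ARB_base_instance extension, so on the 3.3 context used here the attribute-pointer variant is the portable one:
// Option 1 (works on GL 3.3): bake the offset into the attribute pointer.
glBindVertexArray( v_id );
glBindBuffer( GL_ARRAY_BUFFER, buff_id );
glVertexAttribPointer( posLoc, 2, GL_FLOAT, GL_FALSE, bytesPerVertex,
                       (void *)(intptr_t)( bytesPerVertex * offsetVerts ) );
glBindBuffer( GL_ARRAY_BUFFER, 0 );
glDrawArraysInstanced( GL_TRIANGLE_STRIP, 0, 4, 3 );
// Option 2 (GL 4.2 / ARB_base_instance): offset the instanced attribute fetch.
glBindVertexArray( v_id );
glDrawArraysInstancedBaseInstance( GL_TRIANGLE_STRIP, 0, 4, 3, offsetVerts );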
So I have created all of the correct sphere vertices using this algorithm:
GLint total = 100;
GLfloat radius = 200;
GLfloat sphereVertices[30000];
for (int i = 0; i < total; i++)
{
float lon = map(i, 0, total, -M_PI, M_PI);
for (int j = 0; j < total; j++)
{
float lat = map(j, 0, total, -M_PI/2, M_PI/2);
sphereVertices[(i * 300) + (j * 3)] = radius * sin(lon) * cos(lat);
sphereVertices[(i * 300) + (j * 3) + 1] = radius * sin(lon) * sin(lat);
sphereVertices[(i * 300) + (j * 3) + 2] = radius * cos(lon);
}
}
But when I draw it using either GL_TRIANGLES or GL_TRIANGLE_STRIP, I'm presented with this result:
As you can see the only triangles which are being rendered are slicing through the center of the sphere. Is my math wrong? Or am I not plotting my data into my array the correct way for my glVertexAttribPointer function to read the data correctly?
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 3 * sizeof(GLfloat), 0);
EDIT 1:
glBindVertexArray(VAO);
glBindBuffer(GL_ARRAY_BUFFER, VBO);
glDrawArrays(GL_TRIANGLES, 0, 10000);
You can't draw the vertices directly in that order with glDrawArrays; you have to bring them into a proper order.
Create a triangle index list and use glDrawElements.
See also How to map texture to sphere that rendered by parametric equation using points primitive
Further note that your loops should run from i = 0 to i <= total, because you want to create vertex coordinates at both poles of the sphere.
GLint layers = 100;
GLint circumferenceTiles = 100;
std::vector<GLfloat> sphereVertices;
sphereVertices.reserve( (layers+1)*(circumferenceTiles+1)*3 ); // 3 floats per vertex: x, y, z
for ( int i = 0; i <= layers; ++ i )
{
GLfloat lon = map(i, 0, layers, -M_PI, M_PI);
GLfloat lon_sin = std::sin( lon );
GLfloat lon_cos = std::cos( lon );
for ( int j = 0; j <= circumferenceTiles; j ++ )
{
GLfloat lat = map(j, 0, circumferenceTiles, -M_PI/2, M_PI/2);
GLfloat lat_sin = std::sin( lat);
GLfloat lat_cos = std::cos( lat);
sphereVertices.push_back( lon_cos * lat_cos ); // x
sphereVertices.push_back( lon_cos * lat_sin ); // y
sphereVertices.push_back( lon_sin ); // z
}
}
You can create triangles by stacking up discs:
// create the face indices
std::vector<GLuint> ia;
ia.reserve( layers*circumferenceTiles*6 );
for ( GLuint il = 0; il < layers; ++ il )
{
for ( GLuint ic = 0; ic < circumferenceTiles; ic ++ )
{
GLuint i0 = il * (circumferenceTiles+1) + ic;
GLuint i1 = i0 + 1;
GLuint i3 = i0 + circumferenceTiles+1;
GLuint i2 = i3 + 1;
GLuint faces[]{ i0, i1, i2, i0, i2, i3 };
// at the poles, skip the triangle that degenerates to a line
// (the first triangle on the first layer, the second one on the last layer)
ia.insert(ia.end(), faces+(il==0?3:0), faces+(il==layers-1?3:6));
}
}
Specify the vertex array object:
GLuint vao;
glGenVertexArrays( 1, &vao );
glBindVertexArray( vao );
GLuint vbo;
glGenBuffers( 1, &vbo );
glBindBuffer( GL_ARRAY_BUFFER, vbo );
glBufferData( GL_ARRAY_BUFFER, sphereVertices.size()*sizeof(GLfloat), sphereVertices.data(),
GL_STATIC_DRAW );
GLuint ibo;
glGenBuffers( 1, &ibo );
glBindBuffer( GL_ELEMENT_ARRAY_BUFFER, ibo );
glBufferData( GL_ELEMENT_ARRAY_BUFFER, ia.size()*sizeof(GLuint), ia.data(), GL_STATIC_DRAW );
GLuint v_attr_inx = 0;
glVertexAttribPointer( v_attr_inx , 3, GL_FLOAT, GL_FALSE, 0, 0 );
glEnableVertexAttribArray( v_attr_inx );
glBindVertexArray( 0 );
glBindBuffer( GL_ARRAY_BUFFER, 0 );
glBindBuffer( GL_ELEMENT_ARRAY_BUFFER, 0 );
Draw the sphere:
glBindVertexArray( vao );
glDrawElements( GL_TRIANGLES, (GLsizei)ia.size(), GL_UNSIGNED_INT, 0 );
glBindVertexArray( 0 );
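The map() helper used by both listings is not shown; presumably it linearly remaps a value from one range to another, like Processing's map(). A minimal sketch under that assumption:
GLfloat map( GLfloat value, GLfloat start1, GLfloat stop1, GLfloat start2, GLfloat stop2 )
{
    // remap value from [start1, stop1] to [start2, stop2]
    return start2 + ( stop2 - start2 ) * ( value - start1 ) / ( stop1 - start1 );
}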
I'm drawing sprites that are all in a buffer using glDrawElements.
To tell each sprite which texture to use, the fragment shader has uniform sampler2D textures[32]; each vertex carries an index, which is passed to the fragment shader from the vertex shader:
color = texture(textures[index], fs_in.uv);
When I try to draw my sprites with more than 1 texture active, the sprites get the wrong textures in the top right corner:
http://puu.sh/lyr5j/d8c2cf6c8f.png
I have no clue why this is happening; I have tried changing texture parameters.
I can't seem to find anyone who has had a similar problem.
This is my renderer's init function. (I am purposely passing the texID as a float, since I have heard ints don't work well for this; I tried them as well.)
glGenBuffers(1, &m_VDBO);
glGenVertexArrays(1, &m_VAO);
glBindVertexArray(m_VAO);
glBindBuffer(GL_ARRAY_BUFFER, m_VDBO);
glBufferData(GL_ARRAY_BUFFER, RENDERER_BUFFER_SIZE, 0, GL_DYNAMIC_DRAW);
glEnableVertexAttribArray(SHADER_VERTEX_INDEX);
glEnableVertexAttribArray(SHADER_UV_INDEX);
glEnableVertexAttribArray(SHADER_COLOR_INDEX);
glEnableVertexAttribArray(SHADER_TEXID_INDEX);
glVertexAttribPointer(SHADER_VERTEX_INDEX, 3, GL_FLOAT, GL_FALSE, RENDERER_VERTEX_SIZE, (const void *) offsetof(VertexData, VertexData::vertex));
glVertexAttribPointer(SHADER_UV_INDEX, 2, GL_FLOAT, GL_FALSE, RENDERER_VERTEX_SIZE, (const void *) offsetof(VertexData, VertexData::uv));
glVertexAttribPointer(SHADER_COLOR_INDEX, 4, GL_UNSIGNED_BYTE, GL_TRUE, RENDERER_VERTEX_SIZE, (const void *) offsetof(VertexData, VertexData::color));
glVertexAttribPointer(SHADER_TEXID_INDEX, 1, GL_FLOAT, GL_FALSE, RENDERER_VERTEX_SIZE, (const void *)offsetof(VertexData, VertexData::texID));
glBindBuffer(GL_ARRAY_BUFFER, 0);
const GLushort modelindices[] = { 0, 1, 2, 2, 3, 0 };
GLuint indices[RENDERER_INDICES_SIZE];
for (int i = 0; i < RENDERER_INDICES_SIZE; i += 6)
{
for (int o = 0; o < 6; o++)
{
indices[i + o] = modelindices[o] + (i / 6 * 4);
}
}
glGenBuffers(1, &m_IBO);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, m_IBO);
glBufferData(GL_ELEMENT_ARRAY_BUFFER, RENDERER_INDICES_SIZE * sizeof(GLuint), indices, GL_STATIC_DRAW);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, 0);
glBindVertexArray(0);
the flush function
glBindVertexArray(m_VAO);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, m_IBO);
for (int i = 0; i < m_TextureSlots.size(); i++)
{
glActiveTexture(GL_TEXTURE0 + i);
glBindTexture(GL_TEXTURE_2D, m_TextureSlots[i]);
}
glDrawElements(GL_TRIANGLES, m_IndexCount, GL_UNSIGNED_INT, 0);
m_TextureSlots.clear();
m_IndexCount = 0;
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, 0);
glBindVertexArray(0);
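One thing the question does not show is how the textures sampler array gets pointed at the texture units; presumably that happens once after the program is linked, along these lines (shaderProgram is a hypothetical handle name):
GLint units[32];
for ( int i = 0; i < 32; i++ )
    units[i] = i; // sampler element i reads texture unit i
glUseProgram( shaderProgram ); // hypothetical program handle
glUniform1iv( glGetUniformLocation( shaderProgram, "textures" ), 32, units );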
It is hard for me to see where your problem is coming from; the only thing I can suggest is taking a look at an Image2d class constructor that I have. My source depends on outside classes, such as a ShaderManager class that relies heavily on template types, plus a Batch class and a BatchManager class that send the vertices to the video card, and Image2d is itself an inherited object. Still, it may serve as some help in tracking down your own problem. There are two constructors for different versions of the implementation, depending on which version of OpenGL and GLSL is being used. If I remember correctly, version 2 uses the BatchManager to send the vertices to the video card, while version 1 doesn't; this can be seen within the render() method.
Image2d v1.0
// ----------------------------------------------------------------------------
// Image2d()
Image2d::Image2d( float fWidth, float fHeight, TextureInfo::FilterQuality filterQuality, bool generateMipMap, const std::string& strTextureFilename, const std::string& strId ) :
VisualMko( glm::uvec2(), strId ),
m_vboTexture( 0 ),
m_vboPosition( 0 ),
m_vboIndices( 0 ),
m_vao( 0 ) {
if ( fWidth <= 0 || fHeight <= 0 ) {
std::ostringstream strStream;
strStream << __FUNCTION__ << " Invalid image size (" << fWidth << "," << fHeight << ") must be more than 0 in each dimension.";
throw ExceptionHandler( strStream );
}
// Save TextureId
TextureFileReader textureFileReader( strTextureFilename );
m_textureInfo = textureFileReader.getOrCreateTextureInfo( filterQuality, generateMipMap, false );
// Define Texture Co-Ordinates
std::vector<float> vTextureCoordinates;
vTextureCoordinates.push_back( 0.0f );
vTextureCoordinates.push_back( 1.0f );
vTextureCoordinates.push_back( 0 );
vTextureCoordinates.push_back( 0 );
vTextureCoordinates.push_back( 1.0f );
vTextureCoordinates.push_back( 1.0f );
vTextureCoordinates.push_back( 1.0f );
vTextureCoordinates.push_back( 0 );
// Define Vertex Positions (x,y,z)
std::vector<float> vVertexPositions;
vVertexPositions.push_back( 0 );
vVertexPositions.push_back( fHeight );
vVertexPositions.push_back( 0 );
vVertexPositions.push_back( 0 );
vVertexPositions.push_back( 0 );
vVertexPositions.push_back( 0 );
vVertexPositions.push_back( fWidth );
vVertexPositions.push_back( fHeight );
vVertexPositions.push_back( 0 );
vVertexPositions.push_back( fWidth );
vVertexPositions.push_back( 0 );
vVertexPositions.push_back( 0 );
// Define 2 Triangle Faces
std::vector<unsigned char> vIndices;
vIndices.push_back( 0 );
vIndices.push_back( 1 );
vIndices.push_back( 2 );
vIndices.push_back( 3 );
// Create Vertex Array Object
glGenVertexArrays( 1, &m_vao );
glBindVertexArray( m_vao ); // Start Array
m_pShaderManager->setAttribute( A_COLOR, COLOR_WHITE );
// Create Position Buffer And Store On Video Card
glGenBuffers( 1, & m_vboPosition );
glBindBuffer( GL_ARRAY_BUFFER, m_vboPosition );
glBufferData( GL_ARRAY_BUFFER, vVertexPositions.size() * sizeof( vVertexPositions[0] ), &vVertexPositions[0], GL_STATIC_DRAW );
m_pShaderManager->enableAttribute( A_POSITION );
// Create Texture Coordinate Buffer
glGenBuffers( 1, &m_vboTexture );
glBindBuffer( GL_ARRAY_BUFFER, m_vboTexture );
glBufferData( GL_ARRAY_BUFFER, vTextureCoordinates.size() * sizeof( vTextureCoordinates[0] ), &vTextureCoordinates[0], GL_STATIC_DRAW );
m_pShaderManager->enableAttribute( A_TEXTURE_COORD0 );
// Create Index Buffer
glGenBuffers( 1, &m_vboIndices );
glBindBuffer( GL_ELEMENT_ARRAY_BUFFER, m_vboIndices );
glBufferData( GL_ELEMENT_ARRAY_BUFFER, vIndices.size() * sizeof( vIndices[0] ), &vIndices[0], GL_STATIC_DRAW );
glBindVertexArray( 0 ); // Stop Array
// Disable Attribute Pointers
m_pShaderManager->disableAttribute( A_POSITION );
m_pShaderManager->disableAttribute( A_TEXTURE_COORD0 );
// THIS MUST BE AFTER Vertex Array Buffer Is Unbound!
glBindBuffer( GL_ELEMENT_ARRAY_BUFFER, 0 ); // Stop Buffer Index
glBindBuffer( GL_ARRAY_BUFFER, 0 ); // Stop Buffer
// We have a Valid Image2d Save Filename
m_strFilename = strTextureFilename;
} // Image2D - v1.0
Image2d v2.0
// ----------------------------------------------------------------------------
// Image2d()
Image2d::Image2d( const glm::uvec2& origin, const glm::uvec2& size, const std::string& strTextureFilename, const std::string& strId ) :
VisualMko( size, strId ),
m_vboTexture( 0 ),
m_vboPosition( 0 ),
m_vboIndices( 0 ),
m_vao( 0 ) {
m_version = 2;
TextureFileReader textureFileReader( strTextureFilename );
m_textureInfo = textureFileReader.getOrCreateTextureInfo( TextureInfo::FILTER_NONE, false, false );
m_config.uTextureId = m_textureInfo.uTextureId;
if ( 0 == m_textureInfo.size.x || 0 == m_textureInfo.size.y ) {
std::ostringstream strStream;
strStream << __FUNCTION__ << "size of " << strTextureFilename << " is invalid " << m_textureInfo.size;
throw ExceptionHandler( strStream );
}
// Verify Image Fits Inside Texture
if ( m_textureInfo.size.x < size.x + origin.x || m_textureInfo.size.y < size.y + origin.y ) {
std::ostringstream strStream;
strStream << __FUNCTION__ << " " << strTextureFilename << " size is " << m_textureInfo.size
<< " which is too small for an image that is " << size
<< " pixels in size, with an origin point set at " << origin ;
throw ExceptionHandler( strStream );
}
glm::vec2 textureCoordScaleFactor( 1.0f / static_cast<float>( m_textureInfo.size.x ),
1.0f / static_cast<float>( m_textureInfo.size.y ) );
glm::vec2 textureCoordBottomLeft = glm::vec2( textureCoordScaleFactor.x * origin.x,
textureCoordScaleFactor.y * ( m_textureInfo.size.y - origin.y - size.y ) );
glm::vec2 textureCoordTopRight = glm::vec2( textureCoordScaleFactor.x * ( origin.x + size.x ),
textureCoordScaleFactor.y * ( m_textureInfo.size.y - origin.y ) );
// Set Colors And Texture Coordinates (Position Will Be Updated In Render Function)
m_vVertices.push_back( GuiVertex( glm::vec2(), COLOR_WHITE, glm::vec2( textureCoordBottomLeft.x, textureCoordTopRight.y ) ) );
m_vVertices.push_back( GuiVertex( glm::vec2(), COLOR_WHITE, glm::vec2( textureCoordBottomLeft.x, textureCoordBottomLeft.y ) ) );
m_vVertices.push_back( GuiVertex( glm::vec2(), COLOR_WHITE, glm::vec2( textureCoordTopRight.x, textureCoordTopRight.y ) ) );
m_vVertices.push_back( GuiVertex( glm::vec2(), COLOR_WHITE, glm::vec2( textureCoordTopRight.x, textureCoordBottomLeft.y ) ) );
} // Image2d - v2.0
and here is my render() method
// ----------------------------------------------------------------------------
// render()
void Image2d::render() {
if ( 1 == m_version ) {
m_pShaderManager->setTexture( 0, U_TEXTURE0_SAMPLER_2D, m_textureInfo.uTextureId );
glBindVertexArray( m_vao );
glDrawElements( GL_TRIANGLE_STRIP, 4, GL_UNSIGNED_BYTE, nullptr );
glBindVertexArray( 0 );
} else {
// Version 2.0
// Update Vertices
if ( m_transformMatrix.updateTranslation || m_transformMatrix.updateScale || m_transformMatrix.updateRotation ) {
m_transformMatrix.updateTranslation = m_transformMatrix.updateScale = m_transformMatrix.updateRotation = false;
// Order Of Operations Matter Here!
glm::mat4 matrix; // Identity
if ( m_transformMatrix.hasTranslation ) {
matrix[3][0] = m_transformMatrix.translation.x;
matrix[3][1] = m_transformMatrix.translation.y;
}
if ( m_transformMatrix.hasRotation ) {
matrix = glm::rotate( matrix, m_transformMatrix.fRotationAngleRadians, glm::vec3( 0.0f, 0.0f, -1.0f ) );
}
if ( m_transformMatrix.hasScale ) {
matrix = matrix * glm::mat4( m_transformMatrix.scale.x, 0.0f, 0.0f, 0.0f,
0.0f, m_transformMatrix.scale.y, 0.0f, 0.0f,
0.0f, 0.0f, 1.0f, 0.0f,
0.0f, 0.0f, 0.0f, 1.0f );
}
// Center Offset
if ( m_offsetPosition.x != 0 || m_offsetPosition.y != 0 ) {
matrix = glm::translate( matrix, glm::vec3( -m_offsetPosition.x, -m_offsetPosition.y, 0.0f ) );
}
// Apply Transformation To All 4 Vertices
m_vVertices[0].position = glm::vec2( matrix * glm::vec4( 0, 0, 0, 1.0f ) );
m_vVertices[1].position = glm::vec2( matrix * glm::vec4( 0, m_size.y, 0, 1.0f ) );
m_vVertices[2].position = glm::vec2( matrix * glm::vec4( m_size.x, 0, 0, 1.0f ) );
m_vVertices[3].position = glm::vec2( matrix * glm::vec4( m_size.x, m_size.y, 0, 1.0f ) );
}
renderBatch();
}
} // render
Make sure that the size you are specifying in your glBufferData(GL_ARRAY_BUFFER, RENDERER_BUFFER_SIZE, 0, GL_DYNAMIC_DRAW); call is accurate. Also make sure that you are unbinding your vertex array at the appropriate time, as well as disabling your attribute pointers. Everything that you bound must be unbound, and for some types the order does matter!
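The order matters because the GL_ELEMENT_ARRAY_BUFFER binding is recorded inside the VAO, while the GL_ARRAY_BUFFER binding is not; a minimal sketch of a safe teardown order:
glBindVertexArray( 0 ); // unbind the VAO first
glBindBuffer( GL_ELEMENT_ARRAY_BUFFER, 0 ); // safe now; doing this while the VAO was bound would strip its index buffer
glBindBuffer( GL_ARRAY_BUFFER, 0 ); // not VAO state, so its position in the order is flexible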
I was following LazyFoo's tutorial on GLSL 2D texturing (http://lazyfoo.net/tutorials/OpenGL/34_glsl_texturing/index.php), and I was able to get most parts working.
However, the program renders the texture zoomed in far too close. Is this an issue with the vertex data or the texture lookup? Below is the vertex shader I was using in my implementation:
texCoord = LTexCoord;
gl_Position = gl_ProjectionMatrix * gl_ModelViewMatrix * vec4( LVertexPos2D.x, LVertexPos2D.y, 0.0, 1.0 );
And below is the fragment shader I was using:
gl_FragColor = texture( textureID, texCoord );
As for the render function, I deviate from the tutorial by using OpenGL's fixed-pipeline matrices (so I don't need to update the matrices myself):
//If the texture exists
if( mTextureID != 0 )
{
//Texture coordinates
GLfloat texTop = 0.f;
GLfloat texBottom = (GLfloat)mImageHeight / (GLfloat)mTextureHeight;
GLfloat texLeft = 0.f;
GLfloat texRight = (GLfloat)mImageWidth / (GLfloat)mTextureWidth;
//Vertex coordinates
GLfloat quadWidth = mImageWidth;
GLfloat quadHeight = mImageHeight;
//Set vertex data
LVertexData2D vData[ 4 ];
//Texture coordinates
vData[ 0 ].texCoord.s = texLeft; vData[ 0 ].texCoord.t = texTop;
vData[ 1 ].texCoord.s = texRight; vData[ 1 ].texCoord.t = texTop;
vData[ 2 ].texCoord.s = texRight; vData[ 2 ].texCoord.t = texBottom;
vData[ 3 ].texCoord.s = texLeft; vData[ 3 ].texCoord.t = texBottom;
//Vertex positions
vData[ 0 ].position.x = 0.f; vData[ 0 ].position.y = 0.f;
vData[ 1 ].position.x = quadWidth; vData[ 1 ].position.y = 0.f;
vData[ 2 ].position.x = quadWidth; vData[ 2 ].position.y = quadHeight;
vData[ 3 ].position.x = 0.f; vData[ 3 ].position.y = quadHeight;
glEnable(GL_TEXTURE_2D);
glBindTexture( GL_TEXTURE_2D, mTextureID );
glContext.textureShader->bind();
glContext.textureShader->setTextureID( mTextureID );
glContext.textureShader->enableVertexPointer();
glContext.textureShader->enableTexCoordPointer();
glBindBuffer( GL_ARRAY_BUFFER, mVBOID );
glBufferSubData( GL_ARRAY_BUFFER, 0, 4 * sizeof(LVertexData2D), vData );
glContext.textureShader->setTexCoordPointer( sizeof(LVertexData2D), (GLvoid*)offsetof( LVertexData2D, texCoord ) );
glContext.textureShader->setVertexPointer( sizeof(LVertexData2D), (GLvoid*)offsetof( LVertexData2D, position ) );
glBindBuffer( GL_ELEMENT_ARRAY_BUFFER, mIBOID );
glDrawElements( GL_TRIANGLE_FAN, 4, GL_UNSIGNED_INT, NULL );
glContext.textureShader->disableVertexPointer();
glContext.textureShader->disableTexCoordPointer();
glContext.textureShader->unbind();
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, 0);
glBindTexture( GL_TEXTURE_2D, NULL );
glDisable(GL_TEXTURE_2D); // disable texture 2d
}
}
In response to Koradi, the vertex and texture coordinate pointers are set up as shown below:
void TextureShader::setVertexPointer( GLsizei stride, const GLvoid* data )
{
glVertexAttribPointer( mVertexPosLocation, 2, GL_FLOAT, GL_FALSE, stride, data );
}
void TextureShader::setTexCoordPointer( GLsizei stride, const GLvoid* data )
{
glVertexAttribPointer( mTexCoordLocation, 2, GL_FLOAT, GL_FALSE, stride, data );
}
It is rendered in the main loop with the following code:
glPushMatrix();
glTranslatef( glContext.gFBOTexture->imageWidth() / -2.f, glContext.gFBOTexture->imageHeight() / -2.f, 0.f );
glContext.gFBOTexture->render();
glPopMatrix();
Is there something obvious that I am overlooking? I am new to GLSL.
Edit: Added more code
After mulling it over for a few days, I found the issue was with how sampler2D uniforms are sent to GLSL:
glBindTexture( GL_TEXTURE_2D, mTextureID );
glContext.textureShader->bind();
glContext.textureShader->setTextureID( mTextureID );
was corrected to:
glBindTexture( GL_TEXTURE_2D, mTextureID );
glContext.textureShader->bind();
glContext.textureShader->setTextureID( 0 );
setTextureID() sets the sampler2D uniform variable. Once the texture is bound, the sampler2D uniform should be set to the index of the texture unit it is bound to (0 for GL_TEXTURE0), not the texture handle.
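In other words, the sampler uniform names a texture unit, not a texture object. A minimal sketch of the working pattern, reusing the names from the code above:
glActiveTexture( GL_TEXTURE0 ); // select texture unit 0
glBindTexture( GL_TEXTURE_2D, mTextureID ); // attach the texture to unit 0
glContext.textureShader->bind();
glContext.textureShader->setTextureID( 0 ); // sampler2D = unit index, i.e. 0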
I'm trying to bind an SDL2 texture to a GLSL shader, though I'm not entirely sure how. I'm using a library called glfx to handle the GLSL shaders, and I've been helping with the development of that library as well. I'm pretty sure I've got everything else right, but it crashes when I call SDL_GL_BindTexture. Can anyone see what I've done wrong?
#include <stdio.h>
#include <stdlib.h>
#include <vector>
#include <string>
#include <GL/glew.h>
#include <GL/glfx.h>
#include <SDL2/SDL.h>
#include <FreeImage.h>
int main()
{
SDL_Window *mainwindow;
SDL_Renderer *renderer;
SDL_GLContext maincontext;
SDL_Init( SDL_INIT_VIDEO );
SDL_GL_SetAttribute( SDL_GL_CONTEXT_MAJOR_VERSION, 3 );
SDL_GL_SetAttribute( SDL_GL_CONTEXT_MINOR_VERSION, 2 );
SDL_GL_SetAttribute( SDL_GL_DOUBLEBUFFER, 1 );
SDL_GL_SetAttribute( SDL_GL_DEPTH_SIZE, 24 );
SDL_CreateWindowAndRenderer( 512, 512, SDL_WINDOW_OPENGL | SDL_WINDOW_SHOWN, &mainwindow, &renderer );
maincontext = SDL_GL_CreateContext( mainwindow );
glewExperimental = GL_TRUE;
glewInit( );
fprintf( stdout, "%s\n", glGetString(GL_VERSION) );
fprintf( stdout, "%s\n", glGetString(GL_SHADING_LANGUAGE_VERSION) );
FIBITMAP* dib = FreeImage_Load( FIF_PNG, "test.png" );
uint32_t w = FreeImage_GetWidth( dib );
uint32_t h = FreeImage_GetHeight( dib );
dib = FreeImage_ConvertTo32Bits( dib );
BYTE* pixeles = FreeImage_GetBits( dib );
GLubyte* textura = new GLubyte[4*w*h];
SDL_Texture* texture = SDL_CreateTexture(renderer, SDL_PIXELFORMAT_RGBA8888, SDL_TEXTUREACCESS_STATIC, w, h );
const SDL_Rect rect = { 0, 0, w, h };
int pitch = 32;
SDL_LockTexture( texture, &rect, (void**)&textura, &pitch );
for( uint32_t j = 0; j < w * h; j++ )
{
textura[j*4+0] = pixeles[j*4+2];
textura[j*4+1] = pixeles[j*4+1];
textura[j*4+2] = pixeles[j*4+0];
textura[j*4+3] = pixeles[j*4+3];
}
SDL_UnlockTexture( texture );
FreeImage_Unload( dib );
delete [] textura;
int effect = glfxGenEffect( );
std::string shader;
shader ="struct VSinput\n"
"{\n"
" vec3 Position;\n"
"};\n"
"shader VSmain(in VSinput VSin, out vec2 TexCoord)\n"
"{\n"
" gl_Position = vec4(VSin.Position, 1.0);\n"
" TexCoord = vec2( 0.8, 0.8 );\n"
"};\n"
"uniform sampler2D gColorMap;\n"
"shader FSmain(in vec2 TexCoord, out vec4 FragColor)\n"
"{\n"
" FragColor = texture(gColorMap, TexCoord);\n"
"}\n"
"program SimpleTechnique\n"
"{\n"
" vs(150) = VSmain();\n"
" fs(150) = FSmain();\n"
"};\0";
glfxParseEffectFromMemory( effect, shader.c_str() );
int shaderProg = glfxCompileProgram( effect, "SimpleTechnique" );
if (shaderProg < 0)
{
std::string log = glfxGetEffectLog(effect);
fprintf( stderr, "%s\n", log.c_str() );
}
glClearColor ( 0.0, 0.0, 1.0, 1.0 );
glClear ( GL_COLOR_BUFFER_BIT );
float* vert = new float[9];
vert[0] = 0.0; vert[1] = 0.5; vert[2] =-1.0;
vert[3] =-1.0; vert[4] =-0.5; vert[5] =-1.0;
vert[6] = 1.0; vert[7] =-0.5; vert[8] =-1.0;
unsigned int m_vaoID;
unsigned int m_vboID;
glGenVertexArrays( 1, &m_vaoID );
glBindVertexArray( m_vaoID );
glGenBuffers( 1, &m_vboID );
glBindBuffer( GL_ARRAY_BUFFER, m_vboID );
glBufferData( GL_ARRAY_BUFFER, 9 * sizeof(GLfloat), vert, GL_STATIC_DRAW );
glVertexAttribPointer( 0, 3, GL_FLOAT, GL_FALSE, 0, 0 );
glEnableVertexAttribArray( 0 );
glEnable( GL_TEXTURE_2D );
int loc = glGetUniformLocation( shaderProg, "gColorMap" );
glActiveTexture( GL_TEXTURE0 );
SDL_GL_BindTexture(texture, NULL, NULL );
glUniform1i( loc, 0 );
glUseProgram( shaderProg );
glDrawArrays( GL_TRIANGLES, 0, 3 );
glDisableVertexAttribArray( 0 );
glBindVertexArray( 0 );
delete[] vert;
glBindBuffer( GL_ARRAY_BUFFER, 0 );
glDeleteBuffers( 1, &m_vboID );
glDeleteVertexArrays( 1, &m_vaoID );
SDL_GL_SwapWindow( mainwindow );
SDL_Delay( 2000 );
SDL_GL_DeleteContext( maincontext );
SDL_DestroyWindow( mainwindow );
SDL_Quit( );
return 0;
}
The documentation for glUniform says it will "specify the value of a uniform variable for the current program object".
So call glUseProgram() first and then glUniform1i(), not the other way around; glUniform* writes to whichever program is currently in use.
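Applied to the code in the question, the corrected ordering looks roughly like this:
glUseProgram( shaderProg ); // make the program current first
int loc = glGetUniformLocation( shaderProg, "gColorMap" );
glActiveTexture( GL_TEXTURE0 );
SDL_GL_BindTexture( texture, NULL, NULL );
glUniform1i( loc, 0 ); // now targets shaderProg's sampler
glDrawArrays( GL_TRIANGLES, 0, 3 );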
EDIT: This is looking like a bug in SDL2. You might try the demo program I attached to the report and see if you can repro on your system.
EDIT2: Looks like Sam has a fix in already.