I have a very simple program that maps a dummy red texture to a quad.
Here is the texture definition in C++:
struct DummyRGB8Texture2d
{
    uint8_t data[3 * 4];
    int width;
    int height;
};

DummyRGB8Texture2d myTexture
{
    {
        255, 0, 0,
        255, 0, 0,
        255, 0, 0,
        255, 0, 0
    },
    2u,
    2u
};
This is how I set up the texture:
void SetupTexture()
{
    // allocate a texture on the default texture unit (GL_TEXTURE0):
    GL_CHECK(glCreateTextures(GL_TEXTURE_2D, 1, &m_texture));

    // allocate storage for the texture:
    GL_CHECK(glTextureStorage2D(m_texture, 1, GL_RGB8, myTexture.width, myTexture.height));
    GL_CHECK(glTextureParameteri(m_texture, GL_TEXTURE_WRAP_S, GL_REPEAT));
    GL_CHECK(glTextureParameteri(m_texture, GL_TEXTURE_WRAP_T, GL_REPEAT));
    GL_CHECK(glTextureParameteri(m_texture, GL_TEXTURE_MAG_FILTER, GL_NEAREST));
    GL_CHECK(glTextureParameteri(m_texture, GL_TEXTURE_MIN_FILTER, GL_NEAREST));

    // tell the shader that the sampler2D uniform uses the default texture unit (GL_TEXTURE0):
    GL_CHECK(glProgramUniform1i(m_program->Id(), /* location in shader */ 3, /* texture unit index */ 0));

    // bind the created texture to the specified target; this is necessary even with DSA:
    GL_CHECK(glBindTexture(GL_TEXTURE_2D, m_texture));
    GL_CHECK(glGenerateMipmap(GL_TEXTURE_2D));
}
This is how I draw the texture to the quad:
void Draw()
{
    m_target->ClearTargetBuffers();
    m_program->MakeCurrent();

    // load the texture to the GPU:
    GL_CHECK(glTextureSubImage2D(m_texture, 0, 0, 0, myTexture.width, myTexture.height,
                                 GL_RGB, GL_UNSIGNED_BYTE, myTexture.data));

    GL_CHECK(glBindVertexArray(m_vao));
    GL_CHECK(glDrawElements(GL_TRIANGLES, static_cast<GLsizei>(VideoQuadElementArray.size()), GL_UNSIGNED_INT, 0));

    m_target->SwapTargetBuffers();
}
The Result:
I can't figure out why this texture won't appear red. Also, if I change the texture's internal format to RGBA/RGBA8 and add a fourth element to each row of the texture data array, I get a nice red texture.
In case it's relevant, here are my vertex attributes and my (very simple) shaders:
struct VideoQuadVertex
{
    glm::vec3 vertex;
    glm::vec2 uv;
};

std::array<VideoQuadVertex, 4> VideoQuadInterleavedArray
{
    /* vec3 */ VideoQuadVertex{ glm::vec3{ -0.25f, -0.25f, 0.5f }, /* vec2 */ glm::vec2{ 0.0f, 0.0f } },
    /* vec3 */ VideoQuadVertex{ glm::vec3{  0.25f, -0.25f, 0.5f }, /* vec2 */ glm::vec2{ 1.0f, 0.0f } },
    /* vec3 */ VideoQuadVertex{ glm::vec3{  0.25f,  0.25f, 0.5f }, /* vec2 */ glm::vec2{ 1.0f, 1.0f } },
    /* vec3 */ VideoQuadVertex{ glm::vec3{ -0.25f,  0.25f, 0.5f }, /* vec2 */ glm::vec2{ 0.0f, 1.0f } }
};
vertex setup:
void SetupVertexData()
{
    // create a VAO to hold all node rendering states, no need for binding:
    GL_CHECK(glCreateVertexArrays(1, &m_vao));

    // create vertex buffer objects for data and indices and initialize them:
    GL_CHECK(glCreateBuffers(static_cast<GLsizei>(m_vbo.size()), m_vbo.data()));

    // allocate memory for interleaved vertex attributes and transfer them to the GPU:
    GL_CHECK(glNamedBufferData(m_vbo[EVbo::Data], VideoQuadInterleavedArray.size() * sizeof(VideoQuadVertex), VideoQuadInterleavedArray.data(), GL_STATIC_DRAW));
    GL_CHECK(glVertexArrayAttribBinding(m_vao, 0, 0));
    GL_CHECK(glVertexArrayVertexBuffer(m_vao, 0, m_vbo[EVbo::Data], 0, sizeof(VideoQuadVertex)));

    // set up the indices array:
    GL_CHECK(glNamedBufferData(m_vbo[EVbo::Element], VideoQuadElementArray.size() * sizeof(GLuint), VideoQuadElementArray.data(), GL_STATIC_DRAW));
    GL_CHECK(glVertexArrayElementBuffer(m_vao, m_vbo[EVbo::Element]));

    // enable the relevant attributes for this VAO and
    // specify their format and binding point:
    // vertices:
    GL_CHECK(glEnableVertexArrayAttrib(m_vao, 0 /* location in shader */));
    GL_CHECK(glVertexArrayAttribFormat(
        m_vao,
        0,                                 // attribute location
        3,                                 // number of components in each data member
        GL_FLOAT,                          // type of each component
        GL_FALSE,                          // should normalize
        offsetof(VideoQuadVertex, vertex)  // offset relative to the start of each vertex
    ));

    // uvs:
    GL_CHECK(glEnableVertexArrayAttrib(m_vao, 1 /* location in shader */));
    GL_CHECK(glVertexArrayAttribFormat(
        m_vao,
        1,                                 // attribute location
        2,                                 // number of components in each data member
        GL_FLOAT,                          // type of each component
        GL_FALSE,                          // should normalize
        offsetof(VideoQuadVertex, uv)      // offset relative to the start of each vertex
    ));
    GL_CHECK(glVertexArrayAttribBinding(m_vao, 1, 0));
}
vertex shader:
layout(location = 0) in vec3 position;
layout(location = 1) in vec2 texture_coordinate;

out FragmentData
{
    vec2 uv;
} toFragment;

void main(void)
{
    toFragment.uv = texture_coordinate;
    gl_Position = vec4(position, 1.0f);
}
fragment shader:
in FragmentData
{
    vec2 uv;
} data;

out vec4 color;

layout (location = 3) uniform sampler2D tex_object;

void main()
{
    color = texture(tex_object, data.uv);
}
GL_UNPACK_ALIGNMENT specifies the alignment requirement for the start of each pixel row in memory. By default, GL_UNPACK_ALIGNMENT is 4.
This means each row of the texture data is assumed to occupy 4*N bytes. A 2x2 GL_RGB texture has 2*3 = 6 bytes of pixel data per row, so 2 padding bytes are skipped at the end of every row.
You specify a 2x2 texture with the data: 255, 0, 0, 255, 0, 0, 255, 0, 0, 255, 0, 0
With GL_UNPACK_ALIGNMENT set to 4 this is interpreted as

            column 1       column 2          alignment
row 1:      255, 0, 0,     255, 0, 0,        255, 0,
row 2:      0, 255, 0,     0, undef, undef

So the texture is read as

            column 1    column 2
row 1:      red,        red
row 2:      green,      RGB(0, ?, ?)
You have to set glPixelStorei(GL_UNPACK_ALIGNMENT, 1); before glTextureSubImage2D so that the tightly packed texture data is read correctly.
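For example, applied to the Draw() function shown above (a minimal sketch; the unpack alignment is global pixel-store state, so it could equally be set once in SetupTexture()):

    // rows of myTexture.data are tightly packed (2 * 3 = 6 bytes per row),
    // so tell OpenGL not to expect 4-byte row alignment before the upload:
    glPixelStorei(GL_UNPACK_ALIGNMENT, 1);
    GL_CHECK(glTextureSubImage2D(m_texture, 0, 0, 0, myTexture.width, myTexture.height,
                                 GL_RGB, GL_UNSIGNED_BYTE, myTexture.data));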
If you do not want to change GL_UNPACK_ALIGNMENT (the alignment remains set to 4), you must adjust the data as follows:
struct DummyRGB8Texture2d
{
    uint8_t data[8 * 2];
    int width;
    int height;
};

DummyRGB8Texture2d myTexture
{
    {
        255, 0, 0,  255, 0, 0,  // row 1
        0, 0,                   // 2 bytes of alignment padding
        255, 0, 0,  255, 0, 0,  // row 2
        0, 0                    // 2 bytes of alignment padding
    },
    2u,
    2u
};
See further:
Stack Overflow question glPixelStorei(GL_UNPACK_ALIGNMENT, 1) Disadvantages?
Stack Overflow question OpenGL GL_UNPACK_ALIGNMENT
Khronos OpenGL Common Mistakes - Texture upload and pixel reads
Edit: Removed a lot of clutter and rephrased the question.
I have stored an array of floats in a texture for my shader using:
float simpleArray2D[4] = { 10.0f, 20.0f, 30.0f, 400.0f };
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA32F, 2, 2, 0, GL_RGB, GL_FLOAT, &simpleArray2D);
How do I access specific elements of the float array from within the shader?
Here is the fragment shader code I have used to test it so far. It displays green when the fetched value matches the expected one (10.0f in this case), and red when it does not:
vec2 textureCoordinates = vec2(0.0f, 0.0f);
float testValueFloat = float(texture(floatArraySampler, textureCoordinates));
outColor = testValueFloat >= 10.0f ? vec4(0,1,0,1) : vec4(1,0,0,1); //Showed green
//outColor = testValueFloat >= 10.1f ? vec4(0,1,0,1) : vec4(1,0,0,1); //Showed red
In GLSL you can use texelFetch to get a texel from a texture by integer coordinates.
This means the texels of a texture can be addressed similarly to the elements of an array, by index:
ivec2 ij = ivec2(0, 0);
float testValueFloat = texelFetch(floatArraySampler, ij, 0).r;
But note that the array consists of only 4 elements:
float simpleArray2D[4] = { 10.0f, 20.0f, 30.0f, 400.0f };
So the texture can be a 2x2 texture with one color channel (GL_RED)
glTexImage2D(GL_TEXTURE_2D, 0, GL_R32F, 2, 2, 0, GL_RED, GL_FLOAT, &simpleArray2D);
or a 1x1 texture with 4 color channels (GL_RGBA)
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA32F, 1, 1, 0, GL_RGBA, GL_FLOAT, &simpleArray2D);
but it can't be a 2x2 RGBA texture, because that would require the array to have 16 elements.
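For instance, with the 2x2 GL_R32F layout, a linear array index can be mapped to 2D texel coordinates in the shader (a sketch, assuming a sampler uniform named floatArraySampler as in the question):

    int index = 3;                          // e.g. the element 400.0f
    ivec2 ij = ivec2(index % 2, index / 2); // maps to texel (1, 1) in the 2x2 texture
    float value = texelFetch(floatArraySampler, ij, 0).r;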
I am a newbie in OpenGL. I used the following example from Qt as a starting point, as I know Qt:
http://doc.qt.io/qt-5/qtquick-scenegraph-openglunderqml-squircle-cpp.html
I replaced the paint function of the program with the following code, with the intent of drawing a chessboard pattern. The following is now the paint/render function of my program:
void paint()
{
    if (!m_program) {
        initializeOpenGLFunctions();

        m_program = new QOpenGLShaderProgram();
        m_program->addShaderFromSourceCode(QOpenGLShader::Vertex,
            "attribute highp vec4 vertices;"
            "varying highp vec2 coords;"
            "void main() {"
            "    gl_Position = vertices;"
            "    coords = vertices.xy;"
            "}");
        m_program->addShaderFromSourceCode(QOpenGLShader::Fragment,
            "uniform lowp float t;"
            "varying highp vec2 coords;"
            "void main() {"
            "    lowp float i = 1. - (pow(abs(coords.x), 4.) + pow(abs(coords.y), 4.));"
            "    i = smoothstep(t - 0.8, t + 0.8, i);"
            "    i = floor(i * 20.) / 20.;"
            "    gl_FragColor = vec4(coords * .5 + .5, i, i);"
            "}");
        m_program->bindAttributeLocation("vertices", 0);
        m_program->link();
    }

    auto width = static_cast<float>(m_viewportSize.width());
    auto height = static_cast<float>(m_viewportSize.height());
    auto a = 2.f / width;
    auto b = 2.f / height;

    std::vector<float> matrix =
    {
         a,  0, 0, 0,
         0, -b, 0, 0,
         0,  0, 1, 0,
        -1,  1, 0, 1
    };

    // Set the projection matrix
    glMatrixMode(GL_PROJECTION);
    glLoadMatrixf(matrix.data());

    // Initialize vertices:
    std::vector<float> vertices =
    {
        0,     0,
        0,     height,
        width, height,
        width, 0
    };

    // Initialize colors
    std::vector<float> colors =
    {
        1, 0, 0,
        0, 1, 0,
        0, 0, 1,
        0, 1, 1
    };

    // Initialize texture coordinates
    std::vector<float> texCoord =
    {
        0, 0,
        0, 1,
        1, 1,
        1, 0
    };

    // Create texture: simple 8x8 chessboard
    auto numRows = 8u;
    auto numCols = 8u;
    auto character = 172u;
    auto remain = 255u - character;
    std::vector<unsigned char> texture(numCols * numRows);
    for (auto i = 0u; i < texture.size(); ++i)
        texture[i] = ((i + (i / numCols)) % 2) * remain + character;

    // Upload the texture to the GPU
    unsigned textureID;
    glGenTextures(1, &textureID);
    glBindTexture(GL_TEXTURE_2D, textureID);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
    glTexImage2D(GL_TEXTURE_2D, 0, GL_LUMINANCE, numCols, numRows, 0, GL_LUMINANCE, GL_UNSIGNED_BYTE, texture.data());

    // Initialize the clear color
    glClearColor(0.f, 0.f, 0.f, 1.f);

    // Activate the necessary states
    glEnable(GL_TEXTURE_2D);
    glEnableClientState(GL_VERTEX_ARRAY);
    glEnableClientState(GL_COLOR_ARRAY);
    glEnableClientState(GL_TEXTURE_COORD_ARRAY);
    glVertexPointer(2, GL_FLOAT, 0, vertices.data());
    glColorPointer(3, GL_FLOAT, 0, colors.data());
    glTexCoordPointer(2, GL_FLOAT, 0, texCoord.data());

    // Render
    glClear(GL_COLOR_BUFFER_BIT);
    glDrawArrays(GL_QUADS, 0, 4);

    m_program->disableAttributeArray(0);
    m_program->release();
    m_window->resetOpenGLState();
}
The chessboard is drawn, but it's drawn for a split second and then the screen turns fully white. I want the chessboard pattern to be drawn continuously, with every frame.
Can someone please point out what might be going wrong?
At the very top you have:
if (!m_program) {
then you initialize m_program, and at the very bottom you have:
m_program->release();
which, as Harish points out in the comments, is equivalent to calling glUseProgram(0);. So in the next iteration of paint your shader program is not bound and not available.
According to the docs, the reverse of release() is bind(), so (I am not an expert on this class) the solution might be to call QOpenGLShaderProgram::bind() at the start of the next iteration of your paint.
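A minimal sketch of that idea, applied to the paint() function above (the else branch is the only addition):

    void paint()
    {
        if (!m_program) {
            // first frame: create, compile and link m_program as before
            // ...
        } else {
            // subsequent frames: re-bind the program that was released
            // at the end of the previous paint() call
            m_program->bind();
        }

        // ... rest of paint() unchanged ...
    }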
For future reference, it's a good idea to run OpenGL programs through a graphics debugger like gDEbugger to get an insight into what's actually happening inside the API.
I am trying to wrap my head around the various types of GLSL shaders in OpenGL.
At the moment I am struggling with a 2D layered-tile implementation. For some reason, the int values that get passed into my shader are always 0.
I currently have a 2048x2048 px 2D texture composed of 20x20 px tiles. I am trying to texture one quad with it and select the tile index based on the block of ints I pass into the vertex shader.
I am passing in a vec2 of floats for the position of the quad (really a TRIANGLE_STRIP). I am also attempting to pass in 6 ints that represent the 6 layers of tiles.
My input:
// Build and compile our shader program
Shader ourShader("b_vertex.vertexShader", "b_fragment.fragmentShader");

const int floatsPerPosition = 2;
const int intsPerTriangle = 6;
const int numVertices = 4;
const int sizeOfPositions = sizeof(float) * numVertices * floatsPerPosition;
const int sizeOfColors = sizeof(int) * numVertices * intsPerTriangle;
const int numIndices = 4;
const int sizeOfIndices = sizeof(int) * numIndices;

float positions[numVertices][floatsPerPosition] =
{
    { -1,  1 },
    { -1, -1 },
    {  1,  1 },
    {  1, -1 },
};

// ints indicating the tile index
int colors[numVertices][intsPerTriangle] =
{
    { 1, 2, 3, 4, 5, 6 },
    { 1, 2, 3, 4, 5, 6 },
    { 1, 2, 3, 4, 5, 6 },
    { 1, 2, 3, 4, 5, 6 },
};

// indices on the CPU
int indices[numVertices] =
{
    0, 1, 2, 3,
};
My setup:
GLuint vao, vbo1, vbo2, ebo; // identifiers of OpenGL objects

glGenVertexArrays(1, &vao); // create a new VAO
// the bound VAO will store the connections between VBOs and attributes
glBindVertexArray(vao);

glGenBuffers(1, &vbo1);              // create a new VBO
glBindBuffer(GL_ARRAY_BUFFER, vbo1); // bind vbo1 as the current vertex buffer
// initialize the vertex buffer, allocate memory, fill it with data
glBufferData(GL_ARRAY_BUFFER, sizeOfPositions, positions, GL_STATIC_DRAW);
// indicate that the current VBO should be used with vertex attribute index 0
glEnableVertexAttribArray(0);
// indicate how vertex attribute 0 should interpret data in the connected VBO
glVertexAttribPointer(0, floatsPerPosition, GL_FLOAT, GL_FALSE, 0, 0);

glGenBuffers(1, &vbo2);              // create a new VBO
glBindBuffer(GL_ARRAY_BUFFER, vbo2); // bind vbo2 as the current vertex buffer
// initialize the vertex buffer, allocate memory, fill it with data
glBufferData(GL_ARRAY_BUFFER, sizeOfColors, colors, GL_STATIC_DRAW);
// indicate that the current VBO should be used with vertex attribute index 1
glEnableVertexAttribArray(1);
// indicate how vertex attribute 1 should interpret data in the connected VBO
glVertexAttribPointer(1, intsPerTriangle, GL_INT, GL_FALSE, 0, 0);

// create a new buffer that will be used to store indices
glGenBuffers(1, &ebo);
// bind the index buffer to the corresponding target
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, ebo);
// initialize the index buffer, allocate memory, fill it with data
glBufferData(GL_ELEMENT_ARRAY_BUFFER, sizeOfIndices, indices, GL_STATIC_DRAW);

// reset the bindings for VAO, VBO and EBO
glBindVertexArray(0);
glBindBuffer(GL_ARRAY_BUFFER, 0);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, 0);

// load and create the textures
GLuint texture1 = loadBMP_custom("uvtemplate3.bmp");
GLuint texture2 = loadBMP_custom("texture1.bmp");
My draw:
// Game loop
while (!glfwWindowShouldClose(window))
{
    // check if any events have been activated (key pressed, mouse moved etc.) and call the corresponding response functions
    glfwPollEvents();

    // render
    // clear the color buffer
    glClearColor(1.f, 0.0f, 1.0f, 1.0f);
    glClear(GL_COLOR_BUFFER_BIT);

    // activate the shader
    ourShader.Use();

    // bind the textures using texture units
    glActiveTexture(GL_TEXTURE0);
    glBindTexture(GL_TEXTURE_2D, texture1);

    // add some cool params
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_BORDER);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_BORDER);
    float borderColor[] = { 0.45f, 0.25f, 0.25f, 0.25f };
    glTexParameterfv(GL_TEXTURE_2D, GL_TEXTURE_BORDER_COLOR, borderColor);
    glUniform1i(glGetUniformLocation(ourShader.Program, "ourTexture1"), 0);

    glActiveTexture(GL_TEXTURE1);
    glBindTexture(GL_TEXTURE_2D, texture2);
    glUniform1i(glGetUniformLocation(ourShader.Program, "ourTexture2"), 1);

    // draw the container
    //glBindVertexArray(VAO);
    //glDrawElements(GL_TRIANGLES, 6, GL_UNSIGNED_INT, 0);
    glBindVertexArray(vao);
    //glDrawElements(GL_TRIANGLES, 6, GL_UNSIGNED_INT, 0);
    glDrawElements(GL_TRIANGLE_STRIP, numIndices, GL_UNSIGNED_INT, NULL);
    glBindVertexArray(0);

    // swap the screen buffers
    glfwSwapBuffers(window);
}
My shader most definitely works, as I can adjust the output by hard-coding values from within the vertex shader. My suspicion is that I am not passing the values correctly, not passing them in the correct format, or not indicating somewhere that the int[6] needs to be included per vertex.
I cannot read anything from my layout (location = 1) in int Base[6]; input. I've tried just about everything I can think of: declaring each int individually, trying to read two ivec3s, uint, and whatever else I could come up with, but everything comes back as 0.
The following are my vertex and fragment shaders, for completeness:
#version 330 core

layout (location = 0) in vec2 position;
layout (location = 1) in int Base[6];

out vec2 TexCoord;
out vec2 TexCoord2;
out vec2 TexCoord3;
out vec2 TexCoord4;
out vec2 TexCoord5;
out vec2 TexCoord6;

//  0.5f,  0.5f, // 0.0f, 118.0f, 0.0f, 0.0f, 0.0f, 0.0f,                 // Top Right
//  0.5f, -0.5f, // 0.0f, 118.0f, 1.0f, 0.0f, 0.0f, 0.009765625f,         // Bottom Right
// -0.5f, -0.5f, // 0.0f, 118.0f, 0.0f, 1.0f, 0.009765625f, 0.009765625f, // Bottom Left
// -0.5f,  0.5f, // 0.0f, 118.0f, 1.0f, 0.0f, 0.009765625f, 0.0f          // Top Left

void main()
{
    int curBase = Base[5];
    int curVertex = gl_VertexID % 4;
    vec2 texCoord = (curVertex == 0?
        vec2(0.0, 0.0):(
        curVertex == 1?
        vec2(0.0, 0.009765625):(
        curVertex == 2?
        vec2(0.009765625, 0.0):(
        curVertex == 3?
        vec2(0.009765625, 0.009765625):(
        vec2(0.0, 0.0)))))
    );
    gl_Position = vec4(position, 0.0f, 1.0f);

    TexCoord = vec2(texCoord.x + ((int(curBase) % 102) * 0.009765625f)
                  , (1.0 - texCoord.y) - ((int(curBase) / 102) * 0.009765625f));
    //curBase = Base+1;
    TexCoord2 = vec2(texCoord.x + ((int(curBase) % 102) * 0.009765625f)
                   , (1.0 - texCoord.y) - ((int(curBase) / 102) * 0.009765625f));
    //curBase = Base+2;
    TexCoord3 = vec2(texCoord.x + ((int(curBase) % 102) * 0.009765625f)
                   , (1.0 - texCoord.y) - ((int(curBase) / 102) * 0.009765625f));
}
Fragment:
#version 330 core

//in vec3 ourColor;
in vec2 TexCoord;
in vec2 TexCoord2;
in vec2 TexCoord3;
in vec2 TexCoord4;
in vec2 TexCoord5;
in vec2 TexCoord6;

out vec4 color;

// texture samplers
uniform sampler2D ourTexture1;
uniform sampler2D ourTexture2;

void main()
{
    color = (texture(ourTexture2, TexCoord) == vec4(1.0, 0.0, 1.0, 1.0)?
                (texture(ourTexture2, TexCoord2) == vec4(1.0, 0.0, 1.0, 1.0)?
                    (texture(ourTexture2, TexCoord3) == vec4(1.0, 0.0, 1.0, 1.0)?
                        (texture(ourTexture2, TexCoord4) == vec4(1.0, 0.0, 1.0, 1.0)?
                            (texture(ourTexture2, TexCoord5) == vec4(1.0, 0.0, 1.0, 1.0)?
                                (texture(ourTexture2, TexCoord6) == vec4(1.0, 0.0, 1.0, 1.0)?
                                    vec4(0.0f, 0.0f, 0.0f, 0.0f)
                                    :texture(ourTexture2, TexCoord6))
                                :texture(ourTexture2, TexCoord5))
                            :texture(ourTexture2, TexCoord4))
                        :texture(ourTexture2, TexCoord3))
                    :texture(ourTexture2, TexCoord2))
                :texture(ourTexture2, TexCoord));
}
This is wrong in two different ways:
glVertexAttribPointer(1, intsPerTriangle, GL_INT, GL_FALSE, 0, 0);
Vertex attributes in the GL can be scalars or vectors of 2 to 4 components. Hence, the size parameter of glVertexAttribPointer can only take the values 1, 2, 3 or 4. Using a different value (intsPerTriangle == 6) means that the call just generates a GL_INVALID_VALUE error and has no other effect, so you don't even set up a pointer.
If you want to pass 6 values per vertex, you can either use 6 separate scalar attributes (consuming 6 attribute slots), or pack the data into vectors, e.g. two 3D vectors (consuming only 2 slots). No matter which packing you choose, you'll need a proper attribute pointer setup for each attribute slot in use.
However, glVertexAttribPointer is also the wrong function for your use case. It defines floating-point attributes, which must have matching declarations as float/vec* in the shader. The fact that you can input GL_INT data just means that the GPU can do the conversion to floating point on the fly for you.
If you want to use an int or ivec (or their unsigned counterparts) attribute, you have to use glVertexAttribIPointer (note the I in that function name) when setting up the attribute.
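A sketch of the two-ivec3 variant under those rules (the attribute locations and the reuse of vbo2 from the question's setup are assumptions):

    // C++ side: split the 6 ints per vertex into two integer attributes of 3 components each
    glBindBuffer(GL_ARRAY_BUFFER, vbo2);
    glEnableVertexAttribArray(1);
    // note the I in glVertexAttribIPointer: the data stays integral
    glVertexAttribIPointer(1, 3, GL_INT, 6 * sizeof(int), (void*)0);
    glEnableVertexAttribArray(2);
    glVertexAttribIPointer(2, 3, GL_INT, 6 * sizeof(int), (void*)(3 * sizeof(int)));

with matching declarations in the vertex shader:

    layout (location = 1) in ivec3 BaseLow;  // former Base[0..2]
    layout (location = 2) in ivec3 BaseHigh; // former Base[3..5]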
I am trying to render a rectangle in desktop OpenGL, but the internal format used in glTexBuffer(...) and the corresponding texelFetch(...) code are not working for me. The primitive itself renders correctly; I only need to fix the texture buffer part. Below is the relevant code snippet:
unsigned char texData[16] =
{
    255, 0, 0, 0,     // Red
    0, 255, 0, 255,   // Green
    0, 0, 255, 255,   // Blue
    255, 0, 255, 255, // Pink
};

glGenBuffers(2, texBuffObj);
glBindBuffer(GL_TEXTURE_BUFFER, texBuffObj[0]);
glBufferData(GL_TEXTURE_BUFFER, sizeof(texData), texData, GL_DYNAMIC_DRAW);

glGenTextures(1, &textureID);
glBindTexture(GL_TEXTURE_BUFFER, textureID);
glTexBuffer(GL_TEXTURE_BUFFER, GL_RGBA8UI, texBuffObj[0]);
Fragment shader snippet:
#version 330 core

uniform usamplerBuffer samplerTexBuffer;

out vec4 color;

in vec2 vs_texCoord;
in vec3 vert_Color;

void main()
{
    int offset = 8; // 0:RED 4:GREEN 8:BLUE 12:PINK
    vec4 colBuff;
    colBuff = texelFetch(samplerTexBuffer, offset);
    color = colBuff;
}
The required rendering is such that:

offset value 0  -> primitive color 255, 0, 0, 0     (Red)
offset value 4  -> primitive color 0, 255, 0, 255   (Green)
offset value 8  -> primitive color 0, 0, 255, 255   (Blue)
offset value 12 -> primitive color 255, 0, 255, 255 (Pink)

What corrections are required?
texelFetch takes texel coordinates, not a byte offset into the buffer. Since your texels are 4 bytes wide, you want to retrieve them using indices 0, 1, 2, 3 rather than 0, 4, 8, 12.
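A sketch of the corrected fragment shader main(). Beyond the index fix above, two further details follow from the GL_RGBA8UI format: texelFetch on a usamplerBuffer returns a uvec4, not a vec4, and the integer values are not normalized, so the division by 255.0 here (an assumption about the intended output) converts them to the usual 0.0-1.0 range:

    void main()
    {
        int index = 2; // 0:Red 1:Green 2:Blue 3:Pink
        uvec4 texel = texelFetch(samplerTexBuffer, index); // one index per whole RGBA texel
        color = vec4(texel) / 255.0;                       // convert to normalized floating point
    }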