OpenGL rendering black square - c++

I am attempting to render a colored rectangle, but I just end up with a black one.
My shaders and shader program do not produce any errors when compiling or linking, so I am sure there is something going wrong when I am attempting to buffer the data. But, everything looks correct to me.
Shader sources
std::string const fragment_shader_source = "#version 330 core\n"
""
"in vec4 fColor;\n"
"\n"
"out vec4 finalColor;\n"
"\n"
"void main() {\n"
" finalColor = fColor;\n"
"}";
std::string const vertex_shader_source = "#version 330 core\n"
""
"layout (location = 0) in vec2 vPos;\n"
"layout (location = 1) in vec4 vColor;\n"
"\n"
"out vec4 fColor;\n"
"void main() {\n"
" gl_Position = vec4(vPos.xy, 1.0, 1.0);\n"
" fColor = vColor;\n"
"}";
Vertex data
static GLfloat const vertex_data[] = {
// //Position //Color //UV Coords
0.5f, 0.5f, 1.0f, 0.0f, 1.0f, 1.0f,
-0.5f, 0.5f, 1.0f, 0.0f, 0.0f, 1.0f,
0.5f, -0.5f, 1.0f, 1.0f, 0.0f, 1.0f,
-0.5f, -0.5f, 0.0f, 1.0f, 1.0f, 1.0f,
-0.5f, 0.5f, 0.0f, 0.0f, 1.0f, 1.0f,
0.5f, -0.5f, 0.0f, 1.0f, 0.0f, 1.0f
};
Here is my main loop:
int SDLApp::AppMain(int argc, char** argv) {
if(sdl_window_ == nullptr) {
acorn::Log(acorn::LoggingLevel::Fatal, "Could not initialize the window.");
return -1;
}
if(sdl_gl_context_ == nullptr) {
acorn::Log(acorn::LoggingLevel::Fatal, "Could not create an OpenGL context.");
return -1;
}
acorn::shader::FragmentShader f(fragment_shader_source);
acorn::shader::VertexShader v(vertex_shader_source);
acorn::shader::Program shader_program(f, v);
shader_program.Link();
shader_program.Use();
GLuint vao_;
glGenVertexArrays(1, &vao_);
GLuint vbo_;
glGenBuffers(1, &vbo_);
glBindVertexArray(vao_);
glBindBuffer(GL_ARRAY_BUFFER, vbo_);
glEnableVertexAttribArray(0);
glVertexAttribPointer(0, 2, GL_FLOAT, GL_FALSE, 6 * sizeof(GLfloat), (void*)0);
glEnableVertexAttribArray(1);
glVertexAttribPointer(1, 4, GL_FLOAT, GL_FALSE, 6 * sizeof(GLfloat), (void*)2);
glBufferData(GL_ARRAY_BUFFER, 6 * sizeof(GLfloat) * 6, vertex_data, GL_STATIC_DRAW);
glClearColor(1.0f, 0.0f, 0.0f,1.0f);
SDL_Event event;
while (true) {
while (SDL_PollEvent(&event)) {
if (event.type == SDL_QUIT) {
return 0;
}
}
glClear(GL_DEPTH_BUFFER_BIT | GL_COLOR_BUFFER_BIT);
glDrawArrays(GL_TRIANGLES, 0, 6);
SDL_GL_SwapWindow(sdl_window_);
}
}

The last parameter of glVertexAttribPointer is treated as a byte offset into the buffer object's data store.
So the offset has to be 8, 2*sizeof(float):
glVertexAttribPointer(1, 4, GL_FLOAT, GL_FALSE, 6*sizeof(GLfloat),
(void*)(2*sizeof(GLfloat)));
After changing that, your code works fine

Related

array of sampler2D uses last bound texture [duplicate]

This question already has answers here:
Will any of the following texture lookups cause undefined behavior, non-uniform flow, or both?
(1 answer)
Why are Uniform variables not working in GLSL?
(1 answer)
Closed 10 months ago.
I am trying to implement a simple batch renderer. I am using an array of sampler2D for it. However, the array is using the last bound texture instead of the texture bound to each texture unit. I would really appreciate it if anyone could help me out. Here's my code:
Edit: the sampler2D array index is working fine. When I used FragColor = vec4(t, 0, 0, 0); it gave me a black quad and a red quad. But for some reason, it is using the last bound texture: both quads are using texture2.
main.cpp
int main()
{
Window window("DJROXZHIA", 800, 600, true);
Shader shader("res/shaders/vertex.glsl", "res/shaders/fragment.glsl");
shader.enable();
float vertices[] = {
0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 1.0f, 1.0f, 1.0f, 1.0f,
1.0f, 0.0f, 0.0f, 0.0f, 1.0f, 0.0f, 1.0f, 1.0f, 1.0f, 1.0f,
1.0f, 1.0f, 0.0f, 0.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f,
0.0f, 1.0f, 0.0f, 0.0f, 0.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f,
-0.5f, -0.5f, 0.0f, 1.0f, 0.0f, 0.0f, 1.0f, 1.0f, 1.0f, 1.0f,
-1.0f, -0.5f, 0.0f, 1.0f, 1.0f, 0.0f, 1.0f, 1.0f, 1.0f, 1.0f,
-1.0f, -1.0f, 0.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f,
-0.5f, -1.0f, 0.0f, 1.0f, 0.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f
};
unsigned int indices[] = {
0, 1, 2, 2, 3, 0,
4, 5, 6, 6, 7, 4
};
Texture texture("res/texture/DodgerCover.png");
Texture texture2("res/texture/game-play-image.png"); //both the quads are using texture2
GLuint vao, vbo, ibo;
glGenVertexArrays(1, &vao);
glGenBuffers(1, &vbo);
glGenBuffers(1, &ibo);
glBindVertexArray(vao);
glBindBuffer(GL_ARRAY_BUFFER, vbo);
glBufferData(GL_ARRAY_BUFFER, sizeof(vertices), vertices, GL_STATIC_DRAW);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, ibo);
glBufferData(GL_ELEMENT_ARRAY_BUFFER, sizeof(indices), indices, GL_STATIC_DRAW);
glEnableVertexAttribArray(0);
glEnableVertexAttribArray(1);
glEnableVertexAttribArray(2);
glEnableVertexAttribArray(3);
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, sizeof(DJROXZHIA::maths::Vertex2D), (void*)offsetof(DJROXZHIA::maths::Vertex2D, pos));
glVertexAttribPointer(1, 1, GL_FLOAT, GL_FALSE, sizeof(DJROXZHIA::maths::Vertex2D), (void*)offsetof(DJROXZHIA::maths::Vertex2D, texture));
glVertexAttribPointer(2, 2, GL_FLOAT, GL_FALSE, sizeof(DJROXZHIA::maths::Vertex2D), (void*)offsetof(DJROXZHIA::maths::Vertex2D, texCoords));
glVertexAttribPointer(3, 4, GL_FLOAT, GL_FALSE, sizeof(DJROXZHIA::maths::Vertex2D), (void*)offsetof(DJROXZHIA::maths::Vertex2D, color));
glBindBuffer(GL_ARRAY_BUFFER, 0);
glBindVertexArray(0);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, 0);
GLfloat t[2] = { 0.0f, 1.0f };
glUniform1fv(glGetUniformLocation(shader.getID(), "u_texture"), 2, t);
while (!window.closed())
{
window.clear(glm::vec4(0.0f));
glActiveTexture(GL_TEXTURE + 0);
glBindTexture(GL_TEXTURE_2D, texture.getTextureID());
glActiveTexture(GL_TEXTURE + 1);
glBindTexture(GL_TEXTURE_2D, texture2.getTextureID());
glBindVertexArray(vao);
glDrawElements(GL_TRIANGLES, 12, GL_UNSIGNED_INT, 0);
glBindVertexArray(0);
window.tick();
}
return 0;
}
vertex.glsl
#version 330 core
layout (location = 0) in vec3 aPos;
layout (location = 1) in float aTexture;
layout (location = 2) in vec2 aTexCoords;
layout (location = 3) in vec4 aColor;
out float tex;
out vec2 texCoords;
out vec4 color;
out vec3 pos;
//uniform mat4 u_ProjView;
void main()
{
gl_Position = vec4(aPos, 1.0);
tex = aTexture;
texCoords = aTexCoords;
color = aColor;
pos = aPos;
}
fragment.glsl
#version 330 core
in float tex;
in vec2 texCoords;
in vec4 color;
in vec3 pos;
uniform sampler2D u_texture[2];
out vec4 FragColor;
void main()
{
int t = int(tex);
FragColor = texture(u_texture[t], texCoords) * color;
}
Thanks in advance

When my quad draws in OpenGl, the texture stretches oddly, i need it to morph to the shape of the quad uniformly, not as shown below. How do I do so? [duplicate]

I want to make an OpenGL program that can present images and warp the presented images.
Although I achieved image rendering using OpenGL, I don't know how to warp an image.
A warped example of what I want is (Reference):
But a picture I got is:
As far as I know, this problem is related to perspective-correct texture mapping.
But I don't know about that well.
Here is my source code.
void imageRender(Shader initShader, Shader imgShader, char *path){
glHint(GL_PERSPECTIVE_CORRECTION_HINT, GL_FASTEST);
float positions = { 0.5f, 1.0f, 0.0f,
1.0f, -1.0f, 0.0f,
-1.0f, -1.0f, 0.0f,
-1.0f, 1.0f, 0.0f };
float vertices[] = {
// positions // colors // texture coords
position[0], position[1], position[2], 1.0f, 0.0f, 0.0f, 1.0f, 1.0f, // top right
position[3], position[4], position[5], 0.0f, 1.0f, 0.0f, 1.0f, 0.0f, // bottom right
position[6], position[7], position[8], 0.0f, 0.0f, 1.0f, 0.0f, 0.0f, // bottom left
position[9], position[10],position[11], 1.0f, 1.0f, 0.0f, 0.0f, 1.0f // top left
};
unsigned int indices[] = {
0, 1, 3,
1, 2, 3
};
unsigned int VAO, VBO, EBO;
glGenVertexArrays(1, &VAO);
glBindVertexArray(VAO);
glGenBuffers(1, &VBO);
glGenBuffers(1, &EBO);
glBindBuffer(GL_ARRAY_BUFFER, VBO);
glBufferData(GL_ARRAY_BUFFER, sizeof(vertices), vertices, GL_STATIC_DRAW);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, EBO);
glBufferData(GL_ELEMENT_ARRAY_BUFFER, sizeof(indices), indices, GL_STATIC_DRAW);
// position attribute
//glVertexAttribPointer(1, 3, GL_FLOAT, GL_FALSE, 8 * sizeof(float), (void*)0);
glVertexAttribPointer(1, 3, GL_FLOAT, GL_FALSE, 8 * sizeof(float), (void*)0);
glEnableVertexAttribArray(1);
// color attribute
glVertexAttribPointer(2, 3, GL_FLOAT, GL_FALSE, 8 * sizeof(float), (void*)(3 * sizeof(float)));
glEnableVertexAttribArray(2);
//texture attribute
glVertexAttribPointer(3, 2, GL_FLOAT, GL_FALSE, 8 * sizeof(float), (void*)(6 * sizeof(float)));
glEnableVertexAttribArray(3);
FREE_IMAGE_FORMAT format = FreeImage_GetFileType(path, 0);
if (format == -1){
cerr << BOLDRED << "[ERROR] IMAGE_NOT_FOUND" << RESET << endl;
exit(1);
}
if (format == FIF_UNKNOWN){
cerr << BOLDRED << "[ERROR] UNKNOWN_IMAGE_FORMAT" << RESET << endl;
format = FreeImage_GetFIFFromFilename(path);
if (!FreeImage_FIFSupportsReading(format)){
cerr << BOLDRED << "[ERROR] IMAGE_FORMAT_NOT_READABLE" << RESET << endl;
exit(1);
}
}
FIBITMAP *bitmap = FreeImage_Load(format, path);
FIBITMAP *bitmap32;
int bitsPerPixel = FreeImage_GetBPP(bitmap);
bitmap32 = FreeImage_ConvertTo32Bits(bitmap);
int imageWidth = FreeImage_GetWidth(bitmap32);
int imageHeight = FreeImage_GetHeight(bitmap32);
GLubyte *textureData = FreeImage_GetBits(bitmap32);
GLuint texture1;
glGenTextures(1, &texture1);
glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_2D, texture1);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, imageWidth, imageHeight, 0, GL_BGRA, GL_UNSIGNED_BYTE, textureData);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT); // set texture wrapping to GL_REPEAT (default wrapping method)
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
initShader.use();
glBindVertexArray(VAO);
int initalTime = time(NULL);
while(1){
glBindVertexArray(VAO);
int timecal = time(NULL);
//glDrawElements(GL_TRIANGLE_STRIP, 6, GL_UNSIGNED_INT, 0);
glDrawElements(GL_TRIANGLES, 6, GL_UNSIGNED_INT, 0);
glfwSwapBuffers(window);
glfwPollEvents();
glClear( GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT | GL_STENCIL_BUFFER_BIT);
if ((timecal - initalTime) > imageTime) // imageTime value is 10
break;
}
glClear( GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT | GL_STENCIL_BUFFER_BIT);
glDeleteTextures(1, &texture1);// image
glDisable(GL_TEXTURE_2D);//image
FreeImage_Unload(bitmap32);
FreeImage_Unload(bitmap);
glDeleteVertexArrays(1, &VAO);
glDeleteBuffers(1, &VBO);
glDeleteBuffers(1, &EBO);
}
}
Shader code is like that
//shader.vs
#version 330 core
layout(location = 1) in vec3 position;
layout(location = 2) in vec3 color;
layout(location = 3) in vec2 texcoord;
out vec3 Color;
out vec2 Texcoord;
void main()
{
gl_Position = vec4(position, 1.0);
Texcoord = texcoord;
}
//shader.fs
#version 330 core
in vec3 Color;
in vec2 Texcoord;
out vec4 outColor;
uniform sampler2D texture5;
void main()
{
outColor = texture2D(texture5, Texcoord);
}
How can I warp texture?
Also, is it correct to use the position values to warp the texture image?
The issue has nothing to do with perspective projection. You draw a polygon with 4 vertices parallel to the XY plane of the view, but the polygon is not a quad! Change the x coordinate of the 1st vertex (0.5f -> 1.0f). Perspective projection works with Homogeneous coordinates.
In general, perspective projection is achieved by a perspective projection matrix. Of course, you can define homogeneous vertices to inspect the behavior:
Define an attribute tuple with homogenous vertices (4 components):
float vertices[] = {
// positions // colors // texture coords
1.0f, 1.0f, 0.5f, 2.0f, 1.0f, 0.0f, 0.0f, 1.0f, 1.0f, // top right
1.0f, -1.0f, -0.5f, 1.0f, 0.0f, 1.0f, 0.0f, 1.0f, 0.0f, // bottom right
-1.0f, -1.0f, -0.5f, 1.0f, 0.0f, 0.0f, 1.0f, 0.0f, 0.0f, // bottom left
-1.0f, 1.0f, 0.5f, 2.0f, 1.0f, 1.0f, 0.0f, 0.0f, 1.0f // top left
};
Adapt the vertex specification and the vertex shader:
// position attribute
glVertexAttribPointer(1, 4, GL_FLOAT, GL_FALSE, 9 * sizeof(float), (void*)0);
glEnableVertexAttribArray(1);
// color attribute
glVertexAttribPointer(2, 3, GL_FLOAT, GL_FALSE, 9 * sizeof(float), (void*)(4 * sizeof(float)));
glEnableVertexAttribArray(2);
//texture attribute
glVertexAttribPointer(3, 2, GL_FLOAT, GL_FALSE, 9 * sizeof(float), (void*)(7 * sizeof(float)));
glEnableVertexAttribArray(3);
#version 330 core
layout(location = 1) in vec4 position;
layout(location = 2) in vec3 color;
layout(location = 3) in vec2 texcoord;
out vec3 Color;
out vec2 Texcoord;
void main()
{
gl_Position = position;
Texcoord = texcoord;
}
Another option to achieve the effect is to add a Z component to the geometry, e.g.:
float positions = { 1.0f, 1.0f, 0.5f,
1.0f, -1.0f, 0.0f,
-1.0f, -1.0f, -0.5f,
-1.0f, 1.0f, 0.0f };
and to compute the w component dependent on z in the vertex shader (e.g. w = z + 2.5):
#version 330 core
layout(location = 1) in vec3 position;
layout(location = 2) in vec3 color;
layout(location = 3) in vec2 texcoord;
out vec3 Color;
out vec2 Texcoord;
void main()
{
gl_Position = vec4(position, position.z + 2.5);
Texcoord = texcoord;
}

how to warp texture in openGL? (Perspective correction?)

I want to make an OpenGL program that can present images and warp the presented images.
Although I achieved image rendering using OpenGL, I don't know how to warp an image.
A warped example of what I want is (Reference):
But a picture I got is:
As far as I know, this problem is related to perspective-correct texture mapping.
But I don't know about that well.
Here is my source code.
void imageRender(Shader initShader, Shader imgShader, char *path){
glHint(GL_PERSPECTIVE_CORRECTION_HINT, GL_FASTEST);
float positions = { 0.5f, 1.0f, 0.0f,
1.0f, -1.0f, 0.0f,
-1.0f, -1.0f, 0.0f,
-1.0f, 1.0f, 0.0f };
float vertices[] = {
// positions // colors // texture coords
position[0], position[1], position[2], 1.0f, 0.0f, 0.0f, 1.0f, 1.0f, // top right
position[3], position[4], position[5], 0.0f, 1.0f, 0.0f, 1.0f, 0.0f, // bottom right
position[6], position[7], position[8], 0.0f, 0.0f, 1.0f, 0.0f, 0.0f, // bottom left
position[9], position[10],position[11], 1.0f, 1.0f, 0.0f, 0.0f, 1.0f // top left
};
unsigned int indices[] = {
0, 1, 3,
1, 2, 3
};
unsigned int VAO, VBO, EBO;
glGenVertexArrays(1, &VAO);
glBindVertexArray(VAO);
glGenBuffers(1, &VBO);
glGenBuffers(1, &EBO);
glBindBuffer(GL_ARRAY_BUFFER, VBO);
glBufferData(GL_ARRAY_BUFFER, sizeof(vertices), vertices, GL_STATIC_DRAW);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, EBO);
glBufferData(GL_ELEMENT_ARRAY_BUFFER, sizeof(indices), indices, GL_STATIC_DRAW);
// position attribute
//glVertexAttribPointer(1, 3, GL_FLOAT, GL_FALSE, 8 * sizeof(float), (void*)0);
glVertexAttribPointer(1, 3, GL_FLOAT, GL_FALSE, 8 * sizeof(float), (void*)0);
glEnableVertexAttribArray(1);
// color attribute
glVertexAttribPointer(2, 3, GL_FLOAT, GL_FALSE, 8 * sizeof(float), (void*)(3 * sizeof(float)));
glEnableVertexAttribArray(2);
//texture attribute
glVertexAttribPointer(3, 2, GL_FLOAT, GL_FALSE, 8 * sizeof(float), (void*)(6 * sizeof(float)));
glEnableVertexAttribArray(3);
FREE_IMAGE_FORMAT format = FreeImage_GetFileType(path, 0);
if (format == -1){
cerr << BOLDRED << "[ERROR] IMAGE_NOT_FOUND" << RESET << endl;
exit(1);
}
if (format == FIF_UNKNOWN){
cerr << BOLDRED << "[ERROR] UNKNOWN_IMAGE_FORMAT" << RESET << endl;
format = FreeImage_GetFIFFromFilename(path);
if (!FreeImage_FIFSupportsReading(format)){
cerr << BOLDRED << "[ERROR] IMAGE_FORMAT_NOT_READABLE" << RESET << endl;
exit(1);
}
}
FIBITMAP *bitmap = FreeImage_Load(format, path);
FIBITMAP *bitmap32;
int bitsPerPixel = FreeImage_GetBPP(bitmap);
bitmap32 = FreeImage_ConvertTo32Bits(bitmap);
int imageWidth = FreeImage_GetWidth(bitmap32);
int imageHeight = FreeImage_GetHeight(bitmap32);
GLubyte *textureData = FreeImage_GetBits(bitmap32);
GLuint texture1;
glGenTextures(1, &texture1);
glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_2D, texture1);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, imageWidth, imageHeight, 0, GL_BGRA, GL_UNSIGNED_BYTE, textureData);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT); // set texture wrapping to GL_REPEAT (default wrapping method)
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
initShader.use();
glBindVertexArray(VAO);
int initalTime = time(NULL);
while(1){
glBindVertexArray(VAO);
int timecal = time(NULL);
//glDrawElements(GL_TRIANGLE_STRIP, 6, GL_UNSIGNED_INT, 0);
glDrawElements(GL_TRIANGLES, 6, GL_UNSIGNED_INT, 0);
glfwSwapBuffers(window);
glfwPollEvents();
glClear( GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT | GL_STENCIL_BUFFER_BIT);
if ((timecal - initalTime) > imageTime) // imageTime value is 10
break;
}
glClear( GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT | GL_STENCIL_BUFFER_BIT);
glDeleteTextures(1, &texture1);// image
glDisable(GL_TEXTURE_2D);//image
FreeImage_Unload(bitmap32);
FreeImage_Unload(bitmap);
glDeleteVertexArrays(1, &VAO);
glDeleteBuffers(1, &VBO);
glDeleteBuffers(1, &EBO);
}
}
Shader code is like that
//shader.vs
#version 330 core
layout(location = 1) in vec3 position;
layout(location = 2) in vec3 color;
layout(location = 3) in vec2 texcoord;
out vec3 Color;
out vec2 Texcoord;
void main()
{
gl_Position = vec4(position, 1.0);
Texcoord = texcoord;
}
//shader.fs
#version 330 core
in vec3 Color;
in vec2 Texcoord;
out vec4 outColor;
uniform sampler2D texture5;
void main()
{
outColor = texture2D(texture5, Texcoord);
}
How can I warp texture?
Also, is it correct to use the position values to warp the texture image?
The issue has nothing to do with perspective projection. You draw a polygon with 4 vertices parallel to the XY plane of the view, but the polygon is not a quad! Change the x coordinate of the 1st vertex (0.5f -> 1.0f). Perspective projection works with Homogeneous coordinates.
In general, perspective projection is achieved by a perspective projection matrix. Of course, you can define homogeneous vertices to inspect the behavior:
Define an attribute tuple with homogenous vertices (4 components):
float vertices[] = {
// positions // colors // texture coords
1.0f, 1.0f, 0.5f, 2.0f, 1.0f, 0.0f, 0.0f, 1.0f, 1.0f, // top right
1.0f, -1.0f, -0.5f, 1.0f, 0.0f, 1.0f, 0.0f, 1.0f, 0.0f, // bottom right
-1.0f, -1.0f, -0.5f, 1.0f, 0.0f, 0.0f, 1.0f, 0.0f, 0.0f, // bottom left
-1.0f, 1.0f, 0.5f, 2.0f, 1.0f, 1.0f, 0.0f, 0.0f, 1.0f // top left
};
Adapt the vertex specification and the vertex shader:
// position attribute
glVertexAttribPointer(1, 4, GL_FLOAT, GL_FALSE, 9 * sizeof(float), (void*)0);
glEnableVertexAttribArray(1);
// color attribute
glVertexAttribPointer(2, 3, GL_FLOAT, GL_FALSE, 9 * sizeof(float), (void*)(4 * sizeof(float)));
glEnableVertexAttribArray(2);
//texture attribute
glVertexAttribPointer(3, 2, GL_FLOAT, GL_FALSE, 9 * sizeof(float), (void*)(7 * sizeof(float)));
glEnableVertexAttribArray(3);
#version 330 core
layout(location = 1) in vec4 position;
layout(location = 2) in vec3 color;
layout(location = 3) in vec2 texcoord;
out vec3 Color;
out vec2 Texcoord;
void main()
{
gl_Position = position;
Texcoord = texcoord;
}
Another option to achieve the effect is to add a Z component to the geometry, e.g.:
float positions = { 1.0f, 1.0f, 0.5f,
1.0f, -1.0f, 0.0f,
-1.0f, -1.0f, -0.5f,
-1.0f, 1.0f, 0.0f };
and to compute the w component dependent on z in the vertex shader (e.g. w = z + 2.5):
#version 330 core
layout(location = 1) in vec3 position;
layout(location = 2) in vec3 color;
layout(location = 3) in vec2 texcoord;
out vec3 Color;
out vec2 Texcoord;
void main()
{
gl_Position = vec4(position, position.z + 2.5);
Texcoord = texcoord;
}

SegFault after calling glDrawArrays

I am attempting to write a sprite batcher.
Originally, I was uploading the following data to openGL in order to see if I could get anything to the screen:
static GLfloat const vertexData[] = {
//Position //Color //UV Coords
0.5f, 0.5f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f,
-0.5f, 0.5f, 1.0f, 1.0f, 1.0f, 1.0f, 0.0f, 1.0f,
0.5f, -0.5f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 0.0f,
-0.5f, -0.5f, 1.0f, 1.0f, 1.0f, 1.0f, 0.0f, 0.0f,
-0.5f, 0.5f, 1.0f, 1.0f, 1.0f, 1.0f, 0.0f, 1.0f,
0.5f, -0.5f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 0.0f
};
These are my Vertex and Fragment Shaders:
const char* vert = "#version 330 core\n"
"layout (location = 0) in vec2 vPos;\n"
"layout (location = 1) in vec4 vColor;\n"
"layout (location = 2) in vec2 vTexPos;\n"
"\n"
"out vec4 fColor;\n"
"out vec2 fTexPos;\n"
"\n"
"void main() {\n"
" gl_Position = vec4(vPos.xy, 0.0, 1.0);\n"
" fColor = vColor;\n"
" fTexPos = vec2(vTexPos.s, 1.0 - vTexPos.t);\n"
"}";
const char* frag = "#version 330 core\n"
"in vec4 fColor;\n"
"in vec2 fTexPos;\n"
"out vec4 finalColor;\n"
"\n"
"uniform sampler2D textureUniform;\n"
"\n"
"void main() {\n"
" vec4 textureColor = texture(textureUniform, fTexPos);"
" finalColor = fColor * textureColor;\n"
"}";
I set up the VAO and VBO like so:
auto vaoID_ = 0;
auto vboID_ = 0;
glGenVertexArrays(1, &vaoID_);
glGenBuffers(1, &vboID_);
glBindVertexArray(vaoID_);
glBindBuffer(GL_ARRAY_BUFFER, vboID_);
glEnableVertexAttribArray(0);
glEnableVertexAttribArray(1);
glEnableVertexAttribArray(2);
glVertexAttribPointer(0, 2, GL_FLOAT, GL_FALSE, 8, (void*)0); // Screen Position
glVertexAttribPointer(1, 4, GL_FLOAT, GL_FALSE, 8, (void*)(2 *sizeof(GL_FLOAT)); //Color
glVertexAttribPointer(2, 2, GL_FLOAT, GL_FALSE, 8, (void*)(6 * sizeof(GL_FLOAT)); //UV
And finally, on each loop, I would orphan the buffer and then sub the data in.
glBufferData(GL_ARRAY_BUFFER, sizeof(vertexData), nullptr, GL_DYNAMIC_DRAW);
glBufferSubData(GL_ARRAY_BUFFER, 0, sizeof(vertexData), vertexData);
glDrawArrays(GL_TRIANGLES, 0, 6);
This worked fine!
So, I attempted to begin to write a basic sprite batch.
I grouped my vertex data into a POD struct:
struct VertexData {
struct {
GLfloat screenx;
GLfloat screeny;
} position;
struct {
GLfloat r;
GLfloat g;
GLfloat b;
GLfloat a;
} color;
struct {
GLfloat texu;
GLfloat texv;
} uv;
}
And used a vector to batch everything together:
std::vector<VertexData> spritebatch_
I changed the calls to glVertexAttribPointer to match this struct (although I think this change was unneeded?)
glVertexAttribPointer(0, 2, GL_FLOAT, GL_FALSE, sizeof(VertexData), (void*)offsetof(VertexData, position)); // Screen Position
glVertexAttribPointer(1, 4, GL_FLOAT, GL_FALSE, sizeof(VertexData), (void*)offsetof(VertexData, color)); //Color
glVertexAttribPointer(2, 2, GL_FLOAT, GL_FALSE, sizeof(VertexData), (void*)offsetof(VertexData, uv)); //UV
To test it, on every loop I would feed it the same data that was in the static GLfloat array:
spritebatch_.push_back({0.5f, 0.5f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f});
spritebatch_.push_back({-0.5f, 0.5f, 1.0f, 1.0f, 1.0f, 1.0f, 0.0f, 1.0f});
spritebatch_.push_back({0.5f, -0.5f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 0.0f});
spritebatch_.push_back({-0.5f, -0.5f, 1.0f, 1.0f, 1.0f, 1.0f, 0.0f, 0.0f});
spritebatch_.push_back({-0.5f, 0.5f, 1.0f, 1.0f, 1.0f, 1.0f, 0.0f, 1.0f});
spritebatch_.push_back({0.5f, -0.5f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 0.0f});
And then attempt to orphan the buffer and sub the data in:
glBufferData(GL_VERTEX_ARRAY, spritebatch_.size() * sizeof(VertexData), nullptr, GL_DYNAMIC_DRAW);
glBufferSubData(GL_VERTEX_ARRAY, 0, spritebatch_.size() * sizeof(VertexData), spritebatch_.data());
glDrawArrays(GL_TRIANGLES, 0, spritebatch_.size());
Finally, I would reset the spritebatch_: spritebatch_.clear();
However, this segfaults. Am I doing something incorrect when I go to orphan the buffer and sub the data in? The data formats still match (VertexData POD members are all GLfloats still), the size of spritebatch_ is still 6 (6 vertices to make a square), and the location of that data is still the same within each vertex.
The 5th parameter of glVertexAttribPointer is the "stride", the byte offset between consecutive generic vertex attributes.
So the change from
glVertexAttribPointer(0, 2, GL_FLOAT, GL_FALSE, 8, (void*)0); // Screen Position
glVertexAttribPointer(1, 4, GL_FLOAT, GL_FALSE, 8, (void*)(2 *sizeof(GL_FLOAT)); //Color
glVertexAttribPointer(2, 2, GL_FLOAT, GL_FALSE, 8, (void*)(6 * sizeof(GL_FLOAT)); //UV
to
glVertexAttribPointer(0, 2, GL_FLOAT, GL_FALSE, sizeof(VertexData), (void*)offsetof(VertexData, position)); // Screen Position
glVertexAttribPointer(1, 4, GL_FLOAT, GL_FALSE, sizeof(VertexData), (void*)offsetof(VertexData, color)); //Color
glVertexAttribPointer(2, 2, GL_FLOAT, GL_FALSE, sizeof(VertexData), (void*)offsetof(VertexData, uv)); //UV
is important, because sizeof(VertexData) is not 8, but it is 8*sizeof(GL_FLOAT) .
The first parameter of glBufferData (and likewise glBufferSubData) has to be the enumeration constant for the buffer target, which is one of GL_ARRAY_BUFFER, GL_ELEMENT_ARRAY_BUFFER, ... .
GL_VERTEX_ARRAY identifies the vertex array for the fixed function client-side capability - See glEnableClientState.
Change:
glBufferData(GL_VERTEX_ARRAY, ...);
glBufferData(GL_ARRAY_BUFFER, ...);
glBufferSubData(GL_VERTEX_ARRAY, ...);
glBufferSubData(GL_ARRAY_BUFFER, ...);
Note, if you would check for OpenGL errors (glGetError or Debug Output), you would get a GL_INVALID_ENUM error.
The data store for the array buffer is not created, this causes the segment fault.

SDL_TTF draws garbage

I asked a question the other day about rendering TTF fonts using SDL, and was pointed towards SDL_TTF. I've tried using the SDL_TTF library, but all I'm getting is garbage on screen.
I have included my shaders, which are very simple for this program, and also the snipped I'm using to load the text into surface, and to bind it to the texture. I'm not trying to do anything crazy here at all. Is there anything I'm doing wrong you can see? I'm not really too sure how to debug shaders etc.
Fragment Shader (frag.glsl):
#version 330
in vec2 texCoord;
in vec4 fragColor;
out vec3 finalColor;
uniform sampler2D myTextureSampler;
void main() {
finalColor = texture( myTextureSampler, texCoord ).rgb;
}
Vertex Shader (vert.glsl)
#version 330
in vec3 vert;
in vec4 color;
in vec2 texcoord;
out vec4 fragColor;
out vec2 texCoord;
void main() {
fragColor = color;
gl_Position = vec4(vert, 1);
texCoord = texcoord;
}
Font Loading (loadFont.cpp)
//Initialise TTF
if( TTF_Init() == -1 )
throw std::runtime_error("SDL_TTF failed to initialise.");
//Load the texture
font = TTF_OpenFont( filePath.c_str(), 12 );
if(!font)
throw std::runtime_error("Couldn't load: "+ filePath);
TTF_SetFontStyle(font, TTF_STYLE_NORMAL);
surface = TTF_RenderUTF8_Blended(font, "Hello", this->textColor);
Uint8 colors = surface->format->BytesPerPixel;
int texture_format;
if (colors == 4) { // alpha
if (surface->format->Rmask == 0x000000ff)
texture_format = GL_RGBA;
else
texture_format = GL_BGRA;
} else { // no alpha
if (surface->format->Rmask == 0x000000ff)
texture_format = GL_RGB;
else
texture_format = GL_BGR;
}
glGenTextures(1, &texture);
glBindTexture(GL_TEXTURE_2D, texture);
glTexImage2D(GL_TEXTURE_2D, 0, colors, surface->w, surface->h, 0,
texture_format, GL_UNSIGNED_BYTE, surface->pixels);
SDL_FreeSurface(surface);
Vertex Attribute Setup
GLfloat vertices[] = {
//X Y Z R G B A U V
-1.0f, -1.0f, 0.0f, 1.0f, 0.0f, 0.0f, 0.5f, 0.f, 1.f,
1.0f, -1.0f, 0.0f, 1.0f, 0.0f, 0.0f, 0.5f, 1.f, 1.f,
-1.0f, -0.4f, 0.0f, 1.0f, 0.0f, 0.0f, 0.5f, 0.f, 0.f,
1.0f, -1.0f, 0.0f, 1.0f, 0.0f, 0.0f, 0.5f, 1.f, 1.f,
1.0f, -0.4f, 0.0f, 1.0f, 0.0f, 0.0f, 0.5f, 1.f, 0.f,
-1.0f, -0.4f, 0.0f, 1.0f, 0.0f, 0.0f, 0.5f, 0.f, 0.f
};
glGenVertexArrays(1, &_vao);
glBindVertexArray(_vao);
glGenBuffers(1, &_vbo);
glBindBuffer(GL_ARRAY_BUFFER, _vbo);
glBufferData(GL_ARRAY_BUFFER, sizeof(vertices), vertices, GL_STATIC_DRAW);
glEnableVertexAttribArray(program->attrib("vert"));
glVertexAttribPointer(program->attrib("vert"), 3, GL_FLOAT, GL_FALSE, 9*sizeof(GLfloat), NULL);
glEnableVertexAttribArray(program->attrib("color"));
glVertexAttribPointer(program->attrib("color"), 4, GL_FLOAT, GL_TRUE, 9*sizeof(GLfloat), (const GLvoid*)(3 * sizeof(GLfloat)));
glEnableVertexAttribArray(program->attrib("texcoord"));
glVertexAttribPointer(program->attrib("texcoord"), 2, GL_FLOAT, GL_TRUE, 9*sizeof(GLfloat), (const GLvoid*)(7 * sizeof(GLfloat)));
I've attached the code I'm using for the vertex attributes as per the comment below.
EDIT:
In a reply that has since been deleted, it was asked whether SDL_TTF was returning 3 or 4 channels. It's returning a BGRA image. I've tried changing my fragment shader to
Fragment shader
#version 330
in vec2 texCoord;
in vec4 fragColor;
out vec4 finalColor;
uniform sampler2D myTextureSampler;
void main() {
finalColor = texture( myTextureSampler, texCoord ).rgba;
}
Note the vec4, and using rgba rather than rgb. This just leads to a black rectangle.
I also tried generating a surface using SDL_LoadBMP(), which gives the exact same results.
Your call to
glTexImage2D(GL_TEXTURE_2D, 0, colors, surface->w, surface->h, 0,
texture_format, GL_UNSIGNED_BYTE, surface->pixels);
Is a problem.
The third paramter is wrong:
http://www.opengl.org/sdk/docs/man/xhtml/glTexImage2D.xml
internalFormat
Specifies the number of color components in the texture.
Must be one of base internal formats given in Table 1,
one of the sized internal formats given in Table 2, or one
of the compressed internal formats given in Table 3, below.
I suspect you want yours to be GL_RGBA (or what format you want opengl to store your texture in)
EDIT:
I just saw it now, but you are using only 3 channels in your fragment shader. The Blended function requires that you use 4 channels otherwise the alpha channel is going to be messed up.
I think your "main" problem lies somewhere else though as that should just make the colour constant over the entire surface. (Not the "garbage" you are seeing)
I quickly wrote this program that mostly does what you're doing. I think it will help you more than my repository, as it's straight to the point.
#include <GL/glew.h>
#include <SDL2/SDL.h>
#include <SDL2/SDL_opengl.h>
#include <SDL2/SDL_ttf.h>
#include <string>
#include <iostream>
using namespace std;
SDL_Window *window = NULL;
SDL_GLContext context = NULL;
TTF_Font* font = NULL;
SDL_Surface* surface = NULL;
//OpenGL Objects
GLuint vao;
GLuint vbo;
GLuint texture;
//Shader Objects
GLuint program;
GLuint vs;
GLuint fs;
//Sampler Object
GLuint uniformSampler;
//Callback Function
APIENTRY GLvoid debugMessageCallbackFunction( GLenum source, GLenum type, GLuint id, GLenum severity,
GLsizei length, const GLchar* message, GLvoid* userParam)
{
cerr << endl << "\t" << message << endl;
}
//The shaders are identical to yours
const string fragmentShaderString =
"#version 130\n" // My laptop can't do OpenGL 3.3 so 3.0 will have to do
"in vec2 texCoord;\n"
"in vec4 fragColor;\n"
"\n"
"out vec4 finalColor;\n"
"\n"
"uniform sampler2D myTextureSampler;\n"
"void main() {\n"
" finalColor = texture( myTextureSampler, texCoord ) * fragColor;\n"
"}";
const string vertexShaderString =
"#version 130\n"
"\n"
"in vec3 vert;\n"
"in vec4 color;\n"
"in vec2 texcoord;\n"
"\n"
"out vec4 fragColor;\n"
"out vec2 texCoord;\n"
"void main() {\n"
" fragColor = color;\n"
" gl_Position = vec4(vert, 1);\n"
" texCoord = texcoord;\n"
"}\n";
//Your vertices, but I changed alpha to 1.0f
const GLfloat vertices[] =
{
//X Y Z R G B A U V
-1.0f, -1.0f, 1.0f, 1.0f, 0.0f, 0.0f, 1.0f, 0.f, 1.f,
1.0f, -1.0f, 1.0f, 1.0f, 0.0f, 0.0f, 1.0f, 1.f, 1.f,
-1.0f, -0.4f, 1.0f, 1.0f, 0.0f, 0.0f, 1.0f, 0.f, 0.f,
1.0f, -1.0f, 1.0f, 1.0f, 0.0f, 0.0f, 1.0f, 1.f, 1.f,
1.0f, -0.4f, 1.0f, 1.0f, 0.0f, 0.0f, 1.0f, 1.f, 0.f,
-1.0f, -0.4f, 1.0f, 1.0f, 0.0f, 0.0f, 1.0f, 0.f, 0.f
};
int main(int argc, char* args[])
{
//Create Window and Context
window = SDL_CreateWindow("SDL Text with OpenGL", 0, 0, 640, 480, SDL_WINDOW_OPENGL);
//Set Core Context
SDL_GL_SetAttribute( SDL_GL_CONTEXT_MAJOR_VERSION, 3 );
SDL_GL_SetAttribute( SDL_GL_CONTEXT_MINOR_VERSION, 1 );
SDL_GL_SetAttribute(SDL_GL_CONTEXT_PROFILE_MASK, SDL_GL_CONTEXT_PROFILE_CORE);
context = SDL_GL_CreateContext(window);
//Simple OpenGL State Settings
glViewport( 0.f, 0.f, 640.f, 480.f);
glClearColor( 0.f, 0.f, 0.f, 1.f);
//Init Glew
//Set glewExperimental for Core Context
glewExperimental=true;
glewInit();
//Set Blending
//Required so that the alpha channels show up from the surface
glEnable(GL_BLEND);
glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA);
//Simple callback function for GL errors
glDebugMessageCallbackARB(debugMessageCallbackFunction, NULL);
//Create Shaders
vs = glCreateShader(GL_VERTEX_SHADER);
fs = glCreateShader(GL_FRAGMENT_SHADER);
//Source Pointers
const GLchar* vsSource= &vertexShaderString[0];
const GLchar* fsSource = &fragmentShaderString[0];
//Set Source
glShaderSource(vs, 1, &vsSource, NULL);
glShaderSource(fs, 1, &fsSource, NULL);
//Compile Shaders
glCompileShader(fs);
glCompileShader(vs);
//Create Shader Program
program = glCreateProgram();
//Attach Shaders to Program
glAttachShader(program, vs);
glAttachShader(program, fs);
//No need for shaders anymore
glDeleteShader(vs);
glDeleteShader(fs);
//Set Attribute Locations
glBindAttribLocation(program, 0, "vert");
glBindAttribLocation(program, 1, "color");
glBindAttribLocation(program, 2, "texcoord");
//Link Program
glLinkProgram(program);
//Setup VAO and VBO
glGenVertexArrays(1, &vao);
glGenBuffers(1, &vbo);
glBindVertexArray(vao);
glBindBuffer(GL_ARRAY_BUFFER, vbo);
glBufferData(GL_ARRAY_BUFFER, sizeof(GLfloat) * 9 * 6, vertices, GL_STATIC_DRAW);
glEnableVertexAttribArray(0);
glEnableVertexAttribArray(1);
glEnableVertexAttribArray(2);
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 9 * sizeof(GLfloat), NULL);
glVertexAttribPointer(1, 4, GL_FLOAT, GL_FALSE, 9 * sizeof(GLfloat),(GLvoid*)(3*sizeof(GLfloat)));
glVertexAttribPointer(2, 2, GL_FLOAT, GL_FALSE, 9 * sizeof(GLfloat),(GLvoid*)(7*sizeof(GLfloat)));
//Init TTF
TTF_Init();
//Open Font
font = TTF_OpenFont("DroidSansFallbackFull.ttf", 30);
SDL_Color color = {255, 255, 255, 255};
//Create Surface
surface = TTF_RenderUTF8_Blended(font, "This is TEXT!", color);
//Your format checker
GLenum format = (surface->format->BytesPerPixel==3)?GL_RGB:GL_RGBA;
//Create OpenGL Texture
glGenTextures(1, &texture);
glBindTexture(GL_TEXTURE_2D, texture);
glTexImage2D( GL_TEXTURE_2D, 0, format, surface->w, surface->h, 0,
format, GL_UNSIGNED_BYTE, surface->pixels);
//Set Some basic parameters
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
//Set up Sampler
glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_2D, texture);
uniformSampler = glGetUniformLocation(program, "myTextureSampler");
//It defaults to using GL_TEXTURE0, so it's not necessary to set it
//in this program it's generally a good idea.
//--------------------------------------------------------------------------------------
// DRAW STAGE
//--------------------------------------------------------------------------------------
glUseProgram(program);
//glBindVertexArray(vao); - still in use
glClear(GL_COLOR_BUFFER_BIT);
glDrawArrays(GL_TRIANGLES, 0, 6);
SDL_GL_SwapWindow(window);
//Sleep for 2s before closing
SDL_Delay(2000);
}
I didn't do any error checking or close any of the resources since it's just meant to be a reference and not meant to be used.
Usually I don't use glew, but writing code to manually get the functions for such a small program seemed pointless.
It compiles with
g++ source.cpp -g -lSDL2 -lSDL2_ttf -lGL -lGLEW -o demo
on Linux. You might need to make some adjustments for Windows (header files might change slightly and the libraries will change as well), and I think it will work without change on Mac.
EDIT 2:
To compile it on windows with mingw you need to add APIENTRY to callback function and the main should have arguments. Changed code to reflect this.
Tested it and it works on both Windows and Linux. (Provided that your implementation has access to the GL_ARB_debug_output extension; if not, just comment that call out.)
This works nicely; you only have to edit the const GLfloat vertices[] array to change the text color consistently. For solid-color text, set all RGB components in the array to 1.0f and render the texture in that color. For multicolored text, first render the texture in white with SDL_Color color = { 255, 255, 255, 255 };, then edit the array as shown below.
//Derive the quad's half-height from the text surface's aspect ratio so the
//texture is not stretched: the quad spans x in [-1, 1] and y in
//[-height/width, height/width].
float width = (float)surface->w;
float height = (float)surface->h;
// alpha to 1.0f
//Two triangles with differing per-vertex RGB values to tint the
//white-rendered text with a color gradient.
//NOTE(review): row 3 has Z = 0.0f while every other row uses 1.0f — with no
//depth test and w = 1 this still renders, but it looks unintentional; verify.
const GLfloat vertices[] = {
// X Y Z R G B A U V
-1.0, -height / width, 1.0f, 1.0f, 0.0f, 1.0f, 1.0f, 0.0f, 1.0f,
1.0f, -height / width, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f,
-1.0f, height / width, 0.0f, 0.0f, 1.0f, 1.0f, 1.0f, 0.0f, 0.0f,
1.0f, -height / width, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f,
1.0f, height / width, 1.0f, 1.0f, 0.0f, 0.0f, 1.0f, 1.0f, 0.0f,
-1.0f, height / width, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 0.0f, 0.0f
};