Straight copying of an input texture to an output texture produces unexpected results in OpenGL

This is my first OpenGL program. I have written almost the same thing in WebGL, where it works, and I am now translating it to OpenGL. However, I have pieced together sample code from many places and I don't know if everything makes sense. Obviously there's something wrong that I can't figure out, and it causes the output to be close to what I expect but still different.
I am creating a 2x2 texture, filling it with the data { 1.0, 2.0, 3.0, 4.0 }, and using a simple shader to copy it to an output texture of the same size. What I get is:
Result: [1,2,0,4,]
Below I'm pasting my code and my shader sources as well. Thanks for reviewing this.
Vertex Shader:
#version 330 core
layout (location = 0) in vec3 position;
layout (location = 1) in vec2 texCoord;
out vec2 TexCoord;
void main()
{
gl_Position = vec4(position, 1.0f);
TexCoord = texCoord;
}
Fragment Shader:
#version 330 core
precision highp float;
in vec2 TexCoord;
out vec4 TexelValue;
// Texture samplers
uniform sampler2D A;
void main()
{
TexelValue = vec4(texture(A, TexCoord).r);
}
Code with error checking removed:
// Window dimensions
const GLuint WIDTH = 800, HEIGHT = 600;
void createTexture(GLuint texture, int width, int height, GLint internalFormat, GLenum format, GLenum type, const void *data)
{
glBindTexture(GL_TEXTURE_2D, texture);
// Set our texture parameters
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
// set texenv to replace instead of the default modulate
glTexEnvi(GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE, GL_REPLACE);
glTexImage2D(GL_TEXTURE_2D, 0, internalFormat, width, height, 0, format, type, data);
}
void uploadAndBindGeometry(const void* vertices, int verticesByteLength)
{
GLuint VBO, VAO;
glGenVertexArrays(1, &VAO);
glGenBuffers(1, &VBO);
glBindVertexArray(VAO);
glBindBuffer(GL_ARRAY_BUFFER, VBO);
glBufferData(GL_ARRAY_BUFFER, verticesByteLength, vertices, GL_STATIC_DRAW);
// Position attribute
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 5 * sizeof(GLfloat), (GLvoid *)0);
glEnableVertexAttribArray(0);
// Texture Coordinate attribute
glVertexAttribPointer(1, 2, GL_FLOAT, GL_FALSE, 5 * sizeof(GLfloat), (GLvoid *)(3 * sizeof(GLfloat)));
glEnableVertexAttribArray(1);
}
void bindInputTexture(int index, GLuint texture, GLint location)
{
glActiveTexture(GL_TEXTURE0 + index);
glBindTexture(GL_TEXTURE_2D, texture);
glUniform1i(location, index);
}
void initFBO(GLuint *fb, int width, int height) {
glGenFramebuffersEXT(1, fb);
glBindFramebufferEXT(GL_FRAMEBUFFER_EXT, *fb);
glViewport(0, 0, width, height);
}
// The MAIN function, from here we start the application and run the game loop
int main()
{
glfwInit();
GLuint fb;
// Set all the required options for GLFW
glfwWindowHint(GLFW_CONTEXT_VERSION_MAJOR, 4);
glfwWindowHint(GLFW_CONTEXT_VERSION_MINOR, 5);
glfwWindowHint(GLFW_OPENGL_PROFILE, GLFW_OPENGL_CORE_PROFILE);
glfwWindowHint(GLFW_OPENGL_FORWARD_COMPAT, GL_TRUE);
glfwWindowHint(GLFW_RESIZABLE, GL_FALSE);
// Create a GLFWwindow object that we can use for GLFW's functions
GLFWwindow *window = glfwCreateWindow(WIDTH, HEIGHT, "LearnOpenGL", nullptr, nullptr);
int screenWidth, screenHeight;
glfwGetFramebufferSize(window, &screenWidth, &screenHeight);
glfwMakeContextCurrent(window);
// Set this to true so GLEW knows to use a modern approach to retrieving function pointers and extensions
glewExperimental = GL_TRUE;
glewGetExtension("GL_ARB_texture_float");
glewGetExtension("GL_EXT_framebuffer_object");
glewGetExtension("GL_ARB_color_buffer_float");
glDisable(GL_BLEND);
glDisable(GL_ALPHA);
glDisable(GL_DEPTH);
glDisable(GL_STENCIL);
// Build and compile our shader program
Shader ourShader("core.vs", "core.frag");
// Set up vertex data (and buffer(s)) and attribute pointers
GLfloat vertices[] =
{
// Positions // Texture Coords
1.0f, 1.0f, 0.0f, 1.0f, 1.0f, // Top Right
1.0f, -1.0f, 0.0f, 1.0f, 0.0f, // Bottom Right
-1.0f, -1.0f, 0.0f, 0.0f, 0.0f, // Bottom Left
-1.0f, 1.0f, 0.0f, 0.0f, 1.0f // Top Left
};
uploadAndBindGeometry(vertices, sizeof(vertices));
// Load and create textures
float adata[] = { 1.0,2.0,3.0,4.0 };
int adims[] = { 2, 2 };
int cdims[] = { 2, 2 };
float cdata[2 * 2] = { 0 };
// ===================
// Texture
// ===================
GLuint texture[2];
glGenTextures(2, &texture[0]);
createTexture(texture[0], adims[1], adims[0], GL_R32F, GL_RED, GL_FLOAT, (void*)adata);
createTexture(texture[1], cdims[1], cdims[0], GL_R32F, GL_RED, GL_FLOAT, (void*)NULL);
// Draw the triangle
ourShader.Use();
initFBO(&fb, cdims[1], cdims[0]);
glBindTexture(GL_TEXTURE_2D, texture[1]);
glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, texture[1], 0);
int locationA = glGetUniformLocation(ourShader.Program, "A");
bindInputTexture(0, texture[0], locationA);
glDrawArrays(GL_TRIANGLE_STRIP, 0, 4);
glFlush();
// read output
glReadBuffer(GL_COLOR_ATTACHMENT0);
glReadPixels(0, 0, cdims[1], cdims[0], GL_RED, GL_FLOAT, cdata);
std::cout << "Result: [";
for (int i = 0; i < 2 * 2; ++i)
{
std::cout << cdata[i] << ",";
}
std::cout << "]" << std::endl;
// Terminate GLFW, clearing any resources allocated by GLFW.
glfwTerminate();
return EXIT_SUCCESS;
}

When you draw the rectangle, you use the primitive type GL_TRIANGLE_STRIP (triangle strip).
The order of the vertex coordinates in a triangle strip looks like this:
0     2     4
x-----x-----x
|    /|    /|
|   / |   / |
x-----x-----x
1     3     5
The vertices from your question
GLfloat vertices[] =
{
// Positions // Texture Coords
1.0f, 1.0f, 0.0f, 1.0f, 1.0f, // Top Right
1.0f, -1.0f, 0.0f, 1.0f, 0.0f, // Bottom Right
-1.0f, -1.0f, 0.0f, 0.0f, 0.0f, // Bottom Left
-1.0f, 1.0f, 0.0f, 0.0f, 1.0f // Top Left
};
have the following order:
3     0
x-----x
|     |
|     |
x-----x
2     1
Vertices in this order would draw a quad if the primitive type GL_TRIANGLE_FAN were used. Drawn as a GL_TRIANGLE_STRIP, however, they form two triangles that do not cover the full quad, which is why one texel of the output is never written and stays 0.
The new and changed vertices
GLfloat vertices[] =
{
-1.0, 1.0, 0.0, 0.0, 1.0, // upper left
-1.0, -1.0, 0.0, 0.0, 0.0, // lower left
1.0, 1.0, 0.0, 1.0, 1.0, // upper right
1.0, -1.0, 0.0, 1.0, 0.0 // lower right
};
have the proper order for a triangle strip:
0     2
x-----x
|    /|
|   / |
x-----x
1     3
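As a side note, the original order (top right, bottom right, bottom left, top left) is a valid fan order, so an alternative fix would have been to keep the vertices and change the primitive type instead. A minimal sketch of that variant (not part of the original answer):
// Keep the original TR, BR, BL, TL vertex order and draw it as a fan:
// a fan emits the triangles (0,1,2) and (0,2,3), which together cover the quad.
glDrawArrays(GL_TRIANGLE_FAN, 0, 4);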

Changed my coordinates to these and it started working. Of course I don't understand why:
GLfloat vertices[] =
{
-1.0, 1.0, 0.0, 0.0, 1.0, // upper left
-1.0, -1.0, 0.0, 0.0, 0.0, // lower left
1.0, 1.0, 0.0, 1.0, 1.0, // upper right
1.0, -1.0, 0.0, 1.0, 0.0 // lower right
};

Related

OpenGL fragment shader crashes with EXC_BAD_ACCESS on glDrawArrays

I'm using OpenGL 3.3 on Mac OSX 11.5.2. I have 6 fragment shaders: 3 that work and 3 that crash with EXC_BAD_ACCESS. They all use the same vertex shader. There have been many questions about this problem, but I've made sure to:
Unbind my previous state.
Make sure attribute arrays are enabled and used.
Use only a single thread, i.e. no concurrency.
The shaders that crash all use an array of offsets and a kernel to do post-processing effects. If I replace texture_coordinate.st + offsets[i] with just texture_coordinate.st, it works just fine. As this makes the offsets array unused and optimized out, I suspect it could be the register allocation bug mentioned in the comments of a similar question, but I can't confirm that.
I've also checked for other problems, such as loading the shaders in different orders and using different compiler flags, but the crashes are consistent.
Here's a minimal, reproducible example (it depends on glad and glfw3 and is compiled as C++17) with one of the problematic shaders.
#include <cassert> // needed for the assert() calls below
#include <glad/glad.h>
#include <GLFW/glfw3.h>
const char VERTEX_POST[] = R"(
#version 330 core
layout (location = 0) in vec2 position;
layout (location = 1) in vec2 in_texture_coordinate;
out vec2 texture_coordinate;
void main()
{
gl_Position = vec4(position.x, position.y, 0.0, 1.0);
texture_coordinate = in_texture_coordinate;
}
)";
const char FRAGMENT_POST[] = R"(
#version 330 core
in vec2 texture_coordinate;
out vec4 FragColor;
uniform sampler2D image;
const float offset = 1.0 / 300.0;
void main()
{
vec2 offsets[9] = vec2[](
vec2(-offset, offset), // top-left
vec2( 0.0f, offset), // top-center
vec2( offset, offset), // top-right
vec2(-offset, 0.0f), // center-left
vec2( 0.0f, 0.0f), // center-center
vec2( offset, 0.0f), // center-right
vec2(-offset, -offset), // bottom-left
vec2( 0.0f, -offset), // bottom-center
vec2( offset, -offset) // bottom-right
);
float kernel[9] = float[](
-1, -1, -1,
-1, 9, -1,
-1, -1, -1
);
vec3 sample_texture[9];
for(int i = 0; i < 9; i++)
sample_texture[i] = vec3(texture(image, texture_coordinate + offsets[i]));
vec3 color = vec3(0.0);
for (int i = 0; i < 9; i++)
color += sample_texture[i] * kernel[i];
FragColor = vec4(color, 1.0);
}
)";
float vertices[] = {
// Positions // Texture coords
-1.0f, -1.0f, 0.0f, 0.0f, 0.0f,
1.0f, 1.0f, 0.0f, 1.0f, 1.0f,
-1.0f, 1.0f, 0.0f, 0.0f, 1.0f,
-1.0f, -1.0f, 0.0f, 0.0f, 0.0f,
1.0f, -1.0f, 0.0f, 1.0f, 0.0f,
1.0f, 1.0f, 0.0f, 1.0f, 1.0f
};
GLuint create_shader(const char* source, GLenum type)
{
GLuint id = glCreateShader(type);
glShaderSource(id, 1, &source, nullptr);
glCompileShader(id);
int success;
glGetShaderiv(id, GL_COMPILE_STATUS, &success);
assert(success);
return id;
}
int main()
{
// ---- INIT GLFW & GLAD ----
glfwInit();
glfwWindowHint(GLFW_CONTEXT_VERSION_MAJOR, 3);
glfwWindowHint(GLFW_CONTEXT_VERSION_MINOR, 3);
glfwWindowHint(GLFW_OPENGL_PROFILE, GLFW_OPENGL_CORE_PROFILE);
glfwWindowHint(GLFW_OPENGL_FORWARD_COMPAT, GL_TRUE);
glfwWindowHint(GLFW_OPENGL_DEBUG_CONTEXT, GLFW_TRUE);
GLFWwindow* window = glfwCreateWindow(800, 800, "Temp", nullptr, nullptr);
if (window == nullptr) return -1;
glfwMakeContextCurrent(window);
if (!gladLoadGLLoader((GLADloadproc)glfwGetProcAddress)) return -1;
// ---- CREATE QUAD ----
GLuint quad, vbo;
glGenVertexArrays(1, &quad);
glGenBuffers(1, &vbo);
glBindVertexArray(quad);
glBindBuffer(GL_ARRAY_BUFFER, vbo);
glBufferData(GL_ARRAY_BUFFER, sizeof(vertices), vertices, GL_STATIC_DRAW);
glEnableVertexAttribArray(0);
glEnableVertexAttribArray(1);
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 5 * sizeof(GLfloat), (void*)0);
glVertexAttribPointer(1, 2, GL_FLOAT, GL_FALSE, 5 * sizeof(GLfloat), (void*)(3 * sizeof(float)));
glBindVertexArray(0);
glBindBuffer(GL_ARRAY_BUFFER, 0);
// ---- CREATE TEXTURE ----
GLuint texture;
glGenTextures(1, &texture);
glBindTexture(GL_TEXTURE_2D, texture);
char data[800 * 800 * 4] = {};
for (char& i : data) i = 127;
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, 800, 800, 0, GL_RGBA, GL_UNSIGNED_BYTE, data);
glGenerateMipmap(GL_TEXTURE_2D);
glBindTexture(GL_TEXTURE_2D, 0);
// ---- CREATE SHADER PROGRAM ----
GLuint vertex_shader = create_shader(VERTEX_POST, GL_VERTEX_SHADER);
GLuint fragment_shader = create_shader(FRAGMENT_POST, GL_FRAGMENT_SHADER);
GLuint program = glCreateProgram();
glAttachShader(program, vertex_shader);
glAttachShader(program, fragment_shader);
glLinkProgram(program);
glValidateProgram(program);
int success;
glGetProgramiv(program, GL_LINK_STATUS, &success);
assert(success);
// ---- MAIN LOOP ----
while (!glfwWindowShouldClose(window))
{
glBindFramebuffer(GL_FRAMEBUFFER, 0);
glClear(GL_COLOR_BUFFER_BIT);
glUseProgram(program);
glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_2D, texture);
glUniform1i(glGetUniformLocation(program, "image"), 0);
glBindVertexArray(quad);
glDrawArrays(GL_TRIANGLES, 0, 6);
glfwSwapBuffers(window);
glfwPollEvents();
}
return 0;
}
The example gives me a grey image when the offsets are not included, and otherwise it exhibits the crash. What could be causing this?

Textures not drawing OpenGL

I am having some strange behavior when trying to draw a texture in OpenGL. Currently, all this program does is draw the background color, with no indication of a texture being drawn. I have just moved from Visual Studio (where this code produces the correct output) to compiling at the command prompt. This code should color the background and draw one texture in the center of the screen.
I am concerned that I may have supplied the incorrect libraries for compilation, since as far as I can tell everything I am doing is the same. Other libraries, however, always reported that they were incompatible.
Main code:
#define GLEW_STATIC
#include <GL/glew.h> // window management library
#include <GL/glfw3.h>
#include <GL/glm.hpp>
#include <GL/gtc/matrix_transform.hpp> //
#define STB_IMAGE_IMPLEMENTATION
#include "stb_image.h"
int main(int argc, char** argv){
//Initialize GLFW and GLEW...
//Setup and combine shaders...
GLint vertex_att = glGetAttribLocation(program, "vertex");
glVertexAttribPointer(vertex_att, 2, GL_FLOAT, GL_FALSE, 7*sizeof(GLfloat), 0);
glEnableVertexAttribArray(vertex_att);
GLint color_att = glGetAttribLocation(program, "color");
glVertexAttribPointer(color_att, 3, GL_FLOAT, GL_FALSE, 7*sizeof(GLfloat), (void *) (2 *sizeof(GLfloat)));
glEnableVertexAttribArray(color_att);
GLint tex_att = glGetAttribLocation(program, "uv");
glVertexAttribPointer(tex_att, 2, GL_FLOAT, GL_FALSE, 7*sizeof(GLfloat), (void *) (5 *sizeof(GLfloat)));
glEnableVertexAttribArray(tex_att);
glUseProgram(program);
GLuint texture;
glGenTextures(1, &texture);
setthisTexture(texture, "./black.png");
// Create geometry of the square
int size = CreateSquare();
while (!glfwWindowShouldClose(window)){
// Clear background
glClearColor(viewport_background_color_g[0],
viewport_background_color_g[1],
viewport_background_color_g[2], 0.0);
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
//set displacement - 'program' being the shader program
int matrixLocation = glGetUniformLocation(program, "x");
glm::mat4 translate = glm::mat4();
translate = glm::translate(translate, glm::vec3(0.0f, 0.0f, 0.0f));
glUniformMatrix4fv(matrixLocation, 1, GL_FALSE, &translate[0][0]);
glBindTexture(GL_TEXTURE_2D, texture);
glDrawElements(GL_TRIANGLES, size, GL_UNSIGNED_INT, 0);
glfwPollEvents();
glfwSwapBuffers(window);
}
}
Vertex Shader:
#version 130
in vec2 vertex;
in vec3 color;
in vec2 uv;
out vec2 uv_interp;
// Uniform (global) buffer
uniform mat4 x;
// Attributes forwarded to the fragment shader
out vec4 color_interp;
void main(){
vec4 t;
t = vec4(vertex, 0.0, 1.0);
gl_Position = x*t;
color_interp = vec4(color, 1.0);
uv_interp = uv;
}
Fragment Shader:
#version 130
in vec4 color_interp;
in vec2 uv_interp;
uniform sampler2D onetex;
void main(){
vec4 color = texture2D(onetex, uv_interp);
gl_FragColor = vec4(color.r,color.g,color.b,color.a);
if(gl_FragColor.a < 0.9){
discard;
}
}
setthisTexture:
void setthisTexture(GLuint w, const char *fname)
{
glBindTexture(GL_TEXTURE_2D, w);
int width, height, nrChannels;
unsigned char* image = stbi_load(fname, &width, &height, &nrChannels, 0);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA8, width, height, 0, GL_RGBA, GL_UNSIGNED_BYTE, image);
stbi_image_free(image);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
}
CreateSquare:
int CreateSquare(void) {
// The face of the square is defined by four vertices and two triangles
// Number of attributes for vertices and faces
// const int vertex_att = 7; // 7 attributes per vertex: 2D (or 3D) position (2), RGB color (3), 2D texture coordinates (2)
// const int face_att = 3; // Vertex indices (3)
GLfloat vertex[] = {
// square (two triangles)
// Position Color Texcoords
-0.5f, 0.5f, 1.0f, 0.0f, 0.0f, 0.0f, 0.0f, // Top-left
0.5f, 0.5f, 0.0f, 1.0f, 0.0f, 1.0f, 0.0f, // Top-right
0.5f, -0.5f, 0.0f, 0.0f, 1.0f, 1.0f, 1.0f, // Bottom-right
-0.5f, -0.5f, 1.0f, 1.0f, 1.0f, 0.0f, 1.0f // Bottom-left
};
GLuint face[] = {
0, 1, 2, // t1
2, 3, 0 //t2
};
GLuint vbo, ebo;
// Create buffer for vertices
glGenBuffers(1, &vbo);
glBindBuffer(GL_ARRAY_BUFFER, vbo);
glBufferData(GL_ARRAY_BUFFER, sizeof(vertex), vertex, GL_STATIC_DRAW);
// Create buffer for faces (index buffer)
glGenBuffers(1, &ebo);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, ebo);
glBufferData(GL_ELEMENT_ARRAY_BUFFER, sizeof(face), face, GL_STATIC_DRAW);
// Return number of elements in array buffer
return sizeof(face);
}
For the use of glVertexAttribPointer, either a named buffer object has to be bound to the GL_ARRAY_BUFFER target, or a pointer to the vertex data has to be passed.
In your case this means that
int size = CreateSquare();
has to be done before
GLint vertex_att = glGetAttribLocation(program, "vertex");
glVertexAttribPointer(vertex_att, 2, GL_FLOAT, GL_FALSE, 7*sizeof(GLfloat), 0);
glEnableVertexAttribArray(vertex_att);
.....
Note that in the function CreateSquare, the named buffer object vbo is bound:
glBindBuffer(GL_ARRAY_BUFFER, vbo);
which is used by the glVertexAttribPointer calls.
See OpenGL 4.6 API Compatibility Profile Specification; 10.3.9 Vertex Arrays in Buffer Objects; page 409:
A buffer object binding point is added to the client state associated with each vertex array type and index. The commands that specify the locations and organizations of vertex arrays copy the buffer object name that is bound to ARRAY_BUFFER to the binding point corresponding to the vertex array type or index being specified. For example, the VertexAttribPointer command copies the value of ARRAY_BUFFER_BINDING (the queriable name of the buffer binding corresponding to the target ARRAY_BUFFER) to the client state variable VERTEX_ATTRIB_ARRAY_BUFFER_BINDING for the specified index.
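Applied to the code from the question, a minimal sketch of the corrected order could look like this (assuming, as in the question, that program is already linked at this point):
// Create the buffers first; CreateSquare leaves vbo bound to GL_ARRAY_BUFFER.
int size = CreateSquare();
// The glVertexAttribPointer calls now capture that buffer binding.
GLint vertex_att = glGetAttribLocation(program, "vertex");
glVertexAttribPointer(vertex_att, 2, GL_FLOAT, GL_FALSE, 7*sizeof(GLfloat), 0);
glEnableVertexAttribArray(vertex_att);
// ... set up the color and uv attributes in the same way ...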

C++ Texture not displaying correctly: Merging into 1 colour

I'm attempting to texture a VBO/VAO model cube. The cube is definitely being rendered/drawn correctly, and as far as I know I am doing everything needed to load the texture.
However, when it comes to applying the texture, it appears to take an average of all the colours in the texture and then apply that average to the entire cube. This results in the cube appearing to be "painted" with a plain, flat colour. (The screenshot and the texture image are omitted here.)
I'm at a loss as to why this is happening. Below is the code from my init, loadTexture and display functions (I did not write the loadTexture function):
Init Function
(Only showing the code relevant to the cube + texture)
void init(void) {
.
.
.
pyramidTexture = TextureLoader::fiLoadTexture(wstring(L"Common\\Resources\\Textures\\Sandstone.png"));
// Setup VAO for pyramid object
glGenVertexArrays(1, &pyramidVAO);
glBindVertexArray(pyramidVAO);
// Setup VBO for vertex position data
glGenBuffers(1, &pyramidVertexBuffer);
glBindBuffer(GL_ARRAY_BUFFER, pyramidVertexBuffer);
glBufferData(GL_ARRAY_BUFFER, sizeof(pyramidVertices), pyramidVertices, GL_STATIC_DRAW);
glVertexAttribPointer(0, 4, GL_FLOAT, GL_FALSE, 0, (const GLvoid*)0); // attribute 0 gets data from bound VBO (so assign vertex position buffer to attribute 0)
// Setup VBO for vertex colour data
glGenBuffers(1, &pyramidColourBuffer);
glBindBuffer(GL_ARRAY_BUFFER, pyramidColourBuffer);
glBufferData(GL_ARRAY_BUFFER, sizeof(pyramidColours), pyramidColours, GL_STATIC_DRAW);
glVertexAttribPointer(1, 4, GL_FLOAT, GL_TRUE, 0, (const GLvoid*)0); // attribute 1 gets colour data
glGenBuffers(3, &pyramidTexCoordBuffer);
glBindBuffer(GL_ARRAY_BUFFER, pyramidTexCoordBuffer);
glBufferData(GL_ARRAY_BUFFER, sizeof(pyramidTexCoordArray), pyramidTexCoordArray, GL_STATIC_DRAW);
glVertexAttribPointer(3, 2, GL_FLOAT, GL_FALSE, 0, (const GLvoid*)0);
// Enable vertex position and colour + Texture attribute arrays
glEnableVertexAttribArray(0);
glEnableVertexAttribArray(1);
glEnableVertexAttribArray(3);
// Setup VBO for face index array
glGenBuffers(1, &pyramidIndexBuffer);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, pyramidIndexBuffer);
glBufferData(GL_ELEMENT_ARRAY_BUFFER, sizeof(pyramidVertexIndices), pyramidVertexIndices, GL_STATIC_DRAW);
glBindVertexArray(0);
glEnable(GL_NORMALIZE); // If we scale objects, ensure normal vectors are re-normalised to length 1.0 to keep lighting calculations correct (see lecture notes)
glHint(GL_PERSPECTIVE_CORRECTION_HINT, GL_NICEST); // Best colour interpolation results
.
.
.
}
LoadTexture Function
GLuint TextureLoader::fiLoadTexture(const wstring& textureFilePath) {
BOOL fiOkay = FALSE;
GLuint newTexture = 0;
fipImage I;
// Convert wstring to const char*
wstring_convert<codecvt_utf8<wchar_t>, wchar_t> stringConverter;
string S = stringConverter.to_bytes(textureFilePath);
const char *filename = S.c_str();
// Call FreeImage to load the image file
fiOkay = I.load(filename);
if (!fiOkay) {
cout << "FreeImagePlus: Cannot open image file.\n";
return 0;
}
fiOkay = I.flipVertical();
fiOkay = I.convertTo24Bits();
if (!fiOkay) {
cout << "FreeImagePlus: Conversion to 24 bits successful.\n";
return 0;
}
auto w = I.getWidth();
auto h = I.getHeight();
BYTE *buffer = I.accessPixels();
if (!buffer) {
cout << "FreeImagePlus: Cannot access bitmap data.\n";
return 0;
}
glGenTextures(1, &newTexture);
glBindTexture(GL_TEXTURE_2D, newTexture);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, w, h, 0, GL_BGR, GL_UNSIGNED_BYTE, buffer);
// Setup default texture properties
if (newTexture) {
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT);
}
return newTexture;
}
Display Function
void display(void) {
glClearColor(0.0, 0.0, 0.0, 0.0);
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
// Set viewport to the client area of the current window
glViewport(0, 0, glutGet(GLUT_WINDOW_WIDTH), glutGet(GLUT_WINDOW_HEIGHT));
// Get view-projection transform as a GUMatrix4
GUMatrix4 T = mainCamera->projectionTransform() * mainCamera->viewTransform();
if (principleAxes)
principleAxes->render(T);
if (texturedQuad)
texturedQuad->render(T * GUMatrix4::translationMatrix(0.5f, 0.5f, 0.0f));
// Fixed function rendering (Compatibility profile only) - use this since CGImport is written against OpenGL 2.1
glUseProgram(0);
glMatrixMode(GL_PROJECTION);
glLoadIdentity();
glMultMatrixf((const float*)mainCamera->projectionTransform().M);
glMatrixMode(GL_MODELVIEW);
glLoadIdentity();
glMultMatrixf((const float*)mainCamera->viewTransform().M);
glMultMatrixf((const float*)GUMatrix4::translationMatrix(0.0f, -0.15f, 0.0f).M);
glEnable(GL_TEXTURE_2D);
glPolygonMode(GL_FRONT, GL_FILL);
if (exampleModel)
exampleModel->renderTexturedModel();
glDisable(GL_TEXTURE_2D);
//Define position and direction (so appear at fixed point in scene)
glLightfv(GL_LIGHT0, GL_SPOT_DIRECTION, lightDirection);
glLightfv(GL_LIGHT0, GL_POSITION, lightPosition);
// enable texturing
glEnable(GL_TEXTURE_2D);
glEnable(GL_LIGHTING);
glEnable(GL_LIGHT0);
//
// Pyramid VBO rendering
//
// Use basic shader for rendering pyramid (we'll look at this in more detail next week)
glUseProgram(basicShader);
static GLint mvpLocationPyramid = glGetUniformLocation(basicShader, "mvpMatrix");
glUniformMatrix4fv(mvpLocationPyramid, 1, GL_FALSE, (const GLfloat*)&(T.M));
GUMatrix4 pyramidModelTransform = GUMatrix4::translationMatrix(-5.75f, 0.0f, 0.0f) * GUMatrix4::scaleMatrix(2.0f, 2.0f, 2.0f);
GUMatrix4 mvpPyramid = T * pyramidModelTransform;
glUniformMatrix4fv(mvpLocationPyramid, 1, GL_FALSE, (const GLfloat*)&(mvpPyramid.M));
// Bind VAO that contains all relevant pyramid VBO buffer and attribute pointer bindings
glBindVertexArray(pyramidVAO);
glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_2D, pyramidTexture);
// Draw pyramid
glDrawElements(GL_TRIANGLES, 36, GL_UNSIGNED_SHORT, (const GLvoid*)0);
// Unbind pyramid VAO (or bind another VAO)
glBindVertexArray(0);
glutSwapBuffers();
}
I've been trying to fix this for hours now without any luck, so any support would be massively appreciated!
EDIT: Added in VAO attributes + Shaders
VAO Settings
// Per-vertex position vectors
static float pyramidVertices[32] =
{
//Front
0.0f, 0.0f, 0.0f, 1.0f, //BtmLeft
1.0f, 0.0f, 0.0f, 1.0f, //BtmRight
1.0f, 1.0f, 0.0f, 1.0f, //TopRight
0.0f, 1.0f, 0.0f, 1.0f, //TopLeft
//Back
0.0f, 1.0f, 1.0f, 1.0f, //TopLeft
1.0f, 1.0f, 1.0f, 1.0f, //TopRight
1.0f, 0.0f, 1.0f, 1.0f, //BottomRight
0.0f, 0.0f, 1.0f, 1.0f //BottomLeft
};
// Per-vertex colours (RGBA) floating point values
static float pyramidColours[32] =
{
1.0f, 0.0f, 0.0f, 1.0f,
0.0f, 1.0f, 0.0f, 1.0f,
0.0f, 0.0f, 1.0f, 1.0f,
1.0f, 0.0f, 1.0f, 1.0f,
0.0f, 1.0f, 1.0f, 1.0f,
0.0f, 0.0f, 1.0f, 1.0f,
1.0f, 0.0f, 1.0f, 1.0f,
1.0f, 0.0f, 0.0f, 1.0f
};
// 6 faces, each made of 2 triangles (36 indices in total)
static unsigned short pyramidVertexIndices[36] =
{
//Front
0, 3, 2,
2, 1, 0,
//Right
4, 3, 0,
0, 7, 4,
//Back
4, 7, 6,
6, 5, 4,
//Top
4, 5, 3,
3, 5, 2,
//Left
2, 5, 1,
1, 5, 6,
//Bottom
6, 7, 0,
0, 1, 6
};
static unsigned short pyramidTexCoordArray[24] =
{
-1.0f, -1.0f, -1.0f,
1.0f, -1.0f, -1.0f,
1.0f, 1.0f, -1.0f,
-1.0f, 1.0f, -1.0f,
-1.0f, -1.0f, 1.0f,
1.0f, -1.0f, 1.0f,
1.0f, 1.0f, 1.0f,
-1.0f, 1.0f, 1.0f
};
Vertex Shader
#version 330
uniform mat4 mvpMatrix;
layout (location=0) in vec4 vertexPos;
layout (location=3) in vec2 vertexTexCoord;
out vec2 texCoord;
void main(void) {
mat4 M;
M[0] = vec4(1.0);
ivec2 a = ivec2(1, 2);
//vec3 b = vec3(2.0, 4.0, 1.0) + a;
texCoord = vertexTexCoord;
gl_Position = mvpMatrix * vertexPos;
}
Fragment Shader
#version 330
uniform sampler2D texture;
in vec2 texCoord;
layout (location=0) out vec4 fragColour;
void main(void) {
vec4 texColor = texture2D(texture, texCoord);
fragColour = texColor;
}
You defined your data as unsigned short:
static unsigned short pyramidTexCoordArray[24]
But it has to be float.
There are also a number of other strange things:
You are generating 3 VBOs for texture coordinates, but are only using one. Unless pyramidTexCoordBuffer is of type GLuint[3] (which I assume it is not, due to the &), you are writing out of bounds.
Edit: This refers to the glGenBuffers(3, &pyramidTexCoordBuffer); line, which allocates 3 buffers and stores them in three consecutive GLuint variables starting at pyramidTexCoordBuffer. Since pyramidTexCoordBuffer is most probably a GLuint, pyramidTexCoordBuffer[1] and pyramidTexCoordBuffer[2] refer to unallocated memory.
The pyramidTexCoordArray array is declared as unsigned short, but you are writing float values into it. Since it is unsigned, at least the negative numbers will be lost.
Additionally, with the line
glVertexAttribPointer(3, 2, GL_FLOAT, GL_FALSE, 0, (const GLvoid*)0);
you tell OpenGL that the data is of type GL_FLOAT (which it is not) and that there are two components per vertex (but the data has 3 elements per vertex).
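Putting these points together, a minimal corrected sketch could look like the following. The UV values here are placeholders (the intended mapping is not shown in the post); the point is the float type, two components per vertex, and a single buffer:
// Two float texture coordinates for each of the 8 vertices.
static float pyramidTexCoordArray[16] =
{
    0.0f, 0.0f,  1.0f, 0.0f,  1.0f, 1.0f,  0.0f, 1.0f,  // front vertices
    0.0f, 1.0f,  1.0f, 1.0f,  1.0f, 0.0f,  0.0f, 0.0f   // back vertices
};
glGenBuffers(1, &pyramidTexCoordBuffer); // one buffer, not three
glBindBuffer(GL_ARRAY_BUFFER, pyramidTexCoordBuffer);
glBufferData(GL_ARRAY_BUFFER, sizeof(pyramidTexCoordArray), pyramidTexCoordArray, GL_STATIC_DRAW);
glVertexAttribPointer(3, 2, GL_FLOAT, GL_FALSE, 0, (const GLvoid*)0); // type and size now match the data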

GL_TEXTURE_2D as render input

I have an image in a GL_TEXTURE_2D. From here, I need to move this texture onto the framebuffer object. Can someone please point me to an example that uses a 2d texture that is filled with data and places it onto the FBO? I am not trying to render to a texture, but from a texture.
Here is a simple FBO demonstration.
You can find the entire code here on github (the code is based on this sample program from the OpenGL SuperBible).
The program does the following:
Draws a red square onto a green background into an FBO. The backing store for the FBO is a 2D texture.
Draws a square onto a blue screen, but maps the texture from step 1 onto the square. This maps the entire scene from the previous step onto the square.
So the theory when using a framebuffer is:
(Initialisation) Attach textures to an off-screen framebuffer.
Then there are 2 programs (or 2 passes):
In program 1, render into the framebuffer (which renders into the textures).
In program 2, render to the window (screen) and use the texture(s) from the first program as input.
So the output of the first program is the input for the second program.
That means the vertex shader for both stages can be the same. As there is no math (no rotation) and no lighting effects, all the vertex shader does is pass its input (the coordinates) through to the output.
But each stage has a slightly different fragment shader.
The 1st fragment shader outputs a constant color for each pixel. In the source code, the output of the 1st fragment shader is directed into the FBO, and the FBO uses a texture to store the color data.
The texture generated by the 1st fragment shader is used as input in the 2nd fragment shader. In the 2nd fragment shader, the color is determined by that texture (which is the output of the 1st fragment shader).
So here is the vertex shader. Note that its inputs are the coordinates (vp) and the texture coordinates. The texcoords are needed by the 2nd program's fragment shader to know where to sample the texture. In the vertex shader the texcoords are simply passed through from input to output.
#version 130
in vec3 vp;
in vec2 texcoord;
out vec2 outtexcoord;
void main () {
gl_Position = vec4 (vp, 1.0);
outtexcoord = texcoord;
}
This is the 1st program's fragment shader. Every pixel is drawn red.
#version 130
in vec2 outtexcoord;
out vec4 frag_colour;
void main () {
frag_colour = vec4 (1.0, 0.0, 0.0, 0.0); //everything red
}
And the 2nd program's fragment shader. Note the sampler2D, which is the input texture (generated by the 1st program). Also, outtexcoord holds the texture coordinates from the vertex shader. The output color (frag_color) is determined by the texture.
#version 130
uniform sampler2D tex;
in vec2 outtexcoord;
out vec4 frag_color;
void main () {
frag_color = texture(tex, outtexcoord);
}
And here is the C++ program (I compiled it with g++; see the makefile in the github link).
#include <GL/glew.h>
#include <GLFW/glfw3.h>
#include "shader_utils.h" //see github link for details
void Initialize();
void InitGL();
void InitProgramFBO();
void InitProgramScreen();
void InitBuffer();
void InitFBO();
void Loop();
void RenderToFBO();
void RenderToScreen();
void Shutdown();
void OnWindowResize(GLFWwindow* window, int width, int height);
GLFWwindow* window;
int screenWidth = 640;
int screenHeight = 480;
GLuint render2FBOProgram;
GLuint render2ScreenProgram;
GLuint vao;
GLuint vbo;
GLuint fbo;
GLuint color_texture;
int main() {
Initialize();
Loop();
Shutdown();
return 0;
}
void Initialize() {
InitGL();
InitProgramScreen();
InitProgramFBO();
InitBuffer();
InitFBO();
}
void InitGL() {
glfwInit();
window = glfwCreateWindow(screenWidth, screenHeight, "FBO Demo", NULL, NULL);
glfwMakeContextCurrent(window);
glewInit();
glClearColor(0.0f, 0.0f, 0.1f, 1.0f);
}
void InitProgramFBO() {
GLuint vs;
GLuint fs;
render2FBOProgram = create_program("vs.glsl", "fbo.fs.glsl", vs, fs);
glDeleteShader(vs);
glDeleteShader(fs);
}
void InitProgramScreen() {
GLuint vs;
GLuint fs;
render2ScreenProgram = create_program("vs.glsl", "screen.fs.glsl", vs, fs);
glDeleteShader(vs);
glDeleteShader(fs);
}
void InitBuffer() {
//define the square made up of 2 triangles
static const GLfloat points[] = {
//x y z texcoord u and v
-0.5f, 0.5f, 0.0f, 0.0f, 0.0f,
0.5f, 0.5f, 0.0f, 1.0f, 0.0f,
0.5f, -0.5f, 0.0f, 1.0f, 1.0f,
0.5f, -0.5f, 0.0f, 1.0f, 1.0f,
-0.5f, -0.5f, 0.0f, 0.0f, 1.0f,
-0.5f, 0.5f, 0.0f, 0.0f, 0.0f
};
glGenVertexArrays(1, &vao);
glBindVertexArray(vao);
//create buffer for points
glGenBuffers(1, &vbo);
glBindBuffer(GL_ARRAY_BUFFER, vbo);
glBufferData(GL_ARRAY_BUFFER, sizeof(points), points, GL_STATIC_DRAW);
//tell opengl how to find the coordinate data
glVertexAttribPointer (0, 3, GL_FLOAT, GL_FALSE, 5*sizeof(GLfloat), (GLubyte*)NULL);
glEnableVertexAttribArray(0);
//tell opengl how to find the texcoord data
glVertexAttribPointer (1, 2, GL_FLOAT, GL_FALSE, 5*sizeof(GLfloat), (GLvoid *)(3 * sizeof(GLfloat)));
glEnableVertexAttribArray(1);
}
void InitFBO() {
//create a framebuffer
glGenFramebuffers(1, &fbo);
glBindFramebuffer(GL_FRAMEBUFFER, fbo);
//create a texture as the backing store for the framebuffer
glGenTextures(1, &color_texture);
glBindTexture(GL_TEXTURE_2D, color_texture);
glTexStorage2D(GL_TEXTURE_2D, 9, GL_RGBA8, 512, 512); // 9 = number of mipmap levels
//mip map filtering
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
//attach the texture as the color attachment of the framebuffer
glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, color_texture, 0);
//tell opengl to draw into the color attachment
static const GLenum draw_buffers[] = { GL_COLOR_ATTACHMENT0 };
glDrawBuffers(1, draw_buffers);
}
void Loop() {
//glBindVertexArray(vao);
//glBindBuffer(GL_ARRAY_BUFFER, vbo);
while (!glfwWindowShouldClose(window)) {
RenderToFBO();
RenderToScreen();
glfwSwapBuffers(window);
glfwPollEvents();
if (GLFW_PRESS == glfwGetKey(window, GLFW_KEY_ESCAPE)) {
glfwSetWindowShouldClose(window, 1);
}
}
}
void RenderToFBO() {
static const GLfloat green[] = { 0.0f, 1.0f, 0.0f, 1.0f }; //texture background is green
glBindFramebuffer(GL_FRAMEBUFFER, fbo);
glViewport(0, 0, 512, 512); //set view port to texture size
glClearBufferfv(GL_COLOR, 0, green);
glUseProgram(render2FBOProgram);
glDrawArrays(GL_TRIANGLES, 0, 6);
glBindFramebuffer(GL_FRAMEBUFFER, 0);
}
void RenderToScreen() {
static const GLfloat blue[] = { 0.0f, 0.0f, 1.0f, 1.0f }; //screen background is blue
glViewport(0, 0, screenWidth, screenHeight);
glClearBufferfv(GL_COLOR, 0, blue);
glBindTexture(GL_TEXTURE_2D, color_texture);
glUseProgram(render2ScreenProgram);
glDrawArrays(GL_TRIANGLES, 0, 6);
glBindTexture(GL_TEXTURE_2D, 0);
}
void Shutdown() {
glUseProgram(0);
glBindBuffer(GL_ARRAY_BUFFER, 0);
glDeleteProgram(render2FBOProgram);
glDeleteProgram(render2ScreenProgram);
glfwTerminate();
}
// a call-back function
void OnWindowResize(GLFWwindow* window, int width, int height) {
screenWidth = width;
screenHeight = height;
glViewport(0, 0, screenWidth, screenHeight);
}
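One detail worth noting about this listing: the OnWindowResize callback is defined but never registered, so as shown it will never be called. If it is meant to be active, the registration (not present in this excerpt) would be done once after creating the window, using the standard GLFW call:
glfwSetWindowSizeCallback(window, OnWindowResize); // register the resize handler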

2D OpenGL in GLFW flickers

I'm trying to create a 2D game using OpenGL. My helper libraries are GLFW, GLEW, SOIL (for images) and glm (for maths).
I've managed to draw a rectangle with a very simple texture on it. But when I try to move this rectangle, it frequently shows a little flicker (the interval between flickers is almost always the same), and the faster I move it, the more visible the flicker becomes.
Another problem: I'm working on a laptop, and the program renders fine with my integrated graphics (fine as in it works, though it still stutters), but when I run it on my Nvidia graphics card it just shows my clear color and nothing else, which is extremely odd. My sprite translation happens in the runCallback code (called in the main loop) and is just a multiplication of matrices. The resulting matrix is then fetched and used in the draw code. The drawCallback just calls the DrawSprite function. I should also note that I'm using OpenGL 3.3.
I'm sorry in advance for the rather large amount of code, but after extensive testing and trying a multitude of things I have no idea where my mistake lies.
If you would like to help me but need any more information, I will provide it.
UPDATE:
The problem with the Nvidia graphics card has been resolved; it was due to a wrong uniform parameter. But the stutter remains.
IMAGE LOADING CODE
SD_Texture::SD_Texture(const char * fileName)
{
texture = new GLuint;
unsigned char* data = SOIL_load_image(fileName, &width, &height, 0, SOIL_LOAD_RGB);
glGenTextures(1, (GLuint*)texture);
glBindTexture(GL_TEXTURE_2D, *((GLuint*)(texture)));
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, width, height, 0, GL_RGB, GL_UNSIGNED_BYTE, data);
glGenerateMipmap(GL_TEXTURE_2D);
glBindTexture(GL_TEXTURE_2D, 0);
SOIL_free_image_data(data);
}
void SD_Texture::Bind()
{
glBindTexture(GL_TEXTURE_2D, *(GLuint*)texture);
}
VAO SETUP CODE
void SD_Window::SetupVAO()
{
GLfloat vertices[] =
{
-0.5f, 0.5f, 0.0f, 0.0f, 1.0f,
-0.5f, -0.5f, 0.0f, 0.0f, 0.0f,
0.5f, -0.5, 0.0f, 1.0, 0.0f,
-0.5f, 0.5f, 0.0f, 0.0f, 1.0f,
0.5f, -0.5f, 0.0f, 1.0f, 0.0f,
0.5f, 0.5f, 0.0f, 1.0f, 1.0f
};
glGenVertexArrays(1, &VAO);
glGenBuffers(1, &VBO);
glBindVertexArray(VAO);
glBindBuffer(GL_ARRAY_BUFFER, VBO);
glBufferData(GL_ARRAY_BUFFER, sizeof(vertices), vertices, GL_STATIC_DRAW);
glEnableVertexAttribArray(0);
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 5 * sizeof(GL_FLOAT), (GLvoid*)nullptr);
glEnableVertexAttribArray(1);
glVertexAttribPointer(1, 2, GL_FLOAT, GL_FALSE, 5 * sizeof(GL_FLOAT), (GLvoid*)(3 * sizeof(GLfloat)));
glBindVertexArray(0);
}
DRAW CODE
void SD_Window::DrawSprite(SD_Sprite * sprite)
{
glActiveTexture(GL_TEXTURE0);
sprite->GetTexture()->Bind();
glUniformMatrix4fv(transformUniform, 1, GL_FALSE, glm::value_ptr(ortho * sprite->GetTransform()));
glBindVertexArray(VAO);
glDrawArrays(GL_TRIANGLES, 0, 6);
glBindVertexArray(0);
glBindTexture(GL_TEXTURE_2D, 0);
}
MAIN LOOP CODE
void SD_Window::TakeControl(void (*runCallback)(float delta), void (*drawCallback)())
{
double currentTime;
double oldTime = 0.0f;
while (!ShouldClose())
{
currentTime = glfwGetTime();
glClear(GL_COLOR_BUFFER_BIT);
drawCallback();
glfwSwapBuffers(window);
runCallback(currentTime - oldTime);
oldTime = currentTime;
glfwPollEvents();
}
glfwDestroyWindow(window);
glfwTerminate();
}
VERTEX SHADER
#version 330 core
layout (location = 0) in vec3 position;
layout (location = 1) in vec2 texCoord;
out vec2 sCoord;
uniform mat4 transform;
void main()
{
gl_Position = transform * vec4(position, 1.0f);
sCoord = vec2(texCoord.x, 1.0f - texCoord.y);
}
FRAGMENT SHADER
#version 330 core
in vec2 sCoord;
out vec4 color;
uniform sampler2D sTexture;
void main()
{
color = texture(sTexture, sCoord);
}