I want to make an OpenGL program that can present images and warp the images it presents.
Although I achieved image rendering using OpenGL, I don't know how to warp an image.
A warped example of what I want is this (reference):
But the picture I got is this:
As far as I know, this problem is related to perspective-correct mapping,
but I don't know much about it.
Here is my source code.
void imageRender(Shader initShader, Shader imgShader, char *path){
glHint(GL_PERSPECTIVE_CORRECTION_HINT, GL_FASTEST);
float positions[] = { 0.5f, 1.0f, 0.0f,
1.0f, -1.0f, 0.0f,
-1.0f, -1.0f, 0.0f,
-1.0f, 1.0f, 0.0f };
float vertices[] = {
// positions // colors // texture coords
positions[0], positions[1], positions[2], 1.0f, 0.0f, 0.0f, 1.0f, 1.0f, // top right
positions[3], positions[4], positions[5], 0.0f, 1.0f, 0.0f, 1.0f, 0.0f, // bottom right
positions[6], positions[7], positions[8], 0.0f, 0.0f, 1.0f, 0.0f, 0.0f, // bottom left
positions[9], positions[10],positions[11], 1.0f, 1.0f, 0.0f, 0.0f, 1.0f // top left
};
unsigned int indices[] = {
0, 1, 3,
1, 2, 3
};
unsigned int VAO, VBO, EBO;
glGenVertexArrays(1, &VAO);
glBindVertexArray(VAO);
glGenBuffers(1, &VBO);
glGenBuffers(1, &EBO);
glBindBuffer(GL_ARRAY_BUFFER, VBO);
glBufferData(GL_ARRAY_BUFFER, sizeof(vertices), vertices, GL_STATIC_DRAW);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, EBO);
glBufferData(GL_ELEMENT_ARRAY_BUFFER, sizeof(indices), indices, GL_STATIC_DRAW);
// position attribute
glVertexAttribPointer(1, 3, GL_FLOAT, GL_FALSE, 8 * sizeof(float), (void*)0);
glEnableVertexAttribArray(1);
// color attribute
glVertexAttribPointer(2, 3, GL_FLOAT, GL_FALSE, 8 * sizeof(float), (void*)(3 * sizeof(float)));
glEnableVertexAttribArray(2);
//texture attribute
glVertexAttribPointer(3, 2, GL_FLOAT, GL_FALSE, 8 * sizeof(float), (void*)(6 * sizeof(float)));
glEnableVertexAttribArray(3);
FREE_IMAGE_FORMAT format = FreeImage_GetFileType(path, 0);
if (format == -1){
cerr << BOLDRED << "[ERROR] IMAGE_NOT_FOUND" << RESET << endl;
exit(1);
}
if (format == FIF_UNKNOWN){
cerr << BOLDRED << "[ERROR] UNKNOWN_IMAGE_FORMAT" << RESET << endl;
format = FreeImage_GetFIFFromFilename(path);
if (!FreeImage_FIFSupportsReading(format)){
cerr << BOLDRED << "[ERROR] IMAGE_FORMAT_NOT_READABLE" << RESET << endl;
exit(1);
}
}
FIBITMAP *bitmap = FreeImage_Load(format, path);
FIBITMAP *bitmap32;
int bitsPerPixel = FreeImage_GetBPP(bitmap);
bitmap32 = FreeImage_ConvertTo32Bits(bitmap);
int imageWidth = FreeImage_GetWidth(bitmap32);
int imageHeight = FreeImage_GetHeight(bitmap32);
GLubyte *textureData = FreeImage_GetBits(bitmap32);
GLuint texture1;
glGenTextures(1, &texture1);
glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_2D, texture1);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, imageWidth, imageHeight, 0, GL_BGRA, GL_UNSIGNED_BYTE, textureData);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT); // set texture wrapping to GL_REPEAT (default wrapping method)
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
initShader.use();
glBindVertexArray(VAO);
int initialTime = time(NULL);
while(1){
glBindVertexArray(VAO);
int timecal = time(NULL);
//glDrawElements(GL_TRIANGLE_STRIP, 6, GL_UNSIGNED_INT, 0);
glDrawElements(GL_TRIANGLES, 6, GL_UNSIGNED_INT, 0);
glfwSwapBuffers(window);
glfwPollEvents();
glClear( GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT | GL_STENCIL_BUFFER_BIT);
if ((timecal - initialTime) > imageTime) // imageTime value is 10
break;
}
glClear( GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT | GL_STENCIL_BUFFER_BIT);
glDeleteTextures(1, &texture1);// image
glDisable(GL_TEXTURE_2D);//image
FreeImage_Unload(bitmap32);
FreeImage_Unload(bitmap);
glDeleteVertexArrays(1, &VAO);
glDeleteBuffers(1, &VBO);
glDeleteBuffers(1, &EBO);
}
The shader code looks like this:
//shader.vs
#version 330 core
layout(location = 1) in vec3 position;
layout(location = 2) in vec3 color;
layout(location = 3) in vec2 texcoord;
out vec3 Color;
out vec2 Texcoord;
void main()
{
gl_Position = vec4(position, 1.0);
Texcoord = texcoord;
}
//shader.fs
#version 330 core
in vec3 Color;
in vec2 Texcoord;
out vec4 outColor;
uniform sampler2D texture5;
void main()
{
outColor = texture2D(texture5, Texcoord);
}
How can I warp the texture?
And is it correct to use the position values to warp the texture image?
The issue has nothing to do with perspective projection. You draw a polygon with 4 vertices parallel to the XY plane of the view, but the polygon is not a proper rectangle: change the x coordinate of the 1st vertex (0.5f -> 1.0f). Perspective projection works with homogeneous coordinates.
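That is, with only the first x coordinate changed, the position array from the question becomes:
float positions[] = {  1.0f,  1.0f, 0.0f,   // top right (was 0.5f)
                       1.0f, -1.0f, 0.0f,   // bottom right
                      -1.0f, -1.0f, 0.0f,   // bottom left
                      -1.0f,  1.0f, 0.0f }; // top left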
In general, perspective projection is achieved with a perspective projection matrix. Of course, you can define homogeneous vertices directly to inspect the behavior.
Define an attribute tuple with homogeneous vertices (4 components):
float vertices[] = {
// positions // colors // texture coords
1.0f, 1.0f, 0.5f, 2.0f, 1.0f, 0.0f, 0.0f, 1.0f, 1.0f, // top right
1.0f, -1.0f, -0.5f, 1.0f, 0.0f, 1.0f, 0.0f, 1.0f, 0.0f, // bottom right
-1.0f, -1.0f, -0.5f, 1.0f, 0.0f, 0.0f, 1.0f, 0.0f, 0.0f, // bottom left
-1.0f, 1.0f, 0.5f, 2.0f, 1.0f, 1.0f, 0.0f, 0.0f, 1.0f // top left
};
Adapt the vertex specification and the vertex shader:
// position attribute
glVertexAttribPointer(1, 4, GL_FLOAT, GL_FALSE, 9 * sizeof(float), (void*)0);
glEnableVertexAttribArray(1);
// color attribute
glVertexAttribPointer(2, 3, GL_FLOAT, GL_FALSE, 9 * sizeof(float), (void*)(4 * sizeof(float)));
glEnableVertexAttribArray(2);
//texture attribute
glVertexAttribPointer(3, 2, GL_FLOAT, GL_FALSE, 9 * sizeof(float), (void*)(7 * sizeof(float)));
glEnableVertexAttribArray(3);
#version 330 core
layout(location = 1) in vec4 position;
layout(location = 2) in vec3 color;
layout(location = 3) in vec2 texcoord;
out vec3 Color;
out vec2 Texcoord;
void main()
{
gl_Position = position;
Texcoord = texcoord;
}
Another option to achieve the effect is to add a Z component to the geometry, e.g.:
float positions[] = { 1.0f, 1.0f, 0.5f,
1.0f, -1.0f, 0.0f,
-1.0f, -1.0f, -0.5f,
-1.0f, 1.0f, 0.0f };
and to compute the w component from z in the vertex shader (e.g. w = z + 2.5):
#version 330 core
layout(location = 1) in vec3 position;
layout(location = 2) in vec3 color;
layout(location = 3) in vec2 texcoord;
out vec3 Color;
out vec2 Texcoord;
void main()
{
gl_Position = vec4(position, position.z + 2.5);
Texcoord = texcoord;
}
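For completeness, here is a minimal sketch of the matrix route mentioned above, assuming GLM is available; the uniform name mvp, the aspect variable and the program handle below are placeholders rather than names from the question. The quad is tilted, and the perspective matrix then produces the w component automatically:
#include <glm/glm.hpp>                  // GLM core types (assumed available)
#include <glm/gtc/matrix_transform.hpp> // glm::perspective, glm::translate, glm::rotate
#include <glm/gtc/type_ptr.hpp>         // glm::value_ptr

// build a perspective projection and tilt the quad around the x axis
glm::mat4 projection = glm::perspective(glm::radians(45.0f), aspect, 0.1f, 10.0f);
glm::mat4 model = glm::translate(glm::mat4(1.0f), glm::vec3(0.0f, 0.0f, -2.5f))
                * glm::rotate(glm::mat4(1.0f), glm::radians(30.0f), glm::vec3(1.0f, 0.0f, 0.0f));
glm::mat4 mvp = projection * model;
glUniformMatrix4fv(glGetUniformLocation(program, "mvp"), 1, GL_FALSE, glm::value_ptr(mvp));
and in the vertex shader the position would be transformed by that matrix:
uniform mat4 mvp;
gl_Position = mvp * vec4(position, 1.0);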
Related
I am trying to use a shader to draw a rectangle with a texture, following this tutorial, and this is my code:
main.cpp:
#define GLM_ENABLE_EXPERIMENTAL
#define GLEW_STATIC
#include<GL/glew.h>
#include<SFML/Graphics.hpp>
#include<SFML/OpenGL.hpp>
#include"../include/glm/glm.hpp"
#include"../include/glm/gtx/transform.hpp"
#include"../include/glm/gtc/type_ptr.hpp"
#include<iostream>
float vertices[] = {
// positions // texture coords
0.5f, 0.5f, 0.0f, 1.0f, 1.0f, // top right
0.5f, -0.5f, 0.0f, 1.0f, 0.0f, // bottom right
-0.5f, -0.5f, 0.0f, 0.0f, 0.0f, // bottom left
-0.5f, 0.5f, 0.0f, 0.0f, 1.0f // top left
};
int main(){
sf::ContextSettings set;
set.depthBits = 24;
sf::RenderWindow window(sf::VideoMode(800, 600), "Window", sf::Style::Default, set);
window.setActive(true);
glewInit();
glewExperimental = GL_TRUE;
glEnable(GL_TEXTURE_2D);
sf::Image im;
im.loadFromFile("texture.jpg");
im.flipVertically();
sf::Texture tex1;
tex1.loadFromImage(im);
sf::Shader shader;
shader.loadFromFile(
"vertex.txt",
"fragment.txt"
);
shader.setUniform("tex1", tex1);
glm::mat4 transform = glm::mat4(1.f);
transform = glm::translate(transform, glm::vec3(0.3f, 0.3f, 0.f));
shader.setUniform("transform", glm::value_ptr(transform)); // !!!
sf::Shader::bind(&shader);
unsigned int VBO;
glGenBuffers(1, &VBO);
glBindBuffer(GL_ARRAY_BUFFER, VBO);
glBufferData(GL_ARRAY_BUFFER, sizeof(vertices), vertices, GL_STATIC_DRAW);
unsigned int VAO;
glGenVertexArrays(1, &VAO);
glBindVertexArray(VAO);
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 5 * sizeof(float), (void*)(0 * sizeof(float)));
glVertexAttribPointer(1, 2, GL_FLOAT, GL_FALSE, 5 * sizeof(float), (void*)(3 * sizeof(float)));
glEnableVertexAttribArray(0);
glEnableVertexAttribArray(1);
glViewport(0.f, 0.f, 800.f, 600.f);
glClearColor(0.f, 100.f, 150.f, 0.f);
while(window.isOpen()){
sf::Event event;
while(window.pollEvent(event)){
if (event.type == sf::Event::Closed){
window.close();
}
}
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
glDrawArrays(GL_QUADS, 0, 4);
window.display();
}
}
vertex.txt:
#version 330 core
layout (location = 0) in vec3 aPos;
layout (location = 1) in vec2 aTexCoord;
out vec2 TexCoord;
uniform mat4 transform;
void main()
{
gl_Position = transform * vec4(aPos, 1.0f);
TexCoord = vec2(aTexCoord.x, aTexCoord.y);
}
But it draws just the clear color, with no rectangle.
The odd part is that without the transformation it works perfectly fine: if the line gl_Position = transform * vec4(aPos, 1.0f); is changed to gl_Position = vec4(aPos, 1.0f);, it draws the untransformed rectangle with the texture as expected. So I guess this is a problem with setting the uniform, but that would be strange, since at least the texture uniform is set without any issues. What am I doing wrong?
sf::Shader has an overloaded setUniform method for the sf::Glsl::Mat4 type. You need to create an sf::Glsl::Mat4 object, which can be constructed from an array of floats:
sf::Glsl::Mat4 sfTransform(glm::value_ptr(transform));
shader.setUniform("transform", sfTransform);
I'm attempting to texture a VBO/VAO model cube. The cube is definitely rendering/drawn correctly, and as far as I know I am doing everything needed to load the texture.
However when it comes to applying the texture it appears to take an average of all colours in the texture, then apply that average to the entire cube. This results in it appearing to be "painted" with a plain, regular colour as shown in the screenshot below:
This is the texture:
I'm at a loss as to why this is happening. Below is the code from my init, loadTexture and display functions (I did not write the loadTexture function):
Init Function
(Only showing the code relevant to the cube + texture)
void init(void) {
.
.
.
pyramidTexture = TextureLoader::fiLoadTexture(wstring(L"Common\\Resources\\Textures\\Sandstone.png"));
// Setup VAO for pyramid object
glGenVertexArrays(1, &pyramidVAO);
glBindVertexArray(pyramidVAO);
// Setup VBO for vertex position data
glGenBuffers(1, &pyramidVertexBuffer);
glBindBuffer(GL_ARRAY_BUFFER, pyramidVertexBuffer);
glBufferData(GL_ARRAY_BUFFER, sizeof(pyramidVertices), pyramidVertices, GL_STATIC_DRAW);
glVertexAttribPointer(0, 4, GL_FLOAT, GL_FALSE, 0, (const GLvoid*)0); // attribute 0 gets data from bound VBO (so assign vertex position buffer to attribute 0)
// Setup VBO for vertex colour data
glGenBuffers(1, &pyramidColourBuffer);
glBindBuffer(GL_ARRAY_BUFFER, pyramidColourBuffer);
glBufferData(GL_ARRAY_BUFFER, sizeof(pyramidColours), pyramidColours, GL_STATIC_DRAW);
glVertexAttribPointer(1, 4, GL_FLOAT, GL_TRUE, 0, (const GLvoid*)0); // attribute 1 gets colour data
glGenBuffers(3, &pyramidTexCoordBuffer);
glBindBuffer(GL_ARRAY_BUFFER, pyramidTexCoordBuffer);
glBufferData(GL_ARRAY_BUFFER, sizeof(pyramidTexCoordArray), pyramidTexCoordArray, GL_STATIC_DRAW);
glVertexAttribPointer(3, 2, GL_FLOAT, GL_FALSE, 0, (const GLvoid*)0);
// Enable vertex position and colour + Texture attribute arrays
glEnableVertexAttribArray(0);
glEnableVertexAttribArray(1);
glEnableVertexAttribArray(3);
// Setup VBO for face index array
glGenBuffers(1, &pyramidIndexBuffer);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, pyramidIndexBuffer);
glBufferData(GL_ELEMENT_ARRAY_BUFFER, sizeof(pyramidVertexIndices), pyramidVertexIndices, GL_STATIC_DRAW);
glBindVertexArray(0);
glEnable(GL_NORMALIZE); // If we scale objects, ensure normal vectors are re-normalised to length 1.0 to keep lighting calculations correct (see lecture notes)
glHint(GL_PERSPECTIVE_CORRECTION_HINT, GL_NICEST); // Best colour interpolation results
.
.
.
}
LoadTexture Function
GLuint TextureLoader::fiLoadTexture(const wstring& textureFilePath) {
BOOL fiOkay = FALSE;
GLuint newTexture = 0;
fipImage I;
// Convert wstring to const char*
wstring_convert<codecvt_utf8<wchar_t>, wchar_t> stringConverter;
string S = stringConverter.to_bytes(textureFilePath);
const char *filename = S.c_str();
// Call FreeImage to load the image file
fiOkay = I.load(filename);
if (!fiOkay) {
cout << "FreeImagePlus: Cannot open image file.\n";
return 0;
}
fiOkay = I.flipVertical();
fiOkay = I.convertTo24Bits();
if (!fiOkay) {
cout << "FreeImagePlus: Conversion to 24 bits successful.\n";
return 0;
}
auto w = I.getWidth();
auto h = I.getHeight();
BYTE *buffer = I.accessPixels();
if (!buffer) {
cout << "FreeImagePlus: Cannot access bitmap data.\n";
return 0;
}
glGenTextures(1, &newTexture);
glBindTexture(GL_TEXTURE_2D, newTexture);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, w, h, 0, GL_BGR, GL_UNSIGNED_BYTE, buffer);
// Setup default texture properties
if (newTexture) {
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT);
}
return newTexture;
}
Display Function
void display(void) {
glClearColor(0.0, 0.0, 0.0, 0.0);
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
// Set viewport to the client area of the current window
glViewport(0, 0, glutGet(GLUT_WINDOW_WIDTH), glutGet(GLUT_WINDOW_HEIGHT));
// Get view-projection transform as a GUMatrix4
GUMatrix4 T = mainCamera->projectionTransform() * mainCamera->viewTransform();
if (principleAxes)
principleAxes->render(T);
if (texturedQuad)
texturedQuad->render(T * GUMatrix4::translationMatrix(0.5f, 0.5f, 0.0f));
// Fixed function rendering (Compatibility profile only) - use this since CGImport is written against OpenGL 2.1
glUseProgram(0);
glMatrixMode(GL_PROJECTION);
glLoadIdentity();
glMultMatrixf((const float*)mainCamera->projectionTransform().M);
glMatrixMode(GL_MODELVIEW);
glLoadIdentity();
glMultMatrixf((const float*)mainCamera->viewTransform().M);
glMultMatrixf((const float*)GUMatrix4::translationMatrix(0.0f, -0.15f, 0.0f).M);
glEnable(GL_TEXTURE_2D);
glPolygonMode(GL_FRONT, GL_FILL);
if (exampleModel)
exampleModel->renderTexturedModel();
glDisable(GL_TEXTURE_2D);
//Define position and direction (so appear at fixed point in scene)
glLightfv(GL_LIGHT0, GL_SPOT_DIRECTION, lightDirection);
glLightfv(GL_LIGHT0, GL_POSITION, lightPosition);
// enable texturing
glEnable(GL_TEXTURE_2D);
glEnable(GL_LIGHTING);
glEnable(GL_LIGHT0);
//
// Pyramid VBO rendering
//
// Use basic shader for rendering pyramid (we'll look at this in more detail next week)
glUseProgram(basicShader);
static GLint mvpLocationPyramid = glGetUniformLocation(basicShader, "mvpMatrix");
glUniformMatrix4fv(mvpLocationPyramid, 1, GL_FALSE, (const GLfloat*)&(T.M));
GUMatrix4 pyramidModelTransform = GUMatrix4::translationMatrix(-5.75f, 0.0f, 0.0f) * GUMatrix4::scaleMatrix(2.0f, 2.0f, 2.0f);
GUMatrix4 mvpPyramid = T * pyramidModelTransform;
glUniformMatrix4fv(mvpLocationPyramid, 1, GL_FALSE, (const GLfloat*)&(mvpPyramid.M));
// Bind VAO that contains all relevant pyramid VBO buffer and attribute pointer bindings
glBindVertexArray(pyramidVAO);
glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_2D, pyramidTexture);
// Draw pyramid
glDrawElements(GL_TRIANGLES, 36, GL_UNSIGNED_SHORT, (const GLvoid*)0);
// Unbind pyramid VAO (or bind another VAO)
glBindVertexArray(0);
glutSwapBuffers();
}
I've been trying to fix this for hours now without any luck, as such any support would be massively appreciated!!!
EDIT: Added in VAO attributes + Shaders
VAO Settings
// Per-vertex position vectors
static float pyramidVertices[32] =
{
//Front
0.0f, 0.0f, 0.0f, 1.0f, //BtmLeft
1.0f, 0.0f, 0.0f, 1.0f, //BtmRight
1.0f, 1.0f, 0.0f, 1.0f, //TopRight
0.0f, 1.0f, 0.0f, 1.0f, //TopLeft
//Back
0.0f, 1.0f, 1.0f, 1.0f, //TopLeft
1.0f, 1.0f, 1.0f, 1.0f, //TopRight
1.0f, 0.0f, 1.0f, 1.0f, //BottomRight
0.0f, 0.0f, 1.0f, 1.0f //BottomLeft
};
// Per-vertex colours (RGBA) floating point values
static float pyramidColours[32] =
{
1.0f, 0.0f, 0.0f, 1.0f,
0.0f, 1.0f, 0.0f, 1.0f,
0.0f, 0.0f, 1.0f, 1.0f,
1.0f, 0.0f, 1.0f, 1.0f,
0.0f, 1.0f, 1.0f, 1.0f,
0.0f, 0.0f, 1.0f, 1.0f,
1.0f, 0.0f, 1.0f, 1.0f,
1.0f, 0.0f, 0.0f, 1.0f
};
// 6 faces, each made of 2 triangles (36 indices)
static unsigned short pyramidVertexIndices[36] =
{
//Front
0, 3, 2,
2, 1, 0,
//Right
4, 3, 0,
0, 7, 4,
//Back
4, 7, 6,
6, 5, 4,
//Top
4, 5, 3,
3, 5, 2,
//Left
2, 5, 1,
1, 5, 6,
//Bottom
6, 7, 0,
0, 1, 6
};
static unsigned short pyramidTexCoordArray[24] =
{
-1.0f, -1.0f, -1.0f,
1.0f, -1.0f, -1.0f,
1.0f, 1.0f, -1.0f,
-1.0f, 1.0f, -1.0f,
-1.0f, -1.0f, 1.0f,
1.0f, -1.0f, 1.0f,
1.0f, 1.0f, 1.0f,
-1.0f, 1.0f, 1.0f
};
Vertex Shader
#version 330
uniform mat4 mvpMatrix;
layout (location=0) in vec4 vertexPos;
layout (location=3) in vec2 vertexTexCoord;
out vec2 texCoord;
void main(void) {
mat4 M;
M[0] = vec4(1.0);
ivec2 a = ivec2(1, 2);
//vec3 b = vec3(2.0, 4.0, 1.0) + a;
texCoord = vertexTexCoord;
gl_Position = mvpMatrix * vertexPos;
}
Fragment Shader
#version 330
uniform sampler2D texture;
in vec2 texCoord;
layout (location=0) out vec4 fragColour;
void main(void) {
vec4 texColor = texture2D(texture, texCoord);
fragColour = texColor;
}
You defined your data as unsigned short:
static unsigned short pyramidTexCoordArray[24]
But it has to be float.
There are a lot of strange things here:
You are generating 3 VBOs for texture coordinates, but are just using one. Unless pyramidTexCoordBuffer is of type GLuint[3] (which I assume it is not due to the &), you are writing out of bounds.
Edit: This refers to the glGenBuffers(3, &pyramidTexCoordBuffer); line, which allocates 3 buffers and stores them in three consecutive GLuint variables starting at pyramidTexCoordBuffer. Since pyramidTexCoordBuffer is most probably a GLuint, pyramidTexCoordBuffer[1] and pyramidTexCoordBuffer[2] refer to unallocated memory.
The pyramidTexCoordArray array is specified as unsigned short, but you are writing floats to it. Since it is unsigned, at least the negative numbers will be gone.
Additionally, you tell OpenGL with the
glVertexAttribPointer(3, 2, GL_FLOAT, GL_FALSE, 0, (const GLvoid*)0);
line that the data is of type GL_FLOAT (which it is not) and that there are two floats per vertex (but the data actually has 3 elements per vertex).
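A minimal sketch of how those points fit together, assuming one shared 2D texture coordinate per vertex (with 8 shared vertices you cannot give every cube face its own full 0..1 mapping, so the actual coordinate values below are only illustrative):
// float (not unsigned short) texture coordinates: 8 vertices * 2 components
static float pyramidTexCoordArray[16] =
{
    0.0f, 0.0f,   1.0f, 0.0f,   1.0f, 1.0f,   0.0f, 1.0f,  // front vertices
    0.0f, 1.0f,   1.0f, 1.0f,   1.0f, 0.0f,   0.0f, 0.0f   // back vertices
};

glGenBuffers(1, &pyramidTexCoordBuffer);  // one buffer object, not three
glBindBuffer(GL_ARRAY_BUFFER, pyramidTexCoordBuffer);
glBufferData(GL_ARRAY_BUFFER, sizeof(pyramidTexCoordArray), pyramidTexCoordArray, GL_STATIC_DRAW);
glVertexAttribPointer(3, 2, GL_FLOAT, GL_FALSE, 0, (const GLvoid*)0); // now really 2 floats per vertex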
Here's my code
vertex shader
#version 430
in vec3 position;
uniform mat4 MVP;
void main()
{
gl_Position = MVP * vec4(position, 1.0);
}
fragment shader
#version 430
out vec4 outputColor;
uniform sampler2D tex;
void main()
{
outputColor = texture(tex, gl_PointCoord);
}
init
void init()
{
glPointSize(20.0f);
GLuint tex;
glGenTextures(1, &tex);
glBindTexture(GL_TEXTURE_2D, tex);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
bmp.Open("C:\\users\\alon\\desktop\\star.bmp");
bmp.ReadPixels();
unsigned char *pixels = new unsigned char[bmp.NeededBufferSize()];
bmp.AssignPixels(pixels);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, bmp.GetWidth(), bmp.GetHeight(), 0, GL_RGB, GL_UNSIGNED_BYTE, pixels);
delete[] pixels;
glClearColor(0.2f, 0.2f, 0.2f, 1.0f);
glEnable(GL_DEPTH_TEST);
glPolygonMode(GL_FRONT_AND_BACK, GL_FILL);
glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA);
GLuint VaoId;
glGenVertexArrays(1, &VaoId);
glBindVertexArray(VaoId);
position_index = glGetAttribLocation(program, "position");
MVP_location = glGetUniformLocation(program, "MVP");
// x y z s t
GLfloat vertices[] = {-0.5f, -0.5f, 0.0f,
0.5f, -0.5f, 0.0f,
0.5f, 0.5f, 0.0f,
0.5f, 0.5f, 0.0f,
-0.5f, 0.5f, 0.0f,
-0.5f, -0.5f, 0.0f};
glGenBuffers(1, &VBO);
glBindBuffer(GL_ARRAY_BUFFER, VBO);
glBufferData(GL_ARRAY_BUFFER, sizeof(vertices), vertices, GL_STATIC_DRAW);
glVertexAttribPointer(position_index, 3, GL_FLOAT, GL_FALSE, 0, 0);
glBindBuffer(GL_ARRAY_BUFFER, 0);
}
render
void render()
{
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
glEnableVertexAttribArray(position_index);
glBindBuffer(GL_ARRAY_BUFFER, VBO);
float c;
if(bTime)
c = (float)std::clock() * 2.0f / CLOCKS_PER_SEC;
else
c = 0.0f;
glm::mat4 modelview = glm::rotate(-c * 50.0f, glm::vec3(0.0f, 1.0f, 0.0f));
modelview = glm::translate(glm::vec3(0.0f + fTranslateX, 0.0f, -1.75f + fTranslateZ)) * modelview;
glm::mat4 projection = glm::perspective(60.0f, 16.0f/9.0f, 0.10f, 100.0f); //perspective
glm::mat4 MVP = projection * modelview;
glUniformMatrix4fv(MVP_location, 1, GL_FALSE, &MVP[0][0]);
glDrawArrays(GL_POINTS, 0, 6);
glBindBuffer(GL_ARRAY_BUFFER, 0);
glDisableVertexAttribArray(position_index);
glutSwapBuffers();
if(glGetError() != GL_NO_ERROR)
exit(1);
}
The star image is:
Output:
Is there anything I'm doing wrong?
In compatibility profiles (or any version of GL before 3.1), the behavior you want only occurs when GL_POINT_SPRITE is enabled. In a core profile, that state was removed and gl_PointCoord always behaves as if GL_POINT_SPRITE were enabled.
To fix your problem, you must call glEnable(GL_POINT_SPRITE).
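For example, added at the top of the question's init() (this assumes a compatibility context; in a core profile the call is unnecessary and would raise GL_INVALID_ENUM, so it would simply be left out):
void init()
{
    glEnable(GL_POINT_SPRITE); // compatibility profile: let gl_PointCoord vary across each point
    glPointSize(20.0f);
    // ... rest of the original init() unchanged ...
}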
I'm trying to write a basic OpenGL 3.3 program with shaders, buffers, etc. that draws a cube. The problem is that the cube is not drawn. Sorry for such an amount of code, but I feel like the error might be anywhere, because to me it all seems right: the display function is looping, the shaders compile, and the matrices are passed to the shaders. I suspect that something might be wrong with culling. Please take a look. Here is the code (I'm using freeglut; first init() is called, then display() runs in a loop):
initialization code:
struct ProgramData
{
GLuint theProgram;
GLuint iModel;
GLuint iView;
GLuint iProjection;
};
ProgramData shaderProgram;
ProgramData LoadProgram(const std::string &strVertexShader,
const std::string &strFragmentShader)
{
std::vector<GLuint> shaderList;
shaderList.push_back(LoadShader(GL_VERTEX_SHADER, strVertexShader));
shaderList.push_back(LoadShader(GL_FRAGMENT_SHADER, strFragmentShader));
ProgramData data;
data.theProgram = CreateProgram(shaderList);
data.iModel = glGetUniformLocation(data.theProgram, "mModel");
data.iView = glGetUniformLocation(data.theProgram, "mView");
data.iProjection = glGetUniformLocation(data.theProgram, "mProjection");
return data;
}
float cube_vertices[] = {
-1.0f, -1.0f, 1.0f,
1.0f, -1.0f, 1.0f,
1.0f, 1.0f, 1.0f,
-1.0f, 1.0f, 1.0f,
-1.0f, -1.0f, -1.0f,
1.0f, -1.0f, -1.0f,
1.0f, 1.0f, -1.0f,
-1.0f, 1.0f, -1.0f,
GREEN_COLOR,
BLUE_COLOR,
RED_COLOR,
BROWN_COLOR,
GREEN_COLOR,
BLUE_COLOR,
RED_COLOR,
BROWN_COLOR,
};
GLubyte cube_elements[] = {
0,1,2, 2,3,0,
0,3,4, 4,5,0,
0,5,6, 6,1,0,
1,6,7, 7,2,1,
7,4,3, 3,2,7,
4,7,6, 6,5,4
};
void InitializeProgram()
{
//initialize vertex buffer
glGenBuffers(1, &vertex_buffer_obj);
glBindBuffer(GL_ARRAY_BUFFER, vertex_buffer_obj);
glBufferData(GL_ARRAY_BUFFER, sizeof(cube_vertices),
cube_vertices, GL_STATIC_DRAW);
glBindBuffer(GL_ARRAY_BUFFER, 0);
//initialize index buffer
glGenBuffers(1, &index_buffer_obj);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, index_buffer_obj);
glBufferData(GL_ELEMENT_ARRAY_BUFFER, sizeof(cube_elements),
cube_elements, GL_STATIC_DRAW);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, 0);
shaderProgram = LoadProgram("shader.vert", "shader.frag");
}
void init()
{
InitializeProgram();
int numberOfVertices = 8;
size_t color_data_offset = sizeof(float) * 3 * numberOfVertices;
glGenVertexArrays(1, &vao);
glBindVertexArray(vao);
glBindBuffer(GL_ARRAY_BUFFER, vertex_buffer_obj);
glEnableVertexAttribArray(0);
glEnableVertexAttribArray(1);
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 0, 0);
glVertexAttribPointer(1, 4, GL_FLOAT, GL_FALSE, 0,
(void*)color_data_offset);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, index_buffer_obj);
glBindVertexArray(0);
glEnable(GL_CULL_FACE);
glCullFace(GL_BACK);
glFrontFace(GL_CW);
glEnable(GL_DEPTH_TEST);
glDepthMask(GL_TRUE);
glDepthFunc(GL_LEQUAL);
glDepthRange(0.0f, 1.0f);
}
vertex shader:
#version 330
layout (location = 0) in vec3 inPosition;
layout (location = 1) in vec3 color;
uniform mat4 mProjection;
uniform mat4 mView;
uniform mat4 mModel;
smooth out vec3 theColor;
void main()
{
gl_Position = mProjection * mView * mModel * vec4(inPosition, 1);
theColor = color;
}
fragment shader:
#version 330
smooth in vec3 theColor;
out vec4 outputColor;
void main()
{
outputColor = vec4(theColor, 1);
}
draw code:
glm::vec3 cam_pos(3, 2, 3);
void display()
{
glClearColor(0.3f, 0.3f, 0.3f, 1.0f);
glClearDepth(1.0f);
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
glUseProgram(shaderProgram.theProgram);
glm::mat4 model_matrix = glm::translate(glm::vec3(0, 0, 0));
glm::mat4 view_matrix = glm::lookAt(cam_pos,
glm::vec3(0, 0, 0), glm::vec3(0, 0, 1));
glm::mat4 proj_matrix = glm::perspective(45.0f, 1.0f, 1.0f, 100.0f);
glUniformMatrix4fv(shaderProgram.iProjection, 1,
GL_FALSE, glm::value_ptr(proj_matrix));
glUniformMatrix4fv(shaderProgram.iView, 1,
GL_FALSE, glm::value_ptr(view_matrix));
glUniformMatrix4fv(shaderProgram.iModel, 1,
GL_FALSE, glm::value_ptr(model_matrix));
glBindVertexArray(vao);
int size; glGetBufferParameteriv(GL_ELEMENT_ARRAY_BUFFER,
GL_BUFFER_SIZE, &size);
glDrawElements(GL_TRIANGLES, size / sizeof(GLubyte), GL_UNSIGNED_SHORT, 0);
glBindVertexArray(0);
glUseProgram(0);
glutSwapBuffers();
glutPostRedisplay();
}
UPD: in the init method, the offset for the colors should be calculated as
sizeof(float) * 3 * numberOfVertices instead of
sizeof(GLubyte) * 3 * numberOfVertices, since the colors are stored as floats. This did not solve the rendering problem.
SOLVED: thank you for the help. See my answer below.
One thing I did spot when glancing over your code is the following line from your vertex shader:
gl_Position = mProjection * mView * mModel * vec4(inPosition, 0);
That 0 should really be a 1.0.
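That is, the line should read:
gl_Position = mProjection * mView * mModel * vec4(inPosition, 1.0);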
In the display() function, the line
glDrawElements(GL_TRIANGLES, size / sizeof(GLubyte), GL_UNSIGNED_SHORT, 0);
should be changed to
glDrawElements(GL_TRIANGLES, size / sizeof(GLubyte), GL_UNSIGNED_BYTE, 0);
and in init()
glFrontFace(GL_CW);
to
glFrontFace(GL_CCW);
So the problem was that I passed incorrect data to OpenGL. The index array is of GLubyte (1 byte per element), but for some reason I decided it was GLushort (2 bytes).
Edit: it doesn't matter much, but the up vector (when building the camera matrix) should be glm::vec3(0, 1, 0), not glm::vec3(0, 0, 1).
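As a side note, a small sketch of deriving the element count from the index array itself, rather than querying the buffer size, keeps the count and the index type (GL_UNSIGNED_BYTE for GLubyte) in sync by construction:
// number of indices taken directly from the array declaration
const GLsizei indexCount = sizeof(cube_elements) / sizeof(cube_elements[0]);
glBindVertexArray(vao);
glDrawElements(GL_TRIANGLES, indexCount, GL_UNSIGNED_BYTE, 0);
glBindVertexArray(0);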