I just started learning OpenGL.
My program draws 2D isometric tiles, and this is its output:
For unknown reasons, black lines appear where two textures overlap or touch each other.
Code example:
typedef unsigned int ID;
class GraphicEngine {
public:
GraphicEngine();
~GraphicEngine();
void initShaders(const char* vertexShaderSource, const char* fragmentShaderSource);
void initRenderData(float vertices[], unsigned int size);
std::vector<ID> initTextures(std::vector<std::string>& paths);
void drawTextures(std::vector<ID> testuresIds);
private:
GraphicEngine(GraphicEngine&) = delete;
GraphicEngine(GraphicEngine&&) = delete;
GraphicEngine& operator=(const GraphicEngine& other) = delete;
private:
unsigned int VBO = 0;
unsigned int VAO = 0;
unsigned int EBO = 0;
unsigned int shaderProgram;
};
GraphicEngine::GraphicEngine() {
}
GraphicEngine::~GraphicEngine() {
glDeleteVertexArrays(1, &VAO);
glDeleteBuffers(1, &VBO);
glDeleteBuffers(1, &EBO);
}
void GraphicEngine::initShaders(const char* vertexShaderSource, const char* fragmentShaderSource) {
glEnable(GL_BLEND);
glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA);
unsigned int vertexShader = glCreateShader(GL_VERTEX_SHADER);
unsigned int fragmentShader = glCreateShader(GL_FRAGMENT_SHADER);
shaderProgram = glCreateProgram();
glShaderSource(vertexShader, 1, &vertexShaderSource, NULL);
glCompileShader(vertexShader);
glShaderSource(fragmentShader, 1, &fragmentShaderSource, NULL);
glCompileShader(fragmentShader);
glAttachShader(shaderProgram, vertexShader);
glAttachShader(shaderProgram, fragmentShader);
glLinkProgram(shaderProgram);
}
void GraphicEngine::initRenderData(float vertices[], unsigned int size) {
unsigned int indices[] = {
0, 1, 3,
1, 2, 3
};
glGenVertexArrays(1, &VAO);
glGenBuffers(1, &VBO);
glGenBuffers(1, &EBO);
glBindVertexArray(VAO);
glBindBuffer(GL_ARRAY_BUFFER, VBO);
glBufferData(GL_ARRAY_BUFFER, size, vertices, GL_STATIC_DRAW);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, EBO);
glBufferData(GL_ELEMENT_ARRAY_BUFFER, sizeof(indices), indices, GL_STATIC_DRAW);
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 5 * sizeof(float), (void*)0);
glEnableVertexAttribArray(0);
glVertexAttribPointer(1, 2, GL_FLOAT, GL_FALSE, 5 * sizeof(float), (void*)(3 * sizeof(float)));
glEnableVertexAttribArray(1);
}
std::vector<ID> GraphicEngine::initTextures(std::vector<std::string>& paths) {
std::vector<ID> ids(paths.size());
stbi_set_flip_vertically_on_load(true);
for (int i = 0; i < paths.size(); i++) {
unsigned int texture;
glGenTextures(1, &ids[i]);
glBindTexture(GL_TEXTURE_2D, ids[i]);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
int width, height, nrChannels;
unsigned char* data = stbi_load(paths[i].c_str(), &width, &height, &nrChannels, STBI_rgb_alpha);
if (data)
{
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, width, height, 0, GL_RGBA, GL_UNSIGNED_BYTE, data);
glGenerateMipmap(GL_TEXTURE_2D);
}
stbi_image_free(data);
}
return ids;
}
void GraphicEngine::drawTextures(std::vector<ID> testuresIds) {
static bool ex = false;
for (auto testureId : testuresIds) {
for (int i = 0; i < 4; i++) {
glBindTexture(GL_TEXTURE_2D, testureId);
glm::mat4 transform = glm::mat4(1.0f);
transform = glm::translate(transform, glm::vec3(i * 0.6f + 0.0f, 0.0f, 0.0f));
glUseProgram(shaderProgram);
unsigned int transformLoc = glGetUniformLocation(shaderProgram, "transform");
glUniformMatrix4fv(transformLoc, 1, GL_FALSE, glm::value_ptr(transform));
glBindVertexArray(VAO);
glDrawElements(GL_TRIANGLES, 6, GL_UNSIGNED_INT, 0);
}
for (int i = 0; i < 4; i++) {
glBindTexture(GL_TEXTURE_2D, testureId);
glm::mat4 transform = glm::mat4(1.0f);
transform = glm::translate(transform, glm::vec3(i * 0.6f - 0.3f, -0.16f, 0.0f));
glUseProgram(shaderProgram);
unsigned int transformLoc = glGetUniformLocation(shaderProgram, "transform");
glUniformMatrix4fv(transformLoc, 1, GL_FALSE, glm::value_ptr(transform));
glBindVertexArray(VAO);
glDrawElements(GL_TRIANGLES, 6, GL_UNSIGNED_INT, 0);
}
}
}
const unsigned int SCR_WIDTH = 800;
const unsigned int SCR_HEIGHT = 600;
Window::Window():window(nullptr) {}
Window::~Window() {
glfwTerminate();
}
bool Window::initWindowResources() {
bool result = false;
if (glfwInit() == GLFW_TRUE) {
glfwWindowHint(GLFW_CONTEXT_VERSION_MAJOR, 3);
glfwWindowHint(GLFW_CONTEXT_VERSION_MINOR, 3);
glfwWindowHint(GLFW_OPENGL_PROFILE, GLFW_OPENGL_CORE_PROFILE);
window = glfwCreateWindow(SCR_WIDTH, SCR_HEIGHT, "LearnOpenGL", NULL, NULL);
if (window != nullptr) {
glfwMakeContextCurrent(window);
if (glfwSetFramebufferSizeCallback(window, [](GLFWwindow* window, int width, int height) {
glViewport(0, 0, width, height); }) == NULL) {
if (gladLoadGLLoader((GLADloadproc)glfwGetProcAddress)) {
result = true;
}
}
}
}
return result;
}
const char* vertexShaderSource =
"#version 330 core\n"
"layout(location = 0) in vec3 aPos;\n"
"layout(location = 1) in vec2 aTexCoord;\n"
"out vec2 TexCoord;\n"
"uniform mat4 transform;\n"
"void main()\n"
"{\n"
" gl_Position = transform * vec4(aPos, 1.0);\n"
" TexCoord = vec2(aTexCoord.x, aTexCoord.y);\n"
"}\n\0";
const char* fragmentShaderSource =
"#version 330 core\n"
"out vec4 FragColor;\n"
"in vec3 ourColor;\n"
"in vec2 TexCoord;\n"
"uniform sampler2D texture1;\n"
"void main()\n"
"{\n"
" FragColor = texture(texture1, TexCoord);\n"
"}\n\0";
void Window::mainWindowLoop() {
graphicEngine.initShaders(vertexShaderSource, fragmentShaderSource);
std::vector<std::string> pathsTextures = { "C:\\Users\\Олег\\Desktop\\sea1.png" };
float vertices[] = {
// positions          // texture coords
-1.3f, 0.16f, 0.0f, 1.0f, 1.0f, // top right
-1.3f, -0.16f, 0.0f, 1.0f, 0.0f, // bottom right
-0.7f, -0.16f, 0.0f, 0.0f, 0.0f, // bottom left
-0.7f, 0.16f, 0.0f, 0.0f, 1.0f // top left
};
graphicEngine.initRenderData(vertices, sizeof(vertices));
std::vector<ID> idsTextures = graphicEngine.initTextures(pathsTextures);
while (!glfwWindowShouldClose(window))
{
glClearColor(1.0f, 1.0f, 1.0f, 1.0f);
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
graphicEngine.drawTextures(idsTextures);
glfwSwapBuffers(window);
glfwPollEvents();
}
}
int main()
{
Window window;
if (window.initWindowResources()) {
window.mainWindowLoop();
}
return 0;
}
PNG details: 62x34 pixels, a transparent sprite, created with Piskel (piskelapp).
Please provide information about this issue: what causes it and how can I fix it?
I was able to reproduce your issue. You are working with non-premultiplied alpha, which is known to produce undesirable results when rendering translucent images.
Take a look at this article: http://www.realtimerendering.com/blog/gpus-prefer-premultiplication/
Now, to solve your problem, first change your blend function to glBlendFunc(GL_ONE, GL_ONE_MINUS_SRC_ALPHA).
Second, stbi doesn't pre-multiply the alpha on load; you have to do it manually.
Each pixel is composed of 4 bytes, red, green, blue and alpha, in the 0-255 range. Convert each value to the normalized range (0.0f - 1.0f) by dividing by 255.0f, multiply r, g and b by alpha, then multiply back by 255.0f.
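A minimal sketch of that per-pixel loop, placed right after stbi_load in initTextures (assuming the image was loaded with STBI_rgb_alpha, i.e. 4 bytes per pixel):
if (data) {
    for (int p = 0; p < width * height; p++) {
        unsigned char* px = data + p * 4;
        float a = px[3] / 255.0f;            // normalized alpha
        px[0] = (unsigned char)(px[0] * a);  // premultiply red
        px[1] = (unsigned char)(px[1] * a);  // premultiply green
        px[2] = (unsigned char)(px[2] * a);  // premultiply blue
    }
}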
The dark lines at the edge of the tiles are results of alpha blending and texture filtering.
The linked tile image (PNG) contains three premultiplied color channels (red, green, blue) and transparency information (alpha channel) with no partially transparent pixels (the alpha value is either 1.0 or 0.0 everywhere, which results in sharp edges):
This can be checked in an image editor (for example Gimp). The image uses premultiplied alpha, i.e. the color channels were masked by the alpha channel and only contain color information where the alpha channel is non-zero.
The area outside of the valid image region is all black, so when OpenGL uses linear texture interpolation (GL_LINEAR) it will mix the hidden black texels right at the edge with the visible colored texels, which can result in a dark color, depending on the used blending function.
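As a rough illustration with made-up numbers: right at the tile edge, GL_LINEAR might average a visible white texel (RGBA 1.0, 1.0, 1.0, 1.0) with a hidden black texel (0.0, 0.0, 0.0, 0.0), producing (0.5, 0.5, 0.5, 0.5), a darkened, half-transparent gray instead of pure white.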
Alpha blending mixes the already present color in the framebuffer (of the cleared background or the already written fragments) with the incoming ones.
The used blending function glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA) instructs the hardware to compute this for every pixel: result = sRGB * sA + dRGB * (1 - sA).
The result: dark artifacts at the edges of each tile caused by the interpolated alpha value at the edges of the tile, which darkens the source color (sRGB * sA) (modified example with original tile image, reproduced issue from the original post):
In other words:
https://shawnhargreaves.com/blog/texture-filtering-alpha-cutouts.html:
Texture filtering: alpha cutouts
(...)
Filtering applies equally to the RGB and alpha channels. When used on the alpha
channel of a cutout texture it will produce new fractional alpha
values around the edge of the shape, which makes things look nice and
antialiased. But filtering also produces new RGB colors, part way in
between the RGB of the solid and transparent parts of the texture.
Thus the RGB values of supposedly transparent pixels can bleed into
our final image.
This most often results in dark borders around alpha cutouts, since
the RGB of transparent pixels is often black. Depending on the
texture, the bleeding could alternatively be white, pink, etc.
To quick-fix the problem, the blending function could simply be changed to glBlendFunc(GL_ONE, GL_ONE_MINUS_SRC_ALPHA), since the tile image already has premultiplied RGB channels, which are all black (= 0) in transparent areas:
https://shawnhargreaves.com/blog/premultiplied-alpha.html:
Premultiplied alpha is better than conventional blending for several
reasons:
It works properly when filtering alpha cutouts (...)
It works properly when doing image composition (...)
It is a superset of both conventional and additive blending. If you set alpha to zero while RGB is non zero, you get an additive
blend. This can be handy for particle systems that want to smoothly
transition from additive glowing sparks to dark pieces of soot as the
particles age.
The result: dark artifacts disappear almost entirely after changing the blending function (modified example with original tile image, issue partially fixed):
Not perfect.
To fix this, some pixels could be drawn around the tile to enlarge the visible area a bit:
To let tiles overlap a bit, like that:
The result (with texture filtering, and overlapped pixels):
(Additionally, lines/other graphical elements could be drawn on top of the artifacts to cover them up. And if the pixelated, jagged edges are not wanted, the actual textured quads could be replaced by rhombuses that could be placed precisely next to each other in a continuous mesh and rendered in one draw call, with no alpha blending required anymore; however, sharp edges do not fit a pixelated look, I guess.)
A possible solution using GL_NEAREST:
OpenGL texture parameters:
To get rid of the artefacts and blurred/filtered look, GL_LINEAR can be replaced by GL_NEAREST, which disables texture interpolation altogether for the selected texture and lets OpenGL render the raw pixels without applying texture filtering (GL_CLAMP_TO_EDGE makes sense here to avoid artifacts at the edges):
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
Power of Two Textures:
OpenGL performance can be improved by always using texture dimensions that
are a power of two, e.g. 64x32 (instead of 60x32 in your case). The tile image could be modified, e.g. 2 pixels added on each side (and borders marked):
Side note: This restriction is not that important anymore, but in the past it was even necessary to use a special extension to enable NPOT textures:
Conventional OpenGL texturing is limited to images with
power-of-two dimensions and an optional 1-texel border.
ARB_texture_non_power_of_two extension relaxes the size restrictions
for the 1D, 2D, cube map, and 3D texture targets.
Snap to pixel:
There are multiple ways to do this with OpenGL.
I would recommend to scale the orthographic projection, so that 1 OpenGL
coordinate unit exactly matches 1 texel unit. That way, tiles can be precisely placed on the pixel grid (just shift coordinates of the tile vertices by 64 pixels/OpenGL units left/right, to get to the next one, in this example). Coordinates could be represented as integers in the engine now.
Modified code example:
void GraphicEngine::drawTextures(std::vector<ID> testuresIds, float wndRatio) {
const int countx = 3, county = 3; /* number of tiles */
const float scale = 100.0f; /* zoom */
const glm::mat4 mvp = glm::ortho(-wndRatio * scale, wndRatio * scale, -scale, scale, 2.0f, -2.0f);
const float offx = -((countx * TILE_WIDTH * 2.0f) * 0.5f - TILE_WIDTH);
const float offy = -TILE_WIDTH * 0.5f;
for (auto testureId : testuresIds) {
for (int y = 0; y < county; y++) {
for (int x = 0; x < countx - (y & 1 ? 1 : 0); x++) {
const glm::mat4 transform = mvp * glm::translate(glm::mat4(1.0f), glm::vec3(
offx + x * TILE_WIDTH * 2.0f + (y & 1 ? TILE_WIDTH : 0.0f),
offy + y * TILE_HEIGHT, 0.0f));
glBindTexture(GL_TEXTURE_2D, testureId);
glUseProgram(shaderProgram); /* the program must be in use before setting its uniforms */
const GLint transformLoc = glGetUniformLocation(shaderProgram, "transform");
glUniformMatrix4fv(transformLoc, 1, GL_FALSE, glm::value_ptr(transform));
glBindVertexArray(VAO);
glDrawElements(GL_TRIANGLES, 6, GL_UNSIGNED_INT, 0);
}
}
}
}
Screenshot of modified example:
And without marked edges:
Some hints on the use of "straight alpha textures":
Another approach to solve this might be the use of an unmasked/unpremultiplied/straight alpha texture. The color channels of the original tile image can be flood filled out like this:
(Note: The linked PNG image above can't be used directly. Imgur seems to convert transparent PNG images and automatically masks the color channels...)
This technique could help to reduce the artifacts when texture filtering and the conventional alpha blending function are used (i.e. GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA). However, the background will always show through a tiny bit, because some pixels at the edges are always slightly transparent (caused by texture filtering):
(The result should be very similar to the first solution above, where the original premultiplied image is used with modified alpha blending (GL_ONE, GL_ONE_MINUS_SRC_ALPHA).)
If the tile contains not just a plain color, the color information at the edges of the tile would need to be extended outwards to avoid artifacts.
Obviously this doesn't solve the original issue completely, when a precise 2D look is the goal. But it could be useful in other situations, where the hidden pixels also generate bad results when other forms of transparency/blending/compositing are used, e.g. for particles, semi-transparent edges of foliage, text etc.
Some general hints that could help to solve the issue:
Check: glBlendFunc(), glBlendFuncSeparate(), glBlendEquation(), glBlendColor()
Do not waste precious video memory: glGenerateMipmap() is not required for a pure 2D presentation, where all pixels are visible all the time.
Alpha Testing: glAlphaFunc() has been removed from OpenGL 4, but alpha testing can be done manually in the fragment shader by discarding fragments depending on their alpha value; see OpenGL alpha test - How to replace AlphaFunc deprecated?.
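As a minimal sketch of such a manual alpha test, the fragment shader from the question could be extended like this (the 0.5 threshold is just an example value):
#version 330 core
out vec4 FragColor;
in vec2 TexCoord;
uniform sampler2D texture1;
void main()
{
    vec4 texColor = texture(texture1, TexCoord);
    if (texColor.a < 0.5) // manual alpha test: skip (nearly) transparent fragments
        discard;
    FragColor = texColor;
}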
glHint(): Using this OpenGL function to change implementation-specific hints can have an impact on the rendered result, sometimes in "surprising ways":
GL_POLYGON_SMOOTH_HINT
Indicates the sampling quality of antialiased polygons. Hinting
GL_NICEST can result in more pixel fragments being generated during
rasterization, if a larger filter function is applied.
The code in the original question comes with some issues: it does not compile as posted (some parts of the class definitions are missing, etc.). It takes some effort to reproduce the issue, which makes it more complicated to answer the question. And it wasn't completely clear to me whether the intention is to just render seamless, pixelated tiles (solution: use GL_NEAREST), or if texture filtering is required...
Here is my modified code example.
Related questions on Stack Overflow:
OpenGL normal blending with black alpha edges
opengl es2 premultiplied vs straight alpha + blending
Some links related to Alpha Blending / "premultiplied alpha":
Visual glBlendFunc + glBlendEquation Tool, by Anders Riggelsen
Premultiplied alpha, originally posted to Shawn Hargreaves Blog on MSDN, Friday, November 6, 2009
Texture filtering: alpha cutouts, originally posted to Shawn Hargreaves Blog on MSDN, Monday, November 2, 2009
What is Premultiplied Alpha? A Primer for Artists, by David Hart, July 6, 2016
GPUs prefer premultiplication, by Eric from www.realtimerendering.com
Related
So I'm trying to pass a bunch of vectors to the fragment shader, and apparently I should do it with a 1D texture. But when I try to access the passed vectors, the values are not what I expect.
How should I index into the texture() function?
Passing the texture:
std::vector<vec3> triangles;
//triangles is already filled by this point
glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_1D, texture);
glTexParameteri(GL_TEXTURE_1D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
glTexImage1D(GL_TEXTURE_1D, 0, GL_RGB16F, Object::triangles.size(), 0, GL_RGB, GL_FLOAT, &Object::triangles[0]);
GLint textureLoc = glGetUniformLocation( getId(), "triangles" );
glUniform1f(textureLoc, 0);
setUniform((int)Object::triangles.size(), "triCount");
glBindVertexArray(vao);
glDrawArrays(GL_TRIANGLE_FAN, 0, 4);
//draw a rectangle from -1,-1 to 1,1
Fragment shader code:
uniform sampler1D triangles;
uniform int triCount;
struct Triangle{
vec3 a,b,c;
vec3 normal;
};
void main(){
for(int i = 0;i < triCount;i++){//for each triangle
Triangle triangle;
//set the points of the triangle
triangle.a = vec3(texture(triangles,i));
triangle.b = vec3(texture(triangles,i++));
triangle.c = vec3(texture(triangles,i++));
//set the normal vector of the triangle
triangle.normal = vec3(texture(triangles,i++));
//then i do stuff with the current triangle and return a color
}
}
The array contains 3 points and a normal vector for a bunch of triangles; that's why I read from the texture this way.
Edit:
glGetTexImage confirmed that the passed texture is correct.
When using texture, the texture coordinates are floating-point values in the range [0.0, 1.0]. Use texelFetch to look up a single texel from the texture with integral texture coordinates in the range [0, width):
triangle.a = texelFetch(triangles, i*4, 0).xyz;
triangle.b = texelFetch(triangles, i*4+1, 0).xyz;
triangle.c = texelFetch(triangles, i*4+2, 0).xyz;
triangle.normal = texelFetch(triangles, i*4+3, 0).xyz;
Be aware that the computation of the texel indices in your shader code is incorrect: the i++ expressions increment the loop counter itself, so the indices do not address the 4 texels of one triangle.
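A corrected version of the loop could look roughly like this (a sketch; it assumes each triangle occupies 4 consecutive texels and that triCount is the number of triangles, not the number of texels):
for (int i = 0; i < triCount; i++) {
    Triangle triangle;
    // each triangle uses 4 texels: a, b, c, normal
    triangle.a      = texelFetch(triangles, i*4,     0).xyz;
    triangle.b      = texelFetch(triangles, i*4 + 1, 0).xyz;
    triangle.c      = texelFetch(triangles, i*4 + 2, 0).xyz;
    triangle.normal = texelFetch(triangles, i*4 + 3, 0).xyz;
    // ... do stuff with the current triangle and compute a color
}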
Alternatively, you can calculate the texture coordinate by dividing the index by the width of the texture. The size of a texture can be queried with textureSize:
float width = float(textureSize(triangles, 0));
triangle.a = texture(triangles, (float(i*4)+0.5) / width).xyz;
triangle.b = texture(triangles, (float(i*4)+1.5) / width).xyz;
triangle.c = texture(triangles, (float(i*4)+2.5) / width).xyz;
triangle.normal = texture(triangles, (float(i*4)+3.5) / width).xyz;
I want to make a program that shows the earth with a space texture as the background.
The earth is a 3D model with an earth texture (.bmp).
The space with the stars is a texture (.bmp).
I have summarized what I have to do:
Create a new Model Matrix
Position it at the same place where the camera is
Disable depth test before drawing
Reverse culling
This is the Load function:
void load(){
//Load The Shader
Shader simpleShader("src/shader.vert", "src/shader.frag");
g_simpleShader = simpleShader.program;
// Create the VAO where we store all geometry (stored in g_Vao)
g_Vao = gl_createAndBindVAO();
//Create vertex buffer for positions, colors, and indices, and bind them to shader
gl_createAndBindAttribute(&(shapes[0].mesh.positions[0]), shapes[0].mesh.positions.size() * sizeof(float), g_simpleShader, "a_vertex", 3);
gl_createIndexBuffer(&(shapes[0].mesh.indices[0]), shapes[0].mesh.indices.size() * sizeof(unsigned int));
gl_createAndBindAttribute(uvs, uvs_size, g_simpleShader, "a_uv", 2);
gl_createAndBindAttribute(normal, normal_size, g_simpleShader, "a_normal", 2);
//Unbind Everything
gl_unbindVAO();
//Store Number of Triangles (use in draw())
g_NumTriangles = shapes[0].mesh.indices.size() / 3;
//Paths of the earth and space textures
Image* image = loadBMP("assets/earthmap1k.bmp");
Image* space = loadBMP("assets/milkyway.bmp");
//Generate Textures
glGenTextures(1, &texture_id);
glGenTextures(1, &texture_id2);
//Bind Textures
glBindTexture(GL_TEXTURE_2D, texture_id);
glBindTexture(GL_TEXTURE_2D, texture_id2);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
//We assign your corresponding data
glTexImage2D(GL_TEXTURE_2D,1,GL_RGB,image->width, image->height,GL_RGB,GL_UNSIGNED_BYTE,image->pixels);
glTexImage2D(GL_TEXTURE_2D,1,GL_RGB,space->width, space->height,GL_RGB,GL_UNSIGNED_BYTE,space->pixels);
}
This is the Draw function:
void draw(){
//1. Enable/Disable
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
glDisable(GL_DEPTH_TEST);
glEnable(GL_CULL_FACE);
glCullFace(GL_FRONT);
//2. Shader Activation
glUseProgram(g_simpleShader);
//3. Get All Uniform Locations
//Space:
GLuint model_loc2 = glGetUniformLocation (g_simpleShader, "u_model");
GLuint u_texture2 = glGetUniformLocation(g_simpleShader, "u_texture2");
GLuint u_light_dir2 = glGetUniformLocation(g_simpleShader,"u_light_dir2");
//Earth
GLuint model_loc = glGetUniformLocation(g_simpleShader, "u_model");
GLuint projection_loc = glGetUniformLocation(g_simpleShader, "u_projection");
GLuint view_loc = glGetUniformLocation(g_simpleShader, "u_view");
GLuint u_texture = glGetUniformLocation(g_simpleShader, "u_texture");
GLuint u_light_dir = glGetUniformLocation(g_simpleShader, "u_light_dir");
//4. Get Values From All Uniforms
mat4 model_matrix2 = translate(mat4(1.0f), vec3(1.0f,-3.0f,1.0f));
mat4 model_matrix = translate(mat4(1.0f), vec3(0.0f, -0.35f, 0.0f));
mat4 projection_matrix = perspective(60.0f,1.0f,0.1f,50.0f);
mat4 view_matrix = lookAt(vec3(1.0f, -3.0f, 1.0f), vec3(0.0f, 0.0f, 0.0f), vec3(0.0f, 1.0f, 0.0f));
//5. Upload Uniforms To Shader
glUniformMatrix4fv(model_loc2, 1, GL_FALSE, glm::value_ptr(model_matrix2));
glUniformMatrix4fv(model_loc, 1, GL_FALSE, glm::value_ptr(model_matrix));
glUniformMatrix4fv(projection_loc, 1, GL_FALSE, glm::value_ptr(projection_matrix));
glUniformMatrix4fv(view_loc, 1, GL_FALSE, glm::value_ptr(view_matrix));
glUniform1i(u_texture, 0);
glUniform3f(u_light_dir, g_light_dir.x, g_light_dir.y, g_light_dir.z);
glUniform1i(u_texture2, 1);
glUniform3f(u_light_dir2, g_light_dir.x, g_light_dir.y, g_light_dir.z);
//6. Activate Texture Unit 0 and Bind our Texture Object
glActiveTexture(GL_TEXTURE0);
glActiveTexture(GL_TEXTURE1);
glBindTexture(GL_TEXTURE_2D, texture_id);
glBindTexture(GL_TEXTURE_2D, texture_id2);
//7. Bind VAO
gl_bindVAO(g_Vao);
//8. Draw Elements
glDrawElements(GL_TRIANGLES, 3 * g_NumTriangles, GL_UNSIGNED_INT, 0);
}
Also I have two Fragment Shaders:
The first one returns this:
fragColor = vec4(final_color, 1.0);
The second one returns this:
fragColor = vec4(texture_color.xyz, 1.0);
Also the Vertex Shader returns the position of the vertex:
gl_Position = u_projection * u_view * u_model * vec4( a_vertex , 1.0 );
When I compile it, it only shows the earth, while it should show the earth with space as the background. I have reviewed the code several times but I cannot figure out what is wrong.
Expected result:
My Result
If I see it right, then among other things you are binding the textures incorrectly:
glActiveTexture(GL_TEXTURE0);
glActiveTexture(GL_TEXTURE1);
glBindTexture(GL_TEXTURE_2D, texture_id);
glBindTexture(GL_TEXTURE_2D, texture_id2);
should be:
glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_2D, texture_id);
glActiveTexture(GL_TEXTURE1);
glBindTexture(GL_TEXTURE_2D, texture_id2);
but I prefer that the last active texture unit set is unit 0 ...
glActiveTexture(GL_TEXTURE1);
glBindTexture(GL_TEXTURE_2D, texture_id2);
glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_2D, texture_id);
that will save you a lot of trouble when you start combining this with single-texture-unit code ... Also, I hope you are properly unbinding the used texture units, for the same reasons (see the sketch below)...
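A minimal sketch of such a cleanup after drawing could look like this (just the unbinding, reusing the texture units from above):
glActiveTexture(GL_TEXTURE1);
glBindTexture(GL_TEXTURE_2D, 0); // unbind unit 1
glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_2D, 0); // unbind unit 0 and leave unit 0 active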
You got an ugly seam at the 0/360 degree edge of longitude.
This might be caused by a wrongly computed normal for the lighting, a texture that is not seamless, or simply by forgetting to duplicate the edge points with the correct texture coordinates for the last patch. See:
Applying map of the earth texture a Sphere
You can also add an atmosphere, bump map, or clouds to your planet:
Bump-map a sphere with a texture map
Andrea is right...
Set the matrices to identity and render a (+/-)1.0 rectangle at z = 0.0 (with aspect-ratio correction), with depth test, face culling and depth writes disabled ... That way you will avoid jitter and flickering due to floating-point errors.
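A rough sketch of that idea (drawFullscreenQuad is a hypothetical helper that draws a quad spanning x,y in [-1, 1] at z = 0; the uniform locations are the ones from the question's draw()):
glDisable(GL_DEPTH_TEST); // background must not occlude anything
glDisable(GL_CULL_FACE);
glDepthMask(GL_FALSE);    // do not write depth for the background
glm::mat4 identity(1.0f); // identity model/view/projection
glUniformMatrix4fv(model_loc, 1, GL_FALSE, glm::value_ptr(identity));
glUniformMatrix4fv(view_loc, 1, GL_FALSE, glm::value_ptr(identity));
glUniformMatrix4fv(projection_loc, 1, GL_FALSE, glm::value_ptr(identity));
drawFullscreenQuad();
glDepthMask(GL_TRUE);     // restore state before drawing the earth
glEnable(GL_DEPTH_TEST);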
A skybox is better, but there are also other options to enhance this; see:
Is it possible to make realistic n-body solar system simulation in matter of size and mass?
and all the sublinks in there, especially the ones about stars. You can combine a skybox and a stellar catalog together, and much, much more...
I generate a point cloud in my program, and now I want to be able to click on a point of this point cloud rendered to my screen using OpenGL.
In order to do so, I used the trick of giving each point in an offscreen render a colour based on its index in the VBO. I use the same camera for my offscreen render and my onscreen render so they move together, and when I click, I read the values of my offscreen render to retrieve the position in the VBO and get the point I clicked on. That is the theory; in practice, when I click, I only get (0,0,0). I believe that means my FBO is not rendered correctly, but I'm not sure whether that is the problem or whether it comes from somewhere else...
So here are the steps. clicFBO is the FBO I'm using for offscreen render, and clicTextureColorBuf is the texture in which I write in the FBO
glGenFramebuffers(1, &clicFBO);
glBindFramebuffer(GL_FRAMEBUFFER, clicFBO);
glGenTextures(1, &clicTextureColorBuf);
glBindTexture(GL_TEXTURE_2D, clicTextureColorBuf);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, SCR_WIDTH, SCR_HEIGHT, 0, GL_RGB, GL_UNSIGNED_BYTE, NULL);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, clicTextureColorBuf, 0);
GLenum DrawBuffers[1] = { GL_COLOR_ATTACHMENT0 };
glDrawBuffers(1, DrawBuffers);
After that, I wrote a shader that gives each point a color based on its index in the VBO...
std::vector<cv::Point3f> reconstruction3D; //Will contain the position of my points
std::vector<float> indicesPointsVBO; //Will contain the indexes of each point
for (int i = 0; i < pts3d.size(); ++i) {
reconstruction3D.push_back(pts3d[i].pt3d);
colors3D.push_back(pt_tmp);
indicesPointsVBO.push_back(((float)i / (float)pts3d.size() ));
}
GLuint clicVAO, clicVBO[2];
glGenVertexArrays(1, &clicVAO);
glGenBuffers(2, &clicVBO[0]);
glBindVertexArray(clicVAO);
glBindBuffer(GL_ARRAY_BUFFER, clicVBO[0]);
glBufferData(GL_ARRAY_BUFFER, reconstruction3D.size() * sizeof(cv::Point3f), &reconstruction3D[0], GL_STATIC_DRAW);
glEnableVertexAttribArray(0);
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 0, (GLvoid*)0);
glEnable(GL_PROGRAM_POINT_SIZE);
glBindBuffer(GL_ARRAY_BUFFER, clicVBO[1]);
glBufferData(GL_ARRAY_BUFFER, indicesPointsVBO.size() * sizeof(float), &indicesPointsVBO[0], GL_STATIC_DRAW);
glEnableVertexAttribArray(1);
glVertexAttribPointer(1, 1, GL_FLOAT, GL_FALSE, 0, (GLvoid*)0);
glBindBuffer(GL_ARRAY_BUFFER, 0);
glBindVertexArray(0);
glBindFramebuffer(GL_FRAMEBUFFER, 0);
and the vertex shader:
layout (location = 0) in vec3 pos;
layout (location = 1) in float col;
out float Col;
uniform mat4 model;
uniform mat4 view;
uniform mat4 projection;
uniform int pointSize;
void main()
{
gl_PointSize = pointSize;
gl_Position = projection * view * model * vec4(pos, 1.0);
Col = col;
}
And the Fragment:
#version 330 core
layout(location = 0) out vec4 FragColor;
in float Col;
void main()
{
FragColor = vec4(Col, Col, Col ,1.0);
}
And this is how I render this texture:
glm::mat4 view = camera.GetViewMatrix();
glm::mat4 projection = glm::perspective(glm::radians(camera.Zoom), (float)SCR_WIDTH / (float)SCR_HEIGHT, 1.0f, 100.0f);
glBindFramebuffer(GL_FRAMEBUFFER, clicFBO);
clicShader.use();
glDisable(GL_DEPTH_TEST);
glClearColor(1.0f, 1.0f, 1.0f, 1.0f);
glClear(GL_COLOR_BUFFER_BIT);
clicShader.setMat4("projection", projection);
clicShader.setMat4("view", view);
model = glm::mat4();
clicShader.setMat4("model", model);
clicShader.setInt("pointSize", pointSize);
glBindVertexArray(clicVAO);
glDrawArrays(GL_POINTS, 0, (GLsizei)reconstruction3D.size());
glBindFramebuffer(GL_FRAMEBUFFER, 0);
And then, when I click, I Use this piece of Code:
glBindFramebuffer(GL_FRAMEBUFFER, clicFBO);
glReadBuffer(GL_COLOR_ATTACHMENT0);
int width = 11, height = 11;
std::array<GLfloat, 363> arry{ 1 };
glReadPixels(Xpos - 5, Ypos - 5, width, height, GL_RGB, GL_UNSIGNED_BYTE, &arry);
for (int i = 0; i < 363; i+=3) { // It's 3 time the same number anyways for each number
std::cout << arry[i] << " "; // It gives me only 0's
}
std::cout << std::endl << std::endl;
glBindFramebuffer(GL_FRAMEBUFFER, clicFBO);
I know the error might be really stupid, but I still have some trouble with how OpenGL works.
I put in what I thought was necessary to understand the problem (without extending it too much), but if you need more code, I can post it too.
I know this is not a question you can answer with a simple yes or no, and it's more like debugging my program, but since I really can't find where the problem comes from, I'm looking for someone who can explain to me what I did wrong. I'm not necessarily looking for the solution itself, but for clues that could help me understand where my error is...
Using a framebuffer object (FBO) to store an "object identifier" is a cool method. But you also want to see the objects, right? Then you must also render to the default framebuffer (let me call it "defFB"; it is not an FBO).
Because you need to render to two different targets, you need one of these techniques:
Draw the objects twice (e.g. with two glDrawArrays calls), once to the FBO and a second time to the defFB.
Draw to two images of the FBO at once and later blit one of them (the one with the colors) to the defFB.
For the first technique you may use a texture attached to an FBO (as you currently do). Or you can use a "Renderbuffer" and draw to it.
The second approach needs a second "out" in the fragment shader:
layout(location = 0) out vec3 color; //GL_COLOR_ATTACHMENT0
layout(location = 1) out vec3 objID; //GL_COLOR_ATTACHMENT1
and setting the two attachments with glDrawBuffers.
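On the C++ side, that setup could look roughly like this (a sketch; objIdTexture is a hypothetical second texture, created like clicTextureColorBuf):
GLenum attachments[2] = { GL_COLOR_ATTACHMENT0, GL_COLOR_ATTACHMENT1 };
glBindFramebuffer(GL_FRAMEBUFFER, clicFBO);
glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, clicTextureColorBuf, 0); // color
glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT1, GL_TEXTURE_2D, objIdTexture, 0);        // objID
glDrawBuffers(2, attachments);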
For the blit part, read this answer.
Note that both "out" have the same format, vec3 in this example.
A flaw in your code is that you set an RGB texture format and also use this format in glReadPixels, but your "out" in the FS is a vec4 instead of a vec3.
More concerns are:
Check the completeness with glCheckFramebufferStatus (see the sketch after this list).
Using a "depth attachment" to the FBO may be needed, even it will not be used for reading.
Disabling the depth test will put all elements if the frame. Your point-picking will select the last drawn, not the nearest.
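A minimal completeness check, right after the attachments are set up, could look like this (a sketch; assumes <iostream> is included):
glBindFramebuffer(GL_FRAMEBUFFER, clicFBO);
if (glCheckFramebufferStatus(GL_FRAMEBUFFER) != GL_FRAMEBUFFER_COMPLETE) {
    std::cerr << "clicFBO is not complete" << std::endl;
}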
I found the problem.
There were 2 failures in my code :
The first one is that in OpenGL there is a Y inversion between the image and the framebuffer. So, in order to pick the right point, you have to flip Y using the size of the viewport. I did it like this:
GLint m_viewport[4];
glGetIntegerv(GL_VIEWPORT, m_viewport);
int YposTMP = m_viewport[3] - Ypos - 1;
The second one is the use of
glReadPixels(Xpos - 2, Ypos - 2, width, height, GL_RGB, GL_UNSIGNED_BYTE, &pixels[0]); here the 6th parameter must be GL_FLOAT, since the data I'm reading back is float.
Thanks all!
Best regards,
R.S
So I've recently been learning some OpenGL. I've initially been using the SDL library to draw images on screen, but I figured it would be interesting to try to achieve something similar with OpenGL, which would also let me apply shaders to my images for neat effects such as lighting and day/night cycles. What I'm doing right now is simply loading a texture, then applying that texture to a quad with the same size as the texture. This works well.
Now I want to apply some shaders. This is an example of a vertex and fragment shader that I could apply to one of my textured quads:
in vec2 LVertexPos2D;
void main()
{
gl_Position = vec4( LVertexPos2D.x, LVertexPos2D.y, 0, 1);
}
which does nothing, then my fragment shader:
out vec4 LFragment;
void main()
{
LFragment = vec4(1.0, 1.0, 1.0, 1.0);
}
Which obviously just turns the texture I'm applying it to into a white block, which isn't exactly what I want. Somehow I need to retrieve the current texel color so I can modify that instead of simply replacing it.
I've read that the texture2D function is supposed to return a vec4 with the current texel's data, but I haven't gotten it to work (I'm having a hard time finding a good explanation of its inputs and how it works). Furthermore, texture2D is supposedly deprecated, but I can't get its replacement (texture()) to work either. Any nudges in the right direction would be greatly appreciated!
Edit: I'll throw in some more info on how I'm doing things, this is the function that loads my textures:
texture makeTexture(std::string fileLocation)
{
texture tempTexture;
SDL_Surface *mySurface = IMG_Load(fileLocation.c_str());
if (mySurface == NULL)
{
std::cout << "Error in loading image at: " << fileLocation << std::endl;
return tempTexture;
}
GLuint myTexture;
glGenTextures(1, &myTexture);
glBindTexture(GL_TEXTURE_2D, myTexture);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, mySurface->w, mySurface->h, 0, GL_RGBA, GL_UNSIGNED_BYTE, mySurface->pixels);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glBindTexture(GL_TEXTURE_2D, 0);
tempTexture.texture_id = myTexture;
tempTexture.h = mySurface->h;
tempTexture.w = mySurface->w;
SDL_FreeSurface(mySurface); // free the surface only after its size has been read
return tempTexture;
}
Where this is my texture struct:
struct texture
{
int w;
int h;
GLuint texture_id;
};
and this function draws any texture to a given x and y coordinate:
void draw(int y, int x, texture &tempTexture)
{
glBindTexture(GL_TEXTURE_2D, tempTexture.texture_id);
glBegin(GL_QUADS);
glTexCoord2f(0, 1);
glVertex2f(-1 + ((float)(x) / SCREEN_WIDTH) * 2, 1 - ((float)(y + tempTexture.h) / SCREEN_HEIGHT) * 2); //Bottom left
glTexCoord2f(1, 1);
glVertex2f(-1 + ((float)(x + tempTexture.w)/SCREEN_WIDTH)*2, 1 - ((float)(y + tempTexture.h) / SCREEN_HEIGHT) * 2); //Bottom right?
glTexCoord2f(1, 0);
glVertex2f(-1 + ((float)(x + tempTexture.w) / SCREEN_WIDTH) * 2, 1.0 - ((float)y / SCREEN_HEIGHT) * 2); //top right
glTexCoord2f(0, 0);
glVertex2f(-1 + ((float)(x) / SCREEN_WIDTH) * 2, 1.0 - ((float)y / SCREEN_HEIGHT) * 2); //Top left (notification: Coordinates are (x,y), not (y,x).
glEnd();
glBindTexture(GL_TEXTURE_2D, 0);
}
then in my main render function I'm now doing:
draw(0, 0, myTexture);
glUseProgram(gProgramID);
glUniform1i(baseImageLoc, myTexture2.texture_id);
draw(100, 100, myTexture2);
glUseProgram(NULL);
where myTexture is just a meadow of grass and myTexture2 is a player character that I want to apply some shading shenanigans to. gProgramID is a program that has my two aforementioned shaders attached to it.
In order to access texture data in a shader you have to do the following:
First you need to bind your texture to a specific texture unit (change the active texture unit using glActiveTexture).
Pass the texture unit index as a uniform sampler to the shader.
Access the texture in the shader like the following.
// tex holds the value of the texture unit to be used (not the texture)
uniform sampler2D tex;
void main()
{
vec4 color = texture(tex,texCoord);
LFragment = color;
}
You also need to pass texCoord to the shader as a vertex attribute.
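On the C++ side, steps 1 and 2 could look roughly like this (a sketch, reusing names from the question; "tex" is the sampler from the shader above):
glUseProgram(gProgramID);
glActiveTexture(GL_TEXTURE0);                        // select texture unit 0
glBindTexture(GL_TEXTURE_2D, myTexture2.texture_id); // bind the texture to unit 0
GLint texLoc = glGetUniformLocation(gProgramID, "tex");
glUniform1i(texLoc, 0);                              // pass the unit index (0), not the texture id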
EDIT: Think I've narrowed down the problem. Skip to the running section.
I'm trying to sample a 3D texture in my vertex shader; I'm going to use the texel values as corner values in Marching Cubes. The issue I'm having is that no matter what method I use to sample it, I always get (0,0,0,0). I've tried using texelFetch and texture3D and neither seems to work.
I'm also using transform feedback, but as far as I'm aware that shouldn't cause this issue.
Shader setup:
glEnable(GL_TEXTURE_3D);
Shader vertListTriangles(GL_VERTEX_SHADER_ARB);
vertListTriangles.setSource(lst_tri_vert); //Util to load from file.
vertListTriangles.compile();
vertListTriangles.errorCheck(); //Prints errors to console if they exist - shader compiles fine.
Shader geomListTriangles(GL_GEOMETRY_SHADER_ARB);
geomListTriangles.setSource(lst_tri_geom); //Util to load from file
geomListTriangles.compile();
geomListTriangles.errorCheck(); //Prints errors to console if they exist - shader compiles fine.
program.attach(vertListTriangles);
program.attach(geomListTriangles);
//Setup transform feedback varyings, also works as expected.
const GLchar* varyings1[1];
varyings1[0] = "gTriangle";
glTransformFeedbackVaryings(program.getID(), 1, varyings1, GL_INTERLEAVED_ATTRIBS);
program.link();
program.checkLink(); //Prints link errors to console - program links fine aparently.
Texture setup:
glBindTexture(GL_TEXTURE_3D, textureID);
errorCheck("texture bind"); //<- Detects GL errors, I actually get a GL_INVALID_OPERATION here, not sure if its the cause of the problem though as all subsuquent binds go smoothly.
if(!(glIsTexture(textureID)==GL_TRUE)) consolePrint("Texture Binding Failed."); //Oddly the texture never registers as failed despite the previous error message.
//Generate Texture
GLfloat volumeData[32768*3];
for(int z = 0; z < 32; z++)
{
for(int y = 0; y < 32; y++)
{
for(int x = 0; x < 32; x++)
{
//Set all 1s for testing purposes
volumeData[(x*3)+(y*96)+(z*3072)] = 1.0f;
volumeData[(x*3)+(y*96)+(z*3072)+1] = 1.0f;
volumeData[(x*3)+(y*96)+(z*3072)+2] = 1.0f;
}
}
}
glTexParameteri(GL_TEXTURE_3D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_3D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_3D, GL_TEXTURE_MAX_LEVEL, 0);
glTexParameteri(GL_TEXTURE_3D, GL_TEXTURE_BASE_LEVEL, 0);
glTexImage3D(GL_TEXTURE_3D, 0, GL_RGB8, 32, 32, 32, 0, GL_RGB,
GL_FLOAT, volumeData);
glBindTexture(GL_TEXTURE_3D, 0);
Running Shader:
EDIT: Here it gets interesting. If I specify an incorrect uniform name or comment out the below lines it appears to work.
program.use();
//Disable Rastering
glEnable(GL_RASTERIZER_DISCARD);
//Input buffer: Initial vertices
glBindBuffer(GL_ARRAY_BUFFER, mInitialDataBuffer);
glEnableVertexAttribArray(0);
glVertexAttribIPointer(0, 1, GL_UNSIGNED_INT, 0, 0); //Initial input is array of uints
//Output buffer: Triangles
glBindBufferBase(GL_TRANSFORM_FEEDBACK_BUFFER, 0, mTriangleBuffer); //Triangle Markers, in the form of uints. NOT actual triangles.
//Texture setup
//If I comment out from here....
GLint sampler = glGetUniformLocation(program.getID(), "densityVol");
glUniform1i(sampler, GL_TEXTURE0);
glActiveTexture(GL_TEXTURE0);
//To here. It appears to work.
glBindTexture(GL_TEXTURE_3D, textureID);
//Just using this to debug texture.
//test is all 1s, so the texture is uploading correctly.
GLfloat test[32768*3];
memset(test, 0, sizeof(test));
glGetTexImage(GL_TEXTURE_3D, 0, GL_RGB, GL_FLOAT, test);
//Transform Feedback and Draw
glBeginTransformFeedback(GL_POINTS);
glDrawArrays(GL_POINTS, 0, 29790);
glEndTransformFeedback();
//Re-enable Rastering and cleanup
glDisable(GL_RASTERIZER_DISCARD);
glDisableVertexAttribArray(0);
glBindBuffer(GL_ARRAY_BUFFER, 0);
My code is a little more spread out in reality, but I hope I managed to edit it into something cohesive. Anyway, if I map the output buffer it does indeed output some information; however, it behaves as if all the texture data were 0s. I hacked the shader to just output some test results instead, but I can't find any evidence that the shader is using the texture correctly:
#version 410
#extension GL_EXT_gpu_shader4 : require
layout (location = 0) in int x_y_z;
uniform sampler3D densityVol;
out Voxel
{
/*
Each triangle is the edges it joins. There are 12 edges and so we need 12 bits. 4 For each edge.
There are up to 32 voxels, which means we need 6 bits for each coord, which is 18.
30 bits total.
int format 00xxxxxxyyyyyyzzzzzz111122223333
*/
uint triangles[5];
uint triangleCount;
} vVoxel;
//... Omitted some huge ref tables.
void main()
{
vec4 sample0 = texture3D(densityVol, vec3(0.1,0.1,0.1) );
vec4 sample1 = texture3D(densityVol, vec3(0.9,0.9,0.9) );
vec4 sample2 = texture3D(densityVol, vec3(0.1,0.1,0.9) );
vec4 sample3 = texture3D(densityVol, vec3(0.9,0.9,0.1) );
if(sample0.r > 0.0f)
{
vVoxel.triangles[1] = 1;
}
if(sample1.r > 0.0f)
{
vVoxel.triangles[2] = 2;
}
if(sample2.r > 0.0f)
{
vVoxel.triangles[3] = 3;
}
if(sample3.r > 0.0f)
{
vVoxel.triangles[4] = 4;
}
vVoxel.triangleCount = 5;
}
Not the best-designed test, but I didn't want to write something from scratch. If I change the if clauses to if(true), the test outputs correctly. When the shader is compiled as above, the buffer is blank. I'm using a geometry shader for pass-through.
Can anyone see an obvious mistake in there? I've been stumped for about 2 hours now and I can't see what I'm doing differently from many of the GLSL texturing tutorials.
Okay, figured it out.
glUniform1i(sampler, GL_TEXTURE0);
The GL_TEXTURE0 is incorrect here: the sampler uniform expects the index of the texture unit (0, 1, 2, ...), not the GL_TEXTURE0 enum value.
glUniform1i(sampler, 0);
is how it should be.
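For completeness, a minimal sketch of the whole binding sequence (reusing sampler, program and textureID from the question):
glUseProgram(program.getID());
GLint sampler = glGetUniformLocation(program.getID(), "densityVol");
glUniform1i(sampler, 0);                 // texture unit index, not GL_TEXTURE0
glActiveTexture(GL_TEXTURE0);            // select unit 0 ...
glBindTexture(GL_TEXTURE_3D, textureID); // ... and bind the 3D texture to it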