Procedural generation of stars with skybox - c++

I am attempting to procedurally generate a star-filled background in OpenGL.
The approach I am taking is to create a skybox with a cubemap texture. Each side of the cubemap texture essentially consists of a 2048x2048 black image with randomly selected texels set to White. Here is the result:
I'm not sure how obvious it is from the image, but when moving around a very distinct box shape can be made out as stars close to the edge of the box appear smaller and closer together. How can I prevent this? Do I need to abandon the skybox approach and use something like a skysphere instead?
EDIT: here is how I am mapping the cubemap onto the sky.
// Create and bind texture.
glGenTextures(1, &texture_);
glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_CUBE_MAP, texture_);
// Upload one generated star field per cube face.  The face enums are
// contiguous, so GL_TEXTURE_CUBE_MAP_POSITIVE_X + i walks the faces in
// order +X, -X, +Y, -Y, +Z, -Z.
for (unsigned int i = 0; i < 6; ++i) {
std::vector<std::uint8_t> image = generateTexture(TEXTURE_WIDTH, TEXTURE_HEIGHT);
glTexImage2D(GL_TEXTURE_CUBE_MAP_POSITIVE_X + i, 0, GL_RGB, TEXTURE_WIDTH, TEXTURE_HEIGHT,
0, GL_RGB, GL_UNSIGNED_BYTE, image.data());
}
// Set texture parameters.
// CLAMP_TO_EDGE on all three axes avoids visible seams where faces meet.
glTexParameteri(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_WRAP_R, GL_CLAMP_TO_EDGE);
Here is the definition of the generateTexture function:
std::vector<std::uint8_t> Stars::generateTexture(GLsizei width, GLsizei height) {
// Build an all-black, tightly packed RGB image (3 bytes per texel) and
// then scatter NUM_STARS white texels into it.
const std::size_t byteCount = static_cast<std::size_t>(3 * width * height);
std::vector<std::uint8_t> texels(byteCount);
add_stars(texels, NUM_STARS);
return texels;
}
// Sets `nStars` randomly chosen texels of a packed RGB image to pure white.
// The image is interpreted as 3 bytes per texel; texels may be chosen more
// than once (with 2048x2048 texels a collision is rare and harmless).
void Stars::add_stars(std::vector<std::uint8_t>& image, unsigned int nStars) {
// Guard: an image smaller than one texel would underflow the distribution
// bounds below (image.size() / 3 - 1 wraps around for size() < 3).
if (image.size() < 3) return;
// Seed once from a non-deterministic source.  The original code default-
// constructed the engine on every call, which always yields the same
// sequence -- so every cubemap face received an identical star pattern.
static std::default_random_engine eng{std::random_device{}()};
std::uniform_int_distribution<std::size_t> dist(0, image.size() / 3 - 1);
while (nStars--) {
std::size_t index = 3 * dist(eng);
image[index++] = 255;
image[index++] = 255;
image[index++] = 255;
}
}
EDIT2: here is the draw function used to render the sky.
// Renders the star skybox.  Assumes it is called after all opaque geometry.
void Stars::draw(const Camera& camera) const {
// Skybox will be rendered last. In order to ensure that the stars are rendered at the back of
// the scene, the depth buffer is filled with values of 1.0 for the skybox -- this is done in
// the vertex shader. We need to make sure that the skybox passes the depth test with values
// less than or equal to the depth buffer.
glDepthFunc(GL_LEQUAL);
program_.enable();
// Calculate view-projection matrix and set the corresponding uniform. The view matrix must be
// stripped of translation components so that the skybox follows the camera.
glm::mat4 view = glm::mat4(glm::mat3(camera.viewMatrix()));
glm::mat4 projection = camera.projectionMatrix();
glm::mat4 VP = projection * view;
glUniformMatrix4fv(program_.uniformLocation("VP"), 1, GL_FALSE, glm::value_ptr(VP));
// Bind buffer objects and texture to current context and draw.
glBindVertexArray(vao_);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, ebo_);
glBindTexture(GL_TEXTURE_CUBE_MAP, texture_);
glDrawElements(GL_TRIANGLES, static_cast<GLsizei>(INDICES.size()), GL_UNSIGNED_INT,
reinterpret_cast<GLvoid *>(0));
// Unbind everything and restore the default depth comparison.
glBindVertexArray(0);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, 0);
glBindTexture(GL_TEXTURE_CUBE_MAP, 0);
program_.disable();
glDepthFunc(GL_LESS);
}

generate stars uniformly in some cubic volume
x=2.0*Random()-1.0; // <-1,+1>
y=2.0*Random()-1.0; // <-1,+1>
z=2.0*Random()-1.0; // <-1,+1>
project them on unit sphere
So just compute the length of vector (x,y,z) and divide the coordinates by it.
project the result onto the cube map
Each side of cube is defined by the plane so find intersection of ray casted from (0,0,0) through Cartesian star position and the planes. Take the intersection with shortest distance to (0,0,0) and use that as final star position.
The implementation could be something like this OpenGL&C++ code:
// Demo: take the vertices of a triangulated sphere mesh, draw them directly
// (red, the "sphere map"), then project each one onto the unit cube and draw
// that too (blue, the "cube map") with additive blending for comparison.
glClearColor(0.0,0.0,0.0,0.0);
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
int i,n=10000;
float a,b,x,y,z;
//RandSeed=8123456789;
n=obj.pnt.num; // triangulated sphere point list
glDepthFunc(GL_LEQUAL);
glEnable(GL_BLEND);
glBlendFunc(GL_ONE,GL_ONE);
glPointSize(2.0);
glBegin(GL_POINTS);
for (i=0;i<n;i++)
{
// equidistant points instead of random to test this
x=obj.pnt[i].p[0];
y=obj.pnt[i].p[1];
z=obj.pnt[i].p[2];
/*
// random star spherical position
a=2.0*M_PI*Random();
b=M_PI*(Random()-0.5);
// spherical 2 cartesian r=1;
x=cos(a)*cos(b);
y=sin(a)*cos(b);
z= sin(b);
*/
// redish sphere map
glColor3f(0.6,0.3,0.0); glVertex3f(x,y,z);
// cube half size=1 undistort // using similarities like: yy/xx = y/x
// Pick the dominant axis and scale the point onto the matching cube face.
if ((fabs(x)>=fabs(y))&&(fabs(x)>=fabs(z))){ y/=x; z/=x; if (x>=0) x=1.0; else x=-1.0; }
else if ((fabs(y)>=fabs(x))&&(fabs(y)>=fabs(z))){ x/=y; z/=y; if (y>=0) y=1.0; else y=-1.0; }
else if ((fabs(z)>=fabs(x))&&(fabs(z)>=fabs(y))){ x/=z; y/=z; if (z>=0) z=1.0; else z=-1.0; }
// bluish cube map
glColor3f(0.0,0.3,0.6); glVertex3f(x,y,z);
}
glEnd();
glPointSize(1.0);
glDisable(GL_BLEND);
glFlush();
SwapBuffers(hdc);
Looks like it works as it should here preview (of the blended sphere/cube map):
Although it looks like there are holes, there are none (it may be some blending error); if I disable the sphere map render, then there are no visible holes or distortions in the mapping.
The sphere triangulation mesh obj used to test this is taken from here:
Sphere triangulation
[Edit1] yes there was a silly blending error
I repaired the code, but that blending problem persists anyway. Regardless, this mapping works as it should; here is the updated code result:
So just adapt the code to your texture generator ...
[Edit2] Random stars
// Demo: generate uniformly random stars inside the unit cube, normalise them
// onto the unit sphere (red points), then project the same stars onto the
// cube map (blue points); additive blending lets the two mappings overlap.
glClearColor(0.0,0.0,0.0,0.0);
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
int i;
float x,y,z,d;
RandSeed=8123456789;
glDepthFunc(GL_LEQUAL);
glEnable(GL_BLEND);
glBlendFunc(GL_ONE,GL_ONE);
glPointSize(2.0);
glBegin(GL_POINTS);
for (i=0;i<1000;i++)
{
// uniform random cartesian stars inside cube
x=(2.0*Random())-1.0;
y=(2.0*Random())-1.0;
z=(2.0*Random())-1.0;
// project on unit sphere
d=sqrt((x*x)+(y*y)+(z*z));
// reject points too close to the origin (retry by rewinding the counter)
if (d<1e-3) { i--; continue; }
d=1.0/d;
x*=d; y*=d; z*=d;
// redish sphere map
glColor3f(0.6,0.3,0.0); glVertex3f(x,y,z);
// cube half size=1 undistort using similarities like: y/x = y'/x'
// NOTE: dividing by the *signed* dominant coordinate flips the sign of the
// other two coordinates when it is negative; the [Edit3] version further
// below divides by fabs() instead, which fixes that.
if ((fabs(x)>=fabs(y))&&(fabs(x)>=fabs(z))){ y/=x; z/=x; if (x>=0) x=1.0; else x=-1.0; }
else if ((fabs(y)>=fabs(x))&&(fabs(y)>=fabs(z))){ x/=y; z/=y; if (y>=0) y=1.0; else y=-1.0; }
else if ((fabs(z)>=fabs(x))&&(fabs(z)>=fabs(y))){ x/=z; y/=z; if (z>=0) z=1.0; else z=-1.0; }
// bluish cube map
glColor3f(0.0,0.3,0.6); glVertex3f(x,y,z);
}
glEnd();
glPointSize(1.0);
glDisable(GL_BLEND);
glFlush();
SwapBuffers(hdc);
Here is the blend of both (1000 stars):
And Here only the cube-map (10000 stars)
[Edit3] The Blend problem solved
It was caused by Z-fighting and occasional changing of sign for some coordinates during the projection due to forgotten fabs here fixed code:
// Final fixed demo: same as above, but the cube projection divides by
// fabs() of the dominant coordinate (so signs are preserved), and the depth
// test is disabled (GL_ALWAYS) to avoid Z-fighting between the overlapping
// sphere and cube points.
glClearColor(0.0,0.0,0.0,0.0);
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
int i;
float x,y,z,d;
RandSeed=8123456789;
glDepthFunc(GL_ALWAYS);
// glDepthFunc(GL_LEQUAL);
glEnable(GL_BLEND);
glBlendFunc(GL_ONE,GL_ONE);
glPointSize(2.0);
glBegin(GL_POINTS);
for (i=0;i<25000;i++)
{
// uniform random cartesian stars inside cube
x=(2.0*Random())-1.0;
y=(2.0*Random())-1.0;
z=(2.0*Random())-1.0;
// project on unit sphere
d=sqrt((x*x)+(y*y)+(z*z));
// reject points too close to the origin (retry by rewinding the counter)
if (d<1e-3) { i--; continue; }
d=1.0/d;
x*=d; y*=d; z*=d;
// redish sphere map
glColor3f(0.6,0.3,0.0); glVertex3f(x,y,z);
// cube half size=1 undistort using similarities like: y/x = y'/x'
// Dividing by fabs() keeps the signs of the two minor coordinates intact.
if ((fabs(x)>=fabs(y))&&(fabs(x)>=fabs(z))){ y/=fabs(x); z/=fabs(x); if (x>=0) x=1.0; else x=-1.0; }
else if ((fabs(y)>=fabs(x))&&(fabs(y)>=fabs(z))){ x/=fabs(y); z/=fabs(y); if (y>=0) y=1.0; else y=-1.0; }
else if ((fabs(z)>=fabs(x))&&(fabs(z)>=fabs(y))){ x/=fabs(z); y/=fabs(z); if (z>=0) z=1.0; else z=-1.0; }
// bluish cube map
glColor3f(0.0,0.3,0.6); glVertex3f(x,y,z);
}
glEnd();
glPointSize(1.0);
glDisable(GL_BLEND);
glFlush();
SwapBuffers(hdc);
And here the Blend result finally the colors are as should be so the sphere and cube stars overlaps perfectly (white) while viewing from (0,0,0):

Related

Black lines between 2d textures

I just started learning opengl technology.
My program draw 2d isometric tiles and program output this:
For unknown reasons, black lines appear when two textures overlap or touch.
Code example:
typedef unsigned int ID;
// GraphicEngine owns the GL objects (VAO/VBO/EBO and the shader program)
// used to draw textured quads.  Non-copyable and non-movable because it
// manages raw GL handles whose lifetime is tied to this object.
class GraphicEngine {
public:
GraphicEngine();
~GraphicEngine();
// Compiles and links the given GLSL sources into shaderProgram.
void initShaders(const char* vertexShaderSource, const char* fragmentShaderSource);
// Uploads quad vertex data (`size` is in bytes) and configures the VAO layout.
void initRenderData(float vertices[], unsigned int size);
// Loads one texture per path; returns the generated GL texture ids.
std::vector<ID> initTextures(std::vector<std::string>& paths);
void drawTextures(std::vector<ID> testuresIds);
private:
// Deleted copy ctor now takes const& (conventional form; the original
// non-const reference also blocked copies, but less idiomatically).
GraphicEngine(const GraphicEngine&) = delete;
GraphicEngine(GraphicEngine&&) = delete;
GraphicEngine& operator=(const GraphicEngine& other) = delete;
private:
unsigned int VBO = 0;
unsigned int VAO = 0;
unsigned int EBO = 0;
unsigned int shaderProgram = 0; // was uninitialised -- now destructor-safe
};
// Construction does no GL work; the GL objects are created lazily by the
// init*() methods once a context exists.
GraphicEngine::GraphicEngine() = default;
// Releases the GL buffer objects.  Requires a current GL context.
// NOTE(review): shaderProgram is never released -- glDeleteProgram() appears
// to be missing here; confirm and add it.  The textures returned by
// initTextures() are likewise never deleted.
GraphicEngine::~GraphicEngine() {
glDeleteVertexArrays(1, &VAO);
glDeleteBuffers(1, &VBO);
glDeleteBuffers(1, &EBO);
}
// Enables alpha blending and builds the shader program from the given
// vertex/fragment GLSL source strings.
// NOTE(review): compile/link status is never checked (glGetShaderiv /
// glGetProgramiv) -- GLSL errors will pass silently; confirm and add checks.
// NOTE(review): the two shader objects are never deleted after linking
// (glDeleteShader), leaking them for the lifetime of the context.
void GraphicEngine::initShaders(const char* vertexShaderSource, const char* fragmentShaderSource) {
// Conventional (non-premultiplied) alpha blending.
glEnable(GL_BLEND);
glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA);
unsigned int vertexShader = glCreateShader(GL_VERTEX_SHADER);
unsigned int fragmentShader = glCreateShader(GL_FRAGMENT_SHADER);
shaderProgram = glCreateProgram();
glShaderSource(vertexShader, 1, &vertexShaderSource, NULL);
glCompileShader(vertexShader);
glShaderSource(fragmentShader, 1, &fragmentShaderSource, NULL);
glCompileShader(fragmentShader);
glAttachShader(shaderProgram, vertexShader);
glAttachShader(shaderProgram, fragmentShader);
glLinkProgram(shaderProgram);
}
// Uploads one textured quad (two triangles) into the VAO/VBO/EBO.
// `vertices` is expected to hold 4 vertices of 5 floats each: x, y, z
// position followed by u, v texture coordinates (see the stride below);
// `size` is the byte size of that array.
void GraphicEngine::initRenderData(float vertices[], unsigned int size) {
// Two triangles covering the quad: (0,1,3) and (1,2,3).
unsigned int indices[] = {
0, 1, 3,
1, 2, 3
};
glGenVertexArrays(1, &VAO);
glGenBuffers(1, &VBO);
glGenBuffers(1, &EBO);
// Bind the VAO first so the buffer bindings and attribute layout below are
// recorded into it.
glBindVertexArray(VAO);
glBindBuffer(GL_ARRAY_BUFFER, VBO);
glBufferData(GL_ARRAY_BUFFER, size, vertices, GL_STATIC_DRAW);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, EBO);
glBufferData(GL_ELEMENT_ARRAY_BUFFER, sizeof(indices), indices, GL_STATIC_DRAW);
// Attribute 0: position (3 floats); attribute 1: UV (2 floats); stride 5 floats.
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 5 * sizeof(float), (void*)0);
glEnableVertexAttribArray(0);
glVertexAttribPointer(1, 2, GL_FLOAT, GL_FALSE, 5 * sizeof(float), (void*)(3 * sizeof(float)));
glEnableVertexAttribArray(1);
}
std::vector<ID> GraphicEngine::initTextures(std::vector<std::string>& paths) {
std::vector<ID> ids(paths.size());
stbi_set_flip_vertically_on_load(true);
for (int i = 0; i < paths.size(); i++) {
unsigned int texture;
glGenTextures(1, &ids[i]);
glBindTexture(GL_TEXTURE_2D, ids[i]);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
int width, height, nrChannels;
unsigned char* data = stbi_load(paths[i].c_str(), &width, &height, &nrChannels, STBI_rgb_alpha);
if (data)
{
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, width, height, 0, GL_RGBA, GL_UNSIGNED_BYTE, data);
glGenerateMipmap(GL_TEXTURE_2D);
}
stbi_image_free(data);
}
return ids;
}
// Draws two staggered rows of four tiles each, per texture id: the second
// row is offset by half a tile width and one row height to interleave the
// isometric tiles.
// NOTE(review): this excerpt appears to be missing the function's closing
// brace (the braces do not balance before the SCR_WIDTH constant below).
// NOTE(review): `ex` is never used; the uniform location is re-queried on
// every draw call -- it could be cached after the program is linked.
void GraphicEngine::drawTextures(std::vector<ID> testuresIds) {
static bool ex = false;
for (auto testureId : testuresIds) {
// Top row of 4 tiles, spaced 0.6 units apart.
for (int i = 0; i < 4; i++) {
glBindTexture(GL_TEXTURE_2D, testureId);
glm::mat4 transform = glm::mat4(1.0f);
transform = glm::translate(transform, glm::vec3(i * 0.6f + 0.0f, 0.0f, 0.0f));
glUseProgram(shaderProgram);
unsigned int transformLoc = glGetUniformLocation(shaderProgram, "transform");
glUniformMatrix4fv(transformLoc, 1, GL_FALSE, glm::value_ptr(transform));
glBindVertexArray(VAO);
glDrawElements(GL_TRIANGLES, 6, GL_UNSIGNED_INT, 0);
}
// Second row, shifted by (-0.3, -0.16) so the tiles interlock.
for (int i = 0; i < 4; i++) {
glBindTexture(GL_TEXTURE_2D, testureId);
glm::mat4 transform = glm::mat4(1.0f);
transform = glm::translate(transform, glm::vec3(i * 0.6f - 0.3f, -0.16f, 0.0f));
glUseProgram(shaderProgram);
unsigned int transformLoc = glGetUniformLocation(shaderProgram, "transform");
glUniformMatrix4fv(transformLoc, 1, GL_FALSE, glm::value_ptr(transform));
glBindVertexArray(VAO);
glDrawElements(GL_TRIANGLES, 6, GL_UNSIGNED_INT, 0);
}
}
// Initial framebuffer size in pixels; the framebuffer-size callback keeps
// the viewport in sync after resizes.
const unsigned int SCR_WIDTH = 800;
const unsigned int SCR_HEIGHT = 600;
// The GLFW window handle starts null; it is created in initWindowResources().
Window::Window():window(nullptr) {}
// glfwTerminate() destroys any remaining windows and releases GLFW state.
Window::~Window() {
glfwTerminate();
}
// Initialises GLFW, creates the window + GL 3.3 core context, registers the
// resize callback and loads GL function pointers via glad.
// Returns true only if every step succeeded.
bool Window::initWindowResources() {
bool result = false;
if (glfwInit() == GLFW_TRUE) {
glfwWindowHint(GLFW_CONTEXT_VERSION_MAJOR, 3);
glfwWindowHint(GLFW_CONTEXT_VERSION_MINOR, 3);
glfwWindowHint(GLFW_OPENGL_PROFILE, GLFW_OPENGL_CORE_PROFILE);
window = glfwCreateWindow(SCR_WIDTH, SCR_HEIGHT, "LearnOpenGL", NULL, NULL);
if (window != nullptr) {
glfwMakeContextCurrent(window);
// Register the resize callback unconditionally.  The original code
// branched on the return value, but glfwSetFramebufferSizeCallback
// returns the PREVIOUSLY installed callback (NULL on first use), not
// an error code -- success must not be inferred from it.
glfwSetFramebufferSizeCallback(window, [](GLFWwindow* window, int width, int height) {
glViewport(0, 0, width, height); });
if (gladLoadGLLoader((GLADloadproc)glfwGetProcAddress)) {
result = true;
}
}
}
return result;
}
// Minimal GLSL 3.30 pass-through shaders: the vertex position is transformed
// by a single `transform` matrix and the UV is forwarded to a texture lookup.
const char* vertexShaderSource =
"#version 330 core\n"
"layout(location = 0) in vec3 aPos;\n"
"layout(location = 1) in vec2 aTexCoord;\n"
"out vec2 TexCoord;\n"
"uniform mat4 transform;\n"
"void main()\n"
"{\n"
" gl_Position = transform * vec4(aPos, 1.0);\n"
" TexCoord = vec2(aTexCoord.x, aTexCoord.y);\n"
"}\n\0";
// NOTE(review): `in vec3 ourColor;` below is never used and has no matching
// vertex-shader output -- it can be removed from the shader source.
const char* fragmentShaderSource =
"#version 330 core\n"
"out vec4 FragColor;\n"
"in vec3 ourColor;\n"
"in vec2 TexCoord;\n"
"uniform sampler2D texture1;\n"
"void main()\n"
"{\n"
" FragColor = texture(texture1, TexCoord);\n"
"}\n\0";
// Main render loop: builds shaders/geometry/textures once, then clears and
// redraws the tiles every frame until the window is closed.
void Window::mainWindowLoop() {
graphicEngine.initShaders(vertexShaderSource, fragmentShaderSource);
// Fixed: the original literal contained "...Олег\\\Desktop..." -- the stray
// third backslash formed the invalid escape sequence "\D".
std::vector<std::string> pathsTextures = { "C:\\Users\\Олег\\Desktop\\sea1.png" };
// 4 vertices x 5 floats: x, y, z position followed by u, v texture coords
// (the original comment mentioned colors, but no color data is present).
float vertices[] = {
// positions          // texture coords
-1.3f, 0.16f, 0.0f, 1.0f, 1.0f, // top right
-1.3f, -0.16f, 0.0f, 1.0f, 0.0f, // bottom right
-0.7f, -0.16f, 0.0f, 0.0f, 0.0f, // bottom left
-0.7f, 0.16f, 0.0f, 0.0f, 1.0f // top left
};
graphicEngine.initRenderData(vertices, sizeof(vertices));
std::vector<ID> idsTextures = graphicEngine.initTextures(pathsTextures);
while (!glfwWindowShouldClose(window))
{
// White background, matching the screenshots in the question.
glClearColor(1.0f, 1.0f, 1.0f, 1.0f);
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
graphicEngine.drawTextures(idsTextures);
glfwSwapBuffers(window);
glfwPollEvents();
}
}
int main()
{
Window window;
if (window.initWindowResources()) {
window.mainWindowLoop();
}
return 0;
}
Png: Size png: 62x34 pixels, Transparent sprite, use prog to created png: piskelapp
Please provide information about this issue: the reasons for it and how to fix it.
I was able to reproduce your issue. You are working with non-premultiplied alpha, this is known for producing undesirable results when rendering translucent images.
Take a look at this article: http://www.realtimerendering.com/blog/gpus-prefer-premultiplication/
Now, to solve your problem, first change your blend function to glBlendFunc(GL_ONE, GL_ONE_MINUS_SRC_ALPHA).
Second, stbi doesn't pre-multiply the alpha on load, you have to do it manually.
Each pixel is composed by 4 bytes, red, green, blue and alpha, on the 0-255 range. Convert each value to the normalized range (0.0f - 1.0f) by dividing by 255.0f, multiply r, g, and b by alpha, then multiply it back by 255.0f;
The dark lines at the edge of the tiles are results of alpha blending and texture filtering.
The linked tile image (PNG) contains three premultipled color channels (red, green, blue) and transparency information (alpha channel) with no partially transparent pixels (the alpha value is either 1.0 or 0.0 everywhere, which results in sharp edges):
This can be checked in an image editor (for example Gimp). The image uses premultiplied alpha, i.e. the color channels were masked by the alpha channel and only contain color information where the alpha channel is non-zero.
The area outside of the valid image region is all black, so when OpenGL uses linear texture interpolation (GL_LINEAR) it will mix the hidden black texels right at the edge with the visible colored texels, which can result in a dark color, depending on the used blending function.
Alpha blending mixes the already present color in the framebuffer (of the cleared background or the already written fragments) with the incoming ones.
The used blending function glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA) instructs the hardware to do this for every pixel:
The result: dark artifacts at the edges of each tile caused by the interpolated alpha value at the edges of the tile, which darkens the source color (sRGB * sA) (modified example with original tile image, reproduced issue from the original post):
In other words:
https://shawnhargreaves.com/blog/texture-filtering-alpha-cutouts.html:
Texture filtering: alpha cutouts
(...)
Filtering applies equally to the RGB and alpha channels. When used on the alpha
channel of a cutout texture it will produce new fractional alpha
values around the edge of the shape, which makes things look nice and
antialiased. But filtering also produces new RGB colors, part way in
between the RGB of the solid and transparent parts of the texture.
Thus the RGB values of supposedly transparent pixels can bleed into
our final image.
This most often results in dark borders around alpha cutouts, since
the RGB of transparent pixels is often black. Depending on the
texture, the bleeding could alternatively be white, pink, etc.
To quick-fix the problem, the blending function could simply by changed to glBlendFunc(GL_ONE, GL_ONE_MINUS_SRC_ALPHA), since the tile image already has premultiplied RGB channels, which are all black (= 0) in transparent areas:
https://shawnhargreaves.com/blog/premultiplied-alpha.html:
Premultiplied alpha is better than conventional blending for several
reasons:
It works properly when filtering alpha cutouts (...)
It works properly when doing image composition (...)
It is a superset of both conventional and additive blending. If you set alpha to zero while RGB is non zero, you get an additive
blend. This can be handy for particle systems that want to smoothly
transition from additive glowing sparks to dark pieces of soot as the
particles age.
The result: dark artifacts disappear almost entirely after changing the blending function (modified example with original tile image, issue partially fixed):
Not perfect.
To fix this, some pixels could be drawn around the tile to enlarge the visible area a bit:
To let tiles overlap a bit, like that:
The result (with texture filtering, and overlapped pixels):
(Additionally, lines/other graphical elements could be drawn on top of the artifacts to cover them up. And if the pixelated jagged edges are not wanted, the actual textured polygons quads could be replaced by rhombuses that could be placed precisely next to each other in a continuous mesh that could be rendered in one draw call, no alpha blending required anymore, however sharp edges do not fit a pixelated look I guess.)
A possible solution using GL_NEAREST:
OpenGL texture parameters:
To get rid of the artefacts and blurred/filtered look, GL_LINEAR can be replaced by GL_NEAREST, which disables texture interpolation altogether for the selected texture and lets OpenGL render the raw pixels without applying texture filtering (GL_CLAMP_TO_EDGE makes sense here to avoid artifacts at the edges):
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
Power of Two Textures:
OpenGL performance can be improved by always using texture dimensions that
are a power of two, e.g. 64x32 (instead of 60x32 in your case). - The tile image could be modified, e.g.: 2 pixels added on each side (and borders marked):
Side note: This restriction is not that important anymore, but in the past it was even necessary to use a special extension to enable NPOT textures:
Conventional OpenGL texturing is limited to images with
power-of-two dimensions and an optional 1-texel border.
ARB_texture_non_power_of_two extension relaxes the size restrictions
for the 1D, 2D, cube map, and 3D texture targets.
Snap to pixel:
There are multiple ways to do this with OpenGL.
I would recommend to scale the orthographic projection, so that 1 OpenGL
coordinate unit exactly matches 1 texel unit. That way, tiles can be precisely placed on the pixel grid (just shift coordinates of the tile vertices by 64 pixels/OpenGL units left/right, to get to the next one, in this example). Coordinates could be represented as integers in the engine now.
Modified code example:
void GraphicEngine::drawTextures(std::vector<ID> testuresIds, float wndRatio) {
const int countx = 3, county = 3; /* number of tiles */
const float scale = 100.0f; /* zoom */
const glm::mat4 mvp = glm::ortho(-wndRatio * scale, wndRatio * scale, -scale, scale, 2.0f, -2.0f);
const float offx = -((countx * TILE_WIDTH * 2.0f) * 0.5f - TILE_WIDTH);
const float offy = -TILE_WIDTH * 0.5f;
for (auto testureId : testuresIds) {
for (int y = 0; y < county; y++) {
for (int x = 0; x < countx - (y & 1 ? 1 : 0); x++) {
const glm::mat4 transform = mvp * glm::translate(glm::mat4(1.0f), glm::vec3(
offx + x * TILE_WIDTH * 2.0f + (y & 1 ? TILE_WIDTH : 0.0f),
offy + y * TILE_HEIGHT, 0.0f));
glBindTexture(GL_TEXTURE_2D, testureId);
const GLint transformLoc = glGetUniformLocation(shaderProgram, "transform");
glUniformMatrix4fv(transformLoc, 1, GL_FALSE, glm::value_ptr(transform));
glUseProgram(shaderProgram);
glBindVertexArray(VAO);
glDrawElements(GL_TRIANGLES, 6, GL_UNSIGNED_INT, 0);
}
}
}
}
Screenshot of modified example:
And without marked edges:
Some hints on the use of "straight alpha textures":
Another approach to solve this might be the use of an unmasked/unpremultiplied/straight alpha texture. The color channels of the original tile image can be flood filled out like this:
(Note: The linked PNG image above can't be used directly. Imgur seems to convert transparent PNG images and automatically masks the color channels...)
This technique could help to reduce the artifacts when texture filtering and the conventional alpha blending function is used (i.e. GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA). However, the background will always show through a tiny bit, because some pixels are always sightly transparent at the edges (caused by texture filtering):
(The result should be very similar to the first solution above, where the original premultiplied image is used with modified alpha blending (GL_ONE, GL_ONE_MINUS_SRC_ALPHA).)
If the tile contains not just a plain color, the color information at the edges of the tile would need to be extended outwards to avoid artifacts.
Obviously this doesn't solve the original issue completely, when a precise 2D look is the goal. But it could be useful in other situations, where the hidden pixels also generate bad results when other forms of transparency/blending/compositing are used, e.g. for particles, semi-transparent edges of foliage, text etc.
Some general hints that could help to solve the issue:
Check: glBlendFunc(), glBlendFuncSeparate(), glBlendEquation(), glBlendColor()
Do not waste precious video memory: glGenerateMipmap() is not required for a pure 2D presentation, where all pixels are visible all the time.
Alpha Testing: glAlphaFunc() has been removed from OpenGL 4, but alpha testing can be done manually in the fragment shader, just discard fragments depending on the alpha value, see OpenGL alpha test - How to replace AlphaFunc deprecated?.
glHint(): Using this OpenGL function to change implementation-specific hints can have an impact on the rendered result, sometimes in "surprising ways":
GL_POLYGON_SMOOTH_HINT
Indicates the sampling quality of antialiased polygons. Hinting
GL_NICEST can result in more pixel fragments being generated during
rasterization, if a larger filter function is applied.
The code in the original question comes with some issues, it does not compile like that (some parts of the class definitions are missing etc.). It takes some effort to reproduce to issue, which makes it more complicated to answer the question. - And it wasn't completely clear to me whether the intention is to just render seamless, pixelated tiles (solution: use GL_NEAREST), or if texture filtering is required...
Here my modified code example.
Related questions on Stack Overflow:
OpenGL normal blending with black alpha edges
opengl es2 premultiplied vs straight alpha + blending
Some links related to Alpha Blending / "premultiplied alpha":
Visual glBlendFunc + glBlendEquation Tool, by Anders Riggelsen
Premultiplied alpha, originally posted to Shawn Hargreaves Blog on MSDN, Friday, November 6, 2009
Texture filtering: alpha cutouts, originally posted to Shawn Hargreaves Blog on MSDN, Monday, November 2, 2009
What is Premultiplied Alpha? A Primer for Artists, by David Hart, July 6, 2016
GPUs prefer premultiplication, by Eric from www.realtimerendering.com

OpenGL 3D model texture mapping

I am trying to render an obj model with texture. Here is what I do:
Get the 3d model model and the corresponding view matrix view_mat and projection matrix proj_mat from the image.
Project the 3d model to the image using proj_mat * view_mat * model, in this way I can get uv coordinates in the image for every vertex in 3d model.
Use the uv coordinates to render the 3d model.
Here is what I get (on the left is the render result), I think I should get the main steps done right, as the overall texture looks in the right position. But it looks like that triangles are not in the rotation mode.
Here is the part of the code I consider that is related to the texture mapping.
// Demo driver: per tracked frame, fetch the model and its view/projection
// matrices from the tracker and render the textured model.
int main() {
Tracker tracker;
tracker.set_image("clooney.jpg");
Viewer viewer(tracker.get_width(), tracker.get_height());
while (tracker.track()) {
Model* model = tracker.get_model();
glm::mat4x4 proj_mat = tracker.get_proj_mat();
// Transpose into glm's column-major layout (the tracker presumably
// returns row-major data -- verify).  NOTE(review): Viewer::draw()
// transposes these matrices again before loading them, so the two
// transposes cancel; confirm only one is intended.
proj_mat = glm::transpose(proj_mat);
glm::mat4x4 view_mat = tracker.get_view_mat();
view_mat = glm::transpose(view_mat);
// render 3d shape
viewer.set_model(model);
viewer.draw(view_mat, proj_mat);
waitKey(0);
}
return 0;
}
// initialization of the render part
// Creates a fixed-size GLFW window with a GL context and loads the GL
// function pointers; exits the process if window creation fails.
Viewer::Viewer(int width, int height) {
glfwInit();
glfwWindowHint(GLFW_RESIZABLE, GL_FALSE);
m_window = glfwCreateWindow(width, height, "demo", NULL, NULL);
if (!m_window)
{
fprintf(stderr, "Failed to open GLFW window\n");
glfwTerminate();
exit(EXIT_FAILURE);
}
glfwMakeContextCurrent(m_window);
glfwGetWindowSize(m_window, &m_width, &m_height);
glfwSetFramebufferSizeCallback(m_window, reshape_callback);
gladLoadGLLoader((GLADloadproc)glfwGetProcAddress);
// Enable vsync.
glfwSwapInterval(1);
config();
}
// One-time fixed-function render state for the viewer.
void Viewer::config() {
// Depth buffering with <= comparison so coplanar redraws are kept.
glEnable(GL_DEPTH_TEST);
glDepthFunc(GL_LEQUAL);
// Draw both sides of each triangle, without per-vertex interpolation.
glDisable(GL_CULL_FACE);
glShadeModel(GL_FLAT);
}
// entry of the drawing function
// Renders one frame: loads the fixed-function projection and modelview
// matrices, refreshes the model's texture and UVs, draws, swaps buffers.
// NOTE(review): main() already transposed proj_mat/view_mat once, and this
// function transposes them again before glLoadMatrixf -- the two transposes
// cancel out; confirm which matrix layout the tracker actually produces.
void Viewer::draw(glm::mat4x4 view_mat, glm::mat4x4 proj_mat) {
glClearColor(1.0f, 1.0f, 1.0f, 1.0f);
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
glMatrixMode(GL_PROJECTION);
proj_mat = glm::transpose(proj_mat);
glLoadMatrixf(&proj_mat[0][0]);
glMatrixMode(GL_MODELVIEW);
view_mat = glm::transpose(view_mat);
glLoadMatrixf(&view_mat[0][0]);
// m_pmodel is an instance of Model Class
// set texture
// NOTE(review): Model::set_texture() generates a brand-new GL texture on
// every call (see its definition below), so calling it per frame leaks one
// texture object per frame -- consider creating the texture once.
m_pmodel->set_texture(m_image);
// set model uvs
m_pmodel->set_uvs(view_mat, proj_mat);
m_pmodel->draw();
glfwSwapBuffers(m_window);
glfwPollEvents();
}
// set the texture for the model from the image
// Uploads the cv::Mat image (BGR byte order) as the model's GL texture.
// NOTE(review): each call generates a new texture object and never deletes
// the previous one (no glDeleteTextures) -- when called per frame this leaks
// one texture per frame.
// NOTE(review): internal format `3` is the legacy GL 1.x shorthand for
// "3 components"; prefer the explicit GL_RGB token.
void Model::set_texture(cv::Mat img) {
glGenTextures(1, &m_texture);
glBindTexture(GL_TEXTURE_2D, m_texture);
glTexImage2D(GL_TEXTURE_2D, 0, 3, img.cols, img.rows, 0, GL_BGR, GL_UNSIGNED_BYTE, img.data);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR_MIPMAP_LINEAR);
glGenerateMipmap(GL_TEXTURE_2D);
glEnable(GL_TEXTURE_2D);
}
// specify correspondence between image and the model
// Computes per-vertex UVs by projecting each model vertex with the given
// view/projection matrices and remapping NDC x/y from [-1,1] to [0,1].
void Model::set_uvs(glm::mat4x4 view_mat, glm::mat4x4 proj_mat) {
// Hoisted loop-invariant product; operator* is left-associative, so this
// computes exactly the same values as proj_mat * view_mat * vertex.
const glm::mat4x4 vp = proj_mat * view_mat;
for (std::size_t idx = 0; idx < m_uvs.size(); ++idx) {
const glm::vec4 clip = vp * glm::vec4(m_vertices[idx], 1);
// Perspective divide into normalized device coordinates.
const glm::vec3 ndc = glm::vec3(clip.x, clip.y, clip.z) / clip.w;
m_uvs[idx] = glm::vec2(ndc.x * 0.5f + 0.5f, ndc.y * 0.5f + 0.5f);
}
}
// render the 3d model
void Model::draw() const {
glBindTexture(GL_TEXTURE_2D, m_texture);
for (unsigned long i = 0; i < m_faces.size(); ++i) {
glm::ivec3 face = this->m_faces[i];
glBegin(GL_TRIANGLES);
for (int j = 0; j < 3; j++) {
glm::vec3 v = this->m_vertices[face[j]];
glm::vec2 uv = this->m_uvs[face[j]];
glVertex3f(v.x, v.y, v.z);
glTexCoord2f(1 - uv.x,1 - uv.y);
}
glEnd();
}
}
You have to set the current texture coordinate (glTexCoord) before you specify a vertex (glVertex), because the current color, normal, texture coordinates, and fog coordinate are associated with the vertex when glVertex is called.
This means you have to swap glVertex3f and glTexCoord2f:
glTexCoord2f(1 - uv.x,1 - uv.y);
glVertex3f(v.x, v.y, v.z);
Otherwise you would set the texture coordinate which is associated to the next vertex position.
See OpenGL 2.0 API Specification, 2.6 Begin/End Paradigm, page 13:
Each vertex is specified with two, three, or four coordinates. In addition, a current normal, multiple current texture coordinate sets, multiple current generic vertex attributes, current color, current secondary color, and current fog coordinate may be used in processing each vertex.

Point drawed over texture doesn't get the desired colour

Important: I have to work with the fixed pipeline (I have no voice in this matter).
I have to modify some existing OpenGL code (a panoramic picture viewer, where the panorama is split into the six faces of a cube) so we're able to draw lines/points on top of the loaded textures, where the points are the mouse coordinates unprojected to object coordinates.
I wrote a test program with a coloured cube just to try the line painting on top of it:
I got this with the code pushing the GL_DEPTH_BUFFER_BIT attribute to the stack, disabling it before painting the points and poping the stack attribute after I have done with the painting.
I tried to use that same approach in the existing application, but I got these results (here, I'm trying only to paint a point):
I specified red as the color for the point but, as you can see, it doesn't have the desired one. I thought it might be due to blending and that it might be mixing its color with the underlying texture, so I pushed the GL_BLEND attribute to the stack as well and disabled it before painting, but the point isn't getting the desired color anyway.
What is happening here? Is there a way to "force" the pipeline to paint the point red?
initCube() : this is call before updating the GL scene.
// Switches the viewer into cube-map panorama mode: clears any previously
// enabled texture target, then enables GL_REFLECTION_MAP texture-coordinate
// generation on S/T/R so the cubemap is looked up from the view direction.
void panoViewer::initCube() {
makeCurrent();
// `texture` appears to hold the currently enabled GL texture target enum
// (0 when none) -- its declaration is outside this excerpt.
if(texture){
glDisable( texture );
textName = 0;
texture = 0;
}
glDisable( GL_TEXTURE_GEN_S );
glDisable( GL_TEXTURE_GEN_T );
glDisable( GL_TEXTURE_GEN_R );
glFrontFace( GL_CCW );
glEnableClientState(GL_VERTEX_ARRAY);
glDisableClientState(GL_TEXTURE_COORD_ARRAY);
glDisableClientState(GL_NORMAL_ARRAY);
texture = GL_TEXTURE_CUBE_MAP;
textName = texnms[1];
// Reflection-map texgen derives coordinates from vertex normals, which is
// presumably why the normal array is re-enabled after being disabled above.
glEnableClientState(GL_NORMAL_ARRAY);
glTexGenf( GL_S, GL_TEXTURE_GEN_MODE, GL_REFLECTION_MAP );
glTexGenf( GL_T, GL_TEXTURE_GEN_MODE, GL_REFLECTION_MAP );
glTexGenf( GL_R, GL_TEXTURE_GEN_MODE, GL_REFLECTION_MAP );
glEnable( GL_TEXTURE_GEN_S );
glEnable( GL_TEXTURE_GEN_T );
glEnable( GL_TEXTURE_GEN_R );
// Add the textures to the cube faces.
// ...
}
initializeGL() :
// One-time GL setup: clear color, depth test, the cube-map texture object,
// texture-size probing, texture-env mode, and overlay (blend/smooth) state.
void panoViewer::initializeGL() {
qglClearColor(Qt::black);
glShadeModel(GL_SMOOTH);
glEnable(GL_CULL_FACE);
glEnable( GL_DEPTH_TEST );
// create texture objects
glGenTextures( 1, textName );
// NOTE(review): glGenTextures takes a pointer, so textName appears to be an
// array/pointer, yet glBindTexture expects a GLuint value -- confirm
// textName's declaration (textName[0] may be intended here).
glBindTexture( GL_TEXTURE_CUBE_MAP, textName );
// find the largest feasible textures
maxTex2Dsqr = maxTexSize( GL_PROXY_TEXTURE_2D, max2d, max2d );
maxTex2Drec = maxTexSize( GL_PROXY_TEXTURE_2D, max2d, max2d / 2 );
maxTexCube = maxTexSize( GL_PROXY_TEXTURE_CUBE_MAP, maxcube, maxcube );
// constant texture mapping parameters...
// GL_DECAL makes the texel replace the fragment color entirely, which is why
// untextured overlay geometry drawn with texturing still enabled loses its
// vertex color.
glTexEnvf(GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE, GL_DECAL);
// for cube maps...
glTexParameteri(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_WRAP_R, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
// enable alpha blending for overlay
glEnable (GL_BLEND);
glBlendFunc (GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA);
glEnable(GL_POINT_SMOOTH);
glEnable(GL_LINE_SMOOTH);
glHint(GL_POINT_SMOOTH_HINT, GL_NICEST);
glHint(GL_LINE_SMOOTH_HINT, GL_NICEST);
glDisable(GL_LIGHTING);
// Create display list: dispList
// ...
}
paintGL() :
// Renders the panorama cube via the display list, then overlays points/lines.
// Fix: texturing must be disabled for the overlay pass -- with the
// GL_DECAL texture-env mode the texel replaces the fragment color entirely,
// which is why the point never appeared red.
void panoViewer::paintGL() {
    glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
    if(texture) {
        glBindTexture(texture, textName);
        glEnable( texture );
    }
    glMatrixMode(GL_TEXTURE);
    glLoadIdentity();
    glRotated( 180, 0, 1, 0 ); // camera looks at the front of the van
    glRotated( 180, 0, 0, 1 ); // van's roof points to the sky
    glMatrixMode(GL_PROJECTION);
    glLoadIdentity();
    // double hFOV, vFOV; // angular size at sphere center (deg)
    // double minFOV, maxFOV; // limits on vFOV
    // double wFOV; // vert angle at eye (deg) sets magnification
    double hhnear = Znear * tan( 0.5 * RAD(wFOV) ),
           hwnear = hhnear * aspectRatio,
           dxnear = 2 * hwnear * fcompx,
           dynear = 2 * hhnear * fcompy;
    glFrustum( -(hwnear + dxnear), hwnear - dxnear,
               -(hhnear + dynear), hhnear - dynear,
               Znear, Zfar
    );
    glRotated( 180, 0, 1, 0 );
    glTranslated( eyex, eyey, eyez );
    glRotated( tiltAngle, 1, 0, 0 );
    glRotated( panAngle, 0, 1, 0 );
    glMatrixMode(GL_MODELVIEW);
    glLoadIdentity();
    glCallList(dispList);
    // Overlay pass: GL_ENABLE_BIT lets glPopAttrib restore the enables below.
    glPushAttrib(GL_ENABLE_BIT | GL_DEPTH_BUFFER_BIT);
    glDisable(GL_BLEND);
    glDisable(GL_DEPTH_TEST);
    if(texture)
        glDisable( texture ); // fix: keep the texel from replacing the point color
    // Paint the point in red
    // ...
    glPopAttrib();
}
UPDATE: I forgot to mention that the code is based in Qt. It uses the QtOpenGL module extensively.
UPDATE #2: I've added some code.
In the fixed function pipeline, there are many states which could lead to the vertex color being completely ignored.
As Reto Koradi pointed out in the comments, when lighting is enabled, the colors have no effect (unless GL_COLOR_MATERIAL is enabled, in which case the color value is used to update the material parameters used for the lighting equation.)
As I pointed out in the comments, another case is texturing. Depending on the GL_TEXTURE_ENV_MODE selected, the fragment's color (as determined by lighting, or directly interpolated from the vertex colors) is modulated by the texture color, or completely replaced. In that case, disabling texturing for every texture unit in use can solve the issue.

OpenGL connecting a skydome and a flat ground plane

I am building a simple city with OpenGL and GLUT, I created a textured skydome and now I would like to connect that with a flat plane to give an appearance of the horizon. To give relative size, the skydome is 3.0 in radius with depth mask turned off, and it only has the camera rotation applied and sits over the camera. A building is about 30.0 in size, and I am looking at it from y=500.0 down.
I have a ground plane that is 1000x1000, I am texturing with a 1024x1024 resolution texture that looks good up close when I am against the ground. My texture is loaded with GL_REPEAT with texture coordinate of 1000 to repeat it 1000 times.
Connecting the skydome with the flat ground plane is where I am having some issues. I will list a number of things I have tried.
Issues:
1) When I rotate my heading, because of the square nature of the plane, I see edge like the attached picture instead of a flat horizon.
2) I have tried a circular ground plane instead, but I get a curve horizon, that becomes more curvy when I fly up.
3) To avoid the black gap between the infinite skydome, and my limited size flat plane, I set a limit on how far up I can fly, and shift the skydome slightly down as I go up, so I don't see the black gap between the infinite skydome and my flat plane when I am up high. Are there other methods to fade the plane into the skydome and take care of the gap when the gap varies in size at different location (ie. Circle circumscribing a square)? I tried to apply a fog color of the horizon, but I get a purple haze over white ground.
4) If I attached the ground as the bottom lid of the skydome hemisphere, then it looks weird when I zoom in and out, it looks like the textured ground is sliding and disconnected with my building.
5) I have tried to draw the infinitely large plane using the vanishing point concept by setting w=0. Rendering infinitely large plane
The horizon does look flat, but texturing properly seems difficult, so I am stuck with a single color.
6) I am disable lighting for the skydome, if I want to enable lighting for my ground plane, then at certain pitch angle, my plane would look black, but my sky is still completely lit, and it looks unnatural.
7) If I make my plane larger, like 10000x10000, then the horizon will look seemingly flat, but, if I press the arrow key to adjust my heading, the horizon will shake for a couple of seconds before stabilizing, what is causing it, and how could I prevent it. A related question to this, it seems like tiling and texturing 1000x1000 ground plane and 10000x10000 does not affect my frame rate, why is that? Wouldn't more tiling mean more work?
8) I read some math-based approach with figuring out the clipping rectangle to draw the horizon, but I wonder if there are simpler approaches http://www.gamedev.net/page/resources/_/technical/graphics-programming-and-theory/a-super-simple-method-for-creating-infinite-sce-r2769
Most threads I read regarding horizon would say, use a skybox, use a skydome, but I haven't come across a specific tutorial that talks about merging skydome with a large ground plane nicely. A pointer to such a tutorial would be great. Feel free to answer any parts of the question by indicating the number, I didn't want to break them up because they are all related. Thanks.
Here is some relevant code on my setup:
void Display()
{
// Clear frame buffer and depth buffer
glClearColor (0.0,0.0,0.0,1.0);
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
glMatrixMode(GL_MODELVIEW);
glLoadIdentity();
camera.Update();
GLfloat accumulated_camera_rotation_matrix[16];
GetAccumulatedRotationMatrix(accumulated_camera_rotation_matrix);
SkyDome_Draw(accumulated_camera_rotation_matrix);
FlatGroundPlane_Draw();
// draw buildings
// swap buffers when GLUT_DOUBLE double buffering is enabled
glutSwapBuffers();
}
// Draws the skydome centered on the camera: depth writes and lighting are off,
// and only the camera's rotation (never its translation) is applied so the
// dome appears infinitely far away.
void SkyDome_Draw(GLfloat (&accumulated_camera_rotation_matrix)[16])
{
    glPushMatrix();
    glLoadIdentity();
    glDepthMask(GL_FALSE);
    glDisable(GL_LIGHTING);
    glMultMatrixf(accumulated_camera_rotation_matrix);
    // 3.0f is the radius of the skydome.
    // If we offset by 0.5f in camera.ground_plane_y_offset, we can offset by
    // another 1.5f at skydome_sky_celing_y_offset of 500 (the max allowable
    // altitude), sliding the dome down as the camera climbs to hide the gap.
    // fix: original had a broken token "/c amera." which does not compile.
    glTranslatef( 0,
                  -camera.ground_plane_y_offset
                      - camera.GetCameraPosition().y / camera.skydome_sky_celing_y_offset / 1.5f,
                  0 );
    skyDome->Draw();
    // Restore state for the rest of the scene. NOTE(review): GL_CULL_FACE is
    // enabled here without a matching disable above -- confirm it is meant to
    // stay on for subsequent draws.
    glEnable(GL_LIGHTING);
    glDepthMask(GL_TRUE);
    glEnable(GL_CULL_FACE);
    glPopMatrix();
}
// Copies the current modelview matrix and strips its translation so only the
// accumulated rotation remains (for drawing the camera-centered skydome).
void GetAccumulatedRotationMatrix(GLfloat (&accumulated_rotation_matrix)[16])
{
    glGetFloatv(GL_MODELVIEW_MATRIX, accumulated_rotation_matrix);
    // In column-major order the translation lives in elements 12, 13, 14.
    for (int i = 12; i <= 14; ++i)
        accumulated_rotation_matrix[i] = 0;
}
GLfloat GROUND_PLANE_WIDTH = 1000.0f;
void FlatGroundPlane_Draw(void)
{
glEnable(GL_TEXTURE_2D);
glBindTexture( GL_TEXTURE_2D, concreteTextureId);
glBegin(GL_QUADS);
glNormal3f(0, 1, 0);
glTexCoord2d(0, 0);
// repeat 1000 times for a plane 1000 times in width
GLfloat textCoord = GROUND_PLANE_WIDTH;
glVertex3f( -GROUND_PLANE_WIDTH, 0, -GROUND_PLANE_WIDTH);
// go beyond 1 for texture coordinate so it repeats
glTexCoord2d(0, textCoord);
glVertex3f( -GROUND_PLANE_WIDTH, 0, GROUND_PLANE_WIDTH);
glTexCoord2d(textCoord, textCoord);
glVertex3f( GROUND_PLANE_WIDTH, 0, GROUND_PLANE_WIDTH);
glTexCoord2d(textCoord, 0);
glVertex3f( GROUND_PLANE_WIDTH, 0, -GROUND_PLANE_WIDTH);
glEnd();
glDisable(GL_TEXTURE_2D);
}
Void Init()
{
concreteTextureId = modelParser->LoadTiledTextureFromFile(concreteTexturePath);
}
ModelParser::LoadTiledTextureFromFile(string texturePath)
{
RGBImage image; // wrapping 2-d array of data
image.LoadData(texturePath);
GLuint texture_id;
UploadTiledTexture(texture_id, image);
image.ReleaseData();
return texture_id;
}
// Creates a GL texture object from img, configured to tile (GL_REPEAT) in both
// directions, and builds its full mipmap chain. The new texture id is returned
// through iTexture and the texture is left bound to GL_TEXTURE_2D.
void ModelParser::UploadTiledTexture(unsigned int &iTexture, const RGBImage &img)
{
    glGenTextures(1, &iTexture); // create the texture
    glBindTexture(GL_TEXTURE_2D, iTexture);

    // Wrap at the edges so texture coordinates beyond 1 repeat the image.
    glTexParameterf( GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT );
    glTexParameterf( GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT );

    // Mipmapped minification, nearest-texel magnification.
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR_MIPMAP_NEAREST);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);

    gluBuild2DMipmaps(GL_TEXTURE_2D, 3, img.Width(), img.Height(),
                      GL_RGB, GL_UNSIGNED_BYTE, img.Data());
}
Try using a randomized heightmap rather than using a flat plane. Not only will this look more realistic, it will make the edge of the ground plane invisible due to the changes in elevation. You can also try adding in some vertex fog, to blur the area where the skybox and ground plane meet. That's roughly what I did here.
A lot of 3D rendering relies on tricks to make things look realistic. If you look at most games, they have either a whole bunch of foreground objects that obscure the horizon, or they have "mountains" in the distance (a la heightmaps) that also obscure the horizon.
Another idea is to map your ground plane onto a sphere, so that it curves down like the earth does. That might make the horizon look more earthlike. This is similar to what you did with the circular ground plane.

OpenGL Mapping Textures to a Grid Stored Inside Vertex Array

I have code that uses indices and vertices to draw a set of triangles in the shape of a grid. All the vertices are drawn using glDrawElements(). Now for each vertex I will set its corresponding Texture Coordinates to 0 or 1 for each set of triangles that form a square in the grid. Basically I want to draw a collage of random textures in each one of the "squares" (consisting of two triangles). I can do this using the glBegin() and glEnd() method calls inside a for loop using the fixed functional pipeline, but I would like to know how to do this using Vertex Arrays. A code view of what I am trying to do can be seen below.
#include "glwidget.h"
// Constructs the widget with a 30x30 grid of 16-unit cells and allocates one
// texture id slot per cell (the textures themselves are created lazily).
GLWidget::GLWidget(QWidget *parent, QGLWidget *glparent) :
QGLWidget(parent, glparent),
texture_ids_(NULL),
col_(30),
row_(30),
step_(16.0)
{
// One GL texture id per grid cell; filled in by textureInit().
// NOTE(review): this new[] array is owned by the widget -- confirm it is
// released with delete[] in the destructor.
texture_ids_ = new GLuint[row_ * col_];
}
// Releases the GL textures and the id array.
// fix: the array allocated with new[] in the constructor was never freed.
GLWidget::~GLWidget()
{
    if (texture_ids_) {
        // NOTE(review): assumes a current GL context at destruction time --
        // confirm (otherwise glDeleteTextures has no effect).
        glDeleteTextures(row_ * col_, texture_ids_);
        delete[] texture_ids_; // fix: plug the memory leak
    }
}
// Rebuilds the grid geometry and re-establishes a pixel-aligned orthographic
// projection whenever the widget is resized.
void GLWidget::resizeEvent(QResizeEvent * /*event*/) {
    initGL();
    // Viewport state is independent of the matrix mode, so set it first.
    glViewport(0, 0, width(), height());
    glMatrixMode(GL_PROJECTION);
    glLoadIdentity();
    // One GL unit == one pixel, origin at the bottom-left corner.
    glOrtho(0, width(), 0, height(), -1, 1);
}
// Regenerates the grid mesh: (row_+1) x (col_+1) vertices spaced step_ apart,
// plus two triangles of indices per grid cell for glDrawElements.
void GLWidget::initGL()
{
makeCurrent();
// Variables for vertices
vertices_.clear();
// The grid starts one cell in from the origin on both axes.
int32_t start_y = step_;
int32_t start_x = step_;
// Variables for indices
indices_.clear();
int32_t vertices_per_row = col_ + 1;
int32_t vertex_num = 0;
for (int32_t j = 0; j <= row_; ++j) {
// Generate Vertices on row j
for (int32_t i = 0; i <= col_; ++i) {
vertices_.push_back(Vertex<GLfloat>((start_x + (i * step_)),
(start_y + (j * step_)), 0.0f));
}
// The last vertex row has no cell row above it, so no indices for it.
if (j == row_) {
break;
}
// Generate indices selecting the right vertices for each triangle:
// cell (i, j) = lower-left/lower-right/upper-left triangle plus
// lower-right/upper-left/upper-right triangle.
for (int32_t i = 0; i < col_; ++i) {
indices_.push_back(Indices<GLuint>(vertex_num, (vertex_num + 1),
(vertex_num + vertices_per_row)));
indices_.push_back(Indices<GLuint>((vertex_num + 1),
(vertex_num + vertices_per_row),
(vertex_num + vertices_per_row + 1)));
vertex_num++;
}
// Skip the last vertex of the row so the next cell row starts cleanly.
vertex_num++;
}
}
// Creates one small solid-red GL texture per grid cell.
// fix: the fill previously used the *widget's* width()/height() for the
// rectangle even though the image is only step_ x step_ pixels; fill the
// image's own extent instead. The render hint is also set before drawing
// (setting it after end() had no effect).
void GLWidget::textureInit()
{
    makeCurrent();
    for (int32_t i = 0; i < row_ * col_; ++i) {
        QImage tmpQImage(step_, step_, QImage::Format_ARGB32);
        tmpQImage = QGLWidget::convertToGLFormat(tmpQImage);
        QPainter tmpQPainter;
        tmpQPainter.begin(&tmpQImage);
        tmpQPainter.setRenderHint(QPainter::Antialiasing, true);
        tmpQPainter.fillRect(QRect(0, 0, tmpQImage.width(), tmpQImage.height()),
                             QColor(255, 0, 0));
        tmpQPainter.end();

        glGenTextures(1, &texture_ids_[i]);
        glBindTexture(GL_TEXTURE_2D, texture_ids_[i]);
        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
        glTexEnvf(GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE, GL_MODULATE);
        glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, tmpQImage.width(),
                     tmpQImage.height(), 0, GL_RGBA, GL_UNSIGNED_BYTE,
                     tmpQImage.bits());
    }
}
// Paints the grid as wireframe triangles from the vertex/index arrays.
// fix: glClearColor was called *after* glClear, so the very first frame was
// cleared with the previous (default black) color instead of mid-grey.
void GLWidget::updateGL() {
    // Lazily create the per-cell textures on the first paint, when a GL
    // context is guaranteed to be current.
    if (first_render_) {
        textureInit();
        first_render_ = false;
    }
    glMatrixMode(GL_MODELVIEW);
    glScissor(0, 0, width(), height());
    glClearColor(0.5f, 0.5f, 0.5f, 0.5f);
    glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
    glLoadIdentity();
    // Wireframe so the triangle structure of the grid is visible.
    glPolygonMode(GL_FRONT_AND_BACK, GL_LINE);
    glEnableClientState(GL_VERTEX_ARRAY);
    glVertexPointer(3, GL_FLOAT, 0, vertices_.data());
    // Each Indices entry holds 3 vertex ids, hence size() * 3 elements.
    glDrawElements(GL_TRIANGLES, indices_.size() * 3, GL_UNSIGNED_INT,
                   indices_.data());
    glDisableClientState(GL_VERTEX_ARRAY);
    glPolygonMode(GL_FRONT_AND_BACK, GL_FILL);
}
So, you want to draw using a lot of textures, but you obviously can't re-bind new textures as it is all drawn from one array. One solution to this is to use a texture atlas. It is one single bitmap with all your textures inside it. For example, if you have 16 different textures, you make a bitmap with 4x4 sections. Instead of using texture coordinates from 0 to 1, you will use 0 to 0.25, or 0.25 to 0.50, etc.
There are some disadvantages you need to be aware of:
If you want high resolution, the texture atlas will obviously be quite big.
Minifying and magnifying can play tricks with you. GL_NEAREST won't be any problem, but using GL_LINEAR or variants of mipmapping will average values around a pixel. This can lead to artifacts for pixels at the border of one sub image.
As the UV coordinates will vary more, fewer vertices will share common vertex data, leading to an increased number of indices.
I assume you have done profiling that shows that using multiple iterations of drawing, rebinding the texture for each, is not good enough. This obvious solution can be surprisingly effective.