I created a composite renderer that simply alpha-blends two textures, one on top of the other. The blending is just not working correctly.
Here is the code for the renderer, just rendering the background:
RenderFunction layer_render = [&]() {
glClearColor(0.0f, 0.0f, 0.0f, 0.0f);
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
glUseProgram(layer_shader_.get_program_id());
// ----- Set shader uniforms
layer_shader_.SetUniform("tex", 0);
layer_shader_.SetUniform("create_alpha_mask", false);
glEnable(GL_BLEND);
glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA);
glBlendEquation(GL_FUNC_ADD);
glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_2D, background_texture_id_);
layer_shader_.SetUniform("model", background_model_);
background_mesh_.Bind();
glDrawElements(GL_TRIANGLES, background_mesh_.GetIndicesCount(), GL_UNSIGNED_INT, (void*)0);
background_mesh_.Unbind();
};
layer_framebuffer_.RenderToTexture(layer_render);
The background texture I'm passing is a solid RGBA {1.0, 0.0, 0.0, 0.5}. What comes out is {0.5, 0.0, 0.0, 0.5}. The alpha blending is not properly accounting for the source alpha when computing the blend, for some reason I'm failing to see.
Some pseudocode of what I was expecting:
source_alpha = 0.5
dest_alpha = 0.0 * (1.0 - source_alpha) = 0.0
output_alpha = source_alpha + dest_alpha = 0.5
out_r = (source_r * source_alpha + dest_r * dest_alpha) / output_alpha = (1.0 * 0.5 + 0.0 * 0.0) / 0.5 = 1.0
out_g = (source_g * source_alpha + dest_g * dest_alpha) / output_alpha = (0.0 * 0.5 + 0.0 * 0.0) / 0.5 = 0.0
out_b = (source_b * source_alpha + dest_b * dest_alpha) / output_alpha = (0.0 * 0.5 + 0.0 * 0.0) / 0.5 = 0.0
out_a = output_alpha = 0.5
I don't know where you got that pseudocode from, but that's not how it works. Why are you dividing by (source_alpha + dest_alpha)?
If you want to use premultiplied alpha, what you do is set the blend function to:
glBlendFunc(GL_ONE, GL_ONE_MINUS_SRC_ALPHA);
That way the blender doesn't multiply the source by alpha again; then, at the end of your fragment shader, you premultiply rgb by the alpha yourself, which means:
color.rgb *= color.a;
That's it.
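For reference, here is a minimal sketch of both halves of that setup; the uniform and output names are illustrative, not taken from the original renderer. It also explains the observed numbers: with the original glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA) onto a target cleared to {0, 0, 0, 0}, red comes out as 1.0 * 0.5 + 0.0 * (1 - 0.5) = 0.5, exactly the {0.5, 0.0, 0.0, 0.5} you observed.

// Host side: with premultiplied alpha the blender must not multiply the
// source by alpha a second time.
glEnable(GL_BLEND);
glBlendFunc(GL_ONE, GL_ONE_MINUS_SRC_ALPHA);
glBlendEquation(GL_FUNC_ADD);

// Fragment shader (illustrative GLSL): premultiply once, at the very end.
const char* layer_fragment_src = R"(
    #version 330 core
    uniform sampler2D tex;
    in vec2 uv;
    out vec4 frag_color;
    void main() {
        vec4 color = texture(tex, uv);
        color.rgb *= color.a; // premultiply rgb by alpha
        frag_color = color;
    }
)";

The composited texture then holds premultiplied colors, so draw it to the screen with the same GL_ONE / GL_ONE_MINUS_SRC_ALPHA blend function.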
I am currently trying to create a model of Squidward's house from SpongeBob SquarePants.
I have created a cylinder out of GL_TRIANGLE_STRIPs, as I am not allowed to use any predefined OpenGL models to create the shapes.
I am trying to texture-map each triangle on the cylinder, but the texture comes out stretched, not the way it is supposed to look.
Here is my code for the cylinder:
glPushMatrix();
glTranslated(xPos, yPos, TABLETOP_Z - cubeLen);
glScaled(cubeLen / 2, cubeLen / 2, 1.0);
glBegin(GL_TRIANGLE_STRIP);
glTexCoord3f(xPos, yPos, TABLETOP_Z);
glTexCoord3f(xPos, yPos, TABLETOP_Z);
for (int i = 0; i <= 32; i++) {
double x_next = 1.0 * cos((i + 1) * 2.0 * PI/ (32 - 2.0));
double y_next = 1.0 * sin((i + 1) * 2.0 * PI / (32 - 2.0));
if (i % 2 == 0) {
glTexCoord3f(x_next, y_next, TABLETOP_Z + cubeLen);
glVertex3f(x_next, y_next, TABLETOP_Z + cubeLen);
} else {
glTexCoord3f(x_next, y_next, TABLETOP_Z);
glVertex3f(x_next, y_next, TABLETOP_Z);
}
}
glEnd();
glPopMatrix();
And here is what the texture is supposed to look like:
And here is what it looks like on the cylinder:
The texture coordinates are 2-dimensional and have to be in the range [0.0, 1.0]:
glBegin(GL_TRIANGLE_STRIP);
for (int i = 0; i <= 32; i++) {
double x_next = 1.0 * cos((i + 1) * 2.0 * PI/ (32 - 2.0));
double y_next = 1.0 * sin((i + 1) * 2.0 * PI / (32 - 2.0));
if (i % 2 == 0) {
glTexCoord2f((float)i / 32.0f, 1.0f);
glVertex3f(x_next, y_next, TABLETOP_Z + cubeLen);
} else {
glTexCoord2f((float)i / 32.0f, 0.0f);
glVertex3f(x_next, y_next, TABLETOP_Z);
}
}
glEnd();
See How do opengl texture coordinates work?
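For completeness, here is a sketch of the whole side strip with normalized, wrapping texture coordinates. It emits the top and bottom vertex of each column in the same iteration and uses the usual angle step of 2 * PI / 32 rather than the question's 2 * PI / (32 - 2.0); PI, TABLETOP_Z and cubeLen are assumed from the question:

const int SEGMENTS = 32;
glBegin(GL_TRIANGLE_STRIP);
for (int i = 0; i <= SEGMENTS; i++) {
    double angle = i * 2.0 * PI / SEGMENTS; // one full turn over SEGMENTS steps
    double x = cos(angle);
    double y = sin(angle);
    float u = (float)i / (float)SEGMENTS; // runs 0..1 once around the cylinder
    glTexCoord2f(u, 1.0f); // top rim
    glVertex3f(x, y, TABLETOP_Z + cubeLen);
    glTexCoord2f(u, 0.0f); // bottom rim
    glVertex3f(x, y, TABLETOP_Z);
}
glEnd();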
I am drawing a textured trapezoid in OpenGL, and the classic affine texture-mapping problem occurs:
http://upload.wikimedia.org/wikipedia/commons/5/57/Perspective_correct_texture_mapping.jpg
I want my texture to come out perspective-correct, as in the image above.
I have to interpolate in image space, i.e. interpolate (s·w, t·w, w), and I don't know how to do it:
http://i.stack.imgur.com/O0AnC.png
Here is my current project code:
c++:
https://gist.github.com/danicomas/a1f5a0e6849b3ac8b51c169c2c030e37
vertex:
https://gist.github.com/danicomas/fee77cf48fc5085f61a2fcf7a2c6d5de
fragment:
https://gist.github.com/danicomas/0bbd679d2d7da18bc61ee23b36096a16
How can I do this? Some example code?
Finally, I found a simple solution!
c++:
glPushMatrix();
glBegin(GL_QUADS);
float scale_texcoord = 0.7;
float top = 0.7;
float tx = scale_texcoord * top; // q factor for the short (top) edge
glTexCoord2f(-1 / 2, -1); // note: -1 / 2 is integer division and yields 0
glVertex2f(-1.4, -1);
// top vertices: s is pre-scaled by tx, and tx is also passed as the
// fourth (q) texture coordinate so the fragment shader can divide it out
glTexCoord4f(0, 0, 0, tx);
glVertex2f(-top, 1);
glTexCoord4f( tx, 0, 0, tx);
glVertex2f( top, 1);
glTexCoord2f( 1, -1);
glVertex2f( 1.4, -1);
glEnd();
glPopMatrix();
fragment:
uniform sampler2D sampler;
void main()
{
vec2 interpolated = vec2(gl_TexCoord[0].x / gl_TexCoord[0].w, gl_TexCoord[0].y);
gl_FragColor = texture2D(sampler, vec2(interpolated.x, interpolated.y));
}
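The general idea behind this (a sketch; s, t and q are placeholders, not variables from the code above): for a vertex whose texture edge is foreshortened by a factor q on screen, submit the texture coordinates pre-multiplied by q and pass q itself as the fourth component:

// The rasterizer interpolates (s*q, t*q, 0, q) linearly across the quad;
// dividing by the interpolated q per fragment reconstructs a projective,
// perspective-correct (s, t).
glTexCoord4f(s * q, t * q, 0.0f, q);

That is what the glTexCoord4f(0, 0, 0, tx) and glTexCoord4f(tx, 0, 0, tx) calls above do, and why the fragment shader divides gl_TexCoord[0].x by gl_TexCoord[0].w: only the horizontal axis of the trapezoid is foreshortened, so only x needs the division.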
I wrote some code using the depth test and I am trying to understand the formula for the projected Z-buffer value.
I ran this code:
#define CUBE_SIDE_SIZE 512.0f
#define Z_SIZE -0.25f
#define WINDOW_WIDTH 1024
#define WINDOW_HEIGHT 768
void init(void)
{
glViewport(0,0,WINDOW_WIDTH,WINDOW_HEIGHT);
glMatrixMode(GL_PROJECTION);
glLoadIdentity();
glOrtho(0,WINDOW_WIDTH,0,WINDOW_HEIGHT, -1, 1);
}
void display(void)
{
GLfloat readPixel;
glClearColor(0.0,0.0,0.0,0.0);
glClearDepth(0.8);
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
glMatrixMode(GL_MODELVIEW);
glLoadIdentity();
glEnable(GL_DEPTH_TEST);
glDepthFunc(GL_LESS);
glBegin(GL_QUADS);
glColor3f(1, 0, 0);
glVertex3f(0, 0, Z_SIZE);
glVertex3f(0, CUBE_SIDE_SIZE, Z_SIZE);
glVertex3f(CUBE_SIDE_SIZE, CUBE_SIDE_SIZE, Z_SIZE);
glVertex3f(CUBE_SIDE_SIZE, 0, Z_SIZE);
glEnd();
glDisable(GL_DEPTH_TEST);
glFlush();
glutSwapBuffers();
glReadPixels(0, 0, 1, 1, GL_DEPTH_COMPONENT, GL_FLOAT, &readPixel);
}
The value of readPixel in this case is 0.625, so I think the calculation is: Z-buffer value = (farZ - Z_value) / (farZ - nearZ)
In my case:
[1 - (-0.25)] / [1 - (-1)] = 1.25 / 2 = 0.625
But when I make these changes:
1. #define Z_SIZE 0.25f
2. glOrtho(0,WINDOW_WIDTH,0,WINDOW_HEIGHT, 0, 1);
I get the value 0.8 in readPixel, as if the depth test failed; but by my calculation the Z-buffer value should equal (1 - 0.25) / (1 - 0) = 0.75, which is less than 0.8 (the clear depth value).
Can you explain this behaviour?
so I think the calculation is: Z-buffer value = (farZ - Z_value) / (farZ - nearZ)
Nope. There's an additional division by w involved, which means the depth buffer values don't follow a linear progression. They're monotonic, though.
The calculation of NDC coordinates is as follows:
pos_view = ModelviewMatrix · vertex_position
pos_projected = ProjectionMatrix · pos_view
pos_clipped = clip_primitive( pos_projected )
pos_NDC = pos_clipped.xyz / pos_clipped.w
pos_NDC.z is your depth buffer value. For the purpose of understanding the transformation steps you can regard clip_primitive(…) as an identity transform, i.e. things pass through it unchanged, as long as the primitive isn't actually clipped.
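To check this against the numbers in the question, here is a small sketch (the helper function is hypothetical and assumes the default glDepthRange(0, 1)):

#include <cstdio>

// NDC z of an eye-space z under glOrtho(..., nearZ, farZ). With glOrtho,
// w stays 1, so the division by w is a no-op -- but clipping still applies.
double ortho_ndc_z(double z_eye, double nearZ, double farZ) {
    // third row of the glOrtho matrix (note the sign convention on z_eye)
    return -2.0 * z_eye / (farZ - nearZ) - (farZ + nearZ) / (farZ - nearZ);
}

int main() {
    // Case 1: glOrtho(..., -1, 1), Z_SIZE = -0.25 -> z_ndc = 0.25
    double z1 = ortho_ndc_z(-0.25, -1.0, 1.0);
    printf("depth = %g\n", 0.5 * z1 + 0.5); // 0.625, as observed

    // Case 2: glOrtho(..., 0, 1), Z_SIZE = +0.25 -> z_ndc = -1.5
    double z2 = ortho_ndc_z(0.25, 0.0, 1.0);
    printf("ndc z = %g\n", z2);
    return 0;
}

In the second case z_ndc = -1.5 lies outside the [-1, 1] clip volume, so the quad is clipped away before rasterization: it isn't that the depth test failed, the fragment is never produced, and glReadPixels simply returns the glClearDepth value of 0.8.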
I am trying to develop a space simulator, using the sun as the light source. My problem is that the lighting doesn't work as expected; maybe I am using the wrong calculation for the normals. I use a single CreateSphere function to create a sphere, and then use different coordinates and sizes to display several of them. The problem is that all the spheres on the screen show almost the same effect (i.e. I've applied only one light source, but it seems to have been applied to all the spheres), and the light also rotates along with them. I am not sure where the problem is. I am posting my code:
The code for the sphere display:
void DisplaySphere_sun (double R, GLuint texture)
{
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
int b,m = 0;
glScalef (0.0125 * R, 0.0125 * R, 0.0125 * R);
glBindTexture (GL_TEXTURE_2D, texture);
glBegin (GL_TRIANGLE_STRIP);
for ( b = 0; b <VertexCount; b++)
{
/*if((b%3)==0)
{
glNormal3f(normal[m].x,normal[m].y,normal[m].z);
m++;
}*/
glTexCoord2f (VERTEX[b].U, VERTEX[b].V);
/*glNormal3f(-VERTEX[b].X, -VERTEX[b].Y, -VERTEX[b].Z);*/
glVertex3f (VERTEX[b].Y, VERTEX[b].X, -VERTEX[b].Z);
}
m = 0;
for ( b = 0; b <VertexCount; b++)
{
/*if((b%3)==0)
{
glNormal3f(normal[m].x,normal[m].y,normal[m].z);
m++;
}*/
glTexCoord2f (VERTEX[b].U, -VERTEX[b].V);
/* glNormal3f(-VERTEX[b].X, -VERTEX[b].Y, -VERTEX[b].Z);*/
glVertex3f (VERTEX[b].Y, VERTEX[b].X, VERTEX[b].Z);
}
glEnd();
//glRotatef(120,0,0,0);
}
The code for creating a sphere:
void CreateSphere (double R, double X, double Y, double Z) {
int n,m;
double a;
double b;
n = 0;
m = 0;
for( b = 0; b <= 90 - space; b+=space){
for( a = 0; a <= 360 - space; a+=space)
{
VERTEX[n].X = R * sin((a) / 180 * PI) * sin((b) / 180 * PI) - X;
VERTEX[n].Y = R * cos((a) / 180 * PI) * sin((b) / 180 * PI) + Y;
VERTEX[n].Z = R * cos((b) / 180 * PI) - Z;
VERTEX[n].V = (2 * b) / 360;
VERTEX[n].U = (a) / 360;
n++;
VERTEX[n].X = R * sin((a) / 180 * PI) * sin((b + space) / 180 * PI) - X;
VERTEX[n].Y = R * cos((a) / 180 * PI) * sin((b + space) / 180 * PI) + Y;
VERTEX[n].Z = R * cos((b + space) / 180 * PI) - Z;
VERTEX[n].V = (2 * (b + space)) / 360;
VERTEX[n].U = (a) / 360;
n++;
VERTEX[n].X = R * sin((a + space) / 180 * PI) * sin((b) / 180 * PI) - X;
VERTEX[n].Y = R * cos((a + space) / 180 * PI) * sin((b) / 180 * PI) + Y;
VERTEX[n].Z = R * cos((b) / 180 * PI) - Z;
VERTEX[n].V = (2 * b) / 360;
VERTEX[n].U = (a + space) / 360;
n++;
VERTEX[n].X = R * sin((a + space) / 180 * PI) * sin((b + space) /180 * PI) - X;
VERTEX[n].Y = R * cos((a + space) / 180 * PI) * sin((b + space) /180 * PI) + Y;
VERTEX[n].Z = R * cos((b + space) / 180 * PI) - Z;
VERTEX[n].V = (2 * (b + space)) / 360;
VERTEX[n].U = (a + space) / 360;
n++;
}
}
}
And the code for lighting the sun:
glPushMatrix();
gluLookAt (0.0, 10.0, 2.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0); //defines a viewing transformation.
// Now translate to the sun
glTranslatef(0.0, -7.0, 3.0);
/* For LIGHT0 */
GLfloat lightZeroPosition[] = {0.0f, 0.0f, 0.0f, 1.0f};
/*GLfloat lightvec[] = {0.5f, 0.2f, 0.0f, 1.0f};*/
GLfloat lightZeroColor[] = {0.5f, 0.5f, 0.5f, 1.0f};
GLfloat amb[] = {1, 1, 1, 1};
GLfloat spec[] = {0.3, 0.3, 0.3, 1};
glLightfv(GL_LIGHT0, GL_POSITION, lightZeroPosition);
glLightfv(GL_LIGHT0, GL_DIFFUSE, lightZeroColor);
glLightfv(GL_LIGHT0, GL_SPECULAR, spec);
glEnable(GL_LIGHT0);
glRotatef(angle,0,0,1);
DisplaySphere(5,textures);
// function to display the sun
glPopMatrix();
I'm a bit puzzled why you don't draw the sun at the origin of the solar system. The sun is a star, and stars carry over 95% of their stellar system's mass, so the center of gravity of the whole thing is within the sun for most planets (only Jupiter has so much mass that it shifts the center of gravity slightly outside the sun's photosphere radius).
As for your lighting problem: one normally doesn't illuminate light sources. Just switch off lighting when drawing the sun. Then, when drawing the planets, place the light source within the sun. OpenGL is not a global renderer: after you've drawn something, it completely forgets about it, so you won't get any lighting interactions between the things you draw (which also means no shadows for free).
This is how I'd draw a solar system (pseudocode):
draw_solar_system():
glPushMatrix()
glDisable(GL_LIGHTING)
draw_origin_sphere(sun_radius)
glEnable(GL_LIGHTING)
glLightfv(GL_LIGHT0, GL_POSITION, (0., 0., 0., 1.))
glLightfv(GL_LIGHT0, GL_DIFFUSE, (1., 1., 1., 1.))
glLightfv(GL_LIGHT0, GL_AMBIENT, (0., 0., 0., 1.))
for p in planets:
glPushMatrix()
glRotatef(p.orbital_inclination, p.axis_of_orbital_inclination)
glRotatef(p.orbital_angle, 0., 1., 0.)
glTranslatef(p.orbit_radius, 0., 0.)
glRotatef(p.axial_inclination, p.axis_of_axial_inclination)
glRotatef(p.time_of_day, 0., 1., 0.)
draw_origin_sphere(p.radius)
glPopMatrix()
glPopMatrix()
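For reference, the same structure as compilable C++; the Planet fields, the draw_origin_sphere() helper and the fixed rotation axes are assumptions, not code from the question:

#include <GL/gl.h>

// Hypothetical planet record mirroring the pseudocode above
struct Planet {
    float orbital_inclination; // degrees, tilt of the orbit plane
    float orbital_angle;       // degrees, position along the orbit
    float orbit_radius;
    float axial_inclination;   // degrees, tilt of the planet's axis
    float time_of_day;         // degrees of self-rotation
    float radius;
};

void draw_origin_sphere(float radius); // assumed: draws a sphere at the origin

void draw_solar_system(const Planet* planets, int count, float sun_radius) {
    glPushMatrix();

    // The sun is the light source, so don't illuminate it.
    glDisable(GL_LIGHTING);
    draw_origin_sphere(sun_radius);
    glEnable(GL_LIGHTING);

    // GL_POSITION is transformed by the modelview matrix current at this
    // call, so setting it here pins the light to the sun's center.
    const GLfloat light_pos[]     = {0.f, 0.f, 0.f, 1.f};
    const GLfloat light_diffuse[] = {1.f, 1.f, 1.f, 1.f};
    const GLfloat light_ambient[] = {0.f, 0.f, 0.f, 1.f};
    glLightfv(GL_LIGHT0, GL_POSITION, light_pos);
    glLightfv(GL_LIGHT0, GL_DIFFUSE, light_diffuse);
    glLightfv(GL_LIGHT0, GL_AMBIENT, light_ambient);

    for (int i = 0; i < count; ++i) {
        const Planet& p = planets[i];
        glPushMatrix();
        glRotatef(p.orbital_inclination, 1.f, 0.f, 0.f); // tilt orbit plane
        glRotatef(p.orbital_angle, 0.f, 1.f, 0.f);       // move along orbit
        glTranslatef(p.orbit_radius, 0.f, 0.f);          // out to orbit radius
        glRotatef(p.axial_inclination, 1.f, 0.f, 0.f);   // tilt planet axis
        glRotatef(p.time_of_day, 0.f, 1.f, 0.f);         // spin the planet
        draw_origin_sphere(p.radius);
        glPopMatrix();
    }
    glPopMatrix();
}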
I'm trying to get familiar with shaders in OpenGL. Here is some sample code that I found (working with openFrameworks). The code simply blurs an image in two passes, first horizontally, then vertically. Here is the code for the horizontal shader. My only confusion is the texture coordinates: they exceed 1.
void main( void )
{
vec2 st = gl_TexCoord[0].st;
//horizontal blur
//from http://www.gamerendering.com/2008/10/11/gaussian-blur-filter-shader/
vec4 color;
color += 1.0 * texture2DRect(src_tex_unit0, st + vec2(blurAmnt * -4.0, 0));
color += 2.0 * texture2DRect(src_tex_unit0, st + vec2(blurAmnt * -3.0, 0));
color += 3.0 * texture2DRect(src_tex_unit0, st + vec2(blurAmnt * -2.0, 0));
color += 4.0 * texture2DRect(src_tex_unit0, st + vec2(blurAmnt * -1.0, 0));
color += 5.0 * texture2DRect(src_tex_unit0, st + vec2(blurAmnt , 0));
color += 4.0 * texture2DRect(src_tex_unit0, st + vec2(blurAmnt * 1.0, 0));
color += 3.0 * texture2DRect(src_tex_unit0, st + vec2(blurAmnt * 2.0, 0));
color += 2.0 * texture2DRect(src_tex_unit0, st + vec2(blurAmnt * 3.0, 0));
color += 1.0 * texture2DRect(src_tex_unit0, st + vec2(blurAmnt * 4.0, 0));
color /= 5.0;
gl_FragColor = color;
}
I can't make heads or tails of this code. Texture coordinates are supposed to be between 0 and 1, and I've read a bit about what happens when they're greater than 1, but that's not the behavior I'm seeing (or I don't see the connection). blurAmnt varies between 0.0 and 6.4, so s can go from 0 to 25.6. The image just gets blurred more or less depending on the value; I don't see any repeating patterns.
My question boils down to this: what exactly is happening when the texture coordinate argument in the call to texture2DRect exceeds 1? And why does the blurring behavior still function perfectly despite this?
The [0, 1] texture coordinate range only applies to the GL_TEXTURE_2D texture target. Since that code uses texture2DRect (and a sampler2DRect), it's using the GL_TEXTURE_RECTANGLE_ARB texture target, and that target uses unnormalized texture coordinates, in the range [0, width] x [0, height].
That's why you have "weird" texture coords. Don't worry, they work fine with this texture target.
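As an illustration, here is a fragment shader sketch (written as a C++ source string; the sampler names are made up) in which both fetches address the same texel of a 512x512 image:

const char* rect_demo_frag_src = R"(
    #extension GL_ARB_texture_rectangle : enable
    uniform sampler2D     tex_2d;   // GL_TEXTURE_2D: coords in [0, 1]
    uniform sampler2DRect tex_rect; // GL_TEXTURE_RECTANGLE_ARB: coords in pixels
    void main()
    {
        vec4 a = texture2D(tex_2d, vec2(0.5, 0.5));           // image center
        vec4 b = texture2DRect(tex_rect, vec2(256.0, 256.0)); // same center
        gl_FragColor = 0.5 * (a + b);
    }
)";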
It depends on the host code. If you saw something like
glTexParameteri (GL_TEXTURE_3D, GL_TEXTURE_WRAP_S, GL_CLAMP);
then the out-of-bounds s coordinates will sample zeros (the default border color), IIRC. Similarly for t.
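For completeness, a sketch of the wrap-mode setup that controls this behaviour ('texture_id' is a placeholder):

glBindTexture(GL_TEXTURE_2D, texture_id);

// Default: GL_REPEAT tiles the image, so coordinate 1.3 samples at 0.3.
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT);

// GL_CLAMP_TO_BORDER samples the border color outside [0, 1]; it defaults
// to (0, 0, 0, 0), which is where the "zeros" above come from.
const GLfloat border_color[] = {0.0f, 0.0f, 0.0f, 0.0f};
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_BORDER);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_BORDER);
glTexParameterfv(GL_TEXTURE_2D, GL_TEXTURE_BORDER_COLOR, border_color);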