OpenGL: GLSL float has low precision - C++

I have a float alpha texture that contains amplitude values. It is converted to decibels and displayed in grayscale.
Here is the conversion code (C++):
const float db_min = -100, db_max = 0;
float image[height][width];
for (int y = 0; y < height; ++y) {
    for (int x = 0; x < width; ++x) {
        // 20 * log10(amplitude), then normalize [db_min, db_max] to [0, 1]
        // (a is the row-major amplitude buffer)
        image[y][x] = 20.f * log(a[y * width + x]) / log(10.f);
        image[y][x] = (image[y][x] - db_min) / (db_max - db_min);
    }
}
Here is the fragment shader (GLSL):
#version 120
precision highp float;
varying vec2 texcoord;
uniform sampler2D texture;
void main() {
float value = texture2D(texture, texcoord).a;
gl_FragColor = vec4(value, value, value, 0);
}
Here is a screenshot:
Looks perfect! Now, I want to do the conversion in the fragment shader itself, instead of in C++:
#version 120
precision highp float;
varying vec2 texcoord;
uniform sampler2D texture;
const float db_min = -100., db_max = 0.;
void main() {
float value = texture2D(texture, texcoord).a;
value = 20. * log(value)/log(10.);
value = (value-db_min)/(db_max-db_min);
gl_FragColor = vec4(value, value, value, 0);
}
Here is a screenshot:
Why are results different? What am I doing wrong?

Limited texel precision - that could be the problem. My guess is that you are keeping your values (which have a very high range, judging by the log) in an 8-bit-per-channel texture (e.g. RGBA8). In that case you could either use a floating-point texture format, or pack your high-precision/high-range values into the 4-byte format yourself (e.g. as fixed point).
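For example, on desktop OpenGL 3.0+ the amplitude data could be uploaded into a single-channel floating-point texture instead of an 8-bit one. A minimal C++ sketch (tex is a new name; image, width and height are the variables from the code above):
// Upload the normalized dB values into a 32-bit float, single-channel texture.
// With GL_R32F the value lands in the red channel, so the shader would read
// texture2D(texture, texcoord).r instead of .a.
GLuint tex;
glGenTextures(1, &tex);
glBindTexture(GL_TEXTURE_2D, tex);
glPixelStorei(GL_UNPACK_ALIGNMENT, 1);
glTexImage2D(GL_TEXTURE_2D, 0, GL_R32F, width, height, 0,
             GL_RED, GL_FLOAT, &image[0][0]);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);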

Related

glGetUniformLocation returns -1 for an array of struct in GLSL

I'm trying to call glGetUniformLocation() on an array of struct. I read that I should use this name format: "uniformName[index].element", but the method always returns -1. I tried to delete the array and only use the structure, and it worked, so I guess that the problem might be there. Here is the struct and the array in my fragment shader:
const int MAX_BLOCKS = 16;
struct sblock{
vec4 color;
float shineDamper;
float reflectivity;
};
uniform sblock blocks[MAX_BLOCKS];
And here is my call:
for(int i = 0; i < 16; i++) {
location_block_color[i] = super.getUniformLocation("blocks["+i+"].color");
location_block_reflectivity[i] = super.getUniformLocation("blocks["+i+"].reflectivity");
location_block_shineDamper[i] = super.getUniformLocation("blocks["+i+"].shineDamper");
System.out.println(location_block_color[i] + " " + location_block_reflectivity[i] + " " + location_block_shineDamper[i]); // prints -1 -1 -1 ...
}
getUniformLocation method:
protected int getUniformLocation(String uniformName) {
return GL20.glGetUniformLocation(programID, uniformName);
}
and this is how I create programID (before anything else):
vertexShaderID = loadShader(vertexFile, GL20.GL_VERTEX_SHADER);
fragmentShaderID = loadShader(fragmentFile, GL20.GL_FRAGMENT_SHADER);
programID = GL20.glCreateProgram();
My question is what is happening here and what am I doing wrong?
Thanks for your help.
EDIT 1:
Here is my full fragment shader:
#version 400 core
const int MAX_LIGHTS = 16;
const int MAX_BLOCKS = 16;
struct sblock{
vec4 color;
float shineDamper;
float reflectivity;
};
uniform sblock blocks[MAX_BLOCKS];
in int block_id_out;
in vec3 unitNormal;
in vec3 lightVector[MAX_LIGHTS];
in vec3 directionalLightFinalColour;
in vec3 directionalLightReflected;
in vec3 toCameraVector;
in float visibility;
out vec4 outColor;
uniform float ambientLight;
uniform vec3 skyColor;
uniform vec3 lightColour[MAX_LIGHTS];
uniform vec3 attenuation[MAX_LIGHTS];
uniform int lightCount;
uniform vec3 directionalLightColour;
uniform vec3 directionalLightDirection;
vec3 calculateDiffuse(vec3 unitNormal, vec3 unitLightVector, vec3 lightColour, float attFactor){
float nDotl = dot(unitNormal,unitLightVector);
float brightness = max(nDotl,0);
return (brightness * lightColour) / attFactor;
}
vec3 calculateSpecular(vec3 unitNormal, vec3 unitLightVector, vec3 unitToCameraVector, float shineDamper, float reflectivity, vec3 lightColour, float attFactor){
vec3 reflectedLightDirection = reflect(-unitLightVector,unitNormal);
float specularFactor = max(dot(reflectedLightDirection, unitToCameraVector),0.0);
float dampedFactor = pow(specularFactor,shineDamper);
return (dampedFactor * reflectivity * lightColour) / attFactor;
}
void main(void){
vec3 totalDiffuse = vec3(0.0);
vec3 totalSpecular = vec3(0.0);
vec3 unitToCameraVector = normalize(toCameraVector);
for(int i = 0; i < lightCount;i++){
vec3 unitLightVector = normalize(lightVector[i]);
float lightDistance = length(lightVector[i]);
float attFactor = attenuation[i].x + attenuation[i].y*lightDistance + attenuation[i].z*lightDistance*lightDistance;
totalSpecular += calculateSpecular(unitNormal, unitLightVector, unitToCameraVector, blocks[block_id_out].shineDamper, blocks[block_id_out].reflectivity, lightColour[i], attFactor);
totalDiffuse += calculateDiffuse(unitNormal, unitLightVector, lightColour[i], attFactor);
}
totalDiffuse += directionalLightFinalColour;
totalSpecular += pow(max(dot(directionalLightReflected,unitToCameraVector),0.0),blocks[block_id_out].shineDamper)*blocks[block_id_out].reflectivity*directionalLightColour;
totalDiffuse = max(totalDiffuse,ambientLight);
outColor = vec4(totalDiffuse,1.0) * blocks[block_id_out].color + vec4(totalSpecular,1.0);
outColor = mix(vec4(skyColor,1.0),outColor,visibility);
}
EDIT 2:
Here is my VertexShader:
#version 400 core
const vec4 normals[] = vec4[6](vec4(1,0,0,0),vec4(0,1,0,0),vec4(0,0,1,0),vec4(-1,0,0,0),vec4(0,-1,0,0),vec4(0,0,-1,0));
const int MAX_LIGHTS = 16;
in vec3 position;
in int block_id;
in int normal;
out int block_id_out;
out vec3 unitNormal;
out vec3 lightVector[MAX_LIGHTS];
out vec3 directionalLightFinalColour;
out vec3 directionalLightReflected;
out vec3 toCameraVector;
out float visibility;
uniform mat4 transformationMatrix;
uniform mat4 projectionMatrix;
uniform mat4 viewMatrix;
uniform vec3 lightPosition[MAX_LIGHTS];
uniform vec3 directionalLight;
uniform vec3 directionalLightColor;
uniform int lightCount;
uniform float fogDensity;
uniform float fogGradient;
void main(void){
vec4 worldPosition = transformationMatrix * vec4(position,1.0);
vec4 positionRelativeToCam = viewMatrix * worldPosition;
gl_Position = projectionMatrix * positionRelativeToCam;
block_id_out = block_id;
unitNormal = normalize((transformationMatrix * normals[normal]).xyz);
for(int i = 0; i < lightCount;i++){
lightVector[i] = lightPosition[i] - worldPosition.xyz;
}
directionalLightFinalColour = max(dot(unitNormal, directionalLight),0)*directionalLightColor;
directionalLightReflected = reflect(-directionalLight,unitNormal);
toCameraVector = (inverse(viewMatrix) * vec4(0.0,0.0,0.0,1.0)).xyz - worldPosition.xyz;
visibility = clamp(exp(-pow(length(positionRelativeToCam.xyz)*fogDensity,fogGradient)),0.0,1.0);
}
From (most recent) OpenGL Shading Language 4.60 Specification (HTML) - 4.3.9. Interface Blocks
A uniform or shader storage block array can only be indexed with a dynamically uniform integral expression, otherwise results are undefined.
block_id_out is a fragment shader input (set from a vertex shader output) and not a dynamically uniform expression. Hence blocks[block_id_out] is undefined, and it is not guaranteed that blocks[i] becomes an active program resource, which is why glGetUniformLocation can return -1 for it.
I recommend changing the declaration: do not create an array of uniforms, but a single uniform block instead:
const int MAX_BLOCKS = 16;
struct Block
{
vec4 color;
float shineDamper;
float reflectivity;
};
layout(std140) uniform Blocks
{
Block blocks[MAX_BLOCKS];
};
You have to create a Uniform Buffer Object to bind data to the uniform block.
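A minimal sketch of that setup, using plain GL calls in C++ (the question uses LWJGL, where the same functions live in the GL30/GL31 classes); binding point 0 is an arbitrary choice, and "Blocks" is the block name declared above:
// Query the block index and tie it to binding point 0.
GLuint blockIndex = glGetUniformBlockIndex(programID, "Blocks");
glUniformBlockBinding(programID, blockIndex, 0);

// Under std140 each array element occupies 32 bytes here:
// vec4 color (16) + shineDamper (4) + reflectivity (4) + 8 bytes of padding.
GLuint ubo;
glGenBuffers(1, &ubo);
glBindBuffer(GL_UNIFORM_BUFFER, ubo);
glBufferData(GL_UNIFORM_BUFFER, 16 * 32, nullptr, GL_DYNAMIC_DRAW);
glBindBufferBase(GL_UNIFORM_BUFFER, 0, ubo);

// Later, update element i with glBufferSubData(GL_UNIFORM_BUFFER, i * 32, ...).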
I think that @Rabbid76's answer is correct, but I found a more satisfying workaround. I defined the struct and the uniform array in the vertex shader, and then passed the correct values for color, shineDamper and reflectivity on to the fragment shader. This makes more sense anyway, since I only need to look the values up once per vertex and they are the same across the triangle, so doing the lookup three times per triangle is better than doing it hundreds of times, once per pixel.
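A rough GLSL sketch of that workaround (the pass_* names are placeholders, not from the original code):
// Vertex shader: look the values up once per vertex and forward them.
uniform sblock blocks[MAX_BLOCKS];
in int block_id;
flat out vec4 pass_color;
flat out float pass_shineDamper;
flat out float pass_reflectivity;
void main(void) {
    // ... existing transform code ...
    pass_color = blocks[block_id].color;
    pass_shineDamper = blocks[block_id].shineDamper;
    pass_reflectivity = blocks[block_id].reflectivity;
}

// Fragment shader: read the forwarded values instead of indexing the array.
flat in vec4 pass_color;
flat in float pass_shineDamper;
flat in float pass_reflectivity;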

OpenGL 2D faded circle being stretched/compressed by the resolution

I'd like my faded lighting (based on distance from a point) to be a perfect circle no matter the resolution. Currently, the light is only a circle if the height and width of the window are equal.
This is what it looks like right now:
My fragment shader looks like this:
#ifdef GL_ES
precision mediump float;
#endif
#define MAX_LIGHTS 10
// varying input variables from our vertex shader
varying vec4 v_color;
varying vec2 v_texCoords;
// a special uniform for textures
uniform sampler2D u_texture;
uniform vec2 u_resolution;
uniform float u_time;
uniform vec2 lightsPos[MAX_LIGHTS];
uniform vec3 lightsColor[MAX_LIGHTS];
uniform float lightsSize[MAX_LIGHTS];
uniform vec2 cam;
uniform vec2 randPos;
uniform bool dark;
void main()
{
vec4 lights = vec4(0.0,0.0,0.0,1.0);
float ratio = u_resolution.x/u_resolution.y;
vec2 st = gl_FragCoord.xy/u_resolution;
vec2 loc = vec2(.5 + randPos.x, 0.5 + randPos.y);
for(int i = 0; i < MAX_LIGHTS; i++)
{
if(lightsSize[i] != 0.0)
{
// trying to reshape the light
// vec2 st2 = st;
// st2.x *= ratio;
float size = 2.0/lightsSize[i];
float dist = max(0.0, distance(lightsPos[i], st)); // st here was replaced with st2 when experimenting
lights = lights + vec4(max(0.0, lightsColor[i].x - size * dist), max(0.0, lightsColor[i].y - size * dist), max(0.0, lightsColor[i].z - size * dist), 0.0);
}
}
if(dark)
{
lights.r = max(lights.r, 0.075);
lights.g = max(lights.g, 0.075);
lights.b = max(lights.b, 0.075);
}
else
{
lights.r += 1.0;
lights.g += 1.0;
lights.b += 1.0;
}
gl_FragColor = texture2D(u_texture, v_texCoords) * lights;
}
I tried reshaping the light by multiplying the x value of the pixel by the ratio of the screen width to the screen height but that caused the lights to be out of place. I couldn't figure out anything that would put them back in their correct place while maintaining their shape.
EDIT: the displacement is determined by my camera's position in my libGDX scene.
What you need is to rescale the difference between the light position and the fragment position:
vec2 dr = st-lightsPos[i];
dr.x*=ratio;
float dist = length(dr);
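For context, a sketch of how that correction might slot into the original loop (assuming lightsPos is already expressed in the same 0-1 st space):
for (int i = 0; i < MAX_LIGHTS; i++)
{
    if (lightsSize[i] != 0.0)
    {
        float size = 2.0 / lightsSize[i];
        vec2 dr = st - lightsPos[i];
        dr.x *= ratio;               // undo the horizontal stretch
        float dist = length(dr);
        lights.rgb += max(vec3(0.0), lightsColor[i] - size * dist);
    }
}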
Try normalizing st like this:
vec2 st = (gl_FragCoord.xy - .5*u_resolution.xy) / min(u_resolution.x, u_resolution.y);
So that the coordinates of your fragments are in the range [-0.5, 0.5] for y and [-ratio/2, ratio/2] for x, where ratio = u_resolution.x/u_resolution.y (assuming the width is the larger dimension).
You can also make it [0; 1] for y and [0; ratio] for x by doing
vec2 st = gl_FragCoord.xy / min(u_resolution.x, u_resolution.y);
But the former is more convenient in many cases

GLSL shader to convert pixel colors as per the use case

In the code below, I want to accept two arguments, colors and activeColor. The colors array contains the list of allowed colors to be drawn in the image; activeColor is the selected color which is yet to be painted on the image. I am using an HTML5 canvas to paint on top of the image and a WebGL shader to convert pixel colors as per the use case.
While drawing (painting on the canvas) with a color, my use case is to change any other colors that are present to activeColor.
I have written some basic logic below, but it is not syntactically right. Help needed.
colors: dynamic Float32Array of colors ex: [0,0,0,1, 1,1,1,1] represents black & white color array
activeColor: dynamic Float32Array of color ex: [0,0,0,1] represents black color
uniform sampler2D texture;
varying float colors;
varying float activeColor;
varying vec2 texCoord;
void main() {
vec4 color = texture2D(texture, texCoord);
for (int i = 0; i < colors.length; i+=3){
vec4 c = vec4(colors[i], colors[i+1], colors[i+2], colors[i+3]);
if(color.a > 0 && color.rgb != c) {
color.rgb = vec4(activeColor[0], activeColor[1], activeColor[2], activeColor[3]);
}
}
gl_FragColor = color;
}
The code doesn't make a lot of sense
varying float colors;
varying float activeColor;
Those are type float so they only hold 1 value. They are not colors (vec4s) nor are they arrays.
colors.length
There is no such thing as someArray.length in GLSL. In GLSL you can't pass variable-sized arrays; they must be a fixed size. Similarly, you can't pass arrays as varyings.
It's not clear what you're trying to do
Your code appears to be trying to draw activeColor everywhere the image does not contain the colors in colors.
You could do something like this
#define MAX_COLORS 10
uniform sampler2D texture;
uniform vec4 colors[MAX_COLORS];
uniform int numColors;
uniform vec4 activeColor;
varying vec2 texCoord;
void main() {
vec4 color = texture2D(texture, texCoord);
for (int i = 0; i < MAX_COLORS; ++i) {
if (i >= numColors) {
break;
}
vec4 c = colors[i];
if (color.a > 0.0 && color.rgb != c.rgb) {
color.rgb = activeColor.rgb;
break;
}
}
gl_FragColor = color;
}
There's a limit on the number of uniform vec4s you can have. It's between 29 and 4096, though looking at the stats you probably want to stay under 221.
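(For reference, the actual limit on a given machine can be queried at runtime; a small C++ sketch, with the WebGL equivalent noted in the comment:)
// Number of vec4 uniforms available to the fragment shader (GL ES 2.0 / desktop GL 4.1+).
// In WebGL the equivalent is gl.getParameter(gl.MAX_FRAGMENT_UNIFORM_VECTORS).
GLint maxVec4s = 0;
glGetIntegerv(GL_MAX_FRAGMENT_UNIFORM_VECTORS, &maxVec4s);
printf("max fragment uniform vec4s: %d\n", maxVec4s);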
It's more common to pass in arrays of data as textures.
#define MAX_COLORS 10
uniform sampler2D texture;
uniform sampler2D colors; // texture holding colors
uniform vec2 colorsSize; // size of texture
uniform int numColors;
uniform vec4 activeColor;
varying vec2 texCoord;
vec4 getColor(int i) {
  // Convert the linear index i into a 2D texel coordinate, then sample the texel center.
  vec2 pixelCoord = vec2(mod(float(i), colorsSize.x),
                         floor(float(i) / colorsSize.x));
  return texture2D(colors, (pixelCoord + 0.5) / colorsSize);
}
void main() {
vec4 color = texture2D(texture, texCoord);
for (int i = 0; i < MAX_COLORS; ++i) {
if (i >= numColors) {
break;
}
vec4 c = getColor(i);
if (color.a > 0.0 && color.rgb != c.rgb) {
color.rgb = activeColor.rgb;
break;
}
}
gl_FragColor = color;
}
Now you can pass in the colors as a texture.
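As a rough sketch of the upload side (desktop GL calls shown; in WebGL the same-named functions exist on the gl context, e.g. gl.texImage2D), each color occupies one RGBA8 texel:
// Two allowed colors, one texel each, in a numColors x 1 texture.
const int numColors = 2;
unsigned char colorData[numColors * 4] = {
      0,   0,   0, 255,   // black
    255, 255, 255, 255,   // white
};
GLuint colorsTex;
glGenTextures(1, &colorsTex);
glBindTexture(GL_TEXTURE_2D, colorsTex);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, numColors, 1, 0,
             GL_RGBA, GL_UNSIGNED_BYTE, colorData);
// Then set the uniforms: colors = this texture's unit, colorsSize = vec2(numColors, 1), numColors = 2.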
You might find these tutorials useful.
You might also find this technique semi related to your problem (replacing colors) for which there is a live example here and another explanation of the technique here

Move pixel with texture direction and texture velocity / GLSL

I wrote a little program to explain my problem simply. I am trying to change the pixel positions of a picture using a second texture, where the x component is the direction and the other component represents the velocity. The final objective is to use data computed on the CPU by a Navier-Stokes fluid solver to move the pixels in GLSL. The CPU code uses the Processing Java library.
I am trying to understand what is buggy in my code, but I don't understand how the pixel translation works.
First I transform my direction into a color value from 0 to 255 on the CPU, then on the GPU I transform this back into a direction vector, multiply it by the velocity and scale it into the 1x1 texture-coordinate range, but that doesn't work... sorry if my explanation is not really understandable, English is not my first language.
link to the sketch
Processing :
PImage tex_velocity, tex_direction ;
PShader warping;
PImage img ;
int grid_w, grid_h ;
void setup() {
size(600,375,P2D);
// img = loadImage("pirate_small.jpg");
img = loadImage("puros_girl_small.jpg");
grid_w = 60 ;
grid_h = 37 ;
tex_velocity = createImage(grid_w,grid_h,RGB);
tex_direction = createImage(grid_w,grid_h,RGB);
warping = loadShader("shader/warp/rope_warp_frag.glsl");
noise_img(tex_velocity, 20, .1, .1); // max translate for the pixel
noise_img(tex_direction, 360, .1, .1); // degree direction
}
void draw() {
println(frameRate);
if(frameCount%30 == 0) {
noise_img(tex_velocity, 20, .1, .1); // max translate for the pixel
noise_img(tex_direction, 360, .1, .1); // degree direction
}
warping.set("mode", 0) ;
warping.set("texture",img);
warping.set("roof_component_colour",g.colorModeX);
warping.set("wh_ratio",1f/grid_w, 1f/grid_h);
warping.set("vel_texture",tex_velocity);
warping.set("dir_texture",tex_direction);
shader(warping);
image(img,0,0);
resetShader();
image(tex_velocity,5,5);
image(tex_direction,grid_w +15 ,5 );
}
float x_offset, y_offset ;
void noise_img(PImage dst, int max, float ratio_x, float ratio_y) {
noiseSeed((int)random(10000));
for(int x = 0 ; x < dst.width ; x++) {
x_offset += ratio_x ;
for(int y = 0 ; y < dst.height ; y++) {
y_offset += ratio_y ;
float v = map(noise(x_offset,y_offset),0,1,0,max);
v = (int)map(v,0,max,0,g.colorModeX);
int c = color(v,v,v,g.colorModeA) ;
dst.set(x,y,c);
}
}
}
GLSL
#ifdef GL_ES
precision mediump float;
precision mediump int;
#endif
#define PROCESSING_TEXTURE_SHADER
#define PI 3.1415926535897932384626433832795
varying vec4 vertTexCoord;
uniform sampler2D texture;
uniform int mode;
uniform float roof_component_colour;
uniform sampler2D vel_texture;
uniform sampler2D dir_texture;
uniform vec2 wh_ratio;
float map(float value, float start1, float stop1, float start2, float stop2) {
float result = start2 + (stop2 - start2) * ((value - start1) / (stop1 - start1));
return result;
}
vec2 cartesian_coord(float angle) {
float x = cos(angle);
float y = sin(angle);
return vec2(x,y);
}
vec2 translate(float fdir, float fvel) {
float angle_in_radian = map(fdir, 0, roof_component_colour, -PI, PI);
vec2 dir_cart = cartesian_coord(angle_in_radian);
return dir_cart *fvel ;
}
void main() {
vec2 ratio = gl_FragCoord.xy *wh_ratio;
vec4 vel = texture2D(vel_texture, ratio);
vec4 dir = texture2D(dir_texture, ratio);
// rendering picture ;
if(mode == 0) {
float direction = dir.x;
float velocity = vel.x;
vec2 translation = translate(direction,velocity);
// not bad, but totaly wrong
// vec2 coord_dest = vertTexCoord.st +translation
vec2 coord_dest = vertTexCoord.st *ratio +translation ;
vec4 tex_colour = texture2D(texture, coord_dest);
gl_FragColor = tex_colour;
}
// velocity
if(mode == 1 ) {
gl_FragColor = texture2D(vel_texture, vertTexCoord.st);;
}
// direction force field
if(mode == 2) {
gl_FragColor = texture2D(dir_texture, vertTexCoord.st);;
}
}
The texture format is GL_RGBA8. This means each color channel is stored in a byte, which is an integral data type in the range from 0 to 255.
But when you read texels from the texture sampler, you will get a floating-point value in the range from 0.0 to 1.0 (see glTexImage2D - GL_RGBA).
In the fragment shader you have to map the color channel (in [0, 1]), which you read from the texture sampler, to the range from -PI to PI. For this you can use the GLSL function mix, which does a linear interpolation between 2 values:
vec2 translate(float fdir, float fvel) // fdir, fvel in [0.0, 1.0]
{
float angle = mix(-PI, PI, fdir);
return vec2(cos(angle), sin(angle)) * fvel;
}
The texture coordinates are in range [0, 1]. You have to transform the translation to texture coordinates. For this you have to know the size of your image texture:
vec2 wh_ratio; // 1f/grid_w, 1f/grid_h
vec2 imageTexSize; // size of "texture"
vec2 scale = imageTexSize * wh_ratio;
vec2 coord_dest = vertTexCoord.st + translation / scale;
Thanks for the help, now I know the picture size in GLSL is in [0, 1] :) But it doesn't work as expected. I use the rendering size of the picture that must be warped, so in my idea vec2 imageTexSize is img.width and img.height, passed from Processing:
uniform vec2 imageTexSize;
.../...
vec2 scale = imageTexSize * wh_ratio;
vec2 coord_dest = vertTexCoord.st + translation / scale;
the result is the top image
and when I try this code
vec2 ratio = gl_FragCoord.xy *wh_ratio;
vec2 coord_dest = vertTexCoord.st +translation / ratio ;
the result is the middle image
and when I try this one
vec2 coord_dest = vertTexCoord.st +translation / wh_ratio ;
the result is the bottom image
Sorry, I am posting a single image because I cannot post more than one picture with my beginner reputation :)
I fixed the display bug for the full-window display, but now it is the y coordinate that is reversed for the translation. That's weird, because the velocity and direction textures are not reversed in y; the reversed-y effect shows up in their interpretation. This happens in all three modes. I tried to reverse coord_dest.y like this:
float coord_dest_y = mix(coord_dest.y, vertTexCoord.t, 0);
gl_FragColor = texture2D(texture, vec2(coord_dest.x, coord_dest_y));
but that changes nothing.
I also tried float coord_dest_y = mix(coord_dest.y, 0, vertTexCoord.t); but that makes something really strange, so that doesn't work either...
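One thing that might be worth trying (an assumption on my part, not something from the answers above): Processing's pixel y axis points down while GL texture coordinates increase upwards, so negating only the y component of the translation, or flipping the destination y coordinate, could compensate:
// Hypothetical fix: flip only the vertical part of the displacement
// (scale is the same value used in the answer above)...
vec2 coord_dest = vertTexCoord.st + vec2(translation.x, -translation.y) / scale;
// ...or, alternatively, flip the destination y coordinate afterwards:
// coord_dest.y = 1.0 - coord_dest.y;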
Here is the full GLSL code:
#ifdef GL_ES
precision mediump float;
precision mediump int;
#endif
#define PROCESSING_TEXTURE_SHADER
#define PI 3.1415926535897932384626433832795
varying vec4 vertTexCoord;
uniform sampler2D texture;
uniform int mode;
uniform sampler2D vel_texture;
uniform sampler2D dir_texture;
uniform vec2 wh_grid_ratio;
uniform vec2 wh_renderer_ratio;
vec2 cartesian_coord(float angle) {
float x = cos(angle);
float y = sin(angle);
return vec2(x,y);
}
vec2 translate(float fdir, float fvel) {
//float angle = mix(PI, -PI,fdir);
float angle = mix(fdir, PI, -PI);
return cartesian_coord(angle) *fvel ;
}
void main() {
vec2 ratio = gl_FragCoord.xy *wh_renderer_ratio;
vec4 vel = texture2D(vel_texture, ratio);
vec4 dir = texture2D(dir_texture, ratio);
float direction = dir.x;
float velocity = vel.x;
vec2 translation = translate(direction,velocity);
// mode 0 perfect
// mode 1 interesting
// mode 2 bizarre, but fun
// mode 500 warp image direction
// mode 501 warp image velocity
// perfect
if(mode == 0) {
vec2 scale = gl_FragCoord.xy *wh_renderer_ratio;
vec2 coord_dest = vertTexCoord.st +translation /scale;
float coord_dest_y = mix(coord_dest.y, vertTexCoord.t, 0);
// float coord_dest_y = mix(coord_dest.y, 0, vertTexCoord.t);
gl_FragColor = texture2D(texture, vec2(coord_dest.x, coord_dest_y));
// gl_FragColor = texture2D(texture, coord_dest);
}
// interesting
if(mode == 1) {
vec2 scale = gl_FragCoord.xy *wh_grid_ratio;
vec2 coord_dest = vertTexCoord.st +translation /scale ;
gl_FragColor = texture2D(texture, coord_dest);
}
// bizarre
if(mode == 2) {
vec2 coord_dest = vertTexCoord.st +translation /wh_grid_ratio;
gl_FragColor = texture2D(texture, coord_dest);
}
// velocity
if(mode == 500 ) {
vec4 tex_colour = texture2D(vel_texture, vertTexCoord.st);;
gl_FragColor = tex_colour;
}
// direction force field
if(mode == 501) {
vec4 tex_colour = texture2D(dir_texture, vertTexCoord.st);;
gl_FragColor = tex_colour;
}
}
And here is the resulting picture, showing the y error in the final warping:

GLSL Texture Size

I have a problem with my fragment shader.
I want to get the size of a texture (which is loaded from an image).
I know that it is possible to use textureSize(sampler) to get an ivec2 which contains the texture size. But I don't know why this isn't working (it doesn't compile):
#version 120
uniform sampler2D tex;
float textureSize;
float texelSize;
void main()
{
textureSize = textureSize(tex).x;//first line
//textureSize = 512.0;//if i set the above line as comment and use this one the shader compiles.
texelSize = 1.0 / textureSize;
vec4 color = texture2D(tex,gl_TexCoord[0].st);
gl_FragColor = color * gl_Color;
}
The problem was that my GLSL version was too low (textureSize was introduced in GLSL 1.30) and that I was missing a parameter.
Here is the working version:
#version 130
uniform sampler2D tex;
float textureSize;
float texelSize;
void main()
{
ivec2 textureSize2d = textureSize(tex,0);
textureSize = float(textureSize2d.x);
texelSize = 1.0 / textureSize;
vec4 color = texture2D(tex,gl_TexCoord[0].st);
gl_FragColor = color * gl_Color;
}
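As a side note (not part of the original answer): if staying on #version 120 were a requirement, textureSize would not be available, but the size can simply be passed in as a uniform from the application. A sketch:
#version 120
uniform sampler2D tex;
uniform vec2 texSize; // set from the application, e.g. glUniform2f(location, width, height)
float texelSize;
void main()
{
    texelSize = 1.0 / texSize.x;
    vec4 color = texture2D(tex, gl_TexCoord[0].st);
    gl_FragColor = color * gl_Color;
}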