Trying to port a GLSL glass shader to Processing 3.0

EDITED
I am a beginner with the Processing language and GLSL shaders. I am trying to port a fresnel+cubemap shader for a glass material, but as a result my shape always disappears instead... :-(
My vertex shader is:
const float Air = 1.0;
const float Glass = 1.51714;
const float Eta = Air / Glass;
const float R0 = ((Air - Glass) * (Air - Glass)) / ((Air + Glass) * (Air + Glass));
uniform mat4 transform;
uniform mat4 modelview;
uniform mat3 normalMatrix;
attribute vec4 vertex;
attribute vec3 normal;
varying vec3 v_reflection;
varying vec3 v_refraction;
varying float v_fresnel;
void main(void){
vec4 t_vertex = modelview * vertex;
vec3 incident = normalize(vec3(t_vertex));
vec3 t_normal = normalMatrix * normal;
v_refraction = refract(incident, t_normal, Eta);
v_reflection = reflect(incident, t_normal);
v_fresnel = R0 + (1.0 - R0) * pow((1.0 - dot(-incident, t_normal)), 5.0);
gl_Position = transform * t_vertex;
}
And the fragment shader:
#ifdef GL_ES
precision mediump float;
precision mediump int;
#endif
uniform samplerCube cubemap;
varying vec3 v_refraction;
varying vec3 v_reflection;
varying float v_fresnel;
void main(void){
vec4 refractionColor = textureCube(cubemap, normalize(v_refraction));
vec4 reflectionColor = textureCube(cubemap, normalize(v_reflection));
gl_FragColor = mix(refractionColor, reflectionColor, v_fresnel);
}
I am testing this shader with the Processing 3.0 sketch below (edited), in Android mode:
PShader shader;
PShape sphere;
void setup() {
fullScreen(P3D);
noStroke();
shader = loadShader("glass.frag.glsl", "glass.vert.glsl");
openCubeMap("posx.png", "negx.png", "posy.png", "negy.png", "posz.png", "negz.png");
shader.set("cubemap", 1);
sphere = createShape(SPHERE, 120);
sphere.setFill(color(-1, 50));
}
void draw() {
background(0);
directionalLight(102, 102, 102, 0, 0, -1);
lightSpecular(204, 204, 204);
directionalLight(102, 102, 102, 0, 1, -1);
specular(100, 150, 150);
translate(width / 2, height / 2);
shader(shader);
shape(sphere);
}
void openCubeMap(String posX, String negX, String posY, String negY, String posZ, String negZ) {
PGL pgl = beginPGL();
// create the OpenGL-based cubeMap
IntBuffer envMapTextureID = IntBuffer.allocate(1);
pgl.genTextures(1, envMapTextureID);
pgl.activeTexture(PGL.TEXTURE1);
pgl.enable(PGL.TEXTURE_CUBE_MAP);
pgl.bindTexture(PGL.TEXTURE_CUBE_MAP, envMapTextureID.get(0));
pgl.texParameteri(PGL.TEXTURE_CUBE_MAP, PGL.TEXTURE_WRAP_S, PGL.CLAMP_TO_EDGE);
pgl.texParameteri(PGL.TEXTURE_CUBE_MAP, PGL.TEXTURE_WRAP_T, PGL.CLAMP_TO_EDGE);
pgl.texParameteri(PGL.TEXTURE_CUBE_MAP, PGL.TEXTURE_WRAP_R, PGL.CLAMP_TO_EDGE);
pgl.texParameteri(PGL.TEXTURE_CUBE_MAP, PGL.TEXTURE_MIN_FILTER, PGL.LINEAR);
pgl.texParameteri(PGL.TEXTURE_CUBE_MAP, PGL.TEXTURE_MAG_FILTER, PGL.LINEAR);
//Load in textures
String[] textureNames = { posX, negX, posY, negY, posZ, negZ };
for (int i=0; i<textureNames.length; i++) {
PImage texture = loadImage(textureNames[i]);
int w = texture.width;
int h = texture.height;
texture.loadPixels();
pgl.texImage2D(PGL.TEXTURE_CUBE_MAP_POSITIVE_X + i, 0, PGL.RGBA, w, h, 0, PGL.RGBA, PGL.UNSIGNED_BYTE, IntBuffer.wrap(texture.pixels));
}
endPGL();
}
And I am using these images to build the cubemap.
Does anyone know how I can make this work?

The problem is not in your code but in your data.
OpenGL requires that all textures used by a cubemap have the same dimensions and that they be square; otherwise it will refuse to load them.
I checked your PNGs and this is not the case: they all have the same dimensions, but they are not square (255x230).
Also, on Android the texture dimensions may need to be a power of 2 (128, 256, 512, ...).
So I tested resizing all the textures to 256x256 pixels, and now your sample works.
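If you prefer to handle this in code rather than editing the PNGs by hand, a minimal sketch (assuming 256x256 is an acceptable face size) is to resize each face right after loading it inside openCubeMap():
PImage texture = loadImage(textureNames[i]);
texture.resize(256, 256); // force square, power-of-two faces before uploading
texture.loadPixels();
pgl.texImage2D(PGL.TEXTURE_CUBE_MAP_POSITIVE_X + i, 0, PGL.RGBA, 256, 256, 0, PGL.RGBA, PGL.UNSIGNED_BYTE, IntBuffer.wrap(texture.pixels));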

Related

Object not moving according to mouse position when using shaders in raylib

I'm creating a few glowing particles in raylib using shaders. The particles are supposed to move along with the mouse, but when I compile, everything gets stuck in the bottom-left corner and the particles don't move.
How it Looks
The C++ code
#include <raylib.h>
#include <vector>
const int W = 400;
const int H = 400;
std::vector<Vector2> particle;
float remap(float value, float low1, float high1, float low2, float high2) {
return low2 + (value - low1) * (high2 - low2) / (high1 - low1);
}
int main() {
SetConfigFlags( FLAG_WINDOW_RESIZABLE );
InitWindow(W, H, "FireWorks");
Shader shader = LoadShader("../assets/vert.glsl", "../assets/frag.glsl");
Texture2D texture = LoadTextureFromImage(GenImageColor(W, H, BLUE));
int resolLoc = GetShaderLocation(shader, "resolution");
int particleLoc = GetShaderLocation(shader, "particle");
int particleCountLoc = GetShaderLocation(shader, "particleCount");
float res[2] = {(float)W, (float)H};
SetShaderValue(shader, resolLoc, res, SHADER_UNIFORM_VEC2);
SetTargetFPS(60);
while (!WindowShouldClose()) {
BeginDrawing();
ClearBackground(BLACK);
particle.push_back(Vector2{(float)GetMouseX(), (float)GetMouseY()});
int removeCount = 1;
for (int i = 0; i < removeCount; i++) {
if (particle.size() == 0) break;
if (particle.size() > 30) {
particle.erase(particle.begin() + i);
}
}
BeginShaderMode(shader);
float particles[30][2];
for ( int i = 0; i < particle.size(); i++) {
particles[i][0] = remap(particle[i].x, 0, W, 0.0, 1.0);
particles[i][1] = remap(particle[i].y, 0, H, 1.0, 0.0);
}
int pSize = particle.size();
SetShaderValue(shader, particleCountLoc, &pSize, SHADER_UNIFORM_INT);
SetShaderValue(shader, particleLoc, particles, SHADER_UNIFORM_VEC2);
DrawTextureRec(texture, (Rectangle) { 0, 0, (float)texture.width, (float) -texture.height }, (Vector2) { 0, 0}, RAYWHITE);
DrawRectangle(0, 0, W, H, BLACK);
EndShaderMode();
EndDrawing();
}
UnloadTexture(texture);
UnloadShader(shader);
CloseWindow();
return 0;
}
The Vertex Shader
#version 330
// Input vertex attributes
in vec3 vertexPosition;
in vec2 vertexTexCoord;
in vec3 vertexNormal;
in vec4 vertexColor;
// Input uniform values
uniform mat4 mvp;
// Output vertex attributes (to fragment shader)
out vec2 fragTexCoord;
out vec4 fragColor;
// NOTE: Add here your custom variables
void main()
{
// Send vertex attributes to fragment shader
fragTexCoord = vertexTexCoord;
fragColor = vertexColor;
// Calculate final vertex position
gl_Position = mvp * vec4(vertexPosition, 1.0);
}
The Fragment Shader
#version 330
// Input vertex attributes (from vertex shader)
in vec2 fragTexCoord;
in vec4 fragColor;
// Input uniform values
uniform sampler2D texture0;
uniform vec4 colDiffuse;
// Output fragment color
out vec4 finalColor;
// NOTE: Add here your custom variables
uniform vec2 resolution;
uniform int particleCount;
uniform vec2 particle[30];
void main() {
// Texel color fetching from texture sampler
vec4 texelColor = texture(texture0, fragTexCoord);
vec2 st = gl_FragCoord.xy / resolution.xy;
float r = 0.0;
float g = 0.0;
float b = 0.0;
for (int i = 0; i < 30; i++) {
if (i < particleCount) {
vec2 particlePos = particle[i];
float value = float(i) / distance(st, particlePos.xy) * 0.00015;
g += value * 0.5;
b += value;
}
}
finalColor = vec4(r, g, b, 1.0) * texelColor * colDiffuse;
}
The JS version of the code (which works) is here.
If you could point me in the right direction it'd be great.
The uniform particle has the type vec2[30]. A uniform array needs to be set with SetShaderValueV instead of SetShaderValue:
// replace this call:
SetShaderValue(shader, particleLoc, particles, SHADER_UNIFORM_VEC2);
// with this one:
SetShaderValueV(shader, particleLoc, particles[0], SHADER_UNIFORM_VEC2, 30);
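For context, a hedged sketch of how the corrected call could sit in the draw loop (names taken from the question's code; 30 is the array length declared in the shader):
float particles[30][2] = { 0 };  // zero-initialize so unused slots are defined
for (int i = 0; i < (int)particle.size() && i < 30; i++) {
    particles[i][0] = remap(particle[i].x, 0, W, 0.0, 1.0);
    particles[i][1] = remap(particle[i].y, 0, H, 1.0, 0.0);
}
int pSize = (int)particle.size();
SetShaderValue(shader, particleCountLoc, &pSize, SHADER_UNIFORM_INT);
SetShaderValueV(shader, particleLoc, &particles[0][0], SHADER_UNIFORM_VEC2, 30);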

glDrawArraysInstanced behaves weirdly when the camera moves far from the screen

What I want to achieve is to render many small quads with the OpenGL function glDrawArraysInstanced, with the same spacing between them. For example, please refer to the following image:
The code is as follows:
void OpenGLShowVideo::displayBySmallMatrix()
{
// Now use QOpenGLExtraFunctions instead of QOpenGLFunctions as we want to
// do more than what GL(ES) 2.0 offers.
QOpenGLExtraFunctions *f = QOpenGLContext::currentContext()->extraFunctions();
f->glClearColor(9.f/255.0f, 14.f/255.0f, 15.f/255.0f, 1);
glClear(GL_COLOR_BUFFER_BIT);
f->glViewport(0, 0, this->width(), this->height());
m_displayByMatrixProgram->bind();
f->glActiveTexture(GL_TEXTURE0 + m_acRenderToScreenTexUnit);
f->glBindTexture(GL_TEXTURE_2D, m_renderWithMaskFbo->texture());
if (m_uniformsDirty) {
m_uniformsDirty = false;
m_displayByMatrixProgram->setUniformValue(m_samplerLoc, m_acRenderToScreenTexUnit);
m_proj.setToIdentity();
m_proj.perspective(INIT_VERTICAL_ANGLE, float(this->width()) / float(this->height()), m_fNearPlane, m_fFarPlane);
m_displayByMatrixProgram->setUniformValue(m_projMatrixLoc, m_proj);
QMatrix4x4 camera;
camera.lookAt(m_eye, m_eye + m_target, QVector3D(0, 1, 0));
m_displayByMatrixProgram->setUniformValue(m_camMatrixLoc, camera);
m_world.setToIdentity();
float fOffsetZ = m_fVerticalAngle / INIT_VERTICAL_ANGLE;
m_world.translate(m_fMatrixOffsetX, m_fMatrixOffsetY, fOffsetZ);
m_proj.scale(MATRIX_INIT_SCALE_X, MATRIX_INIT_SCALE_Y, 1.0f);
m_world.rotate(180, 1, 0, 0);
QMatrix4x4 wm = m_world;
m_displayByMatrixProgram->setUniformValue(m_worldMatrixLoc, wm);
QMatrix4x4 mm;
mm.setToIdentity();
m_displayByMatrixProgram->setUniformValue(m_myMatrixLoc, mm);
m_displayByMatrixProgram->setUniformValue(m_lightPosLoc, QVector3D(0, 0, 70));
QSize tmpSize = QSize(m_viewPortWidth, m_viewPortHeight);
m_displayByMatrixProgram->setUniformValue(m_resolutionLoc, tmpSize);
int whRatioVal = m_viewPortWidth / m_viewPortHeight;
m_displayByMatrixProgram->setUniformValue(m_whRatioLoc, whRatioVal);
}
m_geometries->bindBufferForArraysInstancedDraw();
f->glDrawArraysInstanced(GL_TRIANGLE_STRIP, 0, 4, m_viewPortWidth * m_viewPortHeight);
}
And the vertex shader code is as follows:
#version 330
layout(location = 0) in vec4 vertex;
out vec3 color;
uniform mat4 mvp_matrix;
uniform mat4 projMatrix;
uniform mat4 camMatrix;
uniform mat4 worldMatrix;
uniform mat4 myMatrix;
uniform vec2 viewResolution;
uniform int whRatio;
uniform sampler2D sampler;
void main() {
int posX = gl_InstanceID % int(viewResolution.x);
int posY = gl_InstanceID / int(viewResolution.y);
if( posY % whRatio < whRatio) {
posY = gl_InstanceID / int(viewResolution.x);
}
ivec2 pos = ivec2(posX, posY);
vec2 t = vec2( pos.x * 3.0, pos.y * 3.0 );
mat4 wm = mat4(1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, t.x, t.y, 1, 1) * worldMatrix;
color = texelFetch(sampler,pos,0).rgb;
gl_Position = projMatrix * camMatrix * wm * vertex;
}
And the fragment shader is as follows:
#version 330 core
in vec3 color;
out vec4 fragColor;
void main() {
fragColor = vec4(color, 1.0);
}
However, when I move the camera away from the screen (by changing the m_eye value passed to camera.lookAt(m_eye, m_eye + m_target, QVector3D(0, 1, 0))), I get something like this:
The spacing between the quads becomes uneven, and the quad sizes differ as well. But when I move the camera closer to the screen, it looks much better.
I think what you're seeing there is the result of rounding the coordinates to the nearest integer pixel coordinate.
To get something that looks more even, you want to use some form of anti-aliasing. The options that spring to mind are:
Enable some sort of full-screen anti-aliasing like MSAA. This is simple to enable, but can have a significant performance cost (a minimal Qt sketch follows after this list).
Put your pattern in a texture, and tile that texture over a single quad. Texture filtering and mip maps should take care of the anti-aliasing for you, and it will probably be faster to render that way as well because you only need a single quad.
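For the first option, a minimal sketch of requesting 4x MSAA in a Qt application (assuming a QOpenGLWidget/QOpenGLWindow setup; the format must be set before the GL surface is created):
#include <QSurfaceFormat>

QSurfaceFormat fmt = QSurfaceFormat::defaultFormat();
fmt.setSamples(4);                      // request 4x multisampled buffers
QSurfaceFormat::setDefaultFormat(fmt);  // do this before creating the GL widget/window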

Vertex shader animation is rotating with camera

I am new to shaders, and I want to animate an object with the vertex shader.
Right now I just want to move it with a constant. For some reason, instead of going in the x-direction of the world, it moves in the x-direction of the camera. (So whenever I turn the camera, the object rotates with me)
The project is in Processing, but I don't think that affects the shader.
THE PROCESSING CODE:
PShader sdr;
void setup() {
size(1000, 1000, P3D);
noStroke();
sdr = loadShader("shdFrag.glsl", "shdVert.glsl");
}
void draw() {
background(200);
// Set camera
camera(0, -300, 700, mouseX-500, 0, 200, 0, 1, 0);
// Ground
resetShader();
beginShape();
fill(100);
vertex(-500, 0, 500);
vertex( 500, 0, 500);
vertex( 500, 0, -500);
vertex(-500, 0, -500);
endShape();
// Red Sphere
shader(sdr);
fill(255, 0, 0);
sphere(100);
}
VERTEX SHADER:
uniform mat4 transform;
attribute vec4 position;
attribute vec4 color;
out vec4 vertColor;
void main() {
vec4 pos = position;
pos.x += 300;
vertColor = color;
gl_Position = transform * pos;
}
FRAGMENT SHADER:
#ifdef GL_ES
precision mediump float;
precision mediump int;
#endif
in vec4 vertColor;
void main() {
vec4 color = vertColor;
gl_FragColor = vec4(color.xyz, 1);
}
A GIF of what is happening:
the scene with a sphere

Porting ShaderToy Chromakey example to P5.js

I'm trying to port the Shadertoy chromakey example to p5.js with the webcam as the video source. After spending a lot of time reading shader documentation, my code still doesn't seem to work. I need some help.
I followed this guide to port the code to p5.js.
Fragment shader code:
#ifdef GL_ES
precision mediump float;
#endif
uniform sampler2D tex0;
uniform sampler2D tex1;
mat4 RGBtoYUV = mat4(0.257, 0.439, -0.148, 0.0,
0.504, -0.368, -0.291, 0.0,
0.098, -0.071, 0.439, 0.0,
0.0625, 0.500, 0.500, 1.0 );
vec4 chromaKey = vec4(0.05, 0.63, 0.14, 1);
vec2 maskRange = vec2(0.005, 0.26);
float colorclose(vec3 yuv, vec3 keyYuv, vec2 tol)
{
float tmp = sqrt(pow(keyYuv.g - yuv.g, 2.0) + pow(keyYuv.b - yuv.b, 2.0));
if (tmp < tol.x)
return 0.0;
else if (tmp < tol.y)
return (tmp - tol.x)/(tol.y - tol.x);
else
return 1.0;
}
void main()
{
vec2 fragPos = gl_FragCoord.xy / iResolution.xy;
vec4 texColor0 = texture(text0, fragPos);
vec4 texColor1 = texture(text1, fragPos);
vec4 keyYUV = RGBtoYUV * chromaKey;
vec4 yuv = RGBtoYUV * texColor0;
float mask = 1.0 - colorclose(yuv.rgb, keyYUV.rgb, maskRange);
gl_FragColor = max(texColor0 - mask * chromaKey, 0.0) + texColor1 * mask;
}
P5 sketch code:
let theShader;
let cam;
let img;
function preload(){
theShader = loadShader('webcam.vert', 'webcam.frag');
img = loadImage('http://www.quadrochave.com/wp-content/uploads/elementor/thumbs/nodulo_bannersite_ptodu%C3%A7%C3%A3o2-mpe2nvmu8s8o2uqcd7b2oh3mnuv9up05ubby33shz4.png');
}
function setup() {
pixelDensity(1);
createCanvas(windowWidth, windowHeight, WEBGL);
noStroke();
cam = createCapture(VIDEO);
cam.size(windowWidth, windowHeight);
cam.hide();
}
function draw() {
// shader() sets the active shader with our shader
shader(theShader);
// passing cam as a texture
theShader.setUniform('tex0', cam);
theShader.setUniform('tex1', img);
// rect gives us some geometry on the screen
theShader.rect(0,0,width,height);
}
Test on Glitch
Shadertoy chromakey original fragment shader
The major issue is that you didn't declare and set the uniform variable iResolution. But there are some more issues in the shader code (the samplers are declared as tex0 and tex1, yet referenced as text0 and text1).
Fragment shader:
precision mediump float;
uniform sampler2D tex0;
uniform sampler2D tex1;
uniform vec2 iResolution;
mat4 RGBtoYUV = mat4(0.257, 0.439, -0.148, 0.0,
0.504, -0.368, -0.291, 0.0,
0.098, -0.071, 0.439, 0.0,
0.0625, 0.500, 0.500, 1.0 );
vec4 chromaKey = vec4(0.05, 0.63, 0.14, 1);
vec2 maskRange = vec2(0.005, 0.26);
float colorclose(vec3 yuv, vec3 keyYuv, vec2 tol)
{
float tmp = sqrt(pow(keyYuv.g - yuv.g, 2.0) + pow(keyYuv.b - yuv.b, 2.0));
if (tmp < tol.x)
return 0.0;
else if (tmp < tol.y)
return (tmp - tol.x)/(tol.y - tol.x);
else
return 1.0;
}
void main()
{
vec2 fragPos = gl_FragCoord.xy / iResolution.xy;
vec4 texColor0 = texture2D(tex0, fragPos);
vec4 texColor1 = texture2D(tex1, fragPos);
vec4 keyYUV = RGBtoYUV * chromaKey;
vec4 yuv = RGBtoYUV * texColor0;
float mask = 1.0 - colorclose(yuv.rgb, keyYUV.rgb, maskRange);
gl_FragColor = max(texColor0 - mask * chromaKey, 0.0) + texColor1 * mask;
}
Script:
let theShader;
let cam;
let img;
function setup() {
createCanvas(windowWidth, windowHeight, WEBGL);
theShader = loadShader('webcam.vert', 'webcam.frag');
img = loadImage('http://www.quadrochave.com/wp-content/uploads/elementor/thumbs/nodulo_bannersite_ptodu%C3%A7%C3%A3o2-mpe2nvmu8s8o2uqcd7b2oh3mnuv9up05ubby33shz4.png');
pixelDensity(1);
noStroke();
cam = createCapture(VIDEO);
cam.size(windowWidth, windowHeight);
cam.hide();
}
function draw() {
// shader() sets the active shader with our shader
shader(theShader);
// passing cam as a texture
theShader.setUniform('tex0', cam);
theShader.setUniform('tex1', img);
theShader.setUniform('iResolution', [width, height]);
// rect gives us some geometry on the screen
rect(0,0,width,height);
}
If the vertex shader provides the texture coordinate:
// our vertex data
attribute vec3 aPosition;
attribute vec2 aTexCoord;
// lets get texcoords just for fun!
varying vec2 vTexCoord;
void main() {
// copy the texcoords
vTexCoord = aTexCoord;
// copy the position data into a vec4, using 1.0 as the w component
vec4 positionVec4 = vec4(aPosition, 1.0);
positionVec4.xy = positionVec4.xy * 2.0 - 1.0;
// send the vertex information on to the fragment shader
gl_Position = positionVec4;
}
then you can use this coordinate instead of gl_FragCoord.xy / iResolution.xy:
varying vec2 vTexCoord;
// [...]
void main() {
vec2 fragPos = vTexCoord.xy;
// [...]
}

Instance name with Uniform blocks (UBO) does not work using OpenGL/GLSL

I have implemented a uniform block in my OpenGL/GLSL application to manage the mesh material data (ambient, diffuse and specular lighting plus shininess).
For my first try, I implemented the following uniform block syntax:
uniform MaterialBlock
{
vec3 Ka, Kd, Ks;
float Shininess;
};
Here's the client code:
scene::MaterialPtr pMaterial = this->FindMaterialByName(name);
GLuint bindingPoint = 0, bufferIndex = 0;
GLint blockSize = 0;
GLuint indices[4];
GLint offset[4];
const GLchar *names[4] = {"Ka", "Kd", "Ks", "Shininess" };
GLuint blockIndex = glGetUniformBlockIndex(this->m_Handle, "MaterialBlock");
glGetActiveUniformBlockiv(this->m_Handle, blockIndex, GL_UNIFORM_BLOCK_DATA_SIZE, &blockSize);
glGetUniformIndices(this->m_Handle, 4, names, indices);
glGetActiveUniformsiv(this->m_Handle, 4, indices, GL_UNIFORM_OFFSET, offset);
char *pBuffer = new char[blockSize];
memset(pBuffer, '\0', blockSize);
glm::vec3 ambient = pMaterial->GetAmbient();
glm::vec3 diffuse = pMaterial->GetDiffuse();
glm::vec3 specular = pMaterial->GetSpecular();
float shininess = pMaterial->GetShininess();
std::copy(reinterpret_cast<char*>(&ambient[0]),
reinterpret_cast<char*>(&ambient[0]) + sizeof(glm::vec4), pBuffer + offset[0]);
std::copy(reinterpret_cast<char*>(&diffuse[0]), reinterpret_cast<char*>(
&diffuse[0]) + sizeof(glm::vec4), pBuffer + offset[1]);
std::copy(reinterpret_cast<char*>(&specular[0]),
reinterpret_cast<char*>(&specular[0]) + sizeof(glm::vec3), pBuffer + offset[2]);
std::copy(reinterpret_cast<char*>(&shininess), reinterpret_cast<char*>(
&shininess) + sizeof(float), pBuffer + offset[3]);
glUniformBlockBinding(this->m_Handle, blockIndex, bindingPoint);
{
glGenBuffers(1, &bufferIndex);
glBindBuffer(GL_UNIFORM_BUFFER, bufferIndex);
{
glBufferData(GL_UNIFORM_BUFFER, blockSize, NULL, GL_DYNAMIC_DRAW);
glBufferSubData(GL_UNIFORM_BUFFER, 0, blockSize, (const GLvoid *)pBuffer);
}
glBindBuffer(GL_UNIFORM_BUFFER, 0);
}
glBindBufferBase(GL_UNIFORM_BUFFER, bindingPoint, bufferIndex);
//TEXTURE.
{
this->SetUniform("colorSampler", 0); //THE CHANNEL HAS TO BE CALCULATED! //int
glActiveTexture(GL_TEXTURE0); //DYNAMICS.
pMaterial->GetTexture()->Lock();
}
Variables content:
blockIndex: 0 //OK
blockSize: 48 //OK
Indices: {1, 2, 3, 78} //OK
Offset: {0, 16, 32, 44} //OK
The fragment shader code:
#version 440
#define MAX_LIGHT_COUNT 10
/*
** Output color value.
*/
layout (location = 0) out vec4 FragColor;
/*
** Inputs.
*/
in vec3 Position;
in vec2 TexCoords;
in vec3 Normal;
/*
** Material uniform block.
*/
uniform MaterialBlock
{
vec3 Ka, Kd, Ks;
float Shininess;
};
uniform sampler2D ColorSampler;
struct Light
{
vec4 Position;
vec3 La, Ld, Ls;
float Kc, Kl, Kq;
};
uniform struct Light LightInfos[MAX_LIGHT_COUNT];
uniform unsigned int LightCount;
/*
** Light attenuation factor.
*/
float getLightAttenuationFactor(vec3 lightDir, Light light)
{
float lightAtt = 0.0f;
float dist = 0.0f;
dist = length(lightDir);
lightAtt = 1.0f / (light.Kc + (light.Kl * dist) + (light.Kq * pow(dist, 2)));
return (lightAtt);
}
/*
** Basic phong shading.
*/
vec3 Basic_Phong_Shading(vec3 normalDir, vec3 lightDir, vec3 viewDir, int idx)
{
vec3 Specular = vec3(0.0f);
float lambertTerm = max(dot(lightDir, normalDir), 0.0f);
vec3 Ambient = LightInfos[idx].La * Ka;
vec3 Diffuse = LightInfos[idx].Ld * Kd * lambertTerm;
if (lambertTerm > 0.0f)
{
vec3 reflectDir = reflect(-lightDir, normalDir);
Specular = LightInfos[idx].Ls * Ks * pow(max(dot(reflectDir, viewDir), 0.0f), Shininess);
}
return (Ambient + Diffuse + Specular);
}
/*
** Fragment shader entry point.
*/
void main(void)
{
vec3 LightIntensity = vec3(0.0f);
vec4 texDiffuseColor = texture2D(ColorSampler, TexCoords);
vec3 normalDir = (gl_FrontFacing ? -Normal : Normal);
for (int idx = 0; idx < LightCount; idx++)
{
vec3 lightDir = vec3(LightInfos[idx].Position) - Position.xyz;
vec3 viewDir = -Position.xyz;
float lightAttenuationFactor = getLightAttenuationFactor(lightDir, LightInfos[idx]);
LightIntensity += Basic_Phong_Shading(
-normalize(normalDir), normalize(lightDir), normalize(viewDir), idx
) * lightAttenuationFactor;
}
FragColor = vec4(LightIntensity, 1.0f) * texDiffuseColor;
}
This code works perfectly. The output is the following:
But I know it's possible to use an instance name (like structures in C/C++) with uniform blocks, as follows:
uniform MaterialBlock
{
vec3 Ka, Kd, Ks;
float Shininess;
} MaterialInfos;
Of course, all the variables used in the shader, like 'Ka', 'Kd', 'Ks' and 'Shininess', become 'MaterialInfos.Ka', 'MaterialInfos.Kd', 'MaterialInfos.Ks' and 'MaterialInfos.Shininess'.
But unfortunately the program fails, because in the client code above the variables 'indices' and 'offset' are not filled correctly.
Here's the log:
blockIndex: 0 //OK
blockSize: 48 //OK
Indices: {4294967295, 4294967295, 4294967295, 4294967295} //NOT OK
Offset: {-858993460, -858993460, -858993460, -858993460} //NOT OK
So only the block index and the block size are correct. To fix the problem, I tried changing the line:
const GLchar *names[4] = {"Ka", "Kd", "Ks", "Shininess" };
to the following one:
const GLchar *names[4] = {"MaterialInfos.Ka", "MaterialInfos.Kd", "MaterialInfos.Ks", "MaterialInfos.Shininess" };
But I still get the same log for the variables 'indices' and 'offset', so my application still fails. I think it is a syntax problem in the client code (not in the GLSL code, because I get no GLSL error), but I can't find the solution.
Do you have an idea where my problem comes from?
When using instanced uniform blocks, the OpenGL application uses the block name (in this case MaterialBlock) before the dot, not the instance name as you have in your current code. The instance name is only ever seen by the GLSL shader.
Therefore your names variable should be defined and initialized as follows:
const GLchar *names[4] = {"MaterialBlock.Ka", "MaterialBlock.Kd", "MaterialBlock.Ks", "MaterialBlock.Shininess" };
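With those names, the lookups from your existing client code should succeed again; a minimal sketch reusing the handles from your snippet:
glGetUniformIndices(this->m_Handle, 4, names, indices);
glGetActiveUniformsiv(this->m_Handle, 4, indices, GL_UNIFORM_OFFSET, offset);
// indices/offset should now hold valid values instead of GL_INVALID_INDEX (4294967295)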
Try declaring your struct type separately from the uniform (which is then of that struct type):
struct MaterialData
{
vec3 kAmbient;
vec3 kDiffuse;
vec3 kSpecular;
float shininess;
};
uniform MaterialData material;
(If you follow this example, both your MaterialBlock and Light declarations are erroneous, for slightly different reasons.)
Then you can set uniforms by referring to them as (e.g.) "material.kAmbient" on the CPU side and reading them as material.kAmbient on the GPU side.
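With that plain (non-block) struct, a minimal sketch of the CPU side could look like this (the program handle and the glm vectors are assumptions based on the question's code):
GLint locKa   = glGetUniformLocation(program, "material.kAmbient");
GLint locKd   = glGetUniformLocation(program, "material.kDiffuse");
GLint locKs   = glGetUniformLocation(program, "material.kSpecular");
GLint locShin = glGetUniformLocation(program, "material.shininess");
glUniform3fv(locKa, 1, &ambient[0]);  // glm::vec3 material values
glUniform3fv(locKd, 1, &diffuse[0]);
glUniform3fv(locKs, 1, &specular[0]);
glUniform1f(locShin, shininess);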