I made a GLSL geometry shader that rounds the corners of 2D line strips by inserting a circular fillet at each corner.
There's a special case: when two adjacent segments are colinear, no fillet can be created, and the single original vertex is passed through instead.
This requires an if statement per input vertex. Is there a clever way to avoid branching in this particular case? And is it even an issue when the total number of processed vertices per frame is typically only a couple hundred?
Here's the complete shader code:
#version 400
layout(lines_adjacency) in;
layout(line_strip, max_vertices=25) out;
uniform mat4 modelMatrix;
uniform mat4 viewMatrix;
uniform mat4 projectionMatrix;
uniform float radius = 160.0;
uniform int steps = 10;
in VS_OUT {
    vec2 position;
} gsIn[];
bool arc(vec2 p0, vec2 p1, vec2 p2, out vec2 arcCenter, out vec2 arcStartPoint, out vec2 arcMidPoint, out vec2 arcEndPoint){
    vec2 t0 = normalize(p0 - p1);
    vec2 t1 = normalize(p2 - p1);
    // segments are colinear, exit
    if(abs(dot(t0, t1)) > .9999f){
        return false;
    }
    vec2 h = normalize((t0 + t1) / 2.0);
    float cosa = abs(dot(h, vec2(-t0.y, t0.x)));
    float hlen = radius / cosa;
    arcCenter = p1 + h * hlen;
    float d = sqrt(hlen*hlen - radius*radius);
    arcStartPoint = p1 + t0 * d;
    arcEndPoint = p1 + t1 * d;
    arcMidPoint = arcCenter - h * radius;
    return true;
}
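// Geometry behind arc(): h is the corner bisector at p1. cosa is the cosine
// of the angle between h and the segment normal (equivalently the sine of
// the half corner angle), so placing the center at distance radius/cosa
// along h makes its perpendicular distance to both segments equal radius,
// and d = sqrt(hlen^2 - radius^2) is the distance from p1 to each tangency
// point.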
float stepangle(vec2 center, vec2 s, vec2 e){
    vec2 rv1 = s - center;
    vec2 rv2 = e - center;
    float angle = acos( dot(normalize(rv1), normalize(rv2)) ) / steps;
    // flip the sign according to the winding of the corner
    if( dot(rv1, vec2(-rv2.y, rv2.x)) < 0 ){
        angle = -angle;
    }
    return angle;
}
mat2 rotationMatrix(float angle){
    float cosa = cos(angle);
    float sina = sin(angle);
    return mat2(cosa, -sina, sina, cosa);
}
void emitFillet(vec2 center, vec2 arcStartPoint, vec2 arcEndPoint, mat4 mvpMatrix){
    float a = stepangle(center, arcStartPoint, arcEndPoint);
    mat2 rotMat = rotationMatrix(a);
    vec2 radVec = arcStartPoint - center;
    for(int i = 0; i <= steps; ++i){
        gl_Position = mvpMatrix * vec4(center + radVec, 0.0, 1.0);
        EmitVertex();
        radVec = rotMat * radVec;
    }
}
void emitSingleVertex(vec2 vert, mat4 mvpMatrix){
    gl_Position = mvpMatrix * vec4(vert, 0.0, 1.0);
    EmitVertex();
}
void main(){
    mat4 mvMatrix = viewMatrix * modelMatrix;
    mat4 mvpMatrix = projectionMatrix * mvMatrix;
    // lines_adjacency provides a four-vertex window; fillets are emitted
    // around the two inner vertices p1 and p2
    vec2 p0 = gsIn[0].position;
    vec2 p1 = gsIn[1].position;
    vec2 p2 = gsIn[2].position;
    vec2 p3 = gsIn[3].position;
    vec2 center, s, m, e;
    bool canMakeFillet;
    // first corner half-fillet
    canMakeFillet = arc(p0, p1, p2, center, s, m, e);
    if(canMakeFillet){
        emitFillet(center, m, e, mvpMatrix);
    }
    else{
        emitSingleVertex(p1, mvpMatrix);
    }
    // second corner half-fillet
    canMakeFillet = arc(p1, p2, p3, center, s, m, e);
    if(canMakeFillet){
        emitFillet(center, s, m, mvpMatrix);
    }
    else{
        emitSingleVertex(p2, mvpMatrix);
    }
    EndPrimitive();
}
Related
Does anyone know why I keep getting the error that says:
The ♦ shader uses varying _I;DATA;g_mapCoord, but previous shader does not write to it.
The ♦ shader uses varying _I;DATA;worldPosition, but previous shader does not write to it.
Take a look at my shaders here.
Vertex
#version 430
layout (location = 0) in vec2 position0;
out DATA {
    vec2 v_mapCoord;
    vec3 worldPosition;
} Out;
uniform vec3 u_cameraPosition;
uniform mat4 u_localMatrix;
uniform mat4 u_worldMatrix;
uniform float u_scaleY;
uniform int u_lod;
uniform vec2 u_index;
uniform float u_gap;
uniform vec2 u_location;
uniform sampler2D s_heightmap;
uniform int u_lodMorphArea[8];
float morphLatitude(vec2 position)
{
    //not important code
    return 0;
}
float morphLongitude(vec2 position)
{
    //not important code
    return 0;
}
vec2 morph(vec2 localPosition, int morph_area){
    //not important code
    return vec2(0);
}
void main()
{
    vec2 localPosition = (u_localMatrix * vec4(position0.x, 0, position0.y, 1)).xz;
    if (u_lod > 0) {
        localPosition += morph(localPosition, u_lodMorphArea[u_lod-1]); // Translate position by morphing vector
    }
    float height = texture(s_heightmap, localPosition).r;
    Out.v_mapCoord = localPosition;
    vec4 _worldPosition = u_worldMatrix * vec4(localPosition.x, height, localPosition.y, 1);
    Out.worldPosition = _worldPosition.xyz;
    gl_Position = u_worldMatrix * vec4(localPosition.x, height, localPosition.y, 1);
}
Fragment
#version 430
layout (location = 0) out vec4 outputColor;
in DATA {
    vec2 g_mapCoord;
    vec3 worldPosition;
} In;
const vec3 lightDirection = vec3(-0.2, -1.0, -0.2);
const float intensity = 1.2;
uniform sampler2D s_textureNormal;
uniform sampler2D s_textureWater;
uniform sampler2D s_textureLand;
float diffuse(vec3 direction, vec3 normal, float intensity)
{
    return max(0.01, dot(normal, -direction) * intensity);
}
void main()
{
    vec3 normal = texture(s_textureNormal, In.g_mapCoord).rgb;
    float diff = diffuse(lightDirection, normal, intensity);
    outputColor = vec4(1, 0, 0, 1);
}
Geom
#version 430
layout(triangles) in;
layout(triangle_strip, max_vertices = 3) out;
in vec2 te_mapCoord[];
out vec2 g_mapCoord;
uniform mat4 u_viewProjection;
void main() {
    for (int i = 0; i < gl_in.length(); ++i)
    {
        vec4 position = gl_in[i].gl_Position;
        gl_Position = u_viewProjection * position;
        g_mapCoord = te_mapCoord[i];
        EmitVertex();
    }
    EndPrimitive();
}
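// Note: g_mapCoord above is declared as a loose varying, while the fragment
// shader declares it inside an interface block; this mismatch is what the
// answer below addresses.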
TCS
#version 430
layout(vertices = 16) out;
in DATA {
    vec2 v_mapCoord;
    vec3 worldPosition;
} In[];
out vec2 tc_mapCoord[];
const int AB = 2;
const int BC = 3;
const int CD = 0;
const int DA = 1;
uniform int u_tessellationFactor;
uniform float u_tessellationSlope;
uniform float u_tessellationShift;
uniform vec3 u_cameraPosition;
// Calculate tessellation levels
float lodFactor(float dist)
{
    float tessellationLevel = max(0.0, u_tessellationFactor / pow(dist, u_tessellationSlope) + u_tessellationShift);
    return tessellationLevel;
}
void main()
{
    if (gl_InvocationID == 0){
        // Calculate mid points of the edges of the quad
        vec3 abMid = vec3(gl_in[0].gl_Position + gl_in[3].gl_Position) / 2.0;   // Bottom left, bottom right
        vec3 bcMid = vec3(gl_in[3].gl_Position + gl_in[15].gl_Position) / 2.0;  // Bottom right, top right
        vec3 cdMid = vec3(gl_in[15].gl_Position + gl_in[12].gl_Position) / 2.0; // Top right, top left
        vec3 daMid = vec3(gl_in[12].gl_Position + gl_in[0].gl_Position) / 2.0;  // Top left, bottom left
        // Calculate distance between camera and mid points of the edges of the quad
        float distanceAB = distance(abMid, u_cameraPosition);
        float distanceBC = distance(bcMid, u_cameraPosition);
        float distanceCD = distance(cdMid, u_cameraPosition);
        float distanceDA = distance(daMid, u_cameraPosition);
        // Tessellation levels used by the tessellation primitive generator (define how much tessellation to apply to the patch). Value between 1 and gl_MaxTessGenLevel, depending on lodFactor.
        gl_TessLevelOuter[AB] = mix(1, gl_MaxTessGenLevel, lodFactor(distanceAB));
        gl_TessLevelOuter[BC] = mix(1, gl_MaxTessGenLevel, lodFactor(distanceBC));
        gl_TessLevelOuter[CD] = mix(1, gl_MaxTessGenLevel, lodFactor(distanceCD));
        gl_TessLevelOuter[DA] = mix(1, gl_MaxTessGenLevel, lodFactor(distanceDA));
        gl_TessLevelInner[0] = (gl_TessLevelOuter[BC] + gl_TessLevelOuter[DA]) / 4;
        gl_TessLevelInner[1] = (gl_TessLevelOuter[AB] + gl_TessLevelOuter[CD]) / 4;
    }
    tc_mapCoord[gl_InvocationID] = In[gl_InvocationID].v_mapCoord; // Just pass to the next stage
    gl_out[gl_InvocationID].gl_Position = gl_in[gl_InvocationID].gl_Position;
}
TES
#version 430
layout(quads, fractional_odd_spacing, cw) in;
in vec2 tc_mapCoord[];
out vec2 te_mapCoord;
uniform sampler2D s_heightmap;
uniform float u_scaleY;
void main(){
    float u = gl_TessCoord.x;
    float v = gl_TessCoord.y;
    // Compute new position for each tessellated vertex within the patch. gl_in with index 12, 0, 3, 15 are corners of the patch.
    vec4 position = ((1 - u) * (1 - v) * gl_in[12].gl_Position +
                     u * (1 - v) * gl_in[0].gl_Position +
                     u * v * gl_in[3].gl_Position +
                     (1 - u) * v * gl_in[15].gl_Position);
    vec2 mapCoord = ((1 - u) * (1 - v) * tc_mapCoord[12] +
                     u * (1 - v) * tc_mapCoord[0] +
                     u * v * tc_mapCoord[3] +
                     (1 - u) * v * tc_mapCoord[15]);
    float height = texture(s_heightmap, mapCoord).r;
    height *= u_scaleY;
    position.y = height;
    te_mapCoord = mapCoord;
    gl_Position = position;
}
Can anyone help me find the error here that's causing that error message?
When you introduce a geometry shader, you need to pass the varyings for the fragment shader from the geometry shader, not the vertex shader.
You can see how your geometry shader's output:
out vec2 g_mapCoord;
is incompatible with your fragment shader expecting this:
in DATA {
    vec2 g_mapCoord;
    vec3 worldPosition;
} In;
Related question and subsequent answers here.
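One possible shape of the fix (a sketch, not tested against the rest of the pipeline): have the geometry shader declare the same DATA interface block the fragment shader expects, and write both members before each EmitVertex(). Note that worldPosition would also have to be carried through the tessellation stages first; here it is faked from the incoming position just to illustrate the interface:
out DATA {
    vec2 g_mapCoord;
    vec3 worldPosition;
} Out;
void main() {
    for (int i = 0; i < gl_in.length(); ++i) {
        vec4 position = gl_in[i].gl_Position;
        gl_Position = u_viewProjection * position;
        Out.g_mapCoord = te_mapCoord[i];
        Out.worldPosition = position.xyz; // assumption: pre-projection position stands in for world position
        EmitVertex();
    }
    EndPrimitive();
}
Interface blocks match across stages by block name (DATA) and member names, so the instance names (Out/In) may differ.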
Similarly to this related question, I am trying to render complex shapes by means of ray-tracing inside a cube: that is, 12 triangles are used to generate a bounding box, and each fragment is used to render the given shape by ray-tracing.
For this example, I am using the easiest shape: a sphere.
The problem is that when the cube is rotated, the different triangle angles distort the sphere:
What I have attempted so far:
I tried doing the ray-tracing in world space, and also in view space, as suggested in the related question.
I checked that the world coordinate of the fragment is correct, by making a reverse projection from gl_FragCoord, with the same output (see the sketch after this list).
I switched to an orthographic projection, where the distortion is reversed:
My conclusion is that, as described in the related question, the interpolation of the coordinates and the projection are the origin of the problem.
I could project the cube onto a plane perpendicular to the camera direction, but I would like to get to the bottom of the question.
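For reference, the reverse projection mentioned above can be done along these lines (a minimal sketch, assuming a hypothetical uniform vec4 viewport holding (x, y, width, height) and the default [0, 1] depth range; view and projection are the matrices used in the shaders below):
// Reconstruct the fragment's world position from gl_FragCoord.
vec4 ndc = vec4((gl_FragCoord.xy - viewport.xy) / viewport.zw * 2.0 - 1.0,
                gl_FragCoord.z * 2.0 - 1.0,
                1.0);
vec4 world = inverse(projection * view) * ndc;
world /= world.w;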
Related code:
Vertex shader:
#version 420 core
uniform mat4 model;
uniform mat4 view;
uniform mat4 projection;
in vec3 in_Position;
in vec3 in_Normal;
out Vertex
{
    vec4 worldCoord;
    vec4 worldNormal;
} v;
void main(void)
{
    mat4 mv = view * model;
    // Position
    v.worldCoord = model * vec4(in_Position, 1.0);
    gl_Position = projection * mv * vec4(in_Position, 1.0);
    // Normal
    v.worldNormal = normalize(vec4(in_Normal, 0.0));
}
Fragment shader:
#version 420 core
uniform mat4 view;
uniform vec3 cameraPosView;
in Vertex
{
    vec4 worldCoord;
    vec4 worldNormal;
} v;
out vec4 out_Color;
bool sphereIntersection(vec4 rayOrig, vec4 rayDirNorm, vec4 spherePos, float radius, out float t_out)
{
    float r2 = radius * radius;
    vec4 L = spherePos - rayOrig;
    float tca = dot(L, rayDirNorm);   // projection of the center onto the ray
    float d2 = dot(L, L) - tca * tca; // squared distance from the center to the ray
    if(d2 > r2)
    {
        return false;
    }
    float thc = sqrt(r2 - d2);        // half chord length
    float t0 = tca - thc;
    float t1 = tca + thc;
    if (t0 > 0)
    {
        t_out = t0;
        return true;
    }
    if (t1 > 0)
    {
        t_out = t1;
        return true;
    }
    return false;
}
void main()
{
    vec3 color = vec3(1);
    vec4 spherePos = vec4(0.0, 0.0, 0.0, 1.0);
    float radius = 1.0;
    float t_out = 0.0;
    vec4 cord = v.worldCoord;
    vec4 rayOrig = (inverse(view) * vec4(-cameraPosView, 1.0));
    vec4 rayDir = normalize(cord - rayOrig);
    if (sphereIntersection(rayOrig, rayDir, spherePos, 0.3, t_out))
    {
        out_Color = vec4(1.0);
    }
    else
    {
        discard;
    }
}
I'm rendering a sphere with instanced drawing, while rotating the model-view matrix around the Y axis.
It looks OK at the beginning:
But at another angle, things get worse:
It looks to me like a problem with normals. Currently, I'm calculating the normal matrix from my model-view matrix and then passing it to the shader, which does Phong-like lighting:
attribute vec4 a_position;
attribute vec3 a_normal;
attribute vec4 a_color;
attribute vec2 a_coord;
attribute mat4 a_matrix;
uniform mat4 u_mv_matrix;
uniform mat4 u_projection_matrix;
uniform mat3 u_normal_matrix;
varying vec4 v_position;
varying vec3 v_normal;
varying vec4 v_color;
varying vec2 v_coord;
void main() {
    vec4 transformedPosition = u_mv_matrix * a_matrix * a_position;
    v_position = transformedPosition;
    v_normal = u_normal_matrix * a_normal;
    v_color = a_color;
    v_coord = a_coord;
    gl_Position = u_projection_matrix * transformedPosition;
}
uniform sampler2D u_sampler;
varying vec4 v_position;
varying vec3 v_normal;
varying vec4 v_color;
varying vec2 v_coord;
void main() {
    vec3 lightPosition = vec3(0.0); // XXX
    // set diffuse and specular colors
    vec3 cDiffuse = (v_color * texture2D(u_sampler, v_coord)).rgb;
    vec3 cSpecular = vec3(0.3);
    // lighting calculations
    vec3 N = normalize(v_normal);
    vec3 L = normalize(lightPosition - v_position.xyz);
    vec3 E = normalize(-v_position.xyz);
    vec3 H = normalize(L + E);
    // Calculate coefficients.
    float phong = max(dot(N, L), 0.0);
    const float kMaterialShininess = 20.0;
    const float kNormalization = (kMaterialShininess + 8.0) / (3.14159265 * 8.0);
    float blinn = pow(max(dot(N, H), 0.0), kMaterialShininess) * kNormalization;
    // diffuse coefficient
    vec3 diffuse = phong * cDiffuse;
    // specular coefficient
    vec3 specular = blinn * cSpecular;
    gl_FragColor = vec4(diffuse + specular, 1);
}
Final note: I'm working on desktop OpenGL 2.1 as well as WebGL in the browser.
Edit: Per request, I'm adding some information.
The mesh is built as follows, by passing an identity matrix:
void Sphere::append(IndexedVertexBatch<XYZ.N.UV> &batch, const Matrix &matrix) const {
    float sectorStep = TWO_PI / sectorCount;
    float stackStep = PI / stackCount;
    for (int i = 0; i <= stackCount; ++i) {
        float stackAngle = HALF_PI - i * stackStep;
        float xy = radius * cosf(stackAngle);
        float z = radius * sinf(stackAngle);
        for (int j = 0; j <= sectorCount; ++j) {
            float sectorAngle = j * sectorStep;
            float x = xy * cosf(sectorAngle);
            float y = xy * sinf(sectorAngle);
            float nx = x / radius;
            float ny = y / radius;
            float nz = z / radius;
            float s = (float)j / sectorCount;
            float t = (float)i / stackCount;
            batch.addVertex(matrix.transformPoint(x, y, z), matrix.transformNormal(nx, ny, nz), glm::vec2(s, t));
        }
    }
    for (int i = 0; i < stackCount; ++i) {
        float k1 = i * (sectorCount + 1);
        float k2 = k1 + sectorCount + 1;
        for (int j = 0; j < sectorCount; ++j, ++k1, ++k2) {
            if (i != 0) {
                if (frontFace == CCW) {
                    batch.addIndices(k1, k1 + 1, k2);
                } else {
                    batch.addIndices(k1, k2, k1 + 1);
                }
            }
            if (i != (stackCount - 1)) {
                if (frontFace == CCW) {
                    batch.addIndices(k1 + 1, k2 + 1, k2);
                } else {
                    batch.addIndices(k1 + 1, k2, k2 + 1);
                }
            }
        }
    }
}
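// Index layout note: k1/k2 walk two adjacent stack rows. The first and last
// stacks emit one triangle per sector (the pole fans), while the middle
// stacks emit two triangles per quad, with winding chosen by frontFace.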
Regarding the transformation matrices, it works as follows:
camera.getMVMatrix()
    .setIdentity()
    .translate(0, -150, -600)
    .rotateY(clock()->getTime() * 0.5f);
State()
    .setShader(shader)
    .setShaderMatrix<MV>(camera.getMVMatrix())
    .setShaderMatrix<PROJECTION>(camera.getProjectionMatrix())
    .setShaderMatrix<NORMAL>(camera.getNormalMatrix())
    .apply();
Finally, the light position is defined as vec3(0) in the fragment shader.
Note: As you can see, I'm using my own framework which provides among other things high level methods for building meshes and handling transformations. It's all straightforward stuff, proven to work as intended, but let me know if you need pointers to the source-code.
Update: The lighting part of the shader I used ended up being wrong, so I switched to another method.
But in essence, the solution I proposed in my answer is still valid (or at least it does the job of solving the "normal problem" when instancing is used and non-uniform scaling is avoided).
Here is a gist with the source-code. There is also an online WebGL demo.
The solution was relatively simple: there is no point in passing a normal-matrix to the shader.
Instead, the normal needs to be computed in the vertex shader:
v_normal = vec3(u_mv_matrix * a_matrix * vec4(a_normal, 0.0));
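A note on the design choice: this works because the per-instance a_matrix is composed with the model-view matrix per vertex, and with no non-uniform scaling the upper 3x3 of that product rotates normals correctly. If non-uniform scaling were ever introduced, the general fix would be the inverse-transpose of the combined matrix; a hypothetical sketch (inverse() requires GLSL 1.40+ / GLSL ES 3.00+, so in GLSL 1.20 or WebGL 1 it would have to be computed on the CPU instead):
// Handles non-uniform scaling; not available in GLSL 1.20 / WebGL 1 shaders.
v_normal = mat3(transpose(inverse(u_mv_matrix * a_matrix))) * a_normal;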
I decided to follow the classic guide for writing a basic GLSL water shader using the sum-of-sines method. I attempted to implement it inside Processing 5, where I made a field of vertices in a PShape to create a mesh to experiment with. I then overwrote the default shaders with my own vertex and fragment shaders, and I dropped in a directional light so I can actually see the normals. I made sure the directional light was movable as well, so I could check the normals from all angles.
I got the waves to form height correctly, and I had some form of normals working, but the normals are interacting really strangely. When my light passes across the center axis of my water plane, the normals seem to morph between the different waves and change based on the light angle. The GIF I captured was too large to post inline, so I'm sure seeing it will explain it better than my words:
https://imgur.com/PCznL7U
You should maximize the link to see the whole picture. Note how, as the light pans from left to right, the normals of the waves seem to morph between two sets? This is especially apparent as it crosses the center. It's as if the normals are inconsistent based on the direction the object is lit from.
The sphere in the middle is a normal sphere using the standard Processing shader. I left it there as a reference to see the waves, as well as to confirm where my light was and that it was working fine.
Any ideas what I did wrong? I know I must have done some math incorrectly somewhere.
EDIT: It was recommended that I add the (lengthy) source code [which I should have done from the start].
Vertex Shader:
#define PROCESSING_LIGHT_SHADER
#define MAXWAVES 6
const float pi = 3.14159;
uniform mat4 transform;
uniform mat4 modelview;
uniform mat3 normalMatrix;
uniform float time; //Time since shader started
attribute vec4 position; //Position the vertex from Processing
attribute vec4 color; //Color of the vertex from Processing
attribute vec3 normal; //Normal of the vertex from Processing
attribute vec4 ambient;
attribute vec4 specular;
attribute vec4 emissive;
attribute float shininess;
varying vec4 vertColor; //Color passed on to fragment shader
varying vec4 backVertColor; //Color passed on to fragment shader
uniform float waveLength[MAXWAVES]; //Length of wave
uniform float speed[MAXWAVES]; //Cycle speed of wave
uniform float amplitude[MAXWAVES]; //Wave cycle height
uniform float xDirection[MAXWAVES];
uniform float yDirection[MAXWAVES]; //Flow vector of wave
uniform int lightCount;
uniform vec4 lightPosition[8];
uniform vec3 lightNormal[8];
uniform vec3 lightAmbient[8];
uniform vec3 lightDiffuse[8];
uniform vec3 lightSpecular[8];
uniform vec3 lightFalloff[8];
uniform vec2 lightSpot[8];
varying vec3 Normal;
varying vec3 FragPos;
varying vec3 Vec;
varying vec3 lightDir;
//Some constants that the processing shader used
const float zero_float = 0.0;
const float one_float = 1.0;
const vec3 zero_vec3 = vec3(0);
float falloffFactor(vec3 lightPos, vec3 vertPos, vec3 coeff) {
    vec3 lpv = lightPos - vertPos;
    vec3 dist = vec3(one_float);
    dist.z = dot(lpv, lpv);
    dist.y = sqrt(dist.z);
    return one_float / dot(dist, coeff);
}
float spotFactor(vec3 lightPos, vec3 vertPos, vec3 lightNorm, float minCos, float spotExp) {
    vec3 lpv = normalize(lightPos - vertPos);
    vec3 nln = -one_float * lightNorm;
    float spotCos = dot(nln, lpv);
    return spotCos <= minCos ? zero_float : pow(spotCos, spotExp);
}
float lambertFactor(vec3 lightDir, vec3 vecNormal) {
    return max(zero_float, dot(lightDir, vecNormal));
}
float blinnPhongFactor(vec3 lightDir, vec3 vertPos, vec3 vecNormal, float shine) {
    vec3 np = normalize(vertPos);
    vec3 ldp = normalize(lightDir - np);
    return pow(max(zero_float, dot(ldp, vecNormal)), shine);
}
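// falloffFactor computes the classic attenuation term: dist holds
// (1, d, d^2), so dot(dist, coeff) is constant + linear*d + quadratic*d^2.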
//Returns the height of a vertex given a single wave param
float WaveHeight(int waveNumber, float x, float y) {
    vec2 direction = (vec2(xDirection[waveNumber], yDirection[waveNumber]));
    float frequency = 2.0 * pi / waveLength[waveNumber];
    float phase = speed[waveNumber] * frequency;
    float theta = dot(direction, vec2(x, y));
    return amplitude[waveNumber] * sin(theta * frequency + time * phase);
}
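// Each term above is the standard sum-of-sines form
//   H_i(x, y, t) = A_i * sin(w_i * dot(D_i, (x, y)) + t * phi_i)
// with angular frequency w_i = 2*pi/waveLength_i and phase constant
// phi_i = speed_i * w_i, matching the uniforms.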
//Returns height of a vertex given all the active waves
// and its current x/y value
float WaveSum(float x, float y)
{
    float height = 0.0;
    for (int i = 0; i < MAXWAVES; i++)
    {
        height += WaveHeight(i, x, y);
    }
    return height;
}
float getDy(int waveNumber, float x, float y) {
    vec2 direction = (vec2(xDirection[waveNumber], yDirection[waveNumber]));
    float frequency = 2.0 * pi / waveLength[waveNumber];
    float phase = speed[waveNumber] * frequency;
    float theta = dot(direction, vec2(x, y));
    float A = amplitude[waveNumber] * direction.y * frequency;
    return A * cos(theta * frequency + time * phase);
}
float getDx(int waveNumber, float x, float y) {
    vec2 direction = (vec2(xDirection[waveNumber], yDirection[waveNumber]));
    float frequency = 2.0 * pi / waveLength[waveNumber];
    float phase = speed[waveNumber] * frequency;
    float theta = dot(direction, vec2(x, y));
    float A = amplitude[waveNumber] * direction.x * frequency;
    return A * cos(theta * frequency + time * phase);
}
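// getDx/getDy are the analytic partial derivatives of WaveSum's terms, so
// for the height field z = H(x, y) the unnormalized surface normal is
// (-dH/dx, -dH/dy, 1), which getNormal assembles below.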
//Returns the normal vector for each vertex
vec3 getNormal(float x, float y) {
    float dx = 0.0;
    float dy = 0.0;
    //Sum for each wave
    for (int i = 0; i < MAXWAVES; i++) {
        dx += getDx(i, x, y);
        dy += getDy(i, x, y);
    }
    vec3 n = vec3(-dx, -dy, 1.0);
    return normalize(n);
}
void main() {
    vec4 pos = position; //Grab the position from Processing bc it's read only
    pos.z = WaveSum(pos.x, pos.y);
    gl_Position = transform * pos; //Get clipping matrix for view
    vec3 ecVertex = vec3(modelview * pos);
    // Normal vector in eye coordinates
    vec3 Normal = getNormal(pos.x, pos.y); // note: this local shadows the 'Normal' varying declared above
    vec3 ecNormal = normalize(normalMatrix * Normal);
    vec3 ecNormalInv = ecNormal * -one_float;
    // Light calculations
    vec3 totalAmbient = vec3(0, 0, 0);
    vec3 totalFrontDiffuse = vec3(0, 0, 0);
    vec3 totalFrontSpecular = vec3(0, 0, 0);
    vec3 totalBackDiffuse = vec3(0, 0, 0);
    vec3 totalBackSpecular = vec3(0, 0, 0);
    for (int i = 0; i < 8; i++) {
        if (lightCount == i) break;
        vec3 lightPos = lightPosition[i].xyz;
        bool isDir = lightPosition[i].w < one_float;
        float spotCos = lightSpot[i].x;
        float spotExp = lightSpot[i].y;
        vec3 lightDir; // note: this local shadows the 'lightDir' varying declared above
        float falloff;
        float spotf;
        if (isDir) {
            falloff = one_float;
            lightDir = -one_float * lightNormal[i];
        } else {
            falloff = falloffFactor(lightPos, ecVertex, lightFalloff[i]);
            lightDir = normalize(lightPos - ecVertex);
        }
        spotf = spotExp > zero_float ? spotFactor(lightPos, ecVertex, lightNormal[i], spotCos, spotExp)
                                     : one_float;
        if (any(greaterThan(lightAmbient[i], zero_vec3))) {
            totalAmbient += lightAmbient[i] * falloff;
        }
        if (any(greaterThan(lightDiffuse[i], zero_vec3))) {
            totalFrontDiffuse += lightDiffuse[i] * falloff * spotf * lambertFactor(lightDir, ecNormal);
            totalBackDiffuse += lightDiffuse[i] * falloff * spotf * lambertFactor(lightDir, ecNormalInv);
        }
        if (any(greaterThan(lightSpecular[i], zero_vec3))) {
            totalFrontSpecular += lightSpecular[i] * falloff * spotf * blinnPhongFactor(lightDir, ecVertex, ecNormal, shininess);
            totalBackSpecular += lightSpecular[i] * falloff * spotf * blinnPhongFactor(lightDir, ecVertex, ecNormalInv, shininess);
        }
    }
    // Calculating final color as result of all lights (plus emissive term).
    // Transparency is determined exclusively by the diffuse component.
    vertColor = vec4(totalFrontDiffuse, 1) * color;
    backVertColor = vec4(totalBackDiffuse, 1) * color;
}
Fragment Shader:
#ifdef GL_ES
precision mediump float;
precision mediump int;
#endif
varying vec4 vertColor; //Color from vertshader
varying vec4 backVertColor; //Color from vertshader
void main() {
    gl_FragColor = gl_FrontFacing ? vertColor : backVertColor;
}
I've written an LWJGL application that uses .obj files, reads them, and displays them (using display lists).
On my NVIDIA graphics card everything runs fine, but on an AMD graphics card I can't see the objects.
How I give data to the shaders:
glUseProgram(shaderEngine.obj);
glUniform1i(glGetUniformLocation(shaderEngine.obj, "inOrangeJuice"), inOrangeJuice ? 1 : 0);
shaderEngine.loadMatrix(glGetUniformLocation(shaderEngine.standard, "projectionMatrix"), camera.projectionMatrix);
shaderEngine.loadMatrix(glGetUniformLocation(shaderEngine.obj, "viewMatrix"), camera.viewMatrix);
The model matrix is loaded:
shaderEngine.createModelMatrix(new Vector3f(x, y, z), new Vector3f(rx, ry, rz), new Vector3f(1, 1, 1));
shaderEngine.loadModelMatrix(shaderEngine.obj);
Fragment Shader:
#version 130
uniform sampler2D tex;
uniform vec2 texCoord[4];
float textureSize;
float texelSize;
uniform int inOrangeJuice;
bool pointInTriangle(vec3 P, vec3 A, vec3 B, vec3 C)
{
    vec3 u = B - A;
    vec3 v = C - A;
    vec3 w = P - A;
    vec3 vCrossW = cross(v, w);
    vec3 vCrossU = cross(v, u);
    if (dot(vCrossW, vCrossU) < 0)
    {
        return false;
    }
    vec3 uCrossW = cross(u, w);
    vec3 uCrossV = cross(u, v);
    if (dot(uCrossW, uCrossV) < 0)
    {
        return false;
    }
    float denom = length(uCrossV);
    float r = length(vCrossW);
    float t = length(uCrossW);
    return (r + t <= 1);
}
vec4 texture2DBilinear(sampler2D textureSampler, vec2 uv)
{
    vec4 tl = texture2D(textureSampler, uv);
    vec4 tr = texture2D(textureSampler, uv + vec2(texelSize, 0));
    vec4 bl = texture2D(textureSampler, uv + vec2(0, texelSize));
    vec4 br = texture2D(textureSampler, uv + vec2(texelSize, texelSize));
    vec2 f = fract(uv.xy * textureSize);
    vec4 tA = mix(tl, tr, f.x);
    vec4 tB = mix(bl, br, f.x);
    return mix(tA, tB, f.y);
}
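// texture2DBilinear samples the 2x2 texel neighborhood one texel apart and
// blends the four samples by the fractional texel position, i.e. manual
// bilinear filtering on top of whatever filtering the sampler itself does.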
void main()
{
    ivec2 textureSize2d = textureSize(tex, 0);
    textureSize = float(textureSize2d.x);
    texelSize = 1.0 / textureSize;
    //texture coordinate:
    vec2 texCoord = (gl_TexCoord[0].st);
    bool inOJ = false;
    if (inOrangeJuice == 1)
    {
        float depth = gl_FragCoord.z / gl_FragCoord.w; //works only with perspective projection
        depth = depth / 6;
        if (depth > 1)
        {
            depth = 1;
        }
        inOJ = true;
        gl_FragColor = texture2DBilinear(tex, texCoord) * gl_Color * (1.0 - depth) + vec4(1.0, 0.5, 0.0, 1.0) * depth;
    }
    if (inOJ == false)
    {
        gl_FragColor = texture2DBilinear(tex, texCoord) * gl_Color;
    }
    //Nothing is shown, inOrangeJuice should be 0
    //gl_FragColor = vec4(inOrangeJuice, 0, 0, 1);
    //Always works:
    //gl_FragColor = texture2D(tex, texCoord) * gl_Color;
}