I'm trying to port the Shadertoy chromakey example to p5.js, with the webcam as the video source. After spending a lot of time reading shader documentation, my code still isn't working, so I need some help.
I followed this guide to port the code to p5.
Fragment shader code:
#ifdef GL_ES
precision mediump float;
#endif
uniform sampler2D tex0;
uniform sampler2D tex1;
mat4 RGBtoYUV = mat4(0.257, 0.439, -0.148, 0.0,
0.504, -0.368, -0.291, 0.0,
0.098, -0.071, 0.439, 0.0,
0.0625, 0.500, 0.500, 1.0 );
vec4 chromaKey = vec4(0.05, 0.63, 0.14, 1);
vec2 maskRange = vec2(0.005, 0.26);
float colorclose(vec3 yuv, vec3 keyYuv, vec2 tol)
{
float tmp = sqrt(pow(keyYuv.g - yuv.g, 2.0) + pow(keyYuv.b - yuv.b, 2.0));
if (tmp < tol.x)
return 0.0;
else if (tmp < tol.y)
return (tmp - tol.x)/(tol.y - tol.x);
else
return 1.0;
}
void main()
{
vec2 fragPos = gl_FragCoord.xy / iResolution.xy;
vec4 texColor0 = texture(text0, fragPos);
vec4 texColor1 = texture(text1, fragPos);
vec4 keyYUV = RGBtoYUV * chromaKey;
vec4 yuv = RGBtoYUV * texColor0;
float mask = 1.0 - colorclose(yuv.rgb, keyYUV.rgb, maskRange);
gl_FragColor = max(texColor0 - mask * chromaKey, 0.0) + texColor1 * mask;
}
P5 sketch code:
let theShader;
let cam;
let img;
function preload(){
theShader = loadShader('webcam.vert', 'webcam.frag');
img = loadImage('http://www.quadrochave.com/wp-content/uploads/elementor/thumbs/nodulo_bannersite_ptodu%C3%A7%C3%A3o2-mpe2nvmu8s8o2uqcd7b2oh3mnuv9up05ubby33shz4.png');
}
function setup() {
pixelDensity(1);
createCanvas(windowWidth, windowHeight, WEBGL);
noStroke();
cam = createCapture(VIDEO);
cam.size(windowWidth, windowHeight);
cam.hide();
}
function draw() {
// shader() sets the active shader with our shader
shader(theShader);
// passing cam as a texture
theShader.setUniform('tex0', cam);
theShader.setUniform('tex1', img);
// rect gives us some geometry on the screen
theShader.rect(0,0,width,height);
}
Test on Glitch
Shadertoy chromakey original fragment shader
The major issue is that you didn't declare and set the uniform variable iResolution. But there are some more issues in the shader code: the samplers are declared as tex0 and tex1 but sampled as text0 and text1, and in WebGL 1 GLSL the lookup function is texture2D rather than texture.
Fragment shader:
precision mediump float;
uniform sampler2D tex0;
uniform sampler2D tex1;
uniform vec2 iResolution;
mat4 RGBtoYUV = mat4(0.257, 0.439, -0.148, 0.0,
0.504, -0.368, -0.291, 0.0,
0.098, -0.071, 0.439, 0.0,
0.0625, 0.500, 0.500, 1.0 );
vec4 chromaKey = vec4(0.05, 0.63, 0.14, 1);
vec2 maskRange = vec2(0.005, 0.26);
float colorclose(vec3 yuv, vec3 keyYuv, vec2 tol)
{
float tmp = sqrt(pow(keyYuv.g - yuv.g, 2.0) + pow(keyYuv.b - yuv.b, 2.0));
if (tmp < tol.x)
return 0.0;
else if (tmp < tol.y)
return (tmp - tol.x)/(tol.y - tol.x);
else
return 1.0;
}
void main()
{
vec2 fragPos = gl_FragCoord.xy / iResolution.xy;
vec4 texColor0 = texture2D(tex0, fragPos);
vec4 texColor1 = texture2D(tex1, fragPos);
vec4 keyYUV = RGBtoYUV * chromaKey;
vec4 yuv = RGBtoYUV * texColor0;
float mask = 1.0 - colorclose(yuv.rgb, keyYUV.rgb, maskRange);
gl_FragColor = max(texColor0 - mask * chromaKey, 0.0) + texColor1 * mask;
}
Script:
let theShader;
let cam;
let img;
function setup() {
createCanvas(windowWidth, windowHeight, WEBGL);
theShader = loadShader('webcam.vert', 'webcam.frag');
img = loadImage('http://www.quadrochave.com/wp-content/uploads/elementor/thumbs/nodulo_bannersite_ptodu%C3%A7%C3%A3o2-mpe2nvmu8s8o2uqcd7b2oh3mnuv9up05ubby33shz4.png');
pixelDensity(1);
noStroke();
cam = createCapture(VIDEO);
cam.size(windowWidth, windowHeight);
cam.hide();
}
function draw() {
// shader() sets the active shader with our shader
shader(theShader);
// passing cam as a texture
theShader.setUniform('tex0', cam);
theShader.setUniform('tex1', img);
theShader.setUniform('iResolution', [width, height]);
// rect gives us some geometry on the screen
rect(0,0,width,height);
}
If the vertex shader provides the texture coordinate:
// our vertex data
attribute vec3 aPosition;
attribute vec2 aTexCoord;
// lets get texcoords just for fun!
varying vec2 vTexCoord;
void main() {
// copy the texcoords
vTexCoord = aTexCoord;
// copy the position data into a vec4, using 1.0 as the w component
vec4 positionVec4 = vec4(aPosition, 1.0);
positionVec4.xy = positionVec4.xy * 2.0 - 1.0;
// send the vertex information on to the fragment shader
gl_Position = positionVec4;
}
then you can use this coordinate instead of gl_FragCoord.xy / iResolution.xy:
varying vec2 vTexCoord;
// [...]
void main() {
vec2 fragPos = vTexCoord.xy;
// [...]
}
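For reference, this is how the complete main() of the corrected fragment shader would look with vTexCoord substituted for the gl_FragCoord-based coordinate; everything else (the uniforms, RGBtoYUV, chromaKey, maskRange and colorclose) stays as shown above, and the iResolution uniform is no longer needed. Depending on the orientation of your textures you may still have to flip vTexCoord.y:
varying vec2 vTexCoord;
void main()
{
// use the interpolated texture coordinate instead of gl_FragCoord / iResolution
vec2 fragPos = vTexCoord;
vec4 texColor0 = texture2D(tex0, fragPos);
vec4 texColor1 = texture2D(tex1, fragPos);
vec4 keyYUV = RGBtoYUV * chromaKey;
vec4 yuv = RGBtoYUV * texColor0;
float mask = 1.0 - colorclose(yuv.rgb, keyYUV.rgb, maskRange);
gl_FragColor = max(texColor0 - mask * chromaKey, 0.0) + texColor1 * mask;
}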
Related
I am following along with the LearnOpenGL guide and am trying to implement Steep Parallax Mapping.
Everything seems to be working fine, except that my brick wall shows distinct visible layers, whereas the photos in the guide don't show any. I was also trying to use this code to parallax-map the topography of the world, and the same layers show up there too, so I was hoping to find a fix.
Layered wall photo
Photo of how it should look
Here is my modified vertex shader
#version 300 es
in vec4 vPosition; // aPos
in vec2 texCoord; // aTexCoords
in vec4 vNormal; // aNormal
in vec4 vTangent; // aTangent
uniform mat4 model_view;
uniform mat4 projection;
uniform vec4 light_position;
out vec2 ftexCoord;
out vec3 vT;
out vec3 vN;
out vec4 position;
out vec3 FragPos;
out vec3 TangentLightPos;
out vec3 TangentViewPos;
out vec3 TangentFragPos;
void
main()
{
// Normal variables
vN = normalize(model_view * vNormal).xyz;
vT = normalize(model_view * vTangent).xyz;
vec4 veyepos = model_view*vPosition;
position = veyepos;
ftexCoord = texCoord;
// Displacement variables
vec3 bi = cross(vT, vN);
FragPos = vec3(model_view * vPosition).xyz;
vec3 T = normalize(mat3(model_view) * vTangent.xyz);
vec3 B = normalize(mat3(model_view) * bi);
vec3 N = normalize(mat3(model_view) * vNormal.xyz);
mat3 TBN = transpose(mat3(T, B, N));
TangentLightPos = TBN * light_position.xyz;
TangentViewPos = TBN * vPosition.xyz;
TangentFragPos = TBN * FragPos;
gl_Position = projection * model_view * vPosition;
}
and my modified fragment shader is here
#version 300 es
precision highp float;
in vec2 ftexCoord;
in vec3 vT; //parallel to surface in eye space
in vec3 vN; //perpendicular to surface in eye space
in vec4 position;
in vec3 FragPos;
in vec3 TangentLightPos;
in vec3 TangentViewPos;
in vec3 TangentFragPos;
uniform int mode;
uniform vec4 light_position;
uniform vec4 light_color;
uniform vec4 ambient_light;
uniform sampler2D colorMap;
uniform sampler2D normalMap;
uniform sampler2D depthMap;
out vec4 fColor;
// STEEP PARALLAX MAPPING
vec2 ParallaxMapping(vec2 texCoords, vec3 viewDir)
{
// number of depth layers
const float minLayers = 8.0;
const float maxLayers = 32.0;
float numLayers = mix(maxLayers, minLayers, abs(dot(vec3(0.0, 0.0, 1.0), viewDir)));
// calculate the size of each layer
float layerDepth = 1.0 / numLayers;
// depth of current layer
float currentLayerDepth = 0.0;
// the amount to shift the texture coordinates per layer (from vector P)
vec2 P = viewDir.xy / viewDir.z * 0.1;
vec2 deltaTexCoords = P / numLayers;
// get initial values
vec2 currentTexCoords = texCoords;
float currentDepthMapValue = texture(depthMap, currentTexCoords).r;
while(currentLayerDepth < currentDepthMapValue)
{
// shift texture coordinates along direction of P
currentTexCoords -= deltaTexCoords;
// get depthmap value at current texture coordinates
currentDepthMapValue = texture(depthMap, currentTexCoords).r;
// get depth of next layer
currentLayerDepth += layerDepth;
}
return currentTexCoords;
}
void main()
{
// DO NORMAL MAPPING
if (mode == 0) {
vec3 T = normalize(vT);
vec3 N = normalize(vN);
vec3 bi = cross(T, N);
mat4 changeOfCoord = mat4(vec4(T, 0), vec4(bi, 0), vec4(N, 0), vec4(0, 0, 0, 1));
vec3 L = normalize(light_position - position).xyz;
vec3 E = normalize(-position).xyz;
vec4 text = vec4(texture(normalMap, ftexCoord) * 2.0 - 1.0);
vec4 eye = changeOfCoord * text;
vec4 amb = texture(colorMap, ftexCoord) * ambient_light;
vec4 diff = max(0.0, dot(L, eye.xyz)) * light_color * texture(colorMap, ftexCoord);
fColor = amb + diff;
} else if (mode == 1) { // DO PARALLAX MAPPING
// offset texture coordinates with Parallax Mapping
vec3 viewDir = normalize(TangentViewPos - TangentFragPos);
vec2 texCoords = ftexCoord;
texCoords = ParallaxMapping(ftexCoord, viewDir);
// discard samples outside of the default texture coordinate space
if(texCoords.x > 1.0 || texCoords.y > 1.0 || texCoords.x < 0.0 || texCoords.y < 0.0)
discard;
// obtain normal from normal map
vec3 normal = texture(normalMap, texCoords).rgb;
//values stored in normal texture is [0,1] range, we need [-1, 1] range
normal = normalize(normal * 2.0 - 1.0);
// get diffuse color
vec3 color = texture(colorMap, texCoords).rgb;
// ambient
vec3 ambient = 0.1 * color;
// diffuse
vec3 lightDir = normalize(TangentLightPos - TangentFragPos);
float diff = max(dot(lightDir, normal), 0.0);
vec3 diffuse = diff * color;
// specular
vec3 reflectDir = reflect(lightDir, normal);
vec3 halfwayDir = normalize(lightDir + viewDir);
float spec = pow(max(dot(normal, halfwayDir), 0.0), 32.0);
vec3 specular = vec3(0.2) * spec;
fColor = vec4(ambient + diffuse + 0.0, 1.0);
}
}
Layers at acute viewing angles are a common artifact of parallax mapping. To improve the result you have to increase the number of samples or implement Parallax Occlusion Mapping (as described in the bottom part of the tutorial):
// STEEP PARALLAX MAPPING
vec2 ParallaxMapping(vec2 texCoords, vec3 viewDir)
{
// number of depth layers
const float minLayers = 8.0;
const float maxLayers = 32.0;
float numLayers = mix(maxLayers, minLayers, abs(dot(vec3(0.0, 0.0, 1.0), viewDir)));
// calculate the size of each layer
float layerDepth = 1.0 / numLayers;
// depth of current layer
float currentLayerDepth = 0.0;
// the amount to shift the texture coordinates per layer (from vector P)
vec2 P = viewDir.xy / viewDir.z * 0.1;
vec2 deltaTexCoords = P / numLayers;
// get initial values
vec2 currentTexCoords = texCoords;
float currentDepthMapValue = texture(depthMap, currentTexCoords).r;
while(currentLayerDepth < currentDepthMapValue)
{
// shift texture coordinates along direction of P
currentTexCoords -= deltaTexCoords;
// get depthmap value at current texture coordinates
currentDepthMapValue = texture(depthMap, currentTexCoords).r;
// get depth of next layer
currentLayerDepth += layerDepth;
}
// get texture coordinates before collision (reverse operations)
vec2 prevTexCoords = currentTexCoords + deltaTexCoords;
// get depth after and before collision for linear interpolation
float afterDepth = currentDepthMapValue - currentLayerDepth;
float beforeDepth = texture(depthMap, prevTexCoords).r - currentLayerDepth + layerDepth;
// interpolation of texture coordinates
float weight = afterDepth / (afterDepth - beforeDepth);
vec2 finalTexCoords = prevTexCoords * weight + currentTexCoords * (1.0 - weight);
return finalTexCoords;
}
By the way, the bitangent vector seems to be inverted. Commonly, the bitangent is the cross product of the normal vector and the tangent in a right-handed system, but that depends on the displacement texture.
vec3 bi = cross(vT, vN);
would become
vec3 bi = cross(vN, vT);
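As a minimal sketch of how this would look in your vertex shader (the multiplication by vTangent.w assumes the tangent's w component stores the handedness sign, which is a common convention but only an assumption about your mesh; drop it if w is not used that way):
vec3 T = normalize(mat3(model_view) * vTangent.xyz);
vec3 N = normalize(mat3(model_view) * vNormal.xyz);
// right-handed bitangent: N x T, optionally flipped by the handedness stored in vTangent.w (assumption)
vec3 B = normalize(cross(N, T)) * vTangent.w;
mat3 TBN = transpose(mat3(T, B, N));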
See further:
Bump Mapping with javascript and glsl
Normal, Parallax and Relief mapping
Demo
In my scene I render a landscape (approximately 522,000 triangles generated from a height map; every point of the height map is used to build the triangle mesh), and shadow mapping / blur is active. I noticed a strong drop in FPS. At the moment I have one light source, a kind of "sun", so the light source's far_plane is extremely large: 512 (the maximum point of the landscape is 128, the minimum is 0). I need to optimize this somehow to get better performance.
An example of a generated landscape with a 512x512 height map
My first idea is to reduce the number of triangles in the grid: I think 522 thousand triangles is too much for a 512x512 height map. Also, when scaled, the landscape looks extremely smooth even without averaging the normals, which is another reason to make the terrain lower-poly.
If I reduce the height of the vertices (scale them down) and accordingly reduce the light source's far_plane, could that improve performance?
My shaders:
Vertex shader:
#version 130
in vec4 a_Position; // Per-vertex position information we will pass in.
void main() {
gl_Position = a_Position;
}
Geometry shader:
#version 330 core
layout (triangles) in;
layout (triangle_strip, max_vertices=3) out;
uniform mat4 u_Model, u_View, u_Projection;
uniform float greenValue = 64;
uniform float greyValue = 96;
out vec3 norm;
out vec4 v_Position;
out mat4 model, view, projection;
out vec4 ambientColor;
out vec4 diffuseColor;
out vec4 specularColor;
void main() {
vec4 v1Eye = u_View * u_Model * gl_in[0].gl_Position;
vec4 v2Eye = u_View * u_Model * gl_in[1].gl_Position;
vec4 v3Eye = u_View * u_Model * gl_in[2].gl_Position;
vec4 v1v2 = v1Eye - v2Eye;
vec4 v2v3 = v2Eye - v3Eye;
vec3 normal = cross(vec3(v1v2), vec3(v2v3));
normal = normalize(normal);
if (normal.z < 0) normal = -normal;
mat4 MVPMatrix = u_Projection * u_View * u_Model;
for (int i = 0; i < gl_in.length(); i++) {
v_Position = gl_in[i].gl_Position;
gl_Position = MVPMatrix * gl_in[i].gl_Position;
model = u_Model;
view = u_View;
projection = u_Projection;
norm = normal;
if (v_Position.y < greenValue) {
ambientColor = vec4(0, 1, 0, 1);
diffuseColor = ambientColor;
specularColor = vec4(0, 0, 0, 1);
} else if (v_Position.y < greyValue) {
ambientColor = vec4(0.4, 0.4, 0.4, 1);
diffuseColor = ambientColor;
specularColor = vec4(0, 0, 0, 1);
} else {
ambientColor = vec4(1, 1, 1, 1);
diffuseColor = ambientColor;
specularColor = ambientColor;
}
EmitVertex();
}
EndPrimitive();
}
Fragment shader:
#version 330 core
precision mediump float; // Set the default precision to medium. We don't need as high of a
// precision in the fragment shader.
#define MAX_LAMPS_COUNT 8 // Max lamps count.
uniform vec3 u_ViewPos; // Camera position
uniform int u_LampsCount; // Lamps count
uniform float brightnessThreshold = 0.3; // brightness threshold variable
uniform float far_plane; // shadow matrix far plane
in mat4 model, view, projection;
in vec4 v_Position; // Position for this fragment in world space
in vec4 ambientColor;
in vec4 diffuseColor;
in vec4 specularColor;
in vec3 norm;
struct Lamp {
float ambientStrength;
float diffuseStrength;
float specularStrength;
float kc; // constant term
float kl; // linear term
float kq; // quadratic term
int shininess;
vec3 lampPos; // in world space
vec3 lampColor;
};
uniform samplerCube shadowMaps[MAX_LAMPS_COUNT];
uniform Lamp u_Lamps[MAX_LAMPS_COUNT];
vec3 fragPos;
vec3 fragWorldPos;
vec3 lampEyePos; // Transformed lamp position into eye space
float shadow;
// for PCF
vec3 sampleOffsetDirections[20] = vec3[] (
vec3(1, 1, 1), vec3(1, -1, 1), vec3(-1, -1, 1), vec3(-1, 1, 1),
vec3(1, 1, -1), vec3(1, -1, -1), vec3(-1, -1, -1), vec3(-1, 1, -1),
vec3(1, 1, 0), vec3(1, -1, 0), vec3(-1, -1, 0), vec3(-1, 1, 0),
vec3(1, 0, 1), vec3(-1, 0, 1), vec3(1, 0, -1), vec3(-1, 0, -1),
vec3(0, 1, 1), vec3(0, -1, 1), vec3(0, -1, -1), vec3(0, 1, -1)
);
// output colors
layout(location = 0) out vec4 fragColor;
layout(location = 1) out vec4 fragBrightColor;
float calculateShadow(vec3 lightDir, int index) {
// get vector between fragment position and light position
vec3 fragToLight = fragWorldPos - u_Lamps[index].lampPos;
// now get current linear depth as the length between the fragment and light position
float currentDepth = length(fragToLight);
// now test for shadows
//float bias = max(0.5 * (1.0 - dot(norm, lightDir)), 0.005);
float bias = 1;
// PCF
float viewDistance = length(u_ViewPos - fragWorldPos);
float diskRadius = (1.0 + (viewDistance / far_plane)) / 25.0;
for (int i = 0; i < 20; ++i) {
float closestDepth = texture(shadowMaps[index], fragToLight + sampleOffsetDirections[i] * diskRadius).r;
closestDepth *= far_plane; // Undo mapping [0;1]
if(currentDepth - bias > closestDepth) shadow += 1.0;
}
shadow /= 20;
//fragColor = vec4(vec3(closestDepth / far_plane), 1.0); // visualizing
return shadow;
}
float calculateAttenuation(Lamp lamp) {
float distance = length(lampEyePos - fragPos);
return 1.0 / (
lamp.kc +
lamp.kl * distance +
lamp.kq * (distance * distance)
);
}
vec4 toVec4(vec3 v) {
return vec4(v, 1);
}
// The entry point for our fragment shader.
void main() {
fragWorldPos = vec3(model * v_Position);
// Transform the vertex into eye space
mat4 mvMatrix = view * model;
fragPos = vec3(mvMatrix * v_Position);
vec3 viewDir = normalize(u_ViewPos - fragPos);
vec3 ambientResult = vec3(0, 0, 0); // result of ambient lighting for all lamps
vec3 diffuseResult = vec3(0, 0, 0); // result of diffuse lighting for all lamps
vec3 specularResult = vec3(0, 0, 0); // result of specular lighting for all lamps
for (int i = 0; i<u_LampsCount; i++) {
lampEyePos = vec3(view * toVec4(u_Lamps[i].lampPos));
// attenuation
float attenuation = calculateAttenuation(u_Lamps[i]);
// ambient
vec3 ambient = u_Lamps[i].ambientStrength * u_Lamps[i].lampColor * attenuation;
// diffuse
vec3 lightDir = normalize(lampEyePos - fragPos);
float diff = max(dot(norm, lightDir), 0.0);
vec3 diffuse = u_Lamps[i].diffuseStrength * diff * u_Lamps[i].lampColor * attenuation;
// specular
vec3 reflectDir = reflect(-lightDir, norm);
float spec = pow(max(dot(viewDir, reflectDir), 0.0), u_Lamps[i].shininess);
vec3 specular = u_Lamps[i].specularStrength * spec * u_Lamps[i].lampColor * attenuation;
// calculate shadow
shadow = calculateShadow(lightDir, i);
// result for this(i) lamp
ambientResult += ambient;
diffuseResult += diffuse * (1-shadow);
specularResult += specular * (1-shadow);
}
fragColor =
toVec4(ambientResult) * ambientColor +
toVec4(diffuseResult) * diffuseColor +
toVec4(specularResult) * specularColor;
// brightness calculation
float brightness = dot(fragColor.rgb, vec3(0.2126, 0.7152, 0.0722));
if (brightness > brightnessThreshold) fragBrightColor = vec4(fragColor.rgb, 1.0);
}
And my shadow shaders:
Vertex shader:
#version 130
attribute vec3 a_Position;
uniform mat4 u_ModelMatrix;
void main() {
gl_Position = u_ModelMatrix * vec4(a_Position, 1.0);
}
Geometry shader:
#version 330 core
layout (triangles) in;
layout (triangle_strip, max_vertices=18) out;
uniform mat4 shadowMatrices[6];
out vec4 fragPos; // FragPos from GS (output per emitvertex)
void main() {
for(int face = 0; face < 6; face++) {
gl_Layer = face; // built-in variable that specifies to which face we render.
// for each triangle's vertices
for(int i = 0; i < 3; i++) {
fragPos = gl_in[i].gl_Position;
gl_Position = shadowMatrices[face] * fragPos;
EmitVertex();
}
EndPrimitive();
}
}
Fragment shader:
#version 330 core
in vec4 fragPos; // world space
uniform vec3 lightPos; // world space
uniform float far_plane; // shadow matrix far plane
void main()
{
float lightDistance = length(fragPos.xyz - lightPos);
// map to [0;1] range by dividing by far_plane
lightDistance = lightDistance / far_plane;
// write this as modified depth
gl_FragDepth = lightDistance;
}
I hope for your help in optimizing this scene.
I need to flip my textures upside down in the shaders before applying the perspective transformations. I modified vertTexCoord in vert.glsl, but I don't know where to use it in swap.glsl. Doing it like this:
gl_FragColor = texture2D(texture, vertTexCoord );
does not work, because I also need the texture to be modified by the perspective transformation.
vert.glsl:
#define PROCESSING_COLOR_SHADER
uniform mat4 transform;
uniform mat4 texMatrix;
attribute vec4 vertex;
attribute vec4 color;
attribute vec2 texCoord;
varying vec4 vertColor;
varying vec4 vertTexCoord;
void main() {
gl_Position = transform * vertex;
vertColor = color;
vertTexCoord = texMatrix * vec4(texCoord, 1.0, 1.0);
}
swap.glsl:
#ifdef GL_ES
precision highp float;
#endif
// General parameters
uniform sampler2D from;
uniform sampler2D to;
uniform float progress;
uniform vec2 resolution;
uniform float reflection;
uniform float perspective;
uniform float depth;
varying vec4 vertColor;
varying vec4 vertTexCoord;
const vec4 black = vec4(0.0, 0.0, 0.0, 1.0);
const vec2 boundMin = vec2(0.0, 0.0);
const vec2 boundMax = vec2(1.0, 1.0);
bool inBounds (vec2 p) {
return all(lessThan(boundMin, p)) && all(lessThan(p, boundMax));
}
vec2 project (vec2 p) {
return p * vec2(1.0, -1.2) + vec2(0.0, -0.02);
}
vec4 bgColor (vec2 p, vec2 pfr, vec2 pto) {
vec4 c = black;
pfr = project(pfr);
if (inBounds(pfr)) {
c += mix(black, texture2D(from, pfr), reflection * mix(1.0, 0.0, pfr.y));
}
pto = project(pto);
if (inBounds(pto)) {
c += mix(black, texture2D(to, pto), reflection * mix(1.0, 0.0, pto.y));
}
return c;
}
void main() {
vec2 p = gl_FragCoord.xy / resolution.xy;
vec2 pfr, pto = vec2(-1.);
float size = mix(1.0, depth, progress);
float persp = perspective * progress;
pfr = (p + vec2(-0.0, -0.5)) * vec2(size/(1.0-perspective*progress), size/(1.0-size*persp*p.x)) + vec2(0.0, 0.5);
size = mix(1.0, depth, 1.-progress);
persp = perspective * (1.-progress);
pto = (p + vec2(-1.0, -0.5)) * vec2(size/(1.0-perspective*(1.0-progress)), size/(1.0-size*persp*(0.5-p.x))) + vec2(1.0, 0.5);
bool fromOver = progress < 0.5;
if (fromOver) {
if (inBounds(pfr)) {
gl_FragColor = texture2D(from, pfr);
}
else if (inBounds(pto)) {
gl_FragColor = texture2D(to, pto);
}
else {
gl_FragColor = bgColor(p, pfr, pto);
}
}
else {
if (inBounds(pto)) {
gl_FragColor = texture2D(to, pto);
}
else if (inBounds(pfr)) {
gl_FragColor = texture2D(from, pfr);
}
else {
gl_FragColor = bgColor(p, pfr, pto);
}
}
}
You sample the texture at (u, v). If you want to flip the Y-axis, just sample at (u, 1.0 - v) instead.
So your updated main will look like:
void main() {
gl_Position = transform * vertex;
vertColor = color;
vec2 newTCoord = texCoord;
newTCoord.y = 1.0 - newTCoord.y;
vertTexCoord = vec4(newTCoord, 1.0, 1.0);
}
I'm searching for a way to interpolate / smooth a texture that is read from a texture atlas.
The problem is that if I set the MIN_FILTER to GL_LINEAR, I get bleeding at the edges (pixels from the neighboring texture are used for the interpolation).
I tried solving this by writing a shader that does the interpolation manually, but to detect which pixels it has to ignore, the shader needs to know the texCoords of the triangle's vertices so it can calculate whether the sampled pixel lies inside or outside the triangle. To get the texCoords into the shader I tried to use a uniform, but I use display lists (I considered switching to VBOs, but my first implementation used them and it was extremely slow), which makes this very complicated.
How can I smooth my textures without bleeding?
Another problem is that the method that checks whether a point is in a triangle doesn't seem to work.
Vertex Shader:
#version 130
void main()
{
gl_TexCoord[0] = gl_TextureMatrix[0] * gl_MultiTexCoord0;
gl_FrontColor = gl_Color;
gl_Position = ftransform();
}
Fragment Shader:
#version 130
uniform sampler2D tex;
uniform vec2 texCoord[4];
float textureSize;
float texelSize;
bool pointInTriangle(vec3 P, vec3 A, vec3 B, vec3 C)
{
vec3 u = B - A;
vec3 v = C - A;
vec3 w = P - A;
vec3 vCrossW = cross(v, w);
vec3 vCrossU = cross(v, u);
if(dot(vCrossW, vCrossU) < 0)
{
return false;
}
vec3 uCrossW = cross(u, w);
vec3 uCrossV = cross(u, v);
if(dot(uCrossW, uCrossV) < 0)
{
return false;
}
float denom = length(uCrossV);
float r = length(vCrossW);
float t = length(uCrossW);
return (r + t <= 1);
}
vec4 texture2DBilinear(sampler2D textureSampler, vec2 uv)
{
vec4 tl = texture2D(textureSampler, uv);
vec4 tr = texture2D(textureSampler, uv + vec2(texelSize, 0));
vec4 bl = texture2D(textureSampler, uv + vec2(0, texelSize));
vec4 br = texture2D(textureSampler, uv + vec2(texelSize , texelSize));
vec2 f = fract( uv.xy * textureSize );
vec4 tA = mix( tl, tr, f.x );
vec4 tB = mix( bl, br, f.x );
return mix( tA, tB, f.y );
}
void main()
{
ivec2 textureSize2d = textureSize(tex,0);
textureSize = float(textureSize2d.x);
texelSize = 1.0 / textureSize;
//texture coordinate:
vec2 texCoord = (gl_TexCoord[0].st);
//vec4 color = texture2D(tex,gl_TexCoord[0].st);
gl_FragColor = texture2DBilinear(tex, texCoord) * gl_Color;
//TEST IF METHOD WORKS
if(pointInTriangle(vec3(1,1,0),vec3(0,0,0),vec3(3,0,0),vec3(0,3,0)))
{
//pointInTriangle DOES NOT WORK!
gl_FragColor = gl_Color;
}
}
I've been having problems sending the shininess factor to my bump mapping shader.
The result always looks like this: http://i.imgur.com/unzdx.jpg
But if I hard-code the value to 0.0 inside the shader, it works just fine.
When I send 0.0 to the shader from the application, it turns out like in the picture above.
Any ideas?
Here's my shader
#version 110
uniform sampler2D tex;
uniform sampler2D bmap;
uniform bool boolBump;
uniform vec4 vecColor;
uniform bool onlyColor;
uniform float fTransparencyThresh;
uniform float fShininess;
uniform float alpha;
varying vec3 vecLight;
varying vec3 vecEye;
varying vec3 vecNormal;
vec4 getLighting()
{
//Ambient part
vec4 color = (gl_FrontLightModelProduct.sceneColor * gl_FrontMaterial.ambient) + (gl_LightSource[0].ambient * gl_FrontMaterial.ambient);
//For bump mapping, the normal comes from the bump map texture lookup
vec3 n = normalize(vecNormal);
if(boolBump)
{
n = normalize(texture2D(bmap, gl_TexCoord[0].st).xyz * 2.0 - 1.0);
}
vec3 l = normalize(vecLight);
//Lambert term
float NdotL = dot(n, l);
if(NdotL > 0.0)
{
//Diffuse part
color += gl_LightSource[0].diffuse * gl_FrontMaterial.diffuse * max(0.0, NdotL);
//Specular part
vec3 e = normalize(vecEye);
vec3 r = normalize(-reflect(l,n));
float spec = pow(max(0.0, dot(r, e)), fShininess);
color += gl_LightSource[0].specular * gl_FrontMaterial.specular * spec;
}
return color;
}
void main(void)
{
vec4 texel = texture2D(tex, gl_TexCoord[0].st);
if(texel.a < fTransparencyThresh)
discard;
//Get shading
vec4 color = getLighting();
//Color only mode?
if(onlyColor)
{
color *= vecColor;
}
else
{
color *= texel;
}
//Set fragment color, alpha comes from MTL file
gl_FragColor = vec4(color.xyz, alpha);
}
Edit, OpenGL code:
void MyClass::sendToShader(const OBJ::StelModel* pStelModel, Effect cur, bool& tangEnabled, int& tangLocation)
{
int location;
tangEnabled = false;
if(cur != No)
{
location = curShader->uniformLocation("fTransparencyThresh");
curShader->setUniform(location, fTransparencyThresh);
location = curShader->uniformLocation("alpha");
curShader->setUniform(location, pStelModel->pMaterial->alpha);
location = curShader->uniformLocation("fShininess");
curShader->setUniform(location, 0.0f);
...
Edit: Even this won't work:
GLint loc = glGetUniformLocation(curShader->program, "fShininess");
glUniform1f(loc, 0.0f);
Note that pow(0, 0) is undefined. This means spec is undefined if dot(r, e) == 0 and fShininess == 0.
Do you make sure the program is actively bound when you call glUniform? Do you check glGetError anywhere?
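Regarding the pow(0, 0) point, a minimal sketch of one way to guard the specular term in the fragment shader (this only removes the undefined case; it won't help if the uniform value simply never reaches the shader):
float base = max(dot(r, e), 0.0);
// pow(x, 0.0) is 1.0 for x > 0.0, so handle fShininess == 0.0 explicitly instead of relying on pow
float spec = (fShininess > 0.0) ? pow(base, fShininess) : 1.0;
color += gl_LightSource[0].specular * gl_FrontMaterial.specular * spec;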