GLSL: Unable to find compatible overloaded function - glsl

I am trying to write a fragment shader and I've hit a wall. This is only my second shader so I'm very new to this and I'm almost certain I'm just not dealing with the data types properly and that's where the error is coming from but for the life of me I can't seem to fix it. The goal is to be able to pass a texture to the function so I can call it again and again... Any and all help would be appreciated!
The error I get is: error C1115: unable to find compatible overloaded function "texture2D(vec3, vec2)".
uniform sampler2D image;
uniform float radius;
uniform float adsk_result_w, adsk_result_h;
vec2 iResolution = vec2(adsk_result_w, adsk_result_h);
vec2 uv=(gl_FragCoord.xy/iResolution.xy);
vec2 vTexCoord = (gl_FragCoord.xy/iResolution.xy);
float blurSize = radius/iResolution;
vec4 hblur(vec4 frontin ) {
vec3 RTScene = frontin.rgb;
vec4 sum = vec4(0.0);
sum += texture2D(RTScene, vec2(vTexCoord.x - 4.0*blurSize, vTexCoord.y)) * 0.05;
sum += texture2D(RTScene, vec2(vTexCoord.x - 3.0*blurSize, vTexCoord.y)) * 0.09;
sum += texture2D(RTScene, vec2(vTexCoord.x - 2.0*blurSize, vTexCoord.y)) * 0.12;
sum += texture2D(RTScene, vec2(vTexCoord.x - blurSize, vTexCoord.y)) * 0.15;
sum += texture2D(RTScene, vec2(vTexCoord.x, vTexCoord.y)) * 0.16;
sum += texture2D(RTScene, vec2(vTexCoord.x + blurSize, vTexCoord.y)) * 0.15;
sum += texture2D(RTScene, vec2(vTexCoord.x + 2.0*blurSize, vTexCoord.y)) * 0.12;
sum += texture2D(RTScene, vec2(vTexCoord.x + 3.0*blurSize, vTexCoord.y)) * 0.09;
sum += texture2D(RTScene, vec2(vTexCoord.x + 4.0*blurSize, vTexCoord.y)) * 0.05;
return vec4(sum);
}
void main() {
vec4 front = texture2D(image, uv);
vec4 work = hblur(front);
gl_FragColor = vec4(work);
}

As the error message tells you there is no texture2D overload that expects a vec3 as first and a vec2 as second argument. The first argument of texture2D has to be a of the type sampler2D.
Literally you tell OpenGL in your code with texture2D(RTScene, vec2(vTexCoord.x, vTexCoord.y)) to look for the color at position vec2(vTexCoord.x, vTexCoord.y) in the texture RTScene. But as RTScene is just a vec3 containing three floats (the rgb value you retrieved in the main), where should OpenGL find the pixel.
In your main code you would have something like this:
vec4 work = hblur(image, uv);
And your hblur should look something like this:
vec4 hblur(sampler2D tex, vec2 uv) {
// your other code
sum += texture2D(tex, vec2(uv.x - 4.0*blurSize, uv.y)) * 0.05;
// your other code
}
It is always helpful to look at the OpenGL Quick Reference Card for the OpenGL version your are coding for, there you will have a quick overview over the available functions:
OpenGL 3.2 API Reference Card
OpenGL 4.1 API Reference Card
OpenGL 4.2 API Reference Card

Related

GLSL: Sample from previous output and not texture2D

I'm trying to write a custom shader where I first blur the texture and then run sobel edge finding.
I've got sobel running ok via the following script
vec4 colorSobel = texture2D(texture, uv);
float bottomLeftIntensity = texture2D(texture, uv + vec2(-0.0015625, 0.0020833)).r;
float topRightIntensity = texture2D(texture, uv + vec2(0.0015625, -0.0020833)).r;
float topLeftIntensity = texture2D(texture, uv + vec2(-0.0015625, 0.0020833)).r;
float bottomRightIntensity = texture2D(texture, uv + vec2(0.0015625, 0.0020833)).r;
float leftIntensity = texture2D(texture, uv + vec2(-0.0015625, 0)).r;
float rightIntensity = texture2D(texture, uv + vec2(0.0015625, 0)).r;
float bottomIntensity = texture2D(texture, uv + vec2(0, 0.0020833)).r;
float topIntensity = texture2D(texture, uv + vec2(0, -0.0020833)).r;
float h = -secondary * topLeftIntensity - coef * topIntensity - secondary * topRightIntensity + secondary * bottomLeftIntensity + coef * bottomIntensity + secondary * bottomRightIntensity;
float v = -secondary * bottomLeftIntensity - coef * leftIntensity - secondary * topLeftIntensity + secondary * bottomRightIntensity + coef * rightIntensity + secondary * topRightIntensity;
float mag = length(vec2(h, v));
// alpha values removed atm
if (mag < 0.5) {
colorSobel.rgb *= (1.0 - 1.0);
colorSobel.r += 0.0 * 1.0;
colorSobel.g += 0.0 * 1.0;
colorSobel.b += 0.0 * 1.0;
colorSobel.rgb += 1.0 * mag;
} else {
colorSobel.rgb *= (1.0 - 1.0);
colorSobel.r += 0.0 * 1.0;
colorSobel.g += 0.0 * 1.0;
colorSobel.b += 0.0 * 1.0;
colorSobel.rgb += 1.0 * mag;
}
gl_FragColor = colorSobel;
However I know it works by sampling the texture via texture2D.
If I were to first manipulate the output via a simple script such as this which reduces the colours
vec4 bg = texture2D(texture,uv);
gl_FragColor = vec4(gb.rgb, 1.0);
gl_FragColor.r = float(floor(gl_FragColor.r * 0.5 ) / 0.5);
gl_FragColor.g = float(floor(gl_FragColor.g * 0.5 ) / 0.5);
gl_FragColor.b = float(floor(gl_FragColor.b * 0.5 ) / 0.5);
The output still samples from the texture and ignores the first paint.
Is there a way to sample from the color output rather than using texture2D?
The reason I'm asking is that i'm chaining my shaders at runtime depending on user interaction?

Cook-Torrance shader with more than one point light

I am trying to implement the Cook-Torrance lighting mode with four point lights. While I am getting nice results by using just only one point light, I can't understand which is the correct way to sum up the specular term inside my light loop.
I am defining the materials as follows:
struct material {
vec3 ambient; /* ambient color */
vec3 diffuse; /* diffuse color */
vec3 specular; /* speculr color */
float metallic;
float roughness;
};
...whereby my lights only have one color/intensity property,
struct light {
vec3 position;
vec3 color;
bool enabled;
};
Here is the function inside my fragment shader with the fragment color computation:
vec3 lighting() {
vec3 color = vec3(0.0,0.0,0.0);
float r0 = pow(material.metallic - 1.0,2.0)/pow(material.metallic + 1.0,2.0);
vec3 V = normalize(-v_viewpos);
vec3 N = normalize(v_normal);
for (int i = 0; i < 4; i++) {
if (light[i].enabled) {
vec3 L = normalize(light[i].position - v_viewpos);
// Half-way vector
vec3 halfVector = normalize(L + V);
float NdotL = max(dot(N, L),0.0);
float NdotV = max(dot(N, V),0.0);
if (NdotL > 0.001 && NdotV > 0.001) {
float NdotH = max(0.0, dot(N, halfVector));
float HdotV = max(0.0, dot(halfVector, V));
// Beckmann
float tanAlpha = sqrt(1.0-NdotH*NdotH)/NdotH;
float D = exp(-pow(tanAlpha/material.roughness,2.0))/(4.0*pow(material.roughness,2.0)*pow(NdotH,4.0));
// Shadowing-masking term
float G1 = (2.0 * NdotH * NdotV) / HdotV;
float G2 = (2.0 * NdotH * NdotL) / HdotV;
float G = min(1.0, min(G1, G2));
// Fresnel reflection, Schlick approximation
float F = r0 + (1.0 - r0) * pow(1.0 - NdotL, 5.0);
float R = (F*G*D) / (3.14159 * NdotL * NdotV);
color += light[i].color * R * NdotL;
}
color += material.diffuse * light[i].color;
}
}
return color;
}
I believe the key point here is my wrong computation inside the light loop:
color += light[i].color * R * NdotL;
Here is an example of what I mean, the resulting fragment color is either too dark, or too bright. I am not able to sum up each light contribution to get a nice smooth color gradient among the specular term and the material colors.
I am reading here about gamma correction, but I can't understand if this applies to my question or not.
How should I sum up each light.color with the diffuse, ambient and specular colors of the material, to calculate the final fragment color, by correctly including the total amount of specular highlight contribution of each light?
vec3 V should be the normalized vector starting from the fragment position to the camera position.
vec3 L should be the normalized vector starting from the fragment position to the light position.
One of those vectors is wrong in your shader, depending on the actual value of v_viewpos.
The fresnel should be based on HoV not NoL:
pow(1.0 - HoV, 5.0)
For the diffuse part, you consider your light like ambiant light and not point light.
color += material.diffuse * light[i].color;
should be (for simple Lambertian)
color += material.diffuse * light[i].color * NoL;
Most of your computations look good (including the directions of V and L and the Fresnel term). The only thing is that you might have mixed up how to combine the individual lighting components. For specular and diffuse, you have
color += light[i].color * R * NdotL;
R corresponds to the specular part and NdotL to the diffuse part. Both are additive, however. Therefore, the equation should be (plus considering material parameters):
color += light[i].color * (material.specular * R + material.diffuse * NdotL);
For the ambient term you have
color += material.diffuse * light[i].color;
Replace material.diffuse with material.ambient and this should be correct.
And be sure that your lights are not too bright. The screen cannot display anything brighter than white (or fully saturated red).

Few problems with BRDF using Beckmann and GGX/Trowbridge-Reitz distribution for comparison

I have been trying to wrap my head around physical based rendering these last 2.5 weeks and so far I managed to learn a lot, ask a lot of questions, and have some results, although I still have few problems that I would like to fix but the last few days I am stuck. I am want to continue working/learning more but now I don't know what else to do or how to proceed further, thus I need some guidance :(
One of the first problems that I can not figure out what is happening is when I get close to a shape. There is a cut-off problem with BRDF function that I have implemented. The second and third row are BRDF functions using Spherical Gaussian for Fresnel, and Schlick approximation. The second row Beckmann distribution NDF and the third one uses GGX/Trowbridge-Reitz as NDF.
I started implementing this referring to "Real Shading in Unreal Engine 4" and few other posts found while Google-ing.
What I believe the remaining things to do are:
How to blend diffuse, reflection, and speculal better
Fix the problem with the BRDF cut-off problem
Evaluate if my shaders are producing good results based on the equation (it is the first time for me going this way and some comments would be very helpful as a guide on how to proceed in tweaking things)
Fix specular factor in Phong (first row) shader, now I use material roughness as a blend factor when I mix Phong, skybox reflection and diffuse
The code I use for BRDF's is
// geometry term Cook Torrance
float G(float NdotH, float NdotV, float VdotH, float NdotL) {
float NH2 = 2.0 * NdotH;
float g1 = (NH2 * NdotV) / VdotH;
float g2 = (NH2 * NdotL) / VdotH;
return min(1.0, min(g1, g2));
}
// Fresnel reflection term, Schlick approximation
float R_Fresnel(float VdotH) {
return F0 + (1.0 - F0) * pow(2, (-5.55473 * (VdotH)-6.98316) * (VdotH));
}
// Normal distribution function, GGX/Trowbridge-Reitz
float D_GGX(float NdotH, float roughtness2) {
float a = roughtness2 * roughtness2;
float a2 = a*a;
float t = ((NdotH * NdotH) * (a2 - 1.0) + 1.0);
return a2 / (PI * t * t);
}
// Normal distribution function, Beckmann distribution
float D_Beckmann(float NdotH, float mSquared) {
float r1 = 1.0 / (4.0 * mSquared * pow(NdotH, 4.0));
float r2 = (NdotH * NdotH - 1.0) / (mSquared * NdotH * NdotH);
return (r1 * exp(r2));
}
// COOK TORRANCE BRDF
vec4 cookTorrance(Light light, vec3 direction, vec3 normal) {
// do the lighting calculation for each fragment.
float NdotL = max(dot(normal, direction), 0.0);
float specular = 0.0;
if (NdotL > 0.0)
{
vec3 eyeDir = normalize(cameraPosition);
// calculate intermediary values
vec3 halfVector = normalize(direction + eyeDir);
float NdotH = max(dot(normal, halfVector), 0.0);
float NdotV = max(dot(normal, eyeDir), 0.0);
float VdotH = max(dot(eyeDir, halfVector), 0.0);
float matShininess = (material.shininess / 1000.0);
float mSquared = (0.99 - matShininess) * (0.99 - matShininess);
float geoAtt = G(NdotH, NdotV, VdotH, NdotL);
float roughness = D_Beckmann(NdotH, mSquared);
float fresnel = R_Fresnel(VdotH);
specular = (fresnel * geoAtt * roughness) / (NdotV * NdotL * PI);
}
vec3 finalValue = light.color * NdotL * (k + specular * (1.0 - k));
return vec4(finalValue, 1.0);
}
vec4 cookTorrance_GGX(Light light, vec3 direction, vec3 normal) {
// do the lighting calculation for each fragment.
float NdotL = max(dot(normal, direction), 0.0);
float specular = 0.0;
if (NdotL > 0.0)
{
vec3 eyeDir = normalize(cameraPosition);
// calculate intermediary values
vec3 halfVector = normalize(direction + eyeDir);
float NdotH = max(dot(normal, halfVector), 0.0);
float NdotV = max(dot(normal, eyeDir), 0.0);
float VdotH = max(dot(eyeDir, halfVector), 0.0);
float matShininess = (material.shininess / 1000.0);
float mSquared = (0.99 - matShininess) * (0.99 - matShininess);
float geoAtt = G(NdotH, NdotV, VdotH, NdotL);
// NDF CHANGED TO GGX
float roughness = D_GGX(NdotH, mSquared);
float fresnel = R_Fresnel(VdotH);
specular = (fresnel * geoAtt * roughness) / (NdotV * NdotL * PI);
}
vec3 finalValue = light.color * NdotL * (k + specular * (1.0 - k));
return vec4(finalValue, 1.0);
}
void main() {
//vec4 tempColor = vec4(material.diffuse, 1.0);
vec4 tempColor = vec4(0.1);
// interpolating normals will change the length of the normal, so renormalize the normal.
vec3 normal = normalize(Normal);
vec3 I = normalize(Position - cameraPosition);
vec3 R = reflect(I, normalize(Normal));
vec4 reflection = texture(skybox, R);
// fix blending
float shininess = (material.shininess / 1000.0);
vec4 tempFinalDiffuse = mix(tempColor, reflection, shininess);
vec4 finalValue = cookTorrance_GGX(directionalLight.light, directionalLight.position, normal) + tempFinalDiffuse;
// OR FOR COOK TORRANCE IN THE OTHER SHADER PROGRAM
//vec4 finalValue = cookTorrance(directionalLight.light, directionalLight.position, normal) + tempFinalDiffuse;
gl_FragColor = finalValue;
//gl_FragColor = vec4(1.0); // TESTING AND DEBUGGING FRAG OUT
}
The results i have so far are lik in pictures below
EDIT :: I managed to solve few problems and implement environment sampling given in "Real Shading in Unreal Engine 4" but still I just cant figure out why I have that cut-off problem and I have a problem with reflection now after sampling. :(
Also I moved Phong that i tough in books and online tutorial to BDRF Blinn-Phong for better comparison.
My shader now looks like this.
vec4 brdf_GGX(Light light, vec3 direction, vec3 normal) {
float specular = 0.0;
float matShininess = 1.0 - (material.shininess / 1000.0);
vec2 randomPoint;
vec4 finalColor = vec4(0.0);
vec4 totalLambert = vec4(0.0);
const uint numberSamples = 32;
for (uint sampleIndex = 0; sampleIndex < numberSamples; sampleIndex++)
{
randomPoint = hammersley2d(sampleIndex, numberSamples);
vec3 H = ImportanceSampleGGX(randomPoint, matShininess, normal);
vec3 L = 2.0 * dot(normal, H) * H - normal;
vec3 R = reflect(L, normalize(normal));
totalLambert += texture(skybox, -R);
}
totalLambert = totalLambert / numberSamples;
float NdotL = max(dot(normal, direction), 0.0);
if (NdotL > 0.0)
{
vec3 eyeDir = normalize(cameraPosition);
// calculate intermediary values
vec3 halfVector = normalize(direction + eyeDir);
float NdotH = max(dot(normal, halfVector), 0.0);
float NdotV = max(dot(normal, eyeDir), 0.0);
float VdotH = max(dot(eyeDir, halfVector), 0.0);
float mSquared = clamp(matShininess * matShininess, 0.01, 0.99);
float geoAtt = G(NdotH, NdotV, VdotH, NdotL);
float roughness = D_Beckmann(NdotH, mSquared);
float fresnel = R_Fresnel(VdotH);
specular = (fresnel * geoAtt * roughness) / (NdotV * NdotL * PI);
}
vec3 finalValue = light.color * NdotL * (k + specular * (1.0 - k));
return vec4(finalValue, 1.0) * totalLambert;
}
Current results look like this (NOTE: I used skybox sampling only in the third GGX model, do the same for other shaders tomorrow)
EDIT:: OK i am figuring out what is happening but still i can not fix it. I have problems when sampling. I have no idea how to translate normalized ray to proper cube map reflection after sampling. If you can notice in pictures I lost the correct reflection that sphere does to environment map. I just have a simple/flat texture on each sphere and now I have no idea how to fix that.

GLSL Shader Ported From HLSL Is Not Working

I have this HLSL Shader for blur:
struct VS_INPUT
{
float4 Position : POSITION0;
float2 TexCoord : TEXCOORD0;
float4 Color : TEXCOORD1;
};
struct VS_OUTPUT
{
float4 Position : POSITION0;
float4 Color : COLOR0;
float2 TexCoord : TEXCOORD0;
};
float4x4 al_projview_matrix;
VS_OUTPUT vs_main(VS_INPUT Input)
{
VS_OUTPUT Output;
Output.Position = mul(Input.Position, al_projview_matrix);
Output.Color = Input.Color;
Output.TexCoord = Input.TexCoord;
return Output;
}
Frag
texture al_tex;
sampler2D s = sampler_state {
texture = <al_tex>;
};
int tWidth;
int tHeight;
float blurSize = 5.0;
float4 ps_main(VS_OUTPUT Input) : COLOR0
{
float2 pxSz = float2(1.0 / tWidth,1.0 / tHeight);
float4 outC = 0;
float outA = 0;
outA += Input.Color.a * tex2D(s, Input.TexCoord.xy + float2(0,-4.0 * pxSz.y * blurSize)).a * 0.05;
outA += Input.Color.a * tex2D(s, Input.TexCoord.xy + float2(0,-3.0 * pxSz.y * blurSize)).a * 0.09;
outA += Input.Color.a * tex2D(s, Input.TexCoord.xy + float2(0,-2.0 * pxSz.y * blurSize)).a * 0.12;
outA += Input.Color.a * tex2D(s, Input.TexCoord.xy + float2(0,-pxSz.y * blurSize)).a * 0.15;
outA += Input.Color.a * tex2D(s, Input.TexCoord.xy + float2(0,0)).a * 0.16;
outA += Input.Color.a * tex2D(s, Input.TexCoord.xy + float2(0,pxSz.y * blurSize)).a * 0.15;
outA += Input.Color.a * tex2D(s, Input.TexCoord.xy + float2(0,2.0 * pxSz.y * blurSize)).a * 0.12;
outA += Input.Color.a * tex2D(s, Input.TexCoord.xy + float2(0,3.0 * pxSz.y * blurSize)).a * 0.09;
outA += Input.Color.a * tex2D(s, Input.TexCoord.xy + float2(0,4.0 * pxSz.y * blurSize)).a * 0.05;
outC.a = outA;
return outC;
}
There is a similar one for horizontal...
The idea is, I provide tWidth, tHeight for the texture with and height, and use that to get the 'size' of a pixel relative to UV coords.
I then use this to do normal blur by taking a weighted average of neighbors.
I ported this to GLSL:
attribute vec4 al_pos;
attribute vec4 al_color;
attribute vec2 al_texcoord;
uniform mat4 al_projview_matrix;
varying vec4 varying_color;
varying vec2 varying_texcoord;
void main()
{
varying_color = al_color;
varying_texcoord = al_texcoord;
gl_Position = al_projview_matrix * al_pos;
}
Frag
uniform sampler2D al_tex;
varying float blurSize;
varying float tWidth;
varying float tHeight;
varying vec2 varying_texcoord;
varying vec4 varying_color;
void main()
{
vec4 sum = vec4(0.0);
vec2 pxSz = vec2(1.0 / tWidth,1.0 / tHeight);
// blur in x
// take nine samples, with the distance blurSize between them
sum += texture2D(al_tex, varying_texcoord.xy + vec2(0,-4.0 * pxSz.y * blurSize))* 0.05;
sum += texture2D(al_tex, varying_texcoord.xy + vec2(0,-3.0 * pxSz.y * blurSize))* 0.09;
sum += texture2D(al_tex, varying_texcoord.xy + vec2(0,-2.0 * pxSz.y * blurSize))* 0.12;
sum += texture2D(al_tex, varying_texcoord.xy + vec2(0,-pxSz.y * blurSize))* 0.15;
sum += texture2D(al_tex, varying_texcoord.xy + vec2(0,0))* 0.16;
sum += texture2D(al_tex, varying_texcoord.xy + vec2(0,pxSz.y * blurSize))* 0.15;
sum += texture2D(al_tex, varying_texcoord.xy + vec2(0,2.0 * pxSz.y * blurSize))* 0.12;
sum += texture2D(al_tex, varying_texcoord.xy + vec2(0,3.0 * pxSz.y * blurSize))* 0.09;
sum += texture2D(al_tex, varying_texcoord.xy + vec2(0,4.0 * pxSz.y * blurSize))* 0.05;
gl_FragColor = varying_color * sum;
}
This is a little different, but it's the same logic. I convert pixel coords to UV coords, and multiply by the blur factor, same as the hlsl factor. Yet, the glsl one gives me an unblurred, slightly more transparent version of the original.
What could cause this?
In your fragment shader, you have:
varying vec4 varying_color;
[...]
gl_FragColor = varying_color;
so all the texture fetches and calculations you do don't have any effect on the final shader output (and are likely to be completely removed by the compiler). You probably want to output sum or to modify it, e.g. with gl_FragColor = varying_color * sum; or whatever effect you want to achieve.
Another thing: in the frag shader, you define varyings for the texture size, but you don't pass them from the vertex shader. Those should be uniforms (or, in modern GLSL, there is also the textureSize() GLSL function which allows you to directly sccess that values without explicitely passing them).

How to prevent blur from using colors outside of the texture?

I'm trying to create a shader that blurs, and I've done that. However, theres one problem. Around the texture there is a black background, and when the shader runs it is grabbing color from that background and it ends up darkening my entire image (but it still blurs the image just fine, its just as if it is also lowering the brightness). Just to be sure thats what I thought was going on, I set the background white, and sure enough it made my image duller. I don't really know how to prevent this, I suppose its grabbing them outside because the blur size is reaching outside the texture boundary, is there any way to prevent texture look ups from occuring outside of the texture I'm dealing with?
frag:
precision mediump float;
uniform sampler2D CC_Texture0;
uniform float u_time;
const lowp int SAMPLES = 9;
varying vec2 v_texCoord;
varying highp vec2 transCord[SAMPLES];
void main()
{
vec4 sum = vec4(0.0);
sum += texture2D(CC_Texture0, transCord[0]) * 0.05;
sum += texture2D(CC_Texture0, transCord[1]) * 0.09;
sum += texture2D(CC_Texture0, transCord[2]) * 0.12;
sum += texture2D(CC_Texture0, transCord[3]) * 0.15;
sum += texture2D(CC_Texture0, transCord[4]) * 0.16;
sum += texture2D(CC_Texture0, transCord[5]) * 0.15;
sum += texture2D(CC_Texture0, transCord[6]) * 0.12;
sum += texture2D(CC_Texture0, transCord[7]) * 0.09;
sum += texture2D(CC_Texture0, transCord[8]) * 0.05;
gl_FragColor = sum;
}
Vertex shader
precision mediump float;
attribute vec4 a_position;
attribute vec2 a_texCoord;
const lowp int SAMPLES = 9;
uniform highp float texelWidthOffset;
uniform highp float texelHeightOffset;
uniform highp float blurSize;
uniform float u_time;
varying vec2 v_texCoord;
varying highp vec2 transCord[SAMPLES];
void main()
{
gl_Position = CC_MVPMatrix * a_position;
v_texCoord = a_texCoord;
highp vec2 offset = vec2(texelWidthOffset, texelHeightOffset) * (u_time * blurSize);
transCord[0] = a_texCoord - offset * 4.0;
transCord[1] = a_texCoord - offset * 3.0;
transCord[2] = a_texCoord - offset * 2.0;
transCord[3] = a_texCoord - offset * 1.0;
transCord[4] = a_texCoord;
transCord[5] = a_texCoord + offset * 1.0;
transCord[6] = a_texCoord + offset * 2.0;
transCord[7] = a_texCoord + offset * 3.0;
transCord[8] = a_texCoord + offset * 4.0;
}
UPDATE: It appears this is only an issue when rendering on to CCRenderTexture. I'm not sure why though.
Set the wrap modes of the texture, in both S and T, to GL_CLAMP_TO_EDGE -- this will mean that out-of-range UV coordinate vales return more-sensible results