As you can see in the image, I'm getting a black circle on top of the spheres, and the image appears grainy. It's supposed to be sharper, but there are these small black and white spots.
This is the code for the shadow ray:
int pos = 0;
// Returns the t of the closest hit; stores the index of the hit object in the global `pos`.
float intersect(const ray &r, vector<unique_ptr<object>> &obj)
{
    float closest = numeric_limits<float>::max();
    for (int j = 0; j < obj.size(); j++)
    {
        float t = obj[j]->intersect(r);
        if (t > 1e-6 && t < closest)
        {
            closest = t;
            pos = j;
        }
    }
    return closest;
}
vec color(const ray& r, vector<unique_ptr<object>> &shape, vector<unique_ptr<Light>> &lighting, int depth)
{
    vec background_color(.678, .847, .902);
    vec total{0.0, 0.0, 0.0};
    vec ambient{0.125, 0.125, 0.125};
    float t_near = intersect(r, shape);
    if (t_near == numeric_limits<float>::max())
        return background_color;
    else
    {
        total += ambient;
        for (int i = 0; i < lighting.size(); i++)
        {
            total += shape[pos]->shade(lighting[i]->position(), t_near, r); // gives specular + diffuse
            vec shadow_dir = unit_vector(lighting[i]->position() - r.p_at_par(t_near));
            ray shadowRay(r.p_at_par(t_near), shadow_dir);
            float dist = shadow_dir.lenght();
            float a = intersect(shadowRay, shape);
            if (a != numeric_limits<float>::max())
                return vec(0.0, 0.0, 0.0);
        }
        return total;
    }
}
Okay, got it.
For the black circle, you have to test that the distance to the shadow-ray hit is less than the distance between the hit point and the light source; otherwise hits beyond the light wrongly darken the pixel. Also, to get that distance, shadow_dir shouldn't be normalized before you take its length. As for the black and white spots, which are due to the shadow ray self-intersecting the surface it starts on, add N*bias to the hit point, where the bias is a small constant such as 1e-4; the bias shouldn't be too small, or floating-point error will still cause self-intersections. Since the shadow ray's direction is normalized, the parameter returned by intersect is itself a distance and can be compared against dist directly:
vec shadow_dir = lighting[i]->position() - r.p_at_par(t_near);
float dist = shadow_dir.lenght();
vec N = unit_vector(shape[pos]->normal(r, t_near));
shadow_dir = unit_vector(shadow_dir);
ray shadowRay(r.p_at_par(t_near) + N * 1e-4, shadow_dir);
float a = intersect(shadowRay, shape);
if (a != numeric_limits<float>::max())
{
    if (a < dist) // occluder lies between the hit point and the light
        return vec(0.0, 0.0, 0.0);
}
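Pulled together, the whole test can live in a small helper; a minimal sketch built on the same intersect and unit_vector helpers as above (the inShadow name is mine):

// Sketch: true if `point` is occluded on its way to `lightPos`.
// Because the direction is normalized, the t returned by intersect()
// is a world-space distance and can be compared against `dist`.
bool inShadow(const vec &point, const vec &N, const vec &lightPos,
              vector<unique_ptr<object>> &shape)
{
    vec toLight = lightPos - point;
    float dist = toLight.lenght(); // spelling follows the post's vec API
    ray shadowRay(point + N * 1e-4, unit_vector(toLight));
    float t = intersect(shadowRay, shape);
    return t < dist; // intersect() returns float max on a miss, so a miss is never a shadow
}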
I am trying to calculate the RGB value of a pixel using the Blinn-Phong formula. For that I use this function:
Material getPixelColor(Ray ray, double min, int index, std::vector<Object*> Objects, std::vector<Object*> lightSources) {
    Vector intersectionPoint = ray.getOrigin() + ray.getDirection() * min;
    Vector n = Objects.at(index)->getNormalAt(intersectionPoint);
    Vector reflectiondirection = ray.getDirection() - n * Vector::dot(ray.getDirection(), n) * 2;
    Ray reflectionRay(intersectionPoint, reflectiondirection);
    // check if ray intersects any other object
    double minimum = INFINITY;
    int count = 0, indx = -1;
    for (auto const& obj : Objects) {
        double distance = obj->Intersect(reflectionRay);
        if (minimum > distance) {
            minimum = distance;
            indx = count;
        }
        count++;
    }
    Material result(0, 0, 0);
    if (recurseDepth >= 5 || indx == -1) {
        recurseDepth = 0;
        // Check if object is lit for each light source
        for (auto const& light : lightSources) {
            // Blinn-Phong
            Vector lightDirection = (light->getPosition() - intersectionPoint).normalize();
            double nl = Vector::dot(n, lightDirection);
            nl = nl > 0 ? nl : 0.0;
            result = result + (Objects.at(index)->getMaterial() * light->getMaterial() * nl);
        }
    }
    else {
        recurseDepth++;
        result = result + getPixelColor(reflectionRay, minimum, indx, Objects, lightSources);
    }
    return result;
}
The result that I get is this:
This is how it was without shading:
I have been trying to find a solution for hours and can't. Am I using the wrong formula?
After a lot of research, I removed the part that gathers color from other objects and instead added a shadow test for each light:
Material getPixelColor(Ray ray, double min, int index, std::vector<Object*> Objects, std::vector<Object*> lightSources) {
    Vector intersectionPoint = ray.getOrigin() + ray.getDirection() * min;
    Vector n = Objects.at(index)->getNormalAt(intersectionPoint);
    Material result(0, 0, 0);
    // Check if object is lit for each light source
    for (auto const& light : lightSources) {
        // create a ray to the light and check if there is an object between the two
        Vector lightDirection = (light->getPosition() - intersectionPoint).normalize();
        Ray lightRay(intersectionPoint, lightDirection);
        bool hit = false;
        for (auto const& obj : Objects) {
            double distance = obj->Intersect(lightRay);
            if (INFINITY > distance && distance > 0.0001) {
                hit = true;
                break;
            }
        }
        if (!hit) {
            // Blinn-Phong
            double nl = Vector::dot(n, lightDirection);
            // clamp nl between 0 and 1
            if (nl > 1.0) {
                nl = 1.0;
            }
            else if (nl < 0.0) {
                nl = 0.0;
            }
            result = result + (Objects.at(index)->getMaterial() * nl);
        }
    }
    return result;
}
And so I got the desired result:
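For reference, note that this code implements only the diffuse (Lambert) term; full Blinn-Phong also adds a specular term built from the halfway vector. A minimal self-contained sketch (the Vec3 helpers here are illustrative, not the poster's API):

#include <algorithm>
#include <cmath>

// Minimal illustrative sketch of one light's Blinn-Phong contribution.
struct Vec3 { double x, y, z; };
Vec3 operator+(Vec3 a, Vec3 b) { return {a.x + b.x, a.y + b.y, a.z + b.z}; }
Vec3 operator*(Vec3 a, double s) { return {a.x * s, a.y * s, a.z * s}; }
double dot(Vec3 a, Vec3 b) { return a.x * b.x + a.y * b.y + a.z * b.z; }
Vec3 normalize(Vec3 v) { return v * (1.0 / std::sqrt(dot(v, v))); }

Vec3 blinnPhong(Vec3 n, Vec3 toLight, Vec3 toEye,
                Vec3 diffuseColor, Vec3 specularColor, double shininess)
{
    Vec3 l = normalize(toLight);
    Vec3 h = normalize(l + normalize(toEye));                   // halfway vector
    double nl = std::max(dot(n, l), 0.0);                       // diffuse factor
    double nh = std::pow(std::max(dot(n, h), 0.0), shininess);  // specular factor
    return diffuseColor * nl + specularColor * nh;
}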
My final goal is to detect a laser line in my picture. To do so, I first convert the RGB colour space to HSV (in order to examine brightness). Then I select only the pixels that have certain H, S and V values (the brightest pixels of a certain colour, red in my case).
For the pixels that satisfy these thresholds I write their luminance into all three RGB channels; the ones that don't, I set to black.
Here comes my problem and question:
As mentioned, every pixel should end up either black or grey (luminance). I don't understand how these purple or green pixels get into my picture. They are like noise and they are constantly changing!
At first I thought I got these colours because of values bigger than 1, but I believe OpenGL clamps values to 0-1 (I even tried clamping myself, with no success).
Does anyone know what causes this effect?
Any help or idea is appreciated.
Here is my fragment shader:
precision highp float;

varying vec2 textureCoordinate;
uniform sampler2D inputImageTexture;
const highp vec3 W = vec3(0.299, 0.587, 0.114);

void main()
{
    lowp vec4 textureColor = texture2D(inputImageTexture, textureCoordinate);
    float luminance = dot(textureColor.rgb, W);
    float r = textureColor.r;
    float g = textureColor.g;
    float b = textureColor.b;
    float h;
    float s;
    float v;
    float min = 0.0;
    float max = 0.0;
    float delta = 0.0;
    if (r >= g) {
        if (r >= b) {
            max = r;
        } else {
            max = b;
        }
    } else {
        if (g >= b) {
            max = g;
        } else {
            max = b;
        }
    }
    // max = MAX( r, g, b );
    if (r < g) {
        if (r < b) {
            min = r;
        } else {
            min = b;
        }
    } else {
        if (g < b) {
            min = g;
        } else {
            min = b;
        }
    }
    v = max; // v
    delta = max - min;
    if (delta == 0.0) {
        h = 0.0;
        s = 0.0;
        return;
    }
    else if (max != 0.0)
        s = delta / max; // s
    else {
        // r = g = b = 0  ->  s = 0, v is undefined
        s = 0.0;
        h = -1.0;
        return;
    }
    if (r == max) {
        h = (g - b) / delta; // between yellow & magenta
        h = mod(h, 6.0);
    }
    else if (g == max)
        h = 2.0 + (b - r) / delta; // between cyan & yellow
    else
        h = 4.0 + (r - g) / delta; // between magenta & cyan
    h = h * 60.0; // degrees
    if (h < 0.0)
        h = h + 360.0;
    // HSV transformation ends here
    if (v > 0.8) {
        if (s > 0.7) {
            if (h > 320.0 && h < 360.0) {
                if (luminance > 1.0)
                    gl_FragColor = vec4(vec3(1.0), textureColor.a);
                else
                    gl_FragColor = vec4(vec3(luminance), textureColor.a);
            }
        }
    } else {
        gl_FragColor = vec4(0.0, 0.0, 0.0, 1.0);
    }
}
I should mention that the big white spot is sunlight; it's so bright that it passes my HSV conditions at the end. That's not my problem right now. My problem is these purple noise-like pixels, and the green noise around the picture.
The purple and green pixels are probably uninitialized memory.
gl_FragColor is not guaranteed to be initialized at the start of a shader, so setting it before each of the early return statements (the ones in the delta == 0 and max == 0 branches) should fix the issue.
If it does not, and an uninitialized framebuffer is being used, then it may be because of a different (and less likely) cause:
It may be because of blending. Try disabling it or ensuring gl_FragColor.a is 1.
It may be because the stencil buffer is exposing it. Try disabling stencil testing or ensuring that all pixels pass the stencil test.
Alternatively, if it is caused by blending or the stencil test, you could initialize the framebuffer with something like glClear().
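For instance, a minimal setup sketch (assuming a GL context where these calls are issued before drawing):

// Clear the color (and stencil) buffers to known values so stale
// framebuffer contents cannot show through, and rule out blending
// and stencil effects while debugging.
glClearColor(0.0f, 0.0f, 0.0f, 1.0f);
glClear(GL_COLOR_BUFFER_BIT | GL_STENCIL_BUFFER_BIT);
glDisable(GL_BLEND);
glDisable(GL_STENCIL_TEST);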
While your fragment shader "returns" without setting a fragment color, like here:
else {
    // r = g = b = 0 // s = 0, v is undefined
    s = 0.0;
    h = -1.0;
    return; // <<<<==== NO gl_FragColor set
I think this code also lacks an else-path for the cases s <= 0.7 and h <= 320.0:
if (v > 0.8) {
    if (s > 0.7) {
        if (h > 320.0 && h < 360.0) {
            if (luminance > 1.0)
                gl_FragColor = vec4(vec3(1.0), textureColor.a);
            else
                gl_FragColor = vec4(vec3(luminance), textureColor.a);
        }
        // <<<<===== NO 'else' for h <= 320
    }
    // <<<<====== NO 'else' for s <= 0.7
} else {
    gl_FragColor = vec4(0.0, 0.0, 0.0, 1.0);
}
Thus, there are many cases where the fragment (current pixel on image) remains untouched.
So my mistake was that in some places I returned without setting gl_FragColor. In my case, anywhere the processing fails or the conditions aren't met, I have to set:
gl_FragColor = vec4(0.0, 0.0, 0.0, 1.0);
The corrected code looks like this:
precision highp float;

varying vec2 textureCoordinate;
uniform sampler2D inputImageTexture;
const highp vec3 W = vec3(0.299, 0.587, 0.114);

void main()
{
    lowp vec4 textureColor = texture2D(inputImageTexture, textureCoordinate);
    float luminance = dot(textureColor.rgb, W);
    //RF: test
    float r = textureColor.r;
    float g = textureColor.g;
    float b = textureColor.b;
    float h;
    float s;
    float v;
    float min = 0.0;
    float max = 0.0;
    float delta = 0.0;
    if (r >= g) {
        if (r >= b) {
            max = r;
        } else {
            max = b;
        }
    } else {
        if (g >= b) {
            max = g;
        } else {
            max = b;
        }
    }
    // min = MIN( r, g, b );
    if (r < g) {
        if (r < b) {
            min = r;
        } else {
            min = b;
        }
    } else {
        if (g < b) {
            min = g;
        } else {
            min = b;
        }
    }
    v = max; // v
    delta = max - min;
    if (delta == 0.0) {
        h = 0.0;
        s = 0.0;
        gl_FragColor = vec4(0.0, 0.0, 0.0, 1.0);
        return;
    }
    else if (max != 0.0)
        s = delta / max; // s
    else {
        s = 0.0;
        h = -1.0;
        gl_FragColor = vec4(0.0, 0.0, 0.0, 1.0);
        return;
    }
    if (r == max) {
        h = (g - b) / delta; // between yellow & magenta
        h = mod(h, 6.0);
    }
    else if (g == max)
        h = 2.0 + (b - r) / delta; // between cyan & yellow
    else
        h = 4.0 + (r - g) / delta; // between magenta & cyan
    h = h * 60.0; // degrees
    if (h < 0.0)
        h = h + 360.0;
    //---------------------------------------------------
    if (v > 0.8) {
        if (s > 0.2) {
            if (h > 320.0 && h < 360.0) {
                gl_FragColor = vec4(vec3(luminance), textureColor.a);
                return;
            }
        }
    }
    gl_FragColor = vec4(0.0, 0.0, 0.0, 1.0);
}
and then it works great:
Resulting Image
I am trying to implement the ray tracing algorithm and I have some trouble computing the reflected rays off spherical objects. It seems that for some particular rays, the reflected ray simply passes through and is collinear with the traced ray.
Below is how I record the ray-sphere intersection:
bool Sphere::intersectLocal(const ray& r, isect& i) const {
    Vec3d P = r.getPosition();
    Vec3d D = r.getDirection();
    //D.normalize();
    double a = dot(D, D);
    double b = 2 * dot(P, D);
    double c = dot(P, P) - 1;
    double delta = b * b - 4 * a * c;
    if (delta < 0)
        return false;
    if (delta == 0) {
        double t = -b / 2 * a;
        Vec3d Q = P + t * D;
        Vec3d N = Q;
        N.normalize();
        i.setT(t);
        i.setN(N);
        i.setObject(this);
        return true;
    }
    if (delta > 0) {
        double t1 = (-b - sqrt(delta)) / 2 * a;
        double t2 = (-b + sqrt(delta)) / 2 * a;
        double t;
        if (t1 > 0) t = t1;
        else if (t2 > 0) t = t2;
        else return false;
        Vec3d N = P + t * D;
        N.normalize();
        i.setT(t);
        i.setN(N);
        i.setObject(this);
        return true;
    }
    return false;
}
And this is how I compute the reflected ray for each intersection:
isect i;
if (scene->intersect(r, i)) {
    // An intersection occurred!
    const Material& m = i.getMaterial();
    double t = i.t;
    Vec3d N = i.N;
    Vec3d I = m.shade(scene, r, i); // local illumination
    if (!m.kr(i).iszero() && depth >= 0) {
        // compute reflection direction
        Vec3d raydir = r.getDirection();
        Vec3d refldir = 2 * dot(-raydir, i.N) * i.N + raydir;
        refldir.normalize();
        ray reflectionRay = ray(r.at(i.t), refldir, ray::RayType::REFLECTION);
        Vec3d reflection = traceRay(reflectionRay, thresh, depth - 1);
        Vec3d R = reflection;
        I += R;
    }
    return I;
} else {
    // No intersection. This ray travels to infinity, so we color
    // it according to the background color, which in this (simple)
    // case is just black.
    return Vec3d(0.0, 0.0, 0.0);
}
The code above seems to work fine for most of the points where rays hit the sphere, but for others it does not reflect as I expected.
If I see it right, this makes the normal face the same direction as the ray, so with ray == normal == reflected ray, nothing gets reflected:
Vec3d Q = P + t * D;
Vec3d N = Q;
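If that is the cause, one common guard (a sketch using the same Vec3d helpers as the code above) is to flip the normal whenever it points along the incoming ray:

Vec3d Q = P + t * D;   // hit point; for the unit sphere this is also the normal
Vec3d N = Q;
N.normalize();
if (dot(D, N) > 0)     // normal points with the incoming ray: flip it
    N = -N;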
About errors in floating-point arithmetic and how to deal with them:
What Every Computer Scientist Should Know About Floating-Point Arithmetic
Here you can find how to compare floating-point numbers; searching for "relative absolute compare floating" will turn up more information:
https://floating-point-gui.de/errors/comparison/
This is an excerpt from my code in C#. Almost never use an absolute compare.
public static bool IsAlmostRelativeEquals(this double d1, double d2, double epsilon)
{
    double absDiff = Math.Abs(d1 - d2);
    if (double.IsPositiveInfinity(absDiff))
        return false;
    if (absDiff < epsilon)
        return true;
    double absMax = Math.Max(Math.Abs(d1), Math.Abs(d2));
    return Math.Abs(d1 - d2) <= epsilon * absMax;
}

public static bool IsAlmostZero(this double d, double epsilon)
{
    double abs = Math.Abs(d);
    if (double.IsPositiveInfinity(abs))
        return false;
    return abs < epsilon;
}
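Applied to the sphere code above, the exact delta == 0 test would become a tolerance test; a minimal C++ sketch (the epsilon value is illustrative):

#include <cmath>

// Sketch: treat the discriminant as zero when it is tiny,
// instead of comparing it to 0.0 exactly.
bool isAlmostZero(double d, double epsilon = 1e-12)
{
    return std::fabs(d) < epsilon;
}

// ...inside the intersection routine:
// double delta = b * b - 4 * a * c;
// if (isAlmostZero(delta)) { /* tangent hit: one root */ }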
I'm trying to write a ray tracer for objects formed of triangular meshes. I'm using an external library to load a cube from the .ply format and then trace it. So far I've implemented most of the tracer, and now I'm trying to test it with a single cube, but for some reason all I get on the screen is a red line. I've tried several ways to fix it but I simply can't figure it out anymore.
For this primary test I'm only creating primary rays; if they hit my cube, I color that pixel with the cube's diffuse color and return. For checking ray-object intersections, I go through all the triangles that form the object and return the distance to the closest one. It would be great if you could have a look at the code and tell me what could have gone wrong and where. I would greatly appreciate it.
Ray-Triangle intersection:
bool intersectTri(const Vec3D& ray_origin, const Vec3D& ray_direction,
                  const Vec3D& v0, const Vec3D& v1, const Vec3D& v2,
                  double& t, double& u, double& v) const
{
    Vec3D edge1 = v1 - v0;
    Vec3D edge2 = v2 - v0;
    Vec3D pvec = ray_direction.cross(edge2);
    double det = edge1.dot(pvec);
    if (det > -THRESHOLD && det < THRESHOLD)
        return false;
    double invDet = 1 / det;
    Vec3D tvec = ray_origin - v0;
    u = tvec.dot(pvec) * invDet;
    if (u < 0 || u > 1)
        return false;
    Vec3D qvec = tvec.cross(edge1);
    v = ray_direction.dot(qvec) * invDet;
    if (v < 0 || u + v > 1)
        return false;
    t = edge2.dot(qvec) * invDet;
    if (t < 0)
        return false;
    return true;
}
// Object intersection
bool intersect(const Vec3D& ray_origin, const Vec3D& ray_direction, IntersectionData& idata, bool enforce_max) const
{
    double tClosest;
    if (enforce_max)
    {
        tClosest = idata.t;
    }
    else
    {
        tClosest = TMAX;
    }
    for (int i = 0; i < indices.size(); i++)
    {
        const Vec3D v0 = vertices[indices[i][0]];
        const Vec3D v1 = vertices[indices[i][1]];
        const Vec3D v2 = vertices[indices[i][2]];
        double t, u, v;
        if (intersectTri(ray_origin, ray_direction, v0, v1, v2, t, u, v))
        {
            if (t < tClosest)
            {
                idata.t = t;
                tClosest = t;
                idata.u = u;
                idata.v = v;
                idata.index = i;
            }
        }
    }
    return (tClosest < TMAX && tClosest > 0);
}
Vec3D trace(World world, Vec3D& ray_origin, Vec3D& ray_direction)
{
    Vec3D objColor = world.background_color;
    IntersectionData idata;
    double coeff = 1.0;
    int depth = 0;
    double tClosest = TMAX;
    Object* hitObject = NULL;
    for (unsigned int i = 0; i < world.objs.size(); i++)
    {
        IntersectionData idata_curr;
        if (world.objs[i].intersect(ray_origin, ray_direction, idata_curr, false))
        {
            if (idata_curr.t < tClosest && idata_curr.t > 0)
            {
                idata.t = idata_curr.t;
                idata.u = idata_curr.u;
                idata.v = idata_curr.v;
                idata.index = idata_curr.index;
                tClosest = idata_curr.t;
                hitObject = &(world.objs[i]);
            }
        }
    }
    if (hitObject == NULL)
    {
        return world.background_color;
    }
    else
    {
        return hitObject->getDiffuse();
    }
}
int main(int argc, char** argv)
{
    parse("cube.ply");
    Vec3D diffusion1(1, 0, 0);
    Vec3D specular1(1, 1, 1);
    Object cube1(coordinates, connected_vertices, diffusion1, specular1, 0, 0);
    World wrld;
    // Add objects to the world
    wrld.objs.push_back(cube1);
    Vec3D background(0, 0, 0);
    wrld.background_color = background;
    // Set light color
    Vec3D light_clr(1, 1, 1);
    wrld.light_colors.push_back(light_clr);
    // Set light position
    Vec3D light(0, 64, -10);
    wrld.light_positions.push_back(light);

    int width = 128;
    int height = 128;
    Vec3D* image = new Vec3D[width * height];
    Vec3D* pixel = image;
    // Trace rays
    for (int y = -height / 2; y < height / 2; ++y)
    {
        for (int x = -width / 2; x < width / 2; ++x, ++pixel)
        {
            Vec3D ray_dir(x + 0.5, y + 0.5, -1.0);
            ray_dir.normalize();
            Vec3D ray_orig(0.5 * width, 0.5 * height, 0.0);
            *pixel = trace(wrld, ray_orig, ray_dir);
        }
    }
    savePPM("./test.ppm", image, width, height);
    return 0;
}
I've just run a test case and I got this:
for a unit cube centered at (0, 0, -1.5) and scaled by 100 on the X and Y axes. It seems that something is wrong with the projection, but I can't tell exactly what from the result. Also, since the cube is centered at (0,0), shouldn't the final object appear in the middle of the picture?
FIX: I fixed the centering problem by doing ray_dir = ray_dir - ray_orig before normalizing and calling the trace function. Still, the perspective seems plainly wrong.
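For comparison, a common pinhole-camera setup keeps the eye at the origin and maps each pixel through an image plane at z = -1; a minimal sketch (the primaryRay name and fovDegrees parameter are mine, Vec3D is the type used above):

#include <cmath>

// Sketch: generate a primary ray for pixel (x, y), camera at the origin
// looking down -Z, image plane at z = -1. fovDegrees is the vertical
// field of view; aspect corrects for non-square images.
void primaryRay(int x, int y, int width, int height, double fovDegrees,
                Vec3D& ray_orig, Vec3D& ray_dir)
{
    const double PI = 3.14159265358979323846;
    double scale = std::tan(fovDegrees * 0.5 * PI / 180.0);
    double aspect = double(width) / double(height);
    // map pixel centers to [-1, 1], then scale onto the image plane
    double px = (2.0 * (x + 0.5) / width - 1.0) * scale * aspect;
    double py = (1.0 - 2.0 * (y + 0.5) / height) * scale;
    ray_orig = Vec3D(0, 0, 0);
    ray_dir = Vec3D(px, py, -1.0);
    ray_dir.normalize();
}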
I continued the work and now I started implementing the diffuse reflection according to Phong.
Vec3D trace(World world, Vec3D& ray_origin, Vec3D& ray_direction)
{
    Vec3D objColor = Vec3D(0);
    IntersectionData idata;
    double coeff = 1.0;
    int depth = 0;
    do
    {
        double tClosest = TMAX;
        Object* hitObject = NULL;
        for (unsigned int i = 0; i < world.objs.size(); i++)
        {
            IntersectionData idata_curr;
            if (world.objs[i].intersect(ray_origin, ray_direction, idata_curr, false))
            {
                if (idata_curr.t < tClosest && idata_curr.t > 0)
                {
                    idata.t = idata_curr.t;
                    idata.u = idata_curr.u;
                    idata.v = idata_curr.v;
                    idata.index = idata_curr.index;
                    tClosest = idata_curr.t;
                    hitObject = &(world.objs[i]);
                }
            }
        }
        if (hitObject == NULL)
        {
            return world.background_color;
        }
        Vec3D newStart = ray_origin + ray_direction * idata.t;
        // Compute normal at intersection by interpolating vertex normals (Phong idea)
        Vec3D v0 = hitObject->getVertices()[hitObject->getIndices()[idata.index][0]];
        Vec3D v1 = hitObject->getVertices()[hitObject->getIndices()[idata.index][1]];
        Vec3D v2 = hitObject->getVertices()[hitObject->getIndices()[idata.index][2]];
        Vec3D n1 = hitObject->getNormals()[hitObject->getIndices()[idata.index][0]];
        Vec3D n2 = hitObject->getNormals()[hitObject->getIndices()[idata.index][1]];
        Vec3D n3 = hitObject->getNormals()[hitObject->getIndices()[idata.index][2]];
        // Vec3D N = n1 + (n2 - n1)*idata.u + (n3 - n1)*idata.v;
        Vec3D N = v0.computeFaceNrm(v1, v2);
        if (ray_direction.dot(N) > 0)
        {
            N = N * (-1);
        }
        N.normalize();
        Vec3D lightray_origin = newStart;
        for (unsigned int itr = 0; itr < world.light_positions.size(); itr++)
        {
            Vec3D lightray_dir = world.light_positions[0] - newStart;
            lightray_dir.normalize();
            double cos_theta = max(N.dot(lightray_dir), 0.0);
            objColor.setX(objColor.getX() + hitObject->getDiffuse().getX() * hitObject->getDiffuseReflection() * cos_theta);
            objColor.setY(objColor.getY() + hitObject->getDiffuse().getY() * hitObject->getDiffuseReflection() * cos_theta);
            objColor.setZ(objColor.getZ() + hitObject->getDiffuse().getZ() * hitObject->getDiffuseReflection() * cos_theta);
            return objColor;
        }
        depth++;
    } while (coeff > 0 && depth < MAX_RAY_DEPTH);
    return objColor;
}
When a primary ray reaches an object, I send another ray to the light source positioned at (0,0,0) and return the color according to the Phong illumination model for diffuse reflection, but the result is really not the one I expected: http://s15.postimage.org/vc6uyyssr/test.png. The cube is a unit cube centered at (0,0,0) and then translated by (1.5, -1.5, -1.5). From my point of view, the left side of the cube should get more light, and it actually does. What do you think of it?
I'm having a problem with edge detection using the Sobel operator: it produces too many false edges; the effect is shown in the pictures below.
I'm using a 3x3 Sobel operator, first extracting vertical edges and then horizontal ones; the final output is the magnitude of the two filter outputs.
Edges in synthetic images are extracted properly, but natural images have too many false edges or "noise", even if the image is preprocessed with a blur or median filter.
What might be the cause of this? Is it an implementation problem (then why are synthetic images fine?) or do I need to do more preprocessing?
Original:
Output:
Code:
void imageOp::filter(image8* image, int maskSize, int16_t* mask)
{
    if ((image == NULL) || (maskSize / 2 == 0) || maskSize < 1)
    {
        if (image == NULL)
        {
            printf("filter: image pointer == NULL \n");
        }
        else if (maskSize < 1)
        {
            printf("filter: maskSize must be greater than 1\n");
        }
        else
        {
            printf("filter: maskSize must be odd number\n");
        }
        return;
    }

    image8* fImage = new image8(image->getHeight(), image->getWidth());
    uint16_t sum = 0;
    int d = maskSize / 2;
    int ty, tx;

    for (int x = 0; x < image->getHeight(); x++) // loop over image
    {
        for (int y = 0; y < image->getWidth(); y++)
        {
            for (int xm = -d; xm <= d; xm++)
            {
                for (int ym = -d; ym <= d; ym++)
                {
                    ty = y + ym;
                    if (ty < 0) // edge conditions
                    {
                        ty = (-1) * ym - 1;
                    }
                    else if (ty >= image->getWidth())
                    {
                        ty = image->getWidth() - ym;
                    }
                    tx = x + xm;
                    if (tx < 0) // edge conditions
                    {
                        tx = (-1) * xm - 1;
                    }
                    else if (tx >= image->getHeight())
                    {
                        tx = image->getHeight() - xm;
                    }
                    sum += image->img[tx][ty] * mask[((xm + d) * maskSize) + ym + d];
                }
            }
            if (sum > 255)
            {
                fImage->img[x][y] = 255;
            }
            else if (sum < 0)
            {
                fImage->img[x][y] = 0;
            }
            else
            {
                fImage->img[x][y] = (uint8_t)sum;
            }
            sum = 0;
        }
    }

    for (int x = 0; x < image->getHeight(); x++)
    {
        for (int y = 0; y < image->getWidth(); y++)
        {
            image->img[x][y] = fImage->img[x][y];
        }
    }
    delete fImage;
}
This appears to be due to a math error somewhere in your code. To follow on my comment, this is what I get when I run your image through a Sobel operator here (edge strength is indicated by brightness of the output image):
I used a GLSL fragment shader to produce this:
precision mediump float;

varying vec2 textureCoordinate;
varying vec2 leftTextureCoordinate;
varying vec2 rightTextureCoordinate;
varying vec2 topTextureCoordinate;
varying vec2 topLeftTextureCoordinate;
varying vec2 topRightTextureCoordinate;
varying vec2 bottomTextureCoordinate;
varying vec2 bottomLeftTextureCoordinate;
varying vec2 bottomRightTextureCoordinate;

uniform sampler2D inputImageTexture;

void main()
{
    float bottomLeftIntensity = texture2D(inputImageTexture, bottomLeftTextureCoordinate).r;
    float topRightIntensity = texture2D(inputImageTexture, topRightTextureCoordinate).r;
    float topLeftIntensity = texture2D(inputImageTexture, topLeftTextureCoordinate).r;
    float bottomRightIntensity = texture2D(inputImageTexture, bottomRightTextureCoordinate).r;
    float leftIntensity = texture2D(inputImageTexture, leftTextureCoordinate).r;
    float rightIntensity = texture2D(inputImageTexture, rightTextureCoordinate).r;
    float bottomIntensity = texture2D(inputImageTexture, bottomTextureCoordinate).r;
    float topIntensity = texture2D(inputImageTexture, topTextureCoordinate).r;
    float h = -topLeftIntensity - 2.0 * topIntensity - topRightIntensity + bottomLeftIntensity + 2.0 * bottomIntensity + bottomRightIntensity;
    float v = -bottomLeftIntensity - 2.0 * leftIntensity - topLeftIntensity + bottomRightIntensity + 2.0 * rightIntensity + topRightIntensity;
    float mag = length(vec2(h, v));
    gl_FragColor = vec4(vec3(mag), 1.0);
}
You don't show your mask values, which I assume contain the Sobel kernel. In the above code, I've hardcoded the calculations for a 3x3 Sobel kernel, applied against the red channel of each pixel. This is purely for performance on my platform.
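For reference, the standard 3x3 Sobel kernels (in one common sign convention), written here in the row-major int16_t layout your filter function indexes:

// Standard 3x3 Sobel kernels: horizontal and vertical derivative masks.
const int16_t sobelX[9] = { -1, 0, 1,
                            -2, 0, 2,
                            -1, 0, 1 };
const int16_t sobelY[9] = { -1, -2, -1,
                             0,  0,  0,
                             1,  2,  1 };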
One thing I don't see in your code (again, I may be missing it, like I did the sum being set back to 0) is the determination of the magnitude of the vector formed by the two portions of the Sobel operator. I'd expect to see a square root operation in there somewhere.
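A sketch of that combining step, assuming two buffers holding the horizontal and vertical filter responses (the sobelMagnitude name and signed buffers are my assumptions; signed storage matters because the kernels contain negative weights):

#include <cmath>
#include <cstdint>

// Sketch: combine the two Sobel passes into an edge-magnitude image.
// gx and gy are the horizontal/vertical responses, kept signed because
// the kernel weights are negative; out receives the clamped magnitude.
void sobelMagnitude(const int16_t* gx, const int16_t* gy,
                    uint8_t* out, int pixelCount)
{
    for (int i = 0; i < pixelCount; i++)
    {
        double mag = std::sqrt(double(gx[i]) * gx[i] + double(gy[i]) * gy[i]);
        out[i] = mag > 255.0 ? 255 : uint8_t(mag);
    }
}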