As you can see in the image, I'm getting a black circle on top of the spheres, and the image appears grainy. It's supposed to be sharper, but instead there are these small black and white spots.
This is the code for the shadow ray:
int pos = 0;
float intersect(const ray &r, vector<unique_ptr<object>> &obj)
{
//gives closest object hit point and position;
float closest = numeric_limits<float>::max();
for(int j = 0; j < obj.size(); j++)
{
float t = obj[j]->intersect(r);
if(t > 1e-6 && t < closest)
{
closest = t;
pos = j;
}
}
return closest;
}
vec color(const ray& r, vector<unique_ptr<object>> &shape, vector<unique_ptr<Light>> &lighting, int depth)
{
vec background_color( .678, .847, .902);
vec total{0.0, 0.0, 0.0};
vec ambient{0.125, 0.125, 0.125};
float t_near = intersect(r, shape);
if(t_near == numeric_limits<float>::max())
return background_color;
else
{
total += ambient;
for(int i = 0; i < lighting.size(); i++){
total += shape[pos]->shade(lighting[i]->position(), t_near, r);//gives specular + diffuse
vec shadow_dir = unit_vector(lighting[i]->position() - r.p_at_par(t_near));
ray shadowRay(r.p_at_par(t_near), shadow_dir);
float dist = shadow_dir.lenght();
float a = intersect(shadowRay, shape);
if(a != numeric_limits<float>::max())
return vec(0.0, 0.0, 0.0);
}
return total;
}
}
Okay, got it.
For the black circle, you have to test that the distance to the shadow-ray hit is less than the distance between the point and the light source. Also, for computing that distance, shadow_dir shouldn't be normalised (take its length before normalising it for the ray direction). To deal with the black and white spots, which are due to the shadow ray self-intersecting the surface it starts from (shadow acne), you have to offset the hit point by N*bias, where the bias is for example 1e-4. The bias shouldn't be too small:
vec shadow_dir = lighting[i]->position() - r.p_at_par(t_near);
float dist = shadow_dir.lenght();
vec N = unit_vector(shape[pos]->normal(r, t_near));
shadow_dir = unit_vector(shadow_dir);
ray shadowRay(r.p_at_par(t_near) + N*1e-4, shadow_dir);
float a = intersect(shadowRay, shape);
if(a != numeric_limits<float>::max()){
if(a < dist) // the occluder lies between the hit point and the light
return vec(0.0, 0.0, 0.0);
}
}
I've written a fragment shader in GLSL, using Shadertoy.
Link: https://www.shadertoy.com/view/wtGSzy
Most of it works, but when I enable texture lookups in the reflection function, the performance drops from 60 FPS to around 5 FPS.
The code in question is on lines 173 - 176
if(SDFObjectToDraw.texChannelID == 0)
col = texture(iChannel0, uv);
if(SDFObjectToDraw.texChannelID == 1)
col = texture(iChannel1, uv);
This same code can be seen in my rayMarch function (lines 274-277) and works fine for colouring my objects. It only causes issues in the reflection function.
My question is: why are the texture lookups in the reflection code dropping my performance this much, and what can I do to improve it?
/**
* Return the normalized direction to march in from the eye point for a single pixel.
*
* fieldOfView: vertical field of view in degrees
* size: resolution of the output image
* fragCoord: the x,y coordinate of the pixel in the output image
*/
vec3 rayDirection(float fieldOfView, vec2 size, vec2 fragCoord) {
vec2 xy = fragCoord - size / 2.0;
float z = size.y / tan(radians(fieldOfView) / 2.0);
return normalize(vec3(xy, -z));
}
float start = 0.0;
vec3 eye = vec3(0,0,5);
int MAX_MARCHING_STEPS = 255;
float EPSILON = 0.00001;
float end = 10.0;
const uint Shpere = 1u;
const uint Box = 2u;
const uint Plane = 4u;
vec3 lightPos = vec3(-10,0,5);
#define M_PI 3.1415926535897932384626433832795
const int SDF_OBJECT_COUNT = 4;
struct SDFObject
{
uint Shape;
vec3 Position;
float Radius;
int texChannelID;
float Ambiant;
float Spec;
float Diff;
vec3 BoxSize;
bool isMirror; //quick hack to get reflections working
};
SDFObject SDFObjects[SDF_OBJECT_COUNT] = SDFObject[SDF_OBJECT_COUNT](
SDFObject(Shpere, vec3(2,0,-3),1.0,0,0.2,0.2,0.8, vec3(0,0,0),true)
,SDFObject(Shpere, vec3(-2,0,-3),1.0,0,0.1,1.0,1.0, vec3(0,0,0),false)
,SDFObject(Box, vec3(0,0,-6),0.2,1,0.2,0.2,0.8, vec3(1.0,0.5,0.5),false)
,SDFObject(Plane, vec3(0,0,0),1.0,1,0.2,0.2,0.8, vec3(0.0,1.0,0.0),false)
);
float shereSDF(vec3 p, SDFObject o)
{
return length(p-o.Position)-o.Radius;
}
float boxSDF(vec3 pointToTest, vec3 boxBoundery, float radius, vec3 boxPos)
{
vec3 q = abs(pointToTest - boxPos) - boxBoundery;
return length(max(q,0.0)) + min(max(q.x, max(q.y,q.z)) ,0.0) -radius;
}
float planeSDF(vec3 p, vec4 n, vec3 Pos)
{
return dot(p-Pos, n.xyz) + n.w;
}
bool IsShadow(vec3 LightPos, vec3 HitPos)
{
bool isShadow = false;
vec3 viewRayDirection = normalize(lightPos- HitPos) ;
float depth = start;
vec3 hitpoint;
for(int i=0; i<MAX_MARCHING_STEPS; i++)
{
hitpoint = (HitPos+ depth * viewRayDirection);
float dist = end;
for(int j =0; j<SDF_OBJECT_COUNT; j++)
{
float distToObjectBeingConsidered;
if(SDFObjects[j].Shape == Shpere)
distToObjectBeingConsidered = shereSDF(hitpoint, SDFObjects[j]);
if(SDFObjects[j].Shape == Box)
distToObjectBeingConsidered = boxSDF(hitpoint, SDFObjects[j].BoxSize , SDFObjects[j].Radius, SDFObjects[j].Position);
if(SDFObjects[j].Shape == Plane)
distToObjectBeingConsidered= planeSDF(hitpoint, vec4(SDFObjects[j].BoxSize, SDFObjects[j].Radius), SDFObjects[j].Position);
if( distToObjectBeingConsidered < dist)
{
dist = distToObjectBeingConsidered;
}
}
if(dist < EPSILON)
{
isShadow = true;
}
depth += dist;
if(depth >= end)
{
isShadow = false;
}
}
return isShadow;
}
vec3 MirrorReflection(vec3 inComingRay, vec3 surfNormal, vec3 HitPos, int objectIndexToIgnore)
{
vec3 returnCol;
vec3 reflectedRay = reflect(inComingRay, surfNormal);
vec3 RayDirection = normalize(reflectedRay) ;
float depth = start;
vec3 hitpoint;
int i;
for(i=0; i<MAX_MARCHING_STEPS; i++)
{
hitpoint = (HitPos+ depth * RayDirection);
SDFObject SDFObjectToDraw;
float dist = end;
for(int j =0; j<SDF_OBJECT_COUNT; j++)
{
float distToObjectBeingConsidered;
if(SDFObjects[j].Shape == Shpere)
distToObjectBeingConsidered = shereSDF(hitpoint, SDFObjects[j]);
if(SDFObjects[j].Shape == Box)
distToObjectBeingConsidered = boxSDF(hitpoint, SDFObjects[j].BoxSize , SDFObjects[j].Radius, SDFObjects[j].Position);
if(SDFObjects[j].Shape == Plane)
distToObjectBeingConsidered= planeSDF(hitpoint, vec4(SDFObjects[j].BoxSize, SDFObjects[j].Radius), SDFObjects[j].Position);
if( distToObjectBeingConsidered < dist && j!= objectIndexToIgnore )// D > 0.0)
{
dist = distToObjectBeingConsidered;
SDFObjectToDraw = SDFObjects[j];
}
}
if(dist < EPSILON)
{
vec3 normal =normalize(hitpoint-SDFObjectToDraw.Position);
float u = 0.5+ (atan(normal.z, normal.x)/(2.0*M_PI));
float v = 0.5+ (asin(normal.y)/(M_PI));
vec2 uv =vec2(u,v);
vec4 col = vec4(0,0.5,0.5,0);
///>>>>>>>>>>>> THESE LINES ARE broken, WHY?
//if(SDFObjectToDraw.texChannelID == 0)
//col = texture(iChannel0, uv);
//if(SDFObjectToDraw.texChannelID == 1)
//col = texture(iChannel1, uv);
vec3 NormalizedDirToLight = normalize(lightPos-SDFObjectToDraw.Position);
float theta = dot(normal,NormalizedDirToLight);
vec3 reflectionOfLight = reflect(NormalizedDirToLight, normal);
vec3 viewDir = normalize(SDFObjectToDraw.Position);
float Spec = dot(reflectionOfLight, viewDir);
if(IsShadow(lightPos, hitpoint))
{
returnCol= (col.xyz*SDFObjectToDraw.Ambiant);
}
else
{
returnCol= (col.xyz*SDFObjectToDraw.Ambiant)
+(col.xyz * max(theta *SDFObjectToDraw.Diff, SDFObjectToDraw.Ambiant));
}
break;
}
depth += dist;
if(depth >= end)
{
//should look up bg texture here but cant be assed right now
returnCol = vec3(1.0,0.0,0.0);
break;
}
}
return returnCol;//*= (vec3(i+1)/vec3(MAX_MARCHING_STEPS));
}
vec3 rayMarch(vec2 fragCoord)
{
vec3 viewRayDirection = rayDirection(45.0, iResolution.xy, fragCoord);
float depth = start;
vec3 hitpoint;
vec3 ReturnColour = vec3(0,0,0);
for(int i=0; i<MAX_MARCHING_STEPS; i++)
{
hitpoint = (eye+ depth * viewRayDirection);
float dist = end;
SDFObject SDFObjectToDraw;
int objectInDexToIgnore=-1;
//find closest object to current point
for(int j =0; j<SDF_OBJECT_COUNT; j++)
{
float distToObjectBeingConsidered;
if(SDFObjects[j].Shape == Shpere)
distToObjectBeingConsidered = shereSDF(hitpoint, SDFObjects[j]);
if(SDFObjects[j].Shape == Box)
distToObjectBeingConsidered = boxSDF(hitpoint, SDFObjects[j].BoxSize , SDFObjects[j].Radius, SDFObjects[j].Position);
if(SDFObjects[j].Shape == Plane)
distToObjectBeingConsidered= planeSDF(hitpoint, vec4(SDFObjects[j].BoxSize, SDFObjects[j].Radius), SDFObjects[j].Position);
if( distToObjectBeingConsidered < dist)
{
dist = distToObjectBeingConsidered;
SDFObjectToDraw = SDFObjects[j];
objectInDexToIgnore = j;
}
}
//if we are close enough to an object to hit it.
if(dist < EPSILON)
{
vec3 normal =normalize(hitpoint-SDFObjectToDraw.Position);
if(SDFObjectToDraw.isMirror)
{
ReturnColour = MirrorReflection( viewRayDirection, normal, hitpoint, objectInDexToIgnore);
}
else
{
float u = 0.5+ (atan(normal.z, normal.x)/(2.0*M_PI));
float v = 0.5+ (asin(normal.y)/(M_PI));
vec2 uv =vec2(u,v);
vec4 col;
if(SDFObjectToDraw.texChannelID == 0)
col = texture(iChannel0, uv);
if(SDFObjectToDraw.texChannelID == 1)
col = texture(iChannel1, uv);
vec3 NormalizedDirToLight = normalize(lightPos-SDFObjectToDraw.Position);
float theta = dot(normal,NormalizedDirToLight);
vec3 reflectionOfLight = reflect(NormalizedDirToLight, normal);
vec3 viewDir = normalize(SDFObjectToDraw.Position);
float Spec = dot(reflectionOfLight, viewDir);
if(IsShadow(lightPos, hitpoint))
{
ReturnColour= (col.xyz*SDFObjectToDraw.Ambiant);
}
else
{
ReturnColour= (col.xyz*SDFObjectToDraw.Ambiant)
+(col.xyz * max(theta *SDFObjectToDraw.Diff, SDFObjectToDraw.Ambiant));
//+(col.xyz* Spec * SDFObjectToDraw.Spec);
}
}
return ReturnColour;
}
depth += dist;
if(depth >= end)
{
float u = fragCoord.x/ iResolution.x;
float v = fragCoord.y/ iResolution.y;
vec4 col = texture(iChannel2, vec2(u,v));
ReturnColour =col.xyz;
}
}
return ReturnColour;
}
void mainImage( out vec4 fragColor, in vec2 fragCoord )
{
// Normalized pixel coordinates (from 0 to 1)
//vec2 uv = fragCoord/iResolution.xy;
// Time varying pixel color
//vec3 col = 0.5 + 0.5*cos(iTime+uv.xyx+vec3(0,2,4));
// Output to screen
lightPos *= cos(iTime+vec3(1.5,2,2));
//lightPos= vec3(cos(iTime)*2.0,0,0);
vec3 SDFCol= rayMarch(fragCoord);
vec3 col = vec3(0);
//if(SDFVal <=1.0)
// col = vec3(1,0,0);
//col = vec3(SDFVal,0,0);
col = vec3(0.5,0,0);
col = SDFCol;
fragColor = vec4(col,1.0);
}
[...] This same code can be seen in my rayMarch function (lines 274-277) and works fine for colouring my objects. [...]
The "working" texture lookup is executed in a loop in rayMarch. MAX_MARCHING_STEPS is 255, so the lookup is done at most 255 times.
vec3 rayMarch(vec2 fragCoord)
{
// [...]
for(int i=0; i<MAX_MARCHING_STEPS; i++)
{
// [...]
if(SDFObjectToDraw.texChannelID == 0)
col = texture(iChannel0, uv);
if(SDFObjectToDraw.texChannelID == 1)
col = texture(iChannel1, uv);
// [...]
}
// [...]
}
When you do the lookup in MirrorReflection, the performance breaks down, because it is done in a loop in MirrorReflection, and MirrorReflection is itself called in a loop in rayMarch. In this case the lookup is done up to 255*255 = 65025 times.
~65,000 texture lookups for a single fragment is far too much and causes the performance to break down.
vec3 MirrorReflection(vec3 inComingRay, vec3 surfNormal, vec3 HitPos, int objectIndexToIgnore)
{
// [...]
for(i=0; i<MAX_MARCHING_STEPS; i++)
{
// [...]
if(SDFObjectToDraw.texChannelID == 0)
col = texture(iChannel0, uv);
if(SDFObjectToDraw.texChannelID == 1)
col = texture(iChannel1, uv);
// [...]
}
// [...]
}
vec3 rayMarch(vec2 fragCoord)
{
// [...]
for(int i=0; i<MAX_MARCHING_STEPS; i++)
{
// [...]
ReturnColour = MirrorReflection(viewRayDirection, normal, hitpoint, objectInDexToIgnore);
// [...]
}
// [...]
}
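One way to improve it is to keep the texture fetches out of the marching loops entirely: inside MirrorReflection, record which texture channel and which uv were hit, break out of the loop, and sample the texture once afterwards. The sketch below only illustrates that restructuring; the hitChannel/hitUV names are made up, and the elided parts stand for the existing marching and shading code.
vec3 MirrorReflection(vec3 inComingRay, vec3 surfNormal, vec3 HitPos, int objectIndexToIgnore)
{
    // ... existing set-up ...
    int  hitChannel = -1;          // -1 means "no textured hit"
    vec2 hitUV      = vec2(0.0);
    for(int i = 0; i < MAX_MARCHING_STEPS; i++)
    {
        // ... march and pick SDFObjectToDraw exactly as before ...
        if(dist < EPSILON)
        {
            // Record the hit instead of sampling the texture inside the loop.
            hitChannel = SDFObjectToDraw.texChannelID;
            hitUV      = uv;       // uv computed from the hit normal as before
            break;
        }
        depth += dist;
        if(depth >= end) break;
    }
    // At most one texture lookup per reflection ray, outside the loop.
    vec4 col = vec4(0.0, 0.5, 0.5, 0.0);
    if(hitChannel == 0) col = texture(iChannel0, hitUV);
    if(hitChannel == 1) col = texture(iChannel1, hitUV);
    // ... apply the ambient/diffuse/shadow terms to col.xyz as before ...
    return col.xyz;
}
Doing the same in rayMarch (deferring its lookup until after the loop) keeps the per-fragment lookup count constant instead of proportional to the number of marching steps.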
I have this WebGL fragment shader in my HTML file, which runs fine except when I call the first 5 vec2 functions.
vec2 subCalc(vec2 z1, vec2 z2){
return vec2((z1.x - z2.x), (z1.y - z2.x));
}
I get this error:
It's weird because when I use the vec4 functions, for example 'HSVtoRGB', the code runs fine.
Full shader:
<script id="fragment-shader" type="x-shader/x-fragment">
precision mediump float;
varying vec4 actualPos;
uniform vec2 drag;
uniform float func;
uniform float n;
uniform vec2 center;
uniform float scale;
uniform float test;
vec2 subCalc(vec2 z1, vec2 z2){
return vec2((z1.x - z2.x), (z1.y - z2.x));
}
vec2 expCalc(vec2 z){
return vec2(exp(z.x)*cos(z.y),exp(z.x)*sin(z.y));
}
vec2 divCalc(vec2 z1, vec2 z2){
return vec2((z1.x*z2.x + z1.y*z2.y)/(z2.x*z2.x+z2.y*z2.y),(z1.y*z2.x-z1.x*z2.y)/(z2.x*z2.x+z2*z2.y));
}
vec2 multCalc(vec2 z1, vec2 z2){
return vec2((z1.x*z2.x - z1.y*z2.y), (z1.y*z2.x + z1.x*z2.y));
}
vec2 sumCalc(vec2 z1, vec2 z2){
return vec2((z1.x + z2.x), (z1.y + z2.x));
}
vec4 HSVtoRGB(float h, float s, float v)
{
//Convert between the HSV and RGB color model.
//Taken from http://www.cs.rit.edu/~ncs/color/t_convert.html and rewritten for GLSL
int i;
float f, p, q, t;
vec4 RGB;
if (s == 0.0)
{
// achromatic (grey)
RGB.x = RGB.y = RGB.z = v;
RGB.w = 1.0;
return RGB;
}
h /= 60.0; // sector 0 to 5
i = int(floor(h));
f = h - float(i); // fracional part of h
p = v * (1.0 - s);
q = v * (1.0 - s * f);
t = v * (1.0 - s * (1.0 - f));
if(i==0)
{
RGB.x = v;
RGB.y = t;
RGB.z = p;
}
else if(i==1)
{
RGB.x = q;
RGB.y = v;
RGB.z = p;
}
else if(i==2)
{
RGB.x = p;
RGB.y = v;
RGB.z = t;
}
else if(i==3)
{
RGB.x = p;
RGB.y = q;
RGB.z = v;
}
else if(i==4)
{
RGB.x = t;
RGB.y = p;
RGB.z = v;
}
else if(i==5)
{
RGB.x = v;
RGB.y = p;
RGB.z = q;
}
else
{
RGB.x = 1.0;
RGB.y = 1.0;
RGB.z = 1.0;
}
RGB.w = 1.0;
return RGB;
}
vec4 secondCase(vec4 posi){
float multX = ((posi.x * posi.x) - (posi.y * posi.y));
float multY = ((posi.y * posi.x) + (posi.x * posi.y));
float newX = multX;
float newY = multY;
float r = pow(sqrt((newX)*(newX)+(newY)*(newY)),n);
for(float i = 0.0; i <=10000.0; i++){
if(i>=n){break;}
newX = ((newX * posi.x) - (newY * posi.y));
newY = ((newY * posi.x) + (newX * posi.y));
}
float h = (atan(newY/newX));
float s = 1.0;
float v = fract((log2(r)));
h = (h*180.0)/(3.14);
if(h<0.0)
h = h*(-1.0);
if(newX <= 0.0 && newY > 0.0)
h = 180.0 - h;
if(newX < 0.0 && newY < 0.0)
h = 180.0 + h;
if(newX >= 0.0 && newY < 0.0)
h = 360.0 - h;
return HSVtoRGB(h,s,v);
}
vec4 thirdCase(vec4 posi){
vec2 divi = vec2((100.0*posi.x + 0.0*posi.y)/(posi.x*posi.x+posi.y*posi.y),(0.0*posi.x-100.0*posi.y)/(posi.x*posi.x+posi.y*posi.y));
vec2 exp = vec2(exp(divi.x)*cos(divi.y),exp(divi.x)*sin(divi.y));
float r = pow(sqrt((exp.x)*(exp.x)+(exp.y)*(exp.y)),n);
float h = (atan((exp.y/exp.x)));
float s = 1.0;
float v = fract((log2(r)));
h = (h*180.0)/(3.14);
if(h<0.0)
h = h*(-1.0);
if(exp.x <= 0.0 && exp.y > 0.0)
h = 180.0 - h;
if(exp.x < 0.0 && exp.y < 0.0)
h = 180.0 + h;
if(exp.x >= 0.0 && exp.y < 0.0)
h = 360.0 - h;
return HSVtoRGB(h,s,v);
}
vec4 forthCase(vec4 posi){
vec2 divi = vec2((100.0*posi.x + 0.0*posi.y)/(posi.x*posi.x+posi.y*posi.y),(0.0*posi.x-100.0*posi.y)/(posi.x*posi.x+posi.y*posi.y));
vec2 exp = vec2(exp(divi.x)*cos(divi.y),exp(divi.x)*sin(divi.y));
float r = pow(sqrt((exp.x)*(exp.x)+(exp.y)*(exp.y)),n);
float h = (atan((exp.y/exp.x))*3.0);
float s = 1.0;
float v = fract((log2(r)));
h = (h*180.0)/(3.14);
if(h<0.0)
h = h*(-1.0);
if(exp.x <= 0.0 && exp.y > 0.0)
h = 180.0 - h;
if(exp.x < 0.0 && exp.y < 0.0)
h = 180.0 + h;
if(exp.x >= 0.0 && exp.y < 0.0)
h = 360.0 - h;
return HSVtoRGB(h,s,v);
}
void main() {
vec4 finalPosition = actualPos;
finalPosition.x += drag.x;
finalPosition.y += drag.y;
finalPosition.x *= test;
finalPosition.y *= test;
float r = sqrt((finalPosition.x)*(finalPosition.x)+(finalPosition.y)*(finalPosition.y));
float h = atan((finalPosition.y/finalPosition.x));
float s = 1.0;
float v = fract((log2(r)));
h = (h*180.0)/(3.14);
if(h<0.0)
h = h*(-1.0);
if(finalPosition.x < 0.0 && finalPosition.y > 0.0)
h = 180.0 - h;
if(finalPosition.x < 0.0 && finalPosition.y < 0.0)
h = 180.0 + h;
if(finalPosition.x > 0.0 && finalPosition.y < 0.0)
h = 360.0 - h;
vec2 firstMemb = expCalc(finalPosition);
if(func == 1.0){
gl_FragColor = HSVtoRGB(h,s,v);
} else if(func == 2.0){
gl_FragColor = secondCase(finalPosition);
} else if(func == 3.0){
gl_FragColor = thirdCase(finalPosition);
}
}
</script>
Thanks in advance.
This line
vec2 firstMemb = expCalc(finalPosition);
is trying to pass a vec4 (finalPosition) to a function that takes a vec2.
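A minimal fix, assuming the intent was to pass the complex number stored in the x and y components, is to swizzle the argument down to a vec2:
// Pass only the x/y components so the argument matches the vec2 parameter.
vec2 firstMemb = expCalc(finalPosition.xy);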
I have this shadertoy at:
https://www.shadertoy.com/view/4sVfz1
I seem to understand most of the routines of volume ray marching, but I am missing something in my ray marching function that should compute transparency according to the accumulated density at a point.
Here is my ray marching function:
float ray( vec3 ro, vec3 rd, out float d, out float den )
{
float t = 0.0; d = EPS; den = 0.0;
for( int i = 0; i < STEPS; ++i )
{
vec3 p = ro + rd * t;
d = 0.3 * map( p, den ).x;
if( d < EPS || den > 1.0 || t > FAR ) break;
t += d;
den = fbm( p + TWA );
den += clamp( den, -1.0, 0.0 ) * t * t;
}
return t;
}
The proper ray marching function is this one:
float ray( vec3 ro, vec3 rd, out float den )
{
float t = 0.0, maxD = 0.0; den = 0.0;
for( int i = 0; i < STEPS; ++i )
{
vec3 p = ro + rd * t;
den = map( p );
maxD = maxD < den ? den : maxD;
if( maxD > 0.99 || t > FAR ) break;
t += 0.05;
}
den = maxD;
return t;
}
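For context, the density returned through den is what then drives the transparency when compositing. The snippet below is only a minimal sketch of that idea, not code from the linked shader; ro, rd and the two colours are assumed names.
// Illustrative compositing sketch: the accumulated density acts as opacity.
float den;
float t = ray( ro, rd, den );                        // ro/rd: assumed camera ray
vec3 bgCol    = vec3( 0.55, 0.7, 0.9 );              // assumed background colour
vec3 cloudCol = vec3( 1.0 );                         // assumed cloud colour
vec3 col = mix( bgCol, cloudCol, clamp( den, 0.0, 1.0 ) );  // denser => more opaque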
I'm having a problem with edge detection using the Sobel operator: it produces too many false edges; the effect is shown in the pictures below.
I'm using a 3x3 Sobel operator, first extracting vertical and then horizontal edges; the final output is the magnitude of each filter's output.
Edges in synthetic images are extracted properly, but natural images produce too many false edges, or "noise", even if the image is preprocessed with a blur or median filter.
What might be the cause of this? Is it an implementation problem (if so, why are synthetic images fine?), or do I need to do more preprocessing?
Original:
Output:
code:
void imageOp::filter(image8* image, int maskSize, int16_t *mask)
{
if((image == NULL) || (maskSize/2 == 0) || maskSize < 1)
{
if(image == NULL)
{
printf("filter: image pointer == NULL \n");
}
else if(maskSize < 1)
{
printf("filter: maskSize must be greater than 1\n");
}
else
{
printf("filter: maskSize must be odd number\n");
}
return;
}
image8* fImage = new image8(image->getHeight(), image->getWidth());
uint16_t sum = 0;
int d = maskSize/2;
int ty, tx;
for(int x = 0; x < image->getHeight(); x++) //
{ // loop over image
for(int y = 0; y < image->getWidth(); y++) //
{
for(int xm = -d; xm <= d; xm++)
{
for(int ym = -d; ym <= d; ym++)
{
ty = y + ym;
if(ty < 0) // edge conditions
{
ty = (-1)*ym - 1;
}
else if(ty >= image->getWidth())
{
ty = image->getWidth() - ym;
}
tx = x + xm;
if(tx < 0) // edge conditions
{
tx = (-1)*xm - 1;
}
else if(tx >= image->getHeight())
{
tx = image->getHeight() - xm;
}
sum += image->img[tx][ty] * mask[((xm+d)*maskSize) + ym + d];
}
}
if(sum > 255)
{
fImage->img[x][y] = 255;
}
else if(sum < 0)
{
fImage->img[x][y] = 0;
}
else
{
fImage->img[x][y] = (uint8_t)sum;
}
sum = 0;
}
}
for(int x = 0; x < image->getHeight(); x++)
{
for(int y = 0; y < image->getWidth(); y++)
{
image->img[x][y] = fImage->img[x][y];
}
}
delete fImage;
}
This appears to be due to a math error somewhere in your code. To follow up on my comment, this is what I get when I run your image through a Sobel operator here (edge strength is indicated by the brightness of the output image):
I used a GLSL fragment shader to produce this:
precision mediump float;
varying vec2 textureCoordinate;
varying vec2 leftTextureCoordinate;
varying vec2 rightTextureCoordinate;
varying vec2 topTextureCoordinate;
varying vec2 topLeftTextureCoordinate;
varying vec2 topRightTextureCoordinate;
varying vec2 bottomTextureCoordinate;
varying vec2 bottomLeftTextureCoordinate;
varying vec2 bottomRightTextureCoordinate;
uniform sampler2D inputImageTexture;
void main()
{
float bottomLeftIntensity = texture2D(inputImageTexture, bottomLeftTextureCoordinate).r;
float topRightIntensity = texture2D(inputImageTexture, topRightTextureCoordinate).r;
float topLeftIntensity = texture2D(inputImageTexture, topLeftTextureCoordinate).r;
float bottomRightIntensity = texture2D(inputImageTexture, bottomRightTextureCoordinate).r;
float leftIntensity = texture2D(inputImageTexture, leftTextureCoordinate).r;
float rightIntensity = texture2D(inputImageTexture, rightTextureCoordinate).r;
float bottomIntensity = texture2D(inputImageTexture, bottomTextureCoordinate).r;
float topIntensity = texture2D(inputImageTexture, topTextureCoordinate).r;
float h = -topLeftIntensity - 2.0 * topIntensity - topRightIntensity + bottomLeftIntensity + 2.0 * bottomIntensity + bottomRightIntensity;
float v = -bottomLeftIntensity - 2.0 * leftIntensity - topLeftIntensity + bottomRightIntensity + 2.0 * rightIntensity + topRightIntensity;
float mag = length(vec2(h, v));
gl_FragColor = vec4(vec3(mag), 1.0);
You don't show your mask values, which I assume contain the Sobel kernel. In the above code, I've hardcoded the calculations performed against the red channel of each pixel in a 3x3 Sobel kernel. This is purely for performance on my platform.
One thing I don't notice in your code (again, I may be missing it, like I did the sum being set back to 0) is the calculation of the magnitude of the vector formed by the two portions of the Sobel operator, i.e. something like sqrt(h*h + v*v) once both the horizontal and vertical kernels have been applied to the same pixel. If that were present, I'd expect to see a square root operation in there somewhere.