Currently I have problems handling uniforms in GLSL shaders. If I have only one object in the scene, the uniform works as I expect. However, with multiple objects the uniform is not set per object; in other words, the last uniform value is applied to all scene objects.
How can I handle this problem? My C++ code follows:
void addSimpleObject(osg::ref_ptr<osg::Group> root, osg::Vec3 position, float radius, float myUniform) {
osg::ref_ptr<osg::Geode> geode = new osg::Geode();
geode->addDrawable(new osg::ShapeDrawable(new osg::Sphere(position, radius)));
root->addChild(geode);
root->getChild(0)->asGeode()->addDrawable(geode->getDrawable(0));
osg::ref_ptr<osg::StateSet> stateset = new osg::StateSet();
stateset->addUniform(new osg::Uniform("myUniform", myUniform));
root->setStateSet(stateset);
}
osg::ref_ptr<osg::Group> root = new osg::Group();
addSimpleObject(root, osg::Vec3(-3.0, 2.5, -10), 2, 0.5);
addSimpleObject(root, osg::Vec3( 3.0, 2.5, -10), 2, 1.5);
addSimpleObject(root, osg::Vec3( 0.0, -2.5, -10), 2, 1.0);
The vertex shader:
#version 130
out vec3 pos;
out vec3 normal;
void main() {
pos = (gl_ModelViewMatrix * gl_Vertex).xyz;
normal = gl_NormalMatrix * gl_Normal;
gl_Position = gl_ModelViewProjectionMatrix * gl_Vertex;
}
And the fragment shader:
#version 130
in vec3 pos;
in vec3 normal;
uniform float myUniform;
void main() {
vec3 normNormal;
normNormal = normalize(normal);
if (myUniform > 0)
normNormal = min(normNormal * myUniform, 1.0);
...
}
A uniform is bound to a shader program, not to an "object".
If you use the same program for all objects, you have to set the uniform before drawing each object.
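In plain OpenGL terms that would look like the following minimal sketch (not OSG; program, myUniformLoc and the draw functions are placeholders for illustration):
glUseProgram(program);
GLint myUniformLoc = glGetUniformLocation(program, "myUniform");
// set the uniform, then draw the first object
glUniform1f(myUniformLoc, 0.5f);
drawFirstSphere();   // hypothetical draw call
// change the uniform before drawing the next object
glUniform1f(myUniformLoc, 1.5f);
drawSecondSphere();  // hypothetical draw call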
You bound the osg::StateSet to the root node of the scene graph.
If the value of the uniform variable changes for each drawable, you have to add
a separate osg::StateSet to each osg::Drawable.
Adapt your method addSimpleObject like this:
void addSimpleObject(osg::ref_ptr<osg::Group> root, osg::Vec3 position, float radius, float myUniform) {
// create the drawable
osg::ref_ptr<osg::Drawable> drawable = new osg::ShapeDrawable(new osg::Sphere(position, radius));
// create the stateset and add the uniform
osg::ref_ptr<osg::StateSet> stateset = new osg::StateSet();
stateset->addUniform(new osg::Uniform("myUniform", myUniform));
// add the stateset to the drawable
drawable->setStateSet(stateset);
if (root->getNumChildren() == 0) {
osg::ref_ptr<osg::Geode> geode = new osg::Geode();
root->addChild(geode);
}
root->getChild(0)->asGeode()->addDrawable(drawable);
}
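A minimal usage sketch, assuming the shader program is attached once at the root (the osg::Program/osg::Shader setup and the vertexSource/fragmentSource strings are illustrative, not taken from the question):
osg::ref_ptr<osg::Group> root = new osg::Group();
// attach the shader program once, at the root
osg::ref_ptr<osg::Program> program = new osg::Program();
program->addShader(new osg::Shader(osg::Shader::VERTEX, vertexSource));
program->addShader(new osg::Shader(osg::Shader::FRAGMENT, fragmentSource));
root->getOrCreateStateSet()->setAttributeAndModes(program, osg::StateAttribute::ON);
// each drawable now carries its own value of "myUniform"
addSimpleObject(root, osg::Vec3(-3.0, 2.5, -10), 2, 0.5);
addSimpleObject(root, osg::Vec3( 3.0, 2.5, -10), 2, 1.5);
addSimpleObject(root, osg::Vec3( 0.0, -2.5, -10), 2, 1.0);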
In a similar way to this related question, I am trying to render complex shapes by means of ray-tracing inside a cube: that is, 12 triangles are used to generate a bounding box, and each fragment is used to render the given shape by ray-tracing.
For this example, I am using the easiest shape: a sphere.
The problem is that when the cube is rotated, the different triangle angles distort the sphere:
What I have attempted so far:
I tried doing the ray-tracing in world space, and also in view space as suggested in the related question.
I checked that the world coordinate of the fragment is correct by doing a reverse projection from gl_FragCoord, which gives the same output.
I switched to orthographic projection, where the distortion is reversed:
My conclusion is that, as described in the related question, the interpolation of the coordinates and the projection are the origin of the problem.
I could project the cube onto a plane perpendicular to the camera direction, but I would like to understand the root of the problem.
Related code:
Vertex shader:
#version 420 core
uniform mat4 model;
uniform mat4 view;
uniform mat4 projection;
in vec3 in_Position;
in vec3 in_Normal;
out Vertex
{
vec4 worldCoord;
vec4 worldNormal;
} v;
void main(void)
{
mat4 mv = view * model;
// Position
v.worldCoord = model * vec4(in_Position, 1.0);
gl_Position = projection * mv * vec4(in_Position, 1.0);
// Normal
v.worldNormal = normalize(vec4(in_Normal, 0.0));
}
Fragment shader:
#version 420 core
uniform mat4 view;
uniform vec3 cameraPosView;
in Vertex
{
vec4 worldCoord;
vec4 worldNormal;
} v;
out vec4 out_Color;
bool sphereIntersection(vec4 rayOrig, vec4 rayDirNorm, vec4 spherePos, float radius, out float t_out)
{
float r2 = radius * radius;
vec4 L = spherePos - rayOrig;
float tca = dot(L, rayDirNorm);
float d2 = dot(L, L) - tca * tca;
if(d2 > r2)
{
return false;
}
float thc = sqrt(r2 - d2);
float t0 = tca - thc;
float t1 = tca + thc;
if (t0 > 0)
{
t_out = t0;
return true;
}
if (t1 > 0)
{
t_out = t1;
return true;
}
return false;
}
void main()
{
vec3 color = vec3(1);
vec4 spherePos = vec4(0.0, 0.0, 0.0, 1.0);
float radius = 1.0;
float t_out=0.0;
vec4 cord = v.worldCoord;
vec4 rayOrig = (inverse(view) * vec4(-cameraPosView, 1.0));
vec4 rayDir = normalize(cord-rayOrig);
if (sphereIntersection(rayOrig, rayDir, spherePos, 0.3, t_out))
{
out_Color = vec4(1.0);
}
else
{
discard;
}
}
What I want to achieve is to render many small quads with the OpenGL function glDrawArraysInstanced, with the same spacing between them. For example, please refer to the following image:
The code is as follows:
void OpenGLShowVideo::displayBySmallMatrix()
{
// Now use QOpenGLExtraFunctions instead of QOpenGLFunctions as we want to
// do more than what GL(ES) 2.0 offers.
QOpenGLExtraFunctions *f = QOpenGLContext::currentContext()->extraFunctions();
f->glClearColor(9.f/255.0f, 14.f/255.0f, 15.f/255.0f, 1);
glClear(GL_COLOR_BUFFER_BIT);
f->glViewport(0, 0, this->width(), this->height());
m_displayByMatrixProgram->bind();
f->glActiveTexture(GL_TEXTURE0 + m_acRenderToScreenTexUnit);
f->glBindTexture(GL_TEXTURE_2D, m_renderWithMaskFbo->texture());
if (m_uniformsDirty) {
m_uniformsDirty = false;
m_displayByMatrixProgram->setUniformValue(m_samplerLoc, m_acRenderToScreenTexUnit);
m_proj.setToIdentity();
m_proj.perspective(INIT_VERTICAL_ANGLE, float(this->width()) / float(this->height()), m_fNearPlane, m_fFarPlane);
m_displayByMatrixProgram->setUniformValue(m_projMatrixLoc, m_proj);
QMatrix4x4 camera;
camera.lookAt(m_eye, m_eye + m_target, QVector3D(0, 1, 0));
m_displayByMatrixProgram->setUniformValue(m_camMatrixLoc, camera);
m_world.setToIdentity();
float fOffsetZ = m_fVerticalAngle / INIT_VERTICAL_ANGLE;
m_world.translate(m_fMatrixOffsetX, m_fMatrixOffsetY, fOffsetZ);
m_proj.scale(MATRIX_INIT_SCALE_X, MATRIX_INIT_SCALE_Y, 1.0f);
m_world.rotate(180, 1, 0, 0);
QMatrix4x4 wm = m_world;
m_displayByMatrixProgram->setUniformValue(m_worldMatrixLoc, wm);
QMatrix4x4 mm;
mm.setToIdentity();
m_displayByMatrixProgram->setUniformValue(m_myMatrixLoc, mm);
m_displayByMatrixProgram->setUniformValue(m_lightPosLoc, QVector3D(0, 0, 70));
QSize tmpSize = QSize(m_viewPortWidth, m_viewPortHeight);
m_displayByMatrixProgram->setUniformValue(m_resolutionLoc, tmpSize);
int whRatioVal = m_viewPortWidth / m_viewPortHeight;
m_displayByMatrixProgram->setUniformValue(m_whRatioLoc, whRatioVal);
}
m_geometries->bindBufferForArraysInstancedDraw();
f->glDrawArraysInstanced(GL_TRIANGLE_STRIP, 0, 4, m_viewPortWidth * m_viewPortHeight);
}
And the vertex shader code is as follows:
#version 330
layout(location = 0) in vec4 vertex;
out vec3 color;
uniform mat4 mvp_matrix;
uniform mat4 projMatrix;
uniform mat4 camMatrix;
uniform mat4 worldMatrix;
uniform mat4 myMatrix;
uniform vec2 viewResolution;
uniform int whRatio;
uniform sampler2D sampler;
void main() {
int posX = gl_InstanceID % int(viewResolution.x);
int posY = gl_InstanceID / int(viewResolution.y);
if( posY % whRatio < whRatio) {
posY = gl_InstanceID / int(viewResolution.x);
}
ivec2 pos = ivec2(posX, posY);
vec2 t = vec2( pos.x * 3.0, pos.y * 3.0 );
mat4 wm = mat4(1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, t.x, t.y, 1, 1) * worldMatrix;
color = texelFetch(sampler,pos,0).rgb;
gl_Position = projMatrix * camMatrix * wm * vertex;
}
And the fragment shader is as follows:
#version 330 core
in vec3 color;
out vec4 fragColor;
void main() {
fragColor = vec4(color, 1.0);
}
However, when I move the camera away from the screen (by changing the "m_eye" parameter value in camera.lookAt(m_eye, m_eye + m_target, QVector3D(0, 1, 0))), I get something like this:
The spacing between the quads differs, and the size of the quads differs as well. But when I move the camera closer to the screen, it looks much better.
I think what you're seeing there is the result of rounding the coordinates to the nearest integer pixel coordinate.
To get something that looks more even, you want to use some form of anti-aliasing. The options that spring to mind are:
Enable some sort of full-screen anti-aliasing like MSAA (see the sketch after these options). This is simple to enable, but can have a significant performance cost.
Put your pattern in a texture, and tile that texture over a single quad. Texture filtering and mip maps should take care of the anti-aliasing for you, and it will probably be faster to render that way as well because you only need a single quad.
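For the first option, in a Qt-based setup like this one, MSAA can be requested on the default surface format before the window/context is created; a minimal sketch (4 samples is an arbitrary choice):
QSurfaceFormat fmt = QSurfaceFormat::defaultFormat();
fmt.setSamples(4);                      // request 4x MSAA
QSurfaceFormat::setDefaultFormat(fmt);  // must be done before the OpenGL window/context is created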
The normal mapping looks great when the objects aren't rotated from the origin, and spot lights and directional lights work, but when I spin an object on the spot it darkens and then lightens again, just on the top face.
I'm testing using a cube. I've used a geometry shader to visualise my calculated normals (after multiplying by a TBN matrix), and they appear to be in the correct places. If I take the normal map out of the equation then the lighting is fine.
Here's where the TBN is calculated:
void calculateTBN()
{
//get the normal matrix
mat3 model = mat3(transpose(inverse(mat3(transform))));
vec3 T = normalize(vec3(model * tangent.xyz ));
vec3 N = normalize(vec3(model * normal ));
vec3 B = cross(N, T);
mat3 TBN = mat3( T , B , N);
outputVertex.TBN =TBN;
}
And the normal is sampled and transformed:
vec3 calculateNormal()
{
//Remap the input so that the normal is between 1 and -1 instead of 0 and 1
vec3 input = texture2D(normalMap, inputFragment.textureCoord).xyz;
input = 2.0 * input - vec3(1.0, 1.0, 1.0);
vec3 newNormal = normalize(inputFragment.TBN* input);
return newNormal;
}
My lighting is in world space (as far as I understand the term, it takes into account the transform matrix but not the camera or projection matrix).
I did try the technique where I pass down the TBN as inverse (or transpose) and then multiplied every vector apart from the normal by it. That had the same effect. I'd rather work in world space anyway, as apparently this is better for deferred lighting, or so I've heard.
If you'd like to see any of the lighting code and so on I'll add it in but I didn't think it was necessary as it works apart from this.
EDIT:
As requested, here is vertex and part of frag shader
#version 330
uniform mat4 T; // Translation matrix
uniform mat4 S; // Scale matrix
uniform mat4 R; // Rotation matrix
uniform mat4 camera; // camera matrix
uniform vec4 posRelParent; // the position relative to the parent
// Input vertex packet
layout (location = 0) in vec4 position;
layout (location = 2) in vec3 normal;
layout (location = 3) in vec4 tangent;
layout (location = 4) in vec4 bitangent;
layout (location = 8) in vec2 textureCoord;
// Output vertex packet
out packet {
vec2 textureCoord;
vec3 normal;
vec3 vert;
mat3 TBN;
vec3 tangent;
vec3 bitangent;
vec3 normalTBN;
} outputVertex;
mat4 transform;
mat3 TBN;
void calculateTBN()
{
//get the normal matrix: the transform of the object with scaling and translation removed
mat3 model = mat3(transpose(inverse(transform)));
vec3 T = normalize(model*tangent.xyz);
vec3 N = normalize(model*normal);
//I used to retrieve the bitangents by crossing the normal and tangent but now they are calculated independently
vec3 B = normalize(model*bitangent.xyz);
TBN = mat3( T , B , N);
outputVertex.TBN = TBN;
//Pass though TBN vectors for colour debugging in the fragment shader
outputVertex.tangent = T;
outputVertex.bitangent = B;
outputVertex.normalTBN = N;
}
void main(void) {
outputVertex.textureCoord = textureCoord;
// Setup local variable pos in case we want to modify it (since position is constant)
vec4 pos = vec4(position.x, position.y, position.z, 1.0) + posRelParent;
//Work out the transform matrix
transform = T * R * S;
//Work out the normal for lighting
mat3 normalMat = transpose(inverse(mat3(transform)));
outputVertex.normal = normalize(normalMat* normal);
calculateTBN();
outputVertex.vert =(transform* pos).xyz;
//Work out the final pos of the vertex
gl_Position = camera * transform * pos;
}
And the lighting function from the fragment shader:
vec3 applyLight(Light thisLight, vec3 baseColor, vec3 surfacePos, vec3 surfaceToCamera)
{
float attenuation = 1.0f;
vec3 lightPos = (thisLight.finalLightMatrix*thisLight.position).xyz;
vec3 surfaceToLight;
vec3 coneDir = normalize(thisLight.coneDirection);
if (thisLight.position.w == 0.0f)
{
//Directional Light (all rays same angle, use position as direction)
surfaceToLight = normalize( (thisLight.position).xyz);
attenuation = 1.0f;
}
else
{
//Point light
surfaceToLight = normalize(lightPos - surfacePos);
float distanceToLight = length(lightPos - surfacePos);
attenuation = 1.0 / (1.0f + thisLight.attenuation * pow(distanceToLight, 2));
//Work out the Cone restrictions
float lightToSurfaceAngle = degrees(acos(dot(-surfaceToLight, normalize(coneDir))));
if (lightToSurfaceAngle > thisLight.coneAngle)
{
attenuation = 0.0;
}
}
}
Here's the main of the frag shader too:
void main(void) {
//get the base colour from the texture
vec4 tempFragColor = texture2D(textureImage, inputFragment.textureCoord).rgba;
//Support for objects with and without a normal map
if (useNormalMap == 1)
{
calcedNormal = calculateNormal();
}
else
{
calcedNormal = inputFragment.normal;
}
vec3 surfaceToCamera = normalize((cameraPos_World) - (inputFragment.vert));
vec3 tempColour = vec3(0.0, 0.0, 0.0);
for (int count = 0; count < numLights; count++)
{
tempColour += applyLight(allLights[count], tempFragColor.xyz, inputFragment.vert, surfaceToCamera);
}
vec3 gamma = vec3(1.0 / 2.2);
fragmentColour = vec4(pow(tempColour,gamma), tempFragColor.a);
//fragmentColour = vec4(calcedNormal, 1);
}
Edit 2:
The geometry shader used to visualize the "sampled" normals transformed by the TBN matrix, as shown here:
void GenerateLineAtVertex(int index)
{
vec3 testSampledNormal = vec3(0, 0, 1);
vec3 bitangent = cross(gs_in[index].normal, gs_in[index].tangent);
mat3 TBN = mat3(gs_in[index].tangent, bitangent, gs_in[index].normal);
testSampledNormal = TBN * testSampledNormal;
gl_Position = gl_in[index].gl_Position;
EmitVertex();
gl_Position =
gl_in[index].gl_Position
+ vec4(testSampledNormal, 0.0) * MAGNITUDE;
EmitVertex();
EndPrimitive();
}
And its vertex shader:
void main(void) {
// Setup local variable pos in case we want to modify it (since position is constant)
vec4 pos = vec4(position.x, position.y, position.z, 1.0);
mat4 transform = T* R * S;
// Apply transformation to pos and store result in gl_Position
gl_Position = projection* camera* transform * pos;
mat3 normalMatrix = mat3(transpose(inverse(camera * transform)));
vs_out.tangent = normalize(vec3(projection * vec4(normalMatrix * tangent.xyz, 0.0)));
vs_out.normal = normalize(vec3(projection * vec4(normalMatrix * normal , 0.0)));
}
Here are the TBN vectors visualized. The slight angles on the points are due to an issue with how I'm applying the projection matrix, rather than mistakes in the actual vectors. The red lines just show where the arrows I've drawn on the texture are; they're not very clear from that angle, that's all.
Problem Solved!
It actually had nothing to do with the code above, although thanks to everyone who helped.
I was importing the texture using my own texture loader, which by default uses sRGB colour in 32 bit. I switched it to 24-bit, plain RGB colour and it worked straight away. Typical developer problems...
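For anyone hitting the same issue: the point is to upload normal maps with a linear internal format rather than an sRGB one, so the driver does not gamma-expand the sampled values. A minimal raw-OpenGL sketch of the idea (normalMapTex, width, height and pixels are placeholders, not from the original loader):
glBindTexture(GL_TEXTURE_2D, normalMapTex);
// linear data: GL_RGB8 (or GL_RGBA8), NOT GL_SRGB8 / GL_SRGB8_ALPHA8
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB8, width, height, 0, GL_RGB, GL_UNSIGNED_BYTE, pixels);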
I'm designing a sprite class, and I would like to display only a color if no texture is loaded.
Here is my vertex shader:
#version 330 core
layout (location = 0) in vec4 vertex; // <vec2 pos, vec2 tex>
out vec2 vs_tex_coords;
uniform mat4 model;
uniform mat4 projection;
void main()
{
vs_tex_coords = vertex.zw;
gl_Position = projection * model * vec4(vertex.xy, 0.0, 1.0);
}
And the fragment shader:
#version 330 core
in vec2 vs_tex_coords;
out vec4 fs_color;
uniform sampler2D image;
uniform vec3 sprite_color;
void main()
{
fs_color = vec4(sprite_color, 1.0) * texture(image, vs_tex_coords);
}
My problem is that if I don't bind a texture, it displays only a black sprite. I think the problem is that the texture function in my fragment shader returns 0 and screws up the whole formula.
Is there a way to know if the sampler2D is not initialized or null, and just return the sprite_color?
A sampler cannot be "empty". A valid texture must be bound to the texture units referenced by each sampler in order for rendering to have well-defined behavior.
But that doesn't mean you have to read from the texture that's bound there. It's perfectly valid to use a uniform value to tell the shader whether to read from the texture or not.
But you still have to bind a simple, 1x1 texture there. Indeed, you can use textureSize on the sampler; if it is a 1x1 texture, then don't bother to read from it. Note that this might be slower than using a uniform.
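A minimal sketch of creating such a 1x1 dummy texture with plain OpenGL calls (the variable names are illustrative): since the single texel is pure white, texture() returns vec4(1.0) and the multiplication leaves sprite_color unchanged.
GLuint whiteTex;
glGenTextures(1, &whiteTex);
glBindTexture(GL_TEXTURE_2D, whiteTex);
const unsigned char whitePixel[4] = { 255, 255, 255, 255 };
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA8, 1, 1, 0, GL_RGBA, GL_UNSIGNED_BYTE, whitePixel);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);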
Below are the two versions, with and without an if... else... conditional statement. The conditional statement avoids having to sample the texture when it is not used.
The uniform int textureSample is set to 1 or 0 to show the texture or the color, respectively. Both uniform variables are normally set by the program, not the shader.
uniform int textureSample = 1;
uniform vec3 color = vec3(1.0, 1.0, 0.0);
void main() { // without if... else...
// ...
vec3 materialDiffuseColor = textureSample * texture( textureSampler, fragmentTexture ).rgb - (textureSample - 1) * color;
// ...
}
void main() { // with if... else...
// ...
if (textureSample == 1) { // 1 if texture, 0 if color
vec3 materialDiffuseColor = textureSample * texture( textureSampler, fragmentTexture ).rgb;
vec3 materialAmbientColor = vec3(0.5, 0.5, 0.5) * materialDiffuseColor;
vec3 materialSpecularColor = vec3(0.3, 0.3, 0.3);
gl_FragColor = vec4(brightness *
(materialAmbientColor +
materialDiffuseColor * lightPowerColor * cosTheta / distanceLight2 +
materialSpecularColor * lightPowerColor * pow(cosAlpha, 10000) / distanceLight2), 1.0);
}
else {
vec3 materialDiffuseColor = color;
vec3 materialAmbientColor = vec3(0.5, 0.5, 0.5) * materialDiffuseColor;
vec3 materialSpecularColor = vec3(0.3, 0.3, 0.3);
gl_FragColor = vec4(brightness *
(materialAmbientColor +
materialDiffuseColor * lightPowerColor * cosTheta / distanceLight2 +
materialSpecularColor * lightPowerColor * pow(cosAlpha, 10000) / distanceLight2), 1.0);
}
// ...
}
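On the application side, the flag would be set per draw call with something along these lines (a sketch, assuming program is the linked shader program and hasTexture is known by the caller):
glUseProgram(program);
glUniform1i(glGetUniformLocation(program, "textureSample"), hasTexture ? 1 : 0);
glUniform3f(glGetUniformLocation(program, "color"), 1.0f, 1.0f, 0.0f);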
I'd check the length of the rgb value from the diffuse texture. This won't work on a specular map, though:
vec3 texDiffuseCol = texture2D(diffuseTex, TexCoord).rgb;
if(length(texDiffuseCol) == 0.0)
{
//Texture not present
}else
{
//Texture present
}
Why does my light move with my camera? In my draw scene function I set my light source position, then I set up my matrix, translate the "camera", then a sphere, and after that two cubes. When I move the camera around along with the first cube, the light source moves with it...
function drawScene() {
gl.viewport(0, 0, gl.viewportWidth, gl.viewportHeight);
gl.clear(gl.COLOR_BUFFER_BIT | gl.DEPTH_BUFFER_BIT);
mat4.perspective(45, gl.viewportWidth / gl.viewportHeight, 0.1, 100.0, pMatrix);
//currentProgram = perFragmentProgram;
currentProgram = perVertexProgram;
gl.useProgram(currentProgram);
gl.uniform3f(
currentProgram.ambientColorUniform,
parseFloat(document.getElementById("ambientR").value),
parseFloat(document.getElementById("ambientG").value),
parseFloat(document.getElementById("ambientB").value)
);
gl.uniform3f(
currentProgram.pointLightingLocationUniform,
parseFloat(document.getElementById("lightPositionX").value),
parseFloat(document.getElementById("lightPositionY").value),
parseFloat(document.getElementById("lightPositionZ").value)
);
gl.uniform3f(
currentProgram.pointLightingColorUniform,
parseFloat(document.getElementById("pointR").value),
parseFloat(document.getElementById("pointG").value),
parseFloat(document.getElementById("pointB").value)
);
mat4.identity(mvMatrix);
//Camera
mat4.translate(mvMatrix, [-xPos, -yPos, -10]);
mat4.rotate(mvMatrix, degToRad(180), [0, 1, 0]);
//Sphere
mvPushMatrix();
mat4.rotate(mvMatrix, degToRad(moonAngle), [0, 1, 0]);
mat4.translate(mvMatrix, [5, 0, 0]);
gl.bindBuffer(gl.ARRAY_BUFFER, moonVertexPositionBuffer);
gl.vertexAttribPointer(currentProgram.vertexPositionAttribute, moonVertexPositionBuffer.itemSize, gl.FLOAT, false, 0, 0);
gl.bindBuffer(gl.ARRAY_BUFFER, moonVertexNormalBuffer);
gl.vertexAttribPointer(currentProgram.vertexNormalAttribute, moonVertexNormalBuffer.itemSize, gl.FLOAT, false, 0, 0);
gl.bindBuffer(gl.ELEMENT_ARRAY_BUFFER, moonVertexIndexBuffer);
setMatrixUniforms();
gl.drawElements(gl.TRIANGLES, moonVertexIndexBuffer.numItems, gl.UNSIGNED_SHORT, 0);
mvPopMatrix();
//Cube 1
object.render(xPos, yPos);
//Cube 2
object2.render(0, 5);
}
And my shaders look like this.
<script id="per-vertex-lighting-fs" type="x-shader/x-fragment">
precision mediump float;
varying vec3 vLightWeighting;
void main(void) {
vec4 fragmentColor;
fragmentColor = vec4(1.0, 1.0, 1.0, 1.0);
gl_FragColor = vec4(fragmentColor.rgb * vLightWeighting, fragmentColor.a);
}
</script>
<script id="per-vertex-lighting-vs" type="x-shader/x-vertex">
attribute vec3 aVertexPosition;
attribute vec3 aVertexNormal;
uniform mat4 uMVMatrix;
uniform mat4 uPMatrix;
uniform mat3 uNMatrix;
uniform vec3 uAmbientColor;
uniform vec3 uPointLightingLocation;
uniform vec3 uPointLightingColor;
uniform bool uUseLighting;
varying vec2 vTextureCoord;
varying vec3 vLightWeighting;
void main(void) {
vec4 mvPosition = uMVMatrix * vec4(aVertexPosition, 1.0);
gl_Position = uPMatrix * mvPosition;
vec3 lightDirection = normalize(uPointLightingLocation - mvPosition.xyz);
vec3 transformedNormal = uNMatrix * aVertexNormal;
float directionalLightWeighting = max(dot(transformedNormal, lightDirection), 0.0);
vLightWeighting = uAmbientColor + uPointLightingColor * directionalLightWeighting;
}
</script>
What can I do to stop the light from being moved around, so that it stays static?
uPointLightingLocation must be in eye space, matching transformedNormal which you're comparing it to with the dot product.
Multiply lightPosition (assuming it's in world space) by the view/camera matrix. It'll be cheaper to do this outside the shader as the value does not change during the render.
The view matrix already exists in your code mid-way through the model-view construction.
The //Camera block is the view and the //Sphere block multiplies in the model transform. To extract just the view, copy mvMatrix between your Camera and Sphere transform blocks (or just transform the light then and there).
//untested, but something along these lines
var worldSpaceLight = vec4.fromValues( //not sure which lib you're using
parseFloat(document.getElementById("lightPositionX").value),
parseFloat(document.getElementById("lightPositionY").value),
parseFloat(document.getElementById("lightPositionZ").value),
1.0
);
...
//Camera
...
var eyeSpaceLight = vec4.create();
vec4.transformMat4(eyeSpaceLight, worldSpaceLight, mvMatrix);
gl.uniform3f(currentProgram.pointLightingLocationUniform, eyeSpaceLight[0], eyeSpaceLight[1], eyeSpaceLight[2]);
//Sphere
...