struct quadricObj {
    GLUquadricObj* obj;
    GLenum drawmode{ GLU_FILL };
    GLdouble radius{ 1.0 };
    GLint slices{ 20 };
    GLint stacks{ 20 };
    glm::vec3 col{ 1.0, 0.0, 0.0 };
    std::vector<glm::mat4> M;

    glm::mat4 world_M() {
        glm::mat4 WM(1.0f);
        std::for_each(this->M.begin(), this->M.end(), [&WM](glm::mat4& m) { WM *= m; });
        // M0 * M1 * M2 (TRS)
        return WM;
    }

    GLvoid draw() {
        gluQuadricDrawStyle(this->obj, this->drawmode);
        glUniformMatrix4fv(worldLoc, 1, GL_FALSE, glm::value_ptr(this->world_M()));
        glColor4f(this->col.r, this->col.g, this->col.b, 1.0f); // doesn't work.
        gluSphere(this->obj, this->radius, this->slices, this->stacks);
    }
};
This is the struct I use for quadric objects. I think glColor4f should work, but it doesn't; the quadrics stay black.
How can I color quadrics in OpenGL?
#version 330

in vec3 v_normal;
in vec2 v_texCoord;
in vec3 v_color;
in vec3 fragPos;

out vec4 fragColor; // user-defined output; gl_FragColor may not be redeclared in GLSL 330

uniform vec3 lightColor;
uniform vec3 lightPos;
uniform vec3 viewPos;
uniform float ambientLight;
uniform int shine;

void main(void)
{
    vec3 ambient = clamp(ambientLight * lightColor, 0.0, 1.0);

    vec3 normalVector = normalize(v_normal);
    vec3 lightDir = normalize(lightPos - fragPos);
    float diffuseLight = max(dot(normalVector, lightDir), 0.0);
    vec3 diffuse = clamp(diffuseLight * lightColor, 0.0, 1.0);

    vec3 viewDir = normalize(viewPos - fragPos);
    vec3 reflectDir = reflect(-lightDir, normalVector);
    float specularLight = max(dot(viewDir, reflectDir), 0.0);
    specularLight = pow(specularLight, float(shine));
    vec3 specular = clamp(specularLight * lightColor, 0.0, 1.0);

    vec3 result = (ambient + diffuse) * v_color + specular * vec3(0.8); // (0.8,0.8,0.8) was the comma operator, not a vec3
    fragColor = vec4(result, 1.0);
}
I edited my fragment shader to contain a Phong model. Can this work with gluSphere too, or not? I'm also using a vertex shader, which has in variables for position, color, normal and texture coordinates, and corresponding outputs.
gluSphere cannot be used together with user-defined vertex shader input variables (attributes). You are limited to a GLSL 1.20 vertex shader and the built-in vertex attributes (gl_Vertex, gl_Normal, gl_Color, gl_MultiTexCoord0, ...). You can combine such a GLSL 1.20 vertex shader with your fragment shader.
A suitable vertex shader can look like this:
#version 120

varying vec3 v_normal;
varying vec2 v_texCoord;
varying vec3 v_color;
varying vec3 fragPos;

uniform mat4 worldMatrix; // the matrix with the location worldLoc

void main()
{
    v_color = gl_Color.rgb;
    v_texCoord = gl_MultiTexCoord0.st;
    v_normal = mat3(worldMatrix) * gl_Normal;
    fragPos = (worldMatrix * gl_Vertex).xyz;
    gl_Position = gl_ProjectionMatrix * worldMatrix * gl_Vertex;
}
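With this pair of shaders, glColor4f works again: GLU emits the sphere through the fixed-function attributes, so the current color arrives in the vertex shader as gl_Color (this requires a compatibility profile context, which GLU needs anyway). A minimal sketch of the adjusted draw method, assuming worldLoc was queried with glGetUniformLocation for "worldMatrix" and the program is already bound:

GLvoid draw() {
    gluQuadricDrawStyle(this->obj, this->drawmode);
    gluQuadricNormals(this->obj, GLU_SMOOTH); // smooth per-vertex normals (the default) feed gl_Normal
    gluQuadricTexture(this->obj, GL_TRUE);    // texture coordinates are off by default; enable them for gl_MultiTexCoord0
    glUniformMatrix4fv(worldLoc, 1, GL_FALSE, glm::value_ptr(this->world_M()));
    glColor4f(this->col.r, this->col.g, this->col.b, 1.0f); // becomes gl_Color
    gluSphere(this->obj, this->radius, this->slices, this->stacks);
}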
I'm drawing some static geometry (a sphere, a cube, etc.) together with some dynamic geometry (a rotating torus).
I can see that there is a problem, because the specular highlight on the torus is static, and the torus is rendered dark as the rotation angle changes...
I'm targeting OpenGL 2.1 (desktop), OpenGL ES2 (mobile) and WebGL1 (web). Here is a gist with the full code. There is also a WebGL demo.
The framework used is chronotext-cross. It provides a level of abstraction above GL. It should be straightforward to understand. In any case I can provide pointers, as the author.
The C++ code, abridged:
void Sketch::setup()
{
    Box()
        .setFrontFace(CCW)
        .setColor(0.75f, 0.75f, 0.75f, 1)
        .setSize(300, 5, 300)
        .append(geometryBatch, Matrix().translate(-150, -5, -150));

    Sphere()
        .setFrontFace(CCW)
        .setColor(0.25f, 1.0f, 0.0f, 1)
        .setSectorCount(60)
        .setStackCount(30)
        .setRadius(40)
        .append(geometryBatch, Matrix().translate(-75, -40, 100));

    Torus()
        .setFrontFace(CCW)
        .setSliceCount(20)
        .setLoopCount(60)
        .setInnerRadius(12)
        .setOuterRadius(48)
        .append(torusBatch, Matrix());
}

void Sketch::resize()
{
    camera
        .setFov(45)
        .setClip(0.1f, 1000.0f)
        .setWindowSize(windowInfo.size);
}

void Sketch::draw()
{
    camera.getViewMatrix()
        .setIdentity()
        .scale(1, -1, 1)
        .translate(0, 0, -400)
        .rotateX(-30 * D2R)
        .rotateY(15 * D2R);

    State state;
    state
        .setShader(shader)
        .setShaderMatrix<MODEL>(Matrix())
        .setShaderMatrix<VIEW>(camera.getViewMatrix())
        .setShaderMatrix<PROJECTION>(camera.getProjectionMatrix())
        .setShaderMatrix<NORMAL>(camera.getNormalMatrix())
        .setShaderUniform("u_eye_position", camera.getEyePosition())
        .setShaderUniform("u_light_position", camera.getEyePosition())
        .setShaderUniform("u_shininess", 50.0f)
        .apply();

    geometryBatch.flush();

    Matrix modelMatrix;
    modelMatrix
        .translate(75, -60, 100)
        .rotateY(clock()->getTime());

    state
        .setShaderMatrix<MODEL>(modelMatrix)
        .apply();

    torusBatch.flush();
}
The vertex shader:
attribute vec4 a_position;
attribute vec3 a_normal;
attribute vec4 a_color;
attribute vec2 a_coord;

uniform mat4 u_model_matrix;
uniform mat4 u_view_matrix;
uniform mat4 u_projection_matrix;
uniform mat3 u_normal_matrix;
uniform vec3 u_eye_position;
uniform vec3 u_light_position;

varying vec3 v_normal;
varying vec4 v_color;
varying vec2 v_coord;
varying vec3 v_surface_to_light;
varying vec3 v_surface_to_view;

void main() {
    v_normal = u_normal_matrix * a_normal;
    v_color = a_color;
    v_coord = a_coord;

    v_surface_to_light = (u_view_matrix * (vec4(u_light_position, 1.0) - a_position)).xyz;
    v_surface_to_view = (u_view_matrix * (vec4(u_eye_position, 1.0) - a_position)).xyz;

    gl_Position = u_projection_matrix * u_view_matrix * u_model_matrix * a_position;
}
The fragment shader:
#ifdef GL_ES
precision highp float;
#endif

uniform sampler2D u_sampler;
uniform float u_shininess;

varying vec3 v_normal;
varying vec4 v_color;
varying vec2 v_coord;
varying vec3 v_surface_to_light;
varying vec3 v_surface_to_view;

void main() {
    vec3 normal = normalize(v_normal);
    vec3 surfaceToLightDirection = normalize(v_surface_to_light);
    vec3 surfaceToViewDirection = normalize(v_surface_to_view);
    vec3 halfVector = normalize(surfaceToLightDirection + surfaceToViewDirection);

    float specular = 0.0;
    float light = dot(normal, surfaceToLightDirection);
    if (light > 0.0) {
        specular = pow(dot(normal, halfVector), u_shininess);
    }

    vec4 color = v_color * texture2D(u_sampler, v_coord);
    gl_FragColor = vec4(color.rgb * light + specular, 1.0);
}
I found the solution: passing a new normal matrix (extracted from the model-view matrix) to the shader when drawing the dynamic mesh.
Matrix modelMatrix;
modelMatrix
    .translate(75, -60, 100)
    .rotateY(clock()->getTime());

Matrix modelViewMatrix = modelMatrix * camera.getViewMatrix();

state
    .setShaderMatrix<MODEL>(modelMatrix)
    .setShaderMatrix<NORMAL>(modelViewMatrix.getNormalMatrix())
    .apply();
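For context, the normal matrix is the transposed inverse of the upper-left 3x3 of the model-view matrix, which is why it goes stale as soon as the model matrix animates. A minimal sketch of the same computation with GLM (an assumption: the project above uses the framework's own Matrix class, and GLM multiplies in column-vector order, view * model):

#include <glm/glm.hpp>

// Rebuild the normal matrix whenever the model (or view) matrix changes.
// transpose(inverse(...)) also stays correct under non-uniform scaling;
// for pure rotation and translation the upper-left 3x3 alone would do.
glm::mat3 computeNormalMatrix(const glm::mat4& viewMatrix, const glm::mat4& modelMatrix)
{
    glm::mat3 modelView3(viewMatrix * modelMatrix); // GLM's column-vector order
    return glm::transpose(glm::inverse(modelView3));
}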
Sorry, I am new to OpenGL ES and Processing.
The Processing sketch and shaders below output only the background.
PShader Gouraud, Phong;

rocket = loadShape("rocket.obj");
rocket.setFill(color(800, 0, 0));
Gouraud = loadShader("gouraudfragment.glsl", "gouraudvertex.glsl");
Phong = loadShader("phongfragment.glsl", "phongvertex.glsl");

background(0);

pushMatrix();
shader(Gouraud);
translate(130, height/2.0);
rotateY(rc);
rotateX(0.4);
noStroke();
fill(#800080);
box(100);
rc += (0.02 + speedCube);
rc *= dirCube;
popMatrix();

pushMatrix();
shader(Gouraud);
translate(width/2, height/2 + 100, -200);
rotateZ(PI);
rotateY(rr);
shape(rocket, 100, 100);
rr += (0.02 + speedRocket);
rr *= dirRocket;
popMatrix();
Vertex shader:
varying vec3 N;
varying vec3 v;
varying vec4 diffuse;
varying vec4 spec;

attribute vec4 position;
attribute vec3 normal;

uniform mat4 modelview;
uniform mat4 projectionMatrix;
uniform mat3 normalMatrix;
uniform vec4 lightPosition;
uniform vec3 lightAmbient;
uniform vec3 lightDiffuse;
uniform vec3 lightSpecular;
uniform float SpecularPower;

void main()
{
    vec4 diffuse;
    vec4 spec;
    vec4 ambient;

    v = vec3(modelview * position);
    N = normalize(normalMatrix * normal);
    gl_Position = projectionMatrix * position;

    vec3 L = normalize(lightPosition.xyz - v);
    vec3 E = normalize(-v);
    vec3 R = normalize(reflect(-L, N));

    ambient = vec4(lightAmbient, 100.0);
    diffuse = vec4(clamp(lightDiffuse * max(dot(N, L), 0.0), 0.0, 1.0), 100.0);
    spec = vec4(clamp(lightSpecular * pow(max(dot(R, E), 0.0), 0.3 * SpecularPower), 0.0, 1.0), 100.0);

    color = ambient + diffuse + spec;
}
Fragment shader:
void main()
{
    gl_FragColor = color;
}
Please help!
(Screenshot: before applying Gouraud shading.)
(Screenshot: after applying Gouraud shading.)
The Processing sketch loads the obj and draws a cube, then the Gouraud shader is applied, but after that only the background is shown; the loaded obj and the cube are gone. Nothing is shown!
The shader doesn't even compile and link. The vertex shader has one varying output (color), so the fragment shader needs the corresponding input declaration varying vec4 color;.
varying vec4 color;
When you set the clip space position, the vertex coordinate has to be transformed by both the model-view and the projection matrix:
gl_Position = projectionMatrix * modelview * position;
The type specifications of v and N are missing, and ambient, diffuse and spec have to be vec4 rather than vec3.
Vertex shader:
attribute vec4 position;
attribute vec3 normal;

varying vec4 color;

uniform mat4 modelview;
uniform mat4 projectionMatrix;
uniform mat3 normalMatrix;
uniform vec4 lightPosition;
uniform vec3 lightAmbient;
uniform vec3 lightDiffuse;
uniform vec3 lightSpecular;
uniform float SpecularPower;

void main()
{
    vec3 v = vec3(modelview * position);
    vec3 N = normalize(normalMatrix * normal);
    gl_Position = projectionMatrix * modelview * position;

    vec3 L = normalize(lightPosition.xyz - v);
    vec3 E = normalize(-v);
    vec3 R = normalize(reflect(-L, N));

    vec4 ambient = vec4(lightAmbient, 100.0);
    vec4 diffuse = vec4(clamp(lightDiffuse * max(dot(N, L), 0.0), 0.0, 1.0), 100.0);
    vec4 spec = vec4(clamp(lightSpecular * pow(max(dot(R, E), 0.0), 0.3 * SpecularPower), 0.0, 1.0), 100.0);

    color = ambient + diffuse + spec;
}
Fragment shader:
varying vec4 color;

void main()
{
    gl_FragColor = color;
}
Of course you have to set at least an ambient light source with ambientLight(); you can use a directionalLight(), pointLight() or spotLight(), too. For example, a call like ambientLight(60, 60, 60) before drawing the geometry is enough to light the scene.
But note, your shader can handle one light source only. More than one light source would raise
OpenGL error 1282 at top endDraw(): invalid operation
If you want to use more than one light source, then you would have to use uniform arrays in the vertex shader for lightPosition, lightAmbient, lightDiffuse and lightSpecular. See Types of Shaders in Processing (https://processing.org/tutorials/pshader/).
So I've currently managed to write a shader using the Xoppa tutorials and an AssetManager, and I managed to bind a texture to the model, and it looks fine.
Now the next step, I guess, would be to create diffuse lighting (not sure if that's the word; Phong shading?) to give the bunny some form of shading. While I have a little bit of experience with GLSL shaders in LWJGL, I'm unsure how to process that same information so I can use it in libGDX and its GLSL shaders.
I understand that this all could be accomplished using the Environment class etc. But I want to achieve this through the shaders alone or by traditional means simply for the challenge.
In LWJGL, my shaders would have these inputs, outputs and uniforms:

in vec3 position;
in vec2 textureCoordinates;
in vec3 normal;

out vec2 pass_textureCoordinates;
out vec3 surfaceNormal;
out vec3 toLightVector;

uniform mat4 transformationMatrix;
uniform mat4 projectionMatrix;
uniform mat4 viewMatrix;
uniform vec3 lightPosition;
This would be reasonably easy for me to calculate in LWJGL.
Vertex file:
attribute vec3 a_position;
attribute vec3 a_normal;
attribute vec2 a_texCoord0;

uniform mat4 u_worldTrans;
uniform mat4 u_projViewTrans;

varying vec2 v_texCoords;

void main() {
    v_texCoords = a_texCoord0;
    gl_Position = u_projViewTrans * u_worldTrans * vec4(a_position, 1.0);
}
I imagine that I could implement the uniforms similarly to the LWJGL GLSL example, but I don't know how to apply these uniforms in libGDX and have it work. I am unsure what u_projViewTrans is; I'm assuming it is a combination of the projection, transformation and view matrices, and that the uniform is set with camera.combined?
If someone could help me understand the process, or point to an example of how per-pixel lighting can be implemented with just u_projViewTrans and u_worldTrans, I'd greatly appreciate your time and effort in helping me understand these concepts a bit better.
Here's my GitHub upload of my work in progress: here
You can do the light calculations in world space. (u_projViewTrans is indeed the combined projection and view matrix, which libGDX exposes as camera.combined, and u_worldTrans is the model's world transform.) A simple Lambertian diffuse light can be calculated like this:
vec3 toLightVector = normalize( lightPosition - vertexPosition );
float lightIntensity = max( 0.0, dot( normal, toLightVector ));
A detailed explanation can be found in the answer to the Stackoverflow question How does this faking the light work on aerotwist?.
While Gouraud shading calculates the light in the vertex shader, Phong shading calculates the light in the fragment shader.
(see further GLSL fixed function fragment program replacement)
A Gouraud shader may look like this:
Vertex Shader:
attribute vec3 a_position;
attribute vec3 a_normal;
attribute vec2 a_texCoord0;

uniform mat4 u_worldTrans;
uniform mat4 u_projViewTrans;
uniform vec3 lightPosition;

varying vec2 v_texCoords;
varying float v_lightIntensity;

void main()
{
    vec4 vertPos = u_worldTrans * vec4(a_position, 1.0);
    vec3 normal = normalize(mat3(u_worldTrans) * a_normal);
    vec3 toLightVector = normalize(lightPosition - vertPos.xyz);
    v_lightIntensity = max(0.0, dot(normal, toLightVector));

    v_texCoords = a_texCoord0;
    gl_Position = u_projViewTrans * vertPos;
}
Fragment Shader:
varying vec2 v_texCoords;
varying float v_lightIntensity;

uniform sampler2D u_texture;

void main()
{
    vec4 texCol = texture2D(u_texture, v_texCoords.st); // texture2D in GLSL 1.x / ES 2.0
    gl_FragColor = vec4(texCol.rgb * v_lightIntensity, 1.0);
}
A Phong shader may look like this:
Vertex Shader:
attribute vec3 a_position;
attribute vec3 a_normal;
attribute vec2 a_texCoord0;

uniform mat4 u_worldTrans;
uniform mat4 u_projViewTrans;

varying vec2 v_texCoords;
varying vec3 v_vertPosWorld;
varying vec3 v_vertNVWorld;

void main()
{
    vec4 vertPos = u_worldTrans * vec4(a_position, 1.0);
    v_vertPosWorld = vertPos.xyz;
    v_vertNVWorld = normalize(mat3(u_worldTrans) * a_normal);

    v_texCoords = a_texCoord0;
    gl_Position = u_projViewTrans * vertPos;
}
Fragment Shader:
varying vec2 v_texCoords;
varying vec3 v_vertPosWorld;
varying vec3 v_vertNVWorld;

uniform sampler2D u_texture;

struct PointLight
{
    vec3 color;
    vec3 position;
    float intensity;
};
uniform PointLight u_pointLights[1];

void main()
{
    vec3 toLightVector = normalize(u_pointLights[0].position - v_vertPosWorld.xyz);
    float lightIntensity = max(0.0, dot(v_vertNVWorld, toLightVector));

    vec4 texCol = texture2D(u_texture, v_texCoords.st); // texture2D in GLSL 1.x / ES 2.0
    vec3 finalCol = texCol.rgb * lightIntensity * u_pointLights[0].color;
    gl_FragColor = vec4(finalCol, 1.0); // apply lightIntensity once, not twice
}
I have a problem with multiple shaders on a single object.
This is my render code:
#include "MeshRenderer.h"
ForwardAmbient* shader1;
ForwardDirectional* shader2;
MeshRenderer::MeshRenderer(Obj& obj) :
meshObject(obj)
{
shader1 = new ForwardAmbient(vec3(1, 1, 1));
shader2 = new ForwardDirectional(vec3(1, 0, 0), vec3(1, 1, 1));
}
MeshRenderer::~MeshRenderer()
{
}
void MeshRenderer::Render(RenderingCore* rc)
{
//for (Shader* shader : meshObject.shaders)
//{
//}
shader1->Bind();
shader1->UpdateShader(rc, transform, meshObject.material);
meshObject.material->GetTexture()->Bind(0);
meshObject.mesh->Render();
shader2->Bind();
shader2->UpdateShader(rc, transform, meshObject.material);
meshObject.material->GetTexture()->Bind(0);
meshObject.mesh->Render();
/*
meshObject.shader->Bind();
meshObject.shader->UpdateShader(rc, transform, meshObject.material);
meshObject.material->GetTexture()->Bind(0);
meshObject.mesh->Render();
*/
}
ambient light.vs:
#version 120

attribute vec3 position;
attribute vec2 texCoord;
attribute vec3 normal;

varying vec2 texCoord0;

uniform mat4 transform;
uniform mat4 projection;

void main()
{
    gl_Position = (projection * transform) * vec4(position, 1);
    texCoord0 = texCoord;
}
ambient light.fs:
#version 120

varying vec2 texCoord0;

uniform vec3 ambientLight;
uniform float alpha;
uniform sampler2D sampler;

void main()
{
    gl_FragColor = texture2D(sampler, texCoord0.xy) * vec4(ambientLight, alpha);
}
directional light.vs:
#version 120

attribute vec3 position;
attribute vec2 texCoord;
attribute vec3 normal;

varying vec2 texCoord0;
varying vec3 normal0;
varying vec3 worldPos0;

uniform mat4 transform;
uniform mat4 projection;

void main()
{
    gl_Position = (projection * transform) * vec4(position, 1);
    texCoord0 = texCoord;
    normal0 = (transform * vec4(normal, 0)).xyz;
    worldPos0 = (transform * vec4(position, 1)).xyz;
}
directional light.fs:
#version 120

varying vec2 texCoord0;
varying vec3 normal0;
varying vec3 worldPos0;

uniform vec3 color;
uniform float alpha;
uniform vec3 direction;
uniform float specularIntensity;
uniform float specularPower;
uniform vec3 eyePosition;
uniform sampler2D sampler;

vec4 calcLight(vec3 color, float alpha, vec3 direction, vec3 normal)
{
    float diffuseFactor = dot(normal, -direction);

    vec4 diffuseColor = vec4(0, 0, 0, 0);
    vec4 specularColor = vec4(0, 0, 0, 0);

    if (diffuseFactor > 0)
    {
        diffuseColor = vec4(color, 1.0) * diffuseFactor;

        vec3 directionToEye = normalize(eyePosition - worldPos0);
        vec3 reflectDirection = normalize(reflect(direction, normal));

        float specularFactor = dot(directionToEye, reflectDirection);
        specularFactor = pow(specularFactor, specularPower);

        if (specularFactor > 0)
        {
            specularColor = vec4(color, 1.0) * specularIntensity * specularFactor;
        }
    }

    return diffuseColor + specularColor;
}

vec4 calcDirectionalLight(vec3 color, float alpha, vec3 direction, vec3 normal)
{
    return calcLight(color, alpha, -direction, normal);
}

void main()
{
    gl_FragColor = texture2D(sampler, texCoord0.xy) * calcDirectionalLight(color, 1, direction, normalize(normal0));
}
Here is the result:
http://imgur.com/Bawny2P
Only the ambient light is rendered; the directional light is not.
Your problem is this:
shader1->Bind();
shader1->UpdateShader(rc, transform, meshObject.material);
meshObject.material->GetTexture()->Bind(0);
meshObject.mesh->Render();

shader2->Bind();
shader2->UpdateShader(rc, transform, meshObject.material);
meshObject.material->GetTexture()->Bind(0);
meshObject.mesh->Render();
OpenGL doesn't know what "objects" are. It just draws points, lines and triangles, one at a time. To sort out depth overlap, the depth buffer method is used. When you issue exactly the same drawing call (meshObject.mesh->Render) twice, with the same vertex setup and depth testing enabled, one of the two draw calls will simply win over the other.
Also, more importantly, draw calls don't "stack". You cannot combine shaders just by drawing the same thing multiple times; it may sort of work for additive processes, but that's barking up the wrong tree: instead of saving work, you're duplicating the amount of work to be done.
What you should do instead is merge the two shaders into a single one and draw the geometry only once, with the merged shader.
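That said, if you do want to keep the two-pass additive route, it only behaves when the second pass is blended additively onto the first and its depth test accepts the depth values the first pass already wrote. A hedged sketch of the extra state this classic multi-pass forward lighting needs, reusing the classes from the question:

// Pass 1: ambient term, fills the depth buffer as usual.
shader1->Bind();
shader1->UpdateShader(rc, transform, meshObject.material);
meshObject.material->GetTexture()->Bind(0);
meshObject.mesh->Render();

// Pass 2: directional term, added on top of pass 1.
glEnable(GL_BLEND);
glBlendFunc(GL_ONE, GL_ONE); // additive: result = pass1 + pass2
glDepthFunc(GL_EQUAL);       // re-shade exactly the surfaces laid down in pass 1
glDepthMask(GL_FALSE);       // the depth buffer is already correct
shader2->Bind();
shader2->UpdateShader(rc, transform, meshObject.material);
meshObject.material->GetTexture()->Bind(0);
meshObject.mesh->Render();

// Restore default state.
glDepthMask(GL_TRUE);
glDepthFunc(GL_LESS);
glDisable(GL_BLEND);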
I have created an application in OpenGL that uses a Vertex Shader, Geometry Shader, and Fragment Shader.
I have a uniform variable, eyePositionWorld that I would like to use both in the Geometry Shader and the Fragment Shader.
(I am rendering the position of the vertices relative to the eyePositionWorld as the color.)
Vertex Shader
#version 430

in vec4 vertexPositionModel;
in vec3 vertexColor;
in vec3 vertexNormalModel;
in mat4 modelMatrix;

uniform mat4 viewMatrix;       // world to view
uniform mat4 projectionMatrix; // view to projection

struct fData
{
    vec3 fragColor;
    vec3 fragPositionWorld;
    vec3 fragNormalWorld;
};
out fData geomData;

void main()
{
    gl_Position = projectionMatrix * viewMatrix * modelMatrix * vertexPositionModel;
    geomData.fragColor = vertexColor;
    geomData.fragPositionWorld = (modelMatrix * vertexPositionModel).xyz;
    geomData.fragNormalWorld = (modelMatrix * vec4(vertexNormalModel, 0.0)).xyz;
}
Geometry Shader
#version 430

layout(triangles_adjacency) in;
layout(triangle_strip, max_vertices = 3) out;

struct fData
{
    vec3 fragColor;
    vec3 fragPositionWorld;
    vec3 fragNormalWorld;
};

uniform vec3 eyePositionWorldGeomShader;

in fData geomData[];
out fData fragData;

void main() {
    gl_Position = gl_in[0].gl_Position;
    fragData = geomData[0];
    fragData.fragColor = gl_in[0].gl_Position.xyz - eyePositionWorldGeomShader;
    EmitVertex();

    gl_Position = gl_in[2].gl_Position;
    fragData = geomData[2];
    fragData.fragColor = gl_in[2].gl_Position.xyz - eyePositionWorldGeomShader;
    EmitVertex();

    gl_Position = gl_in[4].gl_Position;
    fragData = geomData[4];
    fragData.fragColor = gl_in[4].gl_Position.xyz - eyePositionWorldGeomShader;
    EmitVertex();

    EndPrimitive();
}
Fragment Shader
#version 430

struct fData
{
    vec3 fragColor;
    vec3 fragPositionWorld;
    vec3 fragNormalWorld;
};
in fData fragData;

uniform vec4 ambientLight;
uniform vec3 lightPositionWorld;
uniform vec3 eyePositionWorld;
uniform bool isLighted;

out vec4 color;

void main()
{
    if (!isLighted)
    {
        color = vec4(fragData.fragColor, 1.0);
    }
    else
    {
        vec3 lightVectorWorld = normalize(lightPositionWorld - fragData.fragPositionWorld);
        float brightness = clamp(dot(lightVectorWorld, normalize(fragData.fragNormalWorld)), 0.0, 1.0);
        vec4 diffuseLight = vec4(brightness, brightness, brightness, 1.0);

        vec3 reflectedLightVectorWorld = reflect(-lightVectorWorld, fragData.fragNormalWorld);
        vec3 eyeVectorWorld = normalize(eyePositionWorld - fragData.fragPositionWorld);
        float specularity = pow(clamp(dot(reflectedLightVectorWorld, eyeVectorWorld), 0.0, 1.0), 40) * 0.5;
        vec4 specularLight = vec4(specularity, specularity, specularity, 1.0);

        // maximum distance of all lights
        float maxDist = 55.0;
        float attenuation = clamp((maxDist - length(lightPositionWorld - fragData.fragPositionWorld)) / maxDist, 0.0, 1.0);

        color = (ambientLight + (diffuseLight + specularLight) * attenuation) * vec4(fragData.fragColor, 1.0);
    }
}
C++ Code (the m_eyePositionUL and m_eyePositionGeomShaderUL are both just loaded with glGetUniformLocation)
glUniform3fv(m_eyePositionUL, 1, &m_camera.getPosition()[0]);
glUniform3fv(m_eyePositionGeomShaderUL, 1, &m_camera.getPosition()[0]);
How can I upload just one uniform to OpenGL and use it in both the Geometry Shader and the Fragment Shader?
It's a bit surprising, but OpenGL makes it easy: all you have to do is use the same uniform name in both shaders!
Then just upload it once to that uniform location.
Replace uniform vec3 eyePositionWorldGeomShader; with uniform vec3 eyePositionWorld; in your Geometry Shader and keep the uniform name the same in the Fragment Shader.
Then just don't upload the other uniform, so your C++ code is simply:
glUniform3fv(m_eyePositionUL, 1, &m_camera.getPosition()[0]);
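This works because a uniform belongs to the linked program object rather than to an individual stage: every stage that declares uniform vec3 eyePositionWorld; reads the same storage, and glGetUniformLocation returns a single location for it. A minimal sketch, assuming program is your linked program handle:

// One lookup, one upload; the geometry and fragment shaders both see the value.
GLint eyePositionWorldUL = glGetUniformLocation(program, "eyePositionWorld");
glUniform3fv(eyePositionWorldUL, 1, &m_camera.getPosition()[0]);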