This is my vertex shader, and what I expect from it is a correct displacement from a texture. What it does instead is move every vertex of my plane upward to the same level.
// Displaces each vertex along z by one channel of the cloud texture.
uniform sampler2D clouds;  // BUG FIX: the declaration was missing its ';'

void main()
{
    vec4 position = gl_Vertex;
    vec2 uv = gl_MultiTexCoord0.st;
    // BUG FIX: the texture lookup returns a vec4; a single channel (.r)
    // must be selected before adding it to the scalar z component —
    // assigning a vec4 to a float is a compile error. texture2D() is the
    // correct lookup name in this legacy (pre-1.30) GLSL dialect.
    position.z += texture2D(clouds, uv).r;
    gl_Position = gl_ModelViewProjectionMatrix * position;
}
I had to declare my Position and TexCoord variables as varying, and I only needed to add one channel from my texture to the 'z' component of my Position variable.
# GLSL vertex shader: displaces each vertex along z by the alpha channel
# of the cloud texture, passing Position/TexCoord through as varyings.
vertex_shader = """
varying vec4 Position;
varying vec2 TexCoord;
uniform sampler2D clouds;
void main()
{
    Position = gl_Vertex;
    // BUG FIX: gl_MultiTexCoord0 is a vec4; take .st explicitly —
    // assigning a vec4 to a vec2 varying is a compile error on strict
    // drivers.
    TexCoord = gl_MultiTexCoord0.st;
    Position.z += texture2D(clouds, TexCoord.st).a;
    gl_Position = gl_ModelViewProjectionMatrix * Position;
}
"""
I found this wikibook dedicated to glsl programming for blender3D
Related
Currently I am rendering mesh triangles like this:
// draw the same polygons again
glPolygonMode(GL_FRONT_AND_BACK, GL_FILL);
shader.setVec3("objectColor", obj_color);
glDrawElements(GL_TRIANGLES, static_cast<unsigned int>(indices.size()), GL_UNSIGNED_INT, 0);
The problem with this code is that I am setting object color inside shader for the full mesh.
What would be a good way to render one single mesh whose faces have different colors?
For now I only know how to set vertex colors, and pass it the fragment shader.
What are the most common ways to set individual face colors?
I only think about duplicating mesh vertices twice to avoid vertex color interpolation.
My current shader looks like this:
Vertex Shader:
#version 330 core
layout (location = 0) in vec3 aPos;
layout (location = 1) in vec3 aNormal;

out vec3 FragPos;    // fragment position in view space
out vec3 Normal;     // normal in view space
out vec3 LightPos;   // light position in view space

uniform vec3 lightPos;  // light position in WORLD space
uniform mat4 model;
uniform mat4 view;
uniform mat4 projection;

void main()
{
    gl_Position = projection * view * model * vec4(aPos, 1.0);
    FragPos = vec3(view * model * vec4(aPos, 1.0));
    // Inverse-transpose handles non-uniform scaling of normals.
    Normal = mat3(transpose(inverse(view * model))) * aNormal;
    // BUG FIX: the comment promised a world->view transform but the code
    // never applied `view`, leaving LightPos in world space while FragPos
    // is in view space. Apply the view matrix so lighting is computed in
    // one consistent space.
    LightPos = vec3(view * vec4(lightPos, 1.0)); // Transform world-space light position to view-space light position

    // World-space alternative, kept for reference:
    // FragPos = vec3(model * vec4(aPos, 1.0));
    //Normal = mat3(transpose(inverse(model))) * aNormal;
    // gl_Position = projection * view * vec4(FragPos, 1.0);
}
Fragment Shader:
#version 330 core
out vec4 FragColor;

in vec3 FragPos;   // view-space fragment position
in vec3 Normal;    // view-space normal
in vec3 LightPos;  // extra in variable: view-space light position from the vertex shader

uniform vec3 lightColor;
uniform vec3 objectColor;
uniform float f;            // blend factor between shaded result and flat objectColor
uniform float transparency; // output alpha

// Phong lighting in view space, then a linear blend toward the raw
// object color controlled by `f`.
void main()
{
    //flat shading (kept for reference)
    // vec3 x_ = dFdx(FragPos);
    // vec3 y_= dFdy(FragPos);
    // vec3 normal_ = cross(x_, y_);
    // vec3 norm_ = normalize(normal_);

    // ambient
    float ambientStrength = 0.75;
    vec3 ambient = ambientStrength * lightColor;

    // diffuse
    vec3 norm = normalize(Normal);
    vec3 lightDir = normalize(LightPos - FragPos);
    float diff = max(dot(norm, lightDir), 0.0); //change "norm_" to "norm" avoid the performance warning and have unwelded view
    vec3 diffuse = diff * lightColor;

    // specular — the viewer sits at the origin in view space, so the view
    // direction is simply -FragPos.
    float specularStrength = 0.01;
    vec3 viewDir = normalize(-FragPos);
    vec3 reflectDir = reflect(-lightDir, norm);
    float spec = pow(max(dot(viewDir, reflectDir), 0.0), 32);
    vec3 specular = specularStrength * spec * lightColor;

    vec3 shading = (ambient + diffuse + specular) * objectColor;
    // IDIOM: use the built-in mix() instead of interpolating each color
    // channel by hand — identical math, one line.
    vec3 result = mix(shading, objectColor, f);
    FragColor = vec4(result, transparency);
}
You can use the flat Interpolation qualifier:
The value will not be interpolated. The value given to the fragment shader is the value from the Provoking Vertex for that primitive.
Vertex shader
// [...]
// Per-vertex color attribute; with `flat` interpolation, only the
// provoking vertex's value reaches the fragment shader, so each triangle
// renders as one solid color.
layout (location = 0) in vec3 aColor;
// `flat` disables interpolation across the primitive.
flat out vec3 vColor;
void main()
{
vColor = aColor;
// [...]
}
Fragment shader
// [...]
// The `flat` qualifier must match the vertex-shader output exactly.
flat in vec3 vColor;
void main()
{
// NOTE(review): FragColor is presumably declared in the elided "[...]"
// section above — confirm when integrating.
FragColor = vec4(vColor, 1.0);
}
With this implementation, the entire triangle primitive is rendered with one color. If you find an intelligent scheme for assigning the color attributes to the vertices, you can render all triangles with different colors — e.g., two triangles with the indices 0-1-2 and 1-2-3: the color attribute of vertex 0 defines the color of the first triangle, and the color attribute of vertex 1 defines the color of the second triangle.
An alternative way would be to create an array of colors for each triangle primitive and store this color array in a Shader Storage Buffer Object. Use gl_VertexID to address the color in the vertex shader.
// One color per triangle, stored in a shader storage buffer indexed by
// primitive number.
layout(std430, binding = 0) buffer primitiveColors
{
vec4 colors[];
};
void main()
{
// For a non-indexed draw, gl_VertexID / 3 maps every 3 consecutive
// vertices (one triangle) to the same color entry.
vColor = colors[gl_VertexID / 3];
// [...]
}
Let's say the concept is to create a map consisting of cubes with a neon aesthetic, such as:
Currently I have this vertex shader:
// Transformation uniforms
uniform mat4 u_projection;
uniform mat4 u_view;
uniform mat4 u_model;

// Per-vertex attributes
in vec3 a_position;
in vec3 a_normal;
in vec2 a_texture;

// Hard-coded directional light (a global, not a real uniform despite the name)
vec3 u_light_direction = vec3(1.0, 2.0, 3.0);

// Outputs consumed by the fragment stage
out vec2 v_texture;
out float v_intensity;

void main()
{
    // Clip-space position.
    gl_Position = u_projection * u_view * u_model * vec4(a_position, 1.0);

    // Rotate the normal into world space (w = 0 drops the translation),
    // then take the clamped Lambert term against the light direction.
    vec3 world_normal = normalize((u_model * vec4(a_normal, 0.0)).xyz);
    v_intensity = max(0.0, dot(world_normal, normalize(u_light_direction)));

    // Texture coordinates pass straight through.
    v_texture = a_texture;
}
And this pixel shader:
in float v_intensity;
in vec2 v_texture;

uniform sampler2D u_texture;

out vec4 fragColor;

// Modulate the texture sample by the per-vertex diffuse intensity;
// alpha is left untouched (multiplied by 1.0).
void main()
{
    vec4 base = texture(u_texture, v_texture);
    fragColor = base * vec4(vec3(v_intensity), 1.0);
}
How would I use this to create a neon effect such as in the example for 3D cubes? The cubes are simply models with a mesh/material. The only change would be to set the material color to black and the outlines to a bright pink or blue (maybe with a glow).
Any help is appreciated. :)
You'd normally implement this as a post-processing effect. First render with bright, saturated colours into a texture, then apply a bloom effect, when drawing that texture to screen.
I am busy implementing a deferred lighting system and have gotten all the way to having the bound position, diffuse and normal textures in my fragment shader in which I am to calculate the lighting specifications for each fragment.
#version 400 core

in vec3 fs_position;
in vec3 fs_color;
in vec4 fs_attenuation;

layout (location = 0) out vec4 outColor;

uniform sampler2D diffuseSampler;
uniform sampler2D positionSampler;
uniform sampler2D normalSampler;

const float cutOffFactor = 200;
const float reflectivity = 0.15;
const float shineDamper = 1;

// Deferred lighting pass: samples the G-buffer and (for now) visualizes
// the normal texture.
void main(void){
    // NOTE(review): gl_PointCoord is only defined when rasterizing point
    // sprites; this pass draws a geometry-shader quad, so derive the
    // G-buffer lookup from gl_FragCoord / viewport size instead — verify.
    vec2 frag = gl_PointCoord.xy;
    // BUG FIX: `2f` is not a valid GLSL float literal (the `f` suffix
    // requires a decimal point or exponent); it must be written 2.0.
    frag.x = (frag.x+1)/2.0;
    frag.y = ((frag.y+1)/2.0);
    vec4 texDiffuse = texture(diffuseSampler,frag);
    vec4 texPosition = texture(positionSampler,frag);
    vec4 texNormal = texture(normalSampler,frag);
    // NOTE(review): merely referencing fs_position keeps the whole
    // vertex->geometry->fragment interface "active"; without it the linker
    // may optimize attributes away and shift attribute locations, which
    // would explain the works-without/breaks-with behavior — verify the
    // application queries attribute locations instead of assuming them.
    vec3 p = vec3(fs_position.xyz);
    vec3 ePosition = texPosition.xyz;
    ePosition = ePosition*200;
    vec3 eNormal = texNormal.xyz;
    vec3 unitNormal = normalize(eNormal);
    outColor = vec4(texNormal.xyz,1.0);
}
That is literally all that my Fragment Shader contains.
The problem lies in the line "vec3 p = vec3(fs_position.xyz);".
When I remove it the program renders a perfect normal map, but when I add it a blank screen in which I can rotate and eventually a certain color flickers.
fs_position has nothing to do with color and was inputted from the geometry shader (all references are correct) yet it somehow causes a massive malfunction.
Same thing happens as well with all in variables (fs_color and fs_attenuation).
Whats being rendered is a non-blended quad of equal per-vertex properties that covers the viewport that renders to a color_attachment that exists(as said without that line everything works).
(blending does nothing, and I will put additive blending on when I get a result worthy of allowing me to continue)
Any help will be appreciated, the engine and shaders have never acted this way for me before and no errors are popping up.
Extra code:
Vertex shader
#version 400 core

in vec3 position;
in vec3 color;
in vec4 attenuation;

out vec3 gs_position;
out vec3 gs_color;
out vec4 gs_attenuation;

uniform mat4 projectionMatrix;
uniform mat4 viewMatrix;

// Projects each light point into clip space and forwards the per-light
// attributes (view-space position, color, attenuation) to the geometry stage.
void main(void){
    vec4 viewPos = viewMatrix * vec4(position, 1.0);
    gl_Position = projectionMatrix * viewPos;

    gs_position = viewPos.xyz;
    gs_color = color;
    gs_attenuation = attenuation;
}
GeometryShader
#version 150
layout (points) in;
layout (triangle_strip,max_vertices = 4) out;

in vec3 gs_position[];
in vec3 gs_color[];
in vec4 gs_attenuation[];

out vec3 fs_position;
out vec3 fs_color;
out vec4 fs_attenuation;

// Expands each input point into a full-viewport quad (triangle strip in
// NDC), forwarding the per-light attributes unchanged to every corner.
// IDIOM: the four copy-pasted emission blocks are collapsed into a loop
// over a corner table — identical vertex order and values.
void main(void){
    const vec2 corners[4] = vec2[4](
        vec2(-1.0,  1.0),   // top-left
        vec2(-1.0, -1.0),   // bottom-left
        vec2( 1.0,  1.0),   // top-right
        vec2( 1.0, -1.0));  // bottom-right
    for (int i = 0; i < 4; ++i) {
        gl_Position = vec4(corners[i], 0.0, 1.0);
        // Outputs must be (re)written before every EmitVertex().
        fs_position = gs_position[0];
        fs_color = gs_color[0];
        fs_attenuation = gs_attenuation[0];
        EmitVertex();
    }
    EndPrimitive();
}
Example of light values:
Position: -1, 0.5, -1
Color: 0, 0.5 ,0
Attenuation: 1, 0.1, 0.2, 0
As for the requested screenshots, basically without referencing an in variable I get something like this:
And with it I get a black screen, which is pretty easy to visualise.
(Although when rotating the view matrix (y-axis) there is a certain point at which the quad gets colored green, although I cant get values for it)
I have been researching different techniques for rendering grass. I've decided to go with geometry-shader-generated grass, mainly so I can generate triangle fans on the fly when I render the blades as GL_POINTS, but I'm not seeing the performance I'd like. I'm getting maybe 20-50 fps with 100,000 blades of grass, and I have a decent GPU. I'm wondering if my approach is wrong, if I'm reaching the limitations of my GPU, if I am doing something incorrectly, or if there is a faster way (my aim is individual blades whose vertices I can manipulate, ideally). The texture I am using is 256x256.
My rendering steps are:
Creation of the VAO and VBO and storing locations and binding once:
float[] GrassLocations= new float[100000];
int vaoID = createVAO();
. //bind VBO to VAO
storeDataInAttributeList(0, 3, GrassLocations,0,0);
I then render:
GL30.glBindVertexArray(VAO);
GL20.glEnableVertexAttribArray(0);
GL13.glActiveTexture(GL13.GL_TEXTURE0);
GL11.glBindTexture(GL11.GL_TEXTURE_2D, texture);
GL11.glDrawArrays(GL11.GL_POINTS, 0, 100000);
GL20.glDisableVertexAttribArray(0);
GL30.glBindVertexArray(0);
then My Vertex Shader:
#version 400
layout (location = 0) in vec3 VertexLocation;

uniform float time;

out vec3 offsets;
out vec3 Position;
out vec3 Normal;
out vec2 TexCoord;
out float visibility;
out float Time;

uniform mat4 transformationMatrix;
uniform mat4 viewMatrix;
uniform mat4 MVPmatrix;
uniform mat4 modelViewMatrix;

// Exponential distance-fog parameters.
const float density = .007;
const float gradient = 1.5;

void main()
{
    Time = time;
    vec4 worldPosition = transformationMatrix * vec4(VertexLocation,1.0);
    vec4 positionRelativeToCam = modelViewMatrix* vec4(VertexLocation,1.0);
    Normal = vec3(0,1,0);  // grass blades are lit as if facing straight up
    Position = vec3( worldPosition );
    gl_Position = MVPmatrix* vec4(VertexLocation,1.0);

    // Fog/visibility factor from distance to the camera.
    float distance = length(positionRelativeToCam.xyz);
    visibility = exp(-pow((distance * density), gradient));
    visibility = clamp(visibility,0.0,1.0);

    // BUG FIX: the original read an undeclared identifier `offset`, which
    // cannot compile. No per-blade offset attribute exists in this shader,
    // so forward the world position (the geometry shader only passes this
    // through as a debug color) — TODO confirm the intended value.
    offsets = worldPosition.xyz;
}
I did gut the vertex shader and left only GL_POSITION and still not the issue.
My Geometry Shader:
#version 400
layout( points ) in;
layout( triangle_strip, max_vertices = 10 ) out;

float Size2=1; // Half the width of the quad
const float width = 5;

in vec3 Position[];
in vec3 Normal[];
in vec3 offsets[];
in float Time[];

out vec3 position;
out vec3 normal;
out vec2 TexCoord;
out vec3 color;

// Extrudes each grass point into a 5-row, 10-vertex triangle strip whose
// upper rows sway sideways with Time to fake wind.
void main()
{
    // BUG FIX: `Time[0].x` swizzles a scalar, which is invalid GLSL;
    // Time[0] is already a float.
    float t = Time[0];
    float halfW = Size2 * width;

    // Per-row vertical offset, horizontal sway, depth sway and V coord —
    // these tables reproduce the original five emission pairs exactly.
    float rowY[5]    = float[5](-Size2, 10.0, 15.0, 25.0, Size2 * 7.0);
    float rowSway[5] = float[5](0.0, t, t * 2.0, t * 3.0, 0.0);
    float rowZ[5]    = float[5](0.0, 0.0, 0.0, 0.0, t);
    float rowV[5]    = float[5](0.0, 0.25, 0.50, 0.75, 1.0);

    for (int i = 0; i < 5; ++i) {
        // BUG FIX: geometry-shader output values are undefined after each
        // EmitVertex(), so per-vertex outputs must be rewritten for every
        // vertex rather than assigned once at the top of main().
        position = Position[0];
        normal = Normal[0];
        color = offsets[0];
        gl_Position = vec4(-halfW + rowSway[i], rowY[i], rowZ[i], 0.0)
                    + gl_in[0].gl_Position;
        TexCoord = vec2(0.0, rowV[i]);
        EmitVertex();

        position = Position[0];
        normal = Normal[0];
        color = offsets[0];
        gl_Position = vec4(halfW + rowSway[i], rowY[i], rowZ[i], 0.0)
                    + gl_in[0].gl_Position;
        TexCoord = vec2(1.0, rowV[i]);
        EmitVertex();
    }
    EndPrimitive();
}
and my fragment Shader: (This is in a deferred engine, I've tried it with forward rendering also and I don't think performance hit is here)
#version 400
in vec2 TexCoord;
in vec3 color;
in vec3 normal;
in vec3 position;

// NOTE(review): layout(binding = N) on a sampler requires GLSL 4.20 /
// ARB_shading_language_420pack; under #version 400 bind the texture unit
// from the application with glUniform1i instead.
uniform sampler2D SpriteTex;

// BUG FIX: a shader may not mix user-declared fragment outputs with the
// built-in gl_FragData[] array; declare all three G-buffer targets
// explicitly instead.
layout( location = 0 ) out vec4 FragColor;     // albedo
layout( location = 1 ) out vec4 FragPosition;  // world position (w unused)
layout( location = 2 ) out vec4 FragNormal;    // surface normal

void main() {
    vec4 texColor = texture(SpriteTex,TexCoord);
    // Reject mostly-transparent texels first; discard cancels all
    // attachment writes, so there is no point writing the G-buffer before
    // the alpha test.
    if(texColor.a<.5){
        discard;
    }
    FragColor = texColor;
    FragPosition = vec4(position.xyz,0);
    FragNormal = vec4(normal,1);
}
What you want is a technique called Instancing. The tutorial I've linked is fantastic for figuring out how to do instancing.
I would probably advise that you avoid the geometry shader (since the geometry shader doesn't usually scale well when its purpose is to expand the quantity of vertices), and instead just define a buffer containing all the vertices necessary to draw a single blade (or patch) of grass, then use instancing to redraw that object thousands of times.
In the past few days I been trying to implement parallax mapping in my engine, but it doesn't seem to work, I have seen at least 15 examples, and I'm still not being able to get it to work
Here is an Image:
As you can see, all you can see is the base color, the height map is not there
Here are my shaders:
Fragment Shader
#version 330 core
uniform sampler2D DiffuseTextureSampler;
uniform sampler2D HeightTextureSampler;

// x = scale, y = bias applied to the sampled height.
vec2 scaleBias = vec2(0.5,0.5);

in vec3 EyeDirection_tangentspace;
in vec2 UV;

// Classic parallax (offset) mapping: shift the diffuse lookup along the
// tangent-space eye vector by the biased height.
// NOTE(review): gl_FragColor/texture2D are deprecated in 330 core; many
// drivers still accept them, but a declared `out vec4` + texture() would
// be strictly conformant — verify against your GL context.
void main()
{
    // BUG FIX: do NOT negate UV.y — negative texture coordinates read
    // outside [0,1] and, depending on the wrap mode, fetch nothing or a
    // mirrored texel. Our heightmap only has one color channel.
    float height = texture2D(HeightTextureSampler, UV).r;
    float v = height * scaleBias.r - scaleBias.g;
    // BUG FIX: normalize before use — the interpolated eye vector's xy
    // components can be arbitrarily large, turning the parallax shift into
    // a massive texture-coordinate offset.
    vec3 eye = normalize(EyeDirection_tangentspace);
    vec2 newCoords = UV + (eye.xy * v);
    vec3 rgb = texture2D(DiffuseTextureSampler, newCoords).rgb;
    gl_FragColor = vec4(rgb, 1.0);
}
Vertex Shader
#version 330 core
// Input vertex data, different for all executions of this shader.
layout(location = 0) in vec3 vertexPosition_modelspace;
layout(location = 1) in vec2 vertexUV;
layout(location = 2) in vec3 vertexNormal_modelspace;
layout(location = 3) in vec3 vertexTangent_modelspace;
layout(location = 4) in vec3 vertexBitangent_modelspace;

// Output data ; will be interpolated for each fragment.
out vec2 UV;
out vec3 Position_worldspace;
out vec3 EyeDirection_cameraspace;
out vec3 LightDirection_cameraspace;
out vec3 LightDirection_tangentspace;
out vec3 EyeDirection_tangentspace;

// Values that stay constant for the whole mesh.
uniform mat4 MVP;
uniform mat4 V;
uniform mat4 M;
uniform mat3 MV3x3;
uniform vec3 LightPosition_worldspace;

void main()
{
    gl_Position = MVP * vec4(vertexPosition_modelspace,1);
    Position_worldspace = (M * vec4(vertexPosition_modelspace,1)).xyz;

    // Vector that goes from the vertex to the camera, in camera space.
    // In camera space, the camera is at the origin (0,0,0).
    vec3 vertexPosition_cameraspace = ( V * M * vec4(vertexPosition_modelspace,1)).xyz;
    EyeDirection_cameraspace = vec3(0,0,0) - vertexPosition_cameraspace;

    UV = vertexUV;

    // Camera-space TBN basis; the transpose turns its rows into T/B/N so
    // multiplying a camera-space vector by it yields tangent-space coords.
    vec3 vertexTangent_cameraspace = MV3x3 * vertexTangent_modelspace;
    vec3 vertexBitangent_cameraspace = MV3x3 * vertexBitangent_modelspace;
    vec3 vertexNormal_cameraspace = MV3x3 * vertexNormal_modelspace;
    mat3 TBNMatrix = transpose(mat3(vertexTangent_cameraspace, vertexBitangent_cameraspace, vertexNormal_cameraspace));

    // BUG FIX: the original subtracted a model-space position from a
    // world-space position (mixing coordinate spaces) and then applied a
    // camera-space TBN. Transform the camera-space eye direction instead,
    // so the vector and the basis live in the same space.
    EyeDirection_tangentspace = TBNMatrix * EyeDirection_cameraspace;
}
couple things
set your scale to 1. no point in halving your hightscale if you cant see it at all.
(YOUR CURRENT PROBLEM) You are computing your texture coordinates with -UV.y. OpenGL does not have negative texture coordinates: a negative coordinate will pull nothing from the texture, or worse, a mirrored texel if you have tiling on.
(YOUR NEXT PROBLEM) normalize your eye vector before calculating new coordinates in the fragment. if you don't normalize, the XY coords of the vector are going to be HUGE so your new texture coordinates are MASSIVE offsets.
try these shaders. they are very simple and work. you will have to add lighting after you get the parallax working
Vertex shader
attribute vec3 tangent;
attribute vec3 binormal;
uniform vec3 CAMERA_POSITION;
varying vec3 eyeVec;

// Fixed-function-compatible vertex stage: passes position and texture
// coordinates through, and hands the fragment stage a tangent-space eye
// vector for parallax mapping.
void main()
{
    gl_Position = ftransform();
    gl_TexCoord[0] = gl_TextureMatrix[0] * gl_MultiTexCoord0;
    // Columns are T/B/N; v * M equals transpose(M) * v, so multiplying the
    // model-space eye vector on the right projects it into tangent space.
    mat3 TBNMatrix = mat3(tangent, binormal, gl_Normal);
    eyeVec = (CAMERA_POSITION - gl_Vertex.xyz) * TBNMatrix;
}
fragment shader
uniform sampler2D basetex;
uniform sampler2D heightMap;
uniform vec2 scaleBias;  // x = scale, y = bias for the sampled height

varying vec3 eyeVec;

// Classic parallax (offset) mapping: shift the diffuse lookup along the
// tangent-space eye vector's xy by the biased height.
void main()
{
    float height = texture2D(heightMap, gl_TexCoord[0].st).r;
    float v = height * scaleBias.r - scaleBias.g;
    vec3 eye = normalize(eyeVec);
    // BUG FIX: `texCoord` was never declared — the base coordinates are
    // the same gl_TexCoord[0].st used for the height lookup.
    vec2 newCoords = gl_TexCoord[0].st + (eye.xy * v);
    vec3 rgb = texture2D(basetex, newCoords).rgb;
    gl_FragColor = vec4(rgb, 1.0);
}