I'm in the process of translating a piece of OpenGL code to Vulkan. The code recreates a rendered scene from an image (on a hemisphere projection) with depth information encoded. Note that I also load the model view matrix used for the projection to recreate the scene. The translation has been pretty straightforward but I'm running into issues due to the new Vulkan coordinate system.
The original OpenGL shader with comments follows:
#version 430
layout (triangles) in;
layout (triangle_strip, max_vertices = 3) out;
in vec2 posGeom[];
out vec2 texCoord;
uniform mat4 view;
uniform mat4 projection;
uniform float threshold;
uniform vec3 quantization;
uniform mat4 inverseStaticView;
uniform sampler2D rgbdTexture;
//get the image space for each pixel of our hemisphere image
vec3 getSphereRay(const vec2 coord) {
//get length of ray from camera to point on image plane
float len = 1 - coord.x * coord.x - coord.y * coord.y;
if (len > 0)
return vec3(coord, -sqrt(len));//scale to unit length vector as viewing ray
else
return vec3(0);
}
vec4 getPosition(const in vec2 inCoord, const in float depth) {
vec2 coord = inCoord;
//reverse the stretching from sphere to quad (based on y-coordinate)
float percent = sqrt(1.0 - coord.y * coord.y);
coord.x = coord.x * percent;
//scale ray with corresponding depth
vec3 normal = getSphereRay(coord) * depth;
//move from image space to world space by inverse view matrix
return inverseStaticView * vec4(normal, 1);
}
bool hasZeroDepth = false;
//get the real depth from quantized and packed depth by inverting the gamma correction and inverting min, max
float getDepth(int idx) {
float depth = texture(rgbdTexture, posGeom[idx] * 0.5 + 0.5).w;
if(depth == 0)
hasZeroDepth = true;
float minDepth = quantization.x;
float maxDepth = quantization.y;
float gamma = quantization.z;
depth = pow(depth, gamma);
depth = depth * (maxDepth - minDepth) + minDepth;
return depth;
}
//emit the position and texcoord
void emitPosition(int idx, float depth) {
texCoord = posGeom[idx] * 0.5 + 0.5;
gl_Position = projection * view * getPosition(posGeom[idx], depth);
EmitVertex();
}
void main() {
float d0 = getDepth(0);
float d1 = getDepth(1);
float d2 = getDepth(2);
//do not emit tris with zero (invalid) depth
if(!hasZeroDepth) {
float minDepth = min(d0, min(d1, d2));
float maxDepth = max(d0, max(d1, d2));
float minDist = maxDepth - minDepth;
float avgDepth = (d0 + d1 + d2) / 3;
float thres = threshold;
//look at tri stretching factor
if(minDist / avgDepth < thres) {
//emit original tri
emitPosition(0, d0);
emitPosition(1, d1);
emitPosition(2, d2);
} else {
//emit tri with maxDepth to only show background
emitPosition(0, maxDepth);
emitPosition(1, maxDepth);
emitPosition(2, maxDepth);
}
}
}
In the Vulkan shader, I account for the Vulkan coordinate system by inverting the y value. I also have to normalize the world values for reasons that are unclear to me (otherwise what's rendered is complete nonsense). The shader code follows:
#version 450
layout (triangles) in;
layout (triangle_strip, max_vertices = 3) out;
layout(binding = 0) uniform UniformBufferObject {
mat4 modelView;
mat4 inverseStaticModelView;
float quantization;
} ubo;
layout(binding = 1) uniform sampler2D texSampler;
layout(location = 0) in vec2 posGeom[];
layout(location = 0) out vec2 texCoord;
bool hasZeroDepth = false;
float minDepth = 0;
float maxDepth = 1.0;
vec3 unproject(vec2 win) {
float scale = 1 - win.y * win.y;
// Invert y to account for Vulkan coordinate system.
float y = win.y * -1;
// Scale x to account for hemisphere projection.
float x = win.x * scale;
float z = -sqrt(1 - x * x - y * y);
if(z < 0){
vec4 outVals = ubo.inverseStaticModelView * vec4(x, y, z, 1.0);
return vec3(outVals[0], outVals[1], outVals[2]) / outVals.w;
}else
return vec3(0);
}
vec3 reconstructWorldPosition(vec2 ndc, float depth) {
vec3 pos = unproject(ndc);
return depth * normalize(pos);
}
float getDepth(int idx) {
float depth = texture(texSampler, posGeom[idx] * 0.5 + 0.5).w;
if(depth == 0)
hasZeroDepth = true;
depth = pow(depth, ubo.quantization);
return depth;
}
void emitPosition(int idx, float depth) {
vec2 pos = posGeom[idx].xy;
texCoord = pos * 0.5 + 0.5;
vec3 positionFromDepth = reconstructWorldPosition(pos, depth);
gl_Position = ubo.modelView * vec4(positionFromDepth,1);
EmitVertex();
}
void main() {
float d0 = getDepth(0);
float d1 = getDepth(1);
float d2 = getDepth(2);
if(!hasZeroDepth) {
float minDepth = min(d0, min(d1, d2));
float maxDepth = max(d0, max(d1, d2));
float minDist = maxDepth - minDepth;
float avgDepth = (d0 + d1 + d2) / 3.0;
float thres = 0.1;
if(minDist / avgDepth < thres ) {
emitPosition(0, d0);
emitPosition(1, d1);
emitPosition(2, d2);
} else {
emitPosition(0, maxDepth);
emitPosition(1, maxDepth);
emitPosition(2, maxDepth);
}
}
}
Images of the output of the two programs are contained in this album: http://imgur.com/a/KUl57
The Vulkan output appears to be almost correct except for some odd artifacts in the lower left-hand part of the scene. My suspicion is that the scaling of the x coordinate to account for the hemisphere projection is causing the issue. I've played around with the scaling and other parts of the shader, but I can't seem to get it right. Am I overlooking something else that is different between Vulkan and OpenGL, especially with regard to the coordinate system?
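For reference (and not necessarily the cause of the artifacts), the differences usually cited are that Vulkan clip space has +Y pointing down and a [0, 1] depth range, whereas OpenGL uses +Y up and [-1, 1]. One common pattern is to keep the unprojection math identical to the OpenGL shader and only flip Y once on the final clip-space position, roughly as in this sketch (it reuses the getPosition/projection/view names from the OpenGL version, and assumes the projection matrix is built for a [0, 1] depth range):
// Sketch only: leave the hemisphere unprojection untouched and flip Y once,
// after projection, to account for Vulkan's inverted clip-space Y axis.
void emitPosition(int idx, float depth) {
    vec2 pos = posGeom[idx].xy;
    texCoord = pos * 0.5 + 0.5;
    vec4 clipPos = projection * view * getPosition(pos, depth); // same math as the GL shader
    clipPos.y = -clipPos.y;                                     // Vulkan: +Y points down in clip space
    gl_Position = clipPos;
    EmitVertex();
}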
I'm rendering a sphere with instanced drawing, while rotating the model-view-matrix around the Y axis.
It looks ok at the beginning:
But at another angle, things get worse:
It looks to me like a problem with normals. Currently, I'm calculating the normal matrix from my model-view matrix and then passing it to the shader, which does Phong-like lighting:
attribute vec4 a_position;
attribute vec3 a_normal;
attribute vec4 a_color;
attribute vec2 a_coord;
attribute mat4 a_matrix;
uniform mat4 u_mv_matrix;
uniform mat4 u_projection_matrix;
uniform mat3 u_normal_matrix;
varying vec4 v_position;
varying vec3 v_normal;
varying vec4 v_color;
varying vec2 v_coord;
void main() {
vec4 transformedPosition = u_mv_matrix * a_matrix * a_position;
v_position = transformedPosition;
v_normal = u_normal_matrix * a_normal;
v_color = a_color;
v_coord = a_coord;
gl_Position = u_projection_matrix * transformedPosition;
}
uniform sampler2D u_sampler;
varying vec4 v_position;
varying vec3 v_normal;
varying vec4 v_color;
varying vec2 v_coord;
void main() {
vec3 lightPosition = vec3(0.0); // XXX
// set diffuse and specular colors
vec3 cDiffuse = (v_color * texture2D(u_sampler, v_coord)).rgb;
vec3 cSpecular = vec3(0.3);
// lighting calculations
vec3 N = normalize(v_normal);
vec3 L = normalize(lightPosition - v_position.xyz);
vec3 E = normalize(-v_position.xyz);
vec3 H = normalize(L + E);
// Calculate coefficients.
float phong = max(dot(N, L), 0.0);
const float kMaterialShininess = 20.0;
const float kNormalization = (kMaterialShininess + 8.0) / (3.14159265 * 8.0);
float blinn = pow(max(dot(N, H), 0.0), kMaterialShininess) * kNormalization;
// diffuse coefficient
vec3 diffuse = phong * cDiffuse;
// specular coefficient
vec3 specular = blinn * cSpecular;
gl_FragColor = vec4(diffuse + specular, 1);
}
Final note: I'm working on desktop OpenGL 2.1 as well as WebGL in the browser.
Edit: Per request, I'm adding some information.
The mesh is built as follows, by passing an identity matrix:
void Sphere::append(IndexedVertexBatch<XYZ.N.UV> &batch, const Matrix &matrix) const {
float sectorStep = TWO_PI / sectorCount;
float stackStep = PI / stackCount;
for(int i = 0; i <= stackCount; ++i) {
float stackAngle = HALF_PI - i * stackStep;
float xy = radius * cosf(stackAngle);
float z = radius * sinf(stackAngle);
for(int j = 0; j <= sectorCount; ++j) {
float sectorAngle = j * sectorStep;
float x = xy * cosf(sectorAngle);
float y = xy * sinf(sectorAngle);
float nx = x / radius;
float ny = y / radius;
float nz = z / radius;
float s = (float)j / sectorCount;
float t = (float)i / stackCount;
batch.addVertex(matrix.transformPoint(x, y, z), matrix.transformNormal(nx, ny, nz), glm::vec2(s, t));
}
}
for(int i = 0; i < stackCount; ++i) {
float k1 = i * (sectorCount + 1);
float k2 = k1 + sectorCount + 1;
for(int j = 0; j < sectorCount; ++j, ++k1, ++k2) {
if (i != 0) {
if (frontFace == CCW) {
batch.addIndices(k1, k1 + 1, k2);
} else {
batch.addIndices(k1, k2, k1 + 1);
}
}
if (i != (stackCount - 1)) {
if (frontFace == CCW) {
batch.addIndices(k1 + 1, k2 + 1, k2);
} else {
batch.addIndices(k1 + 1, k2, k2 + 1);
}
}
}
}
}
Regarding the transformation matrices, it works as follows:
camera.getMVMatrix()
.setIdentity()
.translate(0, -150, -600)
.rotateY(clock()->getTime() * 0.5f);
State()
.setShader(shader)
.setShaderMatrix<MV>(camera.getMVMatrix())
.setShaderMatrix<PROJECTION>(camera.getProjectionMatrix())
.setShaderMatrix<NORMAL>(camera.getNormalMatrix())
.apply();
Finally, the light position is defined as vec3(0) in the fragment shader.
Note: As you can see, I'm using my own framework which provides among other things high level methods for building meshes and handling transformations. It's all straightforward stuff, proven to work as intended, but let me know if you need pointers to the source-code.
Update: The lighting part of the shader I used ended up being wrong, so I switched to another method.
But in essence, the solution I proposed in my answer is still valid (or at least it does the job of solving the "normal problem" when instancing is used and non-uniform scaling is avoided).
Here is a gist with the source-code. There is also an online WebGL demo.
The solution was relatively simple: there is no point in passing a normal-matrix to the shader.
Instead, the normal needs to be computed in the vertex shader:
v_normal = vec3(u_mv_matrix * a_matrix * vec4(a_normal, 0.0));
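For clarity, here is how that line slots into the vertex shader quoted above (the u_normal_matrix uniform then becomes unnecessary; as noted, this only holds while u_mv_matrix and a_matrix contain no non-uniform scaling):
void main() {
  vec4 transformedPosition = u_mv_matrix * a_matrix * a_position;
  v_position = transformedPosition;
  // Per-instance normal: rotate the attribute by the same matrices as the position,
  // with w = 0 so the translation part is ignored.
  v_normal = vec3(u_mv_matrix * a_matrix * vec4(a_normal, 0.0));
  v_color = a_color;
  v_coord = a_coord;
  gl_Position = u_projection_matrix * transformedPosition;
}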
I decided to follow the classic guide for writing basic GLSL water shaders using the sum-of-sines method. I attempted to implement it inside Processing 5, where I made a field of vertices in a PShape to make a mesh to mess with. I then overwrote the default shaders with my own vertex and fragment shaders, and I dropped in a directional light so I could actually see the normals. I made sure the directional light was movable as well so I could see if the normals work from all angles.
I got the waves to form height correctly and I had some form of normals working, but the normals are behaving really strangely. When my light passes across the center axis of my water plane, the normals seem to morph between the different waves and change based on the light angle. The gif I captured was too large to post inline, so I'm sure seeing it will explain it better than my words:
https://imgur.com/PCznL7U
You may need to view the link at full size to see the whole picture. Note how, as the light pans from left to right, the normals of the waves seem to morph between two sets. This is especially apparent as it crosses the center. It's as if the normals are inconsistent depending on the direction the object is being lit from.
The sphere in the middle is a normal sphere using the standard Processing shader. I left it there as reference to see the waves as well as confirm where my lighting was and that it was working fine.
Any ideas what I did wrong? I know I did some math incorrectly somewhere.
EDIT: It was recommended that I add the (lengthy) source code [which I should have done from the start].
Vertex Shader:
#define PROCESSING_LIGHT_SHADER
#define MAXWAVES 6
const float pi = 3.14159;
uniform mat4 transform;
uniform mat4 modelview;
uniform mat3 normalMatrix;
uniform float time; //Time since shader started
attribute vec4 position; //Position the vertex from Processing
attribute vec4 color; //Color of the vertex from Processing
attribute vec3 normal; //Normal of the vertex from Processing
attribute vec4 ambient;
attribute vec4 specular;
attribute vec4 emissive;
attribute float shininess;
varying vec4 vertColor; //Color passed on to fragment shader
varying vec4 backVertColor; //Color passed on to fragment shader
uniform float waveLength[MAXWAVES]; //Length of wave
uniform float speed[MAXWAVES]; //Cycle speed of wave
uniform float amplitude[MAXWAVES]; //Wave cycle height
uniform float xDirection[MAXWAVES];
uniform float yDirection[MAXWAVES]; //Flow vector of wave
uniform int lightCount;
uniform vec4 lightPosition[8];
uniform vec3 lightNormal[8];
uniform vec3 lightAmbient[8];
uniform vec3 lightDiffuse[8];
uniform vec3 lightSpecular[8];
uniform vec3 lightFalloff[8];
uniform vec2 lightSpot[8];
varying vec3 Normal;
varying vec3 FragPos;
varying vec3 Vec;
varying vec3 lightDir;
//Some constants that the processing shader used
const float zero_float = 0.0;
const float one_float = 1.0;
const vec3 zero_vec3 = vec3(0);
float falloffFactor(vec3 lightPos, vec3 vertPos, vec3 coeff) {
vec3 lpv = lightPos - vertPos;
vec3 dist = vec3(one_float);
dist.z = dot(lpv, lpv);
dist.y = sqrt(dist.z);
return one_float / dot(dist, coeff);
}
float spotFactor(vec3 lightPos, vec3 vertPos, vec3 lightNorm, float minCos, float spotExp) {
vec3 lpv = normalize(lightPos - vertPos);
vec3 nln = -one_float * lightNorm;
float spotCos = dot(nln, lpv);
return spotCos <= minCos ? zero_float : pow(spotCos, spotExp);
}
float lambertFactor(vec3 lightDir, vec3 vecNormal) {
return max(zero_float, dot(lightDir, vecNormal));
}
float blinnPhongFactor(vec3 lightDir, vec3 vertPos, vec3 vecNormal, float shine) {
vec3 np = normalize(vertPos);
vec3 ldp = normalize(lightDir - np);
return pow(max(zero_float, dot(ldp, vecNormal)), shine);
}
//Returns the height of a vertex given a single wave param
float WaveHeight(int waveNumber, float x, float y) {
vec2 direction = (vec2(xDirection[waveNumber], yDirection[waveNumber]));
float frequency = 2.0*pi/waveLength[waveNumber];
float phase = speed[waveNumber] * frequency;
float theta = dot(direction, vec2(x, y));
return amplitude[waveNumber] * sin(theta * frequency + time * phase);
}
//Returns height of a vertex given all the active waves
// and its current x/y value
float WaveSum(float x, float y)
{
float height = 0.0;
for(int i = 0; i < MAXWAVES; i++)
{
height += WaveHeight(i, x, y);
}
return height;
}
float getDy(int waveNumber, float x, float y) {
vec2 direction = (vec2(xDirection[waveNumber], yDirection[waveNumber]));
float frequency = 2.0*pi/waveLength[waveNumber];
float phase = speed[waveNumber] * frequency;
float theta = dot(direction, vec2(x, y));
float A = amplitude[waveNumber] * direction.y * frequency;
return A * cos(theta * frequency + time * phase);
}
float getDx(int waveNumber, float x, float y) {
vec2 direction = (vec2(xDirection[waveNumber], yDirection[waveNumber]));
float frequency = 2.0*pi/waveLength[waveNumber];
float phase = speed[waveNumber] * frequency;
float theta = dot(direction, vec2(x, y));
float A = amplitude[waveNumber] * direction.x * frequency;
return A * cos(theta * frequency + time * phase);
}
//Returns the normal vector for each vertex
vec3 getNormal(float x, float y) {
float dx = 0.0;
float dy = 0.0;
//Sum for each wave
for (int i = 0; i < MAXWAVES; i++) {
dx += getDx(i, x, y);
dy += getDy(i, x, y);
}
vec3 n = vec3(-dx, -dy, 1.0);
return normalize(n);
}
void main() {
vec4 pos = position; //Grab the position from Processing bc it's read only
pos.z = WaveSum(pos.x, pos.y);
gl_Position = transform * pos; //Get clipping matrix for view
vec3 ecVertex = vec3(modelview * pos);
// Normal vector in eye coordinates
vec3 Normal = getNormal(pos.x, pos.y);
vec3 ecNormal = normalize(normalMatrix * Normal);
vec3 ecNormalInv = ecNormal * -one_float;
// Light calculations
vec3 totalAmbient = vec3(0, 0, 0);
vec3 totalFrontDiffuse = vec3(0, 0, 0);
vec3 totalFrontSpecular = vec3(0, 0, 0);
vec3 totalBackDiffuse = vec3(0, 0, 0);
vec3 totalBackSpecular = vec3(0, 0, 0);
for (int i = 0; i < 8; i++) {
if (lightCount == i) break;
vec3 lightPos = lightPosition[i].xyz;
bool isDir = lightPosition[i].w < one_float;
float spotCos = lightSpot[i].x;
float spotExp = lightSpot[i].y;
vec3 lightDir;
float falloff;
float spotf;
if (isDir) {
falloff = one_float;
lightDir = -one_float * lightNormal[i];
} else {
falloff = falloffFactor(lightPos, ecVertex, lightFalloff[i]);
lightDir = normalize(lightPos - ecVertex);
}
spotf = spotExp > zero_float ? spotFactor(lightPos, ecVertex, lightNormal[i],
spotCos, spotExp)
: one_float;
if (any(greaterThan(lightAmbient[i], zero_vec3))) {
totalAmbient += lightAmbient[i] * falloff;
}
if (any(greaterThan(lightDiffuse[i], zero_vec3))) {
totalFrontDiffuse += lightDiffuse[i] * falloff * spotf *
lambertFactor(lightDir, ecNormal);
totalBackDiffuse += lightDiffuse[i] * falloff * spotf *
lambertFactor(lightDir, ecNormalInv);
}
if (any(greaterThan(lightSpecular[i], zero_vec3))) {
totalFrontSpecular += lightSpecular[i] * falloff * spotf *
blinnPhongFactor(lightDir, ecVertex, ecNormal, shininess);
totalBackSpecular += lightSpecular[i] * falloff * spotf *
blinnPhongFactor(lightDir, ecVertex, ecNormalInv, shininess);
}
}
// Calculating final color as result of all lights (plus emissive term).
// Transparency is determined exclusively by the diffuse component.
vertColor =
vec4(totalFrontDiffuse, 1) * color;
backVertColor =
vec4(totalBackDiffuse, 1) * color;
}
Fragment Shader:
#ifdef GL_ES
precision mediump float;
precision mediump int;
#endif
varying vec4 vertColor; //Color from vertshader
varying vec4 backVertColor; //Color from vertshader
void main() {
gl_FragColor = gl_FrontFacing ? vertColor : backVertColor;
}
I am trying to implement screen space reflection with DDA.
http://casual-effects.blogspot.jp/2014/08/screen-space-ray-tracing.html
But it's not working well.
Below is my shader code.
This is vertex shader code.
layout(location = 0) in vec4 position;
layout(location = 1) in vec4 color_0;
layout(location = 2) in vec3 normal;
uniform mat4 mtxL2W; // Local to World space.
uniform mat4 mtxW2C; // World to Clip space.
out vec4 varColor;
out vec3 varNormal;
void main()
{
gl_Position = mtxW2C * mtxL2W * position;
varColor = color_0;
varNormal = normalize(mtxL2W * vec4(normal, 0)).xyz;
}
This is fragment shader code.
in vec4 varColor;
in vec3 varNormal;
layout(location = 0) out vec4 outColor;
uniform sampler2D s0; // color
uniform sampler2D s1; // linear depth.
uniform mat4 mtxW2V; // World to View(Camera) space.
uniform mat4 mtxV2C; // View(Camera) to Clip space.
uniform mat4 mtxC2V; // Clip to View(Camera) space.
uniform mat4 mtxV2W; // View(Camera) to World space.
uniform vec4 camPos; // Camera position (World space).
uniform float nearPlaneZ;
uniform float maxDistance;
uniform float zThickness;
uniform int maxSteps;
uniform float stride;
float squaredLength(vec2 a, vec2 b)
{
a -= b;
return dot(a, a);
}
bool intersectsDepthBuffer(float z, float minZ, float maxZ)
{
z += zThickness;
return (maxZ >= z) && (minZ - zThickness <= z);
}
bool traceScreenSpaceRay(
vec3 csOrig,
vec3 csDir,
out vec2 hitPixel,
out vec3 hitPoint)
{
// Clip to the near plane.
float rayLength = (csOrig.z + csDir.z * maxDistance) < nearPlaneZ
? (nearPlaneZ - csOrig.z) / csDir.z
: maxDistance;
vec3 csEndPoint = csOrig + csDir * rayLength;
// Project into homogeneous clip space.
vec4 H0 = mtxV2C * vec4(csOrig, 1);
vec4 H1 = mtxV2C * vec4(csEndPoint, 1);
float k0 = 1.0 / H0.w;
float k1 = 1.0 / H1.w;
// The interpolated homogeneous version of the camera-space points.
vec3 Q0 = csOrig * k0;
vec3 Q1 = csEndPoint * k1;
// Screen space point.
vec2 P0 = H0.xy * k0;
vec2 P1 = H1.xy * k1;
// [-1, 1] -> [0, 1]
P0 = P0 * 0.5 + 0.5;
P1 = P1 * 0.5 + 0.5;
ivec2 texsize = textureSize(s0, 0);
P0 *= vec2(texsize.xy);
P1 *= vec2(texsize.xy);
P1.x = min(max(P1.x, 0), texsize.x);
P1.y = min(max(P1.y, 0), texsize.y);
// If the line is degenerate, make it cover at least one pixel to avoid handling zero-pixel extent as a special case later.
P1 += squaredLength(P0, P1) < 0.0001
? vec2(0.01, 0.01)
: vec2(0.0);
vec2 delta = P1 - P0;
// Permute so that the primary iteration is in x to collapse all quadrant-specific DDA cases later.
bool permute = false;
if (abs(delta.x) < abs(delta.y))
{
permute = true;
delta = delta.yx;
P0 = P0.yx;
P1 = P1.yx;
}
float stepDir = sign(delta.x);
float invdx = stepDir / delta.x;
// Track the derivatives of Q and k.
vec3 dQ = (Q1 - Q0) / invdx;
float dk = (k1 - k0) / invdx;
// y is slope.
// slope = (y1 - y0) / (x1 - x0)
vec2 dP = vec2(stepDir, delta.y / invdx);
// Adjust end condition for iteration direction
float end = P1.x * stepDir;
int stepCount = 0;
float prevZMaxEstimate = csOrig.z;
float rayZMin = prevZMaxEstimate;
float rayZMax = prevZMaxEstimate;
float sceneZMax = rayZMax + 100.0f;
dP *= stride;
dQ *= stride;
dk *= stride;
vec4 PQk = vec4(P0, Q0.z, k0);
vec4 dPQk = vec4(dP, dQ.z, dk);
vec3 Q = Q0;
for (;
((PQk.x * stepDir) <= end)
&& (stepCount < maxSteps)
&& !intersectsDepthBuffer(sceneZMax, rayZMin, rayZMax)
&& (sceneZMax != 0.0);
++stepCount)
{
rayZMin = prevZMaxEstimate;
rayZMax = (PQk.z + dPQk.z * 0.5) / (PQk.w + dPQk.w * 0.5);
prevZMaxEstimate = rayZMax;
if (rayZMin > rayZMax) {
float tmp = rayZMin;
rayZMin = rayZMax;
rayZMax = tmp;
}
hitPixel = permute ? PQk.yx : PQk.xy;
//hitPixel.y = texsize.y - hitPixel.y;
sceneZMax = texelFetch(s1, ivec2(hitPixel), 0).r;
PQk += dPQk;
}
// Advance Q based on the number of steps
Q.xy += dQ.xy * stepCount;
hitPoint = Q * (1.0f / PQk.w);
hitPoint = vec3(sceneZMax, rayZMin, rayZMax);
return intersectsDepthBuffer(sceneZMax, rayZMin, rayZMax);
}
void main()
{
vec3 normal = normalize(varNormal);
float linearDepth = texelFetch(s1, ivec2(gl_FragCoord.xy), 0).r;
ivec2 texsize = textureSize(s0, 0);
// Ray origin is camera origin.
vec3 rayOrg = camPos.xyz;
// Screen coordinate.
vec4 pos = vec4(gl_FragCoord.xy / texsize, 0, 1);
// [0, 1] -> [-1, 1]
pos.xy = pos.xy * 2.0 - 1.0;
// Screen-space -> Clip-space
pos.xy *= linearDepth;
// Clip-space -> View-space
pos = mtxC2V * pos;
pos.z = linearDepth;
// View-space -> World-space.
vec3 worldPos = (mtxV2W * vec4(pos.xyz, 1)).xyz;
// Compute ray direction.
// From ray origin to world position.
vec3 rayDir = normalize(worldPos - rayOrg);
// Compute reflection vector.
vec3 refDir = reflect(rayDir, normal);
// Reflection vector origin is world position.
vec3 refOrg = worldPos;
// Transform to view coordinate.
refOrg = (mtxW2V * vec4(refOrg, 1)).xyz;
refDir = (mtxW2V * vec4(refDir, 0)).xyz;
vec2 hitPixel = vec2(0, 0);
vec3 hitPoint = vec3(0, 0, 0);
// Trace screen space ray.
bool isIntersect = traceScreenSpaceRay(refOrg, refDir, hitPixel, hitPoint);
vec2 uv = hitPixel / texsize.xy;
if (uv.x > 1.0 || uv.x < 0.0f || uv.y > 1.0 || uv.y < 0.0) {
isIntersect = false;
}
if (isIntersect) {
outColor = varColor * texture(s0, uv);
}
else {
outColor = vec4(1, 1, 1, 1);
}
}
I think Q0.z and Q1.z are always 1.0.
So, I think dQ.z is also always 0.0.
And dk is always a negative value.
What is wrong?
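For what it's worth, the suspicion about Q0.z can be checked with a line of arithmetic. Assuming mtxV2C is a standard perspective matrix, the clip-space w component is the negated view-space depth, i.e. H0.w = -csOrig.z, so
Q0.z = csOrig.z * k0 = csOrig.z / H0.w = csOrig.z / (-csOrig.z) = -1
and likewise Q1.z = -1, which indeed makes dQ.z = 0 along the whole ray (up to sign, depending on the projection convention used).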
I wrote a little program to explain my problem simply. I'm trying to change the pixel positions of a picture using a pair of textures, where one texture's x component encodes the direction and the other encodes the velocity. The final objective is to use data from the CPU, where a Navier-Stokes fluid simulation is computed, to move the pixels in GLSL. The CPU code uses the Processing Java library.
I'm trying to understand what is buggy in my code, but I don't understand how the pixel translation works.
First I convert my direction into a color value from 0 to 255 on the CPU, then on the GPU I convert it back into a direction vector, multiply it by the velocity, and scale it into the 1x1 range, but that doesn't work... Sorry if my explanation is not really understandable, but my English is not really fluent.
link to the sketch
Processing :
PImage tex_velocity, tex_direction ;
PShader warping;
PImage img ;
int grid_w, grid_h ;
void setup() {
size(600,375,P2D);
// img = loadImage("pirate_small.jpg");
img = loadImage("puros_girl_small.jpg");
grid_w = 60 ;
grid_h = 37 ;
tex_velocity = createImage(grid_w,grid_h,RGB);
tex_direction = createImage(grid_w,grid_h,RGB);
warping = loadShader("shader/warp/rope_warp_frag.glsl");
noise_img(tex_velocity, 20, .1, .1); // max translate for the pixel
noise_img(tex_direction, 360, .1, .1); // degree direction
}
void draw() {
println(frameRate);
if(frameCount%30 == 0) {
noise_img(tex_velocity, 20, .1, .1); // max translate for the pixel
noise_img(tex_direction, 360, .1, .1); // degree direction
}
warping.set("mode", 0) ;
warping.set("texture",img);
warping.set("roof_component_colour",g.colorModeX);
warping.set("wh_ratio",1f/grid_w, 1f/grid_h);
warping.set("vel_texture",tex_velocity);
warping.set("dir_texture",tex_direction);
shader(warping);
image(img,0,0);
resetShader();
image(tex_velocity,5,5);
image(tex_direction,grid_w +15 ,5 );
}
float x_offset, y_offset ;
void noise_img(PImage dst, int max, float ratio_x, float ratio_y) {
noiseSeed((int)random(10000));
for(int x = 0 ; x < dst.width ; x++) {
x_offset += ratio_x ;
for(int y = 0 ; y < dst.height ; y++) {
y_offset += ratio_y ;
float v = map(noise(x_offset,y_offset),0,1,0,max);
v = (int)map(v,0,max,0,g.colorModeX);
int c = color(v,v,v,g.colorModeA) ;
dst.set(x,y,c);
}
}
}
GLSL
#ifdef GL_ES
precision mediump float;
precision mediump int;
#endif
#define PROCESSING_TEXTURE_SHADER
#define PI 3.1415926535897932384626433832795
varying vec4 vertTexCoord;
uniform sampler2D texture;
uniform int mode;
uniform float roof_component_colour;
uniform sampler2D vel_texture;
uniform sampler2D dir_texture;
uniform vec2 wh_ratio;
float map(float value, float start1, float stop1, float start2, float stop2) {
float result = start2 + (stop2 - start2) * ((value - start1) / (stop1 - start1));
return result;
}
vec2 cartesian_coord(float angle) {
float x = cos(angle);
float y = sin(angle);
return vec2(x,y);
}
vec2 translate(float fdir, float fvel) {
float angle_in_radian = map(fdir, 0, roof_component_colour, -PI, PI);
vec2 dir_cart = cartesian_coord(angle_in_radian);
return dir_cart *fvel ;
}
void main() {
vec2 ratio = gl_FragCoord.xy *wh_ratio;
vec4 vel = texture2D(vel_texture, ratio);
vec4 dir = texture2D(dir_texture, ratio);
// rendering picture ;
if(mode == 0) {
float direction = dir.x;
float velocity = vel.x;
vec2 translation = translate(direction,velocity);
// not bad, but totally wrong
// vec2 coord_dest = vertTexCoord.st +translation
vec2 coord_dest = vertTexCoord.st *ratio +translation ;
vec4 tex_colour = texture2D(texture, coord_dest);
gl_FragColor = tex_colour;
}
// velocity
if(mode == 1 ) {
gl_FragColor = texture2D(vel_texture, vertTexCoord.st);;
}
// direction force field
if(mode == 2) {
gl_FragColor = texture2D(dir_texture, vertTexCoord.st);;
}
}
The texture format is GL_RGBA8. This means each color channel is stored in one byte, which is an integral data type in the range from 0 to 255.
But when you read texels through the texture sampler, you will get a floating point value in the range from 0.0 to 1.0 (see glTexImage2D - GL_RGBA).
In the fragment shader you have to map the color channel (in [0, 1]), which you read from the texture sampler, to the range from -PI to PI. For this you can use the GLSL function mix, which does a linear interpolation between 2 values:
vec2 translate(float fdir, float fvel) // fdir, fvel in [0.0, 1.0]
{
float angle = mix(-PI, PI, fdir);
return vec2(cos(angle), sin(angle)) * fvel;
}
The texture coordinates are in range [0, 1]. You have to transform the translation to texture coordinates. For this you have to know the size of your image texture:
vec2 wh_ratio; // 1f/grid_w, 1f/grid_h
vec2 imageTexSize; // size of "texture"
vec2 scale = imageTexSize * wh_ratio;
vec2 coord_dest = vertTexCoord.st + translation / scale;
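Putting the two snippets together, a minimal sketch of the whole warp fragment shader could look like the following (imageTexSize is the extra uniform described above, holding the pixel size of the image; the grid-texture lookup is kept exactly as in the original shader):
#ifdef GL_ES
precision mediump float;
#endif
#define PROCESSING_TEXTURE_SHADER
#define PI 3.1415926535897932384626433832795

varying vec4 vertTexCoord;
uniform sampler2D texture;      // image to warp
uniform sampler2D vel_texture;  // velocity field
uniform sampler2D dir_texture;  // direction field
uniform vec2 wh_ratio;          // 1/grid_w, 1/grid_h
uniform vec2 imageTexSize;      // size of "texture" in pixels, set from Processing

vec2 translate(float fdir, float fvel) {   // fdir, fvel in [0.0, 1.0]
    float angle = mix(-PI, PI, fdir);
    return vec2(cos(angle), sin(angle)) * fvel;
}

void main() {
    vec2 gridCoord = gl_FragCoord.xy * wh_ratio;          // as in the original shader
    float direction = texture2D(dir_texture, gridCoord).x;
    float velocity  = texture2D(vel_texture, gridCoord).x;
    vec2 scale = imageTexSize * wh_ratio;
    vec2 coord_dest = vertTexCoord.st + translate(direction, velocity) / scale;
    gl_FragColor = texture2D(texture, coord_dest);
}
On the Processing side the new uniform would be set like the others, for example warping.set("imageTexSize", (float) img.width, (float) img.height);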
Thanks for the help, now I know the picture size in GLSL is [0, 1] :) but it doesn't work as expected. I use the rendering size, or the size of the picture that must be warped, so my idea is that vec2 imageTexSize is img.width and img.height, passed from Processing for imageTexSize:
uniform vec2 imageTexSize;
.../...
vec2 scale = imageTexSize * wh_ratio;
vec2 coord_dest = vertTexCoord.st + translation / scale;
the result is the top image
and when I try this code
vec2 ratio = gl_FragCoord.xy *wh_ratio;
vec2 coord_dest = vertTexCoord.st +translation / ratio ;
the result is the middle image
and when I try this one
vec2 coord_dest = vertTexCoord.st +translation / wh_ratio ;
the result is the bottom image
Sorry, I posted a single image because I cannot post more than one pic with my beginner reputation :)
I fixed the display bug for the full window display, but now it's the y coordinate that is reversed for the translation. That's weird because the velocity and direction textures are not reversed in y; the reversed-y effect comes from the interpretation. It happens in all 3 modes. I tried to reverse coord_dest.y like this:
float coord_dest_y = mix(coord_dest.y, vertTexCoord.t, 0);
gl_FragColor = texture2D(texture, vec2(coord_dest.x, coord_dest_y));
but that changes nothing.
I tried: float coord_dest_y = mix(coord_dest.y, 0, vertTexCoord.t); but that makes something really strange, so that doesn't work either...
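As a side note, mix(coord_dest.y, vertTexCoord.t, 0) simply evaluates to coord_dest.y (mix(a, b, t) = a*(1-t) + b*t), which is why it changed nothing. The usual way to mirror a coordinate inside the [0, 1] range is to subtract it from 1.0, roughly:
// Mirror the y texture coordinate inside [0, 1] (sketch only; whether the flip belongs
// here or on the translation's y component depends on where the inversion comes from).
float coord_dest_y = 1.0 - coord_dest.y;
gl_FragColor = texture2D(texture, vec2(coord_dest.x, coord_dest_y));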
Here is the full GLSL code:
#ifdef GL_ES
precision mediump float;
precision mediump int;
#endif
#define PROCESSING_TEXTURE_SHADER
#define PI 3.1415926535897932384626433832795
varying vec4 vertTexCoord;
uniform sampler2D texture;
uniform int mode;
uniform sampler2D vel_texture;
uniform sampler2D dir_texture;
uniform vec2 wh_grid_ratio;
uniform vec2 wh_renderer_ratio;
vec2 cartesian_coord(float angle) {
float x = cos(angle);
float y = sin(angle);
return vec2(x,y);
}
vec2 translate(float fdir, float fvel) {
//float angle = mix(PI, -PI,fdir);
float angle = mix(fdir, PI, -PI);
return cartesian_coord(angle) *fvel ;
}
void main() {
vec2 ratio = gl_FragCoord.xy *wh_renderer_ratio;
vec4 vel = texture2D(vel_texture, ratio);
vec4 dir = texture2D(dir_texture, ratio);
float direction = dir.x;
float velocity = vel.x;
vec2 translation = translate(direction,velocity);
// mode 0 perfect
// mode 1 interesting
// mode 2 bizarre, but fun
// mode 500 warp image direction
// mode 501 warp image velocity
// perfect
if(mode == 0) {
vec2 scale = gl_FragCoord.xy *wh_renderer_ratio;
vec2 coord_dest = vertTexCoord.st +translation /scale;
float coord_dest_y = mix(coord_dest.y, vertTexCoord.t, 0);
// float coord_dest_y = mix(coord_dest.y, 0, vertTexCoord.t);
gl_FragColor = texture2D(texture, vec2(coord_dest.x, coord_dest_y));
// gl_FragColor = texture2D(texture, coord_dest);
}
// interesting
if(mode == 1) {
vec2 scale = gl_FragCoord.xy *wh_grid_ratio;
vec2 coord_dest = vertTexCoord.st +translation /scale ;
gl_FragColor = texture2D(texture, coord_dest);
}
// bizarre
if(mode == 2) {
vec2 coord_dest = vertTexCoord.st +translation /wh_grid_ratio;
gl_FragColor = texture2D(texture, coord_dest);
}
// velocity
if(mode == 500 ) {
vec4 tex_colour = texture2D(vel_texture, vertTexCoord.st);;
gl_FragColor = tex_colour;
}
// direction force field
if(mode == 501) {
vec4 tex_colour = texture2D(dir_texture, vertTexCoord.st);;
gl_FragColor = tex_colour;
}
}
And the resulting picture is here, showing the y error in the final warping.
I'm currently learning about shaders and graphics pipelines and I was wondering if a pixel shader could be used to create, for example, a triangle or a more complex shape like a zigzag.
Could this be done without the use of a vertex shader?
The answer is yes! You can draw anything you want in a pixel shader by implementing a ray tracer. Here is some sample code:
uniform vec3 lightposition;
uniform vec3 cameraposition;
uniform float motion;
struct Ray
{
vec3 org;
vec3 dir;
};
struct Sphere
{
vec3 Center;
float Radius;
vec4 Color;
float MatID;
float id;
};
struct Intersection
{
float t;
vec3 normal;
vec3 hitpos;
vec4 color;
float objectid;
float materialID;
};
bool sphereIntersect(Ray eyeray, Sphere sp, inout Intersection intersection)
{
float t1=0.0;
eyeray.dir = normalize(eyeray.dir);
float B = 2.0 *( ( eyeray.dir.x * (eyeray.org.x - sp.Center.x ) )+ ( eyeray.dir.y *(eyeray.org.y - sp.Center.y )) + ( eyeray.dir.z * (eyeray.org.z - sp.Center.z ) ));
float C = pow((eyeray.org.x - sp.Center.x),2.0) + pow((eyeray.org.y - sp.Center.y),2.0) + pow((eyeray.org.z - sp.Center.z),2.0) - pow(sp.Radius,2.0);
float D = B*B - 4.0*C ;
if(D>=0.0)
{
t1= (-B - pow(D, .5)) / 2.0;
if (t1 < 0.0)
{
t1 = (-B + pow(D, .5)) / 2.0;
if( t1 < 0.0)
return false;
else
{
if (t1 > 1e-2 && t1 < intersection.t)
{
intersection.t = t1;
intersection.materialID = sp.MatID;
intersection.hitpos = eyeray.org + t1 * eyeray.dir;
intersection.normal = normalize(intersection.hitpos - sp.Center);
intersection.color = sp.Color;
intersection.objectid = sp.id;
return true;
}
}
}
else
{
if(t1 > 1e-2 && t1 < intersection.t)
{
intersection.t = t1;
intersection.materialID = sp.MatID;
intersection.hitpos = eyeray.org + t1 * eyeray.dir;
intersection.normal = normalize(intersection.hitpos - sp.Center);
intersection.color = sp.Color;
intersection.objectid = sp.id;
return true;
}
}
}
else
return false;
}
void findIntersection(Ray ray, inout Intersection intersection)
{
intersection.t = 1e10;
intersection.materialID = 0.0;
Sphere sp1 = Sphere(vec3(-2.0,0.0,-5.0),1.5,vec4(0.5, 0.1, 0.5, 1.0),1.0,1.0);
Sphere sp2 = Sphere(vec3( 2.0,0.0,-5.0),1.5,vec4(0.5,0.5,0.1,1.0),1.0,2.0);
Sphere sp3 = Sphere(vec3( 0.0,3.0,-5.0),1.5,vec4(0.1,0.5,0.5,1.0),1.0,3.0);
sphereIntersect(ray, sp1, intersection);
sphereIntersect(ray, sp2, intersection);
sphereIntersect(ray, sp3, intersection);
}
vec4 CalculateColor(vec4 ambient ,float shiness,vec3 intersection, vec3 normal);
Ray ReflectedRay(vec3 Normal,Ray EyeRay,vec3 intersection);
vec4 GetColor(Ray ray)
{
Ray currentRay = ray;
vec4 finalColor = vec4(0.0);
for(int bounce = 1 ; bounce < 4 ; bounce++)
{
Intersection intersection;
intersection.objectid = 0.0;
findIntersection(currentRay, intersection);
if (intersection.materialID == 0.0) // We could not find any object. We return the background color
return finalColor;
else if (intersection.materialID == 1.0)
{
vec3 lv = lightposition - intersection.hitpos;
vec3 nlv = normalize(lv);
Intersection shadowIntersection;
Ray shadowRay = Ray(intersection.hitpos, nlv);
shadowIntersection.objectid = intersection.objectid;
findIntersection(shadowRay, shadowIntersection);
if (shadowIntersection.t > length(lv) || shadowIntersection.t < 1)
{
finalColor = finalColor + float(1.0f/bounce) * CalculateColor(intersection.color, 100.0, intersection.hitpos, intersection.normal);;
}
else
{
finalColor = finalColor + float(1.0f/bounce) * intersection.color;
}
//currentRay = Ray(intersection.hitpos, reflect(ray.dir, intersection.normal));
currentRay = ReflectedRay(intersection.normal,ray,intersection.hitpos);
}
}
return finalColor;
}
Ray createRay(float ScreenWidth,float ScreenHeight)
{
Ray toret;
toret.org = cameraposition;
float left = -3.0;
float bottom = -3.0;
float screenZ = -3.0;
float su = -3.0 + gl_FragCoord.x/ScreenWidth * 6; //gl_FragCoord gives you the current x and y component of your current pixel
float sv = -3.0 + gl_FragCoord.y/ScreenHeight * 6;
float sz = screenZ - cameraposition.z;
toret.dir = normalize(vec3(su,sv,sz));
//vec2 p = (gl_FragCoord.xy/resolution) * 2 ;
//toret.dir = normalize(vec3(p, -1.0));
return toret;
}
Ray ReflectedRay(vec3 Normal,Ray EyeRay,vec3 intersection)
{
Ray reflection;
reflection.dir = EyeRay.dir - 2 * Normal * dot(EyeRay.dir,Normal);
reflection.org = intersection + reflection.dir * 0.01;
return reflection;
}
vec4 CalculateColor(vec4 ambient ,float shiness,vec3 intersection, vec3 normal)
{
//intensities
vec3 Idifuse = vec3(1, 1, 1);
vec3 Iambient = vec3(0.8, 0.8, 0.8);
vec3 Ispecular = vec3(1,1,1);
vec3 kDifuse = vec3(0.5,0.5,0.5); //for difuse
vec3 kSpecular = vec3(0.75, 0.6, 0.3); //for specular
vec3 kAmbient = vec3(0.1, 0.2, 0.3); //for ambient
//vec4 kSpecular = vec4(0.5,0.5,0.5,1.0);
//vec4 kDifuse = vec4(0.5,0.5,0.5,1.0);
vec3 ColorDifuse = max(dot(normal, lightposition), 0.0) * kDifuse; // declared as vec3 since kDifuse is a vec3; this value is not used below
//vector calculations
vec3 l = normalize(lightposition - intersection); //light vector
vec3 n = normalize(normal); // normalVector of point in the sea
vec3 v = normalize(cameraposition - intersection); // view Vector
vec3 h = normalize(v + l); // half Vector
vec3 difuse = kDifuse * Idifuse * max(0.0, dot(n, l));
vec3 specular = kSpecular * Ispecular * pow(max(0.0, dot(n, h)), shiness);
vec3 color = ambient.xyz + difuse + specular;
return vec4(color,1.0);
}
void main()
{
if(lightposition == vec3(0.0,0.0,0.0))
gl_FragColor = vec4(0.0,1.0,0.0,1.0);
Ray eyeray = createRay(600.0,600.0);
gl_FragColor = GetColor(eyeray);
}
A useful technique is to use a fragment shader (I'm an OpenGL guy) with point sprites. Point sprites in OpenGL 3+ get rendered as squares of pixels, with the size of the square (gl_PointSize) set by the vertex shader.
In the fragment shader, gl_PointCoord has the x and y coords of this particular pixel within the square, from 0.0 to 1.0. So you can draw a circle by testing whether gl_PointCoord.x and gl_PointCoord.y are both within the radius and discarding if not, a framed square by checking that .x and .y are within some distance of the edge, and so on. It's classic maths: define a function(x, y) which returns true for points within the shape you want, and false if not.
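As a concrete illustration, a minimal fragment shader that turns each point sprite into a filled circle might look like this (sketch only; the fill color is hard-coded):
#version 330 core

out vec4 fragColor;

void main()
{
    // gl_PointCoord runs from 0.0 to 1.0 across the sprite; recentre it around the middle.
    vec2 p = gl_PointCoord - vec2(0.5);
    if (dot(p, p) > 0.25)     // outside the inscribed circle of radius 0.5
        discard;
    fragColor = vec4(1.0, 0.5, 0.0, 1.0);   // hard-coded orange fill
}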
The Orange book, OpenGL Shading Language 3rd edition, has some examples (which in turn come from RenderMan) of how to draw such shapes.
Hope this helps.
What you want is called procedural textures or procedural shading.
You can draw different shapes with simple (and not so simple) math.
Take a look at some examples here:
http://glslsandbox.com/
You can find more on Google.
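For example, a short fragment shader in the style of the glslsandbox snippets that draws a procedural zigzag stripe could look like this (resolution stands for the viewport size in pixels, which those demos expose as a uniform):
#ifdef GL_ES
precision mediump float;
#endif

uniform vec2 resolution;   // viewport size in pixels

void main()
{
    vec2 uv = gl_FragCoord.xy / resolution;       // [0, 1] screen coordinates
    // Triangle wave in x gives the zigzag centre line.
    float zig = abs(fract(uv.x * 6.0) - 0.5);     // 6 zigzag periods across the screen
    float d = abs(uv.y - (0.4 + 0.2 * zig));      // vertical distance to the zigzag curve
    float stripe = step(d, 0.02);                 // 1.0 inside the stripe, 0.0 outside
    gl_FragColor = vec4(vec3(stripe), 1.0);
}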