SSAO sampling from depth map causing pixel flicker issue - glsl

I was trying to implement the Crytek SSAO technique based on a shader shown in the Appendix of the following paper:
Shader from paper:
However, when implementing and running the shader, it kept giving me this graining look and I cannot seem to identify what might have caused this particular issue.
My implementation of the shader:
#version 450
layout(binding = 3) uniform sampler2D texNoise;
layout(binding = 6) uniform sampler2D depthMap;
layout(location = 0) in vec3 fragColor;
layout(location = 1) in vec2 uvCoords;
layout(binding = 5) uniform UniformBufferObject {
mat4 model;
mat4 view;
mat4 proj;
float time;
}camera;
layout(binding = 4) uniform KernelSample {
vec3 samples[64];
mat4 projection;
vec4 camera_eye;
vec4 camera_direction;
float z_far;
}kernelsamples;
int kernelSize = 64;
layout(location = 0) out vec4 outColor;
vec4 ambient_occlusion;
float ec_depth(in vec2 tc)
{
float buffer_z = texture(depthMap, tc).x;
return camera.proj[3][2] / (-2.0 * buffer_z + 1.0 - camera.proj[2][2]);
}
const vec2 window = vec2(2560.0, 1440.0);
void main()
{
vec2 tc_depths = gl_FragCoord.xy / uvCoords;
float ec_depth_negated = -ec_depth(tc_depths);
vec3 wc_positions = kernelsamples.camera_eye.xyz + kernelsample.camera_direction * ec_depth_negated / kernelsamples.z_far;
ambient_occlusion.a = 0.0f;
const float radius = 10.0f;
const int samples = 64;
float projection_scale_xy = 1.0 / ec_depth_negated;
float projection_scale_z = 100.0 / kernelsamples.z_far * projection_scale_xy;
float scene_depth = texture(depthMap, tc_depths).x;
vec2 inverted_random_texture_size = 1.0 / vec2(textureSize(texNoise, 0));
vec2 tc_random_texture = gl_FragCoord.xy * inverted_random_texture_size;
vec3 random_direction = texture(texNoise, tc_random_texture).xyz;
random_direction = normalize(random_direction * 2.0 - 1.0);
for(int i = 0; i < samples; i++)
{
vec3 sample_random_direction = texture(texNoise, vec2(float(i) *
inverted_random_texture_size.x, float(i / textureSize(texNoise, 0).x) *
inverted_random_texture_size.y)).xyz;
sample_random_direction = sample_random_direction * 2.0 - 1.0;
sample_random_direction = reflect(sample_random_direction, random_direction);
vec3 tc_sample_pos = vec3(tc_depths.xy, scene_depth)
+ vec3(sample_random_direction.xy * projection_scale_xy,
sample_random_direction.z * scene_depth * projection_scale_z) * radius;
float sample_depth = texture(depthMap, tc_sample_pos.xy).x;
ambient_occlusion.a += float(sample_depth > tc_sample_pos.z);
}
ambient_occlusion.a /= float(kernelSize);
outColor = ambient_occlusion;
}
C++
// Projection
ubo.proj = glm::perspective(glm::radians(45.0f), swapChainExtent.width /
(float)swapChainExtent.height, 0.1f, 1000.0f);
ubo.proj[1][1] *= -1;
KernelSample ks{};
.....
ks.cameraEye = glm::vec4(cameraPosition, 0.0f);
ks.cameraDirection = glm::vec4(cameraPosition + cameraCenter, 1.0f);
RenderPass
VkAttachmentDescription colorAttachment{};
colorAttachment.format = VK_FORMAT_R16_SFLOAT;
colorAttachment.samples = VK_SAMPLE_COUNT_1_BIT;
colorAttachment.loadOp = VK_ATTACHMENT_LOAD_OP_CLEAR;
colorAttachment.storeOp = VK_ATTACHMENT_STORE_OP_STORE;
colorAttachment.stencilLoadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE;
colorAttachment.stencilStoreOp = VK_ATTACHMENT_STORE_OP_DONT_CARE;
colorAttachment.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
colorAttachment.finalLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
VkAttachmentDescription depthAttachment = {};
depthAttachment.format = format;
depthAttachment.samples = VK_SAMPLE_COUNT_1_BIT;
depthAttachment.loadOp = VK_ATTACHMENT_LOAD_OP_CLEAR;
depthAttachment.storeOp = VK_ATTACHMENT_STORE_OP_STORE;
depthAttachment.stencilLoadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE;
depthAttachment.stencilStoreOp = VK_ATTACHMENT_STORE_OP_DONT_CARE;
depthAttachment.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
depthAttachment.finalLayout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL;
VkAttachmentReference depthAttachmentRef{};
depthAttachmentRef.attachment = 1;
depthAttachmentRef.layout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL;
VkAttachmentReference colorAttatchmentRef{};
colorAttatchmentRef.attachment = 0;
colorAttatchmentRef.layout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
VkSubpassDescription subpass{};
subpass.pipelineBindPoint = VK_PIPELINE_BIND_POINT_GRAPHICS;
subpass.colorAttachmentCount = 1;
subpass.pColorAttachments = &colorAttatchmentRef;
subpass.pDepthStencilAttachment = &depthAttachmentRef;
VkSubpassDependency dependency{};
dependency.srcSubpass = VK_SUBPASS_EXTERNAL;
dependency.dstSubpass = 0;
dependency.srcStageMask = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT | VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT;
dependency.srcAccessMask = 0;
dependency.dstStageMask = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT | VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT;
dependency.dstAccessMask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT | VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT;
std::array<VkAttachmentDescription, 2> attachments = { colorAttachment, depthAttachment };
VkRenderPassCreateInfo renderPassInfo{};
renderPassInfo.sType = VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO;
renderPassInfo.attachmentCount = static_cast<uint32_t>(attachments.size());
renderPassInfo.pAttachments = attachments.data();
renderPassInfo.subpassCount = 1;
renderPassInfo.pSubpasses = &subpass;
renderPassInfo.dependencyCount = 1;
renderPassInfo.pDependencies = &dependency;
Result
SSAO shader output

Related

Why is my Open GL Compute Shader so slow?

I have been building an OpenGL compute shader that implements ray tracing. Currently it just computes the pixel color by casting a ray against an array of triangles.
#version 430 core
struct Triangle {
vec3 vertex1;
vec3 vertex2;
vec3 vertex3;
vec3 color1;
vec3 color2;
vec3 color3;
vec3 normal1;
vec3 normal2;
vec3 normal3;
vec3 edge1;
vec3 edge2;
};
layout (std430, binding = 0) readonly buffer TriangleBuffer {
int numTriangles;
Triangle triangles[];
};
layout (std430, binding = 1, column_major) buffer CameraBuffer {
vec3 cameraPosition;
mat4 view;
mat4 projection;
mat4 inverseViewProjection;
};
layout (rgba8, binding = 2) writeonly uniform image2D outputImage;
layout (local_size_x = 1, local_size_y = 1, local_size_z = 1) in;
vec3 getBarycentricCoords(int triangleIndex, vec3 closestIntersectionPoint) {
vec3 v0 = triangles[triangleIndex].vertex2 - triangles[triangleIndex].vertex1;
vec3 v1 = triangles[triangleIndex].vertex3 - triangles[triangleIndex].vertex1;
vec3 v2 = closestIntersectionPoint - triangles[triangleIndex].vertex1;
float d00 = dot(v0, v0);
float d01 = dot(v0, v1);
float d11 = dot(v1, v1);
float d20 = dot(v2, v0);
float d21 = dot(v2, v1);
float denom = d00 * d11 - d01 * d01;
float b1 = (d11 * d20 - d01 * d21) / denom;
float b2 = (d00 * d21 - d01 * d20) / denom;
float b0 = 1.0f - b1 - b2;
return vec3(b0, b1, b2);
}
vec3 getTriangleColor(int triangleIndex, vec3 closestIntersectionPoint) {
vec3 barycentric = getBarycentricCoords(triangleIndex, closestIntersectionPoint);
vec3 triangleColor = barycentric.x * triangles[triangleIndex].color1 + barycentric.y * triangles[triangleIndex].color2 + barycentric.z * triangles[triangleIndex].color3;
return triangleColor;
}
bool rayTriangleIntersection(vec3 rayOrigin, vec3 rayDirection, int triangleIndex, out vec3 intersectionPoint) {
vec3 h = cross(rayDirection, triangles[triangleIndex].edge2);
float a = dot(triangles[triangleIndex].edge1, h);
if (a > -0.00001 && a < 0.00001) {
return false;
}
float f = 1.0 / a;
vec3 s = rayOrigin - triangles[triangleIndex].vertex1;
float u = f * dot(s, h);
if (u < 0.0 || u > 1.0) {
return false;
}
vec3 q = cross(s, triangles[triangleIndex].edge1);
float v = f * dot(rayDirection, q);
if (v < 0.0 || u + v > 1.0) {
return false;
}
float t = f * dot(triangles[triangleIndex].edge2, q);
if (t > 0.00001) {
intersectionPoint = rayOrigin + rayDirection * t;
return true;
}
return false;
}
vec3 unProject(vec3 win, mat4 model, mat4 proj, vec4 viewport) {
vec4 tmp = vec4(win, 1);
tmp.x = (tmp.x - viewport[0]) / viewport[2];
tmp.y = (tmp.y - viewport[1]) / viewport[3];
tmp.x = tmp.x * 2 - 1;
tmp.y = tmp.y * 2 - 1;
vec4 obj = inverseViewProjection * tmp;
obj /= obj.w;
return obj.xyz;
}
void main() {
ivec2 pixelCoord = ivec2(gl_GlobalInvocationID.xy);
vec4 viewport = vec4(0, 0, vec2(imageSize(outputImage)).xy);
vec3 near = vec3(pixelCoord.x, pixelCoord.y, -1);
vec3 far = vec3(pixelCoord.x, pixelCoord.y, 0.9518f);
vec3 rayOrigin = unProject(near, view, projection, viewport);
vec3 rayWorldFar = unProject(far, view, projection, viewport);
vec3 rayDirection = normalize(rayWorldFar - rayOrigin);
vec3 intersectionPoint;
vec3 closestIntersectionPoint = vec3(0,0,0);
float closestIntersectionDistance = 999999999.0f;
vec3 finalColor = vec3(0,0,0);
bool intersectionFound = false;
for (int triangleIndex = 0; triangleIndex < numTriangles; triangleIndex++) {
if (rayTriangleIntersection(rayOrigin, rayDirection, triangleIndex, intersectionPoint)) {
float intersectionDistance = distance(intersectionPoint, rayOrigin);
if (intersectionDistance < closestIntersectionDistance) {
closestIntersectionDistance = intersectionDistance;
closestIntersectionPoint = intersectionPoint;
finalColor = getTriangleColor(triangleIndex, closestIntersectionPoint);
intersectionFound = true;
}
}
}
if (intersectionFound) {
imageStore(outputImage, pixelCoord, vec4(finalColor, 1.0f));
}
else
imageStore(outputImage, pixelCoord, vec4(0));
}
However when running the shader I only get 30fps. There is a significant bottleneck in the code. This is running with only 20 triangles.
What optimizations can I make to increase the performance of the code? Why is there a bottleneck?
I managed to more than double my framerate by making the following modifications:
Change layout to a higher value
for this I used GL_MAX_COMPUTE_WORK_GROUP_INVOCATIONS
GLint glMaxComputeWorkGroupInvocations = 0;
glGetIntegerv(GL_MAX_COMPUTE_WORK_GROUP_INVOCATIONS, &glMaxComputeWorkGroupInvocations);
LIGHTING_SHADER_LOCAL_SIZE_Y = LIGHTING_SHADER_LOCAL_SIZE_X = sqrt(glMaxComputeWorkGroupInvocations);
and update the layout sizes:
layout (local_size_x = ${LIGHTING_SHADER_LOCAL_SIZE_X}, local_size_y = ${LIGHTING_SHADER_LOCAL_SIZE_Y}, local_size_z = 1) in;
Get pixelCoord based on group_id and local_id
ivec3 groupId = ivec3(gl_WorkGroupID);
ivec3 localId = ivec3(gl_LocalInvocationID);
ivec3 globalId = ivec3(gl_GlobalInvocationID);
ivec3 coords = groupId * ivec3(gl_WorkGroupSize) + localId;
ivec2 pixelCoord = ivec2(coords.xy);
Update glDispatchCompute
glDispatchCompute(windowWidth / LIGHTING_SHADER_LOCAL_SIZE_X, windowHeight / LIGHTING_SHADER_LOCAL_SIZE_Y, 1);

How to compute vertex normals for a triangle mesh in OpenGl?

To give background, I'm currently generating a surface of revolution with it's center of mass centered at (0,0,0) in the WCS. The surface of a revolution is y=x^2 where 0 <= x <= 1.
I have converted this surface of revolution into a virtual buffer object, and can render it successfully on the screen. However, I cannot seem to get Blinn-Phong shading to work on the object. I'm fairly sure that the problem lies in my normal calculation.
This is the stub that creates the object and calculates normals:
GLfloat vp[49 * 49 * 18]; // array of vertex points
int _i = 50;
int _j = 50;
float vertices[50][50][3];
for (int i = 0; i < _i; i++) {
float fT = (float) i / (_i - 1);
float fY = fT;
float fZ = sqrt(fT);
for (int j = 0; j < _j; j++) {
float fS = 2 * M_PI * (float) j / (_j - 1);
vertices[i][j][0] = fZ * cos(fS);
vertices[i][j][1] = fY - 0.5; // offset by 0.5 to make center of mass the center
vertices[i][j][2] = fZ * sin(fS);
}
}
int curr = 0;
for (int i = 0; i < _i - 1; i++) {
for (int j = 0; j < _j - 1; j++) {
vp[curr++] = vertices[i][j][0];
vp[curr++] = vertices[i][j][1];
vp[curr++] = vertices[i][j][2];
vp[curr++] = vertices[i+1][j][0];
vp[curr++] = vertices[i+1][j][1];
vp[curr++] = vertices[i+1][j][2];
vp[curr++] = vertices[i][j+1][0];
vp[curr++] = vertices[i][j+1][1];
vp[curr++] = vertices[i][j+1][2];
vp[curr++] = vertices[i+1][j][0];
vp[curr++] = vertices[i+1][j][1];
vp[curr++] = vertices[i+1][j][2];
vp[curr++] = vertices[i+1][j+1][0];
vp[curr++] = vertices[i+1][j+1][1];
vp[curr++] = vertices[i+1][j+1][2];
vp[curr++] = vertices[i][j+1][0];
vp[curr++] = vertices[i][j+1][1];
vp[curr++] = vertices[i][j+1][2];
}
}
GLuint vao;
glGenVertexArrays (1, &vao); // generating and binding is common pattern in OpenGL
glBindVertexArray (vao); // basically setting up memory and associating it
GLuint points_vbo;
glGenBuffers(1, &points_vbo);
glBindBuffer(GL_ARRAY_BUFFER, points_vbo);
glBufferData(GL_ARRAY_BUFFER, 49 * 49 * 18 * sizeof (GLfloat), vp, GL_STATIC_DRAW);
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 0, NULL);
glEnableVertexAttribArray(0);
GLfloat normals[49 * 49 * 18 / 3];
curr = 0;
for (int i = 0; i < 49 * 49 * 18; i += 9){
int Ux = vp[i+3] - vp[i];
int Uy = vp[i+4] - vp[i+1];
int Uz = vp[i+5] - vp[i+2];
int Vx = vp[i+6] - vp[i];
int Vy = vp[i+7] - vp[i+1];
int Vz = vp[i+8] - vp[i+2];
normals[curr++] = Uy * Vz - Uz * Vy;
normals[curr++] = Uz * Vx - Ux * Vz;
normals[curr++] = Ux * Vy - Uy * Vx;
}
GLuint normals_vbo;
glGenBuffers(1, &normals_vbo);
glBindBuffer(GL_ARRAY_BUFFER, normals_vbo);
glBufferData(GL_ARRAY_BUFFER, 49 * 49 * 18 / 3 * sizeof(GLfloat), normals, GL_STATIC_DRAW);
glVertexAttribPointer(1, 3, GL_FLOAT, GL_FALSE, 0, NULL);
glEnableVertexAttribArray(1);
This is my vertex shader:
#version 410
layout (location = 0) in vec3 vtxPosition;
layout (location = 1) in vec3 normal;
uniform mat4 proj_mat, view_mat, model_mat;
out vec3 Normal;
out vec3 fpos;
void main () {
gl_Position = proj_mat * view_mat * model_mat * vec4(vtxPosition, 1.0);
fpos = vec3(model_mat * vec4(vtxPosition, 1.0));
Normal = normal;
}
And lastly my fragment shader:
#version 410
// Define INPUTS from fragment shader
//uniform mat4 view_mat;
in vec3 Normal;
in vec3 fpos;
// These come from the VAO for texture coordinates.
in vec2 texture_coords;
// And from the uniform outputs for the textures setup in main.cpp.
uniform sampler2D texture00;
uniform sampler2D texture01;
out vec4 fragment_color; //RGBA color
const vec3 lightPos = vec3(0.0,0.0,5.0);
const vec3 diffColor = vec3(1.0,0.5,0.0);
const vec3 specColor = vec3(1.0,1.0,1.0);
void main () {
vec3 normal = normalize(Normal);
vec3 lightDir = normalize(lightPos - fpos);
float lamb = max(dot(lightDir, normal), 0.0);
float spec = 0.0;
if (lamb > 0.0) {
vec3 refDir = reflect(-lightDir, normal);
vec3 viewDir = normalize(-fpos);
float specAngle = max(dot(refDir, viewDir), 0.0);
spec = pow(specAngle, 4.0);
}
fragment_color = vec4(lamb * diffColor + spec * specColor, 1.0);
}
This is the current rendering of the object:
You have to specify 1 normal attribute for each vertex coordinate. A vertex coordinate and its attributes form a tuple.
Furthermore you have to use the data type float rather than int, for computing the normal vectors:
GLfloat normals[49 * 49 * 18];
curr = 0;
for (int i = 0; i < 49 * 49 * 18; i += 9){
float Ux = vp[i+3] - vp[i];
float Uy = vp[i+4] - vp[i+1];
float Uz = vp[i+5] - vp[i+2];
float Vx = vp[i+6] - vp[i];
float Vy = vp[i+7] - vp[i+1];
float Vz = vp[i+8] - vp[i+2];
float nx = Uy * Vz - Uz * Vy;
float ny = Uz * Vx - Ux * Vz;
float nz = Ux * Vy - Uy * Vx;
for (int j = 0; j < 3; ++j) {
normals[curr++] = nx;
normals[curr++] = ny;
normals[curr++] = nz;
}
}
glBufferData(GL_ARRAY_BUFFER, 49 * 49 * 18 * sizeof(GLfloat), normals, GL_STATIC_DRAW);
I recommend to invert the normal vector of the back faces for a double sided light model:
vec3 normal = normalize(Normal);
vec3 viewDir = normalize(-fpos);
if (dot(normal, viewDir) < 0.0)
normal *= -1.0;
Fragment shader:
#version 410
// Define INPUTS from fragment shader
//uniform mat4 view_mat;
in vec3 Normal;
in vec3 fpos;
// These come from the VAO for texture coordinates.
in vec2 texture_coords;
// And from the uniform outputs for the textures setup in main.cpp.
uniform sampler2D texture00;
uniform sampler2D texture01;
out vec4 fragment_color; //RGBA color
const vec3 lightPos = vec3(0.0,0.0,5.0);
const vec3 diffColor = vec3(1.0,0.5,0.0);
const vec3 specColor = vec3(1.0,1.0,1.0);
void main () {
vec3 normal = normalize(Normal);
vec3 viewDir = normalize(-fpos);
if (dot(normal, viewDir) < 0.0)
normal *= -1.0;
vec3 lightDir = normalize(lightPos - fpos);
float lamb = max(dot(lightDir, normal), 0.0);
float spec = 0.0;
if (lamb > 0.0) {
vec3 refDir = reflect(-lightDir, normal);
float specAngle = max(dot(refDir, viewDir), 0.0);
spec = pow(specAngle, 4.0);
}
fragment_color = vec4(lamb * diffColor + spec * specColor, 1.0);
}

How to render to texture in Vulkan?

I need render to textrue in my project.
Pass1 : Draw a color square that color is (0.5,0.5,0.5,1.0) and render target is a texture.
Pass2 : Draw a textured square that use pass1's texture and render target is a surface.
Expected result:
But I got strange result as follow:
Create image using the following:
VkImage hImageTexture = VK_NULL_HANDLE;
VkImageCreateInfo imageCreateInfo = {0};
imageCreateInfo.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;
imageCreateInfo.pNext = nullptr;
imageCreateInfo.flags = 0;
imageCreateInfo.imageType = VK_IMAGE_TYPE_2D;
imageCreateInfo.format = VK_FORMAT_B8G8R8A8_UNORM;
imageCreateInfo.extent.width = width;
imageCreateInfo.extent.height = height;
imageCreateInfo.extent.depth = 1;
imageCreateInfo.mipLevels = 1;
imageCreateInfo.arrayLayers = 1;
imageCreateInfo.samples = VK_SAMPLE_COUNT_1_BIT;
imageCreateInfo.tiling = VK_IMAGE_TILING_OPTIMAL;
imageCreateInfo.usage =
VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT |
VK_IMAGE_USAGE_TRANSFER_DST_BIT |
VK_IMAGE_USAGE_SAMPLED_BIT;
imageCreateInfo.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
imageCreateInfo.queueFamilyIndexCount = 0;
imageCreateInfo.pQueueFamilyIndices = nullptr;
imageCreateInfo.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
//
VkResult res = vkCreateImage(hDevice , &imageCreateInfo , nullptr , &hImageTexture);
Create image view using the following:
VkImageView hImageViewTexture = VK_NULL_HANDLE;
VkImageViewCreateInfo imageViewCreateInfo = {0};
imageViewCreateInfo.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO;
imageViewCreateInfo.pNext = nullptr;
imageViewCreateInfo.flags = 0;
imageViewCreateInfo.image = hImageTexture;
imageViewCreateInfo.viewType = VK_IMAGE_VIEW_TYPE_2D;
imageViewCreateInfo.format = VK_FORMAT_B8G8R8A8_UNORM;
imageViewCreateInfo.components.r = VK_COMPONENT_SWIZZLE_R;
imageViewCreateInfo.components.g = VK_COMPONENT_SWIZZLE_G;
imageViewCreateInfo.components.b = VK_COMPONENT_SWIZZLE_B;
imageViewCreateInfo.components.a = VK_COMPONENT_SWIZZLE_A;
imageViewCreateInfo.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
imageViewCreateInfo.subresourceRange.baseMipLevel = 0;
imageViewCreateInfo.subresourceRange.levelCount = 1;
imageViewCreateInfo.subresourceRange.baseArrayLayer = 0;
imageViewCreateInfo.subresourceRange.layerCount = 1;
VkResult res=vkCreateImageView(
hDevice,
&imageViewCreateInfo,
NULL,
&hImageViewTexture);
Render loop as follows:
//Pass1
//Clear texture color
vkCmdPipelineBarrier();//Transition layout to VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL
vkCmdClearColorImage();//Clear color(0.0 , 0.0 ,0.0 , 1.0)
vkCmdPipelineBarrier();//Transition layout to VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL
//Change texture's barrier
VkImageMemoryBarrier imageMemoryBarrierForOutput = {0};
imageMemoryBarrierForOutput.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
imageMemoryBarrierForOutput.pNext = nullptr;
imageMemoryBarrierForOutput.srcAccessMask = 0;
imageMemoryBarrierForOutput.dstAccessMask =
VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
imageMemoryBarrierForOutput.oldLayout = VK_IMAGE_LAYOUT_UNDEFINED;
imageMemoryBarrierForOutput.newLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
imageMemoryBarrierForOutput.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
imageMemoryBarrierForOutput.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
imageMemoryBarrierForOutput.image = hImageTexture;
imageMemoryBarrierForOutput.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
imageMemoryBarrierForOutput.subresourceRange.baseMipLevel = 0;
imageMemoryBarrierForOutput.subresourceRange.levelCount = 1;
imageMemoryBarrierForOutput.subresourceRange.baseArrayLayer = 0;
imageMemoryBarrierForOutput.subresourceRange.layerCount = 1;
vkCmdPipelineBarrier(
hCommandBuffer,
VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT,
VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT,
0,
0,
nullptr,
0,
nullptr,
1,
&imageMemoryBarrierForOutput);
//draw
vkCmdBeginRenderPass();
vkCmdSetViewport();
vkCmdSetScissor();
vkCmdBindPipeline();
vkCmdBindDescriptorSets();
vkCmdBindVertexBuffers();
vkCmdBindIndexBuffer();
vkCmdDrawIndexed();
vkCmdEndRenderPass();
//
//Pass2
//Clear surface color
vkCmdPipelineBarrier();//Transition layout to VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL
vkCmdClearColorImage();//Clear color(0.5 , 0.5 ,1.0 , 1.0)
vkCmdPipelineBarrier();//Transition layout to VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL
//Change texture's barrier
VkImageMemoryBarrier imageMemoryBarrierForInput = {0};
imageMemoryBarrierForInput.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
imageMemoryBarrierForInput.pNext = nullptr;
imageMemoryBarrierForInput.srcAccessMask = 0;
imageMemoryBarrierForInput.dstAccessMask =
VK_ACCESS_SHADER_READ_BIT |
VK_ACCESS_INPUT_ATTACHMENT_READ_BIT;
imageMemoryBarrierForInput.oldLayout = VK_IMAGE_LAYOUT_UNDEFINED;
imageMemoryBarrierForInput.newLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
imageMemoryBarrierForInput.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
imageMemoryBarrierForInput.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
imageMemoryBarrierForInput.image = hImageTexture;
imageMemoryBarrierForInput.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
imageMemoryBarrierForInput.subresourceRange.baseMipLevel = 0;
imageMemoryBarrierForInput.subresourceRange.levelCount = 1;
imageMemoryBarrierForInput.subresourceRange.baseArrayLayer = 0;
imageMemoryBarrierForInput.subresourceRange.layerCount = 1;
vkCmdPipelineBarrier(
hCommandBuffer,
VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT,
VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT,
0,
0,
nullptr,
0,
nullptr,
1,
&imageMemoryBarrierForInput);
//draw
vkCmdBeginRenderPass();
vkCmdSetViewport();
vkCmdSetScissor();
vkCmdBindPipeline();
vkCmdBindDescriptorSets();
vkCmdBindVertexBuffers();
vkCmdBindIndexBuffer();
vkCmdDrawIndexed();
vkCmdEndRenderPass();
Pass1 vertex shader:
#version 450
layout (location=0) in vec4 inPos;
layout (location=0) out vec4 outPos;
void main(void)
{
outPos = float4(inPos.xy , 0.0 , 1.0);
gl_Position = outPos;
}
Pass1 fragment shader:
#version 450
layout (location=0) in vec4 inPos;
layout (location=0) out vec4 outColor;
void main(void)
{
outColor = float4(0.5 , 0.5 , 0.5 , 1.0);
}
Pass2 vertex shader:
#version 450
layout (location=0) in vec4 inPos;
layout (location=1) in vec2 inUV;
//
layout (location=0) out vec4 outPos;
layout (location=1) out vec2 outUV;
//
void main(void)
{
outPos = inPos;
outUV = inUV;
//
gl_Position=outPos;
}
Pass2 fragment shader:
#version 450
//
layout (location=0) in vec4 inPos;
layout (location=1) in vec2 inUV;
//
layout (binding=1) uniform sampler2D inTex;
layout (location=0) out vec4 outColor;
void main(void)
{
outColor = texture(inTex , inUV);
}
If I change image tiling to VK_IMAGE_TILING_LINEAR then I get the following:
If change image tiling to VK_IMAGE_TILING_LINEAR without clear(pass1's clear), the result was correct!
What mistakes have I made?
EDIT:
I add some code that how i change texture's barrier in render loop .
EDIT:
I set dependency when create render pass as follow
VkSubpassDependency subpassDependency[2];
subpassDependency[0].srcSubpass = VK_SUBPASS_EXTERNAL;
subpassDependency[0].dstSubpass = 0;
subpassDependency[0].srcStageMask = VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT;
subpassDependency[0].dstStageMask = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
subpassDependency[0].srcAccessMask = VK_ACCESS_SHADER_READ_BIT;
subpassDependency[0].dstAccessMask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
subpassDependency[0].dependencyFlags = VK_DEPENDENCY_BY_REGION_BIT;
//
subpassDependency[1].srcSubpass = 0;
subpassDependency[1].dstSubpass = VK_SUBPASS_EXTERNAL;
subpassDependency[1].srcStageMask = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
subpassDependency[1].dstStageMask = VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT;
subpassDependency[1].srcAccessMask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
subpassDependency[1].dstAccessMask = VK_ACCESS_SHADER_READ_BIT;
subpassDependency[1].dependencyFlags = VK_DEPENDENCY_BY_REGION_BIT;
but nothing to changed.

Screen Space Reflections Artifacts

When I implemented SSR I encountered the problem of artifacts. Below I present the code and screenshots.
Fragment SSR shader:
#version 330 core
uniform sampler2D normalMap; // in view space
uniform sampler2D colorMap;
uniform sampler2D reflectionStrengthMap;
uniform sampler2D positionMap; // in view space
uniform mat4 projection;
uniform vec3 skyColor = vec3(0.1, 0, 0.5);
in vec2 texCoord;
layout (location = 0) out vec4 fragColor;
const int binarySearchCount = 10;
const int rayMarchCount = 30;
const float step = 0.05;
const float LLimiter = 0.2;
const float minRayStep = 0.2;
vec3 getPosition(in vec2 texCoord) {
return texture(positionMap, texCoord).xyz;
}
vec2 binarySearch(inout vec3 dir, inout vec3 hitCoord, inout float dDepth) {
float depth;
vec4 projectedCoord;
for(int i = 0; i < binarySearchCount; i++) {
projectedCoord = projection * vec4(hitCoord, 1.0);
projectedCoord.xy /= projectedCoord.w;
projectedCoord.xy = projectedCoord.xy * 0.5 + 0.5;
depth = getPosition(projectedCoord.xy).z;
dDepth = hitCoord.z - depth;
dir *= 0.5;
if(dDepth > 0.0)
hitCoord += dir;
else
hitCoord -= dir;
}
projectedCoord = projection * vec4(hitCoord, 1.0);
projectedCoord.xy /= projectedCoord.w;
projectedCoord.xy = projectedCoord.xy * 0.5 + 0.5;
return vec2(projectedCoord.xy);
}
vec2 rayCast(vec3 dir, inout vec3 hitCoord, out float dDepth) {
dir *= step;
for (int i = 0; i < rayMarchCount; i++) {
hitCoord += dir;
vec4 projectedCoord = projection * vec4(hitCoord, 1.0);
projectedCoord.xy /= projectedCoord.w;
projectedCoord.xy = projectedCoord.xy * 0.5 + 0.5;
float depth = getPosition(projectedCoord.xy).z;
dDepth = hitCoord.z - depth;
if((dir.z - dDepth) < 1.2 && dDepth <= 0.0) return binarySearch(dir, hitCoord, dDepth);
}
return vec2(-1.0);
}
void main() {
float reflectionStrength = texture(reflectionStrengthMap, texCoord).r;
if (reflectionStrength == 0) {
fragColor = texture(colorMap, texCoord);
return;
}
vec3 normal = texture(normalMap, texCoord).xyz;
vec3 viewPos = getPosition(texCoord);
// Reflection vector
vec3 reflected = normalize(reflect(normalize(viewPos), normalize(normal)));
// Ray cast
vec3 hitPos = viewPos;
float dDepth;
vec2 coords = rayCast(reflected * max(-viewPos.z, minRayStep), hitPos, dDepth);
float L = length(getPosition(coords) - viewPos);
L = clamp(L * LLimiter, 0, 1);
float error = 1 - L;
vec3 color = texture(colorMap, coords.xy).rgb * error;
if (coords.xy != vec2(-1.0)) {
fragColor = mix(texture(colorMap, texCoord), vec4(color, 1.0), reflectionStrength);
return;
}
fragColor = mix(texture(colorMap, texCoord), vec4(skyColor, 1.0), reflectionStrength);
}
Result without blackout (without * error):
Result with blackout:
Note: blue is filled specifically to see artifacts
And one more question, what is the best way to add fresnel without harming scene?

How to generate OBJ mesh file if I used GLSL

I want to generate a OBJ file from a code, which using GLSL file to generate mesh, now I can get the vertex information from the code, but how can I extract the triangle information from the .geom.glsl file and export it into a OBJ file?
Also, is there any helper function do to so? if not, how should I write the code to get the points and triangle information from the geom.glsl file?
Here attached the geom.glsl:
#version 400 core
#extension GL_EXT_geometry_shader4 : enable
layout(lines, invocations = 1) in;
layout(triangle_strip, max_vertices = 100) out;
uniform mat4 matLightView;
uniform mat4 matViewProjection;
uniform vec3 lightPos;
uniform vec3 camPos;
uniform int isExplicit;
in vec4 VertPosition[];
in vec4 VertColor[];
in vec3 VertNormal[];
in vec3 VertTexture[];
in float VertLengthTotal[];
in float VertLengthFromBeginning[];
out vec3 GeomNormal;
out vec2 GeomTexCoords;
out float GeomDiffuse;
out float GeomThickness;
out vec4 texCoordA;
out vec4 texCoordB;
const float PI2 = 2 * 3.141592654;
void main()
{
// for(int i=0; i<gl_VerticesIn-1; ++i)
for (int i = 0; i<gl_in.length ()-1; ++i)
{
//Reading Data
vec4 posS = VertPosition[i];
vec4 posT = VertPosition[i+1];
vec3 vS = VertColor[i].xyz;
vec3 vT = VertColor[i+1].xyz;
vec3 tS = VertTexture[i].xyz;
vec3 tT = VertTexture[i+1].xyz;
float thickS = VertColor[i].w;
float thickT = VertColor[i+1].w;
//Computing
vec3 v11 = normalize(vS);
vec3 v12 = normalize(cross(vS, tS));
vec3 v21 = normalize(vT);
vec3 v22 = normalize(cross(vT, tT));
float rS = max(0.0001, thickS);
float rT = max(0.0001, thickT);
int pS = 10;
int pT = 10;
int forMax = 16;
//Light Pos
vec4 lPos = normalize(vec4(-lightPos.x, -lightPos.y, -lightPos.z, 1));
vec3 L = normalize(lPos.xyz);
for(int k=0; k<=forMax; ++k)
{
float angle = k * (PI2 / forMax);
vec3 newPS = posS.xyz + (v11 * sin(angle) + v12 * cos(angle)) * rS;
vec3 newPT = posT.xyz + (v21 * sin(angle) + v22 * cos(angle)) * rT;
float scale = 1.0f;
float texX = float(k) / float(forMax);
float edgeLength = length(posS - posT);
float sTexY = (VertLengthFromBeginning[i] * scale);
float tTexY = (VertLengthFromBeginning[i+1] * scale);
//Source Vertex
vec3 N = normalize(posS.xyz - newPS);
texCoordB = matLightView * vec4(newPS, 1);
GeomNormal = N;
GeomThickness = rS;
GeomDiffuse = rS < 0.0005 ? 0.0f : max(dot(N, L), 0.0);
GeomTexCoords = vec2(texX, sTexY);
gl_Position = matViewProjection * vec4(newPS, 1);
EmitVertex();
//Target Vertex
N = normalize(posT.xyz - newPT);
texCoordB = matLightView * vec4(newPT, 1);
GeomNormal = N;
GeomThickness = rT;
GeomDiffuse = rT < 0.0005 ? 0.0f : max(dot(N, L), 0.0);
GeomTexCoords = vec2(texX, tTexY);
gl_Position = matViewProjection * vec4(newPT, 1);
EmitVertex();
}
}
EndPrimitive();
}
And the vert.glsl:
#version 400 core
#define VERT_POSITION 0
#define VERT_NORMAL 1
#define VERT_COLOR 2
#define VERT_TEXTURE 3
layout(location = VERT_POSITION) in vec4 Position;
layout(location = VERT_NORMAL) in vec4 Normal;
layout(location = VERT_COLOR) in vec4 Color;
layout(location = VERT_TEXTURE) in vec4 Texture;
out vec4 VertPosition;
out vec3 VertNormal;
out vec3 VertTexture;
out vec4 VertColor;
out float VertLengthFromBeginning;
out float VertLengthTotal;
uniform mat4 matModel;
void main()
{
VertPosition = matModel * Position;
VertNormal = Normal.xyz; // Direction
VertColor = Color; // V from PTF, VertColor.w = thick
VertTexture = Texture.xyz; // Tangent
VertLengthFromBeginning = Normal.w; // Global Texture Coordinates
VertLengthTotal = Texture.w; // total length of chain
}
Lots of Thanks!!