How to render to texture in Vulkan? - c++

I need render to textrue in my project.
Pass1 : Draw a color square that color is (0.5,0.5,0.5,1.0) and render target is a texture.
Pass2 : Draw a textured square that use pass1's texture and render target is a surface.
Expected result:
But I got strange result as follow:
Create image using the following:
VkImage hImageTexture = VK_NULL_HANDLE;
VkImageCreateInfo imageCreateInfo = {0};
imageCreateInfo.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;
imageCreateInfo.pNext = nullptr;
imageCreateInfo.flags = 0;
imageCreateInfo.imageType = VK_IMAGE_TYPE_2D;
imageCreateInfo.format = VK_FORMAT_B8G8R8A8_UNORM;
imageCreateInfo.extent.width = width;
imageCreateInfo.extent.height = height;
imageCreateInfo.extent.depth = 1;
imageCreateInfo.mipLevels = 1;
imageCreateInfo.arrayLayers = 1;
imageCreateInfo.samples = VK_SAMPLE_COUNT_1_BIT;
imageCreateInfo.tiling = VK_IMAGE_TILING_OPTIMAL;
imageCreateInfo.usage =
VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT |
VK_IMAGE_USAGE_TRANSFER_DST_BIT |
VK_IMAGE_USAGE_SAMPLED_BIT;
imageCreateInfo.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
imageCreateInfo.queueFamilyIndexCount = 0;
imageCreateInfo.pQueueFamilyIndices = nullptr;
imageCreateInfo.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
//
VkResult res = vkCreateImage(hDevice , &imageCreateInfo , nullptr , &hImageTexture);
Create image view using the following:
VkImageView hImageViewTexture = VK_NULL_HANDLE;
VkImageViewCreateInfo imageViewCreateInfo = {0};
imageViewCreateInfo.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO;
imageViewCreateInfo.pNext = nullptr;
imageViewCreateInfo.flags = 0;
imageViewCreateInfo.image = hImageTexture;
imageViewCreateInfo.viewType = VK_IMAGE_VIEW_TYPE_2D;
imageViewCreateInfo.format = VK_FORMAT_B8G8R8A8_UNORM;
imageViewCreateInfo.components.r = VK_COMPONENT_SWIZZLE_R;
imageViewCreateInfo.components.g = VK_COMPONENT_SWIZZLE_G;
imageViewCreateInfo.components.b = VK_COMPONENT_SWIZZLE_B;
imageViewCreateInfo.components.a = VK_COMPONENT_SWIZZLE_A;
imageViewCreateInfo.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
imageViewCreateInfo.subresourceRange.baseMipLevel = 0;
imageViewCreateInfo.subresourceRange.levelCount = 1;
imageViewCreateInfo.subresourceRange.baseArrayLayer = 0;
imageViewCreateInfo.subresourceRange.layerCount = 1;
VkResult res=vkCreateImageView(
hDevice,
&imageViewCreateInfo,
NULL,
&hImageViewTexture);
Render loop as follows:
//Pass1
//Clear texture color
vkCmdPipelineBarrier();//Transition layout to VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL
vkCmdClearColorImage();//Clear color(0.0 , 0.0 ,0.0 , 1.0)
vkCmdPipelineBarrier();//Transition layout to VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL
//Change texture's barrier
VkImageMemoryBarrier imageMemoryBarrierForOutput = {0};
imageMemoryBarrierForOutput.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
imageMemoryBarrierForOutput.pNext = nullptr;
imageMemoryBarrierForOutput.srcAccessMask = 0;
imageMemoryBarrierForOutput.dstAccessMask =
VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
imageMemoryBarrierForOutput.oldLayout = VK_IMAGE_LAYOUT_UNDEFINED;
imageMemoryBarrierForOutput.newLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
imageMemoryBarrierForOutput.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
imageMemoryBarrierForOutput.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
imageMemoryBarrierForOutput.image = hImageTexture;
imageMemoryBarrierForOutput.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
imageMemoryBarrierForOutput.subresourceRange.baseMipLevel = 0;
imageMemoryBarrierForOutput.subresourceRange.levelCount = 1;
imageMemoryBarrierForOutput.subresourceRange.baseArrayLayer = 0;
imageMemoryBarrierForOutput.subresourceRange.layerCount = 1;
vkCmdPipelineBarrier(
hCommandBuffer,
VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT,
VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT,
0,
0,
nullptr,
0,
nullptr,
1,
&imageMemoryBarrierForOutput);
//draw
vkCmdBeginRenderPass();
vkCmdSetViewport();
vkCmdSetScissor();
vkCmdBindPipeline();
vkCmdBindDescriptorSets();
vkCmdBindVertexBuffers();
vkCmdBindIndexBuffer();
vkCmdDrawIndexed();
vkCmdEndRenderPass();
//
//Pass2
//Clear surface color
vkCmdPipelineBarrier();//Transition layout to VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL
vkCmdClearColorImage();//Clear color(0.5 , 0.5 ,1.0 , 1.0)
vkCmdPipelineBarrier();//Transition layout to VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL
//Change texture's barrier
VkImageMemoryBarrier imageMemoryBarrierForInput = {0};
imageMemoryBarrierForInput.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
imageMemoryBarrierForInput.pNext = nullptr;
imageMemoryBarrierForInput.srcAccessMask = 0;
imageMemoryBarrierForInput.dstAccessMask =
VK_ACCESS_SHADER_READ_BIT |
VK_ACCESS_INPUT_ATTACHMENT_READ_BIT;
imageMemoryBarrierForInput.oldLayout = VK_IMAGE_LAYOUT_UNDEFINED;
imageMemoryBarrierForInput.newLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
imageMemoryBarrierForInput.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
imageMemoryBarrierForInput.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
imageMemoryBarrierForInput.image = hImageTexture;
imageMemoryBarrierForInput.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
imageMemoryBarrierForInput.subresourceRange.baseMipLevel = 0;
imageMemoryBarrierForInput.subresourceRange.levelCount = 1;
imageMemoryBarrierForInput.subresourceRange.baseArrayLayer = 0;
imageMemoryBarrierForInput.subresourceRange.layerCount = 1;
vkCmdPipelineBarrier(
hCommandBuffer,
VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT,
VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT,
0,
0,
nullptr,
0,
nullptr,
1,
&imageMemoryBarrierForInput);
//draw
vkCmdBeginRenderPass();
vkCmdSetViewport();
vkCmdSetScissor();
vkCmdBindPipeline();
vkCmdBindDescriptorSets();
vkCmdBindVertexBuffers();
vkCmdBindIndexBuffer();
vkCmdDrawIndexed();
vkCmdEndRenderPass();
Pass1 vertex shader:
#version 450
layout (location=0) in vec4 inPos;
layout (location=0) out vec4 outPos;
void main(void)
{
outPos = float4(inPos.xy , 0.0 , 1.0);
gl_Position = outPos;
}
Pass1 fragment shader:
#version 450
layout (location=0) in vec4 inPos;
layout (location=0) out vec4 outColor;
void main(void)
{
outColor = float4(0.5 , 0.5 , 0.5 , 1.0);
}
Pass2 vertex shader:
#version 450
layout (location=0) in vec4 inPos;
layout (location=1) in vec2 inUV;
//
layout (location=0) out vec4 outPos;
layout (location=1) out vec2 outUV;
//
void main(void)
{
outPos = inPos;
outUV = inUV;
//
gl_Position=outPos;
}
Pass2 fragment shader:
#version 450
//
layout (location=0) in vec4 inPos;
layout (location=1) in vec2 inUV;
//
layout (binding=1) uniform sampler2D inTex;
layout (location=0) out vec4 outColor;
void main(void)
{
outColor = texture(inTex , inUV);
}
If I change image tiling to VK_IMAGE_TILING_LINEAR then I get the following:
If change image tiling to VK_IMAGE_TILING_LINEAR without clear(pass1's clear), the result was correct!
What mistakes have I made?
EDIT:
I add some code that how i change texture's barrier in render loop .
EDIT:
I set dependency when create render pass as follow
VkSubpassDependency subpassDependency[2];
subpassDependency[0].srcSubpass = VK_SUBPASS_EXTERNAL;
subpassDependency[0].dstSubpass = 0;
subpassDependency[0].srcStageMask = VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT;
subpassDependency[0].dstStageMask = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
subpassDependency[0].srcAccessMask = VK_ACCESS_SHADER_READ_BIT;
subpassDependency[0].dstAccessMask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
subpassDependency[0].dependencyFlags = VK_DEPENDENCY_BY_REGION_BIT;
//
subpassDependency[1].srcSubpass = 0;
subpassDependency[1].dstSubpass = VK_SUBPASS_EXTERNAL;
subpassDependency[1].srcStageMask = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
subpassDependency[1].dstStageMask = VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT;
subpassDependency[1].srcAccessMask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
subpassDependency[1].dstAccessMask = VK_ACCESS_SHADER_READ_BIT;
subpassDependency[1].dependencyFlags = VK_DEPENDENCY_BY_REGION_BIT;
but nothing to changed.

Related

SSAO sampling from depth map causing pixel flicker issue

I was trying to implement the Crytek SSAO technique based on a shader shown in the Appendix of the following paper:
Shader from paper:
However, when implementing and running the shader, it kept giving me this graining look and I cannot seem to identify what might have caused this particular issue.
My implementation of the shader:
#version 450
layout(binding = 3) uniform sampler2D texNoise;
layout(binding = 6) uniform sampler2D depthMap;
layout(location = 0) in vec3 fragColor;
layout(location = 1) in vec2 uvCoords;
layout(binding = 5) uniform UniformBufferObject {
mat4 model;
mat4 view;
mat4 proj;
float time;
}camera;
layout(binding = 4) uniform KernelSample {
vec3 samples[64];
mat4 projection;
vec4 camera_eye;
vec4 camera_direction;
float z_far;
}kernelsamples;
int kernelSize = 64;
layout(location = 0) out vec4 outColor;
vec4 ambient_occlusion;
float ec_depth(in vec2 tc)
{
float buffer_z = texture(depthMap, tc).x;
return camera.proj[3][2] / (-2.0 * buffer_z + 1.0 - camera.proj[2][2]);
}
const vec2 window = vec2(2560.0, 1440.0);
void main()
{
vec2 tc_depths = gl_FragCoord.xy / uvCoords;
float ec_depth_negated = -ec_depth(tc_depths);
vec3 wc_positions = kernelsamples.camera_eye.xyz + kernelsample.camera_direction * ec_depth_negated / kernelsamples.z_far;
ambient_occlusion.a = 0.0f;
const float radius = 10.0f;
const int samples = 64;
float projection_scale_xy = 1.0 / ec_depth_negated;
float projection_scale_z = 100.0 / kernelsamples.z_far * projection_scale_xy;
float scene_depth = texture(depthMap, tc_depths).x;
vec2 inverted_random_texture_size = 1.0 / vec2(textureSize(texNoise, 0));
vec2 tc_random_texture = gl_FragCoord.xy * inverted_random_texture_size;
vec3 random_direction = texture(texNoise, tc_random_texture).xyz;
random_direction = normalize(random_direction * 2.0 - 1.0);
for(int i = 0; i < samples; i++)
{
vec3 sample_random_direction = texture(texNoise, vec2(float(i) *
inverted_random_texture_size.x, float(i / textureSize(texNoise, 0).x) *
inverted_random_texture_size.y)).xyz;
sample_random_direction = sample_random_direction * 2.0 - 1.0;
sample_random_direction = reflect(sample_random_direction, random_direction);
vec3 tc_sample_pos = vec3(tc_depths.xy, scene_depth)
+ vec3(sample_random_direction.xy * projection_scale_xy,
sample_random_direction.z * scene_depth * projection_scale_z) * radius;
float sample_depth = texture(depthMap, tc_sample_pos.xy).x;
ambient_occlusion.a += float(sample_depth > tc_sample_pos.z);
}
ambient_occlusion.a /= float(kernelSize);
outColor = ambient_occlusion;
}
C++
// Projection
ubo.proj = glm::perspective(glm::radians(45.0f), swapChainExtent.width /
(float)swapChainExtent.height, 0.1f, 1000.0f);
ubo.proj[1][1] *= -1;
KernelSample ks{};
.....
ks.cameraEye = glm::vec4(cameraPosition, 0.0f);
ks.cameraDirection = glm::vec4(cameraPosition + cameraCenter, 1.0f);
RenderPass
VkAttachmentDescription colorAttachment{};
colorAttachment.format = VK_FORMAT_R16_SFLOAT;
colorAttachment.samples = VK_SAMPLE_COUNT_1_BIT;
colorAttachment.loadOp = VK_ATTACHMENT_LOAD_OP_CLEAR;
colorAttachment.storeOp = VK_ATTACHMENT_STORE_OP_STORE;
colorAttachment.stencilLoadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE;
colorAttachment.stencilStoreOp = VK_ATTACHMENT_STORE_OP_DONT_CARE;
colorAttachment.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
colorAttachment.finalLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
VkAttachmentDescription depthAttachment = {};
depthAttachment.format = format;
depthAttachment.samples = VK_SAMPLE_COUNT_1_BIT;
depthAttachment.loadOp = VK_ATTACHMENT_LOAD_OP_CLEAR;
depthAttachment.storeOp = VK_ATTACHMENT_STORE_OP_STORE;
depthAttachment.stencilLoadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE;
depthAttachment.stencilStoreOp = VK_ATTACHMENT_STORE_OP_DONT_CARE;
depthAttachment.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
depthAttachment.finalLayout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL;
VkAttachmentReference depthAttachmentRef{};
depthAttachmentRef.attachment = 1;
depthAttachmentRef.layout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL;
VkAttachmentReference colorAttatchmentRef{};
colorAttatchmentRef.attachment = 0;
colorAttatchmentRef.layout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
VkSubpassDescription subpass{};
subpass.pipelineBindPoint = VK_PIPELINE_BIND_POINT_GRAPHICS;
subpass.colorAttachmentCount = 1;
subpass.pColorAttachments = &colorAttatchmentRef;
subpass.pDepthStencilAttachment = &depthAttachmentRef;
VkSubpassDependency dependency{};
dependency.srcSubpass = VK_SUBPASS_EXTERNAL;
dependency.dstSubpass = 0;
dependency.srcStageMask = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT | VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT;
dependency.srcAccessMask = 0;
dependency.dstStageMask = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT | VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT;
dependency.dstAccessMask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT | VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT;
std::array<VkAttachmentDescription, 2> attachments = { colorAttachment, depthAttachment };
VkRenderPassCreateInfo renderPassInfo{};
renderPassInfo.sType = VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO;
renderPassInfo.attachmentCount = static_cast<uint32_t>(attachments.size());
renderPassInfo.pAttachments = attachments.data();
renderPassInfo.subpassCount = 1;
renderPassInfo.pSubpasses = &subpass;
renderPassInfo.dependencyCount = 1;
renderPassInfo.pDependencies = &dependency;
Result
SSAO shader output

Vulkan Dynamic Buffer With Different Texture Per Object

I am currently using a dynamic buffer to store the uniforms of multiple objects and update them individually without changing the shader. The problem is that when i am adding a VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER to my descriptor the render is this (I can't post images due to low reputation)! Just the color of the background of the texture. This is my initialization of the descriptor().
descriptorWrites[0].sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
descriptorWrites[0].dstSet = descriptorSet;
descriptorWrites[0].pNext = NULL;
descriptorWrites[0].dstBinding = 0;
descriptorWrites[0].dstArrayElement = 0;
descriptorWrites[0].descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC;
descriptorWrites[0].descriptorCount = 1;
descriptorWrites[0].pBufferInfo = &bufferInfo;
descriptorWrites[1].sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
descriptorWrites[1].dstSet = descriptorSet;
descriptorWrites[1].pNext = NULL;
descriptorWrites[1].dstBinding = 1;
descriptorWrites[1].dstArrayElement = 0;
descriptorWrites[1].descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
descriptorWrites[1].descriptorCount = 1;
descriptorWrites[1].pImageInfo = &imageInfo;
And this is my fragment shader:
#version 450
#extension GL_ARB_separate_shader_objects : enable
layout(set=0, binding = 1) uniform sampler2D texSampler;
layout(location = 0) in vec3 fragColor;
layout(location = 1) in vec2 fragTexCoord;
layout(location = 0) out vec4 outColor;
void main() {
outColor = texture(texSampler, fragTexCoord*2);
}
What could be the reason of this? I have updated descriptor layout, and the validation layer don't throw any errors and the dynamic buffer was working perfectly even for multiple objects. Is the binding corrupted or something like that?

DirectX 11 Render To Texture

basically I am trying to render a scene to a texture as in this ogl tutorial here but in DirectX 11, and I faced some issues:
Absolutely nothing is rendered when I launch the program IDK why.
The only thing the texture displays 'correctly' is the clear color.
I have examined the executable in RenderDoc, and in the captured frame the back buffer draws the quad and the texture on it displays the scene correctly!
Source code peak:
D3D11_TEXTURE2D_DESC texDesc;
ZeroMemory(&texDesc, sizeof(D3D11_TEXTURE2D_DESC));
texDesc.Width = Data.Width;
texDesc.Height = Data.Height;
texDesc.Format = R32G32B32A32_FLOAT;
texDesc.Usage = D3D11_USAGE_DEFAULT;
texDesc.SampleDesc.Count = 1;
texDesc.SampleDesc.Quality = 0;
texDesc.CPUAccessFlags = 0;
texDesc.ArraySize = 1;
texDesc.BindFlags = D3D11_BIND_SHADER_RESOURCE | D3D11_BIND_RENDER_TARGET;
texDesc.MiscFlags = 0;
texDesc.MipLevels = 1;
if (Data.Img_Data_Buf == NULL)
{
if (FAILED(DX11Context::GetDevice()->CreateTexture2D(&texDesc, NULL, &result->tex2D)))
{
Log.Error("[DirectX] Texture2D Creation Failed for Null-ed Texture2D!\n");
return;
}
D3D11_SHADER_RESOURCE_VIEW_DESC srvDesc;
srvDesc.Format = texDesc.Format;
srvDesc.ViewDimension = D3D11_SRV_DIMENSION_TEXTURE2D;
srvDesc.Texture2D.MostDetailedMip = 0;
srvDesc.Texture2D.MipLevels = 1;
DX11Context::GetDevice()->CreateShaderResourceView(result->tex2D, &srvDesc, &result->resourceView);
return;
}
//depth stencil texture
D3D11_TEXTURE2D_DESC texDesc;
{
texDesc.Width = size.x;
texDesc.Height = size.y;
texDesc.MipLevels = 1;
texDesc.ArraySize = 1;
texDesc.Format = DXGI_FORMAT_D24_UNORM_S8_UINT;
texDesc.SampleDesc.Count = 1;
texDesc.SampleDesc.Quality = 0;
texDesc.Usage = D3D11_USAGE_DEFAULT;
texDesc.BindFlags = D3D11_BIND_DEPTH_STENCIL;
texDesc.CPUAccessFlags = 0;
texDesc.MiscFlags = 0;
}
if (FAILED(API::DirectX::DX11Context::GetDevice()->CreateTexture2D(&texDesc, nullptr, &depthstenciltex)))
{
Log.Error("[DX11RenderTarget] Failed to create DepthStencilTexture for render-target!\n");
//Return or the next call will fail too
return;
}
if (FAILED(API::DirectX::DX11Context::GetDevice()->CreateDepthStencilView(depthstenciltex, nullptr, &depthstencilview)))
{
Log.Error("[DX11RenderTarget] Failed to create DepthStencilView for render-target!\n");
}
//render target
D3D11_RENDER_TARGET_VIEW_DESC renderTargetViewDesc;
ZeroMemory(&renderTargetViewDesc, sizeof(D3D11_RENDER_TARGET_VIEW_DESC));
renderTargetViewDesc.Format = texDesc.Format;
renderTargetViewDesc.ViewDimension = D3D11_RTV_DIMENSION_TEXTURE2D;
renderTargetViewDesc.Texture2D.MipSlice = 0;
ID3D11RenderTargetView* rtv;
if (FAILED(API::DirectX::DX11Context::GetDevice()->CreateRenderTargetView(texture->tex2D, &renderTargetViewDesc, &rtv)))
{
Log.Error("[DX11RenderTarget] Failed to create render-target-view (RTV)!\n");
return;
}
//binding
Context->OMSetRenderTargets(1, &rtv, rt->depthstenciltex);
Shaders:
std::string VertexShader = R"(struct VertexInputType
{
float4 position : POSITION;
float2 tex : TEXCOORD;
};
struct PixelInputType
{
float4 position : SV_POSITION;
float2 tex : TEXCOORD;
};
cbuffer NE_Camera : register(b0)
{
matrix Model;
matrix View;
matrix Projection;
};
PixelInputType main(VertexInputType input)
{
PixelInputType output;
// Calculate the position of the vertex against the world, view, and projection matrices.
output.position = mul(Model, input.position);
output.position = mul(View, output.position);
output.position = mul(Projection, output.position);
// Store the input texture for the pixel shader to use.
output.tex = input.tex;
return output;
})";
std::string PixelShader = R"(
struct PixelInputType
{
float4 position : SV_POSITION;
float2 tex : TEXCOORD;
};
Texture2D NE_Tex_Diffuse : register(t0);
SamplerState NE_Tex_Diffuse_Sampler : register(s0);
float4 main(PixelInputType input) : SV_TARGET
{
return NE_Tex_Diffuse.Sample(NE_Tex_Diffuse_Sampler, input.tex);
}
)";
std::string ScreenVertexShader = R"(struct VertexInputType
{
float2 position : POSITION;
float2 tex : TEXCOORD;
};
struct PixelInputType
{
float4 position : SV_POSITION;
float2 tex : TEXCOORD;
};
PixelInputType main(VertexInputType input)
{
PixelInputType output;
// CalcSulate the position of the vertex against the world, view, and projection matrices.
output.position = float4(input.position.x,input.position.y,0.0f,1.0f);
// Store the input texture for the pixel shader to use.
output.tex = input.tex;
return output;
})";
std::string ScreenPixelShader = R"(
struct PixelInputType
{
float4 position : SV_POSITION;
float2 tex : TEXCOORD;
};
Texture2D ScreenTexture : register(t0);
SamplerState ScreenTexture_Sampler : register(s0);
float4 main(PixelInputType input) : SV_TARGET
{
return float4(ScreenTexture.Sample(ScreenTexture_Sampler, input.tex).rgb, 1.0f);
}
)";
Full Source Code
Also I captured a frame with visual studio graphics debugger, and noticed that the render to texture draw call has the PS shader with "stage didn't run, no output".
Note: I know that the scene should be flipped in DirectX.
I have found the bug causing this problem, I wasn't clearing the depth stencil view at rendering, I wonder why is clearing the DSV essential for RenderTarget output.

OpenGL instancing: using mat4x2 or vec2[4]

Follow examples online, here is the code in the vertex shader:
The attributes are defined as follows:
// Option 1:
layout (location = 0) in vec3 colorvec3;
layout (location = 1) in mat4x2 xyInBaseImageMat4x2;
out vec2 xyInElementTextureImage;
out vec3 elementTintColorVec3;
In the vertex shader, they are used:
// Option 1
vec2 xyInBaseImageVec2 = xyInBaseImageMat4x2[gl_VertexID];
gl_Position = vec4(xyInBaseImageVec2, 0.0, 1.0);
elementTintColorVec3 = colorvec3;
But the result is weird, totally random. Sometimes it is black, sometimes it is random shape.
But if I change to use:
// Option 2:
layout (location = 0) in vec3 colorvec3;
layout (location = 1) in vec2 xyInBaseImageVec2_p0;
layout (location = 2) in vec2 xyInBaseImageVec2_p1;
layout (location = 3) in vec2 xyInBaseImageVec2_p2;
layout (location = 4) in vec2 xyInBaseImageVec2_p3;
out vec2 xyInElementTextureImage;
out vec3 elementTintColorVec3;
In the vertex shader:
// Option 2:
vec2 xyInBaseImageVec2;
if (gl_VertexID==0) {
xyInBaseImageVec2 = xyInBaseImageVec2_p0;
} else if (gl_VertexID==1) {
xyInBaseImageVec2 = xyInBaseImageVec2_p1;
} else if (gl_VertexID==2) {
xyInBaseImageVec2 = xyInBaseImageVec2_p2;
} else if (gl_VertexID==3) {
xyInBaseImageVec2 = xyInBaseImageVec2_p3;
}
gl_Position = vec4(xyInBaseImageVec2, 0.0, 1.0);
elementTintColorVec3 = colorvec3;
Then it works as desired.
The data buffer for the color and vertex positions are the same for the two examples. [Update: add the feeding code below]
// refer to the code in: http://sol.gfxile.net/instancing.html
// refer to the code in: http://www.gamedev.net/page/resources/_/technical/opengl/opengl-instancing-demystified-r3226
// Case: instaced vertex positions. So each instance should have 4 vec2
{
// in mat4x2; later try in vec2[4] xyInBaseImage_vec2list if mat4x2 does not work.
GLuint instanceVBO = instancevbo_4pts;
int pos = 1;
int componentsSize = 2; // vec2 has 2 components
// if it is mat, then componentsNum is the column number of the matrix; for example, for mat4x2 it is 4
int componentsNum = 4; // mat4x2
GLenum type = GL_FLOAT;
GLboolean normalized = GL_FALSE;
GLsizei stride = componentsSize * sizeof(GL_FLOAT) * componentsNum;
char* pointerFirstComponentOffset = 0;
int offsetInteger = 0;
int byteSizeOfOneVertexAttribute = componentsSize * sizeof(GL_FLOAT);
GLuint divisor = 1; // 0 not instance
glBindBuffer(GL_ARRAY_BUFFER, instanceVBO);
for (int i = 0; i < componentsNum; i++) {
glEnableVertexAttribArray(pos + i);
// the offset can also be: (void*) (offsetInteger + i * byteSizeOfOneVertexAttribute)
glVertexAttribPointer(pos + i, componentsSize, type,
normalized, stride, pointerFirstComponentOffset + i * byteSizeOfOneVertexAttribute );
glVertexAttribDivisor(pos + i, divisor);
}
glBindBuffer(GL_ARRAY_BUFFER, 0);
}
What's wrong with the first option?

Per vertex lighting problems in opengl 4

I have a model made of cubes with 8 vertices only, and I am having issues with per vertex directional lighting. Instead of the entire model being lit at once, each cube seems to be lit separately like this.
This is my vertex shader:
layout(location = 0)in vec3 vp;
layout(location = 1)in vec3 color;
layout(location = 2)in vec3 normal;
out vec3 fColor;
uniform mat4 model;
uniform mat3 nm;
uniform mat3 partNM;
uniform mat4 modelPart;
uniform mat4 view;
uniform mat4 projection;
void main () {
gl_Position = modelPart * vec4(vp, 1.0f);
gl_Position = model * gl_Position;
gl_Position = view * gl_Position;
gl_Position = projection * gl_Position;
mat3 normalMatrix = partNM*nm;
vec3 normalDirection = normalize(normalMatrix*normal);
vec3 lightDirection = normalize(vec3(-1.0, 1.0, -1.0));
vec3 diffuseReflection = clamp(dot(normalDirection, lightDirection),0.0,1.0);
fColor = color+diffuseReflection;
}
and my fragment shader:
in vec3 fColor;
out vec4 frag_colour;
void main () {
frag_colour = vec4(fColor.xyz,1.0);
}
This is the function I use to set the normal matrix:
void Shader::setNormalMatrix(string name,glm::mat4 matrix) {
glm::mat3 nm = glm::transpose(glm::inverse(glm::mat3(matrix)));
unsigned int location = glGetUniformLocation(program, name.c_str());
glUniformMatrix3fv(location, 1, false, &nm[0][0]);
}
and the function which generates the vertices and normals for my cubes:
std::vector<float> Cube::createCube(float size,float x,float y,float z,float r, float g, float b) {
VertexType points[8];
points[0].x = (x*size)+0.0f;
points[0].y = (y*size)+0.0f;
points[0].z = (z*size)+size;
points[0].nx = 0.577350;
points[0].ny = 0.577350;
points[0].nz = -0.577350;
points[0].r = r;
points[0].g = g;
points[0].b = b;
points[1].x = (x*size)+size;
points[1].y = (y*size)+0.0f;
points[1].z = (z*size)+size;
points[1].nx = -0.577350;
points[1].ny = 0.577350;
points[1].nz = -0.577350;
points[1].r = r;
points[1].g = g;
points[1].b = b;
points[2].x = (x*size)+size;
points[2].y = (y*size)+size;
points[2].z = (z*size)+size;
points[2].nx = -0.577350;
points[2].ny = -0.577350;
points[2].nz = -0.577350;
points[2].r = r;
points[2].g = g;
points[2].b = b;
points[3].x = (x*size)+0.0f;
points[3].y = (y*size)+size;
points[3].z = (z*size)+size;
points[3].nx = 0.577350;
points[3].ny = -0.577350;
points[3].nz = -0.577350;
points[3].r = r;
points[3].g = g;
points[3].b = b;
points[4].x = (x*size)+0.0f;
points[4].y = (y*size)+0.0f;
points[4].z = (z*size)+0.0f;
points[4].nx = 0.577350;
points[4].ny = 0.577350;
points[4].nz = 0.577350;
points[4].r = r;
points[4].g = g;
points[4].b = b;
points[5].x = (x*size)+size;
points[5].y = (y*size)+0.0f;
points[5].z = (z*size)+0.0f;
points[5].nx = -0.577350;
points[5].ny = 0.577350;
points[5].nz = 0.577350;
points[5].r = r;
points[5].g = g;
points[5].b = b;
points[6].x = (x*size)+size;
points[6].y = (y*size)+size;
points[6].z = (z*size)+0.0f;
points[6].nx = -0.577350;
points[6].ny = -0.577350;
points[6].nz = 0.577350;
points[6].r = r;
points[6].g = g;
points[6].b = b;
points[7].x = (x*size)+0.0f;
points[7].y = (y*size)+size;
points[7].z = (z*size)+0.0f;
points[7].nx = 0.577350;
points[7].ny = -0.577350;
points[7].nz = 0.577350;
points[7].r = r;
points[7].g = g;
points[7].b = b;
std::vector<float> rPoint;
for(VertexType p:points) {
rPoint.push_back(p.x);
rPoint.push_back(p.y);
rPoint.push_back(p.z);
rPoint.push_back(p.r);
rPoint.push_back(p.g);
rPoint.push_back(p.b);
rPoint.push_back(p.nx);
rPoint.push_back(p.ny);
rPoint.push_back(p.nz);
}
return rPoint;
}
The models are divided up into parts, which is why I have two normal and model matrices; one for the model as a whole, and one for an individual piece of the model. Is there a problem with my code, or do I need to use per-fragment lighting to fix this bug?
Your problem is the topology of your mesh. At the corner of a cube 3 faces meet. Each of these faces have a different normal. This creates a discontinuity in the normal's topology. Or to put it into simpler terms. You must use 3 vertices per corner, one for each face, with the face normal pointing into the right direction.
And while you're at it, you could remove those cube faces not being visible anyway.
The reason is because you are rendering each cube as separate models. The shader will thus run once per model, in your case, once per cube. To solve this, you need render your entire model (your robot) as one model, with one set of vertices, rather than as a set of cubes.