I'm optimizing my already-working VR rendering by adding multiview rendering to my custom C++ engine, but I can't get the other eye to render. Here's my vertex shader:
layout(set=0, binding=2) Buffer<float3> positions : register(b2);
VSOutput unlitVS( uint vertexId : SV_VertexID, uint viewId : SV_ViewID )
{
VSOutput vsOut;
vsOut.uv = uvs[ vertexId ];
vsOut.pos = mul( data.localToClip[ viewId ], float4( positions[ vertexId ], 1 ) );
return vsOut;
}
If I use viewId to select a transformation matrix, I'm getting an error:
ERROR: Vertex shader consumes input at location 0 but not provided
If I don't use viewId, the other eye doesn't render at all. Here's how I set up my framebuffer: it has 2 layers, and after rendering the left eye I copy the second layer to the right eye.
VkImageCreateInfo imageCreateInfo = {};
imageCreateInfo.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;
imageCreateInfo.pNext = nullptr;
imageCreateInfo.imageType = VK_IMAGE_TYPE_2D;
imageCreateInfo.extent.width = width;
imageCreateInfo.extent.height = height;
imageCreateInfo.extent.depth = 1;
imageCreateInfo.mipLevels = 1;
imageCreateInfo.arrayLayers = 2;
imageCreateInfo.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
imageCreateInfo.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
imageCreateInfo.format = VK_FORMAT_R8G8B8A8_SRGB;
imageCreateInfo.tiling = VK_IMAGE_TILING_OPTIMAL;
imageCreateInfo.samples = sampleCount == 4 ? VK_SAMPLE_COUNT_4_BIT : VK_SAMPLE_COUNT_1_BIT;
imageCreateInfo.usage = (VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_TRANSFER_SRC_BIT);
imageCreateInfo.flags = 0;
VkResult result = vkCreateImage( dev, &imageCreateInfo, nullptr, &outFramebufferDesc.image );
...
// Bit mask that specifies which view rendering is broadcast to.
// 0011 = Broadcast to first and second view (layer)
const uint32_t viewMask = 0b00000011;
// Bit mask that specifies correlation between views
// An implementation may use this for optimizations (concurrent rendering)
const uint32_t correlationMask = 0b00000011;
VkRenderPassMultiviewCreateInfo renderPassMultiviewCI{};
renderPassMultiviewCI.sType = VK_STRUCTURE_TYPE_RENDER_PASS_MULTIVIEW_CREATE_INFO;
renderPassMultiviewCI.subpassCount = 1;
renderPassMultiviewCI.pViewMasks = &viewMask;
renderPassMultiviewCI.correlationMaskCount = 1;
renderPassMultiviewCI.pCorrelationMasks = &correlationMask;
VkRenderPassCreateInfo renderPassCreateInfo = {};
renderPassCreateInfo.sType = VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO;
renderPassCreateInfo.flags = 0;
renderPassCreateInfo.pNext = &renderPassMultiviewCI;
renderPassCreateInfo.attachmentCount = 2;
renderPassCreateInfo.pAttachments = &attachmentDescs[ 0 ];
renderPassCreateInfo.subpassCount = 1;
renderPassCreateInfo.pSubpasses = &subPassCreateInfo;
renderPassCreateInfo.dependencyCount = 0;
renderPassCreateInfo.pDependencies = nullptr;
result = vkCreateRenderPass( dev, &renderPassCreateInfo, nullptr, &outFramebufferDesc.renderPass );
assert( result == VK_SUCCESS );
VkImageView attachments[ 2 ] = { outFramebufferDesc.imageView, outFramebufferDesc.depthStencilImageView };
VkFramebufferCreateInfo framebufferCreateInfo = {};
framebufferCreateInfo.sType = VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO;
framebufferCreateInfo.pNext = nullptr;
framebufferCreateInfo.renderPass = outFramebufferDesc.renderPass;
framebufferCreateInfo.attachmentCount = 2;
framebufferCreateInfo.pAttachments = &attachments[ 0 ];
framebufferCreateInfo.width = width;
framebufferCreateInfo.height = height;
framebufferCreateInfo.layers = 1;
result = vkCreateFramebuffer( dev, &framebufferCreateInfo, nullptr, &outFramebufferDesc.framebuffer );
...
// After rendering the left eye:
VkImageCopy region = {};
region.extent.width = device.width;
region.extent.height = device.height;
region.extent.depth = 1;
region.srcSubresource.baseArrayLayer = 1;
region.srcSubresource.layerCount = 1;
region.srcSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
region.dstSubresource.layerCount = 1;
region.dstSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
vkCmdCopyImage( gCurrentDrawCommandBuffer, device.fbDesc.image, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, device.fbDesc.imageCopy, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, &region );
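(Not shown above: before the copy I also transition layer 1 of the multiview image to TRANSFER_SRC_OPTIMAL, and the destination image gets a similar barrier to TRANSFER_DST_OPTIMAL. Roughly the sketch below; the exact stage/access masks in my helper may differ.)
// Sketch: transition layer 1 of the multiview color image for the copy.
VkImageMemoryBarrier toSrc = {};
toSrc.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
toSrc.srcAccessMask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
toSrc.dstAccessMask = VK_ACCESS_TRANSFER_READ_BIT;
toSrc.oldLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
toSrc.newLayout = VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL;
toSrc.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
toSrc.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
toSrc.image = device.fbDesc.image;
toSrc.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
toSrc.subresourceRange.baseMipLevel = 0;
toSrc.subresourceRange.levelCount = 1;
toSrc.subresourceRange.baseArrayLayer = 1; // second view / right eye layer
toSrc.subresourceRange.layerCount = 1;
vkCmdPipelineBarrier( gCurrentDrawCommandBuffer, VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, 0, 0, nullptr, 0, nullptr, 1, &toSrc );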
Here's my VkPipelineVertexInputStateCreateInfo, I'm using programmable vertex pulling:
VkPipelineVertexInputStateCreateInfo inputState = {};
inputState.sType = VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO;
How can I fix the error and get the right eye working? I'm using an HTC Vive, OpenVR 1.7.15, an AMD R9 Nano, and Vulkan 1.1 with Vulkan SDK 1.1.126.0 on Windows 10. I have enabled the device extension VK_KHR_MULTIVIEW_EXTENSION_NAME.
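For reference, my understanding is that the color attachment view for a 2-layer multiview pass has to be a 2D-array view covering both layers. A minimal sketch of such a view (handle names reused from above; this is an illustration, not my exact code):
VkImageViewCreateInfo viewCreateInfo = {};
viewCreateInfo.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO;
viewCreateInfo.image = outFramebufferDesc.image;
viewCreateInfo.viewType = VK_IMAGE_VIEW_TYPE_2D_ARRAY; // array view, not plain 2D
viewCreateInfo.format = VK_FORMAT_R8G8B8A8_SRGB;
viewCreateInfo.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
viewCreateInfo.subresourceRange.baseMipLevel = 0;
viewCreateInfo.subresourceRange.levelCount = 1;
viewCreateInfo.subresourceRange.baseArrayLayer = 0;
viewCreateInfo.subresourceRange.layerCount = 2; // both multiview layers
result = vkCreateImageView( dev, &viewCreateInfo, nullptr, &outFramebufferDesc.imageView );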
First of all, my understanding of a descriptor range is that I can specify multiple buffers (constant buffers in my case) that a shader may use. Is that correct? If not, then that's where my misunderstanding lies, and the rest of the question won't make much sense.
Let's say I want to pass a couple of constant buffers to my vertex shader:
// Vertex.hlsl
float value0 : register(b0);
float value1 : register(b1);
...
And for whatever reason I want to use a descriptor range to specify b0 and b1. I fill out a D3D12_DESCRIPTOR_RANGE:
D3D12_DESCRIPTOR_RANGE range;
range.RangeType = D3D12_DESCRIPTOR_RANGE_TYPE_CBV;
range.NumDescriptors = 2;
range.BaseShaderRegister = 0;
range.RegisterSpace = 0;
range.OffsetInDescriptorsFromTableStart = D3D12_DESCRIPTOR_RANGE_OFFSET_APPEND;
I then go on to shove this into a root parameter
D3D12_ROOT_PARAMETER param;
param.ParameterType = D3D12_ROOT_PARAMETER_TYPE_DESCRIPTOR_TABLE;
param.DescriptorTable.NumDescriptorRanges = 1;
param.DescriptorTable.pDescriptorRanges = &range;
param.ShaderVisibility = D3D12_SHADER_VISIBILITY_VERTEX;
Root parameter goes into my signature description
D3D12_ROOT_SIGNATURE_DESC1 signatureDesc;
signatureDesc.NumParameters = 1;
signatureDesc.pParameters = &param;
signatureDesc.NumStaticSamplers = 0;
signatureDesc.pStaticSamplers = nullptr;
signatureDesc.Flags = D3D12_ROOT_SIGNATURE_FLAG_ALLOW_INPUT_ASSEMBLER_INPUT_LAYOUT;
After this I create my root signature and so on. I also created a heap for 2 descriptors:
D3D12_DESCRIPTOR_HEAP_DESC heapDescCbv;
heapDescCbv.Type = D3D12_DESCRIPTOR_HEAP_TYPE_CBV_SRV_UAV;
heapDescCbv.NumDescriptors = 2;
heapDescCbv.Flags = D3D12_DESCRIPTOR_HEAP_FLAG_SHADER_VISIBLE;
heapDescCbv.NodeMask = 0;
ThrowIfFailed(m_device->CreateDescriptorHeap(&heapDescCbv, IID_PPV_ARGS(&m_cbvHeap)));
I then mapped the respective ID3D12Resources to get two pointers so I can memcpy my values into them:
void D3D12App::AllocateConstantBuffer(SIZE_T index, size_t dataSize, ID3D12Resource** buffer, void** mappedPtr)
{
D3D12_HEAP_PROPERTIES heapProp;
heapProp.CPUPageProperty = D3D12_CPU_PAGE_PROPERTY_UNKNOWN;
heapProp.MemoryPoolPreference = D3D12_MEMORY_POOL_UNKNOWN;
heapProp.CreationNodeMask = 1;
heapProp.VisibleNodeMask = 1;
heapProp.Type = D3D12_HEAP_TYPE_UPLOAD;
D3D12_RESOURCE_DESC resDesc;
resDesc.Dimension = D3D12_RESOURCE_DIMENSION_BUFFER;
resDesc.Alignment = 0;
resDesc.Width = (dataSize + 255) & ~255;
resDesc.Height = 1;
resDesc.DepthOrArraySize = 1;
resDesc.MipLevels = 1;
resDesc.Format = DXGI_FORMAT_UNKNOWN;
resDesc.SampleDesc.Count = 1;
resDesc.SampleDesc.Quality = 0;
resDesc.Layout = D3D12_TEXTURE_LAYOUT_ROW_MAJOR;
resDesc.Flags = D3D12_RESOURCE_FLAG_NONE;
ThrowIfFailed(m_device->CreateCommittedResource(&heapProp, D3D12_HEAP_FLAG_NONE,
&resDesc, D3D12_RESOURCE_STATE_GENERIC_READ, nullptr, IID_PPV_ARGS(buffer)));
D3D12_CONSTANT_BUFFER_VIEW_DESC cbvDesc;
cbvDesc.BufferLocation = (*buffer)->GetGPUVirtualAddress();
cbvDesc.SizeInBytes = (dataSize + 255) & ~255;
auto cbvHandle = m_cbvHeap->GetCPUDescriptorHandleForHeapStart();
cbvHandle.ptr += index * m_device->GetDescriptorHandleIncrementSize(D3D12_DESCRIPTOR_HEAP_TYPE_CBV_SRV_UAV);
m_device->CreateConstantBufferView(&cbvDesc, cbvHandle);
D3D12_RANGE readRange;
readRange.Begin = 0;
readRange.End = 0;
ThrowIfFailed((*buffer)->Map(0, &readRange, mappedPtr));
}
AllocateConstantBuffer(0, sizeof(m_value0), &m_value0Resource, reinterpret_cast<void**>(&m_constPtrvalue0));
AllocateConstantBuffer(1, sizeof(m_value1), &m_value1Resource, reinterpret_cast<void**>(&m_constPtrvalue1));
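(After mapping, the updates are just plain memcpys into the mapped pointers; sketch using the member names from above:)
memcpy(m_constPtrvalue0, &m_value0, sizeof(m_value0)); // upload heap is persistently mapped
memcpy(m_constPtrvalue1, &m_value1, sizeof(m_value1));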
The problem is when I want to feed them to the pipeline. When rendering, I used
auto cbvHandle = m_cbvHeap->GetGPUDescriptorHandleForHeapStart();
m_commandList->SetGraphicsRootDescriptorTable(0, cbvHandle);
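(The heap itself is bound earlier in the frame, roughly like this; m_cbvHeap is assumed to be a ComPtr here:)
ID3D12DescriptorHeap* heaps[] = { m_cbvHeap.Get() };
m_commandList->SetDescriptorHeaps(_countof(heaps), heaps); // must be set before the root descriptor table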
The result is that only register(b0) gets the correct value; register(b1) remains uninitialized. What did I do wrong?
OK, I got it to work. It turned out I needed to change the shader a bit:
cbuffer a : register(b0) { float value0; }
cbuffer b : register(b1) { float value1; };
This gave me another question, though. According to this link:
https://learn.microsoft.com/en-us/windows/win32/direct3dhlsl/dx-graphics-hlsl-constants
the buffer names a and b should be optional, but when I leave them out the shader fails to compile. I guess that is a different question.
I've been trying to sample a YCbCr image in Vulkan but I keep getting incorrect results, and I was hoping someone might be able to spot my mistake.
I have an NV12 YCbCr image which I want to render onto two triangles forming a quad. If I understand correctly, the VkFormat that corresponds to NV12 is VK_FORMAT_G8_B8R8_2PLANE_420_UNORM. Below is the code that I would expect to work, but first I'll outline what I'm trying to do:
Create a VkSampler with a VkSamplerYcbcrConversion (with the correct format) in pNext
Read NV12 data into staging buffer
Create VkImage with the correct format and specify that the planes are disjoint
Get memory requirements (and offset for plane 1) for each plane (0 and 1)
Allocate device local memory for the image data
Bind each plane to the correct location in memory
Copy staging buffer to image memory
Create VkImageView with the same format as the VkImage and the same VkSamplerYcbcrConversionInfo as the VkSampler in pNext.
Code:
VkSamplerYcbcrConversion ycbcr_sampler_conversion;
VkSamplerYcbcrConversionInfo ycbcr_info;
VkSampler ycbcr_sampler;
VkImage image;
VkDeviceMemory image_memory;
VkDeviceSize memory_offset_plane0, memory_offset_plane1;
VkImageView image_view;
enum YCbCrStorageFormat
{
NV12
};
unsigned char* ReadYCbCrFile(const std::string& filename, YCbCrStorageFormat storage_format, VkFormat vulkan_format, uint32_t* buffer_size, uint32_t* buffer_offset_plane1, uint32_t* buffer_offset_plane2)
{
std::ifstream file;
file.open(filename.c_str(), std::ios::in | std::ios::binary | std::ios::ate);
if (!file.is_open()) { ELOG("Failed to open YCbCr image"); }
*buffer_size = file.tellg();
file.seekg(0);
unsigned char* data;
switch (storage_format)
{
case NV12:
{
if (vulkan_format != VK_FORMAT_G8_B8R8_2PLANE_420_UNORM)
{
ILOG("A 1:1 relationship doesn't exist between NV12 and 420, exiting");
exit(1);
}
*buffer_offset_plane1 = (*buffer_size / 3) * 2;
*buffer_offset_plane2 = 0; //Not used
data = new unsigned char[*buffer_size];
file.read((char*)(data), *buffer_size);
break;
}
default:
ELOG("A YCbCr storage format is required");
break;
}
file.close();
return data;
}
VkFormatProperties format_properties;
vkGetPhysicalDeviceFormatProperties(physical_device, VK_FORMAT_G8_B8R8_2PLANE_420_UNORM, &format_properties);
bool cosited = false, midpoint = false;
if (format_properties.optimalTilingFeatures & VK_FORMAT_FEATURE_COSITED_CHROMA_SAMPLES_BIT)
{
cosited = true;
}
else if (format_properties.optimalTilingFeatures & VK_FORMAT_FEATURE_MIDPOINT_CHROMA_SAMPLES_BIT)
{
midpoint = true;
}
if (!cosited && !midpoint)
{
ELOG("Nither VK_FORMAT_FEATURE_COSITED_CHROMA_SAMPLES_BIT nor VK_FORMAT_FEATURE_MIDPOINT_CHROMA_SAMPLES_BIT is supported for VK_FORMAT_G8_B8R8_2PLANE_420_UNORM");
}
VkSamplerYcbcrConversionCreateInfo conversion_info = {};
conversion_info.sType = VK_STRUCTURE_TYPE_SAMPLER_YCBCR_CONVERSION_CREATE_INFO;
conversion_info.pNext = NULL;
conversion_info.format = VK_FORMAT_G8_B8R8_2PLANE_420_UNORM;
conversion_info.ycbcrModel = VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_709;
conversion_info.ycbcrRange = VK_SAMPLER_YCBCR_RANGE_ITU_FULL;
conversion_info.components.r = VK_COMPONENT_SWIZZLE_IDENTITY;
conversion_info.components.g = VK_COMPONENT_SWIZZLE_IDENTITY;
conversion_info.components.b = VK_COMPONENT_SWIZZLE_IDENTITY;
conversion_info.components.a = VK_COMPONENT_SWIZZLE_IDENTITY;
if (cosited)
{
conversion_info.xChromaOffset = VK_CHROMA_LOCATION_COSITED_EVEN;
conversion_info.yChromaOffset = VK_CHROMA_LOCATION_COSITED_EVEN;
}
else
{
conversion_info.xChromaOffset = VK_CHROMA_LOCATION_MIDPOINT;
conversion_info.yChromaOffset = VK_CHROMA_LOCATION_MIDPOINT;
}
conversion_info.chromaFilter = VK_FILTER_LINEAR;
conversion_info.forceExplicitReconstruction = VK_FALSE;
VkResult res = vkCreateSamplerYcbcrConversion(logical_device, &conversion_info, NULL, &ycbcr_sampler_conversion);
CHECK_VK_RESULT(res, "Failed to create YCbCr conversion sampler");
ILOG("Successfully created YCbCr conversion");
ycbcr_info.sType = VK_STRUCTURE_TYPE_SAMPLER_YCBCR_CONVERSION_INFO;
ycbcr_info.pNext = NULL;
ycbcr_info.conversion = ycbcr_sampler_conversion;
VkSamplerCreateInfo sampler_info = {};
sampler_info.sType = VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO;
sampler_info.pNext = &ycbcr_info;
sampler_info.flags = 0;
sampler_info.magFilter = VK_FILTER_LINEAR;
sampler_info.minFilter = VK_FILTER_LINEAR;
sampler_info.mipmapMode = VK_SAMPLER_MIPMAP_MODE_LINEAR;
sampler_info.addressModeU = VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE;
sampler_info.addressModeV = VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE;
sampler_info.addressModeW = VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE;
sampler_info.mipLodBias = 0.0f;
sampler_info.anisotropyEnable = VK_FALSE;
//sampler_info.maxAnisotropy IGNORED
sampler_info.compareEnable = VK_FALSE;
//sampler_info.compareOp = IGNORED
sampler_info.minLod = 0.0f;
sampler_info.maxLod = 1.0f;
sampler_info.borderColor = VK_BORDER_COLOR_FLOAT_OPAQUE_BLACK;
sampler_info.unnormalizedCoordinates = VK_FALSE;
res = vkCreateSampler(logical_device, &sampler_info, NULL, &ycbcr_sampler);
CHECK_VK_RESULT(res, "Failed to create YUV sampler");
ILOG("Successfully created sampler with YCbCr in pNext");
std::string filename = "tree_nv12_1920x1080.yuv";
uint32_t width = 1920, height = 1080;
VkFormat format = VK_FORMAT_G8_B8R8_2PLANE_420_UNORM;
uint32_t buffer_size, buffer_offset_plane1, buffer_offset_plane2;
unsigned char* ycbcr_data = ReadYCbCrFile(filename, NV12, VK_FORMAT_G8_B8R8_2PLANE_420_UNORM, &buffer_size, &buffer_offset_plane1, &buffer_offset_plane2);
//Load image into staging buffer
VkDeviceMemory stage_buffer_memory;
VkBuffer stage_buffer = create_vk_buffer(buffer_size, VK_BUFFER_USAGE_TRANSFER_SRC_BIT, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT, stage_buffer_memory);
void* stage_memory_ptr;
vkMapMemory(logical_device, stage_buffer_memory, 0, buffer_size, 0, &stage_memory_ptr);
memcpy(stage_memory_ptr, ycbcr_data, buffer_size);
vkUnmapMemory(logical_device, stage_buffer_memory);
delete[] ycbcr_data;
//Create image
VkImageCreateInfo img_info = {};
img_info.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;
img_info.flags = VK_IMAGE_CREATE_DISJOINT_BIT;
img_info.imageType = VK_IMAGE_TYPE_2D;
img_info.extent.width = width;
img_info.extent.height = height;
img_info.extent.depth = 1;
img_info.mipLevels = 1;
img_info.arrayLayers = 1;
img_info.format = format;
img_info.tiling = VK_IMAGE_TILING_LINEAR;//VK_IMAGE_TILING_OPTIMAL;
img_info.initialLayout = VK_IMAGE_LAYOUT_PREINITIALIZED;
img_info.usage = VK_IMAGE_USAGE_TRANSFER_DST_BIT | VK_IMAGE_USAGE_SAMPLED_BIT;
img_info.samples = VK_SAMPLE_COUNT_1_BIT;
img_info.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
VkResult result = vkCreateImage(logical_device, &img_info, NULL, &image);
CHECK_VK_RESULT(result, "vkCreateImage failed to create image handle");
ILOG("Image created!");
//Get memory requirements for each plane and combine
//Plane 0
VkImagePlaneMemoryRequirementsInfo image_plane_info = {};
image_plane_info.sType = VK_STRUCTURE_TYPE_IMAGE_PLANE_MEMORY_REQUIREMENTS_INFO;
image_plane_info.pNext = NULL;
image_plane_info.planeAspect = VK_IMAGE_ASPECT_PLANE_0_BIT;
VkImageMemoryRequirementsInfo2 image_info2 = {};
image_info2.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_REQUIREMENTS_INFO_2;
image_info2.pNext = &image_plane_info;
image_info2.image = image;
VkImagePlaneMemoryRequirementsInfo memory_plane_requirements = {};
memory_plane_requirements.sType = VK_STRUCTURE_TYPE_IMAGE_PLANE_MEMORY_REQUIREMENTS_INFO;
memory_plane_requirements.pNext = NULL;
memory_plane_requirements.planeAspect = VK_IMAGE_ASPECT_PLANE_0_BIT;
VkMemoryRequirements2 memory_requirements2 = {};
memory_requirements2.sType = VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2;
memory_requirements2.pNext = &memory_plane_requirements;
vkGetImageMemoryRequirements2(logical_device, &image_info2, &memory_requirements2);
VkDeviceSize image_size = memory_requirements2.memoryRequirements.size;
uint32_t image_bits = memory_requirements2.memoryRequirements.memoryTypeBits;
//Set offsets
memory_offset_plane0 = 0;
memory_offset_plane1 = image_size;
//Plane 1
image_plane_info.planeAspect = VK_IMAGE_ASPECT_PLANE_1_BIT;
memory_plane_requirements.planeAspect = VK_IMAGE_ASPECT_PLANE_1_BIT;
vkGetImageMemoryRequirements2(logical_device, &image_info2, &memory_requirements2);
image_size += memory_requirements2.memoryRequirements.size;
image_bits = image_bits | memory_requirements2.memoryRequirements.memoryTypeBits;
//Allocate image memory
VkMemoryAllocateInfo allocate_info = {};
allocate_info.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
allocate_info.allocationSize = image_size;
allocate_info.memoryTypeIndex = get_device_memory_type(image_bits, VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT);
result = vkAllocateMemory(logical_device, &allocate_info, NULL, &image_memory);
CHECK_VK_RESULT(result, "vkAllocateMemory failed to allocate image memory");
//Bind each image plane to memory
std::vector<VkBindImageMemoryInfo> bind_image_memory_infos(2);
//Plane 0
VkBindImagePlaneMemoryInfo bind_image_plane0_info = {};
bind_image_plane0_info.sType = VK_STRUCTURE_TYPE_BIND_IMAGE_PLANE_MEMORY_INFO;
bind_image_plane0_info.pNext = NULL;
bind_image_plane0_info.planeAspect = VK_IMAGE_ASPECT_PLANE_0_BIT;
VkBindImageMemoryInfo& bind_image_memory_plane0_info = bind_image_memory_infos[0];
bind_image_memory_plane0_info.sType = VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_INFO;
bind_image_memory_plane0_info.pNext = &bind_image_plane0_info;
bind_image_memory_plane0_info.image = image;
bind_image_memory_plane0_info.memory = image_memory;
bind_image_memory_plane0_info.memoryOffset = memory_offset_plane0;
//Plane 1
VkBindImagePlaneMemoryInfo bind_image_plane1_info = {};
bind_image_plane1_info.sType = VK_STRUCTURE_TYPE_BIND_IMAGE_PLANE_MEMORY_INFO;
bind_image_plane1_info.pNext = NULL;
bind_image_plane1_info.planeAspect = VK_IMAGE_ASPECT_PLANE_1_BIT;
VkBindImageMemoryInfo& bind_image_memory_plane1_info = bind_image_memory_infos[1];
bind_image_memory_plane1_info.sType = VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_INFO;
bind_image_memory_plane1_info.pNext = &bind_image_plane1_info;
bind_image_memory_plane1_info.image = image;
bind_image_memory_plane1_info.memory = image_memory;
bind_image_memory_plane1_info.memoryOffset = memory_offset_plane1;
vkBindImageMemory2(logical_device, bind_image_memory_infos.size(), bind_image_memory_infos.data());
context.transition_vk_image_layout(image, format, VK_IMAGE_ASPECT_COLOR_BIT, VK_IMAGE_LAYOUT_PREINITIALIZED, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL);
//Copy staging buffer to device local buffer
VkCommandBuffer tmp_cmd_buffer = begin_tmp_vk_cmd_buffer();
std::vector<VkBufferImageCopy> plane_regions(2);
plane_regions[0].bufferOffset = 0;
plane_regions[0].bufferRowLength = 0;
plane_regions[0].bufferImageHeight = 0;
plane_regions[0].imageSubresource.aspectMask = VK_IMAGE_ASPECT_PLANE_0_BIT;
plane_regions[0].imageSubresource.mipLevel = 0;
plane_regions[0].imageSubresource.baseArrayLayer = 0;
plane_regions[0].imageSubresource.layerCount = 1;
plane_regions[0].imageOffset = { 0, 0, 0 };
plane_regions[0].imageExtent = { width, height, 1 };
plane_regions[1].bufferOffset = buffer_offset_plane1;
plane_regions[1].bufferRowLength = 0;
plane_regions[1].bufferImageHeight = 0;
plane_regions[1].imageSubresource.aspectMask = VK_IMAGE_ASPECT_PLANE_1_BIT;
plane_regions[1].imageSubresource.mipLevel = 0;
plane_regions[1].imageSubresource.baseArrayLayer = 0;
plane_regions[1].imageSubresource.layerCount = 1;
plane_regions[1].imageOffset = { 0, 0, 0 };
plane_regions[1].imageExtent = { width / 2, height / 2, 1 };
vkCmdCopyBufferToImage(tmp_cmd_buffer, stage_buffer, image, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, plane_regions.size(), plane_regions.data());
end_tmp_vk_cmd_buffer(tmp_cmd_buffer); //Submit and waits
vkFreeMemory(logical_device, stage_buffer_memory, NULL);
vkDestroyBuffer(logical_device, stage_buffer, NULL);
transition_vk_image_layout(image, format, VK_IMAGE_ASPECT_COLOR_BIT, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL);
VkImageViewCreateInfo image_view_info = {};
image_view_info.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO;
image_view_info.pNext = &ycbcr_info;
image_view_info.flags = 0;
image_view_info.image = image;
image_view_info.viewType = VK_IMAGE_VIEW_TYPE_2D;
image_view_info.format = format;
image_view_info.components.r = VK_COMPONENT_SWIZZLE_IDENTITY;
image_view_info.components.b = VK_COMPONENT_SWIZZLE_IDENTITY;
image_view_info.components.g = VK_COMPONENT_SWIZZLE_IDENTITY;
image_view_info.components.a = VK_COMPONENT_SWIZZLE_IDENTITY;
image_view_info.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
image_view_info.subresourceRange.baseMipLevel = 0;
image_view_info.subresourceRange.levelCount = 1;
image_view_info.subresourceRange.baseArrayLayer = 0;
image_view_info.subresourceRange.layerCount = 1;
res = vkCreateImageView(logical_device, &image_view_info, NULL, &image_view);
CHECK_VK_RESULT(res, "Failed to create image view");
ILOG("Successfully created image, allocated image memory and created image view");
I receive one validation error: "vkCmdCopyBufferToImage() parameter, VkImageAspect pRegions->imageSubresource.aspectMask, is an unrecognized enumerator", but from inspecting the validation code it seems to be just a bit outdated, so this shouldn't be an issue.
The rest of the code just sets up regular descriptor layouts/pools and allocates and updates them accordingly (I've verified the setup with a regular RGB texture).
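For reference, the descriptor update itself is the usual combined-image-sampler write, roughly the sketch below (descriptor_set stands in for my allocated set, which isn't shown):
VkDescriptorImageInfo image_info = {};
image_info.sampler = ycbcr_sampler;
image_info.imageView = image_view;
image_info.imageLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
VkWriteDescriptorSet write = {};
write.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
write.dstSet = descriptor_set; //Placeholder for my descriptor set handle
write.dstBinding = 0;
write.dstArrayElement = 0;
write.descriptorCount = 1;
write.descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
write.pImageInfo = &image_info;
vkUpdateDescriptorSets(logical_device, 1, &write, 0, NULL);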
The fragment shader is as follows:
vec2 uv = vec2(gl_FragCoord.x / 1024.0, 1.0 - (gl_FragCoord.y / 1024.0));
out_color = vec4(texture(ycbcr_image, uv).rgb, 1.0f);
When I run my program I only get the red component (the image looks essentially like a greyscale image). From a little testing, the VkSamplerYcbcrConversion setup doesn't seem to matter: removing it from both VkSamplerCreateInfo.pNext and VkImageViewCreateInfo.pNext doesn't change anything.
I've also looked at the Khronos YCbCr tests, but I can't find any real mistake.
Solution: according to the spec, sec. 12.1, Conversion must be fixed at pipeline creation time, through use of a combined image sampler with an immutable sampler in VkDescriptorSetLayoutBinding.
By adding the ycbcr_sampler to pImmutableSamplers when setting up the descriptor set layout binding it now works:
VkDescriptorSetLayoutBinding image_binding = {};
image_binding.binding = 0;
image_binding.descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
image_binding.descriptorCount = 1;
image_binding.stageFlags = VK_SHADER_STAGE_FRAGMENT_BIT;
image_binding.pImmutableSamplers = &ycbcr_sampler;
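For completeness, the binding then goes into the layout as usual (sketch; descriptor_set_layout is just an illustrative name):
VkDescriptorSetLayoutCreateInfo layout_info = {};
layout_info.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO;
layout_info.bindingCount = 1;
layout_info.pBindings = &image_binding;
VkDescriptorSetLayout descriptor_set_layout;
//The sampler must already exist at this point, since it is baked into the layout via pImmutableSamplers.
res = vkCreateDescriptorSetLayout(logical_device, &layout_info, NULL, &descriptor_set_layout);
CHECK_VK_RESULT(res, "Failed to create descriptor set layout");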
I am trying to render a triangle and I'm following a tutorial from https://vulkan-tutorial.org
I am using GLFW, GLSL and Visual Studio 2017.
When the render pass is created, its value is always 0xc, and when the swapchain is created, its value is always 0x2. The creation functions always return VK_SUCCESS, and there is no validation layer output.
When I try to create a framebuffer using vkCreateFramebuffer(), I'm told that my render pass object handle is invalid and that my ImageView object handles are invalid. Their values are always 0x6, 0x7, and 0x8.
Also, when I invoke vkAcquireNextImageKHR(), I'm told that my swapchain object handle is invalid.
My swapchain creation code is below. Please ignore the comments; they are for learning purposes only.
void Swapchain::initSwapchain() {
VkPresentModeKHR presentMode = getAvaiablePresentMode();
QueueFamilyIndices indices = *mainWindow->getRenderer()->getQueueIndices();
swapchainCreateInfo.sType = VK_STRUCTURE_TYPE_SWAPCHAIN_CREATE_INFO_KHR;
swapchainCreateInfo.surface = this->mainWindow->getSurface();
swapchainCreateInfo.minImageCount = swapchainImageCount; //Image buffering for the display buffer: how many images can be queued at once
swapchainCreateInfo.imageFormat = this->mainWindow->getSurfaceFormat().format;
swapchainCreateInfo.imageColorSpace = this->mainWindow->getSurfaceFormat().colorSpace;
swapchainCreateInfo.imageExtent = this->swapExtent;
swapchainCreateInfo.imageArrayLayers = 1; //How many layers the image has (1 for normal rendering, 2 for stereoscopic)
swapchainCreateInfo.imageUsage = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT; //What kind of operations do we use the images for? We render to them, so they are COLOR ATTACHMENTS
if (indices.getGraphicsFamilyIndex() != indices.getPresentationFamilyIndex()) {
uint32_t queueIndices[] = { indices.getGraphicsFamilyIndex(), indices.getPresentationFamilyIndex() };
swapchainCreateInfo.imageSharingMode = VK_SHARING_MODE_CONCURRENT; //The image can be used by multiple queue families concurrently, without transferring ownership of the image.
swapchainCreateInfo.queueFamilyIndexCount = 2;
swapchainCreateInfo.pQueueFamilyIndices = queueIndices;
}
else {
swapchainCreateInfo.imageSharingMode = VK_SHARING_MODE_EXCLUSIVE; //The image is owned by one queue family at a time, and ownership must be explicitly transferred before another family can use it.
swapchainCreateInfo.queueFamilyIndexCount = 0; //Always 0 for exclusive mode
swapchainCreateInfo.pQueueFamilyIndices = nullptr; //Ignored for exclusive mode
}
swapchainCreateInfo.preTransform = mainWindow->getSurfaceCapatibilities().currentTransform; //currentTransform if we don't want any extra transform; otherwise e.g. VK_SURFACE_TRANSFORM_IDENTITY_BIT_KHR
swapchainCreateInfo.compositeAlpha = VK_COMPOSITE_ALPHA_OPAQUE_BIT_KHR; //Alpha channel of the SURFACE, i.e. whether it is transparent
swapchainCreateInfo.presentMode = presentMode; //Vertical Sync
swapchainCreateInfo.clipped = VK_TRUE; //Enable clipping, very important on phones
swapchainCreateInfo.oldSwapchain = VK_NULL_HANDLE; //If we recreate the swapchain, pointer to the old one
util->ErrorCheck(vkCreateSwapchainKHR(renderer->getDevice(), &swapchainCreateInfo, nullptr, &swapchain));
util->ErrorCheck(vkGetSwapchainImagesKHR(renderer->getDevice(), swapchain, &swapchainImageCount, nullptr));
}
void Swapchain::initSwapchainImgs()
{
images.resize(swapchainImageCount);
imageViews.resize(swapchainImageCount);
util->ErrorCheck(vkGetSwapchainImagesKHR(renderer->getDevice(), swapchain, &swapchainImageCount, images.data()));
for (uint32_t i = 0; i < swapchainImageCount; i++) {
VkImageViewCreateInfo imgCreateInfo = {};
imgCreateInfo.components.r = VK_COMPONENT_SWIZZLE_R;
imgCreateInfo.components.g = VK_COMPONENT_SWIZZLE_G;
imgCreateInfo.components.b = VK_COMPONENT_SWIZZLE_B;
imgCreateInfo.components.a = VK_COMPONENT_SWIZZLE_A;
imgCreateInfo.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
imgCreateInfo.subresourceRange.baseMipLevel = 0;
imgCreateInfo.subresourceRange.levelCount = 1;
imgCreateInfo.subresourceRange.baseArrayLayer = 0;
imgCreateInfo.subresourceRange.layerCount = 1;
imgCreateInfo.format = this->imagesFormat;
imgCreateInfo.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO;
imgCreateInfo.image = images[i];
imgCreateInfo.viewType = VK_IMAGE_VIEW_TYPE_2D;
util->ErrorCheck(vkCreateImageView(renderer->getDevice(), &imgCreateInfo, nullptr, &imageViews[i]));
}
}
Render pass creation code:
void RenderPass::createColor() {
VkAttachmentDescription attachment = {};
VkAttachmentReference reference = {};
VkSubpassDescription subpass = {};
VkRenderPassCreateInfo info = {};
attachment.format = surfaceFormat.format; //Must match the format of the swapchain images
attachment.samples = VK_SAMPLE_COUNT_1_BIT; //Refers to multisampling
attachment.loadOp = VK_ATTACHMENT_LOAD_OP_CLEAR; //Operation the render pass attachment performs on load
attachment.storeOp = VK_ATTACHMENT_STORE_OP_STORE; //Operation to perform after rendering
attachment.stencilLoadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE;
attachment.stencilStoreOp = VK_ATTACHMENT_STORE_OP_DONT_CARE;
attachment.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED; //This means we don't care what layout the previous image contents were in.
attachment.finalLayout = VK_IMAGE_LAYOUT_PRESENT_SRC_KHR; //Images to be presented via the swapchain
reference.layout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL; //Layout of the image in this subpass
reference.attachment = 0; //Index of the attachment referenced by this subpass
subpass.pipelineBindPoint = VK_PIPELINE_BIND_POINT_GRAPHICS;
subpass.colorAttachmentCount = 1;
subpass.pColorAttachments = &reference; //REFERENCED FROM THE FRAGMENT SHADER
info.attachmentCount = 1;
info.dependencyCount = 0;
info.pAttachments = &attachment;
info.pDependencies = nullptr;
info.subpassCount = 1;
info.pSubpasses = &subpass;
info.sType = VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO;
util->ErrorCheck(vkCreateRenderPass(this->renderer->getDevice(), &info, nullptr, &this->renderPass));
}
And finally, Framebuffer creation code:
void FrameBuffer::initFrameBuffer(
uint32_t swapchainImageCount,
std::vector<VkImageView> imageViews,
VkRenderPass renderPass,
VkExtent2D surfaceSize,
std::vector<VkImageView> attachments
)
{
frameBuffers.resize(swapchainImageCount);
for (uint32_t i = 0; i < swapchainImageCount; ++i) {
VkFramebufferCreateInfo frameBufferCreateInfo{};
std::vector<VkImageView> allAttachments = { imageViews[i] };
allAttachments.insert(allAttachments.begin(), attachments.begin(), attachments.end());
frameBufferCreateInfo.sType = VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO;
frameBufferCreateInfo.renderPass = renderPass;
frameBufferCreateInfo.width = surfaceSize.width;
frameBufferCreateInfo.height = surfaceSize.height;
frameBufferCreateInfo.layers = 1;
frameBufferCreateInfo.pAttachments = allAttachments.data();
frameBufferCreateInfo.attachmentCount = allAttachments.size();
util->ErrorCheck(vkCreateFramebuffer(renderer->getDevice(), &frameBufferCreateInfo, nullptr, &frameBuffers[i]));
}
}
After running the framebuffer creation code I get these outputs (screenshots of the validation messages, not reproduced here):
Render pass error on framebuffer creation.
ImageView error on framebuffer creation.
And this error when I invoke vkAcquireNextImageKHR().
This can't be a problem with the device or instance, because I have enabled VK_KHR_surface and VK_KHR_swapchain and I have checked that my device and window support these extensions. I have also rebooted my PC and these errors still occur. I am using Vulkan SDK 1.1.85.0.
Also, there are no unset fields in the creation info structures except for pNext, which I don't think should even be needed for now.
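(For what it's worth, value-initializing the swapchain create info, like the other structs above, would guarantee pNext and flags start out as zero; just a sketch:)
VkSwapchainCreateInfoKHR swapchainCreateInfo = {}; //Zero-initializes every field, so pNext == nullptr and flags == 0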
I'm trying to actually RENDER the cloth I created to the screen in DirectX11.
I used the PhysX API to create a cloth object and tried to create the vertex and index buffer accordingly. As far as I know the cloth object should be okay.
Here's my code. Please note that this is in a custom engine (from school) so some things might look weird (like the gameContext object for example) but you should be able to comprehend the code.
I used the book Introduction to 3D Game Programming with DirectX 10 by Frank D. Luna as a reference for the buffers.
// create regular mesh
PxU32 resolution = 20;
PxU32 numParticles = resolution*resolution;
PxU32 numTriangles = 2*(resolution-1)*(resolution-1);
// create cloth particles
PxClothParticle* particles = new PxClothParticle[numParticles];
PxVec3 center(0.5f, 0.3f, 0.0f);
PxVec3 delta = 1.0f/(resolution-1) * PxVec3(15.0f, 15.0f, 15.0f);
PxClothParticle* pIt = particles;
for(PxU32 i=0; i<resolution; ++i)
{
for(PxU32 j=0; j<resolution; ++j, ++pIt)
{
pIt->invWeight = j+1<resolution ? 1.0f : 0.0f;
pIt->pos = delta.multiply(PxVec3(PxReal(i),
PxReal(j), -PxReal(j))) - center;
}
}
// create triangles
PxU32* triangles = new PxU32[3*numTriangles];
PxU32* iIt = triangles;
for(PxU32 i=0; i<resolution-1; ++i)
{
for(PxU32 j=0; j<resolution-1; ++j)
{
PxU32 odd = j&1u, even = 1-odd;
*iIt++ = i*resolution + (j+odd);
*iIt++ = (i+odd)*resolution + (j+1);
*iIt++ = (i+1)*resolution + (j+even);
*iIt++ = (i+1)*resolution + (j+even);
*iIt++ = (i+even)*resolution + j;
*iIt++ = i*resolution + (j+odd);
}
}
// create fabric from mesh
PxClothMeshDesc meshDesc;
meshDesc.points.count = numParticles;
meshDesc.points.stride = sizeof(PxClothParticle);
meshDesc.points.data = particles;
meshDesc.invMasses.count = numParticles;
meshDesc.invMasses.stride = sizeof(PxClothParticle);
meshDesc.invMasses.data = &particles->invWeight;
meshDesc.triangles.count = numTriangles;
meshDesc.triangles.stride = 3*sizeof(PxU32);
meshDesc.triangles.data = triangles;
// cook fabric
PxClothFabric* fabric = PxClothFabricCreate(*PhysxManager::GetInstance()->GetPhysics(), meshDesc, PxVec3(0, 1, 0));
//delete[] triangles;
// create cloth
PxTransform gPose = PxTransform(PxVec3(0,1,0));
gCloth = PhysxManager::GetInstance()->GetPhysics()->createCloth(gPose, *fabric, particles, PxClothFlags(0));
fabric->release();
//delete[] particles;
// 240 iterations per/second (4 per-60hz frame)
gCloth->setSolverFrequency(240.0f);
GetPhysxProxy()->GetPhysxScene()->addActor(*gCloth);
// CREATE VERTEX BUFFER
D3D11_BUFFER_DESC bufferDescriptor = {};
bufferDescriptor.Usage = D3D11_USAGE_DEFAULT;
bufferDescriptor.ByteWidth = sizeof( PxClothParticle* ) * gCloth->getNbParticles();
bufferDescriptor.BindFlags = D3D11_BIND_VERTEX_BUFFER;
bufferDescriptor.CPUAccessFlags = 0;
bufferDescriptor.MiscFlags = 0;
D3D11_SUBRESOURCE_DATA initData = {};
initData.pSysMem = particles;
gameContext.pDevice->CreateBuffer(&bufferDescriptor, &initData, &m_pVertexBuffer);
// BUILD INDEX BUFFER
D3D11_BUFFER_DESC bd = {};
bd.Usage = D3D11_USAGE_IMMUTABLE;
bd.ByteWidth = sizeof(PxU32) * sizeof(triangles);
bd.BindFlags = D3D11_BIND_INDEX_BUFFER;
bd.CPUAccessFlags = 0;
bd.MiscFlags = 0;
D3D11_SUBRESOURCE_DATA initData2 = {};
initData2.pSysMem = triangles;
gameContext.pDevice->CreateBuffer(&bd, &initData2, &m_pIndexBuffer);
When this is done I run this code in the "draw" part of the engine:
// Set vertex buffer(s)
UINT offset = 0;
UINT vertexBufferStride = sizeof(PxClothParticle*);
gameContext.pDeviceContext->IASetVertexBuffers( 0, 1, &m_pVertexBuffer, &vertexBufferStride, &offset );
// Set index buffer
gameContext.pDeviceContext->IASetIndexBuffer(m_pIndexBuffer,DXGI_FORMAT_R32_UINT,0);
// Set primitive topology
gameContext.pDeviceContext->IASetPrimitiveTopology( D3D10_PRIMITIVE_TOPOLOGY_TRIANGLELIST );
auto mat = new DiffuseMaterial();
mat->Initialize(gameContext);
mat->SetDiffuseTexture(L"./Resources/Textures/Chair_Dark.dds");
gameContext.pMaterialManager->AddMaterial(mat, 3);
ID3DX11EffectTechnique* pTechnique = mat->GetDefaultTechnique();
D3DX11_TECHNIQUE_DESC techDesc;
pTechnique->GetDesc( &techDesc );
for( UINT p = 0; p < techDesc.Passes; ++p )
{
pTechnique->GetPassByIndex(p)->Apply(0, gameContext.pDeviceContext);
gameContext.pDeviceContext->DrawIndexed(gCloth->getNbParticles(), 0, 0 );
}
I think there's something obviously wrong that I'm just totally missing (DirectX isn't my strongest area). Every comment or answer is much appreciated.
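For context, PxClothParticle is just a position plus an inverse weight, so the input layout I'm trying to match would be something like this (sketch; the semantic names are assumptions about my vertex shader):
// PxClothParticle == { PxVec3 pos; PxReal invWeight; } -> 16 bytes per vertex
D3D11_INPUT_ELEMENT_DESC layout[] =
{
    { "POSITION", 0, DXGI_FORMAT_R32G32B32_FLOAT, 0, 0,  D3D11_INPUT_PER_VERTEX_DATA, 0 },
    { "TEXCOORD", 0, DXGI_FORMAT_R32_FLOAT,       0, 12, D3D11_INPUT_PER_VERTEX_DATA, 0 },
};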
I currently have an OpenGL sprite drawing class that buffers up a bunch of sprite data and then dumps it with glDrawElements. The problem is that creating the sprites that go into the buffer is cumbersome: I have loads of parameters to pass into the buffer, with even more redundancy for the shaders. I'm wondering whether I could reduce CPU load by filling the buffer with only the essentials (location, orientation, texture coordinates, etc.) and letting a geometry shader turn that into quads for the fragment shader.
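Concretely, the per-sprite data I'd like to upload would be closer to this (just a sketch; the names are illustrative, not existing code):
#include <QVector2D>
#include <QVector3D>
struct SpritePoint
{
    QVector3D center;   // sprite centre at UNIFORM_DEPTH
    QVector2D halfSize; // half extents of the quad
    float     angle;    // rotation, applied by the geometry shader
    QVector2D texLo;    // atlas rect minimum
    QVector2D texHi;    // atlas rect maximum
};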
In case there's a different answer, I've added the offending method so you can see what I mean:
void Machine::draw(key num, BoundingBox loc, float angle){
SpriteSyncData* props;
VertexAttribArray* vdata;
GLushort* idata;
SpriteProperties* sprite_props;
int sliceW;
int sliceH;
sprite_props = &spriteList[num];
props = &spriteToSync[sprite_props->atlas];
props->size++;
if(props->size > props->capacity){
props->capacity += COARSE_MEM_SCALE;
props->data = (VertexAttribArray*) realloc((void*) props->data, (sizeof(VertexAttribArray)*4) * props->capacity);
props->i_data = (GLushort*) realloc((void*) props->i_data, (sizeof(GLushort)*4) * props->capacity);
}
vdata = props->data + (props->size - 1) * 4;
idata = props->i_data + (props->size - 1) * 4;
sliceW = sprite_props->location.x1 - sprite_props->location.x0;
sliceH = sprite_props->location.y1 - sprite_props->location.y0;
if(sprite_props->flags & DRAW_TILED){
vdata[0].p = QVector3D(loc.x1, loc.y0, UNIFORM_DEPTH);
vdata[1].p = QVector3D(loc.x0, loc.y0, UNIFORM_DEPTH);
vdata[2].p = QVector3D(loc.x0, loc.y1, UNIFORM_DEPTH);
vdata[3].p = QVector3D(loc.x1, loc.y1, UNIFORM_DEPTH);
vdata[0].s = QVector2D(((float) (loc.x1 - loc.x0)) / sliceW,
((float) (loc.y1 - loc.y0)) / sliceH);
vdata[0].r = QVector2D(0, 0);
vdata[1].r = vdata[0].r;
vdata[2].r = vdata[0].r;
vdata[3].r = vdata[0].r;
}
else{
vdata[0].p = QVector3D(loc.x0 + sliceW, loc.y0, UNIFORM_DEPTH);
vdata[1].p = QVector3D(loc.x0, loc.y0, UNIFORM_DEPTH);
vdata[2].p = QVector3D(loc.x0, loc.y0 + sliceH, UNIFORM_DEPTH);
vdata[3].p = QVector3D(loc.x0 + sliceW, loc.y0 + sliceH, UNIFORM_DEPTH);
vdata[0].s = QVector2D(1, 1);
vdata[0].r = QVector2D(sliceW, sliceH);
vdata[1].r = vdata[0].r;
vdata[2].r = vdata[0].r;
vdata[3].r = vdata[0].r;
}
vdata[0].t = QVector2D(sprite_props->texCoords[2], sprite_props->texCoords[1]);
vdata[1].t = QVector2D(sprite_props->texCoords[0], sprite_props->texCoords[1]);
vdata[2].t = QVector2D(sprite_props->texCoords[0], sprite_props->texCoords[3]);
vdata[3].t = QVector2D(sprite_props->texCoords[2], sprite_props->texCoords[3]);
vdata[1].s = vdata[0].s;
vdata[2].s = vdata[0].s;
vdata[3].s = vdata[0].s;
vdata[0].s_lo = QVector2D(sprite_props->texCoords[0], sprite_props->texCoords[1]);
vdata[0].s_hi = QVector2D(sprite_props->texCoords[2] - sprite_props->texCoords[0],
sprite_props->texCoords[3] - sprite_props->texCoords[1]);
vdata[1].s_lo = vdata[0].s_lo;
vdata[1].s_hi = vdata[0].s_hi;
vdata[2].s_lo = vdata[0].s_lo;
vdata[2].s_hi = vdata[0].s_hi;
vdata[3].s_lo = vdata[0].s_lo;
vdata[3].s_hi = vdata[0].s_hi;
vdata[0].o = (vdata[1].p + vdata[3].p) * 0.5;
vdata[1].o = vdata[0].o;
vdata[2].o = vdata[0].o;
vdata[3].o = vdata[0].o;
vdata[0].a = angle;
vdata[1].a = angle;
vdata[2].a = angle;
vdata[3].a = angle;
idata[0] = (props->size - 1) * 4;
idata[1] = idata[0] + 1;
idata[2] = idata[0] + 2;
idata[3] = idata[0] + 3;
}