AUGraph Record and Play - audiounit

When I use AUGraph to implement record and play, I want to get the audio data through a RenderCallback. There is a problem in the remote IO unit callback: it fails with the error kAudioConverterErr_InvalidInputSize (1768846202), and I don't know how to solve it.
Is there any other way to get audio data?
// Builds the capture/playback AUGraph:
//   RemoteIO (mic, bus 1) -> AUConverter -> Reverb2 -> AUConverter
// with a render callback installed on the RemoteIO output bus so the app can
// pull processed audio itself.
// FIX: the original transcription used #"..." for the NSAssert descriptions,
// which is not valid Objective-C; the description must be an NSString literal
// (@"...").
NSAssert(NewAUGraph(&_auGraph) == noErr, @"NewAUGraph failed");
// RemoteIO unit: hardware input and output on iOS.
const AudioComponentDescription ioComponentDesc = {
.componentType = kAudioUnitType_Output,
.componentSubType = kAudioUnitSubType_RemoteIO,
.componentManufacturer = kAudioUnitManufacturer_Apple,
.componentFlags = 0,
.componentFlagsMask = 0
};
AUGraphAddNode(_auGraph, &ioComponentDesc, &_ioNode);
// Reverb2 effect unit.
const AudioComponentDescription effectComponent = {
.componentType = kAudioUnitType_Effect,
.componentSubType = kAudioUnitSubType_Reverb2,
.componentManufacturer = kAudioUnitManufacturer_Apple,
.componentFlags = 0,
.componentFlagsMask = 0
};
AUGraphAddNode(_auGraph, &effectComponent, &_reverbNode);
// Converter in front of the reverb (int16 interleaved -> float32 non-interleaved).
const AudioComponentDescription inConvertComponentDesc = {
.componentType = kAudioUnitType_FormatConverter,
.componentSubType = kAudioUnitSubType_AUConverter,
.componentManufacturer = kAudioUnitManufacturer_Apple,
.componentFlags = 0,
.componentFlagsMask = 0
};
AUGraphAddNode(_auGraph, &inConvertComponentDesc, &_inConvertNode);
// Converter after the reverb (back to int16 interleaved).
const AudioComponentDescription outConvertComponentDesc = {
.componentType = kAudioUnitType_FormatConverter,
.componentSubType = kAudioUnitSubType_AUConverter,
.componentManufacturer = kAudioUnitManufacturer_Apple,
.componentFlags = 0,
.componentFlagsMask = 0
};
AUGraphAddNode(_auGraph, &outConvertComponentDesc, &_outConvertNode);
// Open the graph so the underlying AudioUnit instances exist, then fetch them.
NSAssert(AUGraphOpen(_auGraph) == noErr, @"AUGraphOpen failed");
AUGraphNodeInfo(_auGraph, _ioNode, NULL, &_ioUnit);
AUGraphNodeInfo(_auGraph, _reverbNode, NULL, &_reverbUnit);
AUGraphNodeInfo(_auGraph, _inConvertNode, NULL, &_inConvertUnit);
AUGraphNodeInfo(_auGraph, _outConvertNode, NULL, &_outConvertUnit);
// RemoteIO convention: element 1 = input (mic), element 0 = output (speaker).
UInt32 enable = 1;
AudioUnitElement inputBus = 1;
AudioUnitElement outputBus = 0;
AudioUnitSetProperty(_ioUnit, kAudioOutputUnitProperty_EnableIO, kAudioUnitScope_Input, inputBus, &enable, sizeof(enable));
AudioUnitSetProperty(_ioUnit, kAudioOutputUnitProperty_EnableIO, kAudioUnitScope_Output, outputBus, &enable, sizeof(enable));
// App-side format: 44.1 kHz, 16-bit signed, packed, interleaved stereo.
AudioStreamBasicDescription absd;
memset(&absd, 0, sizeof(absd));
absd.mSampleRate = 44100;
absd.mFormatID = kAudioFormatLinearPCM;
absd.mFormatFlags = kAudioFormatFlagIsSignedInteger | kAudioFormatFlagsNativeEndian | kAudioFormatFlagIsPacked;
absd.mBitsPerChannel = 16;
absd.mChannelsPerFrame = 2;
absd.mBytesPerFrame = absd.mBitsPerChannel / 8 * absd.mChannelsPerFrame;
absd.mFramesPerPacket = 1;
absd.mBytesPerPacket = absd.mBytesPerFrame;
// Output scope of the input bus = what the mic delivers to the graph;
// input scope of the output bus = what the graph feeds the speaker.
AudioUnitSetProperty(_ioUnit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Output, inputBus, &absd, sizeof(absd));
AudioUnitSetProperty(_ioUnit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Input, outputBus, &absd, sizeof(absd));
// Manual render callback drives the output bus instead of a node connection.
AURenderCallbackStruct callback = {0};
callback.inputProc = audioUnitRenderCallback;
callback.inputProcRefCon = (__bridge void *)self;
AudioUnitSetProperty(_ioUnit,
kAudioUnitProperty_SetRenderCallback,
kAudioUnitScope_Input,
outputBus,
&callback,
sizeof(callback));
OSStatus status;
// Reverb2 works in 32-bit float, non-interleaved stereo at 44.1 kHz.
AudioStreamBasicDescription reverbABSD;
memset(&reverbABSD, 0, sizeof(reverbABSD));
reverbABSD.mSampleRate = 44100;
reverbABSD.mFormatID = kAudioFormatLinearPCM;
reverbABSD.mFormatFlags = kAudioFormatFlagsNativeFloatPacked | kLinearPCMFormatFlagIsNonInterleaved;
reverbABSD.mBitsPerChannel = 32;
reverbABSD.mChannelsPerFrame = 2;
reverbABSD.mBytesPerFrame = reverbABSD.mBitsPerChannel / 8;
reverbABSD.mFramesPerPacket = 1;
reverbABSD.mBytesPerPacket = reverbABSD.mBytesPerFrame;
AudioUnitSetProperty(_reverbUnit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Input, 0, &reverbABSD, sizeof(reverbABSD));
AudioUnitSetProperty(_reverbUnit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Output, 0, &reverbABSD, sizeof(reverbABSD));
AudioUnitSetParameter(_reverbUnit, kReverb2Param_Gain, kAudioUnitScope_Global, 0, 20, 0);
AudioUnitSetParameter(_reverbUnit, kReverb2Param_DryWetMix, kAudioUnitScope_Global, 0, 50, 0);
status = AudioUnitSetProperty(_outConvertUnit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Input, 0, &reverbABSD, sizeof(reverbABSD));
status = AudioUnitSetProperty(_outConvertUnit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Output, 0, &absd, sizeof(absd));
// NOTE(review): _inConvertUnit never has its stream formats set explicitly —
// its input/output formats are whatever the connections negotiate; worth
// confirming, since a mismatch here is a classic cause of
// kAudioConverterErr_InvalidInputSize.
AUGraphConnectNodeInput(_auGraph, _ioNode, 1, _inConvertNode, 0);
AUGraphConnectNodeInput(_auGraph, _inConvertNode, 0, _reverbNode, 0);
AUGraphConnectNodeInput(_auGraph, _reverbNode, 0, _outConvertNode, 0);
// AURenderCallback installed on the RemoteIO output bus. Zeroes the
// destination buffer, then pulls one buffer of processed audio by rendering
// the output-converter unit (which in turn pulls through reverb and the mic).
// Returns the AudioUnitRender status so Core Audio can react to failures.
static OSStatus audioUnitRenderCallback(void * inRefCon,
AudioUnitRenderActionFlags * ioActionFlags,
const AudioTimeStamp * inTimeStamp,
UInt32 inBusNumber,
UInt32 inNumberFrames,
AudioBufferList * __nullable ioData)
{
XLAudioCaptureGraph *THIS = (__bridge XLAudioCaptureGraph *)inRefCon;
// Silence the output buffer before rendering into it.
memset(ioData->mBuffers[0].mData, 0, ioData->mBuffers[0].mDataByteSize);
OSStatus status = AudioUnitRender(THIS.outConvertUnit, ioActionFlags, inTimeStamp, 0, inNumberFrames, ioData);
// BUG FIX: the original fell off the end of a non-void function (undefined
// behaviour in C); the render status must be propagated to the caller.
return status;
}
The status returned inside audioUnitRenderCallback is kAudioConverterErr_InvalidInputSize.

Related

Can not connect a Surface eglCreatePbufferFromClientBuffer

// Creates an OpenVG image and tries to wrap it in a pbuffer surface via
// eglCreatePbufferFromClientBuffer, then make that surface current on a new
// context.
// NOTE(review): num_config is declared as VGint but passed to
// eglGetConfigs/eglChooseConfig, which expect EGLint* — confirm the two types
// are compatible on this platform.
VGint num_config;
EGLint value;
EGLBoolean ret;
// Back buffer image: RGB565, full screen, no antialiasing.
m_BackLayerBuffer = vgCreateImage(SGA_DIS_FMT_RGB565, LCD_WIDTH, LCD_HEIGHT, VG_IMAGE_QUALITY_NONANTIALIASED);
vgClearImage(m_BackLayerBuffer, 0, 0, LCD_WIDTH, LCD_HEIGHT);
// Reuse the context/display that are already current.
m_Context = eglGetCurrentContext();
m_Display = eglGetCurrentDisplay();
eglGetConfigs(m_Display, NULL, NULL, &num_config);
m_DrawSurface = eglGetCurrentSurface(EGL_DRAW);
ret = eglChooseConfig(m_Display, attribute_list, &m_Config, 1, &num_config);
// Second context for off-screen rendering; the question notes this hardware
// cannot support multiple contexts, which is why the next call fails with
// EGL_BAD_ACCESS while this line is present.
m_OffScreenContext = eglCreateContext(m_Display, m_Config, EGL_NO_CONTEXT, NULL);
// Wrap the VGImage as a pbuffer surface so EGL can render into it.
m_ImageSurface = eglCreatePbufferFromClientBuffer(m_Display, EGL_OPENVG_IMAGE, (EGLClientBuffer)m_BackLayerBuffer, m_Config, NULL);
value = eglGetError();
ret = eglMakeCurrent(m_Display, m_ImageSurface, m_ImageSurface, m_OffScreenContext);
m_ImageSurface is always 0, and the error is EGL_BAD_ACCESS. Is the config wrong?
I know now that this hardware cannot support multiple contexts. After I delete
m_OffScreenContext = eglCreateContext(m_Display, m_Config, EGL_NO_CONTEXT, NULL);
the return value is EGL_SUCCESS

0xC0000005: Access violation reading location 0x005EF9E4

I am having issues with Handles. I have Bytebeat (music in bytes) playing inside of a DWORD WINAPI function. When I try to terminate and close the thread, it straight up gives me the error in the title. This is my code:
#include <windows.h>
#pragma comment(lib, "Winmm.lib")
// Worker thread (question version): endlessly synthesises ~6 s of 8-bit mono
// "bytebeat" audio at 11 kHz and plays it with waveOut. Never exits on its
// own; main() kills it with TerminateThread, which is the bug discussed below.
DWORD WINAPI bytebeat1(LPVOID) {
while (1) {
HWAVEOUT hwo = 0;
// PCM, 1 channel, 11000 Hz, 11000 B/s, block align 1, 8 bits per sample.
WAVEFORMATEX wfx = { WAVE_FORMAT_PCM, 1, 11000, 11000, 1, 8, 0 };
waveOutOpen(&hwo, WAVE_MAPPER, &wfx, 0, 0, CALLBACK_NULL);
// 6 seconds of samples, generated by the bytebeat formula below.
char buffer[11000 * 6];
for (DWORD t = 0; t < sizeof(buffer); t++)
buffer[t] = static_cast<char>(t & t + t / 256) - t * (t >> 15) & 64;
WAVEHDR hdr = { buffer, sizeof(buffer), 0, 0, 0, 0, 0, 0 };
waveOutPrepareHeader(hwo, &hdr, sizeof(WAVEHDR));
waveOutWrite(hwo, &hdr, sizeof(WAVEHDR));
// NOTE(review): waveOutWrite is asynchronous; unpreparing/closing right away
// relies on the Sleep(6000) below outlasting playback of the 6 s buffer.
waveOutUnprepareHeader(hwo, &hdr, sizeof(WAVEHDR));
waveOutClose(hwo);
Sleep(6000);
}
}
// Second worker thread (question version): same structure as bytebeat1 but at
// 8 kHz with a different bytebeat formula. Also relies on TerminateThread to
// stop, which is unsafe (see the answer below the code).
DWORD WINAPI bytebeat2(LPVOID) {
while (1) {
HWAVEOUT hwo = 0;
// PCM, 1 channel, 8000 Hz, 8000 B/s, block align 1, 8 bits per sample.
WAVEFORMATEX wfx = { WAVE_FORMAT_PCM, 1, 8000, 8000, 1, 8, 0 };
waveOutOpen(&hwo, WAVE_MAPPER, &wfx, 0, 0, CALLBACK_NULL);
char buffer[8000 * 6];
for (DWORD t = 0; t < sizeof(buffer); t++)
buffer[t] = static_cast<char>(t, t / 5) >> t / 25 & t / 55 ^ t & 255 ^ (t / 150) ^ 2508025 * 24240835810 & (t / 100) * t / 6000 ^ 5000 * t / 2500 ^ 25 * t / 24;
WAVEHDR hdr = { buffer, sizeof(buffer), 0, 0, 0, 0, 0, 0 };
waveOutPrepareHeader(hwo, &hdr, sizeof(WAVEHDR));
waveOutWrite(hwo, &hdr, sizeof(WAVEHDR));
waveOutUnprepareHeader(hwo, &hdr, sizeof(WAVEHDR));
waveOutClose(hwo);
Sleep(6000);
}
}
// Question version of main(): plays each bytebeat for ~6 s, then kills its
// thread. NOTE(review): TerminateThread force-kills a thread mid-execution,
// leaking its stack and any waveOut resources it holds — the likely source of
// the access violation described in the question; the answer replaces it with
// a cooperative quit flag + WaitForSingleObject.
int main() {
HANDLE beat1 = CreateThread(0, 0, bytebeat1, 0, 0, 0);
Sleep(6000);
TerminateThread(beat1, 0); CloseHandle(beat1);
Sleep(1000);
HANDLE beat2 = CreateThread(0, 0, bytebeat2, 0, 0, 0);
Sleep(6000);
TerminateThread(beat2, 0); CloseHandle(beat2);
}
I do not know why this is happening. The only fix is compiling it with G++ but I want it so I could just build it. Any help is appreciated. Thanks!
As the documentation makes clear, you can't use TerminateThread this way. Instead, replace the calls to Sleep with an interruptible sleep function that will terminate the thread cleanly and safely if requested to do so.
When you call TerminateThread, you are basically force-crashing your threads. They still have their own stack allocated and handles to Windows resources. They aren't cleaned up properly, causing your crash.
Here's a simple example of how to close your threads without any error. In a real-world scenario this is an unprofessional solution, but it shows the bare minimum that you need to do.
#include <windows.h>
#pragma comment(lib, "Winmm.lib")
volatile bool quit1 = false;
volatile bool quit2 = false;
// Fixed worker thread: identical audio generation to the question version, but
// the loop polls the quit1 flag so the thread can exit cleanly and return 0
// instead of being force-terminated.
DWORD WINAPI bytebeat1(LPVOID) {
while (!quit1) {
HWAVEOUT hwo = 0;
// PCM, 1 channel, 11000 Hz, 11000 B/s, block align 1, 8 bits per sample.
WAVEFORMATEX wfx = { WAVE_FORMAT_PCM, 1, 11000, 11000, 1, 8, 0 };
waveOutOpen(&hwo, WAVE_MAPPER, &wfx, 0, 0, CALLBACK_NULL);
char buffer[11000 * 6];
for (DWORD t = 0; t < sizeof(buffer); t++)
buffer[t] = static_cast<char>(t & t + t / 256) - t * (t >> 15) & 64;
WAVEHDR hdr = { buffer, sizeof(buffer), 0, 0, 0, 0, 0, 0 };
waveOutPrepareHeader(hwo, &hdr, sizeof(WAVEHDR));
waveOutWrite(hwo, &hdr, sizeof(WAVEHDR));
waveOutUnprepareHeader(hwo, &hdr, sizeof(WAVEHDR));
waveOutClose(hwo);
// Worst-case latency to observe quit1 is one full 6 s cycle.
Sleep(6000);
}
return 0;
}
// Fixed second worker: same as the question version, but polls quit2 so the
// thread terminates cooperatively and returns a proper exit code.
DWORD WINAPI bytebeat2(LPVOID) {
while (!quit2) {
HWAVEOUT hwo = 0;
// PCM, 1 channel, 8000 Hz, 8000 B/s, block align 1, 8 bits per sample.
WAVEFORMATEX wfx = { WAVE_FORMAT_PCM, 1, 8000, 8000, 1, 8, 0 };
waveOutOpen(&hwo, WAVE_MAPPER, &wfx, 0, 0, CALLBACK_NULL);
char buffer[8000 * 6];
for (DWORD t = 0; t < sizeof(buffer); t++)
buffer[t] = static_cast<char>(t, t / 5) >> t / 25 & t / 55 ^ t & 255 ^ (t / 150) ^ 2508025 * 24240835810 & (t / 100) * t / 6000 ^ 5000 * t / 2500 ^ 25 * t / 24;
WAVEHDR hdr = { buffer, sizeof(buffer), 0, 0, 0, 0, 0, 0 };
waveOutPrepareHeader(hwo, &hdr, sizeof(WAVEHDR));
waveOutWrite(hwo, &hdr, sizeof(WAVEHDR));
waveOutUnprepareHeader(hwo, &hdr, sizeof(WAVEHDR));
waveOutClose(hwo);
Sleep(6000);
}
return 0;
}
int main() {
HANDLE beat1 = CreateThread(0, 0, bytebeat1, 0, 0, 0);
Sleep(6000);
quit1 = true;
WaitForSingleObject(beat1, INFINITE);
CloseHandle(beat1);
Sleep(1000);
HANDLE beat2 = CreateThread(0, 0, bytebeat2, 0, 0, 0);
Sleep(6000);
quit2 = true;
WaitForSingleObject(beat2, INFINITE);
CloseHandle(beat2);
}

Problems loading HDR textures Vulkan

I am attempting to make a game engine. Currently I am trying to implement an HDR skybox which also has mip maps, but I have been stuck with an error for a while. The example runs completely fine loading non-HDR 8-bit colour textures, but the moment I attempt to use VK_FORMAT_R32G32B32A32_SFLOAT (which I'm fairly sure is the correct format to correspond to the 32-bit floats loaded from stbi), the command buffer fails to ever complete and seems to always be in a pending state. I have tried giving the texture manager its own command buffer with no luck, and using both vkQueueWaitIdle as well as fences, but they both return VK_SUCCESS. The validation layers then throw an error once vkResetCommandBuffer is invoked, because the command buffer is in the pending state. Seemingly it sometimes (rarely) works if I click to focus on the console; more errors appear afterwards, but that was not always the case, and HDR seemed to be working once it loaded — though it still mostly threw the same error, succeeding perhaps one time in three.
This is the code that is used to load the cubemaps in:
// Loads the six faces of a cubemap from disk (8-bit, 16-bit-float or
// 32-bit-float channels depending on cubemapInfo.format), uploads them through
// a host-visible staging buffer into a device-local cube-compatible VkImage,
// generates the full mip chain with repeated vkCmdBlitImage calls, and creates
// the matching image view and sampler.
// NOTE(review): width/height are overwritten by each stbi load — this assumes
// all six faces have identical dimensions; TODO confirm.
Cubemap::Cubemap(CubemapInfo cubemapInfo)
{
RenderSystem& renderSystem = RenderSystem::instance();
TextureManager& textureManager = TextureManager::instance();
// Reject formats the device cannot use with optimal tiling + transfer/sample
// usage. The (string, expr) comma trick makes the message show up in the
// assert failure output.
VkImageFormatProperties formatProperties;
assert(("[ERROR] Unsupported texture format", !vkGetPhysicalDeviceImageFormatProperties(renderSystem.mPhysicalDevice, cubemapInfo.format, VK_IMAGE_TYPE_2D, VK_IMAGE_TILING_OPTIMAL, VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT | VK_IMAGE_USAGE_SAMPLED_BIT, 0, &formatProperties)));
FormatInfo formatInfo = getFormatInfo(cubemapInfo.format);
#pragma region Create cubemap resources
stbi_set_flip_vertically_on_load(true);
void* textureData[6];
// Load images: HDR formats go through stbi_loadf (32-bit floats); 16-bit
// float targets are converted channel by channel; everything else uses the
// 8-bit stbi_load path.
int width, height, channels;
bool hdr = cubemapInfo.format == VK_FORMAT_R16_SFLOAT || cubemapInfo.format == VK_FORMAT_R16G16_SFLOAT || cubemapInfo.format == VK_FORMAT_R16G16B16_SFLOAT || cubemapInfo.format == VK_FORMAT_R16G16B16A16_SFLOAT || cubemapInfo.format == VK_FORMAT_R32_SFLOAT || cubemapInfo.format == VK_FORMAT_R32G32_SFLOAT || cubemapInfo.format == VK_FORMAT_R32G32B32_SFLOAT || cubemapInfo.format == VK_FORMAT_R32G32B32A32_SFLOAT;
if (hdr)
{
if (formatInfo.bytesPerChannel == 4)
{
for (unsigned int i = 0; i < 6; i++)
{
textureData[i] = stbi_loadf(cubemapInfo.directories[i].c_str(), &width, &height, &channels, formatInfo.nChannels);
}
}
else if (formatInfo.bytesPerChannel == 2)
{
for (unsigned int i = 0; i < 6; i++)
{
// Load as 32-bit float, then narrow each value to half precision.
float* data = stbi_loadf(cubemapInfo.directories[i].c_str(), &width, &height, &channels, formatInfo.nChannels);
unsigned long long dataSize = width * height * formatInfo.nChannels;
textureData[i] = new float16[dataSize];
for (unsigned long long j = 0; j < dataSize; j++)
{
((float16*)textureData[i])[j] = floatToFloat16(data[j]);
}
stbi_image_free((void*)data);
// NOTE(review): this face is later released with stbi_image_free even
// though it was allocated with new[] — mixed allocators; verify.
}
}
}
else
{
for (unsigned int i = 0; i < 6; i++)
{
textureData[i] = stbi_load(cubemapInfo.directories[i].c_str(), &width, &height, &channels, formatInfo.nChannels);
}
}
// Total staging size for all six tightly packed faces.
const VkDeviceSize imageSize = 6 * VkDeviceSize(width) * height * formatInfo.nChannels * formatInfo.bytesPerChannel;
// Full mip chain down to 1x1 for the larger dimension.
unsigned int nMips = unsigned int(std::floor(std::log2(width > height ? width : height))) + 1;
assert(("[ERROR] Unsupported texture format", formatProperties.maxExtent.width >= width && formatProperties.maxExtent.height >= height && formatProperties.maxExtent.depth >= 1 && formatProperties.maxMipLevels >= 1 && formatProperties.maxArrayLayers >= 1 && formatProperties.sampleCounts & VK_SAMPLE_COUNT_1_BIT && formatProperties.maxResourceSize >= imageSize));
// Create image: 2D, 6 array layers, cube-compatible, device-local, used as
// both transfer source (for mip blits) and destination (for the upload).
VkImageCreateInfo imageCreateInfo = {};
imageCreateInfo.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;
imageCreateInfo.pNext = nullptr;
imageCreateInfo.flags = VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT;
imageCreateInfo.imageType = VK_IMAGE_TYPE_2D;
imageCreateInfo.format = cubemapInfo.format;
imageCreateInfo.extent = { unsigned int(width), unsigned int(height), 1 };
imageCreateInfo.mipLevels = nMips;
imageCreateInfo.arrayLayers = 6;
imageCreateInfo.samples = VK_SAMPLE_COUNT_1_BIT;
imageCreateInfo.tiling = VK_IMAGE_TILING_OPTIMAL;
imageCreateInfo.usage = VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT | VK_IMAGE_USAGE_SAMPLED_BIT;
imageCreateInfo.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
imageCreateInfo.queueFamilyIndexCount = 0;
imageCreateInfo.pQueueFamilyIndices = nullptr;
imageCreateInfo.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
VkResult result = vkCreateImage(renderSystem.mDevice, &imageCreateInfo, nullptr, &mImage);
validateResult(result);
// Back the image with device-local memory.
VkMemoryRequirements memoryRequirements;
vkGetImageMemoryRequirements(renderSystem.mDevice, mImage, &memoryRequirements);
VkMemoryAllocateInfo memoryAllocateInfo = {};
memoryAllocateInfo.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
memoryAllocateInfo.allocationSize = memoryRequirements.size;
memoryAllocateInfo.memoryTypeIndex = memoryTypeFromProperties(renderSystem.mPhysicalDeviceMemoryProperties, VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT);
result = vkAllocateMemory(renderSystem.mDevice, &memoryAllocateInfo, nullptr, &mImageMemory);
validateResult(result);
result = vkBindImageMemory(renderSystem.mDevice, mImage, mImageMemory, 0);
validateResult(result);
// Create staging buffer: host-visible/coherent, transfer source.
VkBuffer stagingBuffer;
VkDeviceMemory stagingMemory;
VkBufferCreateInfo bufferCreateInfo = {};
bufferCreateInfo.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
bufferCreateInfo.pNext = nullptr;
bufferCreateInfo.flags = 0;
bufferCreateInfo.size = imageSize;
bufferCreateInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT;
bufferCreateInfo.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
bufferCreateInfo.queueFamilyIndexCount = 0;
bufferCreateInfo.pQueueFamilyIndices = nullptr;
result = vkCreateBuffer(renderSystem.mDevice, &bufferCreateInfo, nullptr, &stagingBuffer);
validateResult(result);
vkGetBufferMemoryRequirements(renderSystem.mDevice, stagingBuffer, &memoryRequirements);
memoryAllocateInfo.allocationSize = memoryRequirements.size;
memoryAllocateInfo.memoryTypeIndex = memoryTypeFromProperties(renderSystem.mPhysicalDeviceMemoryProperties, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT);
result = vkAllocateMemory(renderSystem.mDevice, &memoryAllocateInfo, nullptr, &stagingMemory);
validateResult(result);
result = vkBindBufferMemory(renderSystem.mDevice, stagingBuffer, stagingMemory, 0);
validateResult(result);
// Map the staging memory and copy each face, one layer after another, then
// release the CPU-side pixel data.
unsigned char* data;
result = vkMapMemory(renderSystem.mDevice, stagingMemory, 0, imageSize, 0, (void**)&data);
validateResult(result);
unsigned long long dataLayer = unsigned long long(width) * height * formatInfo.nChannels * formatInfo.bytesPerChannel;
for (unsigned int i = 0; i < 6; i++)
{
memcpy((void*)(data + i * dataLayer), textureData[i], dataLayer);
stbi_image_free(textureData[i]);
}
vkUnmapMemory(renderSystem.mDevice, stagingMemory);
// Record the upload + mip-generation commands.
result = vkBeginCommandBuffer(textureManager.mCommandBuffer, &renderSystem.mCommandBufferBeginInfo);
validateResult(result);
// Transition every mip of every layer UNDEFINED -> TRANSFER_DST before the
// buffer copy and blits write into them.
VkImageMemoryBarrier barrier = {};
barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
barrier.pNext = nullptr;
barrier.srcAccessMask = 0;
barrier.dstAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT /* Additional >> */ | VK_ACCESS_TRANSFER_READ_BIT;
barrier.oldLayout = VK_IMAGE_LAYOUT_UNDEFINED;
barrier.newLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL;
barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
barrier.image = mImage;
barrier.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
barrier.subresourceRange.baseMipLevel = 0;
barrier.subresourceRange.levelCount = nMips;
barrier.subresourceRange.baseArrayLayer = 0;
barrier.subresourceRange.layerCount = 6;
vkCmdPipelineBarrier(textureManager.mCommandBuffer, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, 0, 0, nullptr, 0, nullptr, 1, &barrier);
// Copy all six faces from the staging buffer into mip 0.
VkBufferImageCopy copyRegion = {};
copyRegion.bufferOffset = 0;
copyRegion.bufferRowLength = 0;
copyRegion.bufferImageHeight = 0;
copyRegion.imageSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
copyRegion.imageSubresource.mipLevel = 0;
copyRegion.imageSubresource.baseArrayLayer = 0;
copyRegion.imageSubresource.layerCount = 6;
copyRegion.imageOffset = { 0, 0, 0 };
copyRegion.imageExtent = { unsigned int(width), unsigned int(height), 1 };
vkCmdCopyBufferToImage(textureManager.mCommandBuffer, stagingBuffer, mImage, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, &copyRegion);
// The same barrier struct is reused per-mip below: each source mip moves
// TRANSFER_DST -> TRANSFER_SRC before being blitted into the next level.
barrier.srcAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;
barrier.dstAccessMask = VK_ACCESS_TRANSFER_READ_BIT;
barrier.oldLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL;
barrier.newLayout = VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL;
barrier.subresourceRange.levelCount = 1;
VkImageBlit imageBlit = {};
imageBlit.srcSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
imageBlit.srcSubresource.baseArrayLayer = 0;
imageBlit.srcSubresource.layerCount = 6;
imageBlit.dstSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
imageBlit.dstSubresource.baseArrayLayer = 0;
imageBlit.dstSubresource.layerCount = 6;
// Downsample mip i-1 into mip i, halving (but never below 1) each dimension.
unsigned int mipWidth = width, mipHeight = height;
for (unsigned int i = 1; i < nMips; i++)
{
barrier.subresourceRange.baseMipLevel = i - 1;
vkCmdPipelineBarrier(textureManager.mCommandBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, 0, 0, nullptr, 0, nullptr, 1, &barrier);
imageBlit.srcSubresource.mipLevel = i - 1;
imageBlit.srcOffsets[0] = { 0, 0, 0 };
imageBlit.srcOffsets[1] = { int(mipWidth), int(mipHeight), 1 };
imageBlit.dstSubresource.mipLevel = i;
if (mipWidth > 1)
mipWidth /= 2;
if (mipHeight > 1)
mipHeight /= 2;
imageBlit.dstOffsets[0] = { 0, 0, 0 };
imageBlit.dstOffsets[1] = { int(mipWidth), int(mipHeight), 1 };
vkCmdBlitImage(textureManager.mCommandBuffer, mImage, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, mImage, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, &imageBlit, VK_FILTER_LINEAR);
}
// Final transitions: every mip -> SHADER_READ_ONLY. The last mip is still in
// TRANSFER_DST (it was only ever written), all others are in TRANSFER_SRC.
barrier.srcAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;
barrier.dstAccessMask = VK_ACCESS_SHADER_READ_BIT;
barrier.newLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
for (unsigned int i = 0; i < nMips; i++)
{
barrier.oldLayout = i == nMips - 1 ? VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL : VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL;
barrier.subresourceRange.baseMipLevel = i;
vkCmdPipelineBarrier(textureManager.mCommandBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, 0, 0, nullptr, 0, nullptr, 1, &barrier);
}
result = vkEndCommandBuffer(textureManager.mCommandBuffer);
validateResult(result);
// Submit with no fence or semaphores; completion is only observed via the
// vkQueueWaitIdle below. NOTE(review): the validation errors quoted after
// this function suggest the command buffer can still be pending when it is
// reset — confirm the wait actually covers this submission.
VkSubmitInfo submitInfo = {};
submitInfo.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
submitInfo.pNext = nullptr;
submitInfo.waitSemaphoreCount = 0;
submitInfo.pWaitSemaphores = nullptr;
submitInfo.pWaitDstStageMask = nullptr;
submitInfo.commandBufferCount = 1;
submitInfo.pCommandBuffers = &textureManager.mCommandBuffer;
submitInfo.signalSemaphoreCount = 0;
submitInfo.pSignalSemaphores = nullptr;
result = vkQueueSubmit(renderSystem.mGraphicsQueue, 1, &submitInfo, NULL);
validateResult(result);
// Create image view: cube view over all 6 layers and the full mip chain.
VkImageViewCreateInfo imageViewCreateInfo = {};
imageViewCreateInfo.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO;
imageViewCreateInfo.pNext = nullptr;
imageViewCreateInfo.flags = 0;
imageViewCreateInfo.image = mImage;
imageViewCreateInfo.viewType = VK_IMAGE_VIEW_TYPE_CUBE;
imageViewCreateInfo.format = cubemapInfo.format;
imageViewCreateInfo.components = formatInfo.componentMapping;
imageViewCreateInfo.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
imageViewCreateInfo.subresourceRange.baseMipLevel = 0;
imageViewCreateInfo.subresourceRange.levelCount = nMips;
imageViewCreateInfo.subresourceRange.baseArrayLayer = 0;
imageViewCreateInfo.subresourceRange.layerCount = 6;
result = vkCreateImageView(renderSystem.mDevice, &imageViewCreateInfo, nullptr, &mImageView);
validateResult(result);
// Create sampler: trilinear filtering across the whole mip range, repeat
// addressing, max supported anisotropy.
VkSamplerCreateInfo samplerCreateInfo = {};
samplerCreateInfo.sType = VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO;
samplerCreateInfo.pNext = nullptr;
samplerCreateInfo.flags = 0;
samplerCreateInfo.magFilter = VK_FILTER_LINEAR;
samplerCreateInfo.minFilter = VK_FILTER_LINEAR;
samplerCreateInfo.mipmapMode = VK_SAMPLER_MIPMAP_MODE_LINEAR;
samplerCreateInfo.mipLodBias = 0.0f;
samplerCreateInfo.minLod = 0.0f;
samplerCreateInfo.maxLod = float(nMips);
samplerCreateInfo.addressModeU = VK_SAMPLER_ADDRESS_MODE_REPEAT;;
samplerCreateInfo.addressModeV = VK_SAMPLER_ADDRESS_MODE_REPEAT;
samplerCreateInfo.addressModeW = VK_SAMPLER_ADDRESS_MODE_REPEAT;
samplerCreateInfo.anisotropyEnable = VK_TRUE;
samplerCreateInfo.maxAnisotropy = renderSystem.mPhysicalDeviceProperties.limits.maxSamplerAnisotropy;
samplerCreateInfo.compareEnable = VK_FALSE;
samplerCreateInfo.compareOp = VK_COMPARE_OP_ALWAYS;
samplerCreateInfo.borderColor = VK_BORDER_COLOR_INT_OPAQUE_BLACK;
samplerCreateInfo.unnormalizedCoordinates = VK_FALSE;
result = vkCreateSampler(renderSystem.mDevice, &samplerCreateInfo, nullptr, &mSampler);
validateResult(result);
// Wait for the upload to finish, then recycle the command buffer and release
// the staging resources.
result = vkQueueWaitIdle(renderSystem.mGraphicsQueue);
validateResult(result);
result = vkResetCommandBuffer(textureManager.mCommandBuffer, 0);
validateResult(result);
vkDestroyBuffer(renderSystem.mDevice, stagingBuffer, nullptr);
vkFreeMemory(renderSystem.mDevice, stagingMemory, nullptr);
#pragma endregion
}
Exact errors that occur:
VUID-vkResetCommandBuffer-commandBuffer-00045(ERROR / SPEC): msgNum: 511214570 - Validation Error: [ VUID-vkResetCommandBuffer-commandBuffer-00045 ] Object 0: handle = 0x19323492138, type = VK_OBJECT_TYPE_COMMAND_BUFFER; | MessageID = 0x1e7883ea | Attempt to reset VkCommandBuffer 0x19323492138[] which is in use. The Vulkan spec states: commandBuffer must not be in the pending state (https://vulkan.lunarg.com/doc/view/1.2.162.1/windows/1.2-extensions/vkspec.html#VUID-vkResetCommandBuffer-commandBuffer-00045)
Objects: 1
[0] 0x19323492138, type: 6, name: NULL
UNASSIGNED-CoreValidation-DrawState-InvalidImageLayout(ERROR / SPEC): msgNum: 1303270965 - Validation Error: [ UNASSIGNED-CoreValidation-DrawState-InvalidImageLayout ] Object 0: handle = 0x19323492138, type = VK_OBJECT_TYPE_COMMAND_BUFFER; | MessageID = 0x4dae5635 | Submitted command buffer expects VkImage 0x5fb0e800000000cd[] (subresource: aspectMask 0x1 array layer 0, mip level 0) to be in layout VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL--instead, current layout is VK_IMAGE_LAYOUT_UNDEFINED.
Objects: 1
[0] 0x19323492138, type: 6, name: NULL
(^ This error is thrown continuously for mip levels 0-12 ^)
View full source:
https://github.com/finnbuhse/Vulkan-Engine-V1.0
Although assets and shader binaries are not on the github so compile the shader sources into files with names identical to those found in mesh.cpp line 1083 and adjust main.cpp to include custom models if you wish to try compile and run the source.
Any clue as to why this might be happening would be greatly appreciated
So, after almost an entire year of agonising over this one error... I found what seems to have happened was that the GPU ran out of memory: the skybox I picked initially was simply too large. Each face was 4K, and I found the whole cubemap had to be allocated over a gigabyte of video memory — which, on a mere NVIDIA GTX 1050 Ti, is a quarter of what I have. I did think this was possible early on, which is why I validated every VkResult I could, thinking that if this happened, VK_ERROR_OUT_OF_DEVICE_MEMORY would be returned. However, the 'results' reported nothing but success. Perhaps it wasn't so much video memory as the GPU having a hard time mip-mapping such a large image. Either way, with a different (1K) HDR skybox it works perfectly fine with both 16-bit floating point images as well as 32-bit ones.

I can't assign a texture to IMGUI because I can't create directx texture

I'm trying to add some images for using with IMGUI library. Basically, IMGUI can get directx or OpenGL raw texture data to draw. You can see example code from IMGUI for creating a texture ;
// Simple helper function to load an image into a DX11 texture with common settings
bool LoadTextureFromFile(const char* filename, ID3D11ShaderResourceView** out_srv, int* out_width, int* out_height)
{
// Load from disk into a raw RGBA buffer
int image_width = 0;
int image_height = 0;
unsigned char* image_data = stbi_load(filename, &image_width, &image_height, NULL, 4);
if (image_data == NULL)
return false;
// Create texture
D3D11_TEXTURE2D_DESC desc;
ZeroMemory(&desc, sizeof(desc));
desc.Width = image_width;
desc.Height = image_height;
desc.MipLevels = 1;
desc.ArraySize = 1;
desc.Format = DXGI_FORMAT_R8G8B8A8_UNORM;
desc.SampleDesc.Count = 1;
desc.Usage = D3D11_USAGE_DEFAULT;
desc.BindFlags = D3D11_BIND_SHADER_RESOURCE;
desc.CPUAccessFlags = 0;
ID3D11Texture2D *pTexture = NULL;
D3D11_SUBRESOURCE_DATA subResource;
subResource.pSysMem = image_data;
subResource.SysMemPitch = desc.Width * 4;
subResource.SysMemSlicePitch = 0;
g_pd3dDevice->CreateTexture2D(&desc, &subResource, &pTexture);
// Create texture view
D3D11_SHADER_RESOURCE_VIEW_DESC srvDesc;
ZeroMemory(&srvDesc, sizeof(srvDesc));
srvDesc.Format = DXGI_FORMAT_R8G8B8A8_UNORM;
srvDesc.ViewDimension = D3D11_SRV_DIMENSION_TEXTURE2D;
srvDesc.Texture2D.MipLevels = desc.MipLevels;
srvDesc.Texture2D.MostDetailedMip = 0;
g_pd3dDevice->CreateShaderResourceView(pTexture, &srvDesc, out_srv);
pTexture->Release();
*out_width = image_width;
*out_height = image_height;
stbi_image_free(image_data);
return true;
}
Everything is OK except the 'g_pd3dDevice->CreateTexture2D(&desc, &subResource, &pTexture);' line, because as you can see g_pd3dDevice is undefined. That's why I added a line like: ID3D11Device* g_pd3dDevice = nullptr; but when I run the code it hangs. I guess I must create a D3D device before using this part, but I can't. Any suggestions? Thanks
Now I can get some image but it's wrong:
Now my code like this ;
bool LoadTextureFromBufferDX11(unsigned char* address, ID3D11ShaderResourceView** out_srv, int* out_width, int* out_height)
{
WNDCLASSEX wc = { sizeof(WNDCLASSEX), CS_CLASSDC, WndProc, 0L, 0L, GetModuleHandle(NULL), NULL, NULL, NULL, NULL, _T("ImGui Example"), NULL };
::RegisterClassEx(&wc);
//HWND hwnd = ::CreateWindow(wc.lpszClassName, _T("Dear ImGui DirectX11 Example"), WS_OVERLAPPEDWINDOW, 100, 100, 200, 200, NULL, NULL, wc.hInstance, NULL);
HWND hwnd = GetForegroundWindow();
CreateDeviceD3D(hwnd);
/*
if (!CreateDeviceD3D(hwnd))
{
CleanupDeviceD3D();
::UnregisterClass(wc.lpszClassName, wc.hInstance);
return 1;
}
*/
//ImGui_ImplDX11_Init(g_pd3dDevice, g_pd3dDeviceContext);
// Load from disk into a raw RGBA buffer
int image_width = 0;
int image_height = 0;
const char* filename = "e:\\obama.png";
unsigned char* image_data = stbi_load(filename, &image_width, &image_height, NULL, 4);
//unsigned char* image_data = address;
if (image_data == NULL)
return false;
// Create texture
D3D11_TEXTURE2D_DESC desc;
ZeroMemory(&desc, sizeof(desc));
desc.Width = image_width;
desc.Height = image_height;
desc.MipLevels = 1;
desc.ArraySize = 1;
desc.Format = DXGI_FORMAT_R8G8B8A8_UNORM;
desc.SampleDesc.Count = 1;
desc.Usage = D3D11_USAGE_DEFAULT;
desc.BindFlags = D3D11_BIND_SHADER_RESOURCE;
desc.CPUAccessFlags = 0;
ID3D11Texture2D *pTexture = NULL;
D3D11_SUBRESOURCE_DATA subResource;
subResource.pSysMem = image_data;
subResource.SysMemPitch = desc.Width * 4;
subResource.SysMemSlicePitch = 0;
g_pd3dDevice->CreateTexture2D(&desc, &subResource, &pTexture);
// Create texture view
D3D11_SHADER_RESOURCE_VIEW_DESC srvDesc;
ZeroMemory(&srvDesc, sizeof(srvDesc));
srvDesc.Format = DXGI_FORMAT_R8G8B8A8_UNORM;
srvDesc.ViewDimension = D3D11_SRV_DIMENSION_TEXTURE2D;
srvDesc.Texture2D.MipLevels = desc.MipLevels;
srvDesc.Texture2D.MostDetailedMip = 0;
g_pd3dDevice->CreateShaderResourceView(pTexture, &srvDesc, out_srv);
pTexture->Release();
*out_width = image_width;
*out_height = image_height;
return true;
}
And I trigger this function from host app.
static ID3D11ShaderResourceView* my_texture = NULL;
int my_image_width = 0;
int my_image_height = 0;
// Host-app export: loads the texture into the module-level my_texture /
// my_image_width / my_image_height, passing `address` through to the loader.
// Always returns 1; the loader's success flag is currently unchecked (the
// assert is commented out).
fn_export double imgui_showimage(unsigned char* address) {
bool ret = LoadTextureFromBufferDX11(address, &my_texture, &my_image_width, &my_image_height);
//IM_ASSERT(ret);
//ImGui::Image((void*)my_texture, ImVec2(my_image_width, my_image_height));
return 1;
}
And I draw imgu image from host like ;
// Host-app export: draws the previously loaded my_texture at its stored size.
// The `address` parameter is unused. Always returns 1.
fn_export double imgui_image(char* address) {
ImGui::Image((void*)my_texture, ImVec2(my_image_width, my_image_height));
return 1;
}

correct resizing of D2D over D3D

I have a problem with resizing a D2D render target over D3D11. I made a function to resize the swap chain buffer.
It works great, but when I activate D2D, resizing becomes broken.
When I make the window smaller, all meshes disappear from the window — only text is displayed.
When I make it bigger, meshes are rendered, but the proportions are not correct, even though I recreate the projection matrix.
My question is: how do I correctly resize a D2D render target and its text after the window size has changed?
Here is my code for creating D3D11Device, swapchain1 and D2D.
// Creates the D3D11 device, swap chain (CreateSwapChainForHwnd), render
// target / depth-stencil views, rasterizer state and viewport, and collects
// adapter/monitor/display-mode information along the way.
// Returns true on success, false on the first failed HRESULT.
bool D3D11::InitializeD3D11(HWND hwnd, SettingsContainer* settingsContainer)
{
this->hwnd = hwnd;
HRESULT result;
IDXGIFactory2* factory;
IDXGIAdapter* adapter;
IDXGIOutput* adapterOutput;
// UNKNOWN driver type is required when an explicit adapter is passed to
// D3D11CreateDevice below.
D3D_DRIVER_TYPE driverType = D3D_DRIVER_TYPE_UNKNOWN;
// NOTE(review): numerator/denominator are never assigned (the assignment
// below is commented out) — they are unused.
unsigned int numModes, numerator, denominator;
DXGI_MODE_DESC* displayModeList;
DXGI_ADAPTER_DESC adapterDesc;
DXGI_OUTPUT_DESC displayDesc;
DXGI_SWAP_CHAIN_DESC1 swapChainDescription;
D3D_FEATURE_LEVEL featureLevel;
ID3D11Texture2D* backBufferPtr;
D3D11_TEXTURE2D_DESC depthBufferDescription;
D3D11_DEPTH_STENCIL_DESC depthStencilDescription;
D3D11_DEPTH_STENCIL_VIEW_DESC depthStencilViewDescription;
D3D11_RASTERIZER_DESC rasterizerDescription;
D3D11_VIEWPORT viewPort;
// NOTE(review): depthStencilDescriptionDisabled and blendStateDescrition
// (sic) are declared but never used in this function.
D3D11_DEPTH_STENCIL_DESC depthStencilDescriptionDisabled;
D3D11_BLEND_DESC blendStateDescrition;
AdapterInfo* adapterInfo;
DisplayInfo* displayInfo;
adapterInfo = NULL;
// Create the DXGI factory
result = CreateDXGIFactory1(__uuidof(IDXGIFactory2), (void**)&factory);
if (FAILED(result))
{
return false;
}
/*IDXGIDevice2 * pDXGIDevice;
result = d3d11_Device->QueryInterface(__uuidof(IDXGIDevice2), (void **)&pDXGIDevice);*/
/*IDXGIAdapter * pDXGIAdapter;
result = pDXGIDevice->GetParent(__uuidof(IDXGIAdapter), (void **)&pDXGIAdapter);*/
/*IDXGIFactory2 * pIDXGIFactory;
pDXGIAdapter->GetParent(__uuidof(IDXGIFactory2), (void **)&pIDXGIFactory);*/
//pIDXGIFactory->EnumAdapters1(0,&adapter);
//adapter->EnumOutputs(0, &adapterOutput);
// Enumerate all graphics adapters
for (UINT i = 0; factory->EnumAdapters(i,&adapter)!= DXGI_ERROR_NOT_FOUND;++i)
{
adapterInfo = new AdapterInfo(adapter);
adapter->GetDesc(&adapterDesc);
adapterInfo->Name = adapterDesc.Description;
adapterInfo->InternalMemory = (float)adapterDesc.DedicatedVideoMemory/1024/1024;
// Enumerate every output (monitor) attached to this adapter
for (UINT j = 0; adapter->EnumOutputs(j, &adapterOutput) != DXGI_ERROR_NOT_FOUND;++j)
{
displayInfo = new DisplayInfo;
adapterOutput->GetDesc(&displayDesc);
displayInfo->MonitorHWND = displayDesc.Monitor;
DWORD cPhysicalMonitors;
LPPHYSICAL_MONITOR pPhysicalMonitors = NULL;
BOOL bSuccess = GetNumberOfPhysicalMonitorsFromHMONITOR(displayInfo->MonitorHWND, &cPhysicalMonitors);
// NOTE(review): this zeroes the pointer variable itself (redundant —
// it was just initialized to NULL), not the malloc'd array below.
ZeroMemory(&pPhysicalMonitors, sizeof(pPhysicalMonitors));
// NOTE(review): pPhysicalMonitors is malloc'd but never freed, and
// DestroyPhysicalMonitors is never called — leaks every iteration.
pPhysicalMonitors = (LPPHYSICAL_MONITOR)malloc(
cPhysicalMonitors* sizeof(PHYSICAL_MONITOR));
GetPhysicalMonitorsFromHMONITOR(displayInfo->MonitorHWND, cPhysicalMonitors, pPhysicalMonitors);
displayInfo->Name = pPhysicalMonitors->szPhysicalMonitorDescription;
// Get the number of display modes matching the monitor format
// (NOTE(review): the original comment mentioned
// DXGI_FORMAT_R10G10B10_XR_BIAS_A2_UNORM, but the call actually
// uses DXGI_FORMAT_R8G8B8A8_UNORM).
adapterOutput->GetDisplayModeList(DXGI_FORMAT_R8G8B8A8_UNORM, DXGI_ENUM_MODES_INTERLACED, &numModes, NULL );
displayModeList = new DXGI_MODE_DESC[numModes];
if (!displayModeList)
{
return false;
}
// Fill the display mode list
result = adapterOutput->GetDisplayModeList(DXGI_FORMAT_R8G8B8A8_UNORM, DXGI_ENUM_MODES_INTERLACED, &numModes, displayModeList);
// Walk all display modes and keep the ones matching the filter below;
// the refresh-rate numerator/denominator are stored per accepted mode.
// NOTE(review): "file.txt" is reopened (and truncated) once per output
// in this loop — debug dump only survives for the last output.
std::ofstream input_file("file.txt");
// NOTE(review): signed k vs unsigned numModes — signed/unsigned
// comparison warning; harmless for realistic mode counts.
for(int k=0; k<numModes; k++)
{
if (input_file.is_open())
{
input_file << displayModeList[k].Width<<" "<<displayModeList[k].Height<<" "<<displayModeList[k].RefreshRate.Numerator<<" "<<displayModeList[k].RefreshRate.Denominator
<<" "<<displayModeList[k].ScanlineOrdering<<" "<<displayModeList[k].Format<<" "<<displayModeList[k].Scaling<<std::endl;
}
// Filter: width >= 1024, refresh rate > 59 Hz, centered scaling
if (displayModeList[k].Width >= 1024)
{
if ((displayModeList[k].RefreshRate.Numerator/displayModeList[k].RefreshRate.Denominator)>59)
{
if (displayModeList[k].Scaling == DXGI_MODE_SCALING_CENTERED)
{
DisplayMode displayMode;
displayMode.Width = displayModeList[k].Width;
displayMode.Height = displayModeList[k].Height;
displayMode.Numerator = displayModeList[k].RefreshRate.Numerator;
displayMode.Denominator = displayModeList[k].RefreshRate.Denominator;
displayInfo->DisplayModes.push_back(displayMode);
/*numerator = displayModeList[k].RefreshRate.Numerator;
denominator = displayModeList[k].RefreshRate.Denominator;*/
}
}
}
}
// Delete the display mode list
delete[] displayModeList;
displayModeList = NULL;
adapterInfo->displayList.push_back(displayInfo);
input_file.close ();
}
// Add everything collected for this adapter to the vector
adapterList.push_back(adapterInfo);
}
// Release the adapter and adapter output
// NOTE(review): the Release() calls are commented out — the COM
// references from the enumeration loops above are leaked.
//adapter->Release();
adapter = 0;
//adapterOutput->Release();
adapterOutput = 0;
// Release the factory
// NOTE(review): intentionally NOT released here — the factory is still
// used below for EnumAdapters and CreateSwapChainForHwnd, but it is
// never released afterwards either (leak).
/*factory->Release();
factory = 0;*/
D3D_FEATURE_LEVEL featureLevels[] =
{
D3D_FEATURE_LEVEL_11_0,
D3D_FEATURE_LEVEL_10_1,
D3D_FEATURE_LEVEL_10_0
};
UINT numFeatureLevels = ARRAYSIZE(featureLevels);
factory->EnumAdapters(0,&adapter);
// Create the Direct3D device and device context on the first adapter
/*result = D3D11CreateDevice(NULL, driverType,NULL,D3D11_CREATE_DEVICE_DEBUG|D3D11_CREATE_DEVICE_BGRA_SUPPORT, featureLevels,numFeatureLevels,
D3D11_SDK_VERSION, &d3d11_Device, &featureLevel, &d3d11_DeviceContext );*/
// BGRA support flag is required for Direct2D interop on this device.
result = D3D11CreateDevice(adapterList[0]->adapter, driverType, NULL, D3D11_CREATE_DEVICE_DEBUG|D3D11_CREATE_DEVICE_BGRA_SUPPORT, featureLevels,numFeatureLevels,
D3D11_SDK_VERSION, &d3d11_Device, &featureLevel, &d3d11_DeviceContext );
if (FAILED(result))
{
return false;
}
// NOTE(review): the queried MSAA quality level is never used; the swap
// chain below is created with SampleDesc.Count = 1 (no MSAA).
UINT qualityLevels;
d3d11_Device->CheckMultisampleQualityLevels(DXGI_FORMAT_R8G8B8A8_UNORM, 4, &qualityLevels);
// Initialize the swap chain description
ZeroMemory(&swapChainDescription, sizeof(swapChainDescription));
// Set the buffer count
swapChainDescription.BufferCount = 1;
// Set the back-buffer width and height
swapChainDescription.Width = settingsContainer->renderSettingsDX11->GetScreenWidth();
swapChainDescription.Height = settingsContainer->renderSettingsDX11->GetScreenHeight();
// Set the pixel format
swapChainDescription.Format = DXGI_FORMAT_R8G8B8A8_UNORM;
swapChainDescription.SampleDesc.Count = 1;
swapChainDescription.SampleDesc.Quality = 0;
// Set the back-buffer usage
swapChainDescription.BufferUsage = DXGI_USAGE_RENDER_TARGET_OUTPUT;
swapChainDescription.SwapEffect = DXGI_SWAP_EFFECT_DISCARD;
swapChainDescription.AlphaMode = DXGI_ALPHA_MODE_UNSPECIFIED;
DXGI_SWAP_CHAIN_FULLSCREEN_DESC fullscreen;
fullscreen.RefreshRate.Numerator = 0;
fullscreen.RefreshRate.Denominator = 1;
fullscreen.Scaling = DXGI_MODE_SCALING_UNSPECIFIED;
fullscreen.ScanlineOrdering = DXGI_MODE_SCANLINE_ORDER_PROGRESSIVE;
// Select full-screen or windowed mode from the settings
if (settingsContainer->renderSettingsDX11->IsFullScreenEnabled())
{
fullscreen.Windowed = FALSE;
}
else
{
fullscreen.Windowed = TRUE;
}
// Create the swap chain
result = factory->CreateSwapChainForHwnd(d3d11_Device, hwnd, &swapChainDescription, &fullscreen, NULL, &swapChain);
if(FAILED(result))
return false;
// Get a pointer to the back buffer
result = swapChain->GetBuffer(0,__uuidof(ID3D11Texture2D), (LPVOID*)&backBufferPtr);
if (FAILED(result))
return false;
// Create the render target view on the back buffer
result = d3d11_Device->CreateRenderTargetView(backBufferPtr, NULL, &d3d11_RenderTargetView);
if(FAILED(result))
return false;
// Release the back-buffer pointer; it is no longer needed
backBufferPtr->Release();
backBufferPtr = 0;
// Initialize the depth-buffer description
ZeroMemory(&depthBufferDescription, sizeof(depthBufferDescription));
// Fill the description
depthBufferDescription.Width = settingsContainer->renderSettingsDX11->GetScreenWidth();
depthBufferDescription.Height = settingsContainer->renderSettingsDX11->GetScreenHeight();
depthBufferDescription.MipLevels = 1;
depthBufferDescription.ArraySize = 1;
depthBufferDescription.Format = DXGI_FORMAT_D24_UNORM_S8_UINT;
depthBufferDescription.SampleDesc.Count = 1;
depthBufferDescription.SampleDesc.Quality = 0;
depthBufferDescription.Usage = D3D11_USAGE_DEFAULT;
depthBufferDescription.BindFlags = D3D11_BIND_DEPTH_STENCIL;
depthBufferDescription.CPUAccessFlags = 0;
depthBufferDescription.MiscFlags = 0;
// Create the depth-buffer texture from the filled description
result = d3d11_Device->CreateTexture2D(&depthBufferDescription, NULL, &d3d11_DepthStencilBuffer);
if (FAILED(result))
return false;
// Initialize the stencil-state description
ZeroMemory(&depthStencilDescription, sizeof(depthStencilDescription));
// Fill the depth/stencil-state description
depthStencilDescription.DepthEnable = true;
depthStencilDescription.DepthWriteMask = D3D11_DEPTH_WRITE_MASK_ALL;
depthStencilDescription.DepthFunc = D3D11_COMPARISON_LESS;
depthStencilDescription.StencilEnable = true;
depthStencilDescription.StencilReadMask = 0xFF;
depthStencilDescription.StencilWriteMask = 0xFF;
// Stencil operations if the pixel is front-facing
depthStencilDescription.FrontFace.StencilFailOp = D3D11_STENCIL_OP_KEEP;
depthStencilDescription.FrontFace.StencilDepthFailOp = D3D11_STENCIL_OP_INCR;
depthStencilDescription.FrontFace.StencilPassOp = D3D11_STENCIL_OP_KEEP;
depthStencilDescription.FrontFace.StencilFunc = D3D11_COMPARISON_ALWAYS;
// Stencil operations if the pixel is back-facing
depthStencilDescription.BackFace.StencilFailOp = D3D11_STENCIL_OP_KEEP;
depthStencilDescription.BackFace.StencilDepthFailOp = D3D11_STENCIL_OP_DECR;
depthStencilDescription.BackFace.StencilPassOp = D3D11_STENCIL_OP_KEEP;
depthStencilDescription.BackFace.StencilFunc = D3D11_COMPARISON_ALWAYS;
// Create the depth-stencil state
result = d3d11_Device->CreateDepthStencilState(&depthStencilDescription, &d3d11_DepthStencilState);
if (FAILED(result))
{
return false;
}
// Set the depth-stencil state
d3d11_DeviceContext->OMSetDepthStencilState(d3d11_DepthStencilState,1);
// Initialize the depth-stencil-view structure
ZeroMemory (&depthStencilViewDescription, sizeof(depthStencilViewDescription));
// Fill the depth-stencil-view description
depthStencilViewDescription.Format = DXGI_FORMAT_D24_UNORM_S8_UINT;
depthStencilViewDescription.ViewDimension = D3D11_DSV_DIMENSION_TEXTURE2D;
depthStencilViewDescription.Texture2D.MipSlice = 0;
// Create the depth-stencil view
result = d3d11_Device->CreateDepthStencilView(d3d11_DepthStencilBuffer, &depthStencilViewDescription, &d3d11_DepthStencilView);
if (FAILED(result))
return false;
// Bind the render target view and depth-stencil buffer to the output pipeline
d3d11_DeviceContext->OMSetRenderTargets(1, &d3d11_RenderTargetView, d3d11_DepthStencilView);
// Fill the rasterizer description that defines how and which polygons are drawn
rasterizerDescription.AntialiasedLineEnable = false;
rasterizerDescription.CullMode = D3D11_CULL_BACK;
rasterizerDescription.DepthBias = 0;
rasterizerDescription.DepthBiasClamp = 0.0f;
rasterizerDescription.DepthClipEnable = true;
rasterizerDescription.FillMode = D3D11_FILL_SOLID;
rasterizerDescription.FrontCounterClockwise = false;
// NOTE(review): MultisampleEnable is true while the swap chain and depth
// buffer use SampleDesc.Count = 1 — this flag only affects AA-line
// behavior here, but the mismatch looks unintentional.
rasterizerDescription.MultisampleEnable = true;
rasterizerDescription.ScissorEnable = false;
rasterizerDescription.SlopeScaledDepthBias = 0.0f;
// Create the rasterizer state from the description just filled
result = d3d11_Device->CreateRasterizerState(&rasterizerDescription, &d3d11_RasterizerState);
if(FAILED(result))
{
return false;
}
// Set the rasterizer state (FillMode can be switched between
// D3D11_FILL_SOLID and D3D11_FILL_WIREFRAME)
d3d11_DeviceContext->RSSetState(d3d11_RasterizerState);
// Set up the viewport for rendering
viewPort.Width = (float)settingsContainer->renderSettingsDX11->GetScreenWidth();
viewPort.Height = (float)settingsContainer->renderSettingsDX11->GetScreenHeight();
viewPort.MinDepth = 0.0f;
viewPort.MaxDepth = 1.0f;
viewPort.TopLeftX = 0.0f;
viewPort.TopLeftY = 0.0f;
// Create the viewport
d3d11_DeviceContext->RSSetViewports(1,&viewPort);
return true;
}
// Handles window resize: releases the views that reference the swap-chain
// back buffer, resizes the swap-chain buffers to the current client area,
// then recreates the render target view, depth buffer/view and viewport.
// NOTE(review): the likely root cause of the broken resize described in the
// question — per DXGI rules, ALL outstanding references to the back buffer
// (including the D2D render target created on it in InitializeD2D11) must
// be released BEFORE ResizeBuffers, and the D2D resources recreated after;
// otherwise ResizeBuffers fails and rendering breaks. TODO confirm against
// the full source.
void D3D11::ResizeSwapChain()
{
HRESULT result;
if (swapChain)
{
RECT rc;
GetClientRect(hwnd, &rc);
UINT width = rc.right - rc.left;
UINT height = rc.bottom - rc.top;
// Unbind the render targets and drop our back-buffer reference
d3d11_DeviceContext->OMSetRenderTargets(0,0,0);
d3d11_RenderTargetView->Release();
// Width/Height of 0 makes DXGI size the buffers to the client area
swapChain->ResizeBuffers(0, 0, 0, DXGI_FORMAT_R8G8B8A8_UNORM, 0);
// NOTE(review): backBufferPtr is not declared in this function —
// presumably a class member; verify against the header.
swapChain->GetBuffer(0, __uuidof(ID3D11Texture2D), (LPVOID*)&backBufferPtr);
d3d11_Device->CreateRenderTargetView(backBufferPtr, NULL, &d3d11_RenderTargetView);
// Initialize the depth-buffer description
// NOTE(review): depthBufferDescription is also not declared locally —
// presumably a class member; verify.
ZeroMemory(&depthBufferDescription, sizeof(depthBufferDescription));
// Fill the description at the new client size
depthBufferDescription.Width = width;
depthBufferDescription.Height = height;
depthBufferDescription.MipLevels = 1;
depthBufferDescription.ArraySize = 1;
depthBufferDescription.Format = DXGI_FORMAT_D24_UNORM_S8_UINT;
depthBufferDescription.SampleDesc.Count = 1;
depthBufferDescription.SampleDesc.Quality = 0;
depthBufferDescription.Usage = D3D11_USAGE_DEFAULT;
depthBufferDescription.BindFlags = D3D11_BIND_DEPTH_STENCIL;
depthBufferDescription.CPUAccessFlags = 0;
depthBufferDescription.MiscFlags = 0;
d3d11_DepthStencilBuffer->Release();
d3d11_DepthStencilBuffer = NULL;
// Create the depth-buffer texture from the filled description
result = d3d11_Device->CreateTexture2D(&depthBufferDescription, NULL, &d3d11_DepthStencilBuffer);
// NOTE(review): as pasted, InitializeD2D11 appears here, nested inside
// ResizeSwapChain — C++ does not allow nested function definitions, so
// in the real source this must be a separate member function and the
// paste boundary is misplaced. TODO confirm.
bool D3D11::InitializeD2D11()
{
options.debugLevel = D2D1_DEBUG_LEVEL_ERROR;
D2D1CreateFactory(D2D1_FACTORY_TYPE_MULTI_THREADED, options, &d2dfactory);
// The D2D render target is created directly on the swap-chain back
// buffer, so it holds a reference that pins the buffer until released.
swapChain->GetBuffer(0, IID_PPV_ARGS(&dxgiBackbuffer));
D2D1_RENDER_TARGET_PROPERTIES props = D2D1::RenderTargetProperties(
D2D1_RENDER_TARGET_TYPE_HARDWARE,
// DXGI_FORMAT_UNKNOWN lets D2D adopt the surface's existing format
D2D1::PixelFormat(DXGI_FORMAT_UNKNOWN, D2D1_ALPHA_MODE_PREMULTIPLIED));
d2dfactory->CreateDxgiSurfaceRenderTarget(dxgiBackbuffer, props, &d2dRenderTarget);
dxgiBackbuffer->Release();
dxgiBackbuffer = NULL;
d2dfactory->Release();
d2dfactory = NULL;
DWriteCreateFactory(DWRITE_FACTORY_TYPE_SHARED, __uuidof(DWriteFactory), (IUnknown**)(&DWriteFactory));
// Two identical 12pt Cambria formats; they differ only in the paragraph
// alignment applied below (NEAR vs FAR).
DWriteFactory->CreateTextFormat(
L"Cambria",
NULL,
DWRITE_FONT_WEIGHT_NORMAL,
DWRITE_FONT_STYLE_NORMAL,
DWRITE_FONT_STRETCH_NORMAL,
12,
L"",
&textFormat);
DWriteFactory->CreateTextFormat(
L"Cambria",
NULL,
DWRITE_FONT_WEIGHT_NORMAL,
DWRITE_FONT_STYLE_NORMAL,
DWRITE_FONT_STRETCH_NORMAL,
12,
L"",
&textFormat2);
textFormat->SetParagraphAlignment(DWRITE_PARAGRAPH_ALIGNMENT_NEAR);
textFormat->SetTextAlignment(DWRITE_TEXT_ALIGNMENT_LEADING);
textFormat2->SetParagraphAlignment(DWRITE_PARAGRAPH_ALIGNMENT_FAR);
textFormat2->SetTextAlignment(DWRITE_TEXT_ALIGNMENT_LEADING);
d2dRenderTarget->CreateSolidColorBrush(D2D1::ColorF(D2D1::ColorF::White), &Brush);
return true;
}
// Set the depth-stencil state
d3d11_DeviceContext->OMSetDepthStencilState(d3d11_DepthStencilState, 1);
// Initialize the depth-stencil-view structure
// NOTE(review): depthStencilViewDescription is not declared locally —
// presumably a class member; verify.
ZeroMemory (&depthStencilViewDescription, sizeof(depthStencilViewDescription));
// Fill the depth-stencil-view description
depthStencilViewDescription.Format = DXGI_FORMAT_D24_UNORM_S8_UINT;
depthStencilViewDescription.ViewDimension = D3D11_DSV_DIMENSION_TEXTURE2D;
depthStencilViewDescription.Texture2D.MipSlice = 0;
d3d11_DepthStencilView->Release();
d3d11_DepthStencilView = NULL;
// Create the depth-stencil view
result = d3d11_Device->CreateDepthStencilView(d3d11_DepthStencilBuffer, &depthStencilViewDescription, &d3d11_DepthStencilView);
// Release the back-buffer pointer; it is no longer needed
backBufferPtr->Release();
backBufferPtr = 0;
// Rebind the recreated render target and depth-stencil view
d3d11_DeviceContext->OMSetRenderTargets(1, &d3d11_RenderTargetView, d3d11_DepthStencilView);
//d3d11_DeviceContext->OMSetRenderTargets(1, &d3d11_RenderTargetView, NULL);
// Set up the viewport at the new client size
D3D11_VIEWPORT viewPort;
viewPort.Width = (float)width;
viewPort.Height = (float)height;
viewPort.MinDepth = 0.0f;
viewPort.MaxDepth = 1.0f;
viewPort.TopLeftX = 0.0f;
viewPort.TopLeftY = 0.0f;
// Create the viewport
d3d11_DeviceContext->RSSetViewports(1,&viewPort);
}
}
I resize the buffers on the WM_SIZE event:
// Window-procedure fragment: on WM_SIZE, rebuild the swap chain, the D2D
// resources, and the camera projection matrix for the new aspect ratio.
case WM_SIZE:
{
// New client-area size packed in lparam
int Width = LOWORD(lparam);
int Height = HIWORD(lparam);
// NOTE(review): str is declared but never used in this handler.
WCHAR* str = NULL;
// NOTE(review): only SIZE_RESTORED is handled — SIZE_MAXIMIZED (and the
// restore after SIZE_MINIMIZED) will not trigger a resize; verify this
// is intentional.
if (wparam == SIZE_RESTORED)
{
if (system->application)
{
system->application->d3d11->ResizeSwapChain();
system->application->d3d11->ResizeD2DBuffer();
// Recreate the projection matrix with the new aspect ratio
system->application->cameraDX_Free->perspective(system->application->cameraDX_Free->m_fovx,
static_cast<float>(Width) / static_cast<float>(Height),
system->application->cameraDX_Free->m_znear,
system->application->cameraDX_Free->m_zfar);
}
}
}
To correctly resize D2D in my case, you first need to release all D2D resources, and then, after resizing the swap chain, recreate all of those resources again.