DirectX 12 command list execution error - C++

I've started learning DirectX 12 and I'm trying to build a simple engine.
I'm following Frank D. Luna's "Introduction to 3D Game Programming with DirectX 12" and I've run into some problems.
First, when creating the swap chain, I fill the description like this:
DXGI_SWAP_CHAIN_DESC swapChainDesc;
swapChainDesc.BufferDesc.Width = Core::displayWidth;
swapChainDesc.BufferDesc.Height = Core::displayHeight;
swapChainDesc.BufferDesc.RefreshRate.Numerator = 60;
swapChainDesc.BufferDesc.RefreshRate.Denominator = 1;
swapChainDesc.BufferDesc.Format = Core::pixelDefinitionFormat;
swapChainDesc.BufferDesc.ScanlineOrdering = DXGI_MODE_SCANLINE_ORDER_UNSPECIFIED;
swapChainDesc.BufferDesc.Scaling = DXGI_MODE_SCALING_UNSPECIFIED;
swapChainDesc.SampleDesc.Count = Core::multiSamplingLevel ? 4 : 1;
swapChainDesc.SampleDesc.Quality = Core::multiSamplingEnabled ? (Core::multiSamplingLevel - 1) : 0;
swapChainDesc.BufferUsage = DXGI_USAGE_RENDER_TARGET_OUTPUT;
swapChainDesc.BufferCount = (INT) Core::buffering;
swapChainDesc.OutputWindow = Core::mainWindow;
swapChainDesc.Windowed = true;
swapChainDesc.SwapEffect = DXGI_SWAP_EFFECT_FLIP_DISCARD;
swapChainDesc.Flags = DXGI_SWAP_CHAIN_FLAG_ALLOW_MODE_SWITCH;
// Note: Swap chain uses queue to perform flush.
ThrowIfFailed(Core::factory->CreateSwapChain(
    Core::commandQueue.Get(),
    &swapChainDesc,
    Core::swapChain.GetAddressOf()
));
I receive a "bad parameter" error. I've already found a solution on MSDN, but I want to know what I'm doing wrong.
My second question is why I get:
D3D12 ERROR: ID3D12GraphicsCommandList::*: A single command list cannot write to multiple buffers within a particular swapchain. [ STATE_SETTING ERROR #904: COMMAND_LIST_MULTIPLE_SWAPCHAIN_BUFFER_REFERENCES]
during execution of this screen-clearing code fragment:
void Renderer::drawSomething() {
    // Reuse the memory associated with command recording.
    // We can only reset when the associated command lists have finished
    // execution on the GPU.
    ThrowIfFailed(Core::commandAllocator->Reset());
    // A command list can be reset after it has been added to the
    // command queue via ExecuteCommandList. Reusing the command list reuses memory.
    ThrowIfFailed(Core::commandList->Reset(Core::commandAllocator.Get(), NULL));
    // Set the viewport and scissor rect. This needs to be reset
    // whenever the command list is reset.
    Core::commandList->RSSetViewports(1, &Core::viewport);
    Core::commandList->RSSetScissorRects(1, &Core::scissorsRectangle);
    // Indicate a state transition on the resource usage.
    Core::commandList->ResourceBarrier(
        1,
        &CD3DX12_RESOURCE_BARRIER::Transition(
            Core::getCurrentBackBuffer().Get(),
            D3D12_RESOURCE_STATES::D3D12_RESOURCE_STATE_PRESENT,
            D3D12_RESOURCE_STATES::D3D12_RESOURCE_STATE_RENDER_TARGET
        )
    );
    // Specify the buffers we are going to render to.
    Core::commandList->OMSetRenderTargets(
        1,
        &Core::getCurrentBackBufferView(),
        true,
        &Core::getDSVHeapStartDescriptorHandle()
    );
    // Clear the back buffer and depth buffer.
    Core::commandList->ClearRenderTargetView(
        Core::getCurrentBackBufferView(),
        DirectX::Colors::LightSteelBlue,
        0,
        NULL
    );
    Core::commandList->ClearDepthStencilView(
        Core::getDSVHeapStartDescriptorHandle(),
        D3D12_CLEAR_FLAGS::D3D12_CLEAR_FLAG_DEPTH | D3D12_CLEAR_FLAGS::D3D12_CLEAR_FLAG_STENCIL,
        1.0f,
        0,
        0,
        NULL
    );
    // Indicate a state transition on the resource usage.
    Core::commandList->ResourceBarrier(
        1,
        &CD3DX12_RESOURCE_BARRIER::Transition(
            Core::getCurrentBackBuffer().Get(),
            D3D12_RESOURCE_STATES::D3D12_RESOURCE_STATE_RENDER_TARGET,
            D3D12_RESOURCE_STATES::D3D12_RESOURCE_STATE_PRESENT
        )
    );
    // Done recording commands.
    ThrowIfFailed(Core::commandList->Close());
    // Add the command list to the queue for execution.
    ID3D12CommandList* cmdsLists[] = { Core::commandList.Get() };
    Core::commandQueue->ExecuteCommandLists(_countof(cmdsLists), cmdsLists);
    // Swap the back and front buffers.
    ThrowIfFailed(Core::swapChain->Present(0, 0));
    UINT buffering = Core::buffering;
    Core::currentBackBuffer = (Core::currentBackBuffer + 1) % buffering;
    Core::flushCommandQueue();
}
To avoid making a big mess of this post, I won't paste all the code here, but if you'd like to see how it looks, or if it matters in this case, my whole repository is here:
repository
It's very small and simple; almost all the code is in the Core class.
Thank you in advance!
Edit:
I found the solution to the second question.
The problem was in this loop:
void Core::createSwapChainBuffersIntoRTVHeap() {
    for (UINT i = 0; i < Core::buffering; i++) {
        CD3DX12_CPU_DESCRIPTOR_HANDLE rtvHeapHandle(rtvDescriptorHeap->GetCPUDescriptorHandleForHeapStart());
        ErrorUtils::messageAndExitIfFailed(
            swapChain->GetBuffer(i, IID_PPV_ARGS(&swapChainBackBuffers[i])),
            L"Error fetching the back buffer!",
            GET_SWAPCHAIN_BACK_BUFFER_ERROR
        );
        device->CreateRenderTargetView(swapChainBackBuffers[i].Get(), NULL, rtvHeapHandle);
        // Remembers the offset; this is just an ordinary heap.
        rtvHeapHandle.Offset(1, rtvDescriptorSize);
    }
}
Moving a single line made the code look like this:
void Core::createSwapChainBuffersIntoRTVHeap() {
    CD3DX12_CPU_DESCRIPTOR_HANDLE rtvHeapHandle(rtvDescriptorHeap->GetCPUDescriptorHandleForHeapStart());
    for (UINT i = 0; i < Core::buffering; i++) {
        ErrorUtils::messageAndExitIfFailed(
            swapChain->GetBuffer(i, IID_PPV_ARGS(&swapChainBackBuffers[i])),
            L"Error fetching the back buffer!",
            GET_SWAPCHAIN_BACK_BUFFER_ERROR
        );
        device->CreateRenderTargetView(swapChainBackBuffers[i].Get(), NULL, rtvHeapHandle);
        // Remembers the offset; this is just an ordinary heap.
        rtvHeapHandle.Offset(1, rtvDescriptorSize);
    }
}
After that, once the command list closed correctly, I got an AccessViolationException in D3D12.dll on:
ThrowIfFailed(Core::swapChain->Present(0, 0));
After a few hours of internet research, I fixed it by forcing WARP for this application using "dxcpl.exe".
I assume that was because I work on a laptop with an HD 4000 and an Nvidia card as the second GPU, but I'm not sure.

This will not help you fix the problem immediately, but it will give you much more information. DirectX 12 wants you to push all commands into command lists, and it only reports an error when you call Close() on one. Note that you cannot "resurrect" a command list that failed on Close() by resetting it; the best you can do is delete it and create a new one, in which case you may not update referenced resources. I don't really recommend doing that.
What you can do instead is use the ID3D12InfoQueue interface to make your program break on D3D12 errors when a debugger is attached.
Get it from your device:
ID3D12InfoQueue* InfoQueue = nullptr;
Core::device->QueryInterface(IID_PPV_ARGS(&InfoQueue));
Enable "break on severity":
InfoQueue->SetBreakOnSeverity(D3D12_MESSAGE_SEVERITY_ERROR, true);
InfoQueue->SetBreakOnSeverity(D3D12_MESSAGE_SEVERITY_CORRUPTION, true);
InfoQueue->SetBreakOnSeverity(D3D12_MESSAGE_SEVERITY_WARNING, false);
And release it when you're done:
InfoQueue->Release();
You can also configure your InfoQueue to whitelist or blacklist sets of D3D12 message IDs. Your error ID is D3D12_MESSAGE_ID_COMMAND_LIST_MULTIPLE_SWAPCHAIN_BUFFER_REFERENCES (code 904).
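For example, a minimal sketch of blacklisting that message ID with a deny-list filter (this uses the standard ID3D12InfoQueue filter API; adapt the list to your needs):
D3D12_MESSAGE_ID denyIds[] = {
    D3D12_MESSAGE_ID_COMMAND_LIST_MULTIPLE_SWAPCHAIN_BUFFER_REFERENCES
};
D3D12_INFO_QUEUE_FILTER filter = {};
filter.DenyList.NumIDs = _countof(denyIds);
filter.DenyList.pIDList = denyIds;
InfoQueue->AddStorageFilterEntries(&filter);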
I hope this helps someone dealing with this graphics API.

Related

PrintDlgEx invalid argument, while PrintDlg works

Problem: I need to get PrintDlgEx working for my project, but no combination of options or arguments works for me. It gives E_INVALIDARG for any combination of options, including ones I copied from Microsoft samples or other online samples.
Replacing PRINTDLGEX with PRINTDLG and PrintDlgEx with PrintDlg (and removing the group of options that exists only in PRINTDLGEX) works fine.
Unfortunately, I need PrintDlgEx because I really need the Apply button, to change printers or the property sheet without printing, for design and preview.
Please help me find out why I can't get the dialog to show.
Code: while I simplified some pieces, like what should happen on a successful return, or setting DEVMODE and DEVNAMES, I tried this exact function, with the same result: Invalid Argument.
#include <QDebug>
#include <QWindow>
#include <windows.h>
void showPrintDialog()
{
    // Simplifying the setup: real code passes in a QWidget *
    QWidget *caller = this;
    // Not returning a value or doing any work. I just want the dialog to pop up for now.
    // Create the standard Windows print dialog.
    PRINTDLGEX printDialog;
    memset(&printDialog, 0, sizeof(PRINTDLGEX));
    printDialog.lStructSize = sizeof(PRINTDLGEX);
    printDialog.Flags = PD_RETURNDC |   // Return a printer device context. Without this, HDC may be undefined
        PD_USEDEVMODECOPIESANDCOLLATE |
        PD_NOSELECTION |                // Don't allow selecting individual document pages to print
        PD_NOPAGENUMS |                 // Disables some boxes
        PD_NOCURRENTPAGE |              // Disables some boxes
        PD_NONETWORKBUTTON |            // Don't allow networking (but it shows "Find printer"), so what does this do?
        PD_HIDEPRINTTOFILE;             // Don't allow print to file
    // Only in PRINTDLGEX.
    // This block is copied from https://learn.microsoft.com/en-us/windows/win32/dlgbox/using-common-dialog-boxes?redirectedfrom=MSDN
    // I have tried multiple combinations of options, including none; I really don't want any of them.
    printDialog.nStartPage = START_PAGE_GENERAL;
    printDialog.nPageRanges = 1;
    printDialog.nMaxPageRanges = 10;
    LPPRINTPAGERANGE pageRange = (LPPRINTPAGERANGE) GlobalAlloc(GPTR, 10 * sizeof(PRINTPAGERANGE));
    printDialog.lpPageRanges = pageRange;
    printDialog.lpPageRanges[0].nFromPage = 1;
    printDialog.lpPageRanges[0].nToPage = 1;
    printDialog.Flags2 = 0;
    printDialog.ExclusionFlags = 0;
    printDialog.dwResultAction = 0; // This will tell me if PRINT was chosen
    // The rest of the options also exist in PRINTDLG.
    printDialog.nMinPage = 1;
    printDialog.nMaxPage = 10;
    // The only options I really need
    printDialog.nCopies = 1;
    printDialog.hDevMode = Q_NULLPTR;  // which will be better once this works
    printDialog.hDevNames = Q_NULLPTR; // which will be better once this works
    printDialog.hwndOwner = reinterpret_cast<HWND>(caller->windowHandle()->winId());
    // Calling...
    HRESULT result = PrintDlgEx(&printDialog);
    qDebug() << (result == E_INVALIDARG ? "Invalid Argument\n" : "Success\n");
    // It is always E_INVALIDARG.
    // Cleanup
    if (printDialog.hDevMode)
        GlobalFree(printDialog.hDevMode);
    if (printDialog.hDevNames)
        GlobalFree(printDialog.hDevNames);
    if (printDialog.hDC)
        DeleteDC(printDialog.hDC);
}
Platform: Windows 10, latest update.
Qt version: 5.12.7 or higher (in the VM I have 5.15.1).
The fact that I am running in Qt should be irrelevant, since this is all Win API, apart from the C++ version (11).
I can make your example work if I remove the PD_NONETWORKBUTTON flag.
Please note that while it is documented for the PRINTDLGA struct, it is NOT listed for PRINTDLGEXA.
NOTE: I did get the same error with that flag.
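For reference, here is the flag set from the question with only PD_NONETWORKBUTTON removed; everything else stays the same:
printDialog.Flags = PD_RETURNDC |
    PD_USEDEVMODECOPIESANDCOLLATE |
    PD_NOSELECTION |
    PD_NOPAGENUMS |
    PD_NOCURRENTPAGE |
    PD_HIDEPRINTTOFILE; // PD_NONETWORKBUTTON removed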

Is there any way to set VkDescriptorImageInfo to null, or some way of skipping a VkWriteDescriptorSet, without Vulkan complaining?

Some of the meshes I'll be using don't always have a DiffuseMap or a SpecularMap. When I try to load something without a diffuse or specular map, the program crashes because DiffuseMap.ImageView/SpecularMap.ImageView doesn't point to anything. If I try to set the image view/sampler to VK_NULL_HANDLE, the program gives me this and crashes at vkUpdateDescriptorSets:
Validation Layer: Invalid VkImageView Object 0x0. The Vulkan spec states: If descriptorType is VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, or VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT, the imageView and imageLayout members of each element of pImageInfo must be a valid VkImageView and VkImageLayout, respectively (https://www.khronos.org/registry/vulkan/specs/1.1-extensions/html/vkspec.html#VUID-VkWriteDescriptorSet-descriptorType-00326)
Then if I just try to set the binding to null, I get this:
Validation Layer: vkUpdateDescriptorSets: required parameter pDescriptorWrites[2].dstSet specified as VK_NULL_HANDLE
Validation Layer: Cannot call vkUpdateDescriptorSets() on VkDescriptorSet 0x0[] that has not been allocated.
This is what the base code looks like right now. This is the area that defines the descriptor sets, to make it a bit easier to see what's going on:
void Mesh::CreateDescriptorSets(VulkanRenderer& Renderer)
{
    BaseMesh::CreateDescriptorSets(Renderer, *GetDescriptorSetLayout(Renderer));
    VkDescriptorImageInfo DiffuseMap = {};
    DiffuseMap.imageLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
    DiffuseMap.imageView = TextureList[0].textureImageView;
    DiffuseMap.sampler = TextureList[0].textureSampler;
    VkDescriptorImageInfo SpecularMap = {};
    SpecularMap.imageLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
    SpecularMap.imageView = TextureList[1].textureImageView;
    SpecularMap.sampler = TextureList[1].textureSampler;
    for (size_t i = 0; i < GetSwapChainImageCount(Renderer); i++)
    {
        VkDescriptorBufferInfo PositionInfo = {};
        PositionInfo.buffer = uniformBuffers[i];
        PositionInfo.offset = 0;
        PositionInfo.range = sizeof(UniformBufferObject);
        VkDescriptorBufferInfo AmbiantLightInfo = {};
        AmbiantLightInfo.buffer = AmbientLightUniformBuffers[i];
        AmbiantLightInfo.offset = 0;
        AmbiantLightInfo.range = sizeof(AmbientLightUniformBuffer);
        VkDescriptorBufferInfo LightInfo = {};
        LightInfo.buffer = LighterUniformBuffers[i];
        LightInfo.offset = 0;
        LightInfo.range = sizeof(Lighter);
        std::array<WriteDescriptorSetInfo, 5> WriteDescriptorInfo = {};
        WriteDescriptorInfo[0].DstBinding = 0;
        WriteDescriptorInfo[0].DstSet = descriptorSets[i];
        WriteDescriptorInfo[0].DescriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
        WriteDescriptorInfo[0].DescriptorBufferInfo = PositionInfo;
        WriteDescriptorInfo[1].DstBinding = 1;
        WriteDescriptorInfo[1].DstSet = descriptorSets[i];
        WriteDescriptorInfo[1].DescriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
        WriteDescriptorInfo[1].DescriptorImageInfo = DiffuseMap;
        WriteDescriptorInfo[2].DstBinding = 2;
        WriteDescriptorInfo[2].DstSet = descriptorSets[i];
        WriteDescriptorInfo[2].DescriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
        WriteDescriptorInfo[2].DescriptorImageInfo = SpecularMap;
        WriteDescriptorInfo[3].DstBinding = 3;
        WriteDescriptorInfo[3].DstSet = descriptorSets[i];
        WriteDescriptorInfo[3].DescriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
        WriteDescriptorInfo[3].DescriptorBufferInfo = AmbiantLightInfo;
        WriteDescriptorInfo[4].DstBinding = 4;
        WriteDescriptorInfo[4].DstSet = descriptorSets[i];
        WriteDescriptorInfo[4].DescriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
        WriteDescriptorInfo[4].DescriptorBufferInfo = LightInfo;
        Mesh::CreateDescriptorSetsData(Renderer, std::vector<WriteDescriptorSetInfo>(WriteDescriptorInfo.begin(), WriteDescriptorInfo.end()));
    }
}
Until Vulkan 1.2, Vulkan did not recognize the possibility of a descriptor being "empty". When a descriptor set is created, the descriptors are (mostly) uninitialized. It's OK to have a set with an uninitialized descriptor, so long as the pipeline which consumes it does not statically use the descriptor. Since you are presumably trying to use the same pipeline for objects with diffuse maps and objects without them, your shader reads from the image based on a variable you provide. That represents static use of the descriptor, so you need an image there.
The typical way to deal with this is to create a tiny image of a reasonable format and stuff that into the descriptor. You can use the same image for essentially any "null" texture you want to use.
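A minimal sketch of creating such a 1x1 "null" texture (memory allocation, the upload, and the layout transition to VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL are elided; device stands in for your VkDevice):
// Create a 1x1 RGBA8 image to stand in for a missing texture.
VkImageCreateInfo imageInfo = {};
imageInfo.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;
imageInfo.imageType = VK_IMAGE_TYPE_2D;
imageInfo.format = VK_FORMAT_R8G8B8A8_UNORM;
imageInfo.extent = { 1, 1, 1 };
imageInfo.mipLevels = 1;
imageInfo.arrayLayers = 1;
imageInfo.samples = VK_SAMPLE_COUNT_1_BIT;
imageInfo.tiling = VK_IMAGE_TILING_OPTIMAL;
imageInfo.usage = VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT;
imageInfo.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
VkImage nullImage;
vkCreateImage(device, &imageInfo, nullptr, &nullImage);
// ...allocate and bind memory, then transition to SHADER_READ_ONLY_OPTIMAL as usual...
VkImageViewCreateInfo viewInfo = {};
viewInfo.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO;
viewInfo.image = nullImage;
viewInfo.viewType = VK_IMAGE_VIEW_TYPE_2D;
viewInfo.format = VK_FORMAT_R8G8B8A8_UNORM;
viewInfo.subresourceRange = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1 };
VkImageView nullImageView;
vkCreateImageView(device, &viewInfo, nullptr, &nullImageView);
// Use nullImageView (plus any valid sampler) in the descriptor write for missing maps.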
Vulkan 1.2, which promoted the VK_EXT_descriptor_indexing extension to core, allows for the possibility of partially bound descriptors. Basically, if the descriptorBindingPartiallyBound feature is available and requested, you can create the descriptor set layout with the VK_DESCRIPTOR_BINDING_PARTIALLY_BOUND_BIT flag on a binding. This means that it's OK to leave that descriptor undefined so long as it is not dynamically used.
So you simply wouldn't write a value for that descriptor.
Of course, this requires 1.2 (or the aforementioned extension), as well as requesting the feature.
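A minimal sketch of what that looks like, assuming the five bindings from the question (the combined image samplers at bindings 1 and 2 are marked partially bound):
// Requested at device creation (chain into VkDeviceCreateInfo::pNext):
VkPhysicalDeviceDescriptorIndexingFeatures indexingFeatures = {};
indexingFeatures.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DESCRIPTOR_INDEXING_FEATURES;
indexingFeatures.descriptorBindingPartiallyBound = VK_TRUE;

// Applied when creating the descriptor set layout:
VkDescriptorBindingFlags bindingFlags[5] = {
    0,                                         // binding 0: uniform buffer
    VK_DESCRIPTOR_BINDING_PARTIALLY_BOUND_BIT, // binding 1: diffuse map
    VK_DESCRIPTOR_BINDING_PARTIALLY_BOUND_BIT, // binding 2: specular map
    0,                                         // binding 3: uniform buffer
    0                                          // binding 4: uniform buffer
};
VkDescriptorSetLayoutBindingFlagsCreateInfo flagsInfo = {};
flagsInfo.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_BINDING_FLAGS_CREATE_INFO;
flagsInfo.bindingCount = 5;
flagsInfo.pBindingFlags = bindingFlags;
// Chain flagsInfo into VkDescriptorSetLayoutCreateInfo::pNext, then simply
// omit the VkWriteDescriptorSet for any map the mesh doesn't have.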
You are not quite correct: a combined image sampler still requires a valid sampler for some reason, and on top of that, the nullDescriptor feature has to be enabled. I ran into exactly the same problem. What no one ever tells you is that partially bound descriptors are not the same as sparsely bound descriptors. It basically means you can bind X descriptors out of N, with X < N, but all X of them must be valid. The spec is really thin on this, and there are no good examples.
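For reference, a minimal sketch of requesting that nullDescriptor feature, which comes from the VK_EXT_robustness2 extension (the extension itself must also be enabled at device creation):
VkPhysicalDeviceRobustness2FeaturesEXT robustness2 = {};
robustness2.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ROBUSTNESS_2_FEATURES_EXT;
robustness2.nullDescriptor = VK_TRUE; // permits VK_NULL_HANDLE in descriptor writes
// Chain into VkDeviceCreateInfo::pNext alongside any other feature structs.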

Changing MAVLink Message Rate on ArduPilotMega

I am working on a project that uses the MAVLink protocol (in C++) to communicate with an ArduPilotMega (2.6).
I am able to read messages such as ATTITUDE. The current message rate (for all messages) is 2 Hz, and I would like to increase it.
I found out that I should probably set MESSAGE_INTERVAL using MAV_CMD_SET_MESSAGE_INTERVAL in order to change it.
So my question is: how do I send this command message using MAVLink in C++?
I tried doing it with the code below, but it did not work. I guess I have to use the command I mentioned above, but I don't know how.
mavlink_message_t command;
mavlink_message_interval_t interval;
interval.interval_us = 100000;
interval.message_id = 30;
mavlink_msg_message_interval_encode(255, 200, &command, &interval);
p_sensorsPort->write_message(command);
Update: I also tried the code below; maybe I am not giving it the right system ID or component ID.
mavlink_message_t command;
mavlink_command_long_t interval;
interval.param1 = MAVLINK_MSG_ID_ATTITUDE;
interval.param2 = 100000;
interval.command = MAV_CMD_SET_MESSAGE_INTERVAL;
interval.target_system = 0;
interval.target_component = 0;
mavlink_msg_command_long_encode(255, 0, &command, &interval);
p_sensorsPort->write_message(command);
Maybe I am missing something about the difference between target_system/target_component and sysid/compid. I tried a few values for each, but nothing worked.
Is there any ACK that can tell me whether it even got the command?
I guess you missed the start_stop field. The sample below (Java MAVLink bindings) works:
final msg_request_data_stream msg = new msg_request_data_stream();
msg.req_message_rate = rate;
msg.req_stream_id = (short) streamId;
msg.target_component = (short) compID;
msg.target_system = (short) sysID;
/*
 * GCS_Common.cpp contains code that starts sending when value = 1
 * and stops when value = 0. That is it.
 */
if (rate > 0) {
    msg.start_stop = 1;
} else {
    msg.start_stop = 0;
}
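With the C headers from the question, the equivalent request looks roughly like this (a sketch: it assumes the generated common-dialect API and the question's write_message helper, that the autopilot is system 1, and that ATTITUDE belongs to the EXTRA1 stream on ArduPilot):
mavlink_message_t command;
mavlink_msg_request_data_stream_pack(
    255, 200, &command,     // our sysid/compid
    1,                      // target_system: the autopilot is usually 1
    0,                      // target_component: 0 = all components
    MAV_DATA_STREAM_EXTRA1, // stream containing ATTITUDE
    10,                     // req_message_rate in Hz
    1                       // start_stop: 1 = start, 0 = stop
);
p_sensorsPort->write_message(command);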
From a Robotics Stack Exchange answer:
In order to change the message rate, the simplest way is to change the SR_* parameter values using Mission Planner. The maximum rate is 10 Hz.
For example, to change the ATTITUDE message rate to 10 Hz, I just had to change the SR_EXTRA1 parameter to 10.
For more information about which parameter controls each message, see the GCS_Mavlink.cpp file in the ArduCopter firmware.
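If you would rather set that parameter from code than from Mission Planner, a PARAM_SET message can do the same thing (a sketch under the same assumptions as above):
mavlink_message_t command;
mavlink_msg_param_set_pack(
    255, 200, &command,
    1, 0,        // target_system, target_component
    "SR_EXTRA1", // the parameter that controls the ATTITUDE stream rate
    10.0f,       // 10 Hz
    MAV_PARAM_TYPE_REAL32
);
p_sensorsPort->write_message(command);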

ID3D12GraphicsCommandList::Close() returns E_INVALIDARG

I'm following the Rastertek tutorials on Direct3D 12, which can be found here.
I've double-checked that all my code matches the tutorial's, but I'm running into issues with the command list. When I close the command list after just clearing the back buffer, ID3D12GraphicsCommandList::Close() returns E_INVALIDARG, which means I've done something wrong while recording the command list. However, nothing I'm doing seems to be wrong.
D3D12_RESOURCE_BARRIER Barrier;
hr = CommandAllocator->Reset(); HANDLE_HR(__LINE__);
hr = CommandList->Reset(
    CommandAllocator,
    nullptr
); HANDLE_HR(__LINE__);
Barrier.Flags = D3D12_RESOURCE_BARRIER_FLAG_NONE;
Barrier.Transition.pResource = RenderTargetResource[BufferIndex];
Barrier.Transition.StateBefore = D3D12_RESOURCE_STATE_PRESENT;
Barrier.Transition.StateAfter = D3D12_RESOURCE_STATE_RENDER_TARGET;
Barrier.Transition.Subresource = D3D12_RESOURCE_BARRIER_ALL_SUBRESOURCES;
Barrier.Type = D3D12_RESOURCE_BARRIER_TYPE_TRANSITION;
CommandList->ResourceBarrier(1, &Barrier);
RenderTargetViewPtr.ptr = RenderTargetViewHandle.ptr + BufferIndex * RenderTargetBytes;
CommandList->OMSetRenderTargets(1, &RenderTargetViewPtr, FALSE, nullptr);
FLOAT color[] = { 1.0f, 1.0f, 1.0f, 1.0f };
CommandList->ClearRenderTargetView(RenderTargetViewHandle, color, 0, nullptr);
Barrier.Transition.StateBefore = D3D12_RESOURCE_STATE_RENDER_TARGET;
Barrier.Transition.StateAfter = D3D12_RESOURCE_STATE_PRESENT;
CommandList->ResourceBarrier(1, &Barrier);
hr = CommandList->Close(); HANDLE_HR(__LINE__);
The entirety of my project can be found at this github branch.
What is the issue with this code?
The resource barriers seem like the likely culprit, but they're rather innocuous. Commenting out OMSetRenderTargets() and ClearRenderTargetView() still results in E_INVALIDARG being returned from Close().
I've also tried using the ID3D12InfoQueue interface to find out what the problem was. There are no messages in the queue when Close() returns the error; I've checked that the interface is working properly, since messages do show up when other errors occur.

LLVM API: correct way to create/dispose

I'm attempting to implement a simple JIT compiler using the LLVM C API. So far, I have no problems generating IR code and executing it; that is, until I start disposing objects and recreating them.
What I would basically like to do is clean up the JIT'ted resources the moment they are no longer used by the engine. What I'm attempting looks like this:
while (true)
{
    // Initialize module & builder
    InitializeCore(GetGlobalPassRegistry());
    module = ModuleCreateWithName(some_unique_name);
    builder = CreateBuilder();
    // Initialize target & execution engine
    InitializeNativeTarget();
    engine = CreateExecutionEngineForModule(...);
    passmgr = CreateFunctionPassManagerForModule(module);
    AddTargetData(GetExecutionEngineTargetData(engine), passmgr);
    InitializeFunctionPassManager(passmgr);
    // [... my fancy JIT code ...] --** Gives a serious error on the second iteration
    // Destroy
    DisposePassManager(passmgr);
    DisposeExecutionEngine(engine);
    DisposeBuilder(builder);
    // DisposeModule(module); //--> Commented out: deleted by the execution engine
    Shutdown();
}
However, this doesn't seem to work correctly: on the second iteration of the loop I get a pretty bad error...
So, to summarize: what's the correct way to destroy and re-create the LLVM API objects?
Posting this as an answer because the code is too long for a comment. If possible and there are no other constraints, try to use LLVM like this. I am pretty sure the Shutdown() inside the loop is the culprit here, and I don't think it would hurt to keep the builder outside the loop, too. This reflects the way I use LLVM in my JIT:
InitializeCore(GetGlobalPassRegistry());
InitializeNativeTarget();
builder = CreateBuilder();
while (true)
{
    // Initialize the module
    module = ModuleCreateWithName(some_unique_name);
    // Initialize target & execution engine
    engine = CreateExecutionEngineForModule(...);
    passmgr = CreateFunctionPassManagerForModule(module);
    AddTargetData(GetExecutionEngineTargetData(engine), passmgr);
    InitializeFunctionPassManager(passmgr);
    // [... my fancy JIT code ...]
    // Destroy
    DisposePassManager(passmgr);
    DisposeExecutionEngine(engine);
}
DisposeBuilder(builder);
Shutdown();
/* program init */
LLVMInitializeNativeTarget();
LLVMInitializeNativeAsmPrinter();
LLVMInitializeNativeAsmParser();
LLVMLinkInMCJIT();
ctx->context = LLVMContextCreate();
ctx->builder = LLVMCreateBuilderInContext(ctx->context);
LLVMParseBitcodeInContext2(ctx->context, module_template_buf, &ctx->module); /* create the module */

/* IR code creation */
{
    function = LLVMAddFunction(ctx->module, "my_func");
    LLVMAppendBasicBlockInContext(ctx->context, ...);
    LLVMBuild...
    ...
}

/* optional optimization */
{
    LLVMPassManagerBuilderRef pass_builder = LLVMPassManagerBuilderCreate();
    LLVMPassManagerBuilderSetOptLevel(pass_builder, 3);
    LLVMPassManagerBuilderSetSizeLevel(pass_builder, 0);
    LLVMPassManagerBuilderUseInlinerWithThreshold(pass_builder, 1000);
    LLVMPassManagerRef function_passes = LLVMCreateFunctionPassManagerForModule(ctx->module);
    LLVMPassManagerRef module_passes = LLVMCreatePassManager();
    LLVMPassManagerBuilderPopulateFunctionPassManager(pass_builder, function_passes);
    LLVMPassManagerBuilderPopulateModulePassManager(pass_builder, module_passes);
    LLVMPassManagerBuilderDispose(pass_builder);
    LLVMInitializeFunctionPassManager(function_passes);
    for (LLVMValueRef value = LLVMGetFirstFunction(ctx->module); value;
         value = LLVMGetNextFunction(value))
    {
        LLVMRunFunctionPassManager(function_passes, value);
    }
    LLVMFinalizeFunctionPassManager(function_passes);
    LLVMRunPassManager(module_passes, ctx->module);
    LLVMDisposePassManager(function_passes);
    LLVMDisposePassManager(module_passes);
}

/* optional, for debugging */
{
    LLVMVerifyModule(ctx->module, LLVMAbortProcessAction, &error);
    LLVMDumpModule(ctx->module);
}

if (LLVMCreateJITCompilerForModule(&ctx->engine, ctx->module, 0, &error) != 0)
{
    /* handle the error */
}
my_func = (exec_func_t)(uintptr_t)LLVMGetFunctionAddress(ctx->engine, "my_func");
LLVMRemoveModule(ctx->engine, ctx->module, &ctx->module, &error);
LLVMDisposeModule(ctx->module);
LLVMDisposeBuilder(ctx->builder);

/* call the JIT'ted function as often as needed */
my_func(...);

LLVMDisposeExecutionEngine(ctx->engine);
LLVMContextDispose(ctx->context);

/* program finit */
LLVMShutdown();