How to feed cudaArray to Windows-Machine-Learning inference engine? - c++

I am trying to develop an ML-powered plugin for real-time image processing software that provides image data as a cudaArray_t on the GPU. Because the software locks me into an older CUDA version, I would like to do the inference with DirectML (the software is Windows-only anyway).
For latency reasons, I want to avoid any unnecessary GPU-CPU-GPU round trips. My idea is to map the CUDA data to D3D12 resources, which can then be used to create input and output tensors to bind to the model. I found a sample that uses the CUDA External Resource Interoperability API to map a cudaArray_t onto an ID3D12Resource here, which I am basing my code on. Since I don't need to render anything, I assumed I could simply create the heap and resource and then copy the incoming cudaArray_t into the interop cudaArray_t as shown below, without creating any sort of command queue. Note that the code not shown below is the same as in the linked GitHub repo, so I left it out for conciseness.
This approach does not work, but I am not sure how to debug it, as I am new to Direct3D programming and to GPU programming in general. I am using the official Direct3D 12 docs as a reference, but they are a bit overwhelming, so some direction on what needs to be fixed here would be greatly appreciated :) I was thinking that I need a semaphore (or fence) for some kind of syncing (see the sketch after the code below), but I am not sure whether that works without creating some sort of command queue.
bool initD3d12() {
    // setup the d3d12 device
    UINT dxgiFactoryFlags = 0;
    winrt::com_ptr<IDXGIFactory4> factory;
    winrt::check_hresult(CreateDXGIFactory2(dxgiFactoryFlags, IID_PPV_ARGS(factory.put())));
    winrt::com_ptr<IDXGIAdapter1> hardwareAdapter;
    GetHardwareAdapter(factory.get(), hardwareAdapter.put());
    winrt::check_hresult(D3D12CreateDevice(hardwareAdapter.get(), D3D_FEATURE_LEVEL_11_0, IID_PPV_ARGS(m_d3d12Device.put())));
    DXGI_ADAPTER_DESC1 desc;
    hardwareAdapter->GetDesc1(&desc);
    m_dx12deviceluid = desc.AdapterLuid;
    return true;
}
void initCuda() {
    // setup the cuda device
    int num_cuda_devices = 0;
    checkCudaErrors(cudaGetDeviceCount(&num_cuda_devices));
    if (!num_cuda_devices) {
        throw std::exception("No CUDA Devices found");
    }
    for (int devId = 0; devId < num_cuda_devices; devId++) {
        cudaDeviceProp devProp;
        checkCudaErrors(cudaGetDeviceProperties(&devProp, devId));
        if ((memcmp(&m_dx12deviceluid.LowPart, devProp.luid,
                    sizeof(m_dx12deviceluid.LowPart)) == 0) &&
            (memcmp(&m_dx12deviceluid.HighPart,
                    devProp.luid + sizeof(m_dx12deviceluid.LowPart),
                    sizeof(m_dx12deviceluid.HighPart)) == 0)) {
            checkCudaErrors(cudaSetDevice(devId));
            m_cudaDeviceID = devId;
            m_nodeMask = devProp.luidDeviceNodeMask;
            checkCudaErrors(cudaStreamCreate(&m_streamToRun));
            printf("CUDA Device Used [%d] %s\n", devId, devProp.name);
            break;
        }
    }
}
void copyArrayToResource(cudaArray_t cudaArray) {
    // copy the incoming cudaArray into the D3D12 texture via its mapped form, m_cudaArray
    cudaMemcpy2DArrayToArray(
        m_cudaArray,                            // dst array
        0, 0,                                   // dst offset
        cudaArray, 0, 0,                        // src array and offset
        m_width * 4 * sizeof(float), m_height,  // width in bytes, height in rows
        cudaMemcpyDeviceToDevice);              // kind
}
void createResource(size_t width, size_t height, ID3D12Resource** d3d12Resource) {
    // Create a d3d12 resource in the desired size and map it to a cudaArray
    m_width = width;
    m_height = height;
    // Create D3D12 2DTexture
    // Assume 32-Bit float RGBA image
    const auto channels = 4;
    const auto textureSurface = width * height;
    const auto texturePixels = textureSurface * channels;
    const auto textureSizeBytes = sizeof(float) * texturePixels;
    const auto texFormat = channels == 4 ? DXGI_FORMAT_R32G32B32A32_FLOAT : DXGI_FORMAT_R32G32B32_FLOAT;
    const auto texDesc = CD3DX12_RESOURCE_DESC::Tex2D(texFormat, width, height, 1, 1, 1, 0, D3D12_RESOURCE_FLAG_ALLOW_SIMULTANEOUS_ACCESS);
    D3D12_HEAP_PROPERTIES heapProperties = {
        D3D12_HEAP_TYPE_DEFAULT,
        D3D12_CPU_PAGE_PROPERTY_UNKNOWN,
        D3D12_MEMORY_POOL_UNKNOWN,
        0,
        0};
    winrt::check_hresult(m_d3d12Device->CreateCommittedResource(
        &heapProperties,
        D3D12_HEAP_FLAG_SHARED,
        &texDesc,
        D3D12_RESOURCE_STATE_COMMON,
        nullptr,
        IID_PPV_ARGS(d3d12Resource)));
    // Create CUDA external resource
    HANDLE sharedHandle;
    WindowsSecurityAttributes windowsSecurityAttributes{};
    LPCWSTR name = NULL;
    winrt::check_hresult(m_d3d12Device->CreateSharedHandle(
        *d3d12Resource, &windowsSecurityAttributes, GENERIC_ALL, 0,
        &sharedHandle));
    D3D12_RESOURCE_ALLOCATION_INFO d3d12ResourceAllocationInfo;
    d3d12ResourceAllocationInfo = m_d3d12Device->GetResourceAllocationInfo(
        m_nodeMask, 1, &texDesc);
    size_t actualSize = d3d12ResourceAllocationInfo.SizeInBytes;
    size_t alignment = d3d12ResourceAllocationInfo.Alignment;
    cudaExternalMemoryHandleDesc externalMemoryHandleDesc;
    memset(&externalMemoryHandleDesc, 0, sizeof(externalMemoryHandleDesc));
    externalMemoryHandleDesc.type = cudaExternalMemoryHandleTypeD3D12Resource;
    externalMemoryHandleDesc.handle.win32.handle = sharedHandle;
    externalMemoryHandleDesc.size = actualSize;
    externalMemoryHandleDesc.flags = cudaExternalMemoryDedicated;
    checkCudaErrors(
        cudaImportExternalMemory(&m_externalMemory, &externalMemoryHandleDesc));
    cudaExternalMemoryMipmappedArrayDesc cuExtmemMipDesc{};
    cuExtmemMipDesc.extent = make_cudaExtent(width, height, 0);
    cuExtmemMipDesc.formatDesc = cudaCreateChannelDesc<float4>();
    cuExtmemMipDesc.numLevels = 1;
    cuExtmemMipDesc.flags = cudaArrayDefault;
    cudaMipmappedArray_t cuMipArray{};
    checkCudaErrors(cudaExternalMemoryGetMappedMipmappedArray(&cuMipArray, m_externalMemory, &cuExtmemMipDesc));
    checkCudaErrors(cudaGetMipmappedArrayLevel(&m_cudaArray, cuMipArray, 0));
}
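For reference, this is roughly the kind of synchronization I had in mind, based on the external semaphore part of the same interop API. This is an untested sketch; m_d3d12Fence, m_extSemaphore and m_fenceValue are placeholder members I would still have to add, and I am not sure whether it makes sense without a command queue that signals/waits the fence on the D3D12 side:
// Untested sketch: share a D3D12 fence with CUDA and wait/signal around the copy.
winrt::check_hresult(m_d3d12Device->CreateFence(
    0, D3D12_FENCE_FLAG_SHARED, IID_PPV_ARGS(m_d3d12Fence.put())));
HANDLE fenceHandle = nullptr;
winrt::check_hresult(m_d3d12Device->CreateSharedHandle(
    m_d3d12Fence.get(), nullptr, GENERIC_ALL, nullptr, &fenceHandle));

cudaExternalSemaphoreHandleDesc semDesc{};
semDesc.type = cudaExternalSemaphoreHandleTypeD3D12Fence;
semDesc.handle.win32.handle = fenceHandle;
checkCudaErrors(cudaImportExternalSemaphore(&m_extSemaphore, &semDesc));

// Per frame: wait until the D3D12/DirectML work has reached m_fenceValue ...
cudaExternalSemaphoreWaitParams waitParams{};
waitParams.params.fence.value = m_fenceValue;
checkCudaErrors(cudaWaitExternalSemaphoresAsync(&m_extSemaphore, &waitParams, 1, m_streamToRun));

// ... do the copy / run kernels on m_streamToRun ...

// ... then signal the next value so the D3D12 side can wait on it.
cudaExternalSemaphoreSignalParams signalParams{};
signalParams.params.fence.value = ++m_fenceValue;
checkCudaErrors(cudaSignalExternalSemaphoresAsync(&m_extSemaphore, &signalParams, 1, m_streamToRun));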
In the end, if the mapping to an ID3D12Resource works, I assume one could use the ITensorStaticsNative interface to create a tensor to bind to the input or output of a LearningModel.
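A rough, untested sketch of what I assume that would look like, using the TensorFloat activation factory from windows.ai.machinelearning.native.h. The tensorFromResource helper and the NCHW shape are my own assumptions, and as far as I can tell the LearningModelSession would also have to be created on the same D3D12 device, e.g. from a command queue via ILearningModelDeviceFactoryNative::CreateFromD3D12CommandQueue:
#include <windows.ai.machinelearning.native.h>
#include <winrt/Windows.AI.MachineLearning.h>
using namespace winrt::Windows::AI::MachineLearning;

// Wrap the interop ID3D12Resource in a TensorFloat that can be bound to the model.
TensorFloat tensorFromResource(ID3D12Resource* resource,
                               int64_t channels, int64_t height, int64_t width) {
    auto factory = winrt::get_activation_factory<TensorFloat, ITensorStaticsNative>();
    int64_t shape[4] = { 1, channels, height, width };  // assumed NCHW layout, adjust to the model
    winrt::com_ptr<::IUnknown> tensorUnknown;
    winrt::check_hresult(factory->CreateFromD3D12Resource(
        resource, shape, 4, tensorUnknown.put()));
    return tensorUnknown.as<TensorFloat>();
}

// Usage (hypothetical names):
// LearningModelBinding binding(session);
// binding.Bind(L"input", tensorFromResource(inputResource, 4, m_height, m_width));
// binding.Bind(L"output", tensorFromResource(outputResource, 4, m_height, m_width));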

Related

DirectX - Writing to 3D Texture Causing Display Driver Failure

I'm testing writing to 2D and 3D textures in compute shaders, outputting a gradient noise texture consisting of 32-bit floats. Writing to a 2D texture works fine, but writing to a 3D texture doesn't. Are there additional considerations that need to be made when creating a 3D texture compared to a 2D texture?
Code of how I'm defining the 3D texture below:
HRESULT BaseComputeShader::CreateTexture3D(UINT width, UINT height, UINT depth, DXGI_FORMAT format, ID3D11Texture3D** texture)
{
D3D11_TEXTURE3D_DESC textureDesc;
ZeroMemory(&textureDesc, sizeof(textureDesc));
textureDesc.Width = width;
textureDesc.Height = height;
textureDesc.Depth = depth;
textureDesc.MipLevels = 1;
textureDesc.Format = format;
textureDesc.Usage = D3D11_USAGE_DEFAULT;
textureDesc.BindFlags = D3D11_BIND_SHADER_RESOURCE | D3D11_BIND_UNORDERED_ACCESS;
textureDesc.CPUAccessFlags = 0;
textureDesc.MiscFlags = 0;
return renderer->CreateTexture3D(&textureDesc, 0, texture);
}
HRESULT BaseComputeShader::CreateTexture3DUAV(UINT depth, DXGI_FORMAT format, ID3D11Texture3D** texture, ID3D11UnorderedAccessView** unorderedAccessView)
{
D3D11_UNORDERED_ACCESS_VIEW_DESC uavDesc;
ZeroMemory(&uavDesc, sizeof(uavDesc));
uavDesc.Format = format;
uavDesc.ViewDimension = D3D11_UAV_DIMENSION_TEXTURE3D;
uavDesc.Texture3D.MipSlice = 0;
uavDesc.Texture3D.FirstWSlice = 0;
uavDesc.Texture3D.WSize = depth;
return renderer->CreateUnorderedAccessView(*texture, &uavDesc, unorderedAccessView);
}
HRESULT BaseComputeShader::CreateTexture3DSRV(DXGI_FORMAT format, ID3D11Texture3D** texture, ID3D11ShaderResourceView** shaderResourceView)
{
D3D11_SHADER_RESOURCE_VIEW_DESC srvDesc;
ZeroMemory(&srvDesc, sizeof(srvDesc));
srvDesc.Format = format;
srvDesc.ViewDimension = D3D11_SRV_DIMENSION_TEXTURE3D;
srvDesc.Texture3D.MostDetailedMip = 0;
srvDesc.Texture3D.MipLevels = 1;
return renderer->CreateShaderResourceView(*texture, &srvDesc, shaderResourceView);
}
And how I'm writing to it in the compute shader:
// The texture we're writing to
RWTexture3D<float> outputTexture : register(u0);
[numthreads(8, 8, 8)]
void main(uint3 DTid : SV_DispatchThreadID)
{
float noiseValue = 0.0f;
float value = 0.0f;
float localAmplitude = amplitude;
float localFrequency = frequency;
// Loop for the number of octaves, running the noise function as many times as desired (8 is usually sufficient)
for (int k = 0; k < octaves; k++)
{
noiseValue = noise(float3(DTid.x * localFrequency, DTid.y * localFrequency, DTid.z * localFrequency)) * localAmplitude;
value += noiseValue;
// Calculate a new amplitude based on the input persistence/gain value
// amplitudeLoop will get smaller as the number of layers (i.e. k) increases
localAmplitude *= persistence;
// Calculate a new frequency based on a lacunarity value of 2.0
// This gives us 2^k as the frequency
// i.e. Frequency at k = 4 will be f * 2^4 as we have looped 4 times
localFrequency *= 2.0f;
}
// Output value to the 3D index in the texture provided by thread indexing
outputTexture[DTid.xyz] = value;
}
And finally, how I'm running the shader:
// Set the shader
deviceContext->CSSetShader(computeShader, nullptr, 0);
// Set the shader's buffers and views
deviceContext->CSSetConstantBuffers(0, 1, &cBuffer);
deviceContext->CSSetUnorderedAccessViews(0, 1, &textureUAV, nullptr);
// Launch the shader
deviceContext->Dispatch(512, 512, 512);
// Reset the shader now we're done
deviceContext->CSSetShader(nullptr, nullptr, 0);
// Reset the shader views
ID3D11UnorderedAccessView* ppUAViewnullptr[1] = { nullptr };
deviceContext->CSSetUnorderedAccessViews(0, 1, ppUAViewnullptr, nullptr);
// Create the shader resource view for access in other shaders
HRESULT result = CreateTexture3DSRV(DXGI_FORMAT_R32_FLOAT, &texture, &textureSRV);
if (result != S_OK)
{
MessageBox(NULL, L"Failed to create texture SRV after compute shader execution", L"Failed", MB_OK);
exit(0);
}
My bad, simple mistake. Compute shader threads are limited in number: a single thread group is limited to a total of 1024 threads, and a dispatch cannot exceed 65535 thread groups per dimension. The HLSL compiler will catch the former issue, but the Visual C++ compiler will not catch the latter.
If you create a texture of 512 * 512 * 512 (which seems to be what you are trying to achieve), your dispatch needs to be divided by the thread-group size:
deviceContext->Dispatch(512 / 8, 512 / 8, 512 / 8);
In your previous case, the dispatch was:
512*8 * 512*8 * 512*8 = 68719476736 threads
which very likely triggered timeout detection (TDR) and crashed the driver.
Also, the 65535 limit is per dimension, so with the corrected dispatch (64 groups per axis) you are completely safe.
And one last thing: you can create both the shader resource view and the unordered access view right after creating your 3D texture (before the dispatch call). This is generally recommended, to avoid mixing context code with resource-creation code.
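For example, a sketch of that ordering using the question's own helpers (assuming it runs inside BaseComputeShader; the sizes are illustrative):
ID3D11Texture3D* texture = nullptr;
ID3D11UnorderedAccessView* textureUAV = nullptr;
ID3D11ShaderResourceView* textureSRV = nullptr;
// Create the texture and both views up front, once.
HRESULT hr = CreateTexture3D(512, 512, 512, DXGI_FORMAT_R32_FLOAT, &texture);
if (SUCCEEDED(hr)) hr = CreateTexture3DUAV(512, DXGI_FORMAT_R32_FLOAT, &texture, &textureUAV);
if (SUCCEEDED(hr)) hr = CreateTexture3DSRV(DXGI_FORMAT_R32_FLOAT, &texture, &textureSRV);
// Per frame: bind the UAV, Dispatch(512 / 8, 512 / 8, 512 / 8), then unbind.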
On resource creation, your check is not valid either:
if (result != S_OK)
The HRESULT success condition is >= 0, so use the built-in macros instead, e.g.
if (FAILED(result))
for the error branch (or SUCCEEDED(result) for the success path).

How to get all vertex cordinates from DirectXTK (ToolKit) DirectX::Model class to use for collision detection

I'm doing some basic rendering with DirectXToolKit and I would like to be able to get the vertex coordinates for each model in order to compute collisions between models.
Currently, I have some test code to load the model, but the ID3D11Buffer is loaded internally using CreateFromSDKMESH:
void Model3D::LoadSDKMESH(ID3D11Device* p_device, ID3D11DeviceContext* device_context, const wchar_t* file_mesh)
{
mAlpha = 1.0f;
mTint = DirectX::Colors::White.v;
mStates.reset(new DirectX::CommonStates(p_device));
auto fx = new DirectX::EffectFactory(p_device);
fx->SetDirectory(L"media");
mFxFactory.reset(fx);
mBatch.reset(new DirectX::PrimitiveBatch<DirectX::VertexPositionColor>(device_context));
mBatchEffect.reset(new DirectX::BasicEffect(p_device));
mBatchEffect->SetVertexColorEnabled(true);
{
void const* shaderByteCode;
size_t byteCodeLength;
mBatchEffect->GetVertexShaderBytecode(&shaderByteCode, &byteCodeLength);
HR(p_device->CreateInputLayout(DirectX::VertexPositionColor::InputElements,
DirectX::VertexPositionColor::InputElementCount,
shaderByteCode, byteCodeLength,
mBatchInputLayout.ReleaseAndGetAddressOf()));
}
mModel = DirectX::Model::CreateFromSDKMESH(p_device, file_mesh, *mFxFactory);
}
I know there is a way to get vertices from the ID3D11Buffer, answered here:
How to read vertices from vertex buffer in Direct3d11
But they suggest not reading back from GPU memory, so I assume it's better to load the vertices ahead of time into a separate container.
I looked into CreateFromSDKMESH and there are a few functions that are publicly accessible without making changes to XTK
In order to get Vertices while loading a model, replace the line mModel = DirectX::Model::CreateFromSDKMESH(p_device, file_mesh, *mFxFactory); in the question above with:
size_t data_size = 0;
std::unique_ptr<uint8_t[]> v_data;
HRESULT hr = DirectX::BinaryReader::ReadEntireFile(file_mesh, v_data, &data_size);
if (FAILED(hr))
{
DirectX::DebugTrace("CreateFromSDKMESH failed (%08X) loading '%ls'\n", hr, file_mesh);
throw std::exception("CreateFromSDKMESH");
}
uint8_t* mesh_data = v_data.get();
mModel = DirectX::Model::CreateFromSDKMESH(p_device, v_data.get(), data_size, *mFxFactory, false, false);
mModel->name = file_mesh;
auto v_header = reinterpret_cast<const DXUT::SDKMESH_HEADER*>(mesh_data);
auto vb_array = reinterpret_cast<const DXUT::SDKMESH_VERTEX_BUFFER_HEADER*>(mesh_data + v_header->VertexStreamHeadersOffset);
if(v_header->NumVertexBuffers < 1)
throw std::exception("Vertex Buffers less than 1");
auto& vertex_header = vb_array[0];
uint64_t buffer_data_offset = v_header->HeaderSize + v_header->NonBufferDataSize;
uint8_t* buffer_data = mesh_data + buffer_data_offset;
auto verts_pairs = reinterpret_cast<std::pair<Vector3,Vector3>*>(buffer_data + (vertex_header.DataOffset - buffer_data_offset));
There, accessing a coordinate should be as simple as
float x = verts_pairs[0].first.x;
and the total number of vertices is stored in
vertex_header.NumVertices
Don't forget that after loading, the CPU-side vertex data gets deleted, so you may want to do something like:
memcpy(vertexBuffer, reinterpret_cast<std::pair<Vector3,Vector3>*>(buffer_data + (vertex_header.DataOffset - buffer_data_offset)), vertexCnt);
Also, the vertex buffer doesn't get transformed by the draw functions, so you will need to apply the transforms yourself.
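For the collision use case, a small follow-up sketch: copy just the positions into a container you own before the loader's CPU-side data goes away (this assumes the {position, normal} pair layout above; Vector3 is DirectX::SimpleMath::Vector3):
// Keep only the object-space positions for collision tests.
std::vector<Vector3> positions;
positions.reserve(vertex_header.NumVertices);
for (UINT64 i = 0; i < vertex_header.NumVertices; ++i)
    positions.push_back(verts_pairs[i].first);
// Apply the model's world matrix to these before testing collisions,
// since the draw functions do not transform this copy.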

DirectX 11 - Compute Shader, copy data from the GPU to the CPU

I've just started using DirectCompute in an attempt to move a fluid simulation I have been working on onto the GPU. I found a very similar (if not identical) question here; however, the resolution to my problem does not seem to be the same as theirs, and I definitely have my CopyResource arguments the right way round. As with the linked question, I only get a buffer filled with 0's when copying back from the GPU. I really can't see the error, as I don't understand how I could be exceeding any bounds limits. I apologise for the mass of code pasted below, but I want to be sure I haven't got any of the setup wrong.
Output Buffer, UAV and System Buffer set up
outputDesc.Usage = D3D11_USAGE_DEFAULT;
outputDesc.BindFlags = D3D11_BIND_UNORDERED_ACCESS;
outputDesc.ByteWidth = sizeof(BoundaryConditions) * numElements;
outputDesc.CPUAccessFlags = 0;
outputDesc.StructureByteStride = sizeof(BoundaryConditions);
outputDesc.MiscFlags = D3D11_RESOURCE_MISC_BUFFER_STRUCTURED;
result =_device->CreateBuffer(&outputDesc, 0, &m_outputBuffer);
outputDesc.Usage = D3D11_USAGE_STAGING;
outputDesc.BindFlags = 0;
outputDesc.CPUAccessFlags = D3D11_CPU_ACCESS_READ;
result = _device->CreateBuffer(&outputDesc, 0, &m_outputresult);
D3D11_UNORDERED_ACCESS_VIEW_DESC uavDesc;
uavDesc.Format = DXGI_FORMAT_UNKNOWN;
uavDesc.ViewDimension = D3D11_UAV_DIMENSION_BUFFER;
uavDesc.Buffer.FirstElement = 0;
uavDesc.Buffer.Flags = 0;
uavDesc.Buffer.NumElements = numElements;
result =_device->CreateUnorderedAccessView(m_outputBuffer, &uavDesc, &m_BoundaryConditionsUAV);
Running the Shader in my frame loop
HRESULT result;
D3D11_MAPPED_SUBRESOURCE mappedResource;
_deviceContext->CSSetShader(m_BoundaryConditionsCS, nullptr, 0);
_deviceContext->CSSetUnorderedAccessViews(0, 1, &m_BoundaryConditionsUAV, 0);
_deviceContext->Dispatch(1, 1, 1);
// Unbind output from compute shader
ID3D11UnorderedAccessView* nullUAV[] = { NULL };
_deviceContext->CSSetUnorderedAccessViews(0, 1, nullUAV, 0);
// Disable Compute Shader
_deviceContext->CSSetShader(nullptr, nullptr, 0);
_deviceContext->CopyResource(m_outputresult, m_outputBuffer);
D3D11_MAPPED_SUBRESOURCE mappedData;
result = _deviceContext->Map(m_outputresult, 0, D3D11_MAP_READ, 0, &mappedData);
BoundaryConditions* newbc = reinterpret_cast<BoundaryConditions*>(mappedData.pData);
for (int i = 0; i < 4; i++)
{
Debug::Instance()->Log(newbc[i].x.x);
}
_deviceContext->Unmap(m_outputresult, 0);
HLSL
struct BoundaryConditions
{
float3 x;
float3 y;
};
RWStructuredBuffer<BoundaryConditions> _boundaryConditions;
[numthreads(4, 1, 1)]
void ComputeBoundaryConditions(int3 id : SV_DispatchThreadID)
{
_boundaryConditions[id.x].x = float3(id.x,id.y,id.z);
}
I dispatch the compute shader after I begin a frame and before I end it. I have played around with moving the dispatch call outside of the end-scene and before the present etc., but nothing seems to affect the result. Can't seem to figure this one out!
Holy smokes, I fixed the error! I was creating the compute shader into a different ID3D11ComputeShader pointer than the one I was binding! Works like a charm now. Sorry, and thanks, Adam!
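For anyone hitting the same symptom, the mismatch looks roughly like this (illustrative only; the blob and member names are assumed):
// The shader object is created into one pointer...
_device->CreateComputeShader(csBlob->GetBufferPointer(), csBlob->GetBufferSize(),
    nullptr, &m_someOtherComputeShader);
// ...but a different, never-initialized pointer is bound before Dispatch,
// so nothing runs and the staging buffer reads back as all zeros.
_deviceContext->CSSetShader(m_BoundaryConditionsCS, nullptr, 0);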

How to convert Windows Bitmap into Actionscript Bitmap in C++

To circumvent some (well, a lot) of the problems with the ActionScript Camera API on Windows 8 systems, I decided to create a native extension to deal with the camera.
Right now, the camera part and all the glue to communicate with the AIR runtime are actually working, so clicking a button in AIR will open a new Windows window that returns a System::Drawing::Bitmap.
My task now would be to
a) create a FREBitmapData object and
b) fill in the BitmapData from the Windows bitmap.
Should be easy, I thought, many days ago... As I'm not really familiar with C++, I haven't got this to work at all.
Here's what I tried so far:
bmp = form1->bitmap; // bmp is a handle to the System::Drawing::Bitmap returned from the external window
// Lock the bitmap's bits.
Rectangle rect = Rectangle(0, 0, bmp->Width, bmp->Height);
System::Drawing::Imaging::BitmapData^ bmpData = bmp->LockBits(rect, System::Drawing::Imaging::ImageLockMode::ReadWrite, bmp->PixelFormat);
// Get the address of the first line.
IntPtr ptr = bmpData->Scan0;
// Declare an array to hold the bytes of the bitmap.
// This code is specific to a bitmap with 24 bits per pixels.
int inputLength = Math::Abs(bmpData->Stride) * bmp->Height;
array<Byte>^ input = gcnew array<Byte>(inputLength);
// Copy the RGB values into the array.
System::Runtime::InteropServices::Marshal::Copy(ptr, input, 0, inputLength);
// Unlock the bits.
bmp->UnlockBits(bmpData);
// Create a FREByteArray to hold the data.
// Don't know, if this is necessary
FREObject* outputObject;
FREByteArray* outputBytes = new FREByteArray;
outputBytes->length = inputLength;
outputBytes->bytes = (uint8_t *) &input;
FREAcquireByteArray(outputObject, outputBytes);
// now copy it over
memcpy(outputBytes->bytes, &input, inputLength);
FREReleaseByteArray(outputObject);
// we create a new instance of BitmapData here,
// as we cannot simply pass it over in the args,
// because we don't know its correct size at extension creation
FREObject* width;
FRENewObjectFromUint32(bmp->Width, width);
FREObject* height;
FRENewObjectFromUint32(bmp->Height, height);
FREObject* transparent;
FRENewObjectFromBool(uint32_t(0), transparent);
FREObject* fillColor;
FRENewObjectFromUint32(uint32_t(0xFFFFFF), fillColor);
FREObject obs[4] = { width, height, transparent, fillColor };
// we create some ActionScript instances here that we want to send back
FREObject* asBmpObj;
FRENewObject("BitmapData", 4, obs, asBmpObj, NULL);
// Now we have our AS bitmap data, copy bytes over
FREBitmapData* asData;
FREAcquireBitmapData(asBmpObj, asData);
// Now what? asData->bits32 won't accept array<Bytes> or any other value I've tried.
return asBmpObj;
The basic idea was:
a) find out the size and bit depth of the original Windows bitmap (the size is determined by the cam resolution picked in the camera window)
b) write its bytes to an array, converting to 32 bits as necessary (still missing any idea how)
c) create an AS bitmap of the same size; the bit depth must always be 32
d) copy the array over to the AS bitmap.
But I just can't achieve this.
Any advice? Thank you!
I don't think the following straight copy will work.
// Copy the RGB values into the array.
System::Runtime::InteropServices::Marshal::Copy(ptr, input, 0, inputLength);
You have to convert pixel by pixel. I don't know how to convert it to FREBitmapData, but here are some examples you can follow on MSDN.
I finally figured it out. The code below doesn't deal with the 24-to-32-bit conversion, but it works quite well in my application, so I thought I might share it:
FREObject launch(FREContext ctx, void* funcData, uint32_t argc, FREObject argv[])
{
System::Drawing::Bitmap^ windowsBitmap;
SKILLCamControl::CamControlForm^ form1;
form1 = gcnew SKILLCamControl::CamControlForm();
DialogResult dr;
// Show testDialog as a modal dialog and determine if DialogResult = OK.
dr = form1->ShowDialog();
if (dr == DialogResult::OK) {
windowsBitmap = form1->bitmap;
int bmpW = windowsBitmap->Width;
int bmpH = windowsBitmap->Height;
// we create a new instance of BitmapData here,
// as we cannot simply pass it over in the args,
// because we don't know its correct size at extension creation
FREObject width;
FRENewObjectFromUint32( uint32_t(bmpW), &width);
FREObject height;
FRENewObjectFromUint32( uint32_t(bmpH), &height);
FREObject transparent;
FRENewObjectFromBool( uint32_t(0), &transparent);
FREObject fillColor;
FRENewObjectFromUint32( uint32_t(0xFF0000), &fillColor);
FREObject obs[4] = { width, height, transparent, fillColor };
FREObject freBitmap;
FRENewObject((uint8_t *)"flash.display.BitmapData", 4, obs, &freBitmap , NULL);
FREBitmapData2 freBitmapData;
FREAcquireBitmapData2(freBitmap, &freBitmapData);
// is inverted?
if (&freBitmapData.isInvertedY != (uint32_t*)(0) ) windowsBitmap->RotateFlip(RotateFlipType::RotateNoneFlipY);
int pixelSize = 4;
//Rect rect( 0, 0, freBitmap.width, freBitmap.height );
System::Drawing::Rectangle rect(0, 0, bmpW, bmpH);
BitmapData^ windowsBitmapData = windowsBitmap->LockBits(rect, ImageLockMode::ReadOnly, PixelFormat::Format32bppArgb);
for (int y = 0; y < bmpH ; y++)
{
//get pixels from each bitmap
byte* oRow = (byte*)windowsBitmapData->Scan0.ToInt32() + (y * windowsBitmapData->Stride);
byte* nRow = (byte*)freBitmapData.bits32 + (y * freBitmapData.lineStride32 * 4);
for (int x = 0; x < bmpW ; x++)
{
// set pixels
nRow[x * pixelSize] = oRow[x * pixelSize]; //B
nRow[x * pixelSize + 1] = oRow[x * pixelSize + 1]; //G
nRow[x * pixelSize + 2] = oRow[x * pixelSize + 2]; //R
}
}
// Free resources
FREReleaseBitmapData(freBitmap);
FREInvalidateBitmapDataRect(freBitmap, 0, 0, bmpW, bmpH);
windowsBitmap->UnlockBits(windowsBitmapData);
delete windowsBitmapData;
delete windowsBitmap;
return freBitmap;
}
else if (dr == DialogResult::Cancel)
{
return NULL;
}
return NULL;
}
I don't use C++ myself, so this is not a full answer, just something to consider...
Bitmap data is universal raw pixel data; it should be passable between different pieces of software, unless you are actually creating .BMP files with a header etc.?
"...that will return a System::Drawing::Bitmap" — does this mean you have the bitmap's data held by C++ (as raw uncompressed RGBA pixels)? If so, then either put that inside a ByteArray and send it to AS3, or, if you can get that bitmap copied to the Windows clipboard, use AS3 to read it from the clipboard into a new AS3 Bitmap.
these might help you:
AS3: Copy image from clipboard
AS3: Serialize Bitmaps: scroll down to the section "ByteArray to BitmapData" (for this to work you must first store the C++ bitmap bytes as a file; call it what you want, e.g. tempIMG.dat or myPIc.bin, since the file extension does not really matter, just that you need a loadable URL).

OpenGL + PBO + FBO + some ATI cards - color and pixel shifting

We are developing software for slide show creation and use OpenGL.
We use FBO + PBO for fast data readback from the video card to RAM, but on some ATI video cards we have run into the following problems:
swapping RGB components
pixel shifting
There are no problems if we do not use PBO.
We have also noticed that a 4:3 aspect ratio for the PBO/FBO solves the pixel-shifting problem.
Any thoughts or suggestions?
Here are more details:
ATI Radeon HD 3650
PBO code:
public bool PBO_Initialize(
int bgl_size_w,
int bgl_size_h)
{
PBO_Release();
if (mCSGL12Control1 != null)
{
GL mGL = mCSGL12Control1.GetGL();
mCSGL12Control1.wgl_MakeCurrent();
//
// check PBO is supported by your video card
if (mGL.bglGenBuffersARB == true &&
mGL.bglBindBufferARB == true &&
mGL.bglBufferDataARB == true &&
mGL.bglBufferSubDataARB == true &&
mGL.bglMapBufferARB == true &&
mGL.bglUnmapBufferARB == true &&
mGL.bglDeleteBuffersARB == true &&
mGL.bglGetBufferParameterivARB == true)
{
mGL.glGenBuffersARB(2, _pbo_imageBuffers);
int clientHeight1 = bgl_size_h / 2;
int clientHeight2 = bgl_size_h - clientHeight1;
int clientSize1 = bgl_size_w * clientHeight1 * 4;
int clientSize2 = bgl_size_w * clientHeight2 * 4;
mGL.glBindBufferARB(GL.GL_PIXEL_PACK_BUFFER_ARB, _pbo_imageBuffers[0]);
mGL.glBufferDataARB(GL.GL_PIXEL_PACK_BUFFER_ARB, clientSize1, IntPtr.Zero,
GL.GL_STREAM_READ_ARB);
mGL.glBindBufferARB(GL.GL_PIXEL_PACK_BUFFER_ARB, _pbo_imageBuffers[1]);
mGL.glBufferDataARB(GL.GL_PIXEL_PACK_BUFFER_ARB, clientSize2, IntPtr.Zero,
GL.GL_STREAM_READ_ARB);
return true;
}
}
return false;
}
...
PBO read data back to memory
int clientHeight1 = _bgl_size_h / 2;
int clientHeight2 = _bgl_size_h - clientHeight1;
int clientSize1 = _bgl_size_w * clientHeight1 * 4;
int clientSize2 = _bgl_size_w * clientHeight2 * 4;
//mGL.glPushAttrib(GL.GL_VIEWPORT_BIT | GL.GL_COLOR_BUFFER_BIT);
// Bind two different buffer objects and start the glReadPixels
// asynchronously. Each call will return directly after
// starting the DMA transfer.
mGL.glBindBufferARB(GL.GL_PIXEL_PACK_BUFFER_ARB, _pbo_imageBuffers[0]);
mGL.glReadPixels(0, 0, _bgl_size_w, clientHeight1, imageFormat,
pixelTransferMethod, IntPtr.Zero);
mGL.glBindBufferARB(GL.GL_PIXEL_PACK_BUFFER_ARB, _pbo_imageBuffers[1]);
mGL.glReadPixels(0, clientHeight1, _bgl_size_w, clientHeight2, imageFormat,
pixelTransferMethod, IntPtr.Zero);
//mGL.glPopAttrib();
mGL.glBindFramebufferEXT(GL.GL_FRAMEBUFFER_EXT, 0);
// Process partial images. Mapping the buffer waits for
// outstanding DMA transfers into the buffer to finish.
mGL.glBindBufferARB(GL.GL_PIXEL_PACK_BUFFER_ARB, _pbo_imageBuffers[0]);
IntPtr pboMemory1 = mGL.glMapBufferARB(GL.GL_PIXEL_PACK_BUFFER_ARB,
GL.GL_READ_ONLY_ARB);
mGL.glBindBufferARB(GL.GL_PIXEL_PACK_BUFFER_ARB, _pbo_imageBuffers[1]);
IntPtr pboMemory2 = mGL.glMapBufferARB(GL.GL_PIXEL_PACK_BUFFER_ARB,
GL.GL_READ_ONLY_ARB);
System.Runtime.InteropServices.Marshal.Copy(pboMemory1, _bgl_rgbaData_out, 0, clientSize1);
System.Runtime.InteropServices.Marshal.Copy(pboMemory2, _bgl_rgbaData_out, clientSize1, clientSize2);
// Unmap the image buffers
mGL.glBindBufferARB(GL.GL_PIXEL_PACK_BUFFER_ARB, _pbo_imageBuffers[0]);
mGL.glUnmapBufferARB(GL.GL_PIXEL_PACK_BUFFER_ARB);
mGL.glBindBufferARB(GL.GL_PIXEL_PACK_BUFFER_ARB, _pbo_imageBuffers[1]);
mGL.glUnmapBufferARB(GL.GL_PIXEL_PACK_BUFFER_ARB);
FBO initialization
private static void FBO_Initialize(GL mGL,
ref int[] bgl_texture,
ref int[] bgl_framebuffer,
ref int[] bgl_renderbuffer,
ref byte[] bgl_rgbaData,
int bgl_size_w,
int bgl_size_h)
{
// Texture
mGL.glGenTextures(1, bgl_texture);
mGL.glBindTexture(GL.GL_TEXTURE_2D, bgl_texture[0]);
mGL.glTexParameteri(GL.GL_TEXTURE_2D, GL.GL_TEXTURE_MAG_FILTER, GL.GL_NEAREST);
mGL.glTexParameteri(GL.GL_TEXTURE_2D, GL.GL_TEXTURE_MIN_FILTER, GL.GL_NEAREST);
mGL.glTexParameteri(GL.GL_TEXTURE_2D, GL.GL_TEXTURE_WRAP_S, GL.GL_CLAMP_TO_EDGE);
mGL.glTexParameteri(GL.GL_TEXTURE_2D, GL.GL_TEXTURE_WRAP_T, GL.GL_CLAMP_TO_EDGE);
IntPtr null_ptr = new IntPtr(0);
// <null> means reserve texture memory, but texels are undefined
mGL.glTexImage2D(GL.GL_TEXTURE_2D, 0, GL.GL_RGBA, bgl_size_w, bgl_size_h, 0, GL.GL_RGBA, GL.GL_UNSIGNED_BYTE, null_ptr);
//
mGL.glGenFramebuffersEXT(1, bgl_framebuffer);
mGL.glBindFramebufferEXT(GL.GL_FRAMEBUFFER_EXT, bgl_framebuffer[0]);
mGL.glGenRenderbuffersEXT(1, bgl_renderbuffer);
mGL.glBindRenderbufferEXT(GL.GL_RENDERBUFFER_EXT, bgl_renderbuffer[0]);
mGL.glRenderbufferStorageEXT(GL.GL_RENDERBUFFER_EXT, GL.GL_DEPTH_COMPONENT24, bgl_size_w, bgl_size_h);
mGL.glFramebufferTexture2DEXT(GL.GL_FRAMEBUFFER_EXT, GL.GL_COLOR_ATTACHMENT0_EXT,
GL.GL_TEXTURE_2D, bgl_texture[0], 0);
mGL.glFramebufferRenderbufferEXT(GL.GL_FRAMEBUFFER_EXT, GL.GL_DEPTH_ATTACHMENT_EXT,
GL.GL_RENDERBUFFER_EXT, bgl_renderbuffer[0]);
// Errors?
int status = mGL.glCheckFramebufferStatusEXT(GL.GL_FRAMEBUFFER_EXT);
if (status != GL.GL_FRAMEBUFFER_COMPLETE_EXT || mGL.glGetError() != GL.GL_NO_ERROR)
{
mGL.glFramebufferTexture2DEXT(GL.GL_FRAMEBUFFER_EXT, GL.GL_COLOR_ATTACHMENT0_EXT,
GL.GL_TEXTURE_2D, 0, 0);
mGL.glFramebufferRenderbufferEXT(GL.GL_FRAMEBUFFER_EXT, GL.GL_DEPTH_ATTACHMENT_EXT,
GL.GL_RENDERBUFFER_EXT, 0);
mGL.glBindTexture(GL.GL_TEXTURE_2D, 0);
mGL.glDeleteTextures(1, bgl_texture);
mGL.glBindRenderbufferEXT(GL.GL_RENDERBUFFER_EXT, 0);
mGL.glDeleteRenderbuffersEXT(1, bgl_renderbuffer);
mGL.glBindFramebufferEXT(GL.GL_FRAMEBUFFER_EXT, 0);
mGL.glDeleteFramebuffersEXT(1, bgl_framebuffer);
throw new Exception("Bad framebuffer.");
}
mGL.glDrawBuffer(GL.GL_COLOR_ATTACHMENT0_EXT);
mGL.glReadBuffer(GL.GL_COLOR_ATTACHMENT0_EXT); // For glReadPixels()
mGL.glBindFramebufferEXT(GL.GL_FRAMEBUFFER_EXT, 0);
mGL.glDrawBuffer(GL.GL_BACK);
mGL.glReadBuffer(GL.GL_BACK);
mGL.glBindTexture(GL.GL_TEXTURE_2D, 0);
bgl_rgbaData = new byte[bgl_size_w * bgl_size_h * 4];
}
It seems that re-installing/updating the video driver solves this problem.
Really strange behaviour. It may be that the official notebook driver is old/buggy and causes the problem; updating to the latest driver from AMD for this VGA chip series seems to fix it. I'm also not sure whether the previous driver was set up correctly, hence "re-installing/updating".
Thank you all for the help.