// Sample_PCL.cpp : Defines the entry point for the console application.
//
#include "stdafx.h"
#define NOMINMAX
#include <Windows.h>
#include <Kinect.h> // Header file for using the Kinect
#include <pcl/visualization/cloud_viewer.h> // Header file for displaying with PCL
#include <pcl/io/pcd_io.h> // Header file for saving point cloud data (.pcd, .ply)
//#include <pcl/io/ply_io.h>
#include <pcl/point_types.h>
#include <iostream>
template<class Interface>
inline void SafeRelease(Interface *& pInterfaceToRelease)
{
if (pInterfaceToRelease != NULL) {
pInterfaceToRelease->Release();
pInterfaceToRelease = NULL; // null the pointer so repeated calls are safe
}
}
int main()
{
// Create Sensor Instance
IKinectSensor* pSensor;
HRESULT hResult = S_OK;
hResult = GetDefaultKinectSensor(&pSensor);
if (FAILED(hResult)) {
std::cerr << "Error : GetDefaultKinectSensor" << std::endl;
return -1;
}
printf("GetDfaultKinectSensor is OK\n");
// Open Sensor
hResult = pSensor->Open();
if (FAILED(hResult)) {
std::cerr << "Error : IKinectSensor::Open()" << std::endl;
return -1;
}
printf("IKinectSensor::Open is OK\n");
// Retrieved Coordinate Mapper
ICoordinateMapper* pCoordinateMapper;
hResult = pSensor->get_CoordinateMapper(&pCoordinateMapper);
if (FAILED(hResult)) {
std::cerr << "Error : IKinectSensor::get_CoordinateMapper()" << std::endl;
return -1;
}
printf("IKinectSensor::get_CoordinateMapper is OK\n");
// Retrieved Color Frame Source
IColorFrameSource* pColorSource;
hResult = pSensor->get_ColorFrameSource(&pColorSource);
if (FAILED(hResult)) {
std::cerr << "Error : IKinectSensor::get_ColorFrameSource()" << std::endl;
return -1;
}
printf("IKinectSensor::get_ColorFrameSource is OK\n");
// Open Color Frame Reader
IColorFrameReader* pColorReader;
hResult = pColorSource->OpenReader(&pColorReader);
if (FAILED(hResult)) {
std::cerr << "Error : IColorFrameSource::OpenReader()" << std::endl;
return -1;
}
printf("IColorFrameSource::OpenReader is OK\n");
// Retrieved Depth Frame Source
IDepthFrameSource* pDepthSource;
hResult = pSensor->get_DepthFrameSource(&pDepthSource);
if (FAILED(hResult)) {
std::cerr << "Error : IKinectSensor::get_DepthFrameSource()" << std::endl;
return -1;
}
printf("IKinectSensor::get_DepthFrameSource is OK\n");
// Open Depth Frame Reader
IDepthFrameReader* pDepthReader;
hResult = pDepthSource->OpenReader(&pDepthReader);
if (FAILED(hResult)) {
std::cerr << "Error : IDepthFrameSource::OpenReader()" << std::endl;
return -1;
}
printf("IDepthFrameSource::OpenReader is OK\n");
// Retrieved Color Frame Size
IFrameDescription* pColorDescription;
hResult = pColorSource->get_FrameDescription(&pColorDescription);
if (FAILED(hResult)) {
std::cerr << "Error : IColorFrameSource::get_FrameDescription()" << std::endl;
return -1;
}
printf("IColorFrameSource::get_FrameDescription is OK\n");
int colorWidth = 0;
int colorHeight = 0;
pColorDescription->get_Width(&colorWidth); // 1920
pColorDescription->get_Height(&colorHeight); // 1080
// To Reserve Color Frame Buffer
std::vector<RGBQUAD> colorBuffer(colorWidth * colorHeight);
// Retrieved Depth Frame Size
IFrameDescription* pDepthDescription;
hResult = pDepthSource->get_FrameDescription(&pDepthDescription);
if (FAILED(hResult)) {
std::cerr << "Error : IDepthFrameSource::get_FrameDescription()" << std::endl;
return -1;
}
printf("IDepthFrameSource::get_FrameDescription is OK\n");
int depthWidth = 0;
int depthHeight = 0;
pDepthDescription->get_Width(&depthWidth); // 512
pDepthDescription->get_Height(&depthHeight); // 424
// To Reserve Depth Frame Buffer
std::vector<UINT16> depthBuffer(depthWidth * depthHeight);
printf("Display Point Cloud\n");
// Create Cloud Viewer
pcl::visualization::CloudViewer viewer("Point Cloud Viewer"); // Window for displaying the point cloud
while (!viewer.wasStopped()) {
// Acquire Latest Color Frame
IColorFrame* pColorFrame = nullptr;
hResult = pColorReader->AcquireLatestFrame(&pColorFrame);
if (SUCCEEDED(hResult)) {
// Retrieved Color Data
hResult = pColorFrame->CopyConvertedFrameDataToArray(colorBuffer.size() * sizeof(RGBQUAD), reinterpret_cast<BYTE*>(&colorBuffer[0]), ColorImageFormat::ColorImageFormat_Bgra);
if (FAILED(hResult)) {
std::cerr << "Error : IColorFrame::CopyConvertedFrameDataToArray()" << std::endl;
}
}
SafeRelease(pColorFrame);
// Acquire Latest Depth Frame
IDepthFrame* pDepthFrame = nullptr;
hResult = pDepthReader->AcquireLatestFrame(&pDepthFrame);
if (SUCCEEDED(hResult)) {
// Retrieved Depth Data
hResult = pDepthFrame->CopyFrameDataToArray(depthBuffer.size(), &depthBuffer[0]);
if (FAILED(hResult)) {
std::cerr << "Error : IDepthFrame::CopyFrameDataToArray()" << std::endl;
}
}
SafeRelease(pDepthFrame);
// Point Cloud Library setup
// Create Point Cloud
pcl::PointCloud<pcl::PointXYZRGB>::Ptr pointcloud(new pcl::PointCloud<pcl::PointXYZRGB>()); // PCL point cloud structure
pointcloud->width = static_cast<uint32_t>(depthWidth); // Number of points
pointcloud->height = static_cast<uint32_t>(depthHeight);
pointcloud->is_dense = false;
for (int y = 0; y < depthHeight; y++) {
for (int x = 0; x < depthWidth; x++) {
pcl::PointXYZRGB point; // Point information used by PCL
DepthSpacePoint depthSpacePoint = { static_cast<float>(x), static_cast<float>(y) };
UINT16 depth = depthBuffer[y * depthWidth + x];
// Coordinate Mapping Depth to Color Space, and Setting PointCloud RGB
ColorSpacePoint colorSpacePoint = { 0.0f, 0.0f };
pCoordinateMapper->MapDepthPointToColorSpace(depthSpacePoint, depth, &colorSpacePoint); // Color coordinate space
int colorX = static_cast<int>(std::floor(colorSpacePoint.X + 0.5f));
int colorY = static_cast<int>(std::floor(colorSpacePoint.Y + 0.5f));
if ((0 <= colorX) && (colorX < colorWidth) && (0 <= colorY) && (colorY < colorHeight)) {
RGBQUAD color = colorBuffer[colorY * colorWidth + colorX];
// Store the color information
point.b = color.rgbBlue;
point.g = color.rgbGreen;
point.r = color.rgbRed;
}
// Coordinate Mapping Depth to Camera Space, and Setting PointCloud XYZ
CameraSpacePoint cameraSpacePoint = { 0.0f, 0.0f, 0.0f }; // Camera space
pCoordinateMapper->MapDepthPointToCameraSpace(depthSpacePoint, depth, &cameraSpacePoint);
if ((0 <= colorX) && (colorX < colorWidth) && (0 <= colorY) && (colorY < colorHeight)) {
// Store the Cartesian coordinates
point.x = cameraSpacePoint.X;
point.y = cameraSpacePoint.Y;
point.z = cameraSpacePoint.Z;
}
pointcloud->push_back(point);
}
}
// Show Point Cloud on Cloud Viewer
viewer.showCloud(pointcloud);
// Input Key ( Exit ESC key )
if (GetKeyState(VK_ESCAPE) < 0) {
pcl::io::savePCDFile("PointCloud.pcd", *pointcloud);
//pcl::io::savePLYFile("test_pcd2.ply", *pointcloud); // Save the last acquired point cloud
printf("Save Point Cloud Data\n");
//break;
}
}
// End Processing
SafeRelease(pColorSource);
SafeRelease(pDepthSource);
SafeRelease(pColorReader);
SafeRelease(pDepthReader);
SafeRelease(pColorDescription);
SafeRelease(pDepthDescription);
SafeRelease(pCoordinateMapper);
if (pSensor) {
pSensor->Close();
}
SafeRelease(pSensor);
printf("Disconnect Kinect Sensor\n");
return 0;
}
The previous code, taken from a tutorial on the Point Cloud Library website, uses the Kinect to display a point cloud of what the Kinect sees in real time, so the point cloud is constantly changing. That is why I would like to get just one frame; in other words, I would like the point cloud to freeze instead of constantly capturing new frames.
Here is my modification:
// Sample_PCL.cpp : Defines the entry point for the console application.
//
#include "stdafx.h"
#define NOMINMAX
#include <Windows.h>
#include <Kinect.h> // Header file for using the Kinect
#include <pcl/visualization/cloud_viewer.h> // Header file for displaying with PCL
#include <pcl/io/pcd_io.h> // Header file for saving point cloud data (.pcd, .ply)
//#include <pcl/io/ply_io.h>
#include <pcl/point_types.h>
#include <iostream>
template<class Interface>
inline void SafeRelease(Interface *& pInterfaceToRelease)
{
if (pInterfaceToRelease != NULL) {
pInterfaceToRelease->Release();
pInterfaceToRelease = NULL; // null the pointer so repeated calls are safe
}
}
int main()
{
// Create Sensor Instance
IKinectSensor* pSensor;
HRESULT hResult = S_OK;
hResult = GetDefaultKinectSensor(&pSensor);
if (FAILED(hResult)) {
std::cerr << "Error : GetDefaultKinectSensor" << std::endl;
return -1;
}
printf("GetDfaultKinectSensor is OK\n");
// Open Sensor
hResult = pSensor->Open();
if (FAILED(hResult)) {
std::cerr << "Error : IKinectSensor::Open()" << std::endl;
return -1;
}
printf("IKinectSensor::Open is OK\n");
// Retrieved Coordinate Mapper
ICoordinateMapper* pCoordinateMapper;
hResult = pSensor->get_CoordinateMapper(&pCoordinateMapper);
if (FAILED(hResult)) {
std::cerr << "Error : IKinectSensor::get_CoordinateMapper()" << std::endl;
return -1;
}
printf("IKinectSensor::get_CoordinateMapper is OK\n");
// Retrieved Color Frame Source
IColorFrameSource* pColorSource;
hResult = pSensor->get_ColorFrameSource(&pColorSource);
if (FAILED(hResult)) {
std::cerr << "Error : IKinectSensor::get_ColorFrameSource()" << std::endl;
return -1;
}
printf("IKinectSensor::get_ColorFrameSource is OK\n");
// Open Color Frame Reader
IColorFrameReader* pColorReader;
hResult = pColorSource->OpenReader(&pColorReader);
if (FAILED(hResult)) {
std::cerr << "Error : IColorFrameSource::OpenReader()" << std::endl;
return -1;
}
printf("IColorFrameSource::OpenReader is OK\n");
// Retrieved Depth Frame Source
IDepthFrameSource* pDepthSource;
hResult = pSensor->get_DepthFrameSource(&pDepthSource);
if (FAILED(hResult)) {
std::cerr << "Error : IKinectSensor::get_DepthFrameSource()" << std::endl;
return -1;
}
printf("IKinectSensor::get_DepthFrameSource is OK\n");
// Open Depth Frame Reader
IDepthFrameReader* pDepthReader;
hResult = pDepthSource->OpenReader(&pDepthReader);
if (FAILED(hResult)) {
std::cerr << "Error : IDepthFrameSource::OpenReader()" << std::endl;
return -1;
}
printf("IDepthFrameSource::OpenReader is OK\n");
// Retrieved Color Frame Size
IFrameDescription* pColorDescription;
hResult = pColorSource->get_FrameDescription(&pColorDescription);
if (FAILED(hResult)) {
std::cerr << "Error : IColorFrameSource::get_FrameDescription()" << std::endl;
return -1;
}
printf("IColorFrameSource::get_FrameDescription is OK\n");
int colorWidth = 0;
int colorHeight = 0;
pColorDescription->get_Width(&colorWidth); // 1920
pColorDescription->get_Height(&colorHeight); // 1080
// To Reserve Color Frame Buffer
std::vector<RGBQUAD> colorBuffer(colorWidth * colorHeight);
// Retrieved Depth Frame Size
IFrameDescription* pDepthDescription;
hResult = pDepthSource->get_FrameDescription(&pDepthDescription);
if (FAILED(hResult)) {
std::cerr << "Error : IDepthFrameSource::get_FrameDescription()" << std::endl;
return -1;
}
printf("IDepthFrameSource::get_FrameDescription is OK\n");
int depthWidth = 0;
int depthHeight = 0;
pDepthDescription->get_Width(&depthWidth); // 512
pDepthDescription->get_Height(&depthHeight); // 424
// To Reserve Depth Frame Buffer
std::vector<UINT16> depthBuffer(depthWidth * depthHeight);
printf("Display Point Cloud\n");
// Create Cloud Viewer
pcl::visualization::CloudViewer viewer("Point Cloud Viewer"); // Window for displaying the point cloud
//while (!viewer.wasStopped()) {
// Acquire Latest Color Frame
IColorFrame* pColorFrame = nullptr;
hResult = pColorReader->AcquireLatestFrame(&pColorFrame);
if (SUCCEEDED(hResult)) {
// Retrieved Color Data
hResult = pColorFrame->CopyConvertedFrameDataToArray(colorBuffer.size() * sizeof(RGBQUAD), reinterpret_cast<BYTE*>(&colorBuffer[0]), ColorImageFormat::ColorImageFormat_Bgra);
if (FAILED(hResult)) {
std::cerr << "Error : IColorFrame::CopyConvertedFrameDataToArray()" << std::endl;
}
}
SafeRelease(pColorFrame);
// Acquire Latest Depth Frame
IDepthFrame* pDepthFrame = nullptr;
hResult = pDepthReader->AcquireLatestFrame(&pDepthFrame);
if (SUCCEEDED(hResult)) {
// Retrieved Depth Data
hResult = pDepthFrame->CopyFrameDataToArray(depthBuffer.size(), &depthBuffer[0]);
if (FAILED(hResult)) {
std::cerr << "Error : IDepthFrame::CopyFrameDataToArray()" << std::endl;
}
}
SafeRelease(pDepthFrame);
// Point Cloud Library setup
// Create Point Cloud
pcl::PointCloud<pcl::PointXYZRGB>::Ptr pointcloud(new pcl::PointCloud<pcl::PointXYZRGB>()); // PCL point cloud structure
pointcloud->width = static_cast<uint32_t>(depthWidth); // Number of points
pointcloud->height = static_cast<uint32_t>(depthHeight);
pointcloud->is_dense = false;
for (int y = 0; y < depthHeight; y++) {
for (int x = 0; x < depthWidth; x++) {
//printf("scann\n");
pcl::PointXYZRGB point; // Point information used by PCL
DepthSpacePoint depthSpacePoint = { static_cast<float>(x), static_cast<float>(y) };
UINT16 depth = depthBuffer[y * depthWidth + x];
// Coordinate Mapping Depth to Color Space, and Setting PointCloud RGB
ColorSpacePoint colorSpacePoint = { 0.0f, 0.0f };
pCoordinateMapper->MapDepthPointToColorSpace(depthSpacePoint, depth, &colorSpacePoint); // Color coordinate space
int colorX = static_cast<int>(std::floor(colorSpacePoint.X + 0.5f));
int colorY = static_cast<int>(std::floor(colorSpacePoint.Y + 0.5f));
if ((0 <= colorX) && (colorX < colorWidth) && (0 <= colorY) && (colorY < colorHeight)) {
RGBQUAD color = colorBuffer[colorY * colorWidth + colorX];
// Store the color information
point.b = color.rgbBlue;
point.g = color.rgbGreen;
point.r = color.rgbRed;
}
// Coordinate Mapping Depth to Camera Space, and Setting PointCloud XYZ
CameraSpacePoint cameraSpacePoint = { 0.0f, 0.0f, 0.0f }; // Camera space
pCoordinateMapper->MapDepthPointToCameraSpace(depthSpacePoint, depth, &cameraSpacePoint);
if ((0 <= colorX) && (colorX < colorWidth) && (0 <= colorY) && (colorY < colorHeight)) {
// Store the Cartesian coordinates
point.x = cameraSpacePoint.X;
point.y = cameraSpacePoint.Y;
point.z = cameraSpacePoint.Z;
}
pointcloud->push_back(point);
}
//}
viewer.showCloud(pointcloud);
while (!viewer.wasStopped())
{
}
/*pcl::io::savePCDFile("PointCloud.pcd", *pointcloud);
printf("Saved Point Cloud Data\n");
// Show Point Cloud on Cloud Viewer
printf("Open viewer\n");
viewer.showCloud(pointcloud);
while (!viewer.wasStopped()) {
}*/
// Input Key ( Exit ESC key )
if (GetKeyState(VK_ESCAPE) < 0) {
pcl::io::savePCDFile("PointCloud.pcd", *pointcloud);
//pcl::io::savePLYFile("test_pcd2.ply", *pointcloud); // Save the last acquired point cloud
printf("Save Point Cloud Data\n");
//break;
}
}
// End Processing
SafeRelease(pColorSource);
SafeRelease(pDepthSource);
SafeRelease(pColorReader);
SafeRelease(pDepthReader);
SafeRelease(pColorDescription);
SafeRelease(pDepthDescription);
SafeRelease(pCoordinateMapper);
if (pSensor) {
pSensor->Close();
}
SafeRelease(pSensor);
printf("Disconnect Kinect Sensor\n");
return 0;
}
The modification mainly consists of removing the loop that constantly updates the point cloud; you can see it commented out in the second code:
while (!viewer.wasStopped())
The problem is that the point cloud viewer does not display any data received by the Kinect, and I would like to know why it is not displayed.
Your code only seems to show the very first frame it receives from the Kinect, which might be empty or invalid. Did you check if the points in the cloud you're taking are sane?
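For instance, a quick check (a sketch reusing the pointcloud variable from your code; the zero-depth test is my choice of criterion) would be to count how many points carry a usable depth before handing the cloud to the viewer:
// Count points with non-zero depth; if this prints 0, the grabbed frame
// was empty and the viewer has nothing meaningful to show.
size_t valid = 0;
for (size_t i = 0; i < pointcloud->size(); ++i) {
    if (pointcloud->points[i].z > 0.0f) {
        ++valid;
    }
}
std::cout << "Valid points: " << valid << " / " << pointcloud->size() << std::endl;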
However, you might want to approach the problem differently:
Leave the visualization loop as it is.
Register a key handler using registerKeyboardCallback (doc).
When a specific key is pressed, set a boolean to true.
In the visualization loop, if that boolean variable is true, skip the frame grabbing from the Kinect. It should retain the previously set cloud.
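A minimal sketch of that approach, adapted to the code above (the choice of the "f" key, the flag name, and the loop structure are mine, not part of the tutorial):
// Freeze flag toggled from the viewer's keyboard callback.
bool freeze = false;

void onKeyboard(const pcl::visualization::KeyboardEvent& event, void* cookie)
{
    // Toggle the flag when "f" is pressed; the loop below then skips grabbing.
    if (event.getKeySym() == "f" && event.keyDown()) {
        bool* pFreeze = static_cast<bool*>(cookie);
        *pFreeze = !*pFreeze;
    }
}

// In main(), after creating the viewer:
// viewer.registerKeyboardCallback(onKeyboard, &freeze);
// and the visualization loop becomes:
// while (!viewer.wasStopped()) {
//     if (!freeze) {
//         // ... acquire color/depth frames and rebuild pointcloud as before ...
//         viewer.showCloud(pointcloud);
//     }
// }
Since showCloud keeps displaying the last cloud that was set, the view stays frozen for as long as the flag is true.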
I am trying to write the simplest possible compute shader in DirectX 12 so that I can have a starting point for a real project. However, it seems like no matter what I do, I am unable to get my GPU to process "1+1" and see the output. As there is almost no documentation on compute shaders, I figured my only option now is to query Stack Overflow.
I wrote the following code based on the D3D12nBodyGravity project. First I copied over as much of the code as possible verbatim, fixed "small" things, and then once it was all working I started trimming the code down to the basics. I am using Visual Studio 2019.
myClass.cpp:
#include "pch.h"
#include "myClass.h"
#include <d3dcompiler.h> // D3DReadFileToBlob
#include "Common\DirectXHelper.h" // NAME_D3D12_OBJECT
#include "Common\Device.h"
#include <iostream>
// InterlockedCompareExchange returns the object's value if the
// comparison fails. If it is already 0, then its value won't
// change and 0 will be returned.
#define InterlockedGetValue(object) InterlockedCompareExchange(object, 0, 0)
myClass::myClass()
: m_frameIndex(0)
, m_UavDescriptorSize(0)
, m_renderContextFenceValue(0)
, m_frameFenceValues{} {
std::cout << "Initializing myClass" << std::endl;
m_FenceValue = 0;
//std::cout << "Calling DXGIDeclareAdapterRemovalSupport()" << std::endl;
//DX::ThrowIfFailed(DXGIDeclareAdapterRemovalSupport());
// Identify the device
std::cout << "Identifying the device" << std::endl;
auto m_device = Device::Get().GetDevice();
std::cout << "Leading the rendering pipeline dependencies" << std::endl;
// Load the rendering pipeline dependencies.
{
std::cout << " Creating the root signatures" << std::endl;
// Create the root signatures.
{
CD3DX12_ROOT_PARAMETER rootParameter;
rootParameter.InitAsUnorderedAccessView(0);
Microsoft::WRL::ComPtr<ID3DBlob> signature;
Microsoft::WRL::ComPtr<ID3DBlob> error;
CD3DX12_ROOT_SIGNATURE_DESC computeRootSignatureDesc(1, &rootParameter, 0, nullptr);
DX::ThrowIfFailed(D3D12SerializeRootSignature(&computeRootSignatureDesc, D3D_ROOT_SIGNATURE_VERSION_1, &signature, &error));
DX::ThrowIfFailed(m_device->CreateRootSignature(0, signature->GetBufferPointer(), signature->GetBufferSize(), IID_PPV_ARGS(&m_computeRootSignature)));
}
// Describe and create the command queue.
std::cout << " Describing and creating the command queue" << std::endl;
D3D12_COMMAND_QUEUE_DESC queueDesc = {};
queueDesc.Flags = D3D12_COMMAND_QUEUE_FLAG_NONE;
queueDesc.Type = D3D12_COMMAND_LIST_TYPE_DIRECT;
DX::ThrowIfFailed(m_device->CreateCommandQueue(&queueDesc, IID_PPV_ARGS(&m_commandQueue)));
NAME_D3D12_OBJECT(m_commandQueue);
std::cout << " Creating descriptor heaps" << std::endl;
// Create descriptor heaps.
{
// Describe and create a shader resource view (SRV) and unordered
// access view (UAV) descriptor heap.
D3D12_DESCRIPTOR_HEAP_DESC UavHeapDesc = {};
UavHeapDesc.NumDescriptors = DescriptorCount;
UavHeapDesc.Type = D3D12_DESCRIPTOR_HEAP_TYPE_CBV_SRV_UAV;
UavHeapDesc.Flags = D3D12_DESCRIPTOR_HEAP_FLAG_SHADER_VISIBLE;
DX::ThrowIfFailed(m_device->CreateDescriptorHeap(&UavHeapDesc, IID_PPV_ARGS(&m_UavHeap)));
NAME_D3D12_OBJECT(m_UavHeap);
m_UavDescriptorSize = m_device->GetDescriptorHandleIncrementSize(D3D12_DESCRIPTOR_HEAP_TYPE_CBV_SRV_UAV);
}
std::cout << " Creating a command allocator for each frame" << std::endl;
// Create a command allocator for each frame.
for (UINT n = 0; n < FrameCount; n++) {
DX::ThrowIfFailed(m_device->CreateCommandAllocator(D3D12_COMMAND_LIST_TYPE_DIRECT, IID_PPV_ARGS(&m_commandAllocators[n])));
}
} // Load the rendering pipeline dependencies.
std::cout << "Loading the sample assets" << std::endl;
// Load the sample assets.
{
std::cout << " Creating the pipeline states, including compiling and loading shaders" << std::endl;
// Create the pipeline states, which includes compiling and loading shaders.
{
Microsoft::WRL::ComPtr<ID3DBlob> computeShader;
#if defined(_DEBUG)
// Enable better shader debugging with the graphics debugging tools.
UINT compileFlags = D3DCOMPILE_DEBUG | D3DCOMPILE_SKIP_OPTIMIZATION;
#else
UINT compileFlags = 0;
#endif
// Load and compile the compute shader.
DX::ThrowIfFailed(D3DReadFileToBlob(L"ComputeShader.cso", &computeShader));
auto convert_blob_to_byte = [](Microsoft::WRL::ComPtr<ID3DBlob> blob) {
auto* p = reinterpret_cast<unsigned char*>(blob->GetBufferPointer());
auto n = blob->GetBufferSize();
std::vector<unsigned char> buff;
buff.reserve(n);
std::copy(p, p + n, std::back_inserter(buff));
return buff;
};
std::vector<BYTE> m_computeShader = convert_blob_to_byte(computeShader);
// Describe and create the compute pipeline state object (PSO).
D3D12_COMPUTE_PIPELINE_STATE_DESC computePsoDesc = {};
computePsoDesc.pRootSignature = m_computeRootSignature.Get();
computePsoDesc.CS = CD3DX12_SHADER_BYTECODE(computeShader.Get());
DX::ThrowIfFailed(m_device->CreateComputePipelineState(&computePsoDesc, IID_PPV_ARGS(&m_computeState)));
NAME_D3D12_OBJECT(m_computeState);
}
std::cout << " Creating the command list" << std::endl;
// Create the command list.
DX::ThrowIfFailed(m_device->CreateCommandList(0, D3D12_COMMAND_LIST_TYPE_DIRECT, m_commandAllocators[m_frameIndex].Get(), m_computeState.Get(), IID_PPV_ARGS(&m_commandList)));
NAME_D3D12_OBJECT(m_commandList);
std::cout << " Initializing the data in the buffers" << std::endl;
// Initialize the data in the buffers.
{
data.resize(2);
for (unsigned int i = 0; i < data.size(); i++) {
data[i] = 0.0f;
}
const UINT dataSize = data.size() * sizeof(data[0]);
D3D12_HEAP_PROPERTIES defaultHeapProperties = CD3DX12_HEAP_PROPERTIES(D3D12_HEAP_TYPE_DEFAULT);
D3D12_HEAP_PROPERTIES uploadHeapProperties = CD3DX12_HEAP_PROPERTIES(D3D12_HEAP_TYPE_UPLOAD);
D3D12_HEAP_PROPERTIES readbackHeapProperties = CD3DX12_HEAP_PROPERTIES(D3D12_HEAP_TYPE_READBACK);
D3D12_RESOURCE_DESC bufferDesc = CD3DX12_RESOURCE_DESC::Buffer(dataSize, D3D12_RESOURCE_FLAG_ALLOW_UNORDERED_ACCESS);
D3D12_RESOURCE_DESC uploadBufferDesc = CD3DX12_RESOURCE_DESC::Buffer(dataSize);
readbackBufferDesc = CD3DX12_RESOURCE_DESC::Buffer(dataSize);
DX::ThrowIfFailed(m_device->CreateCommittedResource(
&defaultHeapProperties,
D3D12_HEAP_FLAG_NONE,
&bufferDesc,
D3D12_RESOURCE_STATE_COPY_DEST,
nullptr,
IID_PPV_ARGS(&m_dataBuffer)));
m_dataBuffer.Get()->SetName(L"m_dataBuffer");
DX::ThrowIfFailed(m_device->CreateCommittedResource(
&uploadHeapProperties,
D3D12_HEAP_FLAG_NONE,
&uploadBufferDesc,
D3D12_RESOURCE_STATE_GENERIC_READ,
nullptr,
IID_PPV_ARGS(&m_dataBufferUpload)));
m_dataBufferUpload.Get()->SetName(L"m_dataBufferUpload");
DX::ThrowIfFailed(m_device->CreateCommittedResource(
&readbackHeapProperties,
D3D12_HEAP_FLAG_NONE,
&readbackBufferDesc,
D3D12_RESOURCE_STATE_COPY_DEST,
nullptr,
IID_PPV_ARGS(&m_dataBufferReadback)));
m_dataBufferReadback.Get()->SetName(L"m_dataBufferReadback");
NAME_D3D12_OBJECT(m_dataBuffer);
dataSubResource = {};
dataSubResource.pData = &data[0];
dataSubResource.RowPitch = dataSize;
dataSubResource.SlicePitch = dataSubResource.RowPitch;
UpdateSubresources<1>(m_commandList.Get(), m_dataBuffer.Get(), m_dataBufferUpload.Get(), 0, 0, 1, &dataSubResource);
m_commandList->ResourceBarrier(1, &CD3DX12_RESOURCE_BARRIER::Transition(m_dataBuffer.Get(), D3D12_RESOURCE_STATE_COPY_DEST, D3D12_RESOURCE_STATE_COMMON));
m_commandList->CopyResource(m_dataBufferReadback.Get(), m_dataBufferUpload.Get());
D3D12_UNORDERED_ACCESS_VIEW_DESC uavDesc = {};
uavDesc.Format = DXGI_FORMAT_UNKNOWN;
uavDesc.ViewDimension = D3D12_UAV_DIMENSION_BUFFER;
uavDesc.Buffer.FirstElement = 0;
uavDesc.Buffer.NumElements = 1;
uavDesc.Buffer.StructureByteStride = sizeof(data[0]);
uavDesc.Buffer.CounterOffsetInBytes = 0;
uavDesc.Buffer.Flags = D3D12_BUFFER_UAV_FLAG_NONE;
CD3DX12_CPU_DESCRIPTOR_HANDLE uavHandle0(m_UavHeap->GetCPUDescriptorHandleForHeapStart(), Uav, m_UavDescriptorSize);
m_device->CreateUnorderedAccessView(m_dataBuffer.Get(), nullptr, &uavDesc, uavHandle0);
} // Initialize the data in the buffers.
std::cout << " Closing the command list and executing it to begind the initial GPU setup" << std::endl;
// Close the command list and execute it to begin the initial GPU setup.
DX::ThrowIfFailed(m_commandList->Close());
ID3D12CommandList* ppCommandLists[] = { m_commandList.Get() };
m_commandQueue->ExecuteCommandLists(_countof(ppCommandLists), ppCommandLists);
std::cout << " Creating synchronization objects and wait until assets have been uploaded to the GPU" << std::endl;
// Create synchronization objects and wait until assets have been uploaded to the GPU.
{
DX::ThrowIfFailed(m_device->CreateFence(m_renderContextFenceValue, D3D12_FENCE_FLAG_NONE, IID_PPV_ARGS(&m_renderContextFence)));
m_renderContextFenceValue++;
m_renderContextFenceEvent = CreateEvent(nullptr, FALSE, FALSE, nullptr);
if (m_renderContextFenceEvent == nullptr) {
DX::ThrowIfFailed(HRESULT_FROM_WIN32(GetLastError()));
}
// Add a signal command to the queue.
DX::ThrowIfFailed(m_commandQueue->Signal(m_renderContextFence.Get(), m_renderContextFenceValue));
// Instruct the fence to set the event object when the signal command completes.
DX::ThrowIfFailed(m_renderContextFence->SetEventOnCompletion(m_renderContextFenceValue, m_renderContextFenceEvent));
m_renderContextFenceValue++;
// Wait until the signal command has been processed.
WaitForSingleObject(m_renderContextFenceEvent, INFINITE);
}
} // Load the sample assets.
std::cout << "Creating compute resources" << std::endl;
{
// Create compute resources.
D3D12_COMMAND_QUEUE_DESC queueDesc = { D3D12_COMMAND_LIST_TYPE_COMPUTE, 0, D3D12_COMMAND_QUEUE_FLAG_NONE };
DX::ThrowIfFailed(m_device->CreateCommandQueue(&queueDesc, IID_PPV_ARGS(&m_computeCommandQueue)));
DX::ThrowIfFailed(m_device->CreateCommandAllocator(D3D12_COMMAND_LIST_TYPE_COMPUTE, IID_PPV_ARGS(&m_computeAllocator)));
DX::ThrowIfFailed(m_device->CreateCommandList(0, D3D12_COMMAND_LIST_TYPE_COMPUTE, m_computeAllocator.Get(), nullptr, IID_PPV_ARGS(&m_computeCommandList)));
DX::ThrowIfFailed(m_device->CreateFence(0, D3D12_FENCE_FLAG_SHARED, IID_PPV_ARGS(&m_Fence)));
m_FenceEvent = CreateEvent(nullptr, FALSE, FALSE, nullptr);
if (m_FenceEvent == nullptr) {
DX::ThrowIfFailed(HRESULT_FROM_WIN32(GetLastError()));
}
}
std::cout << "Calculating" << std::endl;
Calculate();
std::cout << "Finished" << std::endl;
}
void myClass::Calculate() {
m_computeCommandList.Get()->ResourceBarrier(1, &CD3DX12_RESOURCE_BARRIER::Transition(m_dataBuffer.Get(), D3D12_RESOURCE_STATE_COMMON, D3D12_RESOURCE_STATE_UNORDERED_ACCESS));
m_computeCommandList.Get()->SetPipelineState(m_computeState.Get());
m_computeCommandList.Get()->SetComputeRootSignature(m_computeRootSignature.Get());
ID3D12DescriptorHeap* ppHeaps[] = { m_UavHeap.Get() };
m_computeCommandList.Get()->SetDescriptorHeaps(_countof(ppHeaps), ppHeaps);
CD3DX12_GPU_DESCRIPTOR_HANDLE uavHandle(m_UavHeap->GetGPUDescriptorHandleForHeapStart(), Uav, m_UavDescriptorSize);
m_computeCommandList.Get()->SetComputeRootUnorderedAccessView(ComputeRootUAVTable, m_dataBuffer->GetGPUVirtualAddress());
m_computeCommandList.Get()->Dispatch(1, 1, 1);
m_computeCommandList.Get()->ResourceBarrier(1, &CD3DX12_RESOURCE_BARRIER::Transition(m_dataBuffer.Get(), D3D12_RESOURCE_STATE_UNORDERED_ACCESS, D3D12_RESOURCE_STATE_COMMON));
// Close and execute the command list.
DX::ThrowIfFailed(m_computeCommandList.Get()->Close());
ID3D12CommandList* commandLists[] = { m_computeCommandList.Get() };
m_computeCommandQueue->ExecuteCommandLists(1, commandLists);
// Wait for the compute shader to complete the calculation.
UINT64 FenceValue = InterlockedIncrement(&m_FenceValue);
DX::ThrowIfFailed(m_computeCommandQueue.Get()->Signal(m_Fence.Get(), FenceValue));
DX::ThrowIfFailed(m_Fence.Get()->SetEventOnCompletion(FenceValue, m_FenceEvent));
WaitForSingleObject(m_FenceEvent, INFINITE);
std::cout << "FenceValue = " << FenceValue << " " << m_FenceValue << " " << m_Fence.Get()->GetCompletedValue() << std::endl;
// Check the output!
float* dataptr = nullptr;
D3D12_RANGE range = { 0, readbackBufferDesc.Width };
DX::ThrowIfFailed(m_dataBufferReadback->Map(0, &range, (void**)&dataptr));
for (int i = 0; i < readbackBufferDesc.Width / sizeof(data[0]); i++)
printf("uav[%d] = %.2f\n", i, dataptr[i]);
m_dataBufferReadback->Unmap(0, nullptr);
for (unsigned int i = 0; i < data.size(); i++) {
std::cout << "data[" << i << "] = " << data[i] << std::endl;
}
}
myClass.h:
#pragma once
#include "Common\Device.h"
#include <iostream>
// We have to write all of this as its own class, otherwise we cannot
// use the "this" pointer when we create compute resources. We need to
// do that because this code targets multithreading.
class myClass {
public:
myClass();
private:
// Two buffers full of data are used. The compute thread alternates
// writing to each of them. The render thread renders using the
// buffer that is not currently in use by the compute shader.
//struct Data {
// float c;
//};
//std::vector<Data> data;
std::vector<float> data;
// For the compute pipeline, the CBV is a struct containing some
// constants used in the compute shader.
struct ConstantBufferCS {
float a;
float b;
};
D3D12_SUBRESOURCE_DATA dataSubResource;
static const UINT FrameCount = 1;
//static const UINT ThreadCount = 1;
UINT m_heightInstances;
UINT m_widthInstances;
UINT m_frameIndex;
Microsoft::WRL::ComPtr<ID3D12RootSignature> m_rootSignature;
Microsoft::WRL::ComPtr<ID3D12RootSignature> m_computeRootSignature;
Microsoft::WRL::ComPtr<ID3D12CommandQueue> m_commandQueue;
Microsoft::WRL::ComPtr<ID3D12DescriptorHeap> m_UavHeap;
Microsoft::WRL::ComPtr<ID3D12CommandAllocator> m_commandAllocators[FrameCount];
Microsoft::WRL::ComPtr<ID3D12PipelineState> m_computeState;
Microsoft::WRL::ComPtr<ID3D12GraphicsCommandList> m_commandList;
Microsoft::WRL::ComPtr<ID3D12Resource> m_constantBufferCS;
UINT64 m_renderContextFenceValue;
HANDLE m_renderContextFenceEvent;
UINT64 m_frameFenceValues[FrameCount];
UINT m_UavDescriptorSize;
ConstantBufferCS constantBufferCS;
Microsoft::WRL::ComPtr<ID3D12Resource> constantBufferCSUpload;
Microsoft::WRL::ComPtr<ID3D12Fence> m_renderContextFence;
Microsoft::WRL::ComPtr<ID3D12Resource> m_dataBuffer;
Microsoft::WRL::ComPtr<ID3D12Resource> m_dataBufferUpload;
Microsoft::WRL::ComPtr<ID3D12Resource> m_dataBufferReadback;
// Compute objects.
Microsoft::WRL::ComPtr<ID3D12CommandAllocator> m_computeAllocator;
Microsoft::WRL::ComPtr<ID3D12CommandQueue> m_computeCommandQueue;
Microsoft::WRL::ComPtr<ID3D12GraphicsCommandList> m_computeCommandList;
Microsoft::WRL::ComPtr<ID3D12Fence> m_Fence;
volatile HANDLE m_FenceEvent;
D3D12_RESOURCE_DESC readbackBufferDesc;
// State
UINT64 volatile m_FenceValue;
/*
struct ThreadData {
myClass* pContext;
UINT threadIndex;
};
ThreadData m_threadData;
HANDLE m_threadHandles;
*/
void Calculate();
// Indices of shader resources in the descriptor heap.
enum DescriptorHeapIndex : UINT32 {
Uav = 0,
DescriptorCount = 1
};
enum ComputeRootParameters : UINT32 {
//ComputeRootCBV = 0,
ComputeRootUAVTable = 0,
ComputeRootParametersCount
};
};
Device.cpp:
#pragma once
#include "pch.h"
#include "Device.h"
#include "DirectXHelper.h"
#include <cassert> // for "assert"
#include <iostream>
static Device* gs_pSingelton = nullptr;
// Constructor
Device::Device(HINSTANCE hInst, bool useWarp)
: m_hInstance(hInst)
, m_useWarp(useWarp)
{
}
void Device::Initialize() {
#if defined(_DEBUG)
// Always enable the debug layer before doing anything DX12 related
// so all possible errors generated while creating DX12 objects
// are caught by the debug layer.
Microsoft::WRL::ComPtr<ID3D12Debug1> debugInterface;
DX::ThrowIfFailed(D3D12GetDebugInterface(IID_PPV_ARGS(&debugInterface)));
debugInterface->EnableDebugLayer();
// Enable these if you want full validation (will slow down rendering a lot).
//debugInterface->SetEnableGPUBasedValidation(TRUE);
//debugInterface->SetEnableSynchronizedCommandQueueValidation(TRUE);
#endif
auto dxgiAdapter = GetAdapter(false);
if (!dxgiAdapter) { // If no supporting DX12 adapters exist, fall back to WARP
dxgiAdapter = GetAdapter(true);
}
if (dxgiAdapter) {
m_device = CreateDevice(dxgiAdapter);
}
else {
throw std::exception("DXGI adapter enumeration failed.");
}
}
void Device::Create(HINSTANCE hInst) {
if (!gs_pSingelton) {
gs_pSingelton = new Device(hInst);
gs_pSingelton->Initialize();
}
}
Device& Device::Get() {
assert(gs_pSingelton);
return *gs_pSingelton;
}
void Device::Destroy() {
if (gs_pSingelton) {
delete gs_pSingelton;
gs_pSingelton = nullptr;
}
}
// Destructor
Device::~Device() {
}
Microsoft::WRL::ComPtr<ID3D12Device2> Device::CreateDevice(Microsoft::WRL::ComPtr<IDXGIAdapter4> adapter) {
Microsoft::WRL::ComPtr<ID3D12Device2> d3d12Device2;
DX::ThrowIfFailed(D3D12CreateDevice(adapter.Get(), D3D_FEATURE_LEVEL_11_0, IID_PPV_ARGS(&d3d12Device2)));
// Enable debug messages in debug mode.
#if defined(_DEBUG)
Microsoft::WRL::ComPtr<ID3D12InfoQueue> pInfoQueue;
if (SUCCEEDED(d3d12Device2.As(&pInfoQueue))) {
pInfoQueue->SetBreakOnSeverity(D3D12_MESSAGE_SEVERITY_CORRUPTION, TRUE);
pInfoQueue->SetBreakOnSeverity(D3D12_MESSAGE_SEVERITY_ERROR, TRUE);
pInfoQueue->SetBreakOnSeverity(D3D12_MESSAGE_SEVERITY_WARNING, TRUE);
// Suppress whole categories of messages
//D3D12_MESSAGE_CATEGORY Categories[] = {};
// Suppress messages based on their severity level
D3D12_MESSAGE_SEVERITY Severities[] = { D3D12_MESSAGE_SEVERITY_INFO };
// Suppress individual messages by their ID
D3D12_MESSAGE_ID DenyIds[] = {
D3D12_MESSAGE_ID_CLEARRENDERTARGETVIEW_MISMATCHINGCLEARVALUE, // I'm really not sure how to avoid this message.
D3D12_MESSAGE_ID_MAP_INVALID_NULLRANGE, // This warning occurs when using capture frame while graphics debugging.
D3D12_MESSAGE_ID_UNMAP_INVALID_NULLRANGE, // This warning occurs when using capture frame while graphics debugging.
};
D3D12_INFO_QUEUE_FILTER NewFilter = {};
//NewFilter.DenyList.NumCategories = _countof(Categories);
//NewFilter.DenyList.pCategoryList = Categories;
NewFilter.DenyList.NumSeverities = _countof(Severities);
NewFilter.DenyList.pSeverityList = Severities;
NewFilter.DenyList.NumIDs = _countof(DenyIds);
NewFilter.DenyList.pIDList = DenyIds;
DX::ThrowIfFailed(pInfoQueue->PushStorageFilter(&NewFilter));
}
#endif
return d3d12Device2;
}
Microsoft::WRL::ComPtr<IDXGIAdapter4> Device::GetAdapter(bool useWarp) {
UINT createFactoryFlags = 0;
#if defined(_DEBUG)
createFactoryFlags = DXGI_CREATE_FACTORY_DEBUG;
#endif
DX::ThrowIfFailed(CreateDXGIFactory2(createFactoryFlags, IID_PPV_ARGS(&m_factory)));
Microsoft::WRL::ComPtr<IDXGIAdapter1> dxgiAdapter1;
Microsoft::WRL::ComPtr<IDXGIAdapter4> dxgiAdapter4;
if (useWarp) {
DX::ThrowIfFailed(m_factory->EnumWarpAdapter(IID_PPV_ARGS(&dxgiAdapter1)));
DX::ThrowIfFailed(dxgiAdapter1.As(&dxgiAdapter4));
}
else {
SIZE_T maxDedicatedVideoMemory = 0;
for (UINT i = 0; m_factory->EnumAdapters1(i, &dxgiAdapter1) != DXGI_ERROR_NOT_FOUND; ++i) {
DXGI_ADAPTER_DESC1 dxgiAdapterDesc1;
dxgiAdapter1->GetDesc1(&dxgiAdapterDesc1);
// Check to see if the adapter can create a D3D12 device without actually
// creating it. The adapter with the largest dedicated video memory
// is favored.
if ((dxgiAdapterDesc1.Flags & DXGI_ADAPTER_FLAG_SOFTWARE) == 0 &&
SUCCEEDED(D3D12CreateDevice(dxgiAdapter1.Get(),
D3D_FEATURE_LEVEL_11_0, __uuidof(ID3D12Device), nullptr)) &&
dxgiAdapterDesc1.DedicatedVideoMemory > maxDedicatedVideoMemory) {
maxDedicatedVideoMemory = dxgiAdapterDesc1.DedicatedVideoMemory;
DX::ThrowIfFailed(dxgiAdapter1.As(&dxgiAdapter4));
}
}
}
return dxgiAdapter4;
}
Device.h:
#pragma once
#include <dxgi1_6.h> // IDXGIAdapter4
// We require this file because we are unable to pass the device pointer everywhere we need it.
class Device {
public:
/**
* Create the device singleton with the device instance handle.
*/
static void Create(HINSTANCE hInst);
/**
* Destroy the device instance.
*/
static void Destroy();
/**
* Get the device singleton.
*/
static Device& Get();
/**
* Get the Direct3D 12 device
*/
Microsoft::WRL::ComPtr<ID3D12Device2> GetDevice() const { return m_device; }
Microsoft::WRL::ComPtr<IDXGIFactory4> GetFactory() const { return m_factory; }
protected:
// Create a device instance
Device(HINSTANCE hInst, bool useWarp = false);
// Destroy the device instance.
virtual ~Device();
// Initialize the device instance.
void Initialize();
Microsoft::WRL::ComPtr<IDXGIAdapter4> GetAdapter(bool useWarp);
Microsoft::WRL::ComPtr<ID3D12Device2> CreateDevice(Microsoft::WRL::ComPtr<IDXGIAdapter4> adapter);
private:
Device(const Device& copy) = delete;
Device& operator=(const Device& other) = delete;
HINSTANCE m_hInstance;
Microsoft::WRL::ComPtr<ID3D12Device2> m_device;
Microsoft::WRL::ComPtr<IDXGIFactory4> m_factory;
bool m_useWarp;
};
ComputeShader.hlsl:
RWStructuredBuffer<float> output : register(u0); // UAV
[numthreads(1, 1, 1)]
void main( uint3 DTid : SV_DispatchThreadID ) {
output[DTid.x] = 1 + 1;
}
Please let me know if you are able to spot what I am not understanding. I can also try uploading my project to GitHub if it helps... SOS :(
I wrote some code in C++ to take a snapshot with a uEye camera; however, the picture that gets saved is just white. That said, sometimes you can see a tiny bit of the road, but it is still mostly white. I believe it is an issue with an auto parameter, but I feel as if I have tried everything.
Below is my code:
void MainWindow::CaptureImage(){
int initcamera = is_InitCamera(&hCam, hWndDisplay);
if(initcamera != IS_SUCCESS)
{
cout<<endl<<"Failed to initialize the camera"<<endl;
exit(-1);
}else{
cout<<endl<<"Initialized Camera"<<endl;
}
int camerainfo = is_GetCameraInfo (hCam, &camera_info);
if(camerainfo != IS_SUCCESS)
{
cout<<endl<<"Unable to acquire camera information"<<endl;
exit(-1);
}else{
cout<<endl<<"Camera information required"<<endl;
}
int sensorinfo = is_GetSensorInfo (hCam, &sInfo);
if(sensorinfo != IS_SUCCESS)
{
cout<<endl<<"Unable to acquire sensor information"<<endl;
exit(-1);
}else{
cout<<endl<<"Sensor information acquired"<<endl;
}
int colormode = is_SetColorMode(hCam, IS_CM_BGR8_PACKED);
if(colormode != IS_SUCCESS)
{
cout<<endl<<"Unable to set the color mode"<<endl;
exit(-1);
}else{
cout<<endl<<"Color mode set"<<endl;
}
int pXPos = (sInfo.nMaxWidth);
int pYPos = (sInfo.nMaxHeight);
int rit = is_AllocImageMem (hCam,pXPos,pYPos, 24, &m_pcImageMemory, &m_lMemoryId);
if(rit != IS_SUCCESS)
{
cout<<endl<<"UNABLE TO INITIALIZE MEMORY"<<endl;
system("PAUSE");
exit(-1);
}else{
cout<<endl<<"INITIALIZED MEMORY"<<endl;
}
int rat = is_SetImageMem (hCam, m_pcImageMemory, m_lMemoryId);
if(rat != IS_SUCCESS)
{
cout<<endl<<"UNABLE TO ACTIVATE MEMORY"<<endl;
system("PAUSE");
exit(-1);
}else{
cout<<endl<<"ACTIVATE MEMORY"<<endl;
}
double strength_factor = 1.0;
int colorcorrection = is_SetColorCorrection(hCam, IS_CCOR_ENABLE, &strength_factor);
double pval = 1;
int whiteb = is_SetAutoParameter(hCam, IS_SET_ENABLE_AUTO_WHITEBALANCE, &pval, 0);
double gval = 1;
int gains = is_SetAutoParameter(hCam, IS_SET_ENABLE_AUTO_GAIN, &gval, 0);
int dummy;
char *pMem, *pLast;
IMAGE_FILE_PARAMS ImageFileParams;
ImageFileParams.pwchFileName = L"./TestImage.bmp"; /// <-- Insert name and location of the image
ImageFileParams.pnImageID = NULL;
ImageFileParams.ppcImageMem = NULL;
ImageFileParams.nQuality = 0;
ImageFileParams.nFileType = IS_IMG_BMP;
int sho = is_FreezeVideo(hCam, IS_WAIT);
if(sho != IS_SUCCESS)
{
cout<<endl<<"UNABLE TO ACQUIRE FROM THE CAMERA"<<endl;
system("PAUSE");
exit(-1);
}
if (sho == IS_SUCCESS){
int m_Ret = is_GetActiveImageMem(hCam, &pLast, &dummy);
int n_Ret = is_GetImageMem(hCam, (void**)&pLast);
}
if (is_ImageFile(hCam, IS_IMAGE_FILE_CMD_SAVE, (void*)&ImageFileParams, sizeof(ImageFileParams)) == IS_SUCCESS)
{
cout << endl << "An Image was saved" << endl;
}
else
{
cout << endl << "something went wrong" << endl;
}
// Releases an image memory that was allocated
//is_FreeImageMem(hCam, pcImageMemory, nMemoryId);
// Disables the hCam camera handle and releases the data structures and memory areas taken up by the uEye camera
is_ExitCamera(hCam);
}
I have tried many things, like the parameters below, but it is still just white. I had the camera in a darker room and the image came out OK, so I definitely think it is due to the daylight outside.
const wstring filenameU(filename.begin(), filename.end());
is_ParameterSet(hCam, IS_PARAMETERSET_CMD_LOAD_FILE,(void*) filenameU.c_str(), 0);
unsigned int range[3];
memset(range, 0, sizeof(range));
is_PixelClock(hCam, IS_PIXELCLOCK_CMD_GET_RANGE, (void*)range, sizeof(range));
unsigned int maximumPixelClock = range[1];
is_PixelClock(hCam, IS_PIXELCLOCK_CMD_SET, (void*)&maximumPixelClock, sizeof(maximumPixelClock));
double pval1 = auto_exposure, pval2 = 0;
double pval1 = 1, pval2 = 0;
is_SetAutoParameter(hCam, IS_SET_ENABLE_AUTO_SENSOR_SHUTTER, &pval1, &pval2);
is_SetAutoParameter(hCam, IS_SET_ENABLE_AUTO_SHUTTER,&pval1, &pval2);
is_SetAutoParameter(hCam, IS_SET_ENABLE_AUTO_SENSOR_FRAMERATE,&pval1, &pval2);
is_SetAutoParameter(hCam, IS_SET_AUTO_WB_OFFSET, &pval1, &pval2);
int desiredFrameRate = 60;
is_SetFrameRate(hCam, desiredFrameRate, &m_actualFrameRate);
You cannot just take one picture because the exposure might be too high. You have to capture a few images to give the auto exposure control (AEC) a chance to lower the exposure. The reason you have to capture some frames is that the AEC is done in software and can only start to change parameters once it gets some frames.
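A minimal sketch of that warm-up, assuming the same hCam and memory setup as in the question (the count of 10 frames is an arbitrary value to tune for your scene):
// Grab and discard a few frames so the software auto exposure (and auto
// white balance) can settle before the frame that is actually saved.
for (int i = 0; i < 10; i++)
{
    if (is_FreezeVideo(hCam, IS_WAIT) != IS_SUCCESS)
    {
        cout << endl << "Warm-up frame grab failed" << endl;
        break;
    }
}
// The next is_FreezeVideo() call should now deliver a frame with settled
// exposure, ready for is_ImageFile(..., IS_IMAGE_FILE_CMD_SAVE, ...).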
I am trying to make a program that uses servos connected to an Arduino to follow your face. I'm using Visual C++ in Visual Studio 2017 with OpenCV 4.1 to do the facial recognition, then sending the location of the identified face to an Arduino via a serial connection.
I'm new to OpenCV, so I've been going through many tutorials to try to make code that works.
The plan is to do the facial recognition, then calculate the location of the face in Visual Studio, combine the x and y locations into a single string, and send it to the Arduino over serial. The Arduino then splits the coordinates, separated by a colon, using strtok(). It will then move the servos accordingly to keep the tracked face in the center of the screen.
I have tested the serial comm in a separate C++ project, and it seems to work fine with the current Arduino code (not anywhere near finished, because I ran into a hiccup with the serial comm), except that when I send the location string, the servo moves to the desired spot and then returns to its starting location.
When I try to implement the serial communication in the project with OpenCV, it sends the location once, then appears to stop sending serial commands. I've tried debugging manually by calling the sendSerial function in other locations to see if I can get it to send. I've looked around for solutions but haven't found any definite ones, other than that it may be the waitKey(10) function. If this is so, is there a way around this?
Thanks.
###############SerialPort.h##############
#ifndef SERIALPORT_H
#define SERIALPORT_H
#define ARDUINO_WAIT_TIME 2000
#define MAX_DATA_LENGTH 255
#include <windows.h>
#include <stdio.h>
#include <stdlib.h>
class SerialPort
{
private:
HANDLE handler;
bool connected;
COMSTAT status;
DWORD errors;
public:
SerialPort(char *portName);
~SerialPort();
int readSerialPort(char *buffer, unsigned int buf_size);
bool writeSerialPort(char *buffer, unsigned int buf_size);
bool isConnected();
};
#endif // SERIALPORT_H
#################SerialSource.cpp##################
#include "SerialPort.h"
SerialPort::SerialPort(char *portName)
{
this->connected = false;
this->handler = CreateFileA(static_cast<LPCSTR>(portName),
GENERIC_READ | GENERIC_WRITE,
0,
NULL,
OPEN_EXISTING,
FILE_ATTRIBUTE_NORMAL,
NULL);
if (this->handler == INVALID_HANDLE_VALUE) {
if (GetLastError() == ERROR_FILE_NOT_FOUND) {
printf("ERROR: Handle was not attached. Reason: %s not available\n", portName);
}
else
{
printf("ERROR!!!");
}
}
else {
DCB dcbSerialParameters = { 0 };
if (!GetCommState(this->handler, &dcbSerialParameters)) {
printf("failed to get current serial parameters");
}
else {
dcbSerialParameters.BaudRate = CBR_9600;
dcbSerialParameters.ByteSize = 8;
dcbSerialParameters.StopBits = ONESTOPBIT;
dcbSerialParameters.Parity = NOPARITY;
dcbSerialParameters.fDtrControl = DTR_CONTROL_ENABLE;
if (!SetCommState(handler, &dcbSerialParameters))
{
printf("ALERT: could not set Serial port parameters\n");
}
else {
this->connected = true;
PurgeComm(this->handler, PURGE_RXCLEAR | PURGE_TXCLEAR);
Sleep(ARDUINO_WAIT_TIME);
}
}
}
}
SerialPort::~SerialPort()
{
if (this->connected) {
this->connected = false;
CloseHandle(this->handler);
}
}
int SerialPort::readSerialPort(char *buffer, unsigned int buf_size)
{
DWORD bytesRead;
unsigned int toRead = 0;
ClearCommError(this->handler, &this->errors, &this->status);
if (this->status.cbInQue > 0) {
if (this->status.cbInQue > buf_size) {
toRead = buf_size;
}
else toRead = this->status.cbInQue;
}
if (ReadFile(this->handler, buffer, toRead, &bytesRead, NULL)) return bytesRead;
return 0;
}
bool SerialPort::writeSerialPort(char *buffer, unsigned int buf_size)
{
DWORD bytesSend;
if (!WriteFile(this->handler, (void*)buffer, buf_size, &bytesSend, 0)) {
ClearCommError(this->handler, &this->errors, &this->status);
return false;
}
else return true;
}
bool SerialPort::isConnected()
{
return this->connected;
}
###################faceDetect.cpp################
// CPP program to detects face in a video
// Include required header files from OpenCV directory
#include <opencv2/objdetect.hpp>
#include <opencv2/highgui.hpp>
#include <opencv2/imgproc.hpp>
#include <iostream>
#include <opencv2/opencv.hpp>
#include <string>
#include <stdlib.h>
#include "SerialPort.h"
#include <sstream>
#include <iomanip>
using namespace std;
using namespace cv;
//Set up serial comm
char output[MAX_DATA_LENGTH];
char port[] = "\\\\.\\COM3";
char incoming[MAX_DATA_LENGTH];
// Function for Face Detection
void detectAndDraw(Mat& img, CascadeClassifier& cascade, double scale, SerialPort arduino);
string cascadeName;
// Function for sending locations to arduino
void sendSerial(string locations, SerialPort arduino);
int main(int argc, const char** argv)
{
//Establish connection to serial
SerialPort arduino(port);
if (arduino.isConnected()) {
cout << "COnnection Established" << endl;
}
else {
cout << "Error in port name" << endl;
}
// VideoCapture class for playing video for which faces to be detected
VideoCapture capture;
Mat frame, image;
// PreDefined trained XML classifiers with facial features
CascadeClassifier cascade;
double scale = 1;
// Change path before execution
cascade.load("C:/opencv/sources/data/haarcascades/haarcascade_frontalface_default.xml");
// Start Video..1) 0 for WebCam 2) "Path to Video" for a Local Video
capture.open(CAP_MSMF);
//sendSerial("400:100", arduino);
if (capture.isOpened())
{
// Capture frames from video and detect faces
cout << "Face Detection Started...." << endl;
while (1)
{
capture >> frame;
if (frame.empty())
break;
Mat frame1 = frame.clone();
detectAndDraw(frame1, cascade, scale, arduino);
char c = (char)waitKey(10);
// Press q to exit from window
if (c == 27 || c == 'q' || c == 'Q')
break;
}
}
else
cout << "Could not Open Camera";
return 0;
}
void sendSerial(string locations, SerialPort arduino) {
//string command;
//command = to_string(xloc);
cout << locations << endl;
char *charArray = new char[locations.size() + 1];
copy(locations.begin(), locations.end(), charArray);
charArray[locations.size()] = '\n';
arduino.writeSerialPort(charArray, MAX_DATA_LENGTH);
//arduino.readSerialPort(output, MAX_DATA_LENGTH);
//cout << output;
delete[] charArray;
//
//command = to_string(yloc);
//copy(command.begin(), command.end(), charArray);
//charArray[command.size()] = '\n';
//arduino.writeSerialPort(charArray, MAX_DATA_LENGTH);
////arduino.readSerialPort(output, MAX_DATA_LENGTH);
////cout << output;
//delete[] charArray;
}
void detectAndDraw(Mat& img, CascadeClassifier& cascade,
double scale, SerialPort arduino)
{
vector<Rect> faces;
Mat gray, smallImg;
cvtColor(img, gray, COLOR_BGR2GRAY); // Convert to Gray Scale
double fx = 1 / scale;
// Resize the Grayscale Image
resize(gray, smallImg, Size(), fx, fx, INTER_LINEAR);
equalizeHist(smallImg, smallImg);
// Detect faces of different sizes using cascade classifier
cascade.detectMultiScale(smallImg, faces, 1.1,
2, 0 | CASCADE_SCALE_IMAGE, Size(30, 30));
// Draw circles around the faces
for (size_t i = 0; i < faces.size(); i++)
{
Rect r = faces[i];
Mat smallImgROI;
int x = faces[i].x;
int y = faces[i].y;
int h = y + faces[i].height;
int w = x + faces[i].width;
int centerX = x + (.5* faces[i].width);
int centerY = y + (.5* faces[i].height);
if (abs(320 - centerX) <= 50) {
if (abs(240 - centerY) <= 50) {
rectangle(img,
Point(x, y),
Point(w, h),
Scalar(0, 0, 275),
2,
8,
0);
}
}
else {
rectangle(img,
Point(x, y),
Point(w, h),
Scalar(275, 275, 275),
2,
8,
0);
}
stringstream stringX;
stringstream stringY;
stringX << std::setw(3) << std::setfill('0') << centerX;
stringY << std::setw(3) << std::setfill('0') << centerY;
std::stringstream ss;
//ss << std::setw(3) << std::setfill('0') << centerX << ":"<< centerY;
//std::string s = ss.str();
std::string s = stringX.str() + ":" + stringY.str();
//cout << s << endl;
sendSerial(s, arduino);
smallImgROI = smallImg(r);
if (arduino.isConnected()) {
cout << "COnnection Established" << endl;
//sendSerial("400:100", arduino);
}
}
// Show Processed Image with detected faces
imshow("Face Detection", img);
}
#####################arduino code################
#include <Servo.h>
String input;
char array[6];
char *strings[3];
char *ptr = NULL;
int xloc;
int yloc;
int hServoPin = 9;
Servo hServo;
int ledPin = 13;
void setup() {
//set up servos
hServo.attach(hServoPin);
//start serial connection
Serial.begin(9600);
//***** delete later *****
pinMode(ledPin, OUTPUT);
}
void loop() {
if(Serial.available()){
//grab "xloc:yloc" and convert to char array
input = Serial.readStringUntil('\n');
//delete later
//Serial.print("input; ");
//Serial.println(input);
for(int i = 0; i<6; i++){
array[i] = input.charAt(i);
//Serial.print(array[i]);
}
//split char array into two entities
byte index = 0;
ptr = strtok(array, ":;"); // takes a list of delimiters
while(ptr != NULL)
{
strings[index] = ptr;
index++;
ptr = strtok(NULL, ":;"); // takes a list of delimiters
//Serial.println("loop");
}
//set xloc and yloc respectively
xloc = atoi(strings[0]);
yloc = atoi(strings[1]);
}
if((xloc < 214)){
hServo.write(0);
delay(100);
}
else if((xloc > 214) && (xloc < 328)){
hServo.write(90);
delay(100);
}
else if((xloc > 328)){
hServo.write(180);
delay(100);
}
}
Now I'm using a RealSense D435 camera.
I installed the SDK 2.0 full package and upgraded the camera firmware from 5.1 to 5.9 (the latest version).
I want to write code that gets the color image and depth image using Visual Studio 2015,
so I coded:
#include <iostream>
#include "pxcsession.h"
#include "pxcprojection.h"
#include "pxcsensemanager.h"
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <Windows.h>
#pragma comment(lib, "winmm.lib")
using namespace cv;
using namespace std;
class RealSenseAsenseManager
{
public:
~RealSenseAsenseManager()
{
if (senseManager != 0) {
senseManager->Release();
}
}
void initialize()
{
senseManager = PXCSenseManager::CreateInstance();
if (senseManager == nullptr) {
throw std::runtime_error("SenseManager failed");
}
pxcStatus sts = senseManager->EnableStream(
PXCCapture::StreamType::STREAM_TYPE_DEPTH,
DEPTH_WIDTH, DEPTH_HEIGHT, DEPTH_FPS);
if (sts < PXC_STATUS_NO_ERROR) {
throw std::runtime_error("Depth stream activation failed");
}
sts = senseManager->Init();
if (sts < PXC_STATUS_NO_ERROR) {
throw std::runtime_error("Pipeline Initialzation failed");
}
senseManager->QueryCaptureManager()->QueryDevice()->SetMirrorMode(
PXCCapture::Device::MirrorMode::MIRROR_MODE_HORIZONTAL);
}
void run()
{
while (1) {
updateFrame();
auto ret = showImage();
if (!ret) {
break;
}
}
}
private:
void updateFrame()
{
pxcStatus sts = senseManager->AcquireFrame(false);
if (sts < PXC_STATUS_NO_ERROR) {
return;
}
const PXCCapture::Sample *sample = senseManager->QuerySample();
if (sample) {
updateDepthImage(sample->depth);
}
senseManager->ReleaseFrame();
}
void updateDepthImage(PXCImage* depthFrame)
{
if (depthFrame == 0) {
return;
}
PXCImage::ImageData data;
pxcStatus sts = depthFrame->AcquireAccess(
PXCImage::Access::ACCESS_READ,
PXCImage::PixelFormat::PIXEL_FORMAT_RGB32, &data);
if (sts < PXC_STATUS_NO_ERROR) {
throw std::runtime_error("Taking Depth image failed");
}
PXCImage::ImageInfo info = depthFrame->QueryInfo();
depthImage = cv::Mat(info.height, info.width, CV_8UC4);
memcpy(depthImage.data, data.planes[0], data.pitches[0] * info.height);
depthFrame->ReleaseAccess(&data);
}
bool showImage()
{
if (depthImage.rows == 0 || (depthImage.cols == 0)) {
return true;
}
cv::imshow("Depth Image", depthImage);
int c = cv::waitKey(10);
if ((c == 27) || (c == 'q') || (c == 'Q')) {
// ESC|q|Q for Exit
return false;
}
return true;
}
private:
cv::Mat depthImage;
PXCSenseManager *senseManager = 0;
const int DEPTH_WIDTH = 640;
const int DEPTH_HEIGHT = 480;
const int DEPTH_FPS = 30;
};
void main()
{
try {
RealSenseAsenseManager deep;
deep.initialize();
deep.run();
}
catch (std::exception& ex) {
std::cout << ex.what() << std::endl;
}
}
But this error appears.
sts = senseManager->Init();
if (sts < PXC_STATUS_NO_ERROR) {
throw std::runtime_error("Pipeline Initialzation failed");
}
Pipeline Initialization failed <-
I don't know how to solve this problem.
The depth camera connection is unlikely to be at fault: the color image is displayed; only the depth stream is unavailable.
How can I solve this problem?
Thank you for reading my question.
The D400 series cameras aren't compatible with the old Realsense SDK, only the new librealsense SDK, available here: https://github.com/IntelRealSense/librealsense.
A sample showing how to get the colour and depth images streaming is here: https://github.com/IntelRealSense/librealsense/tree/master/examples/capture
You can start by using one of the provided examples.
The code below configures the camera and renders Depth & RGB data:
(the example.hpp header is located in the main repo /examples dir)
#include <librealsense2/rs.hpp> // Include RealSense Cross Platform API
#include "example.hpp" // Include short list of convenience functions for rendering
// Capture Example demonstrates how to
// capture depth and color video streams and render them to the screen
int main(int argc, char * argv[]) try
{
rs2::log_to_console(RS2_LOG_SEVERITY_ERROR);
// Create a simple OpenGL window for rendering:
window app(1280, 720, "RealSense Capture Example");
// Declare two textures on the GPU, one for color and one for depth
texture depth_image, color_image;
// Declare depth colorizer for pretty visualization of depth data
rs2::colorizer color_map;
// Declare RealSense pipeline, encapsulating the actual device and sensors
rs2::pipeline pipe;
// Start streaming with default recommended configuration
pipe.start();
while(app) // Application still alive?
{
rs2::frameset data = pipe.wait_for_frames(); // Wait for next set of frames from the camera
rs2::frame depth = color_map(data.get_depth_frame()); // Find and colorize the depth data
rs2::frame color = data.get_color_frame(); // Find the color data
// For cameras that don't have RGB sensor, we'll render infrared frames instead of color
if (!color)
color = data.get_infrared_frame();
// Render depth on to the first half of the screen and color on to the second
depth_image.render(depth, { 0, 0, app.width() / 2, app.height() });
color_image.render(color, { app.width() / 2, 0, app.width() / 2, app.height() });
}
return EXIT_SUCCESS;
}
catch (const rs2::error & e)
{
std::cerr << "RealSense error calling " << e.get_failed_function() << "(" << e.get_failed_args() << "):\n " << e.what() << std::endl;
return EXIT_FAILURE;
}
catch (const std::exception& e)
{
std::cerr << e.what() << std::endl;
return EXIT_FAILURE;
}
I am currently working with the Kinect V2 (the one for the Xbox One) and I am trying to get a depth stream. I want to see what the depth sensor of the Kinect sees, but I can't manage to open a stream; I only managed to open a single frame with another piece of code, not a video. After some research I tried to use handles, but the code I wrote doesn't print the 'stream' line placed at the end of the code. I am working in VS2012, and the code is in C++.
I think this happens because I don't know how to use a handle correctly... If anybody could help me and explain what a handle is, other than a kind of pointer to something, it would be great. Thank you
Here is my code :
HRESULT hr=S_OK;
WAITABLE_HANDLE *stream=nullptr;
IKinectSensor* kinectSensor=nullptr;
if( SUCCEEDED(hr) )
{
std::cout << "Success IKinectSensor::GetDefaultSensor" << std::endl;
}
else
{
std::cout << "Failed IKinectSensor::GetDefaultSensor" << std::endl;
}
std::cout << "Opening sensors" << std::endl;
if(kinectSensor != NULL)
{
hr = kinectSensor->Open();
Sleep(sleeptime*5);
if( SUCCEEDED( hr ) )
{
std::cout << "Success IKinectSensor::Open" << std::endl;
}
else
{
std::cout << "Failed IKinectSensor::Open" << std::endl;
}
}
hr = kinectSensor->OpenMultiSourceFrameReader(FrameSourceTypes_Depth | FrameSourceTypes_Color , &multiSourceReader);
if( SUCCEEDED(hr) )
{
std::cout << "reader open" << std::endl;
hr = multiSourceReader->SubscribeMultiSourceFrameArrived(stream);
if( SUCCEEDED(hr) )
{
std::cout << "stream" << std::endl;
}
}
I didn't use a handle. See the code snippet below for getting the depth frame from a multisource frame reader and displaying it with OpenCV.
#include <Kinect.h>
#include <opencv2/core.hpp>
#include <opencv2/highgui.hpp>
// helper function
template <class Interface> inline void safe_release(Interface **ppT)
{
if (*ppT)
{
(*ppT)->Release();
*ppT = NULL;
}
}
bool setup_kinect_sensor_and_acquire_frame()
{
IKinectSensor* kinect_sensor = NULL;
// initialize kinect sensor
HRESULT hr = GetDefaultKinectSensor(&kinect_sensor);
if (FAILED(hr) || !kinect_sensor)
{
return false;
}
hr = kinect_sensor->Open();
if (FAILED(hr))
{
return false;
}
// initialize kinect multisource frame reader
IMultiSourceFrameReader* kinect_multisource_reader;
hr = kinect_sensor->OpenMultiSourceFrameReader(FrameSourceTypes_Depth, &kinect_multisource_reader);
if (FAILED(hr))
{
safe_release(&kinect_multisource_reader);
return false;
}
// acquire multisource frame
IMultiSourceFrame* p_multisource_frame = NULL;
hr = kinect_multisource_reader->AcquireLatestFrame(&p_multisource_frame);
if (FAILED(hr))
{
safe_release(&p_multisource_frame);
return false;
}
// get depth frame
IDepthFrameReference* p_depth_frame_ref = NULL;
hr = p_multisource_frame->get_DepthFrameReference(&p_depth_frame_ref);
if (FAILED(hr))
{
safe_release(&p_depth_frame_ref);
return false;
}
IDepthFrame* p_depth_frame = NULL;
IFrameDescription* p_frame_description = NULL;
int width = 0;
int height = 0;
unsigned short depth_min_distance = 0;
unsigned short depth_max_distance = 0;
unsigned short* p_depth_buffer = NULL;
hr = p_depth_frame_ref->AcquireFrame(&p_depth_frame);
if (SUCCEEDED(hr))
{
hr = p_depth_frame->get_FrameDescription(&p_frame_description);
}
if (SUCCEEDED(hr))
{
hr = p_frame_description->get_Width(&width);
}
if (SUCCEEDED(hr))
{
hr = p_frame_description->get_Height(&height);
}
if (width != 512 || height != 424)
{
safe_release(&p_depth_frame);
safe_release(&p_frame_description);
return false;
}
// process depth frame
if (SUCCEEDED(hr))
{
hr = p_depth_frame->get_DepthMinReliableDistance(&depth_min_distance);
}
if (SUCCEEDED(hr))
{
hr = p_depth_frame->get_DepthMaxReliableDistance(&depth_max_distance);
}
if (SUCCEEDED(hr))
{
int size = 512 * 424;
p_depth_buffer = new unsigned short[size];
hr = p_depth_frame->CopyFrameDataToArray(size, p_depth_buffer);
if (SUCCEEDED(hr))
{
cv::Mat depth_map(cv::Size(width, height), CV_16UC1, p_depth_buffer);
double scale = 255.0 / (depth_max_distance - depth_min_distance);
cv::Mat depth_8u;
depth_map.convertTo(depth_8u, CV_8UC1, scale, -depth_min_distance * scale); // map [min, max] to [0, 255]
cv::imshow("depth", depth_8u);
cv::waitKey(0); // pump the GUI event loop so the window actually renders
}
}
// Clean up depth frame
safe_release(&p_depth_frame_ref);
safe_release(&p_depth_frame);
safe_release(&p_frame_description);
if (p_depth_buffer != NULL) {
delete[] p_depth_buffer;
p_depth_buffer = NULL;
}
if (FAILED(hr))
{
return false;
}
else
{
return true;
}
}