Kinect SDK 2.0 handle and acquire depth frame - c++

I am currently working with the Kinect V2 (the one for the Xbox One) and I am trying to get a depth stream: I want to see what the Kinect's depth sensor sees. But I cannot manage to open a stream; I have only managed to grab a single frame with another piece of code, not a video. After some research I tried to use handles, but the code below never prints the final 'stream' line. I am working in VS2012 and the code is in C++.
I think this happens because I don't know how to use a handle correctly... If anybody could help me and explain what a handle is, beyond being a kind of pointer to something, that would be great. Thank you.
Here is my code:
HRESULT hr = S_OK;
WAITABLE_HANDLE *stream = nullptr;
IKinectSensor* kinectSensor = nullptr;
IMultiSourceFrameReader* multiSourceReader = nullptr;
hr = GetDefaultKinectSensor(&kinectSensor);
if( SUCCEEDED(hr) )
{
std::cout << "Success IKinectSensor::GetDefaultSensor" << std::endl;
}
else
{
std::cout << "Failed IKinectSensor::GetDefaultSensor" << std::endl;
}
std::cout << "Opening sensors" << std::endl;
if(kinectSensor != NULL)
{
hr = kinectSensor->Open();
Sleep(sleeptime*5);
if( SUCCEEDED( hr ) )
{
std::cout << "Success IKinectSensor::Open" << std::endl;
}
else
{
std::cout << "Failed IKinectSensor::Open" << std::endl;
}
}
hr = kinectSensor->OpenMultiSourceFrameReader(FrameSourceTypes_Depth | FrameSourceTypes_Color, &multiSourceReader);
if( SUCCEEDED(hr) )
{
std::cout << "reader open" << std::endl;
hr = multiSourceReader->SubscribeMultiSourceFrameArrived(stream);
if( SUCCEEDED(hr) )
{
std::cout << "stream" << std::endl;
}
}

I didn't use a handle. See the code snippet below for getting the depth frame from a multisource frame reader and displaying it with OpenCV.
#include <Kinect.h>
#include <opencv2/core.hpp>
#include <opencv2/highgui.hpp>
// helper function
template <class Interface> inline void safe_release(Interface **ppT)
{
if (*ppT)
{
(*ppT)->Release();
*ppT = NULL;
}
}
bool setup_kinect_sensor_and_acquire_frame()
{
IKinectSensor* kinect_sensor = nullptr;
// initialize kinect sensor
HRESULT hr = GetDefaultKinectSensor(&kinect_sensor);
if (FAILED(hr) || !kinect_sensor)
{
return false;
}
hr = kinect_sensor->Open();
if (FAILED(hr))
{
return false;
}
// initialize kinect multisource frame reader
IMultiSourceFrameReader* kinect_multisource_reader;
hr = kinect_sensor->OpenMultiSourceFrameReader(FrameSourceTypes_Depth, &kinect_multisource_reader);
if (FAILED(hr))
{
safe_release(&kinect_multisource_reader);
return false;
}
// acquire a multisource frame (note: right after the reader is opened the
// first frame may not be ready yet, so AcquireLatestFrame may need retrying)
IMultiSourceFrame* p_multisource_frame = NULL;
hr = kinect_multisource_reader->AcquireLatestFrame(&p_multisource_frame);
if (FAILED(hr))
{
safe_release(&p_multisource_frame);
return false;
}
// get depth frame
IDepthFrameReference* p_depth_frame_ref = NULL;
hr = p_multisource_frame->get_DepthFrameReference(&p_depth_frame_ref);
if (FAILED(hr))
{
safe_release(&p_depth_frame_ref);
return false;
}
IDepthFrame* p_depth_frame = NULL;
IFrameDescription* p_frame_description = NULL;
int width = 0;
int height = 0;
unsigned short depth_min_distance = 0;
unsigned short depth_max_distance = 0;
unsigned short* p_depth_buffer = NULL;
hr = p_depth_frame_ref->AcquireFrame(&p_depth_frame);
if (SUCCEEDED(hr))
{
hr = p_depth_frame->get_FrameDescription(&p_frame_description);
}
if (SUCCEEDED(hr))
{
hr = p_frame_description->get_Width(&width);
}
if (SUCCEEDED(hr))
{
hr = p_frame_description->get_Height(&height);
}
if (width != 512 || height != 424)
{
safe_release(&p_depth_frame);
safe_release(&p_frame_description);
safe_release(&p_depth_frame_ref);
safe_release(&p_multisource_frame);
return false;
}
// process depth frame
if (SUCCEEDED(hr))
{
hr = p_depth_frame->get_DepthMinReliableDistance(&depth_min_distance);
}
if (SUCCEEDED(hr))
{
hr = p_depth_frame->get_DepthMaxReliableDistance(&depth_max_distance);
}
if (SUCCEEDED(hr))
{
int size = 512 * 424;
p_depth_buffer = new unsigned short[size];
hr = p_depth_frame->CopyFrameDataToArray(size, p_depth_buffer);
if (SUCCEEDED(hr))
{
cv::Mat depth_map(cv::Size(width, height), CV_16UC1, p_depth_buffer);
cv::Mat depth_8bit;
double scale = 255.0 / (depth_max_distance - depth_min_distance);
depth_map.convertTo(depth_8bit, CV_8UC1, scale, -depth_min_distance * scale);
cv::imshow("depth", depth_8bit);
cv::waitKey(0); // required so HighGUI actually draws the window
}
}
// Clean up
safe_release(&p_depth_frame_ref);
safe_release(&p_depth_frame);
safe_release(&p_frame_description);
safe_release(&p_multisource_frame);
if (p_depth_buffer != NULL) {
delete[] p_depth_buffer;
p_depth_buffer = NULL;
}
if (FAILED(hr))
{
return false;
}
else
{
return true;
}
}
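To address the handle question directly: a WAITABLE_HANDLE is not a pointer to data but an event handle that the Kinect runtime signals whenever a new frame arrives; you wait on it with WaitForSingleObject and then ask the reader for the event data. The code in the question passes a null WAITABLE_HANDLE* to SubscribeMultiSourceFrameArrived, which is presumably why 'stream' is never printed: the method needs the address of an actual WAITABLE_HANDLE variable. Here is a minimal, untested sketch of that event-driven pattern, assuming the multiSourceReader from the question, the safe_release helper from above, and <windows.h> for WaitForSingleObject:
WAITABLE_HANDLE frameEvent = 0;
HRESULT hr = multiSourceReader->SubscribeMultiSourceFrameArrived(&frameEvent);
if (SUCCEEDED(hr))
{
    bool running = true; // set to false from your exit logic
    while (running)
    {
        // Wait up to 100 ms for the reader to signal that a frame is ready
        if (WaitForSingleObject(reinterpret_cast<HANDLE>(frameEvent), 100) != WAIT_OBJECT_0)
            continue;
        IMultiSourceFrameArrivedEventArgs* args = nullptr;
        if (FAILED(multiSourceReader->GetMultiSourceFrameArrivedEventData(frameEvent, &args)))
            continue;
        IMultiSourceFrameReference* frame_ref = nullptr;
        if (SUCCEEDED(args->get_FrameReference(&frame_ref)))
        {
            IMultiSourceFrame* frame = nullptr;
            if (SUCCEEDED(frame_ref->AcquireFrame(&frame)))
            {
                // ... extract and display the depth frame as in the code above ...
                safe_release(&frame);
            }
            safe_release(&frame_ref);
        }
        safe_release(&args);
    }
    multiSourceReader->UnsubscribeMultiSourceFrameArrived(frameEvent);
}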


How do you get the NamePropertyId of a UIAutomationElement by hovering the cursor?

I am trying to build my own screen reader using UIAutomation. I want my program to return the NameProperty of the element pointed to by my cursor.
This is what I have done so far; it is just sample code anyway:
#include <iostream>
#include <windows.h>
#include <UIAutomation.h>
const int MAX_WND_TEXT = 60;
IUIAutomation *automation = NULL;
BOOL InitializeUIAutomation(IUIAutomation **pAutomation)
{
CoInitialize(NULL);
HRESULT hr = CoCreateInstance(__uuidof(CUIAutomation), NULL,
CLSCTX_INPROC_SERVER,
__uuidof(IUIAutomation), (void**)pAutomation);
return (SUCCEEDED(hr));
}
int main()
{
POINT p;
IUIAutomationElement *elem;
wchar_t wndName[MAX_WND_TEXT];
BOOL stat = InitializeUIAutomation(&automation);
while (true)
{
if (stat)
{
GetCursorPos(&p);
HRESULT hr = automation->ElementFromPoint(p, &elem);
if (SUCCEEDED(hr))
{
HRESULT hr = elem->GetCurrentPropertyValue(UIA_NamePropertyId,
(VARIANT*)wndName);
if (SUCCEEDED(hr))
std::cout << wndName << std::endl;
else
wndName[0] = '\0';
}
else
std::cout << "No element selected." << std::endl;
Sleep(100);
elem->Release();
}
}
automation->Release();
CoUninitialize();
return 0;
}
Now the problem is that I can't make it print the values I want. The program just outputs a specific hex number. Also, I'm still a beginner with UIAutomation, so I am still lost.
Can you help me or give me tips on how to solve my problem?
Solved my problem using this code.
#include <iostream>
#include <string>
#include <Windows.h>
#include <UIAutomation.h>
BOOL InitializeUIAutomation(IUIAutomation **automation)
{
CoInitialize(NULL);
HRESULT hr = CoCreateInstance(__uuidof(CUIAutomation), NULL,
CLSCTX_INPROC_SERVER, __uuidof(IUIAutomation),
(void**)automation);
return (SUCCEEDED(hr));
}
int main()
{
IUIAutomation *automation = NULL;
IUIAutomationElement *elem = NULL;
BOOL stat = InitializeUIAutomation(&automation);
POINT mousePt;
BSTR elemName = NULL;
if (stat)
{
while(true)
{
GetCursorPos(&mousePt);
HRESULT hr = automation->ElementFromPoint(mousePt, &elem);
if (SUCCEEDED(hr) && elem != NULL)
{
elem->get_CurrentName(&elemName);
std::wstring ws(elemName, SysStringLen(elemName));
std::wcout << ws << std::endl;
SysFreeString(elemName);
elemName = NULL;
elem->Release();
elem = NULL;
}
Sleep(200);
}
}
automation->Release();
CoUninitialize();
return 0;
}
The hex numbers printed were the BSTR header after all. I solved my problem by converting the BSTR to a wstring.
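For reference, here is a minimal sketch of doing the same thing through GetCurrentPropertyValue, assuming the same elem as above: the method must be given a pointer to a real VARIANT (not a wchar_t buffer cast to VARIANT*), and the string then lives in its bstrVal member. Note also that std::cout prints a wchar_t pointer as an address, which is another way to end up with hex output; std::wcout prints the characters.
VARIANT var;
VariantInit(&var);
HRESULT hr = elem->GetCurrentPropertyValue(UIA_NamePropertyId, &var);
if (SUCCEEDED(hr) && var.vt == VT_BSTR && var.bstrVal != NULL)
{
    std::wstring ws(var.bstrVal, SysStringLen(var.bstrVal));
    std::wcout << ws << std::endl;
}
VariantClear(&var); // frees the BSTR owned by the VARIANT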

How can I capture one frame using a Point Cloud Library?

// Sample_PCL.cpp : Defines the entry point for the console application.
//
#include "stdafx.h"
#define NOMINMAX
#include <Windows.h>
#include <Kinect.h> // Header for using the Kinect
#include <pcl/visualization/cloud_viewer.h> // Header for displaying with PCL
#include <pcl/io/pcd_io.h> // Headers for saving point cloud data (.pcd, .ply)
//#include <pcl/io/ply_io.h>
#include <pcl/point_types.h>
#include <iostream>
template<class Interface>
inline void SafeRelease(Interface *& pInterfaceToRelease)
{
if (pInterfaceToRelease != NULL) {
pInterfaceToRelease->Release();
}
}
int main()
{
// Create Sensor Instance
IKinectSensor* pSensor;
HRESULT hResult = S_OK;
hResult = GetDefaultKinectSensor(&pSensor);
if (FAILED(hResult)) {
std::cerr << "Error : GetDefaultKinectSensor" << std::endl;
return -1;
}
printf("GetDfaultKinectSensor is OK\n");
// Open Sensor
hResult = pSensor->Open();
if (FAILED(hResult)) {
std::cerr << "Error : IKinectSensor::Open()" << std::endl;
return -1;
}
printf("IKinectSensor::Open is OK\n");
// Retrieved Coordinate Mapper
ICoordinateMapper* pCoordinateMapper;
hResult = pSensor->get_CoordinateMapper(&pCoordinateMapper);
if (FAILED(hResult)) {
std::cerr << "Error : IKinectSensor::get_CoordinateMapper()" << std::endl;
return -1;
}
printf("IKinectSensor::get_CoordinateMapper is OK\n");
// Retrieved Color Frame Source
IColorFrameSource* pColorSource;
hResult = pSensor->get_ColorFrameSource(&pColorSource);
if (FAILED(hResult)) {
std::cerr << "Error : IKinectSensor::get_ColorFrameSource()" << std::endl;
return -1;
}
printf("IKinectSensor::get_ColorFrameSource is OK\n");
// Open Color Frame Reader
IColorFrameReader* pColorReader;
hResult = pColorSource->OpenReader(&pColorReader);
if (FAILED(hResult)) {
std::cerr << "Error : IColorFrameSource::OpenReader()" << std::endl;
return -1;
}
printf("IColorFrameSource::OpenReader is OK\n");
// Retrieved Depth Frame Source
IDepthFrameSource* pDepthSource;
hResult = pSensor->get_DepthFrameSource(&pDepthSource);
if (FAILED(hResult)) {
std::cerr << "Error : IKinectSensor::get_DepthFrameSource()" << std::endl;
return -1;
}
printf("IKinectSensor::get_DepthFrameSource is OK\n");
// Open Depth Frame Reader
IDepthFrameReader* pDepthReader;
hResult = pDepthSource->OpenReader(&pDepthReader);
if (FAILED(hResult)) {
std::cerr << "Error : IDepthFrameSource::OpenReader()" << std::endl;
return -1;
}
printf("IDepthFrameSource::OpenReader is OK\n");
// Retrieved Color Frame Size
IFrameDescription* pColorDescription;
hResult = pColorSource->get_FrameDescription(&pColorDescription);
if (FAILED(hResult)) {
std::cerr << "Error : IColorFrameSource::get_FrameDescription()" << std::endl;
return -1;
}
printf("IColorFrameSource::get_FrameDescription is OK\n");
int colorWidth = 0;
int colorHeight = 0;
pColorDescription->get_Width(&colorWidth); // 1920
pColorDescription->get_Height(&colorHeight); // 1080
// To Reserve Color Frame Buffer
std::vector<RGBQUAD> colorBuffer(colorWidth * colorHeight);
// Retrieved Depth Frame Size
IFrameDescription* pDepthDescription;
hResult = pDepthSource->get_FrameDescription(&pDepthDescription);
if (FAILED(hResult)) {
std::cerr << "Error : IDepthFrameSource::get_FrameDescription()" << std::endl;
return -1;
}
printf("IDepthFrameSource::get_FrameDescription is OK\n");
int depthWidth = 0;
int depthHeight = 0;
pDepthDescription->get_Width(&depthWidth); // 512
pDepthDescription->get_Height(&depthHeight); // 424
// To Reserve Depth Frame Buffer
std::vector<UINT16> depthBuffer(depthWidth * depthHeight);
printf("Display Point Cloud\n");
// Create Cloud Viewer
pcl::visualization::CloudViewer viewer("Point Cloud Viewer"); // Display the point cloud in a window
while (!viewer.wasStopped()) {
// Acquire Latest Color Frame
IColorFrame* pColorFrame = nullptr;
hResult = pColorReader->AcquireLatestFrame(&pColorFrame);
if (SUCCEEDED(hResult)) {
// Retrieved Color Data
hResult = pColorFrame->CopyConvertedFrameDataToArray(colorBuffer.size() * sizeof(RGBQUAD), reinterpret_cast<BYTE*>(&colorBuffer[0]), ColorImageFormat::ColorImageFormat_Bgra);
if (FAILED(hResult)) {
std::cerr << "Error : IColorFrame::CopyConvertedFrameDataToArray()" << std::endl;
}
}
SafeRelease(pColorFrame);
// Acquire Latest Depth Frame
IDepthFrame* pDepthFrame = nullptr;
hResult = pDepthReader->AcquireLatestFrame(&pDepthFrame);
if (SUCCEEDED(hResult)) {
// Retrieved Depth Data
hResult = pDepthFrame->CopyFrameDataToArray(depthBuffer.size(), &depthBuffer[0]);
if (FAILED(hResult)) {
std::cerr << "Error : IDepthFrame::CopyFrameDataToArray()" << std::endl;
}
}
SafeRelease(pDepthFrame);
// Point Cloud Library setup
// Create Point Cloud
pcl::PointCloud<pcl::PointXYZRGB>::Ptr pointcloud(new pcl::PointCloud<pcl::PointXYZRGB>()); // PCL structure
pointcloud->width = static_cast<uint32_t>(depthWidth); // Number of points
pointcloud->height = static_cast<uint32_t>(depthHeight);
pointcloud->is_dense = false;
for (int y = 0; y < depthHeight; y++) {
for (int x = 0; x < depthWidth; x++) {
pcl::PointXYZRGB point; // Point data used by PCL
DepthSpacePoint depthSpacePoint = { static_cast<float>(x), static_cast<float>(y) };
UINT16 depth = depthBuffer[y * depthWidth + x];
// Coordinate Mapping Depth to Color Space, and Setting PointCloud RGB
ColorSpacePoint colorSpacePoint = { 0.0f, 0.0f };
pCoordinateMapper->MapDepthPointToColorSpace(depthSpacePoint, depth, &colorSpacePoint); // Color coordinate system
int colorX = static_cast<int>(std::floor(colorSpacePoint.X + 0.5f));
int colorY = static_cast<int>(std::floor(colorSpacePoint.Y + 0.5f));
if ((0 <= colorX) && (colorX < colorWidth) && (0 <= colorY) && (colorY < colorHeight)) {
RGBQUAD color = colorBuffer[colorY * colorWidth + colorX];
// Store the color information
point.b = color.rgbBlue;
point.g = color.rgbGreen;
point.r = color.rgbRed;
}
// Coordinate Mapping Depth to Camera Space, and Setting PointCloud XYZ
CameraSpacePoint cameraSpacePoint = { 0.0f, 0.0f, 0.0f }; // Camera space
pCoordinateMapper->MapDepthPointToCameraSpace(depthSpacePoint, depth, &cameraSpacePoint);
if ((0 <= colorX) && (colorX < colorWidth) && (0 <= colorY) && (colorY < colorHeight)) {
// Store the Cartesian coordinates
point.x = cameraSpacePoint.X;
point.y = cameraSpacePoint.Y;
point.z = cameraSpacePoint.Z;
}
pointcloud->push_back(point);
}
}
// Show Point Cloud on Cloud Viewer
viewer.showCloud(pointcloud);
// Input Key ( Exit ESC key )
if (GetKeyState(VK_ESCAPE) < 0) {
pcl::io::savePCDFile("PointCloud.pcd", *pointcloud);
//pcl::io::savePLYFile("test_pcd2.ply", *pointcloud); // Save the last acquired point cloud
printf("Save Point Cloud Data\n");
//break;
}
}
// End Processing
SafeRelease(pColorSource);
SafeRelease(pDepthSource);
SafeRelease(pColorReader);
SafeRelease(pDepthReader);
SafeRelease(pColorDescription);
SafeRelease(pDepthDescription);
SafeRelease(pCoordinateMapper);
if (pSensor) {
pSensor->Close();
}
SafeRelease(pSensor);
printf("Disconnect Kinect Sensor\n");
return 0;
}
The previous code, taken from a tutorial on the Point Cloud Library website, uses the Kinect to display a point cloud of what the Kinect sees in real time, so the point cloud is constantly changing. That is why I would like to capture just one frame; in other words, I would like the point cloud to freeze instead of constantly capturing new frames.
And here is my modification:
// Sample_PCL.cpp : Defines the entry point for the console application.
//
#include "stdafx.h"
#define NOMINMAX
#include <Windows.h>
#include <Kinect.h> // Header for using the Kinect
#include <pcl/visualization/cloud_viewer.h> // Header for displaying with PCL
#include <pcl/io/pcd_io.h> // Headers for saving point cloud data (.pcd, .ply)
//#include <pcl/io/ply_io.h>
#include <pcl/point_types.h>
#include <iostream>
template<class Interface>
inline void SafeRelease(Interface *& pInterfaceToRelease)
{
if (pInterfaceToRelease != NULL) {
pInterfaceToRelease->Release();
}
}
int main()
{
// Create Sensor Instance
IKinectSensor* pSensor;
HRESULT hResult = S_OK;
hResult = GetDefaultKinectSensor(&pSensor);
if (FAILED(hResult)) {
std::cerr << "Error : GetDefaultKinectSensor" << std::endl;
return -1;
}
printf("GetDfaultKinectSensor is OK\n");
// Open Sensor
hResult = pSensor->Open();
if (FAILED(hResult)) {
std::cerr << "Error : IKinectSensor::Open()" << std::endl;
return -1;
}
printf("IKinectSensor::Open is OK\n");
// Retrieved Coordinate Mapper
ICoordinateMapper* pCoordinateMapper;
hResult = pSensor->get_CoordinateMapper(&pCoordinateMapper);
if (FAILED(hResult)) {
std::cerr << "Error : IKinectSensor::get_CoordinateMapper()" << std::endl;
return -1;
}
printf("IKinectSensor::get_CoordinateMapper is OK\n");
// Retrieved Color Frame Source
IColorFrameSource* pColorSource;
hResult = pSensor->get_ColorFrameSource(&pColorSource);
if (FAILED(hResult)) {
std::cerr << "Error : IKinectSensor::get_ColorFrameSource()" << std::endl;
return -1;
}
printf("IKinectSensor::get_ColorFrameSource is OK\n");
// Open Color Frame Reader
IColorFrameReader* pColorReader;
hResult = pColorSource->OpenReader(&pColorReader);
if (FAILED(hResult)) {
std::cerr << "Error : IColorFrameSource::OpenReader()" << std::endl;
return -1;
}
printf("IColorFrameSource::OpenReader is OK\n");
// Retrieved Depth Frame Source
IDepthFrameSource* pDepthSource;
hResult = pSensor->get_DepthFrameSource(&pDepthSource);
if (FAILED(hResult)) {
std::cerr << "Error : IKinectSensor::get_DepthFrameSource()" << std::endl;
return -1;
}
printf("IKinectSensor::get_DepthFrameSource is OK\n");
// Open Depth Frame Reader
IDepthFrameReader* pDepthReader;
hResult = pDepthSource->OpenReader(&pDepthReader);
if (FAILED(hResult)) {
std::cerr << "Error : IDepthFrameSource::OpenReader()" << std::endl;
return -1;
}
printf("IDepthFrameSource::OpenReader is OK\n");
// Retrieved Color Frame Size
IFrameDescription* pColorDescription;
hResult = pColorSource->get_FrameDescription(&pColorDescription);
if (FAILED(hResult)) {
std::cerr << "Error : IColorFrameSource::get_FrameDescription()" << std::endl;
return -1;
}
printf("IColorFrameSource::get_FrameDescription is OK\n");
int colorWidth = 0;
int colorHeight = 0;
pColorDescription->get_Width(&colorWidth); // 1920
pColorDescription->get_Height(&colorHeight); // 1080
// To Reserve Color Frame Buffer
std::vector<RGBQUAD> colorBuffer(colorWidth * colorHeight);
// Retrieved Depth Frame Size
IFrameDescription* pDepthDescription;
hResult = pDepthSource->get_FrameDescription(&pDepthDescription);
if (FAILED(hResult)) {
std::cerr << "Error : IDepthFrameSource::get_FrameDescription()" << std::endl;
return -1;
}
printf("IDepthFrameSource::get_FrameDescription is OK\n");
int depthWidth = 0;
int depthHeight = 0;
pDepthDescription->get_Width(&depthWidth); // 512
pDepthDescription->get_Height(&depthHeight); // 424
// To Reserve Depth Frame Buffer
std::vector<UINT16> depthBuffer(depthWidth * depthHeight);
printf("Display Point Cloud\n");
// Create Cloud Viewer
pcl::visualization::CloudViewer viewer("Point Cloud Viewer"); // Display the point cloud in a window
//while (!viewer.wasStopped()) {
// Acquire Latest Color Frame
IColorFrame* pColorFrame = nullptr;
hResult = pColorReader->AcquireLatestFrame(&pColorFrame);
if (SUCCEEDED(hResult)) {
// Retrieved Color Data
hResult = pColorFrame->CopyConvertedFrameDataToArray(colorBuffer.size() * sizeof(RGBQUAD), reinterpret_cast<BYTE*>(&colorBuffer[0]), ColorImageFormat::ColorImageFormat_Bgra);
if (FAILED(hResult)) {
std::cerr << "Error : IColorFrame::CopyConvertedFrameDataToArray()" << std::endl;
}
}
SafeRelease(pColorFrame);
// Acquire Latest Depth Frame
IDepthFrame* pDepthFrame = nullptr;
hResult = pDepthReader->AcquireLatestFrame(&pDepthFrame);
if (SUCCEEDED(hResult)) {
// Retrieved Depth Data
hResult = pDepthFrame->CopyFrameDataToArray(depthBuffer.size(), &depthBuffer[0]);
if (FAILED(hResult)) {
std::cerr << "Error : IDepthFrame::CopyFrameDataToArray()" << std::endl;
}
}
SafeRelease(pDepthFrame);
// Point Cloud Library setup
// Create Point Cloud
pcl::PointCloud<pcl::PointXYZRGB>::Ptr pointcloud(new pcl::PointCloud<pcl::PointXYZRGB>()); // PCL structure
pointcloud->width = static_cast<uint32_t>(depthWidth); // Number of points
pointcloud->height = static_cast<uint32_t>(depthHeight);
pointcloud->is_dense = false;
for (int y = 0; y < depthHeight; y++) {
for (int x = 0; x < depthWidth; x++) {
//printf("scann\n");
pcl::PointXYZRGB point; // Point data used by PCL
DepthSpacePoint depthSpacePoint = { static_cast<float>(x), static_cast<float>(y) };
UINT16 depth = depthBuffer[y * depthWidth + x];
// Coordinate Mapping Depth to Color Space, and Setting PointCloud RGB
ColorSpacePoint colorSpacePoint = { 0.0f, 0.0f };
pCoordinateMapper->MapDepthPointToColorSpace(depthSpacePoint, depth, &colorSpacePoint); // Color coordinate system
int colorX = static_cast<int>(std::floor(colorSpacePoint.X + 0.5f));
int colorY = static_cast<int>(std::floor(colorSpacePoint.Y + 0.5f));
if ((0 <= colorX) && (colorX < colorWidth) && (0 <= colorY) && (colorY < colorHeight)) {
RGBQUAD color = colorBuffer[colorY * colorWidth + colorX];
// Store the color information
point.b = color.rgbBlue;
point.g = color.rgbGreen;
point.r = color.rgbRed;
}
// Coordinate Mapping Depth to Camera Space, and Setting PointCloud XYZ
CameraSpacePoint cameraSpacePoint = { 0.0f, 0.0f, 0.0f }; // Camera space
pCoordinateMapper->MapDepthPointToCameraSpace(depthSpacePoint, depth, &cameraSpacePoint);
if ((0 <= colorX) && (colorX < colorWidth) && (0 <= colorY) && (colorY < colorHeight)) {
// Store the Cartesian coordinates
point.x = cameraSpacePoint.X;
point.y = cameraSpacePoint.Y;
point.z = cameraSpacePoint.Z;
}
pointcloud->push_back(point);
}
//}
viewer.showCloud(pointcloud);
while (!viewer.wasStopped())
{
}
/*pcl::io::savePCDFile("PointCloud.pcd", *pointcloud);
printf("Saved Point Cloud Data\n");
// Show Point Cloud on Cloud Viewer
printf("Open viewer\n");
viewer.showCloud(pointcloud);
while (!viewer.wasStopped()) {
}*/
// Input Key ( Exit ESC key )
if (GetKeyState(VK_ESCAPE) < 0) {
pcl::io::savePCDFile("PointCloud.pcd", *pointcloud);
//pcl::io::savePLYFile("test_pcd2.ply", *pointcloud); // Save the last acquired point cloud
printf("Save Point Cloud Data\n");
//break;
}
}
// End Processing
SafeRelease(pColorSource);
SafeRelease(pDepthSource);
SafeRelease(pColorReader);
SafeRelease(pDepthReader);
SafeRelease(pColorDescription);
SafeRelease(pDepthDescription);
SafeRelease(pCoordinateMapper);
if (pSensor) {
pSensor->Close();
}
SafeRelease(pSensor);
printf("Disconnect Kinect Sensor\n");
return 0;
}
The modification mainly consists of removing the loop that constantly updates the point cloud (you can see it commented out in the second code):
while (!viewer.wasStopped())
The problem is that the point cloud viewer does not display any of the data received from the Kinect, and I would like to know why nothing is displayed.
Your code only seems to show the very first frame it receives from the Kinect, which might be empty or invalid. Did you check if the points in the cloud you're taking are sane?
However, you might want to approach the problem differently:
Leave the visualization loop as it is.
Register a key handler using registerKeyboardCallback (doc).
When a specific key is pressed, set a boolean to true.
In the visualization loop, if that boolean variable is true, skip the frame grabbing from the Kinect; the viewer should retain the previously set cloud (a sketch follows below).
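A minimal sketch of that approach, reusing the variables from your code (the frozen flag and the choice of the 'f' key are illustrative, not part of any API):
// File-scope flag: when true, stop grabbing new frames
bool frozen = false;
// Toggle the freeze whenever 'f' is pressed
void keyboard_handler(const pcl::visualization::KeyboardEvent& event, void* /*cookie*/)
{
    if (event.keyDown() && event.getKeySym() == "f")
        frozen = !frozen;
}
// In main(), after creating the viewer:
// viewer.registerKeyboardCallback(keyboard_handler);
// while (!viewer.wasStopped()) {
//     if (!frozen) {
//         // ... acquire the color/depth frames and rebuild 'pointcloud' as before ...
//         viewer.showCloud(pointcloud);
//     }
//     // while frozen, nothing is grabbed and the viewer keeps the last cloud
// }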

VisualGestureBuilder API not working when more than one player

I'm using the VisualGestureBuilder C++ API to track three simple discrete gestures.
When there is only one player in front of the Kinect, it works great.
But as soon as a second player arrives, the gesture analysis seems to go crazy: for both players I get erratic yet smooth results, as if the data for the two players and for the three gestures were mixed together with random data added...
And while the VGB API is going wrong, I still get valid body data (I'm drawing skeletons at the same time).
I discovered that this phenomenon is triggered as soon as more than one IVisualGestureBuilderFrameReader is in the not-paused state.
Here are some portions of my code:
class GESTURES_STREAM
{
friend class MY_KINECT;
private:
struct SLOT
{
IVisualGestureBuilderDatabase* database = nullptr;
IGesture** gestures = nullptr;
UINT gestures_count = 0;
IVisualGestureBuilderFrameSource* source = nullptr;
IVisualGestureBuilderFrameReader* reader = nullptr;
IVisualGestureBuilderFrame* frame = nullptr;
};
SLOT slots[BODY_COUNT];
public:
GESTURES_RESULTS gestures_results[BODY_COUNT];
};
GESTURES_STREAM gestures_stream;
//----------------------------------------------------------------------------------------------------
void MY_KINECT::start_gestures_stream(wstring vgb_file)
{
for (int i = 0; i < BODY_COUNT; i++)
{
hr = CreateVisualGestureBuilderDatabaseInstanceFromFile(vgb_file.c_str(), &gestures_stream.slots[i].database);
if (FAILED(hr)) MY_UTILITIES::fatal_error("Erreur initialisation Kinect : gestures");
gestures_stream.slots[i].gestures_count = 0;
hr = gestures_stream.slots[i].database->get_AvailableGesturesCount(&gestures_stream.slots[i].gestures_count);
if (FAILED(hr)) MY_UTILITIES::fatal_error("Erreur initialisation Kinect : gestures");
if (gestures_stream.slots[i].gestures_count == 0)
{
MY_UTILITIES::fatal_error("Erreur initialisation Kinect : gestures");
}
gestures_stream.slots[i].gestures = new IGesture*[gestures_stream.slots[i].gestures_count];
hr = gestures_stream.slots[i].database->get_AvailableGestures(gestures_stream.slots[i].gestures_count, gestures_stream.slots[i].gestures);
if (FAILED(hr)) MY_UTILITIES::fatal_error("Erreur initialisation Kinect : gestures");
/////
hr = CreateVisualGestureBuilderFrameSource(kinect_sensor, 0, &gestures_stream.slots[i].source);
if (FAILED(hr)) MY_UTILITIES::fatal_error("Erreur initialisation Kinect : gestures");
hr = gestures_stream.slots[i].source->AddGestures(gestures_stream.slots[i].gestures_count, gestures_stream.slots[i].gestures);
if (FAILED(hr)) MY_UTILITIES::fatal_error("Erreur initialisation Kinect : gestures");
hr = gestures_stream.slots[i].source->OpenReader(&gestures_stream.slots[i].reader);
if (FAILED(hr)) MY_UTILITIES::fatal_error("Erreur initialisation Kinect : gestures");
hr = gestures_stream.slots[i].reader->put_IsPaused(TRUE);
if (FAILED(hr)) MY_UTILITIES::fatal_error("Erreur initialisation Kinect : gestures");
/////
safe_release(&gestures_stream.slots[i].database);
}
}
//----------------------------------------------------------------------------------------------------
void MY_KINECT::update()
{
//...
//...
// handling all the different stream, including body
//...
//...
if (body_stream.data_is_new == true)
{
for (int i = 0; i < BODY_COUNT; i++)
{
if (gestures_stream.slots[i].source != nullptr)
{
BOOLEAN is_tracked;
if (body_stream.bodies[i]->get_IsTracked(&is_tracked) == S_OK)
{
if (is_tracked == TRUE)
{
UINT64 a;
if (body_stream.bodies[i]->get_TrackingId(&a) == S_OK)
{
UINT64 b;
if (gestures_stream.slots[i].source->get_TrackingId(&b) == S_OK)
{
if (a != b)
{
gestures_stream.slots[i].source->put_TrackingId(a);
}
}
}
BOOLEAN paused;
if (gestures_stream.slots[i].reader->get_IsPaused(&paused) == S_OK)
{
if (paused == TRUE)
{
gestures_stream.slots[i].reader->put_IsPaused(FALSE);
}
}
}
else
{
BOOLEAN paused;
if (gestures_stream.slots[i].reader->get_IsPaused(&paused) == S_OK)
{
if (paused == FALSE)
{
gestures_stream.slots[i].reader->put_IsPaused(TRUE);
}
}
}
}
}
if (gestures_stream.slots[i].reader != nullptr)
{
if (gestures_stream.slots[i].reader->CalculateAndAcquireLatestFrame(&gestures_stream.slots[i].frame) == S_OK)
{
BOOLEAN is_valid;
if (gestures_stream.slots[i].frame->get_IsTrackingIdValid(&is_valid) == S_OK)
{
if (is_valid == TRUE)
{
for (int j = 0; j < gestures_stream.slots[i].gestures_count; j++)
{
wchar_t gesture_name[256];
if (gestures_stream.slots[i].gestures[j]->get_Name(_countof(gesture_name), gesture_name) == S_OK) // capacity in characters, not bytes
{
IDiscreteGestureResult* discrete_result = nullptr;
if (gestures_stream.slots[i].frame->get_DiscreteGestureResult(gestures_stream.slots[i].gestures[j], &discrete_result) == S_OK)
{
BOOLEAN detected;
if (discrete_result->get_Detected(&detected) == S_OK)
{
if (detected == TRUE)
{
float confidence;
if (discrete_result->get_Confidence(&confidence) == S_OK)
{
gestures_stream.gestures_results[i].results[gesture_name] = confidence;
}
}
else
{
gestures_stream.gestures_results[i].results[gesture_name] = 0;
}
}
safe_release(&discrete_result);
}
}
}
}
}
safe_release(&gestures_stream.slots[i].frame);
}
}
}
}
}
//----------------------------------------------------------------------------------------------------

IMFActivate::ActivateObject return error code "CoInitialize has not been called."

I'm writing a simple multimedia application in Visual Studio 2013, and I need to enumerate the camera devices connected to my computer and create a media source object linked to one of them. I use the Media Foundation SDK and tried to follow the guide here: https://msdn.microsoft.com/en-us/library/windows/desktop/dd940326(v=vs.85).aspx
#include <Mfapi.h>
#include <mfidl.h>
#include <mfobjects.h>
#include <iostream>
#pragma comment(lib, "Mfplat")
#pragma comment(lib, "Mf")
template <class T> void SafeRelease(T **ppT) {
if (*ppT) {
(*ppT)->Release();
*ppT = NULL;
}
}
HRESULT CreateVideoDeviceSource(IMFMediaSource **ppSource) {
*ppSource = NULL;
IMFMediaSource *pSource = NULL;
IMFAttributes *pAttributes = NULL;
IMFActivate **ppDevices = NULL;
// Create an attribute store to specify the enumeration parameters.
HRESULT hr = MFCreateAttributes(&pAttributes, 1);
if (FAILED(hr))
{
goto done;
}
// Source type: video capture devices
hr = pAttributes->SetGUID(
MF_DEVSOURCE_ATTRIBUTE_SOURCE_TYPE,
MF_DEVSOURCE_ATTRIBUTE_SOURCE_TYPE_VIDCAP_GUID
);
if (FAILED(hr))
{
goto done;
}
// Enumerate devices.
UINT32 count = 0; // initialized so the cleanup loop is safe on early goto
hr = MFEnumDeviceSources(pAttributes, &ppDevices, &count);
if (FAILED(hr))
{
goto done;
}
if (count == 0)
{
hr = E_FAIL;
goto done;
}
// Create the media source object.
hr = ppDevices[0]->ActivateObject(IID_PPV_ARGS(&pSource));
if (FAILED(hr))
{
std::cout << "Failed to create device object" << hr <<std::endl;
goto done;
}
*ppSource = pSource;
(*ppSource)->AddRef();
DWORD chs;
(*ppSource)->GetCharacteristics(&chs);
std::cout << chs << std::endl;
done:
SafeRelease(&pAttributes);
for (DWORD i = 0; i < count; i++)
{
SafeRelease(&ppDevices[i]);
}
CoTaskMemFree(ppDevices);
SafeRelease(&pSource);
return hr;
}
int main(int argc, char* argv[]) {
IMFMediaSource* ppSource;
CreateVideoDeviceSource(&ppSource);
std::cout << "END" << std::endl;
return 0;
}
The problem is that this part of the code:
// Create the media source object.
hr = ppDevices[0]->ActivateObject(IID_PPV_ARGS(&pSource));
if (FAILED(hr))
{
goto done;
}
fails to create a media source object: the HRESULT returned is 0x800401F0 (CO_E_NOTINITIALIZED), "CoInitialize has not been called." What does the error code mean, and what could be causing the failure? I'm using Windows 8.1.
COM libraries need to be initialized for each thread, through one of
CoInitialize
CoInitializeEx
OleInitialize
depending on which services are to be used in that thread.
Do this at the start of your program, for all threads that use COM, and don't forget to call the respective Uninitialize function.
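For instance, here is a minimal sketch of the fix applied to the main from the question (CoInitializeEx may require <objbase.h>, which usually comes in through the other headers; MFStartup/MFShutdown are added as well, since Media Foundation must also be initialized before use):
int main(int argc, char* argv[])
{
    // Initialize COM for this thread before any COM-based API is used
    HRESULT hr = CoInitializeEx(NULL, COINIT_MULTITHREADED);
    if (FAILED(hr))
        return -1;
    // Media Foundation has its own startup call as well
    hr = MFStartup(MF_VERSION);
    if (SUCCEEDED(hr))
    {
        IMFMediaSource* pSource = NULL;
        CreateVideoDeviceSource(&pSource);
        SafeRelease(&pSource);
        MFShutdown();
    }
    CoUninitialize();
    std::cout << "END" << std::endl;
    return 0;
}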

Windows media foundation enumerating Camera devices

I would like to enumerate the camera devices on my computer using Windows Media Foundation. I used the code from Microsoft: http://msdn.microsoft.com/en-us/library/windows/desktop/dd940326(v=vs.85).aspx
I reproduced the same code they use here: http://msdn.microsoft.com/en-us/library/windows/desktop/ee663604(v=vs.85).aspx
When I run their code I get my webcam's device name; however, my code doesn't find any camera capture device, and I'm unable to figure out why.
Here is the code:
#pragma once
#include <new>
#include <windows.h>
#include <mfapi.h>
#include <mfidl.h>
#include <mfreadwrite.h>
#include <Wmcodecdsp.h>
#include <assert.h>
#include <Dbt.h>
#include <shlwapi.h>
#include <mfplay.h>
#include <iostream>
const UINT WM_APP_PREVIEW_ERROR = WM_APP + 1; // wparam = HRESULT
class DeviceList
{
UINT32 m_cDevices; // contains the number of devices
IMFActivate **m_ppDevices; // contains properties about each device
public:
DeviceList() : m_ppDevices(NULL), m_cDevices(0)
{
}
~DeviceList()
{
Clear();
}
UINT32 Count() const { return m_cDevices; }
void Clear();
HRESULT EnumerateDevices();
HRESULT GetDevice(UINT32 index, IMFActivate **ppActivate);
HRESULT GetDeviceName(UINT32 index, WCHAR **ppszName);
};
#include "DeviceList.h"
/*
* A templated function SafeRelease which releases a COM pointer
* #param ppT the pointer to release
*/
template <class T> void SafeRelease(T **ppT)
{
if (*ppT)
{
(*ppT)->Release();
*ppT = NULL;
}
}
/*
* A function which copies attributes from a source to a destination
* # param pSrc is an interface storing key/value pairs of an object
* # param pDest is an interface storing key/value pairs of an object
* # param key is a unique identifier (GUID)
* # return HRESULT reporting error/warning conditions on Windows
*/
HRESULT CopyAttribute(IMFAttributes *pSrc, IMFAttributes *pDest, const GUID& key);
/*
* A method of DeviceList which clears the list of devices
*/
void DeviceList::Clear()
{
for (UINT32 i = 0; i < m_cDevices; i++)
{
SafeRelease(&m_ppDevices[i]);
}
CoTaskMemFree(m_ppDevices);
m_ppDevices = NULL;
m_cDevices = 0;
}
/*
* A method which enumerates the available capture devices.
* # return HRESULT reporting error/warning conditions on Windows
*/
HRESULT DeviceList::EnumerateDevices()
{
HRESULT hr = S_OK;
IMFAttributes *pAttributes = NULL;
this->Clear();
// Initialize an attribute store. We will use this to
// specify the enumeration parameters.
std::cout << "Enumerate devices" << std::endl;
hr = MFCreateAttributes(&pAttributes, 1);
// Ask for source type = video capture devices
if (SUCCEEDED(hr))
{
std::cout << "Enumerate devices" << std::endl;
hr = pAttributes->SetGUID(
MF_DEVSOURCE_ATTRIBUTE_SOURCE_TYPE,
MF_DEVSOURCE_ATTRIBUTE_SOURCE_TYPE_VIDCAP_GUID
);
}
// Enumerate devices.
if (SUCCEEDED(hr))
{
std::cout << "Enumerate devices:" << m_cDevices << std::endl;
hr = MFEnumDeviceSources(pAttributes, &m_ppDevices, &m_cDevices);
}
SafeRelease(&pAttributes);
return hr;
}
/*
* A method which returns the device at the given index
* # param index the index into the device array
* # param ppActivate receives the activation object for the device
* # return HRESULT reporting error/warning conditions on Windows
*/
HRESULT DeviceList::GetDevice(UINT32 index, IMFActivate **ppActivate)
{
if (index >= Count())
{
return E_INVALIDARG;
}
*ppActivate = m_ppDevices[index];
(*ppActivate)->AddRef();
return S_OK;
}
/*
* A method which gets the friendly name of a device
* # param index the index into the device array
* # param ppszName receives the name of the device
*/
HRESULT DeviceList::GetDeviceName(UINT32 index, WCHAR **ppszName)
{
std::cout << "Get Device name" << std::endl;
if (index >= Count())
{
return E_INVALIDARG;
}
HRESULT hr = S_OK;
hr = m_ppDevices[index]->GetAllocatedString(
MF_DEVSOURCE_ATTRIBUTE_FRIENDLY_NAME,
ppszName,
NULL
);
return hr;
}
#include <iostream>
#include "DeviceList.h"
HRESULT UpdateDeviceList()
{
HRESULT hr = S_OK;
WCHAR *szFriendlyName = NULL;
DeviceList g_devices;
g_devices.Clear();
hr = g_devices.EnumerateDevices();
if (FAILED(hr)) { goto done; }
std::cout << "Nb devices found:"<< g_devices.Count() << std::endl;
for (UINT32 iDevice = 0; iDevice < g_devices.Count(); iDevice++)
{
//std::cout << "" << std::endl;
hr = g_devices.GetDeviceName(iDevice, &szFriendlyName);
if (FAILED(hr)) { goto done; }
std::wcout << szFriendlyName << std::endl; // wcout, since this is a WCHAR string
// The list might be sorted, so the list index is not always the same as the
// array index. Therefore, set the array index as item data.
CoTaskMemFree(szFriendlyName);
szFriendlyName = NULL;
}
std::cout << "End of EnumDeviceList" << std::endl;
done:
return hr;
}
int main()
{
std::cout <<"Main" << std::endl;
UpdateDeviceList();
while (1);
return 0;
}
You are expected to call MFStartup(MF_VERSION); before you start calling other Media Foundation API functions.
Then you print m_cDevices before it is initialized, one line below, by MFEnumDeviceSources:
std::cout << "Enumerate devices:" << m_cDevices << std::endl;
hr = MFEnumDeviceSources(pAttributes, &m_ppDevices, &m_cDevices);
With this fixed, your code will start getting you the devices.
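Concretely, a minimal sketch of the fixed main from the question:
int main()
{
    std::cout << "Main" << std::endl;
    // Initialize Media Foundation before enumerating devices
    HRESULT hr = MFStartup(MF_VERSION);
    if (SUCCEEDED(hr))
    {
        UpdateDeviceList();
        MFShutdown();
    }
    while (1); // kept from the original to hold the console window open
    return 0;
}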