I connected a Azure Kinect Camera to my system, and from code I can grab an IR image and a Depth image, but the Color image is not working. I have compiled the SDK myself, and when running the k4aviewer.exe, I get the same thing. IR + Depth camera work, color camera is just empty.Errors I am getting:
Failed to start microphone: unable to open device!
Failed to start microphone listener: unable to open device!
I then installed the official SDK, in that k4aviewer I get both IR and color camera. But when compiling using that lib and dll, I still get nothing. What might cause this issue in the first place? I can't be totally off, as I get the depth data.
main.cpp:
#include "azure_camera.h"
#include <opencv2/opencv.hpp>
int main() {
int count = global::getCameraCount();
AzureKinect cam (0);
cam.connectCamera();
k4a_device_configuration_t config;// = K4A_DEVICE_CONFIG_INIT_DISABLE_ALL;
config.camera_fps = K4A_FRAMES_PER_SECOND_30;
config.color_format = K4A_IMAGE_FORMAT_COLOR_BGRA32;
config.color_resolution = K4A_COLOR_RESOLUTION_1080P;
config.depth_delay_off_color_usec = 0;
config.depth_mode = K4A_DEPTH_MODE_NFOV_UNBINNED;
config.disable_streaming_indicator = false;
config.subordinate_delay_off_master_usec = 0;
config.synchronized_images_only = true;
config.wired_sync_mode = K4A_WIRED_SYNC_MODE_STANDALONE;
cam.startCamera(config);
AzureKinect::Images m_images;
cam.grab_images(false, m_images);
{
cv::imshow("Test Color", m_images.color);
cv::imshow("Test Depth", m_images.depth);
cv::waitKey(0);
}
cam.stopCamera();
return 0;
}
AzureCamera.cpp:
#include "azure_camera.h"
AzureKinect::~AzureKinect() {
device.close();
}
bool AzureKinect::connectCamera()
{
try {
device = k4a::device::open(index);
}
catch (...) {
return false;
}
return true;
}
bool AzureKinect::startCamera(k4a_device_configuration_t _config)
{
config = _config;
try {
device.start_cameras(&config);
}
catch(const k4a::error& e) {
printf("Error occurred: %s", e.what());
return false;
}
return true;
}
bool AzureKinect::stopCamera()
{
device.stop_cameras();
return true;
}
bool AzureKinect::grab_images(bool rectified, AzureKinect::Images& images)
{
if (device.get_capture(&capture, std::chrono::milliseconds(1000)))
{
colorImage = capture.get_color_image();
depthImage = capture.get_depth_image();
}
else
{
return false;
}
if (images.color.cols != colorImage.get_width_pixels() || images.color.cols != colorImage.get_height_pixels())
{
images.color = cv::Mat(colorImage.get_height_pixels(), colorImage.get_width_pixels(), CV_8UC4);
}
if (images.depth.cols != depthImage.get_width_pixels() || images.depth.cols != depthImage.get_height_pixels())
{
images.depth = cv::Mat(depthImage.get_height_pixels(), depthImage.get_width_pixels(), CV_16UC1);
}
std::memcpy(images.color.data, colorImage.get_buffer(), colorImage.get_size());
std::memcpy(images.depth.data, depthImage.get_buffer(), depthImage.get_size());
colorImage.reset();
depthImage.reset();
capture.reset();
return true;
}
cv::Mat AzureKinect::get_calibration()
{
return cv::Mat();
}
uint32_t global::getCameraCount()
{
return k4a_device_get_installed_count();
}
AzureCamera.h
#include <k4a/k4a.hpp>
#include <opencv2/core.hpp>
#include <stdint.h>
class AzureKinect {
public:
struct Images {
cv::Mat color;
cv::Mat depth;
};
AzureKinect(int id) : index(id), colorImage(nullptr), depthImage(nullptr) { }
~AzureKinect();
bool connectCamera();
bool startCamera(k4a_device_configuration_t _config);
bool stopCamera();
bool grab_images(bool rectified, AzureKinect::Images& images);
cv::Mat get_calibration();
private:
uint32_t index;
k4a::device device;
k4a::capture capture;
k4a_device_configuration_t config;
k4a::image colorImage;
k4a::image depthImage;
};
namespace global {
uint32_t getCameraCount();
}
Note: I found something really similar at https://github.com/microsoft/Azure-Kinect-Sensor-SDK/issues/1237 , but then I need it to work on this system. How can I debug this?
The timeout might be too short for the first capture. You are also not checking the error code for AzureKinect::grab_images(). What is reported in the error log?
Related
I need to decode some H.264 frames into raw format (YUV420).
I receive packets which contains frame by some custom protocol.
How can I pass received H.264 frames into GStreamermm API to decode?
In current time I read them tutorials (unfortunately this is GST - API of C version), but can't find actual GStreamermm API documentation.
Please, point me in any documents or examples of how to do it.
I was able to implement the transfer data via pipeline and retrieve decoding raw video frames using C++ version. Here is raw example:
struct WebPipeline {
Glib::RefPtr<Gst::AppSrc> appsrc;
Glib::RefPtr<Gst::AppSink> appdst;
Glib::RefPtr<Gst::Element> h264parser;
Glib::RefPtr<Gst::Element> avdec_h264;
Glib::RefPtr<Gst::Element> jpegenc;
Glib::RefPtr<Gst::Pipeline> pipe;
bool accepts_data {false};
};
WebPipePtr ExampleClass::CreatePipeline() {
auto web_pipe = std::make_shared<WebPipeline>();
web_pipe->appsrc = Gst::AppSrc::create("web_appsrc");
if (!web_pipe->appsrc) {
throw std::runtime_error("Can't create AppSrc");
}
web_pipe->appdst = Gst::AppSink::create("web_appdst");
if (!web_pipe->appdst) {
throw std::runtime_error("Can't create AppSink");
}
web_pipe->h264parser = Gst::ElementFactory::create_element("h264parse", "h264_parser");
if (!web_pipe->h264parser) {
throw std::runtime_error("Can't create h264parse");
}
web_pipe->avdec_h264 = Gst::ElementFactory::create_element("avdec_h264", "avdec264");
if (!web_pipe->avdec_h264) {
throw std::runtime_error("Can't create avdec_h264");
}
web_pipe->jpegenc = Gst::ElementFactory::create_element("jpegenc");
if (!web_pipe->jpegenc) {
throw std::runtime_error("Can't create jpegenc");
}
web_pipe->pipe = Gst::Pipeline::create("websocket_pipe");
if (!web_pipe->pipe) {
throw std::runtime_error("Can't create pipeline");
}
web_pipe->appdst->property_emit_signals() = true;
web_pipe->appdst->set_sync(false);
web_pipe->appdst->signal_new_sample().connect(sigc::bind(sigc::mem_fun(this, &ExampleClass::PullFromPipe), web_pipe->appdst));
web_pipe->appsrc->property_emit_signals() = true;
web_pipe->appsrc->signal_need_data().connect(sigc::bind(sigc::mem_fun(this, &ExampleClass::EnableAcceptance), web_pipe));
web_pipe->appsrc->signal_enough_data().connect(sigc::bind(sigc::mem_fun(this, &ExampleClass::DisableAcceptance), web_pipe));
web_pipe->pipe->add(web_pipe->appsrc)->add(web_pipe->h264parser)->add(web_pipe->avdec_h264)->add(web_pipe->jpegenc)->add(web_pipe->appdst);
web_pipe->appsrc->link(web_pipe->h264parser)->link(web_pipe->avdec_h264)->link(web_pipe->jpegenc)->link(web_pipe->appdst);
web_pipe->pipe->set_state(Gst::STATE_PLAYING);
return web_pipe;
}
void ExampleClass::EnableAcceptance(guint, WebPipePtr pipe) {
if (!pipe->accepts_data) {
BOOST_LOG_SEV(GetLogger(), log::info) << "Begin push frames";
pipe->accepts_data = true;
}
}
void ExampleClass::DisableAcceptance(WebPipePtr pipe) {
if (pipe->accepts_data) {
BOOST_LOG_SEV(GetLogger(), log::info) << "Begin drop frames";
pipe->accepts_data = false;
}
}
void ExampleClass::PushToPipe(WebPipePtr pipe, std::vector<uint8_t>&& frames) {
if (!pipe->accepts_data) {
return Gst::FLOW_CUSTOM_ERROR;
}
GstBuffer* buffer = gst_buffer_new_wrapped_full(static_cast<GstMemoryFlags>(GST_MEMORY_FLAG_READONLY | GST_MEMORY_FLAG_PHYSICALLY_CONTIGUOUS),
const_cast<uint8_t*>(frame.data()),
frame.size(),
0,
frame.size(),
reinterpret_cast<gpointer>(frame_ref), // inner implementation of some sort of wrapper
destroy); // lamda-destructor
buffer->set_pts(time);
return pipe->appsrc->push_buffer(buffer);
}
Gst::FlowReturn ExampleClass::PullFromPipe(const Glib::RefPtr<Gst::AppSink>& appsink) {
auto sample = appsink->pull_sample();
if (!sample) {
return Gst::FLOW_ERROR;
}
if (appsink->property_eos()) {
return Gst::FLOW_EOS;
}
Gst::ClockTime timestamp = 0;
{
auto buffer = sample->get_buffer();
if (!buffer) {
throw std::runtime_error("Can't get buffer from sample");
}
timestamp = buffer->get_pts();
}
// process sample...
return Gst::FLOW_OK;
}
I want to extract albumart from mp3 files
so i use taglib library
extracting title and artist succed.
but when i try to extract album art, it occurs error.
void MusicContainer::getAlbumArt(const char* path) {
static const char *IdPicture = "APIC";
TagLib::MPEG::File mpegFile(path);
TagLib::ID3v2::Tag *id3v2tag = mpegFile.ID3v2Tag();
TagLib::ID3v2::FrameList Frame;
TagLib::ID3v2::AttachedPictureFrame *PicFrame;
void *RetImage = NULL, *SrcImage;
unsigned long Size;
FILE *jpegFile;
errno_t err = fopen_s(&jpegFile, "d:\\FromId3.jpg", "wb");
if (id3v2tag)
{
// picture frame
Frame = id3v2tag->frameListMap()[IdPicture];//error occurs on this line.
if (!Frame.isEmpty())
{
for (TagLib::ID3v2::FrameList::ConstIterator it = Frame.begin(); it != Frame.end(); ++it)
{
PicFrame = (TagLib::ID3v2::AttachedPictureFrame *)(*it);
// if ( PicFrame->type() ==
//TagLib::ID3v2::AttachedPictureFrame::FrontCover)
{
// extract image (in it’s compressed form)
Size = PicFrame->picture().size();
SrcImage = malloc(Size);
if (SrcImage)
{
memcpy(SrcImage, PicFrame->picture().data(), Size);
fwrite(SrcImage, Size, 1, jpegFile);
fclose(jpegFile);
free(SrcImage);
}
}
}
}
}
else
{
cout << "id3v2 not present";
}
}
error message
Exception thrown: write access violation.
_Parent_proxy was 0x10011EE0.
If there is a handler for this exception, the program may be safely continued.
tell me how to fix it.
I'm trying to get familiar with Graphics2D for NaCl, but I'm getting the error "NativeClient: NaCl module crashed" when I try to paint and flush my Graphics2D instance.
Here is the output of my calls to console.log:
View did change
Making graphics context
Drawing
Making blank image data
PaintAndFlush
Flush
I am on main thread
NativeClient: NaCl module crashed
Here is my index.html : https://jsfiddle.net/3c1y4wp9/
And here is my c++ code:
#include <deque>
#include "ppapi/cpp/instance.h"
#include "ppapi/cpp/module.h"
#include "ppapi/cpp/var.h"
#include "ppapi/cpp/graphics_2d.h"
#include "ppapi/cpp/image_data.h"
#include "ppapi/cpp/instance.h"
#include "ppapi/cpp/module.h"
#include "ppapi/cpp/var.h"
#include "ppapi/cpp/var_array.h"
#include "ppapi/utility/completion_callback_factory.h"
namespace {
const uint32_t kBlue = 0xff4040ffu;
const uint32_t kBlack = 0xff000000u;
}
class GravitySimInstance : public pp::Instance {
public:
explicit GravitySimInstance(PP_Instance instance) : pp::Instance(instance)
{}
virtual ~GravitySimInstance() {}
private:
virtual void HandleMessage(const pp::Var& var_message) {
}
void PaintAndFlush(pp::ImageData* image_data) {
PostMessage("PaintAndFlush");
assert(!flushing_);
graphics_2d_context_.ReplaceContents(image_data);
PostMessage("Flush");
if (pp::Module::Get()->core()->IsMainThread()) {
PostMessage("I am on main thread");
} else {
PostMessage("I am NOT on main thread");
}
graphics_2d_context_.Flush(
callback_factory_.NewCallback(&GravitySimInstance::DidFlush));
flushing_ = true;
}
void DidFlush(int32_t error_code) {
PostMessage("DidFlush");
flushing_ = false;
}
virtual void DidChangeView(const pp::View& view) {
PostMessage("View did change");
if (size_ != view.GetRect().size()) {
size_ = view.GetRect().size();
const bool is_always_opaque = true;
PostMessage("Making graphics context");
graphics_2d_context_ = pp::Graphics2D(this, view.GetRect().size(),
is_always_opaque);
BindGraphics(graphics_2d_context_);
Draw();
}
}
void Draw() {
PostMessage("Drawing");
pp::ImageData image_data = MakeBlankImageData(size_);
PaintAndFlush(&image_data);
}
pp::ImageData MakeBlankImageData(const pp::Size& size) {
PostMessage("Making blank image data");
const bool init_to_zero = false;
pp::ImageData image_data = pp::ImageData(this,
PP_IMAGEDATAFORMAT_BGRA_PREMUL,
size,
init_to_zero);
uint32_t* image_buffer = static_cast<uint32_t*>(image_data.data());
for (int i = 0; i < size.GetArea(); ++i)
image_buffer[i] = kBlack;
return image_data;
}
pp::Graphics2D graphics_2d_context_;
pp::CompletionCallbackFactory<GravitySimInstance> callback_factory_;
/// The size of our rectangle in the DOM, as of the last time DidChangeView
/// was called.
pp::Size size_;
/// true iff we are flushing.
bool flushing_;
/// Stores the most recent histogram so that we can re-draw it if we get
/// resized.
double histogram_[10];
};
class GravitySimModule : public pp::Module {
public:
GravitySimModule() : pp::Module() {}
virtual ~GravitySimModule() {}
virtual pp::Instance* CreateInstance(PP_Instance instance) {
return new GravitySimInstance(instance);
}
};
namespace pp {
Module* CreateModule() {
return new GravitySimModule();
}
} // namespace pp
Nothing looks obviously wrong, except where you initialize the image buffer. You need to take the stride into consideration when working with the image data. However, it doesn't look like you're writing out of bounds. Have you tried making init_to_zero true and using the ImageData as is to see if it still crashes?
Try calling of Flush function only in main thread. If you want to call such functions (which should be called in main thread) from working thread call them throuth pp::Core::CallOnMainThread function.
I have a very annoying OpenCV error, that i can't understand, and handle with.
I write an application which gets mjpg's stream from ip camera, and process it, but when i try to load image from stream, sometimes i have
[mjpeg # 0000000000428480] overread 8
error, and i don't know why.
Even if i try to skip this issue, and try to load next frame from the stream, the application stucks on
frameStatus = cameraHandler->read(mat);
This is code for connection establishing:
void ImageProcessor::connectWithCamera(VideoCapture * cameraHandler) {
if (cameraHandler != nullptr) {
Logger::log("Closing existing camera stream.");
cameraHandler->release();
delete cameraHandler;
}
Logger::log("Camera configuration and connection establishing.");
cameraHandler = new VideoCapture();
cameraHandler->set(CV_CAP_PROP_FRAME_WIDTH, config.RESOLUTION_WIDTH);
cameraHandler->set(CV_CAP_PROP_FRAME_HEIGHT, config.RESOLUTION_HEIGHT);
cameraHandler->set(CV_CAP_PROP_FPS, config.CAMERA_FPS);
cameraHandler->set(CV_CAP_PROP_FOURCC, CV_FOURCC('M', 'J', 'P', 'G'));
while (!cameraHandler->open(config.LINK)) {
Logger::log("Cannot connect to camera! Trying again.");
}
}
And this is code for capturing images:
void ImageProcessor::start() {
VideoCapture * cameraHandler = new VideoCapture();
this->connectWithCamera(cameraHandler);
this->connectWithServer(this->serverConnection);
Logger::log("Id sending.");
serverConnection->send(config.TOKEN + "\n");
Logger::log("Computations starting.");
Mat mat;
Result * result = nullptr;
int delta = 1000 / cameraHandler->get(CV_CAP_PROP_FPS);
char frameErrorCounter = 0;
bool frameStatus;
while (true) {
frameStatus = false;
cv::waitKey(delta);
try {
frameStatus = cameraHandler->read(mat);
} catch (std::exception& e) {
std::string message = e.what();
Logger::log("Critical camera error! : " + message);
}
if (!frameStatus) {
Logger::log("Cannot read a frame from source. ");
++frameErrorCounter;
if (!cameraHandler->isOpened() || frameErrorCounter >= this->GET_FRAME_ERROR_COUNTER) {
Logger::log("Probably camera is disconnected. Trying to establish connection again.");
frameErrorCounter = 0;
this->connectWithCamera(cameraHandler);
Logger::log("Computations starting.");
}
continue;
}
result = processImage(mat);
std::string stringResult;
if (result == nullptr) {
stringResult = this->NO_RESULT;
delete result;
result = nullptr;
} else {
stringResult = result->toJson();
}
if (!serverConnection->send(stringResult)) {
Logger::log("Server connection lost, trying to establish it again.");
serverConnection->close();
while (!serverConnection->isOpen()) {
this->connectWithServer(serverConnection);
}
}
mat.release();
}
}
Thanks in advance!
Cocos2d-x is a C++ port of Cocos2d-for-iPhone. It has the advantage of cross-platform. I'm using Cocos2d-x to develop games for Android and iPhone.
Right now I'm compiling a set of Cocos2d-X code with both Android NDK and Xcode.
On Xcode the game compiles and runs well on the iPhone.
With Android NDK, the compile would fail. (I'm using the official Android r7c NDK).
Please help.
Edited: For those of you who're interested in the full implementation file. Here it is.
#include "GameOverScene.h"
#include "HelloWorldScene.h"
using namespace cocos2d;
bool GameOverScene::init() {
if (CCScene::init()) {
this->_layer = GameOverLayer::node();
this->_layer->retain();
this->addChild(_layer);
return true;
} else {
return false;
}
}
GameOverScene::~GameOverScene () {
if (_layer) {
_layer->release();
_layer = NULL;
}
}
bool GameOverLayer::init () {
if (CCLayerColor::initWithColor(ccc4f(255, 255, 255, 255))) {
CCSize winSize = CCDirector::sharedDirector()->getWinSize();
this->_label = CCLabelTTF::labelWithString("", "Artial", 32);
_label->retain();
_label->setColor(ccc3(0, 0, 0));
_label->setPosition(ccp(winSize.width/2, winSize.height/2));
this->addChild(_label);
this->runAction(CCSequence::actions(CCDelayTime::actionWithDuration(3), CCCallFunc::actionWithTarget(this, callfunc_selector(GameOverLayer::gameOverDone)), NULL));
return true;
} else {
return false;
}
}
void GameOverLayer::gameOverDone() {
CCDirector::sharedDirector()->replaceScene(HelloWorld::scene());
}
GameOverLayer::~GameOverLayer() {
if (_label) {
_label->release();
_label = NULL;
}
}
And the full header file
#ifndef S6_GameOverScene_h
#define S6_GameOverScene_h
#include "cocos2d.h"
class GameOverLayer : public cocos2d::CCLayerColor {
public:
GameOverLayer():_label(NULL) {};
virtual ~GameOverLayer();
bool init();
LAYER_NODE_FUNC(GameOverLayer);
void gameOverDone();
CC_SYNTHESIZE_READONLY(cocos2d::CCLabelTTF*, _label, Label);
};
class GameOverScene : public cocos2d::CCScene {
public:
GameOverScene():_layer(NULL) {};
~GameOverScene();
bool init();
//SCENE_NODE_FUNC(GameOverScene);
static GameOverScene* node()
{
GameOverScene *pRet = new GameOverScene();
//Error: undefined reference to `GameOverScene::init()'
if (pRet && pRet->init())
{
pRet->autorelease();
return pRet;
}
else
{
//Error: undefined reference to `vtable for GameOverScene'
delete pRet;
pRet = NULL;
return NULL;
}
};
CC_SYNTHESIZE_READONLY(GameOverLayer*, _layer, Layer);
};
#endif
It might be problem with Android.mk file.. In that you need to add your GameOverScene.h file for compilation..
/Users/my_account_name/Desktop/Projects/S6/S6/android/jni/../../Classes/GameOverScene.h:40: undefined reference to GameOverScene::init()'
You have to link with GameOverScene's object file.
You might forget to add GameOverScene.cpp in Android.mk located at Classed folder.