I'm working on code that creates coronal, sagittal, and axial images from a DICOM series, but at the same time I don't need to display the renderWindow to the user. This is my code:
#include <string>
#include <iostream>

#include <vtkSmartPointer.h>
#include <vtkDICOMImageReader.h>
#include <vtkImageViewer2.h>
#include <vtkRenderWindow.h>
#include <vtkWindowToImageFilter.h>
#include <vtkPNGWriter.h>

int main(int argc, char* argv[])
{
    // Verify input arguments
    if (argc != 2)
    {
        std::cout << "Usage: " << argv[0] << " FolderName" << std::endl;
        return EXIT_FAILURE;
    }
    std::string folder = argv[1];
    //std::string folder = "C:\\VTK\\vtkdata-5.8.0\\Data\\DicomTestImages";

    // Read all the DICOM files in the specified directory.
    vtkSmartPointer<vtkDICOMImageReader> reader =
        vtkSmartPointer<vtkDICOMImageReader>::New();
    reader->SetDirectoryName(folder.c_str());
    reader->Update();

    // Visualize
    vtkSmartPointer<vtkImageViewer2> imageViewer =
        vtkSmartPointer<vtkImageViewer2>::New();
    imageViewer->SetInputConnection(reader->GetOutputPort());
    imageViewer->SetSlice(0);
    imageViewer->Render();

    for (int k = 0; k < 3; k++)
    {
        std::cout << k << std::endl;
        if (k == 0) {
            imageViewer->SetSliceOrientationToYZ();
        } else if (k == 1) {
            imageViewer->SetSliceOrientationToXZ();
        } else {
            imageViewer->SetSliceOrientationToXY();
        }
        int _MinSlice = imageViewer->GetSliceMin();
        int _MaxSlice = imageViewer->GetSliceMax();

        // Screenshot of every slice in the current orientation
        for (int i = _MinSlice; i <= _MaxSlice; i++)
        {
            // Set the slice and render it before grabbing the window contents,
            // so each PNG matches the slice index in its file name.
            imageViewer->SetSlice(i);
            imageViewer->Render();

            vtkSmartPointer<vtkWindowToImageFilter> windowToImageFilter =
                vtkSmartPointer<vtkWindowToImageFilter>::New();
            windowToImageFilter->SetInput(imageViewer->GetRenderWindow());
            windowToImageFilter->ReadFrontBufferOff(); // read from the back buffer
            windowToImageFilter->Update();

            std::string filename = "img/" + std::to_string(k) + "/" + std::to_string(i) + ".png";
            vtkSmartPointer<vtkPNGWriter> writer =
                vtkSmartPointer<vtkPNGWriter>::New();
            writer->SetFileName(filename.c_str());
            writer->SetInputConnection(windowToImageFilter->GetOutputPort());
            writer->Write();
        }
    }
    return EXIT_SUCCESS;
}
My issue is that when I try imageViewer->SetRenderWindow(NULL); or imageViewer->GetRenderWindow()->Delete();, it removes the renderWindow instance and the code breaks. So how can I keep the renderWindow working in the background without displaying it to the user?
Thanks
You can use SetShowWindow(bool) or ShowWindowOff(); these methods are inherited from vtkWindow. Doing so should not stop the rendering pipeline (you can probably still use windowToImageFilter), but to be honest I did not test it. Let us know if it worked.
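A minimal, untested sketch of where the call could go (assuming a VTK build where vtkWindow exposes ShowWindowOff()):

// Hide the viewer's window before the first Render() call.
imageViewer->GetRenderWindow()->ShowWindowOff();     // inherited from vtkWindow
// Off-screen rendering may be an alternative worth trying:
// imageViewer->GetRenderWindow()->SetOffScreenRendering(1);
imageViewer->Render();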
I have a function that is executed by std::thread. I want it to keep working until the user closes the terminal running roscore by pressing Ctrl+C. Because of that, I use this inside the thread:
void publish_camera_on_topic(std::vector<Camera> cameras, const std::vector<ros::Publisher> publishers, const int camera_index)
{
    int frameSize;
    BYTE *imagePtr;
    // frame id
    int frame_id = 0;
    cv_bridge::CvImage img_bridge;
    sensor_msgs::Image img_msg;
    while (ros::ok()) {
        // Grab and display a single image from each camera
        imagePtr = cameras[camera_index].getRawImage();
        frameSize = cameras[camera_index].getFrameSize();
        cameras[camera_index].createRGBImage(imagePtr, frameSize);
        unsigned char* pImage = cameras[camera_index].getImage();
        if (NULL != pImage) {
            Mat image(cameras[camera_index].getMatSize(), CV_8UC3, pImage, Mat::AUTO_STEP);
            // release asap
            cameras[camera_index].releaseImage();
            //cvtColor(image, image, CV_BGR2RGB, 3);
            // publish on ROS topic
            std_msgs::Header header;         // empty header
            header.seq = frame_id;           // user defined counter
            header.stamp = ros::Time::now(); // time
            img_bridge = cv_bridge::CvImage(header, sensor_msgs::image_encodings::RGB8, image);
            img_bridge.toImageMsg(img_msg); // from cv_bridge to sensor_msgs::Image
            publishers[camera_index].publish(img_msg); // ros::Publisher pub_img = node.advertise<sensor_msgs::Image>("topic", queuesize);
        }
        // increase frame id
        frame_id = frame_id + 1;
    }
    std::cout << "ROS closing for thread of camera " << camera_index << " received." << std::endl;
}
Also, I create the threads like this:
// image publisher
// for each camera create a publisher
std::vector<ros::Publisher> publishers;
for (size_t i = 0; i < cameras.size(); i++) {
    char topic_name[200];
    snprintf(topic_name, sizeof(topic_name), "/lumenera_camera_package/%zu", i + 1);
    publishers.push_back(nh.advertise<sensor_msgs::Image>(topic_name, 10));
}
// work with each camera on a separate thread
std::vector<std::thread> thread_vector;
for (size_t i = 0; i < cameras.size(); i++) {
    thread_vector.push_back(std::thread(publish_camera_on_topic, cameras, publishers, i));
}
ros::spin();
std::for_each(thread_vector.begin(), thread_vector.end(), [](std::thread &t) { t.join(); });
for (size_t i = 0; i < cameras.size(); i++) {
    cameras[i].stopStreaming();
}
ROS_INFO("Node: [lumenera_camera_node] has ended.");
However, when I press Ctrl+C in the terminal and stop roscore, the threads keep running and the value of ros::ok() does not change.
The problem is solved. The issue is that ros::ok() does not check whether the ROS master is still up. Instead of this line:
while (ros::ok()) { //do sth}
This line should be used:
while (ros::ok() && ros::master::check()) { // do sth}
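For clarity, a sketch of how the loop inside publish_camera_on_topic() changes (same body as before):

while (ros::ok() && ros::master::check()) {
    // ... grab, convert, and publish the frame as before ...
}

ros::master::check() actively polls the master, so the thread exits once roscore goes down, even though ros::ok() alone never flips in this scenario.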
I am using libx264 compiled from source. It was configured to produce both a .dll and a .lib with this command:
./configure --disable-cli --enable-shared --extra-ldflags=-Wl,--output-def=libx264.def
I am using the libx264 API in my screen-sharing program with preset "veryfast", tune "zerolatency", profile "high", and the following settings:
param.i_csp = X264_CSP_BGRA;
param.i_threads = 1;
param.i_width = width;
param.i_height = height;
param.i_fps_num = fps;
param.i_fps_den = 1;
param.rc.i_bitrate = bitrate;
param.rc.i_rc_method = X264_RC_ABR;
param.rc.b_filler = true;
param.rc.f_rf_constant = (float)0;
param.rc.i_vbv_max_bitrate = param.rc.i_bitrate;
param.rc.i_vbv_buffer_size = param.rc.i_bitrate;
param.b_repeat_headers = 0;
param.b_annexb = 1;
With these settings the program works fine. I made it single-threaded by setting param.i_threads = 1.
If this line is removed, x264 defaults to multi-threading and automatically sets param.i_threads to roughly 1.5× the number of CPU cores, which is faster than running single-threaded.
But when I remove param.i_threads = 1 to make it multi-threaded, the generated output is fully grey. I cannot see any output when I view the live stream with VLC, or sometimes I see a weird output.
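For reference, the two variants (X264_THREADS_AUTO is the constant 0 from x264.h, which the presets leave in place by default):

param.i_threads = 1;                    // forced single-threaded: stream is viewable
// param.i_threads = X264_THREADS_AUTO; // default: x264 picks ~1.5x cores, stream turns grey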
I am using this bitmap image as an example (https://imgur.com/a/l8LCd1l). Only this same image is encoded repeatedly. When it is saved to a .h264 video file, it is clearly viewable. But when the encoded payload is sent over RTMP, the live stream produces very bad and weird output (or sometimes no output at all). This is the weird output I'm seeing most of the time for this image: https://imgur.com/a/VdyX1Zm
This is the full example code, in which I am both streaming and writing a video file of the same picture. It uses the srs-librtmp library. There is no error, but the stream has weird output.
In this code, if you add param.i_threads = 1; then the output stream becomes viewable. The problem is that it should be viewable with both single-threaded and multi-threaded encoding.
#include <iostream>
#include <stdio.h>
#include <sstream>
#include <x264.h>
#include "srs_librtmp.h"
#pragma comment(lib, "C:/Softwares/x264/libx264.lib")
using namespace std;
int check_ret(int ret);
int main()
{
int dts = 0;
x264_param_t param;
x264_t* h;
x264_nal_t* nals;
int i_nal;
int pts = 0;
int i_frame_size;
x264_picture_t picIn;
x264_picture_t picOut;
x264_param_default_preset(&param, "veryfast", "zerolatency");
//x264 settings
param.i_csp = X264_CSP_BGRA;
param.i_width = 1920;
param.i_height = 1080;
param.i_fps_num = 30;
param.i_fps_den = 1;
param.rc.i_bitrate = 2500;
param.rc.i_rc_method = X264_RC_ABR;
param.rc.b_filler = true;
param.rc.f_rf_constant = (float)0;
param.rc.i_vbv_max_bitrate = param.rc.i_bitrate;
param.rc.i_vbv_buffer_size = param.rc.i_bitrate;
param.b_repeat_headers = 0;
param.b_annexb = 1;
x264_param_apply_profile(&param, "high");
h = x264_encoder_open(&param);
//allocate picture
x264_picture_alloc(&picIn, param.i_csp, param.i_width, param.i_height);
//picture settings
picIn.img.i_plane = 1;
picIn.img.i_stride[0] = 4 * param.i_width;
picIn.i_type = X264_TYPE_AUTO;
int header_size = x264_encoder_headers(h, &nals, &i_nal);
FILE* fptr;
fopen_s(&fptr, "example1.h264", "wb");
// write sps and pps in the video file
fwrite(nals->p_payload, header_size, 1, fptr);
int size = 1920 * 1080 * 4;
char* bmp = new char[size];
FILE* bitptr;
errno_t err = fopen_s(&bitptr, "flower.bmp", "rb");
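// Skip the 54-byte BMP header; the rest of the file is raw 32-bit BGRA pixel data.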
fseek(bitptr, 54, SEEK_SET);
fread(bmp, size, 1, bitptr);
fclose(bitptr);
srs_rtmp_t rtmp = srs_rtmp_create("127.0.0.1:1935/live/test");
if (srs_rtmp_handshake(rtmp) != 0)
{
std::cout << "Simple handshake failed.";
return -1;
}
std::cout << "Handshake completed successfully.\n";
if (srs_rtmp_connect_app(rtmp) != 0) {
std::cout << "Connecting to host failed.";
return -1;
}
std::cout << "Connected to host successfully.\n";
if (srs_rtmp_publish_stream(rtmp) != 0) {
std::cout << "Publish signal failed.";
}
std::cout << "Publish signal success\n";
// write sps and pps in the live stream
int ret = srs_h264_write_raw_frames(rtmp, reinterpret_cast<char*>(nals->p_payload), header_size, 0, 0);
ret = check_ret(ret);
if (!ret)
return -1;
std::cout << "SPS and PPS sent.\n";
// main loop
std::cout << "Now streaming and encoding\n";
int i = 1800;
while (i--)
{
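// X264_CSP_BGRA is a single packed plane, so only plane[0] needs to be set.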
picIn.img.plane[0] = reinterpret_cast<uint8_t*>(bmp);
picIn.i_pts = pts++;
i_frame_size = x264_encoder_encode(h, &nals, &i_nal, &picIn, &picOut);
if (i_frame_size)
{
for (int j = 0; j < i_nal; j++)
{
x264_nal_t* nal = nals + j;
// write data in the video file
fwrite(nal->p_payload, nal->i_payload, 1, fptr);
// write data in the live stream
ret = srs_h264_write_raw_frames(rtmp, reinterpret_cast<char*>(nal->p_payload), nal->i_payload, dts, dts);
ret = check_ret(ret);
if (!ret)
{
return -1;
}
}
}
else
{
std::cout << "i_frame_size = 0 (encoder failed)\n";
}
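// dts/pts passed to srs_h264_write_raw_frames are in milliseconds: 33 ms per frame ~= 30 fps.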
dts += 33;
}
while (x264_encoder_delayed_frames(h))
{
i_frame_size = x264_encoder_encode(h, &nals, &i_nal, NULL, &picOut);
if (i_frame_size)
{
fwrite(nals->p_payload, i_frame_size, 1, fptr);
}
}
// Clean up the encoder, output file, RTMP connection, and frame buffer.
x264_encoder_close(h);
fclose(fptr);
srs_rtmp_destroy(rtmp);
delete[] bmp;
std::cout << "\nAll done\n";
std::cout << "Output video is example1.h264 and it is viewable in VLC";
return 0;
}
int check_ret(int ret)
{
if (ret != 0) {
if (srs_h264_is_dvbsp_error(ret)) {
srs_human_trace("ignoring drop video error, code=%d", ret);
}
else if (srs_h264_is_duplicated_sps_error(ret)) {
srs_human_trace("ignoring duplicated sps, code=%d", ret);
}
else if (srs_h264_is_duplicated_pps_error(ret)) {
srs_human_trace("ignoring duplicated pps, code=%d", ret);
}
else {
srs_human_trace("sending h264 raw data failed. ret=%d", ret);
return 0;
}
}
return 1;
}
If you would like to download the original flower.bmp file, here is the link: https://gofile.io/d/w2kX56
This error can be reproduced with any other bmp file as well.
Please tell me what is causing this problem when multi-threading is enabled. Am I setting wrong values? Is the code in which I am streaming the encoded data wrong?
MXNet C++ inference with MXPredSetInput segmentation fault
1. Background
I have successfully run https://github.com/apache/incubator-mxnet/tree/master/example/image-classification/predict-cpp.
But when I try to deploy MXNet in C++ with my own model, I get a segmentation fault:
[17:33:07] src/nnvm/legacy_json_util.cc:209: Loading symbol saved by previous version v1.2.1. Attempting to upgrade...
Signal: SIGSEGV (Segmentation fault)
2. Code with the error:
MXPredSetInput(pred_hnd, "data", image_data.data(), static_cast<mx_uint>(image_size));
3. Tips
At first I thought the input data shape was not compatible with the model's input layer. But I asked the model designer: it is a ResNet model with convolutions only, so any input shape should be OK.
4. Download the model:
Download the files and put them into the model dir.
https://drive.google.com/drive/folders/16MEKNOz_iwquVxHMk9c7igmBNuT6w7wz?usp=sharing
5. Code: https://github.com/jaysimon/mxnet_cpp_infere
#include <cstdio>
#include <cstdlib>
#include <cassert>
#include <cctype>
#include <algorithm>
#include <iostream>
#include <fstream>
#include <string>
#include <vector>
#include <memory>
#include <thread>
#include <iomanip>
#include <opencv2/opencv.hpp>
// Path for c_predict_api
#include <mxnet/c_predict_api.h>
const mx_float DEFAULT_MEAN = 117.0;
static std::string trim(const std::string& input) {
auto not_space = [](int ch) {
return !std::isspace(ch);
};
auto output = input;
output.erase(output.begin(), std::find_if(output.begin(), output.end(), not_space));
output.erase(std::find_if(output.rbegin(), output.rend(), not_space).base(), output.end());
return output;
}
// Read file to buffer
class BufferFile {
public :
std::string file_path_;
std::size_t length_ = 0;
std::unique_ptr<char[]> buffer_;
explicit BufferFile(const std::string& file_path)
: file_path_(file_path) {
std::ifstream ifs(file_path.c_str(), std::ios::in | std::ios::binary);
if (!ifs) {
std::cerr << "Can't open the file. Please check " << file_path << ". \n";
return;
}
ifs.seekg(0, std::ios::end);
length_ = static_cast<std::size_t>(ifs.tellg());
ifs.seekg(0, std::ios::beg);
std::cout << file_path.c_str() << " ... " << length_ << " bytes\n";
// Buffer as null terminated to be converted to string
buffer_.reset(new char[length_ + 1]);
buffer_[length_] = 0;
ifs.read(buffer_.get(), length_);
ifs.close();
}
std::size_t GetLength() {
return length_;
}
char* GetBuffer() {
return buffer_.get();
}
};
void GetImageFile(const std::string& image_file,
mx_float* image_data, int channels,
cv::Size resize_size, const mx_float* mean_data = nullptr) {
// Read all kinds of file into a BGR color 3 channels image
cv::Mat im_ori = cv::imread(image_file, cv::IMREAD_COLOR);
if (im_ori.empty()) {
std::cerr << "Can't open the image. Please check " << image_file << ". \n";
assert(false);
}
cv::Mat im;
resize(im_ori, im, resize_size);
int size = im.rows * im.cols * channels;
mx_float* ptr_image_r = image_data;
mx_float* ptr_image_g = image_data + size / 3;
mx_float* ptr_image_b = image_data + size / 3 * 2;
// Hard-coded per-channel means (the mean_data argument is not used here).
float mean_b, mean_g, mean_r;
mean_b = 103.06;
mean_g = 115.9;
mean_r = 123.15;
for (int i = 0; i < im.rows; i++) {
auto data = im.ptr<uchar>(i);
for (int j = 0; j < im.cols; j++) {
if (channels > 1) {
*ptr_image_b++ = static_cast<mx_float>(*data++) - mean_b;
*ptr_image_g++ = static_cast<mx_float>(*data++) - mean_g;
}
*ptr_image_r++ = static_cast<mx_float>(*data++) - mean_r;
}
}
}
// LoadSynsets
// Code from : https://github.com/pertusa/mxnet_predict_cc/blob/master/mxnet_predict.cc
std::vector<std::string> LoadSynset(const std::string& synset_file) {
std::ifstream fi(synset_file.c_str());
if (!fi.is_open()) {
std::cerr << "Error opening synset file " << synset_file << std::endl;
assert(false);
}
std::vector<std::string> output;
std::string synset, lemma;
while (fi >> synset) {
getline(fi, lemma);
output.push_back(lemma);
}
fi.close();
return output;
}
void PrintOutputResult(const std::vector<float>& data, const std::vector<std::string>& synset) {
if (data.size() != synset.size()) {
std::cerr << "Result data and synset size do not match!" << std::endl;
}
float best_accuracy = 0.0;
std::size_t best_idx = 0;
for (std::size_t i = 0; i < data.size(); ++i) {
std::cout << "Accuracy[" << i << "] = " << std::setprecision(8) << data[i] << std::endl;
if (data[i] > best_accuracy) {
best_accuracy = data[i];
best_idx = i;
}
}
std::cout << "Best Result: " << trim(synset[best_idx]) << " (id=" << best_idx << ", " <<
"accuracy=" << std::setprecision(8) << best_accuracy << ")" << std::endl;
}
void predict(PredictorHandle pred_hnd, const std::vector<mx_float> &image_data,
NDListHandle nd_hnd, const std::string &synset_file, int i) {
auto image_size = image_data.size();
// Set Input
//>>>>>>>>>>>>>>>>>>>> Problem code <<<<<<<<<<<<<<<<<<<<<<<
MXPredSetInput(pred_hnd, "data", image_data.data(), static_cast<mx_uint>(image_size));
// <<<<<<<<<<<<<<<<<<<<<<< Problem code <<<<<<<<<<<<<<<<<<<<<<<
// Do Predict Forward
MXPredForward(pred_hnd);
mx_uint output_index = 0;
mx_uint* shape = nullptr;
mx_uint shape_len;
// Get Output Result
MXPredGetOutputShape(pred_hnd, output_index, &shape, &shape_len);
std::size_t size = 1;
for (mx_uint i = 0; i < shape_len; ++i) { size *= shape[i]; }
std::vector<float> data(size);
MXPredGetOutput(pred_hnd, output_index, &(data[0]), static_cast<mx_uint>(size));
// Release NDList
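// (Careful: when predict() runs on several threads, each one frees the same nd_hnd.)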
if (nd_hnd) {
MXNDListFree(nd_hnd);
}
// Release Predictor
MXPredFree(pred_hnd);
// Synset path for your model, you have to modify it
auto synset = LoadSynset(synset_file);
// Print Output Data
PrintOutputResult(data, synset);
}
int main(int argc, char* argv[]) {
if (argc < 2) {
std::cout << "No test image here." << std::endl
<< "Usage: ./image-classification-predict apple.jpg [num_threads]" << std::endl;
return EXIT_FAILURE;
}
std::string test_file(argv[1]);
int num_threads = 1;
if (argc == 3)
num_threads = std::atoi(argv[2]);
// Models path for your model, you have to modify it
std::string json_file = "../model/rfcn_dcn_chicken-0000.json";
std::string param_file = "../model/rfcn_dcn_chicken-0000.params";
std::string synset_file = "../model/synset.txt";
std::string nd_file = "../model/mean_224.nd";
BufferFile json_data(json_file);
BufferFile param_data(param_file);
// Parameters
int dev_type = 1; // 1: cpu, 2: gpu
int dev_id = 0; // arbitrary.
mx_uint num_input_nodes = 1; // 1 for feedforward
const char* input_key[1] = { "data" };
const char** input_keys = input_key;
// Image size and channels
int width = 1000;
int height = 562;
int channels = 3;
const mx_uint input_shape_indptr[2] = { 0, 4 };
const mx_uint input_shape_data[4] = { 1,
static_cast<mx_uint>(channels),
static_cast<mx_uint>(height),
static_cast<mx_uint>(width) };
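// Note: 1 * channels * height * width must match the image_size later passed to MXPredSetInput.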
if (json_data.GetLength() == 0 || param_data.GetLength() == 0) {
return EXIT_FAILURE;
}
auto image_size = static_cast<std::size_t>(width * height * channels);
// Read Mean Data
const mx_float* nd_data = nullptr;
NDListHandle nd_hnd = nullptr;
BufferFile nd_buf(nd_file);
if (nd_buf.GetLength() > 0) {
mx_uint nd_index = 0;
mx_uint nd_len;
const mx_uint* nd_shape = nullptr;
const char* nd_key = nullptr;
mx_uint nd_ndim = 0;
MXNDListCreate(static_cast<const char*>(nd_buf.GetBuffer()),
static_cast<int>(nd_buf.GetLength()),
&nd_hnd, &nd_len);
MXNDListGet(nd_hnd, nd_index, &nd_key, &nd_data, &nd_shape, &nd_ndim);
}
// Read Image Data
std::vector<mx_float> image_data(image_size);
GetImageFile(test_file, image_data.data(), channels, cv::Size(width, height), nd_data);
if (num_threads == 1) {
// Create Predictor
PredictorHandle pred_hnd;
MXPredCreate(static_cast<const char*>(json_data.GetBuffer()),
static_cast<const char*>(param_data.GetBuffer()),
static_cast<int>(param_data.GetLength()),
dev_type,
dev_id,
num_input_nodes,
input_keys,
input_shape_indptr,
input_shape_data,
&pred_hnd);
assert(pred_hnd);
predict(pred_hnd, image_data, nd_hnd, synset_file, 0);
} else {
// Create Predictor
std::vector<PredictorHandle> pred_hnds(num_threads, nullptr);
MXPredCreateMultiThread(static_cast<const char*>(json_data.GetBuffer()),
static_cast<const char*>(param_data.GetBuffer()),
static_cast<int>(param_data.GetLength()),
dev_type,
dev_id,
num_input_nodes,
input_keys,
input_shape_indptr,
input_shape_data,
pred_hnds.size(),
pred_hnds.data());
for (auto hnd : pred_hnds)
assert(hnd);
std::vector<std::thread> threads;
for (int i = 0; i < num_threads; i++)
threads.emplace_back(predict, pred_hnds[i], image_data, nd_hnd, synset_file, i);
for (int i = 0; i < num_threads; i++)
threads[i].join();
}
printf("run successfully\n");
return EXIT_SUCCESS;
}
I'm trying to take a screenshot of each monitor of my macOS 10.13 setup in C++, using methods available in some macOS frameworks, but using CGImageDestinationCreateWithURL to create a CGImageDestinationRef destination returns NULL, and I have no idea what I'm doing wrong.
The problem that I think I'm having is with the line:
CGImageDestinationRef destination = CGImageDestinationCreateWithURL(url, kUTTypePNG, 1, NULL);
The code that I'm using is the following:
main.cpp:
#include <iostream>
#include <string>
#include <QuartzCore/QuartzCore.h>
#include <CoreServices/CoreServices.h>
#include <ImageIO/ImageIO.h>
int main(int argc, const char * argv[]) {
std::string baseImageOutput = "/Users/bogdan/Desktop";
std::string pathSeparator = "/";
std::string baseImageName = "image-";
std::string imageExtension = ".png";
CGDisplayCount displayCount;
CGDirectDisplayID displays[32];
// grab the active displays
CGGetActiveDisplayList(32, displays, &displayCount);
// go through the list
for (int i = 0; i < displayCount; i++) {
std::string imagePath = baseImageOutput + pathSeparator + baseImageName + std::to_string(i) + imageExtension;
const char *charPath = imagePath.c_str();
CFStringRef imageOutputPath = CFStringCreateWithCString(kCFAllocatorDefault, charPath, kCFStringEncodingUTF8); // the third argument is a text encoding, not a path style
// make a snapshot of the current display
CGImageRef image = CGDisplayCreateImage(displays[i]);
CFURLRef url = CFURLCreateWithString(kCFAllocatorDefault, imageOutputPath, NULL);
// The following CGImageDestinationRef variable is NULL
CGImageDestinationRef destination = CGImageDestinationCreateWithURL(url, kUTTypePNG, 1, NULL);
if (!destination) {
std::cout<< "The destination does not exist: " << imagePath << std::endl;
CGImageRelease(image);
return 1;
}
CGImageDestinationAddImage(destination, image, NULL);
if (!CGImageDestinationFinalize(destination)) {
std::cout << "Failed to write image to the path" << std::endl;;
CFRelease(destination);
CGImageRelease(image);
return 1;
}
CFRelease(destination);
CGImageRelease(image);
}
std::cout << "It Worked. Check your desktop" << std::endl;;
return 0;
}
Am I creating the destination correctly?
Found the solution.
It seems that baseImageOutput needs file:// prepended so that the final URL is valid, so we have
std::string baseImageOutput = "file:///Users/bogdan/Desktop";
instead of
std::string baseImageOutput = "/Users/bogdan/Desktop";
I am writing a program using C++, OpenCV, and a console window. I want a help menu where the user can enter desired threshold values. The problem is that the inputs only work when the image window is selected, not when the console window is selected. The inputs are not read in main; they are read in void onNewDepthSample(), as seen below. A short sample of the code is below:
void changeItems()
{
    int input = 0;
    cout << "Welcome: Here is where to set up the proper image quality\n";
    cout << "Press 'h' for the help menu and 'x' to leave the loop\n";
    char keyInputs = 'a';
    while (keyInputs != 'x')
    {
        cin >> keyInputs;
        if (keyInputs == 'h')
        {
            cout << "help menu\n";
        }
    }
    cout << "Leaving the loop";
    changeValues = 0;
}
void onNewDepthSample(DepthNode node, DepthNode::NewSampleReceivedData data)
{
    //printf("Z#%u: %d\n", g_dFrames, data.vertices.size());
    int p[3];
    int circleTemp[6000] = {0};
    int circleTempVal = 0;
    int resultsTotalAfterRemove = 0;
    int resultsTotalAfterRemoveVideo = 0;
    float robotx = 0;
    float roboty = 0;
    char* cCoordinates = new char[100];
    char* cDepth = new char[100];
    p[0] = CV_IMWRITE_JPEG_QUALITY;
    p[1] = 100;
    p[2] = 0;
    int32_t w, h;
    FrameFormat_toResolution(data.captureConfiguration.frameFormat, &w, &h);
    if (changeValues != 0)
    {
        changeItems();
    }