How can I parse a const char* from a double or long?
Mainly because my code is a lot faster when I use a const char*, I decided to create a small base string class. But my code to convert a double has some bugs and only works partially. Any help would be much appreciated.
I am using macOS, g++ and C++17.
Code:
#include <iostream>
#include <cstring>   // strlen, strncpy, strcpy, strcat
#include <cstdlib>   // ecvt, malloc
class bstring {
public:
const char* characters;
bstring(const char* c = "") { characters = c; }
static bstring parse(const double number, int precision = 100) {
// Convert.
int decimal, sign;
char *buffer;
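// ecvt() formats |number| as `precision` digits with no decimal point or sign;
// `decimal` receives the position of the decimal point and `sign` is set nonzero
// for negative numbers (note: `sign` is never applied below, so the '-' gets lost).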
buffer = ecvt(number, precision, &decimal, &sign);
int n = strlen(buffer);
// Add decimal.
char before[decimal];
strncpy(before, 0 + buffer, decimal);
char after[n - decimal - 1];
strncpy(after, decimal + buffer, n - decimal - 1);
// Remove zero padding.
int removed = 0;
while (true) {
size_t n = sizeof(after) - removed;
size_t index_to_remove = n - 1;
if (after[index_to_remove] == '0') {
for (size_t i = index_to_remove; i < n - 1; ++i) {
after[i] = after[i + 1];
}
removed += 1;
} else { break; }
}
bool is_zero = removed == sizeof(after);
int after_size = sizeof(after)-removed;
char* nafter = (char*)malloc(sizeof(char) * after_size);
// Concat.
char* new__{ new char[strlen(before) + 1 + after_size] };
new__ = strcpy(new__, before);
new__ = strcat(new__, ".");
if (is_zero) {
char a[] = "0";
new__ = strcat(new__, a);
} else {
new__ = strcat(new__, after);
}
// Assign.
bstring s = new__;
delete[] new__; new__ = NULL;
return s;
//
}
};
std::ostream& operator <<(std::ostream &s, bstring x) { return s << x.characters; }
int main() {
std::cout << "Should be " << "-1234.39950" << ": " << bstring::parse(-1234.39950) << std::endl;
std::cout << "Should be " << "-1.0" << ": " << bstring::parse(-1.0) << std::endl;
std::cout << "Should be " <<"0.0" << ": " << bstring::parse(0.0) << std::endl;
std::cout << "Should be " <<"0.3897495" << ": " << bstring::parse(0.3897495) << std::endl;
std::cout << "Should be " <<"1.0" << ": " << bstring::parse(1.0) << std::endl;
std::cout << "Should be " <<"100.00" << ": " << bstring::parse(1000.0) << std::endl;
std::cout << "Should be " <<"10000.000" << ": " << bstring::parse(1000000.0) << std::endl;
std::cout << "Should be " <<"1000000.0000" << ": " << bstring::parse(1000000000.0) << std::endl;
std::cout << "Should be " <<"1000000000.0000" << ": " << bstring::parse(1000000000000.0) << std::endl;
std::cout << "Should be " <<"1000000000000.0000" << ": " << bstring::parse(1000000000000000.0) << std::endl;
}
Edit:
Is this piece of code okay? Or am I doing something wrong by not deleting new__, or by where I assign it? (A sketch of what I mean by keeping the allocation alive follows the snippet.)
// Concat.
bstring concat(const char* c) {
int n = ::strlen(characters) + ::strlen(c);
if (n == 0) { return bstring(); }
if (::strlen(c) == 0) { return bstring(characters); }
char* new__{ new char[n + 1] };
new__ = strcpy(new__, characters);
new__ = strcat(new__, c);
// const char* n = new__;
// delete[] new__; new__ = NULL;
bstring s = new__;
return s;
}
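What I mean by "not deleting it" is roughly this, where the returned bstring keeps pointing at the heap buffer (just a sketch; for this to be safe, bstring would also need a destructor and copy operations that manage characters):
bstring concat_owning(const char* c) const {
    size_t n = ::strlen(characters) + ::strlen(c);
    char* buf = new char[n + 1];
    ::strcpy(buf, characters);
    ::strcat(buf, c);
    bstring s;
    s.characters = buf; // s now refers to buf; it must not be deleted here
    return s;
}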
I'm trying to interpret the output of FasterRCNN in C++ and I'm fighting with the GenericDict type.
My code is as follows:
#include <opencv4/opencv2/opencv.hpp>
#include <opencv4/opencv2/shape.hpp>
#include <opencv4/opencv2/imgcodecs.hpp>
#include <opencv4/opencv2/highgui.hpp>
#include <opencv4/opencv2/imgproc.hpp>
#include <opencv4/opencv2/core/utility.hpp>
#include <opencv4/opencv2/core/mat.hpp>
#include <c10/cuda/CUDAStream.h>
#include <torch/csrc/autograd/grad_mode.h>
#include <torch/csrc/api/include/torch/torch.h>
#include <torch/script.h>
#include <torchvision/vision.h>
#include <torchvision/nms.h>
#include <iostream>
#include <memory>
#include <string>
int main(int argc, const char* argv[])
{
if (argc != 3)
{
printf("usage: %s <path-to-exported-script-module> <image_to_test>\n",argv[0]);
return -1;
}
std::string module_filename = argv[1];
std::string image_file = argv[2];
try
{
cv::Mat input_img = cv::imread(image_file, cv::IMREAD_GRAYSCALE);
torch::autograd::AutoGradMode guard(false);
// Deserialize the ScriptModule from a file using torch::jit::load().
torch::jit::script::Module module = torch::jit::load(module_filename);
assert(module.buffers().size() > 0);
module.eval();
// Assume that the entire model is on the same device.
// We just put input to this device.
auto device = (*std::begin(module.buffers())).device();
const int height = input_img.rows;
const int width = input_img.cols;
const int channels = 1;
auto input = torch::from_blob(input_img.data, {height, width, channels}, torch::kUInt8);
// HWC to CHW
// input = input.to(device, torch::kFloat).permute({2, 0, 1}).contiguous();
input = input.to(device, torch::kFloat).permute({2, 0, 1}).contiguous();
// run the network
std::vector<at::Tensor> inputs;
inputs.push_back(input);
auto output = module.forward({inputs});
if (device.is_cuda())
c10::cuda::getCurrentCUDAStream().synchronize();
std::cout << "output: " << output << std::endl;
auto outputs = output.toTuple()->elements();
std::cout << "outputs: " << outputs << std::endl;
for( auto& elem : outputs )
{
std::cout << "elem: " << elem << std::endl;
if( elem.isGenericDict() )
{
std::cout << "elem is generic dict: " << elem << std::endl;
c10::Dict<c10::IValue, c10::IValue> dict = elem.toGenericDict();
auto elem_vector_0 = dict.at(c10::IValue("scores")).toIntVector();
auto elem_vector_1 = dict.at(c10::IValue("boxes")).toIntVector();
auto elem_vector_2 = dict.at(c10::IValue("labels")).toIntVector();
for( auto& ee0 : elem_vector_0 )
{
std::cout << "elem_vector_0" << ee0 << std::endl;
}
for( auto& ee0 : elem_vector_1 )
{
std::cout << "elem_vector_1" << ee0 << std::endl;
}
for( auto& ee0 : elem_vector_2 )
{
std::cout << "elem_vector_2" << ee0 << std::endl;
}
}
}
cv::namedWindow("Display Image", cv::WINDOW_AUTOSIZE );
cv::imshow("Display Image", input_img);
cv::waitKey(0);
}
catch(const c10::Error& e)
{
std::cerr << e.what() << std::endl;
return -1;
}
catch(const cv::Exception& e)
{
std::cerr << e.what() << std::endl;
return -1;
}
catch(const std::exception& e)
{
std::cerr << e.what() << std::endl;
return -1;
}
catch(...)
{
std::cerr << "Unknown error" << std::endl;
return -1;
}
std::cout << "ok\n";
return 0;
}
and the output is:
(base) fstrati@fstrati-desktop:~/libtorch_shared_cuda_10.1/load_and_run_model/Release$ ./load_and_run_model ./torch_script_v0.2.pt test_img.png
[W faster_rcnn.py:95] Warning: RCNN always returns a (Losses, Detections) tuple in scripting (function )
output: ({}, [{boxes: [ CPUFloatType{0,4} ], labels: [ CPULongType{0} ], scores: [ CPUFloatType{0} ]}])
outputs: {} [{boxes: [ CPUFloatType{0,4} ], labels: [ CPULongType{0} ], scores: [ CPUFloatType{0} ]}]
elem: {}
elem is generic dict: {}
Argument passed to at() was not in the map.
I'm struggling to find a way to extract the boxes, labels and scores from the GenericDict.
This map is strange: I cannot iterate over it, and I cannot access its keys and values with it->first / it->second.
Any ideas?
Thanks in advance.
I think the following approach resolves the main problem here:
output = module.forward(inputs);
auto detections = output.toTuple()->elements().at(1).toList().get(0).toGenericDict();
std::cout << ">>> detections labels: " << detections.at("labels") << std::endl;
std::cout << ">>> detections boxes: " << detections.at("boxes") << std::endl;
std::cout << ">>> detections scores: " << detections.at("scores") << std::endl;
Besides, I've added an executable example at https://github.com/zhiqwang/yolov5-rt-stack/tree/master/deployment/libtorch to show how libtorch works.
I trained a U-Net-based model in PyTorch. It takes an image as input and returns a mask.
After training I saved it to ONNX format, ran it with the onnxruntime Python module, and it worked like a charm.
Now I want to use this model in C++ code on Linux.
Is there a simple (hello world) tutorial that explains:
How to incorporate the onnxruntime module into a C++ program on Ubuntu (install the shared lib and so on)?
How to properly load an image and pass it to the model?
P.S. I found only this: https://www.onnxruntime.ai/docs/tutorials/samples_catalog.html#cc
But there is no info about loading an image and converting it to an ONNX-compatible format in C++ code.
For installation on Linux, you should refer to https://www.onnxruntime.ai/.
You can refer to the following code for how to load and run an ONNX model.
#include <algorithm> // std::generate
#include <assert.h>
#include <iostream>
#include <sstream>
#include <vector>
#include <experimental_onnxruntime_cxx_api.h>
// pretty prints a shape dimension vector
std::string print_shape(const std::vector<int64_t>& v) {
std::stringstream ss("");
for (size_t i = 0; i < v.size() - 1; i++)
ss << v[i] << "x";
ss << v[v.size() - 1];
return ss.str();
}
int calculate_product(const std::vector<int64_t>& v) {
int total = 1;
for (auto& i : v) total *= i;
return total;
}
using namespace std;
int main(int argc, char** argv) {
if (argc != 2) {
cout << "Usage: ./onnx-api-example <onnx_model.onnx>" << endl;
return -1;
}
#ifdef _WIN32
std::string str = argv[1];
std::wstring wide_string = std::wstring(str.begin(), str.end());
std::basic_string<ORTCHAR_T> model_file = std::basic_string<ORTCHAR_T>(wide_string);
#else
std::string model_file = argv[1];
#endif
// onnxruntime setup
Ort::Env env(ORT_LOGGING_LEVEL_WARNING, "example-model-explorer");
Ort::SessionOptions session_options;
Ort::Experimental::Session session = Ort::Experimental::Session(env, model_file, session_options); // access experimental components via the Experimental namespace
// print name/shape of inputs
std::vector<std::string> input_names = session.GetInputNames();
std::vector<std::vector<int64_t> > input_shapes = session.GetInputShapes();
cout << "Input Node Name/Shape (" << input_names.size() << "):" << endl;
for (size_t i = 0; i < input_names.size(); i++) {
cout << "\t" << input_names[i] << " : " << print_shape(input_shapes[i]) << endl;
}
// print name/shape of outputs
std::vector<std::string> output_names = session.GetOutputNames();
std::vector<std::vector<int64_t> > output_shapes = session.GetOutputShapes();
cout << "Output Node Name/Shape (" << output_names.size() << "):" << endl;
for (size_t i = 0; i < output_names.size(); i++) {
cout << "\t" << output_names[i] << " : " << print_shape(output_shapes[i]) << endl;
}
// Assume model has 1 input node and 1 output node.
assert(input_names.size() == 1 && output_names.size() == 1);
// Create a single Ort tensor of random numbers
auto input_shape = input_shapes[0];
int total_number_elements = calculate_product(input_shape);
std::vector<float> input_tensor_values(total_number_elements);
std::generate(input_tensor_values.begin(), input_tensor_values.end(), [&] { return rand() % 255; }); // generate random numbers in the range [0, 255)
std::vector<Ort::Value> input_tensors;
input_tensors.push_back(Ort::Experimental::Value::CreateTensor<float>(input_tensor_values.data(), input_tensor_values.size(), input_shape));
// double-check the dimensions of the input tensor
assert(input_tensors[0].IsTensor() &&
input_tensors[0].GetTensorTypeAndShapeInfo().GetShape() == input_shape);
cout << "\ninput_tensor shape: " << print_shape(input_tensors[0].GetTensorTypeAndShapeInfo().GetShape()) << endl;
// pass data through model
cout << "Running model...";
try {
auto output_tensors = session.Run(session.GetInputNames(), input_tensors, session.GetOutputNames());
cout << "done" << endl;
// double-check the dimensions of the output tensors
// NOTE: the number of output tensors is equal to the number of output nodes specified in the Run() call
assert(output_tensors.size() == session.GetOutputNames().size() &&
output_tensors[0].IsTensor());
cout << "output_tensor_shape: " << print_shape(output_tensors[0].GetTensorTypeAndShapeInfo().GetShape()) << endl;
} catch (const Ort::Exception& exception) {
cout << "ERROR running model inference: " << exception.what() << endl;
exit(-1);
}
}
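As for the second part of the question (feeding a real image instead of random values), one way is to load it with OpenCV and copy the pixels into input_tensor_values before creating the tensor. A sketch, assuming the model expects a single-channel float input of shape {1, 1, H, W} and that OpenCV is available (the helper name is just for illustration):
#include <opencv2/opencv.hpp>
// Load a grayscale image, scale it to [0, 1] floats and flatten it into the
// buffer that CreateTensor<float> wraps (assumed input shape {1, 1, H, W}).
std::vector<float> image_to_tensor_values(const std::string& path, int H, int W) {
  cv::Mat img = cv::imread(path, cv::IMREAD_GRAYSCALE);
  cv::resize(img, img, cv::Size(W, H));
  cv::Mat img_float;
  img.convertTo(img_float, CV_32F, 1.0 / 255.0);
  return std::vector<float>(img_float.begin<float>(), img_float.end<float>());
}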
I'm using a one_vs_one_trainer and one_vs_one_decision_function to classify 128D face descriptors, and I want to detect unknown faces.
I'm detecting faces using OpenCV and my wrapper, then I followed the guide and computed the 128D face descriptors, which I stored in files. Next, I trained a one_vs_one classifier following this tutorial. Everything works perfectly, but when I try to classify an unknown face it still returns some label.
I used code from the guides, but if you want to look at my code, it is here.
Is there a better way to identify faces? Maybe it's simpler to use OpenCV's methods, or others from dlib?
Thanks to Davis!
Here is the forum thread on SourceForge.
The answer is:
Use a bunch of binary classifiers rather than one vs one. If all the binary
classifiers say they don't match then you know the person doesn't match any
of them.
And I implemented this as follows:
#include <iostream>
#include <ctime>
#include <vector>
#include <map>        // std::map for vote counting
#include <algorithm>  // std::find, std::max_element
#include <dlib/svm.h>
using namespace std;
using namespace dlib;
int main() {
typedef matrix<double, 128, 1> sample_type;
typedef histogram_intersection_kernel<sample_type> kernel_type;
typedef svm_c_trainer<kernel_type> trainer_type;
typedef decision_function<kernel_type> classifier_type;
std::vector<sample_type> samples;
std::vector<double> labels;
sample_type sample;
// Samples ->
sample = -0.104075,0.0353173,...,0.114782,-0.0360935;
samples.emplace_back(sample);
labels.emplace_back(0);
sample = -0.0842,-0.0103397,...,0.0938285,0.010045;
samples.emplace_back(sample);
labels.emplace_back(0);
sample = -0.0978358,0.0709425,...,0.052436,-0.0582029;
samples.emplace_back(sample);
labels.emplace_back(0);
sample = -0.126522,0.0319873,...,0.12045,-0.0277105;
samples.emplace_back(sample);
labels.emplace_back(0);
sample = -0.10335,-0.0261625,...,0.0600661,0.00703168,-8.67462e-05,-0.0598214,-0.104442,-0.046698,0.0553857,-0.0880691,0.0482511,0.0331484;
samples.emplace_back(sample);
labels.emplace_back(0);
sample = -0.0747794,0.0599716,...,-0.0440207,-6.45183e-05;
samples.emplace_back(sample);
labels.emplace_back(1);
sample = -0.0280804,0.0900723,...,-0.0267513,0.00824318;
samples.emplace_back(sample);
labels.emplace_back(1);
sample = -0.0721213,0.00700722,...,-0.0128318,0.100784;
samples.emplace_back(sample);
labels.emplace_back(1);
sample = -0.122747,0.0737782,0.0375799,...,0.0168201,-0.0246723;
samples.emplace_back(sample);
labels.emplace_back(1);
sample = -0.0218071,0.118063,...,-0.0735178,0.04046;
samples.emplace_back(sample);
labels.emplace_back(1);
sample = -0.0680787,0.0490121,-0.0228516,...,-0.0366242,0.0287891;
samples.emplace_back(sample);
labels.emplace_back(2);
sample = 0.00152394,0.107174,...,-0.0479925,0.0182667;
samples.emplace_back(sample);
labels.emplace_back(2);
sample = -0.0334521,0.165314,...,-0.0385227,-0.0215499;
samples.emplace_back(sample);
labels.emplace_back(2);
sample = 0.0276394,0.106774,...,-0.0496831,-0.020857;
samples.emplace_back(sample);
labels.emplace_back(2);
// <- Samples
// Unique labels ->
std::vector<double> total_labels;
for(double &label : labels) {
if(find(total_labels.begin(), total_labels.end(), label) == total_labels.end())
total_labels.emplace_back(label);
}
// <- Unique labels
// Init trainers ->
std::vector<trainer_type> trainers;
int num_trainers = total_labels.size() * (total_labels.size() - 1) / 2;
cout << "Number of trainers is " << num_trainers << endl;
for(int i = 0; i < num_trainers; i++) {
trainers.emplace_back(trainer_type());
trainers[i].set_kernel(kernel_type());
trainers[i].set_c(10);
}
// <- Init trainers
// Init classifiers ->
std::vector<pair<double, double>> classifiersLabels;
std::vector<classifier_type> classifiers;
int label1 = 0, label2 = 1;
for(trainer_type &trainer : trainers) {
std::vector<sample_type> samples4pair;
std::vector<double> labels4pair;
for(int i = 0; i < samples.size(); i++) {
if(labels[i] == total_labels[label1]) {
samples4pair.emplace_back(samples[i]);
labels4pair.emplace_back(-1);
}
if(labels[i] == total_labels[label2]) {
samples4pair.emplace_back(samples[i]);
labels4pair.emplace_back(+1);
}
}
classifiers.emplace_back(trainer.train(samples4pair, labels4pair));
classifiersLabels.emplace_back(make_pair(total_labels[label1],
total_labels[label2]));
label2++;
if(label2 == total_labels.size()) {
label1++;
label2 = label1 + 1;
}
}
// <- Init classifiers
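// Pairwise predictions whose absolute value falls below this margin are treated
// as "no match" for that pair; an unknown face can therefore end up with no votes.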
double threshold = 0.3;
auto classify = [&](){
std::map<double, int> votes;
for(int i = 0; i < classifiers.size(); i++) {
cout << "Classifier #" << i << ":" << endl;
double prediction = classifiers[i](sample);
cout << prediction << ": ";
if(abs(prediction) < threshold) {
cout << "-1" << endl;
} else if (prediction < 0) {
votes[classifiersLabels[i].first]++;
cout << classifiersLabels[i].first << endl;
} else {
votes[classifiersLabels[i].second]++;
cout << classifiersLabels[i].second << endl;
}
}
cout << "Votes: " << endl;
for(auto &vote : votes) {
cout << vote.first << ": " << vote.second << endl;
}
auto max = std::max_element(votes.begin(), votes.end(),
[](const pair<double, int>& p1, const pair<double, int>& p2) {
return p1.second < p2.second; });
double label = votes.empty() ? -1 : max->first;
cout << "Label is " << label << endl;
};
// Test ->
cout << endl;
sample = -0.0971093, ..., 0.123482, -0.0399552;
cout << "True: 0 - " << endl;
classify();
cout << endl;
sample = -0.0548414, ..., 0.0277335, 0.0460183;
cout << "True: 1 - " << endl;
classify();
cout << endl;
sample = -0.0456186,0.0617834,...,-0.0387607,0.0366309;
cout << "True: 1 - " << endl;
classify();
cout << endl;
sample = -0.0500396, 0.0947202, ..., -0.0540899, 0.0206803;
cout << "True: 2 - " << endl;
classify();
cout << endl;
sample = -0.0702862, 0.065316, ..., -0.0279446, 0.0453012;
cout << "Unknown - " << endl;
classify();
cout << endl;
sample = -0.0789684, 0.0632067, ..., 0.0330486, 0.0117508;
cout << "Unknown - " << endl;
classify();
cout << endl;
sample = -0.0941284, 0.0542927, ..., 0.00855513, 0.00840678;
cout << "Unknown - " << endl;
classify();
// <- Test
return 0;
}
I would like to convert a tensor to an image array using the tensor.data() method, but it doesn't work.
#include <torch/script.h> // One-stop header.
#include <iostream>
#include <memory>
#include <sstream>
#include <string>
#include <vector>
#include "itkImage.h"
#include "itkImageFileReader.h"
#include "itkImageFileWriter.h"
#include "itkImageRegionIterator.h"
//////////////////////////////////////////////////////
//Goal: load jit script model and segment myocardium
//Step: 1. load jit script model
// 2. load input image
// 3. predict by model
// 4. save the result to file
//////////////////////////////////////////////////////
typedef short PixelType;
const unsigned int Dimension = 3;
typedef itk::Image<PixelType, Dimension> ImageType;
typedef itk::ImageFileReader<ImageType> ReaderType;
typedef itk::ImageRegionIterator<ImageType> IteratorType;
bool itk2tensor(ImageType::Pointer itk_img, torch::Tensor &tensor_img) {
typename ImageType::RegionType region = itk_img->GetLargestPossibleRegion();
const typename ImageType::SizeType size = region.GetSize();
std::cout << "Input size: " << size[0] << ", " << size[1]<< ", " << size[2] << std::endl;
int len = size[0] * size[1] * size[2];
short rowdata[len];
int count = 0;
IteratorType iter(itk_img, itk_img->GetRequestedRegion());
// convert itk to array
for (iter.GoToBegin(); !iter.IsAtEnd(); ++iter) {
rowdata[count] = iter.Get();
count++;
}
std::cout << "Convert itk to array DONE!" << std::endl;
// convert array to tensor
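// from_blob() does not copy; clone() makes the tensor own its data, so it stays
// valid after the stack buffer rowdata goes out of scope.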
tensor_img = torch::from_blob(rowdata, {1, 1, (int)size[0], (int)size[1], (int)size[2]}, torch::kShort).clone();
tensor_img = tensor_img.toType(torch::kFloat);
tensor_img = tensor_img.to(torch::kCUDA);
tensor_img.set_requires_grad(0);
return true;
}
bool tensor2itk(torch::Tensor &t, ImageType::Pointer itk_img) {
std::cout << "tensor dtype = " << t.dtype() << std::endl;
std::cout << "tensor size = " << t.sizes() << std::endl;
t = t.toType(torch::kShort);
short * array = t.data<short>();
ImageType::IndexType start;
start[0] = 0; // first index on X
start[1] = 0; // first index on Y
start[2] = 0; // first index on Z
ImageType::SizeType size;
size[0] = t.size(2);
size[1] = t.size(3);
size[2] = t.size(4);
ImageType::RegionType region;
region.SetSize( size );
region.SetIndex( start );
itk_img->SetRegions( region );
itk_img->Allocate();
int len = size[0] * size[1] * size[2];
IteratorType iter(itk_img, itk_img->GetRequestedRegion());
int count = 0;
// convert array to itk
std::cout << "start!" << std::endl;
for (iter.GoToBegin(); !iter.IsAtEnd(); ++iter) {
short temp = *array++; // ERROR!
std::cout << temp << " ";
iter.Set(temp);
count++;
}
std::cout << "end!" << std::endl;
return true;
}
int main(int argc, const char* argv[]) {
int a, b, c;
if (argc != 4) {
std::cerr << "usage: automyo input jitmodel output\n";
return -1;
}
std::cout << "========= jit start =========\n";
// 1. load jit script model
std::cout << "Load script module: " << argv[2] << std::endl;
std::shared_ptr<torch::jit::script::Module> module = torch::jit::load(argv[2]);
module->to(at::kCUDA);
// assert(module != nullptr);
std::cout << "Load script module DONE" << std::endl;
// 2. load input image
const char* img_path = argv[1];
std::cout << "Load image: " << img_path << std::endl;
ReaderType::Pointer reader = ReaderType::New();
if (!img_path) {
std::cout << "Load input file error!" << std::endl;
return false;
}
reader->SetFileName(img_path);
reader->Update();
std::cout << "Load image DONE!" << std::endl;
ImageType::Pointer itk_img = reader->GetOutput();
torch::Tensor tensor_img;
if (!itk2tensor(itk_img, tensor_img)) {
std::cerr << "itk2tensor ERROR!" << std::endl;
}
else {
std::cout << "Convert array to tensor DONE!" << std::endl;
}
std::vector<torch::jit::IValue> inputs;
inputs.push_back(tensor_img);
// 3. predict by model
torch::Tensor y = module->forward(inputs).toTensor();
std::cout << "Inference DONE!" << std::endl;
// 4. save the result to file
torch::Tensor seg = y.gt(0.5);
// std::cout << seg << std::endl;
ImageType::Pointer out_itk_img = ImageType::New();
if (!tensor2itk(seg, out_itk_img)) {
std::cerr << "tensor2itk ERROR!" << std::endl;
}
else {
std::cout << "Convert tensor to itk DONE!" << std::endl;
}
std::cout << out_itk_img << std::endl;
return true;
}
The runtime log is shown below:
Load script module: model_myo_jit.pt
Load script module DONE
Load image: patch_6.nii.gz
Load image DONE!
Input size: 128, 128, 128
Convert itk to array DONE!
Convert array to tensor DONE!
Inference DONE!
tensor dtype = unsigned char
tensor size = [1, 1, 96, 96, 96]
start!
Segmentation fault (core dumped)
Why does this happen, and how do I convert it correctly?
I have found the solution. When I convert y to kCPU, it works, because it was on CUDA before.
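In my case the change is essentially this (a sketch; only the lines around step 4 change, the rest of the code stays the same):
// 4. save the result to file
torch::Tensor seg = y.gt(0.5);
seg = seg.to(torch::kCPU);   // move the result back from CUDA before tensor2itk reads its raw data
if (!tensor2itk(seg, out_itk_img)) {
    std::cerr << "tensor2itk ERROR!" << std::endl;
}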