How to convert a tensor to an image array in C++?

I would like to convert a tensor to an image array using the tensor.data() method, but it doesn't work.
#include <torch/script.h> // One-stop header.

#include <iostream>
#include <memory>
#include <sstream>
#include <string>
#include <vector>

#include "itkImage.h"
#include "itkImageFileReader.h"
#include "itkImageFileWriter.h"
#include "itkImageRegionIterator.h"

//////////////////////////////////////////////////////
// Goal: load jit script model and segment myocardium
// Step: 1. load jit script model
//       2. load input image
//       3. predict by model
//       4. save the result to file
//////////////////////////////////////////////////////

typedef short PixelType;
const unsigned int Dimension = 3;
typedef itk::Image<PixelType, Dimension> ImageType;
typedef itk::ImageFileReader<ImageType> ReaderType;
typedef itk::ImageRegionIterator<ImageType> IteratorType;

bool itk2tensor(ImageType::Pointer itk_img, torch::Tensor &tensor_img) {
    typename ImageType::RegionType region = itk_img->GetLargestPossibleRegion();
    const typename ImageType::SizeType size = region.GetSize();
    std::cout << "Input size: " << size[0] << ", " << size[1] << ", " << size[2] << std::endl;

    int len = size[0] * size[1] * size[2];
    short rowdata[len];
    int count = 0;
    IteratorType iter(itk_img, itk_img->GetRequestedRegion());

    // convert itk to array
    for (iter.GoToBegin(); !iter.IsAtEnd(); ++iter) {
        rowdata[count] = iter.Get();
        count++;
    }
    std::cout << "Convert itk to array DONE!" << std::endl;

    // convert array to tensor
    tensor_img = torch::from_blob(rowdata, {1, 1, (int)size[0], (int)size[1], (int)size[2]}, torch::kShort).clone();
    tensor_img = tensor_img.toType(torch::kFloat);
    tensor_img = tensor_img.to(torch::kCUDA);
    tensor_img.set_requires_grad(0);
    return true;
}

bool tensor2itk(torch::Tensor &t, ImageType::Pointer itk_img) {
    std::cout << "tensor dtype = " << t.dtype() << std::endl;
    std::cout << "tensor size = " << t.sizes() << std::endl;
    t = t.toType(torch::kShort);
    short * array = t.data<short>();

    ImageType::IndexType start;
    start[0] = 0; // first index on X
    start[1] = 0; // first index on Y
    start[2] = 0; // first index on Z

    ImageType::SizeType size;
    size[0] = t.size(2);
    size[1] = t.size(3);
    size[2] = t.size(4);

    ImageType::RegionType region;
    region.SetSize( size );
    region.SetIndex( start );
    itk_img->SetRegions( region );
    itk_img->Allocate();

    int len = size[0] * size[1] * size[2];
    IteratorType iter(itk_img, itk_img->GetRequestedRegion());
    int count = 0;

    // convert array to itk
    std::cout << "start!" << std::endl;
    for (iter.GoToBegin(); !iter.IsAtEnd(); ++iter) {
        short temp = *array++; // ERROR!
        std::cout << temp << " ";
        iter.Set(temp);
        count++;
    }
    std::cout << "end!" << std::endl;
    return true;
}

int main(int argc, const char* argv[]) {
    int a, b, c;
    if (argc != 4) {
        std::cerr << "usage: automyo input jitmodel output\n";
        return -1;
    }
    std::cout << "========= jit start =========\n";

    // 1. load jit script model
    std::cout << "Load script module: " << argv[2] << std::endl;
    std::shared_ptr<torch::jit::script::Module> module = torch::jit::load(argv[2]);
    module->to(at::kCUDA);
    // assert(module != nullptr);
    std::cout << "Load script module DONE" << std::endl;

    // 2. load input image
    const char* img_path = argv[1];
    std::cout << "Load image: " << img_path << std::endl;
    ReaderType::Pointer reader = ReaderType::New();
    if (!img_path) {
        std::cout << "Load input file error!" << std::endl;
        return false;
    }
    reader->SetFileName(img_path);
    reader->Update();
    std::cout << "Load image DONE!" << std::endl;

    ImageType::Pointer itk_img = reader->GetOutput();
    torch::Tensor tensor_img;
    if (!itk2tensor(itk_img, tensor_img)) {
        std::cerr << "itk2tensor ERROR!" << std::endl;
    }
    else {
        std::cout << "Convert array to tensor DONE!" << std::endl;
    }

    std::vector<torch::jit::IValue> inputs;
    inputs.push_back(tensor_img);

    // 3. predict by model
    torch::Tensor y = module->forward(inputs).toTensor();
    std::cout << "Inference DONE!" << std::endl;

    // 4. save the result to file
    torch::Tensor seg = y.gt(0.5);
    // std::cout << seg << std::endl;
    ImageType::Pointer out_itk_img = ImageType::New();
    if (!tensor2itk(seg, out_itk_img)) {
        std::cerr << "tensor2itk ERROR!" << std::endl;
    }
    else {
        std::cout << "Convert tensor to itk DONE!" << std::endl;
    }
    std::cout << out_itk_img << std::endl;
    return true;
}
The runtime log is shown below:
Load script module: model_myo_jit.pt
Load script module DONE
Load image: patch_6.nii.gz
Load image DONE!
Input size: 128, 128, 128
Convert itk to array DONE!
Convert array to tensor DONE!
Inference DONE!
tensor dtype = unsigned char
tensor size = [1, 1, 96, 96, 96]
start!
Segmentation fault (core dumped)
Why does this happen, and how do I convert it correctly?

I have found the solution: when I convert y to kCPU first, it works, because it was on CUDA before.
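For reference, here is a minimal sketch of that fix (assuming the same variable names as in the code above, and the older data<short>() accessor used there): bring the CUDA result back to host memory before tensor2itk reads its raw buffer.

    // Sketch of the fix: move the prediction to the CPU before tensor2itk touches it.
    torch::Tensor seg = y.gt(0.5).toType(torch::kShort).to(torch::kCPU).contiguous();
    ImageType::Pointer out_itk_img = ImageType::New();
    tensor2itk(seg, out_itk_img);   // data<short>() now returns a valid host pointer

Calling data<short>() on a tensor that still lives on the GPU returns a device pointer, and dereferencing that pointer on the CPU is what caused the segmentation fault.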

Related

Error loading ONNX file (converted YOLOv7 model) in OnnxRuntime/C++

I trained a model with YOLOv7 in Python and then converted the model to ONNX in order to open it in C++ with OpenCV.
It seems to work fine in Python on Colab, but when I try to run it in C++ I get:
Inference Execution Provider: CPU
Num Input Nodes: 1
Num Output Nodes: 1
Input Name: images
Input Type: float
Input Dimensions: [1, 3, 640, 640]
Output Name: output
Output Type: float
Output Dimensions: [-1, 7]
input tensor size: 1228800
output tensor size: 18446744073709551609
terminate called after throwing an instance of 'std::length_error'
what(): cannot create std::vector larger than max_size()
Aborted (core dumped)
It seems to read the output dimensions wrong somehow, which leads to a -1; the output size then becomes a negative number which, when converted to an unsigned 64-bit integer, becomes astronomical and crashes the program when it tries to allocate the vector. But why is it reading it wrong?
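One common workaround, sketched below under the assumption that the variable names match the code that follows (this is not taken from the linked repository), is to stop pre-allocating the output buffer from the static shape and instead let onnxruntime allocate the output, then query the real shape after Run():

    // Sketch: the static output shape contains -1 for the dynamic detection count,
    // so don't size a buffer from it; ask the session to allocate the output instead.
    std::vector<Ort::Value> outputs = session.Run(
        Ort::RunOptions{nullptr},
        inputNames.data(), inputTensors.data(), 1,
        outputNames.data(), 1);
    std::vector<int64_t> realDims = outputs[0].GetTensorTypeAndShapeInfo().GetShape();  // e.g. [N, 7]
    const float* net_pred = outputs[0].GetTensorMutableData<float>();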
The code I'm trying to run is from here:
https://github.com/Neuralearn/onnx_cplusplus/blob/main/main.cpp
int main(int argc, char* argv[])
{
    bool useCUDA{false};
    const char* useCUDAFlag = "--use_cuda";
    const char* useCPUFlag = "--use_cpu";
    if (argc == 1)
    {
        useCUDA = false;
    }
    else if ((argc == 2) && (strcmp(argv[1], useCUDAFlag) == 0))
    {
        useCUDA = true;
    }
    else if ((argc == 2) && (strcmp(argv[1], useCPUFlag) == 0))
    {
        useCUDA = false;
    }
    else if ((argc == 2) && (strcmp(argv[1], useCUDAFlag) != 0))
    {
        useCUDA = false;
    }
    else
    {
        throw std::runtime_error{"Too many arguments."};
    }
    if (useCUDA)
    {
        std::cout << "Inference Execution Provider: CUDA" << std::endl;
    }
    else
    {
        std::cout << "Inference Execution Provider: CPU" << std::endl;
    }

    /*****************************************************************************************************************/

    std::string instanceName{"image-classification-inference"};
    //std::string modelFilepath{"best_yolox_nano.onnx"};
    std::string modelFilepath{"best.onnx"};
    //std::string imageFilepath{"demo.jpg"};
    std::string imageFilepath{"train1.jpg"};
    std::string labelFilepath{"synset.txt"};
    std::vector<std::string> labels{readLabels(labelFilepath)};
    //cerr<<"aaaa"<<endl;

    Ort::Env env(OrtLoggingLevel::ORT_LOGGING_LEVEL_WARNING,
                 instanceName.c_str());
    Ort::SessionOptions sessionOptions;
    sessionOptions.SetIntraOpNumThreads(1);
    //cerr<<"bbb"<<endl;
    sessionOptions.SetGraphOptimizationLevel(
        GraphOptimizationLevel::ORT_ENABLE_EXTENDED);
    Ort::Session session(env, modelFilepath.c_str(), sessionOptions);

    Ort::AllocatorWithDefaultOptions allocator;
    size_t numInputNodes = session.GetInputCount();
    size_t numOutputNodes = session.GetOutputCount();
    //cerr<<"ccc"<<endl;
    std::cout << "Num Input Nodes: " << numInputNodes << std::endl;
    std::cout << "Num Output Nodes: " << numOutputNodes << std::endl;

    const char* inputName = session.GetInputName(0, allocator);
    std::cout << "Input Name: " << inputName << std::endl;
    Ort::TypeInfo inputTypeInfo = session.GetInputTypeInfo(0);
    auto inputTensorInfo = inputTypeInfo.GetTensorTypeAndShapeInfo();
    ONNXTensorElementDataType inputType = inputTensorInfo.GetElementType();
    std::cout << "Input Type: " << inputType << std::endl;
    std::vector<int64_t> inputDims = inputTensorInfo.GetShape();
    std::cout << "Input Dimensions: " << inputDims << std::endl;

    const char* outputName = session.GetOutputName(0, allocator);
    std::cout << "Output Name: " << outputName << std::endl;
    Ort::TypeInfo outputTypeInfo = session.GetOutputTypeInfo(0);
    auto outputTensorInfo = outputTypeInfo.GetTensorTypeAndShapeInfo();
    ONNXTensorElementDataType outputType = outputTensorInfo.GetElementType();
    std::cout << "Output Type: " << outputType << std::endl;
    std::vector<int64_t> outputDims = outputTensorInfo.GetShape();
    std::cout << "Output Dimensions: " << outputDims << std::endl;

    cv::Mat imageBGR = cv::imread(imageFilepath, cv::ImreadModes::IMREAD_COLOR);
    cv::Mat preprocessedImage;
    cv::Mat resizedImage = static_resize(imageBGR);
    cv::dnn::blobFromImage(resizedImage, preprocessedImage);
    // cv::imwrite("image.jpg", resizedImage);

    size_t inputTensorSize = vectorProduct(inputDims);
    cerr << "input tensor size: " << inputTensorSize << endl;
    std::vector<float> inputTensorValues(inputTensorSize);
    inputTensorValues.assign(preprocessedImage.begin<float>(),
                             preprocessedImage.end<float>());

    size_t outputTensorSize = vectorProduct(outputDims);
    for (auto x : outputDims) cerr << x << " "; cerr << endl;
    cerr << "output tensor size: " << outputTensorSize << endl;
    std::vector<float> outputTensorValues(outputTensorSize);

    std::vector<const char*> inputNames{inputName};
    std::vector<const char*> outputNames{outputName};
    std::vector<Ort::Value> inputTensors;
    std::vector<Ort::Value> outputTensors;

    Ort::MemoryInfo memoryInfo = Ort::MemoryInfo::CreateCpu(
        OrtAllocatorType::OrtArenaAllocator, OrtMemType::OrtMemTypeDefault);
    inputTensors.push_back(Ort::Value::CreateTensor<float>(
        memoryInfo, inputTensorValues.data(), inputTensorSize, inputDims.data(),
        inputDims.size()));
    outputTensors.push_back(Ort::Value::CreateTensor<float>(
        memoryInfo, outputTensorValues.data(), outputTensorSize,
        outputDims.data(), outputDims.size()));

    session.Run(Ort::RunOptions{nullptr}, inputNames.data(),
                inputTensors.data(), 1, outputNames.data(),
                outputTensors.data(), 1);

    cv::Mat image = imread_t(imageFilepath);
    int img_w = image.cols;
    int img_h = image.rows;
    float scale = std::min(INPUT_W / (image.cols*1.0), INPUT_H / (image.rows*1.0));
    std::vector<Object> objects;
    const float * net_pred = outputTensorValues.data();
    decode_outputs(net_pred, objects, scale, img_w, img_h);
    draw_objects(image, objects);
}

PyTorch and TorchVision FasterRCNN interpreting the output in C++ GenericDict

I'm trying to interpret the output of FasterRCNN in C++ and I'm fighting with the GenericDict type.
My code is as follows:
#include <opencv4/opencv2/opencv.hpp>
#include <opencv4/opencv2/shape.hpp>
#include <opencv4/opencv2/imgcodecs.hpp>
#include <opencv4/opencv2/highgui.hpp>
#include <opencv4/opencv2/imgproc.hpp>
#include <opencv4/opencv2/core/utility.hpp>
#include <opencv4/opencv2/core/mat.hpp>

#include <c10/cuda/CUDAStream.h>
#include <torch/csrc/autograd/grad_mode.h>
#include <torch/csrc/api/include/torch/torch.h>
#include <torch/script.h>
#include <torchvision/vision.h>
#include <torchvision/nms.h>

#include <iostream>
#include <memory>
#include <string>

int main(int argc, const char* argv[])
{
    if (argc != 3)
    {
        printf("usage: %s <path-to-exported-script-module> <image_to_test>\n", argv[0]);
        return -1;
    }

    std::string module_filename = argv[1];
    std::string image_file = argv[2];

    try
    {
        cv::Mat input_img = cv::imread(image_file, cv::IMREAD_GRAYSCALE);
        torch::autograd::AutoGradMode guard(false);

        // Deserialize the ScriptModule from a file using torch::jit::load().
        torch::jit::script::Module module = torch::jit::load(module_filename);
        assert(module.buffers().size() > 0);
        module.eval();

        // Assume that the entire model is on the same device.
        // We just put input to this device.
        auto device = (*std::begin(module.buffers())).device();

        const int height = input_img.rows;
        const int width = input_img.cols;
        const int channels = 1;

        auto input = torch::from_blob(input_img.data, {height, width, channels}, torch::kUInt8);
        // HWC to CHW
        // input = input.to(device, torch::kFloat).permute({2, 0, 1}).contiguous();
        input = input.to(device, torch::kFloat).permute({2, 0, 1}).contiguous();

        // run the network
        std::vector<at::Tensor> inputs;
        inputs.push_back(input);
        auto output = module.forward({inputs});
        if (device.is_cuda())
            c10::cuda::getCurrentCUDAStream().synchronize();

        std::cout << "output: " << output << std::endl;

        auto outputs = output.toTuple()->elements();
        std::cout << "outputs: " << outputs << std::endl;

        for (auto& elem : outputs)
        {
            std::cout << "elem: " << elem << std::endl;
            if (elem.isGenericDict())
            {
                std::cout << "elem is generic dict: " << elem << std::endl;
                c10::Dict<c10::IValue, c10::IValue> dict = elem.toGenericDict();

                auto elem_vector_0 = dict.at(c10::IValue("scores")).toIntVector();
                auto elem_vector_1 = dict.at(c10::IValue("boxes")).toIntVector();
                auto elem_vector_2 = dict.at(c10::IValue("labels")).toIntVector();

                for (auto& ee0 : elem_vector_0)
                {
                    std::cout << "elem_vector_0" << ee0 << std::endl;
                }
                for (auto& ee0 : elem_vector_1)
                {
                    std::cout << "elem_vector_1" << ee0 << std::endl;
                }
                for (auto& ee0 : elem_vector_2)
                {
                    std::cout << "elem_vector_2" << ee0 << std::endl;
                }
            }
        }

        cv::namedWindow("Display Image", cv::WINDOW_AUTOSIZE);
        cv::imshow("Display Image", input_img);
        cv::waitKey(0);
    }
    catch (const c10::Error& e)
    {
        std::cerr << e.what() << std::endl;
        return -1;
    }
    catch (const cv::Exception& e)
    {
        std::cerr << e.what() << std::endl;
        return -1;
    }
    catch (const std::exception& e)
    {
        std::cerr << e.what() << std::endl;
        return -1;
    }
    catch (...)
    {
        std::cerr << "Unknown error" << std::endl;
        return -1;
    }

    std::cout << "ok\n";
    return 0;
}
and the output is:
(base) fstrati@fstrati-desktop:~/libtorch_shared_cuda_10.1/load_and_run_model/Release$ ./load_and_run_model ./torch_script_v0.2.pt test_img.png
[W faster_rcnn.py:95] Warning: RCNN always returns a (Losses, Detections) tuple in scripting (function )
output: ({}, [{boxes: [ CPUFloatType{0,4} ], labels: [ CPULongType{0} ], scores: [ CPUFloatType{0} ]}])
outputs: {} [{boxes: [ CPUFloatType{0,4} ], labels: [ CPULongType{0} ], scores: [ CPUFloatType{0} ]}]
elem: {}
elem is generic dict: {}
Argument passed to at() was not in the map.
I'm struggling to find a way to extract the boxes, labels and scores from the GenericDict.
This map is strange: I cannot iterate over it, and I cannot access the keys and values with it->first / it->second.
Any ideas?
Thanks in advance
I think the following method can resolve the main problem here,
output = module.forward(inputs);
auto detections = output.toTuple()->elements().at(1).toList().get(0).toGenericDict();
std::cout << ">>> detections labels: " << detections.at("labels") << std::endl;
std::cout << ">>> detections boxes: " << detections.at("boxes") << std::endl;
std::cout << ">>> detections scores: " << detections.at("scores") << std::endl;
Besides, I've added an executable file https://github.com/zhiqwang/yolov5-rt-stack/tree/master/deployment/libtorch to show how libtorch works.
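If you also need to walk the dictionary without knowing its keys in advance, c10::Dict can be iterated directly; a small sketch (not part of the original answer, reusing the detections variable from above):

    // Sketch: generic iteration over the returned c10::Dict<IValue, IValue>.
    for (auto it = detections.begin(); it != detections.end(); ++it) {
        std::cout << it->key().toStringRef() << ": " << it->value().toTensor() << std::endl;
    }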

How to use an ONNX model in C++ code on Linux?

I trained a U-Net-based model in PyTorch. It takes an image as input and returns a mask.
After training I saved it to ONNX format, ran it with the onnxruntime Python module, and it worked like a charm.
Now I want to use this model in C++ code on Linux.
Is there a simple tutorial (hello world) that explains:
How to incorporate the onnxruntime module into a C++ program on Ubuntu (install the shared lib and so on)?
How to properly load an image and pass it to the model?
P.S. I found only this: https://www.onnxruntime.ai/docs/tutorials/samples_catalog.html#cc
But there is no info about loading an image and converting it to an ONNX-compatible format in C++ code.
For installation on Linux, you should refer to https://www.onnxruntime.ai/.
You can refer to the following code for how to load and run an ONNX model.
#include <algorithm>  // std::generate
#include <assert.h>
#include <iostream>
#include <sstream>
#include <vector>
#include <experimental_onnxruntime_cxx_api.h>

// pretty prints a shape dimension vector
std::string print_shape(const std::vector<int64_t>& v) {
    std::stringstream ss("");
    for (size_t i = 0; i < v.size() - 1; i++)
        ss << v[i] << "x";
    ss << v[v.size() - 1];
    return ss.str();
}

int calculate_product(const std::vector<int64_t>& v) {
    int total = 1;
    for (auto& i : v) total *= i;
    return total;
}

using namespace std;

int main(int argc, char** argv) {
    if (argc != 2) {
        cout << "Usage: ./onnx-api-example <onnx_model.onnx>" << endl;
        return -1;
    }

#ifdef _WIN32
    std::string str = argv[1];
    std::wstring wide_string = std::wstring(str.begin(), str.end());
    std::basic_string<ORTCHAR_T> model_file = std::basic_string<ORTCHAR_T>(wide_string);
#else
    std::string model_file = argv[1];
#endif

    // onnxruntime setup
    Ort::Env env(ORT_LOGGING_LEVEL_WARNING, "example-model-explorer");
    Ort::SessionOptions session_options;
    Ort::Experimental::Session session = Ort::Experimental::Session(env, model_file, session_options);  // access experimental components via the Experimental namespace

    // print name/shape of inputs
    std::vector<std::string> input_names = session.GetInputNames();
    std::vector<std::vector<int64_t> > input_shapes = session.GetInputShapes();
    cout << "Input Node Name/Shape (" << input_names.size() << "):" << endl;
    for (size_t i = 0; i < input_names.size(); i++) {
        cout << "\t" << input_names[i] << " : " << print_shape(input_shapes[i]) << endl;
    }

    // print name/shape of outputs
    std::vector<std::string> output_names = session.GetOutputNames();
    std::vector<std::vector<int64_t> > output_shapes = session.GetOutputShapes();
    cout << "Output Node Name/Shape (" << output_names.size() << "):" << endl;
    for (size_t i = 0; i < output_names.size(); i++) {
        cout << "\t" << output_names[i] << " : " << print_shape(output_shapes[i]) << endl;
    }

    // Assume model has 1 input node and 1 output node.
    assert(input_names.size() == 1 && output_names.size() == 1);

    // Create a single Ort tensor of random numbers
    auto input_shape = input_shapes[0];
    int total_number_elements = calculate_product(input_shape);
    std::vector<float> input_tensor_values(total_number_elements);
    std::generate(input_tensor_values.begin(), input_tensor_values.end(), [&] { return rand() % 255; });  // generate random numbers in the range [0, 255]
    std::vector<Ort::Value> input_tensors;
    input_tensors.push_back(Ort::Experimental::Value::CreateTensor<float>(input_tensor_values.data(), input_tensor_values.size(), input_shape));

    // double-check the dimensions of the input tensor
    assert(input_tensors[0].IsTensor() &&
           input_tensors[0].GetTensorTypeAndShapeInfo().GetShape() == input_shape);
    cout << "\ninput_tensor shape: " << print_shape(input_tensors[0].GetTensorTypeAndShapeInfo().GetShape()) << endl;

    // pass data through model
    cout << "Running model...";
    try {
        auto output_tensors = session.Run(session.GetInputNames(), input_tensors, session.GetOutputNames());
        cout << "done" << endl;

        // double-check the dimensions of the output tensors
        // NOTE: the number of output tensors is equal to the number of output nodes specified in the Run() call
        assert(output_tensors.size() == session.GetOutputNames().size() &&
               output_tensors[0].IsTensor());
        cout << "output_tensor_shape: " << print_shape(output_tensors[0].GetTensorTypeAndShapeInfo().GetShape()) << endl;
    } catch (const Ort::Exception& exception) {
        cout << "ERROR running model inference: " << exception.what() << endl;
        exit(-1);
    }
}
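The sample above only feeds random numbers into the model. For the second part of the question (loading an image), below is a minimal sketch using OpenCV; this is my own assumption rather than part of the linked sample, and it presumes the model input is a [1, 3, H, W] float tensor with whatever normalization was used during training:

    // Sketch: load an image with OpenCV and fill input_tensor_values instead of rand().
    // Requires #include <opencv2/opencv.hpp> and linking against OpenCV.
    cv::Mat img = cv::imread("input.png", cv::IMREAD_COLOR);
    cv::Mat resized;
    cv::resize(img, resized, cv::Size((int)input_shape[3], (int)input_shape[2]));
    // blobFromImage converts HWC uint8 BGR to NCHW float, optionally rescaling and swapping to RGB.
    cv::Mat blob = cv::dnn::blobFromImage(resized, 1.0 / 255.0, cv::Size(), cv::Scalar(), /*swapRB=*/true);
    std::vector<float> input_tensor_values(blob.begin<float>(), blob.end<float>());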

How to identify a stranger with dlib's one-vs-one classifier

I'm using a one_vs_one_trainer and one_vs_one_decision_function to classify 128D face descriptors, and I want to detect unknown faces.
I'm detecting faces using OpenCV and my wrapper, then I followed the guide and computed the 128D face descriptors, which I stored in files. Next, I trained the one_vs_one classifier following this tutorial. Everything works perfectly, but when I try to classify an unknown face it returns some label.
I used code from the guides, but if you want to look at my code - it is here.
Is there a better way to identify faces? Maybe it's simpler to use OpenCV's methods, or others from dlib?
Thanks to Davis!
Here is the forum thread on SourceForge.
The answer is:
Use a bunch of binary classifiers rather than one vs one. If all the binary classifiers say they don't match, then you know the person doesn't match any of them.
And I implemented this as follows:
#include <iostream>
#include <ctime>
#include <vector>
#include <dlib/svm.h>

using namespace std;
using namespace dlib;

int main() {
    typedef matrix<double, 128, 1> sample_type;
    typedef histogram_intersection_kernel<sample_type> kernel_type;
    typedef svm_c_trainer<kernel_type> trainer_type;
    typedef decision_function<kernel_type> classifier_type;

    std::vector<sample_type> samples;
    std::vector<double> labels;
    sample_type sample;

    // Samples ->
    sample = -0.104075,0.0353173,...,0.114782,-0.0360935;
    samples.emplace_back(sample);
    labels.emplace_back(0);

    sample = -0.0842,-0.0103397,...,0.0938285,0.010045;
    samples.emplace_back(sample);
    labels.emplace_back(0);

    sample = -0.0978358,0.0709425,...,0.052436,-0.0582029;
    samples.emplace_back(sample);
    labels.emplace_back(0);

    sample = -0.126522,0.0319873,...,0.12045,-0.0277105;
    samples.emplace_back(sample);
    labels.emplace_back(0);

    sample = -0.10335,-0.0261625,...,0.0600661,0.00703168,-8.67462e-05,-0.0598214,-0.104442,-0.046698,0.0553857,-0.0880691,0.0482511,0.0331484;
    samples.emplace_back(sample);
    labels.emplace_back(0);

    sample = -0.0747794,0.0599716,...,-0.0440207,-6.45183e-05;
    samples.emplace_back(sample);
    labels.emplace_back(1);

    sample = -0.0280804,0.0900723,...,-0.0267513,0.00824318;
    samples.emplace_back(sample);
    labels.emplace_back(1);

    sample = -0.0721213,0.00700722,...,-0.0128318,0.100784;
    samples.emplace_back(sample);
    labels.emplace_back(1);

    sample = -0.122747,0.0737782,0.0375799,...,0.0168201,-0.0246723;
    samples.emplace_back(sample);
    labels.emplace_back(1);

    sample = -0.0218071,0.118063,...,-0.0735178,0.04046;
    samples.emplace_back(sample);
    labels.emplace_back(1);

    sample = -0.0680787,0.0490121,-0.0228516,...,-0.0366242,0.0287891;
    samples.emplace_back(sample);
    labels.emplace_back(2);

    sample = 0.00152394,0.107174,...,-0.0479925,0.0182667;
    samples.emplace_back(sample);
    labels.emplace_back(2);

    sample = -0.0334521,0.165314,...,-0.0385227,-0.0215499;
    samples.emplace_back(sample);
    labels.emplace_back(2);

    sample = 0.0276394,0.106774,...,-0.0496831,-0.020857;
    samples.emplace_back(sample);
    labels.emplace_back(2);
    // <- Samples

    // Unique labels ->
    std::vector<double> total_labels;
    for (double &label : labels) {
        if (find(total_labels.begin(), total_labels.end(), label) == total_labels.end())
            total_labels.emplace_back(label);
    }
    // <- Unique labels

    // Init trainers ->
    std::vector<trainer_type> trainers;
    int num_trainers = total_labels.size() * (total_labels.size() - 1) / 2;
    cout << "Number of trainers is " << num_trainers << endl;
    for (int i = 0; i < num_trainers; i++) {
        trainers.emplace_back(trainer_type());
        trainers[i].set_kernel(kernel_type());
        trainers[i].set_c(10);
    }
    // <- Init trainers

    // Init classifiers ->
    std::vector<pair<double, double>> classifiersLabels;
    std::vector<classifier_type> classifiers;
    int label1 = 0, label2 = 1;
    for (trainer_type &trainer : trainers) {
        std::vector<sample_type> samples4pair;
        std::vector<double> labels4pair;
        for (int i = 0; i < samples.size(); i++) {
            if (labels[i] == total_labels[label1]) {
                samples4pair.emplace_back(samples[i]);
                labels4pair.emplace_back(-1);
            }
            if (labels[i] == total_labels[label2]) {
                samples4pair.emplace_back(samples[i]);
                labels4pair.emplace_back(+1);
            }
        }
        classifiers.emplace_back(trainer.train(samples4pair, labels4pair));
        classifiersLabels.emplace_back(make_pair(total_labels[label1],
                                                 total_labels[label2]));
        label2++;
        if (label2 == total_labels.size()) {
            label1++;
            label2 = label1 + 1;
        }
    }
    // <- Init classifiers

    double threshold = 0.3;
    auto classify = [&]() {
        std::map<double, int> votes;
        for (int i = 0; i < classifiers.size(); i++) {
            cout << "Classifier #" << i << ":" << endl;
            double prediction = classifiers[i](sample);
            cout << prediction << ": ";
            if (abs(prediction) < threshold) {
                cout << "-1" << endl;
            } else if (prediction < 0) {
                votes[classifiersLabels[i].first]++;
                cout << classifiersLabels[i].first << endl;
            } else {
                votes[classifiersLabels[i].second]++;
                cout << classifiersLabels[i].second << endl;
            }
        }
        cout << "Votes: " << endl;
        for (auto &vote : votes) {
            cout << vote.first << ": " << vote.second << endl;
        }
        auto max = std::max_element(votes.begin(), votes.end(),
            [](const pair<double, int>& p1, const pair<double, int>& p2) {
                return p1.second < p2.second; });
        double label = votes.empty() ? -1 : max->first;
        cout << "Label is " << label << endl;
    };

    // Test ->
    cout << endl;
    sample = -0.0971093, ..., 0.123482, -0.0399552;
    cout << "True: 0 - " << endl;
    classify();
    cout << endl;

    sample = -0.0548414, ..., 0.0277335, 0.0460183;
    cout << "True: 1 - " << endl;
    classify();
    cout << endl;

    sample = -0.0456186,0.0617834,...,-0.0387607,0.0366309;
    cout << "True: 1 - " << endl;
    classify();
    cout << endl;

    sample = -0.0500396, 0.0947202, ..., -0.0540899, 0.0206803;
    cout << "True: 2 - " << endl;
    classify();
    cout << endl;

    sample = -0.0702862, 0.065316, ..., -0.0279446, 0.0453012;
    cout << "Unknown - " << endl;
    classify();
    cout << endl;

    sample = -0.0789684, 0.0632067, ..., 0.0330486, 0.0117508;
    cout << "Unknown - " << endl;
    classify();
    cout << endl;

    sample = -0.0941284, 0.0542927, ..., 0.00855513, 0.00840678;
    cout << "Unknown - " << endl;
    classify();
    // <- Test

    return 0;
}

Surface parametrization

I am trying to get a parametrized surface on a surface mesh (which is read from an STL file). I read some examples about parametrization provided in the CGAL examples directory. I learned that a seam line should be provided in order to get a parametric surface on an arbitrary surface, but I still don't understand how to make the seam line. Below is my code so far. In summary, what I want to know is:
1) When CGAL::Parameterization_mesh_feature_extractor is used, how can I get the vertices on the feature curves and make a seam line from those vertices?
2) Does CGAL provide a way to get the intersection curve of a given surface and a cutting plane, so that I can get a parametrized surface on part of the given surface?
#include <cstdio>
#include <ctime>
#include <iostream>
#include <iomanip>
#include <algorithm>
#include <fstream>

#include <CGAL/IO/io.h>
#include <CGAL/IO/STL_reader.h>
#include <CGAL/Polyhedron_3.h>
#include <CGAL/Simple_cartesian.h>
#include <CGAL/polygon_soup_to_polyhedron_3.h>
#include <CGAL/Parameterization_polyhedron_adaptor_3.h>
#include <CGAL/Parameterization_mesh_patch_3.h>
#include <CGAL/parameterize.h>

int main(int argc, char* argv[]) {
    clock_t time1, time2;
    double read_time, write_time, build_time;

    if (argc == 1) {
        std::cout << "Please, give me a filename" << std::endl;
        return 0;
    }
    std::ifstream infile(argv[1]);
    if (infile.bad()) {
        std::cout << "Infile not found or file corrupt" << std::endl;
        return 1;
    }

    std::vector<CGAL::cpp11::array<double, 3> > points;
    std::vector<CGAL::cpp11::array<int, 3> > triangles;

    time1 = clock();
    if (!CGAL::read_STL(infile, points, triangles)) {
        std::cerr << "Error: invalid STL file" << std::endl;
        return 0;
    }
    time2 = clock();
    read_time = float(time2 - time1) / CLOCKS_PER_SEC;
    fprintf(stdout, "Read time : %5.2f sec\n", read_time);

    // Write polyhedron in Tecplot format
    std::ofstream ofs("mesh.dat");
    CGAL::set_ascii_mode(ofs);
    time1 = clock();
    ofs << "TITLE=\"\"" << std::endl;
    ofs << "VARIABLES=\"X\" \"Y\" \"Z\"" << std::endl;
    ofs << "ZONE T=\"None\" N=" << points.size() << " E=" << triangles.size() << " F=FEPOINT ET=TRIANGLE" << std::endl;
    ofs.setf(std::ios::fixed);
    ofs.precision(6);
    for (std::vector<CGAL::cpp11::array<double, 3> >::iterator i = points.begin(); i != points.end(); ++i) {
        ofs << (*i)[0] << " " << (*i)[1] << " " << (*i)[2] << std::endl;
    }
    for (std::vector<CGAL::cpp11::array<int, 3> >::iterator i = triangles.begin(); i != triangles.end(); ++i) {
        ofs << (*i)[0]+1 << " " << (*i)[1]+1 << " " << (*i)[2]+1 << std::endl;
    }
    time2 = clock();
    write_time = float(time2 - time1) / CLOCKS_PER_SEC;
    fprintf(stdout, "Write time : %5.2f sec\n", write_time);

    // build mesh
    typedef CGAL::Simple_cartesian<double> Kernel;
    typedef CGAL::Polyhedron_3<Kernel> Polyhedron;
    Polyhedron mesh;
    time1 = clock();
    try {
        // Try building a polyhedron
        CGAL::polygon_soup_to_polyhedron_3(mesh, points, triangles);
        if (!mesh.is_valid() || mesh.empty()) {
            std::cerr << "Error: Invalid polyhedron" << std::endl;
        }
    }
    catch(...) {}
    time2 = clock();
    build_time = float(time2 - time1) / CLOCKS_PER_SEC;
    fprintf(stdout, "Build time : %5.2f sec\n", build_time);

    // parameterization
    typedef CGAL::Parameterization_polyhedron_adaptor_3<Polyhedron> Parameterization_polyhedron_adaptor;
    // Type describing a border or seam as a vertex list
    typedef std::list<Parameterization_polyhedron_adaptor::Vertex_handle> Seam;
    // Create a second adaptor that virtually "cuts" the mesh following the 'seam' path
    typedef CGAL::Parameterization_mesh_patch_3<Parameterization_polyhedron_adaptor> Mesh_patch_polyhedron;

    Parameterization_polyhedron_adaptor mesh_adaptor(mesh);

    ////////////////////// cut graph ////////////////////////////////
    typedef CGAL::Parameterization_mesh_feature_extractor<Parameterization_polyhedron_adaptor>
        Mesh_feature_extractor;
    Seam seam;
    // Get reference to Polyhedron_3 mesh
    Polyhedron& mesh_ref = mesh_adaptor.get_adapted_mesh();
    // Extract mesh borders and compute genus
    Mesh_feature_extractor feature_extractor(mesh_adaptor);
    int nb_borders = feature_extractor.get_nb_borders();
    int genus = feature_extractor.get_genus(); // genus means a hole inside a surface
    std::cout << "# borders: " << nb_borders << " # holes: " << genus << std::endl;
    std::cout << feature_extractor.get_borders()[0] << std::endl;
    ///////////////////// end of cut graph //////////////////////////

    /*
    Mesh_patch_polyhedron mesh_patch(mesh_adaptor, seam.begin(), seam.end());
    if (!mesh_patch.is_valid())
    {
        std::cerr << "Input mesh not supported: non manifold shape or invalid cutting" << std::endl;
        return EXIT_FAILURE;
    }

    typedef CGAL::Parameterizer_traits_3<Mesh_patch_polyhedron> Parameterizer; // Type that defines the error codes
    Parameterizer::Error_code err = CGAL::parameterize(mesh_patch);
    switch(err) {
    case Parameterizer::OK: // Success
        break;
    case Parameterizer::ERROR_EMPTY_MESH: // Input mesh not supported
    case Parameterizer::ERROR_NON_TRIANGULAR_MESH:
    case Parameterizer::ERROR_NO_TOPOLOGICAL_DISC:
    case Parameterizer::ERROR_BORDER_TOO_SHORT:
        std::cerr << "Input mesh not supported: " << Parameterizer::get_error_message(err) << std::endl;
        return EXIT_FAILURE;
        break;
    default: // Error
        std::cerr << "Error: " << Parameterizer::get_error_message(err) << std::endl;
        return EXIT_FAILURE;
        break;
    };

    // Raw output: dump (u,v) pairs
    Polyhedron::Vertex_const_iterator pVertex;
    for (pVertex = mesh.vertices_begin(); pVertex != mesh.vertices_end(); pVertex++)
    {
        // (u,v) pair is stored in any halfedge
        double u = mesh_adaptor.info(pVertex->halfedge())->uv().x();
        double v = mesh_adaptor.info(pVertex->halfedge())->uv().y();
        std::cout << "(u,v) = (" << u << "," << v << ")" << std::endl;
    }
    */

    return 0;
}