I have the following code, which produces a compile error on the line m_threads.push_back(boost::thread(boost::ref(*this)));. Could you help me figure out what is wrong?
#include <algorithm>
#include <fstream> // NOLINT(readability/streams)
#include <string>
#include <utility>
#include <vector>
#include "boost/scoped_ptr.hpp"
#include "boost/thread.hpp"
#include "boost/thread/barrier.hpp"
#include "boost/atomic.hpp"
// #include "gflags/gflags.h"
// #include "glog/logging.h"
// #include "caffe/proto/caffe.pb.h"
// #include "caffe/util/db.hpp"
// #include "caffe/util/io.hpp"
// #include "caffe/util/rng.hpp"
using namespace caffe; // NOLINT(build/namespaces)
using std::pair;
using boost::scoped_ptr;
DEFINE_bool(gray, false,
"When this option is on, treat images as grayscale ones");
DEFINE_bool(shuffle, false,
"Randomly shuffle the order of images and their labels");
DEFINE_int32(threads, 1,
"Build DB using multiple threads.");
DEFINE_string(backend, "lmdb",
"The backend {lmdb, leveldb} for storing the result");
DEFINE_int32(resize_width, 0, "Width images are resized to");
DEFINE_int32(resize_height, 0, "Height images are resized to");
DEFINE_bool(check_size, false,
"When this option is on, check that all the datum have the same size");
DEFINE_bool(encoded, false,
"When this option is on, the encoded image will be save in datum");
DEFINE_string(encode_type, "",
"Optional: What type should we encode the image as ('png','jpg',...).");
class ThreadedReader
{
public:
typedef struct {
int line_id;
bool status;
Datum datum;
} data_entry;
ThreadedReader(int threads, std::vector<std::pair<std::string, int> >& lines,
const string& encode_type, const string& root_folder,
int resize_height, int resize_width, bool is_color) :
m_thread_count(threads), m_lines(lines),
m_barrier_read(threads), m_barrier_fetch(2),
m_id(0), m_thd_done(0),
m_done(false),
m_root_folder(root_folder),
m_encode_type(encode_type),
m_resize_height(resize_height), m_resize_width(resize_width),
m_is_color(is_color)
{
m_data.resize(m_thread_count);
for (int i = 0; i < threads; ++i)
m_threads.push_back(boost::thread(boost::ref(*this)));
}
~ThreadedReader() {
// for (int i = 0; i < m_thread_count; ++i)
// m_threads[i].join();
}
void operator()()
{
for (;;) {
int my_id = m_id.fetch_add(1, boost::memory_order_acq_rel);
const int my_base = my_id % m_thread_count;
m_barrier_read.count_down_and_wait();
m_data[my_base].status = false;
m_data[my_base].line_id = my_id;
if (my_id < m_lines.size()) {
m_data[my_base].line_id = my_id;
m_data[my_base].status = ReadImageToDatum(m_root_folder + m_lines[my_id].first,
m_lines[my_id].second, m_resize_height, m_resize_width, m_is_color,
m_encode_type, &m_data[my_base].datum);
}
if (m_thd_done.fetch_add(1, boost::memory_order_acq_rel) + 1 == m_thread_count) {
m_barrier_fetch.count_down_and_wait();
m_thd_done.store(0, boost::memory_order_release);
if (m_id.load(boost::memory_order_relaxed) >= m_lines.size())
m_done.store(true);
m_barrier_fetch.count_down_and_wait();
}
if (m_id.load(boost::memory_order_relaxed) >= m_lines.size())
break;
}
}
// bool fetch_batch(std::vector<data_entry>& vec)
// {
// using std::swap;
// if (m_done.load(boost::memory_order_acquire))
// return false;
// m_barrier_fetch.count_down_and_wait();
// swap(vec, m_data);
// if (m_data.size() < m_thread_count)
// m_data.resize(m_thread_count);
// m_barrier_fetch.count_down_and_wait();
// return true;
// }
private:
const int m_thread_count;
std::vector<std::pair<std::string, int> >& m_lines;
boost::barrier m_barrier_read;
boost::barrier m_barrier_fetch;
boost::atomic<int> m_id;
boost::atomic<int> m_thd_done;
boost::atomic<bool> m_done;
const std::string& m_root_folder;
const std::string& m_encode_type;
const int m_resize_height, m_resize_width;
const bool m_is_color;
std::vector<data_entry> m_data;
std::vector<boost::thread> m_threads;
};
int main(int argc, char** argv) {
return 0;
}
The error:
passing ‘const boost::thread’ as ‘this’ argument of ‘boost::thread::operator boost::detail::thread_move_t<boost::thread>()’ discards qualifiers [-fpermissive]
make: *** [.build_release/tools/convert_imageset_threaded.o] Error 1
Your code compiles. It should just be reduced a lot further.
The error message suggests that in your real code you have a const- or volatile-qualified ThreadedReader, perhaps.
Also, you shouldn't mix signed and unsigned integers in comparisons.
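As an aside, and only a guess on my part (the diagnostic names a const boost::thread rather than the functor): this error is typical of older Boost/pre-C++11 builds, where pushing a temporary boost::thread into a std::vector trips over the non-const move-emulation conversion operator. A minimal, self-contained sketch of one workaround under that assumption is to let boost::thread_group own the threads instead of a vector; Worker and pool are placeholder names of mine, not from the question:

#include <boost/ref.hpp>
#include <boost/thread.hpp>

struct Worker {
    void operator()() { /* the per-thread work would go here */ }
};

int main() {
    Worker w;
    boost::thread_group pool;               // owns the threads; no std::vector<boost::thread> needed
    for (int i = 0; i < 4; ++i)
        pool.create_thread(boost::ref(w));  // copies only the reference wrapper, never a thread object
    pool.join_all();                        // join every thread before the group is destroyed
    return 0;
}

On a C++11 compiler with a recent Boost, as in the reduced version below, pushing the temporary directly compiles fine.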
Live On Coliru
// This program converts a set of images to a lmdb/leveldb by storing them
// as Datum proto buffers.
// Usage:
// convert_imageset [FLAGS] ROOTFOLDER/ LISTFILE DB_NAME
//
// where ROOTFOLDER is the root folder that holds all the images, and LISTFILE
// should be a list of files as well as their labels, in the format as
// subfolder1/file1.JPEG 7
// ....
#include <algorithm>
#include <fstream> // NOLINT(readability/streams)
#include <string>
#include <utility>
#include <vector>
#include "boost/scoped_ptr.hpp"
#include "boost/thread.hpp"
#include "boost/thread/barrier.hpp"
#include "boost/atomic.hpp"
// #include "gflags/gflags.h"
// #include "glog/logging.h"
// #include "caffe/proto/caffe.pb.h"
// #include "caffe/util/db.hpp"
// #include "caffe/util/io.hpp"
// #include "caffe/util/rng.hpp"
//using namespace caffe; // NOLINT(build/namespaces)
using std::pair;
using boost::scoped_ptr;
struct Datum {};
class ThreadedReader
{
public:
typedef struct {
int line_id;
bool status;
Datum datum;
} data_entry;
bool ReadImageToDatum(...) { return false; }
ThreadedReader(int threads, std::vector<std::pair<std::string, int> >& lines,
const std::string& encode_type, const std::string& root_folder,
int resize_height, int resize_width, bool is_color) :
m_thread_count(threads), m_lines(lines),
m_barrier_read(threads), m_barrier_fetch(2),
m_id(0), m_thd_done(0),
m_done(false),
m_root_folder(root_folder),
m_encode_type(encode_type),
m_resize_height(resize_height), m_resize_width(resize_width),
m_is_color(is_color)
{
m_data.resize(m_thread_count);
for (int i = 0; i < threads; ++i)
m_threads.push_back(boost::thread(boost::ref(*this)));
}
~ThreadedReader() {
// for (int i = 0; i < m_thread_count; ++i)
// m_threads[i].join();
}
void operator()()
{
for (;;) {
unsigned my_id = m_id.fetch_add(1, boost::memory_order_acq_rel);
const int my_base = my_id % m_thread_count;
m_barrier_read.count_down_and_wait();
m_data[my_base].status = false;
m_data[my_base].line_id = my_id;
if (my_id < m_lines.size()) {
m_data[my_base].line_id = my_id;
m_data[my_base].status = ReadImageToDatum(m_root_folder + m_lines[my_id].first,
m_lines[my_id].second, m_resize_height, m_resize_width, m_is_color,
m_encode_type, &m_data[my_base].datum);
}
if (m_thd_done.fetch_add(1, boost::memory_order_acq_rel) + 1 == m_thread_count) {
m_barrier_fetch.count_down_and_wait();
m_thd_done.store(0, boost::memory_order_release);
if (m_id.load(boost::memory_order_relaxed) >= m_lines.size())
m_done.store(true);
m_barrier_fetch.count_down_and_wait();
}
if (m_id.load(boost::memory_order_relaxed) >= m_lines.size())
break;
}
}
// bool fetch_batch(std::vector<data_entry>& vec)
// {
// using std::swap;
// if (m_done.load(boost::memory_order_acquire))
// return false;
// m_barrier_fetch.count_down_and_wait();
// swap(vec, m_data);
// if (m_data.size() < m_thread_count)
// m_data.resize(m_thread_count);
// m_barrier_fetch.count_down_and_wait();
// return true;
// }
private:
const int m_thread_count;
std::vector<std::pair<std::string, int> >& m_lines;
boost::barrier m_barrier_read;
boost::barrier m_barrier_fetch;
boost::atomic<unsigned> m_id;
boost::atomic<int> m_thd_done;
boost::atomic<bool> m_done;
const std::string& m_root_folder;
const std::string& m_encode_type;
const int m_resize_height, m_resize_width;
const bool m_is_color;
std::vector<data_entry> m_data;
std::vector<boost::thread> m_threads;
};
int main(int argc, char** argv) {
// ::google::InitGoogleLogging(argv[0]);
// #ifndef GFLAGS_GFLAGS_H_
// namespace gflags = google;
// #endif
// gflags::SetUsageMessage("Convert a set of images to the leveldb/lmdb\n"
// "format used as input for Caffe.\n"
// "Usage:\n"
// " convert_imageset [FLAGS] ROOTFOLDER/ LISTFILE DB_NAME\n"
// "The ImageNet dataset for the training demo is at\n"
// " http://www.image-net.org/download-images\n");
// gflags::ParseCommandLineFlags(&argc, &argv, true);
// if (argc < 4) {
// gflags::ShowUsageWithFlagsRestrict(argv[0], "tools/convert_imageset");
// return 1;
// }
// const bool is_color = !FLAGS_gray;
// const int threads = FLAGS_threads > 1 ? FLAGS_threads : 1;
// const bool check_size = FLAGS_check_size;
// const bool encoded = FLAGS_encoded;
// const string encode_type = FLAGS_encode_type;
// std::ifstream infile(argv[2]);
// std::vector<std::pair<std::string, int> > lines;
// std::string filename;
// int label;
// while (infile >> filename >> label) {
// lines.push_back(std::make_pair(filename, label));
// }
// if (FLAGS_shuffle) {
// // randomly shuffle data
// LOG(INFO) << "Shuffling data";
// shuffle(lines.begin(), lines.end());
// }
// LOG(INFO) << "A total of " << lines.size() << " images.";
// if (encode_type.size() && !encoded)
// LOG(INFO) << "encode_type specified, assuming encoded=true.";
// int resize_height = std::max<int>(0, FLAGS_resize_height);
// int resize_width = std::max<int>(0, FLAGS_resize_width);
// // Create new DB
// scoped_ptr<db::DB> db(db::GetDB(FLAGS_backend));
// db->Open(argv[3], db::NEW);
// scoped_ptr<db::Transaction> txn(db->NewTransaction());
// // Storing to db
// std::string root_folder(argv[1]);
// int count = 0;
// const int kMaxKeyLength = 256;
// char key_cstr[kMaxKeyLength];
// int data_size = 0;
// bool data_size_initialized = false;
// if (threads > 1) {
// ThreadedReader rdr(threads, lines, encode_type, root_folder,
// resize_height, resize_width, is_color);
// std::vector<ThreadedReader::data_entry> vec;
// while (rdr.fetch_batch(vec)) {
// for (int i = 0; i < vec.size(); ++i) {
// if (!vec[i].status)
// continue;
// if (check_size) {
// if (!data_size_initialized) {
// data_size = vec[i].datum.channels() * vec[i].datum.height() * vec[i].datum.width();
// data_size_initialized = true;
// } else {
// const std::string& data = vec[i].datum.data();
// CHECK_EQ(data.size(), data_size) << "Incorrect data field size "
// << data.size();
// }
// }
// // sequential
// int length = snprintf(key_cstr, kMaxKeyLength, "%08d_%s", vec[i].line_id,
// lines[vec[i].line_id].first.c_str());
// // Put in db
// string out;
// CHECK(vec[i].datum.SerializeToString(&out));
// txn->Put(string(key_cstr, length), out);
// if (++count % 1000 == 0) {
// // Commit db
// txn->Commit();
// txn.reset(db->NewTransaction());
// LOG(ERROR) << "Processed " << count << " files.";
// }
// }
// }
// } else {
// Datum datum;
// for (int line_id = 0; line_id < lines.size(); ++line_id) {
// bool status;
// std::string enc = encode_type;
// if (encoded && !enc.size()) {
// // Guess the encoding type from the file name
// string fn = lines[line_id].first;
// size_t p = fn.rfind('.');
// if (p == fn.npos)
// LOG(WARNING) << "Failed to guess the encoding of '" << fn << "'";
// enc = fn.substr(p);
// std::transform(enc.begin(), enc.end(), enc.begin(), ::tolower);
// }
// status = ReadImageToDatum(root_folder + lines[line_id].first,
// lines[line_id].second, resize_height, resize_width, is_color,
// enc, &datum);
// if (status == false) continue;
// if (check_size) {
// if (!data_size_initialized) {
// data_size = datum.channels() * datum.height() * datum.width();
// data_size_initialized = true;
// } else {
// const std::string& data = datum.data();
// CHECK_EQ(data.size(), data_size) << "Incorrect data field size "
// << data.size();
// }
// }
// // sequential
// int length = snprintf(key_cstr, kMaxKeyLength, "%08d_%s", line_id,
// lines[line_id].first.c_str());
// // Put in db
// string out;
// CHECK(datum.SerializeToString(&out));
// txn->Put(string(key_cstr, length), out);
// if (++count % 1000 == 0) {
// // Commit db
// txn->Commit();
// txn.reset(db->NewTransaction());
// LOG(ERROR) << "Processed " << count << " files.";
// }
// }
// }
// // write the last batch
// if (count % 1000 != 0) {
// txn->Commit();
// LOG(ERROR) << "Processed " << count << " files.";
// }
return 0;
}
Related
I'm trying to use some example code for C++ in Qt. In this example there's a function "get_top_n" from tflite::label_image, in tensorflow/lite/examples/label_image/get_top_n.h, but Qt Creator doesn't find the function.
Error: main.cpp:104 (and 107): error: no matching function for call to 'get_top_n'
What am I doing wrong here?
#include <fstream>
#include <string>
#include <vector>
#include <opencv2/opencv.hpp>
#include "tensorflow/lite/interpreter.h"
#include "tensorflow/lite/kernels/register.h"
#include "tensorflow/lite/string_util.h"
#include "tensorflow/lite/examples/label_image/get_top_n.h"
#include "tensorflow/lite/model.h"
std::vector<std::string> load_labels(std::string labels_file)
{
std::ifstream file(labels_file.c_str());
if (!file.is_open())
{
fprintf(stderr, "unable to open label file\n");
exit(-1);
}
std::string label_str;
std::vector<std::string> labels;
while (std::getline(file, label_str))
{
if (label_str.size() > 0)
labels.push_back(label_str);
}
file.close();
return labels;
}
int main(int argc, char *argv[])
{
// Get Model label and input image
if (argc != 4)
{
fprintf(stderr, "TfliteClassification.exe modelfile labels image\n");
exit(-1);
}
const char *modelFileName = argv[1];
const char *labelFile = argv[2];
const char *imageFile = argv[3];
// Load Model
auto model = tflite::FlatBufferModel::BuildFromFile(modelFileName);
if (model == nullptr)
{
fprintf(stderr, "failed to load model\n");
exit(-1);
}
// Initiate Interpreter
std::unique_ptr<tflite::Interpreter> interpreter;
tflite::ops::builtin::BuiltinOpResolver resolver;
tflite::InterpreterBuilder(*model, resolver)(&interpreter);
if (interpreter == nullptr)
{
fprintf(stderr, "Failed to initiate the interpreter\n");
exit(-1);
}
if (interpreter->AllocateTensors() != kTfLiteOk)
{
fprintf(stderr, "Failed to allocate tensor\n");
exit(-1);
}
// Configure the interpreter
interpreter->SetAllowFp16PrecisionForFp32(true);
interpreter->SetNumThreads(1);
// Get Input Tensor Dimensions
int input = interpreter->inputs()[0];
auto height = interpreter->tensor(input)->dims->data[1];
auto width = interpreter->tensor(input)->dims->data[2];
auto channels = interpreter->tensor(input)->dims->data[3];
// Load Input Image
cv::Mat image;
auto frame = cv::imread(imageFile);
if (frame.empty())
{
fprintf(stderr, "Failed to load iamge\n");
exit(-1);
}
// Copy image to input tensor
cv::resize(frame, image, cv::Size(width, height), cv::INTER_NEAREST);
memcpy(interpreter->typed_input_tensor<unsigned char>(0), image.data, image.total() * image.elemSize());
// Inference
std::chrono::steady_clock::time_point start, end;
start = std::chrono::steady_clock::now();
interpreter->Invoke();
end = std::chrono::steady_clock::now();
auto inference_time = std::chrono::duration_cast<std::chrono::milliseconds>(end - start).count();
// Get Output
int output = interpreter->outputs()[0];
TfLiteIntArray *output_dims = interpreter->tensor(output)->dims;
auto output_size = output_dims->data[output_dims->size - 1];
std::vector<std::pair<float, int>> top_results;
float threshold = 0.01f;
switch (interpreter->tensor(output)->type)
{
case kTfLiteInt32:
tflite::label_image::get_top_n<float>(interpreter->typed_output_tensor<float>(0), output_size, 1, threshold, &top_results, kTfLiteFloat32);
break;
case kTfLiteUInt8:
tflite::label_image::get_top_n<uint8_t>(interpreter->typed_output_tensor<uint8_t>(0), output_size, 1, threshold, &top_results, kTfLiteUInt8);
break;
default:
fprintf(stderr, "cannot handle output type\n");
exit(-1);
}
// Print inference ms in input image
cv::putText(frame, "Infernce Time in ms: " + std::to_string(inference_time), cv::Point(10, 30), cv::FONT_HERSHEY_SIMPLEX, 0.8, cv::Scalar(0, 0, 255), 2);
// Load Labels
auto labels = load_labels(labelFile);
// Print labels with confidence in input image
for (const auto &result : top_results)
{
const float confidence = result.first;
const int index = result.second;
std::string output_txt = "Label :" + labels[index] + " Confidence : " + std::to_string(confidence);
cv::putText(frame, output_txt, cv::Point(10, 60), cv::FONT_HERSHEY_SIMPLEX, 0.8, cv::Scalar(0, 0, 255), 2);
}
// Display image
cv::imshow("Output", frame);
cv::waitKey(0);
return 0;
}
The affected lines:
104: tflite::label_image::get_top_n<float>(interpreter->typed_output_tensor<float>(0), output_size, 1, threshold, &top_results, kTfLiteFloat32);
107: tflite::label_image::get_top_n<uint8_t>(interpreter->typed_output_tensor<uint8_t>(0), output_size, 1, threshold, &top_results, kTfLiteUInt8);
Content of tensorflow/lite/examples/label_image/get_top_n.h:
/* Copyright 2017 The TensorFlow Authors. All Rights Reserved. ...*/
#ifndef TENSORFLOW_LITE_EXAMPLES_LABEL_IMAGE_GET_TOP_N_H_
#define TENSORFLOW_LITE_EXAMPLES_LABEL_IMAGE_GET_TOP_N_H_
#include "tensorflow/lite/examples/label_image/get_top_n_impl.h"
namespace tflite {
namespace label_image {
template <class T>
void get_top_n(T* prediction, int prediction_size, size_t num_results,
float threshold, std::vector<std::pair<float, int>>* top_results,
TfLiteType input_type);
// explicit instantiation so that we can use them otherwhere
template void get_top_n<float>(float*, int, size_t, float,
std::vector<std::pair<float, int>>*, TfLiteType);
template void get_top_n<int8_t>(int8_t*, int, size_t, float,
std::vector<std::pair<float, int>>*,
TfLiteType);
template void get_top_n<uint8_t>(uint8_t*, int, size_t, float,
std::vector<std::pair<float, int>>*,
TfLiteType);
} // namespace label_image
} // namespace tflite
#endif // TENSORFLOW_LITE_EXAMPLES_LABEL_IMAGE_GET_TOP_N_H_
Content of tensorflow/lite/examples/label_image/get_top_n_impl.h:
/* Copyright 2017 The TensorFlow Authors. All Rights Reserved. ...*/
#ifndef TENSORFLOW_LITE_EXAMPLES_LABEL_IMAGE_GET_TOP_N_IMPL_H_
#define TENSORFLOW_LITE_EXAMPLES_LABEL_IMAGE_GET_TOP_N_IMPL_H_
#include <algorithm>
#include <functional>
#include <queue>
#include "tensorflow/lite/c/common.h"
namespace tflite {
namespace label_image {
extern bool input_floating;
// Returns the top N confidence values over threshold in the provided vector,
// sorted by confidence in descending order.
template <class T>
void get_top_n(T* prediction, int prediction_size, size_t num_results,
float threshold, std::vector<std::pair<float, int>>* top_results,
TfLiteType input_type) {
// Will contain top N results in ascending order.
std::priority_queue<std::pair<float, int>, std::vector<std::pair<float, int>>,
std::greater<std::pair<float, int>>>
top_result_pq;
const long count = prediction_size; // NOLINT(runtime/int)
float value = 0.0;
for (int i = 0; i < count; ++i) {
switch (input_type) {
case kTfLiteFloat32:
value = prediction[i];
break;
case kTfLiteInt8:
value = (prediction[i] + 128) / 256.0;
break;
case kTfLiteUInt8:
value = prediction[i] / 255.0;
break;
default:
break;
}
// Only add it if it beats the threshold and has a chance at being in
// the top N.
if (value < threshold) {
continue;
}
top_result_pq.push(std::pair<float, int>(value, i));
// If at capacity, kick the smallest value out.
if (top_result_pq.size() > num_results) {
top_result_pq.pop();
}
}
// Copy to output vector and reverse into descending order.
while (!top_result_pq.empty()) {
top_results->push_back(top_result_pq.top());
top_result_pq.pop();
}
std::reverse(top_results->begin(), top_results->end());
}
} // namespace label_image
} // namespace tflite
#endif // TENSORFLOW_LITE_EXAMPLES_LABEL_IMAGE_GET_TOP_N_IMPL_H_
Github link
I wrote a program that counts all the words in the .log files in a directory using multiple threads and prints the result to the screen. The first command-line argument is the directory to search for .log files; the second is the number of threads (4 by default).
I used a ThreadPool for this program.
ThreadPool.h
#ifndef THREAD_POOL_H
#define THREAD_POOL_H
#include <boost/thread/condition_variable.hpp>
#include <boost/thread.hpp>
#include <future> // I don't know how to work with boost futures
#include <queue>
#include <vector>
#include <functional>
class ThreadPool
{
public:
using Task = std::function<void()>; // Our task
explicit ThreadPool(int num_threads)
{
start(num_threads);
}
~ThreadPool()
{
stop();
}
template<class T>
auto enqueue(T task)->std::future<decltype(task())>
{
// packaged_task wraps any Callable target
auto wrapper = std::make_shared<std::packaged_task<decltype(task()) ()>>(std::move(task));
{
boost::unique_lock<boost::mutex> lock{ mutex_p };
tasks_p.emplace([=] {
(*wrapper)();
});
}
event_p.notify_one();
return wrapper->get_future();
}
//void enqueue(Task task)
//{
// {
// boost::unique_lock<boost::mutex> lock { mutex_p };
// tasks_p.emplace(std::move(task));
// event_p.notify_one();
// }
//}
private:
std::vector<boost::thread> threads_p; // num of threads
std::queue<Task> tasks_p; // Tasks to make
boost::condition_variable event_p;
boost::mutex mutex_p;
bool isStop = false;
void start(int num_threads)
{
for (int i = 0; i < num_threads; ++i)
{
// Add to the end our thread
threads_p.emplace_back([=] {
while (true)
{
// Task to do
Task task;
{
boost::unique_lock<boost::mutex> lock(mutex_p);
event_p.wait(lock, [=] { return isStop || !tasks_p.empty(); });
// If we make all tasks
if (isStop && tasks_p.empty())
break;
// Take new task from queue
task = std::move(tasks_p.front());
tasks_p.pop();
}
// Execute our task
task();
}
});
}
}
void stop() noexcept
{
{
boost::unique_lock<boost::mutex> lock(mutex_p);
isStop = true;
event_p.notify_all();
}
for (auto& thread : threads_p)
{
thread.join();
}
}
};
#endif
main.cpp
#include "ThreadPool.h"
#include <iostream>
#include <iomanip>
#include <Windows.h>
#include <vector>
#include <map>
#include <boost/filesystem.hpp>
#include <boost/thread.hpp>
namespace bfs = boost::filesystem;
int count_words(const std::string& filename)
{
int counter = 0;
std::ifstream file(filename);
std::string buffer;
while (file >> buffer)
{
++counter;
}
return counter;
}
int main(int argc, const char* argv[])
{
bfs::path path = argv[1];
// If this path exists and is a directory
if (bfs::exists(path) && bfs::is_directory(path))
{
// Number of threads. Default = 4
int n = (argc == 3 ? atoi(argv[2]) : 4);
ThreadPool pool(n);
// Container to store all filenames and number of words inside them
std::map<bfs::path, int> all_files_and_sums;
// Iterate all files in dir
for (auto& p : bfs::directory_iterator(path)) {
// Take only .log files
if (p.path().extension() == ".log") {
// Future for taking value from here
auto fut = pool.enqueue([&p, &all_files_and_sums]() {
// In this lambda function I count all words in file and return this value
int result = count_words(p.path().string());
std::cout << "TID " << GetCurrentThreadId() << "\n";
return result;
});
// "filename = words in this .txt file"
all_files_and_sums[p.path()] = fut.get();
}
}
int result = 0;
for (auto& k : all_files_and_sums)
{
std::cout << k.first << "- " << k.second << "\n";
result += k.second;
}
std::cout << "Result: " << result << "\n";
}
else
std::perror("Dir is not exist");
}
This solution works correctly, but when the directory contains many files it runs very slowly. I think it's because of the futures. How can I take values from different threads without futures?
(P.S.)
Sorry for my English
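One hedged observation on the loop above, just as an illustration: calling fut.get() right after enqueue blocks until that single task has finished, so the pool effectively processes one file at a time. A sketch that keeps the futures and collects them only after everything has been queued lets the worker threads overlap; the futures vector and the by-value file capture are my own names and choices, not part of the original code:

std::vector<std::pair<bfs::path, std::future<int>>> futures;
for (auto& p : bfs::directory_iterator(path)) {
    if (p.path().extension() == ".log") {
        bfs::path file = p.path();                        // copy so the lambda doesn't dangle
        futures.emplace_back(file, pool.enqueue([file] {
            return count_words(file.string());            // runs on a pool thread
        }));
    }
}
for (auto& f : futures)
    all_files_and_sums[f.first] = f.second.get();         // block only once, at the end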
MXNet C++ inference with MXPredSetInput segmentation fault
1. Background
I have tried https://github.com/apache/incubator-mxnet/tree/master/example/image-classification/predict-cpp and it succeeded.
But when I try to deploy MXNet in C++ with my own model, I get a segmentation fault:
[17:33:07] src/nnvm/legacy_json_util.cc:209: Loading symbol saved by previous version v1.2.1. Attempting to upgrade...
Signal: SIGSEGV (Segmentation fault)
2. Code with the error:
MXPredSetInput(pred_hnd, "data", image_data.data(), static_cast<mx_uint>(image_size));
3. Tips
At first I thought it was because the input data shape is not compatible with the model's input layer, but I asked the model designer: it's a ResNet model with conv layers only, so any input shape should be OK.
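Purely as a debugging illustration (my own assumption, not a confirmed diagnosis): a mismatch between the buffer handed to MXPredSetInput and the shape declared at MXPredCreate is a common way to crash inside a predictor, so a cheap check in main() before calling predict(), using only variables already present in the code below, would be:

// Hypothetical sanity check; requires <cassert> if it isn't already pulled in transitively.
std::size_t declared = 1;
for (mx_uint i = input_shape_indptr[0]; i < input_shape_indptr[1]; ++i)
    declared *= input_shape_data[i];                 // 1 * channels * height * width
assert(image_data.size() == declared &&
       "input buffer size does not match the declared input shape");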
4. Download the model:
Download the files and put them into the model dir.
https://drive.google.com/drive/folders/16MEKNOz_iwquVxHMk9c7igmBNuT6w7wz?usp=sharing
5. Code: https://github.com/jaysimon/mxnet_cpp_infere
#include <cstdio>
#include <cstdlib>
#include <iostream>
#include <fstream>
#include <vector>
#include <memory>
#include <thread>
#include <iomanip>
#include <opencv2/opencv.hpp>
// Path for c_predict_api
#include <mxnet/c_predict_api.h>
const mx_float DEFAULT_MEAN = 117.0;
static std::string trim(const std::string& input) {
auto not_space = [](int ch) {
return !std::isspace(ch);
};
auto output = input;
output.erase(output.begin(), std::find_if(output.begin(), output.end(), not_space));
output.erase(std::find_if(output.rbegin(), output.rend(), not_space).base(), output.end());
return output;
}
// Read file to buffer
class BufferFile {
public :
std::string file_path_;
std::size_t length_ = 0;
std::unique_ptr<char[]> buffer_;
explicit BufferFile(const std::string& file_path)
: file_path_(file_path) {
std::ifstream ifs(file_path.c_str(), std::ios::in | std::ios::binary);
if (!ifs) {
std::cerr << "Can't open the file. Please check " << file_path << ". \n";
return;
}
ifs.seekg(0, std::ios::end);
length_ = static_cast<std::size_t>(ifs.tellg());
ifs.seekg(0, std::ios::beg);
std::cout << file_path.c_str() << " ... " << length_ << " bytes\n";
// Buffer as null terminated to be converted to string
buffer_.reset(new char[length_ + 1]);
buffer_[length_] = 0;
ifs.read(buffer_.get(), length_);
ifs.close();
}
std::size_t GetLength() {
return length_;
}
char* GetBuffer() {
return buffer_.get();
}
};
void GetImageFile(const std::string& image_file,
mx_float* image_data, int channels,
cv::Size resize_size, const mx_float* mean_data = nullptr) {
// Read all kinds of file into a BGR color 3 channels image
cv::Mat im_ori = cv::imread(image_file, cv::IMREAD_COLOR);
if (im_ori.empty()) {
std::cerr << "Can't open the image. Please check " << image_file << ". \n";
assert(false);
}
cv::Mat im;
resize(im_ori, im, resize_size);
int size = im.rows * im.cols * channels;
mx_float* ptr_image_r = image_data;
mx_float* ptr_image_g = image_data + size / 3;
mx_float* ptr_image_b = image_data + size / 3 * 2;
float mean_b, mean_g, mean_r;
mean_b = mean_g = mean_r = DEFAULT_MEAN;
mean_b = 103.06;
mean_g = 115.9;
mean_r = 123.15;
for (int i = 0; i < im.rows; i++) {
auto data = im.ptr<uchar>(i);
for (int j = 0; j < im.cols; j++) {
if (channels > 1) {
*ptr_image_b++ = static_cast<mx_float>(*data++) - mean_b;
*ptr_image_g++ = static_cast<mx_float>(*data++) - mean_g;
}
*ptr_image_r++ = static_cast<mx_float>(*data++) - mean_r;
}
}
}
// LoadSynsets
// Code from : https://github.com/pertusa/mxnet_predict_cc/blob/master/mxnet_predict.cc
std::vector<std::string> LoadSynset(const std::string& synset_file) {
std::ifstream fi(synset_file.c_str());
if (!fi.is_open()) {
std::cerr << "Error opening synset file " << synset_file << std::endl;
assert(false);
}
std::vector<std::string> output;
std::string synset, lemma;
while (fi >> synset) {
getline(fi, lemma);
output.push_back(lemma);
}
fi.close();
return output;
}
void PrintOutputResult(const std::vector<float>& data, const std::vector<std::string>& synset) {
if (data.size() != synset.size()) {
std::cerr << "Result data and synset size do not match!" << std::endl;
}
float best_accuracy = 0.0;
std::size_t best_idx = 0;
for (std::size_t i = 0; i < data.size(); ++i) {
std::cout << "Accuracy[" << i << "] = " << std::setprecision(8) << data[i] << std::endl;
if (data[i] > best_accuracy) {
best_accuracy = data[i];
best_idx = i;
}
}
std::cout << "Best Result: " << trim(synset[best_idx]) << " (id=" << best_idx << ", " <<
"accuracy=" << std::setprecision(8) << best_accuracy << ")" << std::endl;
}
void predict(PredictorHandle pred_hnd, const std::vector<mx_float> &image_data,
NDListHandle nd_hnd, const std::string &synset_file, int i) {
auto image_size = image_data.size();
// Set Input
//>>>>>>>>>>>>>>>>>>>> Problem code <<<<<<<<<<<<<<<<<<<<<<<
MXPredSetInput(pred_hnd, "data", image_data.data(), static_cast<mx_uint>(image_size));
// <<<<<<<<<<<<<<<<<<<<<<< Problem code <<<<<<<<<<<<<<<<<<<<<<<
// Do Predict Forward
MXPredForward(pred_hnd);
mx_uint output_index = 0;
mx_uint* shape = nullptr;
mx_uint shape_len;
// Get Output Result
MXPredGetOutputShape(pred_hnd, output_index, &shape, &shape_len);
std::size_t size = 1;
for (mx_uint i = 0; i < shape_len; ++i) { size *= shape[i]; }
std::vector<float> data(size);
MXPredGetOutput(pred_hnd, output_index, &(data[0]), static_cast<mx_uint>(size));
// Release NDList
if (nd_hnd) {
MXNDListFree(nd_hnd);
}
// Release Predictor
MXPredFree(pred_hnd);
// Synset path for your model, you have to modify it
auto synset = LoadSynset(synset_file);
// Print Output Data
PrintOutputResult(data, synset);
}
int main(int argc, char* argv[]) {
if (argc < 2) {
std::cout << "No test image here." << std::endl
<< "Usage: ./image-classification-predict apple.jpg [num_threads]" << std::endl;
return EXIT_FAILURE;
}
std::string test_file(argv[1]);
int num_threads = 1;
if (argc == 3)
num_threads = std::atoi(argv[2]);
// Models path for your model, you have to modify it
std::string json_file = "../model/rfcn_dcn_chicken-0000.json";
std::string param_file = "../model/rfcn_dcn_chicken-0000.params";
std::string synset_file = "../model/synset.txt";
std::string nd_file = "../model/mean_224.nd";
BufferFile json_data(json_file);
BufferFile param_data(param_file);
// Parameters
int dev_type = 1; // 1: cpu, 2: gpu
int dev_id = 0; // arbitrary.
mx_uint num_input_nodes = 1; // 1 for feedforward
const char* input_key[1] = { "data" };
const char** input_keys = input_key;
// Image size and channels
int width = 1000;
int height = 562;
int channels = 3;
const mx_uint input_shape_indptr[2] = { 0, 4 };
const mx_uint input_shape_data[4] = { 1,
static_cast<mx_uint>(channels),
static_cast<mx_uint>(height),
static_cast<mx_uint>(width) };
if (json_data.GetLength() == 0 || param_data.GetLength() == 0) {
return EXIT_FAILURE;
}
auto image_size = static_cast<std::size_t>(width * height * channels);
// Read Mean Data
const mx_float* nd_data = nullptr;
NDListHandle nd_hnd = nullptr;
BufferFile nd_buf(nd_file);
if (nd_buf.GetLength() > 0) {
mx_uint nd_index = 0;
mx_uint nd_len;
const mx_uint* nd_shape = nullptr;
const char* nd_key = nullptr;
mx_uint nd_ndim = 0;
MXNDListCreate(static_cast<const char*>(nd_buf.GetBuffer()),
static_cast<int>(nd_buf.GetLength()),
&nd_hnd, &nd_len);
MXNDListGet(nd_hnd, nd_index, &nd_key, &nd_data, &nd_shape, &nd_ndim);
}
// Read Image Data
std::vector<mx_float> image_data(image_size);
GetImageFile(test_file, image_data.data(), channels, cv::Size(width, height), nd_data);
if (num_threads == 1) {
// Create Predictor
PredictorHandle pred_hnd;
MXPredCreate(static_cast<const char*>(json_data.GetBuffer()),
static_cast<const char*>(param_data.GetBuffer()),
static_cast<int>(param_data.GetLength()),
dev_type,
dev_id,
num_input_nodes,
input_keys,
input_shape_indptr,
input_shape_data,
&pred_hnd);
assert(pred_hnd);
predict(pred_hnd, image_data, nd_hnd, synset_file, 0);
} else {
// Create Predictor
std::vector<PredictorHandle> pred_hnds(num_threads, nullptr);
MXPredCreateMultiThread(static_cast<const char*>(json_data.GetBuffer()),
static_cast<const char*>(param_data.GetBuffer()),
static_cast<int>(param_data.GetLength()),
dev_type,
dev_id,
num_input_nodes,
input_keys,
input_shape_indptr,
input_shape_data,
pred_hnds.size(),
pred_hnds.data());
for (auto hnd : pred_hnds)
assert(hnd);
std::vector<std::thread> threads;
for (int i = 0; i < num_threads; i++)
threads.emplace_back(predict, pred_hnds[i], image_data, nd_hnd, synset_file, i);
for (int i = 0; i < num_threads; i++)
threads[i].join();
}
printf("run successfully\n");
return EXIT_SUCCESS;
}
How do you copy one stream to another using dedicated read/write threads in C++?
Let's say I have these methods (not real, just to illustrate the point) for reading/writing data. These read/write functions could represent anything (network/file/USB/serial/etc).
// reads into the buffer; the number of bytes read is returned via bytesRead
void read(char* buffer, int bufferSize, int* bytesRead);
// writes from the buffer; the number of bytes written is returned via bytesWritten
void write(char* buffer, int bufferSize, int* bytesWritten);
The solution should also be portable.
NOTE: I am aware that Windows has a FILE_FLAG_OVERLAPPED feature, but this assumes that the read/write is file IO. Remember, these read/write methods could represent anything.
Here is the solution I came up with.
Header
#pragma once
#include <stdlib.h>
#include <queue>
#include <mutex>
#include <thread>
#include <chrono>
#include <list>
#include <thread>
#define ASYNC_COPY_READ_WRITE_SUCCESS 0
struct BufferBlock;
struct ReadStream
{
// read a stream to a buffer.
// return non-zero if an error occurred
virtual int read(char* buffer, int bufferSize, int* bytesRead) = 0;
};
struct WriteStream
{
// write a buffer to a stream.
// return non-zero if an error occurred
virtual int write(char* buffer, int bufferSize, int* bytesWritten) = 0;
};
class BufferBlockManager
{
public:
BufferBlockManager(int numberOfBlocks, int bufferSize);
~BufferBlockManager();
void enqueueBlockForRead(BufferBlock* block);
void dequeueBlockForRead(BufferBlock** block);
void enqueueBlockForWrite(BufferBlock* block);
void dequeueBlockForWrite(BufferBlock** block);
void resetState();
private:
std::list<BufferBlock*> blocks;
std::queue<BufferBlock*> blocksPendingRead;
std::queue<BufferBlock*> blocksPendingWrite;
std::mutex queueLock;
std::chrono::milliseconds dequeueSleepTime;
};
void AsyncCopyStream(BufferBlockManager* bufferBlockManager, ReadStream* readStream, WriteStream* writeStream, int* readResult, int* writeResult);
CPP
#include "AsyncReadWrite.h"
struct BufferBlock
{
BufferBlock(int bufferSize) : buffer(NULL)
{
this->bufferSize = bufferSize;
this->buffer = new char[bufferSize];
this->actualSize = 0;
this->isLastBlock = false;
}
~BufferBlock()
{
this->bufferSize = 0;
delete[] this->buffer; // buffer was allocated with new[], so pair it with delete[]
this->buffer = NULL;
this->actualSize = 0;
}
char* buffer;
int bufferSize;
int actualSize;
bool isLastBlock;
};
BufferBlockManager::BufferBlockManager(int numberOfBlocks, int bufferSize)
{
dequeueSleepTime = std::chrono::milliseconds(100);
for (int x = 0; x < numberOfBlocks; x++)
{
BufferBlock* block = new BufferBlock(bufferSize);
blocks.push_front(block);
blocksPendingRead.push(block);
}
}
BufferBlockManager::~BufferBlockManager()
{
for (std::list<BufferBlock*>::const_iterator iterator = blocks.begin(), end = blocks.end(); iterator != end; ++iterator) {
delete (*iterator);
}
}
void BufferBlockManager::enqueueBlockForRead(BufferBlock* block)
{
queueLock.lock();
block->actualSize = 0;
block->isLastBlock = false;
blocksPendingRead.push(block);
queueLock.unlock();
}
void BufferBlockManager::dequeueBlockForRead(BufferBlock** block)
{
WAITFOR:
while (blocksPendingRead.size() == 0)
std::this_thread::sleep_for(dequeueSleepTime);
queueLock.lock();
if (blocksPendingRead.size() == 0)
{
queueLock.unlock();
goto WAITFOR;
}
*block = blocksPendingRead.front();
blocksPendingRead.pop();
queueLock.unlock();
}
void BufferBlockManager::enqueueBlockForWrite(BufferBlock* block)
{
queueLock.lock();
blocksPendingWrite.push(block);
queueLock.unlock();
}
void BufferBlockManager::dequeueBlockForWrite(BufferBlock** block)
{
WAITFOR:
while (blocksPendingWrite.size() == 0)
std::this_thread::sleep_for(dequeueSleepTime);
queueLock.lock();
if (blocksPendingWrite.size() == 0)
{
queueLock.unlock();
goto WAITFOR;
}
*block = blocksPendingWrite.front();
blocksPendingWrite.pop();
queueLock.unlock();
}
void BufferBlockManager::resetState()
{
queueLock.lock();
blocksPendingRead = std::queue<BufferBlock*>();
blocksPendingWrite = std::queue<BufferBlock*>();
for (std::list<BufferBlock*>::const_iterator iterator = blocks.begin(), end = blocks.end(); iterator != end; ++iterator) {
(*iterator)->actualSize = 0;
}
queueLock.unlock();
}
struct AsyncCopyContext
{
AsyncCopyContext(BufferBlockManager* bufferBlockManager, ReadStream* readStream, WriteStream* writeStream)
{
this->bufferBlockManager = bufferBlockManager;
this->readStream = readStream;
this->writeStream = writeStream;
this->readResult = ASYNC_COPY_READ_WRITE_SUCCESS;
this->writeResult = ASYNC_COPY_READ_WRITE_SUCCESS;
}
BufferBlockManager* bufferBlockManager;
ReadStream* readStream;
WriteStream* writeStream;
int readResult;
int writeResult;
};
void ReadStreamThread(AsyncCopyContext* asyncContext)
{
int bytesRead = 0;
BufferBlock* readBuffer = NULL;
int readResult = ASYNC_COPY_READ_WRITE_SUCCESS;
while (
// as long there hasn't been any write errors
asyncContext->writeResult == ASYNC_COPY_READ_WRITE_SUCCESS
// and we haven't had an error reading yet
&& readResult == ASYNC_COPY_READ_WRITE_SUCCESS)
{
// let's dequeue a block to read into!
asyncContext->bufferBlockManager->dequeueBlockForRead(&readBuffer);
readResult = asyncContext->readStream->read(readBuffer->buffer, readBuffer->bufferSize, &bytesRead);
readBuffer->actualSize = bytesRead;
readBuffer->isLastBlock = bytesRead == 0;
if (readResult == ASYNC_COPY_READ_WRITE_SUCCESS)
{
// this was a valid read, go ahead and queue it for writing
asyncContext->bufferBlockManager->enqueueBlockForWrite(readBuffer);
}
else
{
// an error occurred while reading
asyncContext->readResult = readResult;
// since an error occurred, let's queue a block for writing indicating we are done and there are no more bytes to read
readBuffer->isLastBlock = true;
readBuffer->actualSize = 0;
asyncContext->bufferBlockManager->enqueueBlockForWrite(readBuffer);
}
if (readBuffer->isLastBlock) return;
}
}
void WriteStreamThread(AsyncCopyContext* asyncContext)
{
int bytesWritten = 0;
BufferBlock* writeBuffer = NULL;
int writeResult = ASYNC_COPY_READ_WRITE_SUCCESS;
bool isLastWriteBlock = false;
while (
// as long as there are no errors during reading
asyncContext->readResult == ASYNC_COPY_READ_WRITE_SUCCESS
// and we haven't had an error writing yet
&& writeResult == ASYNC_COPY_READ_WRITE_SUCCESS)
{
// let's dequeue a block for writing!
asyncContext->bufferBlockManager->dequeueBlockForWrite(&writeBuffer);
isLastWriteBlock = writeBuffer->isLastBlock;
if (writeBuffer->actualSize > 0)
writeResult = asyncContext->writeStream->write(writeBuffer->buffer, writeBuffer->actualSize, &bytesWritten);
if (writeResult == ASYNC_COPY_READ_WRITE_SUCCESS)
{
asyncContext->bufferBlockManager->enqueueBlockForRead(writeBuffer);
if (isLastWriteBlock) return;
}
else
{
asyncContext->writeResult = writeResult;
asyncContext->bufferBlockManager->enqueueBlockForRead(writeBuffer);
return;
}
}
}
void AsyncCopyStream(BufferBlockManager* bufferBlockManager, ReadStream* readStream, WriteStream* writeStream, int* readResult, int* writeResult)
{
AsyncCopyContext asyncContext(bufferBlockManager, readStream, writeStream);
std::thread readThread(ReadStreamThread, &asyncContext);
std::thread writeThread(WriteStreamThread, &asyncContext);
readThread.join();
writeThread.join();
*readResult = asyncContext.readResult;
*writeResult = asyncContext.writeResult;
}
Usage
#include <stdio.h>
#include <tchar.h>
#include "AsyncReadWrite.h"
struct ReadTestStream : ReadStream
{
int readCount = 0;
int read(char* buffer, int bufferSize, int* bytesRead)
{
printf("Starting read...\n");
memset(buffer, 0, bufferSize); // zero the buffer (value first, then size)
if (readCount == 10)
{
*bytesRead = 0;
return 0;
}
// pretend this function takes a while!
std::this_thread::sleep_for(std::chrono::milliseconds(100));
char buff[100];
sprintf_s(buff, "This is read number %d\n", readCount);
strcpy_s(buffer, sizeof(buff), buff);
*bytesRead = strlen(buffer);
readCount++;
printf("Finished read...\n");
return 0;
}
};
struct WriteTestStream : WriteStream
{
int write(char* buffer, int bufferSize, int* bytesWritten)
{
printf("Starting write...\n");
// pretend this function takes a while!
std::this_thread::sleep_for(std::chrono::milliseconds(500));
printf("%s", buffer); // don't pass external data as the format string
printf("Finished write...\n");
return 0;
}
};
int _tmain(int argc, _TCHAR* argv[])
{
BufferBlockManager bufferBlockManager(5, 4096);
ReadTestStream readStream;
WriteTestStream writeStream;
int readResult = 0;
int writeResult = 0;
printf("Starting copy...\n");
AsyncCopyStream(&bufferBlockManager, &readStream, &writeStream, &readResult, &writeResult);
printf("Finished copy... readResult=%d writeResult=%d \n", readResult, writeResult);
getchar();
return 0;
}
EDIT: I put my solution into a GitHub repository here. If you wish to use this code, refer to the repository since it may be more updated than this answer.
Typically, you would just have one thread for each direction that alternates between reads and writes.
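To illustrate that suggestion with a minimal sketch, reusing the ReadStream/WriteStream interfaces and the ASYNC_COPY_READ_WRITE_SUCCESS constant from the answer above (the fixed 4096-byte buffer is an arbitrary choice of mine): a single worker simply alternates read and write until the source is exhausted or either side reports an error.

void CopyStream(ReadStream* in, WriteStream* out, int* readResult, int* writeResult)
{
    char buffer[4096];
    int bytesRead = 0, bytesWritten = 0;
    *readResult = *writeResult = ASYNC_COPY_READ_WRITE_SUCCESS;
    for (;;)
    {
        *readResult = in->read(buffer, sizeof(buffer), &bytesRead);
        if (*readResult != ASYNC_COPY_READ_WRITE_SUCCESS || bytesRead == 0)
            return;                                   // read error or end of stream
        *writeResult = out->write(buffer, bytesRead, &bytesWritten);
        if (*writeResult != ASYNC_COPY_READ_WRITE_SUCCESS)
            return;                                   // write error
    }
}

For a bidirectional link you would run one such loop per direction, each on its own thread.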
I'm writing a server and I want to use XML with my Java client. I'm using CygWin with XercesC 3.1.1 for my development tests and this works fine (I looped 30000 times over this function with no crash). However, my target machine is running HP-UX with XercesC 2.7. To handle the differences between the XercesC implementations I wrote a separate class for each version.
When I run the code with XercesC 2.7, I always get a NULL pointer when I try to create the DOMWriter, and a SIGABRT when trying again.
Since I couldn't find anything on Google, I hope that someone can shed some light on what I'm doing wrong here. I've been looking at the sample code provided with the XercesC source, and I also have some production code from fellow programmers, and I can't see any difference for the life of it.
I tried to create an SSCCE, which is a bit long, but it is the shortest sample that I could create.
xml_serialize.h
#ifndef XML_SERIALIZE_H_
#define XML_SERIALIZE_H_
#include <string>
#include <xercesc/util/PlatformUtils.hpp>
#include <xercesc/util/XMLString.hpp>
#include <xercesc/dom/DOM.hpp>
#if defined(XERCES_NEW_IOSTREAMS)
#include <iostream>
#else
#include <iostream.h>
#endif
#include <xercesc/util/OutOfMemoryException.hpp>
class XStr
{
public :
// -----------------------------------------------------------------------
// Constructors and Destructor
// -----------------------------------------------------------------------
XStr(const char* const toTranscode)
{
// Call the private transcoding method
fUnicodeForm = xercesc::XMLString::transcode(toTranscode);
}
~XStr()
{
xercesc::XMLString::release(&fUnicodeForm);
}
// -----------------------------------------------------------------------
// Getter methods
// -----------------------------------------------------------------------
const XMLCh* unicodeForm() const
{
return fUnicodeForm;
}
private :
// -----------------------------------------------------------------------
// Private data members
//
// fUnicodeForm
// This is the Unicode XMLCh format of the string.
// -----------------------------------------------------------------------
XMLCh* fUnicodeForm;
};
#define X(str) XStr(str).unicodeForm()
std::string fromXMLString(XMLCh *oXMLString);
class XMLSerialize
{
private:
xercesc::DOMImplementation *mImpl;
protected:
xercesc::DOMImplementation *getDOMImplementation(void);
public:
XMLSerialize(void);
virtual ~XMLSerialize(void);
public:
/**
* Creates an empty DOM
*/
xercesc::DOMDocument *createDocument(const std::string &oDocumentName);
/**
* Parses an XML from a string.
*/
xercesc::DOMDocument *parseDocument(const std::string &oDocumentName, std::string const &oReferenceId);
/**
* Serializes the document into a string
*/
int serialize(xercesc::DOMDocument *oDocument, std::string &oXMLOut, bool bDocumentRelease = true);
};
#endif /* XML_SERIALIZE_H_ */
xml_serialize.cpp
#include <xercesc/util/XMLString.hpp>
#include <xercesc/dom/DOM.hpp>
#include <xercesc/util/TransService.hpp>
#include <xercesc/framework/MemBufFormatTarget.hpp>
#include <xercesc/util/OutOfMemoryException.hpp>
#include <sstream>
#include <vector>
#include <iostream>
#include "xml_serialize.h"
int serializeEnvironment(void);
XMLSerialize *serializer = NULL;
XMLSerialize::XMLSerialize()
{
mImpl = xercesc::DOMImplementationRegistry::getDOMImplementation(X("Core"));
}
XMLSerialize::~XMLSerialize()
{
}
xercesc::DOMDocument *XMLSerialize::createDocument(const std::string &oDocumentName)
{
if(mImpl == NULL)
return NULL;
xercesc::DOMDocument *doc = mImpl->createDocument(
0, // root element namespace URI.
X(oDocumentName.c_str()), // root element name
0); // document type object (DTD).
if(doc == NULL)
return NULL;
return doc;
}
int XMLSerialize::serialize(xercesc::DOMDocument *oDocument, std::string &oXMLOut, bool bDocumentRelease)
{
int result = 0;
XMLCh *xmlUnicode = NULL;
char *strXML = NULL;
xercesc::DOMWriter *serializer = NULL;
if(mImpl == NULL)
{
oXMLOut = "ERROR: XercesC DOMImplementationRegistry not initialized";
result = 1;
goto Quit;
}
serializer = ((xercesc::DOMImplementationLS*)mImpl)->createDOMWriter();
if(serializer == NULL)
{
oXMLOut = "ERROR: XercesC unable to instantiate a DOMWriter!";
result = 2;
goto Quit;
}
xmlUnicode = serializer->writeToString(*oDocument);
strXML = xercesc::XMLString::transcode(xmlUnicode);
oXMLOut = strXML;
if(bDocumentRelease == true)
oDocument->release();
result = 0;
Quit:
if(strXML != NULL)
xercesc::XMLString::release(&strXML);
if(xmlUnicode != NULL)
xercesc::XMLString::release(&xmlUnicode);
if(serializer != NULL)
serializer->release();
return result;
}
int serializeEnvironment(void)
{
int errorCode = 0;
xercesc::DOMElement *rootElem = NULL;
xercesc::DOMElement *item = NULL;
xercesc::DOMElement *element = NULL;
xercesc::DOMText *nameNode = NULL;
xercesc::DOMCDATASection *dataNode = NULL;
std::string xml;
try
{
xercesc::DOMDocument *doc = serializer->createDocument("EnvironmentList");
if(doc == NULL)
return 1;
rootElem = doc->getDocumentElement();
std::vector<std::pair<std::string, std::string> > env;
for(int i = 0; i < 5; i++)
{
std::string key;
std::string value;
std::stringstream ss;
ss << "KEY";
ss << i;
ss >> key;
ss.clear();
ss << "VALUE";
ss << i;
ss >> value;
ss.clear();
env.push_back(std::make_pair(key, value));
}
for(std::vector<std::pair<std::string, std::string> >::const_iterator it = env.begin(); it != env.end(); ++it)
{
std::pair<std::string, std::string>entry = *it;
std::string name = entry.first;
std::string value = entry.second;
if(value.empty())
value = "";
item = doc->createElement(X("item"));
rootElem->appendChild(item);
element = doc->createElement(X("item"));
nameNode = doc->createTextNode(X(name.c_str()));
item->appendChild(element);
element->appendChild(nameNode);
element = doc->createElement(X("item"));
dataNode = doc->createCDATASection(X(value.c_str()));
item->appendChild(element);
element->appendChild(dataNode);
}
errorCode = serializer->serialize(doc, xml);
std::cout << xml << std::endl;
doc->release();
errorCode = 0;
}
catch (const xercesc::OutOfMemoryException&)
{
XERCES_STD_QUALIFIER cerr << "OutOfMemoryException" << XERCES_STD_QUALIFIER endl;
errorCode = 2;
}
catch (const xercesc::DOMException& e)
{
XERCES_STD_QUALIFIER cerr << "DOMException code is: " << e.code << XERCES_STD_QUALIFIER endl;
errorCode = 3;
}
catch (...)
{
XERCES_STD_QUALIFIER cerr << "An error occurred creating the document" << XERCES_STD_QUALIFIER endl;
errorCode = 4;
}
return errorCode;
}
int main()
{
xercesc::XMLPlatformUtils::Initialize();
serializer = new XMLSerialize();
int error = 0;
for(int i = 0; i < 2; i++)
{
std::cout << "Serializing:" << i << " ... " << std::endl;
if((error = serializeEnvironment()) != 0)
std::cout << "ERROR" << error << std::endl;
std::cout << "Done" << std::endl;
}
xercesc::XMLPlatformUtils::Terminate();
return 0;
}
output
Serializing:0 ...
ERROR: XercesC unable to instantiate a DOMWriter!
Done
Serializing:1 ...
aCC runtime: pure virtual function called for class "xercesc_2_7::DOMImplementationLS".
Abort(coredump)
update
I finally managed to compile 2.7 for cygwin and tested the above code there. This works fine, so there must be some problem with the HP-UX environment.
I was compiling the code with gcc, and apparently the Xerces library was compiled with aCC. So now I switched to aCC in my makefile and it works.
One should expect that the produced libraries are compatible, but apparently this is not the case. So the above code is actually correct.