Keras custom layer with CNTK backend (CRF as RNN) - C++

I am attempting to reproduce CRF-as-RNN, which has been implemented in Keras with a TensorFlow backend (https://github.com/sadeepj/crfasrnn_keras). The Keras front end is fine, but some of the backend code is written as a TensorFlow custom op. I am trying to duplicate this in CNTK, but I have a few questions.
import cntk as C
from cntk import ops
import copy
import numpy as np
C.device.try_set_default_device(C.device.cpu())
ops.register_native_user_function('HighDimFilterOp', 'Cntk.HighDimFilter-' + C.__version__.rstrip('+'), 'CreateHighDimFilter')
def high_dim_filter(image=None, rgb=None, **kwargs):
    inputs = [list(image), list(rgb)]
    layer_config = copy.deepcopy(kwargs)
    return ops.native_user_function('HighDimFilterOp', inputs, layer_config, 'high_dim_filter')
This code is the Python call to my user C++ function. The C++ interface is as follows:
#include "HighDimFilter.h"
using namespace CNTK;
extern "C"
#ifdef _WIN32
__declspec(dllexport)
#endif
Function* CreateHighDimFilter(const Variable* operands, size_t /*numOperands*/, const Dictionary* attributes, const wchar_t* name)
{
printf("Creating HighDimFilter\n");
return new HighDimFilter({operands[0], operands[1]}, *attributes, name);
}
and the custom function itself is defined as:
#pragma once
#include "CNTKLibrary.h"
#include "modified_permutohedral.h"
using namespace CNTK;
class HighDimFilter final : public Function
{
bool _bilateral = false;
float _theta_alpha = 0.0f;
float _theta_beta = 0.0f;
float _theta_gamma = 0.0f;
enum Input : uint32_t
{
SCORES,
IM_INFO
};
public:
HighDimFilter(const std::vector<Variable>& inputs, const Dictionary& attributes, const std::wstring& name = L"HighDimFilter")
: Function(inputs, attributes, name)
{
if (attributes.Contains(L"bilateral"))
_bilateral = attributes[L"bilateral"].Value<bool>();
if (_bilateral == false)
{
if (attributes.Contains(L"theta_gamma"))
_theta_gamma = static_cast<float>(attributes[L"theta_gamma"].Value<double>());
}
else
{
if (attributes.Contains(L"theta_alpha"))
_theta_alpha = static_cast<float>(attributes[L"theta_alpha"].Value<double>());
if (attributes.Contains(L"theta_beta"))
_theta_beta = static_cast<float>(attributes[L"theta_beta"].Value<double>());
}
}
private:
void _compute_spatial_kernel(const NDArrayViewPtr& Tensor, const float theta_gamma)
{
auto output_kernel = Tensor->WritableDataBuffer<float>();
auto outputShape = Tensor->Shape();
//auto channels = outputShape[0];
auto height = outputShape[1];
auto width = outputShape[2];
const auto num_pixels = width * height;
for (int p = 0; p < num_pixels; ++p)
{
output_kernel[2 * p] = static_cast<float>(p % width) / theta_gamma;
output_kernel[2 * p + 1] = static_cast<float>(p / width) / theta_gamma;
}
}
void _compute_bilateral_kernel(const NDArrayViewPtr& Tensor, const NDArrayViewPtr& Image,
const float theta_alpha, const float theta_beta)
{
auto output_kernel = Tensor->WritableDataBuffer<float>();
auto rgb = Image->DataBuffer<float>();
auto outputShape = Tensor->Shape();
//auto channels = outputShape[0];
auto height = outputShape[1];
auto width = outputShape[2];
const auto num_pixels = height * width;
for (int p = 0; p < num_pixels; ++p)
{
// Spatial terms
output_kernel[5 * p] = static_cast<float>(p % width) / theta_alpha;
output_kernel[5 * p + 1] = static_cast<float>(p / width) / theta_alpha;
// Color terms
output_kernel[5 * p + 2] = static_cast<float>(rgb[p] / theta_beta);
output_kernel[5 * p + 3] = static_cast<float>(rgb[num_pixels + p] / theta_beta);
output_kernel[5 * p + 4] = static_cast<float>(rgb[2 * num_pixels + p] / theta_beta);
}
}
BackPropStatePtr Forward(const std::vector<ValuePtr>& inputValues,
std::unordered_map<Variable, ValuePtr>& outputs,
const DeviceDescriptor& computeDevice,
const std::unordered_set<Variable>& /*outputsToRetainBackwardStateFor */) override
{
#if 0
auto scoresShape = inputValues[Input::SCORES]->Shape();
auto channels = scoresShape[0];
auto height = scoresShape[1];
auto width = scoresShape[2];
const auto num_pixels = width * height;
auto &outputValue = outputs[this->Output()];
if (outputValue == nullptr)
{
outputValue = MakeSharedObject<Value>(MakeSharedObject<NDArrayView>(DataType::Float, scoresShape, computeDevice));
}
if (computeDevice.Type() != DeviceKind::CPU)
throw std::runtime_error("HighDimFilter: only CPU evaluation is supported at the moment.");
ModifiedPermutohedral mp;
if (_bilateral)
{
auto kernel_vals = MakeSharedObject<Value>(MakeSharedObject<NDArrayView>(DataType::Float, NDShape({5, height, width}), computeDevice));
//float* kernel_vals = new float[5 * num_pixels];
_compute_bilateral_kernel(kernel_vals->Data(), inputValues[Input::IM_INFO]->Data(),
_theta_alpha, _theta_beta);
mp.init(kernel_vals->Data(), 5, num_pixels);
mp.compute(outputValue->Data(), inputValues[Input::SCORES]->Data(), false);
}
else
{
auto kernel_vals = MakeSharedObject<Value>(MakeSharedObject<NDArrayView>(DataType::Float, NDShape({2, height, width}), computeDevice));
_compute_spatial_kernel(kernel_vals->Data(), _theta_gamma);
mp.init(kernel_vals->Data(), 2, num_pixels);
mp.compute(outputValue->Data(), inputValues[Input::SCORES]->Data(), channels, false);
}
return MakeSharedObject<BackPropState>(this->shared_from_this(), computeDevice, std::unordered_map<Variable, ValuePtr>({ {Inputs()[Input::IM_INFO], inputValues[Input::IM_INFO]} }));
#else
return nullptr;
#endif
}
void Backward(const BackPropStatePtr& state,
const std::unordered_map<Variable, ValuePtr>& rootGradientValues,
std::unordered_map<Variable, ValuePtr>& backPropagatedGradientValuesForInputs) override
{
#if 0
auto gradOutputVariable = Inputs()[Input::SCORES];
auto inputVariable = Inputs()[Input::IM_INFO];
auto &gradValue = backPropagatedGradientValuesForInputs[gradOutputVariable];
if (gradValue == nullptr)
gradValue = MakeSharedObject<Value>(MakeSharedObject<NDArrayView>(DataType::Float, gradOutputVariable.Shape(), state->Device()));
auto imageData = state->SavedForwardPropValues().at(inputVariable)->Data();
auto imageShape = imageData->Shape();
auto channels = imageShape[0];
auto height = imageShape[1];
auto width = imageShape[2];
const auto num_pixels = width * height;
if (state->Device().Type() != DeviceKind::CPU)
throw std::runtime_error("HighDimFilter: only CPU evaluation is supported at the moment.");
auto rootGradientData = rootGradientValues.at(this->Output())->Data();
ModifiedPermutohedral mp;
if (_bilateral)
{
auto kernel_vals = MakeSharedObject<Value>(MakeSharedObject<NDArrayView>(DataType::Float, NDShape({5, height, width}), state->Device()));
//float* kernel_vals = new float[5 * num_pixels];
_compute_bilateral_kernel(kernel_vals->Data(), imageData,
_theta_alpha, _theta_beta);
mp.init(kernel_vals->Data(), 5, num_pixels);
mp.compute(gradValue->Data(), rootGradientData, true);
}
else
{
auto kernel_vals = MakeSharedObject<Value>(MakeSharedObject<NDArrayView>(DataType::Float, NDShape({2, height, width}), state->Device()));
_compute_spatial_kernel(kernel_vals->Data(), _theta_gamma);
mp.init(kernel_vals->Data(), 2, num_pixels);
mp.compute(gradValue->Data(), rootGradientData, channels, true);
}
#endif
return;
}
const std::wstring& OpName() const override
{
static const std::wstring opName = L"HighDimFilterOp";
return opName;
}
size_t CurrentVersion() const override
{
NOT_IMPLEMENTED;
}
void InferOutputs(std::vector<Variable>& /*outputs */) override
{
NOT_IMPLEMENTED;
}
FunctionPtr Clone(const std::vector<Variable>& /*clonedInputs */) override
{
return nullptr;
}
};
My Python call looks like:
bilateral_high_dim_filter = custom_module.high_dim_filter(image=all_ones_flat,
                                                          rgb=rgb_flat,
                                                          bilateral=True,
                                                          theta_alpha=self.theta_alpha,
                                                          theta_beta=self.theta_beta)
high_dim_filter = custom_module.high_dim_filter(image=all_ones_flat,
                                                rgb=rgb_flat,
                                                bilateral=False,
                                                theta_gamma=self.theta_gamma)
The questions are as follows:
1) What are the "operands" passed to native_user_function at initialization? Are they only passed at initialization (i.e., are they intended to be weight and bias initialization)? How are the input operands used in the Function constructor? If I set them to None in Python, the code crashes.
2) How do you forward-propagate the filter? Do you just call Forward()? What arguments does forward propagation require?
3) Is there a numerical gradient calculation in CNTK, similar to TensorFlow's, for checking the gradient?
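For question 3, I am not aware of a built-in numerical gradient checker in the CNTK C++ API, but a hand-rolled central-difference check against Backward() is straightforward. The following is only an untested sketch: `filter`, `scores`, `imInfo`, `x` and `image` are placeholder names for the composite FunctionPtr wrapping the user function, its two input Variables and their data; it assumes float data on the CPU and the same includes and using namespace CNTK as the class above (plus <numeric>).
// Sketch only: compares d(sum(output))/d(scores[i]) from Backward() against a
// central difference. Not taken from the original code.
ValuePtr MakeFloatValue(const NDShape& shape, const std::vector<float>& data,
                        const DeviceDescriptor& device)
{
    auto view = MakeSharedObject<NDArrayView>(DataType::Float, shape, device);
    std::copy(data.begin(), data.end(), view->WritableDataBuffer<float>());
    return MakeSharedObject<Value>(view);
}

float SumOutput(const FunctionPtr& filter, const Variable& scores, const Variable& imInfo,
                const std::vector<float>& x, const std::vector<float>& image,
                const DeviceDescriptor& device)
{
    std::unordered_map<Variable, ValuePtr> args = {
        { scores, MakeFloatValue(scores.Shape(), x, device) },
        { imInfo, MakeFloatValue(imInfo.Shape(), image, device) } };
    std::unordered_map<Variable, ValuePtr> outs = { { filter->Output(), nullptr } };
    filter->Forward(args, outs, device, {});
    const float* out = outs[filter->Output()]->Data()->DataBuffer<float>();
    return std::accumulate(out, out + filter->Output().Shape().TotalSize(), 0.0f);
}

void CheckGradient(const FunctionPtr& filter, const Variable& scores, const Variable& imInfo,
                   std::vector<float> x, const std::vector<float>& image, size_t i,
                   float eps = 1e-2f)
{
    auto device = DeviceDescriptor::CPUDevice();

    // Analytic gradient of sum(output): backprop a root gradient of all ones.
    std::unordered_map<Variable, ValuePtr> args = {
        { scores, MakeFloatValue(scores.Shape(), x, device) },
        { imInfo, MakeFloatValue(imInfo.Shape(), image, device) } };
    std::unordered_map<Variable, ValuePtr> outs = { { filter->Output(), nullptr } };
    auto state = filter->Forward(args, outs, device, { filter->Output() });
    std::vector<float> ones(filter->Output().Shape().TotalSize(), 1.0f);
    std::unordered_map<Variable, ValuePtr> rootGrads = {
        { filter->Output(), MakeFloatValue(filter->Output().Shape(), ones, device) } };
    std::unordered_map<Variable, ValuePtr> grads = { { scores, nullptr } };
    filter->Backward(state, rootGrads, grads);
    float analytic = grads[scores]->Data()->DataBuffer<float>()[i];

    // Numerical gradient at the same coordinate.
    x[i] += eps;     float plus  = SumOutput(filter, scores, imInfo, x, image, device);
    x[i] -= 2 * eps; float minus = SumOutput(filter, scores, imInfo, x, image, device);
    float numeric = (plus - minus) / (2 * eps);

    printf("grad[%zu]: analytic %f vs numeric %f\n", i, analytic, numeric);
}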

Related

I am learning to make QRCode in esp32-2432s028

I am learning to make a QR code on the ESP32-2432S028 (the board in the picture), following https://www.youtube.com/watch?v=Ss3zBO-V9kI, and I have a problem.
The code:
/*
QR Code Maker (ESP32+LVGL8)
For More Information: https://youtu.be/Ss3zBO-V9kI
Created by Eric N. (ThatProject)
*/
/////////////////////////////////////////////////////////////////
#include <lvgl.h>
#include "MyDisplay.h"
static const uint32_t screenWidth = 320;
static const uint32_t screenHeight = 480;
static lv_disp_draw_buf_t draw_buf;
static lv_color_t buf[ screenWidth * 10 ];
lv_obj_t * mainScreen;
lv_obj_t * titleImage;
lv_obj_t * qrCode;
LV_IMG_DECLARE(ui_logo_img_obj);
void my_disp_flush( lv_disp_drv_t *disp, const lv_area_t *area, lv_color_t *color_p )
{
uint32_t w = ( area->x2 - area->x1 + 1 );
uint32_t h = ( area->y2 - area->y1 + 1 );
tft.startWrite();
tft.setAddrWindow( area->x1, area->y1, w, h );
tft.writePixels((lgfx::rgb565_t *)&color_p->full, w * h);
tft.endWrite();
lv_disp_flush_ready( disp );
}
void my_touchpad_read( lv_indev_drv_t * indev_driver, lv_indev_data_t * data )
{
if (ts.touched()) {
data->state = LV_INDEV_STATE_PR;
TS_Point p = ts.getPoint();
data->point.x = p.x;
data->point.y = p.y;
} else {
data->state = LV_INDEV_STATE_REL;
}
}
void setup()
{
Serial.begin(115200);
tft.begin();
tft.setRotation(0);
tft.setBrightness(255);
if (!ts.begin(40, SDA_FT6236, SCL_FT6236)) {
Serial.println("Unable to start the capacitive touch Screen.");
}
lv_init();
lv_disp_draw_buf_init( &draw_buf, buf, NULL, screenWidth * 10 );
static lv_disp_drv_t disp_drv;
lv_disp_drv_init(&disp_drv);
disp_drv.hor_res = screenWidth;
disp_drv.ver_res = screenHeight;
disp_drv.flush_cb = my_disp_flush;
disp_drv.draw_buf = &draw_buf;
lv_disp_drv_register(&disp_drv);
static lv_indev_drv_t indev_drv;
lv_indev_drv_init(&indev_drv);
indev_drv.type = LV_INDEV_TYPE_POINTER;
indev_drv.read_cb = my_touchpad_read;
lv_indev_drv_register(&indev_drv);
ui_init();
}
void loop() {
lv_timer_handler();
delay( 5 );
}
void ui_init() {
ui_background();
ui_dynamic_obj();
lv_disp_load_scr(mainScreen);
}
void ui_background() {
mainScreen = lv_obj_create(NULL);
lv_obj_clear_flag(mainScreen, LV_OBJ_FLAG_SCROLLABLE);
titleImage = lv_img_create(mainScreen);
lv_img_set_src(titleImage, &ui_logo_img_obj);
lv_obj_set_size(titleImage, 320, 117);
lv_obj_set_pos(titleImage, 0, 0);
lv_obj_set_align(titleImage, LV_ALIGN_TOP_MID);
lv_obj_add_flag(titleImage, LV_OBJ_FLAG_ADV_HITTEST);
lv_obj_clear_flag(titleImage, LV_OBJ_FLAG_SCROLLABLE);
lv_obj_t * titleLabel = lv_label_create(mainScreen);
lv_obj_set_pos(titleLabel, 0, 120);
lv_obj_set_align(titleLabel, LV_ALIGN_TOP_MID);
lv_label_set_text(titleLabel, "QR Code Maker");
lv_obj_clear_flag(titleLabel, LV_OBJ_FLAG_CLICKABLE);
lv_obj_set_style_text_font(titleLabel, &lv_font_montserrat_34, LV_PART_MAIN | LV_STATE_DEFAULT);
}
static void ta_event_cb(lv_event_t * e) {
lv_event_code_t code = lv_event_get_code(e);
lv_obj_t * ta = lv_event_get_target(e);
lv_obj_t * kb = (lv_obj_t*)lv_event_get_user_data(e);
if (code == LV_EVENT_READY) {
lv_obj_add_flag(kb, LV_OBJ_FLAG_HIDDEN);
const char * text = lv_textarea_get_text(ta);
if (strlen(text) == 0) return;
lv_qrcode_update(qrCode, text, strlen(text));
lv_obj_clear_flag(qrCode, LV_OBJ_FLAG_HIDDEN);
}
if (code == LV_EVENT_CLICKED || code == LV_EVENT_FOCUSED) {
lv_keyboard_set_textarea(kb, ta);
lv_obj_clear_flag(kb, LV_OBJ_FLAG_HIDDEN);
lv_obj_add_flag(qrCode, LV_OBJ_FLAG_HIDDEN);
}
if (code == LV_EVENT_DEFOCUSED) {
lv_keyboard_set_textarea(kb, NULL);
lv_obj_add_flag(kb, LV_OBJ_FLAG_HIDDEN);
}
}
void ui_dynamic_obj(void) {
lv_obj_t * kb = lv_keyboard_create(mainScreen);
lv_obj_t * ta = lv_textarea_create(mainScreen);
lv_obj_align(ta, LV_ALIGN_CENTER, 0, -40);
lv_obj_add_event_cb(ta, ta_event_cb, LV_EVENT_ALL, kb);
lv_obj_set_size(ta, screenWidth - 40, 60);
lv_keyboard_set_textarea(kb, ta);
qrCode = lv_qrcode_create(mainScreen, 200, lv_color_hex3(0x000), lv_color_hex3(0xeef));
lv_obj_set_pos(qrCode, 0, -20);
lv_obj_set_align(qrCode, LV_ALIGN_BOTTOM_MID);
lv_obj_add_flag(qrCode, LV_OBJ_FLAG_HIDDEN);
}
I run it in Arduino IDE 2.0.1.
The error shown is:
C:\Users\Supakee\Documents\Arduino\QRCODE\QRCODE.ino: In function 'void ui_dynamic_obj()':
C:\Users\Supakee\Documents\Arduino\QRCODE\QRCODE.ino:141:88: error: too many arguments to function 'lv_obj_t* lv_qrcode_create(lv_obj_t*)'
qrCode = lv_qrcode_create(mainScreen, 200, lv_color_hex3(0x000), lv_color_hex3(0xeef));
^
In file included from c:\users\supakee\documents\arduino\libraries\lvgl-d17450a55fbd8603e57b64d23feb36a63832b195\lvgl.h:91,
from c:\Users\Supakee\Documents\Arduino\libraries\lvgl-d17450a55fbd8603e57b64d23feb36a63832b195\src/lvgl.h:17,
from C:\Users\Supakee\Documents\Arduino\QRCODE\QRCODE.ino:7:
c:\users\supakee\documents\arduino\libraries\lvgl-d17450a55fbd8603e57b64d23feb36a63832b195\src/libs/qrcode/lv_qrcode.h:45:12: note: declared here
lv_obj_t * lv_qrcode_create(lv_obj_t * parent);
^~~~~~~~~~~~~~~~
Multiple libraries were found for "lvgl.h"
Used: C:\Users\Supakee\Documents\Arduino\libraries\lvgl-d17450a55fbd8603e57b64d23feb36a63832b195
Not used: C:\Users\Supakee\Documents\Arduino\libraries\src
exit status 1
Compilation error: too many arguments to function 'lv_obj_t* lv_qrcode_create(lv_obj_t*)'
I tried to follow the instructions in the LVGL documentation.
I want this ESP32-2432S028 to display a QR code, so how can I do it?
I use the library from https://github.com/lvgl/lvgl/tree/6948eee9b12e13ea7db9287b96385b05a8a6cc9a
I use the code from https://github.com/0015/ThatProject/tree/master/ESP32_LVGL/LVGL8/4_QR_Code_Maker
The compilation error:
Compilation error: too many arguments to function 'lv_obj_t* lv_qrcode_create(lv_obj_t*)'
suggests that lv_qrcode_create() should be given only a single argument, whereas this call passes four:
qrCode = lv_qrcode_create(mainScreen, 200, lv_color_hex3(0x000), lv_color_hex3(0xeef));
Does the documentation you have read differ from the implementation you are using?
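The tutorial code targets LVGL 8, where lv_qrcode_create() took the size and colors as extra arguments, while the header you are compiling against (current master, v9-style API) only takes the parent and configures the widget with separate setters. Assuming that newer API, the QR-code creation in ui_dynamic_obj() would look roughly like this (untested sketch):
// Sketch for the master-branch (v9-style) qrcode API: size and colors are set
// with dedicated setters instead of extra arguments to lv_qrcode_create().
qrCode = lv_qrcode_create(mainScreen);
lv_qrcode_set_size(qrCode, 200);
lv_qrcode_set_dark_color(qrCode, lv_color_hex3(0x000));
lv_qrcode_set_light_color(qrCode, lv_color_hex3(0xeef));
lv_obj_set_pos(qrCode, 0, -20);
lv_obj_set_align(qrCode, LV_ALIGN_BOTTOM_MID);
lv_obj_add_flag(qrCode, LV_OBJ_FLAG_HIDDEN);
Alternatively, install an LVGL 8.x release to match the tutorial; the "Multiple libraries were found for lvgl.h" note also suggests double-checking which copy of the library the IDE actually picks up.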

Parameter of Mat.at() in Opencv

I recently wrote a program to sample data from an image. The code:
cv::Mat image_data;
Eigen::Vector3f getColor(float u, float v)
{
auto u_img = u * width;
auto v_img = (1 - v) * height;
auto color = image_data.at<cv::Vec3b>(v_img, u_img);
return Eigen::Vector3f(color[0], color[1], color[2]);
}
When image_data.at<cv::Vec3b>(v_img, u_img) is called, the program throws an exception. I found that v_img == 578.815 and u_img == -28.672, and the size of image_data is 1024x1024. Is there anything wrong with the data?
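The negative u_img indicates the caller is passing a u coordinate outside [0, 1], so at() is asked for a column outside the image, which is most likely what triggers the exception; note also that at(row, col) takes integer indices, so the floats are truncated. One possible guard, clamping the coordinates before indexing (only a sketch, not part of the original question; the width/height are taken from the Mat itself):
// Sketch: clamp texture coordinates to [0, 1] and indices to the image bounds.
#include <algorithm>
Eigen::Vector3f getColor(float u, float v)
{
    const int w = image_data.cols;
    const int h = image_data.rows;
    u = std::max(0.0f, std::min(1.0f, u));
    v = std::max(0.0f, std::min(1.0f, v));
    int u_img = std::min(static_cast<int>(u * w), w - 1);
    int v_img = std::min(static_cast<int>((1 - v) * h), h - 1);
    cv::Vec3b color = image_data.at<cv::Vec3b>(v_img, u_img);  // at(row, col)
    return Eigen::Vector3f(color[0], color[1], color[2]);
}
Clamping only hides the symptom, though; the real question is why the caller produces u < 0 in the first place.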

Matrix multiplication in SYCL using nd_range

I am doing matrix multiplication in SYCL but having some problems. I am using two 4x4 matrices. On the first iteration of the for loop it works, but on the second iteration (i = 1) it works fine until C[11] = A[11]*B[15]; then it skips one multiplication and moves on. I know why it skips, but unfortunately I have not been able to adjust the index into matrix B properly. If someone can help, I will greatly appreciate it. Thanks.
Here is the code
matSize = 4 and blockSize = 4. I know the for loop should run up to matSize; it is 2 here just to get a clear idea of the execution flow.
{
range<1> dimensions(matSize * matSize);
const property_list props = { property::buffer::use_host_ptr() };
buffer<T> A_buf(MA, dimensions, props);
buffer<T> B_buf(MB, dimensions, props);
buffer<T> C_buf(MC, dimensions, props);
myQueue.submit([&](handler& cgh) {
auto A_ptr = A_buf.template get_access<access::mode::read>(cgh);
auto B_ptr = B_buf.template get_access<access::mode::read_write>(cgh);
auto C_ptr = C_buf.template get_access<access::mode::write>(cgh);
auto localRange = range<1>(blockSize* blockSize);
accessor<T, 1, access::mode::read_write, access::target::local>
C(matSize * matSize, cgh);
cgh.parallel_for<mxm_kernel>(
nd_range<2>(range<2>{matSize, matSize},
range<2>{blockSize, blockSize}),
[=](nd_item<2> item) {
const auto id_x = item.get_global_id(0);
const auto id_y = item.get_global_id(1);
const auto width = item.get_group_range(0) * item.get_local_range(0);
const auto index = id_x * width + id_y;
const auto index2 = id_y * width + id_x;
for (int i = 0; i < 2 ; i++) {
C[index] += A_ptr[index] * B_ptr[index2 + i ];
}
out << "C is!" << C[index] << sycl::endl;
item.barrier(cl::sycl::access::fence_space::local_space);
C_ptr[index] = C[index];
});
});
}
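For comparison, the usual nd_range formulation gives each work-item one element of C and loops over the shared dimension k, which removes the juggling between index and index2. Only the parallel_for inside the submit changes; this sketch reuses the accessors A_ptr, B_ptr, C_ptr and matSize/blockSize declared above (the local accessor is not needed for this plain version):
// Sketch: one work-item per output element, inner loop over the shared dimension k.
cgh.parallel_for<mxm_kernel>(
    nd_range<2>(range<2>{matSize, matSize},
                range<2>{blockSize, blockSize}),
    [=](nd_item<2> item) {
        const auto row = item.get_global_id(0);
        const auto col = item.get_global_id(1);
        T sum{0};
        for (size_t k = 0; k < static_cast<size_t>(matSize); ++k)
            sum += A_ptr[row * matSize + k] * B_ptr[k * matSize + col];
        C_ptr[row * matSize + col] = sum;
    });
Tiling into local memory can be added afterwards, once the plain indexing produces correct results.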

Equivalence of slicing tensor in Pytorch/ATen C++

In Python, given a 2-D tensor, we can use tensor[:,:2] to slice out the 2x2 matrix of the first two elements in the top left of the matrix, e.g.:
x = torch.tensor([[-1.4673, 0.9980, -2.1427, -1.1798, -0.0646, -0.2635, -2.8930, -0.2563,
0.4559, -0.7947, -0.4540, 3.3224, 0.2295, 5.5568, -8.0451, -2.4529,
4.8724, 2.1640, 3.3255, 0.6693, -1.2362, 4.4713, -3.5547, -0.0528,
0.1031, -1.2472, -1.6014, 1.8134],
[ 2.1636, -1.1497, -5.0298, 2.8261, -0.5684, 0.6389, 2.9009, -5.1609,
1.7358, -3.1819, -0.9877, 5.5598, 6.7142, 4.5704, -1.2683, -5.3046,
3.0454, 3.2757, -3.2541, 3.6619, -3.6391, -0.2002, 5.7175, 5.7130,
0.6632, -0.0744, -0.3502, 4.8116]])
y, z = x[:,:2].chunk(2,1)
print(y)
print(z)
[out]:
tensor([[-1.4673],
[ 2.1636]])
tensor([[ 0.9980],
[-1.1497]])
What is the right way to do this in C++, particularly with PyTorch's ATen?
For example, in the LSTM there is the gates.chunk(4, 1) call at https://github.com/pytorch/pytorch/blob/master/aten/src/ATen/native/RNN.cpp#L253
If I want to do gates[:,:2].chunk(2, 1) to extract different parts of the gates, e.g. auto partial_gates = gates[:,:2].chunk(4, 1);, how can it be done?
template <typename cell_params>
struct LSTMCell : Cell<std::tuple<Tensor, Tensor>, cell_params> {
using hidden_type = std::tuple<Tensor, Tensor>;
hidden_type operator()(const Tensor& input, const hidden_type& hidden, const cell_params& params) const override {
auto hx = std::get<0>(hidden);
auto cx = std::get<1>(hidden);
if (input.is_cuda()) {
auto igates = params.matmul_ih(input);
auto hgates = params.matmul_hh(hx);
auto result = at::_thnn_fused_lstm_cell(igates, hgates, cx, params.b_ih, params.b_hh);
// Slice off the workspace argument (it's needed only for AD).
return std::make_tuple(std::get<0>(result), std::get<1>(result));
}
auto gates = params.linear_ih(input) + params.linear_hh(hx);
auto chunked_gates = gates.chunk(4, 1);
auto partial_gates = gates[:,:2].chunk(4, 1);  // what I want to do (not valid C++)
auto ingate = chunked_gates[0].sigmoid();
auto forgetgate = chunked_gates[1].sigmoid();
auto cellgate = chunked_gates[2].tanh();
auto outgate = chunked_gates[3].sigmoid();
auto cy = (forgetgate * cx) + (ingate * cellgate);
auto hy = outgate * cy.tanh();
return std::make_tuple(hy, cy);
}
};
1. You can also use .slice
Tensor::slice(int64_t dim, int64_t start, int64_t end, int64_t step)
auto partial_gates = gates.slice(1, 0, 2).chunk(4, 1);  // end index is exclusive, like Python slicing
2. Pytorch 1.5 using Tensor::index and Tensor::index_put_
using namespace torch::indexing;
auto partial_gates = gates.index({"...", Slice(None, 2)}).chunk(4, 1);
It also supports multidimensional indexing.
General translation for Tensor::index and Tensor::index_put_:

Python              C++ (assuming `using namespace torch::indexing`)
---------------------------------------------------------------------
0                   0
None                None
...                 "..." or Ellipsis
:                   Slice()
start:stop:step     Slice(start, stop, step)
True / False        true / false
[[1, 2]]            torch::tensor({{1, 2}})
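For instance, reading and then writing the same slice with those two calls (the zero-fill here is only for illustration, not part of the original answer):
using namespace torch::indexing;
// gates[:, :2]
auto first_two = gates.index({Slice(), Slice(None, 2)});
// gates[:, :2] = 0
gates.index_put_({Slice(), Slice(None, 2)}, torch::zeros_like(first_two));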
You can also use .narrow(), from https://github.com/pytorch/pytorch/blob/master/aten/src/ATen/native/TensorShape.cpp#L364
auto partial_gates = gates.narrow(1,0,2).chunk(4, 1);

CNTK Evaluate model has two inputs C++

I have a project based on CNTK 2.3. I used the code from the integration tests to train an MNIST classifier, like this:
auto device = DeviceDescriptor::GPUDevice(0);
const size_t inputDim = sizeBlob * sizeBlob;
const size_t numOutputClasses = numberOfClasses;
const size_t hiddenLayerDim = 200;
auto input = InputVariable({ inputDim }, CNTK::DataType::Float, L"features");
auto scaledInput = ElementTimes(Constant::Scalar(0.00390625f, device), input);
auto classifierOutput = FullyConnectedDNNLayer(scaledInput, hiddenLayerDim, device, std::bind(Sigmoid, _1, L""));
auto outputTimesParam = Parameter(NDArrayView::RandomUniform<float>({ numOutputClasses, hiddenLayerDim }, -0.05, 0.05, 1, device));
auto outputBiasParam = Parameter(NDArrayView::RandomUniform<float>({ numOutputClasses }, -0.05, 0.05, 1, device));
classifierOutput = Plus(outputBiasParam, Times(outputTimesParam, classifierOutput), L"classifierOutput");
auto labels = InputVariable({ numOutputClasses }, CNTK::DataType::Float, L"labels");
auto trainingLoss = CNTK::CrossEntropyWithSoftmax(classifierOutput, labels, L"lossFunction");
auto prediction = CNTK::ClassificationError(classifierOutput, labels, L"classificationError");
// Test save and reload of model
Variable classifierOutputVar = classifierOutput;
Variable trainingLossVar = trainingLoss;
Variable predictionVar = prediction;
auto combinedNet = Combine({ trainingLoss, prediction, classifierOutput }, L"MNISTClassifier");
//SaveAndReloadModel<float>(combinedNet, { &input, &labels, &trainingLossVar, &predictionVar, &classifierOutputVar }, device);
classifierOutput = classifierOutputVar;
trainingLoss = trainingLossVar;
prediction = predictionVar;
const size_t minibatchSize = 64;
const size_t numSamplesPerSweep = 60000;
const size_t numSweepsToTrainWith = 2;
const size_t numMinibatchesToTrain = (numSamplesPerSweep * numSweepsToTrainWith) / minibatchSize;
auto featureStreamName = L"features";
auto labelsStreamName = L"labels";
auto minibatchSource = TextFormatMinibatchSource(trainingSet, { { featureStreamName, inputDim },{ labelsStreamName, numOutputClasses } });
auto featureStreamInfo = minibatchSource->StreamInfo(featureStreamName);
auto labelStreamInfo = minibatchSource->StreamInfo(labelsStreamName);
LearningRateSchedule learningRatePerSample = TrainingParameterPerSampleSchedule<double>(0.003125);
auto trainer = CreateTrainer(classifierOutput, trainingLoss, prediction, { SGDLearner(classifierOutput->Parameters(), learningRatePerSample) });
size_t outputFrequencyInMinibatches = 20;
for (size_t i = 0; i < numMinibatchesToTrain; ++i)
{
auto minibatchData = minibatchSource->GetNextMinibatch(minibatchSize, device);
trainer->TrainMinibatch({ { input, minibatchData[featureStreamInfo] },{ labels, minibatchData[labelStreamInfo] } }, device);
PrintTrainingProgress(trainer, i, outputFrequencyInMinibatches);
size_t trainingCheckpointFrequency = 100;
if ((i % trainingCheckpointFrequency) == (trainingCheckpointFrequency - 1))
{
const wchar_t* ckpName = L"feedForward.net";
//trainer->SaveCheckpoint(ckpName);
//trainer->RestoreFromCheckpoint(ckpName);
}
}
combinedNet->Save(g_dnnFile);
That part works fine: I train the model and then save it to a model file. But when I try to evaluate a simple image to test the model, it looks like something is wrong with the model.
// Load the model.
// The model is trained by <CNTK>/Examples/Image/Classification/ResNet/Python/TrainResNet_CIFAR10.py
// Please see README.md in <CNTK>/Examples/Image/Classification/ResNet about how to train the model.
FunctionPtr modelFunc = Function::Load(modelFile, device);
// Get input variable. The model has only one single input.
std::vector<Variable> inputs = modelFunc->Arguments();
Variable inputVar = modelFunc->Arguments()[0];
// The model has only one output.
// If the model has more than one output, use modelFunc->Outputs to get the list of output variables.
std::vector<Variable> outputs = modelFunc->Outputs();
Variable outputVar = outputs[0];
// Prepare input data.
// For evaluating an image, you first need to perform some image preprocessing to make sure that the input image has the correct size and layout
// that match the model inputs.
// Please note that the model used by this example expects the CHW image layout.
// inputVar.Shape[0] is image width, inputVar.Shape[1] is image height, and inputVar.Shape[2] is channels.
// For simplicity and avoiding external dependencies, we skip the preprocessing step here, and just use some artificially created data as input.
Mat image = imread(".....");
uint8_t* imagePtr = (uint8_t*)(image).data;
auto width = image.cols;
auto heigth = image.rows;
std::vector<float> inputData(inputVar.Shape().TotalSize());
for (size_t i = 0; i < inputData.size(); ++i)
{
auto curChVal = imagePtr[(i)];
inputData[i] = curChVal;
}
// Create input value and input data map
ValuePtr inputVal = Value::CreateBatch(inputVar.Shape(), inputData, device);
std::unordered_map<Variable, ValuePtr> inputDataMap = { { inputVar, inputVal } };
// Create output data map. Using null as Value to indicate using system allocated memory.
// Alternatively, create a Value object and add it to the data map.
std::unordered_map<Variable, ValuePtr> outputDataMap = { { outputVar, nullptr } };
// Start evaluation on the device
modelFunc->Evaluate(inputDataMap, outputDataMap, device);
// Get evaluate result as dense output
ValuePtr outputVal = outputDataMap[outputVar];
std::vector<std::vector<float>> outputData;
outputVal->CopyVariableValueTo(outputVar, outputData);
PrintOutput<float>(outputVar.Shape().TotalSize(), outputData);
I run the same code in C# and it works fine. The difference I found is that modelFunc->Arguments() should have one argument but it has two: it finds both features and labels as inputs, whereas I need only features as an input, and evaluation throws an error.
Find the input and output variables by name instead of using modelFunc->Arguments()[0]:
Variable inputVar;
GetInputVariableByName(modelFunc, L"features", inputVar);
Variable outputVar;
GetOutputVaraiableByName(modelFunc, L"classifierOutput", outputVar);
GetInputVariableByName() and GetOutputVaraiableByName() come from:
https://github.com/Microsoft/CNTK/blob/v2.3.1/Tests/EndToEndTests/EvalClientTests/CNTKLibraryCPPEvalExamplesTest/EvalMultithreads.cpp#L316
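Those helpers essentially just scan the model's arguments and outputs for a matching name. Roughly (a sketch based on the public CNTK C++ API, not copied from the linked file):
// Sketch: look up a Variable by name among the model's arguments / outputs.
inline bool GetInputVariableByName(FunctionPtr evalFunc, const std::wstring& varName, Variable& var)
{
    for (const auto& arg : evalFunc->Arguments())
    {
        if (arg.Name() == varName)
        {
            var = arg;
            return true;
        }
    }
    return false;
}

inline bool GetOutputVaraiableByName(FunctionPtr evalFunc, const std::wstring& varName, Variable& var)
{
    for (const auto& out : evalFunc->Outputs())
    {
        if (out.Name() == varName)
        {
            var = out;
            return true;
        }
    }
    return false;
}
With these, the evaluation code can bind L"features" as the only input and L"classifierOutput" as the output, even though the saved combined network also exposes the labels input used by the loss and error functions.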