Convert simple C++ code to Google Docs spreadsheet OR Excel - c++

I have simple C++ code that I need to convert into a Google Docs spreadsheet formula, but unfortunately it didn't work because of a bracket issue... Could you please help me fix the Google Docs cell formula?
C++ code
#ifndef DEMMURAGE_H
#define DEMMURAGE_H

#include <QtCore>

class Demmurage : public QObject
{
    Q_OBJECT
public:
    Demmurage(QDate vesselArrival,
              QDate containerDelivery,
              int containerSize,
              int freeDuration = 10,
              int stageOneDays = 10,
              int stageOne40Dem = 15,
              int stageTwo40Dem = 25,
              int stageOne20Dem = 10,
              int stageTwo20Dem = 15,
              double dollarToDynar = 1.4)
    {
        _arrival = vesselArrival;
        _delivery = containerDelivery;
        _containerSize = containerSize;
        _freeDuration = freeDuration;
        _stageOneDays = stageOneDays;
        _stageOne40Dem = stageOne40Dem;
        _stageTwo40Dem = stageTwo40Dem;
        _stageOne20Dem = stageOne20Dem;
        _stageTwo20Dem = stageTwo20Dem;
        _dollarToDynar = dollarToDynar;
        _demurrage = 0;

        // Chargeable days = days between arrival and delivery minus the free period.
        int duration = _arrival.daysTo(_delivery) - _freeDuration;
        if (duration > 0) {
            if (duration - _stageOneDays <= 0) {
                // Everything falls inside stage one.
                if (containerSize == 40)
                    _demurrage = duration * _stageOne40Dem * _dollarToDynar;
                else
                    _demurrage = duration * _stageOne20Dem * _dollarToDynar;
            } else {
                // Stage one is exhausted; the remainder is billed at the stage-two rate.
                if (containerSize == 40)
                    _demurrage = (_stageOneDays * _stageOne40Dem * _dollarToDynar) + ((duration - _stageOneDays) * _stageTwo40Dem * _dollarToDynar);
                else
                    _demurrage = (_stageOneDays * _stageOne20Dem * _dollarToDynar) + ((duration - _stageOneDays) * _stageTwo20Dem * _dollarToDynar);
            }
        }
    }

    double demmurage()
    {
        return _demurrage;
    }

    virtual ~Demmurage() {}

private:
    QDate _arrival;          // vessel arrival date
    QDate _delivery;         // container delivery date
    int _containerSize;      // 20 or 40 (feet)
    double _demurrage;
    int _freeDuration;
    int _stageOneDays;
    int _stageOne40Dem;
    int _stageTwo40Dem;
    int _stageOne20Dem;
    int _stageTwo20Dem;
    double _dollarToDynar;
};

#endif // DEMMURAGE_H
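For reference, a minimal usage sketch of the class above (my own example, assuming the header is named demmurage.h and lives in an ordinary Qt project so Q_OBJECT is moc-processed):
#include <QDate>
#include <QDebug>
#include "demmurage.h" // the header shown above
int main()
{
    // 40-ft container: arrived 1 March, delivered 26 March = 25 days, 10 of them free.
    Demmurage d(QDate(2020, 3, 1), QDate(2020, 3, 26), 40);
    // 15 chargeable days: 10 at 15*1.4 and 5 at 25*1.4 => 210 + 175 = 385
    qDebug() << d.demmurage();
    return 0;
}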
Cell of the Google Docs spreadsheet:
=if( ((L84-I84-10) > 0), ((if ((L84-I84-10) - 10 <= 0), (if( (F84==40), (L84-I84-10)*15*1.4, (L84-I84-10)*10*1.4 )), (if( (F84==40), (10*15*1.4)+(((L84-I84-10)-10)*25*1.4), (10*10*1.4)+(((L84-I84-10)-10)*15*1.4) )))), 0)

I found the error... it happens because of the stray "(" before IF and the "==" in F84==40 (Google Sheets uses a single "=" for comparison).
The correct format is:
=if((L84-I84-10)>0,if((L84-I84-10)-10<=0,if(F84=40,(L84-I84-10)*15*1.4,(L84-I84-10)*10*1.4),if(F84=40,(10*15*1.4)+(((L84-I84-10)-10)*25*1.4),(10*10*1.4)+(((L84-I84-10)-10)*15*1.4))),0)
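As a sanity check against the C++ defaults (reading I84 as the vessel arrival date, L84 as the container delivery date and F84 as the container size, which is how the formula appears to be wired): for a 40-ft container with L84-I84 = 35, the chargeable duration is 35-10 = 25 days, so the first 10 days bill at 15*1.4 and the remaining 15 days at 25*1.4, giving (10*15*1.4) + (15*25*1.4) = 210 + 525 = 735, the same number the C++ constructor produces.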

Related

I am learning to make a QR code on the esp32-2432s028

I am learning to make a QR code on the esp32-2432s028 (this picture) from https://www.youtube.com/watch?v=Ss3zBO-V9kI and I have a problem.
Here is the code:
/*
QR Code Maker (ESP32+LVGL8)
For More Information: https://youtu.be/Ss3zBO-V9kI
Created by Eric N. (ThatProject)
*/
/////////////////////////////////////////////////////////////////
#include <lvgl.h>
#include "MyDisplay.h"
static const uint32_t screenWidth = 320;
static const uint32_t screenHeight = 480;
static lv_disp_draw_buf_t draw_buf;
static lv_color_t buf[ screenWidth * 10 ];
lv_obj_t * mainScreen;
lv_obj_t * titleImage;
lv_obj_t * qrCode;
LV_IMG_DECLARE(ui_logo_img_obj);
void my_disp_flush( lv_disp_drv_t *disp, const lv_area_t *area, lv_color_t *color_p )
{
uint32_t w = ( area->x2 - area->x1 + 1 );
uint32_t h = ( area->y2 - area->y1 + 1 );
tft.startWrite();
tft.setAddrWindow( area->x1, area->y1, w, h );
tft.writePixels((lgfx::rgb565_t *)&color_p->full, w * h);
tft.endWrite();
lv_disp_flush_ready( disp );
}
void my_touchpad_read( lv_indev_drv_t * indev_driver, lv_indev_data_t * data )
{
if (ts.touched()) {
data->state = LV_INDEV_STATE_PR;
TS_Point p = ts.getPoint();
data->point.x = p.x;
data->point.y = p.y;
} else {
data->state = LV_INDEV_STATE_REL;
}
}
void setup()
{
Serial.begin(115200);
tft.begin();
tft.setRotation(0);
tft.setBrightness(255);
if (!ts.begin(40, SDA_FT6236, SCL_FT6236)) {
Serial.println("Unable to start the capacitive touch Screen.");
}
lv_init();
lv_disp_draw_buf_init( &draw_buf, buf, NULL, screenWidth * 10 );
static lv_disp_drv_t disp_drv;
lv_disp_drv_init(&disp_drv);
disp_drv.hor_res = screenWidth;
disp_drv.ver_res = screenHeight;
disp_drv.flush_cb = my_disp_flush;
disp_drv.draw_buf = &draw_buf;
lv_disp_drv_register(&disp_drv);
static lv_indev_drv_t indev_drv;
lv_indev_drv_init(&indev_drv);
indev_drv.type = LV_INDEV_TYPE_POINTER;
indev_drv.read_cb = my_touchpad_read;
lv_indev_drv_register(&indev_drv);
ui_init();
}
void loop() {
lv_timer_handler();
delay( 5 );
}
void ui_init() {
ui_background();
ui_dynamic_obj();
lv_disp_load_scr(mainScreen);
}
void ui_background() {
mainScreen = lv_obj_create(NULL);
lv_obj_clear_flag(mainScreen, LV_OBJ_FLAG_SCROLLABLE);
titleImage = lv_img_create(mainScreen);
lv_img_set_src(titleImage, &ui_logo_img_obj);
lv_obj_set_size(titleImage, 320, 117);
lv_obj_set_pos(titleImage, 0, 0);
lv_obj_set_align(titleImage, LV_ALIGN_TOP_MID);
lv_obj_add_flag(titleImage, LV_OBJ_FLAG_ADV_HITTEST);
lv_obj_clear_flag(titleImage, LV_OBJ_FLAG_SCROLLABLE);
lv_obj_t * titleLabel = lv_label_create(mainScreen);
lv_obj_set_pos(titleLabel, 0, 120);
lv_obj_set_align(titleLabel, LV_ALIGN_TOP_MID);
lv_label_set_text(titleLabel, "QR Code Maker");
lv_obj_clear_flag(titleLabel, LV_OBJ_FLAG_CLICKABLE);
lv_obj_set_style_text_font(titleLabel, &lv_font_montserrat_34, LV_PART_MAIN | LV_STATE_DEFAULT);
}
static void ta_event_cb(lv_event_t * e) {
lv_event_code_t code = lv_event_get_code(e);
lv_obj_t * ta = lv_event_get_target(e);
lv_obj_t * kb = (lv_obj_t*)lv_event_get_user_data(e);
if (code == LV_EVENT_READY) {
lv_obj_add_flag(kb, LV_OBJ_FLAG_HIDDEN);
const char * text = lv_textarea_get_text(ta);
if (strlen(text) == 0) return;
lv_qrcode_update(qrCode, text, strlen(text));
lv_obj_clear_flag(qrCode, LV_OBJ_FLAG_HIDDEN);
}
if (code == LV_EVENT_CLICKED || code == LV_EVENT_FOCUSED) {
lv_keyboard_set_textarea(kb, ta);
lv_obj_clear_flag(kb, LV_OBJ_FLAG_HIDDEN);
lv_obj_add_flag(qrCode, LV_OBJ_FLAG_HIDDEN);
}
if (code == LV_EVENT_DEFOCUSED) {
lv_keyboard_set_textarea(kb, NULL);
lv_obj_add_flag(kb, LV_OBJ_FLAG_HIDDEN);
}
}
void ui_dynamic_obj(void) {
lv_obj_t * kb = lv_keyboard_create(mainScreen);
lv_obj_t * ta = lv_textarea_create(mainScreen);
lv_obj_align(ta, LV_ALIGN_CENTER, 0, -40);
lv_obj_add_event_cb(ta, ta_event_cb, LV_EVENT_ALL, kb);
lv_obj_set_size(ta, screenWidth - 40, 60);
lv_keyboard_set_textarea(kb, ta);
qrCode = lv_qrcode_create(mainScreen, 200, lv_color_hex3(0x000), lv_color_hex3(0xeef));
lv_obj_set_pos(qrCode, 0, -20);
lv_obj_set_align(qrCode, LV_ALIGN_BOTTOM_MID);
lv_obj_add_flag(qrCode, LV_OBJ_FLAG_HIDDEN);
}
I run this in Arduino IDE 2.0.1.
The error shown is:
C:\Users\Supakee\Documents\Arduino\QRCODE\QRCODE.ino: In function 'void ui_dynamic_obj()':
C:\Users\Supakee\Documents\Arduino\QRCODE\QRCODE.ino:141:88: error: too many arguments to function 'lv_obj_t* lv_qrcode_create(lv_obj_t*)'
qrCode = lv_qrcode_create(mainScreen, 200, lv_color_hex3(0x000), lv_color_hex3(0xeef));
^
In file included from c:\users\supakee\documents\arduino\libraries\lvgl-d17450a55fbd8603e57b64d23feb36a63832b195\lvgl.h:91,
from c:\Users\Supakee\Documents\Arduino\libraries\lvgl-d17450a55fbd8603e57b64d23feb36a63832b195\src/lvgl.h:17,
from C:\Users\Supakee\Documents\Arduino\QRCODE\QRCODE.ino:7:
c:\users\supakee\documents\arduino\libraries\lvgl-d17450a55fbd8603e57b64d23feb36a63832b195\src/libs/qrcode/lv_qrcode.h:45:12: note: declared here
lv_obj_t * lv_qrcode_create(lv_obj_t * parent);
^~~~~~~~~~~~~~~~
Multiple libraries were found for "lvgl.h"
Used: C:\Users\Supakee\Documents\Arduino\libraries\lvgl-d17450a55fbd8603e57b64d23feb36a63832b195
Not used: C:\Users\Supakee\Documents\Arduino\libraries\src
exit status 1
Compilation error: too many arguments to function 'lv_obj_t* lv_qrcode_create(lv_obj_t*)'
I tried to follow the instructions in the LVGL documentation.
I want this esp32-2432s028 to display a QR code, so how can I do it?
I use library from https://github.com/lvgl/lvgl/tree/6948eee9b12e13ea7db9287b96385b05a8a6cc9a
I use code from https://github.com/0015/ThatProject/tree/master/ESP32_LVGL/LVGL8/4_QR_Code_Maker
The compilation error:
Compilation error: too many arguments to function 'lv_obj_t* lv_qrcode_create(lv_obj_t*)'
suggests that you should only pass a single argument to the function:
qrCode = lv_qrcode_create(mainScreen, 200, lv_color_hex3(0x000), lv_color_hex3(0xeef));
Does the documentation you have read differ from the implementation you are using?
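For what it's worth, the commit you linked is from the lvgl master branch, where lv_qrcode_create() takes only the parent (exactly as the error's note shows) and the size and colours moved into setters. A rough sketch of ui_dynamic_obj() against that API; the setter names are my reading of that branch's lv_qrcode.h, so verify them against the header you are actually compiling:
// Newer-API sketch: create first, then configure. Setter names assumed from
// the master-branch lv_qrcode.h; check your local copy.
qrCode = lv_qrcode_create(mainScreen);
lv_qrcode_set_size(qrCode, 200);
lv_qrcode_set_dark_color(qrCode, lv_color_hex3(0x000));
lv_qrcode_set_light_color(qrCode, lv_color_hex3(0xeef));
lv_obj_set_pos(qrCode, 0, -20);
lv_obj_set_align(qrCode, LV_ALIGN_BOTTOM_MID);
lv_obj_add_flag(qrCode, LV_OBJ_FLAG_HIDDEN);
Alternatively, installing an LVGL 8.3.x release should keep the original four-argument call compiling, since the ThatProject repo path says LVGL8.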

Calling Variables in another function (c++)

So I've gotta create a whole bunch of variables for some colours for a customisable menu I'm making. I wanted to know if there is a way to call the variables from the first function in other functions (there are a lot of them). These variables have to be used in over 100 locations across about 10 different functions, and I really don't want to redefine all the sh*t over again every single time (it looks messy and is a pain if I need to make changes). Here's my menu colour function:
Color MenuColor(int MenuAlpha_Main)
{
Color Theme;
Color Basic = Color(c_config::get().menu_color_r, c_config::get().menu_color_g, c_config::get().menu_color_b, MenuAlpha_Main);
Color Background = Color(c_config::get().menu_background_r, c_config::get().menu_background_g, c_config::get().menu_background_b, MenuAlpha_Main);
Color MiscSelectedTab_Colour = Color(c_config::get().MiscSelectedTab_r, c_config::get().MiscSelectedTab_g, c_config::get().MiscSelectedTab_b, MenuAlpha_Main);
Color MiscSelectedTab_Highlight_Colour = Color(c_config::get().MiscSelectedTab_Highlight_r, c_config::get().MiscSelectedTab_Highlight_g, c_config::get().MiscSelectedTab_Highlight_b, MenuAlpha_Main);
Color MiscUnSelectedTab_Colour = Color(c_config::get().MiscUnSelectedTab_r, c_config::get().MiscUnSelectedTab_g, c_config::get().MiscUnSelectedTab_b, MenuAlpha_Main);
Color MiscUnSelectedTab_Highlight_Colour = Color(c_config::get().MiscUnSelectedTab_Highlight_r, c_config::get().MiscUnSelectedTab_Highlight_g, c_config::get().MiscUnSelectedTab_Highlight_b, MenuAlpha_Main);
/////rainbow sync//////
static unsigned int last_time;
last_time = GetTickCount();
Color rainbow;
rainbow.FromHSV(fmod(last_time * 0.0002f, 1.f), 1.f, 0.5f);
//////////////////////
// Oh fuck, time for the customizable shit
Color MiscSelectedTab;
Color MiscSelectedTab_Highlight;
Color MiscUnSelectedTab;
Color MiscUnSelectedTab_Highlight;
if (c_config::get().menu_colour_style == 0) {
Theme = Basic; //Normal Style
}
else if (c_config::get().menu_colour_style == 1) {
Theme = rainbow; //Rainbow
}
else if (c_config::get().menu_colour_style == 2) {
//This shit is done below
}
return Theme;
if (c_config::get().menu_colour_style == 0 || c_config::get().menu_colour_style == 0 && !c_config::get().fullmenuhighlight) {
MiscSelectedTab = Background;
MiscSelectedTab_Highlight = Theme;
MiscUnSelectedTab = Background;
MiscUnSelectedTab_Highlight = Theme;
}
else if (c_config::get().menu_colour_style == 0 || c_config::get().menu_colour_style == 0 && c_config::get().fullmenuhighlight) {
MiscSelectedTab = Theme;
MiscSelectedTab_Highlight = Theme;
MiscUnSelectedTab = Theme;
MiscUnSelectedTab_Highlight = Theme;
} // MISC SUBTAB (Misc/Colours)
else if (c_config::get().menu_colour_style == 2) {
MiscSelectedTab = MiscSelectedTab_Colour;
MiscSelectedTab_Highlight = MiscSelectedTab_Highlight_Colour;
MiscUnSelectedTab = MiscUnSelectedTab_Colour;
MiscUnSelectedTab_Highlight = MiscUnSelectedTab_Highlight_Colour;
}
return MiscSelectedTab;
return MiscSelectedTab_Highlight;
return MiscUnSelectedTab;
return MiscUnSelectedTab_Highlight;
}
I'm still fairly fresh to C++ so please don't judge. The return values would need to be referenced in a function like this:
void miscsubtab(int& current_players_esp_subtab, int tab_amount, Vector _pos, int MenuAlpha_Main)
{
int in_sizew_esp_player_subtabs = GroupBoxSize_Width - 8;
static std::string ESP_Player_SubTabs_Names[2] = { "Misc", "Colours" };
for (int i = 0; i < tab_amount; i++)
{
RECT text_size2 = g_pSurface->GetTextSizeRect(Globals::SmallText, ESP_Player_SubTabs_Names[i].c_str());
int tab_area[4] = {
_pos.x + 9 + (i * (in_sizew_esp_player_subtabs / tab_amount)), _pos.y + 52 + 5,
(in_sizew_esp_player_subtabs / tab_amount), 20
};
if (GetAsyncKeyState(VK_LBUTTON) && g_pSurface->MouseInRegion(tab_area[0], tab_area[1], tab_area[2],
tab_area[3]))
current_players_esp_subtab = i;
if (current_players_esp_subtab == i)
{
g_pSurface->FilledRect(tab_area[0], tab_area[1], tab_area[2], tab_area[3], MiscSelectedTab); //HERE
g_pSurface->FilledRect(tab_area[0], tab_area[1] + tab_area[3], tab_area[2], 3, MiscSelectedTab_Highlight); //HERE
g_pSurface->DrawT(tab_area[0] + (((in_sizew_esp_player_subtabs / tab_amount) / 2) - (text_size2.right / 2)),
tab_area[1] + (tab_area[3] / 2) - (text_size2.bottom / 2),
Color(143, 143, 143, MenuAlpha_Main), Globals::SmallText, false,
ESP_Player_SubTabs_Names[i].c_str());
}
else
{
g_pSurface->FilledRect(tab_area[0], tab_area[1], tab_area[2], tab_area[3], MiscUnSelectedTab); //HERE
g_pSurface->FilledRect(tab_area[0], tab_area[1] + tab_area[3], tab_area[2], 3, MiscUnSelectedTab_Highlight); //HERE
g_pSurface->DrawT(tab_area[0] + (((in_sizew_esp_player_subtabs / tab_amount) / 2) - (text_size2.right / 2)),
tab_area[1] + (tab_area[3] / 2) - (text_size2.bottom / 2),
Color(143, 143, 143, MenuAlpha_Main), Globals::SmallText, false,
ESP_Player_SubTabs_Names[i].c_str());
}
}
}
Any help would be greatly appreciated.
Thanks -Kenny
You can define a separate Theme struct for it and use all colors from a Theme variable.
class Theme {
public:
    enum Style {BASIC, RAINBOW, CUSTOM};
    Style style;
    Color MiscSelectedTab;
    Color MiscSelectedTab_Highlight;
    Color MiscUnSelectedTab;
    Color MiscUnSelectedTab_Highlight;
};
Theme MenuColor(int MenuAlpha_Main)
{
    Theme theme;
    ...
    if (c_config::get().menu_colour_style == 0) {
        theme.style = Theme::BASIC; //Normal Style
    }
    else if (c_config::get().menu_colour_style == 1) {
        theme.style = Theme::RAINBOW; //Rainbow
    }
    else if (c_config::get().menu_colour_style == 2) {
        theme.style = Theme::CUSTOM;
        //This shit is done below
    }
    ...
    return theme;
}
...
Theme myTheme = MenuColor(5);
myTheme.MiscSelectedTab;
myTheme.MiscSelectedTab_Highlight;
myTheme.MiscUnSelectedTab;
myTheme.MiscUnSelectedTab_Highlight;
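The drawing functions then take the theme as a parameter instead of reaching for per-function colour variables; a minimal sketch (my own adaptation, untested against your code base) of how miscsubtab from the question could consume it:
// Pass the computed theme in by const reference and read its members where
// the //HERE markers were.
void miscsubtab(int& current_players_esp_subtab, int tab_amount, Vector _pos,
                int MenuAlpha_Main, const Theme& theme)
{
    ...
    g_pSurface->FilledRect(tab_area[0], tab_area[1], tab_area[2], tab_area[3],
                           theme.MiscSelectedTab);
    g_pSurface->FilledRect(tab_area[0], tab_area[1] + tab_area[3], tab_area[2], 3,
                           theme.MiscSelectedTab_Highlight);
    ...
}
// Call site (arguments other than the theme are placeholders):
Theme myTheme = MenuColor(MenuAlpha_Main);
miscsubtab(current_players_esp_subtab, 2, _pos, MenuAlpha_Main, myTheme);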

OpenCV 'cvPoint' and 'CV_FONT_HERSHEY_SIMPLEX' not declared in this scope

I'm trying to get LSD-SLAM by Kevin George working.
But I got stuck because OpenCV doesn't want to be cooperative, and when using catkin_make it doesn't know the following two things:
/home/adas/ros_workspace/src/src/lsd_slam/lsd_slam_core/src/util/globalFuncs.cpp: In function ‘void lsd_slam::printMessageOnCVImage(cv::Mat&, std::__cxx11::string, std::__cxx11::string)’:
/home/adas/ros_workspace/src/src/lsd_slam/lsd_slam_core/src/util/globalFuncs.cpp:52:28: error: ‘cvPoint’ was not declared in this scope
cv::putText(image, line2, cvPoint(10,image.rows-5),
^~~~~~~
/home/adas/ros_workspace/src/src/lsd_slam/lsd_slam_core/src/util/globalFuncs.cpp:52:28: note: suggested alternative: ‘cvRound’
cv::putText(image, line2, cvPoint(10,image.rows-5),
^~~~~~~
cvRound
and
/home/adas/ros_workspace/src/src/lsd_slam/lsd_slam_core/src/util/globalFuncs.cpp:53:6: error: ‘CV_FONT_HERSHEY_SIMPLEX’ was not declared in this scope
CV_FONT_HERSHEY_SIMPLEX, 0.4, cv::Scalar(200,200,250), 1, 8);
In the case of cvPoint, catkin_make suggests replacing cvPoint with cvRound, which doesn't work: I then get a new error that no function with that argument type can be found.
Here is the full source code in case it helps.
/**
* This file is part of LSD-SLAM.
*
* Copyright 2013 Jakob Engel <engelj at in dot tum dot de> (Technical University of Munich)
* For more information see <http://vision.in.tum.de/lsdslam>
*
* LSD-SLAM is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* LSD-SLAM is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with LSD-SLAM. If not, see <http://www.gnu.org/licenses/>.
*/
#include "util/globalFuncs.h"
#include "util/SophusUtil.h"
#include "opencv2/opencv.hpp"
#include "DataStructures/Frame.h"
namespace lsd_slam
{
SE3 SE3CV2Sophus(const cv::Mat &R, const cv::Mat &t)
{
Sophus::Matrix3f sR;
Sophus::Vector3f st;
for(int i=0;i<3;i++)
{
sR(0,i) = R.at<double>(0,i);
sR(1,i) = R.at<double>(1,i);
sR(2,i) = R.at<double>(2,i);
st[i] = t.at<double>(i);
}
return SE3(toSophus(sR.inverse()), toSophus(st));
}
void printMessageOnCVImage(cv::Mat &image, std::string line1,std::string line2)
{
for(int x=0;x<image.cols;x++)
for(int y=image.rows-30; y<image.rows;y++)
image.at<cv::Vec3b>(y,x) *= 0.5;
cv::putText(image, line2, cvPoint(10,image.rows-5),
CV_FONT_HERSHEY_SIMPLEX, 0.4, cv::Scalar(200,200,250), 1, 8);
cv::putText(image, line1, cvPoint(10,image.rows-18),
CV_FONT_HERSHEY_SIMPLEX, 0.4, cv::Scalar(200,200,250), 1, 8);
}
cv::Mat getDepthRainbowPlot(Frame* kf, int lvl)
{
return getDepthRainbowPlot(kf->idepth(lvl), kf->idepthVar(lvl), kf->image(lvl),
kf->width(lvl), kf->height(lvl));
}
cv::Mat getDepthRainbowPlot(const float* idepth, const float* idepthVar, const float* gray, int width, int height)
{
cv::Mat res = cv::Mat(height,width,CV_8UC3);
if(gray != 0)
{
cv::Mat keyFrameImage(height, width, CV_32F, const_cast<float*>(gray));
cv::Mat keyFrameImage8u;
keyFrameImage.convertTo(keyFrameImage8u, CV_8UC1);
cv::cvtColor(keyFrameImage8u, res, cv::COLOR_GRAY2RGB);
}
else
fillCvMat(&res,cv::Vec3b(255,170,168));
for(int i=0;i<width;i++)
for(int j=0;j<height;j++)
{
float id = idepth[i + j*width];
if(id >=0 && idepthVar[i + j*width] >= 0)
{
// rainbow between 0 and 4
float r = (0-id) * 255 / 1.0; if(r < 0) r = -r;
float g = (1-id) * 255 / 1.0; if(g < 0) g = -g;
float b = (2-id) * 255 / 1.0; if(b < 0) b = -b;
uchar rc = r < 0 ? 0 : (r > 255 ? 255 : r);
uchar gc = g < 0 ? 0 : (g > 255 ? 255 : g);
uchar bc = b < 0 ? 0 : (b > 255 ? 255 : b);
res.at<cv::Vec3b>(j,i) = cv::Vec3b(255-rc,255-gc,255-bc);
}
}
return res;
}
cv::Mat getVarRedGreenPlot(const float* idepthVar, const float* gray, int width, int height)
{
float* idepthVarExt = (float*)Eigen::internal::aligned_malloc(width*height*sizeof(float));
memcpy(idepthVarExt,idepthVar,sizeof(float)*width*height);
for(int i=2;i<width-2;i++)
for(int j=2;j<height-2;j++)
{
if(idepthVar[(i) + width*(j)] <= 0)
idepthVarExt[(i) + width*(j)] = -1;
else
{
float sumIvar = 0;
float numIvar = 0;
for(int dx=-2; dx <=2; dx++)
for(int dy=-2; dy <=2; dy++)
{
if(idepthVar[(i+dx) + width*(j+dy)] > 0)
{
float distFac = (float)(dx*dx+dy*dy)*(0.075*0.075)*0.02;
float ivar = 1.0f/(idepthVar[(i+dx) + width*(j+dy)] + distFac);
sumIvar += ivar;
numIvar += 1;
}
}
idepthVarExt[(i) + width*(j)] = numIvar / sumIvar;
}
}
cv::Mat res = cv::Mat(height,width,CV_8UC3);
if(gray != 0)
{
cv::Mat keyFrameImage(height, width, CV_32F, const_cast<float*>(gray));
cv::Mat keyFrameImage8u;
keyFrameImage.convertTo(keyFrameImage8u, CV_8UC1);
cv::cvtColor(keyFrameImage8u, res, cv::COLOR_GRAY2RGB);
}
else
fillCvMat(&res,cv::Vec3b(255,170,168));
for(int i=0;i<width;i++)
for(int j=0;j<height;j++)
{
float idv = idepthVarExt[i + j*width];
if(idv > 0)
{
float var= sqrt(idv);
var = var*60*255*0.5 - 20;
if(var > 255) var = 255;
if(var < 0) var = 0;
res.at<cv::Vec3b>(j,i) = cv::Vec3b(0,255-var, var);
}
}
Eigen::internal::aligned_free((void*)idepthVarExt);
return res;
}
}
Here is also some information about what I have installed that I think might be helpful:
openCV 4.1.1-pre
ROS Melodic
Ubuntu 18.04
CMake 3.10.2
CatKin_tools 0.4.5
Python 3.6.8
Hope someone out there is able to help me.
Don't mix the obsolete C API with the C++ API.
Use cv::Point instead of cvPoint, and cv::FONT_HERSHEY_SIMPLEX instead of CV_FONT_HERSHEY_SIMPLEX, e.g.:
cv::putText(image, line1, cv::Point(10,image.rows-18),
cv::FONT_HERSHEY_SIMPLEX, 0.4, cv::Scalar(200,200,250), 1, 8);
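If it helps, here is a small self-contained check (my own snippet, assuming OpenCV 4 headers are installed) that the C++-API names compile; you can build it on its own before touching the LSD-SLAM sources:
#include <opencv2/core.hpp>
#include <opencv2/imgproc.hpp>
int main()
{
    cv::Mat image = cv::Mat::zeros(60, 320, CV_8UC3);          // dummy canvas
    cv::putText(image, "hello", cv::Point(10, image.rows - 5), // cv::Point, not cvPoint
                cv::FONT_HERSHEY_SIMPLEX, 0.4,                 // cv::FONT_..., not CV_FONT_...
                cv::Scalar(200, 200, 250), 1, cv::LINE_8);     // cv::LINE_8 instead of the bare 8
    return 0;
}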

Keras custom layer with CNTK backend (CRF as RNN)

I am attempting to duplicate CRF-as-RNN, which has been implemented in Keras but uses TensorFlow as a backend (https://github.com/sadeepj/crfasrnn_keras). The Keras front end is fine, but some of the backend code is written as a TensorFlow custom op. I am trying to duplicate this in CNTK, but I have a few questions.
import cntk as C
from cntk import ops
import copy
import numpy as np
C.device.try_set_default_device(C.device.cpu())
ops.register_native_user_function('HighDimFilterOp', 'Cntk.HighDimFilter-' + C.__version__.rstrip('+'), 'CreateHighDimFilter')
def high_dim_filter(image=None, rgb=None, **kwargs):
inputs = [list(image), list(rgb)];
layer_config = copy.deepcopy(kwargs)
ops.native_user_function('HighDimFilterOp', inputs, layer_config, 'high_dim_filter')
This code is the Python call to my user C++ function. The C++ interface is as follows:
#include "HighDimFilter.h"
using namespace CNTK;
extern "C"
#ifdef _WIN32
__declspec(dllexport)
#endif
Function* CreateHighDimFilter(const Variable* operands, size_t /*numOperands*/, const Dictionary* attributes, const wchar_t* name)
{
printf("Creating HighDimFilter\n");
return new HighDimFilter({operands[0], operands[1]}, *attributes, name);
}
and the custom function itself is defined as:
#pragma once
#include "CNTKLibrary.h"
#include "modified_permutohedral.h"
using namespace CNTK;
class HighDimFilter final : public Function
{
bool _bilateral;
float _theta_alpha;
float _theta_beta;
float _theta_gamma;
enum Input : uint32_t
{
SCORES,
IM_INFO
};
public:
HighDimFilter(const std::vector<Variable>& inputs, const Dictionary& attributes, const std::wstring& name = L"HighDimFilter")
: Function(inputs, attributes, name)
{
if (attributes.Contains(L"bilateral"))
_bilateral = attributes[L"bilateral"].Value<bool>();
if (_bilateral == false)
{
if (attributes.Contains(L"theta_gamma"))
_theta_gamma = static_cast<float>(attributes[L"theta_gamma"].Value<double>());
}
else
{
if (attributes.Contains(L"theta_alpha"))
_theta_alpha = static_cast<float>(attributes[L"theta_alpha"].Value<double>());
if (attributes.Contains(L"theta_beta"))
_theta_beta = static_cast<float>(attributes[L"theta_beta"].Value<double>());
}
}
private:
void _compute_spatial_kernel(NDArrayViewPtr& Tensor, const float theta_gamma)
{
auto output_kernel = Tensor->WritableDataBuffer<float>();
auto outputShape = Tensor->Shape();
//auto channels = outputShape[0];
auto height = outputShape[1];
auto width = outputShape[2];
const auto num_pixels = width * height;
for (int p = 0; p < num_pixels; ++p)
{
output_kernel[2 * p] = static_cast<float>(p % width) / theta_gamma;
output_kernel[2 * p + 1] = static_cast<float>(p / width) / theta_gamma;
}
}
void _compute_bilateral_kernel(NDArrayViewPtr& Tensor, const NDArrayViewPtr& Image,
const float theta_alpha, const float theta_beta)
{
auto output_kernel = Tensor->WritableDataBuffer<float>();
auto rgb = Image->DataBuffer<float>();
auto outputShape = Tensor->Shape();
//auto channels = outputShape[0];
auto height = outputShape[1];
auto width = outputShape[2];
const auto num_pixels = height * width;
for (int p = 0; p < num_pixels; ++p)
{
// Spatial terms
output_kernel[5 * p] = static_cast<float>(p % width) / theta_alpha;
output_kernel[5 * p + 1] = static_cast<float>(p / width) / theta_alpha;
// Color terms
output_kernel[5 * p + 2] = static_cast<float>(rgb[p] / theta_beta);
output_kernel[5 * p + 3] = static_cast<float>(rgb[num_pixels + p] / theta_beta);
output_kernel[5 * p + 4] = static_cast<float>(rgb[2 * num_pixels + p] / theta_beta);
}
}
BackPropStatePtr Forward(const std::vector<ValuePtr>& inputValues,
std::unordered_map<Variable, ValuePtr>& outputs,
const DeviceDescriptor& computeDevice,
const std::unordered_set<Variable>& /*outputsToRetainBackwardStateFor */) override
{
#if 0
auto scoresShape = inputValues[Input::SCORES]->Shape();
auto channels = scoresShape[0];
auto height = scoresShape[1];
auto width = scoresShape[2];
const auto num_pixels = width * height;
auto &outputValue = outputs[this->Output()];
if (outputValue == nullptr)
{
outputValue = MakeSharedObject<Value>(MakeSharedObject<NDArrayView>(DataType::Float, scoresShape, computeDevice));
}
if (computeDevice.Type() != DeviceKind::CPU)
throw std::runtime_error("HighDimFilter: only CPU evaluation is supported at the moment.");
ModifiedPermutohedral mp;
if (_bilateral)
{
auto &kernel_vals = MakeSharedObject<Value>(MakeSharedObject<NDArrayView>(DataType::Float, NDShape({5, height, width}), computeDevice));
//float* kernel_vals = new float[5 * num_pixels];
_compute_bilateral_kernel(kernel_vals->Data(), inputValues[Input::IM_INFO]->Data(),
_theta_alpha, _theta_beta);
mp.init(kernel_vals->Data(), 5, num_pixels);
mp.compute(outputValue->Data(), inputValues[Input::SCORES]->Data(), false);
}
else
{
auto &kernel_vals = MakeSharedObject<Value>(MakeSharedObject<NDArrayView>(DataType::Float, NDShape({2, height, width}), computeDevice));
_compute_spatial_kernel(kernel_vals->Data(), _theta_gamma);
mp.init(kernel_vals->Data(), 2, num_pixels);
mp.compute(outputValue->Data(), inputValues[Input::SCORES]->Data(), channels, false);
}
return MakeSharedObject<BackPropState>(this->shared_from_this(), computeDevice, std::unordered_map<Variable, ValuePtr>({ {Inputs()[Input::IM_INFO], inputValues[Input::IM_INFO]} }));
#else
return nullptr;
#endif
}
void Backward(const BackPropStatePtr& state,
const std::unordered_map<Variable, ValuePtr>& rootGradientValues,
std::unordered_map<Variable, ValuePtr>& backPropagatedGradientValuesForInputs) override
{
#if 0
auto gradOutputVariable = Inputs()[Input::SCORES];
auto inputVariable = Inputs()[Input::IM_INFO];
auto &gradValue = backPropagatedGradientValuesForInputs[gradOutputVariable];
if (gradValue == nullptr)
gradValue = MakeSharedObject<Value>(MakeSharedObject<NDArrayView>(DataType::Float, gradOutputVariable.Shape(), state->Device()));
auto imageData = state->SavedForwardPropValues().at(inputVariable)->Data();
auto imageShape = imageData->Shape();
auto channels = imageShape[0];
auto height = imageShape[1];
auto width = imageShape[2];
const auto num_pixels = width * height;
if (state->Device().Type() != DeviceKind::CPU)
throw std::runtime_error("HighDimFilter: only CPU evaluation is supported at the moment.");
auto rootGradientData = rootGradientValues.at(this->Output())->Data();
ModifiedPermutohedral mp;
if (_bilateral)
{
auto &kernel_vals = MakeSharedObject<Value>(MakeSharedObject<NDArrayView>(DataType::Float, NDShape({5, height, width}), state->Device()));
//float* kernel_vals = new float[5 * num_pixels];
_compute_bilateral_kernel(kernel_vals->Data(), imageData,
_theta_alpha, _theta_beta);
mp.init(kernel_vals->Data(), 5, num_pixels);
mp.compute(gradValue->Data(), rootGradientData, true);
}
else
{
auto &kernel_vals = MakeSharedObject<Value>(MakeSharedObject<NDArrayView>(DataType::Float, NDShape({2, height, width}), state->Device()));
_compute_spatial_kernel(kernel_vals->Data(), _theta_gamma);
mp.init(kernel_vals->Data(), 2, num_pixels);
mp.compute(gradValue->Data(), rootGradientData, channels, true);
}
#endif
return;
}
const std::wstring& OpName() const override
{
static const std::wstring opName = L"HighDimFilterOp";
return opName;
}
size_t CurrentVersion() const override
{
NOT_IMPLEMENTED;
}
void InferOutputs(std::vector<Variable>& /*outputs */) override
{
NOT_IMPLEMENTED;
}
FunctionPtr Clone(const std::vector<Variable>& /*clonedInputs */) override
{
return nullptr;
}
};
My python call looks like:
bilateral_high_dim_filter = custom_module.high_dim_filter(image=all_ones_flat,
rgb=rgb_flat,
bilateral=True,
theta_alpha=self.theta_alpha,
theta_beta=self.theta_beta)
high_dim_filter = custom_module.high_dim_filter(image=all_ones_flat,
rgb=rgb_flat,
bilateral=False,
theta_gamma=self.theta_gamma)
The questions are as follows: 1) What are the "operands" passed into native_user_function on initialization? Are they only passed at initialization (are they intended to be weight and bias initialization)? How are the input operands used in the "Function" constructor? If I set these to None in Python, the code crashes.
2) How do you forward propagate the filter? Just call "forward()"? What about the required arguments to forward propagate?
3) Is there a numerical gradient calculation in CNTK similar to TensorFlow to check the gradient?
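Regarding 3), I cannot speak to a built-in CNTK helper, but the usual numerical check is a central difference around each input, compared against what Backward() returns; a generic sketch of the idea (my own code, not CNTK-specific):
#include <cstdio>
#include <functional>
// Central-difference estimate of df/dx at x.
double numeric_grad(const std::function<double(double)>& f, double x, double eps = 1e-4)
{
    return (f(x + eps) - f(x - eps)) / (2.0 * eps);
}
int main()
{
    auto f = [](double v) { return v * v; }; // toy stand-in for the op's forward pass
    double x = 3.0;
    std::printf("numeric %.6f vs analytic %.6f\n", numeric_grad(f, x), 2.0 * x);
    return 0;
}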

How do I prevent over-correcting in my autonomous driving solution?

I am working on an autonomous driving solution for Euro Truck Simulator 2 with OpenCV in C++.
Here is where we detect the curve of the road:
int bottom_center = 160;
int sum_centerline = 0;
int count_centerline = 0;
int first_centerline = 0;
int last_centerline = 0;
double avr_center_to_left = 0;
double avr_center_to_right = 0;
//#pragma omp parallel for
for (int i = 240; i > 30; i--){
double center_to_right = -1;
double center_to_left = -1;
for (int j = 0; j < 150; j++) {
if (contours.at<uchar>(i, bottom_center + j) == 112 && center_to_right == -1) {
center_to_right = j;
}
if (contours.at<uchar>(i, bottom_center - j) == 112 && center_to_left == -1) {
center_to_left = j;
}
}
if (center_to_left != -1 && center_to_right != -1){
int centerline = (center_to_right - center_to_left + 2 * bottom_center) / 2;
if (first_centerline == 0) {
first_centerline = centerline;
}
cv::circle(outputImg, Point(centerline, i), 1, Scalar(30, 255, 30), 3);
cv::circle(outputImg, Point(centerline + center_to_right+20, i), 1, Scalar(255, 30, 30) , 3);
cv::circle(outputImg, Point(centerline - center_to_left+10, i), 1, Scalar(255, 30, 30) , 3);
sum_centerline += centerline;
avr_center_to_left = (avr_center_to_left * count_centerline + center_to_left) / count_centerline + 1;
avr_center_to_right = (avr_center_to_right * count_centerline + center_to_right) / count_centerline + 1;
last_centerline = centerline;
count_centerline++;
}
else {}
}
And here is my current solution for steering:
int diff = 0;
if (count_centerline != 0) {
diff = sum_centerline / count_centerline - bottom_center;
int degree = atan2(last_centerline - first_centerline, count_centerline) * 180 / PI;
//diff = (90 - degree);
int move_mouse_pixel = diff;
cout << "Steer: " << move_mouse_pixel << "px ";
if (diff <= 20 || diff >= -20){
SetCursorPos(pt.x + (move_mouse_pixel / 10), height / 2);
}
else{
SetCursorPos(pt.x + (move_mouse_pixel / 25), height / 2);
}
}
Finally, here is a video of what my program currently does: https://www.youtube.com/watch?v=rqyvoFuGKKk&feature=youtu.be
The current problem I have is that the steering does not center fast enough, leading it to continually over-correct until it swerves out of the lane. I have tried increasing the steering sensitivity in-game to allow for faster or slower turning, but this either makes the truck spin out of control or not turn enough when driving along a large curve.
My current method just divides slight movements (between -20px and 20px) by 10, and large movements by 25. I've also tried reversing this, but it did not fix the over-correcting problem.
There are two possible solutions that I have found so far:
I could incrementally increase the divisor we apply to move_mouse_pixel, thereby reducing the steering force for small movements.
Or, I could somehow make the program center the steering wheel more quickly. I am not sure how I would implement this.
What do you guys think?
I believe a PID controller would be suitable for this task.
https://en.wikipedia.org/wiki/PID_controller
In your situation it would look similar to this:
diffOld = diff;
diff = sum_centerline / count_centerline - bottom_center;
SetCursorPos(width / 2 + Kp * diff + Kd * (diff - diffOld), height / 2);
Do not use an if statement in this controller. You need to keep steering even if there is no error to correct. I would suggest skipping the integral term, because your plant already integrates (when you do not drive straight, the error accumulates on its own). You need to choose the Kp and Kd values experimentally, for example with the Ziegler–Nichols method: https://en.wikipedia.org/wiki/Ziegler%E2%80%93Nichols_method.
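To make this concrete, here is my own sketch of that PD update with an added clamp so a single noisy frame cannot yank the wheel; the Kp/Kd values are placeholders to tune, not measured ones:
// PD steering: proportional to the lane-centre error plus a damping term on
// its rate of change; clamp the result before moving the cursor.
static int diffOld = 0;
const double Kp = 0.35; // placeholder gain
const double Kd = 1.2;  // placeholder gain
if (count_centerline != 0) {
    int diff = sum_centerline / count_centerline - bottom_center;
    double steer = Kp * diff + Kd * (diff - diffOld);
    diffOld = diff;
    if (steer > 50.0) steer = 50.0;   // clamp to +/-50 px per frame (arbitrary bound)
    if (steer < -50.0) steer = -50.0;
    SetCursorPos(width / 2 + static_cast<int>(steer), height / 2);
}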