I have a loop in which I parse an ONNX model into TensorRT, create an engine and run inference.
I make sure I call x->destroy() on every object and pair each cudaMalloc with a cudaFree.
Yet the memory usage reported by nvidia-smi keeps growing over consecutive iterations.
I'm really not sure where the problem comes from. cuda-memcheck reports no leaks either.
Running Ubuntu 18.04, TensorRT 7.0.0, CUDA 10.2 on a GTX 1070.
The code, the ONNX file and a CMakeLists.txt are available on this repo.
Here's the code:
#include <cstdlib>   // atoi
#include <memory>
#include <iostream>

#include <cuda_runtime_api.h>
#include <NvOnnxParser.h>
#include <NvInfer.h>

class Logger : public nvinfer1::ILogger
{
    void log(Severity severity, const char* msg) override
    {
        // suppress info-level messages
        if (severity != Severity::kINFO)
            std::cout << msg << std::endl;
    }
};

int main(int argc, char * argv[])
{
    Logger gLogger;

    // Build the engine once from the ONNX model
    auto builder = nvinfer1::createInferBuilder(gLogger);
    const auto explicitBatch = 1U << static_cast<uint32_t>(nvinfer1::NetworkDefinitionCreationFlag::kEXPLICIT_BATCH);
    auto network = builder->createNetworkV2(explicitBatch);
    auto config = builder->createBuilderConfig();
    auto parser = nvonnxparser::createParser(*network, gLogger);
    parser->parseFromFile("../model.onnx", static_cast<int>(0));
    builder->setMaxBatchSize(1);
    config->setMaxWorkspaceSize(128 * (1 << 20)); // 128 MiB
    auto engine = builder->buildEngineWithConfig(*network, *config);

    builder->destroy();
    network->destroy();
    parser->destroy();
    config->destroy();

    // Run inference argv[1] times
    for (int i = 0; i < atoi(argv[1]); i++)
    {
        auto context = engine->createExecutionContext();

        void* deviceBuffers[2]{0};
        int inputIndex = engine->getBindingIndex("input_rgb:0");
        constexpr int inputNumel = 1 * 128 * 64 * 3;
        int outputIndex = engine->getBindingIndex("truediv:0");
        constexpr int outputNumel = 1 * 128;

        //TODO: Remove batch size hardcoding
        cudaMalloc(&deviceBuffers[inputIndex], 1 * sizeof(float) * inputNumel);
        cudaMalloc(&deviceBuffers[outputIndex], 1 * sizeof(float) * outputNumel);

        cudaStream_t stream;
        cudaStreamCreate(&stream);

        float inBuffer[inputNumel] = {0};
        float outBuffer[outputNumel] = {0};
        cudaMemcpyAsync(deviceBuffers[inputIndex], inBuffer, 1 * sizeof(float) * inputNumel, cudaMemcpyHostToDevice, stream);
        context->enqueueV2(deviceBuffers, stream, nullptr);
        cudaMemcpyAsync(outBuffer, deviceBuffers[outputIndex], 1 * sizeof(float) * outputNumel, cudaMemcpyDeviceToHost, stream);
        cudaStreamSynchronize(stream);

        cudaFree(deviceBuffers[inputIndex]);
        cudaFree(deviceBuffers[outputIndex]);
        cudaStreamDestroy(stream);
        context->destroy();
    }
    engine->destroy();
    return 0;
}
Looks like the issue was coming from the repeated IExecutionContext creation, despite destroying it at the end of every iteration. Creating and destroying the context together with the engine, outside the loop, fixed the issue for me (see the sketch below). Nevertheless, it could still be a bug where context creation leaks a small amount of memory and that leak accumulates over time. Filed a GitHub issue.
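A minimal sketch of the workaround, assuming the same engine, bindings and sizes as the program above (sketch only, not the exact code from the repo): the execution context is created once, next to the engine, and reused in every iteration, while the buffers and the stream stay per-iteration.
// Sketch: hoist the execution context out of the loop; everything else
// (buffer allocation, stream, copies, enqueueV2) stays as in the question.
auto context = engine->createExecutionContext();
for (int i = 0; i < atoi(argv[1]); i++)
{
    void* deviceBuffers[2]{0};
    int inputIndex = engine->getBindingIndex("input_rgb:0");
    int outputIndex = engine->getBindingIndex("truediv:0");
    constexpr int inputNumel = 1 * 128 * 64 * 3;
    constexpr int outputNumel = 1 * 128;
    cudaMalloc(&deviceBuffers[inputIndex], sizeof(float) * inputNumel);
    cudaMalloc(&deviceBuffers[outputIndex], sizeof(float) * outputNumel);

    cudaStream_t stream;
    cudaStreamCreate(&stream);

    float inBuffer[inputNumel] = {0};
    float outBuffer[outputNumel] = {0};
    cudaMemcpyAsync(deviceBuffers[inputIndex], inBuffer, sizeof(float) * inputNumel, cudaMemcpyHostToDevice, stream);
    context->enqueueV2(deviceBuffers, stream, nullptr);
    cudaMemcpyAsync(outBuffer, deviceBuffers[outputIndex], sizeof(float) * outputNumel, cudaMemcpyDeviceToHost, stream);
    cudaStreamSynchronize(stream);

    cudaFree(deviceBuffers[inputIndex]);
    cudaFree(deviceBuffers[outputIndex]);
    cudaStreamDestroy(stream);
    // note: no context->destroy() inside the loop anymore
}
context->destroy();
engine->destroy();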
At present I need to calculate the CPU usage of a certain process on macOS (the target process is not related to the current process). I use the proc_pid_rusage API: I call it periodically, take the difference of ri_user_time and ri_system_time over the interval, and from that compute the CPU usage percentage.
On a macOS system with a non-M1 (Intel) chip the results were in line with expectations (basically the same as what Activity Monitor shows), but recently I found that the values obtained on an M1 macOS system are far too small. For example, one of my processes that consumes 30+% CPU according to Activity Monitor shows up as less than 1%.
Here is a demo; you can drop it into a new project and run it directly:
//
// main.cpp
// SimpleMonitor
//
// Created by m1 on 2021/2/23.
//
#include <stdio.h>
#include <stdlib.h>
#include <libproc.h>
#include <stdint.h>
#include <iostream>
#include <thread> // std::this_thread::sleep_for
#include <chrono> // std::chrono::seconds
int main(int argc, const char * argv[]) {
    // insert code here...
    std::cout << "run simple monitor!\n";
    // TODO: change process id:
    int64_t pid = 12483;
    struct rusage_info_v4 ru;
    struct rusage_info_v4 ru2;
    int64_t success = (int64_t)proc_pid_rusage((pid_t)pid, RUSAGE_INFO_V4, (rusage_info_t *)&ru);
    if (success != 0) {
        std::cout << "get cpu time fail \n";
        return 0;
    }
    std::cout << "getProcessPerformance, pid=" + std::to_string(pid) + " ru.ri_user_time=" + std::to_string(ru.ri_user_time) + " ru.ri_system_time=" + std::to_string(ru.ri_system_time) << std::endl;
    std::this_thread::sleep_for(std::chrono::seconds(10));
    int64_t success2 = (int64_t)proc_pid_rusage((pid_t)pid, RUSAGE_INFO_V4, (rusage_info_t *)&ru2);
    if (success2 != 0) {
        std::cout << "get cpu time fail \n";
        return 0;
    }
    std::cout << "getProcessPerformance, pid=" + std::to_string(pid) + " ru2.ri_user_time=" + std::to_string(ru2.ri_user_time) + " ru2.ri_system_time=" + std::to_string(ru2.ri_system_time) << std::endl;
    int64_t cpu_time = ru2.ri_user_time - ru.ri_user_time + ru2.ri_system_time - ru.ri_system_time;
    // percentage:
    double cpu_usage = (double)cpu_time / 10 / 1000000000 * 100;
    std::cout << pid << " cpu usage: " << cpu_usage << std::endl;
}
What I want to know is whether there is a problem with my calculation method, and if not, how I can handle the inaccurate results on M1 (Apple Silicon) macOS systems.
You have to convert the raw times using the Mach timebase: on Apple Silicon the ri_user_time / ri_system_time values are in Mach timebase ticks rather than nanoseconds, so multiply them by the timebase ratio before computing the percentage. Here are some snippets of code from a diff:
+#include <mach/mach_time.h>
mach_timebase_info_data_t sTimebase;
mach_timebase_info(&sTimebase);
timebase_to_ns = (double)sTimebase.numer / (double)sTimebase.denom;
syscpu.total = task_info.ptinfo.pti_total_system* timebase_to_ns/ 1000000;
usercpu.total = task_info.ptinfo.pti_total_user* timebase_to_ns / 1000000;
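Applied to your demo, the conversion would look roughly like the sketch below. This is my adaptation of the diff snippet, not code from the original answer; the assumption is that on Apple Silicon these rusage fields are in timebase ticks, while on Intel numer == denom == 1 so the scaling is a no-op.
#include <mach/mach_time.h>

// Sketch: convert ri_user_time / ri_system_time to nanoseconds before
// computing the percentage (assumption: timebase ticks on Apple Silicon,
// already nanoseconds on Intel, where numer == denom == 1).
static double timebase_to_ns()
{
    mach_timebase_info_data_t tb;
    mach_timebase_info(&tb);
    return (double)tb.numer / (double)tb.denom;
}

// ...after the two proc_pid_rusage calls from the demo:
double scale = timebase_to_ns();
double cpu_time_ns = (double)(ru2.ri_user_time - ru.ri_user_time
                            + ru2.ri_system_time - ru.ri_system_time) * scale;
double cpu_usage = cpu_time_ns / 10.0 / 1e9 * 100.0;  // 10 s sampling interval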
We have a working library that uses LibTorch 1.5.0, built with CUDA 10.0 which runs as expected.
We are working on upgrading to CUDA 10.2 for various non-PyTorch related reasons. We noticed that when we run LibTorch inference on the newly compiled LibTorch (compiled exactly the same, except changing to CUDA 10.2), the runtime is about 20x slower.
We also checked it using the precompiled binaries.
This was tested on 3 different machines with 3 different GPUs (Tesla T4, GTX 980 & P1000), on both Windows 10 & Ubuntu 16.04, all with the latest drivers and on 3 different torch scripts (of the same architecture); every combination shows a consistent ~20x slowdown on CUDA 10.2.
I've simplified the code to be extremely minimal, with no external dependencies other than Torch:
// Note: includes and the project-specific definitions (DEFAULT_TORCH_SCRIPT,
// PATCH_WIDTH / PATCH_HEIGHT, JitModule, prepareInput) live elsewhere in the
// project and are omitted here.
bool infer(std::shared_ptr<JitModule>& jitModule, at::cuda::CUDAStream& stream,
           const uint8_t* inputData, int width, int height);

int main(int argc, char** argv)
{
    // Initialize CUDA device 0
    cudaSetDevice(0);
    std::string networkPath = DEFAULT_TORCH_SCRIPT;
    if (argc > 1)
    {
        networkPath = argv[1];
    }
    auto jitModule = std::make_shared<torch::jit::Module>(torch::jit::load(networkPath, torch::kCUDA));
    if (jitModule == nullptr)
    {
        std::cerr << "Failed creating module" << std::endl;
        return EXIT_FAILURE;
    }

    // Meaningless data, just something to pass to the module to run on
    // PATCH_HEIGHT & WIDTH are defined as 256
    uint8_t* data = new uint8_t[PATCH_HEIGHT * PATCH_WIDTH * 3];
    memset(data, 0, PATCH_HEIGHT * PATCH_WIDTH * 3);

    auto stream = at::cuda::getStreamFromPool(true, 0);
    bool res = infer(jitModule, stream, data, PATCH_WIDTH, PATCH_HEIGHT);
    std::cout << "Warmed up" << std::endl;
    res = infer(jitModule, stream, data, PATCH_WIDTH, PATCH_HEIGHT);

    delete[] data;
    return 0;
}
// Inference function
bool infer(std::shared_ptr<JitModule>& jitModule, at::cuda::CUDAStream& stream,
           const uint8_t* inputData, int width, int height)
{
    std::vector<torch::jit::IValue> tensorInput;
    // This function simply uses cudaMemcpy to copy to device and create a torch::Tensor from that data
    // I can paste it if it's relevant, but left it out to keep this as clean as possible
    if (!prepareInput(inputData, width, height, tensorInput, stream))
    {
        return false;
    }
    // Reduce memory usage, without gradients
    torch::NoGradGuard noGrad;
    {
        at::cuda::CUDAStreamGuard streamGuard(stream);

        auto totalTimeStart = std::chrono::high_resolution_clock::now();
        jitModule->forward(tensorInput);
        // The synchronize here is just for timing's sake, not used in production
        cudaStreamSynchronize(stream.stream());
        auto totalTimeStop = std::chrono::high_resolution_clock::now();

        printf("forward sync time = %.3f milliseconds\n",
               std::chrono::duration<double, std::milli>(totalTimeStop - totalTimeStart).count());
    }
    return true;
}
When compiling this against Torch built with CUDA 10.0 we get a runtime of 18 ms, and with Torch built with CUDA 10.2 we get a runtime of 430 ms.
Any thoughts on that?
This issue was also posted on PyTorch Forums.
Issue on GitHub
UPDATE
I profiled this small program using both CUDAs
It seems that the two builds use very different kernels:
96.5% of the compute time in the 10.2 run goes to conv2d_grouped_direct_kernel, which takes ~60-100 ms on my P1000,
whereas the top kernels in the 10.0 run are:
47.1% - cudnn::detail::implicit_convolve_sgemm (~1.5 ms)
23.1% - maxwell_scudnn_winograd_128x128_ldg1_ldg4_tile148n_nt (~0.4 ms)
8.5% - maxwell_scudnn_128x32_relu_small_nn (~0.4ms)
So it's easy to see where the time difference comes from. Now the question is: why?
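One diagnostic worth trying here is cuDNN benchmark mode, which makes cuDNN time the available convolution algorithms instead of relying on its heuristic; a minimal sketch, assuming the same jitModule / stream / data setup as in the code above (not part of the original measurements):
#include <ATen/Context.h>   // at::globalContext()

// Enable cuDNN autotuning before the warm-up call. The first forward() is
// slower while algorithms are benchmarked; later calls reuse the result.
at::globalContext().setBenchmarkCuDNN(true);

bool res = infer(jitModule, stream, data, PATCH_WIDTH, PATCH_HEIGHT);  // warm-up (slow: benchmarking)
res = infer(jitModule, stream, data, PATCH_WIDTH, PATCH_HEIGHT);       // should now use the tuned kernel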
I just started using the C++ bindings of the libgpiod library and have a problem setting GPIOs. I know that I can build a long vector of values and apply them all at once, but I would like to set the lines' direction once and then control them separately. How can I do that?
What I tried is this.
First, working code that applies all values at once:
#include <cstdlib>   // EXIT_SUCCESS
#include <vector>
#include <gpiod.hpp>

int main(int argc, char **argv)
{
    ::gpiod::chip chip("gpiochip0");
    auto lines = chip.get_all_lines();

    ::gpiod::line_request requestOutputs = {
        argv[0],
        ::gpiod::line_request::DIRECTION_OUTPUT,
        0
    };

    int value_to_be_set = 0xAAAAAAA; // example value
    ::std::vector<int> values;
    for (int i = 0; i < 32; i++)
    {
        values.push_back((value_to_be_set >> i) & 1UL);
    }

    lines.request(requestOutputs, values);
    lines.release();
    return EXIT_SUCCESS;
}
Second, my attempt at doing what I actually want:
#include <cstdlib>   // EXIT_SUCCESS
#include <gpiod.hpp>

int main(int argc, char **argv)
{
    ::gpiod::chip chip("gpiochip0");
    auto lines = chip.get_all_lines();

    ::gpiod::line_request requestOutputs = {
        argv[0],
        ::gpiod::line_request::DIRECTION_OUTPUT,
        0
    };
    lines.request(requestOutputs);

    int value_to_be_set = 0xAAAAAAA; // example value
    for (int i = 0; i < 32; i++)
    {
        // This does not set the value :(
        lines.get(i).set_value((value_to_be_set >> i) & 1UL);
    }

    lines.release();
    return EXIT_SUCCESS;
}
I also could not find a simple C++ example that toggles a single GPIO line using the latest Raspberry Pi libraries.
There is a multi-line example at the link below, but that is not what was originally asked:
https://git.kernel.org/pub/scm/libs/libgpiod/libgpiod.git/tree/bindings/cxx
Below is an example that drives GPIO17 high and then low, producing a single output pulse on one line.
// Use gpio drivers to toggle a single GPIO
// line on Raspberry Pi
// Use the following commands to install prerequisites and build:
//   sudo apt install gpiod
//   sudo apt install libgpiod-dev
//   g++ -Wall -o gpio gpio.cpp -lgpiodcxx

#include <iostream>
#include <gpiod.hpp>
#include <unistd.h>

int main(void)
{
    ::gpiod::chip chip("gpiochip0");
    auto line = chip.get_line(17);  // GPIO17
    line.request({"example", gpiod::line_request::DIRECTION_OUTPUT, 0}, 1);  // request as output, initial value 1
    usleep(100000);                 // 100 ms pulse (sleep() takes whole seconds, so sleep(0.1) would not wait at all)
    line.set_value(0);
    line.release();
}
Also don't forget to build with the flag -lgpiodcxx (for C++) or -lgpiod (for C).
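To get back to the original question (controlling several lines independently), the same pattern can be extended by requesting each line individually and keeping the line objects around. A sketch only, untested, with example GPIO offsets:
#include <gpiod.hpp>
#include <unistd.h>
#include <vector>

int main(void)
{
    ::gpiod::chip chip("gpiochip0");

    // Request a handful of lines individually as outputs (initial value 0).
    std::vector<int> offsets = {17, 27, 22};          // example GPIO offsets
    std::vector<::gpiod::line> lines;
    for (int off : offsets)
    {
        auto line = chip.get_line(off);
        line.request({"example", gpiod::line_request::DIRECTION_OUTPUT, 0}, 0);
        lines.push_back(line);
    }

    // Each requested line can now be driven on its own at any time.
    lines[0].set_value(1);
    usleep(100000);
    lines[0].set_value(0);
    lines[2].set_value(1);

    for (auto &line : lines)
        line.release();
}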
This question is similar to cuModuleLoadDataEx options but I would like to bring the topic up again and in addition provide more information.
When loading a PTX string with the NVIDIA driver via cuModuleLoadDataEx, it seems to ignore all options altogether. I provide full working examples so that anyone interested can reproduce this directly and with no effort: first a small PTX kernel (save it as small.ptx), then the C++ program that loads the PTX kernel.
.version 3.1
.target sm_20, texmode_independent
.address_size 64
.entry main()
{
ret;
}
main.cc
#include <cstdlib>
#include <iostream>
#include <fstream>
#include <sstream>
#include <string>
#include <map>
#include "cuda.h"

int main(int argc, char *argv[])
{
    CUdevice cuDevice;
    CUcontext cuContext;
    CUfunction func;
    CUresult ret;
    CUmodule cuModule;

    cuInit(0);

    std::cout << "trying to get device 0\n";
    ret = cuDeviceGet(&cuDevice, 0);
    if (ret != CUDA_SUCCESS) { exit(1); }

    std::cout << "trying to create a context\n";
    ret = cuCtxCreate(&cuContext, 0, cuDevice);
    if (ret != CUDA_SUCCESS) { exit(1); }

    std::cout << "loading PTX string from file " << argv[1] << "\n";
    std::ifstream ptxfile(argv[1]);
    std::stringstream buffer;
    buffer << ptxfile.rdbuf();
    ptxfile.close();
    std::string ptx_kernel = buffer.str();
    std::cout << "Loading PTX kernel with driver\n" << ptx_kernel;

    const unsigned int jitNumOptions = 3;
    CUjit_option *jitOptions = new CUjit_option[jitNumOptions];
    void **jitOptVals = new void*[jitNumOptions];

    // set up size of compilation log buffer
    jitOptions[0] = CU_JIT_INFO_LOG_BUFFER_SIZE_BYTES;
    int jitLogBufferSize = 1024*1024;
    jitOptVals[0] = (void *)&jitLogBufferSize;

    // set up pointer to the compilation log buffer
    jitOptions[1] = CU_JIT_INFO_LOG_BUFFER;
    char *jitLogBuffer = new char[jitLogBufferSize];
    jitOptVals[1] = jitLogBuffer;

    // set up wall clock time
    jitOptions[2] = CU_JIT_WALL_TIME;
    float jitTime = -2.0;
    jitOptVals[2] = &jitTime;

    ret = cuModuleLoadDataEx(&cuModule, ptx_kernel.c_str(), jitNumOptions, jitOptions, (void **)jitOptVals);
    if (ret != CUDA_SUCCESS) { exit(1); }

    std::cout << "walltime: " << jitTime << "\n";
    std::cout << std::string(jitLogBuffer) << "\n";
}
Build (assuming CUDA is installed under /usr/local/cuda, I use CUDA 5.0):
g++ -I/usr/local/cuda/include -L/usr/local/cuda/lib64/ main.cc -o main -lcuda
If someone is able to extract any sensible information from the compilation process, that would be great! The CUDA driver API documentation, where cuModuleLoadDataEx (and the options it is supposed to accept) is described: http://docs.nvidia.com/cuda/cuda-driver-api/index.html
If I run this, the log is empty and jitTime isn't even touched by the NV driver:
./main small.ptx
trying to get device 0
trying to create a context
loading PTX string from file small.ptx
Loading PTX kernel with driver
.version 3.1
.target sm_20, texmode_independent
.address_size 64
.entry main()
{
ret;
}
walltime: -2
EDIT:
I managed to get the JIT compile time. However, it seems that the driver expects an array of 32-bit values as optVals, not, as stated in the manual, an array of pointers (void *), which are 64 bits on my system. So this works:
const unsigned int jitNumOptions = 1;
CUjit_option *jitOptions = new CUjit_option[jitNumOptions];
int *jitOptVals = new int[jitNumOptions];
jitOptions[0] = CU_JIT_WALL_TIME;
// here the call to cuModuleLoadDataEx
std::cout << "walltime: " << (float)jitOptions[0] << "\n";
I believe that it is not possible to do the same with an array of void *. The following code does not work:
const unsigned int jitNumOptions = 1;
CUjit_option *jitOptions = new CUjit_option[jitNumOptions];
void **jitOptVals = new void*[jitNumOptions];
jitOptions[0] = CU_JIT_WALL_TIME;
// here the call to cuModuleLoadDataEx
// here I also would have a problem casting a 64 bit void * to a float (32 bit)
EDIT
Looking at the JIT compilation time in jitOptVals[0] was misleading. As mentioned in the comments, the JIT compiler caches previous translations and won't update the JIT compile time if it finds a cached compilation. Since I was checking whether this value changed, I assumed that the call ignores the options altogether, which it doesn't; it works fine.
Your jitOptVals should not contain pointers to your values; instead, cast the values themselves to void*:
// set up size of compilation log buffer
jitOptions[0] = CU_JIT_INFO_LOG_BUFFER_SIZE_BYTES;
int jitLogBufferSize = 1024*1024;
jitOptVals[0] = (void *)jitLogBufferSize;
// set up pointer to the compilation log buffer
jitOptions[1] = CU_JIT_INFO_LOG_BUFFER;
char *jitLogBuffer = new char[jitLogBufferSize];
jitOptVals[1] = jitLogBuffer;
// set up wall clock time
jitOptions[2] = CU_JIT_WALL_TIME;
float jitTime = -2.0;
// Leave jitOptVals[2] empty, as it is only an output value:
//jitOptVals[2] = (void*)jitTime;
and after cuModuleLoadDataEx you read the wall time back out of jitOptVals[2], where the driver stores the resulting float (see the sketch below).
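To read it back, a minimal sketch (assuming a little-endian 64-bit system, where the driver's 32-bit float result sits in the low bits of the void* slot; needs #include <cstring> for std::memcpy):
// Sketch: read CU_JIT_WALL_TIME back from the option-values array after
// cuModuleLoadDataEx has returned (uses the jitOptVals / jitLogBuffer set up above).
float jitTime = 0.0f;
std::memcpy(&jitTime, &jitOptVals[2], sizeof(jitTime));  // reinterpret the low 32 bits of the void*
std::cout << "walltime: " << jitTime << " ms\n";
std::cout << "log: " << jitLogBuffer << "\n";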
I was trying to copy a structure to constant memory in this way:
struct Foo {
    int a, b, c;
};

__constant__ Foo cData;

int main() {
    Foo hData = {1, 2, 3};
    cudaMemcpyToSymbol(cData, &hData, sizeof(Foo));
    // ...
}
And this worked fine, in my kernel I could access the constant data directly:
__global__ void kernel() {
    printf("Data is: %d %d %d\n", cData.a, cData.b, cData.c); // 1 2 3
}
But then I tried to use a const char * as symbol name, and things stopped working:
cudaMemcpyToSymbol("cData", &hData, sizeof(Foo)); // prints 0 0 0
I thought both versions were similar, but it seems I was wrong.
What is happening?
EDIT:
I'd like to report the same behavior with cudaGetSymbolAddress, which works for me as long as no const char * is used:
__constant__ int someData[10];
__constant__ int *ptrToData;
int *dataPosition;
cudaGetSymbolAddress((void **)&dataPosition, someData); // Works
// cudaGetSymbolAddress((void **)&dataPosition, "someData"); // Does not work
cudaMemcpyToSymbol(ptrToData, &dataPosition, sizeof(int *));
As of CUDA 5, using a string for symbol names is no longer supported. This is covered in the CUDA 5 release notes here
•The use of a character string to indicate a device symbol, which was possible with certain API functions, is no longer supported. Instead, the symbol should be used directly.
One of the reasons for this has to do with enabling a true device linker, which is new functionality in CUDA 5.
Because I kept running into the same error again and again, I want to share this sample code, which covers nearly all of the typical cases of this problem (so I can refer back here when I make the same mistakes again).
//file: main.cu
#include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
__constant__ float constData[256];
__device__ float devData;
__device__ float* devPointer;
int main(int argc, char **argv)
{
    cudaFree(0);

    // copy to a __constant__ array
    float data[256];
    cudaError_t err = cudaMemcpyToSymbol(constData, data, sizeof(data));
    printf("Err id: %d, str: %s\n", err, cudaGetErrorString(err));

    // copy to a __device__ scalar
    float value = 3.14f;
    err = cudaMemcpyToSymbol(devData, &value, sizeof(float));
    printf("Err id: %d, str: %s\n", err, cudaGetErrorString(err));

    // copy a device pointer into a __device__ pointer variable
    float* ptr;
    cudaMalloc(&ptr, 256 * sizeof(float));
    err = cudaMemcpyToSymbol(devPointer, &ptr, sizeof(ptr));
    printf("Err id: %d, str: %s\n", err, cudaGetErrorString(err));
    cudaFree(ptr);

    return EXIT_SUCCESS;
}
I was getting "invalid device symbol" and many other errors related to __constant__ and __device__ memory usage. This code gives no such errors at runtime.
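For completeness, a short sketch (my addition, assuming CUDA 5 or newer) of the calls that do trigger "invalid device symbol", to contrast with the working code above:
// Continuing the sample above: these variants return cudaErrorInvalidSymbol.
// 1) Passing a string instead of the symbol (no longer supported since CUDA 5):
err = cudaMemcpyToSymbol("devData", &value, sizeof(float));
printf("Err id: %d, str: %s\n", err, cudaGetErrorString(err)); // expected: invalid device symbol
// 2) Passing the address of the symbol instead of the symbol itself:
err = cudaMemcpyToSymbol(&devData, &value, sizeof(float));
printf("Err id: %d, str: %s\n", err, cudaGetErrorString(err)); // expected: invalid device symbol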