We have a working library that uses LibTorch 1.5.0, built with CUDA 10.0, which runs as expected.
We are working on upgrading to CUDA 10.2 for various non-PyTorch-related reasons. We noticed that when we run LibTorch inference with the newly compiled LibTorch (compiled exactly the same way, only switching to CUDA 10.2), the runtime is about 20x slower.
We also checked it using the precompiled binaries.
This was tested on 3 different machines using 3 different GPUs (Tesla T4, GTX 980 & P1000), and all give a consistent ~20x slowdown on CUDA 10.2
(both on Windows 10 & Ubuntu 16.04), all with the latest drivers and on 3 different torch scripts (of the same architecture).
I've simplified the code to be extremely minimal, without external dependencies other than Torch:
int main(int argc, char** argv)
{
    // Initialize CUDA device 0
    cudaSetDevice(0);

    std::string networkPath = DEFAULT_TORCH_SCRIPT;
    if (argc > 1)
    {
        networkPath = argv[1];
    }

    auto jitModule = std::make_shared<torch::jit::Module>(torch::jit::load(networkPath, torch::kCUDA));
    if (jitModule == nullptr)
    {
        std::cerr << "Failed creating module" << std::endl;
        return EXIT_FAILURE;
    }

    // Meaningless data, just something to pass to the module to run on
    // PATCH_HEIGHT & PATCH_WIDTH are defined as 256
    uint8_t* data = new uint8_t[PATCH_HEIGHT * PATCH_WIDTH * 3];
    memset(data, 0, PATCH_HEIGHT * PATCH_WIDTH * 3);

    auto stream = at::cuda::getStreamFromPool(true, 0);

    bool res = infer(jitModule, stream, data, PATCH_WIDTH, PATCH_HEIGHT);
    std::cout << "Warmed up" << std::endl;
    res = infer(jitModule, stream, data, PATCH_WIDTH, PATCH_HEIGHT);

    delete[] data;
    return 0;
}
// Inference function (JitModule is the torch::jit::Module loaded above)
bool infer(std::shared_ptr<JitModule>& jitModule, at::cuda::CUDAStream& stream, const uint8_t* inputData, int width, int height)
{
    std::vector<torch::jit::IValue> tensorInput;
    // This function simply uses cudaMemcpy to copy the data to the device and creates a torch::Tensor from it.
    // I can paste it if it's relevant, but didn't, to keep this as clean as possible (a rough sketch follows below).
    if (!prepareInput(inputData, width, height, tensorInput, stream))
    {
        return false;
    }

    // Reduce memory usage by running without gradients
    torch::NoGradGuard noGrad;
    {
        at::cuda::CUDAStreamGuard streamGuard(stream);

        auto totalTimeStart = std::chrono::high_resolution_clock::now();
        jitModule->forward(tensorInput);
        // The synchronize here is just for timing's sake, not used in production
        cudaStreamSynchronize(stream.stream());
        auto totalTimeStop = std::chrono::high_resolution_clock::now();

        printf("forward sync time = %.3f milliseconds\n",
               std::chrono::duration<double, std::milli>(totalTimeStop - totalTimeStart).count());
    }
    return true;
}
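For completeness, here is a rough sketch of what prepareInput does (not the exact production code; the buffer handling and the HWC-to-NCHW conversion below are assumptions, and it needs <torch/script.h>, <c10/cuda/CUDAStream.h> and <cuda_runtime.h>):

// Hypothetical sketch: copy the 8-bit HxWx3 patch to the GPU and wrap it in an NCHW float tensor
bool prepareInput(const uint8_t* inputData, int width, int height,
                  std::vector<torch::jit::IValue>& tensorInput, at::cuda::CUDAStream& stream)
{
    const size_t numBytes = static_cast<size_t>(width) * height * 3;
    void* devPtr = nullptr;
    if (cudaMalloc(&devPtr, numBytes) != cudaSuccess)
    {
        return false;
    }
    if (cudaMemcpyAsync(devPtr, inputData, numBytes, cudaMemcpyHostToDevice, stream.stream()) != cudaSuccess)
    {
        return false;
    }
    // Wrap the device buffer without copying (leaked here for brevity), then convert HWC uint8 -> NCHW float
    auto options = torch::TensorOptions().dtype(torch::kUInt8).device(torch::kCUDA, 0);
    auto tensor = torch::from_blob(devPtr, {height, width, 3}, options)
                      .permute({2, 0, 1})
                      .unsqueeze(0)
                      .to(torch::kFloat);
    tensorInput.emplace_back(tensor);
    return true;
}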
When we compile this against Torch built with CUDA 10.0 we get a runtime of 18 ms, and when we run it against Torch built with CUDA 10.2 we get a runtime of 430 ms.
Any thoughts on that?
This issue was also posted on the PyTorch forums and as an issue on GitHub.
UPDATE
I profiled this small program using both CUDA versions, and it seems they use very different kernels.
96.5% of the compute time in the 10.2 run is spent in conv2d_grouped_direct_kernel, which takes ~60-100 ms on my P1000,
whereas the top kernels in the 10.0 run are
47.1% - cudnn::detail::implicit_convolve_sgemm (~1.5 ms)
23.1% - maxwell_scudnn_winograd_128x128_ldg1_ldg4_tile148n_nt (~0.4 ms)
8.5% - maxwell_scudnn_128x32_relu_small_nn (~0.4 ms)
so it's easy to see where the time difference comes from. Now the question is why.
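Since the fast 10.0 run is dominated by cuDNN kernels while the 10.2 run falls back to a generic grouped-convolution kernel, one diagnostic worth running against the 10.2 build (just a sanity check, not a fix) is whether that LibTorch build sees cuDNN at all and has cuDNN benchmarking enabled:

#include <torch/torch.h>
#include <iostream>

int main()
{
    // Report whether this LibTorch build can use CUDA and cuDNN at runtime
    std::cout << "CUDA available:  " << torch::cuda::is_available() << std::endl;
    std::cout << "cuDNN available: " << torch::cuda::cudnn_is_available() << std::endl;

    // Let cuDNN benchmark convolution algorithms instead of relying on heuristics
    at::globalContext().setBenchmarkCuDNN(true);
    return 0;
}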
Related
I'm trying to run TensorRT inference in C++. Sometimes the code crashes when trying to build a new engine or to load an engine from file. It happens occasionally (sometimes it runs without any problem). I follow the steps below to prepare the network:
initLibNvInferPlugins(&gLogger.getTRTLogger(), "");
if (mParams.loadEngine.size() > 0)
{
    std::vector<char> trtModelStream;
    size_t size{0};
    std::ifstream file(mParams.loadEngine, std::ios::binary);
    if (file.good())
    {
        file.seekg(0, file.end);
        size = file.tellg();
        file.seekg(0, file.beg);
        trtModelStream.resize(size);
        file.read(trtModelStream.data(), size);
        file.close();
    }

    IRuntime* infer_Runtime = nvinfer1::createInferRuntime(gLogger);
    if (mParams.dlaCore >= 0)
    {
        infer_Runtime->setDLACore(mParams.dlaCore);
    }

    mEngine = std::shared_ptr<nvinfer1::ICudaEngine>(
        infer_Runtime->deserializeCudaEngine(trtModelStream.data(), size, nullptr), samplesCommon::InferDeleter());

    gLogInfo << "TRT Engine loaded from: " << mParams.loadEngine << endl;
    infer_Runtime->destroy();

    if (!mEngine)
    {
        return false;
    }
    else
    {
        return true;
    }
}

auto builder = SampleUniquePtr<nvinfer1::IBuilder>(nvinfer1::createInferBuilder(gLogger.getTRTLogger()));
const auto explicitBatch = 1U << static_cast<uint32_t>(NetworkDefinitionCreationFlag::kEXPLICIT_BATCH);
auto network = SampleUniquePtr<nvinfer1::INetworkDefinition>(builder->createNetworkV2(explicitBatch));
auto config = SampleUniquePtr<nvinfer1::IBuilderConfig>(builder->createBuilderConfig());
auto parser = SampleUniquePtr<nvonnxparser::IParser>(nvonnxparser::createParser(*network, gLogger.getTRTLogger()));

mEngine = nullptr;
parser->parseFromFile(
    locateFile(mParams.onnxFileName, mParams.dataDirs).c_str(), static_cast<int>(gLogger.getReportableSeverity()));

// Calibrator life time needs to last until after the engine is built.
std::unique_ptr<IInt8Calibrator> calibrator;

config->setAvgTimingIterations(1);
config->setMinTimingIterations(1);
config->setMaxWorkspaceSize(4_GiB);
builder->setMaxBatchSize(mParams.batchSize);

mEngine = std::shared_ptr<nvinfer1::ICudaEngine>(
    builder->buildEngineWithConfig(*network, *config), samplesCommon::InferDeleter());
The error occurs here:
[05/12/2021-16:46:42] [I] [TRT] Detected 1 inputs and 1 output network tensors.
16:46:42: The program has unexpectedly finished.
This line crashes when loading an existing engine:
mEngine = std::shared_ptr<nvinfer1::ICudaEngine>(
    infer_Runtime->deserializeCudaEngine(trtModelStream.data(), size, nullptr), samplesCommon::InferDeleter());
Or when building the engine:
mEngine = std::shared_ptr<nvinfer1::ICudaEngine>(
    builder->buildEngineWithConfig(*network, *config), samplesCommon::InferDeleter());
More info:
TensorRT 7.2.3
Ubuntu 18.04
cuDNN 8.1.1
CUDA 11.1 update1
ONNX 1.6.0
PyTorch 1.5.0
Finally got it!
I rewrote the CMakeLists.txt, added all the required libraries and paths, and removed the duplicate ones. It might have been a library conflict in cuBLAS.
I'll warn you in advance that my written English is not good, so please have some patience, because I'll make a lot of errors.
I need to expose the graphics card in order to do some benchmarks with parallel algorithms for finite element analysis. I downloaded the Intel SDK at this link: https://software.intel.com/en-us/intel-opencl .
I am using Ubuntu 16.10, so I followed all the instructions explained in this post: https://streamcomputing.eu/blog/2011-06-24/install-opencl-on-debianubuntu-orderly/ .
When I run a simple program which checks all the devices, it only recognizes the CPU and fails to find the graphics card. The same program works well on a Mac (because OpenCL is part of the stack there, of course).
// includes...
int main(int argc, const char* argv[])
{
    // See what standard OpenCL sees
    std::vector<cl::Platform> platforms;
    // Get the available platforms
    cl::Platform::get(&platforms);

    // Temporary string for device info queries
    std::string s;
    // Where the GPU lies
    cl::Device gpudevice;
    // Found a GPU
    bool gpufound = false;

    std::cout << "**** OPENCL ****" << std::endl;

    // See if we have a GPU
    for (auto p : platforms)
    {
        std::vector<cl::Device> devices;
        p.getDevices(CL_DEVICE_TYPE_ALL, &devices);
        for (auto d : devices)
        {
            std::size_t i = 4;
            d.getInfo(CL_DEVICE_TYPE, &i);
            std::cout << "> Device type " <<
                (i & CL_DEVICE_TYPE_CPU ? "CPU" : "") <<
                (i & CL_DEVICE_TYPE_GPU ? "GPU" : "") <<
                (i & CL_DEVICE_TYPE_ACCELERATOR ? "ACCELERATOR" : "");
            if (i & CL_DEVICE_TYPE_GPU)
            {
                gpudevice = d;
                gpufound = true;
            }
            d.getInfo(CL_DEVICE_VERSION, &s);
            std::cout << " Version " << s << std::endl;
        }
    }

    if (!gpufound)
    {
        std::cout << "NO GPU FOUND. ABORTING." << std::endl;
        return 1;
    }
    // Do other things...
The output is:
/home/andrea/Dropbox/fem/SiNDy/clfem/cmake-build-debug/vector_sycl
**** OPENCL ****
> Device type CPU Version
NO GPU FOUND. ABORTING.
Process finished with exit code 1
I tried adding the current user to the video group, and I also tried to install Intel Media Server Studio following the instructions that come with the package, but I could not build the kernel because of some compile errors.
I also updated all the drivers with Ubuntu's automatic software updater, but the graphics card is still not found.
Maybe you want to try beignet, which is an OpenCL implementation for Ivy Bridge and newer Intel iGPUs. There are beignet packages for Ubuntu 16.10; to be more precise, I think you are looking for the packages beignet-dev and beignet-opencl-icd. Test it yourself, since I have no Ubuntu installation currently available. (However, beignet itself works pretty well on my Intel HD Graphics 520 under Antergos/Arch Linux.)
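Once the ICD is installed, the GPU should show up as an additional OpenCL platform. A minimal check in the same style as the code above (nothing beignet-specific is assumed here) would be:

#include <CL/cl.hpp>
#include <iostream>
#include <vector>

int main()
{
    std::vector<cl::Platform> platforms;
    cl::Platform::get(&platforms);

    // Print every platform and the devices it exposes; after installing
    // beignet-opencl-icd an Intel GPU platform should appear in this list.
    for (auto& p : platforms)
    {
        std::cout << "Platform: " << p.getInfo<CL_PLATFORM_NAME>()
                  << " (" << p.getInfo<CL_PLATFORM_VERSION>() << ")" << std::endl;

        std::vector<cl::Device> devices;
        p.getDevices(CL_DEVICE_TYPE_ALL, &devices);
        for (auto& d : devices)
        {
            std::cout << "  Device: " << d.getInfo<CL_DEVICE_NAME>() << std::endl;
        }
    }
    return 0;
}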
I have an Nvidia GTX 970M GPU and I am trying to run a face detection algorithm in C++ that runs on the GPU using OpenCL.
The function where this error occurs is :
ocl::OclCascadeClassifier::detectMultiScale()
The error I get is :
OpenCV Error: Assertion failed (localThreads[0] * localThreads[1] * localThreads[2] <= kernelWorkGroupSize) in cv::ocl::openCLVerifyKernel
I know that this problem is related to the GPU of the device but I do not know how to fix this. I have tried using OpenCV versions 2 and 3 but both give the same problem.
The problem was that it was trying to use the integrated Intel HD Graphics GPU instead of the Nvidia GPU. I solved this by selecting the Nvidia GPU as the OpenCL device.
The code I used was:
cv::ocl::DevicesInfo devInfo;
int res = cv::ocl::getOpenCLDevices(devInfo);
if (res == 0)
{
    std::cerr << "There is no OPENCL Here !" << std::endl;
}
else
{
    for (unsigned int i = 0; i < devInfo.size(); ++i)
    {
        std::cout << "Device : " << devInfo[i]->deviceName << " is present" << std::endl;
    }
}
// Index 1 happened to be the Nvidia GPU on my machine; use the index printed above
cv::ocl::setDevice(devInfo[1]);
I am new to OpenCV with CUDA.
I use OpenCV 2.4.6 and CUDA 4.2.
I have successfully compiled OpenCV with CUDA support.
When I use the code:
int cuda_count;
cudaError_t error = cudaGetDeviceCount(&cuda_count);
it returns cudaSuccess and cuda_count = 1.
But when I use the code:
int num_devices = cv::gpu::getCudaEnabledDeviceCount();
num_devices comes back as 0.
Why?
My complete code is:
int main()
{
    int num_devices = cv::gpu::getCudaEnabledDeviceCount();

    int cuda_count;
    cudaError_t error = cudaGetDeviceCount(&cuda_count);

    if (num_devices <= 0)
    {
        std::cerr << "no" << std::endl;
        return -1;
    }
    int enable_device_id = -1;
}
You must have compiled OpenCV without CUDA support. From the documentation:
gpu::getCudaEnabledDeviceCount returns the number of installed CUDA-enabled devices.
C++: int gpu::getCudaEnabledDeviceCount()
Use this function before any other GPU function calls. If OpenCV is compiled without GPU support, this function returns 0.
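A quick way to confirm whether your OpenCV build actually has CUDA support (a small check, using the standard cv::getBuildInformation() call) is to print the build configuration and look for the CUDA-related lines:

#include <opencv2/core/core.hpp>
#include <iostream>

int main()
{
    // Prints the compile-time configuration OpenCV was built with;
    // the CUDA entries show whether GPU support was enabled.
    std::cout << cv::getBuildInformation() << std::endl;
    return 0;
}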
This question is similar to cuModuleLoadDataEx options but I would like to bring the topic up again and in addition provide more information.
When loading a PTX string with the NV driver via cuModuleLoadDataEx, it seems to ignore all options altogether. I provide full working examples so that anyone interested can directly and with no effort reproduce this: first a small PTX kernel (save it as small.ptx), then the C++ program that loads the PTX kernel.
.version 3.1
.target sm_20, texmode_independent
.address_size 64
.entry main()
{
ret;
}
main.cc
#include <cstdlib>
#include <iostream>
#include <fstream>
#include <sstream>
#include <string>
#include <map>

#include "cuda.h"

int main(int argc, char *argv[])
{
    CUdevice cuDevice;
    CUcontext cuContext;
    CUfunction func;
    CUresult ret;
    CUmodule cuModule;

    cuInit(0);

    std::cout << "trying to get device 0\n";
    ret = cuDeviceGet(&cuDevice, 0);
    if (ret != CUDA_SUCCESS) { exit(1); }

    std::cout << "trying to create a context\n";
    ret = cuCtxCreate(&cuContext, 0, cuDevice);
    if (ret != CUDA_SUCCESS) { exit(1); }

    std::cout << "loading PTX string from file " << argv[1] << "\n";
    std::ifstream ptxfile(argv[1]);
    std::stringstream buffer;
    buffer << ptxfile.rdbuf();
    ptxfile.close();
    std::string ptx_kernel = buffer.str();
    std::cout << "Loading PTX kernel with driver\n" << ptx_kernel;

    const unsigned int jitNumOptions = 3;
    CUjit_option *jitOptions = new CUjit_option[jitNumOptions];
    void **jitOptVals = new void*[jitNumOptions];

    // set up size of compilation log buffer
    jitOptions[0] = CU_JIT_INFO_LOG_BUFFER_SIZE_BYTES;
    int jitLogBufferSize = 1024*1024;
    jitOptVals[0] = (void *)&jitLogBufferSize;

    // set up pointer to the compilation log buffer
    jitOptions[1] = CU_JIT_INFO_LOG_BUFFER;
    char *jitLogBuffer = new char[jitLogBufferSize];
    jitOptVals[1] = jitLogBuffer;

    // set up wall clock time
    jitOptions[2] = CU_JIT_WALL_TIME;
    float jitTime = -2.0;
    jitOptVals[2] = &jitTime;

    ret = cuModuleLoadDataEx(&cuModule, ptx_kernel.c_str(), jitNumOptions, jitOptions, (void **)jitOptVals);
    if (ret != CUDA_SUCCESS) { exit(1); }

    std::cout << "walltime: " << jitTime << "\n";
    std::cout << std::string(jitLogBuffer) << "\n";
}
Build (assuming CUDA is installed under /usr/local/cuda, I use CUDA 5.0):
g++ -I/usr/local/cuda/include -L/usr/local/cuda/lib64/ main.cc -o main -lcuda
If someone is able to extract any sensible information from the compilation process, that would be great! The documentation of the CUDA driver API, where cuModuleLoadDataEx is explained (and which options it is supposed to accept), is at http://docs.nvidia.com/cuda/cuda-driver-api/index.html
If I run this, the log is empty and jitTime wasn't even touched by the NV driver:
./main small.ptx
trying to get device 0
trying to create a context
loading PTX string from file empty.ptx
Loading PTX kernel with driver
.version 3.1
.target sm_20, texmode_independent
.address_size 64
.entry main()
{
ret;
}
walltime: -2
EDIT:
I managed to get the JIT compile time. However, it seems that the driver expects an array of 32-bit values as OptVals, not, as stated in the manual, an array of pointers (void *), which are 64 bits on my system. So this works:
const unsigned int jitNumOptions = 1;
CUjit_option *jitOptions = new CUjit_option[jitNumOptions];
int *jitOptVals = new int[jitNumOptions];
jitOptions[0] = CU_JIT_WALL_TIME;
// here the call to cuModuleLoadDataEx
// read the wall time back out of the option-value slot (not out of jitOptions)
std::cout << "walltime: " << *reinterpret_cast<float*>(&jitOptVals[0]) << "\n";
I believe that it is not possible to do the same with an array of void *. The following code does not work:
const unsigned int jitNumOptions = 1;
CUjit_option *jitOptions = new CUjit_option[jitNumOptions];
void **jitOptVals = new void*[jitNumOptions];
jitOptions[0] = CU_JIT_WALL_TIME;
// here the call to cuModuleLoadDataEx
// here I also would have a problem casting a 64 bit void * to a float (32 bit)
EDIT
Looking at the JIT compilation time in jitOptVals[0] was misleading. As mentioned in the comments, the JIT compiler caches previous translations and won't update the JIT compile time if it finds a cached compilation. Since I was only checking whether this value changed, I assumed that the call ignores the options altogether, which it doesn't. It works fine.
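As an aside, if you want the JIT to actually recompile (so that CU_JIT_WALL_TIME reports a fresh compilation rather than a cache hit), you can disable the driver's compute cache. A small sketch, assuming the documented CUDA_CACHE_DISABLE environment variable (you can equally export it in the shell before running):

#include <cstdlib>
#include "cuda.h"

int main()
{
    // Disable the driver's JIT compute cache so cuModuleLoadDataEx really
    // recompiles the PTX instead of reusing a cached translation.
    setenv("CUDA_CACHE_DISABLE", "1", 1);

    cuInit(0);
    // ... create the context and call cuModuleLoadDataEx as in main.cc above ...
    return 0;
}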
Your jitOptVals should not contain pointers to your values; instead, cast the values themselves to void*:
// set up size of compilation log buffer
jitOptions[0] = CU_JIT_INFO_LOG_BUFFER_SIZE_BYTES;
int jitLogBufferSize = 1024*1024;
jitOptVals[0] = (void *)(size_t)jitLogBufferSize;

// set up pointer to the compilation log buffer
jitOptions[1] = CU_JIT_INFO_LOG_BUFFER;
char *jitLogBuffer = new char[jitLogBufferSize];
jitOptVals[1] = jitLogBuffer;

// set up wall clock time
jitOptions[2] = CU_JIT_WALL_TIME;
float jitTime = -2.0;
// keep jitOptVals[2] empty, as it is only an output value:
//jitOptVals[2] = (void*)jitTime;
After cuModuleLoadDataEx returns, the driver has overwritten the CU_JIT_WALL_TIME slot, so you read your jitTime back by reinterpreting the bits of jitOptVals[2] as a float (for example with a memcpy) rather than with a plain cast.
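Putting this together with the program above, a minimal corrected option setup might look like the following (a sketch that replaces the option block in main.cc; it assumes the driver overwrites the wall-time slot as the documentation describes, and it needs <cstring> for std::memcpy):

// drop-in replacement for the option setup in main.cc above
const unsigned int jitNumOptions = 3;
CUjit_option jitOptions[jitNumOptions];
void* jitOptVals[jitNumOptions];

int jitLogBufferSize = 1024 * 1024;
char* jitLogBuffer = new char[jitLogBufferSize];

jitOptions[0] = CU_JIT_INFO_LOG_BUFFER_SIZE_BYTES;
jitOptVals[0] = (void*)(size_t)jitLogBufferSize;   // pass the size by value, not a pointer to it

jitOptions[1] = CU_JIT_INFO_LOG_BUFFER;
jitOptVals[1] = jitLogBuffer;                      // buffers are passed as pointers

jitOptions[2] = CU_JIT_WALL_TIME;
jitOptVals[2] = nullptr;                           // output only; the driver fills this slot in

ret = cuModuleLoadDataEx(&cuModule, ptx_kernel.c_str(), jitNumOptions, jitOptions, jitOptVals);
if (ret == CUDA_SUCCESS)
{
    float jitTime = 0.0f;
    std::memcpy(&jitTime, &jitOptVals[2], sizeof(jitTime));  // reinterpret the overwritten slot
    std::cout << "walltime: " << jitTime << " ms\n";
    std::cout << jitLogBuffer << "\n";
}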