Unable to build mex file - c++

I've followed the MATLAB example for creating a mex file from here https://uk.mathworks.com/help/matlab/matlab_external/standalone-example.html
The source code it produces is as follows
#include "mex.h"
/* The computational routine */
void arrayProduct(double x, double *y, double *z, mwSize n)
{
mwSize i;
/* multiply each element y by x */
for (i=0; i<n; i++) {
z[i] = x * y[i];
}
}
/* The gateway function */
void mexFunction( int nlhs, mxArray *plhs[],
int nrhs, const mxArray *prhs[])
{
double multiplier; /* input scalar */
double *inMatrix; /* 1xN input matrix */
size_t ncols; /* size of matrix */
double *outMatrix; /* output matrix */
/* check for proper number of arguments */
if(nrhs!=2) {
mexErrMsgIdAndTxt("MyToolbox:arrayProduct:nrhs","Two inputs required.");
}
if(nlhs!=1) {
mexErrMsgIdAndTxt("MyToolbox:arrayProduct:nlhs","One output required.");
}
/* make sure the first input argument is scalar */
if( !mxIsDouble(prhs[0]) ||
mxIsComplex(prhs[0]) ||
mxGetNumberOfElements(prhs[0])!=1 ) {
mexErrMsgIdAndTxt("MyToolbox:arrayProduct:notScalar","Input multiplier must be a scalar.");
}
/* make sure the second input argument is type double */
if( !mxIsDouble(prhs[1]) ||
mxIsComplex(prhs[1])) {
mexErrMsgIdAndTxt("MyToolbox:arrayProduct:notDouble","Input matrix must be type double.");
}
/* check that number of rows in second input argument is 1 */
if(mxGetM(prhs[1])!=1) {
mexErrMsgIdAndTxt("MyToolbox:arrayProduct:notRowVector","Input must be a row vector.");
}
/* get the value of the scalar input */
multiplier = mxGetScalar(prhs[0]);
/* create a pointer to the real data in the input matrix */
inMatrix = mxGetPr(prhs[1]);
/* get dimensions of the input matrix */
ncols = mxGetN(prhs[1]);
/* create the output matrix */
plhs[0] = mxCreateDoubleMatrix(1,(mwSize)ncols,mxREAL);
/* get a pointer to the real data in the output matrix */
outMatrix = mxGetPr(plhs[0]);
/* call the computational routine */
arrayProduct(multiplier,inMatrix,outMatrix,(mwSize)ncols);
}
When I run the command mex arrayProduct.cpp (the name of my file), I get the following error:
Building with 'Microsoft Visual C++ 2017'.
Error using mex
LINK : error LNK2001: unresolved external symbol mexfilerequiredapiversion
arrayProduct.lib : fatal error LNK1120: 1 unresolved externals
I'm using MATLAB 2015b 32-bit, with the Visual Studio 2017 C++ compiler. Is there some preliminary set-up required for making mex files that isn't mentioned in the MATLAB tutorial?

The newest compiler supported by MATLAB R2015b is MSVC Professional 2015. Also, R2015b is the latest version with 32-bit support. Your compiler is probably MSVC 2017, 64-bit.
Try installing .NET 4 + Windows SDK 7.1, select that compiler in MATLAB, and re-run your mex command. That is an officially supported compiler for R2015b, and I expect it will solve your problem.
Note: for me, .NET 4 refused to install because it detected a previously installed framework, but this answer resolved that problem for me.
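Once the SDK compiler is installed, selecting it and rebuilding from the MATLAB prompt looks roughly like this (the exact prompts and menu text vary between releases, so treat this as a sketch):
>> mex -setup            % choose C++ and pick the Windows SDK 7.1 compiler when prompted
>> mex arrayProduct.cpp  % rebuild with the newly selected compiler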

Related

Inconsistent MEX file Output using Armadillo Interpolation

I am trying to convert MATLAB code to a C++ MEX file in order to run a few computations more efficiently. I am using the Armadillo library with BLAS and LAPACK for a few matrix operations, which involves interpolating data to apply a delay.
However, I am getting inconsistent output from my MEX file. If I run the same MEX file with the same input, sometimes I receive the correct output, and occasionally it will output a HUGE number (e.g. instead of on the order of 100, it is on the order of 10^246).
I am very new to C++ coding and have exhausted my general knowledge base. I believe the problem is in my interpolation step, because I am able to consistently output the correct delay matrix, which is the preceding step.
Does anyone have any idea what I am doing wrong to produce this?
In Matlab I call:
mex test.cpp -lblas -llapack
[outData] = test( squeeze(inData(:,:,ang,:)) , params, angles(ang),1);
My mex file is generally:
#include <math.h>
#include <mex.h>
#include <armadillo>
#include "armaMex.hpp"
using namespace std;  // avoid having to scope with std:: before commands
using namespace arma; // avoid having to scope with arma:: before commands

void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[]){
    // ============== INITIALIZE =============
    // Initialize Data
    const mwSize *dims;
    int cDim,dDim,aDim,numDims;                    // Dimension variables
    int m, n, a;                                   // Loop variables
    mxArray *fs_p, *f0_p, *prf_p, *pval_p, *c_p;   // Parameter pointers
    const double *fs,*f0,*prf,*pval, *c, *ang;     // Parameter variables
    const int *nthreads;

    // Initialize pointers for param variables
    pval_p = mxGetField(prhs[1],0,"pval"); //note that your parameters need these exact names
    fs_p   = mxGetField(prhs[1],0,"fs");
    f0_p   = mxGetField(prhs[1],0,"f0");
    prf_p  = mxGetField(prhs[1],0,"prf");
    c_p    = mxGetField(prhs[1],0,"c");

    // Initialize parameters
    pval = mxGetPr(pval_p);
    fs   = mxGetPr(fs_p);
    f0   = mxGetPr(pval_p);
    prf  = mxGetPr(prf_p);
    c    = mxGetPr(c_p);
    ang  = (double*)mxGetData(prhs[2]);
    nthreads = (int*)mxGetData(prhs[3]);

    dims = mxGetDimensions(prhs[0]);
    numDims = (int)mxGetNumberOfDimensions(prhs[0]);
    dDim=(int)dims[0];cDim=(int)dims[1];aDim=(int)dims[2];

    //Read in channel Data
    cube data_in = armaGetCubePr(prhs[0]);

    (....... simple calculations that look okay ... )

    cube data_out(dDim, bDim, aDim);
    cube delayedData(dDim, aDim, bDim);
    vec delayArray(dDim); //need to define these tmp variables bc subcube fcn otherwise gives me errors idk
    vec tmpIN(dDim);
    vec tmpOut(dDim);
    vec tmpOUTdata(dDim);

    for(m=0;m<bDim;m++){
        for(n=0;n<cDim;n++){
            for (a=0;a<aDim;a++){
                delayArray = tdelays.subcube(0,n,m,dDim-1,n,m);
                tmpIN = data_in.subcube(0,n,a,dDim-1,n,a);
                tmpOUTdata = data_out.subcube(0,m,a,dDim-1,m,a);
                interp1(timeArray, tmpIN , delayArray, tmpOut, "linear",0);
                data_out.subcube(0,m,a,dDim-1,m,a) = tmpOUTdata + tmpOut;
            }
        }
    }

    // Define output data
    plhs[0] = armaCreateMxMatrix(data_out.n_rows, data_out.n_cols, data_out.n_slices);
    armaSetCubePr(plhs[0], data_out);
    return;
}
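For reference, here is a minimal standalone Armadillo program that isolates the interp1() step with made-up data; the variable names mirror the code above, but the values and sizes are placeholders:
#include <armadillo>
// compile with e.g.: g++ interp_demo.cpp -o interp_demo -larmadillo
using namespace arma;

int main()
{
    // known sample positions and values (made-up data)
    vec timeArray  = linspace<vec>(0, 9, 10);  // 0, 1, ..., 9
    vec tmpIN      = square(timeArray);        // values at those positions

    // query positions (the "delays"), deliberately between grid points
    vec delayArray = linspace<vec>(0.5, 8.5, 9);

    vec tmpOut;
    // linear interpolation; 0 is used for out-of-range query points
    interp1(timeArray, tmpIN, delayArray, tmpOut, "linear", 0);

    tmpOut.print("interpolated:");
    return 0;
}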

cuRAND expected an expression

I'm learning C++ for a project, and for my project I need to generate a random number on the GPU.
For this, I decided to use cuRAND.
However, I'm running into a small issue on this line:
random << <1, 1 >> >(time(NULL), gpu_x);
I get the error "expected an expression" on that line.
I'm using this code, which I got from here:
__global__ void random(unsigned int seed, int* result) {
    /* CUDA's random number library uses curandState_t to keep track of the seed value
       we will store a random state for every thread */
    curandState_t state;
    /* we have to initialize the state */
    curand_init(seed, /* the seed controls the sequence of random values that are produced */
                0,    /* the sequence number is only important with multiple cores */
                0,    /* the offset is how much extra we advance in the sequence for each call, can be 0 */
                &state);
    /* curand works like rand - except that it takes a state as a parameter */
    *result = curand(&state) % MAX;
}

void Miner::GoMine() {
    int* gpu_x;
    cudaMalloc((void**)&gpu_x, sizeof(int));
    /* invoke the GPU to initialize all of the random states */
    random << <1, 1 >> >(time(NULL), gpu_x);
    /* copy the random number back */
    int x;
    cudaMemcpy(&x, gpu_x, sizeof(int), cudaMemcpyDeviceToHost);
    printf("Random number = %d.\n", x);
    /* free the memory we allocated */
    cudaFree(gpu_x);
}
As I am new to C++, I couldn't figure out what is going on.
I hope somebody here is able to help me.
Cheers
I managed to fix the issue by placing the CUDA-related code in cuRAND.cu (Add -> New Item -> CUDA 9.0 -> Code -> CUDA C/C++ File).
I renamed the function void Miner::GoMine() to int cuRND().
I added some extra code so my entire cuRAND.cu file now looks like this:
// For the RNG using CUDA
#include <curand.h>
#include <curand_kernel.h>
#include <iomanip>
#include "sha256.h"

#ifndef __Kernel_CU__
#define __Kernel_CU__

#define MAX 100

__global__ void random(unsigned int seed, int* result) {
    /* CUDA's random number library uses curandState_t to keep track of the seed value
       we will store a random state for every thread */
    curandState_t state;
    /* we have to initialize the state */
    curand_init(seed, /* the seed controls the sequence of random values that are produced */
                0,    /* the sequence number is only important with multiple cores */
                0,    /* the offset is how much extra we advance in the sequence for each call, can be 0 */
                &state);
    /* curand works like rand - except that it takes a state as a parameter */
    *result = curand(&state) % MAX;
}

extern "C"
int cuRND() {
    int* gpu_x;
    cudaMalloc((void**)&gpu_x, sizeof(int));
    /* invoke the GPU to initialize all of the random states */
    random<<<1, 1>>>(time(NULL), gpu_x);
    /* copy the random number back */
    int x;
    cudaMemcpy(&x, gpu_x, sizeof(int), cudaMemcpyDeviceToHost);
    /* free the memory we allocated */
    cudaFree(gpu_x);
    return floor(99999999 * x);
}
#endif
I then proceeded to add this code to my miner.cpp (which is the file I need it in):
extern "C"
int cuRND();
I can now make a call to cuRND() from my miner.cpp.
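For completeness, a stripped-down caller (using a plain main() instead of my Miner::GoMine(), purely as a sketch) looks like this:
#include <cstdio>

extern "C" int cuRND();   // implemented in cuRAND.cu, compiled by nvcc

int main()
{
    int r = cuRND();      // launches the kernel and copies the result back to the host
    std::printf("Random number = %d.\n", r);
    return 0;
}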
Hit start, and I was off to the races!
Thanks for the help, I hope this answer can help somebody later down the road!

Butterworth Nth order filter design

I am looking for a function which calculates Butterworth Nth-order filter design coefficients, like the MATLAB function:
[bl,al]=butter(but_order,Ws);
and
[bh,ah]=butter(but_order,2*bandwidth(1)/fs,'high');
I found many examples of calculating 2nd order, but not Nth order (for example, I work with order 18 ...). Unfortunately, I don't have any knowledge about DSP.
Do you know of any library, or a way to easily implement this method, when I know just the order, cut-off frequency and sample rate? I just need to get vectors of B (numerator) and A (denominator) coefficients.
There is also a requirement that the method works under different platforms - Windows, Linux, ...
A suitable library can easily be found (in Debian or Ubuntu):
$ aptitude search ~dbutterworth | grep lib
Which gives you answer immediately:
p librtfilter-dev - realtime digital filtering library (dev)
p librtfilter1 - realtime digital filtering library
p librtfilter1-dbg - realtime digital filtering library (dbg)
So you need library called rtfilter. Description:
rtfilter is a library that provides a set of routines implementing realtime digital filter for multichannel signals (i.e. filtering multiple signals with the same filter parameters). It implements FIR, IIR filters and downsampler for float and double data type (both for real and complex valued signal). Additional functions are also provided to design few usual filters: Butterworth, Chebyshev, windowed sinc, analytical filter...
This library is cross-platform, i.e. works under Linux, MacOS and Windows. From
official site:
rtfilter should compile and run on any POSIX platform (GNU/Linux, MacOSX, BSD...) and Windows platforms.
You can install it like this:
$ sudo aptitude install librtfilter-dev librtfilter1
After -dev package is installed, you can even find an example (with Butterworth filter usage) at /usr/share/doc/librtfilter1/examples/butterworth.c. This example (along with corresponding Makefile) also can be found here.
Particularly you are interested in rtf_create_butterworth() function. You can access documentation for this function via command:
$ man rtf_create_butterworth
or you can read it here.
You can specify any filter order by passing it as the num_pole param to the rtf_create_butterworth() function (as far as I remember, the number of poles is the same thing as the filter order).
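To give a feel for the intended use, here is a minimal sketch; the exact signatures of rtf_create_butterworth(), rtf_filter() and rtf_destroy_filter() should be double-checked against the man pages, and the parameter values are just placeholders:
#include <rtfilter.h>

#define NS 256                       /* samples per processing chunk */

int main(void)
{
    float in[NS] = {0}, out[NS];     /* one channel of float samples */
    hfilter filt;

    /* order-18 lowpass Butterworth, cutoff 8 Hz at fs = 512 Hz */
    filt = rtf_create_butterworth(1, RTF_FLOAT, 8.0/512.0, 18, 0);
    if (filt == NULL)
        return 1;

    /* ... fill `in` with samples, then filter a chunk of NS samples ... */
    rtf_filter(filt, in, out, NS);

    rtf_destroy_filter(filt);
    return 0;
}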
UPDATE
This library doesn't provide external API for coefficients calculation. It only provides actual filtering capabilities, so you can use rtf_filter() to obtain samples (data) after filtering.
But you can find the code for the coefficients calculation in the library sources. See the compute_cheby_iir() function. This function is static, so it can only be used inside the library itself. But you can copy this function's code as is into your project and use it. Also, don't let the name of this function confuse you: it is the same algorithm for both Butterworth and Chebyshev filter coefficients calculation.
Let's say you have prepared the parameters for the rtf_create_butterworth() function:
const double cutoff = 8.0; /* cutoff frequency, in Hz */
const double fs = 512.0; /* sampling rate, in Hz */
unsigned int nchann = 1; /* channels number */
int proctype = RTF_FLOAT; /* samples have float type */
double fc = cutoff / fs; /* normalized cut-off frequency (dimensionless) */
unsigned int num_pole = 2; /* filter order */
int highpass = 0; /* lowpass filter */
Now you want to calculate numerator and denominator for your filter. I have written the wrapper for you:
#include <stdlib.h>   /* for calloc()/free() */

struct coeff {
    double *num;
    double *den;
};

/* TODO: Insert compute_cheby_iir() function here, from library:
 * https://github.com/nbourdau/rtfilter/blob/master/src/common-filters.c#L250
 */

/* Calculate coefficients for Butterworth filter.
 * coeff: contains calculated coefficients
 * Returns 0 on success or negative value on failure.
 */
static int calc_coeff(unsigned int nchann, int proctype, double fc,
                      unsigned int num_pole, int highpass,
                      struct coeff *coeff)
{
    double *num = NULL, *den = NULL;
    double ripple = 0.0;
    int res = 0;

    if (num_pole % 2 != 0)
        return -1;

    num = calloc(num_pole+1, sizeof(*num));
    if (num == NULL)
        return -2;
    den = calloc(num_pole+1, sizeof(*den));
    if (den == NULL) {
        res = -3;
        goto err1;
    }

    /* Prepare the z-transform of the filter */
    if (!compute_cheby_iir(num, den, num_pole, highpass, ripple, fc)) {
        res = -4;
        goto err2;
    }

    coeff->num = num;
    coeff->den = den;
    return 0;

err2:
    free(den);
err1:
    free(num);
    return res;
}
You can use this wrapper like this:
#include <stdio.h>    /* for printf()/fprintf() */
#include <stdlib.h>   /* for free() and EXIT_* */

int main(void)
{
    struct coeff coeff;
    int res;
    int i;

    /* Calculate coefficients (nchann, proctype, fc, num_pole, highpass as prepared above) */
    res = calc_coeff(nchann, proctype, fc, num_pole, highpass, &coeff);
    if (res != 0) {
        fprintf(stderr, "Error: unable to calculate coefficients: %d\n", res);
        return EXIT_FAILURE;
    }

    /* TODO: Work with calculated coefficients here (coeff.num, coeff.den) */
    for (i = 0; i < num_pole+1; ++i)
        printf("num[%d] = %f\n", i, coeff.num[i]);
    for (i = 0; i < num_pole+1; ++i)
        printf("den[%d] = %f\n", i, coeff.den[i]);

    /* Don't forget to free memory allocated in calc_coeff() */
    free(coeff.num);
    free(coeff.den);
    return EXIT_SUCCESS;
}
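Assuming the copied compute_cheby_iir() and the code above are saved together in a single C file (butter_coeffs.c here; the name is arbitrary), a plain compile line like the following should be enough; -lm is needed for the trigonometric calls inside compute_cheby_iir():
$ gcc -Wall -o butter_coeffs butter_coeffs.c -lm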
If you are interested in math background for those coefficients calculation, look at DSP Guide, chapter 33.

MKL library behaving differently in mex-files and in standalone C++

I am trying to get GPc (https://github.com/SheffieldML/GPc) working in MATLAB, using MEX-files. I got the examples working; I took the bit I'm currently interested in out as a standalone C++ program, and that works just fine. However, when I try to do the same in a MEX file and run it through MATLAB, I'm getting some errors, in particular:
MKL ERROR: Parameter 4 was incorrect on entry to DPOTRF.
or
** On entry to DPOTRF parameter number 4 had an illegal value
depending on whether I use the system version of MKL or the one Matlab carries along. The call to dpotrf is:
dpotrf_(type, nrows, vals, nrows, info);
with all variables valid (type="U", nrows=40, vals = double[40*40]) and with the interface:
extern "C" void dpotrf_(
const char* t, // whether upper or lower triangluar 'U' or 'L'
const int &n, // (input)
double *a, // a[n][lda] (input/output)
const int &lda, // (input)
int &info // (output)
);
(both are taken from GPc). LDA was originally supplied as ncols (which I believe is incorrect, but I haven't asked the library author about it yet), but it shouldn't make a difference, because this is called on a square matrix.
I feared that there might be a problem with the references, so I changed the interface header to accept int* (like in http://www.netlib.org/clapack/clapack-3.2.1-CMAKE/SRC/dpotrf.c), but that started giving me segfaults, so it made me think the references there are right.
Does anybody have an idea what might be wrong?
I've tried to reproduce with an example on my end, but I'm not seeing any errors. In fact the result is identical to MATLAB's.
mex_chol.cpp
#include "mex.h"
#include "lapack.h"
void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[])
{
// verify arguments
if (nrhs != 1 || nlhs > 1) {
mexErrMsgTxt("Wrong number of arguments.");
}
if (!mxIsDouble(prhs[0]) || mxIsComplex(prhs[0])) {
mexErrMsgTxt("Input must be a real double matrix.");
}
if (mxGetM(prhs[0]) != mxGetN(prhs[0])) {
mexErrMsgTxt("Input must be a symmetric positive-definite matrix.");
}
// copy input matrix to output (its contents will be overwritten)
plhs[0] = mxDuplicateArray(prhs[0]);
// pointer to data
double *A = mxGetPr(plhs[0]);
mwSignedIndex n = mxGetN(plhs[0]);
// perform matrix factorization
mwSignedIndex info = 0;
dpotrf("U", &n, A, &n, &info);
// check if call was successful
if (info < 0) {
mexErrMsgTxt("Parameters had an illegal value.");
} else if (info > 0) {
mexErrMsgTxt("Matrix is not positive-definite.");
}
}
Note that MATLAB already ships with BLAS/LAPACK headers and libraries (Intel MKL implementation). In fact, this is what $MATLABROOT\extern\include\lapack.h has as the function prototype for dpotrf:
#define dpotrf FORTRAN_WRAPPER(dpotrf)
extern void dpotrf(
    char *uplo,
    ptrdiff_t *n,
    double *a,
    ptrdiff_t *lda,
    ptrdiff_t *info
);
Here is how you compile the above C++ code:
>> mex -largeArrayDims mex_chol.cpp libmwblas.lib libmwlapack.lib
Finally let's test the MEX function:
% some random symmetric positive semidefinite matrix
A = gallery('randcorr',10);
% our MEX-version of Cholesky decomposition
chol2 = @(A) triu(mex_chol(A));
% compare
norm(chol(A) - chol2(A)) % I get 0
(Note that the MEX code returns the working matrix as is, where the LAPACK routine only overwrites half of the matrix. So I used TRIU to zero-out the other half and extract the upper part).

matPutVariable: error (matrix::serialize::WrongSize) trying to output data to mat file

I have created a simulator in C/C++ that is supposed to output the results in a .mat file that can be imported into some visualization tools in MATLAB.
During the simulation, results are stored in a data buffer. The buffer is a std::map<const char *, double *>, where the string should be the same name as the corresponding MATLAB struct field, and the double* is the buffered data.
At the end of the simulation I then use the following code to write the buffered data into a .mat file
const char **fieldnames;  // Declared and populated in another class method
int numFields;            // Declared in another method. Equal to fieldnames length.
int buffer_size;          // Declared in another method. Equal to number of timesteps in simulation.
std::map<const char *, double *> field_data;
std::map<const char *, mxArray *> field_matrices;

// Open .mat file
MATFile *pmat = matOpen(filename.str().c_str(), "w");

// Create an empty Matlab struct of the right size
mxArray *SimData_struct = mxCreateStructMatrix(1,1,this->numFields,this->fieldnames);

int rows=this->buffer_size, cols=1;
for(int i=0; i<this->numFields; i++) {
    // Create an empty matlab array for each struct field
    field_matrices[this->fieldnames[i]] = mxCreateDoubleMatrix(rows, cols, mxREAL);
    // Copy data from buffers to struct fields
    memcpy(mxGetPr(field_matrices[this->fieldnames[i]]), this->field_data[this->fieldnames[i]], rows * cols * sizeof(double));
    // Insert arrays into the struct
    mxSetField(SimData_struct,0,this->fieldnames[i],field_matrices[this->fieldnames[i]]);
}

matPutVariable(pmat, object_name.str().c_str(), SimData_struct);
I can compile and start the simulation, but it dies with an error when the matPutVariable command is reached. The error I get is terminate called after throwing an instance of 'matrix::serialize::WrongSize'. I have tried to google for more information, but have been unable to find something that could help me.
MathWorks support helped me identify the cause of the issue. My application uses Boost 1.55, but MATLAB uses 1.49. There was a clash between those dependencies, which was solved by adding an additional run-time library path for the external dependencies:
-Wl,-rpath={matlab path}/bin/glnxa64
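For reference, on Linux the flag is passed when linking the standalone program; a link line against the MAT API might look roughly like this (simulator.o is a placeholder for your own object files, and {matlab path} is as above):
$ g++ simulator.o -o simulator \
      -L{matlab path}/bin/glnxa64 -lmat -lmx \
      -Wl,-rpath={matlab path}/bin/glnxa64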
I tried to reproduce the error with a simple example, but I don't see the problem. Here is my code:
test_mat_api.cpp
#include "mat.h"
#include <algorithm>
int main()
{
// output MAT-file
MATFile *pmat = matOpen("out.mat", "w");
// create a scalar struct array with two fields
const char *fieldnames[2] = {"a", "b"};
mxArray *s = mxCreateStructMatrix(1, 1, 2, fieldnames);
// fill struct fields
for (mwIndex i=0; i<2; i++) {
// 10x1 vector
mxArray *arr = mxCreateDoubleMatrix(10, 1, mxREAL);
double *x = mxGetPr(arr);
std::fill(x, x+10, i);
// assign field
mxSetField(s, 0, fieldnames[i], arr);
}
// write struct to MAT-file
matPutVariable(pmat, "my_struct", s);
// cleanup
mxDestroyArray(s);
matClose(pmat);
return 0;
}
First I compile the standalone program:
>> mex -client engine -largeArrayDims test_mat_api.cpp
Next I run the executable:
>> !test_mat_api.exe
Finally I load the created MAT-file in MATLAB:
>> whos -file out.mat
  Name           Size            Bytes  Class     Attributes
  my_struct      1x1               512  struct
>> load out.mat
>> my_struct
my_struct =
    a: [10x1 double]
    b: [10x1 double]
>> (my_struct.b)'
ans =
1 1 1 1 1 1 1 1 1 1
So everything runs successfully (I'm using MATLAB R2014a on Windows x64).