I use the following code to perform in-place forward-backward FIR filtering:
lena = len(a)
lenb = len(b)
convol = zeros(a.shape)
code = """
// Forward convolution
int pad = (lenb-1)/2;
int i, j;
for (i=pad; i<pad+lena; i++)
{
int kmin, kmax, k;
// Reverse indexing for the next pass
j = lena-1-i+pad;
convol(j) = 0;
kmin = (i >= lenb - 1) ? i - (lenb - 1) : 0;
kmax = (i < lena - 1) ? i : lena - 1;
for (k = kmin; k <= kmax; k++)
{
convol(j) += a(k)*b(i - k);
}
}
// Backward convolution (the signal in convol has been
// reversed using reversed indexes)
for (i=pad; i<pad+lena; i++)
{
int kmin, kmax, k;
// Reverse indexing for reordering the output vector
j = lena-1-i+pad;
a(j) = 0;
kmin = (i >= lenb - 1) ? i - (lenb - 1) : 0;
kmax = (i < lena - 1) ? i : lena - 1;
for (k = kmin; k <= kmax; k++)
{
a(j) += convol(k)*b(i - k);
}
}
return_val = 1;
"""
weave.inline(code, [ 'a', 'b', 'lena', 'lenb', 'convol'],
type_converters=converters.blitz, compiler = 'g++')
Of course the "convol" variable does not need to be visible outside the C/C++ scope, and I want both memory and processing efficiency. So it makes sense (at least to me) to replace this code with the following:
lena = len(a)
lenb = len(b)
code = """
// Forward convolution
int pad = (lenb-1)/2;
int i, j;
float* convol = (float*) calloc(lena, sizeof(float));
for (i=pad; i<pad+lena; i++)
{
int kmin, kmax, k;
// Reverse indexing for the next pass
j = lena-1-i+pad;
convol[j] = 0;
kmin = (i >= lenb - 1) ? i - (lenb - 1) : 0;
kmax = (i < lena - 1) ? i : lena - 1;
for (k = kmin; k <= kmax; k++)
{
convol[j] += a(k)*b(i - k);
}
}
// Backward convolution (the signal in convol has been
// reversed using reversed indexes)
for (i=pad; i<pad+lena; i++)
{
int kmin, kmax, k;
// Reverse indexing for reordering the output vector
j = lena-1-i+pad;
a(j) = 0;
kmin = (i >= lenb - 1) ? i - (lenb - 1) : 0;
kmax = (i < lena - 1) ? i : lena - 1;
for (k = kmin; k <= kmax; k++)
{
a(j) += convol[k]*b(i - k);
}
}
free(convol);
return_val = 1;
"""
weave.inline(code, [ 'a', 'b', 'lena', 'lenb'],
type_converters=converters.blitz, compiler = 'g++')
The only difference is that instead of a NumPy array I use a plain C float array directly. The problem is that the second version takes about twice as long to run as the first one… why is that? Is there something wrong with the second version? It should be faster!
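For reference, here is a self-contained C++ port of the same forward-backward kernel operating on plain double vectors; it is only a sketch of mine for timing the kernel outside weave (arr and coeff stand in for a and b, and the helper name filtfiltFIR is made up):
#include <vector>

// The forward pass writes into convol at reversed positions, the backward pass
// writes the re-reversed result back into arr, mirroring the weave code above.
void filtfiltFIR(std::vector<double>& arr, const std::vector<double>& coeff)
{
    const int lena = static_cast<int>(arr.size());
    const int lenb = static_cast<int>(coeff.size());
    const int pad  = (lenb - 1) / 2;
    std::vector<double> convol(lena, 0.0);

    for (int pass = 0; pass < 2; ++pass) {
        const std::vector<double>& src = (pass == 0) ? arr : convol;
        std::vector<double>&       dst = (pass == 0) ? convol : arr;
        for (int i = pad; i < pad + lena; ++i) {
            int j    = lena - 1 - i + pad;                    // reversed output index
            int kmin = (i >= lenb - 1) ? i - (lenb - 1) : 0;
            int kmax = (i < lena - 1) ? i : lena - 1;
            dst[j] = 0.0;
            for (int k = kmin; k <= kmax; ++k)
                dst[j] += src[k] * coeff[i - k];
        }
    }
}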
I would like to use this "Levenshtein" function to assess the similarity between two strings (to check whether the user has made a spelling mistake).
Since I work in null-safe mode, the analyzer reports an error on the List constructor:
List<List<int>> d = List.generate(sa + 1, (int i) => List(sb + 1));
What can I write to replace List(sb + 1)?
int levenshtein(String a, String b) {
a = a.toUpperCase();
b = b.toUpperCase();
int sa = a.length;
int sb = b.length;
int i, j, cost, min1, min2, min3;
int levenshtein;
// ignore: deprecated_member_use
List<List<int>> d = List.generate(sa + 1, (int i) => List(sb + 1));
if (a.length == 0) {
levenshtein = b.length;
return (levenshtein);
}
if (b.length == 0) {
levenshtein = a.length;
return (levenshtein);
}
for (i = 0; i <= sa; i++) d[i][0] = i;
for (j = 0; j <= sb; j++) d[0][j] = j;
for (i = 1; i <= a.length; i++)
for (j = 1; j <= b.length; j++) {
if (a[i - 1] == b[j - 1])
cost = 0;
else
cost = 1;
min1 = (d[i - 1][j] + 1);
min2 = (d[i][j - 1] + 1);
min3 = (d[i - 1][j - 1] + cost);
d[i][j] = min(min1, min(min2, min3));
}
levenshtein = d[a.length][b.length];
return (levenshtein);
}
You can use List.generate for the inner list as well.
List<List<int>> d = List.generate(sa + 1, (int i) => List.generate(sb + 1, (int j) => 0));
Also, if they are all going to be initialized to 0 you might be tempted to write this:
List<List<int>> d = List.filled(sa + 1, List.filled(sb + 1, 0));
but note that List.filled reuses the same inner list object for every row, so all rows would alias each other. Since the rows are mutated independently here, stick with List.generate for the outer list.
I'm trying to implement Interpolation by relaxed cubic splines, which can be found in the 5th chapter of this article (page 9):
https://www.math.ucla.edu/~baker/149.1.02w/handouts/dd_splines.pdf
So far, I have the following:
auto GetControlPoints = [](const std::vector<Vector3d>& S) {
int n = S.size();
float var = n - 1.0f;
MatrixXd M(n - 1, n - 1);
VectorXd C[3] = {
VectorXd(n - 1),
VectorXd(n - 1),
VectorXd(n - 1)
};
for (int i = 0; i < n - 1; ++i) {
auto r = RowVectorXd(n - 1);
for (int j = 0; j < n - 1; ++j) {
if (j == i)
r[j] = var;
else if (j == i - 1 || j == i + 1)
r[j] = 1.f;
else
r[j] = 0.f;
}
M.row(i) = r;
if (i == 0) {
for (int j = 0; j < 3; ++j) {
C[j] << (n + 1) * S[1][j] - S[0][j];
}
}
else if (i == n - 1) {
for (int j = 0; j < 3; ++j) {
C[j] << (n + 1) * S[n - 1][j] - S[n][j];
}
}
else {
for (int j = 0; j < 3; ++j) {
C[j] << (n + 1) * S[i][j];
}
}
}
MatrixXd augMC[3] = {
MatrixXd(n - 1, n),
MatrixXd(n - 1, n),
MatrixXd(n - 1, n)
};
for (int i = 0; i < 3; ++i) {
augMC[i].block(0, 0, n - 1, n - 1) = M;
augMC[i].block(n - 1, n - 1, n - 1, 1) = C[i].transpose();
}
};
I got to the point where I built an augmented matrix from M and C, but I have no idea how to row-reduce it. Any thoughts?
You could use the in-place variant of PartialPivLU, but it looks like you actually want to solve the system M*B = C, for which you should just decompose M (as it is symmetric you can use an LLT or LDLT decomposition) and then use the solve method of that decomposition.
To set up M you should also use the diagonal() method (untested):
MatrixXd M(n - 1, n - 1);
M.setZero();
M.diagonal().setConstant(n - 1.0);
M.diagonal<1>().setOnes();
M.diagonal<-1>().setOnes();
LLT<MatrixXd> lltOfM(M);
for (int i = 0; i < 3; ++i) { B[i] = lltOfM.solve(C[i]); }
For large n this is sub-optimal, since it does not exploit the tridiagonal structure of M. You could try out the sparse module for this, but there should actually be a direct algorithm (Eigen does not explicitly have a tridiagonal matrix type, though).
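If you do want to exploit the structure, a sparse solve might look like this; it is only a sketch of mine (untested), keeping the same matrix entries as above and taking a single right-hand side rhs:
#include <Eigen/Dense>
#include <Eigen/Sparse>
#include <vector>

// Solve M * x = rhs, where M is the (n-1)x(n-1) tridiagonal matrix with
// (n - 1) on the main diagonal and 1 on the sub-/super-diagonals.
Eigen::VectorXd solveTridiagonal(int n, const Eigen::VectorXd& rhs)
{
    std::vector<Eigen::Triplet<double> > triplets;
    for (int i = 0; i < n - 1; ++i) {
        triplets.push_back(Eigen::Triplet<double>(i, i, n - 1.0));                // diagonal
        if (i > 0)     triplets.push_back(Eigen::Triplet<double>(i, i - 1, 1.0)); // sub-diagonal
        if (i < n - 2) triplets.push_back(Eigen::Triplet<double>(i, i + 1, 1.0)); // super-diagonal
    }
    Eigen::SparseMatrix<double> M(n - 1, n - 1);
    M.setFromTriplets(triplets.begin(), triplets.end());
    // M is symmetric and diagonally dominant, so a sparse Cholesky factorization works.
    Eigen::SimplicialLDLT<Eigen::SparseMatrix<double> > solver(M);
    return solver.solve(rhs);
}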
For C you probably could also use a MatrixX3d (I don't really understand how you fill your C vectors -- I think your current code should assert at run-time, unless n==2, or you disabled assertions).
Hello guys, I am having the following problem:
I have an array whose length is a multiple of 4, e.g.:
{1,2,3,4,5,6,7,8}
I want to know how I can get the numbers in the following pairs: {1,4}, {2,3}, {5,8}, {6,7}, ... (etc.)
Suppose I loop through them and I want to get the index of the pair member from my current index:
int myarr[8]={1,2,3,4,5,6,7,8};
for(int i=0;i<8;i++)
j = func(i)
I have thought of something like this:
f(1)=4
f(4)=1
and I would take f(i) = a * i + b (I think a linear function is enough). That gives f(i) = j = -i + 5. How can I generalise this for more than 4 members? What do you do in cases where you need a general formula for pairing elements?
Basically, if i is odd then j would be i + 3, otherwise j would be i + 1:
int func(int i) {
if(i%2 != 0)
return i+3;
else
return i+1;
}
This will generate
func(1) = 4, func(2) = 3, func(5) = 8, func(6) = 7 // {1,4},{2,3},{5,8},{6,7}.
You could keep the incremental iteration and use a function that depends on the current block and the remainder, as follows.
int myarr[8] = {1,2,3,4,5,6,7,8};
int Successor(int i)
{
    int BlockStart = (i / 4) * 4; // first index of the block of 4 containing i
    int Remainder = i % 4;        // position of i inside its block
    int j = 0;
    // within a block the pairs are (0,3) and (1,2)
    if ( Remainder == 0 )
        j = 3;
    else if ( Remainder == 1 )
        j = 2;
    else if ( Remainder == 2 )
        j = 1;
    else if ( Remainder == 3 )
        j = 0;
    return BlockStart + j;
}
for(int i = 0; i < 8; i++)
{
    int j = Successor(i);
    // usage of the index
}
About the generalization, this should do it:
auto pairs(const vector<int>& in, int groupLength = 4) {
vector<pair<int, int>> result;
int groups = in.size() / groupLength;
for (int group = 0; group < groups; ++group) {
int i = group * groupLength;
int j = i + groupLength - 1;
while (i < j) {
result.emplace_back(in[i++], in[j--]);
}
}
return result;
}
If you are just looking for a formula to calculate the indices, then in general case it's:
int f(int i, int k = 4) {
return i + k - 2 * (i % k) - 1;
}
Turns out your special case (size 4) is sequence A004444 in OEIS.
In general you have "nimsum n + (size-1)", i.e. n XOR (size - 1), which pairs indices within each block whenever the group size is a power of two.
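A small sketch of mine illustrating that observation (partner is a made-up name; it assumes the group size is a power of two so the XOR trick applies):
#include <cassert>

// Partner index = block start + (offset XOR (size - 1)); valid when size is a power of two.
int partner(int i, int size = 4)
{
    int blockStart = (i / size) * size;          // first index of i's block
    int offset     = i % size;                   // position of i inside the block
    return blockStart + (offset ^ (size - 1));   // XOR mirrors the offset within the block
}

int main()
{
    assert(partner(0) == 3 && partner(1) == 2);  // indices of the pairs {1,4} and {2,3}
    assert(partner(4) == 7 && partner(5) == 6);  // indices of the pairs {5,8} and {6,7}
    return 0;
}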
I am trying to implement an alpha-trimmed mean filter with the OpenCV library. My code is not working properly and the resulting image does not look like a filtered image.
The filter should work in the following way:
Choose a window of pixels; in my example it is 9 pixels (a 3x3 window).
Order them in increasing order.
Cut the 'array' on both sides by alpha/2.
Calculate the arithmetic mean of the remaining pixels and insert it in the proper place.
int alphatrimmed(Mat img, int alpha)
{
Mat img9 = img.clone();
const int start = alpha/2 ;
const int end = 9 - (alpha/2);
//going through whole image
for (int i = 1; i < img.rows - 1; i++)
{
for (int j = 1; j < img.cols - 1; j++)
{
uchar element[9];
Vec3b element3[9];
int k = 0;
int a = 0;
//selecting elements for window 3x3
for (int m = i -1; m < i + 2; m++)
{
for (int n = j - 1; n < j + 2; n++)
{
element3[a] = img.at<Vec3b>(m*img.cols + n);
a++;
for (int c = 0; c < img.channels(); c++)
{
element[k] += img.at<Vec3b>(m*img.cols + n)[c];
}
k++;
}
}
//comparing and sorting elements in window (uchar element [9])
for (int b = 0; b < end; b++)
{
int min = b;
for (int d = b + 1; d < 9; d++)
{
if (element[d] < element[min])
{
min = d;
const uchar temp = element[b];
element[b] = element[min];
element[min] = temp;
const Vec3b temporary = element3[b];
element3[b] = element3[min];
element3[min] = temporary;
}
}
}
// index in resultant image( after alpha-trimmed filter)
int result = (i - 1) * (img.cols - 2) + j - 1;
for (int l = start ; l < end; l++)
img9.at<Vec3b>(result) += element3[l];
img9.at<Vec3b>(result) /= (9 - alpha);
}
}
namedWindow("AlphaTrimmed Filter", WINDOW_AUTOSIZE);
imshow("AlphaTrimmed Filter", img9);
return 0;
}
Without actual data it's somewhat of a guess, but a uchar can't hold the sum of 3 channels. It wraps around modulo 256 (at least on any platform OpenCV supports).
The proper solution is std::sort with a proper comparator for your Vec3b:
bool L1(const Vec3b& a, const Vec3b& b) { return a[0]+a[1]+a[2] < b[0]+b[1]+b[2]; }
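With that comparator, the hand-rolled selection sort and the uchar accumulation could be replaced by something along these lines; this is only a sketch of mine (untested), where window, start and end follow the question's conventions and alphaTrimmedMean is a made-up helper name:
#include <algorithm>
#include <opencv2/core.hpp>
using cv::Vec3b;

// Order pixels by the sum of their channels (the comparator from above).
bool L1(const Vec3b& a, const Vec3b& b) { return a[0]+a[1]+a[2] < b[0]+b[1]+b[2]; }

// Alpha-trimmed mean of one 3x3 window; start = alpha/2, end = 9 - alpha/2.
Vec3b alphaTrimmedMean(Vec3b window[9], int start, int end)
{
    std::sort(window, window + 9, L1);           // sort the window by L1 intensity
    int sum[3] = {0, 0, 0};                      // int accumulators avoid uchar wrap-around
    for (int l = start; l < end; ++l)
        for (int c = 0; c < 3; ++c)
            sum[c] += window[l][c];
    int kept = end - start;                      // pixels left after trimming both sides
    return Vec3b((uchar)(sum[0] / kept), (uchar)(sum[1] / kept), (uchar)(sum[2] / kept));
}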
This is a problem I have been struggling with for a week, coming back to it only to give up after wasted hours...
I am supposed to find the coefficients for the following Laguerre polynomials:
P0(x) = 1
P1(x) = 1 - x
Pn(x) = ((2n - 1 - x) / n) * Pn-1(x) - ((n - 1) / n) * Pn-2(x)
I believe there is an error in my implementation, because for some reason the coefficients I get seem way too big. This is the output the program generates:
a1 = -190.234
a2 = -295.833
a3 = 378.283
a4 = -939.537
a5 = 774.861
a6 = -400.612
Description of the code (given below):
If you scroll down a little to the part where I declare array, you'll find the given x's and y's.
The function polynomial just fills an array with values of said polynomial for a certain x. It's a recursive function. I believe it works well, because I have checked the output values.
The gauss function finds the coefficients by performing Gaussian elimination on the output array. I think this is where the problems begin. I am wondering whether there's a mistake in this code, or perhaps my method of verifying the results is bad? I am trying to verify them like this:
-190.234 * 1.5^5 - 295.833 * 1.5^4 ... - 400.612 = -3017.817625 ≠ 2
Code:
#include "stdafx.h"
#include <conio.h>
#include <iostream>
#include <iomanip>
#include <math.h>
using namespace std;
double polynomial(int i, int j, double **tab)
{
double n = i;
double **array = tab;
double x = array[j][0];
if (i == 0) {
return 1;
} else if (i == 1) {
return 1 - x;
} else {
double minusone = polynomial(i - 1, j, array);
double minustwo = polynomial(i - 2, j, array);
double result = (((2.0 * n) - 1 - x) / n) * minusone - ((n - 1.0) / n) * minustwo;
return result;
}
}
int gauss(int n, double tab[6][7], double results[7])
{
double multiplier, divider;
for (int m = 0; m <= n; m++)
{
for (int i = m + 1; i <= n; i++)
{
multiplier = tab[i][m];
divider = tab[m][m];
if (divider == 0) {
return 1;
}
for (int j = m; j <= n; j++)
{
if (i == n) {
break;
}
tab[i][j] = (tab[m][j] * multiplier / divider) - tab[i][j];
}
for (int j = m; j <= n; j++) {
tab[i - 1][j] = tab[i - 1][j] / divider;
}
}
}
double s = 0;
results[n - 1] = tab[n - 1][n];
int y = 0;
for (int i = n-2; i >= 0; i--)
{
s = 0;
y++;
for (int x = 0; x < n; x++)
{
s = s + (tab[i][n - 1 - x] * results[n-(x + 1)]);
if (y == x + 1) {
break;
}
}
results[i] = tab[i][n] - s;
}
}
int _tmain(int argc, _TCHAR* argv[])
{
int num;
double **array;
array = new double*[5];
for (int i = 0; i <= 5; i++)
{
array[i] = new double[2];
}
//i 0 1 2 3 4 5
array[0][0] = 1.5; //xi 1.5 2 2.5 3.5 3.8 4.1
array[0][1] = 2; //yi 2 5 -1 0.5 3 7
array[1][0] = 2;
array[1][1] = 5;
array[2][0] = 2.5;
array[2][1] = -1;
array[3][0] = 3.5;
array[3][1] = 0.5;
array[4][0] = 3.8;
array[4][1] = 3;
array[5][0] = 4.1;
array[5][1] = 7;
double W[6][7]; //n + 1
for (int i = 0; i <= 5; i++)
{
for (int j = 0; j <= 5; j++)
{
W[i][j] = polynomial(j, i, array);
}
W[i][6] = array[i][1];
}
for (int i = 0; i <= 5; i++)
{
for (int j = 0; j <= 6; j++)
{
cout << W[i][j] << "\t";
}
cout << endl;
}
double results[6];
gauss(6, W, results);
for (int i = 0; i < 6; i++) {
cout << "a" << i + 1 << " = " << results[i] << endl;
}
_getch();
return 0;
}
I believe your interpretation of the recursive polynomial generation either needs revising or is a bit too clever for me.
Given P[0][5] = {1,0,0,0,0,...} and P[1][5] = {1,-1,0,0,0,...} (coefficient arrays, lowest degree first),
then P[2] is a*P[0] + convolution(P[1], { c, d }),
where a = -((n - 1) / n), c = (2n - 1)/n and d = -1/n.
This can be generalized: P[n] == a*P[n-2] + conv(P[n-1], { c, d }).
In every step there is a polynomial multiplication of P[n-1] by (c + d*x), which increases the degree by one (just by one...), plus P[n-2] multiplied by the scalar a.
Then most likely the interpolation factor x is in range [0..1].
(Convolution means that you should implement polynomial multiplication, which luckily is easy...)
  [a, b, c, d]
* [e, f]
-----------------------
      af, bf, cf, df
+ ae, be, ce, de,  0
-----------------------
(= coefficients of the final polynomial)
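A minimal sketch of that scheme (my own illustration, untested), with coefficients stored lowest degree first in std::vector<double>; mulByLinear and laguerreCoeffs are made-up names:
#include <vector>

// Multiply a coefficient array by (c + d*x): plain convolution with {c, d}.
std::vector<double> mulByLinear(const std::vector<double>& p, double c, double d)
{
    std::vector<double> out(p.size() + 1, 0.0);
    for (int i = 0; i < (int)p.size(); ++i) {
        out[i]     += c * p[i];   // contribution of c * x^i
        out[i + 1] += d * p[i];   // contribution of d * x^(i+1)
    }
    return out;
}

// Coefficients of P[n] from P[n] = ((2n - 1 - x)/n) * P[n-1] - ((n - 1)/n) * P[n-2].
std::vector<double> laguerreCoeffs(int n)
{
    std::vector<double> prev2 = {1.0};        // P0(x) = 1
    std::vector<double> prev1 = {1.0, -1.0};  // P1(x) = 1 - x
    if (n == 0) return prev2;
    if (n == 1) return prev1;
    for (int k = 2; k <= n; ++k) {
        double a = -(k - 1.0) / k;
        double c = (2.0 * k - 1.0) / k;
        double d = -1.0 / k;
        std::vector<double> pk = mulByLinear(prev1, c, d);  // degree goes up by one
        for (int i = 0; i < (int)prev2.size(); ++i)
            pk[i] += a * prev2[i];                          // add a * P[k-2]
        prev2 = prev1;
        prev1 = pk;
    }
    return prev1;
}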
The definition of P1(x) = x - 1 is not implemented as stated. You have 1 - x in the computation.
I did not look any further.