Sum of increasing alternate terms in python - evaluation

I need to evaluate a finite sum of terms that alternate in sign but grow in absolute value. The problem is that the absolute values grow so fast that the sum starts accumulating numerical errors. These are the function definitions: one (Gj_full) computes each term directly, the other (Gj) builds it up recursively from smaller indices. fact_quo is a simple factorial-quotient helper (n!/m!).
import numpy as np

def fact_quo(n, m=1):
    # quotient of factorials n!/m! for integers n >= m >= 0
    if (type(n) != int) or (type(m) != int):
        raise TypeError("Arguments must be integers.")
    if (n < 0) or (m < 0):
        raise ValueError("n=" + str(n) + "\t m=" + str(m))
    if (n == 0) or (n == 1) or (n == m):
        return 1
    if (n < m):
        raise ValueError("n=" + str(n) + "\t m=" + str(m))
    f = n
    while n > (m + 1):
        n -= 1
        f *= n
    return f
def Gj_full(X, Y, Xl, Yl, j=0, coef=1):
    if (X - Y + Xl + Yl) % 2 or X < Y or Y < j:
        raise ValueError
    u = (X - Y + Xl + Yl) // 2
    v = coef * (2 ** (X - Y) * fact_quo(X, Y - j) * fact_quo(u + j, j) *
                4 ** j * (-1) ** j)
    w = 3 ** (u + j) * fact_quo(X - Y + j)
    den2 = fact_quo(X) * fact_quo(Xl) * fact_quo(Yl)
    z = (np.sqrt(fact_quo(X)) * np.sqrt(fact_quo(Y))
         * np.sqrt(fact_quo(Xl)) * np.sqrt(fact_quo(Yl)))
    return (v / (den2 * w)) * z
def Gj(X, Y, Xl, Yl, j=0, coef=1):
    if (X - Y + Xl + Yl) % 2 or X < Y or Y < j:
        raise ValueError
    # start from the smallest indices with the same parity, then raise each
    # index two steps at a time, multiplying g by the ratio of consecutive terms
    kX, kY, kXl, kYl, kj = X % 2, Y % 2, Xl % 2, Yl % 2, 0
    g = coef * Gj_full(kX, kY, kXl, kYl, kj)
    while kX < X:
        u = (kX - kY + kXl + kYl) // 2
        v = 4 * (u + kj + 1)
        w = 3 * (kX + 2 - kY + kj) * (kX + 1 - kY + kj)
        g *= (v / w) * np.sqrt(kX + 2) * np.sqrt(kX + 1)
        kX += 2
    while kXl < Xl:
        u = (kX - kY + kXl + kYl) // 2
        v = u + kj + 1
        w = 3 * (kXl + 2) * (kXl + 1)
        g *= (v / w) * np.sqrt(kXl + 2) * np.sqrt(kXl + 1)
        kXl += 2
    while kYl < Yl:
        u = (kX - kY + kXl + kYl) // 2
        v = u + kj + 1
        w = 3 * (kYl + 2) * (kYl + 1)
        g *= (v / w) * np.sqrt(kYl + 2) * np.sqrt(kYl + 1)
        kYl += 2
    while kY < Y:
        u = (kX - kY + kXl + kYl) // 2
        v = 3 * (kX - kY + kj) * (kX - kY - 1 + kj)
        w = 4 * (kY + 2 - kj) * (kY + 1 - kj) * (u + kj)
        g *= (v / w) * np.sqrt(kY + 2) * np.sqrt(kY + 1)
        kY += 2
    # finally raise j one step at a time
    while kj < j:
        u = (kX - kY + kXl + kYl) // 2
        v = -4 * (kY - kj) * (u + kj + 1)
        w = 3 * (kX - kY + kj + 1) * (kj + 1)
        g *= (v / w)
        kj += 1
    return g
The (4/3) ** j factor and the factorials quickly increase the absolute value of the summed terms. The sum itself, however, is supposed to be smaller than 1. In fact, for X = Y and Xl = Yl = 0, the sum equals (-1/3) ** X.
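For reference, a minimal check of that identity might look like this (g_sum is just an illustrative helper; I am assuming the intended sum runs over j from 0 to Y, the largest j that Gj accepts):

def g_sum(X, Y, Xl, Yl):
    # sum the alternating terms over all admissible j
    return sum(Gj(X, Y, Xl, Yl, j) for j in range(Y + 1))

for X in range(13):
    s = g_sum(X, X, 0, 0)
    print(X, s, (-1/3) ** X, abs(s - (-1/3) ** X))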

Arbitrary precision for floating-point numbers is not available in Python without using a library. Therefore you should look into the decimal module; you can even set the precision. E.g.
import decimal
decimal.getcontext().prec = 100

def pi():
    pi = decimal.Decimal(0)
    for k in range(350):
        pi += (decimal.Decimal(4)/(decimal.Decimal(8)*decimal.Decimal(k+1))...)
If you manage to force all the numbers to be exact integers, you don't need to worry about precision at all.
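For example, applied to Gj_full above, a minimal sketch could look like this (Gj_full_dec is just an illustrative name; only the square roots and the final division are switched to Decimal, everything else stays in exact integers):

import decimal

decimal.getcontext().prec = 100

def Gj_full_dec(X, Y, Xl, Yl, j=0, coef=1):
    # same formula as Gj_full, but with Decimal square roots and division
    if (X - Y + Xl + Yl) % 2 or X < Y or Y < j:
        raise ValueError
    D = decimal.Decimal
    u = (X - Y + Xl + Yl) // 2
    v = coef * (2 ** (X - Y) * fact_quo(X, Y - j) * fact_quo(u + j, j) *
                4 ** j * (-1) ** j)
    w = 3 ** (u + j) * fact_quo(X - Y + j)
    den2 = fact_quo(X) * fact_quo(Xl) * fact_quo(Yl)
    z = (D(fact_quo(X)).sqrt() * D(fact_quo(Y)).sqrt()
         * D(fact_quo(Xl)).sqrt() * D(fact_quo(Yl)).sqrt())
    return (D(v) / (D(den2) * D(w))) * z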

Related

Can this Iron Python 2.7 code be sped up?

I have implemented Gray-Scott reaction diffusion in Iron Python 2, mapping grayscale values between 0 and 1.
But since it only gets interesting after about 6400 steps, it takes a lot of time.
So can someone smarter / more proficient in Python than me help me make this code more efficient?
Here is the code:
import random as rnd

dA = 1
dB = 0.5
feed = 0.055
kill = 0.062
wHalb = int(width/2)
hHalb = int(height/2)

# decides if gAA is random or 1
if gAA == 0:
    gAA = rnd.random()

def main():
    # create base grid of chemical A and B
    gridA = [[gAA for i in range(width)] for i in range(height)]
    gridB = [[0 for i in range(width)] for i in range(height)]
    nextA = [[1 for i in range(width)] for i in range(height)]
    nextB = [[0 for i in range(width)] for i in range(height)]
    color = [[0 for i in range(width)] for i in range(height)]
    for x in range(wHalb - baseBlock, wHalb + baseBlock):
        for y in range(hHalb - baseBlock, hHalb + baseBlock):
            gridB[x][y] = 1
    x, y, i, j = 0, 0, 0, 0
    for n in range(steps):
        for x in range(width):
            for y in range(height):
                a = gridA[x][y]
                b = gridB[x][y]
                nextA[x][y] = (a + (dA * laplaceA(x, y, gridA)) - (a * b*b) + (feed * (1 - a)))
                nextB[x][y] = (b + (dB * laplaceB(x, y, gridB)) + (a * b*b) - ((kill + feed) * b))
        tempA = gridA
        gridA = nextA
        nextA = tempA
        tempB = gridB
        gridB = nextB
        nextB = tempB
    color = [[(nextA[i][j] - nextB[i][j]) for i in range(width)] for j in range(height)]
    return color
def laplaceA(x, y, gridA):
    sumA = 0
    xS = x - 1
    xE = x + 1
    yS = y - 1
    yE = y + 1
    if (x == 0):
        xS = width - 1
    if (y == 0):
        yS = height - 1
    if (x == width - 1):
        xE = 0
    if (y == height - 1):
        yE = 0
    sumA = sumA + gridA[x][y] * -1
    sumA = sumA + gridA[xS][y] * 0.2
    sumA = sumA + gridA[xE][y] * 0.2
    sumA = sumA + gridA[x][yE] * 0.2
    sumA = sumA + gridA[x][yS] * 0.2
    sumA = sumA + gridA[xS][yS] * 0.05
    sumA = sumA + gridA[xE][yS] * 0.05
    sumA = sumA + gridA[xS][yE] * 0.05
    sumA = sumA + gridA[xE][yE] * 0.05
    return sumA
def laplaceB(x, y, gridB):
    sumB = 0
    xS = x - 1
    xE = x + 1
    yS = y - 1
    yE = y + 1
    if (x == 0):
        xS = width - 1
    if (y == 0):
        yS = height - 1
    if (x == width - 1):
        xE = 0
    if (y == height - 1):
        yE = 0
    sumB = sumB + gridB[x][y] * -1
    sumB = sumB + gridB[xS][y] * 0.2
    sumB = sumB + gridB[xE][y] * 0.2
    sumB = sumB + gridB[x][yE] * 0.2
    sumB = sumB + gridB[x][yS] * 0.2
    sumB = sumB + gridB[xS][yS] * 0.05
    sumB = sumB + gridB[xE][yS] * 0.05
    sumB = sumB + gridB[xS][yE] * 0.05
    sumB = sumB + gridB[xE][yE] * 0.05
    return sumB
aOut = main()
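For reference, a sketch of how the per-cell loops could be vectorized with numpy (laplace and gray_scott_step are illustrative names, not part of the original code; the stencil weights and periodic wrap-around mirror laplaceA/laplaceB above, and the default parameters mirror dA, dB, feed, kill):

import numpy as np

def laplace(grid):
    # 3x3 stencil: centre -1, edges 0.2, corners 0.05, with periodic wrap-around
    return (-grid
            + 0.2 * (np.roll(grid, 1, 0) + np.roll(grid, -1, 0)
                     + np.roll(grid, 1, 1) + np.roll(grid, -1, 1))
            + 0.05 * (np.roll(np.roll(grid, 1, 0), 1, 1)
                      + np.roll(np.roll(grid, 1, 0), -1, 1)
                      + np.roll(np.roll(grid, -1, 0), 1, 1)
                      + np.roll(np.roll(grid, -1, 0), -1, 1)))

def gray_scott_step(A, B, dA=1.0, dB=0.5, feed=0.055, kill=0.062):
    # one whole-grid update instead of a Python double loop over cells
    reaction = A * B * B
    A_next = A + dA * laplace(A) - reaction + feed * (1 - A)
    B_next = B + dB * laplace(B) + reaction - (kill + feed) * B
    return A_next, B_next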

Why can't the subs function of the sympy replace the value of the symbol sometimes?

I have the following code:
import numpy as np
import sympy as sp

u_ini = 0.1
v_ini = 0.1
z_ini = 0.1  # initialize the three Lagrange multipliers
q = 0
lis = list(range(2))
u = list(sp.symbols('u:{}'.format(len(lis))))
v = list(sp.symbols('v:{}'.format(len(lis))))
z = sp.symbols('z')
p = list(sp.symbols('p:{}'.format(len(lis))))
lag1 = 0
lag2 = 0
lag3 = 0
p_symbol_sum = np.sum(p)
for i in range(k):
    if i < k-1:
        lag1 += B*ts_ratio[i]*sp.log(1+g[i]*p[i]/(sgm_2+g[i]*np.sum(p[i+1:k])),2)-q*(af_eff*p[i]+Pc-eh_eff*(1-ts_ratio[i])*g[i]*p_symbol_sum)
        lag2 -= u[i] * (R_min - ts_ratio[i] * B * sp.log(1 + g[i] * p[i] / (sgm_2 + g[i] * np.sum(p[i + 1:k])),2))
    elif i == k-1:
        lag1 += B*ts_ratio[i]*sp.log(1+g[i]*p[i]/(sgm_2+g[i]*p[i]),2)-q*(af_eff*p[i]+Pc-eh_eff*(1-ts_ratio[i])*g[i]*p_symbol_sum)
        lag2 -= u[i] * (R_min - ts_ratio[i] * B * sp.log(1+g[i]*p[i]/(sgm_2+g[i]*p[i]),2))
    lag3 -= v[i] * (E_min - (1 - ts_ratio[i])*eh_eff*g[i]*p_symbol_sum) + z * (p[i] - p_max)
lag_fun = lag1 + lag2 + lag3
print("lag_fun:", lag_fun)
for i in range(k):
    lag_fun.subs([(u[i], u_ini), (v[i], v_ini), (z, z_ini), (p[i], p_ini)]).evalf()
print("lag_fun:", lag_fun)
Why does the value of the expression not change after I run the subs in the second loop?
This is the output of the program. The first line is the output before using subs, the second is the output after using subs. Why hasn't it changed?
lag_fun: -u0*(-0.5*log(0.0410609879149758*p0/(0.0410609879149758*p1 + 0.001) + 1)/log(2) + 2) - u1*(-0.5*log(0.0123909311217172*p1/(0.0123909311217172*p1 + 0.001) + 1)/log(2) + 2) - v0*(-0.00205304939574879*p0 - 0.00205304939574879*p1 + 0.2) - v1*(-0.000619546556085859*p0 - 0.000619546556085859*p1 + 0.2) - z*(p0 - 20) - z*(p1 - 20) + 0.5*log(0.0410609879149758*p0/(0.0410609879149758*p1 + 0.001) + 1)/log(2) + 0.5*log(0.0123909311217172*p1/(0.0123909311217172*p1 + 0.001) + 1)/log(2)
lag_fun: -u0*(-0.5*log(0.0410609879149758*p0/(0.0410609879149758*p1 + 0.001) + 1)/log(2) + 2) - u1*(-0.5*log(0.0123909311217172*p1/(0.0123909311217172*p1 + 0.001) + 1)/log(2) + 2) - v0*(-0.00205304939574879*p0 - 0.00205304939574879*p1 + 0.2) - v1*(-0.000619546556085859*p0 - 0.000619546556085859*p1 + 0.2) - z*(p0 - 20) - z*(p1 - 20) + 0.5*log(0.0410609879149758*p0/(0.0410609879149758*p1 + 0.001) + 1)/log(2) + 0.5*log(0.0123909311217172*p1/(0.0123909311217172*p1 + 0.001) + 1)/log(2)
subs doesn't change anything in place, you have to capture the result for the same reason that this loop fails to change x:
>>> x = 0
>>> for i in range(10): x + 1
>>> x
0
So it must be
lag_fun = lag_fun.subs(etc...)
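Applied to the loop in the question, that becomes something like the following sketch (the substitution pairs are the same ones as above):
for i in range(k):
    # capture the result: subs and evalf return new expressions
    lag_fun = lag_fun.subs([(u[i], u_ini), (v[i], v_ini), (z, z_ini), (p[i], p_ini)])
lag_fun = lag_fun.evalf()
print("lag_fun:", lag_fun)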

C++ seg fault on gaussian mean filter using cimg

I just tried implementing this filter using CImg but keep getting a seg fault. I am not sure why this is happening, as the other filters that I have been using do not have that issue. Is there anything obvious that I am missing here?
CImg<float> gaussianBlur(CImg<float> source)
{
    double frame[25];
    double mean = 0;
    int width = source.width;
    int height = source.height;
    CImg<float> destination;
    destination = source;
    for (int x = 1; x < int(width) - 3; x++)
    {
        for (int y = 1; y < int(height) - 3; y++)
        {
            mean = 0.0;
            frame[0] = int(source(x - 2, y - 2)) * .003765;
            frame[1] = int(source(x - 1, y - 2)) * .015019;
            frame[2] = int(source(x - 0, y - 2)) * .023792;
            frame[3] = int(source(x + 1, y - 2)) * .015019;
            frame[4] = int(source(x + 2, y - 2)) * .003765;
            frame[5] = int(source(x - 2, y - 1)) * .015019;
            frame[6] = int(source(x - 1, y - 1)) * .059912;
            frame[7] = int(source(x - 0, y - 1)) * .094907;
            frame[8] = int(source(x + 1, y - 1)) * .059912;
            frame[9] = int(source(x + 2, y - 1)) * .015019;
            frame[10] = int(source(x - 2, y - 0)) * .023792;
            frame[11] = int(source(x - 1, y - 0)) * .094907;
            frame[12] = int(source(x - 0, y - 0)) * .150342;
            frame[13] = int(source(x + 1, y - 0)) * .094907;
            frame[14] = int(source(x + 2, y - 0)) * .023792;
            frame[15] = int(source(x - 2, y + 1)) * .015019;
            frame[16] = int(source(x - 1, y + 1)) * .059912;
            frame[17] = int(source(x - 0, y + 1)) * .094907;
            frame[18] = int(source(x + 1, y + 1)) * .059912;
            frame[19] = int(source(x + 2, y + 1)) * .015019;
            frame[20] = int(source(x - 2, y + 2)) * .003765;
            frame[21] = int(source(x - 1, y + 2)) * .015019;
            frame[22] = int(source(x - 0, y + 2)) * .023792;
            frame[23] = int(source(x + 1, y + 2)) * .015019;
            frame[24] = int(source(x + 2, y + 2)) * .003765;
            for (int z = 0; z < 25; z++)
            {
                mean += frame[z];
            }
            destination(x, y) = float(mean / 25);
        }
    }
    return destination;
}
for (int y = 1; y < int(height) - 3; y++)
{
    mean = 0.0;
    frame[0] = int(source(x - 2, y - 2)) * .003765;
When y == 1 you have y - 2 == -1, so you have an out-of-bounds access to the source image.

C++ equations, not sure if i wrote it correctly

Are the following correctly written?
if(x>0.1){z = 2*n+x}
if else(x <= 0.1){x(pow,n) - 1 / sqrt(n(pow,2)) + x(pow,2) * n}
x = n / 2 * n(pow,2) + 3 * n - 2
x is correctly translated to C++.
As for the rest of the code, it would be:
if (x > 0.1) {
    z = 2 * n + x;
}
else {
    z = pow(x, n) - 1 / sqrt(pow(n, 2) + pow(x, 2) * n);
}

2d rotation opengl

Here is the code I am using.
#define ANGLETORADIANS 0.017453292519943295769236907684886f // PI / 180
#define RADIANSTOANGLE 57.295779513082320876798154814105f // 180 / PI

rotation = rotation * ANGLETORADIANS;
cosRotation = cos(rotation);
sinRotation = sin(rotation);

for (int i = 0; i < 3; i++)
{
    px[i] = (vec[i].x + centerX) * (cosRotation - (vec[i].y + centerY)) * sinRotation;
    py[i] = (vec[i].x + centerX) * (sinRotation + (vec[i].y + centerY)) * cosRotation;
    printf("num: %i, px: %f, py: %f\n", i, px[i], py[i]);
}
So far it seems my Y value is being flipped: say I enter X = 1 and Y = 1 with a 45 degree rotation, you should see about x = 0 and y = 1.25 ish, but I get x = 0 and y = -1.25.
Also, my 90 degree rotation always returns x = 0 and y = 0.
P.S. I know I'm only centering my values and not putting them back where they came from. That's not needed, as all I need to know is the value I'm getting now.
Your bracket placement doesn't look right to me. I would expect:
px[i] = (vec[i].x + centerX) * cosRotation - (vec[i].y + centerY) * sinRotation;
py[i] = (vec[i].x + centerX) * sinRotation + (vec[i].y + centerY) * cosRotation;
Your brackets are wrong. It should be
px[i] = ((vec[i].x + centerX) * cosRotation) - ((vec[i].y + centerY) * sinRotation);
py[i] = ((vec[i].x + centerX) * sinRotation) + ((vec[i].y + centerY) * cosRotation);
instead
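For a quick numeric check of the corrected formula, here is a small Python sketch (rotate is an illustrative helper; the centerX/centerY offsets are taken as zero):

import math

def rotate(x, y, degrees):
    # standard 2D rotation about the origin
    r = math.radians(degrees)
    return (x * math.cos(r) - y * math.sin(r),
            x * math.sin(r) + y * math.cos(r))

print(rotate(1, 1, 45))   # approximately (0.0, 1.414)
print(rotate(1, 1, 90))   # approximately (-1.0, 1.0)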