Gaussian elimination iteration with pivoting in python with back substitution - python-2.7

I am implementing code to solve Ux = b and I don't get the same results as np.linalg.solve(). I have created a function implementing back substitution that I use in my gauelim function, but I have the feeling that it does not take into account the elimination performed inside gauelim. Does anyone see the problem? You can find the code below. Thank you very much in advance!
import numpy as np
#To solve Ux = b for n=3 (3x3 non-triangular matrix)
U = np.array([[2, 1, 1],[1, 1, -2],[1, 2, 1]])
#print(U)
bs = np.array([8, -2, 2])
#print(bs)
def backsub(U, bs):
    # i=0
    n = bs.size
    xs = np.zeros(n)
    for i in reversed(range(n)):
        xs[i] = (bs[i] - U[i, i+1:] @ xs[i+1:]) / U[i, i]
    return xs
def gauelim(U, bs):
    n = bs.size
    for j in range(n-1):
        for i in range(j+1, n):
            coeff = U[i, j]/U[j, j]
            U[i, j:] - coeff*U[j, j:]
            bs[i] - coeff*bs[j]
    xs = backsub(U, bs)
    return xs
print(gauelim(U,bs))
#solving Ux = b with linalg.solve()
x_sol = np.linalg.solve(U,bs)
print(x_sol)
I was trying to write code that solves Ux = b by Gaussian elimination, where U is a 3x3 non-triangular matrix and x is the unknown vector.
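For reference, here is a minimal sketch of what the elimination step presumably intends (gauelim_fixed is a hypothetical name, not the poster's code): the two update lines must assign their results back into the arrays, and the integer inputs should be cast to float so the divisions are not truncated. It reuses the backsub function defined above and, like the original, does not pivot.

import numpy as np

def gauelim_fixed(U, bs):
    # work on float copies so integer inputs are not truncated
    U = U.astype(float)
    bs = bs.astype(float)
    n = bs.size
    for j in range(n - 1):
        for i in range(j + 1, n):
            coeff = U[i, j] / U[j, j]
            U[i, j:] = U[i, j:] - coeff * U[j, j:]   # assign the updated row back
            bs[i] = bs[i] - coeff * bs[j]            # and the updated right-hand side
    return backsub(U, bs)

U = np.array([[2, 1, 1], [1, 1, -2], [1, 2, 1]])
bs = np.array([8, -2, 2])
print(gauelim_fixed(U, bs))     # [ 4. -2.  2.]
print(np.linalg.solve(U, bs))   # same result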

Related

Is there any all_coeffs() for multivariable polynomials in sympy?

I want to extract all coefficients (INCLUDING ZEROS) of a multivariable polynomial using sympy.
Sympy offers all_coeffs(), but it only works for univariate polynomials; otherwise I get this error: PolynomialError: multivariate polynomials not supported
For example, for a polynomial x^3+y^3+x*y+1 I would like the output to be [3,3,0,0,0,0,1,0,0,1]
If you generate the monomials of interest, then you can look up their coefficients in your expression. You have to watch out when requesting the coefficient of a monomial like x*y in an expression that has terms like x*y*z, however. The following routine handles that by zeroing out any variables left in the coefficient that is obtained. It also includes a helper routine to create the monomials of interest.
from sympy import IndexedBase, Mul, Poly, Pow, Symbol

def all_coeffs(expr, *free):
    x = IndexedBase('x')
    expr = expr.expand()
    free = list(free) or list(expr.free_symbols)
    pows = [p.as_base_exp() for p in expr.atoms(Pow, Symbol)]
    P = {}
    for p, e in pows:
        if p not in free:
            continue
        elif p not in P:
            P[p] = e
        elif e > P[p]:
            P[p] = e
    reps = dict([(f, x[i]) for i, f in enumerate(free)])
    xzero = dict([(v, 0) for k, v in reps.items()])
    e = expr.xreplace(reps); reps = {v: k for k, v in reps.items()}
    return dict([(m.xreplace(reps), e.coeff(m).xreplace(xzero) if m != 1 else e.xreplace(xzero))
                 for m in monoms(*[P[f] for f in free])])

def monoms(*o):
    x = IndexedBase('x')
    f = []
    for i, o in enumerate(o):
        f.append(Poly([1]*(o + 1), x[i]).as_expr())
    return Mul(*f).expand().args
>>> eq = x**2 + x*y - 3
>>> all_coeffs(eq)
{1: -3, x**2: 1, x**2*y: 0, x*y: 1, y: 0, x: 0}
>>> all_coeffs(eq, x)
{1: -3, x**2: 1, x: y}
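As a cross-check, here is a minimal sketch (the names p and table are mine) that builds a zero-filled coefficient table from Poly.as_dict(), which maps exponent tuples to the nonzero coefficients. Note that it enumerates monomials by total degree, whereas all_coeffs above bounds each variable's degree separately.

from itertools import product
from sympy import Poly
from sympy.abc import x, y

eq = x**2 + x*y - 3
p = Poly(eq, x, y)
table = p.as_dict()          # {(2, 0): 1, (1, 1): 1, (0, 0): -3}
deg = p.total_degree()

# every monomial of total degree <= deg, with 0 for the missing ones
coeffs = {x**i * y**j: table.get((i, j), 0)
          for i, j in product(range(deg + 1), repeat=2) if i + j <= deg}
print(coeffs)                # includes x*y: 1 and, for example, y**2: 0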

trim np arrays according to a list of starting points

I have a table, represented by an np.array like the following:
A = [[12, 412, 42, 54],
     [144, 2, 42, 4],
     [2, 43, 22, 10]]
And a list that contains the desired starting point of each row in A:
L=[0,2,1]
The desired output would be:
B = [[12, 412, 42, 54],
     [42, 4, np.nan, np.nan],
     [43, 22, 10, np.nan]]
Edit
I prefer to avoid using a for-loop for obvious reasons.
Try comparing L with the column indices, then use boolean masks to get/set items:
# convert A to numpy array for advanced indexing
A = np.array(A)
ll = A.shape[1]
keep = np.arange(ll) >= np.array(L)[:,None]
out = np.full(A.shape, np.nan)
out[keep[:,::-1]] = A[keep]
print(out)
Output:
[[ 12. 412. 42. 54.]
[ 42. 4. nan nan]
[ 43. 22. 10. nan]]
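A quick illustration of why the row-reversed mask left-aligns the kept values (reusing the example data; the variable names are mine): keep marks which entries survive, keep[:, ::-1] marks where they land, and since both masks have the same number of True entries per row, the boolean assignment fills them in matching order.

import numpy as np

A = np.array([[12, 412, 42, 54],
              [144, 2, 42, 4],
              [2, 43, 22, 10]])
L = [0, 2, 1]

keep = np.arange(A.shape[1]) >= np.array(L)[:, None]
print(keep)
# [[ True  True  True  True]
#  [False False  True  True]
#  [False  True  True  True]]
print(keep[:, ::-1])
# [[ True  True  True  True]
#  [ True  True False False]
#  [ True  True  True False]]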
My guess would be that a vectorized approach for this would be less efficient than explicit looping, because the result is fundamentally a jagged array, which NumPy does not support well.
However, a loop-based solution is simple, and it can be made faster with Numba's nb.njit() if needed:
import numpy as np
import numba as nb

@nb.njit
def jag_nb(arr, starts, empty=np.nan):
    result = np.full(arr.shape, empty)
    for i, x in enumerate(starts):
        if x != 0:
            result[i, :-x] = arr[i, x:]
        else:
            result[i, :] = arr[i, :]
    return result
A = np.array([[12, 412, 42, 54], [144, 2, 42, 4], [2, 43, 22, 10]])
L = np.array([0, 2, 1])

jag_nb(A, L)
# array([[ 12., 412.,  42.,  54.],
#        [ 42.,   4.,  nan,  nan],
#        [ 43.,  22.,  10.,  nan]])
Compared to the pure NumPy vectorized approach proposed in @QuangHoang's answer:
def jag_np(arr, starts, empty=np.nan):
    _, n = arr.shape
    keep = np.arange(n) >= starts[:, None]
    result = np.full(arr.shape, empty)
    result[keep[:, ::-1]] = arr[keep]
    return result
The Numba based approach is noticeably faster, as shown with the following benchmarks:
import pandas as pd
import matplotlib.pyplot as plt

def benchmark(
        funcs,
        ii=range(4, 10, 1),
        is_equal=lambda x, y: np.allclose(x, y, equal_nan=True),
        seed=0,
        unit="ms",
        verbose=True,
        use_str=True
):
    labels = [func.__name__ for func in funcs]
    units = {"s": 0, "ms": 3, "µs": 6, "ns": 9}
    assert unit in units
    np.random.seed(seed)
    timings = {}
    for i in ii:
        m = n = 2 ** i
        if verbose:
            print(f"i={i}, n={n}")
        arr = np.random.random((m, n))
        starts = np.random.randint(0, n, m)
        base = funcs[0](arr, starts)
        timings[n] = []
        for func in funcs:
            res = func(arr, starts)
            is_good = is_equal(base, res)
            timed = %timeit -n 64 -r 8 -q -o func(arr, starts)
            timing = timed.best
            timings[n].append(timing if is_good else None)
            if verbose:
                print(
                    f"{func.__name__:>24}"
                    f" {is_good!s:5}"
                    f" {timing * (10 ** units[unit]):10.3f} {unit}"
                    f" {timings[n][0] / timing:5.1f}x")
    return timings, labels
def plot(timings, labels, title=None, xlabel="Input Size / #", unit="ms"):
    n_rows = 1
    n_cols = 3
    fig, axs = plt.subplots(n_rows, n_cols, figsize=(8 * n_cols, 6 * n_rows), squeeze=False)
    units = {"s": 0, "ms": 3, "µs": 6, "ns": 9}
    df = pd.DataFrame(data=timings, index=labels).transpose()
    base = df[[labels[0]]].to_numpy()
    (df * 10 ** units[unit]).plot(marker="o", xlabel=xlabel, ylabel=f"Best timing / {unit}", ax=axs[0, 0])
    (df / base * 100).plot(marker='o', xlabel=xlabel, ylabel='Relative speed / %', logx=True, ax=axs[0, 1])
    (base / df).plot(marker='o', xlabel=xlabel, ylabel='Speed Gain / x', ax=axs[0, 2])
    if title:
        fig.suptitle(title)
    fig.patch.set_facecolor('white')

funcs = jag_np, jag_nb
timings, labels = benchmark(funcs, ii=range(4, 11))
plot(timings, labels, unit="ms")

Implementation of Karger's Algorithm in Python Taking too Long

Wondering if you can help me understand where the critical flaw may be in my attempt at implementing Karger's algorithm in Python. My program appears to take far too long to run, and my computer starts to struggle on large sets of vertices. The purpose of the program is to output the minimum cut of the graph.
from random import choice
from statistics import mode
import math

fhand = open("mincuts.txt", "r")
vertices = fhand.readlines()

d = {}
for index, line in enumerate(vertices):
    d["{0}".format(index+1)] = line.split()

def randy(graph, x):
    y = str(choice(list(graph)))
    if x == y:
        y = randy(graph, x)
    return y

count = 0

def contract(graph):
    global count
    if len(graph) == 2:
        a = list(graph.keys())[0]
        b = list(graph.keys())[1]
        for i in range(1, len(graph[a])):
            if graph[a][i] in graph[b]:
                count = count + 1
        #print(graph)
        return
    x = str(choice(list(graph)))
    y = randy(graph, x)
    #print(x)
    #print(y)
    graph[x] = graph[x] + graph[y]
    graph.pop(y)
    #remove self loops
    for key in graph:
        #method to remove duplicate entries in the arrays of the vertices. Source: www.w3schools.com
        graph[key] = list(dict.fromkeys(graph[key]))
    contract(graph)

N = len(d)
runs = int(N*N*(math.log(N)))
outcomes = []

for i in range(runs):
    e = d.copy()
    count = 0
    contract(e)
    outcomes.append(count)

print(outcomes)
#returns most common minimum cut value
print(mode(outcomes))
Below is a link to the graph I am running in mincuts.txt:
https://github.com/BigSoundCode/Misc-Algorithm-Implementations/blob/main/mincuts.txt
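For reference only (not a diagnosis of the code above), here is a generic sketch of one trial of Karger's contraction on an edge-list representation, assuming each undirected edge appears once as a (u, v) pair of integer vertex labels. Contracting the edges of a random permutation with a union-find structure is equivalent to picking a uniformly random remaining edge at each step, and it avoids deep recursion and repeated list scans; the standard analysis then takes the minimum of the returned counts over the repeated trials.

import random

def karger_one_trial(n, edges):
    # n: number of vertices labelled 1..n; edges: list of (u, v) pairs
    parent = list(range(n + 1))

    def find(u):
        while parent[u] != u:
            parent[u] = parent[parent[u]]   # path halving
            u = parent[u]
        return u

    order = edges[:]                        # shuffle a copy of the edge list
    random.shuffle(order)
    alive = n
    for u, v in order:
        if alive == 2:
            break
        ru, rv = find(u), find(v)
        if ru != rv:                        # skip edges that became self-loops
            parent[ru] = rv                 # contract the edge
            alive -= 1
    # edges whose endpoints sit in different super-vertices cross the cut
    return sum(1 for u, v in edges if find(u) != find(v))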

Newton's method without pre-built Python functions: calculation of gradient and Hessian

I am trying to write the basic Newton's method without pre-built solvers.
This is the function:
import numpy as np
import sympy as sym
from sympy import Matrix

## definition of variables
x_1, x_2 = sym.symbols("x_1 x_2")

a_T = np.array([[0.3], [0.6], [0.2]])
b_T = np.array([5, 26, 3])
c_T = np.array([40, 1, 10])

u = x_1 - 0.8
v = x_2 - (a_T[0] + a_T[1]*u**2*(1 - u)**(1/2) - a_T[2]*u)

alpha = -b_T[0] + (b_T[1]*u**2)*(1 + u)**(1/2) + (b_T[2])*u
beta = c_T[0]*v**2*(1 - c_T[1]*v)/(1 + c_T[2]*u**2)

## function
f = alpha**(-beta)
I calculated the gradient and the Hessian and defined the other parameters:
## gradient
gradient_cal = sym.Matrix(1,2,sym.derive_by_array(f, (x_1, x_2)))
## hessian
hessian_cal = sym.Matrix(2, 2, sym.derive_by_array(gradient_cal, (x_1, x_2)))
# initial guess
x_A= Matrix([[1],[0.5]])
xk = x_A
#tolerance
epsilon= 1e-10
#maximum iterations
max_iter=100
And the function itself:
def newton(gradient_cal, hessian_cal, xk, epsilon, max_iter):
    for k in range(0, max_iter):
        fxk = gradient_cal.evalf(subs={x_1: xk[0], x_2: xk[1]})
        if fxk.norm() < epsilon:
            print('Found solution after', k, 'iterations.')
            return xk
        Dfxk = hessian_cal.evalf(subs={x_1: xk[0], x_2: xk[1]})
        if Dfxk == 0:
            print('Zero derivative. No solution found.')
            return None
        A = hessian_cal.evalf(subs={x_1: xk[0], x_2: xk[1]})
        B = gradient_cal.evalf(subs={x_1: xk[0], x_2: xk[1]})
        pk = (A.inv().dot(B))
        xk = np.subtract(xk, pk)
    print('Exceeded maximum iterations. No solution found.')
    return None

approx = newton(gradient_cal, hessian_cal, x_A, epsilon, max_iter)
print(approx)
The following error shows up:
TypeError: Shape should contain integers only.
I checked it and saw that the Hessian contains terms with I (imaginary values), so I am not sure whether my calculations of the gradient and the Hessian are correct.
Does anyone have a better way to calculate the gradient and the Hessian for such a complicated function?
The jacobian-batteries are already included in SymPy:
>>> from sympy.abc import x, y
>>> f = x/y + x*y**2
>>> Matrix([f]).jacobian((x,y))
Matrix([[y**2 + 1/y, 2*x*y - x/y**2]])
>>> _.jacobian((x,y)) # Hessian
Matrix([
[ 0, 2*y - 1/y**2],
[2*y - 1/y**2, 2*x + 2*x/y**3]])
So you could try
x_1, x_2 = sym.symbols("x_1 x_2")
xx = x_1, x_2
a_T=[0.3,0.6,0.2]
b_T=[5,26,3]
c_T=[40,1,10]
u= x_1-0.8
v= x_2-(a_T[0]+a_T[1]*u**2*(1-u)**(1/2)-a_T[2]*u)
alpha= -b_T[0]+(b_T[1]*u**2)*(1+u)**(1/2)+(b_T[2])*u
beta= c_T[0]*v**2*(1-c_T[1]*v)/(1+c_T[2]*u**2)
## function
f = alpha**(-beta)
jac = Matrix([f]).jacobian(xx)
hes = jac.jacobian(xx)
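Below is a minimal, self-contained sketch of how such a Jacobian/Hessian pair can drive the Newton iteration, using a simple quadratic test function rather than the original f, and LUsolve instead of explicitly inverting the Hessian:

import sympy as sym

x_1, x_2 = sym.symbols("x_1 x_2")
f = (x_1 - 1)**2 + 2*(x_2 + 3)**2 + x_1*x_2    # simple convex test function

jac = sym.Matrix([f]).jacobian((x_1, x_2))     # 1x2 gradient (row vector)
hes = jac.jacobian((x_1, x_2))                 # 2x2 Hessian

xk = sym.Matrix([0, 0])
for k in range(100):
    g = jac.T.evalf(subs={x_1: xk[0], x_2: xk[1]})   # 2x1 gradient at xk
    if g.norm() < 1e-10:
        break
    H = hes.evalf(subs={x_1: xk[0], x_2: xk[1]})
    xk = xk - H.LUsolve(g)                           # Newton step: solve H*p = g

print(xk)   # converges in one step for a quadratic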

How to update the results from a solve function to the original L and U matrices?

I can find each equation to solve but I cannot seem to update each computation's result in the original matrix.
I tried to use itemset on the matrix, replacing the U variables with the actual result from the solve function, but then it gives the following error:
TypeError: can't multiply sequence by non-int of type 'Symbol'
I don't know what else to do!
from sympy import *
import numpy as np

# input matrix
a = np.array([[1, 3, 5],
              [2, 4, 7],
              [1, 1, 0]])

d = len(a)
d1 = d-1
d2 = (d1*(d1+1))/2
d3 = (d*(d+1))/2

symbols_dict = dict(('L%d' % k, symbols('L%d' % k)) for k in range(d2))
locals().update(symbols_dict)
symbols_dict = dict(('U%d' % k, symbols('U%d' % k)) for k in range(d3))
locals().update(symbols_dict)

# L decomposition
b = np.array([[1, 0, 0],
              [L0, 1, 0],
              [L1, L2, 1]])

# U decomposition
c = np.array([[U0, U1, U2],
              [0, U3, U4],
              [0, 0, U5]])

t = [[[] for i in range(d)] for j in range(d)]
s = [[[] for i in range(d)] for j in range(d)]

for j in range(d):
    for i in range(d):
        m = 0
        for k in range(d):
            m = m + (b[j, k]*c[k, i])
        t[j][i] = m
        s[j][i] = solve(t[j][i] - a[j, i])
        c.itemset((j, i), s[j][i])
It should give me the numerical results for each equation solved.
The corrected code is as follows:
L0, L1, L2 = symbols('L0 L1 L2')
U0, U1, U2, U3, U4, U5 = symbols('U0 U1 U2 U3 U4 U5')
.
.
.
for j in range(d):
    for i in range(d):
        m = 0
        for k in range(d):
            m = m + (b[j, k]*c[k, i])
        s = solve(m - a[j, i])[0]  # solve returns a list; take its single element
        if j > i:
            b[j, i] = s
        else:
            c[j, i] = s
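For cross-checking the result, SymPy can also produce the factorization directly; a short sketch (LUdecomposition returns L, U and the list of row swaps it applied):

from sympy import Matrix

A = Matrix([[1, 3, 5],
            [2, 4, 7],
            [1, 1, 0]])
L, U, perm = A.LUdecomposition()
print(L)      # unit lower-triangular factor
print(U)      # upper-triangular factor
print(perm)   # [] here, since no row swaps were needed
print(L * U)  # reproduces A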