solving linear system of equations - gekko - linear-programming

Here is a simple example of solving a system of linear equations; it also shows how to use a for loop when there are many equations.
import numpy as np
from gekko import GEKKO
m = GEKKO(remote=False)
# Random 3x3
A = np.random.rand(3,3)
# Random 3x1
b = np.random.rand(3)
# Gekko array 3x1
x = m.Array(m.Var,(3))
# solve Ax = b
eqn = np.dot(A,x)
for i in range(3):
    m.Equation(eqn[i]==b[i])
m.solve(disp=False)
X = [x[i].value for i in range(3)]
print(X)
print(b)
print(np.dot(A,X))
This gives the correct output: the result X satisfies np.dot(A,X) == b.
[[-0.45756768428], [1.0562541773], [0.10058435163]]
[0.64342498 0.34894335 0.5375324 ]
[[0.64342498]
[0.34894335]
[0.5375324 ]]
The recent Gekko 0.2rc6 also introduces the axb() function for linear programming. The same problem might be solved with this function, but I am not sure how to get the correct result.
m = GEKKO(remote=False)
# Random 3x3
A = np.random.rand(3,3)
# Random 3x1
b = np.random.rand(3)
# Gekko array 3x1
x = m.Array(m.Var,(3))
# solve Ax = b
m.axb(A,b,x=x)
m.solve(disp=False)
X = [x[i].value for i in range(3)]
print(X)
print(b)
print(np.dot(A,X))
but it seems I missed something, because the output is not the solution: the result X does not satisfy np.dot(A,X) == b.
[[0.2560342704], [0.7543346092], [-0.084190799732]]
[0.27262652 0.61028723 0.74616952]
[[0.4201021 ]
[0.5206979 ]
[0.39195592]]

The m.axb() function has a bug in Gekko v0.2.8. The fix is available with Gekko v1.0.0; until that release you can get the pre-release version from GitHub: https://github.com/BYU-PRISM/GEKKO/blob/master/gekko/gekko.py with the APM executable in the bin folder.
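Once a fixed version is installed, the axb() call from the question should return the same solution as the explicit-equation approach. Below is a minimal sketch of such a check, assuming Gekko v1.0.0 or later; numpy.linalg.solve is used only as an independent reference and the fixed random seed is just for reproducibility.
import numpy as np
from gekko import GEKKO
np.random.seed(0)
A = np.random.rand(3,3)
b = np.random.rand(3)
m = GEKKO(remote=False)
x = m.Array(m.Var,(3))
m.axb(A,b,x=x)       # same call as in the question
m.solve(disp=False)
X = np.array([x[i].value[0] for i in range(3)])  # .value holds a list after solve
print(np.allclose(np.dot(A,X), b))           # should be True once the bug is fixed
print(np.allclose(X, np.linalg.solve(A,b)))  # compare against the numpy reference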

Related

PyMC3 Bayesian Inference with NUTS initialization

I'm trying to implement a simple Bayesian inference using an ODE model. I want to use the NUTS algorithm to sample, but it gives me an initialization error. I do not know much about PyMC3 as I'm new to it. Please take a look and tell me what is wrong.
import numpy as np
import matplotlib.pyplot as plt
from scipy.integrate import odeint
import seaborn
import pymc3 as pm
import theano.tensor as T
from theano.compile.ops import as_op
#Actual Solution of the Differential Equation(Used to generate data)
def actual(a,b,x):
    Y = np.exp(-b*x)*(a*np.exp(b*x)*(b*x-1)+a+b**2)/b**2
    return Y
#Method For Solving the ODE
def lv(xdata, a=5.0, b=0.2):
    def dy_dx(y, x):
        return a*x - b*y
    y0 = 1.0
    Y, info = odeint(dy_dx,y0,xdata,full_output=True)
    return Y
#Generating Data for Bayesian Inference
a0, b0 = 5, 0.2
xdata = np.linspace(0, 21, 100)
ydata = actual(a0,b0,xdata)
# Adding some error to the ydata points
yerror = 10*np.random.rand(len(xdata))
ydata += np.random.normal(0.0, np.sqrt(yerror))
ydata = np.ravel(ydata)
@as_op(itypes=[T.dscalar, T.dscalar], otypes=[T.dvector])
def func(al,be):
    Q = lv(xdata, a=al, b=be)
    return np.ravel(Q)
# Number of Samples and Initial Conditions
nsample = 5000
y0 = 1.0
# Model for Bayesian Inference
model = pm.Model()
with model:
    # Priors for unknown model parameters
    alpha = pm.Uniform('alpha', lower=a0/2, upper=a0+a0/2)
    beta = pm.Uniform('beta', lower=b0/2, upper=b0+b0/2)
    # Expected value of outcome
    mu = func(alpha,beta)
    # Likelihood (sampling distribution) of observations
    Y_obs = pm.Normal('Y_obs', mu=mu, sd=yerror, observed=ydata)
    trace = pm.sample(nsample, nchains=1)
pm.traceplot(trace)
plt.show()
The error that I get is
Auto-assigning NUTS sampler...
Initializing NUTS using jitter+adapt_diag...
Initializing NUTS failed. Falling back to elementwise auto-assignment.
Any help would be really appreciated.

Custom multivariate Dirichlet priors in pymc3

Is it possible to create custom multivariate distributions in pymc3? In the following, I have tried to create a linear transformation of a Dirichlet distribution. All variants on this have returned numerous errors, perhaps to do with theano data types? Any help would be gratefully appreciated.
import numpy as np
import pymc3 as pymc
import theano.tensor as tt
# data
n = 5
prior_params = np.ones(n - 1) / (n - 1)
mx = np.array([[0.25 , 0.5  , 0.75 , 1.   ],
               [0.25 , 0.333, 0.25 , 0.   ],
               [0.25 , 0.167, 0.   , 0.   ],
               [0.25 , 0.   , 0.   , 0.   ]])
# Note that the matrix mx takes the unit simplex into the unit simplex.
# custom log-likelihood
def generate_function(mx, prior_params):
    def log_trunc_dir(x):
        return pymc.Dirichlet.dist(a=prior_params).logp(mx.dot(x.T)).eval()
    return log_trunc_dir
#model
with pymc.Model() as simple_model:
    x = pymc.Dirichlet('x', a=np.ones(n - 1))
    q = pymc.DensityDist('q', generate_function(mx, prior_params), observed={'x': x})
Thanks to significant help from the PyMC3 development community, I can post the following working example of a customised Dirichlet prior in PyMC3.
import pymc3 as pm
import numpy as np
import scipy.special as special
import theano.tensor as tt
import matplotlib.pyplot as plt
n = 4
with pm.Model() as model:
    prior = np.ones(n) / n
    def dirich_logpdf(value=prior):
        return -n * special.gammaln(1/n) + (-1 + 1/n) * tt.log(value).sum()
    stick = pm.distributions.transforms.StickBreaking()
    probs = pm.DensityDist('probs', dirich_logpdf, shape=n,
                           testval=np.array(prior), transform=stick)
    data = np.array([5, 7, 1, 0])
    sfs_obs = pm.Multinomial('sfs_obs', n=np.sum(data), p=probs, observed=data)
with model:
    step = pm.Metropolis()
    trace = pm.sample(100000, tune=10000, step=step)
print('MLE = ', data / np.sum(data))
print(pm.summary(trace))
pm.traceplot(trace, [probs])
plt.show()

Adding diagonal line y=x with an offset

I am trying to add the y=x line, along with lines offset from y=x, in a better way than the code I shared. For example, with y=x I want two additional diagonal lines at +0.5 and -0.5 from y=x, but the code I show is a bit hard to understand. Any help with this would be appreciated.
import numpy as np
import matplotlib.pyplot as plt
x=np.linspace(0,5,101)
y=np.random.normal(x) # add some noise
plt.plot(x,y,'r.') # x vs y
plt.plot(x,x,'k-') # identity line
plt.plot(x+0.25,x-0.25,'b-') # offset line (below)
plt.plot(x-0.25,x+0.25,'b-') # offset line (above)
plt.xlim(0,5)
plt.ylim(0,5)
plt.show()
It seems you wish to implement a function that plots lines at a given offset from the identity line.
import numpy as np
import matplotlib.pyplot as plt
np.random.seed(123)
%matplotlib inline
# Functions
def linear(x, m=1, b=0):
    """Return y values for a line."""
    return x*m + b
def plot_offsets(x, func=linear, thres=0.25, ax=None):
    """Plot lines separated by the given offset."""
    if ax is None:
        ax = plt.gca()
    half = thres/2.
    ax.plot(x+half, func(x, b=-half), "b-") # lower
    ax.plot(x-half, func(x, b=half), "b-")  # upper
    return ax
# Data
x_lin = np.linspace(0, 5, 101)
y_data = np.random.normal(x_lin) # add some noise
# Plot
fig, ax = plt.subplots(figsize=(8, 6))
ax.plot(x_lin, y_data, "r.") # x vs y
ax.plot(x_lin, linear(x_lin), "k-") # identity line
plot_offsets(x_lin, thres=0.5, ax=ax)
ax.set_xlim(0, 5)
ax.set_ylim(0, 5)
plt.show()
Two functions were added to your code to abstract plotting the line and its "offsets" (given a threshold). Most of the remaining additions (np.random.seed, fig, ax = plt.subplots) are included to make the code reproducible.
Output

tf.py_func, custom tensorflow function getting applied to only the first element in the tensor

I am new to tensorflow and was playing around with a deep learning network. I wanted to do a custom rounding off on all the weights after each iteration, since the round function in the tensorflow library doesn't give you the option to round values to a certain number of decimal places.
So I wrote this
import numpy as np
import tensorflow as tf
from tensorflow.python.framework import ops
np_prec = lambda x: np.round(x,3).astype(np.float32)
def tf_prec(x,name=None):
    with ops.name_scope("d_spiky", name, [x]) as name:
        y = tf.py_func(np_prec,
                       [x],
                       [tf.float32],
                       name=name,
                       stateful=False)
        return y[0]
with tf.Session() as sess:
    x = tf.constant([0.234567,0.712,1.2,1.7])
    y = tf_prec(x)
    y = tf_prec(x)
    tf.global_variables_initializer
    print(x.eval(), y.eval())
The output I got was this
[ 0.234567 0.71200001 1.20000005 1.70000005] [ 0.235 0.71200001 1.20000005 1.70000005]
So the custom rounding off worked only on the first item in the tensor, and I am not sure what I am doing wrong. Thanks in advance.
The error here is because of the following line:
np_prec = lambda x: np.round(x,3).astype(np.float32)
You are casting the output to np.float32. You can verify the error with the following code:
print(np.round([0.234567,0.712,1.2,1.7], 3).astype(np.float32)) #prints [ 0.235 0.71200001 1.20000005 1.70000005]
The default output of np.round is float64. Moreover, you also have to change the Tout argument in tf.py_func to float64.
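For comparison, the same values rounded and left as the default float64 display as intended (a quick check; the exact print formatting may vary by numpy version):
print(np.round([0.234567,0.712,1.2,1.7], 3)) # prints the intended values, e.g. [0.235 0.712 1.2 1.7]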
I have given the following code with the above fix and commented where necessary.
import numpy as np
import tensorflow as tf
from tensorflow.python.framework import ops
np_prec = lambda x: np.round(x,3)
def tf_prec(x,name=None):
    with ops.name_scope("d_spiky", name, [x]) as name:
        y = tf.py_func(np_prec,
                       [x],
                       [tf.float64], #changed this line to tf.float64
                       name=name,
                       stateful=False)
        return y[0]
with tf.Session() as sess:
    x = tf.constant([0.234567,0.712,1.2,1.7],dtype=np.float64) #specify the input data type np.float64
    y = tf_prec(x)
    y = tf_prec(x)
    tf.global_variables_initializer
    print(x.eval(), y.eval())
Hope this helps.

Interpolating 3d data at a single point in space (Python 2.7)

I have a point cloud in 4 dimensions, where each point in the cloud has a location and a value (x,y,z,Value). In addition, I have a 'special' point, S0, within the 3d point cloud; I've used this example to find the closest 10 points in the cloud, relative to S0. Now, I have a numpy array for each of the 10 closest points and their values. How can I interpolate these 10 points, to find the interpolated value at point S0? Example code is shown below:
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D  # needed for the 3d projection
numpoints = 20
linexs = 320
lineys = 40
linezs = 60
linexe = 20
lineye = 20
lineze = 0
# Create vectors of points
xpts = np.linspace(linexs, linexe, numpoints)
ypts = np.linspace(lineys, lineye, numpoints)
zpts = np.linspace(linezs, lineze, numpoints)
lin = np.dstack((xpts,ypts,zpts))
# Image line of points
fig = plt.figure()
ax = fig.add_subplot(211, projection='3d')
ax.set_xlim(0,365); ax.set_ylim(-85, 85); ax.set_zlim(0, 100)
ax.plot_wireframe(xpts, ypts, zpts)
ax.view_init(elev=12, azim=78)
def randrange(n, vmin, vmax):
    return (vmax - vmin)*np.random.rand(n) + vmin
n = 10
for n in range(21):
    xs = randrange(n, 0, 350)
    ys = randrange(n, -75, 75)
    zs = randrange(n, 0, 100)
    ax.scatter(xs, ys, zs)
    dat = np.dstack((xs,ys,zs))
ax.set_xlabel('X Label')
ax.set_xlim(0,350)
ax.set_ylabel('Y Label')
ax.set_ylim(-75,75)
ax.set_zlabel('Z Label')
ax.set_zlim(0,100)
ax = fig.add_subplot(212, projection='3d')
ax.set_xlim(0,365); ax.set_ylim(-85, 85); ax.set_zlim(0, 100)
ax.plot_wireframe(xpts,ypts,zpts)
ax.view_init(elev=12, azim=78)
plt.show()
dist = []
# Calculate distance from first point to all other points in cloud
for l in range(len(xpts)):
    aaa = lin[0][0]-dat
    dist.append(np.sqrt(aaa[0][l][0]**2+aaa[0][l][1]**2+aaa[0][l][2]**2))
full = np.dstack((dat,dist))
aaa = full[0][full[0][:,3].argsort()]
print(aaa[0:10])
A basic example: note that the meshgrid is not needed for the interpolation itself; it is only used to generate an example function A = f(x,y,z) with a fast ufunc, here A = x + y + z.
from scipy.interpolate import interpn
import numpy as np
#make up a regular 3d grid
X=np.linspace(-5,5,11)
Y=np.linspace(-5,5,11)
Z=np.linspace(-5,5,11)
xv,yv,zv = np.meshgrid(X,Y,Z)
# make up a function
# see http://docs.scipy.org/doc/numpy/reference/ufuncs.html
A = np.add(xv,np.add(yv,zv))
#this one is easy enough for us to know what to expect at (.5,.5,.5)
# usage : interpn(points, values, xi, method='linear', bounds_error=True, fill_value=nan)
interpn((X,Y,Z),A,[0.5,0.5,0.5])
Output:
array([ 1.5])
If you pass in an array of points of interest, it will give you multiple answers.
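For example, building on the grid defined above (a small sketch; the expected values follow because A = x + y + z is linear, which trilinear interpolation reproduces exactly):
pts = np.array([[ 0.5, 0.5, 0.5],
                [ 1.0, 2.0, 3.0],
                [-1.5, 0.0, 2.5]])
print(interpn((X,Y,Z), A, pts)) # one interpolated value per point: [1.5, 6.0, 1.0]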