Parallel processing of a 3d matrix in Python - python-2.7

Hi, I need to speed up this code:
import numpy as np

def func(matrix, x, y):
    return matrix*x + y

matrix3d = np.empty([10, 10, 1000])
matrix3d[:] = np.random.randint(10)
matrix3d_1 = np.empty([10, 10, 1000])
x = 10
y = 1
for z in range(0, 1000):
    matrix3d_1[:, :, z] = func(matrix3d[:, :, z], x, y)
I have tried multiprocessing with Pool.map(), but it did not work.
from functools import partial
import multiprocessing as mp

pool = mp.Pool(processes=2)
args = partial(func, x, y)
matrix3d_2 = np.empty([10, 10, 1000])
matrix3d_2 = pool.map(args, matrix3d)
pool.close()
If I compare the two matrices with matrix3d_1 == matrix3d_2, the result is False.
How can this be fixed?

Parallel processing of a 3d matrix
Python's built-in map, like pool.map, can only pass a single input object to the mapped function (see for example https://stackoverflow.com/a/10973817/4045774). To reduce the call to a single free input we can use functools.partial to bind the other arguments; the input that remains free has to be in the last position. Note that your attempt bound x and y to the first two parameters of func(matrix, x, y), which is why the results differed.
from functools import partial
import numpy as np
import multiprocessing as mp

def func(x, y, matrix):
    return matrix*x + y

def main():
    matrix3d = np.empty([10, 10, 1000])
    matrix3d[:] = np.random.randint(10)
    matrix3d_1 = np.empty([10, 10, 1000])
    x = 10
    y = 1
    pool = mp.Pool(processes=4)
    func_p = partial(func, x, y)
    # parallel map returns a list
    res = pool.map(func_p, (matrix3d[:, :, z] for z in xrange(0, matrix3d.shape[2])))
    # copy the data back into the array
    for i in xrange(0, matrix3d.shape[2]):
        matrix3d_1[:, :, i] = res[i]
    pool.close()

# the guard is required so the worker processes don't re-execute the module
if __name__ == "__main__":
    main()
Parallel version using numba
This version scales well across all cores and is at least 200 times faster than the simple multiprocessing shown above. I have modified the code you linked to a bit, to get rid of all dependencies other than numpy.
import numpy
from numba import njit, prange

def meanInterp(data, m, n):
    newData = numpy.zeros((m, n), dtype=numpy.float32)
    mOrig, nOrig = data.shape
    hBoundariesOrig, vBoundariesOrig = numpy.linspace(0, 1, mOrig+1), numpy.linspace(0, 1, nOrig+1)
    hBoundaries, vBoundaries = numpy.linspace(0, 1, m+1), numpy.linspace(0, 1, n+1)
    for iOrig in range(mOrig):
        for jOrig in range(nOrig):
            for i in range(m):
                if hBoundaries[i+1] <= hBoundariesOrig[iOrig]: continue
                if hBoundaries[i] >= hBoundariesOrig[iOrig+1]: break
                for j in range(n):
                    if vBoundaries[j+1] <= vBoundariesOrig[jOrig]: continue
                    if vBoundaries[j] >= vBoundariesOrig[jOrig+1]: break
                    #boxCoords = ((hBoundaries[i], vBoundaries[j]), (hBoundaries[i+1], vBoundaries[j+1]))
                    #origBoxCoords = ((hBoundariesOrig[iOrig], vBoundariesOrig[jOrig]), (hBoundariesOrig[iOrig+1], vBoundariesOrig[jOrig+1]))
                    #area = overlap(boxCoords, origBoxCoords)
                    #hopefully this is equivalent (not tested) -----
                    T_x = (hBoundaries[i], hBoundaries[i+1], hBoundariesOrig[iOrig], hBoundariesOrig[iOrig+1])
                    T_y = (vBoundaries[j], vBoundaries[j+1], vBoundariesOrig[jOrig], vBoundariesOrig[jOrig+1])
                    tx = (T_x[1]-T_x[0]+T_x[3]-T_x[2]) - (max(T_x)-min(T_x))
                    ty = (T_y[1]-T_y[0]+T_y[3]-T_y[2]) - (max(T_y)-min(T_y))
                    area = tx*ty
                    #------------------------
                    newData[i][j] += area * data[iOrig][jOrig] / (hBoundaries[1] * vBoundaries[1])
    return newData

def resample_3d(matrix_3d, x, y):
    matrix3d_res = numpy.empty((x, y, matrix_3d.shape[2]), dtype=numpy.float32)
    for z in prange(0, matrix_3d.shape[2]):
        matrix3d_res[:, :, z] = nb_meanInterp(matrix_3d[:, :, z], x, y)
    return matrix3d_res

# compile after the functions are defined, otherwise the names are unbound
nb_meanInterp = njit("float32[:,:](float32[:,:],int64,int64)")(meanInterp)
resample_3d_nb = njit("float32[:,:,:](float32[:,:,:],int64,int64)", parallel=True)(resample_3d)
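A minimal usage sketch (my addition, not part of the original answer); the compiled function expects a float32 array and the target slice shape:
import numpy

matrix3d = numpy.random.rand(10, 10, 1000).astype(numpy.float32)  # hypothetical input
result = resample_3d_nb(matrix3d, 20, 20)  # resample every 10x10 slice to 20x20
print(result.shape)  # (20, 20, 1000)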

Related

How to fix "TypeError: only length-1 arrays can be converted to Python scalars" in python regression

I tried to use the scipy.optimize package for regression. The model function is defined in func with parameters named coeffs. I want to use the data xdata and ydata to learn the parameters by the least-squares criterion.
I get the following TypeError: only length-1 arrays can be converted to Python scalars
from __future__ import division
import numpy
from math import exp
import scipy.optimize as optimization
from scipy.optimize import leastsq

t0 = 0.25
m0 = 1

def func(t, coeffs):
    a = coeffs[0]
    b = coeffs[1]
    m = (a/b + m0)*exp(b*(t - t0)) - a/b
    return m

def residuals(coeffs, y, t):
    return y - func(t, coeffs)

# fitting test
x0 = numpy.array([5, -5], dtype=float)
xdata = numpy.array([0.25, 0.5, 1])
ydata = numpy.array([1.0, 0.803265329856, 0.611565080074])

x = leastsq(residuals, x0, args=(ydata, xdata))
The returned parameters are expected to be around [2, -1].
Do not use from math import exp; replace it with from numpy import exp so that your arrays are handled correctly: numpy.exp returns the array that scipy expects, with each element replaced by its exponential.
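A minimal sketch of the fix (my addition; only the import and func change, the rest of the script stays as above):
from numpy import exp  # vectorized: applies element-wise to arrays

def func(t, coeffs):
    a, b = coeffs
    return (a/b + m0)*exp(b*(t - t0)) - a/b  # t may now be an array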

tf.py_func: custom tensorflow function getting applied to only the first element in the tensor

I am new to tensorflow and was playing around with a deep learning network. I wanted to apply a custom rounding to all the weights after each iteration, since the round function in the tensorflow library doesn't give you the option to round values to a certain number of decimal places.
So I wrote this:
import numpy as np
import tensorflow as tf
from tensorflow.python.framework import ops

np_prec = lambda x: np.round(x, 3).astype(np.float32)

def tf_prec(x, name=None):
    with ops.name_scope("d_spiky", name, [x]) as name:
        y = tf.py_func(np_prec,
                       [x],
                       [tf.float32],
                       name=name,
                       stateful=False)
        return y[0]

with tf.Session() as sess:
    x = tf.constant([0.234567, 0.712, 1.2, 1.7])
    y = tf_prec(x)
    y = tf_prec(x)
    tf.global_variables_initializer
    print(x.eval(), y.eval())
The output I got was this:
[ 0.234567 0.71200001 1.20000005 1.70000005] [ 0.235 0.71200001 1.20000005 1.70000005]
So the custom rounding worked only on the first item in the tensor, and I am not sure what I am doing wrong. Thanks in advance.
The error here is because of the following line:
np_prec = lambda x: np.round(x,3).astype(np.float32)
You are casting the output to np.float32. You can verify the error with the following code:
print(np.round([0.234567,0.712,1.2,1.7], 3).astype(np.float32)) #prints [ 0.235 0.71200001 1.20000005 1.70000005]
The default output of np.round is float64. Moreover, you also have to change the Tout argument in tf.py_func to float64.
I have given the following code with the above fix and commented where necessary.
import numpy as np
import tensorflow as tf
from tensorflow.python.framework import ops

np_prec = lambda x: np.round(x, 3)

def tf_prec(x, name=None):
    with ops.name_scope("d_spiky", name, [x]) as name:
        y = tf.py_func(np_prec,
                       [x],
                       [tf.float64],  # changed this line to tf.float64
                       name=name,
                       stateful=False)
        return y[0]

with tf.Session() as sess:
    x = tf.constant([0.234567, 0.712, 1.2, 1.7], dtype=np.float64)  # specify the input data type np.float64
    y = tf_prec(x)
    y = tf_prec(x)
    tf.global_variables_initializer
    print(x.eval(), y.eval())
Hope this helps.
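As an aside (my addition, not part of the original answer), rounding to a fixed number of decimal places can also be done without tf.py_func by scaling, rounding to the nearest integer, and scaling back, which keeps the op inside the graph:
import tensorflow as tf

x = tf.constant([0.234567, 0.712, 1.2, 1.7], dtype=tf.float64)
y = tf.round(x * 1000) / 1000  # round to 3 decimal places

with tf.Session() as sess:
    print(sess.run(y))  # expected: [0.235 0.712 1.2 1.7]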

write a recursive function to find hermite polynomials

I have the recurrence relation for the Hermite polynomials:
H_{n+1}(x) = 2x H_n(x) − 2n H_{n−1}(x), n ≥ 1,
H_0(x) = 1, H_1(x) = 2x.
I need to write def hermite(x, n) for any Hermite polynomial H_n(x) using Python 2.7,
and make a plot of H_5(x) on the interval x ∈ [−1, 1].
Recursion is trivial here since the formula gives it directly. Just one small trap: you compute H_n(x), not H_{n+1}(x), so subtract 1 from every occurrence of n:
def hermite(x, n):
    if n == 0:
        return 1
    elif n == 1:
        return 2*x
    else:
        return 2*x*hermite(x, n-1) - 2*(n-1)*hermite(x, n-2)
A small test:
for i in range(0, 5):
    print(hermite(1, i))
1
2
2
-4
-20
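One caveat (my addition): the doubly recursive version recomputes lower orders exponentially many times, which gets slow for large n. An iterative sketch of the same recurrence avoids that:
def hermite_iter(x, n):
    # iterate H_{k+1} = 2*x*H_k - 2*k*H_{k-1}, carrying the last two values
    h_prev, h = 1, 2*x  # H_0 and H_1
    if n == 0:
        return h_prev
    for k in range(1, n):
        h_prev, h = h, 2*x*h - 2*k*h_prev
    return h

print(hermite_iter(1, 4))  # -20, matches the recursive version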
Alternatively, using scipy.special.hermite:
import math
import numpy as np
import matplotlib.pyplot as plt
from scipy.special import hermite

def HERMITE(X, N):
    HER = hermite(N)
    sn = HER(X)
    return sn

xvals = np.linspace(-1.0, 1.0, 1000)
for n in np.arange(0, 7, 1):
    sol = HERMITE(xvals, n)
    plt.plot(xvals, sol, "-.", label="n = " + str(n), linewidth=2)
plt.xticks(fontsize=14, fontweight="bold")
plt.yticks(fontsize=14, fontweight="bold")
plt.grid()
plt.legend()
plt.show()
And a plain recursive version with the plot:
import math
import numpy as np
import matplotlib.pyplot as plt

def HER(x, n):
    if n == 0:
        return 1.0 + 0.0*x
    elif n == 1:
        return 2.0*x
    else:
        return 2.0*x*HER(x, n-1) - 2.0*(n-1)*HER(x, n-2)

xvals = np.linspace(-np.pi, np.pi, 1000)
for N in np.arange(0, 7, 1):
    sol = HER(xvals, N)
    plt.plot(xvals, sol, label="n = " + str(N))
plt.xticks(fontsize=14, fontweight="bold")
plt.yticks(fontsize=14, fontweight="bold")
plt.grid()
plt.legend()
plt.show()
# Code is working perfectly fine
Feel free to ask any question...

Gradient of kriged function in OpenMDAO

I am currently coding a Multiple Gradient Descent algorithm, where I use kriged functions.
My problem is that I can't find out how to obtain the gradient of the kriged function (I tried to use linearize, but I don't know how to make it work).
from __future__ import print_function
from six import moves
from random import shuffle
import sys
import numpy as np
from numpy import linalg as LA
import math
from openmdao.braninkm import F, G, DF, DG
from openmdao.api import Group, Component, IndepVarComp
from openmdao.api import MetaModel
from openmdao.api import KrigingSurrogate, FloatKrigingSurrogate

def rand_lhc(b, k):
    # Calculates a random Latin hypercube set of n points in k dimensions within the [0, n-1]^k hypercube.
    arr = np.zeros((2*b, k))
    row = list(moves.xrange(-b, b))
    for i in moves.xrange(k):
        shuffle(row)
        arr[:, i] = row
    return arr/b*1.2

class TrigMM(Group):
    ''' FloatKriging gives responses as floats '''
    def __init__(self):
        super(TrigMM, self).__init__()
        # Create meta_model for f_x as the response
        F_mm = self.add("F_mm", MetaModel())
        F_mm.add_param('X', val=np.array([0., 0.]))
        F_mm.add_output('f_x:float', val=0., surrogate=FloatKrigingSurrogate())
        # F_mm.add_output('df_x:float', val=0., surrogate=KrigingSurrogate().linearize)
        #F_mm.linearize('X', 'f_x:float')
        #F_mm.add_output('g_x:float', val=0., surrogate=FloatKrigingSurrogate())
        print('init ok')
        self.add('p1', IndepVarComp('X', val=np.array([0., 0.])))
        self.connect('p1.X', 'F_mm.X')
        # Create meta_model for g_x as the response
        G_mm = self.add("G_mm", MetaModel())
        G_mm.add_param('X', val=np.array([0., 0.]))
        G_mm.add_output('g_x:float', val=0., surrogate=FloatKrigingSurrogate())
        #G_mm.add_output('df_x:float', val=0., surrogate=KrigingSurrogate().linearize)
        #G_mm.linearize('X', 'g_x:float')
        self.add('p2', IndepVarComp('X', val=np.array([0., 0.])))
        self.connect('p2.X', 'G_mm.X')

from openmdao.api import Problem

prob = Problem()
prob.root = TrigMM()
prob.setup()
u = 4
v = 3
# training with a Latin hypercube
prob['F_mm.train:X'] = rand_lhc(20, 2)
prob['G_mm.train:X'] = rand_lhc(20, 2)
#prob['F_mm.train:X'] = rand_lhc(10, 2)
#prob['G_mm.train:X'] = rand_lhc(10, 2)
#prob['F_mm.linearize:X'] = rand_lhc(10, 2)
#prob['G_mm.linearize:X'] = rand_lhc(10, 2)
datF = []
datG = []
datDF = []
datDG = []
for i in range(len(prob['F_mm.train:X'])):
    datF.append(F(np.array([prob['F_mm.train:X'][i]]), u))
    #datG.append(G(np.array([prob['F_mm.train:X'][i]]), v))
data_trainF = np.fromiter(datF, np.float)
for i in range(len(prob['G_mm.train:X'])):
    datG.append(G(np.array([prob['G_mm.train:X'][i]]), v))
data_trainG = np.fromiter(datG, np.float)
prob['F_mm.train:f_x:float'] = data_trainF
#prob['F_mm.train:g_x:float'] = data_trainG
prob['G_mm.train:g_x:float'] = data_trainG
Are you going to be writing a Multiple Gradient Descent driver? If so, then OpenMDAO calculates the gradient from a param to an output at the Problem level using the calc_gradient method.
If you take a look at the source code for the pyoptsparse driver:
https://github.com/OpenMDAO/OpenMDAO/blob/master/openmdao/drivers/pyoptsparse_driver.py
The _gradfunc method is a callback function that returns the gradients of the constraints and objectives with respect to the design variables. The MetaModel component has built-in analytic gradients for all (I think) of our surrogates, so you don't even have to declare any there.
If this isn't what you are trying to do, then I may need a little more information about your application.
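A minimal sketch of what that could look like for the model above (my addition; the exact calc_gradient signature should be checked against your OpenMDAO version):
# after prob.setup() and the training assignments shown in the question
prob.run()

# gradient of the kriged output with respect to the independent variable;
# 'p1.X' and 'F_mm.f_x:float' are the names from the question's model
J = prob.calc_gradient(['p1.X'], ['F_mm.f_x:float'], return_format='array')
print(J)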

Python how to plot graph sine wave

I have this signal:
from math import *

Fs = 8000
f = 500
sample = 16
a = [0]*sample
for n in range(sample):
    a[n] = sin(2*pi*f*n/Fs)
How can I plot a graph of this sine wave, with the xlabel named 'voltage(V)' and the ylabel named 'sample(n)'? What code will do this? I am so thankful for any help ^_^
Setting the x-axis with x = np.arange(0, 1, 0.001) gives an array of 1000 points from 0 to 1 in 0.001 increments; with y = np.sin(2*np.pi*x) you get one full period of the sine wave sampled 1000 times. I hope this will help:
import matplotlib.pyplot as plt
import numpy as np

Fs = 8000
f = 5
sample = 8000
x = np.arange(sample)
y = np.sin(2 * np.pi * f * x / Fs)
plt.plot(x, y)
plt.xlabel('sample(n)')
plt.ylabel('voltage(V)')
plt.show()
P.S.: For convenient interactive work you can use the Jupyter Notebook.
import matplotlib.pyplot as plt  # for plotting
import numpy as np               # to work with numerical data efficiently

fs = 100  # sample rate
f = 2     # the frequency of the signal
x = np.arange(fs)  # the points on the x axis for plotting
# compute the value (amplitude) of the sine wave for each sample
y = np.sin(2*np.pi*f * (x/fs))

# this instruction can only be used in an IPython/Jupyter Notebook
%matplotlib inline

# show the exact location of the samples
plt.stem(x, y, 'r')
plt.plot(x, y)
import numpy as np
import matplotlib.pyplot as plt
F = 5.e2 # No. of cycles per second, F = 500 Hz
T = 2.e-3 # Time period, T = 2 ms
Fs = 50.e3 # No. of samples per second, Fs = 50 kHz
Ts = 1./Fs # Sampling interval, Ts = 20 us
N = int(T/Ts) # No. of samples for 2 ms, N = 100
t = np.linspace(0, T, N)
signal = np.sin(2*np.pi*F*t)
plt.plot(t, signal)
plt.xlabel('Time (s)')
plt.ylabel('Voltage (V)')
plt.show()
import math
import turtle
ws = turtle.Screen()
ws.bgcolor("lightblue")
fred = turtle.Turtle()
for angle in range(360):
    y = math.sin(math.radians(angle))
    fred.goto(angle, y * 80)
ws.exitonclick()
The window of usefulness has likely come and gone, but I was working on a similar problem. Here is my attempt at plotting sine using the turtle module.
from turtle import *
from math import *

# init turtle
T = Turtle()
# set up the coordinate system
T.screen.setworldcoordinates(-1, -1, 1, 1)
# speed up the turtle
T.speed(-1)
# range of hundredths from -1 to 1
xcoords = map(lambda x: x/100.0, xrange(-100, 101))
# set up the origin
T.pu(); T.goto(-1, 0); T.pd()
# move the turtle
for x in xcoords:
    T.goto(x, sin(xcoords.index(x)))
A simple way to plot a sine wave in Python using matplotlib:
import numpy as np
import matplotlib.pyplot as plt

x = np.arange(0, 3*np.pi, 0.1)
y = np.sin(x)
plt.plot(x, y)
plt.title("SINE WAVE")
plt.show()
import matplotlib.pyplot as plt
import numpy as np
#%matplotlib inline

x = list(range(10))

def fun(k):
    return np.sin(k)

y = list(map(fun, x))
plt.plot(x, y, '-.')
#print(x)
#print(y)
plt.show()
This is another option:
#!/usr/bin/env python
import numpy as np
import matplotlib
matplotlib.use('TkAgg')  # use the matplotlib backend TkAgg (optional)
import matplotlib.pyplot as plt

sample_rate = 200  # sampling frequency in Hz (at least 2 times f)
t = np.linspace(0, 5, sample_rate)  # time axis
f = 100  # signal frequency in Hz
sig = np.sin(2*np.pi*f*(t/sample_rate))
plt.plot(t, sig)
plt.xlabel("Time")
plt.ylabel("Amplitude")
plt.tight_layout()
plt.show()
Yet another way to plot the sine wave:
import numpy as np
import matplotlib
matplotlib.use('TkAgg')  # use the matplotlib backend TkAgg (optional)
import matplotlib.pyplot as plt

t = np.linspace(0.0, 5.0, 50000)  # time axis
sig = np.sin(t)
plt.plot(t, sig)
plt.show()
from math import *

Fs = 8000
f = 500
sample = 16
a = [0] * sample
for n in range(sample):
    a[n] = sin(2*pi*f*n/Fs)

Creating the x coordinates:
Sample = [i for i in range(sample)]

Importing matplotlib for plotting:
import matplotlib.pyplot as plt

Adding labels and plotting:
plt.xlabel('Voltage(V)')
plt.ylabel('Sample(n)')
plt.plot(Sample, a)
plt.show()