I'm trying to get a "getting started with pickles" script working. I managed to save a pickle file from a script and load it back in the same script. But when I save a pickle file in one file (main.py in this case) and load it from another, I get an error. I probably missed something small, but I can't figure out what.
main.py
import pickle

class Node:
    """This class represents a node"""
    def __init__(self, value=None):
        self.val = value

    def toString(self):
        return self.val

class Link:
    """This class represents a link between 2 nodes"""
    def __init__(self, sourceNode, targetNode, LinkWigth):
        self.source = sourceNode
        self.target = targetNode
        self.wight = LinkWigth

    def setWeight(self, newWeight):
        self.wight = newWeight

    def toString(self):
        return self.wight

class Graph:
    """This class represents a graph"""
    def __init__(self):
        self.nodes = []
        self.links = []

    def addNode(self, node):
        self.nodes.append(node)

    def addLink(self, link):
        self.links.append(link)

    def getInDegree(self, node):
        counter = 0
        for link in self.links:
            if link.target == node:
                counter += 1
            else:
                print "target is: %s" % link.target.toString()
                print "source is: %s" % link.source.toString()
        return counter

    def toString(self):
        for link in self.links:
            print link.toString()
        for node in self.nodes:
            print node.toString()

if __name__ == "__main__":
    n1 = Node(4)
    l1 = Link(n1, n1, 1)
    g = Graph()
    g.addNode(n1)
    g.addLink(l1)
    pickle.dump(g, open('haha', 'wb'))
pickleLoader.py
import pickle
import main
n = main.Node(44)
print n.toString()
g = pickle.load( open('haha', 'rb') )
print "ha"
The error
C:\Users\R\Desktop\pickle test>main.py
C:\Users\R\Desktop\pickle test>pickleLoader.py
44
Traceback (most recent call last):
File "C:\Users\R\Desktop\pickle test\pickleLoader.py", line 7, in <module>
g = pickle.load( open('haha', 'rb') )
File "C:\Program Files\Python27\lib\pickle.py", line 1378, in load
return Unpickler(file).load()
File "C:\Program Files\Python27\lib\pickle.py", line 858, in load
dispatch[key](self)
File "C:\Program Files\Python27\lib\pickle.py", line 1069, in load_inst
klass = self.find_class(module, name)
File "C:\Program Files\Python27\lib\pickle.py", line 1126, in find_class
klass = getattr(mod, name)
AttributeError: 'module' object has no attribute 'Graph'
C:\Users\R\Desktop\pickle test>
I guess the problem has something to do with the namespace, because main.py has been imported, but I have no idea how to get it working.
This does appear to be related to how the classes are defined in relation to the module: because main.py was run as a script, the classes were pickled under the __main__ module, so when pickleLoader.py runs as a script the unpickler looks for Graph in its own namespace and cannot find it. A quick way to allow this to work is to import the components of the main module directly into pickleLoader:
from main import Graph, Node, Link
A better solution might be to move the common components (Graph, Node, Link) into their own module, and then import that module into both main and pickleLoader.
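For example (a minimal sketch; graphlib.py is a hypothetical module name), with the class definitions moved out of main.py:

# graphlib.py -- hypothetical shared module holding Node, Link and Graph
# (move the three class definitions from main.py here, unchanged)

# main.py
import pickle
from graphlib import Node, Link, Graph

if __name__ == "__main__":
    n1 = Node(4)
    g = Graph()
    g.addNode(n1)
    g.addLink(Link(n1, n1, 1))
    pickle.dump(g, open('haha', 'wb'))

# pickleLoader.py
import pickle
import graphlib  # not strictly required: pickle imports graphlib itself when loading

g = pickle.load(open('haha', 'rb'))
g.toString()  # prints the graph's links and nodes

Because the classes now live in graphlib, the pickle records them as graphlib.Graph etc., so any script that can import graphlib can unpickle the graph.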
I have provided the following Python code, but the problem is that when it does not receive any input, it starts showing an error. How can I modify the code so that this error does not appear?
#!/usr/bin/env python
from roslib import message
import rospy
import sensor_msgs.point_cloud2 as pc2
from sensor_msgs.msg import PointCloud2, PointField
import numpy as np
import ros_numpy
from geometry_msgs.msg import Pose

# listener
def listen():
    rospy.init_node('listen', anonymous=True)
    rospy.Subscriber("/Filtered_points_x", PointCloud2, callback_kinect)

def callback_kinect(data):
    pub = rospy.Publisher('lidar_distance', Pose, queue_size=10)
    data_lidar = Pose()
    xyz_array = ros_numpy.point_cloud2.pointcloud2_to_xyz_array(data)
    print(xyz_array)
    mini_data = min(xyz_array[:, 0])
    print("mini_data", mini_data)
    data_lidar.position.x = mini_data
    pub.publish(data_lidar)
    print("data_points", data_lidar.position.x)
    height = int(data.height / 2)
    middle_x = int(data.width / 2)
    middle = read_depth(middle_x, height, data)  # do stuff with middle

def read_depth(width, height, data):
    if (height >= data.height) or (width >= data.width):
        return -1
    data_out = pc2.read_points(data, field_names=('x', 'y', 'z'), skip_nans=True, uvs=[[width, height]])
    int_data = next(data_out)
    rospy.loginfo("int_data " + str(int_data))
    return int_data

if __name__ == '__main__':
    try:
        listen()
        rospy.spin()
    except rospy.ROSInterruptException:
        pass
The following is the error that I mentioned:
[[ 7.99410915 1.36072445 -0.99567264]]
('mini_data', 7.994109153747559)
('data_points', 7.994109153747559)
[INFO] [1662109961.035894]: int_data (7.994109153747559, 1.3607244491577148, -0.9956726431846619)
[]
[ERROR] [1662109961.135572]: bad callback: <function callback_kinect at 0x7f9346d44230>
Traceback (most recent call last):
File "/opt/ros/kinetic/lib/python2.7/dist-packages/rospy/topics.py", line 750, in _invoke_callback
cb(msg)
File "/home/masoumeh/catkin_ws/src/yocs_velocity_smoother/test4/distance_from_pointcloud.py", line 27, in callback_kinect
mini_data = min(xyz_array[:,0])
ValueError: min() arg is an empty sequence
The code is still receiving input, but specifically it's receiving an empty array. You're then trying to slice the empty array, which causes the error. Instead you should check that the array has elements before the line min(xyz_array[:,0]). It can be as simple as:
if xyz_array.size == 0:
    return
As another note, you're creating a publisher inside the callback. You shouldn't do this, because a new publisher is created every time the callback is invoked. Instead, create it once and reuse it, for example as a module-level (global) variable, as in the sketch below.
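A minimal sketch of both changes together (keeping your names; creating the publisher once inside listen() and letting callback_kinect reuse it is just one way to organise it):

import rospy
import ros_numpy
from sensor_msgs.msg import PointCloud2
from geometry_msgs.msg import Pose

pub = None  # module-level publisher, created once in listen()

def listen():
    global pub
    rospy.init_node('listen', anonymous=True)
    pub = rospy.Publisher('lidar_distance', Pose, queue_size=10)
    rospy.Subscriber("/Filtered_points_x", PointCloud2, callback_kinect)

def callback_kinect(data):
    xyz_array = ros_numpy.point_cloud2.pointcloud2_to_xyz_array(data)
    if xyz_array.size == 0:  # skip empty clouds instead of calling min() on them
        return
    data_lidar = Pose()
    data_lidar.position.x = min(xyz_array[:, 0])
    pub.publish(data_lidar)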
It's my first post, I hope it is well done.
I'm trying to run the following Zipline algorithm with local AAPL data:
import pandas as pd
from collections import OrderedDict
import pytz

from zipline.api import order, symbol, record, order_target
from zipline.algorithm import TradingAlgorithm

data = OrderedDict()
data['AAPL'] = pd.read_csv('AAPL.csv', index_col=0, parse_dates=['Date'])

panel = pd.Panel(data)
panel.minor_axis = ['Open', 'High', 'Low', 'Close', 'Volume', 'Price']
panel.major_axis = panel.major_axis.tz_localize(pytz.utc)
print panel["AAPL"]

def initialize(context):
    context.security = symbol('AAPL')

def handle_data(context, data):
    MA1 = data[context.security].mavg(50)
    MA2 = data[context.security].mavg(100)
    date = str(data[context.security].datetime)[:10]
    current_price = data[context.security].price
    current_positions = context.portfolio.positions[symbol('AAPL')].amount
    cash = context.portfolio.cash
    value = context.portfolio.portfolio_value
    current_pnl = context.portfolio.pnl

    # code (this will come under handle_data function only)
    if (MA1 > MA2) and current_positions == 0:
        number_of_shares = int(cash / current_price)
        order(context.security, number_of_shares)
        record(date=date, MA1=MA1, MA2=MA2, Price=current_price, status="buy",
               shares=number_of_shares, PnL=current_pnl, cash=cash, value=value)
    elif (MA1 < MA2) and current_positions != 0:
        order_target(context.security, 0)
        record(date=date, MA1=MA1, MA2=MA2, Price=current_price, status="sell",
               shares="--", PnL=current_pnl, cash=cash, value=value)
    else:
        record(date=date, MA1=MA1, MA2=MA2, Price=current_price, status="--",
               shares="--", PnL=current_pnl, cash=cash, value=value)

# initializing trading environment
algo_obj = TradingAlgorithm(initialize=initialize, handle_data=handle_data)

# run algo
perf_manual = algo_obj.run(panel)

# calculation
print "total pnl : " + str(float(perf_manual[["PnL"]].iloc[-1]))

buy_trade = perf_manual[["status"]].loc[perf_manual["status"] == "buy"].count()
sell_trade = perf_manual[["status"]].loc[perf_manual["status"] == "sell"].count()
total_trade = buy_trade + sell_trade

print "buy trade : " + str(int(buy_trade)) + " sell trade : " + str(int(sell_trade)) + " total trade : " + str(int(total_trade))
I was inspired by https://www.quantinsti.com/blog/introduction-zipline-python/ and https://www.quantinsti.com/blog/importing-csv-data-zipline-backtesting/.
I get this error:
Traceback (most recent call last):
File "C:/Users/main/Desktop/docs/ALGO_TRADING/_DATAS/_zipline_data_bundle /temp.py", line 51, in <module>
algo_obj = TradingAlgorithm(initialize=initialize, handle_data=handle_data)
File "C:\Python27-32\lib\site-packages\zipline\algorithm.py", line 273, in __init__
self.trading_environment = TradingEnvironment()
File "C:\Python27-32\lib\site-packages\zipline\finance\trading.py", line 99, in __init__
self.bm_symbol,
File "C:\Python27-32\lib\site-packages\zipline\data\loader.py", line 166, in load_market_data
environ,
File "C:\Python27-32\lib\site-packages\zipline\data\loader.py", line 230, in ensure_benchmark_data
last_date,
File "C:\Python27-32\lib\site-packages\zipline\data\benchmarks.py", line 50, in get_benchmark_returns
last_date
File "C:\Python27-32\lib\site-packages\pandas_datareader\data.py", line 137, in DataReader
session=session).read()
File "C:\Python27-32\lib\site-packages\pandas_datareader\base.py", line 181, in read
params=self._get_params(self.symbols))
File "C:\Python27-32\lib\site-packages\pandas_datareader\base.py", line 79, in _read_one_data
out = self._read_url_as_StringIO(url, params=params)
File "C:\Python27-32\lib\site-packages\pandas_datareader\base.py", line 90, in _read_url_as_StringIO
response = self._get_response(url, params=params)
File "C:\Python27-32\lib\site-packages\pandas_datareader\base.py", line 139, in _get_response
raise RemoteDataError('Unable to read URL: {0}'.format(url))
pandas_datareader._utils.RemoteDataError: Unable to read URL: http://www.google.com/finance/historical?q=SPY&startdate=Dec+29%2C+1989&enddate=Dec+20%2C+2017&output=csv
I don't understand: "http://www.google.com/finance/historical?q=SPY&startdate=Dec+29%2C+1989&enddate=Dec+20%2C+2017&output=csv".
I didn't ask for any online data request, and I'm working with the 'AAPL' stock, not 'SPY'...
What does this error mean to you?
Thanks a lot for your help!
C.
The only reference and workaround I found regarding this issue is here:
from pandas_datareader.google.daily import GoogleDailyReader

@property
def url(self):
    return 'http://finance.google.com/finance/historical'

GoogleDailyReader.url = url
do:
pip install fix_yahoo_finance
then modify the file: zipline/lib/pythonx.x/site-packages/zipline/data/benchmarks.py
add the following two statements to the file:
import fix_yahoo_finance as yf
yf.pdr_override ()
then change the following instruction:
data = pd_reader.DataReader(symbol, 'Google', first_date, last_date)
to:
data = pd_reader.get_data_yahoo(symbol,first_date, last_date)
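Roughly, the edited section of benchmarks.py would then look like the sketch below (illustrative only; the exact surrounding code differs between zipline versions, and everything not shown stays unchanged):

# Sketch of the edited zipline/data/benchmarks.py
import pandas_datareader.data as pd_reader
import fix_yahoo_finance as yf

yf.pdr_override()  # route pandas_datareader requests to Yahoo Finance

def get_benchmark_returns(symbol, first_date, last_date):
    # before: data = pd_reader.DataReader(symbol, 'Google', first_date, last_date)
    data = pd_reader.get_data_yahoo(symbol, first_date, last_date)
    # ... the remaining processing of `data` from the original function is unchanged
    return data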
thisfile.py
import cPickle
import gzip
import os

import numpy

import theano
import theano.tensor as T

def load_data(dataset):
    f = gzip.open(dataset, 'rb')
    train_set, valid_set, test_set = cPickle.load(f)
    f.close()

    def shared_dataset(data_xy, borrow=True):
        data_x, data_y = data_xy
        shared_x = theano.shared(numpy.asarray(data_x,
                                               dtype=theano.config.floatX),
                                 borrow=borrow)
        shared_y = theano.shared(numpy.asarray(data_y,
                                               dtype=theano.config.floatX),
                                 borrow=borrow)
        return shared_x, T.cast(shared_y, 'int32')

    test_set_x, test_set_y = shared_dataset(test_set)
    valid_set_x, valid_set_y = shared_dataset(valid_set)
    train_set_x, train_set_y = shared_dataset(train_set)

    rval = [(train_set_x, train_set_y), (valid_set_x, valid_set_y),
            (test_set_x, test_set_y)]
    return rval

class PCA(object):

    def __init__(self):
        self.param = 0

    def dimemsion_transform(self, X):
        m_mean = T.mean(X, axis=0)
        X = X - m_mean  ##################### this line makes error
        return X

if __name__ == '__main__':
    dataset = 'mnist.pkl.gz'

    # load the MNIST data
    data = load_data(dataset)

    X = T.matrix('X')
    m_pca = PCA()

    transform = theano.function(
        inputs=[],
        outputs=m_pca.dimemsion_transform(X),
        givens={
            X: data
        }
    )
The error is shown below:
Traceback (most recent call last):
File ".../thisfile.py", line 101, in <module>
X: data
File ".../Theano/theano/compile/function.py", line 322, in function
output_keys=output_keys)
File ".../Theano/theano/compile/pfunc.py", line 443, in pfunc
no_default_updates=no_default_updates)
File ".../Theano/theano/compile/pfunc.py", line 219, in rebuild_collect_shared
cloned_v = clone_v_get_shared_updates(v, copy_inputs_over)
File ".../Theano/theano/compile/pfunc.py", line 93, in clone_v_get_shared_updates
clone_v_get_shared_updates(i, copy_inputs_over)
File ".../Theano/theano/compile/pfunc.py", line 93, in clone_v_get_shared_updates
clone_v_get_shared_updates(i, copy_inputs_over)
File ".../Theano/theano/compile/pfunc.py", line 93, in clone_v_get_shared_updates
clone_v_get_shared_updates(i, copy_inputs_over)
File ".../Theano/theano/compile/pfunc.py", line 96, in clone_v_get_shared_updates
[clone_d[i] for i in owner.inputs], strict=rebuild_strict)
File ".../Theano/theano/gof/graph.py", line 242, in clone_with_new_inputs
new_inputs[i] = curr.type.filter_variable(new)
File ".../Theano/theano/tensor/type.py", line 234, in filter_variable
self=self))
TypeError: Cannot convert Type Generic (of Variable <Generic>) into Type TensorType(float64, matrix). You can try to manually convert <Generic> into a TensorType(float64, matrix).
I am writing a PCA function with Theano but I have a problem: the mean value is subtracted from the MNIST data in dimension_transform in the PCA class.
I do not understand why this gives a type-matching error, or how to fix it.
Your problem comes from these lines:
data = load_data(dataset)
Here data is a list (as this is what load_data() returns).
transform = theano.function(
inputs=[],
outputs=m_pca.dimemsion_transform(X),
givens={
X: data
}
)
And here you pass it as a value. You have to extract the item you want from the return value of load_data() like so:
[(train_set_x, train_set_y), (valid_set_x, valid_set_y),
(test_set_x, test_set_y)] = load_data(dataset)
and then use
givens={
X: train_set_x
}
or one of the other values.
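Putting it together, the __main__ block could look roughly like this (a sketch assuming you want to transform the training images; train_set_x is already a Theano shared variable of the right TensorType, so it is valid in givens):

if __name__ == '__main__':
    dataset = 'mnist.pkl.gz'

    # unpack the return value of load_data() instead of passing the whole list
    [(train_set_x, train_set_y), (valid_set_x, valid_set_y),
     (test_set_x, test_set_y)] = load_data(dataset)

    X = T.matrix('X')
    m_pca = PCA()

    transform = theano.function(
        inputs=[],
        outputs=m_pca.dimemsion_transform(X),
        givens={
            X: train_set_x  # a shared matrix variable, not a Python list
        }
    )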
Hi guys, I want to ask about json.dump.
I am using scikit-learn to tune a method's parameters and I want to dump the result to JSON, but I get an error.
I have a KNN classifier with these parameters:
KNeighborsClassifier(algorithm=u'auto', leaf_size=30, metric=u'manhattan',
metric_params=None, n_jobs=-1, n_neighbors=300, p=2,
weights=u'distance')
But I get an error like this:
Traceback (most recent call last):
File "jamu.py", line 1018, in <module>
main(argv)
File "jamu.py", line 863, in main
json.dumps(meta_clf, f)
File "C:\Python27\lib\json\__init__.py", line 250, in d
sort_keys=sort_keys, **kw).encode(obj)
File "C:\Python27\lib\json\encoder.py", line 207, in en
chunks = self.iterencode(o, _one_shot=True)
File "C:\Python27\lib\json\encoder.py", line 270, in it
return _iterencode(o, 0)
File "C:\Python27\lib\json\encoder.py", line 184, in de
raise TypeError(repr(o) + " is not JSON serializable"
TypeError: KNeighborsClassifier(algorithm=u'auto', leaf_s
tan',
metric_params=None, n_jobs=-1, n_neighbors=300
weights=u'distance') is not JSON serializable
Is anything wrong with my code?
It is what it says on the tin, KNeighborsClassifier cannot be serialised with json.
You'll have to use a different way to serialise a model. For example you can use joblib:
from sklearn.externals import joblib
# Suppose your KNeighborsClassifier model is called knn
joblib.dump(knn, 'some/kind/of/path/knn.joblib')
Loading a model is equally simple:
knn = joblib.load('some/kind/of/path/knn.joblib')
Check the joblib docs for what else it is good for.
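As a side note, if all you actually need in the JSON file is the hyperparameters (not the fitted model), scikit-learn estimators expose get_params(), which returns a plain dict that json can serialise directly. A small sketch, reusing the hypothetical knn model name from above:

import json
from sklearn.neighbors import KNeighborsClassifier

knn = KNeighborsClassifier(algorithm='auto', leaf_size=30, metric='manhattan',
                           metric_params=None, n_jobs=-1, n_neighbors=300, p=2,
                           weights='distance')

# get_params() returns the constructor arguments as a dict;
# for these values the dict is directly JSON serialisable.
with open('knn_params.json', 'w') as f:
    json.dump(knn.get_params(), f, indent=2)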
Finally, I used this approach:
meta_clf = KNeighborsClassifier(algorithm=u'auto', leaf_size=30, metric=u'manhattan',
                                metric_params=None, n_jobs=-1, n_neighbors=300, p=2,
                                weights=u'distance')

def print_to_json(meta_clf):
    meta_clf_str = str(meta_clf)
    meta_clf_str = meta_clf_str[meta_clf_str.index("(") + 1:meta_clf_str.rindex(")")]
    meta_clf_str = meta_clf_str.replace('\n ', '')
    meta_clf_str = meta_clf_str.replace(' ', '')
    meta_clf_str = meta_clf_str.replace('=u\'', '=\'')
    meta_clf_str = meta_clf_str.replace('\'', '')
    meta_clf_str_list = meta_clf_str.split(',')
    meta_clf_str_list_len = len(meta_clf_str_list)
    meta_clf_str_lists = []
    params = {}

    for x in meta_clf_str_list:
        meta_clf_str_list = x.split('=')
        if meta_clf_str_list[1].isdigit() == True:
            meta_clf_str_list[1] = int(meta_clf_str_list[1])
        meta_clf_str_lists.append(meta_clf_str_list)
        params[meta_clf_str_list[0]] = meta_clf_str_list[1]

    return params
It's enough for me.
Thanks for the answer, Thomas, I appreciate it.
I'm using the Boneh-Lynn-Shacham Identity Based Signature scheme in my final year project for obtaining encryption keys.
from charm.toolbox.pairinggroup import *
from charm.engine.util import *

debug = False

class IBSig():
    def __init__(self, groupObj):
        global group
        group = groupObj

    def dump(self, obj):
        ser_a = serializeDict(obj, group)
        return str(pickleObject(ser_a))

    def keygen(self, secparam=None):
        g, x = group.random(G2), group.random()
        g_x = g ** x
        pk = { 'g^x':g_x, 'g':g, 'identity':str(g_x), 'secparam':secparam }
        sk = { 'x':x }
        return (pk, sk)

    def sign(self, x, message):
        M = self.dump(message)
        if debug: print("Message => '%s'" % M)
        return group.hash(M, G1) ** x

    def verify(self, pk, sig, message):
        M = self.dump(message)
        h = group.hash(M, G1)
        if pair(sig, pk['g']) == pair(h, pk['g^x']):
            return True
        return False

def main():
    groupObj = PairingGroup('../param/d224.param')
    m = { 'a':"hello world!!!" , 'b':"test message" }
    bls = IBSig(groupObj)

    (pk, sk) = bls.keygen(0)

    sig = bls.sign(sk['x'], m)
    if debug: print("Message: '%s'" % m)
    if debug: print("Signature: '%s'" % sig)
    assert bls.verify(pk, sig, m)
    if debug: print('SUCCESS!!!')

if __name__ == "__main__":
    debug = True
    main()
When I run it in Python, the code is not able to find the module named pairing, even though I have added the Charm module to my library. I get an error like:
Traceback (most recent call last):
File "C:\Users\Sailesh\Desktop\bls.py", line 1, in <module>
from charm.toolbox.pairinggroup import *
File "C:\Python27\lib\charm\toolbox\pairinggroup.py", line 2, in <module>
from charm.core.math.pairing import serialize
ImportError: DLL load failed: The specified module could not be found.
I have taken the code from the Boneh-Lynn-Shacham Identity Based Signature code and downloaded the charm module from the charm module link. Let me know where the error is, or whether the problem is with the module. I can't figure out what the problem is. Thanks in advance.
Try the 0.43 version directly from GitHub:
https://github.com/JHUISI/charm/releases