How do I pass my input/output to this network? - python-2.7

I seem to have some problems starting my training, and I am not sure why.
The network has multiple inputs (72 1-D arrays) and its output is a 1-D array of length 24. The output array consists of numbers referring to 145 different classes.
So: 72 inputs => 24 outputs
Minimal working example - without the input/output being set.
import keras
import numpy as np
from keras import metrics
from keras import backend as K
from keras.models import Sequential, Model, load_model
from keras.layers import Input, Dropout, Conv1D, Conv2D, MaxPooling1D, MaxPooling2D, ZeroPadding2D
from keras.layers.core import Dense, Activation, Lambda, Reshape, Flatten
from keras.layers.advanced_activations import LeakyReLU, PReLU, ELU
from keras.layers.merge import Concatenate
from keras.utils import np_utils
from keras.callbacks import ReduceLROnPlateau, CSVLogger, EarlyStopping
def chunks(l, n):
    """Yield successive n-sized chunks from l."""
    for i in range(0, len(l), n):
        yield l[i:i + n]

nano_train_input = []
nano_train_output = []
nano_test_input = []
nano_test_output = []

## Creating train input:
for i in range(974):
    nano_train_input.append(np.random.random((78,684,4)))
    nano_train_output.append(np.random.randint(145,size=(228)).tolist())

## Creating test input:
for i in range(104):
    nano_test_input.append(np.random.random((78,684,4)))
    nano_test_output.append(np.random.randint(145,size=(228)).tolist())
def model(train_input, train_output, test_input, test_output, names=0):
    # Paper uses dimension (40 x 45 = (15 * 3))
    # Filter size 5
    # Pooling size
    # I use dimension (78 x 72 = (24 * 3))
    # Filter size 9
    print "In model"
    i = 0
    print_once = True
    data_test_output = []
    data_test_input = []
    for matrix in test_input:
        row, col, channel = matrix.shape
        remove_output = (col/3) % 24
        remove_input = col % 72
        if remove_output > 0:
            test_output[i] = test_output[i][:-(remove_output)]
        for split in chunks(test_output[i], 24):
            data_test_output.append(np.array(split))
        if remove_input > 0:
            out = np.split(matrix[:,:-(remove_input),:-1], matrix[:,:-(remove_input),:-1].shape[1]/72, axis=1)
        else:
            out = np.split(matrix[:,:,:-1], matrix[:,:,:-1].shape[1]/72, axis=1)
        data_test_input.extend(out)
        del out
        i = i + 1  # Increment

    i = 0
    data_train_output = []
    data_train_input = []
    for matrix in train_input:
        row, col, channel = matrix.shape
        remove_output = (col/3) % 24
        remove_input = col % 72
        if remove_output > 0:
            train_output[i] = train_output[i][:-(remove_output)]
        for split in chunks(train_output[i], 24):
            data_train_output.append(np.array(split))
        if remove_input > 0:
            out = np.split(matrix[:,:-(remove_input),:-1], matrix[:,:-(remove_input),:-1].shape[1]/72, axis=1)
        else:
            out = np.split(matrix[:,:,:-1], matrix[:,:,:-1].shape[1]/72, axis=1)
        data_train_input.extend(out)
        del out
        i = i + 1  # Increment

    print
    print "Len:"
    print len(data_train_input)
    print len(data_train_output)
    print len(data_test_input)
    print len(data_test_output)
    print
    print "Type[0]:"
    print type(data_train_input[0])
    print type(data_train_output[0])
    print type(data_test_input[0])
    print type(data_test_output[0])
    print
    print "Type:"
    print type(data_train_input)
    print type(data_train_output)
    print type(data_test_input)
    print type(data_test_output)
    print
    print "shape of [0]:"
    print data_train_input[0].shape
    print data_train_output[0].shape
    print data_test_input[0].shape
    print data_test_output[0].shape

    list_of_input = [Input(shape=(78, 3)) for i in range(72)]
    list_of_conv_output = []
    list_of_max_out = []
    for i in range(72):
        list_of_conv_output.append(Conv1D(filters=32, kernel_size=6, padding="same", activation='relu')(list_of_input[i]))
        list_of_max_out.append(MaxPooling1D(pool_size=3)(list_of_conv_output[i]))

    merge = keras.layers.concatenate(list_of_max_out)
    reshape = Flatten()(merge)

    dense1 = Dense(units=500, activation='relu', name="dense_1")(reshape)
    dense2 = Dense(units=250, activation='relu', name="dense_2")(dense1)
    dense3 = Dense(units=24, activation='softmax', name="dense_3")(dense2)

    model = Model(inputs=list_of_input, outputs=dense3)
    model.compile(loss="categorical_crossentropy", optimizer="adam", metrics=[metrics.sparse_categorical_accuracy])

    reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.1, patience=3, verbose=1, mode='auto', epsilon=0.01, cooldown=0, min_lr=1e-21)
    stop = EarlyStopping(monitor='val_loss', min_delta=0, patience=5, verbose=1, mode='auto')

    print "Train!"
    print model.summary()

    hist_current = model.fit(x= ,
                             y= ,
                             shuffle=False,
                             validation_data=(,),
                             validation_split=0.1,
                             epochs=150000,
                             verbose=1,
                             callbacks=[reduce_lr, stop])
model(nano_train_input,nano_train_output,nano_test_input, nano_test_output)
The input and output are stored as lists of numpy.ndarrays.
This is a minimal working example. How am I supposed to pass the input and output?

I would try:
merge = keras.layers.concatenate(list_of_max_out)
merge = Flatten()(merge) # or GlobalMaxPooling1D or GlobalAveragePooling1D
dense1 = Dense(500, activation = 'relu')(merge)
You probably want to transform the output of your convolutional layers before the Dense layers. To do that you need to squash the time / sequential dimension, using one of the techniques shown above.
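For instance, a minimal sketch (the shapes in the comments are assumed, not taken from your run) using GlobalMaxPooling1D instead of Flatten:
from keras.layers import GlobalMaxPooling1D
merge = keras.layers.concatenate(list_of_max_out)  # (None, steps, channels)
squashed = GlobalMaxPooling1D()(merge)             # (None, channels): time axis squashed
dense1 = Dense(500, activation='relu')(squashed)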

If you take a look at your code and outputs, you indeed have what you say: 24 outputs (data_train_output[0].shape). However, if you look at the layer output Keras reports, you have this as the output:
dense_3 (Dense) (None, 26, 145) 36395
I would say that this should be an array with shape (None, 24)...
I suggest you add a Reshape layer to get the output you want to have!
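As for how to pass the input and output: a minimal sketch (with a made-up sample count and random data standing in for your real preprocessing) of feeding a multi-input functional model. model.fit expects x to be a list with one array per Input layer, each of shape (num_samples,) + input_shape, and y to be a single array shaped like the final layer's output:
num_samples = 100  # hypothetical
# one array per Input(shape=(78,3)), i.e. a list of 72 arrays:
x_train = [np.random.random((num_samples, 78, 3)) for _ in range(72)]
# one target array matching the intended (None, 24) output:
y_train = np.random.random((num_samples, 24))
hist_current = model.fit(x=x_train,
                         y=y_train,
                         validation_split=0.1,
                         epochs=10,
                         verbose=1)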

Related

Showing key error

I am trying to use a recommendation engine to predict the top-selling product, and it is showing a key error. I am doing it with Python 2 in an Anaconda Jupyter notebook. How can I overcome this error?
import pandas as pd
import numpy as np
import operator

SMOOTHING_WINDOW_FUNCTION = np.hamming
SMOOTHING_WINDOW_SIZE = 7

def train():
    df = pd.read_csv('C:\\Users\\SHIVAPRASAD\\Desktop\\sample-cart-add-data (1).csv')
    df.sort_values(by=['id', 'age'], inplace=True)
    trends = pd.pivot_table(df, values='count', index=['id', 'age'])
    trend_snap = {}
    for i in np.unique(df['id']):
        trend = np.array(trends[i])
        smoothed = smooth(trend, SMOOTHING_WINDOW_SIZE, SMOOTHING_WINDOW_FUNCTION)
        nsmoothed = standardize(smoothed)
        slopes = nsmoothed[1:] - nsmoothed[:-1]
        # I blend in the previous slope as well, to stabilize things a bit and
        # give a boost to things that have been trending for more than 1 day
        if len(slopes) > 1:
            trend_snap[i] = slopes[-1] + slopes[-2] * 0.5
    return sorted(trend_snap.items(), key=operator.itemgetter(1), reverse=True)

def smooth(series, window_size, window):
    ext = np.r_[2 * series[0] - series[window_size-1::-1],
                series,
                2 * series[-1] - series[-1:-window_size:-1]]
    weights = window(window_size)
    smoothed = np.convolve(weights / weights.sum(), ext, mode='same')
    return smoothed[window_size:-window_size+1]

def standardize(series):
    iqr = np.percentile(series, 75) - np.percentile(series, 25)
    return (series - np.median(series)) / iqr

trending = train()
print "Top 5 trending products:"
for i, s in trending[:5]:
    print "Product %s (score: %2.2f)" % (i, s)
Instead of
trend = np.array(trends[i])
use
trend = np.array(trends.loc[i])
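The reason: pd.pivot_table with index=['id', 'age'] returns a result with a MultiIndex, and trends[i] looks i up as a column, while trends.loc[i] selects the rows whose outer index level (id) equals i. A small toy sketch (behaviour as in pandas versions where pivot_table returns a DataFrame):
import pandas as pd
toy = pd.DataFrame({'id': [1, 1, 2], 'age': [1, 2, 1], 'count': [5, 7, 3]})
trends = pd.pivot_table(toy, values='count', index=['id', 'age'])
print trends.loc[1]  # the 'count' rows for id == 1; trends[1] raises a KeyError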

Removing dimension using reshape in keras?

Is it possible to remove a dimension using Reshape, or any other function?
I have the following network.
import keras
import numpy as np
from keras.layers.merge import Concatenate
from keras.models import Model
from keras.layers import Input, Dense, Dropout, Conv2D, MaxPooling2D, ZeroPadding2D
from keras.layers.core import Activation, Lambda, Reshape, Flatten

# Number_of_splits = ((input_width - win_dim) + 1) / stride_dim
splits = ((40-5)+1)/1
print splits

train_data_1 = np.random.randint(100, size=(100, splits, 45, 5, 3))
test_data_1 = np.random.randint(100, size=(10, splits, 45, 5, 3))
labels_train_data = np.random.randint(145, size=(100, 15))
labels_test_data = np.random.randint(145, size=(10, 15))

list_of_input = [Input(shape=(45, 5, 3)) for i in range(splits)]
list_of_conv_output = []
list_of_max_out = []
for i in range(splits):
    list_of_conv_output.append(Conv2D(filters=145, kernel_size=(15, 3))(list_of_input[i]))  # output dim: 36x(31,3,145)
    list_of_max_out.append(MaxPooling2D(pool_size=(2, 2))(list_of_conv_output[i]))          # output dim: 36x(15,1,145)

merge = keras.layers.concatenate(list_of_max_out)  # output dim: (15,1,5220)
#reshape = Reshape((merge.shape[0], merge.shape[3]))(merge)  # expected output dim: (15,145)

dense1 = Dense(units=1000, activation='relu', name="dense_1")(merge)
dense2 = Dense(units=1000, activation='relu', name="dense_2")(dense1)
dense3 = Dense(units=145, activation='softmax', name="dense_3")(dense2)

model = Model(inputs=list_of_input, outputs=dense3)
model.compile(loss="sparse_categorical_crossentropy", optimizer="adam")
print model.summary()
raw_input("SDasd")

# one input array per Input layer: slice out each split
hist_current = model.fit(x=[train_data_1[:, i] for i in range(splits)],
                         y=labels_train_data,
                         shuffle=False,
                         validation_data=([test_data_1[:, i] for i in range(splits)], labels_test_data),
                         validation_split=0.1,
                         epochs=150000,
                         batch_size=15,
                         verbose=1)
The max-pooling layer creates an output with dimension (15,1,36), from which I would like to remove the middle axis, so the output dimension ends up being (15,36).
If possible I would like to avoid specifying the outer dimension, or, as I've tried, to use the prior layer's dimensions to reshape it.
#reshape = Reshape((merge.shape[0],merge.shape[3]))(merge) # expected output dim: (15,145)
I need the output dimension of the entire network to be (15,145), and the middle dimension is causing some problems.
How do I remove the middle dimension?
I wanted to remove all dimensions that are equal to 1, but not specify a specific size with Reshape, so that my code does not break if I change the input size or the number of kernels in a convolution. This works with the functional Keras API on a TensorFlow backend.
from keras.layers.core import Reshape

old_layer = Conv2D(#actualArguments) (older_layer)
# old_layer yields, e.g., a (None, 15, 1, 36) size tensor, where None is the batch size
newdim = tuple([x for x in old_layer.shape.as_list() if x != 1 and x is not None])
# newdim is now (15, 36). Reshape does not take the batch size as an input dimension.
reshape_layer = Reshape(newdim)(old_layer)
reshape = Reshape((15,145))(merge) # expected output dim: (15,145)
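Another option (a sketch, assuming a TensorFlow backend) is to drop the size-1 axis with the backend's squeeze inside a Lambda layer, which avoids hard-coding any of the remaining dimensions:
from keras.layers import Lambda
from keras import backend as K
# (None, 15, 1, 36) -> (None, 15, 36); axis=2 is the size-1 axis to drop
squeezed = Lambda(lambda t: K.squeeze(t, axis=2))(old_layer)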

Python KeyError: 1.0

I'm trying to run this code
from math import sqrt
import numpy as np
import warnings
from collections import Counter
import pandas as pd
import random
def k_nearest_neighbors(data, predict, k=3):
    if len(data) >= k:
        warnings.warn('K is set to a value less than total voting groups')
    distances = []
    for group in data:
        for features in data[group]:
            euclidean_distance = np.linalg.norm(np.array(features) - np.array(predict))
            distances.append([euclidean_distance, group])
    votes = [i[1] for i in sorted(distances)[:k]]
    print(Counter(votes).most_common(1))
    vote_result = Counter(votes).most_common(1)[0][0]
    return vote_result

df = pd.read_csv('bc2.txt')
df.replace('?', -99999, inplace=True)
df.drop(['id'], 1, inplace=True)
full_data = df.astype(float).values.tolist()
random.shuffle(full_data)

test_size = 0.2
train_set = {2: [], 4: []}
test_set = {2: [], 4: []}
train_data = full_data[:-int(test_size*len(full_data))]
test_data = full_data[-int(test_size*len(full_data)):]

for i in train_data:
    train_set[i[-1]].append(i[:-1])
for i in train_data:
    test_set[i[-1]].append(i[:-1])

correct = 0
total = 0
for group in test_set:
    for data in test_set[group]:
        vote = k_nearest_neighbors(train_set, data, k=5)
        if group == vote:
            correct += 1
        total += 1
print('Accuracy:', correct/total)
it comes out with this error message:
File "ml8.py", line 38, in <module>
    train_set[i[-1]].append(i[:-1])
KeyError: 1.0
The file ml8.py is the code above. Below is a sample of the txt file:
id,clump_thickness,unif_cell_size,unif_cell_shape,marg_adhesion,single_epith_cell_size,bare_nuclei,bland_chrom,norm_nucleoli,mitoses,class
1000025,2,5,1,1,1,2,1,3,1,1
1002945,2,5,4,4,5,7,10,3,2,1
1015425,2,3,1,1,1,2,2,3,1,1
1016277,2,6,8,8,1,3,4,3,7,1
1017023,2,4,1,1,3,2,1,3,1,1
1017122,4,8,10,10,8,7,10,9,7,1
1018099,2,1,1,1,1,2,10,3,1,1
1018561,2,2,1,2,1,2,1,3,1,1
1033078,2,2,1,1,1,2,1,1,1,5
1033078,2,4,2,1,1,2,1,2,1,1
1035283,2,1,1,1,1,1,1,3,1,1
1036172,2,2,1,1,1,2,1,2,1,1
1041801,4,5,3,3,3,2,3,4,4,1
I'm using Python 2.7.11.
Your train_set only contains keys 2 and 4, whereas your classes in that sample are 1 and 5.
Instead of using
train_set = {2:[],4:[]}
you might have better luck with defaultdict:
from collections import defaultdict
train_set = defaultdict(list)
This way a non-existent key will be initialized to a new empty list on first access.
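A short standalone demonstration of that behaviour:
from collections import defaultdict
train_set = defaultdict(list)
train_set[1.0].append([5.0, 1.0, 1.0])  # key 1.0 did not exist; it is created as an empty list first
print train_set[1.0]                    # [[5.0, 1.0, 1.0]]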

Feature selection in scikit learn for multiple variables and thousands+ features

I am trying to perform feature selection for a logistic regression classifier. Originally there are 4 variables: name, location, gender, and label = ethnicity. These variables, the name in particular, give rise to tens of thousands more "features"; for example, the name "John Snow" yields 2-letter substrings like 'jo', 'oh', 'hn', etc. The feature set then undergoes DictVectorization.
I am trying to follow this tutorial (http://scikit-learn.org/stable/auto_examples/feature_selection/plot_feature_selection.html) but I am not sure if I am doing it right, since the tutorial uses a small number of features while mine has tens of thousands after vectorization. Also, plt.show() shows a blank figure.
# coding=utf-8
import pandas as pd
from pandas import DataFrame, Series
import numpy as np
import re
import random
import time
from random import randint
import csv
import sys
reload(sys)
sys.setdefaultencoding('utf-8')

from sklearn import svm
from sklearn.metrics import classification_report
from sklearn.linear_model import LogisticRegression
from sklearn.svm import LinearSVC
from sklearn.tree import DecisionTreeClassifier
from sklearn.naive_bayes import MultinomialNB
from sklearn.feature_extraction import DictVectorizer
from sklearn.feature_selection import SelectPercentile, f_classif
from sklearn.metrics import confusion_matrix as sk_confusion_matrix
from sklearn.metrics import roc_curve, auc
import matplotlib.pyplot as plt
from sklearn.metrics import precision_recall_curve

# Assign X and y variables
X = df.raw_name.values
X2 = df.name.values
X3 = df.gender.values
X4 = df.location.values
y = df.ethnicity_scan.values

# Feature extraction functions
def feature_full_name(nameString):
    try:
        full_name = nameString
        if len(full_name) > 1:  # do not accept a name with only 1 character
            return full_name
        else:
            return '?'
    except:
        return '?'

def feature_avg_wordLength(nameString):
    try:
        space = 0
        for i in nameString:
            if i == ' ':
                space += 1
        length = float(len(nameString) - space)
        name_entity = float(space + 1)
        avg = round(float(length/name_entity), 0)
        return avg
    except:
        return 0

def feature_name_entity(nameString2):
    space = 0
    try:
        for i in nameString2:
            if i == ' ':
                space += 1
        return space + 1
    except:
        return 0

def feature_gender(genString):
    try:
        gender = genString
        if len(gender) >= 1:
            return gender
        else:
            return '?'
    except:
        return '?'

def feature_noNeighborLoc(locString):
    try:
        x = re.sub(r'^[^, ]*', '', locString)  # remove everything before and including the first ','
        y = x[2:]  # remove the subsequent ',' and ' '
        return y
    except:
        return '?'

def list_to_dict(substring_list):
    try:
        substring_dict = {}
        for i in substring_list:
            substring_dict['substring=' + str(i)] = True
        return substring_dict
    except:
        return '?'

# Transform format of X variables, and spit out a numpy array for all features
my_dict13 = [{'name-entity': feature_name_entity(feature_full_name(i))} for i in X2]
my_dict14 = [{'avg-length': feature_avg_wordLength(feature_full_name(i))} for i in X]
my_dict15 = [{'gender': feature_full_name(i)} for i in X3]
my_dict16 = [{'location': feature_noNeighborLoc(feature_full_name(i))} for i in X4]
my_dict17 = [{'dummy1': 1} for i in X]
my_dict18 = [{'dummy2': random.randint(0, 2)} for i in X]

all_dict = []
for i in range(0, len(my_dict13)):
    temp_dict = dict(my_dict13[i].items() + my_dict14[i].items()
                     + my_dict15[i].items() + my_dict16[i].items()
                     + my_dict17[i].items() + my_dict18[i].items())
    all_dict.append(temp_dict)

dv = DictVectorizer()
newX = dv.fit_transform(all_dict)

# Separate the training and testing data sets
half_cut = int(len(df)/2.0) * -1
X_train = newX[:half_cut]
X_test = newX[half_cut:]
y_train = y[:half_cut]
y_test = y[half_cut:]

# Fitting X and y into model, using training data
lr = LogisticRegression()
lr.fit(X_train, y_train)

# Feature selection
plt.figure(1)
plt.clf()
X_indices = np.arange(X_train.shape[-1])
selector = SelectPercentile(f_classif, percentile=10)
selector.fit(X_train, y_train)
scores = -np.log10(selector.pvalues_)
scores /= scores.max()
plt.bar(X_indices - .45, scores, width=.2,
        label=r'Univariate score ($-Log(p_{value})$)', color='g')
plt.show()
Warning:
E:\Program Files Extra\Python27\lib\site-packages\sklearn\feature_selection\univariate_selection.py:111: UserWarning: Features [[0 0 0 ..., 0 0 0]] are constant.
It looks like the way you split your data into training and testing sets is not working:
# Separate the training and testing data sets
X_train = newX[:half_cut]
X_test = newX[half_cut:]
If you already use sklearn, it is much more convenient to use the builtin splitting routine for this:
X_train, X_test, y_train, y_test = cross_validation.train_test_split(X, y, test_size=0.5, random_state=0)
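For completeness, a sketch of the full call; note that in scikit-learn versions contemporary with Python 2.7 the helper lives in sklearn.cross_validation (it later moved to sklearn.model_selection):
from sklearn import cross_validation
X_train, X_test, y_train, y_test = cross_validation.train_test_split(
    newX, y, test_size=0.5, random_state=0)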

python plotting moving average python error

I made some code where I need to make a plot of my data passed through a moving average.
import numpy as np
import csv
import datetime
import matplotlib.pyplot as plt

# Open data file
data = open('iphonevsandroid.csv', 'r')
reader = csv.reader(data, delimiter=',')

# Define lists
iphone_data = []
android_data = []
dateTime = []
stringdates = []
#iphone_data_average = []
#android_data_average = []

for row in reader:
    first_date_row = row[0]
    first_date = row[0][:-13]
    if row[1] != 'iphone':
        iphone_data.append(int(row[1]))
    if row[2] != 'android':
        android_data.append(int(row[2]))
    if row[0] != 'week':
        stringdates.append(row[0][:-13])

for item in stringdates:
    dateTime.append(datetime.datetime.strptime(item, '%Y-%m-%d'))

def movingaverage(values, window):
    weights = np.repeat(1.0, window)/window
    # Using 'valid' REQUIRES there to be enough data points.
    # For example, if you take out 'valid', it will start at point one,
    # not having any prior points, so it'll be 1+0+0 = 1 / 3 = .3333
    smas = np.convolve(values, weights, 'valid')
    return smas  # as a numpy array

movingaverage(iphone_data, 3)
movingaverage(android_data, 3)

plt.ylabel('Insert y label')
plt.xlabel('Insert x label')
plt.plot(dateTime, movingaverage(iphone_data, 3)+2)
plt.plot(dateTime, movingaverage(android_data, 3)+2)
plt.show()
My problem is that I get this error: ValueError: x and y must have same first dimension.
I know it's because of the lengths of the values; if I print them:
print len(dateTime)
print len(movingaverage(iphone_data,3))
print len(movingaverage(android_data,3))
I get:
528
526
526
How do I get dateTime down to 526?
smas = np.convolve(values, weights, 'valid')
should be
smas = np.convolve(values, weights, 'same')
so that the output has the same length as the input. The border values of 'same' are only partial averages, though; if you don't want them, keep 'valid' and instead trim (window-1)/2 entries off each end of the x values, that is for odd window lengths:
dateTime = dateTime[(window-1)/2:-((window-1)/2)]
so that dateTime matches the 526-element moving averages of iphone_data and android_data.
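A quick standalone check of the two modes (made-up values):
import numpy as np
values = np.arange(10.0)  # 10 data points
window = 3
weights = np.repeat(1.0, window) / window
print len(np.convolve(values, weights, 'valid'))  # 8, i.e. len(values) - window + 1
print len(np.convolve(values, weights, 'same'))   # 10, same length as the input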