Comparing metrics of Keras with metrics of sklearn.classification_report - python-2.7

I am struggling with different metrics while evaluating neural networks.
My investigation showed that Keras (version 1.2.2) calculates different values for certain metrics (via model.evaluate) than sklearn's classification_report does.
Specifically, the values for 'precision' (i.e. 'precision' of Keras != 'precision' of sklearn) and 'recall' (i.e. 'recall' of Keras != 'recall' of sklearn) differ.
For the following working example the differences seem random, but evaluating bigger networks shows that 'precision' of Keras (almost) equals 'recall' of sklearn, whereas the two 'recall' metrics differ clearly.
I appreciate your help!
from __future__ import print_function
import numpy as np
np.random.seed(1337) # for reproducibility
from keras.datasets import mnist
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation, Flatten
from keras.layers import Convolution2D, MaxPooling2D
from keras.utils import np_utils # numpy utils for to_categorical()
from keras import backend as K # abstract backend API (in order to generate compatible code for Theano and Tf)
from sklearn.metrics import classification_report
batch_size = 128
nb_classes = 10
nb_epoch = 30
# input image dimensions
img_rows, img_cols = 28, 28
# number of convolutional filters to use
nb_filters = 32
# size of pooling area for max pooling
pool_size = (2, 2)
# convolution kernel size
kernel_size = (3, 3)
# the data, shuffled and split between train and test sets
(X_train, y_train), (X_test, y_test) = mnist.load_data()
if K.image_dim_ordering() == 'th':
    X_train = X_train.reshape(X_train.shape[0], 1, img_rows, img_cols)
    X_test = X_test.reshape(X_test.shape[0], 1, img_rows, img_cols)
    input_shape = (1, img_rows, img_cols)
else:
    X_train = X_train.reshape(X_train.shape[0], img_rows, img_cols, 1)
    X_test = X_test.reshape(X_test.shape[0], img_rows, img_cols, 1)
    input_shape = (img_rows, img_cols, 1)
X_train = X_train.astype('float32')
X_test = X_test.astype('float32')
X_train /= 255 # range [0,1]
X_test /= 255 # range [0,1]
print('X_train shape:', X_train.shape)
print(X_train.shape[0], 'train samples')
print(X_test.shape[0], 'test samples')
# convert class vectors to binary class matrices
Y_train = np_utils.to_categorical(y_train, nb_classes) # necessary for use of categorical_crossentropy
Y_test = np_utils.to_categorical(y_test, nb_classes) # necessary for use of categorical_crossentropy
# create model
model = Sequential()
model.add(Convolution2D(nb_filters, kernel_size[0], kernel_size[1],
                        border_mode='valid',
                        input_shape=input_shape))
model.add(Activation('relu'))
model.add(Convolution2D(nb_filters, kernel_size[0], kernel_size[1]))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=pool_size))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(128))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(nb_classes))
model.add(Activation('softmax'))
# configure model
model.compile(loss='categorical_crossentropy',
              optimizer='adadelta',
              metrics=['accuracy', 'precision', 'recall'])
# train model
model.fit(X_train, Y_train, batch_size=batch_size, nb_epoch=nb_epoch,
          verbose=1, validation_data=(X_test, Y_test))
# evaluate model with keras
score = model.evaluate(X_test, Y_test, verbose=0)
print('Test score:', score[0])
print('Test accuracy:', score[1])
print('Test precision:', score[2])
print('Test recall:', score[3])
# evaluate model with sklearn
predictions_last_epoch = model.predict(X_test, batch_size=batch_size, verbose=1)
target_names = ['class 0', 'class 1', 'class 2', 'class 3', 'class 4',
                'class 5', 'class 6', 'class 7', 'class 8', 'class 9']
predicted_classes = np.argmax(predictions_last_epoch, axis=1)
print('\n')
print(classification_report(y_test, predicted_classes,
                            target_names=target_names, digits=6))
EDIT
The output of the script given above:
Test score: 0.0271549037314
Test accuracy: 0.9916
Test precision: 0.992290322304
Test recall: 0.9908
9728/10000 [============================>.] - ETA: 0s
precision recall f1-score support
class 0 0.987867 0.996939 0.992382 980
class 1 0.993860 0.998238 0.996044 1135
class 2 0.990329 0.992248 0.991288 1032
class 3 0.991115 0.994059 0.992585 1010
class 4 0.994882 0.989817 0.992343 982
class 5 0.991041 0.992152 0.991597 892
class 6 0.993678 0.984342 0.988988 958
class 7 0.992180 0.987354 0.989761 1028
class 8 0.989754 0.991786 0.990769 974
class 9 0.991054 0.988107 0.989578 1009
avg / total 0.991607 0.991600 0.991597 10000
For another model:
val/test loss: 0.231304548573
val/test categorical_accuracy: **0.978500002956**
val/test precision: *0.995103668976*
val/test recall: 0.941900001907
val/test fbeta_score: 0.967675107574
val/test mean_squared_error: 0.0064611148566
10000/10000 [==============================] - 0s
precision recall f1-score support
class 0 0.989605 0.971429 0.980433 980
class 1 0.985153 0.993833 0.989474 1135
class 2 0.988154 0.969961 0.978973 1032
class 3 0.981373 0.991089 0.986207 1010
class 4 0.968907 0.983707 0.976251 982
class 5 0.997633 0.945067 0.970639 892
class 6 0.995690 0.964509 0.979852 958
class 7 0.987230 0.977626 0.982405 1028
class 8 0.945205 0.991786 0.967936 974
class 9 0.951429 0.990089 0.970374 1009
avg / total *0.978964* **0.978500** 0.978522 10000
Definition of desired metrics (for model.compile):
metrics = ['categorical_accuracy', 'precision', 'recall', 'fbeta_score', 'mean_squared_error']
model.compile(loss='categorical_crossentropy',
              optimizer='sgd',
              metrics=metrics)
Output of model.metrics_names:
['loss', 'categorical_accuracy', 'precision', 'recall', 'fbeta_score', 'mean_squared_error']
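For what it's worth, a small sketch (my own, not from the original script) that avoids hard-coded score indices by zipping the scores with model.metrics_names:

score = model.evaluate(X_test, Y_test, verbose=0)
results = dict(zip(model.metrics_names, score))  # e.g. {'loss': ..., 'precision': ..., ...}
print('Test precision:', results['precision'])
print('Test recall:', results['recall'])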

Yes, it is different, because the sklearn classification report gives you the weighted average based on the support (the number of true samples per class).
Experiment with:
from sklearn.metrics import classification_report
y_true = [0, 1, 2, 1]
y_pred = [0, 0, 2, 0]
target_names = ['class 0', 'class 1', 'class 2']
print(classification_report(y_true, y_pred, target_names=target_names))
Gives you:
precision recall f1-score support
class 0 0.33 1.00 0.50 1
class 1 0.00 0.00 0.00 2
class 2 1.00 1.00 1.00 1
avg / total 0.33 0.50 0.38 **4**
However, the plain unweighted mean would be (1 + 0 + 0.33)/3 = 0.44(3), whereas, as the support column indicates, sklearn returns the support-weighted mean (1*1 + 0*2 + 0.33*1)/4 = 0.3325.
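You can confirm this directly with precision_score (a minimal sketch using the toy arrays above; average='weighted' reproduces the avg / total row, average='macro' is the plain unweighted mean):

from sklearn.metrics import precision_score

y_true = [0, 1, 2, 1]
y_pred = [0, 0, 2, 0]

print(precision_score(y_true, y_pred, average='weighted'))  # ~0.333, the support-weighted mean
print(precision_score(y_true, y_pred, average='macro'))     # ~0.444, the unweighted mean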

Related

LSTM classification for 2D dataset

I have a dataset with 10000 rows and 4 features (the positions of two cars and their velocities) as x_train, and y_train holds my labels, which are 0 or 1. I want to classify my dataset with an LSTM, but the accuracy does not go above 50 percent. For the LSTM I tested both input shapes (10000, 4, 1) and (10000, 1, 4), but the accuracy stays around 50 percent. Do you know what I should do to improve the accuracy? And which of (10000, 4, 1) and (10000, 1, 4) is the correct format?
x_train, x_test, y_train, y_test = train_test_split(x, label, test_size=0.25, random_state=42)
x_train = x_train.reshape(-1, 4, 1)
x_test = x_test.reshape(-1, 4, 1)
y_train = y_train.reshape(-1, 1, 1)
y_test = y_test.reshape(-1, 1, 1)
model = Sequential()
model.add(LSTM(256, input_shape=(x_train.shape[1:]), activation='tanh', return_sequences=True))
model.add(Dropout(0.2))
model.add(BatchNormalization())
model.add(LSTM(256, activation='tanh'))
model.add(Dropout(0.2))
model.add(BatchNormalization())
model.add(Dense(128, activation='tanh'))
model.add(BatchNormalization())
model.add(Dense(2, activation='softmax'))
model.compile(loss='binary_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])
history = model.fit(x_train, y_train, validation_split=0.33, epochs=20, batch_size=128, shuffle=True)
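For reference, Keras LSTM layers expect input shaped (samples, timesteps, features), so which of the two formats is correct depends on whether the 4 values should be read as a sequence. A minimal sketch, assuming the positions and velocities describe a single moment in time:

# if the 4 values describe one moment, they are 4 features of a single time step:
x_train = x_train.reshape(-1, 1, 4)  # (samples, 1 timestep, 4 features)
x_test = x_test.reshape(-1, 1, 4)
# (10000, 4, 1) would instead feed them as a sequence of 4 scalar time steps.

# Note also that a 2-unit softmax output is usually paired with one-hot labels
# and 'categorical_crossentropy'; with plain 0/1 labels, Dense(1, activation='sigmoid')
# plus 'binary_crossentropy' is the usual pairing.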

How to plot a parallel coordinate plot from Hyperparameter Tuning with the HParams Dashboard?

I am trying to replicate the parallel coordinates plot from the Hyperparameter Tuning tutorial in this TensorFlow tutorial, and I have written my own CSV file where I store my results.
My output reading the csv file is like this:
conv_layers filters dropout accuracy
0 4 16 0.5 0.447917
1 4 16 0.6 0.458333
2 4 32 0.5 0.635417
3 4 32 0.6 0.447917
4 4 64 0.5 0.604167
5 4 64 0.6 0.645833
6 8 16 0.5 0.437500
7 8 16 0.6 0.437500
8 8 32 0.5 0.437500
9 8 32 0.6 0.562500
10 8 64 0.5 0.562500
11 8 64 0.6 0.437500
How can I create the same plot as in the tutorial in Python?
So I found the answer using plotly:
import pandas as pd
from plotly.offline import init_notebook_mode, iplot
import plotly.graph_objects as go

init_notebook_mode(connected=True)
df = pd.read_csv('path/to/csv')
fig = go.Figure(data=
    go.Parcoords(
        line = dict(color = df['accuracy'],
                    colorscale = [[0, '#6C9E12'],
                                  [0.25, '#0D5F67'],
                                  [0.5, '#AA1B13'],
                                  [0.75, '#69178C'],
                                  [1, '#DE9733']]),
        dimensions = list([
            dict(range = [0, 12],
                 label = 'Conv_layers', values = df['conv_layers']),
            dict(range = [8, 64],
                 label = 'filter_number', values = df['filters']),
            dict(range = [0.2, 0.8],
                 label = 'dropout_rate', values = df['dropout']),
            dict(range = [0.1, 1.0],
                 label = 'accuracy', values = df['accuracy'])
        ])
    )
)
fig.update_layout(
    plot_bgcolor = '#E5E5E5',
    paper_bgcolor = '#E5E5E5',
    title = "Parallel Coordinates Plot"
)
# show the plot
fig.show()
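A possible refinement (my own sketch, not from the original answer): derive each axis range from the data instead of hardcoding it, so new runs never fall outside an axis.

def axis(df, col, label):
    # build one Parcoords dimension whose range follows the data
    return dict(range=[df[col].min(), df[col].max()],
                label=label, values=df[col])

dimensions = [axis(df, 'conv_layers', 'Conv_layers'),
              axis(df, 'filters', 'filter_number'),
              axis(df, 'dropout', 'dropout_rate'),
              axis(df, 'accuracy', 'accuracy')]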

ValueError: shapes (1,4) and (5,4) not aligned: 4 (dim 1) != 5 (dim 0), when adding my variables to my prediction machine

I am creating a prediction machine with four variables. When I add the variables it all messes up and gives me:
ValueError: shapes (1,4) and (5,4) not aligned: 4 (dim 1) != 5 (dim 0)
Code:
import pandas as pd
from pandas import DataFrame
from sklearn import linear_model
import tkinter as tk
import statsmodels.api as sm
# Approach 1: Import the data into Python
Stock_Market = pd.read_csv(r'Training_Nis_New2.csv')
df = DataFrame(Stock_Market, columns=['Month 1', 'Month 2', 'Month 3', 'Month 4',
                                      'Month 5', 'Month 6', 'Month 7', 'Month 8',
                                      'Month 9', 'Month 10', 'Month 11', 'Month 12',
                                      'FSUTX', 'MMUKX', 'FUFRX', 'RYUIX',
                                      'Interest R', 'Housing Sale', 'Unemployement Rate',
                                      'Conus Average Temperature Rank',
                                      '30FSUTX', '30MMUKX', '30FUFRX', '30RYUIX'])
X = df[['Month 1','Interest R','Housing Sale','Unemployement Rate','Conus Average Temperature Rank']]
# here we have multiple variables for multiple regression. If you just want to use
# one variable for simple linear regression, then use e.g. X = df['Interest R'].
# Alternatively, you may add additional variables within the brackets.
Y = df[['30FSUTX','30MMUKX','30FUFRX','30RYUIX']]
# with sklearn
regr = linear_model.LinearRegression()
regr.fit(X, Y)
print('Intercept: \n', regr.intercept_)
print('Coefficients: \n', regr.coef_)
# prediction with sklearn
HS=5.5
UR=6.7
CATR=8.9
New_Interest_R = 4.6
print('Predicted Stock Index Price: \n',
      regr.predict([[UR, HS, CATR, New_Interest_R]]))
# with statsmodels
X = df[['Month 1','Interest R','Housing Sale','Unemployement Rate','Conus Average Temperature Rank']]
Y = df['30FSUTX']
print('\n\n*** Fund = FSUTX')
X = sm.add_constant(X) # adding a constant
model = sm.OLS(Y, X).fit()
predictions = model.predict(X)
print_model = model.summary()
print(print_model)
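The shapes in the error point at the cause: the model was fit on five feature columns, but predict receives only four values, hence (1,4) vs (5,4). A sketch of the likely fix (the 'Month 1' value below is a hypothetical placeholder; the values must follow the training column order):

# X was fit on 5 columns: 'Month 1', 'Interest R', 'Housing Sale',
# 'Unemployement Rate', 'Conus Average Temperature Rank'
M1 = 1.0  # hypothetical value for the 'Month 1' feature
print('Predicted Stock Index Price: \n',
      regr.predict([[M1, New_Interest_R, HS, UR, CATR]]))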

Getting same value for Precision and Recall (K-NN) using sklearn

Updated question:
I did this, but I am getting the same result for both precision and recall. Is it because I am using average='binary'?
But when I use average='macro' I get this warning message:
Test a custom review message
C:\Python27\lib\site-packages\sklearn\metrics\classification.py:976:
DeprecationWarning: From version 0.18, binary input will not be
handled specially when using averaged precision/recall/F-score. Please
use average='binary' to report only the positive class performance.
  'positive class performance.', DeprecationWarning)
Here is my updated code:
path = 'opinions.tsv'
data = pd.read_table(path,header=None,skiprows=1,names=['Sentiment','Review'])
X = data.Review
y = data.Sentiment
#Using CountVectorizer to convert text into tokens/features
vect = CountVectorizer(stop_words='english', ngram_range = (1,1), max_df = .80, min_df = 4)
X_train, X_test, y_train, y_test = train_test_split(X,y,random_state=1, test_size= 0.2)
#Using training data to transform text into counts of features for each message
vect.fit(X_train)
X_train_dtm = vect.transform(X_train)
X_test_dtm = vect.transform(X_test)
#Accuracy using KNN Model
KNN = KNeighborsClassifier(n_neighbors = 3)
KNN.fit(X_train_dtm, y_train)
y_pred = KNN.predict(X_test_dtm)
print('\nK Nearest Neighbors (NN = 3)')
#Naive Bayes Analysis
tokens_words = vect.get_feature_names()
print '\nAnalysis'
print 'Accuracy Score: %f %%' % (metrics.accuracy_score(y_test, y_pred) * 100)
print "Precision Score: %f%%" % precision_score(y_test, y_pred, average='binary')
print "Recall Score: %f%%" % recall_score(y_test, y_pred, average='binary')
By using the code above I get same value for precision and recall.
Thank you for answering my question, much appreciated.
To calculate precision and recall metrics, you should import the according methods from sklearn.metrics.
As stated in the documentation, their parameters are 1-d arrays of true and predicted labels:
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
y_true = [0, 1, 2, 0, 1, 2]
y_pred = [0, 2, 1, 0, 0, 1]
print('Calculating the metrics...')
precision_score(y_true, y_pred, average='macro')
>>> 0.22
recall_score(y_true, y_pred, average='macro')
>>> 0.33
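As an aside that may explain the original observation (a sketch with the toy arrays above, not from the original answer): for single-label multiclass data, micro-averaged precision and recall are always identical, since both reduce to overall accuracy. That is one common way to end up with the same number for both metrics.

precision_score(y_true, y_pred, average='micro')
>>> 0.33
recall_score(y_true, y_pred, average='micro')
>>> 0.33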

Why am I getting the following AttributeError in Python?

I am using sklearn's GradientBoostingRegressor. After fitting it with 2000 estimators, I wanted to add more estimators to it. Since rerunning the entire fitting process takes too long, I used the set_params() method. Note that it is a multi-target problem, meaning I have 3 targets to fit. So I am using the following code to add more estimators.
'''parameters: models (list of length 3 in our case)
   train_X, train_y [n_samples x 3], test
   n_estimators : previous + 500 (default) [additional estimators]
   warm_start : True (default)
'''
def addMoreEstimators(train_X, train_y, test, models, n_estimators=500, warm_start=True):
    params = {'n_estimators': n_estimators, 'warm_start': warm_start}
    gbm_pred = pd.DataFrame()
    for (i, stars), clf in zip(enumerate(['*', '**', '***']), models):
        clf.set_params(**params)
        %time clf.fit(train_X.todense(), train_y[stars])
        %time gbm_pred[stars] = clf.predict(test.todense())
    gbm_pred = gbm_pred.as_matrix()
    gbm_dict = {'model': models, 'prediction': gbm_pred}
    return gbm_dict
Note: the models parameter is a list of 3 fitted models for the 3 targets.
When I ran it for the first time with 2500 estimators (originally I had 2000), it ran fine and gave me an output.
When I run the same function with 3000 estimators, I get an AttributeError (see the traceback below). Here models contained the 3 fitted models. The traceback is kind of long:
AttributeError Traceback (most recent call last)
<ipython-input-104-9418ada3b36f> in <module>()
7 test = val_X_tfidf[:,shortened_col_index],
8 models = models,
----> 9 n_estimators = 3000)
10
11 reduced_features_gbm_pred_3000_2_lr_1_msp_2 = reduced_features_gbm_model_3000_2_lr_1_msp_2['prediction']
<ipython-input-103-e15a4fb70b50> in addMoreEstimators(train_X, train_y, test, models, n_estimators, warm_start)
15
16 clf.set_params(**params)
---> 17 get_ipython().magic(u'time clf.fit(train_X.todense(),train_y[stars])')
18 print 'starting prediction'
19
//anaconda/lib/python2.7/site-packages/IPython/core/interactiveshell.pyc in magic(self, arg_s)
2305 magic_name, _, magic_arg_s = arg_s.partition(' ')
2306 magic_name = magic_name.lstrip(prefilter.ESC_MAGIC)
-> 2307 return self.run_line_magic(magic_name, magic_arg_s)
2308
2309 #-------------------------------------------------------------------------
//anaconda/lib/python2.7/site-packages/IPython/core/interactiveshell.pyc in run_line_magic(self, magic_name, line)
2226 kwargs['local_ns'] = sys._getframe(stack_depth).f_locals
2227 with self.builtin_trap:
-> 2228 result = fn(*args,**kwargs)
2229 return result
2230
//anaconda/lib/python2.7/site-packages/IPython/core/magics/execution.pyc in time(self, line, cell, local_ns)
//anaconda/lib/python2.7/site-packages/IPython/core/magic.pyc in <lambda>(f, *a, **k)
191 # but it's overkill for just that one bit of state.
192 def magic_deco(arg):
--> 193 call = lambda f, *a, **k: f(*a, **k)
194
195 if callable(arg):
//anaconda/lib/python2.7/site-packages/IPython/core/magics/execution.pyc in time(self, line, cell, local_ns)
1160 if mode=='eval':
1161 st = clock2()
-> 1162 out = eval(code, glob, local_ns)
1163 end = clock2()
1164 else:
<timed eval> in <module>()
//anaconda/lib/python2.7/site-packages/sklearn/ensemble/gradient_boosting.pyc in fit(self, X, y, sample_weight, monitor)
973 self.estimators_.shape[0]))
974 begin_at_stage = self.estimators_.shape[0]
--> 975 y_pred = self._decision_function(X)
976 self._resize_state()
977
//anaconda/lib/python2.7/site-packages/sklearn/ensemble/gradient_boosting.pyc in _decision_function(self, X)
1080 # not doing input validation.
1081 score = self._init_decision_function(X)
-> 1082 predict_stages(self.estimators_, X, self.learning_rate, score)
1083 return score
1084
sklearn/ensemble/_gradient_boosting.pyx in sklearn.ensemble._gradient_boosting.predict_stages (sklearn/ensemble/_gradient_boosting.c:2502)()
AttributeError: 'int' object has no attribute 'tree_'
Sorry for the long traceback, but I think that without it, it wouldn't be possible to give me meaningful feedback.
Again, why am I getting this error?
Any help would be greatly appreciated.
Thanks
EDIT
Below is the code that generates the models passed as the models argument to the function above.
from sklearn import ensemble

def updated_runGBM(train_X, train_y, test,
                   n_estimators=100,
                   max_depth=1,
                   min_samples_split=1,
                   learning_rate=0.01,
                   loss='ls',
                   warm_start=True):
    '''train_X : n_samples x m_features
       train_y : n_samples x k_targets (multiple targets allowed)
       test : n_samples x m_features
       warm_start : True (originally the default is False, but I want to add trees)
    '''
    params = {'n_estimators': n_estimators, 'max_depth': max_depth,
              'min_samples_split': min_samples_split,
              'learning_rate': learning_rate, 'loss': loss, 'warm_start': warm_start}
    gbm1 = ensemble.GradientBoostingRegressor(**params)
    gbm2 = ensemble.GradientBoostingRegressor(**params)
    gbm3 = ensemble.GradientBoostingRegressor(**params)
    gbm = [gbm1, gbm2, gbm3]
    gbm_pred = pd.DataFrame()
    for (i, stars), clf in zip(enumerate(['*', '**', '***']), gbm):
        %time clf.fit(train_X.todense(), train_y[stars])
        %time gbm_pred[stars] = clf.predict(test.todense())
    gbm_pred = gbm_pred.as_matrix()
    gbm_pred = np.clip(gbm_pred, 0, np.inf)
    gbm_dict = {'model': gbm, 'prediction': gbm_pred}
    return gbm_dict
NOTE: In the code above, I have removed some of the print statements to reduce clutter.
These are the two functions I am using, nothing else (apart from the code to split up the data).
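For comparison, the canonical warm-start pattern in sklearn is to grow the same fitted estimator in place (a minimal sketch on hypothetical toy data, independent of the code above):

import numpy as np
from sklearn.ensemble import GradientBoostingRegressor

X = np.random.rand(100, 5)   # hypothetical toy data
y = np.random.rand(100)

gbm = GradientBoostingRegressor(n_estimators=2000, warm_start=True)
gbm.fit(X, y)

gbm.set_params(n_estimators=2500)  # request 500 additional trees
gbm.fit(X, y)                      # continues from the existing 2000 stages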