How can I use the pvlib modules in pvmismatch?

Is it possible to use the CEC modules from pvlib in pvmismatch?
I tried to build a cell using the CEC module values:
cell=pvcell.PVcell(
Rs=parametersw.R_s/parameters.N_s,
Rsh= parametersw.R_sh_ref*parameters.N_s,
Isat1_T0=parametersw.I_o_ref,
Isat2_T0=0,
Isc0_T0= parametersw.I_L_ref,
alpha_Isc=parametersw.alpha_sc,
#aRBD=0,
#bRBD=0,
#nRBD=0,
Eg=1.121,
Tcell=273.15
)
and putting these cells in a mismatch module, the values I get back (Voc, Isc, Vmp, Imp) are about 3 V lower than expected.

The short answer is no, it is not possible. pvmismatch uses the two-diode model, and you cannot simply put in the corresponding values from the one-diode model and omit the others.
Having said that, using the same (scaled) resistances might be ok, and you could try tweaking Isat1 and Isat2 to get closer to what you were expecting. This may or may not be good enough for your application.
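For example, a rough starting point might look like the sketch below. This is only illustrative: mod stands for one row of pvlib's CEC table, and the per-cell scaling of the resistances, the conversion of alpha_sc to the fractional temperature coefficient pvmismatch expects, and the Isat2_T0 starting value are all assumptions you would need to tune.
from pvmismatch import pvcell
# mod is assumed to be one row of pvlib.pvsystem.retrieve_sam('CECMod')
cell = pvcell.PVcell(
    Rs=mod.R_s / mod.N_s,            # per-cell series resistance
    Rsh=mod.R_sh_ref / mod.N_s,      # per-cell shunt resistance
    Isat1_T0=mod.I_o_ref,            # starting guess: the 1-diode saturation current
    Isat2_T0=1e-6,                   # starting guess for the second diode; tune this
    Isc0_T0=mod.I_sc_ref,
    alpha_Isc=mod.alpha_sc / mod.I_sc_ref,  # A/K converted to a fractional 1/K coefficient
    Tcell=298.15)                    # 25 degC (note that 273.15 K is 0 degC)
# compare cell.Isc and mod.N_s * cell.Voc against the datasheet, then adjust
# Isat1_T0 and Isat2_T0 until the module-level numbers line up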

If you are willing to do a little extra work, you can generate 2-diode model coefficients from an IV curve at STC using the gen_coeffs package in pvmismatch.contrib. See this discussion for an example using a Canadian Solar module from the Sandia array performance model library. All you need are the principal characteristics at STC, a little luck, and some elbow grease, and you can do it.
For example, if using a CEC module:
"""
Making 2-diode modules from CEC in PVMismatch
"""
from matplotlib import pyplot as plt
import numpy as np
import pvlib
from pvmismatch import *
from pvmismatch.contrib import gen_coeffs
cecmod = pvlib.pvsystem.retrieve_sam('CECMod')
csmods = cecmod.columns[cecmod.T.index.str.startswith('Canadian')]
len(csmods) # 409 Canadian Solar modules in the CEC library!
cs6x_300m = cecmod[csmods[264]] # CS6X-300M Canadian Solar 300W mono-Si module
args = (
cs6x_300m.I_sc_ref,
cs6x_300m.V_oc_ref,
cs6x_300m.I_mp_ref,
cs6x_300m.V_mp_ref,
cs6x_300m.N_s, # number of series cells
1, # number of parallel sub-strings
25.0) # cell temperature
# try to solve using default coeffs
x, sol = gen_coeffs.gen_two_diode(*args)
# solver fails, so get the last guess before it quit
def last_guess(sol):
isat1 = np.exp(sol.x[0])
isat2 = np.exp(sol.x[1])
rs = sol.x[2] ** 2.0
rsh = sol.x[3] ** 2.0
return isat1, isat2, rs, rsh
x = last_guess(sol)
# the series and shunt resistance guesses from the solver are way off, so reset them
# with something reasonable for a 2-diode model
x, sol = gen_coeffs.gen_two_diode(*args, x0=(x[0], x[1], 0.005, 10.0))
# Hooray, it worked! Note that the 1-diode and 2-diode parameters are so
# different! Anyway, let's make a cell and a module to check the solution.
pvc = pvcell.PVcell(
Isat1_T0=x[0],
Isat2_T0=x[1],
Rs=x[2],
Rsh=x[3],
Isc0_T0=cs6x_300m.I_sc_ref,
alpha_Isc=cs6x_300m.alpha_sc)
np.isclose(pvc.Isc, cs6x_300m.I_sc_ref) # exact, because Isc0_T0 was taken straight from the CEC value
# open circuit voltage within 1E-3: (45.01267251639085, 45.0)
np.isclose(pvc.Voc*cs6x_300m.N_s, cs6x_300m.V_oc_ref, rtol=1e-3, atol=1e-3)
# get index max power point
mpp = np.argmax(pvc.Pcell)
# max power voltage within 1E-3: (36.50580418834946, 36.5)
np.isclose(pvc.Vcell[mpp][0]*cs6x_300m.N_s, cs6x_300m.V_mp_ref, rtol=1e-3, atol=1e-3)
# max power current within 1E-3: (8.218687568902466, 8.22)
np.isclose(pvc.Icell[mpp][0], cs6x_300m.I_mp_ref, rtol=1e-3, atol=1e-3)
# use pvlib to get the full IV curve using CEC model
params1stc = pvlib.pvsystem.calcparams_cec(effective_irradiance=1000,
temp_cell=25.0, alpha_sc=cs6x_300m.alpha_sc, a_ref=cs6x_300m.a_ref,
I_L_ref=cs6x_300m.I_L_ref, I_o_ref=cs6x_300m.I_o_ref, R_sh_ref=cs6x_300m.R_sh_ref,
R_s=cs6x_300m.R_s, Adjust=cs6x_300m.Adjust)
iv_params1stc = pvlib.pvsystem.singlediode(*params1stc, ivcurve_pnts=100, method='newton')
# use pvmm to get full IV curve using 2-diode model parameters
pvm = pvmodule.PVmodule(cell_pos=pvmodule.STD72, pvcells=[pvc]*72)
# make some comparison plots
pvm.plotMod() # plot the pvmm module
plt.tight_layout()
# get the axes for the I-V and P-V curves from the plotMod() figure
f = plt.gcf()
ax0, ax1 = f.axes[0], f.axes[1]  # I-V curve axes first, P-V curve axes second
ax0.plot(iv_params1stc['v'], iv_params1stc['i'], '--')
ax0.plot(0, iv_params1stc['i_sc'], '|k')
ax0.plot(iv_params1stc['v_oc'], 0, '|k')
ax0.plot(iv_params1stc['v_mp'], iv_params1stc['i_mp'], '|k')
ax0.set_ylim([0, 10])
ax0.plot(0, pvm.Isc.mean(), '_k')
ax0.plot(pvm.Voc.sum(), 0, '|k')
mpp = np.argmax(pvm.Pmod)
ax0.plot(pvm.Vmod[mpp], pvm.Imod[mpp], '_k')
iv_params1stc['p'] = iv_params1stc['v'] * iv_params1stc['i']
ax1.plot(iv_params1stc['v'], iv_params1stc['p'], '--')
ax1.plot(iv_params1stc['v_mp'], iv_params1stc['p_mp'], '|k')
ax1.plot(pvm.Vmod[mpp], pvm.Pmod[mpp], '_k')
You can get very close agreement between the pvlib 1-diode curve and the pvmismatch 2-diode curve.

You cannot define the cell as follows:
Isat1_T0=parametersw.I_o_ref,
Isat2_T0=0,
The reason is that PVMismatch fixes the diode ideality factors at n1 = 1 and n2 = 2, whereas the one-diode model requires you to supply the ideality factor n yourself.
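For reference, the one-diode ideality factor implied by the CEC parameters can be estimated from a_ref, since a_ref = n * N_s * k * T / q at the reference temperature. A small sketch, where parameters is assumed to be one row of pvlib's CEC module table:
# estimate the single-diode ideality factor n from the CEC a_ref
k = 1.381e-23          # J/K, Boltzmann constant
q = 1.602e-19          # C, elementary charge
Vt = k * 298.15 / q    # ~0.0257 V, thermal voltage at 25 degC
n = parameters.a_ref / (parameters.N_s * Vt)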
If you still want to use the one-diode model with PVMismatch, you need to rewrite the "PVcell" module in the package. I have rewritten it below for reference.
"""
This module contains the :class:`~pvmismatch.pvmismatch_lib.pvcell.PVcell`
object which is used by modules, strings and systems.
"""
from __future__ import absolute_import
from future.utils import iteritems
from pvmismatch.pvmismatch_lib.pvconstants import PVconstants
import numpy as np
from matplotlib import pyplot as plt
from scipy.optimize import newton
# Defaults
RS = 0.0038 # [ohm] series resistance
RSH = 500 # [ohm] shunt resistance
Gamma = 0.97
ISAT1_T0 = 2.76E-11 # [A] diode one saturation current
ISC0_T0 = 9.87 # [A] reference short circuit current
TCELL = 298.15 # [K] cell temperature
ARBD = 2E-3 # reverse breakdown coefficient 1
BRBD = 0. # reverse breakdown coefficient 2
VRBD_ = -20 # [V] reverse breakdown voltage
NRBD = 3.28 # reverse breakdown exponent
EG = 1.1 # [eV] band gap of cSi
ALPHA_ISC = 0.0005 # [1/K] short circuit current temperature coefficient
EPS = np.finfo(np.float64).eps
class PVcell(object):
"""
Class for PV cells.
:param Rs: series resistance [ohms]
:param Rsh: shunt resistance [ohms]
:param Isat1_T0: first saturation diode current at ref temp [A]
:param Isat2_T0: second saturation diode current [A]
:param Isc0_T0: short circuit current at ref temp [A]
:param aRBD: reverse breakdown coefficient 1
:param bRBD: reverse breakdown coefficient 2
:param VRBD: reverse breakdown voltage [V]
:param nRBD: reverse breakdown exponent
:param Eg: band gap [eV]
:param alpha_Isc: short circuit current temp coeff [1/K]
:param Tcell: cell temperature [K]
:param Ee: incident effective irradiance [suns]
:param pvconst: configuration constants object
:type pvconst: :class:`~pvmismatch.pvmismatch_lib.pvconstants.PVconstants`
"""
_calc_now = False #: if True ``calcCells()`` is called in ``__setattr__``
def __init__(self, Rs=RS, Rsh=RSH, Isat1_T0=ISAT1_T0, Gamma=Gamma,
Isc0_T0=ISC0_T0, aRBD=ARBD, bRBD=BRBD, VRBD=VRBD_,
nRBD=NRBD, Eg=EG, alpha_Isc=ALPHA_ISC,
Tcell=TCELL, Ee=1., pvconst=PVconstants()):
# user inputs
self.Rs = Rs #: [ohm] series resistance
self.Rsh = Rsh #: [ohm] shunt resistance
self.Isat1_T0 = Isat1_T0 #: [A] diode one sat. current at T0
self.Gamma = Gamma #: ideal factor
self.Isc0_T0 = Isc0_T0 #: [A] short circuit current at T0
self.aRBD = aRBD #: reverse breakdown coefficient 1
self.bRBD = bRBD #: reverse breakdown coefficient 2
self.VRBD = VRBD #: [V] reverse breakdown voltage
self.nRBD = nRBD #: reverse breakdown exponent
self.Eg = Eg #: [eV] band gap of cSi
self.alpha_Isc = alpha_Isc #: [1/K] short circuit temp. coeff.
self.Tcell = Tcell #: [K] cell temperature
self.Ee = Ee #: [suns] incident effective irradiance on cell
self.pvconst = pvconst #: configuration constants
self.Icell = None #: cell currents on IV curve [A]
self.Vcell = None #: cell voltages on IV curve [V]
self.Pcell = None #: cell power on IV curve [W]
self.VocSTC = self._VocSTC() #: estimated Voc at STC [V]
# set calculation flag
self._calc_now = True # overwrites the class attribute
def __str__(self):
fmt = '<PVcell(Ee=%g[suns], Tcell=%g[K], Isc=%g[A], Voc=%g[V])>'
return fmt % (self.Ee, self.Tcell, self.Isc, self.Voc)
def __repr__(self):
return str(self)
def __setattr__(self, key, value):
# check for floats
try:
value = np.float64(value)
except (TypeError, ValueError):
pass # fail silently if not float, eg: pvconst or _calc_now
super(PVcell, self).__setattr__(key, value)
# recalculate IV curve
if self._calc_now:
Icell, Vcell, Pcell = self.calcCell()
self.__dict__.update(Icell=Icell, Vcell=Vcell, Pcell=Pcell)
def update(self, **kwargs):
"""
Update user-defined constants.
"""
# turn off calculation flag until all attributes are updated
self._calc_now = False
# don't use __dict__.update() instead use setattr() to go through
# custom __setattr__() so that numbers are cast to floats
for k, v in iteritems(kwargs):
setattr(self, k, v)
self._calc_now = True # recalculate
@property
def Vt(self):
"""
Thermal voltage in volts.
"""
return self.pvconst.k * self.Tcell / self.pvconst.q
@property
def Isc(self):
return self.Ee * self.Isc0
@property
def Aph(self):
"""
Photogenerated current coefficient, non-dimensional.
"""
# Aph is undefined (0/0) if there is no irradiance
if self.Isc == 0: return np.nan
# short current (SC) conditions (Vcell = 0)
Vdiode_sc = self.Isc * self.Rs # diode voltage at SC
Idiode1_sc = self.Isat1 * (np.exp(Vdiode_sc / (self.Gamma*self.Vt)) - 1.)
Ishunt_sc = Vdiode_sc / self.Rsh # diode voltage at SC
# photogenerated current coefficient
return 1. + (Idiode1_sc + Ishunt_sc) / self.Isc
@property
def Isat1(self):
"""
Diode one saturation current at Tcell in amps.
"""
_Tstar = self.Tcell ** 3. / self.pvconst.T0 ** 3. # scaled temperature
_inv_delta_T = 1. / self.pvconst.T0 - 1. / self.Tcell # [1/K]
_expTstar = np.exp(
self.Eg * self.pvconst.q / (self.pvconst.k * self.Gamma) * _inv_delta_T
)
return self.Isat1_T0 * _Tstar * _expTstar # [A] Isat1(Tcell)
@property
def Isc0(self):
"""
Short circuit current at Tcell in amps.
"""
_delta_T = self.Tcell - self.pvconst.T0 # [K] temperature difference
return self.Isc0_T0 * (1. + self.alpha_Isc * _delta_T) # [A] Isc0
@property
def Voc(self):
"""
Estimate open circuit voltage of cells.
Returns Voc : numpy.ndarray of float, estimated open circuit voltage
"""
return self.Vt * self.Gamma * np.log((self.Aph * self.Isc)/self.Isat1_T0 + 1)
def _VocSTC(self):
"""
Estimate open circuit voltage of cells.
Returns Voc : numpy.ndarray of float, estimated open circuit voltage
"""
Vdiode_sc = self.Isc0_T0 * self.Rs # diode voltage at SC
Idiode1_sc = self.Isat1_T0 * (np.exp(Vdiode_sc / (self.Gamma*self.Vt)) - 1.)
Ishunt_sc = Vdiode_sc / self.Rsh # diode voltage at SC
# photogenerated current coefficient
Aph = 1. + (Idiode1_sc + Ishunt_sc) / self.Isc0_T0
# estimated Voc at STC
return self.Vt * self.Gamma * np.log((Aph * self.Isc0_T0)/self.Isat1_T0 + 1)
@property
def Igen(self):
"""
Photovoltaic generated light current (AKA IL or Iph)
Returns Igen : numpy.ndarray of float, PV generated light current [A]
Photovoltaic generated light current is zero if irradiance is zero.
"""
if self.Ee == 0: return 0
return self.Aph * self.Isc
def calcCell(self):
"""
Calculate cell I-V curves.
Returns (Icell, Vcell, Pcell) : tuple of numpy.ndarray of float
"""
Vreverse = self.VRBD * self.pvconst.negpts
Vff = self.Voc
delta_Voc = self.VocSTC - self.Voc
# to make sure that the max voltage is always in the 4th quadrant, add
# a third set of points log spaced with decreasing density, from Voc to
# Voc # STC unless Voc *is* Voc # STC, then use an arbitrary voltage at
# 80% of Voc as an estimate of Vmp assuming a fill factor of 80% and
# Isc close to Imp, or if Voc > Voc # STC, then use Voc as the max
if delta_Voc == 0:
Vff = 0.8 * self.Voc
delta_Voc = 0.2 * self.Voc
elif delta_Voc < 0:
Vff = self.VocSTC
delta_Voc = -delta_Voc
Vquad4 = Vff + delta_Voc * self.pvconst.Vmod_q4pts
Vforward = Vff * self.pvconst.pts
Vdiode = np.concatenate((Vreverse, Vforward, Vquad4), axis=0)
Idiode1 = self.Isat1 * (np.exp(Vdiode / (self.Gamma*self.Vt)) - 1.)
Ishunt = Vdiode / self.Rsh
fRBD = 1. - Vdiode / self.VRBD
# use epsilon = 2.2204460492503131e-16 to avoid "divide by zero"
fRBD[fRBD == 0] = EPS
Vdiode_norm = Vdiode / self.Rsh / self.Isc0_T0
fRBD = self.Isc0_T0 * fRBD ** (-self.nRBD)
IRBD = (self.aRBD * Vdiode_norm + self.bRBD * Vdiode_norm ** 2) * fRBD
Icell = self.Igen - Idiode1 - Ishunt - IRBD
Vcell = Vdiode - Icell * self.Rs
Pcell = Icell * Vcell
return Icell, Vcell, Pcell
# diode model
# *-->--*--->---*--Rs->-Icell--+
# ^ | | ^
# | | | |
# Igen Idiode Ishunt Vcell
# | | | |
# | v v v
# *--<--*---<---*--<-----------=
# http://en.wikipedia.org/wiki/Diode_modelling#Shockley_diode_model
# http://en.wikipedia.org/wiki/Diode#Shockley_diode_equation
# http://en.wikipedia.org/wiki/William_Shockley
@staticmethod
def f_Icell(Icell, Vcell, Igen, Rs, Vt, Isat1, Rsh):
"""
Objective function for Icell.
:param Icell: cell current [A]
:param Vcell: cell voltage [V]
:param Igen: photogenerated current at Tcell and Ee [A]
:param Rs: series resistance [ohms]
:param Vt: thermal voltage [V]
:param Isat1: first diode saturation current at Tcell [A]
:param Rsh: shunt resistance [ohms]
:return: residual = (Icell - Icell0) [A]
"""
# arbitrary current condition
Vdiode = Vcell + Icell * Rs # diode voltage
Idiode1 = Isat1 * (np.exp(Vdiode / (Gamma * Vt)) - 1.) # diode current (Gamma here is the module-level default, not self.Gamma)
Ishunt = Vdiode / Rsh # shunt current
return Igen - Idiode1 - Ishunt - Icell
def calcIcell(self, Vcell):
"""
Calculate Icell as a function of Vcell.
:param Vcell: cell voltage [V]
:return: Icell
"""
args = (np.float64(Vcell), self.Igen, self.Rs, self.Vt,
self.Isat1, self.Rsh)
return newton(self.f_Icell, x0=self.Isc, args=args)
@staticmethod
def f_Vcell(Vcell, Icell, Igen, Rs, Vt, Isat1, Rsh):
return PVcell.f_Icell(Icell, Vcell, Igen, Rs, Vt, Isat1, Rsh)
def calcVcell(self, Icell):
"""
Calculate Vcell as a function of Icell.
:param Icell: cell current [A]
:return: Vcell
"""
args = (np.float64(Icell), self.Igen, self.Rs, self.Vt,
self.Isat1, self.Rsh)
return newton(self.f_Vcell, x0=self.Voc, args=args)
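With this modified class, a single-diode cell can then be built straight from the CEC parameters. The snippet below is only a sketch: parameters is again assumed to be one row of pvlib's CEC table, the per-cell scaling of the resistances and the conversion of alpha_sc to a fractional coefficient are assumptions, and Gamma is the ideality factor estimated from a_ref as shown earlier.
n = parameters.a_ref / (parameters.N_s * 0.025693)  # ideality factor at 25 degC (see above)
cell = PVcell(
    Rs=parameters.R_s / parameters.N_s,        # per-cell series resistance
    Rsh=parameters.R_sh_ref / parameters.N_s,  # per-cell shunt resistance
    Isat1_T0=parameters.I_o_ref,
    Gamma=n,                                   # single-diode ideality factor
    Isc0_T0=parameters.I_sc_ref,
    alpha_Isc=parameters.alpha_sc / parameters.I_sc_ref,  # A/K -> fractional 1/K
    Eg=1.121,
    Tcell=298.15)
print(cell.Voc * parameters.N_s)               # compare with V_oc_ref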

Related

I want to add TensorBoard to the code from pytorch.org

I'm pretty new to deep learning, but I want to add TensorBoard to the following code to track loss, accuracy, average precision and so on.
Sample code from the TorchVision -2.3 Object Detection Finetuning Tutorial
http://pytorch.org/tutorials/intermediate/torchvision_tutorial.html
import os
import numpy as np
import torch
from PIL import Image
import sys
import torchvision
from torchvision.models.detection.faster_rcnn import FastRCNNPredictor
from torchvision.models.detection.mask_rcnn import MaskRCNNPredictor
from engine import train_one_epoch, evaluate
import utils
import transforms as T
#from torch.utils.tensorboard import SummaryWriter
#writer = SummaryWriter()
class PennFudanDataset(object):
def __init__(self, root, transforms):
self.root = root
self.transforms = transforms
# load all image files, sorting them to
# ensure that they are aligned
self.imgs = list(sorted(os.listdir(os.path.join(root, "PNGImages"))))
self.masks = list(sorted(os.listdir(os.path.join(root, "PedMasks"))))
def __getitem__(self, idx):
# load images and masks
img_path = os.path.join(self.root, "PNGImages", self.imgs[idx])
mask_path = os.path.join(self.root, "PedMasks", self.masks[idx])
img = Image.open(img_path).convert("RGB")
# note that we haven't converted the mask to RGB,
# because each color corresponds to a different instance
# with 0 being background
mask = Image.open(mask_path)
mask = np.array(mask)
# instances are encoded as different colors
obj_ids = np.unique(mask)
# first id is the background, so remove it
obj_ids = obj_ids[1:]
# split the color-encoded mask into a set
# of binary masks
masks = mask == obj_ids[:, None, None]
# get bounding box coordinates for each mask
num_objs = len(obj_ids)
boxes = []
for i in range(num_objs):
pos = np.where(masks[i])
xmin = np.min(pos[1])
xmax = np.max(pos[1])
ymin = np.min(pos[0])
ymax = np.max(pos[0])
boxes.append([xmin, ymin, xmax, ymax])
boxes = torch.as_tensor(boxes, dtype=torch.float32)
# there is only one class
labels = torch.ones((num_objs,), dtype=torch.int64)
masks = torch.as_tensor(masks, dtype=torch.uint8)
image_id = torch.tensor([idx])
area = (boxes[:, 3] - boxes[:, 1]) * (boxes[:, 2] - boxes[:, 0])
# suppose all instances are not crowd
iscrowd = torch.zeros((num_objs,), dtype=torch.int64)
target = {}
target["boxes"] = boxes
target["labels"] = labels
target["masks"] = masks
target["image_id"] = image_id
target["area"] = area
target["iscrowd"] = iscrowd
if self.transforms is not None:
img, target = self.transforms(img, target)
return img, target
def __len__(self):
return len(self.imgs)
def get_model_instance_segmentation(num_classes):
# load an instance segmentation model pre-trained on COCO
model = torchvision.models.detection.maskrcnn_resnet50_fpn(pretrained=True)
# get number of input features for the classifier
in_features = model.roi_heads.box_predictor.cls_score.in_features
# replace the pre-trained head with a new one
model.roi_heads.box_predictor = FastRCNNPredictor(in_features, num_classes)
# now get the number of input features for the mask classifier
in_features_mask = model.roi_heads.mask_predictor.conv5_mask.in_channels
hidden_layer = 256
# and replace the mask predictor with a new one
model.roi_heads.mask_predictor = MaskRCNNPredictor(in_features_mask,
hidden_layer,
num_classes)
return model
def get_transform(train):
transforms = []
transforms.append(T.ToTensor())
if train:
transforms.append(T.RandomHorizontalFlip(0.5))
return T.Compose(transforms)
def main():
# train on the GPU or on the CPU, if a GPU is not available
device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
# our dataset has two classes only - background and person
num_classes = 2
# use our dataset and defined transformations
dataset = PennFudanDataset('PennFudanPed', get_transform(train=True))
dataset_test = PennFudanDataset('PennFudanPed', get_transform(train=False))
# split the dataset in train and test set
indices = torch.randperm(len(dataset)).tolist()
dataset = torch.utils.data.Subset(dataset, indices[:-50])
dataset_test = torch.utils.data.Subset(dataset_test, indices[-50:])
# define training and validation data loaders
data_loader = torch.utils.data.DataLoader(
dataset, batch_size=4, shuffle=True, num_workers=4,
collate_fn=utils.collate_fn)
data_loader_test = torch.utils.data.DataLoader(
dataset_test, batch_size=2, shuffle=False, num_workers=4,
collate_fn=utils.collate_fn)
# get the model using our helper function
model = get_model_instance_segmentation(num_classes)
# move model to the right device
model.to(device)
# construct an optimizer
params = [p for p in model.parameters() if p.requires_grad]
optimizer = torch.optim.SGD(params, lr=0.005,
momentum=0.9, weight_decay=0.0005)
# and a learning rate scheduler
lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer,
step_size=3,
gamma=0.1)
# let's train it for 10 epochs
num_epochs = 10
for epoch in range(num_epochs):
# train for one epoch, printing every 10 iterations
train_one_epoch(model, optimizer, data_loader, device, epoch, print_freq=10)
# update the learning rate
lr_scheduler.step()
# evaluate on the test dataset
evaluate(model, data_loader_test, device=device)
print("That's it!")
if __name__ == "__main__":
main()
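One possible way to wire TensorBoard into this loop, sketched under the assumption that your copy of engine.py has train_one_epoch return its MetricLogger and evaluate return its CocoEvaluator (the current torchvision reference scripts do; if yours does not, add a return statement to those functions): create a single SummaryWriter and log scalars once per epoch inside main().
from torch.utils.tensorboard import SummaryWriter

writer = SummaryWriter()  # logs event files to a runs/ subdirectory by default
for epoch in range(num_epochs):
    metric_logger = train_one_epoch(model, optimizer, data_loader,
                                    device, epoch, print_freq=10)
    # 'loss' is the smoothed total loss tracked by the MetricLogger
    writer.add_scalar("train/loss",
                      metric_logger.meters["loss"].global_avg, epoch)
    lr_scheduler.step()
    coco_evaluator = evaluate(model, data_loader_test, device=device)
    # stats[0] is the COCO bbox mAP averaged over IoU=0.50:0.95
    writer.add_scalar("val/bbox_mAP",
                      coco_evaluator.coco_eval["bbox"].stats[0], epoch)
writer.close()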

TypeError: only length-1 arrays can be converted to Python scalars, python2.7

I searched for this issue and found several questions with the same error but different code and different causes, so I hesitated to post my issue here. After reading most of the answers, I did not find a solution to my problem.
The original and full code here
chapter6.py:
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import numpy as np
import matplotlib.pyplot as plt
from datasets import gtsrb
from classifiers import MultiClassSVM
def main():
strategies = ['one-vs-one', 'one-vs-all']
features = [None, 'gray', 'rgb', 'hsv', 'surf', 'hog']
accuracy = np.zeros((2, len(features)))
precision = np.zeros((2, len(features)))
recall = np.zeros((2, len(features)))
for f in xrange(len(features)):
print "feature", features[f]
(X_train, y_train), (X_test, y_test) = gtsrb.load_data(
"datasets/gtsrb_training",
feature=features[f],
test_split=0.2,
seed=42)
# convert to numpy
X_train = np.squeeze(np.array(X_train)).astype(np.float32)
y_train = np.array(y_train)
X_test = np.squeeze(np.array(X_test)).astype(np.float32)
y_test = np.array(y_test)
# find all class labels
labels = np.unique(np.hstack((y_train, y_test)))
for s in xrange(len(strategies)):
print " - strategy", strategies[s]
# set up SVMs
MCS = MultiClassSVM(len(labels), strategies[s])
# training phase
print " - train"
MCS.fit(X_train, y_train)
# test phase
print " - test"
acc, prec, rec = MCS.evaluate(X_test, y_test)
accuracy[s, f] = acc
precision[s, f] = np.mean(prec)
recall[s, f] = np.mean(rec)
print " - accuracy: ", acc
print " - mean precision: ", np.mean(prec)
print " - mean recall: ", np.mean(rec)
# plot results as stacked bar plot
f, ax = plt.subplots(2)
for s in xrange(len(strategies)):
x = np.arange(len(features))
ax[s].bar(x - 0.2, accuracy[s, :], width=0.2, color='b',
hatch='/', align='center')
ax[s].bar(x, precision[s, :], width=0.2, color='r', hatch='\\',
align='center')
ax[s].bar(x + 0.2, recall[s, :], width=0.2, color='g', hatch='x',
align='center')
ax[s].axis([-0.5, len(features) + 0.5, 0, 1.5])
ax[s].legend(('Accuracy', 'Precision', 'Recall'), loc=2, ncol=3,
mode='expand')
ax[s].set_xticks(np.arange(len(features)))
ax[s].set_xticklabels(features)
ax[s].set_title(strategies[s])
plt.show()
if __name__ == '__main__':
main()
classifiers.py
import cv2
import numpy as np
from abc import ABCMeta, abstractmethod
from matplotlib import pyplot as plt
__author__ = "Michael Beyeler"
__license__ = "GNU GPL 3.0 or later"
class Classifier:
"""
Abstract base class for all classifiers
A classifier needs to implement at least two methods:
- fit: A method to train the classifier by fitting the model to
the data.
- evaluate: A method to test the classifier by predicting labels of
some test data based on the trained model.
A classifier also needs to specify a classification strategy via
setting self.mode to either "one-vs-all" or "one-vs-one".
The one-vs-all strategy involves training a single classifier per
class, with the samples of that class as positive samples and all
other samples as negatives.
The one-vs-one strategy involves training a single classifier per
class pair, with the samples of the first class as positive samples
and the samples of the second class as negative samples.
This class also provides method to calculate accuracy, precision,
recall, and the confusion matrix.
"""
__metaclass__ = ABCMeta
@abstractmethod
def fit(self, X_train, y_train):
pass
@abstractmethod
def evaluate(self, X_test, y_test, visualize=False):
pass
def _accuracy(self, y_test, Y_vote):
"""Calculates accuracy
This method calculates the accuracy based on a vector of
ground-truth labels (y_test) and a 2D voting matrix (Y_vote) of
size (len(y_test), num_classes).
:param y_test: vector of ground-truth labels
:param Y_vote: 2D voting matrix (rows=samples, cols=class votes)
:returns: accuracy e[0,1]
"""
# predicted classes
y_hat = np.argmax(Y_vote, axis=1)
# all cases where predicted class was correct
mask = y_hat == y_test
return np.float32(np.count_nonzero(mask)) / len(y_test)
def _precision(self, y_test, Y_vote):
"""Calculates precision
This method calculates precision extended to multi-class
classification by help of a confusion matrix.
:param y_test: vector of ground-truth labels
:param Y_vote: 2D voting matrix (rows=samples, cols=class votes)
:returns: precision e[0,1]
"""
# predicted classes
y_hat = np.argmax(Y_vote, axis=1)
if self.mode == "one-vs-one":
# need confusion matrix
conf = self._confusion(y_test, Y_vote)
# consider each class separately
prec = np.zeros(self.num_classes)
for c in xrange(self.num_classes):
# true positives: label is c, classifier predicted c
tp = conf[c, c]
# false positives: label is c, classifier predicted not c
fp = np.sum(conf[:, c]) - conf[c, c]
if tp + fp != 0:
prec[c] = tp * 1. / (tp + fp)
elif self.mode == "one-vs-all":
# consider each class separately
prec = np.zeros(self.num_classes)
for c in xrange(self.num_classes):
# true positives: label is c, classifier predicted c
tp = np.count_nonzero((y_test == c) * (y_hat == c))
# false positives: label is c, classifier predicted not c
fp = np.count_nonzero((y_test == c) * (y_hat != c))
if tp + fp != 0:
prec[c] = tp * 1. / (tp + fp)
return prec
def _recall(self, y_test, Y_vote):
"""Calculates recall
This method calculates recall extended to multi-class
classification by help of a confusion matrix.
:param y_test: vector of ground-truth labels
:param Y_vote: 2D voting matrix (rows=samples, cols=class votes)
:returns: recall e[0,1]
"""
# predicted classes
y_hat = np.argmax(Y_vote, axis=1)
if self.mode == "one-vs-one":
# need confusion matrix
conf = self._confusion(y_test, Y_vote)
# consider each class separately
recall = np.zeros(self.num_classes)
for c in xrange(self.num_classes):
# true positives: label is c, classifier predicted c
tp = conf[c, c]
# false negatives: label is not c, classifier predicted c
fn = np.sum(conf[c, :]) - conf[c, c]
if tp + fn != 0:
recall[c] = tp * 1. / (tp + fn)
elif self.mode == "one-vs-all":
# consider each class separately
recall = np.zeros(self.num_classes)
for c in xrange(self.num_classes):
# true positives: label is c, classifier predicted c
tp = np.count_nonzero((y_test == c) * (y_hat == c))
# false negatives: label is not c, classifier predicted c
fn = np.count_nonzero((y_test != c) * (y_hat == c))
if tp + fn != 0:
recall[c] = tp * 1. / (tp + fn)
return recall
def _confusion(self, y_test, Y_vote):
"""Calculates confusion matrix
This method calculates the confusion matrix based on a vector of
ground-truth labels (y-test) and a 2D voting matrix (Y_vote) of
size (len(y_test), num_classes).
Matrix element conf[r,c] will contain the number of samples that
were predicted to have label r but have ground-truth label c.
:param y_test: vector of ground-truth labels
:param Y_vote: 2D voting matrix (rows=samples, cols=class votes)
:returns: confusion matrix
"""
y_hat = np.argmax(Y_vote, axis=1)
conf = np.zeros((self.num_classes, self.num_classes)).astype(np.int32)
for c_true in xrange(self.num_classes):
# looking at all samples of a given class, c_true
# how many were classified as c_true? how many as others?
for c_pred in xrange(self.num_classes):
y_this = np.where((y_test == c_true) * (y_hat == c_pred))
conf[c_pred, c_true] = np.count_nonzero(y_this)
return conf
class MultiClassSVM(Classifier):
"""
Multi-class classification using Support Vector Machines (SVMs)
This class implements an SVM for multi-class classification. Whereas
some classifiers naturally permit the use of more than two classes
(such as neural networks), SVMs are binary in nature.
However, we can turn SVMs into multinomial classifiers using at least
two different strategies:
* one-vs-all: A single classifier is trained per class, with the
samples of that class as positives (label 1) and all
others as negatives (label 0).
* one-vs-one: For k classes, k*(k-1)/2 classifiers are trained for each
pair of classes, with the samples of the one class as
positives (label 1) and samples of the other class as
negatives (label 0).
Each classifier then votes for a particular class label, and the final
decision (classification) is based on a majority vote.
"""
def __init__(self, num_classes, mode="one-vs-all", params=None):
"""
The constructor makes sure the correct number of classifiers is
initialized, depending on the mode ("one-vs-all" or "one-vs-one").
:param num_classes: The number of classes in the data.
:param mode: Which classification mode to use.
"one-vs-all": single classifier per class
"one-vs-one": single classifier per class pair
Default: "one-vs-all"
:param params: SVM training parameters.
For now, default values are used for all SVMs.
Hyperparameter exploration can be achieved by
embedding the MultiClassSVM process flow in a
for-loop that classifies the data with
different parameter values, then pick the
values that yield the best accuracy.
Default: None
"""
self.num_classes = num_classes
self.mode = mode
self.params = params or dict()
# initialize correct number of classifiers
self.classifiers = []
if mode == "one-vs-one":
# k classes: need k*(k-1)/2 classifiers
for _ in xrange(num_classes*(num_classes - 1) / 2):
self.classifiers.append(cv2.ml.SVM_create())
elif mode == "one-vs-all":
# k classes: need k classifiers
for _ in xrange(num_classes):
self.classifiers.append(cv2.ml.SVM_create())
else:
print "Unknown mode ", mode
def fit(self, X_train, y_train, params=None):
"""Fits the model to training data
This method trains the classifier on data (X_train) using either
the "one-vs-one" or "one-vs-all" strategy.
:param X_train: input data (rows=samples, cols=features)
:param y_train: vector of class labels
:param params: dict to specify training options for cv2.SVM.train
leave blank to use the parameters passed to the
constructor
"""
if params is None:
params = self.params
if self.mode == "one-vs-one":
svm_id = 0
for c1 in xrange(self.num_classes):
for c2 in xrange(c1 + 1, self.num_classes):
# indices where class labels are either `c1` or `c2`
data_id = np.where((y_train == c1) + (y_train == c2))[0]
# set class label to 1 where class is `c1`, else 0
y_train_bin = np.where(y_train[data_id] == c1, 1,
0).flatten()
self.classifiers[svm_id].train(X_train[data_id, :],
y_train_bin,
params=self.params)
svm_id += 1
elif self.mode == "one-vs-all":
for c in xrange(self.num_classes):
# train c-th SVM on class c vs. all other classes
# set class label to 1 where class==c, else 0
y_train_bin = np.where(y_train == c, 1, 0).flatten()
# train SVM
self.classifiers[c].train(X_train, y_train_bin,
params=self.params)
def evaluate(self, X_test, y_test, visualize=False):
"""Evaluates the model on test data
This method evaluates the classifier's performance on test data
(X_test) using either the "one-vs-one" or "one-vs-all" strategy.
:param X_test: input data (rows=samples, cols=features)
:param y_test: vector of class labels
:param visualize: flag whether to plot the results (True) or not
(False)
:returns: accuracy, precision, recall
"""
# prepare Y_vote: for each sample, count how many times we voted
# for each class
Y_vote = np.zeros((len(y_test), self.num_classes))
if self.mode == "one-vs-one":
svm_id = 0
for c1 in xrange(self.num_classes):
for c2 in xrange(c1 + 1, self.num_classes):
data_id = np.where((y_test == c1) + (y_test == c2))[0]
X_test_id = X_test[data_id, :]
y_test_id = y_test[data_id]
# set class label to 1 where class==c1, else 0
# y_test_bin = np.where(y_test_id==c1,1,0).reshape(-1,1)
# predict labels
y_hat = self.classifiers[svm_id].predict_all(X_test_id)
for i in xrange(len(y_hat)):
if y_hat[i] == 1:
Y_vote[data_id[i], c1] += 1
elif y_hat[i] == 0:
Y_vote[data_id[i], c2] += 1
else:
print "y_hat[", i, "] = ", y_hat[i]
# we vote for c1 where y_hat is 1, and for c2 where y_hat
# is 0 np.where serves as the inner index into the data_id
# array, which in turn serves as index into the results
# array
# Y_vote[data_id[np.where(y_hat == 1)[0]], c1] += 1
# Y_vote[data_id[np.where(y_hat == 0)[0]], c2] += 1
svm_id += 1
elif self.mode == "one-vs-all":
for c in xrange(self.num_classes):
# set class label to 1 where class==c, else 0
# predict class labels
# y_test_bin = np.where(y_test==c,1,0).reshape(-1,1)
# predict labels
y_hat = self.classifiers[c].predict_all(X_test)
# we vote for c where y_hat is 1
if np.any(y_hat):
Y_vote[np.where(y_hat == 1)[0], c] += 1
# with this voting scheme it's possible to end up with samples
# that have no label at all...in this case, pick a class at
# random...
no_label = np.where(np.sum(Y_vote, axis=1) == 0)[0]
Y_vote[no_label, np.random.randint(self.num_classes,
size=len(no_label))] = 1
accuracy = self._accuracy(y_test, Y_vote)
precision = self._precision(y_test, Y_vote)
recall = self._recall(y_test, Y_vote)
return accuracy, precision, recall
When running chapter6.py, the output is:
feature None
- strategy one-vs-one
- train
Traceback (most recent call last):
File "/home/redhwan/Downloads/opencv-python-blueprints-master/chapter6/chapter6.py", line 77, in <module>
main()
File "/home/redhwan/Downloads/opencv-python-blueprints-master/chapter6/chapter6.py", line 44, in main
MCS.fit(X_train, y_train)
File "/home/redhwan/Downloads/opencv-python-blueprints-master/chapter6/classifiers.py", line 258, in fit
params=self.params)
TypeError: only length-1 arrays can be converted to Python scalars
Please help me or share your suggestions. Thank you in advance!
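Not a full answer, but one thing worth checking: the classifiers.py above was written for the old cv2.SVM API from OpenCV 2.4, where train() accepted a params keyword, while cv2.ml.SVM_create() belongs to the OpenCV 3 API, whose train() takes (samples, layout, responses) and has no params argument (predict_all is likewise an OpenCV 2.4 method). A hedged sketch of the OpenCV 3 style calls, reusing the variable names from the code above:
import cv2
import numpy as np

svm = cv2.ml.SVM_create()
# OpenCV 3 wants float32 samples, int32 responses and an explicit layout flag
svm.train(np.float32(X_train[data_id, :]),
          cv2.ml.ROW_SAMPLE,
          np.int32(y_train_bin))
# predict() returns a (retval, results) tuple in the OpenCV 3 bindings
y_hat = svm.predict(np.float32(X_test_id))[1]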

AttributeError: GaussianMixture instance has no attribute 'loglike'

I'm trying to implement a Gaussian mixture model with the expectation-maximization algorithm, and I get this error.
This is the Gaussian Mixture model that I used:
class GaussianMixture:
"Model mixture of two univariate Gaussians and their EM estimation"
def __init__(self, data, mu_min=min(data), mu_max=max(data), sigma_min=.1, sigma_max=1, mix=.5):
self.data = data
#init with multiple gaussians
self.one = Gaussian(uniform(mu_min, mu_max),
uniform(sigma_min, sigma_max))
self.two = Gaussian(uniform(mu_min, mu_max),
uniform(sigma_min, sigma_max))
#as well as how much to mix them
self.mix = mix
def Estep(self):
"Perform an E(stimation)-step, freshening up self.loglike in the process"
# compute weights
self.loglike = 0. # = log(p = 1)
for datum in self.data:
# unnormalized weights
wp1 = self.one.pdf(datum) * self.mix
wp2 = self.two.pdf(datum) * (1. - self.mix)
# compute denominator
den = wp1 + wp2
# normalize
wp1 /= den
wp2 /= den
# add into loglike
self.loglike += log(wp1 + wp2)
# yield weight tuple
yield (wp1, wp2)
def Mstep(self, weights):
"Perform an M(aximization)-step"
# compute denominators
(left, rigt) = zip(*weights)
one_den = sum(left)
two_den = sum(rigt)
# compute new means
self.one.mu = sum(w * d / one_den for (w, d) in zip(left, data))
self.two.mu = sum(w * d / two_den for (w, d) in zip(rigt, data))
# compute new sigmas
self.one.sigma = sqrt(sum(w * ((d - self.one.mu) ** 2)
for (w, d) in zip(left, data)) / one_den)
self.two.sigma = sqrt(sum(w * ((d - self.two.mu) ** 2)
for (w, d) in zip(rigt, data)) / two_den)
# compute new mix
self.mix = one_den / len(data)
def iterate(self, N=1, verbose=False):
"Perform N iterations, then compute log-likelihood"
def pdf(self, x):
return (self.mix)*self.one.pdf(x) + (1-self.mix)*self.two.pdf(x)
def __repr__(self):
return 'GaussianMixture({0}, {1}, mix={2.03})'.format(self.one,
self.two,
self.mix)
def __str__(self):
return 'Mixture: {0}, {1}, mix={2:.03})'.format(self.one,
self.two,
self.mix)
And then, while training, I get that error in the conditional statement.
# Check out the fitting process
n_iterations = 5
best_mix = None
best_loglike = float('-inf')
mix = GaussianMixture(data)
for _ in range(n_iterations):
try:
#train!
mix.iterate(verbose=True)
if mix.loglike > best_loglike:
best_loglike = mix.loglike
best_mix = mix
except (ZeroDivisionError, ValueError, RuntimeWarning): # Catch division errors from bad starts, and just throw them out...
pass
Any ideas why I have the following error?
AttributeError: GaussianMixture instance has no attribute 'loglike'
The loglike attribute is only created when you call the Estep method.
def Estep(self):
"Perform an E(stimation)-step, freshening up self.loglike in the process"
# compute weights
self.loglike = 0. # = log(p = 1)
You didn't call Estep between creating the GaussianMixture instance and mix.loglike:
mix = GaussianMixture(data)
for _ in range(n_iterations):
try:
#train!
mix.iterate(verbose=True)
if mix.loglike > best_loglike:
And the iterate method is empty (It looks like you forgot some code here).
def iterate(self, N=1, verbose=False):
"Perform N iterations, then compute log-likelihood"
So, there'll be no loglike attribute set on the mix instance by the time you do if mix.loglike. Hence, the AttributeError.
You need to do one of the following:
Call the Estep method (since you set self.loglike there), for example from a filled-in iterate(), as sketched below
Define a loglike attribute in __init__
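For the first option, a minimal iterate() might look like the following (a sketch only, based on the Estep and Mstep shown above):
def iterate(self, N=1, verbose=False):
    "Perform N iterations, then compute log-likelihood"
    for i in range(1, N + 1):
        # Mstep() consumes the weights yielded by Estep(), which also
        # refreshes self.loglike as a side effect
        self.Mstep(self.Estep())
        if verbose:
            print('{0:2} {1}'.format(i, self))
    # fully consume one more E-step so self.loglike reflects the
    # final parameters
    list(self.Estep())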

Genetic algorithm (model fitting)

I have run a genetic algorithm model on antimicrobial resistance (AMR), but it raises this error: amr() takes exactly 3 arguments (2 given). Hopefully someone can help me trace the error, as I desperately need to solve this within two days. Thank you. My code is embedded below:
# First code to run
# imports relevant modules
# then defines functions for the Levenberg-Marquardt algorithm
import numpy as np
import matplotlib.pyplot as plt
#from scipy.optimize import leastsq
%matplotlib inline
time = np.arange(0.0, 3000.1,1.0)
pop = np.array([2,27,43,36,39,32,27,22,10,14,14,4,4,7,3,3,1])
def amr(pars,t):
beta,gamma,sigma_A,A,H,MIC_S,MIC_R,H,r = pars
E_S = 1-Emax*A**H/(MIC_S**H + A**H)
E_R = 1-Emax*A**H/(MIC_R**H + A**H)
derivs = [r*(1-(R+S)/Nmax)*E_S*S - sigma_S*S - beta*S*R/(R+S),
r*(1-gamma)*(1-(R+S)/Nmax)*E_R*R - sigma_R*R + beta*S*R/(R+S),
-sigma_A*A]
return derivs
def amr_resid(pars,t,data):
return amr(pars,t)-data
# code for the genetic algorithm. Relies on data set up above
# define a sum of squares function that we will use as fitness
def amr_ss(pars,t,data):
return sum(amr_resid(pars,t,data)**2)
# Parameter values
npars = 3
popsize = 60 # this needs to be a multiple of 3
elitism = popsize/3
# strength of mutation: the higher the number the larger the mutations can be, and vice versa
psigma = 0.08
ngenerations = 100
#set up initial population with parameters at log normal distribution around (1,1,1)
population = 10**np.random.normal(0,psigma,size=(popsize,npars))
newpop = population
# Matrices into which we put results
best = np.zeros(shape=(ngenerations,npars))
bestfitness = np.zeros(ngenerations)
fitnesses = np.zeros(shape=(popsize,ngenerations))
# Genetic algorithm code
for j in range(ngenerations):
# work out fitness
for i in range(popsize):
fitnesses[i,j] = amr_ss(population[i,:],time,pop)
# find the best and copy them into the next generation: we use the top 1/3rd
newpop[range(elitism),:]=population[np.argsort(fitnesses[:,j])[range(elitism)],:]
best[j,:]=newpop[0,:]
bestfitness[j]=np.sort(fitnesses[:,j])[0]
#create some mutants
for i in range(elitism):
# mutants have multiplicative change so that the change is a fixed proportion of the parameter value
newpop[elitism+i,:] = newpop[i,:] * 10**np.random.normal(0,psigma,npars)
# now create some recombinants: the gene values also mutate
for i in range(elitism):
parents = np.random.choice(elitism,2,replace=False)
# first gene from first parent
newpop[2*elitism+i,0]=newpop[parents[0],0] * 10**np.random.normal(0,psigma)
# second gene at random from first or second parent: depends on recombination position
if (np.random.rand()<0.5):
newpop[2*elitism+i,1]=newpop[parents[0],1] * 10**np.random.normal(0,psigma)
else:
newpop[2*elitism+i,1]=newpop[parents[1],1] * 10**np.random.normal(0,psigma)
# third gene from second parent
newpop[2*elitism+i,2]=newpop[parents[1],2] * 10**np.random.normal(0,psigma)
#update population
population = newpop
plt.boxplot(fitnesses) ;

Plot in TensorBoard always closes on itself, like a circle

I was trying to plot a loss curve, but it always looks abnormal (just like a circle; I really don't know how to describe it properly in English). I have found many topics about questions like this but still can't solve it. My TensorFlow version is 0.10.0.
import tensorflow as tf
from tensorflow.core.util.event_pb2 import SessionLog
import os
# initialize variables/model parameters
# define the training loop operations
def inputs():
# read/generate input training data X and expected outputs Y
weight_age = [[84,46],[73,20],[65,52],[70,30],[76,57],[69,25],[63,28],[72,36],[79,57],[75,44],[27,24]
,[89,31],[65,52],[57,23],[59,60],[69,48],[60,34],[79,51],[75,50],[82,34],[59,46],[67,23],
[85,37],[55,40],[63,30]]
blodd_fat_content = [354,190,405,263,451,302,288,385,402,365,209,290,346,
254,395,434,220,374,308,220,311,181,274,303,244]
return tf.to_float(weight_age), tf.to_float(blodd_fat_content)
def inference(X):
# compute inference model over data X and return the result
return tf.matmul(X, W) + b
def loss(X, Y):
# compute loss over training data X and expected outputs Y
Y_predicted = inference(X)
return tf.reduce_sum(tf.squared_difference(Y, Y_predicted))
def train(total_loss):
# train / adjust model parameters according to computed total loss
learning_rate = 1e-7
return tf.train.GradientDescentOptimizer(learning_rate).minimize(total_loss)
def evaluate(sess, X, Y):
# evaluate the resulting trained model
print (sess.run(inference([[80., 25.]])))
print (sess.run(inference([[60., 25.]])))
g1 = tf.Graph()
with tf.Session(graph=g1) as sess:
W = tf.Variable(tf.zeros([2,1]), name="weights")
b = tf.Variable(0., name="bias")
tf.initialize_all_variables().run()
X, Y = inputs()
print (sess.run(W))
total_loss = loss(X, Y)
train_op = train(total_loss)
tf.scalar_summary("loss", total_loss)
summaries = tf.merge_all_summaries()
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(sess=sess, coord=coord)
summary_writer = tf.train.SummaryWriter('linear', g1)
summary_writer.add_session_log(session_log= SessionLog(status=SessionLog.START), global_step=1)
# actual training loop
training_steps = 100
tolerance = 100
total_loss_last = 0
initial_step = 0
# Create a saver.
saver = tf.train.Saver()
# verify if we don't have a checkpoint saved already
ckpt = tf.train.get_checkpoint_state(os.path.dirname('my_model'))
if ckpt and ckpt.model_checkpoint_path:
# Restores from checkpoint
saver.restore(sess, ckpt.model_checkpoint_path)
initial_step = int(ckpt.model_checkpoint_path.rsplit('-', 1)[1])
# summary_writer.add_session_log(SessionLog(status=SessionLog.START), global_step=initial_step)
for step in range(initial_step, training_steps):
sess.run([train_op])
if step%20 == 0:
saver.save(sess, 'my-model', global_step=step)
gap = abs(sess.run(total_loss) - total_loss_last)
total_loss_last = sess.run(total_loss)
summary_writer.add_summary(sess.run(summaries), step)
# for debugging and learning purposes, see how the loss gets decremented thru training steps
if step % 10 == 0:
print ("loss: ", sess.run([total_loss]))
print("step: ", step)
if gap < tolerance:
break
# evaluation...
evaluate(sess, X, Y)
coord.request_stop()
coord.join(threads)
saver.save(sess, 'my-model', global_step=training_steps)
summary_writer.flush()
sess.close()
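I can't be sure without seeing the event files, but a loss curve that closes back on itself in TensorBoard usually means one log directory contains points whose global_step values overlap (for example from re-running the script into the same directory, or from restoring a checkpoint and writing earlier steps again), so TensorBoard joins the overlapping points into a loop. A hedged workaround is to give every run its own log directory, for example:
# write each run's events into its own subdirectory so that the
# global_step values of different runs never overlap in one plot
import time
logdir = 'linear/run-{}'.format(int(time.time()))
summary_writer = tf.train.SummaryWriter(logdir, g1)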