AttributeError: GaussianMixture instance has no attribute 'loglike' - python-2.7

I'm trying to implement a Gaussian mixture model with the Expectation–Maximization algorithm, and I get this error.
This is the Gaussian mixture model that I used:
from math import log, sqrt
from random import uniform
# (the Gaussian single-distribution helper class is defined elsewhere in the original code)

class GaussianMixture:
    "Model mixture of two univariate Gaussians and their EM estimation"

    def __init__(self, data, mu_min=min(data), mu_max=max(data), sigma_min=.1,
                 sigma_max=1, mix=.5):
        self.data = data
        # init with multiple gaussians
        self.one = Gaussian(uniform(mu_min, mu_max),
                            uniform(sigma_min, sigma_max))
        self.two = Gaussian(uniform(mu_min, mu_max),
                            uniform(sigma_min, sigma_max))
        # as well as how much to mix them
        self.mix = mix

    def Estep(self):
        "Perform an E(stimation)-step, freshening up self.loglike in the process"
        # compute weights
        self.loglike = 0.  # = log(p = 1)
        for datum in self.data:
            # unnormalized weights
            wp1 = self.one.pdf(datum) * self.mix
            wp2 = self.two.pdf(datum) * (1. - self.mix)
            # compute denominator
            den = wp1 + wp2
            # normalize
            wp1 /= den
            wp2 /= den
            # add into loglike
            self.loglike += log(wp1 + wp2)
            # yield weight tuple
            yield (wp1, wp2)

    def Mstep(self, weights):
        "Perform an M(aximization)-step"
        # compute denominators
        (left, rigt) = zip(*weights)
        one_den = sum(left)
        two_den = sum(rigt)
        # compute new means
        self.one.mu = sum(w * d / one_den for (w, d) in zip(left, data))
        self.two.mu = sum(w * d / two_den for (w, d) in zip(rigt, data))
        # compute new sigmas
        self.one.sigma = sqrt(sum(w * ((d - self.one.mu) ** 2)
                                  for (w, d) in zip(left, data)) / one_den)
        self.two.sigma = sqrt(sum(w * ((d - self.two.mu) ** 2)
                                  for (w, d) in zip(rigt, data)) / two_den)
        # compute new mix
        self.mix = one_den / len(data)

    def iterate(self, N=1, verbose=False):
        "Perform N iterations, then compute log-likelihood"

    def pdf(self, x):
        return (self.mix) * self.one.pdf(x) + (1 - self.mix) * self.two.pdf(x)

    def __repr__(self):
        return 'GaussianMixture({0}, {1}, mix={2:.03})'.format(self.one,
                                                               self.two,
                                                               self.mix)

    def __str__(self):
        return 'Mixture: {0}, {1}, mix={2:.03}'.format(self.one,
                                                       self.two,
                                                       self.mix)
And then, while training, I get that error in the conditional statement.
# Check out the fitting process
n_iterations = 5
best_mix = None
best_loglike = float('-inf')
mix = GaussianMixture(data)
for _ in range(n_iterations):
    try:
        # train!
        mix.iterate(verbose=True)
        if mix.loglike > best_loglike:
            best_loglike = mix.loglike
            best_mix = mix
    except (ZeroDivisionError, ValueError, RuntimeWarning):  # Catch division errors from bad starts, and just throw them out...
        pass
Any ideas why I have the following error?
AttributeError: GaussianMixture instance has no attribute 'loglike'

The loglike attribute is only created when you call the Estep method.
def Estep(self):
    "Perform an E(stimation)-step, freshening up self.loglike in the process"
    # compute weights
    self.loglike = 0.  # = log(p = 1)
You didn't call Estep between creating the GaussianMixture instance and accessing mix.loglike:
mix = GaussianMixture(data)
for _ in range(n_iterations):
    try:
        # train!
        mix.iterate(verbose=True)
        if mix.loglike > best_loglike:
And the iterate method is empty (it looks like you forgot some code here):
def iterate(self, N=1, verbose=False):
    "Perform N iterations, then compute log-likelihood"
So, there'll be no loglike attribute set on the mix instance by the time you do if mix.loglike. Hence, the AttributeError.
You need to do one of the following:
- call the Estep method (since you set self.loglike there), or
- define a loglike attribute in __init__.
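For illustration, here is a minimal iterate body that is consistent with the Estep and Mstep methods above; the exact N and verbose handling is my assumption, since the original method is empty:
def iterate(self, N=1, verbose=False):
    "Perform N iterations, then compute log-likelihood"
    for i in range(1, N + 1):
        # the E-step yields the per-datum weights and refreshes self.loglike;
        # the M-step consumes them
        self.Mstep(self.Estep())
        if verbose:
            print '{0:2} {1}'.format(i, self)
    # one final E-step so self.loglike matches the latest parameters
    for _ in self.Estep():
        pass
After something like this runs, mix.loglike exists and the comparison in your training loop works.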

Related

How can I use the pvlib modules in pvmismatch?

Is it possible to use the CEC modules of pvlib in pvmismatch?
I tried to build a cell using the values of the CEC modules:
cell = pvcell.PVcell(
    Rs=parametersw.R_s / parameters.N_s,
    Rsh=parametersw.R_sh_ref * parameters.N_s,
    Isat1_T0=parametersw.I_o_ref,
    Isat2_T0=0,
    Isc0_T0=parametersw.I_L_ref,
    alpha_Isc=parametersw.alpha_sc,
    # aRBD=0,
    # bRBD=0,
    # nRBD=0,
    Eg=1.121,
    Tcell=273.15
)
and when putting these cells in a mismatch module, the values that I receive (Voc, Isc, Vmp, Imp) are 3 V less than expected.
The short answer is no, it is not possible. pvmismatch uses the two-diode model, and you cannot simply put in the corresponding values from the one-diode model and omit the others.
Having said that, using the same (scaled) resistances might be OK, and you could try tweaking Isat1 and Isat2 to get closer to what you were expecting. This may or may not be good enough for your application.
If you are willing to do a little extra work, you can generate 2-diode model coefficients from an IV curve at STC using the gen_coeffs package in pvmismatch.contrib. See this discussion for an example using a Canadian Solar module from the Sandia array performance model library. All you need are the principal characteristics at STC, a little luck, and some elbow grease, and you can do it.
For example, if using a CEC module:
"""
Making 2-diode modules from CEC in PVMismatch
"""
from matplotlib import pyplot as plt
import numpy as np
import pvlib
from pvmismatch import *
from pvmismatch.contrib import gen_coeffs
cecmod = pvlib.pvsystem.retrieve_sam('CECMod')
csmods = cecmod.columns[cecmod.T.index.str.startswith('Canadian')]
len(csmods) # 409 modules in CEC library!
cs6x_300m = cecmod[csmods[264]] # CS6X-300M Canadian Solar 300W mono-Si module
args = (
cs6x_300m.I_sc_ref,
cs6x_300m.V_oc_ref,
cs6x_300m.I_mp_ref,
cs6x_300m.V_mp_ref,
cs6x_300m.N_s, # number of series cells
1, # number of parallel sub-strings
25.0) # cell temperature
# try to solve using default coeffs
x, sol = gen_coeffs.gen_two_diode(*args)
# solver fails, so get the last guess before it quit
def last_guess(sol):
isat1 = np.exp(sol.x[0])
isat2 = np.exp(sol.x[1])
rs = sol.x[2] ** 2.0
rsh = sol.x[3] ** 2.0
return isat1, isat2, rs, rsh
x = last_guess(sol)
# the series and shunt resistance solver guess are way off, so reset them
# with something reasonable for a 2-diode model
x, sol = gen_coeffs.gen_two_diode(*args, x0=(x[0], x[1], 0.005, 10.0))
# Hooray, it worked! Note that the 1-diode and 2-diode parametres are so
# different! Anyway, let's make a cell and a module to check the solution.
pvc = pvcell.PVcell(
Isat1_T0=x[0],
Isat2_T0=x[1],
Rs=x[2],
Rsh=x[3],
Isc0_T0=cs6x_300m.I_sc_ref,
alpha_Isc=cs6x_300m.alpha_sc)
np.isclose(pvc.Isc, cs6x_300m.I_sc_ref) # ha-ha, this exact b/c we used it
# open circuit voltage within 1E-3: (45.01267251639085, 45.0)
np.isclose(pvc.Voc*cs6x_300m.N_s, cs6x_300m.V_oc_ref, rtol=1e-3, atol=1e-3)
# get index max power point
mpp = np.argmax(pvc.Pcell)
# max power voltage within 1E-3: (36.50580418834946, 36.5)
np.isclose(pvc.Vcell[mpp][0]*cs6x_300m.N_s, cs6x_300m.V_mp_ref, rtol=1e-3, atol=1e-3)
# max power current within 1E-3: (8.218687568902466, 8.22)
np.isclose(pvc.Icell[mpp][0], cs6x_300m.I_mp_ref, rtol=1e-3, atol=1e-3)
# use pvlib to get the full IV curve using CEC model
params1stc = pvlib.pvsystem.calcparams_cec(effective_irradiance=1000,
temp_cell=25.0, alpha_sc=cs6x_300m.alpha_sc, a_ref=cs6x_300m.a_ref,
I_L_ref=cs6x_300m.I_L_ref, I_o_ref=cs6x_300m.I_o_ref, R_sh_ref=cs6x_300m.R_sh_ref,
R_s=cs6x_300m.R_s, Adjust=cs6x_300m.Adjust)
iv_params1stc = pvlib.pvsystem.singlediode(*params1stc, ivcurve_pnts=100, method='newton')
# use pvmm to get full IV curve using 2-diode model parameters
pvm = pvmodule.PVmodule(cell_pos=pvmodule.STD72, pvcells=[pvc]*72)
# make some comparison plots
pvm.plotMod() # plot the pvmm module
plt.tight_layout()
# get axes for IV curve
f, ax = plt.gcf(), plt.gca()
ax0 = f.axes[0]
ax0.plot(iv_params1stc['v'], iv_params1stc['i'], '--')
ax0.plot(0, iv_params1stc['i_sc'], '|k')
ax0.plot(iv_params1stc['v_oc'], 0, '|k')
ax0.plot(iv_params1stc['v_mp'], iv_params1stc['i_mp'], '|k')
ax0.set_ylim([0, 10])
ax0.plot(0, pvm.Isc.mean(), '_k')
ax0.plot(pvm.Voc.sum(), 0, '|k')
mpp = np.argmax(pvm.Pmod)
ax0.plot(pvm.Vcell[mpp], mpp.Icell[mpp], '_k')
ax0.plot(pvm.Vmod[mpp], pvm.Imod[mpp], '_k')
iv_params1stc['p'] = iv_params1stc['v'] * iv_params1stc['i']
ax1.plot(iv_params1stc['v'], iv_params1stc['p'], '--')
ax1.plot(iv_params1stc['v_mp'], iv_params1stc['p_mp'], '|k')
ax1.plot(pvm.Vmod[mpp], pvm.Pmod[mpp], '_k')
you can get very close agreement between the 1-diode and 2-diode model curves.
You cannot define the parameters as follows:
Isat1_T0=parametersw.I_o_ref,
Isat2_T0=0,
The reason is that the diode ideality factors in PVMismatch are fixed at n1=1 and n2=2, whereas the one-diode model requires you to supply an ideality factor n.
If you still want to use the one-diode model with PVMismatch, you need to rewrite the PVcell module in this package. I have tried to write it; you can refer to my version below.
"""
This module contains the :class:`~pvmismatch.pvmismatch_lib.pvcell.PVcell`
object which is used by modules, strings and systems.
"""
from __future__ import absolute_import
from future.utils import iteritems
from pvmismatch.pvmismatch_lib.pvconstants import PVconstants
import numpy as np
from matplotlib import pyplot as plt
from scipy.optimize import newton
# Defaults
RS = 0.0038 # [ohm] series resistance
RSH = 500 # [ohm] shunt resistance
Gamma = 0.97
ISAT1_T0 = 2.76E-11 # [A] diode one saturation current
ISC0_T0 = 9.87 # [A] reference short circuit current
TCELL = 298.15 # [K] cell temperature
ARBD = 2E-3 # reverse breakdown coefficient 1
BRBD = 0. # reverse breakdown coefficient 2
VRBD_ = -20 # [V] reverse breakdown voltage
NRBD = 3.28 # reverse breakdown exponent
EG = 1.1 # [eV] band gap of cSi
ALPHA_ISC = 0.0005 # [1/K] short circuit current temperature coefficient
EPS = np.finfo(np.float64).eps
class PVcell(object):
"""
Class for PV cells.
:param Rs: series resistance [ohms]
:param Rsh: shunt resistance [ohms]
:param Isat1_T0: first saturation diode current at ref temp [A]
:param Isat2_T0: second saturation diode current [A]
:param Isc0_T0: short circuit current at ref temp [A]
:param aRBD: reverse breakdown coefficient 1
:param bRBD: reverse breakdown coefficient 2
:param VRBD: reverse breakdown voltage [V]
:param nRBD: reverse breakdown exponent
:param Eg: band gap [eV]
:param alpha_Isc: short circuit current temp coeff [1/K]
:param Tcell: cell temperature [K]
:param Ee: incident effective irradiance [suns]
:param pvconst: configuration constants object
:type pvconst: :class:`~pvmismatch.pvmismatch_lib.pvconstants.PVconstants`
"""
_calc_now = False #: if True ``calcCells()`` is called in ``__setattr__``
def __init__(self, Rs=RS, Rsh=RSH, Isat1_T0=ISAT1_T0, Gamma=Gamma,
Isc0_T0=ISC0_T0, aRBD=ARBD, bRBD=BRBD, VRBD=VRBD_,
nRBD=NRBD, Eg=EG, alpha_Isc=ALPHA_ISC,
Tcell=TCELL, Ee=1., pvconst=PVconstants()):
# user inputs
self.Rs = Rs #: [ohm] series resistance
self.Rsh = Rsh #: [ohm] shunt resistance
self.Isat1_T0 = Isat1_T0 #: [A] diode one sat. current at T0
self.Gamma = Gamma #: ideal factor
self.Isc0_T0 = Isc0_T0 #: [A] short circuit current at T0
self.aRBD = aRBD #: reverse breakdown coefficient 1
self.bRBD = bRBD #: reverse breakdown coefficient 2
self.VRBD = VRBD #: [V] reverse breakdown voltage
self.nRBD = nRBD #: reverse breakdown exponent
self.Eg = Eg #: [eV] band gap of cSi
self.alpha_Isc = alpha_Isc #: [1/K] short circuit temp. coeff.
self.Tcell = Tcell #: [K] cell temperature
self.Ee = Ee #: [suns] incident effective irradiance on cell
self.pvconst = pvconst #: configuration constants
self.Icell = None #: cell currents on IV curve [A]
self.Vcell = None #: cell voltages on IV curve [V]
self.Pcell = None #: cell power on IV curve [W]
self.VocSTC = self._VocSTC() #: estimated Voc at STC [V]
# set calculation flag
self._calc_now = True # overwrites the class attribute
def __str__(self):
fmt = '<PVcell(Ee=%g[suns], Tcell=%g[K], Isc=%g[A], Voc=%g[V])>'
return fmt % (self.Ee, self.Tcell, self.Isc, self.Voc)
def __repr__(self):
return str(self)
def __setattr__(self, key, value):
# check for floats
try:
value = np.float64(value)
except (TypeError, ValueError):
pass # fail silently if not float, eg: pvconst or _calc_now
super(PVcell, self).__setattr__(key, value)
# recalculate IV curve
if self._calc_now:
Icell, Vcell, Pcell = self.calcCell()
self.__dict__.update(Icell=Icell, Vcell=Vcell, Pcell=Pcell)
def update(self, **kwargs):
"""
Update user-defined constants.
"""
# turn off calculation flag until all attributes are updated
self._calc_now = False
# don't use __dict__.update() instead use setattr() to go through
# custom __setattr__() so that numbers are cast to floats
for k, v in iteritems(kwargs):
setattr(self, k, v)
self._calc_now = True # recalculate
#property
def Vt(self):
"""
Thermal voltage in volts.
"""
return self.pvconst.k * self.Tcell / self.pvconst.q
#property
def Isc(self):
return self.Ee * self.Isc0
#property
def Aph(self):
"""
Photogenerated current coefficient, non-dimensional.
"""
# Aph is undefined (0/0) if there is no irradiance
if self.Isc == 0: return np.nan
# short current (SC) conditions (Vcell = 0)
Vdiode_sc = self.Isc * self.Rs # diode voltage at SC
Idiode1_sc = self.Isat1 * (np.exp(Vdiode_sc / (self.Gamma*self.Vt)) - 1.)
Ishunt_sc = Vdiode_sc / self.Rsh # diode voltage at SC
# photogenerated current coefficient
return 1. + (Idiode1_sc + Ishunt_sc) / self.Isc
#property
def Isat1(self):
"""
Diode one saturation current at Tcell in amps.
"""
_Tstar = self.Tcell ** 3. / self.pvconst.T0 ** 3. # scaled temperature
_inv_delta_T = 1. / self.pvconst.T0 - 1. / self.Tcell # [1/K]
_expTstar = np.exp(
self.Eg * self.pvconst.q / (self.pvconst.k * self.Gamma) * _inv_delta_T
)
return self.Isat1_T0 * _Tstar * _expTstar # [A] Isat1(Tcell)
#property
def Isc0(self):
"""
Short circuit current at Tcell in amps.
"""
_delta_T = self.Tcell - self.pvconst.T0 # [K] temperature difference
return self.Isc0_T0 * (1. + self.alpha_Isc * _delta_T) # [A] Isc0
#property
def Voc(self):
"""
Estimate open circuit voltage of cells.
Returns Voc : numpy.ndarray of float, estimated open circuit voltage
"""
return self.Vt * self.Gamma * np.log((self.Aph * self.Isc)/self.Isat1_T0 + 1)
def _VocSTC(self):
"""
Estimate open circuit voltage of cells.
Returns Voc : numpy.ndarray of float, estimated open circuit voltage
"""
Vdiode_sc = self.Isc0_T0 * self.Rs # diode voltage at SC
Idiode1_sc = self.Isat1_T0 * (np.exp(Vdiode_sc / (self.Gamma*self.Vt)) - 1.)
Ishunt_sc = Vdiode_sc / self.Rsh # diode voltage at SC
# photogenerated current coefficient
Aph = 1. + (Idiode1_sc + Ishunt_sc) / self.Isc0_T0
# estimated Voc at STC
return self.Vt * self.Gamma * np.log((Aph * self.Isc0_T0)/self.Isat1_T0 + 1)
#property
def Igen(self):
"""
Photovoltaic generated light current (AKA IL or Iph)
Returns Igen : numpy.ndarray of float, PV generated light current [A]
Photovoltaic generated light current is zero if irradiance is zero.
"""
if self.Ee == 0: return 0
return self.Aph * self.Isc
def calcCell(self):
"""
Calculate cell I-V curves.
Returns (Icell, Vcell, Pcell) : tuple of numpy.ndarray of float
"""
Vreverse = self.VRBD * self.pvconst.negpts
Vff = self.Voc
delta_Voc = self.VocSTC - self.Voc
# to make sure that the max voltage is always in the 4th quadrant, add
# a third set of points log spaced with decreasing density, from Voc to
# Voc # STC unless Voc *is* Voc # STC, then use an arbitrary voltage at
# 80% of Voc as an estimate of Vmp assuming a fill factor of 80% and
# Isc close to Imp, or if Voc > Voc # STC, then use Voc as the max
if delta_Voc == 0:
Vff = 0.8 * self.Voc
delta_Voc = 0.2 * self.Voc
elif delta_Voc < 0:
Vff = self.VocSTC
delta_Voc = -delta_Voc
Vquad4 = Vff + delta_Voc * self.pvconst.Vmod_q4pts
Vforward = Vff * self.pvconst.pts
Vdiode = np.concatenate((Vreverse, Vforward, Vquad4), axis=0)
Idiode1 = self.Isat1 * (np.exp(Vdiode / (self.Gamma*self.Vt)) - 1.)
Ishunt = Vdiode / self.Rsh
fRBD = 1. - Vdiode / self.VRBD
# use epsilon = 2.2204460492503131e-16 to avoid "divide by zero"
fRBD[fRBD == 0] = EPS
Vdiode_norm = Vdiode / self.Rsh / self.Isc0_T0
fRBD = self.Isc0_T0 * fRBD ** (-self.nRBD)
IRBD = (self.aRBD * Vdiode_norm + self.bRBD * Vdiode_norm ** 2) * fRBD
Icell = self.Igen - Idiode1 - Ishunt - IRBD
Vcell = Vdiode - Icell * self.Rs
Pcell = Icell * Vcell
return Icell, Vcell, Pcell
# diode model
# *-->--*--->---*--Rs->-Icell--+
# ^ | | ^
# | | | |
# Igen Idiode Ishunt Vcell
# | | | |
# | v v v
# *--<--*---<---*--<-----------=
# http://en.wikipedia.org/wiki/Diode_modelling#Shockley_diode_model
# http://en.wikipedia.org/wiki/Diode#Shockley_diode_equation
# http://en.wikipedia.org/wiki/William_Shockley
#staticmethod
def f_Icell(Icell, Vcell, Igen, Rs, Vt, Isat1, Rsh):
"""
Objective function for Icell.
:param Icell: cell current [A]
:param Vcell: cell voltage [V]
:param Igen: photogenerated current at Tcell and Ee [A]
:param Rs: series resistance [ohms]
:param Vt: thermal voltage [V]
:param Isat1: first diode saturation current at Tcell [A]
:param Isat2: second diode saturation current [A]
:param Rsh: shunt resistance [ohms]
:return: residual = (Icell - Icell0) [A]
"""
# arbitrary current condition
Vdiode = Vcell + Icell * Rs # diode voltage
Idiode1 = Isat1 * (np.exp(Vdiode / (Gamma * Vt)) - 1.) # diode current
Ishunt = Vdiode / Rsh # shunt current
return Igen - Idiode1 - Ishunt - Icell
def calcIcell(self, Vcell):
"""
Calculate Icell as a function of Vcell.
:param Vcell: cell voltage [V]
:return: Icell
"""
args = (np.float64(Vcell), self.Igen, self.Rs, self.Vt,
self.Rsh)
return newton(self.f_Icell, x0=self.Isc, args=args)
#staticmethod
def f_Vcell(Vcell, Icell, Igen, Rs, Vt, Isat1, Rsh):
return PVcell.f_Icell(Icell, Vcell, Igen, Rs, Vt, Isat1, Rsh)
def calcVcell(self, Icell):
"""
Calculate Vcell as a function of Icell.
:param Icell: cell current [A]
:return: Vcell
"""
args = (np.float64(Icell), self.Igen, self.Rs, self.Vt,
self.Isat1, self.Rsh)
return newton(self.f_Vcell, x0=self.Voc, args=args)
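To connect this modified PVcell back to the CEC parameters, here is a minimal usage sketch. Deriving the ideality factor from a_ref (for the CEC single-diode model, a_ref = n * N_s * k * T0 / q at the reference temperature) and the per-cell resistance scaling are my assumptions, not part of the answer above:
# minimal sketch, assuming the modified PVcell above and a CEC record
# `cs6x_300m` as retrieved in the earlier answer
k = 1.38064852e-23   # [J/K] Boltzmann constant
q = 1.60217662e-19   # [C] elementary charge
T0 = 298.15          # [K] reference temperature
# ideality factor n recovered from the CEC parameter a_ref
n = cs6x_300m.a_ref / (cs6x_300m.N_s * k * T0 / q)
cell = PVcell(
    Rs=cs6x_300m.R_s / cs6x_300m.N_s,        # per-cell series resistance
    Rsh=cs6x_300m.R_sh_ref / cs6x_300m.N_s,  # per-cell shunt resistance (one plausible scaling)
    Isat1_T0=cs6x_300m.I_o_ref,
    Gamma=n,                                 # one-diode ideality factor
    Isc0_T0=cs6x_300m.I_L_ref,
    alpha_Isc=cs6x_300m.alpha_sc)
print(cell)  # quick sanity check of Isc and Voc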

TensorFlow train function with multiple layers

I am new to TensorFlow and trying to understand it. I managed to create a one-layer model, but I would now like to add two more. How can I make my train function work? I would like to train it with hundreds of X and Y values. I implemented all the values I need, the weight and bias of each layer, but I don't understand how I can use them in my train function. And once it is trained, how can I use the model, as I try to do in the last part of the code?
import numpy as np
import tensorflow as tf

print("TensorFlow version: {}".format(tf.__version__))
print("Eager execution: {}".format(tf.executing_eagerly()))

x = np.array([
    [10, 10, 30, 20],
])
y = np.array([[10, 1, 1, 1]])


class Model(object):
    def __init__(self, x, y):
        # get random values.
        self.W = tf.Variable(tf.random.normal((len(x), len(x[0]))))
        self.b = tf.Variable(tf.random.normal((len(y),)))
        self.W1 = tf.Variable(tf.random.normal((len(x), len(x[0]))))
        self.b1 = tf.Variable(tf.random.normal((len(y),)))
        self.W2 = tf.Variable(tf.random.normal((len(x), len(x[0]))))
        self.b2 = tf.Variable(tf.random.normal((len(y),)))

    def __call__(self, x):
        out1 = tf.multiply(x, self.W) + self.b
        out2 = tf.multiply(out1, self.W1) + self.b1
        last_layer = tf.multiply(out2, self.W2) + self.b2
        # Input_Layer = self.W * x + self.b
        return last_layer


def loss(predicted_y, desired_y):
    return tf.reduce_sum(tf.square(predicted_y - desired_y))


optimizer = tf.optimizers.Adam(0.1)


def train(model, inputs, outputs):
    with tf.GradientTape() as t:
        current_loss = loss(model(inputs), outputs)
    grads = t.gradient(current_loss, [model.W, model.b])
    optimizer.apply_gradients(zip(grads, [model.W, model.b]))
    print(current_loss)


model = Model(x, y)
for i in range(10000):
    train(model, x, y)

for i in range(3):
    InputX = np.array([
        [input(), input(), input(), input()],
    ])
    returning = tf.math.multiply(
        InputX, model.W, name=None
    )
    print("I think that output can be:", returning)
Just add new variables to the list:
grads = t.gradient(current_loss, [model.W, model.b, model.W1, model.b1, model.W2, model.b2])
optimizer.apply_gradients(zip(grads, [model.W, model.b, model.W1, model.b1, model.W2, model.b2]))
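For clarity, the whole train function then becomes (a sketch; keeping the variables in one list guarantees that t.gradient and apply_gradients stay in sync):
def train(model, inputs, outputs):
    # one list holding every trainable variable of the three layers
    variables = [model.W, model.b, model.W1, model.b1, model.W2, model.b2]
    with tf.GradientTape() as t:
        current_loss = loss(model(inputs), outputs)
    # differentiate the loss w.r.t. all six variables and update them together
    grads = t.gradient(current_loss, variables)
    optimizer.apply_gradients(zip(grads, variables))
    print(current_loss)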

TypeError: only length-1 arrays can be converted to Python scalars, python2.7

I searched for this issue and found several questions with the same error but with different code and different causes, so I was hesitant to post my issue here. After reading the majority of the answers, I didn't find a solution for my issue.
The original and full code is here.
chapter6.py:
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import numpy as np
import matplotlib.pyplot as plt
from datasets import gtsrb
from classifiers import MultiClassSVM


def main():
    strategies = ['one-vs-one', 'one-vs-all']
    features = [None, 'gray', 'rgb', 'hsv', 'surf', 'hog']
    accuracy = np.zeros((2, len(features)))
    precision = np.zeros((2, len(features)))
    recall = np.zeros((2, len(features)))
    for f in xrange(len(features)):
        print "feature", features[f]
        (X_train, y_train), (X_test, y_test) = gtsrb.load_data(
            "datasets/gtsrb_training",
            feature=features[f],
            test_split=0.2,
            seed=42)
        # convert to numpy
        X_train = np.squeeze(np.array(X_train)).astype(np.float32)
        y_train = np.array(y_train)
        X_test = np.squeeze(np.array(X_test)).astype(np.float32)
        y_test = np.array(y_test)
        # find all class labels
        labels = np.unique(np.hstack((y_train, y_test)))
        for s in xrange(len(strategies)):
            print " - strategy", strategies[s]
            # set up SVMs
            MCS = MultiClassSVM(len(labels), strategies[s])
            # training phase
            print " - train"
            MCS.fit(X_train, y_train)
            # test phase
            print " - test"
            acc, prec, rec = MCS.evaluate(X_test, y_test)
            accuracy[s, f] = acc
            precision[s, f] = np.mean(prec)
            recall[s, f] = np.mean(rec)
            print " - accuracy: ", acc
            print " - mean precision: ", np.mean(prec)
            print " - mean recall: ", np.mean(rec)
    # plot results as stacked bar plot
    f, ax = plt.subplots(2)
    for s in xrange(len(strategies)):
        x = np.arange(len(features))
        ax[s].bar(x - 0.2, accuracy[s, :], width=0.2, color='b',
                  hatch='/', align='center')
        ax[s].bar(x, precision[s, :], width=0.2, color='r', hatch='\\',
                  align='center')
        ax[s].bar(x + 0.2, recall[s, :], width=0.2, color='g', hatch='x',
                  align='center')
        ax[s].axis([-0.5, len(features) + 0.5, 0, 1.5])
        ax[s].legend(('Accuracy', 'Precision', 'Recall'), loc=2, ncol=3,
                     mode='expand')
        ax[s].set_xticks(np.arange(len(features)))
        ax[s].set_xticklabels(features)
        ax[s].set_title(strategies[s])
    plt.show()


if __name__ == '__main__':
    main()
classifiers.py
import cv2
import numpy as np
from abc import ABCMeta, abstractmethod
from matplotlib import pyplot as plt

__author__ = "Michael Beyeler"
__license__ = "GNU GPL 3.0 or later"


class Classifier:
    """
    Abstract base class for all classifiers
    A classifier needs to implement at least two methods:
    - fit: A method to train the classifier by fitting the model to
      the data.
    - evaluate: A method to test the classifier by predicting labels of
      some test data based on the trained model.
    A classifier also needs to specify a classification strategy via
    setting self.mode to either "one-vs-all" or "one-vs-one".
    The one-vs-all strategy involves training a single classifier per
    class, with the samples of that class as positive samples and all
    other samples as negatives.
    The one-vs-one strategy involves training a single classifier per
    class pair, with the samples of the first class as positive samples
    and the samples of the second class as negative samples.
    This class also provides methods to calculate accuracy, precision,
    recall, and the confusion matrix.
    """
    __metaclass__ = ABCMeta

    @abstractmethod
    def fit(self, X_train, y_train):
        pass

    @abstractmethod
    def evaluate(self, X_test, y_test, visualize=False):
        pass

    def _accuracy(self, y_test, Y_vote):
        """Calculates accuracy
        This method calculates the accuracy based on a vector of
        ground-truth labels (y_test) and a 2D voting matrix (Y_vote) of
        size (len(y_test), num_classes).
        :param y_test: vector of ground-truth labels
        :param Y_vote: 2D voting matrix (rows=samples, cols=class votes)
        :returns: accuracy e[0,1]
        """
        # predicted classes
        y_hat = np.argmax(Y_vote, axis=1)
        # all cases where predicted class was correct
        mask = y_hat == y_test
        return np.float32(np.count_nonzero(mask)) / len(y_test)

    def _precision(self, y_test, Y_vote):
        """Calculates precision
        This method calculates precision extended to multi-class
        classification by help of a confusion matrix.
        :param y_test: vector of ground-truth labels
        :param Y_vote: 2D voting matrix (rows=samples, cols=class votes)
        :returns: precision e[0,1]
        """
        # predicted classes
        y_hat = np.argmax(Y_vote, axis=1)
        if self.mode == "one-vs-one":
            # need confusion matrix
            conf = self._confusion(y_test, Y_vote)
            # consider each class separately
            prec = np.zeros(self.num_classes)
            for c in xrange(self.num_classes):
                # true positives: label is c, classifier predicted c
                tp = conf[c, c]
                # false positives: label is c, classifier predicted not c
                fp = np.sum(conf[:, c]) - conf[c, c]
                if tp + fp != 0:
                    prec[c] = tp * 1. / (tp + fp)
        elif self.mode == "one-vs-all":
            # consider each class separately
            prec = np.zeros(self.num_classes)
            for c in xrange(self.num_classes):
                # true positives: label is c, classifier predicted c
                tp = np.count_nonzero((y_test == c) * (y_hat == c))
                # false positives: label is c, classifier predicted not c
                fp = np.count_nonzero((y_test == c) * (y_hat != c))
                if tp + fp != 0:
                    prec[c] = tp * 1. / (tp + fp)
        return prec

    def _recall(self, y_test, Y_vote):
        """Calculates recall
        This method calculates recall extended to multi-class
        classification by help of a confusion matrix.
        :param y_test: vector of ground-truth labels
        :param Y_vote: 2D voting matrix (rows=samples, cols=class votes)
        :returns: recall e[0,1]
        """
        # predicted classes
        y_hat = np.argmax(Y_vote, axis=1)
        if self.mode == "one-vs-one":
            # need confusion matrix
            conf = self._confusion(y_test, Y_vote)
            # consider each class separately
            recall = np.zeros(self.num_classes)
            for c in xrange(self.num_classes):
                # true positives: label is c, classifier predicted c
                tp = conf[c, c]
                # false negatives: label is not c, classifier predicted c
                fn = np.sum(conf[c, :]) - conf[c, c]
                if tp + fn != 0:
                    recall[c] = tp * 1. / (tp + fn)
        elif self.mode == "one-vs-all":
            # consider each class separately
            recall = np.zeros(self.num_classes)
            for c in xrange(self.num_classes):
                # true positives: label is c, classifier predicted c
                tp = np.count_nonzero((y_test == c) * (y_hat == c))
                # false negatives: label is not c, classifier predicted c
                fn = np.count_nonzero((y_test != c) * (y_hat == c))
                if tp + fn != 0:
                    recall[c] = tp * 1. / (tp + fn)
        return recall

    def _confusion(self, y_test, Y_vote):
        """Calculates confusion matrix
        This method calculates the confusion matrix based on a vector of
        ground-truth labels (y_test) and a 2D voting matrix (Y_vote) of
        size (len(y_test), num_classes).
        Matrix element conf[r,c] will contain the number of samples that
        were predicted to have label r but have ground-truth label c.
        :param y_test: vector of ground-truth labels
        :param Y_vote: 2D voting matrix (rows=samples, cols=class votes)
        :returns: confusion matrix
        """
        y_hat = np.argmax(Y_vote, axis=1)
        conf = np.zeros((self.num_classes, self.num_classes)).astype(np.int32)
        for c_true in xrange(self.num_classes):
            # looking at all samples of a given class, c_true
            # how many were classified as c_true? how many as others?
            for c_pred in xrange(self.num_classes):
                y_this = np.where((y_test == c_true) * (y_hat == c_pred))
                conf[c_pred, c_true] = np.count_nonzero(y_this)
        return conf


class MultiClassSVM(Classifier):
    """
    Multi-class classification using Support Vector Machines (SVMs)
    This class implements an SVM for multi-class classification. Whereas
    some classifiers naturally permit the use of more than two classes
    (such as neural networks), SVMs are binary in nature.
    However, we can turn SVMs into multinomial classifiers using at least
    two different strategies:
    * one-vs-all: A single classifier is trained per class, with the
                  samples of that class as positives (label 1) and all
                  others as negatives (label 0).
    * one-vs-one: For k classes, k*(k-1)/2 classifiers are trained for each
                  pair of classes, with the samples of the one class as
                  positives (label 1) and samples of the other class as
                  negatives (label 0).
    Each classifier then votes for a particular class label, and the final
    decision (classification) is based on a majority vote.
    """

    def __init__(self, num_classes, mode="one-vs-all", params=None):
        """
        The constructor makes sure the correct number of classifiers is
        initialized, depending on the mode ("one-vs-all" or "one-vs-one").
        :param num_classes: The number of classes in the data.
        :param mode: Which classification mode to use.
                     "one-vs-all": single classifier per class
                     "one-vs-one": single classifier per class pair
                     Default: "one-vs-all"
        :param params: SVM training parameters.
                       For now, default values are used for all SVMs.
                       Hyperparameter exploration can be achieved by
                       embedding the MultiClassSVM process flow in a
                       for-loop that classifies the data with
                       different parameter values, then pick the
                       values that yield the best accuracy.
                       Default: None
        """
        self.num_classes = num_classes
        self.mode = mode
        self.params = params or dict()
        # initialize correct number of classifiers
        self.classifiers = []
        if mode == "one-vs-one":
            # k classes: need k*(k-1)/2 classifiers
            for _ in xrange(num_classes*(num_classes - 1) / 2):
                self.classifiers.append(cv2.ml.SVM_create())
        elif mode == "one-vs-all":
            # k classes: need k classifiers
            for _ in xrange(num_classes):
                self.classifiers.append(cv2.ml.SVM_create())
        else:
            print "Unknown mode ", mode

    def fit(self, X_train, y_train, params=None):
        """Fits the model to training data
        This method trains the classifier on data (X_train) using either
        the "one-vs-one" or "one-vs-all" strategy.
        :param X_train: input data (rows=samples, cols=features)
        :param y_train: vector of class labels
        :param params: dict to specify training options for cv2.SVM.train
                       leave blank to use the parameters passed to the
                       constructor
        """
        if params is None:
            params = self.params
        if self.mode == "one-vs-one":
            svm_id = 0
            for c1 in xrange(self.num_classes):
                for c2 in xrange(c1 + 1, self.num_classes):
                    # indices where class labels are either `c1` or `c2`
                    data_id = np.where((y_train == c1) + (y_train == c2))[0]
                    # set class label to 1 where class is `c1`, else 0
                    y_train_bin = np.where(y_train[data_id] == c1, 1,
                                           0).flatten()
                    self.classifiers[svm_id].train(X_train[data_id, :],
                                                   y_train_bin,
                                                   params=self.params)
                    svm_id += 1
        elif self.mode == "one-vs-all":
            for c in xrange(self.num_classes):
                # train c-th SVM on class c vs. all other classes
                # set class label to 1 where class==c, else 0
                y_train_bin = np.where(y_train == c, 1, 0).flatten()
                # train SVM
                self.classifiers[c].train(X_train, y_train_bin,
                                          params=self.params)

    def evaluate(self, X_test, y_test, visualize=False):
        """Evaluates the model on test data
        This method evaluates the classifier's performance on test data
        (X_test) using either the "one-vs-one" or "one-vs-all" strategy.
        :param X_test: input data (rows=samples, cols=features)
        :param y_test: vector of class labels
        :param visualize: flag whether to plot the results (True) or not
                          (False)
        :returns: accuracy, precision, recall
        """
        # prepare Y_vote: for each sample, count how many times we voted
        # for each class
        Y_vote = np.zeros((len(y_test), self.num_classes))
        if self.mode == "one-vs-one":
            svm_id = 0
            for c1 in xrange(self.num_classes):
                for c2 in xrange(c1 + 1, self.num_classes):
                    data_id = np.where((y_test == c1) + (y_test == c2))[0]
                    X_test_id = X_test[data_id, :]
                    y_test_id = y_test[data_id]
                    # set class label to 1 where class==c1, else 0
                    # y_test_bin = np.where(y_test_id==c1,1,0).reshape(-1,1)
                    # predict labels
                    y_hat = self.classifiers[svm_id].predict_all(X_test_id)
                    for i in xrange(len(y_hat)):
                        if y_hat[i] == 1:
                            Y_vote[data_id[i], c1] += 1
                        elif y_hat[i] == 0:
                            Y_vote[data_id[i], c2] += 1
                        else:
                            print "y_hat[", i, "] = ", y_hat[i]
                    # we vote for c1 where y_hat is 1, and for c2 where y_hat
                    # is 0 np.where serves as the inner index into the data_id
                    # array, which in turn serves as index into the results
                    # array
                    # Y_vote[data_id[np.where(y_hat == 1)[0]], c1] += 1
                    # Y_vote[data_id[np.where(y_hat == 0)[0]], c2] += 1
                    svm_id += 1
        elif self.mode == "one-vs-all":
            for c in xrange(self.num_classes):
                # set class label to 1 where class==c, else 0
                # predict class labels
                # y_test_bin = np.where(y_test==c,1,0).reshape(-1,1)
                # predict labels
                y_hat = self.classifiers[c].predict_all(X_test)
                # we vote for c where y_hat is 1
                if np.any(y_hat):
                    Y_vote[np.where(y_hat == 1)[0], c] += 1
        # with this voting scheme it's possible to end up with samples
        # that have no label at all...in this case, pick a class at
        # random...
        no_label = np.where(np.sum(Y_vote, axis=1) == 0)[0]
        Y_vote[no_label, np.random.randint(self.num_classes,
                                           size=len(no_label))] = 1
        accuracy = self._accuracy(y_test, Y_vote)
        precision = self._precision(y_test, Y_vote)
        recall = self._recall(y_test, Y_vote)
        return accuracy, precision, recall
When running chapter6.py, the output is:
feature None
 - strategy one-vs-one
 - train
Traceback (most recent call last):
  File "/home/redhwan/Downloads/opencv-python-blueprints-master/chapter6/chapter6.py", line 77, in <module>
    main()
  File "/home/redhwan/Downloads/opencv-python-blueprints-master/chapter6/chapter6.py", line 44, in main
    MCS.fit(X_train, y_train)
  File "/home/redhwan/Downloads/opencv-python-blueprints-master/chapter6/classifiers.py", line 258, in fit
    params=self.params)
TypeError: only length-1 arrays can be converted to Python scalars
Please help me or give me your suggestions.
Thank you in advance!

Is it possible to find all integer solutions?

I want to get all integer solutions within a limited time; is it possible?
This is a linear integer constraint satisfaction problem, which can be solved efficiently by OR-Tools' CP-SAT solver. I've modified their example to solve your problem in Python:
from ortools.sat.python import cp_model


class VarArraySolutionPrinter(cp_model.CpSolverSolutionCallback):
    """Print intermediate solutions."""

    def __init__(self, variables):
        cp_model.CpSolverSolutionCallback.__init__(self)
        self.__variables = variables
        self.__solution_count = 0

    def on_solution_callback(self):
        self.__solution_count += 1
        for v in self.__variables:
            print('%s=%i' % (v, self.Value(v)), end=' ')
        print()

    def solution_count(self):
        return self.__solution_count


def SearchForAllSolutionsSampleSat():
    """Showcases calling the solver to search for all solutions."""
    # Creates the model.
    model = cp_model.CpModel()
    p = [1, 2, 3, 4]
    ceq = 30
    cgeq = 2
    N = len(p)
    # Creates the variables
    x = [model.NewIntVar(0, 100, f'x{i}') for i in range(N)]
    # Create the constraints.
    model.Add(sum([xi*pi for xi, pi in zip(x, p)]) == ceq)
    model.Add(sum(x) >= cgeq)
    # Create a solver and solve.
    solver = cp_model.CpSolver()
    solution_printer = VarArraySolutionPrinter(x)
    status = solver.SearchForAllSolutions(model, solution_printer)
    print('Status = %s' % solver.StatusName(status))
    print('Number of solutions found: %i' % solution_printer.solution_count())


SearchForAllSolutionsSampleSat()
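If you want the solutions collected in a Python list instead of printed, a small variation on the callback works (my own sketch, using the same CP-SAT callback API):
class SolutionCollector(cp_model.CpSolverSolutionCallback):
    """Store every solution as a tuple of variable values."""

    def __init__(self, variables):
        cp_model.CpSolverSolutionCallback.__init__(self)
        self._variables = variables
        self.solutions = []

    def on_solution_callback(self):
        # record the current assignment of all variables
        self.solutions.append(tuple(self.Value(v) for v in self._variables))
Pass a SolutionCollector to SearchForAllSolutions in place of the printer, then read its solutions attribute when the search returns.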

PYOMO: How to use abstract models with internal data

Hi all,
I am trying to set up an abstract model for a very simple QP of the form
min (x-x0)^2
s.t.
A x = b
C x <= d
I would like to use an abstract model, as I need to re-solve with changing parameters (mainly x0, but potentially also A, b, C, d). Right now I am struggling with simply setting the parameters in the model instance. I do not want to use an external data file, but rather internal Python variables. All examples I find online use AMPL-formatted data files.
This is the code I have right now:
import pyomo.environ as pe

model = pe.AbstractModel()

# the sets
model.n = pe.Param(within=pe.NonNegativeIntegers)
model.m = pe.Param(initialize=1)
model.ss = pe.RangeSet(1, model.n)
model.os = pe.RangeSet(1, model.m)

# the starting point and the constraint parameters
model.x_hat = pe.Param(model.ss)
model.A = pe.Param(model.os, model.ss)
model.b = pe.Param(model.os)
model.C = pe.Param(model.os, model.os)
model.d = pe.Param(model.ss, model.os)

# the decision variables
model.x_projected = pe.Var(model.ss)

# the constraints
# A x = b
def sum_of_elements_rule(model):
    value = model.A * model.x_projected
    return value == model.d

model.sumelem = pe.Constraint(model.os, rule=sum_of_elements_rule)

# C x <= d
def positivity_constraint(model):
    return model.C * model.x_projected <= model.d

model.bounds = pe.Constraint(model.ss, rule=positivity_constraint)

# the cost
def cost_rule(model):
    return sum((model.x_projected[i] - model.x[i])**2 for i in model.ss)

model.cost = pe.Objective(rule=cost_rule)

instance = model.create_instance()
And somehow here I am stuck. How do I set the parameters now?
Thanks and best, Theo
I know this is an old post, but a solution to this could have helped me, so here is the solution to this problem:
## TEST
data_init = {None: dict(
    n={None: 3},
    d={0: 0, 1: 1, 2: 2},
    x_hat={0: 10, 1: -1, 2: -100},
    b={None: 10}
)}
# create instance
instance = model.create_instance(data_init)
This creates the instance in a way equivalent to what you did, but more formally.
OK, I seem to have figured out what the problem is. If I want to set a parameter after I create an instance, I need the
mutable=True
flag. Then, I can set the parameter with something like
for i in range(model_dimension):
    getattr(instance, 'd')[i] = i
I need to choose the model dimension before I create an instance (which is OK for my case). The instance can then be reused with different parameters for the constraints.
The code below should work for the problem
min (x-x_hat)' * (x-x_hat)
s.t.
sum(x) = b
x[i] >= d[i]
with x_hat, b, d as parameters.
import pyomo.environ as pe

model = pe.AbstractModel()

# model dimension
model.n = pe.Param(default=2)
# state space set
model.ss = pe.RangeSet(0, model.n - 1)
# equality
model.b = pe.Param(default=5, mutable=True)
# inequality
model.d = pe.Param(model.ss, default=0.0, mutable=True)

# decision var
model.x = pe.Var(model.ss)
model.x_hat = pe.Param(model.ss, default=0.0, mutable=True)

# the cost
def cost_rule(model):
    return sum((model.x[i] - model.x_hat[i])**2 for i in model.ss)

model.cost = pe.Objective(rule=cost_rule)

# CONSTRAINTS
# each x_i bigger than d_i
def lb_rule(model, i):
    return (model.x[i] >= model.d[i])

model.state_bound = pe.Constraint(model.ss, rule=lb_rule)

# sum of x == P_tot
def sum_rule(model):
    return (sum(model.x[i] for i in model.ss) == model.b)

model.state_sum = pe.Constraint(rule=sum_rule)

## TEST
# define model dimension
model_dimension = 3
model.n = model_dimension
# create instance
instance = model.create_instance()
# set d
for i in range(model_dimension):
    getattr(instance, 'd')[i] = i
# set x_hat
xh = (10, 1, -100)
for i in range(model_dimension):
    getattr(instance, 'x_hat')[i] = xh[i]
# set b
instance.b = 10
# solve
solver = pe.SolverFactory('ipopt')
result = solver.solve(instance)
instance.display()
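Because b, d, and x_hat are declared mutable, the same instance can be updated and re-solved without rebuilding it; a minimal sketch (the new parameter values are arbitrary):
# update the mutable parameters in place and solve again
instance.b = 20
for i in range(model_dimension):
    instance.x_hat[i] = float(i)
result = solver.solve(instance)
instance.display()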