I have noticed that when using Pyomo + Ipopt, some optimization DAE problems that converge to an optimal solution stop doing so when they are expanded in complexity (e.g. a larger distance in a car example) and, consequently, in the number of finite elements needed to keep accuracy. The solver then displays:
EXIT: Solved To Acceptable Level.
instead of the previous "Optimal solution found".
As an example of the above, I will use a modified version of the "ampl car sample" from the Pyomo repository.
# Ampl Car Example
#
# Shows how to convert a minimize final time optimal control problem
# to a format pyomo.dae can handle by removing the time scaling from
# the ContinuousSet.
#
# min tf
# dxdt = v
# dvdt = a-R*v^2
# x(0)=0; x(tf)=L
# v(0)=0; v(tf)=0
# -3<=a<=1
from pyomo.environ import *
from pyomo.dae import *
m = ConcreteModel()
m.R = Param(initialize=0.001) # Friction factor
m.L = Param(initialize=1000000.0) # Final position
m.tau = ContinuousSet(bounds=(0,1)) # Unscaled time
m.time = Var(m.tau) # Scaled time
m.tf = Var()
m.x = Var(m.tau,bounds=(0,m.L+50))
m.v = Var(m.tau,bounds=(0,None))
m.a = Var(m.tau, bounds=(-3.0,1.0),initialize=0)
m.dtime = DerivativeVar(m.time)
m.dx = DerivativeVar(m.x)
m.dv = DerivativeVar(m.v)
m.obj = Objective(expr=m.tf)
def _ode1(m, i):
    if i == 0:
        return Constraint.Skip
    return m.dx[i] == m.tf * m.v[i]
m.ode1 = Constraint(m.tau, rule=_ode1)

def _ode2(m, i):
    if i == 0:
        return Constraint.Skip
    return m.dv[i] == m.tf*(m.a[i] - m.R*m.v[i]**2)
m.ode2 = Constraint(m.tau, rule=_ode2)

def _ode3(m, i):
    if i == 0:
        return Constraint.Skip
    return m.dtime[i] == m.tf
m.ode3 = Constraint(m.tau, rule=_ode3)

def _init(m):
    yield m.x[0] == 0
    yield m.x[1] == m.L
    yield m.v[0] == 0
    yield m.v[1] == 0
    yield m.time[0] == 0
m.initcon = ConstraintList(rule=_init)
discretizer = TransformationFactory('dae.finite_difference')
discretizer.apply_to(m,nfe=5000,scheme='BACKWARD')
solver = SolverFactory('ipopt')
solver.solve(m,tee=True)
print("final time = %6.2f" %(value(m.tf)))
x = []
v = []
a = []
time=[]
for i in m.tau:
    time.append(value(m.time[i]))
    x.append(value(m.x[i]))
    v.append(value(m.v[i]))
    a.append(value(m.a[i]))
import matplotlib.pyplot as plt
plt.subplot(131)
plt.plot(time,x,label='x')
plt.title('location')
plt.xlabel('time')
plt.subplot(132)
plt.plot(time,v,label='v')
plt.xlabel('time')
plt.title('velocity')
plt.subplot(133)
plt.plot(time,a,label='a')
plt.xlabel('time')
plt.title('acceleration')
plt.show()
NOTE: The original source code can be consulted here to compare with my modified version: https://github.com/Pyomo/pyomo/blob/main/examples/dae/car_example.py
Is there anything I can do about this? Can I lower the Ipopt tolerance so that it keeps searching for the optimal solution?
You can disable the heuristic that makes Ipopt stop with an "acceptable" solution by setting option acceptable_iter to 0. See https://coin-or.github.io/Ipopt/OPTIONS.html#OPT_Termination for all options that determine termination of Ipopt.
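In Pyomo, Ipopt options can be passed through the solver interface; a minimal sketch (option names as in the Ipopt documentation linked above):

solver = SolverFactory('ipopt')
# never stop at a merely "acceptable" point
solver.options['acceptable_iter'] = 0
# the convergence tolerance itself can also be tightened (Ipopt default: 1e-8)
# solver.options['tol'] = 1e-8
solver.solve(m, tee=True)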
I'm pretty new to deep learning, but I want to add TensorBoard to the following code to track loss, accuracy, average precision, and so on.
Sample code from the TorchVision Object Detection Finetuning Tutorial:
http://pytorch.org/tutorials/intermediate/torchvision_tutorial.html
import os
import numpy as np
import torch
from PIL import Image
import sys
import torchvision
from torchvision.models.detection.faster_rcnn import FastRCNNPredictor
from torchvision.models.detection.mask_rcnn import MaskRCNNPredictor
from engine import train_one_epoch, evaluate
import utils
import transforms as T
#from torch.utils.tensorboard import SummaryWriter
#writer = SummaryWriter()
class PennFudanDataset(object):
    def __init__(self, root, transforms):
        self.root = root
        self.transforms = transforms
        # load all image files, sorting them to
        # ensure that they are aligned
        self.imgs = list(sorted(os.listdir(os.path.join(root, "PNGImages"))))
        self.masks = list(sorted(os.listdir(os.path.join(root, "PedMasks"))))

    def __getitem__(self, idx):
        # load images and masks
        img_path = os.path.join(self.root, "PNGImages", self.imgs[idx])
        mask_path = os.path.join(self.root, "PedMasks", self.masks[idx])
        img = Image.open(img_path).convert("RGB")
        # note that we haven't converted the mask to RGB,
        # because each color corresponds to a different instance
        # with 0 being background
        mask = Image.open(mask_path)
        mask = np.array(mask)
        # instances are encoded as different colors
        obj_ids = np.unique(mask)
        # first id is the background, so remove it
        obj_ids = obj_ids[1:]
        # split the color-encoded mask into a set
        # of binary masks
        masks = mask == obj_ids[:, None, None]
        # get bounding box coordinates for each mask
        num_objs = len(obj_ids)
        boxes = []
        for i in range(num_objs):
            pos = np.where(masks[i])
            xmin = np.min(pos[1])
            xmax = np.max(pos[1])
            ymin = np.min(pos[0])
            ymax = np.max(pos[0])
            boxes.append([xmin, ymin, xmax, ymax])
        boxes = torch.as_tensor(boxes, dtype=torch.float32)
        # there is only one class
        labels = torch.ones((num_objs,), dtype=torch.int64)
        masks = torch.as_tensor(masks, dtype=torch.uint8)
        image_id = torch.tensor([idx])
        area = (boxes[:, 3] - boxes[:, 1]) * (boxes[:, 2] - boxes[:, 0])
        # suppose all instances are not crowd
        iscrowd = torch.zeros((num_objs,), dtype=torch.int64)
        target = {}
        target["boxes"] = boxes
        target["labels"] = labels
        target["masks"] = masks
        target["image_id"] = image_id
        target["area"] = area
        target["iscrowd"] = iscrowd
        if self.transforms is not None:
            img, target = self.transforms(img, target)
        return img, target

    def __len__(self):
        return len(self.imgs)
def get_model_instance_segmentation(num_classes):
    # load an instance segmentation model pre-trained on COCO
    model = torchvision.models.detection.maskrcnn_resnet50_fpn(pretrained=True)
    # get number of input features for the classifier
    in_features = model.roi_heads.box_predictor.cls_score.in_features
    # replace the pre-trained head with a new one
    model.roi_heads.box_predictor = FastRCNNPredictor(in_features, num_classes)
    # now get the number of input features for the mask classifier
    in_features_mask = model.roi_heads.mask_predictor.conv5_mask.in_channels
    hidden_layer = 256
    # and replace the mask predictor with a new one
    model.roi_heads.mask_predictor = MaskRCNNPredictor(in_features_mask,
                                                       hidden_layer,
                                                       num_classes)
    return model

def get_transform(train):
    transforms = []
    transforms.append(T.ToTensor())
    if train:
        transforms.append(T.RandomHorizontalFlip(0.5))
    return T.Compose(transforms)
def main():
    # train on the GPU or on the CPU, if a GPU is not available
    device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
    # our dataset has two classes only - background and person
    num_classes = 2
    # use our dataset and defined transformations
    dataset = PennFudanDataset('PennFudanPed', get_transform(train=True))
    dataset_test = PennFudanDataset('PennFudanPed', get_transform(train=False))
    # split the dataset in train and test set
    indices = torch.randperm(len(dataset)).tolist()
    dataset = torch.utils.data.Subset(dataset, indices[:-50])
    dataset_test = torch.utils.data.Subset(dataset_test, indices[-50:])
    # define training and validation data loaders
    data_loader = torch.utils.data.DataLoader(
        dataset, batch_size=4, shuffle=True, num_workers=4,
        collate_fn=utils.collate_fn)
    data_loader_test = torch.utils.data.DataLoader(
        dataset_test, batch_size=2, shuffle=False, num_workers=4,
        collate_fn=utils.collate_fn)
    # get the model using our helper function
    model = get_model_instance_segmentation(num_classes)
    # move model to the right device
    model.to(device)
    # construct an optimizer
    params = [p for p in model.parameters() if p.requires_grad]
    optimizer = torch.optim.SGD(params, lr=0.005,
                                momentum=0.9, weight_decay=0.0005)
    # and a learning rate scheduler
    lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer,
                                                   step_size=3,
                                                   gamma=0.1)
    # let's train it for 10 epochs
    num_epochs = 10
    for epoch in range(num_epochs):
        # train for one epoch, printing every 10 iterations
        train_one_epoch(model, optimizer, data_loader, device, epoch, print_freq=10)
        # update the learning rate
        lr_scheduler.step()
        # evaluate on the test dataset
        evaluate(model, data_loader_test, device=device)
    print("That's it!")

if __name__ == "__main__":
    main()
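What I have in mind, based on the commented-out SummaryWriter lines above, is roughly the following sketch. It assumes train_one_epoch returns its MetricLogger (newer versions of the tutorial's engine.py do; older ones can be modified to), so the averaged training loss can be read off per epoch:

from torch.utils.tensorboard import SummaryWriter
writer = SummaryWriter()
for epoch in range(num_epochs):
    # engine.py's MetricLogger keeps a running SmoothedValue per loss term
    metric_logger = train_one_epoch(model, optimizer, data_loader,
                                    device, epoch, print_freq=10)
    writer.add_scalar('train/loss',
                      metric_logger.meters['loss'].global_avg, epoch)
    lr_scheduler.step()
    evaluate(model, data_loader_test, device=device)
writer.close()

Is something like this the right approach?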
I searched for this issue and found several questions with the same error, but with different code and different causes, so I hesitated to post my issue here. After reading the majority of the answers, I didn't find a solution to my problem.
The original and full code is here.
chapter6.py:
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import numpy as np
import matplotlib.pyplot as plt
from datasets import gtsrb
from classifiers import MultiClassSVM
def main():
    strategies = ['one-vs-one', 'one-vs-all']
    features = [None, 'gray', 'rgb', 'hsv', 'surf', 'hog']
    accuracy = np.zeros((2, len(features)))
    precision = np.zeros((2, len(features)))
    recall = np.zeros((2, len(features)))
    for f in xrange(len(features)):
        print "feature", features[f]
        (X_train, y_train), (X_test, y_test) = gtsrb.load_data(
            "datasets/gtsrb_training",
            feature=features[f],
            test_split=0.2,
            seed=42)
        # convert to numpy
        X_train = np.squeeze(np.array(X_train)).astype(np.float32)
        y_train = np.array(y_train)
        X_test = np.squeeze(np.array(X_test)).astype(np.float32)
        y_test = np.array(y_test)
        # find all class labels
        labels = np.unique(np.hstack((y_train, y_test)))
        for s in xrange(len(strategies)):
            print " - strategy", strategies[s]
            # set up SVMs
            MCS = MultiClassSVM(len(labels), strategies[s])
            # training phase
            print " - train"
            MCS.fit(X_train, y_train)
            # test phase
            print " - test"
            acc, prec, rec = MCS.evaluate(X_test, y_test)
            accuracy[s, f] = acc
            precision[s, f] = np.mean(prec)
            recall[s, f] = np.mean(rec)
            print " - accuracy: ", acc
            print " - mean precision: ", np.mean(prec)
            print " - mean recall: ", np.mean(rec)
    # plot results as stacked bar plot
    f, ax = plt.subplots(2)
    for s in xrange(len(strategies)):
        x = np.arange(len(features))
        ax[s].bar(x - 0.2, accuracy[s, :], width=0.2, color='b',
                  hatch='/', align='center')
        ax[s].bar(x, precision[s, :], width=0.2, color='r', hatch='\\',
                  align='center')
        ax[s].bar(x + 0.2, recall[s, :], width=0.2, color='g', hatch='x',
                  align='center')
        ax[s].axis([-0.5, len(features) + 0.5, 0, 1.5])
        ax[s].legend(('Accuracy', 'Precision', 'Recall'), loc=2, ncol=3,
                     mode='expand')
        ax[s].set_xticks(np.arange(len(features)))
        ax[s].set_xticklabels(features)
        ax[s].set_title(strategies[s])
    plt.show()

if __name__ == '__main__':
    main()
classifiers.py
import cv2
import numpy as np
from abc import ABCMeta, abstractmethod
from matplotlib import pyplot as plt
__author__ = "Michael Beyeler"
__license__ = "GNU GPL 3.0 or later"
class Classifier:
    """
    Abstract base class for all classifiers

    A classifier needs to implement at least two methods:
    - fit: A method to train the classifier by fitting the model to
      the data.
    - evaluate: A method to test the classifier by predicting labels of
      some test data based on the trained model.

    A classifier also needs to specify a classification strategy via
    setting self.mode to either "one-vs-all" or "one-vs-one".
    The one-vs-all strategy involves training a single classifier per
    class, with the samples of that class as positive samples and all
    other samples as negatives.
    The one-vs-one strategy involves training a single classifier per
    class pair, with the samples of the first class as positive samples
    and the samples of the second class as negative samples.

    This class also provides methods to calculate accuracy, precision,
    recall, and the confusion matrix.
    """
    __metaclass__ = ABCMeta

    @abstractmethod
    def fit(self, X_train, y_train):
        pass

    @abstractmethod
    def evaluate(self, X_test, y_test, visualize=False):
        pass
    def _accuracy(self, y_test, Y_vote):
        """Calculates accuracy

        This method calculates the accuracy based on a vector of
        ground-truth labels (y_test) and a 2D voting matrix (Y_vote) of
        size (len(y_test), num_classes).

        :param y_test: vector of ground-truth labels
        :param Y_vote: 2D voting matrix (rows=samples, cols=class votes)
        :returns: accuracy in [0, 1]
        """
        # predicted classes
        y_hat = np.argmax(Y_vote, axis=1)
        # all cases where predicted class was correct
        mask = y_hat == y_test
        return np.float32(np.count_nonzero(mask)) / len(y_test)

    def _precision(self, y_test, Y_vote):
        """Calculates precision

        This method calculates precision extended to multi-class
        classification by help of a confusion matrix.

        :param y_test: vector of ground-truth labels
        :param Y_vote: 2D voting matrix (rows=samples, cols=class votes)
        :returns: precision in [0, 1]
        """
        # predicted classes
        y_hat = np.argmax(Y_vote, axis=1)
        if self.mode == "one-vs-one":
            # need confusion matrix
            conf = self._confusion(y_test, Y_vote)
            # consider each class separately
            prec = np.zeros(self.num_classes)
            for c in xrange(self.num_classes):
                # true positives: label is c, classifier predicted c
                tp = conf[c, c]
                # false positives: label is c, classifier predicted not c
                fp = np.sum(conf[:, c]) - conf[c, c]
                if tp + fp != 0:
                    prec[c] = tp * 1. / (tp + fp)
        elif self.mode == "one-vs-all":
            # consider each class separately
            prec = np.zeros(self.num_classes)
            for c in xrange(self.num_classes):
                # true positives: label is c, classifier predicted c
                tp = np.count_nonzero((y_test == c) * (y_hat == c))
                # false positives: label is c, classifier predicted not c
                fp = np.count_nonzero((y_test == c) * (y_hat != c))
                if tp + fp != 0:
                    prec[c] = tp * 1. / (tp + fp)
        return prec

    def _recall(self, y_test, Y_vote):
        """Calculates recall

        This method calculates recall extended to multi-class
        classification by help of a confusion matrix.

        :param y_test: vector of ground-truth labels
        :param Y_vote: 2D voting matrix (rows=samples, cols=class votes)
        :returns: recall in [0, 1]
        """
        # predicted classes
        y_hat = np.argmax(Y_vote, axis=1)
        if self.mode == "one-vs-one":
            # need confusion matrix
            conf = self._confusion(y_test, Y_vote)
            # consider each class separately
            recall = np.zeros(self.num_classes)
            for c in xrange(self.num_classes):
                # true positives: label is c, classifier predicted c
                tp = conf[c, c]
                # false negatives: label is not c, classifier predicted c
                fn = np.sum(conf[c, :]) - conf[c, c]
                if tp + fn != 0:
                    recall[c] = tp * 1. / (tp + fn)
        elif self.mode == "one-vs-all":
            # consider each class separately
            recall = np.zeros(self.num_classes)
            for c in xrange(self.num_classes):
                # true positives: label is c, classifier predicted c
                tp = np.count_nonzero((y_test == c) * (y_hat == c))
                # false negatives: label is not c, classifier predicted c
                fn = np.count_nonzero((y_test != c) * (y_hat == c))
                if tp + fn != 0:
                    recall[c] = tp * 1. / (tp + fn)
        return recall

    def _confusion(self, y_test, Y_vote):
        """Calculates confusion matrix

        This method calculates the confusion matrix based on a vector of
        ground-truth labels (y_test) and a 2D voting matrix (Y_vote) of
        size (len(y_test), num_classes).
        Matrix element conf[r,c] will contain the number of samples that
        were predicted to have label r but have ground-truth label c.

        :param y_test: vector of ground-truth labels
        :param Y_vote: 2D voting matrix (rows=samples, cols=class votes)
        :returns: confusion matrix
        """
        y_hat = np.argmax(Y_vote, axis=1)
        conf = np.zeros((self.num_classes, self.num_classes)).astype(np.int32)
        for c_true in xrange(self.num_classes):
            # looking at all samples of a given class, c_true
            # how many were classified as c_true? how many as others?
            for c_pred in xrange(self.num_classes):
                y_this = np.where((y_test == c_true) * (y_hat == c_pred))
                conf[c_pred, c_true] = np.count_nonzero(y_this)
        return conf
class MultiClassSVM(Classifier):
    """
    Multi-class classification using Support Vector Machines (SVMs)

    This class implements an SVM for multi-class classification. Whereas
    some classifiers naturally permit the use of more than two classes
    (such as neural networks), SVMs are binary in nature.

    However, we can turn SVMs into multinomial classifiers using at least
    two different strategies:
    * one-vs-all: A single classifier is trained per class, with the
                  samples of that class as positives (label 1) and all
                  others as negatives (label 0).
    * one-vs-one: For k classes, k*(k-1)/2 classifiers are trained for each
                  pair of classes, with the samples of the one class as
                  positives (label 1) and samples of the other class as
                  negatives (label 0).

    Each classifier then votes for a particular class label, and the final
    decision (classification) is based on a majority vote.
    """

    def __init__(self, num_classes, mode="one-vs-all", params=None):
        """
        The constructor makes sure the correct number of classifiers is
        initialized, depending on the mode ("one-vs-all" or "one-vs-one").

        :param num_classes: The number of classes in the data.
        :param mode: Which classification mode to use.
                     "one-vs-all": single classifier per class
                     "one-vs-one": single classifier per class pair
                     Default: "one-vs-all"
        :param params: SVM training parameters.
                       For now, default values are used for all SVMs.
                       Hyperparameter exploration can be achieved by
                       embedding the MultiClassSVM process flow in a
                       for-loop that classifies the data with
                       different parameter values, then pick the
                       values that yield the best accuracy.
                       Default: None
        """
        self.num_classes = num_classes
        self.mode = mode
        self.params = params or dict()

        # initialize correct number of classifiers
        self.classifiers = []
        if mode == "one-vs-one":
            # k classes: need k*(k-1)/2 classifiers
            for _ in xrange(num_classes*(num_classes - 1) / 2):
                self.classifiers.append(cv2.ml.SVM_create())
        elif mode == "one-vs-all":
            # k classes: need k classifiers
            for _ in xrange(num_classes):
                self.classifiers.append(cv2.ml.SVM_create())
        else:
            print "Unknown mode ", mode

    def fit(self, X_train, y_train, params=None):
        """Fits the model to training data

        This method trains the classifier on data (X_train) using either
        the "one-vs-one" or "one-vs-all" strategy.

        :param X_train: input data (rows=samples, cols=features)
        :param y_train: vector of class labels
        :param params: dict to specify training options for cv2.SVM.train
                       leave blank to use the parameters passed to the
                       constructor
        """
        if params is None:
            params = self.params
        if self.mode == "one-vs-one":
            svm_id = 0
            for c1 in xrange(self.num_classes):
                for c2 in xrange(c1 + 1, self.num_classes):
                    # indices where class labels are either `c1` or `c2`
                    data_id = np.where((y_train == c1) + (y_train == c2))[0]
                    # set class label to 1 where class is `c1`, else 0
                    y_train_bin = np.where(y_train[data_id] == c1, 1,
                                           0).flatten()
                    self.classifiers[svm_id].train(X_train[data_id, :],
                                                   y_train_bin,
                                                   params=self.params)
                    svm_id += 1
        elif self.mode == "one-vs-all":
            for c in xrange(self.num_classes):
                # train c-th SVM on class c vs. all other classes
                # set class label to 1 where class==c, else 0
                y_train_bin = np.where(y_train == c, 1, 0).flatten()
                # train SVM
                self.classifiers[c].train(X_train, y_train_bin,
                                          params=self.params)

    def evaluate(self, X_test, y_test, visualize=False):
        """Evaluates the model on test data

        This method evaluates the classifier's performance on test data
        (X_test) using either the "one-vs-one" or "one-vs-all" strategy.

        :param X_test: input data (rows=samples, cols=features)
        :param y_test: vector of class labels
        :param visualize: flag whether to plot the results (True) or not
                          (False)
        :returns: accuracy, precision, recall
        """
        # prepare Y_vote: for each sample, count how many times we voted
        # for each class
        Y_vote = np.zeros((len(y_test), self.num_classes))
        if self.mode == "one-vs-one":
            svm_id = 0
            for c1 in xrange(self.num_classes):
                for c2 in xrange(c1 + 1, self.num_classes):
                    data_id = np.where((y_test == c1) + (y_test == c2))[0]
                    X_test_id = X_test[data_id, :]
                    y_test_id = y_test[data_id]
                    # set class label to 1 where class==c1, else 0
                    # y_test_bin = np.where(y_test_id==c1,1,0).reshape(-1,1)
                    # predict labels
                    y_hat = self.classifiers[svm_id].predict_all(X_test_id)
                    for i in xrange(len(y_hat)):
                        if y_hat[i] == 1:
                            Y_vote[data_id[i], c1] += 1
                        elif y_hat[i] == 0:
                            Y_vote[data_id[i], c2] += 1
                        else:
                            print "y_hat[", i, "] = ", y_hat[i]
                    # we vote for c1 where y_hat is 1, and for c2 where y_hat
                    # is 0; np.where serves as the inner index into the data_id
                    # array, which in turn serves as index into the results
                    # array
                    # Y_vote[data_id[np.where(y_hat == 1)[0]], c1] += 1
                    # Y_vote[data_id[np.where(y_hat == 0)[0]], c2] += 1
                    svm_id += 1
        elif self.mode == "one-vs-all":
            for c in xrange(self.num_classes):
                # set class label to 1 where class==c, else 0
                # predict class labels
                # y_test_bin = np.where(y_test==c,1,0).reshape(-1,1)
                # predict labels
                y_hat = self.classifiers[c].predict_all(X_test)
                # we vote for c where y_hat is 1
                if np.any(y_hat):
                    Y_vote[np.where(y_hat == 1)[0], c] += 1

        # with this voting scheme it's possible to end up with samples
        # that have no label at all...in this case, pick a class at
        # random...
        no_label = np.where(np.sum(Y_vote, axis=1) == 0)[0]
        Y_vote[no_label, np.random.randint(self.num_classes,
                                           size=len(no_label))] = 1

        accuracy = self._accuracy(y_test, Y_vote)
        precision = self._precision(y_test, Y_vote)
        recall = self._recall(y_test, Y_vote)
        return accuracy, precision, recall
When running chapter6.py, the output is:
feature None
- strategy one-vs-one
- train
Traceback (most recent call last):
  File "/home/redhwan/Downloads/opencv-python-blueprints-master/chapter6/chapter6.py", line 77, in <module>
    main()
  File "/home/redhwan/Downloads/opencv-python-blueprints-master/chapter6/chapter6.py", line 44, in main
    MCS.fit(X_train, y_train)
  File "/home/redhwan/Downloads/opencv-python-blueprints-master/chapter6/classifiers.py", line 258, in fit
    params=self.params)
TypeError: only length-1 arrays can be converted to Python scalars
Please help me, or offer your suggestions. Thank you in advance!
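A likely cause, judging from the traceback: the cv2.ml API changed between OpenCV 2.4 and 3.x. In 3.x, SVM.train() no longer accepts a params keyword, and its signature is train(samples, layout, responses), so in the call above the binary label array lands in the slot where OpenCV expects the integer layout flag, and NumPy refuses to convert an array to a scalar. A minimal sketch of what the call could look like under the 3.x API (training options are set via setter methods instead of a params dict; the data below is a stand-in for the question's arrays):

import cv2
import numpy as np

X_train = np.random.rand(10, 4).astype(np.float32)   # stand-in samples
y_train_bin = (np.arange(10) % 2).astype(np.int32)   # stand-in binary labels

svm = cv2.ml.SVM_create()
svm.setType(cv2.ml.SVM_C_SVC)     # options that used to live in a params dict
svm.setKernel(cv2.ml.SVM_LINEAR)  # are now configured through setters
# train() takes float32 samples, an explicit layout flag, and int32 responses
svm.train(X_train, cv2.ml.ROW_SAMPLE, y_train_bin)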
Hi all,
I am trying to set up an abstract model for a very simple QP of the form
min (x-x0)^2
s.t.
A x = b
C x <= d
I would like to use an abstract model, as I need to re-solve with changing parameters (mainly x0, but potentially also A, b, C, and d). I am currently struggling with simply setting the parameters in the model instance. I do not want to use an external data file, but rather internal Python variables. All examples I find online use AMPL-formatted data files.
This is the code I have right now
import pyomo.environ as pe
model = pe.AbstractModel()
# the sets
model.n = pe.Param(within=pe.NonNegativeIntegers)
model.m = pe.Param(initialize = 1)
model.ss = pe.RangeSet(1, model.n)
model.os = pe.RangeSet(1, model.m)
# the starting point and the constraint parameters
model.x_hat = pe.Param(model.ss)
model.A = pe.Param(model.os, model.ss)
model.b = pe.Param(model.os)
model.C = pe.Param(model.os, model.os)
model.d = pe.Param(model.ss, model.os)
# the decision variables
model.x_projected = pe.Var(model.ss)
# the constraints
# A x = b
def sum_of_elements_rule(model):
    value = model.A * model.x_projected
    return value == model.d
model.sumelem = pe.Constraint(model.os, rule=sum_of_elements_rule)
# C x <= d
def positivity_constraint(model):
    return model.C*model.x_projected <= model.d
model.bounds = pe.Constraint(model.ss, rule=positivity_constraint)
# the cost
def cost_rule(model):
    return sum((model.x_projected[i] - model.x_hat[i])**2 for i in model.ss)
model.cost = pe.Objective(rule=cost_rule)
instance = model.create_instance()
And somehow here I am stuck. How do I set the parameters now?
Thanks and best, Theo
I know this is an old post, but a solution to this could have helped me, so here is the solution to this problem:
## TEST
data_init = {None: dict(
    n={None: 3},
    d={0: 0, 1: 1, 2: 2},
    x_hat={0: 10, 1: -1, 2: -100},
    b={None: 10}
)}
# create instance
instance = model.create_instance(data_init)
This creates the instance in a way equivalent to what you did, but more formally. The outer None key refers to the model namespace, and scalar parameters likewise take a {None: value} entry.
OK, I seem to have figured out what the problem is. If I want to set a parameter after I create an instance, I need the
mutable=True
flag. Then I can set the parameter with something like
for i in range(model_dimension):
    getattr(instance, 'd')[i] = i
The model dimension has to be chosen before I create an instance (which is OK for my case). The instance can be reused with different parameters for the constraints.
The code below should work for the problem
min (x-x_hat)' * (x-x_hat)
s.t.
sum(x) = b
x[i] >= d[i]
with x_hat, b, d as parameters.
import pyomo.environ as pe
model = pe.AbstractModel()
# model dimension
model.n = pe.Param(default=2)
# state space set
model.ss = pe.RangeSet(0, model.n-1)
# equality
model.b = pe.Param(default=5, mutable=True)
# inequality
model.d = pe.Param(model.ss, default=0.0, mutable=True)
# decision var
model.x = pe.Var(model.ss)
model.x_hat = pe.Param(model.ss, default=0.0, mutable=True)
# the cost
def cost_rule(model):
    return sum((model.x[i] - model.x_hat[i])**2 for i in model.ss)
model.cost = pe.Objective(rule=cost_rule)
# CONSTRAINTS
# each x_i bigger than d_i
def lb_rule(model, i):
    return (model.x[i] >= model.d[i])
model.state_bound = pe.Constraint(model.ss, rule=lb_rule)
# sum of x == P_tot
def sum_rule(model):
    return (sum(model.x[i] for i in model.ss) == model.b)
model.state_sum = pe.Constraint(rule=sum_rule)
## TEST
# define model dimension
model_dimension = 3
model.n = model_dimension
# create instance
instance = model.create_instance()
# set d
for i in range(model_dimension):
    getattr(instance, 'd')[i] = i
# set x_hat
xh = (10,1,-100)
for i in range(model_dimension):
    getattr(instance, 'x_hat')[i] = xh[i]
# set b
instance.b = 10
# solve
solver = pe.SolverFactory('ipopt')
result = solver.solve(instance)
instance.display()
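Because b, d, and x_hat are declared mutable, the same instance can afterwards be re-solved with updated values, without calling create_instance() again; a minimal sketch:

# update parameters in place and solve the same instance again
instance.b = 20
for i in range(model_dimension):
    instance.x_hat[i] = 0.0
result = solver.solve(instance)
instance.display()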
I am trying to perform feature selection for a logistic regression classifier. Originally there are four variables: name, location, gender, and the label, ethnicity. These variables, mainly the name, give rise to tens of thousands of additional "features": for example, the name "John Snow" gives rise to 2-letter substrings like 'jo', 'oh', 'hn', etc. The feature set then undergoes DictVectorizer transformation.
I am trying to follow this tutorial (http://scikit-learn.org/stable/auto_examples/feature_selection/plot_feature_selection.html), but I am not sure if I am doing it right, since the tutorial uses a small number of features while mine has tens of thousands after vectorization. Also, plt.show() displays a blank figure.
# coding=utf-8
import pandas as pd
from pandas import DataFrame, Series
import numpy as np
import re
import random
import time
from random import randint
import csv
import sys
reload(sys)
sys.setdefaultencoding('utf-8')
from sklearn import svm
from sklearn.metrics import classification_report
from sklearn.linear_model import LogisticRegression
from sklearn.svm import LinearSVC
from sklearn.tree import DecisionTreeClassifier
from sklearn.naive_bayes import MultinomialNB
from sklearn.feature_extraction import DictVectorizer
from sklearn.feature_selection import SelectPercentile, f_classif
from sklearn.metrics import confusion_matrix as sk_confusion_matrix
from sklearn.metrics import roc_curve, auc
import matplotlib.pyplot as plt
from sklearn.metrics import precision_recall_curve
# Assign X and y variables
X = df.raw_name.values
X2 = df.name.values
X3 = df.gender.values
X4 = df.location.values
y = df.ethnicity_scan.values
# Feature extraction functions
def feature_full_name(nameString):
    try:
        full_name = nameString
        if len(full_name) > 1:  # not accept name with only 1 character
            return full_name
        else:
            return '?'
    except:
        return '?'

def feature_avg_wordLength(nameString):
    try:
        space = 0
        for i in nameString:
            if i == ' ':
                space += 1
        length = float(len(nameString) - space)
        name_entity = float(space + 1)
        avg = round(float(length/name_entity), 0)
        return avg
    except:
        return 0

def feature_name_entity(nameString2):
    space = 0
    try:
        for i in nameString2:
            if i == ' ':
                space += 1
        return space + 1
    except:
        return 0

def feature_gender(genString):
    try:
        gender = genString
        if len(gender) >= 1:
            return gender
        else:
            return '?'
    except:
        return '?'

def feature_noNeighborLoc(locString):
    try:
        x = re.sub(r'^[^, ]*', '', locString)  # remove everything before and including the first ','
        y = x[2:]  # remove subsequent ',' and ' '
        return y
    except:
        return '?'

def list_to_dict(substring_list):
    try:
        substring_dict = {}
        for i in substring_list:
            substring_dict['substring='+str(i)] = True
        return substring_dict
    except:
        return '?'
# Transform format of X variables, and spit out a numpy array for all features
my_dict13 = [{'name-entity': feature_name_entity(feature_full_name(i))} for i in X2]
my_dict14 = [{'avg-length': feature_avg_wordLength(feature_full_name(i))} for i in X]
my_dict15 = [{'gender': feature_full_name(i)} for i in X3]
my_dict16 = [{'location': feature_noNeighborLoc(feature_full_name(i))} for i in X4]
my_dict17 = [{'dummy1': 1} for i in X]
my_dict18 = [{'dummy2': random.randint(0,2)} for i in X]
all_dict = []
for i in range(0, len(my_dict13)):  # my_dict13..my_dict18 all have the same length
    temp_dict = dict(my_dict13[i].items() + my_dict14[i].items()
                     + my_dict15[i].items() + my_dict16[i].items()
                     + my_dict17[i].items() + my_dict18[i].items())
    all_dict.append(temp_dict)
dv = DictVectorizer()  # must be created before fit_transform is called
newX = dv.fit_transform(all_dict)
# Separate the training and testing data sets
half_cut = int(len(df)/2.0)*-1
X_train = newX[:half_cut]
X_test = newX[half_cut:]
y_train = y[:half_cut]
y_test = y[half_cut:]
# Fitting X and y into model, using training data
lr = LogisticRegression()
lr.fit(X_train, y_train)
# Feature selection
plt.figure(1)
plt.clf()
X_indices = np.arange(X_train.shape[-1])
selector = SelectPercentile(f_classif, percentile=10)
selector.fit(X_train, y_train)
scores = -np.log10(selector.pvalues_)
scores /= scores.max()
plt.bar(X_indices - .45, scores, width=.2,
        label=r'Univariate score ($-Log(p_{value})$)', color='g')
plt.show()
Warning:
E:\Program Files Extra\Python27\lib\site-packages\sklearn\feature_selection\univariate_selection.py:111: UserWarning: Features [[0 0 0 ..., 0 0 0]] are constant.
It looks like the way you split your data into training and testing sets is not working:
# Separate the training and testing data sets
X_train = newX[:half_cut]
X_test = newX[half_cut:]
If you already use sklearn, it is much more convenient to use the builtin splitting routine for this:
X_train, X_test, y_train, y_test = cross_validation.train_test_split(X, y, test_size=0.5, random_state=0)
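Note that train_test_split lives in sklearn.cross_validation in older releases (it moved to sklearn.model_selection in scikit-learn 0.18). A short sketch using the question's newX and y:

from sklearn.cross_validation import train_test_split  # sklearn.model_selection in >= 0.18
X_train, X_test, y_train, y_test = train_test_split(
    newX, y, test_size=0.5, random_state=0)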