Practice assignment AWS Computer Vision : get_Cifar10_dataset - amazon-web-services

I have a problem with this method, which should return both the training and the validation dataset and examine them to return the index that corresponds to the first occurrence of each class in CIFAR-10.
This is the code:
def get_cifar10_dataset():
    """
    Should create the cifar 10 network and identify the dataset index of the first time each new class appears

    :return: tuple of training and validation dataset as well as label indices
    :rtype: (gluon.data.Dataset, dict[int:int], gluon.data.Dataset, dict[int:int])
    """
    train_data = None
    val_data = None

    # YOUR CODE HERE
    train_data = datasets.CIFAR10(train=True, root=M5_IMAGES)
    val_data = datasets.CIFAR10(train=False, root=M5_IMAGES)

You are asked to return a dictionary mapping each label to the corresponding index. The following function solves that part of the problem.
def get_idx_dict(data):
    lis = []
    idx = []
    indices = {}
    for i in range(len(data)):
        if data[i][1] not in lis:
            lis.append(data[i][1])
            idx.append(i)
    indices = {lis[i]: idx[i] for i in range(len(lis))}
    return indices
The function returns a dictionary with the desired output. Use it on both the training and validation sets:
train_indices = get_idx_dict(train_data)
val_indices = get_idx_dict(val_data)

You can do it this way:
def get_cifar10_dataset():
    """
    Should create the cifar 10 network and identify the dataset index of the first time each new class appears

    :return: tuple of training and validation dataset as well as label indices
    :rtype: (gluon.data.Dataset, dict[int:int], gluon.data.Dataset, dict[int:int])
    """
    train_data = None
    val_data = None
    train_indices = {}
    val_indices = {}

    # Use `root=M5_IMAGES` for your dataset
    train_data = gluon.data.vision.datasets.CIFAR10(train=True, root=M5_IMAGES)
    val_data = gluon.data.vision.datasets.CIFAR10(train=False, root=M5_IMAGES)

    # first occurrence of each class in the training set
    for i in range(len(train_data)):
        if train_data[i][1] not in train_indices:
            train_indices[train_data[i][1]] = i

    # first occurrence of each class in the validation set
    for i in range(len(val_data)):
        if val_data[i][1] not in val_indices:
            val_indices[val_data[i][1]] = i

    # raise NotImplementedError()
    return train_data, train_indices, val_data, val_indices
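For reference, a quick sanity check of the returned values (a minimal sketch; it assumes M5_IMAGES and the gluon imports from the assignment notebook are already defined):

train_data, train_indices, val_data, val_indices = get_cifar10_dataset()

print(len(train_data), len(val_data))  # 50000 10000
print(sorted(train_indices))           # the ten CIFAR-10 class ids: [0, 1, ..., 9]
print(train_indices[0])                # index of the first image labelled 0 in train_data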

Related

What does if self.transforms mean?

What does
if self.transforms:
    data = self.transforms(data)
do? I don't understand the logic behind this line - what is the condition the line is using?
I'm reading an article on creating a custom dataset with pytorch based on the below implementation:
# custom dataset
class MNISTDataset(Dataset):
    def __init__(self, images, labels=None, transforms=None):
        self.X = images
        self.y = labels
        self.transforms = transforms

    def __len__(self):
        return len(self.X)

    def __getitem__(self, i):
        data = self.X.iloc[i, :]
        data = np.asarray(data).astype(np.uint8).reshape(28, 28, 1)
        if self.transforms:
            data = self.transforms(data)
        if self.y is not None:
            return (data, self.y[i])
        else:
            return data

train_data = MNISTDataset(train_images, train_labels, transform)
test_data = MNISTDataset(test_images, test_labels, transform)

# dataloaders
trainloader = DataLoader(train_data, batch_size=128, shuffle=True)
testloader = DataLoader(test_data, batch_size=128, shuffle=True)
Thank you! I'm basically trying to understand why it works and how it applies transforms to the data.
The MNISTDataset class can optionally be initialized with a transform function. If such a function is given, it is saved in self.transforms; otherwise the attribute keeps its default value of None. When an item is fetched with __getitem__, the code first checks whether self.transforms is truthy, i.e. whether it can be coerced to True, which is the case for any callable object. If it is, the transform is applied to data. Otherwise self.transforms was never provided in the first place, it is still None (which is falsy), and no transform function is applied to data.
Here's a general example, out of a torch/torchvision context:
def do(x, callback=None):
    if callback:  # will be True if callback is a function/lambda
        return callback(x)
    return x

do(2)                          # returns 2
do(2, callback=lambda x: 2*x)  # returns 4
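To see the same condition in action with the MNISTDataset class above, here is a minimal sketch; the tiny train_images DataFrame and train_labels list are hypothetical stand-ins for the article's data:

import numpy as np
import pandas as pd
from torchvision import transforms

# hypothetical stand-ins: two flattened 28x28 images and their labels
train_images = pd.DataFrame(np.zeros((2, 784), dtype=np.uint8))
train_labels = [0, 1]

# with a transform: self.transforms is a callable, so `if self.transforms` is truthy
with_tf = MNISTDataset(train_images, train_labels, transforms=transforms.ToTensor())
x, y = with_tf[0]      # x is a torch.Tensor of shape (1, 28, 28)

# without a transform: self.transforms is None (falsy), the branch is skipped
without_tf = MNISTDataset(train_images, train_labels)
x, y = without_tf[0]   # x stays a numpy array of shape (28, 28, 1)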

Looking for the best way to insert records from one model to another based on selection in Odoo

I wrote code that inserts records from the so_parts table into the so_bo table using a raw SQL query. How can I use an ORM method to do this kind of job? Is there any other (better) way to do it? Here is my code:
@api.multi
def save_rapair_parts(self, vals):
    # get today's date and convert it to a string
    created_date = datetime.datetime.today().strftime("%m/%d/%Y")
    str_date = str(created_date)
    so_p_id = self.so_p_id.id
    bo_status = self.bo_status
    so_part_t = self.so_part_t
    so_part_sno = self.so_part_sno
    product = self.so_part_product
    product_str = 'Repair ' + str(product)
    part_id = self.id
    bench_order_table.search(['id', 'bo_sno', 'created_date', 'bo_number', 'rep_description', 'bo_status'])
    # insert details into so bench orders
    if so_part_t == 'r_b':
        try:
            sequence = self.env['ir.sequence'].next_by_code('so.benchorder') or '/'
            str_sequence = str(sequence)
            query = """SELECT so_work_authorization FROM my_depots_so WHERE id=%d """ % (so_p_id)
            self.env.cr.execute(query)
            result = self.env.cr.fetchall()
            result_number = json.dumps(result, ensure_ascii=False)
            strip_number = result_number.strip('\' \" [] ')
            work_auth_no = str(strip_number)
            work_auth_no += "-"
            work_auth_no += str_sequence
            insert = """INSERT INTO my_depots_so_bo(id,so_bo_id,bo_sno,created_date,bo_number,rep_description,bo_status) values %s """
            parameters = (part_id, so_p_id, so_part_sno, str_date, work_auth_no, product_str, bo_status)
            self.env.cr.execute(insert, (parameters,))
            insert_query = """INSERT INTO my_depots_bo(id,bo_sno,created_date,bo_number,rep_description,bo_status) values %s """
            # self.env.cr.execute(insert_query, (parameters,))
        except Exception:
            print "Error in inserting values"
Yes, there is a better way, because when you use the ORM methods the user's access rights are also checked.
For your select query:
rec = self.env['my.depots.so'].search_read([('id', '=', so_p_id)], ['so_work_authorization'])
if rec:
    rec = rec[0]  # search_read returns a list of dictionaries
    so_work_authorization = rec['so_work_authorization']
    # and do whatever you want with the result

# to create a record, call the create method, which accepts a dictionary of
# field_name: value pairs
new_rec = self.env['my.depots.so.bo'].create({
    'so_bo_id': so_p_id,  # many2one must be an integer value
    'bo_sno': bo_nso_value,
    'bo_number': value_of_number,
    # ....
    # ....
    # add all fields
})  # create returns the newly created record as a model object
For inserting, use: self.env['model.name'].create(vals)
For updating, call write(vals) on a recordset, e.g. record.write(vals)
Using the ORM methods makes sure the user can't bypass the security access rights.
Hope you get the idea.
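Putting both pieces together, the raw SQL in save_rapair_parts could be replaced with something like the sketch below. Field and model names are taken from the question; the 'my.depots.so.bo' model is assumed to correspond to the my_depots_so_bo table, and created_date/bo_number are assumed to be plain char/date fields on it:

# assumes the same imports as the original method (datetime, odoo api)
@api.multi
def save_rapair_parts(self, vals):
    str_date = datetime.datetime.today().strftime("%m/%d/%Y")

    if self.so_part_t == 'r_b':
        sequence = self.env['ir.sequence'].next_by_code('so.benchorder') or '/'

        # ORM replacement for the raw SELECT
        rec = self.env['my.depots.so'].search_read(
            [('id', '=', self.so_p_id.id)], ['so_work_authorization'])
        work_auth_no = ''
        if rec:
            work_auth_no = '%s-%s' % (rec[0]['so_work_authorization'], sequence)

        # ORM replacement for the raw INSERT
        self.env['my.depots.so.bo'].create({
            'so_bo_id': self.so_p_id.id,        # many2one -> integer id
            'bo_sno': self.so_part_sno,
            'created_date': str_date,
            'bo_number': work_auth_no,
            'rep_description': 'Repair %s' % self.so_part_product,
            'bo_status': self.bo_status,
        })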

Python - Convert dictionary (having "list" as values) into csv file

Trying to write the dictionary below into a CSV file with the desired output as shown.
dict_data = {"1":["xyz"],
"2":["abc","def"],
"3":["zzz"]
}
desired output:
1,3,2
xyz,zzz,abc
def
The code below doesn't work as expected, as it keeps both "abc" and "def" in the same cell, as shown below.
with open('k.csv', 'wb') as out_file:
    writer = csv.writer(out_file, dialect='excel')
    headers = [k for k in dict_data]
    items = [dict_data[k] for k in dict_data]
    writer.writerow(headers)
    writer.writerow(items)
output:
1,3,2
xyz,zzz,abc,def
Here is the complete solution:
import csv
import os

class CsvfileWriter:
    '''
    Takes a dictionary as input and writes its items into a CSV file.
    For ex:-
    Input dictionary:
    dict_data = {"1":["xyz"],"2":["abc","def"],"3":["zzz"]}
    Output: (CSV file)
    1,3,2
    xyz,zzz,abc
    ,,def
    '''
    def __init__(self, dictInput, maxLength=0):
        '''
        Creates an instance with the following variables:
        dictInput & maxLength
        dictInput -> dictionary having values (lists) of the same length
        ex:-
        dict_data = {"1":["xyz",""],"2":["abc","def"],"3":["zzz",""]}
        maxLength -> length of the list
        '''
        self.dictInput = dictInput
        self.maxLength = maxLength

    @classmethod
    def list_padding(cls, dictInput):
        '''
        Converts an input dictionary having lists (as values) of varying lengths into lists of constant length.
        Also returns the class variables dictInput & maxLength.
        Note:
        dictInput represents the dictionary after padding is applied.
        maxLength represents the length of the list (values in dictionary) having the maximum number of items.
        Ex:-
        input dictionary:
        dict_data = {"1":["xyz"],"2":["abc","def"],"3":["zzz"]}
        output dictionary:
        dict_data = {"1":["xyz",""],"2":["abc","def"],"3":["zzz",""]}
        '''
        cls.dictInput = dictInput
        listValues = dictInput.values()
        listValues.sort(key=lambda i: len(i))
        maxLength = len(listValues[-1])
        for i in listValues:
            while len(i) < maxLength:
                i.append('')
        return cls(dictInput, maxLength)

    def write_to_csv(self):
        with open('sample_file.csv', 'wb') as out_file:
            writer = csv.writer(out_file, dialect='excel')
            headers = [k for k in self.dictInput]
            items = [self.dictInput[k] for k in self.dictInput]
            writer.writerow(headers)
            c = 0
            while c < self.maxLength:
                writer.writerow([i[c] for i in items])
                c += 1

dict_data = {"1":["xyz"],"2":["abc","def"],"3":["zzz"]}
cf = CsvfileWriter.list_padding(dict_data)
cf.write_to_csv()
The following works in Python 2:
import csv

dict_data = {
    "1": ["xyz"],
    "2": ["abc", "def"],
    "3": ["zzz"]
}

def transpose(cols):
    return map(lambda *row: list(row), *cols)

with open('k.csv', 'w') as out_file:
    writer = csv.writer(out_file, dialect='excel')
    headers = dict_data.keys()
    items = transpose(dict_data.values())
    writer.writerow(headers)
    writer.writerows(items)
I can't take credit for the transpose function, which I picked up from here. It turns a list of columns into a list of rows, automatically padding columns that are too short with None. Fortunately, the csv writer outputs blanks for None values, which is exactly what's needed.
(In Python 3, map behaves differently (no padding), so it would require some changes.)
Edit: A replacement transpose function that works for both Python 2 and 3 is:
def transpose(cols):
    def mypop(l):
        try:
            return l.pop(0)
        except IndexError:
            return ''
    while any(cols):
        yield [mypop(l) for l in cols]
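For completeness, a sketch of the same write in Python 3 using the replacement transpose above (note that it consumes the input lists as it pops from them, and that newline='' avoids blank lines between rows on Windows):

import csv

dict_data = {
    "1": ["xyz"],
    "2": ["abc", "def"],
    "3": ["zzz"]
}

with open('k.csv', 'w', newline='') as out_file:
    writer = csv.writer(out_file, dialect='excel')
    writer.writerow(dict_data.keys())
    writer.writerows(transpose(list(dict_data.values())))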

Python: How to properly call a method?

I have this class:
class Tumor(object):
    """
    Wrapper for the tumor data points.

    Attributes:
    idNum = ID number for the tumor (is unique) (int)
    malignant = label for this tumor (either 'M' for malignant
                or 'B' for benign) (string)
    featureNames = names of all features used in this Tumor
                   instance (list of strings)
    featureVals = values of all features used in this Tumor
                  instance, same order as featureNames (list of floats)
    """
    def __init__(self, idNum, malignant, featureNames, featureVals):
        self.idNum = idNum
        self.label = malignant
        self.featureNames = featureNames
        self.featureVals = featureVals

    def distance(self, other):
        dist = 0.0
        for i in range(len(self.featureVals)):
            dist += abs(self.featureVals[i] - other.featureVals[i])**2
        return dist**0.5

    def getLabel(self):
        return self.label

    def getFeatures(self):
        return self.featureVals

    def getFeatureNames(self):
        return self.featureNames

    def __str__(self):
        return str(self.idNum) + ', ' + str(self.label) + ', ' \
            + str(self.featureVals)
and I am trying to use an instance of it in another function later in my code:
def train_model(train_set):
    """
    Trains a logistic regression model with the given dataset

    train_set (list): list of data points of type Tumor

    Returns a model of type sklearn.linear_model.LogisticRegression
    fit to the training data
    """
    tumor = Tumor()
    features = tumor.getFeatures()
    labels = tumor.getLabel()
    log_reg = sklearn.linear_model.LogisticRegression(train_set)
    model = log_reg.fit(features, labels)
    return model
However, I keep getting this error when I test my code:
TypeError: __init__() takes exactly 5 arguments (1 given)
I understand that I am not passing the five arguments when I create the instance of Tumor in train_model, but how can I do so?
Arguments to the __init__ (or __new__, if you're using that) just go, predictably, where you create the instance in train_model:
tumor = Tumor(idNum, malignant, featureNames, featureVals)
Of course, you actually need values for all of these, as they are all required arguments.
You don't need to include self, however, as that first argument is taken care of automatically.
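Since train_set already contains Tumor instances, you don't actually need to construct a new Tumor here at all. A sketch of train_model that collects the features and labels from every tumor in the set (and also moves the training data into fit(), where scikit-learn expects it) could look like this:

import sklearn.linear_model

def train_model(train_set):
    """Fit a logistic regression model to a list of Tumor instances."""
    # one feature vector and one label per tumor in the training set
    features = [tumor.getFeatures() for tumor in train_set]
    labels = [tumor.getLabel() for tumor in train_set]

    # the data goes to fit(), not to the LogisticRegression constructor
    log_reg = sklearn.linear_model.LogisticRegression()
    return log_reg.fit(features, labels)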

Getting next and previous objects in Django

I'm trying to get the next and previous objects of a comic book issue. Simply changing the id number or filtering through date added is not going to work because I don't add the issues sequentially.
This is how my views are set up. It WORKS for prev_issue and does return the previous object, but it returns the last object for next_issue, and I do not know why.
def issue(request, issue_id):
    issue = get_object_or_404(Issue, pk=issue_id)
    title = Title.objects.filter(issue=issue)
    prev_issue = Issue.objects.filter(title=title).filter(number__lt=issue.number)[0:1]
    next_issue = Issue.objects.filter(title=title).filter(number__gt=issue.number)[0:1]
Add an order_by clause to ensure it orders by number.
next_issue = Issue.objects.filter(title=title, number__gt=issue.number).order_by('number').first()
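The previous issue works the same way with the ordering reversed (a sketch mirroring the line above):

prev_issue = Issue.objects.filter(title=title, number__lt=issue.number).order_by('-number').first()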
I know this is a bit late, but for anyone else: Django does have a nicer way to do this, see https://docs.djangoproject.com/en/1.7/ref/models/instances/#django.db.models.Model.get_previous_by_FOO
So the answer here would be something like
next_issue = Issue.get_next_by_number(issue, title=title)
Django managers do that with a bit of metaclass cleverness.
If you need to find the next and previous objects ordered by field values that can be equal, and those fields are not of a Date* type, the query gets slightly more complex, because:
ordering objects with the same value and limiting with [:1] will always produce the same result for several objects;
the object can itself be included in the resulting set.
Here are querysets that also take the primary keys into account to produce a correct result (assuming that the number field from the OP is not unique, and omitting the title parameter as it's irrelevant for the example):
Previous:
prev_issue = (Issue.objects
              .filter(number__lte=issue.number, id__lt=issue.id)
              .exclude(id=issue.id)
              .order_by('-number', '-id')
              .first())
Next:
next_issue = (Issue.objects
              .filter(number__gte=issue.number, id__gt=issue.id)
              .exclude(id=issue.id)
              .order_by('number', 'id')
              .first())
from functools import partial, reduce
from django.db import models


def get_model_attr(instance, field):
    # helper assumed by this snippet: resolves 'field__subfield' style lookups via getattr
    for part in field.split('__'):
        instance = getattr(instance, part)
    return instance


def next_or_prev_instance(instance, qs=None, prev=False, loop=False):
    if not qs:
        qs = instance.__class__.objects.all()
    if prev:
        qs = qs.reverse()
        lookup = 'lt'
    else:
        lookup = 'gt'
    q_list = []
    prev_fields = []
    if qs.query.extra_order_by:
        ordering = qs.query.extra_order_by
    elif qs.query.order_by:
        ordering = qs.query.order_by
    elif qs.query.get_meta().ordering:
        ordering = qs.query.get_meta().ordering
    else:
        ordering = []
    ordering = list(ordering)
    if 'pk' not in ordering and '-pk' not in ordering:
        ordering.append('pk')
    qs = qs.order_by(*ordering)
    for field in ordering:
        if field[0] == '-':
            this_lookup = (lookup == 'gt' and 'lt' or 'gt')
            field = field[1:]
        else:
            this_lookup = lookup
        q_kwargs = dict([(f, get_model_attr(instance, f))
                         for f in prev_fields])
        key = "%s__%s" % (field, this_lookup)
        q_kwargs[key] = get_model_attr(instance, field)
        q_list.append(models.Q(**q_kwargs))
        prev_fields.append(field)
    try:
        return qs.filter(reduce(models.Q.__or__, q_list))[0]
    except IndexError:
        length = qs.count()
        if loop and length > 1:
            return qs[0]
        return None


next_instance = partial(next_or_prev_instance, prev=False)
prev_instance = partial(next_or_prev_instance, prev=True)
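A hypothetical usage with the Issue model from the question; the queryset you pass in only needs an explicit ordering:

# next Issue after `issue`, following the number ordering (pk is added as a tie-breaker)
nxt = next_instance(issue, Issue.objects.order_by('number'))

# previous Issue, wrapping around to the last one when `issue` is the first
prv = prev_instance(issue, Issue.objects.order_by('number'), loop=True)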
Note: do not use things like object.get(pk=object.pk + 1); an error (DoesNotExist) occurs if the object at that pk has been deleted, hence always use a queryset.
for visitors:
''' Usage '''
"""
# Declare our item
store = Store.objects.get(pk=pk)

# Define our models
stores = Store.objects.all()

# Ask for the next item
new_store = get_next_or_prev(stores, store, 'next')

# If there is a next item
if new_store:
    # Replace our item with the next one
    store = new_store
"""
''' Function '''
def get_next_or_prev(models, item, direction):
    '''
    Returns the next or previous item of
    a query-set for 'item'.

    'models' is a query-set containing all
    items of which 'item' is a part of.

    direction is 'next' or 'prev'
    '''
    getit = False
    if direction == 'prev':
        models = models.reverse()
    for m in models:
        if getit:
            return m
        if item == m:
            getit = True
    if getit:
        # This would happen when the last
        # item made getit True
        return models[0]
    return False
original author
Usage
# you MUST call order_by to pass in an order, otherwise QuerySet.reverse will not work
qs = Model.objects.all().order_by('pk')
q = qs[0]
prev = get_next_or_prev(qs, q, 'prev')
next = get_next_or_prev(qs, q, 'next')
next_obj_id = int(current_obj_id) + 1
next_obj = Model.objects.filter(id=next_obj_id).first()
prev_obj_id= int(current_obj_id) - 1
prev_obj = Model.objects.filter(id=prev_obj_id).first()
# You have nothing to lose here... This works for me