'NoneType' object has no attribute 'name' - Django

I was building a wine recommendation system in Django using a k-means approach. I made a Cluster model in the admin and added 3 clusters manually. However, when I try to recommend wine to the logged-in user I get this error. Can you please help:
AttributeError at /reviews/recommendation/
'NoneType' object has no attribute 'name'
I am getting the error on this line:
User.objects.get(username=request.user.username).cluster_set.first().name
Here is the code for views.py:
@login_required
def user_recommendation_list(request):
    # get request user reviewed wines
    user_reviews = Review.objects.filter(user_name=request.user.username).prefetch_related('wine')
    user_reviews_wine_ids = set(map(lambda x: x.wine.id, user_reviews))
    # get request user cluster name (just the first one right now)
    try:
        user_cluster_name = \
            User.objects.get(username=request.user.username).cluster_set.first().name
    except:  # if no cluster assigned for a user, update clusters
        update_clusters()
        user_cluster_name = \
            User.objects.get(username=request.user.username).cluster_set.first().name
    # get usernames for other members of the cluster
    user_cluster_other_members = \
        Cluster.objects.get(name=user_cluster_name).users \
        .exclude(username=request.user.username).all()
    other_members_usernames = set(map(lambda x: x.username, user_cluster_other_members))
    # get reviews by those users, excluding wines reviewed by the request user
    other_users_reviews = \
        Review.objects.filter(user_name__in=other_members_usernames) \
        .exclude(wine__id__in=user_reviews_wine_ids)
    other_users_reviews_wine_ids = set(map(lambda x: x.wine.id, other_users_reviews))
    # then get a wine list including the previous IDs, order by rating
    wine_list = sorted(
        list(Wine.objects.filter(id__in=other_users_reviews_wine_ids)),
        key=lambda x: x.average_rating,
        reverse=True
    )
    return render(
        request,
        'reviews/user_recommendation_list.html',
        {'username': request.user.username, 'wine_list': wine_list}
    )
and here is the code for suggestions.py
from .models import Review, Wine, Cluster
from django.contrib.auth.models import User
from sklearn.cluster import KMeans
from scipy.sparse import dok_matrix, csr_matrix
import numpy as np
def update_clusters():
    num_reviews = Review.objects.count()
    update_step = ((num_reviews/100)+1) * 5
    if num_reviews % update_step == 0:  # using some magic numbers here, sorry...
        # Create a sparse matrix from user reviews
        all_user_names = map(lambda x: x.username, User.objects.only("username"))
        all_wine_ids = set(map(lambda x: x.wine.id, Review.objects.only("wine")))
        num_users = len(all_user_names)
        ratings_m = dok_matrix((num_users, max(all_wine_ids)+1), dtype=np.float32)
        for i in range(num_users):  # each user corresponds to a row, in the order of all_user_names
            user_reviews = Review.objects.filter(user_name=all_user_names[i])
            for user_review in user_reviews:
                ratings_m[i, user_review.wine.id] = user_review.rating
        # Perform kmeans clustering
        k = int(num_users / 10) + 2
        kmeans = KMeans(n_clusters=k)
        clustering = kmeans.fit(ratings_m.tocsr())
        # Update clusters
        Cluster.objects.all().delete()
        new_clusters = {i: Cluster(name=i) for i in range(k)}
        for cluster in new_clusters.values():  # clusters need to be saved before referring to users
            cluster.save()
        for i, cluster_label in enumerate(clustering.labels_):
            new_clusters[cluster_label].users.add(User.objects.get(username=all_user_names[i]))

When you add data to the cluster table, you need to include your currently logged-in username in one of the clusters. For instance:
jadianes, carlos, and lluis
<username>, teus, yasset
...
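If you would rather guard against the case where the request user still has no cluster, here is a minimal sketch (not part of the original tutorial code; it reuses the User, cluster_set and update_clusters names from the code above):
def get_user_cluster_name(username):
    # cluster_set.first() returns None when the user has not been assigned
    # to any cluster yet, which is what raises the AttributeError on .name.
    cluster = User.objects.get(username=username).cluster_set.first()
    if cluster is None:
        # Rebuild the clusters once and retry.
        update_clusters()
        cluster = User.objects.get(username=username).cluster_set.first()
    return cluster.name if cluster is not None else None
The view can then fall back to an empty wine_list when this helper returns None instead of raising an AttributeError.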

Related

Getting a 'ValueError: too many values to unpack' from a method that lists only one required arg. I'm not sure of the best way to unpack it

I am calling a method from a predefined class (L2Interface) in the acitoolkit module that lists only one required argument. The method returns two strings, 'encap-type' and 'encap-id'. I am floundering with the best way to unpack these values. Here is my script. The method in question is: vlans = aci.L2Interface.parse_encap(encap)
import sys
import acitoolkit.acitoolkit as aci
import requests
import re

def __init__(self, name, encap_type, encap_id, encap_mode=None):
    self.name = None
    self.encap_type = VLAN
    self.encap_id = None

def main():
    """
    Main Show Endpoints Routine
    :return: None
    """
    # Take login credentials from the command line if provided
    # Otherwise, take them from your environment variables file ~/.profile
    description = ('Simple application that logs on to the APIC'
                   ' and displays all of the Endpoints.')
    creds = aci.Credentials('apic', description)
    args = creds.get()
    # Login to APIC
    session = aci.Session(args.url, args.login, args.password, verify_ssl=False)
    resp = session.login()
    if not resp.ok:
        print('%% Could not login to APIC')
        sys.exit(0)
    # Get encap per interface
    # and store the data as tuples in a List
    data = []
    encap = 'vlan-[0-9].*'
    # vxtype = 'vxlan\-[0-9|a-z].*'
    vlans = aci.L2Interface.parse_encap(encap)
    for vlan in vlans:
        data.append((vlan.attributes['encap_type'],
                     vlan.attributes['encap_id']))
    # Display the data downloaded
    col_widths = [19, 17, 15, 15, 15]
    template = ''
    for idx, width in enumerate(col_widths):
        template += '{%s:%s} ' % (idx, width)
    print(template.format("ENDCAP_TYPE", "ENCAP_ID"))
    fmt_string = []
    for i in range(0, len(col_widths)):
        fmt_string.append('-' * (col_widths[i] - 2))
    print(template.format(*fmt_string))
    for rec in data:
        print(template.format(*rec))

if __name__ == '__main__':
    try:
        main()
    except KeyboardInterrupt:
        pass
I am trying to connect to an APIC, grab L2 interfaces with encapsulation (encap) assigned and return them in a list.
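For the unpacking itself, if parse_encap really does return the two strings directly, a plain tuple assignment is usually enough (a sketch under that assumption; I have not checked the acitoolkit source):
# Unpack the two returned strings in one assignment (assumes parse_encap
# returns exactly a 2-tuple such as ('vlan', '500')).
encap_type, encap_id = aci.L2Interface.parse_encap(encap)
data.append((encap_type, encap_id))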

'no SavedModel bundles found!' on tensorflow_hub model deployment to AWS SageMaker

I am attempting to deploy the universal-sentence-encoder model to an AWS SageMaker endpoint and am getting the error raise ValueError('no SavedModel bundles found!').
My code is shown below; I have a feeling that one of my paths is incorrect.
import tensorflow as tf
import tensorflow_hub as hub
import numpy as np
from sagemaker import get_execution_role
from sagemaker.tensorflow.serving import Model

def tfhub_to_savedmodel(model_name, uri):
    tfhub_uri = uri
    model_path = 'encoder_model/' + model_name
    with tf.Session(graph=tf.Graph()) as sess:
        module = hub.Module(tfhub_uri)
        input_params = module.get_input_info_dict()
        dtype = input_params['text'].dtype
        shape = input_params['text'].get_shape()
        # define the model inputs
        inputs = {'text': tf.placeholder(dtype, shape, 'text')}
        # define the model outputs
        # we want the class ids and probabilities for the top 3 classes
        logits = module(inputs['text'])
        outputs = {
            'vector': logits,
        }
        # export the model
        sess.run([tf.global_variables_initializer(), tf.tables_initializer()])
        tf.saved_model.simple_save(
            sess,
            model_path,
            inputs=inputs,
            outputs=outputs)
    return model_path

sagemaker_role = get_execution_role()
!tar -C "$PWD" -czf encoder.tar.gz encoder_model/
model_data = Session().upload_data(path='encoder.tar.gz', key_prefix='model')
env = {'SAGEMAKER_TFS_DEFAULT_MODEL_NAME': 'universal-sentence-encoder-large'}
model = Model(model_data=model_data, role=sagemaker_role, framework_version=1.12, env=env)
predictor = model.deploy(initial_instance_count=1, instance_type='ml.t2.medium')
I suppose you started from this example? https://github.com/awslabs/amazon-sagemaker-examples/tree/master/sagemaker-python-sdk/tensorflow_serving_container
It looks like you're not saving the TF Serving bundle properly: the model version number is missing, because of this line:
model_path = 'encoder_model/' + model_name
Replacing it with this should fix your problem:
model_path = '{}/{}/00000001'.format('encoder_model', model_name)
Your model artefact should look like this (I used the model in the notebook above):
mobilenet/
mobilenet/mobilenet_v2_140_224/
mobilenet/mobilenet_v2_140_224/00000001/
mobilenet/mobilenet_v2_140_224/00000001/saved_model.pb
mobilenet/mobilenet_v2_140_224/00000001/variables/
mobilenet/mobilenet_v2_140_224/00000001/variables/variables.data-00000-of-00001
mobilenet/mobilenet_v2_140_224/00000001/variables/variables.index
Then, upload to S3 and deploy.
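To double-check the layout before tarring and uploading, a quick sketch (it only assumes the corrected model_path above and the local encoder_model directory):
import os

# Walk the export directory; for TF Serving you want saved_model.pb and
# variables/* to sit under a numeric version folder such as 00000001.
for root, dirs, files in os.walk('encoder_model'):
    for name in files:
        print(os.path.join(root, name))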

Unable to launch multiple streaming pipelines (N-to-N pipelines) dynamically (using runtime value providers) in a single Dataflow job in Python

I am trying to launch a streaming Dataflow job which contains n pipelines.
Based on the configured topics and the corresponding BQ table for each topic, I want to launch one pipeline per topic inside a single streaming job.
My actual problem is that I have to create and upload a template for each and every project. What I want is to reuse one uploaded template and only pass configuration files when launching a new Dataflow job, changing the topic, subscription, dataset and BQ table.
In other words, I am unable to reuse the template.
Please help me with this and let me know whether it is possible, because Google only provides one-to-one templates, not many-to-many templates (e.g. three topics to three BQ tables, i.e. three data pipelines, n-to-n).
import logging
import os
import json

from google.cloud import storage
from apache_beam import Pipeline, ParDo, DoFn
from apache_beam.io import ReadFromPubSub, WriteToBigQuery, BigQueryDisposition
from apache_beam.options.pipeline_options import PipelineOptions, StandardOptions, WorkerOptions, GoogleCloudOptions, \
    SetupOptions


def _get_storage_service():
    storage_client = storage.Client \
        .from_service_account_json(
            json_credentials_path='C:\Users\dneema\PycharmProjects\iot_dataflow\df_stm_iot_pubsub_bq\service_account_credentials.json')
    print('storage service fetched')
    return storage_client


class RuntimeOptions(PipelineOptions):

    def __init__(self, flags=None, **kwargs):
        super(RuntimeOptions, self).__init__(flags, **kwargs)

    @classmethod
    def _add_argparse_args(cls, parser):
        parser.add_value_provider_argument('--bucket_name', type=str)
        parser.add_value_provider_argument('--config_json_path', type=str,)


class PipelineCreator:

    def __init__(self):
        self.options = PipelineOptions()
        storage_client = storage.Client.from_service_account_json(
            'service_account_credentials_updated.json')
        runtime_options = self.options.view_as(RuntimeOptions)
        bucket_name = str(runtime_options.bucket_name)
        config_json_path = str(runtime_options.config_json_path)
        # get the bucket with name
        bucket = storage_client.get_bucket(bucket_name)
        # get bucket file as blob
        blob = bucket.get_blob(config_json_path)
        # convert to string and load config
        json_data = blob.download_as_string()
        self.configData = json.loads(json_data)
        dataflow_config = self.configData['dataflow_config']
        self.options.view_as(StandardOptions).streaming = bool(dataflow_config['streaming'])
        self.options.view_as(SetupOptions).save_main_session = True
        worker_options = self.options.view_as(WorkerOptions)
        worker_options.max_num_workers = int(dataflow_config['max_num_worker'])
        worker_options.autoscaling_algorithm = str(dataflow_config['autoscaling_algorithm'])
        # worker_options.machine_type = str(dataflow_config['machine_type'])
        # worker_options.zone = str(dataflow_config['zone'])
        # worker_options.network = str(dataflow_config['network'])
        # worker_options.subnetwork = str(dataflow_config['subnetwork'])

    def run(self):
        os.environ['GOOGLE_APPLICATION_CREDENTIALS'] = 'dataflow-service-account.json'
        project_id = self.configData['project_id']
        dataset_id = self.configData['dataset_id']
        topics = self.configData['topics']
        table_ids = self.configData['bq_table_ids']
        error_table_id = self.configData['error_table_id']
        logger = logging.getLogger(project_id)
        logger.info(self.options.display_data())
        pipeline = Pipeline(options=self.options)
        size = len(topics)
        for index in range(size):
            print(topics[index])
            pipeline_name = "pipeline_" + str(index)
            logger.info("Launch pipeline :: " + pipeline_name)
            messages = pipeline | 'Read PubSub Message in ' + pipeline_name >> ReadFromPubSub(topic=topics[index])
            logger.info("Read PubSub Message")
            valid_messages, invalid_messages = messages | 'Convert Messages to TableRows in ' + pipeline_name >> ParDo(TransformMessageToTableRow()).with_outputs('invalid', main='valid')
            valid_messages | 'Write Messages to BigQuery in ' + pipeline_name >> WriteToBigQuery(table=table_ids[index],
                                                                                                 dataset=dataset_id,
                                                                                                 project=project_id,
                                                                                                 write_disposition=BigQueryDisposition.WRITE_APPEND)
        pipeline.run().wait_until_finish()


class TransformMessageToTableRow(DoFn):

    def process(self, element, *args, **kwargs):
        logging.getLogger('dataflow').log(logging.INFO, element)
        print element
        print("element type ", type(element))
        print("inside bq pardo")
        import json
        try:
            message_rows = json.loads(element)
            # if using emulator, uncomment below line
            message_rows = json.loads(message_rows)
            print 'loaded element'
        except:
            try:
                element = "[" + element + "]"
                message_rows = json.loads(element)
            except Exception as e:
                print(e)
                from apache_beam import pvalue
                yield [pvalue.TaggedOutput('invalid', [element, str(e)])]
        print(message_rows)
        print("message rows", type(message_rows))
        if not isinstance(message_rows, list):
            message_rows = [message_rows]
        # rows = list()
        if isinstance(message_rows, list):
            for row in message_rows:
                try:
                    new_row = dict()
                    for k, v in row.items():
                        new_row[str(k)] = v
                    # rows.append(new_row)
                    print(new_row)
                    yield new_row
                except Exception as e:
                    print(e)
                    from apache_beam import pvalue
                    yield pvalue.TaggedOutput('invalid', [row, str(e)])


if __name__ == '__main__':
    PipelineCreator().run()
Here the runtime arguments are bucket_name and config_json_path, which point to all the configuration-related pieces such as the dataset, BQ tables, topics/subscriptions and all the workflow options.
Is this possible or not? Google only provides one-to-one templates, not many-to-many templates (e.g. three topics to three BQ tables, i.e. three data pipelines, n-to-n).
Regarding the previously answered thread Unable to run multiple Pipelines in desired order by creating template in Apache Beam, you can run only one pipeline inside a template at any time.
You'll have to delegate the template creation to another service and pass the configuration with it; just follow the link inside that thread for how-to examples.
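As a sketch of that approach (hypothetical names throughout: it assumes the template has already been staged to GCS, that its ValueProvider parameters are called input_topic and output_table, and that google-api-python-client is installed), you would launch one templated job per topic/table pair instead of building N pipelines inside one template:
from googleapiclient.discovery import build

def launch_streaming_jobs(project_id, template_gcs_path, configs):
    # One Dataflow job per config entry, all launched from the same staged template.
    dataflow = build('dataflow', 'v1b3')
    for cfg in configs:
        body = {
            'jobName': 'pubsub-to-bq-{}'.format(cfg['name']),
            'parameters': {
                'input_topic': cfg['topic'],
                'output_table': cfg['bq_table'],
            },
        }
        request = dataflow.projects().templates().launch(
            projectId=project_id,
            gcsPath=template_gcs_path,
            body=body,
        )
        print(request.execute())
The parameter names in the body must match whatever the template exposes via add_value_provider_argument.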

How to order NDB query by the key?

I am trying to use task queues on Google App Engine. I want to use the Mapper class shown in the App Engine documentation "Background work with the deferred library".
I get an exception when ordering the query result by the key:
def get_query(self):
    ...
    q = q.order("__key__")
    ...
Exception:
  File "C:... mapper.py", line 41, in get_query
    q = q.order("__key__")
  File "C:\Program Files (x86)\Google\google_appengine\google\appengine\ext\ndb\query.py", line 1124, in order
    'received %r' % arg)
TypeError: order() expects a Property or query Order; received '__key__'
INFO 2017-03-09 11:56:32,448 module.py:806] default: "POST /_ah/queue/deferred HTTP/1.1" 500 114
The article is from 2009, so I guess something might have changed.
My environment: Windows 7, Python 2.7.9, Google App Engine SDK 1.9.50
There are somewhat similar questions about ordering in NDB on SO.
What bugs me is that this code is from the official doc, presumably updated in Feb 2017 (recently), and posted by someone within the top 0.1% of SO users by reputation.
So I must be doing something wrong. What is the solution?
Bingo.
Avinash Raj is correct. If it were an answer I'd accept it.
Here is the full class code
#!/usr/bin/python2.7
# -*- coding: utf-8 -*-
from google.appengine.ext import deferred
from google.appengine.ext import ndb
from google.appengine.runtime import DeadlineExceededError
import logging


class Mapper(object):
    """
    from https://cloud.google.com/appengine/docs/standard/python/ndb/queries
    corrected with suggestions from Stack Overflow
    http://stackoverflow.com/questions/42692319/how-to-order-ndb-query-by-the-key
    """

    # Subclasses should replace this with a model class (eg, model.Person).
    KIND = None

    # Subclasses can replace this with a list of (property, value) tuples to filter by.
    FILTERS = []

    def __init__(self):
        logging.info("Mapper.__init__: {}")
        self.to_put = []
        self.to_delete = []

    def map(self, entity):
        """Updates a single entity.

        Implementers should return a tuple containing two iterables (to_update, to_delete).
        """
        return ([], [])

    def finish(self):
        """Called when the mapper has finished, to allow for any final work to be done."""
        pass

    def get_query(self):
        """Returns a query over the specified kind, with any appropriate filters applied."""
        q = self.KIND.query()
        for prop, value in self.FILTERS:
            q = q.filter(prop == value)
        q = q.order(self.KIND.key)  # the fixed version. The original q.order('__key__') failed
        # see http://stackoverflow.com/questions/42692319/how-to-order-ndb-query-by-the-key
        return q

    def run(self, batch_size=100):
        """Starts the mapper running."""
        logging.info("Mapper.run: batch_size: {}".format(batch_size))
        self._continue(None, batch_size)

    def _batch_write(self):
        """Writes updates and deletes entities in a batch."""
        if self.to_put:
            ndb.put_multi(self.to_put)
            self.to_put = []
        if self.to_delete:
            ndb.delete_multi(self.to_delete)
            self.to_delete = []

    def _continue(self, start_key, batch_size):
        q = self.get_query()
        # If we're resuming, pick up where we left off last time.
        if start_key:
            key_prop = getattr(self.KIND, '_key')
            q = q.filter(key_prop > start_key)
        # Keep updating records until we run out of time.
        try:
            # Steps over the results, returning each entity and its index.
            for i, entity in enumerate(q):
                map_updates, map_deletes = self.map(entity)
                self.to_put.extend(map_updates)
                self.to_delete.extend(map_deletes)
                # Do updates and deletes in batches.
                if (i + 1) % batch_size == 0:
                    self._batch_write()
                # Record the last entity we processed.
                start_key = entity.key
            self._batch_write()
        except DeadlineExceededError:
            # Write any unfinished updates to the datastore.
            self._batch_write()
            # Queue a new task to pick up where we left off.
            deferred.defer(self._continue, start_key, batch_size)
            return
        self.finish()
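For completeness, a minimal usage sketch (the Person model, the filter and the deferred call are hypothetical, not from the original post):
class Person(ndb.Model):
    name = ndb.StringProperty()
    active = ndb.BooleanProperty()

class DeactivateMapper(Mapper):
    # Deactivate every currently active Person entity.
    KIND = Person
    FILTERS = [(Person.active, True)]

    def map(self, entity):
        entity.active = False
        return ([entity], [])  # (to_put, to_delete)

# from a request handler:
# deferred.defer(DeactivateMapper().run)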

Method works fine in iPython but runs endlessly on Gunicorn

I wrote an app in the Falcon framework that I am running using the Gunicorn server. When the server starts, the app first trains a random forest model:
forest = sklearn.ensemble.ExtraTreesClassifier(n_estimators=150, n_jobs=-1)
forest.fit(x, t)
and then returns probabilities for requests posted to it. This works fine on my server when I run the code in IPython (training this model takes 15 s, running on 12 cores).
When I was writing the app, I set n_estimators=10 and everything was working. When I finished tweaking the app, I set n_estimators back to 150. However, when I then ran Gunicorn with gunicorn -c ./app.conf app:app, I could see in htop that forest.fit(x, t) runs for a few seconds on all cores, after which the usage of all cores drops to 0. After that, the method keeps running indefinitely until the Gunicorn worker times out after 10 minutes.
This is my first time using Gunicorn and Falcon, or any WSGI technologies for that matter, and I am clueless as to what might be causing the problem or how to troubleshoot it.
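One way to at least see where the worker is stuck (a troubleshooting sketch, assuming Python 3 and shell access to the server, not a fix):
import faulthandler
import signal

# Register a handler so that `kill -USR1 <worker pid>` dumps the stack of every
# thread in that Gunicorn worker to stderr without killing the process.
faulthandler.register(signal.SIGUSR1, all_threads=True)
Putting this at the top of the app module and sending SIGUSR1 to the hanging worker shows whether fit() is blocked inside its parallel worker pool or somewhere else.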
Edit:
The settings file for gunicorn:
# app.conf
# run with gunicorn -c ./app.conf app:app
import sys
sys.path.append('/home/user/project/Module')

bind = "127.0.0.1:8123"
timeout = 60*20  # Timeout worker after more than 20 minutes
The falcon code:
class Processor(object):
    """ Processor object handles the training of the models,
    feature generation of requests and probability predictions.
    """
    # data statistics used in feature calculations
    data_statistics = {}
    # Classification targets
    targets = ()
    # Select features for the models.
    cols1 = [ #...
    ]
    cols2 = [ #...
    ]
    model_1 = ExtraTreesClassifier(n_estimators=150, n_jobs=-1)
    model_2 = ExtraTreesClassifier(n_estimators=150, n_jobs=-1)

    def __init__(self, features_dataset, tr_prepro):
        # Get the datasets
        da_1, da_2 = self.prepare_datasets(features_dataset)
        # Train models
        # ----THIS IS WHERE THE PROGRAM HANGS -----------------------------------
        self.model_1.fit(da_1.x, utils.vectors_to_labels(da_1.t))
        # -----------------------------------------------------------------------
        self.model_2.fit(da_2.x, utils.vectors_to_labels(da_2.t))
        # Generate data statistics for feature calculations
        self.calculate_data_statistics(tr_prepro)

    def prepare_datasets(self, features_dataset):
        sel_cols = [ #...
        ]
        # Build dataset
        d = features_dataset[sel_cols].dropna()
        da, scalers = ft.build_dataset(d, scaling='std', target_feature='outcome')
        # Binarize data
        da_bin = utils.binirize_dataset(da)
        # What are the classification targets
        self.targets = da_bin.t_labels
        # Prepare the datasets
        da_1 = da_bin.select_attributes(self.cols1)
        da_2 = da_bin.select_attributes(self.cols2)
        return da_1, da_2

    def calculate_data_statistics(self, tr_prepro):
        logger.info('Getting data and feature statistics...')
        #...
        logger.info('Done.')

    def import_data(self, data):
        # convert dictionary generated from json to Pandas DataFrame
        return tr

    def generate_features(self, tr):
        # Preprocessing, Feature calculations, imputation
        return tr

    def predict_proba(self, data):
        # Convert Data
        tr = self.import_data(data)
        # Generate features
        tr = self.generate_features(tr)
        # Select model based on missing values - either no. 1 or no. 2
        tr_1 = #...
        tr_2 = #...
        # Get the probabilities from different models
        if tr_1.shape[0] > 0:
            tr_1.loc[:, 'prob'] = self.model_1.predict_proba(tr_1.loc[:, self.cols1])[:, self.targets.index('POSITIVE')]
        if tr_2.shape[0] > 0:
            tr_2.loc[:, 'prob'] = self.model_2.predict_proba(tr_2.loc[:, self.cols2])[:, self.targets.index('POSITIVE')]
        return pd.concat([tr_1, tr_2], axis=0)

    @staticmethod
    def export_single_result(tr):
        result = {'sample_id': tr.loc[0, 'sample_id'],
                  'batch_id': tr.loc[0, 'batch_id'],
                  'prob': tr.loc[0, 'prob']
                  }
        return result


class JSONTranslator(object):

    def process_request(self, req, resp):
        """Generic method for extracting json from requests

        Throws
        ------
        HTTP 400 (Bad Request)
        HTTP 753 ('Syntax Error')
        """
        if req.content_length in (None, 0):
            # Nothing to do
            return
        body = req.stream.read()
        if not body:
            raise falcon.HTTPBadRequest('Empty request body',
                                        'A valid JSON document is required.')
        try:
            req.context['data'] = json.loads(body.decode('utf-8'))
        except (ValueError, UnicodeDecodeError):
            raise falcon.HTTPError(falcon.HTTP_753,
                                   'Malformed JSON',
                                   'Could not decode the request body. The '
                                   'JSON was incorrect or not encoded as '
                                   'UTF-8.')

    def process_response(self, req, resp, resource):
        """Generic method for putting response to json

        Does not do anything if 'result_json' not in req.context.
        """
        if 'result_json' not in req.context:
            return
        resp.body = json.dumps(req.context['result_json'])


class ProbResource(object):

    def __init__(self, processor):
        self.schema_raw = open(config.__ROOT__ + "app_docs/broadcast_schema.json").read()
        self.schema = json.loads(self.schema_raw)
        self.processor = processor

    def validate_request(self, req):
        """ Validate the request json against the schema.

        Throws
        ------
        HTTP 753 ('Syntax Error')
        """
        data = req.context['data']
        # validate the json
        try:
            v = jsonschema.Draft4Validator(self.schema)  # using jsonschema draft 4
            err_msg = str()
            for error in sorted(v.iter_errors(data), key=str):
                err_msg += str(error)
            if len(err_msg) > 0:
                raise falcon.HTTPError(falcon.HTTP_753,
                                       'JSON failed validation',
                                       err_msg)
        except jsonschema.ValidationError as e:
            print("Failed to use schema:\n" + str(self.schema_raw))
            raise e

    def on_get(self, req, resp):
        """Handles GET requests

        Throws
        ------
        HTTP 404 (Not Found)
        """
        self.validate_request(req)
        data = req.context['data']
        try:
            # get probability
            tr = self.processor.predict_proba(data)
            # convert pandas dataframe to dictionary
            result = self.processor.export_single_result(tr)
            # send the dictionary away
            req.context['result_json'] = result
        except Exception as ex:
            raise falcon.HTTPError(falcon.HTTP_404, 'Error', ex.message)
        resp.status = falcon.HTTP_200


# Get data
features_data = fastserialize.load(config.__ROOT__ + 'data/samples.s')
prepro_data = fastserialize.load(config.__ROOT__ + 'data/prepro/samples_preprocessed.s')

# Get the models - this is where the code hangs
sp = SampleProcessor(features_data, prepro_data)

app = falcon.API(middleware=[JSONTranslator()])
prob = ProbResource(sp)
app.add_route('/prob', prob)