Related
models.py

from datetime import datetime
from cmrapp import db, login_manager
from flask_login import UserMixin

@login_manager.user_loader
def load_user(user_id):
    return Registration.query.get(int(user_id))

class Registration(db.Model, UserMixin):
    id = db.Column(db.Integer(), primary_key=True)
    regid = db.Column(db.String(), unique=True)
    section = db.Column(db.String(), nullable=False)
    username = db.Column(db.String(20), unique=True, nullable=False)
    password = db.Column(db.String(60), nullable=False)

    def __repr__(self):
        return f"Registration('{self.username}','{self.regid}')"

class Participation(db.Model):
    id = db.Column(db.Integer(), primary_key=True)
    pname = db.Column(db.String(20), nullable=False)
    pregid = db.Column(db.String(), unique=True)
    psection = db.Column(db.String(), nullable=False)

    def __repr__(self):
        return f"Participation('{self.pname}','{self.pregid}','{self.psection}')"

@login_manager.user_loader
def load_user(mregid):
    return Memberdb.query.get(int(mregid))

class Memberdb(db.Model, UserMixin):
    id = db.Column(db.Integer(), primary_key=True)
    mregid = db.Column(db.String(20), nullable=False, unique=True)
    mname = db.Column(db.String(20), unique=True, nullable=False)
    mpassword = db.Column(db.String(60), nullable=False)
    organizers = db.relationship('Eventdb', backref='owner', lazy=True)

    def __repr__(self):
        return f"Memberdb('{self.mname}','{self.id}')"
****routes.py****
from flask import render_template, url_for, request, redirect, flash
from cmrapp import app, db, bcrypt
from cmrapp.data import fetch
from datetime import datetime, date
from cmrapp.userforms import Registerform, identityform, studentform, memberform, blockform
from cmrapp.models import Registration, Participation, Memberdb
from flask_login import login_user, current_user, logout_user, login_required

@app.route('/')
@app.route('/signup', methods=["GET", "POST"])
def signuppage():
    if current_user.is_authenticated:
        return redirect(url_for('homepage'))
    form = Registerform()
    if form.validate_on_submit():
        hashed_password = bcrypt.generate_password_hash(form.password.data).decode('utf-8')
        user = Registration(username=form.username.data, regid=form.regno.data,
                            password=hashed_password, section=form.section.data)
        db.session.add(user)
        db.session.commit()
        flash(f"Your Account has been Created! {form.username.data}", 'success')
        return redirect(url_for('sloginpage'))
    else:
        return render_template('signup.html', title="Sign Up", form=form, consent=consent, diff=diff)

@app.route('/slogin', methods=["GET", "POST"])
def sloginpage():
    if current_user.is_authenticated:
        return redirect(url_for('homepage'))
    form = studentform()
    if form.validate_on_submit():
        user = Registration.query.filter_by(username=form.studentname.data).first()
        if user:
            if user and bcrypt.check_password_hash(user.password, form.spassword.data):
                login_user(user, True)
                next_page = request.args.get('next')
                return redirect(next_page) if next_page else redirect(url_for('homepage'))
            else:
                flash(f'Login Unsuccessful ! Check Name and Password.', 'danger')
                return render_template('slogin.html', title='Student Login', form=form)
        else:
            flash(f'User doesn\'t exist', 'danger')
            return render_template('slogin.html', title='Student Login', form=form)

@app.route('/msignup', methods=["GET", "POST"])
def msignuppage():
    form = memberform()
    if form.validate_on_submit():
        hashed_password = bcrypt.generate_password_hash(form.memberpassword.data).decode('utf-8')
        user = Memberdb(mname=form.membername.data, mregid=form.secureid.data,
                        mpassword=hashed_password)
        db.session.add(user)
        db.session.commit()
        flash('Your Account has been Created', 'success')
        return redirect('mlogin')
    else:
        return render_template('msignup.html', title="Member Account", form=form)

# user = user1
@app.route('/mlogin', methods=["GET", "POST"])
def mloginpage():
    form = memberform()
    if form.validate_on_submit():
        # if form.membername.data == "Lucky" and form.memberpassword.data == "luckyG2611#":
        user = Memberdb.query.filter_by(mname=form.membername.data).first()
        if user:
            if user and bcrypt.check_password_hash(user.mpassword, form.memberpassword.data):
                login_user(user, True)
                next_page = request.args.get('next')
                return redirect(next_page) if next_page else redirect(url_for('mlist'))
            else:
                flash(f'Login Unsuccessful ! Check Name and Password.', 'danger')
                return render_template('mlogin.html', title='Member Login', form=form)
        else:
            flash(f'User doesn\'t exist', 'danger')
            return render_template('mlogin.html', title='Member Login', form=form)
****userforms.py****

from flask_wtf import FlaskForm
from wtforms import StringField, PasswordField, validators, SubmitField, BooleanField, ValidationError, SelectField, DateField, TextAreaField
from wtforms.validators import DataRequired, Length, EqualTo
from cmrapp.models import Registration, Participation, Memberdb
from flask_login import current_user

class Registerform(FlaskForm):
    username = StringField('Name',
                           validators=[DataRequired(), Length(min=2, max=20),
                                       validators.regexp('^[a-zA-Z\s]+$', message="Name contains only string ! Try Again")])
    section = SelectField(u'Section',
                          choices=[('1yr-A', 'Ist-A'), ('1yr-B', 'Ist-B'), ('1yr-C', 'Ist-C'),
                                   ('2yr-A', 'Sec-A'), ('2yr-B', 'Sec-B'), ('2yr-C', 'Sec-C'),
                                   ('3yr-A', 'Third-A'), ('3yr-B', 'Third-B'), ('3yr-C', 'Third-C')],
                          validators=[DataRequired()], validate_choice=False, coerce=str)
    regno = StringField('Registration No.',
                        validators=[DataRequired(), Length(min=10, max=10),
                                    validators.regexp('[19]+[DBCAG]+\d\d\d', message="Use your Respective Regno.")])
    password = PasswordField('Password',
                             validators=[DataRequired(), Length(min=10)])
    submit = SubmitField('Sign Up')

    def validate_username(self, username):
        user = Registration.query.filter_by(username=username.data).all()
        if user:
            raise ValidationError('Username Already Exist :(')

    def validate_regno(self, regno):
        regid = Registration.query.filter_by(regid=regno.data).all()
        if regid:
            raise ValidationError('Regno. Already Exist :(')

class studentform(FlaskForm):
    studentname = StringField('Name',
                              validators=[DataRequired(), Length(min=2, max=20)])
    spassword = PasswordField('Password',
                              validators=[DataRequired(), Length(min=10)])
    studentin = SubmitField('Log In')

class identityform(FlaskForm):
    identitypswd = PasswordField(validators=[DataRequired(), Length(min=10)])
    submit = SubmitField('Want to join')

    def validate_regid(self, pregid):
        user1 = Participation.query.filter_by(pregid=current_user.regid.data).all()
        if user1:
            raise ValidationError('Already Enrolled :(')

class memberform(FlaskForm):
    membername = StringField('Member Name:',
                             validators=[DataRequired(), Length(min=2, max=20),
                                         validators.regexp('^[a-zA-Z\s]+$', message="Name contains only string ! Try Again")])
    memberpassword = PasswordField('Password', validators=[DataRequired(), Length(min=10)])
    secureid = StringField('UserId',
                           validators=[DataRequired(), Length(max=10),
                                       validators.regexp('[19]+[BCA1.]+\d\d\d')])
    membersubmit = SubmitField('Get In')

    def validate_mname(self, mname):
        user1 = Memberdb.query.filter_by(mname=mname.data).all()
        if user1 is not None:
            raise ValidationError('Username Already Exist :(')

    def validate_mregid(self, mregid):
        mregid = Memberdb.query.filter_by(mregid=mregid.data).all()
        if mregid is not None:
            raise ValidationError('Already Exist :(')
I have imported all the required modules as well, but it's not working; only the first login page works. If anybody knows what's going on here, can you help me out?
You've coded two @login_manager.user_loader callbacks. When you register that loader multiple times, the last one wins. (See here)
You'll have to choose which of Registration or Memberdb your app will use to represent a logged-in user.
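If both tables genuinely need to be able to log in, a minimal sketch of one workaround (an assumption on my part, not from the original answer: it requires that each model override get_id() to return a prefixed value such as 'student:<id>' or 'member:<id>', which login_user() then stores in the session):

from cmrapp import login_manager
from cmrapp.models import Registration, Memberdb

@login_manager.user_loader
def load_user(user_id):
    # user_id is whatever get_id() returned when login_user() was called,
    # e.g. "student:3" for Registration or "member:7" for Memberdb.
    kind, _, raw_id = user_id.partition(':')
    if kind == 'member':
        return Memberdb.query.get(int(raw_id))
    return Registration.query.get(int(raw_id))

With a single loader like this there is only one registration, and the prefix decides which table to look in.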
I want to scrape a site with Scrapy that lists its products in categories. I'm new to Scrapy and only started getting my head around it today, but I thought I was getting the gist of it on simple scrapes, so I attempted to scrape the category URLs and return them to be scraped further. It appears I'm missing something.
Someone answered and fixed my code; here is the latest version, since I thought I'd have another go at learning Scrapy today. But it still isn't scanning recursively: it just seems to loop through all the pages and never gets into parsing the items.
It never seems to enter the else statement:
yield scrapy.Request(url = response.url,callback = self.parse_item)
I can debug it to check that the items are parsed correctly if I force it to output items without looping.
That is, if I change the following:
if product_pages:
    for product_url in product_pages:
        product_url2 = str(self.base_url + product_url)
        self.log("Queued up: %s" % product_url2)
        yield scrapy.Request(url=product_url2, callback=self.parse_product_pages)
else:
    yield scrapy.Request(url=response.url, callback=self.parse_item)
to
if product_pages:
    for product_url in product_pages:
        product_url2 = str(self.base_url + product_url)
        self.log("Queued up: %s" % product_url2)
        yield scrapy.Request(url=product_url2, callback=self.parse_item)
else:
    yield scrapy.Request(url=response.url, callback=self.parse_product_pages)
Here is my code. I'm working in Python 2.7.
import scrapy
from scrapy.spiders import CrawlSpider, Rule
from scrapy.selector import HtmlXPathSelector
from ybscrape.items import Product
from scrapy.linkextractors import LinkExtractor
from scrapy.linkextractors.sgml import SgmlLinkExtractor

class ybracingSpider(CrawlSpider):
    name = 'ybscrape2'
    download_delay = 0.75

    def __init__(self, *args, **kwargs):
        super(ybracingSpider, self).__init__(*args, **kwargs)
        self.allowed_domains = ['http://www.ybracing.com/', 'www.ybracing.com', 'www.esellepro.com']
        self.base_url = 'http://www.ybracing.com'
        self.start_urls = ['http://www.ybracing.com/karting/']

    def parse_start_url(self, response):
        category = response.xpath("//h2/a/@href").extract()
        #loop over catagory pages take the product link and add all pages url
        for product in category:
            all_pages = '?itemsperpage=99999'
            category_url = str(self.base_url + product + all_pages)
            self.log("Queued up: %s" % category_url)
            yield scrapy.Request(url=category_url, callback=self.parse_product_pages)

    def parse_product_pages(self, response):
        product_pages = response.xpath("//li/div/div/h3/a/@href").extract()
        #print("debug pause")
        #print(product_pages)
        #wait = input("PRESS ENTER TO CONTINUE.")
        #print("continue")
        if product_pages:
            for product_url in product_pages:
                product_url2 = str(self.base_url + product_url)
                self.log("Queued up: %s" % product_url2)
                yield scrapy.Request(url=product_url2, callback=self.parse_product_pages)
        else:
            yield scrapy.Request(url=response.url, callback=self.parse_item)

    def parse_item(self, response):
        item = Product()
        item['description'] = response.xpath("//div[@id='Tabbed-Container-Details']/div[2]/div/text()").extract()
        item['product_title'] = response.xpath("//h3[@class='Product-Heading']/text()").extract()
        item['price'] = response.xpath("//div[@id='Product-Price']/text()").extract()
        table_rows = response.xpath("//table[@id='SpecificationTab']/tr[*]/td[1]//text()").extract()
        yield item
My items.py:
from scrapy.item import Item, Field

class Product(Item):
    product_title = Field()
    description = Field()
    price = Field()
What I'm expecting my code to do, step by step:
1. Grab all the links within the first export (the categories). This works.
2. Look at all 9999 products inside each category and export the list. This works.
3. Take each product URL from the export and append it to the base URL to get to the product page. This works.
4. Then read data from the product page and add it to items. It never gets here unless I skip the if statement, but that isn't recursive and won't handle subcategories that way.
Here, I have made some changes to your code, and now it's working:
import scrapy
from scrapy.spiders import CrawlSpider, Rule
from scrapy.selector import HtmlXPathSelector
from demo.items import DemoItem
from scrapy.linkextractors import LinkExtractor
from scrapy.linkextractors.sgml import SgmlLinkExtractor

class DemoSpider(CrawlSpider):
    name = 'ybracing2'

    def __init__(self, *args, **kwargs):
        super(DemoSpider, self).__init__(*args, **kwargs)
        self.allowed_domains = ['http://www.ybracing.com/', 'www.ybracing.com', 'www.esellepro.com']
        self.base_url = 'http://www.ybracing.com'
        self.start_urls = ['http://www.ybracing.com/racewear/']

    def parse_start_url(self, response):
        category = response.xpath("//h2/a/@href").extract()
        #loop over catagory pages take the product link and add all pages url
        for product in category:
            all_pages = '?itemsperpage=99999'
            category_url = str(self.base_url + product + all_pages)
            self.log("Queued up: %s" % category_url)
            yield scrapy.Request(url=category_url, callback=self.parse_product_pages)

    def parse_product_pages(self, response):
        product_pages = response.xpath("//div[@class='Product']/a/@href").extract()
        if product_pages:
            for product_url in product_pages:
                product_url2 = str(self.base_url + product_url)
                self.log("Queued up: %s" % product_url2)
                yield scrapy.Request(url=product_url2, callback=self.parse_item)
        else:
            yield scrapy.Request(url=response.url, callback=self.parse_product_pages)

    def parse_item(self, response):
        item = DemoItem()
        dirty_data = {}
        item['product_title'] = response.xpath("//h3[@class='Product-Heading']/text()").extract()
        item['price'] = response.xpath("//div[@id='Product-Price']/text()").extract()
        item['description'] = response.xpath("//div[@id='Tabbed-Container-Details']/div[2]/div/text()").extract()
        #image['product_image'] =
        # for variable in dirty_data.keys():
        #     if dirty_data[variable]:
        #         if variable == 'price':
        #             item[variable] = float(''.join(dirty_data[variable]).strip().replace('$', '').replace(',', ''))
        #         else:
        #             item[variable] = ''.join(dirty_data[variable]).strip()
        yield item
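One detail worth noting (my own observation, not part of the answer above): the original else branch re-requested response.url, the page that had just been downloaded, and Scrapy's duplicate-request filter drops such repeat requests by default, which would explain why parse_item was never reached. If you ever do need to hand the current response to another callback, a sketch of two ways to do it inside parse_product_pages:

# 1) bypass the duplicate filter explicitly
yield scrapy.Request(url=response.url, callback=self.parse_item, dont_filter=True)

# 2) or call the other callback directly on the response you already have
for item in self.parse_item(response):
    yield item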
Non-coder here. I have an activity I'm editing for beginning students. It was created a year ago by someone else. The project is pre-created for the students; they are supposed to deploy it, create a bucket, and upload some files to the bucket from within the app. When I try it, I get this error:
Traceback (most recent call last):
  File "/base/data/home/runtimes/python27/python27_lib/versions/third_party/webapp2-2.5.2/webapp2.py", line 1535, in __call__
    rv = self.handle_exception(request, response, e)
  File "/base/data/home/runtimes/python27/python27_lib/versions/third_party/webapp2-2.5.2/webapp2.py", line 1529, in __call__
    rv = self.router.dispatch(request, response)
  File "/base/data/home/runtimes/python27/python27_lib/versions/third_party/webapp2-2.5.2/webapp2.py", line 1278, in default_dispatcher
    return route.handler_adapter(request, response)
  File "/base/data/home/runtimes/python27/python27_lib/versions/third_party/webapp2-2.5.2/webapp2.py", line 1102, in __call__
    return handler.dispatch()
  File "/base/data/home/runtimes/python27/python27_lib/versions/third_party/webapp2-2.5.2/webapp2.py", line 572, in dispatch
    return self.handle_exception(e, self.app.debug)
  File "/base/data/home/runtimes/python27/python27_lib/versions/third_party/webapp2-2.5.2/webapp2.py", line 570, in dispatch
    return method(*args, **kwargs)
  File "/base/data/home/apps/s~lively-armor-126415/1.391710117126333360/main.py", line 205, in get
    for imagefile in gcs.listbucket(bucket_path(), delimiter='/'):
  File "/base/data/home/apps/s~lively-armor-126415/1.391710117126333360/main.py", line 114, in bucket_path
    return '/' + bucket_name + '/'
TypeError: cannot concatenate 'str' and 'NoneType' objects
Here is (part of) main.py:
#!/usr/bin/env python
import webapp2
import sys
import os
import logging
import urllib
import zipfile
import StringIO
import jinja2
import datetime
import mimetypes
import json
from google.appengine.api import users
from google.appengine.api import mail
from google.appengine.api import xmpp
from google.appengine.api import channel
from google.appengine.api import app_identity
from google.appengine.api import images
from google.appengine.api import memcache
from google.appengine.api import taskqueue
from google.appengine.api import search
from google.appengine.ext import ndb
from google.appengine.datastore.datastore_query import Cursor
sys.path.insert(0, 'libs')
libpath = os.path.join(os.path.dirname(__file__), 'lib')
sys.path.append(libpath)
from wtforms import Form
from wtforms import StringField,TextAreaField,SelectField,DecimalField
from wtforms import FileField
from wtforms import SubmitField
from wtforms import validators
import cloudstorage as gcs
from datamodels import Product, ProductCategory
JINJA_ENV = jinja2.Environment(
    loader=jinja2.FileSystemLoader(os.path.dirname(__file__)),
    extensions=['jinja2.ext.autoescape'],
    autoescape=True
)
# Add custom filter for currency output in JINJA2
def currencyformat(value):
    template = "${:.2f}"
    currency_string = template.format(value)
    return currency_string

JINJA_ENV.filters['currencyformat'] = currencyformat

PRODUCT_GROUPS = [
    ('1', 'Bathroom'),
    ('2', 'Decor'),
    ('3', 'Lumber'),
    ('4', 'Materials'),
    ('5', 'Outdoors'),
    ('6', 'Tools')]
def login_html():
    # Load differently based on whether logged in to Google account
    user = users.get_current_user()
    if user:
        url = users.create_logout_url('/')
        username = user.nickname()
    else:
        url = users.create_login_url('/')
        username = ''
    template_values = {
        'url': url,
        'username': username
    }
    greeting_template = JINJA_ENV.get_template('html/greeting.htm')
    greeting_html = greeting_template.render(template_values)
    return greeting_html

def store_product(prodcode, title, price, category, description):
    logging.info('Add product %s to category %s in database', title, category)
    category_key = ndb.Key(ProductCategory, category)
    product = Product(
        parent=category_key,
        id=prodcode,
        title=title,
        price=price,
        category=category,
        desc=description
    )
    product.put()
    try:
        # Create a searchable document to use with Search API
        document = search.Document(
            doc_id=prodcode,
            fields=[
                search.TextField(name='title', value=title),
                search.TextField(name='category', value=category),
                search.HtmlField(name='desc', value=description),
                search.NumberField(name='price', value=float(price)),
            ])
        index = search.Index(name="ProductIndex")
        index.put(document)
    except:
        logging.exception("Unable to store search document for " + prodcode)

def file_extension(filename):
    return os.path.splitext(filename)[-1]

def bucket_path():
    bucket_name = app_identity.get_default_gcs_bucket_name()
    return '/' + bucket_name + '/'
class EditProductForm(Form):
    # Test and message for currency format
    cur_regex = '^\s*(?=.*[1-9])\d*(?:\.\d{1,2})?\s*$'
    pricemsg = 'Enter a price with up to two decimal places (no dollar symbol)'
    prodcode = StringField(
        '* Product Code:',
        [validators.Length(min=1, max=10)])
    price = StringField(
        '* Product Price:',
        [validators.Regexp(cur_regex, message=pricemsg)])
    title = StringField(
        '* Product Title:',
        [validators.Length(min=1, max=500)])
    category = SelectField(
        '* Product Group:',
        choices=PRODUCT_GROUPS,
        default='Hardware')
    description = TextAreaField(
        '* Product Description:',
        [validators.Required()])
    submitbtn = SubmitField('Save Product')

class EditImagesForm(Form):
    image = FileField('File to Upload:')
    submitbtn = SubmitField('Upload')
class BucketImageHandler(webapp2.RequestHandler):
    # Return image from cloud storage
    def get(self, image_file):
        self.response.headers['Content-Type'] = 'image/png'
        # Get complete file name
        filename = bucket_path() + image_file
        cache_name = 'productimages:{}'.format(image_file)
        # Get image data from memcache
        filedata = memcache.get(cache_name)
        if filedata is None:
            try:
                # Get image from cloud storage
                gcs_file = gcs.open(filename)
                filedata = gcs_file.read()
                memcache.add(cache_name, filedata, 3600)
            except:
                # Get placeholder image from static images
                self.redirect('/images/image_placeholder.png')
        self.response.out.write(filedata)
class UploadHandler(webapp2.RequestHandler):
    # Display upload page
    def get(self):
        # Allow only for admin users
        if users.is_current_user_admin():
            # Delete image if one is passed in
            # (in finished site, add a prompt to confirm)
            image_filename = self.request.get('del')
            if image_filename != '':
                datastore_filename = bucket_path() + image_filename
                logging.info('>>> DELETED FILE %s', image_filename)
                try:
                    gcs.delete(datastore_filename)
                except:
                    pass
            # Gather image data to pass in to HTML template
            MAX_IMAGES = 10
            image_count = 0
            reached_end = True
            last_image = 1
            start = self.request.get('s')
            if start is '':
                first_image = 1
            else:
                first_image = int(start)
            if first_image < 1:
                first_image = 1
            # Get images from Cloud Storage
            image_gallery = []
            for imagefile in gcs.listbucket(bucket_path(), delimiter='/'):
                image_count += 1
                reached_first_image = (image_count >= first_image)
                reached_last_image = (image_count >= first_image + MAX_IMAGES)
                if reached_first_image and not reached_last_image:
                    # Files to show for this page
                    filename = imagefile.filename.split('/')[-1]
                    if file_extension(filename) == '.png':
                        this_image = dict(
                            name=filename,
                            size=imagefile.st_size,
                            safename=urllib.quote_plus(filename)
                        )
                        image_gallery.append(this_image)
                    last_image = image_count
            back_start_index = first_image - MAX_IMAGES
            next_start_index = last_image + 1
            # Prepare image edit form for HTML template
            new_images_form = EditImagesForm()
            # Populate batch upload page
            template_values = {
                'admin_mode': users.is_current_user_admin(),
                'greeting_html': login_html(),
                'editform': new_images_form,
                'gallery': image_gallery,
                'start_image_index': first_image,
                'end_image_index': last_image,
                'image_count': image_count,
                'back_start_index': back_start_index,
                'next_start_index': next_start_index
            }
            image_mgr_template = JINJA_ENV.get_template('html/uploadmgr.htm')
            image_mgr_html = image_mgr_template.render(template_values)
            self.response.write(image_mgr_html)
        else:
            # Unauthorized user - raise an error
            self.abort(401)
    # Post new image or batch update to the gallery
    def post(self):
        # Allow batch upload only for admin users
        if users.is_current_user_admin():
            file_data = self.request.get('image')
            upload_filename = ''
            try:
                upload_filename = os.path.basename(self.request.POST['image'].filename)
            except:
                logging.info('NO FILE SPECIFIED')
                self.redirect('/upload')
            upload_file_extension = file_extension(upload_filename)
            datastore_filename = bucket_path() + upload_filename
            logging.info('Store file to %s', datastore_filename)
            if upload_file_extension == '.png':
                # Write image to cloud storage
                if len(file_data) > 0:
                    gcs_file = gcs.open(
                        datastore_filename,
                        'w', content_type='image/png')
                    file_data = images.resize(file_data, 400, 400)
                    gcs_file.write(file_data)
                    gcs_file.close()
                # Upload done -- return to gallery
                self.redirect('/upload')
            elif upload_file_extension == '.zip':
                # Save uploaded Zip file to Google Cloud Storage
                gcs_file = gcs.open(
                    datastore_filename,
                    'w', content_type='application/zip')
                gcs_file.write(file_data)
                gcs_file.close()
                logging.info('>>> STORED ZIP FILE %s', datastore_filename)
                # Start background task to extract the Zip file
                client_id = 'bgmsg-' + users.get_current_user().user_id()
                email_address = users.get_current_user().email()
                taskqueue.add(
                    url='/processuploads',
                    method="POST",
                    params={'zipfile': datastore_filename,
                            'address': email_address,
                            'clientid': client_id,
                            'starttime': datetime.datetime.now()}
                )
                # Upload done -- return to gallery
                self.redirect('/upload')
            else:
                # Ignore other file types
                self.redirect('/upload')
        else:
            # Unauthorized user - raise an error
            self.abort(401)
class BatchProcessBackgroundHandler(webapp2.RequestHandler):
    def post(self):
        # Task queue handler - Extract and process uploaded Zip file
        # Check header to ensure request came from inside App Engine platform
        if 'X-AppEngine-TaskName' in self.request.headers:
            zip_file_name = self.request.get('zipfile')
            address = self.request.get('address')
            client_id = self.request.get('clientid')
            start_time = self.request.get('starttime')
            # logging.info('>>> EXTRACTING ZIP FILE %s', zip_file_name)
            # Get zip data from cloud storage
            gcs_file = gcs.open(zip_file_name)
            gcs_data = gcs_file.read()
            zip_data = StringIO.StringIO(gcs_data)
            # Open the archive for reading
            zip_file = zipfile.ZipFile(zip_data, 'r')
            # Extract each file in the archive and process based on extension
            for extracted_file_name in zip_file.namelist():
                extracted_file_extension = file_extension(extracted_file_name)
                if extracted_file_extension == '.png':
                    # Read Zip file data as StringIO
                    extracted_image_data = zip_file.read(extracted_file_name)
                    # Resize images no wider or taller than 400 pixels
                    extracted_image_data = images.resize(
                        extracted_image_data,
                        400,
                        400)
                    datastore_filename = bucket_path() + extracted_file_name
                    gcs_file = gcs.open(
                        datastore_filename,
                        'w',
                        content_type='image/png')
                    gcs_file.write(extracted_image_data)
                    gcs_file.close()
                elif extracted_file_extension == '.txt':
                    extracted_data = zip_file.read(extracted_file_name)
                    lines = extracted_data.split('\r\n')
                    for line in lines:
                        if line:
                            line_values = line.split('\t')
                            category = line_values[0]
                            prodcode = line_values[1]
                            title = line_values[2]
                            price = line_values[3]
                            description = line_values[4]
                            store_product(
                                prodcode,
                                title,
                                price,
                                category,
                                description)
            # Close the Zip file
            zip_file.close()
            # Delete the Zip file when done
            gcs.delete(zip_file_name)
            # Compose success message
            notify_title = 'Batch Update Successfully Completed'
            message_body = 'Batch file ' + zip_file_name + '\n'
            message_body += 'Started at ' + start_time + '\n'
            message_body += 'Finished at ' + str(datetime.datetime.now()) + '\n'
            message_body += 'Refresh your browser to see the product updates.\n'
            # Send message by email
            mail.send_mail(
                sender='WW Admin <admin@wwheelhouse.com>',
                to=address,
                subject=notify_title,
                body=message_body
            )
            # Send message by XMPP
            user_address = address
            chat_message_sent = False
            msg = message_body
            status_code = xmpp.send_message(user_address, msg)
            chat_message_sent = (status_code == xmpp.NO_ERROR)
            # Send message to web client via channel API
            channel.send_message(
                client_id,
                msg
            )
        else:
            # Report forbidden operation
            self.error(403)
class DeleteProductHandler(webapp2.RequestHandler):
    def get(self):
        if users.is_current_user_admin():
            # Get product code from query string passed in to page
            prodcode = self.request.get('edit')
            category = self.request.get('cat')
            logging.info('>>> GET prodcode=%s and cat=%s', prodcode, category)
            try:
                # Get product from the datastore
                parent_key = ndb.Key('ProductCategory', category)
                product = Product.get_by_id(prodcode, parent=parent_key)
                # Delete the entity
                product.key.delete()
            except:
                pass
            # Redirect back to main product view
            self.redirect('/?cat=' + category)
        else:
            # Report forbidden operation
            self.error(403)
class SearchHandler(webapp2.RequestHandler):
    def post(self):
        # Process search
        search_text = self.request.get('q')
        search_category = self.request.get('scat')
        query_string = "title:" + search_text
        query_string += " OR desc:" + search_text
        if search_category != '' and search_category != '0':
            query_string += " AND category=" + search_category
        found_products = ''
        num_found = 0
        if search_text != '':
            index = search.Index(name="ProductIndex")
            found_products = index.search(query_string)
            num_found = found_products.number_found
        # Populate search results page
        template_values = {
            'admin_mode': users.is_current_user_admin(),
            'greeting_html': login_html(),
            'prod_categories': PRODUCT_GROUPS,
            'selected': search_category,
            'search_text': search_text,
            'product_list': found_products,
            'num_found': num_found
        }
        results_template = JINJA_ENV.get_template('html/searchresults.htm')
        search_html = results_template.render(template_values)
        self.response.write(search_html)
class MainHandler(webapp2.RequestHandler):
    def get(self):
        editcode = self.request.get('edit')
        prod_category = self.request.get('cat', default_value='0')
        in_edit = (prod_category and editcode)
        if in_edit:
            product = Product.query_get_product(prod_category, editcode)
            new_product_form = EditProductForm(
                prodcode=editcode,
                title=product.title,
                price=product.price,
                category=prod_category,
                description=product.desc
            )
        else:
            # Produce empty product editing form
            new_product_form = EditProductForm()
        self.response.write(self.catalog_html(new_product_form))
        # logging.info("ENVIRONMENT: %s", os.environ)

    def post(self):
        if users.is_current_user_admin():
            # Get data submitted in form and validate user input
            prodcode = self.request.get('prodcode')
            title = self.request.get('title')
            price = self.request.get('price')
            category = self.request.get('category')
            description = self.request.get('description')
            new_product_form = EditProductForm(
                prodcode=prodcode,
                title=title,
                price=price,
                category=category,
                description=description
            )
            if new_product_form.validate():
                store_product(prodcode, title, price, category, description)
                self.redirect('/?cat=' + category + '&viewproduct=' + prodcode)
            else:
                html = self.catalog_html(new_product_form)
                self.response.write(html)
        else:
            # Unauthorized user -- raise an error
            self.abort(401)
    def catalog_html(self, editform):
        """ Return HTML for the product catalog """
        PRODUCTS_PER_PAGE = 4
        viewcode = self.request.get('viewproduct')
        editcode = self.request.get('edit')
        category = self.request.get('cat', default_value='0')
        in_edit = (category and editcode)  # Show Edit mode only if category and editcode provided
        in_one_product_view = viewcode != ''
        # If one product view or in edit, show single product
        if in_one_product_view or in_edit:
            # RETURN SINGLE PRODUCT VIEW
            if in_edit:
                # Query to get the product specified for editing
                product = Product.query_get_product(category, editcode)
            else:
                # Query to get the product specified for viewing
                product = Product.query_get_product(category, viewcode)
            # Populate catalog page
            template_values = {
                'admin_mode': users.is_current_user_admin(),
                'greeting_html': login_html(),
                'prod_categories': PRODUCT_GROUPS,
                'selected': category,
                'product': product,
                'editform': editform
            }
            one_product_template = JINJA_ENV.get_template('html/oneproduct.htm')
            one_product_html = one_product_template.render(template_values)
            return one_product_html
        else:
            # MULTIPLE PRODUCT VIEW
            if category == '0':
                # Show all products in all categories
                q_forward = Product.query_all_categories_sort_newest()
                q_backward = Product.query_all_categories_sort_oldest()
            else:
                # Show products in one category
                q_forward = Product.query_by_category_sort_newest(category)
                q_backward = Product.query_by_category_sort_oldest(category)
            page_nav = ''
            products = None
            num_products_in_query = q_forward.count()
            num_pages_to_show = num_products_in_query / PRODUCTS_PER_PAGE
            if (num_products_in_query % PRODUCTS_PER_PAGE) > 0:
                num_pages_to_show += 1
            cursor_was_passed_in = self.request.get('cursor') is not ''
            page_num = self.request.get('pg', default_value='1')
            prev_cur = ''
            next_cur = ''
            if num_products_in_query > 0:
                # Read the cursor passed in from the previous page
                cursor = Cursor(urlsafe=self.request.get('cursor'))
                # Fetch a forward cursor
                products, next_cursor, more_after = q_forward.fetch_page(
                    PRODUCTS_PER_PAGE,
                    start_cursor=cursor)
                # Fetch a backward cursor
                prev_products, prev_cursor, more_before = q_backward.fetch_page(
                    PRODUCTS_PER_PAGE,
                    start_cursor=cursor.reversed())
                if cursor_was_passed_in and prev_cursor:
                    prev_cur = prev_cursor.urlsafe()
                if more_after and next_cursor:
                    next_cur = next_cursor.urlsafe()
            # Populate catalog page
            template_values = {
                'admin_mode': users.is_current_user_admin(),
                'greeting_html': login_html(),
                'catalog': products,
                'prod_categories': PRODUCT_GROUPS,
                'selected': category,
                'editform': editform,
                'prev_cur': prev_cur,
                'next_cur': next_cur,
                'page_num': page_num,
                'page_num_prev': int(page_num) - 1,
                'page_num_next': int(page_num) + 1,
                'num_pages_to_show': num_pages_to_show
            }
            catalog_template = JINJA_ENV.get_template('html/catalog.htm')
            return catalog_template.render(template_values)
...
Please help. I don't have a clue how to fix this, but I need to get this project working for the students to use.
Thanks so much,
Chrys
That's because there's no default bucket attached to your current project.
You can create a bucket with gsutil or the cloud console and then hardcode the value in your bucket_path function.
Alternatively, you can keep your current bucket_path helper function, which returns the bucket name from app_identity.get_default_gcs_bucket_name(), and instead create a default bucket for the app by visiting your App Engine application settings in the Cloud Console: https://console.cloud.google.com/appengine/settings?project=
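For the first option, the edit to main.py is small. A minimal sketch (BUCKET-NAME is a placeholder for whatever bucket you actually create, for example with gsutil mb gs://BUCKET-NAME):

def bucket_path():
    # Hardcoded bucket created beforehand with `gsutil mb gs://BUCKET-NAME`
    # or in the Cloud Console; substitute your real bucket name here.
    bucket_name = 'BUCKET-NAME'
    return '/' + bucket_name + '/'

Because every handler already goes through bucket_path(), this one change removes the NoneType concatenation error everywhere.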
I just want to ask what's wrong with my code, or whether I'm missing something, because when I call the function PausePlay() from PlayAndPause() I always get an error saying:
self.dbusIfaceKey.Action(dbus.Int32("16"))
AttributeError: 'OpenOMX' object has no attribute 'dbusIfaceKey'
Here's the code:
OPTIONS = 'omxplayer -o local -t on --align center --win "0 0 {1} {2}" \"{0}\"'

from PyQt4 import QtCore, QtGui
from PyQt4.QtGui import *
from Tkinter import *
import sys
import os
from os import system
import dbus, time
from subprocess import Popen

try:
    _fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
    _fromUtf8 = lambda s: s

MoviePath = '/media/HP v250w/Our Story in 1 Minute.mp4'

class OpenOMX(QtCore.QThread):
    def __init__(self):
        QtCore.QThread.__init__(self)

    def run(self):
        global MoviePath
        global myctr
        width = QtGui.QApplication.desktop().width()
        height = QtGui.QApplication.desktop().height()
        cmd = OPTIONS.format(MoviePath, width, height - 60)
        Popen([cmd], shell=True)
        done, retry = 0, 0
        while done == 0:
            try:
                with open('/tmp/omxplayerdbus', 'r+') as f:
                    omxplayerdbus = f.read().strip()
                bus = dbus.bus.BusConnection(self.omxplayerdbus)
                print 'connected'
                object = bus.get_object('org.mpris.MediaPlayer2.omxplayer', '/org/mpris/MediaPlayer2', introspect=False)
                self.dbusIfaceProp = dbus.Interface(object, 'org.freedesktop.DBus.Properties')
                self.dbusIfaceKey = dbus.Interface(object, 'org.mpris.MediaPlayer2.Player')
                done = 1
            except:
                retry += 1
                if retry >= 50:
                    print 'ERROR'
                    raise SystemExit

    def PausePlay(self):
        self.dbusIfaceKey.Action(dbus.Int32('16'))

class VidControls(QtGui.QMainWindow):
    def __init__(self):
        QtGui.QMainWindow.__init__(self)

    def setupUi(self, MainWindow):
        'some codes here'

    def retranslateUi(self, MainWindow):
        self.btnPause.clicked.connect(self.PlayAndPause)

    def PlayAndPause(self):
        self.opn = OpenOMX()
        self.opn.PausePlay()

if __name__ == '__main__':
    app = QtGui.QApplication(sys.argv)
    ex = VidControls()
    ex.show()
    play = OpenOMX()
    play.start()
    sys.exit(app.exec_())
Any Comments or Suggestions would be highly appreciated. Thanks.
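No answer is recorded here, but the traceback points at a likely cause: PlayAndPause() creates a brand-new OpenOMX() whose run() has never executed, so dbusIfaceKey was never set on that instance. A minimal sketch of one possible fix (an assumption on my part, not from the original thread) is to hand the thread that was actually started to the window and act on it:

class VidControls(QtGui.QMainWindow):
    def __init__(self, player):
        QtGui.QMainWindow.__init__(self)
        self.player = player          # the OpenOMX thread started in __main__

    def PlayAndPause(self):
        # dbusIfaceKey exists only after OpenOMX.run() has connected to D-Bus
        self.player.PausePlay()

if __name__ == '__main__':
    app = QtGui.QApplication(sys.argv)
    play = OpenOMX()
    play.start()
    ex = VidControls(play)
    ex.show()
    sys.exit(app.exec_())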
I am using Scrapy for a project; in this project I am extracting information from an XML document.
Here is the part of the XML document where I would like to implement the for loop:
<relatedPersonsList>
    <relatedPersonInfo>...</relatedPersonInfo>
    <relatedPersonInfo>
        <relatedPersonName>
            <firstName>Mark</firstName>
            <middleName>E.</middleName>
            <lastName>Lucas</lastName>
        </relatedPersonName>
        <relatedPersonAddress>
            <street1>1 IMATION WAY</street1>
            <city>OAKDALE</city>
            <stateOrCountry>MN</stateOrCountry>
            <stateOrCountryDescription>MINNESOTA</stateOrCountryDescription>
            <zipCode>55128</zipCode>
        </relatedPersonAddress>
        <relatedPersonRelationshipList>
            <relationship>Executive Officer</relationship>
            <relationship>Director</relationship>
        </relatedPersonRelationshipList>
        <relationshipClarification/>
    </relatedPersonInfo>
    <relatedPersonInfo>...</relatedPersonInfo>
    <relatedPersonInfo>...</relatedPersonInfo>
    <relatedPersonInfo>...</relatedPersonInfo>
    <relatedPersonInfo>...</relatedPersonInfo>
    <relatedPersonInfo>...</relatedPersonInfo>
</relatedPersonsList>
As you can see in the <relatedPersonsList>, you can have multiple <relatedPersonInfo>, and when I try to make a for loop, I still only get the information of the first person.
This is my actual code:
for person in xxs.select('./relatedPersonsList/relatedPersonInfo'):
    item = Myform()  # even if I get rid of it I get the same result
    item["firstName"] = person.select('./relatedPersonName/firstName/text()').extract()[0]
    item["middleName"] = person.select('./relatedPersonName/middleName/text()')
    if item["middleName"]:
        item["middleName"] = item["middleName"].extract()[0]
    else:
        item["middleName"] = "NA"
Here is the code that I used in my spider:
from scrapy.contrib.spiders import CrawlSpider, Rule
from scrapy.contrib.linkextractors.sgml import SgmlLinkExtractor
from scrapy.selector import HtmlXPathSelector
from scrapy.selector import XmlXPathSelector
from scrapy.http import Request
import urlparse
from formds.items import SecformD

class SecDform(CrawlSpider):
    name = "DFORM"
    allowed_domain = ["http://www..gov"]
    start_urls = [
        ""
    ]
    rules = (
        Rule(
            SgmlLinkExtractor(restrict_xpaths=["/html/body/div/table/tr/td[3]/a[2]"]),
            callback='parse_formd',
            #follow= True no need of follow thing
        ),
        Rule(
            SgmlLinkExtractor(restrict_xpaths=('/html/body/div/center[1]/a[contains(., "[NEXT]")]')),
            follow=True
        ),
    )

    def parse_formd(self, response):
        hxs = HtmlXPathSelector(response)
        sites = hxs.select('//*[@id="formDiv"]/div/table/tr[3]/td[3]/a/@href').extract()
        for site in sites:
            yield Request(url=urlparse.urljoin(response.url, site), callback=self.parse_xml_document)

    def parse_xml_document(self, response):
        xxs = XmlXPathSelector(response)
        item = SecformD()
        item["stateOrCountryDescription"] = xxs.select('./primaryIssuer/issuerAddress/stateOrCountryDescription/text()').extract()[0]
        item["zipCode"] = xxs.select('./primaryIssuer/issuerAddress/zipCode/text()').extract()[0]
        item["issuerPhoneNumber"] = xxs.select('./primaryIssuer/issuerPhoneNumber/text()').extract()[0]
        for person in xxs.select('./relatedPersonsList//relatedPersonInfo'):
            #item = SecDform()
            item["firstName"] = person.select('./relatedPersonName/firstName/text()').extract()[0]
            item["middleName"] = person.select('./relatedPersonName/middleName/text()')
            if item["middleName"]:
                item["middleName"] = item["middleName"].extract()[0]
            else:
                item["middleName"] = "NA"
        return item
I extract the information to a .json file using this command:
scrapy crawl DFORM -o tes4.json -t json
Try something like this:
def parse_xml_document(self, response):
    xxs = XmlXPathSelector(response)
    items = []
    # common field values
    stateOrCountryDescription = xxs.select('./primaryIssuer/issuerAddress/stateOrCountryDescription/text()').extract()[0]
    zipCode = xxs.select('./primaryIssuer/issuerAddress/zipCode/text()').extract()[0]
    issuerPhoneNumber = xxs.select('./primaryIssuer/issuerPhoneNumber/text()').extract()[0]
    for person in xxs.select('./relatedPersonsList//relatedPersonInfo'):
        # instantiate one item per loop iteration
        item = SecformD()
        # save common parameters
        item["stateOrCountryDescription"] = stateOrCountryDescription
        item["zipCode"] = zipCode
        item["issuerPhoneNumber"] = issuerPhoneNumber
        item["firstName"] = person.select('./relatedPersonName/firstName/text()').extract()[0]
        item["middleName"] = person.select('./relatedPersonName/middleName/text()')
        if item["middleName"]:
            item["middleName"] = item["middleName"].extract()[0]
        else:
            item["middleName"] = "NA"
        items.append(item)
    return items
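The decisive change is that a fresh SecformD is created inside the loop and every populated item is returned, so one record per <relatedPersonInfo> reaches the JSON export instead of a single, repeatedly overwritten one. An equivalent variant (my sketch, not part of the original answer) yields each item as it is built, which avoids the list bookkeeping:

def parse_xml_document(self, response):
    xxs = XmlXPathSelector(response)
    # issuer-level fields shared by every person record
    state_desc = xxs.select('./primaryIssuer/issuerAddress/stateOrCountryDescription/text()').extract()[0]
    zip_code = xxs.select('./primaryIssuer/issuerAddress/zipCode/text()').extract()[0]
    phone = xxs.select('./primaryIssuer/issuerPhoneNumber/text()').extract()[0]
    for person in xxs.select('./relatedPersonsList//relatedPersonInfo'):
        item = SecformD()
        item["stateOrCountryDescription"] = state_desc
        item["zipCode"] = zip_code
        item["issuerPhoneNumber"] = phone
        item["firstName"] = person.select('./relatedPersonName/firstName/text()').extract()[0]
        middle = person.select('./relatedPersonName/middleName/text()').extract()
        item["middleName"] = middle[0] if middle else "NA"
        yield item  # generator callback: Scrapy collects each yielded item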