Odoo no UserError message without any error - python-2.7

My code:
from openerp.tools.translate import _
from openerp.exceptions import UserError
and function:
#api.multi
def button_in_progress(self):
for rec in self:
rec.state = 'in_progress'
test = self.test_ids.ids
test1 = len(test)
if test1 == 0:
raise UserError(_('Test test'))
return True
I logged. When I get test1 is 0, my error message doesn't appear. Also I don't get any of errors. What can be wrong?

No need to find len and then check just try this
#api.multi
def button_in_progress(self):
for rec in self:
rec.state = 'in_progress'
if not self.test_ids.ids:
raise UserError(_('Test test'))
return True

Related

Scrapy doesn't call callback function even with no filter

I have this code to crawl the details page
yield Request(flexibleItem[self.linkAttributeName],callback=self.parseDetails,dont_filter=True )
there is no error in the subURL because I tested it with the same method "GET"
I didn't get any error but simply python ignoring the callback function
It is a very huge project working on a server so I can't share the code .
But here is the main architecture for what I am doing .
Output is :
in start request
TRUE
oooo
def start_requests(self):
print "in start request"
for url in self.start_urls:
yield Request (url, method='GET',dont_filter=True)
def parse(self, response):
"this method "
jsonResponse = json.loads(response.body_as_unicode())
flexibleItem = {}
flexibleItem['source'] = self.Source
flexibleItem['action'] = self.Action
flexibleItem['category'] = self.Category
jsonResponse = self.attributesXMLParser.correctResponse(jsonResponse)
flexibleItem[self.linkAttributeName] = self.attributesXMLParser.getPageLink(flexibleItem[self.linkAttributeName], flexibleItem)
meta = dict()
meta['flexibleItem'] = flexibleItem
if self.ParseDetailsPage=="TRUE" : "this value is from XML file"
print "TRUE"
yield Request(flexibleItem[self.linkAttributeName],callback=self.parseDetails,dont_filter=True)
print "oooooo"
else :
yield flexibleItem
def parseDetails(self, response):
print "in parse details "
jsonResponse = json.loads(response.body_as_unicode())
print jsonResponse
flexibleItem = {}
for key in self.Attributes:
flexibleItem[key]= .....bla bla bla
yield flexibleItem

Where am i going wrong, trying to recursively scrape?

I want to scrape a site with scrapy that lists its products in catagories i'm new to scrapy and just getting my head round it today but though i was getting the gist of it on simple scrapes so attempted to scrape urls and return them to scrape further but appears i'm missing something.
someone answered fixing my code here is the latest version as thought i'd have another go at learning scrapy today but its still not recursively scanning it just seems to loop through all the pages but never gets into parse the items
never seems to enter the else statement
yield scrapy.Request(url = response.url,callback = self.parse_item)
i can debug it to check the items are parsed correctly if i force it to output items without looping
if i change the following
if product_pages:
for product_url in product_pages:
product_url2 = str(self.base_url + product_url)
self.log("Queued up: %s" % product_url2)
yield scrapy.Request(url = product_url2,callback = self.parse_product_pages)
else:
yield scrapy.Request(url = response.url,callback = self.parse_item)
to
if product_pages:
for product_url in product_pages:
product_url2 = str(self.base_url + product_url)
self.log("Queued up: %s" % product_url2)
yield scrapy.Request(url = product_url2,callback = self.parse_item)
else:
yield scrapy.Request(url = response.url,callback = self.parse_product_pages)
here is my code i'm working in python 2.7
import scrapy
from scrapy.spiders import CrawlSpider, Rule
from scrapy.selector import HtmlXPathSelector
from ybscrape.items import Product
from scrapy.linkextractors import LinkExtractor
from scrapy.linkextractors.sgml import SgmlLinkExtractor
class ybracingSpider(CrawlSpider):
name = 'ybscrape2'
download_delay = 0.75
def __init__(self, *args, **kwargs):
super(ybracingSpider, self).__init__(*args, **kwargs)
self.allowed_domains = ['http://www.ybracing.com/', 'www.ybracing.com', 'www.esellepro.com']
self.base_url = 'http://www.ybracing.com'
self.start_urls = ['http://www.ybracing.com/karting/']
def parse_start_url(self, response):
category = response.xpath("//h2/a/#href").extract()
#loop over catagory pages take the product link and add all pages url
for product in category:
all_pages = '?itemsperpage=99999'
category_url = str(self.base_url + product + all_pages)
self.log("Queued up: %s" % category_url)
yield scrapy.Request(url = category_url,callback = self.parse_product_pages)
def parse_product_pages(self, response):
product_pages = response.xpath("//li/div/div/h3/a/#href").extract()
#print("debug pause")
#print(product_pages)
#wait = input("PRESS ENTER TO CONTINUE.")
#print("continue")
if product_pages:
for product_url in product_pages:
product_url2 = str(self.base_url + product_url)
self.log("Queued up: %s" % product_url2)
yield scrapy.Request(url = product_url2,callback = self.parse_product_pages)
else:
yield scrapy.Request(url = response.url,callback = self.parse_item)
def parse_item(self, response):
item = Product()
item['description'] = response.xpath("//div[#id='Tabbed-Container-Details']/div[2]/div/text()").extract()
item['product_title'] = response.xpath("//h3[#class='Product-Heading']/text()").extract()
item['price'] = response.xpath("//div[#id='Product-Price']/text()").extract()
table_rows = response.xpath("//table[#id='SpecificationTab']/tr[*]/td[1]//text()").extract()
yield item
my items.py
from scrapy.item import Item, Field
class Product(Item):
product_title = Field()
description = Field()
price = Field()
What i'm expecting my code to do in steps
grab all the links within the the first export (categories) (this works)
look at all 9999 products inside each category and export the list (this works)
take the product url from the export append it to the base url to get to the product page for each. (this works)
4.then read data from in the product page to add to items ( never gets here) unlese i skip the if statement but thats not recursive it wont handle sub catagories like that
Here, I have made some changes in your code and now it's working
import scrapy
from scrapy.spiders import CrawlSpider, Rule
from scrapy.selector import HtmlXPathSelector
from demo.items import DemoItem
from scrapy.linkextractors import LinkExtractor
from scrapy.linkextractors.sgml import SgmlLinkExtractor
class DemoSpider(CrawlSpider):
name = 'ybracing2'
def __init__(self, *args, **kwargs):
super(DemoSpider, self).__init__(*args, **kwargs)
self.allowed_domains = ['http://www.ybracing.com/', 'www.ybracing.com', 'www.esellepro.com']
self.base_url = 'http://www.ybracing.com'
self.start_urls = ['http://www.ybracing.com/racewear/']
def parse_start_url(self, response):
category = response.xpath("//h2/a/#href").extract()
#loop over catagory pages take the product link and add all pages url
for product in category:
all_pages = '?itemsperpage=99999'
category_url = str(self.base_url + product + all_pages)
self.log("Queued up: %s" % category_url)
yield scrapy.Request(url = category_url,callback = self.parse_product_pages)
def parse_product_pages(self, response):
product_pages = response.xpath("//div[#class='Product']/a/#href").extract()
if product_pages:
for product_url in product_pages:
product_url2 = str(self.base_url + product_url)
self.log("Queued up: %s" % product_url2)
yield scrapy.Request(url = product_url2,callback = self.parse_item)
else:
yield scrapy.Request(url = response.url,callback = self.parse_product_pages)
def parse_item(self, response):
item = DemoItem()
dirty_data ={}
item['product_title'] = response.xpath("//h3[#class='Product-Heading']/text()").extract()
item['price'] = response.xpath("//div[#id='Product-Price']/text()").extract()
item['description'] = response.xpath("//div[#id='Tabbed-Container-Details']/div[2]/div/text()").extract()
#image['product_image'] =
# for variable in dirty_data.keys():
# if dirty_data[variable]:
# if variable == 'price':
# item[variable] = float(''.join(dirty_data[variable]).strip().replace('$', '').replace(',', ''))
# else:
# item[variable] = ''.join(dirty_data[variable]).strip()
yield item

Syntax error creating ArcGIS Feature class from Twitter data

I've tried my best to solve this error-
SyntaxError: Invalid syntax in this line
if__name__==__main':
main()
I'm using #Tweepy and #PYTHON27 and attempting to build an #ArcGIS .mdb Feature Class with the collected tweets that contain geotags. Any ideas what is causing the bail? Thank you so much. #Twitter
from tweepy import Stream
from tweepy import OAuthHandler
from tweepy.streaming import StreamListener
import time
import sys
import arcpy
#global variables
consumer_key = 'xxx'
consumer_secret = 'xxxx'
token_key = 'xxx'
token_secret = 'xxx'
class StdOutListener(StreamListener):
def __init__(self, start_time, featureClass, time_limit):
super(StdOutListener, self).__init__()
self.time = start_time
self.limit = time_limit
self.featureClass = featureClass
def on_status(self, status):
while (time.time() - self.time) <self.limit:
if status.geo is not None:
dictCoords = status.geo
listCoords = dictCoords['coordinates']
latitude = listCoords[0]
longitude = listCo0ords[1]
cursor = arcpy.da.InsertCursor(self.featureClass,("SHAPE#XY"))
cursor.insertRow([(longitude,latitude)])
print(str(listCoords[0]) + "," + str(listCoords[1]))
return True
else:
print "No coordinates found"
return True
def on_error(self, status):
print('Error...')
print status
return True
def on_timeout(self):
print('Timeout...')
return True
start_time = time.time()
arcpy.env.workspace = r'c:\ArcGIS_Blueprint_Python\data\Twitter\TweetInformation.gdb'
def main():
try: #new
featureClass = sys.argv[1]
monitorTime = sys.argv[2]
monitorTime = monitorTime * 3600
sr = arcpy.SpatialReference(4326)
arcpy.env.overwriteOutput = True
arcpy.CreateFeatureClass_management(arcpy.env.workspace,
featureClass, "POINT", spatial_reference=sr)
auth = OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(token_key, token_secret)
stream = Stream(auth, StdOutListener(start_time, featureClass,
time_limit=monitorTime)) #172800
stream.filter(track=['car'])
except Exception as e:
print(e.message)
if__name__ == '__main__':
main()

How to call the next page in my parse - Scrapy

I've tried everything but I can't seem to figure it out how I can call the next page in the parse_category.
I've tried LinkExtractor as I do when I go directly to a catergory page but this didn't work.
import scrapy.selector
import urlparse
from scrapy.spiders import CrawlSpider, Rule
from scrapy.http import Request
from msh_final.items import CrawlerMshFinalItem
def complete_url(string):
return "http://www.mediamarkt.be" + string
def get_base_url(url):
if url != "":
u = urlparse.urlparse(url)
return "%s://%s" % (u.scheme, u.netloc)
else:
return ""
def encode(str):
return str.encode('utf8', 'ignore')
class msh_finalSpider(CrawlSpider):
name = 'msh_final'
start_urls = ['http://www.mediamarkt.be/mcs/productlist/_Telefoon-Navigatie,98952,509451.html?langId=-17']
def parse(self, response):
items = response.xpath('//ul[#class="infield cf"]//div[#class="infield-wrapper"]/h2/a/#href')
for item in items:
link = item.extract()
yield Request(complete_url(link), callback=self.parse_category)
def parse_category(self, response):
items = response.xpath("//ul[#class='products-list']/li/div")
for item in items:
msh = CrawlerMshFinalItem()
msh['item_price'] = encode(item.xpath('normalize-space(.//aside/div/div/div/text())').extract()[0])
msh['item_name'] = encode(item.xpath('normalize-space(.//div/h2/a/text())').extract()[0])
yield msh
You should inherite your spider from Spider instead of CrawlSpider and use following code:
class msh_finalSpider(Spider):
name = 'msh_final'
start_urls = ['http://www.mediamarkt.be/mcs/productlist/_Telefoon-Navigatie,98952,509451.html?langId=-17']
def parse(self, response):
items = response.xpath('//ul[#class="infield cf"]//div[#class="infield-wrapper"]/h2/a/#href')
for item in items:
link = item.extract()
yield Request(complete_url(link), callback=self.parse_category)
def parse_category(self, response):
items = response.xpath("//ul[#class='products-list']/li/div")
for item in items:
msh = CrawlerMshFinalItem()
msh['item_price'] = encode(item.xpath('normalize-space(.//aside/div/div/div/text())').extract()[0])
msh['item_name'] = encode(item.xpath('normalize-space(.//div/h2/a/text())').extract()[0])
yield msh
new_link = response.xpath('//li[#class="pagination-next"]/a/#href').extract()[0]
yield Request(
complete_url(new_link),
callback=self.parse_category
)

python Attributes error

i just want to ask, what's wrong with my codes, or am i missing something. because when i call the function PausePlay() in the PlayAndPause() I always get an error saying:
self.dbusIfaceKey.Action(dbus.Int32("16"))
AttributeError: 'OpenOMX' object has no attribute 'dbusIfaceKey'
Here's the code:
OPTIONS = 'omxplayer -o local -t on --align center --win "0 0 {1} {2}" \"{0}\"'
from PyQt4 import QtCore, QtGui
from PyQt4.QtGui import *
from Tkinter import *
import sys
import os
from os import system
import dbus,time
from subprocess import Popen
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
_fromUtf8 = lambda s: s
MoviePath = '/media/HP v250w/Our Story in 1 Minute.mp4'
class OpenOMX(QtCore.QThread):
def __init__(self):
QtCore.QThread.__init__(self)
def run(self):
global MoviePath
global myctr
width = QtGui.QApplication.desktop().width()
height = QtGui.QApplication.desktop().height()
cmd = OPTIONS.format(MoviePath,width,height-60)
Popen([cmd], shell=True)
done,retry = 0,0
while done==0:
try:
with open('/tmp/omxplayerdbus', 'r+') as f:
omxplayerdbus = f.read().strip()
bus = dbus.bus.BusConnection(self.omxplayerdbus)
print 'connected'
object = bus.get_object('org.mpris.MediaPlayer2.omxplayer','/org/mpris/MediaPlayer2', introspect=False)
self.dbusIfaceProp = dbus.Interface(object,'org.freedesktop.DBus.Properties')
self.dbusIfaceKey = dbus.Interface(object,'org.mpris.MediaPlayer2.Player')
done=1
except:
retry+=1
if retry >= 50:
print 'ERROR'
raise SystemExit
def PausePlay(self):
self.dbusIfaceKey.Action(dbus.Int32('16'))
class VidControls(QtGui.QMainWindow):
def __init__(self):
QtGui.QMainWindow.__init__(self)
def setupUi(self, MainWindow):
'some codes here'
def retranslateUi(self, MainWindow):
self.btnPause.clicked.connect(self.PlayAndPause)
def PlayAndPause(self):
self.opn = OpenOMX()
self.opn.PausePlay()
if __name__=='__main__':
app = QtGui.QApplication(sys.argv)
ex = VidControls()
ex.show()
play = OpenOMX()
play.start()
sys.exit(app.exec_())
Any Comments or Suggestions would be highly appreciated. Thanks.