How to detect iPad using django-mobile - django

I just gave django-mobile a try.
I really like the concept, and it helps my project detect mobile devices.
However, I am trying to find out when the request is made by an iPad browser.
I have added the following to settings.py:
FLAVOURS = ('full', 'mobile', 'ipad')
But it is not working.
Can anybody kindly tell me how I can proceed from here?
What else do I need to do?
The following is my view.
if get_flavour() == 'full':
    t = loader.get_template('index.html')
elif get_flavour() == 'ipad':
    t = loader.get_template('ipad.html')
else:
    t = loader.get_template('mobile.html')
Thank you for your time in advance.

Out of the box, django-mobile only provides two flavours. From the github page:
Note: By default django-mobile only distinguishes between full and mobile flavour.
In order to actually detect an iPad vs. any other device, you will need to replace the existing MobileDetectionMiddleware with your own class (say, MyMobileDetectionMiddleware) in MIDDLEWARE_CLASSES in your settings.py. You can use the existing MobileDetectionMiddleware class as a guide, and there is some information on customization on the same github page.
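For example, here is a minimal sketch of such a replacement (untested; the detection rules are illustrative, and only set_flavour and the settings names come from django-mobile itself):

from django_mobile import set_flavour
from django_mobile.conf import settings

class MyMobileDetectionMiddleware(object):
    """Sketch: set the 'ipad' flavour for iPads, 'mobile' for other handhelds."""
    def process_request(self, request):
        user_agent = request.META.get('HTTP_USER_AGENT', '').lower()
        if 'ipad' in user_agent:
            set_flavour('ipad', request)
        elif 'mobile' in user_agent:
            set_flavour(settings.DEFAULT_MOBILE_FLAVOUR, request)
        else:
            set_flavour(settings.FLAVOURS[0], request)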

If you want a quick and easy fix that treats the iPad as a 'mobile' flavour, you can modify the middleware class by commenting out (or removing) user_agents_exception_search on line 42 and self.user_agents_exception_search_regex on line 51. Then, on line 60, remove the 'and not' from the if statement.
Here's a simple modified version for the lazy:
import re
from django_mobile import flavour_storage
from django_mobile import set_flavour, _init_flavour
from django_mobile.conf import settings

class SetFlavourMiddleware(object):
    def process_request(self, request):
        _init_flavour(request)
        if settings.FLAVOURS_GET_PARAMETER in request.GET:
            flavour = request.GET[settings.FLAVOURS_GET_PARAMETER]
            if flavour in settings.FLAVOURS:
                set_flavour(flavour, request, permanent=True)

    def process_response(self, request, response):
        flavour_storage.save(request, response)
        return response

class MobileDetectionMiddleware(object):
    user_agents_test_match = (
        "w3c ", "acs-", "alav", "alca", "amoi", "audi",
        "avan", "benq", "bird", "blac", "blaz", "brew",
        "cell", "cldc", "cmd-", "dang", "doco", "eric",
        "hipt", "inno", "ipaq", "java", "jigs", "kddi",
        "keji", "leno", "lg-c", "lg-d", "lg-g", "lge-",
        "maui", "maxo", "midp", "mits", "mmef", "mobi",
        "mot-", "moto", "mwbp", "nec-", "newt", "noki",
        "xda", "palm", "pana", "pant", "phil", "play",
        "port", "prox", "qwap", "sage", "sams", "sany",
        "sch-", "sec-", "send", "seri", "sgh-", "shar",
        "sie-", "siem", "smal", "smar", "sony", "sph-",
        "symb", "t-mo", "teli", "tim-", "tosh", "tsm-",
        "upg1", "upsi", "vk-v", "voda", "wap-", "wapa",
        "wapi", "wapp", "wapr", "webc", "winw", "xda-",
    )
    user_agents_test_search = u"(?:%s)" % u'|'.join((
        'up.browser', 'up.link', 'mmp', 'symbian', 'smartphone', 'midp',
        'wap', 'phone', 'windows ce', 'pda', 'mobile', 'mini', 'palm',
        'netfront', 'opera mobi', 'ipad',
    ))
    # user_agents_exception_search = u"(?:%s)" % u'|'.join((
    #     'ipad',
    # ))
    http_accept_regex = re.compile(r"application/vnd\.wap\.xhtml\+xml", re.IGNORECASE)

    def __init__(self):
        user_agents_test_match = r'^(?:%s)' % '|'.join(self.user_agents_test_match)
        self.user_agents_test_match_regex = re.compile(user_agents_test_match, re.IGNORECASE)
        self.user_agents_test_search_regex = re.compile(self.user_agents_test_search, re.IGNORECASE)
        # self.user_agents_exception_search_regex = re.compile(self.user_agents_exception_search, re.IGNORECASE)

    def process_request(self, request):
        is_mobile = False
        if 'HTTP_USER_AGENT' in request.META:
            user_agent = request.META['HTTP_USER_AGENT']
            # Test common mobile values.
            if self.user_agents_test_search_regex.search(user_agent):
                is_mobile = True
            else:
                # Nokia-like test for WAP browsers.
                # http://www.developershome.com/wap/xhtmlmp/xhtml_mp_tutorial.asp?page=mimeTypesFileExtension
                if 'HTTP_ACCEPT' in request.META:
                    http_accept = request.META['HTTP_ACCEPT']
                    if self.http_accept_regex.search(http_accept):
                        is_mobile = True
            if not is_mobile:
                # Now we test the user agent against a big list of prefixes.
                if self.user_agents_test_match_regex.match(user_agent):
                    is_mobile = True
        if is_mobile:
            set_flavour(settings.DEFAULT_MOBILE_FLAVOUR, request)
        else:
            set_flavour(settings.FLAVOURS[0], request)
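To wire this in, point MIDDLEWARE_CLASSES at your copies instead of the stock django_mobile classes (a sketch; 'myproject.middleware' is a placeholder for wherever you saved the file, and the relative order should follow django-mobile's README):

MIDDLEWARE_CLASSES = (
    # ... session and other middleware ...
    'myproject.middleware.MobileDetectionMiddleware',  # the modified detector above
    'myproject.middleware.SetFlavourMiddleware',
)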

Related

Which Timed JSONWebSignature Serializer replacement for itsdangerous is better? pyjwt or authlib

I am currently using itsdangerous to generate a timed JSON web signature as a token for users to authenticate, reset passwords, etc. Here's the code:
from itsdangerous import TimedJSONWebSignatureSerializer as Serializer

class SampleCode:
    def generate_confirmation_token(self, expiration=600):
        s = Serializer(current_app.config['SECRET_KEY'], expires_in=expiration)
        return s.dumps({'confirm': self.id}).decode('utf-8')

    def confirm(self, token):
        s = Serializer(current_app.config['SECRET_KEY'])
        try:
            data = s.loads(token.encode('utf-8'))
        except:
            return False
        if data.get('confirm') != self.id:
            return False
        self.confirmed = True
        db.session.add(self)
        return True
Since TimedJSONWebSignatureSerializer was deprecated and then removed in itsdangerous 2.1.0, I think I need to move to some other library that provides a JWT/JWS interface.
I have two candidates; which one is better?
pyjwt
authlib
Which library rates as "better" depends very much on the use case.
If you want to keep it short and simple, I would recommend pyjwt. It's easy to set the expiration time, whereas I could not find a suitable option for that in the authlib JWS documentation. So just change your code as follows:
import jwt
import datetime

class SampleCode:
    def generate_confirmation_token(self, expiration=600):
        reset_token = jwt.encode(
            {
                "confirm": self.id,
                "exp": datetime.datetime.now(tz=datetime.timezone.utc)
                + datetime.timedelta(seconds=expiration)
            },
            current_app.config['SECRET_KEY'],
            algorithm="HS256"
        )
        return reset_token

    def confirm(self, token):
        try:
            data = jwt.decode(
                token,
                current_app.config['SECRET_KEY'],
                leeway=datetime.timedelta(seconds=10),
                algorithms=["HS256"]
            )
        except jwt.InvalidTokenError:
            # covers expired, tampered, and malformed tokens
            return False
        if data.get('confirm') != self.id:
            return False
        self.confirmed = True
        db.session.add(self)
        return True
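To see the expiry behaviour in isolation, here is a standalone sketch (the secret is a placeholder for current_app.config['SECRET_KEY']):

import datetime
import time
import jwt

SECRET = "change-me"  # placeholder secret

token = jwt.encode(
    {"confirm": 42,
     "exp": datetime.datetime.now(tz=datetime.timezone.utc)
            + datetime.timedelta(seconds=1)},
    SECRET,
    algorithm="HS256",
)
print(jwt.decode(token, SECRET, algorithms=["HS256"]))  # {'confirm': 42, 'exp': ...}

time.sleep(2)
try:
    jwt.decode(token, SECRET, algorithms=["HS256"])
except jwt.ExpiredSignatureError:
    print("token expired")  # raised once 'exp' is in the past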
Hope I could help!

Python scrapy working (only half of the time)

I created a Python Scrapy project to extract the prices of some Google flights.
I configured the middleware to use PhantomJS instead of a normal browser.
import time
from selenium import webdriver
from scrapy.http import HtmlResponse

class JSMiddleware(object):
    def process_request(self, request, spider):
        driver = webdriver.PhantomJS()
        try:
            driver.get(request.url)
            time.sleep(1.5)
        except Exception as e:
            raise ValueError("request url failed -\n url: {},\n error: {}".format(request.url, e))
        body = driver.page_source
        # encoding='utf-8' - add to html response if necessary
        return HtmlResponse(driver.current_url, body=body, encoding='utf-8',
                            request=request)
In settings.py I added:
DOWNLOADER_MIDDLEWARES = {
    # path to the middleware class: middleware order value
    'scraper_module.middlewares.middleware.JSMiddleware': 543,
    # disable the built-in user agent middleware
    'scrapy.downloadermiddlewares.useragent.UserAgentMiddleware': None,
}
I also created the following spider class:
import scrapy
from scrapy import Selector

class Gspider(scrapy.Spider):
    name = "google_spider"

    def __init__(self):
        self.start_urls = ["https://www.google.pt/flights/#search;f=LIS;t=POR;d=2017-06-18;r=2017-06-22"]
        self.prices = []
        self.links = []

    def clean_price(self, part):
        # part is received as a list; the encoding is utf-8
        part = part[0]
        part = part.encode('utf-8')
        part = filter(str.isdigit, part)
        return part

    def clean_link(self, part):
        part = part[0]
        part = part.encode('utf-8')
        return part

    def get_part(self, var_holder, response, marker, inner_marker, amount=1):
        selector = Selector(response)
        divs = selector.css(marker)
        for n, div in enumerate(divs):
            if n < amount:
                part = div.css(inner_marker).extract()
                if inner_marker == '::text':
                    part = self.clean_price(part)
                else:
                    part = self.clean_link(part)
                var_holder.append(part)
            else:
                break
        return var_holder

    def parse(self, response):
        prices, links = [], []
        prices = self.get_part(prices, response, 'div.OMOBOQD-d-Ab', '::text')
        print prices
        links = self.get_part(links, response, 'a.OMOBOQD-d-X', 'a::attr(href)')
        print links
The problem is, when I run the code in the shell, around half of the time I successfully get the prices and links requested, but the other half of the time the final vectors, which should contain the extracted data, are empty.
I do not get any errors during execution.
Does anyone have any idea why this is happening?
Here are the logs from the command line:
Google has a very strict policy regarding crawling (pretty hypocritical when you know that they constantly crawl the whole web...).
You should either find an API, as said previously in the comments, or maybe use proxies. An easy way is to use Crawlera. It manages thousands of proxies so you don't have to bother. I personally use it to crawl Google and it works perfectly. The downside is that it is not free.
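Separately, note that the middleware above waits a fixed time.sleep(1.5) before grabbing page_source; if Google's JavaScript has not finished rendering the prices by then, the response is built before the data exists, which would also produce intermittently empty results. An explicit wait is more reliable (a sketch; the CSS class is copied from the spider above and may change on Google's side):

from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.common.exceptions import TimeoutException

driver = webdriver.PhantomJS()
driver.get("https://www.google.pt/flights/#search;f=LIS;t=POR;d=2017-06-18;r=2017-06-22")
try:
    # block for up to 10 seconds, until at least one price div exists
    WebDriverWait(driver, 10).until(
        EC.presence_of_element_located((By.CSS_SELECTOR, "div.OMOBOQD-d-Ab"))
    )
    body = driver.page_source
except TimeoutException:
    body = None  # the element never appeared; treat as a failed fetch
finally:
    driver.quit()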

How to work with a very large "allowed_domains" attribute in scrapy?

The following is my scrapy code:
def get_host_regex(self, spider):
    """Override this method to implement a different offsite policy"""
    allowed_domains = getattr(spider, 'allowed_domains', None)
    if not allowed_domains:
        return re.compile('')  # allow all by default
    regex = r'^(.*\.)?(%s)$' % '|'.join(re.escape(d) for d in allowed_domains if d is not None)
    return re.compile(regex)

def spider_opened(self, spider):
    self.host_regex = self.get_host_regex(spider)
    self.domains_seen = set()
Because allowed_domains is very big, compiling the combined pattern throws an exception at this line:
regex = r'^(.*\.)?(%s)$' % '|'.join(re.escape(d) for d in allowed_domains if d is not None)
How do I solve this problem?
You can build your own OffsiteMiddleware variation, with a different implementation that checks requests against the domains in the spider's allowed_domains.
For example, add this in a middlewares.py file:
from scrapy.spidermiddlewares.offsite import OffsiteMiddleware
from scrapy.utils.httpobj import urlparse_cached

class SimpleOffsiteMiddleware(OffsiteMiddleware):
    def spider_opened(self, spider):
        # don't build a regex, just use the list as-is
        self.allowed_hosts = getattr(spider, 'allowed_domains', [])
        self.domains_seen = set()

    def should_follow(self, request, spider):
        if self.allowed_hosts:
            host = urlparse_cached(request).hostname or ''
            # does 'www.example.com' end with 'example.com'?
            # test this for all allowed domains
            return any(host.endswith(h) for h in self.allowed_hosts)
        else:
            return True
and change your settings to disable the default OffsiteMiddleware, and add yours:
SPIDER_MIDDLEWARES = {
    'scrapy.spidermiddlewares.offsite.OffsiteMiddleware': None,
    'myproject.middlewares.SimpleOffsiteMiddleware': 500,
}
Warning: this middleware is not tested. It is a very naive implementation, and definitely not efficient (it tests the hostname against each of 50,000 possible domain suffixes for each and every request).
You could use another backend to store the list and test hostname values, sqlite for example.
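As a sketch of that idea (untested): instead of scanning all domains per request, you can answer exact and parent-domain matches with set lookups, checking only as many candidates as the hostname has labels:

from scrapy.spidermiddlewares.offsite import OffsiteMiddleware
from scrapy.utils.httpobj import urlparse_cached

class SetLookupOffsiteMiddleware(OffsiteMiddleware):
    def spider_opened(self, spider):
        self.allowed_hosts = set(getattr(spider, 'allowed_domains', None) or [])
        self.domains_seen = set()

    def should_follow(self, request, spider):
        if not self.allowed_hosts:
            return True
        host = urlparse_cached(request).hostname or ''
        # for 'a.b.example.com', test 'a.b.example.com', 'b.example.com',
        # 'example.com', 'com' -- a handful of O(1) set lookups per request
        parts = host.split('.')
        return any('.'.join(parts[i:]) in self.allowed_hosts
                   for i in range(len(parts)))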

django-nocaptcha-recaptcha always shows additional verification box

I installed django-nocaptcha-recaptcha and integrated it into my form:
from django import forms
from nocaptcha_recaptcha.fields import NoReCaptchaField

class ClientForm(forms.ModelForm):
    captcha = NoReCaptchaField()
It shows up fine on the form, but whenever I click on it, an additional dialog pops up asking me to enter some text and verify. It happens every time. I tested it from another computer on another network, and it still asks for additional verification after clicking the box.
This is what it looks like: (screenshot of the additional verification dialog box)
Here's how I'm handling the form:
@xframe_options_exempt
def registration(request):
    if request.method == 'POST':
        clientform = ClientForm(request.POST)
        # check whether it's valid:
        if clientform.is_valid():
            new_client = clientform.save()
            ...
What am I doing wrong? Is it a problem with django-nocaptcha-recaptcha? Should I use something else?
P.S. I'm using django 1.7.1 with python 3.4
Another alternative: minimalist and not framework-dependent.
This is the code, in case you want to rewrite it.
'''
NO-CAPTCHA VERSION: 1.0
PYTHON VERSION: 3.x
'''
import json
from urllib.request import Request, urlopen
from urllib.parse import urlencode

VERIFY_SERVER = "www.google.com"

class RecaptchaResponse(object):
    def __init__(self, is_valid, error_code=None):
        self.is_valid = is_valid
        self.error_code = error_code

    def __repr__(self):
        return "Recaptcha response: %s %s" % (
            self.is_valid, self.error_code)

    def __str__(self):
        return self.__repr__()

def displayhtml(site_key, language=''):
    """Gets the HTML to display for reCAPTCHA

    site_key -- The site key
    language -- The language code for the widget.
    """
    return """<script src="https://www.google.com/recaptcha/api.js?hl=%(LanguageCode)s" async="async" defer="defer"></script>
<div class="g-recaptcha" data-sitekey="%(SiteKey)s"></div>
""" % {
        'LanguageCode': language,
        'SiteKey': site_key,
    }

def submit(response,
           secret_key,
           remote_ip,
           verify_server=VERIFY_SERVER):
    """
    Submits a reCAPTCHA request for verification. Returns RecaptchaResponse
    for the request

    response -- The value of response from the form
    secret_key -- your reCAPTCHA secret key
    remote_ip -- the user's ip address
    """
    if not (response and len(response)):
        return RecaptchaResponse(is_valid=False, error_code='incorrect-captcha-sol')

    def encode_if_necessary(s):
        if isinstance(s, str):
            return s.encode('utf-8')
        return s

    params = urlencode({
        'secret': encode_if_necessary(secret_key),
        'remoteip': encode_if_necessary(remote_ip),
        'response': encode_if_necessary(response),
    })
    params = params.encode('utf-8')
    request = Request(
        url="https://%s/recaptcha/api/siteverify" % verify_server,
        data=params,
        headers={
            "Content-type": "application/x-www-form-urlencoded",
            "User-agent": "reCAPTCHA Python"
        }
    )
    httpresp = urlopen(request)
    return_values = json.loads(httpresp.read().decode('utf-8'))
    httpresp.close()
    return_code = return_values['success']
    if return_code:
        return RecaptchaResponse(is_valid=True)
    else:
        return RecaptchaResponse(is_valid=False, error_code=return_values['error-codes'])
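A usage sketch in a Django view (the 'g-recaptcha-response' POST field name is set by the reCAPTCHA widget itself; the secret key here is a placeholder):

def registration(request):
    if request.method == 'POST':
        result = submit(
            request.POST.get('g-recaptcha-response', ''),
            'your-secret-key',                      # placeholder
            request.META.get('REMOTE_ADDR', ''),
        )
        if not result.is_valid:
            # re-render the form with an error; result.error_code says why
            pass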
Restart the server and don't forget to clear your browser's cache. Hope this helps.

Why does my redirect middleware fail?

I have a middleware that redirects mobile users to a mobile site, but I want to direct them to the full site if the URL is /property/. The mobile redirect is working, but /property/ is not being excluded.
Here is the current middleware.
middleware.py
# Adapted from http://djangosnippets.org/snippets/2001/
import re
from django.conf import settings
from django.http import HttpResponseRedirect

class MobileRedirectMiddleware(object):
    """
    Redirects mobile users to a different site.
    """
    def process_request(self, request):
        if self._is_mobile(request):
            return HttpResponseRedirect(settings.MOBILE_SITE_URL)

    def _is_mobile(self, request):
        is_mobile = False
        NON_MOBILE_REDIRECT_URLS = getattr(settings, 'NON_MOBILE_REDIRECT_URLS', [])
        if request.path in NON_MOBILE_REDIRECT_URLS:
            return False
        if 'HTTP_USER_AGENT' in request.META:
            user_agent = request.META['HTTP_USER_AGENT']
            # Test common mobile values.
            pattern = "(up.browser|up.link|mmp|symbian|smartphone|midp|wap|phone|windows ce|pda|mobile|mini|palm|netfront)"
            prog = re.compile(pattern, re.IGNORECASE)
            match = prog.search(user_agent)
            if match:
                is_mobile = True
            else:
                # Nokia-like test for WAP browsers.
                # http://www.developershome.com/wap/xhtmlmp/xhtml_mp_tutorial.asp?page=mimeTypesFileExtension
                if 'HTTP_ACCEPT' in request.META:
                    http_accept = request.META['HTTP_ACCEPT']
                    pattern = r"application/vnd\.wap\.xhtml\+xml"
                    prog = re.compile(pattern, re.IGNORECASE)
                    match = prog.search(http_accept)
                    if match:
                        is_mobile = True
            if not is_mobile:
                # Now we test the first four characters of the user agent
                # against a big list of prefixes.
                user_agents_test = ("w3c ", "acs-", "alav", "alca", "amoi", "audi",
                                    "avan", "benq", "bird", "blac", "blaz", "brew",
                                    "cell", "cldc", "cmd-", "dang", "doco", "eric",
                                    "hipt", "inno", "ipaq", "java", "jigs", "kddi",
                                    "keji", "leno", "lg-c", "lg-d", "lg-g", "lge-",
                                    "maui", "maxo", "midp", "mits", "mmef", "mobi",
                                    "mot-", "moto", "mwbp", "nec-", "newt", "noki",
                                    "xda", "palm", "pana", "pant", "phil", "play",
                                    "port", "prox", "qwap", "sage", "sams", "sany",
                                    "sch-", "sec-", "send", "seri", "sgh-", "shar",
                                    "sie-", "siem", "smal", "smar", "sony", "sph-",
                                    "symb", "t-mo", "teli", "tim-", "tosh", "tsm-",
                                    "upg1", "upsi", "vk-v", "voda", "wap-", "wapa",
                                    "wapi", "wapp", "wapr", "webc", "winw", "xda-",)
                test = user_agent[0:4].lower()
                if test in user_agents_test:
                    is_mobile = True
        return is_mobile
In settings.py I have this:
MOBILE_SITE_URL = 'http://mobile.somesite.com/'
NON_MOBILE_REDIRECT_URLS = ['/property/']
It may not be enough just to avoid redirecting to the mobile site at the given URL. If the user is already coming from mobile.somesite.com/..../, you will need to actively redirect them to www. to get away from the mobile site.
This is untested, but should be pretty close:
class MobileRedirectMiddleware(object):
    """
    Redirects mobile users to a different site.
    """
    def process_request(self, request):
        # request.META is a dict, so use .get() rather than attribute access
        was_mobile = settings.MOBILE_SITE_URL in request.META.get('HTTP_REFERER', '')
        NON_MOBILE_REDIRECT_URLS = getattr(settings, 'NON_MOBILE_REDIRECT_URLS', [])
        if request.path in NON_MOBILE_REDIRECT_URLS and was_mobile:
            # redirect them to 'www.somesite.com/.../'
            return HttpResponseRedirect(settings.MAIN_SITE_URL + request.path.lstrip('/'))
        if self._is_mobile(request):
            return HttpResponseRedirect(settings.MOBILE_SITE_URL)

    def _is_mobile(self, request):
        is_mobile = False
        # no longer need to check urls in here
        if 'HTTP_USER_AGENT' in request.META:
            ...
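One more note on the original symptom, as an assumption worth checking: request.path in NON_MOBILE_REDIRECT_URLS only matches the exact path '/property/'. If sub-pages such as /property/123/ should also stay on the full site, a prefix test is needed, for example:

# sketch: exclude /property/ and everything underneath it
if any(request.path.startswith(url) for url in NON_MOBILE_REDIRECT_URLS):
    return False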