Why is this template filter invalid? - django

What am I doing wrong here?
I have roundnumber in a module called accounts_extras.py located in the templatetags directory. In my template I have {% load accounts_extras %} at the top. It's also worth noting that 'upto' is currently working in another template (haven't tried it on this template yet), but the issue is with roundnumber. {{ staravg.stars__avg|roundnumber }} is giving me an invalid filter error.
#accounts_extras.py
from django import template
from django.template.defaultfilters import stringfilter
register = template.Library()
@register.filter
@stringfilter
def upto(value, delimiter=None):
    return value.split(delimiter)[0]
upto.is_safe = True
@register.filter
def roundnumber(value):
if value > 1.75 and value < 2.25
return 2
if value > 2.25 and value < 2.75
return 2.5
if value > 2.75 and value < 3.25
return 3
if value > 3.25 and value < 3.75
return 3.5
if value > 3.75 and value < 4.25
return 4
Is the problem that I have two filters in the same module? Is that allowed?

Your filter definition is fine. The problem is with the missing colons and indentation:
@register.filter
def roundnumber(value):
    if value > 1.75 and value < 2.25:  # colon
        return 2                       # indentation
    # ...
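For completeness, a corrected version of the whole filter might look like this. It is a sketch using the thresholds from the question; the final fall-through return is my addition so values outside the handled ranges still render something:
from django import template

register = template.Library()

@register.filter
def roundnumber(value):
    # map an average star rating onto the nearest half-star step
    if 1.75 < value < 2.25:
        return 2
    if 2.25 < value < 2.75:
        return 2.5
    if 2.75 < value < 3.25:
        return 3
    if 3.25 < value < 3.75:
        return 3.5
    if 3.75 < value < 4.25:
        return 4
    return value  # my addition: fall back to the original value outside the handled ranges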

Related

Django 3.2.3 Pagination isn't working properly

I have a Class Based View that isn't working properly (duplicating some objects and leaving out others).
I tested it in the shell:
from django.core.paginator import Paginator
from report.models import Grade, Exam
f = Exam.objects.all().filter(id=7)[0].full_mark
all = Grade.objects.all().filter(exam_id=7, value__gte=(f*0.95)).order_by('-value')
p = Paginator(all, 12)
for i in p.page(1).object_list:
... print(i.id)
2826
2617
2591
2912
2796
2865
2408
2501
2466
2681
2616
2563
for i in p.page(2).object_list:
... print(i.id)
2558
2466
2563
2920
2681
2824
2498
2854
2546
2606
2598
2614
Making an order_by call before passing the queryset to the Paginator is the root of the problem, and it is well explained here. All you need is to call distinct() or to specify an additional field in the order_by as a tie-breaker for equal values.
Below is code that should work. You also don't need to use all() in your queries; filter() by default applies to all of the model's objects.
from django.core.paginator import Paginator
from report.models import Grade, Exam
f = Exam.objects.filter(id=7).first().full_mark
all = Grade.objects.filter(exam_id=7, value__gte=(f*0.95)).order_by('-value').distinct()
p = Paginator(all, 12)
for i in p.page(1).object_list:
... print(i.id)
By the way, your code will crash if an exam object with id=7 is not found. You should assign the full_mark value to your f variable conditionally.
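A minimal sketch of that, reusing the names from the snippet above (the empty-queryset fallback is my assumption; adjust it to whatever the view should do):
from django.core.paginator import Paginator
from report.models import Grade, Exam

exam = Exam.objects.filter(id=7).first()
if exam is None:
    # no exam with id=7; decide what should happen in this case
    grades = Grade.objects.none()
else:
    f = exam.full_mark
    # '-value', 'id' keeps the ordering deterministic when several grades share the same value
    grades = Grade.objects.filter(exam_id=7, value__gte=f * 0.95).order_by('-value', 'id')

p = Paginator(grades, 12)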

Unhashable type error with sklearn and importing a CSV

I'm trying to execute the below code and I don't understand what I'm doing wrong. The purpose of the code is to use Python's and sklearn's train_test_split function to partition the data into training and testing chunks.
The data (downloadable here) is cost of rent data for various houses/condos, along with each house/condo's properties. Ultimately I'm trying to use predictive modeling to predict rent prices (so rent prices are the target). Here's the code:
import pandas as pd
rentdata = pd.read_csv('6000_clean.csv')
import sklearn as sk
import numpy as np
import matplotlib.pyplot as plt
from sklearn.cross_validation import train_test_split
# trying to make a = all rows of the first column and b = all rows of columns 2-46, i.e. a will be only the target data (rent prices) and b will be the feature data.
a, b = rentdata[ : ,0], rentdata[ : ,1:46]
What results is the following error:
TypeError Traceback (most recent call last)
<ipython-input-24-789fb8e8c2f6> in <module>()
8 from sklearn.cross_validation import train_test_split
9
---> 10 a, b = rentdata[ : ,0], rentdata[ : ,1:46]
11
C:\Users\Nick\Anaconda\lib\site-packages\pandas\core\frame.pyc in __getitem__(self, key)
2001 # get column
2002 if self.columns.is_unique:
-> 2003 return self._get_item_cache(key)
2004
2005 # duplicate columns
C:\Users\Nick\Anaconda\lib\site-packages\pandas\core\generic.pyc in _get_item_cache(self, item)
665 return cache[item]
666 except Exception:
--> 667 values = self._data.get(item)
668 res = self._box_item_values(item, values)
669 cache[item] = res
C:\Users\Nick\Anaconda\lib\site-packages\pandas\core\internals.pyc in get(self, item)
1653 def get(self, item):
1654 if self.items.is_unique:
-> 1655 _, block = self._find_block(item)
1656 return block.get(item)
1657 else:
C:\Users\Nick\Anaconda\lib\site-packages\pandas\core\internals.pyc in _find_block(self, item)
1933
1934 def _find_block(self, item):
-> 1935 self._check_have(item)
1936 for i, block in enumerate(self.blocks):
1937 if item in block:
C:\Users\Nick\Anaconda\lib\site-packages\pandas\core\internals.pyc in _check_have(self, item)
1939
1940 def _check_have(self, item):
-> 1941 if item not in self.items:
1942 raise KeyError('no item named %s' % com.pprint_thing(item))
1943
C:\Users\Nick\Anaconda\lib\site-packages\pandas\core\index.pyc in __contains__(self, key)
317
318 def __contains__(self, key):
--> 319 hash(key)
320 # work around some kind of odd cython bug
321 try:
TypeError: unhashable type
You can download the CSV to get a look at the data here: http://wikisend.com/download/776790/6000_clean.csv
I downloaded your data and modified your problem line to this:
a, b = rentdata.iloc[0], rentdata.iloc[1:46]
iloc selects row by position, see the docs: http://pandas.pydata.org/pandas-docs/stable/indexing.html#selection-by-position
This now selects the first row and rows 2-46 (remember that slicing is half-open: it includes the beginning of the range but not the end).
Note you can always select the first row using head:
a, b = rentdata.head(1), rentdata.iloc[1:46]
would also work
In [5]:
a
Out[5]:
Monthly $ rent 1150
Location alameda
# of bedrooms 1
# of bathrooms 1
# of square feet NaN
Latitude 37.77054
Longitude -122.2509
Street address 1500-1598 Lincoln Lane
# more rows so trimmed for brevity here
.......
In [9]: b
Out[9]:
# too large to paste here
.....
45 rows × 46 columns
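If the goal, as described in the question, is to split the first column off as the target and keep the remaining columns as features, a position-based column selection along these lines should do it (a sketch assuming the rent price sits in the first column):
import pandas as pd

rentdata = pd.read_csv('6000_clean.csv')

# all rows of the first column (target: rent prices) and all rows of columns 2-46 (features)
a = rentdata.iloc[:, 0]
b = rentdata.iloc[:, 1:46]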

Django Template Save operation result into a variable

I have a listing count of 2370 and the page only shows 12 items, so I want to calculate the number of pages. I tried the solution below:
{% widthratio listing.total_count 12 1 %}
So, how can I save the result into a variable?
{% set total_pages = widthratio listing.total_count 12 1 %}
but this one didn't work.
I would go with Rohan's comment, but if you want to write your own template tags, use an assignment_tag (https://docs.djangoproject.com/en/1.7/howto/custom-template-tags/#assignment-tags, available since Django 1.4).
@register.assignment_tag
def page_numbers(total_items, items_per_page):
    if not total_items:
        return 0
    # integer division rounds down (e.g. 1 // 12 == 0), so round up instead
    return (total_items + items_per_page - 1) // items_per_page
Within the template you would use:
{% page_numbers listing.total_count 12 as page_nrs %}
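Side note: assignment_tag was deprecated in Django 1.9 and removed in 2.0; on those versions the same tag is registered with simple_tag, which supports the identical as syntax, so only the decorator changes:
@register.simple_tag
def page_numbers(total_items, items_per_page):
    if not total_items:
        return 0
    # ceiling division: a partial page still counts as a page
    return (total_items + items_per_page - 1) // items_per_page
The template usage stays exactly the same.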

how to convert a unix timestamp to datetime in django's template?

First:
My database is MySQL, and the table already exists; it was previously used by PHP, and all of its time fields are of type unix_timestamp.
The query result returns a unix timestamp string. How can I convert the unix timestamp to a datetime in Django's template?
Second:
About regex, my code is here:
import re
pattern = re.compile("^\d+$")
if pattern.match(50):
    print 1
else:
    print 0
but it shows me a "TypeError". Why?
Thanks!
My English is very poor! I'm sorry.
Second:
import re
pattern = re.compile(r"^\d+$")
if pattern.match(u"50"):
    print 1
else:
    print 0
First:
I can offer a custom template filter, which converts a timestamp to a Python datetime object:
import datetime

@register.filter(name='fromunix')
def fromunix(value):
    return datetime.datetime.fromtimestamp(int(value))
Template:
{{ obj.unixtime|fromunix|date:"F" }}
https://docs.djangoproject.com/en/dev/howto/custom-template-tags/#registering-custom-filters
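As a quick sanity check of the conversion the filter performs (plain Python, outside Django; note that fromtimestamp() interprets the value in the server's local timezone, so the printed datetime depends on where the code runs):
import datetime

# an arbitrary example timestamp string, as it might come out of the MySQL column
print(datetime.datetime.fromtimestamp(int("1325376000")))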

The queryset's `count` is wrong after `extra`

When I use extra in a certain way on a Django queryset (call it qs), the result of qs.count() is different than len(qs.all()). To reproduce:
Make an empty Django project and app, then add a trivial model:
class Baz(models.Model):
    pass
Now make a few objects:
>>> Baz(id=1).save()
>>> Baz(id=2).save()
>>> Baz(id=3).save()
>>> Baz(id=4).save()
Using the extra method to select only some of them produces the expected count:
>>> Baz.objects.extra(where=['id > 2']).count()
2
>>> Baz.objects.extra(where=['-id < -2']).count()
2
But add a select clause to the extra and refer to it in the where clause, and the count is suddenly wrong, even though the result of all() is correct:
>>> Baz.objects.extra(select={'negid': '0 - id'}, where=['"negid" < -2']).all()
[<Baz: Baz object>, <Baz: Baz object>] # As expected
>>> Baz.objects.extra(select={'negid': '0 - id'}, where=['"negid" < -2']).count()
0 # Should be 2
I think the problem has to do with django.db.models.sql.query.BaseQuery.get_count(). It checks whether the BaseQuery's select or aggregate_select attributes have been set; if so, it uses a subquery. But django.db.models.sql.query.BaseQuery.add_extra adds only to the BaseQuery's extra attribute, not select or aggregate_select.
How can I fix the problem? I know I could just use len(qs.all()), but it would be nice to be able to pass the extra'ed queryset to other parts of the code, and those parts may call count() without knowing that it's broken.
Redefining get_count() and monkeypatching appears to fix the problem:
def get_count(self):
    """
    Performs a COUNT() query using the current filter constraints.
    """
    obj = self.clone()
    if len(self.select) > 1 or self.aggregate_select or self.extra:
        # If a select clause exists, then the query has already started to
        # specify the columns that are to be returned.
        # In this case, we need to use a subquery to evaluate the count.
        from django.db.models.sql.subqueries import AggregateQuery
        subquery = obj
        subquery.clear_ordering(True)
        subquery.clear_limits()
        obj = AggregateQuery(obj.model, obj.connection)
        obj.add_subquery(subquery)
    obj.add_count_column()
    number = obj.get_aggregation()[None]
    # Apply offset and limit constraints manually, since using LIMIT/OFFSET
    # in SQL (in variants that provide them) doesn't change the COUNT
    # output.
    number = max(0, number - self.low_mark)
    if self.high_mark is not None:
        number = min(number, self.high_mark - self.low_mark)
    return number
django.db.models.sql.query.BaseQuery.get_count = quuux.get_count
Testing:
>>> Baz.objects.extra(select={'negid': '0 - id'}, where=['"negid" < -2']).count()
2
Updated to work with Django 1.2.1:
def basequery_get_count(self, using):
    """
    Performs a COUNT() query using the current filter constraints.
    """
    obj = self.clone()
    if len(self.select) > 1 or self.aggregate_select or self.extra:
        # If a select clause exists, then the query has already started to
        # specify the columns that are to be returned.
        # In this case, we need to use a subquery to evaluate the count.
        from django.db.models.sql.subqueries import AggregateQuery
        subquery = obj
        subquery.clear_ordering(True)
        subquery.clear_limits()
        obj = AggregateQuery(obj.model)
        obj.add_subquery(subquery, using=using)
    obj.add_count_column()
    number = obj.get_aggregation(using=using)[None]
    # Apply offset and limit constraints manually, since using LIMIT/OFFSET
    # in SQL (in variants that provide them) doesn't change the COUNT
    # output.
    number = max(0, number - self.low_mark)
    if self.high_mark is not None:
        number = min(number, self.high_mark - self.low_mark)
    return number
models.sql.query.Query.get_count = basequery_get_count
I'm not sure if this fix will have other unintended consequences, however.
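For what it's worth, on Django 1.8 and later the same query can usually be expressed without extra() at all, which sidesteps the count() discrepancy entirely. A sketch using annotate (the negid name just mirrors the example above):
from django.db.models import ExpressionWrapper, F, IntegerField

qs = Baz.objects.annotate(
    negid=ExpressionWrapper(0 - F('id'), output_field=IntegerField()),
).filter(negid__lt=-2)

qs.count()  # should give 2 for the four objects created above, matching len(qs)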