How can i give limit in django? - django

I'm making a project and want to use limit for fetch data how can i use limit if there are any function or any way i can give limit to my fetching data
i expect the output of (2019, 12, 27)(2019, 6, 30) to be (2019, 12, 27) but it fetching all records
def maintenancefunction(request): #maintenance page function
if 'user' not in request.session:
return redirect('/login')
else:
if request.session.has_key('user'):
abc=request.session['user']
today = date(2019,1,1) # today= date.today.().strftime('%d/%m/%Y')
next_date=today.strftime('%Y-%m-%d')
lastdate= today + timedelta(days=180)
new_date= lastdate.strftime('%Y-%m-%d')
duedate=maintanance_table.objects.values_list('maintanance_todate').filter(user_email=abc).order_by('maintanance_todate').reverse()
# # newduedate=duedate.strftime('%Y-%m-%d')
print("DueDate:",duedate)
checkstatus=maintanance_table.objects.filter(user_email=abc).filter(maintanance_status="PAID").order_by('maintanance_todate').reverse()
if checkstatus:
lastdate = lastdate + timedelta(days=180)
new_date = lastdate.strftime('%Y-%m-%d')
else:
lastdate=lastdate
new_date= lastdate.strftime('%Y-%m-%d')
return render(request,"maintenance.html", {'abc':abc,'new_date':new_date})
else:
return render(request,"login.html")
return render(request,"maintenance.html")

You can add range at the end of the query like [1-10] and if you want the first record then just put [0] at the end of the query. If you want specific record then put its number at the end of the query like [5] or [3] etc.
duedate=maintanance_table.objects.values_list('maintanance_todate').filter(user_email=abc).order_by('maintanance_todate').reverse()[1-10]
checkstatus=maintanance_table.objects.filter(user_email=abc).filter(maintanance_status="PAID").order_by('maintanance_todate').reverse()[1-10]

Related

Django toolbar: not showing the results for query with filter with greater than

I have a simple query in Django. I have Django toolbar installed to check the SQL queries and the corresponding data
My model:
class RecipePosition(models.Model):
name = models.CharField(max_length=200,blank=True,help_text="If left blank will be same as Ingredient name Eg: Tomato pulp")
mass_quantity = models.DecimalField(max_digits=19, decimal_places=10,null=True,blank=True,default=0,validators=[MinValueValidator(0)])
title = models.CharField(max_length=200,blank=True)
updated = models.DateTimeField(auto_now=True, auto_now_add=False)
timestamp = models.DateTimeField(auto_now=False, auto_now_add=True)
I have the below django query with filter.
RecipePosition.objects.all().filter(mass_quantity__gt = 0)
Django gets all the objects whose mass_quantity is greater than 0.
But when i check the sql in the django - toolbar. it shows:
SELECT "recipes_recipeposition"."id",
"recipes_recipeposition"."name",
"recipes_recipeposition"."mass_quantity",
"recipes_recipeposition"."title",
"recipes_recipeposition"."updated",
"recipes_recipeposition"."timestamp"
FROM "recipes_recipeposition"
WHERE "recipes_recipeposition"."mass_quantity" > 'Decimal(''0'')'
ORDER BY "recipes_recipeposition"."sequence_number" ASC
I tried this command in sqlite browser also, but it didn't show any results.
Why django-toolbar is not showing the correct SQL?
As per me the sql should be:
SELECT "recipes_recipeposition"."id",
"recipes_recipeposition"."name",
"recipes_recipeposition"."mass_quantity",
"recipes_recipeposition"."title",
"recipes_recipeposition"."updated",
"recipes_recipeposition"."timestamp"
FROM "recipes_recipeposition"
WHERE "recipes_recipeposition"."mass_quantity" > 0
ORDER BY "recipes_recipeposition"."sequence_number" ASC
and this when tested in sqlite browser shows the results.
Also when I tested this on shell_plus with --print-sql --ipython shows
$ python manage.py shell_plus --print-sql --ipython
System check identified some issues:
# Shell Plus Model Imports
from recipes.models import Recipe, RecipePosition
Python 3.6.4 (default, Jan 5 2018, 02:35:40)
Type 'copyright', 'credits' or 'license' for more information
IPython 6.5.0 -- An enhanced Interactive Python. Type '?' for help.
In [1]: RecipePosition.objects.all().filter(mass_quantity__gt=0)
Out[1]: SELECT "recipes_recipeposition"."id",
"recipes_recipeposition"."name",
"recipes_recipeposition"."mass_quantity",
"recipes_recipeposition"."title",
"recipes_recipeposition"."updated",
"recipes_recipeposition"."timestamp"
FROM "recipes_recipeposition"
WHERE "recipes_recipeposition"."mass_quantity" > '0'
ORDER BY "recipes_recipeposition"."sequence_number" ASC
LIMIT 21
Only on django-toolbar it shows Decimal() thing here on Django shell it shows WHERE "recipes_recipeposition"."mass_quantity" > '0'
I also tried debugsqlshell as mentioned in the django-toolbar documentation. Its shows "recipes_recipeposition"."mass_quantity" > '0' rather than "recipes_recipeposition"."mass_quantity" > 'Decimal(''0'')'
$ python manage.py debugsqlshell
Python 3.6.4 (default, Jan 5 2018, 02:35:40)
Type 'copyright', 'credits' or 'license' for more information
IPython 6.5.0 -- An enhanced Interactive Python. Type '?' for help.
In [2]: from recipes.models import Recipe, RecipePosition
In [3]: RecipePosition.objects.all().filter(mass_quantity__gt = 0)
Out[3]: SELECT "recipes_recipeposition"."id",
"recipes_recipeposition"."name",
"recipes_recipeposition"."mass_quantity",
"recipes_recipeposition"."title",
"recipes_recipeposition"."updated",
"recipes_recipeposition"."timestamp"
FROM "recipes_recipeposition"
WHERE "recipes_recipeposition"."mass_quantity" > '0'
ORDER BY "recipes_recipeposition"."sequence_number" ASC
LIMIT 21 [1.58ms]
I dont know why django-toobar is using "recipes_recipeposition"."mass_quantity" > 'Decimal(''0'')' instead of "recipes_recipeposition"."mass_quantity" > '0'
I want to rely on django-toolbar, but now i am worried.
Good news, I think you need to make the most minute of changes!
Instead of:
RecipePosition.objects.all().filter(mass_quantity__gt = 0)
You require:
RecipePosition.objects.all().filter(mass_quantity__gt=0.0)
Finally after lot of struggle and going through the code. I did the following changes in the source code and then everything worked the way i want.
####################
http://127.0.0.1:8001/static/debug_toolbar/css/toolbar.css
# By doing this the sql will show in multiple lines with indent
#djDebug .djDebugSql {
#word-break:break-word;
z-index:100000002;
}
#####################
# replace \n with <br> and space with nbsp and dont use Boldkeyword
# By doing this the sql will show in multiple lines with indent
def reformat_sql(sql):
stack = sqlparse.engine.FilterStack()
options = formatter.validate_options({'reindent':True,'indent_width':True,})
stack = formatter.build_filter_stack(stack, options)
#stack.preprocess.append(BoldKeywordFilter()) # add our custom filter
stack.postprocess.append(sqlparse.filters.SerializerUnicode()) # tokens -> strings
#return swap_fields(''.join(stack.run(sql)))
return swap_fields(''.join(stack.run(sql)).replace("\n", "<br/>").replace(" ", " "))
#####################
in file /lib/python3.6/site-packages/debug_toolbar/panels/sql/tracking.py
# because of this the greater than 0 is shown as deciman(0.0)
# change for decimal wrap p in rev_typecast_decimal
def _record(self, method, sql, params):
start_time = time()
try:
return method(sql, params)
finally:
stop_time = time()
duration = (stop_time - start_time) * 1000
if dt_settings.get_config()['ENABLE_STACKTRACES']:
stacktrace = tidy_stacktrace(reversed(get_stack()))
else:
stacktrace = []
_params = ''
try:
_params = json.dumps([self._decode(rev_typecast_decimal(p)) for p in params])
#_params = json.dumps([self._decode(p) for p in params]) I
###########################
hare = []
if params is not None:
hare = [self._decode(rev_typecast_decimal(p)) for p in params]
try:
_params = json.dumps([self._decode(rev_typecast_decimal(p)) for p in params])
# _params = json.dumps([self._decode(p) for p in params])
except Exception:
pass # object not JSON serializable
template_info = get_template_info()
alias = getattr(self.db, 'alias', 'default')
conn = self.db.connection
vendor = getattr(conn, 'vendor', 'unknown')
params1 = {
'vendor': vendor,
'alias': alias,
# 'sql': self.db.ops.last_executed_query(
# self.cursor, sql, self._quote_params(params)),
'sql': self.db.ops.last_executed_query(
self.cursor, sql, hare),
'duration': duration,
'raw_sql': sql,
'params': _params,
'stacktrace': stacktrace,
'start_time': start_time,
'stop_time': stop_time,
'is_slow': duration > dt_settings.get_config()['SQL_WARNING_THRESHOLD'],
'is_select': sql.lower().strip().startswith('select'),
'template_info': template_info,
}
################################################
The final out put looks like, with decimal(0.0) replaces with 0 and well formatted sql
SELECT "recipes_recipeposition"."id",
"recipes_recipeposition"."name",
"recipes_recipeposition"."recipe_id",
"recipes_recipeposition"."ingredient_id",
"recipes_recipeposition"."recipeposition_slug",
"recipes_recipeposition"."cooking_unit",
"recipes_recipeposition"."mass_unit_id",
"recipes_recipeposition"."mass_quantity",
"recipes_recipeposition"."volume_unit_id",
"recipes_recipeposition"."volume_quantity",
"recipes_recipeposition"."pieces_unit_id",
"recipes_recipeposition"."pieces_quantity",
"recipes_recipeposition"."cooking_notes",
"recipes_recipeposition"."sequence_number",
"recipes_recipeposition"."title",
"recipes_recipeposition"."updated",
"recipes_recipeposition"."timestamp",
"ingredients_ingredient"."rate" AS "ingredient__rate",
CASE
WHEN "ingredients_ingredient"."munit" = 'kg' THEN 'kg'
WHEN "ingredients_ingredient"."munit" = 'ltr' THEN 'ltr'
WHEN "ingredients_ingredient"."munit" = 'pcs' THEN 'pcs'
ELSE 'False'
END AS "ingredient__cost_unit",
CASE
WHEN "ingredients_ingredient"."munit" = 'kg' THEN CASE
WHEN ("recipes_recipeposition"."mass_unit_id" IS NOT NULL
AND "recipes_recipeposition"."mass_quantity" IS NOT NULL
AND "recipes_recipeposition"."mass_quantity" > '0') THEN CAST(("recipes_recipeposition"."mass_quantity" * "single_measurements_singlemeasurements"."quantity") AS NUMERIC)
ELSE 'False'
END
WHEN "ingredients_ingredient"."munit" = 'ltr' THEN 'ltr'
WHEN "ingredients_ingredient"."munit" = 'pcs' THEN 'pcs'
ELSE 'False'
END AS "reciposition_cost_quantity"
FROM "recipes_recipeposition"
LEFT OUTER JOIN "ingredients_ingredient" ON ("recipes_recipeposition"."ingredient_id" = "ingredients_ingredient"."id")
LEFT OUTER JOIN "single_measurements_singlemeasurements" ON ("recipes_recipeposition"."mass_unit_id" = "single_measurements_singlemeasurements"."id")
WHERE "recipes_recipeposition"."recipe_id" = '1'
ORDER BY "recipes_recipeposition"."sequence_number" ASC

Formatting multiple lines of PYsnmp 4.4 print output to rows and columns Python 2.7

I'm new to python and looking for some assistance on formatting print output to rows and columns. This data will eventually be sent to csv file.
The script will grab data from multiple hosts. The number of lines is variable as well as the length of the interface name and description.
Currently the output looks like this:
hostname IF-MIB::ifDescr.1 = GigabitEthernet0/0/0<br/>
hostname IF-MIB::ifAlias.1 = --> InterfaceDesc<br/>
hostname IF-MIB::ifOperStatus.1 = 'up'<br/>
hostname IF-MIB::ifDescr.2 = GigabitEthernet0/0/1<br/>
hostname IF-MIB::ifAlias.2 = --> InterfaceDesc<br/>
hostname IF-MIB::ifOperStatus.2 = 'up'<br/>
hostname IF-MIB::ifDescr.3 = GigabitEthernet0/0/2<br/>
hostname IF-MIB::ifAlias.3 = --> InterfaceDesc<br/>
hostname IF-MIB::ifOperStatus.3 = 'up'<br/>
I'm trying to format it to the following rows and columns with headers of each row(hostname, interface, interface desc, and status).
hostname interface interface desc status
hostname GigabitEthernet0/0/0 InterfaceDesc up
hostname GigabitEthernet0/0/1 InterfaceDesc up
hostname GigabitEthernet0/0/2 InterfaceDesc up
The print code I currently have is here. I want to keep the print statements for errors.
for errorIndication, errorStatus, errorIndex, varBinds in snmp_iter:
# Check for errors and print out results
if errorIndication:
print(errorIndication)
elif errorStatus:
print('%s at %s' % (errorStatus.prettyPrint(),
errorIndex and varBinds[int(errorIndex) - 1][0] or '?'))
else:
for varBind in varBinds:
print(hostip),
print(' = '.join([x.prettyPrint() for x in varBind]))
Full script:
from pysnmp.hlapi import *
routers = ["router1"]
#adds routers to bulkCmd
def snmpquery (hostip):
snmp_iter = bulkCmd(SnmpEngine(),
CommunityData('Community'),
UdpTransportTarget((hostip, 161)),
ContextData(),
0, 50, # fetch up to 50 OIDs
ObjectType(ObjectIdentity('IF-MIB', 'ifDescr')),
ObjectType(ObjectIdentity('IF-MIB', 'ifAlias')),
ObjectType(ObjectIdentity('IF-MIB', 'ifOperStatus')),
lexicographicMode=False) # End bulk request once outside of OID child objects
for errorIndication, errorStatus, errorIndex, varBinds in snmp_iter:
# Check for errors and print out results
if errorIndication:
print(errorIndication)
elif errorStatus:
print('%s at %s' % (errorStatus.prettyPrint(),
errorIndex and varBinds[int(errorIndex) - 1][0] or '?'))
else:
for rowId, varBind in enumerate(varBindTable):
oid, value = varBind
print('%20.20s' % value)
if not rowId and rowId % 3 == 0:
print('\n')
# calls snmpquery for all routers in list
for router in routers:
snmpquery(router)
Any help you can provide is much appreciated.
Thanks!
Assuming the snmp_iter is initialized with three SNMP table columns:
snmp_iter = bulkCmd(SnmpEngine(),
UsmUserData('usr-md5-des', 'authkey1', 'privkey1'),
Udp6TransportTarget(('demo.snmplabs.com', 161)),
ContextData(),
0, 25,
ObjectType(ObjectIdentity('IF-MIB', 'ifDescr')),
ObjectType(ObjectIdentity('IF-MIB', 'ifAlias')),
ObjectType(ObjectIdentity('IF-MIB', 'ifOperStatus')))
you can be sure that (for the GETNEXT and GETBULK commands) pysnmp always returns rectangular table in a row by row fashion.
Knowing the number of the columns you have requested (3) you should be able to print the output row by row:
for rowId, varBind in enumerate(varBindTable):
oid, value = varBind
print('%20.20s' % value)
if not rowId and rowId % 3 == 0:
print('\n')

Get objects created in last 30 days, for each past day

I am looking for fast method to count model's objects created within past 30 days, for each day separately. For example:
27.07.2013 (today) - 3 objects created
26.07.2013 - 0 objects created
25.07.2013 - 2 objects created
...
27.06.2013 - 1 objects created
I am going to use this data in google charts API. Have you any idea how to get this data efficiently?
items = Foo.objects.filter(createdate__lte=datetime.datetime.today(), createdate__gt=datetime.datetime.today()-datetime.timedelta(days=30)).\
values('createdate').annotate(count=Count('id'))
This will (1) filter results to contain the last 30 days, (2) select just the createdate field and (3) count the id's, grouping by all selected fields (i.e. createdate). This will return a list of dictionaries of the format:
[
{'createdate': <datetime.date object>, 'count': <int>},
{'createdate': <datetime.date object>, 'count': <int>},
...
]
EDIT:
I don't believe there's a way to get all dates, even those with count == 0, with just SQL. You'll have to insert each missing date through python code, e.g.:
import datetime
# needed to use .append() later on
items = list(items)
dates = [x.get('createdate') for x in items]
for d in (datetime.datetime.today() - datetime.timedelta(days=x) for x in range(0,30)):
if d not in dates:
items.append({'createdate': d, 'count': 0})
I think this can be somewhat more optimized solution with #knbk 's solution with python. This has fewer iterations and iterations inside SET is highly optimized in python (both in processing and in CPU-cycles).
from_date = datetime.date.today() - datetime.timedelta(days=7)
orders = Order.objects.filter(created_at=from_date, dealer__executive__branch__user=user)
orders = orders.annotate(count=Count('id')).values('created_at').order_by('created_at')
if len(orders) < 7:
orders_list = list(orders)
dates = set([(datetime.date.today() - datetime.timedelta(days=i)) for i in range(6)])
order_set = set([ord['created_at'] for ord in orders])
for dt in (order_set - dates):
orders_list.append({'created_at': dt, 'count': 0})
orders_list = sorted(orders_list, key=lambda item: item['created_at'])
else:
orders_list = orders

django-rating filtering in django

I am trying to do something pretty simple and trivial but with no luck.
I am using django-rating to rate specific objects on my site.
On my model which I wanted to rate I have a field :
rating = RatingField(range=5)
Now , all I want is to filter all of the objects which have a rate of 2 and aobve for example.
If rating was IntegerField for example, I would only need to do :
objects.filter( rating__gte = 2)
how can I do the same using django-rating ?
Reading django-rate documentation I found this trick to sort by rate:
# In this example, ``rating`` is the attribute name for your ``RatingField``
qs = qs.extra(select={
'rating': '((100/%s*rating_score/(rating_votes+%s))+100)/2'
% (MyModel.rating.range, MyModel.rating.weight)
})
qs = qs.order_by('-rating')
Perhaps you can modify this code sample and use extra where to get your results:
qs = qs.extra(where=[
'((100/%s*rating_score/(rating_votes+%s))+100)/2 >= 2 ' %
(MyModel.rating.range, MyModel.rating.weight) ,
])

The queryset's `count` is wrong after `extra`

When I use extra in a certain way on a Django queryset (call it qs), the result of qs.count() is different than len(qs.all()). To reproduce:
Make an empty Django project and app, then add a trivial model:
class Baz(models.Model):
pass
Now make a few objects:
>>> Baz(id=1).save()
>>> Baz(id=2).save()
>>> Baz(id=3).save()
>>> Baz(id=4).save()
Using the extra method to select only some of them produces the expected count:
>>> Baz.objects.extra(where=['id > 2']).count()
2
>>> Baz.objects.extra(where=['-id < -2']).count()
2
But add a select clause to the extra and refer to it in the where clause, and the count is suddenly wrong, even though the result of all() is correct:
>>> Baz.objects.extra(select={'negid': '0 - id'}, where=['"negid" < -2']).all()
[<Baz: Baz object>, <Baz: Baz object>] # As expected
>>> Baz.objects.extra(select={'negid': '0 - id'}, where=['"negid" < -2']).count()
0 # Should be 2
I think the problem has to do with django.db.models.sql.query.BaseQuery.get_count(). It checks whether the BaseQuery's select or aggregate_select attributes have been set; if so, it uses a subquery. But django.db.models.sql.query.BaseQuery.add_extra adds only to the BaseQuery's extra attribute, not select or aggregate_select.
How can I fix the problem? I know I could just use len(qs.all()), but it would be nice to be able to pass the extra'ed queryset to other parts of the code, and those parts may call count() without knowing that it's broken.
Redefining get_count() and monkeypatching appears to fix the problem:
def get_count(self):
"""
Performs a COUNT() query using the current filter constraints.
"""
obj = self.clone()
if len(self.select) > 1 or self.aggregate_select or self.extra:
# If a select clause exists, then the query has already started to
# specify the columns that are to be returned.
# In this case, we need to use a subquery to evaluate the count.
from django.db.models.sql.subqueries import AggregateQuery
subquery = obj
subquery.clear_ordering(True)
subquery.clear_limits()
obj = AggregateQuery(obj.model, obj.connection)
obj.add_subquery(subquery)
obj.add_count_column()
number = obj.get_aggregation()[None]
# Apply offset and limit constraints manually, since using LIMIT/OFFSET
# in SQL (in variants that provide them) doesn't change the COUNT
# output.
number = max(0, number - self.low_mark)
if self.high_mark is not None:
number = min(number, self.high_mark - self.low_mark)
return number
django.db.models.sql.query.BaseQuery.get_count = quuux.get_count
Testing:
>>> Baz.objects.extra(select={'negid': '0 - id'}, where=['"negid" < -2']).count()
2
Updated to work with Django 1.2.1:
def basequery_get_count(self, using):
"""
Performs a COUNT() query using the current filter constraints.
"""
obj = self.clone()
if len(self.select) > 1 or self.aggregate_select or self.extra:
# If a select clause exists, then the query has already started to
# specify the columns that are to be returned.
# In this case, we need to use a subquery to evaluate the count.
from django.db.models.sql.subqueries import AggregateQuery
subquery = obj
subquery.clear_ordering(True)
subquery.clear_limits()
obj = AggregateQuery(obj.model)
obj.add_subquery(subquery, using=using)
obj.add_count_column()
number = obj.get_aggregation(using=using)[None]
# Apply offset and limit constraints manually, since using LIMIT/OFFSET
# in SQL (in variants that provide them) doesn't change the COUNT
# output.
number = max(0, number - self.low_mark)
if self.high_mark is not None:
number = min(number, self.high_mark - self.low_mark)
return number
models.sql.query.Query.get_count = basequery_get_count
I'm not sure if this fix will have other unintended consequences, however.