unsupported operand types in django

First date:
d = datetime.datetime.utcnow().replace(tzinfo=utc)
Second date (model field):
checkin = models.DateTimeField(default=timezone.now)
e = Checkin.objects.all().values()
t = last value in 'e'
co = d.time()
ci = t.time()
I want the difference between 'co' and 'ci'.

It looks like you probably need to make both of your datetime objects time zone aware.
I have a local Page model that has a DateTimeField called first_published_at. Here's how I handle it:
target_tz = datetime.timezone.utc
now_dt = datetime.datetime.utcnow().replace(tzinfo=target_tz)
inst = m.Page.objects.get(pk=18)
model_dt = inst.first_published_at.replace(tzinfo=target_tz)
print(now_dt - model_dt) # 734 days, 12:46:53.059321
Or you could make both of them timezone naive:
target_tz = None
now_dt = datetime.datetime.utcnow().replace(tzinfo=target_tz)
inst = m.Page.objects.get(pk=18)
model_dt = inst.first_published_at.replace(tzinfo=target_tz)
print(now_dt - model_dt) # Same as above
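For completeness, here is a minimal sketch applied to the Checkin model from the question. It assumes the model and field names from the question (Checkin, checkin) and that USE_TZ is enabled, and it subtracts the full aware datetimes rather than bare time objects (two datetime.time values cannot be subtracted directly):
from django.utils import timezone

now_dt = timezone.now()                    # timezone-aware "checkout" moment
last = Checkin.objects.latest('checkin')   # assumes 'checkin' is the DateTimeField
delta = now_dt - last.checkin              # aware datetime - aware datetime -> timedelta
print(delta.total_seconds() / 60, "minutes")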

Related

Django queryset from raw SQL

I want an equivalent of this SQL query in Django:
SELECT Gender, ServCode
FROM [openimisproductTestDb_16_08_22].[dbo].[tblInsuree]
JOIN [openimisproductTestDb_16_08_22].[dbo].[tblServices] ON [openimisproductTestDb_16_08_22].[dbo].[tblInsuree].AuditUserID = [openimisproductTestDb_16_08_22].[dbo].[tblServices].AuditUserID
WHERE Gender = 'F'
AND ServCode = 'F4'
What I have tried:
def assisted_birth_with_cs_query(user, **kwargs):
    date_from = kwargs.get("date_from")
    date_to = kwargs.get("date_to")
    hflocation = kwargs.get("hflocation")
    format = "%Y-%m-%d"
    date_from_object = datetime.datetime.strptime(date_from, format)
    date_from_str = date_from_object.strftime("%d/%m/%Y")
    date_to_object = datetime.datetime.strptime(date_to, format)
    date_to_str = date_to_object.strftime("%d/%m/%Y")
    dictBase = {
        "dateFrom": date_from_str,
        "dateTo": date_to_str,
    }
    dictGeo = {}
    if hflocation and hflocation != "0":
        hflocationObj = HealthFacility.objects.filter(
            code=hflocation,
            validity_to__isnull=True
        ).first()
        dictBase["fosa"] = hflocationObj.name
    claimItem = Insuree.objects.filter(
        validity_from__gte=date_from,
        validity_to__lte=date_to,
        **dictGeo,
        gender='F'
    ).count()
    data = Service.objects.filter(code='F4').count() | Insuree.objects.filter(gender='F').count()
    dictGeo['health_facility'] = hflocationObj.id
    dictBase["post"] = str(data)
    return dictBase
I tried this, but it just adds the two counts, whereas I want the women from the Insuree table whose AuditUserID also appears with the F4 code in the Service table. Both tables have the AuditUserID column in common.
It would be great if you could add the models, to better see the relation between Insuree and Service. Assuming it's a 1-M relationship, I'd go with this query:
Service.objects.filter(code='F4', insuree__gender='F').count()
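If there is no ForeignKey between the two models and they only share the AuditUserID column, a subquery on that column is one way to express the join in the ORM. This is only a sketch; the field names (audit_user_id, gender, code) are assumptions based on the SQL above:
f4_user_ids = Service.objects.filter(code='F4').values('audit_user_id')
count = Insuree.objects.filter(gender='F', audit_user_id__in=f4_user_ids).count()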

Problem when updating a table using celery task: OperationalError

EDIT 2022-10-04 18:40
I've tried using bulk_update and bulk_create, as these methods only query the database once, but I still have the same issue.
I would appreciate any help or explanation on this issue.
'''
Task to edit data correction forms (DCF) online
'''
@shared_task(bind=True)
def DCF_edition(self):
    print(timezone.now())
    DCF_BEFORE_UPDATE = pd.DataFrame.from_records(DataCorrectionForm.objects.all().values())
    if not DCF_BEFORE_UPDATE.empty:
        DCF_BEFORE_UPDATE = DCF_BEFORE_UPDATE.rename(columns={"patient": "pat"})
        DCF_BEFORE_UPDATE = DCF_BEFORE_UPDATE.astype({'record_date': str, 'created_date': str})
        DCF_BEFORE_UPDATE['dcf_status'] = DCF_BEFORE_UPDATE.apply(lambda status: 0, axis=1)
    # list of dataframes to concat
    data = []
    # load queries definition
    queries = queries_definitions()
    # print(queries)
    if not queries.empty:
        for index, row in queries.iterrows():
            print('Query ide', row['ide'])
            # print(row['ide'], row['query_type'], row['crf_name'].lower(), row['crf identification date'], row['variable_name'], row['variable_label'], row['query_condition'], row['fields_to_display'])
            try:
                missing_or_inconsistent = missing_or_inconsistent_data(row['ide'], row['query_type'], row['crf_name'].lower(), row['crf identification date'], row['variable_name'], row['variable_label'], row['query_condition'], row['fields_to_display'])  # .iloc[:10] to limit rows
                missing_or_inconsistent.columns.values[2] = 'record_date'  # rename the date column (that has the database name)
                missing_or_inconsistent['dcf_ide'] = str(row['ide']) + '_' + row['variable_name'] + '_' + missing_or_inconsistent[row['crf primary key']].astype(str)
                missing_or_inconsistent['category'] = row['query_type']
                missing_or_inconsistent['crf'] = row['crf_name']
                missing_or_inconsistent['crf_ide'] = missing_or_inconsistent[row['crf primary key']]
                missing_or_inconsistent['field_name'] = row['variable_name']
                missing_or_inconsistent['field_label'] = row['variable_label']
                missing_or_inconsistent['field_value'] = missing_or_inconsistent[row['variable_name']]
                missing_or_inconsistent['message'] = row['query_message']
                missing_or_inconsistent['query_id'] = 'Query ide ' + str(row['ide'])
                missing_or_inconsistent['dcf_status'] = 1
                missing_or_inconsistent['created_date'] = timezone.now()
                missing_or_inconsistent['deactivated'] = False
                missing_or_inconsistent['comments'] = None
                data.append(missing_or_inconsistent[['ide', 'dcf_ide', 'category', 'crf', 'crf_ide', 'pat', 'record_date', 'field_name', 'field_label', 'message', 'field_value', 'dcf_status', 'created_date', 'query_id', 'deactivated', 'comments']])
                dcf = pd.concat(data)
            except Exception as e:
                Log.objects.create(dcf_edition_status=0, dcf_edition_exception=str(e)[:200])
                continue
        DCF_AFTER_UPDATE = pd.concat([DCF_BEFORE_UPDATE, dcf])
        DCF_AFTER_UPDATE['duplicate'] = DCF_AFTER_UPDATE.duplicated(subset=['dcf_ide'], keep='last')
        DCF_AFTER_UPDATE['dcf_status'] = DCF_AFTER_UPDATE.apply(lambda row: 2 if row['duplicate'] else row['dcf_status'], axis=1)
        DCF_AFTER_UPDATE = DCF_AFTER_UPDATE.drop_duplicates(subset=['dcf_ide'], keep='first').drop(columns=['duplicate'])
        DCF_AFTER_UPDATE.rename(columns={'pat': 'patient'}, inplace=True)
        # Cast dates into string format to be able to dump the data
        DCF_AFTER_UPDATE = DCF_AFTER_UPDATE.astype({'record_date': str}) if not DCF_AFTER_UPDATE.empty else DCF_AFTER_UPDATE
        records_to_update = [
            DataCorrectionForm(
                ide=record['ide'],
                dcf_ide=record['dcf_ide'],
                category=record['category'],
                crf=record['crf'],
                crf_ide=record['crf_ide'],
                patient=record['patient'],
                record_date=record['record_date'],
                field_name=record['field_name'],
                field_label=record['field_label'],
                message=record['message'],
                field_value=record['field_value'],
                dcf_status=record['dcf_status'],
                created_date=record['created_date'],
                query_id=record['query_id'],
                deactivated=record['deactivated'],
                comments=record['comments']
            ) for i, record in DCF_AFTER_UPDATE[(DCF_AFTER_UPDATE['dcf_status'] != 1)].iterrows()
        ]
        if records_to_update:
            DataCorrectionForm.objects.bulk_update(records_to_update, ['dcf_status'])
        records_to_create = [
            DataCorrectionForm(
                dcf_ide=record['dcf_ide'],
                category=record['category'],
                crf=record['crf'],
                crf_ide=record['crf_ide'],
                patient=record['patient'],
                record_date=record['record_date'],
                field_name=record['field_name'],
                field_label=record['field_label'],
                message=record['message'],
                field_value=record['field_value'],
                dcf_status=record['dcf_status'],
                created_date=record['created_date'],
                query_id=record['query_id'],
                deactivated=record['deactivated'],
                comments=record['comments']
            ) for i, record in DCF_AFTER_UPDATE[(DCF_AFTER_UPDATE['dcf_status'] == 1)].iterrows()
        ]
        if records_to_create:
            DataCorrectionForm.objects.bulk_create(records_to_create)
EDIT 2022-10-04 13:40
I've tried to optimize the code using the update_or_create() method, but it doesn't change anything.
I still have an OperationalError on the line DataCorrectionForm.objects.update_or_create(...)
How can I update my database?
'''
Task to edit data correction forms (DCF) online
'''
@shared_task(bind=True)
def DCF_edition(self):
    DCF_BEFORE_UPDATE = pd.DataFrame.from_records(DataCorrectionForm.objects.all().values())
    if not DCF_BEFORE_UPDATE.empty:
        DCF_BEFORE_UPDATE.drop(columns=['ide'])
        DCF_BEFORE_UPDATE = DCF_BEFORE_UPDATE.rename(columns={"patient": "pat"})
        DCF_BEFORE_UPDATE = DCF_BEFORE_UPDATE.astype({'record_date': str, 'created_date': str})
        DCF_BEFORE_UPDATE['dcf_status'] = DCF_BEFORE_UPDATE.apply(lambda status: 0, axis=1)
    # list of dataframes to concat
    data = []
    # load queries definition
    queries = queries_definitions()
    if not queries.empty:
        for index, row in queries.iterrows():
            try:
                missing_or_inconsistent = missing_or_inconsistent_data(row['ide'], row['query_type'], row['crf_name'].lower(), row['crf identification date'], row['variable_name'], row['variable_label'], row['query_condition'], row['fields_to_display'])  # .iloc[:10] to limit rows
                missing_or_inconsistent.columns.values[2] = 'record_date'  # rename the date column (that has the database name)
                missing_or_inconsistent['dcf_ide'] = str(row['ide']) + '_' + row['variable_name'] + '_' + missing_or_inconsistent[row['crf primary key']].astype(str)
                missing_or_inconsistent['category'] = row['query_type']
                missing_or_inconsistent['crf'] = row['crf_name']
                missing_or_inconsistent['crf_ide'] = missing_or_inconsistent[row['crf primary key']]
                missing_or_inconsistent['field_name'] = row['variable_name']
                missing_or_inconsistent['field_label'] = row['variable_label']
                missing_or_inconsistent['field_value'] = missing_or_inconsistent[row['variable_name']]
                missing_or_inconsistent['message'] = row['query_message']
                missing_or_inconsistent['DEF'] = 'Query ide ' + str(row['ide'])
                missing_or_inconsistent['dcf_status'] = 1
                missing_or_inconsistent['created_date'] = timezone.now()
                missing_or_inconsistent['deactivated'] = False
                missing_or_inconsistent['comments'] = None
                data.append(missing_or_inconsistent[['dcf_ide', 'category', 'crf', 'crf_ide', 'pat', 'record_date', 'field_name', 'field_label', 'message', 'field_value', 'dcf_status', 'created_date', 'DEF', 'deactivated', 'comments']])
                dcf = pd.concat(data)
            except Exception as e:
                Log.objects.create(dcf_edition_status=0, dcf_edition_exception=str(e)[:200])
                continue
        DCF_AFTER_UPDATE = pd.concat([DCF_BEFORE_UPDATE, dcf])
        DCF_AFTER_UPDATE['duplicate'] = DCF_AFTER_UPDATE.duplicated(subset=['dcf_ide'], keep='last')
        DCF_AFTER_UPDATE['dcf_status'] = DCF_AFTER_UPDATE.apply(lambda row: 2 if row['duplicate'] else row['dcf_status'], axis=1)
        DCF_AFTER_UPDATE = DCF_AFTER_UPDATE.drop_duplicates(subset=['dcf_ide'], keep='first').drop(columns=['duplicate'])
        DCF_AFTER_UPDATE = DCF_AFTER_UPDATE.drop(['DEF'], axis=1)
        DCF_AFTER_UPDATE.rename(columns={'pat': 'patient'}, inplace=True)
        # Cast dates into string format to be able to dump the data
        DCF_AFTER_UPDATE = DCF_AFTER_UPDATE.astype({'record_date': str}) if not DCF_AFTER_UPDATE.empty else DCF_AFTER_UPDATE
        records = DCF_AFTER_UPDATE.to_dict(orient='records')
        for record in records:
            DataCorrectionForm.objects.update_or_create(
                dcf_ide=record['dcf_ide'],  # lookup used to search for existing objects => must not be passed in defaults (otherwise IntegrityError)
                defaults={
                    'category': record['category'],
                    'crf': record['crf'],
                    'crf_ide': record['crf_ide'],
                    'patient': record['patient'],
                    'record_date': record['record_date'],
                    'field_name': record['field_name'],
                    'field_label': record['field_label'],
                    'message': record['message'],
                    'field_value': record['field_value'],
                    'dcf_status': record['dcf_status'],
                    'created_date': record['created_date'],
                    # 'DEF': record['DEF'],
                    'deactivated': record['deactivated'],
                    'comments': record['comments']
                }
            )
    Log.objects.create(dcf_edition_status=1)
    return True
EDIT 2022-10-03 17:00
In fact, reading the CAVEATS section of the documentation:
The development server creates a new thread for each request it
handles, negating the effect of persistent connections. Don’t enable
them during development.
EDIT 2022-10-03 16:00
Django 2.2.5
I have tried to set the DATABASES parameter CONN_MAX_AGE as per the Django documentation, but it doesn't change anything:
Default: 0
The lifetime of a database connection, as an integer of seconds. Use 0
to close database connections at the end of each request — Django’s
historical behavior — and None for unlimited persistent connections.
I use a Celery task and get an error I do not understand.
I loop over a table (that contains query definitions) to edit missing/inconsistent data in a database (using an API) and register discrepancies in another table.
If I run one query at a time, it works, but when I loop over the queries, I get an error:
OperationalError('server closed the connection unexpectedly\n\tThis probably means the server terminated abnormally\n\tbefore or while processing the request.\n')
def DCF_edition(self):
    DCF_BEFORE_UPDATE = pd.DataFrame.from_records(DataCorrectionForm.objects.all().values())
    DCF_BEFORE_UPDATE = DCF_BEFORE_UPDATE.astype({'record_date': str, 'created_date': str}) if not DCF_BEFORE_UPDATE.empty else DCF_BEFORE_UPDATE
    data = []
    # load queries definition
    queries = queries_definitions()
    if not queries.empty:
        for index, row in queries.iterrows():
            try:
                missing_or_inconsistent = missing_or_inconsistent_data(row['ide'], row['query_type'], row['crf_name'].lower(), row['crf identification date'], row['variable_name'], row['variable_label'], row['query_condition'], row['fields_to_display'])
                data.append(missing_or_inconsistent[['dcf_ide', 'category', 'crf', 'crf_ide', 'pat', 'record_date', 'field_name', 'field_label', 'message', 'field_value', 'dcf_status', 'DEF', 'deactivated']])
                DCF_AFTER_UPDATE = pd.concat(data)
                DCF_AFTER_UPDATE = DCF_AFTER_UPDATE.drop_duplicates(keep='last')
                DCF_AFTER_UPDATE = DCF_AFTER_UPDATE.drop(['DEF'], axis=1)
                DCF_AFTER_UPDATE.rename(columns={'pat': 'patient'}, inplace=True)
            except Exception as e:
                Log.objects.create(dcf_edition_status=0, dcf_edition_exception=str(e)[:200])
                continue
    # Cast dates into string format to be able to dump the data
    DCF_AFTER_UPDATE = DCF_AFTER_UPDATE.astype({'record_date': str}) if not DCF_AFTER_UPDATE.empty else DCF_AFTER_UPDATE
    records = json.loads(json.dumps(list(DCF_AFTER_UPDATE.T.to_dict().values())))
    for record in records:
        if not DCF_BEFORE_UPDATE.empty and record['dcf_ide'] in DCF_BEFORE_UPDATE.values:
            DataCorrectionForm.objects.filter(dcf_ide=record['dcf_ide']).update(dcf_status=2)
        else:
            DataCorrectionForm.objects.get_or_create(**record)
    # resolved dcf => status=0
    if not DCF_BEFORE_UPDATE.empty:
        records = json.loads(json.dumps(list(DCF_BEFORE_UPDATE.T.to_dict().values())))
        for record in records:
            if record['dcf_ide'] not in DCF_AFTER_UPDATE.values:
                DataCorrectionForm.objects.filter(dcf_ide=record['dcf_ide']).update(dcf_status=0)
    Log.objects.create(dcf_edition_status=1)
    return True
The lifetime of a database connection, as an integer of seconds. Use 0 to close database connections at the end of each request — Django’s historical behavior — and None for unlimited persistent connections.
It seems that your task is a long-running task that needs to hold the DB connection for a long period. Did you try setting it to None?
DATABASES = {
    'default': env.db(),
}
# https://docs.djangoproject.com/en/3.1/ref/settings/#conn-max-age
DATABASES['default']['CONN_MAX_AGE'] = None
How long does your task need to finish? It could also be a problem with a server-side database setting, e.g. tcp_keepalives_idle.
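Another common workaround for long-running Celery tasks is to let Django discard stale connections before the ORM is used, so a fresh connection is opened instead of reusing one the server has already closed. A minimal sketch, assuming the task from the question:
from celery import shared_task
from django.db import close_old_connections

@shared_task(bind=True)
def DCF_edition(self):
    # Drop connections that were closed by the server or exceeded CONN_MAX_AGE;
    # the next ORM call will then open a fresh connection.
    close_old_connections()
    # ... rest of the task as in the question ...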

Updating database based on previous csv file uploads - delete - create - or update Python/Django

Please, I need help with the following.
I am trying to update the database by comparing it with the previously uploaded CSV file. I need to update all fields except the VIN when they change (the VIN is the unique value), delete the item if it is no longer in the CSV file, and create one if a new VIN appears.
vin                stock_no  make    model    trim  miles
12345789098765432  4535      honda   civic    lx    89000
j4j4jj49098765432  3453      toyota  corolla  DX    54555
12345345438765432  6254      ford    mustang  es    101299
When I change any value and the CSV is uploaded, it creates a duplicate:
def upload_file__view(request):
    form = form(request.POST or None, request.FILES or None)
    company = Comp_info.objects.last()
    if form.is_valid():
        form.save()
        obj = c.objects.get(activated=False)
        with open(obj.file_name.path, 'r+') as f:
            reader = c.reader(f)
            for i, row in enumerate(reader):
                if i == 0:
                    pass
                else:
                    # row = "".join(row)
                    # row = row.replace(",", " ")
                    # row = row.split()
                    print(row)
                    print(type(row))
                    vin = row[0].upper()
                    condition = row[1].replace("U", "Used").replace("N", "New")
                    stock_no = row[2]
                    year = int(row[5])
                    make = row[3]
                    model = row[4]
                    trim = row[6]
                    mileage = row[8]
                    mpg_city = row[18]
                    mpg_hwy = row[19]
                    engine = row[9]
                    transmission = row[12]
                    fuel_type = row[11]
                    vehicle_type = row[7]
                    drive_type = row[20].replace("4X2", "2WD").replace("4X4", "4WD")
                    exterior_color = row[15]
                    interior_color = row[16]
                    price = row[13].replace("0", "")
                    description = row[22]
                    features_2 = row[21]
                    images_data = row[23]
                    raw_images_list = images_data.split(',')
                    images_list = [""] * 25
                    for x in range(image_count):
                        if x == 25:
                            break
                        images_list[x] = raw_images_list[x]
                    for x in images_list:
                        print(x)
                    if images_list[0] == "":
                        images_list[0] = "https://www.beverlyhillscarclub.com/template/images/ina_f.jpg"
                    car_photo = images_list[0]
                    car_photo_1 = images_list[1]
                    car_photo_2 = images_list[2]
                    car_photo_3 = images_list[3]
                    car_photo_4 = images_list[4]
                    car_photo_5 = images_list[5]
                    car_photo_6 = images_list[6]
                    car_photo_7 = images_list[7]
                    car_photo_8 = images_list[8]
                    car_photo_9 = images_list[9]
                    car_photo_10 = images_list[10]
                    car_photo_11 = images_list[11]
                    car_photo_12 = images_list[12]
                    car_photo_13 = images_list[13]
                    car_photo_14 = images_list[14]
                    car_photo_15 = images_list[15]
                    car_photo_16 = images_list[16]
                    car_photo_17 = images_list[17]
                    car_photo_18 = images_list[18]
                    car_photo_19 = images_list[19]
                    car_photo_20 = images_list[20]
                    car_photo_21 = images_list[21]
                    car_photo_22 = images_list[22]
                    car_photo_23 = images_list[23]
                    car_photo_24 = images_list[24]
                    # notes = pip._vendor.requests(images_list[0], stream=True)
                    # car_photo = row[23]
                    # user = User.objects.get(username=row[3])
                    Cars.objects.update_or_create(
                        vin=vin,
                        condition=condition,
                        stock_no=stock_no,
                        year=year,
                        make=make,
                        model=model,
                        trim=trim,
                        mileage=mileage,
                        mpg_city=mpg_city,
                        engine=engine,
                        transmission=transmission,
                        fuel_type=fuel_type,
                        vehicle_type=vehicle_type,
                        drive_type=drive_type,
                        exterior_color=exterior_color,
                        interior_color=interior_color,
                        price=price,
                        description=description,
                        company_name=company.company_name,
                        address=company.company_address,
                        city=company.city,
                        state=company.state,
                        zip=company.zip_code,
                        phone_number=company.phone_number,
                        email=company.fax_number,
                        features_2=features_2,
                        car_photo=downloadFile(car_photo),
                        car_photo_1=downloadFile(car_photo_1),
                        car_photo_2=downloadFile(car_photo_2),
                        car_photo_3=downloadFile(car_photo_3),
                        car_photo_4=downloadFile(car_photo_4),
                        car_photo_5=downloadFile(car_photo_5),
                        car_photo_6=downloadFile(car_photo_6),
                        car_photo_7=downloadFile(car_photo_7),
                        car_photo_8=downloadFile(car_photo_8),
                        car_photo_9=downloadFile(car_photo_9),
                        car_photo_10=downloadFile(car_photo_10),
                        car_photo_11=downloadFile(car_photo_11),
                        car_photo_12=downloadFile(car_photo_12),
                        car_photo_13=downloadFile(car_photo_13),
                        car_photo_14=downloadFile(car_photo_14),
                        car_photo_15=downloadFile(car_photo_15),
                        car_photo_16=downloadFile(car_photo_16),
                        car_photo_17=downloadFile(car_photo_17),
                        car_photo_18=downloadFile(car_photo_18),
                        car_photo_19=downloadFile(car_photo_19),
                        car_photo_20=downloadFile(car_photo_20),
                        car_photo_21=downloadFile(car_photo_21),
                        car_photo_22=downloadFile(car_photo_22),
                        car_photo_23=downloadFile(car_photo_23),
                        car_photo_24=downloadFile(car_photo_24)
                        # car_photo = car_photo,
                        # quantity = int(row[2]),
                        # salesman = user
                    )
        obj.activated = True
        obj.save()
    data = {
        'form': form,
        'now': now,
    }
    return render(request, 'uploads.html', data)
Thanks in advance for any help!
Thank you
Step 1
An empty list was created to compare with the uploaded data:
imported_cars = []
Step 2
Filtered on the unique value (primary key) to check whether the item already existed, and used the get method to fetch it for updating. The car variable holds the object to update or create.
if Cars.objects.filter(vin=vin).exists():
    car = Cars.objects.get(vin=vin)
Step 3
Used an else statement to create the item if it did not exist.
else:
    car = Cars.objects.create(vin=vin, condition=condition...)
Finally, outside the loop, the list was populated with the updated and created cars, and items that were in the database but not in the CSV file were deleted.
imported_cars_vin_numbers = [car.vin for car in imported_cars]
for car in Cars.objects.all():
    if car.vin not in imported_cars_vin_numbers:
        car.delete()
Special thanks and credit to Zack Plauché who was extremely helpful and professional in helping me and teaching me how to solve this issue.
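For reference, here is a consolidated sketch of the three steps above, using update_or_create (which collapses the exists()/get/create branches into a single call). The field names in defaults are illustrative only; the real loop would set every column parsed from the CSV row:
imported_cars = []
for row in rows:  # rows parsed from the uploaded CSV, header skipped
    vin = row[0].upper()
    defaults = {'stock_no': row[2], 'make': row[3], 'model': row[4]}  # assumed subset of fields
    car, created = Cars.objects.update_or_create(vin=vin, defaults=defaults)
    imported_cars.append(car)

# Anything in the database that was not part of this upload is removed.
imported_vins = [car.vin for car in imported_cars]
Cars.objects.exclude(vin__in=imported_vins).delete()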
Your issue is in models.py.
You should define the Cars model with the following:
vin = models.CharField(primary_key=True, editable=False)
Confirm this works, since I am suggesting a solution without actually seeing your models.py.
This should handle the update aspect of your logic. The part where you delete a VIN if it's not in the CSV will have to be done with a new process I don't see written here. A suggestion would be to clear the DB and repopulate, or to create a function that compares the DB with the CSV and deletes objects that are not in the CSV.
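In context, that model change might look like the following sketch (the non-vin fields are assumptions; note that CharField also requires a max_length, and 17 is the standard VIN length):
class Cars(models.Model):
    # VIN as the natural primary key, so re-uploading the same vehicle updates
    # the existing row instead of creating a duplicate.
    vin = models.CharField(primary_key=True, max_length=17, editable=False)
    stock_no = models.CharField(max_length=20, blank=True)  # assumed field
    make = models.CharField(max_length=50, blank=True)      # assumed field
    model = models.CharField(max_length=50, blank=True)     # assumed field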

django query aggregate function is slow?

I am working with Django to see how it handles large databases. I use a database with the fields name, age, date of birth (dob) and height. The database has about 500,000 entries. I have to find the average height of persons (1) of the same age and (2) born in the same year. The aggregate function when querying the table takes about 10s. Is this usual, or am I missing something?
For age:
age = [i[0] for i in Data.objects.values_list('age').distinct()]
ht = []
for each in age:
    aggr = Data.objects.filter(age=each).aggregate(ag_ht=Avg('height'))
    ht.append(aggr)
From dob:
age = [i[0].year for i in Data.objects.values_list('dob').distinct()]
for each in age:
    aggr = Data.objects.filter(dob__contains=each).aggregate(ag_ht=Avg('height'))
    ht.append(aggr)
The year has to be extracted from dob. It is SQLite and I cannot use __year (join).
For these queries to be efficient, you have to create indexes on the age and dob columns.
You will get a small additional speedup by using covering indexes, i.e., using two-column indexes that also include the height column.
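If the table is managed by Django, those indexes can be declared on the model itself rather than with raw SQL. A sketch, assuming the fields described in the question (the exact field types are assumptions):
from django.db import models

class Data(models.Model):
    name = models.CharField(max_length=100)
    age = models.IntegerField()
    dob = models.DateField()
    height = models.FloatField()

    class Meta:
        indexes = [
            # Two-column indexes: the filtered column first, with height included
            # so the Avg('height') aggregate can be served from the index.
            models.Index(fields=['age', 'height']),
            models.Index(fields=['dob', 'height']),
        ]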
Full version, with a timing comparison between the loop version and the queryset version:
import time
from dd.models import Data
from django.db.models import Avg
from django.db.models.functions import ExtractYear

# for age
start = time.time()
age = [i[0] for i in Data.objects.values_list('age').distinct()]
ht = []
for each in age:
    aggr = Data.objects.filter(age=each).aggregate(ag_ht=Avg('height'))
    ht.append(aggr)
end = time.time()
loop_time = end - start

start = time.time()
qs = Data.objects.values('age').annotate(ag_ht=Avg('height')).order_by('age')
ht_qs = qs.values_list('age', 'ag_ht')
end = time.time()
qs_time = end - start
print(loop_time / qs_time)

# for dob year, with a small refactoring of your version (use a set for the years)
start = time.time()
years = set([i[0].year for i in Data.objects.values_list('dob').distinct()])
ht_year_loop = []
for each in years:
    aggr = Data.objects.filter(dob__contains=each).aggregate(ag_ht=Avg('height'))
    ht_year_loop.append((each, aggr.get('ag_ht')))
end = time.time()
loop_time = end - start

start = time.time()
qs = Data.objects.annotate(dob_year=ExtractYear('dob')).values('dob_year').annotate(ag_ht=Avg('height'))
ht_qs = qs.values_list('dob_year', 'ag_ht')
end = time.time()
qs_time = end - start
print(loop_time / qs_time)

Django charts - date&time axis

I have one model which looks like this:
class Measurement(models.Model):
    date = models.DateField('date')
    time = models.TimeField('time')
    Q = models.DecimalField(max_digits=10, decimal_places=6)
    P = models.DecimalField(max_digits=10, decimal_places=6)
    f = models.DecimalField(max_digits=10, decimal_places=6)
In my views, I would like to represent it. So I made this function:
def plotMeas(request):
    # Count the events
    c = Measurement.objects.all()
    c = c.count()
    # Variables
    i = 0
    a = [0]
    P = a * c
    Q = a * c
    t = a * c
    # Save dP_L1 & dQ_L1 in lists
    for i in range(c):
        meas = Measurement.objects.get(pk=i + 1)
        P[i] = meas.P
        Q[i] = meas.Q
        t[c - 1 - i] = i * 10
    if c > 100:
        P = P[-100:]
        Q = Q[-100:]
        t = t[-100:]
    # Construct the graph
    fig = Figure()
    q = fig.add_subplot(211)
    q.set_xlabel("time (minutes ago)")
    q.set_ylabel("Q (VAR)")
    p = fig.add_subplot(212)
    p.set_xlabel("time (minutes ago)")
    p.set_ylabel("P (W)")
    p.plot(t, P, 'go-')
    q.plot(t, Q, 'o-')
    canvas = FigureCanvas(fig)
    response = HttpResponse(content_type='image/png')
    canvas.print_png(response)
    return response
However, I would like that the horizontal axis would show the date and the time (saved in the model). Does anyone know how to do it?
Have a look at the documentation for plot_date. Conveniently plot_date takes similar arguments to plot. A call might look like:
p.plot_date(sequence_of_datetime_objects, y_axis_values, 'go-')
Using matplotlib.dates you can then customize the format of your x-axis labels.
A simple example:
The following will specify that the x-axis displays only every third month in the format Jan '09 (assuming English-speaking locale).
import matplotlib.dates as mdates

p.xaxis.set_major_locator(mdates.MonthLocator(interval=3))
p.xaxis.set_major_formatter(mdates.DateFormatter("%b '%y"))
Since you have dates and times stored separately you may either want to
change your model to use a DateTimeField, or
use Python to combine them.
For example:
import datetime as dt
t1 = dt.time(21,0,1,2) # 21:00:01.000002
d1 = dt.date.today()
dt1 = dt.datetime.combine(d1,t1)
# result: datetime.datetime(2011, 4, 15, 21, 0, 1, 2)
To iterate over two sequences and combine them you might use zip (code for illustrative purposes only, not necessarily optimized):
sequence_of_datetime_objects = []
for a_date, a_time in zip(sequence_of_date_objects, sequence_of_time_objects):
    sequence_of_datetime_objects.append(dt.datetime.combine(a_date, a_time))
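Putting the pieces together inside the view, a rough sketch (assuming the Measurement model from the question and the p subplot and fig objects built as above) could be:
import datetime as dt
import matplotlib.dates as mdates

measurements = Measurement.objects.order_by('date', 'time')
x = [dt.datetime.combine(m.date, m.time) for m in measurements]
y = [m.P for m in measurements]

p.plot_date(x, y, 'go-')
p.xaxis.set_major_formatter(mdates.DateFormatter('%d %b %H:%M'))
fig.autofmt_xdate()  # rotate the date labels so they do not overlap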
Feel free to open another question if you get stuck implementing the specifics.