I have 2 microservices: one for subjects and one for questions. Don't ask me why. It has to be like that.
So in questions I have this m2m-like model:
class QuestionsSubjects(models.Model):
    question = models.ForeignKey(Question, ...)
    subject_id = models.PositiveIntegerField()
And the Question model has this property:
@property
def subjects(self) -> QuerySet[int]:
    return QuestionsSubjects.objects \
        .filter(question=self).values_list('subject_id', flat=True)
I don't have a Subject model; I only have ids coming from the frontend.
I need to store relations between questions and subjects.
But what I have now is not convenient because it's hard to create questions with subjects.
For example, I have this QuestionSerializer:
class QuestionSerializer(serializers.ModelSerializer):
    subjects = serializers.ListField(child=serializers.IntegerField())

    class Meta:
        model = Question
        fields = [
            # some other question fields
            'subjects',
        ]

    def create(self, validated_data: dict[str, Any]) -> Question:
        subject_ids: list[int] = validated_data.pop('subjects', [])
        question = Question.objects.create(**validated_data)
        create_questions_subjects(question, subject_ids)
        return question

    def update(self, question: Question, validated_data: dict[str, Any]) -> Question:
        # delete all existing rows for this specific question
        QuestionsSubjects.objects.filter(question=question).delete()
        # and recreate them with the new subject ids
        subject_ids: list[int] = validated_data.pop('subjects', [])
        create_questions_subjects(question, subject_ids)
        return super().update(question, validated_data)
The problem is that I'm getting subject ids from the client, and every time I need to go through each subject id and create a row in QuestionsSubjects for that subject and question. So I created a function called create_questions_subjects:
def create_questions_subjects(
    question: Question,
    subject_ids: list[int]
) -> None:
    questions_subjects_list: list[QuestionsSubjects] = []
    for subject_id in subject_ids:
        questions_subjects_list.append(
            QuestionsSubjects(question=question, subject_id=subject_id)
        )
    QuestionsSubjects.objects.bulk_create(questions_subjects_list)
But I don't know where I should put it. It's still in serializers.py. It works, but it looks ugly to me. Is there a better way of dealing with such a task?
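One possible arrangement, sketched here purely as an illustration and not as the definitive answer, is to move the row-creation logic onto a custom manager so the serializer only orchestrates; the manager and method names below are made up:

from django.db import models

# Sketch only: a custom manager as one possible home for the helper.
# QuestionsSubjectsManager and set_for_question are invented names, not from the original code.
class QuestionsSubjectsManager(models.Manager):
    def set_for_question(self, question, subject_ids: list[int]) -> None:
        # replace all rows for this question with the new subject ids (one DELETE, one bulk INSERT)
        self.filter(question=question).delete()
        self.bulk_create(
            [self.model(question=question, subject_id=subject_id)
             for subject_id in subject_ids]
        )

# Attached on the model with:  objects = QuestionsSubjectsManager()
# The serializer's create()/update() would then call:
#   QuestionsSubjects.objects.set_for_question(question, subject_ids)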
Basically, I have a catalog viewset. In the list view I want to apply some filtering and return the results accordingly.
Relevant Catalog model fields are:
class Catalog(models.Model):
    name = models.CharField(max_length=191, null=True, blank=False)
    ...
    team = models.ForeignKey(Team, on_delete=models.CASCADE, editable=False, related_name='catalogs')
    whitelist_users = models.JSONField(null=True, blank=True, default=list)  # If the whitelist is null, it is open to the whole team
Views.py
class CatalogViewSet(viewsets.ModelViewSet):
    permission_classes = (IsOwnerAdminOrRestricted,)

    def get_queryset(self):
        result = []
        user = self.request.user
        catalogs = Catalog.objects.filter(team__in=self.request.user.team_set.all())
        for catalog in catalogs:
            if catalog.whitelist_users == [] or catalog.whitelist_users is None:
                # catalog is open to the whole team
                result.append(catalog)
            else:
                # catalog is private
                if user in catalog.whitelist_users:
                    result.append(catalog)
        return result
So this is my logic:
1 - Get the catalog if the catalog's team is one of the current user's teams.
2 - Check if catalog.whitelist_users contains the current user. (There is also an exception: if it is None, the catalog is open to the whole team, so I can show it in the list view.)
Now this worked, but since I am returning a list instead of a queryset, the detail view doesn't find objects correctly; I mean /catalog/ID doesn't work correctly.
I am new to DRF so I am guessing there is something wrong here. How would you implement this filtering better?
As the name of the method suggests, you need to return a queryset. Also, avoid iterating over a queryset if that's not necessary. It's better to do it in a single database hit. For complex queries, you can use the Q object.
from django.db.models import Q

# ...

def get_queryset(self):
    user = self.request.user
    catalogs = Catalog.objects.filter(
        Q(whitelist_users__in=[None, []]) | Q(whitelist_users__contains=user),
        team__in=user.team_set.all())
    return catalogs
Now I am not 100% sure that whitelist_users__contains=user will work, since it depends on how you construct your JSON, but the idea is there; you will just need to adapt what it contains.
This will be much more efficient than looping in Python and will respect what get_queryset is meant for.
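For instance, if whitelist_users holds a flat JSON list of user IDs (an assumption about the data layout, and a lookup that relies on JSONField containment support, e.g. on PostgreSQL), the filter might be adapted like this:

from django.db.models import Q

# Sketch assuming whitelist_users stores user IDs, e.g. [3, 17, 42].
def get_queryset(self):
    user = self.request.user
    return Catalog.objects.filter(
        Q(whitelist_users__isnull=True)
        | Q(whitelist_users=[])
        | Q(whitelist_users__contains=[user.id]),
        team__in=user.team_set.all(),
    )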
A simple solution that comes to mind is just creating a list of PKs and filtering again; that way you return a QuerySet. Not the most efficient solution, but it should work:
def get_queryset(self):
    pks = []
    user = self.request.user
    catalogs = Catalog.objects.filter(team__in=user.team_set.all())
    for catalog in catalogs:
        if catalog.whitelist_users == [] or catalog.whitelist_users is None:
            # catalog is open to the whole team
            pks.append(catalog.pk)
        else:
            # catalog is private
            if user in catalog.whitelist_users:
                pks.append(catalog.pk)
    return Catalog.objects.filter(id__in=pks)
How would I go about updating a many-to-many relationship in Django REST Framework?
Here is my model.
class SchoolTeacher(AbstractBase):
    school = models.ForeignKey(School, on_delete=models.CASCADE,
                               related_name='teachers')
    user = models.ForeignKey(User, on_delete=models.CASCADE,
                             related_name='teacher_at')
    subjects = models.ManyToManyField(SchoolSubject,
                                      related_name='teachers')
Here is my serializer:
class SchoolTeacherSerializer(serializers.ModelSerializer):
    ....

    def create(self, validated_data):
        school_class = validated_data.get('school_class', None)
        stream = validated_data.get('stream', None)
        school_teacher_model_fields = [
            f.name for f in SchoolTeacher._meta.get_fields()]
        valid_teacher_data = {
            key: validated_data[key]
            for key in school_teacher_model_fields
            if key in validated_data.keys()
        }
        subjects = valid_teacher_data.pop('subjects')
        teacher = SchoolTeacher.objects.create(**valid_teacher_data)
        for subject in subjects:
            teacher.subjects.add(subject)
        self.add_class_teacher(stream, school_class, teacher)
        return teacher

    def update(self, instance, validated_data):
        subjects = validated_data.pop('subjects')
        school_class = validated_data.get('school_class', None)
        stream = validated_data.get('stream', None)
        teacher = instance
        for (key, value) in validated_data.items():
            setattr(teacher, key, value)
        for subject in subjects:
            teacher.subjects.add(subject)
        teacher.save()
        return teacher
How do I achieve updating subjects? Currently, I can only add and not delete existing subjects.
Here is a workaround; I wish, however, that someone would post a better answer. The idea here is to remove all existing subjects first and then add the new ones. Since we of course cannot do teacher.subjects.all().delete() (that would delete the SchoolSubject rows themselves), we can do:
for existing_subject in teacher.subjects.all():
    teacher.subjects.remove(existing_subject)
for subject in subjects:
    teacher.subjects.add(subject)
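In recent Django versions the related manager also provides set(), which replaces the whole relation in one call; a minimal sketch of how update() could use it (the rest of the original method is left out):

def update(self, instance, validated_data):
    subjects = validated_data.pop('subjects')
    for key, value in validated_data.items():
        setattr(instance, key, value)
    instance.save()
    # set() removes the rows that are no longer wanted and adds the new ones
    instance.subjects.set(subjects)
    return instance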
As we are talking about many-to-many, both objects need to be persisted before you can assign them to each other.
Once you have that, you can simply add one to the corresponding collection of the other, like this (source: https://docs.djangoproject.com/en/2.1/topics/db/examples/many_to_many/):
a1 = Article(headline='Django lets you build Web apps easily')
a1.save()
p1 = Publication(title='The Python Journal')
p1.save()
a1.publications.add(p1)
Or, in one step:
new_publication = a1.publications.create(title='Highlights for Children')
I haven't found out how to express "contains" yet, but the next step is to remove the subjects that are not in the new list and add the ones that are in the new list but not the old one. This is probably more efficient with sorted lists or hash sets, but my Python is not good enough for that yet.
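A sketch of that diff-based approach with plain Python sets (the variable names are assumptions):

# Assumed inputs: teacher is a saved SchoolTeacher, new_subjects is the desired list of SchoolSubject objects
current = set(teacher.subjects.all())
desired = set(new_subjects)

for subject in current - desired:   # no longer wanted
    teacher.subjects.remove(subject)
for subject in desired - current:   # newly added
    teacher.subjects.add(subject)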
I have this model in my code:
class Conversation(models.Model):
    participants = models.ManyToManyField(User, related_name="message_participants")
and I need to filter the "Conversation" objects by the "participants" many-to-many field.
Meaning: I have, for example, 3 User objects, and I want to retrieve only the "Conversation" objects that have exactly these 3 Users in their "participants" field.
I tried doing this:
def get_exist_conv_or_none(sender, recipients):
    conv = Conversation.objects.filter(participants=sender)
    for rec in recipients:
        conv = conv.filter(participants=rec)
where sender is a User object and "recipients" is a list of User objects.
It won't raise an error, but it gives me the wrong Conversation object.
Thanks.
edit:
A more recent try lead me to this:
def get_exist_conv_or_none(sender, recipients):
    participants = recipients
    participants.append(sender)
    conv = Conversation.objects.filter(participants__in=participants)
    return conv
which basically has the same problem. It yields objects that have one or more of the "participants" on the list, but what I'm looking for is an exact match of the many-to-many set.
Meaning, an object with exactly those "Users" in its many-to-many relation.
Edit 2: my last attempt; it still won't work.
def get_exist_conv_or_none(sender, recipients):
    recipients.append(sender)
    recipients = list(set(recipients))
    conv = Conversation.objects.annotate(count=Count('participants')).filter(participants=recipients[0])
    for participant in recipients[1:]:
        conv.filter(participants=participant)
    conv.filter(count=len(recipients))
    return conv
OK, so I found the answer:
In order to make an exact match, I have to chain-filter the model and then make sure it has exactly the right number of participants, so that the many-to-many field contains all the required objects and no more.
I check the object count using annotation (https://docs.djangoproject.com/en/dev/topics/db/aggregation/).
I ended up with this code:
def get_exist_conv_or_none(recipients):
    conv = Conversation.objects.annotate(count=Count('participants')).filter(participants=recipients[0])
    for participant in recipients[1:]:
        conv = conv.filter(participants=participant)
    conv = conv.filter(count=len(recipients))
    return conv
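A usage sketch (the way the participant list is built here is an assumption; Count comes from django.db.models):

from django.db.models import Count  # needed by get_exist_conv_or_none

# sender is a User, recipients is a list of Users
participants = list(set(recipients + [sender]))
conversation = get_exist_conv_or_none(participants).first()  # None if no exact match exists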
For fast searching using a database index, I use this code:
class YandexWordstatQueue(models.Model):
    regions = models.ManyToManyField(YandexRegion)
    regions_cached = models.CharField(max_length=10000, editable=False, db_index=True)
    phrase = models.ForeignKey(SearchPhrase, db_index=True)
    tstamp = models.DateTimeField(auto_now_add=True)


class YandexWordstatRecord(models.Model):
    regions = models.ManyToManyField(YandexRegion)
    regions_cached = models.CharField(max_length=10000, editable=False, db_index=True)
    phrase = models.ForeignKey(SearchPhrase, db_index=True)
    Shows = models.IntegerField()
    date = models.DateField(auto_now_add=True)
@receiver(m2m_changed, sender=YandexWordstatRecord.regions.through)
@receiver(m2m_changed, sender=YandexWordstatQueue.regions.through)
def yandexwordstat_regions_changed(sender, **kwargs):
    if kwargs.get('action') in ['post_add', 'post_remove']:
        instance = kwargs.get('instance')
        region_ids = list(instance.regions.values_list('RegionID', flat=True))
        region_ids.sort()
        instance.regions_cached = json.dumps(region_ids)
        instance.save()
This adds overhead when saving, but now I can perform a fast filter with this snippet:
region_ids = [1, 2, 3]  # or list(some_queryset.values_list(...))
region_ids.sort()
regions_cached = json.dumps(region_ids)
YandexWordstatQueue.objects.filter(regions_cached=regions_cached)
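Wrapped as a small helper, the lookup side might look like this (a sketch; the helper name is made up, and it assumes the ids are the same RegionID values the signal stores):

import json

def filter_by_exact_regions(queryset, region_ids):
    # matches rows whose cached, sorted RegionID list is exactly region_ids
    return queryset.filter(regions_cached=json.dumps(sorted(region_ids)))

# e.g. filter_by_exact_regions(YandexWordstatQueue.objects.all(), [1, 2, 3])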
I have two models, let's say:
class superfields(Model):
    fieldA = models.FloatField()
    fieldB = models.FloatField()

    class Meta:
        abstract = True


class my_model(superfields):
    def has_history(self):
        return self.my_model_history_set.count() > 0


class my_model_history(superfields):
    reason = models.TextField()
    mymodel = models.ForeignKey(my_model)
'my_model' is populated with data (under fieldA and fieldB). Whenever someone edits 'my_model's fields and saves, I don't want to save the change in this model; I want to store it as a new row with all values in 'my_model_history', along with a 'reason' field, while the 'my_model' data stays the same.
What is the best way to approach this scenario in terms of custom templates, custom views, model admins, etc.? Am I doing it correctly?
To give my question some context: in my project, the data under 'my_model' is market prices, and I need to maintain a history of every edit to those prices, with a 'reason' for each edit.
Instead of editing an existing entry, why not use that entry as initial data for a form to create a new instance? The new object gets saved, the original stays the same...
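A minimal sketch of that idea, assuming a plain ModelForm-based view (the form name, view name, URL target, and template name are invented for illustration):

from django import forms
from django.forms.models import model_to_dict
from django.shortcuts import get_object_or_404, redirect, render

class MyModelHistoryForm(forms.ModelForm):
    class Meta:
        model = my_model_history
        fields = ['fieldA', 'fieldB', 'reason']

def edit_with_history(request, pk):
    original = get_object_or_404(my_model, pk=pk)
    if request.method == 'POST':
        form = MyModelHistoryForm(request.POST)
        if form.is_valid():
            history = form.save(commit=False)
            history.mymodel = original  # link the new row to the untouched original
            history.save()
            return redirect('my_model_list')  # invented URL name
    else:
        # pre-fill the form with the existing row's values
        form = MyModelHistoryForm(initial=model_to_dict(original, fields=['fieldA', 'fieldB']))
    return render(request, 'edit_with_history.html', {'form': form})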
My Solution:
Yes. A simple and quick solution I am following is as follows:
I create three models similar to this:
class my_super_abstract_model(Model):
    # All fields I need to keep a history for:
    fieldA = models.FloatField()
    fieldB = models.FloatField()

    class Meta:
        abstract = True


class my_model(my_super_abstract_model):
    def has_history(self):
        return self.my_model_history_set.count() > 0


class my_model_history(my_super_abstract_model):
    reason = models.TextField()
    history_entry_for = models.ForeignKey(my_model)
I've set up a signal:
pre_save.connect(create_history,
                 sender=my_model_history)
with create_history called by the pre_save signal before a my_model_history row is saved:
def create_history(sender, **kwargs):
    # get variables passed by the pre-save signal:
    history_model = kwargs['instance']
    # get the main model object
    main_model = history_model.history_entry_for
    # swap all common fields between the history entry and the main model (except id)
    main_model_fields = [f.name for f in main_model._meta.fields]
    history_model_fields = [f.name for f in history_model._meta.fields]
    field_index = [f for f in history_model_fields
                   if f in main_model_fields and f != 'id' and f != 'created_date']
    # loop through to swap values:
    for field_name in field_index:
        temp = getattr(main_model, field_name)
        setattr(main_model, field_name, getattr(history_model, field_name))
        setattr(history_model, field_name, temp)
    # after the swap, save the main model object here
    main_model.save()
Whenever a user clicks on a my_model row for editing, I use 'my_model_history' to generate my edit form and populate it with the values from the selected row. (I have written a view and template to do that.)
So the edit form will now have:
field A - populated with values from the my_model data row
field B - populated with values from the my_model data row
Reason - empty text box
history_entry_for - hidden from view
The user can now edit fieldA/fieldB, enter a reason, and press save to trigger the signal above.
Before saving, the signal will:
swap the values between the main model (old values) and the history model (new values),
replace and save the main model row (with the new values),
insert and save a new row in the history model (with the old values) together with the reason.
Hope it helps. Let me know if there are any further questions.
I found an explanation of keeping detailed edit histories in the book 'Pro Django', page 264. After a read-through I'll try an implementation of what I need. I will post my approach here when I'm done.
EDIT:
It turns out the real question is - how do I get select_related to follow the m2m relationships I have defined? Those are the ones that are taxing my system. Any ideas?
I have two classes for my Django app. The first (the Item class) describes an item along with some functions that return information about the item. The second class (Item_list) takes a list of these items and then does some processing on them to return different values. The problem I'm having is that returning a list of items from Item_list takes a ton of queries, and I'm not sure where they're coming from.
class Item(models.Model):
    # for archiving purposes
    archive_id = models.IntegerField()
    users = models.ManyToManyField(User, through='User_item_rel',
                                   related_name='users_set')
    # for many-to-one relationship (tags)
    tag = models.ForeignKey(Tag)
    sub_tag = models.CharField(default='', max_length=40)
    name = models.CharField(max_length=40)
    # pass the callable (not a call) so the default is evaluated per save, not at import time
    purch_date = models.DateField(default=datetime.datetime.now)
    date_edited = models.DateTimeField(auto_now_add=True)
    price = models.DecimalField(max_digits=6, decimal_places=2)
    buyer = models.ManyToManyField(User, through='Buyer_item_rel',
                                   related_name='buyers_set')
    comments = models.CharField(default='', max_length=400)
    house_id = models.IntegerField()

    class Meta:
        ordering = ['-purch_date']

    def shortDisplayBuyers(self):
        if len(self.buyer_item_rel_set.all()) != 1:
            return "multiple buyers"
        else:
            return self.buyer_item_rel_set.all()[0].buyer.name

    def listBuyers(self):
        return self.buyer_item_rel_set.all()

    def listUsers(self):
        return self.user_item_rel_set.all()

    def tag_name(self):
        return self.tag

    def sub_tag_name(self):
        return self.sub_tag

    def __unicode__(self):
        return self.name
and the second class:
class Item_list:
    def __init__(self, list=None, house_id=None, user_id=None,
                 archive_id=None, houseMode=0):
        self.list = list
        self.house_id = house_id
        self.uid = int(user_id)
        self.archive_id = archive_id
        self.gen_balancing_transactions()
        self.houseMode = houseMode

    def ret_list(self):
        return self.list
So after I construct Item_list with a large list of items, Item_list.ret_list() takes up to 800 queries for 25 items. What can I do to fix this?
Try using select_related
As per a question I asked here
Dan is right in telling you to use select_related.
select_related can be read about here.
What it does is return, in the same query, the data for the main object in your queryset and for the model or fields specified in the select_related clause.
So, instead of a query like:
select * from item
followed by several queries like this every time you access one of the item_list objects:
select * from item_list where item_id = <one of the items for the query above>
the ORM will generate a query like:
select item.*, item_list.*
from item
join item_list on item.id = item_list.item_id
In other words: it will hit the database once for all the data.
You probably want to use prefetch_related.
It works similarly to select_related, but it can deal with relations that select_related cannot, such as many-to-many fields. The "join" happens in Python, but I've found it to be more efficient for this kind of work than the large number of queries.
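A sketch of what that might look like for the Item model above; the reverse accessor names follow Django's defaults for the through models shown in the question, and the user accessor on User_item_rel is a guess:

items = (
    Item.objects
    .select_related('tag')  # follows the ForeignKey in the same query
    .prefetch_related('buyer_item_rel_set__buyer', 'user_item_rel_set__user')
)

for item in items:
    item.shortDisplayBuyers()  # served from the prefetch cache, no extra queries per item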
Related reading on the subject