Django custom manager ManyToMany field add

My code for the custom manager is below:
class CustomManager(models.Manager):
    def create(self, **kwargs):
        obj, created = Group.objects.get_or_create(name='group1')
        kwargs.update({'groups': obj})
        return super(CustomManager, self).create(**kwargs)
which raises a TypeError: Direct assignment to the forward side of a many-to-many set is prohibited. Use groups.set() instead.

As the error message specifies, you cannot pass groups= directly in a create(). You can use .set(…) [Django-doc] (or .add(…) [Django-doc]) instead, for example:
class CustomManager(models.Manager):
    def create(self, **kwargs):
        object = super(CustomManager, self).create(**kwargs)
        group, __ = Group.objects.get_or_create(name='group1')
        object.groups.set([group])
        return object
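For completeness, a minimal usage sketch. The question does not show the model the manager is attached to, so the Profile model and its fields below are assumptions, as is the use of the built-in auth Group:
from django.contrib.auth.models import Group
from django.db import models

class Profile(models.Model):  # hypothetical model; not part of the question
    name = models.CharField(max_length=100)
    groups = models.ManyToManyField(Group, blank=True)

    objects = CustomManager()

# Profile.objects.create(name='demo') now saves the row via the normal create(),
# then attaches the 'group1' Group afterwards through groups.set([group]).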

Related

Django Admin Form Limit Dropdown Options

So in the Django admin I have an object change form like so:
class SurveyChoiceField(forms.ModelChoiceField):
    def label_from_instance(self, obj):
        return u'{0} - {1}'.format(obj.id, obj.name)

class BlahAdminForm(forms.ModelForm):
    def __init__(self, *args, **kwargs):
        super(BlahAdminForm, self).__init__(*args, **kwargs)
        surveys = Survey.objects.filter(deleted=False).order_by('-id')
        self.fields['survey'] = SurveyChoiceField(queryset=surveys)

class BlahAdmin(admin.ModelAdmin):
    form = BlahAdminForm
and I would like to limit the dropdown to surveys of a certain type based on the blah. Something like
blah_id = self.blah.id
blah_survey_type = Blah.objects.filter(id=blah_id).get('survey_type')
surveys = Survey.objects.filter(deleted=False, type=blah_survey_type).order_by('-id')
but I'm not sure how to get the id of the Blah in the BlahAdminForm class.
A Django ModelForm has an instance which is the Blah instance it will create or edit. In case you edit an instance, the instance is passed through the instance parameter when creating the form. In case you create a new instance, then the instance is typically constructed in the super(BlahAdminForm, self).__init__(..) call (but has an id equal to None, since it is not yet saved).
You thus can obtain a reference to the instance that the form is editing, or its id, with:
class BlahAdminForm(forms.ModelForm):
    def __init__(self, *args, **kwargs):
        super(BlahAdminForm, self).__init__(*args, **kwargs)
        blah_id = self.instance.id
        # ...
You can thus use this self.instance in the constructor, or in other methods to inspect (and alter) the instance the form is handling.
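Combining this with the filtering from the question, a rough sketch could look as follows (the survey_type field on Blah and the type field on Survey are taken from the question's pseudocode, so treat them as assumptions):
class BlahAdminForm(forms.ModelForm):
    def __init__(self, *args, **kwargs):
        super(BlahAdminForm, self).__init__(*args, **kwargs)
        surveys = Survey.objects.filter(deleted=False).order_by('-id')
        # On the add form self.instance.id is None, so only narrow the
        # queryset when editing an existing Blah.
        if self.instance.id is not None:
            surveys = surveys.filter(type=self.instance.survey_type)
        self.fields['survey'] = SurveyChoiceField(queryset=surveys)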

Add request/context to django in writable serializer

I want to add the request context to my serializer in the Django REST framework, in particular to a nested serializer. I (successfully) tried to do that with a SerializerMethodField (as in my solution per: context in nested serializers django rest framework). This is the setup I use:
class VehicleTypeSerializer(RsModelSerializer):
    class Meta:
        model = VehicleType

class VehicleSerializer(RsModelSerializer):
    vehicletype = SerializerMethodField()

    class Meta:
        model = Vehicle
        fields = ('vehiclename', 'vehicledescription', 'vehicletype')

    def get_vehicletype(self, obj):
        return self.get_serializermethodfield_data(obj, VehicleType, VehicleTypeSerializer, 'vehicle')

    def get_serializermethodfield_data(self, obj, model_class, serializer_class, filter_field):
        filter = {filter_field: obj}
        objs = model_class.objects.all().filter(**filter)
        # We need the request-context for checking field permissions in the serializer
        s = serializer_class(objs, many=True, context={'request': self.context.get('request')})
        return s.data
Problem: I need a SerializerMethodField to pass the request context to the nested serializer (VehicleTypeSerializer).
But now I am stuck dealing with POSTs, since a SerializerMethodField is read-only. I can't POST an object to /api/v1/vehicle with:
{
    "vehiclename": "test",
    "vehicledescription": "test",
    "vehicletype": "1"    <---- gets ignored since SerializerMethodField is read-only
}
Question: Can someone point me in the right direction for adding the request context (especially the user information) to a nested serializer that I can also write to?
I need the request context (request.user) in the VehicleSerializer as well as in the VehicleTypeSerializer, because in the RsModelSerializer that I have defined I check, on a per-field basis, whether the user making the request has permission to read or update a field.
In the RsModelSerializer:
def __init__(self, *args, **kwargs):
    super().__init__(*args, **kwargs)
    # Make sure that there is a user mapped in the context (we need a user
    # for checking permissions on a field). If there is no user, we set
    # the user to None.
    if not self.context:
        self._context = getattr(self.Meta, 'context', {})
    try:
        self.user = self.context['request'].user
    except (KeyError, AttributeError):
        print('No request')
        self.user = None

def get_fields(self):
    """
    Override get_fields to ensure only fields that are allowed
    by model-field-permissions are returned to the serializer
    :return: Dict with allowed fields
    """
    ret = OrderedDict()
    fields = super().get_fields()
    # If no user is associated with the serializer, return no fields
    if self.user is None:
        return None
    # A superuser bypasses the permissions-check and gets all
    # available fields
    if self.user.is_superuser:
        print_without_test("user is superuser, bypassing permissions")
        return fields
    # Walk through all available fields and check if a user has permission for
    # it. If he does, add them to a return-array. This way all fields that
    # are not allowed to 'read' will be dropped. Note: this is only used
    # for read access. Write access is handled in the views (modelviewsets).
    for f in fields:
        if has_permission(user=self.user, app_label=self.Meta.model._meta.app_label,
                          table=self.Meta.model.__name__.lower(),
                          field=f,
                          permission='read'):
            ret[f] = fields[f]
    return ret
Method-1: Overriding the __init__() method of the parent serializer
You can add the context to the nested/child serializer in the __init__() method of the parent serializer.
class RsModelSerializer(serializers.ModelSerializer):
    def __init__(self, *args, **kwargs):
        super(RsModelSerializer, self).__init__(*args, **kwargs)
        request_obj = self.context.get('request')  # get the request from the parent serializer's context
        # assign the request object to the nested serializer's context
        self.fields['nested_serializer_field'].context['request'] = request_obj
We cannot pass the context to a nested serializer in its own __init__(), because it gets initialized at declaration time in the parent serializer.
class SomeParentSerializer(serializers.Serializer):
    some_child = SomeChildSerializer()  # gets initialized here
Method-2: Passing context when the child serializer gets bound to its parent
Another option is to add the context when a child/nested serializer gets bound to the parent.
class SomeChildSerializer(Serializer):
    def bind(self, field_name, parent):
        super(SomeChildSerializer, self).bind(field_name, parent)  # child gets bound to parent
        request_obj = parent.context.get('request')  # get the request from the parent serializer's context
        self.context['request'] = request_obj
Quoting the DRF author's suggested option in the related ticket:
This should be considered private API, and the parent
__init__ style listed above should be preferred.
So, the better option is to override the __init__() method of ParentSerializer and pass the context to child/nested serializer.
(Source: check this related ticket on Github.)
If you need to pass a context to a Serializer class, you can use the Serializer's context, and you will then be able to use it in a SerializerMethodField:
class MySerializer(serializers.Serializer):
    field = serializers.SerializerMethodField()

    def get_field(self, obj):
        return self.context.get('my_key')
You call it from the view:
...
s = MySerializer(data=data, context={'my_key': 'my_value'})
...
EDIT:
If you need to use this context in another Serializer class, pass it to the first serializer in the view, then pass it on to the nested serializer:
# views.py
...
s = MySerializer(data=data, context={'my_key': 'my_value'})
...

# serializers.py
class MySerializer(serializers.Serializer):
    field = serializers.SerializerMethodField()

    def get_field(self, obj):
        return MySecondSerializer(..., context=self.context)
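As a side note, when the serializer is instantiated by a DRF generic view or ViewSet you normally do not have to build this context yourself; get_serializer() fills it in. A minimal sketch, assuming a hypothetical VehicleViewSet for the question's models:
from rest_framework import viewsets

class VehicleViewSet(viewsets.ModelViewSet):
    queryset = Vehicle.objects.all()
    serializer_class = VehicleSerializer
    # get_serializer() uses get_serializer_context(), which already includes
    # {'request': self.request, ...}, so self.context['request'].user is
    # available inside RsModelSerializer without any extra wiring.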

Django ManyToMany field not preselecting rows with custom manager

Django 1.2.5: I've got a model with a custom manager. Data is saved correctly, but it's not retrieved correctly for related objects.
My models are:
Question -> related to a SubjectiveStatistic
SubjectiveStatistic extends Statistic as a proxy. It has a custom manager that restricts the result set to rows where the 'type' field matches 'SubjectiveStatistic' (the type field contains the class name of the object).
Here's the Question:
class Question(models.Model):
    subjective_statistic = models.ManyToManyField(SubjectiveStatistic, null=True, blank=True)
Here is the SubjectiveStatistic:
class SubjectiveStatistic(Statistic):
    ## Use a custom model manager so that the default object collection is
    # filtered by the object class name.
    objects = RestrictByTypeManager('SubjectiveStatistic')

    ## Override the __init__ method to set the type field
    def __init__(self, *args, **kwargs):
        self.type = self.__class__.__name__
        return super(SubjectiveStatistic, self).__init__(*args, **kwargs)

    class Meta:
        proxy = True
Here is the manager:
from django.db import models

## Custom model manager that returns objects filtered so that 'type' == a
# given string.
class RestrictByTypeManager(models.Manager):
    def __init__(self, type='', *args, **kwargs):
        self.type = type
        return super(RestrictByTypeManager, self).__init__(*args, **kwargs)

    def get_query_set(self):
        return super(RestrictByTypeManager, self).get_query_set().filter(type=self.type)
What do I need to do so that related objects are returned correctly? question.subjective_statistic.exists() doesn't return anything, despite relations existing in the database.
Perhaps it's because the RestrictByTypeManager extends Manager instead of ManyRelatedManager (but I can't because that's an inner class) or something like that?
To use your custom manager from the Question model, add use_for_related_fields = True in the definition of the custom manager:
from django.db import models

## Custom model manager that returns objects filtered so that 'type' == a
# given string.
class RestrictByTypeManager(models.Manager):
    use_for_related_fields = True

    def __init__(self, type='', *args, **kwargs):
        self.type = type
        return super(RestrictByTypeManager, self).__init__(*args, **kwargs)

    def get_query_set(self):
        return super(RestrictByTypeManager, self).get_query_set().filter(type=self.type)
This way, RestrictByTypeManager will be used as the manager for SubjectiveStatistic models both for direct access and for reverse access, such as through many-to-many relations.
More information here: Controlling automatic Manager types
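With that flag in place, the reverse access from the question should go through the custom manager as well. A quick sketch of what to expect (the pk is hypothetical):
question = Question.objects.get(pk=1)
# Both calls now go through RestrictByTypeManager, so only related rows
# whose type is 'SubjectiveStatistic' are returned.
question.subjective_statistic.exists()
question.subjective_statistic.all()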

Django: make ModelChoiceField evaluate queryset at run-time

I've overridden the default manager of my models in order to show only allowed items, according to the logged-in user (a sort of object-specific permission):
class User_manager(models.Manager):
    def get_query_set(self):
        """ Filter results according to logged user """
        # Compose a filter dictionary with current user (stored in a middleware method)
        user_filter = middleware.get_user_filter()
        return super(User_manager, self).get_query_set().filter(**user_filter)

class Foo(models.Model):
    objects = User_manager()
    ...
In this way, whenever I use Foo.objects, the current user is retrieved and a filter is applied to the default queryset in order to show allowed records only.
Then, I have a model with a ForeignKey to Foo:
class Bar(models.Model):
    foo = models.ForeignKey(Foo)

class BarForm(forms.ModelForm):
    class Meta:
        model = Bar
When I compose BarForm I'm expecting to see only the filtered Foo instances, but the filter is not applied. I think it is because the queryset is evaluated and cached at Django start-up, when no user is logged in and no filter is applied.
Is there a method to make Django evaluate the ModelChoiceField queryset at run-time, without having to make it explicit in the form definition? (despite all performance issues...)
EDIT
I've found where the queryset is evaluated (django\db\models\fields\related.py: 887):
def formfield(self, **kwargs):
    db = kwargs.pop('using', None)
    defaults = {
        'form_class': forms.ModelChoiceField,
        'queryset': self.rel.to._default_manager.using(db).complex_filter(self.rel.limit_choices_to),
        'to_field_name': self.rel.field_name,
    }
    defaults.update(kwargs)
    return super(ForeignKey, self).formfield(**defaults)
Any hint?
Had exactly this problem -- needed to populate a select form with user objects from a group, but fun_vit's answer is incorrect (at least for Django 1.5).
Firstly, you don't want to overwrite the fields['somefield'].choices object -- it is a ModelChoiceIterator object, not a queryset. Secondly, a comment in django.forms.BaseForm warns you against overriding base_fields:
# The base_fields class attribute is the *class-wide* definition of
# fields. Because a particular *instance* of the class might want to
# alter self.fields, we create self.fields here by copying base_fields.
# Instances should always modify self.fields; they should not modify
# self.base_fields.
This worked for me (django 1.5):
class MyForm(ModelForm):
    users = ModelMultipleChoiceField(queryset=User.objects.none())

    def __init__(self, *args, **kwargs):
        super(MyForm, self).__init__(*args, **kwargs)
        site = Site.objects.get_current()
        self.fields['users'].queryset = site.user_group.user_set.all()

    class Meta:
        model = MyModel
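The same pattern applied to the BarForm from the question would be, roughly (a sketch; it simply defers the queryset to request time so that User_manager's per-user filter is applied when the form is built):
class BarForm(forms.ModelForm):
    class Meta:
        model = Bar

    def __init__(self, *args, **kwargs):
        super(BarForm, self).__init__(*args, **kwargs)
        # Foo.objects is the User_manager, so the middleware-supplied filter
        # runs now, at form instantiation, not at class-definition time.
        self.fields['foo'].queryset = Foo.objects.all()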
I use the __init__ of a custom form:
class BT_Form(forms.ModelForm):
    def __init__(self, *args, **kwargs):
        super(BT_Form, self).__init__(*args, **kwargs)
        # prepare new values
        cities = [(u'', u'------')]  # default value
        cities.extend([
            (
                c.pk,
                c.__unicode__()
            ) for c in City.objects.filter(enabled=True).all()
        ])
        self.fields['fly_from_city'].choices = cities  # renew values
No way: I had to rewrite the queryset definition (which is otherwise evaluated at start-up).

Django: Force select related?

I've created a model, and I'm rendering the default/unmodified model form for it. This alone generates 64 SQL queries because it has quite a few foreign keys, and those in turn have more foreign keys.
Is it possible to force it to always (by default) perform a select_related every time one of these models are returned?
You can create a custom manager, and simply override get_queryset for it to apply everywhere. For example:
class MyManager(models.Manager):
    def get_queryset(self):
        return super(MyManager, self).get_queryset().select_related('foo', 'bar')
(Prior to Django 1.6, it was get_query_set).
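Attaching that manager to a model is then enough for every default query to do the join. A minimal sketch, assuming hypothetical Foo and Bar models matching the select_related('foo', 'bar') above:
class MyModel(models.Model):
    foo = models.ForeignKey(Foo)
    bar = models.ForeignKey(Bar)

    objects = MyManager()

# MyModel.objects.all() and MyModel.objects.filter(...) now join foo and bar
# in a single query instead of issuing one extra query per related access.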
Here's also a fun trick:
class DefaultSelectOrPrefetchManager(models.Manager):
    def __init__(self, *args, **kwargs):
        self._select_related = kwargs.pop('select_related', None)
        self._prefetch_related = kwargs.pop('prefetch_related', None)
        super(DefaultSelectOrPrefetchManager, self).__init__(*args, **kwargs)

    def get_queryset(self, *args, **kwargs):
        qs = super(DefaultSelectOrPrefetchManager, self).get_queryset(*args, **kwargs)
        if self._select_related:
            qs = qs.select_related(*self._select_related)
        if self._prefetch_related:
            qs = qs.prefetch_related(*self._prefetch_related)
        return qs

class Sandwich(models.Model):
    bread = models.ForeignKey(Bread)
    extras = models.ManyToManyField(Extra)
    # ...

    objects = DefaultSelectOrPrefetchManager(select_related=('bread',), prefetch_related=('extras',))
Then you can re-use the manager easily between model classes. As an example use case, this would be appropriate if you had a __unicode__ method on the model which rendered a string that included some information from a related model (or anything else that meant a related model was almost always required).
...and if you really want to get wacky, here's a more generalized version. It allows you to call any sequence of methods on the default queryset with any combination of args or kwargs. There might be some errors in the code, but you get the idea.
from django.db import models

class MethodCalls(object):
    """
    A mock object which logs chained method calls.
    """
    def __init__(self):
        self._calls = []

    def __getattr__(self, name):
        c = Call(self, name)
        self._calls.append(c)
        return c

    def __iter__(self):
        for c in self._calls:
            yield tuple(c)

class Call(object):
    """
    Used by `MethodCalls` objects internally to represent chained method calls.
    """
    def __init__(self, calls_obj, method_name):
        self._calls = calls_obj
        self.method_name = method_name

    def __call__(self, *method_args, **method_kwargs):
        self.method_args = method_args
        self.method_kwargs = method_kwargs
        return self._calls

    def __iter__(self):
        yield self.method_name
        yield self.method_args
        yield self.method_kwargs

class DefaultQuerysetMethodCallsManager(models.Manager):
    """
    A model manager class which allows specification of a sequence of
    method calls to be applied by default to base querysets.

    `DefaultQuerysetMethodCallsManager` instances expose a property
    `default_queryset_method_calls` to which chained method calls can be
    applied to indicate which methods should be called on base querysets.
    """
    def __init__(self, *args, **kwargs):
        self.default_queryset_method_calls = MethodCalls()
        super(DefaultQuerysetMethodCallsManager, self).__init__(*args, **kwargs)

    def get_queryset(self, *args, **kwargs):
        qs = super(DefaultQuerysetMethodCallsManager, self).get_queryset(*args, **kwargs)
        for method_name, method_args, method_kwargs in self.default_queryset_method_calls:
            qs = getattr(qs, method_name)(*method_args, **method_kwargs)
        return qs

class Sandwich(models.Model):
    bread = models.ForeignKey(Bread)
    extras = models.ManyToManyField(Extra)
    # Other field definitions...

    objects = DefaultQuerysetMethodCallsManager()
    objects.default_queryset_method_calls.filter(
        bread__type='wheat',
    ).select_related(
        'bread',
    ).prefetch_related(
        'extras',
    )
The python-mock-inspired MethodCalls object is an attempt at making the API more natural. Some might find that a bit confusing. If so, you could sub out that code for an __init__ arg or kwarg that just accepts a tuple of method call information.
Create a custom models.Manager and override all the methods (filter, get etc.) and append select_related onto every query. Then set this manager as the objects attribute on the model.
I would recommend just going through your code and adding the select_related where needed, because doing select_related on everything is going to cause some serious performance issues down the line (and it wouldn't be entirely clear where it's coming from).