Flatten JSON returned by DRF - Django

My JSON API currently returns data in the format shown below, but I want it to return the JSON with the namingzone key flattened, as in the target format further down.
Could anyone tell me how I can revise the serializer to achieve this?
serializer.py is also shown below. For models.py and views.py, please refer to my previous post.
Current
{
    "zone": {
        "zone": "office_enclosed",
        "namingzone": [
            {
                "naming": "moffice"
            }
        ]
    },
    "lpd": 11.9,
    "sensor": true
},
{
    "zone": {
        "zone": "office_open",
        "namingzone": [
            {
                "naming": "off"
            },
            {
                "naming": "office"
            }
        ]
    },
    "lpd": 10.5,
    "sensor": true
}
Target
{
    "zone": "office_enclosed",
    "naming": "moffice",
    "lpd": 11.9,
    "sensor": true
},
{
    "zone": "office_open",
    "naming": "off",
    "lpd": 10.5,
    "sensor": true
},
{
    "zone": "office_open",
    "naming": "office",
    "lpd": 10.5,
    "sensor": true
}
serializer.py
class namingNewSerializer(serializers.ModelSerializer):
    class Meta:
        model = Naming
        fields = ('naming',)

class zoneSerializer(serializers.ModelSerializer):
    namingzone = namingNewSerializer(many=True)

    class Meta:
        model = Zone
        fields = ('zone', 'namingzone')

class lightSerializer(serializers.ModelSerializer):
    zone = zoneSerializer()

    class Meta:
        model = Light
        fields = ('zone', 'lpd', 'sensor')

class namingSerializer(serializers.ModelSerializer):
    zone = zoneSerializer()

    class Meta:
        model = Naming
        fields = ('zone', 'naming')

I would say using a Serializer might complicate the implementation. Rather, you can take a pythonic approach. Try something like this:
class SomeView(APIView):
    ...
    def get(self, request, *args, **kwargs):
        data = lightSerializer(Light.objects.all(), many=True).data
        data = list(data)  # convert lazy object to list
        updated_data = list()
        for item in data:
            newdict = dict()
            zone = item['zone']
            newdict.update({'zone': zone['zone'], 'lpd': item['lpd'], 'sensor': item['sensor']})
            for naming_zone in zone.get('namingzone'):
                naming_zone.update(newdict)
                updated_data.append(naming_zone)
        return Response(updated_data, status=status.HTTP_200_OK)

See the DRF fields documentation about source; it will help you:
https://www.django-rest-framework.org/api-guide/fields/#source
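
For example, a minimal sketch (assuming the same Light and Zone models from the previous post) of how source could pull the nested zone name up one level; note this alone does not duplicate one item per naming, which still needs view-level code like the loop above:
class lightFlatSerializer(serializers.ModelSerializer):
    # hypothetical flattened serializer: 'source' traverses the related Zone's own 'zone' field
    zone = serializers.CharField(source='zone.zone', read_only=True)

    class Meta:
        model = Light
        fields = ('zone', 'lpd', 'sensor')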

Related

Django/Wagtail Rest Framework API Ordering/Pagination

I am using Wagtail and have serialized my API. I want to order the results by -first_published_at, so that when someone hits my API URL api/v2/pages they see an ordered response without filtering via the URL. Here is my api.py code:
class ProdPagesAPIViewSet(BaseAPIViewSet):
    renderer_classes = [JSONRenderer]
    filter_backends = [
        FieldsFilter,
        ChildOfFilter,
        AncestorOfFilter,
        DescendantOfFilter,
        OrderingFilter,
        TranslationOfFilter,
        LocaleFilter,
        SearchFilter,
    ]
    meta_fields = ["type", "seo_title", "search_description", "first_published_at"]
    body_fields = ["id", "type", "seo_title", "search_description", "first_published_at", "title"]
    listing_default_fields = ["type", "seo_title", "search_description", "first_published_at", "id", "title", "alternative_title", "news_slug", "blog_image", "video_thumbnail", "categories", "blog_authors", "excerpt", "content", "content2", "tags"]
    nested_default_fields = []
    ordered_queryset = []
    name = "pages"
    model = AddStory

api_router.register_endpoint("pages", ProdPagesAPIViewSet)
I have tried ordered_queryset = [AddStory.objects.order_by('-first_published_at')], but the results are not ordered by the newest published stories. How should I write the query?
Here is my API response
{
    "meta": {
        "total_count": 6
    },
    "items": [
        {
            "id": 4,
            "meta": {
                "type": "blog.AddStory",
                "seo_title": "",
                "search_description": "",
                "first_published_at": "2022-08-30T11:05:11.341355Z"
            }
        },
        {
            "id": 6,
            "meta": {
                "type": "blog.AddStory",
                "seo_title": "",
                "search_description": "",
                "first_published_at": "2022-08-30T11:13:47.114889Z"
            }
        },
        {
            "id": 7,
            "meta": {
                "type": "blog.AddStory",
                "seo_title": "",
                "search_description": "",
                "first_published_at": "2022-08-31T11:13:47.114889Z"
            }
        },

Solved it by using get_queryset instead of ordered_queryset:
# api.py
class ProdPagesAPIViewSet(BaseAPIViewSet):
    renderer_classes = [JSONRenderer]
    filter_backends = [
        FieldsFilter,
        ChildOfFilter,
        AncestorOfFilter,
        DescendantOfFilter,
        OrderingFilter,
        TranslationOfFilter,
        LocaleFilter,
        SearchFilter,
    ]
    meta_fields = ["type", "seo_title", "search_description", "first_published_at"]
    body_fields = ["id", "type", "seo_title", "search_description", "first_published_at", "title"]
    listing_default_fields = ["type", "seo_title", "search_description", "first_published_at", "id", "title", "alternative_title", "news_slug", "blog_image", "video_thumbnail", "categories", "blog_authors", "excerpt", "content", "content2", "tags"]
    nested_default_fields = []
    name = "pages"
    model = AddStory

    def get_queryset(self):
        return self.model.objects.all().order_by("-first_published_at")

api_router.register_endpoint("pages", ProdPagesAPIViewSet)

Django rest framework - serializer for dictionary structured payload

I'm trying to create a Serializer for a payload that looks something like this -
{
    "2fd08845-9b21-4972-87ed-2e7fd03448c5": {
        "operation": "Create",
        "operationId": "356f6501-a117-4c8d-98ce-dcb4344d481b",
        "user": "superuser",
        "immediate": "true"
    },
    "fe6d0c85-0021-431e-9955-e8e1b1ebc414": {
        "operation": "Create",
        "operationId": "adcedb2f-c751-441f-8108-2c29667ea9cf",
        "user": "employee",
        "immediate": "false"
    }
}
I thought of using DictField, but my problem is that there isn't a field name; it's only a dictionary of keys and values.
I tried something like:
class UserOperationSerializer(serializers.Serializer):
    operation = serializers.ChoiceField(choices=["Create", "Delete"])
    operationId = serializers.UUIDField()
    user = serializers.CharField()
    immediate = serializers.BooleanField()

class UserOperationsSerializer(serializers.Serializer):
    test = serializers.DictField(child=RelationshipAuthorizeObjectSerializer())
But again, there isn't a 'test' field.

I think your easiest path forward would be to flatten the payload to the following format:
[
    {
        "request_id": "2fd08845-9b21-4972-87ed-2e7fd03448c5",
        "operation": "Create",
        "operationId": "356f6501-a117-4c8d-98ce-dcb4344d481b",
        "user": "superuser",
        "immediate": "true"
    },
    {
        "request_id": "fe6d0c85-0021-431e-9955-e8e1b1ebc414",
        "operation": "Create",
        "operationId": "adcedb2f-c751-441f-8108-2c29667ea9cf",
        "user": "employee",
        "immediate": "false"
    }
]
And then serialize it. Otherwise, you'd be creating custom fields/serializers, which is not pretty.
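
A minimal sketch of that flattening step, assuming the payload arrives as request.data in a DRF view (the helper name and the request_id field are illustrative, not from the original post):
def flatten_payload(payload: dict) -> list:
    # turn {request_id: {...fields...}} into a list of dicts with the key inlined as 'request_id'
    return [{'request_id': request_id, **fields} for request_id, fields in payload.items()]

# usage, assuming UserOperationSerializer gains a request_id = serializers.UUIDField():
# serializer = UserOperationSerializer(data=flatten_payload(request.data), many=True)
# serializer.is_valid(raise_exception=True)
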
The way I finally solved it was to add a dynamic 'body' field that contains the real payload of the request.
class UserOperationSerializer(serializers.Serializer):
    operation = serializers.ChoiceField(choices=["Create", "Delete"])
    operationId = serializers.UUIDField()
    user = serializers.CharField()
    immediate = serializers.BooleanField()

class UserOperationsSerializer(serializers.Serializer):
    body = serializers.DictField(child=UserOperationSerializer())

    def __init__(self, *args, **kwargs):
        kwargs['data'] = {'body': kwargs['data']}
        super().__init__(*args, **kwargs)
Then, in the view, I use the data as serializer.validated_data['body'].
That did the trick for me.
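
For completeness, a hedged sketch of how that serializer might be used in a view (the view name and response shape are illustrative, not from the original post):
class UserOperationsView(APIView):
    def post(self, request, *args, **kwargs):
        # the serializer's __init__ wraps request.data under the 'body' key itself
        serializer = UserOperationsSerializer(data=request.data)
        serializer.is_valid(raise_exception=True)
        operations = serializer.validated_data['body']  # dict keyed by request id
        # ... process each operation here ...
        return Response({'received': len(operations)}, status=status.HTTP_200_OK)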

Django Rest Framework: Get unique list of values from nested structure

I want to be able to return a list of strings from a deeply nested structure of data. In this scenario, I have an API that manages a chain of bookstores with many locations in different regions.
Currently, I have an API endpoint that takes a region's ID and returns a nested JSON structure of details about the region, the individual bookstores, and the books that can be found in each store.
{
    "region": [
        {
            "store": [
                {
                    "book": {
                        "name": "Foo"
                    }
                },
                {
                    "book": {
                        "name": "Bar"
                    }
                },
                {
                    "book": {
                        "name": "Baz"
                    }
                }
            ]
        },
        {
            "store": [
                {
                    "book": {
                        "name": "Foo"
                    }
                },
                {
                    "book": {
                        "name": "Bar"
                    }
                }
            ]
        },
        {
            "store": [
                {
                    "book": {
                        "name": "Foo"
                    }
                },
                {
                    "book": {
                        "name": "Baz"
                    }
                },
                {
                    "book": {
                        "name": "Qux"
                    }
                }
            ]
        }
    ]
}
My models look like the following. I am aware these models don't make the most sense for this contrived example, but they do reflect my real-world code:
class Book(TimeStampedModel):
    name = models.CharField(default="", max_length=512)

class Bookstore(TimeStampedModel):
    value = models.CharField(default="", max_length=1024)
    book = models.ForeignKey(Book, on_delete=models.CASCADE)

class Region(TimeStampedModel):
    stores = models.ManyToManyField(Bookstore)

class BookstoreChain(TimeStampedModel):
    regions = models.ManyToManyField(Region)
The serializers I created for the above response look like:
class BookSerializer(serializers.ModelSerializer):
    class Meta:
        model = Book
        fields = "__all__"

class BookstoreSerializer(serializers.ModelSerializer):
    books = BookSerializer()

    class Meta:
        model = Bookstore
        fields = "__all__"

class RegionSerializer(serializers.ModelSerializer):
    stores = BookstoreSerializer(many=True)

    class Meta:
        model = Region
        fields = "__all__"

class BookstoreChainSerializer(serializers.ModelSerializer):
    regions = RegionSerializer(many=True)

    class Meta:
        model = BookstoreChain
        fields = "__all__"
I'm not sure what my view or serializer for this solution needs to look like. I'm more familiar with writing raw SQL or using an ORM/LINQ to get a set of results.
While the above response is certainly useful, what I really want is an API endpoint that returns a unique list of book names that can be found in a given region (Foo, Bar, Baz, Qux). I would like my response to look like:
{
    "books": [
        "Foo",
        "Bar",
        "Baz",
        "Qux"
    ]
}
My feeble attempt so far has a urls.py with the following path:
path("api/regions/<int:pk>/uniqueBooks/", views.UniqueBooksForRegionView.as_view(), name="uniqueBooksForRegion")
My views.py looks like:
class UniqueBooksForRegionView(generics.RetrieveAPIView):
    queryset = Region.objects.all()
    serializer_class = ???

Starting from the region, you have to get the stores so that you can filter the books in those stores. Here is a solution which should work.
Note:
Avoid using .get() in an *APIView because it will raise an error if the requested ID does not exist; you can use get_object_or_404(), but then you cannot log the error in Sentry.
To fetch an element in an *APIView, use filter().
import logging as L

class UniqueBooksForRegionView(generics.RetrieveAPIView):
    lookup_field = 'pk'

    def get(self, *args, **kwargs):
        regions = Region.objects.filter(pk=self.kwargs[self.lookup_field])
        if regions.exists():
            region = regions.first()
            stores_qs = region.stores.all()
            # reverse FK lookup from Book through Bookstore
            books_qs = Book.objects.filter(bookstore__in=stores_qs).distinct()
            # use your book serializer
            serializer = BookSerializer(books_qs, many=True)
            return Response(serializer.data, HTTP_200_OK)
        else:
            L.error(f'Region with id {self.kwargs[self.lookup_field]} not found.')
            return Response({'detail': f'Region with id {self.kwargs[self.lookup_field]} not found.'}, HTTP_404_NOT_FOUND)
Note
This shows the flow; the code may need some tweaks, but I hope it helps you understand the approach.
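
If you only want the flat list of unique book names from the target response (rather than full serialized Book objects), a hedged variation of the same get() body, assuming the models shown in the question, could use values_list:
# inside get(), instead of serializing Book objects:
names = (
    Book.objects
    .filter(bookstore__in=stores_qs)   # reverse FK lookup from Book through Bookstore
    .values_list('name', flat=True)
    .distinct()
)
return Response({'books': list(names)}, HTTP_200_OK)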

Graphene errors messages

I wonder if it is possible to translate the validation error messages that Graphene provides, for example "Authentication credentials were not provided", as shown in the response below.
{
    "errors": [
        {
            "message": "Authentication credentials were not provided",
            "locations": [
                {
                    "line": 2,
                    "column": 3
                }
            ]
        }
    ],
    "data": {
        "viewer": null
    }
}

Create a custom error type
import graphene
from graphene_django.utils import camelize

class ErrorType(graphene.Scalar):
    @staticmethod
    def serialize(errors):
        if isinstance(errors, dict):
            if errors.get("__all__", False):
                errors["non_field_errors"] = errors.pop("__all__")
            return camelize(errors)
        raise Exception("`errors` should be dict!")
Add it to your mutations
class MyMutation(graphene.Mutation):
    # add the custom error type
    errors = graphene.Field(ErrorType)
    form = SomeForm

    @classmethod
    def mutate(cls, root, info, **kwargs):
        f = cls.form(kwargs)
        if f.is_valid():
            pass
        else:
            # pass the form errors to your custom error type
            return cls(errors=f.errors.get_json_data())
Example
django-graphql-auth uses a similar error type, and it works like this, for example for registration:
mutation {
    register(
        email: "skywalker@email.com",
        username: "skywalker",
        password1: "123456",
        password2: "123"
    ) {
        success,
        errors,
        token,
        refreshToken
    }
}
should return:
{
    "data": {
        "register": {
            "success": false,
            "errors": {
                "password2": [
                    {
                        "message": "The two password fields didn’t match.",
                        "code": "password_mismatch"
                    }
                ]
            },
            "token": null,
            "refreshToken": null
        }
    }
}

My Django form error types, for example:
from typing import List, Type, TypeVar

import graphene
from graphene.utils.str_converters import to_camel_case

T = TypeVar('T')

class DjangoFormError(graphene.ObjectType):
    field = graphene.String()
    message = graphene.String()

    @classmethod
    def list_from_errors_dict(cls: Type[T], django_form_errors: dict) -> List[T]:
        return [
            cls(field=to_camel_case(field), message=' '.join(messages))
            for field, messages in django_form_errors.items()
        ]

class DjangoFormErrorsByIdx(graphene.ObjectType):
    form_idx = graphene.Int()
    errors = graphene.List(DjangoFormError)

    @classmethod
    def list_from_idx_dict(cls: Type[T], errors_by_idx_dict: dict) -> List[T]:
        return [
            cls(
                form_idx=idx,
                errors=DjangoFormError.list_from_errors_dict(django_form_errors),
            )
            for idx, django_form_errors in errors_by_idx_dict.items()
        ]
# ...
# in mutation
if not django_form.is_valid():
    form_errors = DjangoFormError.list_from_errors_dict(
        django_form.errors
    )
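
To tie that together, a hedged sketch (the mutation, its argument, and ThingForm are illustrative, not from the original answer) of exposing those errors on a mutation might look like:
class CreateThing(graphene.Mutation):
    ok = graphene.Boolean()
    errors = graphene.List(DjangoFormError)

    class Arguments:
        name = graphene.String(required=True)

    @classmethod
    def mutate(cls, root, info, **kwargs):
        django_form = ThingForm(kwargs)  # ThingForm is an assumed Django form
        if not django_form.is_valid():
            return cls(ok=False, errors=DjangoFormError.list_from_errors_dict(django_form.errors))
        django_form.save()
        return cls(ok=True, errors=[])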

Elasticsearch in Django - sort alphabetically

I have the following doc:
@brand.doc_type
class BrandDocument(DocType):
    class Meta:
        model = Brand

    id = IntegerField()
    name = StringField(
        fields={
            'raw': {
                'type': 'keyword',
                'fielddata': True,
            }
        },
    )
    lookup_name = StringField(
        fields={
            'raw': {
                'type': 'string',
            }
        },
    )
and I try to make a lookup using this:
BrandDocument.search().sort({
    'name.keyword': order,
})
The problem is that I'm getting results sorted in a case-sensitive way, which means that instead of 'a', 'A', 'ab', 'AB' I get 'A', 'AB', 'a', 'ab'. How can this be fixed?
EDIT: After some additional searching I've come up with something like this:
lowercase_normalizer = normalizer(
    'lowercase_normalizer',
    filter=['lowercase']
)

lowercase_analyzer = analyzer(
    'lowercase_analyzer',
    tokenizer="keyword",
    filter=['lowercase'],
)

@brand.doc_type
class BrandDocument(DocType):
    class Meta:
        model = Brand

    id = IntegerField()
    name = StringField(
        analyzer=lowercase_analyzer,
        fields={
            'raw': Keyword(normalizer=lowercase_normalizer, fielddata=True),
        },
    )
The issue persists, however, and I can't find in the docs how this normalizer should be used.

I would suggest creating a custom analyzer with a lowercase filter and applying it to the field while indexing.
So you have to update the following in the index settings:
{
    "index": {
        "analysis": {
            "analyzer": {
                "custom_sort": {
                    "tokenizer": "keyword",
                    "filter": [
                        "lowercase"
                    ]
                }
            }
        }
    }
}
Add a field (the one you need to sort on) to the mapping with the custom_sort analyzer as below:
{
    "properties": {
        "sortField": {
            "type": "text",
            "analyzer": "custom_sort"
        }
    }
}
If the field already exists in the mapping, you can add a sub-field to the existing field with the analyzer as below.
Assuming the field name with type keyword already exists, update it as:
{
    "properties": {
        "name": {
            "type": "keyword",
            "fields": {
                "sortval": {
                    "type": "text",
                    "analyzer": "custom_sort"
                }
            }
        }
    }
}
Once done, you need to reindex your data so that the lowercase values are indexed. Then you can use the field to sort as follows:
Case 1 (new field):
"sort": [
    {
        "sortField": "desc"
    }
]
Case 2 (sub field):
"sort": [
    {
        "name.sortval": "desc"
    }
]
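
Back in Django, a hedged sketch of sorting with such a sub-field from the question's BrandDocument (assuming the sub-field is named sortval as above and the index has been rebuilt) might look like:
# sort on the lowercased sub-field instead of the raw keyword field
search = BrandDocument.search().sort({
    'name.sortval': {'order': 'asc'},  # or 'desc'
})
results = search.execute()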