Sync to Async Django ORM queryset length

In an asynchronous context I try to do:
invites = await InviteLogic.get_invites(self.app.user)
if len(invites) > 0:
    ...
InviteLogic looks like this:
@sync_to_async
def get_invites(self, inviter):
    return Invite.objects.filter(inviter=inviter)
I get an error on the if len(...) line:
django.core.exceptions.SynchronousOnlyOperation: You cannot call this from an async context - use a thread or sync_to_async.
How do I call len asynchronously?

The queryset is lazy: get_invites returns it unevaluated, and it is len() that actually runs the query, inside your async context. Force the evaluation inside the sync_to_async wrapper instead:
@sync_to_async
def get_invites(self, inviter):
    return list(Invite.objects.filter(inviter=inviter))
Then your original calling code works unchanged:
invites = await InviteLogic.get_invites(self.app.user)
if len(invites) > 0:
    ...
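As an aside, on Django 4.1+ the ORM has native async query methods, so the count no longer needs sync_to_async at all (a minimal sketch, assuming a recent Django):
invites_count = await Invite.objects.filter(inviter=inviter).acount()
if invites_count > 0:
    ...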

Related

Flask with Telethon doesn't close the thread

I am trying to send a message to Telegram via Telethon and Flask, but Telethon is not releasing the thread, so I cannot get a response in Postman.
I followed the Telethon documentation, but with no success :(
SendMessageApi.Exec is the second function, which only calls the SendMessage function.
@app.post('/send-message')
async def send_message():
    req = request.get_json()
    return await SendMessageApi.Exec(req)

async def Exec(req):
    request = SendMessageRequest(**req)
    response = await SendMessage(request)
    return response

async def SendMessage(request: SendMessageRequest):
    client = TelegramClient(request.phone, request.apiId, request.apiHash)
    await client.connect()
    if not await client.is_user_authorized():
        await client.send_code_request(request.phone)
        await client.sign_in(request.phone, input('send code'))
    destinationEntity = await client.get_entity(request.destinationUsername)
    responseMessage = await client.send_message(destinationEntity, request.message, parse_mode=request.parseMode, link_preview=True)
    if request.pin:
        await client.pin_message(destinationEntity, responseMessage, notify=True)
    client.session.close()
    return json.dumps(responseMessage, skipkeys=True, cls=json.JSONEncoder, default=json_default)
You can use Flask + Telethon with hypercorn; see the full example here (scroll down):
https://urlk.ga/#telethon_codes
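One detail worth checking in the question's code, independent of which server you pick: client.session.close() only closes the session storage, while it is await client.disconnect() that actually stops Telethon's background receive loop and lets the thread finish. A minimal sketch of SendMessage with that change (the sending logic is trimmed; SendMessageRequest and json_default are the question's own names):

async def SendMessage(request: SendMessageRequest):
    client = TelegramClient(request.phone, request.apiId, request.apiHash)
    await client.connect()
    try:
        # ... authorization and sending exactly as in the question ...
        destinationEntity = await client.get_entity(request.destinationUsername)
        responseMessage = await client.send_message(destinationEntity, request.message)
        return json.dumps(responseMessage, skipkeys=True, default=json_default)
    finally:
        # disconnect() shuts down the client's background tasks;
        # closing the session alone does not release them.
        await client.disconnect()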

Django ORM sync_to_async error FATAL: remaining connection slots are reserved for non-replication superuser connections

I am trying to implement asynchronous support (https://docs.djangoproject.com/en/3.1/topics/async/) for the Django ORM to write many records to the database (Postgres).
I am getting: ERROR:root:FATAL: remaining connection slots are reserved for non-replication superuser connections
I am creating coroutines and adding them to the running asyncio loop:
# Helper class
class ModelsHelper:
    @staticmethod
    @sync_to_async
    def __handle_resource_data(data):
        # Some calculation to create a kwargs dict
        return Resource.objects.get_or_create(**kwargs)

    async def store_data(metric):
        # Some calculation to get data
        return await ModelsHelper.__handle_resource_data(data)

# Main file
def get_event_loop():
    loop = None
    try:
        loop = asyncio.get_event_loop()
    except Exception as e:
        print(" New Event Loop ".center(50, '*'))
        loop = asyncio.new_event_loop()
        asyncio.set_event_loop(loop)
    return loop

loop = get_event_loop()
future = asyncio.ensure_future(
    asyncio.gather(*[ModelsHelper.store_data(metric) for metric in metrics]),
    loop=loop
)
loop.run_until_complete(future)
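The error itself points at connection exhaustion: each sync_to_async call runs on a worker thread, every such thread opens its own Django database connection, and gathering all coroutines at once can open more connections than Postgres allows. A sketch of one common mitigation, bounding the concurrency with a semaphore (the cap of 10 is an arbitrary assumption; keep it below your Postgres max_connections, and consider calling django.db.connection.close() at the end of the sync function as well, since connections on short-lived threads are never reused):

import asyncio

DB_CONCURRENCY = 10  # hypothetical cap, well below Postgres max_connections

semaphore = asyncio.Semaphore(DB_CONCURRENCY)

async def store_data_bounded(metric):
    # At most DB_CONCURRENCY coroutines touch the database at once,
    # so at most that many worker-thread connections are open together.
    async with semaphore:
        return await ModelsHelper.store_data(metric)

loop.run_until_complete(
    asyncio.gather(*[store_data_bounded(metric) for metric in metrics])
)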

django channels async process

I am still pretty new to django-channels and am starting directly with Channels 2.0, so examples are still a bit hard to find. I am trying to understand how I can create an asynchronous process in a channel and make a JS client listen to it.
While connecting to my consumer, I check for a running stream on a thread and try to send back predictions over the channel. This process is asynchronous, but I am not sure how to properly use an AsyncConsumer or AsyncJsonWebsocketConsumer.
So far I have a simple consumer like this:
consumers.py
import threading
from sklearn.externals import joblib
from channels.generic.websocket import JsonWebsocketConsumer
from .views import readStream

class My_Consumer(JsonWebsocketConsumer):
    groups = ["broadcast"]
    the_thread_name = 'TestThread'

    def connect(self):
        the_thread = None
        self.accept()
        # check if there is an active stream on a thread
        for thread in threading.enumerate():
            if thread.name == self.the_thread_name:
                the_thread = thread
                break
        if the_thread == None:
            xsLog.infoLog('No Stream found yet...')
            self.close()
        else:
            the_stream = readStream()
            # ...
            the_estimator = joblib.load('some_file','r')
            the_array = np.zeros(shape=(1,93), dtype=float)
            self.predict(the_thread,the_stream,the_array,the_estimator)

    def disconnect(self,close_code):
        pass

    async def predict(self,the_thread,the_stream,the_array,the_estimator):
        while the_thread.isAlive():
            sample = await the_stream.read()
            if sample != [0.0,0.0,0.0] and 'nan' not in str(sample):
                the_array = np.roll(self.the_array,-3)
                the_array[0][-3] = sample[0]
                the_array[0][-2] = sample[1]
                the_array[0][-1] = sample[2]
                new_val = '{},{}'.format(the_estimator.predict(the_array)[0][0],the_estimator.predict(the_array)[0][1])
                await self.send_json(new_val)
My JS client is pretty simple and tries to listen:
const webSocketBridge = new channels.WebSocketBridge();
webSocketBridge.connect('some_route');
webSocketBridge.socket.addEventListener('open', function() {
    console.log("Connected to WebSocket");
});
webSocketBridge.listen(function(message, stream) {console.log(message);});
EDIT
From the above-mentioned question, I tried the following simplified solution with an AsyncJsonWebsocketConsumer:
import threading
from channels.generic.websocket import AsyncJsonWebsocketConsumer
from channels.db import database_sync_to_async

class My_Consumer(AsyncJsonWebsocketConsumer):
    groups = ["broadcast"]
    the_thread = None
    the_data = None

    @database_sync_to_async
    def getUserData(self,the_user):
        return models.UserData.objects.filter(user=the_user,datatype='codebooks')

    async def connect(self):
        await self.accept()
        for thread in threading.enumerate():
            if thread.name == 'some_thread_name':
                self.the_thread = thread
                break
        if self.the_thread == None:
            xsLog.infoLog('No Stream found yet...')
            await self.close()
        else:
            the_data = await self.getUserData(the_user)

    async def receive(self,content=None,text_data=None):
        await self.predict(self.the_thread,self.the_data)

    async def predict(self,the_thread,the_data):
        """..."""
        while the_thread.isAlive():
            # doing something here
            new_data = data_from_thread + the_data
            await self.send_json(new_data)

    async def disconnect(self,close_code):
        await self.close()
Then I send one message from the JS client to trigger the consumer's receive method:
webSocketBridge.socket.addEventListener('open', function() {
    console.log("Connected to WebSocket, Launching Predictions");
    webSocketBridge.send('');
});
The issue here is that once the predict coroutine is launched, it blocks the whole receive coroutine, and my JS client cannot receive any messages through:
webSocketBridge.listen(function(message, stream) {
    console.log(message.stream);
});
The disconnect coroutine is then not recognized either.
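One way out of the blocking, sketched under the assumption that the rest of the consumer stays as above (data_from_thread is the question's placeholder): schedule predict on the event loop instead of awaiting it, so receive returns immediately, and make the loop yield so outgoing frames and the disconnect handler still run:

import asyncio
from channels.generic.websocket import AsyncJsonWebsocketConsumer

class My_Consumer(AsyncJsonWebsocketConsumer):
    async def receive(self, content=None, text_data=None):
        # Fire and forget: receive() returns at once while predict()
        # keeps running concurrently on the same event loop.
        asyncio.ensure_future(self.predict(self.the_thread, self.the_data))

    async def predict(self, the_thread, the_data):
        while the_thread.is_alive():
            new_data = data_from_thread + the_data  # as in the question
            await self.send_json(new_data)
            # Yield control so other coroutines are not starved.
            await asyncio.sleep(0)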

Flask: refer to 'g' in gevent spawn in a request

I use a DB pool in my flask-restful project. I register a before-request hook so that every request gets a DB connection and stores it in the thread-local variable g:
# acquire db connection from pool
@app.before_request
def get_connection():
    setattr(g, '__con__', MysqlHandler())
My model layer then gets the DB connection from g for CRUD operations:
@classmethod
def get(cls, **kwargs):
    res = g.__con__.simple_query(cls.__table__, query_cond=kwargs)
    return cls(**res[0]) if res else None
Finally, after the request, the connection is committed and released back to the pool in an after_request hook:
# commit db update after the request, if no exception
@app.after_request
def commit(response):
    if getattr(g, '__con__', None):
        g.__con__.commit()
    return response
This setup works fine until I introduce gevent to handle a long-running async task inside a request:
@copy_current_request_context
def my_async_task():
    time.sleep(5)
    print 'I am g', g.__con__

class TeamListView(Resource):
    # http GET handler, return all team members
    def get(self):
        gevent.spawn(my_async_task)
        all_groups = Team.all()
        return return_json(data=all_groups)
The above code returns JSON data to the front end immediately, which means the request context is destroyed after the request, so g.__con__ can no longer be accessed after the 5-second sleep in my async task.
My async task has to perform database operations via g.__con__, so is there any way to keep g alive even after the request completes?
Thanks in advance for your help.
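A sketch of one workaround, assuming MysqlHandler can be constructed outside the request cycle just as it is in before_request: hand the background task its own connection as an argument, instead of reaching for the request-scoped g that after_request commits and releases:

def my_async_task(con):
    # No request context here; the connection arrives as an argument.
    time.sleep(5)
    # ... long-running DB work using con instead of g.__con__ ...
    con.commit()

class TeamListView(Resource):
    def get(self):
        # A fresh connection owned by the greenlet, independent of g.
        gevent.spawn(my_async_task, MysqlHandler())
        all_groups = Team.all()
        return return_json(data=all_groups)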

Recover from task failed beyond max_retries

I am attempting to asynchronously consume a web service because it takes up to 45 seconds to return. Unfortunately, this web service is also somewhat unreliable and can throw errors. I have set up django-celery and have my tasks executing, which works fine until the task fails beyond max_retries.
Here is what I have so far:
@task(default_retry_delay=5, max_retries=10)
def request(xml):
    try:
        server = Client('https://www.whatever.net/RealTimeService.asmx?wsdl')
        xml = server.service.RunRealTimeXML(
            username=settings.WS_USERNAME,
            password=settings.WS_PASSWORD,
            xml=xml
        )
    except Exception, e:
        result = Result(celery_id=request.request.id, details=e.reason, status="i")
        result.save()
        try:
            return request.retry(exc=e)
        except MaxRetriesExceededError, e:
            result = Result(celery_id=request.request.id, details="Max Retries Exceeded", status="f")
            result.save()
            raise
    result = Result(celery_id=request.request.id, details=xml, status="s")
    result.save()
    return result
Unfortunately, MaxRetriesExceededError is not being thrown by retry(), so I'm not sure how to handle the failure of this task. Django has already returned HTML to the client, and I am checking the contents of Result via AJAX, which never reaches the full-failure "f" status.
So the question is: How can I update my database when the Celery task has exceeded max_retries?
The issue is that celery is trying to re-raise the exception you passed in when it hits the retry limit. The code for doing this re-raising is here: https://github.com/celery/celery/blob/v3.1.20/celery/app/task.py#L673-L681
The simplest way around this is to just not have celery manage your exceptions at all:
@task(max_retries=10)
def mytask():
    try:
        do_the_thing()
    except Exception as e:
        try:
            mytask.retry()
        except MaxRetriesExceededError:
            do_something_to_handle_the_error()
            logger.exception(e)
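On newer Celery versions the same pattern is usually written with a bound task, so the retry goes through self; a sketch of the equivalent (do_the_thing and the error handler are the placeholders from the answer above):

from celery import shared_task
from celery.exceptions import MaxRetriesExceededError

@shared_task(bind=True, max_retries=10)
def mytask(self):
    try:
        do_the_thing()
    except Exception as e:
        try:
            # No exc argument, so hitting the limit raises
            # MaxRetriesExceededError instead of re-raising e.
            self.retry()
        except MaxRetriesExceededError:
            do_something_to_handle_the_error()
            logger.exception(e)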
You can override the after_return method of the Celery task class; this method is called after the task executes, whatever its final status is (SUCCESS, FAILURE, RETRY):
class MyTask(celery.task.Task):
    def run(self, xml, **kwargs):
        # Your stuff here
        pass

    def after_return(self, status, retval, task_id, args, kwargs, einfo=None):
        if self.max_retries == int(kwargs['task_retries']):
            # If max retries equals task retries, do something
            pass
        if status == "FAILURE":
            # You can also do something when the task fails, instead of checking the retries
            pass
http://readthedocs.org/docs/celery/en/latest/reference/celery.task.base.html#celery.task.base.BaseTask.after_return
http://celery.readthedocs.org/en/latest/reference/celery.app.task.html?highlight=after_return#celery.app.task.Task.after_return
With Celery version 2.3.2 this approach has worked well for me:
class MyTask(celery.task.Task):
    abstract = True
    def after_return(self, status, retval, task_id, args, kwargs, einfo):
        if self.max_retries == self.request.retries:
            # If max retries is equal to task retries, do something
            pass

@task(base=MyTask, default_retry_delay=5, max_retries=10)
def request(xml):
    # Your stuff here
    pass
I'm just going with this for now; it spares me the work of subclassing Task and is easily understood.
# auto-retry with delay as defined below. After that, hook is disabled.
@celery.shared_task(bind=True, max_retries=5, default_retry_delay=300)
def post_data(self, hook_object_id, url, event, payload):
    headers = {'Content-type': 'application/json'}
    try:
        r = requests.post(url, data=payload, headers=headers)
        r.raise_for_status()
    except requests.exceptions.RequestException as e:
        if self.request.retries >= self.max_retries:
            log.warning("Auto-deactivating webhook %s for event %s", hook_object_id, event)
            Webhook.objects.filter(object_id=hook_object_id).update(active=False)
            return False
        raise self.retry(exc=e)
    return True