asyncio task hangs midway - django

So I am building a scraper which takes a bunch of URLs and a success function that will run via Celery if a URL was fetched successfully. If any error occurs, it just returns, collects the URLs that were not successful, and sends them to a Celery function to be scheduled again.
Below is the code.
class AsyncRequest:
    """Fetch a batch of URLs concurrently and track successes/failures.

    Each successfully fetched URL is recorded in ``self.successfull_urls`` and
    (optionally) handed to a Celery ``callback``; once the whole batch has been
    gathered, URLs that never succeeded can be re-scheduled via
    ``finish_callback``.
    """

    def __init__(self, urls_batch, callback, task_name, method, finish_callback=None, *args, **kwargs):
        """
        :param urls_batch: list of URLs to fetch asynchronously
        :param callback: Celery task that processes a successful response
        :param task_name: label used in log lines
        :param method: HTTP method to use (e.g. "GET")
        :param finish_callback: optional Celery task that re-schedules failed URLs
        Recognised keyword options: ``proxy`` (default None), ``verify_ssl``
        (default True).
        """
        self.tasks = []
        self.headers = {
            "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/69.0.3497.100 Safari/537.36"
        }
        self.urls_batch = urls_batch
        self.task_name = task_name
        # BUG FIX: ``method`` was accepted but never stored, so ``self.method``
        # in fetch() raised AttributeError.
        self.method = method
        self.callback = callback
        self.finish_callback = finish_callback  # (was assigned twice in the original)
        self.args = args
        self.kwargs = kwargs
        self.proxy = kwargs.get("proxy")  # idiomatic .get() instead of '"x" in kwargs.keys()'
        self.successfull_urls = []
        self.verify_ssl = kwargs.get("verify_ssl", True)

    async def fetch(self, session, url, time_out=15, retry_limit=3, *args, **kwargs):
        """Fetch one URL, retrying up to ``retry_limit`` times; never raises."""
        try:
            for i in range(retry_limit):
                try:
                    async with session.request(self.method, url, headers=self.headers,
                                               timeout=ClientTimeout(total=None, sock_connect=time_out,
                                                                     sock_read=time_out),
                                               verify_ssl=self.verify_ssl, proxy=self.proxy) as response:
                        if response.status in [200, 203]:
                            result = await response.text()
                            self.successfull_urls.append(url)
                            # I dont think its a celery issue because even if i comment out the below line it still gets stuck
                            #self.callback.delay(result, url=url, *self.args, **self.kwargs)
                            return
                        else:
                            logger.error(
                                "{} ERROR===============================================>".format(self.task_name))
                            logger.error("status: {}".format(response.status))
                except ClientHttpProxyError as e:
                    # BUG FIX: this logger call was truncated mid-expression in the
                    # original (unclosed paren — a syntax error) and referenced the
                    # undefined name ``pkm``; log the url instead.
                    logger.error("{} ---> {} url: {}, timeout: {}".format(self.task_name, type(e), url, time_out))
                    # BUG FIX: was ``proxy = UpdateProxy()`` — a dead local variable;
                    # rotate the proxy actually used by subsequent retries.
                    self.proxy = UpdateProxy()
        except Exception as e:
            logger.error(
                "{} ---> {} url: {}, timeout: {}!!!! returning".format(self.task_name, type(e), url, time_out))
            # NOTE(review): assumes every url is a key of search_url_pkm_mapping —
            # a missing key would raise inside this handler; verify against caller.
            logger.error("pkm: {} errored".format(self.kwargs["search_url_pkm_mapping"][url]))

    async def main(self):
        """Gather all fetches concurrently, then log success/failure counts."""
        # BUG FIX: aiohttp's ClientSession expects a ClientTimeout instance;
        # ``timeout=100`` passed a bare int, which is invalid and a likely
        # contributor to the reported hangs.
        async with aiohttp.ClientSession(timeout=ClientTimeout(total=100)) as session:
            results = await asyncio.gather(*(self.fetch(session, url) for url in self.urls_batch),
                                           return_exceptions=True)
            logger.info("Gather operation done ----------> Results: {}".format(results))
            logger.info("{} Successful Urls".format(len(self.successfull_urls)))
            errored_urls = [url for url in self.urls_batch if url not in self.successfull_urls]
            logger.error("{} urls errored".format(len(errored_urls)))
            # Below code to send errored urls to a celery task that scheduled them again
            # if self.finish_callback and len(errored_urls) > 0:
            #     self.finish_callback.delay(errored_urls, self.task_name, *self.args, **self.kwargs)
So what happens is that if I send a batch of 50 URLs, almost 40 to 45 of them work perfectly and the remaining ones just hang. Nothing happens; I expect that the tasks will at least throw some error due to a network issue, the server, or anything — they should simply finish, and the code after gather, which shows the number of successful and errored URLs, should execute. But this does not happen: the code just hangs. The log lines after gather are not executed and I don't know where the error is.
Any help will be appreciated highly.
EDIT: I have removed the celery code and just fetching and keeping track of successfull urls and errored one. It still gets stuck. If its necessary to note, i am sending the request to google(but even if google is blocking my request some error must be thrown right.)
EDIT2: One more thing I would like to note: if I keep the URL batch size small, such as 15 to 20, then there is no hang and everything works as expected. But the moment I increase the batch to, say, 50, it gets stuck on 3 to 5 URLs.

Related

Every task failing to execute on Google Cloud Tasks

I need to run some asynchronous tasks in a Django app, and I started to look into Google Cloud Tasks. I think I have followed all the instructions - and every possible variation I could think of, without success so far.
The problem is that all created tasks go to the queue, but fail to execute. The console and the logs report only a http code 301 (permanent redirection). For the sake of simplicity, I deployed the same code to two services of an App Engine (standard), and routed the tasks request to only one of them.
It looks like the code itself is working fine. When I go to "https://[proj].appspot.com/api/v1/tasks", the routine executes nicely and there's no redirection according to DevTools/Network. When Cloud Tasks try to call "/api/v1/tasks", it fails every time.
If anyone could take a look at the code below and point out what may be causing this failure, I'd appreciate very much.
Thank you.
#--------------------------------
# [proj]/.../urls.py
#--------------------------------
from [proj].api import tasks
urlpatterns += [
# tasks api
path('api/v1/tasks', tasks, name='tasks'),
]
#--------------------------------
# [proj]/api.py:
#--------------------------------
from django.views.decorators.csrf import csrf_exempt
#csrf_exempt
def tasks(request):
    """Cloud Tasks handler endpoint: log the posted body and acknowledge."""
    print('Start api')
    decoded_body = request.body.decode("utf-8")
    print(decoded_body)
    print('End api')
    return HttpResponse('OK')
#--------------------------------
# [proj]/views/manut.py
#--------------------------------
from django.views.generic import View
from django.shortcuts import redirect
from [proj].tasks import TasksCreate
class ManutView(View):
    """View whose POST enqueues a test Cloud Task, then bounces back to the referer."""

    template_name = '[proj]/manut.html'

    def post(self, request, *args, **kwargs):
        # Enqueue a task that will POST back to our own API endpoint in 5 seconds.
        creator = TasksCreate()
        response = creator.send_task(
            url='/api/v1/tasks',
            schedule_time=5,
            payload={'task_type': 1, 'id': 21},
        )
        print(response)
        return redirect(request.META['HTTP_REFERER'])
#--------------------------------
# [proj]/tasks/tasks.py:
#--------------------------------
from django.conf import settings
from google.cloud import tasks_v2
from google.protobuf import timestamp_pb2
from typing import Dict, Optional, Union
import json
import time
class TasksCreate:
    """Thin wrapper around the Cloud Tasks client for enqueueing App Engine tasks."""

    def send_task(self,
                  url: str,
                  payload: Optional[Union[str, Dict]] = None,
                  schedule_time: Optional[int] = None,  # in seconds
                  name: Optional[str] = None,
                  ) -> None:
        """Create a Cloud Task that POSTs ``payload`` to ``url`` on the 'tasks' service.

        :param url: relative URI the task should hit on App Engine
        :param payload: request body, either a pre-encoded string or a dict
        :param schedule_time: optional delay from now, in seconds
        :param name: optional explicit task name
        """
        client = tasks_v2.CloudTasksClient()
        queue = client.queue_path(
            settings.GCP_PROJECT,
            settings.GCP_LOCATION,
            settings.GCP_QUEUE,
        )
        # App Engine task, routed explicitly to the 'tasks' service.
        task = {
            'app_engine_http_request': {
                'http_method': 'POST',
                'relative_uri': url,
                'app_engine_routing': {'service': 'tasks'},
            }
        }
        if name:
            task['name'] = name
        # Dict payloads are serialised; strings pass through unchanged.
        body = json.dumps(payload) if isinstance(payload, dict) else payload
        if body is not None:
            # The API expects raw bytes for the request body.
            task['app_engine_http_request']['body'] = body.encode()
        if schedule_time is not None:
            # Convert "seconds from now" into a protobuf Timestamp.
            eta = time.time() + schedule_time
            whole_seconds = int(eta)
            task['schedule_time'] = timestamp_pb2.Timestamp(
                seconds=whole_seconds,
                nanos=int((eta - whole_seconds) * 10 ** 9),
            )
        return client.create_task(queue, task)
# --------------------------------
# [proj]/dispatch.yaml:
# --------------------------------
dispatch:
- url: "*/api/v1/tasks"
service: tasks
- url: "*/api/v1/tasks/"
service: tasks
- url: "*appspot.com/*"
service: default
#--------------------------------
# [proj]/app.yaml & tasks.yaml:
#--------------------------------
runtime: python37
instance_class: F1
automatic_scaling:
max_instances: 2
service: default
#handlers:
#- url: .*
# secure: always
# redirect_http_response_code: 301
# script: auto
entrypoint: gunicorn -b :$PORT --chdir src server.wsgi
env_variables:
...
UPDATE:
Here are the logs for an execution:
{
insertId: "1lfs38fa9"
jsonPayload: {
#type: "type.googleapis.com/google.cloud.tasks.logging.v1.TaskActivityLog"
attemptResponseLog: {
attemptDuration: "0.008005s"
dispatchCount: "5"
maxAttempts: 0
responseCount: "5"
retryTime: "2020-03-09T21:50:33.557783Z"
scheduleTime: "2020-03-09T21:50:23.548409Z"
status: "UNAVAILABLE"
targetAddress: "POST /api/v1/tasks"
targetType: "APP_ENGINE_HTTP"
}
task: "projects/[proj]/locations/us-central1/queues/tectaq/tasks/09687434589619534431"
}
logName: "projects/[proj]/logs/cloudtasks.googleapis.com%2Ftask_operations_log"
receiveTimestamp: "2020-03-09T21:50:24.375681687Z"
resource: {
labels: {
project_id: "[proj]"
queue_id: "tectaq"
target_type: "APP_ENGINE_HTTP"
}
type: "cloud_tasks_queue"
}
severity: "ERROR"
timestamp: "2020-03-09T21:50:23.557842532Z"
}
At last I could make Cloud Tasks work, but only using http_request type (with absolute url). There was no way I could make the tasks run when they were defined as app_engine_http_request (relative url).
I had already tried the http_request type with POST, but that was before I exempted the api function from having the CSRF token checked; that was causing an error Forbidden (Referer checking failed - no Referer.): /api/v1/tasks, which I had failed to connect to the CSRF omission.
If someone stumble across this issue in the future, and find out a way to make app_engine_http_request work on Cloud Tasks with Django, I'd still like very much to know the solution.
The problem is that App Engine task handlers do not follow redirects, so you have to find out why the request is being redirected and make an exception for App Engine requests. In my case I was redirecting http to https and had to make an exception like so: (Node Express)
// Force HTTPS for normal traffic, but let App Engine's internal (HTTP)
// task/cron requests through — they must not be redirected.
app.use((req, res, next) => {
  const ua = req.headers['user-agent']
  if (ua && ua.includes('AppEngine-Google')) {
    console.log('USER AGENT IS GAE, SKIPPING REDIRECT TO HTTPS.')
    return next()
  }
  if (req.headers['x-forwarded-proto'] === 'http') {
    res.redirect(301, `https://${req.headers.host}${req.url}`)
    return
  }
  next()
})
The problem is that all created tasks go to the queue, but fail to execute. The console and the logs report only a http code 301 (permanent redirection).
Maybe the request handler for your task endpoint wants a trailing slash.
Try changing this:
class ManutView(View):
template_name = '[proj]/manut.html'
def post(self, request, *args, **kwargs):
relative_url = '/api/v1/tasks'
...
to this:
class ManutView(View):
template_name = '[proj]/manut.html'
def post(self, request, *args, **kwargs):
relative_url = '/api/v1/tasks/'
...
Also just try hitting the task url yourself and see if you can get a task to run from curl
If someone stumble across this issue in the future, and find out a way
to make app_engine_http_request work on Cloud Tasks with Django, I'd
still like very much to know the solution.
#JCampos I manage to make it work on my Django app (I use in addition DRF but I do no think it causes a big difference).
from google.cloud import tasks_v2
from google.protobuf import timestamp_pb2
import datetime
class CloudTasksMixin:
#property
def _cloud_task_client(self):
return tasks_v2.CloudTasksClient()
def send_to_cloud_tasks(self, url, http_method='POST', payload=None,in_seconds=None, name=None):
""" Send task to be executed """
parent = self._cloud_task_client.queue_path(settings.TASKS['PROJECT_NAME'], settings.TASKS['QUEUE_REGION'], queue=settings.TASKS['QUEUE_NAME'])
task = {
'app_engine_http_request': {
'http_method': http_method,
'relative_uri': url
}
}
...
And then I use a view like this one:
class CloudTaskView(views.APIView):
    """DRF endpoint hit by Cloud Tasks; authentication is disabled for the internal call."""

    authentication_classes = []

    def post(self, request, *args, **kwargs):
        # Do your stuff
        return Response()
Finally I implement this url in the urls.py (from DRF) with csrf_exempt(CloudTaskView.as_view())
At first I had 403 error, but thanks to you and your comment with csrf_exempt, it is now working.
It seems that Cloud Tasks calls App Engine using a HTTP url (that's ok because probably they are in the same network), but if you are using HTTPs, Django should be redirecting (http -> https) any request that's being received, including your handler endpoint.
To solve this, you should tell Django to not redirect your handler.
You can use settings.SECURE_REDIRECT_EXEMPT for it.
For instance:
SECURE_REDIRECT_EXEMPT = [r"^api/v1/tasks/$"]

Django Channels send group message from Celery task. Asyncio event loop stopping before all async tasks finished

I'm currently stuck on a particularly tricky problem, I'll try my best to explain it.
I have a Django project and it's main purpose is to execute queued tasks from a DB rapidly. I use Celery and Celerybeat to achieve this with Django channels to update my templates with the responses in real time.
The Celery worker is a gevent worker pool with a decent number of threads.
My Task(Simplified version):
#shared_task  # NOTE(review): '@shared_task' was mangled to '#shared_task' in the paste
def exec_task(action_id):
    """Execute the queued Action, persist its status, then broadcast it over Channels."""
    # execute the action
    action = Action.objects.get(pk=action_id)
    response = post_request(action)
    # update action status
    if response.status_code == 200:
        action.status = 'completed'
    else:
        action.status = 'failed'
    # save the action to the DB
    action.save()
    channel_layer = get_channel_layer()
    status_data = {'id': action.id, 'status': action.status}
    status_data = json.dumps(status_data)
    try:
        # Works when no asyncio event loop is running in this thread.
        async_to_sync(channel_layer.group_send)('channel_group', {'type': 'propergate_status', 'data': status_data})
    except RuntimeError:
        # BUG FIX: was a bare ``except:``, which also swallows SystemExit and
        # KeyboardInterrupt. async_to_sync raises RuntimeError when an event
        # loop is already running here, so catch exactly that and schedule the
        # coroutine onto the running loop instead.
        event_loop = asyncio.get_running_loop()
        future = asyncio.run_coroutine_threadsafe(channel_layer.group_send('channel_group', {'type': 'propergate_status', 'data': status_data}), event_loop)
        result = future.result()
My Error:
[2019-10-03 18:47:59,990: WARNING/MainProcess] actions queued: 25
[2019-10-03 18:48:02,206: WARNING/MainProcess]
c:\users\jack\documents\github\mcr-admin\venv\lib\site-packages\gevent_socket3.py:123:
RuntimeWarning: coroutine 'AsyncToSync.main_wrap' was never awaited
self._read_event = io_class(fileno, 1)
RuntimeWarning: Enable tracemalloc to get the object allocation traceback
[2019-10-03 18:48:02,212: WARNING/MainProcess] c:\users\jack\documents\github\mcr-admin\venv\lib\site-packages\gevent_socket3.py:123:
RuntimeWarning: coroutine 'BaseEventLoop.shutdown_asyncgens' was never
awaited self._read_event = io_class(fileno, 1) RuntimeWarning:
Originally after I saved the action to the DB I just called:
async_to_sync(channel_layer.group_send)('channel_group', {'type': 'propergate_status', 'data': status_data})
But I kept getting a runtime error because you can't use async_to_sync if there is already an asyncio event loop already running, as shown here at line 61. So I had multiple gevent threads trying to async_to_sync very close together, constantly throwing the error in the link.
Which led me to this wonderful answer and the current version of the exec_task which has a 98% success rate in messaging the Django Channels group but I really need it to be 100%.
The problem here is that occasionally the asyncio event loop is stopped before the Coroutine I add has a chance to finish and I've been tweaking my code, playing around with the asyncio and event loop api but I either break my code or get worse results. I have a feeling it might be to do with the Asgiref async_to_sync function closing the loop early but it's complex and I only started working with python async a couple of days ago.
Any feedback, comments, tips or fixes are most welcome!
Cheers.
In the end I couldn't solve the problem and choose an alternative solution using a Channels AsyncHttpConsumer to send the group message. It's not optimal but it works and keeps the workflow in the Channels library.
Consumer:
class celeryMessageConsumer(AsyncHttpConsumer):
    """Receives internal HTTP posts from Celery tasks and relays them to a channel group."""

    async def handle(self, body):
        # Acknowledge right away so the posting task is not kept waiting.
        await self.send_response(200, b"Recieved Loud and Clear", headers=[
            (b"Content-Type", b"text/plain"),
        ])
        # formating url encoded string into json
        decoded = urllib.parse.unquote_plus(body.decode("utf-8"))
        payload = json.loads(decoded)
        id = payload['data']['id']
        # Forward the data to the per-id group the websocket consumers joined.
        await self.channel_layer.group_send(
            f"group_{id}",
            {
                'type': 'propergate.data',
                'data': payload['data'],
            },
        )
Routing:
# ASGI entry point: websocket connections go through Django auth middleware to the
# app's websocket routes; plain HTTP goes either to the Celery-message consumer
# (internal endpoint) or falls through to the default Django ASGI handler.
application = ProtocolTypeRouter({
'websocket': AuthMiddlewareStack(
URLRouter(
myApp.routing.websocket_urlpatterns
)
),
'http': URLRouter([
# Internal endpoint that Celery tasks POST status updates to.
path("celeryToTemplate/", consumers.celeryMessageConsumer),
re_path('genericMyAppPath/.*', AsgiHandler),
]),
})
Http Request:
# Fire the status notification from the Celery task: POST the JSON to the internal
# Channels endpoint; a 200 means the group message was dispatched.
# NOTE(review): ``id`` and ``status`` are presumably set by the surrounding
# (not shown) task body — confirm against the caller.
data = json.dumps({'id': id, 'status': status})
response = internal_post_request('http://genericAddress/celeryToTemplate/', data)
if response.status_code == 200:
# phew
pass
else:
# whoops
pass
Requests:
def internal_post_request(request_url, payload, timeout=None):
    """POST ``payload`` (a JSON string) to ``request_url`` and return the response.

    :param request_url: absolute URL to POST to
    :param payload: pre-encoded JSON body
    :param timeout: optional seconds before giving up. ``None`` preserves the
        old behaviour, but means the call can block indefinitely — callers
        should prefer passing a bound.
    """
    headers = {
        'Content-Type': 'application/json'
    }
    # ROBUSTNESS: requests.post without a timeout can hang forever; the new
    # backward-compatible ``timeout`` parameter lets callers bound the wait.
    response = requests.post(request_url, data=payload, headers=headers, timeout=timeout)
    return response
Hi i'm currently encountering your exact issue where it's critical to be able to send messages from completed celery tasks to the client.
I was able to group_send before by using a signal to a model method for example:
def SyncLogger(**kwargs):
    """Synchronously push a status message over the websocket layer to every
    client in the instance's room."""
    instance = kwargs.get('instance')
    # SECURITY(review): eval() on a stored string executes arbitrary code if the
    # field can ever be user-influenced — consider json.loads / ast.literal_eval.
    parsed_args = eval(instance.args)
    layer = channels.layers.get_channel_layer()
    message = {
        "type": "chat.message",
        "operation": parsed_args['operation'],
        "state": instance.state,
        "task": instance.task
    }
    async_to_sync(layer.group_send)(parsed_args['room'], message)
and the signal
post_save.connect(SyncLogger, TaskProgress)
Update
I was able to send messages as long as there's an event_loop
this works no matter whether the consumer is async or not
#shared_task()  # NOTE(review): '@shared_task()' was mangled to '#shared_task()' in the paste
def test_message():
    """Send a demo chat message to the 'sync_chat' group from a synchronous context."""
    channel_layer = get_channel_layer()
    # A fresh event loop lets us await the async group_send from this sync task.
    loop = asyncio.new_event_loop()
    asyncio.set_event_loop(loop)
    try:
        loop.run_until_complete(channel_layer.group_send('sync_chat', {
            'type': 'chat.message',
            'operation': 'operation',
            'state': 'state',
            'task': 'task'
        }))
    finally:
        # BUG FIX: the loop was never closed, leaking one event loop (and its
        # selector file descriptor) per task invocation.
        loop.close()

Django system check stuck on unreachable url

In my project I have requests library that sends POST request. Url for that request is hardcoded in function, which is accessed from views.py.
The problem is that when I dont have internet connection, or host, on which url is pointing, is down, I cant launch developer server, it gets stuck on Performing system check. However, if I comment the line with url, or change it to guarantee working host, check is going well.
What is good workaround here ?
views.py
def index(request):
    """Trigger a full reservation sync, then return a plain-text greeting."""
    # NOTE: the sync runs inline, so this request blocks until it finishes.
    Sync().do()
    return HttpResponse("Hello, world. You're at the polls index.")
sync.py
class Sync:
    """Pushes not-yet-synced reservations to the external gdocs service."""

    def do(self):
        """POST each unsynced reservation; mark it synced only on a 200 reply."""
        for booking in Reservation.objects.filter(is_synced=False):
            body = json.dumps({
                'url': 'url',
                'hash': 'hash',
                'json': ReservationPKSerializer(booking).data,
            })
            reply = requests.post(
                'http://gservice.ca29983.tmweb.ru/gdocs/do.php',
                headers={'Content-Type': 'application/json'},
                data=body,
            )
            if reply.status_code == 200:
                booking.is_synced = True
                booking.save()
It might appear to be stuck because requests automatically retries the connection a few times. Try reducing the retry count to 0 or 1 with:
Can I set max_retries for requests.request?

Celery: check if a task is completed to send an email to

I'm new to celery and an overall python noob. I must have stumbled upon the right solution during my research but I just don't seem to understand what I need to do for what seems to be a simple case scenario.
I followed the following guide to learn about flask+celery.
What I understand:
There seems there is something obvious I'm missing about how to trigger a task after the first one is finished. I tried using callbacks, using loops, even tried using Celery Flower and Celery beat to realise this has nothing with what I'm doing...
Goal:
After filling the form, I want to send an email with attachements (result of the task) or a failure email otherwise. Without having to wonder what my user is doing on the app (no HTTP requests)
My code:
class ClassWithTheTask:
    """Placeholder worker: the real implementation would process ``filename``
    via ``proxies`` and store the outcome on ``self.results``."""

    def __init__(self, filename, proxies):
        # do stuff until a variable results is created
        self.results = 'this contains my result'
self.results = 'this contains my result'
# NOTE(review): the two '#app.route' lines below are '@app.route' decorators whose
# '@' was mangled to '#' in the paste.
#app.route('/', methods=['GET', 'POST'])
#app.route('/index', methods=['GET', 'POST'])
def index():
# Landing page: on a valid form submission, queue the long-running task and
# redirect to the status poller; otherwise just render the form.
form = MyForm()
if form.validate_on_submit():
# ...
# the task
# NOTE(review): file_path / proxies / filename are created in the elided
# '# ...' section above — not visible here.
my_task = task1.delay(file_path, proxies)
return redirect(url_for('taskstatus', task_id=my_task.id, filename=filename, email=form.email.data))
return render_template('index.html',
form=form)
#celery.task(bind=True)  # NOTE(review): '@celery.task' was mangled to '#celery.task' in the paste
def task1(self, filepath, proxies):
    """Run the long job and return its results for the next task in the chain."""
    task = ClassWithTheTask(filepath, proxies)
    # BUG FIX: the original returned the undefined name ``results`` (NameError —
    # also pointed out in the answer below); the computed value lives on the
    # instance as ``task.results``.
    return task.results
#celery.task
def send_async_email(msg):
    """Deliver ``msg`` in the background via Flask-Mail."""
    # mail.send requires the Flask application context when running
    # outside a request (i.e. inside the Celery worker).
    with app.app_context():
        mail.send(msg)
# NOTE(review): '#app.route' below is an '@app.route' decorator whose '@' was
# mangled to '#' in the paste.
#app.route('/status/<task_id>/<filename>/<email>')
def taskstatus(task_id, filename, email):
# Poll endpoint: inspects the Celery task's state and, once results are present,
# emails them as CSV attachments. The email is only sent when a client actually
# hits this URL after completion — the flaw the question is about.
task = task1.AsyncResult(task_id)
if task.state == 'PENDING':
# job did not start yet
response = {
'state': task.state,
'status': 'Pending...'
}
elif task.state != 'FAILURE':
# Running or finished successfully.
# NOTE(review): assumes task.info is a dict with 'status' (and eventually
# 'results'/'untranslated' file paths) — set by the task's meta updates,
# which are not shown here.
response = {
'state': task.state,
'status': task.info.get('status', '')
}
if 'results' in task.info:
response['results'] = task.info['results']
response['untranslated'] = task.info['untranslated']
msg = Message('Task Complete for %s !' % filename,
recipients=[email])
msg.body = 'blabla'
# Attach both generated CSV files from the app's resource folder.
with app.open_resource(response['results']) as fp:
msg.attach(response['results'], "text/csv", fp.read())
with app.open_resource(response['untranslated']) as fp:
msg.attach(response['untranslated'], "text/csv", fp.read())
# the big problem here is that it will send the email only if the user refreshes the page and get the 'SUCCESS' status.
send_async_email.delay(msg)
flash('task finished. sent an email.')
return redirect(url_for('index'))
else:
# something went wrong in the background job
response = {
'state': task.state,
'status': str(task.info), # this is the exception raised
}
return jsonify(response)
I don't get the goal of your method for status check. Anyway what you are describing can be accomplished this way.
if form.validate_on_submit():
# ...
# the task
my_task = (
task1.s(file_path, proxies).set(link_error=send_error_email.s(filename, error))
| send_async_email.s()
).delay()
return redirect(url_for('taskstatus', task_id=my_task.id, filename=filename, email=form.email.data))
Then your error task will look like this. The normal task can stay the way it is.
#celery.task
def send_error_email(task_id, filename, email):
task = AsyncResult(task_id)
.....
What happens here is that you are using a chain. You are telling Celery to run your task1, if that completes successfully then run send_async_email, if it fails run send_error_email. This should work, but you might need to adapt the parameters, consider it as pseudocode.
This does not seem right at all:
def task1(self, filepath, proxies):
task = ClassWithTheTask(filepath, proxies)
return results
The line my_task = task1.delay(file_path, proxies) earlier in your code suggests you want to return task but you return results which is not defined anywhere. (ClassWithTheTask is also undefined). This code would crash, and your task would never execute.

Recover from task failed beyond max_retries

I am attempting to asynchronously consume a web service because it takes up to 45 seconds to return. Unfortunately, this web service is also somewhat unreliable and can throw errors. I have set up django-celery and have my tasks executing, which works fine until the task fails beyond max_retries.
Here is what I have so far:
#task(default_retry_delay=5, max_retries=10)  # NOTE(review): '@task' was mangled to '#task' in the paste
def request(xml):
    """Call the (flaky) SOAP service, persisting a Result row for every outcome.

    Result status codes: "i" = intermediate failure (will retry),
    "f" = final failure after max retries, "s" = success.
    """
    try:
        server = Client('https://www.whatever.net/RealTimeService.asmx?wsdl')
        xml = server.service.RunRealTimeXML(
            username=settings.WS_USERNAME,
            password=settings.WS_PASSWORD,
            xml=xml
        )
    except Exception as e:  # BUG FIX: 'except Exception, e' is Python-2-only syntax
        # NOTE(review): not every exception carries a ``.reason`` attribute —
        # this line can itself raise AttributeError for some failure modes.
        result = Result(celery_id=request.request.id, details=e.reason, status="i")
        result.save()
        try:
            return request.retry(exc=e)
        except MaxRetriesExceededError:  # BUG FIX: same Python-2 'except X, e' syntax
            result = Result(celery_id=request.request.id, details="Max Retries Exceeded", status="f")
            result.save()
            raise
    result = Result(celery_id=request.request.id, details=xml, status="s")
    result.save()
    return result
Unfortunately, MaxRetriesExceededError is not being thrown by retry(), so I'm not sure how to handle the failure of this task. Django has already returned HTML to the client, and I am checking the contents of Result via AJAX, which is never getting to a full fail f status.
So the question is: How can I update my database when the Celery task has exceeded max_retries?
The issue is that celery is trying to re-raise the exception you passed in when it hits the retry limit. The code for doing this re-raising is here: https://github.com/celery/celery/blob/v3.1.20/celery/app/task.py#L673-L681
The simplest way around this is to just not have celery manage your exceptions at all:
#task(max_retries=10)
def mytask():
    """Attempt the work; retry on any failure and handle exhaustion ourselves."""
    try:
        do_the_thing()
    except Exception as e:
        try:
            mytask.retry()
        except MaxRetriesExceededError:
            # Out of retries: run our own recovery path instead of letting
            # Celery re-raise the original exception.
            do_something_to_handle_the_error()
            logger.exception(e)
You can override the after_return method of the celery task class, this method is called after the execution of the task whatever is the ret status (SUCCESS,FAILED,RETRY)
# NOTE(review): illustrative pseudocode as pasted — the class header and
# 'def run(...)' are missing their ':' and the comment-only bodies below do not
# compile; kept verbatim because it only sketches the after_return hook.
class MyTask(celery.task.Task)
def run(self, xml, **kwargs)
#Your stuffs here
def after_return(self, status, retval, task_id, args, kwargs, einfo=None):
# Celery calls this after every execution, whatever the outcome
# (SUCCESS, FAILURE, RETRY).
if self.max_retries == int(kwargs['task_retries']):
#If max retries are equals to task retries do something
if status == "FAILURE":
#You can do also something if the tasks fail instead of check the retries
http://celery.readthedocs.org/en/latest/reference/celery.app.task.html?highlight=after_return#celery.app.task.Task.after_return
With Celery version 2.3.2 this approach has worked well for me:
# Variant for Celery 2.3.2 that reads the retry count from self.request.retries.
# NOTE(review): the comment-only 'if' body below is placeholder pseudocode and
# does not compile as pasted.
class MyTask(celery.task.Task):
abstract = True
def after_return(self, status, retval, task_id, args, kwargs, einfo):
if self.max_retries == self.request.retries:
#If max retries is equal to task retries do something
# NOTE(review): '@task(...)' was mangled to '#task(...)' in the paste; the body
# below is an elided placeholder (comment-only, does not compile as pasted).
#task(base=MyTask, default_retry_delay=5, max_retries=10)
def request(xml):
#Your stuff here
I'm just going with this for now, spares me the work of subclassing Task and is easily understood.
# auto-retry with delay as defined below. After that, hook is disabled.
# auto-retry with delay as defined below. After that, hook is disabled.
#celery.shared_task(bind=True, max_retries=5, default_retry_delay=300)
def post_data(self, hook_object_id, url, event, payload):
    """Deliver a webhook payload; retry on failure, disabling the hook when retries run out."""
    try:
        response = requests.post(url, data=payload,
                                 headers={'Content-type': 'application/json'})
        response.raise_for_status()
    except requests.exceptions.RequestException as e:
        if self.request.retries >= self.max_retries:
            # This endpoint keeps failing — give up permanently.
            log.warning("Auto-deactivating webhook %s for event %s", hook_object_id, event)
            Webhook.objects.filter(object_id=hook_object_id).update(active=False)
            return False
        raise self.retry(exc=e)
    return True
return True