Sending Email With Celery Using Django Default Email Backend

I am trying to send a confirmation email to my registered users using Django and Celery, with RabbitMQ as the broker. Whenever I execute the code, the Celery log shows the task being received and executed successfully, but I am not receiving any emails.
tasks.py
from celery.task import Task
from django.core.mail import send_mail
from django.template import loader
from django.utils.html import strip_tags
from config.celery import app
from config.settings import default


class SendConfirmationEmail(Task):
    def __init__(self, *args, **kwargs):
        self.user_name = kwargs.get('username')
        self.user_id = kwargs.get('id')
        self.user_hash = kwargs.get('hash')
        self.user_email = kwargs.get('email')

    def send_email(self):
        confirm_mail = loader.render_to_string('mail/confirmation.html',
                                               {'user': self.user_name, 'id': self.user_id,
                                                'hash': self.user_hash,
                                                'domain': default.SITE_URL})
        text_email = strip_tags(confirm_mail)
        send_mail(
            subject='Confirm Your E-mail',
            message=text_email,
            from_email='no-reply@mysite.com',
            recipient_list=[self.user_email],
            fail_silently=False,
            html_message=confirm_mail
        )

    def run(self, *args, **kwargs):
        self.send_email()


app.register_task(SendConfirmationEmail())
signals.py
from django.db.models.signals import post_save
from django.dispatch import receiver
from apps.siteuser.models import User
from apps.siteuser.tasks import SendConfirmationEmail


@receiver(post_save, sender=User)
def create_employee_details(sender, instance, created, **kwargs):
    if created:
        task = SendConfirmationEmail(username=instance.first_name, id=instance.id, hash=instance.hash,
                                     email=instance.email)
        task.delay()
Settings for Email & Celery:
CELERY_BROKER_URL = 'amqp://username:password@localhost:5672/vhost'

EMAIL_HOST = 'smtp.mailtrap.io'
EMAIL_HOST_USER = MY_USERNAME
EMAIL_HOST_PASSWORD = MY_PASSWORD
EMAIL_PORT = 2525
Celery log:
[2018-05-09 11:50:41,191: INFO/MainProcess] Received task: apps.user.tasks.SendConfirmationEmail[e63c0f5f-7b81-4065-85c1-9ef87acc792a]
[2018-05-09 11:50:41,197: INFO/ForkPoolWorker-1] Task apps.user.tasks.SendConfirmationEmail[e63c0f5f-7b81-4065-85c1-9ef87acc792a] succeeded in 0.004487647999667388s: None
NOTE: I have sent mail without Celery and it was working fine; the problem only started after switching to Celery. I am using Mailtrap for development purposes.

This is how I generally call a Celery task:
Task.py
from celery import shared_task


@shared_task
def task1(*args, **kwargs):
    pass
Caller.py
from task import task1
task1.delay(a,b,c...)
Is celery discovery configured correctly?
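Also, a likely culprit in the original tasks.py: Celery registers a single task instance per worker process, so the kwargs passed to SendConfirmationEmail(...) in the signal handler never reach the worker. The worker executes the instance created by app.register_task(SendConfirmationEmail()), where every attribute is None, which would explain a task that "succeeds" in 0.004s without sending anything. A minimal sketch of the fix (assuming the same project layout, with send_email() unchanged): read the kwargs in run() and pass them through delay().
tasks.py (sketch)
from celery.task import Task
from config.celery import app


class SendConfirmationEmail(Task):
    def run(self, *args, **kwargs):
        # kwargs now travel inside the broker message instead of being
        # baked into the instance at registration time
        self.user_name = kwargs.get('username')
        self.user_id = kwargs.get('id')
        self.user_hash = kwargs.get('hash')
        self.user_email = kwargs.get('email')
        self.send_email()  # send_email() unchanged from the original


send_confirmation_email = SendConfirmationEmail()
app.register_task(send_confirmation_email)
signals.py (sketch)
# pass the user data through delay() so it is serialized with the task
send_confirmation_email.delay(username=instance.first_name, id=instance.id,
                              hash=instance.hash, email=instance.email)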

Related

How do I start/stop Hypercorn/Uvicorn server as a background task for an async application (like a discord bot) Python

I am creating a Django application with ASGI and am having trouble getting Hypercorn and Uvicorn to run in the background with graceful shutdown. When I set up my application to run on Hypercorn using only asyncio.create_task and starting it, the website doesn't run.
Hypercorn code snippet:
from scripts import funcs
import nextcord
from nextcord.ext import commands
from nextcord import Interaction
import asyncio
# from uvicorn import Config, Server
# import uvicorn
import subprocess
from subprocess import CREATE_NEW_CONSOLE
import signal
# import multiprocessing
import nest_asyncio
import os
import sys
sys.path.insert(1, 'C:\\Users\\Sub01\\Project\\PaulWebsite\\app')
from hypercorn.config import Config
from hypercorn.asyncio import serve
from hypercorn.run import run
import hypercorn
import asyncio
from paul_site.asgi import application
import signal

nest_asyncio.apply()
createEmbed = funcs.embedCreator()
shutdown_event = asyncio.Event()


def _signal_handler(*_) -> None:
    shutdown_event.set()


class HYPERCORN:
    config = Config()
    coro = None

    def __init__(self) -> None:
        self.config.from_object("paul_site.asgi")
        self.evtLoop = asyncio.new_event_loop()

    async def start(self):
        self.coro = self.evtLoop.create_task(await serve(application, self.config))

    def stop(self):
        self.evtLoop.add_signal_handler(signal.SIGINT, _signal_handler)
        self.evtLoop.run_until_complete(
            asyncio.to_thread(serve(application, self.config, shutdown_trigger=shutdown_event.wait))
        )


class baseCommand(commands.Cog):
    proc = None

    def __init__(self, client):
        self.client = client
        self.website = HYPERCORN()

    @nextcord.slash_command()
    async def bot(self, interaction: Interaction):
        pass

    @bot.subcommand(description="Stops the bot")
    async def shutdown(self, interaction: Interaction):
        await interaction.response.send_message(embed=createEmbed.createEmbed(title="Exit", description="Bot's down", footer=f"Requested by {interaction.user.name}"))
        exit()

    # Create command group site
    @nextcord.slash_command()
    async def site(self, interaction: Interaction):
        pass

    @site.subcommand(description="Starts the website")
    async def start(self, interaction: Interaction):
        try:
            await self.website.start()
            await interaction.response.send_message(embed=createEmbed.createEmbed(title="Start Website", description=f"""
**Website started successfully**
""", footer=f"Requested by {interaction.user.name}"))
        except Exception as e:
            await interaction.response.send_message(
                embed=createEmbed.createEmbed(title="Start Website Error", description=f"""
```bash
{e}
```
""", footer=f"Requested by {interaction.user.name}")
            )

    @site.subcommand(description='Stops the website')
    async def stop(self, interaction: Interaction):
        self.website.stop()
        await interaction.followup.send(embed=createEmbed.createEmbed(title="Stop Website", description=f"""
**Website stopped successfully!**
""", footer=f"Requested by {interaction.user.name}"))
        del self.proc


def setup(client):
    client.add_cog(baseCommand(client))
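For reference, Hypercorn's documented graceful-shutdown pattern looks roughly like the sketch below (a sketch, not the code above; the bind address is an assumption). serve() is itself a coroutine, so it should be scheduled with create_task on the already-running loop rather than awaited first, and setting the asyncio.Event passed as shutdown_trigger stops it cleanly:
import asyncio
from hypercorn.config import Config
from hypercorn.asyncio import serve
from paul_site.asgi import application

shutdown_event = asyncio.Event()


async def start_site() -> asyncio.Task:
    config = Config()
    config.bind = ["127.0.0.1:8000"]  # assumption: bind address
    # schedule serve(); do NOT await it here, or start_site blocks forever
    return asyncio.get_running_loop().create_task(
        serve(application, config, shutdown_trigger=shutdown_event.wait)
    )


async def stop_site(site_task: asyncio.Task) -> None:
    shutdown_event.set()  # shutdown_trigger resolves -> graceful shutdown
    await site_task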
Uvicorn code snippet:
import sys
sys.path.insert(1, 'C:\\Users\\Sub01\\Project\\PaulWebsite\\app')
import asyncio
from paul_site.asgi import application
import signal
import time
import uvicorn
from multiprocessing import Process


class UvicornServer(uvicorn.Server):
    def __init__(self, host: str = "127.0.0.1", port: int = 8000):
        self.host = host
        self.port = port

    async def setup(self):
        self.proc = Process(
            target=uvicorn.run,
            args=[application],
            kwargs={
                'host': self.host,
                'port': self.port,
            },
            daemon=True
        )
        # self.proc.run()
        await self.proc.start()
        await asyncio.sleep(0.5)

    async def down(self):
        self.proc.terminate()


def blockingFunc():
    prevTime = time.time()
    while True:
        print("Elapsed time: ", time.time() - prevTime)
        time.sleep(1)
        if time.time() - prevTime >= 4:
            break


async def main():
    server = UvicornServer()
    await server.setup()
    blockingFunc()
    await server.down()

asyncio.run(main())
asgi.py:
"""
ASGI config for paul_site project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/4.1/howto/deployment/asgi/
"""
import os
from channels.routing import ProtocolTypeRouter, URLRouter
from django.core.asgi import get_asgi_application
from paul_site_app.ws_urlpatterns import ws_urlpatterns
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'paul_site.settings')
application = ProtocolTypeRouter({
'http': get_asgi_application(),
'websocket': URLRouter(ws_urlpatterns)
})
Looking at examples from people running uvicorn as a background task for FastAPI apps, I tried the same approach, but it only results in a runtime error. I've also tried having a command open a terminal and run the application via the CLI, but soon realized that the code that spawns a new terminal isn't portable across platforms.
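If running Uvicorn in-process is acceptable, a commonly used pattern (a hedged sketch, not the original code) is to drive uvicorn.Server directly as a background task on the bot's event loop and flip its should_exit flag for a graceful, cross-platform stop, with no new terminal or subprocess needed:
import asyncio
import uvicorn
from paul_site.asgi import application  # the ASGI app from asgi.py above


class BackgroundUvicorn:
    def __init__(self, host: str = "127.0.0.1", port: int = 8000):
        self.server = uvicorn.Server(uvicorn.Config(application, host=host, port=port))
        self.task = None

    def start(self) -> None:
        # serve() is a coroutine: schedule it so the caller's loop stays free
        self.task = asyncio.get_running_loop().create_task(self.server.serve())

    async def stop(self) -> None:
        self.server.should_exit = True  # ask uvicorn to shut down gracefully
        await self.task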

IPN is not received anymore

I have a problem with my PayPal IPN sandbox. It used to send the IPN after a test purchase, but for the past few days it hasn't been sending them anymore. What can I do to fix this?
signals.py
from django.shortcuts import get_object_or_404
from .models import CourierPayment
from paypal.standard.ipn.signals import valid_ipn_received
from django.dispatch import receiver


@receiver(valid_ipn_received)
def payment_notification(sender, **kwargs):
    ipn = sender
    if ipn.payment_status == 'Completed':
        # payment was successful
        payment = get_object_or_404(CourierPayment, id=ipn.invoice)
        if payment.price == ipn.mc_gross:
            # mark the order as paid
            payment.paid = True
            payment.save()
apps.py
from django.apps import AppConfig


class CourierConfig(AppConfig):
    name = 'courier'

    def ready(self):
        # import signal handlers
        import courier.signals
__init__.py
default_app_config = 'courier.apps.CourierConfig'

celery beat using flask task issue

I would like to run cron jobs from Flask using Celery, but I have an issue with the Celery beat schedule: it seems my task is never loaded, and I don't know where to check for the problem.
This is where I define my Flask app, in views.py:
from flask import Flask  # import added: implied by app = Flask(__name__) below
from celery.schedules import crontab

app = Flask(__name__)
app.config.update(
    CELERY_BROKER_URL='redis://localhost:6379',
    CELERY_RESULT_BACKEND='redis://localhost:6379',
    CELERY_BEAT_SCHEDULE={
        'task-number-one': {
            'task': 'app.tasks.test',
            'schedule': crontab(minute="*"),
        }
    },
    CELERY_IMPORTS=('app.tasks',),  # trailing comma makes this a tuple, not a bare string
    CELERY_TASK_RESULT_EXPIRES=30,
    CELERY_TIMEZONE='UTC',
)
and this is where my Celery object is created, in tasks.py:
from celery import Celery
from app.views import app
from celery import shared_task


def make_celery(app):
    celery = Celery(app.import_name, backend=app.config['CELERY_RESULT_BACKEND'],
                    broker=app.config['CELERY_BROKER_URL'])
    celery.conf.update(app.config)
    TaskBase = celery.Task

    class ContextTask(TaskBase):
        abstract = True

        def __call__(self, *args, **kwargs):
            with app.app_context():
                return TaskBase.__call__(self, *args, **kwargs)

    celery.Task = ContextTask
    return celery


celery_app = make_celery(app)


@celery_app.task()
def test():
    logger = test.get_logger()
    logger.info("Hello")
views.py and tasks.py are under the same directories which is called app
When I launch the Celery worker, everything looks normal in its output. But when I launch celery beat, my task never seems to be sent by the schedule, and I don't know where to check.
Can you help me on this?
Best
I believe Celery beat tasks need to be configured after the @app.on_after_configure.connect signal is sent. You should be able to do the following in your tasks.py file.
celery_app.conf.CELERYBEAT_SCHEDULE = {
    "test-every-minute": {
        "task": "tasks.test",
        "schedule": crontab(minute="*"),
    },
}
Alternatively, you can use this decorator syntax if your task is defined in the same file as your Celery application instance.
@celery_app.on_after_configure.connect
def setup_periodic_tasks(sender, **kwargs):
    sender.add_periodic_task(5, test_two.s(), name='test-every-5')
If your tasks are defined in a separate module, you can use the @app.on_after_finalize.connect decorator after importing your tasks.
@app.on_after_finalize.connect
def setup_periodic_tasks(sender, **kwargs):
    from app.tasks import test
    sender.add_periodic_task(10.0, test.s(), name='test-every-10')
Celery Beat Entry Docs

How to have scrapy spider run on flask app form submit?

I'm setting up a Flask app that lets me input a string and passes that string as an argument to my spider to scrape a page. I'm having difficulty getting the spider to run on a form submit (i.e. integrating Scrapy with Flask).
I've looked at the following code snippet solutions to no avail:
Run Scrapy from Flask,
Running Scrapy spiders in a Celery task,
Scrapy and celery `update_state`
There definitely appear to be different ways to complete the task; however, none of the code snippets above seems to work.
routes.py
from flask import render_template, flash, redirect, url_for, session, jsonify
from flask import request
from flask_login import login_required
from flask_login import logout_user
from app import app, db
from app.forms import LoginForm
from flask_login import current_user, login_user
from app.models import User
from werkzeug.urls import url_parse
from app.forms import RegistrationForm, SearchForm
# from app.tasks import scrape_async_job
import pprint
import requests
import json


@app.route('/')
@app.route('/index', methods=['GET', 'POST'])
@login_required
def index():
    jobvisuals = [
        {
            'Job': 'Example',
            'Desc': 'This job requires a degree...',
            'link': 'fakelink',
            'salary': '10$/hr',
            'applied': 'Boolean',
            'interview': 'Boolean'}]
    params = {
        'spider_name': 'Indeedspider',
        'start_requests': True
    }
    response = requests.get('http://localhost:9080/crawl.json', params).json()
    data = response
    pprint.pprint(data)
    form = SearchForm()
    if request.method == 'GET':
        return render_template('index.html', title='home', jobvisuals=jobvisuals, form=form, search=session.get('search', ''))
    job_find = request.form['search']
    session['search'] = job_find
    if form.validate_on_submit():
        print('Working on this feature :D')
        flash('Searching for job {}'.format(form.search.data))  # fixed: .format on the string, not on flash()'s return value
    return render_template('index.html', title='Home', jobvisuals=jobvisuals, form=form)
spider
import scrapy


class IndeedSpider(scrapy.Spider):
    name = 'indeedspider'
    allowed_domains = ['indeed.com']

    def __init__(self, job='', **kwargs):
        # start_urls must be a list, and the f-string interpolates {job}
        self.start_urls = [f'http://www.indeed.com/jobs?q={job}&l=San+Marcos%2C+CA']
        super().__init__(**kwargs)

    def parse(self, response):
        # iterate over selectors (not .getall(), which yields plain strings)
        for item in response.xpath("//div[contains(@class,'jobsearch-SerpJobCard unifiedRow row result clickcard')]"):
            yield {
                'title': item.xpath("//div[contains(@class,'title')]/text()").get(default='None'),
                'desc': item.xpath("//div[contains(@class,'summary')]/text()").get(default='None'),
                'link': item.xpath("//div[contains(@class,'title')]/@href").get(default='None'),
                'location': item.xpath("//span[contains(@class,'location')]/text()").get(default='None'),
                'salary': item.xpath("//div[contains(@class,'salarySnippet')]/text()").get(default='None')
            }
Expected:
I type the job into an input box; on submit, the job gets passed to the spider, the spider scrapes indeed.com (first page only), and the scraped data is returned on the index page.
Actual:
Unsure of where to start.
Can anyone point me in the right direction?
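One pointer: the /crawl.json request on port 9080 in routes.py looks like a ScrapyRT endpoint, and ScrapyRT can forward spider arguments, so the form's search string can reach the spider's job kwarg. A hedged sketch (assuming ScrapyRT is running and that it supports the crawl_args parameter):
import json
import requests


def run_spider(job_query: str):
    params = {
        'spider_name': 'indeedspider',  # must match IndeedSpider.name (lowercase)
        'start_requests': True,
        # assumption: crawl_args is JSON-encoded kwargs forwarded to IndeedSpider.__init__
        'crawl_args': json.dumps({'job': job_query}),
    }
    response = requests.get('http://localhost:9080/crawl.json', params=params)
    return response.json().get('items', [])
The view would then call run_spider(form.search.data) inside the validate_on_submit() branch and render the returned items.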

Issues while integrating tornado app with django site

I have a simple chat application in Tornado, powered by RethinkDB.
I am trying to integrate this Tornado chat application with a Django site.
For that reason, I have made the changes below in rechat.py to get it working with Django:
Imported the tornado.wsgi and django.core.wsgi (get_wsgi_application) namespaces
Set the environment variable for Django's settings.py:
os.environ['DJANGO_SETTINGS_MODULE'] = 'djangoapp.settings'
When I try to run it after the above changes, it connects to the db server but doesn't do anything. What am I missing?
How can I make this Tornado app work with a Django 1.8 site?
Below is my code of rechat.py (https://github.com/v3ss0n/rechat) -
import logging
import tornado.escape
from tornado.ioloop import IOLoop
import tornado.web
import os.path
import rethinkdb as r
from tornado import httpserver
from time import time
# from tornado.concurrent import Future
from tornado import gen
from tornado.options import define, options, parse_command_line
import tornado.wsgi
from django.core.wsgi import get_wsgi_application

define("port", default=8888, help="run on the given port", type=int)
define("debug", default=True, help="run in debug mode")


def setup_db(db_name="rechat", tables=['events']):
    connection = r.connect(host="localhost")
    try:
        r.db_create(db_name).run(connection)
        for tbl in tables:
            r.db(db_name).table_create(tbl, durability="hard").run(connection)
        logging.info('Database setup completed.')
    except r.RqlRuntimeError:
        logging.warn('Database/Table already exists.')
    finally:
        connection.close()


class RechatApp(tornado.web.Application):
    def __init__(self, db):
        handlers = [
            (r"/", MainHandler),
            (r"/a/message/new", MessageNewHandler),
            (r"/a/message/updates", MessageUpdatesHandler),
        ]
        settings = dict(cookie_secret="_asdfasdaasdfasfas",
                        template_path=os.path.join(
                            os.path.dirname(__file__), "templates"),
                        static_path=os.path.join(
                            os.path.dirname(__file__), "static"),
                        xsrf_cookies=True,
                        debug=options.debug)
        self.db = db
        logging.info(db)
        tornado.web.Application.__init__(self, handlers, **settings)


class BaseHandler(tornado.web.RequestHandler):
    def initialize(self):
        self.db = self.application.db
        self.evt = r.table("events")


class MainHandler(BaseHandler):
    @gen.coroutine
    def get(self):
        curs = yield self.evt.run(self.db)
        messages = []
        while (yield curs.fetch_next()):
            item = yield curs.next()
            messages.append(item)
        self.render("index.html", messages=messages)


class MessageNewHandler(BaseHandler):
    @gen.coroutine
    def post(self):
        message = {
            "body": self.get_argument("body")
        }
        # to_basestring is necessary for Python 3's json encoder,
        # which doesn't accept byte strings.
        start = time()
        messages = (yield self.evt.insert(message).run(self.db))
        time_taken = time() - start
        logging.warn("DBINSERT: %s seconds" % time_taken)
        message['id'] = messages['generated_keys'][0]
        message["html"] = tornado.escape.to_basestring(
            self.render_string("message.html", message=message))
        if self.get_argument("next", None):
            self.redirect(self.get_argument("next"))
        else:
            self.write(message)


class MessageUpdatesHandler(BaseHandler):
    @gen.coroutine
    def post(self):
        curs = yield self.evt.changes().run(self.db)
        while (yield curs.fetch_next()):
            feed = yield curs.next()
            message = {
                'id': feed['new_val']['id'],
                'html': tornado.escape.to_basestring(
                    self.render_string("message.html",
                                       message=feed['new_val']))}
            break
        self.finish(dict(messages=[message]))


@gen.coroutine
def main():
    """Async main method. It needs to be async because r.connect is async."""
    parse_command_line()
    os.environ['DJANGO_SETTINGS_MODULE'] = 'djangoapp.settings'
    db_name = "rechat"
    setup_db(db_name)
    r.set_loop_type("tornado")
    db = yield r.connect("localhost", db=db_name)
    # Single db connection for everything, thanks a lot Ben and Jeese
    http_server = httpserver.HTTPServer(RechatApp(db))
    http_server.listen(options.port)


if __name__ == "__main__":
    IOLoop.current().run_sync(main)
    IOLoop.current().start()
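Since rechat.py imports tornado.wsgi and get_wsgi_application but never uses them, the missing piece is probably mounting Django inside Tornado. A minimal sketch (an assumption, not the original code): wrap Django's WSGI application in a WSGIContainer and let a catch-all FallbackHandler delegate to it, keeping the chat handlers Tornado-native:
import os
import tornado.web
import tornado.wsgi
from django.core.wsgi import get_wsgi_application
# MessageNewHandler / MessageUpdatesHandler as defined in rechat.py above

os.environ['DJANGO_SETTINGS_MODULE'] = 'djangoapp.settings'
django_app = tornado.wsgi.WSGIContainer(get_wsgi_application())

application = tornado.web.Application([
    (r"/a/message/new", MessageNewHandler),         # Tornado-native chat routes
    (r"/a/message/updates", MessageUpdatesHandler),
    # every other URL falls through to the Django 1.8 site
    (r".*", tornado.web.FallbackHandler, dict(fallback=django_app)),
])
One caveat: requests served through WSGIContainer are synchronous, so the Django views will block the IOLoop while they run.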