Issues while integrating a Tornado app with a Django site

I have a simple chat application in Tornado, backed by RethinkDB.
I am trying to integrate this Tornado chat application with a Django site.
To that end, I made the following changes in rechat.py so it can work with Django:
- imported tornado.wsgi and django.core.wsgi (get_wsgi_application)
- set the environment variable for Django's settings.py:
os.environ['DJANGO_SETTINGS_MODULE'] = 'djangoapp.settings'
When I run it after these changes, it connects to the database server but doesn't do anything beyond that. What am I missing?
How can I make this Tornado app work with a Django 1.8 site?
Below is my rechat.py (https://github.com/v3ss0n/rechat):
import logging
import tornado.escape
from tornado.ioloop import IOLoop
import tornado.web
import os.path
import rethinkdb as r
from tornado import httpserver
from time import time
# from tornado.concurrent import Future
from tornado import gen
from tornado.options import define, options, parse_command_line
import tornado.wsgi
from django.core.wsgi import get_wsgi_application
define("port", default=8888, help="run on the given port", type=int)
define("debug", default=True, help="run in debug mode")
def setup_db(db_name="rechat", tables=['events']):
    connection = r.connect(host="localhost")
    try:
        r.db_create(db_name).run(connection)
        for tbl in tables:
            r.db(db_name).table_create(tbl, durability="hard").run(connection)
        logging.info('Database setup completed.')
    except r.RqlRuntimeError:
        logging.warn('Database/Table already exists.')
    finally:
        connection.close()
class RechatApp(tornado.web.Application):
    def __init__(self, db):
        handlers = [
            (r"/", MainHandler),
            (r"/a/message/new", MessageNewHandler),
            (r"/a/message/updates", MessageUpdatesHandler),
        ]
        settings = dict(cookie_secret="_asdfasdaasdfasfas",
                        template_path=os.path.join(
                            os.path.dirname(__file__), "templates"),
                        static_path=os.path.join(
                            os.path.dirname(__file__), "static"),
                        xsrf_cookies=True,
                        debug=options.debug)
        self.db = db
        logging.info(db)
        tornado.web.Application.__init__(self, handlers, **settings)
class BaseHandler(tornado.web.RequestHandler):
    def initialize(self):
        self.db = self.application.db
        self.evt = r.table("events")
class MainHandler(BaseHandler):
    @gen.coroutine
    def get(self):
        curs = yield self.evt.run(self.db)
        messages = []
        while (yield curs.fetch_next()):
            item = yield curs.next()
            messages.append(item)
        self.render("index.html", messages=messages)
class MessageNewHandler(BaseHandler):
    @gen.coroutine
    def post(self):
        message = {
            "body": self.get_argument("body")
        }
        # to_basestring is necessary for Python 3's json encoder,
        # which doesn't accept byte strings.
        start = time()
        messages = (yield self.evt.insert(message).run(self.db))
        time_taken = time() - start
        logging.warn("DBINSERT: %s seconds" % time_taken)
        message['id'] = messages['generated_keys'][0]
        message["html"] = tornado.escape.to_basestring(
            self.render_string("message.html", message=message))
        if self.get_argument("next", None):
            self.redirect(self.get_argument("next"))
        else:
            self.write(message)
class MessageUpdatesHandler(BaseHandler):
    @gen.coroutine
    def post(self):
        curs = yield self.evt.changes().run(self.db)
        while (yield curs.fetch_next()):
            feed = yield curs.next()
            message = {
                'id': feed['new_val']['id'],
                'html': tornado.escape.to_basestring(
                    self.render_string("message.html",
                                       message=feed['new_val']))}
            break
        self.finish(dict(messages=[message]))
@gen.coroutine
def main():
    """Async main method. It needs to be a coroutine because r.connect is async."""
    parse_command_line()
    os.environ['DJANGO_SETTINGS_MODULE'] = 'djangoapp.settings'
    db_name = "rechat"
    setup_db(db_name)
    r.set_loop_type("tornado")
    db = yield r.connect("localhost", db=db_name)
    # Single db connection for everything, thanks a lot Ben and Jesse
    http_server = httpserver.HTTPServer(RechatApp(db))
    http_server.listen(options.port)

if __name__ == "__main__":
    IOLoop.current().run_sync(main)
    IOLoop.current().start()
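For reference, the usual way to serve a Django site and native Tornado handlers from one process in this era (Tornado 4.x / Django 1.8) is to wrap Django's WSGI application in tornado.wsgi.WSGIContainer and route everything the Tornado handlers don't match to it via tornado.web.FallbackHandler. A minimal sketch (my addition, assuming the project is named djangoapp as in the settings module above):

import os
import tornado.web
import tornado.wsgi
from tornado.ioloop import IOLoop
from django.core.wsgi import get_wsgi_application

os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'djangoapp.settings')

# Wrap Django's WSGI app so Tornado can serve it.
django_app = tornado.wsgi.WSGIContainer(get_wsgi_application())

application = tornado.web.Application([
    # native Tornado handlers (the chat) would be listed first here...
    # (r"/a/message/new", MessageNewHandler),
    # ...and every other path falls through to Django:
    (r".*", tornado.web.FallbackHandler, dict(fallback=django_app)),
])

if __name__ == '__main__':
    application.listen(8888)
    IOLoop.current().start()

Note that WSGIContainer runs Django synchronously on Tornado's IOLoop, so a slow Django view will block the chat handlers while it executes.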

Related

How do I start/stop a Hypercorn/Uvicorn server as a background task for an async application (like a Discord bot)?

I am creating a Django application with ASGI and am having problems getting Hypercorn or Uvicorn to run in the background with graceful shutdown. When I set up my ASGI application to run on Hypercorn using only asyncio.create_task and just starting it, the website doesn't run.
Hypercorn code snippet:
from scripts import funcs
import nextcord
from nextcord.ext import commands
from nextcord import Interaction
import asyncio
# from uvicorn import Config, Server
# import uvicorn
import subprocess
from subprocess import CREATE_NEW_CONSOLE
import signal
# import multiprocessing
import nest_asyncio
import os
import sys
sys.path.insert(1, 'C:\\Users\\Sub01\\Project\\PaulWebsite\\app')
from hypercorn.config import Config
from hypercorn.asyncio import serve
from hypercorn.run import run
import hypercorn
import asyncio
from paul_site.asgi import application
import signal

nest_asyncio.apply()
createEmbed = funcs.embedCreator()

shutdown_event = asyncio.Event()

def _signal_handler(*_) -> None:
    shutdown_event.set()

class HYPERCORN:
    config = Config()
    coro = None

    def __init__(self) -> None:
        self.config.from_object("paul_site.asgi")
        self.evtLoop = asyncio.new_event_loop()

    async def start(self):
        self.coro = self.evtLoop.create_task(await serve(application, self.config))

    def stop(self):
        self.evtLoop.add_signal_handler(signal.SIGINT, _signal_handler)
        self.evtLoop.run_until_complete(
            asyncio.to_thread(serve(application, self.config, shutdown_trigger=shutdown_event.wait))
        )

class baseCommand(commands.Cog):
    proc = None

    def __init__(self, client):
        self.client = client
        self.website = HYPERCORN()

    @nextcord.slash_command()
    async def bot(self, interaction: Interaction):
        pass

    @bot.subcommand(description="Stops the bot")
    async def shutdown(self, interaction: Interaction):
        await interaction.response.send_message(embed=createEmbed.createEmbed(title="Exit", description="Bot's down", footer=f"Requested by {interaction.user.name}"))
        exit()

    # Create command group site
    @nextcord.slash_command()
    async def site(self, interaction: Interaction):
        pass

    @site.subcommand(description="Starts the website")
    async def start(self, interaction: Interaction):
        try:
            await self.website.start()
            await interaction.response.send_message(embed=createEmbed.createEmbed(title="Start Website", description=f"""
**Website started successfully**
""", footer=f"Requested by {interaction.user.name}"))
        except Exception as e:
            await interaction.response.send_message(
                embed=createEmbed.createEmbed(title="Start Website Error", description=f"""
```bash
{e}
```
""", footer=f"Requested by {interaction.user.name}")
            )

    @site.subcommand(description='Stops the website')
    async def stop(self, interaction: Interaction):
        self.website.stop()
        await interaction.followup.send(embed=createEmbed.createEmbed(title="Stop Website", description=f"""
**Website stopped successfully!**
""", footer=f"Requested by {interaction.user.name}"))
        del self.proc

def setup(client):
    client.add_cog(baseCommand(client))
Uvicorn code snippet:
import sys
sys.path.insert(1, 'C:\\Users\\Sub01\\Project\\PaulWebsite\\app')
import asyncio
from paul_site.asgi import application
import signal
import time
import uvicorn
from multiprocessing import Process

class UvicornServer(uvicorn.Server):
    def __init__(self, host: str = "127.0.0.1", port: int = 8000):
        self.host = host
        self.port = port

    async def setup(self):
        self.proc = Process(
            target=uvicorn.run,
            args=[application],
            kwargs={
                'host': self.host,
                'port': self.port,
            },
            daemon=True
        )
        # self.proc.run()
        await self.proc.start()
        await asyncio.sleep(0.5)

    async def down(self):
        self.proc.terminate()

def blockingFunc():
    prevTime = time.time()
    while True:
        print("Elapsed time: ", time.time() - prevTime)
        time.sleep(1)
        if time.time() - prevTime >= 4:
            break

async def main():
    server = UvicornServer()
    await server.setup()
    blockingFunc()
    await server.down()

asyncio.run(main())
asgi.py:
"""
ASGI config for paul_site project.

It exposes the ASGI callable as a module-level variable named ``application``.

For more information on this file, see
https://docs.djangoproject.com/en/4.1/howto/deployment/asgi/
"""
import os

from channels.routing import ProtocolTypeRouter, URLRouter
from django.core.asgi import get_asgi_application

from paul_site_app.ws_urlpatterns import ws_urlpatterns

os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'paul_site.settings')

application = ProtocolTypeRouter({
    'http': get_asgi_application(),
    'websocket': URLRouter(ws_urlpatterns)
})
Looking at examples of people incorporating FastAPI and running Uvicorn as a background task, I tried that, but it only results in a runtime error. I've also tried having a command open a terminal and run the application via the CLI, but soon realized that the code that spawns a new terminal isn't portable across platforms.
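For reference, a minimal sketch of the pattern that seems intended here (my addition, assuming application is the ASGI callable from paul_site.asgi): schedule Hypercorn's serve coroutine as a task on the already-running event loop and signal shutdown through an asyncio.Event.

import asyncio
from hypercorn.config import Config
from hypercorn.asyncio import serve
from paul_site.asgi import application

shutdown_event = asyncio.Event()

async def main():
    config = Config()
    config.bind = ['127.0.0.1:8000']
    # create_task schedules the (un-awaited) serve coroutine, so the bot
    # or any other work can keep running on the same loop alongside it
    server_task = asyncio.create_task(
        serve(application, config, shutdown_trigger=shutdown_event.wait)
    )
    await asyncio.sleep(10)   # stand-in for the bot's own work
    shutdown_event.set()      # request a graceful shutdown
    await server_task         # wait for the server to finish

asyncio.run(main())

The two differences from the snippet above are that serve(...) is passed to create_task without being awaited first (create_task(await serve(...)) runs the server to completion before the task is even created), and that everything stays on the one running loop instead of a second loop from asyncio.new_event_loop().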

Importing the flask-socketio instance from a top-level file?

I have a run.py file at the top level of my directory where I initialize flask-socketio. That file looks like this:
#!/usr/bin/env python
# /run.py
import os
from src.config import app_config
from dotenv import load_dotenv, find_dotenv
from flask_socketio import SocketIO
from src.app import create_app

load_dotenv(find_dotenv())

env_name = os.getenv('FLASK_ENV')
app = create_app(env_name)
socketio = SocketIO(app, async_mode=None)

if __name__ == '__main__':
    port = os.getenv('PORT')
    # run app
    socketio.run(app, host='0.0.0.0', port=port)
My app.py file sits under src/app.py and looks like this:
def create_app(env_name):
    """
    Create app
    """
    # app initialization
    app = Flask(__name__)
    app.config.from_object(app_config[env_name])
    # initializing bcrypt and db
    bcrypt.init_app(app)
    db.init_app(app)
    app.register_blueprint(message_blueprint, url_prefix='/api/v1/message')
    return app
I am trying to import the socketio instance into /src/views/MessageView.py
My MessageView.py file looks like this:
from ..models import db
from __main__ import socketio
from ..shared.Authentication import Auth
from threading import Lock
from flask import Flask, render_template, session, request, \
    copy_current_request_context, g, Blueprint, json, Response
from flask_socketio import SocketIO, emit, join_room, leave_room, \
    close_room, rooms, disconnect

message_api = Blueprint('message_api', __name__)

thread = None
thread_lock = Lock()

def background_thread():
    """Example of how to send server generated events to clients."""
    count = 0
    while True:
        socketio.sleep(10)
        count += 1
        socketio.emit('my_response',
                      {'data': 'Server generated event', 'count': count},
                      namespace='/test')

@message_api.route('/')
def index():
    return render_template('index.html', async_mode=socketio.async_mode)

@socketio.on('my_event', namespace='/test')
def test_message(message):
    session['receive_count'] = session.get('receive_count', 0) + 1
    emit('my_response',
         {'data': message['data'], 'count': session['receive_count']})

@socketio.on('my_broadcast_event', namespace='/test')
def test_broadcast_message(message):
    session['receive_count'] = session.get('receive_count', 0) + 1
    emit('my_response',
         {'data': message['data'], 'count': session['receive_count']},
         broadcast=True)

@socketio.on('join', namespace='/test')
def join(message):
    join_room(message['room'])
    session['receive_count'] = session.get('receive_count', 0) + 1
    emit('my_response',
         {'data': 'In rooms: ' + ', '.join(rooms()),
          'count': session['receive_count']})

@socketio.on('leave', namespace='/test')
def leave(message):
    leave_room(message['room'])
    session['receive_count'] = session.get('receive_count', 0) + 1
    emit('my_response',
         {'data': 'In rooms: ' + ', '.join(rooms()),
          'count': session['receive_count']})

@socketio.on('close_room', namespace='/test')
def close(message):
    session['receive_count'] = session.get('receive_count', 0) + 1
    emit('my_response', {'data': 'Room ' + message['room'] + ' is closing.',
                         'count': session['receive_count']},
         room=message['room'])
    close_room(message['room'])

@socketio.on('my_room_event', namespace='/test')
def send_room_message(message):
    session['receive_count'] = session.get('receive_count', 0) + 1
    emit('my_response',
         {'data': message['data'], 'count': session['receive_count']},
         room=message['room'])

@socketio.on('disconnect_request', namespace='/test')
def disconnect_request():
    @copy_current_request_context
    def can_disconnect():
        disconnect()

    session['receive_count'] = session.get('receive_count', 0) + 1
    # for this emit we use a callback function
    # when the callback function is invoked we know that the message has been
    # received and it is safe to disconnect
    emit('my_response',
         {'data': 'Disconnected!', 'count': session['receive_count']},
         callback=can_disconnect)

@socketio.on('my_ping', namespace='/test')
def ping_pong():
    emit('my_pong')

@socketio.on('connect', namespace='/test')
def test_connect():
    global thread
    with thread_lock:
        if thread is None:
            thread = socketio.start_background_task(background_thread)
    emit('my_response', {'data': 'Connected', 'count': 0})

@socketio.on('disconnect', namespace='/test')
def test_disconnect():
    print('Client disconnected', request.sid)
I have spent the last two days scouring the internet for help with this. The error I receive is:
ImportError: cannot import name 'socketio'
I have tried relative imports as well as monkey patching, but the error occurs each time. Any ideas on how to fix the issue would be greatly appreciated.
P.S. I am adapting the example from Miguel's flask-socketio repo, located here: link. In his example everything sits in one file, which works for a basic app; for an app with 50+ API endpoints, that is not an optimal solution.
Why do you have the SocketIO object in the top-level run.py module? Since this is a Flask extension, it is better to have it with all your other extensions in src/app.py:
from flask_socketio import SocketIO

socketio = SocketIO()

def create_app(env_name):
    """
    Create app
    """
    # app initialization
    app = Flask(__name__)
    app.config.from_object(app_config[env_name])
    # initializing bcrypt and db
    bcrypt.init_app(app)
    db.init_app(app)
    # initialize socketio
    socketio.init_app(app)
    app.register_blueprint(message_blueprint, url_prefix='/api/v1/message')
    return app
Then in run.py you can import this object:
from src.app import create_app, socketio

# ...

env_name = os.getenv('FLASK_ENV')
app = create_app(env_name)

# ...

if __name__ == '__main__':
    port = os.getenv('PORT')
    # run app
    socketio.run(app, host='0.0.0.0', port=port)
And in the same way you can import it in your MessageView.py module:
from src.app import socketio

@socketio.on('whatever')
def do_something(data):
    pass
I have a complete example application that uses this structure here: https://github.com/miguelgrinberg/Flask-SocketIO-Chat.
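One caveat worth adding: because src/app.py registers message_blueprint (defined in MessageView.py) while MessageView.py imports socketio from src.app, the two modules import each other. A sketch of an import order that keeps the cycle harmless, with socketio created first and the blueprint imported only inside create_app:

# src/app.py -- sketch of a safe import order for the circular dependency
from flask import Flask
from flask_socketio import SocketIO

socketio = SocketIO()  # defined before any view module is imported

def create_app(env_name):
    app = Flask(__name__)
    # deferred import: by now src.app (and socketio) already exist
    from src.views.MessageView import message_api as message_blueprint
    socketio.init_app(app)
    app.register_blueprint(message_blueprint, url_prefix='/api/v1/message')
    return app

With this layout, MessageView.py can use from src.app import socketio instead of from __main__ import socketio.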

How to use the db instance in flask-apscheduler's job functions

When I use flask-apscheduler (not plain apscheduler) in my Flask web project, I have some problems, especially when I use the db (flask-sqlalchemy) object. My job configuration is:
JOBS = [
    {
        'id': 'job1',
        'func': 'app.monitor.views:test',
        'args': (),
        'trigger': 'interval',
        'seconds': 2
    }
]
./app/__init__.py:
from flask import Flask
from flask.ext.bootstrap import Bootstrap
from flask.ext.mail import Mail
from flask.ext.moment import Moment
from flask.ext.sqlalchemy import SQLAlchemy
from flask.ext.login import LoginManager
from flask.ext.pagedown import PageDown
from flask_apscheduler import APScheduler
from celery import Celery
# from apscheduler.schedulers.blocking import BlockingScheduler
from config import config, Config

bootstrap = Bootstrap()
mail = Mail()
moment = Moment()
db = SQLAlchemy()
pagedown = PageDown()
celery = Celery(__name__, broker=Config.CELERY_BROKER_URL)
# https://pypi.python.org/pypi/Flask-APScheduler
scheduler = APScheduler()

login_manager = LoginManager()
login_manager.session_protection = 'strong'
login_manager.login_view = 'auth.login'

def create_app(config_name):
    app = Flask(__name__)
    app.config.from_object(config[config_name])
    config[config_name].init_app(app)
    bootstrap.init_app(app)
    mail.init_app(app)
    moment.init_app(app)
    db.init_app(app)
    login_manager.init_app(app)
    pagedown.init_app(app)
    scheduler.init_app(app)
    celery.conf.update(app.config)
    if not app.debug and not app.testing and not app.config['SSL_DISABLE']:
        from flask.ext.sslify import SSLify
        sslify = SSLify(app)
    from .monitor import monitor as monitor_1_0_blueprint
    from .laser import laser as laser_1_0_blueprint
    app.register_blueprint(monitor_1_0_blueprint, url_prefix='/monitor/api')
    app.register_blueprint(laser_1_0_blueprint, url_prefix='/laser/api/v1.0')
    return app
The errors were:
Error 1: db is :
Error 2: db is :
No handlers could be found for logger "apscheduler.executors.default"
Error 3: db is :
raise RuntimeError('working outside of application context')
RuntimeError: working outside of application context
The key to the problem is getting the db and app objects inside the flask-apscheduler job function (views.py):
from app import scheduler

def test():
    # to solve the logging error
    import logging
    log = logging.getLogger('apscheduler.executors.default')
    log.setLevel(logging.INFO)  # DEBUG
    fmt = logging.Formatter('%(levelname)s:%(name)s:%(message)s')
    h = logging.StreamHandler()
    h.setFormatter(fmt)
    log.addHandler(h)
    # get the app object
    app = scheduler.app
    # get the db object and use it
    with app.app_context():
        print '........................', db  # the right db object
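To make the pattern concrete, a short sketch of a job that actually uses db for a query (User is a hypothetical model, not from the original post):

# views.py -- sketch of a job that queries through db inside an app context
from app import db, scheduler
from app.models import User  # hypothetical model, for illustration only

def count_users_job():
    # flask-apscheduler keeps a reference to the Flask app on the scheduler,
    # so the job can push its own application context before touching db
    with scheduler.app.app_context():
        print 'user count:', User.query.count()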

App Engine in the development environment not printing logs

I am trying to print logs in the local development environment of Google App Engine. It seems to be set up the way it should be, but I am still not able to print the log. Can someone lend a helping hand here?
I need the output on the standard console.
import webapp2
from google.appengine.api import urlfetch
from Webx import WebxClass
import json
import logging

class SearchHandler(webapp2.RequestHandler):
    def __init__(self, *args, **kwargs):
        super(SearchHandler, self).__init__(*args, **kwargs)
        self.result = []
        self.searchPortals = [WebxClass()]
        self.asa = []

    def handleCallBack(self, rpc, portalObject):
        try:
            rr = rpc.get_result()
            if rr.status_code == 200:
                if isinstance(portalObject, WebxClass):
                    resultList = portalObject.getResultList(rr.content)
                    self.result.extend(resultList)
        except urlfetch.DownloadError:
            self.result = 'Error while fetching from portal - ' + portalObject.getName()

    def getSearchResult(self):
        rpcs = []
        searchKeyword = self.request.get('searchString')
        logging.error("------------------------------")
        for portal in self.searchPortals:
            rpc = urlfetch.create_rpc(deadline=5)
            rpc.callback = lambda: self.handleCallBack(rpc, portal)
            urlfetch.make_fetch_call(rpc, portal.getSearchURL(searchKeyword))
            rpcs.append(rpc)
        for rpc in rpcs:
            rpc.wait()
        self.response.status_int = 200
        self.response.headers['Content-Type'] = 'application/json'
        self.response.headers.add_header("Access-Control-Allow-Origin", "*")
        self.response.write(json.dumps(self.result))

app = webapp2.WSGIApplication([
    webapp2.Route(r'/search', methods=['GET'], handler='Torrent.SearchHandler:getSearchResult')
], debug=True)

def main():
    logging.getLogger().setLevel(logging.DEBUG)
    logging.debug("------------------------------")
    app.run()

if __name__ == '__main__':
    main()
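A likely explanation (my reading, not confirmed in the thread): under the python27 runtime, the dev server imports the module and serves the app object directly, so main() and the logging configuration inside it never run. A minimal sketch with the logger configured at import time instead:

# configure logging at import time -- dev_appserver never calls main()
import logging
import webapp2

logging.getLogger().setLevel(logging.DEBUG)

class PingHandler(webapp2.RequestHandler):
    def get(self):
        logging.debug('ping handler hit')  # should now show on the console
        self.response.write('ok')

app = webapp2.WSGIApplication([(r'/ping', PingHandler)], debug=True)

If I remember right, dev_appserver.py also accepts a --log_level=debug flag that controls which application log records are echoed to the console.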

Run a Scrapy spider in a Celery Task (django project)

I'm trying to run Scrapy (spider/crawl) from a Django project (as a task in the admin interface, using Celery). This is my code.
This is the error when I try to call the task from a Python shell:
djangoproject:
    - monapp:
        - tasks.py
        - spider.py
        - myspider.py
        - models.py
        .....
tasks.py:
from djcelery import celery
from demoapp.spider import *
from demoapp.myspider import *

@celery.task
def add(x, y):
    return x + y

@celery.task
def scra():
    result_queue = Queue()
    crawler = CrawlerWorker(MySpider(), result_queue)
    crawler.start()
    return "success"
spider.py:
from scrapy import project, signals
from scrapy.settings import Settings
from scrapy.crawler import Crawler
from scrapy.xlib.pydispatch import dispatcher
from multiprocessing.queues import Queue
import multiprocessing

class CrawlerWorker(multiprocessing.Process):
    def __init__(self, spider, result_queue):
        multiprocessing.Process.__init__(self)
        self.result_queue = result_queue
        self.crawler = Crawler(Settings())
        if not hasattr(project, 'crawler'):
            self.crawler.install()
        self.crawler.configure()
        self.items = []
        self.spider = spider
        dispatcher.connect(self._item_passed, signals.item_passed)

    def _item_passed(self, item):
        self.items.append(item)

    def run(self):
        self.crawler.crawl(self.spider)
        self.crawler.start()
        self.crawler.stop()
        self.result_queue.put(self.items)
myspider.py:
from scrapy.selector import HtmlXPathSelector
from scrapy.contrib.spiders import CrawlSpider, Rule
from scrapy.contrib.linkextractors.sgml import SgmlLinkExtractor
from scrapy.item import Item, Field

class TorentItem(Item):
    title = Field()
    desc = Field()

class MySpider(CrawlSpider):
    name = 'job'
    allowed_domains = ['tanitjobs.com']
    start_urls = ['http://tanitjobs.com/browse-by-category/Nurse/',]
    rules = (
        Rule(SgmlLinkExtractor(allow=('page=*',),
                               restrict_xpaths=('//div[@class="pageNavigation"]',),
                               unique=True),
             callback='parse_item', follow=True),
    )

    def parse_item(self, response):
        hxs = HtmlXPathSelector(response)
        items = hxs.select('//div[@class="offre"]/div[@class="detail"]')
        scraped_items = []
        for item in items:
            scraped_item = TorentItem()
            scraped_item['title'] = item.select('a/strong/text()').extract()
            scraped_item['desc'] = item.select('./div[@class="descriptionjob"]/text()').extract()
            scraped_items.append(scraped_item)
        return scraped_items
I got mine working from the shell using a Django management command. Below is my code snippet; feel free to modify it to fit your needs.
from twisted.internet import reactor
from scrapy.crawler import Crawler
from scrapy import signals
from scrapy.utils.project import get_project_settings
from django.core.management.base import BaseCommand
from myspiderproject.spiders.myspider import MySpider

class ReactorControl:
    def __init__(self):
        self.crawlers_running = 0

    def add_crawler(self):
        self.crawlers_running += 1

    def remove_crawler(self):
        self.crawlers_running -= 1
        if self.crawlers_running == 0:
            reactor.stop()

def setup_crawler(domain):
    settings = get_project_settings()
    crawler = Crawler(settings)
    crawler.configure()
    crawler.signals.connect(reactor_control.remove_crawler, signal=signals.spider_closed)
    spider = MySpider(domain=domain)
    crawler.crawl(spider)
    reactor_control.add_crawler()
    crawler.start()

reactor_control = ReactorControl()

class Command(BaseCommand):
    help = 'Crawls the site'

    def handle(self, *args, **options):
        setup_crawler('somedomain.com')
        reactor.run()  # the script will block here until the spider_closed signal was sent
Hope this helps.
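To connect this back to the Celery part of the question, a hypothetical wiring sketch (my addition, not from the answer above); it assumes the management command is saved as management/commands/crawl.py inside an app and uses the modern Celery API rather than the old djcelery import:

# tasks.py -- hypothetical Celery wrapper around the management command
from celery import shared_task
from django.core import management

@shared_task
def crawl_site():
    # Delegate to the 'crawl' management command defined above. Twisted's
    # reactor cannot be restarted inside one process, so configure the worker
    # to recycle its child processes between tasks (e.g. max tasks per child = 1).
    management.call_command('crawl')

The task can then be triggered from the admin or a shell with crawl_site.delay().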