Hello guys, I'm trying to use django_crontab in my Django project and it's not working. Does anyone know anything about this? I'm using Linux CentOS 8. I want to schedule a task that adds some data to my database. Can someone help me?
The steps I have taken are:
1) pip install django-crontab
2) add it to INSTALLED_APPS
3) build my cron function:
from django.core.management.base import BaseCommand
from backups.models import Backups
from devices.models import Devices
from datetime import datetime
from jnpr.junos import Device
from jnpr.junos.exception import ConnectError
from lxml import etree


class Command(BaseCommand):
    def handle(self, *args, **kwargs):
        devices = Devices.objects.all()
        for x in devices:
            devid = Devices.objects.get(pk=x.id)
            ip = x.ip_address
            username = x.username
            password = x.password
            print(devid, ip, username, password)
            dev1 = Device(host=ip, user=username, passwd=password)
            try:
                dev1.open()
                stype = "success"
                dataset = dev1.rpc.get_config(options={'format': 'set'})
                datatext = dev1.rpc.get_config(options={'format': 'text'})
                file_name = f'{ip}_{datetime.now().date()}.txt'
                print(file_name)
                with open(f'media/{file_name}', 'w') as f:
                    f.write(etree.tostring(dataset, encoding='unicode'))
                    f.write(etree.tostring(datatext, encoding='unicode'))
                backup = Backups(device_id=devid, host=ip, savetype=stype,
                                 time=datetime.now(), backuptext=file_name)
                print(backup)
                backup.save()
            except ConnectError as err:
                print("Cannot connect to device: {0}".format(err))
                print("----- Failed ----------")
                stype = "Cannot connect to device: {0}".format(err)
                backup = Backups(device_id=devid, host=ip, savetype=stype,
                                 time=datetime.now())
                backup.save()
4) add my cron job to my settings.py file:
CRONJOBS = [ ('*/5 * * * *', 'django.core.management.call_command', ['backup-dev']), ]
5) python manage.py crontab add
6) python manage.py crontab show
Currently active jobs in crontab:
0662c1224789b131740fddef54f273c1 -> ('* * * * *', 'django.core.management.call_command', ['backup-dev'])
and it's still not working. Any ideas?
When I run the command python manage.py backup-dev manually, my task works perfectly.
I also tried adding the management command directly to the CentOS machine's crontab via crontab -e, and still nothing. Any ideas?
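One thing worth checking is what the job actually prints when cron runs it. Based on the optional suffix field described in the django-crontab README, each CRONJOBS entry can carry a trailing string that gets appended to the generated crontab line, which can redirect output to a log file. A sketch (the log path is just an example):

CRONJOBS = [
    ('*/5 * * * *', 'django.core.management.call_command', ['backup-dev'], {}, '>> /tmp/backup-dev.log 2>&1'),
]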
Related
I have set up a scheduled task to run daily on PythonAnywhere.
The task uses a Django management command, as I found this was the preferred method with PythonAnywhere.
The task produces no errors, but I don't get any output. 2022-06-16 22:56:13 -- Completed task, took 9.13 seconds, return code was 0.
I have tried using print() to debug areas of the code, but I cannot produce any output in either the error or server logs, even after trying print(date_today, file=sys.stderr).
I have set the path on the Scheduled Task as (not sure if this is correct, but it seems to be the only way I can get it to run without errors):
workon advancementvenv && python3.8 /home/vicinstofsport/advancement_series/manage.py shell < /home/vicinstofsport/advancement_series/advancement/management/commands/schedule_task.py
I have tried setting the path as below, but then it gets an error when I try to import from the models.py file (I know this is related to a relative import but cannot manage to resolve it).
Traceback (most recent call last):
  File "/home/vicinstofsport/advancement_series/advancement/management/commands/schedule_task.py", line 3, in <module>
    from advancement.models import Bookings
ModuleNotFoundError: No module named 'advancement'
2022-06-17 03:41:22 -- Completed task, took 14.76 seconds, return code was 1.
Any ideas on how I can get this working? It all works fine locally if I use the command py manage.py scheduled_task; it just fails on PythonAnywhere.
Below is the task code and structure of the app.
from django.core.management.base import BaseCommand
import datetime
from advancement.models import Bookings
from sendgrid import SendGridAPIClient
from sendgrid.helpers.mail import Mail
from decouple import config


class Command(BaseCommand):
    help = 'Sends Program Survey'

    def handle(self, *args, **kwargs):
        # Get today's date
        date_today = datetime.datetime.now().date()

        # Get booking data
        bookings = Bookings.objects.all()

        # For each booking today, send the survey email
        for booking in bookings:
            if booking.booking_date == date_today:
                # pick the SendGrid template for the booking's program type
                if booking.program_type == "Sport Science":
                    booking_template_id = 'd-bbc79704a31a4a62a5bfea90f6342b7a'
                else:
                    booking_template_id = 'd-3167927b3e2146519ff6d9035ab59256'
                booking_message = Mail(
                    from_email=config('FROM_EMAIL'),
                    to_emails=[booking.email],
                )
                booking_message.template_id = booking_template_id
                try:
                    sg = SendGridAPIClient(config('SG_API'))
                    response = sg.send(booking_message)
                except Exception as e:
                    print(e)
            else:
                print('No')
Thanks in advance for any help.
Thanks Filip and Glenn, testing within the Bash console and changing the directory in the task fixed the issue. Adding cd /home/vicinstofsport/advancement_series && to the start of my task allowed the function to run.
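For reference, the full task line presumably ended up looking something like this (paths taken from the question above):
cd /home/vicinstofsport/advancement_series && workon advancementvenv && python3.8 manage.py scheduled_task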
I have a task of updating every single row of a MySQL table, but it's super slow. I rarely need to do it, and only when I change something fundamental, but I thought this would be a great chance to learn about multithreading. However, all the examples and tutorials online cover some things and not others, and I'm struggling to piece all the information together.
I know I need to make a Celery process, I just don't know if I'm doing it right. A lot of tutorials talk about dockerizing a Redis environment without explaining how to do it, so I thought I'd come here for some real human-to-human interaction to maybe help me feel less stupid about this. Here's my code so far:
/website/__init__.py
from flask import Flask, appcontext_popped, render_template
from flask_sqlalchemy import SQLAlchemy
from flask_login import LoginManager, UserMixin, login_user, login_required, logout_user, current_user
from flask_migrate import Migrate
from flask_wtf import CSRFProtect
import logging
from celery import Celery  # import the class directly so the name isn't shadowed below

# Path math
import sys
import os

from . import config

db: SQLAlchemy = SQLAlchemy()
migrate = Migrate()
csrf = CSRFProtect()
celery: Celery  # assigned in create_app() via make_celery()
DB_NAME = "main"


def create_app(name):
    # Flask instance
    app = Flask(__name__)
    app.config.from_object(config.ProdTestConfig)
    # logging stuff

    # Database
    db.init_app(app)
    migrate.init_app(app, db)
    csrf.init_app(app)

    global celery
    celery = make_celery(app)

    with app.app_context():
        db.create_all()

    # Models and Blueprints here
    from .helper_functions import migration_handling as mgh
    # where you will find the thing I need to run async
    app.before_first_request(mgh.run_back_check)

    # log manager stuff
    # error page handling
    return app


def make_celery(app):
    # note: writing `celery = celery.Celery(...)` with `import celery` would shadow
    # the module and raise UnboundLocalError, hence the class import at the top
    celery = Celery(
        app.import_name,
        backend=app.config['CELERY_RESULT_BACKEND'],
        broker=app.config['CELERY_BROKER_URL']
    )
    celery.conf.update(app.config)

    class ContextTask(celery.Task):
        def __call__(self, *args, **kwargs):
            with app.app_context():
                return self.run(*args, **kwargs)

    celery.Task = ContextTask
    return celery
I've read that some other setups seem to fit a bit better, like using:
celery = Celery(__name__, broker=Config.CELERY_BROKER_URL, result_backend=Config.RESULT_BACKEND)
Then in create_app() they run celery.conf.update(app.config). The issue with this is that I don't know how to set up a Redis server on the Linode machine hosting the site, or on my personal Windows machine. I have redis pip-installed. This is how the function I'm trying to run async looks:
@celery.task(name='app.tasks.campaign_pay_out_process')
def campaign_pay_out_process():
    '''
    Process every campaign's pay.
    '''
    campaign: Campaigns
    for campaign in Campaigns.query.filter_by():
        campaign.process_pay()
    db.session.commit()
    current_app.logger.info('Done Campaign Pay Out Processing')
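As for the broker itself, the settings that make_celery() reads above just need to point at a running Redis instance; a minimal sketch, assuming Redis is listening on its default port (the class name matches the Flask config further down):

class Config:
    """Base config."""
    CELERY_BROKER_URL = 'redis://localhost:6379/0'
    CELERY_RESULT_BACKEND = 'redis://localhost:6379/0'

Note that the pip redis package is only the Python client; the server itself is normally installed from the system package manager and run as a service.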
I'm running gunicorn under supervisor because restarting is super easy, and it has rid my life of super long Linux commands to start a process. I know the command for Celery is celery -A celery_worker.celery worker --pool=solo --loglevel=info, and I'd love to know how to include that in my workflow. Here's my supervisor config:
[program:paymentwebapp]
directory=/home/sai/paymentWebApp
command=/home/sai/paymentWebApp/venv/bin/gunicorn --workers 1 --threads 3 wsgi:app
user=sai
autostart=true
autorestart=true
stopasgroup=true
killasgroup=true
stderr_logfile=/var/log/paymentwebapp/paymentwebapp.err.log
stdout_logfile=/var/log/paymentwebapp/paymentwebapp.out.log
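To keep the Celery worker running the same way, a second supervisor program block could sit alongside the gunicorn one; a sketch, assuming the same paths and virtualenv as above:

[program:celeryworker]
directory=/home/sai/paymentWebApp
command=/home/sai/paymentWebApp/venv/bin/celery -A celery_worker.celery worker --pool=solo --loglevel=info
user=sai
autostart=true
autorestart=true
stderr_logfile=/var/log/paymentwebapp/celery.err.log
stdout_logfile=/var/log/paymentwebapp/celery.out.log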
Here's my Flask config right now:
from os import environ, path
from dotenv import load_dotenv

DB_NAME = "main"


class Config:
    """Base config."""
    # SESSION_COOKIE_NAME = environ.get('SESSION_COOKIE_NAME')
    MAX_CONTENT_LENGTH = 16 * 1000 * 1000
    RECEIPT_FOLDER = '../uploads/receipts'
    IMPORT_FOLDER = 'uploads/imports'
    UPLOAD_FOLDER = 'uploads'
    EXPORT_FOLDER = '/uploads/exports'
    UPLOAD_EXTENSIONS = ['.jpg', '.png', '.pdf', '.csv', '.xls', '.xlsx']
    STATIC_FOLDER = 'static'
    TEMPLATES_FOLDER = 'templates'


class ProdConfig(Config):
    basedir = path.abspath(path.dirname(__file__))
    load_dotenv('/home/sai/.env')
    env_dict = dict(environ)
    FLASK_ENV = 'production'
    DEBUG = False
    TESTING = False
    SQLALCHEMY_DATABASE_URI = environ.get('PROD_DATABASE_URI')
    SECRET_KEY = environ.get('SECRET_KEY')
    SERVER_NAME = environ.get('SERVER_NAME')
    SESSION_COOKIE_SECURE = True
    WTF_CSRF_TIME_LIMIT = 600
    # Uploads


class DevConfig(Config):
    basedir = path.abspath(path.dirname(__file__))
    # raw string so the backslashes aren't treated as escape sequences
    load_dotenv(r'C:\saiscripts\intercept_branch\Payment Web App Project\.env')
    env_dict = dict(environ)
    FLASK_ENV = 'development'
    DEBUG = True
    SQLALCHEMY_DATABASE_URI = environ.get('DEV_DATABASE_URI')
    SECRET_KEY = environ.get('SECRET_KEY')


class ProdTestConfig(DevConfig):
    '''
    Developer config settings but production database server
    '''
    SQLALCHEMY_DATABASE_URI = environ.get('PROD_DATABASE_URI')


if __name__ == '__main__':
    print(environ.get('SQLALCHEMY_DATABASE_URI'))
This is where I copied some code from a tutorial because I'm supposed to make a celery worker:
#!/usr/bin/env python
# from app import create_app, celery  (the tutorial's original import)
from website import create_app

app = create_app(__name__)  # create_app() takes a name argument in __init__.py
app.app_context().push()

from website import celery  # imported after create_app() so the module-level global is set
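With that file in place, the worker command mentioned earlier is pointed at it:
celery -A celery_worker.celery worker --pool=solo --loglevel=info
and it can be supervised the same way as gunicorn (see the sketch after the supervisor config above).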
I'm running a small db on PythonAnywhere, and am trying to set up a scheduled .backup of my sqlite3 database. Is there any way in the command line to add a time/date stamp to the filename, so that it doesn't overwrite the previous day's backup?
Here's the code I'm using, if it matters:
sqlite3 db.sqlite3
.backup dbbackup.sqlite3
.quit
It runs every 24 hours, but the previous day's backup gets overwritten. I'd love to be able to save it as dbbackup.timestamp.sqlite3 or something, so I could have multiple backups available.
Thanks!
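For what it's worth, a timestamp can be generated by the shell at the point where sqlite3 is invoked, so the .backup target gets a fresh name each day; a sketch, assuming the scheduled task runs a shell command line:

sqlite3 db.sqlite3 ".backup dbbackup.$(date +%F).sqlite3"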
I suggest you handle this case with a management command and a cron job.
This is an example of how to do it; save the file e.g. in yourapp/management/commands/dbackup.py, and don't forget to add the __init__.py files:
yourapp/management/__init__.py
yourapp/management/commands/__init__.py
yourapp/management/commands/dbackup.py
But first, add these lines to your settings.py:
USERNAME_SUPERUSER = 'yourname'
PASSWORD_SUPERUSER = 'yourpassword'
EMAIL_SUPERUSER = 'youremail@domain.com'
DATABASE_NAME = 'db.sqlite3'
The important paths in the project tree, if you are deploying on PythonAnywhere:
/home/yourusername/yourproject/manage.py
/home/yourusername/yourproject/db.sqlite3
/home/yourusername/yourproject/yourproject/settings.py
/home/yourusername/yourproject/yourapp/management/commands/dbackup.py
Add the script below to yourapp/management/commands/dbackup.py; you can also customize it as you need.
import os
import time

from django.conf import settings
from django.contrib.auth.models import User
from django.core.management.base import BaseCommand, CommandError

USERNAME_SUPERUSER = settings.USERNAME_SUPERUSER
PASSWORD_SUPERUSER = settings.PASSWORD_SUPERUSER
EMAIL_SUPERUSER = settings.EMAIL_SUPERUSER
DATABASE_NAME = settings.DATABASE_NAME  # eg: 'db.sqlite3'


class Command(BaseCommand):
    help = 'Command to deploy and backup the latest database.'

    def add_arguments(self, parser):
        parser.add_argument(
            '-b', '--backup', action='store_true',
            help='Just backup command confirmation.'
        )

    def success_info(self, info):
        return self.stdout.write(self.style.SUCCESS(info))

    def error_info(self, info):
        return self.stdout.write(self.style.ERROR(info))

    def handle(self, *args, **options):
        backup = options['backup']
        if not backup:
            return self.print_help('manage.py', 'dbackup')

        # Removing media files, if you need to remove all media files
        # os.system('rm -rf media/images/')
        # self.success_info("[+] Removed media files at `media/images/`")

        # Backing up and removing the database `db.sqlite3`
        if os.path.isfile(DATABASE_NAME):
            # backup the latest database, eg to: `db.2017-02-03.sqlite3`
            backup_database = 'db.%s.sqlite3' % time.strftime('%Y-%m-%d')
            # os.rename() both backs up and removes the old file in one step
            os.rename(DATABASE_NAME, backup_database)
            self.success_info("[+] Backed up the database `%s` to %s" % (DATABASE_NAME, backup_database))
            self.success_info("[+] Removed %s" % DATABASE_NAME)

        # Removing all migration files for `yourapp`
        def remove_migrations(path):
            exclude_files = ['__init__.py', '.gitignore']
            path = os.path.join(settings.BASE_DIR, path)
            filelist = [
                f for f in os.listdir(path)
                if f.endswith('.py') and f not in exclude_files
            ]
            for f in filelist:
                os.remove(os.path.join(path, f))
            self.success_info('[+] Removed migration files in {}'.format(path))

        remove_migrations('yourapp/migrations/')

        # Removing all `.pyc` files
        os.system("find . -name '*.pyc' -delete")
        self.success_info('[+] Removed all *.pyc files.')

        # Creating database migrations
        # These commands should re-generate the new database, eg: `db.sqlite3`
        os.system('python manage.py makemigrations')
        os.system('python manage.py migrate')
        self.success_info('[+] Created database migrations.')

        # Creating a superuser
        user = User.objects.create_superuser(
            username=USERNAME_SUPERUSER,
            password=PASSWORD_SUPERUSER,
            email=EMAIL_SUPERUSER
        )
        user.save()
        self.success_info('[+] Created a superuser for `{}`'.format(USERNAME_SUPERUSER))
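A quick manual run first confirms the command is wired up (assuming yourapp is in INSTALLED_APPS):

$ python manage.py dbackup -b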
Set up this command with crontab:
$ sudo crontab -e
and add the following lines:
# [minute] [hour] [day of month] [month] [day of week]
59 23 * * * source ~/path/to/yourenv/bin/activate && cd ~/path/to/yourenv/yourproject/ && ./manage.py dbackup -b
But if you deploy on PythonAnywhere, you just need to add a scheduled task: set it to run daily at hour 23 and minute 59 UTC, with the command:
source /home/yourusername/.virtualenvs/yourenv/bin/activate && cd /home/yourusername/yourproject/ && ./manage.py dbackup -b
Update 1
I suggest replacing the commands that shell out to manage.py, such as os.system('python manage.py makemigrations'), with Django's call_command function:
from django.core.management import call_command
call_command('collectstatic', verbosity=3, interactive=False)
call_command('migrate', 'myapp', verbosity=3, interactive=False)
...which is equivalent to the following commands typed in the terminal:
$ ./manage.py collectstatic --noinput -v 3
$ ./manage.py migrate myapp --noinput -v 3
See running management commands in the Django docs.
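Applied to this command, the cron line's ./manage.py dbackup -b could likewise become (a sketch; the option name matches the add_arguments definition above):

from django.core.management import call_command
call_command('dbackup', backup=True)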
Update 2
The previous setup is for when you need to re-deploy your project with a fresh database. If you only want to back up the database under a dated name, you can use shutil.copyfile:
import os
import time
import shutil

from django.conf import settings
from django.core.management.base import BaseCommand, CommandError

DATABASE_NAME = settings.DATABASE_NAME  # eg: 'db.sqlite3'


class Command(BaseCommand):
    help = 'Command to deploy and backup the latest database.'

    def add_arguments(self, parser):
        parser.add_argument(
            '-b', '--backup', action='store_true',
            help='Just backup command confirmation.'
        )

    def success_info(self, info):
        return self.stdout.write(self.style.SUCCESS(info))

    def error_info(self, info):
        return self.stdout.write(self.style.ERROR(info))

    def handle(self, *args, **options):
        backup = options['backup']
        if not backup:
            return self.print_help('manage.py', 'dbackup')

        if os.path.isfile(DATABASE_NAME):
            # copy the latest database, eg to: `db.2017-02-28.sqlite3`
            backup_database = 'db.%s.sqlite3' % time.strftime('%Y-%m-%d')
            shutil.copyfile(DATABASE_NAME, backup_database)
            self.success_info("[+] Backed up the database `%s` to %s" % (DATABASE_NAME, backup_database))
I'm reading DATABASE_URL from an env variable in my uwsgi file. My Django ORM connection works with % in the password, but when I try to connect with SQLAlchemy it does not.
from sqlalchemy import orm, create_engine, MetaData
from urllib import quote_plus  # Python 2; on Python 3 this is urllib.parse.quote_plus

from app.settings import DATABASES

con = "mysql+pymysql://{0}:{1}@{2}:{3}/{4}".format(
    DATABASES['default']['USER'],
    DATABASES['default']['PASSWORD'],
    DATABASES['default']['HOST'],
    DATABASES['default']['PORT'],
    DATABASES['default']['NAME']
)

engine = create_engine(
    con,
    echo=False,
    pool_recycle=1800,
    max_overflow=250,
    pool_size=200
)
I even tried quote_plus, but it did not work.
This does not work:
mysql+pymysql://api:#>J.6D%qhZ@localhost:3306/table_tmp
This works:
mysql+pymysql://api:#>J.6DqhZ@localhost:3306/table_tmp
When I use the same password with % via ipython it works, but via the env variable in uwsgi.ini it doesn't.
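For reference, quote_plus would normally be applied to the password before formatting it into the URL, since it percent-encodes %, @, # and other special characters; a sketch, assuming Python 3's urllib.parse:

from urllib.parse import quote_plus

password = quote_plus(DATABASES['default']['PASSWORD'])  # '%' becomes '%25', '@' becomes '%40'
con = "mysql+pymysql://{0}:{1}@{2}:{3}/{4}".format(
    DATABASES['default']['USER'], password,
    DATABASES['default']['HOST'], DATABASES['default']['PORT'],
    DATABASES['default']['NAME']
)

Given that the same password works from ipython but not when read through uwsgi.ini, it is also worth checking whether uwsgi is consuming the % itself: % has special meaning in uwsgi ini files, and a literal one is usually written as %%.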
I use Pylons at my job, but I'm new to Django. I'm making an RSS filtering application, and I'd like to have two backend processes that run on a schedule: one to crawl RSS feeds for each user, and another to determine the relevance of individual posts relative to users' past preferences. In Pylons, I'd just write paster commands to update the db with that data. Is there an equivalent in Django? E.g., is there a way to run the equivalent of python manage.py shell in a non-interactive mode?
I think that's what Custom Management Commands are there for.
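A minimal custom command is just a Command class dropped into yourapp/management/commands/; a sketch, with placeholder app and command names:

# yourapp/management/commands/crawlfeeds.py
from django.core.management.base import BaseCommand

class Command(BaseCommand):
    help = 'Crawls the RSS feeds for each user'

    def handle(self, *args, **options):
        # non-interactive work goes here, with the full ORM available
        self.stdout.write('crawl finished')

It then runs non-interactively as python manage.py crawlfeeds, which is easy to call from cron.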
Yes, this is actually how I run my cron backup scripts. You just need to load your virtualenv (if you're using virtual environments) and your project settings.
I hope you can follow this, but after the line # manage.py shell you can write your code just as if you were in manage.py shell.
You can import your virtualenv like so:
import site
site.addsitedir(VIRTUALENV_PATH + '/lib/python2.6/site-packages')
You can then add the django project to the path
import sys
sys.path.append(DJANGO_ROOT)
sys.path.append(PROJECT_PATH)
Next you load the django settings and chdir to the django project
import os
from django.core.management import setup_environ
from myproject import settings
setup_environ(settings)
os.chdir(PROJECT_PATH)
After this point your environment will be set just like if you started with manage.py shell
You can then run anything just as if you were in the interactive shell.
from application.models import MyModel

for element in MyModel.objects.all():  # iterate the queryset, not the model class
    element.delete()
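As an aside for readers on newer Django: setup_environ was removed in Django 1.6, and the equivalent bootstrap on modern versions is django.setup(), roughly:

import os
import django

os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'myproject.settings')
django.setup()

The rest of the approach stays the same.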
Here is my backup file in full. I've abstracted the process out into functions. This would be named daily_backup and put into the cron.daily folder to be run daily. You can see how to set up the environment and modify the functionality as needed.
#!/usr/bin/env python
import sys
import os
import site
import logging
from datetime import datetime

PROJECT_NAME = 'myproject'
DJANGO_ROOT = '/var/www/django'
PROJECT_PATH = DJANGO_ROOT + '/' + PROJECT_NAME
VIRTUALENV_PATH = '/var/www/envs/' + PROJECT_NAME
BACKUP_DIR = '/var/www/backups/%s/daily' % (PROJECT_NAME)
TODAY = datetime.now().strftime('%Y%m%d-%H%M%S')
FILE_NAME = PROJECT_NAME + '_' + TODAY

site.addsitedir(VIRTUALENV_PATH + '/lib/python2.6/site-packages')
sys.path.append(DJANGO_ROOT)
sys.path.append(PROJECT_PATH)

from django.core.management import setup_environ
from myproject import settings
setup_environ(settings)
os.chdir(PROJECT_PATH)

# manage.py shell
from django.conf import settings

logging.basicConfig(level=logging.WARN)


def _setup():
    if not os.path.exists(BACKUP_DIR):
        logging.debug('Creating backup directory ' + BACKUP_DIR)
        os.mkdir(BACKUP_DIR)
        os.mkdir(BACKUP_DIR + '/databases')
    else:
        logging.debug('Using backup directory ' + BACKUP_DIR)


def _remove_old():
    logging.debug('Cleaning out old backups')
    # keep past 7 days
    command = "find %s* -name '%s*' -mtime +7 -exec rm {} \\;" % (BACKUP_DIR, PROJECT_NAME)
    os.system(command)


def _create_backup():
    logging.debug('Backup database')
    if settings.DATABASE_ENGINE == 'mysql':
        command = 'mysqldump -u %s --password=%s %s > %s/databases/%s.sql' % (
            settings.DATABASE_USER, settings.DATABASE_PASSWORD,
            settings.DATABASE_NAME, BACKUP_DIR, FILE_NAME)
    else:
        command = '%s/bin/python %s/manage.py dumpdata --indent=4 > %s/databases/%s.json' % (
            VIRTUALENV_PATH, PROJECT_PATH, BACKUP_DIR, FILE_NAME)
    os.system(command)

    logging.debug('Backup project')
    command = 'tar -czf %s/%s.tgz -C %s %s/' % (BACKUP_DIR, FILE_NAME, DJANGO_ROOT, PROJECT_NAME)
    os.system(command)


if __name__ == '__main__':
    _setup()
    _remove_old()
    _create_backup()
Sounds like you need some twod.wsgi in your life: http://packages.python.org/twod.wsgi/