How to back up a PostgreSQL database using Django?

I'm trying to export a PostgreSQL database, but it returns backup() takes no arguments (1 given). I've
tried various methods, but have been unable to export the database.
from pathlib import Path, PureWindowsPath
from subprocess import PIPE, Popen


def backup():
    version = 11
    # postgresDir = "D:/Program Files (x86)/PostgreSQL/9.1/bin"
    postgresDir = str("D:/Program Files (x86)/PostgreSQL/9.1/bin/").split('\\')[-1:][0]

    directory = postgresDir
    filename = 'myBackUp2'  # output filename here
    saveDir = Path("D:/{}.tar".format(filename))  # output directory here
    file = PureWindowsPath(saveDir)

    host = 'localhost'
    user = 'postgres'
    port = '5432'
    dbname = 'BPS_Server'  # database name here
    proc = Popen(['pg_dump', '-h', host, '-U', user, '-W', '-p', port,
                  '-F', 't', '-f', str(file), '-d', dbname],
                 cwd=directory, shell=True, stdin=PIPE)
    proc.wait()

backup()

To back up a PostgreSQL database you have two options:
1) Use a bash script and run it every day or month to take a backup; you can find more information here.
2) Use a Django fixture:
from django.core.management import call_command

def backup():
    filename = 'myBackUp2'  # output filename here
    saveDir = open("D:/{}.json".format(filename), 'w')
    # change application_name to the Django app you want to back up
    call_command('dumpdata', 'application_name', stdout=saveDir, indent=3)
    saveDir.close()
call_command() executes a Django management command, and dumpdata produces a fixture (serialized data) from your app's models. See the Django documentation on dumpdata for more information.
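If you want to stay with the pg_dump approach from the question, here is a minimal sketch (host, port, user, and database name are taken from the question; the password is a placeholder). Two things to note: -W forces an interactive password prompt, which hangs a non-interactive process, so the sketch sets PGPASSWORD and passes --no-password instead; and shell=True is unnecessary when passing an argument list.

import os
import subprocess

def backup():
    out_file = 'D:/myBackUp2.tar'
    # placeholder credential; PGPASSWORD + --no-password replaces the prompt that -W forces
    env = dict(os.environ, PGPASSWORD='your_password')
    subprocess.run(
        ['pg_dump', '-h', 'localhost', '-p', '5432', '-U', 'postgres',
         '-F', 't', '-f', out_file, '--no-password', 'BPS_Server'],
        env=env, check=True)  # raises CalledProcessError if pg_dump fails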

Related

Issue with django-crontab not working

Hello, I'm trying to use django-crontab in my Django project and it's not working. Does anyone know anything about this? I'm using Linux CentOS 8. I want to schedule a task to add some data to my database. Can someone help me?
The steps that I have taken are:
1) pip install django-crontab
2) add it to INSTALLED_APPS
3) build my cron function:
from django.core.management.base import BaseCommand
from backups.models import Backups
from devices.models import Devices
from datetime import datetime
from jnpr.junos import Device
from jnpr.junos.exception import ConnectError
from lxml import etree
from django.http import HttpResponse
from django.core.files import File


class Command(BaseCommand):
    def handle(self, *args, **kwargs):
        devices = Devices.objects.all()
        for x in devices:
            devid = Devices.objects.get(pk=x.id)
            ip = x.ip_address
            username = x.username
            password = x.password
            print(devid, ip, username, password)
            dev1 = Device(host=ip, user=username, passwd=password)
            try:
                dev1.open()
                stype = "success"
                dataset = dev1.rpc.get_config(options={'format': 'set'})
                datatext = dev1.rpc.get_config(options={'format': 'text'})
                result = (etree.tostring(dataset, encoding='unicode'))
                file_name = f'{ip}_{datetime.now().date()}.txt'
                print(file_name)
                with open(f'media/{file_name}', 'w') as f:
                    f.write(etree.tostring(dataset, encoding='unicode'))
                    f.write(etree.tostring(datatext, encoding='unicode'))
                backup = Backups(device_id=devid, host=ip, savetype=stype,
                                 time=datetime.now(), backuptext=file_name)
                print(backup)
                backup.save()
            except ConnectError as err:
                print("Cannot connect to device: {0}".format(err))
                print("----- Failed ----------")
                stype = "Cannot connect to device: {0}".format(err)
                backup = Backups(device_id=devid, host=ip, savetype=stype,
                                 time=datetime.now())
                backup.save()
4) add my cron job to my settings.py file:
CRONJOBS = [ ('*/5 * * * *', 'django.core.management.call_command', ['backup-dev']), ]
5) python manage.py crontab add
6) python manage.py crontab show
Currently active jobs in crontab:
0662c1224789b131740fddef54f273c1 -> ('* * * * *', 'django.core.management.call_command', ['backup-dev'])
It's still not working. Any ideas? When I run the command python manage.py backup-dev manually, the task works perfectly.
I also tried adding the management command directly to the CentOS machine's crontab via crontab -e, and still nothing. Any ideas?
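For comparison, a minimal django-crontab configuration sketch (using the command name from the question) looks like this:

# settings.py
INSTALLED_APPS = [
    # ...
    'django_crontab',
]

CRONJOBS = [
    # every five minutes; call_command resolves the management command by name
    ('*/5 * * * *', 'django.core.management.call_command', ['backup-dev']),
]

One detail stands out in the output above: crontab show reports * * * * * while CRONJOBS specifies */5 * * * *, which suggests the registered job predates the settings change. django-crontab only snapshots CRONJOBS when the jobs are added, so after editing the schedule, run python manage.py crontab remove followed by python manage.py crontab add.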

Airflow Remote file sensor

I am trying to find whether any files on a remote server match a provided pattern, something similar to the solution below:
Airflow File Sensor for sensing files on my local drive
I used SSHOperator with a bash command, as below:
SSH_Bash = """
echo 'poking for files...'
ls /home/files/test.txt
if [ $? -eq "0" ]; then
echo 'Found file'
else
echo 'failed to find'
fi
"""
t1 = SSHOperator(
ssh_conn_id='ssh_default',
task_id='test_ssh_operator',
command=SSH_Bash,
dag=dag)
It works, but it doesn't look like an optimal solution. Could someone help me find something better than a bash script to sense files on the remote server?
I tried the SFTP sensor below:
import os
import re
import logging

from paramiko import SFTP_NO_SUCH_FILE
from airflow.contrib.hooks.sftp_hook import SFTPHook
from airflow.operators.sensors import BaseSensorOperator
from airflow.plugins_manager import AirflowPlugin
from airflow.utils.decorators import apply_defaults


class SFTPSensor(BaseSensorOperator):
    @apply_defaults
    def __init__(self, filepath, filepattern, sftp_conn_id='sftp_default', *args, **kwargs):
        super(SFTPSensor, self).__init__(*args, **kwargs)
        self.filepath = filepath
        self.filepattern = filepattern
        self.hook = SFTPHook(sftp_conn_id)

    def poke(self, context):
        full_path = self.filepath
        file_pattern = re.compile(self.filepattern)
        try:
            directory = os.listdir(self.hook.full_path)
            for files in directory:
                if not re.match(file_pattern, files):
                    self.log.info(files)
                    self.log.info(file_pattern)
                else:
                    context["task_instance"].xcom_push("file_name", files)
                    return True
            return False
        except IOError as e:
            if e.errno != SFTP_NO_SUCH_FILE:
                raise e
            return False


class SFTPSensorPlugin(AirflowPlugin):
    name = "sftp_sensor"
    sensors = [SFTPSensor]
But this always pokes the local machine instead of the remote machine. Could someone point out where I am making a mistake?
I replaced the line
directory = os.listdir(self.hook.full_path)
with
directory = self.hook.list_directory(full_path)
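With that replacement, a minimal corrected poke() looks like the sketch below (assuming the contrib SFTPHook's list_directory, which returns the file names in the remote directory):

    def poke(self, context):
        # list the *remote* directory through the SFTP hook, not the local os module
        file_pattern = re.compile(self.filepattern)
        try:
            for name in self.hook.list_directory(self.filepath):
                if re.match(file_pattern, name):
                    context["task_instance"].xcom_push("file_name", name)
                    return True
            return False
        except IOError as e:
            if e.errno != SFTP_NO_SUCH_FILE:
                raise
            return False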

Scheduled, timestamped sqlite3 .backup?

I'm running a small db on PythonAnywhere, and am trying to set up a scheduled .backup of my sqlite3 database. Is there any way in the command line to add a time/date stamp to the filename, so that it doesn't overwrite the previous day's backup?
Here's the code I'm using, if it matters:
sqlite3 db.sqlite3
.backup dbbackup.sqlite3
.quit
It runs every 24 hours, but the previous day's backup gets overwritten. I'd love to be able to save it as dbbackup.timestamp.sqlite3 or something, so I could have multiple backups available.
Thanks!
I suggest handling this case with a management command and a cron job.
Here is an example of how to do it; save the file as, e.g., yourapp/management/commands/dbackup.py, and don't forget to add the __init__.py files:
yourapp/management/__init__.py
yourapp/management/commands/__init__.py
yourapp/management/commands/dbackup.py
But first, add these lines to your settings.py:
USERNAME_SUPERUSER = 'yourname'
PASSWORD_SUPERUSER = 'yourpassword'
EMAIL_SUPERUSER = 'youremail@domain.com'
DATABASE_NAME = 'db.sqlite3'
The important paths in the project tree, if you're deploying on PythonAnywhere:
/home/yourusername/yourproject/manage.py
/home/yourusername/yourproject/db.sqlite3
/home/yourusername/yourproject/yourproject/settings.py
/home/yourusername/yourproject/yourapp/management/commands/dbackup.py
Add the script below to yourapp/management/commands/dbackup.py; you can also customize it as you need.
import os
import time

from django.conf import settings
from django.contrib.auth.models import User
from django.core.management.base import (BaseCommand, CommandError)

USERNAME_SUPERUSER = settings.USERNAME_SUPERUSER
PASSWORD_SUPERUSER = settings.PASSWORD_SUPERUSER
EMAIL_SUPERUSER = settings.EMAIL_SUPERUSER
DATABASE_NAME = settings.DATABASE_NAME  # eg: 'db.sqlite3'


class Command(BaseCommand):
    help = 'Command to deploy and backup the latest database.'

    def add_arguments(self, parser):
        parser.add_argument(
            '-b', '--backup', action='store_true',
            help='Just backup command confirmation.'
        )

    def success_info(self, info):
        return self.stdout.write(self.style.SUCCESS(info))

    def error_info(self, info):
        return self.stdout.write(self.style.ERROR(info))

    def handle(self, *args, **options):
        backup = options['backup']
        if backup == False:
            return self.print_help('manage.py', 'dbackup')

        # Removing media files, if you need to remove all media files
        # os.system('rm -rf media/images/')
        # self.success_info("[+] Removed media files at `media/images/`")

        # Backing up and removing the database `db.sqlite3`
        if os.path.isfile(DATABASE_NAME):
            # backup the latest database, eg to: `db.2017-02-03.sqlite3`
            backup_database = 'db.%s.sqlite3' % time.strftime('%Y-%m-%d')
            os.rename(DATABASE_NAME, backup_database)
            self.success_info("[+] Backed up the database `%s` to %s" % (DATABASE_NAME, backup_database))
            # os.rename has already moved the old file out of the way,
            # so a separate os.remove() is not needed (and would fail)
            self.success_info("[+] Removed %s" % DATABASE_NAME)

        # Removing all migration files for `yourapp`
        def remove_migrations(path):
            exclude_files = ['__init__.py', '.gitignore']
            path = os.path.join(settings.BASE_DIR, path)
            filelist = [
                f for f in os.listdir(path)
                if f.endswith('.py')
                and f not in exclude_files
            ]
            for f in filelist:
                os.remove(path + f)
            self.success_info('[+] Removed migration files for {}'.format(path))

        # do remove all migration files
        remove_migrations('yourapp/migrations/')

        # Removing all `.pyc` files
        os.system('find . -name *.pyc -delete')
        self.success_info('[+] Removed all *.pyc files.')

        # Creating database migrations
        # These commands should re-generate the new database, eg: `db.sqlite3`
        os.system('python manage.py makemigrations')
        os.system('python manage.py migrate')
        self.success_info('[+] Created database migrations.')

        # Creating a superuser
        user = User.objects.create_superuser(
            username=USERNAME_SUPERUSER,
            password=PASSWORD_SUPERUSER,
            email=EMAIL_SUPERUSER
        )
        user.save()
        self.success_info('[+] Created a superuser for `{}`'.format(USERNAME_SUPERUSER))
Set up this command with crontab:
$ sudo crontab -e
And add the following lines:
# [minute] [hour] [day of month] [month] [day of week]
59 23 * * * source ~/path/to/yourenv/bin/activate && cd ~/path/to/yourenv/yourproject/ && ./manage.py dbackup -b
But if you deploy on PythonAnywhere, you just need to add a scheduled task:
Daily at [hour] : [minute] UTC, ... fill in hour=23 and minute=59
source /home/yourusername/.virtualenvs/yourenv/bin/activate && cd /home/yourusername/yourproject/ && ./manage.py dbackup -b
Update 1
I suggest updating the commands that shell out to manage.py, such as os.system('python manage.py makemigrations'), to use the call_command function:
from django.core.management import call_command
call_command('collectstatic', verbosity=3, interactive=False)
call_command('migrate', 'myapp', verbosity=3, interactive=False)
...is equal to the following commands typed in terminal:
$ ./manage.py collectstatic --noinput -v 3
$ ./manage.py migrate myapp --noinput -v 3
See running management commands from django docs.
Update 2
The previous setup is for when you need to re-deploy your project with a fresh database. If you only want to back up the database under a dated name, you can use shutil.copyfile:
import os
import time
import shutil

from django.conf import settings
from django.core.management.base import (BaseCommand, CommandError)

DATABASE_NAME = settings.DATABASE_NAME  # eg: 'db.sqlite3'


class Command(BaseCommand):
    help = 'Command to deploy and backup the latest database.'

    def add_arguments(self, parser):
        parser.add_argument(
            '-b', '--backup', action='store_true',
            help='Just backup command confirmation.'
        )

    def success_info(self, info):
        return self.stdout.write(self.style.SUCCESS(info))

    def error_info(self, info):
        return self.stdout.write(self.style.ERROR(info))

    def handle(self, *args, **options):
        backup = options['backup']
        if backup == False:
            return self.print_help('manage.py', 'dbackup')

        if os.path.isfile(DATABASE_NAME):
            # backup the latest database, eg to: `db.2017-02-28.sqlite3`
            backup_database = 'db.%s.sqlite3' % time.strftime('%Y-%m-%d')
            shutil.copyfile(DATABASE_NAME, backup_database)
            self.success_info("[+] Backed up the database `%s` to %s" % (DATABASE_NAME, backup_database))

Using python to update a file on google drive

I have the following script to upload a file to Google Drive using Python 2.7. As it stands, it uploads a new copy of the file, but I want the existing file updated/overwritten instead. I can't find help in the Google Drive API references and guides for Python. Any suggestions?
from __future__ import print_function
import os

from apiclient.discovery import build
from httplib2 import Http
from oauth2client import file, client, tools

try:
    import argparse
    flags = argparse.ArgumentParser(parents=[tools.argparser]).parse_args()
except ImportError:
    flags = None

# Gain access to google drive
SCOPES = 'https://www.googleapis.com/auth/drive.file'
store = file.Storage('storage.json')
creds = store.get()
if not creds or creds.invalid:
    flow = client.flow_from_clientsecrets('client_secret.json', SCOPES)
    creds = tools.run_flow(flow, store, flags) \
        if flags else tools.run(flow, store)
DRIVE = build('drive', 'v3', http=creds.authorize(Http()))

# The file that is being uploaded
FILES = (
    ('all-gm-keys.txt', 'application/vnd.google-apps.document'),  # in google doc format
)

# Where the file ends up on google drive
for filename, mimeType in FILES:
    folder_id = '0B6V-MONTYPYTHONROCKS-lTcXc'  # Not the real folder id
    metadata = {'name': filename, 'parents': [folder_id]}
    if mimeType:
        metadata['mimeType'] = mimeType
    res = DRIVE.files().create(body=metadata, media_body=filename).execute()
    if res:
        print('Uploaded "%s" (%s)' % (filename, res['mimeType']))
I think you are looking for the update method. Here is a link to the documentation, which includes an example of overwriting a file in Python. Using the official Google client API instead of raw HTTP requests should also make the task easier.
from apiclient import errors
from apiclient.http import MediaFileUpload

# ...

def update_file(service, file_id, new_title, new_description, new_mime_type,
                new_filename, new_revision):
    """Update an existing file's metadata and content.

    Args:
        service: Drive API service instance.
        file_id: ID of the file to update.
        new_title: New title for the file.
        new_description: New description for the file.
        new_mime_type: New MIME type for the file.
        new_filename: Filename of the new content to upload.
        new_revision: Whether or not to create a new revision for this file.
    Returns:
        Updated file metadata if successful, None otherwise.
    """
    try:
        # First retrieve the file from the API.
        file = service.files().get(fileId=file_id).execute()

        # File's new metadata.
        file['title'] = new_title
        file['description'] = new_description
        file['mimeType'] = new_mime_type

        # File's new content.
        media_body = MediaFileUpload(
            new_filename, mimetype=new_mime_type, resumable=True)

        # Send the request to the API.
        updated_file = service.files().update(
            fileId=file_id,
            body=file,
            newRevision=new_revision,
            media_body=media_body).execute()
        return updated_file
    except errors.HttpError, error:
        print 'An error occurred: %s' % error
        return None
Link to the example: https://developers.google.com/drive/api/v2/reference/files/update#examples
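Note that this example targets the v2 API (title, newRevision), while the question's script is built against v3. A minimal v3 equivalent is sketched below (assuming file_id holds the ID of the previously uploaded file; v3 has no newRevision parameter, and the metadata key is name rather than title):

from apiclient.http import MediaFileUpload

def overwrite_file(drive, file_id, filename):
    # v3: replace the binary content of an existing file in place
    media_body = MediaFileUpload(filename, resumable=True)
    return drive.files().update(
        fileId=file_id,
        media_body=media_body).execute()

# usage with the DRIVE service from the question:
# overwrite_file(DRIVE, file_id, 'all-gm-keys.txt')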

Django equivalent to paster for backend processes

I use Pylons in my job, but I'm new to Django. I'm making an RSS filtering application, so I'd like to have two backend processes that run on a schedule: one to crawl RSS feeds for each user, and another to determine the relevance of individual posts relative to users' past preferences. In Pylons, I'd just write paster commands to update the db with that data. Is there an equivalent in Django? E.g., is there a way to run the equivalent of python manage.py shell in a non-interactive mode?
I think that's what Custom Management Commands are there for.
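For reference, a minimal management command sketch (a hypothetical crawlfeeds command saved as yourapp/management/commands/crawlfeeds.py), which can then be run non-interactively from cron as python manage.py crawlfeeds:

from django.core.management.base import BaseCommand

class Command(BaseCommand):
    help = 'Crawl RSS feeds for each user.'

    def handle(self, *args, **options):
        # non-interactive backend work goes here
        self.stdout.write('crawling feeds...')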
Yes, this is actually how I run my cron backup scripts. You just need to load your virtualenv (if you're using virtual environments) and your project settings.
I hope you can follow this: after the line # manage.py shell in the script below, you can write your code just as if you were in manage.py shell.
You can import your virtualenv like so:
import site
site.addsitedir(VIRTUALENV_PATH + '/lib/python2.6/site-packages')
You can then add the django project to the path
import sys
sys.path.append(DJANGO_ROOT)
sys.path.append(PROJECT_PATH)
Next you load the django settings and chdir to the django project
import os
from django.core.management import setup_environ
from myproject import settings
setup_environ(settings)
os.chdir(PROJECT_PATH)
After this point your environment will be set just like if you started with manage.py shell
You can then run anything just as if you were in the interactive shell.
from application.models import MyModel

for element in MyModel.objects.all():
    element.delete()
Here is my backup file in full. I've abstracted the process out into functions. This would be named daily_backup and be put into the cron.daily folder to be run daily. You can see how to set up the environment and modify the functionality as needed.
#!/usr/bin/env python
import sys
import os
import site
import logging
from datetime import datetime

PROJECT_NAME = 'myproject'
DJANGO_ROOT = '/var/www/django'
PROJECT_PATH = DJANGO_ROOT + '/' + PROJECT_NAME
VIRTUALENV_PATH = '/var/www/envs/' + PROJECT_NAME
BACKUP_DIR = '/var/www/backups/%s/daily' % (PROJECT_NAME)
TODAY = datetime.now().strftime('%Y%m%d-%H%M%S')
FILE_NAME = PROJECT_NAME + '_' + TODAY

site.addsitedir(VIRTUALENV_PATH + '/lib/python2.6/site-packages')
sys.path.append(DJANGO_ROOT)
sys.path.append(PROJECT_PATH)

from django.core.management import setup_environ
from myproject import settings
setup_environ(settings)
os.chdir(PROJECT_PATH)

# manage.py shell
from django.conf import settings

logging.basicConfig(level=logging.WARN)


def _setup():
    if not os.path.exists(BACKUP_DIR):
        logging.debug('Creating backup directory ' + BACKUP_DIR)
        os.mkdir(BACKUP_DIR)
        os.mkdir(BACKUP_DIR + '/databases')
    else:
        logging.debug('Using backup directory ' + BACKUP_DIR)


def _remove_old():
    logging.debug('Cleaning out old backups')
    # keep past 7 days
    command = "find %s* -name '%s*' -mtime +7 -exec rm {} \\;" % (BACKUP_DIR, PROJECT_NAME)
    os.system(command)


def _create_backup():
    logging.debug('Backup database')
    if settings.DATABASE_ENGINE == 'mysql':
        command = 'mysqldump -u %s --password=%s %s > %s/databases/%s.sql' % (
            settings.DATABASE_USER, settings.DATABASE_PASSWORD,
            settings.DATABASE_NAME, BACKUP_DIR, FILE_NAME)
    else:
        command = '%s/bin/python %s/manage.py dumpdata --indent=4 > %s/databases/%s.json' % (
            VIRTUALENV_PATH, PROJECT_PATH, BACKUP_DIR, FILE_NAME)
    os.system(command)

    logging.debug('Backup project')
    command = 'tar -czf %s/%s.tgz -C %s %s/' % (BACKUP_DIR, FILE_NAME, DJANGO_ROOT, PROJECT_NAME)
    os.system(command)


if __name__ == '__main__':
    _setup()
    _remove_old()
    _create_backup()
Sounds like you need some twod.wsgi in your life: http://packages.python.org/twod.wsgi/