Log SQL queries even if DEBUG=False - django

This is my logging config in settings.py
# Logging configuration (dictConfig schema, version 1).
LOGGING = {
    'version': 1,
    # Keep loggers configured before this dict is applied.
    'disable_existing_loggers': False,
    'handlers': {
        # Plain stream handler (stderr by default).
        'console': {
            'class': 'logging.StreamHandler',
        },
        # Appends records to logs/django.log under the project root.
        'file': {
            'class': 'logging.FileHandler',
            'filename': os.path.join(BASE_DIR, 'logs', 'django.log'),
        },
    },
    'loggers': {
        # Catch-all for Django's own 'django.*' loggers via propagation.
        'django': {
            'handlers': ['file', 'console'],
            'level': 'DEBUG',
        },
        'django.template': {
            'handlers': ['file', 'console'],
            'level': 'INFO',
        },
        # Project application logger.
        'App': {
            'handlers': ['file', 'console'],
            'level': 'DEBUG',
        },
    },
}
There's a behavior that I can't explain: if I run with debug=True I can see all SQL queries being logged to the console, but when debug=False that doesn't happen, even if I don't change the configuration above.
Why is this? How can I determine in my logging config whether or not SQL queries are logged to the console?

Query logging goes deeper than the logging configuration: Django wraps the database cursor based on the truthiness of a computed property.
From Line 226, django.db.backends.base.base v2.2.6 sources
def _prepare_cursor(self, cursor):
    """
    Validate the connection is usable and perform database cursor wrapping.
    """
    self.validate_thread_sharing()
    # The debug cursor (which records/logs every query) is chosen purely on
    # the truthiness of the queries_logged property below.
    if self.queries_logged:
        wrapped_cursor = self.make_debug_cursor(cursor)
    else:
        wrapped_cursor = self.make_cursor(cursor)
    return wrapped_cursor
This computed property evaluates to True when the debug cursor is forced on the connection or when DEBUG is enabled in the project settings.
From Line 149, django.db.backends.base.base v2.2.6 sources
@property
def queries_logged(self):
    """True when SQL queries should be logged for this connection.

    Either the debug cursor has been forced on this connection
    (force_debug_cursor) or settings.DEBUG is enabled.
    """
    # Short-circuits: settings is only consulted when the cursor is not forced.
    return self.force_debug_cursor or settings.DEBUG
You should be able to get a proxy to the default connection and force the debug cursor to be used.
from django.db import connection
connection.force_debug_cursor = True
I however advise against this approach and favor query audits in the database.

That's because the existing loggers have already filtered your logs — in this case, Django's own default logger configuration.
DEFAULT_LOGGING = {
'filters': {
'require_debug_true': {
'()': 'django.utils.log.RequireDebugTrue',
},
},
'handlers': {
'console': {
'level': 'INFO',
'filters': ['require_debug_true'],
'class': 'logging.StreamHandler',
},
...
}
and django.utils.log.RequireDebugTrue
class RequireDebugTrue(logging.Filter):
    """Logging filter that passes records only while settings.DEBUG is True."""

    def filter(self, record):
        # Falsy return drops the record, so nothing passes once DEBUG is False.
        return settings.DEBUG
To solve this issue, you could disable the existing loggers by setting 'disable_existing_loggers': True, or you can override the filter like so:
LOGGING = {
...
'handlers': {
'console': {
'class': 'logging.StreamHandler',
'filters': [],
},
...
}

Step 1: Write patch_django function in patchers.py
# Created by BaiJiFeiLong@gmail.com at 2022/6/6
import importlib
import importlib.util
import sys
# noinspection DuplicatedCode
def patch_module(module_name, modification_func):
    """Re-import *module_name* from modified source text.

    The module's source is read via its loader, passed through
    *modification_func* (str -> str), compiled, executed into a fresh
    module object, and that object is installed in ``sys.modules`` so
    subsequent imports see the patched version.
    """
    spec = importlib.util.find_spec(module_name)
    patched_source = modification_func(spec.loader.get_source(module_name))
    patched_module = importlib.util.module_from_spec(spec)
    byte_code = compile(patched_source, patched_module.__spec__.origin, "exec")
    exec(byte_code, patched_module.__dict__)
    sys.modules[module_name] = patched_module
    return patched_module
def patch_django():
    """Force every Django DB connection to use the debug (query-logging) cursor.

    Rewrites django.db.backends.base.base so force_debug_cursor defaults
    to True; must run before Django itself is imported.
    """
    def enable_debug_cursor(source):
        return source.replace(
            "self.force_debug_cursor = False",
            "self.force_debug_cursor = True")

    patch_module("django.db.backends.base.base", enable_debug_cursor)
Step 2: Call patch_django() before Django is imported.
manage.py
__import__("importlib").import_module("patchers").patch_django() # noqa
import os
import sys
def main():
    """Run administrative tasks from the command line."""
    # Point Django at the project settings before anything imports them.
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'mysite.settings')
    try:
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        raise ImportError(
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        ) from exc
    # Dispatch to the management command named in argv.
    execute_from_command_line(sys.argv)


if __name__ == '__main__':
    main()
wsgi.py
__import__("importlib").import_module("patchers").patch_django() # noqa
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'mysite.settings')
application = get_wsgi_application()
asgi.py
__import__("importlib").import_module("patchers").patch_django() # noqa
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'mysite.settings')
application = get_asgi_application()
Step 3: Make sure the django.db.backends logger is set to DEBUG level
'django.db.backends': {
'level': 'DEBUG',
},

Related

Django 3 Logging - custom app logger isn't used

I have this app structure
app/
customApp/
urls.py
app/
settings.py
manage.py
In settings.py this is my logging config:
LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,
    'handlers': {
        'console': {
            'class': 'logging.StreamHandler',
        },
    },
    'root': {
        'handlers': ['console'],
        'level': 'ERROR',
    },
    # NOTE(review): 'customApp' is a sibling of 'root' here, not nested under
    # a 'loggers' key, so dictConfig never creates a 'customApp' logger --
    # records from customApp.* fall through to the root logger instead.
    'customApp': {
        'handlers': ['console'],
        'level': 'DEBUG',
        'propagate': True,
    }
}
I am trying to trigger the customApp logger in customApp.urls like this:
from django.urls import path
from django.http import HttpResponse
from logging import getLogger
logger = getLogger(__name__)
def make_response(request):
    """View that emits one record per logging level, then returns a page."""
    # if handler is set to console, print result is ALWAYS logged
    # this is the logging level hierarchy debug < info < warning < error
    logger.debug(f'DEBUG logged in {__name__}')
    logger.info(f'INFO logged in {__name__}')
    logger.warning(f'WARNING logged in {__name__}')
    logger.error(f'ERROR logged in {__name__}')
    return HttpResponse('<h1>Ok!</h1>')


urlpatterns = [
    path('', make_response)
]
When running the Django app and visiting this path, the root logger is used, so only the ERROR message is logged.
ERROR logged in customApp.urls
Why isn't my customApp logger being used?
Instead of writing in setting.py, I usually create a new file utils.py
utils.py
import logging
from os import path
def setLogger():
    """Return the 'myservice' logger configured with a file handler.

    Configures the handler only on the first call: without the guard,
    every call (e.g. one per importing view module) would append another
    FileHandler to the same logger and each record would be written to
    the file multiple times.
    """
    logger = logging.getLogger('myservice')
    if not logger.handlers:  # configure once per process
        BASE_DIR = path.dirname(path.dirname(__file__))
        LOG_DIR = path.join(BASE_DIR, 'myservice/logs/')
        # path.join instead of string concatenation for the final segment.
        hdlr = logging.FileHandler(path.join(LOG_DIR, 'myservice.log'))
        formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')
        hdlr.setFormatter(formatter)
        logger.addHandler(hdlr)
        logger.setLevel(logging.DEBUG)
    return logger
Then in the views import it
views.py
from myservice.utils import setLogger
from traceback import format_exc
logger = setLogger()
def fn_login(req):
    """Login view skeleton: logs the current traceback at several levels."""
    try:
        pass  # real login logic goes here; with only `pass`, except never runs
    except Exception as identifier:
        # format_exc() renders the traceback of the exception being handled.
        logger.error(format_exc())
        logger.info(format_exc())
        logger.warning(format_exc())

How configure Django logging to file for app

I am struggling with Django logging configuration. I have one app called "api" and I want to save to file all logs from this app. When I set up a logger to django everything works fine but when I change it to my app_name it doesn't.
Here is my configuration:
File structure:
email_api
api
tasks.py
email_api
celery.py
settings
logs
email.log
My logging configuration:
LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,
    'handlers': {
        # Everything from DEBUG up goes to a single file.
        'file': {
            'level': 'DEBUG',
            'class': 'logging.FileHandler',
            # Relative path: resolved against the process working directory.
            'filename': 'logs/email.log',
        },
    },
    'loggers': {
        # Applies to the logger named 'api' and, via propagation,
        # its 'api.*' children.
        'api': {
            'handlers': ['file'],
            'level': 'DEBUG',
            'propagate': True,
        },
    },
}
tasks.py file where I logging:
import logging
logger = logging.getLogger(__name__)
@app.task(bind=True, default_retry_delay=3, max_retries=3,)
def send_email(self, data, email_id):
message = create_message(data, email)
try:
logger.debug("Log Message Here")
message.send()
Keys in the LOGGING['loggers'][...] dict are names of loggers. You have configured logging with api as a name of the logger.
In order to write to this logger, you should request it by that name:
logger = logging.getLogger('api')
...
logger.debug("Log Message Here")

How to log exceptions and errors into a file when using a Django stand alone script

I have created a stand alone script in Django, but although the logging seems to be correctly configured, it fails to log the stderr into a Django log file. I am using Python 3.6 with Django 2.1.
The content of the Django script my_script.py:
import os
import django
import logging
os.environ['DJANGO_SETTINGS_MODULE'] = 'my_project.settings'
django.setup()
logger = logging.getLogger('my_script')
def main():
    logger.debug('This message is logged')
    # Raised out of main(); nothing catches it, so it goes to stderr via
    # the default sys.excepthook -- NOT through the logging system.
    raise Exception('Error messages (stderr) are NOT logged!')


if __name__ == "__main__":
    main()
My logging configuration in my_project/settings.py:
LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,
    'formatters': {
        'verbose': {
            'format': '%(levelname)s %(asctime)s %(module)s %(process)d %(thread)d %(message)s'
        },
        'simple': {
            'format': '%(levelname)s %(message)s'
        },
    },
    'handlers': {
        'console': {
            'class': 'logging.StreamHandler',
        },
        # (sic: key is 'my_scrip_file' -- the logger below refers to it by
        # the same spelling, so the reference resolves.)
        'my_scrip_file': {
            'level':'DEBUG',
            'class':'logging.handlers.RotatingFileHandler',
            # Relative path: resolved against the working directory.
            'filename': 'logs/my_script.log',
            'maxBytes': 1024*1024*5, # 5 MB
            'backupCount': 5,
            'formatter':'verbose',
        },
    },
    'loggers': {
        'my_script': {
            'handlers': ['my_scrip_file'],
            'level': os.getenv('DJANGO_LOG_LEVEL', 'DEBUG'),
            'propagate': True
        },
    },
}
Taking into account that the script must be run via python my_script.py, does somebody know how to get stderr to store messages into the log file?
Redirecting stderr to the log file would be an option, but I believe a simple try clause with a traceback is simpler and neat:
# my_script.py
import os
import django
import traceback
import logging
os.environ['DJANGO_SETTINGS_MODULE'] = 'my_project.settings'
django.setup()
logger = logging.getLogger('my_script')
def main():
    logger.debug('This message is logged')
    raise Exception('Error messages (stderr) now are also logged!')


if __name__ == "__main__":
    try:
        main()
    except Exception as e:
        # Route the full traceback through the configured logger...
        logger.error(traceback.format_exc())
        # ...then re-raise so the process still exits non-zero.
        raise e

Django logging does not write debug.log file issue

I just set up a new server, and while checking it I got a 500 error. I expected the debug.log file to contain the error message; however, when I checked, the file was empty — nothing had been written. I changed the logger settings many times, but the file is still empty, and I can't fix the error because I can't see what is wrong.
This is my views.py
logger = logging.getLogger(__name__)
I put this line in to enable logging.
settings.py
LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,
    'filters': {
        # django.utils.log.RequireDebugFalse passes records only while
        # settings.DEBUG is False.
        'require_debug_false': {
            '()': 'django.utils.log.RequireDebugFalse'
        }
    },
    'formatters': {
        'simple': {
            'format': '%(asctime)s %(filename)s:%(lineno)d %(message)s',
        }
    },
    'handlers': {
        'file': {
            'level': 'DEBUG',
            'filters': ['require_debug_false'],
            'class': 'logging.handlers.RotatingFileHandler',
            'filename': '/var/log/service/debug.log',
            'formatter': 'simple',
        },
    },
    'loggers': {
        # Only ERROR and above from the 'django' logger reach the file.
        'django': {
            'handlers': ['file'],
            'level': 'ERROR',
            'propagate': True,
        },
    }
}
# With DEBUG on, file logging is removed entirely and a local 'log'
# directory is created instead.
if DEBUG:
    del LOGGING['loggers']['django']
    del LOGGING['handlers']['file']
    if not os.path.exists('log'):
        os.makedirs('log')
I attached my wsgi.py
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "project.settings")
application = get_wsgi_application()
This is settings.py where import local settings.
try:
from local_settings import *
except ImportError:
raise ImportError('You must create local_settings.py on project root')
This is local_settings.py
DEBUG=False
You don't have logger named views in your settings. It's only logger named django there. So try to use it in views.py:
logger = logging.getLogger('django')
what is the setting of DEBUG on your server?
because:
if DEBUG:
del LOGGING['loggers']['django']
del LOGGING['handlers']['file']
if DEBUG is True you are deleting the logger and handler...

Celery periodic tasks not running when root logger is configured to use raven

I'm using Django with Celery in order to execute periodic tasks and Raven as a sentry client.
So far I've managed to run several apps with celery beat and all worked fine.
For some reason, in a recent app I'm working on, when I set the root logger to use the 'sentry' handler, the periodic tasks are not running.
When setting the root logger to use only the 'console' handler, it does work.
I can't wrap my head around what causing this issue.
This is my logging dict:
LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,
    # Default destination for records no specific logger claims.
    'root': {
        'level': os.environ.get('LOG_LEVEL','INFO'),
        'handlers': ['console'],
    },
    'filters': {
        'require_debug_false': {
            '()': 'django.utils.log.RequireDebugFalse'
        }
    },
    'handlers': {
        # E-mails site admins; filtered to run only when DEBUG is False.
        'mail_admins': {
            'level': 'ERROR',
            'filters': ['require_debug_false'],
            'class': 'django.utils.log.AdminEmailHandler'
        },
        # Ships WARNING and above to Sentry via Raven.
        'sentry': {
            'level': 'WARNING',
            'class': 'raven.contrib.django.raven_compat.handlers.SentryHandler',
        },
        'console': {
            'level': 'DEBUG',
            'class': 'logging.StreamHandler',
        },
    },
    'loggers': {
        'django.request': {
            'handlers': ['mail_admins'],
            'level': 'ERROR',
            'propagate': True,
        },
        # Raven's own logger echoes to both console and sentry.
        'raven': {
            'level': 'WARNING',
            'handlers': ['console','sentry'],
            'propagate': True,
        },
        'celery': {
            'handlers': ['console'],
            'level': 'DEBUG',
        },
    }
}
And the env var controlling the root logger handlers:
# Opt-in via environment variable: only the exact string 'TRUE' enables it.
ENABLE_SENTRY = os.environ.get('ENABLE_SENTRY', 'FALSE') == 'TRUE'
if (ENABLE_SENTRY):
    # Replaces (not extends) the root handler list set in LOGGING above.
    LOGGING['root']['handlers'] = ['console','sentry']
Note: Seems like the root logger doesn't log to console after that change
This is how I run the celery beat and worker:
python manage.py celery worker -E -B --maxtasksperchild=1000
--concurrency=10 --loglevel=DEBUG -Ofair
This is part of the packages I'm using:
celery==3.1.17 django-celery==3.1.16 raven==5.0.0 Django==1.8.7
This is my celery.py file:
"""
This module will hold celery configuration
"""
from __future__ import absolute_import
from django.conf import settings
import os
from celery import Celery
# set the default Django settings module for the 'celery' program.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'ltg_backend_app.settings')
# init the celery app
app = Celery('ltg_backend_app')
# Using a string here means the worker will not have to
# pickle the object when using Windows.
app.config_from_object('django.conf:settings')
app.autodiscover_tasks(lambda: settings.INSTALLED_APPS)
Any help would be greatly appreciated!
Did you configure Raven to trap the necessary signals needed in order to work with Celery? Here's how I configure my Celery app:
import celery
from django.conf import settings
import raven
from raven.contrib.celery import register_signal, register_logger_signal
class Celery(celery.Celery):
    """Celery app subclass that wires Raven/Sentry into Celery's signal
    handlers during configuration."""

    def on_configure(self):
        #
        # https://docs.sentry.io/clients/python/integrations/celery/
        #
        # Build a real client only when a DSN is configured.
        if 'dsn' in settings.RAVEN_CONFIG and settings.RAVEN_CONFIG['dsn']:
            client = raven.Client(settings.RAVEN_CONFIG['dsn'])
        else:
            client = raven.Client()  # should do nothing
        # register a custom filter to filter out duplicate logs
        register_logger_signal(client)
        # hook into the Celery error handler
        register_signal(client)
app = Celery('foobar')
app.config_from_object('django.conf:settings')
app.autodiscover_tasks(lambda: settings.INSTALLED_APPS)
see here for more config options and details: https://docs.sentry.io/clients/python/integrations/celery/