I am new to logging and am having difficulty setting the logging level to DEBUG. I have created my own logger:
# logger.py
from pathlib import Path
import logging
# Create a custom logger
logger = logging.getLogger(__name__)
logger.propagate = False
logging.basicConfig(level=logging.DEBUG)
# Create handlers
c_handler = logging.StreamHandler()
f_handler = logging.FileHandler('my_log_file.log')
c_handler.setLevel(logging.DEBUG)
f_handler.setLevel(logging.DEBUG)
# Create formatters and add them to handlers
c_format = logging.Formatter('myapp: %(message)s')
f_format = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
c_handler.setFormatter(c_format)
f_handler.setFormatter(f_format)
# Add handlers to the logger
logger.addHandler(c_handler)
logger.addHandler(f_handler)
def log(frame, obj):
    """Create log message and return the object."""
    path = Path(frame[0][1])
    module = path.stem
    line = frame[0][2]
    message_text = f'{obj} <{module}> {line}'
    logger.warning(message_text)
    return obj
I can create log messages in my application with
log(inspect.stack(), f'Something happened here {myobj.name}: {myobj.value}')
but it will not create messages unless I use the warning level
logger.warning(message_text)
in the logger. If I change it to
logger.debug(message_text)
nothing happens.
I searched the application for WARNING, DEBUG and level but could find nothing that should affect my logger.
I assume there is another logger that is overriding my level (Django, perhaps).
Can anyone help?
Following the hint given by Willem Van Onsem, I added a LOGGING section to Django's settings.py and it now works as I expected:
LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,
    'handlers': {
        'console': {
            'class': 'logging.StreamHandler',
        },
    },
    'root': {
        'handlers': ['console'],
        'level': 'INFO',
    },
}
Related
I have this app structure
app/
    customApp/
        urls.py
    app/
        settings.py
    manage.py
In settings.py this is my logging config:
LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,
    'handlers': {
        'console': {
            'class': 'logging.StreamHandler',
        },
    },
    'root': {
        'handlers': ['console'],
        'level': 'ERROR',
    },
    'customApp': {
        'handlers': ['console'],
        'level': 'DEBUG',
        'propagate': True,
    }
}
I am trying to trigger the customApp logger in customApp.urls like this:
from django.urls import path
from django.http import HttpResponse
from logging import getLogger
logger = getLogger(__name__)
def make_response(request):
    # if handler is set to console, print result is ALWAYS logged
    # this is the logging level hierarchy: debug < info < warning < error
    logger.debug(f'DEBUG logged in {__name__}')
    logger.info(f'INFO logged in {__name__}')
    logger.warning(f'WARNING logged in {__name__}')
    logger.error(f'ERROR logged in {__name__}')
    return HttpResponse('<h1>Ok!</h1>')

urlpatterns = [
    path('', make_response)
]
When I run the Django app and go to this path, the root logger is used and only the ERROR message is logged:
ERROR logged in customApp.urls
Why isn't my customApp logger being used?
Instead of writing it in settings.py, I usually create a new file, utils.py:
utils.py
import logging
from os import path
def setLogger():
    logger = logging.getLogger('myservice')
    BASE_DIR = path.dirname(path.dirname(__file__))
    LOG_DIR = path.join(BASE_DIR, 'myservice/logs/')
    hdlr = logging.FileHandler(LOG_DIR + 'myservice.log')
    formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')
    hdlr.setFormatter(formatter)
    logger.addHandler(hdlr)
    logger.setLevel(logging.DEBUG)
    return logger
Then import it in the views:
views.py
from myservice.utils import setLogger
from traceback import format_exc
logger = setLogger()
def fn_login(req):
    try:
        pass
    except Exception as identifier:
        logger.error(format_exc())
        logger.info(format_exc())
        logger.warning(format_exc())
This is my logging config in settings.py
LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,
    'handlers': {
        'console': {
            'class': 'logging.StreamHandler',
        },
        'file': {
            'class': 'logging.FileHandler',
            'filename': os.path.join(BASE_DIR, 'logs', 'django.log'),
        },
    },
    'loggers': {
        'django': {
            'handlers': ['file', 'console'],
            'level': 'DEBUG',
        },
        'django.template': {
            'handlers': ['file', 'console'],
            'level': 'INFO',
        },
        'App': {
            'handlers': ['file', 'console'],
            'level': 'DEBUG',
        },
    },
}
There's a behavior that I can't explain: if I run with DEBUG=True I can see all SQL queries being logged to the console, but when DEBUG=False that doesn't happen, even if I don't change the configuration above.
Why is this? How can I determine in my logging config whether or not SQL queries are logged to the console?
Query logging goes way deeper as Django wraps the database cursor based on the truthiness of a computed property.
From Line 226, django.db.backends.base.base v2.2.6 sources
def _prepare_cursor(self, cursor):
    """
    Validate the connection is usable and perform database cursor wrapping.
    """
    self.validate_thread_sharing()
    if self.queries_logged:
        wrapped_cursor = self.make_debug_cursor(cursor)
    else:
        wrapped_cursor = self.make_cursor(cursor)
    return wrapped_cursor
This computed property is determined by whether the debug cursor is forced or debugging is enabled in the project settings.
From Line 149, django.db.backends.base.base v2.2.6 sources
@property
def queries_logged(self):
    return self.force_debug_cursor or settings.DEBUG
You should be able to get a proxy to the default connection and force the debug cursor to be used.
from django.db import connection
connection.force_debug_cursor = True
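To illustrate the effect, here is a rough sketch of my own (MyModel is a hypothetical model, not something from the question): once the debug cursor is in place, every executed statement is appended to connection.queries and also emitted to the 'django.db.backends' logger at DEBUG level.
from django.db import connection
from myapp.models import MyModel   # hypothetical model, for illustration only

connection.force_debug_cursor = True

MyModel.objects.count()             # runs one query through the wrapped (debug) cursor
print(connection.queries[-1])       # e.g. {'sql': 'SELECT COUNT(*) ...', 'time': '0.001'}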
I however advise against this approach and favor query audits in the database.
That's because an existing logger configuration is already filtering your logs. In this case it is Django's default logging config:
DEFAULT_LOGGING = {
    'filters': {
        'require_debug_true': {
            '()': 'django.utils.log.RequireDebugTrue',
        },
    },
    'handlers': {
        'console': {
            'level': 'INFO',
            'filters': ['require_debug_true'],
            'class': 'logging.StreamHandler',
        },
        ...
}
and django.utils.log.RequireDebugTrue
class RequireDebugTrue(logging.Filter):
    def filter(self, record):
        return settings.DEBUG
To solve this issue, you could disable the existing loggers by setting disable_existing_loggers: True, or you can override the filter like so:
LOGGING = {
    ...
    'handlers': {
        'console': {
            'class': 'logging.StreamHandler',
            'filters': [],
        },
        ...
}
Step 1: Write a patch_django function in patchers.py
# Created by BaiJiFeiLong#gmail.com at 2022/6/6
import importlib
import importlib.util
import sys
# noinspection DuplicatedCode
def patch_module(module_name, modification_func):
    spec = importlib.util.find_spec(module_name)
    source = spec.loader.get_source(module_name)
    source = modification_func(source)
    module = importlib.util.module_from_spec(spec)
    code = compile(source, module.__spec__.origin, 'exec')
    exec(code, module.__dict__)
    sys.modules[module_name] = module
    return module

def patch_django():
    patch_module("django.db.backends.base.base", lambda x: x.replace(
        "self.force_debug_cursor = False",
        "self.force_debug_cursor = True"))
Step 2: Call patch_django before Django is imported.
manage.py
__import__("importlib").import_module("patchers").patch_django() # noqa
import os
import sys
def main():
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'mysite.settings')
    try:
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        raise ImportError(
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        ) from exc
    execute_from_command_line(sys.argv)

if __name__ == '__main__':
    main()
wsgi.py
__import__("importlib").import_module("patchers").patch_django() # noqa
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'mysite.settings')
application = get_wsgi_application()
asgi.py
__import__("importlib").import_module("patchers").patch_django() # noqa
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'mysite.settings')
application = get_asgi_application()
Step 3: Make sure the django.db.backends logger is set to DEBUG level in LOGGING['loggers']:
'django.db.backends': {
    'level': 'DEBUG',
},
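For reference, a rough sketch of where that fragment lives (the 'console' handler here is my own assumption; any handler without the require_debug_true filter will do):
LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,
    'handlers': {
        'console': {
            'class': 'logging.StreamHandler',
        },
    },
    'loggers': {
        'django.db.backends': {
            'handlers': ['console'],
            'level': 'DEBUG',
        },
    },
}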
I am struggling with Django logging configuration. I have one app called "api" and I want to save all logs from this app to a file. When I set the logger name to django everything works fine, but when I change it to my app name it doesn't.
Here is my configuration:
File structure:
email_api
    api
        tasks.py
    email_api
        celery.py
        settings
    logs
        email.log
My logging configuration:
LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,
    'handlers': {
        'file': {
            'level': 'DEBUG',
            'class': 'logging.FileHandler',
            'filename': 'logs/email.log',
        },
    },
    'loggers': {
        'api': {
            'handlers': ['file'],
            'level': 'DEBUG',
            'propagate': True,
        },
    },
}
The tasks.py file where I am logging:
import logging
logger = logging.getLogger(__name__)
@app.task(bind=True, default_retry_delay=3, max_retries=3,)
def send_email(self, data, email_id):
    message = create_message(data, email)
    try:
        logger.debug("Log Message Here")
        message.send()
Keys in the LOGGING['loggers'][...] dict are names of loggers. You have configured logging with 'api' as the name of the logger.
In order to write to this logger, you should request it by that name:
logger = logging.getLogger('api')
...
logger.debug("Log Message Here")
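As a side note (my own addition, not part of the answer above): getLogger(__name__) can also reach this configuration, because a logger named 'api.tasks' propagates its records up to the configured 'api' logger — but only if the module really is imported as api.tasks. A quick way to check is to print the logger's name at runtime:
import logging

logger = logging.getLogger(__name__)
print(logger.name)  # must be 'api' or start with 'api.' for the 'api' config to apply

logger.debug("Log Message Here")  # propagates up to the 'api' logger's file handler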
I'm using Django 1.4.6 and I want to use the logging module integrated with Django to output the response content; however, I cannot see it in the log file.
Source sample shown here:
import logging
logger = logging.getLogger('__file__')
...
response = redirect(url)
logger.debug(response.content)
return response
Once you have configured your loggers, handlers, filters and formatters, you need to call it as follows:
import logging

# Standard instance of a logger with __name__
stdlogger = logging.getLogger(__name__)

response = redirect(url)
stdlogger.debug(response.content)
return response
The call to logging.getLogger() obtains (creating, if necessary) an instance of a logger. The logger instance is identified by a name. This name is used to identify the logger for configuration purposes.
By convention, the logger name is usually __name__.
The Python __name__ variable passed to getLogger automatically gives the logger the module's dotted package path as its name.
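A small sketch of the difference (assuming a module located at myapp/views.py; the 'myapp' name is only illustrative):
import logging

# In myapp/views.py, __name__ is the dotted module path:
logging.getLogger(__name__).name      # -> 'myapp.views'; matched by a configured 'myapp' logger via propagation
logging.getLogger('__file__').name    # -> '__file__'; a literal string that the LOGGING config will not match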
Please see the logging configuration in the settings file below.
I have a small change for your code:
logger = logging.getLogger(__name__)
Logging configuration in settings.py:
import sys  # needed for the console handler's 'stream' option

LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,
    'formatters': {
        'verbose': {
            'format': "[%(asctime)s] %(levelname)s %(message)s",
            'datefmt': "%d/%b/%Y %H:%M:%S"
        }
    },
    'handlers': {
        'file': {
            'level': 'DEBUG',
            'class': 'logging.FileHandler',
            'filename': '/var/log/django_practices.log',
            'formatter': 'verbose'
        },
        'console': {
            'level': 'DEBUG',
            'class': 'logging.StreamHandler',
            'stream': sys.stdout,
            'formatter': 'verbose'
        },
    },
    'loggers': {
        'name_your_app_django': {
            'handlers': ['file', 'console'],
            'level': 'DEBUG',
        }
    }
}
With this configuration, logs will be printed to the console and written to the file.
Note: change name_your_app_django to match your app's name.
I'm using djutils's async decorator, which has the nasty side effect of not sending traceback emails when an exception is raised, since it runs on a separate thread.
It does, however, have the following place to put a logger.
def worker_thread():
    while 1:
        func, args, kwargs = queue.get()
        try:
            func(*args, **kwargs)
        except:
            pass  # <-- log error here
        finally:
            queue.task_done()
I've confirmed this will work, but even with the try/except removed, it won't trip Django's traceback logger.
While it'd be pretty easy to tell it to write to a db/file on exception, I'd really like it to send a regular traceback as defined in settings. How can I do that?
Edit: the answer seems to involve django.utils.log.AdminEmailHandler, but I'm having a hard time finding an example.
Edit 2: Here's my current (99% likely to be wrong) attempt.
from django.utils.log import AdminEmailHandler
def worker_thread():
    while 1:
        func, args, kwargs = queue.get()
        try:
            func(*args, **kwargs)
        except:
            import logging
            from django.conf import settings
            print settings.EMAIL_HOST
            logger = logging.getLogger("async.logger")
            logger.exception("Async exploded")
            AdminEmailHandler
            pass  # <-- log error here
        finally:
            queue.task_done()
First, configure your logging settings in settings.py:
LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,
    'handlers': {
        'mail_admins': {
            'level': 'ERROR',
            'class': 'django.utils.log.AdminEmailHandler'
        }
    },
    'loggers': {
        'myproject': {
            'handlers': ['mail_admins'],
            'level': 'INFO',
            'propagate': True,
        },
        'django.request': {
            'handlers': ['mail_admins'],
            'level': 'ERROR',
            'propagate': True,
        },
    }
}
From now on, all loggers whose names start with 'myproject' will use the AdminEmailHandler.
Your code should look like this:
import logging

logger = logging.getLogger('myproject.optional.path')
# example
# logger = logging.getLogger('myproject.myapp.views')

def worker_thread():
    while 1:
        func, args, kwargs = queue.get()
        try:
            func(*args, **kwargs)
        except:
            logger.exception("Async exploded")
        finally:
            queue.task_done()
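One assumption the answer makes: AdminEmailHandler mails the addresses listed in ADMINS, so those and the email settings must be configured for the messages to actually go out, for example (placeholder values):
# settings.py -- placeholder values, adjust to your environment
ADMINS = [('Ops', 'ops@example.com')]
EMAIL_HOST = 'smtp.example.com'
SERVER_EMAIL = 'django@example.com'  # the From address Django uses for error mails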