Django logging settings configuration issue

My LOGGING settings are as follows:
LOGGING = {
    'version': 1,
    'disable_existing_loggers': True,
    'formatters': {
        'standard': {
            'format': "[%(asctime)s] %(levelname)s [%(name)s:%(lineno)s] %(message)s",
            'datefmt': "%d/%b/%Y %H:%M:%S"
        },
    },
    'handlers': {
        'null': {
            'level': 'DEBUG',
            'class': 'django.utils.log.NullHandler',
        },
        'logfile': {
            'level': 'DEBUG',
            'class': 'logging.handlers.RotatingFileHandler',
            'filename': LOG_ROOT + "/logfile",
            'maxBytes': 50000,
            'backupCount': 2,
            'formatter': 'standard',
        },
        'console': {
            'level': 'INFO',
            'class': 'logging.StreamHandler',
            'formatter': 'standard'
        },
    },
    'loggers': {
        'django': {
            'handlers': ['console'],
            'propagate': True,
            'level': 'WARN',
        },
        'django.db.backends': {
            'handlers': ['console'],
            'level': 'DEBUG',
            'propagate': False,
        },
        'home': {
            'handlers': ['console', 'logfile'],
            'level': 'DEBUG',
        },
    }
}
I am calling it like this:
import logging
log = logging.getLogger('home')
log.error("Hey there it works!!")
but I am getting the error: No handlers could be found for logger "home"
It seems I am missing something in the configuration. I also went through past SO questions on this, but couldn't figure it out.
What is wrong? Any clue?
Thanks in advance.

Related

How to log Django warnings and errors to log file in Production?

What I want to achieve is that warnings and errors that happen in production (i.e. DEBUG=False) are logged to a rotating log file.
I tried this:
LOGGING = {
    'version': 1,
    'disable_existing_loggers': True,
    'handlers': {
        'file': {
            'level': 'WARNING',
            'class': 'logging.FileHandler',
            'filename': 'staging.log',
        },
    },
    'loggers': {
        '': {
            'handlers': ['file'],
            'level': 'WARNING',
            'propagate': True,
        },
        'django': {
            'handlers': ['file'],
            'level': 'WARNING',
            'propagate': True,
        },
        'django.request': {
            'handlers': ['file'],
            'level': 'WARNING',
            'propagate': True,
        },
    },
}
However, the above logs everything, not just messages of log level WARNING and above.
You can use the Django logger as follows.
In your views.py:
import logging
logger = logging.getLogger(__name__)
Then you can record messages using logger.error(), logger.warning(), or logger.info().
It will create a log file in your main project directory and list all the logging details.
See this:
LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,
    'formatters': {
        'verbose': {
            'format': '{levelname} {asctime} {module} {process:d} {thread:d} {message}',
            'style': '{',
        },
        'simple': {
            'format': '{levelname} {message}',
            'style': '{',
        },
    },
    'handlers': {
        'logfile': {
            'level': 'DEBUG',
            'class': 'logging.handlers.RotatingFileHandler',
            'filename': "yourproject_name.log",
            'maxBytes': 100000,
            'backupCount': 2,
            'formatter': 'verbose',
        },
    },
    'loggers': {
        'django': {
            'handlers': ['logfile'],
            'level': 'INFO',
            'propagate': True,
        },
        'apps': {
            'handlers': ['logfile'],
            'level': 'DEBUG',
            'propagate': True,
        },
    },
}
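A minimal sketch of how these names line up, assuming a hypothetical apps/views.py module: logging.getLogger(__name__) yields a dotted name such as 'apps.views', and those records roll up to the 'apps' logger configured above, so they end up in yourproject_name.log.
# apps/views.py (hypothetical path) -- __name__ resolves to "apps.views",
# a child of the "apps" logger configured above
import logging

from django.http import HttpResponse

logger = logging.getLogger(__name__)

def index(request):
    logger.info("index requested: %s", request.path)  # written to yourproject_name.log
    if not request.user.is_authenticated:
        logger.warning("anonymous access to %s", request.path)
    return HttpResponse("ok")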

Django server not working with AzureLogHandler (opencensus)

I'm trying to connect my Django project logs to Azure Application Insights using OpenCensus. The middleware for monitoring requests works well, but I also want to send telemetry logs (not just requests) to Azure. Here is my Django LOGGING configuration:
LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,
    'formatters': {
        'verbose': {
            'format': '%(asctime)s %(levelname).3s %(process)d %(name)s : %(message)s',
        },
        'simple': {
            'format': '%(asctime)s %(levelname)-7s : %(message)s',
        },
    },
    'filters': {
        'require_debug_false': {
            '()': 'django.utils.log.RequireDebugFalse',
        }
    },
    'handlers': {
        'console': {
            'class': 'logging.StreamHandler',
            'formatter': 'verbose',
        },
        'azure': {
            'formatter': 'simple',
            'class': 'opencensus.ext.azure.log_exporter.AzureLogHandler',
            'connection_string': 'InstrumentationKey=XXXX-XXXX-XXXX-XXXX'
        },
        'mail_admins': {
            'level': 'ERROR',
            'filters': ['require_debug_false'],
            'class': 'django.utils.log.AdminEmailHandler'
        }
    },
    'loggers': {
        '': {
            'level': os.environ.get('LOGLEVEL', 'INFO'),
            'handlers': ['console', 'azure'],
        },
        'devdebug': {
            'handlers': ['console'],
            'level': 'INFO',
            'propagate': False,
        },
        'django': {
            'handlers': ['console', 'mail_admins'],
            'level': os.environ.get('LOGLEVEL', 'INFO'),
            'propagate': False,
        }
    },
}
Without the 'azure' handler in my root logger config, everything works fine. With the 'azure' handler, the server starts but doesn't work: I am unable to connect to it. I really don't know what is happening, as it doesn't show me any unusual logs (even with LOGLEVEL=DEBUG).
My handler configuration should be good, as I do receive logs in Azure when I run any manage command. Even when I run manage runsslserver localhost:53215, I receive logs, but it is as if my server is not running when I try to reach it.
I have set up the logging via settings and have it working (although not using runsslserver). I have set the connection_string as an environment variable.
The problem I am having is that the 'django' logger does not work in production. All other loggers are sending messages, but not that one. It's the classic case of "works on my machine and on our dev servers, but not in prod".
import os

from opencensus.trace import config_integration

# Register the opencensus trace integrations
INTEGRATIONS = ['postgresql', 'httplib', 'logging', 'threading']
config_integration.trace_integrations(INTEGRATIONS)

# Set the App Insights key as an env variable so it can be used by the logging system
os.environ['APPLICATIONINSIGHTS_CONNECTION_STRING'] = 'InstrumentationKey=XXXXXXXXXXXXXXXXXXXXXXX'
LOGGING = {
    'version': 1,
    'disable_existing_loggers': True,  # or False; if True, make sure you also define 'django' and '' (root) loggers
    'filters': {
        'require_debug_false': {
            '()': 'django.utils.log.RequireDebugFalse',
        },
        'require_debug_true': {
            '()': 'django.utils.log.RequireDebugTrue',
        },
    },
    'formatters': {
        'simple': {
            'format': '[%(asctime)s] %(levelname)s %(message)s',
            'datefmt': '%Y-%m-%d %H:%M:%S'
        },
        'verbose': {
            'format': '[%(asctime)s] %(levelname)s [%(name)s.%(funcName)s:%(lineno)d] %(message)s',
            'datefmt': '%Y-%m-%d %H:%M:%S'
        },
        'azure_verbose': {
            'format': '[%(asctime)s] %(levelname)s [%(name)s.%(funcName)s:%(lineno)d] %(message)s traceId=%(traceId)s spanId=%(spanId)s',
            'datefmt': '%Y-%m-%d %H:%M:%S'
        },
    },
    'handlers': {
        'console': {
            'class': 'logging.StreamHandler',
            'level': 'INFO',
            'filters': ['require_debug_true'],
            'formatter': 'verbose'
        },
        'azure': {
            'level': 'INFO',
            'filters': ['require_debug_false'],
            'class': 'opencensus.ext.azure.log_exporter.AzureLogHandler',
            'formatter': 'azure_verbose'
        },
    },
    'loggers': {
        'mylogger': {
            'handlers': ['azure', 'console'],
        },
        'django': {
            'handlers': ['azure', 'console'],
        },
        'py.warnings': {
            'handlers': ['azure', 'console'],
        },
        '': {
            'handlers': ['azure', 'console'],
        },
    },
}
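A usage sketch under the configuration above, assuming the behaviour documented for opencensus-ext-azure: with the RequireDebugTrue/RequireDebugFalse filters, console output only appears when DEBUG=True and Azure export only happens when DEBUG=False, and the AzureLogHandler falls back to the APPLICATIONINSIGHTS_CONNECTION_STRING environment variable when no connection_string is passed.
# Usage sketch: 'mylogger' matches the logger name configured above.
import logging

logger = logging.getLogger('mylogger')
logger.warning("custom telemetry event")  # exported to Application Insights only when DEBUG=False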

Django class logger not working

I have a class named "Photo" in my Django application which is not writing messages to my log file when an error occurs.
My project hierarchy looks like this:
- myproj
- apps
- classes
- classes/__init__.py
- classes/photo.py
Here is my LOGGING configuration setting:
LOGGING = {
    'version': 1,
    'disable_existing_loggers': True,
    'formatters': {
        'standard': {
            'format': "[%(asctime)s] %(levelname)s [%(name)s:%(lineno)s] %(message)s",
            'datefmt': "%d/%b/%Y %H:%M:%S"
        },
    },
    'handlers': {
        'null': {
            'level': 'DEBUG',
            'class': 'django.utils.log.NullHandler',
        },
        'logfile': {
            'level': LOG_LEVEL,
            'class': 'logging.handlers.RotatingFileHandler',
            'filename': '/var/log/myproj/apps.log',
            'maxBytes': 50000,
            'backupCount': 2,
            'formatter': 'standard',
        },
        'database_logfile': {
            'level': LOG_LEVEL,
            'class': 'logging.handlers.RotatingFileHandler',
            'filename': '/var/log/myproj/database.log',
            'maxBytes': 50000,
            'backupCount': 2,
            'formatter': 'standard',
        },
        'console': {
            'level': LOG_LEVEL,
            'class': 'logging.StreamHandler',
            'formatter': 'standard'
        },
        'mail_admins': {
            'level': 'ERROR',
            'class': 'django.utils.log.AdminEmailHandler'
            # Only send emails when DEBUG = False
            #'filters': ['require_debug_false'],
        },
    },
    'loggers': {
        'django': {
            'handlers': ['console'],
            'propagate': True,
            'level': 'WARN',
        },
        'django.request': {
            'handlers': ['mail_admins'],
            'level': 'ERROR',
            'propagate': False,
        },
        'django.db.backends': {
            'handlers': ['database_logfile'],
            'level': 'DEBUG',
            'propagate': False,
        },
        'apps': {
            'handlers': ['console', 'logfile', 'mail_admins'],
            'level': 'DEBUG',
        },
        'classes.photo': {
            'handlers': ['console', 'logfile', 'mail_admins'],
            'level': 'DEBUG',
        },
    }
}
Here's part of my class:
# photo.py
import logging

logger = logging.getLogger(__name__)


class Photo:
    def create_gallery_photo(self):
        ...
        photo_size = os.path.getsize()
        if photo_size > PHOTO_SIZE_LIMIT:
            logger.error("Photo too big")
At first I only had an 'apps' logger, and I realized the error
wouldn't get logged since photo.py lives outside the 'apps' application. But when I added a 'classes' logger I started getting a "No handlers could be found for logger 'classes'" error. Not sure what to do, I changed the logger to 'classes.photo'; that error went away, but the error message still isn't getting logged. I checked logger.name and it is set to 'classes.photo'. Is there something else I need to do because this error is being logged from a class? All the logging in my 'apps' application is working just fine.
Try adding a root logger with a console handler, and see what %(name)s gets logged there. All messages should reach that, unless they first get handled by a logger with propagate=False.
...
'loggers': {
    ...,
    '': {
        'handlers': ['console'],
        'level': 'DEBUG',
    },
}
...
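If the message still does not show up, a quick diagnostic sketch (not part of the original answer) is to inspect in manage.py shell what dictConfig actually attached:
# Diagnostic sketch: check the handlers and propagation that dictConfig set up.
import logging

log = logging.getLogger('classes.photo')
print(log.name, log.getEffectiveLevel(), log.handlers, log.propagate)
print(logging.getLogger().handlers)  # handlers attached to the root logger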

Configure Raven using Celery and Django without Djcelery

How do I configure Django + Raven with Celery, without using the djcelery application, which is out of date with Celery 3.1+?
The old configuration used:
INSTALLED_APPS = INSTALLED_APPS + (
    'raven.contrib.django.raven_compat',
)
You don't need djcelery with Celery 3.1+, just add raven.contrib.django.raven_compat to your INSTALLED_APPS.
INSTALLED_APPS = (
    ...
    'raven.contrib.django.raven_compat',
)

RAVEN_CONFIG = {
    'dsn': 'YOUR_DSN',
}
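For the Celery side specifically, a rough sketch based on Raven's documented signal hooks of that era (not required by the answer above; 'YOUR_DSN' is the same placeholder):
# Sketch of Raven's signal-based Celery integration (no djcelery needed).
from raven import Client
from raven.contrib.celery import register_logger_signal, register_signal

client = Client('YOUR_DSN')
register_logger_signal(client)  # forward Celery's log records to Sentry
register_signal(client)         # capture unhandled task failures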
Here is sample code for the LOGGING setting:
LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,
    'filters': {
        'require_debug_false': {
            '()': 'django.utils.log.RequireDebugFalse',
        },
        'require_debug_true': {
            '()': 'django.utils.log.RequireDebugTrue',
        },
    },
    'formatters': {
        'verbose': {
            'format': '%(levelname)s %(asctime)s %(module)s %(process)d %(thread)d %(message)s',
        },
        'simple': {
            'format': '%(levelname)s %(name)s %(message)s',
        },
        'clear': {
            'format': '%(message)s',
        },
    },
    'handlers': {
        'console': {
            'level': 'DEBUG',
            'class': 'logging.StreamHandler',
            'formatter': 'clear',
            'filters': ['require_debug_true'],
        },
        'mail_admins': {
            'level': 'ERROR',
            'class': 'django.utils.log.AdminEmailHandler',
            'filters': ['require_debug_false'],
        },
        'sentry': {
            'level': 'WARNING',
            'class': 'raven.contrib.django.raven_compat.handlers.SentryHandler',
            'filters': ['require_debug_false'],
        },
    },
    'loggers': {
        'raven': {
            'level': 'DEBUG',
            'handlers': ['console'],
            'propagate': False,
        },
        'sentry.errors': {
            'level': 'DEBUG',
            'handlers': ['console'],
            'propagate': False,
        },
        'log_to_console': {
            'handlers': ['console'],
            'level': 'DEBUG',
            'propagate': False,
        },
        'log_to_sentry': {
            'handlers': ['sentry'],
            'level': 'WARNING',
        },
    },
}
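A short usage sketch of the two custom loggers above, assuming DEBUG drives the require_debug_* filters as configured:
# Usage sketch: where each named logger's records end up.
import logging

logging.getLogger('log_to_console').debug("console only, when DEBUG=True")
logging.getLogger('log_to_sentry').warning("sent to Sentry, when DEBUG=False")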

Why does my Django 1.3 logging setup cause all messages to be output twice?

My Django 1.3 logging setup causes all log messages to be output twice. I've read that importing settings.py two or more times has caused this problem in the past, but Django 1.3 has new logging features, and I don't think I'm importing settings.py twice anywhere.
settings.py config:
LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,
    'formatters': {
        'simple': {
            'format': '%(levelname)s: %(message)s'
        },
    },
    'handlers': {
        'console': {
            'level': 'DEBUG',
            'class': 'logging.StreamHandler',
            'formatter': 'simple',
        },
    },
    'loggers': {
        'custom': {
            'handlers': ['console'],
            'level': 'DEBUG',
        },
    }
}
Code:
import logging
log = logging.getLogger('custom')
log.debug('message1')
log.debug('message2')
Output:
DEBUG: message1
DEBUG:custom:message1
Thanks for your help.
Have you tried setting propagate = False? Along with disable_existing_loggers = True?
LOGGING = {
    'version': 1,
    'disable_existing_loggers': True,
    'formatters': {
        'simple': {
            'format': '%(levelname)s: %(message)s'
        },
    },
    'handlers': {
        'console': {
            'level': 'DEBUG',
            'class': 'logging.StreamHandler',
            'formatter': 'simple',
        },
    },
    'loggers': {
        'custom': {
            'handlers': ['console'],
            'level': 'DEBUG',
            'propagate': False,
        },
    }
}
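As a quick check, a sketch of the expected behaviour: with propagate set to False, records from 'custom' stop at the console handler above and no longer bubble up to whatever handler sits on the root logger, so each call should print a single line.
# Verification sketch: each message should now appear exactly once.
import logging

log = logging.getLogger('custom')
log.debug('message1')  # expected: "DEBUG: message1" only, no "DEBUG:custom:message1"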
You might want to try 'disable_existing_loggers': True?
I suffered from the same issue, and I fixed it by redirecting the root logs to a file, and the project logs to both the file and the console.
I grepped my code and could not find a basicConfig() call anywhere. I also tried setting disable_existing_loggers to True, but it didn't help. I finally solved the problem by giving the root logger a file handler. I guess it may be a problem by design in some cases.
LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,
    'formatters': {
        'simple': {
            # The exact format is not important; this is the minimum information
            'format': '%(asctime)s %(name)-12s %(lineno)d %(levelname)-8s %(message)s',
        },
    },
    'handlers': {
        'console': {
            #'level': 'INFO',
            'class': 'logging.StreamHandler',
            'formatter': 'simple',
        },
        # Handler for mail_admins, for ERROR and above
        'mail_admins': {
            'level': 'ERROR',
            'class': 'django.utils.log.AdminEmailHandler',
        },
        'file': {
            #'level': 'INFO',
            'class': 'logging.FileHandler',
            'formatter': 'simple',
            'filename': os.path.join(os.path.dirname(PROJECT_ROOT), 'crawler.admin.log'),
        },
    },
    'loggers': {
        # Root logger
        '': {
            'level': 'INFO',
            'handlers': ['file', 'mail_admins'],
        },
        'scrapy': {
            'level': 'WARNING',
            'handlers': ['console', 'mail_admins'],
            'propagate': False,
        },
        'crawleradmin': {
            'level': 'INFO',
            'handlers': ['console', 'file', 'mail_admins'],
            # Required to avoid double logging with the root logger
            'propagate': False,
        },
    },
}
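A usage sketch with a hypothetical child logger name, showing why propagate is set to False on 'crawleradmin': without it, the record would also reach the root logger's file handler and be written to the file twice.
# Usage sketch: 'crawleradmin.spiders' is a hypothetical child logger; its records
# are handled once each by console, file and mail_admins, and propagate=False keeps
# them from reaching the root logger's file handler a second time.
import logging

logging.getLogger('crawleradmin.spiders').info("written once per handler")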