The following is a snippet of the settings for our project. The problem is that it does not seem to be connecting to the LDAP server. I added a couple of print statements in the django_auth_ldap module and none of them is printed out when I run the server and try to log in.
Can anybody help look at the settings and give some idea of what went wrong?
settings.py
import ldap
from django_auth_ldap.config import LDAPSearch, GroupOfNamesType

# LDAP server to authenticate against.
# NOTE(review): ":port" is a placeholder -- it must be replaced with a numeric
# port (e.g. 389 for ldap:// or 636 for ldaps://). With an invalid URI no
# connection is ever attempted, which would explain the silent failure.
AUTH_LDAP_SERVER_URI = "ldap://ldap.corperate.com:port"

# Service account used for the initial bind before searching for the user.
AUTH_LDAP_BIND_DN = "CN=Network Inventory Management,OU=Service Accounts,DC=corperate,DC=com"
AUTH_LDAP_BIND_PASSWORD = "PASSWORD"

# Extra python-ldap connection options: client-side debug output on, and
# referral chasing off.
AUTH_LDAP_CONNECTION_OPTIONS = {
    ldap.OPT_DEBUG_LEVEL: 1,
    ldap.OPT_REFERRALS: 0,
}

# Find users anywhere under ou=accounts by their sAMAccountName.
AUTH_LDAP_USER_SEARCH = LDAPSearch(
    "ou=accounts,dc=corperate,dc=com", ldap.SCOPE_SUBTREE, "(sAMAccountName=%(user)s)"
)

# Group discovery.
# NOTE(review): the sAMAccountName filter suggests Active Directory, whose
# groups normally have objectClass "group", not "groupOfNames". If group
# lookups come back empty, try ActiveDirectoryGroupType /
# NestedActiveDirectoryGroupType from django_auth_ldap.config -- TODO confirm
# against the directory schema.
AUTH_LDAP_GROUP_SEARCH = LDAPSearch(
    "ou=groups,dc=corperate,dc=com", ldap.SCOPE_SUBTREE, "(objectClass=groupOfNames)"
)
AUTH_LDAP_GROUP_TYPE = GroupOfNamesType(name_attr="cn")

# LDAP attribute -> Django User field.
# NOTE(review): "user_id" is not a field on the stock Django User model --
# verify the project's user model actually defines it.
AUTH_LDAP_USER_ATTR_MAP = {
    "user_id": "employeeid",
    "first_name": "givenName",
    "last_name": "sn",
    "email": "mail",
}

# LDAP attribute -> profile model field.
# Fixed: the original dict listed "employee_id" twice with the same value;
# Python silently keeps only one entry, so the duplicate has been removed.
AUTH_LDAP_PROFILE_ATTR_MAP = {
    "location": "physicalDeliveryOfficeName",
    "employee_id": "employeeID",
    "phone": "telephoneNumber",
    "account_status": "userAccountControl",
    "distinguished_name": "distinguishedName",
}

# Membership in this group grants is_staff.
AUTH_LDAP_USER_FLAGS_BY_GROUP = {
    "is_staff": "CN=Network Inventory Management Group,OU=django,OU=Groups,DC=corperate,DC=com",
}

# Refresh user attributes on every login; cache group lookups for one hour.
AUTH_LDAP_ALWAYS_UPDATE_USER = True
AUTH_LDAP_FIND_GROUP_PERMS = True
AUTH_LDAP_CACHE_GROUPS = True
AUTH_LDAP_GROUP_CACHE_TIMEOUT = 3600

# Try LDAP first, then fall back to the local database backend.
AUTHENTICATION_BACKENDS = (
    'django_auth_ldap.backend.LDAPBackend',
    'django.contrib.auth.backends.ModelBackend',
)
# Logging: mail unhandled request errors to the admins, and stream
# django_auth_ldap's full DEBUG output to the console so LDAP activity is
# visible while debugging the login flow.
LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,
    'handlers': {
        # DEBUG and above straight to stderr.
        'stream_to_console': {
            'level': 'DEBUG',
            'class': 'logging.StreamHandler',
        },
        # ERROR and above e-mailed to ADMINS.
        'mail_admins': {
            'level': 'ERROR',
            'class': 'django.utils.log.AdminEmailHandler',
        },
    },
    'loggers': {
        'django_auth_ldap': {
            'level': 'DEBUG',
            'handlers': ['stream_to_console'],
            'propagate': True,
        },
        'django.request': {
            'level': 'ERROR',
            'handlers': ['mail_admins'],
            'propagate': True,
        },
    },
}
Any error messages?
Enabling logging might help. Put this in the settings file:
# Route django_auth_ldap's DEBUG output to a rotating file under /tmp so LDAP
# traffic can be watched with `tail -f`.
import logging
import logging.handlers

logfile = "/tmp/django-ldap-debug.log"
my_logger = logging.getLogger('django_auth_ldap')
my_logger.setLevel(logging.DEBUG)
# Rotate at 500 KiB, keeping five backup files.
handler = logging.handlers.RotatingFileHandler(logfile, maxBytes=500 * 1024, backupCount=5)
my_logger.addHandler(handler)
and
tail -f /tmp/django-ldap-debug.log
Related
Trying to implement a DB logger in my Django project
but I am facing a problem managing the logs in my DB: how can I automatically delete the old records from the DB?
settings.py
# django_db_logger persists log records in the database.
INSTALLED_APPS = ['django_db_logger']

# Two DB-backed handlers: one for errors, one for informational records.
LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,
    'handlers': {
        'error': {
            'level': 'ERROR',
            'class': 'django_db_logger.db_log_handler.DatabaseLogHandler',
        },
        'console': {
            'level': 'INFO',
            'class': 'django_db_logger.db_log_handler.DatabaseLogHandler',
        },
    },
    'loggers': {
        # Named loggers used by the helpers.py wrapper below.
        'ERROR_LOG': {
            'level': 'ERROR',
            'handlers': ['error'],
            'propagate': True,
        },
        'INFO_LOG': {
            'level': 'INFO',
            'handlers': ['console'],
            'propagate': True,
        },
    },
}
and I also created a helper class in the helpers.py file for managing the logs
helpers.py
# Fixed: the original snippet did ``import Logging`` (the class's own name)
# instead of ``import logging``, so every call to ``logging.getLogger`` below
# failed at runtime.
import logging


class Logging:
    """Request-aware wrapper around the django_db_logger loggers.

    Each entry records the request URL, body and headers alongside the
    caller-supplied message.
    """

    def __init__(self, request):
        self.request = request

    def log(self, message=None):
        """Write an ERROR-level record to the 'ERROR_LOG' logger."""
        url = self.request.build_absolute_uri()
        logger = logging.getLogger('ERROR_LOG')
        logger.error(f'ERROR: {url} {self.request.data} {self.request.headers} {message}')

    def info(self, message=None):
        """Write an INFO-level record to the 'INFO_LOG' logger."""
        url = self.request.build_absolute_uri()
        logger = logging.getLogger('INFO_LOG')
        logger.info(f'INFO: {url} {self.request.data} {self.request.headers} {message}')
And if you have a better way of logging, please do suggest it.
here is the database response
Table name: django_db_logger_statuslog

| id | logger_name | msg | level | trace | create_datetime |
|----|-------------|-----|-------|-------|-----------------|
| 7 | INFO_LOG | INFO: http://127.0.0.1:8000/api/v1/farm/ {'farmer_id': '5840', 'lgd_state_id': 35, 'district_code'} | 20 | NULL | 2022-08-30 06:17:21 |
I'm trying to integrate logging with loki in my django app like this:
# Question's attempt: construct a LokiHandler instance up front and try to
# reference the object from the LOGGING dict.
handler = logging_loki.LokiHandler(
url="http://localhost:3100/loki/api/v1/push",
tags={"app": "django", "env": ENV},
version="1",
)
# dictConfig-style LOGGING: a logger's 'handlers' list must contain the
# *names* of handlers declared in the top-level 'handlers' section; raw
# handler objects are not accepted, which is why the entry below fails.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'standard': {
'format': '[%(asctime)s] {%(module)s} [%(levelname)s] - %(message)s',
'datefmt': '%d-%m-%Y %H:%M:%S'
},
},
'handlers': {
'console': {
'level': 'INFO',
'class': 'logging.StreamHandler',
'formatter': 'standard',
}
},
'loggers': {
'': {
'handlers': ['console', handler], # broken: dictConfig expects a handler name (string) here, not a handler object
'level': 'DEBUG',
'propagate': True,
# 'name': ENV
}
}
}
What do I need to change so that django uses this custom handler? I tried just referencing the handler object in the logging dict but that didn't seem to be the right approach.
I also tried this:
# Second attempt: disable Django's automatic logging setup and configure
# manually, then attach the handler to this module's own logger. Per the
# question, this still did not deliver any records to Loki.
LOGGING_CONFIG = None
logging.config.dictConfig(LOGGING)
logging.getLogger(__name__).addHandler(handler)
but that's not sending any logs to loki
Try this:
# Working configuration: declare the Loki handler inside LOGGING itself so
# dictConfig instantiates it; the extra keys ('url', 'tags', 'version') are
# passed to LokiHandler as constructor arguments.
LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,
    'formatters': {
        'standard': {
            'format': '[%(asctime)s] {%(module)s} [%(levelname)s] - %(message)s',
            'datefmt': '%d-%m-%Y %H:%M:%S'
        },
    },
    'handlers': {
        'console': {
            'level': 'INFO',
            'class': 'logging.StreamHandler',
            'formatter': 'standard',
        },
        'loki': {
            'level': 'INFO',
            'class': 'logging_loki.LokiHandler',
            'url': "http://localhost:3100/loki/api/v1/push",
            # Fixed: the original line read `'tags' {...}` -- the missing
            # colon was a SyntaxError.
            'tags': {"app": "django", "env": ENV},
            'version': "1",
        },
    },
    'loggers': {
        # Root logger: everything goes to both console and Loki.
        '': {
            'handlers': ['console', 'loki'],
            'level': 'DEBUG',
            'propagate': True,
        }
    }
}
Specifically, you have to:
Define your handler in the handlers dictionary in the LOGGING setting. The configuration here is used to specify the initialisation arguments for the handler.
Then assign this handler using its key (loki in the example I've given above) to the logger(s) you want it to handle.
I'm using Sentry with Django like this:
# Initialise the Sentry SDK with the Django integration enabled.
sentry_integration = DjangoIntegration()
sentry_sdk.init(
dsn="https://xxx#sentry.io/xxx",
integrations=[sentry_integration]
)
and with these settings for logging:
# Console + file logging for the django, django.template and app loggers.
# NOTE(review): FileHandler requires the BASE_DIR/logs directory to already
# exist, otherwise dictConfig fails at startup -- confirm it is created.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'handlers': {
'console': {
'class': 'logging.StreamHandler',
},
'file': {
'class': 'logging.FileHandler',
'filename': os.path.join(BASE_DIR, 'logs', 'django.log'),
},
},
'loggers': {
'django': {
'handlers': ['file', 'console'],
'level': 'DEBUG',
},
'django.template': {
'handlers': ['file', 'console'],
'level': 'INFO',
},
'app': {
'handlers': ['file', 'console'],
'level': 'DEBUG',
}
},
}
If I instantiate and call a logger in my code, this gets sent to Sentry.
# Example: a module-level logger whose .error() calls reach Sentry.
import logging
logger = logging.getLogger(__name__)
logger.error("error!")
However, now I'd like to also log .warning calls. The documentation says to do this:
# Documentation example: send WARNING-and-above log records to Sentry as events.
sentry_logging = LoggingIntegration(event_level=logging.WARNING)
sentry_sdk.init(
dsn="https://xxx#sentry.io/xxx",
integrations=[sentry_logging]
)
But the LoggingIntegration is used rather than the DjangoIntegration. I've tried to use DjangoIntegration in the code above but I get this error:
TypeError: init() got an unexpected keyword argument 'event_level'
Is this possible?
The integrations argument is a list. You can pass in both integrations:
# Both integrations can be passed together; neither excludes the other.
init(
integrations=[LoggingIntegration(...), DjangoIntegration(...)]
)
I've tried the below code for logging in django settings
# Google Cloud Logging client; setup_logging() attaches a Cloud Logging
# handler to the root logger.
from google.cloud import logging as google_cloud_logging
log_client = google_cloud_logging.Client()
log_client.setup_logging()
LOGGING = {
'version': 1,
'handlers': {
'console': {
'level': 'INFO',
'class': 'logging.StreamHandler',
},
'mail_admins': {
'level': 'ERROR',
'class': 'django.utils.log.AdminEmailHandler'
},
# Handler instantiated by dictConfig; the pre-built client is passed in
# as a constructor argument.
'stackdriver_logging': {
'class': 'google.cloud.logging.handlers.CloudLoggingHandler',
'client': log_client
},
},
'loggers': {
'django': {
'handlers': ['stackdriver_logging'],
'level': 'INFO',
'propagate': True,
},
'django.request': {
'handlers': [
'stackdriver_logging',
'mail_admins'
],
'level': 'ERROR',
}
},
}
gunicorn.conf.py
# Gunicorn settings: serve on localhost:8000 with three workers in the
# foreground; verbose gunicorn log output.
bind = "127.0.0.1:8000"
workers = 3
loglevel = "debug"
# Process name shown in ps/top.
proc_name = "hhh"
daemon = False
I was able to write to Stackdriver Logging with the Django development server, but logging is not working when I use gunicorn. However, django.request is working.
Problem: logger "django" not being called and showing no errors with gunicorn
Please help.
After trying many approaches, it finally worked with the configuration below.
gcp_utils/stackdriver_logging.py
import logging
import traceback
from django.views.debug import ExceptionReporter
from google.cloud import error_reporting, logging as google_cloud_logging
# Module-level clients: one for Error Reporting, one for Cloud Logging.
# setup_logging() attaches a Cloud Logging handler to the root logger.
client = error_reporting.Client()
log_client = google_cloud_logging.Client()
log_client.setup_logging()
class StackdriverErrorHandler(logging.Handler):
    """logging.Handler that forwards records to Stackdriver Error Reporting.

    Reads the Django request attached to the log record (django.request puts
    it there) to build an HTTP context, then reports the current traceback
    together with the failing source location.
    """

    def __init__(self):
        logging.Handler.__init__(self)

    def emit(self, record):
        # Fixed: the original only assigned ``user`` (and ``request``) inside
        # the try block, so an early failure made the later ``user=user`` and
        # ``ExceptionReporter(request, ...)`` raise NameError.
        user = 'Anonymous'
        request = None
        try:
            request = record.request
            # NOTE(review): calling is_authenticated() is old-Django style; on
            # modern Django it is a property -- confirm the Django version.
            if request.user.is_authenticated():
                user = request.user.email
            status_code = getattr(record, 'status_code', None)
            http_context = error_reporting.HTTPContext(
                method=request.method,
                url=request.build_absolute_uri(),
                user_agent=request.META.get('HTTP_USER_AGENT'),
                referrer=request.META.get('HTTP_REFERER'),
                response_status_code=status_code,
                remote_ip=request.META.get('REMOTE_ADDR')
            )
        except Exception as e:
            # Fixed: ``print e`` is Python 2 syntax (SyntaxError on Python 3).
            print(e)
            http_context = None
        # Prefer the record's own exception info; otherwise synthesise a
        # minimal triple from the formatted message.
        if record.exc_info:
            exc_info = record.exc_info
        else:
            exc_info = (None, record.getMessage(), None)
        reporter = ExceptionReporter(request, is_email=True, *exc_info)
        tb_frames = reporter.get_traceback_frames()
        if tb_frames:
            tb = tb_frames[-1]
        else:
            tb = {}
        report_location = {
            'filePath': tb.get('filename'),
            'lineNumber': tb.get('lineno'),
            'functionName': tb.get('function')
        }
        client._send_error_report(
            message=traceback.format_exc(),
            http_context=http_context,
            user=user,
            report_location=report_location
        )
settings.py
import sys
from google.cloud import logging as google_cloud_logging
# Cloud Logging client shared by the stackdriver handler below.
log_client = google_cloud_logging.Client()
# Only ship logs to Stackdriver in production.
# NOTE(review): the paste has lost the indentation of the block under this
# ``if`` -- in the real settings file everything below is indented.
if not DEBUG:
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'handlers': {
'console': {
'level': 'INFO',
'class': 'logging.StreamHandler',
'stream': sys.stdout,
},
'mail_admins': {
'level': 'ERROR',
'class': 'django.utils.log.AdminEmailHandler'
},
'stackdriver_logging': {
'class': 'google.cloud.logging.handlers.CloudLoggingHandler',
'client': log_client
},
# Custom handler defined in gcp_utils/stackdriver_logging.py above.
'stackdriver_error_reporting': {
'level': 'ERROR',
'class': 'gcp_utils.stackdriver_logging.StackdriverErrorHandler',
}
},
'loggers': {
'django': {
'handlers': ['console', 'stackdriver_logging'],
'level': 'DEBUG',
'propagate': True,
},
'django.request': {
'handlers': [
'stackdriver_logging',
'mail_admins'
],
'level': 'ERROR',
}
},
}
gunicorn.conf.py
# Gunicorn has its own logging setup, so Cloud Logging must also be wired up
# here (settings.py alone only covers the Django workers' request handling).
from google.cloud import logging as google_cloud_logging
log_client = google_cloud_logging.Client()
log_client.setup_logging()
bind = "127.0.0.1:8000"
workers = 3
loglevel = "debug"
proc_name = "django_app"
daemon = False
pythonpath = "/path/to/python/"
timeout = 90
accesslog = '/home/user/logs/debug.log'
# Gunicorn's own dictConfig; routes its access log through Stackdriver too.
logconfig_dict = {
'version': 1,
'disable_existing_loggers': False,
'handlers': {
'mail_admins': {
'level': 'ERROR',
'class': 'django.utils.log.AdminEmailHandler'
},
'stackdriver_logging': {
'class': 'google.cloud.logging.handlers.CloudLoggingHandler',
'client': log_client
},
},
'loggers': {
'gunicorn.access': {
'handlers': ['stackdriver_logging'],
'level': 'INFO',
'propagate': True,
},
'django.request': {
'handlers': [
'stackdriver_logging',
'mail_admins'
],
'level': 'ERROR',
}
},
}
running gunicorn process
$ gunicorn -c gunicorn.conf.py wsgi:application
We must define the logconfig_dict in order to make it work with Google Stackdriver Logging.
I have this django setting:
# Mail DEBUG-and-above reports of request errors to ADMINS, but only when
# DEBUG is off (require_debug_false suppresses the e-mail in development).
LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,
    'filters': {
        'require_debug_false': {
            '()': 'django.utils.log.RequireDebugFalse',
        },
    },
    'handlers': {
        'mail_admins': {
            'class': 'django.utils.log.AdminEmailHandler',
            'level': 'DEBUG',
            'filters': ['require_debug_false'],
        },
    },
    'loggers': {
        'django.request': {
            'level': 'DEBUG',
            'handlers': ['mail_admins'],
            'propagate': True,
        },
    },
}
In my management command code, I had to put this to log at debug level:
import logging
logger = logging.getLogger('main')
# Force the 'main' logger down to DEBUG so debug-level calls are emitted.
logger.setLevel(logging.DEBUG)
but I want to also add the date so I tried:
import logging
logger = logging.getLogger('main')
logger.setLevel(logging.DEBUG)
# Extra console handler with a timestamped format.
ch = logging.StreamHandler()
formatter = logging.Formatter('%(asctime)s %(levelname)s: %(message)s')
ch.setFormatter(formatter)
# NOTE: records also propagate to ancestor loggers' handlers, so adding this
# handler on top of an existing one is what produces the duplicated output
# shown in the question.
logger.addHandler(ch)
and this results in things getting logged twice like:
2016-03-14 22:32:16,202 DEBUG: test
DEBUG: test
How can I format the logger for the management command?