Beyond logging.basicConfig(), Python's logging module offers powerful patterns for production applications.
Structured Logging
Add context to every log message:
import logging
import json


class JSONFormatter(logging.Formatter):
    """Serialize each log record as a single JSON object.

    Structured output like this is what log-aggregation systems
    (ELK, Loki, CloudWatch, ...) expect to ingest.
    """

    # Optional record attributes copied through when present.
    _EXTRA_FIELDS = ('user_id', 'request_id')

    def format(self, record):
        payload = {
            'timestamp': self.formatTime(record),
            'level': record.levelname,
            'logger': record.name,
            'message': record.getMessage(),
            'module': record.module,
            'function': record.funcName,
            'line': record.lineno,
        }
        # Include extra fields passed via logger.*(..., extra={...})
        for field in self._EXTRA_FIELDS:
            if hasattr(record, field):
                payload[field] = getattr(record, field)
        return json.dumps(payload)


# Usage
handler = logging.StreamHandler()
handler.setFormatter(JSONFormatter())
logger = logging.getLogger(__name__)
logger.addHandler(handler)
logger.info("User action", extra={'user_id': 123, 'request_id': 'abc'})

LoggerAdapter for Context
import logging


class ContextAdapter(logging.LoggerAdapter):
    """Prefix every message with this adapter's key=value context pairs."""

    def process(self, msg, kwargs):
        # Render the adapter's `extra` mapping once per message.
        pairs = ' '.join(f'{key}={value}' for key, value in self.extra.items())
        return f"[{pairs}] {msg}", kwargs


logger = logging.getLogger(__name__)
adapter = ContextAdapter(logger, {'request_id': 'xyz', 'user': 'alice'})
adapter.info("Processing request")
# [request_id=xyz user=alice] Processing request

Custom Filters
import logging
import re

# Matches 'password=<value>' so the value itself can be redacted.
_PASSWORD_RE = re.compile(r'(password=)\S+')


class SensitiveDataFilter(logging.Filter):
    """Redact password values from log messages before they are emitted.

    Attach with logger.addFilter(); always returns True (never drops
    records), it only rewrites record.msg in place.
    """

    def filter(self, record):
        # BUG FIX: the original `msg.replace('password=', 'password=***')`
        # kept the secret ('password=***hunter2'); the regex removes the
        # value itself. Guard on str because msg may be any object.
        if isinstance(record.msg, str):
            record.msg = _PASSWORD_RE.sub(r'\1***', record.msg)
        return True


class RateLimitFilter(logging.Filter):
    """Let through only every Nth record (drops the first N-1).

    Not thread-safe; counter is per-filter instance.
    """

    def __init__(self, rate=10):
        super().__init__()
        self.rate = rate   # keep every `rate`-th message
        self.count = 0     # total records seen so far

    def filter(self, record):
        self.count += 1
        return self.count % self.rate == 0  # Log every Nth message


logger = logging.getLogger(__name__)
logger.addFilter(SensitiveDataFilter())

Multiple Handlers
import logging
from logging.handlers import RotatingFileHandler, TimedRotatingFileHandler

logger = logging.getLogger('myapp')
logger.setLevel(logging.DEBUG)

# Console handler: INFO and above, terse layout.
console = logging.StreamHandler()
console.setLevel(logging.INFO)
console.setFormatter(logging.Formatter('%(levelname)s: %(message)s'))

# File handler: DEBUG and above, rotated once the file reaches 10 MB,
# keeping five old generations.
file_handler = RotatingFileHandler(
    'app.log', maxBytes=10 * 1024 * 1024, backupCount=5,
)
file_handler.setLevel(logging.DEBUG)
file_handler.setFormatter(
    logging.Formatter('%(asctime)s %(name)s %(levelname)s %(message)s')
)

# Daily rotation: one file per day, keep a month of history.
daily_handler = TimedRotatingFileHandler(
    'daily.log', when='midnight', backupCount=30,
)

logger.addHandler(console)
logger.addHandler(file_handler)
logger.addHandler(daily_handler)

Error Notification Handler
import logging
from logging.handlers import SMTPHandler

# Email ERROR-and-above records to the on-call address.
# NOTE(review): SMTPHandler sends one blocking email per record —
# in production pair it with a QueueHandler so requests don't stall.
mail_handler = SMTPHandler(
    mailhost='smtp.example.com',
    fromaddr='app@example.com',
    toaddrs=['oncall@example.com'],
    subject='Application Error',
)
mail_handler.setLevel(logging.ERROR)
logger = logging.getLogger(__name__)
logger.addHandler(mail_handler)

Queue Handler for Async Logging
import logging
from logging.handlers import QueueHandler, QueueListener
from queue import Queue

# Shared queue that decouples log producers from the slow writer.
log_queue = Queue()

# The application side only enqueues records: fast and non-blocking.
queue_handler = QueueHandler(log_queue)
logger = logging.getLogger(__name__)
logger.addHandler(queue_handler)

# A background listener thread drains the queue into the real handler.
file_handler = logging.FileHandler('app.log')
listener = QueueListener(log_queue, file_handler)
listener.start()
# On shutdown
listener.stop()

Configuration from Dict
import logging.config

# Central logging configuration (logging.config.dictConfig schema v1).
LOGGING_CONFIG = {
    'version': 1,
    # Keep loggers created before configuration time alive.
    'disable_existing_loggers': False,
    'formatters': {
        'standard': {
            'format': '%(asctime)s [%(levelname)s] %(name)s: %(message)s'
        },
        'json': {
            # Dotted path to the project's custom formatter class.
            'class': 'myapp.logging.JSONFormatter'
        }
    },
    'handlers': {
        'console': {
            'class': 'logging.StreamHandler',
            'level': 'INFO',
            'formatter': 'standard',
            'stream': 'ext://sys.stdout'
        },
        'file': {
            'class': 'logging.handlers.RotatingFileHandler',
            'level': 'DEBUG',
            'formatter': 'standard',
            'filename': 'app.log',
            'maxBytes': 10485760,  # 10 MB
            'backupCount': 5
        }
    },
    'loggers': {
        '': {  # Root logger
            'handlers': ['console', 'file'],
            'level': 'DEBUG'
        },
        'myapp': {
            'handlers': ['console'],
            'level': 'INFO',
            'propagate': False  # don't double-log through the root logger
        }
    }
}
logging.config.dictConfig(LOGGING_CONFIG)

Per-Request Context
import logging
import contextvars
import uuid

# Holds the current request's ID; isolated per thread / async task.
request_id_var = contextvars.ContextVar('request_id', default=None)


class RequestContextFilter(logging.Filter):
    """Stamp every record with the active request ID (or None)."""

    def filter(self, record):
        record.request_id = request_id_var.get()
        return True


# In request handler
def handle_request():
    # Tag the current context with a fresh, unique request ID.
    request_id_var.set(str(uuid.uuid4()))
    logger.info("Processing")  # Includes request_id

Exception Logging
import logging

logger = logging.getLogger(__name__)

try:
    risky_operation()
except Exception:
    # logger.exception logs at ERROR level and appends the full traceback.
    logger.exception("Operation failed")
    # Or manually
    logger.error("Failed", exc_info=True)
    logger.error("Failed", exc_info=True, extra={'user_id': 123})

Performance Logging
import logging
import time
from functools import wraps


def log_execution_time(logger):
    """Decorator factory: log how long each call to the wrapped function takes.

    Args:
        logger: the logging.Logger that receives the timing records.

    The duration is attached as ``duration_ms`` in ``extra`` so structured
    formatters can emit it as a machine-readable field.

    Improvements over the original: the timing is logged in a ``finally``
    block, so a raising function still gets timed (the exception itself
    still propagates), and the message uses lazy %-formatting instead of
    an f-string, per logging best practice.
    """
    def decorator(func):
        @wraps(func)
        def wrapper(*args, **kwargs):
            start = time.perf_counter()
            try:
                return func(*args, **kwargs)
            finally:
                elapsed = time.perf_counter() - start
                logger.info(
                    "%s completed", func.__name__,
                    extra={'duration_ms': elapsed * 1000},
                )
        return wrapper
    return decorator
@log_execution_time(logging.getLogger(__name__))
def slow_function():
    time.sleep(1)

Conditional Logging
import logging

logger = logging.getLogger(__name__)

# Expensive string formatting only if needed: guard the call so
# expensive_serialize() never runs unless DEBUG records would be emitted.
if logger.isEnabledFor(logging.DEBUG):
    logger.debug(f"Data: {expensive_serialize(data)}")
logger.debug("Data: %s", data)  # Only formats if DEBUG enabled

Testing Logs
import logging
import unittest


class TestLogging(unittest.TestCase):
    """Verify that my_function emits the expected WARNING record."""

    def test_logs_warning(self):
        # assertLogs fails the test if no WARNING+ record hits 'myapp'.
        with self.assertLogs('myapp', level='WARNING') as cm:
            my_function()
        self.assertIn('expected message', cm.output[0])

Summary
Production logging patterns:
- Structured logging: JSON for log aggregation
- LoggerAdapter: Add context without changing code
- Filters: Redact sensitive data, rate limit
- Multiple handlers: Different levels to different outputs
- QueueHandler: Non-blocking async logging
- dictConfig: Centralized configuration
Good logging is the difference between "it's broken" and "here's exactly what happened."
React to this post: