The logging module is Python's standard way to emit diagnostic information. Here's how to use it properly.
Basic Setup
import logging
# Quick setup (fine for scripts)
# NOTE: basicConfig is a no-op if the root logger already has handlers,
# so calling it twice will not reconfigure anything.
logging.basicConfig(
level=logging.INFO,
format="%(asctime)s - %(name)s - %(levelname)s - %(message)s"
)
# Module-level logger named after the importing module ("__main__" in a script).
logger = logging.getLogger(__name__)
logger.info("Application started")
logger.warning("Something looks wrong")
logger.error("Something failed")

Log Levels
import logging
logger = logging.getLogger(__name__)
# In order of severity
# DEBUG is the lowest level and is usually disabled in production.
logger.debug("Detailed information for debugging")
logger.info("General operational events")
logger.warning("Something unexpected, but not an error")
logger.error("Error occurred, operation may continue")
logger.critical("Serious error, program may not continue")
# Exception logging (includes traceback)
# logger.exception() logs at ERROR level and appends the active traceback;
# it must be called from inside an except block.
try:
1 / 0
except ZeroDivisionError:
logger.exception("Division failed")

Logger Configuration
import logging
import sys
def setup_logging(level=logging.INFO):
    """Configure the root logger with a single stdout console handler.

    Args:
        level: minimum level for both the root logger and the handler
            (defaults to ``logging.INFO``).

    Returns:
        The configured root logger.

    Safe to call more than once: a guard prevents stacking duplicate
    handlers (the original added a fresh StreamHandler on every call,
    which caused each log line to be emitted once per call).
    """
    root = logging.getLogger()
    root.setLevel(level)
    # Only attach our console handler when the root logger is still bare.
    if not root.handlers:
        console = logging.StreamHandler(sys.stdout)
        console.setLevel(level)
        console.setFormatter(logging.Formatter(
            "%(asctime)s [%(levelname)s] %(name)s: %(message)s",
            datefmt="%Y-%m-%d %H:%M:%S",
        ))
        root.addHandler(console)
    return root
# Per-module loggers
logger = logging.getLogger(__name__)

Handlers
import logging
from logging.handlers import (
RotatingFileHandler,
TimedRotatingFileHandler,
SocketHandler,
SMTPHandler,
)
logger = logging.getLogger("myapp")
# The logger level is the first gate; each handler filters again below it.
logger.setLevel(logging.DEBUG)
# File handler
file_handler = logging.FileHandler("app.log")
file_handler.setLevel(logging.INFO)
# Rotating file handler
# Rolls over when the file reaches maxBytes, keeping backupCount old files.
rotating = RotatingFileHandler(
"app.log",
maxBytes=10_000_000, # 10MB
backupCount=5
)
# Time-based rotation
# Starts a new file at midnight, keeping 30 days of history.
daily = TimedRotatingFileHandler(
"app.log",
when="midnight",
backupCount=30
)
# Different levels for different handlers
console = logging.StreamHandler()
console.setLevel(logging.WARNING) # Only warnings+ to console
logger.addHandler(file_handler)
logger.addHandler(console)

Formatters
import logging
# Basic format
basic = logging.Formatter("%(levelname)s: %(message)s")
# Detailed format
# %(filename)s and %(lineno)d point at the call site of the logging call.
detailed = logging.Formatter(
"%(asctime)s - %(name)s - %(levelname)s - "
"%(filename)s:%(lineno)d - %(message)s"
)
import json


class JsonFormatter(logging.Formatter):
    """Format each log record as a single-line JSON object.

    Useful for log aggregators that ingest structured JSON. The import of
    ``json`` is hoisted to module level instead of re-executing inside
    every ``format()`` call.
    """

    def format(self, record):
        payload = {
            "timestamp": self.formatTime(record),
            "level": record.levelname,
            "logger": record.name,
            "message": record.getMessage(),
            "module": record.module,
            "line": record.lineno,
        }
        # Previously tracebacks were silently dropped; include them so
        # logger.exception(...) output survives JSON serialization.
        if record.exc_info:
            payload["exception"] = self.formatException(record.exc_info)
        return json.dumps(payload)
handler = logging.StreamHandler()
handler.setFormatter(JsonFormatter())

Filters
import logging
class ContextFilter(logging.Filter):
    """Inject a fixed context mapping into every record that passes through.

    The record is never rejected; the filter's only job is attaching the
    supplied context so formatters and handlers can reference it.
    """

    def __init__(self, context):
        super().__init__()
        self.context = context

    def filter(self, record):
        # Filters are allowed to mutate records; True keeps the record.
        setattr(record, "context", self.context)
        return True
import re


class SensitiveDataFilter(logging.Filter):
    """Mask credential-looking values in log messages.

    The original version called an undefined ``self._find_secrets`` and
    passed its result to ``str.replace``, which could never work. This
    implementation rewrites ``record.msg`` with a regex so that values
    following keys such as ``password=`` or ``token:`` are replaced with
    ``***`` before the record reaches any handler.
    """

    # key=value / key: value pairs whose value should be hidden
    _SECRET_RE = re.compile(
        r"(?i)\b(password|passwd|secret|token|api_key)\b(\s*[=:]\s*)(\S+)"
    )

    def filter(self, record):
        # record.msg may be a non-string object; only strings are masked.
        if isinstance(record.msg, str):
            record.msg = self._SECRET_RE.sub(r"\1\2***", record.msg)
        return True  # never drop the record, only sanitize it
# Usage
logger.addFilter(ContextFilter({"request_id": "abc123"}))

Structured Logging
import logging
import json
class StructuredLogger:
    """Thin wrapper around ``logging`` that attaches JSON-encoded context.

    Keyword arguments passed to the level methods are serialized into a
    JSON string and stored on the record as the ``extra_data`` attribute.
    """

    def __init__(self, name):
        self.logger = logging.getLogger(name)

    def _log(self, level, message, **kwargs):
        # Serialize the structured payload once and attach it via `extra`.
        payload = json.dumps(kwargs)
        self.logger.log(level, message, extra={"extra_data": payload})

    def info(self, message, **kwargs):
        self._log(logging.INFO, message, **kwargs)

    def error(self, message, **kwargs):
        self._log(logging.ERROR, message, **kwargs)
# Usage
log = StructuredLogger(__name__)
log.info("User logged in", user_id=123, ip="192.168.1.1")

Configuration from Dict
import logging.config
# Declarative configuration consumed by logging.config.dictConfig below.
LOGGING_CONFIG = {
# "version" 1 is the only valid schema version.
"version": 1,
# Keep loggers created before dictConfig() runs working.
"disable_existing_loggers": False,
"formatters": {
"standard": {
"format": "%(asctime)s [%(levelname)s] %(name)s: %(message)s"
},
},
"handlers": {
"console": {
"class": "logging.StreamHandler",
"formatter": "standard",
"level": "INFO",
},
# Rotates at ~10MB, keeping 5 old files.
"file": {
"class": "logging.handlers.RotatingFileHandler",
"filename": "app.log",
"maxBytes": 10485760,
"backupCount": 5,
"formatter": "standard",
},
},
"loggers": {
"": { # Root logger
"handlers": ["console", "file"],
"level": "INFO",
},
# "propagate": False stops myapp records from also reaching the root handlers.
"myapp": {
"handlers": ["console"],
"level": "DEBUG",
"propagate": False,
},
},
}
logging.config.dictConfig(LOGGING_CONFIG)

Context Managers
import logging
from contextlib import contextmanager
@contextmanager
def log_context(**kwargs):
    """Temporarily attach extra attributes to every LogRecord created.

    Swaps in a record factory that copies ``kwargs`` onto each new record,
    restoring the previous factory on exit even if the body raises.
    """
    previous = logging.getLogRecordFactory()

    def _factory(*fargs, **fkwargs):
        record = previous(*fargs, **fkwargs)
        # Copy the context attributes in one shot.
        record.__dict__.update(kwargs)
        return record

    logging.setLogRecordFactory(_factory)
    try:
        yield
    finally:
        logging.setLogRecordFactory(previous)
# Usage
with log_context(request_id="abc123", user_id=42):
logger.info("Processing request")  # Includes context

Performance Patterns
import logging
logger = logging.getLogger(__name__)
# Avoid expensive operations when not needed
# isEnabledFor() is a cheap level check, so expensive_serialization()
# only runs when DEBUG output is actually enabled.
if logger.isEnabledFor(logging.DEBUG):
logger.debug("Data: %s", expensive_serialization())
# Use lazy formatting (NOT f-strings)
# %-style arguments are formatted only if the record is actually emitted.
logger.info("User %s logged in from %s", user_id, ip) # Good
# logger.info(f"User {user_id} logged in") # Evaluated always
# Disable logging for performance-critical sections
# disable(CRITICAL) mutes everything at CRITICAL and below, process-wide.
logging.disable(logging.CRITICAL)
# ... performance-critical code ...
logging.disable(logging.NOTSET)

Testing
import logging
import unittest
class TestLogging(unittest.TestCase):
# assertLogs captures records from the named logger at (or above) the level.
def test_logs_error_on_failure(self):
with self.assertLogs("myapp", level="ERROR") as logs:
logger = logging.getLogger("myapp")
logger.error("Something failed")
# logs.output holds formatted "LEVEL:logger:message" strings.
self.assertIn("Something failed", logs.output[0])
# NOTE: assertNoLogs requires Python 3.10+.
def test_no_logs(self):
# Verify no logs are emitted
with self.assertNoLogs("myapp", level="WARNING"):
# Code that should not log warnings
pass

Common Patterns
import logging
import functools
# Function entry/exit logging
def log_calls(func):
    """Decorator that logs entry, return value, and exceptions of *func*.

    Uses the decorated function's own module logger so the output shows
    up under the right logger name.
    """

    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        log = logging.getLogger(func.__module__)
        log.debug("Calling %s", func.__name__)
        try:
            result = func(*args, **kwargs)
        except Exception:
            # Record the traceback, then let the caller see the error.
            log.exception("%s raised exception", func.__name__)
            raise
        log.debug("%s returned %r", func.__name__, result)
        return result

    return wrapper
# Quiet chatty third-party libraries: warnings and above only.
for _noisy in ("urllib3", "requests"):
    logging.getLogger(_noisy).setLevel(logging.WARNING)

# Derive namespaced child loggers from a single application root.
base_logger = logging.getLogger("myapp")
db_logger = base_logger.getChild("database")
api_logger = base_logger.getChild("api")

Production Setup
import logging
import sys
import os
def configure_production_logging():
    """Configure root logging for production from the LOG_LEVEL env var.

    Returns:
        The root logger.

    The original ``getattr(logging, os.environ.get("LOG_LEVEL", "INFO"))``
    raised AttributeError at startup for any unknown or lowercase value;
    this version normalizes the name and falls back to INFO instead.
    """
    level_name = os.environ.get("LOG_LEVEL", "INFO").upper()
    level = getattr(logging, level_name, None)
    if not isinstance(level, int):  # unknown name -> safe default
        level = logging.INFO
    logging.basicConfig(
        level=level,
        format="%(asctime)s [%(levelname)s] %(name)s: %(message)s",
        handlers=[
            logging.StreamHandler(sys.stdout),
        ],
    )
    # Reduce noise from libraries
    for name in ["urllib3", "requests", "botocore"]:
        logging.getLogger(name).setLevel(logging.WARNING)
    return logging.getLogger()
# At application startup
logger = configure_production_logging()
logger.info("Application starting", extra={"version": "1.0.0"})

Proper logging is the difference between guessing what went wrong and knowing. Set it up once, use it everywhere.
React to this post: