Python's functools provides decorators for automatic memoization—caching function results to avoid recomputation.
@lru_cache
Least Recently Used cache with size limit:
from functools import lru_cache

@lru_cache(maxsize=128)
def fibonacci(n):
    """Return the n-th Fibonacci number (memoized, so linear time)."""
    # 0 and 1 map to themselves; every other n recurses, and each
    # distinct argument is computed at most once thanks to the cache.
    return n if n < 2 else fibonacci(n - 1) + fibonacci(n - 2)

# Without the cache the naive recursion is O(2^n); with it, O(n).
print(fibonacci(100))  # Instant
@cache (Python 3.9+)
An unlimited cache — a simpler equivalent of @lru_cache(maxsize=None):
from functools import cache

@cache
def expensive_computation(x, y):
    """Return x ** y, remembering every (x, y) pair after the first call."""
    print(f"Computing {x}, {y}")  # visible only on a cache miss
    result = x ** y
    return result

expensive_computation(2, 10)  # prints, then yields 1024
expensive_computation(2, 10)  # Returns cached 1024 (no print)
Cache Statistics
from functools import lru_cache

@lru_cache(maxsize=32)
def fetch_data(key):
    """Look up *key*, keeping only the 32 most recently used results."""
    return expensive_lookup(key)

# Inspect the hit/miss counters accumulated so far.
info = fetch_data.cache_info()
print(info)
# e.g. CacheInfo(hits=45, misses=10, maxsize=32, currsize=10)
# Drop every cached entry and reset the counters:
fetch_data.cache_clear()
Typed Cache
Distinguish between argument types:
from functools import lru_cache

@lru_cache(maxsize=128, typed=True)
def process(value):
    """Double *value*; with typed=True, 3 (int) and 3.0 (float) get separate cache entries."""
    doubled = value * 2
    return doubled

process(3)  # Cached separately from...
process(3.0)  # ...this (different types)
@cached_property
Cache property result after first access:
from functools import cached_property

class DataAnalyzer:
    """Holds a numeric sequence and lazily derives summary statistics."""

    def __init__(self, data):
        self.data = data

    @cached_property
    def summary(self):
        """Compute mean/max/min once; subsequent accesses reuse the stored dict."""
        print("Computing summary...")
        stats = {
            'mean': sum(self.data) / len(self.data),
            'max': max(self.data),
            'min': min(self.data),
        }
        return stats

analyzer = DataAnalyzer([1, 2, 3, 4, 5])
print(analyzer.summary)  # triggers the computation (prints the banner)
print(analyzer.summary)  # Uses cached value (no print)
Cache with Timeout
Build your own time-based cache:
from functools import wraps
import time

def timed_cache(seconds):
    """Decorator factory: memoize results of *func* for *seconds* seconds.

    Positional and keyword arguments together form the cache key, so all of
    them must be hashable. Expired entries are simply overwritten on the next
    call. The wrapper gains a ``cache_clear()`` helper mirroring
    ``functools.lru_cache``.
    """
    def decorator(func):
        cache = {}

        @wraps(func)
        def wrapper(*args, **kwargs):
            # Fold kwargs into the key in sorted order so f(a=1, b=2) and
            # f(b=2, a=1) share one entry. (The original ignored kwargs.)
            key = (args, tuple(sorted(kwargs.items())))
            # monotonic() cannot jump backwards/forwards with system-clock
            # adjustments, unlike time.time(), so expiry stays reliable.
            now = time.monotonic()
            entry = cache.get(key)
            if entry is not None:
                result, timestamp = entry
                if now - timestamp < seconds:
                    return result
            result = func(*args, **kwargs)
            cache[key] = (result, now)
            return result

        wrapper.cache_clear = cache.clear
        return wrapper
    return decorator
@timed_cache(60) # Cache for 60 seconds
def fetch_price(symbol):
return api_call(symbol)
Caching with Unhashable Arguments
lru_cache requires hashable arguments. Workarounds:
from functools import lru_cache
import json
# Convert arbitrary nested containers into an equivalent hashable value.
def make_hashable(obj):
    """Recursively turn dicts/lists/tuples/sets into hashable equivalents.

    Dicts become key-sorted tuples of (key, value) pairs so equal dicts map
    to the same cache key regardless of insertion order. Lists and tuples
    become tuples of converted elements (the original left tuples untouched,
    so a tuple containing a dict stayed unhashable); sets become frozensets.
    Everything else is returned as-is and assumed hashable.
    """
    if isinstance(obj, dict):
        return tuple(sorted((k, make_hashable(v)) for k, v in obj.items()))
    if isinstance(obj, (list, tuple)):
        return tuple(make_hashable(x) for x in obj)
    if isinstance(obj, set):
        return frozenset(make_hashable(x) for x in obj)
    return obj
@lru_cache(maxsize=128)
def _process_cached(hashable_config):
    """Cached worker; receives the hashable (tuple) form of the config."""
    # Rebuild a plain dict before handing it to the real processing code.
    return expensive_process(dict(hashable_config))

def process(config):
    """Public entry point: accepts an ordinary (unhashable) dict."""
    key = make_hashable(config)
    return _process_cached(key)
# Alternative: serialize the config to a canonical JSON string for the key.
@lru_cache(maxsize=128)
def _process_json(config_json):
    """Cached worker keyed on the JSON serialization of the config."""
    return expensive_process(json.loads(config_json))
def process(config):
return _process_json(json.dumps(config, sort_keys=True))
Cache Key Customization
from functools import lru_cache
class Request:
    """HTTP-request value object, hashable so it can serve as an lru_cache key."""

    def __init__(self, url, params):
        self.url = url
        self.params = params  # assumed to be a dict of query parameters

    def __hash__(self):
        # Sort the params so equal dicts hash identically regardless of
        # insertion order; must stay consistent with __eq__ below.
        return hash((self.url, tuple(sorted(self.params.items()))))

    def __eq__(self, other):
        # The original raised AttributeError when compared against a
        # non-Request; NotImplemented lets Python fall back gracefully.
        if not isinstance(other, Request):
            return NotImplemented
        return self.url == other.url and self.params == other.params
@lru_cache(maxsize=100)
def fetch(request: Request):
return http_get(request.url, request.params)
Async Caching
lru_cache doesn't work directly with async. Solutions:
from functools import lru_cache
import asyncio
# Option 1: cache the awaited result in a plain module-level dict.
_cache = {}
async def cached_fetch(url):
    """Fetch *url* once; later calls return the stored result.

    NOTE(review): two concurrent first calls for the same url will both
    miss and fetch twice — acceptable for this example; verify for
    production use.
    """
    try:
        return _cache[url]
    except KeyError:
        result = await actual_fetch(url)
        _cache[url] = result
        return result
# Option 2: Use aiocache library
# pip install aiocache
from aiocache import cached
@cached(ttl=300)
async def fetch_data(key):
return await async_api_call(key)
Per-Instance Caching
from functools import lru_cache

class Service:
    """Shows how to give every instance its own independent memo cache."""

    def __init__(self, name):
        self.name = name
        # Wrap the bound worker here, per instance, so caches are never
        # shared between Service objects (avoids the class-level
        # lru_cache-on-a-method pitfall).
        self.get_data = lru_cache(maxsize=100)(self._get_data)

    def _get_data(self, key):
        # Uncached worker; only ever called through the per-instance wrapper.
        return f"{self.name}: {key}"
s1 = Service("A")
s2 = Service("B")
# s1 and s2 have separate caches
Memory Management
from functools import lru_cache
import weakref
# Cache with weak references (values can be garbage collected)
class WeakCache:
def __init__(self):
self._cache = weakref.WeakValueDictionary()
def get(self, key, factory):
if key not in self._cache:
self._cache[key] = factory()
return self._cache[key]
Practical Patterns
Database Query Cache
from functools import lru_cache

class UserRepository:
    """Data-access layer for users with a read-through query cache.

    NOTE(review): lru_cache on an instance method keys on ``self`` and keeps
    every repository instance alive for the cache's lifetime (ruff B019).
    Fine for a long-lived singleton repository; otherwise prefer a
    per-instance cache.
    """

    @lru_cache(maxsize=1000)
    def get_user(self, user_id: int):
        """Fetch one user row, cached per (repository, user_id)."""
        # Parameterized query — never interpolate values into SQL text
        # (the original f-string was injectable).
        # assumes self.db.query accepts DB-API qmark-style params — TODO confirm
        return self.db.query("SELECT * FROM users WHERE id = ?", (user_id,))

    def update_user(self, user_id: int, data: dict):
        """Persist *data* for the user and drop stale cached reads."""
        self.db.update(user_id, data)
        # lru_cache has no per-key eviction, so clearing everything is the
        # simple correct option after a write.
        self.get_user.cache_clear()
# Or track and clear selectively
Configuration Cache
from functools import cached_property
from pathlib import Path
import yaml
class Config:
def __init__(self, path: str):
self._path = Path(path)
@cached_property
def settings(self):
return yaml.safe_load(self._path.read_text())
def reload(self):
# Clear cached_property
if 'settings' in self.__dict__:
del self.__dict__['settings']
API Response Cache
from functools import lru_cache
from datetime import datetime
@lru_cache(maxsize=500)
def get_exchange_rate(currency: str, date: str):
    """Cache exchange rates by currency and date.

    Including the date string in the key gives each day its own cache
    entry, which acts as a natural TTL for "today's" rate within one
    process lifetime.
    """
    # assumes api.fetch_rate returns the rate for (currency, date) — TODO confirm
    return api.fetch_rate(currency, date)
# Today's rate (will be cached for session)
rate = get_exchange_rate("USD", datetime.now().strftime("%Y-%m-%d"))
When NOT to Cache
- Functions with side effects
- Functions returning mutable objects (callers can mutate the cached value; return a copy.deepcopy if needed)
- Functions with rapidly changing results
- When memory is constrained
Summary
functools caching options:
- @lru_cache: size-limited, LRU eviction
- @cache: unlimited (Python 3.9+)
- @cached_property: property computed once per instance
Tips:
- Use cache_info() to monitor hit rates
- Call cache_clear() to invalidate
- Arguments must be hashable
- Consider TTL for time-sensitive data
React to this post: