Python — Intermediate

LRU Cache with TTL Support

Extend functools.lru_cache with time-based expiration for caching expensive function calls with staleness control.

python
import functools
import time
from collections import OrderedDict
from typing import Callable


def ttl_cache(maxsize: int = 128, ttl_seconds: float = 300) -> Callable:
    """Decorator factory: LRU cache with per-entry time-to-live expiration.

    Cached values are returned until they are older than ``ttl_seconds``,
    after which the wrapped function is called again. When the cache is
    full, expired entries are evicted first, then the least-recently-used
    live entry.

    Args:
        maxsize: Maximum number of entries to keep.
        ttl_seconds: Seconds after which a cached value is considered stale.

    Returns:
        A decorator adding a TTL-aware LRU cache to the wrapped function.
        The wrapper exposes ``cache_clear()`` and ``cache_info()``.
    """
    def decorator(func: Callable) -> Callable:
        # OrderedDict tracks recency: most recently used entries at the end.
        # (The original dict-based version evicted by insertion timestamp,
        # which is FIFO, not LRU — hits never refreshed recency.)
        cache: OrderedDict = OrderedDict()
        timestamps: dict = {}

        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            # Sort kwargs so call-site keyword order doesn't fragment keys.
            key = (args, tuple(sorted(kwargs.items())))
            now = time.monotonic()

            # Fresh hit: refresh recency (true LRU) and return cached value.
            if key in cache and (now - timestamps[key]) < ttl_seconds:
                cache.move_to_end(key)
                return cache[key]

            # Miss or stale: drop any stale entry for this key so the
            # capacity check below only counts other keys.
            cache.pop(key, None)
            timestamps.pop(key, None)

            if len(cache) >= maxsize:
                # Evict expired entries before sacrificing live ones.
                expired = [k for k, t in timestamps.items()
                           if (now - t) >= ttl_seconds]
                for k in expired:
                    del cache[k]
                    del timestamps[k]
                # Still full: evict least-recently-used (front of OrderedDict).
                while len(cache) >= maxsize:
                    oldest_key, _ = cache.popitem(last=False)
                    del timestamps[oldest_key]

            result = func(*args, **kwargs)
            cache[key] = result
            timestamps[key] = now
            return result

        def _cache_clear() -> None:
            """Empty the cache and its timestamps."""
            cache.clear()
            timestamps.clear()

        wrapper.cache_clear = _cache_clear
        wrapper.cache_info = lambda: {
            "size": len(cache),
            "maxsize": maxsize,
            "ttl": ttl_seconds,
        }
        return wrapper

    return decorator


# Usage:
# @ttl_cache(maxsize=100, ttl_seconds=60)
# def get_user(user_id: int) -> dict:
#     return db.query("SELECT * FROM users WHERE id = ?", (user_id,))
#
# get_user(1)  # hits DB
# get_user(1)  # returns cached
# time.sleep(61)
# get_user(1)  # hits DB again (expired)

Use Cases

  • Database query caching
  • API response memoization
  • Config value caching

Tags

Related Snippets

Similar patterns you can reuse in the same workflow.