Skip to content

Decorators

@cached performs memoization of a wrapped function:

from cachetory.caches.sync import Cache
from cachetory.decorators.sync import cached

cache = Cache[int, ...](backend=..., serializer=...)


@cached(cache)
def expensive_function(x: int) -> int:
    return 42 * x

Key functions

There are a few make_key functions provided by default:

cachetory.decorators.shared.make_default_key

make_default_key(
    callable_: Callable[..., Any], *args: Any, **kwargs: Any
) -> str

Generate a human-readable cache key out of decorated function fully-qualified name and stringified arguments.

The length of the key depends on the arguments.

Source code in cachetory/decorators/shared.py
def make_default_key(callable_: Callable[..., Any], *args: Any, **kwargs: Any) -> str:
    """
    Generate a human-readable cache key out of decorated function fully-qualified name and stringified arguments.

    The length of the key depends on the arguments.
    """

    def escape(value: Any) -> str:
        # The parts are joined with `:`, so a literal `:` is «escaped» by doubling it to `::`.
        return str(value).replace(":", "::")

    positional = [escape(argument) for argument in args]
    # Keyword arguments are sorted by name so the key is independent of call-site ordering.
    keyword = [f"{escape(name)}={escape(value)}" for name, value in sorted(kwargs.items())]
    # noinspection PyUnresolvedReferences
    return ":".join([callable_.__module__, callable_.__qualname__, *positional, *keyword])

cachetory.decorators.shared.make_default_hashed_key

make_default_hashed_key(
    callable_: Callable[..., Any], *args: Any, **kwargs: Any
) -> str

Generate a hashed fixed-length cache key given the callable and the arguments it's being called with.

Uses blake2s as the fastest algorithm from hashlib.

Source code in cachetory/decorators/shared.py
def make_default_hashed_key(callable_: Callable[..., Any], *args: Any, **kwargs: Any) -> str:
    """
    Generate a hashed fixed-length cache key given the callable and the arguments it's being called with.

    Uses `blake2s` as the fastest algorithm from `hashlib`.
    """
    readable_key = make_default_key(callable_, *args, **kwargs)
    return blake2s(readable_key.encode()).hexdigest()

Purging cache

Specific cached value can be deleted using the added purge() function, which accepts the same arguments as the original wrapped callable:

expensive_function(100500)
expensive_function.purge(100500)  # purge cached value for this argument

Synchronous @cached

Apply memoization to the wrapped callable.

Parameters:

Name Type Description Default
cache Cache[ValueT, WireT] | Callable[..., Cache[ValueT, WireT] | None] | None

Cache instance or a callable that returns a Cache instance for each function call. In the latter case the specified callable gets called with a wrapped function as the first argument, and the rest of the arguments next to it. If the callable returns None, the cache is skipped.

required
make_key Callable[..., str]

callable to generate a custom cache key per each call.

make_default_key
time_to_live timedelta | Callable[..., timedelta | None] | None

cached value expiration time or callable that returns the expiration time. The callable needs to accept keyword arguments, and it is given the cache key to compute the expiration time.

None
if_not_exists bool

controls concurrent sets: if True – avoids overwriting a cached value.

False
exclude Callable[[str, ValueT], bool] | None

Optional callable to prevent a key-value pair from being cached if the callable returns true.

None
Source code in cachetory/decorators/sync.py
def cached(
    cache: Cache[ValueT, WireT] | Callable[..., Cache[ValueT, WireT] | None] | None,  # no way to use `P` here
    *,
    make_key: Callable[..., str] = shared.make_default_key,  # no way to use `P` here
    time_to_live: timedelta | Callable[..., timedelta | None] | None = None,
    if_not_exists: bool = False,
    exclude: Callable[[str, ValueT], bool] | None = None,
) -> Callable[[Callable[P, ValueT]], _CachedCallable[P, ValueT]]:
    """
    Apply memoization to the wrapped callable.

    Args:
        cache:
            `Cache` instance or a callable that returns a `Cache` instance for each function call.
            In the latter case the specified callable gets called with a wrapped function as the first argument,
            and the rest of the arguments next to it.
            If the callable returns `None`, the cache is skipped.
        make_key: callable to generate a custom cache key per each call.
        time_to_live:
            cached value expiration time or callable that returns the expiration time.
            The callable needs to accept keyword arguments, and it is given the cache key to
            compute the expiration time.
        if_not_exists: controls concurrent sets: if `True` – avoids overwriting a cached value.
        exclude: Optional callable to prevent a key-value pair from being cached if the callable returns true.

    Returns:
        Decorator that wraps a callable with memoization and attaches a `purge()` function to it.
    """

    def wrap(callable_: Callable[P, ValueT], /) -> _CachedCallable[P, ValueT]:
        # Normalize plain values into callables so that per-call resolution below is uniform.
        get_cache = into_callable(cache)
        get_time_to_live = into_callable(time_to_live)

        @wraps(callable_)
        def cached_callable(*args: P.args, **kwargs: P.kwargs) -> ValueT:
            # Both the cache and the key are resolved per call from the wrapped
            # callable and its actual arguments.
            cache_ = get_cache(callable_, *args, **kwargs)
            key_ = make_key(callable_, *args, **kwargs)

            if cache_ is not None:
                with suppress(KeyError):
                    # `KeyError` normally means the value is «non-cached».
                    return cache_[key_]

            # Cache miss (or cache skipped): compute the value and optionally store it.
            value = callable_(*args, **kwargs)
            if cache_ is not None and (exclude is None or not exclude(key_, value)):
                time_to_live_ = get_time_to_live(key=key_)
                cache_.set(key_, value, time_to_live=time_to_live_, if_not_exists=if_not_exists)
            return value

        def purge(*args: P.args, **kwargs: P.kwargs) -> bool:
            """Delete the value cached for the same call arguments; return whether it existed."""
            if (cache := get_cache(callable_, *args, **kwargs)) is not None:
                key = make_key(callable_, *args, **kwargs)
                return cache.delete(key)
            else:
                return False

        cached_callable.purge = purge  # type: ignore[attr-defined]
        return cached_callable  # type: ignore[return-value]

    return wrap

Cached callable protocol

Protocol of the wrapped callable.

Source code in cachetory/decorators/sync.py
class _CachedCallable(Protocol[P, ValueT_co]):
    """Protocol of the wrapped callable."""

    # NOTE(review): `self` is omitted — the protocol appears to model a plain
    # function object (the decorator attaches `purge` as a function attribute,
    # not a bound method); confirm against the decorator implementation.
    def __call__(*args: P.args, **kwargs: P.kwargs) -> ValueT_co: ...

    def purge(*args: P.args, **kwargs: P.kwargs) -> bool:
        """
        Delete the value that was cached using the same call arguments.

        Returns:
            whether a cached value existed
        """

purge

purge(*args: P.args, **kwargs: P.kwargs) -> bool

Delete the value that was cached using the same call arguments.

Returns:

Type Description
bool

whether a cached value existed

Source code in cachetory/decorators/sync.py
# Signature-only excerpt: the actual implementation is the inner `purge`
# closure defined by the synchronous `cached` decorator shown earlier.
def purge(*args: P.args, **kwargs: P.kwargs) -> bool:
    """
    Delete the value that was cached using the same call arguments.

    Returns:
        whether a cached value existed
    """

Asynchronous @cached

Apply memoization to the wrapped callable.

Parameters:

Name Type Description Default
cache Cache[ValueT, WireT] | Callable[..., Cache[ValueT, WireT] | None] | Callable[..., Awaitable[Cache[ValueT, WireT] | None]] | None

Cache instance or a callable (sync or async) that returns a Cache instance for each function call. In the latter case the specified callable gets called with a wrapped function as the first argument, and the rest of the arguments next to it. If the callable returns None, the cache is skipped.

required
make_key Callable[..., str]

callable to generate a custom cache key per each call.

make_default_key
time_to_live timedelta | Callable[..., timedelta | None] | Callable[..., Awaitable[timedelta]] | None

cached value expiration time or a callable (sync or async) that returns the expiration time. The callable needs to accept keyword arguments, and it is given the cache key to compute the expiration time.

None
if_not_exists bool

controls concurrent sets: if True – avoids overwriting a cached value.

False
exclude Callable[[str, ValueT], bool] | Callable[[str, ValueT], Awaitable[bool]] | None

Optional callable to prevent a key-value pair from being cached if the callable returns true.

None
Source code in cachetory/decorators/async_.py
def cached(
    cache: Cache[ValueT, WireT]
    | Callable[..., Cache[ValueT, WireT] | None]
    | Callable[..., Awaitable[Cache[ValueT, WireT] | None]]
    | None,
    *,
    make_key: Callable[..., str] = shared.make_default_key,  # no way to use `P` here
    time_to_live: timedelta | Callable[..., timedelta | None] | Callable[..., Awaitable[timedelta]] | None = None,
    if_not_exists: bool = False,
    exclude: Callable[[str, ValueT], bool] | Callable[[str, ValueT], Awaitable[bool]] | None = None,
) -> Callable[[Callable[P, Awaitable[ValueT]]], _CachedCallable[P, Awaitable[ValueT]]]:
    """
    Apply memoization to the wrapped callable.

    Args:
        cache:
            `Cache` instance or a callable (sync or async) that returns a `Cache` instance for each function call.
            In the latter case the specified callable gets called with a wrapped function as the first argument,
            and the rest of the arguments next to it.
            If the callable returns `None`, the cache is skipped.
        make_key: callable to generate a custom cache key per each call.
        time_to_live:
            cached value expiration time or a callable (sync or async) that returns the expiration time.
            The callable needs to accept keyword arguments, and it is given the cache key to
            compute the expiration time.
        if_not_exists: controls concurrent sets: if `True` – avoids overwriting a cached value.
        exclude: Optional callable to prevent a key-value pair from being cached if the callable returns true.

    Returns:
        Decorator that wraps an async callable with memoization and attaches an async `purge()` function to it.
    """

    def wrap(callable_: Callable[P, Awaitable[ValueT]], /) -> _CachedCallable[P, Awaitable[ValueT]]:
        # Normalize plain values and sync callables into async callables so every
        # per-call resolution below can simply be awaited.
        get_cache = into_async_callable(cache)
        get_time_to_live = into_async_callable(time_to_live)
        exclude_: Callable[[str, ValueT], Awaitable[bool]] | None = (
            into_async_callable(exclude) if exclude is not None else None
        )

        @wraps(callable_)
        async def cached_callable(*args: P.args, **kwargs: P.kwargs) -> ValueT:
            cache_ = await get_cache(callable_, *args, **kwargs)
            key_ = make_key(callable_, *args, **kwargs)

            if cache_ is not None:
                value = await cache_.get(key_)
            else:
                value = None

            # NOTE: `None` from `cache_.get()` is treated as a cache miss, so a
            # computed `None` result is never served from the cache here — it is
            # recomputed on every call (unlike the sync variant, which detects
            # misses via `KeyError` and can therefore cache `None`).
            if value is None:
                value = await callable_(*args, **kwargs)
                if cache_ is not None and (exclude_ is None or not await exclude_(key_, value)):
                    time_to_live_ = await get_time_to_live(key=key_)
                    await cache_.set(key_, value, time_to_live=time_to_live_, if_not_exists=if_not_exists)

            return value

        async def purge(*args: P.args, **kwargs: P.kwargs) -> bool:
            """
            Delete the value that was cached using the same call arguments.

            Returns:
                whether a cached value existed
            """
            if (cache := await get_cache(callable_, *args, **kwargs)) is not None:
                key = make_key(callable_, *args, **kwargs)
                return await cache.delete(key)
            else:
                return False

        cached_callable.purge = purge  # type: ignore[attr-defined]
        return cached_callable  # type: ignore[return-value]

    return wrap

Cached callable protocol

Protocol of the wrapped callable.

Source code in cachetory/decorators/async_.py
class _CachedCallable(Protocol[P, ValueT_co]):
    """Protocol of the wrapped callable."""

    # NOTE(review): `self` is omitted — the protocol appears to model a plain
    # function object (the decorator attaches `purge` as a function attribute,
    # not a bound method); confirm against the decorator implementation.
    def __call__(*args: P.args, **kwargs: P.kwargs) -> ValueT_co: ...

    async def purge(*args: P.args, **kwargs: P.kwargs) -> bool:
        """
        Delete the value that was cached using the same call arguments.

        Returns:
            whether a cached value existed
        """

purge async

purge(*args: P.args, **kwargs: P.kwargs) -> bool

Delete the value that was cached using the same call arguments.

Returns:

Type Description
bool

whether a cached value existed

Source code in cachetory/decorators/async_.py
# Signature-only excerpt: the actual implementation is the inner `purge`
# coroutine defined by the asynchronous `cached` decorator shown earlier.
async def purge(*args: P.args, **kwargs: P.kwargs) -> bool:
    """
    Delete the value that was cached using the same call arguments.

    Returns:
        whether a cached value existed
    """