I'd like to activate or deactivate a "cache" in some class method during execution.
I found a way to activate it with something like this:
(...)
setattr(self, "_greedy_function", my_cache_decorator(self._cache)(getattr(self, "_greedy_function")))
(...)
where `self._cache` is a cache object of my own that stores the results of `self._greedy_function`.
It's working fine, but now what if I want to deactivate the cache and "undecorate" `_greedy_function`?
I see a possible solution, storing the reference of _greedy_function
before decorating it but maybe there is a way to retrieve it from the decorated function and that would be better.
As requested, here are the decorator and the cache object I'm using to cache results of my class functions:
import logging
from collections import OrderedDict, namedtuple
from functools import wraps
# Configure root logging once at import time: WARNING keeps third-party
# noise down, while this module's own logger (below) is raised to INFO.
logging.basicConfig(
level=logging.WARNING,
format='%(asctime)s %(name)s %(levelname)s %(message)s'
)
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
# Same field layout as the namedtuple functools.lru_cache().cache_info() returns.
CacheInfo = namedtuple("CacheInfo", "hits misses maxsize currsize")
def lru_cache(cache):
    """
    A replacement for functools.lru_cache() built on a custom LRU class.

    Unlike functools.lru_cache(), the cache mapping is supplied by the
    caller, so it can be shared, inspected, replaced, or disabled at
    runtime -- which also makes it usable on class methods.

    Parameters
    ----------
    cache : MutableMapping
        Any mapping whose __getitem__ raises KeyError on a miss
        (e.g. the LRU class below, or a plain dict).

    Returns
    -------
    callable
        A decorator wrapping *func* with cache lookups.  The original
        function stays reachable through the __wrapped__ attribute set
        by functools.wraps, which is how the wrapper can be removed
        ("undecorated") later.
    """
    def decorator(func):
        logger = logging.getLogger(__name__)
        logger.debug("assigning cache %r to function %s", cache, func.__name__)

        @wraps(func)
        def wrapped_func(*args, **kwargs):
            # Keyword arguments must participate in the key: keyed on
            # args alone, calls differing only in kwargs would wrongly
            # share a cache slot.  Positional-only calls keep the
            # original key shape (the bare args tuple).
            key = args + tuple(sorted(kwargs.items())) if kwargs else args
            try:
                ret = cache[key]
            except KeyError:
                # Miss: compute, store, return.  (No blanket try/except
                # around the call -- exceptions propagate unchanged.)
                ret = func(*args, **kwargs)
                cache[key] = ret
                logger.debug("cache updated for function %s", func.__name__)
            else:
                logger.debug("cached value returned for function %s", func.__name__)
            return ret
        return wrapped_func
    return decorator
class LRU(OrderedDict):
    """
    A least-recently-used cache built on top of OrderedDict.

    A successful lookup moves the key to the most-recent end; once the
    cache grows past ``maxsize`` the least-recent entry is evicted.
    ``LRU(None)`` evaluates to None, which callers use as the
    "caching disabled" switch.
    """

    # Only the bookkeeping counters -- no per-instance __dict__.
    __slots__ = "_hits", "_misses", "_maxsize"

    def __new__(cls, maxsize=128):
        # maxsize=None means "no cache at all": return None instead of
        # an instance (so __init__ is never run).
        if maxsize is None:
            return None
        return super().__new__(cls, maxsize=maxsize)

    def __init__(self, maxsize=128, *args, **kwargs):
        self.maxsize = maxsize  # validated/applied by the property setter
        self._hits = 0
        self._misses = 0
        super().__init__(*args, **kwargs)

    def __getitem__(self, key):
        try:
            value = super().__getitem__(key)
        except KeyError:
            self._misses += 1
            raise
        else:
            # A hit refreshes the key's recency.
            self.move_to_end(key)
            self._hits += 1
            return value

    def __setitem__(self, key, value):
        super().__setitem__(key, value)
        if len(self) > self._maxsize:
            # Evict the least recently used entry (the first key).
            # NOTE: the original ``oldest, = next(iter(self))`` unpacked
            # the key and only worked when every key was a 1-tuple.
            oldest = next(iter(self))
            super().__delitem__(oldest)

    def __delitem__(self, key):
        # Best-effort delete: a missing key is silently ignored.
        # (The original wrapped the key in an extra tuple, so direct
        # deletes never matched and silently did nothing.)
        try:
            super().__delitem__(key)
        except KeyError:
            pass

    def __repr__(self):
        return "<%s object at %s: %s>" % (
            self.__class__.__name__, hex(id(self)), self.cache_info())

    def cache_info(self):
        """Return a CacheInfo namedtuple mirroring functools.lru_cache()."""
        return CacheInfo(self._hits, self._misses, self._maxsize, len(self))

    def clear(self):
        """Empty the cache and reset the hit/miss counters."""
        super().clear()
        self._hits, self._misses = 0, 0

    @property
    def maxsize(self):
        return self._maxsize

    @maxsize.setter
    def maxsize(self, maxsize):
        if not isinstance(maxsize, int):
            raise TypeError("maxsize must be an integer")
        elif maxsize < 2:
            raise ValueError("maxsize must be >= 2")
        elif maxsize & (maxsize - 1) != 0:
            logger.warning("LRU feature performs best when maxsize is a power-of-two, maybe.")
        # Shrinking evicts least-recent entries until the cache fits.
        # (Stray debug print removed; same 1-tuple unpack bug fixed.)
        while maxsize < len(self):
            oldest = next(iter(self))
            super().__delitem__(oldest)
        self._maxsize = maxsize
Edit: I've updated my code using the __wrapped__ attribute suggested in comments and it's working fine! The whole thing is here: https://gist.github.com/fbparis/b3ddd5673b603b42c880974b23db7cda (kik.set_cache() method...)
`_greedy_function` is generated by `my_cache_decorator`, so the question is already clearly defined — although it would be better if the OP could provide more context about the decorator.

Where is `lru_cache` used in your class? I don't see any reference to it after its declaration.

You can use the `__wrapped__` attribute for this purpose. That said, I'd recommend using/making a wrapper that provides a switch, rather than removing and reinstating the wrapper — and avoiding `setattr`/`getattr` with an identifier as a string literal.