from collections import defaultdict
from functools import lru_cache, wraps
from functools import _make_key  # the same key-building helper lru_cache uses internally
from threading import RLock


def locking_lru_cache(maxsize=128, typed=False):  # can't implement ignored_keywords because we use python's lru_cache...
    "An lru cache with a lock, to prevent concurrent invocations and allow reusing from cache"
    def deco(func):
        caching_func = lru_cache(maxsize, typed)(func)
        func._main_lock = RLock()               # guards the per-key lock registry
        func._keyed_locks = defaultdict(RLock)  # one lock per distinct argument key

        @wraps(func)
        def inner(*args, **kwargs):
            key = _make_key(args, kwargs, typed=typed)
            with func._main_lock:
                key_lock = func._keyed_locks[key]  # get (or lazily create) this key's lock
            with key_lock:
                # only one thread computes a given key; waiters then hit the cache
                return caching_func(*args, **kwargs)

        @wraps(caching_func.cache_clear)
        def clear():
            with func._main_lock:
                return caching_func.cache_clear()
        inner.cache_clear = clear

        return inner
    return deco
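A minimal usage sketch (the slow_lookup function, the sleep duration, and the thread count are illustrative assumptions, not part of the original): two threads calling with the same argument serialize on that key's lock, so the expensive body runs only once and the second caller is served from the cache.

import threading
import time

@locking_lru_cache(maxsize=32)
def slow_lookup(x):  # hypothetical expensive function, for illustration only
    time.sleep(1)    # simulate slow work
    return x * x

# Both threads request the same key; the second waits on the key lock,
# then returns from lru_cache instead of recomputing.
threads = [threading.Thread(target=slow_lookup, args=(7,)) for _ in range(2)]
for t in threads:
    t.start()
for t in threads:
    t.join()

slow_lookup.cache_clear()  # the lock-protected clear exposed by the decorator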