Add cache subkey; allow non-caching requests

The cache key allows having multiple caches that use the same endpoint
(for instance, rpc.get_service_nodes for all-SNs versus single SN).

Non-caching requests still use a cached value if present, but don't
store it in the cache if they have to make an actual request.
This commit is contained in:
Jason Rhinelander 2020-08-31 13:05:17 -03:00
parent bdf9056c2a
commit 752f19784b
1 changed file with 28 additions and 8 deletions

36
lmq.py
View File

@ -22,19 +22,38 @@ cache_expiry = {}
class FutureJSON():
    """Class for making a LMQ JSON RPC request that uses a future to wait on the result, and caches
    the results for a set amount of time so that if the same endpoint with the same arguments is
    requested again the cache will be used instead of repeating the request.

    Cached values are indexed by endpoint and optional key, and require matching arguments to the
    previous call.  The cache_key should generally be a fixed value (*not* an argument-dependent
    value) and can be used to provide multiple caches for different uses of the same endpoint.
    Cache entries are *not* purged, they are only replaced, so using dynamic data in the key would
    result in unbounded memory growth.

    lmq - the lmq object
    lokid - the lokid lmq connection id object
    endpoint - the lmq endpoint, e.g. 'rpc.get_info'
    cache_seconds - how long to cache the response; can be None to not cache it at all
    cache_key - fixed string to enable different caches of the same endpoint
    args - if not None, a value to pass (after converting to JSON) as the request parameter.  Typically a dict.
    fail_okay - can be specified as True to make failures silent (i.e. if failures are sometimes expected for this request)
    timeout - maximum time to spend waiting for a reply
    """
    def __init__(self, lmq, lokid, endpoint, cache_seconds=3, *, cache_key='', args=None, fail_okay=False, timeout=10):
        self.endpoint = endpoint
        # Cache entries are keyed on endpoint + cache_key so the same endpoint can
        # back multiple independent caches (per the commit message: e.g.
        # rpc.get_service_nodes for all-SNs versus a single SN).
        self.cache_key = self.endpoint + cache_key
        self.fail_okay = fail_okay
        # Serialize the request parameter once so it can be compared byte-for-byte
        # against the cached request's arguments.
        if args is not None:
            args = json.dumps(args).encode()
        if self.cache_key in cached and cached_args[self.cache_key] == args and cache_expiry[self.cache_key] >= datetime.now():
            # Fresh cached value with identical arguments: reuse it and skip the request.
            self.json = cached[self.cache_key]
            self.args = None
            self.future = None
        else:
            self.json = None
            self.args = args
            self.future = lmq.request_future(lokid, self.endpoint, [] if self.args is None else [self.args], timeout=timeout)
        # NOTE: cache_seconds=None means the eventual reply is used but not stored
        # in the cache (see the cache-store guard in get()).
        self.cache_seconds = cache_seconds
def get(self):
@ -47,9 +66,10 @@ class FutureJSON():
if result[0] != b'200':
raise RuntimeError("Request for {} failed: got {}".format(self.endpoint, result))
self.json = json.loads(result[1])
cached[self.endpoint] = self.json
cached_args[self.endpoint] = self.args
cache_expiry[self.endpoint] = datetime.now() + timedelta(seconds=self.cache_seconds)
if self.cache_seconds is not None:
cached[self.cache_key] = self.json
cached_args[self.cache_key] = self.args
cache_expiry[self.cache_key] = datetime.now() + timedelta(seconds=self.cache_seconds)
except RuntimeError as e:
if not self.fail_okay:
print("Something getting wrong: {}".format(e), file=sys.stderr)