diff --git a/relstorage/cache/local_client.py b/relstorage/cache/local_client.py
index 84611665..709b1b2e 100644
--- a/relstorage/cache/local_client.py
+++ b/relstorage/cache/local_client.py
@@ -37,6 +37,8 @@ class LocalClient(object):
         b'.b': bz2.decompress
     }
 
+    _bucket_type = LocalClientBucket
+
     def __init__(self, options, prefix=None):
         self._lock = threading.Lock()
         self.options = options
@@ -87,7 +89,7 @@ def _bucket0(self):
 
     def flush_all(self):
         with self._lock:
-            self.__bucket = LocalClientBucket(self._bucket_limit)
+            self.__bucket = self._bucket_type(self._bucket_limit)
             options = self.options
             if options.cache_local_dir:
                 _Loader.load_local_cache(options, self.prefix, self._bucket0)
diff --git a/relstorage/cache/mapping.py b/relstorage/cache/mapping.py
index 9c714104..ccfeaf4f 100644
--- a/relstorage/cache/mapping.py
+++ b/relstorage/cache/mapping.py
@@ -53,7 +53,7 @@ class SizedLRUMapping(object):
 
     # When did we last age?
     _aged_at = 0
-
+    _cache_type = Cache
 
     def __init__(self, limit):
         # We experimented with using OOBTree and LOBTree
@@ -67,7 +67,7 @@ def __init__(self, limit):
 
         # large BTrees, but since that's not the case, we abandoned the idea.
 
         # This holds all the ring entries, no matter which ring they are in.
-        cache = self._cache = Cache(limit)
+        cache = self._cache = self._cache_type(limit)
         self._dict = cache.data
 
diff --git a/relstorage/cache/micro_benchmark_results.rst b/relstorage/cache/micro_benchmark_results.rst
index c72dc15c..131ff53a 100644
--- a/relstorage/cache/micro_benchmark_results.rst
+++ b/relstorage/cache/micro_benchmark_results.rst
@@ -157,6 +157,18 @@ web07       76,118      20,484    53,283     22,835      8
 web12       95,607      13,756    66,925     28,682      8
 ==========  ==========  ========  =========  ==========  ====
 
+Note that Financial1 and Financial2 are OLTP traces of a journal file,
+and orm-busy and orm-night are traces of an ORM session cache with
+short transactions. Both of these are dominated by *recency* and are
+thus very easy for LRU caches; a frequency cache like the new code has
+more trouble with them at smaller sizes. They are included to
+demonstrate worst-case performance and are probably not representative
+of typical RelStorage cache workloads (a RelStorage workload will have
+some objects, such as catalog BTree objects, that are frequently
+accessed which shouldn't be ejected if a more rare query occurs).
+The hit rates of these workloads are strongly correlated to the size
+of the eden generation.
+
 Cache simulation
 ----------------
 
diff --git a/relstorage/cache/tests/test_cache.py b/relstorage/cache/tests/test_cache.py
index 8c7bd2b8..8b064e48 100644
--- a/relstorage/cache/tests/test_cache.py
+++ b/relstorage/cache/tests/test_cache.py
@@ -17,6 +17,23 @@ from relstorage.tests.util import skipOnCI
 from functools import partial
 
+from relstorage.cache.cache_ring import Cache as _BaseCache
+class Cache(_BaseCache):
+    # Tweak the generation sizes to match what we developed the tests with
+    _gen_protected_pct = 0.8
+    _gen_eden_pct = 0.1
+
+from relstorage.cache.mapping import SizedLRUMapping as _BaseSizedLRUMapping
+
+class SizedLRUMapping(_BaseSizedLRUMapping):
+    _cache_type = Cache
+
+from relstorage.cache.local_client import LocalClient as _BaseLocalClient
+
+class LocalClient(_BaseLocalClient):
+    _bucket_type = SizedLRUMapping
+
+
 
 class StorageCacheTests(unittest.TestCase):
 
     def setUp(self):
@@ -386,7 +403,6 @@ def assertNotNone(self, o):
             raise AssertionError("Expected not None")
 
     def getClass(self):
-        from relstorage.cache.mapping import SizedLRUMapping
         return SizedLRUMapping
 
    def test_age_empty(self):
@@ -738,7 +754,6 @@ def test_load_from_multiple_files_hit_limit(self):
 class LocalClientTests(unittest.TestCase):
 
     def getClass(self):
-        from relstorage.cache.local_client import LocalClient
         return LocalClient
 
     def _makeOne(self, **kw):
@@ -1123,7 +1138,6 @@ def test_bool(self):
         self.assertFalse(lru)
 
     def test_free_reuse(self):
-        from relstorage.cache.cache_ring import Cache
        cache = Cache(20)
         lru = cache.protected
         self.assertEqual(lru.limit, 16)
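
For context on how the new hooks are meant to be used: `_cache_type` on SizedLRUMapping, `_bucket_type` on LocalClient, and the generation-percentage attributes overridden in the test subclasses are plain class attributes, so a test (or an experiment with the eden-sensitive workloads noted in the benchmark text) can swap implementations by subclassing rather than monkey-patching. The sketch below is illustrative only and is not part of the patch; the BigEdenCache/BigEdenMapping names, the 0.25/0.65 split, and the assumption that the remaining share goes to a probation generation are mine, not the library's.

    # Illustrative sketch only -- not part of this patch.  BigEdenCache,
    # BigEdenMapping, and the percentage values below are hypothetical.
    from relstorage.cache.cache_ring import Cache as _BaseCache
    from relstorage.cache.mapping import SizedLRUMapping as _BaseSizedLRUMapping


    class BigEdenCache(_BaseCache):
        # Give eden a larger share of the limit than the 0.8/0.1 split the
        # tests above pin; the remainder presumably goes to the third
        # (probation) generation.
        _gen_eden_pct = 0.25
        _gen_protected_pct = 0.65


    class BigEdenMapping(_BaseSizedLRUMapping):
        # SizedLRUMapping now builds its ring cache from this attribute
        # instead of a hard-coded reference to cache_ring.Cache.
        _cache_type = BigEdenCache


    # Same constructor signature as in mapping.py: a single size limit.
    mapping = BigEdenMapping(10000)

Because the tests pin their own Cache subclass with the 0.8/0.1 split they were developed against, later changes to the library defaults will not silently alter the generation sizes the assertions expect (for example, test_free_reuse relies on Cache(20).protected.limit being 16, i.e. 0.8 of 20).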