author     Jan Kara <jack@suse.cz>          2016-02-22 12:33:03 -0500
committer  Theodore Ts'o <tytso@mit.edu>    2016-02-22 12:33:03 -0500
commit     c2f3140fe2eceb3a6c1615b2648b9471544881c6 (patch)
tree       40c4db4b8b362496ef6365bb676cd3591e4de485 /fs/mbcache2.c
parent     ecd1e64412d5242b8afdef58a714bab3c5464f79 (diff)
mbcache2: limit cache size
So far the number of entries in mbcache has been limited only by pressure from the shrinker. Since too many entries degrade the hash table, and caching more entries generally has diminishing returns, limit the number of entries the same way as in the old mbcache: to 16 * hash table size. Once we exceed the desired maximum number of entries, we schedule background work to reclaim entries. If the background work cannot keep up and the number of entries exceeds twice the desired maximum, we reclaim some entries directly when allocating a new entry.

Signed-off-by: Jan Kara <jack@suse.cz>
Signed-off-by: Theodore Ts'o <tytso@mit.edu>
Diffstat (limited to 'fs/mbcache2.c')
 -rw-r--r--  fs/mbcache2.c | 50
 1 files changed, 45 insertions(+), 5 deletions(-)
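For orientation before the hunks below, here is a condensed sketch of the reclaim policy the patch introduces. It is not verbatim from the diff: reclaim_if_needed() is a hypothetical helper used only to collect the checks in one place; in the patch the same two tests sit at the top of mb2_cache_entry_create(), and the worker and constants are defined elsewhere in the file.

/*
 * Condensed sketch of the added policy (not verbatim from the patch).
 * reclaim_if_needed() is a hypothetical helper for illustration.
 */
#define SYNC_SHRINK_BATCH	64	/* entries reclaimed synchronously */
#define SHRINK_DIVISOR		16	/* worker frees 1/16 of c_max_entries */

static void reclaim_if_needed(struct mb2_cache *cache)
{
	/* Soft limit reached: let the workqueue trim the cache. */
	if (cache->c_entry_count >= cache->c_max_entries)
		schedule_work(&cache->c_shrink_work);
	/* Hard limit (2x): background reclaim cannot keep up, shrink inline. */
	if (cache->c_entry_count >= 2 * cache->c_max_entries)
		mb2_cache_shrink(cache, SYNC_SHRINK_BATCH);
}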
diff --git a/fs/mbcache2.c b/fs/mbcache2.c
index 5c3e1a8c38f6..3e3198d6b9d6 100644
--- a/fs/mbcache2.c
+++ b/fs/mbcache2.c
@@ -4,6 +4,7 @@
 #include <linux/list_bl.h>
 #include <linux/module.h>
 #include <linux/sched.h>
+#include <linux/workqueue.h>
 #include <linux/mbcache2.h>
 
 /*
@@ -27,16 +28,29 @@ struct mb2_cache {
 	struct hlist_bl_head	*c_hash;
 	/* log2 of hash table size */
 	int			c_bucket_bits;
+	/* Maximum entries in cache to avoid degrading hash too much */
+	int			c_max_entries;
 	/* Protects c_lru_list, c_entry_count */
 	spinlock_t		c_lru_list_lock;
 	struct list_head	c_lru_list;
 	/* Number of entries in cache */
 	unsigned long		c_entry_count;
 	struct shrinker		c_shrink;
+	/* Work for shrinking when the cache has too many entries */
+	struct work_struct	c_shrink_work;
 };
 
 static struct kmem_cache *mb2_entry_cache;
 
+static unsigned long mb2_cache_shrink(struct mb2_cache *cache,
+				      unsigned int nr_to_scan);
+
+/*
+ * Number of entries to reclaim synchronously when there are too many entries
+ * in cache
+ */
+#define SYNC_SHRINK_BATCH 64
+
 /*
  * mb2_cache_entry_create - create entry in cache
  * @cache - cache where the entry should be created
@@ -55,6 +69,13 @@ int mb2_cache_entry_create(struct mb2_cache *cache, gfp_t mask, u32 key,
 	struct hlist_bl_node *dup_node;
 	struct hlist_bl_head *head;
 
+	/* Schedule background reclaim if there are too many entries */
+	if (cache->c_entry_count >= cache->c_max_entries)
+		schedule_work(&cache->c_shrink_work);
+	/* Do some sync reclaim if background reclaim cannot keep up */
+	if (cache->c_entry_count >= 2*cache->c_max_entries)
+		mb2_cache_shrink(cache, SYNC_SHRINK_BATCH);
+
 	entry = kmem_cache_alloc(mb2_entry_cache, mask);
 	if (!entry)
 		return -ENOMEM;
@@ -223,12 +244,9 @@ static unsigned long mb2_cache_count(struct shrinker *shrink,
 }
 
 /* Shrink number of entries in cache */
-static unsigned long mb2_cache_scan(struct shrinker *shrink,
-				    struct shrink_control *sc)
+static unsigned long mb2_cache_shrink(struct mb2_cache *cache,
+				      unsigned int nr_to_scan)
 {
-	int nr_to_scan = sc->nr_to_scan;
-	struct mb2_cache *cache = container_of(shrink, struct mb2_cache,
-					       c_shrink);
 	struct mb2_cache_entry *entry;
 	struct hlist_bl_head *head;
 	unsigned int shrunk = 0;
@@ -261,6 +279,25 @@ static unsigned long mb2_cache_scan(struct shrinker *shrink,
 	return shrunk;
 }
 
+static unsigned long mb2_cache_scan(struct shrinker *shrink,
+				    struct shrink_control *sc)
+{
+	int nr_to_scan = sc->nr_to_scan;
+	struct mb2_cache *cache = container_of(shrink, struct mb2_cache,
+					       c_shrink);
+	return mb2_cache_shrink(cache, nr_to_scan);
+}
+
+/* We shrink 1/X of the cache when we have too many entries in it */
+#define SHRINK_DIVISOR 16
+
+static void mb2_cache_shrink_worker(struct work_struct *work)
+{
+	struct mb2_cache *cache = container_of(work, struct mb2_cache,
+					       c_shrink_work);
+	mb2_cache_shrink(cache, cache->c_max_entries / SHRINK_DIVISOR);
+}
+
 /*
  * mb2_cache_create - create cache
  * @bucket_bits: log2 of the hash table size
@@ -280,6 +317,7 @@ struct mb2_cache *mb2_cache_create(int bucket_bits)
 	if (!cache)
 		goto err_out;
 	cache->c_bucket_bits = bucket_bits;
+	cache->c_max_entries = bucket_count << 4;
 	INIT_LIST_HEAD(&cache->c_lru_list);
 	spin_lock_init(&cache->c_lru_list_lock);
 	cache->c_hash = kmalloc(bucket_count * sizeof(struct hlist_bl_head),
@@ -296,6 +334,8 @@ struct mb2_cache *mb2_cache_create(int bucket_bits)
 	cache->c_shrink.seeks = DEFAULT_SEEKS;
 	register_shrinker(&cache->c_shrink);
 
+	INIT_WORK(&cache->c_shrink_work, mb2_cache_shrink_worker);
+
 	return cache;
 
 err_out:
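To make the sizing concrete, a worked example under an assumed bucket_bits value; the value 10 is hypothetical and not taken from any caller, only the formulas come from the patch.

/*
 * Hypothetical caller, for illustration only.  With bucket_bits = 10 the
 * cache gets 1 << 10 = 1024 hash buckets, so c_max_entries = 1024 << 4 =
 * 16384.  Background reclaim is scheduled once 16384 entries exist, each
 * worker run frees 16384 / SHRINK_DIVISOR = 1024 entries, and allocations
 * fall back to synchronous reclaim of SYNC_SHRINK_BATCH (64) entries once
 * the count reaches 2 * 16384 = 32768.
 */
struct mb2_cache *cache = mb2_cache_create(10);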