path: root/fs/mbcache.c
author	Andreas Gruenbacher <agruen@suse.de>	2010-08-16 13:05:23 -0400
committer	Al Viro <viro@zeniv.linux.org.uk>	2010-08-18 06:24:41 -0400
commit	3a48ee8a4ad26c3a538b6fc11a86a8f80c3dce18 (patch)
tree	3e89deaa14c8d72c7484f61c2babc5755a9e991e /fs/mbcache.c
parent	3b6036d148bad5bb7928b021a49bb9e395361084 (diff)
mbcache: Limit the maximum number of cache entries
Limit the maximum number of mb_cache entries depending on the number of
hash buckets: if the only limit to the number of cache entries is the
available memory, the hash chains can grow very long, taking a long time
to search.

At least partially solves https://bugzilla.lustre.org/show_bug.cgi?id=22771.

Signed-off-by: Andreas Gruenbacher <agruen@suse.de>
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
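For a sense of scale, the cap introduced here works out to sixteen entries per
hash bucket on average: with bucket_bits bits of hash the cache has
1 << bucket_bits buckets, and c_max_entries = bucket_count << 4 limits the
entry count to sixteen times that. A minimal standalone sketch of that
arithmetic (plain userspace C, not kernel code; the actual change is in the
diff below):

#include <stdio.h>

/*
 * Sketch of the limit this patch introduces: the cap on cache entries
 * scales with the number of hash buckets, so the average hash chain
 * stays at or below 16 entries regardless of how much memory is free.
 */
int main(void)
{
	int bucket_bits;

	for (bucket_bits = 6; bucket_bits <= 10; bucket_bits++) {
		unsigned int bucket_count = 1U << bucket_bits;
		unsigned int max_entries = bucket_count << 4;	/* c_max_entries */

		printf("bucket_bits=%2d buckets=%5u max_entries=%6u avg chain <= %u\n",
		       bucket_bits, bucket_count, max_entries,
		       max_entries / bucket_count);
	}
	return 0;
}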
Diffstat (limited to 'fs/mbcache.c')
-rw-r--r--	fs/mbcache.c	30
1 file changed, 24 insertions, 6 deletions
diff --git a/fs/mbcache.c b/fs/mbcache.c
index cf4e6cdfd15b..93444747237b 100644
--- a/fs/mbcache.c
+++ b/fs/mbcache.c
@@ -80,6 +80,7 @@ struct mb_cache {
 	struct list_head		c_cache_list;
 	const char			*c_name;
 	atomic_t			c_entry_count;
+	int				c_max_entries;
 	int				c_bucket_bits;
 	struct kmem_cache		*c_entry_cache;
 	struct list_head		*c_block_hash;
@@ -243,6 +244,12 @@ mb_cache_create(const char *name, int bucket_bits)
 	if (!cache->c_entry_cache)
 		goto fail2;
 
+	/*
+	 * Set an upper limit on the number of cache entries so that the hash
+	 * chains won't grow too long.
+	 */
+	cache->c_max_entries = bucket_count << 4;
+
 	spin_lock(&mb_cache_spinlock);
 	list_add(&cache->c_cache_list, &mb_cache_list);
 	spin_unlock(&mb_cache_spinlock);
@@ -333,7 +340,6 @@ mb_cache_destroy(struct mb_cache *cache)
 	kfree(cache);
 }
 
-
 /*
  * mb_cache_entry_alloc()
  *
@@ -345,17 +351,29 @@ mb_cache_destroy(struct mb_cache *cache)
 struct mb_cache_entry *
 mb_cache_entry_alloc(struct mb_cache *cache, gfp_t gfp_flags)
 {
-	struct mb_cache_entry *ce;
+	struct mb_cache_entry *ce = NULL;
 
-	ce = kmem_cache_alloc(cache->c_entry_cache, gfp_flags);
-	if (ce) {
+	if (atomic_read(&cache->c_entry_count) >= cache->c_max_entries) {
+		spin_lock(&mb_cache_spinlock);
+		if (!list_empty(&mb_cache_lru_list)) {
+			ce = list_entry(mb_cache_lru_list.next,
+					struct mb_cache_entry, e_lru_list);
+			list_del_init(&ce->e_lru_list);
+			__mb_cache_entry_unhash(ce);
+		}
+		spin_unlock(&mb_cache_spinlock);
+	}
+	if (!ce) {
+		ce = kmem_cache_alloc(cache->c_entry_cache, gfp_flags);
+		if (!ce)
+			return NULL;
 		atomic_inc(&cache->c_entry_count);
 		INIT_LIST_HEAD(&ce->e_lru_list);
 		INIT_LIST_HEAD(&ce->e_block_list);
 		ce->e_cache = cache;
-		ce->e_used = 1 + MB_CACHE_WRITER;
 		ce->e_queued = 0;
 	}
+	ce->e_used = 1 + MB_CACHE_WRITER;
 	return ce;
 }
 