author     T Makphaibulchoke <tmac@hp.com>    2014-03-18 19:19:41 -0400
committer  Theodore Ts'o <tytso@mit.edu>      2014-03-18 19:19:41 -0400
commit     3e037e5211252902a188a6a11aecd247409d0229 (patch)
tree       55bddffd164cc22ceba4020d9a0e9d2f666d40d4 /fs/mbcache.c
parent     b8a8684502a0fc852afa0056c6bb2a9273f6fcc0 (diff)
fs/mbcache.c: change block and index hash chain to hlist_bl_node
This patch changes both the block and index hash chains of each mb_cache
to use hlist_bl_node, which provides a built-in lock. This is the first
step in decoupling the locks that serialize access to mb_cache global
data from those protecting each mb_cache_entry's local data.

Signed-off-by: T. Makphaibulchoke <tmac@hp.com>
Signed-off-by: "Theodore Ts'o" <tytso@mit.edu>
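As context for the data-structure choice, a minimal sketch (not part of
this patch; the example_entry type and example_hash table are
hypothetical) of the hlist_bl idiom being adopted: bit 0 of each bucket's
head pointer doubles as a bit spinlock, so a per-bucket lock costs no
extra storage. Note that this patch only converts the chains; they remain
serialized by the global mb_cache_spinlock until a later patch starts
taking the per-bucket locks.

#include <linux/list_bl.h>

/* Hypothetical entry chained into one hash bucket. */
struct example_entry {
        struct hlist_bl_node node;
        unsigned long key;
};

/*
 * Hypothetical 16-bucket table; a zeroed hlist_bl_head is an empty,
 * unlocked bucket, so a static array needs no explicit init.
 */
static struct hlist_bl_head example_hash[16];

static void example_insert(struct example_entry *e, unsigned int bucket)
{
        struct hlist_bl_head *head = &example_hash[bucket];

        hlist_bl_lock(head);    /* bit_spin_lock on bit 0 of the head pointer */
        hlist_bl_add_head(&e->node, head);
        hlist_bl_unlock(head);
}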
Diffstat (limited to 'fs/mbcache.c')
-rw-r--r--    fs/mbcache.c    117
1 file changed, 78 insertions(+), 39 deletions(-)
diff --git a/fs/mbcache.c b/fs/mbcache.c
index e519e45bf673..55db0daaca74 100644
--- a/fs/mbcache.c
+++ b/fs/mbcache.c
@@ -34,9 +34,9 @@
 #include <linux/mm.h>
 #include <linux/slab.h>
 #include <linux/sched.h>
-#include <linux/init.h>
+#include <linux/list_bl.h>
 #include <linux/mbcache.h>
-
+#include <linux/init.h>
 
 #ifdef MB_CACHE_DEBUG
 # define mb_debug(f...) do { \
@@ -87,21 +87,38 @@ static LIST_HEAD(mb_cache_lru_list);
 static DEFINE_SPINLOCK(mb_cache_spinlock);
 
 static inline int
-__mb_cache_entry_is_hashed(struct mb_cache_entry *ce)
+__mb_cache_entry_is_block_hashed(struct mb_cache_entry *ce)
 {
-        return !list_empty(&ce->e_block_list);
+        return !hlist_bl_unhashed(&ce->e_block_list);
 }
 
 
-static void
-__mb_cache_entry_unhash(struct mb_cache_entry *ce)
+static inline void
+__mb_cache_entry_unhash_block(struct mb_cache_entry *ce)
 {
-        if (__mb_cache_entry_is_hashed(ce)) {
-                list_del_init(&ce->e_block_list);
-                list_del(&ce->e_index.o_list);
-        }
+        if (__mb_cache_entry_is_block_hashed(ce))
+                hlist_bl_del_init(&ce->e_block_list);
+}
+
+static inline int
+__mb_cache_entry_is_index_hashed(struct mb_cache_entry *ce)
+{
+        return !hlist_bl_unhashed(&ce->e_index.o_list);
 }
 
+static inline void
+__mb_cache_entry_unhash_index(struct mb_cache_entry *ce)
+{
+        if (__mb_cache_entry_is_index_hashed(ce))
+                hlist_bl_del_init(&ce->e_index.o_list);
+}
+
+static inline void
+__mb_cache_entry_unhash(struct mb_cache_entry *ce)
+{
+        __mb_cache_entry_unhash_index(ce);
+        __mb_cache_entry_unhash_block(ce);
+}
 
 static void
 __mb_cache_entry_forget(struct mb_cache_entry *ce, gfp_t gfp_mask)
@@ -125,7 +142,7 @@ __mb_cache_entry_release_unlock(struct mb_cache_entry *ce)
         ce->e_used -= MB_CACHE_WRITER;
         ce->e_used--;
         if (!(ce->e_used || ce->e_queued)) {
-                if (!__mb_cache_entry_is_hashed(ce))
+                if (!__mb_cache_entry_is_block_hashed(ce))
                         goto forget;
                 mb_assert(list_empty(&ce->e_lru_list));
                 list_add_tail(&ce->e_lru_list, &mb_cache_lru_list);
@@ -221,18 +238,18 @@ mb_cache_create(const char *name, int bucket_bits)
         cache->c_name = name;
         atomic_set(&cache->c_entry_count, 0);
         cache->c_bucket_bits = bucket_bits;
-        cache->c_block_hash = kmalloc(bucket_count * sizeof(struct list_head),
-                                      GFP_KERNEL);
+        cache->c_block_hash = kmalloc(bucket_count *
+                sizeof(struct hlist_bl_head), GFP_KERNEL);
         if (!cache->c_block_hash)
                 goto fail;
         for (n=0; n<bucket_count; n++)
-                INIT_LIST_HEAD(&cache->c_block_hash[n]);
-        cache->c_index_hash = kmalloc(bucket_count * sizeof(struct list_head),
-                                      GFP_KERNEL);
+                INIT_HLIST_BL_HEAD(&cache->c_block_hash[n]);
+        cache->c_index_hash = kmalloc(bucket_count *
+                sizeof(struct hlist_bl_head), GFP_KERNEL);
         if (!cache->c_index_hash)
                 goto fail;
         for (n=0; n<bucket_count; n++)
-                INIT_LIST_HEAD(&cache->c_index_hash[n]);
+                INIT_HLIST_BL_HEAD(&cache->c_index_hash[n]);
         cache->c_entry_cache = kmem_cache_create(name,
                                   sizeof(struct mb_cache_entry), 0,
                                   SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD, NULL);
@@ -364,10 +381,13 @@ mb_cache_entry_alloc(struct mb_cache *cache, gfp_t gfp_flags)
                         return NULL;
                 atomic_inc(&cache->c_entry_count);
                 INIT_LIST_HEAD(&ce->e_lru_list);
-                INIT_LIST_HEAD(&ce->e_block_list);
+                INIT_HLIST_BL_NODE(&ce->e_block_list);
+                INIT_HLIST_BL_NODE(&ce->e_index.o_list);
                 ce->e_cache = cache;
                 ce->e_queued = 0;
         }
+        ce->e_block_hash_p = &cache->c_block_hash[0];
+        ce->e_index_hash_p = &cache->c_index_hash[0];
         ce->e_used = 1 + MB_CACHE_WRITER;
         return ce;
 }
@@ -393,25 +413,32 @@ mb_cache_entry_insert(struct mb_cache_entry *ce, struct block_device *bdev,
 {
         struct mb_cache *cache = ce->e_cache;
         unsigned int bucket;
-        struct list_head *l;
+        struct hlist_bl_node *l;
         int error = -EBUSY;
+        struct hlist_bl_head *block_hash_p;
+        struct hlist_bl_head *index_hash_p;
+        struct mb_cache_entry *lce;
 
+        mb_assert(ce);
         bucket = hash_long((unsigned long)bdev + (block & 0xffffffff),
                            cache->c_bucket_bits);
+        block_hash_p = &cache->c_block_hash[bucket];
         spin_lock(&mb_cache_spinlock);
-        list_for_each_prev(l, &cache->c_block_hash[bucket]) {
-                struct mb_cache_entry *ce =
-                        list_entry(l, struct mb_cache_entry, e_block_list);
-                if (ce->e_bdev == bdev && ce->e_block == block)
+        hlist_bl_for_each_entry(lce, l, block_hash_p, e_block_list) {
+                if (lce->e_bdev == bdev && lce->e_block == block)
                         goto out;
         }
+        mb_assert(!__mb_cache_entry_is_block_hashed(ce));
         __mb_cache_entry_unhash(ce);
         ce->e_bdev = bdev;
         ce->e_block = block;
-        list_add(&ce->e_block_list, &cache->c_block_hash[bucket]);
+        ce->e_block_hash_p = block_hash_p;
         ce->e_index.o_key = key;
         bucket = hash_long(key, cache->c_bucket_bits);
-        list_add(&ce->e_index.o_list, &cache->c_index_hash[bucket]);
+        index_hash_p = &cache->c_index_hash[bucket];
+        ce->e_index_hash_p = index_hash_p;
+        hlist_bl_add_head(&ce->e_index.o_list, index_hash_p);
+        hlist_bl_add_head(&ce->e_block_list, block_hash_p);
         error = 0;
 out:
         spin_unlock(&mb_cache_spinlock);
@@ -463,14 +490,16 @@ mb_cache_entry_get(struct mb_cache *cache, struct block_device *bdev,
                     sector_t block)
 {
         unsigned int bucket;
-        struct list_head *l;
+        struct hlist_bl_node *l;
         struct mb_cache_entry *ce;
+        struct hlist_bl_head *block_hash_p;
 
         bucket = hash_long((unsigned long)bdev + (block & 0xffffffff),
                            cache->c_bucket_bits);
+        block_hash_p = &cache->c_block_hash[bucket];
         spin_lock(&mb_cache_spinlock);
-        list_for_each(l, &cache->c_block_hash[bucket]) {
-                ce = list_entry(l, struct mb_cache_entry, e_block_list);
+        hlist_bl_for_each_entry(ce, l, block_hash_p, e_block_list) {
+                mb_assert(ce->e_block_hash_p == block_hash_p);
                 if (ce->e_bdev == bdev && ce->e_block == block) {
                         DEFINE_WAIT(wait);
 
@@ -489,7 +518,7 @@ mb_cache_entry_get(struct mb_cache *cache, struct block_device *bdev,
                 finish_wait(&mb_cache_queue, &wait);
                 ce->e_used += 1 + MB_CACHE_WRITER;
 
-                if (!__mb_cache_entry_is_hashed(ce)) {
+                if (!__mb_cache_entry_is_block_hashed(ce)) {
                         __mb_cache_entry_release_unlock(ce);
                         return NULL;
                 }
@@ -506,12 +535,14 @@ cleanup:
 #if !defined(MB_CACHE_INDEXES_COUNT) || (MB_CACHE_INDEXES_COUNT > 0)
 
 static struct mb_cache_entry *
-__mb_cache_entry_find(struct list_head *l, struct list_head *head,
+__mb_cache_entry_find(struct hlist_bl_node *l, struct hlist_bl_head *head,
                       struct block_device *bdev, unsigned int key)
 {
-        while (l != head) {
+        while (l != NULL) {
                 struct mb_cache_entry *ce =
-                        list_entry(l, struct mb_cache_entry, e_index.o_list);
+                        hlist_bl_entry(l, struct mb_cache_entry,
+                                e_index.o_list);
+                mb_assert(ce->e_index_hash_p == head);
                 if (ce->e_bdev == bdev && ce->e_index.o_key == key) {
                         DEFINE_WAIT(wait);
 
@@ -532,7 +563,7 @@ __mb_cache_entry_find(struct list_head *l, struct list_head *head,
                 }
                 finish_wait(&mb_cache_queue, &wait);
 
-                if (!__mb_cache_entry_is_hashed(ce)) {
+                if (!__mb_cache_entry_is_block_hashed(ce)) {
                         __mb_cache_entry_release_unlock(ce);
                         spin_lock(&mb_cache_spinlock);
                         return ERR_PTR(-EAGAIN);
@@ -562,12 +593,16 @@ mb_cache_entry_find_first(struct mb_cache *cache, struct block_device *bdev,
                       unsigned int key)
 {
         unsigned int bucket = hash_long(key, cache->c_bucket_bits);
-        struct list_head *l;
-        struct mb_cache_entry *ce;
+        struct hlist_bl_node *l;
+        struct mb_cache_entry *ce = NULL;
+        struct hlist_bl_head *index_hash_p;
 
+        index_hash_p = &cache->c_index_hash[bucket];
         spin_lock(&mb_cache_spinlock);
-        l = cache->c_index_hash[bucket].next;
-        ce = __mb_cache_entry_find(l, &cache->c_index_hash[bucket], bdev, key);
+        if (!hlist_bl_empty(index_hash_p)) {
+                l = hlist_bl_first(index_hash_p);
+                ce = __mb_cache_entry_find(l, index_hash_p, bdev, key);
+        }
         spin_unlock(&mb_cache_spinlock);
         return ce;
 }
@@ -597,12 +632,16 @@ mb_cache_entry_find_next(struct mb_cache_entry *prev,
 {
         struct mb_cache *cache = prev->e_cache;
         unsigned int bucket = hash_long(key, cache->c_bucket_bits);
-        struct list_head *l;
+        struct hlist_bl_node *l;
         struct mb_cache_entry *ce;
+        struct hlist_bl_head *index_hash_p;
 
+        index_hash_p = &cache->c_index_hash[bucket];
+        mb_assert(prev->e_index_hash_p == index_hash_p);
         spin_lock(&mb_cache_spinlock);
+        mb_assert(!hlist_bl_empty(index_hash_p));
         l = prev->e_index.o_list.next;
-        ce = __mb_cache_entry_find(l, &cache->c_index_hash[bucket], bdev, key);
+        ce = __mb_cache_entry_find(l, index_hash_p, bdev, key);
         __mb_cache_entry_release_unlock(prev);
         return ce;
 }