author	NeilBrown <neilb@suse.de>	2010-08-12 03:04:08 -0400
committer	J. Bruce Fields <bfields@redhat.com>	2010-09-21 21:51:11 -0400
commit	1117449276bb909b029ed0b9ba13f53e4784db9d (patch)
tree	14b119924ac54165e6b18f3772cfe966b6883245 /net/sunrpc/cache.c
parent	2ed5282cd9b44686a6e718269abb5c5cd332d8f1 (diff)
sunrpc/cache: change deferred-request hash table to use hlist.
Since the deferred-request table is a hash table, hlist is the natural choice.
There is currently some ugliness where we treat "->next == NULL" as a
special case to avoid having to initialise the whole array.
This change nicely gets rid of that special case.
Signed-off-by: NeilBrown <neilb@suse.de>
Signed-off-by: J. Bruce Fields <bfields@redhat.com>
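
To illustrate why an hlist-based bucket array avoids that initialisation dance, here is a minimal, self-contained userspace sketch of the idea. It re-implements just enough of an hlist (single-pointer head, pprev back-pointer in the node) to show that a zero-filled static array is already a table of valid empty buckets, whereas a list_head bucket is only usable after INIT_LIST_HEAD(). This is illustrative code only, not the kernel's <linux/list.h>; the helper names merely mirror the kernel ones for readability.

/*
 * Sketch: why an all-zero hlist_head array needs no initialisation.
 * Not the kernel implementation; same idea, userspace-compilable.
 */
#include <stddef.h>
#include <stdio.h>

struct hlist_node { struct hlist_node *next, **pprev; };
struct hlist_head { struct hlist_node *first; };

static void hlist_add_head(struct hlist_node *n, struct hlist_head *h)
{
	struct hlist_node *first = h->first;

	n->next = first;
	if (first)
		first->pprev = &n->next;
	h->first = n;
	n->pprev = &h->first;
}

static void hlist_del_init(struct hlist_node *n)
{
	if (n->pprev) {			/* only if actually hashed */
		*n->pprev = n->next;
		if (n->next)
			n->next->pprev = n->pprev;
		n->next = NULL;
		n->pprev = NULL;	/* pprev == NULL marks "unhashed" */
	}
}

static int hlist_unhashed(const struct hlist_node *n)
{
	return !n->pprev;
}

#define DFR_HASHSIZE 32

/* Zero-filled static storage is already a table of valid empty buckets:
 * an empty bucket is simply .first == NULL.  A list_head array would
 * need each entry pointing back at itself before first use. */
static struct hlist_head defer_hash[DFR_HASHSIZE];

int main(void)
{
	struct hlist_node req = { 0 };

	hlist_add_head(&req, &defer_hash[7]);
	printf("hashed after add: %d\n", !hlist_unhashed(&req));	/* 1 */

	hlist_del_init(&req);
	printf("hashed after del: %d\n", !hlist_unhashed(&req));	/* 0 */
	return 0;
}

With the real kernel helpers, this same property is what lets the patch drop the "->next == NULL" check in __hash_deferred_req() and test membership with hlist_unhashed() in cache_wait_req().
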
Diffstat (limited to 'net/sunrpc/cache.c')
-rw-r--r--	net/sunrpc/cache.c	28
1 file changed, 10 insertions(+), 18 deletions(-)
diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c
index ca7c621cd975..2a8405194056 100644
--- a/net/sunrpc/cache.c
+++ b/net/sunrpc/cache.c
@@ -506,13 +506,13 @@ EXPORT_SYMBOL_GPL(cache_purge);
 
 static DEFINE_SPINLOCK(cache_defer_lock);
 static LIST_HEAD(cache_defer_list);
-static struct list_head cache_defer_hash[DFR_HASHSIZE];
+static struct hlist_head cache_defer_hash[DFR_HASHSIZE];
 static int cache_defer_cnt;
 
 static void __unhash_deferred_req(struct cache_deferred_req *dreq)
 {
 	list_del_init(&dreq->recent);
-	list_del_init(&dreq->hash);
+	hlist_del_init(&dreq->hash);
 	cache_defer_cnt--;
 }
 
@@ -521,9 +521,7 @@ static void __hash_deferred_req(struct cache_deferred_req *dreq, struct cache_he
 	int hash = DFR_HASH(item);
 
 	list_add(&dreq->recent, &cache_defer_list);
-	if (cache_defer_hash[hash].next == NULL)
-		INIT_LIST_HEAD(&cache_defer_hash[hash]);
-	list_add(&dreq->hash, &cache_defer_hash[hash]);
+	hlist_add_head(&dreq->hash, &cache_defer_hash[hash]);
 }
 
 static int setup_deferral(struct cache_deferred_req *dreq, struct cache_head *item)
@@ -588,7 +586,7 @@ static int cache_wait_req(struct cache_req *req, struct cache_head *item)
 	 * to clean up
 	 */
 	spin_lock(&cache_defer_lock);
-	if (!list_empty(&sleeper.handle.hash)) {
+	if (!hlist_unhashed(&sleeper.handle.hash)) {
 		__unhash_deferred_req(&sleeper.handle);
 		spin_unlock(&cache_defer_lock);
 	} else {
@@ -642,24 +640,18 @@ static void cache_revisit_request(struct cache_head *item)
 {
 	struct cache_deferred_req *dreq;
 	struct list_head pending;
-
-	struct list_head *lp;
+	struct hlist_node *lp, *tmp;
 	int hash = DFR_HASH(item);
 
 	INIT_LIST_HEAD(&pending);
 	spin_lock(&cache_defer_lock);
 
-	lp = cache_defer_hash[hash].next;
-	if (lp) {
-		while (lp != &cache_defer_hash[hash]) {
-			dreq = list_entry(lp, struct cache_deferred_req, hash);
-			lp = lp->next;
-			if (dreq->item == item) {
-				__unhash_deferred_req(dreq);
-				list_add(&dreq->recent, &pending);
-			}
-		}
-	}
+	hlist_for_each_entry_safe(dreq, lp, tmp, &cache_defer_hash[hash], hash)
+		if (dreq->item == item) {
+			__unhash_deferred_req(dreq);
+			list_add(&dreq->recent, &pending);
+		}
+
 	spin_unlock(&cache_defer_lock);
 
 	while (!list_empty(&pending)) {