diff options
author | NeilBrown <neilb@suse.de> | 2009-09-09 02:32:54 -0400 |
---|---|---|
committer | J. Bruce Fields <bfields@citi.umich.edu> | 2009-09-18 11:47:49 -0400 |
commit | 67e7328f1577230ef3a1430c1a7e5c07978c6e51 (patch) | |
tree | 6b160d46c8bfbc5b04b4d532f0ad99067feab49e /net | |
parent | c0826574ddc0df486ecfc2d655e08904c6513209 (diff) |
sunrpc/cache: use list_del_init for the list_head entries in cache_deferred_req
Using list_del_init is generally safer than list_del, and it will
allow us, in a subsequent patch, to see if an entry has already been
processed or not.
Signed-off-by: NeilBrown <neilb@suse.de>
Signed-off-by: J. Bruce Fields <bfields@citi.umich.edu>
Diffstat (limited to 'net')
-rw-r--r-- | net/sunrpc/cache.c | 8 |
1 file changed, 4 insertions, 4 deletions
diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c index f2895d0a5f81..4a32a30a03eb 100644 --- a/net/sunrpc/cache.c +++ b/net/sunrpc/cache.c | |||
@@ -529,8 +529,8 @@ static int cache_defer_req(struct cache_req *req, struct cache_head *item) | |||
529 | if (++cache_defer_cnt > DFR_MAX) { | 529 | if (++cache_defer_cnt > DFR_MAX) { |
530 | dreq = list_entry(cache_defer_list.prev, | 530 | dreq = list_entry(cache_defer_list.prev, |
531 | struct cache_deferred_req, recent); | 531 | struct cache_deferred_req, recent); |
532 | list_del(&dreq->recent); | 532 | list_del_init(&dreq->recent); |
533 | list_del(&dreq->hash); | 533 | list_del_init(&dreq->hash); |
534 | cache_defer_cnt--; | 534 | cache_defer_cnt--; |
535 | } | 535 | } |
536 | spin_unlock(&cache_defer_lock); | 536 | spin_unlock(&cache_defer_lock); |
@@ -564,7 +564,7 @@ static void cache_revisit_request(struct cache_head *item) | |||
564 | dreq = list_entry(lp, struct cache_deferred_req, hash); | 564 | dreq = list_entry(lp, struct cache_deferred_req, hash); |
565 | lp = lp->next; | 565 | lp = lp->next; |
566 | if (dreq->item == item) { | 566 | if (dreq->item == item) { |
567 | list_del(&dreq->hash); | 567 | list_del_init(&dreq->hash); |
568 | list_move(&dreq->recent, &pending); | 568 | list_move(&dreq->recent, &pending); |
569 | cache_defer_cnt--; | 569 | cache_defer_cnt--; |
570 | } | 570 | } |
@@ -590,7 +590,7 @@ void cache_clean_deferred(void *owner) | |||
590 | 590 | ||
591 | list_for_each_entry_safe(dreq, tmp, &cache_defer_list, recent) { | 591 | list_for_each_entry_safe(dreq, tmp, &cache_defer_list, recent) { |
592 | if (dreq->owner == owner) { | 592 | if (dreq->owner == owner) { |
593 | list_del(&dreq->hash); | 593 | list_del_init(&dreq->hash); |
594 | list_move(&dreq->recent, &pending); | 594 | list_move(&dreq->recent, &pending); |
595 | cache_defer_cnt--; | 595 | cache_defer_cnt--; |
596 | } | 596 | } |