path: root/fs/fscache/page.c
author    David Howells <dhowells@redhat.com>  2009-11-19 13:11:25 -0500
committer David Howells <dhowells@redhat.com>  2009-11-19 13:11:25 -0500
commit    1bccf513ac49d44604ba1cddcc29f5886e70f1b6 (patch)
tree      096ba75a3d02018c5f6e1857aaf1d41471733850 /fs/fscache/page.c
parent    6897e3df8fc37bd4a58bbcdef8306da7fc175584 (diff)
FS-Cache: Fix lock misorder in fscache_write_op()
FS-Cache has two structs internally for keeping track of the internal state of a cached file: the fscache_cookie struct, which represents the netfs's state, and the fscache_object struct, which represents the cache's state. Each has a pointer that points to the other (when both are in existence), and each has a spinlock for pointer maintenance.

Since netfs operations approach these structures from the cookie side, they get the cookie lock first, then the object lock. Cache operations, on the other hand, approach from the object side, and get the object lock first. It is not then permitted for a cache operation to get the cookie lock whilst it is holding the object lock, lest deadlock occur; instead, it must do one of two things:

 (1) increment the cookie usage counter, drop the object lock and then get both locks in order, or

 (2) simply hold the object lock, as certain parts of the cookie may not be altered whilst the object lock is held.

It is also not permitted to follow either pointer without holding the lock at the end you start with. To break the pointers between the cookie and the object, both locks must be held.

fscache_write_op(), however, violates the locking rules: it attempts to get the cookie lock without (a) checking that the cookie pointer is a valid pointer, and (b) holding the object lock to protect the cookie pointer whilst it follows it. This is so that it can access the pending page store tree without interference from __fscache_write_page().

This is fixed by splitting the cookie lock, such that the page store tracking tree is protected by its own lock, and by checking that the cookie pointer is non-NULL before we attempt to follow it whilst holding the object lock. The new lock is subordinate to both the cookie lock and the object lock, and so should be taken after those.

Signed-off-by: David Howells <dhowells@redhat.com>
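For illustration, the nesting the patch establishes (cookie->lock, then object->lock, then the new cookie->stores_lock) can be sketched as below. The two helper functions are hypothetical and not part of FS-Cache; the lock fields and the NULL check mirror the patched code.

/*
 * Minimal, illustrative sketch of the lock nesting after this patch:
 *
 *     cookie->lock  -->  object->lock  -->  cookie->stores_lock
 *
 * example_netfs_side() and example_cache_side() are hypothetical helpers,
 * shown only to demonstrate the ordering rules.
 */

/* Netfs side: approaches from the cookie, so it may take all three locks
 * in nesting order, as __fscache_write_page() now does. */
static void example_netfs_side(struct fscache_cookie *cookie,
                               struct fscache_object *object)
{
        spin_lock(&cookie->lock);
        spin_lock(&object->lock);
        spin_lock(&cookie->stores_lock);

        /* ... update the cookie->stores radix tree here ... */

        spin_unlock(&cookie->stores_lock);
        spin_unlock(&object->lock);
        spin_unlock(&cookie->lock);
}

/* Cache side: approaches from the object, so it must not take cookie->lock
 * whilst holding object->lock.  It may, however, follow object->cookie under
 * object->lock (checking it for NULL first) and then take the subordinate
 * stores_lock, as fscache_write_op() and fscache_end_page_write() now do. */
static void example_cache_side(struct fscache_object *object)
{
        struct fscache_cookie *cookie;

        spin_lock(&object->lock);
        cookie = object->cookie;
        if (cookie) {
                spin_lock(&cookie->stores_lock);
                /* ... update the cookie->stores radix tree here ... */
                spin_unlock(&cookie->stores_lock);
        }
        spin_unlock(&object->lock);
}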
Diffstat (limited to 'fs/fscache/page.c')
-rw-r--r--  fs/fscache/page.c  |  52
1 file changed, 35 insertions, 17 deletions
diff --git a/fs/fscache/page.c b/fs/fscache/page.c
index e6f2e61133a1..3ea8897bc217 100644
--- a/fs/fscache/page.c
+++ b/fs/fscache/page.c
@@ -45,16 +45,26 @@ EXPORT_SYMBOL(__fscache_wait_on_page_write);
 /*
  * note that a page has finished being written to the cache
  */
-static void fscache_end_page_write(struct fscache_cookie *cookie, struct page *page)
+static void fscache_end_page_write(struct fscache_object *object,
+                                   struct page *page)
 {
-        struct page *xpage;
+        struct fscache_cookie *cookie;
+        struct page *xpage = NULL;
 
-        spin_lock(&cookie->lock);
-        xpage = radix_tree_delete(&cookie->stores, page->index);
-        spin_unlock(&cookie->lock);
-        ASSERT(xpage != NULL);
-
-        wake_up_bit(&cookie->flags, 0);
+        spin_lock(&object->lock);
+        cookie = object->cookie;
+        if (cookie) {
+                /* delete the page from the tree if it is now no longer
+                 * pending */
+                spin_lock(&cookie->stores_lock);
+                fscache_stat(&fscache_n_store_radix_deletes);
+                xpage = radix_tree_delete(&cookie->stores, page->index);
+                spin_unlock(&cookie->stores_lock);
+                wake_up_bit(&cookie->flags, 0);
+        }
+        spin_unlock(&object->lock);
+        if (xpage)
+                page_cache_release(xpage);
 }
 
 /*
@@ -591,7 +601,7 @@ static void fscache_write_op(struct fscache_operation *_op)
         struct fscache_storage *op =
                 container_of(_op, struct fscache_storage, op);
         struct fscache_object *object = op->op.object;
-        struct fscache_cookie *cookie = object->cookie;
+        struct fscache_cookie *cookie;
         struct page *page;
         unsigned n;
         void *results[1];
@@ -601,16 +611,17 @@ static void fscache_write_op(struct fscache_operation *_op)
 
         fscache_set_op_state(&op->op, "GetPage");
 
-        spin_lock(&cookie->lock);
         spin_lock(&object->lock);
+        cookie = object->cookie;
 
-        if (!fscache_object_is_active(object)) {
+        if (!fscache_object_is_active(object) || !cookie) {
                 spin_unlock(&object->lock);
-                spin_unlock(&cookie->lock);
                 _leave("");
                 return;
         }
 
+        spin_lock(&cookie->stores_lock);
+
         fscache_stat(&fscache_n_store_calls);
 
         /* find a page to store */
@@ -621,23 +632,25 @@ static void fscache_write_op(struct fscache_operation *_op)
                 goto superseded;
         page = results[0];
         _debug("gang %d [%lx]", n, page->index);
-        if (page->index > op->store_limit)
+        if (page->index > op->store_limit) {
+                fscache_stat(&fscache_n_store_pages_over_limit);
                 goto superseded;
+        }
 
         radix_tree_tag_clear(&cookie->stores, page->index,
                              FSCACHE_COOKIE_PENDING_TAG);
 
+        spin_unlock(&cookie->stores_lock);
         spin_unlock(&object->lock);
-        spin_unlock(&cookie->lock);
 
         if (page) {
                 fscache_set_op_state(&op->op, "Store");
+                fscache_stat(&fscache_n_store_pages);
                 fscache_stat(&fscache_n_cop_write_page);
                 ret = object->cache->ops->write_page(op, page);
                 fscache_stat_d(&fscache_n_cop_write_page);
                 fscache_set_op_state(&op->op, "EndWrite");
-                fscache_end_page_write(cookie, page);
-                page_cache_release(page);
+                fscache_end_page_write(object, page);
                 if (ret < 0) {
                         fscache_set_op_state(&op->op, "Abort");
                         fscache_abort_object(object);
@@ -653,9 +666,9 @@ superseded:
         /* this writer is going away and there aren't any more things to
          * write */
         _debug("cease");
+        spin_unlock(&cookie->stores_lock);
         clear_bit(FSCACHE_OBJECT_PENDING_WRITE, &object->flags);
         spin_unlock(&object->lock);
-        spin_unlock(&cookie->lock);
         _leave("");
 }
 
@@ -731,6 +744,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
         /* add the page to the pending-storage radix tree on the backing
          * object */
         spin_lock(&object->lock);
+        spin_lock(&cookie->stores_lock);
 
         _debug("store limit %llx", (unsigned long long) object->store_limit);
 
@@ -751,6 +765,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
         if (test_and_set_bit(FSCACHE_OBJECT_PENDING_WRITE, &object->flags))
                 goto already_pending;
 
+        spin_unlock(&cookie->stores_lock);
         spin_unlock(&object->lock);
 
         op->op.debug_id = atomic_inc_return(&fscache_op_debug_id);
@@ -772,6 +787,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
 already_queued:
         fscache_stat(&fscache_n_stores_again);
 already_pending:
+        spin_unlock(&cookie->stores_lock);
         spin_unlock(&object->lock);
         spin_unlock(&cookie->lock);
         radix_tree_preload_end();
@@ -781,7 +797,9 @@ already_pending:
         return 0;
 
 submit_failed:
+        spin_lock(&cookie->stores_lock);
         radix_tree_delete(&cookie->stores, page->index);
+        spin_unlock(&cookie->stores_lock);
         page_cache_release(page);
         ret = -ENOBUFS;
         goto nobufs;