author     Dave Chinner <dchinner@redhat.com>    2011-03-22 07:23:41 -0400
committer  Al Viro <viro@zeniv.linux.org.uk>     2011-03-24 21:17:51 -0400
commit     a66979abad090b2765a6c6790c9fdeab996833f2 (patch)
tree       e48b2d0fac8f96456286a503aeeb952620234961 /mm
parent     55fa6091d83160ca772fc37cebae45d42695a708 (diff)
fs: move i_wb_list out from under inode_lock
Protect the inode writeback list with a new global lock
inode_wb_list_lock and use it to protect the list manipulations and
traversals. This lock replaces the inode_lock as the inodes on the
list can be validity checked while holding the inode->i_lock and
hence the inode_lock is no longer needed to protect the list.

Signed-off-by: Dave Chinner <dchinner@redhat.com>
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
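The pattern the patch applies is small: every place that walked or spliced the per-bdi writeback lists under inode_lock now takes the new global inode_wb_list_lock instead. A minimal kernel-style sketch of the resulting traversal follows; the helper name count_dirty_inodes is hypothetical, and inode_wb_list_lock itself is defined by this patch in fs/fs-writeback.c (not shown here, since the diffstat below is limited to 'mm').

    #include <linux/fs.h>
    #include <linux/backing-dev.h>
    #include <linux/spinlock.h>
    #include <linux/list.h>

    /* Defined by this patch in fs/fs-writeback.c; declared here for context. */
    extern spinlock_t inode_wb_list_lock;

    /*
     * Hypothetical helper mirroring bdi_debug_stats_show(): the b_dirty
     * list may only be walked while holding inode_wb_list_lock, which
     * replaces the old inode_lock for i_wb_list manipulations.
     */
    static unsigned long count_dirty_inodes(struct bdi_writeback *wb)
    {
            struct inode *inode;
            unsigned long nr_dirty = 0;

            spin_lock(&inode_wb_list_lock);         /* was: spin_lock(&inode_lock) */
            list_for_each_entry(inode, &wb->b_dirty, i_wb_list)
                    nr_dirty++;
            spin_unlock(&inode_wb_list_lock);       /* was: spin_unlock(&inode_lock) */

            return nr_dirty;
    }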
Diffstat (limited to 'mm')
-rw-r--r--  mm/backing-dev.c  8
-rw-r--r--  mm/filemap.c      8
-rw-r--r--  mm/rmap.c         4
3 files changed, 10 insertions, 10 deletions
diff --git a/mm/backing-dev.c b/mm/backing-dev.c
index 027100d30227..4b3e9f17ee21 100644
--- a/mm/backing-dev.c
+++ b/mm/backing-dev.c
@@ -73,14 +73,14 @@ static int bdi_debug_stats_show(struct seq_file *m, void *v)
         struct inode *inode;
 
         nr_wb = nr_dirty = nr_io = nr_more_io = 0;
-        spin_lock(&inode_lock);
+        spin_lock(&inode_wb_list_lock);
         list_for_each_entry(inode, &wb->b_dirty, i_wb_list)
                 nr_dirty++;
         list_for_each_entry(inode, &wb->b_io, i_wb_list)
                 nr_io++;
         list_for_each_entry(inode, &wb->b_more_io, i_wb_list)
                 nr_more_io++;
-        spin_unlock(&inode_lock);
+        spin_unlock(&inode_wb_list_lock);
 
         global_dirty_limits(&background_thresh, &dirty_thresh);
         bdi_thresh = bdi_dirty_limit(bdi, dirty_thresh);
@@ -682,11 +682,11 @@ void bdi_destroy(struct backing_dev_info *bdi)
         if (bdi_has_dirty_io(bdi)) {
                 struct bdi_writeback *dst = &default_backing_dev_info.wb;
 
-                spin_lock(&inode_lock);
+                spin_lock(&inode_wb_list_lock);
                 list_splice(&bdi->wb.b_dirty, &dst->b_dirty);
                 list_splice(&bdi->wb.b_io, &dst->b_io);
                 list_splice(&bdi->wb.b_more_io, &dst->b_more_io);
-                spin_unlock(&inode_lock);
+                spin_unlock(&inode_wb_list_lock);
         }
 
         bdi_unregister(bdi);
diff --git a/mm/filemap.c b/mm/filemap.c
index 499e9aa91450..d8b34d1a1071 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -80,8 +80,8 @@
  *  ->i_mutex
  *    ->i_alloc_sem             (various)
  *
- *  ->inode_lock
- *    ->sb_lock                 (fs/fs-writeback.c)
+ *  inode_wb_list_lock
+ *    sb_lock                   (fs/fs-writeback.c)
  *    ->mapping->tree_lock      (__sync_single_inode)
  *
  *  ->i_mmap_lock
@@ -98,9 +98,9 @@
  *    ->zone.lru_lock           (check_pte_range->isolate_lru_page)
  *    ->private_lock            (page_remove_rmap->set_page_dirty)
  *    ->tree_lock               (page_remove_rmap->set_page_dirty)
- *    ->inode_lock              (page_remove_rmap->set_page_dirty)
+ *    inode_wb_list_lock        (page_remove_rmap->set_page_dirty)
  *    ->inode->i_lock           (page_remove_rmap->set_page_dirty)
- *    ->inode_lock              (zap_pte_range->set_page_dirty)
+ *    inode_wb_list_lock        (zap_pte_range->set_page_dirty)
  *    ->inode->i_lock           (zap_pte_range->set_page_dirty)
  *    ->private_lock            (zap_pte_range->__set_page_dirty_buffers)
  *
diff --git a/mm/rmap.c b/mm/rmap.c
index 7dada0456448..8da044a1db0f 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -31,12 +31,12 @@
  *             swap_lock (in swap_duplicate, swap_info_get)
  *               mmlist_lock (in mmput, drain_mmlist and others)
  *               mapping->private_lock (in __set_page_dirty_buffers)
- *               inode_lock (in set_page_dirty's __mark_inode_dirty)
  *               inode->i_lock (in set_page_dirty's __mark_inode_dirty)
+ *               inode_wb_list_lock (in set_page_dirty's __mark_inode_dirty)
  *                 sb_lock (within inode_lock in fs/fs-writeback.c)
  *                 mapping->tree_lock (widely used, in set_page_dirty,
  *                           in arch-dependent flush_dcache_mmap_lock,
- *                           within inode_lock in __sync_single_inode)
+ *                           within inode_wb_list_lock in __sync_single_inode)
  *
  * (code doesn't rely on that order so it could be switched around)
  * ->tasklist_lock