about summary refs log tree commit diff stats
diff options
context:
space:
mode:
-rw-r--r--  fs/block_dev.c            |  4
-rw-r--r--  fs/fs-writeback.c         | 76
-rw-r--r--  fs/inode.c                | 12
-rw-r--r--  fs/internal.h             |  5
-rw-r--r--  include/linux/writeback.h |  1
-rw-r--r--  mm/backing-dev.c          |  8
-rw-r--r--  mm/filemap.c              |  8
-rw-r--r--  mm/rmap.c                 |  4
8 files changed, 70 insertions(+), 48 deletions(-)
diff --git a/fs/block_dev.c b/fs/block_dev.c
index bc39b18cf3d0..2bbc0e62102f 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -55,13 +55,13 @@ EXPORT_SYMBOL(I_BDEV);
55static void bdev_inode_switch_bdi(struct inode *inode, 55static void bdev_inode_switch_bdi(struct inode *inode,
56 struct backing_dev_info *dst) 56 struct backing_dev_info *dst)
57{ 57{
58 spin_lock(&inode_lock); 58 spin_lock(&inode_wb_list_lock);
59 spin_lock(&inode->i_lock); 59 spin_lock(&inode->i_lock);
60 inode->i_data.backing_dev_info = dst; 60 inode->i_data.backing_dev_info = dst;
61 if (inode->i_state & I_DIRTY) 61 if (inode->i_state & I_DIRTY)
62 list_move(&inode->i_wb_list, &dst->wb.b_dirty); 62 list_move(&inode->i_wb_list, &dst->wb.b_dirty);
63 spin_unlock(&inode->i_lock); 63 spin_unlock(&inode->i_lock);
64 spin_unlock(&inode_lock); 64 spin_unlock(&inode_wb_list_lock);
65} 65}
66 66
67static sector_t max_block(struct block_device *bdev) 67static sector_t max_block(struct block_device *bdev)
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
index 5de56a2182bb..ed800656356b 100644
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -176,6 +176,17 @@ void bdi_start_background_writeback(struct backing_dev_info *bdi)
176} 176}
177 177
178/* 178/*
179 * Remove the inode from the writeback list it is on.
180 */
181void inode_wb_list_del(struct inode *inode)
182{
183 spin_lock(&inode_wb_list_lock);
184 list_del_init(&inode->i_wb_list);
185 spin_unlock(&inode_wb_list_lock);
186}
187
188
189/*
179 * Redirty an inode: set its when-it-was dirtied timestamp and move it to the 190 * Redirty an inode: set its when-it-was dirtied timestamp and move it to the
180 * furthest end of its superblock's dirty-inode list. 191 * furthest end of its superblock's dirty-inode list.
181 * 192 *
@@ -188,6 +199,7 @@ static void redirty_tail(struct inode *inode)
188{ 199{
189 struct bdi_writeback *wb = &inode_to_bdi(inode)->wb; 200 struct bdi_writeback *wb = &inode_to_bdi(inode)->wb;
190 201
202 assert_spin_locked(&inode_wb_list_lock);
191 if (!list_empty(&wb->b_dirty)) { 203 if (!list_empty(&wb->b_dirty)) {
192 struct inode *tail; 204 struct inode *tail;
193 205
@@ -205,14 +217,17 @@ static void requeue_io(struct inode *inode)
205{ 217{
206 struct bdi_writeback *wb = &inode_to_bdi(inode)->wb; 218 struct bdi_writeback *wb = &inode_to_bdi(inode)->wb;
207 219
220 assert_spin_locked(&inode_wb_list_lock);
208 list_move(&inode->i_wb_list, &wb->b_more_io); 221 list_move(&inode->i_wb_list, &wb->b_more_io);
209} 222}
210 223
211static void inode_sync_complete(struct inode *inode) 224static void inode_sync_complete(struct inode *inode)
212{ 225{
213 /* 226 /*
214 * Prevent speculative execution through spin_unlock(&inode_lock); 227 * Prevent speculative execution through
228 * spin_unlock(&inode_wb_list_lock);
215 */ 229 */
230
216 smp_mb(); 231 smp_mb();
217 wake_up_bit(&inode->i_state, __I_SYNC); 232 wake_up_bit(&inode->i_state, __I_SYNC);
218} 233}
@@ -286,6 +301,7 @@ static void move_expired_inodes(struct list_head *delaying_queue,
286 */ 301 */
287static void queue_io(struct bdi_writeback *wb, unsigned long *older_than_this) 302static void queue_io(struct bdi_writeback *wb, unsigned long *older_than_this)
288{ 303{
304 assert_spin_locked(&inode_wb_list_lock);
289 list_splice_init(&wb->b_more_io, &wb->b_io); 305 list_splice_init(&wb->b_more_io, &wb->b_io);
290 move_expired_inodes(&wb->b_dirty, &wb->b_io, older_than_this); 306 move_expired_inodes(&wb->b_dirty, &wb->b_io, older_than_this);
291} 307}
@@ -308,25 +324,23 @@ static void inode_wait_for_writeback(struct inode *inode)
308 wqh = bit_waitqueue(&inode->i_state, __I_SYNC); 324 wqh = bit_waitqueue(&inode->i_state, __I_SYNC);
309 while (inode->i_state & I_SYNC) { 325 while (inode->i_state & I_SYNC) {
310 spin_unlock(&inode->i_lock); 326 spin_unlock(&inode->i_lock);
311 spin_unlock(&inode_lock); 327 spin_unlock(&inode_wb_list_lock);
312 __wait_on_bit(wqh, &wq, inode_wait, TASK_UNINTERRUPTIBLE); 328 __wait_on_bit(wqh, &wq, inode_wait, TASK_UNINTERRUPTIBLE);
313 spin_lock(&inode_lock); 329 spin_lock(&inode_wb_list_lock);
314 spin_lock(&inode->i_lock); 330 spin_lock(&inode->i_lock);
315 } 331 }
316} 332}
317 333
318/* 334/*
319 * Write out an inode's dirty pages. Called under inode_lock. Either the 335 * Write out an inode's dirty pages. Called under inode_wb_list_lock. Either
320 * caller has ref on the inode (either via __iget or via syscall against an fd) 336 * the caller has an active reference on the inode or the inode has I_WILL_FREE
321 * or the inode has I_WILL_FREE set (via generic_forget_inode) 337 * set.
322 * 338 *
323 * If `wait' is set, wait on the writeout. 339 * If `wait' is set, wait on the writeout.
324 * 340 *
325 * The whole writeout design is quite complex and fragile. We want to avoid 341 * The whole writeout design is quite complex and fragile. We want to avoid
326 * starvation of particular inodes when others are being redirtied, prevent 342 * starvation of particular inodes when others are being redirtied, prevent
327 * livelocks, etc. 343 * livelocks, etc.
328 *
329 * Called under inode_lock.
330 */ 344 */
331static int 345static int
332writeback_single_inode(struct inode *inode, struct writeback_control *wbc) 346writeback_single_inode(struct inode *inode, struct writeback_control *wbc)
@@ -368,7 +382,7 @@ writeback_single_inode(struct inode *inode, struct writeback_control *wbc)
368 inode->i_state |= I_SYNC; 382 inode->i_state |= I_SYNC;
369 inode->i_state &= ~I_DIRTY_PAGES; 383 inode->i_state &= ~I_DIRTY_PAGES;
370 spin_unlock(&inode->i_lock); 384 spin_unlock(&inode->i_lock);
371 spin_unlock(&inode_lock); 385 spin_unlock(&inode_wb_list_lock);
372 386
373 ret = do_writepages(mapping, wbc); 387 ret = do_writepages(mapping, wbc);
374 388
@@ -388,12 +402,10 @@ writeback_single_inode(struct inode *inode, struct writeback_control *wbc)
388 * due to delalloc, clear dirty metadata flags right before 402 * due to delalloc, clear dirty metadata flags right before
389 * write_inode() 403 * write_inode()
390 */ 404 */
391 spin_lock(&inode_lock);
392 spin_lock(&inode->i_lock); 405 spin_lock(&inode->i_lock);
393 dirty = inode->i_state & I_DIRTY; 406 dirty = inode->i_state & I_DIRTY;
394 inode->i_state &= ~(I_DIRTY_SYNC | I_DIRTY_DATASYNC); 407 inode->i_state &= ~(I_DIRTY_SYNC | I_DIRTY_DATASYNC);
395 spin_unlock(&inode->i_lock); 408 spin_unlock(&inode->i_lock);
396 spin_unlock(&inode_lock);
397 /* Don't write the inode if only I_DIRTY_PAGES was set */ 409 /* Don't write the inode if only I_DIRTY_PAGES was set */
398 if (dirty & (I_DIRTY_SYNC | I_DIRTY_DATASYNC)) { 410 if (dirty & (I_DIRTY_SYNC | I_DIRTY_DATASYNC)) {
399 int err = write_inode(inode, wbc); 411 int err = write_inode(inode, wbc);
@@ -401,7 +413,7 @@ writeback_single_inode(struct inode *inode, struct writeback_control *wbc)
401 ret = err; 413 ret = err;
402 } 414 }
403 415
404 spin_lock(&inode_lock); 416 spin_lock(&inode_wb_list_lock);
405 spin_lock(&inode->i_lock); 417 spin_lock(&inode->i_lock);
406 inode->i_state &= ~I_SYNC; 418 inode->i_state &= ~I_SYNC;
407 if (!(inode->i_state & I_FREEING)) { 419 if (!(inode->i_state & I_FREEING)) {
@@ -543,10 +555,10 @@ static int writeback_sb_inodes(struct super_block *sb, struct bdi_writeback *wb,
543 */ 555 */
544 redirty_tail(inode); 556 redirty_tail(inode);
545 } 557 }
546 spin_unlock(&inode_lock); 558 spin_unlock(&inode_wb_list_lock);
547 iput(inode); 559 iput(inode);
548 cond_resched(); 560 cond_resched();
549 spin_lock(&inode_lock); 561 spin_lock(&inode_wb_list_lock);
550 if (wbc->nr_to_write <= 0) { 562 if (wbc->nr_to_write <= 0) {
551 wbc->more_io = 1; 563 wbc->more_io = 1;
552 return 1; 564 return 1;
@@ -565,7 +577,7 @@ void writeback_inodes_wb(struct bdi_writeback *wb,
565 577
566 if (!wbc->wb_start) 578 if (!wbc->wb_start)
567 wbc->wb_start = jiffies; /* livelock avoidance */ 579 wbc->wb_start = jiffies; /* livelock avoidance */
568 spin_lock(&inode_lock); 580 spin_lock(&inode_wb_list_lock);
569 if (!wbc->for_kupdate || list_empty(&wb->b_io)) 581 if (!wbc->for_kupdate || list_empty(&wb->b_io))
570 queue_io(wb, wbc->older_than_this); 582 queue_io(wb, wbc->older_than_this);
571 583
@@ -583,7 +595,7 @@ void writeback_inodes_wb(struct bdi_writeback *wb,
583 if (ret) 595 if (ret)
584 break; 596 break;
585 } 597 }
586 spin_unlock(&inode_lock); 598 spin_unlock(&inode_wb_list_lock);
587 /* Leave any unwritten inodes on b_io */ 599 /* Leave any unwritten inodes on b_io */
588} 600}
589 601
@@ -592,11 +604,11 @@ static void __writeback_inodes_sb(struct super_block *sb,
592{ 604{
593 WARN_ON(!rwsem_is_locked(&sb->s_umount)); 605 WARN_ON(!rwsem_is_locked(&sb->s_umount));
594 606
595 spin_lock(&inode_lock); 607 spin_lock(&inode_wb_list_lock);
596 if (!wbc->for_kupdate || list_empty(&wb->b_io)) 608 if (!wbc->for_kupdate || list_empty(&wb->b_io))
597 queue_io(wb, wbc->older_than_this); 609 queue_io(wb, wbc->older_than_this);
598 writeback_sb_inodes(sb, wb, wbc, true); 610 writeback_sb_inodes(sb, wb, wbc, true);
599 spin_unlock(&inode_lock); 611 spin_unlock(&inode_wb_list_lock);
600} 612}
601 613
602/* 614/*
@@ -735,7 +747,7 @@ static long wb_writeback(struct bdi_writeback *wb,
735 * become available for writeback. Otherwise 747 * become available for writeback. Otherwise
736 * we'll just busyloop. 748 * we'll just busyloop.
737 */ 749 */
738 spin_lock(&inode_lock); 750 spin_lock(&inode_wb_list_lock);
739 if (!list_empty(&wb->b_more_io)) { 751 if (!list_empty(&wb->b_more_io)) {
740 inode = wb_inode(wb->b_more_io.prev); 752 inode = wb_inode(wb->b_more_io.prev);
741 trace_wbc_writeback_wait(&wbc, wb->bdi); 753 trace_wbc_writeback_wait(&wbc, wb->bdi);
@@ -743,7 +755,7 @@ static long wb_writeback(struct bdi_writeback *wb,
743 inode_wait_for_writeback(inode); 755 inode_wait_for_writeback(inode);
744 spin_unlock(&inode->i_lock); 756 spin_unlock(&inode->i_lock);
745 } 757 }
746 spin_unlock(&inode_lock); 758 spin_unlock(&inode_wb_list_lock);
747 } 759 }
748 760
749 return wrote; 761 return wrote;
@@ -1009,7 +1021,6 @@ void __mark_inode_dirty(struct inode *inode, int flags)
1009{ 1021{
1010 struct super_block *sb = inode->i_sb; 1022 struct super_block *sb = inode->i_sb;
1011 struct backing_dev_info *bdi = NULL; 1023 struct backing_dev_info *bdi = NULL;
1012 bool wakeup_bdi = false;
1013 1024
1014 /* 1025 /*
1015 * Don't do this for I_DIRTY_PAGES - that doesn't actually 1026 * Don't do this for I_DIRTY_PAGES - that doesn't actually
@@ -1033,7 +1044,6 @@ void __mark_inode_dirty(struct inode *inode, int flags)
1033 if (unlikely(block_dump)) 1044 if (unlikely(block_dump))
1034 block_dump___mark_inode_dirty(inode); 1045 block_dump___mark_inode_dirty(inode);
1035 1046
1036 spin_lock(&inode_lock);
1037 spin_lock(&inode->i_lock); 1047 spin_lock(&inode->i_lock);
1038 if ((inode->i_state & flags) != flags) { 1048 if ((inode->i_state & flags) != flags) {
1039 const int was_dirty = inode->i_state & I_DIRTY; 1049 const int was_dirty = inode->i_state & I_DIRTY;
@@ -1059,12 +1069,12 @@ void __mark_inode_dirty(struct inode *inode, int flags)
1059 if (inode->i_state & I_FREEING) 1069 if (inode->i_state & I_FREEING)
1060 goto out_unlock_inode; 1070 goto out_unlock_inode;
1061 1071
1062 spin_unlock(&inode->i_lock);
1063 /* 1072 /*
1064 * If the inode was already on b_dirty/b_io/b_more_io, don't 1073 * If the inode was already on b_dirty/b_io/b_more_io, don't
1065 * reposition it (that would break b_dirty time-ordering). 1074 * reposition it (that would break b_dirty time-ordering).
1066 */ 1075 */
1067 if (!was_dirty) { 1076 if (!was_dirty) {
1077 bool wakeup_bdi = false;
1068 bdi = inode_to_bdi(inode); 1078 bdi = inode_to_bdi(inode);
1069 1079
1070 if (bdi_cap_writeback_dirty(bdi)) { 1080 if (bdi_cap_writeback_dirty(bdi)) {
@@ -1081,18 +1091,20 @@ void __mark_inode_dirty(struct inode *inode, int flags)
1081 wakeup_bdi = true; 1091 wakeup_bdi = true;
1082 } 1092 }
1083 1093
1094 spin_unlock(&inode->i_lock);
1095 spin_lock(&inode_wb_list_lock);
1084 inode->dirtied_when = jiffies; 1096 inode->dirtied_when = jiffies;
1085 list_move(&inode->i_wb_list, &bdi->wb.b_dirty); 1097 list_move(&inode->i_wb_list, &bdi->wb.b_dirty);
1098 spin_unlock(&inode_wb_list_lock);
1099
1100 if (wakeup_bdi)
1101 bdi_wakeup_thread_delayed(bdi);
1102 return;
1086 } 1103 }
1087 goto out;
1088 } 1104 }
1089out_unlock_inode: 1105out_unlock_inode:
1090 spin_unlock(&inode->i_lock); 1106 spin_unlock(&inode->i_lock);
1091out:
1092 spin_unlock(&inode_lock);
1093 1107
1094 if (wakeup_bdi)
1095 bdi_wakeup_thread_delayed(bdi);
1096} 1108}
1097EXPORT_SYMBOL(__mark_inode_dirty); 1109EXPORT_SYMBOL(__mark_inode_dirty);
1098 1110
@@ -1296,9 +1308,9 @@ int write_inode_now(struct inode *inode, int sync)
1296 wbc.nr_to_write = 0; 1308 wbc.nr_to_write = 0;
1297 1309
1298 might_sleep(); 1310 might_sleep();
1299 spin_lock(&inode_lock); 1311 spin_lock(&inode_wb_list_lock);
1300 ret = writeback_single_inode(inode, &wbc); 1312 ret = writeback_single_inode(inode, &wbc);
1301 spin_unlock(&inode_lock); 1313 spin_unlock(&inode_wb_list_lock);
1302 if (sync) 1314 if (sync)
1303 inode_sync_wait(inode); 1315 inode_sync_wait(inode);
1304 return ret; 1316 return ret;
@@ -1320,9 +1332,9 @@ int sync_inode(struct inode *inode, struct writeback_control *wbc)
1320{ 1332{
1321 int ret; 1333 int ret;
1322 1334
1323 spin_lock(&inode_lock); 1335 spin_lock(&inode_wb_list_lock);
1324 ret = writeback_single_inode(inode, wbc); 1336 ret = writeback_single_inode(inode, wbc);
1325 spin_unlock(&inode_lock); 1337 spin_unlock(&inode_wb_list_lock);
1326 return ret; 1338 return ret;
1327} 1339}
1328EXPORT_SYMBOL(sync_inode); 1340EXPORT_SYMBOL(sync_inode);
diff --git a/fs/inode.c b/fs/inode.c
index 785b1ab23ff0..239fdc08719e 100644
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -26,6 +26,7 @@
26#include <linux/posix_acl.h> 26#include <linux/posix_acl.h>
27#include <linux/ima.h> 27#include <linux/ima.h>
28#include <linux/cred.h> 28#include <linux/cred.h>
29#include "internal.h"
29 30
30/* 31/*
31 * inode locking rules. 32 * inode locking rules.
@@ -36,6 +37,8 @@
36 * inode_lru, inode->i_lru 37 * inode_lru, inode->i_lru
37 * inode_sb_list_lock protects: 38 * inode_sb_list_lock protects:
38 * sb->s_inodes, inode->i_sb_list 39 * sb->s_inodes, inode->i_sb_list
40 * inode_wb_list_lock protects:
41 * bdi->wb.b_{dirty,io,more_io}, inode->i_wb_list
39 * 42 *
40 * Lock ordering: 43 * Lock ordering:
41 * inode_lock 44 * inode_lock
@@ -44,6 +47,9 @@
44 * inode_sb_list_lock 47 * inode_sb_list_lock
45 * inode->i_lock 48 * inode->i_lock
46 * inode_lru_lock 49 * inode_lru_lock
50 *
51 * inode_wb_list_lock
52 * inode->i_lock
47 */ 53 */
48 54
49/* 55/*
@@ -105,6 +111,7 @@ static struct hlist_head *inode_hashtable __read_mostly;
105DEFINE_SPINLOCK(inode_lock); 111DEFINE_SPINLOCK(inode_lock);
106 112
107__cacheline_aligned_in_smp DEFINE_SPINLOCK(inode_sb_list_lock); 113__cacheline_aligned_in_smp DEFINE_SPINLOCK(inode_sb_list_lock);
114__cacheline_aligned_in_smp DEFINE_SPINLOCK(inode_wb_list_lock);
108 115
109/* 116/*
110 * iprune_sem provides exclusion between the icache shrinking and the 117 * iprune_sem provides exclusion between the icache shrinking and the
@@ -483,10 +490,7 @@ static void evict(struct inode *inode)
483 BUG_ON(!(inode->i_state & I_FREEING)); 490 BUG_ON(!(inode->i_state & I_FREEING));
484 BUG_ON(!list_empty(&inode->i_lru)); 491 BUG_ON(!list_empty(&inode->i_lru));
485 492
486 spin_lock(&inode_lock); 493 inode_wb_list_del(inode);
487 list_del_init(&inode->i_wb_list);
488 spin_unlock(&inode_lock);
489
490 inode_sb_list_del(inode); 494 inode_sb_list_del(inode);
491 495
492 if (op->evict_inode) { 496 if (op->evict_inode) {
diff --git a/fs/internal.h b/fs/internal.h
index 7013ae0c88c1..b29c46e4e32f 100644
--- a/fs/internal.h
+++ b/fs/internal.h
@@ -127,6 +127,11 @@ extern long do_handle_open(int mountdirfd,
127 */ 127 */
128extern spinlock_t inode_sb_list_lock; 128extern spinlock_t inode_sb_list_lock;
129 129
130/*
131 * fs-writeback.c
132 */
133extern void inode_wb_list_del(struct inode *inode);
134
130extern int get_nr_dirty_inodes(void); 135extern int get_nr_dirty_inodes(void);
131extern void evict_inodes(struct super_block *); 136extern void evict_inodes(struct super_block *);
132extern int invalidate_inodes(struct super_block *, bool); 137extern int invalidate_inodes(struct super_block *, bool);
diff --git a/include/linux/writeback.h b/include/linux/writeback.h
index 0ead399e08b5..3f5fee718329 100644
--- a/include/linux/writeback.h
+++ b/include/linux/writeback.h
@@ -10,6 +10,7 @@
10struct backing_dev_info; 10struct backing_dev_info;
11 11
12extern spinlock_t inode_lock; 12extern spinlock_t inode_lock;
13extern spinlock_t inode_wb_list_lock;
13 14
14/* 15/*
15 * fs/fs-writeback.c 16 * fs/fs-writeback.c
diff --git a/mm/backing-dev.c b/mm/backing-dev.c
index 027100d30227..4b3e9f17ee21 100644
--- a/mm/backing-dev.c
+++ b/mm/backing-dev.c
@@ -73,14 +73,14 @@ static int bdi_debug_stats_show(struct seq_file *m, void *v)
73 struct inode *inode; 73 struct inode *inode;
74 74
75 nr_wb = nr_dirty = nr_io = nr_more_io = 0; 75 nr_wb = nr_dirty = nr_io = nr_more_io = 0;
76 spin_lock(&inode_lock); 76 spin_lock(&inode_wb_list_lock);
77 list_for_each_entry(inode, &wb->b_dirty, i_wb_list) 77 list_for_each_entry(inode, &wb->b_dirty, i_wb_list)
78 nr_dirty++; 78 nr_dirty++;
79 list_for_each_entry(inode, &wb->b_io, i_wb_list) 79 list_for_each_entry(inode, &wb->b_io, i_wb_list)
80 nr_io++; 80 nr_io++;
81 list_for_each_entry(inode, &wb->b_more_io, i_wb_list) 81 list_for_each_entry(inode, &wb->b_more_io, i_wb_list)
82 nr_more_io++; 82 nr_more_io++;
83 spin_unlock(&inode_lock); 83 spin_unlock(&inode_wb_list_lock);
84 84
85 global_dirty_limits(&background_thresh, &dirty_thresh); 85 global_dirty_limits(&background_thresh, &dirty_thresh);
86 bdi_thresh = bdi_dirty_limit(bdi, dirty_thresh); 86 bdi_thresh = bdi_dirty_limit(bdi, dirty_thresh);
@@ -682,11 +682,11 @@ void bdi_destroy(struct backing_dev_info *bdi)
682 if (bdi_has_dirty_io(bdi)) { 682 if (bdi_has_dirty_io(bdi)) {
683 struct bdi_writeback *dst = &default_backing_dev_info.wb; 683 struct bdi_writeback *dst = &default_backing_dev_info.wb;
684 684
685 spin_lock(&inode_lock); 685 spin_lock(&inode_wb_list_lock);
686 list_splice(&bdi->wb.b_dirty, &dst->b_dirty); 686 list_splice(&bdi->wb.b_dirty, &dst->b_dirty);
687 list_splice(&bdi->wb.b_io, &dst->b_io); 687 list_splice(&bdi->wb.b_io, &dst->b_io);
688 list_splice(&bdi->wb.b_more_io, &dst->b_more_io); 688 list_splice(&bdi->wb.b_more_io, &dst->b_more_io);
689 spin_unlock(&inode_lock); 689 spin_unlock(&inode_wb_list_lock);
690 } 690 }
691 691
692 bdi_unregister(bdi); 692 bdi_unregister(bdi);
diff --git a/mm/filemap.c b/mm/filemap.c
index 499e9aa91450..d8b34d1a1071 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -80,8 +80,8 @@
80 * ->i_mutex 80 * ->i_mutex
81 * ->i_alloc_sem (various) 81 * ->i_alloc_sem (various)
82 * 82 *
83 * ->inode_lock 83 * inode_wb_list_lock
84 * ->sb_lock (fs/fs-writeback.c) 84 * sb_lock (fs/fs-writeback.c)
85 * ->mapping->tree_lock (__sync_single_inode) 85 * ->mapping->tree_lock (__sync_single_inode)
86 * 86 *
87 * ->i_mmap_lock 87 * ->i_mmap_lock
@@ -98,9 +98,9 @@
98 * ->zone.lru_lock (check_pte_range->isolate_lru_page) 98 * ->zone.lru_lock (check_pte_range->isolate_lru_page)
99 * ->private_lock (page_remove_rmap->set_page_dirty) 99 * ->private_lock (page_remove_rmap->set_page_dirty)
100 * ->tree_lock (page_remove_rmap->set_page_dirty) 100 * ->tree_lock (page_remove_rmap->set_page_dirty)
101 * ->inode_lock (page_remove_rmap->set_page_dirty) 101 * inode_wb_list_lock (page_remove_rmap->set_page_dirty)
102 * ->inode->i_lock (page_remove_rmap->set_page_dirty) 102 * ->inode->i_lock (page_remove_rmap->set_page_dirty)
103 * ->inode_lock (zap_pte_range->set_page_dirty) 103 * inode_wb_list_lock (zap_pte_range->set_page_dirty)
104 * ->inode->i_lock (zap_pte_range->set_page_dirty) 104 * ->inode->i_lock (zap_pte_range->set_page_dirty)
105 * ->private_lock (zap_pte_range->__set_page_dirty_buffers) 105 * ->private_lock (zap_pte_range->__set_page_dirty_buffers)
106 * 106 *
diff --git a/mm/rmap.c b/mm/rmap.c
index 7dada0456448..8da044a1db0f 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -31,12 +31,12 @@
31 * swap_lock (in swap_duplicate, swap_info_get) 31 * swap_lock (in swap_duplicate, swap_info_get)
32 * mmlist_lock (in mmput, drain_mmlist and others) 32 * mmlist_lock (in mmput, drain_mmlist and others)
33 * mapping->private_lock (in __set_page_dirty_buffers) 33 * mapping->private_lock (in __set_page_dirty_buffers)
34 * inode_lock (in set_page_dirty's __mark_inode_dirty)
35 * inode->i_lock (in set_page_dirty's __mark_inode_dirty) 34 * inode->i_lock (in set_page_dirty's __mark_inode_dirty)
35 * inode_wb_list_lock (in set_page_dirty's __mark_inode_dirty)
36 * sb_lock (within inode_lock in fs/fs-writeback.c) 36 * sb_lock (within inode_lock in fs/fs-writeback.c)
37 * mapping->tree_lock (widely used, in set_page_dirty, 37 * mapping->tree_lock (widely used, in set_page_dirty,
38 * in arch-dependent flush_dcache_mmap_lock, 38 * in arch-dependent flush_dcache_mmap_lock,
39 * within inode_lock in __sync_single_inode) 39 * within inode_wb_list_lock in __sync_single_inode)
40 * 40 *
41 * (code doesn't rely on that order so it could be switched around) 41 * (code doesn't rely on that order so it could be switched around)
42 * ->tasklist_lock 42 * ->tasklist_lock