Diffstat (limited to 'fs/fs-writeback.c')

 fs/fs-writeback.c | 141 +++++++++++++++++++++++++++++++++-----------------
 1 file changed, 91 insertions(+), 50 deletions(-)
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
index 59c6e4956786..b5ed541fb137 100644
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -176,6 +176,17 @@ void bdi_start_background_writeback(struct backing_dev_info *bdi)
 }
 
 /*
+ * Remove the inode from the writeback list it is on.
+ */
+void inode_wb_list_del(struct inode *inode)
+{
+        spin_lock(&inode_wb_list_lock);
+        list_del_init(&inode->i_wb_list);
+        spin_unlock(&inode_wb_list_lock);
+}
+
+
+/*
  * Redirty an inode: set its when-it-was dirtied timestamp and move it to the
  * furthest end of its superblock's dirty-inode list.
  *
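The new inode_wb_list_del() helper encapsulates removal from whichever of b_dirty/b_io/b_more_io the inode sits on, taking the new lock internally so callers never touch it. A sketch of how a caller outside this file uses it; the wrapper function below is hypothetical (in the full series the real caller is the eviction path in fs/inode.c):

/* Hypothetical caller sketch: detach an inode from the writeback
 * lists before tearing it down. No writeback lock may be held on
 * entry; inode_wb_list_del() takes inode_wb_list_lock itself. */
static void evict_detach_writeback(struct inode *inode)
{
        inode_wb_list_del(inode);
        /* ... ->evict_inode() and page cache teardown follow ... */
}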
@@ -188,6 +199,7 @@ static void redirty_tail(struct inode *inode)
 {
         struct bdi_writeback *wb = &inode_to_bdi(inode)->wb;
 
+        assert_spin_locked(&inode_wb_list_lock);
         if (!list_empty(&wb->b_dirty)) {
                 struct inode *tail;
 
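The assert_spin_locked() calls added here and below turn the old implicit "caller holds inode_lock" contract into an explicit, runtime-checked one: the assertion verifies the lock is held (by someone) at the call site. As a hedged aside, lockdep_assert_held() is the related annotation that additionally checks the current task owns the lock, at the cost of only firing under lockdep; a sketch of that alternative, with a hypothetical function name:

/* Sketch only, not what the patch uses: */
static void redirty_tail_alt(struct inode *inode)
{
        lockdep_assert_held(&inode_wb_list_lock); /* checks ownership, lockdep only */
        /* ... same tail insertion into wb->b_dirty as above ... */
}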
@@ -205,14 +217,17 @@ static void requeue_io(struct inode *inode)
 {
         struct bdi_writeback *wb = &inode_to_bdi(inode)->wb;
 
+        assert_spin_locked(&inode_wb_list_lock);
         list_move(&inode->i_wb_list, &wb->b_more_io);
 }
 
 static void inode_sync_complete(struct inode *inode)
 {
         /*
-         * Prevent speculative execution through spin_unlock(&inode_lock);
+         * Prevent speculative execution through
+         * spin_unlock(&inode_wb_list_lock);
          */
+
         smp_mb();
         wake_up_bit(&inode->i_state, __I_SYNC);
 }
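The reworded comment describes the same barrier as before, just against the new lock: the store clearing I_SYNC must be globally visible before wake_up_bit() examines the wait queue, or, roughly, a waiter that has just rechecked the bit could miss the wakeup and sleep indefinitely. A condensed pairing of the two sides as they stand after this patch:

/* Waker side (writeback_single_inode, then inode_sync_complete): */
inode->i_state &= ~I_SYNC;        /* clear the bit ...            */
smp_mb();                         /* ... visibly, before waking   */
wake_up_bit(&inode->i_state, __I_SYNC);

/* Waiter side (inode_wait_for_writeback): */
while (inode->i_state & I_SYNC) { /* recheck under inode->i_lock  */
        /* drop i_lock and inode_wb_list_lock, __wait_on_bit(),
         * then retake both in outer-to-inner order */
}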
@@ -286,6 +301,7 @@ static void move_expired_inodes(struct list_head *delaying_queue,
  */
 static void queue_io(struct bdi_writeback *wb, unsigned long *older_than_this)
 {
+        assert_spin_locked(&inode_wb_list_lock);
         list_splice_init(&wb->b_more_io, &wb->b_io);
         move_expired_inodes(&wb->b_dirty, &wb->b_io, older_than_this);
 }
@@ -306,25 +322,25 @@ static void inode_wait_for_writeback(struct inode *inode)
         wait_queue_head_t *wqh;
 
         wqh = bit_waitqueue(&inode->i_state, __I_SYNC);
         while (inode->i_state & I_SYNC) {
-                spin_unlock(&inode_lock);
+                spin_unlock(&inode->i_lock);
+                spin_unlock(&inode_wb_list_lock);
                 __wait_on_bit(wqh, &wq, inode_wait, TASK_UNINTERRUPTIBLE);
-                spin_lock(&inode_lock);
+                spin_lock(&inode_wb_list_lock);
+                spin_lock(&inode->i_lock);
         }
 }
 
 /*
- * Write out an inode's dirty pages. Called under inode_lock. Either the
- * caller has ref on the inode (either via __iget or via syscall against an fd)
- * or the inode has I_WILL_FREE set (via generic_forget_inode)
+ * Write out an inode's dirty pages. Called under inode_wb_list_lock and
+ * inode->i_lock. Either the caller has an active reference on the inode or
+ * the inode has I_WILL_FREE set.
  *
  * If `wait' is set, wait on the writeout.
  *
  * The whole writeout design is quite complex and fragile. We want to avoid
  * starvation of particular inodes when others are being redirtied, prevent
  * livelocks, etc.
- *
- * Called under inode_lock.
  */
 static int
 writeback_single_inode(struct inode *inode, struct writeback_control *wbc)
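Note the symmetry the wait loop establishes: inode_wb_list_lock is the outer lock and inode->i_lock the inner one, so the sleep path releases inner-then-outer and reacquires outer-then-inner. A minimal sketch of that discipline, with hypothetical helper names purely for illustration:

/* Sketch of the nesting discipline the loop above preserves. */
static void lock_inode_for_writeback(struct inode *inode)
{
        spin_lock(&inode_wb_list_lock);   /* outer lock */
        spin_lock(&inode->i_lock);        /* inner lock */
}

static void unlock_inode_for_writeback(struct inode *inode)
{
        spin_unlock(&inode->i_lock);      /* inner first */
        spin_unlock(&inode_wb_list_lock); /* outer last  */
}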
@@ -333,6 +349,9 @@ writeback_single_inode(struct inode *inode, struct writeback_control *wbc)
         unsigned dirty;
         int ret;
 
+        assert_spin_locked(&inode_wb_list_lock);
+        assert_spin_locked(&inode->i_lock);
+
         if (!atomic_read(&inode->i_count))
                 WARN_ON(!(inode->i_state & (I_WILL_FREE|I_FREEING)));
         else
@@ -363,7 +382,8 @@ writeback_single_inode(struct inode *inode, struct writeback_control *wbc)
         /* Set I_SYNC, reset I_DIRTY_PAGES */
         inode->i_state |= I_SYNC;
         inode->i_state &= ~I_DIRTY_PAGES;
-        spin_unlock(&inode_lock);
+        spin_unlock(&inode->i_lock);
+        spin_unlock(&inode_wb_list_lock);
 
         ret = do_writepages(mapping, wbc);
 
@@ -383,10 +403,10 @@ writeback_single_inode(struct inode *inode, struct writeback_control *wbc)
          * due to delalloc, clear dirty metadata flags right before
          * write_inode()
          */
-        spin_lock(&inode_lock);
+        spin_lock(&inode->i_lock);
         dirty = inode->i_state & I_DIRTY;
         inode->i_state &= ~(I_DIRTY_SYNC | I_DIRTY_DATASYNC);
-        spin_unlock(&inode_lock);
+        spin_unlock(&inode->i_lock);
         /* Don't write the inode if only I_DIRTY_PAGES was set */
         if (dirty & (I_DIRTY_SYNC | I_DIRTY_DATASYNC)) {
                 int err = write_inode(inode, wbc);
@@ -394,7 +414,8 @@ writeback_single_inode(struct inode *inode, struct writeback_control *wbc)
                         ret = err;
         }
 
-        spin_lock(&inode_lock);
+        spin_lock(&inode_wb_list_lock);
+        spin_lock(&inode->i_lock);
         inode->i_state &= ~I_SYNC;
         if (!(inode->i_state & I_FREEING)) {
                 if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) {
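Read together, the four hunks above define the new locking shape of writeback_single_inode(): both locks are held on entry (now asserted), both are dropped across do_writepages(), only i_lock brackets the i_state update before write_inode(), and both are retaken outer-first for the final requeueing. Exclusion across the unlocked window comes from the I_SYNC bit, not from a lock. Condensed control flow, same calls as in the hunks:

/* Condensed from writeback_single_inode() after this patch: */
inode->i_state |= I_SYNC;              /* claim exclusion            */
spin_unlock(&inode->i_lock);
spin_unlock(&inode_wb_list_lock);      /* no locks held across I/O   */

ret = do_writepages(mapping, wbc);     /* may block                  */

spin_lock(&inode->i_lock);             /* i_state only needs i_lock  */
dirty = inode->i_state & I_DIRTY;
inode->i_state &= ~(I_DIRTY_SYNC | I_DIRTY_DATASYNC);
spin_unlock(&inode->i_lock);
/* ... write_inode() if metadata was dirty ... */

spin_lock(&inode_wb_list_lock);        /* outer lock first           */
spin_lock(&inode->i_lock);
inode->i_state &= ~I_SYNC;             /* release exclusion          */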
@@ -506,7 +527,9 @@ static int writeback_sb_inodes(struct super_block *sb, struct bdi_writeback *wb,
                  * kind does not need peridic writeout yet, and for the latter
                  * kind writeout is handled by the freer.
                  */
+                spin_lock(&inode->i_lock);
                 if (inode->i_state & (I_NEW | I_FREEING | I_WILL_FREE)) {
+                        spin_unlock(&inode->i_lock);
                         requeue_io(inode);
                         continue;
                 }
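Because i_lock nests inside inode_wb_list_lock, the b_io walk can take each inode's lock for the i_state check while continuing to hold the list lock, and requeue_io() (which asserts the list lock) remains callable after the per-inode lock is dropped. The walk pattern in isolation, condensed from the surrounding function:

/* Condensed walk pattern established by this hunk: inode_wb_list_lock
 * is held for the whole loop; i_lock is taken per inode. */
while (!list_empty(&wb->b_io)) {
        struct inode *inode = wb_inode(wb->b_io.prev);

        spin_lock(&inode->i_lock);            /* inner lock        */
        if (inode->i_state & (I_NEW | I_FREEING | I_WILL_FREE)) {
                spin_unlock(&inode->i_lock);
                requeue_io(inode);            /* outer still held  */
                continue;
        }
        /* ... writeback_single_inode(inode, wbc) ... */
}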
@@ -515,10 +538,13 @@ static int writeback_sb_inodes(struct super_block *sb, struct bdi_writeback *wb,
                  * Was this inode dirtied after sync_sb_inodes was called?
                  * This keeps sync from extra jobs and livelock.
                  */
-                if (inode_dirtied_after(inode, wbc->wb_start))
+                if (inode_dirtied_after(inode, wbc->wb_start)) {
+                        spin_unlock(&inode->i_lock);
                         return 1;
+                }
 
                 __iget(inode);
+
                 pages_skipped = wbc->pages_skipped;
                 writeback_single_inode(inode, wbc);
                 if (wbc->pages_skipped != pages_skipped) {
@@ -528,10 +554,11 @@ static int writeback_sb_inodes(struct super_block *sb, struct bdi_writeback *wb,
                          */
                         redirty_tail(inode);
                 }
-                spin_unlock(&inode_lock);
+                spin_unlock(&inode->i_lock);
+                spin_unlock(&inode_wb_list_lock);
                 iput(inode);
                 cond_resched();
-                spin_lock(&inode_lock);
+                spin_lock(&inode_wb_list_lock);
                 if (wbc->nr_to_write <= 0) {
                         wbc->more_io = 1;
                         return 1;
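The iput() here can drop the last reference and trigger eviction, so it must not run under either writeback lock (the wait_sb_inodes() comment later in this patch spells out the same constraint for inode_sb_list_lock). Hence the drop-both, iput(), retake-outer-only sequence; the next loop pass takes the per-inode lock itself. The pattern in isolation:

/* Reference/locking pattern, condensed: never iput() with a
 * writeback lock held, since the final iput() can evict the inode. */
__iget(inode);                       /* pin: keeps the inode alive  */
/* ... writeback_single_inode(inode, wbc) ... */
spin_unlock(&inode->i_lock);
spin_unlock(&inode_wb_list_lock);
iput(inode);                         /* may be the final reference  */
cond_resched();
spin_lock(&inode_wb_list_lock);      /* i_lock retaken next pass    */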
@@ -550,7 +577,7 @@ void writeback_inodes_wb(struct bdi_writeback *wb,
 
         if (!wbc->wb_start)
                 wbc->wb_start = jiffies; /* livelock avoidance */
-        spin_lock(&inode_lock);
+        spin_lock(&inode_wb_list_lock);
         if (!wbc->for_kupdate || list_empty(&wb->b_io))
                 queue_io(wb, wbc->older_than_this);
 
@@ -568,7 +595,7 @@ void writeback_inodes_wb(struct bdi_writeback *wb,
                 if (ret)
                         break;
         }
-        spin_unlock(&inode_lock);
+        spin_unlock(&inode_wb_list_lock);
         /* Leave any unwritten inodes on b_io */
 }
 
@@ -577,11 +604,11 @@ static void __writeback_inodes_sb(struct super_block *sb,
 {
         WARN_ON(!rwsem_is_locked(&sb->s_umount));
 
-        spin_lock(&inode_lock);
+        spin_lock(&inode_wb_list_lock);
         if (!wbc->for_kupdate || list_empty(&wb->b_io))
                 queue_io(wb, wbc->older_than_this);
         writeback_sb_inodes(sb, wb, wbc, true);
-        spin_unlock(&inode_lock);
+        spin_unlock(&inode_wb_list_lock);
 }
 
 /*
@@ -720,13 +747,15 @@ static long wb_writeback(struct bdi_writeback *wb,
                          * become available for writeback. Otherwise
                          * we'll just busyloop.
                          */
-                        spin_lock(&inode_lock);
+                        spin_lock(&inode_wb_list_lock);
                         if (!list_empty(&wb->b_more_io)) {
                                 inode = wb_inode(wb->b_more_io.prev);
                                 trace_wbc_writeback_wait(&wbc, wb->bdi);
+                                spin_lock(&inode->i_lock);
                                 inode_wait_for_writeback(inode);
+                                spin_unlock(&inode->i_lock);
                         }
-                        spin_unlock(&inode_lock);
+                        spin_unlock(&inode_wb_list_lock);
                 }
 
         return wrote;
@@ -992,7 +1021,6 @@ void __mark_inode_dirty(struct inode *inode, int flags)
 {
         struct super_block *sb = inode->i_sb;
         struct backing_dev_info *bdi = NULL;
-        bool wakeup_bdi = false;
 
         /*
          * Don't do this for I_DIRTY_PAGES - that doesn't actually
@@ -1016,7 +1044,7 @@ void __mark_inode_dirty(struct inode *inode, int flags)
         if (unlikely(block_dump))
                 block_dump___mark_inode_dirty(inode);
 
-        spin_lock(&inode_lock);
+        spin_lock(&inode->i_lock);
         if ((inode->i_state & flags) != flags) {
                 const int was_dirty = inode->i_state & I_DIRTY;
 
@@ -1028,7 +1056,7 @@ void __mark_inode_dirty(struct inode *inode, int flags)
                  * superblock list, based upon its state.
                  */
                 if (inode->i_state & I_SYNC)
-                        goto out;
+                        goto out_unlock_inode;
 
                 /*
                  * Only add valid (hashed) inodes to the superblock's
@@ -1036,16 +1064,17 @@ void __mark_inode_dirty(struct inode *inode, int flags)
                  */
                 if (!S_ISBLK(inode->i_mode)) {
                         if (inode_unhashed(inode))
-                                goto out;
+                                goto out_unlock_inode;
                 }
                 if (inode->i_state & I_FREEING)
-                        goto out;
+                        goto out_unlock_inode;
 
                 /*
                  * If the inode was already on b_dirty/b_io/b_more_io, don't
                  * reposition it (that would break b_dirty time-ordering).
                  */
                 if (!was_dirty) {
+                        bool wakeup_bdi = false;
                         bdi = inode_to_bdi(inode);
 
                         if (bdi_cap_writeback_dirty(bdi)) {
@@ -1062,15 +1091,20 @@ void __mark_inode_dirty(struct inode *inode, int flags)
                                         wakeup_bdi = true;
                         }
 
+                        spin_unlock(&inode->i_lock);
+                        spin_lock(&inode_wb_list_lock);
                         inode->dirtied_when = jiffies;
                         list_move(&inode->i_wb_list, &bdi->wb.b_dirty);
+                        spin_unlock(&inode_wb_list_lock);
+
+                        if (wakeup_bdi)
+                                bdi_wakeup_thread_delayed(bdi);
+                        return;
                 }
         }
-out:
-        spin_unlock(&inode_lock);
+out_unlock_inode:
+        spin_unlock(&inode->i_lock);
 
-        if (wakeup_bdi)
-                bdi_wakeup_thread_delayed(bdi);
 }
 EXPORT_SYMBOL(__mark_inode_dirty);
 
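This is the heart of the split in the dirty path: i_state manipulation finishes under i_lock, the lock is then handed off to inode_wb_list_lock for the list move, and the bdi wakeup runs with no locks held. Since wakeup_bdi only matters on this branch, it becomes a block-local, which is why the function-scope flag was removed earlier. The handoff shape, condensed:

/* Lock handoff sketch: i_state under i_lock, list membership under
 * inode_wb_list_lock, never both unless wb_list_lock is the outer. */
spin_lock(&inode->i_lock);
inode->i_state |= I_DIRTY;           /* state change: inner lock   */
spin_unlock(&inode->i_lock);

spin_lock(&inode_wb_list_lock);      /* list change: its own lock  */
inode->dirtied_when = jiffies;
list_move(&inode->i_wb_list, &bdi->wb.b_dirty);
spin_unlock(&inode_wb_list_lock);

if (wakeup_bdi)                      /* wakeup with no locks held  */
        bdi_wakeup_thread_delayed(bdi);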
@@ -1101,7 +1135,7 @@ static void wait_sb_inodes(struct super_block *sb)
          */
         WARN_ON(!rwsem_is_locked(&sb->s_umount));
 
-        spin_lock(&inode_lock);
+        spin_lock(&inode_sb_list_lock);
 
         /*
          * Data integrity sync. Must wait for all pages under writeback,
@@ -1111,22 +1145,25 @@ static void wait_sb_inodes(struct super_block *sb)
          * we still have to wait for that writeout.
          */
         list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
-                struct address_space *mapping;
+                struct address_space *mapping = inode->i_mapping;
 
-                if (inode->i_state & (I_FREEING|I_WILL_FREE|I_NEW))
-                        continue;
-                mapping = inode->i_mapping;
-                if (mapping->nrpages == 0)
-                        continue;
+                spin_lock(&inode->i_lock);
+                if ((inode->i_state & (I_FREEING|I_WILL_FREE|I_NEW)) ||
+                    (mapping->nrpages == 0)) {
+                        spin_unlock(&inode->i_lock);
+                        continue;
+                }
                 __iget(inode);
-                spin_unlock(&inode_lock);
+                spin_unlock(&inode->i_lock);
+                spin_unlock(&inode_sb_list_lock);
+
                 /*
-                 * We hold a reference to 'inode' so it couldn't have
-                 * been removed from s_inodes list while we dropped the
-                 * inode_lock. We cannot iput the inode now as we can
-                 * be holding the last reference and we cannot iput it
-                 * under inode_lock. So we keep the reference and iput
-                 * it later.
+                 * We hold a reference to 'inode' so it couldn't have been
+                 * removed from s_inodes list while we dropped the
+                 * inode_sb_list_lock. We cannot iput the inode now as we can
+                 * be holding the last reference and we cannot iput it under
+                 * inode_sb_list_lock. So we keep the reference and iput it
+                 * later.
                  */
                 iput(old_inode);
                 old_inode = inode;
@@ -1135,9 +1172,9 @@ static void wait_sb_inodes(struct super_block *sb)
 
                 cond_resched();
 
-                spin_lock(&inode_lock);
+                spin_lock(&inode_sb_list_lock);
         }
-        spin_unlock(&inode_lock);
+        spin_unlock(&inode_sb_list_lock);
         iput(old_inode);
 }
 
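wait_sb_inodes() now demonstrates the canonical iteration idiom for s_inodes under the new scheme: validate and pin the inode under i_lock, drop both locks for the blocking wait, and defer iput() of the previous inode until it can run unlocked. In outline, with unusable() as a hypothetical stand-in for the I_FREEING/I_WILL_FREE/I_NEW and nrpages checks above:

/* Safe-iteration idiom, condensed from wait_sb_inodes(): */
struct inode *inode, *old_inode = NULL;

spin_lock(&inode_sb_list_lock);
list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
        spin_lock(&inode->i_lock);
        if (unusable(inode)) {                /* hypothetical predicate */
                spin_unlock(&inode->i_lock);
                continue;
        }
        __iget(inode);                        /* pin under i_lock       */
        spin_unlock(&inode->i_lock);
        spin_unlock(&inode_sb_list_lock);

        iput(old_inode);                      /* deferred, unlocked     */
        old_inode = inode;
        /* ... filemap_fdatawait(inode->i_mapping), may block ...       */

        spin_lock(&inode_sb_list_lock);
}
spin_unlock(&inode_sb_list_lock);
iput(old_inode);                              /* release the last pin   */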
@@ -1271,9 +1308,11 @@ int write_inode_now(struct inode *inode, int sync)
         wbc.nr_to_write = 0;
 
         might_sleep();
-        spin_lock(&inode_lock);
+        spin_lock(&inode_wb_list_lock);
+        spin_lock(&inode->i_lock);
         ret = writeback_single_inode(inode, &wbc);
-        spin_unlock(&inode_lock);
+        spin_unlock(&inode->i_lock);
+        spin_unlock(&inode_wb_list_lock);
         if (sync)
                 inode_sync_wait(inode);
         return ret;
@@ -1295,9 +1334,11 @@ int sync_inode(struct inode *inode, struct writeback_control *wbc)
 {
         int ret;
 
-        spin_lock(&inode_lock);
+        spin_lock(&inode_wb_list_lock);
+        spin_lock(&inode->i_lock);
         ret = writeback_single_inode(inode, wbc);
-        spin_unlock(&inode_lock);
+        spin_unlock(&inode->i_lock);
+        spin_unlock(&inode_wb_list_lock);
         return ret;
 }
 EXPORT_SYMBOL(sync_inode);
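Taking the whole diff together, the single global inode_lock is replaced in this file by three narrower locks with a fixed hierarchy. A summary sketch of the resulting ordering as it is used in the hunks above; the authoritative ordering comment lives elsewhere in the series (fs/inode.c), so treat this as a reading aid rather than the canonical documentation:

/*
 * Lock ordering in fs/fs-writeback.c after this patch (outer to inner):
 *
 *   inode_wb_list_lock          protects bdi->wb.b_{dirty,io,more_io}
 *     inode->i_lock               and inode->i_wb_list
 *
 *   inode_sb_list_lock          protects sb->s_inodes
 *     inode->i_lock
 *
 * inode->i_lock protects inode->i_state; it is always the innermost
 * lock here and is never held across blocking calls.
 */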