about summary refs log tree commit diff stats
path: root/fs
diff options
context:
space:
mode:
authorJan Kara <jack@suse.cz>2013-06-28 10:04:02 -0400
committerJens Axboe <axboe@kernel.dk>2013-06-28 10:04:02 -0400
commita5faeaf9109578e65e1a32e2a3e76c8b47e7dcb6 (patch)
tree9082ec395e98ad41ec046786234cf7669a95bc05 /fs
parent9138125beabbb76b4a373d4a619870f6f5d86fc5 (diff)
writeback: Fix periodic writeback after fs mount
Code in blkdev.c moves a device inode to default_backing_dev_info when the
last reference to the device is put and moves the device inode back to its
bdi when the first reference is acquired. This includes moving to
wb.b_dirty list if the device inode is dirty. The code however doesn't
setup timer to wake corresponding flusher thread and while wb.b_dirty list
is non-empty __mark_inode_dirty() will not set it up either. Thus periodic
writeback is effectively disabled until a sync(2) call which can lead to
unexpected data loss in case of crash or power failure.

Fix the problem by setting up a timer for periodic writeback in case we
add the first dirty inode to wb.b_dirty list in bdev_inode_switch_bdi().

Reported-by: Bert De Jonghe <Bert.DeJonghe@amplidata.com>
CC: stable@vger.kernel.org # >= 3.0
Signed-off-by: Jan Kara <jack@suse.cz>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Diffstat (limited to 'fs')
-rw-r--r--	fs/block_dev.c	9
1 file changed, 8 insertions(+), 1 deletion(-)
diff --git a/fs/block_dev.c b/fs/block_dev.c
index 2091db8cdd78..85f5c85ec91c 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -58,17 +58,24 @@ static void bdev_inode_switch_bdi(struct inode *inode,
 			struct backing_dev_info *dst)
 {
 	struct backing_dev_info *old = inode->i_data.backing_dev_info;
+	bool wakeup_bdi = false;
 
 	if (unlikely(dst == old))		/* deadlock avoidance */
 		return;
 	bdi_lock_two(&old->wb, &dst->wb);
 	spin_lock(&inode->i_lock);
 	inode->i_data.backing_dev_info = dst;
-	if (inode->i_state & I_DIRTY)
+	if (inode->i_state & I_DIRTY) {
+		if (bdi_cap_writeback_dirty(dst) && !wb_has_dirty_io(&dst->wb))
+			wakeup_bdi = true;
 		list_move(&inode->i_wb_list, &dst->wb.b_dirty);
+	}
 	spin_unlock(&inode->i_lock);
 	spin_unlock(&old->wb.list_lock);
 	spin_unlock(&dst->wb.list_lock);
+
+	if (wakeup_bdi)
+		bdi_wakeup_thread_delayed(dst);
 }
 
 /* Kill _all_ buffers and pagecache , dirty or not.. */