about summary refs log tree commit diff stats
diff options
context:
space:
mode:
author    Jan Kara <jack@suse.cz>  2014-04-03 17:46:23 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>  2014-04-03 19:20:49 -0400
commit    5acda9d12dcf1ad0d9a5a2a7c646de3472fa7555 (patch)
tree      5103d2779fbf2f1f07edb1ed51083d1303073ef9
parent    6ca738d60c563d5c6cf6253ee4b8e76fa77b2b9e (diff)
bdi: avoid oops on device removal
After commit 839a8e8660b6 ("writeback: replace custom worker pool implementation with unbound workqueue"), when a device is removed while we are writing to it, we crash in bdi_writeback_workfn() -> set_worker_desc() because bdi->dev is NULL.

This can happen because even though bdi_unregister() cancels all pending flushing work, nothing really prevents new ones from being queued from balance_dirty_pages() or other places.

Fix the problem by clearing the BDI_registered bit in bdi_unregister() and checking it before scheduling any flushing work.

Fixes: 839a8e8660b6777e7fe4e80af1a048aebe2b5977
Reviewed-by: Tejun Heo <tj@kernel.org>
Signed-off-by: Jan Kara <jack@suse.cz>
Cc: Derek Basehore <dbasehore@chromium.org>
Cc: Jens Axboe <axboe@kernel.dk>
Cc: <stable@vger.kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-rw-r--r--  fs/fs-writeback.c            | 23
-rw-r--r--  include/linux/backing-dev.h  |  2
-rw-r--r--  mm/backing-dev.c             | 13
3 files changed, 28 insertions, 10 deletions
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
index 6788fe0c9761..a16315957ef3 100644
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -89,16 +89,29 @@ static inline struct inode *wb_inode(struct list_head *head)
 #define CREATE_TRACE_POINTS
 #include <trace/events/writeback.h>
 
+static void bdi_wakeup_thread(struct backing_dev_info *bdi)
+{
+	spin_lock_bh(&bdi->wb_lock);
+	if (test_bit(BDI_registered, &bdi->state))
+		mod_delayed_work(bdi_wq, &bdi->wb.dwork, 0);
+	spin_unlock_bh(&bdi->wb_lock);
+}
+
 static void bdi_queue_work(struct backing_dev_info *bdi,
 			   struct wb_writeback_work *work)
 {
 	trace_writeback_queue(bdi, work);
 
 	spin_lock_bh(&bdi->wb_lock);
+	if (!test_bit(BDI_registered, &bdi->state)) {
+		if (work->done)
+			complete(work->done);
+		goto out_unlock;
+	}
 	list_add_tail(&work->list, &bdi->work_list);
-	spin_unlock_bh(&bdi->wb_lock);
-
-	mod_delayed_work(bdi_wq, &bdi->wb.dwork, 0);
+	mod_delayed_work(bdi_wq, &bdi->wb.dwork, 0);
+out_unlock:
+	spin_unlock_bh(&bdi->wb_lock);
 }
 
 static void
@@ -114,7 +127,7 @@ __bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages,
 	work = kzalloc(sizeof(*work), GFP_ATOMIC);
 	if (!work) {
 		trace_writeback_nowork(bdi);
-		mod_delayed_work(bdi_wq, &bdi->wb.dwork, 0);
+		bdi_wakeup_thread(bdi);
 		return;
 	}
 
@@ -161,7 +174,7 @@ void bdi_start_background_writeback(struct backing_dev_info *bdi)
 	 * writeback as soon as there is no other work to do.
 	 */
	trace_writeback_wake_background(bdi);
-	mod_delayed_work(bdi_wq, &bdi->wb.dwork, 0);
+	bdi_wakeup_thread(bdi);
 }
 
 /*
@@ -1017,7 +1030,7 @@ void bdi_writeback_workfn(struct work_struct *work)
 	current->flags |= PF_SWAPWRITE;
 
 	if (likely(!current_is_workqueue_rescuer() ||
-		   list_empty(&bdi->bdi_list))) {
+		   !test_bit(BDI_registered, &bdi->state))) {
 		/*
 		 * The normal path.  Keep writing back @bdi until its
 		 * work_list is empty.  Note that this path is also taken
diff --git a/include/linux/backing-dev.h b/include/linux/backing-dev.h
index 24819001f5c8..e488e9459a93 100644
--- a/include/linux/backing-dev.h
+++ b/include/linux/backing-dev.h
@@ -95,7 +95,7 @@ struct backing_dev_info {
 	unsigned int max_ratio, max_prop_frac;
 
 	struct bdi_writeback wb;  /* default writeback info for this bdi */
-	spinlock_t wb_lock;	  /* protects work_list */
+	spinlock_t wb_lock;	  /* protects work_list & wb.dwork scheduling */
 
 	struct list_head work_list;
 
diff --git a/mm/backing-dev.c b/mm/backing-dev.c
index fab8401fc54e..09d9591b7708 100644
--- a/mm/backing-dev.c
+++ b/mm/backing-dev.c
@@ -297,7 +297,10 @@ void bdi_wakeup_thread_delayed(struct backing_dev_info *bdi)
297 unsigned long timeout; 297 unsigned long timeout;
298 298
299 timeout = msecs_to_jiffies(dirty_writeback_interval * 10); 299 timeout = msecs_to_jiffies(dirty_writeback_interval * 10);
300 queue_delayed_work(bdi_wq, &bdi->wb.dwork, timeout); 300 spin_lock_bh(&bdi->wb_lock);
301 if (test_bit(BDI_registered, &bdi->state))
302 queue_delayed_work(bdi_wq, &bdi->wb.dwork, timeout);
303 spin_unlock_bh(&bdi->wb_lock);
301} 304}
302 305
303/* 306/*
@@ -310,9 +313,6 @@ static void bdi_remove_from_list(struct backing_dev_info *bdi)
 	spin_unlock_bh(&bdi_lock);
 
 	synchronize_rcu_expedited();
-
-	/* bdi_list is now unused, clear it to mark @bdi dying */
-	INIT_LIST_HEAD(&bdi->bdi_list);
 }
 
 int bdi_register(struct backing_dev_info *bdi, struct device *parent,
@@ -363,6 +363,11 @@ static void bdi_wb_shutdown(struct backing_dev_info *bdi)
 	 */
 	bdi_remove_from_list(bdi);
 
+	/* Make sure nobody queues further work */
+	spin_lock_bh(&bdi->wb_lock);
+	clear_bit(BDI_registered, &bdi->state);
+	spin_unlock_bh(&bdi->wb_lock);
+
 	/*
 	 * Drain work list and shutdown the delayed_work.  At this point,
 	 * @bdi->bdi_list is empty telling bdi_writeback_workfn() that @bdi