author		Bart Van Assche <bart.vanassche@sandisk.com>	2016-11-11 20:05:27 -0500
committer	Greg Kroah-Hartman <gregkh@linuxfoundation.org>	2017-01-06 04:40:15 -0500
commit		e362c317ba76c9d397238737deef952187a21395 (patch)
tree		034b5468ac04b334b1a040cf8069e28e6503b6a3
parent		2c017f77e13d4325d8739fc9ed7ed2c3629da845 (diff)
dm rq: fix a race condition in rq_completed()
commit d15bb3a6467e102e60d954aadda5fb19ce6fd8ec upstream.

It is required to hold the queue lock when calling blk_run_queue_async(),
to avoid triggering a race between blk_run_queue_async() and
blk_cleanup_queue().

Signed-off-by: Bart Van Assche <bart.vanassche@sandisk.com>
Signed-off-by: Mike Snitzer <snitzer@redhat.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
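For reference, a minimal sketch of the locking pattern this patch applies. The
helper name run_queue_locked() is hypothetical and not part of the patch; it
assumes the non-blk-mq (->request_fn) path, where q->queue_lock is the queue's
spinlock, exactly as in the hunk below:

	#include <linux/blkdev.h>

	/*
	 * Illustrative only: call blk_run_queue_async() with the queue lock
	 * held so it cannot race with blk_cleanup_queue() tearing the queue
	 * down.
	 */
	static void run_queue_locked(struct request_queue *q)
	{
		unsigned long flags;

		spin_lock_irqsave(q->queue_lock, flags);
		blk_run_queue_async(q);
		spin_unlock_irqrestore(q->queue_lock, flags);
	}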
-rw-r--r--	drivers/md/dm-rq.c	10
1 file changed, 8 insertions(+), 2 deletions(-)
diff --git a/drivers/md/dm-rq.c b/drivers/md/dm-rq.c
index 1d0d2adc050a..31a89c8832c0 100644
--- a/drivers/md/dm-rq.c
+++ b/drivers/md/dm-rq.c
@@ -226,6 +226,9 @@ static void rq_end_stats(struct mapped_device *md, struct request *orig)
  */
 static void rq_completed(struct mapped_device *md, int rw, bool run_queue)
 {
+	struct request_queue *q = md->queue;
+	unsigned long flags;
+
 	atomic_dec(&md->pending[rw]);
 
 	/* nudge anyone waiting on suspend queue */
@@ -238,8 +241,11 @@ static void rq_completed(struct mapped_device *md, int rw, bool run_queue)
 	 * back into ->request_fn() could deadlock attempting to grab the
 	 * queue lock again.
 	 */
-	if (!md->queue->mq_ops && run_queue)
-		blk_run_queue_async(md->queue);
+	if (!q->mq_ops && run_queue) {
+		spin_lock_irqsave(q->queue_lock, flags);
+		blk_run_queue_async(q);
+		spin_unlock_irqrestore(q->queue_lock, flags);
+	}
 
 	/*
 	 * dm_put() must be at the end of this function. See the comment above