author     Bart Van Assche <bart.vanassche@sandisk.com>    2016-11-11 20:05:27 -0500
committer  Mike Snitzer <snitzer@redhat.com>                2016-11-14 15:17:50 -0500
commit     d15bb3a6467e102e60d954aadda5fb19ce6fd8ec
tree       5499a3ea35eceb92c936ff53679ea8d56b294920 /drivers/md/dm-rq.c
parent     2e8ed71102ff8fe3919dd3a2d73ac4da72686efc
dm rq: fix a race condition in rq_completed()
The queue lock must be held when calling blk_run_queue_async(), to avoid
triggering a race between blk_run_queue_async() and blk_cleanup_queue().
Cc: stable@vger.kernel.org
Signed-off-by: Bart Van Assche <bart.vanassche@sandisk.com>
Signed-off-by: Mike Snitzer <snitzer@redhat.com>
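
For reference, the completion path after this patch looks roughly like the
sketch below (a minimal sketch only: the pending-I/O accounting, suspend
wakeup, and final dm_put() are elided, and it assumes the legacy, non-blk-mq
request_queue of this kernel, where ->queue_lock and ->mq_ops exist):

static void rq_completed(struct mapped_device *md, int rw, bool run_queue)
{
	struct request_queue *q = md->queue;
	unsigned long flags;

	/* ... pending-I/O accounting and suspend wakeup elided ... */

	if (!q->mq_ops && run_queue) {
		/*
		 * Run the queue asynchronously while holding the queue lock,
		 * so the call cannot race with blk_cleanup_queue().
		 */
		spin_lock_irqsave(q->queue_lock, flags);
		blk_run_queue_async(q);
		spin_unlock_irqrestore(q->queue_lock, flags);
	}

	/* ... dm_put(md) must remain the last action in this function ... */
}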
Diffstat (limited to 'drivers/md/dm-rq.c')
 drivers/md/dm-rq.c | 10 ++++++++--
 1 file changed, 8 insertions(+), 2 deletions(-)
diff --git a/drivers/md/dm-rq.c b/drivers/md/dm-rq.c
index 1d0d2adc050a..31a89c8832c0 100644
--- a/drivers/md/dm-rq.c
+++ b/drivers/md/dm-rq.c
@@ -226,6 +226,9 @@ static void rq_end_stats(struct mapped_device *md, struct request *orig)
  */
 static void rq_completed(struct mapped_device *md, int rw, bool run_queue)
 {
+	struct request_queue *q = md->queue;
+	unsigned long flags;
+
 	atomic_dec(&md->pending[rw]);
 
 	/* nudge anyone waiting on suspend queue */
@@ -238,8 +241,11 @@ static void rq_completed(struct mapped_device *md, int rw, bool run_queue)
 	 * back into ->request_fn() could deadlock attempting to grab the
 	 * queue lock again.
 	 */
-	if (!md->queue->mq_ops && run_queue)
-		blk_run_queue_async(md->queue);
+	if (!q->mq_ops && run_queue) {
+		spin_lock_irqsave(q->queue_lock, flags);
+		blk_run_queue_async(q);
+		spin_unlock_irqrestore(q->queue_lock, flags);
+	}
 
 	/*
 	 * dm_put() must be at the end of this function. See the comment above