diff options
author | Mike Snitzer <snitzer@redhat.com> | 2016-08-02 12:51:11 -0400 |
---|---|---|
committer | Mike Snitzer <snitzer@redhat.com> | 2016-08-02 16:21:36 -0400 |
commit | 7d9595d848cdff5c7939f68eec39e0c5d36a1d67 (patch) | |
tree | 7c67e749ebd0d30db067ed41e18fde8fdc34a0d0 | |
parent | 1814f2e3fb95b58490e56a38fefe462ffe8fb9ad (diff) |
dm rq: fix the starting and stopping of blk-mq queues
Improve dm_stop_queue() to cancel any requeue_work. Also, have
dm_start_queue() and dm_stop_queue() clear/set the QUEUE_FLAG_STOPPED
for the blk-mq request_queue.
On suspend dm_stop_queue() handles stopping the blk-mq request_queue
BUT: even though the hw_queues are marked BLK_MQ_S_STOPPED at that point
there is still a race that is allowing block/blk-mq.c to call ->queue_rq
against a hctx that it really shouldn't. Add a check to
dm_mq_queue_rq() that guards against this rarity (albeit _not_
race-free).
Signed-off-by: Mike Snitzer <snitzer@redhat.com>
Cc: stable@vger.kernel.org # must patch dm.c on < 4.8 kernels
-rw-r--r-- | drivers/md/dm-rq.c | 20 |
1 file changed, 19 insertions(+), 1 deletion(-)
diff --git a/drivers/md/dm-rq.c b/drivers/md/dm-rq.c index 7a9661868496..1ca7463e8bb2 100644 --- a/drivers/md/dm-rq.c +++ b/drivers/md/dm-rq.c | |||
@@ -78,6 +78,7 @@ void dm_start_queue(struct request_queue *q) | |||
78 | if (!q->mq_ops) | 78 | if (!q->mq_ops) |
79 | dm_old_start_queue(q); | 79 | dm_old_start_queue(q); |
80 | else { | 80 | else { |
81 | queue_flag_clear_unlocked(QUEUE_FLAG_STOPPED, q); | ||
81 | blk_mq_start_stopped_hw_queues(q, true); | 82 | blk_mq_start_stopped_hw_queues(q, true); |
82 | blk_mq_kick_requeue_list(q); | 83 | blk_mq_kick_requeue_list(q); |
83 | } | 84 | } |
@@ -101,8 +102,14 @@ void dm_stop_queue(struct request_queue *q) | |||
101 | { | 102 | { |
102 | if (!q->mq_ops) | 103 | if (!q->mq_ops) |
103 | dm_old_stop_queue(q); | 104 | dm_old_stop_queue(q); |
104 | else | 105 | else { |
106 | spin_lock_irq(q->queue_lock); | ||
107 | queue_flag_set(QUEUE_FLAG_STOPPED, q); | ||
108 | spin_unlock_irq(q->queue_lock); | ||
109 | |||
110 | blk_mq_cancel_requeue_work(q); | ||
105 | blk_mq_stop_hw_queues(q); | 111 | blk_mq_stop_hw_queues(q); |
112 | } | ||
106 | } | 113 | } |
107 | 114 | ||
108 | static struct dm_rq_target_io *alloc_old_rq_tio(struct mapped_device *md, | 115 | static struct dm_rq_target_io *alloc_old_rq_tio(struct mapped_device *md, |
@@ -864,6 +871,17 @@ static int dm_mq_queue_rq(struct blk_mq_hw_ctx *hctx, | |||
864 | dm_put_live_table(md, srcu_idx); | 871 | dm_put_live_table(md, srcu_idx); |
865 | } | 872 | } |
866 | 873 | ||
874 | /* | ||
875 | * On suspend dm_stop_queue() handles stopping the blk-mq | ||
876 | * request_queue BUT: even though the hw_queues are marked | ||
877 | * BLK_MQ_S_STOPPED at that point there is still a race that | ||
878 | * is allowing block/blk-mq.c to call ->queue_rq against a | ||
879 | * hctx that it really shouldn't. The following check guards | ||
880 | * against this rarity (albeit _not_ race-free). | ||
881 | */ | ||
882 | if (unlikely(test_bit(BLK_MQ_S_STOPPED, &hctx->state))) | ||
883 | return BLK_MQ_RQ_QUEUE_BUSY; | ||
884 | |||
867 | if (ti->type->busy && ti->type->busy(ti)) | 885 | if (ti->type->busy && ti->type->busy(ti)) |
868 | return BLK_MQ_RQ_QUEUE_BUSY; | 886 | return BLK_MQ_RQ_QUEUE_BUSY; |
869 | 887 | ||