author    Ming Lei <tom.leiming@gmail.com>  2014-05-16 11:31:21 -0400
committer Jens Axboe <axboe@fb.com>         2014-05-16 11:40:31 -0400
commit    0c29e93eae8a7f703e463c7b38ebc85d8718cae2 (patch)
tree      dc523b00af578c95aaf8dd34c69fc8f6ca4da208 /drivers/block/virtio_blk.c
parent    9acf03cfb1fbecc058d3f223323e3ed97763f1e6 (diff)
virtio_blk: fix race between start and stop queue
When there aren't enough vring descriptors to add a request to the vq, blk-mq puts the hardware queue into the stopped state until some of the pending descriptors are completed and freed. Unfortunately, the vq's interrupt may arrive just before blk-mq's BLK_MQ_S_STOPPED flag is set, so the queue is kept stopped even though lots of descriptors are completed and freed in the interrupt handler. In the worst case, all pending descriptors are freed in the interrupt handler and the queue stays stopped forever.

This patch fixes the problem by starting/stopping blk-mq while holding vq_lock.

Cc: Jens Axboe <axboe@kernel.dk>
Cc: Rusty Russell <rusty@rustcorp.com.au>
Signed-off-by: Ming Lei <tom.leiming@gmail.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
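The bug is a classic lost wakeup. As a rough userspace analogue (a compilable sketch with made-up names, not kernel code: the pthread mutex stands in for vblk->vq_lock, the "stopped" flag for BLK_MQ_S_STOPPED, and both function names are hypothetical), the broken and fixed orderings look like this:

#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t vq_lock = PTHREAD_MUTEX_INITIALIZER;
static bool stopped;   /* stands in for BLK_MQ_S_STOPPED */
static int  pending;   /* in-flight descriptors */

/* Completion path, analogous to virtblk_done(). */
static void completion_irq(void)
{
	pthread_mutex_lock(&vq_lock);
	pending = 0;             /* all pending descriptors freed */
	if (stopped)             /* restart a stopped queue...        */
		stopped = false; /* ...while still holding the lock   */
	pthread_mutex_unlock(&vq_lock);
}

/* Submission path, analogous to virtio_queue_rq() hitting -ENOSPC. */
static void submit_out_of_descriptors(void)
{
	pthread_mutex_lock(&vq_lock);
	/* Buggy order: unlock first, set "stopped" after.  The
	 * completion path can run in that window, free every pending
	 * descriptor, see stopped == false and do nothing; "stopped"
	 * is then set with no completion left to clear it, and the
	 * queue stalls forever.
	 * Fixed order: set "stopped" before dropping the lock. */
	stopped = true;
	pthread_mutex_unlock(&vq_lock);
}

The patch is exactly this reordering: blk_mq_stop_hw_queue() on the submission side and blk_mq_start_stopped_hw_queues() on the completion side are both moved inside the vq_lock critical section, so the interrupt handler can no longer slip between the unlock and the stop.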
Diffstat (limited to 'drivers/block/virtio_blk.c')
-rw-r--r--	drivers/block/virtio_blk.c	4
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index 7a51f065edcd..9f340fafca5c 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -147,11 +147,11 @@ static void virtblk_done(struct virtqueue *vq)
 		if (unlikely(virtqueue_is_broken(vq)))
 			break;
 	} while (!virtqueue_enable_cb(vq));
-	spin_unlock_irqrestore(&vblk->vq_lock, flags);
 
 	/* In case queue is stopped waiting for more buffers. */
 	if (req_done)
 		blk_mq_start_stopped_hw_queues(vblk->disk->queue, true);
+	spin_unlock_irqrestore(&vblk->vq_lock, flags);
 }
 
 static int virtio_queue_rq(struct blk_mq_hw_ctx *hctx, struct request *req)
@@ -205,8 +205,8 @@ static int virtio_queue_rq(struct blk_mq_hw_ctx *hctx, struct request *req)
 	err = __virtblk_add_req(vblk->vq, vbr, vbr->sg, num);
 	if (err) {
 		virtqueue_kick(vblk->vq);
-		spin_unlock_irqrestore(&vblk->vq_lock, flags);
 		blk_mq_stop_hw_queue(hctx);
+		spin_unlock_irqrestore(&vblk->vq_lock, flags);
 		/* Out of mem doesn't actually happen, since we fall back
 		 * to direct descriptors */
 		if (err == -ENOMEM || err == -ENOSPC)
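Taken together, the two hunks enforce a single invariant: the queue-stop on the submission side and the queue-restart on the completion side both happen while vq_lock is held, so the two paths are serialized against each other. Condensed from the hunks above (elided bodies marked with comments; this is a sketch of the resulting locking discipline, not the full functions):

/* Submission side (virtio_queue_rq), from the second hunk. */
spin_lock_irqsave(&vblk->vq_lock, flags);
err = __virtblk_add_req(vblk->vq, vbr, vbr->sg, num);
if (err) {
	virtqueue_kick(vblk->vq);   /* flush what is already queued */
	blk_mq_stop_hw_queue(hctx); /* stop while vq_lock is held   */
	spin_unlock_irqrestore(&vblk->vq_lock, flags);
	/* ... map err to a blk-mq busy/error return ... */
}

/* Completion side (virtblk_done), from the first hunk. */
spin_lock_irqsave(&vblk->vq_lock, flags);
/* ... reap completed descriptors, setting req_done ... */
if (req_done)
	blk_mq_start_stopped_hw_queues(vblk->disk->queue, true);
spin_unlock_irqrestore(&vblk->vq_lock, flags);

Note the order on the submission side: the kick is issued before the stop, so any descriptors already queued are still handed to the device even when the queue is being stopped.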