Diffstat (limited to 'drivers/block/virtio_blk.c')
-rw-r--r--	drivers/block/virtio_blk.c	88
1 file changed, 51 insertions(+), 37 deletions(-)

diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index 6d8a87f252de..f63d358f3d93 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -30,6 +30,9 @@ struct virtio_blk
 	/* The disk structure for the kernel. */
 	struct gendisk *disk;
 
+	/* Block layer tags. */
+	struct blk_mq_tag_set tag_set;
+
 	/* Process context for config space updates */
 	struct work_struct config_work;
 
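Note: struct blk_mq_tag_set is the descriptor for the tag-set API. It carries
the queue topology and, via cmd_size, the size of the driver's per-request
data (the "PDU") that blk-mq preallocates behind every struct request. A
minimal sketch of the layout this buys the driver (illustrative, not the
literal allocator code):

	/* one contiguous allocation per request, conceptually: */
	struct request rq;                /* block layer request          */
	struct virtblk_req vbr;           /* driver PDU, cmd_size bytes   */
	struct scatterlist sg[sg_elems];  /* inline sg table behind vbr   */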
@@ -112,7 +115,7 @@ static int __virtblk_add_req(struct virtqueue *vq, | |||
112 | 115 | ||
113 | static inline void virtblk_request_done(struct request *req) | 116 | static inline void virtblk_request_done(struct request *req) |
114 | { | 117 | { |
115 | struct virtblk_req *vbr = req->special; | 118 | struct virtblk_req *vbr = blk_mq_rq_to_pdu(req); |
116 | int error = virtblk_result(vbr); | 119 | int error = virtblk_result(vbr); |
117 | 120 | ||
118 | if (req->cmd_type == REQ_TYPE_BLOCK_PC) { | 121 | if (req->cmd_type == REQ_TYPE_BLOCK_PC) { |
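Note: req->special is no longer how the driver data is found; the PDU now
lives directly behind the request, so the lookup is pure pointer arithmetic.
The blk-mq helper is essentially (sketch, assuming the inline definition in
include/linux/blk-mq.h of this era):

	static inline void *blk_mq_rq_to_pdu(struct request *rq)
	{
		/* the PDU is laid out immediately after struct request */
		return rq + 1;
	}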
@@ -144,21 +147,22 @@ static void virtblk_done(struct virtqueue *vq)
 		if (unlikely(virtqueue_is_broken(vq)))
 			break;
 	} while (!virtqueue_enable_cb(vq));
-	spin_unlock_irqrestore(&vblk->vq_lock, flags);
 
 	/* In case queue is stopped waiting for more buffers. */
 	if (req_done)
-		blk_mq_start_stopped_hw_queues(vblk->disk->queue);
+		blk_mq_start_stopped_hw_queues(vblk->disk->queue, true);
+	spin_unlock_irqrestore(&vblk->vq_lock, flags);
 }
 
 static int virtio_queue_rq(struct blk_mq_hw_ctx *hctx, struct request *req)
 {
 	struct virtio_blk *vblk = hctx->queue->queuedata;
-	struct virtblk_req *vbr = req->special;
+	struct virtblk_req *vbr = blk_mq_rq_to_pdu(req);
 	unsigned long flags;
 	unsigned int num;
 	const bool last = (req->cmd_flags & REQ_END) != 0;
 	int err;
+	bool notify = false;
 
 	BUG_ON(req->nr_phys_segments + 2 > vblk->sg_elems);
 
@@ -202,8 +206,8 @@ static int virtio_queue_rq(struct blk_mq_hw_ctx *hctx, struct request *req)
 	err = __virtblk_add_req(vblk->vq, vbr, vbr->sg, num);
 	if (err) {
 		virtqueue_kick(vblk->vq);
-		spin_unlock_irqrestore(&vblk->vq_lock, flags);
 		blk_mq_stop_hw_queue(hctx);
+		spin_unlock_irqrestore(&vblk->vq_lock, flags);
 		/* Out of mem doesn't actually happen, since we fall back
 		 * to direct descriptors */
 		if (err == -ENOMEM || err == -ENOSPC)
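Note: this hunk and the virtblk_done() change above widen the vq_lock
critical sections so that blk_mq_stop_hw_queue() and
blk_mq_start_stopped_hw_queues() are serialized against each other. Without
that, an interleaving like the following seems possible (an illustrative
timeline, not taken from the commit log):

	/* CPU0: virtio_queue_rq            CPU1: virtblk_done (irq)      */
	/* ring full, err = -ENOSPC                                       */
	/* spin_unlock(&vblk->vq_lock)                                    */
	/*                                  spin_lock(&vblk->vq_lock)     */
	/*                                  reap all completions          */
	/*                                  spin_unlock(&vblk->vq_lock)   */
	/*                                  start_stopped_hw_queues()     */
	/*                                      -> no-op, nothing stopped */
	/* blk_mq_stop_hw_queue(hctx)                                     */
	/*     -> queue stopped, no completion left to ever restart it    */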
@@ -211,10 +215,12 @@ static int virtio_queue_rq(struct blk_mq_hw_ctx *hctx, struct request *req)
 		return BLK_MQ_RQ_QUEUE_ERROR;
 	}
 
-	if (last)
-		virtqueue_kick(vblk->vq);
-
+	if (last && virtqueue_kick_prepare(vblk->vq))
+		notify = true;
 	spin_unlock_irqrestore(&vblk->vq_lock, flags);
+
+	if (notify)
+		virtqueue_notify(vblk->vq);
 	return BLK_MQ_RQ_QUEUE_OK;
 }
 
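Note: the kick is split so that only the cheap availability check runs under
vq_lock, while the potentially expensive device notification (an MMIO write,
or a vmexit on PCI) happens after the unlock. For reference, virtqueue_kick()
itself is essentially the two halves combined (sketch based on
drivers/virtio/virtio_ring.c of this era):

	bool virtqueue_kick(struct virtqueue *vq)
	{
		/* check whether the device needs to be notified ... */
		if (virtqueue_kick_prepare(vq))
			/* ... and only then perform the notification */
			return virtqueue_notify(vq);
		return true;
	}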
@@ -480,33 +486,27 @@ static const struct device_attribute dev_attr_cache_type_rw =
 	__ATTR(cache_type, S_IRUGO|S_IWUSR,
 	       virtblk_cache_type_show, virtblk_cache_type_store);
 
-static struct blk_mq_ops virtio_mq_ops = {
-	.queue_rq	= virtio_queue_rq,
-	.map_queue	= blk_mq_map_queue,
-	.alloc_hctx	= blk_mq_alloc_single_hw_queue,
-	.free_hctx	= blk_mq_free_single_hw_queue,
-	.complete	= virtblk_request_done,
-};
-
-static struct blk_mq_reg virtio_mq_reg = {
-	.ops		= &virtio_mq_ops,
-	.nr_hw_queues	= 1,
-	.queue_depth	= 0, /* Set in virtblk_probe */
-	.numa_node	= NUMA_NO_NODE,
-	.flags		= BLK_MQ_F_SHOULD_MERGE,
-};
-module_param_named(queue_depth, virtio_mq_reg.queue_depth, uint, 0444);
-
-static int virtblk_init_vbr(void *data, struct blk_mq_hw_ctx *hctx,
-		struct request *rq, unsigned int nr)
+static int virtblk_init_request(void *data, struct request *rq,
+		unsigned int hctx_idx, unsigned int request_idx,
+		unsigned int numa_node)
 {
 	struct virtio_blk *vblk = data;
-	struct virtblk_req *vbr = rq->special;
+	struct virtblk_req *vbr = blk_mq_rq_to_pdu(rq);
 
 	sg_init_table(vbr->sg, vblk->sg_elems);
 	return 0;
 }
 
+static struct blk_mq_ops virtio_mq_ops = {
+	.queue_rq	= virtio_queue_rq,
+	.map_queue	= blk_mq_map_queue,
+	.complete	= virtblk_request_done,
+	.init_request	= virtblk_init_request,
+};
+
+static unsigned int virtblk_queue_depth;
+module_param_named(queue_depth, virtblk_queue_depth, uint, 0444);
+
 static int virtblk_probe(struct virtio_device *vdev)
 {
 	struct virtio_blk *vblk;
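Note: blk_mq_init_commands() is gone; its role is taken by the
->init_request() hook in blk_mq_ops, which the core invokes once for every
preallocated request when the tag set is allocated. That lets the driver drop
the writable static blk_mq_reg in favor of a plain queue-depth module
parameter. Conceptually the core does something like this per hardware queue
(an illustrative sketch with a hypothetical helper name, not the literal
kernel code):

	static int init_requests_for_hctx(struct blk_mq_tag_set *set,
					  struct blk_mq_tags *tags,
					  unsigned int hctx_idx)
	{
		unsigned int i;

		for (i = 0; i < set->queue_depth; i++) {
			struct request *rq = tags->rqs[i];

			/* give the driver a chance to set up each PDU */
			if (set->ops->init_request &&
			    set->ops->init_request(set->driver_data, rq,
						   hctx_idx, i,
						   set->numa_node))
				return -EINVAL;
		}
		return 0;
	}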
@@ -561,24 +561,34 @@ static int virtblk_probe(struct virtio_device *vdev)
 	}
 
 	/* Default queue sizing is to fill the ring. */
-	if (!virtio_mq_reg.queue_depth) {
-		virtio_mq_reg.queue_depth = vblk->vq->num_free;
+	if (!virtblk_queue_depth) {
+		virtblk_queue_depth = vblk->vq->num_free;
 		/* ... but without indirect descs, we use 2 descs per req */
 		if (!virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC))
-			virtio_mq_reg.queue_depth /= 2;
+			virtblk_queue_depth /= 2;
 	}
-	virtio_mq_reg.cmd_size =
+
+	memset(&vblk->tag_set, 0, sizeof(vblk->tag_set));
+	vblk->tag_set.ops = &virtio_mq_ops;
+	vblk->tag_set.nr_hw_queues = 1;
+	vblk->tag_set.queue_depth = virtblk_queue_depth;
+	vblk->tag_set.numa_node = NUMA_NO_NODE;
+	vblk->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
+	vblk->tag_set.cmd_size =
 		sizeof(struct virtblk_req) +
 		sizeof(struct scatterlist) * sg_elems;
+	vblk->tag_set.driver_data = vblk;
+
+	err = blk_mq_alloc_tag_set(&vblk->tag_set);
+	if (err)
+		goto out_put_disk;
 
-	q = vblk->disk->queue = blk_mq_init_queue(&virtio_mq_reg, vblk);
+	q = vblk->disk->queue = blk_mq_init_queue(&vblk->tag_set);
 	if (!q) {
 		err = -ENOMEM;
-		goto out_put_disk;
+		goto out_free_tags;
 	}
 
-	blk_mq_init_commands(q, virtblk_init_vbr, vblk);
-
 	q->queuedata = vblk;
 
 	virtblk_name_format("vd", index, vblk->disk->disk_name, DISK_NAME_LEN);
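Note: cmd_size keeps the old trick of allocating the scatterlist array
inline. Each preallocated request is followed by a virtblk_req whose trailing
sg[] flexible array has room for sg_elems entries, so the hot path never
allocates. Roughly (illustrative):

	/* cmd_size = sizeof(struct virtblk_req)
	 *          + sizeof(struct scatterlist) * sg_elems;
	 *
	 * => blk_mq_rq_to_pdu(rq) yields the virtblk_req, and vbr->sg
	 *    points at the inline array laid out right behind it.
	 */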
@@ -679,6 +689,8 @@ static int virtblk_probe(struct virtio_device *vdev)
 out_del_disk:
 	del_gendisk(vblk->disk);
 	blk_cleanup_queue(vblk->disk->queue);
+out_free_tags:
+	blk_mq_free_tag_set(&vblk->tag_set);
 out_put_disk:
 	put_disk(vblk->disk);
 out_free_vq:
@@ -705,6 +717,8 @@ static void virtblk_remove(struct virtio_device *vdev)
 	del_gendisk(vblk->disk);
 	blk_cleanup_queue(vblk->disk->queue);
 
+	blk_mq_free_tag_set(&vblk->tag_set);
+
 	/* Stop all the virtqueues. */
 	vdev->config->reset(vdev);
 
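Note: in both the probe error path and virtblk_remove(), the tag set is freed
only after blk_cleanup_queue(). The request_queue is built on top of the
requests preallocated with the set, so teardown must mirror setup
(illustrative):

	/* setup:    blk_mq_alloc_tag_set()  ->  blk_mq_init_queue()
	 * teardown: blk_cleanup_queue()     ->  blk_mq_free_tag_set()
	 */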
@@ -749,7 +763,7 @@ static int virtblk_restore(struct virtio_device *vdev)
 	vblk->config_enable = true;
 	ret = init_vq(vdev->priv);
 	if (!ret)
-		blk_mq_start_stopped_hw_queues(vblk->disk->queue, true);
+		blk_mq_start_stopped_hw_queues(vblk->disk->queue, true);
 
 	return ret;
 }
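Note: blk_mq_start_stopped_hw_queues() grew a second parameter in this API
revision; passing true asks blk-mq to run the restarted hardware queues
asynchronously (from workqueue context) rather than inline, which matters
because neither the restore path nor the vq_lock-holding completion path
above is a good place to recurse into the dispatch code. The signature
assumed here (from include/linux/blk-mq.h of this era):

	void blk_mq_start_stopped_hw_queues(struct request_queue *q,
					    bool async);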