aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/block/virtio_blk.c
diff options
context:
space:
mode:
authorMing Lei <ming.lei@canonical.com>2014-05-29 22:49:29 -0400
committerJens Axboe <axboe@fb.com>2014-05-30 10:19:39 -0400
commite8edca6f7f92234202d6dd163c118ef495244d7c (patch)
tree3546ab7cd1062b0cad3007818c08e468aa189a0c /drivers/block/virtio_blk.c
parentf89ca166460e84620db73d4542f28d34c40a8917 (diff)
block: virtio_blk: don't hold spin lock during world switch
Firstly, it isn't necessary to hold the vblk->vq_lock spin lock when notifying the hypervisor about queued I/O. Secondly, virtqueue_notify() will cause a world switch, and it may take a long time on some hypervisors (such as qemu-arm), so it isn't good to hold the lock and block other vCPUs. On an arm64 quad-core VM (qemu-kvm), the patch can increase I/O performance a lot with VIRTIO_RING_F_EVENT_IDX enabled: - without the patch: 14K IOPS - with the patch: 34K IOPS fio script: [global] direct=1 bsrange=4k-4k timeout=10 numjobs=4 ioengine=libaio iodepth=64 filename=/dev/vdc group_reporting=1 [f1] rw=randread Cc: Rusty Russell <rusty@rustcorp.com.au> Cc: "Michael S. Tsirkin" <mst@redhat.com> Cc: virtualization@lists.linux-foundation.org Signed-off-by: Ming Lei <ming.lei@canonical.com> Acked-by: Rusty Russell <rusty@rustcorp.com.au> Cc: stable@kernel.org # 3.13+ Signed-off-by: Jens Axboe <axboe@fb.com>
Diffstat (limited to 'drivers/block/virtio_blk.c')
-rw-r--r-- drivers/block/virtio_blk.c | 9 ++++++---
1 file changed, 6 insertions(+), 3 deletions(-)
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index c8f286e8d80f..f63d358f3d93 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -162,6 +162,7 @@ static int virtio_queue_rq(struct blk_mq_hw_ctx *hctx, struct request *req)
 	unsigned int num;
 	const bool last = (req->cmd_flags & REQ_END) != 0;
 	int err;
+	bool notify = false;

 	BUG_ON(req->nr_phys_segments + 2 > vblk->sg_elems);

@@ -214,10 +215,12 @@ static int virtio_queue_rq(struct blk_mq_hw_ctx *hctx, struct request *req)
 		return BLK_MQ_RQ_QUEUE_ERROR;
 	}

-	if (last)
-		virtqueue_kick(vblk->vq);
-
+	if (last && virtqueue_kick_prepare(vblk->vq))
+		notify = true;
 	spin_unlock_irqrestore(&vblk->vq_lock, flags);
+
+	if (notify)
+		virtqueue_notify(vblk->vq);
 	return BLK_MQ_RQ_QUEUE_OK;
 }
