author    Asias He <asias@redhat.com>    2012-08-08 04:07:04 -0400
committer Rusty Russell <rusty@rustcorp.com.au>    2012-09-28 01:35:13 -0400
commit    a98755c559e0e944a44174883b74a97019e3a367
tree      e93802547d879eb6745443631cf461f1796cf930 /drivers
parent    33e1afc3d82697599ccc8dc8f2fa44ffff5ae329
virtio-blk: Add bio-based IO path for virtio-blk
This patch introduces a bio-based IO path for virtio-blk. Compared to the
request-based IO path, the bio-based IO path uses the driver-provided
->make_request_fn() method to bypass the IO scheduler. It hands the bio to
the device directly, without allocating a request in the block layer. This
shortens the IO path in the guest kernel, achieving higher IOPS and lower
latency. The downside is that the guest cannot use the IO scheduler to
merge and sort requests. However, this is not a big problem if the backing
disk on the host side is a fast device.

When the bio-based IO path is not enabled, virtio-blk still uses the
original request-based IO path, and no performance difference is observed.

With a slow device, e.g. a normal SATA disk, sequential reads and writes on
the bio-based IO path are slower than on the request-based IO path, due to
the lack of merging in the guest kernel. So we make the bio-based path
optional.

Performance evaluation:
-----------------------------
1) Fio test performed in an 8 vcpu guest with ramdisk-based storage, using
kvm tool.

Short version:
 With bio-based IO path, sequential read/write, random read/write
 IOPS boost         : 28%, 24%, 21%, 16%
 Latency improvement: 32%, 17%, 21%, 16%

Long version:
 With bio-based IO path:
  seq-read  : io=2048.0MB, bw=116996KB/s, iops=233991 , runt= 17925msec
  seq-write : io=2048.0MB, bw=100829KB/s, iops=201658 , runt= 20799msec
  rand-read : io=3095.7MB, bw=112134KB/s, iops=224268 , runt= 28269msec
  rand-write: io=3095.7MB, bw=96198KB/s,  iops=192396 , runt= 32952msec
    clat (usec): min=0 , max=2631.6K, avg=58716.99, stdev=191377.30
    clat (usec): min=0 , max=1753.2K, avg=66423.25, stdev=81774.35
    clat (usec): min=0 , max=2915.5K, avg=61685.70, stdev=120598.39
    clat (usec): min=0 , max=1933.4K, avg=76935.12, stdev=96603.45
    cpu : usr=74.08%, sys=703.84%, ctx=29661403, majf=21354, minf=22460954
    cpu : usr=70.92%, sys=702.81%, ctx=77219828, majf=13980, minf=27713137
    cpu : usr=72.23%, sys=695.37%, ctx=88081059, majf=18475, minf=28177648
    cpu : usr=69.69%, sys=654.13%, ctx=145476035, majf=15867, minf=26176375
 With request-based IO path:
  seq-read  : io=2048.0MB, bw=91074KB/s, iops=182147 , runt= 23027msec
  seq-write : io=2048.0MB, bw=80725KB/s, iops=161449 , runt= 25979msec
  rand-read : io=3095.7MB, bw=92106KB/s, iops=184211 , runt= 34416msec
  rand-write: io=3095.7MB, bw=82815KB/s, iops=165630 , runt= 38277msec
    clat (usec): min=0 , max=1932.4K, avg=77824.17, stdev=170339.49
    clat (usec): min=0 , max=2510.2K, avg=78023.96, stdev=146949.15
    clat (usec): min=0 , max=3037.2K, avg=74746.53, stdev=128498.27
    clat (usec): min=0 , max=1363.4K, avg=89830.75, stdev=114279.68
    cpu : usr=53.28%, sys=724.19%, ctx=37988895, majf=17531, minf=23577622
    cpu : usr=49.03%, sys=633.20%, ctx=205935380, majf=18197, minf=27288959
    cpu : usr=55.78%, sys=722.40%, ctx=101525058, majf=19273, minf=28067082
    cpu : usr=56.55%, sys=690.83%, ctx=228205022, majf=18039, minf=26551985

2) Fio test performed in an 8 vcpu guest with Fusion-IO based storage,
using kvm tool.

Short version:
 With bio-based IO path, sequential read/write, random read/write
 IOPS boost         : 11%, 11%, 13%, 10%
 Latency improvement: 10%, 10%, 12%, 10%

Long version:
 With bio-based IO path:
  read : io=2048.0MB, bw=58920KB/s, iops=117840 , runt= 35593msec
  write: io=2048.0MB, bw=64308KB/s, iops=128616 , runt= 32611msec
  read : io=3095.7MB, bw=59633KB/s, iops=119266 , runt= 53157msec
  write: io=3095.7MB, bw=62993KB/s, iops=125985 , runt= 50322msec
    clat (usec): min=0 , max=1284.3K, avg=128109.01, stdev=71513.29
    clat (usec): min=94 , max=962339 , avg=116832.95, stdev=65836.80
    clat (usec): min=0 , max=1846.6K, avg=128509.99, stdev=89575.07
    clat (usec): min=0 , max=2256.4K, avg=121361.84, stdev=82747.25
    cpu : usr=56.79%, sys=421.70%, ctx=147335118, majf=21080, minf=19852517
    cpu : usr=61.81%, sys=455.53%, ctx=143269950, majf=16027, minf=24800604
    cpu : usr=63.10%, sys=455.38%, ctx=178373538, majf=16958, minf=24822612
    cpu : usr=62.04%, sys=453.58%, ctx=226902362, majf=16089, minf=23278105
 With request-based IO path:
  read : io=2048.0MB, bw=52896KB/s, iops=105791 , runt= 39647msec
  write: io=2048.0MB, bw=57856KB/s, iops=115711 , runt= 36248msec
  read : io=3095.7MB, bw=52387KB/s, iops=104773 , runt= 60510msec
  write: io=3095.7MB, bw=57310KB/s, iops=114619 , runt= 55312msec
    clat (usec): min=0 , max=1532.6K, avg=142085.62, stdev=109196.84
    clat (usec): min=0 , max=1487.4K, avg=129110.71, stdev=114973.64
    clat (usec): min=0 , max=1388.6K, avg=145049.22, stdev=107232.55
    clat (usec): min=0 , max=1465.9K, avg=133585.67, stdev=110322.95
    cpu : usr=44.08%, sys=590.71%, ctx=451812322, majf=14841, minf=17648641
    cpu : usr=48.73%, sys=610.78%, ctx=418953997, majf=22164, minf=26850689
    cpu : usr=45.58%, sys=581.16%, ctx=714079216, majf=21497, minf=22558223
    cpu : usr=48.40%, sys=599.65%, ctx=656089423, majf=16393, minf=23824409

3) Fio test performed in an 8 vcpu guest with normal SATA based storage,
using kvm tool.

Short version:
 With bio-based IO path, sequential read/write, random read/write
 IOPS boost         : -10%, -10%, 4.4%, 0.5%
 Latency improvement: -12%, -15%, 2.5%, 0.8%

Long version:
 With bio-based IO path:
  read : io=124812KB, bw=36537KB/s, iops=9060 , runt=  3416msec
  write: io=169180KB, bw=24406KB/s, iops=6065 , runt=  6932msec
  read : io=256200KB, bw=2089.3KB/s, iops=520 , runt=122630msec
  write: io=257988KB, bw=1545.7KB/s, iops=384 , runt=166910msec
    clat (msec): min=1 , max=1527 , avg=28.06, stdev=89.54
    clat (msec): min=2 , max=344 , avg=41.12, stdev=38.70
    clat (msec): min=8 , max=1984 , avg=490.63, stdev=207.28
    clat (msec): min=33 , max=4131 , avg=659.19, stdev=304.71
    cpu : usr=4.85%, sys=17.15%, ctx=31593, majf=0, minf=7
    cpu : usr=3.04%, sys=11.45%, ctx=39377, majf=0, minf=0
    cpu : usr=0.47%, sys=1.59%, ctx=262986, majf=0, minf=16
    cpu : usr=0.47%, sys=1.46%, ctx=337410, majf=0, minf=0
 With request-based IO path:
  read : io=150120KB, bw=40420KB/s, iops=10037 , runt=  3714msec
  write: io=194932KB, bw=27029KB/s, iops=6722 , runt=  7212msec
  read : io=257136KB, bw=2001.1KB/s, iops=498 , runt=128443msec
  write: io=258276KB, bw=1537.2KB/s, iops=382 , runt=168028msec
    clat (msec): min=1 , max=1542 , avg=24.84, stdev=32.45
    clat (msec): min=3 , max=628 , avg=35.62, stdev=39.71
    clat (msec): min=8 , max=2540 , avg=503.28, stdev=236.97
    clat (msec): min=41 , max=4398 , avg=653.88, stdev=302.61
    cpu : usr=3.91%, sys=15.75%, ctx=26968, majf=0, minf=23
    cpu : usr=2.50%, sys=10.56%, ctx=19090, majf=0, minf=0
    cpu : usr=0.16%, sys=0.43%, ctx=20159, majf=0, minf=16
    cpu : usr=0.18%, sys=0.53%, ctx=81364, majf=0, minf=0

How to use:
-----------------------------
Add 'virtio_blk.use_bio=1' to the kernel cmdline, or 'modprobe virtio_blk
use_bio=1', to enable the ->make_request_fn() based IO path.

Cc: Rusty Russell <rusty@rustcorp.com.au>
Cc: Jens Axboe <axboe@kernel.dk>
Cc: Christoph Hellwig <hch@lst.de>
Cc: Tejun Heo <tj@kernel.org>
Cc: Shaohua Li <shli@kernel.org>
Cc: "Michael S. Tsirkin" <mst@redhat.com>
Cc: kvm@vger.kernel.org
Cc: linux-kernel@vger.kernel.org
Cc: virtualization@lists.linux-foundation.org
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Minchan Kim <minchan.kim@gmail.com>
Signed-off-by: Asias He <asias@redhat.com>
Acked-by: Rusty Russell <rusty@rustcorp.com.au>
Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
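Verifying which path is in use:
-----------------------------
Because use_bio is exposed via module_param(use_bio, bool, S_IRUGO) in the
patch below, the current setting can be read back through sysfs in a
running guest. A quick check (a sketch, assuming the standard
/sys/module/<module>/parameters layout that module_param() provides):

  # load the driver with the bio-based path enabled
  modprobe virtio_blk use_bio=1
  # read the parameter back; 'Y' means the bio-based path is active
  cat /sys/module/virtio_blk/parameters/use_bio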
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/block/virtio_blk.c | 203
1 file changed, 163 insertions(+), 40 deletions(-)
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index c0bbeb470754..95cfeeda4f3a 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -14,6 +14,9 @@
 
 #define PART_BITS 4
 
+static bool use_bio;
+module_param(use_bio, bool, S_IRUGO);
+
 static int major;
 static DEFINE_IDA(vd_index_ida);
 
@@ -23,6 +26,7 @@ struct virtio_blk
 {
 	struct virtio_device *vdev;
 	struct virtqueue *vq;
+	wait_queue_head_t queue_wait;
 
 	/* The disk structure for the kernel. */
 	struct gendisk *disk;
@@ -51,53 +55,87 @@ struct virtio_blk
 struct virtblk_req
 {
 	struct request *req;
+	struct bio *bio;
 	struct virtio_blk_outhdr out_hdr;
 	struct virtio_scsi_inhdr in_hdr;
 	u8 status;
+	struct scatterlist sg[];
 };
 
-static void blk_done(struct virtqueue *vq)
+static inline int virtblk_result(struct virtblk_req *vbr)
+{
+	switch (vbr->status) {
+	case VIRTIO_BLK_S_OK:
+		return 0;
+	case VIRTIO_BLK_S_UNSUPP:
+		return -ENOTTY;
+	default:
+		return -EIO;
+	}
+}
+
+static inline void virtblk_request_done(struct virtio_blk *vblk,
+					struct virtblk_req *vbr)
+{
+	struct request *req = vbr->req;
+	int error = virtblk_result(vbr);
+
+	if (req->cmd_type == REQ_TYPE_BLOCK_PC) {
+		req->resid_len = vbr->in_hdr.residual;
+		req->sense_len = vbr->in_hdr.sense_len;
+		req->errors = vbr->in_hdr.errors;
+	} else if (req->cmd_type == REQ_TYPE_SPECIAL) {
+		req->errors = (error != 0);
+	}
+
+	__blk_end_request_all(req, error);
+	mempool_free(vbr, vblk->pool);
+}
+
+static inline void virtblk_bio_done(struct virtio_blk *vblk,
+				    struct virtblk_req *vbr)
+{
+	bio_endio(vbr->bio, virtblk_result(vbr));
+	mempool_free(vbr, vblk->pool);
+}
+
+static void virtblk_done(struct virtqueue *vq)
 {
 	struct virtio_blk *vblk = vq->vdev->priv;
+	unsigned long bio_done = 0, req_done = 0;
 	struct virtblk_req *vbr;
-	unsigned int len;
 	unsigned long flags;
+	unsigned int len;
 
 	spin_lock_irqsave(vblk->disk->queue->queue_lock, flags);
 	while ((vbr = virtqueue_get_buf(vblk->vq, &len)) != NULL) {
-		int error;
-
-		switch (vbr->status) {
-		case VIRTIO_BLK_S_OK:
-			error = 0;
-			break;
-		case VIRTIO_BLK_S_UNSUPP:
-			error = -ENOTTY;
-			break;
-		default:
-			error = -EIO;
-			break;
-		}
-
-		switch (vbr->req->cmd_type) {
-		case REQ_TYPE_BLOCK_PC:
-			vbr->req->resid_len = vbr->in_hdr.residual;
-			vbr->req->sense_len = vbr->in_hdr.sense_len;
-			vbr->req->errors = vbr->in_hdr.errors;
-			break;
-		case REQ_TYPE_SPECIAL:
-			vbr->req->errors = (error != 0);
-			break;
-		default:
-			break;
+		if (vbr->bio) {
+			virtblk_bio_done(vblk, vbr);
+			bio_done++;
+		} else {
+			virtblk_request_done(vblk, vbr);
+			req_done++;
 		}
-
-		__blk_end_request_all(vbr->req, error);
-		mempool_free(vbr, vblk->pool);
 	}
 	/* In case queue is stopped waiting for more buffers. */
-	blk_start_queue(vblk->disk->queue);
+	if (req_done)
+		blk_start_queue(vblk->disk->queue);
 	spin_unlock_irqrestore(vblk->disk->queue->queue_lock, flags);
+
+	if (bio_done)
+		wake_up(&vblk->queue_wait);
+}
+
+static inline struct virtblk_req *virtblk_alloc_req(struct virtio_blk *vblk,
+						    gfp_t gfp_mask)
+{
+	struct virtblk_req *vbr;
+
+	vbr = mempool_alloc(vblk->pool, gfp_mask);
+	if (vbr && use_bio)
+		sg_init_table(vbr->sg, vblk->sg_elems);
+
+	return vbr;
 }
 
 static bool do_req(struct request_queue *q, struct virtio_blk *vblk,
@@ -106,13 +144,13 @@ static bool do_req(struct request_queue *q, struct virtio_blk *vblk,
 	unsigned long num, out = 0, in = 0;
 	struct virtblk_req *vbr;
 
-	vbr = mempool_alloc(vblk->pool, GFP_ATOMIC);
+	vbr = virtblk_alloc_req(vblk, GFP_ATOMIC);
 	if (!vbr)
 		/* When another request finishes we'll try again. */
 		return false;
 
 	vbr->req = req;
-
+	vbr->bio = NULL;
 	if (req->cmd_flags & REQ_FLUSH) {
 		vbr->out_hdr.type = VIRTIO_BLK_T_FLUSH;
 		vbr->out_hdr.sector = 0;
@@ -172,7 +210,8 @@ static bool do_req(struct request_queue *q, struct virtio_blk *vblk,
 		}
 	}
 
-	if (virtqueue_add_buf(vblk->vq, vblk->sg, out, in, vbr, GFP_ATOMIC)<0) {
+	if (virtqueue_add_buf(vblk->vq, vblk->sg, out, in, vbr,
+			      GFP_ATOMIC) < 0) {
 		mempool_free(vbr, vblk->pool);
 		return false;
 	}
@@ -180,7 +219,7 @@ static bool do_req(struct request_queue *q, struct virtio_blk *vblk,
 	return true;
 }
 
-static void do_virtblk_request(struct request_queue *q)
+static void virtblk_request(struct request_queue *q)
 {
 	struct virtio_blk *vblk = q->queuedata;
 	struct request *req;
@@ -203,6 +242,82 @@ static void do_virtblk_request(struct request_queue *q)
 	virtqueue_kick(vblk->vq);
 }
 
+static void virtblk_add_buf_wait(struct virtio_blk *vblk,
+				 struct virtblk_req *vbr,
+				 unsigned long out,
+				 unsigned long in)
+{
+	DEFINE_WAIT(wait);
+
+	for (;;) {
+		prepare_to_wait_exclusive(&vblk->queue_wait, &wait,
+					  TASK_UNINTERRUPTIBLE);
+
+		spin_lock_irq(vblk->disk->queue->queue_lock);
+		if (virtqueue_add_buf(vblk->vq, vbr->sg, out, in, vbr,
+				      GFP_ATOMIC) < 0) {
+			spin_unlock_irq(vblk->disk->queue->queue_lock);
+			io_schedule();
+		} else {
+			virtqueue_kick(vblk->vq);
+			spin_unlock_irq(vblk->disk->queue->queue_lock);
+			break;
+		}
+
+	}
+
+	finish_wait(&vblk->queue_wait, &wait);
+}
+
+static void virtblk_make_request(struct request_queue *q, struct bio *bio)
+{
+	struct virtio_blk *vblk = q->queuedata;
+	unsigned int num, out = 0, in = 0;
+	struct virtblk_req *vbr;
+
+	BUG_ON(bio->bi_phys_segments + 2 > vblk->sg_elems);
+	BUG_ON(bio->bi_rw & (REQ_FLUSH | REQ_FUA));
+
+	vbr = virtblk_alloc_req(vblk, GFP_NOIO);
+	if (!vbr) {
+		bio_endio(bio, -ENOMEM);
+		return;
+	}
+
+	vbr->bio = bio;
+	vbr->req = NULL;
+	vbr->out_hdr.type = 0;
+	vbr->out_hdr.sector = bio->bi_sector;
+	vbr->out_hdr.ioprio = bio_prio(bio);
+
+	sg_set_buf(&vbr->sg[out++], &vbr->out_hdr, sizeof(vbr->out_hdr));
+
+	num = blk_bio_map_sg(q, bio, vbr->sg + out);
+
+	sg_set_buf(&vbr->sg[num + out + in++], &vbr->status,
+		   sizeof(vbr->status));
+
+	if (num) {
+		if (bio->bi_rw & REQ_WRITE) {
+			vbr->out_hdr.type |= VIRTIO_BLK_T_OUT;
+			out += num;
+		} else {
+			vbr->out_hdr.type |= VIRTIO_BLK_T_IN;
+			in += num;
+		}
+	}
+
+	spin_lock_irq(vblk->disk->queue->queue_lock);
+	if (unlikely(virtqueue_add_buf(vblk->vq, vbr->sg, out, in, vbr,
+				       GFP_ATOMIC) < 0)) {
+		spin_unlock_irq(vblk->disk->queue->queue_lock);
+		virtblk_add_buf_wait(vblk, vbr, out, in);
+		return;
+	}
+	virtqueue_kick(vblk->vq);
+	spin_unlock_irq(vblk->disk->queue->queue_lock);
+}
+
 /* return id (s/n) string for *disk to *id_str
  */
 static int virtblk_get_id(struct gendisk *disk, char *id_str)
@@ -360,7 +475,7 @@ static int init_vq(struct virtio_blk *vblk)
 	int err = 0;
 
 	/* We expect one virtqueue, for output. */
-	vblk->vq = virtio_find_single_vq(vblk->vdev, blk_done, "requests");
+	vblk->vq = virtio_find_single_vq(vblk->vdev, virtblk_done, "requests");
 	if (IS_ERR(vblk->vq))
 		err = PTR_ERR(vblk->vq);
 
@@ -414,7 +529,7 @@ static void virtblk_update_cache_mode(struct virtio_device *vdev)
 	u8 writeback = virtblk_get_cache_mode(vdev);
 	struct virtio_blk *vblk = vdev->priv;
 
-	if (writeback)
+	if (writeback && !use_bio)
 		blk_queue_flush(vblk->disk->queue, REQ_FLUSH);
 	else
 		blk_queue_flush(vblk->disk->queue, 0);
@@ -477,6 +592,8 @@ static int __devinit virtblk_probe(struct virtio_device *vdev)
 	struct virtio_blk *vblk;
 	struct request_queue *q;
 	int err, index;
+	int pool_size;
+
 	u64 cap;
 	u32 v, blk_size, sg_elems, opt_io_size;
 	u16 min_io_size;
@@ -506,10 +623,12 @@ static int __devinit virtblk_probe(struct virtio_device *vdev)
 		goto out_free_index;
 	}
 
+	init_waitqueue_head(&vblk->queue_wait);
 	vblk->vdev = vdev;
 	vblk->sg_elems = sg_elems;
 	sg_init_table(vblk->sg, vblk->sg_elems);
 	mutex_init(&vblk->config_lock);
+
 	INIT_WORK(&vblk->config_work, virtblk_config_changed_work);
 	vblk->config_enable = true;
 
@@ -517,7 +636,10 @@ static int __devinit virtblk_probe(struct virtio_device *vdev)
 	if (err)
 		goto out_free_vblk;
 
-	vblk->pool = mempool_create_kmalloc_pool(1,sizeof(struct virtblk_req));
+	pool_size = sizeof(struct virtblk_req);
+	if (use_bio)
+		pool_size += sizeof(struct scatterlist) * sg_elems;
+	vblk->pool = mempool_create_kmalloc_pool(1, pool_size);
 	if (!vblk->pool) {
 		err = -ENOMEM;
 		goto out_free_vq;
@@ -530,12 +652,14 @@ static int __devinit virtblk_probe(struct virtio_device *vdev)
 		goto out_mempool;
 	}
 
-	q = vblk->disk->queue = blk_init_queue(do_virtblk_request, NULL);
+	q = vblk->disk->queue = blk_init_queue(virtblk_request, NULL);
 	if (!q) {
 		err = -ENOMEM;
 		goto out_put_disk;
 	}
 
+	if (use_bio)
+		blk_queue_make_request(q, virtblk_make_request);
 	q->queuedata = vblk;
 
 	virtblk_name_format("vd", index, vblk->disk->disk_name, DISK_NAME_LEN);
@@ -620,7 +744,6 @@ static int __devinit virtblk_probe(struct virtio_device *vdev)
 	if (!err && opt_io_size)
 		blk_queue_io_opt(q, blk_size * opt_io_size);
 
-
 	add_disk(vblk->disk);
 	err = device_create_file(disk_to_dev(vblk->disk), &dev_attr_serial);
 	if (err)