Diffstat (limited to 'drivers')
-rw-r--r--	drivers/block/virtio_blk.c | 203
1 file changed, 163 insertions(+), 40 deletions(-)
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index c0bbeb470754..95cfeeda4f3a 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -14,6 +14,9 @@
 
 #define PART_BITS 4
 
+static bool use_bio;
+module_param(use_bio, bool, S_IRUGO);
+
 static int major;
 static DEFINE_IDA(vd_index_ida);
 
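The new use_bio flag is a load-time switch: module_param() with S_IRUGO makes it visible read-only through sysfs once the driver is loaded (for example with "modprobe virtio_blk use_bio=1"), but it cannot be flipped at runtime. A small hypothetical userspace check, assuming the standard sysfs layout for bool module parameters:

	#include <stdio.h>

	int main(void)
	{
		/* bool module parameters read back as "Y" or "N" in sysfs */
		FILE *f = fopen("/sys/module/virtio_blk/parameters/use_bio", "r");
		int c;

		if (!f)
			return 1;	/* module not loaded */
		c = fgetc(f);
		fclose(f);
		printf("bio-based path: %s\n", c == 'Y' ? "on" : "off");
		return 0;
	}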
@@ -23,6 +26,7 @@ struct virtio_blk
 {
 	struct virtio_device *vdev;
 	struct virtqueue *vq;
+	wait_queue_head_t queue_wait;
 
 	/* The disk structure for the kernel. */
 	struct gendisk *disk;
@@ -51,53 +55,87 @@ struct virtio_blk
 struct virtblk_req
 {
 	struct request *req;
+	struct bio *bio;
 	struct virtio_blk_outhdr out_hdr;
 	struct virtio_scsi_inhdr in_hdr;
 	u8 status;
+	struct scatterlist sg[];
 };
 
-static void blk_done(struct virtqueue *vq)
+static inline int virtblk_result(struct virtblk_req *vbr)
+{
+	switch (vbr->status) {
+	case VIRTIO_BLK_S_OK:
+		return 0;
+	case VIRTIO_BLK_S_UNSUPP:
+		return -ENOTTY;
+	default:
+		return -EIO;
+	}
+}
+
+static inline void virtblk_request_done(struct virtio_blk *vblk,
+					struct virtblk_req *vbr)
+{
+	struct request *req = vbr->req;
+	int error = virtblk_result(vbr);
+
+	if (req->cmd_type == REQ_TYPE_BLOCK_PC) {
+		req->resid_len = vbr->in_hdr.residual;
+		req->sense_len = vbr->in_hdr.sense_len;
+		req->errors = vbr->in_hdr.errors;
+	} else if (req->cmd_type == REQ_TYPE_SPECIAL) {
+		req->errors = (error != 0);
+	}
+
+	__blk_end_request_all(req, error);
+	mempool_free(vbr, vblk->pool);
+}
+
+static inline void virtblk_bio_done(struct virtio_blk *vblk,
+				    struct virtblk_req *vbr)
+{
+	bio_endio(vbr->bio, virtblk_result(vbr));
+	mempool_free(vbr, vblk->pool);
+}
+
+static void virtblk_done(struct virtqueue *vq)
 {
 	struct virtio_blk *vblk = vq->vdev->priv;
+	unsigned long bio_done = 0, req_done = 0;
 	struct virtblk_req *vbr;
-	unsigned int len;
 	unsigned long flags;
+	unsigned int len;
 
 	spin_lock_irqsave(vblk->disk->queue->queue_lock, flags);
 	while ((vbr = virtqueue_get_buf(vblk->vq, &len)) != NULL) {
-		int error;
-
-		switch (vbr->status) {
-		case VIRTIO_BLK_S_OK:
-			error = 0;
-			break;
-		case VIRTIO_BLK_S_UNSUPP:
-			error = -ENOTTY;
-			break;
-		default:
-			error = -EIO;
-			break;
-		}
-
-		switch (vbr->req->cmd_type) {
-		case REQ_TYPE_BLOCK_PC:
-			vbr->req->resid_len = vbr->in_hdr.residual;
-			vbr->req->sense_len = vbr->in_hdr.sense_len;
-			vbr->req->errors = vbr->in_hdr.errors;
-			break;
-		case REQ_TYPE_SPECIAL:
-			vbr->req->errors = (error != 0);
-			break;
-		default:
-			break;
+		if (vbr->bio) {
+			virtblk_bio_done(vblk, vbr);
+			bio_done++;
+		} else {
+			virtblk_request_done(vblk, vbr);
+			req_done++;
 		}
-
-		__blk_end_request_all(vbr->req, error);
-		mempool_free(vbr, vblk->pool);
 	}
 	/* In case queue is stopped waiting for more buffers. */
-	blk_start_queue(vblk->disk->queue);
+	if (req_done)
+		blk_start_queue(vblk->disk->queue);
 	spin_unlock_irqrestore(vblk->disk->queue->queue_lock, flags);
+
+	if (bio_done)
+		wake_up(&vblk->queue_wait);
+}
+
+static inline struct virtblk_req *virtblk_alloc_req(struct virtio_blk *vblk,
+						    gfp_t gfp_mask)
+{
+	struct virtblk_req *vbr;
+
+	vbr = mempool_alloc(vblk->pool, gfp_mask);
+	if (vbr && use_bio)
+		sg_init_table(vbr->sg, vblk->sg_elems);
+
+	return vbr;
 }
 
 static bool do_req(struct request_queue *q, struct virtio_blk *vblk,
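For reference, virtblk_result() factored out above maps the device's status byte to a kernel errno. A userspace sketch of the same mapping, with the status constants inlined from the virtio spec (a kernel build would include virtio_blk.h instead):

	#include <errno.h>
	#include <stdio.h>

	#define VIRTIO_BLK_S_OK     0
	#define VIRTIO_BLK_S_IOERR  1
	#define VIRTIO_BLK_S_UNSUPP 2

	static int virtblk_result(unsigned char status)
	{
		switch (status) {
		case VIRTIO_BLK_S_OK:
			return 0;
		case VIRTIO_BLK_S_UNSUPP:
			return -ENOTTY;	/* host doesn't implement the command */
		default:
			return -EIO;	/* any other status is an I/O error */
		}
	}

	int main(void)
	{
		printf("%d %d %d\n", virtblk_result(VIRTIO_BLK_S_OK),
		       virtblk_result(VIRTIO_BLK_S_IOERR),
		       virtblk_result(VIRTIO_BLK_S_UNSUPP));
		return 0;
	}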
@@ -106,13 +144,13 @@ static bool do_req(struct request_queue *q, struct virtio_blk *vblk,
 	unsigned long num, out = 0, in = 0;
 	struct virtblk_req *vbr;
 
-	vbr = mempool_alloc(vblk->pool, GFP_ATOMIC);
+	vbr = virtblk_alloc_req(vblk, GFP_ATOMIC);
 	if (!vbr)
 		/* When another request finishes we'll try again. */
 		return false;
 
 	vbr->req = req;
-
+	vbr->bio = NULL;
 	if (req->cmd_flags & REQ_FLUSH) {
 		vbr->out_hdr.type = VIRTIO_BLK_T_FLUSH;
 		vbr->out_hdr.sector = 0;
@@ -172,7 +210,8 @@ static bool do_req(struct request_queue *q, struct virtio_blk *vblk,
 		}
 	}
 
-	if (virtqueue_add_buf(vblk->vq, vblk->sg, out, in, vbr, GFP_ATOMIC)<0) {
+	if (virtqueue_add_buf(vblk->vq, vblk->sg, out, in, vbr,
+			      GFP_ATOMIC) < 0) {
 		mempool_free(vbr, vblk->pool);
 		return false;
 	}
@@ -180,7 +219,7 @@ static bool do_req(struct request_queue *q, struct virtio_blk *vblk,
 	return true;
 }
 
-static void do_virtblk_request(struct request_queue *q)
+static void virtblk_request(struct request_queue *q)
 {
 	struct virtio_blk *vblk = q->queuedata;
 	struct request *req;
@@ -203,6 +242,82 @@ static void do_virtblk_request(struct request_queue *q)
 	virtqueue_kick(vblk->vq);
 }
 
+static void virtblk_add_buf_wait(struct virtio_blk *vblk,
+				 struct virtblk_req *vbr,
+				 unsigned long out,
+				 unsigned long in)
+{
+	DEFINE_WAIT(wait);
+
+	for (;;) {
+		prepare_to_wait_exclusive(&vblk->queue_wait, &wait,
+					  TASK_UNINTERRUPTIBLE);
+
+		spin_lock_irq(vblk->disk->queue->queue_lock);
+		if (virtqueue_add_buf(vblk->vq, vbr->sg, out, in, vbr,
+				      GFP_ATOMIC) < 0) {
+			spin_unlock_irq(vblk->disk->queue->queue_lock);
+			io_schedule();
+		} else {
+			virtqueue_kick(vblk->vq);
+			spin_unlock_irq(vblk->disk->queue->queue_lock);
+			break;
+		}
+
+	}
+
+	finish_wait(&vblk->queue_wait, &wait);
+}
+
+static void virtblk_make_request(struct request_queue *q, struct bio *bio)
+{
+	struct virtio_blk *vblk = q->queuedata;
+	unsigned int num, out = 0, in = 0;
+	struct virtblk_req *vbr;
+
+	BUG_ON(bio->bi_phys_segments + 2 > vblk->sg_elems);
+	BUG_ON(bio->bi_rw & (REQ_FLUSH | REQ_FUA));
+
+	vbr = virtblk_alloc_req(vblk, GFP_NOIO);
+	if (!vbr) {
+		bio_endio(bio, -ENOMEM);
+		return;
+	}
+
+	vbr->bio = bio;
+	vbr->req = NULL;
+	vbr->out_hdr.type = 0;
+	vbr->out_hdr.sector = bio->bi_sector;
+	vbr->out_hdr.ioprio = bio_prio(bio);
+
+	sg_set_buf(&vbr->sg[out++], &vbr->out_hdr, sizeof(vbr->out_hdr));
+
+	num = blk_bio_map_sg(q, bio, vbr->sg + out);
+
+	sg_set_buf(&vbr->sg[num + out + in++], &vbr->status,
+		   sizeof(vbr->status));
+
+	if (num) {
+		if (bio->bi_rw & REQ_WRITE) {
+			vbr->out_hdr.type |= VIRTIO_BLK_T_OUT;
+			out += num;
+		} else {
+			vbr->out_hdr.type |= VIRTIO_BLK_T_IN;
+			in += num;
+		}
+	}
+
+	spin_lock_irq(vblk->disk->queue->queue_lock);
+	if (unlikely(virtqueue_add_buf(vblk->vq, vbr->sg, out, in, vbr,
+				       GFP_ATOMIC) < 0)) {
+		spin_unlock_irq(vblk->disk->queue->queue_lock);
+		virtblk_add_buf_wait(vblk, vbr, out, in);
+		return;
+	}
+	virtqueue_kick(vblk->vq);
+	spin_unlock_irq(vblk->disk->queue->queue_lock);
+}
+
 /* return id (s/n) string for *disk to *id_str
  */
 static int virtblk_get_id(struct gendisk *disk, char *id_str)
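The submission fallback above is the classic wait-queue idiom. Stripped to its skeleton (the names below are stand-ins, not from the patch; submit() stands for the locked virtqueue_add_buf() attempt), the pattern is:

	#include <linux/sched.h>
	#include <linux/types.h>
	#include <linux/wait.h>

	static void submit_or_sleep(wait_queue_head_t *wq, bool (*submit)(void))
	{
		DEFINE_WAIT(wait);

		for (;;) {
			/* Register as an exclusive waiter *before* re-testing
			 * the condition, so a wake_up() racing with the test
			 * below cannot be lost. */
			prepare_to_wait_exclusive(wq, &wait, TASK_UNINTERRUPTIBLE);
			if (submit())		/* stand-in: ring has room again */
				break;
			io_schedule();		/* sleep, accounted as I/O wait */
		}
		finish_wait(wq, &wait);
	}

Registering before re-testing is what makes the wake_up(&vblk->queue_wait) in virtblk_done() race-free: a completion that lands between the failed add_buf and io_schedule() still wakes the submitter.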
@@ -360,7 +475,7 @@ static int init_vq(struct virtio_blk *vblk)
 	int err = 0;
 
 	/* We expect one virtqueue, for output. */
-	vblk->vq = virtio_find_single_vq(vblk->vdev, blk_done, "requests");
+	vblk->vq = virtio_find_single_vq(vblk->vdev, virtblk_done, "requests");
 	if (IS_ERR(vblk->vq))
 		err = PTR_ERR(vblk->vq);
 
@@ -414,7 +529,7 @@ static void virtblk_update_cache_mode(struct virtio_device *vdev)
 	u8 writeback = virtblk_get_cache_mode(vdev);
 	struct virtio_blk *vblk = vdev->priv;
 
-	if (writeback)
+	if (writeback && !use_bio)
 		blk_queue_flush(vblk->disk->queue, REQ_FLUSH);
 	else
 		blk_queue_flush(vblk->disk->queue, 0);
@@ -477,6 +592,8 @@ static int __devinit virtblk_probe(struct virtio_device *vdev)
 	struct virtio_blk *vblk;
 	struct request_queue *q;
 	int err, index;
+	int pool_size;
+
 	u64 cap;
 	u32 v, blk_size, sg_elems, opt_io_size;
 	u16 min_io_size;
@@ -506,10 +623,12 @@ static int __devinit virtblk_probe(struct virtio_device *vdev)
 		goto out_free_index;
 	}
 
+	init_waitqueue_head(&vblk->queue_wait);
 	vblk->vdev = vdev;
 	vblk->sg_elems = sg_elems;
 	sg_init_table(vblk->sg, vblk->sg_elems);
 	mutex_init(&vblk->config_lock);
+
 	INIT_WORK(&vblk->config_work, virtblk_config_changed_work);
 	vblk->config_enable = true;
 
@@ -517,7 +636,10 @@ static int __devinit virtblk_probe(struct virtio_device *vdev)
 	if (err)
 		goto out_free_vblk;
 
-	vblk->pool = mempool_create_kmalloc_pool(1,sizeof(struct virtblk_req));
+	pool_size = sizeof(struct virtblk_req);
+	if (use_bio)
+		pool_size += sizeof(struct scatterlist) * sg_elems;
+	vblk->pool = mempool_create_kmalloc_pool(1, pool_size);
 	if (!vblk->pool) {
 		err = -ENOMEM;
 		goto out_free_vq;
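The pool sizing above pairs with the new flexible array member sg[] at the end of struct virtblk_req: sizeof() covers only the fixed header, so each mempool element must be padded out by hand for the bio path. A userspace sketch with stand-in types (not the kernel definitions):

	#include <stdio.h>

	struct scatterlist { unsigned long page_link; unsigned int offset, length; };

	struct virtblk_req {
		void *req;
		void *bio;
		unsigned char status;
		struct scatterlist sg[];	/* flexible array: excluded from sizeof() */
	};

	int main(void)
	{
		unsigned int sg_elems = 128;	/* assumed: taken from the device's seg_max */
		size_t pool_size = sizeof(struct virtblk_req)
				 + sizeof(struct scatterlist) * sg_elems;

		printf("pool element: %zu bytes for %u segments\n",
		       pool_size, sg_elems);
		return 0;
	}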
@@ -530,12 +652,14 @@ static int __devinit virtblk_probe(struct virtio_device *vdev)
 		goto out_mempool;
 	}
 
-	q = vblk->disk->queue = blk_init_queue(do_virtblk_request, NULL);
+	q = vblk->disk->queue = blk_init_queue(virtblk_request, NULL);
 	if (!q) {
 		err = -ENOMEM;
 		goto out_put_disk;
 	}
 
+	if (use_bio)
+		blk_queue_make_request(q, virtblk_make_request);
 	q->queuedata = vblk;
 
 	virtblk_name_format("vd", index, vblk->disk->disk_name, DISK_NAME_LEN);
@@ -620,7 +744,6 @@ static int __devinit virtblk_probe(struct virtio_device *vdev)
 	if (!err && opt_io_size)
 		blk_queue_io_opt(q, blk_size * opt_io_size);
 
-
 	add_disk(vblk->disk);
 	err = device_create_file(disk_to_dev(vblk->disk), &dev_attr_serial);
 	if (err)