path: root/drivers/block/virtio_blk.c
Diffstat (limited to 'drivers/block/virtio_blk.c')
-rw-r--r--	drivers/block/virtio_blk.c	88
1 file changed, 49 insertions(+), 39 deletions(-)
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index 23b7c48df843..2aafafca2b13 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -2,6 +2,7 @@
 #include <linux/spinlock.h>
 #include <linux/slab.h>
 #include <linux/blkdev.h>
+#include <linux/smp_lock.h>
 #include <linux/hdreg.h>
 #include <linux/virtio.h>
 #include <linux/virtio_blk.h>
@@ -65,13 +66,18 @@ static void blk_done(struct virtqueue *vq)
 			break;
 		}
 
-		if (blk_pc_request(vbr->req)) {
+		switch (vbr->req->cmd_type) {
+		case REQ_TYPE_BLOCK_PC:
 			vbr->req->resid_len = vbr->in_hdr.residual;
 			vbr->req->sense_len = vbr->in_hdr.sense_len;
 			vbr->req->errors = vbr->in_hdr.errors;
-		}
-		if (blk_special_request(vbr->req))
+			break;
+		case REQ_TYPE_SPECIAL:
 			vbr->req->errors = (error != 0);
+			break;
+		default:
+			break;
+		}
 
 		__blk_end_request_all(vbr->req, error);
 		list_del(&vbr->list);
@@ -94,36 +100,35 @@ static bool do_req(struct request_queue *q, struct virtio_blk *vblk,
 		return false;
 
 	vbr->req = req;
-	switch (req->cmd_type) {
-	case REQ_TYPE_FS:
-		vbr->out_hdr.type = 0;
-		vbr->out_hdr.sector = blk_rq_pos(vbr->req);
-		vbr->out_hdr.ioprio = req_get_ioprio(vbr->req);
-		break;
-	case REQ_TYPE_BLOCK_PC:
-		vbr->out_hdr.type = VIRTIO_BLK_T_SCSI_CMD;
-		vbr->out_hdr.sector = 0;
-		vbr->out_hdr.ioprio = req_get_ioprio(vbr->req);
-		break;
-	case REQ_TYPE_SPECIAL:
-		vbr->out_hdr.type = VIRTIO_BLK_T_GET_ID;
+
+	if (req->cmd_flags & REQ_FLUSH) {
+		vbr->out_hdr.type = VIRTIO_BLK_T_FLUSH;
 		vbr->out_hdr.sector = 0;
 		vbr->out_hdr.ioprio = req_get_ioprio(vbr->req);
-		break;
-	case REQ_TYPE_LINUX_BLOCK:
-		if (req->cmd[0] == REQ_LB_OP_FLUSH) {
-			vbr->out_hdr.type = VIRTIO_BLK_T_FLUSH;
+	} else {
+		switch (req->cmd_type) {
+		case REQ_TYPE_FS:
+			vbr->out_hdr.type = 0;
+			vbr->out_hdr.sector = blk_rq_pos(vbr->req);
+			vbr->out_hdr.ioprio = req_get_ioprio(vbr->req);
+			break;
+		case REQ_TYPE_BLOCK_PC:
+			vbr->out_hdr.type = VIRTIO_BLK_T_SCSI_CMD;
 			vbr->out_hdr.sector = 0;
 			vbr->out_hdr.ioprio = req_get_ioprio(vbr->req);
 			break;
+		case REQ_TYPE_SPECIAL:
+			vbr->out_hdr.type = VIRTIO_BLK_T_GET_ID;
+			vbr->out_hdr.sector = 0;
+			vbr->out_hdr.ioprio = req_get_ioprio(vbr->req);
+			break;
+		default:
+			/* We don't put anything else in the queue. */
+			BUG();
 		}
-		/*FALLTHRU*/
-	default:
-		/* We don't put anything else in the queue. */
-		BUG();
 	}
 
-	if (blk_barrier_rq(vbr->req))
+	if (vbr->req->cmd_flags & REQ_HARDBARRIER)
 		vbr->out_hdr.type |= VIRTIO_BLK_T_BARRIER;
 
 	sg_set_buf(&vblk->sg[out++], &vbr->out_hdr, sizeof(vbr->out_hdr));
@@ -134,12 +139,12 @@ static bool do_req(struct request_queue *q, struct virtio_blk *vblk,
 	 * block, and before the normal inhdr we put the sense data and the
 	 * inhdr with additional status information before the normal inhdr.
 	 */
-	if (blk_pc_request(vbr->req))
+	if (vbr->req->cmd_type == REQ_TYPE_BLOCK_PC)
 		sg_set_buf(&vblk->sg[out++], vbr->req->cmd, vbr->req->cmd_len);
 
 	num = blk_rq_map_sg(q, vbr->req, vblk->sg + out);
 
-	if (blk_pc_request(vbr->req)) {
+	if (vbr->req->cmd_type == REQ_TYPE_BLOCK_PC) {
 		sg_set_buf(&vblk->sg[num + out + in++], vbr->req->sense, 96);
 		sg_set_buf(&vblk->sg[num + out + in++], &vbr->in_hdr,
 			   sizeof(vbr->in_hdr));
@@ -190,12 +195,6 @@ static void do_virtblk_request(struct request_queue *q)
 	virtqueue_kick(vblk->vq);
 }
 
-static void virtblk_prepare_flush(struct request_queue *q, struct request *req)
-{
-	req->cmd_type = REQ_TYPE_LINUX_BLOCK;
-	req->cmd[0] = REQ_LB_OP_FLUSH;
-}
-
 /* return id (s/n) string for *disk to *id_str
  */
 static int virtblk_get_id(struct gendisk *disk, char *id_str)
@@ -219,7 +218,7 @@ static int virtblk_get_id(struct gendisk *disk, char *id_str)
 	return blk_execute_rq(vblk->disk->queue, vblk->disk, req, false);
 }
 
-static int virtblk_ioctl(struct block_device *bdev, fmode_t mode,
+static int virtblk_locked_ioctl(struct block_device *bdev, fmode_t mode,
 			 unsigned cmd, unsigned long data)
 {
 	struct gendisk *disk = bdev->bd_disk;
@@ -235,6 +234,18 @@ static int virtblk_ioctl(struct block_device *bdev, fmode_t mode,
 			      (void __user *)data);
 }
 
+static int virtblk_ioctl(struct block_device *bdev, fmode_t mode,
+			 unsigned int cmd, unsigned long param)
+{
+	int ret;
+
+	lock_kernel();
+	ret = virtblk_locked_ioctl(bdev, mode, cmd, param);
+	unlock_kernel();
+
+	return ret;
+}
+
 /* We provide getgeo only to please some old bootloader/partitioning tools */
 static int virtblk_getgeo(struct block_device *bd, struct hd_geometry *geo)
 {
@@ -261,7 +272,7 @@ static int virtblk_getgeo(struct block_device *bd, struct hd_geometry *geo)
 }
 
 static const struct block_device_operations virtblk_fops = {
-	.locked_ioctl = virtblk_ioctl,
+	.ioctl = virtblk_ioctl,
 	.owner = THIS_MODULE,
 	.getgeo = virtblk_getgeo,
 };
@@ -383,8 +394,7 @@ static int __devinit virtblk_probe(struct virtio_device *vdev)
 		 * flushing a volatile write cache on the host.  Use that
 		 * to implement write barrier support.
 		 */
-		blk_queue_ordered(q, QUEUE_ORDERED_DRAIN_FLUSH,
-				  virtblk_prepare_flush);
+		blk_queue_ordered(q, QUEUE_ORDERED_DRAIN_FLUSH);
 	} else if (virtio_has_feature(vdev, VIRTIO_BLK_F_BARRIER)) {
 		/*
 		 * If the BARRIER feature is supported the host expects us
@@ -393,7 +403,7 @@ static int __devinit virtblk_probe(struct virtio_device *vdev)
 		 * never re-orders outstanding I/O.  This feature is not
 		 * useful for real life scenarious and deprecated.
 		 */
-		blk_queue_ordered(q, QUEUE_ORDERED_TAG, NULL);
+		blk_queue_ordered(q, QUEUE_ORDERED_TAG);
 	} else {
 		/*
 		 * If the FLUSH feature is not supported we must assume that
@@ -401,7 +411,7 @@ static int __devinit virtblk_probe(struct virtio_device *vdev)
 		 * caching. We still need to drain the queue to provider
 		 * proper barrier semantics.
 		 */
-		blk_queue_ordered(q, QUEUE_ORDERED_DRAIN, NULL);
+		blk_queue_ordered(q, QUEUE_ORDERED_DRAIN);
 	}
 
 	/* If disk is read-only in the host, the guest should obey */