author     FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>   2010-07-03 04:45:38 -0400
committer  Jens Axboe <jaxboe@fusionio.com>                  2010-08-07 12:24:14 -0400
commit     dd40e456a40ebb87330b7fc694359ce52f1996aa (patch)
tree       d5d1bea7f754e54b1cc9c975652bc4dfa714991f /drivers/block/virtio_blk.c
parent     144d6ed551ce430084489b198826c89bac5680dc (diff)
virtio_blk: stop using q->prepare_flush_fn
Use the REQ_FLUSH flag instead.
Signed-off-by: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
Cc: Rusty Russell <rusty@rustcorp.com.au>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <jaxboe@fusionio.com>
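For context, the request dispatch after this patch can be summarised as in the sketch below. This is illustrative, not code from the patch: the helper name virtblk_out_hdr_type() is hypothetical, but the flags, cmd_types and VIRTIO_BLK_T_* constants are the ones the patched driver uses. The block layer now marks cache-flush requests with REQ_FLUSH in req->cmd_flags, so the driver tests that bit directly instead of relying on a prepare_flush_fn callback to tag the request as REQ_TYPE_LINUX_BLOCK/REQ_LB_OP_FLUSH.

#include <linux/blkdev.h>
#include <linux/virtio_blk.h>

/* Hypothetical helper mirroring the patched do_req() dispatch. */
static u32 virtblk_out_hdr_type(struct request *req)
{
	if (req->cmd_flags & REQ_FLUSH)
		return VIRTIO_BLK_T_FLUSH;	/* cache flush: no data, sector 0 */

	switch (req->cmd_type) {
	case REQ_TYPE_FS:
		return 0;			/* ordinary read/write (direction handled elsewhere) */
	case REQ_TYPE_BLOCK_PC:
		return VIRTIO_BLK_T_SCSI_CMD;	/* SCSI passthrough */
	case REQ_TYPE_SPECIAL:
		return VIRTIO_BLK_T_GET_ID;	/* serial-number query */
	default:
		BUG();				/* nothing else is ever queued */
		return 0;			/* unreachable */
	}
}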
Diffstat (limited to 'drivers/block/virtio_blk.c')
-rw-r--r--  drivers/block/virtio_blk.c | 50
1 file changed, 21 insertions(+), 29 deletions(-)
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index b5ebcd33bdb6..b277f9e6abac 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -99,33 +99,32 @@ static bool do_req(struct request_queue *q, struct virtio_blk *vblk,
 		return false;
 
 	vbr->req = req;
-	switch (req->cmd_type) {
-	case REQ_TYPE_FS:
-		vbr->out_hdr.type = 0;
-		vbr->out_hdr.sector = blk_rq_pos(vbr->req);
-		vbr->out_hdr.ioprio = req_get_ioprio(vbr->req);
-		break;
-	case REQ_TYPE_BLOCK_PC:
-		vbr->out_hdr.type = VIRTIO_BLK_T_SCSI_CMD;
-		vbr->out_hdr.sector = 0;
-		vbr->out_hdr.ioprio = req_get_ioprio(vbr->req);
-		break;
-	case REQ_TYPE_SPECIAL:
-		vbr->out_hdr.type = VIRTIO_BLK_T_GET_ID;
+
+	if (req->cmd_flags & REQ_FLUSH) {
+		vbr->out_hdr.type = VIRTIO_BLK_T_FLUSH;
 		vbr->out_hdr.sector = 0;
 		vbr->out_hdr.ioprio = req_get_ioprio(vbr->req);
-		break;
-	case REQ_TYPE_LINUX_BLOCK:
-		if (req->cmd[0] == REQ_LB_OP_FLUSH) {
-			vbr->out_hdr.type = VIRTIO_BLK_T_FLUSH;
+	} else {
+		switch (req->cmd_type) {
+		case REQ_TYPE_FS:
+			vbr->out_hdr.type = 0;
+			vbr->out_hdr.sector = blk_rq_pos(vbr->req);
+			vbr->out_hdr.ioprio = req_get_ioprio(vbr->req);
+			break;
+		case REQ_TYPE_BLOCK_PC:
+			vbr->out_hdr.type = VIRTIO_BLK_T_SCSI_CMD;
 			vbr->out_hdr.sector = 0;
 			vbr->out_hdr.ioprio = req_get_ioprio(vbr->req);
 			break;
+		case REQ_TYPE_SPECIAL:
+			vbr->out_hdr.type = VIRTIO_BLK_T_GET_ID;
+			vbr->out_hdr.sector = 0;
+			vbr->out_hdr.ioprio = req_get_ioprio(vbr->req);
+			break;
+		default:
+			/* We don't put anything else in the queue. */
+			BUG();
 		}
-		/*FALLTHRU*/
-	default:
-		/* We don't put anything else in the queue. */
-		BUG();
 	}
 
 	if (vbr->req->cmd_flags & REQ_HARDBARRIER)
@@ -195,12 +194,6 @@ static void do_virtblk_request(struct request_queue *q)
 	virtqueue_kick(vblk->vq);
 }
 
-static void virtblk_prepare_flush(struct request_queue *q, struct request *req)
-{
-	req->cmd_type = REQ_TYPE_LINUX_BLOCK;
-	req->cmd[0] = REQ_LB_OP_FLUSH;
-}
-
 /* return id (s/n) string for *disk to *id_str
  */
 static int virtblk_get_id(struct gendisk *disk, char *id_str)
@@ -373,8 +366,7 @@ static int __devinit virtblk_probe(struct virtio_device *vdev)
 
 	/* If barriers are supported, tell block layer that queue is ordered */
 	if (virtio_has_feature(vdev, VIRTIO_BLK_F_FLUSH))
-		blk_queue_ordered(q, QUEUE_ORDERED_DRAIN_FLUSH,
-				  virtblk_prepare_flush);
+		blk_queue_ordered(q, QUEUE_ORDERED_DRAIN_FLUSH, NULL);
 	else if (virtio_has_feature(vdev, VIRTIO_BLK_F_BARRIER))
 		blk_queue_ordered(q, QUEUE_ORDERED_TAG, NULL);
 