Diffstat (limited to 'drivers/block/virtio_blk.c')
-rw-r--r--	drivers/block/virtio_blk.c | 404
1 file changed, 102 insertions(+), 302 deletions(-)
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index 5cdf88b7ad9e..6a680d4de7f1 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -11,12 +11,11 @@
 #include <linux/string_helpers.h>
 #include <scsi/scsi_cmnd.h>
 #include <linux/idr.h>
+#include <linux/blk-mq.h>
+#include <linux/numa.h>
 
 #define PART_BITS 4
 
-static bool use_bio;
-module_param(use_bio, bool, S_IRUGO);
-
 static int major;
 static DEFINE_IDA(vd_index_ida);
 
@@ -26,13 +25,11 @@ struct virtio_blk
 {
 	struct virtio_device *vdev;
 	struct virtqueue *vq;
-	wait_queue_head_t queue_wait;
+	spinlock_t vq_lock;
 
 	/* The disk structure for the kernel. */
 	struct gendisk *disk;
 
-	mempool_t *pool;
-
 	/* Process context for config space updates */
 	struct work_struct config_work;
 
@@ -47,31 +44,17 @@ struct virtio_blk
 
 	/* Ida index - used to track minor number allocations. */
 	int index;
-
-	/* Scatterlist: can be too big for stack. */
-	struct scatterlist sg[/*sg_elems*/];
 };
 
 struct virtblk_req
 {
 	struct request *req;
-	struct bio *bio;
 	struct virtio_blk_outhdr out_hdr;
 	struct virtio_scsi_inhdr in_hdr;
-	struct work_struct work;
-	struct virtio_blk *vblk;
-	int flags;
 	u8 status;
 	struct scatterlist sg[];
 };
 
-enum {
-	VBLK_IS_FLUSH = 1,
-	VBLK_REQ_FLUSH = 2,
-	VBLK_REQ_DATA = 4,
-	VBLK_REQ_FUA = 8,
-};
-
 static inline int virtblk_result(struct virtblk_req *vbr)
 {
 	switch (vbr->status) {
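
With this change, struct virtblk_req, including its flexible sg[] array, lives in the per-request command area that blk-mq preallocates behind every request, so the mempool and the device-wide scatterlist both go away. A minimal sketch of the idea (the helper name is hypothetical; the patch itself just dereferences req->special inline):

	/* cmd_size = sizeof(struct virtblk_req) +
	 *            sizeof(struct scatterlist) * sg_elems
	 * makes blk-mq reserve one virtblk_req, trailing sg[] included,
	 * behind each preallocated request; rq->special points at it. */
	static inline struct virtblk_req *virtblk_req_of(struct request *rq)
	{
		return rq->special;	/* no allocation on the I/O path */
	}
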
@@ -84,22 +67,6 @@ static inline int virtblk_result(struct virtblk_req *vbr)
 	}
 }
 
-static inline struct virtblk_req *virtblk_alloc_req(struct virtio_blk *vblk,
-						    gfp_t gfp_mask)
-{
-	struct virtblk_req *vbr;
-
-	vbr = mempool_alloc(vblk->pool, gfp_mask);
-	if (!vbr)
-		return NULL;
-
-	vbr->vblk = vblk;
-	if (use_bio)
-		sg_init_table(vbr->sg, vblk->sg_elems);
-
-	return vbr;
-}
-
 static int __virtblk_add_req(struct virtqueue *vq,
 			     struct virtblk_req *vbr,
 			     struct scatterlist *data_sg,
@@ -143,83 +110,8 @@ static int __virtblk_add_req(struct virtqueue *vq,
 	return virtqueue_add_sgs(vq, sgs, num_out, num_in, vbr, GFP_ATOMIC);
 }
 
-static void virtblk_add_req(struct virtblk_req *vbr, bool have_data)
-{
-	struct virtio_blk *vblk = vbr->vblk;
-	DEFINE_WAIT(wait);
-	int ret;
-
-	spin_lock_irq(vblk->disk->queue->queue_lock);
-	while (unlikely((ret = __virtblk_add_req(vblk->vq, vbr, vbr->sg,
-						 have_data)) < 0)) {
-		prepare_to_wait_exclusive(&vblk->queue_wait, &wait,
-					  TASK_UNINTERRUPTIBLE);
-
-		spin_unlock_irq(vblk->disk->queue->queue_lock);
-		io_schedule();
-		spin_lock_irq(vblk->disk->queue->queue_lock);
-
-		finish_wait(&vblk->queue_wait, &wait);
-	}
-
-	virtqueue_kick(vblk->vq);
-	spin_unlock_irq(vblk->disk->queue->queue_lock);
-}
-
-static void virtblk_bio_send_flush(struct virtblk_req *vbr)
-{
-	vbr->flags |= VBLK_IS_FLUSH;
-	vbr->out_hdr.type = VIRTIO_BLK_T_FLUSH;
-	vbr->out_hdr.sector = 0;
-	vbr->out_hdr.ioprio = 0;
-
-	virtblk_add_req(vbr, false);
-}
-
-static void virtblk_bio_send_data(struct virtblk_req *vbr)
-{
-	struct virtio_blk *vblk = vbr->vblk;
-	struct bio *bio = vbr->bio;
-	bool have_data;
-
-	vbr->flags &= ~VBLK_IS_FLUSH;
-	vbr->out_hdr.type = 0;
-	vbr->out_hdr.sector = bio->bi_sector;
-	vbr->out_hdr.ioprio = bio_prio(bio);
-
-	if (blk_bio_map_sg(vblk->disk->queue, bio, vbr->sg)) {
-		have_data = true;
-		if (bio->bi_rw & REQ_WRITE)
-			vbr->out_hdr.type |= VIRTIO_BLK_T_OUT;
-		else
-			vbr->out_hdr.type |= VIRTIO_BLK_T_IN;
-	} else
-		have_data = false;
-
-	virtblk_add_req(vbr, have_data);
-}
-
-static void virtblk_bio_send_data_work(struct work_struct *work)
-{
-	struct virtblk_req *vbr;
-
-	vbr = container_of(work, struct virtblk_req, work);
-
-	virtblk_bio_send_data(vbr);
-}
-
-static void virtblk_bio_send_flush_work(struct work_struct *work)
-{
-	struct virtblk_req *vbr;
-
-	vbr = container_of(work, struct virtblk_req, work);
-
-	virtblk_bio_send_flush(vbr);
-}
-
 static inline void virtblk_request_done(struct virtblk_req *vbr)
 {
-	struct virtio_blk *vblk = vbr->vblk;
 	struct request *req = vbr->req;
 	int error = virtblk_result(vbr);
 
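
Note that __virtblk_add_req() survives the conversion unchanged: it still chains the request header, the optional data scatterlist, and the one-byte status into a single virtqueue_add_sgs() call with GFP_ATOMIC (the caller holds vq_lock, so no sleeping allocation). A simplified sketch of the descriptor layout it builds, shown for a read request (a write would count the data segment among num_out; the real function also handles SCSI command payloads):

	struct scatterlist hdr, status, *sgs[3];
	unsigned int num_out = 0, num_in = 0;

	sg_init_one(&hdr, &vbr->out_hdr, sizeof(vbr->out_hdr));
	sgs[num_out++] = &hdr;			/* driver -> device */
	sgs[num_out + num_in++] = data_sg;	/* device -> driver (read) */
	sg_init_one(&status, &vbr->status, sizeof(vbr->status));
	sgs[num_out + num_in++] = &status;	/* device -> driver */

	err = virtqueue_add_sgs(vq, sgs, num_out, num_in, vbr, GFP_ATOMIC);
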
@@ -231,90 +123,45 @@ static inline void virtblk_request_done(struct virtblk_req *vbr)
 		req->errors = (error != 0);
 	}
 
-	__blk_end_request_all(req, error);
-	mempool_free(vbr, vblk->pool);
-}
-
-static inline void virtblk_bio_flush_done(struct virtblk_req *vbr)
-{
-	struct virtio_blk *vblk = vbr->vblk;
-
-	if (vbr->flags & VBLK_REQ_DATA) {
-		/* Send out the actual write data */
-		INIT_WORK(&vbr->work, virtblk_bio_send_data_work);
-		queue_work(virtblk_wq, &vbr->work);
-	} else {
-		bio_endio(vbr->bio, virtblk_result(vbr));
-		mempool_free(vbr, vblk->pool);
-	}
-}
-
-static inline void virtblk_bio_data_done(struct virtblk_req *vbr)
-{
-	struct virtio_blk *vblk = vbr->vblk;
-
-	if (unlikely(vbr->flags & VBLK_REQ_FUA)) {
-		/* Send out a flush before end the bio */
-		vbr->flags &= ~VBLK_REQ_DATA;
-		INIT_WORK(&vbr->work, virtblk_bio_send_flush_work);
-		queue_work(virtblk_wq, &vbr->work);
-	} else {
-		bio_endio(vbr->bio, virtblk_result(vbr));
-		mempool_free(vbr, vblk->pool);
-	}
-}
-
-static inline void virtblk_bio_done(struct virtblk_req *vbr)
-{
-	if (unlikely(vbr->flags & VBLK_IS_FLUSH))
-		virtblk_bio_flush_done(vbr);
-	else
-		virtblk_bio_data_done(vbr);
+	blk_mq_end_io(req, error);
 }
 
 static void virtblk_done(struct virtqueue *vq)
 {
 	struct virtio_blk *vblk = vq->vdev->priv;
-	bool bio_done = false, req_done = false;
+	bool req_done = false;
 	struct virtblk_req *vbr;
 	unsigned long flags;
 	unsigned int len;
 
-	spin_lock_irqsave(vblk->disk->queue->queue_lock, flags);
+	spin_lock_irqsave(&vblk->vq_lock, flags);
 	do {
 		virtqueue_disable_cb(vq);
 		while ((vbr = virtqueue_get_buf(vblk->vq, &len)) != NULL) {
-			if (vbr->bio) {
-				virtblk_bio_done(vbr);
-				bio_done = true;
-			} else {
-				virtblk_request_done(vbr);
-				req_done = true;
-			}
+			virtblk_request_done(vbr);
+			req_done = true;
 		}
+		if (unlikely(virtqueue_is_broken(vq)))
+			break;
 	} while (!virtqueue_enable_cb(vq));
+	spin_unlock_irqrestore(&vblk->vq_lock, flags);
+
 	/* In case queue is stopped waiting for more buffers. */
 	if (req_done)
-		blk_start_queue(vblk->disk->queue);
-	spin_unlock_irqrestore(vblk->disk->queue->queue_lock, flags);
-
-	if (bio_done)
-		wake_up(&vblk->queue_wait);
+		blk_mq_start_stopped_hw_queues(vblk->disk->queue);
 }
 
-static bool do_req(struct request_queue *q, struct virtio_blk *vblk,
-		   struct request *req)
+static int virtio_queue_rq(struct blk_mq_hw_ctx *hctx, struct request *req)
 {
+	struct virtio_blk *vblk = hctx->queue->queuedata;
+	struct virtblk_req *vbr = req->special;
+	unsigned long flags;
 	unsigned int num;
-	struct virtblk_req *vbr;
+	const bool last = (req->cmd_flags & REQ_END) != 0;
 
-	vbr = virtblk_alloc_req(vblk, GFP_ATOMIC);
-	if (!vbr)
-		/* When another request finishes we'll try again. */
-		return false;
+	BUG_ON(req->nr_phys_segments + 2 > vblk->sg_elems);
 
 	vbr->req = req;
-	vbr->bio = NULL;
 	if (req->cmd_flags & REQ_FLUSH) {
 		vbr->out_hdr.type = VIRTIO_BLK_T_FLUSH;
 		vbr->out_hdr.sector = 0;
@@ -342,7 +189,7 @@ static bool do_req(struct request_queue *q, struct virtio_blk *vblk,
 		}
 	}
 
-	num = blk_rq_map_sg(q, vbr->req, vblk->sg);
+	num = blk_rq_map_sg(hctx->queue, vbr->req, vbr->sg);
 	if (num) {
 		if (rq_data_dir(vbr->req) == WRITE)
 			vbr->out_hdr.type |= VIRTIO_BLK_T_OUT;
@@ -350,63 +197,19 @@ static bool do_req(struct request_queue *q, struct virtio_blk *vblk,
 			vbr->out_hdr.type |= VIRTIO_BLK_T_IN;
 	}
 
-	if (__virtblk_add_req(vblk->vq, vbr, vblk->sg, num) < 0) {
-		mempool_free(vbr, vblk->pool);
-		return false;
-	}
-
-	return true;
-}
-
-static void virtblk_request(struct request_queue *q)
-{
-	struct virtio_blk *vblk = q->queuedata;
-	struct request *req;
-	unsigned int issued = 0;
-
-	while ((req = blk_peek_request(q)) != NULL) {
-		BUG_ON(req->nr_phys_segments + 2 > vblk->sg_elems);
-
-		/* If this request fails, stop queue and wait for something to
-		   finish to restart it. */
-		if (!do_req(q, vblk, req)) {
-			blk_stop_queue(q);
-			break;
-		}
-		blk_start_request(req);
-		issued++;
+	spin_lock_irqsave(&vblk->vq_lock, flags);
+	if (__virtblk_add_req(vblk->vq, vbr, vbr->sg, num) < 0) {
+		virtqueue_kick(vblk->vq);
+		spin_unlock_irqrestore(&vblk->vq_lock, flags);
+		blk_mq_stop_hw_queue(hctx);
+		return BLK_MQ_RQ_QUEUE_BUSY;
 	}
 
-	if (issued)
+	if (last)
 		virtqueue_kick(vblk->vq);
-}
 
-static void virtblk_make_request(struct request_queue *q, struct bio *bio)
-{
-	struct virtio_blk *vblk = q->queuedata;
-	struct virtblk_req *vbr;
-
-	BUG_ON(bio->bi_phys_segments + 2 > vblk->sg_elems);
-
-	vbr = virtblk_alloc_req(vblk, GFP_NOIO);
-	if (!vbr) {
-		bio_endio(bio, -ENOMEM);
-		return;
-	}
-
-	vbr->bio = bio;
-	vbr->flags = 0;
-	if (bio->bi_rw & REQ_FLUSH)
-		vbr->flags |= VBLK_REQ_FLUSH;
-	if (bio->bi_rw & REQ_FUA)
-		vbr->flags |= VBLK_REQ_FUA;
-	if (bio->bi_size)
-		vbr->flags |= VBLK_REQ_DATA;
-
-	if (unlikely(vbr->flags & VBLK_REQ_FLUSH))
-		virtblk_bio_send_flush(vbr);
-	else
-		virtblk_bio_send_data(vbr);
+	spin_unlock_irqrestore(&vblk->vq_lock, flags);
+	return BLK_MQ_RQ_QUEUE_OK;
 }
 
 /* return id (s/n) string for *disk to *id_str
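
The request path above follows the ->queue_rq contract of this blk-mq generation: either the driver gets the request onto its ring and returns BLK_MQ_RQ_QUEUE_OK, or it stops the hardware queue and returns BLK_MQ_RQ_QUEUE_BUSY so the core requeues the request until a completion restarts the queue (virtblk_done does that via blk_mq_start_stopped_hw_queues). Kicking only on REQ_END batches doorbells across a plugged run of requests. A minimal sketch of that shape, with the hw_ring_* helpers standing in for hypothetical device code:

	static int example_queue_rq(struct blk_mq_hw_ctx *hctx,
				    struct request *req)
	{
		if (hw_ring_post(req) < 0) {		/* hypothetical: ring full */
			hw_ring_kick();			/* flush what is queued */
			blk_mq_stop_hw_queue(hctx);	/* a completion restarts us */
			return BLK_MQ_RQ_QUEUE_BUSY;
		}
		if (req->cmd_flags & REQ_END)		/* last of a plugged batch */
			hw_ring_kick();			/* one doorbell per batch */
		return BLK_MQ_RQ_QUEUE_OK;
	}
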
@@ -456,18 +259,15 @@ static int virtblk_ioctl(struct block_device *bdev, fmode_t mode,
 static int virtblk_getgeo(struct block_device *bd, struct hd_geometry *geo)
 {
 	struct virtio_blk *vblk = bd->bd_disk->private_data;
-	struct virtio_blk_geometry vgeo;
-	int err;
 
 	/* see if the host passed in geometry config */
-	err = virtio_config_val(vblk->vdev, VIRTIO_BLK_F_GEOMETRY,
-				offsetof(struct virtio_blk_config, geometry),
-				&vgeo);
-
-	if (!err) {
-		geo->heads = vgeo.heads;
-		geo->sectors = vgeo.sectors;
-		geo->cylinders = vgeo.cylinders;
+	if (virtio_has_feature(vblk->vdev, VIRTIO_BLK_F_GEOMETRY)) {
+		virtio_cread(vblk->vdev, struct virtio_blk_config,
+			     geometry.cylinders, &geo->cylinders);
+		virtio_cread(vblk->vdev, struct virtio_blk_config,
+			     geometry.heads, &geo->heads);
+		virtio_cread(vblk->vdev, struct virtio_blk_config,
+			     geometry.sectors, &geo->sectors);
 	} else {
 		/* some standard values, similar to sd */
 		geo->heads = 1 << 6;
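
This and the conversions below replace the old virtio_config_val() helper with the typed config-space accessors (virtio_cread(), virtio_cread_feature(), virtio_cwrite8()). The feature-checking variant returns an error when the feature bit was not negotiated, which is what keeps the fallback branches intact. A small usage sketch, with seg_max as the example field and an illustrative fallback value:

	static void example_read_config(struct virtio_device *vdev)
	{
		u32 seg_max;

		/* fails cleanly when VIRTIO_BLK_F_SEG_MAX is not negotiated */
		if (virtio_cread_feature(vdev, VIRTIO_BLK_F_SEG_MAX,
					 struct virtio_blk_config, seg_max,
					 &seg_max))
			seg_max = 1;	/* illustrative fallback */
	}
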
@@ -529,8 +329,7 @@ static void virtblk_config_changed_work(struct work_struct *work)
 		goto done;
 
 	/* Host must always specify the capacity. */
-	vdev->config->get(vdev, offsetof(struct virtio_blk_config, capacity),
-			  &capacity, sizeof(capacity));
+	virtio_cread(vdev, struct virtio_blk_config, capacity, &capacity);
 
 	/* If capacity is too big, truncate with warning. */
 	if ((sector_t)capacity != capacity) {
@@ -608,9 +407,9 @@ static int virtblk_get_cache_mode(struct virtio_device *vdev)
 	u8 writeback;
 	int err;
 
-	err = virtio_config_val(vdev, VIRTIO_BLK_F_CONFIG_WCE,
-				offsetof(struct virtio_blk_config, wce),
-				&writeback);
+	err = virtio_cread_feature(vdev, VIRTIO_BLK_F_CONFIG_WCE,
+				   struct virtio_blk_config, wce,
+				   &writeback);
 	if (err)
 		writeback = virtio_has_feature(vdev, VIRTIO_BLK_F_WCE);
 
@@ -642,7 +441,6 @@ virtblk_cache_type_store(struct device *dev, struct device_attribute *attr,
 	struct virtio_blk *vblk = disk->private_data;
 	struct virtio_device *vdev = vblk->vdev;
 	int i;
-	u8 writeback;
 
 	BUG_ON(!virtio_has_feature(vblk->vdev, VIRTIO_BLK_F_CONFIG_WCE));
 	for (i = ARRAY_SIZE(virtblk_cache_types); --i >= 0; )
@@ -652,11 +450,7 @@ virtblk_cache_type_store(struct device *dev, struct device_attribute *attr,
 	if (i < 0)
 		return -EINVAL;
 
-	writeback = i;
-	vdev->config->set(vdev,
-			  offsetof(struct virtio_blk_config, wce),
-			  &writeback, sizeof(writeback));
-
+	virtio_cwrite8(vdev, offsetof(struct virtio_blk_config, wce), i);
 	virtblk_update_cache_mode(vdev);
 	return count;
 }
@@ -680,12 +474,35 @@ static const struct device_attribute dev_attr_cache_type_rw =
 	__ATTR(cache_type, S_IRUGO|S_IWUSR,
 	       virtblk_cache_type_show, virtblk_cache_type_store);
 
+static struct blk_mq_ops virtio_mq_ops = {
+	.queue_rq	= virtio_queue_rq,
+	.map_queue	= blk_mq_map_queue,
+	.alloc_hctx	= blk_mq_alloc_single_hw_queue,
+	.free_hctx	= blk_mq_free_single_hw_queue,
+};
+
+static struct blk_mq_reg virtio_mq_reg = {
+	.ops		= &virtio_mq_ops,
+	.nr_hw_queues	= 1,
+	.queue_depth	= 64,
+	.numa_node	= NUMA_NO_NODE,
+	.flags		= BLK_MQ_F_SHOULD_MERGE,
+};
+
+static void virtblk_init_vbr(void *data, struct blk_mq_hw_ctx *hctx,
+			     struct request *rq, unsigned int nr)
+{
+	struct virtio_blk *vblk = data;
+	struct virtblk_req *vbr = rq->special;
+
+	sg_init_table(vbr->sg, vblk->sg_elems);
+}
+
 static int virtblk_probe(struct virtio_device *vdev)
 {
 	struct virtio_blk *vblk;
 	struct request_queue *q;
 	int err, index;
-	int pool_size;
 
 	u64 cap;
 	u32 v, blk_size, sg_elems, opt_io_size;
@@ -699,9 +516,9 @@ static int virtblk_probe(struct virtio_device *vdev)
 	index = err;
 
 	/* We need to know how many segments before we allocate. */
-	err = virtio_config_val(vdev, VIRTIO_BLK_F_SEG_MAX,
-				offsetof(struct virtio_blk_config, seg_max),
-				&sg_elems);
+	err = virtio_cread_feature(vdev, VIRTIO_BLK_F_SEG_MAX,
+				   struct virtio_blk_config, seg_max,
+				   &sg_elems);
 
 	/* We need at least one SG element, whatever they say. */
 	if (err || !sg_elems)
@@ -709,17 +526,14 @@ static int virtblk_probe(struct virtio_device *vdev)
 
 	/* We need an extra sg elements at head and tail. */
 	sg_elems += 2;
-	vdev->priv = vblk = kmalloc(sizeof(*vblk) +
-				    sizeof(vblk->sg[0]) * sg_elems, GFP_KERNEL);
+	vdev->priv = vblk = kmalloc(sizeof(*vblk), GFP_KERNEL);
 	if (!vblk) {
 		err = -ENOMEM;
 		goto out_free_index;
 	}
 
-	init_waitqueue_head(&vblk->queue_wait);
 	vblk->vdev = vdev;
 	vblk->sg_elems = sg_elems;
-	sg_init_table(vblk->sg, vblk->sg_elems);
 	mutex_init(&vblk->config_lock);
 
 	INIT_WORK(&vblk->config_work, virtblk_config_changed_work);
@@ -728,31 +542,27 @@ static int virtblk_probe(struct virtio_device *vdev)
 	err = init_vq(vblk);
 	if (err)
 		goto out_free_vblk;
-
-	pool_size = sizeof(struct virtblk_req);
-	if (use_bio)
-		pool_size += sizeof(struct scatterlist) * sg_elems;
-	vblk->pool = mempool_create_kmalloc_pool(1, pool_size);
-	if (!vblk->pool) {
-		err = -ENOMEM;
-		goto out_free_vq;
-	}
+	spin_lock_init(&vblk->vq_lock);
 
 	/* FIXME: How many partitions? How long is a piece of string? */
 	vblk->disk = alloc_disk(1 << PART_BITS);
 	if (!vblk->disk) {
 		err = -ENOMEM;
-		goto out_mempool;
+		goto out_free_vq;
 	}
 
-	q = vblk->disk->queue = blk_init_queue(virtblk_request, NULL);
+	virtio_mq_reg.cmd_size =
+		sizeof(struct virtblk_req) +
+		sizeof(struct scatterlist) * sg_elems;
+
+	q = vblk->disk->queue = blk_mq_init_queue(&virtio_mq_reg, vblk);
 	if (!q) {
 		err = -ENOMEM;
 		goto out_put_disk;
 	}
 
-	if (use_bio)
-		blk_queue_make_request(q, virtblk_make_request);
+	blk_mq_init_commands(q, virtblk_init_vbr, vblk);
+
 	q->queuedata = vblk;
 
 	virtblk_name_format("vd", index, vblk->disk->disk_name, DISK_NAME_LEN);
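
Taken together, the probe path now sizes the per-request command area before creating the queue: cmd_size tells blk-mq how much driver-private space to reserve behind each preallocated request, blk_mq_init_queue() builds the queue from the static registration template, and blk_mq_init_commands() runs once per preallocated command so every embedded scatterlist is initialized up front rather than on the I/O path. A condensed sketch of that order, with error handling elided and names as in the patch (not a drop-in function):

	virtio_mq_reg.cmd_size = sizeof(struct virtblk_req) +
				 sizeof(struct scatterlist) * sg_elems;

	q = blk_mq_init_queue(&virtio_mq_reg, vblk);	/* per-request area reserved */
	blk_mq_init_commands(q, virtblk_init_vbr, vblk);	/* sg_init_table() each vbr */
	q->queuedata = vblk;
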
@@ -772,8 +582,7 @@ static int virtblk_probe(struct virtio_device *vdev)
 		set_disk_ro(vblk->disk, 1);
 
 	/* Host must always specify the capacity. */
-	vdev->config->get(vdev, offsetof(struct virtio_blk_config, capacity),
-			  &cap, sizeof(cap));
+	virtio_cread(vdev, struct virtio_blk_config, capacity, &cap);
 
 	/* If capacity is too big, truncate with warning. */
 	if ((sector_t)cap != cap) {
@@ -794,46 +603,45 @@ static int virtblk_probe(struct virtio_device *vdev)
 
 	/* Host can optionally specify maximum segment size and number of
 	 * segments. */
-	err = virtio_config_val(vdev, VIRTIO_BLK_F_SIZE_MAX,
-				offsetof(struct virtio_blk_config, size_max),
-				&v);
+	err = virtio_cread_feature(vdev, VIRTIO_BLK_F_SIZE_MAX,
+				   struct virtio_blk_config, size_max, &v);
 	if (!err)
 		blk_queue_max_segment_size(q, v);
 	else
 		blk_queue_max_segment_size(q, -1U);
 
 	/* Host can optionally specify the block size of the device */
-	err = virtio_config_val(vdev, VIRTIO_BLK_F_BLK_SIZE,
-				offsetof(struct virtio_blk_config, blk_size),
-				&blk_size);
+	err = virtio_cread_feature(vdev, VIRTIO_BLK_F_BLK_SIZE,
+				   struct virtio_blk_config, blk_size,
+				   &blk_size);
 	if (!err)
 		blk_queue_logical_block_size(q, blk_size);
 	else
 		blk_size = queue_logical_block_size(q);
 
 	/* Use topology information if available */
-	err = virtio_config_val(vdev, VIRTIO_BLK_F_TOPOLOGY,
-		     offsetof(struct virtio_blk_config, physical_block_exp),
-		     &physical_block_exp);
+	err = virtio_cread_feature(vdev, VIRTIO_BLK_F_TOPOLOGY,
+				   struct virtio_blk_config, physical_block_exp,
+				   &physical_block_exp);
 	if (!err && physical_block_exp)
 		blk_queue_physical_block_size(q,
 				blk_size * (1 << physical_block_exp));
 
-	err = virtio_config_val(vdev, VIRTIO_BLK_F_TOPOLOGY,
-		     offsetof(struct virtio_blk_config, alignment_offset),
-		     &alignment_offset);
+	err = virtio_cread_feature(vdev, VIRTIO_BLK_F_TOPOLOGY,
+				   struct virtio_blk_config, alignment_offset,
+				   &alignment_offset);
 	if (!err && alignment_offset)
 		blk_queue_alignment_offset(q, blk_size * alignment_offset);
 
-	err = virtio_config_val(vdev, VIRTIO_BLK_F_TOPOLOGY,
-		     offsetof(struct virtio_blk_config, min_io_size),
-		     &min_io_size);
+	err = virtio_cread_feature(vdev, VIRTIO_BLK_F_TOPOLOGY,
+				   struct virtio_blk_config, min_io_size,
+				   &min_io_size);
 	if (!err && min_io_size)
 		blk_queue_io_min(q, blk_size * min_io_size);
 
-	err = virtio_config_val(vdev, VIRTIO_BLK_F_TOPOLOGY,
-		     offsetof(struct virtio_blk_config, opt_io_size),
-		     &opt_io_size);
+	err = virtio_cread_feature(vdev, VIRTIO_BLK_F_TOPOLOGY,
+				   struct virtio_blk_config, opt_io_size,
+				   &opt_io_size);
 	if (!err && opt_io_size)
 		blk_queue_io_opt(q, blk_size * opt_io_size);
 
@@ -857,8 +665,6 @@ out_del_disk:
 	blk_cleanup_queue(vblk->disk->queue);
 out_put_disk:
 	put_disk(vblk->disk);
-out_mempool:
-	mempool_destroy(vblk->pool);
 out_free_vq:
 	vdev->config->del_vqs(vdev);
 out_free_vblk:
@@ -890,7 +696,6 @@ static void virtblk_remove(struct virtio_device *vdev)
 
 	refc = atomic_read(&disk_to_dev(vblk->disk)->kobj.kref.refcount);
 	put_disk(vblk->disk);
-	mempool_destroy(vblk->pool);
 	vdev->config->del_vqs(vdev);
 	kfree(vblk);
 
@@ -899,7 +704,7 @@ static void virtblk_remove(struct virtio_device *vdev)
 	ida_simple_remove(&vd_index_ida, index);
 }
 
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
 static int virtblk_freeze(struct virtio_device *vdev)
 {
 	struct virtio_blk *vblk = vdev->priv;
@@ -914,10 +719,7 @@ static int virtblk_freeze(struct virtio_device *vdev)
 
 	flush_work(&vblk->config_work);
 
-	spin_lock_irq(vblk->disk->queue->queue_lock);
-	blk_stop_queue(vblk->disk->queue);
-	spin_unlock_irq(vblk->disk->queue->queue_lock);
-	blk_sync_queue(vblk->disk->queue);
+	blk_mq_stop_hw_queues(vblk->disk->queue);
 
 	vdev->config->del_vqs(vdev);
 	return 0;
@@ -930,11 +732,9 @@ static int virtblk_restore(struct virtio_device *vdev)
 
 	vblk->config_enable = true;
 	ret = init_vq(vdev->priv);
-	if (!ret) {
-		spin_lock_irq(vblk->disk->queue->queue_lock);
-		blk_start_queue(vblk->disk->queue);
-		spin_unlock_irq(vblk->disk->queue->queue_lock);
-	}
+	if (!ret)
+		blk_mq_start_stopped_hw_queues(vblk->disk->queue);
+
 	return ret;
 }
 #endif
@@ -959,7 +759,7 @@ static struct virtio_driver virtio_blk = {
 	.probe			= virtblk_probe,
 	.remove			= virtblk_remove,
 	.config_changed		= virtblk_config_changed,
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
 	.freeze			= virtblk_freeze,
 	.restore		= virtblk_restore,
 #endif