author     Linus Torvalds <torvalds@linux-foundation.org>    2013-11-15 19:33:41 -0500
committer  Linus Torvalds <torvalds@linux-foundation.org>    2013-11-15 19:33:41 -0500
commit     f412f2c60b480fa5140a4b4cb321cd48c64e1812 (patch)
tree       aafd5a5922b43daca4abdfa9bb723fc1f334108d /drivers/block
parent     cd1177f25069cb494680eedd718e7c6d8fd85d10 (diff)
parent     1cf7e9c68fe84248174e998922b39e508375e7c1 (diff)
Merge branch 'for-linus' of git://git.kernel.dk/linux-block
Pull second round of block driver updates from Jens Axboe:
 "As mentioned in the original pull request, the bcache bits were pulled
  because of their dependency on the immutable bio vecs. Kent re-did this
  part and resubmitted it, so here's the 2nd round of (mostly) driver
  updates for 3.13. It contains:

   - The bcache work from Kent.

   - Conversion of virtio-blk to blk-mq. This removes the bio and request
     path, and substitutes the blk-mq path instead. The end result is
     almost 200 deleted lines. Patch is acked by Asias and Christoph, who
     both did a bunch of testing.

   - A removal of bootmem.h include from Grygorii Strashko, part of a
     larger series of his killing the dependency on that header file.

   - Removal of __cpuinit from blk-mq from Paul Gortmaker"

* 'for-linus' of git://git.kernel.dk/linux-block: (56 commits)
  virtio_blk: blk-mq support
  blk-mq: remove newly added instances of __cpuinit
  bcache: defensively handle format strings
  bcache: Bypass torture test
  bcache: Delete some slower inline asm
  bcache: Use ida for bcache block dev minor
  bcache: Fix sysfs splat on shutdown with flash only devs
  bcache: Better full stripe scanning
  bcache: Have btree_split() insert into parent directly
  bcache: Move spinlock into struct time_stats
  bcache: Kill sequential_merge option
  bcache: Kill bch_next_recurse_key()
  bcache: Avoid deadlocking in garbage collection
  bcache: Incremental gc
  bcache: Add make_btree_freeing_key()
  bcache: Add btree_node_write_sync()
  bcache: PRECEDING_KEY()
  bcache: bch_(btree|extent)_ptr_invalid()
  bcache: Don't bother with bucket refcount for btree node allocations
  bcache: Debug code improvements
  ...
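For orientation before the diff: the virtio-blk conversion registers a set of blk_mq_ops and a blk_mq_reg descriptor, lets blk_mq_init_queue() build the request queue, and carves per-request driver data out of each request via cmd_size, reached through rq->special. Below is a minimal sketch of that 3.13-era registration pattern, assuming only the API as it appears in the diff that follows; the my_-prefixed names are hypothetical stand-ins for the driver's virtio_queue_rq, virtio_mq_ops and virtio_mq_reg.

	#include <linux/blk-mq.h>
	#include <linux/numa.h>

	/* Hypothetical per-request payload; allocated with each request via cmd_size. */
	struct my_cmd {
		int status;
	};

	/* Called by blk-mq for every request submitted to this hardware queue. */
	static int my_queue_rq(struct blk_mq_hw_ctx *hctx, struct request *rq)
	{
		struct my_cmd *cmd = rq->special;	/* per-request driver data */

		/* hand the request to the hardware here ... */
		(void)cmd;
		return BLK_MQ_RQ_QUEUE_OK;	/* or BLK_MQ_RQ_QUEUE_BUSY to stop the queue and retry */
	}

	static struct blk_mq_ops my_mq_ops = {
		.queue_rq	= my_queue_rq,
		.map_queue	= blk_mq_map_queue,
		.alloc_hctx	= blk_mq_alloc_single_hw_queue,
		.free_hctx	= blk_mq_free_single_hw_queue,
	};

	static struct blk_mq_reg my_mq_reg = {
		.ops		= &my_mq_ops,
		.nr_hw_queues	= 1,
		.queue_depth	= 64,
		.cmd_size	= sizeof(struct my_cmd),
		.numa_node	= NUMA_NO_NODE,
		.flags		= BLK_MQ_F_SHOULD_MERGE,
	};

	/* In probe: q = blk_mq_init_queue(&my_mq_reg, driver_private_data); */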
Diffstat (limited to 'drivers/block')
-rw-r--r--  drivers/block/virtio_blk.c  | 322
1 file changed, 65 insertions(+), 257 deletions(-)
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index f3be496ac8fa..588479d58f52 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -11,12 +11,11 @@
 #include <linux/string_helpers.h>
 #include <scsi/scsi_cmnd.h>
 #include <linux/idr.h>
+#include <linux/blk-mq.h>
+#include <linux/numa.h>
 
 #define PART_BITS 4
 
-static bool use_bio;
-module_param(use_bio, bool, S_IRUGO);
-
 static int major;
 static DEFINE_IDA(vd_index_ida);
 
@@ -26,13 +25,11 @@ struct virtio_blk
 {
 	struct virtio_device *vdev;
 	struct virtqueue *vq;
-	wait_queue_head_t queue_wait;
+	spinlock_t vq_lock;
 
 	/* The disk structure for the kernel. */
 	struct gendisk *disk;
 
-	mempool_t *pool;
-
 	/* Process context for config space updates */
 	struct work_struct config_work;
 
@@ -47,31 +44,17 @@ struct virtio_blk
 
 	/* Ida index - used to track minor number allocations. */
 	int index;
-
-	/* Scatterlist: can be too big for stack. */
-	struct scatterlist sg[/*sg_elems*/];
 };
 
 struct virtblk_req
 {
 	struct request *req;
-	struct bio *bio;
 	struct virtio_blk_outhdr out_hdr;
 	struct virtio_scsi_inhdr in_hdr;
-	struct work_struct work;
-	struct virtio_blk *vblk;
-	int flags;
 	u8 status;
 	struct scatterlist sg[];
 };
 
-enum {
-	VBLK_IS_FLUSH		= 1,
-	VBLK_REQ_FLUSH		= 2,
-	VBLK_REQ_DATA		= 4,
-	VBLK_REQ_FUA		= 8,
-};
-
 static inline int virtblk_result(struct virtblk_req *vbr)
 {
 	switch (vbr->status) {
@@ -84,22 +67,6 @@ static inline int virtblk_result(struct virtblk_req *vbr)
 	}
 }
 
-static inline struct virtblk_req *virtblk_alloc_req(struct virtio_blk *vblk,
-						    gfp_t gfp_mask)
-{
-	struct virtblk_req *vbr;
-
-	vbr = mempool_alloc(vblk->pool, gfp_mask);
-	if (!vbr)
-		return NULL;
-
-	vbr->vblk = vblk;
-	if (use_bio)
-		sg_init_table(vbr->sg, vblk->sg_elems);
-
-	return vbr;
-}
-
 static int __virtblk_add_req(struct virtqueue *vq,
 			     struct virtblk_req *vbr,
 			     struct scatterlist *data_sg,
@@ -143,83 +110,8 @@ static int __virtblk_add_req(struct virtqueue *vq,
 	return virtqueue_add_sgs(vq, sgs, num_out, num_in, vbr, GFP_ATOMIC);
 }
 
-static void virtblk_add_req(struct virtblk_req *vbr, bool have_data)
-{
-	struct virtio_blk *vblk = vbr->vblk;
-	DEFINE_WAIT(wait);
-	int ret;
-
-	spin_lock_irq(vblk->disk->queue->queue_lock);
-	while (unlikely((ret = __virtblk_add_req(vblk->vq, vbr, vbr->sg,
-						 have_data)) < 0)) {
-		prepare_to_wait_exclusive(&vblk->queue_wait, &wait,
-					  TASK_UNINTERRUPTIBLE);
-
-		spin_unlock_irq(vblk->disk->queue->queue_lock);
-		io_schedule();
-		spin_lock_irq(vblk->disk->queue->queue_lock);
-
-		finish_wait(&vblk->queue_wait, &wait);
-	}
-
-	virtqueue_kick(vblk->vq);
-	spin_unlock_irq(vblk->disk->queue->queue_lock);
-}
-
-static void virtblk_bio_send_flush(struct virtblk_req *vbr)
-{
-	vbr->flags |= VBLK_IS_FLUSH;
-	vbr->out_hdr.type = VIRTIO_BLK_T_FLUSH;
-	vbr->out_hdr.sector = 0;
-	vbr->out_hdr.ioprio = 0;
-
-	virtblk_add_req(vbr, false);
-}
-
-static void virtblk_bio_send_data(struct virtblk_req *vbr)
-{
-	struct virtio_blk *vblk = vbr->vblk;
-	struct bio *bio = vbr->bio;
-	bool have_data;
-
-	vbr->flags &= ~VBLK_IS_FLUSH;
-	vbr->out_hdr.type = 0;
-	vbr->out_hdr.sector = bio->bi_sector;
-	vbr->out_hdr.ioprio = bio_prio(bio);
-
-	if (blk_bio_map_sg(vblk->disk->queue, bio, vbr->sg)) {
-		have_data = true;
-		if (bio->bi_rw & REQ_WRITE)
-			vbr->out_hdr.type |= VIRTIO_BLK_T_OUT;
-		else
-			vbr->out_hdr.type |= VIRTIO_BLK_T_IN;
-	} else
-		have_data = false;
-
-	virtblk_add_req(vbr, have_data);
-}
-
-static void virtblk_bio_send_data_work(struct work_struct *work)
-{
-	struct virtblk_req *vbr;
-
-	vbr = container_of(work, struct virtblk_req, work);
-
-	virtblk_bio_send_data(vbr);
-}
-
-static void virtblk_bio_send_flush_work(struct work_struct *work)
-{
-	struct virtblk_req *vbr;
-
-	vbr = container_of(work, struct virtblk_req, work);
-
-	virtblk_bio_send_flush(vbr);
-}
-
 static inline void virtblk_request_done(struct virtblk_req *vbr)
 {
-	struct virtio_blk *vblk = vbr->vblk;
 	struct request *req = vbr->req;
 	int error = virtblk_result(vbr);
 
@@ -231,92 +123,45 @@ static inline void virtblk_request_done(struct virtblk_req *vbr)
 		req->errors = (error != 0);
 	}
 
-	__blk_end_request_all(req, error);
-	mempool_free(vbr, vblk->pool);
-}
-
-static inline void virtblk_bio_flush_done(struct virtblk_req *vbr)
-{
-	struct virtio_blk *vblk = vbr->vblk;
-
-	if (vbr->flags & VBLK_REQ_DATA) {
-		/* Send out the actual write data */
-		INIT_WORK(&vbr->work, virtblk_bio_send_data_work);
-		queue_work(virtblk_wq, &vbr->work);
-	} else {
-		bio_endio(vbr->bio, virtblk_result(vbr));
-		mempool_free(vbr, vblk->pool);
-	}
-}
-
-static inline void virtblk_bio_data_done(struct virtblk_req *vbr)
-{
-	struct virtio_blk *vblk = vbr->vblk;
-
-	if (unlikely(vbr->flags & VBLK_REQ_FUA)) {
-		/* Send out a flush before end the bio */
-		vbr->flags &= ~VBLK_REQ_DATA;
-		INIT_WORK(&vbr->work, virtblk_bio_send_flush_work);
-		queue_work(virtblk_wq, &vbr->work);
-	} else {
-		bio_endio(vbr->bio, virtblk_result(vbr));
-		mempool_free(vbr, vblk->pool);
-	}
-}
-
-static inline void virtblk_bio_done(struct virtblk_req *vbr)
-{
-	if (unlikely(vbr->flags & VBLK_IS_FLUSH))
-		virtblk_bio_flush_done(vbr);
-	else
-		virtblk_bio_data_done(vbr);
+	blk_mq_end_io(req, error);
 }
 
 static void virtblk_done(struct virtqueue *vq)
 {
 	struct virtio_blk *vblk = vq->vdev->priv;
-	bool bio_done = false, req_done = false;
+	bool req_done = false;
 	struct virtblk_req *vbr;
 	unsigned long flags;
 	unsigned int len;
 
-	spin_lock_irqsave(vblk->disk->queue->queue_lock, flags);
+	spin_lock_irqsave(&vblk->vq_lock, flags);
 	do {
 		virtqueue_disable_cb(vq);
 		while ((vbr = virtqueue_get_buf(vblk->vq, &len)) != NULL) {
-			if (vbr->bio) {
-				virtblk_bio_done(vbr);
-				bio_done = true;
-			} else {
-				virtblk_request_done(vbr);
-				req_done = true;
-			}
+			virtblk_request_done(vbr);
+			req_done = true;
 		}
 		if (unlikely(virtqueue_is_broken(vq)))
 			break;
 	} while (!virtqueue_enable_cb(vq));
+	spin_unlock_irqrestore(&vblk->vq_lock, flags);
 
 	/* In case queue is stopped waiting for more buffers. */
 	if (req_done)
-		blk_start_queue(vblk->disk->queue);
-	spin_unlock_irqrestore(vblk->disk->queue->queue_lock, flags);
-
-	if (bio_done)
-		wake_up(&vblk->queue_wait);
+		blk_mq_start_stopped_hw_queues(vblk->disk->queue);
 }
 
-static bool do_req(struct request_queue *q, struct virtio_blk *vblk,
-		   struct request *req)
+static int virtio_queue_rq(struct blk_mq_hw_ctx *hctx, struct request *req)
 {
+	struct virtio_blk *vblk = hctx->queue->queuedata;
+	struct virtblk_req *vbr = req->special;
+	unsigned long flags;
 	unsigned int num;
-	struct virtblk_req *vbr;
+	const bool last = (req->cmd_flags & REQ_END) != 0;
 
-	vbr = virtblk_alloc_req(vblk, GFP_ATOMIC);
-	if (!vbr)
-		/* When another request finishes we'll try again. */
-		return false;
+	BUG_ON(req->nr_phys_segments + 2 > vblk->sg_elems);
 
 	vbr->req = req;
-	vbr->bio = NULL;
 	if (req->cmd_flags & REQ_FLUSH) {
 		vbr->out_hdr.type = VIRTIO_BLK_T_FLUSH;
 		vbr->out_hdr.sector = 0;
@@ -344,7 +189,7 @@ static bool do_req(struct request_queue *q, struct virtio_blk *vblk,
 		}
 	}
 
-	num = blk_rq_map_sg(q, vbr->req, vblk->sg);
+	num = blk_rq_map_sg(hctx->queue, vbr->req, vbr->sg);
 	if (num) {
 		if (rq_data_dir(vbr->req) == WRITE)
 			vbr->out_hdr.type |= VIRTIO_BLK_T_OUT;
@@ -352,63 +197,18 @@ static bool do_req(struct request_queue *q, struct virtio_blk *vblk,
 			vbr->out_hdr.type |= VIRTIO_BLK_T_IN;
 	}
 
-	if (__virtblk_add_req(vblk->vq, vbr, vblk->sg, num) < 0) {
-		mempool_free(vbr, vblk->pool);
-		return false;
-	}
-
-	return true;
-}
-
-static void virtblk_request(struct request_queue *q)
-{
-	struct virtio_blk *vblk = q->queuedata;
-	struct request *req;
-	unsigned int issued = 0;
-
-	while ((req = blk_peek_request(q)) != NULL) {
-		BUG_ON(req->nr_phys_segments + 2 > vblk->sg_elems);
-
-		/* If this request fails, stop queue and wait for something to
-		   finish to restart it. */
-		if (!do_req(q, vblk, req)) {
-			blk_stop_queue(q);
-			break;
-		}
-		blk_start_request(req);
-		issued++;
-	}
-
-	if (issued)
+	spin_lock_irqsave(&vblk->vq_lock, flags);
+	if (__virtblk_add_req(vblk->vq, vbr, vbr->sg, num) < 0) {
+		spin_unlock_irqrestore(&vblk->vq_lock, flags);
+		blk_mq_stop_hw_queue(hctx);
 		virtqueue_kick(vblk->vq);
-}
-
-static void virtblk_make_request(struct request_queue *q, struct bio *bio)
-{
-	struct virtio_blk *vblk = q->queuedata;
-	struct virtblk_req *vbr;
-
-	BUG_ON(bio->bi_phys_segments + 2 > vblk->sg_elems);
-
-	vbr = virtblk_alloc_req(vblk, GFP_NOIO);
-	if (!vbr) {
-		bio_endio(bio, -ENOMEM);
-		return;
+		return BLK_MQ_RQ_QUEUE_BUSY;
 	}
+	spin_unlock_irqrestore(&vblk->vq_lock, flags);
 
-	vbr->bio = bio;
-	vbr->flags = 0;
-	if (bio->bi_rw & REQ_FLUSH)
-		vbr->flags |= VBLK_REQ_FLUSH;
-	if (bio->bi_rw & REQ_FUA)
-		vbr->flags |= VBLK_REQ_FUA;
-	if (bio->bi_size)
-		vbr->flags |= VBLK_REQ_DATA;
-
-	if (unlikely(vbr->flags & VBLK_REQ_FLUSH))
-		virtblk_bio_send_flush(vbr);
-	else
-		virtblk_bio_send_data(vbr);
+	if (last)
+		virtqueue_kick(vblk->vq);
+	return BLK_MQ_RQ_QUEUE_OK;
 }
 
 /* return id (s/n) string for *disk to *id_str
@@ -673,12 +473,35 @@ static const struct device_attribute dev_attr_cache_type_rw =
 	__ATTR(cache_type, S_IRUGO|S_IWUSR,
 	       virtblk_cache_type_show, virtblk_cache_type_store);
 
+static struct blk_mq_ops virtio_mq_ops = {
+	.queue_rq	= virtio_queue_rq,
+	.map_queue	= blk_mq_map_queue,
+	.alloc_hctx	= blk_mq_alloc_single_hw_queue,
+	.free_hctx	= blk_mq_free_single_hw_queue,
+};
+
+static struct blk_mq_reg virtio_mq_reg = {
+	.ops		= &virtio_mq_ops,
+	.nr_hw_queues	= 1,
+	.queue_depth	= 64,
+	.numa_node	= NUMA_NO_NODE,
+	.flags		= BLK_MQ_F_SHOULD_MERGE,
+};
+
+static void virtblk_init_vbr(void *data, struct blk_mq_hw_ctx *hctx,
+			     struct request *rq, unsigned int nr)
+{
+	struct virtio_blk *vblk = data;
+	struct virtblk_req *vbr = rq->special;
+
+	sg_init_table(vbr->sg, vblk->sg_elems);
+}
+
 static int virtblk_probe(struct virtio_device *vdev)
 {
 	struct virtio_blk *vblk;
 	struct request_queue *q;
 	int err, index;
-	int pool_size;
 
 	u64 cap;
 	u32 v, blk_size, sg_elems, opt_io_size;
@@ -702,17 +525,14 @@ static int virtblk_probe(struct virtio_device *vdev)
 
 	/* We need an extra sg elements at head and tail. */
 	sg_elems += 2;
-	vdev->priv = vblk = kmalloc(sizeof(*vblk) +
-				    sizeof(vblk->sg[0]) * sg_elems, GFP_KERNEL);
+	vdev->priv = vblk = kmalloc(sizeof(*vblk), GFP_KERNEL);
 	if (!vblk) {
 		err = -ENOMEM;
 		goto out_free_index;
 	}
 
-	init_waitqueue_head(&vblk->queue_wait);
 	vblk->vdev = vdev;
 	vblk->sg_elems = sg_elems;
-	sg_init_table(vblk->sg, vblk->sg_elems);
 	mutex_init(&vblk->config_lock);
 
 	INIT_WORK(&vblk->config_work, virtblk_config_changed_work);
@@ -721,31 +541,27 @@ static int virtblk_probe(struct virtio_device *vdev)
 	err = init_vq(vblk);
 	if (err)
 		goto out_free_vblk;
-
-	pool_size = sizeof(struct virtblk_req);
-	if (use_bio)
-		pool_size += sizeof(struct scatterlist) * sg_elems;
-	vblk->pool = mempool_create_kmalloc_pool(1, pool_size);
-	if (!vblk->pool) {
-		err = -ENOMEM;
-		goto out_free_vq;
-	}
+	spin_lock_init(&vblk->vq_lock);
 
 	/* FIXME: How many partitions? How long is a piece of string? */
 	vblk->disk = alloc_disk(1 << PART_BITS);
 	if (!vblk->disk) {
 		err = -ENOMEM;
-		goto out_mempool;
+		goto out_free_vq;
 	}
 
-	q = vblk->disk->queue = blk_init_queue(virtblk_request, NULL);
+	virtio_mq_reg.cmd_size =
+		sizeof(struct virtblk_req) +
+		sizeof(struct scatterlist) * sg_elems;
+
+	q = vblk->disk->queue = blk_mq_init_queue(&virtio_mq_reg, vblk);
 	if (!q) {
 		err = -ENOMEM;
 		goto out_put_disk;
 	}
 
-	if (use_bio)
-		blk_queue_make_request(q, virtblk_make_request);
+	blk_mq_init_commands(q, virtblk_init_vbr, vblk);
+
 	q->queuedata = vblk;
 
 	virtblk_name_format("vd", index, vblk->disk->disk_name, DISK_NAME_LEN);
@@ -848,8 +664,6 @@ out_del_disk:
 	blk_cleanup_queue(vblk->disk->queue);
 out_put_disk:
 	put_disk(vblk->disk);
-out_mempool:
-	mempool_destroy(vblk->pool);
 out_free_vq:
 	vdev->config->del_vqs(vdev);
 out_free_vblk:
@@ -881,7 +695,6 @@ static void virtblk_remove(struct virtio_device *vdev)
 
 	refc = atomic_read(&disk_to_dev(vblk->disk)->kobj.kref.refcount);
 	put_disk(vblk->disk);
-	mempool_destroy(vblk->pool);
 	vdev->config->del_vqs(vdev);
 	kfree(vblk);
 
@@ -905,10 +718,7 @@ static int virtblk_freeze(struct virtio_device *vdev)
 
 	flush_work(&vblk->config_work);
 
-	spin_lock_irq(vblk->disk->queue->queue_lock);
-	blk_stop_queue(vblk->disk->queue);
-	spin_unlock_irq(vblk->disk->queue->queue_lock);
-	blk_sync_queue(vblk->disk->queue);
+	blk_mq_stop_hw_queues(vblk->disk->queue);
 
 	vdev->config->del_vqs(vdev);
 	return 0;
@@ -921,11 +731,9 @@ static int virtblk_restore(struct virtio_device *vdev)
 
 	vblk->config_enable = true;
 	ret = init_vq(vdev->priv);
-	if (!ret) {
-		spin_lock_irq(vblk->disk->queue->queue_lock);
-		blk_start_queue(vblk->disk->queue);
-		spin_unlock_irq(vblk->disk->queue->queue_lock);
-	}
+	if (!ret)
+		blk_mq_start_stopped_hw_queues(vblk->disk->queue);
+
 	return ret;
 }
 #endif