path: root/drivers/scsi/virtio_scsi.c
author	Paolo Bonzini <pbonzini@redhat.com>	2013-04-08 09:33:25 -0400
committer	Rusty Russell <rusty@rustcorp.com.au>	2013-04-08 09:36:55 -0400
commit	9141a4ca0d9551729573042660e9bce83a01e0af (patch)
tree	f9082304ab08c29f1abc5cca230a2529ab7c9b2f /drivers/scsi/virtio_scsi.c
parent	10f34f64d3a50912ae49c67c08c9162effdf546a (diff)
virtio-scsi: introduce multiqueue support
This patch adds queue steering to virtio-scsi. When a target is sent
multiple requests, we always drive them to the same queue so that FIFO
processing order is kept. However, if a target was idle, we can choose a
queue arbitrarily. In this case the queue is chosen according to the
current VCPU, so the driver expects the number of request queues to be
equal to the number of VCPUs. This makes it easy and fast to select the
queue, and also lets the driver optimize the IRQ affinity for the
virtqueues (each virtqueue's affinity is set to the CPU that "owns" the
queue).

The speedup comes from improving cache locality and giving CPU affinity
to the virtqueues, which is why this scheme was selected. Assuming that
the thread that is sending requests to the device is I/O-bound, it is
likely to be sleeping at the time the ISR is executed, and thus executing
the ISR on the same processor that sent the requests is cheap.

However, the kernel will not execute the ISR on the "best" processor
unless you explicitly set the affinity. This is because in practice you
will have many such I/O-bound processes and thus many otherwise idle
processors. Then the kernel will execute the ISR on a random processor,
rather than the one that is sending requests to the device.

The alternative to per-CPU virtqueues is per-target virtqueues. To
achieve the same locality, we could dynamically choose the virtqueue's
affinity based on the CPU of the last task that sent a request. This is
less appealing because we do not set the affinity directly---we only
provide a hint to the irqbalanced running in userspace. Dynamically
changing the affinity only works if the userspace applies the hint
fast enough.

Cc: linux-scsi@vger.kernel.org
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Wanlong Gao <gaowanlong@cn.fujitsu.com>
Reviewed-by: Asias He <asias@redhat.com>
Tested-by: Venkatesh Srinivas <venkateshs@google.com>
Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
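[Editor's note: the following is a minimal, user-space C sketch of the steering policy described above, not the driver code itself. NUM_QUEUES, struct queue, struct target, pick_queue() and complete_request() are illustrative stand-ins for vscsi->num_queues, vscsi->req_vqs[], struct virtio_scsi_target_state, virtscsi_pick_vq() and virtscsi_complete_cmd(); the driver's tgt_lock and memory barriers are deliberately omitted.]

/*
 * Simplified model of the per-target queue steering policy: a busy target
 * keeps using its current queue so FIFO order is preserved; an idle target
 * is (re)bound to the queue that matches the CPU issuing the request.
 */
#define _GNU_SOURCE
#include <sched.h>		/* sched_getcpu() */
#include <stdatomic.h>
#include <stdio.h>

#define NUM_QUEUES 4		/* plays the role of vscsi->num_queues */

struct queue { int id; };
static struct queue queues[NUM_QUEUES];

struct target {
	atomic_int reqs;	/* outstanding requests for this target */
	struct queue *req_vq;	/* queue the target is currently bound to */
};

static struct queue *pick_queue(struct target *tgt)
{
	/* Non-idle target: reuse the queue it is already bound to. */
	if (atomic_fetch_add(&tgt->reqs, 1) > 0)
		return tgt->req_vq;

	/* Idle target: steer it to the queue owned by the current CPU. */
	int cpu = sched_getcpu();
	if (cpu < 0)
		cpu = 0;
	tgt->req_vq = &queues[cpu % NUM_QUEUES];
	return tgt->req_vq;
}

/* Completion path: drop the count, mirroring the driver's atomic_dec. */
static void complete_request(struct target *tgt)
{
	atomic_fetch_sub(&tgt->reqs, 1);
}

int main(void)
{
	for (int i = 0; i < NUM_QUEUES; i++)
		queues[i].id = i;

	struct target tgt = { .reqs = 0, .req_vq = NULL };
	struct queue *q = pick_queue(&tgt);	/* idle -> chosen by CPU */
	printf("first request steered to queue %d\n", q->id);
	complete_request(&tgt);
	return 0;
}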
Diffstat (limited to 'drivers/scsi/virtio_scsi.c')
-rw-r--r--	drivers/scsi/virtio_scsi.c	282
1 files changed, 254 insertions, 28 deletions
diff --git a/drivers/scsi/virtio_scsi.c b/drivers/scsi/virtio_scsi.c
index dc2daec9a10d..8dcdef0783db 100644
--- a/drivers/scsi/virtio_scsi.c
+++ b/drivers/scsi/virtio_scsi.c
@@ -22,12 +22,14 @@
 #include <linux/virtio_ids.h>
 #include <linux/virtio_config.h>
 #include <linux/virtio_scsi.h>
+#include <linux/cpu.h>
 #include <scsi/scsi_host.h>
 #include <scsi/scsi_device.h>
 #include <scsi/scsi_cmnd.h>
 
 #define VIRTIO_SCSI_MEMPOOL_SZ 64
 #define VIRTIO_SCSI_EVENT_LEN 8
+#define VIRTIO_SCSI_VQ_BASE 2
 
 /* Command queue element */
 struct virtio_scsi_cmd {
@@ -59,22 +61,58 @@ struct virtio_scsi_vq {
 	struct virtqueue *vq;
 };
 
-/* Per-target queue state */
+/*
+ * Per-target queue state.
+ *
+ * This struct holds the data needed by the queue steering policy. When a
+ * target is sent multiple requests, we need to drive them to the same queue so
+ * that FIFO processing order is kept. However, if a target was idle, we can
+ * choose a queue arbitrarily. In this case the queue is chosen according to
+ * the current VCPU, so the driver expects the number of request queues to be
+ * equal to the number of VCPUs. This makes it easy and fast to select the
+ * queue, and also lets the driver optimize the IRQ affinity for the virtqueues
+ * (each virtqueue's affinity is set to the CPU that "owns" the queue).
+ *
+ * An interesting effect of this policy is that only writes to req_vq need to
+ * take the tgt_lock. Read can be done outside the lock because:
+ *
+ * - writes of req_vq only occur when atomic_inc_return(&tgt->reqs) returns 1.
+ *   In that case, no other CPU is reading req_vq: even if they were in
+ *   virtscsi_queuecommand_multi, they would be spinning on tgt_lock.
+ *
+ * - reads of req_vq only occur when the target is not idle (reqs != 0).
+ *   A CPU that enters virtscsi_queuecommand_multi will not modify req_vq.
+ *
+ * Similarly, decrements of reqs are never concurrent with writes of req_vq.
+ * Thus they can happen outside the tgt_lock, provided of course we make reqs
+ * an atomic_t.
+ */
 struct virtio_scsi_target_state {
-	/* Never held at the same time as vq_lock. */
+	/* This spinlock never held at the same time as vq_lock. */
 	spinlock_t tgt_lock;
+
+	/* Count of outstanding requests. */
+	atomic_t reqs;
+
+	/* Currently active virtqueue for requests sent to this target. */
+	struct virtio_scsi_vq *req_vq;
 };
 
 /* Driver instance state */
 struct virtio_scsi {
 	struct virtio_device *vdev;
 
-	struct virtio_scsi_vq ctrl_vq;
-	struct virtio_scsi_vq event_vq;
-	struct virtio_scsi_vq req_vq;
-
 	/* Get some buffers ready for event vq */
 	struct virtio_scsi_event_node event_list[VIRTIO_SCSI_EVENT_LEN];
+
+	u32 num_queues;
+
+	/* If the affinity hint is set for virtqueues */
+	bool affinity_hint_set;
+
+	struct virtio_scsi_vq ctrl_vq;
+	struct virtio_scsi_vq event_vq;
+	struct virtio_scsi_vq req_vqs[];
 };
 
 static struct kmem_cache *virtscsi_cmd_cache;
@@ -109,6 +147,8 @@ static void virtscsi_complete_cmd(struct virtio_scsi *vscsi, void *buf)
 	struct virtio_scsi_cmd *cmd = buf;
 	struct scsi_cmnd *sc = cmd->sc;
 	struct virtio_scsi_cmd_resp *resp = &cmd->resp.cmd;
+	struct virtio_scsi_target_state *tgt =
+				scsi_target(sc->device)->hostdata;
 
 	dev_dbg(&sc->device->sdev_gendev,
 		"cmd %p response %u status %#02x sense_len %u\n",
@@ -163,6 +203,8 @@ static void virtscsi_complete_cmd(struct virtio_scsi *vscsi, void *buf)
 
 	mempool_free(cmd, virtscsi_cmd_pool);
 	sc->scsi_done(sc);
+
+	atomic_dec(&tgt->reqs);
 }
 
 static void virtscsi_vq_done(struct virtio_scsi *vscsi,
@@ -187,8 +229,42 @@ static void virtscsi_req_done(struct virtqueue *vq)
 {
 	struct Scsi_Host *sh = virtio_scsi_host(vq->vdev);
 	struct virtio_scsi *vscsi = shost_priv(sh);
+	int index = vq->index - VIRTIO_SCSI_VQ_BASE;
+	struct virtio_scsi_vq *req_vq = &vscsi->req_vqs[index];
 
-	virtscsi_vq_done(vscsi, &vscsi->req_vq, virtscsi_complete_cmd);
+	/*
+	 * Read req_vq before decrementing the reqs field in
+	 * virtscsi_complete_cmd.
+	 *
+	 * With barriers:
+	 *
+	 * 	CPU #0			virtscsi_queuecommand_multi (CPU #1)
+	 * 	------------------------------------------------------------
+	 * 	lock vq_lock
+	 * 	read req_vq
+	 * 	read reqs (reqs = 1)
+	 * 	write reqs (reqs = 0)
+	 * 				increment reqs (reqs = 1)
+	 * 				write req_vq
+	 *
+	 * Possible reordering without barriers:
+	 *
+	 * 	CPU #0			virtscsi_queuecommand_multi (CPU #1)
+	 * 	------------------------------------------------------------
+	 * 	lock vq_lock
+	 * 	read reqs (reqs = 1)
+	 * 	write reqs (reqs = 0)
+	 * 				increment reqs (reqs = 1)
+	 * 				write req_vq
+	 * 	read (wrong) req_vq
+	 *
+	 * We do not need a full smp_rmb, because req_vq is required to get
+	 * to tgt->reqs: tgt is &vscsi->tgt[sc->device->id], where sc is stored
+	 * in the virtqueue as the user token.
+	 */
+	smp_read_barrier_depends();
+
+	virtscsi_vq_done(vscsi, req_vq, virtscsi_complete_cmd);
 };
 
 static void virtscsi_complete_free(struct virtio_scsi *vscsi, void *buf)
@@ -251,7 +327,7 @@ static void virtscsi_cancel_event_work(struct virtio_scsi *vscsi)
 }
 
 static void virtscsi_handle_transport_reset(struct virtio_scsi *vscsi,
-						struct virtio_scsi_event *event)
+					     struct virtio_scsi_event *event)
 {
 	struct scsi_device *sdev;
 	struct Scsi_Host *shost = virtio_scsi_host(vscsi->vdev);
@@ -410,9 +486,10 @@ static int virtscsi_kick_cmd(struct virtio_scsi_vq *vq,
 	return err;
 }
 
-static int virtscsi_queuecommand(struct Scsi_Host *sh, struct scsi_cmnd *sc)
+static int virtscsi_queuecommand(struct virtio_scsi *vscsi,
+				 struct virtio_scsi_vq *req_vq,
+				 struct scsi_cmnd *sc)
 {
-	struct virtio_scsi *vscsi = shost_priv(sh);
 	struct virtio_scsi_cmd *cmd;
 	int ret;
 
@@ -446,7 +523,7 @@ static int virtscsi_queuecommand(struct Scsi_Host *sh, struct scsi_cmnd *sc)
 	BUG_ON(sc->cmd_len > VIRTIO_SCSI_CDB_SIZE);
 	memcpy(cmd->req.cmd.cdb, sc->cmnd, sc->cmd_len);
 
-	if (virtscsi_kick_cmd(&vscsi->req_vq, cmd,
+	if (virtscsi_kick_cmd(req_vq, cmd,
 			      sizeof cmd->req.cmd, sizeof cmd->resp.cmd,
 			      GFP_ATOMIC) == 0)
 		ret = 0;
@@ -457,6 +534,55 @@ out:
 	return ret;
 }
 
+static int virtscsi_queuecommand_single(struct Scsi_Host *sh,
+					struct scsi_cmnd *sc)
+{
+	struct virtio_scsi *vscsi = shost_priv(sh);
+	struct virtio_scsi_target_state *tgt =
+				scsi_target(sc->device)->hostdata;
+
+	atomic_inc(&tgt->reqs);
+	return virtscsi_queuecommand(vscsi, &vscsi->req_vqs[0], sc);
+}
+
+static struct virtio_scsi_vq *virtscsi_pick_vq(struct virtio_scsi *vscsi,
+					       struct virtio_scsi_target_state *tgt)
+{
+	struct virtio_scsi_vq *vq;
+	unsigned long flags;
+	u32 queue_num;
+
+	spin_lock_irqsave(&tgt->tgt_lock, flags);
+
+	/*
+	 * The memory barrier after atomic_inc_return matches
+	 * the smp_read_barrier_depends() in virtscsi_req_done.
+	 */
+	if (atomic_inc_return(&tgt->reqs) > 1)
+		vq = ACCESS_ONCE(tgt->req_vq);
+	else {
+		queue_num = smp_processor_id();
+		while (unlikely(queue_num >= vscsi->num_queues))
+			queue_num -= vscsi->num_queues;
+
+		tgt->req_vq = vq = &vscsi->req_vqs[queue_num];
+	}
+
+	spin_unlock_irqrestore(&tgt->tgt_lock, flags);
+	return vq;
+}
+
+static int virtscsi_queuecommand_multi(struct Scsi_Host *sh,
+				       struct scsi_cmnd *sc)
+{
+	struct virtio_scsi *vscsi = shost_priv(sh);
+	struct virtio_scsi_target_state *tgt =
+				scsi_target(sc->device)->hostdata;
+	struct virtio_scsi_vq *req_vq = virtscsi_pick_vq(vscsi, tgt);
+
+	return virtscsi_queuecommand(vscsi, req_vq, sc);
+}
+
 static int virtscsi_tmf(struct virtio_scsi *vscsi, struct virtio_scsi_cmd *cmd)
 {
 	DECLARE_COMPLETION_ONSTACK(comp);
@@ -533,6 +659,8 @@ static int virtscsi_target_alloc(struct scsi_target *starget)
 		return -ENOMEM;
 
 	spin_lock_init(&tgt->tgt_lock);
+	atomic_set(&tgt->reqs, 0);
+	tgt->req_vq = NULL;
 
 	starget->hostdata = tgt;
 	return 0;
@@ -544,12 +672,28 @@ static void virtscsi_target_destroy(struct scsi_target *starget)
 	kfree(tgt);
 }
 
-static struct scsi_host_template virtscsi_host_template = {
+static struct scsi_host_template virtscsi_host_template_single = {
 	.module = THIS_MODULE,
 	.name = "Virtio SCSI HBA",
 	.proc_name = "virtio_scsi",
-	.queuecommand = virtscsi_queuecommand,
 	.this_id = -1,
+	.queuecommand = virtscsi_queuecommand_single,
+	.eh_abort_handler = virtscsi_abort,
+	.eh_device_reset_handler = virtscsi_device_reset,
+
+	.can_queue = 1024,
+	.dma_boundary = UINT_MAX,
+	.use_clustering = ENABLE_CLUSTERING,
+	.target_alloc = virtscsi_target_alloc,
+	.target_destroy = virtscsi_target_destroy,
+};
+
+static struct scsi_host_template virtscsi_host_template_multi = {
+	.module = THIS_MODULE,
+	.name = "Virtio SCSI HBA",
+	.proc_name = "virtio_scsi",
+	.this_id = -1,
+	.queuecommand = virtscsi_queuecommand_multi,
 	.eh_abort_handler = virtscsi_abort,
 	.eh_device_reset_handler = virtscsi_device_reset,
 
@@ -577,6 +721,47 @@ static struct scsi_host_template virtscsi_host_template = {
 		&__val, sizeof(__val)); \
 	})
 
+static void __virtscsi_set_affinity(struct virtio_scsi *vscsi, bool affinity)
+{
+	int i;
+	int cpu;
+
+	/* In multiqueue mode, when the number of cpu is equal
+	 * to the number of request queues, we let the qeueues
+	 * to be private to one cpu by setting the affinity hint
+	 * to eliminate the contention.
+	 */
+	if ((vscsi->num_queues == 1 ||
+	     vscsi->num_queues != num_online_cpus()) && affinity) {
+		if (vscsi->affinity_hint_set)
+			affinity = false;
+		else
+			return;
+	}
+
+	if (affinity) {
+		i = 0;
+		for_each_online_cpu(cpu) {
+			virtqueue_set_affinity(vscsi->req_vqs[i].vq, cpu);
+			i++;
+		}
+
+		vscsi->affinity_hint_set = true;
+	} else {
+		for (i = 0; i < vscsi->num_queues - VIRTIO_SCSI_VQ_BASE; i++)
+			virtqueue_set_affinity(vscsi->req_vqs[i].vq, -1);
+
+		vscsi->affinity_hint_set = false;
+	}
+}
+
+static void virtscsi_set_affinity(struct virtio_scsi *vscsi, bool affinity)
+{
+	get_online_cpus();
+	__virtscsi_set_affinity(vscsi, affinity);
+	put_online_cpus();
+}
+
 static void virtscsi_init_vq(struct virtio_scsi_vq *virtscsi_vq,
 			     struct virtqueue *vq)
 {
@@ -593,6 +778,11 @@ static void virtscsi_scan(struct virtio_device *vdev)
 
 static void virtscsi_remove_vqs(struct virtio_device *vdev)
 {
+	struct Scsi_Host *sh = virtio_scsi_host(vdev);
+	struct virtio_scsi *vscsi = shost_priv(sh);
+
+	virtscsi_set_affinity(vscsi, false);
+
 	/* Stop all the virtqueues. */
 	vdev->config->reset(vdev);
 
@@ -603,27 +793,43 @@ static int virtscsi_init(struct virtio_device *vdev,
 			     struct virtio_scsi *vscsi)
 {
 	int err;
-	struct virtqueue *vqs[3];
+	u32 i;
+	u32 num_vqs;
+	vq_callback_t **callbacks;
+	const char **names;
+	struct virtqueue **vqs;
+
+	num_vqs = vscsi->num_queues + VIRTIO_SCSI_VQ_BASE;
+	vqs = kmalloc(num_vqs * sizeof(struct virtqueue *), GFP_KERNEL);
+	callbacks = kmalloc(num_vqs * sizeof(vq_callback_t *), GFP_KERNEL);
+	names = kmalloc(num_vqs * sizeof(char *), GFP_KERNEL);
+
+	if (!callbacks || !vqs || !names) {
+		err = -ENOMEM;
+		goto out;
+	}
 
-	vq_callback_t *callbacks[] = {
-		virtscsi_ctrl_done,
-		virtscsi_event_done,
-		virtscsi_req_done
-	};
-	const char *names[] = {
-		"control",
-		"event",
-		"request"
-	};
+	callbacks[0] = virtscsi_ctrl_done;
+	callbacks[1] = virtscsi_event_done;
+	names[0] = "control";
+	names[1] = "event";
+	for (i = VIRTIO_SCSI_VQ_BASE; i < num_vqs; i++) {
+		callbacks[i] = virtscsi_req_done;
+		names[i] = "request";
+	}
 
 	/* Discover virtqueues and write information to configuration. */
-	err = vdev->config->find_vqs(vdev, 3, vqs, callbacks, names);
+	err = vdev->config->find_vqs(vdev, num_vqs, vqs, callbacks, names);
 	if (err)
-		return err;
+		goto out;
 
 	virtscsi_init_vq(&vscsi->ctrl_vq, vqs[0]);
 	virtscsi_init_vq(&vscsi->event_vq, vqs[1]);
-	virtscsi_init_vq(&vscsi->req_vq, vqs[2]);
+	for (i = VIRTIO_SCSI_VQ_BASE; i < num_vqs; i++)
+		virtscsi_init_vq(&vscsi->req_vqs[i - VIRTIO_SCSI_VQ_BASE],
+				 vqs[i]);
+
+	virtscsi_set_affinity(vscsi, true);
 
 	virtscsi_config_set(vdev, cdb_size, VIRTIO_SCSI_CDB_SIZE);
 	virtscsi_config_set(vdev, sense_size, VIRTIO_SCSI_SENSE_SIZE);
@@ -631,6 +837,14 @@ static int virtscsi_init(struct virtio_device *vdev,
 	if (virtio_has_feature(vdev, VIRTIO_SCSI_F_HOTPLUG))
 		virtscsi_kick_event_all(vscsi);
 
+	err = 0;
+
+out:
+	kfree(names);
+	kfree(callbacks);
+	kfree(vqs);
+	if (err)
+		virtscsi_remove_vqs(vdev);
 	return err;
 }
 
@@ -641,10 +855,21 @@ static int virtscsi_probe(struct virtio_device *vdev)
 	int err;
 	u32 sg_elems, num_targets;
 	u32 cmd_per_lun;
+	u32 num_queues;
+	struct scsi_host_template *hostt;
+
+	/* We need to know how many queues before we allocate. */
+	num_queues = virtscsi_config_get(vdev, num_queues) ? : 1;
 
 	num_targets = virtscsi_config_get(vdev, max_target) + 1;
 
-	shost = scsi_host_alloc(&virtscsi_host_template, sizeof(*vscsi));
+	if (num_queues == 1)
+		hostt = &virtscsi_host_template_single;
+	else
+		hostt = &virtscsi_host_template_multi;
+
+	shost = scsi_host_alloc(hostt,
+		sizeof(*vscsi) + sizeof(vscsi->req_vqs[0]) * num_queues);
 	if (!shost)
 		return -ENOMEM;
 
@@ -652,6 +877,7 @@ static int virtscsi_probe(struct virtio_device *vdev)
 	shost->sg_tablesize = sg_elems;
 	vscsi = shost_priv(shost);
 	vscsi->vdev = vdev;
+	vscsi->num_queues = num_queues;
 	vdev->priv = shost;
 
 	err = virtscsi_init(vdev, vscsi);