 drivers/scsi/virtio_scsi.c | 52 --------------------------------------------
 1 file changed, 52 deletions(-)
diff --git a/drivers/scsi/virtio_scsi.c b/drivers/scsi/virtio_scsi.c
index 1c72db94270e..198af631244c 100644
--- a/drivers/scsi/virtio_scsi.c
+++ b/drivers/scsi/virtio_scsi.c
@@ -68,33 +68,6 @@ struct virtio_scsi_vq {
 	struct virtqueue *vq;
 };
 
-/*
- * Per-target queue state.
- *
- * This struct holds the data needed by the queue steering policy. When a
- * target is sent multiple requests, we need to drive them to the same queue so
- * that FIFO processing order is kept. However, if a target was idle, we can
- * choose a queue arbitrarily. In this case the queue is chosen according to
- * the current VCPU, so the driver expects the number of request queues to be
- * equal to the number of VCPUs. This makes it easy and fast to select the
- * queue, and also lets the driver optimize the IRQ affinity for the virtqueues
- * (each virtqueue's affinity is set to the CPU that "owns" the queue).
- *
- * tgt_seq is held to serialize reading and writing req_vq.
- *
- * Decrements of reqs are never concurrent with writes of req_vq: before the
- * decrement reqs will be != 0; after the decrement the virtqueue completion
- * routine will not use the req_vq so it can be changed by a new request.
- * Thus they can happen outside the tgt_seq, provided of course we make reqs
- * an atomic_t.
- */
-struct virtio_scsi_target_state {
-	seqcount_t tgt_seq;
-
-	/* Currently active virtqueue for requests sent to this target. */
-	struct virtio_scsi_vq *req_vq;
-};
-
 /* Driver instance state */
 struct virtio_scsi {
 	struct virtio_device *vdev;
@@ -693,29 +666,6 @@ static int virtscsi_abort(struct scsi_cmnd *sc)
 	return virtscsi_tmf(vscsi, cmd);
 }
 
-static int virtscsi_target_alloc(struct scsi_target *starget)
-{
-	struct Scsi_Host *sh = dev_to_shost(starget->dev.parent);
-	struct virtio_scsi *vscsi = shost_priv(sh);
-
-	struct virtio_scsi_target_state *tgt =
-				kmalloc(sizeof(*tgt), GFP_KERNEL);
-	if (!tgt)
-		return -ENOMEM;
-
-	seqcount_init(&tgt->tgt_seq);
-	tgt->req_vq = &vscsi->req_vqs[0];
-
-	starget->hostdata = tgt;
-	return 0;
-}
-
-static void virtscsi_target_destroy(struct scsi_target *starget)
-{
-	struct virtio_scsi_target_state *tgt = starget->hostdata;
-	kfree(tgt);
-}
-
 static int virtscsi_map_queues(struct Scsi_Host *shost)
 {
 	struct virtio_scsi *vscsi = shost_priv(shost);
@@ -748,8 +698,6 @@ static struct scsi_host_template virtscsi_host_template = {
 
 	.dma_boundary = UINT_MAX,
 	.use_clustering = ENABLE_CLUSTERING,
-	.target_alloc = virtscsi_target_alloc,
-	.target_destroy = virtscsi_target_destroy,
 	.map_queues = virtscsi_map_queues,
 	.track_queue_depth = 1,
 	.force_blk_mq = 1,
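
The comment deleted in the first hunk describes the old steering policy precisely enough to reconstruct its shape: a request to a busy target must reuse that target's queue so FIFO order is kept, a request to an idle target may repoint req_vq at the current CPU's queue, and tgt_seq serializes the two paths. Below is a minimal sketch of such a seqcount reader/writer pair, reconstructed from the removed comment rather than taken from the deleted driver code; the helper name virtscsi_pick_vq, the per-target reqs counter (mentioned by the comment but no longer present in the struct), and the num_queues field are all assumptions.

static struct virtio_scsi_vq *virtscsi_pick_vq(struct virtio_scsi *vscsi,
					       struct virtio_scsi_target_state *tgt)
{
	struct virtio_scsi_vq *vq;
	unsigned long seq;

	if (atomic_inc_return(&tgt->reqs) > 1) {
		/*
		 * Target already has requests in flight: keep using its
		 * current queue so FIFO order holds. Retry the read if a
		 * writer races with us.
		 */
		do {
			seq = read_seqcount_begin(&tgt->tgt_seq);
			vq = tgt->req_vq;
		} while (read_seqcount_retry(&tgt->tgt_seq, seq));
	} else {
		/*
		 * Target was idle: any queue works, so pick the current
		 * CPU's queue (the comment assumes one request queue per
		 * VCPU, with each virtqueue's IRQ affinity set to the CPU
		 * that owns it). Since reqs was 0, no other writer can be
		 * concurrent with this update.
		 */
		write_seqcount_begin(&tgt->tgt_seq);
		vq = &vscsi->req_vqs[smp_processor_id() % vscsi->num_queues];
		tgt->req_vq = vq;
		write_seqcount_end(&tgt->tgt_seq);
	}

	return vq;
}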
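
What makes the per-target state safe to delete is the blk-mq path the host template keeps: with .force_blk_mq set, every request arrives on a hardware context that .map_queues has already tied to a request virtqueue, so the driver no longer needs to pick, or remember, a queue per target. A map_queues implementation in this style would delegate to blk_mq_virtio_map_queues(), which derives the CPU-to-queue map from the virtio device's interrupt affinity. The body below is a sketch of what that looked like in this era of the driver; the offset of 2 (skipping the control and event virtqueues that precede the request queues) is an assumption, not part of this patch.

static int virtscsi_map_queues(struct Scsi_Host *shost)
{
	struct virtio_scsi *vscsi = shost_priv(shost);

	/*
	 * Let blk-mq build the CPU<->hw queue map from the virtio
	 * device's MSI-X vector affinity; request virtqueues start
	 * after the control and event virtqueues, hence the offset 2.
	 */
	return blk_mq_virtio_map_queues(&shost->tag_set, vscsi->vdev, 2);
}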