Diffstat (limited to 'drivers/scsi/virtio_scsi.c')
 drivers/scsi/virtio_scsi.c | 129
 1 file changed, 16 insertions(+), 113 deletions(-)
diff --git a/drivers/scsi/virtio_scsi.c b/drivers/scsi/virtio_scsi.c
index 7c28e8d4955a..45d04631888a 100644
--- a/drivers/scsi/virtio_scsi.c
+++ b/drivers/scsi/virtio_scsi.c
@@ -91,9 +91,6 @@ struct virtio_scsi_vq {
 struct virtio_scsi_target_state {
         seqcount_t tgt_seq;
 
-        /* Count of outstanding requests. */
-        atomic_t reqs;
-
         /* Currently active virtqueue for requests sent to this target. */
         struct virtio_scsi_vq *req_vq;
 };
@@ -152,8 +149,6 @@ static void virtscsi_complete_cmd(struct virtio_scsi *vscsi, void *buf)
         struct virtio_scsi_cmd *cmd = buf;
         struct scsi_cmnd *sc = cmd->sc;
         struct virtio_scsi_cmd_resp *resp = &cmd->resp.cmd;
-        struct virtio_scsi_target_state *tgt =
-                                scsi_target(sc->device)->hostdata;
 
         dev_dbg(&sc->device->sdev_gendev,
                 "cmd %p response %u status %#02x sense_len %u\n",
@@ -210,8 +205,6 @@ static void virtscsi_complete_cmd(struct virtio_scsi *vscsi, void *buf)
         }
 
         sc->scsi_done(sc);
-
-        atomic_dec(&tgt->reqs);
 }
 
 static void virtscsi_vq_done(struct virtio_scsi *vscsi,
@@ -529,11 +522,20 @@ static void virtio_scsi_init_hdr_pi(struct virtio_device *vdev,
 }
 #endif
 
-static int virtscsi_queuecommand(struct virtio_scsi *vscsi,
-                                 struct virtio_scsi_vq *req_vq,
+static struct virtio_scsi_vq *virtscsi_pick_vq_mq(struct virtio_scsi *vscsi,
+                                                  struct scsi_cmnd *sc)
+{
+        u32 tag = blk_mq_unique_tag(sc->request);
+        u16 hwq = blk_mq_unique_tag_to_hwq(tag);
+
+        return &vscsi->req_vqs[hwq];
+}
+
+static int virtscsi_queuecommand(struct Scsi_Host *shost,
                                  struct scsi_cmnd *sc)
 {
-        struct Scsi_Host *shost = virtio_scsi_host(vscsi->vdev);
+        struct virtio_scsi *vscsi = shost_priv(shost);
+        struct virtio_scsi_vq *req_vq = virtscsi_pick_vq_mq(vscsi, sc);
         struct virtio_scsi_cmd *cmd = scsi_cmd_priv(sc);
         unsigned long flags;
         int req_size;
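
Note on the hunk above: the new virtscsi_pick_vq_mq() relies on blk-mq's "unique tag", in which blk_mq_unique_tag() packs the hardware-queue index into the upper 16 bits and the per-queue tag into the lower 16 bits; blk_mq_unique_tag_to_hwq() and blk_mq_unique_tag_to_tag() decode it. Since this driver registers one request virtqueue per blk-mq hardware queue (via .map_queues in the host template), the decoded index can be used directly. A minimal sketch of the decode, for illustration only:

        /* sketch: decoding blk-mq's unique tag for a request */
        u32 unique = blk_mq_unique_tag(sc->request);      /* (hwq << 16) | tag */
        u16 hwq    = blk_mq_unique_tag_to_hwq(unique);    /* unique >> 16      */
        u16 tag    = blk_mq_unique_tag_to_tag(unique);    /* unique & 0xffff   */

        /* one request virtqueue per hardware queue, so direct indexing works */
        struct virtio_scsi_vq *vq = &vscsi->req_vqs[hwq];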
@@ -576,79 +578,6 @@ static int virtscsi_queuecommand(struct virtio_scsi *vscsi,
         return 0;
 }
 
-static int virtscsi_queuecommand_single(struct Scsi_Host *sh,
-                                        struct scsi_cmnd *sc)
-{
-        struct virtio_scsi *vscsi = shost_priv(sh);
-        struct virtio_scsi_target_state *tgt =
-                                scsi_target(sc->device)->hostdata;
-
-        atomic_inc(&tgt->reqs);
-        return virtscsi_queuecommand(vscsi, &vscsi->req_vqs[0], sc);
-}
-
-static struct virtio_scsi_vq *virtscsi_pick_vq_mq(struct virtio_scsi *vscsi,
-                                                  struct scsi_cmnd *sc)
-{
-        u32 tag = blk_mq_unique_tag(sc->request);
-        u16 hwq = blk_mq_unique_tag_to_hwq(tag);
-
-        return &vscsi->req_vqs[hwq];
-}
-
-static struct virtio_scsi_vq *virtscsi_pick_vq(struct virtio_scsi *vscsi,
-                                               struct virtio_scsi_target_state *tgt)
-{
-        struct virtio_scsi_vq *vq;
-        unsigned long flags;
-        u32 queue_num;
-
-        local_irq_save(flags);
-        if (atomic_inc_return(&tgt->reqs) > 1) {
-                unsigned long seq;
-
-                do {
-                        seq = read_seqcount_begin(&tgt->tgt_seq);
-                        vq = tgt->req_vq;
-                } while (read_seqcount_retry(&tgt->tgt_seq, seq));
-        } else {
-                /* no writes can be concurrent because of atomic_t */
-                write_seqcount_begin(&tgt->tgt_seq);
-
-                /* keep previous req_vq if a reader just arrived */
-                if (unlikely(atomic_read(&tgt->reqs) > 1)) {
-                        vq = tgt->req_vq;
-                        goto unlock;
-                }
-
-                queue_num = smp_processor_id();
-                while (unlikely(queue_num >= vscsi->num_queues))
-                        queue_num -= vscsi->num_queues;
-                tgt->req_vq = vq = &vscsi->req_vqs[queue_num];
- unlock:
-                write_seqcount_end(&tgt->tgt_seq);
-        }
-        local_irq_restore(flags);
-
-        return vq;
-}
-
-static int virtscsi_queuecommand_multi(struct Scsi_Host *sh,
-                                       struct scsi_cmnd *sc)
-{
-        struct virtio_scsi *vscsi = shost_priv(sh);
-        struct virtio_scsi_target_state *tgt =
-                                scsi_target(sc->device)->hostdata;
-        struct virtio_scsi_vq *req_vq;
-
-        if (shost_use_blk_mq(sh))
-                req_vq = virtscsi_pick_vq_mq(vscsi, sc);
-        else
-                req_vq = virtscsi_pick_vq(vscsi, tgt);
-
-        return virtscsi_queuecommand(vscsi, req_vq, sc);
-}
-
 static int virtscsi_tmf(struct virtio_scsi *vscsi, struct virtio_scsi_cmd *cmd)
 {
         DECLARE_COMPLETION_ONSTACK(comp);
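
Note on the removal above: virtscsi_pick_vq() only served the legacy (non-blk-mq) submission path. It spread targets across virtqueues by hand: tgt->reqs counted in-flight commands so a target could only switch virtqueues while idle, the tgt_seq seqcount let readers sample req_vq without a lock, and the subtraction loop was a division-free modulo of the CPU id. Once blk-mq is mandatory, every request already names its hardware queue, so the counter and the seqcount bookkeeping become unnecessary. A rough equivalent of the removed steering choice, written with an explicit modulo (illustrative only, not the patched code):

        /* pick a virtqueue for an idle target: smp_processor_id() % num_queues */
        u32 queue_num = smp_processor_id() % vscsi->num_queues;

        tgt->req_vq = &vscsi->req_vqs[queue_num];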
@@ -775,7 +704,6 @@ static int virtscsi_target_alloc(struct scsi_target *starget)
                 return -ENOMEM;
 
         seqcount_init(&tgt->tgt_seq);
-        atomic_set(&tgt->reqs, 0);
         tgt->req_vq = &vscsi->req_vqs[0];
 
         starget->hostdata = tgt;
@@ -805,33 +733,13 @@ static enum blk_eh_timer_return virtscsi_eh_timed_out(struct scsi_cmnd *scmnd)
         return BLK_EH_RESET_TIMER;
 }
 
-static struct scsi_host_template virtscsi_host_template_single = {
-        .module = THIS_MODULE,
-        .name = "Virtio SCSI HBA",
-        .proc_name = "virtio_scsi",
-        .this_id = -1,
-        .cmd_size = sizeof(struct virtio_scsi_cmd),
-        .queuecommand = virtscsi_queuecommand_single,
-        .change_queue_depth = virtscsi_change_queue_depth,
-        .eh_abort_handler = virtscsi_abort,
-        .eh_device_reset_handler = virtscsi_device_reset,
-        .eh_timed_out = virtscsi_eh_timed_out,
-        .slave_alloc = virtscsi_device_alloc,
-
-        .dma_boundary = UINT_MAX,
-        .use_clustering = ENABLE_CLUSTERING,
-        .target_alloc = virtscsi_target_alloc,
-        .target_destroy = virtscsi_target_destroy,
-        .track_queue_depth = 1,
-};
-
-static struct scsi_host_template virtscsi_host_template_multi = {
+static struct scsi_host_template virtscsi_host_template = {
         .module = THIS_MODULE,
         .name = "Virtio SCSI HBA",
         .proc_name = "virtio_scsi",
         .this_id = -1,
         .cmd_size = sizeof(struct virtio_scsi_cmd),
-        .queuecommand = virtscsi_queuecommand_multi,
+        .queuecommand = virtscsi_queuecommand,
         .change_queue_depth = virtscsi_change_queue_depth,
         .eh_abort_handler = virtscsi_abort,
         .eh_device_reset_handler = virtscsi_device_reset,
@@ -844,6 +752,7 @@ static struct scsi_host_template virtscsi_host_template_multi = {
         .target_destroy = virtscsi_target_destroy,
         .map_queues = virtscsi_map_queues,
         .track_queue_depth = 1,
+        .force_blk_mq = 1,
 };
 
 #define virtscsi_config_get(vdev, fld) \
@@ -936,7 +845,6 @@ static int virtscsi_probe(struct virtio_device *vdev)
         u32 sg_elems, num_targets;
         u32 cmd_per_lun;
         u32 num_queues;
-        struct scsi_host_template *hostt;
 
         if (!vdev->config->get) {
                 dev_err(&vdev->dev, "%s failure: config access disabled\n",
@@ -949,12 +857,7 @@ static int virtscsi_probe(struct virtio_device *vdev)
 
         num_targets = virtscsi_config_get(vdev, max_target) + 1;
 
-        if (num_queues == 1)
-                hostt = &virtscsi_host_template_single;
-        else
-                hostt = &virtscsi_host_template_multi;
-
-        shost = scsi_host_alloc(hostt,
+        shost = scsi_host_alloc(&virtscsi_host_template,
                 sizeof(*vscsi) + sizeof(vscsi->req_vqs[0]) * num_queues);
         if (!shost)
                 return -ENOMEM;
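
Note on the scsi_host_alloc() call: its second argument is the size of the driver-private area that shost_priv() later returns, so the per-queue virtqueue array is allocated together with the Scsi_Host in one go. A sketch of the pattern, assuming struct virtio_scsi ends in a flexible req_vqs[] array (the real structure has more fields than shown here):

        /* sketch: host-private data with a trailing per-queue array */
        struct virtio_scsi {
                struct virtio_device *vdev;
                u32 num_queues;
                /* ... */
                struct virtio_scsi_vq req_vqs[];  /* one per request virtqueue */
        };

        shost = scsi_host_alloc(&virtscsi_host_template,
                                sizeof(struct virtio_scsi) +
                                num_queues * sizeof(struct virtio_scsi_vq));
        vscsi = shost_priv(shost);                /* points at the private area */
        vscsi->num_queues = num_queues;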