Diffstat (limited to 'drivers')
-rw-r--r--	drivers/target/target_core_alua.c	3
-rw-r--r--	drivers/vhost/tcm_vhost.c	198
2 files changed, 133 insertions, 68 deletions
diff --git a/drivers/target/target_core_alua.c b/drivers/target/target_core_alua.c
index ff1c5ee352cb..cbe48ab41745 100644
--- a/drivers/target/target_core_alua.c
+++ b/drivers/target/target_core_alua.c
@@ -409,6 +409,7 @@ static inline int core_alua_state_standby(
 	case REPORT_LUNS:
 	case RECEIVE_DIAGNOSTIC:
 	case SEND_DIAGNOSTIC:
+		return 0;
 	case MAINTENANCE_IN:
 		switch (cdb[1] & 0x1f) {
 		case MI_REPORT_TARGET_PGS:
@@ -451,6 +452,7 @@ static inline int core_alua_state_unavailable(
 	switch (cdb[0]) {
 	case INQUIRY:
 	case REPORT_LUNS:
+		return 0;
 	case MAINTENANCE_IN:
 		switch (cdb[1] & 0x1f) {
 		case MI_REPORT_TARGET_PGS:
@@ -491,6 +493,7 @@ static inline int core_alua_state_transition(
 	switch (cdb[0]) {
 	case INQUIRY:
 	case REPORT_LUNS:
+		return 0;
 	case MAINTENANCE_IN:
 		switch (cdb[1] & 0x1f) {
 		case MI_REPORT_TARGET_PGS:
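
The three target_core_alua.c hunks above fix the same C fall-through bug: without an explicit return, a CDB that should always be allowed in the Standby, Unavailable, or Transition ALUA states dropped into the MAINTENANCE_IN case and could be failed by its inner switch. A minimal sketch of the hazard, with a hypothetical function name and the hunks' convention of 0 = allow, 1 = reject:

static inline int alua_state_sketch(unsigned char *cdb)
{
	switch (cdb[0]) {
	case INQUIRY:
	case REPORT_LUNS:
		return 0;	/* the added line; without it, control falls through... */
	case MAINTENANCE_IN:
		switch (cdb[1] & 0x1f) {
		case MI_REPORT_TARGET_PGS:
			return 0;
		default:
			return 1;	/* ...and INQUIRY would be rejected here */
		}
	default:
		return 1;
	}
}
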
diff --git a/drivers/vhost/tcm_vhost.c b/drivers/vhost/tcm_vhost.c
index 2968b4934659..957a0b98a5d9 100644
--- a/drivers/vhost/tcm_vhost.c
+++ b/drivers/vhost/tcm_vhost.c
@@ -74,9 +74,8 @@ enum {
 
 struct vhost_scsi {
 	/* Protected by vhost_scsi->dev.mutex */
-	struct tcm_vhost_tpg *vs_tpg[VHOST_SCSI_MAX_TARGET];
+	struct tcm_vhost_tpg **vs_tpg;
 	char vs_vhost_wwpn[TRANSPORT_IQN_LEN];
-	bool vs_endpoint;
 
 	struct vhost_dev dev;
 	struct vhost_virtqueue vqs[VHOST_SCSI_MAX_VQ];
@@ -579,9 +578,27 @@ static void tcm_vhost_submission_work(struct work_struct *work)
579 } 578 }
580} 579}
581 580
581static void vhost_scsi_send_bad_target(struct vhost_scsi *vs,
582 struct vhost_virtqueue *vq, int head, unsigned out)
583{
584 struct virtio_scsi_cmd_resp __user *resp;
585 struct virtio_scsi_cmd_resp rsp;
586 int ret;
587
588 memset(&rsp, 0, sizeof(rsp));
589 rsp.response = VIRTIO_SCSI_S_BAD_TARGET;
590 resp = vq->iov[out].iov_base;
591 ret = __copy_to_user(resp, &rsp, sizeof(rsp));
592 if (!ret)
593 vhost_add_used_and_signal(&vs->dev, vq, head, 0);
594 else
595 pr_err("Faulted on virtio_scsi_cmd_resp\n");
596}
597
582static void vhost_scsi_handle_vq(struct vhost_scsi *vs, 598static void vhost_scsi_handle_vq(struct vhost_scsi *vs,
583 struct vhost_virtqueue *vq) 599 struct vhost_virtqueue *vq)
584{ 600{
601 struct tcm_vhost_tpg **vs_tpg;
585 struct virtio_scsi_cmd_req v_req; 602 struct virtio_scsi_cmd_req v_req;
586 struct tcm_vhost_tpg *tv_tpg; 603 struct tcm_vhost_tpg *tv_tpg;
587 struct tcm_vhost_cmd *tv_cmd; 604 struct tcm_vhost_cmd *tv_cmd;
@@ -590,8 +607,16 @@ static void vhost_scsi_handle_vq(struct vhost_scsi *vs,
 	int head, ret;
 	u8 target;
 
-	/* Must use ioctl VHOST_SCSI_SET_ENDPOINT */
-	if (unlikely(!vs->vs_endpoint))
+	/*
+	 * We can handle the vq only after the endpoint is setup by calling the
+	 * VHOST_SCSI_SET_ENDPOINT ioctl.
+	 *
+	 * TODO: Check that we are running from vhost_worker which acts
+	 * as read-side critical section for vhost kind of RCU.
+	 * See the comments in struct vhost_virtqueue in drivers/vhost/vhost.h
+	 */
+	vs_tpg = rcu_dereference_check(vq->private_data, 1);
+	if (!vs_tpg)
 		return;
 
 	mutex_lock(&vq->mutex);
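
The hunk above replaces the vs_endpoint flag with the per-vq private_data pointer as the "endpoint is ready" signal. vhost uses its own flavor of RCU in which running inside the vhost worker thread is the read-side critical section; the second argument to rcu_dereference_check() is 1 to unconditionally satisfy lockdep, because that worker-context rule cannot be expressed as a lock check (the TODO in the hunk notes the same gap). A condensed, hypothetical reader-side sketch under that assumption:

/*
 * Reader sketch: assumes vhost worker context, which stands in for
 * rcu_read_lock()/rcu_read_unlock(). Illustration only, not the
 * kernel function.
 */
static bool endpoint_ready(struct vhost_virtqueue *vq)
{
	struct tcm_vhost_tpg **tpg;

	/* '1' means: trust the caller, worker context is the critical section */
	tpg = rcu_dereference_check(vq->private_data, 1);
	return tpg != NULL;
}
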
@@ -661,23 +686,11 @@ static void vhost_scsi_handle_vq(struct vhost_scsi *vs,
 
 		/* Extract the tpgt */
 		target = v_req.lun[1];
-		tv_tpg = vs->vs_tpg[target];
+		tv_tpg = ACCESS_ONCE(vs_tpg[target]);
 
 		/* Target does not exist, fail the request */
 		if (unlikely(!tv_tpg)) {
-			struct virtio_scsi_cmd_resp __user *resp;
-			struct virtio_scsi_cmd_resp rsp;
-
-			memset(&rsp, 0, sizeof(rsp));
-			rsp.response = VIRTIO_SCSI_S_BAD_TARGET;
-			resp = vq->iov[out].iov_base;
-			ret = __copy_to_user(resp, &rsp, sizeof(rsp));
-			if (!ret)
-				vhost_add_used_and_signal(&vs->dev,
-						vq, head, 0);
-			else
-				pr_err("Faulted on virtio_scsi_cmd_resp\n");
-
+			vhost_scsi_send_bad_target(vs, vq, head, out);
 			continue;
 		}
 
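
Besides factoring the duplicated error path into vhost_scsi_send_bad_target(), the hunk above switches the slot read to ACCESS_ONCE(), which forces a single load of vs_tpg[target]: the NULL check and every later use then operate on one snapshot, even while set_endpoint/clear_endpoint rewrite the slot concurrently. Without it the compiler may legally reload the slot, reintroducing a check-then-use race. A hedged before/after sketch (use() is a stand-in, not kernel code):

	/* Racy: two loads of the slot; a concurrent clear_endpoint()
	 * can NULL it between them. */
	if (vs_tpg[target])
		use(vs_tpg[target]);

	/* Patched pattern: one load, one snapshot. */
	tv_tpg = ACCESS_ONCE(vs_tpg[target]);
	if (tv_tpg)
		use(tv_tpg);
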
@@ -690,22 +703,13 @@ static void vhost_scsi_handle_vq(struct vhost_scsi *vs,
 		if (IS_ERR(tv_cmd)) {
 			vq_err(vq, "vhost_scsi_allocate_cmd failed %ld\n",
 					PTR_ERR(tv_cmd));
-			break;
+			goto err_cmd;
 		}
 		pr_debug("Allocated tv_cmd: %p exp_data_len: %d, data_direction"
 			": %d\n", tv_cmd, exp_data_len, data_direction);
 
 		tv_cmd->tvc_vhost = vs;
 		tv_cmd->tvc_vq = vq;
-
-		if (unlikely(vq->iov[out].iov_len !=
-				sizeof(struct virtio_scsi_cmd_resp))) {
-			vq_err(vq, "Expecting virtio_scsi_cmd_resp, got %zu"
-				" bytes, out: %d, in: %d\n",
-				vq->iov[out].iov_len, out, in);
-			break;
-		}
-
 		tv_cmd->tvc_resp = vq->iov[out].iov_base;
 
 		/*
@@ -725,7 +729,7 @@ static void vhost_scsi_handle_vq(struct vhost_scsi *vs,
 				" exceeds SCSI_MAX_VARLEN_CDB_SIZE: %d\n",
 				scsi_command_size(tv_cmd->tvc_cdb),
 				TCM_VHOST_MAX_CDB_SIZE);
-			break; /* TODO */
+			goto err_free;
 		}
 		tv_cmd->tvc_lun = ((v_req.lun[2] << 8) | v_req.lun[3]) & 0x3FFF;
 
@@ -738,7 +742,7 @@ static void vhost_scsi_handle_vq(struct vhost_scsi *vs,
 					data_direction == DMA_TO_DEVICE);
 			if (unlikely(ret)) {
 				vq_err(vq, "Failed to map iov to sgl\n");
-				break; /* TODO */
+				goto err_free;
 			}
 		}
 
@@ -759,6 +763,13 @@ static void vhost_scsi_handle_vq(struct vhost_scsi *vs,
 	}
 
 	mutex_unlock(&vq->mutex);
+	return;
+
+err_free:
+	vhost_scsi_free_cmd(tv_cmd);
+err_cmd:
+	vhost_scsi_send_bad_target(vs, vq, head, out);
+	mutex_unlock(&vq->mutex);
 }
 
 static void vhost_scsi_ctl_handle_kick(struct vhost_work *work)
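
The goto err_free/err_cmd conversion above replaces bare break statements that silently abandoned the request: the guest's descriptor was never completed, and a tv_cmd allocated before the failure leaked. The new exit path frees the command when one exists, answers the guest with VIRTIO_SCSI_S_BAD_TARGET, and only then drops the vq mutex. This is the standard kernel unwind idiom, sketched here with hypothetical names:

static int unwind_sketch(void)
{
	int ret;

	ret = alloc_resource_a();	/* hypothetical step */
	if (ret)
		goto err_a;
	ret = alloc_resource_b();	/* hypothetical step */
	if (ret)
		goto err_b;
	return 0;

err_b:
	free_resource_a();	/* undo completed steps in reverse order */
err_a:
	return ret;
}
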
@@ -780,6 +791,20 @@ static void vhost_scsi_handle_kick(struct vhost_work *work)
 	vhost_scsi_handle_vq(vs, vq);
 }
 
+static void vhost_scsi_flush_vq(struct vhost_scsi *vs, int index)
+{
+	vhost_poll_flush(&vs->dev.vqs[index].poll);
+}
+
+static void vhost_scsi_flush(struct vhost_scsi *vs)
+{
+	int i;
+
+	for (i = 0; i < VHOST_SCSI_MAX_VQ; i++)
+		vhost_scsi_flush_vq(vs, i);
+	vhost_work_flush(&vs->dev, &vs->vs_completion_work);
+}
+
 /*
  * Called from vhost_scsi_ioctl() context to walk the list of available
  * tcm_vhost_tpg with an active struct tcm_vhost_nexus
@@ -790,8 +815,10 @@ static int vhost_scsi_set_endpoint(
 {
 	struct tcm_vhost_tport *tv_tport;
 	struct tcm_vhost_tpg *tv_tpg;
+	struct tcm_vhost_tpg **vs_tpg;
+	struct vhost_virtqueue *vq;
+	int index, ret, i, len;
 	bool match = false;
-	int index, ret;
 
 	mutex_lock(&vs->dev.mutex);
 	/* Verify that ring has been setup correctly. */
@@ -803,6 +830,15 @@ static int vhost_scsi_set_endpoint(
 		}
 	}
 
+	len = sizeof(vs_tpg[0]) * VHOST_SCSI_MAX_TARGET;
+	vs_tpg = kzalloc(len, GFP_KERNEL);
+	if (!vs_tpg) {
+		mutex_unlock(&vs->dev.mutex);
+		return -ENOMEM;
+	}
+	if (vs->vs_tpg)
+		memcpy(vs_tpg, vs->vs_tpg, len);
+
 	mutex_lock(&tcm_vhost_mutex);
 	list_for_each_entry(tv_tpg, &tcm_vhost_list, tv_tpg_list) {
 		mutex_lock(&tv_tpg->tv_tpg_mutex);
@@ -817,14 +853,15 @@ static int vhost_scsi_set_endpoint(
 		tv_tport = tv_tpg->tport;
 
 		if (!strcmp(tv_tport->tport_name, t->vhost_wwpn)) {
-			if (vs->vs_tpg[tv_tpg->tport_tpgt]) {
+			if (vs->vs_tpg && vs->vs_tpg[tv_tpg->tport_tpgt]) {
 				mutex_unlock(&tv_tpg->tv_tpg_mutex);
 				mutex_unlock(&tcm_vhost_mutex);
 				mutex_unlock(&vs->dev.mutex);
+				kfree(vs_tpg);
 				return -EEXIST;
 			}
 			tv_tpg->tv_tpg_vhost_count++;
-			vs->vs_tpg[tv_tpg->tport_tpgt] = tv_tpg;
+			vs_tpg[tv_tpg->tport_tpgt] = tv_tpg;
 			smp_mb__after_atomic_inc();
 			match = true;
 		}
@@ -835,12 +872,27 @@ static int vhost_scsi_set_endpoint(
 	if (match) {
 		memcpy(vs->vs_vhost_wwpn, t->vhost_wwpn,
 		       sizeof(vs->vs_vhost_wwpn));
-		vs->vs_endpoint = true;
+		for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
+			vq = &vs->vqs[i];
+			/* Flushing the vhost_work acts as synchronize_rcu */
+			mutex_lock(&vq->mutex);
+			rcu_assign_pointer(vq->private_data, vs_tpg);
+			vhost_init_used(vq);
+			mutex_unlock(&vq->mutex);
+		}
 		ret = 0;
 	} else {
 		ret = -EEXIST;
 	}
 
+	/*
+	 * Act as synchronize_rcu to make sure access to
+	 * old vs->vs_tpg is finished.
+	 */
+	vhost_scsi_flush(vs);
+	kfree(vs->vs_tpg);
+	vs->vs_tpg = vs_tpg;
+
 	mutex_unlock(&vs->dev.mutex);
 	return ret;
 }
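
Taken together, set_endpoint now follows the classic RCU update sequence on the tpg array: copy the current array, modify the copy under the mutexes, publish it with rcu_assign_pointer(), wait out a grace period (here vhost_scsi_flush(), since every reader runs in the worker that the flush drains), and only then free the old array. clear_endpoint, below, mirrors this with a NULL publish. A condensed sketch of the sequence under the same assumptions (identifiers mirror the patch, error handling elided):

/* Update-side sketch, not the full kernel function. */
static void swap_tpg_array(struct vhost_scsi *vs,
			   struct tcm_vhost_tpg **new_tpg)
{
	struct tcm_vhost_tpg **old = vs->vs_tpg;
	int i;

	for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
		struct vhost_virtqueue *vq = &vs->vqs[i];

		mutex_lock(&vq->mutex);
		rcu_assign_pointer(vq->private_data, new_tpg);
		mutex_unlock(&vq->mutex);
	}
	vhost_scsi_flush(vs);	/* grace period: drains the worker */
	kfree(old);		/* no handler can still see the old array */
	vs->vs_tpg = new_tpg;
}
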
@@ -851,6 +903,8 @@ static int vhost_scsi_clear_endpoint(
 {
 	struct tcm_vhost_tport *tv_tport;
 	struct tcm_vhost_tpg *tv_tpg;
+	struct vhost_virtqueue *vq;
+	bool match = false;
 	int index, ret, i;
 	u8 target;
 
@@ -862,9 +916,14 @@ static int vhost_scsi_clear_endpoint(
 			goto err_dev;
 		}
 	}
+
+	if (!vs->vs_tpg) {
+		mutex_unlock(&vs->dev.mutex);
+		return 0;
+	}
+
 	for (i = 0; i < VHOST_SCSI_MAX_TARGET; i++) {
 		target = i;
-
 		tv_tpg = vs->vs_tpg[target];
 		if (!tv_tpg)
 			continue;
@@ -886,10 +945,27 @@ static int vhost_scsi_clear_endpoint(
 		}
 		tv_tpg->tv_tpg_vhost_count--;
 		vs->vs_tpg[target] = NULL;
-		vs->vs_endpoint = false;
+		match = true;
 		mutex_unlock(&tv_tpg->tv_tpg_mutex);
 	}
+	if (match) {
+		for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
+			vq = &vs->vqs[i];
+			/* Flushing the vhost_work acts as synchronize_rcu */
+			mutex_lock(&vq->mutex);
+			rcu_assign_pointer(vq->private_data, NULL);
+			mutex_unlock(&vq->mutex);
+		}
+	}
+	/*
+	 * Act as synchronize_rcu to make sure access to
+	 * old vs->vs_tpg is finished.
+	 */
+	vhost_scsi_flush(vs);
+	kfree(vs->vs_tpg);
+	vs->vs_tpg = NULL;
 	mutex_unlock(&vs->dev.mutex);
+
 	return 0;
 
 err_tpg:
@@ -899,6 +975,24 @@ err_dev:
 	return ret;
 }
 
+static int vhost_scsi_set_features(struct vhost_scsi *vs, u64 features)
+{
+	if (features & ~VHOST_SCSI_FEATURES)
+		return -EOPNOTSUPP;
+
+	mutex_lock(&vs->dev.mutex);
+	if ((features & (1 << VHOST_F_LOG_ALL)) &&
+	    !vhost_log_access_ok(&vs->dev)) {
+		mutex_unlock(&vs->dev.mutex);
+		return -EFAULT;
+	}
+	vs->dev.acked_features = features;
+	smp_wmb();
+	vhost_scsi_flush(vs);
+	mutex_unlock(&vs->dev.mutex);
+	return 0;
+}
+
 static int vhost_scsi_open(struct inode *inode, struct file *f)
 {
 	struct vhost_scsi *s;
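
vhost_scsi_set_features() (and the flush helpers) move up in the file unchanged; the deletion hunk below removes the old copies. The body stores acked_features, issues smp_wmb() so the store is visible before the flush runs, and then flushes so requests already queued to the worker complete under the old feature set before the ioctl returns. From userspace this path is driven by the usual vhost feature negotiation; a hedged sketch, assuming an already-open /dev/vhost-scsi descriptor:

#include <sys/ioctl.h>
#include <linux/vhost.h>

/* Hypothetical helper: fetch device features, ack a supported subset. */
static int vhost_negotiate(int vhost_fd, __u64 wanted)
{
	__u64 features;

	if (ioctl(vhost_fd, VHOST_GET_FEATURES, &features) < 0)
		return -1;
	features &= wanted;	/* keep only what the application supports */
	return ioctl(vhost_fd, VHOST_SET_FEATURES, &features);
}
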
@@ -939,38 +1033,6 @@ static int vhost_scsi_release(struct inode *inode, struct file *f)
 	return 0;
 }
 
-static void vhost_scsi_flush_vq(struct vhost_scsi *vs, int index)
-{
-	vhost_poll_flush(&vs->dev.vqs[index].poll);
-}
-
-static void vhost_scsi_flush(struct vhost_scsi *vs)
-{
-	int i;
-
-	for (i = 0; i < VHOST_SCSI_MAX_VQ; i++)
-		vhost_scsi_flush_vq(vs, i);
-	vhost_work_flush(&vs->dev, &vs->vs_completion_work);
-}
-
-static int vhost_scsi_set_features(struct vhost_scsi *vs, u64 features)
-{
-	if (features & ~VHOST_SCSI_FEATURES)
-		return -EOPNOTSUPP;
-
-	mutex_lock(&vs->dev.mutex);
-	if ((features & (1 << VHOST_F_LOG_ALL)) &&
-	    !vhost_log_access_ok(&vs->dev)) {
-		mutex_unlock(&vs->dev.mutex);
-		return -EFAULT;
-	}
-	vs->dev.acked_features = features;
-	smp_wmb();
-	vhost_scsi_flush(vs);
-	mutex_unlock(&vs->dev.mutex);
-	return 0;
-}
-
 static long vhost_scsi_ioctl(struct file *f, unsigned int ioctl,
 		unsigned long arg)
 {