Diffstat (limited to 'drivers/vhost/tcm_vhost.c')
 drivers/vhost/tcm_vhost.c | 144 ++++++++++++++++++++++++++-----------
 1 file changed, 101 insertions(+), 43 deletions(-)
diff --git a/drivers/vhost/tcm_vhost.c b/drivers/vhost/tcm_vhost.c
index dd9614eb2577..6cda137bb208 100644
--- a/drivers/vhost/tcm_vhost.c
+++ b/drivers/vhost/tcm_vhost.c
@@ -74,9 +74,8 @@ enum {
 
 struct vhost_scsi {
 	/* Protected by vhost_scsi->dev.mutex */
-	struct tcm_vhost_tpg *vs_tpg[VHOST_SCSI_MAX_TARGET];
+	struct tcm_vhost_tpg **vs_tpg;
 	char vs_vhost_wwpn[TRANSPORT_IQN_LEN];
-	bool vs_endpoint;
 
 	struct vhost_dev dev;
 	struct vhost_virtqueue vqs[VHOST_SCSI_MAX_VQ];
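
This first hunk replaces the fixed-size vs_tpg[] member array plus the separate vs_endpoint flag with a single heap-allocated table: a non-NULL pointer now means "endpoint configured", and because the table is reached through a pointer it can be replaced wholesale. A minimal standalone sketch of the idea (hypothetical names, not driver code):

	#define MAX_TARGET 256			/* stand-in for VHOST_SCSI_MAX_TARGET */

	struct tpg;				/* opaque stand-in for tcm_vhost_tpg */

	struct scsi_state {
		struct tpg **tpg_table;		/* NULL == endpoint not configured */
	};

	/* One pointer carries both the data and the old vs_endpoint flag. */
	static struct tpg *lookup_target(struct scsi_state *s, unsigned int target)
	{
		struct tpg **table = s->tpg_table;

		if (!table || target >= MAX_TARGET)
			return NULL;		/* endpoint not set up, or bad index */
		return table[target];
	}

Collapsing flag and data into one pointer also removes any window in which vs_endpoint and the table contents could disagree.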
@@ -582,6 +581,7 @@ static void tcm_vhost_submission_work(struct work_struct *work)
 static void vhost_scsi_handle_vq(struct vhost_scsi *vs,
 	struct vhost_virtqueue *vq)
 {
+	struct tcm_vhost_tpg **vs_tpg;
 	struct virtio_scsi_cmd_req v_req;
 	struct tcm_vhost_tpg *tv_tpg;
 	struct tcm_vhost_cmd *tv_cmd;
@@ -590,8 +590,16 @@ static void vhost_scsi_handle_vq(struct vhost_scsi *vs,
 	int head, ret;
 	u8 target;
 
-	/* Must use ioctl VHOST_SCSI_SET_ENDPOINT */
-	if (unlikely(!vs->vs_endpoint))
+	/*
+	 * We can handle the vq only after the endpoint is setup by calling the
+	 * VHOST_SCSI_SET_ENDPOINT ioctl.
+	 *
+	 * TODO: Check that we are running from vhost_worker which acts
+	 * as read-side critical section for vhost kind of RCU.
+	 * See the comments in struct vhost_virtqueue in drivers/vhost/vhost.h
+	 */
+	vs_tpg = rcu_dereference_check(vq->private_data, 1);
+	if (!vs_tpg)
 		return;
 
 	mutex_lock(&vq->mutex);
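
Note the constant-true second argument to rcu_dereference_check(): it waives lockdep's rcu_read_lock() check because, under vhost's convention, running in the vhost worker thread is the read-side critical section and flushing the work queue is the grace period; the TODO above records that this is not yet asserted anywhere. A sketch of the reader's contract (illustrative, not a drop-in function):

	static void reader_sketch(struct vhost_virtqueue *vq)
	{
		struct tcm_vhost_tpg **tpgs;

		/* The "1" waives lockdep: being inside the vhost worker is
		 * the read-side critical section, and vhost_work_flush() is
		 * the grace period. */
		tpgs = rcu_dereference_check(vq->private_data, 1);
		if (!tpgs)
			return;	/* VHOST_SCSI_SET_ENDPOINT not called yet */

		/* tpgs[] stays valid until this work item completes */
	}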
@@ -661,7 +669,7 @@ static void vhost_scsi_handle_vq(struct vhost_scsi *vs,
 
 		/* Extract the tpgt */
 		target = v_req.lun[1];
-		tv_tpg = ACCESS_ONCE(vs->vs_tpg[target]);
+		tv_tpg = ACCESS_ONCE(vs_tpg[target]);
 
 		/* Target does not exist, fail the request */
 		if (unlikely(!tv_tpg)) {
@@ -780,6 +788,20 @@ static void vhost_scsi_handle_kick(struct vhost_work *work)
 	vhost_scsi_handle_vq(vs, vq);
 }
 
+static void vhost_scsi_flush_vq(struct vhost_scsi *vs, int index)
+{
+	vhost_poll_flush(&vs->dev.vqs[index].poll);
+}
+
+static void vhost_scsi_flush(struct vhost_scsi *vs)
+{
+	int i;
+
+	for (i = 0; i < VHOST_SCSI_MAX_VQ; i++)
+		vhost_scsi_flush_vq(vs, i);
+	vhost_work_flush(&vs->dev, &vs->vs_completion_work);
+}
+
 /*
  * Called from vhost_scsi_ioctl() context to walk the list of available
  * tcm_vhost_tpg with an active struct tcm_vhost_nexus
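
vhost_scsi_flush_vq() and vhost_scsi_flush() are moved up unchanged from later in the file (see the deletion at the end of this patch) because the endpoint ioctls below now need them. Flushing every virtqueue's poll work plus the completion work guarantees that no worker still holds a table pointer it loaded before the flush, which is exactly what synchronize_rcu() would provide. A hypothetical helper condensing the update protocol that set_endpoint and clear_endpoint open-code below:

	static void publish_tpg_table(struct vhost_scsi *vs,
				      struct tcm_vhost_tpg **new_tpg)
	{
		struct tcm_vhost_tpg **old = vs->vs_tpg;
		int i;

		/* 1. publish the new table (or NULL) under each vq->mutex */
		for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
			mutex_lock(&vs->vqs[i].mutex);
			rcu_assign_pointer(vs->vqs[i].private_data, new_tpg);
			mutex_unlock(&vs->vqs[i].mutex);
		}
		/* 2. wait for every reader of the old table to finish */
		vhost_scsi_flush(vs);	/* stands in for synchronize_rcu() */
		/* 3. only now is the old table unreachable */
		kfree(old);
		vs->vs_tpg = new_tpg;
	}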
@@ -790,8 +812,10 @@ static int vhost_scsi_set_endpoint(
 {
 	struct tcm_vhost_tport *tv_tport;
 	struct tcm_vhost_tpg *tv_tpg;
+	struct tcm_vhost_tpg **vs_tpg;
+	struct vhost_virtqueue *vq;
+	int index, ret, i, len;
 	bool match = false;
-	int index, ret;
 
 	mutex_lock(&vs->dev.mutex);
 	/* Verify that ring has been setup correctly. */
@@ -803,6 +827,15 @@ static int vhost_scsi_set_endpoint(
 		}
 	}
 
+	len = sizeof(vs_tpg[0]) * VHOST_SCSI_MAX_TARGET;
+	vs_tpg = kzalloc(len, GFP_KERNEL);
+	if (!vs_tpg) {
+		mutex_unlock(&vs->dev.mutex);
+		return -ENOMEM;
+	}
+	if (vs->vs_tpg)
+		memcpy(vs_tpg, vs->vs_tpg, len);
+
 	mutex_lock(&tcm_vhost_mutex);
 	list_for_each_entry(tv_tpg, &tcm_vhost_list, tv_tpg_list) {
 		mutex_lock(&tv_tpg->tv_tpg_mutex);
@@ -817,14 +850,15 @@ static int vhost_scsi_set_endpoint(
 		tv_tport = tv_tpg->tport;
 
 		if (!strcmp(tv_tport->tport_name, t->vhost_wwpn)) {
-			if (vs->vs_tpg[tv_tpg->tport_tpgt]) {
+			if (vs->vs_tpg && vs->vs_tpg[tv_tpg->tport_tpgt]) {
 				mutex_unlock(&tv_tpg->tv_tpg_mutex);
 				mutex_unlock(&tcm_vhost_mutex);
 				mutex_unlock(&vs->dev.mutex);
+				kfree(vs_tpg);
 				return -EEXIST;
 			}
 			tv_tpg->tv_tpg_vhost_count++;
-			vs->vs_tpg[tv_tpg->tport_tpgt] = tv_tpg;
+			vs_tpg[tv_tpg->tport_tpgt] = tv_tpg;
 			smp_mb__after_atomic_inc();
 			match = true;
 		}
@@ -835,12 +869,26 @@ static int vhost_scsi_set_endpoint(
 	if (match) {
 		memcpy(vs->vs_vhost_wwpn, t->vhost_wwpn,
 		       sizeof(vs->vs_vhost_wwpn));
-		vs->vs_endpoint = true;
+		for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
+			vq = &vs->vqs[i];
+			/* Flushing the vhost_work acts as synchronize_rcu */
+			mutex_lock(&vq->mutex);
+			rcu_assign_pointer(vq->private_data, vs_tpg);
+			mutex_unlock(&vq->mutex);
+		}
 		ret = 0;
 	} else {
 		ret = -EEXIST;
 	}
 
+	/*
+	 * Act as synchronize_rcu to make sure access to
+	 * old vs->vs_tpg is finished.
+	 */
+	vhost_scsi_flush(vs);
+	kfree(vs->vs_tpg);
+	vs->vs_tpg = vs_tpg;
+
 	mutex_unlock(&vs->dev.mutex);
 	return ret;
 }
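
Two details are worth noting in the hunk above: the freshly built copy replaces vs->vs_tpg even on the no-match (-EEXIST) path, which is harmless because the copy is content-identical to the old table when no vq was repointed, and the old table is freed only after the flush. From userspace, the lifecycle this implements looks roughly like the sketch below (error handling elided; the ioctl numbers and struct vhost_scsi_target lived in drivers/vhost/tcm_vhost.h at the time, so the include path here is an assumption):

	#include <fcntl.h>
	#include <string.h>
	#include <sys/ioctl.h>
	#include "tcm_vhost.h"	/* assumed: VHOST_SCSI_SET_ENDPOINT et al. */

	int main(void)
	{
		struct vhost_scsi_target backend;
		int fd = open("/dev/vhost-scsi", O_RDWR);

		memset(&backend, 0, sizeof(backend));
		strncpy(backend.vhost_wwpn, "naa.600140554cf3a18e",	/* example WWPN */
			sizeof(backend.vhost_wwpn) - 1);

		ioctl(fd, VHOST_SCSI_SET_ENDPOINT, &backend);	/* build + publish table */
		/* ... set up vrings, run I/O ... */
		ioctl(fd, VHOST_SCSI_CLEAR_ENDPOINT, &backend);	/* publish NULL, free */
		return 0;
	}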
@@ -851,6 +899,8 @@ static int vhost_scsi_clear_endpoint(
 {
 	struct tcm_vhost_tport *tv_tport;
 	struct tcm_vhost_tpg *tv_tpg;
+	struct vhost_virtqueue *vq;
+	bool match = false;
 	int index, ret, i;
 	u8 target;
 
@@ -862,9 +912,14 @@ static int vhost_scsi_clear_endpoint(
 			goto err_dev;
 		}
 	}
+
+	if (!vs->vs_tpg) {
+		mutex_unlock(&vs->dev.mutex);
+		return 0;
+	}
+
 	for (i = 0; i < VHOST_SCSI_MAX_TARGET; i++) {
 		target = i;
-
 		tv_tpg = vs->vs_tpg[target];
 		if (!tv_tpg)
 			continue;
@@ -886,10 +941,27 @@ static int vhost_scsi_clear_endpoint(
 		}
 		tv_tpg->tv_tpg_vhost_count--;
 		vs->vs_tpg[target] = NULL;
-		vs->vs_endpoint = false;
+		match = true;
 		mutex_unlock(&tv_tpg->tv_tpg_mutex);
 	}
+	if (match) {
+		for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
+			vq = &vs->vqs[i];
+			/* Flushing the vhost_work acts as synchronize_rcu */
+			mutex_lock(&vq->mutex);
+			rcu_assign_pointer(vq->private_data, NULL);
+			mutex_unlock(&vq->mutex);
+		}
+	}
+	/*
+	 * Act as synchronize_rcu to make sure access to
+	 * old vs->vs_tpg is finished.
+	 */
+	vhost_scsi_flush(vs);
+	kfree(vs->vs_tpg);
+	vs->vs_tpg = NULL;
 	mutex_unlock(&vs->dev.mutex);
+
 	return 0;
 
 err_tpg:
@@ -899,6 +971,24 @@ err_dev:
 	return ret;
 }
 
+static int vhost_scsi_set_features(struct vhost_scsi *vs, u64 features)
+{
+	if (features & ~VHOST_SCSI_FEATURES)
+		return -EOPNOTSUPP;
+
+	mutex_lock(&vs->dev.mutex);
+	if ((features & (1 << VHOST_F_LOG_ALL)) &&
+	    !vhost_log_access_ok(&vs->dev)) {
+		mutex_unlock(&vs->dev.mutex);
+		return -EFAULT;
+	}
+	vs->dev.acked_features = features;
+	smp_wmb();
+	vhost_scsi_flush(vs);
+	mutex_unlock(&vs->dev.mutex);
+	return 0;
+}
+
 static int vhost_scsi_open(struct inode *inode, struct file *f)
 {
 	struct vhost_scsi *s;
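
vhost_scsi_set_features() is likewise relocated verbatim (its old copy is removed below). It rejects feature bits outside VHOST_SCSI_FEATURES, demands log access before acking VHOST_F_LOG_ALL, and flushes so every worker observes the new acked_features before the ioctl returns. The userspace half of the negotiation, sketched with the generic vhost ioctls from linux/vhost.h (error handling elided):

	#include <stdint.h>
	#include <sys/ioctl.h>
	#include <linux/vhost.h>	/* VHOST_GET_FEATURES / VHOST_SET_FEATURES */

	static int negotiate_features(int vhost_fd, uint64_t wanted)
	{
		uint64_t features;

		ioctl(vhost_fd, VHOST_GET_FEATURES, &features);	/* host's offer */
		features &= wanted;				/* intersect with ours */
		return ioctl(vhost_fd, VHOST_SET_FEATURES, &features);
	}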
@@ -939,38 +1029,6 @@ static int vhost_scsi_release(struct inode *inode, struct file *f)
 	return 0;
 }
 
-static void vhost_scsi_flush_vq(struct vhost_scsi *vs, int index)
-{
-	vhost_poll_flush(&vs->dev.vqs[index].poll);
-}
-
-static void vhost_scsi_flush(struct vhost_scsi *vs)
-{
-	int i;
-
-	for (i = 0; i < VHOST_SCSI_MAX_VQ; i++)
-		vhost_scsi_flush_vq(vs, i);
-	vhost_work_flush(&vs->dev, &vs->vs_completion_work);
-}
-
-static int vhost_scsi_set_features(struct vhost_scsi *vs, u64 features)
-{
-	if (features & ~VHOST_SCSI_FEATURES)
-		return -EOPNOTSUPP;
-
-	mutex_lock(&vs->dev.mutex);
-	if ((features & (1 << VHOST_F_LOG_ALL)) &&
-	    !vhost_log_access_ok(&vs->dev)) {
-		mutex_unlock(&vs->dev.mutex);
-		return -EFAULT;
-	}
-	vs->dev.acked_features = features;
-	smp_wmb();
-	vhost_scsi_flush(vs);
-	mutex_unlock(&vs->dev.mutex);
-	return 0;
-}
-
 static long vhost_scsi_ioctl(struct file *f, unsigned int ioctl,
 			     unsigned long arg)
 {