Diffstat (limited to 'drivers/vhost/tcm_vhost.c')

 drivers/vhost/tcm_vhost.c | 262 ++++++++++++++++++++++++++++++++++++++++++---
 1 file changed, 246 insertions(+), 16 deletions(-)

diff --git a/drivers/vhost/tcm_vhost.c b/drivers/vhost/tcm_vhost.c
index 957a0b98a5d9..1677238d281f 100644
--- a/drivers/vhost/tcm_vhost.c
+++ b/drivers/vhost/tcm_vhost.c
@@ -66,11 +66,13 @@ enum {
  * TODO: debug and remove the workaround.
  */
 enum {
-	VHOST_SCSI_FEATURES = VHOST_FEATURES & (~VIRTIO_RING_F_EVENT_IDX)
+	VHOST_SCSI_FEATURES = (VHOST_FEATURES & (~VIRTIO_RING_F_EVENT_IDX)) |
+			      (1ULL << VIRTIO_SCSI_F_HOTPLUG)
 };
 
 #define VHOST_SCSI_MAX_TARGET	256
 #define VHOST_SCSI_MAX_VQ	128
+#define VHOST_SCSI_MAX_EVENT	128
 
 struct vhost_scsi {
 	/* Protected by vhost_scsi->dev.mutex */
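Note on the feature bit: VIRTIO_SCSI_F_HOTPLUG is only advertised here; it takes effect when userspace negotiates it through the usual vhost feature handshake. As a rough userspace sketch (not part of this patch; the helper name and fd are invented for illustration), a VMM would intersect the host mask with whatever the guest driver acked:

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/vhost.h>

/* Hypothetical helper: ack only the bits both host and guest support. */
static int negotiate_features(int vhost_fd, uint64_t guest_acked)
{
	uint64_t features;

	if (ioctl(vhost_fd, VHOST_GET_FEATURES, &features) < 0)
		return -1;
	/* VIRTIO_SCSI_F_HOTPLUG stays clear unless the guest acked it */
	features &= guest_acked;
	return ioctl(vhost_fd, VHOST_SET_FEATURES, &features);
}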
@@ -82,6 +84,12 @@ struct vhost_scsi {
 
 	struct vhost_work vs_completion_work; /* cmd completion work item */
 	struct llist_head vs_completion_list; /* cmd completion queue */
+
+	struct vhost_work vs_event_work; /* evt injection work item */
+	struct llist_head vs_event_list; /* evt injection queue */
+
+	bool vs_events_missed; /* any missed events, protected by vq->mutex */
+	int vs_events_nr; /* num of pending events, protected by vq->mutex */
 };
 
 /* Local pointer to allocated TCM configfs fabric module */
@@ -349,6 +357,37 @@ static int tcm_vhost_queue_tm_rsp(struct se_cmd *se_cmd)
 	return 0;
 }
 
+static void tcm_vhost_free_evt(struct vhost_scsi *vs, struct tcm_vhost_evt *evt)
+{
+	vs->vs_events_nr--;
+	kfree(evt);
+}
+
+static struct tcm_vhost_evt *tcm_vhost_allocate_evt(struct vhost_scsi *vs,
+	u32 event, u32 reason)
+{
+	struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT];
+	struct tcm_vhost_evt *evt;
+
+	if (vs->vs_events_nr > VHOST_SCSI_MAX_EVENT) {
+		vs->vs_events_missed = true;
+		return NULL;
+	}
+
+	evt = kzalloc(sizeof(*evt), GFP_KERNEL);
+	if (!evt) {
+		vq_err(vq, "Failed to allocate tcm_vhost_evt\n");
+		vs->vs_events_missed = true;
+		return NULL;
+	}
+
+	evt->event.event = event;
+	evt->event.reason = reason;
+	vs->vs_events_nr++;
+
+	return evt;
+}
+
 static void vhost_scsi_free_cmd(struct tcm_vhost_cmd *tv_cmd)
 {
 	struct se_cmd *se_cmd = &tv_cmd->tvc_se_cmd;
@@ -367,6 +406,75 @@ static void vhost_scsi_free_cmd(struct tcm_vhost_cmd *tv_cmd)
 	kfree(tv_cmd);
 }
 
+static void tcm_vhost_do_evt_work(struct vhost_scsi *vs,
+	struct tcm_vhost_evt *evt)
+{
+	struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT];
+	struct virtio_scsi_event *event = &evt->event;
+	struct virtio_scsi_event __user *eventp;
+	unsigned out, in;
+	int head, ret;
+
+	if (!vq->private_data) {
+		vs->vs_events_missed = true;
+		return;
+	}
+
+again:
+	vhost_disable_notify(&vs->dev, vq);
+	head = vhost_get_vq_desc(&vs->dev, vq, vq->iov,
+			ARRAY_SIZE(vq->iov), &out, &in,
+			NULL, NULL);
+	if (head < 0) {
+		vs->vs_events_missed = true;
+		return;
+	}
+	if (head == vq->num) {
+		if (vhost_enable_notify(&vs->dev, vq))
+			goto again;
+		vs->vs_events_missed = true;
+		return;
+	}
+
+	if (vq->iov[out].iov_len != sizeof(struct virtio_scsi_event)) {
+		vq_err(vq, "Expecting virtio_scsi_event, got %zu bytes\n",
+				vq->iov[out].iov_len);
+		vs->vs_events_missed = true;
+		return;
+	}
+
+	if (vs->vs_events_missed) {
+		event->event |= VIRTIO_SCSI_T_EVENTS_MISSED;
+		vs->vs_events_missed = false;
+	}
+
+	eventp = vq->iov[out].iov_base;
+	ret = __copy_to_user(eventp, event, sizeof(*event));
+	if (!ret)
+		vhost_add_used_and_signal(&vs->dev, vq, head, 0);
+	else
+		vq_err(vq, "Faulted on tcm_vhost_send_event\n");
+}
+
+static void tcm_vhost_evt_work(struct vhost_work *work)
+{
+	struct vhost_scsi *vs = container_of(work, struct vhost_scsi,
+					vs_event_work);
+	struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT];
+	struct tcm_vhost_evt *evt;
+	struct llist_node *llnode;
+
+	mutex_lock(&vq->mutex);
+	llnode = llist_del_all(&vs->vs_event_list);
+	while (llnode) {
+		evt = llist_entry(llnode, struct tcm_vhost_evt, list);
+		llnode = llist_next(llnode);
+		tcm_vhost_do_evt_work(vs, evt);
+		tcm_vhost_free_evt(vs, evt);
+	}
+	mutex_unlock(&vq->mutex);
+}
+
 /* Fill in status and signal that we are done processing this command
  *
  * This is scheduled in the vhost work queue so we are called with the owner
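The event machinery above is a producer/consumer pair: tcm_vhost_send_evt() (added further below) pushes events onto vs_event_list with llist_add() from arbitrary context, while tcm_vhost_evt_work() runs in the vhost worker, claims the whole batch at once with llist_del_all(), and drains it under vq->mutex. A minimal stand-alone sketch of the same lock-free hand-off idiom, with invented names and no connection to this file:

#include <linux/llist.h>
#include <linux/slab.h>
#include <linux/types.h>

struct demo_evt {
	struct llist_node list;
	u32 payload;
};

static LLIST_HEAD(demo_list);

/* Producer: no lock taken, safe from any context. */
static void demo_queue(u32 payload)
{
	struct demo_evt *evt = kzalloc(sizeof(*evt), GFP_ATOMIC);

	if (!evt)
		return;
	evt->payload = payload;
	llist_add(&evt->list, &demo_list);
}

/* Consumer: one atomic grab of the list, then a private walk. */
static void demo_drain(void)
{
	struct llist_node *llnode = llist_del_all(&demo_list);

	while (llnode) {
		struct demo_evt *evt = llist_entry(llnode, struct demo_evt, list);

		llnode = llist_next(llnode);
		kfree(evt);
	}
}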
@@ -777,9 +885,46 @@ static void vhost_scsi_ctl_handle_kick(struct vhost_work *work)
 	pr_debug("%s: The handling func for control queue.\n", __func__);
 }
 
+static void tcm_vhost_send_evt(struct vhost_scsi *vs, struct tcm_vhost_tpg *tpg,
+	struct se_lun *lun, u32 event, u32 reason)
+{
+	struct tcm_vhost_evt *evt;
+
+	evt = tcm_vhost_allocate_evt(vs, event, reason);
+	if (!evt)
+		return;
+
+	if (tpg && lun) {
+		/* TODO: share lun setup code with virtio-scsi.ko */
+		/*
+		 * Note: evt->event is zeroed when we allocate it and
+		 * lun[4-7] need to be zero according to virtio-scsi spec.
+		 */
+		evt->event.lun[0] = 0x01;
+		evt->event.lun[1] = tpg->tport_tpgt & 0xFF;
+		if (lun->unpacked_lun >= 256)
+			evt->event.lun[2] = lun->unpacked_lun >> 8 | 0x40;
+		evt->event.lun[3] = lun->unpacked_lun & 0xFF;
+	}
+
+	llist_add(&evt->list, &vs->vs_event_list);
+	vhost_work_queue(&vs->dev, &vs->vs_event_work);
+}
+
 static void vhost_scsi_evt_handle_kick(struct vhost_work *work)
 {
-	pr_debug("%s: The handling func for event queue.\n", __func__);
+	struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
+						poll.work);
+	struct vhost_scsi *vs = container_of(vq->dev, struct vhost_scsi, dev);
+
+	mutex_lock(&vq->mutex);
+	if (!vq->private_data)
+		goto out;
+
+	if (vs->vs_events_missed)
+		tcm_vhost_send_evt(vs, NULL, NULL, VIRTIO_SCSI_T_NO_EVENT, 0);
+out:
+	mutex_unlock(&vq->mutex);
 }
 
 static void vhost_scsi_handle_kick(struct vhost_work *work)
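The LUN bytes written in tcm_vhost_send_evt() follow the virtio-scsi addressing convention: lun[0] = 1, lun[1] = target, and lun[2..3] carry the LUN with the flat-space address method (the 0x40 flag) in the high byte; lun[4..7] stay zero. A sketch of the inverse mapping, roughly what the guest driver performs when it receives the event (the function name here is invented):

#include <linux/types.h>

/* Hypothetical decoder mirroring the encoding in tcm_vhost_send_evt(). */
static u16 demo_lun_from_virtio(const u8 lun[8])
{
	/* lun[1] selects the target; the flat-space LUN lives in
	 * lun[2..3] with the 0x40 address-method bits masked off. */
	return ((lun[2] << 8) | lun[3]) & 0x3FFF;
}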
@@ -803,11 +948,15 @@ static void vhost_scsi_flush(struct vhost_scsi *vs)
 	for (i = 0; i < VHOST_SCSI_MAX_VQ; i++)
 		vhost_scsi_flush_vq(vs, i);
 	vhost_work_flush(&vs->dev, &vs->vs_completion_work);
+	vhost_work_flush(&vs->dev, &vs->vs_event_work);
 }
 
 /*
  * Called from vhost_scsi_ioctl() context to walk the list of available
  * tcm_vhost_tpg with an active struct tcm_vhost_nexus
+ *
+ * The lock nesting rule is:
+ *    tcm_vhost_mutex -> vs->dev.mutex -> tpg->tv_tpg_mutex -> vq->mutex
  */
 static int vhost_scsi_set_endpoint(
 	struct vhost_scsi *vs,
@@ -820,26 +969,27 @@ static int vhost_scsi_set_endpoint(
 	int index, ret, i, len;
 	bool match = false;
 
+	mutex_lock(&tcm_vhost_mutex);
 	mutex_lock(&vs->dev.mutex);
+
 	/* Verify that ring has been setup correctly. */
 	for (index = 0; index < vs->dev.nvqs; ++index) {
 		/* Verify that ring has been setup correctly. */
 		if (!vhost_vq_access_ok(&vs->vqs[index])) {
-			mutex_unlock(&vs->dev.mutex);
-			return -EFAULT;
+			ret = -EFAULT;
+			goto out;
 		}
 	}
 
 	len = sizeof(vs_tpg[0]) * VHOST_SCSI_MAX_TARGET;
 	vs_tpg = kzalloc(len, GFP_KERNEL);
 	if (!vs_tpg) {
-		mutex_unlock(&vs->dev.mutex);
-		return -ENOMEM;
+		ret = -ENOMEM;
+		goto out;
 	}
 	if (vs->vs_tpg)
 		memcpy(vs_tpg, vs->vs_tpg, len);
 
-	mutex_lock(&tcm_vhost_mutex);
 	list_for_each_entry(tv_tpg, &tcm_vhost_list, tv_tpg_list) {
 		mutex_lock(&tv_tpg->tv_tpg_mutex);
 		if (!tv_tpg->tpg_nexus) {
@@ -854,20 +1004,19 @@ static int vhost_scsi_set_endpoint(
 
 		if (!strcmp(tv_tport->tport_name, t->vhost_wwpn)) {
 			if (vs->vs_tpg && vs->vs_tpg[tv_tpg->tport_tpgt]) {
-				mutex_unlock(&tv_tpg->tv_tpg_mutex);
-				mutex_unlock(&tcm_vhost_mutex);
-				mutex_unlock(&vs->dev.mutex);
 				kfree(vs_tpg);
-				return -EEXIST;
+				mutex_unlock(&tv_tpg->tv_tpg_mutex);
+				ret = -EEXIST;
+				goto out;
 			}
 			tv_tpg->tv_tpg_vhost_count++;
+			tv_tpg->vhost_scsi = vs;
 			vs_tpg[tv_tpg->tport_tpgt] = tv_tpg;
 			smp_mb__after_atomic_inc();
 			match = true;
 		}
 		mutex_unlock(&tv_tpg->tv_tpg_mutex);
 	}
-	mutex_unlock(&tcm_vhost_mutex);
 
 	if (match) {
 		memcpy(vs->vs_vhost_wwpn, t->vhost_wwpn,
@@ -893,7 +1042,9 @@ static int vhost_scsi_set_endpoint(
 	kfree(vs->vs_tpg);
 	vs->vs_tpg = vs_tpg;
 
+out:
 	mutex_unlock(&vs->dev.mutex);
+	mutex_unlock(&tcm_vhost_mutex);
 	return ret;
 }
 
@@ -908,6 +1059,7 @@ static int vhost_scsi_clear_endpoint(
 	int index, ret, i;
 	u8 target;
 
+	mutex_lock(&tcm_vhost_mutex);
 	mutex_lock(&vs->dev.mutex);
 	/* Verify that ring has been setup correctly. */
 	for (index = 0; index < vs->dev.nvqs; ++index) {
@@ -918,8 +1070,8 @@ static int vhost_scsi_clear_endpoint(
 	}
 
 	if (!vs->vs_tpg) {
-		mutex_unlock(&vs->dev.mutex);
-		return 0;
+		ret = 0;
+		goto err_dev;
 	}
 
 	for (i = 0; i < VHOST_SCSI_MAX_TARGET; i++) {
@@ -944,6 +1096,7 @@ static int vhost_scsi_clear_endpoint(
 			goto err_tpg;
 		}
 		tv_tpg->tv_tpg_vhost_count--;
+		tv_tpg->vhost_scsi = NULL;
 		vs->vs_tpg[target] = NULL;
 		match = true;
 		mutex_unlock(&tv_tpg->tv_tpg_mutex);
@@ -964,14 +1117,16 @@ static int vhost_scsi_clear_endpoint(
 	vhost_scsi_flush(vs);
 	kfree(vs->vs_tpg);
 	vs->vs_tpg = NULL;
+	WARN_ON(vs->vs_events_nr);
 	mutex_unlock(&vs->dev.mutex);
-
+	mutex_unlock(&tcm_vhost_mutex);
 	return 0;
 
 err_tpg:
 	mutex_unlock(&tv_tpg->tv_tpg_mutex);
 err_dev:
 	mutex_unlock(&vs->dev.mutex);
+	mutex_unlock(&tcm_vhost_mutex);
 	return ret;
 }
 
@@ -1003,6 +1158,10 @@ static int vhost_scsi_open(struct inode *inode, struct file *f)
 		return -ENOMEM;
 
 	vhost_work_init(&s->vs_completion_work, vhost_scsi_complete_cmd_work);
+	vhost_work_init(&s->vs_event_work, tcm_vhost_evt_work);
+
+	s->vs_events_nr = 0;
+	s->vs_events_missed = false;
 
 	s->vqs[VHOST_SCSI_VQ_CTL].handle_kick = vhost_scsi_ctl_handle_kick;
 	s->vqs[VHOST_SCSI_VQ_EVT].handle_kick = vhost_scsi_evt_handle_kick;
@@ -1029,6 +1188,8 @@ static int vhost_scsi_release(struct inode *inode, struct file *f)
 	vhost_scsi_clear_endpoint(s, &t);
 	vhost_dev_stop(&s->dev);
 	vhost_dev_cleanup(&s->dev, false);
+	/* Jobs can re-queue themselves in evt kick handler. Do extra flush. */
+	vhost_scsi_flush(s);
 	kfree(s);
 	return 0;
 }
@@ -1040,8 +1201,11 @@ static long vhost_scsi_ioctl(struct file *f, unsigned int ioctl,
 	struct vhost_scsi_target backend;
 	void __user *argp = (void __user *)arg;
 	u64 __user *featurep = argp;
+	u32 __user *eventsp = argp;
+	u32 events_missed;
 	u64 features;
 	int r, abi_version = VHOST_SCSI_ABI_VERSION;
+	struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT];
 
 	switch (ioctl) {
 	case VHOST_SCSI_SET_ENDPOINT:
@@ -1062,6 +1226,20 @@ static long vhost_scsi_ioctl(struct file *f, unsigned int ioctl,
 		if (copy_to_user(argp, &abi_version, sizeof abi_version))
 			return -EFAULT;
 		return 0;
+	case VHOST_SCSI_SET_EVENTS_MISSED:
+		if (get_user(events_missed, eventsp))
+			return -EFAULT;
+		mutex_lock(&vq->mutex);
+		vs->vs_events_missed = events_missed;
+		mutex_unlock(&vq->mutex);
+		return 0;
+	case VHOST_SCSI_GET_EVENTS_MISSED:
+		mutex_lock(&vq->mutex);
+		events_missed = vs->vs_events_missed;
+		mutex_unlock(&vq->mutex);
+		if (put_user(events_missed, eventsp))
+			return -EFAULT;
+		return 0;
 	case VHOST_GET_FEATURES:
 		features = VHOST_SCSI_FEATURES;
 		if (copy_to_user(featurep, &features, sizeof features))
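The two new ioctls let userspace save and restore the missed-events flag, for example around a device reset or live migration, so that a pending VIRTIO_SCSI_T_EVENTS_MISSED notification is not lost. A hedged userspace sketch, assuming the VHOST_SCSI_SET_EVENTS_MISSED/VHOST_SCSI_GET_EVENTS_MISSED definitions added elsewhere in this series (helper name invented, error handling abbreviated):

#include <stdint.h>
#include <sys/ioctl.h>

/* Hypothetical: re-arm the missed-events state after a reconnect. */
static int resync_events_missed(int vhost_fd)
{
	uint32_t missed;

	if (ioctl(vhost_fd, VHOST_SCSI_GET_EVENTS_MISSED, &missed) < 0)
		return -1;
	if (missed) {
		/* Guest has been told events were dropped and will rescan;
		 * clear the flag once userspace has taken note. */
		missed = 0;
		if (ioctl(vhost_fd, VHOST_SCSI_SET_EVENTS_MISSED, &missed) < 0)
			return -1;
	}
	return 0;
}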
@@ -1133,28 +1311,80 @@ static char *tcm_vhost_dump_proto_id(struct tcm_vhost_tport *tport)
 	return "Unknown";
 }
 
+static void tcm_vhost_do_plug(struct tcm_vhost_tpg *tpg,
+	struct se_lun *lun, bool plug)
+{
+
+	struct vhost_scsi *vs = tpg->vhost_scsi;
+	struct vhost_virtqueue *vq;
+	u32 reason;
+
+	if (!vs)
+		return;
+
+	mutex_lock(&vs->dev.mutex);
+	if (!vhost_has_feature(&vs->dev, VIRTIO_SCSI_F_HOTPLUG)) {
+		mutex_unlock(&vs->dev.mutex);
+		return;
+	}
+
+	if (plug)
+		reason = VIRTIO_SCSI_EVT_RESET_RESCAN;
+	else
+		reason = VIRTIO_SCSI_EVT_RESET_REMOVED;
+
+	vq = &vs->vqs[VHOST_SCSI_VQ_EVT];
+	mutex_lock(&vq->mutex);
+	tcm_vhost_send_evt(vs, tpg, lun,
+			VIRTIO_SCSI_T_TRANSPORT_RESET, reason);
+	mutex_unlock(&vq->mutex);
+	mutex_unlock(&vs->dev.mutex);
+}
+
+static void tcm_vhost_hotplug(struct tcm_vhost_tpg *tpg, struct se_lun *lun)
+{
+	tcm_vhost_do_plug(tpg, lun, true);
+}
+
+static void tcm_vhost_hotunplug(struct tcm_vhost_tpg *tpg, struct se_lun *lun)
+{
+	tcm_vhost_do_plug(tpg, lun, false);
+}
+
 static int tcm_vhost_port_link(struct se_portal_group *se_tpg,
 	struct se_lun *lun)
 {
 	struct tcm_vhost_tpg *tv_tpg = container_of(se_tpg,
 				struct tcm_vhost_tpg, se_tpg);
 
+	mutex_lock(&tcm_vhost_mutex);
+
 	mutex_lock(&tv_tpg->tv_tpg_mutex);
 	tv_tpg->tv_tpg_port_count++;
 	mutex_unlock(&tv_tpg->tv_tpg_mutex);
 
+	tcm_vhost_hotplug(tv_tpg, lun);
+
+	mutex_unlock(&tcm_vhost_mutex);
+
 	return 0;
 }
 
 static void tcm_vhost_port_unlink(struct se_portal_group *se_tpg,
-	struct se_lun *se_lun)
+	struct se_lun *lun)
 {
 	struct tcm_vhost_tpg *tv_tpg = container_of(se_tpg,
 				struct tcm_vhost_tpg, se_tpg);
 
+	mutex_lock(&tcm_vhost_mutex);
+
 	mutex_lock(&tv_tpg->tv_tpg_mutex);
 	tv_tpg->tv_tpg_port_count--;
 	mutex_unlock(&tv_tpg->tv_tpg_mutex);
+
+	tcm_vhost_hotunplug(tv_tpg, lun);
+
+	mutex_unlock(&tcm_vhost_mutex);
 }
 
 static struct se_node_acl *tcm_vhost_make_nodeacl(
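With these hooks in place, the target core calls tcm_vhost_port_link() and tcm_vhost_port_unlink() from configfs whenever a LUN is linked into or removed from the TPG (they are wired up through the fabric ops table elsewhere in this file), so a guest that negotiated VIRTIO_SCSI_F_HOTPLUG now receives VIRTIO_SCSI_T_TRANSPORT_RESET events and rescans on LUN add/remove instead of relying on manual rescans.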