author     Linus Torvalds <torvalds@linux-foundation.org>   2014-11-21 19:28:45 -0500
committer  Linus Torvalds <torvalds@linux-foundation.org>   2014-11-21 19:28:45 -0500
commit     a46171d0100eafc0c276962d80f470406d66dcdd
tree       ed2a94222068a7a4da1c19aeb8edc47ebe1f4d56
parent     4ec69c7ebc2cb8df1f33b26f4492481c452fef66
parent     b1a5ad006b34ded9dc7ec64988deba1b3ecad367
Merge git://git.kernel.org/pub/scm/linux/kernel/git/nab/target-pending
Pull SCSI target fixes from Nicholas Bellinger:
"Here are the target-pending fixes queued for v3.18-rc6.
The highlights include:
- target-core OOPs fix with tcm_qla2xxx + vxworks FC initiators +
zero length SCSI commands having a transfer direction set. (Roland
+ Craig Watson)
- vhost-scsi OOPs fix to explicitly prevent WWPN endpoint configfs
group removal while qemu still has an active reference. (Paolo +
nab)
- ib_srpt fix for RDMA hardware with lower srp_sq_size limits.
(Bart)
- two ib_isert work-arounds for running on ocrdma hardware (Or + Sagi
+ Chris)
- iscsi-target discovery portal typo + SPC-3 PR Preempt SA key
matching fix (Steve)"
* git://git.kernel.org/pub/scm/linux/kernel/git/nab/target-pending:
IB/isert: Adjust CQ size to HW limits
target: return CONFLICT only when SA key unmatched
iser-target: Handle DEVICE_REMOVAL event on network portal listener correctly
ib_isert: Add max_send_sge=2 minimum for control PDU responses
srp-target: Retry when QP creation fails with ENOMEM
iscsi-target: return the correct port in SendTargets
vhost-scsi: Take configfs group dependency during VHOST_SCSI_SET_ENDPOINT
target: Don't call TFO->write_pending if data_length == 0
-rw-r--r--  drivers/infiniband/ulp/isert/ib_isert.c  | 44
-rw-r--r--  drivers/infiniband/ulp/srpt/ib_srpt.c    |  8
-rw-r--r--  drivers/target/iscsi/iscsi_target.c      |  2
-rw-r--r--  drivers/target/target_core_pr.c          |  9
-rw-r--r--  drivers/target/target_core_transport.c   |  2
-rw-r--r--  drivers/vhost/scsi.c                     | 24
6 files changed, 69 insertions, 20 deletions
diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
index 3effa931fce2..10641b7816f4 100644
--- a/drivers/infiniband/ulp/isert/ib_isert.c
+++ b/drivers/infiniband/ulp/isert/ib_isert.c
@@ -115,9 +115,12 @@ isert_conn_setup_qp(struct isert_conn *isert_conn, struct rdma_cm_id *cma_id,
 	attr.cap.max_recv_wr = ISERT_QP_MAX_RECV_DTOS;
 	/*
 	 * FIXME: Use devattr.max_sge - 2 for max_send_sge as
-	 * work-around for RDMA_READ..
+	 * work-around for RDMA_READs with ConnectX-2.
+	 *
+	 * Also, still make sure to have at least two SGEs for
+	 * outgoing control PDU responses.
 	 */
-	attr.cap.max_send_sge = device->dev_attr.max_sge - 2;
+	attr.cap.max_send_sge = max(2, device->dev_attr.max_sge - 2);
 	isert_conn->max_sge = attr.cap.max_send_sge;
 
 	attr.cap.max_recv_sge = 1;
@@ -225,12 +228,16 @@ isert_create_device_ib_res(struct isert_device *device)
 	struct isert_cq_desc *cq_desc;
 	struct ib_device_attr *dev_attr;
 	int ret = 0, i, j;
+	int max_rx_cqe, max_tx_cqe;
 
 	dev_attr = &device->dev_attr;
 	ret = isert_query_device(ib_dev, dev_attr);
 	if (ret)
 		return ret;
 
+	max_rx_cqe = min(ISER_MAX_RX_CQ_LEN, dev_attr->max_cqe);
+	max_tx_cqe = min(ISER_MAX_TX_CQ_LEN, dev_attr->max_cqe);
+
 	/* asign function handlers */
 	if (dev_attr->device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS &&
 	    dev_attr->device_cap_flags & IB_DEVICE_SIGNATURE_HANDOVER) {
@@ -272,7 +279,7 @@ isert_create_device_ib_res(struct isert_device *device)
 					isert_cq_rx_callback,
 					isert_cq_event_callback,
 					(void *)&cq_desc[i],
-					ISER_MAX_RX_CQ_LEN, i);
+					max_rx_cqe, i);
 		if (IS_ERR(device->dev_rx_cq[i])) {
 			ret = PTR_ERR(device->dev_rx_cq[i]);
 			device->dev_rx_cq[i] = NULL;
@@ -284,7 +291,7 @@ isert_create_device_ib_res(struct isert_device *device)
 					isert_cq_tx_callback,
 					isert_cq_event_callback,
 					(void *)&cq_desc[i],
-					ISER_MAX_TX_CQ_LEN, i);
+					max_tx_cqe, i);
 		if (IS_ERR(device->dev_tx_cq[i])) {
 			ret = PTR_ERR(device->dev_tx_cq[i]);
 			device->dev_tx_cq[i] = NULL;
@@ -803,14 +810,25 @@ wake_up:
 	complete(&isert_conn->conn_wait);
 }
 
-static void
+static int
 isert_disconnected_handler(struct rdma_cm_id *cma_id, bool disconnect)
 {
-	struct isert_conn *isert_conn = (struct isert_conn *)cma_id->context;
+	struct isert_conn *isert_conn;
+
+	if (!cma_id->qp) {
+		struct isert_np *isert_np = cma_id->context;
+
+		isert_np->np_cm_id = NULL;
+		return -1;
+	}
+
+	isert_conn = (struct isert_conn *)cma_id->context;
 
 	isert_conn->disconnect = disconnect;
 	INIT_WORK(&isert_conn->conn_logout_work, isert_disconnect_work);
 	schedule_work(&isert_conn->conn_logout_work);
+
+	return 0;
 }
 
 static int
@@ -825,6 +843,9 @@ isert_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
 	switch (event->event) {
 	case RDMA_CM_EVENT_CONNECT_REQUEST:
 		ret = isert_connect_request(cma_id, event);
+		if (ret)
+			pr_err("isert_cma_handler failed RDMA_CM_EVENT: 0x%08x %d\n",
+			       event->event, ret);
 		break;
 	case RDMA_CM_EVENT_ESTABLISHED:
 		isert_connected_handler(cma_id);
@@ -834,7 +855,7 @@ isert_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
 	case RDMA_CM_EVENT_DEVICE_REMOVAL: /* FALLTHRU */
 		disconnect = true;
 	case RDMA_CM_EVENT_TIMEWAIT_EXIT: /* FALLTHRU */
-		isert_disconnected_handler(cma_id, disconnect);
+		ret = isert_disconnected_handler(cma_id, disconnect);
 		break;
 	case RDMA_CM_EVENT_CONNECT_ERROR:
 	default:
@@ -842,12 +863,6 @@ isert_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
 		break;
 	}
 
-	if (ret != 0) {
-		pr_err("isert_cma_handler failed RDMA_CM_EVENT: 0x%08x %d\n",
-		       event->event, ret);
-		dump_stack();
-	}
-
 	return ret;
 }
 
@@ -3190,7 +3205,8 @@ isert_free_np(struct iscsi_np *np)
 {
 	struct isert_np *isert_np = (struct isert_np *)np->np_context;
 
-	rdma_destroy_id(isert_np->np_cm_id);
+	if (isert_np->np_cm_id)
+		rdma_destroy_id(isert_np->np_cm_id);
 
 	np->np_context = NULL;
 	kfree(isert_np);
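The ib_isert hunks above boil down to clamping requested resources to what the HCA actually advertises: CQ depth is capped at dev_attr->max_cqe and max_send_sge keeps a floor of two entries for outgoing control PDU responses, which is what lets the driver come up on ocrdma hardware. Below is a minimal standalone sketch of that clamping idea in plain C rather than driver code; the struct and values are hypothetical stand-ins for ib_device_attr.

/*
 * Standalone sketch (not driver code) of the clamping used above: cap the
 * requested CQ depth to the device maximum and keep a floor of two send
 * SGEs. All names and numbers are hypothetical.
 */
#include <stdio.h>

#define DESIRED_CQ_LEN  (1 << 16)
#define MIN_SEND_SGE    2

struct fake_dev_attr {          /* stands in for struct ib_device_attr */
	int max_cqe;
	int max_sge;
};

static int clamp_cq_len(const struct fake_dev_attr *attr)
{
	return DESIRED_CQ_LEN < attr->max_cqe ? DESIRED_CQ_LEN : attr->max_cqe;
}

static int clamp_send_sge(const struct fake_dev_attr *attr)
{
	int sge = attr->max_sge - 2;    /* reserve two, as in the RDMA_READ work-around */

	return sge > MIN_SEND_SGE ? sge : MIN_SEND_SGE;
}

int main(void)
{
	struct fake_dev_attr small_hca = { .max_cqe = 32768, .max_sge = 4 };

	printf("cq len %d, send sge %d\n",
	       clamp_cq_len(&small_hca), clamp_send_sge(&small_hca));
	return 0;
}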
diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c
index 7206547c13ce..dc829682701a 100644
--- a/drivers/infiniband/ulp/srpt/ib_srpt.c
+++ b/drivers/infiniband/ulp/srpt/ib_srpt.c
@@ -2092,6 +2092,7 @@ static int srpt_create_ch_ib(struct srpt_rdma_ch *ch)
 	if (!qp_init)
 		goto out;
 
+retry:
 	ch->cq = ib_create_cq(sdev->device, srpt_completion, NULL, ch,
 			      ch->rq_size + srp_sq_size, 0);
 	if (IS_ERR(ch->cq)) {
@@ -2115,6 +2116,13 @@ static int srpt_create_ch_ib(struct srpt_rdma_ch *ch)
 	ch->qp = ib_create_qp(sdev->pd, qp_init);
 	if (IS_ERR(ch->qp)) {
 		ret = PTR_ERR(ch->qp);
+		if (ret == -ENOMEM) {
+			srp_sq_size /= 2;
+			if (srp_sq_size >= MIN_SRPT_SQ_SIZE) {
+				ib_destroy_cq(ch->cq);
+				goto retry;
+			}
+		}
 		printk(KERN_ERR "failed to create_qp ret= %d\n", ret);
 		goto err_destroy_cq;
 	}
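The ib_srpt hunk retries channel creation with a smaller send queue when the HCA cannot allocate the requested QP, halving srp_sq_size until it would drop below the minimum. A standalone sketch of the same halve-and-retry pattern follows; try_create_channel() is a hypothetical helper standing in for the ib_create_cq()/ib_create_qp() pair.

/*
 * Standalone sketch (not the driver) of the retry pattern added above:
 * on -ENOMEM, halve the requested send queue size and try again until a
 * lower bound is reached. Names and limits are hypothetical.
 */
#include <errno.h>
#include <stdio.h>

#define MIN_SQ_SIZE 4096

static int try_create_channel(int sq_size)
{
	/* pretend the hardware only supports up to 8192 entries */
	return sq_size > 8192 ? -ENOMEM : 0;
}

static int create_channel_with_retry(int sq_size)
{
	int ret;

	while ((ret = try_create_channel(sq_size)) == -ENOMEM &&
	       sq_size / 2 >= MIN_SQ_SIZE) {
		sq_size /= 2;
		fprintf(stderr, "retrying with sq_size=%d\n", sq_size);
	}
	return ret;
}

int main(void)
{
	return create_channel_with_retry(32768) ? 1 : 0;
}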
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
index b19e4329ba00..73e58d22e325 100644
--- a/drivers/target/iscsi/iscsi_target.c
+++ b/drivers/target/iscsi/iscsi_target.c
@@ -3491,7 +3491,7 @@ iscsit_build_sendtargets_response(struct iscsi_cmd *cmd,
 			len = sprintf(buf, "TargetAddress="
 				"%s:%hu,%hu",
 				inaddr_any ? conn->local_ip : np->np_ip,
-				inaddr_any ? conn->local_port : np->np_port,
+				np->np_port,
 				tpg->tpgt);
 			len += 1;
 
diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c
index 8c60a1a1ae8d..9f93b8234095 100644
--- a/drivers/target/target_core_pr.c
+++ b/drivers/target/target_core_pr.c
@@ -2738,7 +2738,8 @@ core_scsi3_pro_preempt(struct se_cmd *cmd, int type, int scope, u64 res_key,
 	struct t10_pr_registration *pr_reg, *pr_reg_tmp, *pr_reg_n, *pr_res_holder;
 	struct t10_reservation *pr_tmpl = &dev->t10_pr;
 	u32 pr_res_mapped_lun = 0;
-	int all_reg = 0, calling_it_nexus = 0, released_regs = 0;
+	int all_reg = 0, calling_it_nexus = 0;
+	bool sa_res_key_unmatched = sa_res_key != 0;
 	int prh_type = 0, prh_scope = 0;
 
 	if (!se_sess)
@@ -2813,6 +2814,7 @@ core_scsi3_pro_preempt(struct se_cmd *cmd, int type, int scope, u64 res_key,
 		if (!all_reg) {
 			if (pr_reg->pr_res_key != sa_res_key)
 				continue;
+			sa_res_key_unmatched = false;
 
 			calling_it_nexus = (pr_reg_n == pr_reg) ? 1 : 0;
 			pr_reg_nacl = pr_reg->pr_reg_nacl;
@@ -2820,7 +2822,6 @@ core_scsi3_pro_preempt(struct se_cmd *cmd, int type, int scope, u64 res_key,
 			__core_scsi3_free_registration(dev, pr_reg,
 				(preempt_type == PREEMPT_AND_ABORT) ? &preempt_and_abort_list :
 					NULL, calling_it_nexus);
-			released_regs++;
 		} else {
 			/*
 			 * Case for any existing all registrants type
@@ -2838,6 +2839,7 @@ core_scsi3_pro_preempt(struct se_cmd *cmd, int type, int scope, u64 res_key,
 			if ((sa_res_key) &&
 			     (pr_reg->pr_res_key != sa_res_key))
 				continue;
+			sa_res_key_unmatched = false;
 
 			calling_it_nexus = (pr_reg_n == pr_reg) ? 1 : 0;
 			if (calling_it_nexus)
@@ -2848,7 +2850,6 @@ core_scsi3_pro_preempt(struct se_cmd *cmd, int type, int scope, u64 res_key,
 			__core_scsi3_free_registration(dev, pr_reg,
 				(preempt_type == PREEMPT_AND_ABORT) ? &preempt_and_abort_list :
 					NULL, 0);
-			released_regs++;
 			}
 			if (!calling_it_nexus)
 				core_scsi3_ua_allocate(pr_reg_nacl,
@@ -2863,7 +2864,7 @@ core_scsi3_pro_preempt(struct se_cmd *cmd, int type, int scope, u64 res_key,
 	 * registered reservation key, then the device server shall
 	 * complete the command with RESERVATION CONFLICT status.
 	 */
-	if (!released_regs) {
+	if (sa_res_key_unmatched) {
 		spin_unlock(&dev->dev_reservation_lock);
 		core_scsi3_put_pr_reg(pr_reg_n);
 		return TCM_RESERVATION_CONFLICT;
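The PR PREEMPT change replaces the released_regs counter with a flag recording whether any registration matched the service action key, so RESERVATION CONFLICT is returned only for a genuinely unmatched key rather than whenever nothing happened to be released. A compact sketch of that key-matching rule follows; the types and key values are hypothetical, not target-core code.

/*
 * Sketch (not target code) of the SPC-3 PREEMPT key-matching rule above:
 * conflict depends on whether any registration matched the SA key, not on
 * how many registrations were actually released.
 */
#include <stdbool.h>
#include <stdio.h>

struct registration { unsigned long long res_key; };

static bool preempt_key_matched(const struct registration *regs, int nregs,
				unsigned long long sa_res_key)
{
	bool unmatched = sa_res_key != 0;   /* a zero key can never be "unmatched" */
	int i;

	for (i = 0; i < nregs; i++) {
		if (sa_res_key && regs[i].res_key != sa_res_key)
			continue;
		unmatched = false;          /* at least one registration matched */
	}
	return !unmatched;
}

int main(void)
{
	struct registration regs[] = { { 0x1111 }, { 0x2222 } };

	printf("%s\n", preempt_key_matched(regs, 2, 0x3333) ?
	       "ok" : "RESERVATION CONFLICT");
	return 0;
}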
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index 9ea0d5f03f7a..be877bf6f730 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -2292,7 +2292,7 @@ transport_generic_new_cmd(struct se_cmd *cmd)
 	 * and let it call back once the write buffers are ready.
 	 */
 	target_add_to_state_list(cmd);
-	if (cmd->data_direction != DMA_TO_DEVICE) {
+	if (cmd->data_direction != DMA_TO_DEVICE || cmd->data_length == 0) {
 		target_execute_cmd(cmd);
 		return 0;
 	}
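This one-line transport change makes a command with a write data direction but data_length == 0 execute immediately instead of calling TFO->write_pending, which is what the zero-length vxworks initiator commands were tripping over. A small sketch of the decision, with hypothetical types mirroring the kernel names:

/*
 * Sketch (not target code) of the zero-length WRITE handling above: nothing
 * to transfer means no write_pending round trip.
 */
#include <stdbool.h>
#include <stdio.h>

enum data_direction { DMA_TO_DEVICE, DMA_FROM_DEVICE, DMA_NONE };

struct cmd {
	enum data_direction data_direction;
	unsigned int data_length;
};

/* true when the command must wait for write buffers from the initiator */
static bool needs_write_pending(const struct cmd *cmd)
{
	return cmd->data_direction == DMA_TO_DEVICE && cmd->data_length != 0;
}

int main(void)
{
	struct cmd zero_len_write = { DMA_TO_DEVICE, 0 };

	printf("write_pending needed: %d\n", needs_write_pending(&zero_len_write));
	return 0;
}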
diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c
index 69906cacd04f..a17f11850669 100644
--- a/drivers/vhost/scsi.c
+++ b/drivers/vhost/scsi.c
@@ -1312,6 +1312,7 @@ static int
 vhost_scsi_set_endpoint(struct vhost_scsi *vs,
 			struct vhost_scsi_target *t)
 {
+	struct se_portal_group *se_tpg;
 	struct tcm_vhost_tport *tv_tport;
 	struct tcm_vhost_tpg *tpg;
 	struct tcm_vhost_tpg **vs_tpg;
@@ -1359,6 +1360,21 @@ vhost_scsi_set_endpoint(struct vhost_scsi *vs,
 				ret = -EEXIST;
 				goto out;
 			}
+			/*
+			 * In order to ensure individual vhost-scsi configfs
+			 * groups cannot be removed while in use by vhost ioctl,
+			 * go ahead and take an explicit se_tpg->tpg_group.cg_item
+			 * dependency now.
+			 */
+			se_tpg = &tpg->se_tpg;
+			ret = configfs_depend_item(se_tpg->se_tpg_tfo->tf_subsys,
+						   &se_tpg->tpg_group.cg_item);
+			if (ret) {
+				pr_warn("configfs_depend_item() failed: %d\n", ret);
+				kfree(vs_tpg);
+				mutex_unlock(&tpg->tv_tpg_mutex);
+				goto out;
+			}
 			tpg->tv_tpg_vhost_count++;
 			tpg->vhost_scsi = vs;
 			vs_tpg[tpg->tport_tpgt] = tpg;
@@ -1401,6 +1417,7 @@ static int
 vhost_scsi_clear_endpoint(struct vhost_scsi *vs,
 			  struct vhost_scsi_target *t)
 {
+	struct se_portal_group *se_tpg;
 	struct tcm_vhost_tport *tv_tport;
 	struct tcm_vhost_tpg *tpg;
 	struct vhost_virtqueue *vq;
@@ -1449,6 +1466,13 @@ vhost_scsi_clear_endpoint(struct vhost_scsi *vs,
 		vs->vs_tpg[target] = NULL;
 		match = true;
 		mutex_unlock(&tpg->tv_tpg_mutex);
+		/*
+		 * Release se_tpg->tpg_group.cg_item configfs dependency now
+		 * to allow vhost-scsi WWPN se_tpg->tpg_group shutdown to occur.
+		 */
+		se_tpg = &tpg->se_tpg;
+		configfs_undepend_item(se_tpg->se_tpg_tfo->tf_subsys,
+				       &se_tpg->tpg_group.cg_item);
 	}
 	if (match) {
 		for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
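The vhost-scsi hunks pair a configfs dependency taken in VHOST_SCSI_SET_ENDPOINT with its release in VHOST_SCSI_CLEAR_ENDPOINT, so the WWPN configfs group cannot be removed while qemu still holds the endpoint. A userspace sketch of that take/release pairing follows; the names are hypothetical and a plain counter stands in for the configfs cg_item dependency.

/*
 * Userspace sketch (not vhost code) of the pairing introduced above: take a
 * dependency when the endpoint is attached, drop it when the endpoint is
 * cleared, and block group removal while any dependency is held.
 */
#include <stdio.h>

struct tpg {
	int depend_count;   /* stands in for the configfs cg_item dependency */
	int vhost_count;
};

static int set_endpoint(struct tpg *tpg)
{
	tpg->depend_count++;        /* configfs_depend_item() in the real code */
	tpg->vhost_count++;
	return 0;
}

static void clear_endpoint(struct tpg *tpg)
{
	tpg->vhost_count--;
	tpg->depend_count--;        /* configfs_undepend_item() counterpart */
}

static int can_remove_group(const struct tpg *tpg)
{
	return tpg->depend_count == 0;   /* removal blocked while a user holds it */
}

int main(void)
{
	struct tpg tpg = { 0, 0 };

	set_endpoint(&tpg);
	printf("removable while attached: %d\n", can_remove_group(&tpg));
	clear_endpoint(&tpg);
	printf("removable after clear:    %d\n", can_remove_group(&tpg));
	return 0;
}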