author     Doug Ledford <dledford@redhat.com>  2016-11-16 20:05:10 -0500
committer  Doug Ledford <dledford@redhat.com>  2016-11-16 20:05:10 -0500
commit     6fa1f2f0aa6191193704b9ff10e5a2cafe540738
tree       86398ad896bd0bfbf173b86f0d01210e47848537
parent     2b16056f845207967a32497f41cf92b57849f934
parent     6d931308f55faaef3f30bd0346c47f99528b229d

Merge branches 'hfi1' and 'mlx' into k.o/for-4.9-rc
 drivers/infiniband/core/addr.c        |  11
 drivers/infiniband/core/cm.c          | 126
 drivers/infiniband/core/cma.c         |  21
 drivers/infiniband/core/umem.c        |   2
 drivers/infiniband/core/uverbs_main.c |   7
 drivers/infiniband/hw/mlx4/ah.c       |   5
 drivers/infiniband/hw/mlx4/cq.c       |   5
 drivers/infiniband/hw/mlx5/cq.c       |   3
 drivers/infiniband/hw/mlx5/main.c     |  11
 drivers/infiniband/hw/mlx5/mlx5_ib.h  |   2
 drivers/infiniband/hw/mlx5/mr.c       |   6
 drivers/infiniband/hw/mlx5/qp.c       |  12
 drivers/infiniband/sw/rxe/rxe_net.c   |   8
 drivers/infiniband/sw/rxe/rxe_qp.c    |   2
 drivers/infiniband/sw/rxe/rxe_queue.c |   9
 drivers/infiniband/sw/rxe/rxe_queue.h |   2
 drivers/infiniband/sw/rxe/rxe_req.c   |  21

 17 files changed, 203 insertions, 50 deletions
diff --git a/drivers/infiniband/core/addr.c b/drivers/infiniband/core/addr.c
index b136d3acc5bd..0f58f46dbad7 100644
--- a/drivers/infiniband/core/addr.c
+++ b/drivers/infiniband/core/addr.c
@@ -699,13 +699,16 @@ EXPORT_SYMBOL(rdma_addr_cancel);
 struct resolve_cb_context {
 	struct rdma_dev_addr *addr;
 	struct completion comp;
+	int status;
 };
 
 static void resolve_cb(int status, struct sockaddr *src_addr,
 		       struct rdma_dev_addr *addr, void *context)
 {
-	memcpy(((struct resolve_cb_context *)context)->addr, addr, sizeof(struct
-				rdma_dev_addr));
+	if (!status)
+		memcpy(((struct resolve_cb_context *)context)->addr,
+		       addr, sizeof(struct rdma_dev_addr));
+	((struct resolve_cb_context *)context)->status = status;
 	complete(&((struct resolve_cb_context *)context)->comp);
 }
 
@@ -743,6 +746,10 @@ int rdma_addr_find_l2_eth_by_grh(const union ib_gid *sgid,
 
 	wait_for_completion(&ctx.comp);
 
+	ret = ctx.status;
+	if (ret)
+		return ret;
+
 	memcpy(dmac, dev_addr.dst_dev_addr, ETH_ALEN);
 	dev = dev_get_by_index(&init_net, dev_addr.bound_dev_if);
 	if (!dev)
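Aside (not part of the patch above): the addr.c fix works because resolve_cb() now records the callback's status in the context the waiter owns, and rdma_addr_find_l2_eth_by_grh() checks that status after wait_for_completion() instead of blindly using an address that may never have been written. A minimal userspace sketch of the same wait-plus-status idiom is below; all names (resolve_ctx, resolve_done, wait_for_resolve) are illustrative, not kernel APIs.

/* Sketch only: a callback/waiter pair sharing a context that carries both
 * a "done" flag and the status the callback observed. Hypothetical names. */
#include <pthread.h>
#include <string.h>

struct resolve_ctx {
	pthread_mutex_t lock;
	pthread_cond_t done;
	int completed;
	int status;		/* 0 on success, negative errno style otherwise */
	char result[64];	/* only valid when status == 0 */
};

static struct resolve_ctx ctx = {
	.lock = PTHREAD_MUTEX_INITIALIZER,
	.done = PTHREAD_COND_INITIALIZER,
};

static void resolve_done(int status, const char *result, void *context)
{
	struct resolve_ctx *c = context;

	pthread_mutex_lock(&c->lock);
	if (!status) {			/* copy the payload only on success */
		strncpy(c->result, result, sizeof(c->result) - 1);
		c->result[sizeof(c->result) - 1] = '\0';
	}
	c->status = status;		/* always record the outcome */
	c->completed = 1;
	pthread_cond_signal(&c->done);
	pthread_mutex_unlock(&c->lock);
}

static int wait_for_resolve(struct resolve_ctx *c)
{
	pthread_mutex_lock(&c->lock);
	while (!c->completed)
		pthread_cond_wait(&c->done, &c->lock);
	pthread_mutex_unlock(&c->lock);
	return c->status;		/* caller bails out before touching result */
}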
diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
index c99525512b34..71c7c4c328ef 100644
--- a/drivers/infiniband/core/cm.c
+++ b/drivers/infiniband/core/cm.c
@@ -80,6 +80,8 @@ static struct ib_cm {
 	__be32 random_id_operand;
 	struct list_head timewait_list;
 	struct workqueue_struct *wq;
+	/* Sync on cm change port state */
+	spinlock_t state_lock;
 } cm;
 
 /* Counter indexes ordered by attribute ID */
@@ -161,6 +163,8 @@ struct cm_port {
 	struct ib_mad_agent *mad_agent;
 	struct kobject port_obj;
 	u8 port_num;
+	struct list_head cm_priv_prim_list;
+	struct list_head cm_priv_altr_list;
 	struct cm_counter_group counter_group[CM_COUNTER_GROUPS];
 };
 
@@ -241,6 +245,12 @@ struct cm_id_private {
 	u8 service_timeout;
 	u8 target_ack_delay;
 
+	struct list_head prim_list;
+	struct list_head altr_list;
+	/* Indicates that the send port mad is registered and av is set */
+	int prim_send_port_not_ready;
+	int altr_send_port_not_ready;
+
 	struct list_head work_list;
 	atomic_t work_count;
 };
@@ -259,20 +269,47 @@ static int cm_alloc_msg(struct cm_id_private *cm_id_priv,
 	struct ib_mad_agent *mad_agent;
 	struct ib_mad_send_buf *m;
 	struct ib_ah *ah;
+	struct cm_av *av;
+	unsigned long flags, flags2;
+	int ret = 0;
 
+	/* don't let the port to be released till the agent is down */
+	spin_lock_irqsave(&cm.state_lock, flags2);
+	spin_lock_irqsave(&cm.lock, flags);
+	if (!cm_id_priv->prim_send_port_not_ready)
+		av = &cm_id_priv->av;
+	else if (!cm_id_priv->altr_send_port_not_ready &&
+		 (cm_id_priv->alt_av.port))
+		av = &cm_id_priv->alt_av;
+	else {
+		pr_info("%s: not valid CM id\n", __func__);
+		ret = -ENODEV;
+		spin_unlock_irqrestore(&cm.lock, flags);
+		goto out;
+	}
+	spin_unlock_irqrestore(&cm.lock, flags);
+	/* Make sure the port haven't released the mad yet */
 	mad_agent = cm_id_priv->av.port->mad_agent;
-	ah = ib_create_ah(mad_agent->qp->pd, &cm_id_priv->av.ah_attr);
-	if (IS_ERR(ah))
-		return PTR_ERR(ah);
+	if (!mad_agent) {
+		pr_info("%s: not a valid MAD agent\n", __func__);
+		ret = -ENODEV;
+		goto out;
+	}
+	ah = ib_create_ah(mad_agent->qp->pd, &av->ah_attr);
+	if (IS_ERR(ah)) {
+		ret = PTR_ERR(ah);
+		goto out;
+	}
 
 	m = ib_create_send_mad(mad_agent, cm_id_priv->id.remote_cm_qpn,
-			       cm_id_priv->av.pkey_index,
+			       av->pkey_index,
 			       0, IB_MGMT_MAD_HDR, IB_MGMT_MAD_DATA,
 			       GFP_ATOMIC,
 			       IB_MGMT_BASE_VERSION);
 	if (IS_ERR(m)) {
 		ib_destroy_ah(ah);
-		return PTR_ERR(m);
+		ret = PTR_ERR(m);
+		goto out;
 	}
 
 	/* Timeout set by caller if response is expected. */
@@ -282,7 +319,10 @@ static int cm_alloc_msg(struct cm_id_private *cm_id_priv,
 	atomic_inc(&cm_id_priv->refcount);
 	m->context[0] = cm_id_priv;
 	*msg = m;
-	return 0;
+
+out:
+	spin_unlock_irqrestore(&cm.state_lock, flags2);
+	return ret;
 }
 
 static int cm_alloc_response_msg(struct cm_port *port,
@@ -352,7 +392,8 @@ static void cm_init_av_for_response(struct cm_port *port, struct ib_wc *wc,
 			   grh, &av->ah_attr);
 }
 
-static int cm_init_av_by_path(struct ib_sa_path_rec *path, struct cm_av *av)
+static int cm_init_av_by_path(struct ib_sa_path_rec *path, struct cm_av *av,
+			      struct cm_id_private *cm_id_priv)
 {
 	struct cm_device *cm_dev;
 	struct cm_port *port = NULL;
@@ -387,7 +428,17 @@ static int cm_init_av_by_path(struct ib_sa_path_rec *path, struct cm_av *av)
 			     &av->ah_attr);
 	av->timeout = path->packet_life_time + 1;
 
-	return 0;
+	spin_lock_irqsave(&cm.lock, flags);
+	if (&cm_id_priv->av == av)
+		list_add_tail(&cm_id_priv->prim_list, &port->cm_priv_prim_list);
+	else if (&cm_id_priv->alt_av == av)
+		list_add_tail(&cm_id_priv->altr_list, &port->cm_priv_altr_list);
+	else
+		ret = -EINVAL;
+
+	spin_unlock_irqrestore(&cm.lock, flags);
+
+	return ret;
 }
 
 static int cm_alloc_id(struct cm_id_private *cm_id_priv)
@@ -677,6 +728,8 @@ struct ib_cm_id *ib_create_cm_id(struct ib_device *device,
 	spin_lock_init(&cm_id_priv->lock);
 	init_completion(&cm_id_priv->comp);
 	INIT_LIST_HEAD(&cm_id_priv->work_list);
+	INIT_LIST_HEAD(&cm_id_priv->prim_list);
+	INIT_LIST_HEAD(&cm_id_priv->altr_list);
 	atomic_set(&cm_id_priv->work_count, -1);
 	atomic_set(&cm_id_priv->refcount, 1);
 	return &cm_id_priv->id;
@@ -892,6 +945,15 @@ retest:
 		break;
 	}
 
+	spin_lock_irq(&cm.lock);
+	if (!list_empty(&cm_id_priv->altr_list) &&
+	    (!cm_id_priv->altr_send_port_not_ready))
+		list_del(&cm_id_priv->altr_list);
+	if (!list_empty(&cm_id_priv->prim_list) &&
+	    (!cm_id_priv->prim_send_port_not_ready))
+		list_del(&cm_id_priv->prim_list);
+	spin_unlock_irq(&cm.lock);
+
 	cm_free_id(cm_id->local_id);
 	cm_deref_id(cm_id_priv);
 	wait_for_completion(&cm_id_priv->comp);
@@ -1192,12 +1254,13 @@ int ib_send_cm_req(struct ib_cm_id *cm_id,
 		goto out;
 	}
 
-	ret = cm_init_av_by_path(param->primary_path, &cm_id_priv->av);
+	ret = cm_init_av_by_path(param->primary_path, &cm_id_priv->av,
+				 cm_id_priv);
 	if (ret)
 		goto error1;
 	if (param->alternate_path) {
 		ret = cm_init_av_by_path(param->alternate_path,
-					 &cm_id_priv->alt_av);
+					 &cm_id_priv->alt_av, cm_id_priv);
 		if (ret)
 			goto error1;
 	}
@@ -1653,7 +1716,8 @@ static int cm_req_handler(struct cm_work *work)
 			dev_put(gid_attr.ndev);
 		}
 		work->path[0].gid_type = gid_attr.gid_type;
-		ret = cm_init_av_by_path(&work->path[0], &cm_id_priv->av);
+		ret = cm_init_av_by_path(&work->path[0], &cm_id_priv->av,
+					 cm_id_priv);
 	}
 	if (ret) {
 		int err = ib_get_cached_gid(work->port->cm_dev->ib_device,
@@ -1672,7 +1736,8 @@ static int cm_req_handler(struct cm_work *work)
 		goto rejected;
 	}
 	if (req_msg->alt_local_lid) {
-		ret = cm_init_av_by_path(&work->path[1], &cm_id_priv->alt_av);
+		ret = cm_init_av_by_path(&work->path[1], &cm_id_priv->alt_av,
+					 cm_id_priv);
 		if (ret) {
 			ib_send_cm_rej(cm_id, IB_CM_REJ_INVALID_ALT_GID,
 				       &work->path[0].sgid,
@@ -2727,7 +2792,8 @@ int ib_send_cm_lap(struct ib_cm_id *cm_id,
 		goto out;
 	}
 
-	ret = cm_init_av_by_path(alternate_path, &cm_id_priv->alt_av);
+	ret = cm_init_av_by_path(alternate_path, &cm_id_priv->alt_av,
+				 cm_id_priv);
 	if (ret)
 		goto out;
 	cm_id_priv->alt_av.timeout =
@@ -2839,7 +2905,8 @@ static int cm_lap_handler(struct cm_work *work)
 	cm_init_av_for_response(work->port, work->mad_recv_wc->wc,
 				work->mad_recv_wc->recv_buf.grh,
 				&cm_id_priv->av);
-	cm_init_av_by_path(param->alternate_path, &cm_id_priv->alt_av);
+	cm_init_av_by_path(param->alternate_path, &cm_id_priv->alt_av,
+			   cm_id_priv);
 	ret = atomic_inc_and_test(&cm_id_priv->work_count);
 	if (!ret)
 		list_add_tail(&work->list, &cm_id_priv->work_list);
@@ -3031,7 +3098,7 @@ int ib_send_cm_sidr_req(struct ib_cm_id *cm_id,
 		return -EINVAL;
 
 	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
-	ret = cm_init_av_by_path(param->path, &cm_id_priv->av);
+	ret = cm_init_av_by_path(param->path, &cm_id_priv->av, cm_id_priv);
 	if (ret)
 		goto out;
 
@@ -3468,7 +3535,9 @@ out:
 static int cm_migrate(struct ib_cm_id *cm_id)
 {
 	struct cm_id_private *cm_id_priv;
+	struct cm_av tmp_av;
 	unsigned long flags;
+	int tmp_send_port_not_ready;
 	int ret = 0;
 
 	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
@@ -3477,7 +3546,14 @@ static int cm_migrate(struct ib_cm_id *cm_id)
 	    (cm_id->lap_state == IB_CM_LAP_UNINIT ||
 	     cm_id->lap_state == IB_CM_LAP_IDLE)) {
 		cm_id->lap_state = IB_CM_LAP_IDLE;
+		/* Swap address vector */
+		tmp_av = cm_id_priv->av;
 		cm_id_priv->av = cm_id_priv->alt_av;
+		cm_id_priv->alt_av = tmp_av;
+		/* Swap port send ready state */
+		tmp_send_port_not_ready = cm_id_priv->prim_send_port_not_ready;
+		cm_id_priv->prim_send_port_not_ready = cm_id_priv->altr_send_port_not_ready;
+		cm_id_priv->altr_send_port_not_ready = tmp_send_port_not_ready;
 	} else
 		ret = -EINVAL;
 	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
@@ -3888,6 +3964,9 @@ static void cm_add_one(struct ib_device *ib_device)
 		port->cm_dev = cm_dev;
 		port->port_num = i;
 
+		INIT_LIST_HEAD(&port->cm_priv_prim_list);
+		INIT_LIST_HEAD(&port->cm_priv_altr_list);
+
 		ret = cm_create_port_fs(port);
 		if (ret)
 			goto error1;
@@ -3945,6 +4024,8 @@ static void cm_remove_one(struct ib_device *ib_device, void *client_data)
 {
 	struct cm_device *cm_dev = client_data;
 	struct cm_port *port;
+	struct cm_id_private *cm_id_priv;
+	struct ib_mad_agent *cur_mad_agent;
 	struct ib_port_modify port_modify = {
 		.clr_port_cap_mask = IB_PORT_CM_SUP
 	};
@@ -3968,15 +4049,27 @@ static void cm_remove_one(struct ib_device *ib_device, void *client_data)
 
 		port = cm_dev->port[i-1];
 		ib_modify_port(ib_device, port->port_num, 0, &port_modify);
+		/* Mark all the cm_id's as not valid */
+		spin_lock_irq(&cm.lock);
+		list_for_each_entry(cm_id_priv, &port->cm_priv_altr_list, altr_list)
+			cm_id_priv->altr_send_port_not_ready = 1;
+		list_for_each_entry(cm_id_priv, &port->cm_priv_prim_list, prim_list)
+			cm_id_priv->prim_send_port_not_ready = 1;
+		spin_unlock_irq(&cm.lock);
 		/*
 		 * We flush the queue here after the going_down set, this
 		 * verify that no new works will be queued in the recv handler,
 		 * after that we can call the unregister_mad_agent
 		 */
 		flush_workqueue(cm.wq);
-		ib_unregister_mad_agent(port->mad_agent);
+		spin_lock_irq(&cm.state_lock);
+		cur_mad_agent = port->mad_agent;
+		port->mad_agent = NULL;
+		spin_unlock_irq(&cm.state_lock);
+		ib_unregister_mad_agent(cur_mad_agent);
 		cm_remove_port_fs(port);
 	}
+
 	device_unregister(cm_dev->device);
 	kfree(cm_dev);
 }
@@ -3989,6 +4082,7 @@ static int __init ib_cm_init(void)
 	INIT_LIST_HEAD(&cm.device_list);
 	rwlock_init(&cm.device_lock);
 	spin_lock_init(&cm.lock);
+	spin_lock_init(&cm.state_lock);
 	cm.listen_service_table = RB_ROOT;
 	cm.listen_service_id = be64_to_cpu(IB_CM_ASSIGN_SERVICE_ID);
 	cm.remote_id_table = RB_ROOT;
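Aside (not part of the patch above): the cm.c changes revolve around one idea. port->mad_agent is now published and revoked under cm.state_lock, and every cm_id is tracked on a per-port list so cm_remove_one() can flag it as not-ready before the agent goes away; a sender therefore either sees a usable agent or gets a clean -ENODEV, never a stale pointer. A rough userspace-style sketch of that publish/revoke pattern follows; struct port, send_via_port() and revoke_agent() are hypothetical names, not kernel APIs.

/* Sketch only: revoking a shared pointer under a lock so users see either
 * a valid object or NULL, never a stale one. Hypothetical names throughout. */
#include <pthread.h>
#include <stddef.h>
#include <errno.h>

struct mad_agent;			/* opaque for this sketch */

struct port {
	pthread_mutex_t state_lock;
	struct mad_agent *mad_agent;	/* NULL once the port is being removed */
};

static int send_via_port(struct port *p, void (*xmit)(struct mad_agent *))
{
	int ret = 0;

	pthread_mutex_lock(&p->state_lock);
	if (!p->mad_agent)
		ret = -ENODEV;		/* port already torn down; fail cleanly */
	else
		xmit(p->mad_agent);	/* safe: teardown is excluded by state_lock */
	pthread_mutex_unlock(&p->state_lock);
	return ret;
}

static struct mad_agent *revoke_agent(struct port *p)
{
	struct mad_agent *old;

	pthread_mutex_lock(&p->state_lock);
	old = p->mad_agent;		/* grab the current agent ... */
	p->mad_agent = NULL;		/* ... and unpublish it */
	pthread_mutex_unlock(&p->state_lock);
	return old;			/* caller unregisters/frees it outside the lock */
}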
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index 36bf50ebb187..9ca0da0a37c4 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -2436,6 +2436,18 @@ static int iboe_tos_to_sl(struct net_device *ndev, int tos)
 	return 0;
 }
 
+static enum ib_gid_type cma_route_gid_type(enum rdma_network_type network_type,
+					   unsigned long supported_gids,
+					   enum ib_gid_type default_gid)
+{
+	if ((network_type == RDMA_NETWORK_IPV4 ||
+	     network_type == RDMA_NETWORK_IPV6) &&
+	    test_bit(IB_GID_TYPE_ROCE_UDP_ENCAP, &supported_gids))
+		return IB_GID_TYPE_ROCE_UDP_ENCAP;
+
+	return default_gid;
+}
+
 static int cma_resolve_iboe_route(struct rdma_id_private *id_priv)
 {
 	struct rdma_route *route = &id_priv->id.route;
@@ -2461,6 +2473,8 @@ static int cma_resolve_iboe_route(struct rdma_id_private *id_priv)
 	route->num_paths = 1;
 
 	if (addr->dev_addr.bound_dev_if) {
+		unsigned long supported_gids;
+
 		ndev = dev_get_by_index(&init_net, addr->dev_addr.bound_dev_if);
 		if (!ndev) {
 			ret = -ENODEV;
@@ -2484,7 +2498,12 @@ static int cma_resolve_iboe_route(struct rdma_id_private *id_priv)
 
 		route->path_rec->net = &init_net;
 		route->path_rec->ifindex = ndev->ifindex;
-		route->path_rec->gid_type = id_priv->gid_type;
+		supported_gids = roce_gid_type_mask_support(id_priv->id.device,
+							    id_priv->id.port_num);
+		route->path_rec->gid_type =
+			cma_route_gid_type(addr->dev_addr.network,
+					   supported_gids,
+					   id_priv->gid_type);
 	}
 	if (!ndev) {
 		ret = -ENODEV;
diff --git a/drivers/infiniband/core/umem.c b/drivers/infiniband/core/umem.c
index 224ad274ea0b..84b4eff90395 100644
--- a/drivers/infiniband/core/umem.c
+++ b/drivers/infiniband/core/umem.c
@@ -175,7 +175,7 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
 
 	cur_base = addr & PAGE_MASK;
 
-	if (npages == 0) {
+	if (npages == 0 || npages > UINT_MAX) {
 		ret = -EINVAL;
 		goto out;
 	}
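Aside (not part of the patch above): the umem.c guard rejects a page count that would overflow the narrower unsigned type it is later stored in; without it, a huge mapping request could wrap to a small value and under-allocate. A hedged standalone sketch of the same check (check_npages() is a hypothetical helper, not the kernel function):

/* Sketch only: reject a 64-bit page count that cannot be represented in the
 * unsigned int used downstream. */
#include <stdint.h>
#include <limits.h>
#include <errno.h>

static int check_npages(uint64_t npages)
{
	if (npages == 0 || npages > UINT_MAX)
		return -EINVAL;	/* zero-sized, or would be silently truncated */
	return 0;
}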
diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c
index 0012fa58c105..44b1104eb168 100644
--- a/drivers/infiniband/core/uverbs_main.c
+++ b/drivers/infiniband/core/uverbs_main.c
@@ -262,12 +262,9 @@ static int ib_uverbs_cleanup_ucontext(struct ib_uverbs_file *file,
 			container_of(uobj, struct ib_uqp_object, uevent.uobject);
 
 		idr_remove_uobj(&ib_uverbs_qp_idr, uobj);
-		if (qp != qp->real_qp) {
-			ib_close_qp(qp);
-		} else {
+		if (qp == qp->real_qp)
 			ib_uverbs_detach_umcast(qp, uqp);
-			ib_destroy_qp(qp);
-		}
+		ib_destroy_qp(qp);
 		ib_uverbs_release_uevent(file, &uqp->uevent);
 		kfree(uqp);
 	}
diff --git a/drivers/infiniband/hw/mlx4/ah.c b/drivers/infiniband/hw/mlx4/ah.c
index 5fc623362731..b9bf0759f10a 100644
--- a/drivers/infiniband/hw/mlx4/ah.c
+++ b/drivers/infiniband/hw/mlx4/ah.c
@@ -102,7 +102,10 @@ static struct ib_ah *create_iboe_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr
 	if (vlan_tag < 0x1000)
 		vlan_tag |= (ah_attr->sl & 7) << 13;
 	ah->av.eth.port_pd = cpu_to_be32(to_mpd(pd)->pdn | (ah_attr->port_num << 24));
-	ah->av.eth.gid_index = mlx4_ib_gid_index_to_real_index(ibdev, ah_attr->port_num, ah_attr->grh.sgid_index);
+	ret = mlx4_ib_gid_index_to_real_index(ibdev, ah_attr->port_num, ah_attr->grh.sgid_index);
+	if (ret < 0)
+		return ERR_PTR(ret);
+	ah->av.eth.gid_index = ret;
 	ah->av.eth.vlan = cpu_to_be16(vlan_tag);
 	ah->av.eth.hop_limit = ah_attr->grh.hop_limit;
 	if (ah_attr->static_rate) {
diff --git a/drivers/infiniband/hw/mlx4/cq.c b/drivers/infiniband/hw/mlx4/cq.c
index 1ea686b9e0f9..6a0fec357dae 100644
--- a/drivers/infiniband/hw/mlx4/cq.c
+++ b/drivers/infiniband/hw/mlx4/cq.c
@@ -253,11 +253,14 @@ struct ib_cq *mlx4_ib_create_cq(struct ib_device *ibdev,
 	if (context)
 		if (ib_copy_to_udata(udata, &cq->mcq.cqn, sizeof (__u32))) {
 			err = -EFAULT;
-			goto err_dbmap;
+			goto err_cq_free;
 		}
 
 	return &cq->ibcq;
 
+err_cq_free:
+	mlx4_cq_free(dev->dev, &cq->mcq);
+
 err_dbmap:
 	if (context)
 		mlx4_ib_db_unmap_user(to_mucontext(context), &cq->db);
diff --git a/drivers/infiniband/hw/mlx5/cq.c b/drivers/infiniband/hw/mlx5/cq.c
index 79d017baf6f4..fcd04b881ec1 100644
--- a/drivers/infiniband/hw/mlx5/cq.c
+++ b/drivers/infiniband/hw/mlx5/cq.c
@@ -932,8 +932,7 @@ struct ib_cq *mlx5_ib_create_cq(struct ib_device *ibdev,
 		if (err)
 			goto err_create;
 	} else {
-		/* for now choose 64 bytes till we have a proper interface */
-		cqe_size = 64;
+		cqe_size = cache_line_size() == 128 ? 128 : 64;
 		err = create_cq_kernel(dev, cq, entries, cqe_size, &cqb,
 				       &index, &inlen);
 		if (err)
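Aside (not part of the patch above): instead of a hard-coded 64-byte CQE, the kernel path now picks a 128-byte entry when the CPU cache line is 128 bytes, so a completion entry never shares a cache line with its neighbour. A hedged sketch of that selection, with a hypothetical pick_cqe_size() taking the cache line size as a parameter:

/* Sketch only: choose a CQE stride matching the CPU cache line.
 * pick_cqe_size() is a hypothetical helper, not a kernel function. */
static int pick_cqe_size(int cache_line_bytes)
{
	/* 64-byte entries on typical 64-byte-line CPUs; 128-byte entries on
	 * 128-byte-line CPUs so each CQE owns exactly one cache line. */
	return cache_line_bytes == 128 ? 128 : 64;
}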
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index 22174774dbb8..a014ad38d889 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -2311,14 +2311,14 @@ static void mlx5_ib_event(struct mlx5_core_dev *dev, void *context,
 {
 	struct mlx5_ib_dev *ibdev = (struct mlx5_ib_dev *)context;
 	struct ib_event ibev;
-
+	bool fatal = false;
 	u8 port = 0;
 
 	switch (event) {
 	case MLX5_DEV_EVENT_SYS_ERROR:
-		ibdev->ib_active = false;
 		ibev.event = IB_EVENT_DEVICE_FATAL;
 		mlx5_ib_handle_internal_error(ibdev);
+		fatal = true;
 		break;
 
 	case MLX5_DEV_EVENT_PORT_UP:
@@ -2370,6 +2370,9 @@ static void mlx5_ib_event(struct mlx5_core_dev *dev, void *context,
 
 	if (ibdev->ib_active)
 		ib_dispatch_event(&ibev);
+
+	if (fatal)
+		ibdev->ib_active = false;
 }
 
 static void get_ext_port_caps(struct mlx5_ib_dev *dev)
@@ -3115,7 +3118,7 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
 	}
 	err = init_node_data(dev);
 	if (err)
-		goto err_dealloc;
+		goto err_free_port;
 
 	mutex_init(&dev->flow_db.lock);
 	mutex_init(&dev->cap_mask_mutex);
@@ -3125,7 +3128,7 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
 	if (ll == IB_LINK_LAYER_ETHERNET) {
 		err = mlx5_enable_roce(dev);
 		if (err)
-			goto err_dealloc;
+			goto err_free_port;
 	}
 
 	err = create_dev_resources(&dev->devr);
diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h
index dcdcd195fe53..7d689903c87c 100644
--- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
+++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
@@ -626,6 +626,8 @@ struct mlx5_ib_dev {
 	struct mlx5_ib_resources devr;
 	struct mlx5_mr_cache cache;
 	struct timer_list delay_timer;
+	/* Prevents soft lock on massive reg MRs */
+	struct mutex slow_path_mutex;
 	int fill_delay;
 #ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
 	struct ib_odp_caps odp_caps;
diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
index d4ad672b905b..4e9012463c37 100644
--- a/drivers/infiniband/hw/mlx5/mr.c
+++ b/drivers/infiniband/hw/mlx5/mr.c
@@ -610,6 +610,7 @@ int mlx5_mr_cache_init(struct mlx5_ib_dev *dev)
 	int err;
 	int i;
 
+	mutex_init(&dev->slow_path_mutex);
 	cache->wq = alloc_ordered_workqueue("mkey_cache", WQ_MEM_RECLAIM);
 	if (!cache->wq) {
 		mlx5_ib_warn(dev, "failed to create work queue\n");
@@ -1182,9 +1183,12 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
 		goto error;
 	}
 
-	if (!mr)
+	if (!mr) {
+		mutex_lock(&dev->slow_path_mutex);
 		mr = reg_create(NULL, pd, virt_addr, length, umem, ncont,
 				page_shift, access_flags);
+		mutex_unlock(&dev->slow_path_mutex);
+	}
 
 	if (IS_ERR(mr)) {
 		err = PTR_ERR(mr);
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index 41f4c2afbcdd..59c4c89460d1 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -2052,8 +2052,8 @@ struct ib_qp *mlx5_ib_create_qp(struct ib_pd *pd,
 
 	mlx5_ib_dbg(dev, "ib qpnum 0x%x, mlx qpn 0x%x, rcqn 0x%x, scqn 0x%x\n",
 		    qp->ibqp.qp_num, qp->trans_qp.base.mqp.qpn,
-		    to_mcq(init_attr->recv_cq)->mcq.cqn,
-		    to_mcq(init_attr->send_cq)->mcq.cqn);
+		    init_attr->recv_cq ? to_mcq(init_attr->recv_cq)->mcq.cqn : -1,
+		    init_attr->send_cq ? to_mcq(init_attr->send_cq)->mcq.cqn : -1);
 
 	qp->trans_qp.xrcdn = xrcdn;
 
@@ -4815,6 +4815,14 @@ struct ib_rwq_ind_table *mlx5_ib_create_rwq_ind_table(struct ib_device *device,
 			 udata->inlen))
 		return ERR_PTR(-EOPNOTSUPP);
 
+	if (init_attr->log_ind_tbl_size >
+	    MLX5_CAP_GEN(dev->mdev, log_max_rqt_size)) {
+		mlx5_ib_dbg(dev, "log_ind_tbl_size = %d is bigger than supported = %d\n",
+			    init_attr->log_ind_tbl_size,
+			    MLX5_CAP_GEN(dev->mdev, log_max_rqt_size));
+		return ERR_PTR(-EINVAL);
+	}
+
 	min_resp_len = offsetof(typeof(resp), reserved) + sizeof(resp.reserved);
 	if (udata->outlen && udata->outlen < min_resp_len)
 		return ERR_PTR(-EINVAL);
diff --git a/drivers/infiniband/sw/rxe/rxe_net.c b/drivers/infiniband/sw/rxe/rxe_net.c
index b8258e4f0aea..ffff5a54cb34 100644
--- a/drivers/infiniband/sw/rxe/rxe_net.c
+++ b/drivers/infiniband/sw/rxe/rxe_net.c
@@ -243,10 +243,8 @@ static struct socket *rxe_setup_udp_tunnel(struct net *net, __be16 port,
 {
 	int err;
 	struct socket *sock;
-	struct udp_port_cfg udp_cfg;
-	struct udp_tunnel_sock_cfg tnl_cfg;
-
-	memset(&udp_cfg, 0, sizeof(udp_cfg));
+	struct udp_port_cfg udp_cfg = {0};
+	struct udp_tunnel_sock_cfg tnl_cfg = {0};
 
 	if (ipv6) {
 		udp_cfg.family = AF_INET6;
@@ -264,10 +262,8 @@ static struct socket *rxe_setup_udp_tunnel(struct net *net, __be16 port,
 		return ERR_PTR(err);
 	}
 
-	tnl_cfg.sk_user_data = NULL;
 	tnl_cfg.encap_type = 1;
 	tnl_cfg.encap_rcv = rxe_udp_encap_recv;
-	tnl_cfg.encap_destroy = NULL;
 
 	/* Setup UDP tunnel */
 	setup_udp_tunnel_sock(net, sock, &tnl_cfg);
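Aside (not part of the patch above): the rxe_net.c cleanup relies on the fact that `= {0}` zero-initializes every member of a struct, which is what makes the memset() and the explicit NULL assignments redundant. A minimal standalone sketch (tunnel_cfg is a hypothetical struct, not the kernel's):

/* Sketch only: designated zero-initialization making field-by-field
 * clearing redundant. Hypothetical config struct. */
#include <stdio.h>

struct tunnel_cfg {
	int encap_type;
	void *sk_user_data;
	void (*encap_destroy)(void);
};

int main(void)
{
	struct tunnel_cfg cfg = {0};	/* every member starts out 0/NULL */

	cfg.encap_type = 1;		/* only set what differs from zero */
	printf("type=%d user_data=%p\n", cfg.encap_type, cfg.sk_user_data);
	return 0;
}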
diff --git a/drivers/infiniband/sw/rxe/rxe_qp.c b/drivers/infiniband/sw/rxe/rxe_qp.c
index b8036cfbce04..c3e60e4bde6e 100644
--- a/drivers/infiniband/sw/rxe/rxe_qp.c
+++ b/drivers/infiniband/sw/rxe/rxe_qp.c
@@ -522,6 +522,7 @@ static void rxe_qp_reset(struct rxe_qp *qp)
 	if (qp->sq.queue) {
 		__rxe_do_task(&qp->comp.task);
 		__rxe_do_task(&qp->req.task);
+		rxe_queue_reset(qp->sq.queue);
 	}
 
 	/* cleanup attributes */
@@ -573,6 +574,7 @@ void rxe_qp_error(struct rxe_qp *qp)
 {
 	qp->req.state = QP_STATE_ERROR;
 	qp->resp.state = QP_STATE_ERROR;
+	qp->attr.qp_state = IB_QPS_ERR;
 
 	/* drain work and packet queues */
 	rxe_run_task(&qp->resp.task, 1);
diff --git a/drivers/infiniband/sw/rxe/rxe_queue.c b/drivers/infiniband/sw/rxe/rxe_queue.c
index 08274254eb88..d14bf496d62d 100644
--- a/drivers/infiniband/sw/rxe/rxe_queue.c
+++ b/drivers/infiniband/sw/rxe/rxe_queue.c
@@ -84,6 +84,15 @@ err1:
 	return -EINVAL;
 }
 
+inline void rxe_queue_reset(struct rxe_queue *q)
+{
+	/* queue is comprised from header and the memory
+	 * of the actual queue. See "struct rxe_queue_buf" in rxe_queue.h
+	 * reset only the queue itself and not the management header
+	 */
+	memset(q->buf->data, 0, q->buf_size - sizeof(struct rxe_queue_buf));
+}
+
 struct rxe_queue *rxe_queue_init(struct rxe_dev *rxe,
 				 int *num_elem,
 				 unsigned int elem_size)
diff --git a/drivers/infiniband/sw/rxe/rxe_queue.h b/drivers/infiniband/sw/rxe/rxe_queue.h
index 239fd609c31e..8c8641c87817 100644
--- a/drivers/infiniband/sw/rxe/rxe_queue.h
+++ b/drivers/infiniband/sw/rxe/rxe_queue.h
@@ -84,6 +84,8 @@ int do_mmap_info(struct rxe_dev *rxe,
 		 size_t buf_size,
 		 struct rxe_mmap_info **ip_p);
 
+void rxe_queue_reset(struct rxe_queue *q);
+
 struct rxe_queue *rxe_queue_init(struct rxe_dev *rxe,
 				 int *num_elem,
 				 unsigned int elem_size);
diff --git a/drivers/infiniband/sw/rxe/rxe_req.c b/drivers/infiniband/sw/rxe/rxe_req.c
index 832846b73ea0..22bd9630dcd9 100644
--- a/drivers/infiniband/sw/rxe/rxe_req.c
+++ b/drivers/infiniband/sw/rxe/rxe_req.c
@@ -696,7 +696,8 @@ next_wqe:
 						       qp->req.wqe_index);
 			wqe->state = wqe_state_done;
 			wqe->status = IB_WC_SUCCESS;
-			goto complete;
+			__rxe_do_task(&qp->comp.task);
+			return 0;
 		}
 		payload = mtu;
 	}
@@ -745,13 +746,17 @@ err:
 	wqe->status = IB_WC_LOC_PROT_ERR;
 	wqe->state = wqe_state_error;
 
-complete:
-	if (qp_type(qp) != IB_QPT_RC) {
-		while (rxe_completer(qp) == 0)
-			;
-	}
-
-	return 0;
+	/*
+	 * IBA Spec. Section 10.7.3.1 SIGNALED COMPLETIONS
+	 * ---------8<---------8<-------------
+	 * ...Note that if a completion error occurs, a Work Completion
+	 * will always be generated, even if the signaling
+	 * indicator requests an Unsignaled Completion.
+	 * ---------8<---------8<-------------
+	 */
+	wqe->wr.send_flags |= IB_SEND_SIGNALED;
+	__rxe_do_task(&qp->comp.task);
+	return -EAGAIN;
 
 exit:
 	return -EAGAIN;