path: root/drivers/infiniband
author    Sean Hefty <sean.hefty@intel.com>    2008-02-13 17:33:53 -0500
committer Roland Dreier <rolandd@cisco.com>    2008-02-14 18:30:41 -0500
commit    ead595aeb0974171eddd012df115424752413c26 (patch)
tree      6505d5bc351667a8a3b35fd1025f123081a0bba4 /drivers/infiniband
parent    11e75a7455a7bc73e752c0c985986c2b1f8c930a (diff)
RDMA/cma: Do not issue MRA if user rejects connection request
There's an undesirable interaction between issuing MRA requests to increase connection timeouts and the listen backlog. When the rdma_cm receives a connection request, it queues an MRA with the ib_cm. (The ib_cm will send an MRA if it receives a duplicate REQ.) The rdma_cm then creates a new rdma_cm_id and gives it to the user, which in this case is the rdma_user_cm.

If the listen backlog maintained in the rdma_user_cm is full, it destroys the rdma_cm_id, which in turn destroys the ib_cm_id. The ib_cm_id generates a REJ because the state of the ib_cm_id has changed to MRA sent, rather than REQ received. When the backlog is full, we just want to drop the REQ so that it is retried later.

Fix this by deferring queuing the MRA until after the user of the rdma_cm has examined the connection request.

Signed-off-by: Sean Hefty <sean.hefty@intel.com>
Signed-off-by: Roland Dreier <rolandd@cisco.com>
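To see why returning an error from the listen-side event handler now results in a silent drop rather than a REJ, consider a minimal sketch of a backlog-limited handler. The names example_listen_handler, struct example_listener, and its backlog field are illustrative assumptions, not the actual rdma_user_cm code in drivers/infiniband/core/ucma.c; only the rdma_cm types and event constant are real.

	/*
	 * Illustrative sketch only: a listen-side event handler that fails
	 * new connection requests once its backlog is exhausted.  Returning
	 * a non-zero value makes cma_req_handler() tear down conn_id; with
	 * this patch no MRA has been sent yet at that point, so the remote
	 * ib_cm never sees a REJ and simply retries the REQ later.
	 */
	static int example_listen_handler(struct rdma_cm_id *id,
					  struct rdma_cm_event *event)
	{
		struct example_listener *listener = id->context;	/* hypothetical */

		if (event->event == RDMA_CM_EVENT_CONNECT_REQUEST) {
			if (listener->backlog == 0)
				return -ENOMEM;	/* backlog full: drop the REQ */
			listener->backlog--;
			/* queue the request for the application to accept */
		}
		return 0;
	}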
Diffstat (limited to 'drivers/infiniband')
-rw-r--r--    drivers/infiniband/core/cma.c | 10
1 file changed, 9 insertions(+), 1 deletion(-)
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index 1eff1b2c0e08..34507daaf9b6 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -1107,7 +1107,6 @@ static int cma_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
 		event.param.ud.private_data_len =
 			IB_CM_SIDR_REQ_PRIVATE_DATA_SIZE - offset;
 	} else {
-		ib_send_cm_mra(cm_id, CMA_CM_MRA_SETTING, NULL, 0);
 		conn_id = cma_new_conn_id(&listen_id->id, ib_event);
 		cma_set_req_event_data(&event, &ib_event->param.req_rcvd,
 				       ib_event->private_data, offset);
@@ -1130,6 +1129,15 @@ static int cma_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
 
 	ret = conn_id->id.event_handler(&conn_id->id, &event);
 	if (!ret) {
+		/*
+		 * Acquire mutex to prevent user executing rdma_destroy_id()
+		 * while we're accessing the cm_id.
+		 */
+		mutex_lock(&lock);
+		if (cma_comp(conn_id, CMA_CONNECT) &&
+		    !cma_is_ud_ps(conn_id->id.ps))
+			ib_send_cm_mra(cm_id, CMA_CM_MRA_SETTING, NULL, 0);
+		mutex_unlock(&lock);
 		cma_enable_remove(conn_id);
 		goto out;
 	}
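From user space, the backlog that can fill up in this path is the one passed to rdma_listen() through librdmacm, which the rdma_user_cm tracks per listening id. A minimal listener showing where that value comes from might look roughly like this (error handling trimmed; the port number is an arbitrary example):

	#include <string.h>
	#include <netinet/in.h>
	#include <rdma/rdma_cma.h>

	int main(void)
	{
		struct rdma_event_channel *channel = rdma_create_event_channel();
		struct rdma_cm_id *listen_id;
		struct rdma_cm_event *event;
		struct sockaddr_in addr;

		memset(&addr, 0, sizeof(addr));
		addr.sin_family = AF_INET;
		addr.sin_port = htons(7471);	/* example port */

		rdma_create_id(channel, &listen_id, NULL, RDMA_PS_TCP);
		rdma_bind_addr(listen_id, (struct sockaddr *)&addr);

		/*
		 * The backlog passed here bounds how many pending connection
		 * requests the rdma_user_cm will queue.  Once it is exhausted,
		 * further REQs are dropped; with this patch they are neither
		 * MRA'd nor REJ'd, so the initiator's ib_cm retries them.
		 */
		rdma_listen(listen_id, 1);

		while (rdma_get_cm_event(channel, &event) == 0) {
			/* accept or reject event->id here */
			rdma_ack_cm_event(event);
		}
		return 0;
	}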