author    David Howells <dhowells@redhat.com>	2006-12-05 09:37:56 -0500
committer David Howells <dhowells@warthog.cambridge.redhat.com>	2006-12-05 09:37:56 -0500
commit    4c1ac1b49122b805adfa4efc620592f68dccf5db (patch)
tree      87557f4bc2fd4fe65b7570489c2f610c45c0adcd /drivers/infiniband/core
parent    c4028958b6ecad064b1a6303a6a5906d4fe48d73 (diff)
parent    d916faace3efc0bf19fe9a615a1ab8fa1a24cd93 (diff)
Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux-2.6
Conflicts:

	drivers/infiniband/core/iwcm.c
	drivers/net/chelsio/cxgb2.c
	drivers/net/wireless/bcm43xx/bcm43xx_main.c
	drivers/net/wireless/prism54/islpci_eth.c
	drivers/usb/core/hub.h
	drivers/usb/input/hid-core.c
	net/core/netpoll.c

Fix up merge failures with Linus's head and fix new compilation failures.

Signed-Off-By: David Howells <dhowells@redhat.com>
Diffstat (limited to 'drivers/infiniband/core')
-rw-r--r--	drivers/infiniband/core/addr.c	19
-rw-r--r--	drivers/infiniband/core/cm.c	121
-rw-r--r--	drivers/infiniband/core/cma.c	49
-rw-r--r--	drivers/infiniband/core/iwcm.c	44
-rw-r--r--	drivers/infiniband/core/mad.c	2
-rw-r--r--	drivers/infiniband/core/ucm.c	20
6 files changed, 152 insertions, 103 deletions
diff --git a/drivers/infiniband/core/addr.c b/drivers/infiniband/core/addr.c
index 84b2f5cb3722..af939796750d 100644
--- a/drivers/infiniband/core/addr.c
+++ b/drivers/infiniband/core/addr.c
@@ -139,7 +139,7 @@ static void queue_req(struct addr_req *req)
 
 	mutex_lock(&lock);
 	list_for_each_entry_reverse(temp_req, &req_list, list) {
-		if (time_after(req->timeout, temp_req->timeout))
+		if (time_after_eq(req->timeout, temp_req->timeout))
 			break;
 	}
 
@@ -225,19 +225,17 @@ static void process_req(struct work_struct *work)
 
 	mutex_lock(&lock);
 	list_for_each_entry_safe(req, temp_req, &req_list, list) {
-		if (req->status) {
+		if (req->status == -ENODATA) {
 			src_in = (struct sockaddr_in *) &req->src_addr;
 			dst_in = (struct sockaddr_in *) &req->dst_addr;
 			req->status = addr_resolve_remote(src_in, dst_in,
 							  req->addr);
+			if (req->status && time_after_eq(jiffies, req->timeout))
+				req->status = -ETIMEDOUT;
+			else if (req->status == -ENODATA)
+				continue;
 		}
-		if (req->status && time_after(jiffies, req->timeout))
-			req->status = -ETIMEDOUT;
-		else if (req->status == -ENODATA)
-			continue;
-
-		list_del(&req->list);
-		list_add_tail(&req->list, &done_list);
+		list_move_tail(&req->list, &done_list);
 	}
 
 	if (!list_empty(&req_list)) {
@@ -347,8 +345,7 @@ void rdma_addr_cancel(struct rdma_dev_addr *addr)
 		if (req->addr == addr) {
 			req->status = -ECANCELED;
 			req->timeout = jiffies;
-			list_del(&req->list);
-			list_add(&req->list, &req_list);
+			list_move(&req->list, &req_list);
 			set_timeout(req->timeout);
 			break;
 		}
diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
index e1990f531d0a..79c937bf6962 100644
--- a/drivers/infiniband/core/cm.c
+++ b/drivers/infiniband/core/cm.c
@@ -147,12 +147,12 @@ struct cm_id_private {
 	__be32 rq_psn;
 	int timeout_ms;
 	enum ib_mtu path_mtu;
+	__be16 pkey;
 	u8 private_data_len;
 	u8 max_cm_retries;
 	u8 peer_to_peer;
 	u8 responder_resources;
 	u8 initiator_depth;
-	u8 local_ack_timeout;
 	u8 retry_count;
 	u8 rnr_retry_count;
 	u8 service_timeout;
@@ -240,11 +240,10 @@ static void * cm_copy_private_data(const void *private_data,
 	if (!private_data || !private_data_len)
 		return NULL;
 
-	data = kmalloc(private_data_len, GFP_KERNEL);
+	data = kmemdup(private_data, private_data_len, GFP_KERNEL);
 	if (!data)
 		return ERR_PTR(-ENOMEM);
 
-	memcpy(data, private_data, private_data_len);
 	return data;
 }
 
@@ -690,7 +689,7 @@ static void cm_enter_timewait(struct cm_id_private *cm_id_priv)
 	 * timewait before notifying the user that we've exited timewait.
 	 */
 	cm_id_priv->id.state = IB_CM_TIMEWAIT;
-	wait_time = cm_convert_to_ms(cm_id_priv->local_ack_timeout);
+	wait_time = cm_convert_to_ms(cm_id_priv->av.packet_life_time + 1);
 	queue_delayed_work(cm.wq, &cm_id_priv->timewait_info->work.work,
 			   msecs_to_jiffies(wait_time));
 	cm_id_priv->timewait_info = NULL;
@@ -1009,6 +1008,7 @@ int ib_send_cm_req(struct ib_cm_id *cm_id,
 	cm_id_priv->responder_resources = param->responder_resources;
 	cm_id_priv->retry_count = param->retry_count;
 	cm_id_priv->path_mtu = param->primary_path->mtu;
+	cm_id_priv->pkey = param->primary_path->pkey;
 	cm_id_priv->qp_type = param->qp_type;
 
 	ret = cm_alloc_msg(cm_id_priv, &cm_id_priv->msg);
@@ -1023,8 +1023,6 @@ int ib_send_cm_req(struct ib_cm_id *cm_id,
 
 	cm_id_priv->local_qpn = cm_req_get_local_qpn(req_msg);
 	cm_id_priv->rq_psn = cm_req_get_starting_psn(req_msg);
-	cm_id_priv->local_ack_timeout =
-				cm_req_get_primary_local_ack_timeout(req_msg);
 
 	spin_lock_irqsave(&cm_id_priv->lock, flags);
 	ret = ib_post_send_mad(cm_id_priv->msg, NULL);
@@ -1409,9 +1407,8 @@ static int cm_req_handler(struct cm_work *work)
 	cm_id_priv->initiator_depth = cm_req_get_resp_res(req_msg);
 	cm_id_priv->responder_resources = cm_req_get_init_depth(req_msg);
 	cm_id_priv->path_mtu = cm_req_get_path_mtu(req_msg);
+	cm_id_priv->pkey = req_msg->pkey;
 	cm_id_priv->sq_psn = cm_req_get_starting_psn(req_msg);
-	cm_id_priv->local_ack_timeout =
-				cm_req_get_primary_local_ack_timeout(req_msg);
 	cm_id_priv->retry_count = cm_req_get_retry_count(req_msg);
 	cm_id_priv->rnr_retry_count = cm_req_get_rnr_retry_count(req_msg);
 	cm_id_priv->qp_type = cm_req_get_qp_type(req_msg);
@@ -1715,7 +1712,7 @@ static int cm_establish_handler(struct cm_work *work)
 	unsigned long flags;
 	int ret;
 
-	/* See comment in ib_cm_establish about lookup. */
+	/* See comment in cm_establish about lookup. */
 	cm_id_priv = cm_acquire_id(work->local_id, work->remote_id);
 	if (!cm_id_priv)
 		return -EINVAL;
@@ -2401,11 +2398,16 @@ int ib_send_cm_lap(struct ib_cm_id *cm_id,
 	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
 	spin_lock_irqsave(&cm_id_priv->lock, flags);
 	if (cm_id->state != IB_CM_ESTABLISHED ||
-	    cm_id->lap_state != IB_CM_LAP_IDLE) {
+	    (cm_id->lap_state != IB_CM_LAP_UNINIT &&
+	     cm_id->lap_state != IB_CM_LAP_IDLE)) {
 		ret = -EINVAL;
 		goto out;
 	}
 
+	ret = cm_init_av_by_path(alternate_path, &cm_id_priv->alt_av);
+	if (ret)
+		goto out;
+
 	ret = cm_alloc_msg(cm_id_priv, &msg);
 	if (ret)
 		goto out;
@@ -2430,7 +2432,8 @@ out: spin_unlock_irqrestore(&cm_id_priv->lock, flags);
 }
 EXPORT_SYMBOL(ib_send_cm_lap);
 
-static void cm_format_path_from_lap(struct ib_sa_path_rec *path,
+static void cm_format_path_from_lap(struct cm_id_private *cm_id_priv,
+				    struct ib_sa_path_rec *path,
 				    struct cm_lap_msg *lap_msg)
 {
 	memset(path, 0, sizeof *path);
@@ -2442,10 +2445,10 @@ static void cm_format_path_from_lap(struct ib_sa_path_rec *path,
 	path->hop_limit = lap_msg->alt_hop_limit;
 	path->traffic_class = cm_lap_get_traffic_class(lap_msg);
 	path->reversible = 1;
-	/* pkey is same as in REQ */
+	path->pkey = cm_id_priv->pkey;
 	path->sl = cm_lap_get_sl(lap_msg);
 	path->mtu_selector = IB_SA_EQ;
-	/* mtu is same as in REQ */
+	path->mtu = cm_id_priv->path_mtu;
 	path->rate_selector = IB_SA_EQ;
 	path->rate = cm_lap_get_packet_rate(lap_msg);
 	path->packet_life_time_selector = IB_SA_EQ;
@@ -2471,7 +2474,7 @@ static int cm_lap_handler(struct cm_work *work)
 
 	param = &work->cm_event.param.lap_rcvd;
 	param->alternate_path = &work->path[0];
-	cm_format_path_from_lap(param->alternate_path, lap_msg);
+	cm_format_path_from_lap(cm_id_priv, param->alternate_path, lap_msg);
 	work->cm_event.private_data = &lap_msg->private_data;
 
 	spin_lock_irqsave(&cm_id_priv->lock, flags);
@@ -2479,6 +2482,7 @@ static int cm_lap_handler(struct cm_work *work)
 		goto unlock;
 
 	switch (cm_id_priv->id.lap_state) {
+	case IB_CM_LAP_UNINIT:
 	case IB_CM_LAP_IDLE:
 		break;
 	case IB_CM_MRA_LAP_SENT:
@@ -2501,6 +2505,10 @@ static int cm_lap_handler(struct cm_work *work)
 
 	cm_id_priv->id.lap_state = IB_CM_LAP_RCVD;
 	cm_id_priv->tid = lap_msg->hdr.tid;
+	cm_init_av_for_response(work->port, work->mad_recv_wc->wc,
+				work->mad_recv_wc->recv_buf.grh,
+				&cm_id_priv->av);
+	cm_init_av_by_path(param->alternate_path, &cm_id_priv->alt_av);
 	ret = atomic_inc_and_test(&cm_id_priv->work_count);
 	if (!ret)
 		list_add_tail(&work->list, &cm_id_priv->work_list);
@@ -3039,7 +3047,7 @@ static void cm_work_handler(struct work_struct *_work)
 	cm_free_work(work);
 }
 
-int ib_cm_establish(struct ib_cm_id *cm_id)
+static int cm_establish(struct ib_cm_id *cm_id)
 {
 	struct cm_id_private *cm_id_priv;
 	struct cm_work *work;
@@ -3087,7 +3095,44 @@ int ib_cm_establish(struct ib_cm_id *cm_id)
 out:
 	return ret;
 }
-EXPORT_SYMBOL(ib_cm_establish);
+
+static int cm_migrate(struct ib_cm_id *cm_id)
+{
+	struct cm_id_private *cm_id_priv;
+	unsigned long flags;
+	int ret = 0;
+
+	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
+	spin_lock_irqsave(&cm_id_priv->lock, flags);
+	if (cm_id->state == IB_CM_ESTABLISHED &&
+	    (cm_id->lap_state == IB_CM_LAP_UNINIT ||
+	     cm_id->lap_state == IB_CM_LAP_IDLE)) {
+		cm_id->lap_state = IB_CM_LAP_IDLE;
+		cm_id_priv->av = cm_id_priv->alt_av;
+	} else
+		ret = -EINVAL;
+	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
+
+	return ret;
+}
+
+int ib_cm_notify(struct ib_cm_id *cm_id, enum ib_event_type event)
+{
+	int ret;
+
+	switch (event) {
+	case IB_EVENT_COMM_EST:
+		ret = cm_establish(cm_id);
+		break;
+	case IB_EVENT_PATH_MIG:
+		ret = cm_migrate(cm_id);
+		break;
+	default:
+		ret = -EINVAL;
+	}
+	return ret;
+}
+EXPORT_SYMBOL(ib_cm_notify);
 
 static void cm_recv_handler(struct ib_mad_agent *mad_agent,
 			    struct ib_mad_recv_wc *mad_recv_wc)
@@ -3172,8 +3217,7 @@ static int cm_init_qp_init_attr(struct cm_id_private *cm_id_priv,
 	case IB_CM_ESTABLISHED:
 		*qp_attr_mask = IB_QP_STATE | IB_QP_ACCESS_FLAGS |
 				IB_QP_PKEY_INDEX | IB_QP_PORT;
-		qp_attr->qp_access_flags = IB_ACCESS_LOCAL_WRITE |
-					   IB_ACCESS_REMOTE_WRITE;
+		qp_attr->qp_access_flags = IB_ACCESS_REMOTE_WRITE;
 		if (cm_id_priv->responder_resources)
 			qp_attr->qp_access_flags |= IB_ACCESS_REMOTE_READ |
 						    IB_ACCESS_REMOTE_ATOMIC;
@@ -3221,6 +3265,9 @@ static int cm_init_qp_rtr_attr(struct cm_id_private *cm_id_priv,
 	if (cm_id_priv->alt_av.ah_attr.dlid) {
 		*qp_attr_mask |= IB_QP_ALT_PATH;
 		qp_attr->alt_port_num = cm_id_priv->alt_av.port->port_num;
+		qp_attr->alt_pkey_index = cm_id_priv->alt_av.pkey_index;
+		qp_attr->alt_timeout =
+				cm_id_priv->alt_av.packet_life_time + 1;
 		qp_attr->alt_ah_attr = cm_id_priv->alt_av.ah_attr;
 	}
 	ret = 0;
@@ -3247,19 +3294,31 @@ static int cm_init_qp_rts_attr(struct cm_id_private *cm_id_priv,
 	case IB_CM_REP_SENT:
 	case IB_CM_MRA_REP_RCVD:
 	case IB_CM_ESTABLISHED:
-		*qp_attr_mask = IB_QP_STATE | IB_QP_SQ_PSN;
-		qp_attr->sq_psn = be32_to_cpu(cm_id_priv->sq_psn);
-		if (cm_id_priv->qp_type == IB_QPT_RC) {
-			*qp_attr_mask |= IB_QP_TIMEOUT | IB_QP_RETRY_CNT |
-					 IB_QP_RNR_RETRY |
-					 IB_QP_MAX_QP_RD_ATOMIC;
-			qp_attr->timeout = cm_id_priv->local_ack_timeout;
-			qp_attr->retry_cnt = cm_id_priv->retry_count;
-			qp_attr->rnr_retry = cm_id_priv->rnr_retry_count;
-			qp_attr->max_rd_atomic = cm_id_priv->initiator_depth;
-		}
-		if (cm_id_priv->alt_av.ah_attr.dlid) {
-			*qp_attr_mask |= IB_QP_PATH_MIG_STATE;
+		if (cm_id_priv->id.lap_state == IB_CM_LAP_UNINIT) {
+			*qp_attr_mask = IB_QP_STATE | IB_QP_SQ_PSN;
+			qp_attr->sq_psn = be32_to_cpu(cm_id_priv->sq_psn);
+			if (cm_id_priv->qp_type == IB_QPT_RC) {
+				*qp_attr_mask |= IB_QP_TIMEOUT | IB_QP_RETRY_CNT |
+						 IB_QP_RNR_RETRY |
+						 IB_QP_MAX_QP_RD_ATOMIC;
+				qp_attr->timeout =
+					cm_id_priv->av.packet_life_time + 1;
+				qp_attr->retry_cnt = cm_id_priv->retry_count;
+				qp_attr->rnr_retry = cm_id_priv->rnr_retry_count;
+				qp_attr->max_rd_atomic =
+					cm_id_priv->initiator_depth;
+			}
+			if (cm_id_priv->alt_av.ah_attr.dlid) {
+				*qp_attr_mask |= IB_QP_PATH_MIG_STATE;
+				qp_attr->path_mig_state = IB_MIG_REARM;
+			}
+		} else {
+			*qp_attr_mask = IB_QP_ALT_PATH | IB_QP_PATH_MIG_STATE;
+			qp_attr->alt_port_num = cm_id_priv->alt_av.port->port_num;
+			qp_attr->alt_pkey_index = cm_id_priv->alt_av.pkey_index;
+			qp_attr->alt_timeout =
+				cm_id_priv->alt_av.packet_life_time + 1;
+			qp_attr->alt_ah_attr = cm_id_priv->alt_av.ah_attr;
 			qp_attr->path_mig_state = IB_MIG_REARM;
 		}
 		ret = 0;
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index 189f73f3f721..985a6b564d8f 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -344,7 +344,7 @@ static int cma_init_ib_qp(struct rdma_id_private *id_priv, struct ib_qp *qp)
 		return ret;
 
 	qp_attr.qp_state = IB_QPS_INIT;
-	qp_attr.qp_access_flags = IB_ACCESS_LOCAL_WRITE;
+	qp_attr.qp_access_flags = 0;
 	qp_attr.port_num = id_priv->id.port_num;
 	return ib_modify_qp(qp, &qp_attr, IB_QP_STATE | IB_QP_ACCESS_FLAGS |
 					  IB_QP_PKEY_INDEX | IB_QP_PORT);
@@ -935,13 +935,8 @@ static int cma_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
 	mutex_lock(&lock);
 	ret = cma_acquire_dev(conn_id);
 	mutex_unlock(&lock);
-	if (ret) {
-		ret = -ENODEV;
-		cma_exch(conn_id, CMA_DESTROYING);
-		cma_release_remove(conn_id);
-		rdma_destroy_id(&conn_id->id);
-		goto out;
-	}
+	if (ret)
+		goto release_conn_id;
 
 	conn_id->cm_id.ib = cm_id;
 	cm_id->context = conn_id;
@@ -951,13 +946,17 @@ static int cma_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
 	ret = cma_notify_user(conn_id, RDMA_CM_EVENT_CONNECT_REQUEST, 0,
 			      ib_event->private_data + offset,
 			      IB_CM_REQ_PRIVATE_DATA_SIZE - offset);
-	if (ret) {
-		/* Destroy the CM ID by returning a non-zero value. */
-		conn_id->cm_id.ib = NULL;
-		cma_exch(conn_id, CMA_DESTROYING);
-		cma_release_remove(conn_id);
-		rdma_destroy_id(&conn_id->id);
-	}
+	if (!ret)
+		goto out;
+
+	/* Destroy the CM ID by returning a non-zero value. */
+	conn_id->cm_id.ib = NULL;
+
+release_conn_id:
+	cma_exch(conn_id, CMA_DESTROYING);
+	cma_release_remove(conn_id);
+	rdma_destroy_id(&conn_id->id);
+
 out:
 	cma_release_remove(listen_id);
 	return ret;
@@ -1481,19 +1480,18 @@ static int cma_bind_loopback(struct rdma_id_private *id_priv)
 	u8 p;
 
 	mutex_lock(&lock);
+	if (list_empty(&dev_list)) {
+		ret = -ENODEV;
+		goto out;
+	}
 	list_for_each_entry(cma_dev, &dev_list, list)
 		for (p = 1; p <= cma_dev->device->phys_port_cnt; ++p)
-			if (!ib_query_port (cma_dev->device, p, &port_attr) &&
+			if (!ib_query_port(cma_dev->device, p, &port_attr) &&
 			    port_attr.state == IB_PORT_ACTIVE)
 				goto port_found;
 
-	if (!list_empty(&dev_list)) {
-		p = 1;
-		cma_dev = list_entry(dev_list.next, struct cma_device, list);
-	} else {
-		ret = -ENODEV;
-		goto out;
-	}
+	p = 1;
+	cma_dev = list_entry(dev_list.next, struct cma_device, list);
 
 port_found:
 	ret = ib_get_cached_gid(cma_dev->device, p, 0, &gid);
@@ -2123,8 +2121,6 @@ static void cma_add_one(struct ib_device *device)
 
 	cma_dev->device = device;
 	cma_dev->node_guid = device->node_guid;
-	if (!cma_dev->node_guid)
-		goto err;
 
 	init_completion(&cma_dev->comp);
 	atomic_set(&cma_dev->refcount, 1);
@@ -2136,9 +2132,6 @@ static void cma_add_one(struct ib_device *device)
 	list_for_each_entry(id_priv, &listen_any_list, list)
 		cma_listen_on_dev(id_priv, cma_dev);
 	mutex_unlock(&lock);
-	return;
-err:
-	kfree(cma_dev);
 }
 
 static int cma_remove_id_dev(struct rdma_id_private *id_priv)
diff --git a/drivers/infiniband/core/iwcm.c b/drivers/infiniband/core/iwcm.c
index 9bfa785252dc..1039ad57d53b 100644
--- a/drivers/infiniband/core/iwcm.c
+++ b/drivers/infiniband/core/iwcm.c
@@ -80,7 +80,7 @@ struct iwcm_work {
  * 1) in the event upcall, cm_event_handler(), for a listening cm_id. If
  * the backlog is exceeded, then no more connection request events will
  * be processed. cm_event_handler() returns -ENOMEM in this case. Its up
- * to the provider to reject the connectino request.
+ * to the provider to reject the connection request.
  * 2) in the connection request workqueue handler, cm_conn_req_handler().
  * If work elements cannot be allocated for the new connect request cm_id,
  * then IWCM will call the provider reject method. This is ok since
@@ -131,26 +131,25 @@ static int alloc_work_entries(struct iwcm_id_private *cm_id_priv, int count)
 }
 
 /*
- * Save private data from incoming connection requests in the
- * cm_id_priv so the low level driver doesn't have to. Adjust
+ * Save private data from incoming connection requests to
+ * iw_cm_event, so the low level driver doesn't have to. Adjust
  * the event ptr to point to the local copy.
  */
-static int copy_private_data(struct iwcm_id_private *cm_id_priv,
-			     struct iw_cm_event *event)
+static int copy_private_data(struct iw_cm_event *event)
 {
 	void *p;
 
-	p = kmalloc(event->private_data_len, GFP_ATOMIC);
+	p = kmemdup(event->private_data, event->private_data_len, GFP_ATOMIC);
 	if (!p)
 		return -ENOMEM;
-	memcpy(p, event->private_data, event->private_data_len);
 	event->private_data = p;
 	return 0;
 }
 
 /*
- * Release a reference on cm_id. If the last reference is being removed
- * and iw_destroy_cm_id is waiting, wake up the waiting thread.
+ * Release a reference on cm_id. If the last reference is being
+ * released, enable the waiting thread (in iw_destroy_cm_id) to
+ * get woken up, and return 1 if a thread is already waiting.
  */
 static int iwcm_deref_id(struct iwcm_id_private *cm_id_priv)
 {
@@ -243,7 +242,7 @@ static int iwcm_modify_qp_sqd(struct ib_qp *qp)
 /*
  * CM_ID <-- CLOSING
  *
- * Block if a passive or active connection is currenlty being processed. Then
+ * Block if a passive or active connection is currently being processed. Then
  * process the event as follows:
  * - If we are ESTABLISHED, move to CLOSING and modify the QP state
  *   based on the abrupt flag
@@ -408,7 +407,7 @@ int iw_cm_listen(struct iw_cm_id *cm_id, int backlog)
 {
 	struct iwcm_id_private *cm_id_priv;
 	unsigned long flags;
-	int ret = 0;
+	int ret;
 
 	cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
 
@@ -535,7 +534,7 @@ EXPORT_SYMBOL(iw_cm_accept);
 int iw_cm_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *iw_param)
 {
 	struct iwcm_id_private *cm_id_priv;
-	int ret = 0;
+	int ret;
 	unsigned long flags;
 	struct ib_qp *qp;
 
@@ -620,7 +619,7 @@ static void cm_conn_req_handler(struct iwcm_id_private *listen_id_priv,
 	spin_lock_irqsave(&listen_id_priv->lock, flags);
 	if (listen_id_priv->state != IW_CM_STATE_LISTEN) {
 		spin_unlock_irqrestore(&listen_id_priv->lock, flags);
-		return;
+		goto out;
 	}
 	spin_unlock_irqrestore(&listen_id_priv->lock, flags);
 
@@ -629,7 +628,7 @@ static void cm_conn_req_handler(struct iwcm_id_private *listen_id_priv,
 				listen_id_priv->id.context);
 	/* If the cm_id could not be created, ignore the request */
 	if (IS_ERR(cm_id))
-		return;
+		goto out;
 
 	cm_id->provider_data = iw_event->provider_data;
 	cm_id->local_addr = iw_event->local_addr;
@@ -642,7 +641,7 @@ static void cm_conn_req_handler(struct iwcm_id_private *listen_id_priv,
 	if (ret) {
 		iw_cm_reject(cm_id, NULL, 0);
 		iw_destroy_cm_id(cm_id);
-		return;
+		goto out;
 	}
 
 	/* Call the client CM handler */
@@ -654,6 +653,7 @@ static void cm_conn_req_handler(struct iwcm_id_private *listen_id_priv,
 		kfree(cm_id);
 	}
 
+out:
 	if (iw_event->private_data_len)
 		kfree(iw_event->private_data);
 }
@@ -674,7 +674,7 @@ static int cm_conn_est_handler(struct iwcm_id_private *cm_id_priv,
 			        struct iw_cm_event *iw_event)
 {
 	unsigned long flags;
-	int ret = 0;
+	int ret;
 
 	spin_lock_irqsave(&cm_id_priv->lock, flags);
 
@@ -704,7 +704,7 @@ static int cm_conn_rep_handler(struct iwcm_id_private *cm_id_priv,
 			        struct iw_cm_event *iw_event)
 {
 	unsigned long flags;
-	int ret = 0;
+	int ret;
 
 	spin_lock_irqsave(&cm_id_priv->lock, flags);
 	/*
@@ -830,8 +830,8 @@ static int process_event(struct iwcm_id_private *cm_id_priv,
  */
 static void cm_work_handler(struct work_struct *_work)
 {
-	struct iwcm_work lwork, *work =
-		container_of(_work, struct iwcm_work, work);
+	struct iwcm_work *work = container_of(_work, struct iwcm_work, work);
+	struct iw_cm_event levent;
 	struct iwcm_id_private *cm_id_priv = work->cm_id;
 	unsigned long flags;
 	int empty;
@@ -844,11 +844,11 @@ static void cm_work_handler(struct work_struct *_work)
 				  struct iwcm_work, list);
 		list_del_init(&work->list);
 		empty = list_empty(&cm_id_priv->work_list);
-		lwork = *work;
+		levent = work->event;
 		put_work(work);
 		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
 
-		ret = process_event(cm_id_priv, &work->event);
+		ret = process_event(cm_id_priv, &levent);
 		if (ret) {
 			set_bit(IWCM_F_CALLBACK_DESTROY, &cm_id_priv->flags);
 			destroy_cm_id(&cm_id_priv->id);
@@ -907,7 +907,7 @@ static int cm_event_handler(struct iw_cm_id *cm_id,
 	if ((work->event.event == IW_CM_EVENT_CONNECT_REQUEST ||
 	     work->event.event == IW_CM_EVENT_CONNECT_REPLY) &&
 	    work->event.private_data_len) {
-		ret = copy_private_data(cm_id_priv, &work->event);
+		ret = copy_private_data(&work->event);
 		if (ret) {
 			put_work(work);
 			goto out;
diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c
index 5a54ac35e961..15f38d94b3a8 100644
--- a/drivers/infiniband/core/mad.c
+++ b/drivers/infiniband/core/mad.c
@@ -46,7 +46,7 @@ MODULE_DESCRIPTION("kernel IB MAD API");
 MODULE_AUTHOR("Hal Rosenstock");
 MODULE_AUTHOR("Sean Hefty");
 
-static kmem_cache_t *ib_mad_cache;
+static struct kmem_cache *ib_mad_cache;
 
 static struct list_head ib_mad_port_list;
 static u32 ib_mad_client_id = 0;
diff --git a/drivers/infiniband/core/ucm.c b/drivers/infiniband/core/ucm.c
index ad4f4d5c2924..f15220a0ee75 100644
--- a/drivers/infiniband/core/ucm.c
+++ b/drivers/infiniband/core/ucm.c
@@ -161,12 +161,14 @@ static void ib_ucm_cleanup_events(struct ib_ucm_context *ctx)
 				    struct ib_ucm_event, ctx_list);
 		list_del(&uevent->file_list);
 		list_del(&uevent->ctx_list);
+		mutex_unlock(&ctx->file->file_mutex);
 
 		/* clear incoming connections. */
 		if (ib_ucm_new_cm_id(uevent->resp.event))
 			ib_destroy_cm_id(uevent->cm_id);
 
 		kfree(uevent);
+		mutex_lock(&ctx->file->file_mutex);
 	}
 	mutex_unlock(&ctx->file->file_mutex);
 }
@@ -328,20 +330,18 @@ static int ib_ucm_event_process(struct ib_cm_event *evt,
 	}
 
 	if (uvt->data_len) {
-		uvt->data = kmalloc(uvt->data_len, GFP_KERNEL);
+		uvt->data = kmemdup(evt->private_data, uvt->data_len, GFP_KERNEL);
 		if (!uvt->data)
 			goto err1;
 
-		memcpy(uvt->data, evt->private_data, uvt->data_len);
 		uvt->resp.present |= IB_UCM_PRES_DATA;
 	}
 
 	if (uvt->info_len) {
-		uvt->info = kmalloc(uvt->info_len, GFP_KERNEL);
+		uvt->info = kmemdup(info, uvt->info_len, GFP_KERNEL);
 		if (!uvt->info)
 			goto err2;
 
-		memcpy(uvt->info, info, uvt->info_len);
 		uvt->resp.present |= IB_UCM_PRES_INFO;
 	}
 	return 0;
@@ -685,11 +685,11 @@ out:
 	return result;
 }
 
-static ssize_t ib_ucm_establish(struct ib_ucm_file *file,
-				const char __user *inbuf,
-				int in_len, int out_len)
+static ssize_t ib_ucm_notify(struct ib_ucm_file *file,
+			     const char __user *inbuf,
+			     int in_len, int out_len)
 {
-	struct ib_ucm_establish cmd;
+	struct ib_ucm_notify cmd;
 	struct ib_ucm_context *ctx;
 	int result;
 
@@ -700,7 +700,7 @@ static ssize_t ib_ucm_establish(struct ib_ucm_file *file,
 	if (IS_ERR(ctx))
 		return PTR_ERR(ctx);
 
-	result = ib_cm_establish(ctx->cm_id);
+	result = ib_cm_notify(ctx->cm_id, (enum ib_event_type) cmd.event);
 	ib_ucm_ctx_put(ctx);
 	return result;
 }
@@ -1107,7 +1107,7 @@ static ssize_t (*ucm_cmd_table[])(struct ib_ucm_file *file,
 	[IB_USER_CM_CMD_DESTROY_ID] = ib_ucm_destroy_id,
 	[IB_USER_CM_CMD_ATTR_ID] = ib_ucm_attr_id,
 	[IB_USER_CM_CMD_LISTEN] = ib_ucm_listen,
-	[IB_USER_CM_CMD_ESTABLISH] = ib_ucm_establish,
+	[IB_USER_CM_CMD_NOTIFY] = ib_ucm_notify,
 	[IB_USER_CM_CMD_SEND_REQ] = ib_ucm_send_req,
 	[IB_USER_CM_CMD_SEND_REP] = ib_ucm_send_rep,
 	[IB_USER_CM_CMD_SEND_RTU] = ib_ucm_send_rtu,