author	Linus Torvalds <torvalds@woody.osdl.org>	2006-12-01 19:43:06 -0500
committer	Linus Torvalds <torvalds@woody.osdl.org>	2006-12-01 19:43:06 -0500
commit	5f56bbdf1e35d41b4b3d4c92bdb3e70c63877e4d (patch)
tree	90d2606b1a12b2a01f8527c7daedada650fea854 /drivers
parent	9641219825a54249d77d7aa1afa7d874a05c7f90 (diff)
parent	f469b2626f48829c06e40ac799c1edf62b12048e (diff)
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband
* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband: (25 commits)
IB/ucm: Fix deadlock in cleanup
IB/cm: Fix automatic path migration support
IPoIB: Fix skb leak when freeing neighbour
IB/srp: Fix memory leak on reconnect
RDMA/addr: list_move() cleanups
RDMA/addr: Fix some cancellation problems in process_req()
RDMA/amso1100: Prevent deadlock in destroy QP
IB/mthca: Fix initial SRQ logsize for mem-free HCAs
IB/ehca: Use WQE offset instead of WQE addr for pending work reqs
RDMA/iwcm: Fix comment for iwcm_deref_id() to match code
RDMA/iwcm: Remove unnecessary function argument
RDMA/iwcm: Remove unnecessary initializations
RDMA/iwcm: Fix memory leak
RDMA/iwcm: Fix memory corruption bug in cm_work_handler()
IB: Convert kmem_cache_t -> struct kmem_cache
IB/ipath: Fix typo in pma_counter_select subscript
RDMA/amso1100: Fix section mismatches
IB/mthca: Fix section mismatches
IB/srp: Increase supported CDB size
RDMA/cm: Remove setting local write as part of QP access flags
...
Diffstat (limited to 'drivers')
30 files changed, 281 insertions(+), 186 deletions(-)
diff --git a/drivers/infiniband/core/addr.c b/drivers/infiniband/core/addr.c
index e11187ecc931..7767a11b6890 100644
--- a/drivers/infiniband/core/addr.c
+++ b/drivers/infiniband/core/addr.c
@@ -139,7 +139,7 @@ static void queue_req(struct addr_req *req)
 
 	mutex_lock(&lock);
 	list_for_each_entry_reverse(temp_req, &req_list, list) {
-		if (time_after(req->timeout, temp_req->timeout))
+		if (time_after_eq(req->timeout, temp_req->timeout))
 			break;
 	}
 
@@ -225,19 +225,17 @@ static void process_req(void *data)
 
 	mutex_lock(&lock);
 	list_for_each_entry_safe(req, temp_req, &req_list, list) {
-		if (req->status) {
+		if (req->status == -ENODATA) {
 			src_in = (struct sockaddr_in *) &req->src_addr;
 			dst_in = (struct sockaddr_in *) &req->dst_addr;
 			req->status = addr_resolve_remote(src_in, dst_in,
 							  req->addr);
+			if (req->status && time_after_eq(jiffies, req->timeout))
+				req->status = -ETIMEDOUT;
+			else if (req->status == -ENODATA)
+				continue;
 		}
-		if (req->status && time_after(jiffies, req->timeout))
-			req->status = -ETIMEDOUT;
-		else if (req->status == -ENODATA)
-			continue;
-
-		list_del(&req->list);
-		list_add_tail(&req->list, &done_list);
+		list_move_tail(&req->list, &done_list);
 	}
 
 	if (!list_empty(&req_list)) {
@@ -347,8 +345,7 @@ void rdma_addr_cancel(struct rdma_dev_addr *addr)
 		if (req->addr == addr) {
 			req->status = -ECANCELED;
 			req->timeout = jiffies;
-			list_del(&req->list);
-			list_add(&req->list, &req_list);
+			list_move(&req->list, &req_list);
 			set_timeout(req->timeout);
 			break;
 		}
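The list_move() cleanups above are purely mechanical: list_move() and list_move_tail() are existing helpers from include/linux/list.h that fold the unlink and re-link steps into one call. A simplified sketch of the helpers, for reference:

	static inline void list_move(struct list_head *list, struct list_head *head)
	{
		__list_del(list->prev, list->next);	/* unlink from the current list */
		list_add(list, head);			/* relink at the head */
	}

	static inline void list_move_tail(struct list_head *list, struct list_head *head)
	{
		__list_del(list->prev, list->next);
		list_add_tail(list, head);		/* relink at the tail */
	}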
diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
index 25b1018a476c..e5dc4530808a 100644
--- a/drivers/infiniband/core/cm.c
+++ b/drivers/infiniband/core/cm.c
@@ -147,12 +147,12 @@ struct cm_id_private {
 	__be32 rq_psn;
 	int timeout_ms;
 	enum ib_mtu path_mtu;
+	__be16 pkey;
 	u8 private_data_len;
 	u8 max_cm_retries;
 	u8 peer_to_peer;
 	u8 responder_resources;
 	u8 initiator_depth;
-	u8 local_ack_timeout;
 	u8 retry_count;
 	u8 rnr_retry_count;
 	u8 service_timeout;
@@ -240,11 +240,10 @@ static void * cm_copy_private_data(const void *private_data,
 	if (!private_data || !private_data_len)
 		return NULL;
 
-	data = kmalloc(private_data_len, GFP_KERNEL);
+	data = kmemdup(private_data, private_data_len, GFP_KERNEL);
 	if (!data)
 		return ERR_PTR(-ENOMEM);
 
-	memcpy(data, private_data, private_data_len);
 	return data;
 }
 
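The kmalloc()-plus-memcpy() replacements throughout this series use kmemdup(), added in 2.6.19 for exactly this pattern. Its behavior is equivalent to the following sketch (simplified from mm/util.c):

	void *kmemdup(const void *src, size_t len, gfp_t gfp)
	{
		void *p;

		p = kmalloc(len, gfp);	/* may fail; the caller still checks for NULL */
		if (p)
			memcpy(p, src, len);
		return p;
	}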
@@ -691,7 +690,7 @@ static void cm_enter_timewait(struct cm_id_private *cm_id_priv)
 	 * timewait before notifying the user that we've exited timewait.
 	 */
 	cm_id_priv->id.state = IB_CM_TIMEWAIT;
-	wait_time = cm_convert_to_ms(cm_id_priv->local_ack_timeout);
+	wait_time = cm_convert_to_ms(cm_id_priv->av.packet_life_time + 1);
 	queue_delayed_work(cm.wq, &cm_id_priv->timewait_info->work.work,
 			   msecs_to_jiffies(wait_time));
 	cm_id_priv->timewait_info = NULL;
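Context for the wait_time change: IB timeout fields are log-encoded (time = 4.096 µs × 2^n), so av.packet_life_time + 1 encodes twice the path's one-way packet lifetime, the usual basis for a local ACK timeout. A sketch of the conversion helper, assuming cm.c's cm_convert_to_ms() uses the standard encoding (2^8 × 4.096 µs ≈ 1 ms):

	static inline int cm_convert_to_ms(int iba_time)
	{
		/* approximate 4.096us * 2^iba_time, expressed in milliseconds */
		return 1 << max(iba_time - 8, 0);
	}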
@@ -1010,6 +1009,7 @@ int ib_send_cm_req(struct ib_cm_id *cm_id,
 	cm_id_priv->responder_resources = param->responder_resources;
 	cm_id_priv->retry_count = param->retry_count;
 	cm_id_priv->path_mtu = param->primary_path->mtu;
+	cm_id_priv->pkey = param->primary_path->pkey;
 	cm_id_priv->qp_type = param->qp_type;
 
 	ret = cm_alloc_msg(cm_id_priv, &cm_id_priv->msg);
@@ -1024,8 +1024,6 @@ int ib_send_cm_req(struct ib_cm_id *cm_id,
 
 	cm_id_priv->local_qpn = cm_req_get_local_qpn(req_msg);
 	cm_id_priv->rq_psn = cm_req_get_starting_psn(req_msg);
-	cm_id_priv->local_ack_timeout =
-				cm_req_get_primary_local_ack_timeout(req_msg);
 
 	spin_lock_irqsave(&cm_id_priv->lock, flags);
 	ret = ib_post_send_mad(cm_id_priv->msg, NULL);
@@ -1410,9 +1408,8 @@ static int cm_req_handler(struct cm_work *work)
 	cm_id_priv->initiator_depth = cm_req_get_resp_res(req_msg);
 	cm_id_priv->responder_resources = cm_req_get_init_depth(req_msg);
 	cm_id_priv->path_mtu = cm_req_get_path_mtu(req_msg);
+	cm_id_priv->pkey = req_msg->pkey;
 	cm_id_priv->sq_psn = cm_req_get_starting_psn(req_msg);
-	cm_id_priv->local_ack_timeout =
-				cm_req_get_primary_local_ack_timeout(req_msg);
 	cm_id_priv->retry_count = cm_req_get_retry_count(req_msg);
 	cm_id_priv->rnr_retry_count = cm_req_get_rnr_retry_count(req_msg);
 	cm_id_priv->qp_type = cm_req_get_qp_type(req_msg);
@@ -1716,7 +1713,7 @@ static int cm_establish_handler(struct cm_work *work)
 	unsigned long flags;
 	int ret;
 
-	/* See comment in ib_cm_establish about lookup. */
+	/* See comment in cm_establish about lookup. */
 	cm_id_priv = cm_acquire_id(work->local_id, work->remote_id);
 	if (!cm_id_priv)
 		return -EINVAL;
@@ -2402,11 +2399,16 @@ int ib_send_cm_lap(struct ib_cm_id *cm_id,
 	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
 	spin_lock_irqsave(&cm_id_priv->lock, flags);
 	if (cm_id->state != IB_CM_ESTABLISHED ||
-	    cm_id->lap_state != IB_CM_LAP_IDLE) {
+	    (cm_id->lap_state != IB_CM_LAP_UNINIT &&
+	     cm_id->lap_state != IB_CM_LAP_IDLE)) {
 		ret = -EINVAL;
 		goto out;
 	}
 
+	ret = cm_init_av_by_path(alternate_path, &cm_id_priv->alt_av);
+	if (ret)
+		goto out;
+
 	ret = cm_alloc_msg(cm_id_priv, &msg);
 	if (ret)
 		goto out;
@@ -2431,7 +2433,8 @@ out:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
 }
 EXPORT_SYMBOL(ib_send_cm_lap);
 
-static void cm_format_path_from_lap(struct ib_sa_path_rec *path,
+static void cm_format_path_from_lap(struct cm_id_private *cm_id_priv,
+				    struct ib_sa_path_rec *path,
 				    struct cm_lap_msg *lap_msg)
 {
 	memset(path, 0, sizeof *path);
@@ -2443,10 +2446,10 @@ static void cm_format_path_from_lap(struct ib_sa_path_rec *path,
 	path->hop_limit = lap_msg->alt_hop_limit;
 	path->traffic_class = cm_lap_get_traffic_class(lap_msg);
 	path->reversible = 1;
-	/* pkey is same as in REQ */
+	path->pkey = cm_id_priv->pkey;
 	path->sl = cm_lap_get_sl(lap_msg);
 	path->mtu_selector = IB_SA_EQ;
-	/* mtu is same as in REQ */
+	path->mtu = cm_id_priv->path_mtu;
 	path->rate_selector = IB_SA_EQ;
 	path->rate = cm_lap_get_packet_rate(lap_msg);
 	path->packet_life_time_selector = IB_SA_EQ;
@@ -2472,7 +2475,7 @@ static int cm_lap_handler(struct cm_work *work)
 
 	param = &work->cm_event.param.lap_rcvd;
 	param->alternate_path = &work->path[0];
-	cm_format_path_from_lap(param->alternate_path, lap_msg);
+	cm_format_path_from_lap(cm_id_priv, param->alternate_path, lap_msg);
 	work->cm_event.private_data = &lap_msg->private_data;
 
 	spin_lock_irqsave(&cm_id_priv->lock, flags);
@@ -2480,6 +2483,7 @@ static int cm_lap_handler(struct cm_work *work)
 		goto unlock;
 
 	switch (cm_id_priv->id.lap_state) {
+	case IB_CM_LAP_UNINIT:
 	case IB_CM_LAP_IDLE:
 		break;
 	case IB_CM_MRA_LAP_SENT:
@@ -2502,6 +2506,10 @@ static int cm_lap_handler(struct cm_work *work)
 
 	cm_id_priv->id.lap_state = IB_CM_LAP_RCVD;
 	cm_id_priv->tid = lap_msg->hdr.tid;
+	cm_init_av_for_response(work->port, work->mad_recv_wc->wc,
+				work->mad_recv_wc->recv_buf.grh,
+				&cm_id_priv->av);
+	cm_init_av_by_path(param->alternate_path, &cm_id_priv->alt_av);
 	ret = atomic_inc_and_test(&cm_id_priv->work_count);
 	if (!ret)
 		list_add_tail(&work->list, &cm_id_priv->work_list);
@@ -3040,7 +3048,7 @@ static void cm_work_handler(void *data)
 	cm_free_work(work);
 }
 
-int ib_cm_establish(struct ib_cm_id *cm_id)
+static int cm_establish(struct ib_cm_id *cm_id)
 {
 	struct cm_id_private *cm_id_priv;
 	struct cm_work *work;
@@ -3088,7 +3096,44 @@ int ib_cm_establish(struct ib_cm_id *cm_id)
 out:
 	return ret;
 }
-EXPORT_SYMBOL(ib_cm_establish);
+
+static int cm_migrate(struct ib_cm_id *cm_id)
+{
+	struct cm_id_private *cm_id_priv;
+	unsigned long flags;
+	int ret = 0;
+
+	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
+	spin_lock_irqsave(&cm_id_priv->lock, flags);
+	if (cm_id->state == IB_CM_ESTABLISHED &&
+	    (cm_id->lap_state == IB_CM_LAP_UNINIT ||
+	     cm_id->lap_state == IB_CM_LAP_IDLE)) {
+		cm_id->lap_state = IB_CM_LAP_IDLE;
+		cm_id_priv->av = cm_id_priv->alt_av;
+	} else
+		ret = -EINVAL;
+	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
+
+	return ret;
+}
+
+int ib_cm_notify(struct ib_cm_id *cm_id, enum ib_event_type event)
+{
+	int ret;
+
+	switch (event) {
+	case IB_EVENT_COMM_EST:
+		ret = cm_establish(cm_id);
+		break;
+	case IB_EVENT_PATH_MIG:
+		ret = cm_migrate(cm_id);
+		break;
+	default:
+		ret = -EINVAL;
+	}
+	return ret;
+}
+EXPORT_SYMBOL(ib_cm_notify);
 
 static void cm_recv_handler(struct ib_mad_agent *mad_agent,
 			    struct ib_mad_recv_wc *mad_recv_wc)
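ib_cm_notify() generalizes the old ib_cm_establish() entry point: a consumer now forwards the relevant QP events, and the CM either completes establishment (IB_EVENT_COMM_EST) or adopts the previously loaded alternate path (IB_EVENT_PATH_MIG). A minimal sketch of a caller, assuming a hypothetical ULP that stashes its ib_cm_id in the QP event context:

	static void my_qp_event_handler(struct ib_event *event, void *context)
	{
		struct ib_cm_id *cm_id = context;	/* hypothetical: set by the ULP */

		switch (event->event) {
		case IB_EVENT_COMM_EST:
		case IB_EVENT_PATH_MIG:
			ib_cm_notify(cm_id, event->event);
			break;
		default:
			break;
		}
	}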
@@ -3173,8 +3218,7 @@ static int cm_init_qp_init_attr(struct cm_id_private *cm_id_priv,
 	case IB_CM_ESTABLISHED:
 		*qp_attr_mask = IB_QP_STATE | IB_QP_ACCESS_FLAGS |
 				IB_QP_PKEY_INDEX | IB_QP_PORT;
-		qp_attr->qp_access_flags = IB_ACCESS_LOCAL_WRITE |
-					   IB_ACCESS_REMOTE_WRITE;
+		qp_attr->qp_access_flags = IB_ACCESS_REMOTE_WRITE;
 		if (cm_id_priv->responder_resources)
 			qp_attr->qp_access_flags |= IB_ACCESS_REMOTE_READ |
 						    IB_ACCESS_REMOTE_ATOMIC;
@@ -3222,6 +3266,9 @@ static int cm_init_qp_rtr_attr(struct cm_id_private *cm_id_priv,
 	if (cm_id_priv->alt_av.ah_attr.dlid) {
 		*qp_attr_mask |= IB_QP_ALT_PATH;
 		qp_attr->alt_port_num = cm_id_priv->alt_av.port->port_num;
+		qp_attr->alt_pkey_index = cm_id_priv->alt_av.pkey_index;
+		qp_attr->alt_timeout =
+				cm_id_priv->alt_av.packet_life_time + 1;
 		qp_attr->alt_ah_attr = cm_id_priv->alt_av.ah_attr;
 	}
 	ret = 0;
@@ -3248,19 +3295,31 @@ static int cm_init_qp_rts_attr(struct cm_id_private *cm_id_priv,
 	case IB_CM_REP_SENT:
 	case IB_CM_MRA_REP_RCVD:
 	case IB_CM_ESTABLISHED:
-		*qp_attr_mask = IB_QP_STATE | IB_QP_SQ_PSN;
-		qp_attr->sq_psn = be32_to_cpu(cm_id_priv->sq_psn);
-		if (cm_id_priv->qp_type == IB_QPT_RC) {
-			*qp_attr_mask |= IB_QP_TIMEOUT | IB_QP_RETRY_CNT |
-					 IB_QP_RNR_RETRY |
-					 IB_QP_MAX_QP_RD_ATOMIC;
-			qp_attr->timeout = cm_id_priv->local_ack_timeout;
-			qp_attr->retry_cnt = cm_id_priv->retry_count;
-			qp_attr->rnr_retry = cm_id_priv->rnr_retry_count;
-			qp_attr->max_rd_atomic = cm_id_priv->initiator_depth;
-		}
-		if (cm_id_priv->alt_av.ah_attr.dlid) {
-			*qp_attr_mask |= IB_QP_PATH_MIG_STATE;
+		if (cm_id_priv->id.lap_state == IB_CM_LAP_UNINIT) {
+			*qp_attr_mask = IB_QP_STATE | IB_QP_SQ_PSN;
+			qp_attr->sq_psn = be32_to_cpu(cm_id_priv->sq_psn);
+			if (cm_id_priv->qp_type == IB_QPT_RC) {
+				*qp_attr_mask |= IB_QP_TIMEOUT | IB_QP_RETRY_CNT |
+						 IB_QP_RNR_RETRY |
+						 IB_QP_MAX_QP_RD_ATOMIC;
+				qp_attr->timeout =
+					cm_id_priv->av.packet_life_time + 1;
+				qp_attr->retry_cnt = cm_id_priv->retry_count;
+				qp_attr->rnr_retry = cm_id_priv->rnr_retry_count;
+				qp_attr->max_rd_atomic =
+					cm_id_priv->initiator_depth;
+			}
+			if (cm_id_priv->alt_av.ah_attr.dlid) {
+				*qp_attr_mask |= IB_QP_PATH_MIG_STATE;
+				qp_attr->path_mig_state = IB_MIG_REARM;
+			}
+		} else {
+			*qp_attr_mask = IB_QP_ALT_PATH | IB_QP_PATH_MIG_STATE;
+			qp_attr->alt_port_num = cm_id_priv->alt_av.port->port_num;
+			qp_attr->alt_pkey_index = cm_id_priv->alt_av.pkey_index;
+			qp_attr->alt_timeout =
+				cm_id_priv->alt_av.packet_life_time + 1;
+			qp_attr->alt_ah_attr = cm_id_priv->alt_av.ah_attr;
 			qp_attr->path_mig_state = IB_MIG_REARM;
 		}
 		ret = 0;
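With this change, ib_cm_init_qp_attr() in the ESTABLISHED state does double duty: before any LAP exchange (lap_state == IB_CM_LAP_UNINIT) it yields the normal RTS transition, and afterwards it yields IB_QP_ALT_PATH | IB_QP_PATH_MIG_STATE so the consumer can load the new alternate path and rearm migration. A hedged sketch of the rearm step on the consumer side (error handling elided; qp and cm_id are the consumer's own):

	struct ib_qp_attr qp_attr;
	int qp_attr_mask, ret;

	qp_attr.qp_state = IB_QPS_RTS;
	ret = ib_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask);
	if (!ret)	/* after a LAP, mask holds IB_QP_ALT_PATH | IB_QP_PATH_MIG_STATE */
		ret = ib_modify_qp(qp, &qp_attr, qp_attr_mask);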
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index 845090b0859c..cf48f2697434 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -344,7 +344,7 @@ static int cma_init_ib_qp(struct rdma_id_private *id_priv, struct ib_qp *qp)
 		return ret;
 
 	qp_attr.qp_state = IB_QPS_INIT;
-	qp_attr.qp_access_flags = IB_ACCESS_LOCAL_WRITE;
+	qp_attr.qp_access_flags = 0;
 	qp_attr.port_num = id_priv->id.port_num;
 	return ib_modify_qp(qp, &qp_attr, IB_QP_STATE | IB_QP_ACCESS_FLAGS |
 					  IB_QP_PKEY_INDEX | IB_QP_PORT);
@@ -935,13 +935,8 @@ static int cma_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
 	mutex_lock(&lock);
 	ret = cma_acquire_dev(conn_id);
 	mutex_unlock(&lock);
-	if (ret) {
-		ret = -ENODEV;
-		cma_exch(conn_id, CMA_DESTROYING);
-		cma_release_remove(conn_id);
-		rdma_destroy_id(&conn_id->id);
-		goto out;
-	}
+	if (ret)
+		goto release_conn_id;
 
 	conn_id->cm_id.ib = cm_id;
 	cm_id->context = conn_id;
@@ -951,13 +946,17 @@ static int cma_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
 	ret = cma_notify_user(conn_id, RDMA_CM_EVENT_CONNECT_REQUEST, 0,
 			      ib_event->private_data + offset,
 			      IB_CM_REQ_PRIVATE_DATA_SIZE - offset);
-	if (ret) {
-		/* Destroy the CM ID by returning a non-zero value. */
-		conn_id->cm_id.ib = NULL;
-		cma_exch(conn_id, CMA_DESTROYING);
-		cma_release_remove(conn_id);
-		rdma_destroy_id(&conn_id->id);
-	}
+	if (!ret)
+		goto out;
+
+	/* Destroy the CM ID by returning a non-zero value. */
+	conn_id->cm_id.ib = NULL;
+
+release_conn_id:
+	cma_exch(conn_id, CMA_DESTROYING);
+	cma_release_remove(conn_id);
+	rdma_destroy_id(&conn_id->id);
+
 out:
 	cma_release_remove(listen_id);
 	return ret;
@@ -1481,19 +1480,18 @@ static int cma_bind_loopback(struct rdma_id_private *id_priv)
 	u8 p;
 
 	mutex_lock(&lock);
+	if (list_empty(&dev_list)) {
+		ret = -ENODEV;
+		goto out;
+	}
 	list_for_each_entry(cma_dev, &dev_list, list)
 		for (p = 1; p <= cma_dev->device->phys_port_cnt; ++p)
-			if (!ib_query_port (cma_dev->device, p, &port_attr) &&
+			if (!ib_query_port(cma_dev->device, p, &port_attr) &&
 			    port_attr.state == IB_PORT_ACTIVE)
 				goto port_found;
 
-	if (!list_empty(&dev_list)) {
-		p = 1;
-		cma_dev = list_entry(dev_list.next, struct cma_device, list);
-	} else {
-		ret = -ENODEV;
-		goto out;
-	}
+	p = 1;
+	cma_dev = list_entry(dev_list.next, struct cma_device, list);
 
 port_found:
 	ret = ib_get_cached_gid(cma_dev->device, p, 0, &gid);
@@ -2123,8 +2121,6 @@ static void cma_add_one(struct ib_device *device)
 
 	cma_dev->device = device;
 	cma_dev->node_guid = device->node_guid;
-	if (!cma_dev->node_guid)
-		goto err;
 
 	init_completion(&cma_dev->comp);
 	atomic_set(&cma_dev->refcount, 1);
@@ -2136,9 +2132,6 @@ static void cma_add_one(struct ib_device *device)
 	list_for_each_entry(id_priv, &listen_any_list, list)
 		cma_listen_on_dev(id_priv, cma_dev);
 	mutex_unlock(&lock);
-	return;
-err:
-	kfree(cma_dev);
 }
 
 static int cma_remove_id_dev(struct rdma_id_private *id_priv)
diff --git a/drivers/infiniband/core/iwcm.c b/drivers/infiniband/core/iwcm.c
index c3fb304a4e86..cf797d7aea09 100644
--- a/drivers/infiniband/core/iwcm.c
+++ b/drivers/infiniband/core/iwcm.c
@@ -80,7 +80,7 @@ struct iwcm_work {
  * 1) in the event upcall, cm_event_handler(), for a listening cm_id.  If
  *    the backlog is exceeded, then no more connection request events will
  *    be processed.  cm_event_handler() returns -ENOMEM in this case.  Its up
- *    to the provider to reject the connectino request.
+ *    to the provider to reject the connection request.
  * 2) in the connection request workqueue handler, cm_conn_req_handler().
  *    If work elements cannot be allocated for the new connect request cm_id,
  *    then IWCM will call the provider reject method.  This is ok since
@@ -131,26 +131,25 @@ static int alloc_work_entries(struct iwcm_id_private *cm_id_priv, int count)
 }
 
 /*
- * Save private data from incoming connection requests in the
- * cm_id_priv so the low level driver doesn't have to. Adjust
+ * Save private data from incoming connection requests to
+ * iw_cm_event, so the low level driver doesn't have to. Adjust
  * the event ptr to point to the local copy.
  */
-static int copy_private_data(struct iwcm_id_private *cm_id_priv,
-			     struct iw_cm_event *event)
+static int copy_private_data(struct iw_cm_event *event)
 {
 	void *p;
 
-	p = kmalloc(event->private_data_len, GFP_ATOMIC);
+	p = kmemdup(event->private_data, event->private_data_len, GFP_ATOMIC);
 	if (!p)
 		return -ENOMEM;
-	memcpy(p, event->private_data, event->private_data_len);
 	event->private_data = p;
 	return 0;
 }
 
 /*
- * Release a reference on cm_id. If the last reference is being removed
- * and iw_destroy_cm_id is waiting, wake up the waiting thread.
+ * Release a reference on cm_id. If the last reference is being
+ * released, enable the waiting thread (in iw_destroy_cm_id) to
+ * get woken up, and return 1 if a thread is already waiting.
  */
 static int iwcm_deref_id(struct iwcm_id_private *cm_id_priv)
 {
@@ -243,7 +242,7 @@ static int iwcm_modify_qp_sqd(struct ib_qp *qp)
 /*
  * CM_ID <-- CLOSING
  *
- * Block if a passive or active connection is currenlty being processed. Then
+ * Block if a passive or active connection is currently being processed. Then
  * process the event as follows:
  * - If we are ESTABLISHED, move to CLOSING and modify the QP state
  *   based on the abrupt flag
@@ -408,7 +407,7 @@ int iw_cm_listen(struct iw_cm_id *cm_id, int backlog)
 {
 	struct iwcm_id_private *cm_id_priv;
 	unsigned long flags;
-	int ret = 0;
+	int ret;
 
 	cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
 
@@ -535,7 +534,7 @@ EXPORT_SYMBOL(iw_cm_accept);
 int iw_cm_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *iw_param)
 {
 	struct iwcm_id_private *cm_id_priv;
-	int ret = 0;
+	int ret;
 	unsigned long flags;
 	struct ib_qp *qp;
 
@@ -620,7 +619,7 @@ static void cm_conn_req_handler(struct iwcm_id_private *listen_id_priv,
 	spin_lock_irqsave(&listen_id_priv->lock, flags);
 	if (listen_id_priv->state != IW_CM_STATE_LISTEN) {
 		spin_unlock_irqrestore(&listen_id_priv->lock, flags);
-		return;
+		goto out;
 	}
 	spin_unlock_irqrestore(&listen_id_priv->lock, flags);
 
@@ -629,7 +628,7 @@ static void cm_conn_req_handler(struct iwcm_id_private *listen_id_priv,
 				listen_id_priv->id.context);
 	/* If the cm_id could not be created, ignore the request */
 	if (IS_ERR(cm_id))
-		return;
+		goto out;
 
 	cm_id->provider_data = iw_event->provider_data;
 	cm_id->local_addr = iw_event->local_addr;
@@ -642,7 +641,7 @@ static void cm_conn_req_handler(struct iwcm_id_private *listen_id_priv,
 	if (ret) {
 		iw_cm_reject(cm_id, NULL, 0);
 		iw_destroy_cm_id(cm_id);
-		return;
+		goto out;
 	}
 
 	/* Call the client CM handler */
@@ -654,6 +653,7 @@ static void cm_conn_req_handler(struct iwcm_id_private *listen_id_priv,
 		kfree(cm_id);
 	}
 
+out:
 	if (iw_event->private_data_len)
 		kfree(iw_event->private_data);
 }
@@ -674,7 +674,7 @@ static int cm_conn_est_handler(struct iwcm_id_private *cm_id_priv,
 			       struct iw_cm_event *iw_event)
 {
 	unsigned long flags;
-	int ret = 0;
+	int ret;
 
 	spin_lock_irqsave(&cm_id_priv->lock, flags);
 
@@ -704,7 +704,7 @@ static int cm_conn_rep_handler(struct iwcm_id_private *cm_id_priv,
 			       struct iw_cm_event *iw_event)
 {
 	unsigned long flags;
-	int ret = 0;
+	int ret;
 
 	spin_lock_irqsave(&cm_id_priv->lock, flags);
 	/*
@@ -830,7 +830,8 @@ static int process_event(struct iwcm_id_private *cm_id_priv,
  */
 static void cm_work_handler(void *arg)
 {
-	struct iwcm_work *work = arg, lwork;
+	struct iwcm_work *work = arg;
+	struct iw_cm_event levent;
 	struct iwcm_id_private *cm_id_priv = work->cm_id;
 	unsigned long flags;
 	int empty;
@@ -843,11 +844,11 @@ static void cm_work_handler(void *arg)
 				  struct iwcm_work, list);
 		list_del_init(&work->list);
 		empty = list_empty(&cm_id_priv->work_list);
-		lwork = *work;
+		levent = work->event;
 		put_work(work);
 		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
 
-		ret = process_event(cm_id_priv, &work->event);
+		ret = process_event(cm_id_priv, &levent);
 		if (ret) {
 			set_bit(IWCM_F_CALLBACK_DESTROY, &cm_id_priv->flags);
 			destroy_cm_id(&cm_id_priv->id);
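The corruption fix above is the classic copy-before-free rule: put_work() returns the iwcm_work element to a free pool under the lock, so the old process_event(cm_id_priv, &work->event) call dereferenced storage that another CPU could already have reused. Copying into the on-stack levent first closes the race. The same rule in miniature:

	/* WRONG: the element may be recycled the moment it returns to the pool */
	put_work(work);
	ret = process_event(cm_id_priv, &work->event);	/* use after free */

	/* RIGHT: take a private copy, then release the element */
	levent = work->event;
	put_work(work);
	ret = process_event(cm_id_priv, &levent);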
@@ -906,7 +907,7 @@ static int cm_event_handler(struct iw_cm_id *cm_id,
 	if ((work->event.event == IW_CM_EVENT_CONNECT_REQUEST ||
 	     work->event.event == IW_CM_EVENT_CONNECT_REPLY) &&
 	    work->event.private_data_len) {
-		ret = copy_private_data(cm_id_priv, &work->event);
+		ret = copy_private_data(&work->event);
 		if (ret) {
 			put_work(work);
 			goto out;
diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c
index a72bcea46ff6..3f9c16232c4d 100644
--- a/drivers/infiniband/core/mad.c
+++ b/drivers/infiniband/core/mad.c
@@ -46,7 +46,7 @@ MODULE_DESCRIPTION("kernel IB MAD API");
 MODULE_AUTHOR("Hal Rosenstock");
 MODULE_AUTHOR("Sean Hefty");
 
-static kmem_cache_t *ib_mad_cache;
+static struct kmem_cache *ib_mad_cache;
 
 static struct list_head ib_mad_port_list;
 static u32 ib_mad_client_id = 0;
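This is part of the tree-wide kmem_cache_t removal: slab caches are now declared as plain struct kmem_cache pointers, and the API calls are unchanged. A sketch of typical usage, assuming the 2.6.19-era kmem_cache_create() signature (which still took constructor/destructor arguments):

	static struct kmem_cache *ib_mad_cache;

	ib_mad_cache = kmem_cache_create("ib_mad",
					 sizeof(struct ib_mad_private),
					 0, SLAB_HWCACHE_ALIGN,
					 NULL, NULL);
	if (!ib_mad_cache)
		return -ENOMEM;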
diff --git a/drivers/infiniband/core/ucm.c b/drivers/infiniband/core/ucm.c
index ad4f4d5c2924..f15220a0ee75 100644
--- a/drivers/infiniband/core/ucm.c
+++ b/drivers/infiniband/core/ucm.c
@@ -161,12 +161,14 @@ static void ib_ucm_cleanup_events(struct ib_ucm_context *ctx)
 				    struct ib_ucm_event, ctx_list);
 		list_del(&uevent->file_list);
 		list_del(&uevent->ctx_list);
+		mutex_unlock(&ctx->file->file_mutex);
 
 		/* clear incoming connections. */
 		if (ib_ucm_new_cm_id(uevent->resp.event))
 			ib_destroy_cm_id(uevent->cm_id);
 
 		kfree(uevent);
+		mutex_lock(&ctx->file->file_mutex);
 	}
 	mutex_unlock(&ctx->file->file_mutex);
 }
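The cleanup deadlock fix works by dropping file_mutex around ib_destroy_cm_id(): destroying a cm_id blocks until its callbacks drain, and those callbacks themselves take file_mutex to queue events, so holding the mutex across the destroy can deadlock. The general unlock-around-blocking-call shape, with hypothetical names:

	mutex_lock(&lock);
	while (!list_empty(&pending)) {
		item = list_entry(pending.next, struct item_type, node);
		list_del(&item->node);		/* unlink while still locked */
		mutex_unlock(&lock);		/* drop the lock for the blocking call */

		blocking_teardown(item);	/* may re-enter paths that take the lock */
		kfree(item);

		mutex_lock(&lock);		/* retake before re-testing the list */
	}
	mutex_unlock(&lock);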
@@ -328,20 +330,18 @@ static int ib_ucm_event_process(struct ib_cm_event *evt,
 	}
 
 	if (uvt->data_len) {
-		uvt->data = kmalloc(uvt->data_len, GFP_KERNEL);
+		uvt->data = kmemdup(evt->private_data, uvt->data_len, GFP_KERNEL);
 		if (!uvt->data)
 			goto err1;
 
-		memcpy(uvt->data, evt->private_data, uvt->data_len);
 		uvt->resp.present |= IB_UCM_PRES_DATA;
 	}
 
 	if (uvt->info_len) {
-		uvt->info = kmalloc(uvt->info_len, GFP_KERNEL);
+		uvt->info = kmemdup(info, uvt->info_len, GFP_KERNEL);
 		if (!uvt->info)
 			goto err2;
 
-		memcpy(uvt->info, info, uvt->info_len);
 		uvt->resp.present |= IB_UCM_PRES_INFO;
 	}
 	return 0;
@@ -685,11 +685,11 @@ out:
 	return result;
 }
 
-static ssize_t ib_ucm_establish(struct ib_ucm_file *file,
-				const char __user *inbuf,
-				int in_len, int out_len)
+static ssize_t ib_ucm_notify(struct ib_ucm_file *file,
+			     const char __user *inbuf,
+			     int in_len, int out_len)
 {
-	struct ib_ucm_establish cmd;
+	struct ib_ucm_notify cmd;
 	struct ib_ucm_context *ctx;
 	int result;
 
@@ -700,7 +700,7 @@ static ssize_t ib_ucm_establish(struct ib_ucm_file *file,
 	if (IS_ERR(ctx))
 		return PTR_ERR(ctx);
 
-	result = ib_cm_establish(ctx->cm_id);
+	result = ib_cm_notify(ctx->cm_id, (enum ib_event_type) cmd.event);
 	ib_ucm_ctx_put(ctx);
 	return result;
 }
@@ -1107,7 +1107,7 @@ static ssize_t (*ucm_cmd_table[])(struct ib_ucm_file *file,
 	[IB_USER_CM_CMD_DESTROY_ID] = ib_ucm_destroy_id,
 	[IB_USER_CM_CMD_ATTR_ID] = ib_ucm_attr_id,
 	[IB_USER_CM_CMD_LISTEN] = ib_ucm_listen,
-	[IB_USER_CM_CMD_ESTABLISH] = ib_ucm_establish,
+	[IB_USER_CM_CMD_NOTIFY] = ib_ucm_notify,
 	[IB_USER_CM_CMD_SEND_REQ] = ib_ucm_send_req,
 	[IB_USER_CM_CMD_SEND_REP] = ib_ucm_send_rep,
 	[IB_USER_CM_CMD_SEND_RTU] = ib_ucm_send_rtu,
diff --git a/drivers/infiniband/hw/amso1100/c2.h b/drivers/infiniband/hw/amso1100/c2.h
index 1b17dcdd0505..04a9db5de881 100644
--- a/drivers/infiniband/hw/amso1100/c2.h
+++ b/drivers/infiniband/hw/amso1100/c2.h
@@ -302,7 +302,7 @@ struct c2_dev {
 	unsigned long pa;	/* PA device memory */
 	void **qptr_array;
 
-	kmem_cache_t *host_msg_cache;
+	struct kmem_cache *host_msg_cache;
 
 	struct list_head cca_link;	/* adapter list */
 	struct list_head eh_wakeup_list;	/* event wakeup list */
diff --git a/drivers/infiniband/hw/amso1100/c2_qp.c b/drivers/infiniband/hw/amso1100/c2_qp.c
index 5bcf697aa335..179d005ed4a5 100644
--- a/drivers/infiniband/hw/amso1100/c2_qp.c
+++ b/drivers/infiniband/hw/amso1100/c2_qp.c
@@ -564,6 +564,32 @@ int c2_alloc_qp(struct c2_dev *c2dev,
 	return err;
 }
 
+static inline void c2_lock_cqs(struct c2_cq *send_cq, struct c2_cq *recv_cq)
+{
+	if (send_cq == recv_cq)
+		spin_lock_irq(&send_cq->lock);
+	else if (send_cq > recv_cq) {
+		spin_lock_irq(&send_cq->lock);
+		spin_lock_nested(&recv_cq->lock, SINGLE_DEPTH_NESTING);
+	} else {
+		spin_lock_irq(&recv_cq->lock);
+		spin_lock_nested(&send_cq->lock, SINGLE_DEPTH_NESTING);
+	}
+}
+
+static inline void c2_unlock_cqs(struct c2_cq *send_cq, struct c2_cq *recv_cq)
+{
+	if (send_cq == recv_cq)
+		spin_unlock_irq(&send_cq->lock);
+	else if (send_cq > recv_cq) {
+		spin_unlock(&recv_cq->lock);
+		spin_unlock_irq(&send_cq->lock);
+	} else {
+		spin_unlock(&send_cq->lock);
+		spin_unlock_irq(&recv_cq->lock);
+	}
+}
+
 void c2_free_qp(struct c2_dev *c2dev, struct c2_qp *qp)
 {
 	struct c2_cq *send_cq;
@@ -576,15 +602,9 @@ void c2_free_qp(struct c2_dev *c2dev, struct c2_qp *qp)
 	 * Lock CQs here, so that CQ polling code can do QP lookup
 	 * without taking a lock.
 	 */
-	spin_lock_irq(&send_cq->lock);
-	if (send_cq != recv_cq)
-		spin_lock(&recv_cq->lock);
-
+	c2_lock_cqs(send_cq, recv_cq);
 	c2_free_qpn(c2dev, qp->qpn);
-
-	if (send_cq != recv_cq)
-		spin_unlock(&recv_cq->lock);
-	spin_unlock_irq(&send_cq->lock);
+	c2_unlock_cqs(send_cq, recv_cq);
 
 	/*
 	 * Destory qp in the rnic...
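c2_lock_cqs()/c2_unlock_cqs() apply the standard deadlock-avoidance rule for taking two locks: every path acquires them in one global order, here fixed by comparing the CQ pointers, and spin_lock_nested() tells lockdep the double acquisition is intentional. Without the ordering, two QPs sharing the same pair of CQs in opposite send/recv roles could each hold one CQ lock while spinning on the other. The pattern in general form, with hypothetical locks a and b:

	if (a == b)
		spin_lock_irq(a);
	else if (a > b) {
		spin_lock_irq(a);
		spin_lock_nested(b, SINGLE_DEPTH_NESTING);
	} else {
		spin_lock_irq(b);
		spin_lock_nested(a, SINGLE_DEPTH_NESTING);
	}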
diff --git a/drivers/infiniband/hw/amso1100/c2_rnic.c b/drivers/infiniband/hw/amso1100/c2_rnic.c
index 623dc95f91df..1687c511cb2f 100644
--- a/drivers/infiniband/hw/amso1100/c2_rnic.c
+++ b/drivers/infiniband/hw/amso1100/c2_rnic.c
@@ -441,7 +441,7 @@ static int c2_rnic_close(struct c2_dev *c2dev)
  * involves initalizing the various limits and resouce pools that
  * comprise the RNIC instance.
  */
-int c2_rnic_init(struct c2_dev *c2dev)
+int __devinit c2_rnic_init(struct c2_dev *c2dev)
 {
 	int err;
 	u32 qsize, msgsize;
@@ -611,7 +611,7 @@ int c2_rnic_init(struct c2_dev *c2dev)
 /*
  * Called by c2_remove to cleanup the RNIC resources.
  */
-void c2_rnic_term(struct c2_dev *c2dev)
+void __devexit c2_rnic_term(struct c2_dev *c2dev)
 {
 
 	/* Close the open adapter instance */
diff --git a/drivers/infiniband/hw/ehca/ehca_main.c b/drivers/infiniband/hw/ehca/ehca_main.c
index 01f5aa9cb56d..3d1c1c535038 100644
--- a/drivers/infiniband/hw/ehca/ehca_main.c
+++ b/drivers/infiniband/hw/ehca/ehca_main.c
@@ -52,7 +52,7 @@
 MODULE_LICENSE("Dual BSD/GPL");
 MODULE_AUTHOR("Christoph Raisch <raisch@de.ibm.com>");
 MODULE_DESCRIPTION("IBM eServer HCA InfiniBand Device Driver");
-MODULE_VERSION("SVNEHCA_0018");
+MODULE_VERSION("SVNEHCA_0019");
 
 int ehca_open_aqp1     = 0;
 int ehca_debug_level   = 0;
@@ -790,7 +790,7 @@ int __init ehca_module_init(void)
 	int ret;
 
 	printk(KERN_INFO "eHCA Infiniband Device Driver "
-	       "(Rel.: SVNEHCA_0018)\n");
+	       "(Rel.: SVNEHCA_0019)\n");
 	idr_init(&ehca_qp_idr);
 	idr_init(&ehca_cq_idr);
 	spin_lock_init(&ehca_qp_idr_lock);
diff --git a/drivers/infiniband/hw/ehca/ehca_qp.c b/drivers/infiniband/hw/ehca/ehca_qp.c
index cf3e50ee2d06..8682aa50c707 100644
--- a/drivers/infiniband/hw/ehca/ehca_qp.c
+++ b/drivers/infiniband/hw/ehca/ehca_qp.c
@@ -732,8 +732,7 @@ static int prepare_sqe_rts(struct ehca_qp *my_qp, struct ehca_shca *shca,
 	u64 h_ret;
 	struct ipz_queue *squeue;
 	void *bad_send_wqe_p, *bad_send_wqe_v;
-	void *squeue_start_p, *squeue_end_p;
-	void *squeue_start_v, *squeue_end_v;
+	u64 q_ofs;
 	struct ehca_wqe *wqe;
 	int qp_num = my_qp->ib_qp.qp_num;
 
@@ -755,26 +754,23 @@ static int prepare_sqe_rts(struct ehca_qp *my_qp, struct ehca_shca *shca,
 	if (ehca_debug_level)
 		ehca_dmp(bad_send_wqe_v, 32, "qp_num=%x bad_wqe", qp_num);
 	squeue = &my_qp->ipz_squeue;
-	squeue_start_p = (void*)virt_to_abs(ipz_qeit_calc(squeue, 0L));
-	squeue_end_p = squeue_start_p+squeue->queue_length;
-	squeue_start_v = abs_to_virt((u64)squeue_start_p);
-	squeue_end_v = abs_to_virt((u64)squeue_end_p);
-	ehca_dbg(&shca->ib_device, "qp_num=%x squeue_start_v=%p squeue_end_v=%p",
-		 qp_num, squeue_start_v, squeue_end_v);
+	if (ipz_queue_abs_to_offset(squeue, (u64)bad_send_wqe_p, &q_ofs)) {
+		ehca_err(&shca->ib_device, "failed to get wqe offset qp_num=%x"
+			 " bad_send_wqe_p=%p", qp_num, bad_send_wqe_p);
+		return -EFAULT;
+	}
 
 	/* loop sets wqe's purge bit */
-	wqe = (struct ehca_wqe*)bad_send_wqe_v;
+	wqe = (struct ehca_wqe*)ipz_qeit_calc(squeue, q_ofs);
 	*bad_wqe_cnt = 0;
 	while (wqe->optype != 0xff && wqe->wqef != 0xff) {
 		if (ehca_debug_level)
 			ehca_dmp(wqe, 32, "qp_num=%x wqe", qp_num);
 		wqe->nr_of_data_seg = 0; /* suppress data access */
 		wqe->wqef = WQEF_PURGE; /* WQE to be purged */
-		wqe = (struct ehca_wqe*)((u8*)wqe+squeue->qe_size);
+		q_ofs = ipz_queue_advance_offset(squeue, q_ofs);
+		wqe = (struct ehca_wqe*)ipz_qeit_calc(squeue, q_ofs);
 		*bad_wqe_cnt = (*bad_wqe_cnt)+1;
-		if ((void*)wqe >= squeue_end_v) {
-			wqe = squeue_start_v;
-		}
 	}
 	/*
 	 * bad wqe will be reprocessed and ignored when pol_cq() is called,
diff --git a/drivers/infiniband/hw/ehca/ipz_pt_fn.c b/drivers/infiniband/hw/ehca/ipz_pt_fn.c
index e028ff1588cc..bf7a40088f61 100644
--- a/drivers/infiniband/hw/ehca/ipz_pt_fn.c
+++ b/drivers/infiniband/hw/ehca/ipz_pt_fn.c
@@ -70,6 +70,19 @@ void *ipz_qeit_eq_get_inc(struct ipz_queue *queue)
 	return ret;
 }
 
+int ipz_queue_abs_to_offset(struct ipz_queue *queue, u64 addr, u64 *q_offset)
+{
+	int i;
+	for (i = 0; i < queue->queue_length / queue->pagesize; i++) {
+		u64 page = (u64)virt_to_abs(queue->queue_pages[i]);
+		if (addr >= page && addr < page + queue->pagesize) {
+			*q_offset = addr - page + i * queue->pagesize;
+			return 0;
+		}
+	}
+	return -EINVAL;
+}
+
 int ipz_queue_ctor(struct ipz_queue *queue,
 		   const u32 nr_of_pages,
 		   const u32 pagesize, const u32 qe_size, const u32 nr_of_sg)
diff --git a/drivers/infiniband/hw/ehca/ipz_pt_fn.h b/drivers/infiniband/hw/ehca/ipz_pt_fn.h
index 2f13509d5257..dc3bda2634b7 100644
--- a/drivers/infiniband/hw/ehca/ipz_pt_fn.h
+++ b/drivers/infiniband/hw/ehca/ipz_pt_fn.h
@@ -150,6 +150,21 @@ static inline void *ipz_qeit_reset(struct ipz_queue *queue)
 	return ipz_qeit_get(queue);
 }
 
+/*
+ * return the q_offset corresponding to an absolute address
+ */
+int ipz_queue_abs_to_offset(struct ipz_queue *queue, u64 addr, u64 *q_offset);
+
+/*
+ * return the next queue offset. don't modify the queue.
+ */
+static inline u64 ipz_queue_advance_offset(struct ipz_queue *queue, u64 offset)
+{
+	offset += queue->qe_size;
+	if (offset >= queue->queue_length) offset = 0;
+	return offset;
+}
+
 /* struct generic page table */
 struct ipz_pt {
 	u64 entries[EHCA_PT_ENTRIES];
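The reason for the offset helpers: an ipz_queue is built from multiple pages that are not virtually contiguous, so stepping a raw WQE pointer by qe_size (as the old ehca_qp.c code did) breaks at page boundaries. An offset plus ipz_qeit_calc() stays valid across pages and wraps at the end of the queue. A usage sketch mirroring the purge loop above (squeue and the absolute WQE address come from the caller):

	u64 q_ofs;
	struct ehca_wqe *wqe;

	if (ipz_queue_abs_to_offset(squeue, wqe_abs_addr, &q_ofs))
		return -EFAULT;		/* address is not inside this queue */

	wqe = (struct ehca_wqe *)ipz_qeit_calc(squeue, q_ofs);
	while (wqe->optype != 0xff && wqe->wqef != 0xff) {
		/* ... mark or inspect the WQE ... */
		q_ofs = ipz_queue_advance_offset(squeue, q_ofs);	/* wraps */
		wqe = (struct ehca_wqe *)ipz_qeit_calc(squeue, q_ofs);
	}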
diff --git a/drivers/infiniband/hw/ipath/ipath_verbs.c b/drivers/infiniband/hw/ipath/ipath_verbs.c
index a5456108dbad..acdee33ee1f8 100644
--- a/drivers/infiniband/hw/ipath/ipath_verbs.c
+++ b/drivers/infiniband/hw/ipath/ipath_verbs.c
@@ -1487,7 +1487,7 @@ int ipath_register_ib_device(struct ipath_devdata *dd)
 	idev->pma_counter_select[1] = IB_PMA_PORT_RCV_DATA;
 	idev->pma_counter_select[2] = IB_PMA_PORT_XMIT_PKTS;
 	idev->pma_counter_select[3] = IB_PMA_PORT_RCV_PKTS;
-	idev->pma_counter_select[5] = IB_PMA_PORT_XMIT_WAIT;
+	idev->pma_counter_select[4] = IB_PMA_PORT_XMIT_WAIT;
 	idev->link_width_enabled = 3;	/* 1x or 4x */
 
 	/* Snapshot current HW counters to "clear" them. */
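The subscript fix is more than cosmetic: pma_counter_select holds five selections (presumably declared as u16 pma_counter_select[5] in ipath_verbs.h), so writing index 5 stored one element past the end of the array while leaving slot 4 uninitialized:

	/* assumed declaration; indices 0..4 are the valid slots */
	u16 pma_counter_select[5];

	idev->pma_counter_select[4] = IB_PMA_PORT_XMIT_WAIT;	/* last valid slot */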
diff --git a/drivers/infiniband/hw/mthca/mthca_av.c b/drivers/infiniband/hw/mthca/mthca_av.c
index 69599455aca2..57cdc1bc5f50 100644
--- a/drivers/infiniband/hw/mthca/mthca_av.c
+++ b/drivers/infiniband/hw/mthca/mthca_av.c
@@ -33,7 +33,6 @@
  * $Id: mthca_av.c 1349 2004-12-16 21:09:43Z roland $
  */
 
-#include <linux/init.h>
 #include <linux/string.h>
 #include <linux/slab.h>
 
@@ -323,7 +322,7 @@ int mthca_ah_query(struct ib_ah *ibah, struct ib_ah_attr *attr)
 	return 0;
 }
 
-int __devinit mthca_init_av_table(struct mthca_dev *dev)
+int mthca_init_av_table(struct mthca_dev *dev)
 {
 	int err;
 
diff --git a/drivers/infiniband/hw/mthca/mthca_cq.c b/drivers/infiniband/hw/mthca/mthca_cq.c
index 149b36901239..283d50b76c3d 100644
--- a/drivers/infiniband/hw/mthca/mthca_cq.c
+++ b/drivers/infiniband/hw/mthca/mthca_cq.c
@@ -36,7 +36,6 @@
 * $Id: mthca_cq.c 1369 2004-12-20 16:17:07Z roland $
 */
 
-#include <linux/init.h>
 #include <linux/hardirq.h>
 
 #include <asm/io.h>
@@ -970,7 +969,7 @@ void mthca_free_cq(struct mthca_dev *dev,
 	mthca_free_mailbox(dev, mailbox);
 }
 
-int __devinit mthca_init_cq_table(struct mthca_dev *dev)
+int mthca_init_cq_table(struct mthca_dev *dev)
 {
 	int err;
 
diff --git a/drivers/infiniband/hw/mthca/mthca_eq.c b/drivers/infiniband/hw/mthca/mthca_eq.c
index e284e0613a94..8ec9fa1ff9ea 100644
--- a/drivers/infiniband/hw/mthca/mthca_eq.c
+++ b/drivers/infiniband/hw/mthca/mthca_eq.c
@@ -33,7 +33,6 @@
 * $Id: mthca_eq.c 1382 2004-12-24 02:21:02Z roland $
 */
 
-#include <linux/init.h>
 #include <linux/errno.h>
 #include <linux/interrupt.h>
 #include <linux/pci.h>
@@ -479,10 +478,10 @@ static irqreturn_t mthca_arbel_msi_x_interrupt(int irq, void *eq_ptr)
 	return IRQ_HANDLED;
 }
 
-static int __devinit mthca_create_eq(struct mthca_dev *dev,
-				     int nent,
-				     u8 intr,
-				     struct mthca_eq *eq)
+static int mthca_create_eq(struct mthca_dev *dev,
+			   int nent,
+			   u8 intr,
+			   struct mthca_eq *eq)
 {
 	int npages;
 	u64 *dma_list = NULL;
@@ -664,9 +663,9 @@ static void mthca_free_irqs(struct mthca_dev *dev)
 				 dev->eq_table.eq + i);
 }
 
-static int __devinit mthca_map_reg(struct mthca_dev *dev,
-				   unsigned long offset, unsigned long size,
-				   void __iomem **map)
+static int mthca_map_reg(struct mthca_dev *dev,
+			 unsigned long offset, unsigned long size,
+			 void __iomem **map)
 {
 	unsigned long base = pci_resource_start(dev->pdev, 0);
 
@@ -691,7 +690,7 @@ static void mthca_unmap_reg(struct mthca_dev *dev, unsigned long offset,
 	iounmap(map);
 }
 
-static int __devinit mthca_map_eq_regs(struct mthca_dev *dev)
+static int mthca_map_eq_regs(struct mthca_dev *dev)
 {
 	if (mthca_is_memfree(dev)) {
 		/*
@@ -781,7 +780,7 @@ static void mthca_unmap_eq_regs(struct mthca_dev *dev)
 	}
 }
 
-int __devinit mthca_map_eq_icm(struct mthca_dev *dev, u64 icm_virt)
+int mthca_map_eq_icm(struct mthca_dev *dev, u64 icm_virt)
 {
 	int ret;
 	u8 status;
@@ -825,7 +824,7 @@ void mthca_unmap_eq_icm(struct mthca_dev *dev)
 	__free_page(dev->eq_table.icm_page);
 }
 
-int __devinit mthca_init_eq_table(struct mthca_dev *dev)
+int mthca_init_eq_table(struct mthca_dev *dev)
 {
 	int err;
 	u8 status;
diff --git a/drivers/infiniband/hw/mthca/mthca_mad.c b/drivers/infiniband/hw/mthca/mthca_mad.c
index 45e106f14807..acfa41d968ee 100644
--- a/drivers/infiniband/hw/mthca/mthca_mad.c
+++ b/drivers/infiniband/hw/mthca/mthca_mad.c
@@ -317,7 +317,7 @@ err:
 	return ret;
 }
 
-void __devexit mthca_free_agents(struct mthca_dev *dev)
+void mthca_free_agents(struct mthca_dev *dev)
 {
 	struct ib_mad_agent *agent;
 	int p, q;
diff --git a/drivers/infiniband/hw/mthca/mthca_main.c b/drivers/infiniband/hw/mthca/mthca_main.c index 47ea02148368..0491ec7a7c0a 100644 --- a/drivers/infiniband/hw/mthca/mthca_main.c +++ b/drivers/infiniband/hw/mthca/mthca_main.c | |||
@@ -98,7 +98,7 @@ static struct mthca_profile default_profile = { | |||
98 | .uarc_size = 1 << 18, /* Arbel only */ | 98 | .uarc_size = 1 << 18, /* Arbel only */ |
99 | }; | 99 | }; |
100 | 100 | ||
101 | static int __devinit mthca_tune_pci(struct mthca_dev *mdev) | 101 | static int mthca_tune_pci(struct mthca_dev *mdev) |
102 | { | 102 | { |
103 | int cap; | 103 | int cap; |
104 | u16 val; | 104 | u16 val; |
@@ -143,7 +143,7 @@ static int __devinit mthca_tune_pci(struct mthca_dev *mdev) | |||
143 | return 0; | 143 | return 0; |
144 | } | 144 | } |
145 | 145 | ||
146 | static int __devinit mthca_dev_lim(struct mthca_dev *mdev, struct mthca_dev_lim *dev_lim) | 146 | static int mthca_dev_lim(struct mthca_dev *mdev, struct mthca_dev_lim *dev_lim) |
147 | { | 147 | { |
148 | int err; | 148 | int err; |
149 | u8 status; | 149 | u8 status; |
@@ -255,7 +255,7 @@ static int __devinit mthca_dev_lim(struct mthca_dev *mdev, struct mthca_dev_lim | |||
255 | return 0; | 255 | return 0; |
256 | } | 256 | } |
257 | 257 | ||
258 | static int __devinit mthca_init_tavor(struct mthca_dev *mdev) | 258 | static int mthca_init_tavor(struct mthca_dev *mdev) |
259 | { | 259 | { |
260 | u8 status; | 260 | u8 status; |
261 | int err; | 261 | int err; |
@@ -333,7 +333,7 @@ err_disable: | |||
333 | return err; | 333 | return err; |
334 | } | 334 | } |
335 | 335 | ||
336 | static int __devinit mthca_load_fw(struct mthca_dev *mdev) | 336 | static int mthca_load_fw(struct mthca_dev *mdev) |
337 | { | 337 | { |
338 | u8 status; | 338 | u8 status; |
339 | int err; | 339 | int err; |
@@ -379,10 +379,10 @@ err_free: | |||
379 | return err; | 379 | return err; |
380 | } | 380 | } |
381 | 381 | ||
382 | static int __devinit mthca_init_icm(struct mthca_dev *mdev, | 382 | static int mthca_init_icm(struct mthca_dev *mdev, |
383 | struct mthca_dev_lim *dev_lim, | 383 | struct mthca_dev_lim *dev_lim, |
384 | struct mthca_init_hca_param *init_hca, | 384 | struct mthca_init_hca_param *init_hca, |
385 | u64 icm_size) | 385 | u64 icm_size) |
386 | { | 386 | { |
387 | u64 aux_pages; | 387 | u64 aux_pages; |
388 | u8 status; | 388 | u8 status; |
@@ -575,7 +575,7 @@ static void mthca_free_icms(struct mthca_dev *mdev) | |||
575 | mthca_free_icm(mdev, mdev->fw.arbel.aux_icm); | 575 | mthca_free_icm(mdev, mdev->fw.arbel.aux_icm); |
576 | } | 576 | } |
577 | 577 | ||
578 | static int __devinit mthca_init_arbel(struct mthca_dev *mdev) | 578 | static int mthca_init_arbel(struct mthca_dev *mdev) |
579 | { | 579 | { |
580 | struct mthca_dev_lim dev_lim; | 580 | struct mthca_dev_lim dev_lim; |
581 | struct mthca_profile profile; | 581 | struct mthca_profile profile; |
@@ -683,7 +683,7 @@ static void mthca_close_hca(struct mthca_dev *mdev) | |||
683 | mthca_SYS_DIS(mdev, &status); | 683 | mthca_SYS_DIS(mdev, &status); |
684 | } | 684 | } |
685 | 685 | ||
686 | static int __devinit mthca_init_hca(struct mthca_dev *mdev) | 686 | static int mthca_init_hca(struct mthca_dev *mdev) |
687 | { | 687 | { |
688 | u8 status; | 688 | u8 status; |
689 | int err; | 689 | int err; |
@@ -720,7 +720,7 @@ err_close: | |||
720 | return err; | 720 | return err; |
721 | } | 721 | } |
722 | 722 | ||
723 | static int __devinit mthca_setup_hca(struct mthca_dev *dev) | 723 | static int mthca_setup_hca(struct mthca_dev *dev) |
724 | { | 724 | { |
725 | int err; | 725 | int err; |
726 | u8 status; | 726 | u8 status; |
@@ -875,8 +875,7 @@ err_uar_table_free: | |||
875 | return err; | 875 | return err; |
876 | } | 876 | } |
877 | 877 | ||
878 | static int __devinit mthca_request_regions(struct pci_dev *pdev, | 878 | static int mthca_request_regions(struct pci_dev *pdev, int ddr_hidden) |
879 | int ddr_hidden) | ||
880 | { | 879 | { |
881 | int err; | 880 | int err; |
882 | 881 | ||
@@ -928,7 +927,7 @@ static void mthca_release_regions(struct pci_dev *pdev, | |||
928 | MTHCA_HCR_SIZE); | 927 | MTHCA_HCR_SIZE); |
929 | } | 928 | } |
930 | 929 | ||
931 | static int __devinit mthca_enable_msi_x(struct mthca_dev *mdev) | 930 | static int mthca_enable_msi_x(struct mthca_dev *mdev) |
932 | { | 931 | { |
933 | struct msix_entry entries[3]; | 932 | struct msix_entry entries[3]; |
934 | int err; | 933 | int err; |
@@ -1213,7 +1212,7 @@ int __mthca_restart_one(struct pci_dev *pdev) | |||
1213 | } | 1212 | } |
1214 | 1213 | ||
1215 | static int __devinit mthca_init_one(struct pci_dev *pdev, | 1214 | static int __devinit mthca_init_one(struct pci_dev *pdev, |
1216 | const struct pci_device_id *id) | 1215 | const struct pci_device_id *id) |
1217 | { | 1216 | { |
1218 | static int mthca_version_printed = 0; | 1217 | static int mthca_version_printed = 0; |
1219 | int ret; | 1218 | int ret; |
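A note on the __devinit and __devexit removals that run through these mthca hunks: __devinit places a function in init memory, which the kernel may discard after boot-time probing, and the hunk context above ends inside __mthca_restart_one(), a reset path that re-runs this setup code at runtime. Code reachable outside of probing must therefore stay in regular .text, and stripping the annotations is what resolves the section-mismatch warnings named in the merge log. A minimal sketch of the rule, with hypothetical function names:

    #include <linux/init.h>
    #include <linux/pci.h>

    /* Must stay in regular .text: called at probe time *and* from a
     * runtime reset path, so __devinit would be a section mismatch. */
    static int example_setup(struct pci_dev *pdev)
    {
            return 0;
    }

    /* Keeping __devinit here is fine: only reachable while probing. */
    static int __devinit example_probe(struct pci_dev *pdev,
                                       const struct pci_device_id *id)
    {
            return example_setup(pdev);
    }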
diff --git a/drivers/infiniband/hw/mthca/mthca_mcg.c b/drivers/infiniband/hw/mthca/mthca_mcg.c index 47ca8a9b7247..a8ad072be074 100644 --- a/drivers/infiniband/hw/mthca/mthca_mcg.c +++ b/drivers/infiniband/hw/mthca/mthca_mcg.c | |||
@@ -32,7 +32,6 @@ | |||
32 | * $Id: mthca_mcg.c 1349 2004-12-16 21:09:43Z roland $ | 32 | * $Id: mthca_mcg.c 1349 2004-12-16 21:09:43Z roland $ |
33 | */ | 33 | */ |
34 | 34 | ||
35 | #include <linux/init.h> | ||
36 | #include <linux/string.h> | 35 | #include <linux/string.h> |
37 | #include <linux/slab.h> | 36 | #include <linux/slab.h> |
38 | 37 | ||
@@ -371,7 +370,7 @@ int mthca_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid) | |||
371 | return err; | 370 | return err; |
372 | } | 371 | } |
373 | 372 | ||
374 | int __devinit mthca_init_mcg_table(struct mthca_dev *dev) | 373 | int mthca_init_mcg_table(struct mthca_dev *dev) |
375 | { | 374 | { |
376 | int err; | 375 | int err; |
377 | int table_size = dev->limits.num_mgms + dev->limits.num_amgms; | 376 | int table_size = dev->limits.num_mgms + dev->limits.num_amgms; |
diff --git a/drivers/infiniband/hw/mthca/mthca_mr.c b/drivers/infiniband/hw/mthca/mthca_mr.c index a486dec1707e..f71ffa88db3a 100644 --- a/drivers/infiniband/hw/mthca/mthca_mr.c +++ b/drivers/infiniband/hw/mthca/mthca_mr.c | |||
@@ -34,7 +34,6 @@ | |||
34 | */ | 34 | */ |
35 | 35 | ||
36 | #include <linux/slab.h> | 36 | #include <linux/slab.h> |
37 | #include <linux/init.h> | ||
38 | #include <linux/errno.h> | 37 | #include <linux/errno.h> |
39 | 38 | ||
40 | #include "mthca_dev.h" | 39 | #include "mthca_dev.h" |
@@ -135,7 +134,7 @@ static void mthca_buddy_free(struct mthca_buddy *buddy, u32 seg, int order) | |||
135 | spin_unlock(&buddy->lock); | 134 | spin_unlock(&buddy->lock); |
136 | } | 135 | } |
137 | 136 | ||
138 | static int __devinit mthca_buddy_init(struct mthca_buddy *buddy, int max_order) | 137 | static int mthca_buddy_init(struct mthca_buddy *buddy, int max_order) |
139 | { | 138 | { |
140 | int i, s; | 139 | int i, s; |
141 | 140 | ||
@@ -759,7 +758,7 @@ void mthca_arbel_fmr_unmap(struct mthca_dev *dev, struct mthca_fmr *fmr) | |||
759 | *(u8 *) fmr->mem.arbel.mpt = MTHCA_MPT_STATUS_SW; | 758 | *(u8 *) fmr->mem.arbel.mpt = MTHCA_MPT_STATUS_SW; |
760 | } | 759 | } |
761 | 760 | ||
762 | int __devinit mthca_init_mr_table(struct mthca_dev *dev) | 761 | int mthca_init_mr_table(struct mthca_dev *dev) |
763 | { | 762 | { |
764 | unsigned long addr; | 763 | unsigned long addr; |
765 | int err, i; | 764 | int err, i; |
diff --git a/drivers/infiniband/hw/mthca/mthca_pd.c b/drivers/infiniband/hw/mthca/mthca_pd.c index 59df51614c85..c1e950764bd8 100644 --- a/drivers/infiniband/hw/mthca/mthca_pd.c +++ b/drivers/infiniband/hw/mthca/mthca_pd.c | |||
@@ -34,7 +34,6 @@ | |||
34 | * $Id: mthca_pd.c 1349 2004-12-16 21:09:43Z roland $ | 34 | * $Id: mthca_pd.c 1349 2004-12-16 21:09:43Z roland $ |
35 | */ | 35 | */ |
36 | 36 | ||
37 | #include <linux/init.h> | ||
38 | #include <linux/errno.h> | 37 | #include <linux/errno.h> |
39 | 38 | ||
40 | #include "mthca_dev.h" | 39 | #include "mthca_dev.h" |
@@ -69,7 +68,7 @@ void mthca_pd_free(struct mthca_dev *dev, struct mthca_pd *pd) | |||
69 | mthca_free(&dev->pd_table.alloc, pd->pd_num); | 68 | mthca_free(&dev->pd_table.alloc, pd->pd_num); |
70 | } | 69 | } |
71 | 70 | ||
72 | int __devinit mthca_init_pd_table(struct mthca_dev *dev) | 71 | int mthca_init_pd_table(struct mthca_dev *dev) |
73 | { | 72 | { |
74 | return mthca_alloc_init(&dev->pd_table.alloc, | 73 | return mthca_alloc_init(&dev->pd_table.alloc, |
75 | dev->limits.num_pds, | 74 | dev->limits.num_pds, |
diff --git a/drivers/infiniband/hw/mthca/mthca_provider.c b/drivers/infiniband/hw/mthca/mthca_provider.c index fc67f780581b..21422a3336ad 100644 --- a/drivers/infiniband/hw/mthca/mthca_provider.c +++ b/drivers/infiniband/hw/mthca/mthca_provider.c | |||
@@ -1100,11 +1100,11 @@ static struct ib_fmr *mthca_alloc_fmr(struct ib_pd *pd, int mr_access_flags, | |||
1100 | struct mthca_fmr *fmr; | 1100 | struct mthca_fmr *fmr; |
1101 | int err; | 1101 | int err; |
1102 | 1102 ||
1103 | fmr = kmalloc(sizeof *fmr, GFP_KERNEL); | 1103 | fmr = kmalloc(sizeof *fmr, GFP_KERNEL); |
1104 | if (!fmr) | 1104 | if (!fmr) |
1105 | return ERR_PTR(-ENOMEM); | 1105 | return ERR_PTR(-ENOMEM); |
1106 | 1106 ||
1107 | memcpy(&fmr->attr, fmr_attr, sizeof *fmr_attr); | 1107 | fmr->attr = *fmr_attr; |
1108 | err = mthca_fmr_alloc(to_mdev(pd->device), to_mpd(pd)->pd_num, | 1108 | err = mthca_fmr_alloc(to_mdev(pd->device), to_mpd(pd)->pd_num, |
1109 | convert_access(mr_access_flags), fmr); | 1109 | convert_access(mr_access_flags), fmr); |
1110 | 1110 ||
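Hedged aside on the allocation above: kmemdup(src, len, gfp), the helper being rolled out across the tree around this time, is only a drop-in for kmalloc() plus memcpy() when the new buffer is a byte-for-byte copy of a whole existing object. Here the ib_fmr_attr being copied is one member of the larger mthca_fmr, so the buffer must be sized for the fmr and only the attr member copied in:

    fmr = kmalloc(sizeof *fmr, GFP_KERNEL);  /* allocate the whole mthca_fmr */
    if (!fmr)
            return ERR_PTR(-ENOMEM);

    fmr->attr = *fmr_attr;                   /* copy just the attr member    */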
diff --git a/drivers/infiniband/hw/mthca/mthca_qp.c b/drivers/infiniband/hw/mthca/mthca_qp.c index 6a7822e0fc19..33e3ba7937f1 100644 --- a/drivers/infiniband/hw/mthca/mthca_qp.c +++ b/drivers/infiniband/hw/mthca/mthca_qp.c | |||
@@ -35,7 +35,6 @@ | |||
35 | * $Id: mthca_qp.c 1355 2004-12-17 15:23:43Z roland $ | 35 | * $Id: mthca_qp.c 1355 2004-12-17 15:23:43Z roland $ |
36 | */ | 36 | */ |
37 | 37 | ||
38 | #include <linux/init.h> | ||
39 | #include <linux/string.h> | 38 | #include <linux/string.h> |
40 | #include <linux/slab.h> | 39 | #include <linux/slab.h> |
41 | 40 | ||
@@ -2241,7 +2240,7 @@ void mthca_free_err_wqe(struct mthca_dev *dev, struct mthca_qp *qp, int is_send, | |||
2241 | *new_wqe = 0; | 2240 | *new_wqe = 0; |
2242 | } | 2241 | } |
2243 | 2242 | ||
2244 | int __devinit mthca_init_qp_table(struct mthca_dev *dev) | 2243 | int mthca_init_qp_table(struct mthca_dev *dev) |
2245 | { | 2244 | { |
2246 | int err; | 2245 | int err; |
2247 | u8 status; | 2246 | u8 status; |
diff --git a/drivers/infiniband/hw/mthca/mthca_srq.c b/drivers/infiniband/hw/mthca/mthca_srq.c index f5d7677d1079..34d2c4768962 100644 --- a/drivers/infiniband/hw/mthca/mthca_srq.c +++ b/drivers/infiniband/hw/mthca/mthca_srq.c | |||
@@ -120,7 +120,7 @@ static void mthca_arbel_init_srq_context(struct mthca_dev *dev, | |||
120 | 120 | ||
121 | memset(context, 0, sizeof *context); | 121 | memset(context, 0, sizeof *context); |
122 | 122 | ||
123 | logsize = long_log2(srq->max) + srq->wqe_shift; | 123 | logsize = long_log2(srq->max); |
124 | context->state_logsize_srqn = cpu_to_be32(logsize << 24 | srq->srqn); | 124 | context->state_logsize_srqn = cpu_to_be32(logsize << 24 | srq->srqn); |
125 | context->lkey = cpu_to_be32(srq->mr.ibmr.lkey); | 125 | context->lkey = cpu_to_be32(srq->mr.ibmr.lkey); |
126 | context->db_index = cpu_to_be32(srq->db_index); | 126 | context->db_index = cpu_to_be32(srq->db_index); |
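The one-line change above is the "IB/mthca: Fix initial SRQ logsize for mem-free HCAs" fix from the merge log. The top byte of state_logsize_srqn encodes log2 of the number of SRQ WQEs; adding srq->wqe_shift (log2 of the WQE stride in bytes) turned it into log2 of the queue size in bytes, overstating the log size. A stand-alone illustration with made-up values:

    #include <stdio.h>

    static unsigned ulog2(unsigned x)        /* stand-in for long_log2() */
    {
            unsigned r = 0;

            while (x >>= 1)
                    r++;
            return r;
    }

    int main(void)
    {
            unsigned max = 64, wqe_shift = 6;       /* 64 WQEs, 64 bytes each */

            printf("buggy logsize: %u\n", ulog2(max) + wqe_shift);  /* 12 */
            printf("fixed logsize: %u\n", ulog2(max));              /*  6 */
            return 0;
    }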
@@ -715,7 +715,7 @@ int mthca_max_srq_sge(struct mthca_dev *dev) | |||
715 | sizeof (struct mthca_data_seg)); | 715 | sizeof (struct mthca_data_seg)); |
716 | } | 716 | } |
717 | 717 | ||
718 | int __devinit mthca_init_srq_table(struct mthca_dev *dev) | 718 | int mthca_init_srq_table(struct mthca_dev *dev) |
719 | { | 719 | { |
720 | int err; | 720 | int err; |
721 | 721 | ||
diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h index 0b8a79d53a00..f2b61851a49c 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib.h +++ b/drivers/infiniband/ulp/ipoib/ipoib.h | |||
@@ -233,7 +233,7 @@ static inline struct ipoib_neigh **to_ipoib_neigh(struct neighbour *neigh) | |||
233 | } | 233 | } |
234 | 234 | ||
235 | struct ipoib_neigh *ipoib_neigh_alloc(struct neighbour *neigh); | 235 | struct ipoib_neigh *ipoib_neigh_alloc(struct neighbour *neigh); |
236 | void ipoib_neigh_free(struct ipoib_neigh *neigh); | 236 | void ipoib_neigh_free(struct net_device *dev, struct ipoib_neigh *neigh); |
237 | 237 | ||
238 | extern struct workqueue_struct *ipoib_workqueue; | 238 | extern struct workqueue_struct *ipoib_workqueue; |
239 | 239 | ||
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c index 85522daeb946..5ba3154320b4 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_main.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c | |||
@@ -264,7 +264,7 @@ static void path_free(struct net_device *dev, struct ipoib_path *path) | |||
264 | if (neigh->ah) | 264 | if (neigh->ah) |
265 | ipoib_put_ah(neigh->ah); | 265 | ipoib_put_ah(neigh->ah); |
266 | 266 | ||
267 | ipoib_neigh_free(neigh); | 267 | ipoib_neigh_free(dev, neigh); |
268 | } | 268 | } |
269 | 269 | ||
270 | spin_unlock_irqrestore(&priv->lock, flags); | 270 | spin_unlock_irqrestore(&priv->lock, flags); |
@@ -525,10 +525,11 @@ static void neigh_add_path(struct sk_buff *skb, struct net_device *dev) | |||
525 | ipoib_send(dev, skb, path->ah, IPOIB_QPN(skb->dst->neighbour->ha)); | 525 | ipoib_send(dev, skb, path->ah, IPOIB_QPN(skb->dst->neighbour->ha)); |
526 | } else { | 526 | } else { |
527 | neigh->ah = NULL; | 527 | neigh->ah = NULL; |
528 | __skb_queue_tail(&neigh->queue, skb); | ||
529 | 528 | ||
530 | if (!path->query && path_rec_start(dev, path)) | 529 | if (!path->query && path_rec_start(dev, path)) |
531 | goto err_list; | 530 | goto err_list; |
531 | |||
532 | __skb_queue_tail(&neigh->queue, skb); | ||
532 | } | 533 | } |
533 | 534 | ||
534 | spin_unlock(&priv->lock); | 535 | spin_unlock(&priv->lock); |
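The reordering above pairs with the new ipoib_neigh_free() later in this file: once ipoib_neigh_free() drains neigh->queue, queueing the skb before the path_rec_start() error check would let the err_list path free that skb twice, once inside ipoib_neigh_free() and once via the explicit dev_kfree_skb_any(). The fixed ordering, restated with comments:

    neigh->ah = NULL;

    if (!path->query && path_rec_start(dev, path))
            goto err_list;          /* skb not queued yet: freed exactly once */

    __skb_queue_tail(&neigh->queue, skb);   /* the neighbour now owns the skb */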
@@ -538,7 +539,7 @@ err_list: | |||
538 | list_del(&neigh->list); | 539 | list_del(&neigh->list); |
539 | 540 | ||
540 | err_path: | 541 | err_path: |
541 | ipoib_neigh_free(neigh); | 542 | ipoib_neigh_free(dev, neigh); |
542 | ++priv->stats.tx_dropped; | 543 | ++priv->stats.tx_dropped; |
543 | dev_kfree_skb_any(skb); | 544 | dev_kfree_skb_any(skb); |
544 | 545 | ||
@@ -655,7 +656,7 @@ static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
655 | */ | 656 | */ |
656 | ipoib_put_ah(neigh->ah); | 657 | ipoib_put_ah(neigh->ah); |
657 | list_del(&neigh->list); | 658 | list_del(&neigh->list); |
658 | ipoib_neigh_free(neigh); | 659 | ipoib_neigh_free(dev, neigh); |
659 | spin_unlock(&priv->lock); | 660 | spin_unlock(&priv->lock); |
660 | ipoib_path_lookup(skb, dev); | 661 | ipoib_path_lookup(skb, dev); |
661 | goto out; | 662 | goto out; |
@@ -786,7 +787,7 @@ static void ipoib_neigh_destructor(struct neighbour *n) | |||
786 | if (neigh->ah) | 787 | if (neigh->ah) |
787 | ah = neigh->ah; | 788 | ah = neigh->ah; |
788 | list_del(&neigh->list); | 789 | list_del(&neigh->list); |
789 | ipoib_neigh_free(neigh); | 790 | ipoib_neigh_free(n->dev, neigh); |
790 | } | 791 | } |
791 | 792 | ||
792 | spin_unlock_irqrestore(&priv->lock, flags); | 793 | spin_unlock_irqrestore(&priv->lock, flags); |
@@ -809,9 +810,15 @@ struct ipoib_neigh *ipoib_neigh_alloc(struct neighbour *neighbour) | |||
809 | return neigh; | 810 | return neigh; |
810 | } | 811 | } |
811 | 812 | ||
812 | void ipoib_neigh_free(struct ipoib_neigh *neigh) | 813 | void ipoib_neigh_free(struct net_device *dev, struct ipoib_neigh *neigh) |
813 | { | 814 | { |
815 | struct ipoib_dev_priv *priv = netdev_priv(dev); | ||
816 | struct sk_buff *skb; | ||
814 | *to_ipoib_neigh(neigh->neighbour) = NULL; | 817 | *to_ipoib_neigh(neigh->neighbour) = NULL; |
818 | while ((skb = __skb_dequeue(&neigh->queue))) { | ||
819 | ++priv->stats.tx_dropped; | ||
820 | dev_kfree_skb_any(skb); | ||
821 | } | ||
815 | kfree(neigh); | 822 | kfree(neigh); |
816 | } | 823 | } |
817 | 824 | ||
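This hunk is the core of the "IPoIB: Fix skb leak when freeing neighbour" change: packets can still be parked on neigh->queue waiting for a path record when the neighbour is destroyed, and the old kfree(neigh) simply orphaned them. The new net_device argument exists so the drops can be charged to priv->stats.tx_dropped. The drain-before-free pattern in isolation, as a minimal user-space sketch with stand-in types:

    #include <stdlib.h>

    struct pkt { struct pkt *next; };

    struct neigh {
            struct pkt *queue;              /* pending packets, singly linked */
            unsigned long *tx_dropped;      /* owner's drop counter           */
    };

    /* Free a neighbour, releasing and accounting anything still queued. */
    static void neigh_free(struct neigh *n)
    {
            struct pkt *p;

            while ((p = n->queue)) {        /* drain before freeing the owner */
                    n->queue = p->next;
                    ++*n->tx_dropped;
                    free(p);
            }
            free(n);
    }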
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c index 3faa1820f0e9..d282d65e3ee0 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c | |||
@@ -114,7 +114,7 @@ static void ipoib_mcast_free(struct ipoib_mcast *mcast) | |||
114 | */ | 114 | */ |
115 | if (neigh->ah) | 115 | if (neigh->ah) |
116 | ipoib_put_ah(neigh->ah); | 116 | ipoib_put_ah(neigh->ah); |
117 | ipoib_neigh_free(neigh); | 117 | ipoib_neigh_free(dev, neigh); |
118 | } | 118 | } |
119 | 119 | ||
120 | spin_unlock_irqrestore(&priv->lock, flags); | 120 | spin_unlock_irqrestore(&priv->lock, flags); |
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.h b/drivers/infiniband/ulp/iser/iscsi_iser.h index 9c53916f28c2..234e5b061a75 100644 --- a/drivers/infiniband/ulp/iser/iscsi_iser.h +++ b/drivers/infiniband/ulp/iser/iscsi_iser.h | |||
@@ -283,7 +283,7 @@ struct iser_global { | |||
283 | struct mutex connlist_mutex; | 283 | struct mutex connlist_mutex; |
284 | struct list_head connlist; /* all iSER IB connections */ | 284 | struct list_head connlist; /* all iSER IB connections */ |
285 | 285 | ||
286 | kmem_cache_t *desc_cache; | 286 | struct kmem_cache *desc_cache; |
287 | }; | 287 | }; |
288 | 288 | ||
289 | extern struct iser_global ig; | 289 | extern struct iser_global ig; |
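The rename above is part of the tree-wide "IB: Convert kmem_cache_t -> struct kmem_cache" cleanup in this merge: the typedef was being retired, and callers now name the struct directly with no behavioural change. Usage stays as before, here sketched against the six-argument kmem_cache_create() of this kernel era (the cache name, init function, and cached type are illustrative):

    #include <linux/slab.h>
    #include <linux/errno.h>

    static struct kmem_cache *desc_cache;   /* was: kmem_cache_t *desc_cache; */

    static int example_cache_init(void)
    {
            desc_cache = kmem_cache_create("example_descs",
                                           sizeof (struct iser_desc), 0,
                                           SLAB_HWCACHE_ALIGN,
                                           NULL, NULL); /* ctor, dtor */
            return desc_cache ? 0 : -ENOMEM;
    }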
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c index 4b09147f438f..64ab5fc7cca3 100644 --- a/drivers/infiniband/ulp/srp/ib_srp.c +++ b/drivers/infiniband/ulp/srp/ib_srp.c | |||
@@ -1176,9 +1176,11 @@ static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event) | |||
1176 | break; | 1176 | break; |
1177 | } | 1177 | } |
1178 | 1178 | ||
1179 | target->status = srp_alloc_iu_bufs(target); | 1179 | if (!target->rx_ring[0]) { |
1180 | if (target->status) | 1180 | target->status = srp_alloc_iu_bufs(target); |
1181 | break; | 1181 | if (target->status) |
1182 | break; | ||
1183 | } | ||
1182 | 1184 | ||
1183 | qp_attr = kmalloc(sizeof *qp_attr, GFP_KERNEL); | 1185 | qp_attr = kmalloc(sizeof *qp_attr, GFP_KERNEL); |
1184 | if (!qp_attr) { | 1186 | if (!qp_attr) { |
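The added rx_ring[0] test is the "IB/srp: Fix memory leak on reconnect" fix: srp_cm_handler() runs on every connection establishment, and calling srp_alloc_iu_bufs() unconditionally replaced the rings allocated by the previous session without freeing them. Guarding on the first ring entry makes the allocation once-per-target; a reconnect reuses the existing buffers. The leak and its guard in a small self-contained sketch:

    #include <stdlib.h>

    #define RING_SIZE 2

    struct target { void *rx_ring[RING_SIZE]; };

    static int alloc_iu_bufs(struct target *t)
    {
            for (int i = 0; i < RING_SIZE; i++)
                    if (!(t->rx_ring[i] = malloc(64)))
                            return -1;
            return 0;
    }

    /* Runs on every (re)connection; without the guard, each reconnect
     * would overwrite rx_ring[] and leak the previous allocations. */
    static int on_connection_established(struct target *t)
    {
            if (!t->rx_ring[0])
                    return alloc_iu_bufs(t);
            return 0;
    }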
@@ -1716,7 +1718,8 @@ static ssize_t srp_create_target(struct class_device *class_dev, | |||
1716 | if (!target_host) | 1718 | if (!target_host) |
1717 | return -ENOMEM; | 1719 | return -ENOMEM; |
1718 | 1720 | ||
1719 | target_host->max_lun = SRP_MAX_LUN; | 1721 | target_host->max_lun = SRP_MAX_LUN; |
1722 | target_host->max_cmd_len = sizeof ((struct srp_cmd *) (void *) 0L)->cdb; | ||
1720 | 1723 | ||
1721 | target = host_to_target(target_host); | 1724 | target = host_to_target(target_host); |
1722 | 1725 | ||
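The max_cmd_len assignment is the "IB/srp: Increase supported CDB size" change, and its sizeof expression is a standard idiom: sizeof never evaluates its operand, so "dereferencing" a null struct pointer here is safe and yields the size of a member without needing an instance. Since srp_cmd's cdb is a 16-byte array, this raises the SCSI host's CDB limit from the old default to 16. The idiom in isolation, with a hypothetical struct:

    #include <stdio.h>

    struct example_cmd {
            unsigned char opcode;
            unsigned char cdb[16];  /* stand-in for srp_cmd's cdb[] */
    };

    int main(void)
    {
            /* sizeof does not evaluate its operand: no real dereference. */
            size_t n = sizeof ((struct example_cmd *) 0)->cdb;

            printf("cdb room: %zu bytes\n", n);     /* prints 16 */
            return 0;
    }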