author     Dave Jones <davej@redhat.com>	2006-12-12 17:41:41 -0500
committer  Dave Jones <davej@redhat.com>	2006-12-12 17:41:41 -0500
commit     c4366889dda8110247be59ca41fddb82951a8c26
tree       705c1a996bed8fd48ce94ff33ec9fd00f9b94875 /drivers/infiniband
parent     db2fb9db5735cc532fd4fc55e94b9a3c3750378e
parent     e1036502e5263851259d147771226161e5ccc85a

Merge ../linus

Conflicts:

	drivers/cpufreq/cpufreq.c

Diffstat (limited to 'drivers/infiniband'):

 69 files changed, 785 insertions(+), 577 deletions(-)
diff --git a/drivers/infiniband/core/addr.c b/drivers/infiniband/core/addr.c
index 60d3fbdd216c..af939796750d 100644
--- a/drivers/infiniband/core/addr.c
+++ b/drivers/infiniband/core/addr.c
@@ -47,6 +47,7 @@ struct addr_req {
 	struct sockaddr src_addr;
 	struct sockaddr dst_addr;
 	struct rdma_dev_addr *addr;
+	struct rdma_addr_client *client;
 	void *context;
 	void (*callback)(int status, struct sockaddr *src_addr,
 			 struct rdma_dev_addr *addr, void *context);
@@ -54,13 +55,33 @@ struct addr_req {
 	int status;
 };
 
-static void process_req(void *data);
+static void process_req(struct work_struct *work);
 
 static DEFINE_MUTEX(lock);
 static LIST_HEAD(req_list);
-static DECLARE_WORK(work, process_req, NULL);
+static DECLARE_DELAYED_WORK(work, process_req);
 static struct workqueue_struct *addr_wq;
 
+void rdma_addr_register_client(struct rdma_addr_client *client)
+{
+	atomic_set(&client->refcount, 1);
+	init_completion(&client->comp);
+}
+EXPORT_SYMBOL(rdma_addr_register_client);
+
+static inline void put_client(struct rdma_addr_client *client)
+{
+	if (atomic_dec_and_test(&client->refcount))
+		complete(&client->comp);
+}
+
+void rdma_addr_unregister_client(struct rdma_addr_client *client)
+{
+	put_client(client);
+	wait_for_completion(&client->comp);
+}
+EXPORT_SYMBOL(rdma_addr_unregister_client);
+
 int rdma_copy_addr(struct rdma_dev_addr *dev_addr, struct net_device *dev,
 		   const unsigned char *dst_dev_addr)
 {
@@ -118,7 +139,7 @@ static void queue_req(struct addr_req *req)
 
 	mutex_lock(&lock);
 	list_for_each_entry_reverse(temp_req, &req_list, list) {
-		if (time_after(req->timeout, temp_req->timeout))
+		if (time_after_eq(req->timeout, temp_req->timeout))
 			break;
 	}
 
@@ -194,7 +215,7 @@ out:
 	return ret;
 }
 
-static void process_req(void *data)
+static void process_req(struct work_struct *work)
 {
 	struct addr_req *req, *temp_req;
 	struct sockaddr_in *src_in, *dst_in;
@@ -204,19 +225,17 @@ static void process_req(void *data)
 
 	mutex_lock(&lock);
 	list_for_each_entry_safe(req, temp_req, &req_list, list) {
-		if (req->status) {
+		if (req->status == -ENODATA) {
 			src_in = (struct sockaddr_in *) &req->src_addr;
 			dst_in = (struct sockaddr_in *) &req->dst_addr;
 			req->status = addr_resolve_remote(src_in, dst_in,
 							  req->addr);
+			if (req->status && time_after_eq(jiffies, req->timeout))
+				req->status = -ETIMEDOUT;
+			else if (req->status == -ENODATA)
+				continue;
 		}
-		if (req->status && time_after(jiffies, req->timeout))
-			req->status = -ETIMEDOUT;
-		else if (req->status == -ENODATA)
-			continue;
-
-		list_del(&req->list);
-		list_add_tail(&req->list, &done_list);
+		list_move_tail(&req->list, &done_list);
 	}
 
 	if (!list_empty(&req_list)) {
@@ -229,6 +248,7 @@ static void process_req(void *data)
 		list_del(&req->list);
 		req->callback(req->status, &req->src_addr, req->addr,
 			      req->context);
+		put_client(req->client);
 		kfree(req);
 	}
 }
@@ -264,7 +284,8 @@ static int addr_resolve_local(struct sockaddr_in *src_in,
 	return ret;
 }
 
-int rdma_resolve_ip(struct sockaddr *src_addr, struct sockaddr *dst_addr,
+int rdma_resolve_ip(struct rdma_addr_client *client,
+		    struct sockaddr *src_addr, struct sockaddr *dst_addr,
 		    struct rdma_dev_addr *addr, int timeout_ms,
 		    void (*callback)(int status, struct sockaddr *src_addr,
 				     struct rdma_dev_addr *addr, void *context),
@@ -285,6 +306,8 @@ int rdma_resolve_ip(struct sockaddr *src_addr, struct sockaddr *dst_addr,
 	req->addr = addr;
 	req->callback = callback;
 	req->context = context;
+	req->client = client;
+	atomic_inc(&client->refcount);
 
 	src_in = (struct sockaddr_in *) &req->src_addr;
 	dst_in = (struct sockaddr_in *) &req->dst_addr;
@@ -305,6 +328,7 @@ int rdma_resolve_ip(struct sockaddr *src_addr, struct sockaddr *dst_addr,
 		break;
 	default:
 		ret = req->status;
+		atomic_dec(&client->refcount);
 		kfree(req);
 		break;
 	}
@@ -321,8 +345,7 @@ void rdma_addr_cancel(struct rdma_dev_addr *addr)
 		if (req->addr == addr) {
 			req->status = -ECANCELED;
 			req->timeout = jiffies;
-			list_del(&req->list);
-			list_add(&req->list, &req_list);
+			list_move(&req->list, &req_list);
 			set_timeout(req->timeout);
 			break;
 		}
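The client registration hunks above introduce a common kernel lifetime idiom: every outstanding request holds a reference on its client, and unregistering drops the initial reference and then sleeps until the last put fires a completion. A minimal sketch of the idiom under the same kernel APIs, using illustrative my_* names rather than the rdma_addr ones:

#include <linux/completion.h>
#include <asm/atomic.h>

struct my_client {				/* illustrative, not kernel API */
	atomic_t refcount;
	struct completion comp;
};

static void my_client_register(struct my_client *c)
{
	atomic_set(&c->refcount, 1);		/* the initial reference */
	init_completion(&c->comp);
}

static void my_client_put(struct my_client *c)
{
	if (atomic_dec_and_test(&c->refcount))
		complete(&c->comp);		/* last put wakes unregister */
}

static void my_client_unregister(struct my_client *c)
{
	my_client_put(c);			/* drop the initial reference */
	wait_for_completion(&c->comp);		/* block until requests drain */
}

Each new request would take a reference with atomic_inc(&c->refcount) and drop it from its completion path, exactly as rdma_resolve_ip() and process_req() do above.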
diff --git a/drivers/infiniband/core/cache.c b/drivers/infiniband/core/cache.c
index 20e9f64e67a6..98272fbbfb31 100644
--- a/drivers/infiniband/core/cache.c
+++ b/drivers/infiniband/core/cache.c
@@ -285,9 +285,10 @@ err:
 	kfree(tprops);
 }
 
-static void ib_cache_task(void *work_ptr)
+static void ib_cache_task(struct work_struct *_work)
 {
-	struct ib_update_work *work = work_ptr;
+	struct ib_update_work *work =
+		container_of(_work, struct ib_update_work, work);
 
 	ib_cache_update(work->device, work->port_num);
 	kfree(work);
@@ -306,7 +307,7 @@ static void ib_cache_event(struct ib_event_handler *handler,
 	    event->event == IB_EVENT_CLIENT_REREGISTER) {
 		work = kmalloc(sizeof *work, GFP_ATOMIC);
 		if (work) {
-			INIT_WORK(&work->work, ib_cache_task, work);
+			INIT_WORK(&work->work, ib_cache_task);
 			work->device = event->device;
 			work->port_num = event->element.port_num;
 			schedule_work(&work->work);
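The ib_cache_task() conversion shows the shape of the 2.6.20 workqueue API change that runs through this whole merge: a handler now receives the struct work_struct pointer itself and recovers its context with container_of(), instead of taking an opaque void * registered as INIT_WORK()'s third argument. A minimal sketch with hypothetical my_* names:

#include <linux/workqueue.h>
#include <linux/slab.h>

struct my_update {
	struct work_struct work;	/* embedded in the payload struct */
	int port_num;
};

static void my_task(struct work_struct *_work)
{
	/* Recover the containing structure from the embedded member. */
	struct my_update *up = container_of(_work, struct my_update, work);

	/* ... act on up->port_num ... */
	kfree(up);
}

static void my_schedule(int port_num)
{
	struct my_update *up = kmalloc(sizeof *up, GFP_ATOMIC);

	if (up) {
		INIT_WORK(&up->work, my_task);	/* no context argument now */
		up->port_num = port_num;
		schedule_work(&up->work);
	}
}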
diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
index 25b1018a476c..79c937bf6962 100644
--- a/drivers/infiniband/core/cm.c
+++ b/drivers/infiniband/core/cm.c
@@ -101,7 +101,7 @@ struct cm_av {
 };
 
 struct cm_work {
-	struct work_struct work;
+	struct delayed_work work;
 	struct list_head list;
 	struct cm_port *port;
 	struct ib_mad_recv_wc *mad_recv_wc;	/* Received MADs */
@@ -147,12 +147,12 @@
 	__be32 rq_psn;
 	int timeout_ms;
 	enum ib_mtu path_mtu;
+	__be16 pkey;
 	u8 private_data_len;
 	u8 max_cm_retries;
 	u8 peer_to_peer;
 	u8 responder_resources;
 	u8 initiator_depth;
-	u8 local_ack_timeout;
 	u8 retry_count;
 	u8 rnr_retry_count;
 	u8 service_timeout;
@@ -161,7 +161,7 @@ struct cm_id_private {
 	atomic_t work_count;
 };
 
-static void cm_work_handler(void *data);
+static void cm_work_handler(struct work_struct *work);
 
 static inline void cm_deref_id(struct cm_id_private *cm_id_priv)
 {
@@ -240,11 +240,10 @@ static void * cm_copy_private_data(const void *private_data,
 	if (!private_data || !private_data_len)
 		return NULL;
 
-	data = kmalloc(private_data_len, GFP_KERNEL);
+	data = kmemdup(private_data, private_data_len, GFP_KERNEL);
 	if (!data)
 		return ERR_PTR(-ENOMEM);
 
-	memcpy(data, private_data, private_data_len);
 	return data;
 }
 
@@ -669,8 +668,7 @@ static struct cm_timewait_info * cm_create_timewait_info(__be32 local_id)
 		return ERR_PTR(-ENOMEM);
 
 	timewait_info->work.local_id = local_id;
-	INIT_WORK(&timewait_info->work.work, cm_work_handler,
-		  &timewait_info->work);
+	INIT_DELAYED_WORK(&timewait_info->work.work, cm_work_handler);
 	timewait_info->work.cm_event.event = IB_CM_TIMEWAIT_EXIT;
 	return timewait_info;
 }
@@ -691,7 +689,7 @@ static void cm_enter_timewait(struct cm_id_private *cm_id_priv)
 	 * timewait before notifying the user that we've exited timewait.
 	 */
 	cm_id_priv->id.state = IB_CM_TIMEWAIT;
-	wait_time = cm_convert_to_ms(cm_id_priv->local_ack_timeout);
+	wait_time = cm_convert_to_ms(cm_id_priv->av.packet_life_time + 1);
 	queue_delayed_work(cm.wq, &cm_id_priv->timewait_info->work.work,
 			   msecs_to_jiffies(wait_time));
 	cm_id_priv->timewait_info = NULL;
@@ -1010,6 +1008,7 @@ int ib_send_cm_req(struct ib_cm_id *cm_id,
 	cm_id_priv->responder_resources = param->responder_resources;
 	cm_id_priv->retry_count = param->retry_count;
 	cm_id_priv->path_mtu = param->primary_path->mtu;
+	cm_id_priv->pkey = param->primary_path->pkey;
 	cm_id_priv->qp_type = param->qp_type;
 
 	ret = cm_alloc_msg(cm_id_priv, &cm_id_priv->msg);
@@ -1024,8 +1023,6 @@ int ib_send_cm_req(struct ib_cm_id *cm_id,
 
 	cm_id_priv->local_qpn = cm_req_get_local_qpn(req_msg);
 	cm_id_priv->rq_psn = cm_req_get_starting_psn(req_msg);
-	cm_id_priv->local_ack_timeout =
-				cm_req_get_primary_local_ack_timeout(req_msg);
 
 	spin_lock_irqsave(&cm_id_priv->lock, flags);
 	ret = ib_post_send_mad(cm_id_priv->msg, NULL);
@@ -1410,9 +1407,8 @@ static int cm_req_handler(struct cm_work *work)
 	cm_id_priv->initiator_depth = cm_req_get_resp_res(req_msg);
 	cm_id_priv->responder_resources = cm_req_get_init_depth(req_msg);
 	cm_id_priv->path_mtu = cm_req_get_path_mtu(req_msg);
+	cm_id_priv->pkey = req_msg->pkey;
 	cm_id_priv->sq_psn = cm_req_get_starting_psn(req_msg);
-	cm_id_priv->local_ack_timeout =
-				cm_req_get_primary_local_ack_timeout(req_msg);
 	cm_id_priv->retry_count = cm_req_get_retry_count(req_msg);
 	cm_id_priv->rnr_retry_count = cm_req_get_rnr_retry_count(req_msg);
 	cm_id_priv->qp_type = cm_req_get_qp_type(req_msg);
@@ -1716,7 +1712,7 @@ static int cm_establish_handler(struct cm_work *work)
 	unsigned long flags;
 	int ret;
 
-	/* See comment in ib_cm_establish about lookup. */
+	/* See comment in cm_establish about lookup. */
 	cm_id_priv = cm_acquire_id(work->local_id, work->remote_id);
 	if (!cm_id_priv)
 		return -EINVAL;
@@ -2402,11 +2398,16 @@ int ib_send_cm_lap(struct ib_cm_id *cm_id,
 	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
 	spin_lock_irqsave(&cm_id_priv->lock, flags);
 	if (cm_id->state != IB_CM_ESTABLISHED ||
-	    cm_id->lap_state != IB_CM_LAP_IDLE) {
+	    (cm_id->lap_state != IB_CM_LAP_UNINIT &&
+	     cm_id->lap_state != IB_CM_LAP_IDLE)) {
 		ret = -EINVAL;
 		goto out;
 	}
 
+	ret = cm_init_av_by_path(alternate_path, &cm_id_priv->alt_av);
+	if (ret)
+		goto out;
+
 	ret = cm_alloc_msg(cm_id_priv, &msg);
 	if (ret)
 		goto out;
@@ -2431,7 +2432,8 @@ out:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
 }
 EXPORT_SYMBOL(ib_send_cm_lap);
 
-static void cm_format_path_from_lap(struct ib_sa_path_rec *path,
+static void cm_format_path_from_lap(struct cm_id_private *cm_id_priv,
+				    struct ib_sa_path_rec *path,
 				    struct cm_lap_msg *lap_msg)
 {
 	memset(path, 0, sizeof *path);
@@ -2443,10 +2445,10 @@ static void cm_format_path_from_lap(struct ib_sa_path_rec *path,
 	path->hop_limit = lap_msg->alt_hop_limit;
 	path->traffic_class = cm_lap_get_traffic_class(lap_msg);
 	path->reversible = 1;
-	/* pkey is same as in REQ */
+	path->pkey = cm_id_priv->pkey;
 	path->sl = cm_lap_get_sl(lap_msg);
 	path->mtu_selector = IB_SA_EQ;
-	/* mtu is same as in REQ */
+	path->mtu = cm_id_priv->path_mtu;
 	path->rate_selector = IB_SA_EQ;
 	path->rate = cm_lap_get_packet_rate(lap_msg);
 	path->packet_life_time_selector = IB_SA_EQ;
@@ -2472,7 +2474,7 @@ static int cm_lap_handler(struct cm_work *work)
 
 	param = &work->cm_event.param.lap_rcvd;
 	param->alternate_path = &work->path[0];
-	cm_format_path_from_lap(param->alternate_path, lap_msg);
+	cm_format_path_from_lap(cm_id_priv, param->alternate_path, lap_msg);
 	work->cm_event.private_data = &lap_msg->private_data;
 
 	spin_lock_irqsave(&cm_id_priv->lock, flags);
@@ -2480,6 +2482,7 @@ static int cm_lap_handler(struct cm_work *work)
 		goto unlock;
 
 	switch (cm_id_priv->id.lap_state) {
+	case IB_CM_LAP_UNINIT:
 	case IB_CM_LAP_IDLE:
 		break;
 	case IB_CM_MRA_LAP_SENT:
@@ -2502,6 +2505,10 @@ static int cm_lap_handler(struct cm_work *work)
 
 	cm_id_priv->id.lap_state = IB_CM_LAP_RCVD;
 	cm_id_priv->tid = lap_msg->hdr.tid;
+	cm_init_av_for_response(work->port, work->mad_recv_wc->wc,
+				work->mad_recv_wc->recv_buf.grh,
+				&cm_id_priv->av);
+	cm_init_av_by_path(param->alternate_path, &cm_id_priv->alt_av);
 	ret = atomic_inc_and_test(&cm_id_priv->work_count);
 	if (!ret)
 		list_add_tail(&work->list, &cm_id_priv->work_list);
@@ -2987,9 +2994,9 @@ static void cm_send_handler(struct ib_mad_agent *mad_agent,
 	}
 }
 
-static void cm_work_handler(void *data)
+static void cm_work_handler(struct work_struct *_work)
 {
-	struct cm_work *work = data;
+	struct cm_work *work = container_of(_work, struct cm_work, work.work);
 	int ret;
 
 	switch (work->cm_event.event) {
@@ -3040,7 +3047,7 @@ static void cm_work_handler(void *data)
 	cm_free_work(work);
 }
 
-int ib_cm_establish(struct ib_cm_id *cm_id)
+static int cm_establish(struct ib_cm_id *cm_id)
 {
 	struct cm_id_private *cm_id_priv;
 	struct cm_work *work;
@@ -3079,16 +3086,53 @@ int ib_cm_establish(struct ib_cm_id *cm_id)
 	 * we need to find the cm_id once we're in the context of the
 	 * worker thread, rather than holding a reference on it.
 	 */
-	INIT_WORK(&work->work, cm_work_handler, work);
+	INIT_DELAYED_WORK(&work->work, cm_work_handler);
 	work->local_id = cm_id->local_id;
 	work->remote_id = cm_id->remote_id;
 	work->mad_recv_wc = NULL;
 	work->cm_event.event = IB_CM_USER_ESTABLISHED;
-	queue_work(cm.wq, &work->work);
+	queue_delayed_work(cm.wq, &work->work, 0);
 out:
 	return ret;
 }
-EXPORT_SYMBOL(ib_cm_establish);
+
+static int cm_migrate(struct ib_cm_id *cm_id)
+{
+	struct cm_id_private *cm_id_priv;
+	unsigned long flags;
+	int ret = 0;
+
+	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
+	spin_lock_irqsave(&cm_id_priv->lock, flags);
+	if (cm_id->state == IB_CM_ESTABLISHED &&
+	    (cm_id->lap_state == IB_CM_LAP_UNINIT ||
+	     cm_id->lap_state == IB_CM_LAP_IDLE)) {
+		cm_id->lap_state = IB_CM_LAP_IDLE;
+		cm_id_priv->av = cm_id_priv->alt_av;
+	} else
+		ret = -EINVAL;
+	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
+
+	return ret;
+}
+
+int ib_cm_notify(struct ib_cm_id *cm_id, enum ib_event_type event)
+{
+	int ret;
+
+	switch (event) {
+	case IB_EVENT_COMM_EST:
+		ret = cm_establish(cm_id);
+		break;
+	case IB_EVENT_PATH_MIG:
+		ret = cm_migrate(cm_id);
+		break;
+	default:
+		ret = -EINVAL;
+	}
+	return ret;
+}
+EXPORT_SYMBOL(ib_cm_notify);
 
 static void cm_recv_handler(struct ib_mad_agent *mad_agent,
 			    struct ib_mad_recv_wc *mad_recv_wc)
@@ -3146,11 +3190,11 @@ static void cm_recv_handler(struct ib_mad_agent *mad_agent,
 		return;
 	}
 
-	INIT_WORK(&work->work, cm_work_handler, work);
+	INIT_DELAYED_WORK(&work->work, cm_work_handler);
 	work->cm_event.event = event;
 	work->mad_recv_wc = mad_recv_wc;
 	work->port = (struct cm_port *)mad_agent->context;
-	queue_work(cm.wq, &work->work);
+	queue_delayed_work(cm.wq, &work->work, 0);
 }
 
 static int cm_init_qp_init_attr(struct cm_id_private *cm_id_priv,
@@ -3173,8 +3217,7 @@ static int cm_init_qp_init_attr(struct cm_id_private *cm_id_priv,
 	case IB_CM_ESTABLISHED:
 		*qp_attr_mask = IB_QP_STATE | IB_QP_ACCESS_FLAGS |
 				IB_QP_PKEY_INDEX | IB_QP_PORT;
-		qp_attr->qp_access_flags = IB_ACCESS_LOCAL_WRITE |
-					   IB_ACCESS_REMOTE_WRITE;
+		qp_attr->qp_access_flags = IB_ACCESS_REMOTE_WRITE;
 		if (cm_id_priv->responder_resources)
 			qp_attr->qp_access_flags |= IB_ACCESS_REMOTE_READ |
 						    IB_ACCESS_REMOTE_ATOMIC;
@@ -3222,6 +3265,9 @@ static int cm_init_qp_rtr_attr(struct cm_id_private *cm_id_priv,
 	if (cm_id_priv->alt_av.ah_attr.dlid) {
 		*qp_attr_mask |= IB_QP_ALT_PATH;
 		qp_attr->alt_port_num = cm_id_priv->alt_av.port->port_num;
+		qp_attr->alt_pkey_index = cm_id_priv->alt_av.pkey_index;
+		qp_attr->alt_timeout =
+			cm_id_priv->alt_av.packet_life_time + 1;
 		qp_attr->alt_ah_attr = cm_id_priv->alt_av.ah_attr;
 	}
 	ret = 0;
@@ -3248,19 +3294,31 @@ static int cm_init_qp_rts_attr(struct cm_id_private *cm_id_priv,
 	case IB_CM_REP_SENT:
 	case IB_CM_MRA_REP_RCVD:
 	case IB_CM_ESTABLISHED:
-		*qp_attr_mask = IB_QP_STATE | IB_QP_SQ_PSN;
-		qp_attr->sq_psn = be32_to_cpu(cm_id_priv->sq_psn);
-		if (cm_id_priv->qp_type == IB_QPT_RC) {
-			*qp_attr_mask |= IB_QP_TIMEOUT | IB_QP_RETRY_CNT |
-					 IB_QP_RNR_RETRY |
-					 IB_QP_MAX_QP_RD_ATOMIC;
-			qp_attr->timeout = cm_id_priv->local_ack_timeout;
-			qp_attr->retry_cnt = cm_id_priv->retry_count;
-			qp_attr->rnr_retry = cm_id_priv->rnr_retry_count;
-			qp_attr->max_rd_atomic = cm_id_priv->initiator_depth;
-		}
-		if (cm_id_priv->alt_av.ah_attr.dlid) {
-			*qp_attr_mask |= IB_QP_PATH_MIG_STATE;
+		if (cm_id_priv->id.lap_state == IB_CM_LAP_UNINIT) {
+			*qp_attr_mask = IB_QP_STATE | IB_QP_SQ_PSN;
+			qp_attr->sq_psn = be32_to_cpu(cm_id_priv->sq_psn);
+			if (cm_id_priv->qp_type == IB_QPT_RC) {
+				*qp_attr_mask |= IB_QP_TIMEOUT | IB_QP_RETRY_CNT |
+						 IB_QP_RNR_RETRY |
+						 IB_QP_MAX_QP_RD_ATOMIC;
+				qp_attr->timeout =
+					cm_id_priv->av.packet_life_time + 1;
+				qp_attr->retry_cnt = cm_id_priv->retry_count;
+				qp_attr->rnr_retry = cm_id_priv->rnr_retry_count;
+				qp_attr->max_rd_atomic =
+					cm_id_priv->initiator_depth;
+			}
+			if (cm_id_priv->alt_av.ah_attr.dlid) {
+				*qp_attr_mask |= IB_QP_PATH_MIG_STATE;
+				qp_attr->path_mig_state = IB_MIG_REARM;
+			}
+		} else {
+			*qp_attr_mask = IB_QP_ALT_PATH | IB_QP_PATH_MIG_STATE;
+			qp_attr->alt_port_num = cm_id_priv->alt_av.port->port_num;
+			qp_attr->alt_pkey_index = cm_id_priv->alt_av.pkey_index;
+			qp_attr->alt_timeout =
+				cm_id_priv->alt_av.packet_life_time + 1;
+			qp_attr->alt_ah_attr = cm_id_priv->alt_av.ah_attr;
 			qp_attr->path_mig_state = IB_MIG_REARM;
 		}
 		ret = 0;
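Besides the pkey and LAP changes, cm.c widens struct cm_work's item to a struct delayed_work so one handler serves both immediate events and the timewait timer. Under the new API a delayed_work embeds the work_struct as its .work member, immediate submission becomes a zero-delay queue_delayed_work(), and the handler reaches its container through the nested member, as cm_work_handler() does above. A sketch with illustrative names:

#include <linux/workqueue.h>
#include <linux/jiffies.h>

struct my_work {
	struct delayed_work work;	/* embeds a work_struct as .work */
	int event;
};

static void my_handler(struct work_struct *_work)
{
	/* Note the nested path: the embedded work_struct sits at work.work. */
	struct my_work *w = container_of(_work, struct my_work, work.work);

	/* ... dispatch on w->event ... */
}

static void my_queue(struct workqueue_struct *wq, struct my_work *w,
		     unsigned int delay_ms)
{
	INIT_DELAYED_WORK(&w->work, my_handler);
	/* A zero delay replaces the old queue_work() for immediate items. */
	queue_delayed_work(wq, &w->work, msecs_to_jiffies(delay_ms));
}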
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index 9ae4f3a67c70..985a6b564d8f 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -63,6 +63,7 @@ static struct ib_client cma_client = {
 };
 
 static struct ib_sa_client sa_client;
+static struct rdma_addr_client addr_client;
 static LIST_HEAD(dev_list);
 static LIST_HEAD(listen_any_list);
 static DEFINE_MUTEX(lock);
@@ -343,7 +344,7 @@ static int cma_init_ib_qp(struct rdma_id_private *id_priv, struct ib_qp *qp)
 		return ret;
 
 	qp_attr.qp_state = IB_QPS_INIT;
-	qp_attr.qp_access_flags = IB_ACCESS_LOCAL_WRITE;
+	qp_attr.qp_access_flags = 0;
 	qp_attr.port_num = id_priv->id.port_num;
 	return ib_modify_qp(qp, &qp_attr, IB_QP_STATE | IB_QP_ACCESS_FLAGS |
 					  IB_QP_PKEY_INDEX | IB_QP_PORT);
@@ -934,13 +935,8 @@ static int cma_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
 	mutex_lock(&lock);
 	ret = cma_acquire_dev(conn_id);
 	mutex_unlock(&lock);
-	if (ret) {
-		ret = -ENODEV;
-		cma_exch(conn_id, CMA_DESTROYING);
-		cma_release_remove(conn_id);
-		rdma_destroy_id(&conn_id->id);
-		goto out;
-	}
+	if (ret)
+		goto release_conn_id;
 
 	conn_id->cm_id.ib = cm_id;
 	cm_id->context = conn_id;
@@ -950,13 +946,17 @@ static int cma_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
 	ret = cma_notify_user(conn_id, RDMA_CM_EVENT_CONNECT_REQUEST, 0,
 			      ib_event->private_data + offset,
 			      IB_CM_REQ_PRIVATE_DATA_SIZE - offset);
-	if (ret) {
-		/* Destroy the CM ID by returning a non-zero value. */
-		conn_id->cm_id.ib = NULL;
-		cma_exch(conn_id, CMA_DESTROYING);
-		cma_release_remove(conn_id);
-		rdma_destroy_id(&conn_id->id);
-	}
+	if (!ret)
+		goto out;
+
+	/* Destroy the CM ID by returning a non-zero value. */
+	conn_id->cm_id.ib = NULL;
+
+release_conn_id:
+	cma_exch(conn_id, CMA_DESTROYING);
+	cma_release_remove(conn_id);
+	rdma_destroy_id(&conn_id->id);
+
 out:
 	cma_release_remove(listen_id);
 	return ret;
@@ -1340,9 +1340,9 @@ static int cma_query_ib_route(struct rdma_id_private *id_priv, int timeout_ms,
 	return (id_priv->query_id < 0) ? id_priv->query_id : 0;
 }
 
-static void cma_work_handler(void *data)
+static void cma_work_handler(struct work_struct *_work)
 {
-	struct cma_work *work = data;
+	struct cma_work *work = container_of(_work, struct cma_work, work);
 	struct rdma_id_private *id_priv = work->id;
 	int destroy = 0;
 
@@ -1373,7 +1373,7 @@ static int cma_resolve_ib_route(struct rdma_id_private *id_priv, int timeout_ms)
 		return -ENOMEM;
 
 	work->id = id_priv;
-	INIT_WORK(&work->work, cma_work_handler, work);
+	INIT_WORK(&work->work, cma_work_handler);
 	work->old_state = CMA_ROUTE_QUERY;
 	work->new_state = CMA_ROUTE_RESOLVED;
 	work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED;
@@ -1430,7 +1430,7 @@ static int cma_resolve_iw_route(struct rdma_id_private *id_priv, int timeout_ms)
 		return -ENOMEM;
 
 	work->id = id_priv;
-	INIT_WORK(&work->work, cma_work_handler, work);
+	INIT_WORK(&work->work, cma_work_handler);
 	work->old_state = CMA_ROUTE_QUERY;
 	work->new_state = CMA_ROUTE_RESOLVED;
 	work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED;
@@ -1480,19 +1480,18 @@ static int cma_bind_loopback(struct rdma_id_private *id_priv)
 	u8 p;
 
 	mutex_lock(&lock);
+	if (list_empty(&dev_list)) {
+		ret = -ENODEV;
+		goto out;
+	}
 	list_for_each_entry(cma_dev, &dev_list, list)
 		for (p = 1; p <= cma_dev->device->phys_port_cnt; ++p)
-			if (!ib_query_port (cma_dev->device, p, &port_attr) &&
+			if (!ib_query_port(cma_dev->device, p, &port_attr) &&
 			    port_attr.state == IB_PORT_ACTIVE)
 				goto port_found;
 
-	if (!list_empty(&dev_list)) {
-		p = 1;
-		cma_dev = list_entry(dev_list.next, struct cma_device, list);
-	} else {
-		ret = -ENODEV;
-		goto out;
-	}
+	p = 1;
+	cma_dev = list_entry(dev_list.next, struct cma_device, list);
 
 port_found:
 	ret = ib_get_cached_gid(cma_dev->device, p, 0, &gid);
@@ -1584,7 +1583,7 @@ static int cma_resolve_loopback(struct rdma_id_private *id_priv)
 	}
 
 	work->id = id_priv;
-	INIT_WORK(&work->work, cma_work_handler, work);
+	INIT_WORK(&work->work, cma_work_handler);
 	work->old_state = CMA_ADDR_QUERY;
 	work->new_state = CMA_ADDR_RESOLVED;
 	work->event.event = RDMA_CM_EVENT_ADDR_RESOLVED;
@@ -1625,8 +1624,8 @@ int rdma_resolve_addr(struct rdma_cm_id *id, struct sockaddr *src_addr,
 	if (cma_any_addr(dst_addr))
 		ret = cma_resolve_loopback(id_priv);
 	else
-		ret = rdma_resolve_ip(&id->route.addr.src_addr, dst_addr,
-				      &id->route.addr.dev_addr,
+		ret = rdma_resolve_ip(&addr_client, &id->route.addr.src_addr,
+				      dst_addr, &id->route.addr.dev_addr,
 				      timeout_ms, addr_handler, id_priv);
 	if (ret)
 		goto err;
@@ -1762,22 +1761,29 @@ int rdma_bind_addr(struct rdma_cm_id *id, struct sockaddr *addr)
 
 	if (!cma_any_addr(addr)) {
 		ret = rdma_translate_ip(addr, &id->route.addr.dev_addr);
-		if (!ret) {
-			mutex_lock(&lock);
-			ret = cma_acquire_dev(id_priv);
-			mutex_unlock(&lock);
-		}
 		if (ret)
-			goto err;
+			goto err1;
+
+		mutex_lock(&lock);
+		ret = cma_acquire_dev(id_priv);
+		mutex_unlock(&lock);
+		if (ret)
+			goto err1;
 	}
 
 	memcpy(&id->route.addr.src_addr, addr, ip_addr_size(addr));
 	ret = cma_get_port(id_priv);
 	if (ret)
-		goto err;
+		goto err2;
 
 	return 0;
-err:
+err2:
+	if (!cma_any_addr(addr)) {
+		mutex_lock(&lock);
+		cma_detach_from_dev(id_priv);
+		mutex_unlock(&lock);
+	}
+err1:
 	cma_comp_exch(id_priv, CMA_ADDR_BOUND, CMA_IDLE);
 	return ret;
 }
@@ -2115,8 +2121,6 @@ static void cma_add_one(struct ib_device *device)
 
 	cma_dev->device = device;
 	cma_dev->node_guid = device->node_guid;
-	if (!cma_dev->node_guid)
-		goto err;
 
 	init_completion(&cma_dev->comp);
 	atomic_set(&cma_dev->refcount, 1);
@@ -2128,9 +2132,6 @@ static void cma_add_one(struct ib_device *device)
 	list_for_each_entry(id_priv, &listen_any_list, list)
 		cma_listen_on_dev(id_priv, cma_dev);
 	mutex_unlock(&lock);
-	return;
-err:
-	kfree(cma_dev);
 }
 
 static int cma_remove_id_dev(struct rdma_id_private *id_priv)
@@ -2210,6 +2211,7 @@ static int cma_init(void)
 		return -ENOMEM;
 
 	ib_sa_register_client(&sa_client);
+	rdma_addr_register_client(&addr_client);
 
 	ret = ib_register_client(&cma_client);
 	if (ret)
@@ -2217,6 +2219,7 @@ static int cma_init(void)
 	return 0;
 
 err:
+	rdma_addr_unregister_client(&addr_client);
 	ib_sa_unregister_client(&sa_client);
 	destroy_workqueue(cma_wq);
 	return ret;
@@ -2225,6 +2228,7 @@ err:
 static void cma_cleanup(void)
 {
 	ib_unregister_client(&cma_client);
+	rdma_addr_unregister_client(&addr_client);
 	ib_sa_unregister_client(&sa_client);
 	destroy_workqueue(cma_wq);
 	idr_destroy(&sdp_ps);
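The rdma_bind_addr() rework above replaces a nested success-check with the standard kernel unwind idiom: each step that can fail jumps to a label that undoes only what was already set up, in reverse order, so the new err2 path can release the device that cma_acquire_dev() took. The bare shape of the idiom, with hypothetical helpers standing in for the cma calls:

/* Hypothetical stand-ins for rdma_translate_ip(), cma_acquire_dev()
 * and cma_get_port(). */
static int my_translate(void);
static int my_acquire_dev(void);
static int my_get_port(void);
static void my_detach_from_dev(void);

static int my_bind(void)
{
	int ret;

	ret = my_translate();
	if (ret)
		goto err1;	/* nothing to undo yet */

	ret = my_acquire_dev();
	if (ret)
		goto err1;	/* still nothing acquired */

	ret = my_get_port();
	if (ret)
		goto err2;	/* must release the acquired device */

	return 0;
err2:
	my_detach_from_dev();
err1:
	return ret;
}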
diff --git a/drivers/infiniband/core/iwcm.c b/drivers/infiniband/core/iwcm.c
index c3fb304a4e86..1039ad57d53b 100644
--- a/drivers/infiniband/core/iwcm.c
+++ b/drivers/infiniband/core/iwcm.c
@@ -80,7 +80,7 @@ struct iwcm_work {
  * 1) in the event upcall, cm_event_handler(), for a listening cm_id. If
  *    the backlog is exceeded, then no more connection request events will
  *    be processed.  cm_event_handler() returns -ENOMEM in this case.  Its up
- *    to the provider to reject the connectino request.
+ *    to the provider to reject the connection request.
  * 2) in the connection request workqueue handler, cm_conn_req_handler().
  *    If work elements cannot be allocated for the new connect request cm_id,
  *    then IWCM will call the provider reject method.  This is ok since
@@ -131,26 +131,25 @@ static int alloc_work_entries(struct iwcm_id_private *cm_id_priv, int count)
 }
 
 /*
- * Save private data from incoming connection requests in the
- * cm_id_priv so the low level driver doesn't have to. Adjust
+ * Save private data from incoming connection requests to
+ * iw_cm_event, so the low level driver doesn't have to. Adjust
  * the event ptr to point to the local copy.
  */
-static int copy_private_data(struct iwcm_id_private *cm_id_priv,
-			     struct iw_cm_event *event)
+static int copy_private_data(struct iw_cm_event *event)
 {
 	void *p;
 
-	p = kmalloc(event->private_data_len, GFP_ATOMIC);
+	p = kmemdup(event->private_data, event->private_data_len, GFP_ATOMIC);
 	if (!p)
 		return -ENOMEM;
-	memcpy(p, event->private_data, event->private_data_len);
 	event->private_data = p;
 	return 0;
 }
 
 /*
- * Release a reference on cm_id. If the last reference is being removed
- * and iw_destroy_cm_id is waiting, wake up the waiting thread.
+ * Release a reference on cm_id. If the last reference is being
+ * released, enable the waiting thread (in iw_destroy_cm_id) to
+ * get woken up, and return 1 if a thread is already waiting.
  */
 static int iwcm_deref_id(struct iwcm_id_private *cm_id_priv)
 {
@@ -243,7 +242,7 @@ static int iwcm_modify_qp_sqd(struct ib_qp *qp)
 /*
  * CM_ID <-- CLOSING
  *
- * Block if a passive or active connection is currenlty being processed. Then
+ * Block if a passive or active connection is currently being processed. Then
  * process the event as follows:
  * - If we are ESTABLISHED, move to CLOSING and modify the QP state
  *   based on the abrupt flag
@@ -408,7 +407,7 @@ int iw_cm_listen(struct iw_cm_id *cm_id, int backlog)
 {
 	struct iwcm_id_private *cm_id_priv;
 	unsigned long flags;
-	int ret = 0;
+	int ret;
 
 	cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
 
@@ -535,7 +534,7 @@ EXPORT_SYMBOL(iw_cm_accept);
 int iw_cm_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *iw_param)
 {
 	struct iwcm_id_private *cm_id_priv;
-	int ret = 0;
+	int ret;
 	unsigned long flags;
 	struct ib_qp *qp;
 
@@ -620,7 +619,7 @@ static void cm_conn_req_handler(struct iwcm_id_private *listen_id_priv,
 	spin_lock_irqsave(&listen_id_priv->lock, flags);
 	if (listen_id_priv->state != IW_CM_STATE_LISTEN) {
 		spin_unlock_irqrestore(&listen_id_priv->lock, flags);
-		return;
+		goto out;
 	}
 	spin_unlock_irqrestore(&listen_id_priv->lock, flags);
 
@@ -629,7 +628,7 @@ static void cm_conn_req_handler(struct iwcm_id_private *listen_id_priv,
 			listen_id_priv->id.context);
 	/* If the cm_id could not be created, ignore the request */
 	if (IS_ERR(cm_id))
-		return;
+		goto out;
 
 	cm_id->provider_data = iw_event->provider_data;
 	cm_id->local_addr = iw_event->local_addr;
@@ -642,7 +641,7 @@ static void cm_conn_req_handler(struct iwcm_id_private *listen_id_priv,
 	if (ret) {
 		iw_cm_reject(cm_id, NULL, 0);
 		iw_destroy_cm_id(cm_id);
-		return;
+		goto out;
 	}
 
 	/* Call the client CM handler */
@@ -654,6 +653,7 @@ static void cm_conn_req_handler(struct iwcm_id_private *listen_id_priv,
 		kfree(cm_id);
 	}
 
+out:
 	if (iw_event->private_data_len)
 		kfree(iw_event->private_data);
 }
@@ -674,7 +674,7 @@ static int cm_conn_est_handler(struct iwcm_id_private *cm_id_priv,
 			       struct iw_cm_event *iw_event)
 {
 	unsigned long flags;
-	int ret = 0;
+	int ret;
 
 	spin_lock_irqsave(&cm_id_priv->lock, flags);
 
@@ -704,7 +704,7 @@ static int cm_conn_rep_handler(struct iwcm_id_private *cm_id_priv,
 			       struct iw_cm_event *iw_event)
 {
 	unsigned long flags;
-	int ret = 0;
+	int ret;
 
 	spin_lock_irqsave(&cm_id_priv->lock, flags);
 	/*
@@ -828,9 +828,10 @@ static int process_event(struct iwcm_id_private *cm_id_priv,
  * thread asleep on the destroy_comp list vs. an object destroyed
 * here synchronously when the last reference is removed.
 */
-static void cm_work_handler(void *arg)
+static void cm_work_handler(struct work_struct *_work)
 {
-	struct iwcm_work *work = arg, lwork;
+	struct iwcm_work *work = container_of(_work, struct iwcm_work, work);
+	struct iw_cm_event levent;
 	struct iwcm_id_private *cm_id_priv = work->cm_id;
 	unsigned long flags;
 	int empty;
@@ -843,11 +844,11 @@ static void cm_work_handler(void *arg)
 				  struct iwcm_work, list);
 		list_del_init(&work->list);
 		empty = list_empty(&cm_id_priv->work_list);
-		lwork = *work;
+		levent = work->event;
 		put_work(work);
 		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
 
-		ret = process_event(cm_id_priv, &work->event);
+		ret = process_event(cm_id_priv, &levent);
 		if (ret) {
 			set_bit(IWCM_F_CALLBACK_DESTROY, &cm_id_priv->flags);
 			destroy_cm_id(&cm_id_priv->id);
@@ -899,14 +900,14 @@ static int cm_event_handler(struct iw_cm_id *cm_id,
 		goto out;
 	}
 
-	INIT_WORK(&work->work, cm_work_handler, work);
+	INIT_WORK(&work->work, cm_work_handler);
 	work->cm_id = cm_id_priv;
 	work->event = *iw_event;
 
 	if ((work->event.event == IW_CM_EVENT_CONNECT_REQUEST ||
 	     work->event.event == IW_CM_EVENT_CONNECT_REPLY) &&
 	    work->event.private_data_len) {
-		ret = copy_private_data(cm_id_priv, &work->event);
+		ret = copy_private_data(&work->event);
 		if (ret) {
 			put_work(work);
 			goto out;
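Two separate fixes are folded into the iwcm.c hunks above: kmemdup(), added in 2.6.19, collapses the old kmalloc()+memcpy() pair in copy_private_data(), and cm_work_handler() now copies the event onto the stack before put_work() recycles the work element, where the old code still dereferenced work->event after the put, a use-after-free. An outline of the corrected ordering, wrapped in a hypothetical helper but using the names from the diff:

static void my_handle_one(struct iwcm_id_private *cm_id_priv,
			  struct iwcm_work *work)
{
	struct iw_cm_event levent;

	levent = work->event;	/* copy while 'work' is still owned */
	put_work(work);		/* 'work' may be reused from here on */
	process_event(cm_id_priv, &levent);	/* safe: uses the copy */
}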
diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c index 493f4c65c7a2..15f38d94b3a8 100644 --- a/drivers/infiniband/core/mad.c +++ b/drivers/infiniband/core/mad.c | |||
@@ -46,7 +46,7 @@ MODULE_DESCRIPTION("kernel IB MAD API"); | |||
46 | MODULE_AUTHOR("Hal Rosenstock"); | 46 | MODULE_AUTHOR("Hal Rosenstock"); |
47 | MODULE_AUTHOR("Sean Hefty"); | 47 | MODULE_AUTHOR("Sean Hefty"); |
48 | 48 | ||
49 | static kmem_cache_t *ib_mad_cache; | 49 | static struct kmem_cache *ib_mad_cache; |
50 | 50 | ||
51 | static struct list_head ib_mad_port_list; | 51 | static struct list_head ib_mad_port_list; |
52 | static u32 ib_mad_client_id = 0; | 52 | static u32 ib_mad_client_id = 0; |
@@ -65,8 +65,8 @@ static struct ib_mad_agent_private *find_mad_agent( | |||
65 | static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info, | 65 | static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info, |
66 | struct ib_mad_private *mad); | 66 | struct ib_mad_private *mad); |
67 | static void cancel_mads(struct ib_mad_agent_private *mad_agent_priv); | 67 | static void cancel_mads(struct ib_mad_agent_private *mad_agent_priv); |
68 | static void timeout_sends(void *data); | 68 | static void timeout_sends(struct work_struct *work); |
69 | static void local_completions(void *data); | 69 | static void local_completions(struct work_struct *work); |
70 | static int add_nonoui_reg_req(struct ib_mad_reg_req *mad_reg_req, | 70 | static int add_nonoui_reg_req(struct ib_mad_reg_req *mad_reg_req, |
71 | struct ib_mad_agent_private *agent_priv, | 71 | struct ib_mad_agent_private *agent_priv, |
72 | u8 mgmt_class); | 72 | u8 mgmt_class); |
@@ -356,10 +356,9 @@ struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device, | |||
356 | INIT_LIST_HEAD(&mad_agent_priv->wait_list); | 356 | INIT_LIST_HEAD(&mad_agent_priv->wait_list); |
357 | INIT_LIST_HEAD(&mad_agent_priv->done_list); | 357 | INIT_LIST_HEAD(&mad_agent_priv->done_list); |
358 | INIT_LIST_HEAD(&mad_agent_priv->rmpp_list); | 358 | INIT_LIST_HEAD(&mad_agent_priv->rmpp_list); |
359 | INIT_WORK(&mad_agent_priv->timed_work, timeout_sends, mad_agent_priv); | 359 | INIT_DELAYED_WORK(&mad_agent_priv->timed_work, timeout_sends); |
360 | INIT_LIST_HEAD(&mad_agent_priv->local_list); | 360 | INIT_LIST_HEAD(&mad_agent_priv->local_list); |
361 | INIT_WORK(&mad_agent_priv->local_work, local_completions, | 361 | INIT_WORK(&mad_agent_priv->local_work, local_completions); |
362 | mad_agent_priv); | ||
363 | atomic_set(&mad_agent_priv->refcount, 1); | 362 | atomic_set(&mad_agent_priv->refcount, 1); |
364 | init_completion(&mad_agent_priv->comp); | 363 | init_completion(&mad_agent_priv->comp); |
365 | 364 | ||
@@ -1750,7 +1749,7 @@ ib_find_send_mad(struct ib_mad_agent_private *mad_agent_priv, | |||
1750 | */ | 1749 | */ |
1751 | (is_direct(wc->recv_buf.mad->mad_hdr.mgmt_class) || | 1750 | (is_direct(wc->recv_buf.mad->mad_hdr.mgmt_class) || |
1752 | rcv_has_same_gid(mad_agent_priv, wr, wc))) | 1751 | rcv_has_same_gid(mad_agent_priv, wr, wc))) |
1753 | return wr; | 1752 | return (wr->status == IB_WC_SUCCESS) ? wr : NULL; |
1754 | } | 1753 | } |
1755 | 1754 | ||
1756 | /* | 1755 | /* |
@@ -2198,12 +2197,12 @@ static void mad_error_handler(struct ib_mad_port_private *port_priv, | |||
2198 | /* | 2197 | /* |
2199 | * IB MAD completion callback | 2198 | * IB MAD completion callback |
2200 | */ | 2199 | */ |
2201 | static void ib_mad_completion_handler(void *data) | 2200 | static void ib_mad_completion_handler(struct work_struct *work) |
2202 | { | 2201 | { |
2203 | struct ib_mad_port_private *port_priv; | 2202 | struct ib_mad_port_private *port_priv; |
2204 | struct ib_wc wc; | 2203 | struct ib_wc wc; |
2205 | 2204 | ||
2206 | port_priv = (struct ib_mad_port_private *)data; | 2205 | port_priv = container_of(work, struct ib_mad_port_private, work); |
2207 | ib_req_notify_cq(port_priv->cq, IB_CQ_NEXT_COMP); | 2206 | ib_req_notify_cq(port_priv->cq, IB_CQ_NEXT_COMP); |
2208 | 2207 | ||
2209 | while (ib_poll_cq(port_priv->cq, 1, &wc) == 1) { | 2208 | while (ib_poll_cq(port_priv->cq, 1, &wc) == 1) { |
@@ -2324,7 +2323,7 @@ void ib_cancel_mad(struct ib_mad_agent *mad_agent, | |||
2324 | } | 2323 | } |
2325 | EXPORT_SYMBOL(ib_cancel_mad); | 2324 | EXPORT_SYMBOL(ib_cancel_mad); |
2326 | 2325 | ||
2327 | static void local_completions(void *data) | 2326 | static void local_completions(struct work_struct *work) |
2328 | { | 2327 | { |
2329 | struct ib_mad_agent_private *mad_agent_priv; | 2328 | struct ib_mad_agent_private *mad_agent_priv; |
2330 | struct ib_mad_local_private *local; | 2329 | struct ib_mad_local_private *local; |
@@ -2334,7 +2333,8 @@ static void local_completions(void *data) | |||
2334 | struct ib_wc wc; | 2333 | struct ib_wc wc; |
2335 | struct ib_mad_send_wc mad_send_wc; | 2334 | struct ib_mad_send_wc mad_send_wc; |
2336 | 2335 | ||
2337 | mad_agent_priv = (struct ib_mad_agent_private *)data; | 2336 | mad_agent_priv = |
2337 | container_of(work, struct ib_mad_agent_private, local_work); | ||
2338 | 2338 | ||
2339 | spin_lock_irqsave(&mad_agent_priv->lock, flags); | 2339 | spin_lock_irqsave(&mad_agent_priv->lock, flags); |
2340 | while (!list_empty(&mad_agent_priv->local_list)) { | 2340 | while (!list_empty(&mad_agent_priv->local_list)) { |
@@ -2434,14 +2434,15 @@ static int retry_send(struct ib_mad_send_wr_private *mad_send_wr) | |||
2434 | return ret; | 2434 | return ret; |
2435 | } | 2435 | } |
2436 | 2436 | ||
2437 | static void timeout_sends(void *data) | 2437 | static void timeout_sends(struct work_struct *work) |
2438 | { | 2438 | { |
2439 | struct ib_mad_agent_private *mad_agent_priv; | 2439 | struct ib_mad_agent_private *mad_agent_priv; |
2440 | struct ib_mad_send_wr_private *mad_send_wr; | 2440 | struct ib_mad_send_wr_private *mad_send_wr; |
2441 | struct ib_mad_send_wc mad_send_wc; | 2441 | struct ib_mad_send_wc mad_send_wc; |
2442 | unsigned long flags, delay; | 2442 | unsigned long flags, delay; |
2443 | 2443 | ||
2444 | mad_agent_priv = (struct ib_mad_agent_private *)data; | 2444 | mad_agent_priv = container_of(work, struct ib_mad_agent_private, |
2445 | timed_work.work); | ||
2445 | mad_send_wc.vendor_err = 0; | 2446 | mad_send_wc.vendor_err = 0; |
2446 | 2447 | ||
2447 | spin_lock_irqsave(&mad_agent_priv->lock, flags); | 2448 | spin_lock_irqsave(&mad_agent_priv->lock, flags); |
@@ -2799,7 +2800,7 @@ static int ib_mad_port_open(struct ib_device *device, | |||
2799 | ret = -ENOMEM; | 2800 | ret = -ENOMEM; |
2800 | goto error8; | 2801 | goto error8; |
2801 | } | 2802 | } |
2802 | INIT_WORK(&port_priv->work, ib_mad_completion_handler, port_priv); | 2803 | INIT_WORK(&port_priv->work, ib_mad_completion_handler); |
2803 | 2804 | ||
2804 | spin_lock_irqsave(&ib_mad_port_list_lock, flags); | 2805 | spin_lock_irqsave(&ib_mad_port_list_lock, flags); |
2805 | list_add_tail(&port_priv->port_list, &ib_mad_port_list); | 2806 | list_add_tail(&port_priv->port_list, &ib_mad_port_list); |
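All the mad.c hunks above are one mechanical conversion: since the 2.6.20 workqueue rework, a handler receives the struct work_struct * itself rather than a void * context, INIT_WORK() loses its third argument, and the handler recovers its private object with container_of(). A minimal sketch of the pattern, using illustrative type and field names rather than this driver's:

#include <linux/workqueue.h>

struct my_port {
        struct workqueue_struct *wq;
        struct work_struct work;        /* embedded so container_of() works */
};

/* old style was: static void my_handler(void *data), with data = port */
static void my_handler(struct work_struct *work)
{
        struct my_port *port = container_of(work, struct my_port, work);

        /* ... process completions for this port ... */
}

static void my_port_start(struct my_port *port)
{
        INIT_WORK(&port->work, my_handler);     /* no more data argument */
        queue_work(port->wq, &port->work);
}

The same shape repeats below in mad_rmpp.c, sa_query.c and uverbs_mem.c.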
diff --git a/drivers/infiniband/core/mad_priv.h b/drivers/infiniband/core/mad_priv.h index d06b59083f6e..d5548e73e068 100644 --- a/drivers/infiniband/core/mad_priv.h +++ b/drivers/infiniband/core/mad_priv.h | |||
@@ -102,7 +102,7 @@ struct ib_mad_agent_private { | |||
102 | struct list_head send_list; | 102 | struct list_head send_list; |
103 | struct list_head wait_list; | 103 | struct list_head wait_list; |
104 | struct list_head done_list; | 104 | struct list_head done_list; |
105 | struct work_struct timed_work; | 105 | struct delayed_work timed_work; |
106 | unsigned long timeout; | 106 | unsigned long timeout; |
107 | struct list_head local_list; | 107 | struct list_head local_list; |
108 | struct work_struct local_work; | 108 | struct work_struct local_work; |
diff --git a/drivers/infiniband/core/mad_rmpp.c b/drivers/infiniband/core/mad_rmpp.c index 1ef79d015a1e..3663fd7022be 100644 --- a/drivers/infiniband/core/mad_rmpp.c +++ b/drivers/infiniband/core/mad_rmpp.c | |||
@@ -45,8 +45,8 @@ enum rmpp_state { | |||
45 | struct mad_rmpp_recv { | 45 | struct mad_rmpp_recv { |
46 | struct ib_mad_agent_private *agent; | 46 | struct ib_mad_agent_private *agent; |
47 | struct list_head list; | 47 | struct list_head list; |
48 | struct work_struct timeout_work; | 48 | struct delayed_work timeout_work; |
49 | struct work_struct cleanup_work; | 49 | struct delayed_work cleanup_work; |
50 | struct completion comp; | 50 | struct completion comp; |
51 | enum rmpp_state state; | 51 | enum rmpp_state state; |
52 | spinlock_t lock; | 52 | spinlock_t lock; |
@@ -233,9 +233,10 @@ static void nack_recv(struct ib_mad_agent_private *agent, | |||
233 | } | 233 | } |
234 | } | 234 | } |
235 | 235 | ||
236 | static void recv_timeout_handler(void *data) | 236 | static void recv_timeout_handler(struct work_struct *work) |
237 | { | 237 | { |
238 | struct mad_rmpp_recv *rmpp_recv = data; | 238 | struct mad_rmpp_recv *rmpp_recv = |
239 | container_of(work, struct mad_rmpp_recv, timeout_work.work); | ||
239 | struct ib_mad_recv_wc *rmpp_wc; | 240 | struct ib_mad_recv_wc *rmpp_wc; |
240 | unsigned long flags; | 241 | unsigned long flags; |
241 | 242 | ||
@@ -254,9 +255,10 @@ static void recv_timeout_handler(void *data) | |||
254 | ib_free_recv_mad(rmpp_wc); | 255 | ib_free_recv_mad(rmpp_wc); |
255 | } | 256 | } |
256 | 257 | ||
257 | static void recv_cleanup_handler(void *data) | 258 | static void recv_cleanup_handler(struct work_struct *work) |
258 | { | 259 | { |
259 | struct mad_rmpp_recv *rmpp_recv = data; | 260 | struct mad_rmpp_recv *rmpp_recv = |
261 | container_of(work, struct mad_rmpp_recv, cleanup_work.work); | ||
260 | unsigned long flags; | 262 | unsigned long flags; |
261 | 263 | ||
262 | spin_lock_irqsave(&rmpp_recv->agent->lock, flags); | 264 | spin_lock_irqsave(&rmpp_recv->agent->lock, flags); |
@@ -285,8 +287,8 @@ create_rmpp_recv(struct ib_mad_agent_private *agent, | |||
285 | 287 | ||
286 | rmpp_recv->agent = agent; | 288 | rmpp_recv->agent = agent; |
287 | init_completion(&rmpp_recv->comp); | 289 | init_completion(&rmpp_recv->comp); |
288 | INIT_WORK(&rmpp_recv->timeout_work, recv_timeout_handler, rmpp_recv); | 290 | INIT_DELAYED_WORK(&rmpp_recv->timeout_work, recv_timeout_handler); |
289 | INIT_WORK(&rmpp_recv->cleanup_work, recv_cleanup_handler, rmpp_recv); | 291 | INIT_DELAYED_WORK(&rmpp_recv->cleanup_work, recv_cleanup_handler); |
290 | spin_lock_init(&rmpp_recv->lock); | 292 | spin_lock_init(&rmpp_recv->lock); |
291 | rmpp_recv->state = RMPP_STATE_ACTIVE; | 293 | rmpp_recv->state = RMPP_STATE_ACTIVE; |
292 | atomic_set(&rmpp_recv->refcount, 1); | 294 | atomic_set(&rmpp_recv->refcount, 1); |
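Work that is queued with a delay can no longer use a plain work_struct: it becomes a struct delayed_work (a work_struct plus a timer), and because the callback is handed the inner work_struct, the container_of() has to go through the .work member, exactly as recv_timeout_handler() and recv_cleanup_handler() do above. A hedged sketch with invented names:

#include <linux/workqueue.h>

struct my_recv {
        struct delayed_work timeout_work;
};

static void my_timeout(struct work_struct *work)
{
        /* note the extra .work: the callback gets the embedded work_struct */
        struct my_recv *recv =
                container_of(work, struct my_recv, timeout_work.work);

        /* ... tear down the stalled transfer ... */
}

static void my_arm_timeout(struct my_recv *recv, unsigned long delay)
{
        INIT_DELAYED_WORK(&recv->timeout_work, my_timeout);
        schedule_delayed_work(&recv->timeout_work, delay);      /* in jiffies */
}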
diff --git a/drivers/infiniband/core/sa_query.c b/drivers/infiniband/core/sa_query.c index 1706d3c7e95e..e45afba75341 100644 --- a/drivers/infiniband/core/sa_query.c +++ b/drivers/infiniband/core/sa_query.c | |||
@@ -360,9 +360,10 @@ static void free_sm_ah(struct kref *kref) | |||
360 | kfree(sm_ah); | 360 | kfree(sm_ah); |
361 | } | 361 | } |
362 | 362 | ||
363 | static void update_sm_ah(void *port_ptr) | 363 | static void update_sm_ah(struct work_struct *work) |
364 | { | 364 | { |
365 | struct ib_sa_port *port = port_ptr; | 365 | struct ib_sa_port *port = |
366 | container_of(work, struct ib_sa_port, update_task); | ||
366 | struct ib_sa_sm_ah *new_ah, *old_ah; | 367 | struct ib_sa_sm_ah *new_ah, *old_ah; |
367 | struct ib_port_attr port_attr; | 368 | struct ib_port_attr port_attr; |
368 | struct ib_ah_attr ah_attr; | 369 | struct ib_ah_attr ah_attr; |
@@ -992,8 +993,7 @@ static void ib_sa_add_one(struct ib_device *device) | |||
992 | if (IS_ERR(sa_dev->port[i].agent)) | 993 | if (IS_ERR(sa_dev->port[i].agent)) |
993 | goto err; | 994 | goto err; |
994 | 995 | ||
995 | INIT_WORK(&sa_dev->port[i].update_task, | 996 | INIT_WORK(&sa_dev->port[i].update_task, update_sm_ah); |
996 | update_sm_ah, &sa_dev->port[i]); | ||
997 | } | 997 | } |
998 | 998 | ||
999 | ib_set_client_data(device, &sa_client, sa_dev); | 999 | ib_set_client_data(device, &sa_client, sa_dev); |
@@ -1010,7 +1010,7 @@ static void ib_sa_add_one(struct ib_device *device) | |||
1010 | goto err; | 1010 | goto err; |
1011 | 1011 | ||
1012 | for (i = 0; i <= e - s; ++i) | 1012 | for (i = 0; i <= e - s; ++i) |
1013 | update_sm_ah(&sa_dev->port[i]); | 1013 | update_sm_ah(&sa_dev->port[i].update_task); |
1014 | 1014 | ||
1015 | return; | 1015 | return; |
1016 | 1016 | ||
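One knock-on effect is visible in the ib_sa_add_one() hunk: a direct, synchronous call of the handler can no longer pass the port pointer, it must pass the embedded work item, from which the handler derives the port again. Roughly:

/* direct invocation after the conversion: hand over the work item ... */
update_sm_ah(&sa_dev->port[i].update_task);
/* ... and inside the handler, container_of(work, struct ib_sa_port,
 * update_task) yields &sa_dev->port[i] back */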
diff --git a/drivers/infiniband/core/ucm.c b/drivers/infiniband/core/ucm.c index ad4f4d5c2924..f15220a0ee75 100644 --- a/drivers/infiniband/core/ucm.c +++ b/drivers/infiniband/core/ucm.c | |||
@@ -161,12 +161,14 @@ static void ib_ucm_cleanup_events(struct ib_ucm_context *ctx) | |||
161 | struct ib_ucm_event, ctx_list); | 161 | struct ib_ucm_event, ctx_list); |
162 | list_del(&uevent->file_list); | 162 | list_del(&uevent->file_list); |
163 | list_del(&uevent->ctx_list); | 163 | list_del(&uevent->ctx_list); |
164 | mutex_unlock(&ctx->file->file_mutex); | ||
164 | 165 | ||
165 | /* clear incoming connections. */ | 166 | /* clear incoming connections. */ |
166 | if (ib_ucm_new_cm_id(uevent->resp.event)) | 167 | if (ib_ucm_new_cm_id(uevent->resp.event)) |
167 | ib_destroy_cm_id(uevent->cm_id); | 168 | ib_destroy_cm_id(uevent->cm_id); |
168 | 169 | ||
169 | kfree(uevent); | 170 | kfree(uevent); |
171 | mutex_lock(&ctx->file->file_mutex); | ||
170 | } | 172 | } |
171 | mutex_unlock(&ctx->file->file_mutex); | 173 | mutex_unlock(&ctx->file->file_mutex); |
172 | } | 174 | } |
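The reordering in ib_ucm_cleanup_events() is a classic lock break: ib_destroy_cm_id() can block on callbacks that themselves need file_mutex, so each event is unlinked while the mutex is held, the mutex is dropped around the destroy, and it is re-taken before the next list_empty() test. The general shape, with destroy_event() as a stand-in for the blocking call:

#include <linux/mutex.h>
#include <linux/list.h>

void destroy_event(struct list_head *entry);    /* may sleep / re-take lock */

static void drain_events(struct mutex *lock, struct list_head *events)
{
        struct list_head *entry;

        mutex_lock(lock);
        while (!list_empty(events)) {
                entry = events->next;
                list_del(entry);        /* unlink while still holding it */
                mutex_unlock(lock);     /* drop across the blocking call */

                destroy_event(entry);

                mutex_lock(lock);       /* re-take before testing again */
        }
        mutex_unlock(lock);
}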
@@ -328,20 +330,18 @@ static int ib_ucm_event_process(struct ib_cm_event *evt, | |||
328 | } | 330 | } |
329 | 331 | ||
330 | if (uvt->data_len) { | 332 | if (uvt->data_len) { |
331 | uvt->data = kmalloc(uvt->data_len, GFP_KERNEL); | 333 | uvt->data = kmemdup(evt->private_data, uvt->data_len, GFP_KERNEL); |
332 | if (!uvt->data) | 334 | if (!uvt->data) |
333 | goto err1; | 335 | goto err1; |
334 | 336 | ||
335 | memcpy(uvt->data, evt->private_data, uvt->data_len); | ||
336 | uvt->resp.present |= IB_UCM_PRES_DATA; | 337 | uvt->resp.present |= IB_UCM_PRES_DATA; |
337 | } | 338 | } |
338 | 339 | ||
339 | if (uvt->info_len) { | 340 | if (uvt->info_len) { |
340 | uvt->info = kmalloc(uvt->info_len, GFP_KERNEL); | 341 | uvt->info = kmemdup(info, uvt->info_len, GFP_KERNEL); |
341 | if (!uvt->info) | 342 | if (!uvt->info) |
342 | goto err2; | 343 | goto err2; |
343 | 344 | ||
344 | memcpy(uvt->info, info, uvt->info_len); | ||
345 | uvt->resp.present |= IB_UCM_PRES_INFO; | 345 | uvt->resp.present |= IB_UCM_PRES_INFO; |
346 | } | 346 | } |
347 | return 0; | 347 | return 0; |
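The two ib_ucm_event_process() hunks replace a kmalloc()+memcpy() pair with kmemdup(), which allocates with the given flags, copies the source buffer, and returns NULL on failure, so the surrounding error handling stays the same. Side by side, in a sketch with illustrative arguments:

#include <linux/slab.h>
#include <linux/string.h>

static int copy_in(void **dstp, const void *src, size_t len)
{
        /* old idiom:
         *      *dstp = kmalloc(len, GFP_KERNEL);
         *      if (!*dstp)
         *              return -ENOMEM;
         *      memcpy(*dstp, src, len);
         */
        *dstp = kmemdup(src, len, GFP_KERNEL);  /* one call, same semantics */
        if (!*dstp)
                return -ENOMEM;
        return 0;
}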
@@ -685,11 +685,11 @@ out: | |||
685 | return result; | 685 | return result; |
686 | } | 686 | } |
687 | 687 | ||
688 | static ssize_t ib_ucm_establish(struct ib_ucm_file *file, | 688 | static ssize_t ib_ucm_notify(struct ib_ucm_file *file, |
689 | const char __user *inbuf, | 689 | const char __user *inbuf, |
690 | int in_len, int out_len) | 690 | int in_len, int out_len) |
691 | { | 691 | { |
692 | struct ib_ucm_establish cmd; | 692 | struct ib_ucm_notify cmd; |
693 | struct ib_ucm_context *ctx; | 693 | struct ib_ucm_context *ctx; |
694 | int result; | 694 | int result; |
695 | 695 | ||
@@ -700,7 +700,7 @@ static ssize_t ib_ucm_establish(struct ib_ucm_file *file, | |||
700 | if (IS_ERR(ctx)) | 700 | if (IS_ERR(ctx)) |
701 | return PTR_ERR(ctx); | 701 | return PTR_ERR(ctx); |
702 | 702 | ||
703 | result = ib_cm_establish(ctx->cm_id); | 703 | result = ib_cm_notify(ctx->cm_id, (enum ib_event_type) cmd.event); |
704 | ib_ucm_ctx_put(ctx); | 704 | ib_ucm_ctx_put(ctx); |
705 | return result; | 705 | return result; |
706 | } | 706 | } |
@@ -1107,7 +1107,7 @@ static ssize_t (*ucm_cmd_table[])(struct ib_ucm_file *file, | |||
1107 | [IB_USER_CM_CMD_DESTROY_ID] = ib_ucm_destroy_id, | 1107 | [IB_USER_CM_CMD_DESTROY_ID] = ib_ucm_destroy_id, |
1108 | [IB_USER_CM_CMD_ATTR_ID] = ib_ucm_attr_id, | 1108 | [IB_USER_CM_CMD_ATTR_ID] = ib_ucm_attr_id, |
1109 | [IB_USER_CM_CMD_LISTEN] = ib_ucm_listen, | 1109 | [IB_USER_CM_CMD_LISTEN] = ib_ucm_listen, |
1110 | [IB_USER_CM_CMD_ESTABLISH] = ib_ucm_establish, | 1110 | [IB_USER_CM_CMD_NOTIFY] = ib_ucm_notify, |
1111 | [IB_USER_CM_CMD_SEND_REQ] = ib_ucm_send_req, | 1111 | [IB_USER_CM_CMD_SEND_REQ] = ib_ucm_send_req, |
1112 | [IB_USER_CM_CMD_SEND_REP] = ib_ucm_send_rep, | 1112 | [IB_USER_CM_CMD_SEND_REP] = ib_ucm_send_rep, |
1113 | [IB_USER_CM_CMD_SEND_RTU] = ib_ucm_send_rtu, | 1113 | [IB_USER_CM_CMD_SEND_RTU] = ib_ucm_send_rtu, |
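The establish-to-notify rename only touches one slot of ucm's dispatch table: each IB_USER_CM_CMD_* opcode indexes an array of handlers sharing one signature, and the write() path validates the opcode before indexing. A reduced sketch of that idiom, assuming the handlers from this file:

#include <linux/kernel.h>
#include <linux/types.h>
#include <rdma/ib_user_cm.h>

struct ib_ucm_file;

typedef ssize_t (*ucm_cmd_t)(struct ib_ucm_file *file,
                             const char __user *inbuf,
                             int in_len, int out_len);

/* handlers assumed from ucm.c above */
ssize_t ib_ucm_listen(struct ib_ucm_file *, const char __user *, int, int);
ssize_t ib_ucm_notify(struct ib_ucm_file *, const char __user *, int, int);

static ucm_cmd_t cmd_table[] = {
        [IB_USER_CM_CMD_LISTEN] = ib_ucm_listen,
        [IB_USER_CM_CMD_NOTIFY] = ib_ucm_notify,
};

static ssize_t dispatch(struct ib_ucm_file *file, u32 cmd,
                        const char __user *inbuf, int in_len, int out_len)
{
        if (cmd >= ARRAY_SIZE(cmd_table) || !cmd_table[cmd])
                return -EINVAL;         /* unknown opcode or hole in table */
        return cmd_table[cmd](file, inbuf, in_len, out_len);
}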
diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c index b72c7f69ca90..743247ec065e 100644 --- a/drivers/infiniband/core/uverbs_cmd.c +++ b/drivers/infiniband/core/uverbs_cmd.c | |||
@@ -1214,7 +1214,7 @@ ssize_t ib_uverbs_query_qp(struct ib_uverbs_file *file, | |||
1214 | resp.qp_access_flags = attr->qp_access_flags; | 1214 | resp.qp_access_flags = attr->qp_access_flags; |
1215 | resp.pkey_index = attr->pkey_index; | 1215 | resp.pkey_index = attr->pkey_index; |
1216 | resp.alt_pkey_index = attr->alt_pkey_index; | 1216 | resp.alt_pkey_index = attr->alt_pkey_index; |
1217 | resp.en_sqd_async_notify = attr->en_sqd_async_notify; | 1217 | resp.sq_draining = attr->sq_draining; |
1218 | resp.max_rd_atomic = attr->max_rd_atomic; | 1218 | resp.max_rd_atomic = attr->max_rd_atomic; |
1219 | resp.max_dest_rd_atomic = attr->max_dest_rd_atomic; | 1219 | resp.max_dest_rd_atomic = attr->max_dest_rd_atomic; |
1220 | resp.min_rnr_timer = attr->min_rnr_timer; | 1220 | resp.min_rnr_timer = attr->min_rnr_timer; |
diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c index 4e16314e8e6d..a617ca7b6923 100644 --- a/drivers/infiniband/core/uverbs_main.c +++ b/drivers/infiniband/core/uverbs_main.c | |||
@@ -534,9 +534,9 @@ struct file *ib_uverbs_alloc_event_file(struct ib_uverbs_file *uverbs_file, | |||
534 | * module reference. | 534 | * module reference. |
535 | */ | 535 | */ |
536 | filp->f_op = fops_get(&uverbs_event_fops); | 536 | filp->f_op = fops_get(&uverbs_event_fops); |
537 | filp->f_vfsmnt = mntget(uverbs_event_mnt); | 537 | filp->f_path.mnt = mntget(uverbs_event_mnt); |
538 | filp->f_dentry = dget(uverbs_event_mnt->mnt_root); | 538 | filp->f_path.dentry = dget(uverbs_event_mnt->mnt_root); |
539 | filp->f_mapping = filp->f_dentry->d_inode->i_mapping; | 539 | filp->f_mapping = filp->f_path.dentry->d_inode->i_mapping; |
540 | filp->f_flags = O_RDONLY; | 540 | filp->f_flags = O_RDONLY; |
541 | filp->f_mode = FMODE_READ; | 541 | filp->f_mode = FMODE_READ; |
542 | filp->private_data = ev_file; | 542 | filp->private_data = ev_file; |
diff --git a/drivers/infiniband/core/uverbs_mem.c b/drivers/infiniband/core/uverbs_mem.c index efe147dbeb42..db12cc0841df 100644 --- a/drivers/infiniband/core/uverbs_mem.c +++ b/drivers/infiniband/core/uverbs_mem.c | |||
@@ -179,9 +179,10 @@ void ib_umem_release(struct ib_device *dev, struct ib_umem *umem) | |||
179 | up_write(¤t->mm->mmap_sem); | 179 | up_write(¤t->mm->mmap_sem); |
180 | } | 180 | } |
181 | 181 | ||
182 | static void ib_umem_account(void *work_ptr) | 182 | static void ib_umem_account(struct work_struct *_work) |
183 | { | 183 | { |
184 | struct ib_umem_account_work *work = work_ptr; | 184 | struct ib_umem_account_work *work = |
185 | container_of(_work, struct ib_umem_account_work, work); | ||
185 | 186 | ||
186 | down_write(&work->mm->mmap_sem); | 187 | down_write(&work->mm->mmap_sem); |
187 | work->mm->locked_vm -= work->diff; | 188 | work->mm->locked_vm -= work->diff; |
@@ -216,7 +217,7 @@ void ib_umem_release_on_close(struct ib_device *dev, struct ib_umem *umem) | |||
216 | return; | 217 | return; |
217 | } | 218 | } |
218 | 219 | ||
219 | INIT_WORK(&work->work, ib_umem_account, work); | 220 | INIT_WORK(&work->work, ib_umem_account); |
220 | work->mm = mm; | 221 | work->mm = mm; |
221 | work->diff = PAGE_ALIGN(umem->length + umem->offset) >> PAGE_SHIFT; | 222 | work->diff = PAGE_ALIGN(umem->length + umem->offset) >> PAGE_SHIFT; |
222 | 223 | ||
diff --git a/drivers/infiniband/hw/amso1100/c2.c b/drivers/infiniband/hw/amso1100/c2.c index dc1ebeac35c7..27fe242ed435 100644 --- a/drivers/infiniband/hw/amso1100/c2.c +++ b/drivers/infiniband/hw/amso1100/c2.c | |||
@@ -1155,7 +1155,8 @@ static int __devinit c2_probe(struct pci_dev *pcidev, | |||
1155 | goto bail10; | 1155 | goto bail10; |
1156 | } | 1156 | } |
1157 | 1157 | ||
1158 | c2_register_device(c2dev); | 1158 | if (c2_register_device(c2dev)) |
1159 | goto bail10; | ||
1159 | 1160 | ||
1160 | return 0; | 1161 | return 0; |
1161 | 1162 | ||
@@ -1243,7 +1244,7 @@ static struct pci_driver c2_pci_driver = { | |||
1243 | 1244 | ||
1244 | static int __init c2_init_module(void) | 1245 | static int __init c2_init_module(void) |
1245 | { | 1246 | { |
1246 | return pci_module_init(&c2_pci_driver); | 1247 | return pci_register_driver(&c2_pci_driver); |
1247 | } | 1248 | } |
1248 | 1249 | ||
1249 | static void __exit c2_exit_module(void) | 1250 | static void __exit c2_exit_module(void) |
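pci_module_init() had long been an exact synonym for pci_register_driver() and is being removed; the c2 driver now calls the real function. A hedged sketch of the standard module boilerplate; the IDs and callbacks here are hypothetical stand-ins:

#include <linux/module.h>
#include <linux/pci.h>

static struct pci_device_id example_ids[] = {
        { PCI_DEVICE(0x1234, 0x5678) },         /* hypothetical IDs */
        { 0 }
};

static int example_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
        return 0;       /* real device setup would go here */
}

static void example_remove(struct pci_dev *pdev)
{
}

static struct pci_driver example_driver = {
        .name     = "example",
        .id_table = example_ids,
        .probe    = example_probe,
        .remove   = example_remove,
};

static int __init example_init(void)
{
        /* pci_module_init() was just this call under another name */
        return pci_register_driver(&example_driver);
}

static void __exit example_exit(void)
{
        pci_unregister_driver(&example_driver);
}

module_init(example_init);
module_exit(example_exit);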
diff --git a/drivers/infiniband/hw/amso1100/c2.h b/drivers/infiniband/hw/amso1100/c2.h index 1b17dcdd0505..04a9db5de881 100644 --- a/drivers/infiniband/hw/amso1100/c2.h +++ b/drivers/infiniband/hw/amso1100/c2.h | |||
@@ -302,7 +302,7 @@ struct c2_dev { | |||
302 | unsigned long pa; /* PA device memory */ | 302 | unsigned long pa; /* PA device memory */ |
303 | void **qptr_array; | 303 | void **qptr_array; |
304 | 304 | ||
305 | kmem_cache_t *host_msg_cache; | 305 | struct kmem_cache *host_msg_cache; |
306 | 306 | ||
307 | struct list_head cca_link; /* adapter list */ | 307 | struct list_head cca_link; /* adapter list */ |
308 | struct list_head eh_wakeup_list; /* event wakeup list */ | 308 | struct list_head eh_wakeup_list; /* event wakeup list */ |
diff --git a/drivers/infiniband/hw/amso1100/c2_alloc.c b/drivers/infiniband/hw/amso1100/c2_alloc.c index 028a60bbfca9..0315f99e4191 100644 --- a/drivers/infiniband/hw/amso1100/c2_alloc.c +++ b/drivers/infiniband/hw/amso1100/c2_alloc.c | |||
@@ -42,13 +42,14 @@ static int c2_alloc_mqsp_chunk(struct c2_dev *c2dev, gfp_t gfp_mask, | |||
42 | { | 42 | { |
43 | int i; | 43 | int i; |
44 | struct sp_chunk *new_head; | 44 | struct sp_chunk *new_head; |
45 | dma_addr_t dma_addr; | ||
45 | 46 | ||
46 | new_head = (struct sp_chunk *) __get_free_page(gfp_mask); | 47 | new_head = dma_alloc_coherent(&c2dev->pcidev->dev, PAGE_SIZE, |
48 | &dma_addr, gfp_mask); | ||
47 | if (new_head == NULL) | 49 | if (new_head == NULL) |
48 | return -ENOMEM; | 50 | return -ENOMEM; |
49 | 51 | ||
50 | new_head->dma_addr = dma_map_single(c2dev->ibdev.dma_device, new_head, | 52 | new_head->dma_addr = dma_addr; |
51 | PAGE_SIZE, DMA_FROM_DEVICE); | ||
52 | pci_unmap_addr_set(new_head, mapping, new_head->dma_addr); | 53 | pci_unmap_addr_set(new_head, mapping, new_head->dma_addr); |
53 | 54 | ||
54 | new_head->next = NULL; | 55 | new_head->next = NULL; |
@@ -80,10 +81,8 @@ void c2_free_mqsp_pool(struct c2_dev *c2dev, struct sp_chunk *root) | |||
80 | 81 | ||
81 | while (root) { | 82 | while (root) { |
82 | next = root->next; | 83 | next = root->next; |
83 | dma_unmap_single(c2dev->ibdev.dma_device, | 84 | dma_free_coherent(&c2dev->pcidev->dev, PAGE_SIZE, root, |
84 | pci_unmap_addr(root, mapping), PAGE_SIZE, | 85 | pci_unmap_addr(root, mapping)); |
85 | DMA_FROM_DEVICE); | ||
86 | __free_page((struct page *) root); | ||
87 | root = next; | 86 | root = next; |
88 | } | 87 | } |
89 | } | 88 | } |
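The amso1100 changes in this and the following files all trade __get_free_page()/__get_free_pages() plus dma_map_single() for dma_alloc_coherent(), which returns the kernel virtual address and fills in the bus address of one coherent buffer, and is paired with dma_free_coherent() on teardown instead of unmap-then-free. The converted pattern in isolation:

#include <linux/pci.h>
#include <linux/dma-mapping.h>

static void *alloc_queue_buf(struct pci_dev *pdev, size_t size,
                             dma_addr_t *dma_handle)
{
        /* one call yields both the CPU address (returned) and the
         * device-visible bus address (*dma_handle) */
        return dma_alloc_coherent(&pdev->dev, size, dma_handle, GFP_KERNEL);
}

static void free_queue_buf(struct pci_dev *pdev, size_t size,
                           void *cpu_addr, dma_addr_t dma_handle)
{
        /* replaces the old dma_unmap_single() + free_pages() pair */
        dma_free_coherent(&pdev->dev, size, cpu_addr, dma_handle);
}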
diff --git a/drivers/infiniband/hw/amso1100/c2_cq.c b/drivers/infiniband/hw/amso1100/c2_cq.c index 9d7bcc5ade93..05c9154d46f4 100644 --- a/drivers/infiniband/hw/amso1100/c2_cq.c +++ b/drivers/infiniband/hw/amso1100/c2_cq.c | |||
@@ -246,20 +246,17 @@ int c2_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify notify) | |||
246 | 246 | ||
247 | static void c2_free_cq_buf(struct c2_dev *c2dev, struct c2_mq *mq) | 247 | static void c2_free_cq_buf(struct c2_dev *c2dev, struct c2_mq *mq) |
248 | { | 248 | { |
249 | 249 | dma_free_coherent(&c2dev->pcidev->dev, mq->q_size * mq->msg_size, | |
250 | dma_unmap_single(c2dev->ibdev.dma_device, pci_unmap_addr(mq, mapping), | 250 | mq->msg_pool.host, pci_unmap_addr(mq, mapping)); |
251 | mq->q_size * mq->msg_size, DMA_FROM_DEVICE); | ||
252 | free_pages((unsigned long) mq->msg_pool.host, | ||
253 | get_order(mq->q_size * mq->msg_size)); | ||
254 | } | 251 | } |
255 | 252 | ||
256 | static int c2_alloc_cq_buf(struct c2_dev *c2dev, struct c2_mq *mq, int q_size, | 253 | static int c2_alloc_cq_buf(struct c2_dev *c2dev, struct c2_mq *mq, int q_size, |
257 | int msg_size) | 254 | int msg_size) |
258 | { | 255 | { |
259 | unsigned long pool_start; | 256 | u8 *pool_start; |
260 | 257 | ||
261 | pool_start = __get_free_pages(GFP_KERNEL, | 258 | pool_start = dma_alloc_coherent(&c2dev->pcidev->dev, q_size * msg_size, |
262 | get_order(q_size * msg_size)); | 259 | &mq->host_dma, GFP_KERNEL); |
263 | if (!pool_start) | 260 | if (!pool_start) |
264 | return -ENOMEM; | 261 | return -ENOMEM; |
265 | 262 | ||
@@ -267,13 +264,10 @@ static int c2_alloc_cq_buf(struct c2_dev *c2dev, struct c2_mq *mq, int q_size, | |||
267 | 0, /* index (currently unknown) */ | 264 | 0, /* index (currently unknown) */ |
268 | q_size, | 265 | q_size, |
269 | msg_size, | 266 | msg_size, |
270 | (u8 *) pool_start, | 267 | pool_start, |
271 | NULL, /* peer (currently unknown) */ | 268 | NULL, /* peer (currently unknown) */ |
272 | C2_MQ_HOST_TARGET); | 269 | C2_MQ_HOST_TARGET); |
273 | 270 | ||
274 | mq->host_dma = dma_map_single(c2dev->ibdev.dma_device, | ||
275 | (void *)pool_start, | ||
276 | q_size * msg_size, DMA_FROM_DEVICE); | ||
277 | pci_unmap_addr_set(mq, mapping, mq->host_dma); | 271 | pci_unmap_addr_set(mq, mapping, mq->host_dma); |
278 | 272 | ||
279 | return 0; | 273 | return 0; |
diff --git a/drivers/infiniband/hw/amso1100/c2_provider.c b/drivers/infiniband/hw/amso1100/c2_provider.c index da98d9f71429..fef972752912 100644 --- a/drivers/infiniband/hw/amso1100/c2_provider.c +++ b/drivers/infiniband/hw/amso1100/c2_provider.c | |||
@@ -757,20 +757,17 @@ static struct net_device *c2_pseudo_netdev_init(struct c2_dev *c2dev) | |||
757 | 757 | ||
758 | int c2_register_device(struct c2_dev *dev) | 758 | int c2_register_device(struct c2_dev *dev) |
759 | { | 759 | { |
760 | int ret; | 760 | int ret = -ENOMEM; |
761 | int i; | 761 | int i; |
762 | 762 | ||
763 | /* Register pseudo network device */ | 763 | /* Register pseudo network device */ |
764 | dev->pseudo_netdev = c2_pseudo_netdev_init(dev); | 764 | dev->pseudo_netdev = c2_pseudo_netdev_init(dev); |
765 | if (dev->pseudo_netdev) { | 765 | if (!dev->pseudo_netdev) |
766 | ret = register_netdev(dev->pseudo_netdev); | 766 | goto out3; |
767 | if (ret) { | 767 | |
768 | printk(KERN_ERR PFX | 768 | ret = register_netdev(dev->pseudo_netdev); |
769 | "Unable to register netdev, ret = %d\n", ret); | 769 | if (ret) |
770 | free_netdev(dev->pseudo_netdev); | 770 | goto out2; |
771 | return ret; | ||
772 | } | ||
773 | } | ||
774 | 771 | ||
775 | pr_debug("%s:%u\n", __FUNCTION__, __LINE__); | 772 | pr_debug("%s:%u\n", __FUNCTION__, __LINE__); |
776 | strlcpy(dev->ibdev.name, "amso%d", IB_DEVICE_NAME_MAX); | 773 | strlcpy(dev->ibdev.name, "amso%d", IB_DEVICE_NAME_MAX); |
@@ -848,21 +845,25 @@ int c2_register_device(struct c2_dev *dev) | |||
848 | 845 | ||
849 | ret = ib_register_device(&dev->ibdev); | 846 | ret = ib_register_device(&dev->ibdev); |
850 | if (ret) | 847 | if (ret) |
851 | return ret; | 848 | goto out1; |
852 | 849 | ||
853 | for (i = 0; i < ARRAY_SIZE(c2_class_attributes); ++i) { | 850 | for (i = 0; i < ARRAY_SIZE(c2_class_attributes); ++i) { |
854 | ret = class_device_create_file(&dev->ibdev.class_dev, | 851 | ret = class_device_create_file(&dev->ibdev.class_dev, |
855 | c2_class_attributes[i]); | 852 | c2_class_attributes[i]); |
856 | if (ret) { | 853 | if (ret) |
857 | unregister_netdev(dev->pseudo_netdev); | 854 | goto out0; |
858 | free_netdev(dev->pseudo_netdev); | ||
859 | ib_unregister_device(&dev->ibdev); | ||
860 | return ret; | ||
861 | } | ||
862 | } | 855 | } |
856 | goto out3; | ||
863 | 857 | ||
864 | pr_debug("%s:%u\n", __FUNCTION__, __LINE__); | 858 | out0: |
865 | return 0; | 859 | ib_unregister_device(&dev->ibdev); |
860 | out1: | ||
861 | unregister_netdev(dev->pseudo_netdev); | ||
862 | out2: | ||
863 | free_netdev(dev->pseudo_netdev); | ||
864 | out3: | ||
865 | pr_debug("%s:%u ret=%d\n", __FUNCTION__, __LINE__, ret); | ||
866 | return ret; | ||
866 | } | 867 | } |
867 | 868 | ||
868 | void c2_unregister_device(struct c2_dev *dev) | 869 | void c2_unregister_device(struct c2_dev *dev) |
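The rewritten c2_register_device() above is the kernel's standard goto unwind: one label per acquired resource, a failure jumps to the label that releases everything acquired so far in reverse order, and the success path jumps past all of it (here via out3, since nothing remains to undo). Schematically, with hypothetical resources:

/* hypothetical resources standing in for netdev / ibdev / sysfs files */
int acquire_a(void), acquire_b(void), acquire_c(void);
void release_a(void), release_b(void);

int setup(void)
{
        int ret;

        ret = acquire_a();
        if (ret)
                return ret;
        ret = acquire_b();
        if (ret)
                goto err_a;     /* undo only what has already succeeded */
        ret = acquire_c();
        if (ret)
                goto err_b;
        return 0;

err_b:
        release_b();
err_a:
        release_a();
        return ret;
}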
diff --git a/drivers/infiniband/hw/amso1100/c2_qp.c b/drivers/infiniband/hw/amso1100/c2_qp.c index 5bcf697aa335..179d005ed4a5 100644 --- a/drivers/infiniband/hw/amso1100/c2_qp.c +++ b/drivers/infiniband/hw/amso1100/c2_qp.c | |||
@@ -564,6 +564,32 @@ int c2_alloc_qp(struct c2_dev *c2dev, | |||
564 | return err; | 564 | return err; |
565 | } | 565 | } |
566 | 566 | ||
567 | static inline void c2_lock_cqs(struct c2_cq *send_cq, struct c2_cq *recv_cq) | ||
568 | { | ||
569 | if (send_cq == recv_cq) | ||
570 | spin_lock_irq(&send_cq->lock); | ||
571 | else if (send_cq > recv_cq) { | ||
572 | spin_lock_irq(&send_cq->lock); | ||
573 | spin_lock_nested(&recv_cq->lock, SINGLE_DEPTH_NESTING); | ||
574 | } else { | ||
575 | spin_lock_irq(&recv_cq->lock); | ||
576 | spin_lock_nested(&send_cq->lock, SINGLE_DEPTH_NESTING); | ||
577 | } | ||
578 | } | ||
579 | |||
580 | static inline void c2_unlock_cqs(struct c2_cq *send_cq, struct c2_cq *recv_cq) | ||
581 | { | ||
582 | if (send_cq == recv_cq) | ||
583 | spin_unlock_irq(&send_cq->lock); | ||
584 | else if (send_cq > recv_cq) { | ||
585 | spin_unlock(&recv_cq->lock); | ||
586 | spin_unlock_irq(&send_cq->lock); | ||
587 | } else { | ||
588 | spin_unlock(&send_cq->lock); | ||
589 | spin_unlock_irq(&recv_cq->lock); | ||
590 | } | ||
591 | } | ||
592 | |||
567 | void c2_free_qp(struct c2_dev *c2dev, struct c2_qp *qp) | 593 | void c2_free_qp(struct c2_dev *c2dev, struct c2_qp *qp) |
568 | { | 594 | { |
569 | struct c2_cq *send_cq; | 595 | struct c2_cq *send_cq; |
@@ -576,15 +602,9 @@ void c2_free_qp(struct c2_dev *c2dev, struct c2_qp *qp) | |||
576 | * Lock CQs here, so that CQ polling code can do QP lookup | 602 | * Lock CQs here, so that CQ polling code can do QP lookup |
577 | * without taking a lock. | 603 | * without taking a lock. |
578 | */ | 604 | */ |
579 | spin_lock_irq(&send_cq->lock); | 605 | c2_lock_cqs(send_cq, recv_cq); |
580 | if (send_cq != recv_cq) | ||
581 | spin_lock(&recv_cq->lock); | ||
582 | |||
583 | c2_free_qpn(c2dev, qp->qpn); | 606 | c2_free_qpn(c2dev, qp->qpn); |
584 | 607 | c2_unlock_cqs(send_cq, recv_cq); | |
585 | if (send_cq != recv_cq) | ||
586 | spin_unlock(&recv_cq->lock); | ||
587 | spin_unlock_irq(&send_cq->lock); | ||
588 | 608 | ||
589 | /* | 609 | /* |
590 | * Destroy qp in the rnic... | 610 | * Destroy qp in the rnic... |
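c2_lock_cqs()/c2_unlock_cqs() handle the case where the send and receive CQ locks must be held at once: taking them in a fixed global order, here by comparing the object addresses, rules out an AB-BA deadlock between two threads locking the same pair, and spin_lock_nested() tells lockdep the double acquisition of same-class locks is intentional. The bare idiom (the code above additionally disables interrupts via spin_lock_irq() on the outer lock):

#include <linux/spinlock.h>

static void lock_pair(spinlock_t *a, spinlock_t *b)
{
        if (a == b) {
                spin_lock(a);           /* one lock: take it once */
        } else if (a < b) {             /* address order is the global order */
                spin_lock(a);
                spin_lock_nested(b, SINGLE_DEPTH_NESTING);
        } else {
                spin_lock(b);
                spin_lock_nested(a, SINGLE_DEPTH_NESTING);
        }
        /* unlock in the reverse of the order actually taken */
}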
diff --git a/drivers/infiniband/hw/amso1100/c2_rnic.c b/drivers/infiniband/hw/amso1100/c2_rnic.c index e37c5688c214..1687c511cb2f 100644 --- a/drivers/infiniband/hw/amso1100/c2_rnic.c +++ b/drivers/infiniband/hw/amso1100/c2_rnic.c | |||
@@ -150,15 +150,15 @@ static int c2_rnic_query(struct c2_dev *c2dev, struct ib_device_attr *props) | |||
150 | (struct c2wr_rnic_query_rep *) (unsigned long) (vq_req->reply_msg); | 150 | (struct c2wr_rnic_query_rep *) (unsigned long) (vq_req->reply_msg); |
151 | if (!reply) | 151 | if (!reply) |
152 | err = -ENOMEM; | 152 | err = -ENOMEM; |
153 | 153 | else | |
154 | err = c2_errno(reply); | 154 | err = c2_errno(reply); |
155 | if (err) | 155 | if (err) |
156 | goto bail2; | 156 | goto bail2; |
157 | 157 | ||
158 | props->fw_ver = | 158 | props->fw_ver = |
159 | ((u64)be32_to_cpu(reply->fw_ver_major) << 32) | | 159 | ((u64)be32_to_cpu(reply->fw_ver_major) << 32) | |
160 | ((be32_to_cpu(reply->fw_ver_minor) && 0xFFFF) << 16) | | 160 | ((be32_to_cpu(reply->fw_ver_minor) & 0xFFFF) << 16) | |
161 | (be32_to_cpu(reply->fw_ver_patch) && 0xFFFF); | 161 | (be32_to_cpu(reply->fw_ver_patch) & 0xFFFF); |
162 | memcpy(&props->sys_image_guid, c2dev->netdev->dev_addr, 6); | 162 | memcpy(&props->sys_image_guid, c2dev->netdev->dev_addr, 6); |
163 | props->max_mr_size = 0xFFFFFFFF; | 163 | props->max_mr_size = 0xFFFFFFFF; |
164 | props->page_size_cap = ~(C2_MIN_PAGESIZE-1); | 164 | props->page_size_cap = ~(C2_MIN_PAGESIZE-1); |
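The fw_ver hunk is a real operator bug, not a cleanup: && is logical AND, so (x && 0xFFFF) evaluates to 0 or 1 instead of masking the low 16 bits. With minor version 3 and patch level 7, the old expression produced 0x00010001 where 0x00030007 was intended:

#include <linux/types.h>

static void fw_ver_demo(void)
{
        u32 minor = 3, patch = 7;

        /* buggy: each (x && 0xFFFF) collapses to 0 or 1 */
        u32 bad  = ((minor && 0xFFFF) << 16) | (patch && 0xFFFF); /* 0x00010001 */

        /* fixed: bitwise AND keeps the low 16 bits */
        u32 good = ((minor &  0xFFFF) << 16) | (patch &  0xFFFF); /* 0x00030007 */

        (void)bad;
        (void)good;
}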
@@ -441,7 +441,7 @@ static int c2_rnic_close(struct c2_dev *c2dev) | |||
441 | * involves initializing the various limits and resource pools that | 441 | * involves initializing the various limits and resource pools that |
442 | * comprise the RNIC instance. | 442 | * comprise the RNIC instance. |
443 | */ | 443 | */ |
444 | int c2_rnic_init(struct c2_dev *c2dev) | 444 | int __devinit c2_rnic_init(struct c2_dev *c2dev) |
445 | { | 445 | { |
446 | int err; | 446 | int err; |
447 | u32 qsize, msgsize; | 447 | u32 qsize, msgsize; |
@@ -517,14 +517,12 @@ int c2_rnic_init(struct c2_dev *c2dev) | |||
517 | /* Initialize the Verbs Reply Queue */ | 517 | /* Initialize the Verbs Reply Queue */ |
518 | qsize = be32_to_cpu(readl(mmio_regs + C2_REGS_Q1_QSIZE)); | 518 | qsize = be32_to_cpu(readl(mmio_regs + C2_REGS_Q1_QSIZE)); |
519 | msgsize = be32_to_cpu(readl(mmio_regs + C2_REGS_Q1_MSGSIZE)); | 519 | msgsize = be32_to_cpu(readl(mmio_regs + C2_REGS_Q1_MSGSIZE)); |
520 | q1_pages = kmalloc(qsize * msgsize, GFP_KERNEL); | 520 | q1_pages = dma_alloc_coherent(&c2dev->pcidev->dev, qsize * msgsize, |
521 | &c2dev->rep_vq.host_dma, GFP_KERNEL); | ||
521 | if (!q1_pages) { | 522 | if (!q1_pages) { |
522 | err = -ENOMEM; | 523 | err = -ENOMEM; |
523 | goto bail1; | 524 | goto bail1; |
524 | } | 525 | } |
525 | c2dev->rep_vq.host_dma = dma_map_single(c2dev->ibdev.dma_device, | ||
526 | (void *)q1_pages, qsize * msgsize, | ||
527 | DMA_FROM_DEVICE); | ||
528 | pci_unmap_addr_set(&c2dev->rep_vq, mapping, c2dev->rep_vq.host_dma); | 526 | pci_unmap_addr_set(&c2dev->rep_vq, mapping, c2dev->rep_vq.host_dma); |
529 | pr_debug("%s rep_vq va %p dma %llx\n", __FUNCTION__, q1_pages, | 527 | pr_debug("%s rep_vq va %p dma %llx\n", __FUNCTION__, q1_pages, |
530 | (unsigned long long) c2dev->rep_vq.host_dma); | 528 | (unsigned long long) c2dev->rep_vq.host_dma); |
@@ -540,17 +538,15 @@ int c2_rnic_init(struct c2_dev *c2dev) | |||
540 | /* Initialize the Asynchronous Event Queue */ | 538 | /* Initialize the Asynchronous Event Queue */ |
541 | qsize = be32_to_cpu(readl(mmio_regs + C2_REGS_Q2_QSIZE)); | 539 | qsize = be32_to_cpu(readl(mmio_regs + C2_REGS_Q2_QSIZE)); |
542 | msgsize = be32_to_cpu(readl(mmio_regs + C2_REGS_Q2_MSGSIZE)); | 540 | msgsize = be32_to_cpu(readl(mmio_regs + C2_REGS_Q2_MSGSIZE)); |
543 | q2_pages = kmalloc(qsize * msgsize, GFP_KERNEL); | 541 | q2_pages = dma_alloc_coherent(&c2dev->pcidev->dev, qsize * msgsize, |
542 | &c2dev->aeq.host_dma, GFP_KERNEL); | ||
544 | if (!q2_pages) { | 543 | if (!q2_pages) { |
545 | err = -ENOMEM; | 544 | err = -ENOMEM; |
546 | goto bail2; | 545 | goto bail2; |
547 | } | 546 | } |
548 | c2dev->aeq.host_dma = dma_map_single(c2dev->ibdev.dma_device, | ||
549 | (void *)q2_pages, qsize * msgsize, | ||
550 | DMA_FROM_DEVICE); | ||
551 | pci_unmap_addr_set(&c2dev->aeq, mapping, c2dev->aeq.host_dma); | 547 | pci_unmap_addr_set(&c2dev->aeq, mapping, c2dev->aeq.host_dma); |
552 | pr_debug("%s aeq va %p dma %llx\n", __FUNCTION__, q1_pages, | 548 | pr_debug("%s aeq va %p dma %llx\n", __FUNCTION__, q2_pages, |
553 | (unsigned long long) c2dev->rep_vq.host_dma); | 549 | (unsigned long long) c2dev->aeq.host_dma); |
554 | c2_mq_rep_init(&c2dev->aeq, | 550 | c2_mq_rep_init(&c2dev->aeq, |
555 | 2, | 551 | 2, |
556 | qsize, | 552 | qsize, |
@@ -597,17 +593,13 @@ int c2_rnic_init(struct c2_dev *c2dev) | |||
597 | bail4: | 593 | bail4: |
598 | vq_term(c2dev); | 594 | vq_term(c2dev); |
599 | bail3: | 595 | bail3: |
600 | dma_unmap_single(c2dev->ibdev.dma_device, | 596 | dma_free_coherent(&c2dev->pcidev->dev, |
601 | pci_unmap_addr(&c2dev->aeq, mapping), | 597 | c2dev->aeq.q_size * c2dev->aeq.msg_size, |
602 | c2dev->aeq.q_size * c2dev->aeq.msg_size, | 598 | q2_pages, pci_unmap_addr(&c2dev->aeq, mapping)); |
603 | DMA_FROM_DEVICE); | ||
604 | kfree(q2_pages); | ||
605 | bail2: | 599 | bail2: |
606 | dma_unmap_single(c2dev->ibdev.dma_device, | 600 | dma_free_coherent(&c2dev->pcidev->dev, |
607 | pci_unmap_addr(&c2dev->rep_vq, mapping), | 601 | c2dev->rep_vq.q_size * c2dev->rep_vq.msg_size, |
608 | c2dev->rep_vq.q_size * c2dev->rep_vq.msg_size, | 602 | q1_pages, pci_unmap_addr(&c2dev->rep_vq, mapping)); |
609 | DMA_FROM_DEVICE); | ||
610 | kfree(q1_pages); | ||
611 | bail1: | 603 | bail1: |
612 | c2_free_mqsp_pool(c2dev, c2dev->kern_mqsp_pool); | 604 | c2_free_mqsp_pool(c2dev, c2dev->kern_mqsp_pool); |
613 | bail0: | 605 | bail0: |
@@ -619,7 +611,7 @@ int c2_rnic_init(struct c2_dev *c2dev) | |||
619 | /* | 611 | /* |
620 | * Called by c2_remove to cleanup the RNIC resources. | 612 | * Called by c2_remove to cleanup the RNIC resources. |
621 | */ | 613 | */ |
622 | void c2_rnic_term(struct c2_dev *c2dev) | 614 | void __devexit c2_rnic_term(struct c2_dev *c2dev) |
623 | { | 615 | { |
624 | 616 | ||
625 | /* Close the open adapter instance */ | 617 | /* Close the open adapter instance */ |
@@ -640,19 +632,17 @@ void c2_rnic_term(struct c2_dev *c2dev) | |||
640 | /* Free the verbs request allocator */ | 632 | /* Free the verbs request allocator */ |
641 | vq_term(c2dev); | 633 | vq_term(c2dev); |
642 | 634 | ||
643 | /* Unmap and free the asynchronous event queue */ | 635 | /* Free the asynchronous event queue */ |
644 | dma_unmap_single(c2dev->ibdev.dma_device, | 636 | dma_free_coherent(&c2dev->pcidev->dev, |
645 | pci_unmap_addr(&c2dev->aeq, mapping), | 637 | c2dev->aeq.q_size * c2dev->aeq.msg_size, |
646 | c2dev->aeq.q_size * c2dev->aeq.msg_size, | 638 | c2dev->aeq.msg_pool.host, |
647 | DMA_FROM_DEVICE); | 639 | pci_unmap_addr(&c2dev->aeq, mapping)); |
648 | kfree(c2dev->aeq.msg_pool.host); | 640 | |
649 | 641 | /* Free the verbs reply queue */ | |
650 | /* Unmap and free the verbs reply queue */ | 642 | dma_free_coherent(&c2dev->pcidev->dev, |
651 | dma_unmap_single(c2dev->ibdev.dma_device, | 643 | c2dev->rep_vq.q_size * c2dev->rep_vq.msg_size, |
652 | pci_unmap_addr(&c2dev->rep_vq, mapping), | 644 | c2dev->rep_vq.msg_pool.host, |
653 | c2dev->rep_vq.q_size * c2dev->rep_vq.msg_size, | 645 | pci_unmap_addr(&c2dev->rep_vq, mapping)); |
654 | DMA_FROM_DEVICE); | ||
655 | kfree(c2dev->rep_vq.msg_pool.host); | ||
656 | 646 | ||
657 | /* Free the MQ shared pointer pool */ | 647 | /* Free the MQ shared pointer pool */ |
658 | c2_free_mqsp_pool(c2dev, c2dev->kern_mqsp_pool); | 648 | c2_free_mqsp_pool(c2dev, c2dev->kern_mqsp_pool); |
diff --git a/drivers/infiniband/hw/amso1100/c2_vq.c b/drivers/infiniband/hw/amso1100/c2_vq.c index 40caeb5f41b4..36620a22413c 100644 --- a/drivers/infiniband/hw/amso1100/c2_vq.c +++ b/drivers/infiniband/hw/amso1100/c2_vq.c | |||
@@ -164,7 +164,7 @@ void vq_req_put(struct c2_dev *c2dev, struct c2_vq_req *r) | |||
164 | */ | 164 | */ |
165 | void *vq_repbuf_alloc(struct c2_dev *c2dev) | 165 | void *vq_repbuf_alloc(struct c2_dev *c2dev) |
166 | { | 166 | { |
167 | return kmem_cache_alloc(c2dev->host_msg_cache, SLAB_ATOMIC); | 167 | return kmem_cache_alloc(c2dev->host_msg_cache, GFP_ATOMIC); |
168 | } | 168 | } |
169 | 169 | ||
170 | /* | 170 | /* |
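SLAB_ATOMIC, SLAB_KERNEL and the other SLAB_* allocation flags replaced throughout this merge were plain aliases for the matching GFP_* values and were removed in 2.6.20; kmem_cache_alloc() simply takes a gfp_t. For instance:

#include <linux/slab.h>

static void *alloc_obj(struct kmem_cache *cache, bool can_sleep)
{
        /* GFP_KERNEL may sleep; GFP_ATOMIC is for interrupt or
         * spinlocked context, exactly as SLAB_KERNEL/SLAB_ATOMIC were */
        return kmem_cache_alloc(cache, can_sleep ? GFP_KERNEL : GFP_ATOMIC);
}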
diff --git a/drivers/infiniband/hw/ehca/Kconfig b/drivers/infiniband/hw/ehca/Kconfig index 922389b64394..727b10d89686 100644 --- a/drivers/infiniband/hw/ehca/Kconfig +++ b/drivers/infiniband/hw/ehca/Kconfig | |||
@@ -10,6 +10,7 @@ config INFINIBAND_EHCA | |||
10 | config INFINIBAND_EHCA_SCALING | 10 | config INFINIBAND_EHCA_SCALING |
11 | bool "Scaling support (EXPERIMENTAL)" | 11 | bool "Scaling support (EXPERIMENTAL)" |
12 | depends on IBMEBUS && INFINIBAND_EHCA && HOTPLUG_CPU && EXPERIMENTAL | 12 | depends on IBMEBUS && INFINIBAND_EHCA && HOTPLUG_CPU && EXPERIMENTAL |
13 | default y | ||
13 | ---help--- | 14 | ---help--- |
14 | eHCA scaling support schedules the CQ callbacks to different CPUs. | 15 | eHCA scaling support schedules the CQ callbacks to different CPUs. |
15 | 16 | ||
diff --git a/drivers/infiniband/hw/ehca/ehca_av.c b/drivers/infiniband/hw/ehca/ehca_av.c index 3bac197f9014..0d6e2c4bb245 100644 --- a/drivers/infiniband/hw/ehca/ehca_av.c +++ b/drivers/infiniband/hw/ehca/ehca_av.c | |||
@@ -57,7 +57,7 @@ struct ib_ah *ehca_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr) | |||
57 | struct ehca_shca *shca = container_of(pd->device, struct ehca_shca, | 57 | struct ehca_shca *shca = container_of(pd->device, struct ehca_shca, |
58 | ib_device); | 58 | ib_device); |
59 | 59 | ||
60 | av = kmem_cache_alloc(av_cache, SLAB_KERNEL); | 60 | av = kmem_cache_alloc(av_cache, GFP_KERNEL); |
61 | if (!av) { | 61 | if (!av) { |
62 | ehca_err(pd->device, "Out of memory pd=%p ah_attr=%p", | 62 | ehca_err(pd->device, "Out of memory pd=%p ah_attr=%p", |
63 | pd, ah_attr); | 63 | pd, ah_attr); |
@@ -118,8 +118,7 @@ struct ib_ah *ehca_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr) | |||
118 | } | 118 | } |
119 | memcpy(&av->av.grh.word_1, &gid, sizeof(gid)); | 119 | memcpy(&av->av.grh.word_1, &gid, sizeof(gid)); |
120 | } | 120 | } |
121 | /* for the time being we use a hard coded PMTU of 2048 Bytes */ | 121 | av->av.pmtu = EHCA_MAX_MTU; |
122 | av->av.pmtu = 4; | ||
123 | 122 | ||
124 | /* dgid comes in grh.word_3 */ | 123 | /* dgid comes in grh.word_3 */ |
125 | memcpy(&av->av.grh.word_3, &ah_attr->grh.dgid, | 124 | memcpy(&av->av.grh.word_3, &ah_attr->grh.dgid, |
@@ -193,7 +192,7 @@ int ehca_modify_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr) | |||
193 | memcpy(&new_ehca_av.grh.word_1, &gid, sizeof(gid)); | 192 | memcpy(&new_ehca_av.grh.word_1, &gid, sizeof(gid)); |
194 | } | 193 | } |
195 | 194 | ||
196 | new_ehca_av.pmtu = 4; /* see also comment in create_ah() */ | 195 | new_ehca_av.pmtu = EHCA_MAX_MTU; |
197 | 196 | ||
198 | memcpy(&new_ehca_av.grh.word_3, &ah_attr->grh.dgid, | 197 | memcpy(&new_ehca_av.grh.word_3, &ah_attr->grh.dgid, |
199 | sizeof(ah_attr->grh.dgid)); | 198 | sizeof(ah_attr->grh.dgid)); |
diff --git a/drivers/infiniband/hw/ehca/ehca_cq.c b/drivers/infiniband/hw/ehca/ehca_cq.c index 458fe19648a1..93995b658d94 100644 --- a/drivers/infiniband/hw/ehca/ehca_cq.c +++ b/drivers/infiniband/hw/ehca/ehca_cq.c | |||
@@ -134,7 +134,7 @@ struct ib_cq *ehca_create_cq(struct ib_device *device, int cqe, | |||
134 | if (cqe >= 0xFFFFFFFF - 64 - additional_cqe) | 134 | if (cqe >= 0xFFFFFFFF - 64 - additional_cqe) |
135 | return ERR_PTR(-EINVAL); | 135 | return ERR_PTR(-EINVAL); |
136 | 136 | ||
137 | my_cq = kmem_cache_alloc(cq_cache, SLAB_KERNEL); | 137 | my_cq = kmem_cache_alloc(cq_cache, GFP_KERNEL); |
138 | if (!my_cq) { | 138 | if (!my_cq) { |
139 | ehca_err(device, "Out of memory for ehca_cq struct device=%p", | 139 | ehca_err(device, "Out of memory for ehca_cq struct device=%p", |
140 | device); | 140 | device); |
diff --git a/drivers/infiniband/hw/ehca/ehca_hca.c b/drivers/infiniband/hw/ehca/ehca_hca.c index 5eae6ac48425..e1b618c5f685 100644 --- a/drivers/infiniband/hw/ehca/ehca_hca.c +++ b/drivers/infiniband/hw/ehca/ehca_hca.c | |||
@@ -40,6 +40,7 @@ | |||
40 | */ | 40 | */ |
41 | 41 | ||
42 | #include "ehca_tools.h" | 42 | #include "ehca_tools.h" |
43 | #include "ehca_iverbs.h" | ||
43 | #include "hcp_if.h" | 44 | #include "hcp_if.h" |
44 | 45 | ||
45 | int ehca_query_device(struct ib_device *ibdev, struct ib_device_attr *props) | 46 | int ehca_query_device(struct ib_device *ibdev, struct ib_device_attr *props) |
@@ -49,7 +50,7 @@ int ehca_query_device(struct ib_device *ibdev, struct ib_device_attr *props) | |||
49 | ib_device); | 50 | ib_device); |
50 | struct hipz_query_hca *rblock; | 51 | struct hipz_query_hca *rblock; |
51 | 52 | ||
52 | rblock = kzalloc(H_CB_ALIGNMENT, GFP_KERNEL); | 53 | rblock = ehca_alloc_fw_ctrlblock(); |
53 | if (!rblock) { | 54 | if (!rblock) { |
54 | ehca_err(&shca->ib_device, "Can't allocate rblock memory."); | 55 | ehca_err(&shca->ib_device, "Can't allocate rblock memory."); |
55 | return -ENOMEM; | 56 | return -ENOMEM; |
@@ -96,7 +97,7 @@ int ehca_query_device(struct ib_device *ibdev, struct ib_device_attr *props) | |||
96 | = min_t(int, rblock->max_total_mcast_qp_attach, INT_MAX); | 97 | = min_t(int, rblock->max_total_mcast_qp_attach, INT_MAX); |
97 | 98 | ||
98 | query_device1: | 99 | query_device1: |
99 | kfree(rblock); | 100 | ehca_free_fw_ctrlblock(rblock); |
100 | 101 | ||
101 | return ret; | 102 | return ret; |
102 | } | 103 | } |
@@ -109,7 +110,7 @@ int ehca_query_port(struct ib_device *ibdev, | |||
109 | ib_device); | 110 | ib_device); |
110 | struct hipz_query_port *rblock; | 111 | struct hipz_query_port *rblock; |
111 | 112 | ||
112 | rblock = kzalloc(H_CB_ALIGNMENT, GFP_KERNEL); | 113 | rblock = ehca_alloc_fw_ctrlblock(); |
113 | if (!rblock) { | 114 | if (!rblock) { |
114 | ehca_err(&shca->ib_device, "Can't allocate rblock memory."); | 115 | ehca_err(&shca->ib_device, "Can't allocate rblock memory."); |
115 | return -ENOMEM; | 116 | return -ENOMEM; |
@@ -162,7 +163,7 @@ int ehca_query_port(struct ib_device *ibdev, | |||
162 | props->active_speed = 0x1; | 163 | props->active_speed = 0x1; |
163 | 164 | ||
164 | query_port1: | 165 | query_port1: |
165 | kfree(rblock); | 166 | ehca_free_fw_ctrlblock(rblock); |
166 | 167 | ||
167 | return ret; | 168 | return ret; |
168 | } | 169 | } |
@@ -178,7 +179,7 @@ int ehca_query_pkey(struct ib_device *ibdev, u8 port, u16 index, u16 *pkey) | |||
178 | return -EINVAL; | 179 | return -EINVAL; |
179 | } | 180 | } |
180 | 181 | ||
181 | rblock = kzalloc(H_CB_ALIGNMENT, GFP_KERNEL); | 182 | rblock = ehca_alloc_fw_ctrlblock(); |
182 | if (!rblock) { | 183 | if (!rblock) { |
183 | ehca_err(&shca->ib_device, "Can't allocate rblock memory."); | 184 | ehca_err(&shca->ib_device, "Can't allocate rblock memory."); |
184 | return -ENOMEM; | 185 | return -ENOMEM; |
@@ -193,7 +194,7 @@ int ehca_query_pkey(struct ib_device *ibdev, u8 port, u16 index, u16 *pkey) | |||
193 | memcpy(pkey, &rblock->pkey_entries + index, sizeof(u16)); | 194 | memcpy(pkey, &rblock->pkey_entries + index, sizeof(u16)); |
194 | 195 | ||
195 | query_pkey1: | 196 | query_pkey1: |
196 | kfree(rblock); | 197 | ehca_free_fw_ctrlblock(rblock); |
197 | 198 | ||
198 | return ret; | 199 | return ret; |
199 | } | 200 | } |
@@ -211,7 +212,7 @@ int ehca_query_gid(struct ib_device *ibdev, u8 port, | |||
211 | return -EINVAL; | 212 | return -EINVAL; |
212 | } | 213 | } |
213 | 214 | ||
214 | rblock = kzalloc(H_CB_ALIGNMENT, GFP_KERNEL); | 215 | rblock = ehca_alloc_fw_ctrlblock(); |
215 | if (!rblock) { | 216 | if (!rblock) { |
216 | ehca_err(&shca->ib_device, "Can't allocate rblock memory."); | 217 | ehca_err(&shca->ib_device, "Can't allocate rblock memory."); |
217 | return -ENOMEM; | 218 | return -ENOMEM; |
@@ -227,7 +228,7 @@ int ehca_query_gid(struct ib_device *ibdev, u8 port, | |||
227 | memcpy(&gid->raw[8], &rblock->guid_entries[index], sizeof(u64)); | 228 | memcpy(&gid->raw[8], &rblock->guid_entries[index], sizeof(u64)); |
228 | 229 | ||
229 | query_gid1: | 230 | query_gid1: |
230 | kfree(rblock); | 231 | ehca_free_fw_ctrlblock(rblock); |
231 | 232 | ||
232 | return ret; | 233 | return ret; |
233 | } | 234 | } |
diff --git a/drivers/infiniband/hw/ehca/ehca_irq.c b/drivers/infiniband/hw/ehca/ehca_irq.c index 048cc443d1e7..c3ea746e9045 100644 --- a/drivers/infiniband/hw/ehca/ehca_irq.c +++ b/drivers/infiniband/hw/ehca/ehca_irq.c | |||
@@ -45,6 +45,7 @@ | |||
45 | #include "ehca_tools.h" | 45 | #include "ehca_tools.h" |
46 | #include "hcp_if.h" | 46 | #include "hcp_if.h" |
47 | #include "hipz_fns.h" | 47 | #include "hipz_fns.h" |
48 | #include "ipz_pt_fn.h" | ||
48 | 49 | ||
49 | #define EQE_COMPLETION_EVENT EHCA_BMASK_IBM(1,1) | 50 | #define EQE_COMPLETION_EVENT EHCA_BMASK_IBM(1,1) |
50 | #define EQE_CQ_QP_NUMBER EHCA_BMASK_IBM(8,31) | 51 | #define EQE_CQ_QP_NUMBER EHCA_BMASK_IBM(8,31) |
@@ -137,38 +138,36 @@ int ehca_error_data(struct ehca_shca *shca, void *data, | |||
137 | u64 *rblock; | 138 | u64 *rblock; |
138 | unsigned long block_count; | 139 | unsigned long block_count; |
139 | 140 | ||
140 | rblock = kzalloc(H_CB_ALIGNMENT, GFP_KERNEL); | 141 | rblock = ehca_alloc_fw_ctrlblock(); |
141 | if (!rblock) { | 142 | if (!rblock) { |
142 | ehca_err(&shca->ib_device, "Cannot allocate rblock memory."); | 143 | ehca_err(&shca->ib_device, "Cannot allocate rblock memory."); |
143 | ret = -ENOMEM; | 144 | ret = -ENOMEM; |
144 | goto error_data1; | 145 | goto error_data1; |
145 | } | 146 | } |
146 | 147 | ||
148 | /* rblock must be 4K aligned and should be 4K large */ | ||
147 | ret = hipz_h_error_data(shca->ipz_hca_handle, | 149 | ret = hipz_h_error_data(shca->ipz_hca_handle, |
148 | resource, | 150 | resource, |
149 | rblock, | 151 | rblock, |
150 | &block_count); | 152 | &block_count); |
151 | 153 | ||
152 | if (ret == H_R_STATE) { | 154 | if (ret == H_R_STATE) |
153 | ehca_err(&shca->ib_device, | 155 | ehca_err(&shca->ib_device, |
154 | "No error data is available: %lx.", resource); | 156 | "No error data is available: %lx.", resource); |
155 | } | ||
156 | else if (ret == H_SUCCESS) { | 157 | else if (ret == H_SUCCESS) { |
157 | int length; | 158 | int length; |
158 | 159 | ||
159 | length = EHCA_BMASK_GET(ERROR_DATA_LENGTH, rblock[0]); | 160 | length = EHCA_BMASK_GET(ERROR_DATA_LENGTH, rblock[0]); |
160 | 161 | ||
161 | if (length > PAGE_SIZE) | 162 | if (length > EHCA_PAGESIZE) |
162 | length = PAGE_SIZE; | 163 | length = EHCA_PAGESIZE; |
163 | 164 | ||
164 | print_error_data(shca, data, rblock, length); | 165 | print_error_data(shca, data, rblock, length); |
165 | } | 166 | } else |
166 | else { | ||
167 | ehca_err(&shca->ib_device, | 167 | ehca_err(&shca->ib_device, |
168 | "Error data could not be fetched: %lx", resource); | 168 | "Error data could not be fetched: %lx", resource); |
169 | } | ||
170 | 169 | ||
171 | kfree(rblock); | 170 | ehca_free_fw_ctrlblock(rblock); |
172 | 171 | ||
173 | error_data1: | 172 | error_data1: |
174 | return ret; | 173 | return ret; |
diff --git a/drivers/infiniband/hw/ehca/ehca_iverbs.h b/drivers/infiniband/hw/ehca/ehca_iverbs.h index 319c39d47f3a..3720e3032cce 100644 --- a/drivers/infiniband/hw/ehca/ehca_iverbs.h +++ b/drivers/infiniband/hw/ehca/ehca_iverbs.h | |||
@@ -179,4 +179,12 @@ int ehca_mmap_register(u64 physical,void **mapped, | |||
179 | 179 | ||
180 | int ehca_munmap(unsigned long addr, size_t len); | 180 | int ehca_munmap(unsigned long addr, size_t len); |
181 | 181 | ||
182 | #ifdef CONFIG_PPC_64K_PAGES | ||
183 | void *ehca_alloc_fw_ctrlblock(void); | ||
184 | void ehca_free_fw_ctrlblock(void *ptr); | ||
185 | #else | ||
186 | #define ehca_alloc_fw_ctrlblock() ((void *) get_zeroed_page(GFP_KERNEL)) | ||
187 | #define ehca_free_fw_ctrlblock(ptr) free_page((unsigned long)(ptr)) | ||
188 | #endif | ||
189 | |||
182 | #endif | 190 | #endif |
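The new ehca_alloc_fw_ctrlblock() helper hides a page-size mismatch: the firmware control block must be 4K aligned and, per the comment added in ehca_irq.c, about 4K in size. get_zeroed_page() gives that for free on 4K-page kernels, but under CONFIG_PPC_64K_PAGES a whole page would waste most of 64K, so ehca_main.c below backs the helper with a dedicated kmem_cache of EHCA_PAGESIZE objects aligned to H_CB_ALIGNMENT. A sketch of the call pattern at the many converted query sites (error handling trimmed):

static int query_hca_sketch(struct ehca_shca *shca,
                            struct hipz_query_hca **out)
{
        struct hipz_query_hca *rblock;

        rblock = ehca_alloc_fw_ctrlblock();     /* zeroed, firmware-aligned */
        if (!rblock)
                return -ENOMEM;

        if (hipz_h_query_hca(shca->ipz_hca_handle, rblock) != H_SUCCESS) {
                ehca_free_fw_ctrlblock(rblock);
                return -EINVAL;
        }

        *out = rblock;  /* caller releases with ehca_free_fw_ctrlblock() */
        return 0;
}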
diff --git a/drivers/infiniband/hw/ehca/ehca_main.c b/drivers/infiniband/hw/ehca/ehca_main.c index 024d511c4b58..cc47e4c13a18 100644 --- a/drivers/infiniband/hw/ehca/ehca_main.c +++ b/drivers/infiniband/hw/ehca/ehca_main.c | |||
@@ -40,6 +40,9 @@ | |||
40 | * POSSIBILITY OF SUCH DAMAGE. | 40 | * POSSIBILITY OF SUCH DAMAGE. |
41 | */ | 41 | */ |
42 | 42 | ||
43 | #ifdef CONFIG_PPC_64K_PAGES | ||
44 | #include <linux/slab.h> | ||
45 | #endif | ||
43 | #include "ehca_classes.h" | 46 | #include "ehca_classes.h" |
44 | #include "ehca_iverbs.h" | 47 | #include "ehca_iverbs.h" |
45 | #include "ehca_mrmw.h" | 48 | #include "ehca_mrmw.h" |
@@ -49,7 +52,7 @@ | |||
49 | MODULE_LICENSE("Dual BSD/GPL"); | 52 | MODULE_LICENSE("Dual BSD/GPL"); |
50 | MODULE_AUTHOR("Christoph Raisch <raisch@de.ibm.com>"); | 53 | MODULE_AUTHOR("Christoph Raisch <raisch@de.ibm.com>"); |
51 | MODULE_DESCRIPTION("IBM eServer HCA InfiniBand Device Driver"); | 54 | MODULE_DESCRIPTION("IBM eServer HCA InfiniBand Device Driver"); |
52 | MODULE_VERSION("SVNEHCA_0017"); | 55 | MODULE_VERSION("SVNEHCA_0019"); |
53 | 56 | ||
54 | int ehca_open_aqp1 = 0; | 57 | int ehca_open_aqp1 = 0; |
55 | int ehca_debug_level = 0; | 58 | int ehca_debug_level = 0; |
@@ -94,11 +97,31 @@ spinlock_t ehca_cq_idr_lock; | |||
94 | DEFINE_IDR(ehca_qp_idr); | 97 | DEFINE_IDR(ehca_qp_idr); |
95 | DEFINE_IDR(ehca_cq_idr); | 98 | DEFINE_IDR(ehca_cq_idr); |
96 | 99 | ||
100 | |||
97 | static struct list_head shca_list; /* list of all registered ehcas */ | 101 | static struct list_head shca_list; /* list of all registered ehcas */ |
98 | static spinlock_t shca_list_lock; | 102 | static spinlock_t shca_list_lock; |
99 | 103 | ||
100 | static struct timer_list poll_eqs_timer; | 104 | static struct timer_list poll_eqs_timer; |
101 | 105 | ||
106 | #ifdef CONFIG_PPC_64K_PAGES | ||
107 | static struct kmem_cache *ctblk_cache = NULL; | ||
108 | |||
109 | void *ehca_alloc_fw_ctrlblock(void) | ||
110 | { | ||
111 | void *ret = kmem_cache_zalloc(ctblk_cache, GFP_KERNEL); | ||
112 | if (!ret) | ||
113 | ehca_gen_err("Out of memory for ctblk"); | ||
114 | return ret; | ||
115 | } | ||
116 | |||
117 | void ehca_free_fw_ctrlblock(void *ptr) | ||
118 | { | ||
119 | if (ptr) | ||
120 | kmem_cache_free(ctblk_cache, ptr); | ||
121 | |||
122 | } | ||
123 | #endif | ||
124 | |||
102 | static int ehca_create_slab_caches(void) | 125 | static int ehca_create_slab_caches(void) |
103 | { | 126 | { |
104 | int ret; | 127 | int ret; |
@@ -133,6 +156,17 @@ static int ehca_create_slab_caches(void) | |||
133 | goto create_slab_caches5; | 156 | goto create_slab_caches5; |
134 | } | 157 | } |
135 | 158 | ||
159 | #ifdef CONFIG_PPC_64K_PAGES | ||
160 | ctblk_cache = kmem_cache_create("ehca_cache_ctblk", | ||
161 | EHCA_PAGESIZE, H_CB_ALIGNMENT, | ||
162 | SLAB_HWCACHE_ALIGN, | ||
163 | NULL, NULL); | ||
164 | if (!ctblk_cache) { | ||
165 | ehca_gen_err("Cannot create ctblk SLAB cache."); | ||
166 | ehca_cleanup_mrmw_cache(); | ||
167 | goto create_slab_caches5; | ||
168 | } | ||
169 | #endif | ||
136 | return 0; | 170 | return 0; |
137 | 171 | ||
138 | create_slab_caches5: | 172 | create_slab_caches5: |
@@ -157,6 +191,10 @@ static void ehca_destroy_slab_caches(void) | |||
157 | ehca_cleanup_qp_cache(); | 191 | ehca_cleanup_qp_cache(); |
158 | ehca_cleanup_cq_cache(); | 192 | ehca_cleanup_cq_cache(); |
159 | ehca_cleanup_pd_cache(); | 193 | ehca_cleanup_pd_cache(); |
194 | #ifdef CONFIG_PPC_64K_PAGES | ||
195 | if (ctblk_cache) | ||
196 | kmem_cache_destroy(ctblk_cache); | ||
197 | #endif | ||
160 | } | 198 | } |
161 | 199 | ||
162 | #define EHCA_HCAAVER EHCA_BMASK_IBM(32,39) | 200 | #define EHCA_HCAAVER EHCA_BMASK_IBM(32,39) |
@@ -168,7 +206,7 @@ int ehca_sense_attributes(struct ehca_shca *shca) | |||
168 | u64 h_ret; | 206 | u64 h_ret; |
169 | struct hipz_query_hca *rblock; | 207 | struct hipz_query_hca *rblock; |
170 | 208 | ||
171 | rblock = kzalloc(H_CB_ALIGNMENT, GFP_KERNEL); | 209 | rblock = ehca_alloc_fw_ctrlblock(); |
172 | if (!rblock) { | 210 | if (!rblock) { |
173 | ehca_gen_err("Cannot allocate rblock memory."); | 211 | ehca_gen_err("Cannot allocate rblock memory."); |
174 | return -ENOMEM; | 212 | return -ENOMEM; |
@@ -211,7 +249,7 @@ int ehca_sense_attributes(struct ehca_shca *shca) | |||
211 | shca->sport[1].rate = IB_RATE_30_GBPS; | 249 | shca->sport[1].rate = IB_RATE_30_GBPS; |
212 | 250 | ||
213 | num_ports1: | 251 | num_ports1: |
214 | kfree(rblock); | 252 | ehca_free_fw_ctrlblock(rblock); |
215 | return ret; | 253 | return ret; |
216 | } | 254 | } |
217 | 255 | ||
@@ -220,7 +258,7 @@ static int init_node_guid(struct ehca_shca *shca) | |||
220 | int ret = 0; | 258 | int ret = 0; |
221 | struct hipz_query_hca *rblock; | 259 | struct hipz_query_hca *rblock; |
222 | 260 | ||
223 | rblock = kzalloc(H_CB_ALIGNMENT, GFP_KERNEL); | 261 | rblock = ehca_alloc_fw_ctrlblock(); |
224 | if (!rblock) { | 262 | if (!rblock) { |
225 | ehca_err(&shca->ib_device, "Can't allocate rblock memory."); | 263 | ehca_err(&shca->ib_device, "Can't allocate rblock memory."); |
226 | return -ENOMEM; | 264 | return -ENOMEM; |
@@ -235,7 +273,7 @@ static int init_node_guid(struct ehca_shca *shca) | |||
235 | memcpy(&shca->ib_device.node_guid, &rblock->node_guid, sizeof(u64)); | 273 | memcpy(&shca->ib_device.node_guid, &rblock->node_guid, sizeof(u64)); |
236 | 274 | ||
237 | init_node_guid1: | 275 | init_node_guid1: |
238 | kfree(rblock); | 276 | ehca_free_fw_ctrlblock(rblock); |
239 | return ret; | 277 | return ret; |
240 | } | 278 | } |
241 | 279 | ||
@@ -431,7 +469,7 @@ static ssize_t ehca_show_##name(struct device *dev, \ | |||
431 | \ | 469 | \ |
432 | shca = dev->driver_data; \ | 470 | shca = dev->driver_data; \ |
433 | \ | 471 | \ |
434 | rblock = kzalloc(H_CB_ALIGNMENT, GFP_KERNEL); \ | 472 | rblock = ehca_alloc_fw_ctrlblock(); \ |
435 | if (!rblock) { \ | 473 | if (!rblock) { \ |
436 | dev_err(dev, "Can't allocate rblock memory."); \ | 474 | dev_err(dev, "Can't allocate rblock memory."); \ |
437 | return 0; \ | 475 | return 0; \ |
@@ -439,12 +477,12 @@ static ssize_t ehca_show_##name(struct device *dev, \ | |||
439 | \ | 477 | \ |
440 | if (hipz_h_query_hca(shca->ipz_hca_handle, rblock) != H_SUCCESS) { \ | 478 | if (hipz_h_query_hca(shca->ipz_hca_handle, rblock) != H_SUCCESS) { \ |
441 | dev_err(dev, "Can't query device properties"); \ | 479 | dev_err(dev, "Can't query device properties"); \ |
442 | kfree(rblock); \ | 480 | ehca_free_fw_ctrlblock(rblock); \ |
443 | return 0; \ | 481 | return 0; \ |
444 | } \ | 482 | } \ |
445 | \ | 483 | \ |
446 | data = rblock->name; \ | 484 | data = rblock->name; \ |
447 | kfree(rblock); \ | 485 | ehca_free_fw_ctrlblock(rblock); \ |
448 | \ | 486 | \ |
449 | if ((strcmp(#name, "num_ports") == 0) && (ehca_nr_ports == 1)) \ | 487 | if ((strcmp(#name, "num_ports") == 0) && (ehca_nr_ports == 1)) \ |
450 | return snprintf(buf, 256, "1\n"); \ | 488 | return snprintf(buf, 256, "1\n"); \ |
@@ -752,7 +790,7 @@ int __init ehca_module_init(void) | |||
752 | int ret; | 790 | int ret; |
753 | 791 | ||
754 | printk(KERN_INFO "eHCA Infiniband Device Driver " | 792 | printk(KERN_INFO "eHCA Infiniband Device Driver " |
755 | "(Rel.: SVNEHCA_0017)\n"); | 793 | "(Rel.: SVNEHCA_0019)\n"); |
756 | idr_init(&ehca_qp_idr); | 794 | idr_init(&ehca_qp_idr); |
757 | idr_init(&ehca_cq_idr); | 795 | idr_init(&ehca_cq_idr); |
758 | spin_lock_init(&ehca_qp_idr_lock); | 796 | spin_lock_init(&ehca_qp_idr_lock); |
diff --git a/drivers/infiniband/hw/ehca/ehca_mrmw.c b/drivers/infiniband/hw/ehca/ehca_mrmw.c index 5ca65441e1da..0a5e2214cc5f 100644 --- a/drivers/infiniband/hw/ehca/ehca_mrmw.c +++ b/drivers/infiniband/hw/ehca/ehca_mrmw.c | |||
@@ -53,7 +53,7 @@ static struct ehca_mr *ehca_mr_new(void) | |||
53 | { | 53 | { |
54 | struct ehca_mr *me; | 54 | struct ehca_mr *me; |
55 | 55 | ||
56 | me = kmem_cache_alloc(mr_cache, SLAB_KERNEL); | 56 | me = kmem_cache_alloc(mr_cache, GFP_KERNEL); |
57 | if (me) { | 57 | if (me) { |
58 | memset(me, 0, sizeof(struct ehca_mr)); | 58 | memset(me, 0, sizeof(struct ehca_mr)); |
59 | spin_lock_init(&me->mrlock); | 59 | spin_lock_init(&me->mrlock); |
@@ -72,7 +72,7 @@ static struct ehca_mw *ehca_mw_new(void) | |||
72 | { | 72 | { |
73 | struct ehca_mw *me; | 73 | struct ehca_mw *me; |
74 | 74 | ||
75 | me = kmem_cache_alloc(mw_cache, SLAB_KERNEL); | 75 | me = kmem_cache_alloc(mw_cache, GFP_KERNEL); |
76 | if (me) { | 76 | if (me) { |
77 | memset(me, 0, sizeof(struct ehca_mw)); | 77 | memset(me, 0, sizeof(struct ehca_mw)); |
78 | spin_lock_init(&me->mwlock); | 78 | spin_lock_init(&me->mwlock); |
@@ -1013,7 +1013,7 @@ int ehca_reg_mr_rpages(struct ehca_shca *shca, | |||
1013 | u32 i; | 1013 | u32 i; |
1014 | u64 *kpage; | 1014 | u64 *kpage; |
1015 | 1015 | ||
1016 | kpage = kzalloc(H_CB_ALIGNMENT, GFP_KERNEL); | 1016 | kpage = ehca_alloc_fw_ctrlblock(); |
1017 | if (!kpage) { | 1017 | if (!kpage) { |
1018 | ehca_err(&shca->ib_device, "kpage alloc failed"); | 1018 | ehca_err(&shca->ib_device, "kpage alloc failed"); |
1019 | ret = -ENOMEM; | 1019 | ret = -ENOMEM; |
@@ -1092,7 +1092,7 @@ int ehca_reg_mr_rpages(struct ehca_shca *shca, | |||
1092 | 1092 | ||
1093 | 1093 | ||
1094 | ehca_reg_mr_rpages_exit1: | 1094 | ehca_reg_mr_rpages_exit1: |
1095 | kfree(kpage); | 1095 | ehca_free_fw_ctrlblock(kpage); |
1096 | ehca_reg_mr_rpages_exit0: | 1096 | ehca_reg_mr_rpages_exit0: |
1097 | if (ret) | 1097 | if (ret) |
1098 | ehca_err(&shca->ib_device, "ret=%x shca=%p e_mr=%p pginfo=%p " | 1098 | ehca_err(&shca->ib_device, "ret=%x shca=%p e_mr=%p pginfo=%p " |
@@ -1124,7 +1124,7 @@ inline int ehca_rereg_mr_rereg1(struct ehca_shca *shca, | |||
1124 | ehca_mrmw_map_acl(acl, &hipz_acl); | 1124 | ehca_mrmw_map_acl(acl, &hipz_acl); |
1125 | ehca_mrmw_set_pgsize_hipz_acl(&hipz_acl); | 1125 | ehca_mrmw_set_pgsize_hipz_acl(&hipz_acl); |
1126 | 1126 | ||
1127 | kpage = kzalloc(H_CB_ALIGNMENT, GFP_KERNEL); | 1127 | kpage = ehca_alloc_fw_ctrlblock(); |
1128 | if (!kpage) { | 1128 | if (!kpage) { |
1129 | ehca_err(&shca->ib_device, "kpage alloc failed"); | 1129 | ehca_err(&shca->ib_device, "kpage alloc failed"); |
1130 | ret = -ENOMEM; | 1130 | ret = -ENOMEM; |
@@ -1181,7 +1181,7 @@ inline int ehca_rereg_mr_rereg1(struct ehca_shca *shca, | |||
1181 | } | 1181 | } |
1182 | 1182 | ||
1183 | ehca_rereg_mr_rereg1_exit1: | 1183 | ehca_rereg_mr_rereg1_exit1: |
1184 | kfree(kpage); | 1184 | ehca_free_fw_ctrlblock(kpage); |
1185 | ehca_rereg_mr_rereg1_exit0: | 1185 | ehca_rereg_mr_rereg1_exit0: |
1186 | if ( ret && (ret != -EAGAIN) ) | 1186 | if ( ret && (ret != -EAGAIN) ) |
1187 | ehca_err(&shca->ib_device, "ret=%x lkey=%x rkey=%x " | 1187 | ehca_err(&shca->ib_device, "ret=%x lkey=%x rkey=%x " |
diff --git a/drivers/infiniband/hw/ehca/ehca_pd.c b/drivers/infiniband/hw/ehca/ehca_pd.c index 2c3cdc6f7b39..d5345e5b3cd6 100644 --- a/drivers/infiniband/hw/ehca/ehca_pd.c +++ b/drivers/infiniband/hw/ehca/ehca_pd.c | |||
@@ -50,7 +50,7 @@ struct ib_pd *ehca_alloc_pd(struct ib_device *device, | |||
50 | { | 50 | { |
51 | struct ehca_pd *pd; | 51 | struct ehca_pd *pd; |
52 | 52 | ||
53 | pd = kmem_cache_alloc(pd_cache, SLAB_KERNEL); | 53 | pd = kmem_cache_alloc(pd_cache, GFP_KERNEL); |
54 | if (!pd) { | 54 | if (!pd) { |
55 | ehca_err(device, "device=%p context=%p out of memory", | 55 | ehca_err(device, "device=%p context=%p out of memory", |
56 | device, context); | 56 | device, context); |
diff --git a/drivers/infiniband/hw/ehca/ehca_qp.c b/drivers/infiniband/hw/ehca/ehca_qp.c index 4394123cdbd7..c6c9cef203e3 100644 --- a/drivers/infiniband/hw/ehca/ehca_qp.c +++ b/drivers/infiniband/hw/ehca/ehca_qp.c | |||
@@ -450,7 +450,7 @@ struct ib_qp *ehca_create_qp(struct ib_pd *pd, | |||
450 | if (pd->uobject && udata) | 450 | if (pd->uobject && udata) |
451 | context = pd->uobject->context; | 451 | context = pd->uobject->context; |
452 | 452 | ||
453 | my_qp = kmem_cache_alloc(qp_cache, SLAB_KERNEL); | 453 | my_qp = kmem_cache_alloc(qp_cache, GFP_KERNEL); |
454 | if (!my_qp) { | 454 | if (!my_qp) { |
455 | ehca_err(pd->device, "pd=%p not enough memory to alloc qp", pd); | 455 | ehca_err(pd->device, "pd=%p not enough memory to alloc qp", pd); |
456 | return ERR_PTR(-ENOMEM); | 456 | return ERR_PTR(-ENOMEM); |
@@ -732,8 +732,7 @@ static int prepare_sqe_rts(struct ehca_qp *my_qp, struct ehca_shca *shca, | |||
732 | u64 h_ret; | 732 | u64 h_ret; |
733 | struct ipz_queue *squeue; | 733 | struct ipz_queue *squeue; |
734 | void *bad_send_wqe_p, *bad_send_wqe_v; | 734 | void *bad_send_wqe_p, *bad_send_wqe_v; |
735 | void *squeue_start_p, *squeue_end_p; | 735 | u64 q_ofs; |
736 | void *squeue_start_v, *squeue_end_v; | ||
737 | struct ehca_wqe *wqe; | 736 | struct ehca_wqe *wqe; |
738 | int qp_num = my_qp->ib_qp.qp_num; | 737 | int qp_num = my_qp->ib_qp.qp_num; |
739 | 738 | ||
@@ -755,26 +754,23 @@ static int prepare_sqe_rts(struct ehca_qp *my_qp, struct ehca_shca *shca, | |||
755 | if (ehca_debug_level) | 754 | if (ehca_debug_level) |
756 | ehca_dmp(bad_send_wqe_v, 32, "qp_num=%x bad_wqe", qp_num); | 755 | ehca_dmp(bad_send_wqe_v, 32, "qp_num=%x bad_wqe", qp_num); |
757 | squeue = &my_qp->ipz_squeue; | 756 | squeue = &my_qp->ipz_squeue; |
758 | squeue_start_p = (void*)virt_to_abs(ipz_qeit_calc(squeue, 0L)); | 757 | if (ipz_queue_abs_to_offset(squeue, (u64)bad_send_wqe_p, &q_ofs)) { |
759 | squeue_end_p = squeue_start_p+squeue->queue_length; | 758 | ehca_err(&shca->ib_device, "failed to get wqe offset qp_num=%x" |
760 | squeue_start_v = abs_to_virt((u64)squeue_start_p); | 759 | " bad_send_wqe_p=%p", qp_num, bad_send_wqe_p); |
761 | squeue_end_v = abs_to_virt((u64)squeue_end_p); | 760 | return -EFAULT; |
762 | ehca_dbg(&shca->ib_device, "qp_num=%x squeue_start_v=%p squeue_end_v=%p", | 761 | } |
763 | qp_num, squeue_start_v, squeue_end_v); | ||
764 | 762 | ||
765 | /* loop sets wqe's purge bit */ | 763 | /* loop sets wqe's purge bit */ |
766 | wqe = (struct ehca_wqe*)bad_send_wqe_v; | 764 | wqe = (struct ehca_wqe*)ipz_qeit_calc(squeue, q_ofs); |
767 | *bad_wqe_cnt = 0; | 765 | *bad_wqe_cnt = 0; |
768 | while (wqe->optype != 0xff && wqe->wqef != 0xff) { | 766 | while (wqe->optype != 0xff && wqe->wqef != 0xff) { |
769 | if (ehca_debug_level) | 767 | if (ehca_debug_level) |
770 | ehca_dmp(wqe, 32, "qp_num=%x wqe", qp_num); | 768 | ehca_dmp(wqe, 32, "qp_num=%x wqe", qp_num); |
771 | wqe->nr_of_data_seg = 0; /* suppress data access */ | 769 | wqe->nr_of_data_seg = 0; /* suppress data access */ |
772 | wqe->wqef = WQEF_PURGE; /* WQE to be purged */ | 770 | wqe->wqef = WQEF_PURGE; /* WQE to be purged */ |
773 | wqe = (struct ehca_wqe*)((u8*)wqe+squeue->qe_size); | 771 | q_ofs = ipz_queue_advance_offset(squeue, q_ofs); |
772 | wqe = (struct ehca_wqe*)ipz_qeit_calc(squeue, q_ofs); | ||
774 | *bad_wqe_cnt = (*bad_wqe_cnt)+1; | 773 | *bad_wqe_cnt = (*bad_wqe_cnt)+1; |
775 | if ((void*)wqe >= squeue_end_v) { | ||
776 | wqe = squeue_start_v; | ||
777 | } | ||
778 | } | 774 | } |
779 | /* | 775 | /* |
780 | * bad wqe will be reprocessed and ignored when pol_cq() is called, | 776 | * bad wqe will be reprocessed and ignored when pol_cq() is called, |
@@ -811,8 +807,8 @@ static int internal_modify_qp(struct ib_qp *ibqp, | |||
811 | unsigned long spl_flags = 0; | 807 | unsigned long spl_flags = 0; |
812 | 808 | ||
813 | /* do query_qp to obtain current attr values */ | 809 | /* do query_qp to obtain current attr values */ |
814 | mqpcb = kzalloc(H_CB_ALIGNMENT, GFP_KERNEL); | 810 | mqpcb = ehca_alloc_fw_ctrlblock(); |
815 | if (mqpcb == NULL) { | 811 | if (!mqpcb) { |
816 | ehca_err(ibqp->device, "Could not get zeroed page for mqpcb " | 812 | ehca_err(ibqp->device, "Could not get zeroed page for mqpcb " |
817 | "ehca_qp=%p qp_num=%x ", my_qp, ibqp->qp_num); | 813 | "ehca_qp=%p qp_num=%x ", my_qp, ibqp->qp_num); |
818 | return -ENOMEM; | 814 | return -ENOMEM; |
@@ -1225,7 +1221,7 @@ modify_qp_exit2: | |||
1225 | } | 1221 | } |
1226 | 1222 | ||
1227 | modify_qp_exit1: | 1223 | modify_qp_exit1: |
1228 | kfree(mqpcb); | 1224 | ehca_free_fw_ctrlblock(mqpcb); |
1229 | 1225 | ||
1230 | return ret; | 1226 | return ret; |
1231 | } | 1227 | } |
@@ -1277,7 +1273,7 @@ int ehca_query_qp(struct ib_qp *qp, | |||
1277 | return -EINVAL; | 1273 | return -EINVAL; |
1278 | } | 1274 | } |
1279 | 1275 | ||
1280 | qpcb = kzalloc(H_CB_ALIGNMENT, GFP_KERNEL ); | 1276 | qpcb = ehca_alloc_fw_ctrlblock(); |
1281 | if (!qpcb) { | 1277 | if (!qpcb) { |
1282 | ehca_err(qp->device,"Out of memory for qpcb " | 1278 | ehca_err(qp->device,"Out of memory for qpcb " |
1283 | "ehca_qp=%p qp_num=%x", my_qp, qp->qp_num); | 1279 | "ehca_qp=%p qp_num=%x", my_qp, qp->qp_num); |
@@ -1401,7 +1397,7 @@ int ehca_query_qp(struct ib_qp *qp, | |||
1401 | ehca_dmp(qpcb, 4*70, "qp_num=%x", qp->qp_num); | 1397 | ehca_dmp(qpcb, 4*70, "qp_num=%x", qp->qp_num); |
1402 | 1398 | ||
1403 | query_qp_exit1: | 1399 | query_qp_exit1: |
1404 | kfree(qpcb); | 1400 | ehca_free_fw_ctrlblock(qpcb); |
1405 | 1401 | ||
1406 | return ret; | 1402 | return ret; |
1407 | } | 1403 | } |
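The prepare_sqe_rts() rework above is the substantive change in this file: the old code walked the send queue with raw pointer arithmetic between squeue_start_v and squeue_end_v, which silently assumes the queue's pages are virtually contiguous. The new code works in queue offsets and translates back to a mapped address per entry, which stays correct however the pages were allocated. Condensed from the hunks above, the purge loop now reads:

        u64 q_ofs;
        struct ehca_wqe *wqe;

        /* translate the absolute address reported by firmware into an offset */
        if (ipz_queue_abs_to_offset(squeue, (u64)bad_send_wqe_p, &q_ofs))
                return -EFAULT;

        wqe = (struct ehca_wqe *)ipz_qeit_calc(squeue, q_ofs);
        while (wqe->optype != 0xff && wqe->wqef != 0xff) {
                wqe->nr_of_data_seg = 0;        /* suppress data access */
                wqe->wqef = WQEF_PURGE;         /* WQE to be purged */
                q_ofs = ipz_queue_advance_offset(squeue, q_ofs);
                wqe = (struct ehca_wqe *)ipz_qeit_calc(squeue, q_ofs);
        }

The explicit wrap-around check against squeue_end_v disappears because ipz_queue_advance_offset() wraps to offset 0 itself (see ipz_pt_fn.h below).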
diff --git a/drivers/infiniband/hw/ehca/ehca_tools.h b/drivers/infiniband/hw/ehca/ehca_tools.h index 809da3ef706b..973c4b591545 100644 --- a/drivers/infiniband/hw/ehca/ehca_tools.h +++ b/drivers/infiniband/hw/ehca/ehca_tools.h | |||
@@ -63,6 +63,7 @@ | |||
63 | #include <asm/ibmebus.h> | 63 | #include <asm/ibmebus.h> |
64 | #include <asm/io.h> | 64 | #include <asm/io.h> |
65 | #include <asm/pgtable.h> | 65 | #include <asm/pgtable.h> |
66 | #include <asm/hvcall.h> | ||
66 | 67 | ||
67 | extern int ehca_debug_level; | 68 | extern int ehca_debug_level; |
68 | 69 | ||
diff --git a/drivers/infiniband/hw/ehca/hipz_hw.h b/drivers/infiniband/hw/ehca/hipz_hw.h index 3fc92b031c50..fad91368dc5a 100644 --- a/drivers/infiniband/hw/ehca/hipz_hw.h +++ b/drivers/infiniband/hw/ehca/hipz_hw.h | |||
@@ -45,6 +45,8 @@ | |||
45 | 45 | ||
46 | #include "ehca_tools.h" | 46 | #include "ehca_tools.h" |
47 | 47 | ||
48 | #define EHCA_MAX_MTU 4 | ||
49 | |||
48 | /* QP Table Entry Memory Map */ | 50 | /* QP Table Entry Memory Map */ |
49 | struct hipz_qptemm { | 51 | struct hipz_qptemm { |
50 | u64 qpx_hcr; | 52 | u64 qpx_hcr; |
diff --git a/drivers/infiniband/hw/ehca/ipz_pt_fn.c b/drivers/infiniband/hw/ehca/ipz_pt_fn.c index e028ff1588cc..bf7a40088f61 100644 --- a/drivers/infiniband/hw/ehca/ipz_pt_fn.c +++ b/drivers/infiniband/hw/ehca/ipz_pt_fn.c | |||
@@ -70,6 +70,19 @@ void *ipz_qeit_eq_get_inc(struct ipz_queue *queue) | |||
70 | return ret; | 70 | return ret; |
71 | } | 71 | } |
72 | 72 | ||
73 | int ipz_queue_abs_to_offset(struct ipz_queue *queue, u64 addr, u64 *q_offset) | ||
74 | { | ||
75 | int i; | ||
76 | for (i = 0; i < queue->queue_length / queue->pagesize; i++) { | ||
77 | u64 page = (u64)virt_to_abs(queue->queue_pages[i]); | ||
78 | if (addr >= page && addr < page + queue->pagesize) { | ||
79 | *q_offset = addr - page + i * queue->pagesize; | ||
80 | return 0; | ||
81 | } | ||
82 | } | ||
83 | return -EINVAL; | ||
84 | } | ||
85 | |||
73 | int ipz_queue_ctor(struct ipz_queue *queue, | 86 | int ipz_queue_ctor(struct ipz_queue *queue, |
74 | const u32 nr_of_pages, | 87 | const u32 nr_of_pages, |
75 | const u32 pagesize, const u32 qe_size, const u32 nr_of_sg) | 88 | const u32 pagesize, const u32 qe_size, const u32 nr_of_sg) |
diff --git a/drivers/infiniband/hw/ehca/ipz_pt_fn.h b/drivers/infiniband/hw/ehca/ipz_pt_fn.h index 2f13509d5257..dc3bda2634b7 100644 --- a/drivers/infiniband/hw/ehca/ipz_pt_fn.h +++ b/drivers/infiniband/hw/ehca/ipz_pt_fn.h | |||
@@ -150,6 +150,21 @@ static inline void *ipz_qeit_reset(struct ipz_queue *queue) | |||
150 | return ipz_qeit_get(queue); | 150 | return ipz_qeit_get(queue); |
151 | } | 151 | } |
152 | 152 | ||
153 | /* | ||
154 | * return the q_offset corresponding to an absolute address | ||
155 | */ | ||
156 | int ipz_queue_abs_to_offset(struct ipz_queue *queue, u64 addr, u64 *q_offset); | ||
157 | |||
158 | /* | ||
159 | * return the next queue offset. don't modify the queue. | ||
160 | */ | ||
161 | static inline u64 ipz_queue_advance_offset(struct ipz_queue *queue, u64 offset) | ||
162 | { | ||
163 | offset += queue->qe_size; | ||
164 | if (offset >= queue->queue_length) offset = 0; | ||
165 | return offset; | ||
166 | } | ||
167 | |||
153 | /* struct generic page table */ | 168 | /* struct generic page table */ |
154 | struct ipz_pt { | 169 | struct ipz_pt { |
155 | u64 entries[EHCA_PT_ENTRIES]; | 170 | u64 entries[EHCA_PT_ENTRIES]; |
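Two properties of these new helpers are worth spelling out. ipz_queue_abs_to_offset() is a linear scan over the queue's page list, O(number of pages), so it belongs on slow paths such as the error recovery above, not in a fast path. ipz_queue_advance_offset() wraps to 0 once the next entry would pass queue_length, which makes a full circular traversal trivial; a hypothetical walk, assuming queue_length is a multiple of qe_size:

        u64 ofs = 0;
        do {
                void *qe = ipz_qeit_calc(queue, ofs);   /* offset -> mapped address */
                /* ... inspect *qe ... */
                ofs = ipz_queue_advance_offset(queue, ofs);
        } while (ofs != 0);     /* wrapped: every entry visited exactly once */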
diff --git a/drivers/infiniband/hw/ipath/Kconfig b/drivers/infiniband/hw/ipath/Kconfig index 574a678e7fdd..90c14543677d 100644 --- a/drivers/infiniband/hw/ipath/Kconfig +++ b/drivers/infiniband/hw/ipath/Kconfig | |||
@@ -1,6 +1,6 @@ | |||
1 | config INFINIBAND_IPATH | 1 | config INFINIBAND_IPATH |
2 | tristate "QLogic InfiniPath Driver" | 2 | tristate "QLogic InfiniPath Driver" |
3 | depends on PCI_MSI && 64BIT && INFINIBAND | 3 | depends on (PCI_MSI || HT_IRQ) && 64BIT && INFINIBAND && NET |
4 | ---help--- | 4 | ---help--- |
5 | This is a driver for QLogic InfiniPath host channel adapters, | 5 | This is a driver for QLogic InfiniPath host channel adapters, |
6 | including InfiniBand verbs support. This driver allows these | 6 | including InfiniBand verbs support. This driver allows these |
diff --git a/drivers/infiniband/hw/ipath/Makefile b/drivers/infiniband/hw/ipath/Makefile index 5e29cb0095e5..7dc10551cf18 100644 --- a/drivers/infiniband/hw/ipath/Makefile +++ b/drivers/infiniband/hw/ipath/Makefile | |||
@@ -10,8 +10,6 @@ ib_ipath-y := \ | |||
10 | ipath_eeprom.o \ | 10 | ipath_eeprom.o \ |
11 | ipath_file_ops.o \ | 11 | ipath_file_ops.o \ |
12 | ipath_fs.o \ | 12 | ipath_fs.o \ |
13 | ipath_iba6110.o \ | ||
14 | ipath_iba6120.o \ | ||
15 | ipath_init_chip.o \ | 13 | ipath_init_chip.o \ |
16 | ipath_intr.o \ | 14 | ipath_intr.o \ |
17 | ipath_keys.o \ | 15 | ipath_keys.o \ |
@@ -31,5 +29,8 @@ ib_ipath-y := \ | |||
31 | ipath_verbs_mcast.o \ | 29 | ipath_verbs_mcast.o \ |
32 | ipath_verbs.o | 30 | ipath_verbs.o |
33 | 31 | ||
32 | ib_ipath-$(CONFIG_HT_IRQ) += ipath_iba6110.o | ||
33 | ib_ipath-$(CONFIG_PCI_MSI) += ipath_iba6120.o | ||
34 | |||
34 | ib_ipath-$(CONFIG_X86_64) += ipath_wc_x86_64.o | 35 | ib_ipath-$(CONFIG_X86_64) += ipath_wc_x86_64.o |
35 | ib_ipath-$(CONFIG_PPC64) += ipath_wc_ppc64.o | 36 | ib_ipath-$(CONFIG_PPC64) += ipath_wc_ppc64.o |
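The Makefile change is the standard Kbuild idiom for optional objects: CONFIG_HT_IRQ and CONFIG_PCI_MSI are bool symbols, so each conditional line expands either to ib_ipath-y += ... (object linked into the module) or to ib_ipath- += ... (a dead variable Kbuild never reads). With, say, PCI_MSI=y and HT_IRQ unset, the effective list is:

        ib_ipath-y += ipath_iba6120.o   # CONFIG_PCI_MSI=y -> joins the -y list
        ib_ipath-  += ipath_iba6110.o   # CONFIG_HT_IRQ unset -> ignored

which is what lets ipath_driver.c (below) compile out the matching device-ID case labels with #ifdef instead of carrying dead chip support.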
diff --git a/drivers/infiniband/hw/ipath/ipath_diag.c b/drivers/infiniband/hw/ipath/ipath_diag.c index 29958b6e0214..28c087b824c2 100644 --- a/drivers/infiniband/hw/ipath/ipath_diag.c +++ b/drivers/infiniband/hw/ipath/ipath_diag.c | |||
@@ -67,19 +67,54 @@ static struct file_operations diag_file_ops = { | |||
67 | .release = ipath_diag_release | 67 | .release = ipath_diag_release |
68 | }; | 68 | }; |
69 | 69 | ||
70 | static ssize_t ipath_diagpkt_write(struct file *fp, | ||
71 | const char __user *data, | ||
72 | size_t count, loff_t *off); | ||
73 | |||
74 | static struct file_operations diagpkt_file_ops = { | ||
75 | .owner = THIS_MODULE, | ||
76 | .write = ipath_diagpkt_write, | ||
77 | }; | ||
78 | |||
79 | static atomic_t diagpkt_count = ATOMIC_INIT(0); | ||
80 | static struct cdev *diagpkt_cdev; | ||
81 | static struct class_device *diagpkt_class_dev; | ||
82 | |||
70 | int ipath_diag_add(struct ipath_devdata *dd) | 83 | int ipath_diag_add(struct ipath_devdata *dd) |
71 | { | 84 | { |
72 | char name[16]; | 85 | char name[16]; |
86 | int ret = 0; | ||
87 | |||
88 | if (atomic_inc_return(&diagpkt_count) == 1) { | ||
89 | ret = ipath_cdev_init(IPATH_DIAGPKT_MINOR, | ||
90 | "ipath_diagpkt", &diagpkt_file_ops, | ||
91 | &diagpkt_cdev, &diagpkt_class_dev); | ||
92 | |||
93 | if (ret) { | ||
94 | ipath_dev_err(dd, "Couldn't create ipath_diagpkt " | ||
95 | "device: %d", ret); | ||
96 | goto done; | ||
97 | } | ||
98 | } | ||
73 | 99 | ||
74 | snprintf(name, sizeof(name), "ipath_diag%d", dd->ipath_unit); | 100 | snprintf(name, sizeof(name), "ipath_diag%d", dd->ipath_unit); |
75 | 101 | ||
76 | return ipath_cdev_init(IPATH_DIAG_MINOR_BASE + dd->ipath_unit, name, | 102 | ret = ipath_cdev_init(IPATH_DIAG_MINOR_BASE + dd->ipath_unit, name, |
77 | &diag_file_ops, &dd->diag_cdev, | 103 | &diag_file_ops, &dd->diag_cdev, |
78 | &dd->diag_class_dev); | 104 | &dd->diag_class_dev); |
105 | if (ret) | ||
106 | ipath_dev_err(dd, "Couldn't create %s device: %d", | ||
107 | name, ret); | ||
108 | |||
109 | done: | ||
110 | return ret; | ||
79 | } | 111 | } |
80 | 112 | ||
81 | void ipath_diag_remove(struct ipath_devdata *dd) | 113 | void ipath_diag_remove(struct ipath_devdata *dd) |
82 | { | 114 | { |
115 | if (atomic_dec_and_test(&diagpkt_count)) | ||
116 | ipath_cdev_cleanup(&diagpkt_cdev, &diagpkt_class_dev); | ||
117 | |||
83 | ipath_cdev_cleanup(&dd->diag_cdev, &dd->diag_class_dev); | 118 | ipath_cdev_cleanup(&dd->diag_cdev, &dd->diag_class_dev); |
84 | } | 119 | } |
85 | 120 | ||
@@ -275,30 +310,6 @@ bail: | |||
275 | return ret; | 310 | return ret; |
276 | } | 311 | } |
277 | 312 | ||
278 | static ssize_t ipath_diagpkt_write(struct file *fp, | ||
279 | const char __user *data, | ||
280 | size_t count, loff_t *off); | ||
281 | |||
282 | static struct file_operations diagpkt_file_ops = { | ||
283 | .owner = THIS_MODULE, | ||
284 | .write = ipath_diagpkt_write, | ||
285 | }; | ||
286 | |||
287 | static struct cdev *diagpkt_cdev; | ||
288 | static struct class_device *diagpkt_class_dev; | ||
289 | |||
290 | int __init ipath_diagpkt_add(void) | ||
291 | { | ||
292 | return ipath_cdev_init(IPATH_DIAGPKT_MINOR, | ||
293 | "ipath_diagpkt", &diagpkt_file_ops, | ||
294 | &diagpkt_cdev, &diagpkt_class_dev); | ||
295 | } | ||
296 | |||
297 | void __exit ipath_diagpkt_remove(void) | ||
298 | { | ||
299 | ipath_cdev_cleanup(&diagpkt_cdev, &diagpkt_class_dev); | ||
300 | } | ||
301 | |||
302 | /** | 313 | /** |
303 | * ipath_diagpkt_write - write an IB packet | 314 | * ipath_diagpkt_write - write an IB packet |
304 | * @fp: the diag data device file pointer | 315 | * @fp: the diag data device file pointer |
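Creating /dev/ipath_diagpkt used to be a module-init/exit job (ipath_diagpkt_add()/ipath_diagpkt_remove(), deleted above); it is now folded into the per-unit ipath_diag_add()/ipath_diag_remove() with an atomic counter, so the first unit creates the shared device and the last one tears it down. The pattern in isolation, with hypothetical names:

        static atomic_t users = ATOMIC_INIT(0);

        void shared_get(void)
        {
                if (atomic_inc_return(&users) == 1)
                        create_shared_device();         /* first caller only */
        }

        void shared_put(void)
        {
                if (atomic_dec_and_test(&users))
                        destroy_shared_device();        /* last caller only */
        }

Note the counter alone does not serialize creation against a concurrent second caller; here that is safe because device add/remove are serialized by the driver core.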
diff --git a/drivers/infiniband/hw/ipath/ipath_driver.c b/drivers/infiniband/hw/ipath/ipath_driver.c index 12cefa658f3b..1aeddb48e355 100644 --- a/drivers/infiniband/hw/ipath/ipath_driver.c +++ b/drivers/infiniband/hw/ipath/ipath_driver.c | |||
@@ -304,7 +304,7 @@ static int __devinit ipath_init_one(struct pci_dev *pdev, | |||
304 | } | 304 | } |
305 | addr = pci_resource_start(pdev, 0); | 305 | addr = pci_resource_start(pdev, 0); |
306 | len = pci_resource_len(pdev, 0); | 306 | len = pci_resource_len(pdev, 0); |
307 | ipath_cdbg(VERBOSE, "regbase (0) %llx len %d irq %x, vend %x/%x " | 307 | ipath_cdbg(VERBOSE, "regbase (0) %llx len %d pdev->irq %d, vend %x/%x " |
308 | "driver_data %lx\n", addr, len, pdev->irq, ent->vendor, | 308 | "driver_data %lx\n", addr, len, pdev->irq, ent->vendor, |
309 | ent->device, ent->driver_data); | 309 | ent->device, ent->driver_data); |
310 | 310 | ||
@@ -390,12 +390,16 @@ static int __devinit ipath_init_one(struct pci_dev *pdev, | |||
390 | 390 | ||
391 | /* setup the chip-specific functions, as early as possible. */ | 391 | /* setup the chip-specific functions, as early as possible. */ |
392 | switch (ent->device) { | 392 | switch (ent->device) { |
393 | #ifdef CONFIG_HT_IRQ | ||
393 | case PCI_DEVICE_ID_INFINIPATH_HT: | 394 | case PCI_DEVICE_ID_INFINIPATH_HT: |
394 | ipath_init_iba6110_funcs(dd); | 395 | ipath_init_iba6110_funcs(dd); |
395 | break; | 396 | break; |
397 | #endif | ||
398 | #ifdef CONFIG_PCI_MSI | ||
396 | case PCI_DEVICE_ID_INFINIPATH_PE800: | 399 | case PCI_DEVICE_ID_INFINIPATH_PE800: |
397 | ipath_init_iba6120_funcs(dd); | 400 | ipath_init_iba6120_funcs(dd); |
398 | break; | 401 | break; |
402 | #endif | ||
399 | default: | 403 | default: |
400 | ipath_dev_err(dd, "Found unknown QLogic deviceid 0x%x, " | 404 | ipath_dev_err(dd, "Found unknown QLogic deviceid 0x%x, " |
401 | "failing\n", ent->device); | 405 | "failing\n", ent->device); |
@@ -467,15 +471,15 @@ static int __devinit ipath_init_one(struct pci_dev *pdev, | |||
467 | * check 0 irq after we return from chip-specific bus setup, since | 471 | * check 0 irq after we return from chip-specific bus setup, since |
468 | * that can affect this due to setup | 472 | * that can affect this due to setup |
469 | */ | 473 | */ |
470 | if (!pdev->irq) | 474 | if (!dd->ipath_irq) |
471 | ipath_dev_err(dd, "irq is 0, BIOS error? Interrupts won't " | 475 | ipath_dev_err(dd, "irq is 0, BIOS error? Interrupts won't " |
472 | "work\n"); | 476 | "work\n"); |
473 | else { | 477 | else { |
474 | ret = request_irq(pdev->irq, ipath_intr, IRQF_SHARED, | 478 | ret = request_irq(dd->ipath_irq, ipath_intr, IRQF_SHARED, |
475 | IPATH_DRV_NAME, dd); | 479 | IPATH_DRV_NAME, dd); |
476 | if (ret) { | 480 | if (ret) { |
477 | ipath_dev_err(dd, "Couldn't setup irq handler, " | 481 | ipath_dev_err(dd, "Couldn't setup irq handler, " |
478 | "irq=%u: %d\n", pdev->irq, ret); | 482 | "irq=%d: %d\n", dd->ipath_irq, ret); |
479 | goto bail_iounmap; | 483 | goto bail_iounmap; |
480 | } | 484 | } |
481 | } | 485 | } |
@@ -637,11 +641,10 @@ static void __devexit ipath_remove_one(struct pci_dev *pdev) | |||
637 | * free up port 0 (kernel) rcvhdr, egr bufs, and eventually tid bufs | 641 | * free up port 0 (kernel) rcvhdr, egr bufs, and eventually tid bufs |
638 | * for all versions of the driver, if they were allocated | 642 | * for all versions of the driver, if they were allocated |
639 | */ | 643 | */ |
640 | if (pdev->irq) { | 644 | if (dd->ipath_irq) { |
641 | ipath_cdbg(VERBOSE, | 645 | ipath_cdbg(VERBOSE, "unit %u free irq %d\n", |
642 | "unit %u free_irq of irq %x\n", | 646 | dd->ipath_unit, dd->ipath_irq); |
643 | dd->ipath_unit, pdev->irq); | 647 | dd->ipath_f_free_irq(dd); |
644 | free_irq(pdev->irq, dd); | ||
645 | } else | 648 | } else |
646 | ipath_dbg("irq is 0, not doing free_irq " | 649 | ipath_dbg("irq is 0, not doing free_irq " |
647 | "for unit %u\n", dd->ipath_unit); | 650 | "for unit %u\n", dd->ipath_unit); |
@@ -2005,18 +2008,8 @@ static int __init infinipath_init(void) | |||
2005 | goto bail_group; | 2008 | goto bail_group; |
2006 | } | 2009 | } |
2007 | 2010 | ||
2008 | ret = ipath_diagpkt_add(); | ||
2009 | if (ret < 0) { | ||
2010 | printk(KERN_ERR IPATH_DRV_NAME ": Unable to create " | ||
2011 | "diag data device: error %d\n", -ret); | ||
2012 | goto bail_ipathfs; | ||
2013 | } | ||
2014 | |||
2015 | goto bail; | 2011 | goto bail; |
2016 | 2012 | ||
2017 | bail_ipathfs: | ||
2018 | ipath_exit_ipathfs(); | ||
2019 | |||
2020 | bail_group: | 2013 | bail_group: |
2021 | ipath_driver_remove_group(&ipath_driver.driver); | 2014 | ipath_driver_remove_group(&ipath_driver.driver); |
2022 | 2015 | ||
diff --git a/drivers/infiniband/hw/ipath/ipath_file_ops.c b/drivers/infiniband/hw/ipath/ipath_file_ops.c index a9ddc6911f66..340f27e3ebff 100644 --- a/drivers/infiniband/hw/ipath/ipath_file_ops.c +++ b/drivers/infiniband/hw/ipath/ipath_file_ops.c | |||
@@ -1745,9 +1745,9 @@ static int ipath_assign_port(struct file *fp, | |||
1745 | goto done; | 1745 | goto done; |
1746 | } | 1746 | } |
1747 | 1747 | ||
1748 | i_minor = iminor(fp->f_dentry->d_inode) - IPATH_USER_MINOR_BASE; | 1748 | i_minor = iminor(fp->f_path.dentry->d_inode) - IPATH_USER_MINOR_BASE; |
1749 | ipath_cdbg(VERBOSE, "open on dev %lx (minor %d)\n", | 1749 | ipath_cdbg(VERBOSE, "open on dev %lx (minor %d)\n", |
1750 | (long)fp->f_dentry->d_inode->i_rdev, i_minor); | 1750 | (long)fp->f_path.dentry->d_inode->i_rdev, i_minor); |
1751 | 1751 | ||
1752 | if (i_minor) | 1752 | if (i_minor) |
1753 | ret = find_free_port(i_minor - 1, fp, uinfo); | 1753 | ret = find_free_port(i_minor - 1, fp, uinfo); |
diff --git a/drivers/infiniband/hw/ipath/ipath_fs.c b/drivers/infiniband/hw/ipath/ipath_fs.c index d9ff283f725e..79a60f020a21 100644 --- a/drivers/infiniband/hw/ipath/ipath_fs.c +++ b/drivers/infiniband/hw/ipath/ipath_fs.c | |||
@@ -118,7 +118,7 @@ static ssize_t atomic_counters_read(struct file *file, char __user *buf, | |||
118 | u16 i; | 118 | u16 i; |
119 | struct ipath_devdata *dd; | 119 | struct ipath_devdata *dd; |
120 | 120 | ||
121 | dd = file->f_dentry->d_inode->i_private; | 121 | dd = file->f_path.dentry->d_inode->i_private; |
122 | 122 | ||
123 | for (i = 0; i < NUM_COUNTERS; i++) | 123 | for (i = 0; i < NUM_COUNTERS; i++) |
124 | counters[i] = ipath_snap_cntr(dd, i); | 124 | counters[i] = ipath_snap_cntr(dd, i); |
@@ -138,7 +138,7 @@ static ssize_t atomic_node_info_read(struct file *file, char __user *buf, | |||
138 | struct ipath_devdata *dd; | 138 | struct ipath_devdata *dd; |
139 | u64 guid; | 139 | u64 guid; |
140 | 140 | ||
141 | dd = file->f_dentry->d_inode->i_private; | 141 | dd = file->f_path.dentry->d_inode->i_private; |
142 | 142 | ||
143 | guid = be64_to_cpu(dd->ipath_guid); | 143 | guid = be64_to_cpu(dd->ipath_guid); |
144 | 144 | ||
@@ -177,7 +177,7 @@ static ssize_t atomic_port_info_read(struct file *file, char __user *buf, | |||
177 | u32 tmp, tmp2; | 177 | u32 tmp, tmp2; |
178 | struct ipath_devdata *dd; | 178 | struct ipath_devdata *dd; |
179 | 179 | ||
180 | dd = file->f_dentry->d_inode->i_private; | 180 | dd = file->f_path.dentry->d_inode->i_private; |
181 | 181 | ||
182 | /* so we only initialize non-zero fields. */ | 182 | /* so we only initialize non-zero fields. */ |
183 | memset(portinfo, 0, sizeof portinfo); | 183 | memset(portinfo, 0, sizeof portinfo); |
@@ -324,7 +324,7 @@ static ssize_t flash_read(struct file *file, char __user *buf, | |||
324 | goto bail; | 324 | goto bail; |
325 | } | 325 | } |
326 | 326 | ||
327 | dd = file->f_dentry->d_inode->i_private; | 327 | dd = file->f_path.dentry->d_inode->i_private; |
328 | if (ipath_eeprom_read(dd, pos, tmp, count)) { | 328 | if (ipath_eeprom_read(dd, pos, tmp, count)) { |
329 | ipath_dev_err(dd, "failed to read from flash\n"); | 329 | ipath_dev_err(dd, "failed to read from flash\n"); |
330 | ret = -ENXIO; | 330 | ret = -ENXIO; |
@@ -377,7 +377,7 @@ static ssize_t flash_write(struct file *file, const char __user *buf, | |||
377 | goto bail_tmp; | 377 | goto bail_tmp; |
378 | } | 378 | } |
379 | 379 | ||
380 | dd = file->f_dentry->d_inode->i_private; | 380 | dd = file->f_path.dentry->d_inode->i_private; |
381 | if (ipath_eeprom_write(dd, pos, tmp, count)) { | 381 | if (ipath_eeprom_write(dd, pos, tmp, count)) { |
382 | ret = -ENXIO; | 382 | ret = -ENXIO; |
383 | ipath_dev_err(dd, "failed to write to flash\n"); | 383 | ipath_dev_err(dd, "failed to write to flash\n"); |
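file->f_dentry becomes file->f_path.dentry throughout this file and in ipath_file_ops.c above. This tracks the 2.6.20 VFS change that embedded the dentry/vfsmount pair in a struct path inside struct file; f_dentry survives only as a compatibility macro, and in-tree users were converted. The access pattern is otherwise unchanged:

        struct inode *inode = file->f_path.dentry->d_inode;
        struct ipath_devdata *dd = inode->i_private;    /* stashed at file creation */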
diff --git a/drivers/infiniband/hw/ipath/ipath_iba6110.c b/drivers/infiniband/hw/ipath/ipath_iba6110.c index 9e4e8d4c6e20..e57c7a351cb5 100644 --- a/drivers/infiniband/hw/ipath/ipath_iba6110.c +++ b/drivers/infiniband/hw/ipath/ipath_iba6110.c | |||
@@ -38,6 +38,7 @@ | |||
38 | 38 | ||
39 | #include <linux/pci.h> | 39 | #include <linux/pci.h> |
40 | #include <linux/delay.h> | 40 | #include <linux/delay.h> |
41 | #include <linux/htirq.h> | ||
41 | 42 | ||
42 | #include "ipath_kernel.h" | 43 | #include "ipath_kernel.h" |
43 | #include "ipath_registers.h" | 44 | #include "ipath_registers.h" |
@@ -913,49 +914,40 @@ static void slave_or_pri_blk(struct ipath_devdata *dd, struct pci_dev *pdev, | |||
913 | } | 914 | } |
914 | } | 915 | } |
915 | 916 | ||
916 | static int set_int_handler(struct ipath_devdata *dd, struct pci_dev *pdev, | 917 | static int ipath_ht_intconfig(struct ipath_devdata *dd) |
917 | int pos) | ||
918 | { | 918 | { |
919 | u32 int_handler_addr_lower; | 919 | int ret; |
920 | u32 int_handler_addr_upper; | ||
921 | u64 ihandler; | ||
922 | u32 intvec; | ||
923 | 920 | ||
924 | /* use indirection register to get the intr handler */ | 921 | if (dd->ipath_intconfig) { |
925 | pci_write_config_byte(pdev, pos + HT_INTR_REG_INDEX, 0x10); | 922 | ipath_write_kreg(dd, dd->ipath_kregs->kr_interruptconfig, |
926 | pci_read_config_dword(pdev, pos + 4, &int_handler_addr_lower); | 923 | dd->ipath_intconfig); /* interrupt address */ |
927 | pci_write_config_byte(pdev, pos + HT_INTR_REG_INDEX, 0x11); | 924 | ret = 0; |
928 | pci_read_config_dword(pdev, pos + 4, &int_handler_addr_upper); | 925 | } else { |
926 | ipath_dev_err(dd, "No interrupts enabled, couldn't setup " | ||
927 | "interrupt address\n"); | ||
928 | ret = -EINVAL; | ||
929 | } | ||
929 | 930 | ||
930 | ihandler = (u64) int_handler_addr_lower | | 931 | return ret; |
931 | ((u64) int_handler_addr_upper << 32); | 932 | } |
933 | |||
934 | static void ipath_ht_irq_update(struct pci_dev *dev, int irq, | ||
935 | struct ht_irq_msg *msg) | ||
936 | { | ||
937 | struct ipath_devdata *dd = pci_get_drvdata(dev); | ||
938 | u64 prev_intconfig = dd->ipath_intconfig; | ||
939 | |||
940 | dd->ipath_intconfig = msg->address_lo; | ||
941 | dd->ipath_intconfig |= ((u64) msg->address_hi) << 32; | ||
932 | 942 | ||
933 | /* | 943 | /* |
934 | * kernels with CONFIG_PCI_MSI set the vector in the irq field of | 944 | * If the previous value of dd->ipath_intconfig is zero, we're |
935 | * struct pci_device, so we use that to program the internal | 945 | * getting configured for the first time, and must not program the |
936 | * interrupt register (not config space) with that value. The BIOS | 946 | * intconfig register here (it will be programmed later, when the |
937 | * must still have done the basic MSI setup. | 947 | * hardware is ready). Otherwise, we should. |
938 | */ | ||
939 | intvec = pdev->irq; | ||
940 | /* | ||
941 | * clear any vector bits there; normally not set but we'll overload | ||
942 | * this for some debug purposes (setting the HTC debug register | ||
943 | * value from software, rather than GPIOs), so it might be set on a | ||
944 | * driver reload. | ||
945 | */ | 948 | */ |
946 | ihandler &= ~0xff0000; | 949 | if (prev_intconfig) |
947 | /* x86 vector goes in intrinfo[23:16] */ | 950 | ipath_ht_intconfig(dd); |
948 | ihandler |= intvec << 16; | ||
949 | ipath_cdbg(VERBOSE, "ihandler lower %x, upper %x, intvec %x, " | ||
950 | "interruptconfig %llx\n", int_handler_addr_lower, | ||
951 | int_handler_addr_upper, intvec, | ||
952 | (unsigned long long) ihandler); | ||
953 | |||
954 | /* can't program yet, so save for interrupt setup */ | ||
955 | dd->ipath_intconfig = ihandler; | ||
956 | /* keep going, so we find link control stuff also */ | ||
957 | |||
958 | return ihandler != 0; | ||
959 | } | 951 | } |
960 | 952 | ||
961 | /** | 953 | /** |
@@ -971,12 +963,19 @@ static int set_int_handler(struct ipath_devdata *dd, struct pci_dev *pdev, | |||
971 | static int ipath_setup_ht_config(struct ipath_devdata *dd, | 963 | static int ipath_setup_ht_config(struct ipath_devdata *dd, |
972 | struct pci_dev *pdev) | 964 | struct pci_dev *pdev) |
973 | { | 965 | { |
974 | int pos, ret = 0; | 966 | int pos, ret; |
975 | int ihandler = 0; | 967 | |
968 | ret = __ht_create_irq(pdev, 0, ipath_ht_irq_update); | ||
969 | if (ret < 0) { | ||
970 | ipath_dev_err(dd, "Couldn't create interrupt handler: " | ||
971 | "err %d\n", ret); | ||
972 | goto bail; | ||
973 | } | ||
974 | dd->ipath_irq = ret; | ||
975 | ret = 0; | ||
976 | 976 | ||
977 | /* | 977 | /* |
978 | * Read the capability info to find the interrupt info, and also | 978 | * Handle clearing CRC errors in linkctrl register if necessary. We |
979 | * handle clearing CRC errors in linkctrl register if necessary. We | ||
980 | * do this early, before we ever enable errors or hardware errors, | 979 | * do this early, before we ever enable errors or hardware errors, |
981 | * mostly to avoid causing the chip to enter freeze mode. | 980 | * mostly to avoid causing the chip to enter freeze mode. |
982 | */ | 981 | */ |
@@ -1000,17 +999,9 @@ static int ipath_setup_ht_config(struct ipath_devdata *dd, | |||
1000 | } | 999 | } |
1001 | if (!(cap_type & 0xE0)) | 1000 | if (!(cap_type & 0xE0)) |
1002 | slave_or_pri_blk(dd, pdev, pos, cap_type); | 1001 | slave_or_pri_blk(dd, pdev, pos, cap_type); |
1003 | else if (cap_type == HT_INTR_DISC_CONFIG) | ||
1004 | ihandler = set_int_handler(dd, pdev, pos); | ||
1005 | } while ((pos = pci_find_next_capability(pdev, pos, | 1002 | } while ((pos = pci_find_next_capability(pdev, pos, |
1006 | PCI_CAP_ID_HT))); | 1003 | PCI_CAP_ID_HT))); |
1007 | 1004 | ||
1008 | if (!ihandler) { | ||
1009 | ipath_dev_err(dd, "Couldn't find interrupt handler in " | ||
1010 | "config space\n"); | ||
1011 | ret = -ENODEV; | ||
1012 | } | ||
1013 | |||
1014 | bail: | 1005 | bail: |
1015 | return ret; | 1006 | return ret; |
1016 | } | 1007 | } |
@@ -1360,25 +1351,6 @@ static void ipath_ht_quiet_serdes(struct ipath_devdata *dd) | |||
1360 | ipath_write_kreg(dd, dd->ipath_kregs->kr_serdesconfig0, val); | 1351 | ipath_write_kreg(dd, dd->ipath_kregs->kr_serdesconfig0, val); |
1361 | } | 1352 | } |
1362 | 1353 | ||
1363 | static int ipath_ht_intconfig(struct ipath_devdata *dd) | ||
1364 | { | ||
1365 | int ret; | ||
1366 | |||
1367 | if (!dd->ipath_intconfig) { | ||
1368 | ipath_dev_err(dd, "No interrupts enabled, couldn't setup " | ||
1369 | "interrupt address\n"); | ||
1370 | ret = 1; | ||
1371 | goto bail; | ||
1372 | } | ||
1373 | |||
1374 | ipath_write_kreg(dd, dd->ipath_kregs->kr_interruptconfig, | ||
1375 | dd->ipath_intconfig); /* interrupt address */ | ||
1376 | ret = 0; | ||
1377 | |||
1378 | bail: | ||
1379 | return ret; | ||
1380 | } | ||
1381 | |||
1382 | /** | 1354 | /** |
1383 | * ipath_pe_put_tid - write a TID in chip | 1355 | * ipath_pe_put_tid - write a TID in chip |
1384 | * @dd: the infinipath device | 1356 | * @dd: the infinipath device |
@@ -1575,6 +1547,14 @@ static int ipath_ht_get_base_info(struct ipath_portdata *pd, void *kbase) | |||
1575 | return 0; | 1547 | return 0; |
1576 | } | 1548 | } |
1577 | 1549 | ||
1550 | static void ipath_ht_free_irq(struct ipath_devdata *dd) | ||
1551 | { | ||
1552 | free_irq(dd->ipath_irq, dd); | ||
1553 | ht_destroy_irq(dd->ipath_irq); | ||
1554 | dd->ipath_irq = 0; | ||
1555 | dd->ipath_intconfig = 0; | ||
1556 | } | ||
1557 | |||
1578 | /** | 1558 | /** |
1579 | * ipath_init_iba6110_funcs - set up the chip-specific function pointers | 1559 | * ipath_init_iba6110_funcs - set up the chip-specific function pointers |
1580 | * @dd: the infinipath device | 1560 | * @dd: the infinipath device |
@@ -1598,6 +1578,7 @@ void ipath_init_iba6110_funcs(struct ipath_devdata *dd) | |||
1598 | dd->ipath_f_cleanup = ipath_setup_ht_cleanup; | 1578 | dd->ipath_f_cleanup = ipath_setup_ht_cleanup; |
1599 | dd->ipath_f_setextled = ipath_setup_ht_setextled; | 1579 | dd->ipath_f_setextled = ipath_setup_ht_setextled; |
1600 | dd->ipath_f_get_base_info = ipath_ht_get_base_info; | 1580 | dd->ipath_f_get_base_info = ipath_ht_get_base_info; |
1581 | dd->ipath_f_free_irq = ipath_ht_free_irq; | ||
1601 | 1582 | ||
1602 | /* | 1583 | /* |
1603 | * initialize chip-specific variables | 1584 | * initialize chip-specific variables |
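The iba6110 rework above drops the hand-rolled parsing of the HT interrupt-discovery capability (set_int_handler) in favor of the kernel's ht_irq layer: __ht_create_irq() allocates an irq number and invokes ipath_ht_irq_update() whenever the HT interrupt message changes, and the driver mirrors address_lo/address_hi into its interruptconfig register once the hardware can accept it. Allocation and teardown must stay paired, which is exactly what the new ipath_f_free_irq hook captures; condensed from the hunks above:

        /* setup (ipath_setup_ht_config) */
        ret = __ht_create_irq(pdev, 0, ipath_ht_irq_update);
        if (ret < 0)
                return ret;             /* no usable HT irq */
        dd->ipath_irq = ret;

        /* teardown (ipath_ht_free_irq) */
        free_irq(dd->ipath_irq, dd);    /* release our handler first */
        ht_destroy_irq(dd->ipath_irq);  /* then the HT irq itself */
        dd->ipath_irq = 0;

This is also why generic code now uses dd->ipath_irq and dd->ipath_f_free_irq() instead of pdev->irq/free_irq(): for HT chips the vector no longer comes from PCI config space.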
diff --git a/drivers/infiniband/hw/ipath/ipath_iba6120.c b/drivers/infiniband/hw/ipath/ipath_iba6120.c index a72ab9de386a..6af89683f710 100644 --- a/drivers/infiniband/hw/ipath/ipath_iba6120.c +++ b/drivers/infiniband/hw/ipath/ipath_iba6120.c | |||
@@ -851,6 +851,7 @@ static int ipath_setup_pe_config(struct ipath_devdata *dd, | |||
851 | int pos, ret; | 851 | int pos, ret; |
852 | 852 | ||
853 | dd->ipath_msi_lo = 0; /* used as a flag during reset processing */ | 853 | dd->ipath_msi_lo = 0; /* used as a flag during reset processing */ |
854 | dd->ipath_irq = pdev->irq; | ||
854 | ret = pci_enable_msi(dd->pcidev); | 855 | ret = pci_enable_msi(dd->pcidev); |
855 | if (ret) | 856 | if (ret) |
856 | ipath_dev_err(dd, "pci_enable_msi failed: %d, " | 857 | ipath_dev_err(dd, "pci_enable_msi failed: %d, " |
@@ -1323,6 +1324,12 @@ done: | |||
1323 | return 0; | 1324 | return 0; |
1324 | } | 1325 | } |
1325 | 1326 | ||
1327 | static void ipath_pe_free_irq(struct ipath_devdata *dd) | ||
1328 | { | ||
1329 | free_irq(dd->ipath_irq, dd); | ||
1330 | dd->ipath_irq = 0; | ||
1331 | } | ||
1332 | |||
1326 | /** | 1333 | /** |
1327 | * ipath_init_iba6120_funcs - set up the chip-specific function pointers | 1334 | * ipath_init_iba6120_funcs - set up the chip-specific function pointers |
1328 | * @dd: the infinipath device | 1335 | * @dd: the infinipath device |
@@ -1349,6 +1356,7 @@ void ipath_init_iba6120_funcs(struct ipath_devdata *dd) | |||
1349 | dd->ipath_f_cleanup = ipath_setup_pe_cleanup; | 1356 | dd->ipath_f_cleanup = ipath_setup_pe_cleanup; |
1350 | dd->ipath_f_setextled = ipath_setup_pe_setextled; | 1357 | dd->ipath_f_setextled = ipath_setup_pe_setextled; |
1351 | dd->ipath_f_get_base_info = ipath_pe_get_base_info; | 1358 | dd->ipath_f_get_base_info = ipath_pe_get_base_info; |
1359 | dd->ipath_f_free_irq = ipath_pe_free_irq; | ||
1352 | 1360 | ||
1353 | /* initialize chip-specific variables */ | 1361 | /* initialize chip-specific variables */ |
1354 | dd->ipath_f_tidtemplate = ipath_pe_tidtemplate; | 1362 | dd->ipath_f_tidtemplate = ipath_pe_tidtemplate; |
diff --git a/drivers/infiniband/hw/ipath/ipath_intr.c b/drivers/infiniband/hw/ipath/ipath_intr.c index d9079ee12030..5652a550d442 100644 --- a/drivers/infiniband/hw/ipath/ipath_intr.c +++ b/drivers/infiniband/hw/ipath/ipath_intr.c | |||
@@ -710,14 +710,14 @@ static void ipath_bad_intr(struct ipath_devdata *dd, u32 * unexpectp) | |||
710 | * linuxbios development work, and it may happen in | 710 | * linuxbios development work, and it may happen in |
711 | * the future again. | 711 | * the future again. |
712 | */ | 712 | */ |
713 | if (dd->pcidev && dd->pcidev->irq) { | 713 | if (dd->pcidev && dd->ipath_irq) { |
714 | ipath_dev_err(dd, "Now %u unexpected " | 714 | ipath_dev_err(dd, "Now %u unexpected " |
715 | "interrupts, unregistering " | 715 | "interrupts, unregistering " |
716 | "interrupt handler\n", | 716 | "interrupt handler\n", |
717 | *unexpectp); | 717 | *unexpectp); |
718 | ipath_dbg("free_irq of irq %x\n", | 718 | ipath_dbg("free_irq of irq %d\n", |
719 | dd->pcidev->irq); | 719 | dd->ipath_irq); |
720 | free_irq(dd->pcidev->irq, dd); | 720 | dd->ipath_f_free_irq(dd); |
721 | } | 721 | } |
722 | } | 722 | } |
723 | if (ipath_read_kreg32(dd, dd->ipath_kregs->kr_intmask)) { | 723 | if (ipath_read_kreg32(dd, dd->ipath_kregs->kr_intmask)) { |
@@ -753,7 +753,7 @@ static void ipath_bad_regread(struct ipath_devdata *dd) | |||
753 | if (allbits == 2) { | 753 | if (allbits == 2) { |
754 | ipath_dev_err(dd, "Still bad interrupt status, " | 754 | ipath_dev_err(dd, "Still bad interrupt status, " |
755 | "unregistering interrupt\n"); | 755 | "unregistering interrupt\n"); |
756 | free_irq(dd->pcidev->irq, dd); | 756 | dd->ipath_f_free_irq(dd); |
757 | } else if (allbits > 2) { | 757 | } else if (allbits > 2) { |
758 | if ((allbits % 10000) == 0) | 758 | if ((allbits % 10000) == 0) |
759 | printk("."); | 759 | printk("."); |
diff --git a/drivers/infiniband/hw/ipath/ipath_kernel.h b/drivers/infiniband/hw/ipath/ipath_kernel.h index 7c436697d0e4..986b2125b8f5 100644 --- a/drivers/infiniband/hw/ipath/ipath_kernel.h +++ b/drivers/infiniband/hw/ipath/ipath_kernel.h | |||
@@ -213,6 +213,8 @@ struct ipath_devdata { | |||
213 | void (*ipath_f_setextled)(struct ipath_devdata *, u64, u64); | 213 | void (*ipath_f_setextled)(struct ipath_devdata *, u64, u64); |
214 | /* fill out chip-specific fields */ | 214 | /* fill out chip-specific fields */ |
215 | int (*ipath_f_get_base_info)(struct ipath_portdata *, void *); | 215 | int (*ipath_f_get_base_info)(struct ipath_portdata *, void *); |
216 | /* free irq */ | ||
217 | void (*ipath_f_free_irq)(struct ipath_devdata *); | ||
216 | struct ipath_ibdev *verbs_dev; | 218 | struct ipath_ibdev *verbs_dev; |
217 | struct timer_list verbs_timer; | 219 | struct timer_list verbs_timer; |
218 | /* total dwords sent (summed from counter) */ | 220 | /* total dwords sent (summed from counter) */ |
@@ -328,6 +330,8 @@ struct ipath_devdata { | |||
328 | /* so we can rewrite it after a chip reset */ | 330 | /* so we can rewrite it after a chip reset */ |
329 | u32 ipath_pcibar1; | 331 | u32 ipath_pcibar1; |
330 | 332 | ||
333 | /* interrupt number */ | ||
334 | int ipath_irq; | ||
331 | /* HT/PCI Vendor ID (here for NodeInfo) */ | 335 | /* HT/PCI Vendor ID (here for NodeInfo) */ |
332 | u16 ipath_vendorid; | 336 | u16 ipath_vendorid; |
333 | /* HT/PCI Device ID (here for NodeInfo) */ | 337 | /* HT/PCI Device ID (here for NodeInfo) */ |
@@ -869,9 +873,6 @@ int ipath_device_create_group(struct device *, struct ipath_devdata *); | |||
869 | void ipath_device_remove_group(struct device *, struct ipath_devdata *); | 873 | void ipath_device_remove_group(struct device *, struct ipath_devdata *); |
870 | int ipath_expose_reset(struct device *); | 874 | int ipath_expose_reset(struct device *); |
871 | 875 | ||
872 | int ipath_diagpkt_add(void); | ||
873 | void ipath_diagpkt_remove(void); | ||
874 | |||
875 | int ipath_init_ipathfs(void); | 876 | int ipath_init_ipathfs(void); |
876 | void ipath_exit_ipathfs(void); | 877 | void ipath_exit_ipathfs(void); |
877 | int ipathfs_add_device(struct ipath_devdata *); | 878 | int ipathfs_add_device(struct ipath_devdata *); |
diff --git a/drivers/infiniband/hw/ipath/ipath_user_pages.c b/drivers/infiniband/hw/ipath/ipath_user_pages.c index 413754b1d8a2..8536aeb96af8 100644 --- a/drivers/infiniband/hw/ipath/ipath_user_pages.c +++ b/drivers/infiniband/hw/ipath/ipath_user_pages.c | |||
@@ -214,9 +214,10 @@ struct ipath_user_pages_work { | |||
214 | unsigned long num_pages; | 214 | unsigned long num_pages; |
215 | }; | 215 | }; |
216 | 216 | ||
217 | static void user_pages_account(void *ptr) | 217 | static void user_pages_account(struct work_struct *_work) |
218 | { | 218 | { |
219 | struct ipath_user_pages_work *work = ptr; | 219 | struct ipath_user_pages_work *work = |
220 | container_of(_work, struct ipath_user_pages_work, work); | ||
220 | 221 | ||
221 | down_write(&work->mm->mmap_sem); | 222 | down_write(&work->mm->mmap_sem); |
222 | work->mm->locked_vm -= work->num_pages; | 223 | work->mm->locked_vm -= work->num_pages; |
@@ -242,7 +243,7 @@ void ipath_release_user_pages_on_close(struct page **p, size_t num_pages) | |||
242 | 243 | ||
243 | goto bail; | 244 | goto bail; |
244 | 245 | ||
245 | INIT_WORK(&work->work, user_pages_account, work); | 246 | INIT_WORK(&work->work, user_pages_account); |
246 | work->mm = mm; | 247 | work->mm = mm; |
247 | work->num_pages = num_pages; | 248 | work->num_pages = num_pages; |
248 | 249 | ||
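This hunk is part of the tree-wide 2.6.20 workqueue conversion: a work handler now receives the struct work_struct pointer itself rather than an opaque void *, and INIT_WORK() loses its data argument. Handlers that need context embed the work_struct in a larger structure and recover it with container_of(), exactly as user_pages_account() does above. The general shape, with hypothetical names:

        struct my_work {
                struct work_struct work;        /* embedded, not a pointer */
                unsigned long payload;
        };

        static void my_handler(struct work_struct *w)
        {
                struct my_work *mw = container_of(w, struct my_work, work);
                /* ... use mw->payload ... */
                kfree(mw);
        }

        /* submission side */
        struct my_work *mw = kmalloc(sizeof(*mw), GFP_KERNEL);
        if (mw) {
                INIT_WORK(&mw->work, my_handler);
                mw->payload = 42;
                schedule_work(&mw->work);
        }

The same conversion shows up below in mthca_catas.c, where catas_reset() needs no per-work context and simply takes the new argument without using it.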
diff --git a/drivers/infiniband/hw/ipath/ipath_verbs.c b/drivers/infiniband/hw/ipath/ipath_verbs.c index a5456108dbad..acdee33ee1f8 100644 --- a/drivers/infiniband/hw/ipath/ipath_verbs.c +++ b/drivers/infiniband/hw/ipath/ipath_verbs.c | |||
@@ -1487,7 +1487,7 @@ int ipath_register_ib_device(struct ipath_devdata *dd) | |||
1487 | idev->pma_counter_select[1] = IB_PMA_PORT_RCV_DATA; | 1487 | idev->pma_counter_select[1] = IB_PMA_PORT_RCV_DATA; |
1488 | idev->pma_counter_select[2] = IB_PMA_PORT_XMIT_PKTS; | 1488 | idev->pma_counter_select[2] = IB_PMA_PORT_XMIT_PKTS; |
1489 | idev->pma_counter_select[3] = IB_PMA_PORT_RCV_PKTS; | 1489 | idev->pma_counter_select[3] = IB_PMA_PORT_RCV_PKTS; |
1490 | idev->pma_counter_select[5] = IB_PMA_PORT_XMIT_WAIT; | 1490 | idev->pma_counter_select[4] = IB_PMA_PORT_XMIT_WAIT; |
1491 | idev->link_width_enabled = 3; /* 1x or 4x */ | 1491 | idev->link_width_enabled = 3; /* 1x or 4x */ |
1492 | 1492 | ||
1493 | /* Snapshot current HW counters to "clear" them. */ | 1493 | /* Snapshot current HW counters to "clear" them. */ |
diff --git a/drivers/infiniband/hw/mthca/mthca_av.c b/drivers/infiniband/hw/mthca/mthca_av.c index 69599455aca2..27caf3b0648a 100644 --- a/drivers/infiniband/hw/mthca/mthca_av.c +++ b/drivers/infiniband/hw/mthca/mthca_av.c | |||
@@ -33,7 +33,6 @@ | |||
33 | * $Id: mthca_av.c 1349 2004-12-16 21:09:43Z roland $ | 33 | * $Id: mthca_av.c 1349 2004-12-16 21:09:43Z roland $ |
34 | */ | 34 | */ |
35 | 35 | ||
36 | #include <linux/init.h> | ||
37 | #include <linux/string.h> | 36 | #include <linux/string.h> |
38 | #include <linux/slab.h> | 37 | #include <linux/slab.h> |
39 | 38 | ||
@@ -190,7 +189,7 @@ int mthca_create_ah(struct mthca_dev *dev, | |||
190 | on_hca_fail: | 189 | on_hca_fail: |
191 | if (ah->type == MTHCA_AH_PCI_POOL) { | 190 | if (ah->type == MTHCA_AH_PCI_POOL) { |
192 | ah->av = pci_pool_alloc(dev->av_table.pool, | 191 | ah->av = pci_pool_alloc(dev->av_table.pool, |
193 | SLAB_ATOMIC, &ah->avdma); | 192 | GFP_ATOMIC, &ah->avdma); |
194 | if (!ah->av) | 193 | if (!ah->av) |
195 | return -ENOMEM; | 194 | return -ENOMEM; |
196 | 195 | ||
@@ -323,7 +322,7 @@ int mthca_ah_query(struct ib_ah *ibah, struct ib_ah_attr *attr) | |||
323 | return 0; | 322 | return 0; |
324 | } | 323 | } |
325 | 324 | ||
326 | int __devinit mthca_init_av_table(struct mthca_dev *dev) | 325 | int mthca_init_av_table(struct mthca_dev *dev) |
327 | { | 326 | { |
328 | int err; | 327 | int err; |
329 | 328 | ||
diff --git a/drivers/infiniband/hw/mthca/mthca_catas.c b/drivers/infiniband/hw/mthca/mthca_catas.c index cd044ea2dfa4..e948158a28d9 100644 --- a/drivers/infiniband/hw/mthca/mthca_catas.c +++ b/drivers/infiniband/hw/mthca/mthca_catas.c | |||
@@ -57,7 +57,7 @@ static int catas_reset_disable; | |||
57 | module_param_named(catas_reset_disable, catas_reset_disable, int, 0644); | 57 | module_param_named(catas_reset_disable, catas_reset_disable, int, 0644); |
58 | MODULE_PARM_DESC(catas_reset_disable, "disable reset on catastrophic event if nonzero"); | 58 | MODULE_PARM_DESC(catas_reset_disable, "disable reset on catastrophic event if nonzero"); |
59 | 59 | ||
60 | static void catas_reset(void *work_ptr) | 60 | static void catas_reset(struct work_struct *work) |
61 | { | 61 | { |
62 | struct mthca_dev *dev, *tmpdev; | 62 | struct mthca_dev *dev, *tmpdev; |
63 | LIST_HEAD(tlist); | 63 | LIST_HEAD(tlist); |
@@ -203,7 +203,7 @@ void mthca_stop_catas_poll(struct mthca_dev *dev) | |||
203 | 203 | ||
204 | int __init mthca_catas_init(void) | 204 | int __init mthca_catas_init(void) |
205 | { | 205 | { |
206 | INIT_WORK(&catas_work, catas_reset, NULL); | 206 | INIT_WORK(&catas_work, catas_reset); |
207 | 207 | ||
208 | catas_wq = create_singlethread_workqueue("mthca_catas"); | 208 | catas_wq = create_singlethread_workqueue("mthca_catas"); |
209 | if (!catas_wq) | 209 | if (!catas_wq) |
diff --git a/drivers/infiniband/hw/mthca/mthca_cmd.c b/drivers/infiniband/hw/mthca/mthca_cmd.c index 99a94d710935..768df7265b81 100644 --- a/drivers/infiniband/hw/mthca/mthca_cmd.c +++ b/drivers/infiniband/hw/mthca/mthca_cmd.c | |||
@@ -1820,11 +1820,11 @@ int mthca_MAD_IFC(struct mthca_dev *dev, int ignore_mkey, int ignore_bkey, | |||
1820 | 1820 | ||
1821 | #define MAD_IFC_BOX_SIZE 0x400 | 1821 | #define MAD_IFC_BOX_SIZE 0x400 |
1822 | #define MAD_IFC_MY_QPN_OFFSET 0x100 | 1822 | #define MAD_IFC_MY_QPN_OFFSET 0x100 |
1823 | #define MAD_IFC_RQPN_OFFSET 0x104 | 1823 | #define MAD_IFC_RQPN_OFFSET 0x108 |
1824 | #define MAD_IFC_SL_OFFSET 0x108 | 1824 | #define MAD_IFC_SL_OFFSET 0x10c |
1825 | #define MAD_IFC_G_PATH_OFFSET 0x109 | 1825 | #define MAD_IFC_G_PATH_OFFSET 0x10d |
1826 | #define MAD_IFC_RLID_OFFSET 0x10a | 1826 | #define MAD_IFC_RLID_OFFSET 0x10e |
1827 | #define MAD_IFC_PKEY_OFFSET 0x10e | 1827 | #define MAD_IFC_PKEY_OFFSET 0x112 |
1828 | #define MAD_IFC_GRH_OFFSET 0x140 | 1828 | #define MAD_IFC_GRH_OFFSET 0x140 |
1829 | 1829 | ||
1830 | inmailbox = mthca_alloc_mailbox(dev, GFP_KERNEL); | 1830 | inmailbox = mthca_alloc_mailbox(dev, GFP_KERNEL); |
@@ -1862,7 +1862,7 @@ int mthca_MAD_IFC(struct mthca_dev *dev, int ignore_mkey, int ignore_bkey, | |||
1862 | 1862 | ||
1863 | val = in_wc->dlid_path_bits | | 1863 | val = in_wc->dlid_path_bits | |
1864 | (in_wc->wc_flags & IB_WC_GRH ? 0x80 : 0); | 1864 | (in_wc->wc_flags & IB_WC_GRH ? 0x80 : 0); |
1865 | MTHCA_PUT(inbox, val, MAD_IFC_GRH_OFFSET); | 1865 | MTHCA_PUT(inbox, val, MAD_IFC_G_PATH_OFFSET); |
1866 | 1866 | ||
1867 | MTHCA_PUT(inbox, in_wc->slid, MAD_IFC_RLID_OFFSET); | 1867 | MTHCA_PUT(inbox, in_wc->slid, MAD_IFC_RLID_OFFSET); |
1868 | MTHCA_PUT(inbox, in_wc->pkey_index, MAD_IFC_PKEY_OFFSET); | 1868 | MTHCA_PUT(inbox, in_wc->pkey_index, MAD_IFC_PKEY_OFFSET); |
@@ -1870,7 +1870,7 @@ int mthca_MAD_IFC(struct mthca_dev *dev, int ignore_mkey, int ignore_bkey, | |||
1870 | if (in_grh) | 1870 | if (in_grh) |
1871 | memcpy(inbox + MAD_IFC_GRH_OFFSET, in_grh, 40); | 1871 | memcpy(inbox + MAD_IFC_GRH_OFFSET, in_grh, 40); |
1872 | 1872 | ||
1873 | op_modifier |= 0x10; | 1873 | op_modifier |= 0x4; |
1874 | 1874 | ||
1875 | in_modifier |= in_wc->slid << 16; | 1875 | in_modifier |= in_wc->slid << 16; |
1876 | } | 1876 | } |
diff --git a/drivers/infiniband/hw/mthca/mthca_cq.c b/drivers/infiniband/hw/mthca/mthca_cq.c index e393681ba7d4..283d50b76c3d 100644 --- a/drivers/infiniband/hw/mthca/mthca_cq.c +++ b/drivers/infiniband/hw/mthca/mthca_cq.c | |||
@@ -36,9 +36,10 @@ | |||
36 | * $Id: mthca_cq.c 1369 2004-12-20 16:17:07Z roland $ | 36 | * $Id: mthca_cq.c 1369 2004-12-20 16:17:07Z roland $ |
37 | */ | 37 | */ |
38 | 38 | ||
39 | #include <linux/init.h> | ||
40 | #include <linux/hardirq.h> | 39 | #include <linux/hardirq.h> |
41 | 40 | ||
41 | #include <asm/io.h> | ||
42 | |||
42 | #include <rdma/ib_pack.h> | 43 | #include <rdma/ib_pack.h> |
43 | 44 | ||
44 | #include "mthca_dev.h" | 45 | #include "mthca_dev.h" |
@@ -210,6 +211,11 @@ static inline void update_cons_index(struct mthca_dev *dev, struct mthca_cq *cq, | |||
210 | mthca_write64(doorbell, | 211 | mthca_write64(doorbell, |
211 | dev->kar + MTHCA_CQ_DOORBELL, | 212 | dev->kar + MTHCA_CQ_DOORBELL, |
212 | MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock)); | 213 | MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock)); |
214 | /* | ||
215 | * Make sure doorbells don't leak out of CQ spinlock | ||
216 | * and reach the HCA out of order: | ||
217 | */ | ||
218 | mmiowb(); | ||
213 | } | 219 | } |
214 | } | 220 | } |
215 | 221 | ||
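The new mmiowb() closes a subtle ordering hole: on platforms such as large ia64 systems, posted MMIO writes from two CPUs can reach the device out of order even when each CPU wrote under the same spinlock, because unlocking orders the CPUs' instruction streams but not their in-flight posted writes. mmiowb() guarantees this CPU's MMIO writes reach the device before any MMIO write issued by the next lock holder. The canonical shape, sketched generically (in the hunk above the lock sits behind MTHCA_GET_DOORBELL_LOCK()):

        spin_lock(&doorbell_lock);
        writeq(val, mmio_base + DOORBELL_OFFSET);   /* posted MMIO write */
        mmiowb();   /* ordered ahead of the next lock holder's doorbells */
        spin_unlock(&doorbell_lock);

The matching #include <asm/io.h> additions in this file and in mthca_qp.c below pull in the mmiowb() definition.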
@@ -963,7 +969,7 @@ void mthca_free_cq(struct mthca_dev *dev, | |||
963 | mthca_free_mailbox(dev, mailbox); | 969 | mthca_free_mailbox(dev, mailbox); |
964 | } | 970 | } |
965 | 971 | ||
966 | int __devinit mthca_init_cq_table(struct mthca_dev *dev) | 972 | int mthca_init_cq_table(struct mthca_dev *dev) |
967 | { | 973 | { |
968 | int err; | 974 | int err; |
969 | 975 | ||
diff --git a/drivers/infiniband/hw/mthca/mthca_eq.c b/drivers/infiniband/hw/mthca/mthca_eq.c index e284e0613a94..8ec9fa1ff9ea 100644 --- a/drivers/infiniband/hw/mthca/mthca_eq.c +++ b/drivers/infiniband/hw/mthca/mthca_eq.c | |||
@@ -33,7 +33,6 @@ | |||
33 | * $Id: mthca_eq.c 1382 2004-12-24 02:21:02Z roland $ | 33 | * $Id: mthca_eq.c 1382 2004-12-24 02:21:02Z roland $ |
34 | */ | 34 | */ |
35 | 35 | ||
36 | #include <linux/init.h> | ||
37 | #include <linux/errno.h> | 36 | #include <linux/errno.h> |
38 | #include <linux/interrupt.h> | 37 | #include <linux/interrupt.h> |
39 | #include <linux/pci.h> | 38 | #include <linux/pci.h> |
@@ -479,10 +478,10 @@ static irqreturn_t mthca_arbel_msi_x_interrupt(int irq, void *eq_ptr) | |||
479 | return IRQ_HANDLED; | 478 | return IRQ_HANDLED; |
480 | } | 479 | } |
481 | 480 | ||
482 | static int __devinit mthca_create_eq(struct mthca_dev *dev, | 481 | static int mthca_create_eq(struct mthca_dev *dev, |
483 | int nent, | 482 | int nent, |
484 | u8 intr, | 483 | u8 intr, |
485 | struct mthca_eq *eq) | 484 | struct mthca_eq *eq) |
486 | { | 485 | { |
487 | int npages; | 486 | int npages; |
488 | u64 *dma_list = NULL; | 487 | u64 *dma_list = NULL; |
@@ -664,9 +663,9 @@ static void mthca_free_irqs(struct mthca_dev *dev) | |||
664 | dev->eq_table.eq + i); | 663 | dev->eq_table.eq + i); |
665 | } | 664 | } |
666 | 665 | ||
667 | static int __devinit mthca_map_reg(struct mthca_dev *dev, | 666 | static int mthca_map_reg(struct mthca_dev *dev, |
668 | unsigned long offset, unsigned long size, | 667 | unsigned long offset, unsigned long size, |
669 | void __iomem **map) | 668 | void __iomem **map) |
670 | { | 669 | { |
671 | unsigned long base = pci_resource_start(dev->pdev, 0); | 670 | unsigned long base = pci_resource_start(dev->pdev, 0); |
672 | 671 | ||
@@ -691,7 +690,7 @@ static void mthca_unmap_reg(struct mthca_dev *dev, unsigned long offset, | |||
691 | iounmap(map); | 690 | iounmap(map); |
692 | } | 691 | } |
693 | 692 | ||
694 | static int __devinit mthca_map_eq_regs(struct mthca_dev *dev) | 693 | static int mthca_map_eq_regs(struct mthca_dev *dev) |
695 | { | 694 | { |
696 | if (mthca_is_memfree(dev)) { | 695 | if (mthca_is_memfree(dev)) { |
697 | /* | 696 | /* |
@@ -781,7 +780,7 @@ static void mthca_unmap_eq_regs(struct mthca_dev *dev) | |||
781 | } | 780 | } |
782 | } | 781 | } |
783 | 782 | ||
784 | int __devinit mthca_map_eq_icm(struct mthca_dev *dev, u64 icm_virt) | 783 | int mthca_map_eq_icm(struct mthca_dev *dev, u64 icm_virt) |
785 | { | 784 | { |
786 | int ret; | 785 | int ret; |
787 | u8 status; | 786 | u8 status; |
@@ -825,7 +824,7 @@ void mthca_unmap_eq_icm(struct mthca_dev *dev) | |||
825 | __free_page(dev->eq_table.icm_page); | 824 | __free_page(dev->eq_table.icm_page); |
826 | } | 825 | } |
827 | 826 | ||
828 | int __devinit mthca_init_eq_table(struct mthca_dev *dev) | 827 | int mthca_init_eq_table(struct mthca_dev *dev) |
829 | { | 828 | { |
830 | int err; | 829 | int err; |
831 | u8 status; | 830 | u8 status; |
diff --git a/drivers/infiniband/hw/mthca/mthca_mad.c b/drivers/infiniband/hw/mthca/mthca_mad.c index 45e106f14807..acfa41d968ee 100644 --- a/drivers/infiniband/hw/mthca/mthca_mad.c +++ b/drivers/infiniband/hw/mthca/mthca_mad.c | |||
@@ -317,7 +317,7 @@ err: | |||
317 | return ret; | 317 | return ret; |
318 | } | 318 | } |
319 | 319 | ||
320 | void __devexit mthca_free_agents(struct mthca_dev *dev) | 320 | void mthca_free_agents(struct mthca_dev *dev) |
321 | { | 321 | { |
322 | struct ib_mad_agent *agent; | 322 | struct ib_mad_agent *agent; |
323 | int p, q; | 323 | int p, q; |
diff --git a/drivers/infiniband/hw/mthca/mthca_main.c b/drivers/infiniband/hw/mthca/mthca_main.c index 47ea02148368..0491ec7a7c0a 100644 --- a/drivers/infiniband/hw/mthca/mthca_main.c +++ b/drivers/infiniband/hw/mthca/mthca_main.c | |||
@@ -98,7 +98,7 @@ static struct mthca_profile default_profile = { | |||
98 | .uarc_size = 1 << 18, /* Arbel only */ | 98 | .uarc_size = 1 << 18, /* Arbel only */ |
99 | }; | 99 | }; |
100 | 100 | ||
101 | static int __devinit mthca_tune_pci(struct mthca_dev *mdev) | 101 | static int mthca_tune_pci(struct mthca_dev *mdev) |
102 | { | 102 | { |
103 | int cap; | 103 | int cap; |
104 | u16 val; | 104 | u16 val; |
@@ -143,7 +143,7 @@ static int __devinit mthca_tune_pci(struct mthca_dev *mdev) | |||
143 | return 0; | 143 | return 0; |
144 | } | 144 | } |
145 | 145 | ||
146 | static int __devinit mthca_dev_lim(struct mthca_dev *mdev, struct mthca_dev_lim *dev_lim) | 146 | static int mthca_dev_lim(struct mthca_dev *mdev, struct mthca_dev_lim *dev_lim) |
147 | { | 147 | { |
148 | int err; | 148 | int err; |
149 | u8 status; | 149 | u8 status; |
@@ -255,7 +255,7 @@ static int __devinit mthca_dev_lim(struct mthca_dev *mdev, struct mthca_dev_lim | |||
255 | return 0; | 255 | return 0; |
256 | } | 256 | } |
257 | 257 | ||
258 | static int __devinit mthca_init_tavor(struct mthca_dev *mdev) | 258 | static int mthca_init_tavor(struct mthca_dev *mdev) |
259 | { | 259 | { |
260 | u8 status; | 260 | u8 status; |
261 | int err; | 261 | int err; |
@@ -333,7 +333,7 @@ err_disable: | |||
333 | return err; | 333 | return err; |
334 | } | 334 | } |
335 | 335 | ||
336 | static int __devinit mthca_load_fw(struct mthca_dev *mdev) | 336 | static int mthca_load_fw(struct mthca_dev *mdev) |
337 | { | 337 | { |
338 | u8 status; | 338 | u8 status; |
339 | int err; | 339 | int err; |
@@ -379,10 +379,10 @@ err_free: | |||
379 | return err; | 379 | return err; |
380 | } | 380 | } |
381 | 381 | ||
382 | static int __devinit mthca_init_icm(struct mthca_dev *mdev, | 382 | static int mthca_init_icm(struct mthca_dev *mdev, |
383 | struct mthca_dev_lim *dev_lim, | 383 | struct mthca_dev_lim *dev_lim, |
384 | struct mthca_init_hca_param *init_hca, | 384 | struct mthca_init_hca_param *init_hca, |
385 | u64 icm_size) | 385 | u64 icm_size) |
386 | { | 386 | { |
387 | u64 aux_pages; | 387 | u64 aux_pages; |
388 | u8 status; | 388 | u8 status; |
@@ -575,7 +575,7 @@ static void mthca_free_icms(struct mthca_dev *mdev) | |||
575 | mthca_free_icm(mdev, mdev->fw.arbel.aux_icm); | 575 | mthca_free_icm(mdev, mdev->fw.arbel.aux_icm); |
576 | } | 576 | } |
577 | 577 | ||
578 | static int __devinit mthca_init_arbel(struct mthca_dev *mdev) | 578 | static int mthca_init_arbel(struct mthca_dev *mdev) |
579 | { | 579 | { |
580 | struct mthca_dev_lim dev_lim; | 580 | struct mthca_dev_lim dev_lim; |
581 | struct mthca_profile profile; | 581 | struct mthca_profile profile; |
@@ -683,7 +683,7 @@ static void mthca_close_hca(struct mthca_dev *mdev) | |||
683 | mthca_SYS_DIS(mdev, &status); | 683 | mthca_SYS_DIS(mdev, &status); |
684 | } | 684 | } |
685 | 685 | ||
686 | static int __devinit mthca_init_hca(struct mthca_dev *mdev) | 686 | static int mthca_init_hca(struct mthca_dev *mdev) |
687 | { | 687 | { |
688 | u8 status; | 688 | u8 status; |
689 | int err; | 689 | int err; |
@@ -720,7 +720,7 @@ err_close: | |||
720 | return err; | 720 | return err; |
721 | } | 721 | } |
722 | 722 | ||
723 | static int __devinit mthca_setup_hca(struct mthca_dev *dev) | 723 | static int mthca_setup_hca(struct mthca_dev *dev) |
724 | { | 724 | { |
725 | int err; | 725 | int err; |
726 | u8 status; | 726 | u8 status; |
@@ -875,8 +875,7 @@ err_uar_table_free: | |||
875 | return err; | 875 | return err; |
876 | } | 876 | } |
877 | 877 | ||
878 | static int __devinit mthca_request_regions(struct pci_dev *pdev, | 878 | static int mthca_request_regions(struct pci_dev *pdev, int ddr_hidden) |
879 | int ddr_hidden) | ||
880 | { | 879 | { |
881 | int err; | 880 | int err; |
882 | 881 | ||
@@ -928,7 +927,7 @@ static void mthca_release_regions(struct pci_dev *pdev, | |||
928 | MTHCA_HCR_SIZE); | 927 | MTHCA_HCR_SIZE); |
929 | } | 928 | } |
930 | 929 | ||
931 | static int __devinit mthca_enable_msi_x(struct mthca_dev *mdev) | 930 | static int mthca_enable_msi_x(struct mthca_dev *mdev) |
932 | { | 931 | { |
933 | struct msix_entry entries[3]; | 932 | struct msix_entry entries[3]; |
934 | int err; | 933 | int err; |
@@ -1213,7 +1212,7 @@ int __mthca_restart_one(struct pci_dev *pdev) | |||
1213 | } | 1212 | } |
1214 | 1213 | ||
1215 | static int __devinit mthca_init_one(struct pci_dev *pdev, | 1214 | static int __devinit mthca_init_one(struct pci_dev *pdev, |
1216 | const struct pci_device_id *id) | 1215 | const struct pci_device_id *id) |
1217 | { | 1216 | { |
1218 | static int mthca_version_printed = 0; | 1217 | static int mthca_version_printed = 0; |
1219 | int ret; | 1218 | int ret; |
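The recurring __devinit/__devexit removals across the mthca files (here and in mthca_av.c, mthca_cq.c, mthca_eq.c, mthca_mad.c, mthca_mcg.c, mthca_mr.c, mthca_pd.c) are not cosmetic. Those annotations place code in sections the kernel may discard after boot when CONFIG_HOTPLUG=n, which is only safe for functions reached exclusively from device probe; these init functions are now also reachable at runtime through the catastrophic-error restart path (__mthca_restart_one() above), so they must stay in ordinary .text. Note that mthca_init_one() itself, a pure probe entry point, keeps its __devinit. Schematically:

        static int __devinit mthca_init_one(struct pci_dev *pdev,
                                            const struct pci_device_id *id);
        /* probe-only: may live in a discardable section when !CONFIG_HOTPLUG */

        static int mthca_init_hca(struct mthca_dev *mdev);
        /* also called from the catas restart path: must stay resident */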
diff --git a/drivers/infiniband/hw/mthca/mthca_mcg.c b/drivers/infiniband/hw/mthca/mthca_mcg.c index 47ca8a9b7247..a8ad072be074 100644 --- a/drivers/infiniband/hw/mthca/mthca_mcg.c +++ b/drivers/infiniband/hw/mthca/mthca_mcg.c | |||
@@ -32,7 +32,6 @@ | |||
32 | * $Id: mthca_mcg.c 1349 2004-12-16 21:09:43Z roland $ | 32 | * $Id: mthca_mcg.c 1349 2004-12-16 21:09:43Z roland $ |
33 | */ | 33 | */ |
34 | 34 | ||
35 | #include <linux/init.h> | ||
36 | #include <linux/string.h> | 35 | #include <linux/string.h> |
37 | #include <linux/slab.h> | 36 | #include <linux/slab.h> |
38 | 37 | ||
@@ -371,7 +370,7 @@ int mthca_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid) | |||
371 | return err; | 370 | return err; |
372 | } | 371 | } |
373 | 372 | ||
374 | int __devinit mthca_init_mcg_table(struct mthca_dev *dev) | 373 | int mthca_init_mcg_table(struct mthca_dev *dev) |
375 | { | 374 | { |
376 | int err; | 375 | int err; |
377 | int table_size = dev->limits.num_mgms + dev->limits.num_amgms; | 376 | int table_size = dev->limits.num_mgms + dev->limits.num_amgms; |
diff --git a/drivers/infiniband/hw/mthca/mthca_mr.c b/drivers/infiniband/hw/mthca/mthca_mr.c index a486dec1707e..f71ffa88db3a 100644 --- a/drivers/infiniband/hw/mthca/mthca_mr.c +++ b/drivers/infiniband/hw/mthca/mthca_mr.c | |||
@@ -34,7 +34,6 @@ | |||
34 | */ | 34 | */ |
35 | 35 | ||
36 | #include <linux/slab.h> | 36 | #include <linux/slab.h> |
37 | #include <linux/init.h> | ||
38 | #include <linux/errno.h> | 37 | #include <linux/errno.h> |
39 | 38 | ||
40 | #include "mthca_dev.h" | 39 | #include "mthca_dev.h" |
@@ -135,7 +134,7 @@ static void mthca_buddy_free(struct mthca_buddy *buddy, u32 seg, int order) | |||
135 | spin_unlock(&buddy->lock); | 134 | spin_unlock(&buddy->lock); |
136 | } | 135 | } |
137 | 136 | ||
138 | static int __devinit mthca_buddy_init(struct mthca_buddy *buddy, int max_order) | 137 | static int mthca_buddy_init(struct mthca_buddy *buddy, int max_order) |
139 | { | 138 | { |
140 | int i, s; | 139 | int i, s; |
141 | 140 | ||
@@ -759,7 +758,7 @@ void mthca_arbel_fmr_unmap(struct mthca_dev *dev, struct mthca_fmr *fmr) | |||
759 | *(u8 *) fmr->mem.arbel.mpt = MTHCA_MPT_STATUS_SW; | 758 | *(u8 *) fmr->mem.arbel.mpt = MTHCA_MPT_STATUS_SW; |
760 | } | 759 | } |
761 | 760 | ||
762 | int __devinit mthca_init_mr_table(struct mthca_dev *dev) | 761 | int mthca_init_mr_table(struct mthca_dev *dev) |
763 | { | 762 | { |
764 | unsigned long addr; | 763 | unsigned long addr; |
765 | int err, i; | 764 | int err, i; |
diff --git a/drivers/infiniband/hw/mthca/mthca_pd.c b/drivers/infiniband/hw/mthca/mthca_pd.c index 59df51614c85..c1e950764bd8 100644 --- a/drivers/infiniband/hw/mthca/mthca_pd.c +++ b/drivers/infiniband/hw/mthca/mthca_pd.c | |||
@@ -34,7 +34,6 @@ | |||
34 | * $Id: mthca_pd.c 1349 2004-12-16 21:09:43Z roland $ | 34 | * $Id: mthca_pd.c 1349 2004-12-16 21:09:43Z roland $ |
35 | */ | 35 | */ |
36 | 36 | ||
37 | #include <linux/init.h> | ||
38 | #include <linux/errno.h> | 37 | #include <linux/errno.h> |
39 | 38 | ||
40 | #include "mthca_dev.h" | 39 | #include "mthca_dev.h" |
@@ -69,7 +68,7 @@ void mthca_pd_free(struct mthca_dev *dev, struct mthca_pd *pd) | |||
69 | mthca_free(&dev->pd_table.alloc, pd->pd_num); | 68 | mthca_free(&dev->pd_table.alloc, pd->pd_num); |
70 | } | 69 | } |
71 | 70 | ||
72 | int __devinit mthca_init_pd_table(struct mthca_dev *dev) | 71 | int mthca_init_pd_table(struct mthca_dev *dev) |
73 | { | 72 | { |
74 | return mthca_alloc_init(&dev->pd_table.alloc, | 73 | return mthca_alloc_init(&dev->pd_table.alloc, |
75 | dev->limits.num_pds, | 74 | dev->limits.num_pds, |
diff --git a/drivers/infiniband/hw/mthca/mthca_provider.c b/drivers/infiniband/hw/mthca/mthca_provider.c index fc67f780581b..7ec7c4b937f9 100644 --- a/drivers/infiniband/hw/mthca/mthca_provider.c +++ b/drivers/infiniband/hw/mthca/mthca_provider.c | |||
@@ -124,7 +124,7 @@ static int mthca_query_device(struct ib_device *ibdev, | |||
124 | props->max_map_per_fmr = 255; | 124 | props->max_map_per_fmr = 255; |
125 | else | 125 | else |
126 | props->max_map_per_fmr = | 126 | props->max_map_per_fmr = |
127 | (1 << (32 - long_log2(mdev->limits.num_mpts))) - 1; | 127 | (1 << (32 - ilog2(mdev->limits.num_mpts))) - 1; |
128 | 128 | ||
129 | err = 0; | 129 | err = 0; |
130 | out: | 130 | out: |
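
Both long_log2() call sites in this file move to the generic ilog2() from <linux/log2.h>, introduced in this kernel cycle; unlike the old helper it also folds to a compile-time constant for constant arguments. The max_map_per_fmr computation above reads naturally in those terms: with num_mpts a power of two, the low ilog2(num_mpts) bits of a 32-bit memory key select the MPT entry, which apparently leaves the high bits free to count FMR remaps. A sketch of the arithmetic only, not driver API:

    /* e.g. num_mpts = 2^17  ->  2^(32-17) - 1 = 32767 maps per FMR */
    static unsigned int max_maps_per_fmr(unsigned int num_mpts)
    {
            return (1U << (32 - ilog2(num_mpts))) - 1;
    }
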
@@ -816,7 +816,7 @@ static int mthca_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *uda | |||
816 | lkey = ucmd.lkey; | 816 | lkey = ucmd.lkey; |
817 | } | 817 | } |
818 | 818 | ||
819 | ret = mthca_RESIZE_CQ(dev, cq->cqn, lkey, long_log2(entries), &status); | 819 | ret = mthca_RESIZE_CQ(dev, cq->cqn, lkey, ilog2(entries), &status); |
820 | if (status) | 820 | if (status) |
821 | ret = -EINVAL; | 821 | ret = -EINVAL; |
822 | 822 | ||
@@ -1100,11 +1100,10 @@ static struct ib_fmr *mthca_alloc_fmr(struct ib_pd *pd, int mr_access_flags, | |||
1100 | struct mthca_fmr *fmr; | 1100 | struct mthca_fmr *fmr; |
1101 | int err; | 1101 | int err; |
1102 | 1102 | ||
1103 | fmr = kmalloc(sizeof *fmr, GFP_KERNEL); | 1103 | fmr = kmemdup(fmr_attr, sizeof *fmr, GFP_KERNEL); |
1104 | if (!fmr) | 1104 | if (!fmr) |
1105 | return ERR_PTR(-ENOMEM); | 1105 | return ERR_PTR(-ENOMEM); |
1106 | 1106 | ||
1107 | memcpy(&fmr->attr, fmr_attr, sizeof *fmr_attr); | ||
1108 | err = mthca_fmr_alloc(to_mdev(pd->device), to_mpd(pd)->pd_num, | 1107 | err = mthca_fmr_alloc(to_mdev(pd->device), to_mpd(pd)->pd_num, |
1109 | convert_access(mr_access_flags), fmr); | 1108 | convert_access(mr_access_flags), fmr); |
1110 | 1109 | ||
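
The kmalloc()+memcpy() pair in mthca_alloc_fmr() collapses into kmemdup(), added in 2.6.19. One hedged caveat on this particular conversion: kmemdup() copies sizeof *fmr bytes from fmr_attr into offset 0 of the new buffer, while the original copied sizeof *fmr_attr bytes into &fmr->attr, so the two are byte-for-byte equivalent only if attr sits at the start of struct mthca_fmr and the sizes agree. The general idiom, shown with a hypothetical structure:

    #include <linux/slab.h>
    #include <linux/string.h>

    struct foo_attr {
            int max_pages;
            int access_flags;
    };

    /* kmemdup() == kmalloc() + memcpy(), returning NULL on failure */
    static struct foo_attr *foo_attr_dup(const struct foo_attr *src)
    {
            return kmemdup(src, sizeof *src, GFP_KERNEL);
    }
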
diff --git a/drivers/infiniband/hw/mthca/mthca_qp.c b/drivers/infiniband/hw/mthca/mthca_qp.c index 5e5c58b9920b..d844a2569b47 100644 --- a/drivers/infiniband/hw/mthca/mthca_qp.c +++ b/drivers/infiniband/hw/mthca/mthca_qp.c | |||
@@ -35,10 +35,11 @@ | |||
35 | * $Id: mthca_qp.c 1355 2004-12-17 15:23:43Z roland $ | 35 | * $Id: mthca_qp.c 1355 2004-12-17 15:23:43Z roland $ |
36 | */ | 36 | */ |
37 | 37 | ||
38 | #include <linux/init.h> | ||
39 | #include <linux/string.h> | 38 | #include <linux/string.h> |
40 | #include <linux/slab.h> | 39 | #include <linux/slab.h> |
41 | 40 | ||
41 | #include <asm/io.h> | ||
42 | |||
42 | #include <rdma/ib_verbs.h> | 43 | #include <rdma/ib_verbs.h> |
43 | #include <rdma/ib_cache.h> | 44 | #include <rdma/ib_cache.h> |
44 | #include <rdma/ib_pack.h> | 45 | #include <rdma/ib_pack.h> |
@@ -635,11 +636,11 @@ int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask, | |||
635 | 636 | ||
636 | if (mthca_is_memfree(dev)) { | 637 | if (mthca_is_memfree(dev)) { |
637 | if (qp->rq.max) | 638 | if (qp->rq.max) |
638 | qp_context->rq_size_stride = long_log2(qp->rq.max) << 3; | 639 | qp_context->rq_size_stride = ilog2(qp->rq.max) << 3; |
639 | qp_context->rq_size_stride |= qp->rq.wqe_shift - 4; | 640 | qp_context->rq_size_stride |= qp->rq.wqe_shift - 4; |
640 | 641 | ||
641 | if (qp->sq.max) | 642 | if (qp->sq.max) |
642 | qp_context->sq_size_stride = long_log2(qp->sq.max) << 3; | 643 | qp_context->sq_size_stride = ilog2(qp->sq.max) << 3; |
643 | qp_context->sq_size_stride |= qp->sq.wqe_shift - 4; | 644 | qp_context->sq_size_stride |= qp->sq.wqe_shift - 4; |
644 | } | 645 | } |
645 | 646 | ||
@@ -1732,6 +1733,11 @@ out: | |||
1732 | mthca_write64(doorbell, | 1733 | mthca_write64(doorbell, |
1733 | dev->kar + MTHCA_SEND_DOORBELL, | 1734 | dev->kar + MTHCA_SEND_DOORBELL, |
1734 | MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock)); | 1735 | MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock)); |
1736 | /* | ||
1737 | * Make sure doorbells don't leak out of SQ spinlock | ||
1738 | * and reach the HCA out of order: | ||
1739 | */ | ||
1740 | mmiowb(); | ||
1735 | } | 1741 | } |
1736 | 1742 | ||
1737 | qp->sq.next_ind = ind; | 1743 | qp->sq.next_ind = ind; |
@@ -1851,6 +1857,12 @@ out: | |||
1851 | qp->rq.next_ind = ind; | 1857 | qp->rq.next_ind = ind; |
1852 | qp->rq.head += nreq; | 1858 | qp->rq.head += nreq; |
1853 | 1859 | ||
1860 | /* | ||
1861 | * Make sure doorbells don't leak out of RQ spinlock and reach | ||
1862 | * the HCA out of order: | ||
1863 | */ | ||
1864 | mmiowb(); | ||
1865 | |||
1854 | spin_unlock_irqrestore(&qp->rq.lock, flags); | 1866 | spin_unlock_irqrestore(&qp->rq.lock, flags); |
1855 | return err; | 1867 | return err; |
1856 | } | 1868 | } |
@@ -2112,6 +2124,12 @@ out: | |||
2112 | MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock)); | 2124 | MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock)); |
2113 | } | 2125 | } |
2114 | 2126 | ||
2127 | /* | ||
2128 | * Make sure doorbells don't leak out of SQ spinlock and reach | ||
2129 | * the HCA out of order: | ||
2130 | */ | ||
2131 | mmiowb(); | ||
2132 | |||
2115 | spin_unlock_irqrestore(&qp->sq.lock, flags); | 2133 | spin_unlock_irqrestore(&qp->sq.lock, flags); |
2116 | return err; | 2134 | return err; |
2117 | } | 2135 | } |
@@ -2222,7 +2240,7 @@ void mthca_free_err_wqe(struct mthca_dev *dev, struct mthca_qp *qp, int is_send, | |||
2222 | *new_wqe = 0; | 2240 | *new_wqe = 0; |
2223 | } | 2241 | } |
2224 | 2242 | ||
2225 | int __devinit mthca_init_qp_table(struct mthca_dev *dev) | 2243 | int mthca_init_qp_table(struct mthca_dev *dev) |
2226 | { | 2244 | { |
2227 | int err; | 2245 | int err; |
2228 | u8 status; | 2246 | u8 status; |
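
The three mmiowb() additions in this file (and the new <asm/io.h> include that declares it) all follow the pattern the added comments describe: a spinlock orders the CPUs themselves, but on platforms with relaxed MMIO ordering (large ia64/Altix systems, notably) one CPU's posted doorbell write can still reach the HCA after a later lock holder's write unless it is flushed before the lock is released. Condensed from the send-path hunk above:

    spin_lock_irqsave(&qp->sq.lock, flags);

    /* ... build and link WQEs, then ring the doorbell ... */
    mthca_write64(doorbell, dev->kar + MTHCA_SEND_DOORBELL,
                  MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));

    /* flush the posted MMIO write before the next lock holder
     * can issue its own doorbell */
    mmiowb();

    spin_unlock_irqrestore(&qp->sq.lock, flags);
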
diff --git a/drivers/infiniband/hw/mthca/mthca_srq.c b/drivers/infiniband/hw/mthca/mthca_srq.c index 92a72f521528..10684da33d58 100644 --- a/drivers/infiniband/hw/mthca/mthca_srq.c +++ b/drivers/infiniband/hw/mthca/mthca_srq.c | |||
@@ -35,6 +35,8 @@ | |||
35 | #include <linux/slab.h> | 35 | #include <linux/slab.h> |
36 | #include <linux/string.h> | 36 | #include <linux/string.h> |
37 | 37 | ||
38 | #include <asm/io.h> | ||
39 | |||
38 | #include "mthca_dev.h" | 40 | #include "mthca_dev.h" |
39 | #include "mthca_cmd.h" | 41 | #include "mthca_cmd.h" |
40 | #include "mthca_memfree.h" | 42 | #include "mthca_memfree.h" |
@@ -118,7 +120,7 @@ static void mthca_arbel_init_srq_context(struct mthca_dev *dev, | |||
118 | 120 | ||
119 | memset(context, 0, sizeof *context); | 121 | memset(context, 0, sizeof *context); |
120 | 122 | ||
121 | logsize = long_log2(srq->max) + srq->wqe_shift; | 123 | logsize = ilog2(srq->max); |
122 | context->state_logsize_srqn = cpu_to_be32(logsize << 24 | srq->srqn); | 124 | context->state_logsize_srqn = cpu_to_be32(logsize << 24 | srq->srqn); |
123 | context->lkey = cpu_to_be32(srq->mr.ibmr.lkey); | 125 | context->lkey = cpu_to_be32(srq->mr.ibmr.lkey); |
124 | context->db_index = cpu_to_be32(srq->db_index); | 126 | context->db_index = cpu_to_be32(srq->db_index); |
@@ -211,7 +213,7 @@ int mthca_alloc_srq(struct mthca_dev *dev, struct mthca_pd *pd, | |||
211 | if (!mthca_is_memfree(dev) && (ds > dev->limits.max_desc_sz)) | 213 | if (!mthca_is_memfree(dev) && (ds > dev->limits.max_desc_sz)) |
212 | return -EINVAL; | 214 | return -EINVAL; |
213 | 215 | ||
214 | srq->wqe_shift = long_log2(ds); | 216 | srq->wqe_shift = ilog2(ds); |
215 | 217 | ||
216 | srq->srqn = mthca_alloc(&dev->srq_table.alloc); | 218 | srq->srqn = mthca_alloc(&dev->srq_table.alloc); |
217 | if (srq->srqn == -1) | 219 | if (srq->srqn == -1) |
@@ -595,6 +597,12 @@ int mthca_tavor_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr, | |||
595 | MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock)); | 597 | MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock)); |
596 | } | 598 | } |
597 | 599 | ||
600 | /* | ||
601 | * Make sure doorbells don't leak out of SRQ spinlock and | ||
602 | * reach the HCA out of order: | ||
603 | */ | ||
604 | mmiowb(); | ||
605 | |||
598 | spin_unlock_irqrestore(&srq->lock, flags); | 606 | spin_unlock_irqrestore(&srq->lock, flags); |
599 | return err; | 607 | return err; |
600 | } | 608 | } |
@@ -707,7 +715,7 @@ int mthca_max_srq_sge(struct mthca_dev *dev) | |||
707 | sizeof (struct mthca_data_seg)); | 715 | sizeof (struct mthca_data_seg)); |
708 | } | 716 | } |
709 | 717 | ||
710 | int __devinit mthca_init_srq_table(struct mthca_dev *dev) | 718 | int mthca_init_srq_table(struct mthca_dev *dev) |
711 | { | 719 | { |
712 | int err; | 720 | int err; |
713 | 721 | ||
diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h index 0b8a79d53a00..99547996aba2 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib.h +++ b/drivers/infiniband/ulp/ipoib/ipoib.h | |||
@@ -136,11 +136,11 @@ struct ipoib_dev_priv { | |||
136 | struct list_head multicast_list; | 136 | struct list_head multicast_list; |
137 | struct rb_root multicast_tree; | 137 | struct rb_root multicast_tree; |
138 | 138 | ||
139 | struct work_struct pkey_task; | 139 | struct delayed_work pkey_task; |
140 | struct work_struct mcast_task; | 140 | struct delayed_work mcast_task; |
141 | struct work_struct flush_task; | 141 | struct work_struct flush_task; |
142 | struct work_struct restart_task; | 142 | struct work_struct restart_task; |
143 | struct work_struct ah_reap_task; | 143 | struct delayed_work ah_reap_task; |
144 | 144 | ||
145 | struct ib_device *ca; | 145 | struct ib_device *ca; |
146 | u8 port; | 146 | u8 port; |
@@ -233,7 +233,7 @@ static inline struct ipoib_neigh **to_ipoib_neigh(struct neighbour *neigh) | |||
233 | } | 233 | } |
234 | 234 | ||
235 | struct ipoib_neigh *ipoib_neigh_alloc(struct neighbour *neigh); | 235 | struct ipoib_neigh *ipoib_neigh_alloc(struct neighbour *neigh); |
236 | void ipoib_neigh_free(struct ipoib_neigh *neigh); | 236 | void ipoib_neigh_free(struct net_device *dev, struct ipoib_neigh *neigh); |
237 | 237 | ||
238 | extern struct workqueue_struct *ipoib_workqueue; | 238 | extern struct workqueue_struct *ipoib_workqueue; |
239 | 239 | ||
@@ -254,13 +254,13 @@ int ipoib_add_pkey_attr(struct net_device *dev); | |||
254 | 254 | ||
255 | void ipoib_send(struct net_device *dev, struct sk_buff *skb, | 255 | void ipoib_send(struct net_device *dev, struct sk_buff *skb, |
256 | struct ipoib_ah *address, u32 qpn); | 256 | struct ipoib_ah *address, u32 qpn); |
257 | void ipoib_reap_ah(void *dev_ptr); | 257 | void ipoib_reap_ah(struct work_struct *work); |
258 | 258 | ||
259 | void ipoib_flush_paths(struct net_device *dev); | 259 | void ipoib_flush_paths(struct net_device *dev); |
260 | struct ipoib_dev_priv *ipoib_intf_alloc(const char *format); | 260 | struct ipoib_dev_priv *ipoib_intf_alloc(const char *format); |
261 | 261 | ||
262 | int ipoib_ib_dev_init(struct net_device *dev, struct ib_device *ca, int port); | 262 | int ipoib_ib_dev_init(struct net_device *dev, struct ib_device *ca, int port); |
263 | void ipoib_ib_dev_flush(void *dev); | 263 | void ipoib_ib_dev_flush(struct work_struct *work); |
264 | void ipoib_ib_dev_cleanup(struct net_device *dev); | 264 | void ipoib_ib_dev_cleanup(struct net_device *dev); |
265 | 265 | ||
266 | int ipoib_ib_dev_open(struct net_device *dev); | 266 | int ipoib_ib_dev_open(struct net_device *dev); |
@@ -271,10 +271,10 @@ int ipoib_ib_dev_stop(struct net_device *dev); | |||
271 | int ipoib_dev_init(struct net_device *dev, struct ib_device *ca, int port); | 271 | int ipoib_dev_init(struct net_device *dev, struct ib_device *ca, int port); |
272 | void ipoib_dev_cleanup(struct net_device *dev); | 272 | void ipoib_dev_cleanup(struct net_device *dev); |
273 | 273 | ||
274 | void ipoib_mcast_join_task(void *dev_ptr); | 274 | void ipoib_mcast_join_task(struct work_struct *work); |
275 | void ipoib_mcast_send(struct net_device *dev, void *mgid, struct sk_buff *skb); | 275 | void ipoib_mcast_send(struct net_device *dev, void *mgid, struct sk_buff *skb); |
276 | 276 | ||
277 | void ipoib_mcast_restart_task(void *dev_ptr); | 277 | void ipoib_mcast_restart_task(struct work_struct *work); |
278 | int ipoib_mcast_start_thread(struct net_device *dev); | 278 | int ipoib_mcast_start_thread(struct net_device *dev); |
279 | int ipoib_mcast_stop_thread(struct net_device *dev, int flush); | 279 | int ipoib_mcast_stop_thread(struct net_device *dev, int flush); |
280 | 280 | ||
@@ -312,7 +312,7 @@ void ipoib_event(struct ib_event_handler *handler, | |||
312 | int ipoib_vlan_add(struct net_device *pdev, unsigned short pkey); | 312 | int ipoib_vlan_add(struct net_device *pdev, unsigned short pkey); |
313 | int ipoib_vlan_delete(struct net_device *pdev, unsigned short pkey); | 313 | int ipoib_vlan_delete(struct net_device *pdev, unsigned short pkey); |
314 | 314 | ||
315 | void ipoib_pkey_poll(void *dev); | 315 | void ipoib_pkey_poll(struct work_struct *work); |
316 | int ipoib_pkey_dev_delay_open(struct net_device *dev); | 316 | int ipoib_pkey_dev_delay_open(struct net_device *dev); |
317 | 317 | ||
318 | #ifdef CONFIG_INFINIBAND_IPOIB_DEBUG | 318 | #ifdef CONFIG_INFINIBAND_IPOIB_DEBUG |
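
These header changes track the 2.6.20 workqueue rework: a work function now receives the struct work_struct * itself instead of a caller-chosen void *, and any item queued with a timeout must be a struct delayed_work, which embeds a work_struct member named work. Handlers recover their context with container_of(); a sketch of both shapes as ipoib uses them, assuming the struct ipoib_dev_priv declared above:

    #include <linux/workqueue.h>

    static void pkey_poll_sketch(struct work_struct *work)
    {
            /* delayed_work handlers go through the embedded .work member */
            struct ipoib_dev_priv *priv =
                    container_of(work, struct ipoib_dev_priv, pkey_task.work);
            /* ... */
    }

    static void flush_sketch(struct work_struct *work)
    {
            /* plain work_struct members are named directly */
            struct ipoib_dev_priv *priv =
                    container_of(work, struct ipoib_dev_priv, flush_task);
            /* ... */
    }
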
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c index 8bf5e9ec7c95..f10fba5d3265 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c | |||
@@ -400,10 +400,11 @@ static void __ipoib_reap_ah(struct net_device *dev) | |||
400 | spin_unlock_irq(&priv->tx_lock); | 400 | spin_unlock_irq(&priv->tx_lock); |
401 | } | 401 | } |
402 | 402 | ||
403 | void ipoib_reap_ah(void *dev_ptr) | 403 | void ipoib_reap_ah(struct work_struct *work) |
404 | { | 404 | { |
405 | struct net_device *dev = dev_ptr; | 405 | struct ipoib_dev_priv *priv = |
406 | struct ipoib_dev_priv *priv = netdev_priv(dev); | 406 | container_of(work, struct ipoib_dev_priv, ah_reap_task.work); |
407 | struct net_device *dev = priv->dev; | ||
407 | 408 | ||
408 | __ipoib_reap_ah(dev); | 409 | __ipoib_reap_ah(dev); |
409 | 410 | ||
@@ -613,10 +614,11 @@ int ipoib_ib_dev_init(struct net_device *dev, struct ib_device *ca, int port) | |||
613 | return 0; | 614 | return 0; |
614 | } | 615 | } |
615 | 616 | ||
616 | void ipoib_ib_dev_flush(void *_dev) | 617 | void ipoib_ib_dev_flush(struct work_struct *work) |
617 | { | 618 | { |
618 | struct net_device *dev = (struct net_device *)_dev; | 619 | struct ipoib_dev_priv *cpriv, *priv = |
619 | struct ipoib_dev_priv *priv = netdev_priv(dev), *cpriv; | 620 | container_of(work, struct ipoib_dev_priv, flush_task); |
621 | struct net_device *dev = priv->dev; | ||
620 | 622 | ||
621 | if (!test_bit(IPOIB_FLAG_INITIALIZED, &priv->flags) ) { | 623 | if (!test_bit(IPOIB_FLAG_INITIALIZED, &priv->flags) ) { |
622 | ipoib_dbg(priv, "Not flushing - IPOIB_FLAG_INITIALIZED not set.\n"); | 624 | ipoib_dbg(priv, "Not flushing - IPOIB_FLAG_INITIALIZED not set.\n"); |
@@ -638,14 +640,14 @@ void ipoib_ib_dev_flush(void *_dev) | |||
638 | */ | 640 | */ |
639 | if (test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags)) { | 641 | if (test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags)) { |
640 | ipoib_ib_dev_up(dev); | 642 | ipoib_ib_dev_up(dev); |
641 | ipoib_mcast_restart_task(dev); | 643 | ipoib_mcast_restart_task(&priv->restart_task); |
642 | } | 644 | } |
643 | 645 | ||
644 | mutex_lock(&priv->vlan_mutex); | 646 | mutex_lock(&priv->vlan_mutex); |
645 | 647 | ||
646 | /* Flush any child interfaces too */ | 648 | /* Flush any child interfaces too */ |
647 | list_for_each_entry(cpriv, &priv->child_intfs, list) | 649 | list_for_each_entry(cpriv, &priv->child_intfs, list) |
648 | ipoib_ib_dev_flush(cpriv->dev); | 650 | ipoib_ib_dev_flush(&cpriv->flush_task); |
649 | 651 | ||
650 | mutex_unlock(&priv->vlan_mutex); | 652 | mutex_unlock(&priv->vlan_mutex); |
651 | } | 653 | } |
@@ -672,10 +674,11 @@ void ipoib_ib_dev_cleanup(struct net_device *dev) | |||
672 | * change async notification is available. | 674 | * change async notification is available. |
673 | */ | 675 | */ |
674 | 676 | ||
675 | void ipoib_pkey_poll(void *dev_ptr) | 677 | void ipoib_pkey_poll(struct work_struct *work) |
676 | { | 678 | { |
677 | struct net_device *dev = dev_ptr; | 679 | struct ipoib_dev_priv *priv = |
678 | struct ipoib_dev_priv *priv = netdev_priv(dev); | 680 | container_of(work, struct ipoib_dev_priv, pkey_task.work); |
681 | struct net_device *dev = priv->dev; | ||
679 | 682 | ||
680 | ipoib_pkey_dev_check_presence(dev); | 683 | ipoib_pkey_dev_check_presence(dev); |
681 | 684 | ||
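
One knock-on effect of the new prototype is visible in the flush path above: with the work_struct as the only argument, calling a handler synchronously means handing it a pointer to the embedded member, so container_of() inside the handler still resolves to the right private structure:

    /* direct (non-queued) invocation under the new calling convention */
    ipoib_ib_dev_flush(&cpriv->flush_task);
    ipoib_mcast_restart_task(&priv->restart_task);
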
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c index 1eaf00e9862c..c09280243726 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_main.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c | |||
@@ -49,6 +49,8 @@ | |||
49 | 49 | ||
50 | #include <net/dst.h> | 50 | #include <net/dst.h> |
51 | 51 | ||
52 | #define IPOIB_QPN(ha) (be32_to_cpup((__be32 *) ha) & 0xffffff) | ||
53 | |||
52 | MODULE_AUTHOR("Roland Dreier"); | 54 | MODULE_AUTHOR("Roland Dreier"); |
53 | MODULE_DESCRIPTION("IP-over-InfiniBand net driver"); | 55 | MODULE_DESCRIPTION("IP-over-InfiniBand net driver"); |
54 | MODULE_LICENSE("Dual BSD/GPL"); | 56 | MODULE_LICENSE("Dual BSD/GPL"); |
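
The new IPOIB_QPN() macro centralizes a computation that several call sites below had open-coded: an IPoIB hardware address is 20 bytes, with the destination QP number in the low 24 bits of the first big-endian word and the 16-byte port GID following it, so the macro loads the leading __be32 and masks off the top byte. Illustrative values (hypothetical address bytes):

    /* ha = 00 00 04 8a <16-byte GID...>  ->  IPOIB_QPN(ha) == 0x48a */
    u32 qpn = IPOIB_QPN(skb->dst->neighbour->ha);
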
@@ -262,7 +264,7 @@ static void path_free(struct net_device *dev, struct ipoib_path *path) | |||
262 | if (neigh->ah) | 264 | if (neigh->ah) |
263 | ipoib_put_ah(neigh->ah); | 265 | ipoib_put_ah(neigh->ah); |
264 | 266 | ||
265 | ipoib_neigh_free(neigh); | 267 | ipoib_neigh_free(dev, neigh); |
266 | } | 268 | } |
267 | 269 | ||
268 | spin_unlock_irqrestore(&priv->lock, flags); | 270 | spin_unlock_irqrestore(&priv->lock, flags); |
@@ -520,14 +522,14 @@ static void neigh_add_path(struct sk_buff *skb, struct net_device *dev) | |||
520 | memcpy(&neigh->dgid.raw, &path->pathrec.dgid.raw, | 522 | memcpy(&neigh->dgid.raw, &path->pathrec.dgid.raw, |
521 | sizeof(union ib_gid)); | 523 | sizeof(union ib_gid)); |
522 | 524 | ||
523 | ipoib_send(dev, skb, path->ah, | 525 | ipoib_send(dev, skb, path->ah, IPOIB_QPN(skb->dst->neighbour->ha)); |
524 | be32_to_cpup((__be32 *) skb->dst->neighbour->ha)); | ||
525 | } else { | 526 | } else { |
526 | neigh->ah = NULL; | 527 | neigh->ah = NULL; |
527 | __skb_queue_tail(&neigh->queue, skb); | ||
528 | 528 | ||
529 | if (!path->query && path_rec_start(dev, path)) | 529 | if (!path->query && path_rec_start(dev, path)) |
530 | goto err_list; | 530 | goto err_list; |
531 | |||
532 | __skb_queue_tail(&neigh->queue, skb); | ||
531 | } | 533 | } |
532 | 534 | ||
533 | spin_unlock(&priv->lock); | 535 | spin_unlock(&priv->lock); |
@@ -537,7 +539,7 @@ err_list: | |||
537 | list_del(&neigh->list); | 539 | list_del(&neigh->list); |
538 | 540 | ||
539 | err_path: | 541 | err_path: |
540 | ipoib_neigh_free(neigh); | 542 | ipoib_neigh_free(dev, neigh); |
541 | ++priv->stats.tx_dropped; | 543 | ++priv->stats.tx_dropped; |
542 | dev_kfree_skb_any(skb); | 544 | dev_kfree_skb_any(skb); |
543 | 545 | ||
@@ -599,8 +601,7 @@ static void unicast_arp_send(struct sk_buff *skb, struct net_device *dev, | |||
599 | ipoib_dbg(priv, "Send unicast ARP to %04x\n", | 601 | ipoib_dbg(priv, "Send unicast ARP to %04x\n", |
600 | be16_to_cpu(path->pathrec.dlid)); | 602 | be16_to_cpu(path->pathrec.dlid)); |
601 | 603 | ||
602 | ipoib_send(dev, skb, path->ah, | 604 | ipoib_send(dev, skb, path->ah, IPOIB_QPN(phdr->hwaddr)); |
603 | be32_to_cpup((__be32 *) phdr->hwaddr)); | ||
604 | } else if ((path->query || !path_rec_start(dev, path)) && | 605 | } else if ((path->query || !path_rec_start(dev, path)) && |
605 | skb_queue_len(&path->queue) < IPOIB_MAX_PATH_REC_QUEUE) { | 606 | skb_queue_len(&path->queue) < IPOIB_MAX_PATH_REC_QUEUE) { |
606 | /* put pseudoheader back on for next time */ | 607 | /* put pseudoheader back on for next time */ |
@@ -655,14 +656,13 @@ static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
655 | */ | 656 | */ |
656 | ipoib_put_ah(neigh->ah); | 657 | ipoib_put_ah(neigh->ah); |
657 | list_del(&neigh->list); | 658 | list_del(&neigh->list); |
658 | ipoib_neigh_free(neigh); | 659 | ipoib_neigh_free(dev, neigh); |
659 | spin_unlock(&priv->lock); | 660 | spin_unlock(&priv->lock); |
660 | ipoib_path_lookup(skb, dev); | 661 | ipoib_path_lookup(skb, dev); |
661 | goto out; | 662 | goto out; |
662 | } | 663 | } |
663 | 664 | ||
664 | ipoib_send(dev, skb, neigh->ah, | 665 | ipoib_send(dev, skb, neigh->ah, IPOIB_QPN(skb->dst->neighbour->ha)); |
665 | be32_to_cpup((__be32 *) skb->dst->neighbour->ha)); | ||
666 | goto out; | 666 | goto out; |
667 | } | 667 | } |
668 | 668 | ||
@@ -694,7 +694,7 @@ static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
694 | IPOIB_GID_FMT "\n", | 694 | IPOIB_GID_FMT "\n", |
695 | skb->dst ? "neigh" : "dst", | 695 | skb->dst ? "neigh" : "dst", |
696 | be16_to_cpup((__be16 *) skb->data), | 696 | be16_to_cpup((__be16 *) skb->data), |
697 | be32_to_cpup((__be32 *) phdr->hwaddr), | 697 | IPOIB_QPN(phdr->hwaddr), |
698 | IPOIB_GID_RAW_ARG(phdr->hwaddr + 4)); | 698 | IPOIB_GID_RAW_ARG(phdr->hwaddr + 4)); |
699 | dev_kfree_skb_any(skb); | 699 | dev_kfree_skb_any(skb); |
700 | ++priv->stats.tx_dropped; | 700 | ++priv->stats.tx_dropped; |
@@ -777,7 +777,7 @@ static void ipoib_neigh_destructor(struct neighbour *n) | |||
777 | 777 | ||
778 | ipoib_dbg(priv, | 778 | ipoib_dbg(priv, |
779 | "neigh_destructor for %06x " IPOIB_GID_FMT "\n", | 779 | "neigh_destructor for %06x " IPOIB_GID_FMT "\n", |
780 | be32_to_cpup((__be32 *) n->ha), | 780 | IPOIB_QPN(n->ha), |
781 | IPOIB_GID_RAW_ARG(n->ha + 4)); | 781 | IPOIB_GID_RAW_ARG(n->ha + 4)); |
782 | 782 | ||
783 | spin_lock_irqsave(&priv->lock, flags); | 783 | spin_lock_irqsave(&priv->lock, flags); |
@@ -787,7 +787,7 @@ static void ipoib_neigh_destructor(struct neighbour *n) | |||
787 | if (neigh->ah) | 787 | if (neigh->ah) |
788 | ah = neigh->ah; | 788 | ah = neigh->ah; |
789 | list_del(&neigh->list); | 789 | list_del(&neigh->list); |
790 | ipoib_neigh_free(neigh); | 790 | ipoib_neigh_free(n->dev, neigh); |
791 | } | 791 | } |
792 | 792 | ||
793 | spin_unlock_irqrestore(&priv->lock, flags); | 793 | spin_unlock_irqrestore(&priv->lock, flags); |
@@ -810,9 +810,15 @@ struct ipoib_neigh *ipoib_neigh_alloc(struct neighbour *neighbour) | |||
810 | return neigh; | 810 | return neigh; |
811 | } | 811 | } |
812 | 812 | ||
813 | void ipoib_neigh_free(struct ipoib_neigh *neigh) | 813 | void ipoib_neigh_free(struct net_device *dev, struct ipoib_neigh *neigh) |
814 | { | 814 | { |
815 | struct ipoib_dev_priv *priv = netdev_priv(dev); | ||
816 | struct sk_buff *skb; | ||
815 | *to_ipoib_neigh(neigh->neighbour) = NULL; | 817 | *to_ipoib_neigh(neigh->neighbour) = NULL; |
818 | while ((skb = __skb_dequeue(&neigh->queue))) { | ||
819 | ++priv->stats.tx_dropped; | ||
820 | dev_kfree_skb_any(skb); | ||
821 | } | ||
816 | kfree(neigh); | 822 | kfree(neigh); |
817 | } | 823 | } |
818 | 824 | ||
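
ipoib_neigh_free() grows its dev parameter precisely for the accounting added above: packets parked on neigh->queue while a path-record lookup was outstanding must be freed and charged to the right device's tx_dropped counter if the neighbour dies before the path resolves, instead of leaking with the neighbour. The matching hunk in neigh_add_path() queues the skb only once path_rec_start() has actually been kicked off; sketched together:

    if (!path->query && path_rec_start(dev, path))
            goto err_list;
    __skb_queue_tail(&neigh->queue, skb);   /* reaped by ipoib_neigh_free()
                                             * if resolution never completes */
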
@@ -934,11 +940,11 @@ static void ipoib_setup(struct net_device *dev) | |||
934 | INIT_LIST_HEAD(&priv->dead_ahs); | 940 | INIT_LIST_HEAD(&priv->dead_ahs); |
935 | INIT_LIST_HEAD(&priv->multicast_list); | 941 | INIT_LIST_HEAD(&priv->multicast_list); |
936 | 942 | ||
937 | INIT_WORK(&priv->pkey_task, ipoib_pkey_poll, priv->dev); | 943 | INIT_DELAYED_WORK(&priv->pkey_task, ipoib_pkey_poll); |
938 | INIT_WORK(&priv->mcast_task, ipoib_mcast_join_task, priv->dev); | 944 | INIT_DELAYED_WORK(&priv->mcast_task, ipoib_mcast_join_task); |
939 | INIT_WORK(&priv->flush_task, ipoib_ib_dev_flush, priv->dev); | 945 | INIT_WORK(&priv->flush_task, ipoib_ib_dev_flush); |
940 | INIT_WORK(&priv->restart_task, ipoib_mcast_restart_task, priv->dev); | 946 | INIT_WORK(&priv->restart_task, ipoib_mcast_restart_task); |
941 | INIT_WORK(&priv->ah_reap_task, ipoib_reap_ah, priv->dev); | 947 | INIT_DELAYED_WORK(&priv->ah_reap_task, ipoib_reap_ah); |
942 | } | 948 | } |
943 | 949 | ||
944 | struct ipoib_dev_priv *ipoib_intf_alloc(const char *name) | 950 | struct ipoib_dev_priv *ipoib_intf_alloc(const char *name) |
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c index 3faa1820f0e9..b04b72ca32ed 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c | |||
@@ -114,7 +114,7 @@ static void ipoib_mcast_free(struct ipoib_mcast *mcast) | |||
114 | */ | 114 | */ |
115 | if (neigh->ah) | 115 | if (neigh->ah) |
116 | ipoib_put_ah(neigh->ah); | 116 | ipoib_put_ah(neigh->ah); |
117 | ipoib_neigh_free(neigh); | 117 | ipoib_neigh_free(dev, neigh); |
118 | } | 118 | } |
119 | 119 | ||
120 | spin_unlock_irqrestore(&priv->lock, flags); | 120 | spin_unlock_irqrestore(&priv->lock, flags); |
@@ -399,7 +399,8 @@ static void ipoib_mcast_join_complete(int status, | |||
399 | mcast->backoff = 1; | 399 | mcast->backoff = 1; |
400 | mutex_lock(&mcast_mutex); | 400 | mutex_lock(&mcast_mutex); |
401 | if (test_bit(IPOIB_MCAST_RUN, &priv->flags)) | 401 | if (test_bit(IPOIB_MCAST_RUN, &priv->flags)) |
402 | queue_work(ipoib_workqueue, &priv->mcast_task); | 402 | queue_delayed_work(ipoib_workqueue, |
403 | &priv->mcast_task, 0); | ||
403 | mutex_unlock(&mcast_mutex); | 404 | mutex_unlock(&mcast_mutex); |
404 | complete(&mcast->done); | 405 | complete(&mcast->done); |
405 | return; | 406 | return; |
@@ -435,7 +436,8 @@ static void ipoib_mcast_join_complete(int status, | |||
435 | 436 | ||
436 | if (test_bit(IPOIB_MCAST_RUN, &priv->flags)) { | 437 | if (test_bit(IPOIB_MCAST_RUN, &priv->flags)) { |
437 | if (status == -ETIMEDOUT) | 438 | if (status == -ETIMEDOUT) |
438 | queue_work(ipoib_workqueue, &priv->mcast_task); | 439 | queue_delayed_work(ipoib_workqueue, &priv->mcast_task, |
440 | 0); | ||
439 | else | 441 | else |
440 | queue_delayed_work(ipoib_workqueue, &priv->mcast_task, | 442 | queue_delayed_work(ipoib_workqueue, &priv->mcast_task, |
441 | mcast->backoff * HZ); | 443 | mcast->backoff * HZ); |
@@ -517,10 +519,11 @@ static void ipoib_mcast_join(struct net_device *dev, struct ipoib_mcast *mcast, | |||
517 | mcast->query_id = ret; | 519 | mcast->query_id = ret; |
518 | } | 520 | } |
519 | 521 | ||
520 | void ipoib_mcast_join_task(void *dev_ptr) | 522 | void ipoib_mcast_join_task(struct work_struct *work) |
521 | { | 523 | { |
522 | struct net_device *dev = dev_ptr; | 524 | struct ipoib_dev_priv *priv = |
523 | struct ipoib_dev_priv *priv = netdev_priv(dev); | 525 | container_of(work, struct ipoib_dev_priv, mcast_task.work); |
526 | struct net_device *dev = priv->dev; | ||
524 | 527 | ||
525 | if (!test_bit(IPOIB_MCAST_RUN, &priv->flags)) | 528 | if (!test_bit(IPOIB_MCAST_RUN, &priv->flags)) |
526 | return; | 529 | return; |
@@ -610,7 +613,7 @@ int ipoib_mcast_start_thread(struct net_device *dev) | |||
610 | 613 | ||
611 | mutex_lock(&mcast_mutex); | 614 | mutex_lock(&mcast_mutex); |
612 | if (!test_and_set_bit(IPOIB_MCAST_RUN, &priv->flags)) | 615 | if (!test_and_set_bit(IPOIB_MCAST_RUN, &priv->flags)) |
613 | queue_work(ipoib_workqueue, &priv->mcast_task); | 616 | queue_delayed_work(ipoib_workqueue, &priv->mcast_task, 0); |
614 | mutex_unlock(&mcast_mutex); | 617 | mutex_unlock(&mcast_mutex); |
615 | 618 | ||
616 | spin_lock_irq(&priv->lock); | 619 | spin_lock_irq(&priv->lock); |
@@ -818,10 +821,11 @@ void ipoib_mcast_dev_flush(struct net_device *dev) | |||
818 | } | 821 | } |
819 | } | 822 | } |
820 | 823 | ||
821 | void ipoib_mcast_restart_task(void *dev_ptr) | 824 | void ipoib_mcast_restart_task(struct work_struct *work) |
822 | { | 825 | { |
823 | struct net_device *dev = dev_ptr; | 826 | struct ipoib_dev_priv *priv = |
824 | struct ipoib_dev_priv *priv = netdev_priv(dev); | 827 | container_of(work, struct ipoib_dev_priv, restart_task); |
828 | struct net_device *dev = priv->dev; | ||
825 | struct dev_mc_list *mclist; | 829 | struct dev_mc_list *mclist; |
826 | struct ipoib_mcast *mcast, *tmcast; | 830 | struct ipoib_mcast *mcast, *tmcast; |
827 | LIST_HEAD(remove_list); | 831 | LIST_HEAD(remove_list); |
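
With mcast_task now a struct delayed_work, it can no longer be handed to plain queue_work(); a zero timeout is the "run as soon as possible" spelling, and the existing backoff path keeps its timeout:

    queue_delayed_work(ipoib_workqueue, &priv->mcast_task, 0);                   /* immediately */
    queue_delayed_work(ipoib_workqueue, &priv->mcast_task, mcast->backoff * HZ); /* retry later */
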
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c index eb6f98d82289..9b2041e25d59 100644 --- a/drivers/infiniband/ulp/iser/iscsi_iser.c +++ b/drivers/infiniband/ulp/iser/iscsi_iser.c | |||
@@ -363,11 +363,11 @@ iscsi_iser_conn_start(struct iscsi_cls_conn *cls_conn) | |||
363 | struct iscsi_conn *conn = cls_conn->dd_data; | 363 | struct iscsi_conn *conn = cls_conn->dd_data; |
364 | int err; | 364 | int err; |
365 | 365 | ||
366 | err = iscsi_conn_start(cls_conn); | 366 | err = iser_conn_set_full_featured_mode(conn); |
367 | if (err) | 367 | if (err) |
368 | return err; | 368 | return err; |
369 | 369 | ||
370 | return iser_conn_set_full_featured_mode(conn); | 370 | return iscsi_conn_start(cls_conn); |
371 | } | 371 | } |
372 | 372 | ||
373 | static struct iscsi_transport iscsi_iser_transport; | 373 | static struct iscsi_transport iscsi_iser_transport; |
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.h b/drivers/infiniband/ulp/iser/iscsi_iser.h index 9c53916f28c2..234e5b061a75 100644 --- a/drivers/infiniband/ulp/iser/iscsi_iser.h +++ b/drivers/infiniband/ulp/iser/iscsi_iser.h | |||
@@ -283,7 +283,7 @@ struct iser_global { | |||
283 | struct mutex connlist_mutex; | 283 | struct mutex connlist_mutex; |
284 | struct list_head connlist; /* all iSER IB connections */ | 284 | struct list_head connlist; /* all iSER IB connections */ |
285 | 285 | ||
286 | kmem_cache_t *desc_cache; | 286 | struct kmem_cache *desc_cache; |
287 | }; | 287 | }; |
288 | 288 | ||
289 | extern struct iser_global ig; | 289 | extern struct iser_global ig; |
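
kmem_cache_t was only ever a typedef for struct kmem_cache; the typedef was deprecated and converted tree-wide in this timeframe, so the declaration switches to the struct tag while the slab API itself is untouched. Creation stays as before (a sketch; the flags and the then-current ctor/dtor arguments are illustrative):

    struct kmem_cache *desc_cache;

    desc_cache = kmem_cache_create("iser_descriptors",
                                   sizeof(struct iser_desc), 0,
                                   SLAB_HWCACHE_ALIGN, NULL, NULL);
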
diff --git a/drivers/infiniband/ulp/iser/iser_memory.c b/drivers/infiniband/ulp/iser/iser_memory.c index 0606744c3f84..3aedd59b8a84 100644 --- a/drivers/infiniband/ulp/iser/iser_memory.c +++ b/drivers/infiniband/ulp/iser/iser_memory.c | |||
@@ -35,6 +35,7 @@ | |||
35 | #include <linux/kernel.h> | 35 | #include <linux/kernel.h> |
36 | #include <linux/slab.h> | 36 | #include <linux/slab.h> |
37 | #include <linux/mm.h> | 37 | #include <linux/mm.h> |
38 | #include <linux/highmem.h> | ||
38 | #include <asm/io.h> | 39 | #include <asm/io.h> |
39 | #include <asm/scatterlist.h> | 40 | #include <asm/scatterlist.h> |
40 | #include <linux/scatterlist.h> | 41 | #include <linux/scatterlist.h> |
@@ -113,7 +114,7 @@ int iser_start_rdma_unaligned_sg(struct iscsi_iser_cmd_task *iser_ctask, | |||
113 | 114 | ||
114 | if (cmd_data_len > ISER_KMALLOC_THRESHOLD) | 115 | if (cmd_data_len > ISER_KMALLOC_THRESHOLD) |
115 | mem = (void *)__get_free_pages(GFP_NOIO, | 116 | mem = (void *)__get_free_pages(GFP_NOIO, |
116 | long_log2(roundup_pow_of_two(cmd_data_len)) - PAGE_SHIFT); | 117 | ilog2(roundup_pow_of_two(cmd_data_len)) - PAGE_SHIFT); |
117 | else | 118 | else |
118 | mem = kmalloc(cmd_data_len, GFP_NOIO); | 119 | mem = kmalloc(cmd_data_len, GFP_NOIO); |
119 | 120 | ||
@@ -210,7 +211,7 @@ void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_cmd_task *iser_ctask, | |||
210 | 211 | ||
211 | if (cmd_data_len > ISER_KMALLOC_THRESHOLD) | 212 | if (cmd_data_len > ISER_KMALLOC_THRESHOLD) |
212 | free_pages((unsigned long)mem_copy->copy_buf, | 213 | free_pages((unsigned long)mem_copy->copy_buf, |
213 | long_log2(roundup_pow_of_two(cmd_data_len)) - PAGE_SHIFT); | 214 | ilog2(roundup_pow_of_two(cmd_data_len)) - PAGE_SHIFT); |
214 | else | 215 | else |
215 | kfree(mem_copy->copy_buf); | 216 | kfree(mem_copy->copy_buf); |
216 | 217 | ||
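
The expression ilog2(roundup_pow_of_two(cmd_data_len)) - PAGE_SHIFT computes the page-allocation order for __get_free_pages(): round the byte count up to a power of two, take its base-2 log, subtract log2(PAGE_SIZE). A worked example with 4 KiB pages (PAGE_SHIFT == 12):

    /* cmd_data_len = 20000 -> roundup_pow_of_two = 32768 -> ilog2 = 15
     * -> order = 3, i.e. 8 contiguous pages (32 KiB >= 20000)          */
    int order = ilog2(roundup_pow_of_two(cmd_data_len)) - PAGE_SHIFT;
    mem = (void *) __get_free_pages(GFP_NOIO, order);
    /* ... */
    free_pages((unsigned long) mem, order);   /* must pass the same order */
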
diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c index 18a000034996..693b77002897 100644 --- a/drivers/infiniband/ulp/iser/iser_verbs.c +++ b/drivers/infiniband/ulp/iser/iser_verbs.c | |||
@@ -48,7 +48,7 @@ | |||
48 | 48 | ||
49 | static void iser_cq_tasklet_fn(unsigned long data); | 49 | static void iser_cq_tasklet_fn(unsigned long data); |
50 | static void iser_cq_callback(struct ib_cq *cq, void *cq_context); | 50 | static void iser_cq_callback(struct ib_cq *cq, void *cq_context); |
51 | static void iser_comp_error_worker(void *data); | 51 | static void iser_comp_error_worker(struct work_struct *work); |
52 | 52 | ||
53 | static void iser_cq_event_callback(struct ib_event *cause, void *context) | 53 | static void iser_cq_event_callback(struct ib_event *cause, void *context) |
54 | { | 54 | { |
@@ -480,8 +480,7 @@ int iser_conn_init(struct iser_conn **ibconn) | |||
480 | init_waitqueue_head(&ib_conn->wait); | 480 | init_waitqueue_head(&ib_conn->wait); |
481 | atomic_set(&ib_conn->post_recv_buf_count, 0); | 481 | atomic_set(&ib_conn->post_recv_buf_count, 0); |
482 | atomic_set(&ib_conn->post_send_buf_count, 0); | 482 | atomic_set(&ib_conn->post_send_buf_count, 0); |
483 | INIT_WORK(&ib_conn->comperror_work, iser_comp_error_worker, | 483 | INIT_WORK(&ib_conn->comperror_work, iser_comp_error_worker); |
484 | ib_conn); | ||
485 | INIT_LIST_HEAD(&ib_conn->conn_list); | 484 | INIT_LIST_HEAD(&ib_conn->conn_list); |
486 | spin_lock_init(&ib_conn->lock); | 485 | spin_lock_init(&ib_conn->lock); |
487 | 486 | ||
@@ -754,9 +753,10 @@ int iser_post_send(struct iser_desc *tx_desc) | |||
754 | return ret_val; | 753 | return ret_val; |
755 | } | 754 | } |
756 | 755 | ||
757 | static void iser_comp_error_worker(void *data) | 756 | static void iser_comp_error_worker(struct work_struct *work) |
758 | { | 757 | { |
759 | struct iser_conn *ib_conn = data; | 758 | struct iser_conn *ib_conn = |
759 | container_of(work, struct iser_conn, comperror_work); | ||
760 | 760 | ||
761 | /* getting here when the state is UP means that the conn is being * | 761 | /* getting here when the state is UP means that the conn is being * |
762 | * terminated asynchronously from the iSCSI layer's perspective. */ | 762 | * terminated asynchronously from the iSCSI layer's perspective. */ |
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c index 4b09147f438f..a6289595557b 100644 --- a/drivers/infiniband/ulp/srp/ib_srp.c +++ b/drivers/infiniband/ulp/srp/ib_srp.c | |||
@@ -390,9 +390,10 @@ static void srp_disconnect_target(struct srp_target_port *target) | |||
390 | wait_for_completion(&target->done); | 390 | wait_for_completion(&target->done); |
391 | } | 391 | } |
392 | 392 | ||
393 | static void srp_remove_work(void *target_ptr) | 393 | static void srp_remove_work(struct work_struct *work) |
394 | { | 394 | { |
395 | struct srp_target_port *target = target_ptr; | 395 | struct srp_target_port *target = |
396 | container_of(work, struct srp_target_port, work); | ||
396 | 397 | ||
397 | spin_lock_irq(target->scsi_host->host_lock); | 398 | spin_lock_irq(target->scsi_host->host_lock); |
398 | if (target->state != SRP_TARGET_DEAD) { | 399 | if (target->state != SRP_TARGET_DEAD) { |
@@ -575,7 +576,7 @@ err: | |||
575 | spin_lock_irq(target->scsi_host->host_lock); | 576 | spin_lock_irq(target->scsi_host->host_lock); |
576 | if (target->state == SRP_TARGET_CONNECTING) { | 577 | if (target->state == SRP_TARGET_CONNECTING) { |
577 | target->state = SRP_TARGET_DEAD; | 578 | target->state = SRP_TARGET_DEAD; |
578 | INIT_WORK(&target->work, srp_remove_work, target); | 579 | INIT_WORK(&target->work, srp_remove_work); |
579 | schedule_work(&target->work); | 580 | schedule_work(&target->work); |
580 | } | 581 | } |
581 | spin_unlock_irq(target->scsi_host->host_lock); | 582 | spin_unlock_irq(target->scsi_host->host_lock); |
@@ -1176,9 +1177,11 @@ static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event) | |||
1176 | break; | 1177 | break; |
1177 | } | 1178 | } |
1178 | 1179 | ||
1179 | target->status = srp_alloc_iu_bufs(target); | 1180 | if (!target->rx_ring[0]) { |
1180 | if (target->status) | 1181 | target->status = srp_alloc_iu_bufs(target); |
1181 | break; | 1182 | if (target->status) |
1183 | break; | ||
1184 | } | ||
1182 | 1185 | ||
1183 | qp_attr = kmalloc(sizeof *qp_attr, GFP_KERNEL); | 1186 | qp_attr = kmalloc(sizeof *qp_attr, GFP_KERNEL); |
1184 | if (!qp_attr) { | 1187 | if (!qp_attr) { |
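
The new guard makes the REP handler safe to run more than once: on a reconnect to the same target the handler executes again, and unconditionally calling srp_alloc_iu_bufs() would allocate a second set of receive IU buffers and leak the first. rx_ring[0] doubles as the "already initialized" flag; as a standalone sketch (hypothetical helper name):

    static int srp_ensure_iu_bufs(struct srp_target_port *target)
    {
            if (target->rx_ring[0])     /* left over from a prior connect */
                    return 0;
            return srp_alloc_iu_bufs(target);
    }
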
@@ -1716,7 +1719,8 @@ static ssize_t srp_create_target(struct class_device *class_dev, | |||
1716 | if (!target_host) | 1719 | if (!target_host) |
1717 | return -ENOMEM; | 1720 | return -ENOMEM; |
1718 | 1721 | ||
1719 | target_host->max_lun = SRP_MAX_LUN; | 1722 | target_host->max_lun = SRP_MAX_LUN; |
1723 | target_host->max_cmd_len = sizeof ((struct srp_cmd *) (void *) 0L)->cdb; | ||
1720 | 1724 | ||
1721 | target = host_to_target(target_host); | 1725 | target = host_to_target(target_host); |
1722 | 1726 | ||
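
The added max_cmd_len line tells the SCSI midlayer the largest CDB this transport can carry, sized with the classic "sizeof a struct member without an instance" idiom: the null-pointer dereference sits inside sizeof and is therefore never evaluated. An equivalent, slightly more conventional spelling:

    /* sizeof() does not evaluate its operand, so this is safe;
     * the cdb field of struct srp_cmd is 16 bytes */
    size_t cdb_len = sizeof(((struct srp_cmd *) NULL)->cdb);
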