about summary refs log tree commit diff stats
path: root/drivers/infiniband/core
diff options
context:
space:
mode:
authorDavid Howells <dhowells@redhat.com>2006-11-22 09:57:56 -0500
committerDavid Howells <dhowells@redhat.com>2006-11-22 09:57:56 -0500
commitc4028958b6ecad064b1a6303a6a5906d4fe48d73 (patch)
tree1c4c89652c62a75da09f9b9442012007e4ac6250 /drivers/infiniband/core
parent65f27f38446e1976cc98fd3004b110fedcddd189 (diff)
WorkStruct: make allyesconfig
Fix up for make allyesconfig. Signed-Off-By: David Howells <dhowells@redhat.com>
Diffstat (limited to 'drivers/infiniband/core')
-rw-r--r--drivers/infiniband/core/addr.c6
-rw-r--r--drivers/infiniband/core/cache.c7
-rw-r--r--drivers/infiniband/core/cm.c19
-rw-r--r--drivers/infiniband/core/cma.c10
-rw-r--r--drivers/infiniband/core/iwcm.c7
-rw-r--r--drivers/infiniband/core/mad.c25
-rw-r--r--drivers/infiniband/core/mad_priv.h2
-rw-r--r--drivers/infiniband/core/mad_rmpp.c18
-rw-r--r--drivers/infiniband/core/sa_query.c10
-rw-r--r--drivers/infiniband/core/uverbs_mem.c7
10 files changed, 58 insertions, 53 deletions
diff --git a/drivers/infiniband/core/addr.c b/drivers/infiniband/core/addr.c
index e11187ecc931..84b2f5cb3722 100644
--- a/drivers/infiniband/core/addr.c
+++ b/drivers/infiniband/core/addr.c
@@ -55,11 +55,11 @@ struct addr_req {
55 int status; 55 int status;
56}; 56};
57 57
58static void process_req(void *data); 58static void process_req(struct work_struct *work);
59 59
60static DEFINE_MUTEX(lock); 60static DEFINE_MUTEX(lock);
61static LIST_HEAD(req_list); 61static LIST_HEAD(req_list);
62static DECLARE_WORK(work, process_req, NULL); 62static DECLARE_DELAYED_WORK(work, process_req);
63static struct workqueue_struct *addr_wq; 63static struct workqueue_struct *addr_wq;
64 64
65void rdma_addr_register_client(struct rdma_addr_client *client) 65void rdma_addr_register_client(struct rdma_addr_client *client)
@@ -215,7 +215,7 @@ out:
215 return ret; 215 return ret;
216} 216}
217 217
218static void process_req(void *data) 218static void process_req(struct work_struct *work)
219{ 219{
220 struct addr_req *req, *temp_req; 220 struct addr_req *req, *temp_req;
221 struct sockaddr_in *src_in, *dst_in; 221 struct sockaddr_in *src_in, *dst_in;
diff --git a/drivers/infiniband/core/cache.c b/drivers/infiniband/core/cache.c
index 20e9f64e67a6..98272fbbfb31 100644
--- a/drivers/infiniband/core/cache.c
+++ b/drivers/infiniband/core/cache.c
@@ -285,9 +285,10 @@ err:
285 kfree(tprops); 285 kfree(tprops);
286} 286}
287 287
288static void ib_cache_task(void *work_ptr) 288static void ib_cache_task(struct work_struct *_work)
289{ 289{
290 struct ib_update_work *work = work_ptr; 290 struct ib_update_work *work =
291 container_of(_work, struct ib_update_work, work);
291 292
292 ib_cache_update(work->device, work->port_num); 293 ib_cache_update(work->device, work->port_num);
293 kfree(work); 294 kfree(work);
@@ -306,7 +307,7 @@ static void ib_cache_event(struct ib_event_handler *handler,
306 event->event == IB_EVENT_CLIENT_REREGISTER) { 307 event->event == IB_EVENT_CLIENT_REREGISTER) {
307 work = kmalloc(sizeof *work, GFP_ATOMIC); 308 work = kmalloc(sizeof *work, GFP_ATOMIC);
308 if (work) { 309 if (work) {
309 INIT_WORK(&work->work, ib_cache_task, work); 310 INIT_WORK(&work->work, ib_cache_task);
310 work->device = event->device; 311 work->device = event->device;
311 work->port_num = event->element.port_num; 312 work->port_num = event->element.port_num;
312 schedule_work(&work->work); 313 schedule_work(&work->work);
diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
index 25b1018a476c..e1990f531d0a 100644
--- a/drivers/infiniband/core/cm.c
+++ b/drivers/infiniband/core/cm.c
@@ -101,7 +101,7 @@ struct cm_av {
101}; 101};
102 102
103struct cm_work { 103struct cm_work {
104 struct work_struct work; 104 struct delayed_work work;
105 struct list_head list; 105 struct list_head list;
106 struct cm_port *port; 106 struct cm_port *port;
107 struct ib_mad_recv_wc *mad_recv_wc; /* Received MADs */ 107 struct ib_mad_recv_wc *mad_recv_wc; /* Received MADs */
@@ -161,7 +161,7 @@ struct cm_id_private {
161 atomic_t work_count; 161 atomic_t work_count;
162}; 162};
163 163
164static void cm_work_handler(void *data); 164static void cm_work_handler(struct work_struct *work);
165 165
166static inline void cm_deref_id(struct cm_id_private *cm_id_priv) 166static inline void cm_deref_id(struct cm_id_private *cm_id_priv)
167{ 167{
@@ -669,8 +669,7 @@ static struct cm_timewait_info * cm_create_timewait_info(__be32 local_id)
669 return ERR_PTR(-ENOMEM); 669 return ERR_PTR(-ENOMEM);
670 670
671 timewait_info->work.local_id = local_id; 671 timewait_info->work.local_id = local_id;
672 INIT_WORK(&timewait_info->work.work, cm_work_handler, 672 INIT_DELAYED_WORK(&timewait_info->work.work, cm_work_handler);
673 &timewait_info->work);
674 timewait_info->work.cm_event.event = IB_CM_TIMEWAIT_EXIT; 673 timewait_info->work.cm_event.event = IB_CM_TIMEWAIT_EXIT;
675 return timewait_info; 674 return timewait_info;
676} 675}
@@ -2987,9 +2986,9 @@ static void cm_send_handler(struct ib_mad_agent *mad_agent,
2987 } 2986 }
2988} 2987}
2989 2988
2990static void cm_work_handler(void *data) 2989static void cm_work_handler(struct work_struct *_work)
2991{ 2990{
2992 struct cm_work *work = data; 2991 struct cm_work *work = container_of(_work, struct cm_work, work.work);
2993 int ret; 2992 int ret;
2994 2993
2995 switch (work->cm_event.event) { 2994 switch (work->cm_event.event) {
@@ -3079,12 +3078,12 @@ int ib_cm_establish(struct ib_cm_id *cm_id)
3079 * we need to find the cm_id once we're in the context of the 3078 * we need to find the cm_id once we're in the context of the
3080 * worker thread, rather than holding a reference on it. 3079 * worker thread, rather than holding a reference on it.
3081 */ 3080 */
3082 INIT_WORK(&work->work, cm_work_handler, work); 3081 INIT_DELAYED_WORK(&work->work, cm_work_handler);
3083 work->local_id = cm_id->local_id; 3082 work->local_id = cm_id->local_id;
3084 work->remote_id = cm_id->remote_id; 3083 work->remote_id = cm_id->remote_id;
3085 work->mad_recv_wc = NULL; 3084 work->mad_recv_wc = NULL;
3086 work->cm_event.event = IB_CM_USER_ESTABLISHED; 3085 work->cm_event.event = IB_CM_USER_ESTABLISHED;
3087 queue_work(cm.wq, &work->work); 3086 queue_delayed_work(cm.wq, &work->work, 0);
3088out: 3087out:
3089 return ret; 3088 return ret;
3090} 3089}
@@ -3146,11 +3145,11 @@ static void cm_recv_handler(struct ib_mad_agent *mad_agent,
3146 return; 3145 return;
3147 } 3146 }
3148 3147
3149 INIT_WORK(&work->work, cm_work_handler, work); 3148 INIT_DELAYED_WORK(&work->work, cm_work_handler);
3150 work->cm_event.event = event; 3149 work->cm_event.event = event;
3151 work->mad_recv_wc = mad_recv_wc; 3150 work->mad_recv_wc = mad_recv_wc;
3152 work->port = (struct cm_port *)mad_agent->context; 3151 work->port = (struct cm_port *)mad_agent->context;
3153 queue_work(cm.wq, &work->work); 3152 queue_delayed_work(cm.wq, &work->work, 0);
3154} 3153}
3155 3154
3156static int cm_init_qp_init_attr(struct cm_id_private *cm_id_priv, 3155static int cm_init_qp_init_attr(struct cm_id_private *cm_id_priv,
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index 845090b0859c..189f73f3f721 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -1341,9 +1341,9 @@ static int cma_query_ib_route(struct rdma_id_private *id_priv, int timeout_ms,
1341 return (id_priv->query_id < 0) ? id_priv->query_id : 0; 1341 return (id_priv->query_id < 0) ? id_priv->query_id : 0;
1342} 1342}
1343 1343
1344static void cma_work_handler(void *data) 1344static void cma_work_handler(struct work_struct *_work)
1345{ 1345{
1346 struct cma_work *work = data; 1346 struct cma_work *work = container_of(_work, struct cma_work, work);
1347 struct rdma_id_private *id_priv = work->id; 1347 struct rdma_id_private *id_priv = work->id;
1348 int destroy = 0; 1348 int destroy = 0;
1349 1349
@@ -1374,7 +1374,7 @@ static int cma_resolve_ib_route(struct rdma_id_private *id_priv, int timeout_ms)
1374 return -ENOMEM; 1374 return -ENOMEM;
1375 1375
1376 work->id = id_priv; 1376 work->id = id_priv;
1377 INIT_WORK(&work->work, cma_work_handler, work); 1377 INIT_WORK(&work->work, cma_work_handler);
1378 work->old_state = CMA_ROUTE_QUERY; 1378 work->old_state = CMA_ROUTE_QUERY;
1379 work->new_state = CMA_ROUTE_RESOLVED; 1379 work->new_state = CMA_ROUTE_RESOLVED;
1380 work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED; 1380 work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED;
@@ -1431,7 +1431,7 @@ static int cma_resolve_iw_route(struct rdma_id_private *id_priv, int timeout_ms)
1431 return -ENOMEM; 1431 return -ENOMEM;
1432 1432
1433 work->id = id_priv; 1433 work->id = id_priv;
1434 INIT_WORK(&work->work, cma_work_handler, work); 1434 INIT_WORK(&work->work, cma_work_handler);
1435 work->old_state = CMA_ROUTE_QUERY; 1435 work->old_state = CMA_ROUTE_QUERY;
1436 work->new_state = CMA_ROUTE_RESOLVED; 1436 work->new_state = CMA_ROUTE_RESOLVED;
1437 work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED; 1437 work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED;
@@ -1585,7 +1585,7 @@ static int cma_resolve_loopback(struct rdma_id_private *id_priv)
1585 } 1585 }
1586 1586
1587 work->id = id_priv; 1587 work->id = id_priv;
1588 INIT_WORK(&work->work, cma_work_handler, work); 1588 INIT_WORK(&work->work, cma_work_handler);
1589 work->old_state = CMA_ADDR_QUERY; 1589 work->old_state = CMA_ADDR_QUERY;
1590 work->new_state = CMA_ADDR_RESOLVED; 1590 work->new_state = CMA_ADDR_RESOLVED;
1591 work->event.event = RDMA_CM_EVENT_ADDR_RESOLVED; 1591 work->event.event = RDMA_CM_EVENT_ADDR_RESOLVED;
diff --git a/drivers/infiniband/core/iwcm.c b/drivers/infiniband/core/iwcm.c
index c3fb304a4e86..9bfa785252dc 100644
--- a/drivers/infiniband/core/iwcm.c
+++ b/drivers/infiniband/core/iwcm.c
@@ -828,9 +828,10 @@ static int process_event(struct iwcm_id_private *cm_id_priv,
828 * thread asleep on the destroy_comp list vs. an object destroyed 828 * thread asleep on the destroy_comp list vs. an object destroyed
829 * here synchronously when the last reference is removed. 829 * here synchronously when the last reference is removed.
830 */ 830 */
831static void cm_work_handler(void *arg) 831static void cm_work_handler(struct work_struct *_work)
832{ 832{
833 struct iwcm_work *work = arg, lwork; 833 struct iwcm_work lwork, *work =
834 container_of(_work, struct iwcm_work, work);
834 struct iwcm_id_private *cm_id_priv = work->cm_id; 835 struct iwcm_id_private *cm_id_priv = work->cm_id;
835 unsigned long flags; 836 unsigned long flags;
836 int empty; 837 int empty;
@@ -899,7 +900,7 @@ static int cm_event_handler(struct iw_cm_id *cm_id,
899 goto out; 900 goto out;
900 } 901 }
901 902
902 INIT_WORK(&work->work, cm_work_handler, work); 903 INIT_WORK(&work->work, cm_work_handler);
903 work->cm_id = cm_id_priv; 904 work->cm_id = cm_id_priv;
904 work->event = *iw_event; 905 work->event = *iw_event;
905 906
diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c
index a72bcea46ff6..5a54ac35e961 100644
--- a/drivers/infiniband/core/mad.c
+++ b/drivers/infiniband/core/mad.c
@@ -65,8 +65,8 @@ static struct ib_mad_agent_private *find_mad_agent(
65static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info, 65static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
66 struct ib_mad_private *mad); 66 struct ib_mad_private *mad);
67static void cancel_mads(struct ib_mad_agent_private *mad_agent_priv); 67static void cancel_mads(struct ib_mad_agent_private *mad_agent_priv);
68static void timeout_sends(void *data); 68static void timeout_sends(struct work_struct *work);
69static void local_completions(void *data); 69static void local_completions(struct work_struct *work);
70static int add_nonoui_reg_req(struct ib_mad_reg_req *mad_reg_req, 70static int add_nonoui_reg_req(struct ib_mad_reg_req *mad_reg_req,
71 struct ib_mad_agent_private *agent_priv, 71 struct ib_mad_agent_private *agent_priv,
72 u8 mgmt_class); 72 u8 mgmt_class);
@@ -356,10 +356,9 @@ struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device,
356 INIT_LIST_HEAD(&mad_agent_priv->wait_list); 356 INIT_LIST_HEAD(&mad_agent_priv->wait_list);
357 INIT_LIST_HEAD(&mad_agent_priv->done_list); 357 INIT_LIST_HEAD(&mad_agent_priv->done_list);
358 INIT_LIST_HEAD(&mad_agent_priv->rmpp_list); 358 INIT_LIST_HEAD(&mad_agent_priv->rmpp_list);
359 INIT_WORK(&mad_agent_priv->timed_work, timeout_sends, mad_agent_priv); 359 INIT_DELAYED_WORK(&mad_agent_priv->timed_work, timeout_sends);
360 INIT_LIST_HEAD(&mad_agent_priv->local_list); 360 INIT_LIST_HEAD(&mad_agent_priv->local_list);
361 INIT_WORK(&mad_agent_priv->local_work, local_completions, 361 INIT_WORK(&mad_agent_priv->local_work, local_completions);
362 mad_agent_priv);
363 atomic_set(&mad_agent_priv->refcount, 1); 362 atomic_set(&mad_agent_priv->refcount, 1);
364 init_completion(&mad_agent_priv->comp); 363 init_completion(&mad_agent_priv->comp);
365 364
@@ -2198,12 +2197,12 @@ static void mad_error_handler(struct ib_mad_port_private *port_priv,
2198/* 2197/*
2199 * IB MAD completion callback 2198 * IB MAD completion callback
2200 */ 2199 */
2201static void ib_mad_completion_handler(void *data) 2200static void ib_mad_completion_handler(struct work_struct *work)
2202{ 2201{
2203 struct ib_mad_port_private *port_priv; 2202 struct ib_mad_port_private *port_priv;
2204 struct ib_wc wc; 2203 struct ib_wc wc;
2205 2204
2206 port_priv = (struct ib_mad_port_private *)data; 2205 port_priv = container_of(work, struct ib_mad_port_private, work);
2207 ib_req_notify_cq(port_priv->cq, IB_CQ_NEXT_COMP); 2206 ib_req_notify_cq(port_priv->cq, IB_CQ_NEXT_COMP);
2208 2207
2209 while (ib_poll_cq(port_priv->cq, 1, &wc) == 1) { 2208 while (ib_poll_cq(port_priv->cq, 1, &wc) == 1) {
@@ -2324,7 +2323,7 @@ void ib_cancel_mad(struct ib_mad_agent *mad_agent,
2324} 2323}
2325EXPORT_SYMBOL(ib_cancel_mad); 2324EXPORT_SYMBOL(ib_cancel_mad);
2326 2325
2327static void local_completions(void *data) 2326static void local_completions(struct work_struct *work)
2328{ 2327{
2329 struct ib_mad_agent_private *mad_agent_priv; 2328 struct ib_mad_agent_private *mad_agent_priv;
2330 struct ib_mad_local_private *local; 2329 struct ib_mad_local_private *local;
@@ -2334,7 +2333,8 @@ static void local_completions(void *data)
2334 struct ib_wc wc; 2333 struct ib_wc wc;
2335 struct ib_mad_send_wc mad_send_wc; 2334 struct ib_mad_send_wc mad_send_wc;
2336 2335
2337 mad_agent_priv = (struct ib_mad_agent_private *)data; 2336 mad_agent_priv =
2337 container_of(work, struct ib_mad_agent_private, local_work);
2338 2338
2339 spin_lock_irqsave(&mad_agent_priv->lock, flags); 2339 spin_lock_irqsave(&mad_agent_priv->lock, flags);
2340 while (!list_empty(&mad_agent_priv->local_list)) { 2340 while (!list_empty(&mad_agent_priv->local_list)) {
@@ -2434,14 +2434,15 @@ static int retry_send(struct ib_mad_send_wr_private *mad_send_wr)
2434 return ret; 2434 return ret;
2435} 2435}
2436 2436
2437static void timeout_sends(void *data) 2437static void timeout_sends(struct work_struct *work)
2438{ 2438{
2439 struct ib_mad_agent_private *mad_agent_priv; 2439 struct ib_mad_agent_private *mad_agent_priv;
2440 struct ib_mad_send_wr_private *mad_send_wr; 2440 struct ib_mad_send_wr_private *mad_send_wr;
2441 struct ib_mad_send_wc mad_send_wc; 2441 struct ib_mad_send_wc mad_send_wc;
2442 unsigned long flags, delay; 2442 unsigned long flags, delay;
2443 2443
2444 mad_agent_priv = (struct ib_mad_agent_private *)data; 2444 mad_agent_priv = container_of(work, struct ib_mad_agent_private,
2445 timed_work.work);
2445 mad_send_wc.vendor_err = 0; 2446 mad_send_wc.vendor_err = 0;
2446 2447
2447 spin_lock_irqsave(&mad_agent_priv->lock, flags); 2448 spin_lock_irqsave(&mad_agent_priv->lock, flags);
@@ -2799,7 +2800,7 @@ static int ib_mad_port_open(struct ib_device *device,
2799 ret = -ENOMEM; 2800 ret = -ENOMEM;
2800 goto error8; 2801 goto error8;
2801 } 2802 }
2802 INIT_WORK(&port_priv->work, ib_mad_completion_handler, port_priv); 2803 INIT_WORK(&port_priv->work, ib_mad_completion_handler);
2803 2804
2804 spin_lock_irqsave(&ib_mad_port_list_lock, flags); 2805 spin_lock_irqsave(&ib_mad_port_list_lock, flags);
2805 list_add_tail(&port_priv->port_list, &ib_mad_port_list); 2806 list_add_tail(&port_priv->port_list, &ib_mad_port_list);
diff --git a/drivers/infiniband/core/mad_priv.h b/drivers/infiniband/core/mad_priv.h
index d06b59083f6e..d5548e73e068 100644
--- a/drivers/infiniband/core/mad_priv.h
+++ b/drivers/infiniband/core/mad_priv.h
@@ -102,7 +102,7 @@ struct ib_mad_agent_private {
102 struct list_head send_list; 102 struct list_head send_list;
103 struct list_head wait_list; 103 struct list_head wait_list;
104 struct list_head done_list; 104 struct list_head done_list;
105 struct work_struct timed_work; 105 struct delayed_work timed_work;
106 unsigned long timeout; 106 unsigned long timeout;
107 struct list_head local_list; 107 struct list_head local_list;
108 struct work_struct local_work; 108 struct work_struct local_work;
diff --git a/drivers/infiniband/core/mad_rmpp.c b/drivers/infiniband/core/mad_rmpp.c
index 1ef79d015a1e..3663fd7022be 100644
--- a/drivers/infiniband/core/mad_rmpp.c
+++ b/drivers/infiniband/core/mad_rmpp.c
@@ -45,8 +45,8 @@ enum rmpp_state {
45struct mad_rmpp_recv { 45struct mad_rmpp_recv {
46 struct ib_mad_agent_private *agent; 46 struct ib_mad_agent_private *agent;
47 struct list_head list; 47 struct list_head list;
48 struct work_struct timeout_work; 48 struct delayed_work timeout_work;
49 struct work_struct cleanup_work; 49 struct delayed_work cleanup_work;
50 struct completion comp; 50 struct completion comp;
51 enum rmpp_state state; 51 enum rmpp_state state;
52 spinlock_t lock; 52 spinlock_t lock;
@@ -233,9 +233,10 @@ static void nack_recv(struct ib_mad_agent_private *agent,
233 } 233 }
234} 234}
235 235
236static void recv_timeout_handler(void *data) 236static void recv_timeout_handler(struct work_struct *work)
237{ 237{
238 struct mad_rmpp_recv *rmpp_recv = data; 238 struct mad_rmpp_recv *rmpp_recv =
239 container_of(work, struct mad_rmpp_recv, timeout_work.work);
239 struct ib_mad_recv_wc *rmpp_wc; 240 struct ib_mad_recv_wc *rmpp_wc;
240 unsigned long flags; 241 unsigned long flags;
241 242
@@ -254,9 +255,10 @@ static void recv_timeout_handler(void *data)
254 ib_free_recv_mad(rmpp_wc); 255 ib_free_recv_mad(rmpp_wc);
255} 256}
256 257
257static void recv_cleanup_handler(void *data) 258static void recv_cleanup_handler(struct work_struct *work)
258{ 259{
259 struct mad_rmpp_recv *rmpp_recv = data; 260 struct mad_rmpp_recv *rmpp_recv =
261 container_of(work, struct mad_rmpp_recv, cleanup_work.work);
260 unsigned long flags; 262 unsigned long flags;
261 263
262 spin_lock_irqsave(&rmpp_recv->agent->lock, flags); 264 spin_lock_irqsave(&rmpp_recv->agent->lock, flags);
@@ -285,8 +287,8 @@ create_rmpp_recv(struct ib_mad_agent_private *agent,
285 287
286 rmpp_recv->agent = agent; 288 rmpp_recv->agent = agent;
287 init_completion(&rmpp_recv->comp); 289 init_completion(&rmpp_recv->comp);
288 INIT_WORK(&rmpp_recv->timeout_work, recv_timeout_handler, rmpp_recv); 290 INIT_DELAYED_WORK(&rmpp_recv->timeout_work, recv_timeout_handler);
289 INIT_WORK(&rmpp_recv->cleanup_work, recv_cleanup_handler, rmpp_recv); 291 INIT_DELAYED_WORK(&rmpp_recv->cleanup_work, recv_cleanup_handler);
290 spin_lock_init(&rmpp_recv->lock); 292 spin_lock_init(&rmpp_recv->lock);
291 rmpp_recv->state = RMPP_STATE_ACTIVE; 293 rmpp_recv->state = RMPP_STATE_ACTIVE;
292 atomic_set(&rmpp_recv->refcount, 1); 294 atomic_set(&rmpp_recv->refcount, 1);
diff --git a/drivers/infiniband/core/sa_query.c b/drivers/infiniband/core/sa_query.c
index 1706d3c7e95e..e45afba75341 100644
--- a/drivers/infiniband/core/sa_query.c
+++ b/drivers/infiniband/core/sa_query.c
@@ -360,9 +360,10 @@ static void free_sm_ah(struct kref *kref)
360 kfree(sm_ah); 360 kfree(sm_ah);
361} 361}
362 362
363static void update_sm_ah(void *port_ptr) 363static void update_sm_ah(struct work_struct *work)
364{ 364{
365 struct ib_sa_port *port = port_ptr; 365 struct ib_sa_port *port =
366 container_of(work, struct ib_sa_port, update_task);
366 struct ib_sa_sm_ah *new_ah, *old_ah; 367 struct ib_sa_sm_ah *new_ah, *old_ah;
367 struct ib_port_attr port_attr; 368 struct ib_port_attr port_attr;
368 struct ib_ah_attr ah_attr; 369 struct ib_ah_attr ah_attr;
@@ -992,8 +993,7 @@ static void ib_sa_add_one(struct ib_device *device)
992 if (IS_ERR(sa_dev->port[i].agent)) 993 if (IS_ERR(sa_dev->port[i].agent))
993 goto err; 994 goto err;
994 995
995 INIT_WORK(&sa_dev->port[i].update_task, 996 INIT_WORK(&sa_dev->port[i].update_task, update_sm_ah);
996 update_sm_ah, &sa_dev->port[i]);
997 } 997 }
998 998
999 ib_set_client_data(device, &sa_client, sa_dev); 999 ib_set_client_data(device, &sa_client, sa_dev);
@@ -1010,7 +1010,7 @@ static void ib_sa_add_one(struct ib_device *device)
1010 goto err; 1010 goto err;
1011 1011
1012 for (i = 0; i <= e - s; ++i) 1012 for (i = 0; i <= e - s; ++i)
1013 update_sm_ah(&sa_dev->port[i]); 1013 update_sm_ah(&sa_dev->port[i].update_task);
1014 1014
1015 return; 1015 return;
1016 1016
diff --git a/drivers/infiniband/core/uverbs_mem.c b/drivers/infiniband/core/uverbs_mem.c
index efe147dbeb42..db12cc0841df 100644
--- a/drivers/infiniband/core/uverbs_mem.c
+++ b/drivers/infiniband/core/uverbs_mem.c
@@ -179,9 +179,10 @@ void ib_umem_release(struct ib_device *dev, struct ib_umem *umem)
179 up_write(&current->mm->mmap_sem); 179 up_write(&current->mm->mmap_sem);
180} 180}
181 181
182static void ib_umem_account(void *work_ptr) 182static void ib_umem_account(struct work_struct *_work)
183{ 183{
184 struct ib_umem_account_work *work = work_ptr; 184 struct ib_umem_account_work *work =
185 container_of(_work, struct ib_umem_account_work, work);
185 186
186 down_write(&work->mm->mmap_sem); 187 down_write(&work->mm->mmap_sem);
187 work->mm->locked_vm -= work->diff; 188 work->mm->locked_vm -= work->diff;
@@ -216,7 +217,7 @@ void ib_umem_release_on_close(struct ib_device *dev, struct ib_umem *umem)
216 return; 217 return;
217 } 218 }
218 219
219 INIT_WORK(&work->work, ib_umem_account, work); 220 INIT_WORK(&work->work, ib_umem_account);
220 work->mm = mm; 221 work->mm = mm;
221 work->diff = PAGE_ALIGN(umem->length + umem->offset) >> PAGE_SHIFT; 222 work->diff = PAGE_ALIGN(umem->length + umem->offset) >> PAGE_SHIFT;
222 223