author     David Howells <dhowells@redhat.com>    2006-11-22 09:57:56 -0500
committer  David Howells <dhowells@redhat.com>    2006-11-22 09:57:56 -0500
commit     c4028958b6ecad064b1a6303a6a5906d4fe48d73 (patch)
tree       1c4c89652c62a75da09f9b9442012007e4ac6250 /drivers/infiniband
parent     65f27f38446e1976cc98fd3004b110fedcddd189 (diff)
WorkStruct: make allyesconfig
Fix up for make allyesconfig.
Signed-Off-By: David Howells <dhowells@redhat.com>
Diffstat (limited to 'drivers/infiniband')
-rw-r--r--  drivers/infiniband/core/addr.c                  |  6
-rw-r--r--  drivers/infiniband/core/cache.c                 |  7
-rw-r--r--  drivers/infiniband/core/cm.c                    | 19
-rw-r--r--  drivers/infiniband/core/cma.c                   | 10
-rw-r--r--  drivers/infiniband/core/iwcm.c                  |  7
-rw-r--r--  drivers/infiniband/core/mad.c                   | 25
-rw-r--r--  drivers/infiniband/core/mad_priv.h              |  2
-rw-r--r--  drivers/infiniband/core/mad_rmpp.c              | 18
-rw-r--r--  drivers/infiniband/core/sa_query.c              | 10
-rw-r--r--  drivers/infiniband/core/uverbs_mem.c            |  7
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_user_pages.c  |  7
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_catas.c       |  4
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib.h            | 16
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_ib.c         | 25
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_main.c       | 10
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_multicast.c  | 22
-rw-r--r--  drivers/infiniband/ulp/iser/iser_verbs.c        | 10
-rw-r--r--  drivers/infiniband/ulp/srp/ib_srp.c             |  7
18 files changed, 113 insertions, 99 deletions
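Every hunk below applies the same mechanical conversion, introduced by the parent commit (65f27f38): a work handler no longer receives an opaque void * context; it receives a pointer to its own work_struct and recovers the enclosing object with container_of(). A minimal before/after sketch of the pattern, using a hypothetical struct foo (illustrative names, not taken from this patch):

#include <linux/workqueue.h>

struct foo {
        struct work_struct work;
        int value;
};

/* Old style (pre-2.6.20): the context arrived as void *.
 *         static void foo_handler(void *data)
 *         { struct foo *f = data; ... }
 *         INIT_WORK(&f->work, foo_handler, f);
 */

/* New style: the work item itself is the argument. */
static void foo_handler(struct work_struct *work)
{
        struct foo *f = container_of(work, struct foo, work);

        /* ... use f->value ... */
}

/* INIT_WORK(&f->work, foo_handler);  -- no context argument any more */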
diff --git a/drivers/infiniband/core/addr.c b/drivers/infiniband/core/addr.c
index e11187ecc931..84b2f5cb3722 100644
--- a/drivers/infiniband/core/addr.c
+++ b/drivers/infiniband/core/addr.c
@@ -55,11 +55,11 @@ struct addr_req {
         int status;
 };
 
-static void process_req(void *data);
+static void process_req(struct work_struct *work);
 
 static DEFINE_MUTEX(lock);
 static LIST_HEAD(req_list);
-static DECLARE_WORK(work, process_req, NULL);
+static DECLARE_DELAYED_WORK(work, process_req);
 static struct workqueue_struct *addr_wq;
 
 void rdma_addr_register_client(struct rdma_addr_client *client)
@@ -215,7 +215,7 @@ out:
         return ret;
 }
 
-static void process_req(void *data)
+static void process_req(struct work_struct *work)
 {
         struct addr_req *req, *temp_req;
         struct sockaddr_in *src_in, *dst_in;
diff --git a/drivers/infiniband/core/cache.c b/drivers/infiniband/core/cache.c
index 20e9f64e67a6..98272fbbfb31 100644
--- a/drivers/infiniband/core/cache.c
+++ b/drivers/infiniband/core/cache.c
@@ -285,9 +285,10 @@ err:
         kfree(tprops);
 }
 
-static void ib_cache_task(void *work_ptr)
+static void ib_cache_task(struct work_struct *_work)
 {
-        struct ib_update_work *work = work_ptr;
+        struct ib_update_work *work =
+                container_of(_work, struct ib_update_work, work);
 
         ib_cache_update(work->device, work->port_num);
         kfree(work);
@@ -306,7 +307,7 @@ static void ib_cache_event(struct ib_event_handler *handler,
             event->event == IB_EVENT_CLIENT_REREGISTER) {
                 work = kmalloc(sizeof *work, GFP_ATOMIC);
                 if (work) {
-                        INIT_WORK(&work->work, ib_cache_task, work);
+                        INIT_WORK(&work->work, ib_cache_task);
                         work->device = event->device;
                         work->port_num = event->element.port_num;
                         schedule_work(&work->work);
diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
index 25b1018a476c..e1990f531d0a 100644
--- a/drivers/infiniband/core/cm.c
+++ b/drivers/infiniband/core/cm.c
@@ -101,7 +101,7 @@ struct cm_av {
 };
 
 struct cm_work {
-        struct work_struct work;
+        struct delayed_work work;
         struct list_head list;
         struct cm_port *port;
         struct ib_mad_recv_wc *mad_recv_wc;     /* Received MADs */
@@ -161,7 +161,7 @@ struct cm_id_private {
         atomic_t work_count;
 };
 
-static void cm_work_handler(void *data);
+static void cm_work_handler(struct work_struct *work);
 
 static inline void cm_deref_id(struct cm_id_private *cm_id_priv)
 {
@@ -669,8 +669,7 @@ static struct cm_timewait_info * cm_create_timewait_info(__be32 local_id)
                 return ERR_PTR(-ENOMEM);
 
         timewait_info->work.local_id = local_id;
-        INIT_WORK(&timewait_info->work.work, cm_work_handler,
-                  &timewait_info->work);
+        INIT_DELAYED_WORK(&timewait_info->work.work, cm_work_handler);
         timewait_info->work.cm_event.event = IB_CM_TIMEWAIT_EXIT;
         return timewait_info;
 }
@@ -2987,9 +2986,9 @@ static void cm_send_handler(struct ib_mad_agent *mad_agent,
         }
 }
 
-static void cm_work_handler(void *data)
+static void cm_work_handler(struct work_struct *_work)
 {
-        struct cm_work *work = data;
+        struct cm_work *work = container_of(_work, struct cm_work, work.work);
         int ret;
 
         switch (work->cm_event.event) {
@@ -3079,12 +3078,12 @@ int ib_cm_establish(struct ib_cm_id *cm_id)
          * we need to find the cm_id once we're in the context of the
          * worker thread, rather than holding a reference on it.
          */
-        INIT_WORK(&work->work, cm_work_handler, work);
+        INIT_DELAYED_WORK(&work->work, cm_work_handler);
         work->local_id = cm_id->local_id;
         work->remote_id = cm_id->remote_id;
         work->mad_recv_wc = NULL;
         work->cm_event.event = IB_CM_USER_ESTABLISHED;
-        queue_work(cm.wq, &work->work);
+        queue_delayed_work(cm.wq, &work->work, 0);
 out:
         return ret;
 }
@@ -3146,11 +3145,11 @@ static void cm_recv_handler(struct ib_mad_agent *mad_agent,
                 return;
         }
 
-        INIT_WORK(&work->work, cm_work_handler, work);
+        INIT_DELAYED_WORK(&work->work, cm_work_handler);
         work->cm_event.event = event;
         work->mad_recv_wc = mad_recv_wc;
         work->port = (struct cm_port *)mad_agent->context;
-        queue_work(cm.wq, &work->work);
+        queue_delayed_work(cm.wq, &work->work, 0);
 }
 
 static int cm_init_qp_init_attr(struct cm_id_private *cm_id_priv,
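cm.c also shows the second half of the conversion: a work item that can be queued with a timeout becomes a struct delayed_work, immediate submissions change from queue_work() to queue_delayed_work(..., 0), and the handler's container_of() must go through the embedded member's .work field. A sketch under the same hypothetical naming as above:

struct bar {
        struct delayed_work dwork;
};

static void bar_handler(struct work_struct *_work)
{
        /* delayed_work embeds a work_struct, hence the extra ".work" */
        struct bar *b = container_of(_work, struct bar, dwork.work);

        /* ... */
}

/* INIT_DELAYED_WORK(&b->dwork, bar_handler);
 * queue_delayed_work(wq, &b->dwork, 0);      -- immediate, like queue_work()
 * queue_delayed_work(wq, &b->dwork, 5 * HZ); -- deferred five seconds
 */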
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index 845090b0859c..189f73f3f721 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -1341,9 +1341,9 @@ static int cma_query_ib_route(struct rdma_id_private *id_priv, int timeout_ms,
         return (id_priv->query_id < 0) ? id_priv->query_id : 0;
 }
 
-static void cma_work_handler(void *data)
+static void cma_work_handler(struct work_struct *_work)
 {
-        struct cma_work *work = data;
+        struct cma_work *work = container_of(_work, struct cma_work, work);
         struct rdma_id_private *id_priv = work->id;
         int destroy = 0;
 
@@ -1374,7 +1374,7 @@ static int cma_resolve_ib_route(struct rdma_id_private *id_priv, int timeout_ms)
                 return -ENOMEM;
 
         work->id = id_priv;
-        INIT_WORK(&work->work, cma_work_handler, work);
+        INIT_WORK(&work->work, cma_work_handler);
         work->old_state = CMA_ROUTE_QUERY;
         work->new_state = CMA_ROUTE_RESOLVED;
         work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED;
@@ -1431,7 +1431,7 @@ static int cma_resolve_iw_route(struct rdma_id_private *id_priv, int timeout_ms)
                 return -ENOMEM;
 
         work->id = id_priv;
-        INIT_WORK(&work->work, cma_work_handler, work);
+        INIT_WORK(&work->work, cma_work_handler);
         work->old_state = CMA_ROUTE_QUERY;
         work->new_state = CMA_ROUTE_RESOLVED;
         work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED;
@@ -1585,7 +1585,7 @@ static int cma_resolve_loopback(struct rdma_id_private *id_priv)
         }
 
         work->id = id_priv;
-        INIT_WORK(&work->work, cma_work_handler, work);
+        INIT_WORK(&work->work, cma_work_handler);
         work->old_state = CMA_ADDR_QUERY;
         work->new_state = CMA_ADDR_RESOLVED;
         work->event.event = RDMA_CM_EVENT_ADDR_RESOLVED;
diff --git a/drivers/infiniband/core/iwcm.c b/drivers/infiniband/core/iwcm.c
index c3fb304a4e86..9bfa785252dc 100644
--- a/drivers/infiniband/core/iwcm.c
+++ b/drivers/infiniband/core/iwcm.c
@@ -828,9 +828,10 @@ static int process_event(struct iwcm_id_private *cm_id_priv,
  * thread asleep on the destroy_comp list vs. an object destroyed
  * here synchronously when the last reference is removed.
  */
-static void cm_work_handler(void *arg)
+static void cm_work_handler(struct work_struct *_work)
 {
-        struct iwcm_work *work = arg, lwork;
+        struct iwcm_work lwork, *work =
+                container_of(_work, struct iwcm_work, work);
         struct iwcm_id_private *cm_id_priv = work->cm_id;
         unsigned long flags;
         int empty;
@@ -899,7 +900,7 @@ static int cm_event_handler(struct iw_cm_id *cm_id,
                 goto out;
         }
 
-        INIT_WORK(&work->work, cm_work_handler, work);
+        INIT_WORK(&work->work, cm_work_handler);
         work->cm_id = cm_id_priv;
         work->event = *iw_event;
 
diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c
index a72bcea46ff6..5a54ac35e961 100644
--- a/drivers/infiniband/core/mad.c
+++ b/drivers/infiniband/core/mad.c
@@ -65,8 +65,8 @@ static struct ib_mad_agent_private *find_mad_agent(
 static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
                                     struct ib_mad_private *mad);
 static void cancel_mads(struct ib_mad_agent_private *mad_agent_priv);
-static void timeout_sends(void *data);
-static void local_completions(void *data);
+static void timeout_sends(struct work_struct *work);
+static void local_completions(struct work_struct *work);
 static int add_nonoui_reg_req(struct ib_mad_reg_req *mad_reg_req,
                               struct ib_mad_agent_private *agent_priv,
                               u8 mgmt_class);
@@ -356,10 +356,9 @@ struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device,
         INIT_LIST_HEAD(&mad_agent_priv->wait_list);
         INIT_LIST_HEAD(&mad_agent_priv->done_list);
         INIT_LIST_HEAD(&mad_agent_priv->rmpp_list);
-        INIT_WORK(&mad_agent_priv->timed_work, timeout_sends, mad_agent_priv);
+        INIT_DELAYED_WORK(&mad_agent_priv->timed_work, timeout_sends);
         INIT_LIST_HEAD(&mad_agent_priv->local_list);
-        INIT_WORK(&mad_agent_priv->local_work, local_completions,
-                  mad_agent_priv);
+        INIT_WORK(&mad_agent_priv->local_work, local_completions);
         atomic_set(&mad_agent_priv->refcount, 1);
         init_completion(&mad_agent_priv->comp);
 
@@ -2198,12 +2197,12 @@ static void mad_error_handler(struct ib_mad_port_private *port_priv,
 /*
  * IB MAD completion callback
  */
-static void ib_mad_completion_handler(void *data)
+static void ib_mad_completion_handler(struct work_struct *work)
 {
         struct ib_mad_port_private *port_priv;
         struct ib_wc wc;
 
-        port_priv = (struct ib_mad_port_private *)data;
+        port_priv = container_of(work, struct ib_mad_port_private, work);
         ib_req_notify_cq(port_priv->cq, IB_CQ_NEXT_COMP);
 
         while (ib_poll_cq(port_priv->cq, 1, &wc) == 1) {
@@ -2324,7 +2323,7 @@ void ib_cancel_mad(struct ib_mad_agent *mad_agent,
 }
 EXPORT_SYMBOL(ib_cancel_mad);
 
-static void local_completions(void *data)
+static void local_completions(struct work_struct *work)
 {
         struct ib_mad_agent_private *mad_agent_priv;
         struct ib_mad_local_private *local;
@@ -2334,7 +2333,8 @@ static void local_completions(void *data)
         struct ib_wc wc;
         struct ib_mad_send_wc mad_send_wc;
 
-        mad_agent_priv = (struct ib_mad_agent_private *)data;
+        mad_agent_priv =
+                container_of(work, struct ib_mad_agent_private, local_work);
 
         spin_lock_irqsave(&mad_agent_priv->lock, flags);
         while (!list_empty(&mad_agent_priv->local_list)) {
@@ -2434,14 +2434,15 @@ static int retry_send(struct ib_mad_send_wr_private *mad_send_wr)
         return ret;
 }
 
-static void timeout_sends(void *data)
+static void timeout_sends(struct work_struct *work)
 {
         struct ib_mad_agent_private *mad_agent_priv;
         struct ib_mad_send_wr_private *mad_send_wr;
         struct ib_mad_send_wc mad_send_wc;
         unsigned long flags, delay;
 
-        mad_agent_priv = (struct ib_mad_agent_private *)data;
+        mad_agent_priv = container_of(work, struct ib_mad_agent_private,
                                       timed_work.work);
         mad_send_wc.vendor_err = 0;
 
         spin_lock_irqsave(&mad_agent_priv->lock, flags);
@@ -2799,7 +2800,7 @@ static int ib_mad_port_open(struct ib_device *device,
                 ret = -ENOMEM;
                 goto error8;
         }
-        INIT_WORK(&port_priv->work, ib_mad_completion_handler, port_priv);
+        INIT_WORK(&port_priv->work, ib_mad_completion_handler);
 
         spin_lock_irqsave(&ib_mad_port_list_lock, flags);
         list_add_tail(&port_priv->port_list, &ib_mad_port_list);
diff --git a/drivers/infiniband/core/mad_priv.h b/drivers/infiniband/core/mad_priv.h
index d06b59083f6e..d5548e73e068 100644
--- a/drivers/infiniband/core/mad_priv.h
+++ b/drivers/infiniband/core/mad_priv.h
@@ -102,7 +102,7 @@ struct ib_mad_agent_private {
         struct list_head send_list;
         struct list_head wait_list;
         struct list_head done_list;
-        struct work_struct timed_work;
+        struct delayed_work timed_work;
         unsigned long timeout;
         struct list_head local_list;
         struct work_struct local_work;
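timed_work is queued with a timeout (see timeout_sends() in mad.c above), so it has to become a delayed_work. For orientation, the type introduced by the parent commit is essentially a work_struct paired with a timer — paraphrased here from the <linux/workqueue.h> of that era, so treat the exact layout as indicative:

struct delayed_work {
        struct work_struct work;
        struct timer_list timer;        /* fires, then queues .work */
};

This embedding is why every handler for a delayed_work reaches its container via member.work rather than member.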
diff --git a/drivers/infiniband/core/mad_rmpp.c b/drivers/infiniband/core/mad_rmpp.c
index 1ef79d015a1e..3663fd7022be 100644
--- a/drivers/infiniband/core/mad_rmpp.c
+++ b/drivers/infiniband/core/mad_rmpp.c
@@ -45,8 +45,8 @@ enum rmpp_state {
 struct mad_rmpp_recv {
         struct ib_mad_agent_private *agent;
         struct list_head list;
-        struct work_struct timeout_work;
-        struct work_struct cleanup_work;
+        struct delayed_work timeout_work;
+        struct delayed_work cleanup_work;
         struct completion comp;
         enum rmpp_state state;
         spinlock_t lock;
@@ -233,9 +233,10 @@ static void nack_recv(struct ib_mad_agent_private *agent,
         }
 }
 
-static void recv_timeout_handler(void *data)
+static void recv_timeout_handler(struct work_struct *work)
 {
-        struct mad_rmpp_recv *rmpp_recv = data;
+        struct mad_rmpp_recv *rmpp_recv =
+                container_of(work, struct mad_rmpp_recv, timeout_work.work);
         struct ib_mad_recv_wc *rmpp_wc;
         unsigned long flags;
 
@@ -254,9 +255,10 @@ static void recv_timeout_handler(void *data)
         ib_free_recv_mad(rmpp_wc);
 }
 
-static void recv_cleanup_handler(void *data)
+static void recv_cleanup_handler(struct work_struct *work)
 {
-        struct mad_rmpp_recv *rmpp_recv = data;
+        struct mad_rmpp_recv *rmpp_recv =
+                container_of(work, struct mad_rmpp_recv, cleanup_work.work);
         unsigned long flags;
 
         spin_lock_irqsave(&rmpp_recv->agent->lock, flags);
@@ -285,8 +287,8 @@ create_rmpp_recv(struct ib_mad_agent_private *agent,
 
         rmpp_recv->agent = agent;
         init_completion(&rmpp_recv->comp);
-        INIT_WORK(&rmpp_recv->timeout_work, recv_timeout_handler, rmpp_recv);
-        INIT_WORK(&rmpp_recv->cleanup_work, recv_cleanup_handler, rmpp_recv);
+        INIT_DELAYED_WORK(&rmpp_recv->timeout_work, recv_timeout_handler);
+        INIT_DELAYED_WORK(&rmpp_recv->cleanup_work, recv_cleanup_handler);
         spin_lock_init(&rmpp_recv->lock);
         rmpp_recv->state = RMPP_STATE_ACTIVE;
         atomic_set(&rmpp_recv->refcount, 1);
diff --git a/drivers/infiniband/core/sa_query.c b/drivers/infiniband/core/sa_query.c
index 1706d3c7e95e..e45afba75341 100644
--- a/drivers/infiniband/core/sa_query.c
+++ b/drivers/infiniband/core/sa_query.c
@@ -360,9 +360,10 @@ static void free_sm_ah(struct kref *kref)
         kfree(sm_ah);
 }
 
-static void update_sm_ah(void *port_ptr)
+static void update_sm_ah(struct work_struct *work)
 {
-        struct ib_sa_port *port = port_ptr;
+        struct ib_sa_port *port =
+                container_of(work, struct ib_sa_port, update_task);
         struct ib_sa_sm_ah *new_ah, *old_ah;
         struct ib_port_attr port_attr;
         struct ib_ah_attr ah_attr;
@@ -992,8 +993,7 @@ static void ib_sa_add_one(struct ib_device *device)
                 if (IS_ERR(sa_dev->port[i].agent))
                         goto err;
 
-                INIT_WORK(&sa_dev->port[i].update_task,
-                          update_sm_ah, &sa_dev->port[i]);
+                INIT_WORK(&sa_dev->port[i].update_task, update_sm_ah);
         }
 
         ib_set_client_data(device, &sa_client, sa_dev);
@@ -1010,7 +1010,7 @@ static void ib_sa_add_one(struct ib_device *device)
                 goto err;
 
         for (i = 0; i <= e - s; ++i)
-                update_sm_ah(&sa_dev->port[i]);
+                update_sm_ah(&sa_dev->port[i].update_task);
 
         return;
 
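Note the last hunk: code that used to call the handler synchronously with a context pointer now passes the address of the embedded work item, so that container_of() inside the handler still finds the right object — the call looks exactly like a dispatch from the workqueue core. A self-contained sketch with hypothetical names:

#include <linux/workqueue.h>

struct port_like {
        struct work_struct update_task;
};

static void update_handler(struct work_struct *work)
{
        struct port_like *port =
                container_of(work, struct port_like, update_task);

        /* ... refresh per-port state ... */
}

static void init_ports(struct port_like *ports, int n)
{
        int i;

        for (i = 0; i < n; i++) {
                INIT_WORK(&ports[i].update_task, update_handler);
                /* synchronous first run: hand over the embedded item */
                update_handler(&ports[i].update_task);
        }
}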
diff --git a/drivers/infiniband/core/uverbs_mem.c b/drivers/infiniband/core/uverbs_mem.c
index efe147dbeb42..db12cc0841df 100644
--- a/drivers/infiniband/core/uverbs_mem.c
+++ b/drivers/infiniband/core/uverbs_mem.c
@@ -179,9 +179,10 @@ void ib_umem_release(struct ib_device *dev, struct ib_umem *umem)
         up_write(&current->mm->mmap_sem);
 }
 
-static void ib_umem_account(void *work_ptr)
+static void ib_umem_account(struct work_struct *_work)
 {
-        struct ib_umem_account_work *work = work_ptr;
+        struct ib_umem_account_work *work =
+                container_of(_work, struct ib_umem_account_work, work);
 
         down_write(&work->mm->mmap_sem);
         work->mm->locked_vm -= work->diff;
@@ -216,7 +217,7 @@ void ib_umem_release_on_close(struct ib_device *dev, struct ib_umem *umem)
                 return;
         }
 
-        INIT_WORK(&work->work, ib_umem_account, work);
+        INIT_WORK(&work->work, ib_umem_account);
         work->mm = mm;
         work->diff = PAGE_ALIGN(umem->length + umem->offset) >> PAGE_SHIFT;
 
diff --git a/drivers/infiniband/hw/ipath/ipath_user_pages.c b/drivers/infiniband/hw/ipath/ipath_user_pages.c
index 413754b1d8a2..8536aeb96af8 100644
--- a/drivers/infiniband/hw/ipath/ipath_user_pages.c
+++ b/drivers/infiniband/hw/ipath/ipath_user_pages.c
@@ -214,9 +214,10 @@ struct ipath_user_pages_work {
         unsigned long num_pages;
 };
 
-static void user_pages_account(void *ptr)
+static void user_pages_account(struct work_struct *_work)
 {
-        struct ipath_user_pages_work *work = ptr;
+        struct ipath_user_pages_work *work =
+                container_of(_work, struct ipath_user_pages_work, work);
 
         down_write(&work->mm->mmap_sem);
         work->mm->locked_vm -= work->num_pages;
@@ -242,7 +243,7 @@ void ipath_release_user_pages_on_close(struct page **p, size_t num_pages)
 
         goto bail;
 
-        INIT_WORK(&work->work, user_pages_account, work);
+        INIT_WORK(&work->work, user_pages_account);
         work->mm = mm;
         work->num_pages = num_pages;
 
diff --git a/drivers/infiniband/hw/mthca/mthca_catas.c b/drivers/infiniband/hw/mthca/mthca_catas.c
index cd044ea2dfa4..e948158a28d9 100644
--- a/drivers/infiniband/hw/mthca/mthca_catas.c
+++ b/drivers/infiniband/hw/mthca/mthca_catas.c
@@ -57,7 +57,7 @@ static int catas_reset_disable;
 module_param_named(catas_reset_disable, catas_reset_disable, int, 0644);
 MODULE_PARM_DESC(catas_reset_disable, "disable reset on catastrophic event if nonzero");
 
-static void catas_reset(void *work_ptr)
+static void catas_reset(struct work_struct *work)
 {
         struct mthca_dev *dev, *tmpdev;
         LIST_HEAD(tlist);
@@ -203,7 +203,7 @@ void mthca_stop_catas_poll(struct mthca_dev *dev)
 
 int __init mthca_catas_init(void)
 {
-        INIT_WORK(&catas_work, catas_reset, NULL);
+        INIT_WORK(&catas_work, catas_reset);
 
         catas_wq = create_singlethread_workqueue("mthca_catas");
         if (!catas_wq)
diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h
index 0b8a79d53a00..3b2ee0eba05e 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib.h
+++ b/drivers/infiniband/ulp/ipoib/ipoib.h
@@ -136,11 +136,11 @@ struct ipoib_dev_priv {
         struct list_head multicast_list;
         struct rb_root multicast_tree;
 
-        struct work_struct pkey_task;
-        struct work_struct mcast_task;
+        struct delayed_work pkey_task;
+        struct delayed_work mcast_task;
         struct work_struct flush_task;
         struct work_struct restart_task;
-        struct work_struct ah_reap_task;
+        struct delayed_work ah_reap_task;
 
         struct ib_device *ca;
         u8 port;
@@ -254,13 +254,13 @@ int ipoib_add_pkey_attr(struct net_device *dev);
 
 void ipoib_send(struct net_device *dev, struct sk_buff *skb,
                 struct ipoib_ah *address, u32 qpn);
-void ipoib_reap_ah(void *dev_ptr);
+void ipoib_reap_ah(struct work_struct *work);
 
 void ipoib_flush_paths(struct net_device *dev);
 struct ipoib_dev_priv *ipoib_intf_alloc(const char *format);
 
 int ipoib_ib_dev_init(struct net_device *dev, struct ib_device *ca, int port);
-void ipoib_ib_dev_flush(void *dev);
+void ipoib_ib_dev_flush(struct work_struct *work);
 void ipoib_ib_dev_cleanup(struct net_device *dev);
 
 int ipoib_ib_dev_open(struct net_device *dev);
@@ -271,10 +271,10 @@ int ipoib_ib_dev_stop(struct net_device *dev);
 int ipoib_dev_init(struct net_device *dev, struct ib_device *ca, int port);
 void ipoib_dev_cleanup(struct net_device *dev);
 
-void ipoib_mcast_join_task(void *dev_ptr);
+void ipoib_mcast_join_task(struct work_struct *work);
 void ipoib_mcast_send(struct net_device *dev, void *mgid, struct sk_buff *skb);
 
-void ipoib_mcast_restart_task(void *dev_ptr);
+void ipoib_mcast_restart_task(struct work_struct *work);
 int ipoib_mcast_start_thread(struct net_device *dev);
 int ipoib_mcast_stop_thread(struct net_device *dev, int flush);
 
@@ -312,7 +312,7 @@ void ipoib_event(struct ib_event_handler *handler,
 int ipoib_vlan_add(struct net_device *pdev, unsigned short pkey);
 int ipoib_vlan_delete(struct net_device *pdev, unsigned short pkey);
 
-void ipoib_pkey_poll(void *dev);
+void ipoib_pkey_poll(struct work_struct *work);
 int ipoib_pkey_dev_delay_open(struct net_device *dev);
 
 #ifdef CONFIG_INFINIBAND_IPOIB_DEBUG
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
index 8bf5e9ec7c95..f10fba5d3265 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
@@ -400,10 +400,11 @@ static void __ipoib_reap_ah(struct net_device *dev)
         spin_unlock_irq(&priv->tx_lock);
 }
 
-void ipoib_reap_ah(void *dev_ptr)
+void ipoib_reap_ah(struct work_struct *work)
 {
-        struct net_device *dev = dev_ptr;
-        struct ipoib_dev_priv *priv = netdev_priv(dev);
+        struct ipoib_dev_priv *priv =
+                container_of(work, struct ipoib_dev_priv, ah_reap_task.work);
+        struct net_device *dev = priv->dev;
 
         __ipoib_reap_ah(dev);
 
@@ -613,10 +614,11 @@ int ipoib_ib_dev_init(struct net_device *dev, struct ib_device *ca, int port)
         return 0;
 }
 
-void ipoib_ib_dev_flush(void *_dev)
+void ipoib_ib_dev_flush(struct work_struct *work)
 {
-        struct net_device *dev = (struct net_device *)_dev;
-        struct ipoib_dev_priv *priv = netdev_priv(dev), *cpriv;
+        struct ipoib_dev_priv *cpriv, *priv =
+                container_of(work, struct ipoib_dev_priv, flush_task);
+        struct net_device *dev = priv->dev;
 
         if (!test_bit(IPOIB_FLAG_INITIALIZED, &priv->flags) ) {
                 ipoib_dbg(priv, "Not flushing - IPOIB_FLAG_INITIALIZED not set.\n");
@@ -638,14 +640,14 @@ void ipoib_ib_dev_flush(void *_dev)
          */
         if (test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags)) {
                 ipoib_ib_dev_up(dev);
-                ipoib_mcast_restart_task(dev);
+                ipoib_mcast_restart_task(&priv->restart_task);
         }
 
         mutex_lock(&priv->vlan_mutex);
 
         /* Flush any child interfaces too */
         list_for_each_entry(cpriv, &priv->child_intfs, list)
-                ipoib_ib_dev_flush(cpriv->dev);
+                ipoib_ib_dev_flush(&cpriv->flush_task);
 
         mutex_unlock(&priv->vlan_mutex);
 }
@@ -672,10 +674,11 @@ void ipoib_ib_dev_cleanup(struct net_device *dev)
  * change async notification is available.
  */
 
-void ipoib_pkey_poll(void *dev_ptr)
+void ipoib_pkey_poll(struct work_struct *work)
 {
-        struct net_device *dev = dev_ptr;
-        struct ipoib_dev_priv *priv = netdev_priv(dev);
+        struct ipoib_dev_priv *priv =
+                container_of(work, struct ipoib_dev_priv, pkey_task.work);
+        struct net_device *dev = priv->dev;
 
         ipoib_pkey_dev_check_presence(dev);
 
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index 85522daeb946..71114a8c12b9 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -933,11 +933,11 @@ static void ipoib_setup(struct net_device *dev)
         INIT_LIST_HEAD(&priv->dead_ahs);
         INIT_LIST_HEAD(&priv->multicast_list);
 
-        INIT_WORK(&priv->pkey_task, ipoib_pkey_poll, priv->dev);
-        INIT_WORK(&priv->mcast_task, ipoib_mcast_join_task, priv->dev);
-        INIT_WORK(&priv->flush_task, ipoib_ib_dev_flush, priv->dev);
-        INIT_WORK(&priv->restart_task, ipoib_mcast_restart_task, priv->dev);
-        INIT_WORK(&priv->ah_reap_task, ipoib_reap_ah, priv->dev);
+        INIT_DELAYED_WORK(&priv->pkey_task, ipoib_pkey_poll);
+        INIT_DELAYED_WORK(&priv->mcast_task, ipoib_mcast_join_task);
+        INIT_WORK(&priv->flush_task, ipoib_ib_dev_flush);
+        INIT_WORK(&priv->restart_task, ipoib_mcast_restart_task);
+        INIT_DELAYED_WORK(&priv->ah_reap_task, ipoib_reap_ah);
 }
 
 struct ipoib_dev_priv *ipoib_intf_alloc(const char *name)
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
index 3faa1820f0e9..f0a4fac1a215 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
@@ -399,7 +399,8 @@ static void ipoib_mcast_join_complete(int status,
                 mcast->backoff = 1;
                 mutex_lock(&mcast_mutex);
                 if (test_bit(IPOIB_MCAST_RUN, &priv->flags))
-                        queue_work(ipoib_workqueue, &priv->mcast_task);
+                        queue_delayed_work(ipoib_workqueue,
+                                           &priv->mcast_task, 0);
                 mutex_unlock(&mcast_mutex);
                 complete(&mcast->done);
                 return;
@@ -435,7 +436,8 @@ static void ipoib_mcast_join_complete(int status,
 
         if (test_bit(IPOIB_MCAST_RUN, &priv->flags)) {
                 if (status == -ETIMEDOUT)
-                        queue_work(ipoib_workqueue, &priv->mcast_task);
+                        queue_delayed_work(ipoib_workqueue, &priv->mcast_task,
+                                           0);
                 else
                         queue_delayed_work(ipoib_workqueue, &priv->mcast_task,
                                            mcast->backoff * HZ);
@@ -517,10 +519,11 @@ static void ipoib_mcast_join(struct net_device *dev, struct ipoib_mcast *mcast,
                 mcast->query_id = ret;
 }
 
-void ipoib_mcast_join_task(void *dev_ptr)
+void ipoib_mcast_join_task(struct work_struct *work)
 {
-        struct net_device *dev = dev_ptr;
-        struct ipoib_dev_priv *priv = netdev_priv(dev);
+        struct ipoib_dev_priv *priv =
+                container_of(work, struct ipoib_dev_priv, mcast_task.work);
+        struct net_device *dev = priv->dev;
 
         if (!test_bit(IPOIB_MCAST_RUN, &priv->flags))
                 return;
@@ -610,7 +613,7 @@ int ipoib_mcast_start_thread(struct net_device *dev)
 
         mutex_lock(&mcast_mutex);
         if (!test_and_set_bit(IPOIB_MCAST_RUN, &priv->flags))
-                queue_work(ipoib_workqueue, &priv->mcast_task);
+                queue_delayed_work(ipoib_workqueue, &priv->mcast_task, 0);
         mutex_unlock(&mcast_mutex);
 
         spin_lock_irq(&priv->lock);
@@ -818,10 +821,11 @@ void ipoib_mcast_dev_flush(struct net_device *dev)
         }
 }
 
-void ipoib_mcast_restart_task(void *dev_ptr)
+void ipoib_mcast_restart_task(struct work_struct *work)
 {
-        struct net_device *dev = dev_ptr;
-        struct ipoib_dev_priv *priv = netdev_priv(dev);
+        struct ipoib_dev_priv *priv =
+                container_of(work, struct ipoib_dev_priv, restart_task);
+        struct net_device *dev = priv->dev;
         struct dev_mc_list *mclist;
         struct ipoib_mcast *mcast, *tmcast;
         LIST_HEAD(remove_list);
diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c
index 18a000034996..693b77002897 100644
--- a/drivers/infiniband/ulp/iser/iser_verbs.c
+++ b/drivers/infiniband/ulp/iser/iser_verbs.c
@@ -48,7 +48,7 @@
 
 static void iser_cq_tasklet_fn(unsigned long data);
 static void iser_cq_callback(struct ib_cq *cq, void *cq_context);
-static void iser_comp_error_worker(void *data);
+static void iser_comp_error_worker(struct work_struct *work);
 
 static void iser_cq_event_callback(struct ib_event *cause, void *context)
 {
@@ -480,8 +480,7 @@ int iser_conn_init(struct iser_conn **ibconn)
         init_waitqueue_head(&ib_conn->wait);
         atomic_set(&ib_conn->post_recv_buf_count, 0);
         atomic_set(&ib_conn->post_send_buf_count, 0);
-        INIT_WORK(&ib_conn->comperror_work, iser_comp_error_worker,
-                  ib_conn);
+        INIT_WORK(&ib_conn->comperror_work, iser_comp_error_worker);
         INIT_LIST_HEAD(&ib_conn->conn_list);
         spin_lock_init(&ib_conn->lock);
 
@@ -754,9 +753,10 @@ int iser_post_send(struct iser_desc *tx_desc)
         return ret_val;
 }
 
-static void iser_comp_error_worker(void *data)
+static void iser_comp_error_worker(struct work_struct *work)
 {
-        struct iser_conn *ib_conn = data;
+        struct iser_conn *ib_conn =
+                container_of(work, struct iser_conn, comperror_work);
 
         /* getting here when the state is UP means that the conn is being *
          * terminated asynchronously from the iSCSI layer's perspective. */
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
index 4b09147f438f..214f66195af2 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.c
+++ b/drivers/infiniband/ulp/srp/ib_srp.c
@@ -390,9 +390,10 @@ static void srp_disconnect_target(struct srp_target_port *target)
         wait_for_completion(&target->done);
 }
 
-static void srp_remove_work(void *target_ptr)
+static void srp_remove_work(struct work_struct *work)
 {
-        struct srp_target_port *target = target_ptr;
+        struct srp_target_port *target =
+                container_of(work, struct srp_target_port, work);
 
         spin_lock_irq(target->scsi_host->host_lock);
         if (target->state != SRP_TARGET_DEAD) {
@@ -575,7 +576,7 @@ err:
         spin_lock_irq(target->scsi_host->host_lock);
         if (target->state == SRP_TARGET_CONNECTING) {
                 target->state = SRP_TARGET_DEAD;
-                INIT_WORK(&target->work, srp_remove_work, target);
+                INIT_WORK(&target->work, srp_remove_work);
                 schedule_work(&target->work);
         }
         spin_unlock_irq(target->scsi_host->host_lock);