author		Trond Myklebust <Trond.Myklebust@netapp.com>	2006-12-07 15:48:15 -0500
committer	Trond Myklebust <Trond.Myklebust@netapp.com>	2006-12-07 15:48:15 -0500
commit		34161db6b14d984fb9b06c735b7b42f8803f6851 (patch)
tree		99656278b6697f1cde5b05894b7c0ee22c63a00e /net
parent		5847e1f4d058677c5e46dc6c3e3c70e8855ea3ba (diff)
parent		620034c84d1d939717bdfbe02c51a3fee43541c3 (diff)
Merge branch 'master' of /home/trondmy/kernel/linux-2.6/ into merge_linus
Conflicts:
include/linux/sunrpc/xprt.h
net/sunrpc/xprtsock.c
Fix up conflicts with the workqueue changes.
Diffstat (limited to 'net')
29 files changed, 149 insertions, 126 deletions
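
The bulk of this merge is the tree-wide workqueue API conversion: work handlers now take a struct work_struct * instead of an opaque void *, and recover their context with container_of(). Timed work moves to the new struct delayed_work, whose handler indexes through the embedded .work member. A minimal sketch of the pattern, with hypothetical my_priv/my_handler names (not from this diff):

	#include <linux/workqueue.h>

	struct my_priv {
		struct delayed_work my_work;	/* was: struct work_struct */
		int state;
	};

	/* new-style handler: context comes from container_of(), not a void * */
	static void my_handler(struct work_struct *work)
	{
		struct my_priv *priv =
			container_of(work, struct my_priv, my_work.work);

		priv->state++;
		/* re-arm, as lec_arp_check_expire() does below */
		schedule_delayed_work(&priv->my_work, 3 * HZ);
	}

	static void my_init(struct my_priv *priv)
	{
		/* was: INIT_WORK(&priv->my_work, my_handler, priv); */
		INIT_DELAYED_WORK(&priv->my_work, my_handler);
		schedule_delayed_work(&priv->my_work, 3 * HZ);
	}
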
diff --git a/net/atm/lec.c b/net/atm/lec.c
index 5946ec63724f..3fc0abeeaf34 100644
--- a/net/atm/lec.c
+++ b/net/atm/lec.c
@@ -1454,7 +1454,7 @@ static void lane2_associate_ind(struct net_device *dev, u8 *mac_addr,
 
 #define LEC_ARP_REFRESH_INTERVAL (3*HZ)
 
-static void lec_arp_check_expire(void *data);
+static void lec_arp_check_expire(struct work_struct *work);
 static void lec_arp_expire_arp(unsigned long data);
 
 /*
@@ -1477,7 +1477,7 @@ static void lec_arp_init(struct lec_priv *priv)
 	INIT_HLIST_HEAD(&priv->lec_no_forward);
 	INIT_HLIST_HEAD(&priv->mcast_fwds);
 	spin_lock_init(&priv->lec_arp_lock);
-	INIT_WORK(&priv->lec_arp_work, lec_arp_check_expire, priv);
+	INIT_DELAYED_WORK(&priv->lec_arp_work, lec_arp_check_expire);
 	schedule_delayed_work(&priv->lec_arp_work, LEC_ARP_REFRESH_INTERVAL);
 }
 
@@ -1875,10 +1875,11 @@ static void lec_arp_expire_vcc(unsigned long data)
  * to ESI_FORWARD_DIRECT. This causes the flush period to end
  * regardless of the progress of the flush protocol.
  */
-static void lec_arp_check_expire(void *data)
+static void lec_arp_check_expire(struct work_struct *work)
 {
 	unsigned long flags;
-	struct lec_priv *priv = data;
+	struct lec_priv *priv =
+		container_of(work, struct lec_priv, lec_arp_work.work);
 	struct hlist_node *node, *next;
 	struct lec_arp_table *entry;
 	unsigned long now;
diff --git a/net/atm/lec.h b/net/atm/lec.h
index 24cc95f86741..99136babd535 100644
--- a/net/atm/lec.h
+++ b/net/atm/lec.h
@@ -92,7 +92,7 @@ struct lec_priv {
 	spinlock_t lec_arp_lock;
 	struct atm_vcc *mcast_vcc;		/* Default Multicast Send VCC */
 	struct atm_vcc *lecd;
-	struct work_struct lec_arp_work;	/* C10 */
+	struct delayed_work lec_arp_work;	/* C10 */
 	unsigned int maximum_unknown_frame_count;
 	/*
 	 * Within the period of time defined by this variable, the client will send
diff --git a/net/bluetooth/hci_sysfs.c b/net/bluetooth/hci_sysfs.c
index 3eeeb7a86e75..d4c935692ccf 100644
--- a/net/bluetooth/hci_sysfs.c
+++ b/net/bluetooth/hci_sysfs.c
@@ -237,9 +237,9 @@ static void bt_release(struct device *dev)
 	kfree(data);
 }
 
-static void add_conn(void *data)
+static void add_conn(struct work_struct *work)
 {
-	struct hci_conn *conn = data;
+	struct hci_conn *conn = container_of(work, struct hci_conn, work);
 	int i;
 
 	if (device_register(&conn->dev) < 0) {
@@ -272,14 +272,14 @@ void hci_conn_add_sysfs(struct hci_conn *conn)
 
 	dev_set_drvdata(&conn->dev, conn);
 
-	INIT_WORK(&conn->work, add_conn, (void *) conn);
+	INIT_WORK(&conn->work, add_conn);
 
 	schedule_work(&conn->work);
 }
 
-static void del_conn(void *data)
+static void del_conn(struct work_struct *work)
 {
-	struct hci_conn *conn = data;
+	struct hci_conn *conn = container_of(work, struct hci_conn, work);
 	device_del(&conn->dev);
 }
 
@@ -287,7 +287,7 @@ void hci_conn_del_sysfs(struct hci_conn *conn)
 {
 	BT_DBG("conn %p", conn);
 
-	INIT_WORK(&conn->work, del_conn, (void *) conn);
+	INIT_WORK(&conn->work, del_conn);
 
 	schedule_work(&conn->work);
 }
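
For one-shot work with no timer, the embedded member stays a plain struct work_struct and only the INIT_WORK() signature changes; the handler then uses container_of() without the extra .work step, exactly as add_conn() and del_conn() do above. A small sketch with hypothetical names:

	#include <linux/workqueue.h>

	struct my_conn {
		struct work_struct work;
		/* ... */
	};

	static void my_work_fn(struct work_struct *work)
	{
		struct my_conn *conn = container_of(work, struct my_conn, work);
		/* ... use conn ... */
	}

	static void my_submit(struct my_conn *conn)
	{
		/* was: INIT_WORK(&conn->work, my_work_fn, (void *)conn); */
		INIT_WORK(&conn->work, my_work_fn);
		schedule_work(&conn->work);
	}
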
diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c
index f753c40c11d2..55bb2634c088 100644
--- a/net/bridge/br_if.c
+++ b/net/bridge/br_if.c
@@ -77,12 +77,16 @@ static int port_cost(struct net_device *dev)
  * Called from work queue to allow for calling functions that
  * might sleep (such as speed check), and to debounce.
  */
-static void port_carrier_check(void *arg)
+static void port_carrier_check(struct work_struct *work)
 {
-	struct net_device *dev = arg;
 	struct net_bridge_port *p;
+	struct net_device *dev;
 	struct net_bridge *br;
 
+	dev = container_of(work, struct net_bridge_port,
+			   carrier_check.work)->dev;
+	work_release(work);
+
 	rtnl_lock();
 	p = dev->br_port;
 	if (!p)
@@ -276,7 +280,7 @@ static struct net_bridge_port *new_nbp(struct net_bridge *br,
 	p->port_no = index;
 	br_init_port(p);
 	p->state = BR_STATE_DISABLED;
-	INIT_WORK(&p->carrier_check, port_carrier_check, dev);
+	INIT_DELAYED_WORK_NAR(&p->carrier_check, port_carrier_check);
 	br_stp_port_timer_init(p);
 
 	kobject_init(&p->kobj);
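
The bridge port conversion above uses the transitional non-auto-release variant: as best I can reconstruct from this era of the workqueue rework, INIT_DELAYED_WORK_NAR() leaves the item marked pending when the handler runs, so the handler must call work_release() itself before anything that might re-queue it. These helpers were later removed from the kernel, so treat this as historical context; a sketch with a hypothetical my_port type:

	#include <linux/workqueue.h>
	#include <linux/netdevice.h>

	struct my_port {
		struct delayed_work carrier_check;
		struct net_device *dev;
	};

	static void my_carrier_check(struct work_struct *work)
	{
		struct net_device *dev = container_of(work, struct my_port,
						      carrier_check.work)->dev;

		work_release(work);	/* NAR work: clear the pending bit by hand */
		/* ... sleepable checks on dev; safe to re-queue from here on ... */
	}

	static void my_port_init(struct my_port *p)
	{
		INIT_DELAYED_WORK_NAR(&p->carrier_check, my_carrier_check);
	}
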
diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
index 74258d86f256..3a534e94c7f3 100644
--- a/net/bridge/br_private.h
+++ b/net/bridge/br_private.h
@@ -82,7 +82,7 @@ struct net_bridge_port
 	struct timer_list		hold_timer;
 	struct timer_list		message_age_timer;
 	struct kobject			kobj;
-	struct work_struct		carrier_check;
+	struct delayed_work		carrier_check;
 	struct rcu_head			rcu;
 };
 
diff --git a/net/core/link_watch.c b/net/core/link_watch.c
index 4b36114744c5..549a2ce951b0 100644
--- a/net/core/link_watch.c
+++ b/net/core/link_watch.c
@@ -34,8 +34,8 @@ enum lw_bits {
 static unsigned long linkwatch_flags;
 static unsigned long linkwatch_nextevent;
 
-static void linkwatch_event(void *dummy);
-static DECLARE_WORK(linkwatch_work, linkwatch_event, NULL);
+static void linkwatch_event(struct work_struct *dummy);
+static DECLARE_DELAYED_WORK(linkwatch_work, linkwatch_event);
 
 static LIST_HEAD(lweventlist);
 static DEFINE_SPINLOCK(lweventlist_lock);
@@ -127,7 +127,7 @@ void linkwatch_run_queue(void)
 }
 
 
-static void linkwatch_event(void *dummy)
+static void linkwatch_event(struct work_struct *dummy)
 {
 	/* Limit the number of linkwatch events to one
 	 * per second so that a runaway driver does not
@@ -171,10 +171,9 @@ void linkwatch_fire_event(struct net_device *dev)
 		unsigned long delay = linkwatch_nextevent - jiffies;
 
 		/* If we wrap around we'll delay it by at most HZ. */
-		if (!delay || delay > HZ)
-			schedule_work(&linkwatch_work);
-		else
-			schedule_delayed_work(&linkwatch_work, delay);
+		if (delay > HZ)
+			delay = 0;
+		schedule_delayed_work(&linkwatch_work, delay);
 	}
 }
 }
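
Many call sites in this commit collapse the old schedule_work()/schedule_delayed_work() split into a single schedule_delayed_work(..., 0), which queues the item for immediate execution; the link-watch hunk above additionally clamps a wrapped delay to 0 instead of branching. A minimal sketch of the simplified scheduling logic, assuming a static handler:

	#include <linux/workqueue.h>
	#include <linux/jiffies.h>

	static void my_event(struct work_struct *dummy);
	static DECLARE_DELAYED_WORK(my_work, my_event);

	static void my_fire(unsigned long next_event)
	{
		unsigned long delay = next_event - jiffies;

		/* if jiffies wrapped past next_event, fire at most HZ early */
		if (delay > HZ)
			delay = 0;
		/* delay == 0 behaves like the old schedule_work() */
		schedule_delayed_work(&my_work, delay);
	}

	static void my_event(struct work_struct *dummy)
	{
		/* ... */
	}
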
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index 3c58846fcaa5..b3c559b9ac35 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -50,9 +50,10 @@ static atomic_t trapped;
 static void zap_completion_queue(void);
 static void arp_reply(struct sk_buff *skb);
 
-static void queue_process(void *p)
+static void queue_process(struct work_struct *work)
 {
-	struct netpoll_info *npinfo = p;
+	struct netpoll_info *npinfo =
+		container_of(work, struct netpoll_info, tx_work.work);
 	struct sk_buff *skb;
 
 	while ((skb = skb_dequeue(&npinfo->txq))) {
@@ -72,8 +73,6 @@ static void queue_process(void *p)
 			schedule_delayed_work(&npinfo->tx_work, HZ/10);
 			return;
 		}
-
-		netif_tx_unlock_bh(dev);
 	}
 }
 
@@ -263,7 +262,7 @@ static void netpoll_send_skb(struct netpoll *np, struct sk_buff *skb)
 
 	if (status != NETDEV_TX_OK) {
 		skb_queue_tail(&npinfo->txq, skb);
-		schedule_work(&npinfo->tx_work);
+		schedule_delayed_work(&npinfo->tx_work,0);
 	}
 }
 
@@ -628,7 +627,7 @@ int netpoll_setup(struct netpoll *np)
 		spin_lock_init(&npinfo->rx_lock);
 		skb_queue_head_init(&npinfo->arp_tx);
 		skb_queue_head_init(&npinfo->txq);
-		INIT_WORK(&npinfo->tx_work, queue_process, npinfo);
+		INIT_DELAYED_WORK(&npinfo->tx_work, queue_process);
 
 		atomic_set(&npinfo->refcnt, 1);
 	} else {
diff --git a/net/dccp/minisocks.c b/net/dccp/minisocks.c
index 7b52f2a03eef..4c9e26775f72 100644
--- a/net/dccp/minisocks.c
+++ b/net/dccp/minisocks.c
@@ -32,8 +32,7 @@ struct inet_timewait_death_row dccp_death_row = {
 	.tw_timer	= TIMER_INITIALIZER(inet_twdr_hangman, 0,
 					    (unsigned long)&dccp_death_row),
 	.twkill_work	= __WORK_INITIALIZER(dccp_death_row.twkill_work,
-					     inet_twdr_twkill_work,
-					     &dccp_death_row),
+					     inet_twdr_twkill_work),
 	/* Short-time timewait calendar */
 
 	.twcal_hand	= -1,
diff --git a/net/ieee80211/softmac/ieee80211softmac_assoc.c b/net/ieee80211/softmac/ieee80211softmac_assoc.c
index cf51c87a971d..08386c102954 100644
--- a/net/ieee80211/softmac/ieee80211softmac_assoc.c
+++ b/net/ieee80211/softmac/ieee80211softmac_assoc.c
@@ -58,9 +58,11 @@ ieee80211softmac_assoc(struct ieee80211softmac_device *mac, struct ieee80211soft
 }
 
 void
-ieee80211softmac_assoc_timeout(void *d)
+ieee80211softmac_assoc_timeout(struct work_struct *work)
 {
-	struct ieee80211softmac_device *mac = (struct ieee80211softmac_device *)d;
+	struct ieee80211softmac_device *mac =
+		container_of(work, struct ieee80211softmac_device,
+			     associnfo.timeout.work);
 	struct ieee80211softmac_network *n;
 
 	mutex_lock(&mac->associnfo.mutex);
@@ -186,9 +188,11 @@ ieee80211softmac_assoc_notify_auth(struct net_device *dev, int event_type, void
 
 /* This function is called to handle userspace requests (asynchronously) */
 void
-ieee80211softmac_assoc_work(void *d)
+ieee80211softmac_assoc_work(struct work_struct *work)
 {
-	struct ieee80211softmac_device *mac = (struct ieee80211softmac_device *)d;
+	struct ieee80211softmac_device *mac =
+		container_of(work, struct ieee80211softmac_device,
+			     associnfo.work.work);
 	struct ieee80211softmac_network *found = NULL;
 	struct ieee80211_network *net = NULL, *best = NULL;
 	int bssvalid;
@@ -412,7 +416,7 @@ ieee80211softmac_handle_assoc_response(struct net_device * dev,
 		network->authenticated = 0;
 		/* we don't want to do this more than once ... */
 		network->auth_desynced_once = 1;
-		schedule_work(&mac->associnfo.work);
+		schedule_delayed_work(&mac->associnfo.work, 0);
 		break;
 	}
 	default:
@@ -446,7 +450,7 @@ ieee80211softmac_handle_disassoc(struct net_device * dev,
 	ieee80211softmac_disassoc(mac);
 
 	/* try to reassociate */
-	schedule_work(&mac->associnfo.work);
+	schedule_delayed_work(&mac->associnfo.work, 0);
 
 	return 0;
 }
@@ -466,7 +470,7 @@ ieee80211softmac_handle_reassoc_req(struct net_device * dev,
 		dprintkl(KERN_INFO PFX "reassoc request from unknown network\n");
 		return 0;
 	}
-	schedule_work(&mac->associnfo.work);
+	schedule_delayed_work(&mac->associnfo.work, 0);
 
 	return 0;
 }
diff --git a/net/ieee80211/softmac/ieee80211softmac_auth.c b/net/ieee80211/softmac/ieee80211softmac_auth.c
index 0612015f1c78..6012705aa4f8 100644
--- a/net/ieee80211/softmac/ieee80211softmac_auth.c
+++ b/net/ieee80211/softmac/ieee80211softmac_auth.c
@@ -26,7 +26,7 @@
 
 #include "ieee80211softmac_priv.h"
 
-static void ieee80211softmac_auth_queue(void *data);
+static void ieee80211softmac_auth_queue(struct work_struct *work);
 
 /* Queues an auth request to the desired AP */
 int
@@ -54,14 +54,14 @@ ieee80211softmac_auth_req(struct ieee80211softmac_device *mac,
 	auth->mac = mac;
 	auth->retry = IEEE80211SOFTMAC_AUTH_RETRY_LIMIT;
 	auth->state = IEEE80211SOFTMAC_AUTH_OPEN_REQUEST;
-	INIT_WORK(&auth->work, &ieee80211softmac_auth_queue, (void *)auth);
+	INIT_DELAYED_WORK(&auth->work, ieee80211softmac_auth_queue);
 
 	/* Lock (for list) */
 	spin_lock_irqsave(&mac->lock, flags);
 
 	/* add to list */
 	list_add_tail(&auth->list, &mac->auth_queue);
-	schedule_work(&auth->work);
+	schedule_delayed_work(&auth->work, 0);
 	spin_unlock_irqrestore(&mac->lock, flags);
 
 	return 0;
@@ -70,14 +70,15 @@ ieee80211softmac_auth_req(struct ieee80211softmac_device *mac,
 
 /* Sends an auth request to the desired AP and handles timeouts */
 static void
-ieee80211softmac_auth_queue(void *data)
+ieee80211softmac_auth_queue(struct work_struct *work)
 {
 	struct ieee80211softmac_device *mac;
 	struct ieee80211softmac_auth_queue_item *auth;
 	struct ieee80211softmac_network *net;
 	unsigned long flags;
 
-	auth = (struct ieee80211softmac_auth_queue_item *)data;
+	auth = container_of(work, struct ieee80211softmac_auth_queue_item,
+			    work.work);
 	net = auth->net;
 	mac = auth->mac;
 
@@ -118,9 +119,11 @@ ieee80211softmac_auth_queue(void *data)
 
 /* Sends a response to an auth challenge (for shared key auth). */
 static void
-ieee80211softmac_auth_challenge_response(void *_aq)
+ieee80211softmac_auth_challenge_response(struct work_struct *work)
 {
-	struct ieee80211softmac_auth_queue_item *aq = _aq;
+	struct ieee80211softmac_auth_queue_item *aq =
+		container_of(work, struct ieee80211softmac_auth_queue_item,
+			     work.work);
 
 	/* Send our response */
 	ieee80211softmac_send_mgt_frame(aq->mac, aq->net, IEEE80211_STYPE_AUTH, aq->state);
@@ -234,8 +237,8 @@ ieee80211softmac_auth_resp(struct net_device *dev, struct ieee80211_auth *auth)
 			 * we have obviously already sent the initial auth
 			 * request. */
 			cancel_delayed_work(&aq->work);
-			INIT_WORK(&aq->work, &ieee80211softmac_auth_challenge_response, (void *)aq);
-			schedule_work(&aq->work);
+			INIT_DELAYED_WORK(&aq->work, &ieee80211softmac_auth_challenge_response);
+			schedule_delayed_work(&aq->work, 0);
 			spin_unlock_irqrestore(&mac->lock, flags);
 			return 0;
 		case IEEE80211SOFTMAC_AUTH_SHARED_PASS:
@@ -398,6 +401,6 @@ ieee80211softmac_deauth_resp(struct net_device *dev, struct ieee80211_deauth *de
 	ieee80211softmac_deauth_from_net(mac, net);
 
 	/* let's try to re-associate */
-	schedule_work(&mac->associnfo.work);
+	schedule_delayed_work(&mac->associnfo.work, 0);
 	return 0;
 }
diff --git a/net/ieee80211/softmac/ieee80211softmac_event.c b/net/ieee80211/softmac/ieee80211softmac_event.c
index f34fa2ef666b..b9015656cfb3 100644
--- a/net/ieee80211/softmac/ieee80211softmac_event.c
+++ b/net/ieee80211/softmac/ieee80211softmac_event.c
@@ -73,10 +73,12 @@ static char *event_descriptions[IEEE80211SOFTMAC_EVENT_LAST+1] = {
 
 
 static void
-ieee80211softmac_notify_callback(void *d)
+ieee80211softmac_notify_callback(struct work_struct *work)
 {
-	struct ieee80211softmac_event event = *(struct ieee80211softmac_event*) d;
-	kfree(d);
+	struct ieee80211softmac_event *pevent =
+		container_of(work, struct ieee80211softmac_event, work.work);
+	struct ieee80211softmac_event event = *pevent;
+	kfree(pevent);
 
 	event.fun(event.mac->dev, event.event_type, event.context);
 }
@@ -99,7 +101,7 @@ ieee80211softmac_notify_internal(struct ieee80211softmac_device *mac,
 		return -ENOMEM;
 
 	eventptr->event_type = event;
-	INIT_WORK(&eventptr->work, ieee80211softmac_notify_callback, eventptr);
+	INIT_DELAYED_WORK(&eventptr->work, ieee80211softmac_notify_callback);
 	eventptr->fun = fun;
 	eventptr->context = context;
 	eventptr->mac = mac;
@@ -170,7 +172,7 @@ ieee80211softmac_call_events_locked(struct ieee80211softmac_device *mac, int eve
 				/* User may have subscribed to ANY event, so
 				 * we tell them which event triggered it. */
 				eventptr->event_type = event;
-				schedule_work(&eventptr->work);
+				schedule_delayed_work(&eventptr->work, 0);
 			}
 		}
 }
diff --git a/net/ieee80211/softmac/ieee80211softmac_module.c b/net/ieee80211/softmac/ieee80211softmac_module.c
index 33aff4f4a471..256207b71dc9 100644
--- a/net/ieee80211/softmac/ieee80211softmac_module.c
+++ b/net/ieee80211/softmac/ieee80211softmac_module.c
@@ -58,8 +58,8 @@ struct net_device *alloc_ieee80211softmac(int sizeof_priv)
 	INIT_LIST_HEAD(&softmac->events);
 
 	mutex_init(&softmac->associnfo.mutex);
-	INIT_WORK(&softmac->associnfo.work, ieee80211softmac_assoc_work, softmac);
-	INIT_WORK(&softmac->associnfo.timeout, ieee80211softmac_assoc_timeout, softmac);
+	INIT_DELAYED_WORK(&softmac->associnfo.work, ieee80211softmac_assoc_work);
+	INIT_DELAYED_WORK(&softmac->associnfo.timeout, ieee80211softmac_assoc_timeout);
 	softmac->start_scan = ieee80211softmac_start_scan_implementation;
 	softmac->wait_for_scan = ieee80211softmac_wait_for_scan_implementation;
 	softmac->stop_scan = ieee80211softmac_stop_scan_implementation;
diff --git a/net/ieee80211/softmac/ieee80211softmac_priv.h b/net/ieee80211/softmac/ieee80211softmac_priv.h
index 0642e090b8a7..c0dbe070e548 100644
--- a/net/ieee80211/softmac/ieee80211softmac_priv.h
+++ b/net/ieee80211/softmac/ieee80211softmac_priv.h
@@ -78,7 +78,7 @@
 /* private definitions and prototypes */
 
 /*** prototypes from _scan.c */
-void ieee80211softmac_scan(void *sm);
+void ieee80211softmac_scan(struct work_struct *work);
 /* for internal use if scanning is needed */
 int ieee80211softmac_start_scan(struct ieee80211softmac_device *mac);
 void ieee80211softmac_stop_scan(struct ieee80211softmac_device *mac);
@@ -149,7 +149,7 @@ int ieee80211softmac_auth_resp(struct net_device *dev, struct ieee80211_auth *au
 int ieee80211softmac_deauth_resp(struct net_device *dev, struct ieee80211_deauth *deauth);
 
 /*** prototypes from _assoc.c */
-void ieee80211softmac_assoc_work(void *d);
+void ieee80211softmac_assoc_work(struct work_struct *work);
 int ieee80211softmac_handle_assoc_response(struct net_device * dev,
 					   struct ieee80211_assoc_response * resp,
 					   struct ieee80211_network * network);
@@ -157,7 +157,7 @@ int ieee80211softmac_handle_disassoc(struct net_device * dev,
 				     struct ieee80211_disassoc * disassoc);
 int ieee80211softmac_handle_reassoc_req(struct net_device * dev,
 					struct ieee80211_reassoc_request * reassoc);
-void ieee80211softmac_assoc_timeout(void *d);
+void ieee80211softmac_assoc_timeout(struct work_struct *work);
 void ieee80211softmac_send_disassoc_req(struct ieee80211softmac_device *mac, u16 reason);
 void ieee80211softmac_disassoc(struct ieee80211softmac_device *mac);
 
@@ -207,7 +207,7 @@ struct ieee80211softmac_auth_queue_item {
 	struct ieee80211softmac_device	*mac;	/* SoftMAC device */
 	u8 retry;				/* Retry limit */
 	u8 state;				/* Auth State */
-	struct work_struct		work;	/* Work queue */
+	struct delayed_work		work;	/* Work queue */
 };
 
 /* scanning information */
@@ -219,7 +219,8 @@ struct ieee80211softmac_scaninfo {
 	       stop:1;
 	u8 skip_flags;
 	struct completion finished;
-	struct work_struct softmac_scan;
+	struct delayed_work softmac_scan;
+	struct ieee80211softmac_device *mac;
 };
 
 /* private event struct */
@@ -227,7 +228,7 @@ struct ieee80211softmac_event {
 	struct list_head list;
 	int event_type;
 	void *event_context;
-	struct work_struct work;
+	struct delayed_work work;
 	notify_function_ptr fun;
 	void *context;
 	struct ieee80211softmac_device *mac;
diff --git a/net/ieee80211/softmac/ieee80211softmac_scan.c b/net/ieee80211/softmac/ieee80211softmac_scan.c
index 5507feab32de..0c85d6c24cdb 100644
--- a/net/ieee80211/softmac/ieee80211softmac_scan.c
+++ b/net/ieee80211/softmac/ieee80211softmac_scan.c
@@ -90,12 +90,14 @@ ieee80211softmac_wait_for_scan(struct ieee80211softmac_device *sm)
 
 
 /* internal scanning implementation follows */
-void ieee80211softmac_scan(void *d)
+void ieee80211softmac_scan(struct work_struct *work)
 {
 	int invalid_channel;
 	u8 current_channel_idx;
-	struct ieee80211softmac_device *sm = (struct ieee80211softmac_device *)d;
-	struct ieee80211softmac_scaninfo *si = sm->scaninfo;
+	struct ieee80211softmac_scaninfo *si =
+		container_of(work, struct ieee80211softmac_scaninfo,
+			     softmac_scan.work);
+	struct ieee80211softmac_device *sm = si->mac;
 	unsigned long flags;
 
 	while (!(si->stop) && (si->current_channel_idx < si->number_channels)) {
@@ -146,7 +148,8 @@ static inline struct ieee80211softmac_scaninfo *allocate_scaninfo(struct ieee802
 	struct ieee80211softmac_scaninfo *info = kmalloc(sizeof(struct ieee80211softmac_scaninfo), GFP_ATOMIC);
 	if (unlikely(!info))
 		return NULL;
-	INIT_WORK(&info->softmac_scan, ieee80211softmac_scan, mac);
+	INIT_DELAYED_WORK(&info->softmac_scan, ieee80211softmac_scan);
+	info->mac = mac;
 	init_completion(&info->finished);
 	return info;
 }
@@ -187,7 +190,7 @@ int ieee80211softmac_start_scan_implementation(struct net_device *dev)
 	sm->scaninfo->started = 1;
 	sm->scaninfo->stop = 0;
 	INIT_COMPLETION(sm->scaninfo->finished);
-	schedule_work(&sm->scaninfo->softmac_scan);
+	schedule_delayed_work(&sm->scaninfo->softmac_scan, 0);
 	spin_unlock_irqrestore(&sm->lock, flags);
 	return 0;
 }
diff --git a/net/ieee80211/softmac/ieee80211softmac_wx.c b/net/ieee80211/softmac/ieee80211softmac_wx.c
index 23068a830f7d..2ffaebd21c53 100644
--- a/net/ieee80211/softmac/ieee80211softmac_wx.c
+++ b/net/ieee80211/softmac/ieee80211softmac_wx.c
@@ -122,7 +122,7 @@ ieee80211softmac_wx_set_essid(struct net_device *net_dev,
 
 	sm->associnfo.associating = 1;
 	/* queue lower level code to do work (if necessary) */
-	schedule_work(&sm->associnfo.work);
+	schedule_delayed_work(&sm->associnfo.work, 0);
 out:
 	mutex_unlock(&sm->associnfo.mutex);
 
@@ -356,7 +356,7 @@ ieee80211softmac_wx_set_wap(struct net_device *net_dev,
 		/* force reassociation */
 		mac->associnfo.bssvalid = 0;
 		if (mac->associnfo.associated)
-			schedule_work(&mac->associnfo.work);
+			schedule_delayed_work(&mac->associnfo.work, 0);
 	} else if (is_zero_ether_addr(data->ap_addr.sa_data)) {
 		/* the bssid we have is no longer fixed */
 		mac->associnfo.bssfixed = 0;
@@ -373,7 +373,7 @@ ieee80211softmac_wx_set_wap(struct net_device *net_dev,
 		/* tell the other code that this bssid should be used no matter what */
 		mac->associnfo.bssfixed = 1;
 		/* queue associate if new bssid or (old one again and not associated) */
-		schedule_work(&mac->associnfo.work);
+		schedule_delayed_work(&mac->associnfo.work, 0);
 	}
 
 out:
diff --git a/net/ipv4/inet_timewait_sock.c b/net/ipv4/inet_timewait_sock.c
index cdd805344c61..8c74f9168b7d 100644
--- a/net/ipv4/inet_timewait_sock.c
+++ b/net/ipv4/inet_timewait_sock.c
@@ -197,9 +197,10 @@ EXPORT_SYMBOL_GPL(inet_twdr_hangman);
 
 extern void twkill_slots_invalid(void);
 
-void inet_twdr_twkill_work(void *data)
+void inet_twdr_twkill_work(struct work_struct *work)
 {
-	struct inet_timewait_death_row *twdr = data;
+	struct inet_timewait_death_row *twdr =
+		container_of(work, struct inet_timewait_death_row, twkill_work);
 	int i;
 
 	if ((INET_TWDR_TWKILL_SLOTS - 1) > (sizeof(twdr->thread_slots) * 8))
diff --git a/net/ipv4/ipvs/ip_vs_ctl.c b/net/ipv4/ipvs/ip_vs_ctl.c
index f261616e4602..9b933381ebbe 100644
--- a/net/ipv4/ipvs/ip_vs_ctl.c
+++ b/net/ipv4/ipvs/ip_vs_ctl.c
@@ -221,10 +221,10 @@ static void update_defense_level(void)
  *	Timer for checking the defense
  */
 #define DEFENSE_TIMER_PERIOD	1*HZ
-static void defense_work_handler(void *data);
-static DECLARE_WORK(defense_work, defense_work_handler, NULL);
+static void defense_work_handler(struct work_struct *work);
+static DECLARE_DELAYED_WORK(defense_work, defense_work_handler);
 
-static void defense_work_handler(void *data)
+static void defense_work_handler(struct work_struct *work)
 {
 	update_defense_level();
 	if (atomic_read(&ip_vs_dropentry))
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index 6dddf59c1fb9..4a3889dd1943 100644
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -45,8 +45,7 @@ struct inet_timewait_death_row tcp_death_row = {
 	.tw_timer	= TIMER_INITIALIZER(inet_twdr_hangman, 0,
 					    (unsigned long)&tcp_death_row),
 	.twkill_work	= __WORK_INITIALIZER(tcp_death_row.twkill_work,
-					     inet_twdr_twkill_work,
-					     &tcp_death_row),
+					     inet_twdr_twkill_work),
 	/* Short-time timewait calendar */
 
 	.twcal_hand	= -1,
diff --git a/net/irda/ircomm/ircomm_tty.c b/net/irda/ircomm/ircomm_tty.c
index d50a02030ad7..262bda808d96 100644
--- a/net/irda/ircomm/ircomm_tty.c
+++ b/net/irda/ircomm/ircomm_tty.c
@@ -61,7 +61,7 @@ static void ircomm_tty_flush_buffer(struct tty_struct *tty);
 static void ircomm_tty_send_xchar(struct tty_struct *tty, char ch);
 static void ircomm_tty_wait_until_sent(struct tty_struct *tty, int timeout);
 static void ircomm_tty_hangup(struct tty_struct *tty);
-static void ircomm_tty_do_softint(void *private_);
+static void ircomm_tty_do_softint(struct work_struct *work);
 static void ircomm_tty_shutdown(struct ircomm_tty_cb *self);
 static void ircomm_tty_stop(struct tty_struct *tty);
 
@@ -389,7 +389,7 @@ static int ircomm_tty_open(struct tty_struct *tty, struct file *filp)
 	self->flow = FLOW_STOP;
 
 	self->line = line;
-	INIT_WORK(&self->tqueue, ircomm_tty_do_softint, self);
+	INIT_WORK(&self->tqueue, ircomm_tty_do_softint);
 	self->max_header_size = IRCOMM_TTY_HDR_UNINITIALISED;
 	self->max_data_size = IRCOMM_TTY_DATA_UNINITIALISED;
 	self->close_delay = 5*HZ/10;
@@ -594,15 +594,16 @@ static void ircomm_tty_flush_buffer(struct tty_struct *tty)
 }
 
 /*
- * Function ircomm_tty_do_softint (private_)
+ * Function ircomm_tty_do_softint (work)
  *
  *    We use this routine to give the write wakeup to the user at at a
  *    safe time (as fast as possible after write have completed). This
  *    can be compared to the Tx interrupt.
  */
-static void ircomm_tty_do_softint(void *private_)
+static void ircomm_tty_do_softint(struct work_struct *work)
 {
-	struct ircomm_tty_cb *self = (struct ircomm_tty_cb *) private_;
+	struct ircomm_tty_cb *self =
+		container_of(work, struct ircomm_tty_cb, tqueue);
 	struct tty_struct *tty;
 	unsigned long flags;
 	struct sk_buff *skb, *ctrl_skb;
diff --git a/net/sctp/associola.c b/net/sctp/associola.c
index 39471d3b31b9..ad0057db0f91 100644
--- a/net/sctp/associola.c
+++ b/net/sctp/associola.c
@@ -61,7 +61,7 @@
 #include <net/sctp/sm.h>
 
 /* Forward declarations for internal functions. */
-static void sctp_assoc_bh_rcv(struct sctp_association *asoc);
+static void sctp_assoc_bh_rcv(struct work_struct *work);
 
 
 /* 1st Level Abstractions. */
@@ -269,9 +269,7 @@ static struct sctp_association *sctp_association_init(struct sctp_association *a
 
 	/* Create an input queue. */
 	sctp_inq_init(&asoc->base.inqueue);
-	sctp_inq_set_th_handler(&asoc->base.inqueue,
-				(void (*)(void *))sctp_assoc_bh_rcv,
-				asoc);
+	sctp_inq_set_th_handler(&asoc->base.inqueue, sctp_assoc_bh_rcv);
 
 	/* Create an output queue. */
 	sctp_outq_init(asoc, &asoc->outqueue);
@@ -946,8 +944,11 @@ out:
 }
 
 /* Do delayed input processing. This is scheduled by sctp_rcv(). */
-static void sctp_assoc_bh_rcv(struct sctp_association *asoc)
+static void sctp_assoc_bh_rcv(struct work_struct *work)
 {
+	struct sctp_association *asoc =
+		container_of(work, struct sctp_association,
+			     base.inqueue.immediate);
 	struct sctp_endpoint *ep;
 	struct sctp_chunk *chunk;
 	struct sock *sk;
diff --git a/net/sctp/endpointola.c b/net/sctp/endpointola.c
index 33a42e90c32f..129756908da4 100644
--- a/net/sctp/endpointola.c
+++ b/net/sctp/endpointola.c
@@ -61,7 +61,7 @@
 #include <net/sctp/sm.h>
 
 /* Forward declarations for internal helpers. */
-static void sctp_endpoint_bh_rcv(struct sctp_endpoint *ep);
+static void sctp_endpoint_bh_rcv(struct work_struct *work);
 
 /*
  * Initialize the base fields of the endpoint structure.
@@ -89,8 +89,7 @@ static struct sctp_endpoint *sctp_endpoint_init(struct sctp_endpoint *ep,
 	sctp_inq_init(&ep->base.inqueue);
 
 	/* Set its top-half handler */
-	sctp_inq_set_th_handler(&ep->base.inqueue,
-				(void (*)(void *))sctp_endpoint_bh_rcv, ep);
+	sctp_inq_set_th_handler(&ep->base.inqueue, sctp_endpoint_bh_rcv);
 
 	/* Initialize the bind addr area */
 	sctp_bind_addr_init(&ep->base.bind_addr, 0);
@@ -318,8 +317,11 @@ int sctp_endpoint_is_peeled_off(struct sctp_endpoint *ep,
 /* Do delayed input processing. This is scheduled by sctp_rcv().
  * This may be called on BH or task time.
  */
-static void sctp_endpoint_bh_rcv(struct sctp_endpoint *ep)
+static void sctp_endpoint_bh_rcv(struct work_struct *work)
 {
+	struct sctp_endpoint *ep =
+		container_of(work, struct sctp_endpoint,
+			     base.inqueue.immediate);
 	struct sctp_association *asoc;
 	struct sock *sk;
 	struct sctp_transport *transport;
diff --git a/net/sctp/inqueue.c b/net/sctp/inqueue.c
index cf6deed7e849..71b07466e880 100644
--- a/net/sctp/inqueue.c
+++ b/net/sctp/inqueue.c
@@ -54,7 +54,7 @@ void sctp_inq_init(struct sctp_inq *queue)
 	queue->in_progress = NULL;
 
 	/* Create a task for delivering data. */
-	INIT_WORK(&queue->immediate, NULL, NULL);
+	INIT_WORK(&queue->immediate, NULL);
 
 	queue->malloced = 0;
 }
@@ -97,7 +97,7 @@ void sctp_inq_push(struct sctp_inq *q, struct sctp_chunk *chunk)
 	 * on the BH related data structures.
 	 */
 	list_add_tail(&chunk->list, &q->in_chunk_list);
-	q->immediate.func(q->immediate.data);
+	q->immediate.func(&q->immediate);
 }
 
 /* Extract a chunk from an SCTP inqueue.
@@ -205,9 +205,8 @@ struct sctp_chunk *sctp_inq_pop(struct sctp_inq *queue)
  * The intent is that this routine will pull stuff out of the
 * inqueue and process it.
  */
-void sctp_inq_set_th_handler(struct sctp_inq *q,
-			     void (*callback)(void *), void *arg)
+void sctp_inq_set_th_handler(struct sctp_inq *q, work_func_t callback)
 {
-	INIT_WORK(&q->immediate, callback, arg);
+	INIT_WORK(&q->immediate, callback);
 }
 
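
SCTP stores its top-half handler as the work function itself, so the setter's prototype shrinks from a void (*)(void *) plus argument to the single work_func_t typedef, and the synchronous fast path simply invokes the stored function with the work item's own address; the handler then recovers its context through container_of(). A sketch of the same shape, with a hypothetical my_inq type:

	#include <linux/workqueue.h>

	struct my_inq {
		struct work_struct immediate;
	};

	static void my_inq_set_handler(struct my_inq *q, work_func_t callback)
	{
		INIT_WORK(&q->immediate, callback);
	}

	static void my_inq_push(struct my_inq *q)
	{
		/* run the handler synchronously, passing the work item itself,
		 * exactly as q->immediate.func(&q->immediate) does above */
		q->immediate.func(&q->immediate);
	}
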
diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c
index 00cb388ece03..d96fd466a9a4 100644
--- a/net/sunrpc/cache.c
+++ b/net/sunrpc/cache.c
@@ -284,8 +284,8 @@ static struct file_operations cache_file_operations;
 static struct file_operations content_file_operations;
 static struct file_operations cache_flush_operations;
 
-static void do_cache_clean(void *data);
-static DECLARE_WORK(cache_cleaner, do_cache_clean, NULL);
+static void do_cache_clean(struct work_struct *work);
+static DECLARE_DELAYED_WORK(cache_cleaner, do_cache_clean);
 
 void cache_register(struct cache_detail *cd)
 {
@@ -337,7 +337,7 @@ void cache_register(struct cache_detail *cd)
 	spin_unlock(&cache_list_lock);
 
 	/* start the cleaning process */
-	schedule_work(&cache_cleaner);
+	schedule_delayed_work(&cache_cleaner, 0);
 }
 
 int cache_unregister(struct cache_detail *cd)
@@ -461,7 +461,7 @@ static int cache_clean(void)
 /*
  * We want to regularly clean the cache, so we need to schedule some work ...
  */
-static void do_cache_clean(void *data)
+static void do_cache_clean(struct work_struct *work)
 {
 	int delay = 5;
 	if (cache_clean() == -1)
diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c
index 9a0b41a97f90..49dba5febbbd 100644
--- a/net/sunrpc/rpc_pipe.c
+++ b/net/sunrpc/rpc_pipe.c
@@ -54,10 +54,11 @@ static void rpc_purge_list(struct rpc_inode *rpci, struct list_head *head,
 }
 
 static void
-rpc_timeout_upcall_queue(void *data)
+rpc_timeout_upcall_queue(struct work_struct *work)
 {
 	LIST_HEAD(free_list);
-	struct rpc_inode *rpci = (struct rpc_inode *)data;
+	struct rpc_inode *rpci =
+		container_of(work, struct rpc_inode, queue_timeout.work);
 	struct inode *inode = &rpci->vfs_inode;
 	void (*destroy_msg)(struct rpc_pipe_msg *);
 
@@ -837,7 +838,8 @@ init_once(void * foo, kmem_cache_t * cachep, unsigned long flags)
 		INIT_LIST_HEAD(&rpci->pipe);
 		rpci->pipelen = 0;
 		init_waitqueue_head(&rpci->waitq);
-		INIT_WORK(&rpci->queue_timeout, rpc_timeout_upcall_queue, rpci);
+		INIT_DELAYED_WORK(&rpci->queue_timeout,
+				  rpc_timeout_upcall_queue);
 		rpci->ops = NULL;
 	}
 }
diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
index f9fd66b1d48b..18a33d327012 100644
--- a/net/sunrpc/sched.c
+++ b/net/sunrpc/sched.c
@@ -41,7 +41,7 @@ static mempool_t *rpc_buffer_mempool __read_mostly;
 
 static void			__rpc_default_timer(struct rpc_task *task);
 static void			rpciod_killall(void);
-static void			rpc_async_schedule(void *);
+static void			rpc_async_schedule(struct work_struct *);
 
 /*
  * RPC tasks sit here while waiting for conditions to improve.
@@ -323,7 +323,7 @@ static void rpc_make_runnable(struct rpc_task *task)
 	if (RPC_IS_ASYNC(task)) {
 		int status;
 
-		INIT_WORK(&task->u.tk_work, rpc_async_schedule, (void *)task);
+		INIT_WORK(&task->u.tk_work, rpc_async_schedule);
 		status = queue_work(task->tk_workqueue, &task->u.tk_work);
 		if (status < 0) {
 			printk(KERN_WARNING "RPC: failed to add task to queue: error: %d!\n", status);
@@ -729,9 +729,9 @@ rpc_execute(struct rpc_task *task)
 	return __rpc_execute(task);
 }
 
-static void rpc_async_schedule(void *arg)
+static void rpc_async_schedule(struct work_struct *work)
 {
-	__rpc_execute((struct rpc_task *)arg);
+	__rpc_execute(container_of(work, struct rpc_task, u.tk_work));
 }
 
 /**
diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c
index f8ca0a93454c..7a3999f0a4a2 100644
--- a/net/sunrpc/xprt.c
+++ b/net/sunrpc/xprt.c
@@ -477,9 +477,10 @@ int xprt_adjust_timeout(struct rpc_rqst *req)
 	return status;
 }
 
-static void xprt_autoclose(void *args)
+static void xprt_autoclose(struct work_struct *work)
 {
-	struct rpc_xprt *xprt = (struct rpc_xprt *)args;
+	struct rpc_xprt *xprt =
+		container_of(work, struct rpc_xprt, task_cleanup);
 
 	xprt_disconnect(xprt);
 	xprt->ops->close(xprt);
@@ -916,7 +917,7 @@ struct rpc_xprt *xprt_create_transport(int proto, struct sockaddr *ap, size_t si
 
 	INIT_LIST_HEAD(&xprt->free);
 	INIT_LIST_HEAD(&xprt->recv);
-	INIT_WORK(&xprt->task_cleanup, xprt_autoclose, xprt);
+	INIT_WORK(&xprt->task_cleanup, xprt_autoclose);
 	init_timer(&xprt->timer);
 	xprt->timer.function = xprt_init_autodisconnect;
 	xprt->timer.data = (unsigned long) xprt;
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
index 21438d7dc47b..3bb232eb5d90 100644
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@ -235,7 +235,7 @@ struct sock_xprt {
 	/*
 	 * Connection of transports
 	 */
-	struct work_struct	connect_worker;
+	struct delayed_work	connect_worker;
 	unsigned short		port;
 
 	/*
@@ -1175,13 +1175,14 @@ static int xs_bindresvport(struct sock_xprt *transport, struct socket *sock)
 
 /**
  * xs_udp_connect_worker - set up a UDP socket
- * @args: RPC transport to connect
+ * @work: RPC transport to connect
  *
  * Invoked by a work queue tasklet.
  */
-static void xs_udp_connect_worker(void *args)
+static void xs_udp_connect_worker(struct work_struct *work)
 {
-	struct sock_xprt *transport = (struct sock_xprt *)args;
+	struct sock_xprt *transport =
+		container_of(work, struct sock_xprt, connect_worker.work);
 	struct rpc_xprt *xprt = &transport->xprt;
 	struct socket *sock = transport->sock;
 	int err, status = -EIO;
@@ -1260,13 +1261,14 @@ static void xs_tcp_reuse_connection(struct rpc_xprt *xprt)
 
 /**
  * xs_tcp_connect_worker - connect a TCP socket to a remote endpoint
- * @args: RPC transport to connect
+ * @work: RPC transport to connect
 *
 * Invoked by a work queue tasklet.
 */
-static void xs_tcp_connect_worker(void *args)
+static void xs_tcp_connect_worker(struct work_struct *work)
 {
-	struct sock_xprt *transport = (struct sock_xprt *)args;
+	struct sock_xprt *transport =
+		container_of(work, struct sock_xprt, connect_worker.work);
 	struct rpc_xprt *xprt = &transport->xprt;
 	struct socket *sock = transport->sock;
 	int err, status = -EIO;
@@ -1380,7 +1382,7 @@ static void xs_connect(struct rpc_task *task)
 		xprt->reestablish_timeout = XS_TCP_MAX_REEST_TO;
 	} else {
 		dprintk("RPC: xs_connect scheduled xprt %p\n", xprt);
-		schedule_work(&transport->connect_worker);
+		schedule_delayed_work(&transport->connect_worker, 0);
 
 		/* flush_scheduled_work can sleep... */
 		if (!RPC_IS_ASYNC(task))
@@ -1525,7 +1527,7 @@ struct rpc_xprt *xs_setup_udp(struct sockaddr *addr, size_t addrlen, struct rpc_
 	/* XXX: header size can vary due to auth type, IPv6, etc. */
 	xprt->max_payload = (1U << 16) - (MAX_HEADER << 3);
 
-	INIT_WORK(&transport->connect_worker, xs_udp_connect_worker, transport);
+	INIT_DELAYED_WORK(&transport->connect_worker, xs_udp_connect_worker);
 	xprt->bind_timeout = XS_BIND_TO;
 	xprt->connect_timeout = XS_UDP_CONN_TO;
 	xprt->reestablish_timeout = XS_UDP_REEST_TO;
@@ -1569,7 +1571,7 @@ struct rpc_xprt *xs_setup_tcp(struct sockaddr *addr, size_t addrlen, struct rpc_
 	xprt->tsh_size = sizeof(rpc_fraghdr) / sizeof(u32);
 	xprt->max_payload = RPC_MAX_FRAGMENT_SIZE;
 
-	INIT_WORK(&transport->connect_worker, xs_tcp_connect_worker, transport);
+	INIT_DELAYED_WORK(&transport->connect_worker, xs_tcp_connect_worker);
 	xprt->bind_timeout = XS_BIND_TO;
 	xprt->connect_timeout = XS_TCP_CONN_TO;
 	xprt->reestablish_timeout = XS_TCP_INIT_REEST_TO;
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index 64d3938f74c4..f6c77bd36fdd 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -392,7 +392,7 @@ static void xfrm_policy_gc_kill(struct xfrm_policy *policy)
 	xfrm_pol_put(policy);
 }
 
-static void xfrm_policy_gc_task(void *data)
+static void xfrm_policy_gc_task(struct work_struct *work)
 {
 	struct xfrm_policy *policy;
 	struct hlist_node *entry, *tmp;
@@ -580,7 +580,7 @@ static inline int xfrm_byidx_should_resize(int total)
 
 static DEFINE_MUTEX(hash_resize_mutex);
 
-static void xfrm_hash_resize(void *__unused)
+static void xfrm_hash_resize(struct work_struct *__unused)
 {
 	int dir, total;
 
@@ -597,7 +597,7 @@ static void xfrm_hash_resize(void *__unused)
 	mutex_unlock(&hash_resize_mutex);
 }
 
-static DECLARE_WORK(xfrm_hash_work, xfrm_hash_resize, NULL);
+static DECLARE_WORK(xfrm_hash_work, xfrm_hash_resize);
 
 /* Generate new index... KAME seems to generate them ordered by cost
 * of an absolute inpredictability of ordering of rules. This will not pass. */
@@ -2116,7 +2116,7 @@ static void __init xfrm_policy_init(void)
 		panic("XFRM: failed to allocate bydst hash\n");
 	}
 
-	INIT_WORK(&xfrm_policy_gc_work, xfrm_policy_gc_task, NULL);
+	INIT_WORK(&xfrm_policy_gc_work, xfrm_policy_gc_task);
 	register_netdevice_notifier(&xfrm_dev_notifier);
 }
 
diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
index 864962bbda90..da54a64ccfa3 100644
--- a/net/xfrm/xfrm_state.c
+++ b/net/xfrm/xfrm_state.c
@@ -115,7 +115,7 @@ static unsigned long xfrm_hash_new_size(void)
 
 static DEFINE_MUTEX(hash_resize_mutex);
 
-static void xfrm_hash_resize(void *__unused)
+static void xfrm_hash_resize(struct work_struct *__unused)
 {
 	struct hlist_head *ndst, *nsrc, *nspi, *odst, *osrc, *ospi;
 	unsigned long nsize, osize;
@@ -168,7 +168,7 @@ out_unlock:
 	mutex_unlock(&hash_resize_mutex);
 }
 
-static DECLARE_WORK(xfrm_hash_work, xfrm_hash_resize, NULL);
+static DECLARE_WORK(xfrm_hash_work, xfrm_hash_resize);
 
 DECLARE_WAIT_QUEUE_HEAD(km_waitq);
 EXPORT_SYMBOL(km_waitq);
@@ -207,7 +207,7 @@ static void xfrm_state_gc_destroy(struct xfrm_state *x)
 	kfree(x);
 }
 
-static void xfrm_state_gc_task(void *data)
+static void xfrm_state_gc_task(struct work_struct *data)
 {
 	struct xfrm_state *x;
 	struct hlist_node *entry, *tmp;
@@ -1568,6 +1568,6 @@ void __init xfrm_state_init(void)
 		panic("XFRM: Cannot allocate bydst/bysrc/byspi hashes.");
 	xfrm_state_hmask = ((sz / sizeof(struct hlist_head)) - 1);
 
-	INIT_WORK(&xfrm_state_gc_work, xfrm_state_gc_task, NULL);
+	INIT_WORK(&xfrm_state_gc_work, xfrm_state_gc_task);
 }
 