Diffstat (limited to 'net'): 68 files changed, 315 insertions(+), 229 deletions(-)
diff --git a/net/atm/lec.c b/net/atm/lec.c
index 5946ec63724f..3fc0abeeaf34 100644
--- a/net/atm/lec.c
+++ b/net/atm/lec.c
@@ -1454,7 +1454,7 @@ static void lane2_associate_ind(struct net_device *dev, u8 *mac_addr,
 
 #define LEC_ARP_REFRESH_INTERVAL (3*HZ)
 
-static void lec_arp_check_expire(void *data);
+static void lec_arp_check_expire(struct work_struct *work);
 static void lec_arp_expire_arp(unsigned long data);
 
 /*
@@ -1477,7 +1477,7 @@ static void lec_arp_init(struct lec_priv *priv)
 	INIT_HLIST_HEAD(&priv->lec_no_forward);
 	INIT_HLIST_HEAD(&priv->mcast_fwds);
 	spin_lock_init(&priv->lec_arp_lock);
-	INIT_WORK(&priv->lec_arp_work, lec_arp_check_expire, priv);
+	INIT_DELAYED_WORK(&priv->lec_arp_work, lec_arp_check_expire);
 	schedule_delayed_work(&priv->lec_arp_work, LEC_ARP_REFRESH_INTERVAL);
 }
 
@@ -1875,10 +1875,11 @@ static void lec_arp_expire_vcc(unsigned long data)
  * to ESI_FORWARD_DIRECT. This causes the flush period to end
  * regardless of the progress of the flush protocol.
  */
-static void lec_arp_check_expire(void *data)
+static void lec_arp_check_expire(struct work_struct *work)
 {
 	unsigned long flags;
-	struct lec_priv *priv = data;
+	struct lec_priv *priv =
+		container_of(work, struct lec_priv, lec_arp_work.work);
 	struct hlist_node *node, *next;
 	struct lec_arp_table *entry;
 	unsigned long now;
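
This hunk shows the core pattern of the tree-wide workqueue API change: callbacks now receive a struct work_struct * instead of a caller-supplied void *, and recover their context with container_of(). A delayed item embeds its work_struct inside a struct delayed_work whose member is named work, which is why the callback reaches through lec_arp_work.work. A minimal sketch of the pattern, with hypothetical names (my_priv, my_callback, my_init):

	#include <linux/workqueue.h>

	struct my_priv {
		struct delayed_work dwork;	/* embeds a work_struct named "work" */
	};

	static void my_callback(struct work_struct *work)
	{
		/* Map the embedded work_struct back to its containing object. */
		struct my_priv *priv = container_of(work, struct my_priv, dwork.work);

		/* ... do the periodic work, then re-arm the timer-backed item ... */
		schedule_delayed_work(&priv->dwork, HZ);
	}

	static void my_init(struct my_priv *priv)
	{
		INIT_DELAYED_WORK(&priv->dwork, my_callback);
		schedule_delayed_work(&priv->dwork, HZ);
	}

Plain (non-delayed) work items follow the same pattern without the ".work" indirection, as in the hci_sysfs.c hunks below.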
diff --git a/net/atm/lec.h b/net/atm/lec.h
index 24cc95f86741..99136babd535 100644
--- a/net/atm/lec.h
+++ b/net/atm/lec.h
@@ -92,7 +92,7 @@ struct lec_priv {
 	spinlock_t lec_arp_lock;
 	struct atm_vcc *mcast_vcc;		/* Default Multicast Send VCC */
 	struct atm_vcc *lecd;
-	struct work_struct lec_arp_work;	/* C10 */
+	struct delayed_work lec_arp_work;	/* C10 */
 	unsigned int maximum_unknown_frame_count;
 	/*
 	 * Within the period of time defined by this variable, the client will send
diff --git a/net/bluetooth/hci_sysfs.c b/net/bluetooth/hci_sysfs.c
index 3eeeb7a86e75..d4c935692ccf 100644
--- a/net/bluetooth/hci_sysfs.c
+++ b/net/bluetooth/hci_sysfs.c
@@ -237,9 +237,9 @@ static void bt_release(struct device *dev)
 	kfree(data);
 }
 
-static void add_conn(void *data)
+static void add_conn(struct work_struct *work)
 {
-	struct hci_conn *conn = data;
+	struct hci_conn *conn = container_of(work, struct hci_conn, work);
 	int i;
 
 	if (device_register(&conn->dev) < 0) {
@@ -272,14 +272,14 @@ void hci_conn_add_sysfs(struct hci_conn *conn)
 
 	dev_set_drvdata(&conn->dev, conn);
 
-	INIT_WORK(&conn->work, add_conn, (void *) conn);
+	INIT_WORK(&conn->work, add_conn);
 
 	schedule_work(&conn->work);
 }
 
-static void del_conn(void *data)
+static void del_conn(struct work_struct *work)
 {
-	struct hci_conn *conn = data;
+	struct hci_conn *conn = container_of(work, struct hci_conn, work);
 	device_del(&conn->dev);
 }
 
@@ -287,7 +287,7 @@ void hci_conn_del_sysfs(struct hci_conn *conn)
 {
 	BT_DBG("conn %p", conn);
 
-	INIT_WORK(&conn->work, del_conn, (void *) conn);
+	INIT_WORK(&conn->work, del_conn);
 
 	schedule_work(&conn->work);
 }
diff --git a/net/bridge/br_fdb.c b/net/bridge/br_fdb.c
index d9f04864d15d..8ca448db7a0d 100644
--- a/net/bridge/br_fdb.c
+++ b/net/bridge/br_fdb.c
@@ -23,7 +23,7 @@
 #include <asm/atomic.h>
 #include "br_private.h"
 
-static kmem_cache_t *br_fdb_cache __read_mostly;
+static struct kmem_cache *br_fdb_cache __read_mostly;
 static int fdb_insert(struct net_bridge *br, struct net_bridge_port *source,
 		      const unsigned char *addr);
 
diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c
index f753c40c11d2..55bb2634c088 100644
--- a/net/bridge/br_if.c
+++ b/net/bridge/br_if.c
@@ -77,12 +77,16 @@ static int port_cost(struct net_device *dev)
  * Called from work queue to allow for calling functions that
  * might sleep (such as speed check), and to debounce.
  */
-static void port_carrier_check(void *arg)
+static void port_carrier_check(struct work_struct *work)
 {
-	struct net_device *dev = arg;
 	struct net_bridge_port *p;
+	struct net_device *dev;
 	struct net_bridge *br;
 
+	dev = container_of(work, struct net_bridge_port,
+			   carrier_check.work)->dev;
+	work_release(work);
+
 	rtnl_lock();
 	p = dev->br_port;
 	if (!p)
@@ -276,7 +280,7 @@ static struct net_bridge_port *new_nbp(struct net_bridge *br,
 	p->port_no = index;
 	br_init_port(p);
 	p->state = BR_STATE_DISABLED;
-	INIT_WORK(&p->carrier_check, port_carrier_check, dev);
+	INIT_DELAYED_WORK_NAR(&p->carrier_check, port_carrier_check);
 	br_stp_port_timer_init(p);
 
 	kobject_init(&p->kobj);
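
The _NAR suffix is the "non-auto-release" flavour that existed briefly in this API revision: as I read it, the pending bit is not cleared automatically before the handler runs, so the handler must call work_release() itself, and until it does the item cannot be requeued out from under it. That lets the handler safely fetch its context first. A sketch under that assumption, with hypothetical names (my_port, my_check):

	struct my_port {
		struct net_device	*dev;
		struct delayed_work	check;
	};

	static void my_check(struct work_struct *work)
	{
		struct net_device *dev =
			container_of(work, struct my_port, check.work)->dev;

		/* Until work_release(), the item still counts as pending and
		 * a concurrent re-schedule is a no-op. */
		work_release(work);

		/* ... sleepable checks against dev ... */
	}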
diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
index 74258d86f256..3a534e94c7f3 100644
--- a/net/bridge/br_private.h
+++ b/net/bridge/br_private.h
@@ -82,7 +82,7 @@ struct net_bridge_port
 	struct timer_list		hold_timer;
 	struct timer_list		message_age_timer;
 	struct kobject			kobj;
-	struct work_struct		carrier_check;
+	struct delayed_work		carrier_check;
 	struct rcu_head			rcu;
 };
 
diff --git a/net/core/dev.c b/net/core/dev.c
index 59d058a3b504..e660cb57e42a 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -3340,7 +3340,6 @@ void unregister_netdev(struct net_device *dev)
 
 EXPORT_SYMBOL(unregister_netdev);
 
-#ifdef CONFIG_HOTPLUG_CPU
 static int dev_cpu_callback(struct notifier_block *nfb,
 			    unsigned long action,
 			    void *ocpu)
@@ -3384,7 +3383,6 @@ static int dev_cpu_callback(struct notifier_block *nfb,
 
 	return NOTIFY_OK;
 }
-#endif /* CONFIG_HOTPLUG_CPU */
 
 #ifdef CONFIG_NET_DMA
 /**
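
Dropping the CONFIG_HOTPLUG_CPU guards (here and in flow.c below) is presumably safe because the notifier-registration helpers exist in both configurations; when hotplug is disabled they reduce to no-ops and the now-unconditional callback is simply never invoked. A rough, paraphrased sketch of the arrangement in <linux/cpu.h> of this era, not the verbatim kernel text:

	#ifdef CONFIG_HOTPLUG_CPU
	#define hotcpu_notifier(fn, pri) {				\
		static struct notifier_block fn##_nb =			\
			{ .notifier_call = fn, .priority = pri };	\
		register_cpu_notifier(&fn##_nb);			\
	}
	#else
	/* Evaluate fn to avoid "defined but not used" warnings; do nothing. */
	#define hotcpu_notifier(fn, pri)	do { (void)(fn); } while (0)
	#endif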
diff --git a/net/core/dst.c b/net/core/dst.c
index 1a5e49da0e77..836ec6606925 100644
--- a/net/core/dst.c
+++ b/net/core/dst.c
@@ -125,7 +125,7 @@ void * dst_alloc(struct dst_ops * ops)
 		if (ops->gc())
 			return NULL;
 	}
-	dst = kmem_cache_alloc(ops->kmem_cachep, SLAB_ATOMIC);
+	dst = kmem_cache_alloc(ops->kmem_cachep, GFP_ATOMIC);
 	if (!dst)
 		return NULL;
 	memset(dst, 0, ops->entry_size);
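
The SLAB_ATOMIC/SLAB_KERNEL to GFP_ATOMIC/GFP_KERNEL edits, here and throughout the rest of this patch, are pure renames: the SLAB_* names were aliases for the corresponding gfp_t flags, and kmem_cache_t was a deprecated typedef for struct kmem_cache. Roughly what the removed compatibility layer in <linux/slab.h> looked like (paraphrased):

	typedef struct kmem_cache kmem_cache_t;	/* deprecated spelling */

	#define SLAB_ATOMIC	GFP_ATOMIC	/* the flags were gfp_t all along */
	#define SLAB_KERNEL	GFP_KERNEL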
diff --git a/net/core/flow.c b/net/core/flow.c
index b16d31ae5e54..d137f971f97d 100644
--- a/net/core/flow.c
+++ b/net/core/flow.c
@@ -44,7 +44,7 @@ static DEFINE_PER_CPU(struct flow_cache_entry **, flow_tables) = { NULL };
 
 #define flow_table(cpu) (per_cpu(flow_tables, cpu))
 
-static kmem_cache_t *flow_cachep __read_mostly;
+static struct kmem_cache *flow_cachep __read_mostly;
 
 static int flow_lwm, flow_hwm;
 
@@ -211,7 +211,7 @@ void *flow_cache_lookup(struct flowi *key, u16 family, u8 dir,
 		if (flow_count(cpu) > flow_hwm)
 			flow_cache_shrink(cpu);
 
-		fle = kmem_cache_alloc(flow_cachep, SLAB_ATOMIC);
+		fle = kmem_cache_alloc(flow_cachep, GFP_ATOMIC);
 		if (fle) {
 			fle->next = *head;
 			*head = fle;
@@ -340,7 +340,6 @@ static void __devinit flow_cache_cpu_prepare(int cpu)
 	tasklet_init(tasklet, flow_cache_flush_tasklet, 0);
 }
 
-#ifdef CONFIG_HOTPLUG_CPU
 static int flow_cache_cpu(struct notifier_block *nfb,
 			  unsigned long action,
 			  void *hcpu)
@@ -349,7 +348,6 @@ static int flow_cache_cpu(struct notifier_block *nfb,
 		__flow_cache_shrink((unsigned long)hcpu, 0);
 	return NOTIFY_OK;
 }
-#endif /* CONFIG_HOTPLUG_CPU */
 
 static int __init flow_cache_init(void)
 {
diff --git a/net/core/link_watch.c b/net/core/link_watch.c
index 4b36114744c5..549a2ce951b0 100644
--- a/net/core/link_watch.c
+++ b/net/core/link_watch.c
@@ -34,8 +34,8 @@ enum lw_bits {
 static unsigned long linkwatch_flags;
 static unsigned long linkwatch_nextevent;
 
-static void linkwatch_event(void *dummy);
-static DECLARE_WORK(linkwatch_work, linkwatch_event, NULL);
+static void linkwatch_event(struct work_struct *dummy);
+static DECLARE_DELAYED_WORK(linkwatch_work, linkwatch_event);
 
 static LIST_HEAD(lweventlist);
 static DEFINE_SPINLOCK(lweventlist_lock);
@@ -127,7 +127,7 @@ void linkwatch_run_queue(void)
 }
 
 
-static void linkwatch_event(void *dummy)
+static void linkwatch_event(struct work_struct *dummy)
 {
 	/* Limit the number of linkwatch events to one
 	 * per second so that a runaway driver does not
@@ -171,10 +171,9 @@ void linkwatch_fire_event(struct net_device *dev)
 			unsigned long delay = linkwatch_nextevent - jiffies;
 
 			/* If we wrap around we'll delay it by at most HZ. */
-			if (!delay || delay > HZ)
-				schedule_work(&linkwatch_work);
-			else
-				schedule_delayed_work(&linkwatch_work, delay);
+			if (delay > HZ)
+				delay = 0;
+			schedule_delayed_work(&linkwatch_work, delay);
 		}
 	}
 }
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index ba509a4a8e92..0ab1987b9348 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -251,7 +251,7 @@ static struct neighbour *neigh_alloc(struct neigh_table *tbl)
 		goto out_entries;
 	}
 
-	n = kmem_cache_alloc(tbl->kmem_cachep, SLAB_ATOMIC);
+	n = kmem_cache_alloc(tbl->kmem_cachep, GFP_ATOMIC);
 	if (!n)
 		goto out_entries;
 
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index 3c58846fcaa5..b3c559b9ac35 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -50,9 +50,10 @@ static atomic_t trapped;
 static void zap_completion_queue(void);
 static void arp_reply(struct sk_buff *skb);
 
-static void queue_process(void *p)
+static void queue_process(struct work_struct *work)
 {
-	struct netpoll_info *npinfo = p;
+	struct netpoll_info *npinfo =
+		container_of(work, struct netpoll_info, tx_work.work);
 	struct sk_buff *skb;
 
 	while ((skb = skb_dequeue(&npinfo->txq))) {
@@ -72,8 +73,6 @@ static void queue_process(struct work_struct *work)
 			schedule_delayed_work(&npinfo->tx_work, HZ/10);
 			return;
 		}
-
-		netif_tx_unlock_bh(dev);
 	}
 }
 
@@ -263,7 +262,7 @@ static void netpoll_send_skb(struct netpoll *np, struct sk_buff *skb)
 
 	if (status != NETDEV_TX_OK) {
 		skb_queue_tail(&npinfo->txq, skb);
-		schedule_work(&npinfo->tx_work);
+		schedule_delayed_work(&npinfo->tx_work,0);
 	}
 }
 
@@ -628,7 +627,7 @@ int netpoll_setup(struct netpoll *np)
 		spin_lock_init(&npinfo->rx_lock);
 		skb_queue_head_init(&npinfo->arp_tx);
 		skb_queue_head_init(&npinfo->txq);
-		INIT_WORK(&npinfo->tx_work, queue_process, npinfo);
+		INIT_DELAYED_WORK(&npinfo->tx_work, queue_process);
 
 		atomic_set(&npinfo->refcnt, 1);
 	} else {
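
Note the schedule_work() to schedule_delayed_work(..., 0) substitutions here and in the softmac hunks below: once a field becomes a struct delayed_work, its address no longer has the struct work_struct * type that schedule_work() expects, and a zero timeout queues the item on the next workqueue pass, preserving the old "run as soon as possible" behaviour. Sketch, with kick_tx as a hypothetical caller:

	static void kick_tx(struct netpoll_info *npinfo)
	{
		/* schedule_work(&npinfo->tx_work) would no longer type-check:
		 * it takes a struct work_struct *, and &npinfo->tx_work is a
		 * struct delayed_work *. */
		schedule_delayed_work(&npinfo->tx_work, 0);	/* queue immediately */
	}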
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 8e1c385e5ba9..de7801d589e7 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -68,8 +68,8 @@
 
 #include "kmap_skb.h"
 
-static kmem_cache_t *skbuff_head_cache __read_mostly;
-static kmem_cache_t *skbuff_fclone_cache __read_mostly;
+static struct kmem_cache *skbuff_head_cache __read_mostly;
+static struct kmem_cache *skbuff_fclone_cache __read_mostly;
 
 /*
  *	Keep out-of-line to prevent kernel bloat.
@@ -132,6 +132,7 @@ EXPORT_SYMBOL(skb_truesize_bug);
  *	@gfp_mask: allocation mask
  *	@fclone: allocate from fclone cache instead of head cache
  *		and allocate a cloned (child) skb
+ *	@node: numa node to allocate memory on
  *
  *	Allocate a new &sk_buff. The returned buffer has no headroom and a
  *	tail room of size bytes. The object has a reference count of one.
@@ -141,9 +142,9 @@ EXPORT_SYMBOL(skb_truesize_bug);
  *	%GFP_ATOMIC.
  */
 struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
-			    int fclone)
+			    int fclone, int node)
 {
-	kmem_cache_t *cache;
+	struct kmem_cache *cache;
 	struct skb_shared_info *shinfo;
 	struct sk_buff *skb;
 	u8 *data;
@@ -151,14 +152,14 @@ struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
 	cache = fclone ? skbuff_fclone_cache : skbuff_head_cache;
 
 	/* Get the HEAD */
-	skb = kmem_cache_alloc(cache, gfp_mask & ~__GFP_DMA);
+	skb = kmem_cache_alloc_node(cache, gfp_mask & ~__GFP_DMA, node);
 	if (!skb)
 		goto out;
 
 	/* Get the DATA. Size must match skb_add_mtu(). */
 	size = SKB_DATA_ALIGN(size);
-	data = kmalloc_track_caller(size + sizeof(struct skb_shared_info),
-			gfp_mask);
+	data = kmalloc_node_track_caller(size + sizeof(struct skb_shared_info),
+			gfp_mask, node);
 	if (!data)
 		goto nodata;
 
@@ -210,7 +211,7 @@ nodata:
 *	Buffers may only be allocated from interrupts using a @gfp_mask of
 *	%GFP_ATOMIC.
 */
-struct sk_buff *alloc_skb_from_cache(kmem_cache_t *cp,
+struct sk_buff *alloc_skb_from_cache(struct kmem_cache *cp,
 				     unsigned int size,
 				     gfp_t gfp_mask)
 {
@@ -267,9 +268,10 @@ nodata:
 struct sk_buff *__netdev_alloc_skb(struct net_device *dev,
 		unsigned int length, gfp_t gfp_mask)
 {
+	int node = dev->class_dev.dev ? dev_to_node(dev->class_dev.dev) : -1;
 	struct sk_buff *skb;
 
-	skb = alloc_skb(length + NET_SKB_PAD, gfp_mask);
+	skb = __alloc_skb(length + NET_SKB_PAD, gfp_mask, 0, node);
 	if (likely(skb)) {
 		skb_reserve(skb, NET_SKB_PAD);
 		skb->dev = dev;
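
Giving __alloc_skb() a node parameter makes skb allocation NUMA-aware: __netdev_alloc_skb() now allocates on the node of the underlying device, while ordinary callers keep the old node-agnostic behaviour. Presumably the generic entry point becomes a wrapper passing -1 for "any node", along these lines (a sketch of the include/linux/skbuff.h side, which sits outside this net/ diff):

	static inline struct sk_buff *alloc_skb(unsigned int size, gfp_t priority)
	{
		return __alloc_skb(size, priority, 0, -1);	/* -1: no node preference */
	}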
diff --git a/net/core/sock.c b/net/core/sock.c
index 419c7d3289c7..0ed5b4f0bc40 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -810,24 +810,11 @@ lenout:
  */
 static void inline sock_lock_init(struct sock *sk)
 {
-	spin_lock_init(&sk->sk_lock.slock);
-	sk->sk_lock.owner = NULL;
-	init_waitqueue_head(&sk->sk_lock.wq);
-	/*
-	 * Make sure we are not reinitializing a held lock:
-	 */
-	debug_check_no_locks_freed((void *)&sk->sk_lock, sizeof(sk->sk_lock));
-
-	/*
-	 * Mark both the sk_lock and the sk_lock.slock as a
-	 * per-address-family lock class:
-	 */
-	lockdep_set_class_and_name(&sk->sk_lock.slock,
-			af_family_slock_keys + sk->sk_family,
-			af_family_slock_key_strings[sk->sk_family]);
-	lockdep_init_map(&sk->sk_lock.dep_map,
-			af_family_key_strings[sk->sk_family],
-			af_family_keys + sk->sk_family, 0);
+	sock_lock_init_class_and_name(sk,
+			af_family_slock_key_strings[sk->sk_family],
+			af_family_slock_keys + sk->sk_family,
+			af_family_key_strings[sk->sk_family],
+			af_family_keys + sk->sk_family);
 }
 
 /**
@@ -841,7 +828,7 @@ struct sock *sk_alloc(int family, gfp_t priority,
 		      struct proto *prot, int zero_it)
 {
 	struct sock *sk = NULL;
-	kmem_cache_t *slab = prot->slab;
+	struct kmem_cache *slab = prot->slab;
 
 	if (slab != NULL)
 		sk = kmem_cache_alloc(slab, priority);
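
The open-coded lockdep setup in sock_lock_init() collapses into one call. sock_lock_init_class_and_name() lives in include/net/sock.h (outside this diff) and plausibly bundles the same steps the removed lines performed; a sketch, not the verbatim kernel macro:

	#define sock_lock_init_class_and_name(sk, sname, skey, name, key)	\
	do {									\
		sk->sk_lock.owner = NULL;					\
		init_waitqueue_head(&sk->sk_lock.wq);				\
		spin_lock_init(&(sk)->sk_lock.slock);				\
		/* make sure we are not reinitializing a held lock */		\
		debug_check_no_locks_freed((void *)&(sk)->sk_lock,		\
					   sizeof((sk)->sk_lock));		\
		lockdep_set_class_and_name(&(sk)->sk_lock.slock,		\
					   (skey), (sname));			\
		lockdep_init_map(&(sk)->sk_lock.dep_map, (name), (key), 0);	\
	} while (0)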
diff --git a/net/dccp/ackvec.c b/net/dccp/ackvec.c
index bdf1bb7a82c0..1f4727ddbdbf 100644
--- a/net/dccp/ackvec.c
+++ b/net/dccp/ackvec.c
@@ -21,8 +21,8 @@
 
 #include <net/sock.h>
 
-static kmem_cache_t *dccp_ackvec_slab;
-static kmem_cache_t *dccp_ackvec_record_slab;
+static struct kmem_cache *dccp_ackvec_slab;
+static struct kmem_cache *dccp_ackvec_record_slab;
 
 static struct dccp_ackvec_record *dccp_ackvec_record_new(void)
 {
diff --git a/net/dccp/ccid.c b/net/dccp/ccid.c
index ff05e59043cd..d8cf92f09e68 100644
--- a/net/dccp/ccid.c
+++ b/net/dccp/ccid.c
@@ -55,9 +55,9 @@ static inline void ccids_read_unlock(void)
 #define ccids_read_unlock()		do { } while(0)
 #endif
 
-static kmem_cache_t *ccid_kmem_cache_create(int obj_size, const char *fmt,...)
+static struct kmem_cache *ccid_kmem_cache_create(int obj_size, const char *fmt,...)
 {
-	kmem_cache_t *slab;
+	struct kmem_cache *slab;
 	char slab_name_fmt[32], *slab_name;
 	va_list args;
 
@@ -75,7 +75,7 @@ static kmem_cache_t *ccid_kmem_cache_create(int obj_size, const char *fmt,...)
 	return slab;
 }
 
-static void ccid_kmem_cache_destroy(kmem_cache_t *slab)
+static void ccid_kmem_cache_destroy(struct kmem_cache *slab)
 {
 	if (slab != NULL) {
 		const char *name = kmem_cache_name(slab);
diff --git a/net/dccp/ccid.h b/net/dccp/ccid.h
index c7c29514dce8..bcc2d12ae81c 100644
--- a/net/dccp/ccid.h
+++ b/net/dccp/ccid.h
@@ -27,9 +27,9 @@ struct ccid_operations {
 	unsigned char	ccid_id;
 	const char	*ccid_name;
 	struct module	*ccid_owner;
-	kmem_cache_t	*ccid_hc_rx_slab;
+	struct kmem_cache *ccid_hc_rx_slab;
 	__u32		ccid_hc_rx_obj_size;
-	kmem_cache_t	*ccid_hc_tx_slab;
+	struct kmem_cache *ccid_hc_tx_slab;
 	__u32		ccid_hc_tx_obj_size;
 	int		(*ccid_hc_rx_init)(struct ccid *ccid, struct sock *sk);
 	int		(*ccid_hc_tx_init)(struct ccid *ccid, struct sock *sk);
diff --git a/net/dccp/ccids/ccid3.c b/net/dccp/ccids/ccid3.c
index cf8c07b2704f..66a27b9688ca 100644
--- a/net/dccp/ccids/ccid3.c
+++ b/net/dccp/ccids/ccid3.c
@@ -295,7 +295,7 @@ static int ccid3_hc_tx_send_packet(struct sock *sk, struct sk_buff *skb)
 	new_packet = dccp_tx_hist_head(&hctx->ccid3hctx_hist);
 	if (new_packet == NULL || new_packet->dccphtx_sent) {
 		new_packet = dccp_tx_hist_entry_new(ccid3_tx_hist,
-						    SLAB_ATOMIC);
+						    GFP_ATOMIC);
 
 		if (unlikely(new_packet == NULL)) {
 			DCCP_WARN("%s, sk=%p, not enough mem to add to history,"
@@ -889,7 +889,7 @@ static void ccid3_hc_rx_update_li(struct sock *sk, u64 seq_loss, u8 win_loss)
 		/* new loss event detected */
 		/* calculate last interval length */
 		seq_temp = dccp_delta_seqno(head->dccplih_seqno, seq_loss);
-		entry = dccp_li_hist_entry_new(ccid3_li_hist, SLAB_ATOMIC);
+		entry = dccp_li_hist_entry_new(ccid3_li_hist, GFP_ATOMIC);
 
 		if (entry == NULL) {
 			DCCP_BUG("out of memory - can not allocate entry");
@@ -1011,7 +1011,7 @@ static void ccid3_hc_rx_packet_recv(struct sock *sk, struct sk_buff *skb)
 	}
 
 	packet = dccp_rx_hist_entry_new(ccid3_rx_hist, sk, opt_recv->dccpor_ndp,
-					skb, SLAB_ATOMIC);
+					skb, GFP_ATOMIC);
 	if (unlikely(packet == NULL)) {
 		DCCP_WARN("%s, sk=%p, Not enough mem to add rx packet "
 			  "to history, consider it lost!\n", dccp_role(sk), sk);
diff --git a/net/dccp/ccids/lib/loss_interval.c b/net/dccp/ccids/lib/loss_interval.c
index 48b9b93f8acb..0a0baef16b3e 100644
--- a/net/dccp/ccids/lib/loss_interval.c
+++ b/net/dccp/ccids/lib/loss_interval.c
@@ -125,7 +125,7 @@ int dccp_li_hist_interval_new(struct dccp_li_hist *hist,
 	int i;
 
 	for (i = 0; i < DCCP_LI_HIST_IVAL_F_LENGTH; i++) {
-		entry = dccp_li_hist_entry_new(hist, SLAB_ATOMIC);
+		entry = dccp_li_hist_entry_new(hist, GFP_ATOMIC);
 		if (entry == NULL) {
 			dccp_li_hist_purge(hist, list);
 			DCCP_BUG("loss interval list entry is NULL");
diff --git a/net/dccp/ccids/lib/loss_interval.h b/net/dccp/ccids/lib/loss_interval.h
index 0ae85f0340b2..eb257014dd74 100644
--- a/net/dccp/ccids/lib/loss_interval.h
+++ b/net/dccp/ccids/lib/loss_interval.h
@@ -20,7 +20,7 @@
 #define DCCP_LI_HIST_IVAL_F_LENGTH  8
 
 struct dccp_li_hist {
-	kmem_cache_t *dccplih_slab;
+	struct kmem_cache *dccplih_slab;
 };
 
 extern struct dccp_li_hist *dccp_li_hist_new(const char *name);
diff --git a/net/dccp/ccids/lib/packet_history.h b/net/dccp/ccids/lib/packet_history.h
index 067cf1c85a37..9a8bcf224aa7 100644
--- a/net/dccp/ccids/lib/packet_history.h
+++ b/net/dccp/ccids/lib/packet_history.h
@@ -68,14 +68,14 @@ struct dccp_rx_hist_entry {
 };
 
 struct dccp_tx_hist {
-	kmem_cache_t *dccptxh_slab;
+	struct kmem_cache *dccptxh_slab;
 };
 
 extern struct dccp_tx_hist *dccp_tx_hist_new(const char *name);
 extern void dccp_tx_hist_delete(struct dccp_tx_hist *hist);
 
 struct dccp_rx_hist {
-	kmem_cache_t *dccprxh_slab;
+	struct kmem_cache *dccprxh_slab;
 };
 
 extern struct dccp_rx_hist *dccp_rx_hist_new(const char *name);
diff --git a/net/dccp/minisocks.c b/net/dccp/minisocks.c
index 7b52f2a03eef..4c9e26775f72 100644
--- a/net/dccp/minisocks.c
+++ b/net/dccp/minisocks.c
@@ -32,8 +32,7 @@ struct inet_timewait_death_row dccp_death_row = {
 	.tw_timer	= TIMER_INITIALIZER(inet_twdr_hangman, 0,
 					    (unsigned long)&dccp_death_row),
 	.twkill_work	= __WORK_INITIALIZER(dccp_death_row.twkill_work,
-					     inet_twdr_twkill_work,
-					     &dccp_death_row),
+					     inet_twdr_twkill_work),
 /* Short-time timewait calendar */
 
 	.twcal_hand	= -1,
diff --git a/net/decnet/dn_table.c b/net/decnet/dn_table.c
index bdbc3f431668..13b2421991ba 100644
--- a/net/decnet/dn_table.c
+++ b/net/decnet/dn_table.c
@@ -79,7 +79,7 @@ for( ; ((f) = *(fp)) != NULL && dn_key_eq((f)->fn_key, (key)); (fp) = &(f)->fn_n
 static struct hlist_head dn_fib_table_hash[DN_FIB_TABLE_HASHSZ];
 static DEFINE_RWLOCK(dn_fib_tables_lock);
 
-static kmem_cache_t *dn_hash_kmem __read_mostly;
+static struct kmem_cache *dn_hash_kmem __read_mostly;
 static int dn_fib_hash_zombies;
 
 static inline dn_fib_idx_t dn_hash(dn_fib_key_t key, struct dn_zone *dz)
@@ -590,7 +590,7 @@ create:
 
 replace:
 	err = -ENOBUFS;
-	new_f = kmem_cache_alloc(dn_hash_kmem, SLAB_KERNEL);
+	new_f = kmem_cache_alloc(dn_hash_kmem, GFP_KERNEL);
 	if (new_f == NULL)
 		goto out;
 
diff --git a/net/ieee80211/softmac/ieee80211softmac_assoc.c b/net/ieee80211/softmac/ieee80211softmac_assoc.c
index cf51c87a971d..08386c102954 100644
--- a/net/ieee80211/softmac/ieee80211softmac_assoc.c
+++ b/net/ieee80211/softmac/ieee80211softmac_assoc.c
@@ -58,9 +58,11 @@ ieee80211softmac_assoc(struct ieee80211softmac_device *mac, struct ieee80211soft
 }
 
 void
-ieee80211softmac_assoc_timeout(void *d)
+ieee80211softmac_assoc_timeout(struct work_struct *work)
 {
-	struct ieee80211softmac_device *mac = (struct ieee80211softmac_device *)d;
+	struct ieee80211softmac_device *mac =
+		container_of(work, struct ieee80211softmac_device,
+			     associnfo.timeout.work);
 	struct ieee80211softmac_network *n;
 
 	mutex_lock(&mac->associnfo.mutex);
@@ -186,9 +188,11 @@ ieee80211softmac_assoc_notify_auth(struct net_device *dev, int event_type, void
 
 /* This function is called to handle userspace requests (asynchronously) */
 void
-ieee80211softmac_assoc_work(void *d)
+ieee80211softmac_assoc_work(struct work_struct *work)
 {
-	struct ieee80211softmac_device *mac = (struct ieee80211softmac_device *)d;
+	struct ieee80211softmac_device *mac =
+		container_of(work, struct ieee80211softmac_device,
+			     associnfo.work.work);
 	struct ieee80211softmac_network *found = NULL;
 	struct ieee80211_network *net = NULL, *best = NULL;
 	int bssvalid;
@@ -412,7 +416,7 @@ ieee80211softmac_handle_assoc_response(struct net_device * dev,
 		network->authenticated = 0;
 		/* we don't want to do this more than once ... */
 		network->auth_desynced_once = 1;
-		schedule_work(&mac->associnfo.work);
+		schedule_delayed_work(&mac->associnfo.work, 0);
 		break;
 	}
 	default:
@@ -446,7 +450,7 @@ ieee80211softmac_handle_disassoc(struct net_device * dev,
 	ieee80211softmac_disassoc(mac);
 
 	/* try to reassociate */
-	schedule_work(&mac->associnfo.work);
+	schedule_delayed_work(&mac->associnfo.work, 0);
 
 	return 0;
 }
@@ -466,7 +470,7 @@ ieee80211softmac_handle_reassoc_req(struct net_device * dev,
 		dprintkl(KERN_INFO PFX "reassoc request from unknown network\n");
 		return 0;
 	}
-	schedule_work(&mac->associnfo.work);
+	schedule_delayed_work(&mac->associnfo.work, 0);
 
 	return 0;
 }
diff --git a/net/ieee80211/softmac/ieee80211softmac_auth.c b/net/ieee80211/softmac/ieee80211softmac_auth.c
index 0612015f1c78..6012705aa4f8 100644
--- a/net/ieee80211/softmac/ieee80211softmac_auth.c
+++ b/net/ieee80211/softmac/ieee80211softmac_auth.c
@@ -26,7 +26,7 @@
 
 #include "ieee80211softmac_priv.h"
 
-static void ieee80211softmac_auth_queue(void *data);
+static void ieee80211softmac_auth_queue(struct work_struct *work);
 
 /* Queues an auth request to the desired AP */
 int
@@ -54,14 +54,14 @@ ieee80211softmac_auth_req(struct ieee80211softmac_device *mac,
 	auth->mac = mac;
 	auth->retry = IEEE80211SOFTMAC_AUTH_RETRY_LIMIT;
 	auth->state = IEEE80211SOFTMAC_AUTH_OPEN_REQUEST;
-	INIT_WORK(&auth->work, &ieee80211softmac_auth_queue, (void *)auth);
+	INIT_DELAYED_WORK(&auth->work, ieee80211softmac_auth_queue);
 
 	/* Lock (for list) */
 	spin_lock_irqsave(&mac->lock, flags);
 
 	/* add to list */
 	list_add_tail(&auth->list, &mac->auth_queue);
-	schedule_work(&auth->work);
+	schedule_delayed_work(&auth->work, 0);
 	spin_unlock_irqrestore(&mac->lock, flags);
 
 	return 0;
@@ -70,14 +70,15 @@ ieee80211softmac_auth_req(struct ieee80211softmac_device *mac,
 
 /* Sends an auth request to the desired AP and handles timeouts */
 static void
-ieee80211softmac_auth_queue(void *data)
+ieee80211softmac_auth_queue(struct work_struct *work)
 {
 	struct ieee80211softmac_device *mac;
 	struct ieee80211softmac_auth_queue_item *auth;
 	struct ieee80211softmac_network *net;
 	unsigned long flags;
 
-	auth = (struct ieee80211softmac_auth_queue_item *)data;
+	auth = container_of(work, struct ieee80211softmac_auth_queue_item,
+			    work.work);
 	net = auth->net;
 	mac = auth->mac;
 
@@ -118,9 +119,11 @@ ieee80211softmac_auth_queue(void *data)
 
 /* Sends a response to an auth challenge (for shared key auth). */
 static void
-ieee80211softmac_auth_challenge_response(void *_aq)
+ieee80211softmac_auth_challenge_response(struct work_struct *work)
 {
-	struct ieee80211softmac_auth_queue_item *aq = _aq;
+	struct ieee80211softmac_auth_queue_item *aq =
+		container_of(work, struct ieee80211softmac_auth_queue_item,
+			     work.work);
 
 	/* Send our response */
 	ieee80211softmac_send_mgt_frame(aq->mac, aq->net, IEEE80211_STYPE_AUTH, aq->state);
@@ -234,8 +237,8 @@ ieee80211softmac_auth_resp(struct net_device *dev, struct ieee80211_auth *auth)
 			 * we have obviously already sent the initial auth
 			 * request. */
 			cancel_delayed_work(&aq->work);
-			INIT_WORK(&aq->work, &ieee80211softmac_auth_challenge_response, (void *)aq);
-			schedule_work(&aq->work);
+			INIT_DELAYED_WORK(&aq->work, &ieee80211softmac_auth_challenge_response);
+			schedule_delayed_work(&aq->work, 0);
 			spin_unlock_irqrestore(&mac->lock, flags);
 			return 0;
 		case IEEE80211SOFTMAC_AUTH_SHARED_PASS:
@@ -398,6 +401,6 @@ ieee80211softmac_deauth_resp(struct net_device *dev, struct ieee80211_deauth *de
 	ieee80211softmac_deauth_from_net(mac, net);
 
 	/* let's try to re-associate */
-	schedule_work(&mac->associnfo.work);
+	schedule_delayed_work(&mac->associnfo.work, 0);
 	return 0;
 }
diff --git a/net/ieee80211/softmac/ieee80211softmac_event.c b/net/ieee80211/softmac/ieee80211softmac_event.c
index f34fa2ef666b..b9015656cfb3 100644
--- a/net/ieee80211/softmac/ieee80211softmac_event.c
+++ b/net/ieee80211/softmac/ieee80211softmac_event.c
@@ -73,10 +73,12 @@ static char *event_descriptions[IEEE80211SOFTMAC_EVENT_LAST+1] = {
 
 
 static void
-ieee80211softmac_notify_callback(void *d)
+ieee80211softmac_notify_callback(struct work_struct *work)
 {
-	struct ieee80211softmac_event event = *(struct ieee80211softmac_event*) d;
-	kfree(d);
+	struct ieee80211softmac_event *pevent =
+		container_of(work, struct ieee80211softmac_event, work.work);
+	struct ieee80211softmac_event event = *pevent;
+	kfree(pevent);
 
 	event.fun(event.mac->dev, event.event_type, event.context);
 }
@@ -99,7 +101,7 @@ ieee80211softmac_notify_internal(struct ieee80211softmac_device *mac,
 		return -ENOMEM;
 
 	eventptr->event_type = event;
-	INIT_WORK(&eventptr->work, ieee80211softmac_notify_callback, eventptr);
+	INIT_DELAYED_WORK(&eventptr->work, ieee80211softmac_notify_callback);
 	eventptr->fun = fun;
 	eventptr->context = context;
 	eventptr->mac = mac;
@@ -170,7 +172,7 @@ ieee80211softmac_call_events_locked(struct ieee80211softmac_device *mac, int eve
 			/* User may have subscribed to ANY event, so
 			 * we tell them which event triggered it. */
 			eventptr->event_type = event;
-			schedule_work(&eventptr->work);
+			schedule_delayed_work(&eventptr->work, 0);
 		}
 	}
 }
diff --git a/net/ieee80211/softmac/ieee80211softmac_module.c b/net/ieee80211/softmac/ieee80211softmac_module.c
index 33aff4f4a471..256207b71dc9 100644
--- a/net/ieee80211/softmac/ieee80211softmac_module.c
+++ b/net/ieee80211/softmac/ieee80211softmac_module.c
@@ -58,8 +58,8 @@ struct net_device *alloc_ieee80211softmac(int sizeof_priv)
 	INIT_LIST_HEAD(&softmac->events);
 
 	mutex_init(&softmac->associnfo.mutex);
-	INIT_WORK(&softmac->associnfo.work, ieee80211softmac_assoc_work, softmac);
-	INIT_WORK(&softmac->associnfo.timeout, ieee80211softmac_assoc_timeout, softmac);
+	INIT_DELAYED_WORK(&softmac->associnfo.work, ieee80211softmac_assoc_work);
+	INIT_DELAYED_WORK(&softmac->associnfo.timeout, ieee80211softmac_assoc_timeout);
 	softmac->start_scan = ieee80211softmac_start_scan_implementation;
 	softmac->wait_for_scan = ieee80211softmac_wait_for_scan_implementation;
 	softmac->stop_scan = ieee80211softmac_stop_scan_implementation;
diff --git a/net/ieee80211/softmac/ieee80211softmac_priv.h b/net/ieee80211/softmac/ieee80211softmac_priv.h
index 0642e090b8a7..c0dbe070e548 100644
--- a/net/ieee80211/softmac/ieee80211softmac_priv.h
+++ b/net/ieee80211/softmac/ieee80211softmac_priv.h
@@ -78,7 +78,7 @@
 /* private definitions and prototypes */
 
 /*** prototypes from _scan.c */
-void ieee80211softmac_scan(void *sm);
+void ieee80211softmac_scan(struct work_struct *work);
 /* for internal use if scanning is needed */
 int ieee80211softmac_start_scan(struct ieee80211softmac_device *mac);
 void ieee80211softmac_stop_scan(struct ieee80211softmac_device *mac);
@@ -149,7 +149,7 @@ int ieee80211softmac_auth_resp(struct net_device *dev, struct ieee80211_auth *au
 int ieee80211softmac_deauth_resp(struct net_device *dev, struct ieee80211_deauth *deauth);
 
 /*** prototypes from _assoc.c */
-void ieee80211softmac_assoc_work(void *d);
+void ieee80211softmac_assoc_work(struct work_struct *work);
 int ieee80211softmac_handle_assoc_response(struct net_device * dev,
 					   struct ieee80211_assoc_response * resp,
 					   struct ieee80211_network * network);
@@ -157,7 +157,7 @@ int ieee80211softmac_handle_disassoc(struct net_device * dev,
 				     struct ieee80211_disassoc * disassoc);
 int ieee80211softmac_handle_reassoc_req(struct net_device * dev,
 					struct ieee80211_reassoc_request * reassoc);
-void ieee80211softmac_assoc_timeout(void *d);
+void ieee80211softmac_assoc_timeout(struct work_struct *work);
 void ieee80211softmac_send_disassoc_req(struct ieee80211softmac_device *mac, u16 reason);
 void ieee80211softmac_disassoc(struct ieee80211softmac_device *mac);
 
@@ -207,7 +207,7 @@ struct ieee80211softmac_auth_queue_item {
 	struct ieee80211softmac_device	*mac;	/* SoftMAC device */
 	u8 retry;				/* Retry limit */
 	u8 state;				/* Auth State */
-	struct work_struct		work;	/* Work queue */
+	struct delayed_work		work;	/* Work queue */
 };
 
 /* scanning information */
@@ -219,7 +219,8 @@ struct ieee80211softmac_scaninfo {
 	       stop:1;
 	u8 skip_flags;
 	struct completion finished;
-	struct work_struct softmac_scan;
+	struct delayed_work softmac_scan;
+	struct ieee80211softmac_device *mac;
 };
 
 /* private event struct */
@@ -227,7 +228,7 @@ struct ieee80211softmac_event {
 	struct list_head list;
 	int event_type;
 	void *event_context;
-	struct work_struct work;
+	struct delayed_work work;
 	notify_function_ptr fun;
 	void *context;
 	struct ieee80211softmac_device *mac;
diff --git a/net/ieee80211/softmac/ieee80211softmac_scan.c b/net/ieee80211/softmac/ieee80211softmac_scan.c
index 5507feab32de..0c85d6c24cdb 100644
--- a/net/ieee80211/softmac/ieee80211softmac_scan.c
+++ b/net/ieee80211/softmac/ieee80211softmac_scan.c
@@ -90,12 +90,14 @@ ieee80211softmac_wait_for_scan(struct ieee80211softmac_device *sm)
 
 
 /* internal scanning implementation follows */
-void ieee80211softmac_scan(void *d)
+void ieee80211softmac_scan(struct work_struct *work)
 {
 	int invalid_channel;
 	u8 current_channel_idx;
-	struct ieee80211softmac_device *sm = (struct ieee80211softmac_device *)d;
-	struct ieee80211softmac_scaninfo *si = sm->scaninfo;
+	struct ieee80211softmac_scaninfo *si =
+		container_of(work, struct ieee80211softmac_scaninfo,
+			     softmac_scan.work);
+	struct ieee80211softmac_device *sm = si->mac;
 	unsigned long flags;
 
 	while (!(si->stop) && (si->current_channel_idx < si->number_channels)) {
@@ -146,7 +148,8 @@ static inline struct ieee80211softmac_scaninfo *allocate_scaninfo(struct ieee802
 	struct ieee80211softmac_scaninfo *info = kmalloc(sizeof(struct ieee80211softmac_scaninfo), GFP_ATOMIC);
 	if (unlikely(!info))
 		return NULL;
-	INIT_WORK(&info->softmac_scan, ieee80211softmac_scan, mac);
+	INIT_DELAYED_WORK(&info->softmac_scan, ieee80211softmac_scan);
+	info->mac = mac;
 	init_completion(&info->finished);
 	return info;
 }
@@ -187,7 +190,7 @@ int ieee80211softmac_start_scan_implementation(struct net_device *dev)
 	sm->scaninfo->started = 1;
 	sm->scaninfo->stop = 0;
 	INIT_COMPLETION(sm->scaninfo->finished);
-	schedule_work(&sm->scaninfo->softmac_scan);
+	schedule_delayed_work(&sm->scaninfo->softmac_scan, 0);
 	spin_unlock_irqrestore(&sm->lock, flags);
 	return 0;
 }
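
Here container_of() alone is not enough: the callback also needs the softmac device, which the old void * argument used to carry and which the scaninfo does not embed. The conversion therefore adds an explicit backpointer (info->mac), filled in at allocation time. The general shape, with hypothetical names (my_device, my_scaninfo, my_scan):

	struct my_device;			/* owner of the scan state */

	struct my_scaninfo {
		struct delayed_work	scan_work;
		struct my_device	*mac;	/* backpointer, set when allocated */
	};

	static void my_scan(struct work_struct *work)
	{
		struct my_scaninfo *si =
			container_of(work, struct my_scaninfo, scan_work.work);
		struct my_device *sm = si->mac;	/* context the void * used to carry */
		/* ... scan using sm and si ... */
	}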
diff --git a/net/ieee80211/softmac/ieee80211softmac_wx.c b/net/ieee80211/softmac/ieee80211softmac_wx.c
index 23068a830f7d..2ffaebd21c53 100644
--- a/net/ieee80211/softmac/ieee80211softmac_wx.c
+++ b/net/ieee80211/softmac/ieee80211softmac_wx.c
@@ -122,7 +122,7 @@ ieee80211softmac_wx_set_essid(struct net_device *net_dev,
 
 	sm->associnfo.associating = 1;
 	/* queue lower level code to do work (if necessary) */
-	schedule_work(&sm->associnfo.work);
+	schedule_delayed_work(&sm->associnfo.work, 0);
 out:
 	mutex_unlock(&sm->associnfo.mutex);
 
@@ -356,7 +356,7 @@ ieee80211softmac_wx_set_wap(struct net_device *net_dev,
 		/* force reassociation */
 		mac->associnfo.bssvalid = 0;
 		if (mac->associnfo.associated)
-			schedule_work(&mac->associnfo.work);
+			schedule_delayed_work(&mac->associnfo.work, 0);
 	} else if (is_zero_ether_addr(data->ap_addr.sa_data)) {
 		/* the bssid we have is no longer fixed */
 		mac->associnfo.bssfixed = 0;
@@ -373,7 +373,7 @@ ieee80211softmac_wx_set_wap(struct net_device *net_dev,
 		/* tell the other code that this bssid should be used no matter what */
 		mac->associnfo.bssfixed = 1;
 		/* queue associate if new bssid or (old one again and not associated) */
-		schedule_work(&mac->associnfo.work);
+		schedule_delayed_work(&mac->associnfo.work, 0);
 	}
 
 out:
diff --git a/net/ipv4/fib_hash.c b/net/ipv4/fib_hash.c
index 107bb6cbb0b3..648f47c1c399 100644
--- a/net/ipv4/fib_hash.c
+++ b/net/ipv4/fib_hash.c
@@ -45,8 +45,8 @@
 
 #include "fib_lookup.h"
 
-static kmem_cache_t *fn_hash_kmem __read_mostly;
-static kmem_cache_t *fn_alias_kmem __read_mostly;
+static struct kmem_cache *fn_hash_kmem __read_mostly;
+static struct kmem_cache *fn_alias_kmem __read_mostly;
 
 struct fib_node {
 	struct hlist_node	fn_hash;
@@ -485,13 +485,13 @@ static int fn_hash_insert(struct fib_table *tb, struct fib_config *cfg)
 			goto out;
 
 	err = -ENOBUFS;
-	new_fa = kmem_cache_alloc(fn_alias_kmem, SLAB_KERNEL);
+	new_fa = kmem_cache_alloc(fn_alias_kmem, GFP_KERNEL);
 	if (new_fa == NULL)
 		goto out;
 
 	new_f = NULL;
 	if (!f) {
-		new_f = kmem_cache_alloc(fn_hash_kmem, SLAB_KERNEL);
+		new_f = kmem_cache_alloc(fn_hash_kmem, GFP_KERNEL);
 		if (new_f == NULL)
 			goto out_free_new_fa;
 
diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c
index d17990ec724f..cfb249cc0a58 100644
--- a/net/ipv4/fib_trie.c
+++ b/net/ipv4/fib_trie.c
@@ -172,7 +172,7 @@ static struct tnode *inflate(struct trie *t, struct tnode *tn);
 static struct tnode *halve(struct trie *t, struct tnode *tn);
 static void tnode_free(struct tnode *tn);
 
-static kmem_cache_t *fn_alias_kmem __read_mostly;
+static struct kmem_cache *fn_alias_kmem __read_mostly;
 static struct trie *trie_local = NULL, *trie_main = NULL;
 
 
@@ -1187,7 +1187,7 @@ static int fn_trie_insert(struct fib_table *tb, struct fib_config *cfg)
 	u8 state;
 
 	err = -ENOBUFS;
-	new_fa = kmem_cache_alloc(fn_alias_kmem, SLAB_KERNEL);
+	new_fa = kmem_cache_alloc(fn_alias_kmem, GFP_KERNEL);
 	if (new_fa == NULL)
 		goto out;
 
@@ -1232,7 +1232,7 @@ static int fn_trie_insert(struct fib_table *tb, struct fib_config *cfg)
 		goto out;
 
 	err = -ENOBUFS;
-	new_fa = kmem_cache_alloc(fn_alias_kmem, SLAB_KERNEL);
+	new_fa = kmem_cache_alloc(fn_alias_kmem, GFP_KERNEL);
 	if (new_fa == NULL)
 		goto out;
 
diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
index 244c4f445c7d..8c79c8a4ea5c 100644
--- a/net/ipv4/inet_hashtables.c
+++ b/net/ipv4/inet_hashtables.c
@@ -27,11 +27,11 @@
 * Allocate and initialize a new local port bind bucket.
 * The bindhash mutex for snum's hash chain must be held here.
 */
-struct inet_bind_bucket *inet_bind_bucket_create(kmem_cache_t *cachep,
+struct inet_bind_bucket *inet_bind_bucket_create(struct kmem_cache *cachep,
						 struct inet_bind_hashbucket *head,
						 const unsigned short snum)
 {
-	struct inet_bind_bucket *tb = kmem_cache_alloc(cachep, SLAB_ATOMIC);
+	struct inet_bind_bucket *tb = kmem_cache_alloc(cachep, GFP_ATOMIC);
 
 	if (tb != NULL) {
 		tb->port = snum;
@@ -45,7 +45,7 @@ struct inet_bind_bucket *inet_bind_bucket_create(kmem_cache_t *cachep,
 /*
 * Caller must hold hashbucket lock for this tb with local BH disabled
 */
-void inet_bind_bucket_destroy(kmem_cache_t *cachep, struct inet_bind_bucket *tb)
+void inet_bind_bucket_destroy(struct kmem_cache *cachep, struct inet_bind_bucket *tb)
 {
 	if (hlist_empty(&tb->owners)) {
 		__hlist_del(&tb->node);
diff --git a/net/ipv4/inet_timewait_sock.c b/net/ipv4/inet_timewait_sock.c index cdd805344c61..e28330aa4139 100644 --- a/net/ipv4/inet_timewait_sock.c +++ b/net/ipv4/inet_timewait_sock.c | |||
@@ -91,7 +91,7 @@ struct inet_timewait_sock *inet_twsk_alloc(const struct sock *sk, const int stat | |||
91 | { | 91 | { |
92 | struct inet_timewait_sock *tw = | 92 | struct inet_timewait_sock *tw = |
93 | kmem_cache_alloc(sk->sk_prot_creator->twsk_prot->twsk_slab, | 93 | kmem_cache_alloc(sk->sk_prot_creator->twsk_prot->twsk_slab, |
94 | SLAB_ATOMIC); | 94 | GFP_ATOMIC); |
95 | if (tw != NULL) { | 95 | if (tw != NULL) { |
96 | const struct inet_sock *inet = inet_sk(sk); | 96 | const struct inet_sock *inet = inet_sk(sk); |
97 | 97 | ||
@@ -197,9 +197,10 @@ EXPORT_SYMBOL_GPL(inet_twdr_hangman); | |||
197 | 197 | ||
198 | extern void twkill_slots_invalid(void); | 198 | extern void twkill_slots_invalid(void); |
199 | 199 | ||
200 | void inet_twdr_twkill_work(void *data) | 200 | void inet_twdr_twkill_work(struct work_struct *work) |
201 | { | 201 | { |
202 | struct inet_timewait_death_row *twdr = data; | 202 | struct inet_timewait_death_row *twdr = |
203 | container_of(work, struct inet_timewait_death_row, twkill_work); | ||
203 | int i; | 204 | int i; |
204 | 205 | ||
205 | if ((INET_TWDR_TWKILL_SLOTS - 1) > (sizeof(twdr->thread_slots) * 8)) | 206 | if ((INET_TWDR_TWKILL_SLOTS - 1) > (sizeof(twdr->thread_slots) * 8)) |
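
The work_struct conversion above follows one pattern through the whole patch: the handler now receives the work_struct itself instead of an opaque void *, and recovers its owning object with container_of(), so INIT_WORK() loses its third argument. A minimal sketch of the pattern with hypothetical names:

    #include <linux/workqueue.h>

    struct my_ctx {
            struct work_struct work;
            int pending;
    };

    static void my_handler(struct work_struct *work)
    {
            /* climb from the embedded work_struct back to its container */
            struct my_ctx *ctx = container_of(work, struct my_ctx, work);

            ctx->pending = 0;
    }

    static void my_ctx_start(struct my_ctx *ctx)
    {
            INIT_WORK(&ctx->work, my_handler);  /* no data argument any more */
            schedule_work(&ctx->work);
    }
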
diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c index f072f3875af8..711eb6d0285a 100644 --- a/net/ipv4/inetpeer.c +++ b/net/ipv4/inetpeer.c | |||
@@ -73,7 +73,7 @@ | |||
73 | /* Exported for inet_getid inline function. */ | 73 | /* Exported for inet_getid inline function. */ |
74 | DEFINE_SPINLOCK(inet_peer_idlock); | 74 | DEFINE_SPINLOCK(inet_peer_idlock); |
75 | 75 | ||
76 | static kmem_cache_t *peer_cachep __read_mostly; | 76 | static struct kmem_cache *peer_cachep __read_mostly; |
77 | 77 | ||
78 | #define node_height(x) x->avl_height | 78 | #define node_height(x) x->avl_height |
79 | static struct inet_peer peer_fake_node = { | 79 | static struct inet_peer peer_fake_node = { |
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c index efcf45ecc818..ecb5422ea237 100644 --- a/net/ipv4/ipmr.c +++ b/net/ipv4/ipmr.c | |||
@@ -105,7 +105,7 @@ static DEFINE_SPINLOCK(mfc_unres_lock); | |||
105 | In this case data path is free of exclusive locks at all. | 105 | In this case data path is free of exclusive locks at all. |
106 | */ | 106 | */ |
107 | 107 | ||
108 | static kmem_cache_t *mrt_cachep __read_mostly; | 108 | static struct kmem_cache *mrt_cachep __read_mostly; |
109 | 109 | ||
110 | static int ip_mr_forward(struct sk_buff *skb, struct mfc_cache *cache, int local); | 110 | static int ip_mr_forward(struct sk_buff *skb, struct mfc_cache *cache, int local); |
111 | static int ipmr_cache_report(struct sk_buff *pkt, vifi_t vifi, int assert); | 111 | static int ipmr_cache_report(struct sk_buff *pkt, vifi_t vifi, int assert); |
diff --git a/net/ipv4/ipvs/ip_vs_conn.c b/net/ipv4/ipvs/ip_vs_conn.c index 8832eb517d52..8086787a2c51 100644 --- a/net/ipv4/ipvs/ip_vs_conn.c +++ b/net/ipv4/ipvs/ip_vs_conn.c | |||
@@ -44,7 +44,7 @@ | |||
44 | static struct list_head *ip_vs_conn_tab; | 44 | static struct list_head *ip_vs_conn_tab; |
45 | 45 | ||
46 | /* SLAB cache for IPVS connections */ | 46 | /* SLAB cache for IPVS connections */ |
47 | static kmem_cache_t *ip_vs_conn_cachep __read_mostly; | 47 | static struct kmem_cache *ip_vs_conn_cachep __read_mostly; |
48 | 48 | ||
49 | /* counter for current IPVS connections */ | 49 | /* counter for current IPVS connections */ |
50 | static atomic_t ip_vs_conn_count = ATOMIC_INIT(0); | 50 | static atomic_t ip_vs_conn_count = ATOMIC_INIT(0); |
diff --git a/net/ipv4/ipvs/ip_vs_ctl.c b/net/ipv4/ipvs/ip_vs_ctl.c index f261616e4602..9b933381ebbe 100644 --- a/net/ipv4/ipvs/ip_vs_ctl.c +++ b/net/ipv4/ipvs/ip_vs_ctl.c | |||
@@ -221,10 +221,10 @@ static void update_defense_level(void) | |||
221 | * Timer for checking the defense | 221 | * Timer for checking the defense |
222 | */ | 222 | */ |
223 | #define DEFENSE_TIMER_PERIOD 1*HZ | 223 | #define DEFENSE_TIMER_PERIOD 1*HZ |
224 | static void defense_work_handler(void *data); | 224 | static void defense_work_handler(struct work_struct *work); |
225 | static DECLARE_WORK(defense_work, defense_work_handler, NULL); | 225 | static DECLARE_DELAYED_WORK(defense_work, defense_work_handler); |
226 | 226 | ||
227 | static void defense_work_handler(void *data) | 227 | static void defense_work_handler(struct work_struct *work) |
228 | { | 228 | { |
229 | update_defense_level(); | 229 | update_defense_level(); |
230 | if (atomic_read(&ip_vs_dropentry)) | 230 | if (atomic_read(&ip_vs_dropentry)) |
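
Statically declared periodic work like defense_work needs no container_of() at all: the handler ignores its argument and simply re-arms itself. The declaration must still become a delayed_work, though, so that schedule_delayed_work() has a timer to hang the delay on. A reduced sketch with hypothetical names:

    #include <linux/workqueue.h>

    static void my_tick(struct work_struct *work);
    static DECLARE_DELAYED_WORK(my_tick_work, my_tick);

    static void my_tick(struct work_struct *work)
    {
            /* ... periodic housekeeping ... */
            schedule_delayed_work(&my_tick_work, HZ);   /* run again in 1s */
    }
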
diff --git a/net/ipv4/netfilter/ip_conntrack_core.c b/net/ipv4/netfilter/ip_conntrack_core.c index f4b0e68a16d2..8556a4f4f60a 100644 --- a/net/ipv4/netfilter/ip_conntrack_core.c +++ b/net/ipv4/netfilter/ip_conntrack_core.c | |||
@@ -65,8 +65,8 @@ static LIST_HEAD(helpers); | |||
65 | unsigned int ip_conntrack_htable_size __read_mostly = 0; | 65 | unsigned int ip_conntrack_htable_size __read_mostly = 0; |
66 | int ip_conntrack_max __read_mostly; | 66 | int ip_conntrack_max __read_mostly; |
67 | struct list_head *ip_conntrack_hash __read_mostly; | 67 | struct list_head *ip_conntrack_hash __read_mostly; |
68 | static kmem_cache_t *ip_conntrack_cachep __read_mostly; | 68 | static struct kmem_cache *ip_conntrack_cachep __read_mostly; |
69 | static kmem_cache_t *ip_conntrack_expect_cachep __read_mostly; | 69 | static struct kmem_cache *ip_conntrack_expect_cachep __read_mostly; |
70 | struct ip_conntrack ip_conntrack_untracked; | 70 | struct ip_conntrack ip_conntrack_untracked; |
71 | unsigned int ip_ct_log_invalid __read_mostly; | 71 | unsigned int ip_ct_log_invalid __read_mostly; |
72 | static LIST_HEAD(unconfirmed); | 72 | static LIST_HEAD(unconfirmed); |
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c index 6dddf59c1fb9..4a3889dd1943 100644 --- a/net/ipv4/tcp_minisocks.c +++ b/net/ipv4/tcp_minisocks.c | |||
@@ -45,8 +45,7 @@ struct inet_timewait_death_row tcp_death_row = { | |||
45 | .tw_timer = TIMER_INITIALIZER(inet_twdr_hangman, 0, | 45 | .tw_timer = TIMER_INITIALIZER(inet_twdr_hangman, 0, |
46 | (unsigned long)&tcp_death_row), | 46 | (unsigned long)&tcp_death_row), |
47 | .twkill_work = __WORK_INITIALIZER(tcp_death_row.twkill_work, | 47 | .twkill_work = __WORK_INITIALIZER(tcp_death_row.twkill_work, |
48 | inet_twdr_twkill_work, | 48 | inet_twdr_twkill_work), |
49 | &tcp_death_row), | ||
50 | /* Short-time timewait calendar */ | 49 | /* Short-time timewait calendar */ |
51 | 50 | ||
52 | .twcal_hand = -1, | 51 | .twcal_hand = -1, |
diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c index 87c8f54872b7..e5cd83b2205d 100644 --- a/net/ipv6/af_inet6.c +++ b/net/ipv6/af_inet6.c | |||
@@ -720,10 +720,8 @@ snmp6_mib_free(void *ptr[2]) | |||
720 | { | 720 | { |
721 | if (ptr == NULL) | 721 | if (ptr == NULL) |
722 | return; | 722 | return; |
723 | if (ptr[0]) | 723 | free_percpu(ptr[0]); |
724 | free_percpu(ptr[0]); | 724 | free_percpu(ptr[1]); |
725 | if (ptr[1]) | ||
726 | free_percpu(ptr[1]); | ||
727 | ptr[0] = ptr[1] = NULL; | 725 | ptr[0] = ptr[1] = NULL; |
728 | } | 726 | } |
729 | 727 | ||
diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c index bf526115e518..96d8310ae9c8 100644 --- a/net/ipv6/ip6_fib.c +++ b/net/ipv6/ip6_fib.c | |||
@@ -50,7 +50,7 @@ | |||
50 | 50 | ||
51 | struct rt6_statistics rt6_stats; | 51 | struct rt6_statistics rt6_stats; |
52 | 52 | ||
53 | static kmem_cache_t * fib6_node_kmem __read_mostly; | 53 | static struct kmem_cache * fib6_node_kmem __read_mostly; |
54 | 54 | ||
55 | enum fib_walk_state_t | 55 | enum fib_walk_state_t |
56 | { | 56 | { |
@@ -150,7 +150,7 @@ static __inline__ struct fib6_node * node_alloc(void) | |||
150 | { | 150 | { |
151 | struct fib6_node *fn; | 151 | struct fib6_node *fn; |
152 | 152 | ||
153 | if ((fn = kmem_cache_alloc(fib6_node_kmem, SLAB_ATOMIC)) != NULL) | 153 | if ((fn = kmem_cache_alloc(fib6_node_kmem, GFP_ATOMIC)) != NULL) |
154 | memset(fn, 0, sizeof(struct fib6_node)); | 154 | memset(fn, 0, sizeof(struct fib6_node)); |
155 | 155 | ||
156 | return fn; | 156 | return fn; |
diff --git a/net/ipv6/xfrm6_tunnel.c b/net/ipv6/xfrm6_tunnel.c index 01a5c52a2be3..12e426b9aacd 100644 --- a/net/ipv6/xfrm6_tunnel.c +++ b/net/ipv6/xfrm6_tunnel.c | |||
@@ -50,7 +50,7 @@ static u32 xfrm6_tunnel_spi; | |||
50 | #define XFRM6_TUNNEL_SPI_MIN 1 | 50 | #define XFRM6_TUNNEL_SPI_MIN 1 |
51 | #define XFRM6_TUNNEL_SPI_MAX 0xffffffff | 51 | #define XFRM6_TUNNEL_SPI_MAX 0xffffffff |
52 | 52 | ||
53 | static kmem_cache_t *xfrm6_tunnel_spi_kmem __read_mostly; | 53 | static struct kmem_cache *xfrm6_tunnel_spi_kmem __read_mostly; |
54 | 54 | ||
55 | #define XFRM6_TUNNEL_SPI_BYADDR_HSIZE 256 | 55 | #define XFRM6_TUNNEL_SPI_BYADDR_HSIZE 256 |
56 | #define XFRM6_TUNNEL_SPI_BYSPI_HSIZE 256 | 56 | #define XFRM6_TUNNEL_SPI_BYSPI_HSIZE 256 |
@@ -180,7 +180,7 @@ try_next_2:; | |||
180 | spi = 0; | 180 | spi = 0; |
181 | goto out; | 181 | goto out; |
182 | alloc_spi: | 182 | alloc_spi: |
183 | x6spi = kmem_cache_alloc(xfrm6_tunnel_spi_kmem, SLAB_ATOMIC); | 183 | x6spi = kmem_cache_alloc(xfrm6_tunnel_spi_kmem, GFP_ATOMIC); |
184 | if (!x6spi) | 184 | if (!x6spi) |
185 | goto out; | 185 | goto out; |
186 | 186 | ||
diff --git a/net/irda/ircomm/ircomm_tty.c b/net/irda/ircomm/ircomm_tty.c index d50a02030ad7..262bda808d96 100644 --- a/net/irda/ircomm/ircomm_tty.c +++ b/net/irda/ircomm/ircomm_tty.c | |||
@@ -61,7 +61,7 @@ static void ircomm_tty_flush_buffer(struct tty_struct *tty); | |||
61 | static void ircomm_tty_send_xchar(struct tty_struct *tty, char ch); | 61 | static void ircomm_tty_send_xchar(struct tty_struct *tty, char ch); |
62 | static void ircomm_tty_wait_until_sent(struct tty_struct *tty, int timeout); | 62 | static void ircomm_tty_wait_until_sent(struct tty_struct *tty, int timeout); |
63 | static void ircomm_tty_hangup(struct tty_struct *tty); | 63 | static void ircomm_tty_hangup(struct tty_struct *tty); |
64 | static void ircomm_tty_do_softint(void *private_); | 64 | static void ircomm_tty_do_softint(struct work_struct *work); |
65 | static void ircomm_tty_shutdown(struct ircomm_tty_cb *self); | 65 | static void ircomm_tty_shutdown(struct ircomm_tty_cb *self); |
66 | static void ircomm_tty_stop(struct tty_struct *tty); | 66 | static void ircomm_tty_stop(struct tty_struct *tty); |
67 | 67 | ||
@@ -389,7 +389,7 @@ static int ircomm_tty_open(struct tty_struct *tty, struct file *filp) | |||
389 | self->flow = FLOW_STOP; | 389 | self->flow = FLOW_STOP; |
390 | 390 | ||
391 | self->line = line; | 391 | self->line = line; |
392 | INIT_WORK(&self->tqueue, ircomm_tty_do_softint, self); | 392 | INIT_WORK(&self->tqueue, ircomm_tty_do_softint); |
393 | self->max_header_size = IRCOMM_TTY_HDR_UNINITIALISED; | 393 | self->max_header_size = IRCOMM_TTY_HDR_UNINITIALISED; |
394 | self->max_data_size = IRCOMM_TTY_DATA_UNINITIALISED; | 394 | self->max_data_size = IRCOMM_TTY_DATA_UNINITIALISED; |
395 | self->close_delay = 5*HZ/10; | 395 | self->close_delay = 5*HZ/10; |
@@ -594,15 +594,16 @@ static void ircomm_tty_flush_buffer(struct tty_struct *tty) | |||
594 | } | 594 | } |
595 | 595 | ||
596 | /* | 596 | /* |
597 | * Function ircomm_tty_do_softint (private_) | 597 | * Function ircomm_tty_do_softint (work) |
598 | * | 598 | * |
599 | * We use this routine to give the write wakeup to the user at a | 599 | * We use this routine to give the write wakeup to the user at a
600 | * safe time (as fast as possible after the write has completed). This | 600 | * safe time (as fast as possible after the write has completed). This
601 | * can be compared to the Tx interrupt. | 601 | * can be compared to the Tx interrupt. |
602 | */ | 602 | */ |
603 | static void ircomm_tty_do_softint(void *private_) | 603 | static void ircomm_tty_do_softint(struct work_struct *work) |
604 | { | 604 | { |
605 | struct ircomm_tty_cb *self = (struct ircomm_tty_cb *) private_; | 605 | struct ircomm_tty_cb *self = |
606 | container_of(work, struct ircomm_tty_cb, tqueue); | ||
606 | struct tty_struct *tty; | 607 | struct tty_struct *tty; |
607 | unsigned long flags; | 608 | unsigned long flags; |
608 | struct sk_buff *skb, *ctrl_skb; | 609 | struct sk_buff *skb, *ctrl_skb; |
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c index eaa0f8a1adb6..a9638ff52a72 100644 --- a/net/netfilter/nf_conntrack_core.c +++ b/net/netfilter/nf_conntrack_core.c | |||
@@ -108,7 +108,7 @@ static struct { | |||
108 | size_t size; | 108 | size_t size; |
109 | 109 | ||
110 | /* slab cache pointer */ | 110 | /* slab cache pointer */ |
111 | kmem_cache_t *cachep; | 111 | struct kmem_cache *cachep; |
112 | 112 | ||
113 | /* allocated slab cache + modules which uses this slab cache */ | 113 | /* allocated slab cache + modules which uses this slab cache */ |
114 | int use; | 114 | int use; |
@@ -147,7 +147,7 @@ int nf_conntrack_register_cache(u_int32_t features, const char *name, | |||
147 | { | 147 | { |
148 | int ret = 0; | 148 | int ret = 0; |
149 | char *cache_name; | 149 | char *cache_name; |
150 | kmem_cache_t *cachep; | 150 | struct kmem_cache *cachep; |
151 | 151 | ||
152 | DEBUGP("nf_conntrack_register_cache: features=0x%x, name=%s, size=%d\n", | 152 | DEBUGP("nf_conntrack_register_cache: features=0x%x, name=%s, size=%d\n", |
153 | features, name, size); | 153 | features, name, size); |
@@ -226,7 +226,7 @@ EXPORT_SYMBOL_GPL(nf_conntrack_register_cache); | |||
226 | /* FIXME: Currently, only nf_conntrack_cleanup() can call this function. */ | 226 | /* FIXME: Currently, only nf_conntrack_cleanup() can call this function. */
227 | void nf_conntrack_unregister_cache(u_int32_t features) | 227 | void nf_conntrack_unregister_cache(u_int32_t features) |
228 | { | 228 | { |
229 | kmem_cache_t *cachep; | 229 | struct kmem_cache *cachep; |
230 | char *name; | 230 | char *name; |
231 | 231 | ||
232 | /* | 232 | /* |
diff --git a/net/netfilter/nf_conntrack_expect.c b/net/netfilter/nf_conntrack_expect.c index 588d37937046..c20f901fa177 100644 --- a/net/netfilter/nf_conntrack_expect.c +++ b/net/netfilter/nf_conntrack_expect.c | |||
@@ -29,7 +29,7 @@ | |||
29 | LIST_HEAD(nf_conntrack_expect_list); | 29 | LIST_HEAD(nf_conntrack_expect_list); |
30 | EXPORT_SYMBOL_GPL(nf_conntrack_expect_list); | 30 | EXPORT_SYMBOL_GPL(nf_conntrack_expect_list); |
31 | 31 | ||
32 | kmem_cache_t *nf_conntrack_expect_cachep __read_mostly; | 32 | struct kmem_cache *nf_conntrack_expect_cachep __read_mostly; |
33 | static unsigned int nf_conntrack_expect_next_id; | 33 | static unsigned int nf_conntrack_expect_next_id; |
34 | 34 | ||
35 | /* nf_conntrack_expect helper functions */ | 35 | /* nf_conntrack_expect helper functions */ |
diff --git a/net/netfilter/xt_hashlimit.c b/net/netfilter/xt_hashlimit.c index a98de0b54d65..a5a6e192ac2d 100644 --- a/net/netfilter/xt_hashlimit.c +++ b/net/netfilter/xt_hashlimit.c | |||
@@ -92,7 +92,7 @@ struct xt_hashlimit_htable { | |||
92 | static DEFINE_SPINLOCK(hashlimit_lock); /* protects htables list */ | 92 | static DEFINE_SPINLOCK(hashlimit_lock); /* protects htables list */ |
93 | static DEFINE_MUTEX(hlimit_mutex); /* additional checkentry protection */ | 93 | static DEFINE_MUTEX(hlimit_mutex); /* additional checkentry protection */ |
94 | static HLIST_HEAD(hashlimit_htables); | 94 | static HLIST_HEAD(hashlimit_htables); |
95 | static kmem_cache_t *hashlimit_cachep __read_mostly; | 95 | static struct kmem_cache *hashlimit_cachep __read_mostly; |
96 | 96 | ||
97 | static inline int dst_cmp(const struct dsthash_ent *ent, struct dsthash_dst *b) | 97 | static inline int dst_cmp(const struct dsthash_ent *ent, struct dsthash_dst *b) |
98 | { | 98 | { |
diff --git a/net/rxrpc/krxiod.c b/net/rxrpc/krxiod.c index dada34a77b21..49effd92144e 100644 --- a/net/rxrpc/krxiod.c +++ b/net/rxrpc/krxiod.c | |||
@@ -13,6 +13,7 @@ | |||
13 | #include <linux/completion.h> | 13 | #include <linux/completion.h> |
14 | #include <linux/spinlock.h> | 14 | #include <linux/spinlock.h> |
15 | #include <linux/init.h> | 15 | #include <linux/init.h> |
16 | #include <linux/freezer.h> | ||
16 | #include <rxrpc/krxiod.h> | 17 | #include <rxrpc/krxiod.h> |
17 | #include <rxrpc/transport.h> | 18 | #include <rxrpc/transport.h> |
18 | #include <rxrpc/peer.h> | 19 | #include <rxrpc/peer.h> |
diff --git a/net/rxrpc/krxsecd.c b/net/rxrpc/krxsecd.c index cea4eb5e2497..3ab0f77409f4 100644 --- a/net/rxrpc/krxsecd.c +++ b/net/rxrpc/krxsecd.c | |||
@@ -27,6 +27,7 @@ | |||
27 | #include <rxrpc/call.h> | 27 | #include <rxrpc/call.h> |
28 | #include <linux/udp.h> | 28 | #include <linux/udp.h> |
29 | #include <linux/ip.h> | 29 | #include <linux/ip.h> |
30 | #include <linux/freezer.h> | ||
30 | #include <net/sock.h> | 31 | #include <net/sock.h> |
31 | #include "internal.h" | 32 | #include "internal.h" |
32 | 33 | ||
diff --git a/net/rxrpc/krxtimod.c b/net/rxrpc/krxtimod.c index 3e7466900bd4..9a9b6132dba4 100644 --- a/net/rxrpc/krxtimod.c +++ b/net/rxrpc/krxtimod.c | |||
@@ -13,6 +13,7 @@ | |||
13 | #include <linux/init.h> | 13 | #include <linux/init.h> |
14 | #include <linux/sched.h> | 14 | #include <linux/sched.h> |
15 | #include <linux/completion.h> | 15 | #include <linux/completion.h> |
16 | #include <linux/freezer.h> | ||
16 | #include <rxrpc/rxrpc.h> | 17 | #include <rxrpc/rxrpc.h> |
17 | #include <rxrpc/krxtimod.h> | 18 | #include <rxrpc/krxtimod.h> |
18 | #include <asm/errno.h> | 19 | #include <asm/errno.h> |
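
The three rxrpc hunks above exist because the freezer helpers (try_to_freeze() and friends) moved out of <linux/sched.h> into the new <linux/freezer.h>, so daemon loops that freeze across suspend now need the explicit include. A hypothetical kernel-thread loop using it:

    #include <linux/freezer.h>
    #include <linux/kthread.h>

    static int my_daemon(void *unused)
    {
            while (!kthread_should_stop()) {
                    try_to_freeze();    /* park here during suspend/resume */

                    /* ... sleep until work arrives, then service it ... */
            }
            return 0;
    }
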
diff --git a/net/sctp/associola.c b/net/sctp/associola.c index 39471d3b31b9..ad0057db0f91 100644 --- a/net/sctp/associola.c +++ b/net/sctp/associola.c | |||
@@ -61,7 +61,7 @@ | |||
61 | #include <net/sctp/sm.h> | 61 | #include <net/sctp/sm.h> |
62 | 62 | ||
63 | /* Forward declarations for internal functions. */ | 63 | /* Forward declarations for internal functions. */ |
64 | static void sctp_assoc_bh_rcv(struct sctp_association *asoc); | 64 | static void sctp_assoc_bh_rcv(struct work_struct *work); |
65 | 65 | ||
66 | 66 | ||
67 | /* 1st Level Abstractions. */ | 67 | /* 1st Level Abstractions. */ |
@@ -269,9 +269,7 @@ static struct sctp_association *sctp_association_init(struct sctp_association *a | |||
269 | 269 | ||
270 | /* Create an input queue. */ | 270 | /* Create an input queue. */ |
271 | sctp_inq_init(&asoc->base.inqueue); | 271 | sctp_inq_init(&asoc->base.inqueue); |
272 | sctp_inq_set_th_handler(&asoc->base.inqueue, | 272 | sctp_inq_set_th_handler(&asoc->base.inqueue, sctp_assoc_bh_rcv); |
273 | (void (*)(void *))sctp_assoc_bh_rcv, | ||
274 | asoc); | ||
275 | 273 | ||
276 | /* Create an output queue. */ | 274 | /* Create an output queue. */ |
277 | sctp_outq_init(asoc, &asoc->outqueue); | 275 | sctp_outq_init(asoc, &asoc->outqueue); |
@@ -946,8 +944,11 @@ out: | |||
946 | } | 944 | } |
947 | 945 | ||
948 | /* Do delayed input processing. This is scheduled by sctp_rcv(). */ | 946 | /* Do delayed input processing. This is scheduled by sctp_rcv(). */ |
949 | static void sctp_assoc_bh_rcv(struct sctp_association *asoc) | 947 | static void sctp_assoc_bh_rcv(struct work_struct *work) |
950 | { | 948 | { |
949 | struct sctp_association *asoc = | ||
950 | container_of(work, struct sctp_association, | ||
951 | base.inqueue.immediate); | ||
951 | struct sctp_endpoint *ep; | 952 | struct sctp_endpoint *ep; |
952 | struct sctp_chunk *chunk; | 953 | struct sctp_chunk *chunk; |
953 | struct sock *sk; | 954 | struct sock *sk; |
diff --git a/net/sctp/endpointola.c b/net/sctp/endpointola.c index 33a42e90c32f..129756908da4 100644 --- a/net/sctp/endpointola.c +++ b/net/sctp/endpointola.c | |||
@@ -61,7 +61,7 @@ | |||
61 | #include <net/sctp/sm.h> | 61 | #include <net/sctp/sm.h> |
62 | 62 | ||
63 | /* Forward declarations for internal helpers. */ | 63 | /* Forward declarations for internal helpers. */ |
64 | static void sctp_endpoint_bh_rcv(struct sctp_endpoint *ep); | 64 | static void sctp_endpoint_bh_rcv(struct work_struct *work); |
65 | 65 | ||
66 | /* | 66 | /* |
67 | * Initialize the base fields of the endpoint structure. | 67 | * Initialize the base fields of the endpoint structure. |
@@ -89,8 +89,7 @@ static struct sctp_endpoint *sctp_endpoint_init(struct sctp_endpoint *ep, | |||
89 | sctp_inq_init(&ep->base.inqueue); | 89 | sctp_inq_init(&ep->base.inqueue); |
90 | 90 | ||
91 | /* Set its top-half handler */ | 91 | /* Set its top-half handler */ |
92 | sctp_inq_set_th_handler(&ep->base.inqueue, | 92 | sctp_inq_set_th_handler(&ep->base.inqueue, sctp_endpoint_bh_rcv); |
93 | (void (*)(void *))sctp_endpoint_bh_rcv, ep); | ||
94 | 93 | ||
95 | /* Initialize the bind addr area */ | 94 | /* Initialize the bind addr area */ |
96 | sctp_bind_addr_init(&ep->base.bind_addr, 0); | 95 | sctp_bind_addr_init(&ep->base.bind_addr, 0); |
@@ -318,8 +317,11 @@ int sctp_endpoint_is_peeled_off(struct sctp_endpoint *ep, | |||
318 | /* Do delayed input processing. This is scheduled by sctp_rcv(). | 317 | /* Do delayed input processing. This is scheduled by sctp_rcv(). |
319 | * This may be called on BH or task time. | 318 | * This may be called on BH or task time. |
320 | */ | 319 | */ |
321 | static void sctp_endpoint_bh_rcv(struct sctp_endpoint *ep) | 320 | static void sctp_endpoint_bh_rcv(struct work_struct *work) |
322 | { | 321 | { |
322 | struct sctp_endpoint *ep = | ||
323 | container_of(work, struct sctp_endpoint, | ||
324 | base.inqueue.immediate); | ||
323 | struct sctp_association *asoc; | 325 | struct sctp_association *asoc; |
324 | struct sock *sk; | 326 | struct sock *sk; |
325 | struct sctp_transport *transport; | 327 | struct sctp_transport *transport; |
diff --git a/net/sctp/inqueue.c b/net/sctp/inqueue.c index cf6deed7e849..71b07466e880 100644 --- a/net/sctp/inqueue.c +++ b/net/sctp/inqueue.c | |||
@@ -54,7 +54,7 @@ void sctp_inq_init(struct sctp_inq *queue) | |||
54 | queue->in_progress = NULL; | 54 | queue->in_progress = NULL; |
55 | 55 | ||
56 | /* Create a task for delivering data. */ | 56 | /* Create a task for delivering data. */ |
57 | INIT_WORK(&queue->immediate, NULL, NULL); | 57 | INIT_WORK(&queue->immediate, NULL); |
58 | 58 | ||
59 | queue->malloced = 0; | 59 | queue->malloced = 0; |
60 | } | 60 | } |
@@ -97,7 +97,7 @@ void sctp_inq_push(struct sctp_inq *q, struct sctp_chunk *chunk) | |||
97 | * on the BH related data structures. | 97 | * on the BH related data structures. |
98 | */ | 98 | */ |
99 | list_add_tail(&chunk->list, &q->in_chunk_list); | 99 | list_add_tail(&chunk->list, &q->in_chunk_list); |
100 | q->immediate.func(q->immediate.data); | 100 | q->immediate.func(&q->immediate); |
101 | } | 101 | } |
102 | 102 | ||
103 | /* Extract a chunk from an SCTP inqueue. | 103 | /* Extract a chunk from an SCTP inqueue. |
@@ -205,9 +205,8 @@ struct sctp_chunk *sctp_inq_pop(struct sctp_inq *queue) | |||
205 | * The intent is that this routine will pull stuff out of the | 205 | * The intent is that this routine will pull stuff out of the |
206 | * inqueue and process it. | 206 | * inqueue and process it. |
207 | */ | 207 | */ |
208 | void sctp_inq_set_th_handler(struct sctp_inq *q, | 208 | void sctp_inq_set_th_handler(struct sctp_inq *q, work_func_t callback) |
209 | void (*callback)(void *), void *arg) | ||
210 | { | 209 | { |
211 | INIT_WORK(&q->immediate, callback, arg); | 210 | INIT_WORK(&q->immediate, callback); |
212 | } | 211 | } |
213 | 212 | ||
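
SCTP is the one odd consumer here: the inqueue's work item is never put on a workqueue; sctp_inq_push() invokes it synchronously through q->immediate.func. Under the new API that stored function takes the work_struct itself, so the direct call passes &q->immediate, and the handlers recover their owner through container_of() on the nested member, as the associola.c and endpointola.c hunks above do via base.inqueue.immediate. A reduced sketch with hypothetical names:

    #include <linux/workqueue.h>

    struct my_inq {
            struct work_struct immediate;
            int pending;
    };

    static void my_bh_rcv(struct work_struct *work)
    {
            struct my_inq *q = container_of(work, struct my_inq, immediate);

            q->pending = 0;             /* drain the queue here */
    }

    static void my_push(struct my_inq *q)
    {
            q->pending = 1;
            q->immediate.func(&q->immediate);   /* direct, synchronous call */
    }
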
diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c index 11f3b549f4a4..f2ba8615895b 100644 --- a/net/sctp/protocol.c +++ b/net/sctp/protocol.c | |||
@@ -79,8 +79,8 @@ static struct sctp_pf *sctp_pf_inet_specific; | |||
79 | static struct sctp_af *sctp_af_v4_specific; | 79 | static struct sctp_af *sctp_af_v4_specific; |
80 | static struct sctp_af *sctp_af_v6_specific; | 80 | static struct sctp_af *sctp_af_v6_specific; |
81 | 81 | ||
82 | kmem_cache_t *sctp_chunk_cachep __read_mostly; | 82 | struct kmem_cache *sctp_chunk_cachep __read_mostly; |
83 | kmem_cache_t *sctp_bucket_cachep __read_mostly; | 83 | struct kmem_cache *sctp_bucket_cachep __read_mostly; |
84 | 84 | ||
85 | /* Return the address of the control sock. */ | 85 | /* Return the address of the control sock. */ |
86 | struct sock *sctp_get_ctl_sock(void) | 86 | struct sock *sctp_get_ctl_sock(void) |
diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c index 04954e5f6846..30927d3a597f 100644 --- a/net/sctp/sm_make_chunk.c +++ b/net/sctp/sm_make_chunk.c | |||
@@ -65,7 +65,7 @@ | |||
65 | #include <net/sctp/sctp.h> | 65 | #include <net/sctp/sctp.h> |
66 | #include <net/sctp/sm.h> | 66 | #include <net/sctp/sm.h> |
67 | 67 | ||
68 | extern kmem_cache_t *sctp_chunk_cachep; | 68 | extern struct kmem_cache *sctp_chunk_cachep; |
69 | 69 | ||
70 | SCTP_STATIC | 70 | SCTP_STATIC |
71 | struct sctp_chunk *sctp_make_chunk(const struct sctp_association *asoc, | 71 | struct sctp_chunk *sctp_make_chunk(const struct sctp_association *asoc, |
@@ -979,7 +979,7 @@ struct sctp_chunk *sctp_chunkify(struct sk_buff *skb, | |||
979 | { | 979 | { |
980 | struct sctp_chunk *retval; | 980 | struct sctp_chunk *retval; |
981 | 981 | ||
982 | retval = kmem_cache_alloc(sctp_chunk_cachep, SLAB_ATOMIC); | 982 | retval = kmem_cache_alloc(sctp_chunk_cachep, GFP_ATOMIC); |
983 | 983 | ||
984 | if (!retval) | 984 | if (!retval) |
985 | goto nodata; | 985 | goto nodata; |
diff --git a/net/sctp/socket.c b/net/sctp/socket.c index 02b27145b279..1e8132b8c4d9 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c | |||
@@ -107,7 +107,7 @@ static void sctp_sock_migrate(struct sock *, struct sock *, | |||
107 | struct sctp_association *, sctp_socket_type_t); | 107 | struct sctp_association *, sctp_socket_type_t); |
108 | static char *sctp_hmac_alg = SCTP_COOKIE_HMAC_ALG; | 108 | static char *sctp_hmac_alg = SCTP_COOKIE_HMAC_ALG; |
109 | 109 | ||
110 | extern kmem_cache_t *sctp_bucket_cachep; | 110 | extern struct kmem_cache *sctp_bucket_cachep; |
111 | 111 | ||
112 | /* Get the sndbuf space available at the time on the association. */ | 112 | /* Get the sndbuf space available at the time on the association. */ |
113 | static inline int sctp_wspace(struct sctp_association *asoc) | 113 | static inline int sctp_wspace(struct sctp_association *asoc) |
@@ -4989,7 +4989,7 @@ static struct sctp_bind_bucket *sctp_bucket_create( | |||
4989 | { | 4989 | { |
4990 | struct sctp_bind_bucket *pp; | 4990 | struct sctp_bind_bucket *pp; |
4991 | 4991 | ||
4992 | pp = kmem_cache_alloc(sctp_bucket_cachep, SLAB_ATOMIC); | 4992 | pp = kmem_cache_alloc(sctp_bucket_cachep, GFP_ATOMIC); |
4993 | SCTP_DBG_OBJCNT_INC(bind_bucket); | 4993 | SCTP_DBG_OBJCNT_INC(bind_bucket); |
4994 | if (pp) { | 4994 | if (pp) { |
4995 | pp->port = snum; | 4995 | pp->port = snum; |
diff --git a/net/socket.c b/net/socket.c index e8db54702a69..29ea1de43ecb 100644 --- a/net/socket.c +++ b/net/socket.c | |||
@@ -230,13 +230,13 @@ int move_addr_to_user(void *kaddr, int klen, void __user *uaddr, | |||
230 | 230 | ||
231 | #define SOCKFS_MAGIC 0x534F434B | 231 | #define SOCKFS_MAGIC 0x534F434B |
232 | 232 | ||
233 | static kmem_cache_t *sock_inode_cachep __read_mostly; | 233 | static struct kmem_cache *sock_inode_cachep __read_mostly; |
234 | 234 | ||
235 | static struct inode *sock_alloc_inode(struct super_block *sb) | 235 | static struct inode *sock_alloc_inode(struct super_block *sb) |
236 | { | 236 | { |
237 | struct socket_alloc *ei; | 237 | struct socket_alloc *ei; |
238 | 238 | ||
239 | ei = kmem_cache_alloc(sock_inode_cachep, SLAB_KERNEL); | 239 | ei = kmem_cache_alloc(sock_inode_cachep, GFP_KERNEL); |
240 | if (!ei) | 240 | if (!ei) |
241 | return NULL; | 241 | return NULL; |
242 | init_waitqueue_head(&ei->socket.wait); | 242 | init_waitqueue_head(&ei->socket.wait); |
@@ -257,7 +257,7 @@ static void sock_destroy_inode(struct inode *inode) | |||
257 | container_of(inode, struct socket_alloc, vfs_inode)); | 257 | container_of(inode, struct socket_alloc, vfs_inode)); |
258 | } | 258 | } |
259 | 259 | ||
260 | static void init_once(void *foo, kmem_cache_t *cachep, unsigned long flags) | 260 | static void init_once(void *foo, struct kmem_cache *cachep, unsigned long flags) |
261 | { | 261 | { |
262 | struct socket_alloc *ei = (struct socket_alloc *)foo; | 262 | struct socket_alloc *ei = (struct socket_alloc *)foo; |
263 | 263 | ||
@@ -305,7 +305,14 @@ static struct file_system_type sock_fs_type = { | |||
305 | 305 | ||
306 | static int sockfs_delete_dentry(struct dentry *dentry) | 306 | static int sockfs_delete_dentry(struct dentry *dentry) |
307 | { | 307 | { |
308 | return 1; | 308 | /* |
309 | * At creation time, we pretended this dentry was hashed | ||
310 | * (by clearing DCACHE_UNHASHED bit in d_flags) | ||
311 | * At delete time, we restore the truth: not hashed. | ||
312 | * (so that dput() can proceed correctly) | ||
313 | */ | ||
314 | dentry->d_flags |= DCACHE_UNHASHED; | ||
315 | return 0; | ||
309 | } | 316 | } |
310 | static struct dentry_operations sockfs_dentry_operations = { | 317 | static struct dentry_operations sockfs_dentry_operations = { |
311 | .d_delete = sockfs_delete_dentry, | 318 | .d_delete = sockfs_delete_dentry, |
@@ -353,14 +360,20 @@ static int sock_attach_fd(struct socket *sock, struct file *file) | |||
353 | 360 | ||
354 | this.len = sprintf(name, "[%lu]", SOCK_INODE(sock)->i_ino); | 361 | this.len = sprintf(name, "[%lu]", SOCK_INODE(sock)->i_ino); |
355 | this.name = name; | 362 | this.name = name; |
356 | this.hash = SOCK_INODE(sock)->i_ino; | 363 | this.hash = 0; |
357 | 364 | ||
358 | file->f_dentry = d_alloc(sock_mnt->mnt_sb->s_root, &this); | 365 | file->f_dentry = d_alloc(sock_mnt->mnt_sb->s_root, &this); |
359 | if (unlikely(!file->f_dentry)) | 366 | if (unlikely(!file->f_dentry)) |
360 | return -ENOMEM; | 367 | return -ENOMEM; |
361 | 368 | ||
362 | file->f_dentry->d_op = &sockfs_dentry_operations; | 369 | file->f_dentry->d_op = &sockfs_dentry_operations; |
363 | d_add(file->f_dentry, SOCK_INODE(sock)); | 370 | /* |
371 | * We don't want to push this dentry into the global dentry hash table. | ||
372 | * We pretend the dentry is already hashed, by unsetting DCACHE_UNHASHED. | ||
373 | * This permits a working /proc/$pid/fd/XXX on sockets. | ||
374 | */ | ||
375 | file->f_dentry->d_flags &= ~DCACHE_UNHASHED; | ||
376 | d_instantiate(file->f_dentry, SOCK_INODE(sock)); | ||
364 | file->f_vfsmnt = mntget(sock_mnt); | 377 | file->f_vfsmnt = mntget(sock_mnt); |
365 | file->f_mapping = file->f_dentry->d_inode->i_mapping; | 378 | file->f_mapping = file->f_dentry->d_inode->i_mapping; |
366 | 379 | ||
diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c index 00cb388ece03..d96fd466a9a4 100644 --- a/net/sunrpc/cache.c +++ b/net/sunrpc/cache.c | |||
@@ -284,8 +284,8 @@ static struct file_operations cache_file_operations; | |||
284 | static struct file_operations content_file_operations; | 284 | static struct file_operations content_file_operations; |
285 | static struct file_operations cache_flush_operations; | 285 | static struct file_operations cache_flush_operations; |
286 | 286 | ||
287 | static void do_cache_clean(void *data); | 287 | static void do_cache_clean(struct work_struct *work); |
288 | static DECLARE_WORK(cache_cleaner, do_cache_clean, NULL); | 288 | static DECLARE_DELAYED_WORK(cache_cleaner, do_cache_clean); |
289 | 289 | ||
290 | void cache_register(struct cache_detail *cd) | 290 | void cache_register(struct cache_detail *cd) |
291 | { | 291 | { |
@@ -337,7 +337,7 @@ void cache_register(struct cache_detail *cd) | |||
337 | spin_unlock(&cache_list_lock); | 337 | spin_unlock(&cache_list_lock); |
338 | 338 | ||
339 | /* start the cleaning process */ | 339 | /* start the cleaning process */ |
340 | schedule_work(&cache_cleaner); | 340 | schedule_delayed_work(&cache_cleaner, 0); |
341 | } | 341 | } |
342 | 342 | ||
343 | int cache_unregister(struct cache_detail *cd) | 343 | int cache_unregister(struct cache_detail *cd) |
@@ -461,7 +461,7 @@ static int cache_clean(void) | |||
461 | /* | 461 | /* |
462 | * We want to regularly clean the cache, so we need to schedule some work ... | 462 | * We want to regularly clean the cache, so we need to schedule some work ... |
463 | */ | 463 | */ |
464 | static void do_cache_clean(void *data) | 464 | static void do_cache_clean(struct work_struct *work) |
465 | { | 465 | { |
466 | int delay = 5; | 466 | int delay = 5; |
467 | if (cache_clean() == -1) | 467 | if (cache_clean() == -1) |
diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c index 9a0b41a97f90..19703aa9659e 100644 --- a/net/sunrpc/rpc_pipe.c +++ b/net/sunrpc/rpc_pipe.c | |||
@@ -33,7 +33,7 @@ static int rpc_mount_count; | |||
33 | static struct file_system_type rpc_pipe_fs_type; | 33 | static struct file_system_type rpc_pipe_fs_type; |
34 | 34 | ||
35 | 35 | ||
36 | static kmem_cache_t *rpc_inode_cachep __read_mostly; | 36 | static struct kmem_cache *rpc_inode_cachep __read_mostly; |
37 | 37 | ||
38 | #define RPC_UPCALL_TIMEOUT (30*HZ) | 38 | #define RPC_UPCALL_TIMEOUT (30*HZ) |
39 | 39 | ||
@@ -54,10 +54,11 @@ static void rpc_purge_list(struct rpc_inode *rpci, struct list_head *head, | |||
54 | } | 54 | } |
55 | 55 | ||
56 | static void | 56 | static void |
57 | rpc_timeout_upcall_queue(void *data) | 57 | rpc_timeout_upcall_queue(struct work_struct *work) |
58 | { | 58 | { |
59 | LIST_HEAD(free_list); | 59 | LIST_HEAD(free_list); |
60 | struct rpc_inode *rpci = (struct rpc_inode *)data; | 60 | struct rpc_inode *rpci = |
61 | container_of(work, struct rpc_inode, queue_timeout.work); | ||
61 | struct inode *inode = &rpci->vfs_inode; | 62 | struct inode *inode = &rpci->vfs_inode; |
62 | void (*destroy_msg)(struct rpc_pipe_msg *); | 63 | void (*destroy_msg)(struct rpc_pipe_msg *); |
63 | 64 | ||
@@ -142,7 +143,7 @@ static struct inode * | |||
142 | rpc_alloc_inode(struct super_block *sb) | 143 | rpc_alloc_inode(struct super_block *sb) |
143 | { | 144 | { |
144 | struct rpc_inode *rpci; | 145 | struct rpc_inode *rpci; |
145 | rpci = (struct rpc_inode *)kmem_cache_alloc(rpc_inode_cachep, SLAB_KERNEL); | 146 | rpci = (struct rpc_inode *)kmem_cache_alloc(rpc_inode_cachep, GFP_KERNEL); |
146 | if (!rpci) | 147 | if (!rpci) |
147 | return NULL; | 148 | return NULL; |
148 | return &rpci->vfs_inode; | 149 | return &rpci->vfs_inode; |
@@ -823,7 +824,7 @@ static struct file_system_type rpc_pipe_fs_type = { | |||
823 | }; | 824 | }; |
824 | 825 | ||
825 | static void | 826 | static void |
826 | init_once(void * foo, kmem_cache_t * cachep, unsigned long flags) | 827 | init_once(void * foo, struct kmem_cache * cachep, unsigned long flags) |
827 | { | 828 | { |
828 | struct rpc_inode *rpci = (struct rpc_inode *) foo; | 829 | struct rpc_inode *rpci = (struct rpc_inode *) foo; |
829 | 830 | ||
@@ -837,7 +838,8 @@ init_once(void * foo, kmem_cache_t * cachep, unsigned long flags) | |||
837 | INIT_LIST_HEAD(&rpci->pipe); | 838 | INIT_LIST_HEAD(&rpci->pipe); |
838 | rpci->pipelen = 0; | 839 | rpci->pipelen = 0; |
839 | init_waitqueue_head(&rpci->waitq); | 840 | init_waitqueue_head(&rpci->waitq); |
840 | INIT_WORK(&rpci->queue_timeout, rpc_timeout_upcall_queue, rpci); | 841 | INIT_DELAYED_WORK(&rpci->queue_timeout, |
842 | rpc_timeout_upcall_queue); | ||
841 | rpci->ops = NULL; | 843 | rpci->ops = NULL; |
842 | } | 844 | } |
843 | } | 845 | } |
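
rpc_timeout_upcall_queue() shows the delayed-work variant of the container_of() pattern: a delayed_work wraps a work_struct in a member named work, so the handler must name that extra level, timeout.work below rather than just timeout. A stripped-down sketch with hypothetical names:

    #include <linux/workqueue.h>

    struct my_pipe {
            struct delayed_work timeout;        /* was: struct work_struct */
    };

    static void my_timeout_fn(struct work_struct *work)
    {
            /* note the trailing .work: delayed_work embeds a work_struct */
            struct my_pipe *p =
                    container_of(work, struct my_pipe, timeout.work);

            /* ... discard p's queued upcalls ... */
    }

    static void my_pipe_arm(struct my_pipe *p)
    {
            INIT_DELAYED_WORK(&p->timeout, my_timeout_fn);
            schedule_delayed_work(&p->timeout, 30 * HZ);        /* fire in 30s */
    }
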
diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c index a1ab4eed41f4..225e6510b523 100644 --- a/net/sunrpc/sched.c +++ b/net/sunrpc/sched.c | |||
@@ -34,14 +34,14 @@ static int rpc_task_id; | |||
34 | #define RPC_BUFFER_MAXSIZE (2048) | 34 | #define RPC_BUFFER_MAXSIZE (2048) |
35 | #define RPC_BUFFER_POOLSIZE (8) | 35 | #define RPC_BUFFER_POOLSIZE (8) |
36 | #define RPC_TASK_POOLSIZE (8) | 36 | #define RPC_TASK_POOLSIZE (8) |
37 | static kmem_cache_t *rpc_task_slabp __read_mostly; | 37 | static struct kmem_cache *rpc_task_slabp __read_mostly; |
38 | static kmem_cache_t *rpc_buffer_slabp __read_mostly; | 38 | static struct kmem_cache *rpc_buffer_slabp __read_mostly; |
39 | static mempool_t *rpc_task_mempool __read_mostly; | 39 | static mempool_t *rpc_task_mempool __read_mostly; |
40 | static mempool_t *rpc_buffer_mempool __read_mostly; | 40 | static mempool_t *rpc_buffer_mempool __read_mostly; |
41 | 41 | ||
42 | static void __rpc_default_timer(struct rpc_task *task); | 42 | static void __rpc_default_timer(struct rpc_task *task); |
43 | static void rpciod_killall(void); | 43 | static void rpciod_killall(void); |
44 | static void rpc_async_schedule(void *); | 44 | static void rpc_async_schedule(struct work_struct *); |
45 | 45 | ||
46 | /* | 46 | /* |
47 | * RPC tasks sit here while waiting for conditions to improve. | 47 | * RPC tasks sit here while waiting for conditions to improve. |
@@ -305,7 +305,7 @@ static void rpc_make_runnable(struct rpc_task *task) | |||
305 | if (RPC_IS_ASYNC(task)) { | 305 | if (RPC_IS_ASYNC(task)) { |
306 | int status; | 306 | int status; |
307 | 307 | ||
308 | INIT_WORK(&task->u.tk_work, rpc_async_schedule, (void *)task); | 308 | INIT_WORK(&task->u.tk_work, rpc_async_schedule); |
309 | status = queue_work(task->tk_workqueue, &task->u.tk_work); | 309 | status = queue_work(task->tk_workqueue, &task->u.tk_work); |
310 | if (status < 0) { | 310 | if (status < 0) { |
311 | printk(KERN_WARNING "RPC: failed to add task to queue: error: %d!\n", status); | 311 | printk(KERN_WARNING "RPC: failed to add task to queue: error: %d!\n", status); |
@@ -695,9 +695,9 @@ rpc_execute(struct rpc_task *task) | |||
695 | return __rpc_execute(task); | 695 | return __rpc_execute(task); |
696 | } | 696 | } |
697 | 697 | ||
698 | static void rpc_async_schedule(void *arg) | 698 | static void rpc_async_schedule(struct work_struct *work) |
699 | { | 699 | { |
700 | __rpc_execute((struct rpc_task *)arg); | 700 | __rpc_execute(container_of(work, struct rpc_task, u.tk_work)); |
701 | } | 701 | } |
702 | 702 | ||
703 | /** | 703 | /** |
diff --git a/net/sunrpc/svcauth.c b/net/sunrpc/svcauth.c index ee9bb1522d5e..c7bb5f7f21a5 100644 --- a/net/sunrpc/svcauth.c +++ b/net/sunrpc/svcauth.c | |||
@@ -119,7 +119,8 @@ EXPORT_SYMBOL(svc_auth_unregister); | |||
119 | #define DN_HASHMASK (DN_HASHMAX-1) | 119 | #define DN_HASHMASK (DN_HASHMAX-1) |
120 | 120 | ||
121 | static struct hlist_head auth_domain_table[DN_HASHMAX]; | 121 | static struct hlist_head auth_domain_table[DN_HASHMAX]; |
122 | static spinlock_t auth_domain_lock = SPIN_LOCK_UNLOCKED; | 122 | static spinlock_t auth_domain_lock = |
123 | __SPIN_LOCK_UNLOCKED(auth_domain_lock); | ||
123 | 124 | ||
124 | void auth_domain_put(struct auth_domain *dom) | 125 | void auth_domain_put(struct auth_domain *dom) |
125 | { | 126 | { |
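
The svcauth.c hunk is a lockdep fix rather than a workqueue one: SPIN_LOCK_UNLOCKED gives every lock initialized with it the same lock class, which defeats lockdep, while __SPIN_LOCK_UNLOCKED(name) mints a distinct class per lock. A hypothetical before/after; DEFINE_SPINLOCK(), already used elsewhere in this patch, is the preferred shorthand when the definition and initializer coincide:

    #include <linux/spinlock.h>

    /* old, one shared lock class for everything:
     *     static spinlock_t my_lock = SPIN_LOCK_UNLOCKED;
     */
    static spinlock_t my_lock = __SPIN_LOCK_UNLOCKED(my_lock);

    /* equivalent, and shorter: */
    static DEFINE_SPINLOCK(my_other_lock);
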
diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c index 64ca1f61dd94..99f54fb6d669 100644 --- a/net/sunrpc/svcsock.c +++ b/net/sunrpc/svcsock.c | |||
@@ -32,6 +32,7 @@ | |||
32 | #include <linux/netdevice.h> | 32 | #include <linux/netdevice.h> |
33 | #include <linux/skbuff.h> | 33 | #include <linux/skbuff.h> |
34 | #include <linux/file.h> | 34 | #include <linux/file.h> |
35 | #include <linux/freezer.h> | ||
35 | #include <net/sock.h> | 36 | #include <net/sock.h> |
36 | #include <net/checksum.h> | 37 | #include <net/checksum.h> |
37 | #include <net/ip.h> | 38 | #include <net/ip.h> |
@@ -84,6 +85,35 @@ static struct cache_deferred_req *svc_defer(struct cache_req *req); | |||
84 | */ | 85 | */ |
85 | static int svc_conn_age_period = 6*60; | 86 | static int svc_conn_age_period = 6*60; |
86 | 87 | ||
88 | #ifdef CONFIG_DEBUG_LOCK_ALLOC | ||
89 | static struct lock_class_key svc_key[2]; | ||
90 | static struct lock_class_key svc_slock_key[2]; | ||
91 | |||
92 | static inline void svc_reclassify_socket(struct socket *sock) | ||
93 | { | ||
94 | struct sock *sk = sock->sk; | ||
95 | BUG_ON(sk->sk_lock.owner != NULL); | ||
96 | switch (sk->sk_family) { | ||
97 | case AF_INET: | ||
98 | sock_lock_init_class_and_name(sk, "slock-AF_INET-NFSD", | ||
99 | &svc_slock_key[0], "sk_lock-AF_INET-NFSD", &svc_key[0]); | ||
100 | break; | ||
101 | |||
102 | case AF_INET6: | ||
103 | sock_lock_init_class_and_name(sk, "slock-AF_INET6-NFSD", | ||
104 | &svc_slock_key[1], "sk_lock-AF_INET6-NFSD", &svc_key[1]); | ||
105 | break; | ||
106 | |||
107 | default: | ||
108 | BUG(); | ||
109 | } | ||
110 | } | ||
111 | #else | ||
112 | static inline void svc_reclassify_socket(struct socket *sock) | ||
113 | { | ||
114 | } | ||
115 | #endif | ||
116 | |||
87 | /* | 117 | /* |
88 | * Queue up an idle server thread. Must have pool->sp_lock held. | 118 | * Queue up an idle server thread. Must have pool->sp_lock held. |
89 | * Note: this is really a stack rather than a queue, so that we only | 119 | * Note: this is really a stack rather than a queue, so that we only |
@@ -1556,6 +1586,8 @@ svc_create_socket(struct svc_serv *serv, int protocol, struct sockaddr_in *sin) | |||
1556 | if ((error = sock_create_kern(PF_INET, type, protocol, &sock)) < 0) | 1586 | if ((error = sock_create_kern(PF_INET, type, protocol, &sock)) < 0) |
1557 | return error; | 1587 | return error; |
1558 | 1588 | ||
1589 | svc_reclassify_socket(sock); | ||
1590 | |||
1559 | if (type == SOCK_STREAM) | 1591 | if (type == SOCK_STREAM) |
1560 | sock->sk->sk_reuse = 1; /* allow address reuse */ | 1592 | sock->sk->sk_reuse = 1; /* allow address reuse */ |
1561 | error = kernel_bind(sock, (struct sockaddr *) sin, | 1593 | error = kernel_bind(sock, (struct sockaddr *) sin, |
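
svcsock.c above and xprtsock.c below grow the same lockdep helper: sockets created inside the kernel (for NFSD and the NFS client respectively, per the class-name strings) take their sk_lock in contexts that differ from ordinary user sockets of the same family, so without a distinct lock class lockdep reports false-positive deadlocks. Reduced to a single family, with hypothetical names:

    #ifdef CONFIG_DEBUG_LOCK_ALLOC
    static struct lock_class_key my_key;
    static struct lock_class_key my_slock_key;

    static inline void my_reclassify_socket(struct socket *sock)
    {
            struct sock *sk = sock->sk;

            /* give sk_lock and its inner spinlock their own lockdep classes */
            sock_lock_init_class_and_name(sk, "slock-AF_INET-MYSVC",
                            &my_slock_key, "sk_lock-AF_INET-MYSVC", &my_key);
    }
    #else
    static inline void my_reclassify_socket(struct socket *sock)
    {
    }
    #endif
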
diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c index 80857470dc11..4f9a5d9791fb 100644 --- a/net/sunrpc/xprt.c +++ b/net/sunrpc/xprt.c | |||
@@ -479,9 +479,10 @@ int xprt_adjust_timeout(struct rpc_rqst *req) | |||
479 | return status; | 479 | return status; |
480 | } | 480 | } |
481 | 481 | ||
482 | static void xprt_autoclose(void *args) | 482 | static void xprt_autoclose(struct work_struct *work) |
483 | { | 483 | { |
484 | struct rpc_xprt *xprt = (struct rpc_xprt *)args; | 484 | struct rpc_xprt *xprt = |
485 | container_of(work, struct rpc_xprt, task_cleanup); | ||
485 | 486 | ||
486 | xprt_disconnect(xprt); | 487 | xprt_disconnect(xprt); |
487 | xprt->ops->close(xprt); | 488 | xprt->ops->close(xprt); |
@@ -932,7 +933,7 @@ struct rpc_xprt *xprt_create_transport(int proto, struct sockaddr *ap, size_t si | |||
932 | 933 | ||
933 | INIT_LIST_HEAD(&xprt->free); | 934 | INIT_LIST_HEAD(&xprt->free); |
934 | INIT_LIST_HEAD(&xprt->recv); | 935 | INIT_LIST_HEAD(&xprt->recv); |
935 | INIT_WORK(&xprt->task_cleanup, xprt_autoclose, xprt); | 936 | INIT_WORK(&xprt->task_cleanup, xprt_autoclose); |
936 | init_timer(&xprt->timer); | 937 | init_timer(&xprt->timer); |
937 | xprt->timer.function = xprt_init_autodisconnect; | 938 | xprt->timer.function = xprt_init_autodisconnect; |
938 | xprt->timer.data = (unsigned long) xprt; | 939 | xprt->timer.data = (unsigned long) xprt; |
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c index 757fc91ef25d..2fc4a3123261 100644 --- a/net/sunrpc/xprtsock.c +++ b/net/sunrpc/xprtsock.c | |||
@@ -1058,15 +1058,45 @@ static int xs_bindresvport(struct rpc_xprt *xprt, struct socket *sock) | |||
1058 | return err; | 1058 | return err; |
1059 | } | 1059 | } |
1060 | 1060 | ||
1061 | #ifdef CONFIG_DEBUG_LOCK_ALLOC | ||
1062 | static struct lock_class_key xs_key[2]; | ||
1063 | static struct lock_class_key xs_slock_key[2]; | ||
1064 | |||
1065 | static inline void xs_reclassify_socket(struct socket *sock) | ||
1066 | { | ||
1067 | struct sock *sk = sock->sk; | ||
1068 | BUG_ON(sk->sk_lock.owner != NULL); | ||
1069 | switch (sk->sk_family) { | ||
1070 | case AF_INET: | ||
1071 | sock_lock_init_class_and_name(sk, "slock-AF_INET-NFS", | ||
1072 | &xs_slock_key[0], "sk_lock-AF_INET-NFS", &xs_key[0]); | ||
1073 | break; | ||
1074 | |||
1075 | case AF_INET6: | ||
1076 | sock_lock_init_class_and_name(sk, "slock-AF_INET6-NFS", | ||
1077 | &xs_slock_key[1], "sk_lock-AF_INET6-NFS", &xs_key[1]); | ||
1078 | break; | ||
1079 | |||
1080 | default: | ||
1081 | BUG(); | ||
1082 | } | ||
1083 | } | ||
1084 | #else | ||
1085 | static inline void xs_reclassify_socket(struct socket *sock) | ||
1086 | { | ||
1087 | } | ||
1088 | #endif | ||
1089 | |||
1061 | /** | 1090 | /** |
1062 | * xs_udp_connect_worker - set up a UDP socket | 1091 | * xs_udp_connect_worker - set up a UDP socket |
1063 | * @args: RPC transport to connect | 1092 | * @work: RPC transport to connect |
1064 | * | 1093 | * |
1065 | * Invoked by a work queue tasklet. | 1094 | * Invoked by a work queue tasklet. |
1066 | */ | 1095 | */ |
1067 | static void xs_udp_connect_worker(void *args) | 1096 | static void xs_udp_connect_worker(struct work_struct *work) |
1068 | { | 1097 | { |
1069 | struct rpc_xprt *xprt = (struct rpc_xprt *) args; | 1098 | struct rpc_xprt *xprt = |
1099 | container_of(work, struct rpc_xprt, connect_worker.work); | ||
1070 | struct socket *sock = xprt->sock; | 1100 | struct socket *sock = xprt->sock; |
1071 | int err, status = -EIO; | 1101 | int err, status = -EIO; |
1072 | 1102 | ||
@@ -1080,6 +1110,7 @@ static void xs_udp_connect_worker(void *args) | |||
1080 | dprintk("RPC: can't create UDP transport socket (%d).\n", -err); | 1110 | dprintk("RPC: can't create UDP transport socket (%d).\n", -err); |
1081 | goto out; | 1111 | goto out; |
1082 | } | 1112 | } |
1113 | xs_reclassify_socket(sock); | ||
1083 | 1114 | ||
1084 | if (xprt->resvport && xs_bindresvport(xprt, sock) < 0) { | 1115 | if (xprt->resvport && xs_bindresvport(xprt, sock) < 0) { |
1085 | sock_release(sock); | 1116 | sock_release(sock); |
@@ -1144,13 +1175,14 @@ static void xs_tcp_reuse_connection(struct rpc_xprt *xprt) | |||
1144 | 1175 | ||
1145 | /** | 1176 | /** |
1146 | * xs_tcp_connect_worker - connect a TCP socket to a remote endpoint | 1177 | * xs_tcp_connect_worker - connect a TCP socket to a remote endpoint |
1147 | * @args: RPC transport to connect | 1178 | * @work: RPC transport to connect |
1148 | * | 1179 | * |
1149 | * Invoked by a work queue tasklet. | 1180 | * Invoked by a work queue tasklet. |
1150 | */ | 1181 | */ |
1151 | static void xs_tcp_connect_worker(void *args) | 1182 | static void xs_tcp_connect_worker(struct work_struct *work) |
1152 | { | 1183 | { |
1153 | struct rpc_xprt *xprt = (struct rpc_xprt *)args; | 1184 | struct rpc_xprt *xprt = |
1185 | container_of(work, struct rpc_xprt, connect_worker.work); | ||
1154 | struct socket *sock = xprt->sock; | 1186 | struct socket *sock = xprt->sock; |
1155 | int err, status = -EIO; | 1187 | int err, status = -EIO; |
1156 | 1188 | ||
@@ -1163,6 +1195,7 @@ static void xs_tcp_connect_worker(void *args) | |||
1163 | dprintk("RPC: can't create TCP transport socket (%d).\n", -err); | 1195 | dprintk("RPC: can't create TCP transport socket (%d).\n", -err); |
1164 | goto out; | 1196 | goto out; |
1165 | } | 1197 | } |
1198 | xs_reclassify_socket(sock); | ||
1166 | 1199 | ||
1167 | if (xprt->resvport && xs_bindresvport(xprt, sock) < 0) { | 1200 | if (xprt->resvport && xs_bindresvport(xprt, sock) < 0) { |
1168 | sock_release(sock); | 1201 | sock_release(sock); |
@@ -1262,7 +1295,7 @@ static void xs_connect(struct rpc_task *task) | |||
1262 | xprt->reestablish_timeout = XS_TCP_MAX_REEST_TO; | 1295 | xprt->reestablish_timeout = XS_TCP_MAX_REEST_TO; |
1263 | } else { | 1296 | } else { |
1264 | dprintk("RPC: xs_connect scheduled xprt %p\n", xprt); | 1297 | dprintk("RPC: xs_connect scheduled xprt %p\n", xprt); |
1265 | schedule_work(&xprt->connect_worker); | 1298 | schedule_delayed_work(&xprt->connect_worker, 0); |
1266 | 1299 | ||
1267 | /* flush_scheduled_work can sleep... */ | 1300 | /* flush_scheduled_work can sleep... */ |
1268 | if (!RPC_IS_ASYNC(task)) | 1301 | if (!RPC_IS_ASYNC(task)) |
@@ -1375,7 +1408,7 @@ int xs_setup_udp(struct rpc_xprt *xprt, struct rpc_timeout *to) | |||
1375 | /* XXX: header size can vary due to auth type, IPv6, etc. */ | 1408 | /* XXX: header size can vary due to auth type, IPv6, etc. */ |
1376 | xprt->max_payload = (1U << 16) - (MAX_HEADER << 3); | 1409 | xprt->max_payload = (1U << 16) - (MAX_HEADER << 3); |
1377 | 1410 | ||
1378 | INIT_WORK(&xprt->connect_worker, xs_udp_connect_worker, xprt); | 1411 | INIT_DELAYED_WORK(&xprt->connect_worker, xs_udp_connect_worker); |
1379 | xprt->bind_timeout = XS_BIND_TO; | 1412 | xprt->bind_timeout = XS_BIND_TO; |
1380 | xprt->connect_timeout = XS_UDP_CONN_TO; | 1413 | xprt->connect_timeout = XS_UDP_CONN_TO; |
1381 | xprt->reestablish_timeout = XS_UDP_REEST_TO; | 1414 | xprt->reestablish_timeout = XS_UDP_REEST_TO; |
@@ -1420,7 +1453,7 @@ int xs_setup_tcp(struct rpc_xprt *xprt, struct rpc_timeout *to) | |||
1420 | xprt->tsh_size = sizeof(rpc_fraghdr) / sizeof(u32); | 1453 | xprt->tsh_size = sizeof(rpc_fraghdr) / sizeof(u32); |
1421 | xprt->max_payload = RPC_MAX_FRAGMENT_SIZE; | 1454 | xprt->max_payload = RPC_MAX_FRAGMENT_SIZE; |
1422 | 1455 | ||
1423 | INIT_WORK(&xprt->connect_worker, xs_tcp_connect_worker, xprt); | 1456 | INIT_DELAYED_WORK(&xprt->connect_worker, xs_tcp_connect_worker); |
1424 | xprt->bind_timeout = XS_BIND_TO; | 1457 | xprt->bind_timeout = XS_BIND_TO; |
1425 | xprt->connect_timeout = XS_TCP_CONN_TO; | 1458 | xprt->connect_timeout = XS_TCP_CONN_TO; |
1426 | xprt->reestablish_timeout = XS_TCP_INIT_REEST_TO; | 1459 | xprt->reestablish_timeout = XS_TCP_INIT_REEST_TO; |
diff --git a/net/tipc/handler.c b/net/tipc/handler.c index ae6ddf00a1aa..eb80778d6d9c 100644 --- a/net/tipc/handler.c +++ b/net/tipc/handler.c | |||
@@ -42,7 +42,7 @@ struct queue_item { | |||
42 | unsigned long data; | 42 | unsigned long data; |
43 | }; | 43 | }; |
44 | 44 | ||
45 | static kmem_cache_t *tipc_queue_item_cache; | 45 | static struct kmem_cache *tipc_queue_item_cache; |
46 | static struct list_head signal_queue_head; | 46 | static struct list_head signal_queue_head; |
47 | static DEFINE_SPINLOCK(qitem_lock); | 47 | static DEFINE_SPINLOCK(qitem_lock); |
48 | static int handler_enabled = 0; | 48 | static int handler_enabled = 0; |
diff --git a/net/xfrm/xfrm_input.c b/net/xfrm/xfrm_input.c index e8198a2c785d..414f89070380 100644 --- a/net/xfrm/xfrm_input.c +++ b/net/xfrm/xfrm_input.c | |||
@@ -12,7 +12,7 @@ | |||
12 | #include <net/ip.h> | 12 | #include <net/ip.h> |
13 | #include <net/xfrm.h> | 13 | #include <net/xfrm.h> |
14 | 14 | ||
15 | static kmem_cache_t *secpath_cachep __read_mostly; | 15 | static struct kmem_cache *secpath_cachep __read_mostly; |
16 | 16 | ||
17 | void __secpath_destroy(struct sec_path *sp) | 17 | void __secpath_destroy(struct sec_path *sp) |
18 | { | 18 | { |
@@ -27,7 +27,7 @@ struct sec_path *secpath_dup(struct sec_path *src) | |||
27 | { | 27 | { |
28 | struct sec_path *sp; | 28 | struct sec_path *sp; |
29 | 29 | ||
30 | sp = kmem_cache_alloc(secpath_cachep, SLAB_ATOMIC); | 30 | sp = kmem_cache_alloc(secpath_cachep, GFP_ATOMIC); |
31 | if (!sp) | 31 | if (!sp) |
32 | return NULL; | 32 | return NULL; |
33 | 33 | ||
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c index 64d3938f74c4..3f3f563eb4ab 100644 --- a/net/xfrm/xfrm_policy.c +++ b/net/xfrm/xfrm_policy.c | |||
@@ -39,7 +39,7 @@ EXPORT_SYMBOL(xfrm_policy_count); | |||
39 | static DEFINE_RWLOCK(xfrm_policy_afinfo_lock); | 39 | static DEFINE_RWLOCK(xfrm_policy_afinfo_lock); |
40 | static struct xfrm_policy_afinfo *xfrm_policy_afinfo[NPROTO]; | 40 | static struct xfrm_policy_afinfo *xfrm_policy_afinfo[NPROTO]; |
41 | 41 | ||
42 | static kmem_cache_t *xfrm_dst_cache __read_mostly; | 42 | static struct kmem_cache *xfrm_dst_cache __read_mostly; |
43 | 43 | ||
44 | static struct work_struct xfrm_policy_gc_work; | 44 | static struct work_struct xfrm_policy_gc_work; |
45 | static HLIST_HEAD(xfrm_policy_gc_list); | 45 | static HLIST_HEAD(xfrm_policy_gc_list); |
@@ -392,7 +392,7 @@ static void xfrm_policy_gc_kill(struct xfrm_policy *policy) | |||
392 | xfrm_pol_put(policy); | 392 | xfrm_pol_put(policy); |
393 | } | 393 | } |
394 | 394 | ||
395 | static void xfrm_policy_gc_task(void *data) | 395 | static void xfrm_policy_gc_task(struct work_struct *work) |
396 | { | 396 | { |
397 | struct xfrm_policy *policy; | 397 | struct xfrm_policy *policy; |
398 | struct hlist_node *entry, *tmp; | 398 | struct hlist_node *entry, *tmp; |
@@ -580,7 +580,7 @@ static inline int xfrm_byidx_should_resize(int total) | |||
580 | 580 | ||
581 | static DEFINE_MUTEX(hash_resize_mutex); | 581 | static DEFINE_MUTEX(hash_resize_mutex); |
582 | 582 | ||
583 | static void xfrm_hash_resize(void *__unused) | 583 | static void xfrm_hash_resize(struct work_struct *__unused) |
584 | { | 584 | { |
585 | int dir, total; | 585 | int dir, total; |
586 | 586 | ||
@@ -597,7 +597,7 @@ static void xfrm_hash_resize(void *__unused) | |||
597 | mutex_unlock(&hash_resize_mutex); | 597 | mutex_unlock(&hash_resize_mutex); |
598 | } | 598 | } |
599 | 599 | ||
600 | static DECLARE_WORK(xfrm_hash_work, xfrm_hash_resize, NULL); | 600 | static DECLARE_WORK(xfrm_hash_work, xfrm_hash_resize); |
601 | 601 | ||
602 | /* Generate new index... KAME seems to generate them ordered by cost | 602 | /* Generate new index... KAME seems to generate them ordered by cost |
603 | * of an absolute unpredictability of ordering of rules. This will not pass. */ | 603 | * of an absolute unpredictability of ordering of rules. This will not pass. */
@@ -2116,7 +2116,7 @@ static void __init xfrm_policy_init(void) | |||
2116 | panic("XFRM: failed to allocate bydst hash\n"); | 2116 | panic("XFRM: failed to allocate bydst hash\n"); |
2117 | } | 2117 | } |
2118 | 2118 | ||
2119 | INIT_WORK(&xfrm_policy_gc_work, xfrm_policy_gc_task, NULL); | 2119 | INIT_WORK(&xfrm_policy_gc_work, xfrm_policy_gc_task); |
2120 | register_netdevice_notifier(&xfrm_dev_notifier); | 2120 | register_netdevice_notifier(&xfrm_dev_notifier); |
2121 | } | 2121 | } |
2122 | 2122 | ||
diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c index 864962bbda90..da54a64ccfa3 100644 --- a/net/xfrm/xfrm_state.c +++ b/net/xfrm/xfrm_state.c | |||
@@ -115,7 +115,7 @@ static unsigned long xfrm_hash_new_size(void) | |||
115 | 115 | ||
116 | static DEFINE_MUTEX(hash_resize_mutex); | 116 | static DEFINE_MUTEX(hash_resize_mutex); |
117 | 117 | ||
118 | static void xfrm_hash_resize(void *__unused) | 118 | static void xfrm_hash_resize(struct work_struct *__unused) |
119 | { | 119 | { |
120 | struct hlist_head *ndst, *nsrc, *nspi, *odst, *osrc, *ospi; | 120 | struct hlist_head *ndst, *nsrc, *nspi, *odst, *osrc, *ospi; |
121 | unsigned long nsize, osize; | 121 | unsigned long nsize, osize; |
@@ -168,7 +168,7 @@ out_unlock: | |||
168 | mutex_unlock(&hash_resize_mutex); | 168 | mutex_unlock(&hash_resize_mutex); |
169 | } | 169 | } |
170 | 170 | ||
171 | static DECLARE_WORK(xfrm_hash_work, xfrm_hash_resize, NULL); | 171 | static DECLARE_WORK(xfrm_hash_work, xfrm_hash_resize); |
172 | 172 | ||
173 | DECLARE_WAIT_QUEUE_HEAD(km_waitq); | 173 | DECLARE_WAIT_QUEUE_HEAD(km_waitq); |
174 | EXPORT_SYMBOL(km_waitq); | 174 | EXPORT_SYMBOL(km_waitq); |
@@ -207,7 +207,7 @@ static void xfrm_state_gc_destroy(struct xfrm_state *x) | |||
207 | kfree(x); | 207 | kfree(x); |
208 | } | 208 | } |
209 | 209 | ||
210 | static void xfrm_state_gc_task(void *data) | 210 | static void xfrm_state_gc_task(struct work_struct *data) |
211 | { | 211 | { |
212 | struct xfrm_state *x; | 212 | struct xfrm_state *x; |
213 | struct hlist_node *entry, *tmp; | 213 | struct hlist_node *entry, *tmp; |
@@ -1568,6 +1568,6 @@ void __init xfrm_state_init(void) | |||
1568 | panic("XFRM: Cannot allocate bydst/bysrc/byspi hashes."); | 1568 | panic("XFRM: Cannot allocate bydst/bysrc/byspi hashes."); |
1569 | xfrm_state_hmask = ((sz / sizeof(struct hlist_head)) - 1); | 1569 | xfrm_state_hmask = ((sz / sizeof(struct hlist_head)) - 1); |
1570 | 1570 | ||
1571 | INIT_WORK(&xfrm_state_gc_work, xfrm_state_gc_task, NULL); | 1571 | INIT_WORK(&xfrm_state_gc_work, xfrm_state_gc_task); |
1572 | } | 1572 | } |
1573 | 1573 | ||