author     Trond Myklebust <Trond.Myklebust@netapp.com>    2006-12-07 16:35:17 -0500
committer  Trond Myklebust <Trond.Myklebust@netapp.com>    2006-12-07 16:35:17 -0500
commit     21b4e736922f546e0f1aa7b9d6c442f309a2444a (patch)
tree       e1be8645297f8ebe87445251743ebcc52081a20d /net
parent     34161db6b14d984fb9b06c735b7b42f8803f6851 (diff)
parent     68380b581383c028830f79ec2670f4a193854aa6 (diff)
Merge branch 'master' of /home/trondmy/kernel/linux-2.6/ into merge_linus
Diffstat (limited to 'net')
67 files changed, 617 insertions, 389 deletions
diff --git a/net/bridge/br_fdb.c b/net/bridge/br_fdb.c
index d9f04864d15d..8ca448db7a0d 100644
--- a/net/bridge/br_fdb.c
+++ b/net/bridge/br_fdb.c
@@ -23,7 +23,7 @@
 #include <asm/atomic.h>
 #include "br_private.h"
 
-static kmem_cache_t *br_fdb_cache __read_mostly;
+static struct kmem_cache *br_fdb_cache __read_mostly;
 static int fdb_insert(struct net_bridge *br, struct net_bridge_port *source,
 		      const unsigned char *addr);
 
diff --git a/net/bridge/br_netfilter.c b/net/bridge/br_netfilter.c
index ac47ba2ba028..bd221ad52eaf 100644
--- a/net/bridge/br_netfilter.c
+++ b/net/bridge/br_netfilter.c
@@ -34,6 +34,7 @@
 #include <linux/netfilter_ipv6.h>
 #include <linux/netfilter_arp.h>
 #include <linux/in_route.h>
+#include <linux/inetdevice.h>
 
 #include <net/ip.h>
 #include <net/ipv6.h>
@@ -221,10 +222,14 @@ static void __br_dnat_complain(void)
  *
  * Otherwise, the packet is considered to be routed and we just
  * change the destination MAC address so that the packet will
- * later be passed up to the IP stack to be routed.
+ * later be passed up to the IP stack to be routed. For a redirected
+ * packet, ip_route_input() will give back the localhost as output device,
+ * which differs from the bridge device.
  *
  * Let us now consider the case that ip_route_input() fails:
  *
+ * This can be because the destination address is martian, in which case
+ * the packet will be dropped.
  * After a "echo '0' > /proc/sys/net/ipv4/ip_forward" ip_route_input()
  * will fail, while __ip_route_output_key() will return success. The source
  * address for __ip_route_output_key() is set to zero, so __ip_route_output_key
@@ -237,7 +242,8 @@ static void __br_dnat_complain(void)
  *
  * --Lennert, 20020411
  * --Bart, 20020416 (updated)
- * --Bart, 20021007 (updated) */
+ * --Bart, 20021007 (updated)
+ * --Bart, 20062711 (updated) */
 static int br_nf_pre_routing_finish_bridge(struct sk_buff *skb)
 {
 	if (skb->pkt_type == PACKET_OTHERHOST) {
@@ -264,15 +270,15 @@ static int br_nf_pre_routing_finish(struct sk_buff *skb)
 	struct net_device *dev = skb->dev;
 	struct iphdr *iph = skb->nh.iph;
 	struct nf_bridge_info *nf_bridge = skb->nf_bridge;
+	int err;
 
 	if (nf_bridge->mask & BRNF_PKT_TYPE) {
 		skb->pkt_type = PACKET_OTHERHOST;
 		nf_bridge->mask ^= BRNF_PKT_TYPE;
 	}
 	nf_bridge->mask ^= BRNF_NF_BRIDGE_PREROUTING;
-
 	if (dnat_took_place(skb)) {
-		if (ip_route_input(skb, iph->daddr, iph->saddr, iph->tos, dev)) {
+		if ((err = ip_route_input(skb, iph->daddr, iph->saddr, iph->tos, dev))) {
 			struct rtable *rt;
 			struct flowi fl = {
 				.nl_u = {
@@ -283,19 +289,33 @@ static int br_nf_pre_routing_finish(struct sk_buff *skb)
 				},
 				.proto = 0,
 			};
+			struct in_device *in_dev = in_dev_get(dev);
+
+			/* If err equals -EHOSTUNREACH the error is due to a
+			 * martian destination or due to the fact that
+			 * forwarding is disabled. For most martian packets,
+			 * ip_route_output_key() will fail. It won't fail for 2 types of
+			 * martian destinations: loopback destinations and destination
+			 * 0.0.0.0. In both cases the packet will be dropped because the
+			 * destination is the loopback device and not the bridge. */
+			if (err != -EHOSTUNREACH || !in_dev || IN_DEV_FORWARD(in_dev))
+				goto free_skb;
 
 			if (!ip_route_output_key(&rt, &fl)) {
 				/* - Bridged-and-DNAT'ed traffic doesn't
-				 * require ip_forwarding.
-				 * - Deal with redirected traffic. */
-				if (((struct dst_entry *)rt)->dev == dev ||
-				    rt->rt_type == RTN_LOCAL) {
+				 * require ip_forwarding. */
+				if (((struct dst_entry *)rt)->dev == dev) {
 					skb->dst = (struct dst_entry *)rt;
 					goto bridged_dnat;
 				}
+				/* we are sure that forwarding is disabled, so printing
+				 * this message is no problem. Note that the packet could
+				 * still have a martian destination address, in which case
+				 * the packet could be dropped even if forwarding were enabled */
 				__br_dnat_complain();
 				dst_release((struct dst_entry *)rt);
 			}
+free_skb:
 			kfree_skb(skb);
 			return 0;
 		} else {
diff --git a/net/core/dev.c b/net/core/dev.c
index 59d058a3b504..e660cb57e42a 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -3340,7 +3340,6 @@ void unregister_netdev(struct net_device *dev)
 
 EXPORT_SYMBOL(unregister_netdev);
 
-#ifdef CONFIG_HOTPLUG_CPU
 static int dev_cpu_callback(struct notifier_block *nfb,
 			    unsigned long action,
 			    void *ocpu)
@@ -3384,7 +3383,6 @@ static int dev_cpu_callback(struct notifier_block *nfb,
 
 	return NOTIFY_OK;
 }
-#endif /* CONFIG_HOTPLUG_CPU */
 
 #ifdef CONFIG_NET_DMA
 /**
diff --git a/net/core/dst.c b/net/core/dst.c
index 1a5e49da0e77..836ec6606925 100644
--- a/net/core/dst.c
+++ b/net/core/dst.c
@@ -125,7 +125,7 @@ void * dst_alloc(struct dst_ops * ops)
 		if (ops->gc())
 			return NULL;
 	}
-	dst = kmem_cache_alloc(ops->kmem_cachep, SLAB_ATOMIC);
+	dst = kmem_cache_alloc(ops->kmem_cachep, GFP_ATOMIC);
 	if (!dst)
 		return NULL;
 	memset(dst, 0, ops->entry_size);
diff --git a/net/core/flow.c b/net/core/flow.c
index b16d31ae5e54..d137f971f97d 100644
--- a/net/core/flow.c
+++ b/net/core/flow.c
@@ -44,7 +44,7 @@ static DEFINE_PER_CPU(struct flow_cache_entry **, flow_tables) = { NULL };
 
 #define flow_table(cpu) (per_cpu(flow_tables, cpu))
 
-static kmem_cache_t *flow_cachep __read_mostly;
+static struct kmem_cache *flow_cachep __read_mostly;
 
 static int flow_lwm, flow_hwm;
 
@@ -211,7 +211,7 @@ void *flow_cache_lookup(struct flowi *key, u16 family, u8 dir,
 		if (flow_count(cpu) > flow_hwm)
 			flow_cache_shrink(cpu);
 
-		fle = kmem_cache_alloc(flow_cachep, SLAB_ATOMIC);
+		fle = kmem_cache_alloc(flow_cachep, GFP_ATOMIC);
 		if (fle) {
 			fle->next = *head;
 			*head = fle;
@@ -340,7 +340,6 @@ static void __devinit flow_cache_cpu_prepare(int cpu)
 	tasklet_init(tasklet, flow_cache_flush_tasklet, 0);
 }
 
-#ifdef CONFIG_HOTPLUG_CPU
 static int flow_cache_cpu(struct notifier_block *nfb,
 			  unsigned long action,
 			  void *hcpu)
@@ -349,7 +348,6 @@ static int flow_cache_cpu(struct notifier_block *nfb,
 		__flow_cache_shrink((unsigned long)hcpu, 0);
 	return NOTIFY_OK;
 }
-#endif /* CONFIG_HOTPLUG_CPU */
 
 static int __init flow_cache_init(void)
 {
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index ba509a4a8e92..0ab1987b9348 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -251,7 +251,7 @@ static struct neighbour *neigh_alloc(struct neigh_table *tbl)
 		goto out_entries;
 	}
 
-	n = kmem_cache_alloc(tbl->kmem_cachep, SLAB_ATOMIC);
+	n = kmem_cache_alloc(tbl->kmem_cachep, GFP_ATOMIC);
 	if (!n)
 		goto out_entries;
 
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 8e1c385e5ba9..de7801d589e7 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -68,8 +68,8 @@
 
 #include "kmap_skb.h"
 
-static kmem_cache_t *skbuff_head_cache __read_mostly;
-static kmem_cache_t *skbuff_fclone_cache __read_mostly;
+static struct kmem_cache *skbuff_head_cache __read_mostly;
+static struct kmem_cache *skbuff_fclone_cache __read_mostly;
 
 /*
  *	Keep out-of-line to prevent kernel bloat.
@@ -132,6 +132,7 @@ EXPORT_SYMBOL(skb_truesize_bug);
  *	@gfp_mask: allocation mask
  *	@fclone: allocate from fclone cache instead of head cache
  *		and allocate a cloned (child) skb
+ *	@node: numa node to allocate memory on
  *
  *	Allocate a new &sk_buff. The returned buffer has no headroom and a
  *	tail room of size bytes. The object has a reference count of one.
@@ -141,9 +142,9 @@ EXPORT_SYMBOL(skb_truesize_bug);
  *	%GFP_ATOMIC.
  */
 struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
-			    int fclone)
+			    int fclone, int node)
 {
-	kmem_cache_t *cache;
+	struct kmem_cache *cache;
 	struct skb_shared_info *shinfo;
 	struct sk_buff *skb;
 	u8 *data;
@@ -151,14 +152,14 @@ struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
 	cache = fclone ? skbuff_fclone_cache : skbuff_head_cache;
 
 	/* Get the HEAD */
-	skb = kmem_cache_alloc(cache, gfp_mask & ~__GFP_DMA);
+	skb = kmem_cache_alloc_node(cache, gfp_mask & ~__GFP_DMA, node);
 	if (!skb)
 		goto out;
 
 	/* Get the DATA. Size must match skb_add_mtu(). */
 	size = SKB_DATA_ALIGN(size);
-	data = kmalloc_track_caller(size + sizeof(struct skb_shared_info),
-			gfp_mask);
+	data = kmalloc_node_track_caller(size + sizeof(struct skb_shared_info),
+			gfp_mask, node);
 	if (!data)
 		goto nodata;
 
@@ -210,7 +211,7 @@ nodata:
 *	Buffers may only be allocated from interrupts using a @gfp_mask of
 *	%GFP_ATOMIC.
 */
-struct sk_buff *alloc_skb_from_cache(kmem_cache_t *cp,
+struct sk_buff *alloc_skb_from_cache(struct kmem_cache *cp,
 				     unsigned int size,
 				     gfp_t gfp_mask)
 {
@@ -267,9 +268,10 @@ nodata:
 struct sk_buff *__netdev_alloc_skb(struct net_device *dev,
 		unsigned int length, gfp_t gfp_mask)
 {
+	int node = dev->class_dev.dev ? dev_to_node(dev->class_dev.dev) : -1;
 	struct sk_buff *skb;
 
-	skb = alloc_skb(length + NET_SKB_PAD, gfp_mask);
+	skb = __alloc_skb(length + NET_SKB_PAD, gfp_mask, 0, node);
 	if (likely(skb)) {
 		skb_reserve(skb, NET_SKB_PAD);
 		skb->dev = dev;
diff --git a/net/core/sock.c b/net/core/sock.c
index 419c7d3289c7..0ed5b4f0bc40 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -810,24 +810,11 @@ lenout:
  */
 static void inline sock_lock_init(struct sock *sk)
 {
-	spin_lock_init(&sk->sk_lock.slock);
-	sk->sk_lock.owner = NULL;
-	init_waitqueue_head(&sk->sk_lock.wq);
-	/*
-	 * Make sure we are not reinitializing a held lock:
-	 */
-	debug_check_no_locks_freed((void *)&sk->sk_lock, sizeof(sk->sk_lock));
-
-	/*
-	 * Mark both the sk_lock and the sk_lock.slock as a
-	 * per-address-family lock class:
-	 */
-	lockdep_set_class_and_name(&sk->sk_lock.slock,
-			af_family_slock_keys + sk->sk_family,
-			af_family_slock_key_strings[sk->sk_family]);
-	lockdep_init_map(&sk->sk_lock.dep_map,
-			af_family_key_strings[sk->sk_family],
-			af_family_keys + sk->sk_family, 0);
+	sock_lock_init_class_and_name(sk,
+			af_family_slock_key_strings[sk->sk_family],
+			af_family_slock_keys + sk->sk_family,
+			af_family_key_strings[sk->sk_family],
+			af_family_keys + sk->sk_family);
 }
 
 /**
@@ -841,7 +828,7 @@ struct sock *sk_alloc(int family, gfp_t priority,
 		      struct proto *prot, int zero_it)
 {
 	struct sock *sk = NULL;
-	kmem_cache_t *slab = prot->slab;
+	struct kmem_cache *slab = prot->slab;
 
 	if (slab != NULL)
 		sk = kmem_cache_alloc(slab, priority);
diff --git a/net/core/wireless.c b/net/core/wireless.c
index cb1b8728d7ee..f69ab7b4408e 100644
--- a/net/core/wireless.c
+++ b/net/core/wireless.c
@@ -2130,7 +2130,7 @@ int iw_handler_set_spy(struct net_device * dev,
 	 * The rtnl_lock() make sure we don't race with the other iw_handlers.
 	 * This make sure wireless_spy_update() "see" that the spy list
 	 * is temporarily disabled. */
-	wmb();
+	smp_wmb();
 
 	/* Are there are addresses to copy? */
 	if(wrqu->data.length > 0) {
@@ -2159,7 +2159,7 @@ int iw_handler_set_spy(struct net_device * dev,
 	}
 
 	/* Make sure above is updated before re-enabling */
-	wmb();
+	smp_wmb();
 
 	/* Enable addresses */
 	spydata->spy_number = wrqu->data.length;
diff --git a/net/dccp/ackvec.c b/net/dccp/ackvec.c
index bdf1bb7a82c0..1f4727ddbdbf 100644
--- a/net/dccp/ackvec.c
+++ b/net/dccp/ackvec.c
@@ -21,8 +21,8 @@
 
 #include <net/sock.h>
 
-static kmem_cache_t *dccp_ackvec_slab;
-static kmem_cache_t *dccp_ackvec_record_slab;
+static struct kmem_cache *dccp_ackvec_slab;
+static struct kmem_cache *dccp_ackvec_record_slab;
 
 static struct dccp_ackvec_record *dccp_ackvec_record_new(void)
 {
diff --git a/net/dccp/ccid.c b/net/dccp/ccid.c
index ff05e59043cd..d8cf92f09e68 100644
--- a/net/dccp/ccid.c
+++ b/net/dccp/ccid.c
@@ -55,9 +55,9 @@ static inline void ccids_read_unlock(void)
 #define ccids_read_unlock() do { } while(0)
 #endif
 
-static kmem_cache_t *ccid_kmem_cache_create(int obj_size, const char *fmt,...)
+static struct kmem_cache *ccid_kmem_cache_create(int obj_size, const char *fmt,...)
 {
-	kmem_cache_t *slab;
+	struct kmem_cache *slab;
 	char slab_name_fmt[32], *slab_name;
 	va_list args;
 
@@ -75,7 +75,7 @@ static kmem_cache_t *ccid_kmem_cache_create(int obj_size, const char *fmt,...)
 	return slab;
 }
 
-static void ccid_kmem_cache_destroy(kmem_cache_t *slab)
+static void ccid_kmem_cache_destroy(struct kmem_cache *slab)
 {
 	if (slab != NULL) {
 		const char *name = kmem_cache_name(slab);
diff --git a/net/dccp/ccid.h b/net/dccp/ccid.h
index c7c29514dce8..bcc2d12ae81c 100644
--- a/net/dccp/ccid.h
+++ b/net/dccp/ccid.h
@@ -27,9 +27,9 @@ struct ccid_operations {
 	unsigned char	ccid_id;
 	const char	*ccid_name;
 	struct module	*ccid_owner;
-	kmem_cache_t	*ccid_hc_rx_slab;
+	struct kmem_cache *ccid_hc_rx_slab;
 	__u32		ccid_hc_rx_obj_size;
-	kmem_cache_t	*ccid_hc_tx_slab;
+	struct kmem_cache *ccid_hc_tx_slab;
 	__u32		ccid_hc_tx_obj_size;
 	int		(*ccid_hc_rx_init)(struct ccid *ccid, struct sock *sk);
 	int		(*ccid_hc_tx_init)(struct ccid *ccid, struct sock *sk);
diff --git a/net/dccp/ccids/ccid3.c b/net/dccp/ccids/ccid3.c
index cf8c07b2704f..66a27b9688ca 100644
--- a/net/dccp/ccids/ccid3.c
+++ b/net/dccp/ccids/ccid3.c
@@ -295,7 +295,7 @@ static int ccid3_hc_tx_send_packet(struct sock *sk, struct sk_buff *skb)
 	new_packet = dccp_tx_hist_head(&hctx->ccid3hctx_hist);
 	if (new_packet == NULL || new_packet->dccphtx_sent) {
 		new_packet = dccp_tx_hist_entry_new(ccid3_tx_hist,
-						    SLAB_ATOMIC);
+						    GFP_ATOMIC);
 
 		if (unlikely(new_packet == NULL)) {
 			DCCP_WARN("%s, sk=%p, not enough mem to add to history,"
@@ -889,7 +889,7 @@ static void ccid3_hc_rx_update_li(struct sock *sk, u64 seq_loss, u8 win_loss)
 		/* new loss event detected */
 		/* calculate last interval length */
 		seq_temp = dccp_delta_seqno(head->dccplih_seqno, seq_loss);
-		entry = dccp_li_hist_entry_new(ccid3_li_hist, SLAB_ATOMIC);
+		entry = dccp_li_hist_entry_new(ccid3_li_hist, GFP_ATOMIC);
 
 		if (entry == NULL) {
 			DCCP_BUG("out of memory - can not allocate entry");
@@ -1011,7 +1011,7 @@ static void ccid3_hc_rx_packet_recv(struct sock *sk, struct sk_buff *skb)
 	}
 
 	packet = dccp_rx_hist_entry_new(ccid3_rx_hist, sk, opt_recv->dccpor_ndp,
-					skb, SLAB_ATOMIC);
+					skb, GFP_ATOMIC);
 	if (unlikely(packet == NULL)) {
 		DCCP_WARN("%s, sk=%p, Not enough mem to add rx packet "
 			  "to history, consider it lost!\n", dccp_role(sk), sk);
diff --git a/net/dccp/ccids/lib/loss_interval.c b/net/dccp/ccids/lib/loss_interval.c
index 48b9b93f8acb..0a0baef16b3e 100644
--- a/net/dccp/ccids/lib/loss_interval.c
+++ b/net/dccp/ccids/lib/loss_interval.c
@@ -125,7 +125,7 @@ int dccp_li_hist_interval_new(struct dccp_li_hist *hist,
 	int i;
 
 	for (i = 0; i < DCCP_LI_HIST_IVAL_F_LENGTH; i++) {
-		entry = dccp_li_hist_entry_new(hist, SLAB_ATOMIC);
+		entry = dccp_li_hist_entry_new(hist, GFP_ATOMIC);
 		if (entry == NULL) {
 			dccp_li_hist_purge(hist, list);
 			DCCP_BUG("loss interval list entry is NULL");
diff --git a/net/dccp/ccids/lib/loss_interval.h b/net/dccp/ccids/lib/loss_interval.h
index 0ae85f0340b2..eb257014dd74 100644
--- a/net/dccp/ccids/lib/loss_interval.h
+++ b/net/dccp/ccids/lib/loss_interval.h
@@ -20,7 +20,7 @@
 #define DCCP_LI_HIST_IVAL_F_LENGTH  8
 
 struct dccp_li_hist {
-	kmem_cache_t *dccplih_slab;
+	struct kmem_cache *dccplih_slab;
 };
 
 extern struct dccp_li_hist *dccp_li_hist_new(const char *name);
diff --git a/net/dccp/ccids/lib/packet_history.h b/net/dccp/ccids/lib/packet_history.h
index 067cf1c85a37..9a8bcf224aa7 100644
--- a/net/dccp/ccids/lib/packet_history.h
+++ b/net/dccp/ccids/lib/packet_history.h
@@ -68,14 +68,14 @@ struct dccp_rx_hist_entry {
 };
 
 struct dccp_tx_hist {
-	kmem_cache_t *dccptxh_slab;
+	struct kmem_cache *dccptxh_slab;
 };
 
 extern struct dccp_tx_hist *dccp_tx_hist_new(const char *name);
 extern void dccp_tx_hist_delete(struct dccp_tx_hist *hist);
 
 struct dccp_rx_hist {
-	kmem_cache_t *dccprxh_slab;
+	struct kmem_cache *dccprxh_slab;
 };
 
 extern struct dccp_rx_hist *dccp_rx_hist_new(const char *name);
diff --git a/net/decnet/dn_table.c b/net/decnet/dn_table.c
index bdbc3f431668..13b2421991ba 100644
--- a/net/decnet/dn_table.c
+++ b/net/decnet/dn_table.c
@@ -79,7 +79,7 @@ for( ; ((f) = *(fp)) != NULL && dn_key_eq((f)->fn_key, (key)); (fp) = &(f)->fn_n
 static struct hlist_head dn_fib_table_hash[DN_FIB_TABLE_HASHSZ];
 static DEFINE_RWLOCK(dn_fib_tables_lock);
 
-static kmem_cache_t *dn_hash_kmem __read_mostly;
+static struct kmem_cache *dn_hash_kmem __read_mostly;
 static int dn_fib_hash_zombies;
 
 static inline dn_fib_idx_t dn_hash(dn_fib_key_t key, struct dn_zone *dz)
@@ -590,7 +590,7 @@ create:
 
 replace:
 	err = -ENOBUFS;
-	new_f = kmem_cache_alloc(dn_hash_kmem, SLAB_KERNEL);
+	new_f = kmem_cache_alloc(dn_hash_kmem, GFP_KERNEL);
 	if (new_f == NULL)
 		goto out;
 
diff --git a/net/ieee80211/softmac/ieee80211softmac_assoc.c b/net/ieee80211/softmac/ieee80211softmac_assoc.c
index 08386c102954..eec1a1dd91da 100644
--- a/net/ieee80211/softmac/ieee80211softmac_assoc.c
+++ b/net/ieee80211/softmac/ieee80211softmac_assoc.c
@@ -431,6 +431,17 @@ ieee80211softmac_handle_assoc_response(struct net_device * dev,
 	return 0;
 }
 
+void
+ieee80211softmac_try_reassoc(struct ieee80211softmac_device *mac)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&mac->lock, flags);
+	mac->associnfo.associating = 1;
+	schedule_work(&mac->associnfo.work);
+	spin_unlock_irqrestore(&mac->lock, flags);
+}
+
 int
 ieee80211softmac_handle_disassoc(struct net_device * dev,
 				 struct ieee80211_disassoc *disassoc)
@@ -449,8 +460,7 @@ ieee80211softmac_handle_disassoc(struct net_device * dev,
 	dprintk(KERN_INFO PFX "got disassoc frame\n");
 	ieee80211softmac_disassoc(mac);
 
-	/* try to reassociate */
-	schedule_delayed_work(&mac->associnfo.work, 0);
+	ieee80211softmac_try_reassoc(mac);
 
 	return 0;
 }
diff --git a/net/ieee80211/softmac/ieee80211softmac_auth.c b/net/ieee80211/softmac/ieee80211softmac_auth.c
index 6012705aa4f8..8ed3e59b8024 100644
--- a/net/ieee80211/softmac/ieee80211softmac_auth.c
+++ b/net/ieee80211/softmac/ieee80211softmac_auth.c
@@ -337,6 +337,8 @@ ieee80211softmac_deauth_from_net(struct ieee80211softmac_device *mac,
 	/* can't transmit data right now... */
 	netif_carrier_off(mac->dev);
 	spin_unlock_irqrestore(&mac->lock, flags);
+
+	ieee80211softmac_try_reassoc(mac);
 }
 
 /*
diff --git a/net/ieee80211/softmac/ieee80211softmac_priv.h b/net/ieee80211/softmac/ieee80211softmac_priv.h
index c0dbe070e548..4c2bba34d328 100644
--- a/net/ieee80211/softmac/ieee80211softmac_priv.h
+++ b/net/ieee80211/softmac/ieee80211softmac_priv.h
@@ -239,4 +239,6 @@ void ieee80211softmac_call_events_locked(struct ieee80211softmac_device *mac, in
 int ieee80211softmac_notify_internal(struct ieee80211softmac_device *mac,
 	int event, void *event_context, notify_function_ptr fun, void *context, gfp_t gfp_mask);
 
+void ieee80211softmac_try_reassoc(struct ieee80211softmac_device *mac);
+
 #endif /* IEEE80211SOFTMAC_PRIV_H_ */
diff --git a/net/ieee80211/softmac/ieee80211softmac_wx.c b/net/ieee80211/softmac/ieee80211softmac_wx.c
index 2ffaebd21c53..480d72c7a42c 100644
--- a/net/ieee80211/softmac/ieee80211softmac_wx.c
+++ b/net/ieee80211/softmac/ieee80211softmac_wx.c
@@ -495,7 +495,8 @@ ieee80211softmac_wx_set_mlme(struct net_device *dev,
 			printk(KERN_DEBUG PFX "wx_set_mlme: we should know the net here...\n");
 			goto out;
 		}
-		return ieee80211softmac_deauth_req(mac, net, reason);
+		err = ieee80211softmac_deauth_req(mac, net, reason);
+		goto out;
 	case IW_MLME_DISASSOC:
 		ieee80211softmac_send_disassoc_req(mac, reason);
 		mac->associnfo.associated = 0;
diff --git a/net/ipv4/fib_hash.c b/net/ipv4/fib_hash.c
index 107bb6cbb0b3..648f47c1c399 100644
--- a/net/ipv4/fib_hash.c
+++ b/net/ipv4/fib_hash.c
@@ -45,8 +45,8 @@
 
 #include "fib_lookup.h"
 
-static kmem_cache_t *fn_hash_kmem __read_mostly;
-static kmem_cache_t *fn_alias_kmem __read_mostly;
+static struct kmem_cache *fn_hash_kmem __read_mostly;
+static struct kmem_cache *fn_alias_kmem __read_mostly;
 
 struct fib_node {
 	struct hlist_node	fn_hash;
@@ -485,13 +485,13 @@ static int fn_hash_insert(struct fib_table *tb, struct fib_config *cfg)
 			goto out;
 
 	err = -ENOBUFS;
-	new_fa = kmem_cache_alloc(fn_alias_kmem, SLAB_KERNEL);
+	new_fa = kmem_cache_alloc(fn_alias_kmem, GFP_KERNEL);
 	if (new_fa == NULL)
 		goto out;
 
 	new_f = NULL;
 	if (!f) {
-		new_f = kmem_cache_alloc(fn_hash_kmem, SLAB_KERNEL);
+		new_f = kmem_cache_alloc(fn_hash_kmem, GFP_KERNEL);
 		if (new_f == NULL)
 			goto out_free_new_fa;
 
diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c
index d17990ec724f..cfb249cc0a58 100644
--- a/net/ipv4/fib_trie.c
+++ b/net/ipv4/fib_trie.c
@@ -172,7 +172,7 @@ static struct tnode *inflate(struct trie *t, struct tnode *tn);
 static struct tnode *halve(struct trie *t, struct tnode *tn);
 static void tnode_free(struct tnode *tn);
 
-static kmem_cache_t *fn_alias_kmem __read_mostly;
+static struct kmem_cache *fn_alias_kmem __read_mostly;
 static struct trie *trie_local = NULL, *trie_main = NULL;
 
 
@@ -1187,7 +1187,7 @@ static int fn_trie_insert(struct fib_table *tb, struct fib_config *cfg)
 	u8 state;
 
 	err = -ENOBUFS;
-	new_fa = kmem_cache_alloc(fn_alias_kmem, SLAB_KERNEL);
+	new_fa = kmem_cache_alloc(fn_alias_kmem, GFP_KERNEL);
 	if (new_fa == NULL)
 		goto out;
 
@@ -1232,7 +1232,7 @@ static int fn_trie_insert(struct fib_table *tb, struct fib_config *cfg)
 		goto out;
 
 	err = -ENOBUFS;
-	new_fa = kmem_cache_alloc(fn_alias_kmem, SLAB_KERNEL);
+	new_fa = kmem_cache_alloc(fn_alias_kmem, GFP_KERNEL);
 	if (new_fa == NULL)
 		goto out;
 
diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
index 244c4f445c7d..8c79c8a4ea5c 100644
--- a/net/ipv4/inet_hashtables.c
+++ b/net/ipv4/inet_hashtables.c
@@ -27,11 +27,11 @@
 * Allocate and initialize a new local port bind bucket.
 * The bindhash mutex for snum's hash chain must be held here.
 */
-struct inet_bind_bucket *inet_bind_bucket_create(kmem_cache_t *cachep,
+struct inet_bind_bucket *inet_bind_bucket_create(struct kmem_cache *cachep,
 						 struct inet_bind_hashbucket *head,
 						 const unsigned short snum)
 {
-	struct inet_bind_bucket *tb = kmem_cache_alloc(cachep, SLAB_ATOMIC);
+	struct inet_bind_bucket *tb = kmem_cache_alloc(cachep, GFP_ATOMIC);
 
 	if (tb != NULL) {
 		tb->port = snum;
@@ -45,7 +45,7 @@ struct inet_bind_bucket *inet_bind_bucket_create(kmem_cache_t *cachep,
 /*
 * Caller must hold hashbucket lock for this tb with local BH disabled
 */
-void inet_bind_bucket_destroy(kmem_cache_t *cachep, struct inet_bind_bucket *tb)
+void inet_bind_bucket_destroy(struct kmem_cache *cachep, struct inet_bind_bucket *tb)
 {
 	if (hlist_empty(&tb->owners)) {
 		__hlist_del(&tb->node);
diff --git a/net/ipv4/inet_timewait_sock.c b/net/ipv4/inet_timewait_sock.c
index 8c74f9168b7d..9f414e35c488 100644
--- a/net/ipv4/inet_timewait_sock.c
+++ b/net/ipv4/inet_timewait_sock.c
@@ -91,7 +91,7 @@ struct inet_timewait_sock *inet_twsk_alloc(const struct sock *sk, const int stat
 {
 	struct inet_timewait_sock *tw =
 		kmem_cache_alloc(sk->sk_prot_creator->twsk_prot->twsk_slab,
-				 SLAB_ATOMIC);
+				 GFP_ATOMIC);
 	if (tw != NULL) {
 		const struct inet_sock *inet = inet_sk(sk);
 
@@ -178,7 +178,6 @@ void inet_twdr_hangman(unsigned long data)
 	need_timer = 0;
 	if (inet_twdr_do_twkill_work(twdr, twdr->slot)) {
 		twdr->thread_slots |= (1 << twdr->slot);
-		mb();
 		schedule_work(&twdr->twkill_work);
 		need_timer = 1;
 	} else {
diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c
index f072f3875af8..711eb6d0285a 100644
--- a/net/ipv4/inetpeer.c
+++ b/net/ipv4/inetpeer.c
@@ -73,7 +73,7 @@
 /* Exported for inet_getid inline function.  */
 DEFINE_SPINLOCK(inet_peer_idlock);
 
-static kmem_cache_t *peer_cachep __read_mostly;
+static struct kmem_cache *peer_cachep __read_mostly;
 
 #define node_height(x) x->avl_height
 static struct inet_peer peer_fake_node = {
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
index efcf45ecc818..ecb5422ea237 100644
--- a/net/ipv4/ipmr.c
+++ b/net/ipv4/ipmr.c
@@ -105,7 +105,7 @@ static DEFINE_SPINLOCK(mfc_unres_lock);
    In this case data path is free of exclusive locks at all.
  */
 
-static kmem_cache_t *mrt_cachep __read_mostly;
+static struct kmem_cache *mrt_cachep __read_mostly;
 
 static int ip_mr_forward(struct sk_buff *skb, struct mfc_cache *cache, int local);
 static int ipmr_cache_report(struct sk_buff *pkt, vifi_t vifi, int assert);
diff --git a/net/ipv4/ipvs/ip_vs_conn.c b/net/ipv4/ipvs/ip_vs_conn.c
index 8832eb517d52..8086787a2c51 100644
--- a/net/ipv4/ipvs/ip_vs_conn.c
+++ b/net/ipv4/ipvs/ip_vs_conn.c
@@ -44,7 +44,7 @@
 static struct list_head *ip_vs_conn_tab;
 
 /*  SLAB cache for IPVS connections */
-static kmem_cache_t *ip_vs_conn_cachep __read_mostly;
+static struct kmem_cache *ip_vs_conn_cachep __read_mostly;
 
 /*  counter for current IPVS connections */
 static atomic_t ip_vs_conn_count = ATOMIC_INIT(0);
diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
index 413c2d0a1f3d..71b76ade00e1 100644
--- a/net/ipv4/netfilter/arp_tables.c
+++ b/net/ipv4/netfilter/arp_tables.c
@@ -375,6 +375,13 @@ static int mark_source_chains(struct xt_table_info *newinfo,
 		    && unconditional(&e->arp)) {
 			unsigned int oldpos, size;
 
+			if (t->verdict < -NF_MAX_VERDICT - 1) {
+				duprintf("mark_source_chains: bad "
+					"negative verdict (%i)\n",
+					t->verdict);
+				return 0;
+			}
+
 			/* Return: backtrack through the last
 			 * big jump.
 			 */
@@ -404,6 +411,14 @@ static int mark_source_chains(struct xt_table_info *newinfo,
 				if (strcmp(t->target.u.user.name,
 					   ARPT_STANDARD_TARGET) == 0
 				    && newpos >= 0) {
+					if (newpos > newinfo->size -
+						sizeof(struct arpt_entry)) {
+						duprintf("mark_source_chains: "
+							"bad verdict (%i)\n",
+							newpos);
+						return 0;
+					}
+
 					/* This a jump; chase it. */
 					duprintf("Jump rule %u -> %u\n",
 						 pos, newpos);
@@ -426,8 +441,6 @@ static int mark_source_chains(struct xt_table_info *newinfo,
 static inline int standard_check(const struct arpt_entry_target *t,
 				 unsigned int max_offset)
 {
-	struct arpt_standard_target *targ = (void *)t;
-
 	/* Check standard info. */
 	if (t->u.target_size
 	    != ARPT_ALIGN(sizeof(struct arpt_standard_target))) {
@@ -437,18 +450,6 @@ static inline int standard_check(const struct arpt_entry_target *t,
 		return 0;
 	}
 
-	if (targ->verdict >= 0
-	    && targ->verdict > max_offset - sizeof(struct arpt_entry)) {
-		duprintf("arpt_standard_check: bad verdict (%i)\n",
-			 targ->verdict);
-		return 0;
-	}
-
-	if (targ->verdict < -NF_MAX_VERDICT - 1) {
-		duprintf("arpt_standard_check: bad negative verdict (%i)\n",
-			 targ->verdict);
-		return 0;
-	}
 	return 1;
 }
 
@@ -627,18 +628,20 @@ static int translate_table(const char *name,
 		}
 	}
 
+	if (!mark_source_chains(newinfo, valid_hooks, entry0)) {
+		duprintf("Looping hook\n");
+		return -ELOOP;
+	}
+
 	/* Finally, each sanity check must pass */
 	i = 0;
 	ret = ARPT_ENTRY_ITERATE(entry0, newinfo->size,
 				 check_entry, name, size, &i);
 
-	if (ret != 0)
-		goto cleanup;
-
-	ret = -ELOOP;
-	if (!mark_source_chains(newinfo, valid_hooks, entry0)) {
-		duprintf("Looping hook\n");
-		goto cleanup;
+	if (ret != 0) {
+		ARPT_ENTRY_ITERATE(entry0, newinfo->size,
+				cleanup_entry, &i);
+		return ret;
 	}
 
 	/* And one copy for every other CPU */
@@ -647,9 +650,6 @@ static int translate_table(const char *name,
 		memcpy(newinfo->entries[i], entry0, newinfo->size);
 	}
 
-	return 0;
- cleanup:
-	ARPT_ENTRY_ITERATE(entry0, newinfo->size, cleanup_entry, &i);
 	return ret;
 }
 
diff --git a/net/ipv4/netfilter/ip_conntrack_core.c b/net/ipv4/netfilter/ip_conntrack_core.c
index f4b0e68a16d2..8556a4f4f60a 100644
--- a/net/ipv4/netfilter/ip_conntrack_core.c
+++ b/net/ipv4/netfilter/ip_conntrack_core.c
@@ -65,8 +65,8 @@ static LIST_HEAD(helpers);
 unsigned int ip_conntrack_htable_size __read_mostly = 0;
 int ip_conntrack_max __read_mostly;
 struct list_head *ip_conntrack_hash __read_mostly;
-static kmem_cache_t *ip_conntrack_cachep __read_mostly;
-static kmem_cache_t *ip_conntrack_expect_cachep __read_mostly;
+static struct kmem_cache *ip_conntrack_cachep __read_mostly;
+static struct kmem_cache *ip_conntrack_expect_cachep __read_mostly;
 struct ip_conntrack ip_conntrack_untracked;
 unsigned int ip_ct_log_invalid __read_mostly;
 static LIST_HEAD(unconfirmed);
diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
index 8a455439b128..0ff2956d35e5 100644
--- a/net/ipv4/netfilter/ip_tables.c
+++ b/net/ipv4/netfilter/ip_tables.c
@@ -401,6 +401,13 @@ mark_source_chains(struct xt_table_info *newinfo,
 		    && unconditional(&e->ip)) {
 			unsigned int oldpos, size;
 
+			if (t->verdict < -NF_MAX_VERDICT - 1) {
+				duprintf("mark_source_chains: bad "
+					"negative verdict (%i)\n",
+					t->verdict);
+				return 0;
+			}
+
 			/* Return: backtrack through the last
 			   big jump. */
 			do {
@@ -438,6 +445,13 @@ mark_source_chains(struct xt_table_info *newinfo,
 				if (strcmp(t->target.u.user.name,
 					   IPT_STANDARD_TARGET) == 0
 				    && newpos >= 0) {
+					if (newpos > newinfo->size -
+						sizeof(struct ipt_entry)) {
+						duprintf("mark_source_chains: "
+							"bad verdict (%i)\n",
+							newpos);
+						return 0;
+					}
 					/* This a jump; chase it. */
 					duprintf("Jump rule %u -> %u\n",
 						 pos, newpos);
@@ -470,27 +484,6 @@ cleanup_match(struct ipt_entry_match *m, unsigned int *i)
 }
 
 static inline int
-standard_check(const struct ipt_entry_target *t,
-	       unsigned int max_offset)
-{
-	struct ipt_standard_target *targ = (void *)t;
-
-	/* Check standard info. */
-	if (targ->verdict >= 0
-	    && targ->verdict > max_offset - sizeof(struct ipt_entry)) {
-		duprintf("ipt_standard_check: bad verdict (%i)\n",
-			 targ->verdict);
-		return 0;
-	}
-	if (targ->verdict < -NF_MAX_VERDICT - 1) {
-		duprintf("ipt_standard_check: bad negative verdict (%i)\n",
-			 targ->verdict);
-		return 0;
-	}
-	return 1;
-}
-
-static inline int
 check_match(struct ipt_entry_match *m,
 	    const char *name,
 	    const struct ipt_ip *ip,
@@ -576,12 +569,7 @@ check_entry(struct ipt_entry *e, const char *name, unsigned int size,
 	if (ret)
 		goto err;
 
-	if (t->u.kernel.target == &ipt_standard_target) {
-		if (!standard_check(t, size)) {
-			ret = -EINVAL;
-			goto err;
-		}
-	} else if (t->u.kernel.target->checkentry
+	if (t->u.kernel.target->checkentry
 	    && !t->u.kernel.target->checkentry(name, e, target, t->data,
 					       e->comefrom)) {
 		duprintf("ip_tables: check failed for `%s'.\n",
@@ -718,17 +706,19 @@ translate_table(const char *name,
 		}
 	}
 
+	if (!mark_source_chains(newinfo, valid_hooks, entry0))
+		return -ELOOP;
+
 	/* Finally, each sanity check must pass */
 	i = 0;
 	ret = IPT_ENTRY_ITERATE(entry0, newinfo->size,
 				check_entry, name, size, &i);
 
-	if (ret != 0)
-		goto cleanup;
-
-	ret = -ELOOP;
-	if (!mark_source_chains(newinfo, valid_hooks, entry0))
-		goto cleanup;
+	if (ret != 0) {
+		IPT_ENTRY_ITERATE(entry0, newinfo->size,
+				cleanup_entry, &i);
+		return ret;
+	}
 
 	/* And one copy for every other CPU */
 	for_each_possible_cpu(i) {
@@ -736,9 +726,6 @@ translate_table(const char *name,
 		memcpy(newinfo->entries[i], entry0, newinfo->size);
 	}
 
-	return 0;
- cleanup:
-	IPT_ENTRY_ITERATE(entry0, newinfo->size, cleanup_entry, &i);
 	return ret;
 }
 
@@ -1529,25 +1516,8 @@ static inline int compat_copy_match_from_user(struct ipt_entry_match *m,
 	void **dstptr, compat_uint_t *size, const char *name,
 	const struct ipt_ip *ip, unsigned int hookmask)
 {
-	struct ipt_entry_match *dm;
-	struct ipt_match *match;
-	int ret;
-
-	dm = (struct ipt_entry_match *)*dstptr;
-	match = m->u.kernel.match;
 	xt_compat_match_from_user(m, dstptr, size);
-
-	ret = xt_check_match(match, AF_INET, dm->u.match_size - sizeof(*dm),
-			     name, hookmask, ip->proto,
-			     ip->invflags & IPT_INV_PROTO);
-	if (!ret && m->u.kernel.match->checkentry
-	    && !m->u.kernel.match->checkentry(name, ip, match, dm->data,
-					      hookmask)) {
-		duprintf("ip_tables: check failed for `%s'.\n",
-			 m->u.kernel.match->name);
-		ret = -EINVAL;
-	}
-	return ret;
+	return 0;
 }
 
 static int compat_copy_entry_from_user(struct ipt_entry *e, void **dstptr,
@@ -1569,7 +1539,7 @@ static int compat_copy_entry_from_user(struct ipt_entry *e, void **dstptr,
 	ret = IPT_MATCH_ITERATE(e, compat_copy_match_from_user, dstptr, size,
 				name, &de->ip, de->comefrom);
 	if (ret)
-		goto err;
+		return ret;
 	de->target_offset = e->target_offset - (origsize - *size);
 	t = ipt_get_target(e);
 	target = t->u.kernel.target;
@@ -1582,31 +1552,62 @@ static int compat_copy_entry_from_user(struct ipt_entry *e, void **dstptr,
 		if ((unsigned char *)de - base < newinfo->underflow[h])
 			newinfo->underflow[h] -= origsize - *size;
 	}
+	return ret;
+}
+
+static inline int compat_check_match(struct ipt_entry_match *m, const char *name,
+				const struct ipt_ip *ip, unsigned int hookmask)
+{
+	struct ipt_match *match;
+	int ret;
+
+	match = m->u.kernel.match;
+	ret = xt_check_match(match, AF_INET, m->u.match_size - sizeof(*m),
+			     name, hookmask, ip->proto,
+			     ip->invflags & IPT_INV_PROTO);
+	if (!ret && m->u.kernel.match->checkentry
+	    && !m->u.kernel.match->checkentry(name, ip, match, m->data,
+					      hookmask)) {
+		duprintf("ip_tables: compat: check failed for `%s'.\n",
+			 m->u.kernel.match->name);
+		ret = -EINVAL;
+	}
+	return ret;
+}
+
+static inline int compat_check_target(struct ipt_entry *e, const char *name)
+{
+	struct ipt_entry_target *t;
+	struct ipt_target *target;
+	int ret;
 
-	t = ipt_get_target(de);
+	t = ipt_get_target(e);
 	target = t->u.kernel.target;
 	ret = xt_check_target(target, AF_INET, t->u.target_size - sizeof(*t),
 			      name, e->comefrom, e->ip.proto,
 			      e->ip.invflags & IPT_INV_PROTO);
-	if (ret)
-		goto err;
-
-	ret = -EINVAL;
-	if (t->u.kernel.target == &ipt_standard_target) {
-		if (!standard_check(t, *size))
-			goto err;
-	} else if (t->u.kernel.target->checkentry
-		   && !t->u.kernel.target->checkentry(name, de, target,
-						      t->data, de->comefrom)) {
+	if (!ret && t->u.kernel.target->checkentry
+	    && !t->u.kernel.target->checkentry(name, e, target,
+					       t->data, e->comefrom)) {
 		duprintf("ip_tables: compat: check failed for `%s'.\n",
 			 t->u.kernel.target->name);
-		goto err;
+		ret = -EINVAL;
 	}
-	ret = 0;
- err:
 	return ret;
 }
 
+static inline int compat_check_entry(struct ipt_entry *e, const char *name)
+{
+	int ret;
+
+	ret = IPT_MATCH_ITERATE(e, compat_check_match, name, &e->ip,
+				e->comefrom);
+	if (ret)
+		return ret;
+
+	return compat_check_target(e, name);
+}
+
 static int
 translate_compat_table(const char *name,
 		unsigned int valid_hooks,
@@ -1695,6 +1696,11 @@ translate_compat_table(const char *name,
 	if (!mark_source_chains(newinfo, valid_hooks, entry1))
 		goto free_newinfo;
 
+	ret = IPT_ENTRY_ITERATE(entry1, newinfo->size, compat_check_entry,
+			name);
+	if (ret)
+		goto free_newinfo;
+
 	/* And one copy for every other CPU */
 	for_each_possible_cpu(i)
 		if (newinfo->entries[i] && newinfo->entries[i] != entry1)
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 9f3924c4905e..11c167118e87 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -1780,7 +1780,7 @@ static inline int __mkroute_input(struct sk_buff *skb,
 #endif
 	if (in_dev->cnf.no_policy)
 		rth->u.dst.flags |= DST_NOPOLICY;
-	if (in_dev->cnf.no_xfrm)
+	if (out_dev->cnf.no_xfrm)
 		rth->u.dst.flags |= DST_NOXFRM;
 	rth->fl.fl4_dst	= daddr;
 	rth->rt_dst	= daddr;
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index 9304034c0c47..c701f6abbfc1 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c | |||
@@ -4235,7 +4235,7 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb, | |||
4235 | * Change state from SYN-SENT only after copied_seq | 4235 | * Change state from SYN-SENT only after copied_seq |
4236 | * is initialized. */ | 4236 | * is initialized. */ |
4237 | tp->copied_seq = tp->rcv_nxt; | 4237 | tp->copied_seq = tp->rcv_nxt; |
4238 | mb(); | 4238 | smp_mb(); |
4239 | tcp_set_state(sk, TCP_ESTABLISHED); | 4239 | tcp_set_state(sk, TCP_ESTABLISHED); |
4240 | 4240 | ||
4241 | security_inet_conn_established(sk, skb); | 4241 | security_inet_conn_established(sk, skb); |
@@ -4483,7 +4483,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb, | |||
4483 | case TCP_SYN_RECV: | 4483 | case TCP_SYN_RECV: |
4484 | if (acceptable) { | 4484 | if (acceptable) { |
4485 | tp->copied_seq = tp->rcv_nxt; | 4485 | tp->copied_seq = tp->rcv_nxt; |
4486 | mb(); | 4486 | smp_mb(); |
4487 | tcp_set_state(sk, TCP_ESTABLISHED); | 4487 | tcp_set_state(sk, TCP_ESTABLISHED); |
4488 | sk->sk_state_change(sk); | 4488 | sk->sk_state_change(sk); |
4489 | 4489 | ||
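The two tcp_input.c hunks above only need the barrier between writing tp->copied_seq and publishing TCP_ESTABLISHED to order the stores as seen by other CPUs, so smp_mb() (which compiles away on UP builds) is the appropriate primitive rather than the unconditional mb(). As a rough userspace analogue of that initialize-then-publish ordering — C11 atomics standing in for the kernel's barrier macros, and every name below is illustrative only:

	#include <stdatomic.h>
	#include <stdio.h>

	static unsigned int copied_seq;                        /* plain data, written first    */
	static atomic_int established = ATOMIC_VAR_INIT(0);    /* "state" flag, published last */

	static void producer(unsigned int rcv_nxt)
	{
		copied_seq = rcv_nxt;                   /* initialize the data ...     */
		atomic_thread_fence(memory_order_release);
		atomic_store_explicit(&established, 1,  /* ... then publish the state  */
				      memory_order_relaxed);
	}

	static int consumer(unsigned int *seq)
	{
		if (!atomic_load_explicit(&established, memory_order_acquire))
			return 0;
		*seq = copied_seq;      /* acquire pairs with the release fence above */
		return 1;
	}

	int main(void)
	{
		unsigned int seq;

		producer(4242);
		if (consumer(&seq))
			printf("established, copied_seq=%u\n", seq);
		return 0;
	}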
diff --git a/net/ipv4/xfrm4_policy.c b/net/ipv4/xfrm4_policy.c index d4107bb701b5..fb9f69c616f5 100644 --- a/net/ipv4/xfrm4_policy.c +++ b/net/ipv4/xfrm4_policy.c | |||
@@ -274,6 +274,8 @@ static void xfrm4_dst_destroy(struct dst_entry *dst) | |||
274 | 274 | ||
275 | if (likely(xdst->u.rt.idev)) | 275 | if (likely(xdst->u.rt.idev)) |
276 | in_dev_put(xdst->u.rt.idev); | 276 | in_dev_put(xdst->u.rt.idev); |
277 | if (likely(xdst->u.rt.peer)) | ||
278 | inet_putpeer(xdst->u.rt.peer); | ||
277 | xfrm_dst_destroy(xdst); | 279 | xfrm_dst_destroy(xdst); |
278 | } | 280 | } |
279 | 281 | ||
diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c index 87c8f54872b7..e5cd83b2205d 100644 --- a/net/ipv6/af_inet6.c +++ b/net/ipv6/af_inet6.c | |||
@@ -720,10 +720,8 @@ snmp6_mib_free(void *ptr[2]) | |||
720 | { | 720 | { |
721 | if (ptr == NULL) | 721 | if (ptr == NULL) |
722 | return; | 722 | return; |
723 | if (ptr[0]) | 723 | free_percpu(ptr[0]); |
724 | free_percpu(ptr[0]); | 724 | free_percpu(ptr[1]); |
725 | if (ptr[1]) | ||
726 | free_percpu(ptr[1]); | ||
727 | ptr[0] = ptr[1] = NULL; | 725 | ptr[0] = ptr[1] = NULL; |
728 | } | 726 | } |
729 | 727 | ||
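The snmp6_mib_free() hunk drops the per-pointer NULL tests because free_percpu() — like kfree() and userspace free() — is expected to accept a NULL pointer as a no-op. A tiny userspace sketch of the same idiom (plain malloc/free, illustrative names):

	#include <stdlib.h>

	struct mib { void *cpu0; void *cpu1; };

	/* free(NULL) is defined to do nothing, so no per-pointer checks are needed. */
	static void mib_free(struct mib *m)
	{
		if (m == NULL)
			return;
		free(m->cpu0);
		free(m->cpu1);
		m->cpu0 = m->cpu1 = NULL;
	}

	int main(void)
	{
		struct mib m = { .cpu0 = malloc(16), .cpu1 = NULL };

		mib_free(&m);   /* safe even though cpu1 was never allocated */
		return 0;
	}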
diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c index bf526115e518..96d8310ae9c8 100644 --- a/net/ipv6/ip6_fib.c +++ b/net/ipv6/ip6_fib.c | |||
@@ -50,7 +50,7 @@ | |||
50 | 50 | ||
51 | struct rt6_statistics rt6_stats; | 51 | struct rt6_statistics rt6_stats; |
52 | 52 | ||
53 | static kmem_cache_t * fib6_node_kmem __read_mostly; | 53 | static struct kmem_cache * fib6_node_kmem __read_mostly; |
54 | 54 | ||
55 | enum fib_walk_state_t | 55 | enum fib_walk_state_t |
56 | { | 56 | { |
@@ -150,7 +150,7 @@ static __inline__ struct fib6_node * node_alloc(void) | |||
150 | { | 150 | { |
151 | struct fib6_node *fn; | 151 | struct fib6_node *fn; |
152 | 152 | ||
153 | if ((fn = kmem_cache_alloc(fib6_node_kmem, SLAB_ATOMIC)) != NULL) | 153 | if ((fn = kmem_cache_alloc(fib6_node_kmem, GFP_ATOMIC)) != NULL) |
154 | memset(fn, 0, sizeof(struct fib6_node)); | 154 | memset(fn, 0, sizeof(struct fib6_node)); |
155 | 155 | ||
156 | return fn; | 156 | return fn; |
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c index e05ecbb1412d..e9212c7ff5cf 100644 --- a/net/ipv6/ip6_output.c +++ b/net/ipv6/ip6_output.c | |||
@@ -624,13 +624,13 @@ static int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *)) | |||
624 | skb_shinfo(skb)->frag_list = NULL; | 624 | skb_shinfo(skb)->frag_list = NULL; |
625 | /* BUILD HEADER */ | 625 | /* BUILD HEADER */ |
626 | 626 | ||
627 | *prevhdr = NEXTHDR_FRAGMENT; | ||
627 | tmp_hdr = kmemdup(skb->nh.raw, hlen, GFP_ATOMIC); | 628 | tmp_hdr = kmemdup(skb->nh.raw, hlen, GFP_ATOMIC); |
628 | if (!tmp_hdr) { | 629 | if (!tmp_hdr) { |
629 | IP6_INC_STATS(ip6_dst_idev(skb->dst), IPSTATS_MIB_FRAGFAILS); | 630 | IP6_INC_STATS(ip6_dst_idev(skb->dst), IPSTATS_MIB_FRAGFAILS); |
630 | return -ENOMEM; | 631 | return -ENOMEM; |
631 | } | 632 | } |
632 | 633 | ||
633 | *prevhdr = NEXTHDR_FRAGMENT; | ||
634 | __skb_pull(skb, hlen); | 634 | __skb_pull(skb, hlen); |
635 | fh = (struct frag_hdr*)__skb_push(skb, sizeof(struct frag_hdr)); | 635 | fh = (struct frag_hdr*)__skb_push(skb, sizeof(struct frag_hdr)); |
636 | skb->nh.raw = __skb_push(skb, hlen); | 636 | skb->nh.raw = __skb_push(skb, hlen); |
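The ip6_fragment() hunk moves the *prevhdr = NEXTHDR_FRAGMENT assignment ahead of the kmemdup() of the header block, so the duplicated header prepended to each fragment already carries the Fragment next-header value. A minimal userspace sketch of that patch-before-copy ordering (the struct and values are stand-ins, not the real IPv6 header layout):

	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>

	#define NEXTHDR_FRAGMENT 44     /* IPv6 fragment header protocol number */

	struct hdr { unsigned char nexthdr; unsigned char rest[7]; };

	/* The field must be patched *before* duplicating the header; otherwise
	 * the copy used for every fragment still carries the old value. */
	static struct hdr *dup_for_fragment(struct hdr *orig, unsigned char *prevhdr)
	{
		*prevhdr = NEXTHDR_FRAGMENT;            /* patch first ... */
		struct hdr *copy = malloc(sizeof(*copy));
		if (!copy)
			return NULL;
		memcpy(copy, orig, sizeof(*copy));      /* ... then copy   */
		return copy;
	}

	int main(void)
	{
		struct hdr h = { .nexthdr = 6 /* TCP */ };
		struct hdr *copy = dup_for_fragment(&h, &h.nexthdr);

		if (copy)
			printf("copied nexthdr=%u\n", copy->nexthdr);   /* prints 44 */
		free(copy);
		return 0;
	}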
diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c index f63fb86d7c7b..4eec4b3988b8 100644 --- a/net/ipv6/netfilter/ip6_tables.c +++ b/net/ipv6/netfilter/ip6_tables.c | |||
@@ -440,6 +440,13 @@ mark_source_chains(struct xt_table_info *newinfo, | |||
440 | && unconditional(&e->ipv6)) { | 440 | && unconditional(&e->ipv6)) { |
441 | unsigned int oldpos, size; | 441 | unsigned int oldpos, size; |
442 | 442 | ||
443 | if (t->verdict < -NF_MAX_VERDICT - 1) { | ||
444 | duprintf("mark_source_chains: bad " | ||
445 | "negative verdict (%i)\n", | ||
446 | t->verdict); | ||
447 | return 0; | ||
448 | } | ||
449 | |||
443 | /* Return: backtrack through the last | 450 | /* Return: backtrack through the last |
444 | big jump. */ | 451 | big jump. */ |
445 | do { | 452 | do { |
@@ -477,6 +484,13 @@ mark_source_chains(struct xt_table_info *newinfo, | |||
477 | if (strcmp(t->target.u.user.name, | 484 | if (strcmp(t->target.u.user.name, |
478 | IP6T_STANDARD_TARGET) == 0 | 485 | IP6T_STANDARD_TARGET) == 0 |
479 | && newpos >= 0) { | 486 | && newpos >= 0) { |
487 | if (newpos > newinfo->size - | ||
488 | sizeof(struct ip6t_entry)) { | ||
489 | duprintf("mark_source_chains: " | ||
490 | "bad verdict (%i)\n", | ||
491 | newpos); | ||
492 | return 0; | ||
493 | } | ||
480 | /* This a jump; chase it. */ | 494 | /* This a jump; chase it. */ |
481 | duprintf("Jump rule %u -> %u\n", | 495 | duprintf("Jump rule %u -> %u\n", |
482 | pos, newpos); | 496 | pos, newpos); |
@@ -509,27 +523,6 @@ cleanup_match(struct ip6t_entry_match *m, unsigned int *i) | |||
509 | } | 523 | } |
510 | 524 | ||
511 | static inline int | 525 | static inline int |
512 | standard_check(const struct ip6t_entry_target *t, | ||
513 | unsigned int max_offset) | ||
514 | { | ||
515 | struct ip6t_standard_target *targ = (void *)t; | ||
516 | |||
517 | /* Check standard info. */ | ||
518 | if (targ->verdict >= 0 | ||
519 | && targ->verdict > max_offset - sizeof(struct ip6t_entry)) { | ||
520 | duprintf("ip6t_standard_check: bad verdict (%i)\n", | ||
521 | targ->verdict); | ||
522 | return 0; | ||
523 | } | ||
524 | if (targ->verdict < -NF_MAX_VERDICT - 1) { | ||
525 | duprintf("ip6t_standard_check: bad negative verdict (%i)\n", | ||
526 | targ->verdict); | ||
527 | return 0; | ||
528 | } | ||
529 | return 1; | ||
530 | } | ||
531 | |||
532 | static inline int | ||
533 | check_match(struct ip6t_entry_match *m, | 526 | check_match(struct ip6t_entry_match *m, |
534 | const char *name, | 527 | const char *name, |
535 | const struct ip6t_ip6 *ipv6, | 528 | const struct ip6t_ip6 *ipv6, |
@@ -616,12 +609,7 @@ check_entry(struct ip6t_entry *e, const char *name, unsigned int size, | |||
616 | if (ret) | 609 | if (ret) |
617 | goto err; | 610 | goto err; |
618 | 611 | ||
619 | if (t->u.kernel.target == &ip6t_standard_target) { | 612 | if (t->u.kernel.target->checkentry |
620 | if (!standard_check(t, size)) { | ||
621 | ret = -EINVAL; | ||
622 | goto err; | ||
623 | } | ||
624 | } else if (t->u.kernel.target->checkentry | ||
625 | && !t->u.kernel.target->checkentry(name, e, target, t->data, | 613 | && !t->u.kernel.target->checkentry(name, e, target, t->data, |
626 | e->comefrom)) { | 614 | e->comefrom)) { |
627 | duprintf("ip_tables: check failed for `%s'.\n", | 615 | duprintf("ip_tables: check failed for `%s'.\n", |
@@ -758,17 +746,19 @@ translate_table(const char *name, | |||
758 | } | 746 | } |
759 | } | 747 | } |
760 | 748 | ||
749 | if (!mark_source_chains(newinfo, valid_hooks, entry0)) | ||
750 | return -ELOOP; | ||
751 | |||
761 | /* Finally, each sanity check must pass */ | 752 | /* Finally, each sanity check must pass */ |
762 | i = 0; | 753 | i = 0; |
763 | ret = IP6T_ENTRY_ITERATE(entry0, newinfo->size, | 754 | ret = IP6T_ENTRY_ITERATE(entry0, newinfo->size, |
764 | check_entry, name, size, &i); | 755 | check_entry, name, size, &i); |
765 | 756 | ||
766 | if (ret != 0) | 757 | if (ret != 0) { |
767 | goto cleanup; | 758 | IP6T_ENTRY_ITERATE(entry0, newinfo->size, |
768 | 759 | cleanup_entry, &i); | |
769 | ret = -ELOOP; | 760 | return ret; |
770 | if (!mark_source_chains(newinfo, valid_hooks, entry0)) | 761 | } |
771 | goto cleanup; | ||
772 | 762 | ||
773 | /* And one copy for every other CPU */ | 763 | /* And one copy for every other CPU */ |
774 | for_each_possible_cpu(i) { | 764 | for_each_possible_cpu(i) { |
@@ -777,9 +767,6 @@ translate_table(const char *name, | |||
777 | } | 767 | } |
778 | 768 | ||
779 | return 0; | 769 | return 0; |
780 | cleanup: | ||
781 | IP6T_ENTRY_ITERATE(entry0, newinfo->size, cleanup_entry, &i); | ||
782 | return ret; | ||
783 | } | 770 | } |
784 | 771 | ||
785 | /* Gets counters. */ | 772 | /* Gets counters. */ |
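The new checks added to mark_source_chains() above bound a standard target's verdict before it is used: a negative verdict may not go below -NF_MAX_VERDICT - 1, and a jump offset must leave room for at least one entry inside the table. A compact sketch of the same bounds test in plain C (the constant and struct are stand-ins, not the kernel definitions):

	#include <stddef.h>
	#include <stdio.h>

	#define NF_MAX_VERDICT 5        /* illustrative value, not the kernel's header */

	struct entry { int dummy; };

	/* Accept either a small negative built-in verdict or a jump offset that
	 * still leaves room for at least one struct entry in a table of
	 * table_size bytes. */
	static int verdict_ok(int verdict, size_t table_size)
	{
		if (verdict < 0)
			return verdict >= -NF_MAX_VERDICT - 1;
		return (size_t)verdict <= table_size - sizeof(struct entry);
	}

	int main(void)
	{
		printf("%d %d %d\n",
		       verdict_ok(-2, 256),     /* built-in verdict: ok   */
		       verdict_ok(-100, 256),   /* bogus negative: reject */
		       verdict_ok(1024, 256));  /* jump past end: reject  */
		return 0;
	}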
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c index c2e629d6aea4..4ae1b19ada5d 100644 --- a/net/ipv6/raw.c +++ b/net/ipv6/raw.c | |||
@@ -854,7 +854,8 @@ back_from_confirm: | |||
854 | } | 854 | } |
855 | done: | 855 | done: |
856 | dst_release(dst); | 856 | dst_release(dst); |
857 | release_sock(sk); | 857 | if (!inet->hdrincl) |
858 | release_sock(sk); | ||
858 | out: | 859 | out: |
859 | fl6_sock_release(flowlabel); | 860 | fl6_sock_release(flowlabel); |
860 | return err<0?err:len; | 861 | return err<0?err:len; |
diff --git a/net/ipv6/xfrm6_tunnel.c b/net/ipv6/xfrm6_tunnel.c index 01a5c52a2be3..12e426b9aacd 100644 --- a/net/ipv6/xfrm6_tunnel.c +++ b/net/ipv6/xfrm6_tunnel.c | |||
@@ -50,7 +50,7 @@ static u32 xfrm6_tunnel_spi; | |||
50 | #define XFRM6_TUNNEL_SPI_MIN 1 | 50 | #define XFRM6_TUNNEL_SPI_MIN 1 |
51 | #define XFRM6_TUNNEL_SPI_MAX 0xffffffff | 51 | #define XFRM6_TUNNEL_SPI_MAX 0xffffffff |
52 | 52 | ||
53 | static kmem_cache_t *xfrm6_tunnel_spi_kmem __read_mostly; | 53 | static struct kmem_cache *xfrm6_tunnel_spi_kmem __read_mostly; |
54 | 54 | ||
55 | #define XFRM6_TUNNEL_SPI_BYADDR_HSIZE 256 | 55 | #define XFRM6_TUNNEL_SPI_BYADDR_HSIZE 256 |
56 | #define XFRM6_TUNNEL_SPI_BYSPI_HSIZE 256 | 56 | #define XFRM6_TUNNEL_SPI_BYSPI_HSIZE 256 |
@@ -180,7 +180,7 @@ try_next_2:; | |||
180 | spi = 0; | 180 | spi = 0; |
181 | goto out; | 181 | goto out; |
182 | alloc_spi: | 182 | alloc_spi: |
183 | x6spi = kmem_cache_alloc(xfrm6_tunnel_spi_kmem, SLAB_ATOMIC); | 183 | x6spi = kmem_cache_alloc(xfrm6_tunnel_spi_kmem, GFP_ATOMIC); |
184 | if (!x6spi) | 184 | if (!x6spi) |
185 | goto out; | 185 | goto out; |
186 | 186 | ||
diff --git a/net/irda/irttp.c b/net/irda/irttp.c index 252f11012566..03504f3e4990 100644 --- a/net/irda/irttp.c +++ b/net/irda/irttp.c | |||
@@ -1100,7 +1100,7 @@ int irttp_connect_request(struct tsap_cb *self, __u8 dtsap_sel, | |||
1100 | return -ENOMEM; | 1100 | return -ENOMEM; |
1101 | 1101 | ||
1102 | /* Reserve space for MUX_CONTROL and LAP header */ | 1102 | /* Reserve space for MUX_CONTROL and LAP header */ |
1103 | skb_reserve(tx_skb, TTP_MAX_HEADER); | 1103 | skb_reserve(tx_skb, TTP_MAX_HEADER + TTP_SAR_HEADER); |
1104 | } else { | 1104 | } else { |
1105 | tx_skb = userdata; | 1105 | tx_skb = userdata; |
1106 | /* | 1106 | /* |
@@ -1349,7 +1349,7 @@ int irttp_connect_response(struct tsap_cb *self, __u32 max_sdu_size, | |||
1349 | return -ENOMEM; | 1349 | return -ENOMEM; |
1350 | 1350 | ||
1351 | /* Reserve space for MUX_CONTROL and LAP header */ | 1351 | /* Reserve space for MUX_CONTROL and LAP header */ |
1352 | skb_reserve(tx_skb, TTP_MAX_HEADER); | 1352 | skb_reserve(tx_skb, TTP_MAX_HEADER + TTP_SAR_HEADER); |
1353 | } else { | 1353 | } else { |
1354 | tx_skb = userdata; | 1354 | tx_skb = userdata; |
1355 | /* | 1355 | /* |
diff --git a/net/key/af_key.c b/net/key/af_key.c index 0e1dbfbb9b10..5dd5094659a1 100644 --- a/net/key/af_key.c +++ b/net/key/af_key.c | |||
@@ -27,6 +27,7 @@ | |||
27 | #include <linux/proc_fs.h> | 27 | #include <linux/proc_fs.h> |
28 | #include <linux/init.h> | 28 | #include <linux/init.h> |
29 | #include <net/xfrm.h> | 29 | #include <net/xfrm.h> |
30 | #include <linux/audit.h> | ||
30 | 31 | ||
31 | #include <net/sock.h> | 32 | #include <net/sock.h> |
32 | 33 | ||
@@ -1420,6 +1421,9 @@ static int pfkey_add(struct sock *sk, struct sk_buff *skb, struct sadb_msg *hdr, | |||
1420 | else | 1421 | else |
1421 | err = xfrm_state_update(x); | 1422 | err = xfrm_state_update(x); |
1422 | 1423 | ||
1424 | xfrm_audit_log(audit_get_loginuid(current->audit_context), 0, | ||
1425 | AUDIT_MAC_IPSEC_ADDSA, err ? 0 : 1, NULL, x); | ||
1426 | |||
1423 | if (err < 0) { | 1427 | if (err < 0) { |
1424 | x->km.state = XFRM_STATE_DEAD; | 1428 | x->km.state = XFRM_STATE_DEAD; |
1425 | __xfrm_state_put(x); | 1429 | __xfrm_state_put(x); |
@@ -1460,8 +1464,12 @@ static int pfkey_delete(struct sock *sk, struct sk_buff *skb, struct sadb_msg *h | |||
1460 | err = -EPERM; | 1464 | err = -EPERM; |
1461 | goto out; | 1465 | goto out; |
1462 | } | 1466 | } |
1463 | 1467 | ||
1464 | err = xfrm_state_delete(x); | 1468 | err = xfrm_state_delete(x); |
1469 | |||
1470 | xfrm_audit_log(audit_get_loginuid(current->audit_context), 0, | ||
1471 | AUDIT_MAC_IPSEC_DELSA, err ? 0 : 1, NULL, x); | ||
1472 | |||
1465 | if (err < 0) | 1473 | if (err < 0) |
1466 | goto out; | 1474 | goto out; |
1467 | 1475 | ||
@@ -1637,12 +1645,15 @@ static int pfkey_flush(struct sock *sk, struct sk_buff *skb, struct sadb_msg *hd | |||
1637 | { | 1645 | { |
1638 | unsigned proto; | 1646 | unsigned proto; |
1639 | struct km_event c; | 1647 | struct km_event c; |
1648 | struct xfrm_audit audit_info; | ||
1640 | 1649 | ||
1641 | proto = pfkey_satype2proto(hdr->sadb_msg_satype); | 1650 | proto = pfkey_satype2proto(hdr->sadb_msg_satype); |
1642 | if (proto == 0) | 1651 | if (proto == 0) |
1643 | return -EINVAL; | 1652 | return -EINVAL; |
1644 | 1653 | ||
1645 | xfrm_state_flush(proto); | 1654 | audit_info.loginuid = audit_get_loginuid(current->audit_context); |
1655 | audit_info.secid = 0; | ||
1656 | xfrm_state_flush(proto, &audit_info); | ||
1646 | c.data.proto = proto; | 1657 | c.data.proto = proto; |
1647 | c.seq = hdr->sadb_msg_seq; | 1658 | c.seq = hdr->sadb_msg_seq; |
1648 | c.pid = hdr->sadb_msg_pid; | 1659 | c.pid = hdr->sadb_msg_pid; |
@@ -2205,6 +2216,9 @@ static int pfkey_spdadd(struct sock *sk, struct sk_buff *skb, struct sadb_msg *h | |||
2205 | err = xfrm_policy_insert(pol->sadb_x_policy_dir-1, xp, | 2216 | err = xfrm_policy_insert(pol->sadb_x_policy_dir-1, xp, |
2206 | hdr->sadb_msg_type != SADB_X_SPDUPDATE); | 2217 | hdr->sadb_msg_type != SADB_X_SPDUPDATE); |
2207 | 2218 | ||
2219 | xfrm_audit_log(audit_get_loginuid(current->audit_context), 0, | ||
2220 | AUDIT_MAC_IPSEC_ADDSPD, err ? 0 : 1, xp, NULL); | ||
2221 | |||
2208 | if (err) | 2222 | if (err) |
2209 | goto out; | 2223 | goto out; |
2210 | 2224 | ||
@@ -2282,6 +2296,10 @@ static int pfkey_spddelete(struct sock *sk, struct sk_buff *skb, struct sadb_msg | |||
2282 | xp = xfrm_policy_bysel_ctx(XFRM_POLICY_TYPE_MAIN, pol->sadb_x_policy_dir-1, | 2296 | xp = xfrm_policy_bysel_ctx(XFRM_POLICY_TYPE_MAIN, pol->sadb_x_policy_dir-1, |
2283 | &sel, tmp.security, 1); | 2297 | &sel, tmp.security, 1); |
2284 | security_xfrm_policy_free(&tmp); | 2298 | security_xfrm_policy_free(&tmp); |
2299 | |||
2300 | xfrm_audit_log(audit_get_loginuid(current->audit_context), 0, | ||
2301 | AUDIT_MAC_IPSEC_DELSPD, (xp) ? 1 : 0, xp, NULL); | ||
2302 | |||
2285 | if (xp == NULL) | 2303 | if (xp == NULL) |
2286 | return -ENOENT; | 2304 | return -ENOENT; |
2287 | 2305 | ||
@@ -2416,8 +2434,11 @@ static int key_notify_policy_flush(struct km_event *c) | |||
2416 | static int pfkey_spdflush(struct sock *sk, struct sk_buff *skb, struct sadb_msg *hdr, void **ext_hdrs) | 2434 | static int pfkey_spdflush(struct sock *sk, struct sk_buff *skb, struct sadb_msg *hdr, void **ext_hdrs) |
2417 | { | 2435 | { |
2418 | struct km_event c; | 2436 | struct km_event c; |
2437 | struct xfrm_audit audit_info; | ||
2419 | 2438 | ||
2420 | xfrm_policy_flush(XFRM_POLICY_TYPE_MAIN); | 2439 | audit_info.loginuid = audit_get_loginuid(current->audit_context); |
2440 | audit_info.secid = 0; | ||
2441 | xfrm_policy_flush(XFRM_POLICY_TYPE_MAIN, &audit_info); | ||
2421 | c.data.type = XFRM_POLICY_TYPE_MAIN; | 2442 | c.data.type = XFRM_POLICY_TYPE_MAIN; |
2422 | c.event = XFRM_MSG_FLUSHPOLICY; | 2443 | c.event = XFRM_MSG_FLUSHPOLICY; |
2423 | c.pid = hdr->sadb_msg_pid; | 2444 | c.pid = hdr->sadb_msg_pid; |
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c index eaa0f8a1adb6..9b02ec4012fb 100644 --- a/net/netfilter/nf_conntrack_core.c +++ b/net/netfilter/nf_conntrack_core.c | |||
@@ -108,7 +108,7 @@ static struct { | |||
108 | size_t size; | 108 | size_t size; |
109 | 109 | ||
110 | /* slab cache pointer */ | 110 | /* slab cache pointer */ |
111 | kmem_cache_t *cachep; | 111 | struct kmem_cache *cachep; |
112 | 112 | ||
113 | /* allocated slab cache + modules which uses this slab cache */ | 113 | /* allocated slab cache + modules which uses this slab cache */ |
114 | int use; | 114 | int use; |
@@ -147,7 +147,7 @@ int nf_conntrack_register_cache(u_int32_t features, const char *name, | |||
147 | { | 147 | { |
148 | int ret = 0; | 148 | int ret = 0; |
149 | char *cache_name; | 149 | char *cache_name; |
150 | kmem_cache_t *cachep; | 150 | struct kmem_cache *cachep; |
151 | 151 | ||
152 | DEBUGP("nf_conntrack_register_cache: features=0x%x, name=%s, size=%d\n", | 152 | DEBUGP("nf_conntrack_register_cache: features=0x%x, name=%s, size=%d\n", |
153 | features, name, size); | 153 | features, name, size); |
@@ -226,7 +226,7 @@ EXPORT_SYMBOL_GPL(nf_conntrack_register_cache); | |||
226 | /* FIXME: In the current, only nf_conntrack_cleanup() can call this function. */ | 226 | /* FIXME: In the current, only nf_conntrack_cleanup() can call this function. */ |
227 | void nf_conntrack_unregister_cache(u_int32_t features) | 227 | void nf_conntrack_unregister_cache(u_int32_t features) |
228 | { | 228 | { |
229 | kmem_cache_t *cachep; | 229 | struct kmem_cache *cachep; |
230 | char *name; | 230 | char *name; |
231 | 231 | ||
232 | /* | 232 | /* |
@@ -1093,7 +1093,7 @@ static void free_conntrack_hash(struct list_head *hash, int vmalloced, int size) | |||
1093 | get_order(sizeof(struct list_head) * size)); | 1093 | get_order(sizeof(struct list_head) * size)); |
1094 | } | 1094 | } |
1095 | 1095 | ||
1096 | void nf_conntrack_flush() | 1096 | void nf_conntrack_flush(void) |
1097 | { | 1097 | { |
1098 | nf_ct_iterate_cleanup(kill_all, NULL); | 1098 | nf_ct_iterate_cleanup(kill_all, NULL); |
1099 | } | 1099 | } |
diff --git a/net/netfilter/nf_conntrack_expect.c b/net/netfilter/nf_conntrack_expect.c index 588d37937046..9cbf926cdd14 100644 --- a/net/netfilter/nf_conntrack_expect.c +++ b/net/netfilter/nf_conntrack_expect.c | |||
@@ -29,7 +29,7 @@ | |||
29 | LIST_HEAD(nf_conntrack_expect_list); | 29 | LIST_HEAD(nf_conntrack_expect_list); |
30 | EXPORT_SYMBOL_GPL(nf_conntrack_expect_list); | 30 | EXPORT_SYMBOL_GPL(nf_conntrack_expect_list); |
31 | 31 | ||
32 | kmem_cache_t *nf_conntrack_expect_cachep __read_mostly; | 32 | struct kmem_cache *nf_conntrack_expect_cachep __read_mostly; |
33 | static unsigned int nf_conntrack_expect_next_id; | 33 | static unsigned int nf_conntrack_expect_next_id; |
34 | 34 | ||
35 | /* nf_conntrack_expect helper functions */ | 35 | /* nf_conntrack_expect helper functions */ |
@@ -91,25 +91,28 @@ EXPORT_SYMBOL_GPL(nf_conntrack_expect_find_get); | |||
91 | struct nf_conntrack_expect * | 91 | struct nf_conntrack_expect * |
92 | find_expectation(const struct nf_conntrack_tuple *tuple) | 92 | find_expectation(const struct nf_conntrack_tuple *tuple) |
93 | { | 93 | { |
94 | struct nf_conntrack_expect *i; | 94 | struct nf_conntrack_expect *exp; |
95 | |||
96 | exp = __nf_conntrack_expect_find(tuple); | ||
97 | if (!exp) | ||
98 | return NULL; | ||
95 | 99 | ||
96 | list_for_each_entry(i, &nf_conntrack_expect_list, list) { | ||
97 | /* If master is not in hash table yet (ie. packet hasn't left | 100 | /* If master is not in hash table yet (ie. packet hasn't left |
98 | this machine yet), how can other end know about expected? | 101 | this machine yet), how can other end know about expected? |
99 | Hence these are not the droids you are looking for (if | 102 | Hence these are not the droids you are looking for (if |
100 | master ct never got confirmed, we'd hold a reference to it | 103 | master ct never got confirmed, we'd hold a reference to it |
101 | and weird things would happen to future packets). */ | 104 | and weird things would happen to future packets). */ |
102 | if (nf_ct_tuple_mask_cmp(tuple, &i->tuple, &i->mask) | 105 | if (!nf_ct_is_confirmed(exp->master)) |
103 | && nf_ct_is_confirmed(i->master)) { | 106 | return NULL; |
104 | if (i->flags & NF_CT_EXPECT_PERMANENT) { | 107 | |
105 | atomic_inc(&i->use); | 108 | if (exp->flags & NF_CT_EXPECT_PERMANENT) { |
106 | return i; | 109 | atomic_inc(&exp->use); |
107 | } else if (del_timer(&i->timeout)) { | 110 | return exp; |
108 | nf_ct_unlink_expect(i); | 111 | } else if (del_timer(&exp->timeout)) { |
109 | return i; | 112 | nf_ct_unlink_expect(exp); |
110 | } | 113 | return exp; |
111 | } | ||
112 | } | 114 | } |
115 | |||
113 | return NULL; | 116 | return NULL; |
114 | } | 117 | } |
115 | 118 | ||
diff --git a/net/netfilter/xt_hashlimit.c b/net/netfilter/xt_hashlimit.c index a98de0b54d65..a5a6e192ac2d 100644 --- a/net/netfilter/xt_hashlimit.c +++ b/net/netfilter/xt_hashlimit.c | |||
@@ -92,7 +92,7 @@ struct xt_hashlimit_htable { | |||
92 | static DEFINE_SPINLOCK(hashlimit_lock); /* protects htables list */ | 92 | static DEFINE_SPINLOCK(hashlimit_lock); /* protects htables list */ |
93 | static DEFINE_MUTEX(hlimit_mutex); /* additional checkentry protection */ | 93 | static DEFINE_MUTEX(hlimit_mutex); /* additional checkentry protection */ |
94 | static HLIST_HEAD(hashlimit_htables); | 94 | static HLIST_HEAD(hashlimit_htables); |
95 | static kmem_cache_t *hashlimit_cachep __read_mostly; | 95 | static struct kmem_cache *hashlimit_cachep __read_mostly; |
96 | 96 | ||
97 | static inline int dst_cmp(const struct dsthash_ent *ent, struct dsthash_dst *b) | 97 | static inline int dst_cmp(const struct dsthash_ent *ent, struct dsthash_dst *b) |
98 | { | 98 | { |
diff --git a/net/netlink/genetlink.c b/net/netlink/genetlink.c index b9b03747c1f3..548e4e6e698f 100644 --- a/net/netlink/genetlink.c +++ b/net/netlink/genetlink.c | |||
@@ -143,6 +143,13 @@ int genl_register_ops(struct genl_family *family, struct genl_ops *ops) | |||
143 | goto errout; | 143 | goto errout; |
144 | } | 144 | } |
145 | 145 | ||
146 | if (ops->dumpit) | ||
147 | ops->flags |= GENL_CMD_CAP_DUMP; | ||
148 | if (ops->doit) | ||
149 | ops->flags |= GENL_CMD_CAP_DO; | ||
150 | if (ops->policy) | ||
151 | ops->flags |= GENL_CMD_CAP_HASPOL; | ||
152 | |||
146 | genl_lock(); | 153 | genl_lock(); |
147 | list_add_tail(&ops->ops_list, &family->ops_list); | 154 | list_add_tail(&ops->ops_list, &family->ops_list); |
148 | genl_unlock(); | 155 | genl_unlock(); |
@@ -387,7 +394,7 @@ static void genl_rcv(struct sock *sk, int len) | |||
387 | static struct genl_family genl_ctrl = { | 394 | static struct genl_family genl_ctrl = { |
388 | .id = GENL_ID_CTRL, | 395 | .id = GENL_ID_CTRL, |
389 | .name = "nlctrl", | 396 | .name = "nlctrl", |
390 | .version = 0x1, | 397 | .version = 0x2, |
391 | .maxattr = CTRL_ATTR_MAX, | 398 | .maxattr = CTRL_ATTR_MAX, |
392 | }; | 399 | }; |
393 | 400 | ||
@@ -425,15 +432,6 @@ static int ctrl_fill_info(struct genl_family *family, u32 pid, u32 seq, | |||
425 | NLA_PUT_U32(skb, CTRL_ATTR_OP_ID, ops->cmd); | 432 | NLA_PUT_U32(skb, CTRL_ATTR_OP_ID, ops->cmd); |
426 | NLA_PUT_U32(skb, CTRL_ATTR_OP_FLAGS, ops->flags); | 433 | NLA_PUT_U32(skb, CTRL_ATTR_OP_FLAGS, ops->flags); |
427 | 434 | ||
428 | if (ops->policy) | ||
429 | NLA_PUT_FLAG(skb, CTRL_ATTR_OP_POLICY); | ||
430 | |||
431 | if (ops->doit) | ||
432 | NLA_PUT_FLAG(skb, CTRL_ATTR_OP_DOIT); | ||
433 | |||
434 | if (ops->dumpit) | ||
435 | NLA_PUT_FLAG(skb, CTRL_ATTR_OP_DUMPIT); | ||
436 | |||
437 | nla_nest_end(skb, nest); | 435 | nla_nest_end(skb, nest); |
438 | } | 436 | } |
439 | 437 | ||
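With this change genl_register_ops() derives the GENL_CMD_CAP_* bits once, from whether doit/dumpit/policy are present, and the control dump simply reports ops->flags instead of emitting separate flag attributes. A minimal sketch of that derive-capabilities-at-registration pattern (plain C; names and flag values are made up):

	#include <stdint.h>
	#include <stdio.h>

	#define CAP_DO     0x1
	#define CAP_DUMP   0x2
	#define CAP_HASPOL 0x4

	struct op {
		int (*doit)(void);
		int (*dumpit)(void);
		const void *policy;
		uint32_t flags;
	};

	/* Fill in capability bits once at registration time so that anything
	 * that later reports the op (e.g. a control dump) can just copy flags. */
	static void register_op(struct op *op)
	{
		if (op->doit)
			op->flags |= CAP_DO;
		if (op->dumpit)
			op->flags |= CAP_DUMP;
		if (op->policy)
			op->flags |= CAP_HASPOL;
	}

	static int do_nothing(void) { return 0; }

	int main(void)
	{
		struct op op = { .doit = do_nothing, .policy = "attrs" };

		register_op(&op);
		printf("flags=0x%x\n", op.flags);   /* 0x5: CAP_DO | CAP_HASPOL */
		return 0;
	}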
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c index 08e68b67bbf6..da73e8a8c18d 100644 --- a/net/packet/af_packet.c +++ b/net/packet/af_packet.c | |||
@@ -660,7 +660,7 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev, struct packe | |||
660 | sll->sll_ifindex = dev->ifindex; | 660 | sll->sll_ifindex = dev->ifindex; |
661 | 661 | ||
662 | h->tp_status = status; | 662 | h->tp_status = status; |
663 | mb(); | 663 | smp_mb(); |
664 | 664 | ||
665 | { | 665 | { |
666 | struct page *p_start, *p_end; | 666 | struct page *p_start, *p_end; |
diff --git a/net/rxrpc/krxiod.c b/net/rxrpc/krxiod.c index dada34a77b21..49effd92144e 100644 --- a/net/rxrpc/krxiod.c +++ b/net/rxrpc/krxiod.c | |||
@@ -13,6 +13,7 @@ | |||
13 | #include <linux/completion.h> | 13 | #include <linux/completion.h> |
14 | #include <linux/spinlock.h> | 14 | #include <linux/spinlock.h> |
15 | #include <linux/init.h> | 15 | #include <linux/init.h> |
16 | #include <linux/freezer.h> | ||
16 | #include <rxrpc/krxiod.h> | 17 | #include <rxrpc/krxiod.h> |
17 | #include <rxrpc/transport.h> | 18 | #include <rxrpc/transport.h> |
18 | #include <rxrpc/peer.h> | 19 | #include <rxrpc/peer.h> |
diff --git a/net/rxrpc/krxsecd.c b/net/rxrpc/krxsecd.c index cea4eb5e2497..3ab0f77409f4 100644 --- a/net/rxrpc/krxsecd.c +++ b/net/rxrpc/krxsecd.c | |||
@@ -27,6 +27,7 @@ | |||
27 | #include <rxrpc/call.h> | 27 | #include <rxrpc/call.h> |
28 | #include <linux/udp.h> | 28 | #include <linux/udp.h> |
29 | #include <linux/ip.h> | 29 | #include <linux/ip.h> |
30 | #include <linux/freezer.h> | ||
30 | #include <net/sock.h> | 31 | #include <net/sock.h> |
31 | #include "internal.h" | 32 | #include "internal.h" |
32 | 33 | ||
diff --git a/net/rxrpc/krxtimod.c b/net/rxrpc/krxtimod.c index 3e7466900bd4..9a9b6132dba4 100644 --- a/net/rxrpc/krxtimod.c +++ b/net/rxrpc/krxtimod.c | |||
@@ -13,6 +13,7 @@ | |||
13 | #include <linux/init.h> | 13 | #include <linux/init.h> |
14 | #include <linux/sched.h> | 14 | #include <linux/sched.h> |
15 | #include <linux/completion.h> | 15 | #include <linux/completion.h> |
16 | #include <linux/freezer.h> | ||
16 | #include <rxrpc/rxrpc.h> | 17 | #include <rxrpc/rxrpc.h> |
17 | #include <rxrpc/krxtimod.h> | 18 | #include <rxrpc/krxtimod.h> |
18 | #include <asm/errno.h> | 19 | #include <asm/errno.h> |
diff --git a/net/sched/cls_fw.c b/net/sched/cls_fw.c index f59a2c4aa039..c797d6ada7de 100644 --- a/net/sched/cls_fw.c +++ b/net/sched/cls_fw.c | |||
@@ -101,9 +101,10 @@ static int fw_classify(struct sk_buff *skb, struct tcf_proto *tp, | |||
101 | struct fw_head *head = (struct fw_head*)tp->root; | 101 | struct fw_head *head = (struct fw_head*)tp->root; |
102 | struct fw_filter *f; | 102 | struct fw_filter *f; |
103 | int r; | 103 | int r; |
104 | u32 id = skb->mark & head->mask; | 104 | u32 id = skb->mark; |
105 | 105 | ||
106 | if (head != NULL) { | 106 | if (head != NULL) { |
107 | id &= head->mask; | ||
107 | for (f=head->ht[fw_hash(id)]; f; f=f->next) { | 108 | for (f=head->ht[fw_hash(id)]; f; f=f->next) { |
108 | if (f->id == id) { | 109 | if (f->id == id) { |
109 | *res = f->res; | 110 | *res = f->res; |
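The cls_fw fix reads skb->mark unconditionally but applies head->mask only after the head != NULL test, instead of dereferencing head while computing the hash key. The same reordering in a self-contained sketch (types are illustrative):

	#include <stdint.h>
	#include <stdio.h>

	struct head { uint32_t mask; };

	/* Read the mark unconditionally, but only dereference head inside the
	 * NULL-checked branch, mirroring the reordering in fw_classify(). */
	static uint32_t classify(const struct head *head, uint32_t mark)
	{
		uint32_t id = mark;

		if (head != NULL)
			id &= head->mask;
		return id;
	}

	int main(void)
	{
		struct head h = { .mask = 0xff };

		printf("%u %u\n", classify(&h, 0x1234), classify(NULL, 0x1234));
		return 0;
	}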
diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c index 11f3b549f4a4..f2ba8615895b 100644 --- a/net/sctp/protocol.c +++ b/net/sctp/protocol.c | |||
@@ -79,8 +79,8 @@ static struct sctp_pf *sctp_pf_inet_specific; | |||
79 | static struct sctp_af *sctp_af_v4_specific; | 79 | static struct sctp_af *sctp_af_v4_specific; |
80 | static struct sctp_af *sctp_af_v6_specific; | 80 | static struct sctp_af *sctp_af_v6_specific; |
81 | 81 | ||
82 | kmem_cache_t *sctp_chunk_cachep __read_mostly; | 82 | struct kmem_cache *sctp_chunk_cachep __read_mostly; |
83 | kmem_cache_t *sctp_bucket_cachep __read_mostly; | 83 | struct kmem_cache *sctp_bucket_cachep __read_mostly; |
84 | 84 | ||
85 | /* Return the address of the control sock. */ | 85 | /* Return the address of the control sock. */ |
86 | struct sock *sctp_get_ctl_sock(void) | 86 | struct sock *sctp_get_ctl_sock(void) |
diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c index 04954e5f6846..30927d3a597f 100644 --- a/net/sctp/sm_make_chunk.c +++ b/net/sctp/sm_make_chunk.c | |||
@@ -65,7 +65,7 @@ | |||
65 | #include <net/sctp/sctp.h> | 65 | #include <net/sctp/sctp.h> |
66 | #include <net/sctp/sm.h> | 66 | #include <net/sctp/sm.h> |
67 | 67 | ||
68 | extern kmem_cache_t *sctp_chunk_cachep; | 68 | extern struct kmem_cache *sctp_chunk_cachep; |
69 | 69 | ||
70 | SCTP_STATIC | 70 | SCTP_STATIC |
71 | struct sctp_chunk *sctp_make_chunk(const struct sctp_association *asoc, | 71 | struct sctp_chunk *sctp_make_chunk(const struct sctp_association *asoc, |
@@ -979,7 +979,7 @@ struct sctp_chunk *sctp_chunkify(struct sk_buff *skb, | |||
979 | { | 979 | { |
980 | struct sctp_chunk *retval; | 980 | struct sctp_chunk *retval; |
981 | 981 | ||
982 | retval = kmem_cache_alloc(sctp_chunk_cachep, SLAB_ATOMIC); | 982 | retval = kmem_cache_alloc(sctp_chunk_cachep, GFP_ATOMIC); |
983 | 983 | ||
984 | if (!retval) | 984 | if (!retval) |
985 | goto nodata; | 985 | goto nodata; |
diff --git a/net/sctp/socket.c b/net/sctp/socket.c index 02b27145b279..1e8132b8c4d9 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c | |||
@@ -107,7 +107,7 @@ static void sctp_sock_migrate(struct sock *, struct sock *, | |||
107 | struct sctp_association *, sctp_socket_type_t); | 107 | struct sctp_association *, sctp_socket_type_t); |
108 | static char *sctp_hmac_alg = SCTP_COOKIE_HMAC_ALG; | 108 | static char *sctp_hmac_alg = SCTP_COOKIE_HMAC_ALG; |
109 | 109 | ||
110 | extern kmem_cache_t *sctp_bucket_cachep; | 110 | extern struct kmem_cache *sctp_bucket_cachep; |
111 | 111 | ||
112 | /* Get the sndbuf space available at the time on the association. */ | 112 | /* Get the sndbuf space available at the time on the association. */ |
113 | static inline int sctp_wspace(struct sctp_association *asoc) | 113 | static inline int sctp_wspace(struct sctp_association *asoc) |
@@ -4989,7 +4989,7 @@ static struct sctp_bind_bucket *sctp_bucket_create( | |||
4989 | { | 4989 | { |
4990 | struct sctp_bind_bucket *pp; | 4990 | struct sctp_bind_bucket *pp; |
4991 | 4991 | ||
4992 | pp = kmem_cache_alloc(sctp_bucket_cachep, SLAB_ATOMIC); | 4992 | pp = kmem_cache_alloc(sctp_bucket_cachep, GFP_ATOMIC); |
4993 | SCTP_DBG_OBJCNT_INC(bind_bucket); | 4993 | SCTP_DBG_OBJCNT_INC(bind_bucket); |
4994 | if (pp) { | 4994 | if (pp) { |
4995 | pp->port = snum; | 4995 | pp->port = snum; |
diff --git a/net/socket.c b/net/socket.c index e8db54702a69..29ea1de43ecb 100644 --- a/net/socket.c +++ b/net/socket.c | |||
@@ -230,13 +230,13 @@ int move_addr_to_user(void *kaddr, int klen, void __user *uaddr, | |||
230 | 230 | ||
231 | #define SOCKFS_MAGIC 0x534F434B | 231 | #define SOCKFS_MAGIC 0x534F434B |
232 | 232 | ||
233 | static kmem_cache_t *sock_inode_cachep __read_mostly; | 233 | static struct kmem_cache *sock_inode_cachep __read_mostly; |
234 | 234 | ||
235 | static struct inode *sock_alloc_inode(struct super_block *sb) | 235 | static struct inode *sock_alloc_inode(struct super_block *sb) |
236 | { | 236 | { |
237 | struct socket_alloc *ei; | 237 | struct socket_alloc *ei; |
238 | 238 | ||
239 | ei = kmem_cache_alloc(sock_inode_cachep, SLAB_KERNEL); | 239 | ei = kmem_cache_alloc(sock_inode_cachep, GFP_KERNEL); |
240 | if (!ei) | 240 | if (!ei) |
241 | return NULL; | 241 | return NULL; |
242 | init_waitqueue_head(&ei->socket.wait); | 242 | init_waitqueue_head(&ei->socket.wait); |
@@ -257,7 +257,7 @@ static void sock_destroy_inode(struct inode *inode) | |||
257 | container_of(inode, struct socket_alloc, vfs_inode)); | 257 | container_of(inode, struct socket_alloc, vfs_inode)); |
258 | } | 258 | } |
259 | 259 | ||
260 | static void init_once(void *foo, kmem_cache_t *cachep, unsigned long flags) | 260 | static void init_once(void *foo, struct kmem_cache *cachep, unsigned long flags) |
261 | { | 261 | { |
262 | struct socket_alloc *ei = (struct socket_alloc *)foo; | 262 | struct socket_alloc *ei = (struct socket_alloc *)foo; |
263 | 263 | ||
@@ -305,7 +305,14 @@ static struct file_system_type sock_fs_type = { | |||
305 | 305 | ||
306 | static int sockfs_delete_dentry(struct dentry *dentry) | 306 | static int sockfs_delete_dentry(struct dentry *dentry) |
307 | { | 307 | { |
308 | return 1; | 308 | /* |
309 | * At creation time, we pretended this dentry was hashed | ||
310 | * (by clearing DCACHE_UNHASHED bit in d_flags) | ||
311 | * At delete time, we restore the truth : not hashed. | ||
312 | * (so that dput() can proceed correctly) | ||
313 | */ | ||
314 | dentry->d_flags |= DCACHE_UNHASHED; | ||
315 | return 0; | ||
309 | } | 316 | } |
310 | static struct dentry_operations sockfs_dentry_operations = { | 317 | static struct dentry_operations sockfs_dentry_operations = { |
311 | .d_delete = sockfs_delete_dentry, | 318 | .d_delete = sockfs_delete_dentry, |
@@ -353,14 +360,20 @@ static int sock_attach_fd(struct socket *sock, struct file *file) | |||
353 | 360 | ||
354 | this.len = sprintf(name, "[%lu]", SOCK_INODE(sock)->i_ino); | 361 | this.len = sprintf(name, "[%lu]", SOCK_INODE(sock)->i_ino); |
355 | this.name = name; | 362 | this.name = name; |
356 | this.hash = SOCK_INODE(sock)->i_ino; | 363 | this.hash = 0; |
357 | 364 | ||
358 | file->f_dentry = d_alloc(sock_mnt->mnt_sb->s_root, &this); | 365 | file->f_dentry = d_alloc(sock_mnt->mnt_sb->s_root, &this); |
359 | if (unlikely(!file->f_dentry)) | 366 | if (unlikely(!file->f_dentry)) |
360 | return -ENOMEM; | 367 | return -ENOMEM; |
361 | 368 | ||
362 | file->f_dentry->d_op = &sockfs_dentry_operations; | 369 | file->f_dentry->d_op = &sockfs_dentry_operations; |
363 | d_add(file->f_dentry, SOCK_INODE(sock)); | 370 | /* |
371 | * We dont want to push this dentry into global dentry hash table. | ||
372 | * We pretend dentry is already hashed, by unsetting DCACHE_UNHASHED | ||
373 | * This permits a working /proc/$pid/fd/XXX on sockets | ||
374 | */ | ||
375 | file->f_dentry->d_flags &= ~DCACHE_UNHASHED; | ||
376 | d_instantiate(file->f_dentry, SOCK_INODE(sock)); | ||
364 | file->f_vfsmnt = mntget(sock_mnt); | 377 | file->f_vfsmnt = mntget(sock_mnt); |
365 | file->f_mapping = file->f_dentry->d_inode->i_mapping; | 378 | file->f_mapping = file->f_dentry->d_inode->i_mapping; |
366 | 379 | ||
diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c index 49dba5febbbd..19703aa9659e 100644 --- a/net/sunrpc/rpc_pipe.c +++ b/net/sunrpc/rpc_pipe.c | |||
@@ -33,7 +33,7 @@ static int rpc_mount_count; | |||
33 | static struct file_system_type rpc_pipe_fs_type; | 33 | static struct file_system_type rpc_pipe_fs_type; |
34 | 34 | ||
35 | 35 | ||
36 | static kmem_cache_t *rpc_inode_cachep __read_mostly; | 36 | static struct kmem_cache *rpc_inode_cachep __read_mostly; |
37 | 37 | ||
38 | #define RPC_UPCALL_TIMEOUT (30*HZ) | 38 | #define RPC_UPCALL_TIMEOUT (30*HZ) |
39 | 39 | ||
@@ -143,7 +143,7 @@ static struct inode * | |||
143 | rpc_alloc_inode(struct super_block *sb) | 143 | rpc_alloc_inode(struct super_block *sb) |
144 | { | 144 | { |
145 | struct rpc_inode *rpci; | 145 | struct rpc_inode *rpci; |
146 | rpci = (struct rpc_inode *)kmem_cache_alloc(rpc_inode_cachep, SLAB_KERNEL); | 146 | rpci = (struct rpc_inode *)kmem_cache_alloc(rpc_inode_cachep, GFP_KERNEL); |
147 | if (!rpci) | 147 | if (!rpci) |
148 | return NULL; | 148 | return NULL; |
149 | return &rpci->vfs_inode; | 149 | return &rpci->vfs_inode; |
@@ -824,7 +824,7 @@ static struct file_system_type rpc_pipe_fs_type = { | |||
824 | }; | 824 | }; |
825 | 825 | ||
826 | static void | 826 | static void |
827 | init_once(void * foo, kmem_cache_t * cachep, unsigned long flags) | 827 | init_once(void * foo, struct kmem_cache * cachep, unsigned long flags) |
828 | { | 828 | { |
829 | struct rpc_inode *rpci = (struct rpc_inode *) foo; | 829 | struct rpc_inode *rpci = (struct rpc_inode *) foo; |
830 | 830 | ||
diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c index 18a33d327012..79bc4cdf5d48 100644 --- a/net/sunrpc/sched.c +++ b/net/sunrpc/sched.c | |||
@@ -34,8 +34,8 @@ static int rpc_task_id; | |||
34 | #define RPC_BUFFER_MAXSIZE (2048) | 34 | #define RPC_BUFFER_MAXSIZE (2048) |
35 | #define RPC_BUFFER_POOLSIZE (8) | 35 | #define RPC_BUFFER_POOLSIZE (8) |
36 | #define RPC_TASK_POOLSIZE (8) | 36 | #define RPC_TASK_POOLSIZE (8) |
37 | static kmem_cache_t *rpc_task_slabp __read_mostly; | 37 | static struct kmem_cache *rpc_task_slabp __read_mostly; |
38 | static kmem_cache_t *rpc_buffer_slabp __read_mostly; | 38 | static struct kmem_cache *rpc_buffer_slabp __read_mostly; |
39 | static mempool_t *rpc_task_mempool __read_mostly; | 39 | static mempool_t *rpc_task_mempool __read_mostly; |
40 | static mempool_t *rpc_buffer_mempool __read_mostly; | 40 | static mempool_t *rpc_buffer_mempool __read_mostly; |
41 | 41 | ||
diff --git a/net/sunrpc/svcauth.c b/net/sunrpc/svcauth.c index ee9bb1522d5e..c7bb5f7f21a5 100644 --- a/net/sunrpc/svcauth.c +++ b/net/sunrpc/svcauth.c | |||
@@ -119,7 +119,8 @@ EXPORT_SYMBOL(svc_auth_unregister); | |||
119 | #define DN_HASHMASK (DN_HASHMAX-1) | 119 | #define DN_HASHMASK (DN_HASHMAX-1) |
120 | 120 | ||
121 | static struct hlist_head auth_domain_table[DN_HASHMAX]; | 121 | static struct hlist_head auth_domain_table[DN_HASHMAX]; |
122 | static spinlock_t auth_domain_lock = SPIN_LOCK_UNLOCKED; | 122 | static spinlock_t auth_domain_lock = |
123 | __SPIN_LOCK_UNLOCKED(auth_domain_lock); | ||
123 | 124 | ||
124 | void auth_domain_put(struct auth_domain *dom) | 125 | void auth_domain_put(struct auth_domain *dom) |
125 | { | 126 | { |
diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c index 64ca1f61dd94..99f54fb6d669 100644 --- a/net/sunrpc/svcsock.c +++ b/net/sunrpc/svcsock.c | |||
@@ -32,6 +32,7 @@ | |||
32 | #include <linux/netdevice.h> | 32 | #include <linux/netdevice.h> |
33 | #include <linux/skbuff.h> | 33 | #include <linux/skbuff.h> |
34 | #include <linux/file.h> | 34 | #include <linux/file.h> |
35 | #include <linux/freezer.h> | ||
35 | #include <net/sock.h> | 36 | #include <net/sock.h> |
36 | #include <net/checksum.h> | 37 | #include <net/checksum.h> |
37 | #include <net/ip.h> | 38 | #include <net/ip.h> |
@@ -84,6 +85,35 @@ static struct cache_deferred_req *svc_defer(struct cache_req *req); | |||
84 | */ | 85 | */ |
85 | static int svc_conn_age_period = 6*60; | 86 | static int svc_conn_age_period = 6*60; |
86 | 87 | ||
88 | #ifdef CONFIG_DEBUG_LOCK_ALLOC | ||
89 | static struct lock_class_key svc_key[2]; | ||
90 | static struct lock_class_key svc_slock_key[2]; | ||
91 | |||
92 | static inline void svc_reclassify_socket(struct socket *sock) | ||
93 | { | ||
94 | struct sock *sk = sock->sk; | ||
95 | BUG_ON(sk->sk_lock.owner != NULL); | ||
96 | switch (sk->sk_family) { | ||
97 | case AF_INET: | ||
98 | sock_lock_init_class_and_name(sk, "slock-AF_INET-NFSD", | ||
99 | &svc_slock_key[0], "sk_lock-AF_INET-NFSD", &svc_key[0]); | ||
100 | break; | ||
101 | |||
102 | case AF_INET6: | ||
103 | sock_lock_init_class_and_name(sk, "slock-AF_INET6-NFSD", | ||
104 | &svc_slock_key[1], "sk_lock-AF_INET6-NFSD", &svc_key[1]); | ||
105 | break; | ||
106 | |||
107 | default: | ||
108 | BUG(); | ||
109 | } | ||
110 | } | ||
111 | #else | ||
112 | static inline void svc_reclassify_socket(struct socket *sock) | ||
113 | { | ||
114 | } | ||
115 | #endif | ||
116 | |||
87 | /* | 117 | /* |
88 | * Queue up an idle server thread. Must have pool->sp_lock held. | 118 | * Queue up an idle server thread. Must have pool->sp_lock held. |
89 | * Note: this is really a stack rather than a queue, so that we only | 119 | * Note: this is really a stack rather than a queue, so that we only |
@@ -1556,6 +1586,8 @@ svc_create_socket(struct svc_serv *serv, int protocol, struct sockaddr_in *sin) | |||
1556 | if ((error = sock_create_kern(PF_INET, type, protocol, &sock)) < 0) | 1586 | if ((error = sock_create_kern(PF_INET, type, protocol, &sock)) < 0) |
1557 | return error; | 1587 | return error; |
1558 | 1588 | ||
1589 | svc_reclassify_socket(sock); | ||
1590 | |||
1559 | if (type == SOCK_STREAM) | 1591 | if (type == SOCK_STREAM) |
1560 | sock->sk->sk_reuse = 1; /* allow address reuse */ | 1592 | sock->sk->sk_reuse = 1; /* allow address reuse */ |
1561 | error = kernel_bind(sock, (struct sockaddr *) sin, | 1593 | error = kernel_bind(sock, (struct sockaddr *) sin, |
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c index 3bb232eb5d90..49cabffd7fdb 100644 --- a/net/sunrpc/xprtsock.c +++ b/net/sunrpc/xprtsock.c | |||
@@ -1173,6 +1173,35 @@ static int xs_bindresvport(struct sock_xprt *transport, struct socket *sock) | |||
1173 | return err; | 1173 | return err; |
1174 | } | 1174 | } |
1175 | 1175 | ||
1176 | #ifdef CONFIG_DEBUG_LOCK_ALLOC | ||
1177 | static struct lock_class_key xs_key[2]; | ||
1178 | static struct lock_class_key xs_slock_key[2]; | ||
1179 | |||
1180 | static inline void xs_reclassify_socket(struct socket *sock) | ||
1181 | { | ||
1182 | struct sock *sk = sock->sk; | ||
1183 | BUG_ON(sk->sk_lock.owner != NULL); | ||
1184 | switch (sk->sk_family) { | ||
1185 | case AF_INET: | ||
1186 | sock_lock_init_class_and_name(sk, "slock-AF_INET-NFS", | ||
1187 | &xs_slock_key[0], "sk_lock-AF_INET-NFS", &xs_key[0]); | ||
1188 | break; | ||
1189 | |||
1190 | case AF_INET6: | ||
1191 | sock_lock_init_class_and_name(sk, "slock-AF_INET6-NFS", | ||
1192 | &xs_slock_key[1], "sk_lock-AF_INET6-NFS", &xs_key[1]); | ||
1193 | break; | ||
1194 | |||
1195 | default: | ||
1196 | BUG(); | ||
1197 | } | ||
1198 | } | ||
1199 | #else | ||
1200 | static inline void xs_reclassify_socket(struct socket *sock) | ||
1201 | { | ||
1202 | } | ||
1203 | #endif | ||
1204 | |||
1176 | /** | 1205 | /** |
1177 | * xs_udp_connect_worker - set up a UDP socket | 1206 | * xs_udp_connect_worker - set up a UDP socket |
1178 | * @work: RPC transport to connect | 1207 | * @work: RPC transport to connect |
@@ -1197,6 +1226,7 @@ static void xs_udp_connect_worker(struct work_struct *work) | |||
1197 | dprintk("RPC: can't create UDP transport socket (%d).\n", -err); | 1226 | dprintk("RPC: can't create UDP transport socket (%d).\n", -err); |
1198 | goto out; | 1227 | goto out; |
1199 | } | 1228 | } |
1229 | xs_reclassify_socket(sock); | ||
1200 | 1230 | ||
1201 | if (xprt->resvport && xs_bindresvport(transport, sock) < 0) { | 1231 | if (xprt->resvport && xs_bindresvport(transport, sock) < 0) { |
1202 | sock_release(sock); | 1232 | sock_release(sock); |
@@ -1282,6 +1312,7 @@ static void xs_tcp_connect_worker(struct work_struct *work) | |||
1282 | dprintk("RPC: can't create TCP transport socket (%d).\n", -err); | 1312 | dprintk("RPC: can't create TCP transport socket (%d).\n", -err); |
1283 | goto out; | 1313 | goto out; |
1284 | } | 1314 | } |
1315 | xs_reclassify_socket(sock); | ||
1285 | 1316 | ||
1286 | if (xprt->resvport && xs_bindresvport(transport, sock) < 0) { | 1317 | if (xprt->resvport && xs_bindresvport(transport, sock) < 0) { |
1287 | sock_release(sock); | 1318 | sock_release(sock); |
diff --git a/net/tipc/handler.c b/net/tipc/handler.c index ae6ddf00a1aa..eb80778d6d9c 100644 --- a/net/tipc/handler.c +++ b/net/tipc/handler.c | |||
@@ -42,7 +42,7 @@ struct queue_item { | |||
42 | unsigned long data; | 42 | unsigned long data; |
43 | }; | 43 | }; |
44 | 44 | ||
45 | static kmem_cache_t *tipc_queue_item_cache; | 45 | static struct kmem_cache *tipc_queue_item_cache; |
46 | static struct list_head signal_queue_head; | 46 | static struct list_head signal_queue_head; |
47 | static DEFINE_SPINLOCK(qitem_lock); | 47 | static DEFINE_SPINLOCK(qitem_lock); |
48 | static int handler_enabled = 0; | 48 | static int handler_enabled = 0; |
diff --git a/net/wanrouter/wanmain.c b/net/wanrouter/wanmain.c index 316211d9f17d..769cdd62c1bb 100644 --- a/net/wanrouter/wanmain.c +++ b/net/wanrouter/wanmain.c | |||
@@ -62,63 +62,6 @@ | |||
62 | 62 | ||
63 | #define KMEM_SAFETYZONE 8 | 63 | #define KMEM_SAFETYZONE 8 |
64 | 64 | ||
65 | /***********FOR DEBUGGING PURPOSES********************************************* | ||
66 | static void * dbg_kmalloc(unsigned int size, int prio, int line) { | ||
67 | int i = 0; | ||
68 | void * v = kmalloc(size+sizeof(unsigned int)+2*KMEM_SAFETYZONE*8,prio); | ||
69 | char * c1 = v; | ||
70 | c1 += sizeof(unsigned int); | ||
71 | *((unsigned int *)v) = size; | ||
72 | |||
73 | for (i = 0; i < KMEM_SAFETYZONE; i++) { | ||
74 | c1[0] = 'D'; c1[1] = 'E'; c1[2] = 'A'; c1[3] = 'D'; | ||
75 | c1[4] = 'B'; c1[5] = 'E'; c1[6] = 'E'; c1[7] = 'F'; | ||
76 | c1 += 8; | ||
77 | } | ||
78 | c1 += size; | ||
79 | for (i = 0; i < KMEM_SAFETYZONE; i++) { | ||
80 | c1[0] = 'M'; c1[1] = 'U'; c1[2] = 'N'; c1[3] = 'G'; | ||
81 | c1[4] = 'W'; c1[5] = 'A'; c1[6] = 'L'; c1[7] = 'L'; | ||
82 | c1 += 8; | ||
83 | } | ||
84 | v = ((char *)v) + sizeof(unsigned int) + KMEM_SAFETYZONE*8; | ||
85 | printk(KERN_INFO "line %d kmalloc(%d,%d) = %p\n",line,size,prio,v); | ||
86 | return v; | ||
87 | } | ||
88 | static void dbg_kfree(void * v, int line) { | ||
89 | unsigned int * sp = (unsigned int *)(((char *)v) - (sizeof(unsigned int) + KMEM_SAFETYZONE*8)); | ||
90 | unsigned int size = *sp; | ||
91 | char * c1 = ((char *)v) - KMEM_SAFETYZONE*8; | ||
92 | int i = 0; | ||
93 | for (i = 0; i < KMEM_SAFETYZONE; i++) { | ||
94 | if ( c1[0] != 'D' || c1[1] != 'E' || c1[2] != 'A' || c1[3] != 'D' | ||
95 | || c1[4] != 'B' || c1[5] != 'E' || c1[6] != 'E' || c1[7] != 'F') { | ||
96 | printk(KERN_INFO "kmalloced block at %p has been corrupted (underrun)!\n",v); | ||
97 | printk(KERN_INFO " %4x: %2x %2x %2x %2x %2x %2x %2x %2x\n", i*8, | ||
98 | c1[0],c1[1],c1[2],c1[3],c1[4],c1[5],c1[6],c1[7] ); | ||
99 | } | ||
100 | c1 += 8; | ||
101 | } | ||
102 | c1 += size; | ||
103 | for (i = 0; i < KMEM_SAFETYZONE; i++) { | ||
104 | if ( c1[0] != 'M' || c1[1] != 'U' || c1[2] != 'N' || c1[3] != 'G' | ||
105 | || c1[4] != 'W' || c1[5] != 'A' || c1[6] != 'L' || c1[7] != 'L' | ||
106 | ) { | ||
107 | printk(KERN_INFO "kmalloced block at %p has been corrupted (overrun):\n",v); | ||
108 | printk(KERN_INFO " %4x: %2x %2x %2x %2x %2x %2x %2x %2x\n", i*8, | ||
109 | c1[0],c1[1],c1[2],c1[3],c1[4],c1[5],c1[6],c1[7] ); | ||
110 | } | ||
111 | c1 += 8; | ||
112 | } | ||
113 | printk(KERN_INFO "line %d kfree(%p)\n",line,v); | ||
114 | v = ((char *)v) - (sizeof(unsigned int) + KMEM_SAFETYZONE*8); | ||
115 | kfree(v); | ||
116 | } | ||
117 | |||
118 | #define kmalloc(x,y) dbg_kmalloc(x,y,__LINE__) | ||
119 | #define kfree(x) dbg_kfree(x,__LINE__) | ||
120 | *****************************************************************************/ | ||
121 | |||
122 | /* | 65 | /* |
123 | * Function Prototypes | 66 | * Function Prototypes |
124 | */ | 67 | */ |
diff --git a/net/xfrm/xfrm_algo.c b/net/xfrm/xfrm_algo.c index 5a0dbeb6bbe8..6b381fc0383d 100644 --- a/net/xfrm/xfrm_algo.c +++ b/net/xfrm/xfrm_algo.c | |||
@@ -119,6 +119,23 @@ static struct xfrm_algo_desc aalg_list[] = { | |||
119 | .sadb_alg_maxbits = 160 | 119 | .sadb_alg_maxbits = 160 |
120 | } | 120 | } |
121 | }, | 121 | }, |
122 | { | ||
123 | .name = "xcbc(aes)", | ||
124 | |||
125 | .uinfo = { | ||
126 | .auth = { | ||
127 | .icv_truncbits = 96, | ||
128 | .icv_fullbits = 128, | ||
129 | } | ||
130 | }, | ||
131 | |||
132 | .desc = { | ||
133 | .sadb_alg_id = SADB_X_AALG_AES_XCBC_MAC, | ||
134 | .sadb_alg_ivlen = 0, | ||
135 | .sadb_alg_minbits = 128, | ||
136 | .sadb_alg_maxbits = 128 | ||
137 | } | ||
138 | }, | ||
122 | }; | 139 | }; |
123 | 140 | ||
124 | static struct xfrm_algo_desc ealg_list[] = { | 141 | static struct xfrm_algo_desc ealg_list[] = { |
diff --git a/net/xfrm/xfrm_input.c b/net/xfrm/xfrm_input.c index e8198a2c785d..414f89070380 100644 --- a/net/xfrm/xfrm_input.c +++ b/net/xfrm/xfrm_input.c | |||
@@ -12,7 +12,7 @@ | |||
12 | #include <net/ip.h> | 12 | #include <net/ip.h> |
13 | #include <net/xfrm.h> | 13 | #include <net/xfrm.h> |
14 | 14 | ||
15 | static kmem_cache_t *secpath_cachep __read_mostly; | 15 | static struct kmem_cache *secpath_cachep __read_mostly; |
16 | 16 | ||
17 | void __secpath_destroy(struct sec_path *sp) | 17 | void __secpath_destroy(struct sec_path *sp) |
18 | { | 18 | { |
@@ -27,7 +27,7 @@ struct sec_path *secpath_dup(struct sec_path *src) | |||
27 | { | 27 | { |
28 | struct sec_path *sp; | 28 | struct sec_path *sp; |
29 | 29 | ||
30 | sp = kmem_cache_alloc(secpath_cachep, SLAB_ATOMIC); | 30 | sp = kmem_cache_alloc(secpath_cachep, GFP_ATOMIC); |
31 | if (!sp) | 31 | if (!sp) |
32 | return NULL; | 32 | return NULL; |
33 | 33 | ||
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c index f6c77bd36fdd..bebd40e5a62e 100644 --- a/net/xfrm/xfrm_policy.c +++ b/net/xfrm/xfrm_policy.c | |||
@@ -25,6 +25,7 @@ | |||
25 | #include <linux/cache.h> | 25 | #include <linux/cache.h> |
26 | #include <net/xfrm.h> | 26 | #include <net/xfrm.h> |
27 | #include <net/ip.h> | 27 | #include <net/ip.h> |
28 | #include <linux/audit.h> | ||
28 | 29 | ||
29 | #include "xfrm_hash.h" | 30 | #include "xfrm_hash.h" |
30 | 31 | ||
@@ -39,7 +40,7 @@ EXPORT_SYMBOL(xfrm_policy_count); | |||
39 | static DEFINE_RWLOCK(xfrm_policy_afinfo_lock); | 40 | static DEFINE_RWLOCK(xfrm_policy_afinfo_lock); |
40 | static struct xfrm_policy_afinfo *xfrm_policy_afinfo[NPROTO]; | 41 | static struct xfrm_policy_afinfo *xfrm_policy_afinfo[NPROTO]; |
41 | 42 | ||
42 | static kmem_cache_t *xfrm_dst_cache __read_mostly; | 43 | static struct kmem_cache *xfrm_dst_cache __read_mostly; |
43 | 44 | ||
44 | static struct work_struct xfrm_policy_gc_work; | 45 | static struct work_struct xfrm_policy_gc_work; |
45 | static HLIST_HEAD(xfrm_policy_gc_list); | 46 | static HLIST_HEAD(xfrm_policy_gc_list); |
@@ -804,7 +805,7 @@ struct xfrm_policy *xfrm_policy_byid(u8 type, int dir, u32 id, int delete) | |||
804 | } | 805 | } |
805 | EXPORT_SYMBOL(xfrm_policy_byid); | 806 | EXPORT_SYMBOL(xfrm_policy_byid); |
806 | 807 | ||
807 | void xfrm_policy_flush(u8 type) | 808 | void xfrm_policy_flush(u8 type, struct xfrm_audit *audit_info) |
808 | { | 809 | { |
809 | int dir; | 810 | int dir; |
810 | 811 | ||
@@ -824,6 +825,9 @@ void xfrm_policy_flush(u8 type) | |||
824 | hlist_del(&pol->byidx); | 825 | hlist_del(&pol->byidx); |
825 | write_unlock_bh(&xfrm_policy_lock); | 826 | write_unlock_bh(&xfrm_policy_lock); |
826 | 827 | ||
828 | xfrm_audit_log(audit_info->loginuid, audit_info->secid, | ||
829 | AUDIT_MAC_IPSEC_DELSPD, 1, pol, NULL); | ||
830 | |||
827 | xfrm_policy_kill(pol); | 831 | xfrm_policy_kill(pol); |
828 | killed++; | 832 | killed++; |
829 | 833 | ||
@@ -842,6 +846,11 @@ void xfrm_policy_flush(u8 type) | |||
842 | hlist_del(&pol->byidx); | 846 | hlist_del(&pol->byidx); |
843 | write_unlock_bh(&xfrm_policy_lock); | 847 | write_unlock_bh(&xfrm_policy_lock); |
844 | 848 | ||
849 | xfrm_audit_log(audit_info->loginuid, | ||
850 | audit_info->secid, | ||
851 | AUDIT_MAC_IPSEC_DELSPD, 1, | ||
852 | pol, NULL); | ||
853 | |||
845 | xfrm_policy_kill(pol); | 854 | xfrm_policy_kill(pol); |
846 | killed++; | 855 | killed++; |
847 | 856 | ||
@@ -860,33 +869,12 @@ EXPORT_SYMBOL(xfrm_policy_flush); | |||
860 | int xfrm_policy_walk(u8 type, int (*func)(struct xfrm_policy *, int, int, void*), | 869 | int xfrm_policy_walk(u8 type, int (*func)(struct xfrm_policy *, int, int, void*), |
861 | void *data) | 870 | void *data) |
862 | { | 871 | { |
863 | struct xfrm_policy *pol; | 872 | struct xfrm_policy *pol, *last = NULL; |
864 | struct hlist_node *entry; | 873 | struct hlist_node *entry; |
865 | int dir, count, error; | 874 | int dir, last_dir = 0, count, error; |
866 | 875 | ||
867 | read_lock_bh(&xfrm_policy_lock); | 876 | read_lock_bh(&xfrm_policy_lock); |
868 | count = 0; | 877 | count = 0; |
869 | for (dir = 0; dir < 2*XFRM_POLICY_MAX; dir++) { | ||
870 | struct hlist_head *table = xfrm_policy_bydst[dir].table; | ||
871 | int i; | ||
872 | |||
873 | hlist_for_each_entry(pol, entry, | ||
874 | &xfrm_policy_inexact[dir], bydst) { | ||
875 | if (pol->type == type) | ||
876 | count++; | ||
877 | } | ||
878 | for (i = xfrm_policy_bydst[dir].hmask; i >= 0; i--) { | ||
879 | hlist_for_each_entry(pol, entry, table + i, bydst) { | ||
880 | if (pol->type == type) | ||
881 | count++; | ||
882 | } | ||
883 | } | ||
884 | } | ||
885 | |||
886 | if (count == 0) { | ||
887 | error = -ENOENT; | ||
888 | goto out; | ||
889 | } | ||
890 | 878 | ||
891 | for (dir = 0; dir < 2*XFRM_POLICY_MAX; dir++) { | 879 | for (dir = 0; dir < 2*XFRM_POLICY_MAX; dir++) { |
892 | struct hlist_head *table = xfrm_policy_bydst[dir].table; | 880 | struct hlist_head *table = xfrm_policy_bydst[dir].table; |
@@ -896,21 +884,37 @@ int xfrm_policy_walk(u8 type, int (*func)(struct xfrm_policy *, int, int, void*) | |||
896 | &xfrm_policy_inexact[dir], bydst) { | 884 | &xfrm_policy_inexact[dir], bydst) { |
897 | if (pol->type != type) | 885 | if (pol->type != type) |
898 | continue; | 886 | continue; |
899 | error = func(pol, dir % XFRM_POLICY_MAX, --count, data); | 887 | if (last) { |
900 | if (error) | 888 | error = func(last, last_dir % XFRM_POLICY_MAX, |
901 | goto out; | 889 | count, data); |
890 | if (error) | ||
891 | goto out; | ||
892 | } | ||
893 | last = pol; | ||
894 | last_dir = dir; | ||
895 | count++; | ||
902 | } | 896 | } |
903 | for (i = xfrm_policy_bydst[dir].hmask; i >= 0; i--) { | 897 | for (i = xfrm_policy_bydst[dir].hmask; i >= 0; i--) { |
904 | hlist_for_each_entry(pol, entry, table + i, bydst) { | 898 | hlist_for_each_entry(pol, entry, table + i, bydst) { |
905 | if (pol->type != type) | 899 | if (pol->type != type) |
906 | continue; | 900 | continue; |
907 | error = func(pol, dir % XFRM_POLICY_MAX, --count, data); | 901 | if (last) { |
908 | if (error) | 902 | error = func(last, last_dir % XFRM_POLICY_MAX, |
909 | goto out; | 903 | count, data); |
904 | if (error) | ||
905 | goto out; | ||
906 | } | ||
907 | last = pol; | ||
908 | last_dir = dir; | ||
909 | count++; | ||
910 | } | 910 | } |
911 | } | 911 | } |
912 | } | 912 | } |
913 | error = 0; | 913 | if (count == 0) { |
914 | error = -ENOENT; | ||
915 | goto out; | ||
916 | } | ||
917 | error = func(last, last_dir % XFRM_POLICY_MAX, 0, data); | ||
914 | out: | 918 | out: |
915 | read_unlock_bh(&xfrm_policy_lock); | 919 | read_unlock_bh(&xfrm_policy_lock); |
916 | return error; | 920 | return error; |
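xfrm_policy_walk() is restructured above so the callback runs one policy behind the scan and is invoked a final time after the loops with a count of 0, letting the last entry be flagged without the separate counting pass the old code needed. A generic sketch of that delay-by-one walk (an array in place of the policy hash tables; names invented):

	#include <stdio.h>

	/* Walk an array, invoking cb() one element behind so the final call can
	 * be flagged (remaining == 0 mirrors the final func(last, ..., 0, data)). */
	static int walk(const int *v, int n,
			int (*cb)(int value, int idx, int remaining, void *data),
			void *data)
	{
		int have_last = 0, last = 0, count = 0, err;

		for (int i = 0; i < n; i++) {
			if (have_last) {
				err = cb(last, count - 1, count, data);
				if (err)
					return err;
			}
			last = v[i];
			have_last = 1;
			count++;
		}
		if (count == 0)
			return -1;                      /* nothing matched */
		return cb(last, count - 1, 0, data);    /* 0 marks the final entry */
	}

	static int print_cb(int value, int idx, int remaining, void *data)
	{
		(void)data;
		printf("entry %d = %d (%s)\n", idx, value,
		       remaining ? "more to come" : "last");
		return 0;
	}

	int main(void)
	{
		int v[] = { 7, 11, 13 };

		return walk(v, 3, print_cb, NULL) < 0;
	}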
@@ -1982,6 +1986,117 @@ int xfrm_bundle_ok(struct xfrm_policy *pol, struct xfrm_dst *first, | |||
1982 | 1986 | ||
1983 | EXPORT_SYMBOL(xfrm_bundle_ok); | 1987 | EXPORT_SYMBOL(xfrm_bundle_ok); |
1984 | 1988 | ||
1989 | #ifdef CONFIG_AUDITSYSCALL | ||
1990 | /* Audit addition and deletion of SAs and ipsec policy */ | ||
1991 | |||
1992 | void xfrm_audit_log(uid_t auid, u32 sid, int type, int result, | ||
1993 | struct xfrm_policy *xp, struct xfrm_state *x) | ||
1994 | { | ||
1995 | |||
1996 | char *secctx; | ||
1997 | u32 secctx_len; | ||
1998 | struct xfrm_sec_ctx *sctx = NULL; | ||
1999 | struct audit_buffer *audit_buf; | ||
2000 | int family; | ||
2001 | extern int audit_enabled; | ||
2002 | |||
2003 | if (audit_enabled == 0) | ||
2004 | return; | ||
2005 | |||
2006 | audit_buf = audit_log_start(current->audit_context, GFP_ATOMIC, type); | ||
2007 | if (audit_buf == NULL) | ||
2008 | return; | ||
2009 | |||
2010 | switch(type) { | ||
2011 | case AUDIT_MAC_IPSEC_ADDSA: | ||
2012 | audit_log_format(audit_buf, "SAD add: auid=%u", auid); | ||
2013 | break; | ||
2014 | case AUDIT_MAC_IPSEC_DELSA: | ||
2015 | audit_log_format(audit_buf, "SAD delete: auid=%u", auid); | ||
2016 | break; | ||
2017 | case AUDIT_MAC_IPSEC_ADDSPD: | ||
2018 | audit_log_format(audit_buf, "SPD add: auid=%u", auid); | ||
2019 | break; | ||
2020 | case AUDIT_MAC_IPSEC_DELSPD: | ||
2021 | audit_log_format(audit_buf, "SPD delete: auid=%u", auid); | ||
2022 | break; | ||
2023 | default: | ||
2024 | return; | ||
2025 | } | ||
2026 | |||
2027 | if (sid != 0 && | ||
2028 | security_secid_to_secctx(sid, &secctx, &secctx_len) == 0) | ||
2029 | audit_log_format(audit_buf, " subj=%s", secctx); | ||
2030 | else | ||
2031 | audit_log_task_context(audit_buf); | ||
2032 | |||
2033 | if (xp) { | ||
2034 | family = xp->selector.family; | ||
2035 | if (xp->security) | ||
2036 | sctx = xp->security; | ||
2037 | } else { | ||
2038 | family = x->props.family; | ||
2039 | if (x->security) | ||
2040 | sctx = x->security; | ||
2041 | } | ||
2042 | |||
2043 | if (sctx) | ||
2044 | audit_log_format(audit_buf, | ||
2045 | " sec_alg=%u sec_doi=%u sec_obj=%s", | ||
2046 | sctx->ctx_alg, sctx->ctx_doi, sctx->ctx_str); | ||
2047 | |||
2048 | switch(family) { | ||
2049 | case AF_INET: | ||
2050 | { | ||
2051 | struct in_addr saddr, daddr; | ||
2052 | if (xp) { | ||
2053 | saddr.s_addr = xp->selector.saddr.a4; | ||
2054 | daddr.s_addr = xp->selector.daddr.a4; | ||
2055 | } else { | ||
2056 | saddr.s_addr = x->props.saddr.a4; | ||
2057 | daddr.s_addr = x->id.daddr.a4; | ||
2058 | } | ||
2059 | audit_log_format(audit_buf, | ||
2060 | " src=%u.%u.%u.%u dst=%u.%u.%u.%u", | ||
2061 | NIPQUAD(saddr), NIPQUAD(daddr)); | ||
2062 | } | ||
2063 | break; | ||
2064 | case AF_INET6: | ||
2065 | { | ||
2066 | struct in6_addr saddr6, daddr6; | ||
2067 | if (xp) { | ||
2068 | memcpy(&saddr6, xp->selector.saddr.a6, | ||
2069 | sizeof(struct in6_addr)); | ||
2070 | memcpy(&daddr6, xp->selector.daddr.a6, | ||
2071 | sizeof(struct in6_addr)); | ||
2072 | } else { | ||
2073 | memcpy(&saddr6, x->props.saddr.a6, | ||
2074 | sizeof(struct in6_addr)); | ||
2075 | memcpy(&daddr6, x->id.daddr.a6, | ||
2076 | sizeof(struct in6_addr)); | ||
2077 | } | ||
2078 | audit_log_format(audit_buf, | ||
2079 | " src=" NIP6_FMT "dst=" NIP6_FMT, | ||
2080 | NIP6(saddr6), NIP6(daddr6)); | ||
2081 | } | ||
2082 | break; | ||
2083 | } | ||
2084 | |||
2085 | if (x) | ||
2086 | audit_log_format(audit_buf, " spi=%lu(0x%lx) protocol=%s", | ||
2087 | (unsigned long)ntohl(x->id.spi), | ||
2088 | (unsigned long)ntohl(x->id.spi), | ||
2089 | x->id.proto == IPPROTO_AH ? "AH" : | ||
2090 | (x->id.proto == IPPROTO_ESP ? | ||
2091 | "ESP" : "IPCOMP")); | ||
2092 | |||
2093 | audit_log_format(audit_buf, " res=%u", result); | ||
2094 | audit_log_end(audit_buf); | ||
2095 | } | ||
2096 | |||
2097 | EXPORT_SYMBOL(xfrm_audit_log); | ||
2098 | #endif /* CONFIG_AUDITSYSCALL */ | ||
2099 | |||
1985 | int xfrm_policy_register_afinfo(struct xfrm_policy_afinfo *afinfo) | 2100 | int xfrm_policy_register_afinfo(struct xfrm_policy_afinfo *afinfo) |
1986 | { | 2101 | { |
1987 | int err = 0; | 2102 | int err = 0; |
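For reference, the IPv4 branch of xfrm_audit_log() above emits a record of the form `SAD add: auid=... src=... dst=... spi=<dec>(0x<hex>) protocol=... res=...`. The userspace sketch below is an illustration of that layout only, with `inet_ntop()` standing in for the kernel's NIPQUAD() formatting; none of the names or values here come from the patch itself.

```c
/* Userspace illustration (assumption, not kernel code) of the record layout
 * xfrm_audit_log() produces for an IPv4 SA add; field order follows the
 * audit_log_format() calls in the patch above. */
#include <arpa/inet.h>
#include <netinet/in.h>
#include <stdio.h>

int main(void)
{
	unsigned int auid = 500, result = 1;
	struct in_addr saddr = { .s_addr = htonl(0x0a000001) };	/* 10.0.0.1 */
	struct in_addr daddr = { .s_addr = htonl(0x0a000002) };	/* 10.0.0.2 */
	unsigned long spi = 0x29a;	/* already converted to host order */
	char src[INET_ADDRSTRLEN], dst[INET_ADDRSTRLEN];

	inet_ntop(AF_INET, &saddr, src, sizeof(src));
	inet_ntop(AF_INET, &daddr, dst, sizeof(dst));

	/* The SPI is printed twice, once decimal and once hex, as above. */
	printf("SAD add: auid=%u src=%s dst=%s spi=%lu(0x%lx) protocol=%s res=%u\n",
	       auid, src, dst, spi, spi, "ESP", result);
	return 0;
}
```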
diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c index da54a64ccfa3..fdb08d9f34aa 100644 --- a/net/xfrm/xfrm_state.c +++ b/net/xfrm/xfrm_state.c | |||
@@ -20,6 +20,7 @@ | |||
20 | #include <linux/module.h> | 20 | #include <linux/module.h> |
21 | #include <linux/cache.h> | 21 | #include <linux/cache.h> |
22 | #include <asm/uaccess.h> | 22 | #include <asm/uaccess.h> |
23 | #include <linux/audit.h> | ||
23 | 24 | ||
24 | #include "xfrm_hash.h" | 25 | #include "xfrm_hash.h" |
25 | 26 | ||
@@ -238,6 +239,7 @@ static void xfrm_timer_handler(unsigned long data) | |||
238 | unsigned long now = (unsigned long)xtime.tv_sec; | 239 | unsigned long now = (unsigned long)xtime.tv_sec; |
239 | long next = LONG_MAX; | 240 | long next = LONG_MAX; |
240 | int warn = 0; | 241 | int warn = 0; |
242 | int err = 0; | ||
241 | 243 | ||
242 | spin_lock(&x->lock); | 244 | spin_lock(&x->lock); |
243 | if (x->km.state == XFRM_STATE_DEAD) | 245 | if (x->km.state == XFRM_STATE_DEAD) |
@@ -295,9 +297,14 @@ expired: | |||
295 | next = 2; | 297 | next = 2; |
296 | goto resched; | 298 | goto resched; |
297 | } | 299 | } |
298 | if (!__xfrm_state_delete(x) && x->id.spi) | 300 | |
301 | err = __xfrm_state_delete(x); | ||
302 | if (!err && x->id.spi) | ||
299 | km_state_expired(x, 1, 0); | 303 | km_state_expired(x, 1, 0); |
300 | 304 | ||
305 | xfrm_audit_log(audit_get_loginuid(current->audit_context), 0, | ||
306 | AUDIT_MAC_IPSEC_DELSA, err ? 0 : 1, NULL, x); | ||
307 | |||
301 | out: | 308 | out: |
302 | spin_unlock(&x->lock); | 309 | spin_unlock(&x->lock); |
303 | } | 310 | } |
@@ -384,9 +391,10 @@ int xfrm_state_delete(struct xfrm_state *x) | |||
384 | } | 391 | } |
385 | EXPORT_SYMBOL(xfrm_state_delete); | 392 | EXPORT_SYMBOL(xfrm_state_delete); |
386 | 393 | ||
387 | void xfrm_state_flush(u8 proto) | 394 | void xfrm_state_flush(u8 proto, struct xfrm_audit *audit_info) |
388 | { | 395 | { |
389 | int i; | 396 | int i; |
397 | int err = 0; | ||
390 | 398 | ||
391 | spin_lock_bh(&xfrm_state_lock); | 399 | spin_lock_bh(&xfrm_state_lock); |
392 | for (i = 0; i <= xfrm_state_hmask; i++) { | 400 | for (i = 0; i <= xfrm_state_hmask; i++) { |
@@ -399,7 +407,11 @@ restart: | |||
399 | xfrm_state_hold(x); | 407 | xfrm_state_hold(x); |
400 | spin_unlock_bh(&xfrm_state_lock); | 408 | spin_unlock_bh(&xfrm_state_lock); |
401 | 409 | ||
402 | xfrm_state_delete(x); | 410 | err = xfrm_state_delete(x); |
411 | xfrm_audit_log(audit_info->loginuid, | ||
412 | audit_info->secid, | ||
413 | AUDIT_MAC_IPSEC_DELSA, | ||
414 | err ? 0 : 1, NULL, x); | ||
403 | xfrm_state_put(x); | 415 | xfrm_state_put(x); |
404 | 416 | ||
405 | spin_lock_bh(&xfrm_state_lock); | 417 | spin_lock_bh(&xfrm_state_lock); |
@@ -1099,7 +1111,7 @@ int xfrm_state_walk(u8 proto, int (*func)(struct xfrm_state *, int, void*), | |||
1099 | void *data) | 1111 | void *data) |
1100 | { | 1112 | { |
1101 | int i; | 1113 | int i; |
1102 | struct xfrm_state *x; | 1114 | struct xfrm_state *x, *last = NULL; |
1103 | struct hlist_node *entry; | 1115 | struct hlist_node *entry; |
1104 | int count = 0; | 1116 | int count = 0; |
1105 | int err = 0; | 1117 | int err = 0; |
@@ -1107,24 +1119,22 @@ int xfrm_state_walk(u8 proto, int (*func)(struct xfrm_state *, int, void*), | |||
1107 | spin_lock_bh(&xfrm_state_lock); | 1119 | spin_lock_bh(&xfrm_state_lock); |
1108 | for (i = 0; i <= xfrm_state_hmask; i++) { | 1120 | for (i = 0; i <= xfrm_state_hmask; i++) { |
1109 | hlist_for_each_entry(x, entry, xfrm_state_bydst+i, bydst) { | 1121 | hlist_for_each_entry(x, entry, xfrm_state_bydst+i, bydst) { |
1110 | if (xfrm_id_proto_match(x->id.proto, proto)) | 1122 | if (!xfrm_id_proto_match(x->id.proto, proto)) |
1111 | count++; | 1123 | continue; |
1124 | if (last) { | ||
1125 | err = func(last, count, data); | ||
1126 | if (err) | ||
1127 | goto out; | ||
1128 | } | ||
1129 | last = x; | ||
1130 | count++; | ||
1112 | } | 1131 | } |
1113 | } | 1132 | } |
1114 | if (count == 0) { | 1133 | if (count == 0) { |
1115 | err = -ENOENT; | 1134 | err = -ENOENT; |
1116 | goto out; | 1135 | goto out; |
1117 | } | 1136 | } |
1118 | 1137 | err = func(last, 0, data); | |
1119 | for (i = 0; i <= xfrm_state_hmask; i++) { | ||
1120 | hlist_for_each_entry(x, entry, xfrm_state_bydst+i, bydst) { | ||
1121 | if (!xfrm_id_proto_match(x->id.proto, proto)) | ||
1122 | continue; | ||
1123 | err = func(x, --count, data); | ||
1124 | if (err) | ||
1125 | goto out; | ||
1126 | } | ||
1127 | } | ||
1128 | out: | 1138 | out: |
1129 | spin_unlock_bh(&xfrm_state_lock); | 1139 | spin_unlock_bh(&xfrm_state_lock); |
1130 | return err; | 1140 | return err; |
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c index 311205ffa775..e5372b11fc8f 100644 --- a/net/xfrm/xfrm_user.c +++ b/net/xfrm/xfrm_user.c | |||
@@ -31,6 +31,7 @@ | |||
31 | #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) | 31 | #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) |
32 | #include <linux/in6.h> | 32 | #include <linux/in6.h> |
33 | #endif | 33 | #endif |
34 | #include <linux/audit.h> | ||
34 | 35 | ||
35 | static int verify_one_alg(struct rtattr **xfrma, enum xfrm_attr_type_t type) | 36 | static int verify_one_alg(struct rtattr **xfrma, enum xfrm_attr_type_t type) |
36 | { | 37 | { |
@@ -454,6 +455,9 @@ static int xfrm_add_sa(struct sk_buff *skb, struct nlmsghdr *nlh, void **xfrma) | |||
454 | else | 455 | else |
455 | err = xfrm_state_update(x); | 456 | err = xfrm_state_update(x); |
456 | 457 | ||
458 | xfrm_audit_log(NETLINK_CB(skb).loginuid, NETLINK_CB(skb).sid, | ||
459 | AUDIT_MAC_IPSEC_ADDSA, err ? 0 : 1, NULL, x); | ||
460 | |||
457 | if (err < 0) { | 461 | if (err < 0) { |
458 | x->km.state = XFRM_STATE_DEAD; | 462 | x->km.state = XFRM_STATE_DEAD; |
459 | __xfrm_state_put(x); | 463 | __xfrm_state_put(x); |
@@ -523,6 +527,10 @@ static int xfrm_del_sa(struct sk_buff *skb, struct nlmsghdr *nlh, void **xfrma) | |||
523 | } | 527 | } |
524 | 528 | ||
525 | err = xfrm_state_delete(x); | 529 | err = xfrm_state_delete(x); |
530 | |||
531 | xfrm_audit_log(NETLINK_CB(skb).loginuid, NETLINK_CB(skb).sid, | ||
532 | AUDIT_MAC_IPSEC_DELSA, err ? 0 : 1, NULL, x); | ||
533 | |||
526 | if (err < 0) | 534 | if (err < 0) |
527 | goto out; | 535 | goto out; |
528 | 536 | ||
@@ -1030,6 +1038,9 @@ static int xfrm_add_policy(struct sk_buff *skb, struct nlmsghdr *nlh, void **xfr | |||
1030 | * a type XFRM_MSG_UPDPOLICY - JHS */ | 1038 | * a type XFRM_MSG_UPDPOLICY - JHS */ |
1031 | excl = nlh->nlmsg_type == XFRM_MSG_NEWPOLICY; | 1039 | excl = nlh->nlmsg_type == XFRM_MSG_NEWPOLICY; |
1032 | err = xfrm_policy_insert(p->dir, xp, excl); | 1040 | err = xfrm_policy_insert(p->dir, xp, excl); |
1041 | xfrm_audit_log(NETLINK_CB(skb).loginuid, NETLINK_CB(skb).sid, | ||
1042 | AUDIT_MAC_IPSEC_ADDSPD, err ? 0 : 1, xp, NULL); | ||
1043 | |||
1033 | if (err) { | 1044 | if (err) { |
1034 | security_xfrm_policy_free(xp); | 1045 | security_xfrm_policy_free(xp); |
1035 | kfree(xp); | 1046 | kfree(xp); |
@@ -1257,6 +1268,10 @@ static int xfrm_get_policy(struct sk_buff *skb, struct nlmsghdr *nlh, void **xfr | |||
1257 | xp = xfrm_policy_bysel_ctx(type, p->dir, &p->sel, tmp.security, delete); | 1268 | xp = xfrm_policy_bysel_ctx(type, p->dir, &p->sel, tmp.security, delete); |
1258 | security_xfrm_policy_free(&tmp); | 1269 | security_xfrm_policy_free(&tmp); |
1259 | } | 1270 | } |
1271 | if (delete) | ||
1272 | xfrm_audit_log(NETLINK_CB(skb).loginuid, NETLINK_CB(skb).sid, | ||
1273 | AUDIT_MAC_IPSEC_DELSPD, (xp) ? 1 : 0, xp, NULL); | ||
1274 | |||
1260 | if (xp == NULL) | 1275 | if (xp == NULL) |
1261 | return -ENOENT; | 1276 | return -ENOENT; |
1262 | 1277 | ||
@@ -1291,8 +1306,11 @@ static int xfrm_flush_sa(struct sk_buff *skb, struct nlmsghdr *nlh, void **xfrma | |||
1291 | { | 1306 | { |
1292 | struct km_event c; | 1307 | struct km_event c; |
1293 | struct xfrm_usersa_flush *p = NLMSG_DATA(nlh); | 1308 | struct xfrm_usersa_flush *p = NLMSG_DATA(nlh); |
1309 | struct xfrm_audit audit_info; | ||
1294 | 1310 | ||
1295 | xfrm_state_flush(p->proto); | 1311 | audit_info.loginuid = NETLINK_CB(skb).loginuid; |
1312 | audit_info.secid = NETLINK_CB(skb).sid; | ||
1313 | xfrm_state_flush(p->proto, &audit_info); | ||
1296 | c.data.proto = p->proto; | 1314 | c.data.proto = p->proto; |
1297 | c.event = nlh->nlmsg_type; | 1315 | c.event = nlh->nlmsg_type; |
1298 | c.seq = nlh->nlmsg_seq; | 1316 | c.seq = nlh->nlmsg_seq; |
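The flush paths now carry the requesting netlink socket's login uid and security id (filled from NETLINK_CB(skb) above) down into the flush loop, so every state or policy removed on behalf of the request gets its own attributed audit record. A standalone sketch of that idea follows; `struct audit_ctx`, `flush()`, and `audit_delete()` are made-up names standing in for struct xfrm_audit, xfrm_state_flush(), and xfrm_audit_log().

```c
/* Standalone sketch (illustrative only): the flush request carries the
 * requester's identity into the loop so each deletion can be audited. */
#include <stdio.h>

struct audit_ctx {		/* stands in for struct xfrm_audit */
	unsigned int loginuid;
	unsigned int secid;
};

static void audit_delete(const struct audit_ctx *ctx, int id, int ok)
{
	printf("SAD delete: auid=%u secid=%u entry=%d res=%d\n",
	       ctx->loginuid, ctx->secid, id, ok);
}

static void flush(int *table, int n, const struct audit_ctx *ctx)
{
	for (int i = 0; i < n; i++) {
		if (table[i] == 0)
			continue;
		table[i] = 0;			/* "delete" the entry */
		audit_delete(ctx, i, 1);	/* one record per deletion */
	}
}

int main(void)
{
	int table[] = { 1, 0, 1, 1 };
	struct audit_ctx ctx = { .loginuid = 500, .secid = 0 };

	flush(table, 4, &ctx);
	return 0;
}
```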
@@ -1442,12 +1460,15 @@ static int xfrm_flush_policy(struct sk_buff *skb, struct nlmsghdr *nlh, void **x | |||
1442 | struct km_event c; | 1460 | struct km_event c; |
1443 | u8 type = XFRM_POLICY_TYPE_MAIN; | 1461 | u8 type = XFRM_POLICY_TYPE_MAIN; |
1444 | int err; | 1462 | int err; |
1463 | struct xfrm_audit audit_info; | ||
1445 | 1464 | ||
1446 | err = copy_from_user_policy_type(&type, (struct rtattr **)xfrma); | 1465 | err = copy_from_user_policy_type(&type, (struct rtattr **)xfrma); |
1447 | if (err) | 1466 | if (err) |
1448 | return err; | 1467 | return err; |
1449 | 1468 | ||
1450 | xfrm_policy_flush(type); | 1469 | audit_info.loginuid = NETLINK_CB(skb).loginuid; |
1470 | audit_info.secid = NETLINK_CB(skb).sid; | ||
1471 | xfrm_policy_flush(type, &audit_info); | ||
1451 | c.data.type = type; | 1472 | c.data.type = type; |
1452 | c.event = nlh->nlmsg_type; | 1473 | c.event = nlh->nlmsg_type; |
1453 | c.seq = nlh->nlmsg_seq; | 1474 | c.seq = nlh->nlmsg_seq; |
@@ -1502,6 +1523,9 @@ static int xfrm_add_pol_expire(struct sk_buff *skb, struct nlmsghdr *nlh, void * | |||
1502 | err = 0; | 1523 | err = 0; |
1503 | if (up->hard) { | 1524 | if (up->hard) { |
1504 | xfrm_policy_delete(xp, p->dir); | 1525 | xfrm_policy_delete(xp, p->dir); |
1526 | xfrm_audit_log(NETLINK_CB(skb).loginuid, NETLINK_CB(skb).sid, | ||
1527 | AUDIT_MAC_IPSEC_DELSPD, 1, xp, NULL); | ||
1528 | |||
1505 | } else { | 1529 | } else { |
1506 | // reset the timers here? | 1530 | // reset the timers here? |
1507 | printk("Dont know what to do with soft policy expire\n"); | 1531 | printk("Dont know what to do with soft policy expire\n"); |
@@ -1533,8 +1557,11 @@ static int xfrm_add_sa_expire(struct sk_buff *skb, struct nlmsghdr *nlh, void ** | |||
1533 | goto out; | 1557 | goto out; |
1534 | km_state_expired(x, ue->hard, current->pid); | 1558 | km_state_expired(x, ue->hard, current->pid); |
1535 | 1559 | ||
1536 | if (ue->hard) | 1560 | if (ue->hard) { |
1537 | __xfrm_state_delete(x); | 1561 | __xfrm_state_delete(x); |
1562 | xfrm_audit_log(NETLINK_CB(skb).loginuid, NETLINK_CB(skb).sid, | ||
1563 | AUDIT_MAC_IPSEC_DELSA, 1, NULL, x); | ||
1564 | } | ||
1538 | out: | 1565 | out: |
1539 | spin_unlock_bh(&x->lock); | 1566 | spin_unlock_bh(&x->lock); |
1540 | xfrm_state_put(x); | 1567 | xfrm_state_put(x); |