diff options
author | Linus Torvalds <torvalds@woody.linux-foundation.org> | 2007-10-15 17:06:58 -0400 |
---|---|---|
committer | Linus Torvalds <torvalds@woody.linux-foundation.org> | 2007-10-15 17:06:58 -0400 |
commit | a52cefc80fc92981592c688d1c8067442afe4cec (patch) | |
tree | ad119b5a4f5e4a257779c0ef324b5c9354c915f1 /net/ipv6 | |
parent | fba956c46a72f9e7503fd464ffee43c632307e31 (diff) | |
parent | 4acad72ded8e3f0211bd2a762e23c28229c61a51 (diff) |
Merge branch 'master' of master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6
* 'master' of master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6: (42 commits)
[IPV6]: Consolidate the ip6_pol_route_(input|output) pair
[TCP]: Make snd_cwnd_cnt 32-bit
[TCP]: Update the /proc/net/tcp documentation
[NETNS]: Don't panic on creating the namespace's loopback
[NEIGH]: Ensure that pneigh_lookup is protected with RTNL
[INET]: kmalloc+memset -> kzalloc in frag_alloc_queue
[ISDN]: Fix compile with CONFIG_ISDN_X25 disabled.
[IPV6]: Replace sk_buff ** with sk_buff * in input handlers
[SELINUX]: Update for netfilter ->hook() arg changes.
[INET]: Consolidate the xxx_put
[INET]: Small cleanup for xxx_put after evictor consolidation
[INET]: Consolidate the xxx_evictor
[INET]: Consolidate the xxx_frag_destroy
[INET]: Consolidate the xxx_secret_rebuild
[INET]: Consolidate the xxx_frag_kill
[INET]: Collect common frag sysctl variables together
[INET]: Collect frag queues management objects together
[INET]: Move common fields from frag_queues in one place.
[TG3]: Fix performance regression on 5705.
[ISDN]: Remove local copy of device name to make sure renames work.
...
Diffstat (limited to 'net/ipv6')
27 files changed, 375 insertions, 663 deletions
diff --git a/net/ipv6/exthdrs.c b/net/ipv6/exthdrs.c index c82d4d49f71f..1e89efd38a0c 100644 --- a/net/ipv6/exthdrs.c +++ b/net/ipv6/exthdrs.c | |||
@@ -102,7 +102,7 @@ EXPORT_SYMBOL_GPL(ipv6_find_tlv); | |||
102 | 102 | ||
103 | struct tlvtype_proc { | 103 | struct tlvtype_proc { |
104 | int type; | 104 | int type; |
105 | int (*func)(struct sk_buff **skbp, int offset); | 105 | int (*func)(struct sk_buff *skb, int offset); |
106 | }; | 106 | }; |
107 | 107 | ||
108 | /********************* | 108 | /********************* |
@@ -111,10 +111,8 @@ struct tlvtype_proc { | |||
111 | 111 | ||
112 | /* An unknown option is detected, decide what to do */ | 112 | /* An unknown option is detected, decide what to do */ |
113 | 113 | ||
114 | static int ip6_tlvopt_unknown(struct sk_buff **skbp, int optoff) | 114 | static int ip6_tlvopt_unknown(struct sk_buff *skb, int optoff) |
115 | { | 115 | { |
116 | struct sk_buff *skb = *skbp; | ||
117 | |||
118 | switch ((skb_network_header(skb)[optoff] & 0xC0) >> 6) { | 116 | switch ((skb_network_header(skb)[optoff] & 0xC0) >> 6) { |
119 | case 0: /* ignore */ | 117 | case 0: /* ignore */ |
120 | return 1; | 118 | return 1; |
@@ -139,9 +137,8 @@ static int ip6_tlvopt_unknown(struct sk_buff **skbp, int optoff) | |||
139 | 137 | ||
140 | /* Parse tlv encoded option header (hop-by-hop or destination) */ | 138 | /* Parse tlv encoded option header (hop-by-hop or destination) */ |
141 | 139 | ||
142 | static int ip6_parse_tlv(struct tlvtype_proc *procs, struct sk_buff **skbp) | 140 | static int ip6_parse_tlv(struct tlvtype_proc *procs, struct sk_buff *skb) |
143 | { | 141 | { |
144 | struct sk_buff *skb = *skbp; | ||
145 | struct tlvtype_proc *curr; | 142 | struct tlvtype_proc *curr; |
146 | const unsigned char *nh = skb_network_header(skb); | 143 | const unsigned char *nh = skb_network_header(skb); |
147 | int off = skb_network_header_len(skb); | 144 | int off = skb_network_header_len(skb); |
@@ -172,13 +169,13 @@ static int ip6_parse_tlv(struct tlvtype_proc *procs, struct sk_buff **skbp) | |||
172 | /* type specific length/alignment | 169 | /* type specific length/alignment |
173 | checks will be performed in the | 170 | checks will be performed in the |
174 | func(). */ | 171 | func(). */ |
175 | if (curr->func(skbp, off) == 0) | 172 | if (curr->func(skb, off) == 0) |
176 | return 0; | 173 | return 0; |
177 | break; | 174 | break; |
178 | } | 175 | } |
179 | } | 176 | } |
180 | if (curr->type < 0) { | 177 | if (curr->type < 0) { |
181 | if (ip6_tlvopt_unknown(skbp, off) == 0) | 178 | if (ip6_tlvopt_unknown(skb, off) == 0) |
182 | return 0; | 179 | return 0; |
183 | } | 180 | } |
184 | break; | 181 | break; |
@@ -198,9 +195,8 @@ bad: | |||
198 | *****************************/ | 195 | *****************************/ |
199 | 196 | ||
200 | #if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE) | 197 | #if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE) |
201 | static int ipv6_dest_hao(struct sk_buff **skbp, int optoff) | 198 | static int ipv6_dest_hao(struct sk_buff *skb, int optoff) |
202 | { | 199 | { |
203 | struct sk_buff *skb = *skbp; | ||
204 | struct ipv6_destopt_hao *hao; | 200 | struct ipv6_destopt_hao *hao; |
205 | struct inet6_skb_parm *opt = IP6CB(skb); | 201 | struct inet6_skb_parm *opt = IP6CB(skb); |
206 | struct ipv6hdr *ipv6h = ipv6_hdr(skb); | 202 | struct ipv6hdr *ipv6h = ipv6_hdr(skb); |
@@ -234,22 +230,13 @@ static int ipv6_dest_hao(struct sk_buff **skbp, int optoff) | |||
234 | goto discard; | 230 | goto discard; |
235 | 231 | ||
236 | if (skb_cloned(skb)) { | 232 | if (skb_cloned(skb)) { |
237 | struct sk_buff *skb2 = skb_copy(skb, GFP_ATOMIC); | 233 | if (pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) |
238 | struct inet6_skb_parm *opt2; | ||
239 | |||
240 | if (skb2 == NULL) | ||
241 | goto discard; | 234 | goto discard; |
242 | 235 | ||
243 | opt2 = IP6CB(skb2); | ||
244 | memcpy(opt2, opt, sizeof(*opt2)); | ||
245 | |||
246 | kfree_skb(skb); | ||
247 | |||
248 | /* update all variable using below by copied skbuff */ | 236 | /* update all variable using below by copied skbuff */ |
249 | *skbp = skb = skb2; | 237 | hao = (struct ipv6_destopt_hao *)(skb_network_header(skb) + |
250 | hao = (struct ipv6_destopt_hao *)(skb_network_header(skb2) + | ||
251 | optoff); | 238 | optoff); |
252 | ipv6h = ipv6_hdr(skb2); | 239 | ipv6h = ipv6_hdr(skb); |
253 | } | 240 | } |
254 | 241 | ||
255 | if (skb->ip_summed == CHECKSUM_COMPLETE) | 242 | if (skb->ip_summed == CHECKSUM_COMPLETE) |
@@ -280,9 +267,8 @@ static struct tlvtype_proc tlvprocdestopt_lst[] = { | |||
280 | {-1, NULL} | 267 | {-1, NULL} |
281 | }; | 268 | }; |
282 | 269 | ||
283 | static int ipv6_destopt_rcv(struct sk_buff **skbp) | 270 | static int ipv6_destopt_rcv(struct sk_buff *skb) |
284 | { | 271 | { |
285 | struct sk_buff *skb = *skbp; | ||
286 | struct inet6_skb_parm *opt = IP6CB(skb); | 272 | struct inet6_skb_parm *opt = IP6CB(skb); |
287 | #if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE) | 273 | #if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE) |
288 | __u16 dstbuf; | 274 | __u16 dstbuf; |
@@ -304,9 +290,8 @@ static int ipv6_destopt_rcv(struct sk_buff **skbp) | |||
304 | #endif | 290 | #endif |
305 | 291 | ||
306 | dst = dst_clone(skb->dst); | 292 | dst = dst_clone(skb->dst); |
307 | if (ip6_parse_tlv(tlvprocdestopt_lst, skbp)) { | 293 | if (ip6_parse_tlv(tlvprocdestopt_lst, skb)) { |
308 | dst_release(dst); | 294 | dst_release(dst); |
309 | skb = *skbp; | ||
310 | skb->transport_header += (skb_transport_header(skb)[1] + 1) << 3; | 295 | skb->transport_header += (skb_transport_header(skb)[1] + 1) << 3; |
311 | opt = IP6CB(skb); | 296 | opt = IP6CB(skb); |
312 | #if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE) | 297 | #if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE) |
@@ -337,10 +322,8 @@ void __init ipv6_destopt_init(void) | |||
337 | NONE header. No data in packet. | 322 | NONE header. No data in packet. |
338 | ********************************/ | 323 | ********************************/ |
339 | 324 | ||
340 | static int ipv6_nodata_rcv(struct sk_buff **skbp) | 325 | static int ipv6_nodata_rcv(struct sk_buff *skb) |
341 | { | 326 | { |
342 | struct sk_buff *skb = *skbp; | ||
343 | |||
344 | kfree_skb(skb); | 327 | kfree_skb(skb); |
345 | return 0; | 328 | return 0; |
346 | } | 329 | } |
@@ -360,9 +343,8 @@ void __init ipv6_nodata_init(void) | |||
360 | Routing header. | 343 | Routing header. |
361 | ********************************/ | 344 | ********************************/ |
362 | 345 | ||
363 | static int ipv6_rthdr_rcv(struct sk_buff **skbp) | 346 | static int ipv6_rthdr_rcv(struct sk_buff *skb) |
364 | { | 347 | { |
365 | struct sk_buff *skb = *skbp; | ||
366 | struct inet6_skb_parm *opt = IP6CB(skb); | 348 | struct inet6_skb_parm *opt = IP6CB(skb); |
367 | struct in6_addr *addr = NULL; | 349 | struct in6_addr *addr = NULL; |
368 | struct in6_addr daddr; | 350 | struct in6_addr daddr; |
@@ -464,18 +446,14 @@ looped_back: | |||
464 | Do not damage packets queued somewhere. | 446 | Do not damage packets queued somewhere. |
465 | */ | 447 | */ |
466 | if (skb_cloned(skb)) { | 448 | if (skb_cloned(skb)) { |
467 | struct sk_buff *skb2 = skb_copy(skb, GFP_ATOMIC); | ||
468 | /* the copy is a forwarded packet */ | 449 | /* the copy is a forwarded packet */ |
469 | if (skb2 == NULL) { | 450 | if (pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) { |
470 | IP6_INC_STATS_BH(ip6_dst_idev(skb->dst), | 451 | IP6_INC_STATS_BH(ip6_dst_idev(skb->dst), |
471 | IPSTATS_MIB_OUTDISCARDS); | 452 | IPSTATS_MIB_OUTDISCARDS); |
472 | kfree_skb(skb); | 453 | kfree_skb(skb); |
473 | return -1; | 454 | return -1; |
474 | } | 455 | } |
475 | kfree_skb(skb); | 456 | hdr = (struct ipv6_rt_hdr *)skb_transport_header(skb); |
476 | *skbp = skb = skb2; | ||
477 | opt = IP6CB(skb2); | ||
478 | hdr = (struct ipv6_rt_hdr *)skb_transport_header(skb2); | ||
479 | } | 457 | } |
480 | 458 | ||
481 | if (skb->ip_summed == CHECKSUM_COMPLETE) | 459 | if (skb->ip_summed == CHECKSUM_COMPLETE) |
@@ -578,9 +556,8 @@ static inline struct inet6_dev *ipv6_skb_idev(struct sk_buff *skb) | |||
578 | 556 | ||
579 | /* Router Alert as of RFC 2711 */ | 557 | /* Router Alert as of RFC 2711 */ |
580 | 558 | ||
581 | static int ipv6_hop_ra(struct sk_buff **skbp, int optoff) | 559 | static int ipv6_hop_ra(struct sk_buff *skb, int optoff) |
582 | { | 560 | { |
583 | struct sk_buff *skb = *skbp; | ||
584 | const unsigned char *nh = skb_network_header(skb); | 561 | const unsigned char *nh = skb_network_header(skb); |
585 | 562 | ||
586 | if (nh[optoff + 1] == 2) { | 563 | if (nh[optoff + 1] == 2) { |
@@ -595,9 +572,8 @@ static int ipv6_hop_ra(struct sk_buff **skbp, int optoff) | |||
595 | 572 | ||
596 | /* Jumbo payload */ | 573 | /* Jumbo payload */ |
597 | 574 | ||
598 | static int ipv6_hop_jumbo(struct sk_buff **skbp, int optoff) | 575 | static int ipv6_hop_jumbo(struct sk_buff *skb, int optoff) |
599 | { | 576 | { |
600 | struct sk_buff *skb = *skbp; | ||
601 | const unsigned char *nh = skb_network_header(skb); | 577 | const unsigned char *nh = skb_network_header(skb); |
602 | u32 pkt_len; | 578 | u32 pkt_len; |
603 | 579 | ||
@@ -648,9 +624,8 @@ static struct tlvtype_proc tlvprochopopt_lst[] = { | |||
648 | { -1, } | 624 | { -1, } |
649 | }; | 625 | }; |
650 | 626 | ||
651 | int ipv6_parse_hopopts(struct sk_buff **skbp) | 627 | int ipv6_parse_hopopts(struct sk_buff *skb) |
652 | { | 628 | { |
653 | struct sk_buff *skb = *skbp; | ||
654 | struct inet6_skb_parm *opt = IP6CB(skb); | 629 | struct inet6_skb_parm *opt = IP6CB(skb); |
655 | 630 | ||
656 | /* | 631 | /* |
@@ -667,8 +642,7 @@ int ipv6_parse_hopopts(struct sk_buff **skbp) | |||
667 | } | 642 | } |
668 | 643 | ||
669 | opt->hop = sizeof(struct ipv6hdr); | 644 | opt->hop = sizeof(struct ipv6hdr); |
670 | if (ip6_parse_tlv(tlvprochopopt_lst, skbp)) { | 645 | if (ip6_parse_tlv(tlvprochopopt_lst, skb)) { |
671 | skb = *skbp; | ||
672 | skb->transport_header += (skb_transport_header(skb)[1] + 1) << 3; | 646 | skb->transport_header += (skb_transport_header(skb)[1] + 1) << 3; |
673 | opt = IP6CB(skb); | 647 | opt = IP6CB(skb); |
674 | opt->nhoff = sizeof(struct ipv6hdr); | 648 | opt->nhoff = sizeof(struct ipv6hdr); |
diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c index 47b8ce232e84..9bb031fa1c2f 100644 --- a/net/ipv6/icmp.c +++ b/net/ipv6/icmp.c | |||
@@ -82,7 +82,7 @@ EXPORT_SYMBOL(icmpv6msg_statistics); | |||
82 | static DEFINE_PER_CPU(struct socket *, __icmpv6_socket) = NULL; | 82 | static DEFINE_PER_CPU(struct socket *, __icmpv6_socket) = NULL; |
83 | #define icmpv6_socket __get_cpu_var(__icmpv6_socket) | 83 | #define icmpv6_socket __get_cpu_var(__icmpv6_socket) |
84 | 84 | ||
85 | static int icmpv6_rcv(struct sk_buff **pskb); | 85 | static int icmpv6_rcv(struct sk_buff *skb); |
86 | 86 | ||
87 | static struct inet6_protocol icmpv6_protocol = { | 87 | static struct inet6_protocol icmpv6_protocol = { |
88 | .handler = icmpv6_rcv, | 88 | .handler = icmpv6_rcv, |
@@ -614,9 +614,8 @@ static void icmpv6_notify(struct sk_buff *skb, int type, int code, __be32 info) | |||
614 | * Handle icmp messages | 614 | * Handle icmp messages |
615 | */ | 615 | */ |
616 | 616 | ||
617 | static int icmpv6_rcv(struct sk_buff **pskb) | 617 | static int icmpv6_rcv(struct sk_buff *skb) |
618 | { | 618 | { |
619 | struct sk_buff *skb = *pskb; | ||
620 | struct net_device *dev = skb->dev; | 619 | struct net_device *dev = skb->dev; |
621 | struct inet6_dev *idev = __in6_dev_get(dev); | 620 | struct inet6_dev *idev = __in6_dev_get(dev); |
622 | struct in6_addr *saddr, *daddr; | 621 | struct in6_addr *saddr, *daddr; |
diff --git a/net/ipv6/inet6_connection_sock.c b/net/ipv6/inet6_connection_sock.c index 25b931709749..78de42ada844 100644 --- a/net/ipv6/inet6_connection_sock.c +++ b/net/ipv6/inet6_connection_sock.c | |||
@@ -146,7 +146,7 @@ void __inet6_csk_dst_store(struct sock *sk, struct dst_entry *dst, | |||
146 | __ip6_dst_store(sk, dst, daddr, saddr); | 146 | __ip6_dst_store(sk, dst, daddr, saddr); |
147 | 147 | ||
148 | #ifdef CONFIG_XFRM | 148 | #ifdef CONFIG_XFRM |
149 | if (dst) { | 149 | { |
150 | struct rt6_info *rt = (struct rt6_info *)dst; | 150 | struct rt6_info *rt = (struct rt6_info *)dst; |
151 | rt->rt6i_flow_cache_genid = atomic_read(&flow_cache_genid); | 151 | rt->rt6i_flow_cache_genid = atomic_read(&flow_cache_genid); |
152 | } | 152 | } |
diff --git a/net/ipv6/ip6_input.c b/net/ipv6/ip6_input.c index 9149fc239759..fac6f7f9dd73 100644 --- a/net/ipv6/ip6_input.c +++ b/net/ipv6/ip6_input.c | |||
@@ -125,7 +125,7 @@ int ipv6_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt | |||
125 | } | 125 | } |
126 | 126 | ||
127 | if (hdr->nexthdr == NEXTHDR_HOP) { | 127 | if (hdr->nexthdr == NEXTHDR_HOP) { |
128 | if (ipv6_parse_hopopts(&skb) < 0) { | 128 | if (ipv6_parse_hopopts(skb) < 0) { |
129 | IP6_INC_STATS_BH(idev, IPSTATS_MIB_INHDRERRORS); | 129 | IP6_INC_STATS_BH(idev, IPSTATS_MIB_INHDRERRORS); |
130 | rcu_read_unlock(); | 130 | rcu_read_unlock(); |
131 | return 0; | 131 | return 0; |
@@ -149,7 +149,7 @@ out: | |||
149 | */ | 149 | */ |
150 | 150 | ||
151 | 151 | ||
152 | static inline int ip6_input_finish(struct sk_buff *skb) | 152 | static int ip6_input_finish(struct sk_buff *skb) |
153 | { | 153 | { |
154 | struct inet6_protocol *ipprot; | 154 | struct inet6_protocol *ipprot; |
155 | struct sock *raw_sk; | 155 | struct sock *raw_sk; |
@@ -199,7 +199,7 @@ resubmit: | |||
199 | !xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) | 199 | !xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) |
200 | goto discard; | 200 | goto discard; |
201 | 201 | ||
202 | ret = ipprot->handler(&skb); | 202 | ret = ipprot->handler(skb); |
203 | if (ret > 0) | 203 | if (ret > 0) |
204 | goto resubmit; | 204 | goto resubmit; |
205 | else if (ret == 0) | 205 | else if (ret == 0) |
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c index 011082ed921a..13565dfb1b45 100644 --- a/net/ipv6/ip6_output.c +++ b/net/ipv6/ip6_output.c | |||
@@ -70,7 +70,7 @@ static __inline__ void ipv6_select_ident(struct sk_buff *skb, struct frag_hdr *f | |||
70 | spin_unlock_bh(&ip6_id_lock); | 70 | spin_unlock_bh(&ip6_id_lock); |
71 | } | 71 | } |
72 | 72 | ||
73 | static inline int ip6_output_finish(struct sk_buff *skb) | 73 | static int ip6_output_finish(struct sk_buff *skb) |
74 | { | 74 | { |
75 | struct dst_entry *dst = skb->dst; | 75 | struct dst_entry *dst = skb->dst; |
76 | 76 | ||
diff --git a/net/ipv6/netfilter.c b/net/ipv6/netfilter.c index 38b149613915..b1326c2bf8aa 100644 --- a/net/ipv6/netfilter.c +++ b/net/ipv6/netfilter.c | |||
@@ -68,15 +68,15 @@ static void nf_ip6_saveroute(const struct sk_buff *skb, struct nf_info *info) | |||
68 | } | 68 | } |
69 | } | 69 | } |
70 | 70 | ||
71 | static int nf_ip6_reroute(struct sk_buff **pskb, const struct nf_info *info) | 71 | static int nf_ip6_reroute(struct sk_buff *skb, const struct nf_info *info) |
72 | { | 72 | { |
73 | struct ip6_rt_info *rt_info = nf_info_reroute(info); | 73 | struct ip6_rt_info *rt_info = nf_info_reroute(info); |
74 | 74 | ||
75 | if (info->hook == NF_IP6_LOCAL_OUT) { | 75 | if (info->hook == NF_IP6_LOCAL_OUT) { |
76 | struct ipv6hdr *iph = ipv6_hdr(*pskb); | 76 | struct ipv6hdr *iph = ipv6_hdr(skb); |
77 | if (!ipv6_addr_equal(&iph->daddr, &rt_info->daddr) || | 77 | if (!ipv6_addr_equal(&iph->daddr, &rt_info->daddr) || |
78 | !ipv6_addr_equal(&iph->saddr, &rt_info->saddr)) | 78 | !ipv6_addr_equal(&iph->saddr, &rt_info->saddr)) |
79 | return ip6_route_me_harder(*pskb); | 79 | return ip6_route_me_harder(skb); |
80 | } | 80 | } |
81 | return 0; | 81 | return 0; |
82 | } | 82 | } |
diff --git a/net/ipv6/netfilter/ip6_queue.c b/net/ipv6/netfilter/ip6_queue.c index 0473145ac534..6413a30d9f68 100644 --- a/net/ipv6/netfilter/ip6_queue.c +++ b/net/ipv6/netfilter/ip6_queue.c | |||
@@ -332,6 +332,7 @@ static int | |||
332 | ipq_mangle_ipv6(ipq_verdict_msg_t *v, struct ipq_queue_entry *e) | 332 | ipq_mangle_ipv6(ipq_verdict_msg_t *v, struct ipq_queue_entry *e) |
333 | { | 333 | { |
334 | int diff; | 334 | int diff; |
335 | int err; | ||
335 | struct ipv6hdr *user_iph = (struct ipv6hdr *)v->payload; | 336 | struct ipv6hdr *user_iph = (struct ipv6hdr *)v->payload; |
336 | 337 | ||
337 | if (v->data_len < sizeof(*user_iph)) | 338 | if (v->data_len < sizeof(*user_iph)) |
@@ -344,25 +345,18 @@ ipq_mangle_ipv6(ipq_verdict_msg_t *v, struct ipq_queue_entry *e) | |||
344 | if (v->data_len > 0xFFFF) | 345 | if (v->data_len > 0xFFFF) |
345 | return -EINVAL; | 346 | return -EINVAL; |
346 | if (diff > skb_tailroom(e->skb)) { | 347 | if (diff > skb_tailroom(e->skb)) { |
347 | struct sk_buff *newskb; | 348 | err = pskb_expand_head(e->skb, 0, |
348 | 349 | diff - skb_tailroom(e->skb), | |
349 | newskb = skb_copy_expand(e->skb, | 350 | GFP_ATOMIC); |
350 | skb_headroom(e->skb), | 351 | if (err) { |
351 | diff, | ||
352 | GFP_ATOMIC); | ||
353 | if (newskb == NULL) { | ||
354 | printk(KERN_WARNING "ip6_queue: OOM " | 352 | printk(KERN_WARNING "ip6_queue: OOM " |
355 | "in mangle, dropping packet\n"); | 353 | "in mangle, dropping packet\n"); |
356 | return -ENOMEM; | 354 | return err; |
357 | } | 355 | } |
358 | if (e->skb->sk) | ||
359 | skb_set_owner_w(newskb, e->skb->sk); | ||
360 | kfree_skb(e->skb); | ||
361 | e->skb = newskb; | ||
362 | } | 356 | } |
363 | skb_put(e->skb, diff); | 357 | skb_put(e->skb, diff); |
364 | } | 358 | } |
365 | if (!skb_make_writable(&e->skb, v->data_len)) | 359 | if (!skb_make_writable(e->skb, v->data_len)) |
366 | return -ENOMEM; | 360 | return -ENOMEM; |
367 | skb_copy_to_linear_data(e->skb, v->payload, v->data_len); | 361 | skb_copy_to_linear_data(e->skb, v->payload, v->data_len); |
368 | e->skb->ip_summed = CHECKSUM_NONE; | 362 | e->skb->ip_summed = CHECKSUM_NONE; |
diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c index cd9df02bb85c..acaba1537931 100644 --- a/net/ipv6/netfilter/ip6_tables.c +++ b/net/ipv6/netfilter/ip6_tables.c | |||
@@ -205,7 +205,7 @@ ip6_checkentry(const struct ip6t_ip6 *ipv6) | |||
205 | } | 205 | } |
206 | 206 | ||
207 | static unsigned int | 207 | static unsigned int |
208 | ip6t_error(struct sk_buff **pskb, | 208 | ip6t_error(struct sk_buff *skb, |
209 | const struct net_device *in, | 209 | const struct net_device *in, |
210 | const struct net_device *out, | 210 | const struct net_device *out, |
211 | unsigned int hooknum, | 211 | unsigned int hooknum, |
@@ -350,7 +350,7 @@ static void trace_packet(struct sk_buff *skb, | |||
350 | 350 | ||
351 | /* Returns one of the generic firewall policies, like NF_ACCEPT. */ | 351 | /* Returns one of the generic firewall policies, like NF_ACCEPT. */ |
352 | unsigned int | 352 | unsigned int |
353 | ip6t_do_table(struct sk_buff **pskb, | 353 | ip6t_do_table(struct sk_buff *skb, |
354 | unsigned int hook, | 354 | unsigned int hook, |
355 | const struct net_device *in, | 355 | const struct net_device *in, |
356 | const struct net_device *out, | 356 | const struct net_device *out, |
@@ -389,17 +389,17 @@ ip6t_do_table(struct sk_buff **pskb, | |||
389 | do { | 389 | do { |
390 | IP_NF_ASSERT(e); | 390 | IP_NF_ASSERT(e); |
391 | IP_NF_ASSERT(back); | 391 | IP_NF_ASSERT(back); |
392 | if (ip6_packet_match(*pskb, indev, outdev, &e->ipv6, | 392 | if (ip6_packet_match(skb, indev, outdev, &e->ipv6, |
393 | &protoff, &offset, &hotdrop)) { | 393 | &protoff, &offset, &hotdrop)) { |
394 | struct ip6t_entry_target *t; | 394 | struct ip6t_entry_target *t; |
395 | 395 | ||
396 | if (IP6T_MATCH_ITERATE(e, do_match, | 396 | if (IP6T_MATCH_ITERATE(e, do_match, |
397 | *pskb, in, out, | 397 | skb, in, out, |
398 | offset, protoff, &hotdrop) != 0) | 398 | offset, protoff, &hotdrop) != 0) |
399 | goto no_match; | 399 | goto no_match; |
400 | 400 | ||
401 | ADD_COUNTER(e->counters, | 401 | ADD_COUNTER(e->counters, |
402 | ntohs(ipv6_hdr(*pskb)->payload_len) | 402 | ntohs(ipv6_hdr(skb)->payload_len) |
403 | + IPV6_HDR_LEN, | 403 | + IPV6_HDR_LEN, |
404 | 1); | 404 | 1); |
405 | 405 | ||
@@ -409,8 +409,8 @@ ip6t_do_table(struct sk_buff **pskb, | |||
409 | #if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \ | 409 | #if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \ |
410 | defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE) | 410 | defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE) |
411 | /* The packet is traced: log it */ | 411 | /* The packet is traced: log it */ |
412 | if (unlikely((*pskb)->nf_trace)) | 412 | if (unlikely(skb->nf_trace)) |
413 | trace_packet(*pskb, hook, in, out, | 413 | trace_packet(skb, hook, in, out, |
414 | table->name, private, e); | 414 | table->name, private, e); |
415 | #endif | 415 | #endif |
416 | /* Standard target? */ | 416 | /* Standard target? */ |
@@ -448,7 +448,7 @@ ip6t_do_table(struct sk_buff **pskb, | |||
448 | ((struct ip6t_entry *)table_base)->comefrom | 448 | ((struct ip6t_entry *)table_base)->comefrom |
449 | = 0xeeeeeeec; | 449 | = 0xeeeeeeec; |
450 | #endif | 450 | #endif |
451 | verdict = t->u.kernel.target->target(pskb, | 451 | verdict = t->u.kernel.target->target(skb, |
452 | in, out, | 452 | in, out, |
453 | hook, | 453 | hook, |
454 | t->u.kernel.target, | 454 | t->u.kernel.target, |
diff --git a/net/ipv6/netfilter/ip6t_HL.c b/net/ipv6/netfilter/ip6t_HL.c index ad4d94310b87..9afc836fd454 100644 --- a/net/ipv6/netfilter/ip6t_HL.c +++ b/net/ipv6/netfilter/ip6t_HL.c | |||
@@ -18,7 +18,7 @@ MODULE_AUTHOR("Maciej Soltysiak <solt@dns.toxicfilms.tv>"); | |||
18 | MODULE_DESCRIPTION("IP6 tables Hop Limit modification module"); | 18 | MODULE_DESCRIPTION("IP6 tables Hop Limit modification module"); |
19 | MODULE_LICENSE("GPL"); | 19 | MODULE_LICENSE("GPL"); |
20 | 20 | ||
21 | static unsigned int ip6t_hl_target(struct sk_buff **pskb, | 21 | static unsigned int ip6t_hl_target(struct sk_buff *skb, |
22 | const struct net_device *in, | 22 | const struct net_device *in, |
23 | const struct net_device *out, | 23 | const struct net_device *out, |
24 | unsigned int hooknum, | 24 | unsigned int hooknum, |
@@ -29,10 +29,10 @@ static unsigned int ip6t_hl_target(struct sk_buff **pskb, | |||
29 | const struct ip6t_HL_info *info = targinfo; | 29 | const struct ip6t_HL_info *info = targinfo; |
30 | int new_hl; | 30 | int new_hl; |
31 | 31 | ||
32 | if (!skb_make_writable(pskb, (*pskb)->len)) | 32 | if (!skb_make_writable(skb, skb->len)) |
33 | return NF_DROP; | 33 | return NF_DROP; |
34 | 34 | ||
35 | ip6h = ipv6_hdr(*pskb); | 35 | ip6h = ipv6_hdr(skb); |
36 | 36 | ||
37 | switch (info->mode) { | 37 | switch (info->mode) { |
38 | case IP6T_HL_SET: | 38 | case IP6T_HL_SET: |
diff --git a/net/ipv6/netfilter/ip6t_LOG.c b/net/ipv6/netfilter/ip6t_LOG.c index 6ab99001dccc..7a48c342df46 100644 --- a/net/ipv6/netfilter/ip6t_LOG.c +++ b/net/ipv6/netfilter/ip6t_LOG.c | |||
@@ -431,7 +431,7 @@ ip6t_log_packet(unsigned int pf, | |||
431 | } | 431 | } |
432 | 432 | ||
433 | static unsigned int | 433 | static unsigned int |
434 | ip6t_log_target(struct sk_buff **pskb, | 434 | ip6t_log_target(struct sk_buff *skb, |
435 | const struct net_device *in, | 435 | const struct net_device *in, |
436 | const struct net_device *out, | 436 | const struct net_device *out, |
437 | unsigned int hooknum, | 437 | unsigned int hooknum, |
@@ -445,8 +445,7 @@ ip6t_log_target(struct sk_buff **pskb, | |||
445 | li.u.log.level = loginfo->level; | 445 | li.u.log.level = loginfo->level; |
446 | li.u.log.logflags = loginfo->logflags; | 446 | li.u.log.logflags = loginfo->logflags; |
447 | 447 | ||
448 | ip6t_log_packet(PF_INET6, hooknum, *pskb, in, out, &li, | 448 | ip6t_log_packet(PF_INET6, hooknum, skb, in, out, &li, loginfo->prefix); |
449 | loginfo->prefix); | ||
450 | return XT_CONTINUE; | 449 | return XT_CONTINUE; |
451 | } | 450 | } |
452 | 451 | ||
diff --git a/net/ipv6/netfilter/ip6t_REJECT.c b/net/ipv6/netfilter/ip6t_REJECT.c index 3fd08d5567a6..1a7d2917545d 100644 --- a/net/ipv6/netfilter/ip6t_REJECT.c +++ b/net/ipv6/netfilter/ip6t_REJECT.c | |||
@@ -172,7 +172,7 @@ send_unreach(struct sk_buff *skb_in, unsigned char code, unsigned int hooknum) | |||
172 | icmpv6_send(skb_in, ICMPV6_DEST_UNREACH, code, 0, NULL); | 172 | icmpv6_send(skb_in, ICMPV6_DEST_UNREACH, code, 0, NULL); |
173 | } | 173 | } |
174 | 174 | ||
175 | static unsigned int reject6_target(struct sk_buff **pskb, | 175 | static unsigned int reject6_target(struct sk_buff *skb, |
176 | const struct net_device *in, | 176 | const struct net_device *in, |
177 | const struct net_device *out, | 177 | const struct net_device *out, |
178 | unsigned int hooknum, | 178 | unsigned int hooknum, |
@@ -187,25 +187,25 @@ static unsigned int reject6_target(struct sk_buff **pskb, | |||
187 | must return an absolute verdict. --RR */ | 187 | must return an absolute verdict. --RR */ |
188 | switch (reject->with) { | 188 | switch (reject->with) { |
189 | case IP6T_ICMP6_NO_ROUTE: | 189 | case IP6T_ICMP6_NO_ROUTE: |
190 | send_unreach(*pskb, ICMPV6_NOROUTE, hooknum); | 190 | send_unreach(skb, ICMPV6_NOROUTE, hooknum); |
191 | break; | 191 | break; |
192 | case IP6T_ICMP6_ADM_PROHIBITED: | 192 | case IP6T_ICMP6_ADM_PROHIBITED: |
193 | send_unreach(*pskb, ICMPV6_ADM_PROHIBITED, hooknum); | 193 | send_unreach(skb, ICMPV6_ADM_PROHIBITED, hooknum); |
194 | break; | 194 | break; |
195 | case IP6T_ICMP6_NOT_NEIGHBOUR: | 195 | case IP6T_ICMP6_NOT_NEIGHBOUR: |
196 | send_unreach(*pskb, ICMPV6_NOT_NEIGHBOUR, hooknum); | 196 | send_unreach(skb, ICMPV6_NOT_NEIGHBOUR, hooknum); |
197 | break; | 197 | break; |
198 | case IP6T_ICMP6_ADDR_UNREACH: | 198 | case IP6T_ICMP6_ADDR_UNREACH: |
199 | send_unreach(*pskb, ICMPV6_ADDR_UNREACH, hooknum); | 199 | send_unreach(skb, ICMPV6_ADDR_UNREACH, hooknum); |
200 | break; | 200 | break; |
201 | case IP6T_ICMP6_PORT_UNREACH: | 201 | case IP6T_ICMP6_PORT_UNREACH: |
202 | send_unreach(*pskb, ICMPV6_PORT_UNREACH, hooknum); | 202 | send_unreach(skb, ICMPV6_PORT_UNREACH, hooknum); |
203 | break; | 203 | break; |
204 | case IP6T_ICMP6_ECHOREPLY: | 204 | case IP6T_ICMP6_ECHOREPLY: |
205 | /* Do nothing */ | 205 | /* Do nothing */ |
206 | break; | 206 | break; |
207 | case IP6T_TCP_RESET: | 207 | case IP6T_TCP_RESET: |
208 | send_reset(*pskb); | 208 | send_reset(skb); |
209 | break; | 209 | break; |
210 | default: | 210 | default: |
211 | if (net_ratelimit()) | 211 | if (net_ratelimit()) |
diff --git a/net/ipv6/netfilter/ip6table_filter.c b/net/ipv6/netfilter/ip6table_filter.c index 7e32e2aaf7f7..1d26b202bf30 100644 --- a/net/ipv6/netfilter/ip6table_filter.c +++ b/net/ipv6/netfilter/ip6table_filter.c | |||
@@ -60,32 +60,32 @@ static struct xt_table packet_filter = { | |||
60 | /* The work comes in here from netfilter.c. */ | 60 | /* The work comes in here from netfilter.c. */ |
61 | static unsigned int | 61 | static unsigned int |
62 | ip6t_hook(unsigned int hook, | 62 | ip6t_hook(unsigned int hook, |
63 | struct sk_buff **pskb, | 63 | struct sk_buff *skb, |
64 | const struct net_device *in, | 64 | const struct net_device *in, |
65 | const struct net_device *out, | 65 | const struct net_device *out, |
66 | int (*okfn)(struct sk_buff *)) | 66 | int (*okfn)(struct sk_buff *)) |
67 | { | 67 | { |
68 | return ip6t_do_table(pskb, hook, in, out, &packet_filter); | 68 | return ip6t_do_table(skb, hook, in, out, &packet_filter); |
69 | } | 69 | } |
70 | 70 | ||
71 | static unsigned int | 71 | static unsigned int |
72 | ip6t_local_out_hook(unsigned int hook, | 72 | ip6t_local_out_hook(unsigned int hook, |
73 | struct sk_buff **pskb, | 73 | struct sk_buff *skb, |
74 | const struct net_device *in, | 74 | const struct net_device *in, |
75 | const struct net_device *out, | 75 | const struct net_device *out, |
76 | int (*okfn)(struct sk_buff *)) | 76 | int (*okfn)(struct sk_buff *)) |
77 | { | 77 | { |
78 | #if 0 | 78 | #if 0 |
79 | /* root is playing with raw sockets. */ | 79 | /* root is playing with raw sockets. */ |
80 | if ((*pskb)->len < sizeof(struct iphdr) | 80 | if (skb->len < sizeof(struct iphdr) |
81 | || ip_hdrlen(*pskb) < sizeof(struct iphdr)) { | 81 | || ip_hdrlen(skb) < sizeof(struct iphdr)) { |
82 | if (net_ratelimit()) | 82 | if (net_ratelimit()) |
83 | printk("ip6t_hook: happy cracking.\n"); | 83 | printk("ip6t_hook: happy cracking.\n"); |
84 | return NF_ACCEPT; | 84 | return NF_ACCEPT; |
85 | } | 85 | } |
86 | #endif | 86 | #endif |
87 | 87 | ||
88 | return ip6t_do_table(pskb, hook, in, out, &packet_filter); | 88 | return ip6t_do_table(skb, hook, in, out, &packet_filter); |
89 | } | 89 | } |
90 | 90 | ||
91 | static struct nf_hook_ops ip6t_ops[] = { | 91 | static struct nf_hook_ops ip6t_ops[] = { |
diff --git a/net/ipv6/netfilter/ip6table_mangle.c b/net/ipv6/netfilter/ip6table_mangle.c index f0a9efa67fb5..a0b6381f1e8c 100644 --- a/net/ipv6/netfilter/ip6table_mangle.c +++ b/net/ipv6/netfilter/ip6table_mangle.c | |||
@@ -68,17 +68,17 @@ static struct xt_table packet_mangler = { | |||
68 | /* The work comes in here from netfilter.c. */ | 68 | /* The work comes in here from netfilter.c. */ |
69 | static unsigned int | 69 | static unsigned int |
70 | ip6t_route_hook(unsigned int hook, | 70 | ip6t_route_hook(unsigned int hook, |
71 | struct sk_buff **pskb, | 71 | struct sk_buff *skb, |
72 | const struct net_device *in, | 72 | const struct net_device *in, |
73 | const struct net_device *out, | 73 | const struct net_device *out, |
74 | int (*okfn)(struct sk_buff *)) | 74 | int (*okfn)(struct sk_buff *)) |
75 | { | 75 | { |
76 | return ip6t_do_table(pskb, hook, in, out, &packet_mangler); | 76 | return ip6t_do_table(skb, hook, in, out, &packet_mangler); |
77 | } | 77 | } |
78 | 78 | ||
79 | static unsigned int | 79 | static unsigned int |
80 | ip6t_local_hook(unsigned int hook, | 80 | ip6t_local_hook(unsigned int hook, |
81 | struct sk_buff **pskb, | 81 | struct sk_buff *skb, |
82 | const struct net_device *in, | 82 | const struct net_device *in, |
83 | const struct net_device *out, | 83 | const struct net_device *out, |
84 | int (*okfn)(struct sk_buff *)) | 84 | int (*okfn)(struct sk_buff *)) |
@@ -91,8 +91,8 @@ ip6t_local_hook(unsigned int hook, | |||
91 | 91 | ||
92 | #if 0 | 92 | #if 0 |
93 | /* root is playing with raw sockets. */ | 93 | /* root is playing with raw sockets. */ |
94 | if ((*pskb)->len < sizeof(struct iphdr) | 94 | if (skb->len < sizeof(struct iphdr) |
95 | || ip_hdrlen(*pskb) < sizeof(struct iphdr)) { | 95 | || ip_hdrlen(skb) < sizeof(struct iphdr)) { |
96 | if (net_ratelimit()) | 96 | if (net_ratelimit()) |
97 | printk("ip6t_hook: happy cracking.\n"); | 97 | printk("ip6t_hook: happy cracking.\n"); |
98 | return NF_ACCEPT; | 98 | return NF_ACCEPT; |
@@ -100,22 +100,22 @@ ip6t_local_hook(unsigned int hook, | |||
100 | #endif | 100 | #endif |
101 | 101 | ||
102 | /* save source/dest address, mark, hoplimit, flowlabel, priority, */ | 102 | /* save source/dest address, mark, hoplimit, flowlabel, priority, */ |
103 | memcpy(&saddr, &ipv6_hdr(*pskb)->saddr, sizeof(saddr)); | 103 | memcpy(&saddr, &ipv6_hdr(skb)->saddr, sizeof(saddr)); |
104 | memcpy(&daddr, &ipv6_hdr(*pskb)->daddr, sizeof(daddr)); | 104 | memcpy(&daddr, &ipv6_hdr(skb)->daddr, sizeof(daddr)); |
105 | mark = (*pskb)->mark; | 105 | mark = skb->mark; |
106 | hop_limit = ipv6_hdr(*pskb)->hop_limit; | 106 | hop_limit = ipv6_hdr(skb)->hop_limit; |
107 | 107 | ||
108 | /* flowlabel and prio (includes version, which shouldn't change either */ | 108 | /* flowlabel and prio (includes version, which shouldn't change either */ |
109 | flowlabel = *((u_int32_t *)ipv6_hdr(*pskb)); | 109 | flowlabel = *((u_int32_t *)ipv6_hdr(skb)); |
110 | 110 | ||
111 | ret = ip6t_do_table(pskb, hook, in, out, &packet_mangler); | 111 | ret = ip6t_do_table(skb, hook, in, out, &packet_mangler); |
112 | 112 | ||
113 | if (ret != NF_DROP && ret != NF_STOLEN | 113 | if (ret != NF_DROP && ret != NF_STOLEN |
114 | && (memcmp(&ipv6_hdr(*pskb)->saddr, &saddr, sizeof(saddr)) | 114 | && (memcmp(&ipv6_hdr(skb)->saddr, &saddr, sizeof(saddr)) |
115 | || memcmp(&ipv6_hdr(*pskb)->daddr, &daddr, sizeof(daddr)) | 115 | || memcmp(&ipv6_hdr(skb)->daddr, &daddr, sizeof(daddr)) |
116 | || (*pskb)->mark != mark | 116 | || skb->mark != mark |
117 | || ipv6_hdr(*pskb)->hop_limit != hop_limit)) | 117 | || ipv6_hdr(skb)->hop_limit != hop_limit)) |
118 | return ip6_route_me_harder(*pskb) == 0 ? ret : NF_DROP; | 118 | return ip6_route_me_harder(skb) == 0 ? ret : NF_DROP; |
119 | 119 | ||
120 | return ret; | 120 | return ret; |
121 | } | 121 | } |
diff --git a/net/ipv6/netfilter/ip6table_raw.c b/net/ipv6/netfilter/ip6table_raw.c index ec290e4ebdd8..8f7109f991e6 100644 --- a/net/ipv6/netfilter/ip6table_raw.c +++ b/net/ipv6/netfilter/ip6table_raw.c | |||
@@ -46,12 +46,12 @@ static struct xt_table packet_raw = { | |||
46 | /* The work comes in here from netfilter.c. */ | 46 | /* The work comes in here from netfilter.c. */ |
47 | static unsigned int | 47 | static unsigned int |
48 | ip6t_hook(unsigned int hook, | 48 | ip6t_hook(unsigned int hook, |
49 | struct sk_buff **pskb, | 49 | struct sk_buff *skb, |
50 | const struct net_device *in, | 50 | const struct net_device *in, |
51 | const struct net_device *out, | 51 | const struct net_device *out, |
52 | int (*okfn)(struct sk_buff *)) | 52 | int (*okfn)(struct sk_buff *)) |
53 | { | 53 | { |
54 | return ip6t_do_table(pskb, hook, in, out, &packet_raw); | 54 | return ip6t_do_table(skb, hook, in, out, &packet_raw); |
55 | } | 55 | } |
56 | 56 | ||
57 | static struct nf_hook_ops ip6t_ops[] = { | 57 | static struct nf_hook_ops ip6t_ops[] = { |
diff --git a/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c b/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c index 37a3db926953..0e40948f4fc6 100644 --- a/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c +++ b/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c | |||
@@ -18,6 +18,7 @@ | |||
18 | #include <linux/icmp.h> | 18 | #include <linux/icmp.h> |
19 | #include <linux/sysctl.h> | 19 | #include <linux/sysctl.h> |
20 | #include <net/ipv6.h> | 20 | #include <net/ipv6.h> |
21 | #include <net/inet_frag.h> | ||
21 | 22 | ||
22 | #include <linux/netfilter_ipv6.h> | 23 | #include <linux/netfilter_ipv6.h> |
23 | #include <net/netfilter/nf_conntrack.h> | 24 | #include <net/netfilter/nf_conntrack.h> |
@@ -145,7 +146,7 @@ static int ipv6_get_l4proto(const struct sk_buff *skb, unsigned int nhoff, | |||
145 | } | 146 | } |
146 | 147 | ||
147 | static unsigned int ipv6_confirm(unsigned int hooknum, | 148 | static unsigned int ipv6_confirm(unsigned int hooknum, |
148 | struct sk_buff **pskb, | 149 | struct sk_buff *skb, |
149 | const struct net_device *in, | 150 | const struct net_device *in, |
150 | const struct net_device *out, | 151 | const struct net_device *out, |
151 | int (*okfn)(struct sk_buff *)) | 152 | int (*okfn)(struct sk_buff *)) |
@@ -155,12 +156,12 @@ static unsigned int ipv6_confirm(unsigned int hooknum, | |||
155 | struct nf_conntrack_helper *helper; | 156 | struct nf_conntrack_helper *helper; |
156 | enum ip_conntrack_info ctinfo; | 157 | enum ip_conntrack_info ctinfo; |
157 | unsigned int ret, protoff; | 158 | unsigned int ret, protoff; |
158 | unsigned int extoff = (u8 *)(ipv6_hdr(*pskb) + 1) - (*pskb)->data; | 159 | unsigned int extoff = (u8 *)(ipv6_hdr(skb) + 1) - skb->data; |
159 | unsigned char pnum = ipv6_hdr(*pskb)->nexthdr; | 160 | unsigned char pnum = ipv6_hdr(skb)->nexthdr; |
160 | 161 | ||
161 | 162 | ||
162 | /* This is where we call the helper: as the packet goes out. */ | 163 | /* This is where we call the helper: as the packet goes out. */ |
163 | ct = nf_ct_get(*pskb, &ctinfo); | 164 | ct = nf_ct_get(skb, &ctinfo); |
164 | if (!ct || ctinfo == IP_CT_RELATED + IP_CT_IS_REPLY) | 165 | if (!ct || ctinfo == IP_CT_RELATED + IP_CT_IS_REPLY) |
165 | goto out; | 166 | goto out; |
166 | 167 | ||
@@ -172,23 +173,23 @@ static unsigned int ipv6_confirm(unsigned int hooknum, | |||
172 | if (!helper) | 173 | if (!helper) |
173 | goto out; | 174 | goto out; |
174 | 175 | ||
175 | protoff = nf_ct_ipv6_skip_exthdr(*pskb, extoff, &pnum, | 176 | protoff = nf_ct_ipv6_skip_exthdr(skb, extoff, &pnum, |
176 | (*pskb)->len - extoff); | 177 | skb->len - extoff); |
177 | if (protoff > (*pskb)->len || pnum == NEXTHDR_FRAGMENT) { | 178 | if (protoff > skb->len || pnum == NEXTHDR_FRAGMENT) { |
178 | pr_debug("proto header not found\n"); | 179 | pr_debug("proto header not found\n"); |
179 | return NF_ACCEPT; | 180 | return NF_ACCEPT; |
180 | } | 181 | } |
181 | 182 | ||
182 | ret = helper->help(pskb, protoff, ct, ctinfo); | 183 | ret = helper->help(skb, protoff, ct, ctinfo); |
183 | if (ret != NF_ACCEPT) | 184 | if (ret != NF_ACCEPT) |
184 | return ret; | 185 | return ret; |
185 | out: | 186 | out: |
186 | /* We've seen it coming out the other side: confirm it */ | 187 | /* We've seen it coming out the other side: confirm it */ |
187 | return nf_conntrack_confirm(pskb); | 188 | return nf_conntrack_confirm(skb); |
188 | } | 189 | } |
189 | 190 | ||
190 | static unsigned int ipv6_defrag(unsigned int hooknum, | 191 | static unsigned int ipv6_defrag(unsigned int hooknum, |
191 | struct sk_buff **pskb, | 192 | struct sk_buff *skb, |
192 | const struct net_device *in, | 193 | const struct net_device *in, |
193 | const struct net_device *out, | 194 | const struct net_device *out, |
194 | int (*okfn)(struct sk_buff *)) | 195 | int (*okfn)(struct sk_buff *)) |
@@ -196,17 +197,17 @@ static unsigned int ipv6_defrag(unsigned int hooknum, | |||
196 | struct sk_buff *reasm; | 197 | struct sk_buff *reasm; |
197 | 198 | ||
198 | /* Previously seen (loopback)? */ | 199 | /* Previously seen (loopback)? */ |
199 | if ((*pskb)->nfct) | 200 | if (skb->nfct) |
200 | return NF_ACCEPT; | 201 | return NF_ACCEPT; |
201 | 202 | ||
202 | reasm = nf_ct_frag6_gather(*pskb); | 203 | reasm = nf_ct_frag6_gather(skb); |
203 | 204 | ||
204 | /* queued */ | 205 | /* queued */ |
205 | if (reasm == NULL) | 206 | if (reasm == NULL) |
206 | return NF_STOLEN; | 207 | return NF_STOLEN; |
207 | 208 | ||
208 | /* error occured or not fragmented */ | 209 | /* error occured or not fragmented */ |
209 | if (reasm == *pskb) | 210 | if (reasm == skb) |
210 | return NF_ACCEPT; | 211 | return NF_ACCEPT; |
211 | 212 | ||
212 | nf_ct_frag6_output(hooknum, reasm, (struct net_device *)in, | 213 | nf_ct_frag6_output(hooknum, reasm, (struct net_device *)in, |
@@ -216,12 +217,12 @@ static unsigned int ipv6_defrag(unsigned int hooknum, | |||
216 | } | 217 | } |
217 | 218 | ||
218 | static unsigned int ipv6_conntrack_in(unsigned int hooknum, | 219 | static unsigned int ipv6_conntrack_in(unsigned int hooknum, |
219 | struct sk_buff **pskb, | 220 | struct sk_buff *skb, |
220 | const struct net_device *in, | 221 | const struct net_device *in, |
221 | const struct net_device *out, | 222 | const struct net_device *out, |
222 | int (*okfn)(struct sk_buff *)) | 223 | int (*okfn)(struct sk_buff *)) |
223 | { | 224 | { |
224 | struct sk_buff *reasm = (*pskb)->nfct_reasm; | 225 | struct sk_buff *reasm = skb->nfct_reasm; |
225 | 226 | ||
226 | /* This packet is fragmented and has reassembled packet. */ | 227 | /* This packet is fragmented and has reassembled packet. */ |
227 | if (reasm) { | 228 | if (reasm) { |
@@ -229,32 +230,32 @@ static unsigned int ipv6_conntrack_in(unsigned int hooknum, | |||
229 | if (!reasm->nfct) { | 230 | if (!reasm->nfct) { |
230 | unsigned int ret; | 231 | unsigned int ret; |
231 | 232 | ||
232 | ret = nf_conntrack_in(PF_INET6, hooknum, &reasm); | 233 | ret = nf_conntrack_in(PF_INET6, hooknum, reasm); |
233 | if (ret != NF_ACCEPT) | 234 | if (ret != NF_ACCEPT) |
234 | return ret; | 235 | return ret; |
235 | } | 236 | } |
236 | nf_conntrack_get(reasm->nfct); | 237 | nf_conntrack_get(reasm->nfct); |
237 | (*pskb)->nfct = reasm->nfct; | 238 | skb->nfct = reasm->nfct; |
238 | (*pskb)->nfctinfo = reasm->nfctinfo; | 239 | skb->nfctinfo = reasm->nfctinfo; |
239 | return NF_ACCEPT; | 240 | return NF_ACCEPT; |
240 | } | 241 | } |
241 | 242 | ||
242 | return nf_conntrack_in(PF_INET6, hooknum, pskb); | 243 | return nf_conntrack_in(PF_INET6, hooknum, skb); |
243 | } | 244 | } |
244 | 245 | ||
245 | static unsigned int ipv6_conntrack_local(unsigned int hooknum, | 246 | static unsigned int ipv6_conntrack_local(unsigned int hooknum, |
246 | struct sk_buff **pskb, | 247 | struct sk_buff *skb, |
247 | const struct net_device *in, | 248 | const struct net_device *in, |
248 | const struct net_device *out, | 249 | const struct net_device *out, |
249 | int (*okfn)(struct sk_buff *)) | 250 | int (*okfn)(struct sk_buff *)) |
250 | { | 251 | { |
251 | /* root is playing with raw sockets. */ | 252 | /* root is playing with raw sockets. */ |
252 | if ((*pskb)->len < sizeof(struct ipv6hdr)) { | 253 | if (skb->len < sizeof(struct ipv6hdr)) { |
253 | if (net_ratelimit()) | 254 | if (net_ratelimit()) |
254 | printk("ipv6_conntrack_local: packet too short\n"); | 255 | printk("ipv6_conntrack_local: packet too short\n"); |
255 | return NF_ACCEPT; | 256 | return NF_ACCEPT; |
256 | } | 257 | } |
257 | return ipv6_conntrack_in(hooknum, pskb, in, out, okfn); | 258 | return ipv6_conntrack_in(hooknum, skb, in, out, okfn); |
258 | } | 259 | } |
259 | 260 | ||
260 | static struct nf_hook_ops ipv6_conntrack_ops[] = { | 261 | static struct nf_hook_ops ipv6_conntrack_ops[] = { |
@@ -307,7 +308,7 @@ static ctl_table nf_ct_ipv6_sysctl_table[] = { | |||
307 | { | 308 | { |
308 | .ctl_name = NET_NF_CONNTRACK_FRAG6_TIMEOUT, | 309 | .ctl_name = NET_NF_CONNTRACK_FRAG6_TIMEOUT, |
309 | .procname = "nf_conntrack_frag6_timeout", | 310 | .procname = "nf_conntrack_frag6_timeout", |
310 | .data = &nf_ct_frag6_timeout, | 311 | .data = &nf_frags_ctl.timeout, |
311 | .maxlen = sizeof(unsigned int), | 312 | .maxlen = sizeof(unsigned int), |
312 | .mode = 0644, | 313 | .mode = 0644, |
313 | .proc_handler = &proc_dointvec_jiffies, | 314 | .proc_handler = &proc_dointvec_jiffies, |
@@ -315,7 +316,7 @@ static ctl_table nf_ct_ipv6_sysctl_table[] = { | |||
315 | { | 316 | { |
316 | .ctl_name = NET_NF_CONNTRACK_FRAG6_LOW_THRESH, | 317 | .ctl_name = NET_NF_CONNTRACK_FRAG6_LOW_THRESH, |
317 | .procname = "nf_conntrack_frag6_low_thresh", | 318 | .procname = "nf_conntrack_frag6_low_thresh", |
318 | .data = &nf_ct_frag6_low_thresh, | 319 | .data = &nf_frags_ctl.low_thresh, |
319 | .maxlen = sizeof(unsigned int), | 320 | .maxlen = sizeof(unsigned int), |
320 | .mode = 0644, | 321 | .mode = 0644, |
321 | .proc_handler = &proc_dointvec, | 322 | .proc_handler = &proc_dointvec, |
@@ -323,7 +324,7 @@ static ctl_table nf_ct_ipv6_sysctl_table[] = { | |||
323 | { | 324 | { |
324 | .ctl_name = NET_NF_CONNTRACK_FRAG6_HIGH_THRESH, | 325 | .ctl_name = NET_NF_CONNTRACK_FRAG6_HIGH_THRESH, |
325 | .procname = "nf_conntrack_frag6_high_thresh", | 326 | .procname = "nf_conntrack_frag6_high_thresh", |
326 | .data = &nf_ct_frag6_high_thresh, | 327 | .data = &nf_frags_ctl.high_thresh, |
327 | .maxlen = sizeof(unsigned int), | 328 | .maxlen = sizeof(unsigned int), |
328 | .mode = 0644, | 329 | .mode = 0644, |
329 | .proc_handler = &proc_dointvec, | 330 | .proc_handler = &proc_dointvec, |
diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c index 25442a8c1ba8..726fafd41961 100644 --- a/net/ipv6/netfilter/nf_conntrack_reasm.c +++ b/net/ipv6/netfilter/nf_conntrack_reasm.c | |||
@@ -31,6 +31,7 @@ | |||
31 | 31 | ||
32 | #include <net/sock.h> | 32 | #include <net/sock.h> |
33 | #include <net/snmp.h> | 33 | #include <net/snmp.h> |
34 | #include <net/inet_frag.h> | ||
34 | 35 | ||
35 | #include <net/ipv6.h> | 36 | #include <net/ipv6.h> |
36 | #include <net/protocol.h> | 37 | #include <net/protocol.h> |
@@ -48,10 +49,6 @@ | |||
48 | #define NF_CT_FRAG6_LOW_THRESH 196608 /* == 192*1024 */ | 49 | #define NF_CT_FRAG6_LOW_THRESH 196608 /* == 192*1024 */ |
49 | #define NF_CT_FRAG6_TIMEOUT IPV6_FRAG_TIMEOUT | 50 | #define NF_CT_FRAG6_TIMEOUT IPV6_FRAG_TIMEOUT |
50 | 51 | ||
51 | unsigned int nf_ct_frag6_high_thresh __read_mostly = 256*1024; | ||
52 | unsigned int nf_ct_frag6_low_thresh __read_mostly = 192*1024; | ||
53 | unsigned long nf_ct_frag6_timeout __read_mostly = IPV6_FRAG_TIMEOUT; | ||
54 | |||
55 | struct nf_ct_frag6_skb_cb | 52 | struct nf_ct_frag6_skb_cb |
56 | { | 53 | { |
57 | struct inet6_skb_parm h; | 54 | struct inet6_skb_parm h; |
@@ -63,51 +60,24 @@ struct nf_ct_frag6_skb_cb | |||
63 | 60 | ||
64 | struct nf_ct_frag6_queue | 61 | struct nf_ct_frag6_queue |
65 | { | 62 | { |
66 | struct hlist_node list; | 63 | struct inet_frag_queue q; |
67 | struct list_head lru_list; /* lru list member */ | ||
68 | 64 | ||
69 | __be32 id; /* fragment id */ | 65 | __be32 id; /* fragment id */ |
70 | struct in6_addr saddr; | 66 | struct in6_addr saddr; |
71 | struct in6_addr daddr; | 67 | struct in6_addr daddr; |
72 | 68 | ||
73 | spinlock_t lock; | ||
74 | atomic_t refcnt; | ||
75 | struct timer_list timer; /* expire timer */ | ||
76 | struct sk_buff *fragments; | ||
77 | int len; | ||
78 | int meat; | ||
79 | ktime_t stamp; | ||
80 | unsigned int csum; | 69 | unsigned int csum; |
81 | __u8 last_in; /* has first/last segment arrived? */ | ||
82 | #define COMPLETE 4 | ||
83 | #define FIRST_IN 2 | ||
84 | #define LAST_IN 1 | ||
85 | __u16 nhoffset; | 70 | __u16 nhoffset; |
86 | }; | 71 | }; |
87 | 72 | ||
88 | /* Hash table. */ | 73 | struct inet_frags_ctl nf_frags_ctl __read_mostly = { |
89 | 74 | .high_thresh = 256 * 1024, | |
90 | #define FRAG6Q_HASHSZ 64 | 75 | .low_thresh = 192 * 1024, |
91 | 76 | .timeout = IPV6_FRAG_TIMEOUT, | |
92 | static struct hlist_head nf_ct_frag6_hash[FRAG6Q_HASHSZ]; | 77 | .secret_interval = 10 * 60 * HZ, |
93 | static DEFINE_RWLOCK(nf_ct_frag6_lock); | 78 | }; |
94 | static u32 nf_ct_frag6_hash_rnd; | ||
95 | static LIST_HEAD(nf_ct_frag6_lru_list); | ||
96 | int nf_ct_frag6_nqueues = 0; | ||
97 | |||
98 | static __inline__ void __fq_unlink(struct nf_ct_frag6_queue *fq) | ||
99 | { | ||
100 | hlist_del(&fq->list); | ||
101 | list_del(&fq->lru_list); | ||
102 | nf_ct_frag6_nqueues--; | ||
103 | } | ||
104 | 79 | ||
105 | static __inline__ void fq_unlink(struct nf_ct_frag6_queue *fq) | 80 | static struct inet_frags nf_frags; |
106 | { | ||
107 | write_lock(&nf_ct_frag6_lock); | ||
108 | __fq_unlink(fq); | ||
109 | write_unlock(&nf_ct_frag6_lock); | ||
110 | } | ||
111 | 81 | ||
112 | static unsigned int ip6qhashfn(__be32 id, struct in6_addr *saddr, | 82 | static unsigned int ip6qhashfn(__be32 id, struct in6_addr *saddr, |
113 | struct in6_addr *daddr) | 83 | struct in6_addr *daddr) |
@@ -120,7 +90,7 @@ static unsigned int ip6qhashfn(__be32 id, struct in6_addr *saddr, | |||
120 | 90 | ||
121 | a += JHASH_GOLDEN_RATIO; | 91 | a += JHASH_GOLDEN_RATIO; |
122 | b += JHASH_GOLDEN_RATIO; | 92 | b += JHASH_GOLDEN_RATIO; |
123 | c += nf_ct_frag6_hash_rnd; | 93 | c += nf_frags.rnd; |
124 | __jhash_mix(a, b, c); | 94 | __jhash_mix(a, b, c); |
125 | 95 | ||
126 | a += (__force u32)saddr->s6_addr32[3]; | 96 | a += (__force u32)saddr->s6_addr32[3]; |
@@ -133,100 +103,54 @@ static unsigned int ip6qhashfn(__be32 id, struct in6_addr *saddr, | |||
133 | c += (__force u32)id; | 103 | c += (__force u32)id; |
134 | __jhash_mix(a, b, c); | 104 | __jhash_mix(a, b, c); |
135 | 105 | ||
136 | return c & (FRAG6Q_HASHSZ - 1); | 106 | return c & (INETFRAGS_HASHSZ - 1); |
137 | } | 107 | } |
138 | 108 | ||
139 | static struct timer_list nf_ct_frag6_secret_timer; | 109 | static unsigned int nf_hashfn(struct inet_frag_queue *q) |
140 | int nf_ct_frag6_secret_interval = 10 * 60 * HZ; | ||
141 | |||
142 | static void nf_ct_frag6_secret_rebuild(unsigned long dummy) | ||
143 | { | 110 | { |
144 | unsigned long now = jiffies; | 111 | struct nf_ct_frag6_queue *nq; |
145 | int i; | ||
146 | |||
147 | write_lock(&nf_ct_frag6_lock); | ||
148 | get_random_bytes(&nf_ct_frag6_hash_rnd, sizeof(u32)); | ||
149 | for (i = 0; i < FRAG6Q_HASHSZ; i++) { | ||
150 | struct nf_ct_frag6_queue *q; | ||
151 | struct hlist_node *p, *n; | ||
152 | |||
153 | hlist_for_each_entry_safe(q, p, n, &nf_ct_frag6_hash[i], list) { | ||
154 | unsigned int hval = ip6qhashfn(q->id, | ||
155 | &q->saddr, | ||
156 | &q->daddr); | ||
157 | if (hval != i) { | ||
158 | hlist_del(&q->list); | ||
159 | /* Relink to new hash chain. */ | ||
160 | hlist_add_head(&q->list, | ||
161 | &nf_ct_frag6_hash[hval]); | ||
162 | } | ||
163 | } | ||
164 | } | ||
165 | write_unlock(&nf_ct_frag6_lock); | ||
166 | 112 | ||
167 | mod_timer(&nf_ct_frag6_secret_timer, now + nf_ct_frag6_secret_interval); | 113 | nq = container_of(q, struct nf_ct_frag6_queue, q); |
114 | return ip6qhashfn(nq->id, &nq->saddr, &nq->daddr); | ||
168 | } | 115 | } |
169 | 116 | ||
170 | atomic_t nf_ct_frag6_mem = ATOMIC_INIT(0); | 117 | static void nf_skb_free(struct sk_buff *skb) |
118 | { | ||
119 | if (NFCT_FRAG6_CB(skb)->orig) | ||
120 | kfree_skb(NFCT_FRAG6_CB(skb)->orig); | ||
121 | } | ||
171 | 122 | ||
172 | /* Memory Tracking Functions. */ | 123 | /* Memory Tracking Functions. */ |
173 | static inline void frag_kfree_skb(struct sk_buff *skb, unsigned int *work) | 124 | static inline void frag_kfree_skb(struct sk_buff *skb, unsigned int *work) |
174 | { | 125 | { |
175 | if (work) | 126 | if (work) |
176 | *work -= skb->truesize; | 127 | *work -= skb->truesize; |
177 | atomic_sub(skb->truesize, &nf_ct_frag6_mem); | 128 | atomic_sub(skb->truesize, &nf_frags.mem); |
178 | if (NFCT_FRAG6_CB(skb)->orig) | 129 | nf_skb_free(skb); |
179 | kfree_skb(NFCT_FRAG6_CB(skb)->orig); | ||
180 | |||
181 | kfree_skb(skb); | 130 | kfree_skb(skb); |
182 | } | 131 | } |
183 | 132 | ||
184 | static inline void frag_free_queue(struct nf_ct_frag6_queue *fq, | 133 | static void nf_frag_free(struct inet_frag_queue *q) |
185 | unsigned int *work) | ||
186 | { | 134 | { |
187 | if (work) | 135 | kfree(container_of(q, struct nf_ct_frag6_queue, q)); |
188 | *work -= sizeof(struct nf_ct_frag6_queue); | ||
189 | atomic_sub(sizeof(struct nf_ct_frag6_queue), &nf_ct_frag6_mem); | ||
190 | kfree(fq); | ||
191 | } | 136 | } |
192 | 137 | ||
193 | static inline struct nf_ct_frag6_queue *frag_alloc_queue(void) | 138 | static inline struct nf_ct_frag6_queue *frag_alloc_queue(void) |
194 | { | 139 | { |
195 | struct nf_ct_frag6_queue *fq = kmalloc(sizeof(struct nf_ct_frag6_queue), GFP_ATOMIC); | 140 | struct nf_ct_frag6_queue *fq; |
196 | 141 | ||
197 | if (!fq) | 142 | fq = kzalloc(sizeof(struct nf_ct_frag6_queue), GFP_ATOMIC); |
143 | if (fq == NULL) | ||
198 | return NULL; | 144 | return NULL; |
199 | atomic_add(sizeof(struct nf_ct_frag6_queue), &nf_ct_frag6_mem); | 145 | atomic_add(sizeof(struct nf_ct_frag6_queue), &nf_frags.mem); |
200 | return fq; | 146 | return fq; |
201 | } | 147 | } |
202 | 148 | ||
203 | /* Destruction primitives. */ | 149 | /* Destruction primitives. */ |
204 | 150 | ||
205 | /* Complete destruction of fq. */ | 151 | static __inline__ void fq_put(struct nf_ct_frag6_queue *fq) |
206 | static void nf_ct_frag6_destroy(struct nf_ct_frag6_queue *fq, | ||
207 | unsigned int *work) | ||
208 | { | 152 | { |
209 | struct sk_buff *fp; | 153 | inet_frag_put(&fq->q, &nf_frags); |
210 | |||
211 | BUG_TRAP(fq->last_in&COMPLETE); | ||
212 | BUG_TRAP(del_timer(&fq->timer) == 0); | ||
213 | |||
214 | /* Release all fragment data. */ | ||
215 | fp = fq->fragments; | ||
216 | while (fp) { | ||
217 | struct sk_buff *xp = fp->next; | ||
218 | |||
219 | frag_kfree_skb(fp, work); | ||
220 | fp = xp; | ||
221 | } | ||
222 | |||
223 | frag_free_queue(fq, work); | ||
224 | } | ||
225 | |||
226 | static __inline__ void fq_put(struct nf_ct_frag6_queue *fq, unsigned int *work) | ||
227 | { | ||
228 | if (atomic_dec_and_test(&fq->refcnt)) | ||
229 | nf_ct_frag6_destroy(fq, work); | ||
230 | } | 154 | } |
231 | 155 | ||
232 | /* Kill fq entry. It is not destroyed immediately, | 156 | /* Kill fq entry. It is not destroyed immediately, |
@@ -234,62 +158,28 @@ static __inline__ void fq_put(struct nf_ct_frag6_queue *fq, unsigned int *work) | |||
234 | */ | 158 | */ |
235 | static __inline__ void fq_kill(struct nf_ct_frag6_queue *fq) | 159 | static __inline__ void fq_kill(struct nf_ct_frag6_queue *fq) |
236 | { | 160 | { |
237 | if (del_timer(&fq->timer)) | 161 | inet_frag_kill(&fq->q, &nf_frags); |
238 | atomic_dec(&fq->refcnt); | ||
239 | |||
240 | if (!(fq->last_in & COMPLETE)) { | ||
241 | fq_unlink(fq); | ||
242 | atomic_dec(&fq->refcnt); | ||
243 | fq->last_in |= COMPLETE; | ||
244 | } | ||
245 | } | 162 | } |
246 | 163 | ||
247 | static void nf_ct_frag6_evictor(void) | 164 | static void nf_ct_frag6_evictor(void) |
248 | { | 165 | { |
249 | struct nf_ct_frag6_queue *fq; | 166 | inet_frag_evictor(&nf_frags); |
250 | struct list_head *tmp; | ||
251 | unsigned int work; | ||
252 | |||
253 | work = atomic_read(&nf_ct_frag6_mem); | ||
254 | if (work <= nf_ct_frag6_low_thresh) | ||
255 | return; | ||
256 | |||
257 | work -= nf_ct_frag6_low_thresh; | ||
258 | while (work > 0) { | ||
259 | read_lock(&nf_ct_frag6_lock); | ||
260 | if (list_empty(&nf_ct_frag6_lru_list)) { | ||
261 | read_unlock(&nf_ct_frag6_lock); | ||
262 | return; | ||
263 | } | ||
264 | tmp = nf_ct_frag6_lru_list.next; | ||
265 | BUG_ON(tmp == NULL); | ||
266 | fq = list_entry(tmp, struct nf_ct_frag6_queue, lru_list); | ||
267 | atomic_inc(&fq->refcnt); | ||
268 | read_unlock(&nf_ct_frag6_lock); | ||
269 | |||
270 | spin_lock(&fq->lock); | ||
271 | if (!(fq->last_in&COMPLETE)) | ||
272 | fq_kill(fq); | ||
273 | spin_unlock(&fq->lock); | ||
274 | |||
275 | fq_put(fq, &work); | ||
276 | } | ||
277 | } | 167 | } |
278 | 168 | ||
279 | static void nf_ct_frag6_expire(unsigned long data) | 169 | static void nf_ct_frag6_expire(unsigned long data) |
280 | { | 170 | { |
281 | struct nf_ct_frag6_queue *fq = (struct nf_ct_frag6_queue *) data; | 171 | struct nf_ct_frag6_queue *fq = (struct nf_ct_frag6_queue *) data; |
282 | 172 | ||
283 | spin_lock(&fq->lock); | 173 | spin_lock(&fq->q.lock); |
284 | 174 | ||
285 | if (fq->last_in & COMPLETE) | 175 | if (fq->q.last_in & COMPLETE) |
286 | goto out; | 176 | goto out; |
287 | 177 | ||
288 | fq_kill(fq); | 178 | fq_kill(fq); |
289 | 179 | ||
290 | out: | 180 | out: |
291 | spin_unlock(&fq->lock); | 181 | spin_unlock(&fq->q.lock); |
292 | fq_put(fq, NULL); | 182 | fq_put(fq); |
293 | } | 183 | } |
294 | 184 | ||
295 | /* Creation primitives. */ | 185 | /* Creation primitives. */ |
@@ -302,31 +192,31 @@ static struct nf_ct_frag6_queue *nf_ct_frag6_intern(unsigned int hash, | |||
302 | struct hlist_node *n; | 192 | struct hlist_node *n; |
303 | #endif | 193 | #endif |
304 | 194 | ||
305 | write_lock(&nf_ct_frag6_lock); | 195 | write_lock(&nf_frags.lock); |
306 | #ifdef CONFIG_SMP | 196 | #ifdef CONFIG_SMP |
307 | hlist_for_each_entry(fq, n, &nf_ct_frag6_hash[hash], list) { | 197 | hlist_for_each_entry(fq, n, &nf_frags.hash[hash], q.list) { |
308 | if (fq->id == fq_in->id && | 198 | if (fq->id == fq_in->id && |
309 | ipv6_addr_equal(&fq_in->saddr, &fq->saddr) && | 199 | ipv6_addr_equal(&fq_in->saddr, &fq->saddr) && |
310 | ipv6_addr_equal(&fq_in->daddr, &fq->daddr)) { | 200 | ipv6_addr_equal(&fq_in->daddr, &fq->daddr)) { |
311 | atomic_inc(&fq->refcnt); | 201 | atomic_inc(&fq->q.refcnt); |
312 | write_unlock(&nf_ct_frag6_lock); | 202 | write_unlock(&nf_frags.lock); |
313 | fq_in->last_in |= COMPLETE; | 203 | fq_in->q.last_in |= COMPLETE; |
314 | fq_put(fq_in, NULL); | 204 | fq_put(fq_in); |
315 | return fq; | 205 | return fq; |
316 | } | 206 | } |
317 | } | 207 | } |
318 | #endif | 208 | #endif |
319 | fq = fq_in; | 209 | fq = fq_in; |
320 | 210 | ||
321 | if (!mod_timer(&fq->timer, jiffies + nf_ct_frag6_timeout)) | 211 | if (!mod_timer(&fq->q.timer, jiffies + nf_frags_ctl.timeout)) |
322 | atomic_inc(&fq->refcnt); | 212 | atomic_inc(&fq->q.refcnt); |
323 | 213 | ||
324 | atomic_inc(&fq->refcnt); | 214 | atomic_inc(&fq->q.refcnt); |
325 | hlist_add_head(&fq->list, &nf_ct_frag6_hash[hash]); | 215 | hlist_add_head(&fq->q.list, &nf_frags.hash[hash]); |
326 | INIT_LIST_HEAD(&fq->lru_list); | 216 | INIT_LIST_HEAD(&fq->q.lru_list); |
327 | list_add_tail(&fq->lru_list, &nf_ct_frag6_lru_list); | 217 | list_add_tail(&fq->q.lru_list, &nf_frags.lru_list); |
328 | nf_ct_frag6_nqueues++; | 218 | nf_frags.nqueues++; |
329 | write_unlock(&nf_ct_frag6_lock); | 219 | write_unlock(&nf_frags.lock); |
330 | return fq; | 220 | return fq; |
331 | } | 221 | } |
332 | 222 | ||
@@ -341,15 +231,13 @@ nf_ct_frag6_create(unsigned int hash, __be32 id, struct in6_addr *src, str | |||
341 | goto oom; | 231 | goto oom; |
342 | } | 232 | } |
343 | 233 | ||
344 | memset(fq, 0, sizeof(struct nf_ct_frag6_queue)); | ||
345 | |||
346 | fq->id = id; | 234 | fq->id = id; |
347 | ipv6_addr_copy(&fq->saddr, src); | 235 | ipv6_addr_copy(&fq->saddr, src); |
348 | ipv6_addr_copy(&fq->daddr, dst); | 236 | ipv6_addr_copy(&fq->daddr, dst); |
349 | 237 | ||
350 | setup_timer(&fq->timer, nf_ct_frag6_expire, (unsigned long)fq); | 238 | setup_timer(&fq->q.timer, nf_ct_frag6_expire, (unsigned long)fq); |
351 | spin_lock_init(&fq->lock); | 239 | spin_lock_init(&fq->q.lock); |
352 | atomic_set(&fq->refcnt, 1); | 240 | atomic_set(&fq->q.refcnt, 1); |
353 | 241 | ||
354 | return nf_ct_frag6_intern(hash, fq); | 242 | return nf_ct_frag6_intern(hash, fq); |
355 | 243 | ||
@@ -364,17 +252,17 @@ fq_find(__be32 id, struct in6_addr *src, struct in6_addr *dst) | |||
364 | struct hlist_node *n; | 252 | struct hlist_node *n; |
365 | unsigned int hash = ip6qhashfn(id, src, dst); | 253 | unsigned int hash = ip6qhashfn(id, src, dst); |
366 | 254 | ||
367 | read_lock(&nf_ct_frag6_lock); | 255 | read_lock(&nf_frags.lock); |
368 | hlist_for_each_entry(fq, n, &nf_ct_frag6_hash[hash], list) { | 256 | hlist_for_each_entry(fq, n, &nf_frags.hash[hash], q.list) { |
369 | if (fq->id == id && | 257 | if (fq->id == id && |
370 | ipv6_addr_equal(src, &fq->saddr) && | 258 | ipv6_addr_equal(src, &fq->saddr) && |
371 | ipv6_addr_equal(dst, &fq->daddr)) { | 259 | ipv6_addr_equal(dst, &fq->daddr)) { |
372 | atomic_inc(&fq->refcnt); | 260 | atomic_inc(&fq->q.refcnt); |
373 | read_unlock(&nf_ct_frag6_lock); | 261 | read_unlock(&nf_frags.lock); |
374 | return fq; | 262 | return fq; |
375 | } | 263 | } |
376 | } | 264 | } |
377 | read_unlock(&nf_ct_frag6_lock); | 265 | read_unlock(&nf_frags.lock); |
378 | 266 | ||
379 | return nf_ct_frag6_create(hash, id, src, dst); | 267 | return nf_ct_frag6_create(hash, id, src, dst); |
380 | } | 268 | } |
@@ -386,7 +274,7 @@ static int nf_ct_frag6_queue(struct nf_ct_frag6_queue *fq, struct sk_buff *skb, | |||
386 | struct sk_buff *prev, *next; | 274 | struct sk_buff *prev, *next; |
387 | int offset, end; | 275 | int offset, end; |
388 | 276 | ||
389 | if (fq->last_in & COMPLETE) { | 277 | if (fq->q.last_in & COMPLETE) { |
390 | pr_debug("Allready completed\n"); | 278 | pr_debug("Allready completed\n"); |
391 | goto err; | 279 | goto err; |
392 | } | 280 | } |
@@ -412,13 +300,13 @@ static int nf_ct_frag6_queue(struct nf_ct_frag6_queue *fq, struct sk_buff *skb, | |||
412 | /* If we already have some bits beyond end | 300 | /* If we already have some bits beyond end |
413 | * or have different end, the segment is corrupted. | 301 | * or have different end, the segment is corrupted. |
414 | */ | 302 | */ |
415 | if (end < fq->len || | 303 | if (end < fq->q.len || |
416 | ((fq->last_in & LAST_IN) && end != fq->len)) { | 304 | ((fq->q.last_in & LAST_IN) && end != fq->q.len)) { |
417 | pr_debug("already received last fragment\n"); | 305 | pr_debug("already received last fragment\n"); |
418 | goto err; | 306 | goto err; |
419 | } | 307 | } |
420 | fq->last_in |= LAST_IN; | 308 | fq->q.last_in |= LAST_IN; |
421 | fq->len = end; | 309 | fq->q.len = end; |
422 | } else { | 310 | } else { |
423 | /* Check if the fragment is rounded to 8 bytes. | 311 | /* Check if the fragment is rounded to 8 bytes. |
424 | * Required by the RFC. | 312 | * Required by the RFC. |
@@ -430,13 +318,13 @@ static int nf_ct_frag6_queue(struct nf_ct_frag6_queue *fq, struct sk_buff *skb, | |||
430 | pr_debug("end of fragment not rounded to 8 bytes.\n"); | 318 | pr_debug("end of fragment not rounded to 8 bytes.\n"); |
431 | return -1; | 319 | return -1; |
432 | } | 320 | } |
433 | if (end > fq->len) { | 321 | if (end > fq->q.len) { |
434 | /* Some bits beyond end -> corruption. */ | 322 | /* Some bits beyond end -> corruption. */ |
435 | if (fq->last_in & LAST_IN) { | 323 | if (fq->q.last_in & LAST_IN) { |
436 | pr_debug("last packet already reached.\n"); | 324 | pr_debug("last packet already reached.\n"); |
437 | goto err; | 325 | goto err; |
438 | } | 326 | } |
439 | fq->len = end; | 327 | fq->q.len = end; |
440 | } | 328 | } |
441 | } | 329 | } |
442 | 330 | ||
@@ -458,7 +346,7 @@ static int nf_ct_frag6_queue(struct nf_ct_frag6_queue *fq, struct sk_buff *skb, | |||
458 | * this fragment, right? | 346 | * this fragment, right? |
459 | */ | 347 | */ |
460 | prev = NULL; | 348 | prev = NULL; |
461 | for (next = fq->fragments; next != NULL; next = next->next) { | 349 | for (next = fq->q.fragments; next != NULL; next = next->next) { |
462 | if (NFCT_FRAG6_CB(next)->offset >= offset) | 350 | if (NFCT_FRAG6_CB(next)->offset >= offset) |
463 | break; /* bingo! */ | 351 | break; /* bingo! */ |
464 | prev = next; | 352 | prev = next; |
@@ -503,7 +391,7 @@ static int nf_ct_frag6_queue(struct nf_ct_frag6_queue *fq, struct sk_buff *skb, | |||
503 | 391 | ||
504 | /* next fragment */ | 392 | /* next fragment */ |
505 | NFCT_FRAG6_CB(next)->offset += i; | 393 | NFCT_FRAG6_CB(next)->offset += i; |
506 | fq->meat -= i; | 394 | fq->q.meat -= i; |
507 | if (next->ip_summed != CHECKSUM_UNNECESSARY) | 395 | if (next->ip_summed != CHECKSUM_UNNECESSARY) |
508 | next->ip_summed = CHECKSUM_NONE; | 396 | next->ip_summed = CHECKSUM_NONE; |
509 | break; | 397 | break; |
@@ -518,9 +406,9 @@ static int nf_ct_frag6_queue(struct nf_ct_frag6_queue *fq, struct sk_buff *skb, | |||
518 | if (prev) | 406 | if (prev) |
519 | prev->next = next; | 407 | prev->next = next; |
520 | else | 408 | else |
521 | fq->fragments = next; | 409 | fq->q.fragments = next; |
522 | 410 | ||
523 | fq->meat -= free_it->len; | 411 | fq->q.meat -= free_it->len; |
524 | frag_kfree_skb(free_it, NULL); | 412 | frag_kfree_skb(free_it, NULL); |
525 | } | 413 | } |
526 | } | 414 | } |
@@ -532,23 +420,23 @@ static int nf_ct_frag6_queue(struct nf_ct_frag6_queue *fq, struct sk_buff *skb, | |||
532 | if (prev) | 420 | if (prev) |
533 | prev->next = skb; | 421 | prev->next = skb; |
534 | else | 422 | else |
535 | fq->fragments = skb; | 423 | fq->q.fragments = skb; |
536 | 424 | ||
537 | skb->dev = NULL; | 425 | skb->dev = NULL; |
538 | fq->stamp = skb->tstamp; | 426 | fq->q.stamp = skb->tstamp; |
539 | fq->meat += skb->len; | 427 | fq->q.meat += skb->len; |
540 | atomic_add(skb->truesize, &nf_ct_frag6_mem); | 428 | atomic_add(skb->truesize, &nf_frags.mem); |
541 | 429 | ||
542 | /* The first fragment. | 430 | /* The first fragment. |
543 | * nhoffset is obtained from the first fragment, of course. | 431 | * nhoffset is obtained from the first fragment, of course. |
544 | */ | 432 | */ |
545 | if (offset == 0) { | 433 | if (offset == 0) { |
546 | fq->nhoffset = nhoff; | 434 | fq->nhoffset = nhoff; |
547 | fq->last_in |= FIRST_IN; | 435 | fq->q.last_in |= FIRST_IN; |
548 | } | 436 | } |
549 | write_lock(&nf_ct_frag6_lock); | 437 | write_lock(&nf_frags.lock); |
550 | list_move_tail(&fq->lru_list, &nf_ct_frag6_lru_list); | 438 | list_move_tail(&fq->q.lru_list, &nf_frags.lru_list); |
551 | write_unlock(&nf_ct_frag6_lock); | 439 | write_unlock(&nf_frags.lock); |
552 | return 0; | 440 | return 0; |
553 | 441 | ||
554 | err: | 442 | err: |
@@ -567,7 +455,7 @@ err: | |||
567 | static struct sk_buff * | 455 | static struct sk_buff * |
568 | nf_ct_frag6_reasm(struct nf_ct_frag6_queue *fq, struct net_device *dev) | 456 | nf_ct_frag6_reasm(struct nf_ct_frag6_queue *fq, struct net_device *dev) |
569 | { | 457 | { |
570 | struct sk_buff *fp, *op, *head = fq->fragments; | 458 | struct sk_buff *fp, *op, *head = fq->q.fragments; |
571 | int payload_len; | 459 | int payload_len; |
572 | 460 | ||
573 | fq_kill(fq); | 461 | fq_kill(fq); |
@@ -577,7 +465,7 @@ nf_ct_frag6_reasm(struct nf_ct_frag6_queue *fq, struct net_device *dev) | |||
577 | 465 | ||
578 | /* Unfragmented part is taken from the first segment. */ | 466 | /* Unfragmented part is taken from the first segment. */ |
579 | payload_len = ((head->data - skb_network_header(head)) - | 467 | payload_len = ((head->data - skb_network_header(head)) - |
580 | sizeof(struct ipv6hdr) + fq->len - | 468 | sizeof(struct ipv6hdr) + fq->q.len - |
581 | sizeof(struct frag_hdr)); | 469 | sizeof(struct frag_hdr)); |
582 | if (payload_len > IPV6_MAXPLEN) { | 470 | if (payload_len > IPV6_MAXPLEN) { |
583 | pr_debug("payload len is too large.\n"); | 471 | pr_debug("payload len is too large.\n"); |
@@ -614,7 +502,7 @@ nf_ct_frag6_reasm(struct nf_ct_frag6_queue *fq, struct net_device *dev) | |||
614 | clone->ip_summed = head->ip_summed; | 502 | clone->ip_summed = head->ip_summed; |
615 | 503 | ||
616 | NFCT_FRAG6_CB(clone)->orig = NULL; | 504 | NFCT_FRAG6_CB(clone)->orig = NULL; |
617 | atomic_add(clone->truesize, &nf_ct_frag6_mem); | 505 | atomic_add(clone->truesize, &nf_frags.mem); |
618 | } | 506 | } |
619 | 507 | ||
620 | /* We have to remove fragment header from datagram and to relocate | 508 | /* We have to remove fragment header from datagram and to relocate |
@@ -628,7 +516,7 @@ nf_ct_frag6_reasm(struct nf_ct_frag6_queue *fq, struct net_device *dev) | |||
628 | skb_shinfo(head)->frag_list = head->next; | 516 | skb_shinfo(head)->frag_list = head->next; |
629 | skb_reset_transport_header(head); | 517 | skb_reset_transport_header(head); |
630 | skb_push(head, head->data - skb_network_header(head)); | 518 | skb_push(head, head->data - skb_network_header(head)); |
631 | atomic_sub(head->truesize, &nf_ct_frag6_mem); | 519 | atomic_sub(head->truesize, &nf_frags.mem); |
632 | 520 | ||
633 | for (fp=head->next; fp; fp = fp->next) { | 521 | for (fp=head->next; fp; fp = fp->next) { |
634 | head->data_len += fp->len; | 522 | head->data_len += fp->len; |
@@ -638,12 +526,12 @@ nf_ct_frag6_reasm(struct nf_ct_frag6_queue *fq, struct net_device *dev) | |||
638 | else if (head->ip_summed == CHECKSUM_COMPLETE) | 526 | else if (head->ip_summed == CHECKSUM_COMPLETE) |
639 | head->csum = csum_add(head->csum, fp->csum); | 527 | head->csum = csum_add(head->csum, fp->csum); |
640 | head->truesize += fp->truesize; | 528 | head->truesize += fp->truesize; |
641 | atomic_sub(fp->truesize, &nf_ct_frag6_mem); | 529 | atomic_sub(fp->truesize, &nf_frags.mem); |
642 | } | 530 | } |
643 | 531 | ||
644 | head->next = NULL; | 532 | head->next = NULL; |
645 | head->dev = dev; | 533 | head->dev = dev; |
646 | head->tstamp = fq->stamp; | 534 | head->tstamp = fq->q.stamp; |
647 | ipv6_hdr(head)->payload_len = htons(payload_len); | 535 | ipv6_hdr(head)->payload_len = htons(payload_len); |
648 | 536 | ||
649 | /* Yes, and fold redundant checksum back. 8) */ | 537 | /* Yes, and fold redundant checksum back. 8) */ |
@@ -652,7 +540,7 @@ nf_ct_frag6_reasm(struct nf_ct_frag6_queue *fq, struct net_device *dev) | |||
652 | skb_network_header_len(head), | 540 | skb_network_header_len(head), |
653 | head->csum); | 541 | head->csum); |
654 | 542 | ||
655 | fq->fragments = NULL; | 543 | fq->q.fragments = NULL; |
656 | 544 | ||
657 | /* all original skbs are linked into the NFCT_FRAG6_CB(head).orig */ | 545 | /* all original skbs are linked into the NFCT_FRAG6_CB(head).orig */ |
658 | fp = skb_shinfo(head)->frag_list; | 546 | fp = skb_shinfo(head)->frag_list; |
@@ -788,7 +676,7 @@ struct sk_buff *nf_ct_frag6_gather(struct sk_buff *skb) | |||
788 | goto ret_orig; | 676 | goto ret_orig; |
789 | } | 677 | } |
790 | 678 | ||
791 | if (atomic_read(&nf_ct_frag6_mem) > nf_ct_frag6_high_thresh) | 679 | if (atomic_read(&nf_frags.mem) > nf_frags_ctl.high_thresh) |
792 | nf_ct_frag6_evictor(); | 680 | nf_ct_frag6_evictor(); |
793 | 681 | ||
794 | fq = fq_find(fhdr->identification, &hdr->saddr, &hdr->daddr); | 682 | fq = fq_find(fhdr->identification, &hdr->saddr, &hdr->daddr); |
@@ -797,23 +685,23 @@ struct sk_buff *nf_ct_frag6_gather(struct sk_buff *skb) | |||
797 | goto ret_orig; | 685 | goto ret_orig; |
798 | } | 686 | } |
799 | 687 | ||
800 | spin_lock(&fq->lock); | 688 | spin_lock(&fq->q.lock); |
801 | 689 | ||
802 | if (nf_ct_frag6_queue(fq, clone, fhdr, nhoff) < 0) { | 690 | if (nf_ct_frag6_queue(fq, clone, fhdr, nhoff) < 0) { |
803 | spin_unlock(&fq->lock); | 691 | spin_unlock(&fq->q.lock); |
804 | pr_debug("Can't insert skb to queue\n"); | 692 | pr_debug("Can't insert skb to queue\n"); |
805 | fq_put(fq, NULL); | 693 | fq_put(fq); |
806 | goto ret_orig; | 694 | goto ret_orig; |
807 | } | 695 | } |
808 | 696 | ||
809 | if (fq->last_in == (FIRST_IN|LAST_IN) && fq->meat == fq->len) { | 697 | if (fq->q.last_in == (FIRST_IN|LAST_IN) && fq->q.meat == fq->q.len) { |
810 | ret_skb = nf_ct_frag6_reasm(fq, dev); | 698 | ret_skb = nf_ct_frag6_reasm(fq, dev); |
811 | if (ret_skb == NULL) | 699 | if (ret_skb == NULL) |
812 | pr_debug("Can't reassemble fragmented packets\n"); | 700 | pr_debug("Can't reassemble fragmented packets\n"); |
813 | } | 701 | } |
814 | spin_unlock(&fq->lock); | 702 | spin_unlock(&fq->q.lock); |
815 | 703 | ||
816 | fq_put(fq, NULL); | 704 | fq_put(fq); |
817 | return ret_skb; | 705 | return ret_skb; |
818 | 706 | ||
819 | ret_orig: | 707 | ret_orig: |
@@ -859,20 +747,20 @@ int nf_ct_frag6_kfree_frags(struct sk_buff *skb) | |||
859 | 747 | ||
860 | int nf_ct_frag6_init(void) | 748 | int nf_ct_frag6_init(void) |
861 | { | 749 | { |
862 | nf_ct_frag6_hash_rnd = (u32) ((num_physpages ^ (num_physpages>>7)) ^ | 750 | nf_frags.ctl = &nf_frags_ctl; |
863 | (jiffies ^ (jiffies >> 6))); | 751 | nf_frags.hashfn = nf_hashfn; |
864 | 752 | nf_frags.destructor = nf_frag_free; | |
865 | setup_timer(&nf_ct_frag6_secret_timer, nf_ct_frag6_secret_rebuild, 0); | 753 | nf_frags.skb_free = nf_skb_free; |
866 | nf_ct_frag6_secret_timer.expires = jiffies | 754 | nf_frags.qsize = sizeof(struct nf_ct_frag6_queue); |
867 | + nf_ct_frag6_secret_interval; | 755 | inet_frags_init(&nf_frags); |
868 | add_timer(&nf_ct_frag6_secret_timer); | ||
869 | 756 | ||
870 | return 0; | 757 | return 0; |
871 | } | 758 | } |
872 | 759 | ||
873 | void nf_ct_frag6_cleanup(void) | 760 | void nf_ct_frag6_cleanup(void) |
874 | { | 761 | { |
875 | del_timer(&nf_ct_frag6_secret_timer); | 762 | inet_frags_fini(&nf_frags); |
876 | nf_ct_frag6_low_thresh = 0; | 763 | |
764 | nf_frags_ctl.low_thresh = 0; | ||
877 | nf_ct_frag6_evictor(); | 765 | nf_ct_frag6_evictor(); |
878 | } | 766 | } |
diff --git a/net/ipv6/proc.c b/net/ipv6/proc.c index db945018579e..be526ad92543 100644 --- a/net/ipv6/proc.c +++ b/net/ipv6/proc.c | |||
@@ -54,7 +54,7 @@ static int sockstat6_seq_show(struct seq_file *seq, void *v) | |||
54 | seq_printf(seq, "RAW6: inuse %d\n", | 54 | seq_printf(seq, "RAW6: inuse %d\n", |
55 | fold_prot_inuse(&rawv6_prot)); | 55 | fold_prot_inuse(&rawv6_prot)); |
56 | seq_printf(seq, "FRAG6: inuse %d memory %d\n", | 56 | seq_printf(seq, "FRAG6: inuse %d memory %d\n", |
57 | ip6_frag_nqueues, atomic_read(&ip6_frag_mem)); | 57 | ip6_frag_nqueues(), ip6_frag_mem()); |
58 | return 0; | 58 | return 0; |
59 | } | 59 | } |
60 | 60 | ||
diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c index 31601c993541..6ad19cfc2025 100644 --- a/net/ipv6/reassembly.c +++ b/net/ipv6/reassembly.c | |||
@@ -42,6 +42,7 @@ | |||
42 | #include <linux/icmpv6.h> | 42 | #include <linux/icmpv6.h> |
43 | #include <linux/random.h> | 43 | #include <linux/random.h> |
44 | #include <linux/jhash.h> | 44 | #include <linux/jhash.h> |
45 | #include <linux/skbuff.h> | ||
45 | 46 | ||
46 | #include <net/sock.h> | 47 | #include <net/sock.h> |
47 | #include <net/snmp.h> | 48 | #include <net/snmp.h> |
@@ -53,11 +54,7 @@ | |||
53 | #include <net/rawv6.h> | 54 | #include <net/rawv6.h> |
54 | #include <net/ndisc.h> | 55 | #include <net/ndisc.h> |
55 | #include <net/addrconf.h> | 56 | #include <net/addrconf.h> |
56 | 57 | #include <net/inet_frag.h> | |
57 | int sysctl_ip6frag_high_thresh __read_mostly = 256*1024; | ||
58 | int sysctl_ip6frag_low_thresh __read_mostly = 192*1024; | ||
59 | |||
60 | int sysctl_ip6frag_time __read_mostly = IPV6_FRAG_TIMEOUT; | ||
61 | 58 | ||
62 | struct ip6frag_skb_cb | 59 | struct ip6frag_skb_cb |
63 | { | 60 | { |
@@ -74,53 +71,39 @@ struct ip6frag_skb_cb | |||
74 | 71 | ||
75 | struct frag_queue | 72 | struct frag_queue |
76 | { | 73 | { |
77 | struct hlist_node list; | 74 | struct inet_frag_queue q; |
78 | struct list_head lru_list; /* lru list member */ | ||
79 | 75 | ||
80 | __be32 id; /* fragment id */ | 76 | __be32 id; /* fragment id */ |
81 | struct in6_addr saddr; | 77 | struct in6_addr saddr; |
82 | struct in6_addr daddr; | 78 | struct in6_addr daddr; |
83 | 79 | ||
84 | spinlock_t lock; | ||
85 | atomic_t refcnt; | ||
86 | struct timer_list timer; /* expire timer */ | ||
87 | struct sk_buff *fragments; | ||
88 | int len; | ||
89 | int meat; | ||
90 | int iif; | 80 | int iif; |
91 | ktime_t stamp; | ||
92 | unsigned int csum; | 81 | unsigned int csum; |
93 | __u8 last_in; /* has first/last segment arrived? */ | ||
94 | #define COMPLETE 4 | ||
95 | #define FIRST_IN 2 | ||
96 | #define LAST_IN 1 | ||
97 | __u16 nhoffset; | 82 | __u16 nhoffset; |
98 | }; | 83 | }; |
99 | 84 | ||
100 | /* Hash table. */ | 85 | struct inet_frags_ctl ip6_frags_ctl __read_mostly = { |
101 | 86 | .high_thresh = 256 * 1024, | |
102 | #define IP6Q_HASHSZ 64 | 87 | .low_thresh = 192 * 1024, |
88 | .timeout = IPV6_FRAG_TIMEOUT, | ||
89 | .secret_interval = 10 * 60 * HZ, | ||
90 | }; | ||
103 | 91 | ||
104 | static struct hlist_head ip6_frag_hash[IP6Q_HASHSZ]; | 92 | static struct inet_frags ip6_frags; |
105 | static DEFINE_RWLOCK(ip6_frag_lock); | ||
106 | static u32 ip6_frag_hash_rnd; | ||
107 | static LIST_HEAD(ip6_frag_lru_list); | ||
108 | int ip6_frag_nqueues = 0; | ||
109 | 93 | ||
110 | static __inline__ void __fq_unlink(struct frag_queue *fq) | 94 | int ip6_frag_nqueues(void) |
111 | { | 95 | { |
112 | hlist_del(&fq->list); | 96 | return ip6_frags.nqueues; |
113 | list_del(&fq->lru_list); | ||
114 | ip6_frag_nqueues--; | ||
115 | } | 97 | } |
116 | 98 | ||
117 | static __inline__ void fq_unlink(struct frag_queue *fq) | 99 | int ip6_frag_mem(void) |
118 | { | 100 | { |
119 | write_lock(&ip6_frag_lock); | 101 | return atomic_read(&ip6_frags.mem); |
120 | __fq_unlink(fq); | ||
121 | write_unlock(&ip6_frag_lock); | ||
122 | } | 102 | } |
123 | 103 | ||
104 | static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev, | ||
105 | struct net_device *dev); | ||
106 | |||
124 | /* | 107 | /* |
125 | * callers should be careful not to use the hash value outside the ipfrag_lock | 108 | * callers should be careful not to use the hash value outside the ipfrag_lock |
126 | * as doing so could race with ipfrag_hash_rnd being recalculated. | 109 | * as doing so could race with ipfrag_hash_rnd being recalculated. |
@@ -136,7 +119,7 @@ static unsigned int ip6qhashfn(__be32 id, struct in6_addr *saddr, | |||
136 | 119 | ||
137 | a += JHASH_GOLDEN_RATIO; | 120 | a += JHASH_GOLDEN_RATIO; |
138 | b += JHASH_GOLDEN_RATIO; | 121 | b += JHASH_GOLDEN_RATIO; |
139 | c += ip6_frag_hash_rnd; | 122 | c += ip6_frags.rnd; |
140 | __jhash_mix(a, b, c); | 123 | __jhash_mix(a, b, c); |
141 | 124 | ||
142 | a += (__force u32)saddr->s6_addr32[3]; | 125 | a += (__force u32)saddr->s6_addr32[3]; |
@@ -149,60 +132,29 @@ static unsigned int ip6qhashfn(__be32 id, struct in6_addr *saddr, | |||
149 | c += (__force u32)id; | 132 | c += (__force u32)id; |
150 | __jhash_mix(a, b, c); | 133 | __jhash_mix(a, b, c); |
151 | 134 | ||
152 | return c & (IP6Q_HASHSZ - 1); | 135 | return c & (INETFRAGS_HASHSZ - 1); |
153 | } | 136 | } |
154 | 137 | ||
155 | static struct timer_list ip6_frag_secret_timer; | 138 | static unsigned int ip6_hashfn(struct inet_frag_queue *q) |
156 | int sysctl_ip6frag_secret_interval __read_mostly = 10 * 60 * HZ; | ||
157 | |||
158 | static void ip6_frag_secret_rebuild(unsigned long dummy) | ||
159 | { | 139 | { |
160 | unsigned long now = jiffies; | 140 | struct frag_queue *fq; |
161 | int i; | ||
162 | |||
163 | write_lock(&ip6_frag_lock); | ||
164 | get_random_bytes(&ip6_frag_hash_rnd, sizeof(u32)); | ||
165 | for (i = 0; i < IP6Q_HASHSZ; i++) { | ||
166 | struct frag_queue *q; | ||
167 | struct hlist_node *p, *n; | ||
168 | |||
169 | hlist_for_each_entry_safe(q, p, n, &ip6_frag_hash[i], list) { | ||
170 | unsigned int hval = ip6qhashfn(q->id, | ||
171 | &q->saddr, | ||
172 | &q->daddr); | ||
173 | |||
174 | if (hval != i) { | ||
175 | hlist_del(&q->list); | ||
176 | |||
177 | /* Relink to new hash chain. */ | ||
178 | hlist_add_head(&q->list, | ||
179 | &ip6_frag_hash[hval]); | ||
180 | |||
181 | } | ||
182 | } | ||
183 | } | ||
184 | write_unlock(&ip6_frag_lock); | ||
185 | 141 | ||
186 | mod_timer(&ip6_frag_secret_timer, now + sysctl_ip6frag_secret_interval); | 142 | fq = container_of(q, struct frag_queue, q); |
143 | return ip6qhashfn(fq->id, &fq->saddr, &fq->daddr); | ||
187 | } | 144 | } |
188 | 145 | ||
189 | atomic_t ip6_frag_mem = ATOMIC_INIT(0); | ||
190 | |||
191 | /* Memory Tracking Functions. */ | 146 | /* Memory Tracking Functions. */ |
192 | static inline void frag_kfree_skb(struct sk_buff *skb, int *work) | 147 | static inline void frag_kfree_skb(struct sk_buff *skb, int *work) |
193 | { | 148 | { |
194 | if (work) | 149 | if (work) |
195 | *work -= skb->truesize; | 150 | *work -= skb->truesize; |
196 | atomic_sub(skb->truesize, &ip6_frag_mem); | 151 | atomic_sub(skb->truesize, &ip6_frags.mem); |
197 | kfree_skb(skb); | 152 | kfree_skb(skb); |
198 | } | 153 | } |
199 | 154 | ||
200 | static inline void frag_free_queue(struct frag_queue *fq, int *work) | 155 | static void ip6_frag_free(struct inet_frag_queue *fq) |
201 | { | 156 | { |
202 | if (work) | 157 | kfree(container_of(fq, struct frag_queue, q)); |
203 | *work -= sizeof(struct frag_queue); | ||
204 | atomic_sub(sizeof(struct frag_queue), &ip6_frag_mem); | ||
205 | kfree(fq); | ||
206 | } | 158 | } |
207 | 159 | ||
208 | static inline struct frag_queue *frag_alloc_queue(void) | 160 | static inline struct frag_queue *frag_alloc_queue(void) |
@@ -211,36 +163,15 @@ static inline struct frag_queue *frag_alloc_queue(void) | |||
211 | 163 | ||
212 | if(!fq) | 164 | if(!fq) |
213 | return NULL; | 165 | return NULL; |
214 | atomic_add(sizeof(struct frag_queue), &ip6_frag_mem); | 166 | atomic_add(sizeof(struct frag_queue), &ip6_frags.mem); |
215 | return fq; | 167 | return fq; |
216 | } | 168 | } |
217 | 169 | ||
218 | /* Destruction primitives. */ | 170 | /* Destruction primitives. */ |
219 | 171 | ||
220 | /* Complete destruction of fq. */ | 172 | static __inline__ void fq_put(struct frag_queue *fq) |
221 | static void ip6_frag_destroy(struct frag_queue *fq, int *work) | ||
222 | { | ||
223 | struct sk_buff *fp; | ||
224 | |||
225 | BUG_TRAP(fq->last_in&COMPLETE); | ||
226 | BUG_TRAP(del_timer(&fq->timer) == 0); | ||
227 | |||
228 | /* Release all fragment data. */ | ||
229 | fp = fq->fragments; | ||
230 | while (fp) { | ||
231 | struct sk_buff *xp = fp->next; | ||
232 | |||
233 | frag_kfree_skb(fp, work); | ||
234 | fp = xp; | ||
235 | } | ||
236 | |||
237 | frag_free_queue(fq, work); | ||
238 | } | ||
239 | |||
240 | static __inline__ void fq_put(struct frag_queue *fq, int *work) | ||
241 | { | 173 | { |
242 | if (atomic_dec_and_test(&fq->refcnt)) | 174 | inet_frag_put(&fq->q, &ip6_frags); |
243 | ip6_frag_destroy(fq, work); | ||
244 | } | 175 | } |
245 | 176 | ||
246 | /* Kill fq entry. It is not destroyed immediately, | 177 | /* Kill fq entry. It is not destroyed immediately, |
@@ -248,45 +179,16 @@ static __inline__ void fq_put(struct frag_queue *fq, int *work) | |||
248 | */ | 179 | */ |
249 | static __inline__ void fq_kill(struct frag_queue *fq) | 180 | static __inline__ void fq_kill(struct frag_queue *fq) |
250 | { | 181 | { |
251 | if (del_timer(&fq->timer)) | 182 | inet_frag_kill(&fq->q, &ip6_frags); |
252 | atomic_dec(&fq->refcnt); | ||
253 | |||
254 | if (!(fq->last_in & COMPLETE)) { | ||
255 | fq_unlink(fq); | ||
256 | atomic_dec(&fq->refcnt); | ||
257 | fq->last_in |= COMPLETE; | ||
258 | } | ||
259 | } | 183 | } |
260 | 184 | ||
261 | static void ip6_evictor(struct inet6_dev *idev) | 185 | static void ip6_evictor(struct inet6_dev *idev) |
262 | { | 186 | { |
263 | struct frag_queue *fq; | 187 | int evicted; |
264 | struct list_head *tmp; | 188 | |
265 | int work; | 189 | evicted = inet_frag_evictor(&ip6_frags); |
266 | 190 | if (evicted) | |
267 | work = atomic_read(&ip6_frag_mem) - sysctl_ip6frag_low_thresh; | 191 | IP6_ADD_STATS_BH(idev, IPSTATS_MIB_REASMFAILS, evicted); |
268 | if (work <= 0) | ||
269 | return; | ||
270 | |||
271 | while(work > 0) { | ||
272 | read_lock(&ip6_frag_lock); | ||
273 | if (list_empty(&ip6_frag_lru_list)) { | ||
274 | read_unlock(&ip6_frag_lock); | ||
275 | return; | ||
276 | } | ||
277 | tmp = ip6_frag_lru_list.next; | ||
278 | fq = list_entry(tmp, struct frag_queue, lru_list); | ||
279 | atomic_inc(&fq->refcnt); | ||
280 | read_unlock(&ip6_frag_lock); | ||
281 | |||
282 | spin_lock(&fq->lock); | ||
283 | if (!(fq->last_in&COMPLETE)) | ||
284 | fq_kill(fq); | ||
285 | spin_unlock(&fq->lock); | ||
286 | |||
287 | fq_put(fq, &work); | ||
288 | IP6_INC_STATS_BH(idev, IPSTATS_MIB_REASMFAILS); | ||
289 | } | ||
290 | } | 192 | } |
291 | 193 | ||
292 | static void ip6_frag_expire(unsigned long data) | 194 | static void ip6_frag_expire(unsigned long data) |
@@ -294,9 +196,9 @@ static void ip6_frag_expire(unsigned long data) | |||
294 | struct frag_queue *fq = (struct frag_queue *) data; | 196 | struct frag_queue *fq = (struct frag_queue *) data; |
295 | struct net_device *dev = NULL; | 197 | struct net_device *dev = NULL; |
296 | 198 | ||
297 | spin_lock(&fq->lock); | 199 | spin_lock(&fq->q.lock); |
298 | 200 | ||
299 | if (fq->last_in & COMPLETE) | 201 | if (fq->q.last_in & COMPLETE) |
300 | goto out; | 202 | goto out; |
301 | 203 | ||
302 | fq_kill(fq); | 204 | fq_kill(fq); |
@@ -311,7 +213,7 @@ static void ip6_frag_expire(unsigned long data) | |||
311 | rcu_read_unlock(); | 213 | rcu_read_unlock(); |
312 | 214 | ||
313 | /* Don't send error if the first segment did not arrive. */ | 215 | /* Don't send error if the first segment did not arrive. */ |
314 | if (!(fq->last_in&FIRST_IN) || !fq->fragments) | 216 | if (!(fq->q.last_in&FIRST_IN) || !fq->q.fragments) |
315 | goto out; | 217 | goto out; |
316 | 218 | ||
317 | /* | 219 | /* |
@@ -319,13 +221,13 @@ static void ip6_frag_expire(unsigned long data) | |||
319 | segment was received. And do not use fq->dev | 221 | segment was received. And do not use fq->dev |
320 | pointer directly, device might already disappeared. | 222 | pointer directly, device might already disappeared. |
321 | */ | 223 | */ |
322 | fq->fragments->dev = dev; | 224 | fq->q.fragments->dev = dev; |
323 | icmpv6_send(fq->fragments, ICMPV6_TIME_EXCEED, ICMPV6_EXC_FRAGTIME, 0, dev); | 225 | icmpv6_send(fq->q.fragments, ICMPV6_TIME_EXCEED, ICMPV6_EXC_FRAGTIME, 0, dev); |
324 | out: | 226 | out: |
325 | if (dev) | 227 | if (dev) |
326 | dev_put(dev); | 228 | dev_put(dev); |
327 | spin_unlock(&fq->lock); | 229 | spin_unlock(&fq->q.lock); |
328 | fq_put(fq, NULL); | 230 | fq_put(fq); |
329 | } | 231 | } |
330 | 232 | ||
331 | /* Creation primitives. */ | 233 | /* Creation primitives. */ |
@@ -339,32 +241,32 @@ static struct frag_queue *ip6_frag_intern(struct frag_queue *fq_in) | |||
339 | struct hlist_node *n; | 241 | struct hlist_node *n; |
340 | #endif | 242 | #endif |
341 | 243 | ||
342 | write_lock(&ip6_frag_lock); | 244 | write_lock(&ip6_frags.lock); |
343 | hash = ip6qhashfn(fq_in->id, &fq_in->saddr, &fq_in->daddr); | 245 | hash = ip6qhashfn(fq_in->id, &fq_in->saddr, &fq_in->daddr); |
344 | #ifdef CONFIG_SMP | 246 | #ifdef CONFIG_SMP |
345 | hlist_for_each_entry(fq, n, &ip6_frag_hash[hash], list) { | 247 | hlist_for_each_entry(fq, n, &ip6_frags.hash[hash], q.list) { |
346 | if (fq->id == fq_in->id && | 248 | if (fq->id == fq_in->id && |
347 | ipv6_addr_equal(&fq_in->saddr, &fq->saddr) && | 249 | ipv6_addr_equal(&fq_in->saddr, &fq->saddr) && |
348 | ipv6_addr_equal(&fq_in->daddr, &fq->daddr)) { | 250 | ipv6_addr_equal(&fq_in->daddr, &fq->daddr)) { |
349 | atomic_inc(&fq->refcnt); | 251 | atomic_inc(&fq->q.refcnt); |
350 | write_unlock(&ip6_frag_lock); | 252 | write_unlock(&ip6_frags.lock); |
351 | fq_in->last_in |= COMPLETE; | 253 | fq_in->q.last_in |= COMPLETE; |
352 | fq_put(fq_in, NULL); | 254 | fq_put(fq_in); |
353 | return fq; | 255 | return fq; |
354 | } | 256 | } |
355 | } | 257 | } |
356 | #endif | 258 | #endif |
357 | fq = fq_in; | 259 | fq = fq_in; |
358 | 260 | ||
359 | if (!mod_timer(&fq->timer, jiffies + sysctl_ip6frag_time)) | 261 | if (!mod_timer(&fq->q.timer, jiffies + ip6_frags_ctl.timeout)) |
360 | atomic_inc(&fq->refcnt); | 262 | atomic_inc(&fq->q.refcnt); |
361 | 263 | ||
362 | atomic_inc(&fq->refcnt); | 264 | atomic_inc(&fq->q.refcnt); |
363 | hlist_add_head(&fq->list, &ip6_frag_hash[hash]); | 265 | hlist_add_head(&fq->q.list, &ip6_frags.hash[hash]); |
364 | INIT_LIST_HEAD(&fq->lru_list); | 266 | INIT_LIST_HEAD(&fq->q.lru_list); |
365 | list_add_tail(&fq->lru_list, &ip6_frag_lru_list); | 267 | list_add_tail(&fq->q.lru_list, &ip6_frags.lru_list); |
366 | ip6_frag_nqueues++; | 268 | ip6_frags.nqueues++; |
367 | write_unlock(&ip6_frag_lock); | 269 | write_unlock(&ip6_frags.lock); |
368 | return fq; | 270 | return fq; |
369 | } | 271 | } |
370 | 272 | ||
@@ -382,11 +284,11 @@ ip6_frag_create(__be32 id, struct in6_addr *src, struct in6_addr *dst, | |||
382 | ipv6_addr_copy(&fq->saddr, src); | 284 | ipv6_addr_copy(&fq->saddr, src); |
383 | ipv6_addr_copy(&fq->daddr, dst); | 285 | ipv6_addr_copy(&fq->daddr, dst); |
384 | 286 | ||
385 | init_timer(&fq->timer); | 287 | init_timer(&fq->q.timer); |
386 | fq->timer.function = ip6_frag_expire; | 288 | fq->q.timer.function = ip6_frag_expire; |
387 | fq->timer.data = (long) fq; | 289 | fq->q.timer.data = (long) fq; |
388 | spin_lock_init(&fq->lock); | 290 | spin_lock_init(&fq->q.lock); |
389 | atomic_set(&fq->refcnt, 1); | 291 | atomic_set(&fq->q.refcnt, 1); |
390 | 292 | ||
391 | return ip6_frag_intern(fq); | 293 | return ip6_frag_intern(fq); |
392 | 294 | ||
@@ -403,30 +305,31 @@ fq_find(__be32 id, struct in6_addr *src, struct in6_addr *dst, | |||
403 | struct hlist_node *n; | 305 | struct hlist_node *n; |
404 | unsigned int hash; | 306 | unsigned int hash; |
405 | 307 | ||
406 | read_lock(&ip6_frag_lock); | 308 | read_lock(&ip6_frags.lock); |
407 | hash = ip6qhashfn(id, src, dst); | 309 | hash = ip6qhashfn(id, src, dst); |
408 | hlist_for_each_entry(fq, n, &ip6_frag_hash[hash], list) { | 310 | hlist_for_each_entry(fq, n, &ip6_frags.hash[hash], q.list) { |
409 | if (fq->id == id && | 311 | if (fq->id == id && |
410 | ipv6_addr_equal(src, &fq->saddr) && | 312 | ipv6_addr_equal(src, &fq->saddr) && |
411 | ipv6_addr_equal(dst, &fq->daddr)) { | 313 | ipv6_addr_equal(dst, &fq->daddr)) { |
412 | atomic_inc(&fq->refcnt); | 314 | atomic_inc(&fq->q.refcnt); |
413 | read_unlock(&ip6_frag_lock); | 315 | read_unlock(&ip6_frags.lock); |
414 | return fq; | 316 | return fq; |
415 | } | 317 | } |
416 | } | 318 | } |
417 | read_unlock(&ip6_frag_lock); | 319 | read_unlock(&ip6_frags.lock); |
418 | 320 | ||
419 | return ip6_frag_create(id, src, dst, idev); | 321 | return ip6_frag_create(id, src, dst, idev); |
420 | } | 322 | } |
421 | 323 | ||
422 | 324 | ||
423 | static void ip6_frag_queue(struct frag_queue *fq, struct sk_buff *skb, | 325 | static int ip6_frag_queue(struct frag_queue *fq, struct sk_buff *skb, |
424 | struct frag_hdr *fhdr, int nhoff) | 326 | struct frag_hdr *fhdr, int nhoff) |
425 | { | 327 | { |
426 | struct sk_buff *prev, *next; | 328 | struct sk_buff *prev, *next; |
329 | struct net_device *dev; | ||
427 | int offset, end; | 330 | int offset, end; |
428 | 331 | ||
429 | if (fq->last_in & COMPLETE) | 332 | if (fq->q.last_in & COMPLETE) |
430 | goto err; | 333 | goto err; |
431 | 334 | ||
432 | offset = ntohs(fhdr->frag_off) & ~0x7; | 335 | offset = ntohs(fhdr->frag_off) & ~0x7; |
@@ -439,7 +342,7 @@ static void ip6_frag_queue(struct frag_queue *fq, struct sk_buff *skb, | |||
439 | icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, | 342 | icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, |
440 | ((u8 *)&fhdr->frag_off - | 343 | ((u8 *)&fhdr->frag_off - |
441 | skb_network_header(skb))); | 344 | skb_network_header(skb))); |
442 | return; | 345 | return -1; |
443 | } | 346 | } |
444 | 347 | ||
445 | if (skb->ip_summed == CHECKSUM_COMPLETE) { | 348 | if (skb->ip_summed == CHECKSUM_COMPLETE) { |
@@ -454,11 +357,11 @@ static void ip6_frag_queue(struct frag_queue *fq, struct sk_buff *skb, | |||
454 | /* If we already have some bits beyond end | 357 | /* If we already have some bits beyond end |
455 | * or have different end, the segment is corrupted. | 358 | * or have different end, the segment is corrupted. |
456 | */ | 359 | */ |
457 | if (end < fq->len || | 360 | if (end < fq->q.len || |
458 | ((fq->last_in & LAST_IN) && end != fq->len)) | 361 | ((fq->q.last_in & LAST_IN) && end != fq->q.len)) |
459 | goto err; | 362 | goto err; |
460 | fq->last_in |= LAST_IN; | 363 | fq->q.last_in |= LAST_IN; |
461 | fq->len = end; | 364 | fq->q.len = end; |
462 | } else { | 365 | } else { |
463 | /* Check if the fragment is rounded to 8 bytes. | 366 | /* Check if the fragment is rounded to 8 bytes. |
464 | * Required by the RFC. | 367 | * Required by the RFC. |
@@ -471,13 +374,13 @@ static void ip6_frag_queue(struct frag_queue *fq, struct sk_buff *skb, | |||
471 | IPSTATS_MIB_INHDRERRORS); | 374 | IPSTATS_MIB_INHDRERRORS); |
472 | icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, | 375 | icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, |
473 | offsetof(struct ipv6hdr, payload_len)); | 376 | offsetof(struct ipv6hdr, payload_len)); |
474 | return; | 377 | return -1; |
475 | } | 378 | } |
476 | if (end > fq->len) { | 379 | if (end > fq->q.len) { |
477 | /* Some bits beyond end -> corruption. */ | 380 | /* Some bits beyond end -> corruption. */ |
478 | if (fq->last_in & LAST_IN) | 381 | if (fq->q.last_in & LAST_IN) |
479 | goto err; | 382 | goto err; |
480 | fq->len = end; | 383 | fq->q.len = end; |
481 | } | 384 | } |
482 | } | 385 | } |
483 | 386 | ||
@@ -496,7 +399,7 @@ static void ip6_frag_queue(struct frag_queue *fq, struct sk_buff *skb, | |||
496 | * this fragment, right? | 399 | * this fragment, right? |
497 | */ | 400 | */ |
498 | prev = NULL; | 401 | prev = NULL; |
499 | for(next = fq->fragments; next != NULL; next = next->next) { | 402 | for(next = fq->q.fragments; next != NULL; next = next->next) { |
500 | if (FRAG6_CB(next)->offset >= offset) | 403 | if (FRAG6_CB(next)->offset >= offset) |
501 | break; /* bingo! */ | 404 | break; /* bingo! */ |
502 | prev = next; | 405 | prev = next; |
@@ -533,7 +436,7 @@ static void ip6_frag_queue(struct frag_queue *fq, struct sk_buff *skb, | |||
533 | if (!pskb_pull(next, i)) | 436 | if (!pskb_pull(next, i)) |
534 | goto err; | 437 | goto err; |
535 | FRAG6_CB(next)->offset += i; /* next fragment */ | 438 | FRAG6_CB(next)->offset += i; /* next fragment */ |
536 | fq->meat -= i; | 439 | fq->q.meat -= i; |
537 | if (next->ip_summed != CHECKSUM_UNNECESSARY) | 440 | if (next->ip_summed != CHECKSUM_UNNECESSARY) |
538 | next->ip_summed = CHECKSUM_NONE; | 441 | next->ip_summed = CHECKSUM_NONE; |
539 | break; | 442 | break; |
@@ -548,9 +451,9 @@ static void ip6_frag_queue(struct frag_queue *fq, struct sk_buff *skb, | |||
548 | if (prev) | 451 | if (prev) |
549 | prev->next = next; | 452 | prev->next = next; |
550 | else | 453 | else |
551 | fq->fragments = next; | 454 | fq->q.fragments = next; |
552 | 455 | ||
553 | fq->meat -= free_it->len; | 456 | fq->q.meat -= free_it->len; |
554 | frag_kfree_skb(free_it, NULL); | 457 | frag_kfree_skb(free_it, NULL); |
555 | } | 458 | } |
556 | } | 459 | } |
@@ -562,30 +465,37 @@ static void ip6_frag_queue(struct frag_queue *fq, struct sk_buff *skb, | |||
562 | if (prev) | 465 | if (prev) |
563 | prev->next = skb; | 466 | prev->next = skb; |
564 | else | 467 | else |
565 | fq->fragments = skb; | 468 | fq->q.fragments = skb; |
566 | 469 | ||
567 | if (skb->dev) | 470 | dev = skb->dev; |
568 | fq->iif = skb->dev->ifindex; | 471 | if (dev) { |
569 | skb->dev = NULL; | 472 | fq->iif = dev->ifindex; |
570 | fq->stamp = skb->tstamp; | 473 | skb->dev = NULL; |
571 | fq->meat += skb->len; | 474 | } |
572 | atomic_add(skb->truesize, &ip6_frag_mem); | 475 | fq->q.stamp = skb->tstamp; |
476 | fq->q.meat += skb->len; | ||
477 | atomic_add(skb->truesize, &ip6_frags.mem); | ||
573 | 478 | ||
574 | /* The first fragment. | 479 | /* The first fragment. |
575 | * nhoffset is obtained from the first fragment, of course. | 480 | * nhoffset is obtained from the first fragment, of course. |
576 | */ | 481 | */ |
577 | if (offset == 0) { | 482 | if (offset == 0) { |
578 | fq->nhoffset = nhoff; | 483 | fq->nhoffset = nhoff; |
579 | fq->last_in |= FIRST_IN; | 484 | fq->q.last_in |= FIRST_IN; |
580 | } | 485 | } |
581 | write_lock(&ip6_frag_lock); | 486 | |
582 | list_move_tail(&fq->lru_list, &ip6_frag_lru_list); | 487 | if (fq->q.last_in == (FIRST_IN | LAST_IN) && fq->q.meat == fq->q.len) |
583 | write_unlock(&ip6_frag_lock); | 488 | return ip6_frag_reasm(fq, prev, dev); |
584 | return; | 489 | |
490 | write_lock(&ip6_frags.lock); | ||
491 | list_move_tail(&fq->q.lru_list, &ip6_frags.lru_list); | ||
492 | write_unlock(&ip6_frags.lock); | ||
493 | return -1; | ||
585 | 494 | ||
586 | err: | 495 | err: |
587 | IP6_INC_STATS(ip6_dst_idev(skb->dst), IPSTATS_MIB_REASMFAILS); | 496 | IP6_INC_STATS(ip6_dst_idev(skb->dst), IPSTATS_MIB_REASMFAILS); |
588 | kfree_skb(skb); | 497 | kfree_skb(skb); |
498 | return -1; | ||
589 | } | 499 | } |
590 | 500 | ||
591 | /* | 501 | /* |
@@ -597,21 +507,39 @@ err: | |||
597 | * queue is eligible for reassembly i.e. it is not COMPLETE, | 507 | * queue is eligible for reassembly i.e. it is not COMPLETE, |
598 | * the last and the first frames arrived and all the bits are here. | 508 | * the last and the first frames arrived and all the bits are here. |
599 | */ | 509 | */ |
600 | static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff **skb_in, | 510 | static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev, |
601 | struct net_device *dev) | 511 | struct net_device *dev) |
602 | { | 512 | { |
603 | struct sk_buff *fp, *head = fq->fragments; | 513 | struct sk_buff *fp, *head = fq->q.fragments; |
604 | int payload_len; | 514 | int payload_len; |
605 | unsigned int nhoff; | 515 | unsigned int nhoff; |
606 | 516 | ||
607 | fq_kill(fq); | 517 | fq_kill(fq); |
608 | 518 | ||
519 | /* Make the one we just received the head. */ | ||
520 | if (prev) { | ||
521 | head = prev->next; | ||
522 | fp = skb_clone(head, GFP_ATOMIC); | ||
523 | |||
524 | if (!fp) | ||
525 | goto out_oom; | ||
526 | |||
527 | fp->next = head->next; | ||
528 | prev->next = fp; | ||
529 | |||
530 | skb_morph(head, fq->q.fragments); | ||
531 | head->next = fq->q.fragments->next; | ||
532 | |||
533 | kfree_skb(fq->q.fragments); | ||
534 | fq->q.fragments = head; | ||
535 | } | ||
536 | |||
609 | BUG_TRAP(head != NULL); | 537 | BUG_TRAP(head != NULL); |
610 | BUG_TRAP(FRAG6_CB(head)->offset == 0); | 538 | BUG_TRAP(FRAG6_CB(head)->offset == 0); |
611 | 539 | ||
612 | /* Unfragmented part is taken from the first segment. */ | 540 | /* Unfragmented part is taken from the first segment. */ |
613 | payload_len = ((head->data - skb_network_header(head)) - | 541 | payload_len = ((head->data - skb_network_header(head)) - |
614 | sizeof(struct ipv6hdr) + fq->len - | 542 | sizeof(struct ipv6hdr) + fq->q.len - |
615 | sizeof(struct frag_hdr)); | 543 | sizeof(struct frag_hdr)); |
616 | if (payload_len > IPV6_MAXPLEN) | 544 | if (payload_len > IPV6_MAXPLEN) |
617 | goto out_oversize; | 545 | goto out_oversize; |
@@ -640,7 +568,7 @@ static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff **skb_in, | |||
640 | head->len -= clone->len; | 568 | head->len -= clone->len; |
641 | clone->csum = 0; | 569 | clone->csum = 0; |
642 | clone->ip_summed = head->ip_summed; | 570 | clone->ip_summed = head->ip_summed; |
643 | atomic_add(clone->truesize, &ip6_frag_mem); | 571 | atomic_add(clone->truesize, &ip6_frags.mem); |
644 | } | 572 | } |
645 | 573 | ||
646 | /* We have to remove fragment header from datagram and to relocate | 574 | /* We have to remove fragment header from datagram and to relocate |
@@ -655,7 +583,7 @@ static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff **skb_in, | |||
655 | skb_shinfo(head)->frag_list = head->next; | 583 | skb_shinfo(head)->frag_list = head->next; |
656 | skb_reset_transport_header(head); | 584 | skb_reset_transport_header(head); |
657 | skb_push(head, head->data - skb_network_header(head)); | 585 | skb_push(head, head->data - skb_network_header(head)); |
658 | atomic_sub(head->truesize, &ip6_frag_mem); | 586 | atomic_sub(head->truesize, &ip6_frags.mem); |
659 | 587 | ||
660 | for (fp=head->next; fp; fp = fp->next) { | 588 | for (fp=head->next; fp; fp = fp->next) { |
661 | head->data_len += fp->len; | 589 | head->data_len += fp->len; |
@@ -665,17 +593,15 @@ static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff **skb_in, | |||
665 | else if (head->ip_summed == CHECKSUM_COMPLETE) | 593 | else if (head->ip_summed == CHECKSUM_COMPLETE) |
666 | head->csum = csum_add(head->csum, fp->csum); | 594 | head->csum = csum_add(head->csum, fp->csum); |
667 | head->truesize += fp->truesize; | 595 | head->truesize += fp->truesize; |
668 | atomic_sub(fp->truesize, &ip6_frag_mem); | 596 | atomic_sub(fp->truesize, &ip6_frags.mem); |
669 | } | 597 | } |
670 | 598 | ||
671 | head->next = NULL; | 599 | head->next = NULL; |
672 | head->dev = dev; | 600 | head->dev = dev; |
673 | head->tstamp = fq->stamp; | 601 | head->tstamp = fq->q.stamp; |
674 | ipv6_hdr(head)->payload_len = htons(payload_len); | 602 | ipv6_hdr(head)->payload_len = htons(payload_len); |
675 | IP6CB(head)->nhoff = nhoff; | 603 | IP6CB(head)->nhoff = nhoff; |
676 | 604 | ||
677 | *skb_in = head; | ||
678 | |||
679 | /* Yes, and fold redundant checksum back. 8) */ | 605 | /* Yes, and fold redundant checksum back. 8) */ |
680 | if (head->ip_summed == CHECKSUM_COMPLETE) | 606 | if (head->ip_summed == CHECKSUM_COMPLETE) |
681 | head->csum = csum_partial(skb_network_header(head), | 607 | head->csum = csum_partial(skb_network_header(head), |
@@ -685,7 +611,7 @@ static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff **skb_in, | |||
685 | rcu_read_lock(); | 611 | rcu_read_lock(); |
686 | IP6_INC_STATS_BH(__in6_dev_get(dev), IPSTATS_MIB_REASMOKS); | 612 | IP6_INC_STATS_BH(__in6_dev_get(dev), IPSTATS_MIB_REASMOKS); |
687 | rcu_read_unlock(); | 613 | rcu_read_unlock(); |
688 | fq->fragments = NULL; | 614 | fq->q.fragments = NULL; |
689 | return 1; | 615 | return 1; |
690 | 616 | ||
691 | out_oversize: | 617 | out_oversize: |
@@ -702,10 +628,8 @@ out_fail: | |||
702 | return -1; | 628 | return -1; |
703 | } | 629 | } |
704 | 630 | ||
705 | static int ipv6_frag_rcv(struct sk_buff **skbp) | 631 | static int ipv6_frag_rcv(struct sk_buff *skb) |
706 | { | 632 | { |
707 | struct sk_buff *skb = *skbp; | ||
708 | struct net_device *dev = skb->dev; | ||
709 | struct frag_hdr *fhdr; | 633 | struct frag_hdr *fhdr; |
710 | struct frag_queue *fq; | 634 | struct frag_queue *fq; |
711 | struct ipv6hdr *hdr = ipv6_hdr(skb); | 635 | struct ipv6hdr *hdr = ipv6_hdr(skb); |
@@ -739,23 +663,19 @@ static int ipv6_frag_rcv(struct sk_buff **skbp) | |||
739 | return 1; | 663 | return 1; |
740 | } | 664 | } |
741 | 665 | ||
742 | if (atomic_read(&ip6_frag_mem) > sysctl_ip6frag_high_thresh) | 666 | if (atomic_read(&ip6_frags.mem) > ip6_frags_ctl.high_thresh) |
743 | ip6_evictor(ip6_dst_idev(skb->dst)); | 667 | ip6_evictor(ip6_dst_idev(skb->dst)); |
744 | 668 | ||
745 | if ((fq = fq_find(fhdr->identification, &hdr->saddr, &hdr->daddr, | 669 | if ((fq = fq_find(fhdr->identification, &hdr->saddr, &hdr->daddr, |
746 | ip6_dst_idev(skb->dst))) != NULL) { | 670 | ip6_dst_idev(skb->dst))) != NULL) { |
747 | int ret = -1; | 671 | int ret; |
748 | 672 | ||
749 | spin_lock(&fq->lock); | 673 | spin_lock(&fq->q.lock); |
750 | 674 | ||
751 | ip6_frag_queue(fq, skb, fhdr, IP6CB(skb)->nhoff); | 675 | ret = ip6_frag_queue(fq, skb, fhdr, IP6CB(skb)->nhoff); |
752 | 676 | ||
753 | if (fq->last_in == (FIRST_IN|LAST_IN) && | 677 | spin_unlock(&fq->q.lock); |
754 | fq->meat == fq->len) | 678 | fq_put(fq); |
755 | ret = ip6_frag_reasm(fq, skbp, dev); | ||
756 | |||
757 | spin_unlock(&fq->lock); | ||
758 | fq_put(fq, NULL); | ||
759 | return ret; | 679 | return ret; |
760 | } | 680 | } |
761 | 681 | ||
@@ -775,11 +695,10 @@ void __init ipv6_frag_init(void) | |||
775 | if (inet6_add_protocol(&frag_protocol, IPPROTO_FRAGMENT) < 0) | 695 | if (inet6_add_protocol(&frag_protocol, IPPROTO_FRAGMENT) < 0) |
776 | printk(KERN_ERR "ipv6_frag_init: Could not register protocol\n"); | 696 | printk(KERN_ERR "ipv6_frag_init: Could not register protocol\n"); |
777 | 697 | ||
778 | ip6_frag_hash_rnd = (u32) ((num_physpages ^ (num_physpages>>7)) ^ | 698 | ip6_frags.ctl = &ip6_frags_ctl; |
779 | (jiffies ^ (jiffies >> 6))); | 699 | ip6_frags.hashfn = ip6_hashfn; |
780 | 700 | ip6_frags.destructor = ip6_frag_free; | |
781 | init_timer(&ip6_frag_secret_timer); | 701 | ip6_frags.skb_free = NULL; |
782 | ip6_frag_secret_timer.function = ip6_frag_secret_rebuild; | 702 | ip6_frags.qsize = sizeof(struct frag_queue); |
783 | ip6_frag_secret_timer.expires = jiffies + sysctl_ip6frag_secret_interval; | 703 | inet_frags_init(&ip6_frags); |
784 | add_timer(&ip6_frag_secret_timer); | ||
785 | } | 704 | } |
diff --git a/net/ipv6/route.c b/net/ipv6/route.c index 6ff19f9eb9ee..cce9941c11c6 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c | |||
@@ -663,7 +663,7 @@ static struct rt6_info *rt6_alloc_clone(struct rt6_info *ort, struct in6_addr *d | |||
663 | return rt; | 663 | return rt; |
664 | } | 664 | } |
665 | 665 | ||
666 | static struct rt6_info *ip6_pol_route_input(struct fib6_table *table, | 666 | static struct rt6_info *ip6_pol_route(struct fib6_table *table, int oif, |
667 | struct flowi *fl, int flags) | 667 | struct flowi *fl, int flags) |
668 | { | 668 | { |
669 | struct fib6_node *fn; | 669 | struct fib6_node *fn; |
@@ -682,7 +682,7 @@ restart_2: | |||
682 | fn = fib6_lookup(&table->tb6_root, &fl->fl6_dst, &fl->fl6_src); | 682 | fn = fib6_lookup(&table->tb6_root, &fl->fl6_dst, &fl->fl6_src); |
683 | 683 | ||
684 | restart: | 684 | restart: |
685 | rt = rt6_select(fn, fl->iif, strict | reachable); | 685 | rt = rt6_select(fn, oif, strict | reachable); |
686 | BACKTRACK(&fl->fl6_src); | 686 | BACKTRACK(&fl->fl6_src); |
687 | if (rt == &ip6_null_entry || | 687 | if (rt == &ip6_null_entry || |
688 | rt->rt6i_flags & RTF_CACHE) | 688 | rt->rt6i_flags & RTF_CACHE) |
@@ -735,6 +735,12 @@ out2: | |||
735 | return rt; | 735 | return rt; |
736 | } | 736 | } |
737 | 737 | ||
738 | static struct rt6_info *ip6_pol_route_input(struct fib6_table *table, | ||
739 | struct flowi *fl, int flags) | ||
740 | { | ||
741 | return ip6_pol_route(table, fl->iif, fl, flags); | ||
742 | } | ||
743 | |||
738 | void ip6_route_input(struct sk_buff *skb) | 744 | void ip6_route_input(struct sk_buff *skb) |
739 | { | 745 | { |
740 | struct ipv6hdr *iph = ipv6_hdr(skb); | 746 | struct ipv6hdr *iph = ipv6_hdr(skb); |
@@ -761,72 +767,7 @@ void ip6_route_input(struct sk_buff *skb) | |||
761 | static struct rt6_info *ip6_pol_route_output(struct fib6_table *table, | 767 | static struct rt6_info *ip6_pol_route_output(struct fib6_table *table, |
762 | struct flowi *fl, int flags) | 768 | struct flowi *fl, int flags) |
763 | { | 769 | { |
764 | struct fib6_node *fn; | 770 | return ip6_pol_route(table, fl->oif, fl, flags); |
765 | struct rt6_info *rt, *nrt; | ||
766 | int strict = 0; | ||
767 | int attempts = 3; | ||
768 | int err; | ||
769 | int reachable = ipv6_devconf.forwarding ? 0 : RT6_LOOKUP_F_REACHABLE; | ||
770 | |||
771 | strict |= flags & RT6_LOOKUP_F_IFACE; | ||
772 | |||
773 | relookup: | ||
774 | read_lock_bh(&table->tb6_lock); | ||
775 | |||
776 | restart_2: | ||
777 | fn = fib6_lookup(&table->tb6_root, &fl->fl6_dst, &fl->fl6_src); | ||
778 | |||
779 | restart: | ||
780 | rt = rt6_select(fn, fl->oif, strict | reachable); | ||
781 | BACKTRACK(&fl->fl6_src); | ||
782 | if (rt == &ip6_null_entry || | ||
783 | rt->rt6i_flags & RTF_CACHE) | ||
784 | goto out; | ||
785 | |||
786 | dst_hold(&rt->u.dst); | ||
787 | read_unlock_bh(&table->tb6_lock); | ||
788 | |||
789 | if (!rt->rt6i_nexthop && !(rt->rt6i_flags & RTF_NONEXTHOP)) | ||
790 | nrt = rt6_alloc_cow(rt, &fl->fl6_dst, &fl->fl6_src); | ||
791 | else { | ||
792 | #if CLONE_OFFLINK_ROUTE | ||
793 | nrt = rt6_alloc_clone(rt, &fl->fl6_dst); | ||
794 | #else | ||
795 | goto out2; | ||
796 | #endif | ||
797 | } | ||
798 | |||
799 | dst_release(&rt->u.dst); | ||
800 | rt = nrt ? : &ip6_null_entry; | ||
801 | |||
802 | dst_hold(&rt->u.dst); | ||
803 | if (nrt) { | ||
804 | err = ip6_ins_rt(nrt); | ||
805 | if (!err) | ||
806 | goto out2; | ||
807 | } | ||
808 | |||
809 | if (--attempts <= 0) | ||
810 | goto out2; | ||
811 | |||
812 | /* | ||
813 | * Race condition! In the gap, when table->tb6_lock was | ||
814 | * released someone could insert this route. Relookup. | ||
815 | */ | ||
816 | dst_release(&rt->u.dst); | ||
817 | goto relookup; | ||
818 | |||
819 | out: | ||
820 | if (reachable) { | ||
821 | reachable = 0; | ||
822 | goto restart_2; | ||
823 | } | ||
824 | dst_hold(&rt->u.dst); | ||
825 | read_unlock_bh(&table->tb6_lock); | ||
826 | out2: | ||
827 | rt->u.dst.lastuse = jiffies; | ||
828 | rt->u.dst.__use++; | ||
829 | return rt; | ||
830 | } | 771 | } |
831 | 772 | ||
832 | struct dst_entry * ip6_route_output(struct sock *sk, struct flowi *fl) | 773 | struct dst_entry * ip6_route_output(struct sock *sk, struct flowi *fl) |
diff --git a/net/ipv6/sysctl_net_ipv6.c b/net/ipv6/sysctl_net_ipv6.c index 3fb44277207b..68bb2548e469 100644 --- a/net/ipv6/sysctl_net_ipv6.c +++ b/net/ipv6/sysctl_net_ipv6.c | |||
@@ -12,6 +12,7 @@ | |||
12 | #include <net/ndisc.h> | 12 | #include <net/ndisc.h> |
13 | #include <net/ipv6.h> | 13 | #include <net/ipv6.h> |
14 | #include <net/addrconf.h> | 14 | #include <net/addrconf.h> |
15 | #include <net/inet_frag.h> | ||
15 | 16 | ||
16 | #ifdef CONFIG_SYSCTL | 17 | #ifdef CONFIG_SYSCTL |
17 | 18 | ||
@@ -41,7 +42,7 @@ static ctl_table ipv6_table[] = { | |||
41 | { | 42 | { |
42 | .ctl_name = NET_IPV6_IP6FRAG_HIGH_THRESH, | 43 | .ctl_name = NET_IPV6_IP6FRAG_HIGH_THRESH, |
43 | .procname = "ip6frag_high_thresh", | 44 | .procname = "ip6frag_high_thresh", |
44 | .data = &sysctl_ip6frag_high_thresh, | 45 | .data = &ip6_frags_ctl.high_thresh, |
45 | .maxlen = sizeof(int), | 46 | .maxlen = sizeof(int), |
46 | .mode = 0644, | 47 | .mode = 0644, |
47 | .proc_handler = &proc_dointvec | 48 | .proc_handler = &proc_dointvec |
@@ -49,7 +50,7 @@ static ctl_table ipv6_table[] = { | |||
49 | { | 50 | { |
50 | .ctl_name = NET_IPV6_IP6FRAG_LOW_THRESH, | 51 | .ctl_name = NET_IPV6_IP6FRAG_LOW_THRESH, |
51 | .procname = "ip6frag_low_thresh", | 52 | .procname = "ip6frag_low_thresh", |
52 | .data = &sysctl_ip6frag_low_thresh, | 53 | .data = &ip6_frags_ctl.low_thresh, |
53 | .maxlen = sizeof(int), | 54 | .maxlen = sizeof(int), |
54 | .mode = 0644, | 55 | .mode = 0644, |
55 | .proc_handler = &proc_dointvec | 56 | .proc_handler = &proc_dointvec |
@@ -57,7 +58,7 @@ static ctl_table ipv6_table[] = { | |||
57 | { | 58 | { |
58 | .ctl_name = NET_IPV6_IP6FRAG_TIME, | 59 | .ctl_name = NET_IPV6_IP6FRAG_TIME, |
59 | .procname = "ip6frag_time", | 60 | .procname = "ip6frag_time", |
60 | .data = &sysctl_ip6frag_time, | 61 | .data = &ip6_frags_ctl.timeout, |
61 | .maxlen = sizeof(int), | 62 | .maxlen = sizeof(int), |
62 | .mode = 0644, | 63 | .mode = 0644, |
63 | .proc_handler = &proc_dointvec_jiffies, | 64 | .proc_handler = &proc_dointvec_jiffies, |
@@ -66,7 +67,7 @@ static ctl_table ipv6_table[] = { | |||
66 | { | 67 | { |
67 | .ctl_name = NET_IPV6_IP6FRAG_SECRET_INTERVAL, | 68 | .ctl_name = NET_IPV6_IP6FRAG_SECRET_INTERVAL, |
68 | .procname = "ip6frag_secret_interval", | 69 | .procname = "ip6frag_secret_interval", |
69 | .data = &sysctl_ip6frag_secret_interval, | 70 | .data = &ip6_frags_ctl.secret_interval, |
70 | .maxlen = sizeof(int), | 71 | .maxlen = sizeof(int), |
71 | .mode = 0644, | 72 | .mode = 0644, |
72 | .proc_handler = &proc_dointvec_jiffies, | 73 | .proc_handler = &proc_dointvec_jiffies, |
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c index a07b59c528f3..737b755342bd 100644 --- a/net/ipv6/tcp_ipv6.c +++ b/net/ipv6/tcp_ipv6.c | |||
@@ -1668,9 +1668,8 @@ ipv6_pktoptions: | |||
1668 | return 0; | 1668 | return 0; |
1669 | } | 1669 | } |
1670 | 1670 | ||
1671 | static int tcp_v6_rcv(struct sk_buff **pskb) | 1671 | static int tcp_v6_rcv(struct sk_buff *skb) |
1672 | { | 1672 | { |
1673 | struct sk_buff *skb = *pskb; | ||
1674 | struct tcphdr *th; | 1673 | struct tcphdr *th; |
1675 | struct sock *sk; | 1674 | struct sock *sk; |
1676 | int ret; | 1675 | int ret; |
diff --git a/net/ipv6/tunnel6.c b/net/ipv6/tunnel6.c index 23e2809878ae..6323921b40be 100644 --- a/net/ipv6/tunnel6.c +++ b/net/ipv6/tunnel6.c | |||
@@ -87,9 +87,8 @@ int xfrm6_tunnel_deregister(struct xfrm6_tunnel *handler, unsigned short family) | |||
87 | 87 | ||
88 | EXPORT_SYMBOL(xfrm6_tunnel_deregister); | 88 | EXPORT_SYMBOL(xfrm6_tunnel_deregister); |
89 | 89 | ||
90 | static int tunnel6_rcv(struct sk_buff **pskb) | 90 | static int tunnel6_rcv(struct sk_buff *skb) |
91 | { | 91 | { |
92 | struct sk_buff *skb = *pskb; | ||
93 | struct xfrm6_tunnel *handler; | 92 | struct xfrm6_tunnel *handler; |
94 | 93 | ||
95 | if (!pskb_may_pull(skb, sizeof(struct ipv6hdr))) | 94 | if (!pskb_may_pull(skb, sizeof(struct ipv6hdr))) |
@@ -106,9 +105,8 @@ drop: | |||
106 | return 0; | 105 | return 0; |
107 | } | 106 | } |
108 | 107 | ||
109 | static int tunnel46_rcv(struct sk_buff **pskb) | 108 | static int tunnel46_rcv(struct sk_buff *skb) |
110 | { | 109 | { |
111 | struct sk_buff *skb = *pskb; | ||
112 | struct xfrm6_tunnel *handler; | 110 | struct xfrm6_tunnel *handler; |
113 | 111 | ||
114 | if (!pskb_may_pull(skb, sizeof(struct ipv6hdr))) | 112 | if (!pskb_may_pull(skb, sizeof(struct ipv6hdr))) |
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c index 82ff26dd4470..caebad6ee510 100644 --- a/net/ipv6/udp.c +++ b/net/ipv6/udp.c | |||
@@ -405,10 +405,9 @@ static inline int udp6_csum_init(struct sk_buff *skb, struct udphdr *uh, | |||
405 | return 0; | 405 | return 0; |
406 | } | 406 | } |
407 | 407 | ||
408 | int __udp6_lib_rcv(struct sk_buff **pskb, struct hlist_head udptable[], | 408 | int __udp6_lib_rcv(struct sk_buff *skb, struct hlist_head udptable[], |
409 | int proto) | 409 | int proto) |
410 | { | 410 | { |
411 | struct sk_buff *skb = *pskb; | ||
412 | struct sock *sk; | 411 | struct sock *sk; |
413 | struct udphdr *uh; | 412 | struct udphdr *uh; |
414 | struct net_device *dev = skb->dev; | 413 | struct net_device *dev = skb->dev; |
@@ -494,9 +493,9 @@ discard: | |||
494 | return 0; | 493 | return 0; |
495 | } | 494 | } |
496 | 495 | ||
497 | static __inline__ int udpv6_rcv(struct sk_buff **pskb) | 496 | static __inline__ int udpv6_rcv(struct sk_buff *skb) |
498 | { | 497 | { |
499 | return __udp6_lib_rcv(pskb, udp_hash, IPPROTO_UDP); | 498 | return __udp6_lib_rcv(skb, udp_hash, IPPROTO_UDP); |
500 | } | 499 | } |
501 | 500 | ||
502 | /* | 501 | /* |
diff --git a/net/ipv6/udp_impl.h b/net/ipv6/udp_impl.h index 6e252f318f7c..2d3fda601232 100644 --- a/net/ipv6/udp_impl.h +++ b/net/ipv6/udp_impl.h | |||
@@ -6,7 +6,7 @@ | |||
6 | #include <net/addrconf.h> | 6 | #include <net/addrconf.h> |
7 | #include <net/inet_common.h> | 7 | #include <net/inet_common.h> |
8 | 8 | ||
9 | extern int __udp6_lib_rcv(struct sk_buff **, struct hlist_head [], int ); | 9 | extern int __udp6_lib_rcv(struct sk_buff *, struct hlist_head [], int ); |
10 | extern void __udp6_lib_err(struct sk_buff *, struct inet6_skb_parm *, | 10 | extern void __udp6_lib_err(struct sk_buff *, struct inet6_skb_parm *, |
11 | int , int , int , __be32 , struct hlist_head []); | 11 | int , int , int , __be32 , struct hlist_head []); |
12 | 12 | ||
diff --git a/net/ipv6/udplite.c b/net/ipv6/udplite.c index f54016a55004..766566f7de47 100644 --- a/net/ipv6/udplite.c +++ b/net/ipv6/udplite.c | |||
@@ -17,9 +17,9 @@ | |||
17 | 17 | ||
18 | DEFINE_SNMP_STAT(struct udp_mib, udplite_stats_in6) __read_mostly; | 18 | DEFINE_SNMP_STAT(struct udp_mib, udplite_stats_in6) __read_mostly; |
19 | 19 | ||
20 | static int udplitev6_rcv(struct sk_buff **pskb) | 20 | static int udplitev6_rcv(struct sk_buff *skb) |
21 | { | 21 | { |
22 | return __udp6_lib_rcv(pskb, udplite_hash, IPPROTO_UDPLITE); | 22 | return __udp6_lib_rcv(skb, udplite_hash, IPPROTO_UDPLITE); |
23 | } | 23 | } |
24 | 24 | ||
25 | static void udplitev6_err(struct sk_buff *skb, | 25 | static void udplitev6_err(struct sk_buff *skb, |
diff --git a/net/ipv6/xfrm6_input.c b/net/ipv6/xfrm6_input.c index c858537cec4b..02f69e544f6f 100644 --- a/net/ipv6/xfrm6_input.c +++ b/net/ipv6/xfrm6_input.c | |||
@@ -133,9 +133,9 @@ drop: | |||
133 | 133 | ||
134 | EXPORT_SYMBOL(xfrm6_rcv_spi); | 134 | EXPORT_SYMBOL(xfrm6_rcv_spi); |
135 | 135 | ||
136 | int xfrm6_rcv(struct sk_buff **pskb) | 136 | int xfrm6_rcv(struct sk_buff *skb) |
137 | { | 137 | { |
138 | return xfrm6_rcv_spi(*pskb, 0); | 138 | return xfrm6_rcv_spi(skb, 0); |
139 | } | 139 | } |
140 | 140 | ||
141 | EXPORT_SYMBOL(xfrm6_rcv); | 141 | EXPORT_SYMBOL(xfrm6_rcv); |
diff --git a/net/ipv6/xfrm6_output.c b/net/ipv6/xfrm6_output.c index 4618c18e611d..a5a32c17249d 100644 --- a/net/ipv6/xfrm6_output.c +++ b/net/ipv6/xfrm6_output.c | |||
@@ -80,7 +80,7 @@ static int xfrm6_output_finish2(struct sk_buff *skb) | |||
80 | while (likely((err = xfrm6_output_one(skb)) == 0)) { | 80 | while (likely((err = xfrm6_output_one(skb)) == 0)) { |
81 | nf_reset(skb); | 81 | nf_reset(skb); |
82 | 82 | ||
83 | err = nf_hook(PF_INET6, NF_IP6_LOCAL_OUT, &skb, NULL, | 83 | err = nf_hook(PF_INET6, NF_IP6_LOCAL_OUT, skb, NULL, |
84 | skb->dst->dev, dst_output); | 84 | skb->dst->dev, dst_output); |
85 | if (unlikely(err != 1)) | 85 | if (unlikely(err != 1)) |
86 | break; | 86 | break; |
@@ -88,7 +88,7 @@ static int xfrm6_output_finish2(struct sk_buff *skb) | |||
88 | if (!skb->dst->xfrm) | 88 | if (!skb->dst->xfrm) |
89 | return dst_output(skb); | 89 | return dst_output(skb); |
90 | 90 | ||
91 | err = nf_hook(PF_INET6, NF_IP6_POST_ROUTING, &skb, NULL, | 91 | err = nf_hook(PF_INET6, NF_IP6_POST_ROUTING, skb, NULL, |
92 | skb->dst->dev, xfrm6_output_finish2); | 92 | skb->dst->dev, xfrm6_output_finish2); |
93 | if (unlikely(err != 1)) | 93 | if (unlikely(err != 1)) |
94 | break; | 94 | break; |