path: root/net/ipv6/reassembly.c
Diffstat (limited to 'net/ipv6/reassembly.c')
-rw-r--r--	net/ipv6/reassembly.c	35
1 file changed, 17 insertions(+), 18 deletions(-)
diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c
index 3c575118fca5..da5bd0ed83df 100644
--- a/net/ipv6/reassembly.c
+++ b/net/ipv6/reassembly.c
@@ -267,7 +267,7 @@ static int ip6_frag_queue(struct frag_queue *fq, struct sk_buff *skb,
 	struct sk_buff *prev, *next;
 	struct net_device *dev;
 	int offset, end;
-	struct net *net = dev_net(skb->dst->dev);
+	struct net *net = dev_net(skb_dst(skb)->dev);
 
 	if (fq->q.last_in & INET_FRAG_COMPLETE)
 		goto err;
@@ -277,7 +277,7 @@ static int ip6_frag_queue(struct frag_queue *fq, struct sk_buff *skb,
 			((u8 *)(fhdr + 1) - (u8 *)(ipv6_hdr(skb) + 1)));
 
 	if ((unsigned int)end > IPV6_MAXPLEN) {
-		IP6_INC_STATS_BH(net, ip6_dst_idev(skb->dst),
+		IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)),
 				 IPSTATS_MIB_INHDRERRORS);
 		icmpv6_param_prob(skb, ICMPV6_HDR_FIELD,
 				  ((u8 *)&fhdr->frag_off -
@@ -310,7 +310,7 @@ static int ip6_frag_queue(struct frag_queue *fq, struct sk_buff *skb,
 			/* RFC2460 says always send parameter problem in
 			 * this case. -DaveM
 			 */
-			IP6_INC_STATS_BH(net, ip6_dst_idev(skb->dst),
+			IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)),
 					 IPSTATS_MIB_INHDRERRORS);
 			icmpv6_param_prob(skb, ICMPV6_HDR_FIELD,
 					  offsetof(struct ipv6hdr, payload_len));
@@ -434,7 +434,7 @@ static int ip6_frag_queue(struct frag_queue *fq, struct sk_buff *skb,
 	return -1;
 
 err:
-	IP6_INC_STATS(net, ip6_dst_idev(skb->dst),
+	IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
 		      IPSTATS_MIB_REASMFAILS);
 	kfree_skb(skb);
 	return -1;
@@ -452,6 +452,7 @@ err:
 static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
 			  struct net_device *dev)
 {
+	struct net *net = container_of(fq->q.net, struct net, ipv6.frags);
 	struct sk_buff *fp, *head = fq->q.fragments;
 	int payload_len;
 	unsigned int nhoff;
@@ -493,7 +494,7 @@ static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
 	/* If the first fragment is fragmented itself, we split
 	 * it to two chunks: the first with data and paged part
 	 * and the second, holding only fragments. */
-	if (skb_shinfo(head)->frag_list) {
+	if (skb_has_frags(head)) {
 		struct sk_buff *clone;
 		int i, plen = 0;
 
@@ -502,7 +503,7 @@ static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
 		clone->next = head->next;
 		head->next = clone;
 		skb_shinfo(clone)->frag_list = skb_shinfo(head)->frag_list;
-		skb_shinfo(head)->frag_list = NULL;
+		skb_frag_list_init(head);
 		for (i=0; i<skb_shinfo(head)->nr_frags; i++)
 			plen += skb_shinfo(head)->frags[i].size;
 		clone->len = clone->data_len = head->data_len - plen;
@@ -551,8 +552,7 @@ static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
 					  head->csum);
 
 	rcu_read_lock();
-	IP6_INC_STATS_BH(dev_net(dev),
-			 __in6_dev_get(dev), IPSTATS_MIB_REASMOKS);
+	IP6_INC_STATS_BH(net, __in6_dev_get(dev), IPSTATS_MIB_REASMOKS);
 	rcu_read_unlock();
 	fq->q.fragments = NULL;
 	return 1;
@@ -566,8 +566,7 @@ out_oom:
 		printk(KERN_DEBUG "ip6_frag_reasm: no memory for reassembly\n");
 out_fail:
 	rcu_read_lock();
-	IP6_INC_STATS_BH(dev_net(dev),
-			 __in6_dev_get(dev), IPSTATS_MIB_REASMFAILS);
+	IP6_INC_STATS_BH(net, __in6_dev_get(dev), IPSTATS_MIB_REASMFAILS);
 	rcu_read_unlock();
 	return -1;
 }
@@ -577,9 +576,9 @@ static int ipv6_frag_rcv(struct sk_buff *skb)
 	struct frag_hdr *fhdr;
 	struct frag_queue *fq;
 	struct ipv6hdr *hdr = ipv6_hdr(skb);
-	struct net *net = dev_net(skb->dst->dev);
+	struct net *net = dev_net(skb_dst(skb)->dev);
 
-	IP6_INC_STATS_BH(net, ip6_dst_idev(skb->dst), IPSTATS_MIB_REASMREQDS);
+	IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_REASMREQDS);
 
 	/* Jumbo payload inhibits frag. header */
 	if (hdr->payload_len==0)
@@ -596,17 +595,17 @@ static int ipv6_frag_rcv(struct sk_buff *skb)
 		/* It is not a fragmented frame */
 		skb->transport_header += sizeof(struct frag_hdr);
 		IP6_INC_STATS_BH(net,
-				 ip6_dst_idev(skb->dst), IPSTATS_MIB_REASMOKS);
+				 ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_REASMOKS);
 
 		IP6CB(skb)->nhoff = (u8 *)fhdr - skb_network_header(skb);
 		return 1;
 	}
 
 	if (atomic_read(&net->ipv6.frags.mem) > net->ipv6.frags.high_thresh)
-		ip6_evictor(net, ip6_dst_idev(skb->dst));
+		ip6_evictor(net, ip6_dst_idev(skb_dst(skb)));
 
 	if ((fq = fq_find(net, fhdr->identification, &hdr->saddr, &hdr->daddr,
-			  ip6_dst_idev(skb->dst))) != NULL) {
+			  ip6_dst_idev(skb_dst(skb)))) != NULL) {
 		int ret;
 
 		spin_lock(&fq->q.lock);
@@ -618,17 +617,17 @@ static int ipv6_frag_rcv(struct sk_buff *skb)
 		return ret;
 	}
 
-	IP6_INC_STATS_BH(net, ip6_dst_idev(skb->dst), IPSTATS_MIB_REASMFAILS);
+	IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_REASMFAILS);
 	kfree_skb(skb);
 	return -1;
 
 fail_hdr:
-	IP6_INC_STATS(net, ip6_dst_idev(skb->dst), IPSTATS_MIB_INHDRERRORS);
+	IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_INHDRERRORS);
 	icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, skb_network_header_len(skb));
 	return -1;
 }
 
-static struct inet6_protocol frag_protocol =
+static const struct inet6_protocol frag_protocol =
 {
 	.handler	=	ipv6_frag_rcv,
 	.flags		=	INET6_PROTO_NOPOLICY,
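
The hunks above all follow one pattern: direct dereferences of skb->dst are replaced with the skb_dst() helper (and direct frag_list pokes with skb_has_frags()/skb_frag_list_init()), so callers stop depending on how the destination entry is stored inside struct sk_buff. A minimal sketch of that accessor idea is below; it is illustration only, and the type and function names sk_buff_sketch and skb_dst_sketch are hypothetical stand-ins, not the kernel's actual definitions in include/linux/skbuff.h.

	/* Hypothetical stand-in for struct sk_buff, reduced to the one
	 * member that callers used to touch directly. */
	struct dst_entry;

	struct sk_buff_sketch {
		struct dst_entry *dst;	/* storage detail hidden behind the helper */
	};

	/* Callers such as ip6_frag_queue() go through the helper instead of
	 * reading ->dst, so the underlying storage can change later without
	 * editing every call site again. */
	static inline struct dst_entry *skb_dst_sketch(const struct sk_buff_sketch *skb)
	{
		return skb->dst;
	}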