Diffstat (limited to 'net')
-rw-r--r--   net/ipv6/reassembly.c   57
1 file changed, 40 insertions(+), 17 deletions(-)
diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c
index 31601c993541..fa1055b669d1 100644
--- a/net/ipv6/reassembly.c
+++ b/net/ipv6/reassembly.c
@@ -42,6 +42,7 @@
 #include <linux/icmpv6.h>
 #include <linux/random.h>
 #include <linux/jhash.h>
+#include <linux/skbuff.h>
 
 #include <net/sock.h>
 #include <net/snmp.h>
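
Annotation (not part of the patch): the new include is what declares skb_morph(), which the reassembly hunk further down starts using. As a point of reference, the prototype provided by the companion skbuff patch looks roughly like this:

    /* Make @dst an identical copy of @src while reusing @dst's own memory
     * and reference count; used below to turn the arriving fragment into
     * the head of the fragment list. */
    struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src);
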
@@ -107,6 +108,9 @@ static u32 ip6_frag_hash_rnd;
 static LIST_HEAD(ip6_frag_lru_list);
 int ip6_frag_nqueues = 0;
 
+static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
+                          struct net_device *dev);
+
 static __inline__ void __fq_unlink(struct frag_queue *fq)
 {
         hlist_del(&fq->list);
@@ -420,10 +424,11 @@ fq_find(__be32 id, struct in6_addr *src, struct in6_addr *dst,
 }
 
 
-static void ip6_frag_queue(struct frag_queue *fq, struct sk_buff *skb,
+static int ip6_frag_queue(struct frag_queue *fq, struct sk_buff *skb,
                            struct frag_hdr *fhdr, int nhoff)
 {
         struct sk_buff *prev, *next;
+        struct net_device *dev;
         int offset, end;
 
         if (fq->last_in & COMPLETE)
@@ -439,7 +444,7 @@ static void ip6_frag_queue(struct frag_queue *fq, struct sk_buff *skb,
                 icmpv6_param_prob(skb, ICMPV6_HDR_FIELD,
                                   ((u8 *)&fhdr->frag_off -
                                    skb_network_header(skb)));
-                return;
+                return -1;
         }
 
         if (skb->ip_summed == CHECKSUM_COMPLETE) {
@@ -471,7 +476,7 @@ static void ip6_frag_queue(struct frag_queue *fq, struct sk_buff *skb,
                                 IPSTATS_MIB_INHDRERRORS);
                         icmpv6_param_prob(skb, ICMPV6_HDR_FIELD,
                                           offsetof(struct ipv6hdr, payload_len));
-                        return;
+                        return -1;
                 }
                 if (end > fq->len) {
                         /* Some bits beyond end -> corruption. */
@@ -564,9 +569,11 @@ static void ip6_frag_queue(struct frag_queue *fq, struct sk_buff *skb,
         else
                 fq->fragments = skb;
 
-        if (skb->dev)
-                fq->iif = skb->dev->ifindex;
-        skb->dev = NULL;
+        dev = skb->dev;
+        if (dev) {
+                fq->iif = dev->ifindex;
+                skb->dev = NULL;
+        }
         fq->stamp = skb->tstamp;
         fq->meat += skb->len;
         atomic_add(skb->truesize, &ip6_frag_mem);
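
Annotation: the device pointer is now cached in the local `dev` before `skb->dev` is cleared, because the tail of ip6_frag_queue() (next hunk) needs it for the call into ip6_frag_reasm(). Condensed from this hunk and the next one, the resulting tail of the function reads roughly as follows; the elision comment stands for the unchanged bookkeeping in between:

    dev = skb->dev;
    if (dev) {
            fq->iif = dev->ifindex;
            skb->dev = NULL;                /* the queue owns the fragment now */
    }
    /* ... fq->stamp / fq->meat / FIRST_IN bookkeeping unchanged ... */
    if (fq->last_in == (FIRST_IN | LAST_IN) && fq->meat == fq->len)
            return ip6_frag_reasm(fq, prev, dev);   /* still under fq->lock */
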
@@ -578,14 +585,19 @@ static void ip6_frag_queue(struct frag_queue *fq, struct sk_buff *skb,
                 fq->nhoffset = nhoff;
                 fq->last_in |= FIRST_IN;
         }
+
+        if (fq->last_in == (FIRST_IN | LAST_IN) && fq->meat == fq->len)
+                return ip6_frag_reasm(fq, prev, dev);
+
         write_lock(&ip6_frag_lock);
         list_move_tail(&fq->lru_list, &ip6_frag_lru_list);
         write_unlock(&ip6_frag_lock);
-        return;
+        return -1;
 
 err:
         IP6_INC_STATS(ip6_dst_idev(skb->dst), IPSTATS_MIB_REASMFAILS);
         kfree_skb(skb);
+        return -1;
 }
 
 /*
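
Annotation: with this hunk ip6_frag_queue() gains a meaningful return value. Summarizing the contract implied by the diff (this comment block is an editorial summary, not part of the patch): -1 means the fragment was queued for later or dropped on an error path, while a completed datagram returns whatever ip6_frag_reasm() returns, which ipv6_frag_rcv() passes straight up so the IPv6 input path re-parses the now-reassembled packet.

    /*
     * ip6_frag_queue() return values after this patch (summary):
     *   -1  fragment queued, or dropped on error; the skb is consumed
     *       by the fragment queue either way
     *   >0  this fragment completed the datagram; ip6_frag_reasm()
     *       rebuilt it in place and its positive return value is
     *       propagated by ipv6_frag_rcv() to trigger re-parsing
     */
    static int ip6_frag_queue(struct frag_queue *fq, struct sk_buff *skb,
                              struct frag_hdr *fhdr, int nhoff);
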
@@ -597,7 +609,7 @@ err:
  * queue is eligible for reassembly i.e. it is not COMPLETE,
  * the last and the first frames arrived and all the bits are here.
  */
-static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff **skb_in,
+static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
                           struct net_device *dev)
 {
         struct sk_buff *fp, *head = fq->fragments;
@@ -606,6 +618,24 @@ static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff **skb_in,
 
         fq_kill(fq);
 
+        /* Make the one we just received the head. */
+        if (prev) {
+                head = prev->next;
+                fp = skb_clone(head, GFP_ATOMIC);
+
+                if (!fp)
+                        goto out_oom;
+
+                fp->next = head->next;
+                prev->next = fp;
+
+                skb_morph(head, fq->fragments);
+                head->next = fq->fragments->next;
+
+                kfree_skb(fq->fragments);
+                fq->fragments = head;
+        }
+
         BUG_TRAP(head != NULL);
         BUG_TRAP(FRAG6_CB(head)->offset == 0);
 
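
Annotation: because the caller no longer hands `skbp` into reassembly (see the following hunks), the reassembled datagram has to end up inside the very skb ipv6_frag_rcv() is already holding, i.e. the fragment that has just arrived. That is what this new block arranges: `prev` is the fragment in front of the arrival, so `prev->next` is the arrival itself; a clone of it is spliced into its old slot so no payload is lost, and skb_morph() then turns the arrival into a copy of the old head, letting it take over the offset-0 data and become `fq->fragments`. The same block again, with explanatory comments added (the code itself is unchanged from the patch):

    /* Make the one we just received the head. */
    if (prev) {
            head = prev->next;                  /* the fragment that just arrived */
            fp = skb_clone(head, GFP_ATOMIC);   /* keep its payload in the chain */

            if (!fp)
                    goto out_oom;

            fp->next = head->next;              /* clone takes the arrival's slot */
            prev->next = fp;

            skb_morph(head, fq->fragments);     /* arrival takes over the old head's
                                                 * (offset 0) data and state */
            head->next = fq->fragments->next;

            kfree_skb(fq->fragments);           /* old head is now redundant */
            fq->fragments = head;               /* caller's skb is the head */
    }

This is also why the `*skb_in = head;` store disappears in the next hunk: the head already is the caller's skb.
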
@@ -674,8 +704,6 @@ static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff **skb_in,
         ipv6_hdr(head)->payload_len = htons(payload_len);
         IP6CB(head)->nhoff = nhoff;
 
-        *skb_in = head;
-
         /* Yes, and fold redundant checksum back. 8) */
         if (head->ip_summed == CHECKSUM_COMPLETE)
                 head->csum = csum_partial(skb_network_header(head),
@@ -705,7 +733,6 @@ out_fail:
 static int ipv6_frag_rcv(struct sk_buff **skbp)
 {
         struct sk_buff *skb = *skbp;
-        struct net_device *dev = skb->dev;
         struct frag_hdr *fhdr;
         struct frag_queue *fq;
         struct ipv6hdr *hdr = ipv6_hdr(skb);
@@ -744,15 +771,11 @@ static int ipv6_frag_rcv(struct sk_buff **skbp)
 
         if ((fq = fq_find(fhdr->identification, &hdr->saddr, &hdr->daddr,
                           ip6_dst_idev(skb->dst))) != NULL) {
-                int ret = -1;
+                int ret;
 
                 spin_lock(&fq->lock);
 
-                ip6_frag_queue(fq, skb, fhdr, IP6CB(skb)->nhoff);
-
-                if (fq->last_in == (FIRST_IN|LAST_IN) &&
-                    fq->meat == fq->len)
-                        ret = ip6_frag_reasm(fq, skbp, dev);
+                ret = ip6_frag_queue(fq, skb, fhdr, IP6CB(skb)->nhoff);
 
                 spin_unlock(&fq->lock);
                 fq_put(fq, NULL);
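
Annotation: after this hunk the fragment branch of ipv6_frag_rcv() simply forwards whatever ip6_frag_queue() decided; the `dev` local and the inline reassembly check are gone because both now live inside ip6_frag_queue(). Piecing the hunk together with the unchanged code that follows it, the branch reads roughly:

    spin_lock(&fq->lock);
    ret = ip6_frag_queue(fq, skb, fhdr, IP6CB(skb)->nhoff);  /* may reassemble */
    spin_unlock(&fq->lock);

    fq_put(fq, NULL);
    return ret;     /* -1: queued or dropped, >0: reassembled in place */
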