author	Herbert Xu <herbert@gondor.apana.org.au>	2007-10-15 04:28:47 -0400
committer	David S. Miller <davem@sunset.davemloft.net>	2007-10-15 15:26:30 -0400
commit	f61944efdf0d2569721ed6d7b0445e9f1214b295 (patch)
tree	f29293ab16b6fec87fc08ca5b42b4839e125b543 /net
parent	3db05fea51cdb162cfa8f69e9cfb9e228919d2a9 (diff)
[IPV6]: Make ipv6_frag_rcv return the same packet
This patch implements the same change that was done to ip_defrag. It makes ipv6_frag_rcv return the last packet received of a train of fragments rather than the head of that sequence. This allows us to get rid of the sk_buff ** argument later.

Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Signed-off-by: David S. Miller <davem@davemloft.net>
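For context, the core of the change is the head swap in ip6_frag_reasm(): a copy of the just-received fragment is left in its old position in the chain, and the just-received buffer itself is moved to the front and takes over the old head's contents, so the buffer the caller handed to ipv6_frag_rcv ends up being the head of the reassembled packet. The sketch below is a simplified userspace model of that list manipulation only; struct frag, new_frag() and make_received_head() are hypothetical stand-ins for struct sk_buff, skb_clone() and skb_morph(), not kernel APIs.

```c
/* Simplified model of the head swap in ip6_frag_reasm().
 * "struct frag" stands in for sk_buff; new_frag() plays the role of
 * skb_clone(), the memcpy plays the role of skb_morph().
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct frag {
	struct frag *next;
	char data[16];
};

static struct frag *new_frag(const char *data)
{
	struct frag *f = calloc(1, sizeof(*f));

	if (f)
		snprintf(f->data, sizeof(f->data), "%s", data);
	return f;
}

/* Move the fragment after @prev to the front of the list headed by *head,
 * leaving a copy in its old position so the data order is preserved.
 * Mirrors the prev / skb_clone / skb_morph sequence in the patch.
 */
static int make_received_head(struct frag **head, struct frag *prev)
{
	struct frag *received, *copy, *old_head;

	if (!prev)			/* already the head, nothing to do */
		return 0;

	received = prev->next;
	copy = new_frag(received->data);	/* like skb_clone() */
	if (!copy)
		return -1;

	copy->next = received->next;
	prev->next = copy;

	old_head = *head;
	/* like skb_morph(): the received buffer takes over the head's data */
	memcpy(received->data, old_head->data, sizeof(received->data));
	received->next = old_head->next;

	free(old_head);			/* like kfree_skb(fq->fragments) */
	*head = received;
	return 0;
}

int main(void)
{
	struct frag *a = new_frag("frag0"), *b = new_frag("frag1"),
		    *c = new_frag("frag2");
	struct frag *head = a, *f;

	if (!a || !b || !c)
		return 1;
	a->next = b;
	b->next = c;			/* pretend "c" was just received */

	if (make_received_head(&head, b))
		return 1;

	for (f = head; f; f = f->next)
		printf("%s\n", f->data);	/* frag0, frag1, frag2: order kept */
	/* head now points at the node that was just received ("c"), which
	 * carries the old head's data, so the caller's pointer to the
	 * received buffer is also the head of the reassembled chain. */
	return 0;
}
```

With the received buffer as the head, ipv6_frag_rcv can hand back the reassembled packet in the very skb it was given, which is what allows the sk_buff ** argument to be dropped in a later patch.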
Diffstat (limited to 'net')
-rw-r--r--	net/ipv6/reassembly.c	57
1 file changed, 40 insertions, 17 deletions
diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c
index 31601c993541..fa1055b669d1 100644
--- a/net/ipv6/reassembly.c
+++ b/net/ipv6/reassembly.c
@@ -42,6 +42,7 @@
 #include <linux/icmpv6.h>
 #include <linux/random.h>
 #include <linux/jhash.h>
+#include <linux/skbuff.h>
 
 #include <net/sock.h>
 #include <net/snmp.h>
@@ -107,6 +108,9 @@ static u32 ip6_frag_hash_rnd;
 static LIST_HEAD(ip6_frag_lru_list);
 int ip6_frag_nqueues = 0;
 
+static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
+			  struct net_device *dev);
+
 static __inline__ void __fq_unlink(struct frag_queue *fq)
 {
 	hlist_del(&fq->list);
@@ -420,10 +424,11 @@ fq_find(__be32 id, struct in6_addr *src, struct in6_addr *dst,
 }
 
 
-static void ip6_frag_queue(struct frag_queue *fq, struct sk_buff *skb,
+static int ip6_frag_queue(struct frag_queue *fq, struct sk_buff *skb,
 			   struct frag_hdr *fhdr, int nhoff)
 {
 	struct sk_buff *prev, *next;
+	struct net_device *dev;
 	int offset, end;
 
 	if (fq->last_in & COMPLETE)
@@ -439,7 +444,7 @@ static void ip6_frag_queue(struct frag_queue *fq, struct sk_buff *skb,
 		icmpv6_param_prob(skb, ICMPV6_HDR_FIELD,
 				  ((u8 *)&fhdr->frag_off -
 				   skb_network_header(skb)));
-		return;
+		return -1;
 	}
 
 	if (skb->ip_summed == CHECKSUM_COMPLETE) {
@@ -471,7 +476,7 @@ static void ip6_frag_queue(struct frag_queue *fq, struct sk_buff *skb,
 					 IPSTATS_MIB_INHDRERRORS);
 			icmpv6_param_prob(skb, ICMPV6_HDR_FIELD,
 					  offsetof(struct ipv6hdr, payload_len));
-			return;
+			return -1;
 		}
 		if (end > fq->len) {
 			/* Some bits beyond end -> corruption. */
@@ -564,9 +569,11 @@ static void ip6_frag_queue(struct frag_queue *fq, struct sk_buff *skb,
 	else
 		fq->fragments = skb;
 
-	if (skb->dev)
-		fq->iif = skb->dev->ifindex;
-	skb->dev = NULL;
+	dev = skb->dev;
+	if (dev) {
+		fq->iif = dev->ifindex;
+		skb->dev = NULL;
+	}
 	fq->stamp = skb->tstamp;
 	fq->meat += skb->len;
 	atomic_add(skb->truesize, &ip6_frag_mem);
@@ -578,14 +585,19 @@ static void ip6_frag_queue(struct frag_queue *fq, struct sk_buff *skb,
 		fq->nhoffset = nhoff;
 		fq->last_in |= FIRST_IN;
 	}
+
+	if (fq->last_in == (FIRST_IN | LAST_IN) && fq->meat == fq->len)
+		return ip6_frag_reasm(fq, prev, dev);
+
 	write_lock(&ip6_frag_lock);
 	list_move_tail(&fq->lru_list, &ip6_frag_lru_list);
 	write_unlock(&ip6_frag_lock);
-	return;
+	return -1;
 
 err:
 	IP6_INC_STATS(ip6_dst_idev(skb->dst), IPSTATS_MIB_REASMFAILS);
 	kfree_skb(skb);
+	return -1;
 }
 
 /*
@@ -597,7 +609,7 @@ err:
  * queue is eligible for reassembly i.e. it is not COMPLETE,
  * the last and the first frames arrived and all the bits are here.
  */
-static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff **skb_in,
+static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
 			  struct net_device *dev)
 {
 	struct sk_buff *fp, *head = fq->fragments;
@@ -606,6 +618,24 @@ static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff **skb_in,
 
 	fq_kill(fq);
 
+	/* Make the one we just received the head. */
+	if (prev) {
+		head = prev->next;
+		fp = skb_clone(head, GFP_ATOMIC);
+
+		if (!fp)
+			goto out_oom;
+
+		fp->next = head->next;
+		prev->next = fp;
+
+		skb_morph(head, fq->fragments);
+		head->next = fq->fragments->next;
+
+		kfree_skb(fq->fragments);
+		fq->fragments = head;
+	}
+
 	BUG_TRAP(head != NULL);
 	BUG_TRAP(FRAG6_CB(head)->offset == 0);
 
@@ -674,8 +704,6 @@ static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff **skb_in,
 	ipv6_hdr(head)->payload_len = htons(payload_len);
 	IP6CB(head)->nhoff = nhoff;
 
-	*skb_in = head;
-
 	/* Yes, and fold redundant checksum back. 8) */
 	if (head->ip_summed == CHECKSUM_COMPLETE)
 		head->csum = csum_partial(skb_network_header(head),
@@ -705,7 +733,6 @@ out_fail:
 static int ipv6_frag_rcv(struct sk_buff **skbp)
 {
 	struct sk_buff *skb = *skbp;
-	struct net_device *dev = skb->dev;
 	struct frag_hdr *fhdr;
 	struct frag_queue *fq;
 	struct ipv6hdr *hdr = ipv6_hdr(skb);
@@ -744,15 +771,11 @@ static int ipv6_frag_rcv(struct sk_buff **skbp)
 
 	if ((fq = fq_find(fhdr->identification, &hdr->saddr, &hdr->daddr,
 			  ip6_dst_idev(skb->dst))) != NULL) {
-		int ret = -1;
+		int ret;
 
 		spin_lock(&fq->lock);
 
-		ip6_frag_queue(fq, skb, fhdr, IP6CB(skb)->nhoff);
-
-		if (fq->last_in == (FIRST_IN|LAST_IN) &&
-		    fq->meat == fq->len)
-			ret = ip6_frag_reasm(fq, skbp, dev);
+		ret = ip6_frag_queue(fq, skb, fhdr, IP6CB(skb)->nhoff);
 
 		spin_unlock(&fq->lock);
 		fq_put(fq, NULL);