author    Florian Westphal <fw@strlen.de>  2015-11-18 17:32:40 -0500
committer Pablo Neira Ayuso <pablo@netfilter.org>  2015-11-23 11:54:45 -0500
commit    daaa7d647f81f3f1494d9a9029d611b666d63181 (patch)
tree      bc0ecf36c021ae09c845364acbd918afbd0a693e /net/ipv6
parent    029f7f3b8701cc7aca8bdb31f0c7edd6a479e357 (diff)
netfilter: ipv6: avoid nf_iterate recursion
The previous patch changed nf_ct_frag6_gather() to morph the reassembled
skb with the previous one, which means that the return value is always
NULL or the skb argument. So change it to an err value.

Instead of invoking NF_HOOK recursively with a threshold to skip
already-called hooks, we can now just return NF_ACCEPT to move on to the
next hook, except for -EINPROGRESS (which means the skb has been queued
for reassembly), in which case we return NF_STOLEN.

Signed-off-by: Florian Westphal <fw@strlen.de>
Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
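Under the new contract the hook logic collapses to a small verdict mapping:
-EINPROGRESS (the skb was queued for reassembly, so the frag queue now owns
it) becomes NF_STOLEN, and every other outcome, success or error, becomes
NF_ACCEPT. A minimal sketch of a caller; defrag_sketch() is a hypothetical
name, but the calls mirror the ipv6_defrag() change in this patch:

	/* Sketch only: a defrag hook under the new nf_ct_frag6_gather()
	 * contract. defrag_sketch() is a hypothetical name; the body
	 * mirrors the ipv6_defrag() hook as changed by this patch.
	 */
	static unsigned int defrag_sketch(void *priv, struct sk_buff *skb,
					  const struct nf_hook_state *state)
	{
		int err = nf_ct_frag6_gather(state->net, skb,
					     nf_ct6_defrag_user(state->hook, skb));

		/* skb was queued for reassembly; the frag queue owns it now */
		if (err == -EINPROGRESS)
			return NF_STOLEN;

		/* 0 (reassembled in place) or -EINVAL/-ENOMEM: move on to the
		 * next hook, no recursive NF_HOOK_THRESH invocation needed
		 */
		return NF_ACCEPT;
	}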
Diffstat (limited to 'net/ipv6')
-rw-r--r--  net/ipv6/netfilter/nf_conntrack_reasm.c    | 71
-rw-r--r--  net/ipv6/netfilter/nf_defrag_ipv6_hooks.c  | 14
2 files changed, 36 insertions, 49 deletions
diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c
index 1a86a08adbe5..912bc3afc183 100644
--- a/net/ipv6/netfilter/nf_conntrack_reasm.c
+++ b/net/ipv6/netfilter/nf_conntrack_reasm.c
@@ -361,14 +361,15 @@ err:
 
 /*
  * Check if this packet is complete.
- * Returns NULL on failure by any reason, and pointer
- * to current nexthdr field in reassembled frame.
  *
  * It is called with locked fq, and caller must check that
  * queue is eligible for reassembly i.e. it is not COMPLETE,
  * the last and the first frames arrived and all the bits are here.
+ *
+ * returns true if *prev skb has been transformed into the reassembled
+ * skb, false otherwise.
  */
-static struct sk_buff *
+static bool
 nf_ct_frag6_reasm(struct frag_queue *fq, struct sk_buff *prev, struct net_device *dev)
 {
 	struct sk_buff *fp, *head = fq->q.fragments;
@@ -382,22 +383,21 @@ nf_ct_frag6_reasm(struct frag_queue *fq, struct sk_buff *prev, struct net_devic
 
 	ecn = ip_frag_ecn_table[fq->ecn];
 	if (unlikely(ecn == 0xff))
-		goto out_fail;
+		return false;
 
 	/* Unfragmented part is taken from the first segment. */
 	payload_len = ((head->data - skb_network_header(head)) -
 		       sizeof(struct ipv6hdr) + fq->q.len -
 		       sizeof(struct frag_hdr));
 	if (payload_len > IPV6_MAXPLEN) {
-		pr_debug("payload len is too large.\n");
-		goto out_oversize;
+		net_dbg_ratelimited("nf_ct_frag6_reasm: payload len = %d\n",
+				    payload_len);
+		return false;
 	}
 
 	/* Head of list must not be cloned. */
-	if (skb_unclone(head, GFP_ATOMIC)) {
-		pr_debug("skb is cloned but can't expand head");
-		goto out_oom;
-	}
+	if (skb_unclone(head, GFP_ATOMIC))
+		return false;
 
 	/* If the first fragment is fragmented itself, we split
 	 * it to two chunks: the first with data and paged part
@@ -408,7 +408,7 @@ nf_ct_frag6_reasm(struct frag_queue *fq, struct sk_buff *prev, struct net_devic
 
 		clone = alloc_skb(0, GFP_ATOMIC);
 		if (clone == NULL)
-			goto out_oom;
+			return false;
 
 		clone->next = head->next;
 		head->next = clone;
@@ -438,7 +438,7 @@ nf_ct_frag6_reasm(struct frag_queue *fq, struct sk_buff *prev, struct net_devic
 
 	fp = skb_clone(prev, GFP_ATOMIC);
 	if (!fp)
-		goto out_oom;
+		return false;
 
 	fp->next = prev->next;
 	skb_queue_walk(head, iter) {
@@ -494,16 +494,7 @@ nf_ct_frag6_reasm(struct frag_queue *fq, struct sk_buff *prev, struct net_devic
 	fq->q.fragments = NULL;
 	fq->q.fragments_tail = NULL;
 
-	return head;
-
-out_oversize:
-	net_dbg_ratelimited("nf_ct_frag6_reasm: payload len = %d\n",
-			    payload_len);
-	goto out_fail;
-out_oom:
-	net_dbg_ratelimited("nf_ct_frag6_reasm: no memory for reassembly\n");
-out_fail:
-	return NULL;
+	return true;
 }
 
 /*
@@ -569,27 +560,26 @@ find_prev_fhdr(struct sk_buff *skb, u8 *prevhdrp, int *prevhoff, int *fhoff)
 	return 0;
 }
 
-struct sk_buff *nf_ct_frag6_gather(struct net *net, struct sk_buff *skb, u32 user)
+int nf_ct_frag6_gather(struct net *net, struct sk_buff *skb, u32 user)
 {
 	struct net_device *dev = skb->dev;
+	int fhoff, nhoff, ret;
 	struct frag_hdr *fhdr;
 	struct frag_queue *fq;
 	struct ipv6hdr *hdr;
-	int fhoff, nhoff;
 	u8 prevhdr;
-	struct sk_buff *ret_skb = NULL;
 
 	/* Jumbo payload inhibits frag. header */
 	if (ipv6_hdr(skb)->payload_len == 0) {
 		pr_debug("payload len = 0\n");
-		return skb;
+		return -EINVAL;
 	}
 
 	if (find_prev_fhdr(skb, &prevhdr, &nhoff, &fhoff) < 0)
-		return skb;
+		return -EINVAL;
 
 	if (!pskb_may_pull(skb, fhoff + sizeof(*fhdr)))
-		return skb;
+		return -ENOMEM;
 
 	skb_set_transport_header(skb, fhoff);
 	hdr = ipv6_hdr(skb);
@@ -598,27 +588,28 @@ struct sk_buff *nf_ct_frag6_gather(struct net *net, struct sk_buff *skb, u32 use
 	fq = fq_find(net, fhdr->identification, user, &hdr->saddr, &hdr->daddr,
 		     ip6_frag_ecn(hdr));
 	if (fq == NULL)
-		return skb;
+		return -ENOMEM;
 
 	spin_lock_bh(&fq->q.lock);
 
 	if (nf_ct_frag6_queue(fq, skb, fhdr, nhoff) < 0) {
-		spin_unlock_bh(&fq->q.lock);
-		pr_debug("Can't insert skb to queue\n");
-		inet_frag_put(&fq->q, &nf_frags);
-		return skb;
+		ret = -EINVAL;
+		goto out_unlock;
 	}
 
+	/* after queue has assumed skb ownership, only 0 or -EINPROGRESS
+	 * must be returned.
+	 */
+	ret = -EINPROGRESS;
 	if (fq->q.flags == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) &&
-	    fq->q.meat == fq->q.len) {
-		ret_skb = nf_ct_frag6_reasm(fq, skb, dev);
-		if (ret_skb == NULL)
-			pr_debug("Can't reassemble fragmented packets\n");
-	}
-	spin_unlock_bh(&fq->q.lock);
+	    fq->q.meat == fq->q.len &&
+	    nf_ct_frag6_reasm(fq, skb, dev))
+		ret = 0;
 
+out_unlock:
+	spin_unlock_bh(&fq->q.lock);
 	inet_frag_put(&fq->q, &nf_frags);
-	return ret_skb;
+	return ret;
 }
 EXPORT_SYMBOL_GPL(nf_ct_frag6_gather);
 
diff --git a/net/ipv6/netfilter/nf_defrag_ipv6_hooks.c b/net/ipv6/netfilter/nf_defrag_ipv6_hooks.c
index fb96b1018884..f7aab5ab93a5 100644
--- a/net/ipv6/netfilter/nf_defrag_ipv6_hooks.c
+++ b/net/ipv6/netfilter/nf_defrag_ipv6_hooks.c
@@ -55,7 +55,7 @@ static unsigned int ipv6_defrag(void *priv,
 			       struct sk_buff *skb,
 			       const struct nf_hook_state *state)
 {
-	struct sk_buff *reasm;
+	int err;
 
 #if IS_ENABLED(CONFIG_NF_CONNTRACK)
 	/* Previously seen (loopback)? */
@@ -63,17 +63,13 @@ static unsigned int ipv6_defrag(void *priv,
 		return NF_ACCEPT;
 #endif
 
-	reasm = nf_ct_frag6_gather(state->net, skb,
-				   nf_ct6_defrag_user(state->hook, skb));
+	err = nf_ct_frag6_gather(state->net, skb,
+				 nf_ct6_defrag_user(state->hook, skb));
 	/* queued */
-	if (reasm == NULL)
+	if (err == -EINPROGRESS)
 		return NF_STOLEN;
 
-	NF_HOOK_THRESH(NFPROTO_IPV6, state->hook, state->net, state->sk, reasm,
-		       state->in, state->out,
-		       state->okfn, NF_IP6_PRI_CONNTRACK_DEFRAG + 1);
-
-	return NF_STOLEN;
+	return NF_ACCEPT;
 }
 
 static struct nf_hook_ops ipv6_defrag_ops[] = {