summaryrefslogtreecommitdiffstats
path: root/net/ipv6
diff options
context:
space:
mode:
authorDavid S. Miller <davem@davemloft.net>2015-12-18 15:37:42 -0500
committerDavid S. Miller <davem@davemloft.net>2015-12-18 15:37:42 -0500
commit59ce9670ce18d067433883adf213d04ded074cbf (patch)
tree1df82ed7347977550386aa081b35b3d3d05929e9 /net/ipv6
parent4b402d71d304aa627111fb9d746bb0a75c3989b9 (diff)
parentb4aae759c22e71a3c32144f0b3bc4f2fa4aaae98 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/pablo/nf-next
Pablo Neira Ayuso says: ==================== Netfilter updates for net-next The following patchset contains the first batch of Netfilter updates for the upcoming 4.5 kernel. This batch contains userspace netfilter header compilation fixes, support for packet mangling in nf_tables, the new tracing infrastructure for nf_tables and cgroup2 support for iptables. More specifically, they are: 1) Two patches to include dependencies in our netfilter userspace headers to resolve compilation problems, from Mikko Rapeli. 2) Four cosmetic cleanup patches for the ebtables codebase, from Ian Morris. 3) Remove duplicate include in the netfilter reject infrastructure, from Stephen Hemminger. 4) Two patches to simplify the netfilter defragmentation code for IPv6, patch from Florian Westphal. 5) Fix root ownership of /proc/net netfilter for unprivileged net namespaces, from Philip Whineray. 6) Get rid of unused fields in struct nft_pktinfo, from Florian Westphal. 7) Add mangling support to our nf_tables payload expression, from Patrick McHardy. 8) Introduce a new netlink-based tracing infrastructure for nf_tables, from Florian Westphal. 9) Change setter functions in nfnetlink_log to be void, from Rami Rosen. 10) Add netns support to the cttimeout infrastructure. 11) Add cgroup2 support to iptables, from Tejun Heo. 12) Introduce nfnl_dereference_protected() in nfnetlink, from Florian. 13) Add support for mangling pkttype in the nf_tables meta expression, also from Florian. BTW, I need you to pull net into net-next, I have another batch that requires changes that I don't yet see in net. ==================== Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/ipv6')
-rw-r--r--net/ipv6/netfilter/nf_conntrack_reasm.c167
-rw-r--r--net/ipv6/netfilter/nf_defrag_ipv6_hooks.c20
-rw-r--r--net/ipv6/netfilter/nf_reject_ipv6.c1
3 files changed, 74 insertions, 114 deletions
diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c
index bab4441ed4e4..e4347aeb2e65 100644
--- a/net/ipv6/netfilter/nf_conntrack_reasm.c
+++ b/net/ipv6/netfilter/nf_conntrack_reasm.c
@@ -56,7 +56,6 @@ struct nf_ct_frag6_skb_cb
56{ 56{
57 struct inet6_skb_parm h; 57 struct inet6_skb_parm h;
58 int offset; 58 int offset;
59 struct sk_buff *orig;
60}; 59};
61 60
62#define NFCT_FRAG6_CB(skb) ((struct nf_ct_frag6_skb_cb *)((skb)->cb)) 61#define NFCT_FRAG6_CB(skb) ((struct nf_ct_frag6_skb_cb *)((skb)->cb))
@@ -170,12 +169,6 @@ static unsigned int nf_hashfn(const struct inet_frag_queue *q)
170 return nf_hash_frag(nq->id, &nq->saddr, &nq->daddr); 169 return nf_hash_frag(nq->id, &nq->saddr, &nq->daddr);
171} 170}
172 171
173static void nf_skb_free(struct sk_buff *skb)
174{
175 if (NFCT_FRAG6_CB(skb)->orig)
176 kfree_skb(NFCT_FRAG6_CB(skb)->orig);
177}
178
179static void nf_ct_frag6_expire(unsigned long data) 172static void nf_ct_frag6_expire(unsigned long data)
180{ 173{
181 struct frag_queue *fq; 174 struct frag_queue *fq;
@@ -369,17 +362,18 @@ err:
369 362
370/* 363/*
371 * Check if this packet is complete. 364 * Check if this packet is complete.
372 * Returns NULL on failure by any reason, and pointer
373 * to current nexthdr field in reassembled frame.
374 * 365 *
375 * It is called with locked fq, and caller must check that 366 * It is called with locked fq, and caller must check that
376 * queue is eligible for reassembly i.e. it is not COMPLETE, 367 * queue is eligible for reassembly i.e. it is not COMPLETE,
377 * the last and the first frames arrived and all the bits are here. 368 * the last and the first frames arrived and all the bits are here.
369 *
370 * returns true if *prev skb has been transformed into the reassembled
371 * skb, false otherwise.
378 */ 372 */
379static struct sk_buff * 373static bool
380nf_ct_frag6_reasm(struct frag_queue *fq, struct net_device *dev) 374nf_ct_frag6_reasm(struct frag_queue *fq, struct sk_buff *prev, struct net_device *dev)
381{ 375{
382 struct sk_buff *fp, *op, *head = fq->q.fragments; 376 struct sk_buff *fp, *head = fq->q.fragments;
383 int payload_len; 377 int payload_len;
384 u8 ecn; 378 u8 ecn;
385 379
@@ -390,22 +384,21 @@ nf_ct_frag6_reasm(struct frag_queue *fq, struct net_device *dev)
390 384
391 ecn = ip_frag_ecn_table[fq->ecn]; 385 ecn = ip_frag_ecn_table[fq->ecn];
392 if (unlikely(ecn == 0xff)) 386 if (unlikely(ecn == 0xff))
393 goto out_fail; 387 return false;
394 388
395 /* Unfragmented part is taken from the first segment. */ 389 /* Unfragmented part is taken from the first segment. */
396 payload_len = ((head->data - skb_network_header(head)) - 390 payload_len = ((head->data - skb_network_header(head)) -
397 sizeof(struct ipv6hdr) + fq->q.len - 391 sizeof(struct ipv6hdr) + fq->q.len -
398 sizeof(struct frag_hdr)); 392 sizeof(struct frag_hdr));
399 if (payload_len > IPV6_MAXPLEN) { 393 if (payload_len > IPV6_MAXPLEN) {
400 pr_debug("payload len is too large.\n"); 394 net_dbg_ratelimited("nf_ct_frag6_reasm: payload len = %d\n",
401 goto out_oversize; 395 payload_len);
396 return false;
402 } 397 }
403 398
404 /* Head of list must not be cloned. */ 399 /* Head of list must not be cloned. */
405 if (skb_unclone(head, GFP_ATOMIC)) { 400 if (skb_unclone(head, GFP_ATOMIC))
406 pr_debug("skb is cloned but can't expand head"); 401 return false;
407 goto out_oom;
408 }
409 402
410 /* If the first fragment is fragmented itself, we split 403 /* If the first fragment is fragmented itself, we split
411 * it to two chunks: the first with data and paged part 404 * it to two chunks: the first with data and paged part
@@ -416,7 +409,7 @@ nf_ct_frag6_reasm(struct frag_queue *fq, struct net_device *dev)
416 409
417 clone = alloc_skb(0, GFP_ATOMIC); 410 clone = alloc_skb(0, GFP_ATOMIC);
418 if (clone == NULL) 411 if (clone == NULL)
419 goto out_oom; 412 return false;
420 413
421 clone->next = head->next; 414 clone->next = head->next;
422 head->next = clone; 415 head->next = clone;
@@ -430,10 +423,41 @@ nf_ct_frag6_reasm(struct frag_queue *fq, struct net_device *dev)
430 clone->csum = 0; 423 clone->csum = 0;
431 clone->ip_summed = head->ip_summed; 424 clone->ip_summed = head->ip_summed;
432 425
433 NFCT_FRAG6_CB(clone)->orig = NULL;
434 add_frag_mem_limit(fq->q.net, clone->truesize); 426 add_frag_mem_limit(fq->q.net, clone->truesize);
435 } 427 }
436 428
429 /* morph head into last received skb: prev.
430 *
431 * This allows callers of ipv6 conntrack defrag to continue
432 * to use the last skb(frag) passed into the reasm engine.
433 * The last skb frag 'silently' turns into the full reassembled skb.
434 *
435 * Since prev is also part of q->fragments we have to clone it first.
436 */
437 if (head != prev) {
438 struct sk_buff *iter;
439
440 fp = skb_clone(prev, GFP_ATOMIC);
441 if (!fp)
442 return false;
443
444 fp->next = prev->next;
445
446 iter = head;
447 while (iter) {
448 if (iter->next == prev) {
449 iter->next = fp;
450 break;
451 }
452 iter = iter->next;
453 }
454
455 skb_morph(prev, head);
456 prev->next = head->next;
457 consume_skb(head);
458 head = prev;
459 }
460
437 /* We have to remove fragment header from datagram and to relocate 461 /* We have to remove fragment header from datagram and to relocate
438 * header in order to calculate ICV correctly. */ 462 * header in order to calculate ICV correctly. */
439 skb_network_header(head)[fq->nhoffset] = skb_transport_header(head)[0]; 463 skb_network_header(head)[fq->nhoffset] = skb_transport_header(head)[0];
@@ -474,31 +498,7 @@ nf_ct_frag6_reasm(struct frag_queue *fq, struct net_device *dev)
474 fq->q.fragments = NULL; 498 fq->q.fragments = NULL;
475 fq->q.fragments_tail = NULL; 499 fq->q.fragments_tail = NULL;
476 500
477 /* all original skbs are linked into the NFCT_FRAG6_CB(head).orig */ 501 return true;
478 fp = skb_shinfo(head)->frag_list;
479 if (fp && NFCT_FRAG6_CB(fp)->orig == NULL)
480 /* at above code, head skb is divided into two skbs. */
481 fp = fp->next;
482
483 op = NFCT_FRAG6_CB(head)->orig;
484 for (; fp; fp = fp->next) {
485 struct sk_buff *orig = NFCT_FRAG6_CB(fp)->orig;
486
487 op->next = orig;
488 op = orig;
489 NFCT_FRAG6_CB(fp)->orig = NULL;
490 }
491
492 return head;
493
494out_oversize:
495 net_dbg_ratelimited("nf_ct_frag6_reasm: payload len = %d\n",
496 payload_len);
497 goto out_fail;
498out_oom:
499 net_dbg_ratelimited("nf_ct_frag6_reasm: no memory for reassembly\n");
500out_fail:
501 return NULL;
502} 502}
503 503
504/* 504/*
@@ -564,89 +564,61 @@ find_prev_fhdr(struct sk_buff *skb, u8 *prevhdrp, int *prevhoff, int *fhoff)
564 return 0; 564 return 0;
565} 565}
566 566
567struct sk_buff *nf_ct_frag6_gather(struct net *net, struct sk_buff *skb, u32 user) 567int nf_ct_frag6_gather(struct net *net, struct sk_buff *skb, u32 user)
568{ 568{
569 struct sk_buff *clone;
570 struct net_device *dev = skb->dev; 569 struct net_device *dev = skb->dev;
570 int fhoff, nhoff, ret;
571 struct frag_hdr *fhdr; 571 struct frag_hdr *fhdr;
572 struct frag_queue *fq; 572 struct frag_queue *fq;
573 struct ipv6hdr *hdr; 573 struct ipv6hdr *hdr;
574 int fhoff, nhoff;
575 u8 prevhdr; 574 u8 prevhdr;
576 struct sk_buff *ret_skb = NULL;
577 575
578 /* Jumbo payload inhibits frag. header */ 576 /* Jumbo payload inhibits frag. header */
579 if (ipv6_hdr(skb)->payload_len == 0) { 577 if (ipv6_hdr(skb)->payload_len == 0) {
580 pr_debug("payload len = 0\n"); 578 pr_debug("payload len = 0\n");
581 return skb; 579 return -EINVAL;
582 } 580 }
583 581
584 if (find_prev_fhdr(skb, &prevhdr, &nhoff, &fhoff) < 0) 582 if (find_prev_fhdr(skb, &prevhdr, &nhoff, &fhoff) < 0)
585 return skb; 583 return -EINVAL;
586 584
587 clone = skb_clone(skb, GFP_ATOMIC); 585 if (!pskb_may_pull(skb, fhoff + sizeof(*fhdr)))
588 if (clone == NULL) { 586 return -ENOMEM;
589 pr_debug("Can't clone skb\n");
590 return skb;
591 }
592 587
593 NFCT_FRAG6_CB(clone)->orig = skb; 588 skb_set_transport_header(skb, fhoff);
594 589 hdr = ipv6_hdr(skb);
595 if (!pskb_may_pull(clone, fhoff + sizeof(*fhdr))) { 590 fhdr = (struct frag_hdr *)skb_transport_header(skb);
596 pr_debug("message is too short.\n");
597 goto ret_orig;
598 }
599
600 skb_set_transport_header(clone, fhoff);
601 hdr = ipv6_hdr(clone);
602 fhdr = (struct frag_hdr *)skb_transport_header(clone);
603 591
604 fq = fq_find(net, fhdr->identification, user, &hdr->saddr, &hdr->daddr, 592 fq = fq_find(net, fhdr->identification, user, &hdr->saddr, &hdr->daddr,
605 skb->dev ? skb->dev->ifindex : 0, ip6_frag_ecn(hdr)); 593 skb->dev ? skb->dev->ifindex : 0, ip6_frag_ecn(hdr));
606 if (fq == NULL) { 594 if (fq == NULL) {
607 pr_debug("Can't find and can't create new queue\n"); 595 pr_debug("Can't find and can't create new queue\n");
608 goto ret_orig; 596 return -ENOMEM;
609 } 597 }
610 598
611 spin_lock_bh(&fq->q.lock); 599 spin_lock_bh(&fq->q.lock);
612 600
613 if (nf_ct_frag6_queue(fq, clone, fhdr, nhoff) < 0) { 601 if (nf_ct_frag6_queue(fq, skb, fhdr, nhoff) < 0) {
614 spin_unlock_bh(&fq->q.lock); 602 ret = -EINVAL;
615 pr_debug("Can't insert skb to queue\n"); 603 goto out_unlock;
616 inet_frag_put(&fq->q, &nf_frags);
617 goto ret_orig;
618 } 604 }
619 605
606 /* after queue has assumed skb ownership, only 0 or -EINPROGRESS
607 * must be returned.
608 */
609 ret = -EINPROGRESS;
620 if (fq->q.flags == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) && 610 if (fq->q.flags == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) &&
621 fq->q.meat == fq->q.len) { 611 fq->q.meat == fq->q.len &&
622 ret_skb = nf_ct_frag6_reasm(fq, dev); 612 nf_ct_frag6_reasm(fq, skb, dev))
623 if (ret_skb == NULL) 613 ret = 0;
624 pr_debug("Can't reassemble fragmented packets\n");
625 }
626 spin_unlock_bh(&fq->q.lock);
627 614
615out_unlock:
616 spin_unlock_bh(&fq->q.lock);
628 inet_frag_put(&fq->q, &nf_frags); 617 inet_frag_put(&fq->q, &nf_frags);
629 return ret_skb; 618 return ret;
630
631ret_orig:
632 kfree_skb(clone);
633 return skb;
634} 619}
635EXPORT_SYMBOL_GPL(nf_ct_frag6_gather); 620EXPORT_SYMBOL_GPL(nf_ct_frag6_gather);
636 621
637void nf_ct_frag6_consume_orig(struct sk_buff *skb)
638{
639 struct sk_buff *s, *s2;
640
641 for (s = NFCT_FRAG6_CB(skb)->orig; s;) {
642 s2 = s->next;
643 s->next = NULL;
644 consume_skb(s);
645 s = s2;
646 }
647}
648EXPORT_SYMBOL_GPL(nf_ct_frag6_consume_orig);
649
650static int nf_ct_net_init(struct net *net) 622static int nf_ct_net_init(struct net *net)
651{ 623{
652 int res; 624 int res;
@@ -681,7 +653,6 @@ int nf_ct_frag6_init(void)
681 nf_frags.hashfn = nf_hashfn; 653 nf_frags.hashfn = nf_hashfn;
682 nf_frags.constructor = ip6_frag_init; 654 nf_frags.constructor = ip6_frag_init;
683 nf_frags.destructor = NULL; 655 nf_frags.destructor = NULL;
684 nf_frags.skb_free = nf_skb_free;
685 nf_frags.qsize = sizeof(struct frag_queue); 656 nf_frags.qsize = sizeof(struct frag_queue);
686 nf_frags.match = ip6_frag_match; 657 nf_frags.match = ip6_frag_match;
687 nf_frags.frag_expire = nf_ct_frag6_expire; 658 nf_frags.frag_expire = nf_ct_frag6_expire;
diff --git a/net/ipv6/netfilter/nf_defrag_ipv6_hooks.c b/net/ipv6/netfilter/nf_defrag_ipv6_hooks.c
index 4fdbed5ebfb6..f7aab5ab93a5 100644
--- a/net/ipv6/netfilter/nf_defrag_ipv6_hooks.c
+++ b/net/ipv6/netfilter/nf_defrag_ipv6_hooks.c
@@ -55,7 +55,7 @@ static unsigned int ipv6_defrag(void *priv,
55 struct sk_buff *skb, 55 struct sk_buff *skb,
56 const struct nf_hook_state *state) 56 const struct nf_hook_state *state)
57{ 57{
58 struct sk_buff *reasm; 58 int err;
59 59
60#if IS_ENABLED(CONFIG_NF_CONNTRACK) 60#if IS_ENABLED(CONFIG_NF_CONNTRACK)
61 /* Previously seen (loopback)? */ 61 /* Previously seen (loopback)? */
@@ -63,23 +63,13 @@ static unsigned int ipv6_defrag(void *priv,
63 return NF_ACCEPT; 63 return NF_ACCEPT;
64#endif 64#endif
65 65
66 reasm = nf_ct_frag6_gather(state->net, skb, 66 err = nf_ct_frag6_gather(state->net, skb,
67 nf_ct6_defrag_user(state->hook, skb)); 67 nf_ct6_defrag_user(state->hook, skb));
68 /* queued */ 68 /* queued */
69 if (reasm == NULL) 69 if (err == -EINPROGRESS)
70 return NF_STOLEN; 70 return NF_STOLEN;
71 71
72 /* error occurred or not fragmented */ 72 return NF_ACCEPT;
73 if (reasm == skb)
74 return NF_ACCEPT;
75
76 nf_ct_frag6_consume_orig(reasm);
77
78 NF_HOOK_THRESH(NFPROTO_IPV6, state->hook, state->net, state->sk, reasm,
79 state->in, state->out,
80 state->okfn, NF_IP6_PRI_CONNTRACK_DEFRAG + 1);
81
82 return NF_STOLEN;
83} 73}
84 74
85static struct nf_hook_ops ipv6_defrag_ops[] = { 75static struct nf_hook_ops ipv6_defrag_ops[] = {
diff --git a/net/ipv6/netfilter/nf_reject_ipv6.c b/net/ipv6/netfilter/nf_reject_ipv6.c
index e0f922b777e3..4709f657b7b6 100644
--- a/net/ipv6/netfilter/nf_reject_ipv6.c
+++ b/net/ipv6/netfilter/nf_reject_ipv6.c
@@ -14,7 +14,6 @@
14#include <net/netfilter/ipv6/nf_reject.h> 14#include <net/netfilter/ipv6/nf_reject.h>
15#include <linux/netfilter_ipv6.h> 15#include <linux/netfilter_ipv6.h>
16#include <linux/netfilter_bridge.h> 16#include <linux/netfilter_bridge.h>
17#include <net/netfilter/ipv6/nf_reject.h>
18 17
19const struct tcphdr *nf_reject_ip6_tcphdr_get(struct sk_buff *oldskb, 18const struct tcphdr *nf_reject_ip6_tcphdr_get(struct sk_buff *oldskb,
20 struct tcphdr *otcph, 19 struct tcphdr *otcph,