author     Florian Westphal <fw@strlen.de>          2015-03-04 18:52:33 -0500
committer  Pablo Neira Ayuso <pablo@netfilter.org>  2015-03-09 08:20:48 -0400
commit     8bd63cf1a426e69bf4f611b08978f721e46c194f (patch)
tree       007963a3e6a1530e407652fcec662708ec59c20b /net/bridge
parent     1cae565e8b746f484f1ff1b71d2a1c89d7cf0668 (diff)
bridge: move mac header copying into br_netfilter
The mac header only has to be copied back into the skb for
fragments generated by ip_fragment(), which only happens
for bridge forwarded packets with nf-call-iptables=1 && active nf_defrag.
Signed-off-by: Florian Westphal <fw@strlen.de>
Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
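
For context, a condensed sketch of the output path after this change: nf_defrag can reassemble a bridged IP packet past the outgoing port's MTU, so br_nf_dev_queue_xmit() refragments it, and only those ip_fragment()-generated fragments need the saved MAC header copied back via the new br_nf_push_frag_xmit() callback. The body below is an illustration assembled from this diff plus the surrounding 3.19-era br_netfilter.c, not a verbatim excerpt; the IP-options check and frag_max_size bookkeeping of the real function are elided.

/* Illustrative sketch only -- condensed from this patch and the
 * surrounding br_netfilter.c of the same era; IP-options handling and
 * frag_max_size propagation are elided.
 */
static int br_nf_dev_queue_xmit(struct sk_buff *skb)
{
	/* Reassembly by nf_defrag (nf-call-iptables=1) can leave a
	 * bridged packet larger than the outgoing port's MTU, so it
	 * must be refragmented before transmission.
	 */
	if (skb->protocol == htons(ETH_P_IP) &&
	    skb->len + nf_bridge_encap_header_len(skb) > skb->dev->mtu &&
	    !skb_is_gso(skb))
		/* ip_fragment() emits bare IP fragments; the new
		 * br_nf_push_frag_xmit() callback copies the saved MAC
		 * header back in front of each one before handing it to
		 * br_dev_queue_push_xmit().
		 */
		return ip_fragment(skb, br_nf_push_frag_xmit);

	/* Common case: the MAC header is still in place, so the bridge
	 * transmit path no longer needs to consider copying it back.
	 */
	return br_dev_queue_push_xmit(skb);
}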
Diffstat (limited to 'net/bridge')
-rw-r--r--  net/bridge/br_forward.c    |  4
-rw-r--r--  net/bridge/br_netfilter.c  | 29
2 files changed, 29 insertions(+), 4 deletions(-)
diff --git a/net/bridge/br_forward.c b/net/bridge/br_forward.c
index f96933a823e3..32541d4f72e8 100644
--- a/net/bridge/br_forward.c
+++ b/net/bridge/br_forward.c
@@ -37,9 +37,7 @@ static inline int should_deliver(const struct net_bridge_port *p,
 
 int br_dev_queue_push_xmit(struct sk_buff *skb)
 {
-	/* ip_fragment doesn't copy the MAC header */
-	if (nf_bridge_maybe_copy_header(skb) ||
-	    !is_skb_forwardable(skb->dev, skb)) {
+	if (!is_skb_forwardable(skb->dev, skb)) {
 		kfree_skb(skb);
 	} else {
 		skb_push(skb, ETH_HLEN);
diff --git a/net/bridge/br_netfilter.c b/net/bridge/br_netfilter.c
index 0ee453fad3de..e5479112c4a3 100644
--- a/net/bridge/br_netfilter.c
+++ b/net/bridge/br_netfilter.c
@@ -764,6 +764,33 @@ static unsigned int br_nf_forward_arp(const struct nf_hook_ops *ops,
 }
 
 #if IS_ENABLED(CONFIG_NF_DEFRAG_IPV4)
+static bool nf_bridge_copy_header(struct sk_buff *skb)
+{
+	int err;
+	unsigned int header_size;
+
+	nf_bridge_update_protocol(skb);
+	header_size = ETH_HLEN + nf_bridge_encap_header_len(skb);
+	err = skb_cow_head(skb, header_size);
+	if (err)
+		return false;
+
+	skb_copy_to_linear_data_offset(skb, -header_size,
+				       skb->nf_bridge->data, header_size);
+	__skb_push(skb, nf_bridge_encap_header_len(skb));
+	return true;
+}
+
+static int br_nf_push_frag_xmit(struct sk_buff *skb)
+{
+	if (!nf_bridge_copy_header(skb)) {
+		kfree_skb(skb);
+		return 0;
+	}
+
+	return br_dev_queue_push_xmit(skb);
+}
+
 static int br_nf_dev_queue_xmit(struct sk_buff *skb)
 {
 	int ret;
@@ -780,7 +807,7 @@ static int br_nf_dev_queue_xmit(struct sk_buff *skb)
 			/* Drop invalid packet */
 			return NF_DROP;
 		IPCB(skb)->frag_max_size = frag_max_size;
-		ret = ip_fragment(skb, br_dev_queue_push_xmit);
+		ret = ip_fragment(skb, br_nf_push_frag_xmit);
 	} else
 		ret = br_dev_queue_push_xmit(skb);
 