author		Florian Westphal <fw@strlen.de>			2015-03-04 18:52:33 -0500
committer	Pablo Neira Ayuso <pablo@netfilter.org>		2015-03-09 08:20:48 -0400
commit		8bd63cf1a426e69bf4f611b08978f721e46c194f (patch)
tree		007963a3e6a1530e407652fcec662708ec59c20b
parent		1cae565e8b746f484f1ff1b71d2a1c89d7cf0668 (diff)
bridge: move mac header copying into br_netfilter
The MAC header only has to be copied back into the skb for
fragments generated by ip_fragment(), which only happens
for bridge-forwarded packets with nf-call-iptables=1 and nf_defrag active.
Signed-off-by: Florian Westphal <fw@strlen.de>
Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
-rw-r--r--	include/linux/netfilter_bridge.h	31
-rw-r--r--	net/bridge/br_forward.c			 4
-rw-r--r--	net/bridge/br_netfilter.c		29
3 files changed, 29 insertions, 35 deletions
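For context on what gets copied where: when br_netfilter passes a bridged IPv4 packet to the netfilter hooks (nf-call-iptables=1) and nf_defrag is active, the Ethernet header, plus any VLAN/PPPoE encapsulation, is saved in skb->nf_bridge->data while the packet is handled as a plain IP packet. If the forwarded packet then has to be re-fragmented, ip_fragment() emits fragments that carry no link-layer header, so those saved bytes must be copied back into the headroom in front of each fragment before it is queued on the outgoing bridge port. Below is a minimal userspace sketch of just that restore step; the fake_skb layout, buffer sizes and helper names are illustrative assumptions, not the kernel's skb API.

/* Userspace sketch of the header-restore step (assumed layout, not kernel code):
 * copy the saved link-layer bytes into the headroom directly in front of the
 * fragment's IP data, then "push" the data pointer back over them.
 */
#include <stdio.h>
#include <string.h>

#define ETH_HLEN  14
#define HEADROOM  64        /* headroom reserved in front of the IP data */
#define FRAG_LEN  32        /* pretend this is one fragment's IP payload */

struct fake_skb {
	unsigned char buf[HEADROOM + FRAG_LEN];
	unsigned char *data;    /* points at the start of the IP data */
	unsigned int len;
};

/* rough analogue of nf_bridge_copy_header(): prepend the saved header bytes */
static int restore_mac_header(struct fake_skb *skb,
			      const unsigned char *saved, unsigned int hlen)
{
	if (skb->data - skb->buf < (long)hlen)
		return -1;                        /* no headroom: caller drops */

	memcpy(skb->data - hlen, saved, hlen);    /* copy in front of the data */
	skb->data -= hlen;                        /* __skb_push() equivalent   */
	skb->len  += hlen;
	return 0;
}

int main(void)
{
	unsigned char saved_eth[ETH_HLEN] = { 0 };  /* dst, src, ethertype ... */
	struct fake_skb frag = { .data = NULL, .len = FRAG_LEN };

	frag.data = frag.buf + HEADROOM;            /* fragment starts with IP data */

	if (restore_mac_header(&frag, saved_eth, ETH_HLEN))
		puts("no headroom, fragment would be dropped");
	else
		printf("fragment now %u bytes, MAC header restored\n", frag.len);
	return 0;
}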
diff --git a/include/linux/netfilter_bridge.h b/include/linux/netfilter_bridge.h
index c755e4971fa3..332ef8ab37e9 100644
--- a/include/linux/netfilter_bridge.h
+++ b/include/linux/netfilter_bridge.h
@@ -44,36 +44,6 @@ static inline void nf_bridge_update_protocol(struct sk_buff *skb)
 		skb->protocol = htons(ETH_P_PPP_SES);
 }
 
-/* Fill in the header for fragmented IP packets handled by
- * the IPv4 connection tracking code.
- *
- * Only used in br_forward.c
- */
-static inline int nf_bridge_copy_header(struct sk_buff *skb)
-{
-	int err;
-	unsigned int header_size;
-
-	nf_bridge_update_protocol(skb);
-	header_size = ETH_HLEN + nf_bridge_encap_header_len(skb);
-	err = skb_cow_head(skb, header_size);
-	if (err)
-		return err;
-
-	skb_copy_to_linear_data_offset(skb, -header_size,
-				       skb->nf_bridge->data, header_size);
-	__skb_push(skb, nf_bridge_encap_header_len(skb));
-	return 0;
-}
-
-static inline int nf_bridge_maybe_copy_header(struct sk_buff *skb)
-{
-	if (skb->nf_bridge &&
-	    skb->nf_bridge->mask & (BRNF_BRIDGED | BRNF_BRIDGED_DNAT))
-		return nf_bridge_copy_header(skb);
-	return 0;
-}
-
 static inline unsigned int nf_bridge_mtu_reduction(const struct sk_buff *skb)
 {
 	if (unlikely(skb->nf_bridge->mask & BRNF_PPPoE))
@@ -119,7 +89,6 @@ static inline void br_drop_fake_rtable(struct sk_buff *skb)
 }
 
 #else
-#define nf_bridge_maybe_copy_header(skb)	(0)
 #define nf_bridge_pad(skb)	(0)
 #define br_drop_fake_rtable(skb)	do { } while (0)
 #endif /* CONFIG_BRIDGE_NETFILTER */
diff --git a/net/bridge/br_forward.c b/net/bridge/br_forward.c
index f96933a823e3..32541d4f72e8 100644
--- a/net/bridge/br_forward.c
+++ b/net/bridge/br_forward.c
@@ -37,9 +37,7 @@ static inline int should_deliver(const struct net_bridge_port *p,
 
 int br_dev_queue_push_xmit(struct sk_buff *skb)
 {
-	/* ip_fragment doesn't copy the MAC header */
-	if (nf_bridge_maybe_copy_header(skb) ||
-	    !is_skb_forwardable(skb->dev, skb)) {
+	if (!is_skb_forwardable(skb->dev, skb)) {
 		kfree_skb(skb);
 	} else {
 		skb_push(skb, ETH_HLEN);
diff --git a/net/bridge/br_netfilter.c b/net/bridge/br_netfilter.c
index 0ee453fad3de..e5479112c4a3 100644
--- a/net/bridge/br_netfilter.c
+++ b/net/bridge/br_netfilter.c
@@ -764,6 +764,33 @@ static unsigned int br_nf_forward_arp(const struct nf_hook_ops *ops,
 }
 
 #if IS_ENABLED(CONFIG_NF_DEFRAG_IPV4)
+static bool nf_bridge_copy_header(struct sk_buff *skb)
+{
+	int err;
+	unsigned int header_size;
+
+	nf_bridge_update_protocol(skb);
+	header_size = ETH_HLEN + nf_bridge_encap_header_len(skb);
+	err = skb_cow_head(skb, header_size);
+	if (err)
+		return false;
+
+	skb_copy_to_linear_data_offset(skb, -header_size,
+				       skb->nf_bridge->data, header_size);
+	__skb_push(skb, nf_bridge_encap_header_len(skb));
+	return true;
+}
+
+static int br_nf_push_frag_xmit(struct sk_buff *skb)
+{
+	if (!nf_bridge_copy_header(skb)) {
+		kfree_skb(skb);
+		return 0;
+	}
+
+	return br_dev_queue_push_xmit(skb);
+}
+
 static int br_nf_dev_queue_xmit(struct sk_buff *skb)
 {
 	int ret;
@@ -780,7 +807,7 @@ static int br_nf_dev_queue_xmit(struct sk_buff *skb)
 			/* Drop invalid packet */
 			return NF_DROP;
 		IPCB(skb)->frag_max_size = frag_max_size;
-		ret = ip_fragment(skb, br_dev_queue_push_xmit);
+		ret = ip_fragment(skb, br_nf_push_frag_xmit);
 	} else
 		ret = br_dev_queue_push_xmit(skb);
 
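A note on the return-value convention in the new br_nf_push_frag_xmit(): ip_fragment() calls its output callback once per generated fragment, and a non-zero return stops further fragments and is propagated as an error. By calling kfree_skb() and returning 0 when the header cannot be restored, only that one fragment is dropped while the remaining fragments are still sent, which mirrors the old behaviour where br_dev_queue_push_xmit() freed the skb and returned 0 on a copy failure. The snippet below mocks that per-fragment callback contract in plain C; the struct and function names are invented for illustration.

/* Mock of the per-fragment output-callback pattern (invented types;
 * only the control flow is the point).
 */
#include <stdio.h>

struct frag { int id; int header_ok; };

typedef int (*output_fn)(struct frag *f);

/* stand-in for ip_fragment(): hand each fragment to the output callback,
 * stop and propagate the error if the callback returns non-zero. */
static int fragment_and_send(struct frag *frags, int n, output_fn output)
{
	for (int i = 0; i < n; i++) {
		int err = output(&frags[i]);
		if (err)
			return err;
	}
	return 0;
}

/* stand-in for br_nf_push_frag_xmit(): drop the one bad fragment, report success */
static int push_frag_xmit(struct frag *f)
{
	if (!f->header_ok) {
		printf("fragment %d dropped (header restore failed)\n", f->id);
		return 0;    /* swallow the error so later fragments still go out */
	}
	printf("fragment %d queued for transmit\n", f->id);
	return 0;
}

int main(void)
{
	struct frag frags[] = { {0, 1}, {1, 0}, {2, 1} };
	return fragment_and_send(frags, 3, push_frag_xmit);
}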