author     Florian Westphal <fw@strlen.de>          2015-04-02 08:31:40 -0400
committer  Pablo Neira Ayuso <pablo@netfilter.org>  2015-04-08 10:49:07 -0400
commit     e70deecbf8e1562cac0b19f23848919e2f5d65aa
tree       9350ff40e25b7a7a3c096b5463441d7782c750de /net/bridge
parent     d64d80a2cde94f3e89caebd27240be419fec5b81
netfilter: bridge: don't use nf_bridge_info data to store mac header
br_netfilter maintains an extra state, nf_bridge_info, which is attached to the skb via the skb->nf_bridge pointer. Amongst other things, we use skb->nf_bridge->data to store the original mac header for every processed skb.

This is required for ip refragmentation when using conntrack on top of bridge, because ip_fragment doesn't copy it from the original skb. However, there is no need anymore to do this unconditionally.

Move this to the one place where it's needed -- when br_netfilter calls ip_fragment(). Also switch to percpu storage for this so we can handle fragmenting without accessing nf_bridge meta data.

The only user left is neigh resolution when DNAT is detected, to hold the original source mac address (neigh resolution builds a new mac header using the bridge mac), so rename ->data and reduce its size to what's needed.

Signed-off-by: Florian Westphal <fw@strlen.de>
Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
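A minimal standalone sketch of the per-CPU scratch pattern the patch adopts. The demo_* struct and helpers below are illustrative stand-ins for brnf_frag_data and the open-coded save/restore in br_nf_dev_queue_xmit() / br_nf_push_frag_xmit(); they are not code from the patch. Per-CPU storage is sufficient here because ip_fragment() invokes its output callback synchronously on the same CPU, so the snapshot taken just before fragmentation is still valid for every fragment.

#include <linux/percpu.h>
#include <linux/skbuff.h>
#include <linux/if_ether.h>

/* Hypothetical scratch area, modelled on struct brnf_frag_data:
 * big enough for an Ethernet header plus an 8-byte PPPoE session
 * header, the largest L2 header br_netfilter has to preserve. */
struct demo_frag_data {
	char mac[ETH_HLEN + 8];
	u8 encap_size;
	u8 size;
};

static DEFINE_PER_CPU(struct demo_frag_data, demo_frag_storage);

/* Snapshot the L2 header that sits just in front of skb->data,
 * before handing the skb to ip_fragment(). */
static void demo_save_l2_header(struct sk_buff *skb, u8 encap_size)
{
	struct demo_frag_data *data = this_cpu_ptr(&demo_frag_storage);

	data->encap_size = encap_size;
	data->size = ETH_HLEN + encap_size;
	skb_copy_from_linear_data_offset(skb, -data->size, data->mac,
					 data->size);
}

/* Rebuild the saved header in the headroom of each fragment.  Only the
 * encap (VLAN/PPPoE) part is pushed back here; the Ethernet header is
 * re-pushed later by the bridge xmit path. */
static int demo_restore_l2_header(struct sk_buff *skb)
{
	struct demo_frag_data *data = this_cpu_ptr(&demo_frag_storage);
	int err = skb_cow_head(skb, data->size);

	if (err)
		return err;

	skb_copy_to_linear_data_offset(skb, -data->size, data->mac,
				       data->size);
	__skb_push(skb, data->encap_size);
	return 0;
}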
Diffstat (limited to 'net/bridge')
-rw-r--r--    net/bridge/br_netfilter.c    70
1 file changed, 41 insertions(+), 29 deletions(-)
diff --git a/net/bridge/br_netfilter.c b/net/bridge/br_netfilter.c
index 282ed76c49e0..ca1cb6704a78 100644
--- a/net/bridge/br_netfilter.c
+++ b/net/bridge/br_netfilter.c
@@ -111,6 +111,19 @@ static inline __be16 pppoe_proto(const struct sk_buff *skb)
 	 pppoe_proto(skb) == htons(PPP_IPV6) && \
 	 brnf_filter_pppoe_tagged)
 
+/* largest possible L2 header, see br_nf_dev_queue_xmit() */
+#define NF_BRIDGE_MAX_MAC_HEADER_LENGTH (PPPOE_SES_HLEN + ETH_HLEN)
+
+#if IS_ENABLED(CONFIG_NF_DEFRAG_IPV4)
+struct brnf_frag_data {
+	char mac[NF_BRIDGE_MAX_MAC_HEADER_LENGTH];
+	u8 encap_size;
+	u8 size;
+};
+
+static DEFINE_PER_CPU(struct brnf_frag_data, brnf_frag_data_storage);
+#endif
+
 static inline struct rtable *bridge_parent_rtable(const struct net_device *dev)
 {
 	struct net_bridge_port *port;
@@ -189,14 +202,6 @@ static inline void nf_bridge_pull_encap_header_rcsum(struct sk_buff *skb)
 	skb->network_header += len;
 }
 
-static inline void nf_bridge_save_header(struct sk_buff *skb)
-{
-	int header_size = ETH_HLEN + nf_bridge_encap_header_len(skb);
-
-	skb_copy_from_linear_data_offset(skb, -header_size,
-					 skb->nf_bridge->data, header_size);
-}
-
 /* When handing a packet over to the IP layer
  * check whether we have a skb that is in the
  * expected format
@@ -318,7 +323,7 @@ static int br_nf_pre_routing_finish_bridge(struct sk_buff *skb)
 		 */
 		skb_copy_from_linear_data_offset(skb,
 						 -(ETH_HLEN-ETH_ALEN),
-						 skb->nf_bridge->data,
+						 nf_bridge->neigh_header,
 						 ETH_HLEN-ETH_ALEN);
 		/* tell br_dev_xmit to continue with forwarding */
 		nf_bridge->mask |= BRNF_BRIDGED_DNAT;
@@ -810,30 +815,22 @@ static unsigned int br_nf_forward_arp(const struct nf_hook_ops *ops,
 }
 
 #if IS_ENABLED(CONFIG_NF_DEFRAG_IPV4)
-static bool nf_bridge_copy_header(struct sk_buff *skb)
+static int br_nf_push_frag_xmit(struct sk_buff *skb)
 {
+	struct brnf_frag_data *data;
 	int err;
-	unsigned int header_size;
 
-	nf_bridge_update_protocol(skb);
-	header_size = ETH_HLEN + nf_bridge_encap_header_len(skb);
-	err = skb_cow_head(skb, header_size);
-	if (err)
-		return false;
+	data = this_cpu_ptr(&brnf_frag_data_storage);
+	err = skb_cow_head(skb, data->size);
 
-	skb_copy_to_linear_data_offset(skb, -header_size,
-				       skb->nf_bridge->data, header_size);
-	__skb_push(skb, nf_bridge_encap_header_len(skb));
-	return true;
-}
-
-static int br_nf_push_frag_xmit(struct sk_buff *skb)
-{
-	if (!nf_bridge_copy_header(skb)) {
+	if (err) {
 		kfree_skb(skb);
 		return 0;
 	}
 
+	skb_copy_to_linear_data_offset(skb, -data->size, data->mac, data->size);
+	__skb_push(skb, data->encap_size);
+
 	return br_dev_queue_push_xmit(skb);
 }
 
@@ -851,14 +848,27 @@ static int br_nf_dev_queue_xmit(struct sk_buff *skb)
 	 * boundaries by preserving frag_list rather than refragmenting.
 	 */
 	if (skb->len + mtu_reserved > skb->dev->mtu) {
+		struct brnf_frag_data *data;
+
 		frag_max_size = BR_INPUT_SKB_CB(skb)->frag_max_size;
 		if (br_parse_ip_options(skb))
 			/* Drop invalid packet */
 			return NF_DROP;
 		IPCB(skb)->frag_max_size = frag_max_size;
+
+		nf_bridge_update_protocol(skb);
+
+		data = this_cpu_ptr(&brnf_frag_data_storage);
+		data->encap_size = nf_bridge_encap_header_len(skb);
+		data->size = ETH_HLEN + data->encap_size;
+
+		skb_copy_from_linear_data_offset(skb, -data->size, data->mac,
+						 data->size);
+
 		ret = ip_fragment(skb, br_nf_push_frag_xmit);
-	} else
+	} else {
 		ret = br_dev_queue_push_xmit(skb);
+	}
 
 	return ret;
 }
@@ -906,7 +916,6 @@ static unsigned int br_nf_post_routing(const struct nf_hook_ops *ops,
 	}
 
 	nf_bridge_pull_encap_header(skb);
-	nf_bridge_save_header(skb);
 	if (pf == NFPROTO_IPV4)
 		skb->protocol = htons(ETH_P_IP);
 	else
@@ -951,8 +960,11 @@ static void br_nf_pre_routing_finish_bridge_slow(struct sk_buff *skb)
 	skb_pull(skb, ETH_HLEN);
 	nf_bridge->mask &= ~BRNF_BRIDGED_DNAT;
 
-	skb_copy_to_linear_data_offset(skb, -(ETH_HLEN-ETH_ALEN),
-				       skb->nf_bridge->data, ETH_HLEN-ETH_ALEN);
+	BUILD_BUG_ON(sizeof(nf_bridge->neigh_header) != (ETH_HLEN - ETH_ALEN));
+
+	skb_copy_to_linear_data_offset(skb, -(ETH_HLEN - ETH_ALEN),
+				       nf_bridge->neigh_header,
+				       ETH_HLEN - ETH_ALEN);
 	skb->dev = nf_bridge->physindev;
 	br_handle_frame_finish(skb);
 }
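The BUILD_BUG_ON() added in br_nf_pre_routing_finish_bridge_slow() states the size contract on the renamed field at compile time: the fixed-length copy must match the field exactly. A sketch of the same idiom, using a hypothetical demo_nf_bridge_info rather than the real struct nf_bridge_info:

#include <linux/bug.h>
#include <linux/types.h>
#include <linux/if_ether.h>

struct demo_nf_bridge_info {
	/* holds the original source MAC plus EtherType, saved before
	 * neighbour resolution rewrites the Ethernet header */
	u8 neigh_header[ETH_HLEN - ETH_ALEN];
};

static void demo_size_check(const struct demo_nf_bridge_info *info)
{
	/* Fails the build, not the boot, if the field ever changes size
	 * and the fixed-length copy would overflow or truncate. */
	BUILD_BUG_ON(sizeof(info->neigh_header) != (ETH_HLEN - ETH_ALEN));
}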