path: root/net/ipv4/udp_offload.c
author		Tom Herbert <therbert@google.com>	2014-09-29 23:22:29 -0400
committer	David S. Miller <davem@davemloft.net>	2014-10-01 21:35:51 -0400
commit		8bce6d7d0d1ede22af334ee241841e9278365278 (patch)
tree		9032eb929757a84f90828cdbbb89b917e783fcf7 /net/ipv4/udp_offload.c
parent		f44d61cdd3ab4259289ccf314093eb45d83a69e6 (diff)
udp: Generalize skb_udp_segment
skb_udp_tunnel_segment is the function called from udp4_ufo_fragment to segment a UDP tunnel packet. This function currently assumes the inner packet is transparent Ethernet bridging (i.e. VXLAN encapsulation). This patch generalizes the function to operate on either an Ethertype or an IP protocol.

The inner_protocol field must be set to the protocol of the inner header. This can now be either an Ethertype or an IP protocol (held in a union). A new flag in the skbuff indicates which type is in effect. The helper functions skb_set_inner_protocol and skb_set_inner_ipproto were added to set inner_protocol; they are called from the point where the tunnel encapsulation occurs.

When skb_udp_tunnel_segment is called, the function used to segment the inner packet is selected based on the inner IP protocol or Ethertype. In the case of an IP protocol encapsulation, the function is derived from inet[6]_offloads. In the case of an Ethertype, skb->protocol is set to the inner_protocol and skb_mac_gso_segment is called. (GRE currently does this, but it might be possible to look up the protocol in offload_base and call the appropriate segmentation function directly.)

Signed-off-by: Tom Herbert <therbert@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
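The helpers named above are added to include/linux/skbuff.h by this same patch (the diff below is limited to net/ipv4/udp_offload.c). A minimal sketch, assuming the union and one-bit inner_protocol_type flag described above:

	static inline void skb_set_inner_protocol(struct sk_buff *skb,
						  __be16 protocol)
	{
		skb->inner_protocol = protocol;
		skb->inner_protocol_type = ENCAP_TYPE_ETHER;
	}

	static inline void skb_set_inner_ipproto(struct sk_buff *skb,
						 __u8 ipproto)
	{
		skb->inner_ipproto = ipproto;
		skb->inner_protocol_type = ENCAP_TYPE_IPPROTO;
	}

A tunnel transmit path would call one of these at the point of encapsulation, for example:

	/* inner packet is a full Ethernet frame (VXLAN-style) */
	skb_set_inner_protocol(skb, htons(ETH_P_TEB));

	/* inner packet starts at an IP header (e.g. IPIP over UDP) */
	skb_set_inner_ipproto(skb, IPPROTO_IPIP);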
Diffstat (limited to 'net/ipv4/udp_offload.c')
-rw-r--r--	net/ipv4/udp_offload.c	51
1 file changed, 46 insertions(+), 5 deletions(-)
diff --git a/net/ipv4/udp_offload.c b/net/ipv4/udp_offload.c
index 19ebe6a39ddc..8c35f2c939ee 100644
--- a/net/ipv4/udp_offload.c
+++ b/net/ipv4/udp_offload.c
@@ -25,8 +25,11 @@ struct udp_offload_priv {
 	struct udp_offload_priv __rcu *next;
 };
 
-struct sk_buff *skb_udp_tunnel_segment(struct sk_buff *skb,
-				       netdev_features_t features)
+static struct sk_buff *__skb_udp_tunnel_segment(struct sk_buff *skb,
+	netdev_features_t features,
+	struct sk_buff *(*gso_inner_segment)(struct sk_buff *skb,
+					     netdev_features_t features),
+	__be16 new_protocol)
 {
 	struct sk_buff *segs = ERR_PTR(-EINVAL);
 	u16 mac_offset = skb->mac_header;
@@ -48,7 +51,7 @@ struct sk_buff *skb_udp_tunnel_segment(struct sk_buff *skb,
 	skb_reset_mac_header(skb);
 	skb_set_network_header(skb, skb_inner_network_offset(skb));
 	skb->mac_len = skb_inner_network_offset(skb);
-	skb->protocol = htons(ETH_P_TEB);
+	skb->protocol = new_protocol;
 
 	need_csum = !!(skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM);
 	if (need_csum)
@@ -56,7 +59,7 @@ struct sk_buff *skb_udp_tunnel_segment(struct sk_buff *skb,
 
 	/* segment inner packet. */
 	enc_features = skb->dev->hw_enc_features & netif_skb_features(skb);
-	segs = skb_mac_gso_segment(skb, enc_features);
+	segs = gso_inner_segment(skb, enc_features);
 	if (IS_ERR_OR_NULL(segs)) {
 		skb_gso_error_unwind(skb, protocol, tnl_hlen, mac_offset,
 				     mac_len);
@@ -101,6 +104,44 @@ out:
 	return segs;
 }
 
+struct sk_buff *skb_udp_tunnel_segment(struct sk_buff *skb,
+				       netdev_features_t features,
+				       bool is_ipv6)
+{
+	__be16 protocol = skb->protocol;
+	const struct net_offload **offloads;
+	const struct net_offload *ops;
+	struct sk_buff *segs = ERR_PTR(-EINVAL);
+	struct sk_buff *(*gso_inner_segment)(struct sk_buff *skb,
+					     netdev_features_t features);
+
+	rcu_read_lock();
+
+	switch (skb->inner_protocol_type) {
+	case ENCAP_TYPE_ETHER:
+		protocol = skb->inner_protocol;
+		gso_inner_segment = skb_mac_gso_segment;
+		break;
+	case ENCAP_TYPE_IPPROTO:
+		offloads = is_ipv6 ? inet6_offloads : inet_offloads;
+		ops = rcu_dereference(offloads[skb->inner_ipproto]);
+		if (!ops || !ops->callbacks.gso_segment)
+			goto out_unlock;
+		gso_inner_segment = ops->callbacks.gso_segment;
+		break;
+	default:
+		goto out_unlock;
+	}
+
+	segs = __skb_udp_tunnel_segment(skb, features, gso_inner_segment,
+					protocol);
+
+out_unlock:
+	rcu_read_unlock();
+
+	return segs;
+}
+
 static struct sk_buff *udp4_ufo_fragment(struct sk_buff *skb,
 					 netdev_features_t features)
 {
@@ -113,7 +154,7 @@ static struct sk_buff *udp4_ufo_fragment(struct sk_buff *skb,
 	if (skb->encapsulation &&
 	    (skb_shinfo(skb)->gso_type &
 	     (SKB_GSO_UDP_TUNNEL|SKB_GSO_UDP_TUNNEL_CSUM))) {
-		segs = skb_udp_tunnel_segment(skb, features);
+		segs = skb_udp_tunnel_segment(skb, features, false);
 		goto out;
 	}
 
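The IPv6 counterpart, udp6_ufo_fragment in net/ipv6/udp_offload.c, is updated by the same commit (outside this file-limited view) and presumably makes the mirrored call with is_ipv6 set:

	segs = skb_udp_tunnel_segment(skb, features, true);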