author     Or Gerlitz <ogerlitz@mellanox.com>        2014-01-20 06:59:19 -0500
committer  David S. Miller <davem@davemloft.net>     2014-01-21 21:05:04 -0500
commit     b582ef0990d457f7ce8ccf827af51a575ca0b4a6
tree       2893cba0f3c386795a7324c71851d165a68d891e
parent     2618abb73c8953f0848511fc13f68da4d8337574
net: Add GRO support for UDP encapsulating protocols
Add GRO handlers for protocols that do UDP encapsulation, with the intent of
being able to coalesce packets which encapsulate packets belonging to
the same TCP session.
For GRO purposes, the destination UDP port takes the role of the ethertype field
in the Ethernet header, or of the next-protocol field in the IP header.
The UDP GRO handler will only attempt to coalesce packets whose destination
port has a registered GRO handler.
Use a mark on the skb GRO CB data to disallow (flush) running the UDP GRO receive
code twice on a packet. This solves the problem of UDP-encapsulated packets whose
inner VM packet is also UDP and happens to carry a port which has registered offloads.
Signed-off-by: Shlomo Pongratz <shlomop@mellanox.com>
Signed-off-by: Or Gerlitz <ogerlitz@mellanox.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
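
The new registration entry points are meant to be called from encapsulation drivers. Below is a minimal, hypothetical sketch (not part of this patch) of such a consumer: a module that registers GRO callbacks for its UDP destination port via udp_add_offload(). It assumes the struct udp_offload definition this series adds to include/net/protocol.h; the port number (4789, the IANA VXLAN port) and all my_tun_* names are purely illustrative.

```c
#include <linux/module.h>
#include <linux/skbuff.h>
#include <net/protocol.h>

static struct sk_buff **my_tun_gro_receive(struct sk_buff **head,
					   struct sk_buff *skb)
{
	/* A real implementation would parse the encapsulation header and
	 * continue the GRO chain for the inner packet; this stub simply
	 * declines to aggregate anything. */
	return NULL;
}

static int my_tun_gro_complete(struct sk_buff *skb, int nhoff)
{
	/* Called once the aggregated skb is handed up the stack; nhoff
	 * points just past the outer UDP header.  A real implementation
	 * would fix up inner lengths/checksums here. */
	return 0;
}

static struct udp_offload my_tun_offload = {
	.callbacks = {
		.gro_receive  = my_tun_gro_receive,
		.gro_complete = my_tun_gro_complete,
	},
};

static int __init my_tun_init(void)
{
	/* udp_gro_receive() matches against uh->dest, so the port must be
	 * stored in network byte order. */
	my_tun_offload.port = htons(4789);
	return udp_add_offload(&my_tun_offload);
}
module_init(my_tun_init);
```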
Diffstat (limited to 'net/ipv4')
-rw-r--r--   net/ipv4/udp_offload.c | 143
1 file changed, 143 insertions(+), 0 deletions(-)
diff --git a/net/ipv4/udp_offload.c b/net/ipv4/udp_offload.c
index 79c62bdcd3c5..ee853c55deea 100644
--- a/net/ipv4/udp_offload.c
+++ b/net/ipv4/udp_offload.c
@@ -14,6 +14,15 @@
 #include <net/udp.h>
 #include <net/protocol.h>
 
+static DEFINE_SPINLOCK(udp_offload_lock);
+static struct udp_offload_priv *udp_offload_base __read_mostly;
+
+struct udp_offload_priv {
+        struct udp_offload *offload;
+        struct rcu_head rcu;
+        struct udp_offload_priv __rcu *next;
+};
+
 static int udp4_ufo_send_check(struct sk_buff *skb)
 {
         if (!pskb_may_pull(skb, sizeof(struct udphdr)))
@@ -89,10 +98,144 @@ out:
         return segs;
 }
 
+int udp_add_offload(struct udp_offload *uo)
+{
+        struct udp_offload_priv **head = &udp_offload_base;
+        struct udp_offload_priv *new_offload = kzalloc(sizeof(*new_offload), GFP_KERNEL);
+
+        if (!new_offload)
+                return -ENOMEM;
+
+        new_offload->offload = uo;
+
+        spin_lock(&udp_offload_lock);
+        rcu_assign_pointer(new_offload->next, rcu_dereference(*head));
+        rcu_assign_pointer(*head, rcu_dereference(new_offload));
+        spin_unlock(&udp_offload_lock);
+
+        return 0;
+}
+EXPORT_SYMBOL(udp_add_offload);
+
+static void udp_offload_free_routine(struct rcu_head *head)
+{
+        struct udp_offload_priv *ou_priv = container_of(head, struct udp_offload_priv, rcu);
+        kfree(ou_priv);
+}
+
+void udp_del_offload(struct udp_offload *uo)
+{
+        struct udp_offload_priv __rcu **head = &udp_offload_base;
+        struct udp_offload_priv *uo_priv;
+
+        spin_lock(&udp_offload_lock);
+
+        uo_priv = rcu_dereference(*head);
+        for (; uo_priv != NULL;
+             uo_priv = rcu_dereference(*head)) {
+
+                if (uo_priv->offload == uo) {
+                        rcu_assign_pointer(*head, rcu_dereference(uo_priv->next));
+                        goto unlock;
+                }
+                head = &uo_priv->next;
+        }
+        pr_warn("udp_del_offload: didn't find offload for port %d\n", htons(uo->port));
+unlock:
+        spin_unlock(&udp_offload_lock);
+        if (uo_priv != NULL)
+                call_rcu(&uo_priv->rcu, udp_offload_free_routine);
+}
+EXPORT_SYMBOL(udp_del_offload);
+
+static struct sk_buff **udp_gro_receive(struct sk_buff **head, struct sk_buff *skb)
+{
+        struct udp_offload_priv *uo_priv;
+        struct sk_buff *p, **pp = NULL;
+        struct udphdr *uh, *uh2;
+        unsigned int hlen, off;
+        int flush = 1;
+
+        if (NAPI_GRO_CB(skb)->udp_mark ||
+            (!skb->encapsulation && skb->ip_summed != CHECKSUM_COMPLETE))
+                goto out;
+
+        /* mark that this skb passed once through the udp gro layer */
+        NAPI_GRO_CB(skb)->udp_mark = 1;
+
+        off = skb_gro_offset(skb);
+        hlen = off + sizeof(*uh);
+        uh = skb_gro_header_fast(skb, off);
+        if (skb_gro_header_hard(skb, hlen)) {
+                uh = skb_gro_header_slow(skb, hlen, off);
+                if (unlikely(!uh))
+                        goto out;
+        }
+
+        rcu_read_lock();
+        uo_priv = rcu_dereference(udp_offload_base);
+        for (; uo_priv != NULL; uo_priv = rcu_dereference(uo_priv->next)) {
+                if (uo_priv->offload->port == uh->dest &&
+                    uo_priv->offload->callbacks.gro_receive)
+                        goto unflush;
+        }
+        goto out_unlock;
+
+unflush:
+        flush = 0;
+
+        for (p = *head; p; p = p->next) {
+                if (!NAPI_GRO_CB(p)->same_flow)
+                        continue;
+
+                uh2 = (struct udphdr *)(p->data + off);
+                if ((*(u32 *)&uh->source != *(u32 *)&uh2->source)) {
+                        NAPI_GRO_CB(p)->same_flow = 0;
+                        continue;
+                }
+        }
+
+        skb_gro_pull(skb, sizeof(struct udphdr)); /* pull encapsulating udp header */
+        pp = uo_priv->offload->callbacks.gro_receive(head, skb);
+
+out_unlock:
+        rcu_read_unlock();
+out:
+        NAPI_GRO_CB(skb)->flush |= flush;
+        return pp;
+}
+
+static int udp_gro_complete(struct sk_buff *skb, int nhoff)
+{
+        struct udp_offload_priv *uo_priv;
+        __be16 newlen = htons(skb->len - nhoff);
+        struct udphdr *uh = (struct udphdr *)(skb->data + nhoff);
+        int err = -ENOSYS;
+
+        uh->len = newlen;
+
+        rcu_read_lock();
+
+        uo_priv = rcu_dereference(udp_offload_base);
+        for (; uo_priv != NULL; uo_priv = rcu_dereference(uo_priv->next)) {
+                if (uo_priv->offload->port == uh->dest &&
+                    uo_priv->offload->callbacks.gro_complete)
+                        break;
+        }
+
+        if (uo_priv != NULL)
+                err = uo_priv->offload->callbacks.gro_complete(skb, nhoff + sizeof(struct udphdr));
+
+        rcu_read_unlock();
+        return err;
+}
+
 static const struct net_offload udpv4_offload = {
         .callbacks = {
                 .gso_send_check = udp4_ufo_send_check,
                 .gso_segment = udp4_ufo_fragment,
+                .gro_receive = udp_gro_receive,
+                .gro_complete = udp_gro_complete,
         },
 };
 
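
For completeness, a hedged sketch of the matching teardown path for the hypothetical my_tun_* module above. udp_del_offload() unlinks the entry under udp_offload_lock and defers the kfree() through call_rcu()/udp_offload_free_routine(), so a udp_gro_receive() walker still traversing the list under rcu_read_lock() never dereferences freed memory; readers stay lockless on the hot receive path while registration and unregistration serialize only against each other.

```c
static void __exit my_tun_exit(void)
{
	/* After this returns, new GRO walkers can no longer find the
	 * entry; the backing udp_offload_priv is freed only after an
	 * RCU grace period, via udp_offload_free_routine(). */
	udp_del_offload(&my_tun_offload);
}
module_exit(my_tun_exit);

MODULE_LICENSE("GPL");
```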