author	Alexander Duyck <alexander.h.duyck@intel.com>	2018-05-07 14:08:40 -0400
committer	David S. Miller <davem@davemloft.net>	2018-05-08 22:30:06 -0400
commit	0ad6509571e06b302d519f2f05e616ac8c1a10d7 (patch)
tree	339ea1f0c15f38b9510b353b95da1c46939e381c
parent	9a0d41b3598ff62ecb26661bbfb1d523586cdea3 (diff)
udp: Partially unroll handling of first segment and last segment
This patch unrolls handling of the first and last segments out of the loop that processes the segmented skb. Part of the motivation is that the first frame and all of the frames in between are mostly identical in terms of header data, while the last frame differs in its length and partial checksum. In addition, drop the header length calculation: it is only needed for the last frame, and the same value can be obtained from the offset of the tail pointer from the transport header plus data_len.

Signed-off-by: Alexander Duyck <alexander.h.duyck@intel.com>
Reviewed-by: Eric Dumazet <edumazet@google.com>
Acked-by: Willem de Bruijn <willemb@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
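The loop restructuring described above can be illustrated with a small, self-contained userspace sketch (not part of the patch; the struct and field names are invented for the example): the per-segment bookkeeping runs on every node of a singly linked list, the precomputed full-size value is written to every node except the last, and the loop exits with the cursor still on the last node so its shorter value can be written once, after the loop.

#include <stddef.h>
#include <stdio.h>

/* hypothetical stand-in for one segment in the list */
struct seg {
	struct seg *next;
	unsigned int payload_len;	/* actual length of this segment */
	unsigned int wire_len;		/* length field the loop writes */
};

static void fixup_segments(struct seg *segs, unsigned int full_len)
{
	struct seg *seg = segs;

	for (;;) {
		/* work common to every segment would go here */

		if (!seg->next)
			break;

		/* every segment except the last carries the full length */
		seg->wire_len = full_len;
		seg = seg->next;
	}

	/* the loop exits on the last segment, which may be shorter */
	seg->wire_len = seg->payload_len;
}

int main(void)
{
	struct seg c = { NULL, 200, 0 };
	struct seg b = { &c, 1000, 0 };
	struct seg a = { &b, 1000, 0 };

	fixup_segments(&a, 1000);
	printf("%u %u %u\n", a.wire_len, b.wire_len, c.wire_len);	/* 1000 1000 200 */
	return 0;
}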
-rw-r--r--	net/ipv4/udp_offload.c	33
1 file changed, 19 insertions(+), 14 deletions(-)
diff --git a/net/ipv4/udp_offload.c b/net/ipv4/udp_offload.c
index 92c182e99ddc..b15c78ac3f23 100644
--- a/net/ipv4/udp_offload.c
+++ b/net/ipv4/udp_offload.c
@@ -193,7 +193,6 @@ struct sk_buff *__udp_gso_segment(struct sk_buff *gso_skb,
 	struct sock *sk = gso_skb->sk;
 	unsigned int sum_truesize = 0;
 	struct sk_buff *segs, *seg;
-	unsigned int hdrlen;
 	struct udphdr *uh;
 	unsigned int mss;
 	__sum16 check;
@@ -203,7 +202,6 @@ struct sk_buff *__udp_gso_segment(struct sk_buff *gso_skb,
 	if (gso_skb->len <= sizeof(*uh) + mss)
 		return ERR_PTR(-EINVAL);
 
-	hdrlen = gso_skb->data - skb_mac_header(gso_skb);
 	skb_pull(gso_skb, sizeof(*uh));
 
 	/* clear destructor to avoid skb_segment assigning it to tail */
@@ -216,30 +214,37 @@ struct sk_buff *__udp_gso_segment(struct sk_buff *gso_skb,
 		return segs;
 	}
 
-	uh = udp_hdr(segs);
+	seg = segs;
+	uh = udp_hdr(seg);
 
 	/* compute checksum adjustment based on old length versus new */
 	newlen = htons(sizeof(*uh) + mss);
 	check = csum16_add(csum16_sub(uh->check, uh->len), newlen);
 
-	for (seg = segs; seg; seg = seg->next) {
-		uh = udp_hdr(seg);
+	for (;;) {
+		seg->destructor = sock_wfree;
+		seg->sk = sk;
+		sum_truesize += seg->truesize;
 
-		/* last packet can be partial gso_size */
-		if (!seg->next) {
-			newlen = htons(seg->len - hdrlen);
-			check = csum16_add(csum16_sub(uh->check, uh->len),
-					   newlen);
-		}
+		if (!seg->next)
+			break;
 
 		uh->len = newlen;
 		uh->check = check;
 
-		seg->destructor = sock_wfree;
-		seg->sk = sk;
-		sum_truesize += seg->truesize;
+		seg = seg->next;
+		uh = udp_hdr(seg);
 	}
 
+	/* last packet can be partial gso_size, account for that in checksum */
+	newlen = htons(skb_tail_pointer(seg) - skb_transport_header(seg) +
+		       seg->data_len);
+	check = csum16_add(csum16_sub(uh->check, uh->len), newlen);
+
+	uh->len = newlen;
+	uh->check = check;
+
+	/* update refcount for the packet */
 	refcount_add(sum_truesize - gso_skb->truesize, &sk->sk_wmem_alloc);
 
 	return segs;
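For reference, the csum16_add()/csum16_sub() calls in the hunk above perform an incremental ones'-complement update of the partial checksum: the pseudo-header sum carried in uh->check includes the UDP length, so when the segment length changes only the old length needs to be subtracted and the new one added. Below is a minimal userspace approximation of that arithmetic; the example values are arbitrary, and the helpers merely mirror the kernel's csum16_add/csum16_sub semantics rather than being the in-tree code.

#include <stdint.h>
#include <stdio.h>

/* ones'-complement 16-bit add: fold any carry back into the low bits */
static uint16_t csum16_add(uint16_t csum, uint16_t addend)
{
	uint16_t res = (uint16_t)(csum + addend);

	return (uint16_t)(res + (res < addend));
}

/* subtracting a value is adding its ones' complement */
static uint16_t csum16_sub(uint16_t csum, uint16_t addend)
{
	return csum16_add(csum, (uint16_t)~addend);
}

int main(void)
{
	uint16_t check   = 0x1c46;	/* arbitrary example partial checksum */
	uint16_t old_len = 0x05dc;	/* old UDP length field */
	uint16_t new_len = 0x0244;	/* new UDP length after segmentation */

	/* same pattern as the patch: drop the old length, add the new one */
	check = csum16_add(csum16_sub(check, old_len), new_len);
	printf("adjusted check: 0x%04x\n", check);
	return 0;
}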