author	Eric Dumazet <edumazet@google.com>	2013-10-18 16:13:27 -0400
committer	David S. Miller <davem@davemloft.net>	2013-10-19 19:11:56 -0400
commit	47d27aad44169372f358cda88a223883f6760fa5 (patch)
tree	773d64817eccc3a2453a6804e5540bcbb6b6907c /net
parent	a729e83ad6f6de2cf11c0da4a5a7c0b3924d8335 (diff)
ipv4: gso: send_check() & segment() cleanups
inet_gso_segment() and inet_gso_send_check() are called by skb_mac_gso_segment() under rcu lock, no need to use rcu_read_lock() / rcu_read_unlock().

Avoid calling ip_hdr() twice per function.

We can use ip_send_check() helper.

Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
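For context, skb_mac_gso_segment() takes rcu_read_lock() itself around the offload callbacks it invokes, which is why the inner rcu_read_lock() / rcu_read_unlock() pair in these two functions is redundant. The ip_send_check() helper used in the last hunk is the existing one from net/ipv4/ip_output.c; a rough sketch of it, for reference only (not part of this patch):

/* Compute the IPv4 header checksum for an outgoing datagram. */
void ip_send_check(struct iphdr *iph)
{
	iph->check = 0;
	iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
}

i.e. the same two statements this patch removes from inet_gso_segment().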
Diffstat (limited to 'net')
-rw-r--r--	net/ipv4/af_inet.c	24
1 file changed, 11 insertions, 13 deletions
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index 35913fb77dc8..4f8cd4fc451d 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -1254,20 +1254,19 @@ static int inet_gso_send_check(struct sk_buff *skb)
 	if (ihl < sizeof(*iph))
 		goto out;
 
+	proto = iph->protocol;
+
+	/* Warning: after this point, iph might be no longer valid */
 	if (unlikely(!pskb_may_pull(skb, ihl)))
 		goto out;
-
 	__skb_pull(skb, ihl);
+
 	skb_reset_transport_header(skb);
-	iph = ip_hdr(skb);
-	proto = iph->protocol;
 	err = -EPROTONOSUPPORT;
 
-	rcu_read_lock();
 	ops = rcu_dereference(inet_offloads[proto]);
 	if (likely(ops && ops->callbacks.gso_send_check))
 		err = ops->callbacks.gso_send_check(skb);
-	rcu_read_unlock();
 
 out:
 	return err;
@@ -1305,23 +1304,23 @@ static struct sk_buff *inet_gso_segment(struct sk_buff *skb,
 	if (ihl < sizeof(*iph))
 		goto out;
 
+	id = ntohs(iph->id);
+	proto = iph->protocol;
+
+	/* Warning: after this point, iph might be no longer valid */
 	if (unlikely(!pskb_may_pull(skb, ihl)))
 		goto out;
+	__skb_pull(skb, ihl);
 
 	tunnel = !!skb->encapsulation;
 
-	__skb_pull(skb, ihl);
 	skb_reset_transport_header(skb);
-	iph = ip_hdr(skb);
-	id = ntohs(iph->id);
-	proto = iph->protocol;
+
 	segs = ERR_PTR(-EPROTONOSUPPORT);
 
-	rcu_read_lock();
 	ops = rcu_dereference(inet_offloads[proto]);
 	if (likely(ops && ops->callbacks.gso_segment))
 		segs = ops->callbacks.gso_segment(skb, features);
-	rcu_read_unlock();
 
 	if (IS_ERR_OR_NULL(segs))
 		goto out;
@@ -1339,8 +1338,7 @@ static struct sk_buff *inet_gso_segment(struct sk_buff *skb,
 			iph->id = htons(id++);
 		}
 		iph->tot_len = htons(skb->len - skb->mac_len);
-		iph->check = 0;
-		iph->check = ip_fast_csum(skb_network_header(skb), iph->ihl);
+		ip_send_check(iph);
 	} while ((skb = skb->next));
 
 out:
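With the first hunk applied, inet_gso_send_check() reads roughly as follows. The declarations and the initial header pull are outside the hunk context and are assumed from the surrounding 3.12-era code, so treat this as a sketch rather than the verbatim result:

static int inet_gso_send_check(struct sk_buff *skb)
{
	const struct net_offload *ops;
	const struct iphdr *iph;
	int proto;
	int ihl;
	int err = -EINVAL;

	/* Assumed preamble: make sure the basic IPv4 header is linear. */
	if (unlikely(!pskb_may_pull(skb, sizeof(*iph))))
		goto out;

	iph = ip_hdr(skb);
	ihl = iph->ihl * 4;
	if (ihl < sizeof(*iph))
		goto out;

	/* Read proto before pskb_may_pull() can reallocate the header. */
	proto = iph->protocol;

	/* Warning: after this point, iph might be no longer valid */
	if (unlikely(!pskb_may_pull(skb, ihl)))
		goto out;
	__skb_pull(skb, ihl);

	skb_reset_transport_header(skb);
	err = -EPROTONOSUPPORT;

	/* Caller (skb_mac_gso_segment) already holds the RCU read lock. */
	ops = rcu_dereference(inet_offloads[proto]);
	if (likely(ops && ops->callbacks.gso_send_check))
		err = ops->callbacks.gso_send_check(skb);

out:
	return err;
}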