about summary refs log tree commit diff stats
path: root/net/core/dev.c
diff options
context:
space:
mode:
authorPravin B Shelar <pshelar@nicira.com>2013-02-14 04:44:55 -0500
committerDavid S. Miller <davem@davemloft.net>2013-02-15 15:16:03 -0500
commit05e8ef4ab2d8087d360e814d14da20b9f7fb2283 (patch)
treef5161c863bff859fd1c52e36d88fe4a10b84af0f /net/core/dev.c
parent14bbd6a565e1bcdc240d44687edb93f721cfdf99 (diff)
net: factor out skb_mac_gso_segment() from skb_gso_segment()
This function will be used in next GRE_GSO patch. This patch does not change any functionality. It only exports skb_mac_gso_segment() function. [ Use skb_reset_mac_len() -DaveM ] Signed-off-by: Pravin B Shelar <pshelar@nicira.com> Acked-by: Eric Dumazet <edumazet@google.com> Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/core/dev.c')
-rw-r--r--net/core/dev.c79
1 file changed, 48 insertions(+), 31 deletions(-)
diff --git a/net/core/dev.c b/net/core/dev.c
index f44473696b8b..67deae60214c 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -2327,37 +2327,20 @@ out:
2327} 2327}
2328EXPORT_SYMBOL(skb_checksum_help); 2328EXPORT_SYMBOL(skb_checksum_help);
2329 2329
2330/* openvswitch calls this on rx path, so we need a different check.
2331 */
2332static inline bool skb_needs_check(struct sk_buff *skb, bool tx_path)
2333{
2334 if (tx_path)
2335 return skb->ip_summed != CHECKSUM_PARTIAL;
2336 else
2337 return skb->ip_summed == CHECKSUM_NONE;
2338}
2339
2340/** 2330/**
2341 * __skb_gso_segment - Perform segmentation on skb. 2331 * skb_mac_gso_segment - mac layer segmentation handler.
2342 * @skb: buffer to segment 2332 * @skb: buffer to segment
2343 * @features: features for the output path (see dev->features) 2333 * @features: features for the output path (see dev->features)
2344 * @tx_path: whether it is called in TX path
2345 *
2346 * This function segments the given skb and returns a list of segments.
2347 *
2348 * It may return NULL if the skb requires no segmentation. This is
2349 * only possible when GSO is used for verifying header integrity.
2350 */ 2334 */
2351struct sk_buff *__skb_gso_segment(struct sk_buff *skb, 2335struct sk_buff *skb_mac_gso_segment(struct sk_buff *skb,
2352 netdev_features_t features, bool tx_path) 2336 netdev_features_t features)
2353{ 2337{
2354 struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT); 2338 struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);
2355 struct packet_offload *ptype; 2339 struct packet_offload *ptype;
2356 __be16 type = skb->protocol; 2340 __be16 type = skb->protocol;
2357 int vlan_depth = ETH_HLEN;
2358 int err;
2359 2341
2360 while (type == htons(ETH_P_8021Q)) { 2342 while (type == htons(ETH_P_8021Q)) {
2343 int vlan_depth = ETH_HLEN;
2361 struct vlan_hdr *vh; 2344 struct vlan_hdr *vh;
2362 2345
2363 if (unlikely(!pskb_may_pull(skb, vlan_depth + VLAN_HLEN))) 2346 if (unlikely(!pskb_may_pull(skb, vlan_depth + VLAN_HLEN)))
@@ -2368,22 +2351,14 @@ struct sk_buff *__skb_gso_segment(struct sk_buff *skb,
2368 vlan_depth += VLAN_HLEN; 2351 vlan_depth += VLAN_HLEN;
2369 } 2352 }
2370 2353
2371 skb_reset_mac_header(skb);
2372 skb->mac_len = skb->network_header - skb->mac_header;
2373 __skb_pull(skb, skb->mac_len); 2354 __skb_pull(skb, skb->mac_len);
2374 2355
2375 if (unlikely(skb_needs_check(skb, tx_path))) {
2376 skb_warn_bad_offload(skb);
2377
2378 if (skb_header_cloned(skb) &&
2379 (err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))
2380 return ERR_PTR(err);
2381 }
2382
2383 rcu_read_lock(); 2356 rcu_read_lock();
2384 list_for_each_entry_rcu(ptype, &offload_base, list) { 2357 list_for_each_entry_rcu(ptype, &offload_base, list) {
2385 if (ptype->type == type && ptype->callbacks.gso_segment) { 2358 if (ptype->type == type && ptype->callbacks.gso_segment) {
2386 if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) { 2359 if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
2360 int err;
2361
2387 err = ptype->callbacks.gso_send_check(skb); 2362 err = ptype->callbacks.gso_send_check(skb);
2388 segs = ERR_PTR(err); 2363 segs = ERR_PTR(err);
2389 if (err || skb_gso_ok(skb, features)) 2364 if (err || skb_gso_ok(skb, features))
@@ -2401,6 +2376,48 @@ struct sk_buff *__skb_gso_segment(struct sk_buff *skb,
2401 2376
2402 return segs; 2377 return segs;
2403} 2378}
2379EXPORT_SYMBOL(skb_mac_gso_segment);
2380
2381
2382/* openvswitch calls this on rx path, so we need a different check.
2383 */
2384static inline bool skb_needs_check(struct sk_buff *skb, bool tx_path)
2385{
2386 if (tx_path)
2387 return skb->ip_summed != CHECKSUM_PARTIAL;
2388 else
2389 return skb->ip_summed == CHECKSUM_NONE;
2390}
2391
2392/**
2393 * __skb_gso_segment - Perform segmentation on skb.
2394 * @skb: buffer to segment
2395 * @features: features for the output path (see dev->features)
2396 * @tx_path: whether it is called in TX path
2397 *
2398 * This function segments the given skb and returns a list of segments.
2399 *
2400 * It may return NULL if the skb requires no segmentation. This is
2401 * only possible when GSO is used for verifying header integrity.
2402 */
2403struct sk_buff *__skb_gso_segment(struct sk_buff *skb,
2404 netdev_features_t features, bool tx_path)
2405{
2406 if (unlikely(skb_needs_check(skb, tx_path))) {
2407 int err;
2408
2409 skb_warn_bad_offload(skb);
2410
2411 if (skb_header_cloned(skb) &&
2412 (err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))
2413 return ERR_PTR(err);
2414 }
2415
2416 skb_reset_mac_header(skb);
2417 skb_reset_mac_len(skb);
2418
2419 return skb_mac_gso_segment(skb, features);
2420}
2404EXPORT_SYMBOL(__skb_gso_segment); 2421EXPORT_SYMBOL(__skb_gso_segment);
2405 2422
2406/* Take action when hardware reception checksum errors are detected. */ 2423/* Take action when hardware reception checksum errors are detected. */