Diffstat (limited to 'net/core/dev.c')
-rw-r--r--  net/core/dev.c  133
1 file changed, 111 insertions(+), 22 deletions(-)
diff --git a/net/core/dev.c b/net/core/dev.c
index d2c8a06b3a98..fb8b0546485b 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -2283,8 +2283,8 @@ EXPORT_SYMBOL(skb_checksum_help);
 
 __be16 skb_network_protocol(struct sk_buff *skb, int *depth)
 {
+        unsigned int vlan_depth = skb->mac_len;
         __be16 type = skb->protocol;
-        int vlan_depth = skb->mac_len;
 
         /* Tunnel gso handlers can set protocol to ethernet. */
         if (type == htons(ETH_P_TEB)) {
@@ -2297,15 +2297,30 @@ __be16 skb_network_protocol(struct sk_buff *skb, int *depth)
                 type = eth->h_proto;
         }
 
-        while (type == htons(ETH_P_8021Q) || type == htons(ETH_P_8021AD)) {
-                struct vlan_hdr *vh;
+        /* if skb->protocol is 802.1Q/AD then the header should already be
+         * present at mac_len - VLAN_HLEN (if mac_len > 0), or at
+         * ETH_HLEN otherwise
+         */
+        if (type == htons(ETH_P_8021Q) || type == htons(ETH_P_8021AD)) {
+                if (vlan_depth) {
+                        if (unlikely(WARN_ON(vlan_depth < VLAN_HLEN)))
+                                return 0;
+                        vlan_depth -= VLAN_HLEN;
+                } else {
+                        vlan_depth = ETH_HLEN;
+                }
+                do {
+                        struct vlan_hdr *vh;
 
-                if (unlikely(!pskb_may_pull(skb, vlan_depth + VLAN_HLEN)))
-                        return 0;
+                        if (unlikely(!pskb_may_pull(skb,
+                                                    vlan_depth + VLAN_HLEN)))
+                                return 0;
 
                         vh = (struct vlan_hdr *)(skb->data + vlan_depth);
                         type = vh->h_vlan_encapsulated_proto;
                         vlan_depth += VLAN_HLEN;
+                } while (type == htons(ETH_P_8021Q) ||
+                         type == htons(ETH_P_8021AD));
         }
 
         *depth = vlan_depth;
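
The rework above seeds vlan_depth from mac_len (backing up one VLAN_HLEN when a tag is already accounted for, or starting at ETH_HLEN) and then walks any remaining stacked 802.1Q/802.1ad tags in a do/while loop. The standalone sketch below models that walk over a raw frame buffer in userspace; the frame_network_protocol() helper, the buffer layout and the hard-coded ethertypes are invented for illustration and only approximate what skb_network_protocol() does with an skb:

#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <arpa/inet.h>          /* htons/ntohs */

#define ETH_HLEN  14            /* Ethernet header, ethertype at bytes 12-13 */
#define VLAN_HLEN  4            /* each tag: 2-byte TCI + 2-byte inner ethertype */

/* Walk stacked VLAN tags and return the first non-VLAN ethertype,
 * leaving *depth at the start of the network header; return 0 on a
 * truncated frame, mirroring the pskb_may_pull() failure path. */
static uint16_t frame_network_protocol(const uint8_t *frame, size_t len,
                                       unsigned int *depth)
{
        uint16_t type;

        if (len < ETH_HLEN)
                return 0;
        memcpy(&type, frame + ETH_HLEN - 2, 2);  /* outer ethertype */

        *depth = ETH_HLEN;
        while (type == htons(0x8100) || type == htons(0x88A8)) {
                if (len < *depth + VLAN_HLEN)
                        return 0;
                /* inner ethertype sits 2 bytes into the 4-byte tag */
                memcpy(&type, frame + *depth + 2, 2);
                *depth += VLAN_HLEN;
        }
        return type;
}

int main(void)
{
        uint8_t frame[64] = {0};              /* dst/src MACs left zeroed */

        frame[12] = 0x88; frame[13] = 0xA8;   /* ETH_P_8021AD outer tag */
        frame[16] = 0x81; frame[17] = 0x00;   /* ETH_P_8021Q inner tag */
        frame[20] = 0x08; frame[21] = 0x00;   /* ETH_P_IP */

        unsigned int depth;
        uint16_t proto = frame_network_protocol(frame, sizeof(frame), &depth);

        /* prints: protocol 0x0800 at depth 22 */
        printf("protocol 0x%04x at depth %u\n", ntohs(proto), depth);
        return 0;
}
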
@@ -2418,7 +2433,7 @@ EXPORT_SYMBOL(netdev_rx_csum_fault);
  * 2. No high memory really exists on this machine.
  */
 
-static int illegal_highdma(const struct net_device *dev, struct sk_buff *skb)
+static int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
 {
 #ifdef CONFIG_HIGHMEM
         int i;
@@ -2493,38 +2508,36 @@ static int dev_gso_segment(struct sk_buff *skb, netdev_features_t features)
 }
 
 static netdev_features_t harmonize_features(struct sk_buff *skb,
-                                            const struct net_device *dev,
-                                            netdev_features_t features)
+                                            netdev_features_t features)
 {
         int tmp;
 
         if (skb->ip_summed != CHECKSUM_NONE &&
             !can_checksum_protocol(features, skb_network_protocol(skb, &tmp))) {
                 features &= ~NETIF_F_ALL_CSUM;
-        } else if (illegal_highdma(dev, skb)) {
+        } else if (illegal_highdma(skb->dev, skb)) {
                 features &= ~NETIF_F_SG;
         }
 
         return features;
 }
 
-netdev_features_t netif_skb_dev_features(struct sk_buff *skb,
-                                         const struct net_device *dev)
+netdev_features_t netif_skb_features(struct sk_buff *skb)
 {
         __be16 protocol = skb->protocol;
-        netdev_features_t features = dev->features;
+        netdev_features_t features = skb->dev->features;
 
-        if (skb_shinfo(skb)->gso_segs > dev->gso_max_segs)
+        if (skb_shinfo(skb)->gso_segs > skb->dev->gso_max_segs)
                 features &= ~NETIF_F_GSO_MASK;
 
         if (protocol == htons(ETH_P_8021Q) || protocol == htons(ETH_P_8021AD)) {
                 struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
                 protocol = veh->h_vlan_encapsulated_proto;
         } else if (!vlan_tx_tag_present(skb)) {
-                return harmonize_features(skb, dev, features);
+                return harmonize_features(skb, features);
         }
 
-        features &= (dev->vlan_features | NETIF_F_HW_VLAN_CTAG_TX |
+        features &= (skb->dev->vlan_features | NETIF_F_HW_VLAN_CTAG_TX |
                      NETIF_F_HW_VLAN_STAG_TX);
 
         if (protocol == htons(ETH_P_8021Q) || protocol == htons(ETH_P_8021AD))
@@ -2532,9 +2545,9 @@ netdev_features_t netif_skb_dev_features(struct sk_buff *skb,
                         NETIF_F_GEN_CSUM | NETIF_F_HW_VLAN_CTAG_TX |
                         NETIF_F_HW_VLAN_STAG_TX;
 
-        return harmonize_features(skb, dev, features);
+        return harmonize_features(skb, features);
 }
-EXPORT_SYMBOL(netif_skb_dev_features);
+EXPORT_SYMBOL(netif_skb_features);
 
 int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
                         struct netdev_queue *txq)
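
The two hunks above restore netif_skb_features() in place of netif_skb_dev_features(), taking the device from skb->dev rather than an explicit argument; the narrowing logic is untouched: start from the device's advertised feature set and strip checksum offload or scatter-gather when this particular skb cannot use them. A userspace model of that masking is sketched below; the flag values, the fake_skb type and the can-checksum rule are invented stand-ins, not the kernel's netdev_features_t API:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* invented stand-ins for netdev_features_t bits */
#define F_SG       (1u << 0)    /* scatter-gather */
#define F_IP_CSUM  (1u << 1)    /* can checksum IPv4 only */
#define F_HW_CSUM  (1u << 2)    /* can checksum any protocol */
#define F_ALL_CSUM (F_IP_CSUM | F_HW_CSUM)

struct fake_skb {
        uint16_t protocol;      /* ETH_P_* in host order, for simplicity */
        bool needs_csum;        /* models ip_summed != CHECKSUM_NONE */
        bool high_frags;        /* models illegal_highdma() returning true */
        uint32_t dev_features;  /* models skb->dev->features */
};

/* model of harmonize_features(): knock out what this skb can't use */
static uint32_t harmonize(const struct fake_skb *skb, uint32_t features)
{
        bool can_csum = (features & F_HW_CSUM) ||
                        ((features & F_IP_CSUM) && skb->protocol == 0x0800);

        if (skb->needs_csum && !can_csum)
                features &= ~F_ALL_CSUM;  /* fall back to software checksum */
        else if (skb->high_frags)
                features &= ~F_SG;        /* DMA cannot reach highmem frags */

        return features;
}

int main(void)
{
        /* IPv6 packet on a NIC that only checksums IPv4: csum is stripped */
        struct fake_skb skb = {
                .protocol = 0x86DD,
                .needs_csum = true,
                .high_frags = false,
                .dev_features = F_SG | F_IP_CSUM,
        };

        printf("features 0x%x -> 0x%x\n", (unsigned)skb.dev_features,
               (unsigned)harmonize(&skb, skb.dev_features)); /* 0x3 -> 0x1 */
        return 0;
}
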
@@ -3953,6 +3966,7 @@ static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff
         }
         NAPI_GRO_CB(skb)->count = 1;
         NAPI_GRO_CB(skb)->age = jiffies;
+        NAPI_GRO_CB(skb)->last = skb;
         skb_shinfo(skb)->gso_size = skb_gro_len(skb);
         skb->next = napi->gro_list;
         napi->gro_list = skb;
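
The added line seeds NAPI_GRO_CB(skb)->last with the skb itself, so a later merge can append the next segment through the cached tail pointer instead of walking the whole chain. A minimal model of that O(1) tail-append pattern, using a toy pkt struct rather than the kernel's skb and GRO control block:

#include <stdio.h>

struct pkt {
        int id;
        struct pkt *next;       /* chain of merged segments */
        struct pkt *last;       /* cached tail of the chain */
};

/* append in O(1) via the cached tail; a naive append would have to
 * walk the 'next' links to the end of the chain first */
static void chain_append(struct pkt *head, struct pkt *seg)
{
        head->last->next = seg;
        head->last = seg;
}

int main(void)
{
        struct pkt a = { .id = 1 }, b = { .id = 2 }, c = { .id = 3 };

        a.last = &a;            /* the seeding step the hunk above adds */
        chain_append(&a, &b);
        chain_append(&a, &c);

        for (struct pkt *p = &a; p; p = p->next)
                printf("pkt %d\n", p->id);      /* pkt 1, pkt 2, pkt 3 */
        return 0;
}
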
@@ -4543,6 +4557,32 @@ void *netdev_adjacent_get_private(struct list_head *adj_list)
 EXPORT_SYMBOL(netdev_adjacent_get_private);
 
 /**
+ * netdev_upper_get_next_dev_rcu - Get the next dev from upper list
+ * @dev: device
+ * @iter: list_head ** of the current position
+ *
+ * Gets the next device from the dev's upper list, starting from iter
+ * position. The caller must hold RCU read lock.
+ */
+struct net_device *netdev_upper_get_next_dev_rcu(struct net_device *dev,
+                                                 struct list_head **iter)
+{
+        struct netdev_adjacent *upper;
+
+        WARN_ON_ONCE(!rcu_read_lock_held() && !lockdep_rtnl_is_held());
+
+        upper = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);
+
+        if (&upper->list == &dev->adj_list.upper)
+                return NULL;
+
+        *iter = &upper->list;
+
+        return upper->dev;
+}
+EXPORT_SYMBOL(netdev_upper_get_next_dev_rcu);
+
+/**
  * netdev_all_upper_get_next_dev_rcu - Get the next dev from upper list
  * @dev: device
  * @iter: list_head ** of the current position
@@ -4624,6 +4664,32 @@ void *netdev_lower_get_next_private_rcu(struct net_device *dev,
 EXPORT_SYMBOL(netdev_lower_get_next_private_rcu);
 
 /**
+ * netdev_lower_get_next - Get the next device from the lower neighbour
+ *                         list
+ * @dev: device
+ * @iter: list_head ** of the current position
+ *
+ * Gets the next netdev_adjacent from the dev's lower neighbour
+ * list, starting from iter position. The caller must hold RTNL lock or
+ * its own locking that guarantees that the neighbour lower
+ * list will remain unchanged.
+ */
+void *netdev_lower_get_next(struct net_device *dev, struct list_head **iter)
+{
+        struct netdev_adjacent *lower;
+
+        lower = list_entry((*iter)->next, struct netdev_adjacent, list);
+
+        if (&lower->list == &dev->adj_list.lower)
+                return NULL;
+
+        *iter = &lower->list;
+
+        return lower->dev;
+}
+EXPORT_SYMBOL(netdev_lower_get_next);
+
+/**
  * netdev_lower_get_first_private_rcu - Get the first ->private from the
  *                                      lower neighbour list, RCU
  *                                      variant
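
Both new iterators, netdev_upper_get_next_dev_rcu() and netdev_lower_get_next(), share one cursor idiom: *iter is an opaque position owned by the caller, advanced one node per call, with NULL returned once it wraps back around to the list head. The sketch below models that idiom in userspace with a minimal list_head and container_of; the kernel versions differ mainly in using list_entry_rcu(), lockdep assertions and the real struct netdev_adjacent:

#include <stddef.h>
#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))
#define list_entry(ptr, type, member) container_of(ptr, type, member)

struct adjacent {               /* stand-in for struct netdev_adjacent */
        const char *name;
        struct list_head list;
};

/* advance the caller-owned cursor and return the next element,
 * or NULL when the cursor comes back around to the list head */
static struct adjacent *adj_get_next(struct list_head *head,
                                     struct list_head **iter)
{
        struct adjacent *adj =
                list_entry((*iter)->next, struct adjacent, list);

        if (&adj->list == head)
                return NULL;

        *iter = &adj->list;
        return adj;
}

int main(void)
{
        struct list_head head = { &head, &head };
        struct adjacent a = { .name = "dummy0" }, b = { .name = "dummy1" };

        /* hand-link head -> a -> b -> head */
        head.next = &a.list;   a.list.prev = &head;
        a.list.next = &b.list; b.list.prev = &a.list;
        b.list.next = &head;   head.prev = &b.list;

        struct list_head *iter = &head;   /* callers start at the head */
        for (struct adjacent *adj; (adj = adj_get_next(&head, &iter)); )
                printf("lower: %s\n", adj->name);
        return 0;
}
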
@@ -5073,6 +5139,30 @@ void *netdev_lower_dev_get_private(struct net_device *dev,
 }
 EXPORT_SYMBOL(netdev_lower_dev_get_private);
 
+
+int dev_get_nest_level(struct net_device *dev,
+                       bool (*type_check)(struct net_device *dev))
+{
+        struct net_device *lower = NULL;
+        struct list_head *iter;
+        int max_nest = -1;
+        int nest;
+
+        ASSERT_RTNL();
+
+        netdev_for_each_lower_dev(dev, lower, iter) {
+                nest = dev_get_nest_level(lower, type_check);
+                if (max_nest < nest)
+                        max_nest = nest;
+        }
+
+        if (type_check(dev))
+                max_nest++;
+
+        return max_nest;
+}
+EXPORT_SYMBOL(dev_get_nest_level);
+
 static void dev_change_rx_flags(struct net_device *dev, int flags)
 {
         const struct net_device_ops *ops = dev->netdev_ops;
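
dev_get_nest_level() recurses through the lower-device graph and reports the deepest stack of devices for which type_check() is true, zero-based: a matching device with no matching lowers is level 0, a vlan on a vlan is level 1. The sketch below mirrors that recursion over a toy device tree; the dev struct, is_vlan() predicate and fixed-size lower array are illustrative stand-ins for the kernel's adjacency lists and RTNL locking:

#include <stdbool.h>
#include <stdio.h>

struct dev {
        const char *name;
        bool is_vlan;           /* the property type_check() tests */
        struct dev *lower[4];   /* stand-in for adj_list.lower */
        int n_lower;
};

/* mirror of dev_get_nest_level(): deepest matching chain below us,
 * plus one if this device matches itself; starting from -1 makes a
 * single matching device with no matching lowers report level 0 */
static int nest_level(struct dev *dev, bool (*type_check)(struct dev *))
{
        int max_nest = -1;

        for (int i = 0; i < dev->n_lower; i++) {
                int nest = nest_level(dev->lower[i], type_check);
                if (max_nest < nest)
                        max_nest = nest;
        }

        if (type_check(dev))
                max_nest++;

        return max_nest;
}

static bool is_vlan(struct dev *d) { return d->is_vlan; }

int main(void)
{
        struct dev eth0      = { .name = "eth0" };
        struct dev vlan0     = { .name = "vlan0", .is_vlan = true,
                                 .lower = { &eth0 }, .n_lower = 1 };
        struct dev vlan0_100 = { .name = "vlan0.100", .is_vlan = true,
                                 .lower = { &vlan0 }, .n_lower = 1 };

        /* prints: vlan0.100 nests at level 1 */
        printf("%s nests at level %d\n", vlan0_100.name,
               nest_level(&vlan0_100, is_vlan));
        return 0;
}
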
@@ -5238,7 +5328,6 @@ void __dev_set_rx_mode(struct net_device *dev)
         if (ops->ndo_set_rx_mode)
                 ops->ndo_set_rx_mode(dev);
 }
-EXPORT_SYMBOL(__dev_set_rx_mode);
 
 void dev_set_rx_mode(struct net_device *dev)
 {
@@ -5543,7 +5632,7 @@ static int dev_new_index(struct net *net)
 
 /* Delayed registration/unregisteration */
 static LIST_HEAD(net_todo_list);
-static DECLARE_WAIT_QUEUE_HEAD(netdev_unregistering_wq);
+DECLARE_WAIT_QUEUE_HEAD(netdev_unregistering_wq);
 
 static void net_set_todo(struct net_device *dev)
 {