Diffstat (limited to 'net/core')
 net/core/dev.c           | 102
 net/core/neighbour.c     |   4
 net/core/net_namespace.c |   2
 net/core/rtnetlink.c     |  33
 net/core/skbuff.c        |   4
 net/core/utils.c         |   8
 6 files changed, 129 insertions(+), 24 deletions(-)
diff --git a/net/core/dev.c b/net/core/dev.c
index d2c8a06b3a98..9abc503b19b7 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -2418,7 +2418,7 @@ EXPORT_SYMBOL(netdev_rx_csum_fault);
  * 2. No high memory really exists on this machine.
  */
 
-static int illegal_highdma(const struct net_device *dev, struct sk_buff *skb)
+static int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
 {
 #ifdef CONFIG_HIGHMEM
 	int i;
@@ -2493,38 +2493,36 @@ static int dev_gso_segment(struct sk_buff *skb, netdev_features_t features)
 }
 
 static netdev_features_t harmonize_features(struct sk_buff *skb,
-					    const struct net_device *dev,
 					    netdev_features_t features)
 {
 	int tmp;
 
 	if (skb->ip_summed != CHECKSUM_NONE &&
 	    !can_checksum_protocol(features, skb_network_protocol(skb, &tmp))) {
 		features &= ~NETIF_F_ALL_CSUM;
-	} else if (illegal_highdma(dev, skb)) {
+	} else if (illegal_highdma(skb->dev, skb)) {
 		features &= ~NETIF_F_SG;
 	}
 
 	return features;
 }
 
-netdev_features_t netif_skb_dev_features(struct sk_buff *skb,
-					 const struct net_device *dev)
+netdev_features_t netif_skb_features(struct sk_buff *skb)
 {
 	__be16 protocol = skb->protocol;
-	netdev_features_t features = dev->features;
+	netdev_features_t features = skb->dev->features;
 
-	if (skb_shinfo(skb)->gso_segs > dev->gso_max_segs)
+	if (skb_shinfo(skb)->gso_segs > skb->dev->gso_max_segs)
 		features &= ~NETIF_F_GSO_MASK;
 
 	if (protocol == htons(ETH_P_8021Q) || protocol == htons(ETH_P_8021AD)) {
 		struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
 		protocol = veh->h_vlan_encapsulated_proto;
 	} else if (!vlan_tx_tag_present(skb)) {
-		return harmonize_features(skb, dev, features);
+		return harmonize_features(skb, features);
 	}
 
-	features &= (dev->vlan_features | NETIF_F_HW_VLAN_CTAG_TX |
+	features &= (skb->dev->vlan_features | NETIF_F_HW_VLAN_CTAG_TX |
 		     NETIF_F_HW_VLAN_STAG_TX);
 
 	if (protocol == htons(ETH_P_8021Q) || protocol == htons(ETH_P_8021AD))
@@ -2532,9 +2530,9 @@ netdev_features_t netif_skb_dev_features(struct sk_buff *skb,
 		NETIF_F_GEN_CSUM | NETIF_F_HW_VLAN_CTAG_TX |
 		NETIF_F_HW_VLAN_STAG_TX;
 
-	return harmonize_features(skb, dev, features);
+	return harmonize_features(skb, features);
 }
-EXPORT_SYMBOL(netif_skb_dev_features);
+EXPORT_SYMBOL(netif_skb_features);
 
 int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
 			struct netdev_queue *txq)
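
With this change, netif_skb_features() takes its device from skb->dev instead of an explicit parameter, and harmonize_features() follows suit. As a rough, self-contained userspace model of what harmonization does (masking off offload bits the device cannot apply to this particular packet), consider the sketch below; the struct, flag values, and the IPv4-only checksum rule are all invented for illustration:

/* Simplified model of feature harmonization: start from the device's
 * advertised capabilities and clear the ones this packet defeats.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define F_SG       (1u << 0)	/* scatter-gather */
#define F_ALL_CSUM (1u << 1)	/* checksum offload */
#define F_GSO      (1u << 2)	/* segmentation offload */

struct pkt {
	uint16_t proto;		/* stand-in for skb->protocol */
	bool needs_csum;	/* stand-in for ip_summed != CHECKSUM_NONE */
	bool high_frags;	/* stand-in for illegal_highdma() */
};

static uint32_t harmonize(const struct pkt *p, uint32_t features)
{
	if (p->needs_csum && p->proto != 0x0800)	/* pretend only IPv4 csum offloads */
		features &= ~F_ALL_CSUM;
	else if (p->high_frags)
		features &= ~F_SG;
	return features;
}

int main(void)
{
	struct pkt p = { .proto = 0x86dd, .needs_csum = true, .high_frags = false };

	printf("features: %#x\n", harmonize(&p, F_SG | F_ALL_CSUM | F_GSO));	/* 0x5 */
	return 0;
}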
@@ -3953,6 +3951,7 @@ static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff
 	}
 	NAPI_GRO_CB(skb)->count = 1;
 	NAPI_GRO_CB(skb)->age = jiffies;
+	NAPI_GRO_CB(skb)->last = skb;
 	skb_shinfo(skb)->gso_size = skb_gro_len(skb);
 	skb->next = napi->gro_list;
 	napi->gro_list = skb;
@@ -4543,6 +4542,32 @@ void *netdev_adjacent_get_private(struct list_head *adj_list)
 EXPORT_SYMBOL(netdev_adjacent_get_private);
 
 /**
+ * netdev_upper_get_next_dev_rcu - Get the next dev from upper list
+ * @dev: device
+ * @iter: list_head ** of the current position
+ *
+ * Gets the next device from the dev's upper list, starting from iter
+ * position. The caller must hold RCU read lock.
+ */
+struct net_device *netdev_upper_get_next_dev_rcu(struct net_device *dev,
+						 struct list_head **iter)
+{
+	struct netdev_adjacent *upper;
+
+	WARN_ON_ONCE(!rcu_read_lock_held() && !lockdep_rtnl_is_held());
+
+	upper = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);
+
+	if (&upper->list == &dev->adj_list.upper)
+		return NULL;
+
+	*iter = &upper->list;
+
+	return upper->dev;
+}
+EXPORT_SYMBOL(netdev_upper_get_next_dev_rcu);
+
+/**
  * netdev_all_upper_get_next_dev_rcu - Get the next dev from upper list
  * @dev: device
  * @iter: list_head ** of the current position
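
A minimal usage sketch for the iterator added above, assuming this era's <linux/netdevice.h> declarations; the loop body is illustrative:

	struct list_head *iter = &dev->adj_list.upper;
	struct net_device *upper;

	rcu_read_lock();
	while ((upper = netdev_upper_get_next_dev_rcu(dev, &iter)))
		netdev_info(upper, "upper of %s\n", dev->name);
	rcu_read_unlock();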
@@ -4624,6 +4649,32 @@ void *netdev_lower_get_next_private_rcu(struct net_device *dev,
 EXPORT_SYMBOL(netdev_lower_get_next_private_rcu);
 
 /**
+ * netdev_lower_get_next - Get the next device from the lower neighbour
+ *                         list
+ * @dev: device
+ * @iter: list_head ** of the current position
+ *
+ * Gets the next netdev_adjacent from the dev's lower neighbour
+ * list, starting from iter position. The caller must hold RTNL lock or
+ * its own locking that guarantees that the neighbour lower
+ * list will remain unchanged.
+ */
+void *netdev_lower_get_next(struct net_device *dev, struct list_head **iter)
+{
+	struct netdev_adjacent *lower;
+
+	lower = list_entry((*iter)->next, struct netdev_adjacent, list);
+
+	if (&lower->list == &dev->adj_list.lower)
+		return NULL;
+
+	*iter = &lower->list;
+
+	return lower->dev;
+}
+EXPORT_SYMBOL(netdev_lower_get_next);
+
+/**
  * netdev_lower_get_first_private_rcu - Get the first ->private from the
  *					lower neighbour list, RCU
  *					variant
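
The lower-list counterpart follows the same iterator pattern, but under RTNL rather than RCU; the netdev_for_each_lower_dev() loop in dev_get_nest_level() below is the intended wrapper. A hedged open-coded sketch, again assuming the usual <linux/netdevice.h> declarations:

	struct list_head *iter = &dev->adj_list.lower;
	struct net_device *lower;

	ASSERT_RTNL();
	while ((lower = netdev_lower_get_next(dev, &iter)))
		netdev_info(lower, "lower of %s\n", dev->name);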
@@ -5073,6 +5124,30 @@ void *netdev_lower_dev_get_private(struct net_device *dev,
 }
 EXPORT_SYMBOL(netdev_lower_dev_get_private);
 
+
+int dev_get_nest_level(struct net_device *dev,
+		       bool (*type_check)(struct net_device *dev))
+{
+	struct net_device *lower = NULL;
+	struct list_head *iter;
+	int max_nest = -1;
+	int nest;
+
+	ASSERT_RTNL();
+
+	netdev_for_each_lower_dev(dev, lower, iter) {
+		nest = dev_get_nest_level(lower, type_check);
+		if (max_nest < nest)
+			max_nest = nest;
+	}
+
+	if (type_check(dev))
+		max_nest++;
+
+	return max_nest;
+}
+EXPORT_SYMBOL(dev_get_nest_level);
+
 static void dev_change_rx_flags(struct net_device *dev, int flags)
 {
 	const struct net_device_ops *ops = dev->netdev_ops;
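
dev_get_nest_level() recursively computes how deeply devices matching type_check() are stacked beneath (and including) dev: a lone matching device yields 0, a matching device on top of another matching device yields 1, and so on, which suits lockdep subclass annotations for stacked devices. A self-contained userspace model of the recursion, all names invented:

/* Model: a node's nest level is one more than the deepest matching
 * node below it, counting only nodes that satisfy type_check().
 */
#include <stdbool.h>
#include <stdio.h>

struct node {
	bool is_vlan;		/* stand-in for type_check(dev) */
	int nlower;
	struct node *lower[4];
};

static int nest_level(struct node *n, bool (*type_check)(struct node *))
{
	int max_nest = -1;

	for (int i = 0; i < n->nlower; i++) {
		int nest = nest_level(n->lower[i], type_check);

		if (max_nest < nest)
			max_nest = nest;
	}
	if (type_check(n))
		max_nest++;
	return max_nest;
}

static bool is_vlan(struct node *n) { return n->is_vlan; }

int main(void)
{
	struct node eth   = { .is_vlan = false };
	struct node vlan1 = { .is_vlan = true, .nlower = 1, .lower = { &eth } };
	struct node vlan2 = { .is_vlan = true, .nlower = 1, .lower = { &vlan1 } };

	printf("%d\n", nest_level(&vlan2, is_vlan));	/* prints 1 */
	return 0;
}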
@@ -5238,7 +5313,6 @@ void __dev_set_rx_mode(struct net_device *dev)
 	if (ops->ndo_set_rx_mode)
 		ops->ndo_set_rx_mode(dev);
 }
-EXPORT_SYMBOL(__dev_set_rx_mode);
 
 void dev_set_rx_mode(struct net_device *dev)
 {
@@ -5543,7 +5617,7 @@ static int dev_new_index(struct net *net)
 
 /* Delayed registration/unregistration */
 static LIST_HEAD(net_todo_list);
-static DECLARE_WAIT_QUEUE_HEAD(netdev_unregistering_wq);
+DECLARE_WAIT_QUEUE_HEAD(netdev_unregistering_wq);
 
 static void net_set_todo(struct net_device *dev)
 {
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index 8f8a96ef9f3f..32d872eec7f5 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -1248,8 +1248,8 @@ void __neigh_set_probe_once(struct neighbour *neigh)
 	neigh->updated = jiffies;
 	if (!(neigh->nud_state & NUD_FAILED))
 		return;
-	neigh->nud_state = NUD_PROBE;
-	atomic_set(&neigh->probes, NEIGH_VAR(neigh->parms, UCAST_PROBES));
+	neigh->nud_state = NUD_INCOMPLETE;
+	atomic_set(&neigh->probes, neigh_max_probes(neigh));
 	neigh_add_timer(neigh,
 			jiffies + NEIGH_VAR(neigh->parms, RETRANS_TIME));
 }
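
The restarted entry now begins in NUD_INCOMPLETE, so resolution walks the full state machine again, and its probe budget comes from neigh_max_probes() rather than UCAST_PROBES alone. For reference, neigh_max_probes() in kernels of this vintage reads roughly as below; this is reconstructed from memory, so treat it as an assumption rather than a quotation:

	static int neigh_max_probes(struct neighbour *n)
	{
		struct neigh_parms *p = n->parms;

		return (n->nud_state & NUD_PROBE) ?
			NEIGH_VAR(p, UCAST_PROBES) :
			NEIGH_VAR(p, UCAST_PROBES) + NEIGH_VAR(p, APP_PROBES) +
			NEIGH_VAR(p, MCAST_PROBES);
	}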
diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c
index 81d3a9a08453..7c8ffd974961 100644
--- a/net/core/net_namespace.c
+++ b/net/core/net_namespace.c
@@ -24,7 +24,7 @@
 
 static LIST_HEAD(pernet_list);
 static struct list_head *first_device = &pernet_list;
-static DEFINE_MUTEX(net_mutex);
+DEFINE_MUTEX(net_mutex);
 
 LIST_HEAD(net_namespace_list);
 EXPORT_SYMBOL_GPL(net_namespace_list);
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index 9837bebf93ce..2d8d8fcfa060 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -353,15 +353,46 @@ void __rtnl_link_unregister(struct rtnl_link_ops *ops)
 }
 EXPORT_SYMBOL_GPL(__rtnl_link_unregister);
 
+/* Return with the rtnl_lock held when there are no network
+ * devices unregistering in any network namespace.
+ */
+static void rtnl_lock_unregistering_all(void)
+{
+	struct net *net;
+	bool unregistering;
+	DEFINE_WAIT(wait);
+
+	for (;;) {
+		prepare_to_wait(&netdev_unregistering_wq, &wait,
+				TASK_UNINTERRUPTIBLE);
+		unregistering = false;
+		rtnl_lock();
+		for_each_net(net) {
+			if (net->dev_unreg_count > 0) {
+				unregistering = true;
+				break;
+			}
+		}
+		if (!unregistering)
+			break;
+		__rtnl_unlock();
+		schedule();
+	}
+	finish_wait(&netdev_unregistering_wq, &wait);
+}
+
 /**
  * rtnl_link_unregister - Unregister rtnl_link_ops from rtnetlink.
  * @ops: struct rtnl_link_ops * to unregister
  */
 void rtnl_link_unregister(struct rtnl_link_ops *ops)
 {
-	rtnl_lock();
+	/* Close the race with cleanup_net() */
+	mutex_lock(&net_mutex);
+	rtnl_lock_unregistering_all();
 	__rtnl_link_unregister(ops);
 	rtnl_unlock();
+	mutex_unlock(&net_mutex);
 }
 EXPORT_SYMBOL_GPL(rtnl_link_unregister);
 
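rtnl_lock_unregistering_all() is the classic wait-queue idiom: register as a waiter, take the lock, re-check the condition, and either return with the lock held or drop it and sleep. The kernel spells this with prepare_to_wait()/finish_wait() because RTNL has no paired condition variable; in userspace the same contract collapses to a condvar loop. A runnable sketch, all names illustrative:

/* Userspace analogue: return with 'lock' held once nothing is
 * unregistering. An unregister path would decrement unreg_count
 * and pthread_cond_broadcast(&wq).
 */
#include <pthread.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t wq = PTHREAD_COND_INITIALIZER;
static int unreg_count;		/* stand-in for net->dev_unreg_count */

static void lock_unregistering_all(void)
{
	pthread_mutex_lock(&lock);
	while (unreg_count > 0)
		pthread_cond_wait(&wq, &lock);	/* drops lock while asleep */
}

int main(void)
{
	lock_unregistering_all();	/* unreg_count is 0: returns immediately */
	pthread_mutex_unlock(&lock);
	return 0;
}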
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 1b62343f5837..8383b2bddeb9 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -3076,7 +3076,7 @@ int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb)
 	if (unlikely(p->len + len >= 65536))
 		return -E2BIG;
 
-	lp = NAPI_GRO_CB(p)->last ?: p;
+	lp = NAPI_GRO_CB(p)->last;
 	pinfo = skb_shinfo(lp);
 
 	if (headlen <= offset) {
@@ -3192,7 +3192,7 @@ merge:
 
 	__skb_pull(skb, offset);
 
-	if (!NAPI_GRO_CB(p)->last)
+	if (NAPI_GRO_CB(p)->last == p)
 		skb_shinfo(p)->frag_list = skb;
 	else
 		NAPI_GRO_CB(p)->last->next = skb;
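
Together with the dev.c hunk that initializes NAPI_GRO_CB(skb)->last = skb, these two changes switch the cached frag_list tail from "NULL until the first merge" to "the head itself until the first merge", so the read site no longer needs the ?: fallback. A self-contained model of the cached-tail append, names invented:

/* Model: O(1) append to a chain via a cached tail pointer that starts
 * out pointing at the head itself.
 */
#include <stdio.h>

struct seg {
	struct seg *next;
	struct seg *last;	/* meaningful on the chain head only */
};

static void chain_append(struct seg *head, struct seg *s)
{
	if (head->last == head)		/* first merged segment */
		head->next = s;		/* kernel: skb_shinfo(p)->frag_list */
	else
		head->last->next = s;
	head->last = s;
}

int main(void)
{
	struct seg a = { .last = &a }, b = { 0 }, c = { 0 };

	chain_append(&a, &b);
	chain_append(&a, &c);
	printf("%d\n", a.next == &b && b.next == &c);	/* prints 1 */
	return 0;
}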
diff --git a/net/core/utils.c b/net/core/utils.c
index 2f737bf90b3f..eed34338736c 100644
--- a/net/core/utils.c
+++ b/net/core/utils.c
@@ -348,8 +348,8 @@ static void __net_random_once_deferred(struct work_struct *w)
 {
 	struct __net_random_once_work *work =
 		container_of(w, struct __net_random_once_work, work);
-	if (!static_key_enabled(work->key))
-		static_key_slow_inc(work->key);
+	BUG_ON(!static_key_enabled(work->key));
+	static_key_slow_dec(work->key);
 	kfree(work);
 }
 
@@ -367,7 +367,7 @@ static void __net_random_once_disable_jump(struct static_key *key)
 }
 
 bool __net_get_random_once(void *buf, int nbytes, bool *done,
-			   struct static_key *done_key)
+			   struct static_key *once_key)
 {
 	static DEFINE_SPINLOCK(lock);
 	unsigned long flags;
@@ -382,7 +382,7 @@ bool __net_get_random_once(void *buf, int nbytes, bool *done,
 	*done = true;
 	spin_unlock_irqrestore(&lock, flags);
 
-	__net_random_once_disable_jump(done_key);
+	__net_random_once_disable_jump(once_key);
 
 	return true;
 }
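
The renamed once_key is the static key that guards the caller's slow path; after the random bytes are generated, the deferred work disables it (the static_key_slow_dec() above), so later calls branch straight past the initialization. A hedged sketch of a typical caller, assuming this era's net_get_random_once() macro from <linux/net.h> and jhash() from <linux/jhash.h>:

	static u32 hash_secret __read_mostly;

	static u32 compute_hash(const void *data, u32 len)
	{
		net_get_random_once(&hash_secret, sizeof(hash_secret));
		return jhash(data, len, hash_secret);
	}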