Diffstat (limited to 'net/core/dev.c')
-rw-r--r--  net/core/dev.c | 138
1 file changed, 66 insertions(+), 72 deletions(-)
diff --git a/net/core/dev.c b/net/core/dev.c
index 6ba50a1e404c..8afb244b205f 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -137,6 +137,7 @@
 #include <linux/if_pppox.h>
 #include <linux/ppp_defs.h>
 #include <linux/net_tstamp.h>
+#include <linux/jump_label.h>
 
 #include "net-sysfs.h"
 
@@ -1320,8 +1321,6 @@ EXPORT_SYMBOL(dev_close);
  */
 void dev_disable_lro(struct net_device *dev)
 {
-	u32 flags;
-
 	/*
 	 * If we're trying to disable lro on a vlan device
 	 * use the underlying physical device instead
@@ -1329,15 +1328,9 @@ void dev_disable_lro(struct net_device *dev)
 	if (is_vlan_dev(dev))
 		dev = vlan_dev_real_dev(dev);
 
-	if (dev->ethtool_ops && dev->ethtool_ops->get_flags)
-		flags = dev->ethtool_ops->get_flags(dev);
-	else
-		flags = ethtool_op_get_flags(dev);
+	dev->wanted_features &= ~NETIF_F_LRO;
+	netdev_update_features(dev);
 
-	if (!(flags & ETH_FLAG_LRO))
-		return;
-
-	__ethtool_set_flags(dev, flags & ~ETH_FLAG_LRO);
 	if (unlikely(dev->features & NETIF_F_LRO))
 		netdev_WARN(dev, "failed to disable LRO!\n");
 }
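
The rewritten dev_disable_lro() drops the legacy ethtool get_flags/set_flags
round-trip in favour of the unified feature-flags path. A minimal sketch of
the contract it now relies on (illustrative, not part of the patch): clear
the bit from the wanted set, then let the core recompute dev->features
through the driver's ndo_fix_features/ndo_set_features hooks.

	/* Sketch only: how a feature is withdrawn under the new scheme. */
	static void example_clear_feature(struct net_device *dev,
					  netdev_features_t feature)
	{
		dev->wanted_features &= ~feature; /* drop from wanted set */
		netdev_update_features(dev);      /* core re-evaluates
						   * dev->features */
	}
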
@@ -1449,34 +1442,32 @@ int call_netdevice_notifiers(unsigned long val, struct net_device *dev)
 }
 EXPORT_SYMBOL(call_netdevice_notifiers);
 
-/* When > 0 there are consumers of rx skb time stamps */
-static atomic_t netstamp_needed = ATOMIC_INIT(0);
+static struct jump_label_key netstamp_needed __read_mostly;
 
 void net_enable_timestamp(void)
 {
-	atomic_inc(&netstamp_needed);
+	jump_label_inc(&netstamp_needed);
 }
 EXPORT_SYMBOL(net_enable_timestamp);
 
 void net_disable_timestamp(void)
 {
-	atomic_dec(&netstamp_needed);
+	jump_label_dec(&netstamp_needed);
 }
 EXPORT_SYMBOL(net_disable_timestamp);
 
 static inline void net_timestamp_set(struct sk_buff *skb)
 {
-	if (atomic_read(&netstamp_needed))
+	skb->tstamp.tv64 = 0;
+	if (static_branch(&netstamp_needed))
 		__net_timestamp(skb);
-	else
-		skb->tstamp.tv64 = 0;
 }
 
-static inline void net_timestamp_check(struct sk_buff *skb)
-{
-	if (!skb->tstamp.tv64 && atomic_read(&netstamp_needed))
-		__net_timestamp(skb);
-}
+#define net_timestamp_check(COND, SKB)			\
+	if (static_branch(&netstamp_needed)) {		\
+		if ((COND) && !(SKB)->tstamp.tv64)	\
+			__net_timestamp(SKB);		\
+	}						\
 
 static int net_hwtstamp_validate(struct ifreq *ifr)
 {
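
The timestamp enable/disable hooks now use the jump-label (static branch)
API, which patches the hot-path test down to a no-op when no consumer wants
rx timestamps. A minimal sketch of the pattern, using the 3.2-era names seen
in this diff (later kernels renamed them to static_key_slow_inc()/
static_key_slow_dec() and static_branch_unlikely()):

	#include <linux/jump_label.h>

	static struct jump_label_key feature_needed __read_mostly;

	void feature_enable(void)
	{
		jump_label_inc(&feature_needed);  /* first user: patch branch in */
	}

	void feature_disable(void)
	{
		jump_label_dec(&feature_needed);  /* last user: patch it back out */
	}

	static inline void hot_path(void)
	{
		if (static_branch(&feature_needed)) {
			/* rarely-enabled work; costs a nop when disabled */
		}
	}
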
@@ -1923,7 +1914,8 @@ EXPORT_SYMBOL(skb_checksum_help);
  * It may return NULL if the skb requires no segmentation.  This is
  * only possible when GSO is used for verifying header integrity.
  */
-struct sk_buff *skb_gso_segment(struct sk_buff *skb, u32 features)
+struct sk_buff *skb_gso_segment(struct sk_buff *skb,
+	netdev_features_t features)
 {
 	struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);
 	struct packet_type *ptype;
@@ -1953,9 +1945,9 @@ struct sk_buff *skb_gso_segment(struct sk_buff *skb, u32 features)
 	if (dev && dev->ethtool_ops && dev->ethtool_ops->get_drvinfo)
 		dev->ethtool_ops->get_drvinfo(dev, &info);
 
-	WARN(1, "%s: caps=(0x%lx, 0x%lx) len=%d data_len=%d ip_summed=%d\n",
-	     info.driver, dev ? dev->features : 0L,
-	     skb->sk ? skb->sk->sk_route_caps : 0L,
+	WARN(1, "%s: caps=(%pNF, %pNF) len=%d data_len=%d ip_summed=%d\n",
+	     info.driver, dev ? &dev->features : NULL,
+	     skb->sk ? &skb->sk->sk_route_caps : NULL,
 	     skb->len, skb->data_len, skb->ip_summed);
 
 	if (skb_header_cloned(skb) &&
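
The WARN() now uses %pNF, the printf extension added alongside
netdev_features_t. It dereferences its argument, which is why the arguments
change from feature values to feature addresses (and to NULL when no device
or socket is attached). A short usage sketch, assuming a valid dev:

	netdev_features_t features = dev->features;

	/* %pNF takes the *address* of the feature word, never its value */
	netdev_info(dev, "features = %pNF\n", &features);
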
@@ -2064,7 +2056,7 @@ static void dev_gso_skb_destructor(struct sk_buff *skb)
  * This function segments the given skb and stores the list of segments
  * in skb->next.
  */
-static int dev_gso_segment(struct sk_buff *skb, int features)
+static int dev_gso_segment(struct sk_buff *skb, netdev_features_t features)
 {
 	struct sk_buff *segs;
 
@@ -2103,7 +2095,7 @@ static inline void skb_orphan_try(struct sk_buff *skb)
 	}
 }
 
-static bool can_checksum_protocol(unsigned long features, __be16 protocol)
+static bool can_checksum_protocol(netdev_features_t features, __be16 protocol)
 {
 	return ((features & NETIF_F_GEN_CSUM) ||
 		((features & NETIF_F_V4_CSUM) &&
@@ -2114,7 +2106,8 @@ static bool can_checksum_protocol(unsigned long features, __be16 protocol)
 		 protocol == htons(ETH_P_FCOE)));
 }
 
-static u32 harmonize_features(struct sk_buff *skb, __be16 protocol, u32 features)
+static netdev_features_t harmonize_features(struct sk_buff *skb,
+	__be16 protocol, netdev_features_t features)
 {
 	if (!can_checksum_protocol(features, protocol)) {
 		features &= ~NETIF_F_ALL_CSUM;
@@ -2126,10 +2119,10 @@ static u32 harmonize_features(struct sk_buff *skb, __be16 protocol, u32 features
 	return features;
 }
 
-u32 netif_skb_features(struct sk_buff *skb)
+netdev_features_t netif_skb_features(struct sk_buff *skb)
 {
 	__be16 protocol = skb->protocol;
-	u32 features = skb->dev->features;
+	netdev_features_t features = skb->dev->features;
 
 	if (protocol == htons(ETH_P_8021Q)) {
 		struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
@@ -2175,7 +2168,7 @@ int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
 	unsigned int skb_len;
 
 	if (likely(!skb->next)) {
-		u32 features;
+		netdev_features_t features;
 
 		/*
 		 * If device doesn't need skb->dst, release it right now while
@@ -2456,6 +2449,18 @@ static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
 	return rc;
 }
 
+#if IS_ENABLED(CONFIG_NETPRIO_CGROUP)
+static void skb_update_prio(struct sk_buff *skb)
+{
+	struct netprio_map *map = rcu_dereference(skb->dev->priomap);
+
+	if ((!skb->priority) && (skb->sk) && map)
+		skb->priority = map->priomap[skb->sk->sk_cgrp_prioidx];
+}
+#else
+#define skb_update_prio(skb)
+#endif
+
 static DEFINE_PER_CPU(int, xmit_recursion);
 #define RECURSION_LIMIT 10
 
@@ -2496,6 +2501,8 @@ int dev_queue_xmit(struct sk_buff *skb)
 	 */
 	rcu_read_lock_bh();
 
+	skb_update_prio(skb);
+
 	txq = dev_pick_tx(dev, skb);
 	q = rcu_dereference_bh(txq->qdisc);
 
@@ -2718,6 +2725,8 @@ EXPORT_SYMBOL(__skb_get_rxhash);
 struct rps_sock_flow_table __rcu *rps_sock_flow_table __read_mostly;
 EXPORT_SYMBOL(rps_sock_flow_table);
 
+struct jump_label_key rps_needed __read_mostly;
+
 static struct rps_dev_flow *
 set_rps_cpu(struct net_device *dev, struct sk_buff *skb,
 	    struct rps_dev_flow *rflow, u16 next_cpu)
@@ -2997,12 +3006,11 @@ int netif_rx(struct sk_buff *skb)
 	if (netpoll_rx(skb))
 		return NET_RX_DROP;
 
-	if (netdev_tstamp_prequeue)
-		net_timestamp_check(skb);
+	net_timestamp_check(netdev_tstamp_prequeue, skb);
 
 	trace_netif_rx(skb);
 #ifdef CONFIG_RPS
-	{
+	if (static_branch(&rps_needed)) {
 		struct rps_dev_flow voidflow, *rflow = &voidflow;
 		int cpu;
 
@@ -3017,14 +3025,13 @@ int netif_rx(struct sk_buff *skb)
 
 		rcu_read_unlock();
 		preempt_enable();
-	}
-#else
+	} else
+#endif
 	{
 		unsigned int qtail;
 		ret = enqueue_to_backlog(skb, get_cpu(), &qtail);
 		put_cpu();
 	}
-#endif
 	return ret;
 }
 EXPORT_SYMBOL(netif_rx);
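
rps_needed gates the RPS path behind a static branch, so systems that never
configure RPS pay only a patched-out nop in netif_rx()/netif_receive_skb().
The inc/dec side is not in this file; presumably it sits where RPS maps are
installed (e.g. when rps_cpus is written in sysfs). A sketch of the expected
toggle, under that assumption:

	/* Assumed enable/disable site (not part of this diff). */
	static void example_rps_map_changed(bool had_map, bool has_map)
	{
		if (has_map && !had_map)
			jump_label_inc(&rps_needed);  /* first map appears */
		else if (!has_map && had_map)
			jump_label_dec(&rps_needed);  /* last map removed */
	}
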
@@ -3230,8 +3237,7 @@ static int __netif_receive_skb(struct sk_buff *skb)
 	int ret = NET_RX_DROP;
 	__be16 type;
 
-	if (!netdev_tstamp_prequeue)
-		net_timestamp_check(skb);
+	net_timestamp_check(!netdev_tstamp_prequeue, skb);
 
 	trace_netif_receive_skb(skb);
 
@@ -3362,14 +3368,13 @@ out:
  */
 int netif_receive_skb(struct sk_buff *skb)
 {
-	if (netdev_tstamp_prequeue)
-		net_timestamp_check(skb);
+	net_timestamp_check(netdev_tstamp_prequeue, skb);
 
 	if (skb_defer_rx_timestamp(skb))
 		return NET_RX_SUCCESS;
 
 #ifdef CONFIG_RPS
-	{
+	if (static_branch(&rps_needed)) {
 		struct rps_dev_flow voidflow, *rflow = &voidflow;
 		int cpu, ret;
 
@@ -3380,16 +3385,12 @@ int netif_receive_skb(struct sk_buff *skb)
 		if (cpu >= 0) {
 			ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
 			rcu_read_unlock();
-		} else {
-			rcu_read_unlock();
-			ret = __netif_receive_skb(skb);
+			return ret;
 		}
-
-		return ret;
+		rcu_read_unlock();
 	}
-#else
-	return __netif_receive_skb(skb);
 #endif
+	return __netif_receive_skb(skb);
 }
 EXPORT_SYMBOL(netif_receive_skb);
 
@@ -5362,7 +5363,8 @@ static void rollback_registered(struct net_device *dev)
 	list_del(&single);
 }
 
-static u32 netdev_fix_features(struct net_device *dev, u32 features)
+static netdev_features_t netdev_fix_features(struct net_device *dev,
+	netdev_features_t features)
 {
 	/* Fix illegal checksum combinations */
 	if ((features & NETIF_F_HW_CSUM) &&
@@ -5371,12 +5373,6 @@ static u32 netdev_fix_features(struct net_device *dev, u32 features)
 		features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM);
 	}
 
-	if ((features & NETIF_F_NO_CSUM) &&
-	    (features & (NETIF_F_HW_CSUM|NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
-		netdev_warn(dev, "mixed no checksumming and other settings.\n");
-		features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM|NETIF_F_HW_CSUM);
-	}
-
 	/* Fix illegal SG+CSUM combinations. */
 	if ((features & NETIF_F_SG) &&
 	    !(features & NETIF_F_ALL_CSUM)) {
@@ -5424,7 +5420,7 @@ static u32 netdev_fix_features(struct net_device *dev, u32 features)
 
 int __netdev_update_features(struct net_device *dev)
 {
-	u32 features;
+	netdev_features_t features;
 	int err = 0;
 
 	ASSERT_RTNL();
@@ -5440,16 +5436,16 @@ int __netdev_update_features(struct net_device *dev)
 	if (dev->features == features)
 		return 0;
 
-	netdev_dbg(dev, "Features changed: 0x%08x -> 0x%08x\n",
-		dev->features, features);
+	netdev_dbg(dev, "Features changed: %pNF -> %pNF\n",
+		&dev->features, &features);
 
 	if (dev->netdev_ops->ndo_set_features)
 		err = dev->netdev_ops->ndo_set_features(dev, features);
 
 	if (unlikely(err < 0)) {
 		netdev_err(dev,
-			"set_features() failed (%d); wanted 0x%08x, left 0x%08x\n",
-			err, features, dev->features);
+			"set_features() failed (%d); wanted %pNF, left %pNF\n",
+			err, &features, &dev->features);
 		return -1;
 	}
 
@@ -5633,11 +5629,12 @@ int register_netdevice(struct net_device *dev)
 	dev->wanted_features = dev->features & dev->hw_features;
 
 	/* Turn on no cache copy if HW is doing checksum */
-	dev->hw_features |= NETIF_F_NOCACHE_COPY;
-	if ((dev->features & NETIF_F_ALL_CSUM) &&
-	    !(dev->features & NETIF_F_NO_CSUM)) {
-		dev->wanted_features |= NETIF_F_NOCACHE_COPY;
-		dev->features |= NETIF_F_NOCACHE_COPY;
+	if (!(dev->flags & IFF_LOOPBACK)) {
+		dev->hw_features |= NETIF_F_NOCACHE_COPY;
+		if (dev->features & NETIF_F_ALL_CSUM) {
+			dev->wanted_features |= NETIF_F_NOCACHE_COPY;
+			dev->features |= NETIF_F_NOCACHE_COPY;
+		}
 	}
 
 	/* Make NETIF_F_HIGHDMA inheritable to VLAN devices.
@@ -6373,7 +6370,8 @@ static int dev_cpu_callback(struct notifier_block *nfb,
  * @one to the master device with current feature set @all.  Will not
  * enable anything that is off in @mask. Returns the new feature set.
  */
-u32 netdev_increment_features(u32 all, u32 one, u32 mask)
+netdev_features_t netdev_increment_features(netdev_features_t all,
+	netdev_features_t one, netdev_features_t mask)
 {
 	if (mask & NETIF_F_GEN_CSUM)
 		mask |= NETIF_F_ALL_CSUM;
@@ -6382,10 +6380,6 @@ u32 netdev_increment_features(u32 all, u32 one, u32 mask)
 	all |= one & (NETIF_F_ONE_FOR_ALL|NETIF_F_ALL_CSUM) & mask;
 	all &= one | ~NETIF_F_ALL_FOR_ALL;
 
-	/* If device needs checksumming, downgrade to it. */
-	if (all & (NETIF_F_ALL_CSUM & ~NETIF_F_NO_CSUM))
-		all &= ~NETIF_F_NO_CSUM;
-
 	/* If one device supports hw checksumming, set for all. */
 	if (all & NETIF_F_GEN_CSUM)
 		all &= ~(NETIF_F_ALL_CSUM & ~NETIF_F_GEN_CSUM);
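
With NETIF_F_NO_CSUM gone there is nothing left to downgrade, so that branch
disappears while the fold logic stays intact. A hedged sketch of the typical
caller pattern (the shape used by bonding-style masters recomputing their
feature set from slaves; all names here are illustrative):

	/* Illustrative only; "slave_features" is a hypothetical array. */
	static netdev_features_t example_compute_features(
		const netdev_features_t *slave_features, int n,
		netdev_features_t mask)
	{
		netdev_features_t all = mask;
		int i;

		/* fold each device's features into the aggregate */
		for (i = 0; i < n; i++)
			all = netdev_increment_features(all,
							slave_features[i],
							mask);
		return all;
	}
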