path: root/include/linux/netdevice.h
Diffstat (limited to 'include/linux/netdevice.h')
-rw-r--r--   include/linux/netdevice.h   192
1 file changed, 152 insertions(+), 40 deletions(-)
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index ce2a1f5f9a1e..e8eeebd49a98 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -668,15 +668,28 @@ extern struct rps_sock_flow_table __rcu *rps_sock_flow_table;
 bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index, u32 flow_id,
                         u16 filter_id);
 #endif
+#endif /* CONFIG_RPS */
 
 /* This structure contains an instance of an RX queue. */
 struct netdev_rx_queue {
+#ifdef CONFIG_RPS
        struct rps_map __rcu            *rps_map;
        struct rps_dev_flow_table __rcu *rps_flow_table;
+#endif
        struct kobject                  kobj;
        struct net_device               *dev;
 } ____cacheline_aligned_in_smp;
-#endif /* CONFIG_RPS */
+
+/*
+ * RX queue sysfs structures and functions.
+ */
+struct rx_queue_attribute {
+       struct attribute attr;
+       ssize_t (*show)(struct netdev_rx_queue *queue,
+           struct rx_queue_attribute *attr, char *buf);
+       ssize_t (*store)(struct netdev_rx_queue *queue,
+           struct rx_queue_attribute *attr, const char *buf, size_t len);
+};
 
 #ifdef CONFIG_XPS
 /*
@@ -739,6 +752,9 @@ struct netdev_phys_port_id {
        unsigned char id_len;
 };
 
+typedef u16 (*select_queue_fallback_t)(struct net_device *dev,
+                                      struct sk_buff *skb);
+
 /*
  * This structure defines the management hooks for network devices.
  * The following hooks can be defined; unless noted otherwise, they are
@@ -770,7 +786,7 @@ struct netdev_phys_port_id {
  *     Required can not be NULL.
  *
  * u16 (*ndo_select_queue)(struct net_device *dev, struct sk_buff *skb,
- *                         void *accel_priv);
+ *                         void *accel_priv, select_queue_fallback_t fallback);
  *     Called to decide which queue to when device supports multiple
  *     transmit queues.
  *
@@ -992,7 +1008,8 @@ struct net_device_ops {
                                                   struct net_device *dev);
        u16                     (*ndo_select_queue)(struct net_device *dev,
                                                    struct sk_buff *skb,
-                                                   void *accel_priv);
+                                                   void *accel_priv,
+                                                   select_queue_fallback_t fallback);
        void                    (*ndo_change_rx_flags)(struct net_device *dev,
                                                       int flags);
        void                    (*ndo_set_rx_mode)(struct net_device *dev);
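
For illustration (not part of the patch): with the new select_queue_fallback_t argument, a driver's ndo_select_queue() can hand ordinary traffic back to the core's default queue selection instead of calling __netdev_pick_tx() itself (its declaration is removed further down). A minimal sketch; the foo_* names are hypothetical:

static u16 foo_select_queue(struct net_device *dev, struct sk_buff *skb,
                            void *accel_priv, select_queue_fallback_t fallback)
{
        /* steer control-priority traffic to queue 0, defer everything else */
        if (skb->priority == TC_PRIO_CONTROL)
                return 0;

        return fallback(dev, skb);
}

static const struct net_device_ops foo_netdev_ops = {
        .ndo_select_queue       = foo_select_queue,
        /* ... other hooks ... */
};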
@@ -1285,6 +1302,9 @@ struct net_device {
 #if IS_ENABLED(CONFIG_NET_DSA)
        struct dsa_switch_tree  *dsa_ptr;       /* dsa specific data */
 #endif
+#if IS_ENABLED(CONFIG_TIPC)
+       struct tipc_bearer __rcu *tipc_ptr;     /* TIPC specific data */
+#endif
        void                    *atalk_ptr;     /* AppleTalk link       */
        struct in_device __rcu  *ip_ptr;        /* IPv4 specific data   */
        struct dn_dev __rcu     *dn_ptr;        /* DECnet specific data */
@@ -1310,7 +1330,7 @@ struct net_device {
                                                   unicast) */
 
 
-#ifdef CONFIG_RPS
+#ifdef CONFIG_SYSFS
        struct netdev_rx_queue  *_rx;
 
        /* Number of RX queues allocated at register_netdev() time */
@@ -1408,7 +1428,7 @@ struct net_device {
        union {
                void                            *ml_priv;
                struct pcpu_lstats __percpu     *lstats; /* loopback stats */
-               struct pcpu_tstats __percpu     *tstats; /* tunnel stats */
+               struct pcpu_sw_netstats __percpu        *tstats;
                struct pcpu_dstats __percpu     *dstats; /* dummy stats */
                struct pcpu_vstats __percpu     *vstats; /* veth stats */
        };
@@ -1421,6 +1441,8 @@ struct net_device {
        struct device           dev;
        /* space for optional device, statistics, and wireless sysfs groups */
        const struct attribute_group *sysfs_groups[4];
+       /* space for optional per-rx queue attributes */
+       const struct attribute_group *sysfs_rx_queue_group;
 
        /* rtnetlink link ops */
        const struct rtnl_link_ops *rtnl_link_ops;
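
For illustration (not part of the patch): sysfs_rx_queue_group pairs with the rx_queue_attribute structure introduced above; a driver can publish extra per-rx-queue sysfs attributes by pointing this field at an attribute group before register_netdev(). A sketch under that assumption, with all foo_* names hypothetical:

static ssize_t foo_rx_stat_show(struct netdev_rx_queue *queue,
                                struct rx_queue_attribute *attr, char *buf)
{
        /* report something per queue; here simply the queue index */
        return sprintf(buf, "%u\n", get_netdev_rx_queue_index(queue));
}

static struct rx_queue_attribute foo_rx_stat_attribute =
        __ATTR(rx_stat, S_IRUGO, foo_rx_stat_show, NULL);

static struct attribute *foo_rx_attrs[] = {
        &foo_rx_stat_attribute.attr,
        NULL
};

static const struct attribute_group foo_rx_group = {
        .name  = "foo",
        .attrs = foo_rx_attrs,
};

static int foo_register(struct net_device *dev)
{
        /* set before register_netdev() so the group is created together
         * with the per-queue rx-<n> kobjects */
        dev->sysfs_rx_queue_group = &foo_rx_group;
        return register_netdev(dev);
}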
@@ -1443,7 +1465,7 @@ struct net_device {
        /* max exchange id for FCoE LRO by ddp */
        unsigned int            fcoe_ddp_xid;
 #endif
-#if IS_ENABLED(CONFIG_NETPRIO_CGROUP)
+#if IS_ENABLED(CONFIG_CGROUP_NET_PRIO)
        struct netprio_map __rcu *priomap;
 #endif
        /* phy device may attach itself for hardware timestamping */
@@ -1533,7 +1555,6 @@ static inline void netdev_for_each_tx_queue(struct net_device *dev,
 struct netdev_queue *netdev_pick_tx(struct net_device *dev,
                                    struct sk_buff *skb,
                                    void *accel_priv);
-u16 __netdev_pick_tx(struct net_device *dev, struct sk_buff *skb);
 
 /*
  * Net namespace inlines
@@ -1632,7 +1653,10 @@ struct napi_gro_cb {
        int     data_offset;
 
        /* This is non-zero if the packet cannot be merged with the new skb. */
-       int     flush;
+       u16     flush;
+
+       /* Save the IP ID here and check when we get to the transport layer */
+       u16     flush_id;
 
        /* Number of segments aggregated. */
        u16     count;
@@ -1649,7 +1673,13 @@ struct napi_gro_cb {
        unsigned long age;
 
        /* Used in ipv6_gro_receive() */
-       int     proto;
+       u16     proto;
+
+       /* Used in udp_gro_receive */
+       u16     udp_mark;
+
+       /* used to support CHECKSUM_COMPLETE for tunneling protocols */
+       __wsum  csum;
 
        /* used in skb_gro_receive() slow path */
        struct sk_buff *last;
@@ -1676,7 +1706,7 @@ struct offload_callbacks {
        int                     (*gso_send_check)(struct sk_buff *skb);
        struct sk_buff          **(*gro_receive)(struct sk_buff **head,
                                                 struct sk_buff *skb);
-       int                     (*gro_complete)(struct sk_buff *skb);
+       int                     (*gro_complete)(struct sk_buff *skb, int nhoff);
 };
 
 struct packet_offload {
@@ -1685,6 +1715,20 @@ struct packet_offload {
        struct list_head         list;
 };
 
+struct udp_offload {
+       __be16                   port;
+       struct offload_callbacks callbacks;
+};
+
+/* often modified stats are per cpu, other are shared (netdev->stats) */
+struct pcpu_sw_netstats {
+       u64     rx_packets;
+       u64     rx_bytes;
+       u64     tx_packets;
+       u64     tx_bytes;
+       struct u64_stats_sync   syncp;
+};
+
 #include <linux/notifier.h>
 
 /* netdevice notifier chain. Please remember to update the rtnetlink
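
For illustration (not part of the patch): pcpu_sw_netstats is the generic replacement for the per-driver pcpu_tstats-style structures (the tstats union member above now uses it). A tunnel driver would typically bump the counters under the u64_stats_sync like this; foo_update_rx_stats() is hypothetical and dev->tstats is assumed to have been allocated per-cpu:

static void foo_update_rx_stats(struct net_device *dev, unsigned int len)
{
        struct pcpu_sw_netstats *stats = this_cpu_ptr(dev->tstats);

        u64_stats_update_begin(&stats->syncp);
        stats->rx_packets++;
        stats->rx_bytes += len;
        u64_stats_update_end(&stats->syncp);
}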
@@ -1700,7 +1744,7 @@ struct packet_offload {
 #define NETDEV_CHANGE  0x0004  /* Notify device state change */
 #define NETDEV_REGISTER 0x0005
 #define NETDEV_UNREGISTER      0x0006
-#define NETDEV_CHANGEMTU       0x0007
+#define NETDEV_CHANGEMTU       0x0007 /* notify after mtu change happened */
 #define NETDEV_CHANGEADDR      0x0008
 #define NETDEV_GOING_DOWN      0x0009
 #define NETDEV_CHANGENAME      0x000A
@@ -1716,6 +1760,7 @@ struct packet_offload {
 #define NETDEV_JOIN            0x0014
 #define NETDEV_CHANGEUPPER     0x0015
 #define NETDEV_RESEND_IGMP     0x0016
+#define NETDEV_PRECHANGEMTU    0x0017 /* notify before mtu change happened */
 
 int register_netdevice_notifier(struct notifier_block *nb);
 int unregister_netdevice_notifier(struct notifier_block *nb);
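
For illustration (not part of the patch): NETDEV_PRECHANGEMTU fires before the MTU is written, complementing NETDEV_CHANGEMTU which fires afterwards. A notifier sketch; foo_mtu_change_ok() is hypothetical, and vetoing assumes the caller of the chain honours notifier_from_errno() in the usual way:

static int foo_netdev_event(struct notifier_block *nb,
                            unsigned long event, void *ptr)
{
        struct net_device *dev = netdev_notifier_info_to_dev(ptr);

        switch (event) {
        case NETDEV_PRECHANGEMTU:
                /* dev->mtu still holds the old value at this point */
                if (!foo_mtu_change_ok(dev))
                        return notifier_from_errno(-EINVAL);
                break;
        case NETDEV_CHANGEMTU:
                /* dev->mtu has already been updated */
                break;
        }
        return NOTIFY_DONE;
}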
@@ -1741,8 +1786,6 @@ netdev_notifier_info_to_dev(const struct netdev_notifier_info *info)
        return info->dev;
 }
 
-int call_netdevice_notifiers_info(unsigned long val, struct net_device *dev,
-                                 struct netdev_notifier_info *info);
 int call_netdevice_notifiers(unsigned long val, struct net_device *dev);
 
 
@@ -1809,7 +1852,6 @@ void dev_remove_pack(struct packet_type *pt);
 void __dev_remove_pack(struct packet_type *pt);
 void dev_add_offload(struct packet_offload *po);
 void dev_remove_offload(struct packet_offload *po);
-void __dev_remove_offload(struct packet_offload *po);
 
 struct net_device *dev_get_by_flags_rcu(struct net *net, unsigned short flags,
                                        unsigned short mask);
@@ -1895,6 +1937,14 @@ static inline void *skb_gro_network_header(struct sk_buff *skb)
               skb_network_offset(skb);
 }
 
+static inline void skb_gro_postpull_rcsum(struct sk_buff *skb,
+                                       const void *start, unsigned int len)
+{
+       if (skb->ip_summed == CHECKSUM_COMPLETE)
+               NAPI_GRO_CB(skb)->csum = csum_sub(NAPI_GRO_CB(skb)->csum,
+                                                 csum_partial(start, len, 0));
+}
+
 static inline int dev_hard_header(struct sk_buff *skb, struct net_device *dev,
                                  unsigned short type,
                                  const void *daddr, const void *saddr,
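
For illustration (not part of the patch): an encapsulation's gro_receive handler that pulls its header is expected to keep NAPI_GRO_CB(skb)->csum in step via skb_gro_postpull_rcsum(). A sketch of the usual header-access pattern; struct foo_hdr and the handler itself are hypothetical and error handling is trimmed:

struct foo_hdr {
        __be16  flags;
        __be16  proto;
};

static void foo_gro_pull_header(struct sk_buff *skb)
{
        struct foo_hdr *fh;
        unsigned int off, hlen;

        off  = skb_gro_offset(skb);
        hlen = off + sizeof(*fh);
        fh   = skb_gro_header_fast(skb, off);
        if (skb_gro_header_hard(skb, hlen)) {
                fh = skb_gro_header_slow(skb, hlen, off);
                if (unlikely(!fh))
                        return;
        }

        /* advance past the encapsulation header ... */
        skb_gro_pull(skb, sizeof(*fh));
        /* ... and subtract it from the CHECKSUM_COMPLETE running sum */
        skb_gro_postpull_rcsum(skb, fh, sizeof(*fh));
}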
@@ -2229,6 +2279,26 @@ static inline void netdev_reset_queue(struct net_device *dev_queue)
 }
 
 /**
+ * netdev_cap_txqueue - check if selected tx queue exceeds device queues
+ * @dev: network device
+ * @queue_index: given tx queue index
+ *
+ * Returns 0 if given tx queue index >= number of device tx queues,
+ * otherwise returns the originally passed tx queue index.
+ */
+static inline u16 netdev_cap_txqueue(struct net_device *dev, u16 queue_index)
+{
+       if (unlikely(queue_index >= dev->real_num_tx_queues)) {
+               net_warn_ratelimited("%s selects TX queue %d, but real number of TX queues is %d\n",
+                                    dev->name, queue_index,
+                                    dev->real_num_tx_queues);
+               return 0;
+       }
+
+       return queue_index;
+}
+
+/**
  * netif_running - test if up
  * @dev: network device
  *
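
For illustration (not part of the patch): netdev_cap_txqueue() is the kind of clamp applied to queue indices that come from outside the core, for example a value returned by a driver's ndo_select_queue(). A tiny sketch; foo_pick() is hypothetical:

static struct netdev_queue *foo_txq_for_skb(struct net_device *dev,
                                            struct sk_buff *skb)
{
        /* foo_pick() may return an index beyond real_num_tx_queues */
        u16 qindex = netdev_cap_txqueue(dev, foo_pick(dev, skb));

        return netdev_get_tx_queue(dev, qindex);
}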
@@ -2351,7 +2421,7 @@ static inline bool netif_is_multiqueue(const struct net_device *dev)
 
 int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq);
 
-#ifdef CONFIG_RPS
+#ifdef CONFIG_SYSFS
 int netif_set_real_num_rx_queues(struct net_device *dev, unsigned int rxq);
 #else
 static inline int netif_set_real_num_rx_queues(struct net_device *dev,
@@ -2370,7 +2440,7 @@ static inline int netif_copy_real_num_queues(struct net_device *to_dev,
                                            from_dev->real_num_tx_queues);
        if (err)
                return err;
-#ifdef CONFIG_RPS
+#ifdef CONFIG_SYSFS
        return netif_set_real_num_rx_queues(to_dev,
                                            from_dev->real_num_rx_queues);
 #else
@@ -2378,20 +2448,67 @@ static inline int netif_copy_real_num_queues(struct net_device *to_dev,
 #endif
 }
 
+#ifdef CONFIG_SYSFS
+static inline unsigned int get_netdev_rx_queue_index(
+               struct netdev_rx_queue *queue)
+{
+       struct net_device *dev = queue->dev;
+       int index = queue - dev->_rx;
+
+       BUG_ON(index >= dev->num_rx_queues);
+       return index;
+}
+#endif
+
 #define DEFAULT_MAX_NUM_RSS_QUEUES     (8)
 int netif_get_num_default_rss_queues(void);
 
-/* Use this variant when it is known for sure that it
- * is executing from hardware interrupt context or with hardware interrupts
- * disabled.
- */
-void dev_kfree_skb_irq(struct sk_buff *skb);
+enum skb_free_reason {
+       SKB_REASON_CONSUMED,
+       SKB_REASON_DROPPED,
+};
 
-/* Use this variant in places where it could be invoked
- * from either hardware interrupt or other context, with hardware interrupts
- * either disabled or enabled.
+void __dev_kfree_skb_irq(struct sk_buff *skb, enum skb_free_reason reason);
+void __dev_kfree_skb_any(struct sk_buff *skb, enum skb_free_reason reason);
+
+/*
+ * It is not allowed to call kfree_skb() or consume_skb() from hardware
+ * interrupt context or with hardware interrupts being disabled.
+ * (in_irq() || irqs_disabled())
+ *
+ * We provide four helpers that can be used in following contexts :
+ *
+ * dev_kfree_skb_irq(skb) when caller drops a packet from irq context,
+ *  replacing kfree_skb(skb)
+ *
+ * dev_consume_skb_irq(skb) when caller consumes a packet from irq context.
+ *  Typically used in place of consume_skb(skb) in TX completion path
+ *
+ * dev_kfree_skb_any(skb) when caller doesn't know its current irq context,
+ *  replacing kfree_skb(skb)
+ *
+ * dev_consume_skb_any(skb) when caller doesn't know its current irq context,
+ *  and consumed a packet. Used in place of consume_skb(skb)
  */
-void dev_kfree_skb_any(struct sk_buff *skb);
+static inline void dev_kfree_skb_irq(struct sk_buff *skb)
+{
+       __dev_kfree_skb_irq(skb, SKB_REASON_DROPPED);
+}
+
+static inline void dev_consume_skb_irq(struct sk_buff *skb)
+{
+       __dev_kfree_skb_irq(skb, SKB_REASON_CONSUMED);
+}
+
+static inline void dev_kfree_skb_any(struct sk_buff *skb)
+{
+       __dev_kfree_skb_any(skb, SKB_REASON_DROPPED);
+}
+
+static inline void dev_consume_skb_any(struct sk_buff *skb)
+{
+       __dev_kfree_skb_any(skb, SKB_REASON_CONSUMED);
+}
 
 int netif_rx(struct sk_buff *skb);
 int netif_rx_ni(struct sk_buff *skb);
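
For illustration (not part of the patch): following the comment block above, a TX completion handler running in hard-irq context would free successfully transmitted packets with dev_consume_skb_irq() and reserve dev_kfree_skb_irq() for genuine drops, so drop-monitoring tools only see real drops. The foo_* ring helpers are hypothetical:

static void foo_clean_tx_irq(struct foo_tx_ring *ring)
{
        struct sk_buff *skb;

        while ((skb = foo_next_completed_skb(ring)) != NULL) {
                if (foo_tx_was_ok(ring))
                        /* packet made it out: a consume, not a drop */
                        dev_consume_skb_irq(skb);
                else
                        /* transmit error: account it as a drop */
                        dev_kfree_skb_irq(skb);
        }
}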
@@ -2400,6 +2517,8 @@ gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb);
 void napi_gro_flush(struct napi_struct *napi, bool flush_old);
 struct sk_buff *napi_get_frags(struct napi_struct *napi);
 gro_result_t napi_gro_frags(struct napi_struct *napi);
+struct packet_offload *gro_find_receive_by_type(__be16 type);
+struct packet_offload *gro_find_complete_by_type(__be16 type);
 
 static inline void napi_free_frags(struct napi_struct *napi)
 {
@@ -2785,17 +2904,10 @@ int register_netdev(struct net_device *dev);
 void unregister_netdev(struct net_device *dev);
 
 /* General hardware address lists handling functions */
-int __hw_addr_add_multiple(struct netdev_hw_addr_list *to_list,
-                          struct netdev_hw_addr_list *from_list,
-                          int addr_len, unsigned char addr_type);
-void __hw_addr_del_multiple(struct netdev_hw_addr_list *to_list,
-                           struct netdev_hw_addr_list *from_list,
-                           int addr_len, unsigned char addr_type);
 int __hw_addr_sync(struct netdev_hw_addr_list *to_list,
                   struct netdev_hw_addr_list *from_list, int addr_len);
 void __hw_addr_unsync(struct netdev_hw_addr_list *to_list,
                      struct netdev_hw_addr_list *from_list, int addr_len);
-void __hw_addr_flush(struct netdev_hw_addr_list *list);
 void __hw_addr_init(struct netdev_hw_addr_list *list);
 
 /* Functions used for device addresses handling */
@@ -2803,10 +2915,6 @@ int dev_addr_add(struct net_device *dev, const unsigned char *addr,
                 unsigned char addr_type);
 int dev_addr_del(struct net_device *dev, const unsigned char *addr,
                 unsigned char addr_type);
-int dev_addr_add_multiple(struct net_device *to_dev,
-                         struct net_device *from_dev, unsigned char addr_type);
-int dev_addr_del_multiple(struct net_device *to_dev,
-                         struct net_device *from_dev, unsigned char addr_type);
 void dev_addr_flush(struct net_device *dev);
 int dev_addr_init(struct net_device *dev);
 
@@ -2853,7 +2961,6 @@ extern int weight_p;
 extern int             bpf_jit_enable;
 
 bool netdev_has_upper_dev(struct net_device *dev, struct net_device *upper_dev);
-bool netdev_has_any_upper_dev(struct net_device *dev);
 struct net_device *netdev_all_upper_get_next_dev_rcu(struct net_device *dev,
                                                      struct list_head **iter);
 
@@ -2882,6 +2989,7 @@ void *netdev_lower_get_next_private_rcu(struct net_device *dev,
        priv = netdev_lower_get_next_private_rcu(dev, &(iter)))
 
 void *netdev_adjacent_get_private(struct list_head *adj_list);
+void *netdev_lower_get_first_private_rcu(struct net_device *dev);
 struct net_device *netdev_master_upper_dev_get(struct net_device *dev);
 struct net_device *netdev_master_upper_dev_get_rcu(struct net_device *dev);
 int netdev_upper_dev_link(struct net_device *dev, struct net_device *upper_dev);
@@ -2892,8 +3000,7 @@ int netdev_master_upper_dev_link_private(struct net_device *dev,
                                         void *private);
 void netdev_upper_dev_unlink(struct net_device *dev,
                             struct net_device *upper_dev);
-void *netdev_lower_dev_get_private_rcu(struct net_device *dev,
-                                      struct net_device *lower_dev);
+void netdev_adjacent_rename_links(struct net_device *dev, char *oldname);
 void *netdev_lower_dev_get_private(struct net_device *dev,
                                   struct net_device *lower_dev);
 int skb_checksum_help(struct sk_buff *skb);
@@ -2984,7 +3091,12 @@ void netdev_change_features(struct net_device *dev);
 void netif_stacked_transfer_operstate(const struct net_device *rootdev,
                                        struct net_device *dev);
 
-netdev_features_t netif_skb_features(struct sk_buff *skb);
+netdev_features_t netif_skb_dev_features(struct sk_buff *skb,
+                                        const struct net_device *dev);
+static inline netdev_features_t netif_skb_features(struct sk_buff *skb)
+{
+       return netif_skb_dev_features(skb, skb->dev);
+}
 
 static inline bool net_gso_ok(netdev_features_t features, int gso_type)
 {
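
For illustration (not part of the patch): splitting netif_skb_features() this way lets callers evaluate offload features against a device other than skb->dev, for example the egress device while forwarding. A sketch; out_dev is assumed to be the device the packet is about to leave through:

static bool foo_egress_gso_ok(struct sk_buff *skb, struct net_device *out_dev)
{
        /* check offloads against the egress device, not skb->dev */
        netdev_features_t features = netif_skb_dev_features(skb, out_dev);

        return !skb_is_gso(skb) || skb_gso_ok(skb, features);
}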