Diffstat (limited to 'include/linux/netdevice.h')
 include/linux/netdevice.h | 219
 1 file changed, 179 insertions(+), 40 deletions(-)
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 3d0cc0b5cec2..ca5ab98053c8 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -302,6 +302,17 @@ struct netdev_boot_setup {
 
 int __init netdev_boot_setup(char *str);
 
+struct gro_list {
+	struct list_head	list;
+	int			count;
+};
+
+/*
+ * size of gro hash buckets, must less than bit number of
+ * napi_struct::gro_bitmask
+ */
+#define GRO_HASH_BUCKETS	8
+
 /*
  * Structure for NAPI scheduling similar to tasklet but with weighting
  */
@@ -316,13 +327,13 @@ struct napi_struct {
 
 	unsigned long		state;
 	int			weight;
-	unsigned int		gro_count;
+	unsigned long		gro_bitmask;
 	int			(*poll)(struct napi_struct *, int);
 #ifdef CONFIG_NETPOLL
 	int			poll_owner;
 #endif
 	struct net_device	*dev;
-	struct sk_buff		*gro_list;
+	struct gro_list		gro_hash[GRO_HASH_BUCKETS];
 	struct sk_buff		*skb;
 	struct hrtimer		timer;
 	struct list_head	dev_list;
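The two hunks above replace the single gro_list chain and its gro_count with a small hash table of per-bucket lists plus a bitmask of non-empty buckets. A minimal sketch of how a flow might be filed into a bucket, assuming the core hashes on skb->hash and that the skb's list_head member is free at this point (the helper name is illustrative, not part of this header):

static void gro_hash_add_sketch(struct napi_struct *napi, struct sk_buff *skb)
{
	u32 bucket = skb_get_hash_raw(skb) & (GRO_HASH_BUCKETS - 1);
	struct gro_list *gl = &napi->gro_hash[bucket];

	list_add(&skb->list, &gl->list);	/* assumes skb->list is usable here */
	if (++gl->count == 1)
		__set_bit(bucket, &napi->gro_bitmask);	/* mark bucket non-empty */
}

With GRO_HASH_BUCKETS == 8, every bucket index fits in one bit of the unsigned long gro_bitmask, which is what the "must less than bit number" comment is guarding.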
@@ -569,6 +580,9 @@ struct netdev_queue {
 	 * (/sys/class/net/DEV/Q/trans_timeout)
 	 */
 	unsigned long		trans_timeout;
+
+	/* Subordinate device that the queue has been assigned to */
+	struct net_device	*sb_dev;
 /*
  * write-mostly part
  */
@@ -730,10 +744,15 @@ struct xps_map {
  */
 struct xps_dev_maps {
 	struct rcu_head rcu;
-	struct xps_map __rcu *cpu_map[0];
+	struct xps_map __rcu *attr_map[0]; /* Either CPUs map or RXQs map */
 };
-#define XPS_DEV_MAPS_SIZE(_tcs) (sizeof(struct xps_dev_maps) +		\
+
+#define XPS_CPU_DEV_MAPS_SIZE(_tcs) (sizeof(struct xps_dev_maps) +	\
 	(nr_cpu_ids * (_tcs) * sizeof(struct xps_map *)))
+
+#define XPS_RXQ_DEV_MAPS_SIZE(_tcs, _rxqs) (sizeof(struct xps_dev_maps) +\
+	(_rxqs * (_tcs) * sizeof(struct xps_map *)))
+
 #endif /* CONFIG_XPS */
 
 #define TC_MAX_QUEUE	16
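Both size macros cover the same flexible attr_map[] array; they differ only in whether one slot is reserved per (CPU, traffic class) pair or per (Rx queue, traffic class) pair. A hedged allocation sketch, with the function name and parameters purely illustrative:

static struct xps_dev_maps *alloc_xps_maps_example(unsigned int num_tc,
						   unsigned int num_rxqs,
						   bool is_rxqs_map)
{
	size_t sz = is_rxqs_map ? XPS_RXQ_DEV_MAPS_SIZE(num_tc, num_rxqs) :
				  XPS_CPU_DEV_MAPS_SIZE(num_tc);

	/* one attr_map slot per (CPU or RXQ, traffic class) pair */
	return kzalloc(sz, GFP_KERNEL);
}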
@@ -779,7 +798,8 @@ static inline bool netdev_phys_item_id_same(struct netdev_phys_item_id *a,
 }
 
 typedef u16 (*select_queue_fallback_t)(struct net_device *dev,
-				       struct sk_buff *skb);
+				       struct sk_buff *skb,
+				       struct net_device *sb_dev);
 
 enum tc_setup_type {
 	TC_SETUP_QDISC_MQPRIO,
@@ -792,6 +812,7 @@ enum tc_setup_type {
 	TC_SETUP_QDISC_RED,
 	TC_SETUP_QDISC_PRIO,
 	TC_SETUP_QDISC_MQ,
+	TC_SETUP_QDISC_ETF,
 };
 
 /* These structures hold the attributes of bpf state that are being passed
@@ -807,11 +828,8 @@ enum bpf_netdev_command {
 	 */
 	XDP_SETUP_PROG,
 	XDP_SETUP_PROG_HW,
-	/* Check if a bpf program is set on the device. The callee should
-	 * set @prog_attached to one of XDP_ATTACHED_* values, note that "true"
-	 * is equivalent to XDP_ATTACHED_DRV.
-	 */
 	XDP_QUERY_PROG,
+	XDP_QUERY_PROG_HW,
 	/* BPF program for offload callbacks, invoked at program load time. */
 	BPF_OFFLOAD_VERIFIER_PREP,
 	BPF_OFFLOAD_TRANSLATE,
@@ -835,9 +853,8 @@ struct netdev_bpf {
 			struct bpf_prog	*prog;
 			struct netlink_ext_ack *extack;
 		};
-		/* XDP_QUERY_PROG */
+		/* XDP_QUERY_PROG, XDP_QUERY_PROG_HW */
 		struct {
-			u8 prog_attached;
 			u32 prog_id;
 			/* flags with which program was installed */
 			u32 prog_flags;
@@ -855,10 +872,10 @@ struct netdev_bpf {
 		struct {
 			struct bpf_offloaded_map *offmap;
 		};
-		/* XDP_SETUP_XSK_UMEM */
+		/* XDP_QUERY_XSK_UMEM, XDP_SETUP_XSK_UMEM */
 		struct {
-			struct xdp_umem *umem;
-			u16 queue_id;
+			struct xdp_umem *umem; /* out for query*/
+			u16 queue_id; /* in for query */
 		} xsk;
 	};
 };
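With prog_attached gone, a driver answers the split XDP_QUERY_PROG / XDP_QUERY_PROG_HW commands by reporting a program id (0 meaning none). A hedged sketch of the relevant part of an .ndo_bpf handler; foo_ndo_bpf, foo_priv and its fields are illustrative and not part of this header:

static int foo_ndo_bpf(struct net_device *dev, struct netdev_bpf *bpf)
{
	struct foo_priv *priv = netdev_priv(dev);	/* hypothetical private state */

	switch (bpf->command) {
	case XDP_QUERY_PROG:
		bpf->prog_id = priv->xdp_prog ? priv->xdp_prog->aux->id : 0;
		bpf->prog_flags = priv->xdp_prog ? priv->xdp_flags : 0;
		return 0;
	case XDP_QUERY_PROG_HW:
		bpf->prog_id = priv->xdp_hw_prog ? priv->xdp_hw_prog->aux->id : 0;
		return 0;
	default:
		return -EINVAL;
	}
}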
@@ -891,6 +908,8 @@ struct tlsdev_ops {
 	void (*tls_dev_del)(struct net_device *netdev,
 			    struct tls_context *ctx,
 			    enum tls_offload_ctx_dir direction);
+	void (*tls_dev_resync_rx)(struct net_device *netdev,
+				  struct sock *sk, u32 seq, u64 rcd_sn);
 };
 #endif
 
@@ -942,7 +961,8 @@ struct dev_ifalias {
  *	those the driver believes to be appropriate.
  *
  * u16 (*ndo_select_queue)(struct net_device *dev, struct sk_buff *skb,
- *                         void *accel_priv, select_queue_fallback_t fallback);
+ *                         struct net_device *sb_dev,
+ *                         select_queue_fallback_t fallback);
  *	Called to decide which queue to use when device supports multiple
  *	transmit queues.
  *
@@ -1214,7 +1234,7 @@ struct net_device_ops {
 						   netdev_features_t features);
 	u16			(*ndo_select_queue)(struct net_device *dev,
 						    struct sk_buff *skb,
-						    void *accel_priv,
+						    struct net_device *sb_dev,
 						    select_queue_fallback_t fallback);
 	void			(*ndo_change_rx_flags)(struct net_device *dev,
 						       int flags);
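The opaque accel_priv cookie is replaced by an explicit subordinate net_device in both ndo_select_queue and the fallback it may call. A minimal sketch of a driver callback adapted to the new signature (foo_select_queue is an illustrative name):

static u16 foo_select_queue(struct net_device *dev, struct sk_buff *skb,
			    struct net_device *sb_dev,
			    select_queue_fallback_t fallback)
{
	/* driver-specific steering would go here; when nothing applies,
	 * defer to the core, which now also needs the subordinate device.
	 */
	return fallback(dev, skb, sb_dev);
}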
@@ -1909,7 +1929,8 @@ struct net_device {
 	int			watchdog_timeo;
 
 #ifdef CONFIG_XPS
-	struct xps_dev_maps __rcu *xps_maps;
+	struct xps_dev_maps __rcu *xps_cpus_map;
+	struct xps_dev_maps __rcu *xps_rxqs_map;
 #endif
 #ifdef CONFIG_NET_CLS_ACT
 	struct mini_Qdisc __rcu	*miniq_egress;
@@ -1978,7 +1999,7 @@ struct net_device {
 #ifdef CONFIG_DCB
 	const struct dcbnl_rtnl_ops *dcbnl_ops;
 #endif
-	u8			num_tc;
+	s16			num_tc;
 	struct netdev_tc_txq	tc_to_txq[TC_MAX_QUEUE];
 	u8			prio_tc_map[TC_BITMASK + 1];
 
@@ -2032,6 +2053,17 @@ int netdev_get_num_tc(struct net_device *dev)
 	return dev->num_tc;
 }
 
+void netdev_unbind_sb_channel(struct net_device *dev,
+			      struct net_device *sb_dev);
+int netdev_bind_sb_channel_queue(struct net_device *dev,
+				 struct net_device *sb_dev,
+				 u8 tc, u16 count, u16 offset);
+int netdev_set_sb_channel(struct net_device *dev, u16 channel);
+static inline int netdev_get_sb_channel(struct net_device *dev)
+{
+	return max_t(int, -dev->num_tc, 0);
+}
+
 static inline
 struct netdev_queue *netdev_get_tx_queue(const struct net_device *dev,
 					 unsigned int index)
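num_tc becoming signed (s16) and netdev_get_sb_channel() returning max_t(int, -dev->num_tc, 0) together suggest that a subordinate device reuses the field to carry a negative channel number, while an ordinary device keeps num_tc >= 0 and therefore reports channel 0. A hedged sketch of that encoding only; real code would go through netdev_set_sb_channel() rather than poke the field directly:

static void sb_channel_encoding_example(struct net_device *sb_dev)
{
	sb_dev->num_tc = -3;				/* illustrative: subordinate channel 3 */
	WARN_ON(netdev_get_sb_channel(sb_dev) != 3);	/* max_t(int, -(-3), 0) == 3 */
}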
@@ -2076,7 +2108,7 @@ static inline void netdev_for_each_tx_queue(struct net_device *dev,
 
 struct netdev_queue *netdev_pick_tx(struct net_device *dev,
 				    struct sk_buff *skb,
-				    void *accel_priv);
+				    struct net_device *sb_dev);
 
 /* returns the headroom that the master device needs to take in account
  * when forwarding to this dev
@@ -2255,10 +2287,10 @@ static inline int gro_recursion_inc_test(struct sk_buff *skb)
 	return ++NAPI_GRO_CB(skb)->recursion_counter == GRO_RECURSION_LIMIT;
 }
 
-typedef struct sk_buff **(*gro_receive_t)(struct sk_buff **, struct sk_buff *);
-static inline struct sk_buff **call_gro_receive(gro_receive_t cb,
-						struct sk_buff **head,
-						struct sk_buff *skb)
+typedef struct sk_buff *(*gro_receive_t)(struct list_head *, struct sk_buff *);
+static inline struct sk_buff *call_gro_receive(gro_receive_t cb,
+					       struct list_head *head,
+					       struct sk_buff *skb)
 {
 	if (unlikely(gro_recursion_inc_test(skb))) {
 		NAPI_GRO_CB(skb)->flush |= 1;
@@ -2268,12 +2300,12 @@ static inline struct sk_buff **call_gro_receive(gro_receive_t cb,
 	return cb(head, skb);
 }
 
-typedef struct sk_buff **(*gro_receive_sk_t)(struct sock *, struct sk_buff **,
-					     struct sk_buff *);
-static inline struct sk_buff **call_gro_receive_sk(gro_receive_sk_t cb,
-						   struct sock *sk,
-						   struct sk_buff **head,
-						   struct sk_buff *skb)
+typedef struct sk_buff *(*gro_receive_sk_t)(struct sock *, struct list_head *,
+					    struct sk_buff *);
+static inline struct sk_buff *call_gro_receive_sk(gro_receive_sk_t cb,
+						  struct sock *sk,
+						  struct list_head *head,
+						  struct sk_buff *skb)
 {
 	if (unlikely(gro_recursion_inc_test(skb))) {
 		NAPI_GRO_CB(skb)->flush |= 1;
@@ -2290,6 +2322,9 @@ struct packet_type {
 					 struct net_device *,
 					 struct packet_type *,
 					 struct net_device *);
+	void			(*list_func) (struct list_head *,
+					      struct packet_type *,
+					      struct net_device *);
 	bool			(*id_match)(struct packet_type *ptype,
 					    struct sock *sk);
 	void			*af_packet_priv;
@@ -2299,8 +2334,8 @@ struct packet_type {
 struct offload_callbacks {
 	struct sk_buff		*(*gso_segment)(struct sk_buff *skb,
 						netdev_features_t features);
-	struct sk_buff		**(*gro_receive)(struct list_head *head,
-						 struct sk_buff *skb);
+	struct sk_buff		*(*gro_receive)(struct list_head *head,
+						struct sk_buff *skb);
 	int			(*gro_complete)(struct sk_buff *skb, int nhoff);
 };
 
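Under the list-based prototype, a gro_receive callback walks the per-bucket list it is handed instead of chasing a struct sk_buff ** chain, and returns a single sk_buff pointer (or NULL). A hedged sketch of the shape only, with the protocol comparison elided and bar_gro_receive an illustrative name:

static struct sk_buff *bar_gro_receive(struct list_head *head,
				       struct sk_buff *skb)
{
	struct sk_buff *p;

	list_for_each_entry(p, head, list) {
		if (!NAPI_GRO_CB(p)->same_flow)
			continue;
		/* compare the headers of p and skb here and clear
		 * NAPI_GRO_CB(p)->same_flow when they differ.
		 */
	}

	return NULL;	/* NULL: nothing was completed by this merge attempt */
}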
@@ -2537,8 +2572,14 @@ void dev_close(struct net_device *dev);
 void dev_close_many(struct list_head *head, bool unlink);
 void dev_disable_lro(struct net_device *dev);
 int dev_loopback_xmit(struct net *net, struct sock *sk, struct sk_buff *newskb);
+u16 dev_pick_tx_zero(struct net_device *dev, struct sk_buff *skb,
+		     struct net_device *sb_dev,
+		     select_queue_fallback_t fallback);
+u16 dev_pick_tx_cpu_id(struct net_device *dev, struct sk_buff *skb,
+		       struct net_device *sb_dev,
+		       select_queue_fallback_t fallback);
 int dev_queue_xmit(struct sk_buff *skb);
-int dev_queue_xmit_accel(struct sk_buff *skb, void *accel_priv);
+int dev_queue_xmit_accel(struct sk_buff *skb, struct net_device *sb_dev);
 int dev_direct_xmit(struct sk_buff *skb, u16 queue_id);
 int register_netdevice(struct net_device *dev);
 void unregister_netdevice_queue(struct net_device *dev, struct list_head *head);
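Both new helpers have exactly the ndo_select_queue signature, so a driver with a trivial queue policy can plug one in directly rather than open-coding it. A short sketch assuming a hypothetical foo_netdev_ops:

static const struct net_device_ops foo_netdev_ops = {
	/* always queue 0, or a per-CPU pick; no custom steering needed */
	.ndo_select_queue	= dev_pick_tx_cpu_id,
	/* ... remaining ops elided ... */
};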
@@ -2568,7 +2609,7 @@ struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex);
 struct net_device *dev_get_by_napi_id(unsigned int napi_id);
 int netdev_get_name(struct net *net, char *name, int ifindex);
 int dev_restart(struct net_device *dev);
-int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb);
+int skb_gro_receive(struct sk_buff *p, struct sk_buff *skb);
 
 static inline unsigned int skb_gro_offset(const struct sk_buff *skb)
 {
@@ -2784,13 +2825,13 @@ static inline void skb_gro_remcsum_cleanup(struct sk_buff *skb,
 }
 
 #ifdef CONFIG_XFRM_OFFLOAD
-static inline void skb_gro_flush_final(struct sk_buff *skb, struct sk_buff **pp, int flush)
+static inline void skb_gro_flush_final(struct sk_buff *skb, struct sk_buff *pp, int flush)
 {
 	if (PTR_ERR(pp) != -EINPROGRESS)
 		NAPI_GRO_CB(skb)->flush |= flush;
 }
 static inline void skb_gro_flush_final_remcsum(struct sk_buff *skb,
-					       struct sk_buff **pp,
+					       struct sk_buff *pp,
 					       int flush,
 					       struct gro_remcsum *grc)
 {
@@ -2801,12 +2842,12 @@ static inline void skb_gro_flush_final_remcsum(struct sk_buff *skb,
 	}
 }
 #else
-static inline void skb_gro_flush_final(struct sk_buff *skb, struct sk_buff **pp, int flush)
+static inline void skb_gro_flush_final(struct sk_buff *skb, struct sk_buff *pp, int flush)
 {
 	NAPI_GRO_CB(skb)->flush |= flush;
 }
 static inline void skb_gro_flush_final_remcsum(struct sk_buff *skb,
-					       struct sk_buff **pp,
+					       struct sk_buff *pp,
 					       int flush,
 					       struct gro_remcsum *grc)
 {
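The pp argument follows the GRO return-type change: it is now a single struct sk_buff * that may be an ERR_PTR, so PTR_ERR(pp) can be tested directly in the XFRM offload case. A hedged sketch of a caller's shape, with baz_gro_receive illustrative and the merge work elided:

static struct sk_buff *baz_gro_receive(struct list_head *head,
				       struct sk_buff *skb)
{
	struct sk_buff *pp = NULL;
	int flush = 1;

	/* ... flow lookup/merge that may set pp, leave it NULL, or yield
	 * ERR_PTR(-EINPROGRESS) when the packet is consumed asynchronously ...
	 */

	skb_gro_flush_final(skb, pp, flush);	/* skipped when pp is ERR_PTR(-EINPROGRESS) */
	return pp;
}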
@@ -3278,6 +3319,92 @@ static inline void netif_wake_subqueue(struct net_device *dev, u16 queue_index)
 #ifdef CONFIG_XPS
 int netif_set_xps_queue(struct net_device *dev, const struct cpumask *mask,
 			u16 index);
+int __netif_set_xps_queue(struct net_device *dev, const unsigned long *mask,
+			  u16 index, bool is_rxqs_map);
+
+/**
+ *	netif_attr_test_mask - Test a CPU or Rx queue set in a mask
+ *	@j: CPU/Rx queue index
+ *	@mask: bitmask of all cpus/rx queues
+ *	@nr_bits: number of bits in the bitmask
+ *
+ * Test if a CPU or Rx queue index is set in a mask of all CPU/Rx queues.
+ */
+static inline bool netif_attr_test_mask(unsigned long j,
+					const unsigned long *mask,
+					unsigned int nr_bits)
+{
+	cpu_max_bits_warn(j, nr_bits);
+	return test_bit(j, mask);
+}
+
+/**
+ *	netif_attr_test_online - Test for online CPU/Rx queue
+ *	@j: CPU/Rx queue index
+ *	@online_mask: bitmask for CPUs/Rx queues that are online
+ *	@nr_bits: number of bits in the bitmask
+ *
+ * Returns true if a CPU/Rx queue is online.
+ */
+static inline bool netif_attr_test_online(unsigned long j,
+					  const unsigned long *online_mask,
+					  unsigned int nr_bits)
+{
+	cpu_max_bits_warn(j, nr_bits);
+
+	if (online_mask)
+		return test_bit(j, online_mask);
+
+	return (j < nr_bits);
+}
+
+/**
+ *	netif_attrmask_next - get the next CPU/Rx queue in a cpu/Rx queues mask
+ *	@n: CPU/Rx queue index
+ *	@srcp: the cpumask/Rx queue mask pointer
+ *	@nr_bits: number of bits in the bitmask
+ *
+ * Returns >= nr_bits if no further CPUs/Rx queues set.
+ */
+static inline unsigned int netif_attrmask_next(int n, const unsigned long *srcp,
+					       unsigned int nr_bits)
+{
+	/* -1 is a legal arg here. */
+	if (n != -1)
+		cpu_max_bits_warn(n, nr_bits);
+
+	if (srcp)
+		return find_next_bit(srcp, nr_bits, n + 1);
+
+	return n + 1;
+}
+
+/**
+ *	netif_attrmask_next_and - get the next CPU/Rx queue in *src1p & *src2p
+ *	@n: CPU/Rx queue index
+ *	@src1p: the first CPUs/Rx queues mask pointer
+ *	@src2p: the second CPUs/Rx queues mask pointer
+ *	@nr_bits: number of bits in the bitmask
+ *
+ * Returns >= nr_bits if no further CPUs/Rx queues set in both.
+ */
+static inline int netif_attrmask_next_and(int n, const unsigned long *src1p,
+					  const unsigned long *src2p,
+					  unsigned int nr_bits)
+{
+	/* -1 is a legal arg here. */
+	if (n != -1)
+		cpu_max_bits_warn(n, nr_bits);
+
+	if (src1p && src2p)
+		return find_next_and_bit(src1p, src2p, nr_bits, n + 1);
+	else if (src1p)
+		return find_next_bit(src1p, nr_bits, n + 1);
+	else if (src2p)
+		return find_next_bit(src2p, nr_bits, n + 1);
+
+	return n + 1;
+}
 #else
 static inline int netif_set_xps_queue(struct net_device *dev,
 				      const struct cpumask *mask,
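These helpers generalize the cpumask iterators to an arbitrary attribute bitmap so the same code can walk either CPUs or Rx queues. A hedged sketch of enumerating the indices set in both a user-supplied mask and an online mask, the way an XPS map update might; the function name, masks and nr_ids are illustrative:

static void attrmask_walk_example(const unsigned long *mask,
				  const unsigned long *online_mask,
				  unsigned int nr_ids)
{
	int j = -1;	/* -1 is the documented legal starting index */

	while ((j = netif_attrmask_next_and(j, online_mask, mask,
					    nr_ids)) < (int)nr_ids)
		pr_info("attr %d is set and online\n", j);
}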
@@ -3285,6 +3412,13 @@ static inline int netif_set_xps_queue(struct net_device *dev,
 {
 	return 0;
 }
+
+static inline int __netif_set_xps_queue(struct net_device *dev,
+					const unsigned long *mask,
+					u16 index, bool is_rxqs_map)
+{
+	return 0;
+}
 #endif
 
 /**
@@ -3304,8 +3438,9 @@ int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq);
 int netif_set_real_num_rx_queues(struct net_device *dev, unsigned int rxq);
 #else
 static inline int netif_set_real_num_rx_queues(struct net_device *dev,
-						unsigned int rxq)
+						unsigned int rxqs)
 {
+	dev->real_num_rx_queues = rxqs;
 	return 0;
 }
 #endif
@@ -3384,6 +3519,7 @@ int netif_rx(struct sk_buff *skb);
 int netif_rx_ni(struct sk_buff *skb);
 int netif_receive_skb(struct sk_buff *skb);
 int netif_receive_skb_core(struct sk_buff *skb);
+void netif_receive_skb_list(struct list_head *head);
 gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb);
 void napi_gro_flush(struct napi_struct *napi, bool flush_old);
 struct sk_buff *napi_get_frags(struct napi_struct *napi);
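netif_receive_skb_list() lets a caller hand the stack a whole batch of packets in one call instead of looping over netif_receive_skb(). A hedged sketch of the calling pattern, assuming the skbs' list_head member is free to use and with the rx-poll context elided:

static void rx_batch_example(struct sk_buff **skbs, int n)
{
	LIST_HEAD(rx_list);
	int i;

	for (i = 0; i < n; i++)
		list_add_tail(&skbs[i]->list, &rx_list);

	netif_receive_skb_list(&rx_list);	/* consumes every skb on the list */
}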
@@ -3418,6 +3554,8 @@ int dev_set_alias(struct net_device *, const char *, size_t);
 int dev_get_alias(const struct net_device *, char *, size_t);
 int dev_change_net_namespace(struct net_device *, struct net *, const char *);
 int __dev_set_mtu(struct net_device *, int);
+int dev_set_mtu_ext(struct net_device *dev, int mtu,
+		    struct netlink_ext_ack *extack);
 int dev_set_mtu(struct net_device *, int);
 int dev_change_tx_queue_len(struct net_device *, unsigned long);
 void dev_set_group(struct net_device *, int);
@@ -3435,8 +3573,9 @@ struct sk_buff *dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
 typedef int (*bpf_op_t)(struct net_device *dev, struct netdev_bpf *bpf);
 int dev_change_xdp_fd(struct net_device *dev, struct netlink_ext_ack *extack,
 		      int fd, u32 flags);
-void __dev_xdp_query(struct net_device *dev, bpf_op_t xdp_op,
-		     struct netdev_bpf *xdp);
+u32 __dev_xdp_query(struct net_device *dev, bpf_op_t xdp_op,
+		    enum bpf_netdev_command cmd);
+int xdp_umem_query(struct net_device *dev, u16 queue_id);
 
 int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb);
 int dev_forward_skb(struct net_device *dev, struct sk_buff *skb);