Diffstat (limited to 'include/linux/netdevice.h')

 include/linux/netdevice.h | 219 +++++++++++++++++++++++++++++++++++----------
 1 file changed, 179 insertions(+), 40 deletions(-)
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 3d0cc0b5cec2..ca5ab98053c8 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -302,6 +302,17 @@ struct netdev_boot_setup {
 
 int __init netdev_boot_setup(char *str);
 
+struct gro_list {
+	struct list_head	list;
+	int			count;
+};
+
+/*
+ * size of gro hash buckets, must less than bit number of
+ * napi_struct::gro_bitmask
+ */
+#define GRO_HASH_BUCKETS	8
+
 /*
  * Structure for NAPI scheduling similar to tasklet but with weighting
  */
@@ -316,13 +327,13 @@ struct napi_struct {
 
 	unsigned long		state;
 	int			weight;
-	unsigned int		gro_count;
+	unsigned long		gro_bitmask;
 	int			(*poll)(struct napi_struct *, int);
 #ifdef CONFIG_NETPOLL
 	int			poll_owner;
 #endif
 	struct net_device	*dev;
-	struct sk_buff		*gro_list;
+	struct gro_list		gro_hash[GRO_HASH_BUCKETS];
 	struct sk_buff		*skb;
 	struct hrtimer		timer;
 	struct list_head	dev_list;
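This hunk is the heart of the GRO rework: the single gro_list chain becomes eight list_head buckets, and gro_bitmask records which buckets are non-empty so the flush path can skip the rest. A minimal sketch of the bucketing, assuming the flow hash picks the bucket as the net/core/dev.c side of the series does (the helper name is hypothetical, and the real code sets the bitmask where the skb is actually queued):

	/* Hypothetical helper: pick the GRO bucket for an incoming skb. */
	static inline struct gro_list *gro_bucket(struct napi_struct *napi,
						  struct sk_buff *skb)
	{
		u32 bucket = skb_get_hash_raw(skb) & (GRO_HASH_BUCKETS - 1);

		napi->gro_bitmask |= 1UL << bucket;	/* mark bucket busy */
		return &napi->gro_hash[bucket];
	}

With GRO_HASH_BUCKETS at 8, the bitmask uses only the low eight bits of the unsigned long, which is what the "must less than bit number of napi_struct::gro_bitmask" comment is guarding.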
@@ -569,6 +580,9 @@ struct netdev_queue {
 	 * (/sys/class/net/DEV/Q/trans_timeout)
 	 */
 	unsigned long		trans_timeout;
+
+	/* Subordinate device that the queue has been assigned to */
+	struct net_device	*sb_dev;
 	/*
 	 * write-mostly part
 	 */
@@ -730,10 +744,15 @@ struct xps_map {
  */
 struct xps_dev_maps {
 	struct rcu_head	rcu;
-	struct xps_map __rcu *cpu_map[0];
+	struct xps_map __rcu *attr_map[0]; /* Either CPUs map or RXQs map */
 };
-#define XPS_DEV_MAPS_SIZE(_tcs) (sizeof(struct xps_dev_maps) +		\
+
+#define XPS_CPU_DEV_MAPS_SIZE(_tcs) (sizeof(struct xps_dev_maps) +	\
 	(nr_cpu_ids * (_tcs) * sizeof(struct xps_map *)))
+
+#define XPS_RXQ_DEV_MAPS_SIZE(_tcs, _rxqs) (sizeof(struct xps_dev_maps) +\
+	(_rxqs * (_tcs) * sizeof(struct xps_map *)))
+
 #endif /* CONFIG_XPS */
 
 #define TC_MAX_QUEUE	16
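struct xps_dev_maps now backs two different attribute spaces through the renamed attr_map[] flexible array, so the allocation size depends on whether CPUs or receive queues index it. A worked sizing example with hypothetical numbers (4 traffic classes, 8 receive queues; nr_cpu_ids is the system-wide CPU count):

	/* Hedged example: both flavours are a header plus one xps_map
	 * pointer per (attribute, traffic class) pair.
	 */
	size_t cpus_sz = XPS_CPU_DEV_MAPS_SIZE(4);	/* nr_cpu_ids * 4 ptrs */
	size_t rxqs_sz = XPS_RXQ_DEV_MAPS_SIZE(4, 8);	/*          8 * 4 ptrs */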
@@ -779,7 +798,8 @@ static inline bool netdev_phys_item_id_same(struct netdev_phys_item_id *a,
 }
 
 typedef u16 (*select_queue_fallback_t)(struct net_device *dev,
-				       struct sk_buff *skb);
+				       struct sk_buff *skb,
+				       struct net_device *sb_dev);
 
 enum tc_setup_type {
 	TC_SETUP_QDISC_MQPRIO,
@@ -792,6 +812,7 @@ enum tc_setup_type {
 	TC_SETUP_QDISC_RED,
 	TC_SETUP_QDISC_PRIO,
 	TC_SETUP_QDISC_MQ,
+	TC_SETUP_QDISC_ETF,
 };
 
 /* These structures hold the attributes of bpf state that are being passed
@@ -807,11 +828,8 @@ enum bpf_netdev_command {
 	 */
 	XDP_SETUP_PROG,
 	XDP_SETUP_PROG_HW,
-	/* Check if a bpf program is set on the device. The callee should
-	 * set @prog_attached to one of XDP_ATTACHED_* values, note that "true"
-	 * is equivalent to XDP_ATTACHED_DRV.
-	 */
 	XDP_QUERY_PROG,
+	XDP_QUERY_PROG_HW,
 	/* BPF program for offload callbacks, invoked at program load time. */
 	BPF_OFFLOAD_VERIFIER_PREP,
 	BPF_OFFLOAD_TRANSLATE,
@@ -835,9 +853,8 @@ struct netdev_bpf {
 			struct bpf_prog *prog;
 			struct netlink_ext_ack *extack;
 		};
-		/* XDP_QUERY_PROG */
+		/* XDP_QUERY_PROG, XDP_QUERY_PROG_HW */
 		struct {
-			u8 prog_attached;
 			u32 prog_id;
 			/* flags with which program was installed */
 			u32 prog_flags;
@@ -855,10 +872,10 @@ struct netdev_bpf {
 		struct {
 			struct bpf_offloaded_map *offmap;
 		};
-		/* XDP_SETUP_XSK_UMEM */
+		/* XDP_QUERY_XSK_UMEM, XDP_SETUP_XSK_UMEM */
 		struct {
-			struct xdp_umem *umem;
-			u16 queue_id;
+			struct xdp_umem *umem; /* out for query*/
+			u16 queue_id; /* in for query */
 		} xsk;
 	};
 };
@@ -891,6 +908,8 @@ struct tlsdev_ops {
 	void (*tls_dev_del)(struct net_device *netdev,
 			    struct tls_context *ctx,
 			    enum tls_offload_ctx_dir direction);
+	void (*tls_dev_resync_rx)(struct net_device *netdev,
+				  struct sock *sk, u32 seq, u64 rcd_sn);
 };
 #endif
 
@@ -942,7 +961,8 @@ struct dev_ifalias {
  *	those the driver believes to be appropriate.
  *
  * u16 (*ndo_select_queue)(struct net_device *dev, struct sk_buff *skb,
- *                         void *accel_priv, select_queue_fallback_t fallback);
+ *                         struct net_device *sb_dev,
+ *                         select_queue_fallback_t fallback);
  *	Called to decide which queue to use when device supports multiple
  *	transmit queues.
  *
@@ -1214,7 +1234,7 @@ struct net_device_ops {
 						      netdev_features_t features);
 	u16			(*ndo_select_queue)(struct net_device *dev,
 						    struct sk_buff *skb,
-						    void *accel_priv,
+						    struct net_device *sb_dev,
 						    select_queue_fallback_t fallback);
 	void			(*ndo_change_rx_flags)(struct net_device *dev,
 						       int flags);
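The opaque accel_priv cookie becomes an explicit subordinate-device pointer through the whole call chain, so a driver's hook can hand it straight to the fallback. A sketch of a driver callback under the new prototype (the driver name is hypothetical):

	/* Hypothetical driver hook matching the new signature. */
	static u16 foo_select_queue(struct net_device *dev, struct sk_buff *skb,
				    struct net_device *sb_dev,
				    select_queue_fallback_t fallback)
	{
		/* defer to the core, which now knows about sb_dev's queues */
		return fallback(dev, skb, sb_dev);
	}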
@@ -1909,7 +1929,8 @@ struct net_device {
 	int			watchdog_timeo;
 
 #ifdef CONFIG_XPS
-	struct xps_dev_maps __rcu *xps_maps;
+	struct xps_dev_maps __rcu *xps_cpus_map;
+	struct xps_dev_maps __rcu *xps_rxqs_map;
 #endif
 #ifdef CONFIG_NET_CLS_ACT
 	struct mini_Qdisc __rcu	*miniq_egress;
@@ -1978,7 +1999,7 @@ struct net_device {
 #ifdef CONFIG_DCB
 	const struct dcbnl_rtnl_ops *dcbnl_ops;
 #endif
-	u8			num_tc;
+	s16			num_tc;
 	struct netdev_tc_txq	tc_to_txq[TC_MAX_QUEUE];
 	u8			prio_tc_map[TC_BITMASK + 1];
 
@@ -2032,6 +2053,17 @@ int netdev_get_num_tc(struct net_device *dev)
 	return dev->num_tc;
 }
 
+void netdev_unbind_sb_channel(struct net_device *dev,
+			      struct net_device *sb_dev);
+int netdev_bind_sb_channel_queue(struct net_device *dev,
+				 struct net_device *sb_dev,
+				 u8 tc, u16 count, u16 offset);
+int netdev_set_sb_channel(struct net_device *dev, u16 channel);
+static inline int netdev_get_sb_channel(struct net_device *dev)
+{
+	return max_t(int, -dev->num_tc, 0);
+}
+
 static inline
 struct netdev_queue *netdev_get_tx_queue(const struct net_device *dev,
 					 unsigned int index)
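These helpers pair with the earlier num_tc change from u8 to s16: the new negative range appears to be reused so a subordinate device can carry its channel number in num_tc, with netdev_get_sb_channel() folding it back to a non-negative value. An illustration of that convention, inferred only from the inline getter (the store side is an assumption; it is not in this hunk):

	/* Assumed encoding: netdev_set_sb_channel(dev, 5) leaves num_tc == -5. */
	static void demo_sb_channel(struct net_device *sb_dev)
	{
		sb_dev->num_tc = -5;
		WARN_ON(netdev_get_sb_channel(sb_dev) != 5);	/* max(5, 0) */
	}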
@@ -2076,7 +2108,7 @@ static inline void netdev_for_each_tx_queue(struct net_device *dev,
 
 struct netdev_queue *netdev_pick_tx(struct net_device *dev,
 				    struct sk_buff *skb,
-				    void *accel_priv);
+				    struct net_device *sb_dev);
 
 /* returns the headroom that the master device needs to take in account
  * when forwarding to this dev
@@ -2255,10 +2287,10 @@ static inline int gro_recursion_inc_test(struct sk_buff *skb)
 	return ++NAPI_GRO_CB(skb)->recursion_counter == GRO_RECURSION_LIMIT;
 }
 
-typedef struct sk_buff **(*gro_receive_t)(struct sk_buff **, struct sk_buff *);
-static inline struct sk_buff **call_gro_receive(gro_receive_t cb,
-						struct sk_buff **head,
-						struct sk_buff *skb)
+typedef struct sk_buff *(*gro_receive_t)(struct list_head *, struct sk_buff *);
+static inline struct sk_buff *call_gro_receive(gro_receive_t cb,
+					       struct list_head *head,
+					       struct sk_buff *skb)
 {
 	if (unlikely(gro_recursion_inc_test(skb))) {
 		NAPI_GRO_CB(skb)->flush |= 1;
@@ -2268,12 +2300,12 @@ static inline struct sk_buff **call_gro_receive(gro_receive_t cb,
 	return cb(head, skb);
 }
 
-typedef struct sk_buff **(*gro_receive_sk_t)(struct sock *, struct sk_buff **,
-					     struct sk_buff *);
-static inline struct sk_buff **call_gro_receive_sk(gro_receive_sk_t cb,
-						   struct sock *sk,
-						   struct sk_buff **head,
-						   struct sk_buff *skb)
+typedef struct sk_buff *(*gro_receive_sk_t)(struct sock *, struct list_head *,
+					    struct sk_buff *);
+static inline struct sk_buff *call_gro_receive_sk(gro_receive_sk_t cb,
+						  struct sock *sk,
+						  struct list_head *head,
+						  struct sk_buff *skb)
 {
 	if (unlikely(gro_recursion_inc_test(skb))) {
 		NAPI_GRO_CB(skb)->flush |= 1;
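GRO chains switch from hand-rolled struct sk_buff ** lists to regular list_head lists, so callbacks walk the held packets with the standard iterators and return a flush candidate (or an ERR_PTR) instead of splicing through **head. A sketch of the new callback shape (the protocol name is hypothetical; assumes the skb list member introduced alongside this series):

	/* Hypothetical protocol gro_receive under the list-based API. */
	static struct sk_buff *foo_gro_receive(struct list_head *head,
					       struct sk_buff *skb)
	{
		struct sk_buff *p;

		list_for_each_entry(p, head, list) {
			if (!NAPI_GRO_CB(p)->same_flow)
				continue;
			/* compare p's headers with skb's here; on a
			 * mismatch clear NAPI_GRO_CB(p)->same_flow
			 */
		}
		return NULL;	/* nothing needs flushing immediately */
	}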
@@ -2290,6 +2322,9 @@ struct packet_type {
 					 struct net_device *,
 					 struct packet_type *,
 					 struct net_device *);
+	void			(*list_func) (struct list_head *,
+					      struct packet_type *,
+					      struct net_device *);
 	bool			(*id_match)(struct packet_type *ptype,
 					    struct sock *sk);
 	void			*af_packet_priv;
@@ -2299,8 +2334,8 @@ struct packet_type {
 struct offload_callbacks {
 	struct sk_buff		*(*gso_segment)(struct sk_buff *skb,
 						netdev_features_t features);
-	struct sk_buff		**(*gro_receive)(struct sk_buff **head,
-						 struct sk_buff *skb);
+	struct sk_buff		*(*gro_receive)(struct list_head *head,
+						struct sk_buff *skb);
 	int			(*gro_complete)(struct sk_buff *skb, int nhoff);
 };
 
@@ -2537,8 +2572,14 @@ void dev_close(struct net_device *dev);
 void dev_close_many(struct list_head *head, bool unlink);
 void dev_disable_lro(struct net_device *dev);
 int dev_loopback_xmit(struct net *net, struct sock *sk, struct sk_buff *newskb);
+u16 dev_pick_tx_zero(struct net_device *dev, struct sk_buff *skb,
+		     struct net_device *sb_dev,
+		     select_queue_fallback_t fallback);
+u16 dev_pick_tx_cpu_id(struct net_device *dev, struct sk_buff *skb,
+		       struct net_device *sb_dev,
+		       select_queue_fallback_t fallback);
 int dev_queue_xmit(struct sk_buff *skb);
-int dev_queue_xmit_accel(struct sk_buff *skb, void *accel_priv);
+int dev_queue_xmit_accel(struct sk_buff *skb, struct net_device *sb_dev);
 int dev_direct_xmit(struct sk_buff *skb, u16 queue_id);
 int register_netdevice(struct net_device *dev);
 void unregister_netdevice_queue(struct net_device *dev, struct list_head *head);
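dev_pick_tx_zero() and dev_pick_tx_cpu_id() are exported so drivers with trivial queue policies can plug a shared helper into .ndo_select_queue instead of each carrying a private copy; both match the updated ndo_select_queue prototype. A hedged wiring example (the ops table is hypothetical):

	/* Hypothetical netdev ops using one of the new helpers. */
	static const struct net_device_ops foo_netdev_ops = {
		.ndo_select_queue = dev_pick_tx_cpu_id,	/* queue = CPU id */
	};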
@@ -2568,7 +2609,7 @@ struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex);
 struct net_device *dev_get_by_napi_id(unsigned int napi_id);
 int netdev_get_name(struct net *net, char *name, int ifindex);
 int dev_restart(struct net_device *dev);
-int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb);
+int skb_gro_receive(struct sk_buff *p, struct sk_buff *skb);
 
 static inline unsigned int skb_gro_offset(const struct sk_buff *skb)
 {
@@ -2784,13 +2825,13 @@ static inline void skb_gro_remcsum_cleanup(struct sk_buff *skb,
 }
 
 #ifdef CONFIG_XFRM_OFFLOAD
-static inline void skb_gro_flush_final(struct sk_buff *skb, struct sk_buff **pp, int flush)
+static inline void skb_gro_flush_final(struct sk_buff *skb, struct sk_buff *pp, int flush)
 {
 	if (PTR_ERR(pp) != -EINPROGRESS)
 		NAPI_GRO_CB(skb)->flush |= flush;
 }
 static inline void skb_gro_flush_final_remcsum(struct sk_buff *skb,
-					       struct sk_buff **pp,
+					       struct sk_buff *pp,
 					       int flush,
 					       struct gro_remcsum *grc)
 {
@@ -2801,12 +2842,12 @@ static inline void skb_gro_flush_final_remcsum(struct sk_buff *skb,
 	}
 }
 #else
-static inline void skb_gro_flush_final(struct sk_buff *skb, struct sk_buff **pp, int flush)
+static inline void skb_gro_flush_final(struct sk_buff *skb, struct sk_buff *pp, int flush)
 {
 	NAPI_GRO_CB(skb)->flush |= flush;
 }
 static inline void skb_gro_flush_final_remcsum(struct sk_buff *skb,
-					       struct sk_buff **pp,
+					       struct sk_buff *pp,
 					       int flush,
 					       struct gro_remcsum *grc)
 {
@@ -3278,6 +3319,92 @@ static inline void netif_wake_subqueue(struct net_device *dev, u16 queue_index)
 #ifdef CONFIG_XPS
 int netif_set_xps_queue(struct net_device *dev, const struct cpumask *mask,
 			u16 index);
+int __netif_set_xps_queue(struct net_device *dev, const unsigned long *mask,
+			  u16 index, bool is_rxqs_map);
+
+/**
+ *	netif_attr_test_mask - Test a CPU or Rx queue set in a mask
+ *	@j: CPU/Rx queue index
+ *	@mask: bitmask of all cpus/rx queues
+ *	@nr_bits: number of bits in the bitmask
+ *
+ * Test if a CPU or Rx queue index is set in a mask of all CPU/Rx queues.
+ */
+static inline bool netif_attr_test_mask(unsigned long j,
+					const unsigned long *mask,
+					unsigned int nr_bits)
+{
+	cpu_max_bits_warn(j, nr_bits);
+	return test_bit(j, mask);
+}
+
+/**
+ *	netif_attr_test_online - Test for online CPU/Rx queue
+ *	@j: CPU/Rx queue index
+ *	@online_mask: bitmask for CPUs/Rx queues that are online
+ *	@nr_bits: number of bits in the bitmask
+ *
+ * Returns true if a CPU/Rx queue is online.
+ */
+static inline bool netif_attr_test_online(unsigned long j,
+					  const unsigned long *online_mask,
+					  unsigned int nr_bits)
+{
+	cpu_max_bits_warn(j, nr_bits);
+
+	if (online_mask)
+		return test_bit(j, online_mask);
+
+	return (j < nr_bits);
+}
+
+/**
+ *	netif_attrmask_next - get the next CPU/Rx queue in a cpu/Rx queues mask
+ *	@n: CPU/Rx queue index
+ *	@srcp: the cpumask/Rx queue mask pointer
+ *	@nr_bits: number of bits in the bitmask
+ *
+ * Returns >= nr_bits if no further CPUs/Rx queues set.
+ */
+static inline unsigned int netif_attrmask_next(int n, const unsigned long *srcp,
+					       unsigned int nr_bits)
+{
+	/* -1 is a legal arg here. */
+	if (n != -1)
+		cpu_max_bits_warn(n, nr_bits);
+
+	if (srcp)
+		return find_next_bit(srcp, nr_bits, n + 1);
+
+	return n + 1;
+}
+
+/**
+ *	netif_attrmask_next_and - get the next CPU/Rx queue in *src1p & *src2p
+ *	@n: CPU/Rx queue index
+ *	@src1p: the first CPUs/Rx queues mask pointer
+ *	@src2p: the second CPUs/Rx queues mask pointer
+ *	@nr_bits: number of bits in the bitmask
+ *
+ * Returns >= nr_bits if no further CPUs/Rx queues set in both.
+ */
+static inline int netif_attrmask_next_and(int n, const unsigned long *src1p,
+					  const unsigned long *src2p,
+					  unsigned int nr_bits)
+{
+	/* -1 is a legal arg here. */
+	if (n != -1)
+		cpu_max_bits_warn(n, nr_bits);
+
+	if (src1p && src2p)
+		return find_next_and_bit(src1p, src2p, nr_bits, n + 1);
+	else if (src1p)
+		return find_next_bit(src1p, nr_bits, n + 1);
+	else if (src2p)
+		return find_next_bit(src2p, nr_bits, n + 1);
+
+	return n + 1;
+}
 #else
 static inline int netif_set_xps_queue(struct net_device *dev,
 				      const struct cpumask *mask,
@@ -3285,6 +3412,13 @@ static inline int netif_set_xps_queue(struct net_device *dev,
 {
 	return 0;
 }
+
+static inline int __netif_set_xps_queue(struct net_device *dev,
+					const unsigned long *mask,
+					u16 index, bool is_rxqs_map)
+{
+	return 0;
+}
 #endif
 
 /**
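The netif_attr* helpers generalize the cpumask iterators so the same XPS code can walk either a CPU mask or an Rx-queue mask held as a raw unsigned long bitmap. A hedged example of visiting every bit set in two masks at once, in the style __netif_set_xps_queue() presumably uses internally:

	/* Walk each CPU/Rx queue set in both mask and online_mask;
	 * nr_bits would be nr_cpu_ids or dev->num_rx_queues.
	 */
	static void demo_walk(const unsigned long *mask,
			      const unsigned long *online_mask,
			      unsigned int nr_bits)
	{
		int j = -1;	/* -1 is the documented starting index */

		while ((j = netif_attrmask_next_and(j, mask, online_mask,
						    nr_bits)) < nr_bits) {
			/* configure attribute (CPU or Rx queue) j */
		}
	}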
@@ -3304,8 +3438,9 @@ int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq);
 int netif_set_real_num_rx_queues(struct net_device *dev, unsigned int rxq);
 #else
 static inline int netif_set_real_num_rx_queues(struct net_device *dev,
-					       unsigned int rxq)
+					       unsigned int rxqs)
 {
+	dev->real_num_rx_queues = rxqs;
 	return 0;
 }
 #endif
@@ -3384,6 +3519,7 @@ int netif_rx(struct sk_buff *skb);
 int netif_rx_ni(struct sk_buff *skb);
 int netif_receive_skb(struct sk_buff *skb);
 int netif_receive_skb_core(struct sk_buff *skb);
+void netif_receive_skb_list(struct list_head *head);
 gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb);
 void napi_gro_flush(struct napi_struct *napi, bool flush_old);
 struct sk_buff *napi_get_frags(struct napi_struct *napi);
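netif_receive_skb_list() is the batched counterpart of netif_receive_skb(), and the consumer of the packet_type list_func hook added earlier: a driver collects skbs on a local list during its poll loop and enters the stack once for the whole batch. A sketch of the driver side (all foo_* names are hypothetical):

	/* Hypothetical poll loop feeding the batched entry point. */
	static void foo_rx_poll(struct foo_priv *priv)
	{
		struct sk_buff *skb;
		LIST_HEAD(rx_list);

		while ((skb = foo_get_next_rx_skb(priv)) != NULL)
			list_add_tail(&skb->list, &rx_list);

		netif_receive_skb_list(&rx_list);
	}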
@@ -3418,6 +3554,8 @@ int dev_set_alias(struct net_device *, const char *, size_t);
 int dev_get_alias(const struct net_device *, char *, size_t);
 int dev_change_net_namespace(struct net_device *, struct net *, const char *);
 int __dev_set_mtu(struct net_device *, int);
+int dev_set_mtu_ext(struct net_device *dev, int mtu,
+		    struct netlink_ext_ack *extack);
 int dev_set_mtu(struct net_device *, int);
 int dev_change_tx_queue_len(struct net_device *, unsigned long);
 void dev_set_group(struct net_device *, int);
@@ -3435,8 +3573,9 @@ struct sk_buff *dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
 typedef int (*bpf_op_t)(struct net_device *dev, struct netdev_bpf *bpf);
 int dev_change_xdp_fd(struct net_device *dev, struct netlink_ext_ack *extack,
 		      int fd, u32 flags);
-void __dev_xdp_query(struct net_device *dev, bpf_op_t xdp_op,
-		     struct netdev_bpf *xdp);
+u32 __dev_xdp_query(struct net_device *dev, bpf_op_t xdp_op,
+		    enum bpf_netdev_command cmd);
+int xdp_umem_query(struct net_device *dev, u16 queue_id);
 
 int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb);
 int dev_forward_skb(struct net_device *dev, struct sk_buff *skb);
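With prog_attached gone from struct netdev_bpf, attachment state is derived from the returned program ID alone, and the driver-mode and hardware-offload attach points are queried separately. A hedged sketch of the caller side, assuming the driver's bpf_op_t is reached through netdev_ops->ndo_bpf as in dev_change_xdp_fd():

	/* Query both XDP attach points; a zero ID means nothing attached. */
	u32 drv_id = __dev_xdp_query(dev, dev->netdev_ops->ndo_bpf,
				     XDP_QUERY_PROG);
	u32 hw_id = __dev_xdp_query(dev, dev->netdev_ops->ndo_bpf,
				    XDP_QUERY_PROG_HW);
	bool attached = drv_id || hw_id;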
