Diffstat (limited to 'include/linux/netdevice.h')
-rw-r--r--	include/linux/netdevice.h	146
1 file changed, 117 insertions(+), 29 deletions(-)
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 52fd8e8694cf..5897b4ea5a3f 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -51,6 +51,7 @@
 #include <linux/netdev_features.h>
 #include <linux/neighbour.h>
 #include <uapi/linux/netdevice.h>
+#include <uapi/linux/if_bonding.h>
 
 struct netpoll_info;
 struct device;
@@ -643,39 +644,40 @@ struct rps_dev_flow_table {
 /*
  * The rps_sock_flow_table contains mappings of flows to the last CPU
  * on which they were processed by the application (set in recvmsg).
+ * Each entry is a 32bit value. Upper part is the high order bits
+ * of flow hash, lower part is cpu number.
+ * rps_cpu_mask is used to partition the space, depending on number of
+ * possible cpus : rps_cpu_mask = roundup_pow_of_two(nr_cpu_ids) - 1
+ * For example, if 64 cpus are possible, rps_cpu_mask = 0x3f,
+ * meaning we use 32-6=26 bits for the hash.
  */
 struct rps_sock_flow_table {
-	unsigned int mask;
-	u16 ents[0];
+	u32	mask;
+
+	u32	ents[0] ____cacheline_aligned_in_smp;
 };
-#define RPS_SOCK_FLOW_TABLE_SIZE(_num) (sizeof(struct rps_sock_flow_table) + \
-    ((_num) * sizeof(u16)))
+#define RPS_SOCK_FLOW_TABLE_SIZE(_num) (offsetof(struct rps_sock_flow_table, ents[_num]))
 
 #define RPS_NO_CPU 0xffff
 
+extern u32 rps_cpu_mask;
+extern struct rps_sock_flow_table __rcu *rps_sock_flow_table;
+
 static inline void rps_record_sock_flow(struct rps_sock_flow_table *table,
					u32 hash)
 {
	if (table && hash) {
-		unsigned int cpu, index = hash & table->mask;
+		unsigned int index = hash & table->mask;
+		u32 val = hash & ~rps_cpu_mask;
 
		/* We only give a hint, preemption can change cpu under us */
-		cpu = raw_smp_processor_id();
+		val |= raw_smp_processor_id();
 
-		if (table->ents[index] != cpu)
-			table->ents[index] = cpu;
+		if (table->ents[index] != val)
+			table->ents[index] = val;
	}
 }
 
-static inline void rps_reset_sock_flow(struct rps_sock_flow_table *table,
-				       u32 hash)
-{
-	if (table && hash)
-		table->ents[hash & table->mask] = RPS_NO_CPU;
-}
-
-extern struct rps_sock_flow_table __rcu *rps_sock_flow_table;
-
 #ifdef CONFIG_RFS_ACCEL
 bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index, u32 flow_id,
			 u16 filter_id);
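The comment block added above fully determines the new entry layout, so the encoding can be illustrated with ordinary arithmetic. A minimal userspace sketch (not kernel code; the names and values are made up), assuming 64 possible CPUs so rps_cpu_mask = 0x3f and the upper 26 bits hold the flow hash:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t cpu_mask = 0x3f;	/* roundup_pow_of_two(64) - 1 */
	uint32_t hash = 0xdeadbeef;	/* flow hash seen by recvmsg() */
	unsigned int cpu = 5;		/* CPU the application last ran on */

	/* Pack the entry the way rps_record_sock_flow() now does:
	 * high-order hash bits in the top, CPU id in the bottom. */
	uint32_t entry = (hash & ~cpu_mask) | cpu;

	/* On the RX side, a mismatch in the upper bits means a different
	 * flow collided into the same table slot, so the CPU hint must
	 * not be trusted. */
	int same_flow = ((entry ^ hash) & ~cpu_mask) == 0;

	printf("entry=0x%08x cpu=%u same_flow=%d\n",
	       (unsigned)entry, (unsigned)(entry & cpu_mask), same_flow);
	return 0;
}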
@@ -1154,13 +1156,15 @@ struct net_device_ops {
						      int idx);
 
	int			(*ndo_bridge_setlink)(struct net_device *dev,
-						      struct nlmsghdr *nlh);
+						      struct nlmsghdr *nlh,
+						      u16 flags);
	int			(*ndo_bridge_getlink)(struct sk_buff *skb,
						      u32 pid, u32 seq,
						      struct net_device *dev,
						      u32 filter_mask);
	int			(*ndo_bridge_dellink)(struct net_device *dev,
-						      struct nlmsghdr *nlh);
+						      struct nlmsghdr *nlh,
+						      u16 flags);
	int			(*ndo_change_carrier)(struct net_device *dev,
						      bool new_carrier);
	int			(*ndo_get_phys_port_id)(struct net_device *dev,
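With the extra u16 flags argument, ndo_bridge_setlink and ndo_bridge_dellink implementations receive the bridge flags that rtnetlink has already parsed, alongside the netlink message. A hedged sketch of what an updated driver callback might look like; the driver name is hypothetical and the body is illustrative only:

#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/netlink.h>
#include <linux/if_bridge.h>

/* Hypothetical driver callback using the new ndo_bridge_setlink signature. */
static int foo_ndo_bridge_setlink(struct net_device *dev,
				  struct nlmsghdr *nlh, u16 flags)
{
	/* The core now hands the bridge flags down, so the driver can
	 * tell a self (embedded switch) request from a master one
	 * without re-parsing the message. */
	if (!(flags & BRIDGE_FLAGS_SELF))
		return -EOPNOTSUPP;

	/* ... parse the IFLA_AF_SPEC attributes from nlh and program
	 * the hardware bridge here ... */
	return 0;
}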
@@ -1514,6 +1518,8 @@ struct net_device {
	struct list_head	napi_list;
	struct list_head	unreg_list;
	struct list_head	close_list;
+	struct list_head	ptype_all;
+	struct list_head	ptype_specific;
 
	struct {
		struct list_head upper;
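The two new list heads give each net_device private chains for taps (ptype_all) and for protocol handlers bound to that single device (ptype_specific), so delivery no longer has to filter device-bound entries out of the global lists. Registration is unchanged; a handler simply sets packet_type.dev before dev_add_pack(). A rough sketch (handler name and body are illustrative only):

#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/if_ether.h>

/* Illustrative device-bound handler; with .dev set, dev_add_pack()
 * chains this entry on the device's own ptype list rather than the
 * global one. */
static int foo_rcv(struct sk_buff *skb, struct net_device *dev,
		   struct packet_type *pt, struct net_device *orig_dev)
{
	kfree_skb(skb);		/* just consume the frame in this sketch */
	return NET_RX_SUCCESS;
}

static struct packet_type foo_ptype __read_mostly = {
	.type = htons(ETH_P_ALL),	/* lands on dev->ptype_all */
	.func = foo_rcv,
	/* .dev is filled in at runtime, e.g.
	 *   foo_ptype.dev = some_netdev;
	 *   dev_add_pack(&foo_ptype);
	 */
};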
@@ -1917,13 +1923,8 @@ struct napi_gro_cb {
	/* Number of segments aggregated. */
	u16	count;
 
-	/* This is non-zero if the packet may be of the same flow. */
-	u8	same_flow;
-
-	/* Free the skb? */
-	u8	free;
-#define NAPI_GRO_FREE		  1
-#define NAPI_GRO_FREE_STOLEN_HEAD 2
+	/* Start offset for remote checksum offload */
+	u16	gro_remcsum_start;
 
	/* jiffies when first packet was created/queued */
	unsigned long age;
@@ -1931,6 +1932,9 @@ struct napi_gro_cb {
	/* Used in ipv6_gro_receive() and foo-over-udp */
	u16	proto;
 
+	/* This is non-zero if the packet may be of the same flow. */
+	u8	same_flow:1;
+
	/* Used in udp_gro_receive */
	u8	udp_mark:1;
 
@@ -1940,9 +1944,16 @@ struct napi_gro_cb {
	/* Number of checksums via CHECKSUM_UNNECESSARY */
	u8	csum_cnt:3;
 
+	/* Free the skb? */
+	u8	free:2;
+#define NAPI_GRO_FREE		  1
+#define NAPI_GRO_FREE_STOLEN_HEAD 2
+
	/* Used in foo-over-udp, set in udp[46]_gro_receive */
	u8	is_ipv6:1;
 
+	/* 7 bit hole */
+
	/* used to support CHECKSUM_COMPLETE for tunneling protocols */
	__wsum	csum;
 
@@ -1969,7 +1980,7 @@ struct offload_callbacks {
	struct sk_buff		*(*gso_segment)(struct sk_buff *skb,
						netdev_features_t features);
	struct sk_buff		**(*gro_receive)(struct sk_buff **head,
-						 struct sk_buff *skb);
+						struct sk_buff *skb);
	int			(*gro_complete)(struct sk_buff *skb, int nhoff);
 };
 
@@ -1979,10 +1990,21 @@ struct packet_offload {
	struct list_head	 list;
 };
 
+struct udp_offload;
+
+struct udp_offload_callbacks {
+	struct sk_buff		**(*gro_receive)(struct sk_buff **head,
+						 struct sk_buff *skb,
+						 struct udp_offload *uoff);
+	int			(*gro_complete)(struct sk_buff *skb,
+						int nhoff,
+						struct udp_offload *uoff);
+};
+
 struct udp_offload {
	__be16			 port;
	u8			 ipproto;
-	struct offload_callbacks callbacks;
+	struct udp_offload_callbacks callbacks;
 };
 
 /* often modified stats are per cpu, other are shared (netdev->stats) */
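The separate udp_offload_callbacks type exists so UDP-encapsulation GRO handlers get their own struct udp_offload passed back, letting a tunnel recover its per-port state with container_of() rather than a global lookup. A rough sketch of a receive callback under the new signature, loosely modelled on how vxlan/fou embed the udp_offload (all names are illustrative):

#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Hypothetical per-port tunnel state wrapped around the udp_offload,
 * so container_of() can recover it from the uoff argument. */
struct foo_tunnel {
	u32 features;
	struct udp_offload udp_offloads;
};

static struct sk_buff **foo_gro_receive(struct sk_buff **head,
					struct sk_buff *skb,
					struct udp_offload *uoff)
{
	struct foo_tunnel *tun = container_of(uoff, struct foo_tunnel,
					      udp_offloads);

	/* tun->features etc. is now reachable without global state;
	 * a real handler would continue GRO flow matching here. */
	(void)tun;
	return NULL;	/* illustrative: no aggregation performed */
}

A real tunnel would set tun->udp_offloads.callbacks.gro_receive = foo_gro_receive (and likewise gro_complete) before registering the port with udp_add_offload(&tun->udp_offloads).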
@@ -2041,6 +2063,7 @@ struct pcpu_sw_netstats {
 #define NETDEV_RESEND_IGMP	0x0016
 #define NETDEV_PRECHANGEMTU	0x0017 /* notify before mtu change happened */
 #define NETDEV_CHANGEINFODATA	0x0018
+#define NETDEV_BONDING_INFO	0x0019
 
 int register_netdevice_notifier(struct notifier_block *nb);
 int unregister_netdevice_notifier(struct notifier_block *nb);
@@ -2224,11 +2247,20 @@ static inline void skb_gro_postpull_rcsum(struct sk_buff *skb,
 
 __sum16 __skb_gro_checksum_complete(struct sk_buff *skb);
 
+static inline bool skb_at_gro_remcsum_start(struct sk_buff *skb)
+{
+	return (NAPI_GRO_CB(skb)->gro_remcsum_start - skb_headroom(skb) ==
+		skb_gro_offset(skb));
+}
+
 static inline bool __skb_gro_checksum_validate_needed(struct sk_buff *skb,
						       bool zero_okay,
						       __sum16 check)
 {
-	return (skb->ip_summed != CHECKSUM_PARTIAL &&
+	return ((skb->ip_summed != CHECKSUM_PARTIAL ||
+		 skb_checksum_start_offset(skb) <
+		 skb_gro_offset(skb)) &&
+		!skb_at_gro_remcsum_start(skb) &&
		NAPI_GRO_CB(skb)->csum_cnt == 0 &&
		(!zero_okay || check));
 }
@@ -2303,6 +2335,49 @@ do { \
				 compute_pseudo(skb, proto));		\
 } while (0)
 
+struct gro_remcsum {
+	int offset;
+	__wsum delta;
+};
+
+static inline void skb_gro_remcsum_init(struct gro_remcsum *grc)
+{
+	grc->delta = 0;
+}
+
+static inline void skb_gro_remcsum_process(struct sk_buff *skb, void *ptr,
+					   int start, int offset,
+					   struct gro_remcsum *grc,
+					   bool nopartial)
+{
+	__wsum delta;
+
+	BUG_ON(!NAPI_GRO_CB(skb)->csum_valid);
+
+	if (!nopartial) {
+		NAPI_GRO_CB(skb)->gro_remcsum_start =
+		    ((unsigned char *)ptr + start) - skb->head;
+		return;
+	}
+
+	delta = remcsum_adjust(ptr, NAPI_GRO_CB(skb)->csum, start, offset);
+
+	/* Adjust skb->csum since we changed the packet */
+	NAPI_GRO_CB(skb)->csum = csum_add(NAPI_GRO_CB(skb)->csum, delta);
+
+	grc->offset = (ptr + offset) - (void *)skb->head;
+	grc->delta = delta;
+}
+
+static inline void skb_gro_remcsum_cleanup(struct sk_buff *skb,
+					   struct gro_remcsum *grc)
+{
+	if (!grc->delta)
+		return;
+
+	remcsum_unadjust((__sum16 *)(skb->head + grc->offset), grc->delta);
+}
+
 static inline int dev_hard_header(struct sk_buff *skb, struct net_device *dev,
				  unsigned short type,
				  const void *daddr, const void *saddr,
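The three helpers added in this hunk are meant to bracket a GRO receive pass that touches a remote-checksum-offload header. A condensed sketch of the intended calling pattern, loosely following how a fou/vxlan-style handler would use them; the header parsing is omitted and the function name and parameters are illustrative:

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Illustrative calling pattern for the gro_remcsum helpers; 'ptr' would
 * point at the encapsulation header obtained via skb_gro_header_*(). */
static struct sk_buff **foo_remcsum_gro_receive(struct sk_buff **head,
						struct sk_buff *skb,
						void *ptr, int start,
						int offset, bool nopartial)
{
	struct sk_buff **pp = NULL;
	struct gro_remcsum grc;

	skb_gro_remcsum_init(&grc);

	/* skb_gro_remcsum_process() requires a verified GRO checksum. */
	if (!NAPI_GRO_CB(skb)->csum_valid)
		goto out;

	/* Either patches the inner checksum now, or (when !nopartial)
	 * records gro_remcsum_start so validation is deferred. */
	skb_gro_remcsum_process(skb, ptr, start, offset, &grc, nopartial);

	/* ... normal GRO flow matching / aggregation would happen here ... */

out:
	/* Reverts the in-place adjustment recorded in grc; a no-op if
	 * nothing was changed. */
	skb_gro_remcsum_cleanup(skb, &grc);
	return pp;
}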
@@ -3464,6 +3539,19 @@ struct sk_buff *__skb_gso_segment(struct sk_buff *skb,
 struct sk_buff *skb_mac_gso_segment(struct sk_buff *skb,
				    netdev_features_t features);
 
+struct netdev_bonding_info {
+	ifslave	slave;
+	ifbond	master;
+};
+
+struct netdev_notifier_bonding_info {
+	struct netdev_notifier_info info; /* must be first */
+	struct netdev_bonding_info  bonding_info;
+};
+
+void netdev_bonding_info_change(struct net_device *dev,
+				struct netdev_bonding_info *bonding_info);
+
 static inline
 struct sk_buff *skb_gso_segment(struct sk_buff *skb, netdev_features_t features)
 {
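Consumers of the new NETDEV_BONDING_INFO event cast the notifier payload to the struct declared above; the info member being first is what lets netdev_notifier_info_to_dev() work on it. A hedged sketch of such a listener (the driver name and the printed field are illustrative):

#include <linux/netdevice.h>
#include <linux/notifier.h>

/* Illustrative listener for the new NETDEV_BONDING_INFO event. */
static int foo_netdev_event(struct notifier_block *nb,
			    unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);

	if (event == NETDEV_BONDING_INFO) {
		struct netdev_notifier_bonding_info *info = ptr;

		/* info->bonding_info.master / .slave describe the bond
		 * and this slave's role; a real driver would program its
		 * hardware accordingly. */
		netdev_info(dev, "bond mode %d\n",
			    info->bonding_info.master.bond_mode);
	}
	return NOTIFY_DONE;
}

static struct notifier_block foo_netdev_nb = {
	.notifier_call = foo_netdev_event,
	/* registered with register_netdevice_notifier(&foo_netdev_nb) */
};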