Diffstat (limited to 'include/linux/netdevice.h')
-rw-r--r--	include/linux/netdevice.h	306
1 file changed, 201 insertions, 105 deletions

diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index fa8b47637997..40291f375024 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -31,6 +31,7 @@
 #include <linux/if_link.h>
 
 #ifdef __KERNEL__
+#include <linux/pm_qos_params.h>
 #include <linux/timer.h>
 #include <linux/delay.h>
 #include <linux/mm.h>
@@ -218,34 +219,6 @@ struct neighbour;
 struct neigh_parms;
 struct sk_buff;
 
-struct netif_rx_stats {
-	unsigned total;
-	unsigned dropped;
-	unsigned time_squeeze;
-	unsigned cpu_collision;
-};
-
-DECLARE_PER_CPU(struct netif_rx_stats, netdev_rx_stat);
-
-struct dev_addr_list {
-	struct dev_addr_list	*next;
-	u8			da_addr[MAX_ADDR_LEN];
-	u8			da_addrlen;
-	u8			da_synced;
-	int			da_users;
-	int			da_gusers;
-};
-
-/*
- *	We tag multicasts with these structures.
- */
-
-#define dev_mc_list	dev_addr_list
-#define dmi_addr	da_addr
-#define dmi_addrlen	da_addrlen
-#define dmi_users	da_users
-#define dmi_gusers	da_gusers
-
 struct netdev_hw_addr {
 	struct list_head	list;
 	unsigned char		addr[MAX_ADDR_LEN];
@@ -254,8 +227,10 @@ struct netdev_hw_addr {
 #define NETDEV_HW_ADDR_T_SAN		2
 #define NETDEV_HW_ADDR_T_SLAVE		3
 #define NETDEV_HW_ADDR_T_UNICAST	4
+#define NETDEV_HW_ADDR_T_MULTICAST	5
 	int			refcount;
 	bool			synced;
+	bool			global_use;
 	struct rcu_head		rcu_head;
 };
 
@@ -264,16 +239,20 @@ struct netdev_hw_addr_list {
 	int			count;
 };
 
-#define netdev_uc_count(dev) ((dev)->uc.count)
-#define netdev_uc_empty(dev) ((dev)->uc.count == 0)
-#define netdev_for_each_uc_addr(ha, dev) \
-	list_for_each_entry(ha, &dev->uc.list, list)
+#define netdev_hw_addr_list_count(l) ((l)->count)
+#define netdev_hw_addr_list_empty(l) (netdev_hw_addr_list_count(l) == 0)
+#define netdev_hw_addr_list_for_each(ha, l) \
+	list_for_each_entry(ha, &(l)->list, list)
 
-#define netdev_mc_count(dev) ((dev)->mc_count)
-#define netdev_mc_empty(dev) (netdev_mc_count(dev) == 0)
+#define netdev_uc_count(dev) netdev_hw_addr_list_count(&(dev)->uc)
+#define netdev_uc_empty(dev) netdev_hw_addr_list_empty(&(dev)->uc)
+#define netdev_for_each_uc_addr(ha, dev) \
+	netdev_hw_addr_list_for_each(ha, &(dev)->uc)
 
-#define netdev_for_each_mc_addr(mclist, dev) \
-	for (mclist = dev->mc_list; mclist; mclist = mclist->next)
+#define netdev_mc_count(dev) netdev_hw_addr_list_count(&(dev)->mc)
+#define netdev_mc_empty(dev) netdev_hw_addr_list_empty(&(dev)->mc)
+#define netdev_for_each_mc_addr(ha, dev) \
+	netdev_hw_addr_list_for_each(ha, &(dev)->mc)
 
 struct hh_cache {
 	struct hh_cache *hh_next;	/* Next entry */
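With this change netdev_for_each_mc_addr() hands the driver a struct netdev_hw_addr instead of the old dev_mc_list chain, so the address is read from ha->addr. A minimal sketch of a driver rx-mode handler after this patch (illustrative only, not part of the patch; example_hash_addr() is a hypothetical helper, and the core normally invokes ndo_set_rx_mode with dev->addr_list_lock already held):

static void example_set_rx_mode(struct net_device *dev)
{
	struct netdev_hw_addr *ha;

	/* program every multicast address known to the stack;
	 * netdev_mc_count(dev) gives the number of entries */
	netdev_for_each_mc_addr(ha, dev)
		example_hash_addr(ha->addr, dev->addr_len);	/* hypothetical helper */
}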
@@ -530,6 +509,85 @@ struct netdev_queue {
 	unsigned long		tx_dropped;
 } ____cacheline_aligned_in_smp;
 
+#ifdef CONFIG_RPS
+/*
+ * This structure holds an RPS map which can be of variable length.  The
+ * map is an array of CPUs.
+ */
+struct rps_map {
+	unsigned int len;
+	struct rcu_head rcu;
+	u16 cpus[0];
+};
+#define RPS_MAP_SIZE(_num) (sizeof(struct rps_map) + (_num * sizeof(u16)))
+
+/*
+ * The rps_dev_flow structure contains the mapping of a flow to a CPU and the
+ * tail pointer for that CPU's input queue at the time of last enqueue.
+ */
+struct rps_dev_flow {
+	u16 cpu;
+	u16 fill;
+	unsigned int last_qtail;
+};
+
+/*
+ * The rps_dev_flow_table structure contains a table of flow mappings.
+ */
+struct rps_dev_flow_table {
+	unsigned int mask;
+	struct rcu_head rcu;
+	struct work_struct free_work;
+	struct rps_dev_flow flows[0];
+};
+#define RPS_DEV_FLOW_TABLE_SIZE(_num) (sizeof(struct rps_dev_flow_table) + \
+	(_num * sizeof(struct rps_dev_flow)))
+
+/*
+ * The rps_sock_flow_table contains mappings of flows to the last CPU
+ * on which they were processed by the application (set in recvmsg).
+ */
+struct rps_sock_flow_table {
+	unsigned int mask;
+	u16 ents[0];
+};
+#define RPS_SOCK_FLOW_TABLE_SIZE(_num) (sizeof(struct rps_sock_flow_table) + \
+	(_num * sizeof(u16)))
+
+#define RPS_NO_CPU 0xffff
+
+static inline void rps_record_sock_flow(struct rps_sock_flow_table *table,
+					u32 hash)
+{
+	if (table && hash) {
+		unsigned int cpu, index = hash & table->mask;
+
+		/* We only give a hint, preemption can change cpu under us */
+		cpu = raw_smp_processor_id();
+
+		if (table->ents[index] != cpu)
+			table->ents[index] = cpu;
+	}
+}
+
+static inline void rps_reset_sock_flow(struct rps_sock_flow_table *table,
+				       u32 hash)
+{
+	if (table && hash)
+		table->ents[hash & table->mask] = RPS_NO_CPU;
+}
+
+extern struct rps_sock_flow_table *rps_sock_flow_table;
+
+/* This structure contains an instance of an RX queue. */
+struct netdev_rx_queue {
+	struct rps_map *rps_map;
+	struct rps_dev_flow_table *rps_flow_table;
+	struct kobject kobj;
+	struct netdev_rx_queue *first;
+	atomic_t count;
+} ____cacheline_aligned_in_smp;
+#endif /* CONFIG_RPS */
 
 /*
  * This structure defines the management hooks for network devices.
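The two inline helpers above only record hints in the socket flow table and take no locks. A rough sketch of how a recvmsg-path caller might use rps_record_sock_flow() (illustrative, not part of this patch; assumes CONFIG_RPS and that the caller has already computed the packet hash):

static inline void example_record_flow(u32 hash)
{
	struct rps_sock_flow_table *tbl;

	rcu_read_lock();
	/* the global table can be replaced at runtime, hence RCU */
	tbl = rcu_dereference(rps_sock_flow_table);
	rps_record_sock_flow(tbl, hash);
	rcu_read_unlock();
}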
@@ -629,6 +687,9 @@ struct netdev_queue {
 * int (*ndo_set_vf_tx_rate)(struct net_device *dev, int vf, int rate);
 * int (*ndo_get_vf_config)(struct net_device *dev,
 *			    int vf, struct ifla_vf_info *ivf);
+ * int (*ndo_set_vf_port)(struct net_device *dev, int vf,
+ *			  struct nlattr *port[]);
+ * int (*ndo_get_vf_port)(struct net_device *dev, int vf, struct sk_buff *skb);
 */
 #define HAVE_NET_DEVICE_OPS
 struct net_device_ops {
@@ -667,6 +728,7 @@ struct net_device_ops {
 						  unsigned short vid);
 #ifdef CONFIG_NET_POLL_CONTROLLER
 	void			(*ndo_poll_controller)(struct net_device *dev);
+	void			(*ndo_netpoll_cleanup)(struct net_device *dev);
 #endif
 	int			(*ndo_set_vf_mac)(struct net_device *dev,
 						  int queue, u8 *mac);
@@ -677,6 +739,11 @@ struct net_device_ops {
 	int			(*ndo_get_vf_config)(struct net_device *dev,
 						     int vf,
 						     struct ifla_vf_info *ivf);
+	int			(*ndo_set_vf_port)(struct net_device *dev,
+						   int vf,
+						   struct nlattr *port[]);
+	int			(*ndo_get_vf_port)(struct net_device *dev,
+						   int vf, struct sk_buff *skb);
 #if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
 	int			(*ndo_fcoe_enable)(struct net_device *dev);
 	int			(*ndo_fcoe_disable)(struct net_device *dev);
@@ -711,6 +778,9 @@ struct net_device {
 	 * the interface.
 	 */
 	char			name[IFNAMSIZ];
+
+	struct pm_qos_request_list *pm_qos_req;
+
 	/* device name hash chain */
 	struct hlist_node	name_hlist;
 	/* snmp alias */
@@ -764,6 +834,7 @@ struct net_device {
 #define NETIF_F_SCTP_CSUM	(1 << 25) /* SCTP checksum offload */
 #define NETIF_F_FCOE_MTU	(1 << 26) /* Supports max FCoE MTU, 2158 bytes*/
 #define NETIF_F_NTUPLE		(1 << 27) /* N-tuple filters supported */
+#define NETIF_F_RXHASH		(1 << 28) /* Receive hashing offload */
 
 /* Segmentation offload features */
 #define NETIF_F_GSO_SHIFT	16
@@ -820,7 +891,7 @@ struct net_device {
 	unsigned char		operstate; /* RFC2863 operstate */
 	unsigned char		link_mode; /* mapping policy to operstate */
 
-	unsigned		mtu;	/* interface MTU value		*/
+	unsigned int		mtu;	/* interface MTU value		*/
 	unsigned short		type;	/* interface hardware type	*/
 	unsigned short		hard_header_len;	/* hardware hdr length	*/
 
@@ -840,12 +911,10 @@ struct net_device {
 	unsigned char		addr_len;	/* hardware address length	*/
 	unsigned short		dev_id;		/* for shared network cards */
 
-	struct netdev_hw_addr_list	uc;	/* Secondary unicast
-						   mac addresses */
-	int			uc_promisc;
 	spinlock_t		addr_list_lock;
-	struct dev_addr_list	*mc_list;	/* Multicast mac addresses */
-	int			mc_count;	/* Number of installed mcasts */
+	struct netdev_hw_addr_list	uc;	/* Unicast mac addresses */
+	struct netdev_hw_addr_list	mc;	/* Multicast mac addresses */
+	int			uc_promisc;
 	unsigned int		promiscuity;
 	unsigned int		allmulti;
 
@@ -878,6 +947,15 @@ struct net_device {
 
 	unsigned char		broadcast[MAX_ADDR_LEN];	/* hw bcast add	*/
 
+#ifdef CONFIG_RPS
+	struct kset		*queues_kset;
+
+	struct netdev_rx_queue	*_rx;
+
+	/* Number of RX queues allocated at alloc_netdev_mq() time */
+	unsigned int		num_rx_queues;
+#endif
+
 	struct netdev_queue	rx_queue;
 
 	struct netdev_queue	*_tx ____cacheline_aligned_in_smp;
@@ -1306,19 +1384,52 @@ static inline int unregister_gifconf(unsigned int family)
 }
 
 /*
- * Incoming packets are placed on per-cpu queues so that
- * no locking is needed.
+ * Incoming packets are placed on per-cpu queues
 */
 struct softnet_data {
 	struct Qdisc		*output_queue;
-	struct sk_buff_head	input_pkt_queue;
+	struct Qdisc		**output_queue_tailp;
 	struct list_head	poll_list;
 	struct sk_buff		*completion_queue;
-
+	struct sk_buff_head	process_queue;
+
+	/* stats */
+	unsigned int		processed;
+	unsigned int		time_squeeze;
+	unsigned int		cpu_collision;
+	unsigned int		received_rps;
+
+#ifdef CONFIG_RPS
+	struct softnet_data	*rps_ipi_list;
+
+	/* Elements below can be accessed between CPUs for RPS */
+	struct call_single_data	csd ____cacheline_aligned_in_smp;
+	struct softnet_data	*rps_ipi_next;
+	unsigned int		cpu;
+	unsigned int		input_queue_head;
+	unsigned int		input_queue_tail;
+#endif
+	unsigned		dropped;
+	struct sk_buff_head	input_pkt_queue;
 	struct napi_struct	backlog;
 };
 
-DECLARE_PER_CPU(struct softnet_data,softnet_data);
+static inline void input_queue_head_incr(struct softnet_data *sd)
+{
+#ifdef CONFIG_RPS
+	sd->input_queue_head++;
+#endif
+}
+
+static inline void input_queue_tail_incr_save(struct softnet_data *sd,
+					      unsigned int *qtail)
+{
+#ifdef CONFIG_RPS
+	*qtail = ++sd->input_queue_tail;
+#endif
+}
+
+DECLARE_PER_CPU_ALIGNED(struct softnet_data, softnet_data);
 
 #define HAVE_NETIF_QUEUE
 
@@ -1945,6 +2056,22 @@ extern struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name,
 extern int		register_netdev(struct net_device *dev);
 extern void		unregister_netdev(struct net_device *dev);
 
+/* General hardware address lists handling functions */
+extern int __hw_addr_add_multiple(struct netdev_hw_addr_list *to_list,
+				  struct netdev_hw_addr_list *from_list,
+				  int addr_len, unsigned char addr_type);
+extern void __hw_addr_del_multiple(struct netdev_hw_addr_list *to_list,
+				   struct netdev_hw_addr_list *from_list,
+				   int addr_len, unsigned char addr_type);
+extern int __hw_addr_sync(struct netdev_hw_addr_list *to_list,
+			  struct netdev_hw_addr_list *from_list,
+			  int addr_len);
+extern void __hw_addr_unsync(struct netdev_hw_addr_list *to_list,
+			     struct netdev_hw_addr_list *from_list,
+			     int addr_len);
+extern void __hw_addr_flush(struct netdev_hw_addr_list *list);
+extern void __hw_addr_init(struct netdev_hw_addr_list *list);
+
 /* Functions used for device addresses handling */
 extern int dev_addr_add(struct net_device *dev, unsigned char *addr,
 			unsigned char addr_type);
@@ -1956,26 +2083,34 @@ extern int dev_addr_add_multiple(struct net_device *to_dev,
 extern int dev_addr_del_multiple(struct net_device *to_dev,
 				 struct net_device *from_dev,
 				 unsigned char addr_type);
+extern void dev_addr_flush(struct net_device *dev);
+extern int dev_addr_init(struct net_device *dev);
+
+/* Functions used for unicast addresses handling */
+extern int dev_uc_add(struct net_device *dev, unsigned char *addr);
+extern int dev_uc_del(struct net_device *dev, unsigned char *addr);
+extern int dev_uc_sync(struct net_device *to, struct net_device *from);
+extern void dev_uc_unsync(struct net_device *to, struct net_device *from);
+extern void dev_uc_flush(struct net_device *dev);
+extern void dev_uc_init(struct net_device *dev);
+
+/* Functions used for multicast addresses handling */
+extern int dev_mc_add(struct net_device *dev, unsigned char *addr);
+extern int dev_mc_add_global(struct net_device *dev, unsigned char *addr);
+extern int dev_mc_del(struct net_device *dev, unsigned char *addr);
+extern int dev_mc_del_global(struct net_device *dev, unsigned char *addr);
+extern int dev_mc_sync(struct net_device *to, struct net_device *from);
+extern void dev_mc_unsync(struct net_device *to, struct net_device *from);
+extern void dev_mc_flush(struct net_device *dev);
+extern void dev_mc_init(struct net_device *dev);
 
 /* Functions used for secondary unicast and multicast support */
 extern void		dev_set_rx_mode(struct net_device *dev);
 extern void		__dev_set_rx_mode(struct net_device *dev);
-extern int		dev_unicast_delete(struct net_device *dev, void *addr);
-extern int		dev_unicast_add(struct net_device *dev, void *addr);
-extern int		dev_unicast_sync(struct net_device *to, struct net_device *from);
-extern void		dev_unicast_unsync(struct net_device *to, struct net_device *from);
-extern int		dev_mc_delete(struct net_device *dev, void *addr, int alen, int all);
-extern int		dev_mc_add(struct net_device *dev, void *addr, int alen, int newonly);
-extern int		dev_mc_sync(struct net_device *to, struct net_device *from);
-extern void		dev_mc_unsync(struct net_device *to, struct net_device *from);
-extern int		__dev_addr_delete(struct dev_addr_list **list, int *count, void *addr, int alen, int all);
-extern int		__dev_addr_add(struct dev_addr_list **list, int *count, void *addr, int alen, int newonly);
-extern int		__dev_addr_sync(struct dev_addr_list **to, int *to_count, struct dev_addr_list **from, int *from_count);
-extern void		__dev_addr_unsync(struct dev_addr_list **to, int *to_count, struct dev_addr_list **from, int *from_count);
 extern int		dev_set_promiscuity(struct net_device *dev, int inc);
 extern int		dev_set_allmulti(struct net_device *dev, int inc);
 extern void		netdev_state_change(struct net_device *dev);
-extern void		netdev_bonding_change(struct net_device *dev,
+extern int		netdev_bonding_change(struct net_device *dev,
 					      unsigned long event);
 extern void		netdev_features_change(struct net_device *dev);
 /* Load a device via the kmod */
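The removed multicast helpers took an explicit address length and an all/newonly flag; their replacements take only the device and the address, with the length implied by dev->addr_len. A rough before/after sketch for a driver call site (illustrative only; mc_addr is a placeholder buffer):

	/* before this patch */
	dev_mc_add(dev, mc_addr, dev->addr_len, 0);
	dev_mc_delete(dev, mc_addr, dev->addr_len, 0);

	/* after this patch */
	dev_mc_add(dev, mc_addr);
	dev_mc_del(dev, mc_addr);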
@@ -1985,6 +2120,7 @@ extern const struct net_device_stats *dev_get_stats(struct net_device *dev);
 extern void		dev_txq_stats_fold(const struct net_device *dev, struct net_device_stats *stats);
 
 extern int		netdev_max_backlog;
+extern int		netdev_tstamp_prequeue;
 extern int		weight_p;
 extern int		netdev_set_master(struct net_device *dev, struct net_device *master);
 extern int		skb_checksum_help(struct sk_buff *skb);
@@ -2045,54 +2181,14 @@ static inline void netif_set_gso_max_size(struct net_device *dev,
 	dev->gso_max_size = size;
 }
 
-static inline void skb_bond_set_mac_by_master(struct sk_buff *skb,
-					      struct net_device *master)
-{
-	if (skb->pkt_type == PACKET_HOST) {
-		u16 *dest = (u16 *) eth_hdr(skb)->h_dest;
-
-		memcpy(dest, master->dev_addr, ETH_ALEN);
-	}
-}
+extern int __skb_bond_should_drop(struct sk_buff *skb,
+				  struct net_device *master);
 
-/* On bonding slaves other than the currently active slave, suppress
- * duplicates except for 802.3ad ETH_P_SLOW, alb non-mcast/bcast, and
- * ARP on active-backup slaves with arp_validate enabled.
- */
 static inline int skb_bond_should_drop(struct sk_buff *skb,
 				       struct net_device *master)
 {
-	if (master) {
-		struct net_device *dev = skb->dev;
-
-		if (master->priv_flags & IFF_MASTER_ARPMON)
-			dev->last_rx = jiffies;
-
-		if ((master->priv_flags & IFF_MASTER_ALB) && master->br_port) {
-			/* Do address unmangle. The local destination address
-			 * will be always the one master has. Provides the right
-			 * functionality in a bridge.
-			 */
-			skb_bond_set_mac_by_master(skb, master);
-		}
-
-		if (dev->priv_flags & IFF_SLAVE_INACTIVE) {
-			if ((dev->priv_flags & IFF_SLAVE_NEEDARP) &&
-			    skb->protocol == __cpu_to_be16(ETH_P_ARP))
-				return 0;
-
-			if (master->priv_flags & IFF_MASTER_ALB) {
-				if (skb->pkt_type != PACKET_BROADCAST &&
-				    skb->pkt_type != PACKET_MULTICAST)
-					return 0;
-			}
-			if (master->priv_flags & IFF_MASTER_8023AD &&
-			    skb->protocol == __cpu_to_be16(ETH_P_SLOW))
-				return 0;
-
-			return 1;
-		}
-	}
+	if (master)
+		return __skb_bond_should_drop(skb, master);
 	return 0;
 }
 
@@ -2238,7 +2334,7 @@ do { \
 #define netif_vdbg(priv, type, dev, format, args...)	\
 ({							\
 	if (0)						\
-		netif_printk(KERN_DEBUG, dev, format, ##args); \
+		netif_printk(priv, type, KERN_DEBUG, dev, format, ##args); \
 	0;						\
 })
 #endif