Diffstat (limited to 'include/linux/netdevice.h')
-rw-r--r--  include/linux/netdevice.h  |  234
1 files changed, 140 insertions, 94 deletions
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index de1a52bcb9e0..3c5ed5f5274e 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -228,25 +228,6 @@ struct netif_rx_stats {
 
 DECLARE_PER_CPU(struct netif_rx_stats, netdev_rx_stat);
 
-struct dev_addr_list {
-	struct dev_addr_list	*next;
-	u8			da_addr[MAX_ADDR_LEN];
-	u8			da_addrlen;
-	u8			da_synced;
-	int			da_users;
-	int			da_gusers;
-};
-
-/*
- *	We tag multicasts with these structures.
- */
-
-#define dev_mc_list	dev_addr_list
-#define dmi_addr	da_addr
-#define dmi_addrlen	da_addrlen
-#define dmi_users	da_users
-#define dmi_gusers	da_gusers
-
 struct netdev_hw_addr {
 	struct list_head	list;
 	unsigned char		addr[MAX_ADDR_LEN];
@@ -255,8 +236,10 @@ struct netdev_hw_addr {
 #define NETDEV_HW_ADDR_T_SAN		2
 #define NETDEV_HW_ADDR_T_SLAVE		3
 #define NETDEV_HW_ADDR_T_UNICAST	4
+#define NETDEV_HW_ADDR_T_MULTICAST	5
 	int			refcount;
 	bool			synced;
+	bool			global_use;
 	struct rcu_head		rcu_head;
 };
 
@@ -265,16 +248,20 @@ struct netdev_hw_addr_list {
 	int			count;
 };
 
-#define netdev_uc_count(dev) ((dev)->uc.count)
-#define netdev_uc_empty(dev) ((dev)->uc.count == 0)
-#define netdev_for_each_uc_addr(ha, dev) \
-	list_for_each_entry(ha, &dev->uc.list, list)
+#define netdev_hw_addr_list_count(l) ((l)->count)
+#define netdev_hw_addr_list_empty(l) (netdev_hw_addr_list_count(l) == 0)
+#define netdev_hw_addr_list_for_each(ha, l) \
+	list_for_each_entry(ha, &(l)->list, list)
 
-#define netdev_mc_count(dev) ((dev)->mc_count)
-#define netdev_mc_empty(dev) (netdev_mc_count(dev) == 0)
+#define netdev_uc_count(dev) netdev_hw_addr_list_count(&(dev)->uc)
+#define netdev_uc_empty(dev) netdev_hw_addr_list_empty(&(dev)->uc)
+#define netdev_for_each_uc_addr(ha, dev) \
+	netdev_hw_addr_list_for_each(ha, &(dev)->uc)
 
-#define netdev_for_each_mc_addr(mclist, dev) \
-	for (mclist = dev->mc_list; mclist; mclist = mclist->next)
+#define netdev_mc_count(dev) netdev_hw_addr_list_count(&(dev)->mc)
+#define netdev_mc_empty(dev) netdev_hw_addr_list_empty(&(dev)->mc)
+#define netdev_for_each_mc_addr(ha, dev) \
+	netdev_hw_addr_list_for_each(ha, &(dev)->mc)
 
 struct hh_cache {
 	struct hh_cache		*hh_next;	/* Next entry */
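
Usage sketch (illustration only, not part of this patch): a driver that used to walk dev->mc_list through dmi_addr now iterates struct netdev_hw_addr entries. foo_build_mc_filter() and its filter buffer are hypothetical; only the iterator and the ha->addr field come from this header.

#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/string.h>

static void foo_build_mc_filter(struct net_device *dev, u8 *filter, int max_slots)
{
	struct netdev_hw_addr *ha;
	int i = 0;

	/* old style: for (mclist = dev->mc_list; mclist; mclist = mclist->next) */
	netdev_for_each_mc_addr(ha, dev) {
		if (i == max_slots)
			break;
		/* ha->addr replaces mclist->dmi_addr */
		memcpy(filter + i * ETH_ALEN, ha->addr, ETH_ALEN);
		i++;
	}
}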
@@ -531,6 +518,7 @@ struct netdev_queue {
 	unsigned long		tx_dropped;
 } ____cacheline_aligned_in_smp;
 
+#ifdef CONFIG_RPS
 /*
  * This structure holds an RPS map which can be of variable length.  The
  * map is an array of CPUs.
@@ -542,13 +530,73 @@ struct rps_map {
 };
 #define RPS_MAP_SIZE(_num) (sizeof(struct rps_map) + (_num * sizeof(u16)))
 
+/*
+ * The rps_dev_flow structure contains the mapping of a flow to a CPU and the
+ * tail pointer for that CPU's input queue at the time of last enqueue.
+ */
+struct rps_dev_flow {
+	u16 cpu;
+	u16 fill;
+	unsigned int last_qtail;
+};
+
+/*
+ * The rps_dev_flow_table structure contains a table of flow mappings.
+ */
+struct rps_dev_flow_table {
+	unsigned int mask;
+	struct rcu_head rcu;
+	struct work_struct free_work;
+	struct rps_dev_flow flows[0];
+};
+#define RPS_DEV_FLOW_TABLE_SIZE(_num) (sizeof(struct rps_dev_flow_table) + \
+    (_num * sizeof(struct rps_dev_flow)))
+
+/*
+ * The rps_sock_flow_table contains mappings of flows to the last CPU
+ * on which they were processed by the application (set in recvmsg).
+ */
+struct rps_sock_flow_table {
+	unsigned int mask;
+	u16 ents[0];
+};
+#define RPS_SOCK_FLOW_TABLE_SIZE(_num) (sizeof(struct rps_sock_flow_table) + \
+    (_num * sizeof(u16)))
+
+#define RPS_NO_CPU 0xffff
+
+static inline void rps_record_sock_flow(struct rps_sock_flow_table *table,
+					u32 hash)
+{
+	if (table && hash) {
+		unsigned int cpu, index = hash & table->mask;
+
+		/* We only give a hint, preemption can change cpu under us */
+		cpu = raw_smp_processor_id();
+
+		if (table->ents[index] != cpu)
+			table->ents[index] = cpu;
+	}
+}
+
+static inline void rps_reset_sock_flow(struct rps_sock_flow_table *table,
+				       u32 hash)
+{
+	if (table && hash)
+		table->ents[hash & table->mask] = RPS_NO_CPU;
+}
+
+extern struct rps_sock_flow_table *rps_sock_flow_table;
+
 /* This structure contains an instance of an RX queue. */
 struct netdev_rx_queue {
 	struct rps_map *rps_map;
+	struct rps_dev_flow_table *rps_flow_table;
 	struct kobject kobj;
 	struct netdev_rx_queue *first;
 	atomic_t count;
 } ____cacheline_aligned_in_smp;
+#endif /* CONFIG_RPS */
 
 /*
  * This structure defines the management hooks for network devices.
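
Usage sketch (illustration only, not part of this patch): how a per-queue flow table would be sized with RPS_DEV_FLOW_TABLE_SIZE() and its entries left unassigned via RPS_NO_CPU. The helper below is hypothetical, assumes CONFIG_RPS is set and flow_cnt is a power of two, and omits the rcu/free_work teardown path.

#include <linux/netdevice.h>
#include <linux/vmalloc.h>

static struct rps_dev_flow_table *foo_alloc_flow_table(unsigned int flow_cnt)
{
	struct rps_dev_flow_table *table;
	unsigned int i;

	table = vmalloc(RPS_DEV_FLOW_TABLE_SIZE(flow_cnt));
	if (!table)
		return NULL;

	table->mask = flow_cnt - 1;		/* hash & mask indexes flows[] */
	for (i = 0; i < flow_cnt; i++)
		table->flows[i].cpu = RPS_NO_CPU;	/* no CPU recorded yet */
	return table;
}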
@@ -783,6 +831,7 @@ struct net_device {
 #define NETIF_F_SCTP_CSUM	(1 << 25) /* SCTP checksum offload */
 #define NETIF_F_FCOE_MTU	(1 << 26) /* Supports max FCoE MTU, 2158 bytes*/
 #define NETIF_F_NTUPLE		(1 << 27) /* N-tuple filters supported */
+#define NETIF_F_RXHASH		(1 << 28) /* Receive hashing offload */
 
 	/* Segmentation offload features */
 #define NETIF_F_GSO_SHIFT	16
@@ -859,12 +908,10 @@ struct net_device {
 	unsigned char		addr_len;	/* hardware address length	*/
 	unsigned short		dev_id;		/* for shared network cards */
 
-	struct netdev_hw_addr_list	uc;	/* Secondary unicast
-						   mac addresses */
-	int			uc_promisc;
 	spinlock_t		addr_list_lock;
-	struct dev_addr_list	*mc_list;	/* Multicast mac addresses */
-	int			mc_count;	/* Number of installed mcasts */
+	struct netdev_hw_addr_list	uc;	/* Unicast mac addresses */
+	struct netdev_hw_addr_list	mc;	/* Multicast mac addresses */
+	int			uc_promisc;
 	unsigned int		promiscuity;
 	unsigned int		allmulti;
 
@@ -897,12 +944,14 @@ struct net_device {
 
 	unsigned char		broadcast[MAX_ADDR_LEN];	/* hw bcast add	*/
 
+#ifdef CONFIG_RPS
 	struct kset		*queues_kset;
 
 	struct netdev_rx_queue	*_rx;
 
 	/* Number of RX queues allocated at alloc_netdev_mq() time  */
 	unsigned int		num_rx_queues;
+#endif
 
 	struct netdev_queue	rx_queue;
 
@@ -1332,20 +1381,33 @@ static inline int unregister_gifconf(unsigned int family)
 }
 
 /*
- * Incoming packets are placed on per-cpu queues so that
- * no locking is needed.
+ * Incoming packets are placed on per-cpu queues
  */
 struct softnet_data {
 	struct Qdisc		*output_queue;
 	struct list_head	poll_list;
 	struct sk_buff		*completion_queue;
 
+#ifdef CONFIG_RPS
+	struct softnet_data	*rps_ipi_list;
+
 	/* Elements below can be accessed between CPUs for RPS */
 	struct call_single_data	csd ____cacheline_aligned_in_smp;
+	struct softnet_data	*rps_ipi_next;
+	unsigned int		cpu;
+	unsigned int		input_queue_head;
+#endif
 	struct sk_buff_head	input_pkt_queue;
 	struct napi_struct	backlog;
 };
 
+static inline void input_queue_head_incr(struct softnet_data *sd)
+{
+#ifdef CONFIG_RPS
+	sd->input_queue_head++;
+#endif
+}
+
 DECLARE_PER_CPU_ALIGNED(struct softnet_data, softnet_data);
 
 #define HAVE_NETIF_QUEUE
@@ -1973,6 +2035,22 @@ extern struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name,
 extern int		register_netdev(struct net_device *dev);
 extern void		unregister_netdev(struct net_device *dev);
 
+/* General hardware address lists handling functions */
+extern int __hw_addr_add_multiple(struct netdev_hw_addr_list *to_list,
+				  struct netdev_hw_addr_list *from_list,
+				  int addr_len, unsigned char addr_type);
+extern void __hw_addr_del_multiple(struct netdev_hw_addr_list *to_list,
+				   struct netdev_hw_addr_list *from_list,
+				   int addr_len, unsigned char addr_type);
+extern int __hw_addr_sync(struct netdev_hw_addr_list *to_list,
+			  struct netdev_hw_addr_list *from_list,
+			  int addr_len);
+extern void __hw_addr_unsync(struct netdev_hw_addr_list *to_list,
+			     struct netdev_hw_addr_list *from_list,
+			     int addr_len);
+extern void __hw_addr_flush(struct netdev_hw_addr_list *list);
+extern void __hw_addr_init(struct netdev_hw_addr_list *list);
+
 /* Functions used for device addresses handling */
 extern int dev_addr_add(struct net_device *dev, unsigned char *addr,
 			unsigned char addr_type);
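
Usage sketch (illustration only, not part of this patch): the __hw_addr_* helpers take a bare netdev_hw_addr_list, so in principle they can also manage a list that is not embedded in a net_device. struct foo and its wanted list are hypothetical.

#include <linux/netdevice.h>

struct foo {
	struct netdev_hw_addr_list wanted;	/* addresses we intend to program */
};

static void foo_init_addr_list(struct foo *f)
{
	__hw_addr_init(&f->wanted);		/* empty list, count = 0 */
}

static void foo_destroy_addr_list(struct foo *f)
{
	__hw_addr_flush(&f->wanted);		/* releases every entry */
}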
@@ -1984,26 +2062,34 @@ extern int dev_addr_add_multiple(struct net_device *to_dev,
 extern int dev_addr_del_multiple(struct net_device *to_dev,
 				 struct net_device *from_dev,
 				 unsigned char addr_type);
+extern void dev_addr_flush(struct net_device *dev);
+extern int dev_addr_init(struct net_device *dev);
+
+/* Functions used for unicast addresses handling */
+extern int dev_uc_add(struct net_device *dev, unsigned char *addr);
+extern int dev_uc_del(struct net_device *dev, unsigned char *addr);
+extern int dev_uc_sync(struct net_device *to, struct net_device *from);
+extern void dev_uc_unsync(struct net_device *to, struct net_device *from);
+extern void dev_uc_flush(struct net_device *dev);
+extern void dev_uc_init(struct net_device *dev);
+
+/* Functions used for multicast addresses handling */
+extern int dev_mc_add(struct net_device *dev, unsigned char *addr);
+extern int dev_mc_add_global(struct net_device *dev, unsigned char *addr);
+extern int dev_mc_del(struct net_device *dev, unsigned char *addr);
+extern int dev_mc_del_global(struct net_device *dev, unsigned char *addr);
+extern int dev_mc_sync(struct net_device *to, struct net_device *from);
+extern void dev_mc_unsync(struct net_device *to, struct net_device *from);
+extern void dev_mc_flush(struct net_device *dev);
+extern void dev_mc_init(struct net_device *dev);
 
 /* Functions used for secondary unicast and multicast support */
 extern void		dev_set_rx_mode(struct net_device *dev);
 extern void		__dev_set_rx_mode(struct net_device *dev);
-extern int		dev_unicast_delete(struct net_device *dev, void *addr);
-extern int		dev_unicast_add(struct net_device *dev, void *addr);
-extern int		dev_unicast_sync(struct net_device *to, struct net_device *from);
-extern void		dev_unicast_unsync(struct net_device *to, struct net_device *from);
-extern int		dev_mc_delete(struct net_device *dev, void *addr, int alen, int all);
-extern int		dev_mc_add(struct net_device *dev, void *addr, int alen, int newonly);
-extern int		dev_mc_sync(struct net_device *to, struct net_device *from);
-extern void		dev_mc_unsync(struct net_device *to, struct net_device *from);
-extern int		__dev_addr_delete(struct dev_addr_list **list, int *count, void *addr, int alen, int all);
-extern int		__dev_addr_add(struct dev_addr_list **list, int *count, void *addr, int alen, int newonly);
-extern int		__dev_addr_sync(struct dev_addr_list **to, int *to_count, struct dev_addr_list **from, int *from_count);
-extern void		__dev_addr_unsync(struct dev_addr_list **to, int *to_count, struct dev_addr_list **from, int *from_count);
 extern int		dev_set_promiscuity(struct net_device *dev, int inc);
 extern int		dev_set_allmulti(struct net_device *dev, int inc);
 extern void		netdev_state_change(struct net_device *dev);
-extern void		netdev_bonding_change(struct net_device *dev,
+extern int		netdev_bonding_change(struct net_device *dev,
 					      unsigned long event);
 extern void		netdev_features_change(struct net_device *dev);
 /* Load a device via the kmod */
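
Conversion sketch (illustration only, not part of this patch): the address-length and newonly/all arguments of the old calls are gone; judging by the new prototypes and the global_use flag above, global users are handled by the dev_mc_add_global()/dev_mc_del_global() variants. The wrapper functions below are hypothetical.

#include <linux/netdevice.h>

static int foo_join_hw_mcast(struct net_device *dev, unsigned char *addr)
{
	/* old API: return dev_mc_add(dev, addr, ETH_ALEN, 0); */
	return dev_mc_add(dev, addr);
}

static int foo_leave_hw_mcast(struct net_device *dev, unsigned char *addr)
{
	/* old API: return dev_mc_delete(dev, addr, ETH_ALEN, 0); */
	return dev_mc_del(dev, addr);
}

static int foo_add_secondary_unicast(struct net_device *dev, unsigned char *addr)
{
	/* old API: return dev_unicast_add(dev, addr); */
	return dev_uc_add(dev, addr);
}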
@@ -2073,54 +2159,14 @@ static inline void netif_set_gso_max_size(struct net_device *dev,
 	dev->gso_max_size = size;
 }
 
-static inline void skb_bond_set_mac_by_master(struct sk_buff *skb,
-					      struct net_device *master)
-{
-	if (skb->pkt_type == PACKET_HOST) {
-		u16 *dest = (u16 *) eth_hdr(skb)->h_dest;
-
-		memcpy(dest, master->dev_addr, ETH_ALEN);
-	}
-}
+extern int __skb_bond_should_drop(struct sk_buff *skb,
+				  struct net_device *master);
 
-/* On bonding slaves other than the currently active slave, suppress
- * duplicates except for 802.3ad ETH_P_SLOW, alb non-mcast/bcast, and
- * ARP on active-backup slaves with arp_validate enabled.
- */
-static inline int skb_bond_should_drop(struct sk_buff *skb)
-{
-	struct net_device *dev = skb->dev;
-	struct net_device *master = dev->master;
-
-	if (master) {
-		if (master->priv_flags & IFF_MASTER_ARPMON)
-			dev->last_rx = jiffies;
-
-		if ((master->priv_flags & IFF_MASTER_ALB) && master->br_port) {
-			/* Do address unmangle. The local destination address
-			 * will be always the one master has. Provides the right
-			 * functionality in a bridge.
-			 */
-			skb_bond_set_mac_by_master(skb, master);
-		}
-
-		if (dev->priv_flags & IFF_SLAVE_INACTIVE) {
-			if ((dev->priv_flags & IFF_SLAVE_NEEDARP) &&
-			    skb->protocol == __cpu_to_be16(ETH_P_ARP))
-				return 0;
-
-			if (master->priv_flags & IFF_MASTER_ALB) {
-				if (skb->pkt_type != PACKET_BROADCAST &&
-				    skb->pkt_type != PACKET_MULTICAST)
-					return 0;
-			}
-			if (master->priv_flags & IFF_MASTER_8023AD &&
-			    skb->protocol == __cpu_to_be16(ETH_P_SLOW))
-				return 0;
-
-			return 1;
-		}
-	}
+static inline int skb_bond_should_drop(struct sk_buff *skb,
+				       struct net_device *master)
+{
+	if (master)
+		return __skb_bond_should_drop(skb, master);
 	return 0;
 }
 
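
Caller-side sketch (illustration only, not part of this patch): with the bonding logic moved out of line into __skb_bond_should_drop(), the inline wrapper now expects the caller to look up the master device itself. foo_rx_should_drop() is hypothetical; it only demonstrates the new two-argument signature.

#include <linux/netdevice.h>
#include <linux/skbuff.h>

static int foo_rx_should_drop(struct sk_buff *skb)
{
	struct net_device *master = skb->dev->master;

	/* old API: return skb_bond_should_drop(skb); */
	return skb_bond_should_drop(skb, master);
}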