path: root/include/linux/netdevice.h
Diffstat (limited to 'include/linux/netdevice.h')
-rw-r--r--  include/linux/netdevice.h | 484
1 file changed, 305 insertions, 179 deletions
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index c79a88be7c33..46c36ffe20ee 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -31,6 +31,7 @@
 #include <linux/if_link.h>
 
 #ifdef __KERNEL__
+#include <linux/pm_qos_params.h>
 #include <linux/timer.h>
 #include <linux/delay.h>
 #include <linux/mm.h>
@@ -53,6 +54,7 @@
 
 struct vlan_group;
 struct netpoll_info;
+struct phy_device;
 /* 802.11 specific */
 struct wireless_dev;
 					/* source back-compat hooks */
@@ -64,6 +66,11 @@ struct wireless_dev;
 #define HAVE_FREE_NETDEV		/* free_netdev() */
 #define HAVE_NETDEV_PRIV		/* netdev_priv() */
 
+/* hardware address assignment types */
+#define NET_ADDR_PERM		0	/* address is permanent (default) */
+#define NET_ADDR_RANDOM		1	/* address is generated randomly */
+#define NET_ADDR_STOLEN		2	/* address is stolen from other device */
+
 /* Backlog congestion levels */
 #define NET_RX_SUCCESS		0	/* keep 'em coming, baby */
 #define NET_RX_DROP		1	/* packet dropped */
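
The new NET_ADDR_* values let a driver record how dev_addr was obtained, so userspace can tell a burned-in MAC from an invented one. A minimal sketch, assuming a driver with no address EEPROM (my_assign_mac is a made-up helper; random_ether_addr() and the NET_ADDR_RANDOM constant come from the tree):

#include <linux/etherdevice.h>
#include <linux/netdevice.h>

/* Hypothetical probe fragment: no stored MAC, so generate one and
 * mark it as randomly assigned rather than permanent. */
static void my_assign_mac(struct net_device *dev)
{
	random_ether_addr(dev->dev_addr);
	dev->addr_assign_type = NET_ADDR_RANDOM;
}
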
@@ -158,45 +165,39 @@ static inline bool dev_xmit_complete(int rc)
 #define MAX_HEADER (LL_MAX_HEADER + 48)
 #endif
 
-#endif /* __KERNEL__ */
-
 /*
- *	Network device statistics. Akin to the 2.0 ether stats but
- *	with byte counters.
+ *	Old network device statistics. Fields are native words
+ *	(unsigned long) so they can be read and written atomically.
  */
 
 struct net_device_stats {
-	unsigned long	rx_packets;		/* total packets received	*/
-	unsigned long	tx_packets;		/* total packets transmitted	*/
-	unsigned long	rx_bytes;		/* total bytes received		*/
-	unsigned long	tx_bytes;		/* total bytes transmitted	*/
-	unsigned long	rx_errors;		/* bad packets received		*/
-	unsigned long	tx_errors;		/* packet transmit problems	*/
-	unsigned long	rx_dropped;		/* no space in linux buffers	*/
-	unsigned long	tx_dropped;		/* no space available in linux	*/
-	unsigned long	multicast;		/* multicast packets received	*/
+	unsigned long	rx_packets;
+	unsigned long	tx_packets;
+	unsigned long	rx_bytes;
+	unsigned long	tx_bytes;
+	unsigned long	rx_errors;
+	unsigned long	tx_errors;
+	unsigned long	rx_dropped;
+	unsigned long	tx_dropped;
+	unsigned long	multicast;
 	unsigned long	collisions;
-
-	/* detailed rx_errors: */
 	unsigned long	rx_length_errors;
-	unsigned long	rx_over_errors;		/* receiver ring buff overflow	*/
-	unsigned long	rx_crc_errors;		/* recved pkt with crc error	*/
-	unsigned long	rx_frame_errors;	/* recv'd frame alignment error */
-	unsigned long	rx_fifo_errors;		/* recv'r fifo overrun	*/
-	unsigned long	rx_missed_errors;	/* receiver missed packet	*/
-
-	/* detailed tx_errors */
+	unsigned long	rx_over_errors;
+	unsigned long	rx_crc_errors;
+	unsigned long	rx_frame_errors;
+	unsigned long	rx_fifo_errors;
+	unsigned long	rx_missed_errors;
 	unsigned long	tx_aborted_errors;
 	unsigned long	tx_carrier_errors;
 	unsigned long	tx_fifo_errors;
 	unsigned long	tx_heartbeat_errors;
 	unsigned long	tx_window_errors;
-
-	/* for cslip etc */
 	unsigned long	rx_compressed;
 	unsigned long	tx_compressed;
 };
 
+#endif /* __KERNEL__ */
+
 
 /* Media selection options. */
 enum {
@@ -218,34 +219,6 @@ struct neighbour;
 struct neigh_parms;
 struct sk_buff;
 
-struct netif_rx_stats {
-	unsigned total;
-	unsigned dropped;
-	unsigned time_squeeze;
-	unsigned cpu_collision;
-};
-
-DECLARE_PER_CPU(struct netif_rx_stats, netdev_rx_stat);
-
-struct dev_addr_list {
-	struct dev_addr_list	*next;
-	u8			da_addr[MAX_ADDR_LEN];
-	u8			da_addrlen;
-	u8			da_synced;
-	int			da_users;
-	int			da_gusers;
-};
-
-/*
- *	We tag multicasts with these structures.
- */
-
-#define dev_mc_list	dev_addr_list
-#define dmi_addr	da_addr
-#define dmi_addrlen	da_addrlen
-#define dmi_users	da_users
-#define dmi_gusers	da_gusers
-
 struct netdev_hw_addr {
 	struct list_head	list;
 	unsigned char		addr[MAX_ADDR_LEN];
@@ -254,8 +227,10 @@ struct netdev_hw_addr {
 #define NETDEV_HW_ADDR_T_SAN		2
 #define NETDEV_HW_ADDR_T_SLAVE		3
 #define NETDEV_HW_ADDR_T_UNICAST	4
+#define NETDEV_HW_ADDR_T_MULTICAST	5
 	int			refcount;
 	bool			synced;
+	bool			global_use;
 	struct rcu_head		rcu_head;
 };
 
@@ -264,16 +239,20 @@ struct netdev_hw_addr_list {
 	int			count;
 };
 
-#define netdev_uc_count(dev) ((dev)->uc.count)
-#define netdev_uc_empty(dev) ((dev)->uc.count == 0)
-#define netdev_for_each_uc_addr(ha, dev) \
-	list_for_each_entry(ha, &dev->uc.list, list)
+#define netdev_hw_addr_list_count(l) ((l)->count)
+#define netdev_hw_addr_list_empty(l) (netdev_hw_addr_list_count(l) == 0)
+#define netdev_hw_addr_list_for_each(ha, l) \
+	list_for_each_entry(ha, &(l)->list, list)
 
-#define netdev_mc_count(dev) ((dev)->mc_count)
-#define netdev_mc_empty(dev) (netdev_mc_count(dev) == 0)
+#define netdev_uc_count(dev) netdev_hw_addr_list_count(&(dev)->uc)
+#define netdev_uc_empty(dev) netdev_hw_addr_list_empty(&(dev)->uc)
+#define netdev_for_each_uc_addr(ha, dev) \
+	netdev_hw_addr_list_for_each(ha, &(dev)->uc)
 
-#define netdev_for_each_mc_addr(mclist, dev) \
-	for (mclist = dev->mc_list; mclist; mclist = mclist->next)
+#define netdev_mc_count(dev) netdev_hw_addr_list_count(&(dev)->mc)
+#define netdev_mc_empty(dev) netdev_hw_addr_list_empty(&(dev)->mc)
+#define netdev_for_each_mc_addr(ha, dev) \
+	netdev_hw_addr_list_for_each(ha, &(dev)->mc)
 
 struct hh_cache {
 	struct hh_cache *hh_next;	/* Next entry			     */
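
With dev->mc_list and the dmi_* aliases gone, drivers walk multicast addresses through struct netdev_hw_addr using the rewritten macros above. A sketch of the new-style loop in an ndo_set_rx_mode handler (the hash computation is a toy stand-in for real filter programming):

#include <linux/netdevice.h>

static void my_set_rx_mode(struct net_device *dev)
{
	struct netdev_hw_addr *ha;
	unsigned int filter = 0;

	/* Called with dev->addr_list_lock already held by the core. */
	netdev_for_each_mc_addr(ha, dev)
		filter |= 1 << (ha->addr[5] & 0x1f);

	pr_debug("%s: %d multicast entries, filter %#x\n",
		 dev->name, netdev_mc_count(dev), filter);
}
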
@@ -402,6 +381,8 @@ enum gro_result {
 };
 typedef enum gro_result gro_result_t;
 
+typedef struct sk_buff *rx_handler_func_t(struct sk_buff *skb);
+
 extern void __napi_schedule(struct napi_struct *n);
 
 static inline int napi_disable_pending(struct napi_struct *n)
@@ -525,11 +506,90 @@ struct netdev_queue {
  * please use this field instead of dev->trans_start
  */
 	unsigned long		trans_start;
-	unsigned long		tx_bytes;
-	unsigned long		tx_packets;
-	unsigned long		tx_dropped;
+	u64			tx_bytes;
+	u64			tx_packets;
+	u64			tx_dropped;
 } ____cacheline_aligned_in_smp;
 
+#ifdef CONFIG_RPS
+/*
+ * This structure holds an RPS map which can be of variable length.  The
+ * map is an array of CPUs.
+ */
+struct rps_map {
+	unsigned int len;
+	struct rcu_head rcu;
+	u16 cpus[0];
+};
+#define RPS_MAP_SIZE(_num) (sizeof(struct rps_map) + (_num * sizeof(u16)))
+
+/*
+ * The rps_dev_flow structure contains the mapping of a flow to a CPU and the
+ * tail pointer for that CPU's input queue at the time of last enqueue.
+ */
+struct rps_dev_flow {
+	u16 cpu;
+	u16 fill;
+	unsigned int last_qtail;
+};
+
+/*
+ * The rps_dev_flow_table structure contains a table of flow mappings.
+ */
+struct rps_dev_flow_table {
+	unsigned int mask;
+	struct rcu_head rcu;
+	struct work_struct free_work;
+	struct rps_dev_flow flows[0];
+};
+#define RPS_DEV_FLOW_TABLE_SIZE(_num) (sizeof(struct rps_dev_flow_table) + \
+		(_num * sizeof(struct rps_dev_flow)))
+
+/*
+ * The rps_sock_flow_table contains mappings of flows to the last CPU
+ * on which they were processed by the application (set in recvmsg).
+ */
+struct rps_sock_flow_table {
+	unsigned int mask;
+	u16 ents[0];
+};
+#define RPS_SOCK_FLOW_TABLE_SIZE(_num) (sizeof(struct rps_sock_flow_table) + \
+		(_num * sizeof(u16)))
+
+#define RPS_NO_CPU 0xffff
+
+static inline void rps_record_sock_flow(struct rps_sock_flow_table *table,
+					u32 hash)
+{
+	if (table && hash) {
+		unsigned int cpu, index = hash & table->mask;
+
+		/* We only give a hint, preemption can change cpu under us */
+		cpu = raw_smp_processor_id();
+
+		if (table->ents[index] != cpu)
+			table->ents[index] = cpu;
+	}
+}
+
+static inline void rps_reset_sock_flow(struct rps_sock_flow_table *table,
+				       u32 hash)
+{
+	if (table && hash)
+		table->ents[hash & table->mask] = RPS_NO_CPU;
+}
+
+extern struct rps_sock_flow_table *rps_sock_flow_table;
+
+/* This structure contains an instance of an RX queue. */
+struct netdev_rx_queue {
+	struct rps_map *rps_map;
+	struct rps_dev_flow_table *rps_flow_table;
+	struct kobject kobj;
+	struct netdev_rx_queue *first;
+	atomic_t count;
+} ____cacheline_aligned_in_smp;
+#endif /* CONFIG_RPS */
 
 /*
  * This structure defines the management hooks for network devices.
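
The RPS structures added above use zero-length arrays sized with the *_SIZE() macros. A rough sketch of how a map steering one receive queue to CPUs 0 and 2 might be allocated (the function name is invented; in the tree the equivalent work is done from sysfs when a queue's rps_cpus mask is written):

#include <linux/slab.h>
#include <linux/netdevice.h>

static struct rps_map *my_build_rps_map(void)
{
	struct rps_map *map;

	map = kzalloc(RPS_MAP_SIZE(2), GFP_KERNEL);
	if (!map)
		return NULL;

	map->len = 2;
	map->cpus[0] = 0;
	map->cpus[1] = 2;
	return map;	/* published to the queue with rcu_assign_pointer() */
}
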
@@ -602,10 +662,19 @@ struct netdev_queue {
  *	Callback uses when the transmitter has not made any progress
  *	for dev->watchdog ticks.
  *
+ * struct rtnl_link_stats64* (*ndo_get_stats64)(struct net_device *dev,
+ *                           struct rtnl_link_stats64 *storage);
  * struct net_device_stats* (*ndo_get_stats)(struct net_device *dev);
  *	Called when a user wants to get the network device usage
- *	statistics. If not defined, the counters in dev->stats will
- *	be used.
+ *	statistics. Drivers must do one of the following:
+ *	1. Define @ndo_get_stats64 to fill in a zero-initialised
+ *	   rtnl_link_stats64 structure passed by the caller.
+ *	2. Define @ndo_get_stats to update a net_device_stats structure
+ *	   (which should normally be dev->stats) and return a pointer to
+ *	   it. The structure may be changed asynchronously only if each
+ *	   field is written atomically.
+ *	3. Update dev->stats asynchronously and atomically, and define
+ *	   neither operation.
  *
  * void (*ndo_vlan_rx_register)(struct net_device *dev, struct vlan_group *grp);
  *	If device support VLAN receive accleration
@@ -629,6 +698,9 @@ struct netdev_queue {
  * int (*ndo_set_vf_tx_rate)(struct net_device *dev, int vf, int rate);
  * int (*ndo_get_vf_config)(struct net_device *dev,
  *			    int vf, struct ifla_vf_info *ivf);
+ * int (*ndo_set_vf_port)(struct net_device *dev, int vf,
+ *			  struct nlattr *port[]);
+ * int (*ndo_get_vf_port)(struct net_device *dev, int vf, struct sk_buff *skb);
  */
 #define HAVE_NET_DEVICE_OPS
 struct net_device_ops {
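
Per the new stats rules documented above, a driver that keeps 64-bit counters should take the first option and fill in the zero-initialised structure passed by the core. A hypothetical sketch (struct my_priv and its counters are invented):

#include <linux/netdevice.h>
#include <linux/if_link.h>

struct my_priv {
	u64	rx_packets;
	u64	rx_bytes;
	u64	tx_packets;
	u64	tx_bytes;
};

/* Sketch of an ndo_get_stats64 implementation: copy the driver's own
 * 64-bit counters into the caller-provided storage and return it. */
static struct rtnl_link_stats64 *my_get_stats64(struct net_device *dev,
						struct rtnl_link_stats64 *storage)
{
	struct my_priv *p = netdev_priv(dev);

	storage->rx_packets = p->rx_packets;
	storage->rx_bytes   = p->rx_bytes;
	storage->tx_packets = p->tx_packets;
	storage->tx_bytes   = p->tx_bytes;
	return storage;
}
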
@@ -657,6 +729,8 @@ struct net_device_ops {
 						 struct neigh_parms *);
 	void			(*ndo_tx_timeout) (struct net_device *dev);
 
+	struct rtnl_link_stats64* (*ndo_get_stats64)(struct net_device *dev,
+						     struct rtnl_link_stats64 *storage);
 	struct net_device_stats* (*ndo_get_stats)(struct net_device *dev);
 
 	void			(*ndo_vlan_rx_register)(struct net_device *dev,
@@ -667,6 +741,9 @@ struct net_device_ops {
 						  unsigned short vid);
 #ifdef CONFIG_NET_POLL_CONTROLLER
 	void			(*ndo_poll_controller)(struct net_device *dev);
+	int			(*ndo_netpoll_setup)(struct net_device *dev,
+						     struct netpoll_info *info);
+	void			(*ndo_netpoll_cleanup)(struct net_device *dev);
 #endif
 	int			(*ndo_set_vf_mac)(struct net_device *dev,
 						  int queue, u8 *mac);
@@ -677,6 +754,11 @@ struct net_device_ops {
 	int			(*ndo_get_vf_config)(struct net_device *dev,
 						     int vf,
 						     struct ifla_vf_info *ivf);
+	int			(*ndo_set_vf_port)(struct net_device *dev,
+						   int vf,
+						   struct nlattr *port[]);
+	int			(*ndo_get_vf_port)(struct net_device *dev,
+						   int vf, struct sk_buff *skb);
 #if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
 	int			(*ndo_fcoe_enable)(struct net_device *dev);
 	int			(*ndo_fcoe_disable)(struct net_device *dev);
@@ -708,9 +790,12 @@ struct net_device {
 	/*
 	 * This is the first field of the "visible" part of this structure
 	 * (i.e. as seen by users in the "Space.c" file).  It is the name
-	 * the interface.
+	 * of the interface.
 	 */
 	char			name[IFNAMSIZ];
+
+	struct pm_qos_request_list	pm_qos_req;
+
 	/* device name hash chain */
 	struct hlist_node	name_hlist;
 	/* snmp alias */
@@ -764,6 +849,7 @@ struct net_device {
 #define NETIF_F_SCTP_CSUM	(1 << 25) /* SCTP checksum offload */
 #define NETIF_F_FCOE_MTU	(1 << 26) /* Supports max FCoE MTU, 2158 bytes*/
 #define NETIF_F_NTUPLE		(1 << 27) /* N-tuple filters supported */
+#define NETIF_F_RXHASH		(1 << 28) /* Receive hashing offload */
 
 	/* Segmentation offload features */
 #define NETIF_F_GSO_SHIFT	16
@@ -776,7 +862,8 @@ struct net_device {
 #define NETIF_F_FSO		(SKB_GSO_FCOE << NETIF_F_GSO_SHIFT)
 
 	/* List of features with software fallbacks. */
-#define NETIF_F_GSO_SOFTWARE	(NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6)
+#define NETIF_F_GSO_SOFTWARE	(NETIF_F_TSO | NETIF_F_TSO_ECN | \
+				 NETIF_F_TSO6 | NETIF_F_UFO)
 
 
 #define NETIF_F_GEN_CSUM	(NETIF_F_NO_CSUM | NETIF_F_HW_CSUM)
@@ -820,7 +907,7 @@ struct net_device {
 	unsigned char		operstate; /* RFC2863 operstate */
 	unsigned char		link_mode; /* mapping policy to operstate */
 
-	unsigned		mtu;	/* interface MTU value		*/
+	unsigned int		mtu;	/* interface MTU value		*/
 	unsigned short		type;	/* interface hardware type	*/
 	unsigned short		hard_header_len;	/* hardware hdr length	*/
 
@@ -837,15 +924,14 @@ struct net_device {
 
 	/* Interface address info. */
 	unsigned char		perm_addr[MAX_ADDR_LEN]; /* permanent hw address */
+	unsigned char		addr_assign_type; /* hw address assignment type */
 	unsigned char		addr_len;	/* hardware address length	*/
 	unsigned short		dev_id;		/* for shared network cards */
 
-	struct netdev_hw_addr_list	uc;	/* Secondary unicast
-						   mac addresses */
-	int			uc_promisc;
 	spinlock_t		addr_list_lock;
-	struct dev_addr_list	*mc_list;	/* Multicast mac addresses */
-	int			mc_count;	/* Number of installed mcasts	*/
+	struct netdev_hw_addr_list	uc;	/* Unicast mac addresses */
+	struct netdev_hw_addr_list	mc;	/* Multicast mac addresses */
+	int			uc_promisc;
 	unsigned int		promiscuity;
 	unsigned int		allmulti;
 
@@ -878,7 +964,18 @@ struct net_device {
 
 	unsigned char		broadcast[MAX_ADDR_LEN];	/* hw bcast add	*/
 
+#ifdef CONFIG_RPS
+	struct kset		*queues_kset;
+
+	struct netdev_rx_queue	*_rx;
+
+	/* Number of RX queues allocated at alloc_netdev_mq() time  */
+	unsigned int		num_rx_queues;
+#endif
+
 	struct netdev_queue	rx_queue;
+	rx_handler_func_t	*rx_handler;
+	void			*rx_handler_data;
 
 	struct netdev_queue	*_tx ____cacheline_aligned_in_smp;
 
@@ -946,10 +1043,6 @@ struct net_device {
 	/* mid-layer private */
 	void			*ml_priv;
 
-	/* bridge stuff */
-	struct net_bridge_port	*br_port;
-	/* macvlan */
-	struct macvlan_port	*macvlan_port;
 	/* GARP */
 	struct garp_port	*garp_port;
 
@@ -979,6 +1072,9 @@ struct net_device {
 #endif
 	/* n-tuple filter list attached to this device */
 	struct ethtool_rx_ntuple_list ethtool_ntuple_list;
+
+	/* phy device may attach itself for hardware timestamping */
+	struct phy_device *phydev;
 };
 #define	to_net_dev(d) container_of(d, struct net_device, dev)
 
@@ -1009,11 +1105,7 @@ static inline void netdev_for_each_tx_queue(struct net_device *dev,
 static inline
 struct net *dev_net(const struct net_device *dev)
 {
-#ifdef CONFIG_NET_NS
-	return dev->nd_net;
-#else
-	return &init_net;
-#endif
+	return read_pnet(&dev->nd_net);
 }
 
 static inline
@@ -1194,8 +1286,8 @@ extern void dev_add_pack(struct packet_type *pt);
 extern void		dev_remove_pack(struct packet_type *pt);
 extern void		__dev_remove_pack(struct packet_type *pt);
 
-extern struct net_device	*dev_get_by_flags(struct net *net, unsigned short flags,
+extern struct net_device	*dev_get_by_flags_rcu(struct net *net, unsigned short flags,
 						      unsigned short mask);
 extern struct net_device	*dev_get_by_name(struct net *net, const char *name);
 extern struct net_device	*dev_get_by_name_rcu(struct net *net, const char *name);
 extern struct net_device	*__dev_get_by_name(struct net *net, const char *name);
@@ -1306,19 +1398,52 @@ static inline int unregister_gifconf(unsigned int family)
 }
 
 /*
- * Incoming packets are placed on per-cpu queues so that
- * no locking is needed.
+ * Incoming packets are placed on per-cpu queues
  */
 struct softnet_data {
 	struct Qdisc		*output_queue;
-	struct sk_buff_head	input_pkt_queue;
+	struct Qdisc		**output_queue_tailp;
 	struct list_head	poll_list;
 	struct sk_buff		*completion_queue;
-
+	struct sk_buff_head	process_queue;
+
+	/* stats */
+	unsigned int		processed;
+	unsigned int		time_squeeze;
+	unsigned int		cpu_collision;
+	unsigned int		received_rps;
+
+#ifdef CONFIG_RPS
+	struct softnet_data	*rps_ipi_list;
+
+	/* Elements below can be accessed between CPUs for RPS */
+	struct call_single_data	csd ____cacheline_aligned_in_smp;
+	struct softnet_data	*rps_ipi_next;
+	unsigned int		cpu;
+	unsigned int		input_queue_head;
+	unsigned int		input_queue_tail;
+#endif
+	unsigned		dropped;
+	struct sk_buff_head	input_pkt_queue;
 	struct napi_struct	backlog;
 };
 
-DECLARE_PER_CPU(struct softnet_data,softnet_data);
+static inline void input_queue_head_incr(struct softnet_data *sd)
+{
+#ifdef CONFIG_RPS
+	sd->input_queue_head++;
+#endif
+}
+
+static inline void input_queue_tail_incr_save(struct softnet_data *sd,
+					      unsigned int *qtail)
+{
+#ifdef CONFIG_RPS
+	*qtail = ++sd->input_queue_tail;
+#endif
+}
+
+DECLARE_PER_CPU_ALIGNED(struct softnet_data, softnet_data);
 
 #define HAVE_NETIF_QUEUE
 
@@ -1545,6 +1670,9 @@ static inline int netif_is_multiqueue(const struct net_device *dev)
 	return (dev->num_tx_queues > 1);
 }
 
+extern void netif_set_real_num_tx_queues(struct net_device *dev,
+					  unsigned int txq);
+
 /* Use this variant when it is known for sure that it
 * is executing from hardware interrupt context or with hardware interrupts
 * disabled.
@@ -1582,6 +1710,11 @@ static inline void napi_free_frags(struct napi_struct *napi)
 	napi->skb = NULL;
 }
 
+extern int netdev_rx_handler_register(struct net_device *dev,
+				      rx_handler_func_t *rx_handler,
+				      void *rx_handler_data);
+extern void netdev_rx_handler_unregister(struct net_device *dev);
+
 extern void		netif_nit_deliver(struct sk_buff *skb);
 extern int		dev_valid_name(const char *name);
 extern int		dev_ioctl(struct net *net, unsigned int cmd, void __user *);
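
netdev_rx_handler_register() lets a single upper layer claim a device's receive path through the rx_handler hook added to struct net_device above. A hedged sketch of a consumer (names are invented; the register call must run under rtnl_lock):

#include <linux/netdevice.h>
#include <linux/rtnetlink.h>

/* Sketch only: return the skb to let it continue up the stack normally,
 * or NULL once it has been consumed. */
static struct sk_buff *my_handle_frame(struct sk_buff *skb)
{
	return skb;
}

static int my_attach(struct net_device *dev, void *port_priv)
{
	int err;

	rtnl_lock();
	err = netdev_rx_handler_register(dev, my_handle_frame, port_priv);
	rtnl_unlock();
	return err;
}
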
@@ -1661,6 +1794,8 @@ extern void netif_carrier_on(struct net_device *dev);
 
 extern void netif_carrier_off(struct net_device *dev);
 
+extern void netif_notify_peers(struct net_device *dev);
+
 /**
  *	netif_dormant_on - mark device as dormant.
  *	@dev: network device
@@ -1945,6 +2080,22 @@ extern struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name,
 extern int		register_netdev(struct net_device *dev);
 extern void		unregister_netdev(struct net_device *dev);
 
+/* General hardware address lists handling functions */
+extern int __hw_addr_add_multiple(struct netdev_hw_addr_list *to_list,
+				  struct netdev_hw_addr_list *from_list,
+				  int addr_len, unsigned char addr_type);
+extern void __hw_addr_del_multiple(struct netdev_hw_addr_list *to_list,
+				   struct netdev_hw_addr_list *from_list,
+				   int addr_len, unsigned char addr_type);
+extern int __hw_addr_sync(struct netdev_hw_addr_list *to_list,
+			  struct netdev_hw_addr_list *from_list,
+			  int addr_len);
+extern void __hw_addr_unsync(struct netdev_hw_addr_list *to_list,
+			     struct netdev_hw_addr_list *from_list,
+			     int addr_len);
+extern void __hw_addr_flush(struct netdev_hw_addr_list *list);
+extern void __hw_addr_init(struct netdev_hw_addr_list *list);
+
 /* Functions used for device addresses handling */
 extern int dev_addr_add(struct net_device *dev, unsigned char *addr,
 			unsigned char addr_type);
@@ -1956,35 +2107,46 @@ extern int dev_addr_add_multiple(struct net_device *to_dev,
 extern int dev_addr_del_multiple(struct net_device *to_dev,
 				 struct net_device *from_dev,
 				 unsigned char addr_type);
+extern void dev_addr_flush(struct net_device *dev);
+extern int dev_addr_init(struct net_device *dev);
+
+/* Functions used for unicast addresses handling */
+extern int dev_uc_add(struct net_device *dev, unsigned char *addr);
+extern int dev_uc_del(struct net_device *dev, unsigned char *addr);
+extern int dev_uc_sync(struct net_device *to, struct net_device *from);
+extern void dev_uc_unsync(struct net_device *to, struct net_device *from);
+extern void dev_uc_flush(struct net_device *dev);
+extern void dev_uc_init(struct net_device *dev);
+
+/* Functions used for multicast addresses handling */
+extern int dev_mc_add(struct net_device *dev, unsigned char *addr);
+extern int dev_mc_add_global(struct net_device *dev, unsigned char *addr);
+extern int dev_mc_del(struct net_device *dev, unsigned char *addr);
+extern int dev_mc_del_global(struct net_device *dev, unsigned char *addr);
+extern int dev_mc_sync(struct net_device *to, struct net_device *from);
+extern void dev_mc_unsync(struct net_device *to, struct net_device *from);
+extern void dev_mc_flush(struct net_device *dev);
+extern void dev_mc_init(struct net_device *dev);
 
 /* Functions used for secondary unicast and multicast support */
 extern void		dev_set_rx_mode(struct net_device *dev);
 extern void		__dev_set_rx_mode(struct net_device *dev);
-extern int		dev_unicast_delete(struct net_device *dev, void *addr);
-extern int		dev_unicast_add(struct net_device *dev, void *addr);
-extern int		dev_unicast_sync(struct net_device *to, struct net_device *from);
-extern void		dev_unicast_unsync(struct net_device *to, struct net_device *from);
-extern int		dev_mc_delete(struct net_device *dev, void *addr, int alen, int all);
-extern int		dev_mc_add(struct net_device *dev, void *addr, int alen, int newonly);
-extern int		dev_mc_sync(struct net_device *to, struct net_device *from);
-extern void		dev_mc_unsync(struct net_device *to, struct net_device *from);
-extern int		__dev_addr_delete(struct dev_addr_list **list, int *count, void *addr, int alen, int all);
-extern int		__dev_addr_add(struct dev_addr_list **list, int *count, void *addr, int alen, int newonly);
-extern int		__dev_addr_sync(struct dev_addr_list **to, int *to_count, struct dev_addr_list **from, int *from_count);
-extern void		__dev_addr_unsync(struct dev_addr_list **to, int *to_count, struct dev_addr_list **from, int *from_count);
 extern int		dev_set_promiscuity(struct net_device *dev, int inc);
 extern int		dev_set_allmulti(struct net_device *dev, int inc);
 extern void		netdev_state_change(struct net_device *dev);
-extern void		netdev_bonding_change(struct net_device *dev,
+extern int		netdev_bonding_change(struct net_device *dev,
 					      unsigned long event);
 extern void		netdev_features_change(struct net_device *dev);
 /* Load a device via the kmod */
 extern void		dev_load(struct net *net, const char *name);
 extern void		dev_mcast_init(void);
-extern const struct net_device_stats *dev_get_stats(struct net_device *dev);
-extern void		dev_txq_stats_fold(const struct net_device *dev, struct net_device_stats *stats);
+extern struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
+					       struct rtnl_link_stats64 *storage);
+extern void		dev_txq_stats_fold(const struct net_device *dev,
+					   struct rtnl_link_stats64 *stats);
 
 extern int		netdev_max_backlog;
+extern int		netdev_tstamp_prequeue;
 extern int		weight_p;
 extern int		netdev_set_master(struct net_device *dev, struct net_device *master);
 extern int skb_checksum_help(struct sk_buff *skb);
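
The old dev_unicast_*/dev_mc_* entry points that took addr/alen/all arguments are gone; callers use the dev_uc_*/dev_mc_* functions declared above, which take the address length from dev->addr_len. A sketch of what a converted call site looks like (the multicast address is only an example):

#include <linux/netdevice.h>
#include <linux/if_ether.h>

static const unsigned char my_mcast[ETH_ALEN] = {
	0x01, 0x00, 0x5e, 0x00, 0x00, 0xfb
};

static int my_join_group(struct net_device *dev)
{
	/* Previously: dev_mc_add(dev, my_mcast, ETH_ALEN, 0); */
	return dev_mc_add(dev, (unsigned char *)my_mcast);
}
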
@@ -2045,54 +2207,14 @@ static inline void netif_set_gso_max_size(struct net_device *dev,
 	dev->gso_max_size = size;
 }
 
-static inline void skb_bond_set_mac_by_master(struct sk_buff *skb,
-					      struct net_device *master)
-{
-	if (skb->pkt_type == PACKET_HOST) {
-		u16 *dest = (u16 *) eth_hdr(skb)->h_dest;
-
-		memcpy(dest, master->dev_addr, ETH_ALEN);
-	}
-}
+extern int __skb_bond_should_drop(struct sk_buff *skb,
+				  struct net_device *master);
 
-/* On bonding slaves other than the currently active slave, suppress
- * duplicates except for 802.3ad ETH_P_SLOW, alb non-mcast/bcast, and
- * ARP on active-backup slaves with arp_validate enabled.
- */
-static inline int skb_bond_should_drop(struct sk_buff *skb)
-{
-	struct net_device *dev = skb->dev;
-	struct net_device *master = dev->master;
-
-	if (master) {
-		if (master->priv_flags & IFF_MASTER_ARPMON)
-			dev->last_rx = jiffies;
-
-		if ((master->priv_flags & IFF_MASTER_ALB) && master->br_port) {
-			/* Do address unmangle. The local destination address
-			 * will be always the one master has. Provides the right
-			 * functionality in a bridge.
-			 */
-			skb_bond_set_mac_by_master(skb, master);
-		}
-
-		if (dev->priv_flags & IFF_SLAVE_INACTIVE) {
-			if ((dev->priv_flags & IFF_SLAVE_NEEDARP) &&
-			    skb->protocol == __cpu_to_be16(ETH_P_ARP))
-				return 0;
-
-			if (master->priv_flags & IFF_MASTER_ALB) {
-				if (skb->pkt_type != PACKET_BROADCAST &&
-				    skb->pkt_type != PACKET_MULTICAST)
-					return 0;
-			}
-			if (master->priv_flags & IFF_MASTER_8023AD &&
-			    skb->protocol == __cpu_to_be16(ETH_P_SLOW))
-				return 0;
-
-			return 1;
-		}
-	}
+static inline int skb_bond_should_drop(struct sk_buff *skb,
+				       struct net_device *master)
+{
+	if (master)
+		return __skb_bond_should_drop(skb, master);
 	return 0;
 }
 
@@ -2131,25 +2253,23 @@ static inline const char *netdev_name(const struct net_device *dev)
 	return dev->name;
 }
 
-#define netdev_printk(level, netdev, format, args...)		\
-	dev_printk(level, (netdev)->dev.parent,			\
-		   "%s: " format,				\
-		   netdev_name(netdev), ##args)
-
-#define netdev_emerg(dev, format, args...)			\
-	netdev_printk(KERN_EMERG, dev, format, ##args)
-#define netdev_alert(dev, format, args...)			\
-	netdev_printk(KERN_ALERT, dev, format, ##args)
-#define netdev_crit(dev, format, args...)			\
-	netdev_printk(KERN_CRIT, dev, format, ##args)
-#define netdev_err(dev, format, args...)			\
-	netdev_printk(KERN_ERR, dev, format, ##args)
-#define netdev_warn(dev, format, args...)			\
-	netdev_printk(KERN_WARNING, dev, format, ##args)
-#define netdev_notice(dev, format, args...)			\
-	netdev_printk(KERN_NOTICE, dev, format, ##args)
-#define netdev_info(dev, format, args...)			\
-	netdev_printk(KERN_INFO, dev, format, ##args)
+extern int netdev_printk(const char *level, const struct net_device *dev,
+			 const char *format, ...)
+	__attribute__ ((format (printf, 3, 4)));
+extern int netdev_emerg(const struct net_device *dev, const char *format, ...)
+	__attribute__ ((format (printf, 2, 3)));
+extern int netdev_alert(const struct net_device *dev, const char *format, ...)
+	__attribute__ ((format (printf, 2, 3)));
+extern int netdev_crit(const struct net_device *dev, const char *format, ...)
+	__attribute__ ((format (printf, 2, 3)));
+extern int netdev_err(const struct net_device *dev, const char *format, ...)
+	__attribute__ ((format (printf, 2, 3)));
+extern int netdev_warn(const struct net_device *dev, const char *format, ...)
+	__attribute__ ((format (printf, 2, 3)));
+extern int netdev_notice(const struct net_device *dev, const char *format, ...)
+	__attribute__ ((format (printf, 2, 3)));
+extern int netdev_info(const struct net_device *dev, const char *format, ...)
+	__attribute__ ((format (printf, 2, 3)));
 
 #if defined(DEBUG)
 #define netdev_dbg(__dev, format, args...)			\
@@ -2197,20 +2317,26 @@ do { \
 	netdev_printk(level, (dev), fmt, ##args);		\
 } while (0)
 
+#define netif_level(level, priv, type, dev, fmt, args...)	\
+do {								\
+	if (netif_msg_##type(priv))				\
+		netdev_##level(dev, fmt, ##args);		\
+} while (0)
+
 #define netif_emerg(priv, type, dev, fmt, args...)		\
-	netif_printk(priv, type, KERN_EMERG, dev, fmt, ##args)
+	netif_level(emerg, priv, type, dev, fmt, ##args)
 #define netif_alert(priv, type, dev, fmt, args...)		\
-	netif_printk(priv, type, KERN_ALERT, dev, fmt, ##args)
+	netif_level(alert, priv, type, dev, fmt, ##args)
 #define netif_crit(priv, type, dev, fmt, args...)		\
-	netif_printk(priv, type, KERN_CRIT, dev, fmt, ##args)
+	netif_level(crit, priv, type, dev, fmt, ##args)
 #define netif_err(priv, type, dev, fmt, args...)		\
-	netif_printk(priv, type, KERN_ERR, dev, fmt, ##args)
+	netif_level(err, priv, type, dev, fmt, ##args)
 #define netif_warn(priv, type, dev, fmt, args...)		\
-	netif_printk(priv, type, KERN_WARNING, dev, fmt, ##args)
+	netif_level(warn, priv, type, dev, fmt, ##args)
 #define netif_notice(priv, type, dev, fmt, args...)		\
-	netif_printk(priv, type, KERN_NOTICE, dev, fmt, ##args)
+	netif_level(notice, priv, type, dev, fmt, ##args)
 #define netif_info(priv, type, dev, fmt, args...)		\
-	netif_printk(priv, type, KERN_INFO, (dev), fmt, ##args)
+	netif_level(info, priv, type, dev, fmt, ##args)
 
 #if defined(DEBUG)
 #define netif_dbg(priv, type, dev, format, args...)		\
@@ -2233,12 +2359,12 @@ do { \
 #endif
 
 #if defined(VERBOSE_DEBUG)
-#define netif_vdbg	netdev_dbg
+#define netif_vdbg	netif_dbg
 #else
 #define netif_vdbg(priv, type, dev, format, args...)	\
 ({								\
 	if (0)							\
-		netif_printk(KERN_DEBUG, dev, format, ##args); \
+		netif_printk(priv, type, KERN_DEBUG, dev, format, ##args); \
 	0;							\
 })
 #endif
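
The netif_* message helpers converted above still gate on the driver's msg_enable bitmap through netif_msg_##type(); only the actual printing now goes through the out-of-line netdev_* functions. A usage sketch, assuming a driver private struct with a msg_enable field:

#include <linux/netdevice.h>

struct my_priv {
	u32 msg_enable;
};

static void my_report_tx_error(struct net_device *dev)
{
	struct my_priv *priv = netdev_priv(dev);

	/* Emitted only if NETIF_MSG_TX_ERR is set in priv->msg_enable. */
	netif_err(priv, tx_err, dev, "transmit ring stalled\n");
}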