Diffstat (limited to 'include/linux/netdevice.h')

 include/linux/netdevice.h | 305 +++++++++++++++++++++++++++++++++++++++------
 1 file changed, 277 insertions(+), 28 deletions(-)
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 71caf7a5e6c6..5eeb2cd3631c 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -138,6 +138,9 @@ static inline bool dev_xmit_complete(int rc)
 
 #define MAX_ADDR_LEN	32		/* Largest hardware address length */
 
+/* Initial net device group. All devices belong to group 0 by default. */
+#define INIT_NETDEV_GROUP	0
+
 #ifdef __KERNEL__
 /*
  * Compute the worst case header length according to the protocols
@@ -387,7 +390,55 @@ enum gro_result {
 };
 typedef enum gro_result gro_result_t;
 
-typedef struct sk_buff *rx_handler_func_t(struct sk_buff *skb);
+/*
+ * enum rx_handler_result - Possible return values for rx_handlers.
+ * @RX_HANDLER_CONSUMED: skb was consumed by rx_handler, do not process it
+ * further.
+ * @RX_HANDLER_ANOTHER: Do another round in receive path. This is indicated in
+ * case skb->dev was changed by rx_handler.
+ * @RX_HANDLER_EXACT: Force exact delivery, no wildcard.
+ * @RX_HANDLER_PASS: Do nothing, pass the skb as if no rx_handler was called.
+ *
+ * rx_handlers are functions called from inside __netif_receive_skb(), to do
+ * special processing of the skb, prior to delivery to protocol handlers.
+ *
+ * Currently, a net_device can only have a single rx_handler registered. Trying
+ * to register a second rx_handler will return -EBUSY.
+ *
+ * To register an rx_handler on a net_device, use netdev_rx_handler_register().
+ * To unregister an rx_handler from a net_device, use
+ * netdev_rx_handler_unregister().
+ *
+ * Upon return, the rx_handler is expected to tell __netif_receive_skb() what
+ * to do with the skb.
+ *
+ * If the rx_handler consumed the skb in some way, it should return
+ * RX_HANDLER_CONSUMED. This is appropriate when the rx_handler arranged for
+ * the skb to be delivered in some other way.
+ *
+ * If the rx_handler changed skb->dev, to divert the skb to another
+ * net_device, it should return RX_HANDLER_ANOTHER. The rx_handler for the
+ * new device will be called if it exists.
+ *
+ * If the rx_handler decides the skb should be ignored, it should return
+ * RX_HANDLER_EXACT. The skb will only be delivered to protocol handlers that
+ * are registered on the exact device (ptype->dev == skb->dev).
+ *
+ * If the rx_handler didn't change skb->dev, but wants the skb to be normally
+ * delivered, it should return RX_HANDLER_PASS.
+ *
+ * A device without a registered rx_handler will behave as if rx_handler
+ * returned RX_HANDLER_PASS.
+ */
+
+enum rx_handler_result {
+	RX_HANDLER_CONSUMED,
+	RX_HANDLER_ANOTHER,
+	RX_HANDLER_EXACT,
+	RX_HANDLER_PASS,
+};
+typedef enum rx_handler_result rx_handler_result_t;
+typedef rx_handler_result_t rx_handler_func_t(struct sk_buff **pskb);
 
 extern void __napi_schedule(struct napi_struct *n);
 
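For orientation, a minimal rx_handler written against the new *pskb convention might look like the sketch below. The example_port structure, its upper_dev field, and the registration snippet are illustrative, not part of this patch; only netdev_rx_handler_register() and the rx_handler_data field are real.

	static rx_handler_result_t example_handle_frame(struct sk_buff **pskb)
	{
		struct sk_buff *skb = *pskb;
		struct example_port *port =	/* hypothetical per-port state */
			rcu_dereference(skb->dev->rx_handler_data);

		if (!port->upper_dev)
			return RX_HANDLER_PASS;		/* deliver as usual */

		skb->dev = port->upper_dev;	/* divert to the upper device */
		*pskb = skb;
		return RX_HANDLER_ANOTHER;	/* re-run the receive path */
	}

	/* Registered under rtnl_lock(), e.g. in a port-attach path:
	 *	err = netdev_rx_handler_register(slave_dev,
	 *					 example_handle_frame, port);
	 */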
@@ -551,14 +602,16 @@ struct rps_map {
 #define RPS_MAP_SIZE(_num) (sizeof(struct rps_map) + (_num * sizeof(u16)))
 
 /*
- * The rps_dev_flow structure contains the mapping of a flow to a CPU and the
- * tail pointer for that CPU's input queue at the time of last enqueue.
+ * The rps_dev_flow structure contains the mapping of a flow to a CPU, the
+ * tail pointer for that CPU's input queue at the time of last enqueue, and
+ * a hardware filter index.
  */
 struct rps_dev_flow {
 	u16 cpu;
-	u16 fill;
+	u16 filter;
 	unsigned int last_qtail;
 };
+#define RPS_NO_FILTER 0xffff
 
 /*
  * The rps_dev_flow_table structure contains a table of flow mappings.
@@ -608,6 +661,11 @@ static inline void rps_reset_sock_flow(struct rps_sock_flow_table *table,
 
 extern struct rps_sock_flow_table __rcu *rps_sock_flow_table;
 
+#ifdef CONFIG_RFS_ACCEL
+extern bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index,
+				u32 flow_id, u16 filter_id);
+#endif
+
 /* This structure contains an instance of an RX queue. */
 struct netdev_rx_queue {
 	struct rps_map __rcu *rps_map;
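To sketch how a driver's filter-expiry scan is expected to use rps_may_expire_flow(): a hardware filter may only be reclaimed once the stack no longer steers that flow to the given queue. The example_hw_filter structure and example_remove_filter() below are hypothetical.

	static bool example_try_expire(struct net_device *dev, u16 rxq,
				       struct example_hw_filter *f)
	{
		/* keep the filter while the stack still wants this flow here */
		if (!rps_may_expire_flow(dev, rxq, f->flow_id, f->filter_id))
			return false;

		example_remove_filter(dev, f);	/* hypothetical helper */
		return true;
	}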
@@ -643,6 +701,14 @@ struct xps_dev_maps {
     (nr_cpu_ids * sizeof(struct xps_map *)))
 #endif /* CONFIG_XPS */
 
+#define TC_MAX_QUEUE	16
+#define TC_BITMASK	15
+/* HW offloaded queuing disciplines txq count and offset maps */
+struct netdev_tc_txq {
+	u16 count;
+	u16 offset;
+};
+
 /*
  * This structure defines the management hooks for network devices.
  * The following hooks can be defined; unless noted otherwise, they are
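Reading the map: traffic class tc owns the contiguous txq range [offset, offset + count). A hedged illustration (the helper name is made up, and dev->tc_to_txq is only added to struct net_device later in this patch):

	/* Illustrative only: first txq index of a given traffic class. */
	static u16 example_first_txq(struct net_device *dev, u8 tc)
	{
		struct netdev_tc_txq *tcp = &dev->tc_to_txq[tc & TC_BITMASK];

		/* class owns tcp->offset .. tcp->offset + tcp->count - 1 */
		return tcp->offset;
	}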
@@ -753,6 +819,74 @@ struct xps_dev_maps {
 * int (*ndo_set_vf_port)(struct net_device *dev, int vf,
 *			  struct nlattr *port[]);
 * int (*ndo_get_vf_port)(struct net_device *dev, int vf, struct sk_buff *skb);
+ * int (*ndo_setup_tc)(struct net_device *dev, u8 tc)
+ *	Called to set up 'tc' traffic classes in the net device. This is
+ *	always called from the stack with the rtnl lock held and netif tx
+ *	queues stopped. This allows the netdevice to perform queue management
+ *	safely.
+ *
+ * Fibre Channel over Ethernet (FCoE) offload functions.
+ * int (*ndo_fcoe_enable)(struct net_device *dev);
+ *	Called when the FCoE protocol stack wants to start using LLD for FCoE
+ *	so the underlying device can perform whatever configuration or
+ *	initialization is needed to support acceleration of FCoE traffic.
+ *
+ * int (*ndo_fcoe_disable)(struct net_device *dev);
+ *	Called when the FCoE protocol stack wants to stop using LLD for FCoE
+ *	so the underlying device can perform whatever clean-ups are needed
+ *	to stop supporting acceleration of FCoE traffic.
+ *
+ * int (*ndo_fcoe_ddp_setup)(struct net_device *dev, u16 xid,
+ *			     struct scatterlist *sgl, unsigned int sgc);
+ *	Called when the FCoE Initiator wants to initialize an I/O that
+ *	is a possible candidate for Direct Data Placement (DDP). The LLD can
+ *	perform the necessary setup and return 1 to indicate that the device
+ *	is set up successfully to perform DDP on this I/O; otherwise return 0.
+ *
+ * int (*ndo_fcoe_ddp_done)(struct net_device *dev, u16 xid);
+ *	Called when the FCoE Initiator/Target is done with the DDPed I/O as
+ *	indicated by the FC exchange id 'xid', so the underlying device can
+ *	clean up and reuse resources for later DDP requests.
+ *
+ * int (*ndo_fcoe_ddp_target)(struct net_device *dev, u16 xid,
+ *			      struct scatterlist *sgl, unsigned int sgc);
+ *	Called when the FCoE Target wants to initialize an I/O that
+ *	is a possible candidate for Direct Data Placement (DDP). The LLD can
+ *	perform the necessary setup and return 1 to indicate that the device
+ *	is set up successfully to perform DDP on this I/O; otherwise return 0.
+ *
+ * int (*ndo_fcoe_get_wwn)(struct net_device *dev, u64 *wwn, int type);
+ *	Called when the underlying device wants to override the default World
+ *	Wide Name (WWN) generation mechanism in the FCoE protocol stack to
+ *	pass its own World Wide Port Name (WWPN) or World Wide Node Name
+ *	(WWNN) to the FCoE protocol stack to use.
+ *
+ * RFS acceleration.
+ * int (*ndo_rx_flow_steer)(struct net_device *dev, const struct sk_buff *skb,
+ *			    u16 rxq_index, u32 flow_id);
+ *	Set hardware filter for RFS. rxq_index is the target queue index;
+ *	flow_id is a flow ID to be passed to rps_may_expire_flow() later.
+ *	Return the filter ID on success, or a negative error code.
+ *
+ * Slave management functions (for bridge, bonding, etc.). Users should
+ * call netdev_set_master() to set dev->master properly.
+ * int (*ndo_add_slave)(struct net_device *dev, struct net_device *slave_dev);
+ *	Called to make another netdev an underling.
+ *
+ * int (*ndo_del_slave)(struct net_device *dev, struct net_device *slave_dev);
+ *	Called to release a previously enslaved netdev.
+ *
+ * Feature/offload setting functions.
+ * u32 (*ndo_fix_features)(struct net_device *dev, u32 features);
+ *	Adjusts the requested feature flags according to device-specific
+ *	constraints, and returns the resulting flags. Must not modify
+ *	the device state.
+ *
+ * int (*ndo_set_features)(struct net_device *dev, u32 features);
+ *	Called to update device configuration to new features. The passed
+ *	feature set might be less than what was returned by ndo_fix_features().
+ *	Must return >0 or -errno if it changed dev->features itself.
+ *
 */
 #define HAVE_NET_DEVICE_OPS
 struct net_device_ops {
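By way of example, a driver's implementation of the two new feature hooks could look like the following sketch. The ex_* names and the RX-checksum hardware toggle are hypothetical; the constraint chosen (checksumming requires scatter/gather) is merely illustrative.

	static u32 ex_fix_features(struct net_device *dev, u32 features)
	{
		/* example constraint: TX checksumming requires SG */
		if (!(features & NETIF_F_SG))
			features &= ~NETIF_F_ALL_CSUM;
		return features;
	}

	static int ex_set_features(struct net_device *dev, u32 features)
	{
		u32 changed = dev->features ^ features;

		if (changed & NETIF_F_RXCSUM)
			ex_hw_enable_rx_csum(dev,	/* hypothetical */
					     !!(features & NETIF_F_RXCSUM));
		return 0;	/* core updates dev->features for us */
	}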
@@ -811,6 +945,7 @@ struct net_device_ops {
 						   struct nlattr *port[]);
 	int			(*ndo_get_vf_port)(struct net_device *dev,
 						   int vf, struct sk_buff *skb);
+	int			(*ndo_setup_tc)(struct net_device *dev, u8 tc);
 #if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
 	int			(*ndo_fcoe_enable)(struct net_device *dev);
 	int			(*ndo_fcoe_disable)(struct net_device *dev);
@@ -820,11 +955,29 @@ struct net_device_ops {
 						      unsigned int sgc);
 	int			(*ndo_fcoe_ddp_done)(struct net_device *dev,
 						     u16 xid);
+	int			(*ndo_fcoe_ddp_target)(struct net_device *dev,
+						       u16 xid,
+						       struct scatterlist *sgl,
+						       unsigned int sgc);
 #define NETDEV_FCOE_WWNN 0
 #define NETDEV_FCOE_WWPN 1
 	int			(*ndo_fcoe_get_wwn)(struct net_device *dev,
 						    u64 *wwn, int type);
 #endif
+#ifdef CONFIG_RFS_ACCEL
+	int			(*ndo_rx_flow_steer)(struct net_device *dev,
+						     const struct sk_buff *skb,
+						     u16 rxq_index,
+						     u32 flow_id);
+#endif
+	int			(*ndo_add_slave)(struct net_device *dev,
+						 struct net_device *slave_dev);
+	int			(*ndo_del_slave)(struct net_device *dev,
+						 struct net_device *slave_dev);
+	u32			(*ndo_fix_features)(struct net_device *dev,
+						    u32 features);
+	int			(*ndo_set_features)(struct net_device *dev,
+						    u32 features);
 };
 
 /*
@@ -876,8 +1029,18 @@ struct net_device {
 	struct list_head	napi_list;
 	struct list_head	unreg_list;
 
-	/* Net device features */
-	unsigned long		features;
+	/* currently active device features */
+	u32			features;
+	/* user-changeable features */
+	u32			hw_features;
+	/* user-requested features */
+	u32			wanted_features;
+	/* VLAN feature mask */
+	u32			vlan_features;
+
+	/* Net device feature bits; if you change something,
+	 * also update netdev_features_strings[] in ethtool.c */
+
 #define NETIF_F_SG		1	/* Scatter/gather IO. */
 #define NETIF_F_IP_CSUM		2	/* Can checksum TCP/UDP over IPv4. */
 #define NETIF_F_NO_CSUM		4	/* Does not require checksum. E.g. loopback. */
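The intended registration-time split, sketched for a hypothetical driver probe: offloads the user may toggle go in hw_features, the initially enabled set in features. The particular flags below are chosen arbitrarily for illustration.

	dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO |
			   NETIF_F_RXCSUM;
	/* always-on bits are set in features but not in hw_features */
	dev->features = dev->hw_features | NETIF_F_HIGHDMA;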
@@ -902,6 +1065,7 @@ struct net_device {
 #define NETIF_F_FCOE_MTU	(1 << 26) /* Supports max FCoE MTU, 2158 bytes*/
 #define NETIF_F_NTUPLE		(1 << 27) /* N-tuple filters supported */
 #define NETIF_F_RXHASH		(1 << 28) /* Receive hashing offload */
+#define NETIF_F_RXCSUM		(1 << 29) /* Receive checksumming offload */
 
 /* Segmentation offload features */
 #define NETIF_F_GSO_SHIFT	16
@@ -913,6 +1077,12 @@ struct net_device {
 #define NETIF_F_TSO6		(SKB_GSO_TCPV6 << NETIF_F_GSO_SHIFT)
 #define NETIF_F_FSO		(SKB_GSO_FCOE << NETIF_F_GSO_SHIFT)
 
+/* Features valid for ethtool to change */
+/* = all defined minus driver/device-class-related */
+#define NETIF_F_NEVER_CHANGE	(NETIF_F_HIGHDMA | NETIF_F_VLAN_CHALLENGED | \
+				 NETIF_F_LLTX | NETIF_F_NETNS_LOCAL)
+#define NETIF_F_ETHTOOL_BITS	(0x3f3fffff & ~NETIF_F_NEVER_CHANGE)
+
 /* List of features with software fallbacks. */
 #define NETIF_F_GSO_SOFTWARE	(NETIF_F_TSO | NETIF_F_TSO_ECN | \
 				 NETIF_F_TSO6 | NETIF_F_UFO)
@@ -923,6 +1093,12 @@ struct net_device {
 #define NETIF_F_V6_CSUM		(NETIF_F_GEN_CSUM | NETIF_F_IPV6_CSUM)
 #define NETIF_F_ALL_CSUM	(NETIF_F_V4_CSUM | NETIF_F_V6_CSUM)
 
+#define NETIF_F_ALL_TSO		(NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN)
+
+#define NETIF_F_ALL_TX_OFFLOADS	(NETIF_F_ALL_CSUM | NETIF_F_SG | \
+				 NETIF_F_FRAGLIST | NETIF_F_ALL_TSO | \
+				 NETIF_F_SCTP_CSUM | NETIF_F_FCOE_CRC)
+
 /*
  * If one device supports one of these features, then enable them
  * for all in netdev_increment_features.
@@ -931,6 +1107,9 @@ struct net_device {
 				 NETIF_F_SG | NETIF_F_HIGHDMA |		\
 				 NETIF_F_FRAGLIST)
 
+/* changeable features with no special hardware requirements */
+#define NETIF_F_SOFT_FEATURES	(NETIF_F_GSO | NETIF_F_GRO)
+
 	/* Interface index. Unique device identifier	*/
 	int			ifindex;
 	int			iflink;
@@ -1039,6 +1218,13 @@ struct net_device {
 
 	/* Number of RX queues currently active in device */
 	unsigned int		real_num_rx_queues;
+
+#ifdef CONFIG_RFS_ACCEL
+	/* CPU reverse-mapping for RX completion interrupts, indexed
+	 * by RX queue number. Assigned by driver. This must only be
+	 * set if the ndo_rx_flow_steer operation is defined. */
+	struct cpu_rmap		*rx_cpu_rmap;
+#endif
 #endif
 
 	rx_handler_func_t __rcu	*rx_handler;
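A sketch of how a driver might populate the reverse map, assuming the cpu_rmap helpers from the same series (<linux/cpu_rmap.h>) and one IRQ per RX queue; the rxq_irq[] array is hypothetical.

	dev->rx_cpu_rmap = alloc_irq_cpu_rmap(dev->real_num_rx_queues);
	if (!dev->rx_cpu_rmap)
		return -ENOMEM;

	for (i = 0; i < dev->real_num_rx_queues; i++) {
		err = irq_cpu_rmap_add(dev->rx_cpu_rmap, rxq_irq[i]);
		if (err)
			goto fail;	/* free_irq_cpu_rmap() on error path */
	}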
@@ -1132,9 +1318,6 @@ struct net_device {
 	/* rtnetlink link ops */
 	const struct rtnl_link_ops *rtnl_link_ops;
 
-	/* VLAN feature mask */
-	unsigned long		vlan_features;
-
 	/* for setting kernel sock attribute on TCP connection setup */
 #define GSO_MAX_SIZE		65536
 	unsigned int		gso_max_size;
@@ -1143,6 +1326,9 @@ struct net_device {
 	/* Data Center Bridging netlink ops */
 	const struct dcbnl_rtnl_ops *dcbnl_ops;
 #endif
+	u8 num_tc;
+	struct netdev_tc_txq tc_to_txq[TC_MAX_QUEUE];
+	u8 prio_tc_map[TC_BITMASK + 1];
 
 #if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
 	/* max exchange id for FCoE LRO by ddp */
@@ -1153,12 +1339,66 @@ struct net_device {
 
 	/* phy device may attach itself for hardware timestamping */
 	struct phy_device *phydev;
+
+	/* group the device belongs to */
+	int group;
 };
 #define to_net_dev(d) container_of(d, struct net_device, dev)
 
 #define NETDEV_ALIGN		32
 
 static inline
+int netdev_get_prio_tc_map(const struct net_device *dev, u32 prio)
+{
+	return dev->prio_tc_map[prio & TC_BITMASK];
+}
+
+static inline
+int netdev_set_prio_tc_map(struct net_device *dev, u8 prio, u8 tc)
+{
+	if (tc >= dev->num_tc)
+		return -EINVAL;
+
+	dev->prio_tc_map[prio & TC_BITMASK] = tc & TC_BITMASK;
+	return 0;
+}
+
+static inline
+void netdev_reset_tc(struct net_device *dev)
+{
+	dev->num_tc = 0;
+	memset(dev->tc_to_txq, 0, sizeof(dev->tc_to_txq));
+	memset(dev->prio_tc_map, 0, sizeof(dev->prio_tc_map));
+}
+
+static inline
+int netdev_set_tc_queue(struct net_device *dev, u8 tc, u16 count, u16 offset)
+{
+	if (tc >= dev->num_tc)
+		return -EINVAL;
+
+	dev->tc_to_txq[tc].count = count;
+	dev->tc_to_txq[tc].offset = offset;
+	return 0;
+}
+
+static inline
+int netdev_set_num_tc(struct net_device *dev, u8 num_tc)
+{
+	if (num_tc > TC_MAX_QUEUE)
+		return -EINVAL;
+
+	dev->num_tc = num_tc;
+	return 0;
+}
+
+static inline
+int netdev_get_num_tc(struct net_device *dev)
+{
+	return dev->num_tc;
+}
+
+static inline
 struct netdev_queue *netdev_get_tx_queue(const struct net_device *dev,
 					 unsigned int index)
 {
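Taken together, these helpers let a driver's ndo_setup_tc stay short. A hedged sketch, with an arbitrary four-txqs-per-class layout and a made-up ex_setup_tc name:

	static int ex_setup_tc(struct net_device *dev, u8 num_tc)
	{
		u8 tc;

		if (!num_tc) {
			netdev_reset_tc(dev);	/* back to a classless device */
			return 0;
		}

		if (netdev_set_num_tc(dev, num_tc))
			return -EINVAL;

		/* illustrative layout: class tc owns txqs [tc*4, tc*4 + 3] */
		for (tc = 0; tc < num_tc; tc++)
			netdev_set_tc_queue(dev, tc, 4, tc * 4);

		return 0;
	}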
@@ -1300,7 +1540,7 @@ struct packet_type {
 					 struct packet_type *,
 					 struct net_device *);
 	struct sk_buff		*(*gso_segment)(struct sk_buff *skb,
-						int features);
+						u32 features);
 	int			(*gso_send_check)(struct sk_buff *skb);
 	struct sk_buff		**(*gro_receive)(struct sk_buff **head,
 						 struct sk_buff *skb);
@@ -1345,7 +1585,7 @@ static inline struct net_device *next_net_device_rcu(struct net_device *dev)
 	struct net *net;
 
 	net = dev_net(dev);
-	lh = rcu_dereference(dev->dev_list.next);
+	lh = rcu_dereference(list_next_rcu(&dev->dev_list));
 	return lh == &net->dev_base_head ? NULL : net_device_entry(lh);
 }
 
@@ -1355,6 +1595,13 @@ static inline struct net_device *first_net_device(struct net *net)
 		net_device_entry(net->dev_base_head.next);
 }
 
+static inline struct net_device *first_net_device_rcu(struct net *net)
+{
+	struct list_head *lh = rcu_dereference(list_next_rcu(&net->dev_base_head));
+
+	return lh == &net->dev_base_head ? NULL : net_device_entry(lh);
+}
+
 extern int	netdev_boot_setup_check(struct net_device *dev);
 extern unsigned long netdev_boot_base(const char *prefix, int unit);
 extern struct net_device *dev_getbyhwaddr_rcu(struct net *net, unsigned short type,
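Usage follows the usual RCU pattern; the returned pointer is only valid inside the read-side critical section. A minimal sketch, pairing it with the existing next_net_device_rcu():

	rcu_read_lock();
	for (dev = first_net_device_rcu(net); dev;
	     dev = next_net_device_rcu(dev)) {
		/* inspect dev; do not sleep or keep dev past unlock */
	}
	rcu_read_unlock();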
@@ -1606,8 +1853,7 @@ static inline void netif_tx_wake_all_queues(struct net_device *dev)
 static inline void netif_tx_stop_queue(struct netdev_queue *dev_queue)
 {
 	if (WARN_ON(!dev_queue)) {
-		printk(KERN_INFO "netif_stop_queue() cannot be called before "
-		       "register_netdev()");
+		pr_info("netif_stop_queue() cannot be called before register_netdev()\n");
 		return;
 	}
 	set_bit(__QUEUE_STATE_XOFF, &dev_queue->state);
@@ -1844,6 +2090,7 @@ extern int dev_set_alias(struct net_device *, const char *, size_t);
 extern int		dev_change_net_namespace(struct net_device *,
 						 struct net *, const char *);
 extern int		dev_set_mtu(struct net_device *, int);
+extern void		dev_set_group(struct net_device *, int);
 extern int		dev_set_mac_address(struct net_device *,
 					    struct sockaddr *);
 extern int		dev_hard_start_xmit(struct sk_buff *skb,
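A sketch of the new call, mirroring the other dev_set_* helpers; the group number 10 is arbitrary, and the rtnl locking shown matches how rtnetlink invokes it.

	rtnl_lock();
	dev_set_group(dev, 10);	/* move dev out of INIT_NETDEV_GROUP (0) */
	rtnl_unlock();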
@@ -2267,8 +2514,10 @@ extern int netdev_max_backlog;
 extern int		netdev_tstamp_prequeue;
 extern int		weight_p;
 extern int		netdev_set_master(struct net_device *dev, struct net_device *master);
+extern int		netdev_set_bond_master(struct net_device *dev,
+					       struct net_device *master);
 extern int		skb_checksum_help(struct sk_buff *skb);
-extern struct sk_buff	*skb_gso_segment(struct sk_buff *skb, int features);
+extern struct sk_buff	*skb_gso_segment(struct sk_buff *skb, u32 features);
 #ifdef CONFIG_BUG
 extern void netdev_rx_csum_fault(struct net_device *dev);
 #else
@@ -2295,22 +2544,26 @@ extern char *netdev_drivername(const struct net_device *dev, char *buffer, int len);
 
 extern void linkwatch_run_queue(void);
 
-unsigned long netdev_increment_features(unsigned long all, unsigned long one,
-					unsigned long mask);
-unsigned long netdev_fix_features(unsigned long features, const char *name);
+static inline u32 netdev_get_wanted_features(struct net_device *dev)
+{
+	return (dev->features & ~dev->hw_features) | dev->wanted_features;
+}
+u32 netdev_increment_features(u32 all, u32 one, u32 mask);
+u32 netdev_fix_features(struct net_device *dev, u32 features);
+void netdev_update_features(struct net_device *dev);
 
 void netif_stacked_transfer_operstate(const struct net_device *rootdev,
 					struct net_device *dev);
 
-int netif_skb_features(struct sk_buff *skb);
+u32 netif_skb_features(struct sk_buff *skb);
 
-static inline int net_gso_ok(int features, int gso_type)
+static inline int net_gso_ok(u32 features, int gso_type)
 {
 	int feature = gso_type << NETIF_F_GSO_SHIFT;
 	return (features & feature) == feature;
 }
 
-static inline int skb_gso_ok(struct sk_buff *skb, int features)
+static inline int skb_gso_ok(struct sk_buff *skb, u32 features)
 {
 	return net_gso_ok(features, skb_shinfo(skb)->gso_type) &&
 	       (!skb_has_frag_list(skb) || (features & NETIF_F_FRAGLIST));
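When a driver-side constraint changes (say, an MTU-dependent offload), it is expected to call netdev_update_features() so the core re-runs ndo_fix_features() and ndo_set_features(). A hypothetical sketch:

	static int ex_change_mtu(struct net_device *dev, int new_mtu)
	{
		dev->mtu = new_mtu;
		netdev_update_features(dev);	/* recompute + apply features */
		return 0;
	}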
@@ -2328,15 +2581,9 @@ static inline void netif_set_gso_max_size(struct net_device *dev,
 	dev->gso_max_size = size;
 }
 
-extern int __skb_bond_should_drop(struct sk_buff *skb,
-				  struct net_device *master);
-
-static inline int skb_bond_should_drop(struct sk_buff *skb,
-				       struct net_device *master)
+static inline int netif_is_bond_slave(struct net_device *dev)
 {
-	if (master)
-		return __skb_bond_should_drop(skb, master);
-	return 0;
+	return dev->flags & IFF_SLAVE && dev->priv_flags & IFF_BONDING;
 }
 
 extern struct pernet_operations __net_initdata loopback_net_ops;
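The inline replacement keeps callers trivial; for instance, a hypothetical consumer that skips frames arriving on bonding slaves, since the bond's rx_handler now takes care of them:

	static bool ex_skip_frame(const struct sk_buff *skb)
	{
		/* slave traffic is handled via the bond master's receive path */
		return netif_is_bond_slave(skb->dev);
	}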
@@ -2351,6 +2598,8 @@ static inline int dev_ethtool_get_settings(struct net_device *dev,
 
 static inline u32 dev_ethtool_get_rx_csum(struct net_device *dev)
 {
+	if (dev->hw_features & NETIF_F_RXCSUM)
+		return !!(dev->features & NETIF_F_RXCSUM);
 	if (!dev->ethtool_ops || !dev->ethtool_ops->get_rx_csum)
 		return 0;
 	return dev->ethtool_ops->get_rx_csum(dev);