Diffstat (limited to 'include/linux/netdevice.h')
-rw-r--r--   include/linux/netdevice.h   143
1 file changed, 123 insertions, 20 deletions
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 3a70f553b28f..79cc3dab4be7 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -108,6 +108,14 @@ struct wireless_dev;
 #define MAX_HEADER (LL_MAX_HEADER + 48)
 #endif
 
+struct net_device_subqueue
+{
+	/* Give a control state for each queue.  This struct may contain
+	 * per-queue locks in the future.
+	 */
+	unsigned long	state;
+};
+
 /*
  *	Network device statistics. Akin to the 2.0 ether stats but
  *	with byte counters.
@@ -177,19 +185,24 @@ struct netif_rx_stats
 
 DECLARE_PER_CPU(struct netif_rx_stats, netdev_rx_stat);
 
+struct dev_addr_list
+{
+	struct dev_addr_list	*next;
+	u8			da_addr[MAX_ADDR_LEN];
+	u8			da_addrlen;
+	int			da_users;
+	int			da_gusers;
+};
 
 /*
  *	We tag multicasts with these structures.
  */
 
-struct dev_mc_list
-{
-	struct dev_mc_list	*next;
-	__u8			dmi_addr[MAX_ADDR_LEN];
-	unsigned char		dmi_addrlen;
-	int			dmi_users;
-	int			dmi_gusers;
-};
+#define dev_mc_list	dev_addr_list
+#define dmi_addr	da_addr
+#define dmi_addrlen	da_addrlen
+#define dmi_users	da_users
+#define dmi_gusers	da_gusers
 
 struct hh_cache
 {
@@ -248,6 +261,8 @@ enum netdev_state_t
 	__LINK_STATE_LINKWATCH_PENDING,
 	__LINK_STATE_DORMANT,
 	__LINK_STATE_QDISC_RUNNING,
+	/* Set by the netpoll NAPI code */
+	__LINK_STATE_POLL_LIST_FROZEN,
 };
 
 
@@ -314,9 +329,10 @@ struct net_device
 	/* Net device features */
 	unsigned long		features;
 #define NETIF_F_SG		1	/* Scatter/gather IO. */
-#define NETIF_F_IP_CSUM		2	/* Can checksum only TCP/UDP over IPv4. */
+#define NETIF_F_IP_CSUM		2	/* Can checksum TCP/UDP over IPv4. */
 #define NETIF_F_NO_CSUM		4	/* Does not require checksum. F.e. loopack. */
 #define NETIF_F_HW_CSUM		8	/* Can checksum all the packets. */
+#define NETIF_F_IPV6_CSUM	16	/* Can checksum TCP/UDP over IPV6 */
 #define NETIF_F_HIGHDMA		32	/* Can DMA to high memory. */
 #define NETIF_F_FRAGLIST	64	/* Scatter/gather IO. */
 #define NETIF_F_HW_VLAN_TX	128	/* Transmit VLAN hw acceleration */
@@ -325,6 +341,7 @@ struct net_device
 #define NETIF_F_VLAN_CHALLENGED	1024	/* Device cannot handle VLAN packets */
 #define NETIF_F_GSO		2048	/* Enable software GSO. */
 #define NETIF_F_LLTX		4096	/* LockLess TX */
+#define NETIF_F_MULTI_QUEUE	16384	/* Has multiple TX/RX queues */
 
 /* Segmentation offload features */
 #define NETIF_F_GSO_SHIFT	16
@@ -338,8 +355,11 @@ struct net_device
 /* List of features with software fallbacks. */
 #define NETIF_F_GSO_SOFTWARE	(NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6)
 
+
 #define NETIF_F_GEN_CSUM	(NETIF_F_NO_CSUM | NETIF_F_HW_CSUM)
-#define NETIF_F_ALL_CSUM	(NETIF_F_IP_CSUM | NETIF_F_GEN_CSUM)
+#define NETIF_F_V4_CSUM		(NETIF_F_GEN_CSUM | NETIF_F_IP_CSUM)
+#define NETIF_F_V6_CSUM		(NETIF_F_GEN_CSUM | NETIF_F_IPV6_CSUM)
+#define NETIF_F_ALL_CSUM	(NETIF_F_V4_CSUM | NETIF_F_V6_CSUM)
 
 	struct net_device	*next_sched;
 
@@ -388,7 +408,10 @@ struct net_device
 	unsigned char		addr_len;	/* hardware address length	*/
 	unsigned short		dev_id;		/* for shared network cards */
 
-	struct dev_mc_list	*mc_list;	/* Multicast mac addresses	*/
+	struct dev_addr_list	*uc_list;	/* Secondary unicast mac addresses */
+	int			uc_count;	/* Number of installed ucasts	*/
+	int			uc_promisc;
+	struct dev_addr_list	*mc_list;	/* Multicast mac addresses	*/
 	int			mc_count;	/* Number of installed mcasts	*/
 	int			promiscuity;
 	int			allmulti;
@@ -493,6 +516,8 @@ struct net_device
 						 void *saddr,
 						 unsigned len);
 	int			(*rebuild_header)(struct sk_buff *skb);
+#define HAVE_SET_RX_MODE
+	void			(*set_rx_mode)(struct net_device *dev);
 #define HAVE_MULTICAST
 	void			(*set_multicast_list)(struct net_device *dev);
 #define HAVE_SET_MAC_ADDR
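For illustration only (this block is not part of the patch): the new set_rx_mode hook gives a driver one place to reprogram its hardware filters from both the secondary-unicast list and the multicast list. A hypothetical callback might look like the sketch below; my_write_filter() is a made-up driver internal, while dev->uc_list, dev->mc_list and the dev_addr_list fields all come from this patch.

/* Hypothetical rx-mode callback walking the new unicast list and the
 * existing multicast list; my_write_filter() stands in for real hardware
 * filter programming.
 */
static void my_set_rx_mode(struct net_device *dev)
{
	struct dev_addr_list *da;

	for (da = dev->uc_list; da; da = da->next)	/* secondary unicast addresses */
		my_write_filter(dev, da->da_addr, da->da_addrlen);

	for (da = dev->mc_list; da; da = da->next)	/* multicast addresses */
		my_write_filter(dev, da->da_addr, da->da_addrlen);
}

/* Wired up during driver setup, e.g.:  dev->set_rx_mode = my_set_rx_mode;  */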
@@ -540,17 +565,22 @@ struct net_device
 	struct device		dev;
 	/* space for optional statistics and wireless sysfs groups */
 	struct attribute_group	*sysfs_groups[3];
+
+	/* rtnetlink link ops */
+	const struct rtnl_link_ops *rtnl_link_ops;
+
+	/* The TX queue control structures */
+	unsigned int			egress_subqueue_count;
+	struct net_device_subqueue	egress_subqueue[0];
 };
 #define to_net_dev(d) container_of(d, struct net_device, dev)
 
 #define	NETDEV_ALIGN		32
 #define	NETDEV_ALIGN_CONST	(NETDEV_ALIGN - 1)
 
-static inline void *netdev_priv(struct net_device *dev)
+static inline void *netdev_priv(const struct net_device *dev)
 {
-	return (char *)dev + ((sizeof(struct net_device)
-					+ NETDEV_ALIGN_CONST)
-				& ~NETDEV_ALIGN_CONST);
+	return dev->priv;
 }
 
 #define SET_MODULE_OWNER(dev) do { } while (0)
@@ -702,6 +732,62 @@ static inline int netif_running(const struct net_device *dev)
 	return test_bit(__LINK_STATE_START, &dev->state);
 }
 
+/*
+ * Routines to manage the subqueues on a device.  We only need start,
+ * stop, and a check if it's stopped.  All other device management is
+ * done at the overall netdevice level.
+ * Also test the device if we're multiqueue.
+ */
+static inline void netif_start_subqueue(struct net_device *dev, u16 queue_index)
+{
+#ifdef CONFIG_NETDEVICES_MULTIQUEUE
+	clear_bit(__LINK_STATE_XOFF, &dev->egress_subqueue[queue_index].state);
+#endif
+}
+
+static inline void netif_stop_subqueue(struct net_device *dev, u16 queue_index)
+{
+#ifdef CONFIG_NETDEVICES_MULTIQUEUE
+#ifdef CONFIG_NETPOLL_TRAP
+	if (netpoll_trap())
+		return;
+#endif
+	set_bit(__LINK_STATE_XOFF, &dev->egress_subqueue[queue_index].state);
+#endif
+}
+
+static inline int netif_subqueue_stopped(const struct net_device *dev,
+					 u16 queue_index)
+{
+#ifdef CONFIG_NETDEVICES_MULTIQUEUE
+	return test_bit(__LINK_STATE_XOFF,
+			&dev->egress_subqueue[queue_index].state);
+#else
+	return 0;
+#endif
+}
+
+static inline void netif_wake_subqueue(struct net_device *dev, u16 queue_index)
+{
+#ifdef CONFIG_NETDEVICES_MULTIQUEUE
+#ifdef CONFIG_NETPOLL_TRAP
+	if (netpoll_trap())
+		return;
+#endif
+	if (test_and_clear_bit(__LINK_STATE_XOFF,
+			       &dev->egress_subqueue[queue_index].state))
+		__netif_schedule(dev);
+#endif
+}
+
+static inline int netif_is_multiqueue(const struct net_device *dev)
+{
+#ifdef CONFIG_NETDEVICES_MULTIQUEUE
+	return (!!(NETIF_F_MULTI_QUEUE & dev->features));
+#else
+	return 0;
+#endif
+}
 
 /* Use this variant when it is known for sure that it
  * is executing from interrupt context.
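For illustration only (this block is not part of the patch): a minimal sketch of how a driver's transmit path might use the subqueue helpers above once CONFIG_NETDEVICES_MULTIQUEUE is enabled. The driver name, the queue-selection policy and the ring-space tests (my_ring_select(), my_post_to_ring(), my_ring_full(), my_ring_has_room()) are hypothetical; only the netif_*_subqueue() calls come from this header.

/* Hypothetical hard_start_xmit for a multiqueue NIC. */
static int my_driver_xmit(struct sk_buff *skb, struct net_device *dev)
{
	u16 queue = my_ring_select(dev, skb);	/* driver-specific queue choice */

	if (netif_subqueue_stopped(dev, queue))
		return NETDEV_TX_BUSY;		/* queue was throttled earlier */

	my_post_to_ring(dev, queue, skb);	/* hand the frame to hardware */

	if (my_ring_full(dev, queue))
		netif_stop_subqueue(dev, queue);	/* throttle only this queue */

	return NETDEV_TX_OK;
}

/* In the TX-completion path, re-enable the queue once descriptors free up. */
static void my_driver_tx_complete(struct net_device *dev, u16 queue)
{
	if (my_ring_has_room(dev, queue))
		netif_wake_subqueue(dev, queue);	/* clears XOFF and reschedules */
}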
@@ -930,6 +1016,14 @@ static inline void netif_rx_complete(struct net_device *dev)
 {
 	unsigned long flags;
 
+#ifdef CONFIG_NETPOLL
+	/* Prevent race with netpoll - yes, this is a kludge.
+	 * But at least it doesn't penalize the non-netpoll
+	 * code path. */
+	if (test_bit(__LINK_STATE_POLL_LIST_FROZEN, &dev->state))
+		return;
+#endif
+
 	local_irq_save(flags);
 	__netif_rx_complete(dev);
 	local_irq_restore(flags);
@@ -992,15 +1086,24 @@ static inline void netif_tx_disable(struct net_device *dev)
 extern void		ether_setup(struct net_device *dev);
 
 /* Support for loadable net-drivers */
-extern struct net_device *alloc_netdev(int sizeof_priv, const char *name,
-				       void (*setup)(struct net_device *));
+extern struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name,
+				       void (*setup)(struct net_device *),
+				       unsigned int queue_count);
+#define alloc_netdev(sizeof_priv, name, setup) \
+	alloc_netdev_mq(sizeof_priv, name, setup, 1)
 extern int		register_netdev(struct net_device *dev);
 extern void		unregister_netdev(struct net_device *dev);
-/* Functions used for multicast support */
-extern void		dev_mc_upload(struct net_device *dev);
+/* Functions used for secondary unicast and multicast support */
+extern void		dev_set_rx_mode(struct net_device *dev);
+extern void		__dev_set_rx_mode(struct net_device *dev);
+extern int		dev_unicast_delete(struct net_device *dev, void *addr, int alen);
+extern int		dev_unicast_add(struct net_device *dev, void *addr, int alen);
 extern int		dev_mc_delete(struct net_device *dev, void *addr, int alen, int all);
 extern int		dev_mc_add(struct net_device *dev, void *addr, int alen, int newonly);
 extern void		dev_mc_discard(struct net_device *dev);
+extern int		__dev_addr_delete(struct dev_addr_list **list, int *count, void *addr, int alen, int all);
+extern int		__dev_addr_add(struct dev_addr_list **list, int *count, void *addr, int alen, int newonly);
+extern void		__dev_addr_discard(struct dev_addr_list **list);
 extern void		dev_set_promiscuity(struct net_device *dev, int inc);
 extern void		dev_set_allmulti(struct net_device *dev, int inc);
 extern void		netdev_state_change(struct net_device *dev);
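For illustration only (this block is not part of the patch): with the new alloc_netdev_mq() entry point, a multiqueue-capable driver would allocate its net_device roughly as sketched below. The driver-private structure, interface name and queue count are hypothetical; alloc_netdev_mq(), ether_setup(), register_netdev() and NETIF_F_MULTI_QUEUE are taken from this header, and free_netdev() is the usual teardown counterpart.

/* Sketch of multiqueue device allocation, assuming a hypothetical
 * private structure and a fixed count of 4 TX queues.
 */
struct my_priv {
	int dummy;			/* driver-private state would live here */
};

static struct net_device *my_create_netdev(void)
{
	struct net_device *dev;

	/* One net_device_subqueue per queue is allocated behind the struct. */
	dev = alloc_netdev_mq(sizeof(struct my_priv), "myeth%d", ether_setup, 4);
	if (!dev)
		return NULL;

	dev->features |= NETIF_F_MULTI_QUEUE;	/* advertise multiple TX queues */

	if (register_netdev(dev)) {
		free_netdev(dev);
		return NULL;
	}
	return dev;
}

Single-queue drivers are unaffected: the alloc_netdev() macro now simply expands to alloc_netdev_mq(..., 1).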