diff options
author | Peter P Waskiewicz Jr <peter.p.waskiewicz.jr@intel.com> | 2007-07-06 16:36:20 -0400 |
---|---|---|
committer | David S. Miller <davem@sunset.davemloft.net> | 2007-07-11 01:16:21 -0400 |
commit | f25f4e44808f0f6c9875d94ef1c41ef86c288eb2 (patch) | |
tree | d7809dd5e957f1626185326d0c3438ff9a04d350 /include/linux/netdevice.h | |
parent | a093bf006e09a305e95ff0938c0a18b7520aef67 (diff) |
[CORE] Stack changes to add multiqueue hardware support API
Add the multiqueue hardware device support API to the core network
stack. Allow drivers to allocate multiple queues and manage them at
the netdev level if they choose to do so.
Added a new field to sk_buff, namely queue_mapping, for drivers to
know which tx_ring to select based on OS classification of the flow.
Signed-off-by: Peter P Waskiewicz Jr <peter.p.waskiewicz.jr@intel.com>
Signed-off-by: Patrick McHardy <kaber@trash.net>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'include/linux/netdevice.h')
-rw-r--r-- | include/linux/netdevice.h | 80 |
1 files changed, 75 insertions, 5 deletions
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 2c0cc19edfb2..9817821729c4 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h | |||
@@ -108,6 +108,14 @@ struct wireless_dev; | |||
108 | #define MAX_HEADER (LL_MAX_HEADER + 48) | 108 | #define MAX_HEADER (LL_MAX_HEADER + 48) |
109 | #endif | 109 | #endif |
110 | 110 | ||
111 | struct net_device_subqueue | ||
112 | { | ||
113 | /* Give a control state for each queue. This struct may contain | ||
114 | * per-queue locks in the future. | ||
115 | */ | ||
116 | unsigned long state; | ||
117 | }; | ||
118 | |||
111 | /* | 119 | /* |
112 | * Network device statistics. Akin to the 2.0 ether stats but | 120 | * Network device statistics. Akin to the 2.0 ether stats but |
113 | * with byte counters. | 121 | * with byte counters. |
@@ -331,6 +339,7 @@ struct net_device | |||
331 | #define NETIF_F_VLAN_CHALLENGED 1024 /* Device cannot handle VLAN packets */ | 339 | #define NETIF_F_VLAN_CHALLENGED 1024 /* Device cannot handle VLAN packets */ |
332 | #define NETIF_F_GSO 2048 /* Enable software GSO. */ | 340 | #define NETIF_F_GSO 2048 /* Enable software GSO. */ |
333 | #define NETIF_F_LLTX 4096 /* LockLess TX */ | 341 | #define NETIF_F_LLTX 4096 /* LockLess TX */ |
342 | #define NETIF_F_MULTI_QUEUE 16384 /* Has multiple TX/RX queues */ | ||
334 | 343 | ||
335 | /* Segmentation offload features */ | 344 | /* Segmentation offload features */ |
336 | #define NETIF_F_GSO_SHIFT 16 | 345 | #define NETIF_F_GSO_SHIFT 16 |
@@ -557,6 +566,10 @@ struct net_device | |||
557 | 566 | ||
558 | /* rtnetlink link ops */ | 567 | /* rtnetlink link ops */ |
559 | const struct rtnl_link_ops *rtnl_link_ops; | 568 | const struct rtnl_link_ops *rtnl_link_ops; |
569 | |||
570 | /* The TX queue control structures */ | ||
571 | unsigned int egress_subqueue_count; | ||
572 | struct net_device_subqueue egress_subqueue[0]; | ||
560 | }; | 573 | }; |
561 | #define to_net_dev(d) container_of(d, struct net_device, dev) | 574 | #define to_net_dev(d) container_of(d, struct net_device, dev) |
562 | 575 | ||
@@ -565,9 +578,7 @@ struct net_device | |||
565 | 578 | ||
566 | static inline void *netdev_priv(const struct net_device *dev) | 579 | static inline void *netdev_priv(const struct net_device *dev) |
567 | { | 580 | { |
568 | return (char *)dev + ((sizeof(struct net_device) | 581 | return dev->priv; |
569 | + NETDEV_ALIGN_CONST) | ||
570 | & ~NETDEV_ALIGN_CONST); | ||
571 | } | 582 | } |
572 | 583 | ||
573 | #define SET_MODULE_OWNER(dev) do { } while (0) | 584 | #define SET_MODULE_OWNER(dev) do { } while (0) |
@@ -719,6 +730,62 @@ static inline int netif_running(const struct net_device *dev) | |||
719 | return test_bit(__LINK_STATE_START, &dev->state); | 730 | return test_bit(__LINK_STATE_START, &dev->state); |
720 | } | 731 | } |
721 | 732 | ||
733 | /* | ||
734 | * Routines to manage the subqueues on a device. We only need start, | ||
735 | * stop, and a check if it's stopped. All other device management is | ||
736 | * done at the overall netdevice level. | ||
737 | * Also test the device if we're multiqueue. | ||
738 | */ | ||
739 | static inline void netif_start_subqueue(struct net_device *dev, u16 queue_index) | ||
740 | { | ||
741 | #ifdef CONFIG_NETDEVICES_MULTIQUEUE | ||
742 | clear_bit(__LINK_STATE_XOFF, &dev->egress_subqueue[queue_index].state); | ||
743 | #endif | ||
744 | } | ||
745 | |||
746 | static inline void netif_stop_subqueue(struct net_device *dev, u16 queue_index) | ||
747 | { | ||
748 | #ifdef CONFIG_NETDEVICES_MULTIQUEUE | ||
749 | #ifdef CONFIG_NETPOLL_TRAP | ||
750 | if (netpoll_trap()) | ||
751 | return; | ||
752 | #endif | ||
753 | set_bit(__LINK_STATE_XOFF, &dev->egress_subqueue[queue_index].state); | ||
754 | #endif | ||
755 | } | ||
756 | |||
757 | static inline int netif_subqueue_stopped(const struct net_device *dev, | ||
758 | u16 queue_index) | ||
759 | { | ||
760 | #ifdef CONFIG_NETDEVICES_MULTIQUEUE | ||
761 | return test_bit(__LINK_STATE_XOFF, | ||
762 | &dev->egress_subqueue[queue_index].state); | ||
763 | #else | ||
764 | return 0; | ||
765 | #endif | ||
766 | } | ||
767 | |||
768 | static inline void netif_wake_subqueue(struct net_device *dev, u16 queue_index) | ||
769 | { | ||
770 | #ifdef CONFIG_NETDEVICES_MULTIQUEUE | ||
771 | #ifdef CONFIG_NETPOLL_TRAP | ||
772 | if (netpoll_trap()) | ||
773 | return; | ||
774 | #endif | ||
775 | if (test_and_clear_bit(__LINK_STATE_XOFF, | ||
776 | &dev->egress_subqueue[queue_index].state)) | ||
777 | __netif_schedule(dev); | ||
778 | #endif | ||
779 | } | ||
780 | |||
781 | static inline int netif_is_multiqueue(const struct net_device *dev) | ||
782 | { | ||
783 | #ifdef CONFIG_NETDEVICES_MULTIQUEUE | ||
784 | return (!!(NETIF_F_MULTI_QUEUE & dev->features)); | ||
785 | #else | ||
786 | return 0; | ||
787 | #endif | ||
788 | } | ||
722 | 789 | ||
723 | /* Use this variant when it is known for sure that it | 790 | /* Use this variant when it is known for sure that it |
724 | * is executing from interrupt context. | 791 | * is executing from interrupt context. |
@@ -1009,8 +1076,11 @@ static inline void netif_tx_disable(struct net_device *dev) | |||
1009 | extern void ether_setup(struct net_device *dev); | 1076 | extern void ether_setup(struct net_device *dev); |
1010 | 1077 | ||
1011 | /* Support for loadable net-drivers */ | 1078 | /* Support for loadable net-drivers */ |
1012 | extern struct net_device *alloc_netdev(int sizeof_priv, const char *name, | 1079 | extern struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name, |
1013 | void (*setup)(struct net_device *)); | 1080 | void (*setup)(struct net_device *), |
1081 | unsigned int queue_count); | ||
1082 | #define alloc_netdev(sizeof_priv, name, setup) \ | ||
1083 | alloc_netdev_mq(sizeof_priv, name, setup, 1) | ||
1014 | extern int register_netdev(struct net_device *dev); | 1084 | extern int register_netdev(struct net_device *dev); |
1015 | extern void unregister_netdev(struct net_device *dev); | 1085 | extern void unregister_netdev(struct net_device *dev); |
1016 | /* Functions used for secondary unicast and multicast support */ | 1086 | /* Functions used for secondary unicast and multicast support */ |