about summary refs log tree commit diff stats
path: root/include
diff options
context:
space:
mode:
authorDavid S. Miller <davem@davemloft.net>2008-07-09 02:14:24 -0400
committerDavid S. Miller <davem@davemloft.net>2008-07-09 02:14:24 -0400
commitb19fa1fa91845234961c64dbd564671aa7c0fd27 (patch)
treeefb09da87299ef503b59396b69a7667f1650e378 /include
parentc773e847ea8f6812804e40f52399c6921a00eab1 (diff)
net: Delete NETDEVICES_MULTIQUEUE kconfig option.
Multiple TX queue support is a core networking feature.

Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'include')
-rw-r--r--include/linux/netdevice.h14
-rw-r--r--include/linux/skbuff.h10
2 files changed, 0 insertions, 24 deletions
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index c8d5f128858d..e2d931f9b700 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -1043,9 +1043,7 @@ static inline int netif_running(const struct net_device *dev)
1043 */ 1043 */
1044static inline void netif_start_subqueue(struct net_device *dev, u16 queue_index) 1044static inline void netif_start_subqueue(struct net_device *dev, u16 queue_index)
1045{ 1045{
1046#ifdef CONFIG_NETDEVICES_MULTIQUEUE
1047 clear_bit(__LINK_STATE_XOFF, &dev->egress_subqueue[queue_index].state); 1046 clear_bit(__LINK_STATE_XOFF, &dev->egress_subqueue[queue_index].state);
1048#endif
1049} 1047}
1050 1048
1051/** 1049/**
@@ -1057,13 +1055,11 @@ static inline void netif_start_subqueue(struct net_device *dev, u16 queue_index)
1057 */ 1055 */
1058static inline void netif_stop_subqueue(struct net_device *dev, u16 queue_index) 1056static inline void netif_stop_subqueue(struct net_device *dev, u16 queue_index)
1059{ 1057{
1060#ifdef CONFIG_NETDEVICES_MULTIQUEUE
1061#ifdef CONFIG_NETPOLL_TRAP 1058#ifdef CONFIG_NETPOLL_TRAP
1062 if (netpoll_trap()) 1059 if (netpoll_trap())
1063 return; 1060 return;
1064#endif 1061#endif
1065 set_bit(__LINK_STATE_XOFF, &dev->egress_subqueue[queue_index].state); 1062 set_bit(__LINK_STATE_XOFF, &dev->egress_subqueue[queue_index].state);
1066#endif
1067} 1063}
1068 1064
1069/** 1065/**
@@ -1076,12 +1072,8 @@ static inline void netif_stop_subqueue(struct net_device *dev, u16 queue_index)
1076static inline int __netif_subqueue_stopped(const struct net_device *dev, 1072static inline int __netif_subqueue_stopped(const struct net_device *dev,
1077 u16 queue_index) 1073 u16 queue_index)
1078{ 1074{
1079#ifdef CONFIG_NETDEVICES_MULTIQUEUE
1080 return test_bit(__LINK_STATE_XOFF, 1075 return test_bit(__LINK_STATE_XOFF,
1081 &dev->egress_subqueue[queue_index].state); 1076 &dev->egress_subqueue[queue_index].state);
1082#else
1083 return 0;
1084#endif
1085} 1077}
1086 1078
1087static inline int netif_subqueue_stopped(const struct net_device *dev, 1079static inline int netif_subqueue_stopped(const struct net_device *dev,
@@ -1099,7 +1091,6 @@ static inline int netif_subqueue_stopped(const struct net_device *dev,
1099 */ 1091 */
1100static inline void netif_wake_subqueue(struct net_device *dev, u16 queue_index) 1092static inline void netif_wake_subqueue(struct net_device *dev, u16 queue_index)
1101{ 1093{
1102#ifdef CONFIG_NETDEVICES_MULTIQUEUE
1103#ifdef CONFIG_NETPOLL_TRAP 1094#ifdef CONFIG_NETPOLL_TRAP
1104 if (netpoll_trap()) 1095 if (netpoll_trap())
1105 return; 1096 return;
@@ -1107,7 +1098,6 @@ static inline void netif_wake_subqueue(struct net_device *dev, u16 queue_index)
1107 if (test_and_clear_bit(__LINK_STATE_XOFF, 1098 if (test_and_clear_bit(__LINK_STATE_XOFF,
1108 &dev->egress_subqueue[queue_index].state)) 1099 &dev->egress_subqueue[queue_index].state))
1109 __netif_schedule(&dev->tx_queue); 1100 __netif_schedule(&dev->tx_queue);
1110#endif
1111} 1101}
1112 1102
1113/** 1103/**
@@ -1119,11 +1109,7 @@ static inline void netif_wake_subqueue(struct net_device *dev, u16 queue_index)
1119 */ 1109 */
1120static inline int netif_is_multiqueue(const struct net_device *dev) 1110static inline int netif_is_multiqueue(const struct net_device *dev)
1121{ 1111{
1122#ifdef CONFIG_NETDEVICES_MULTIQUEUE
1123 return (!!(NETIF_F_MULTI_QUEUE & dev->features)); 1112 return (!!(NETIF_F_MULTI_QUEUE & dev->features));
1124#else
1125 return 0;
1126#endif
1127} 1113}
1128 1114
1129/* Use this variant when it is known for sure that it 1115/* Use this variant when it is known for sure that it
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 2220b9e2dab0..8f10e3d08fd9 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -305,9 +305,7 @@ struct sk_buff {
305#endif 305#endif
306 306
307 int iif; 307 int iif;
308#ifdef CONFIG_NETDEVICES_MULTIQUEUE
309 __u16 queue_mapping; 308 __u16 queue_mapping;
310#endif
311#ifdef CONFIG_NET_SCHED 309#ifdef CONFIG_NET_SCHED
312 __u16 tc_index; /* traffic control index */ 310 __u16 tc_index; /* traffic control index */
313#ifdef CONFIG_NET_CLS_ACT 311#ifdef CONFIG_NET_CLS_ACT
@@ -1671,25 +1669,17 @@ static inline void skb_init_secmark(struct sk_buff *skb)
1671 1669
1672static inline void skb_set_queue_mapping(struct sk_buff *skb, u16 queue_mapping) 1670static inline void skb_set_queue_mapping(struct sk_buff *skb, u16 queue_mapping)
1673{ 1671{
1674#ifdef CONFIG_NETDEVICES_MULTIQUEUE
1675 skb->queue_mapping = queue_mapping; 1672 skb->queue_mapping = queue_mapping;
1676#endif
1677} 1673}
1678 1674
1679static inline u16 skb_get_queue_mapping(struct sk_buff *skb) 1675static inline u16 skb_get_queue_mapping(struct sk_buff *skb)
1680{ 1676{
1681#ifdef CONFIG_NETDEVICES_MULTIQUEUE
1682 return skb->queue_mapping; 1677 return skb->queue_mapping;
1683#else
1684 return 0;
1685#endif
1686} 1678}
1687 1679
1688static inline void skb_copy_queue_mapping(struct sk_buff *to, const struct sk_buff *from) 1680static inline void skb_copy_queue_mapping(struct sk_buff *to, const struct sk_buff *from)
1689{ 1681{
1690#ifdef CONFIG_NETDEVICES_MULTIQUEUE
1691 to->queue_mapping = from->queue_mapping; 1682 to->queue_mapping = from->queue_mapping;
1692#endif
1693} 1683}
1694 1684
1695static inline int skb_is_gso(const struct sk_buff *skb) 1685static inline int skb_is_gso(const struct sk_buff *skb)