author		David S. Miller <davem@davemloft.net>	2008-07-17 04:56:23 -0400
committer	David S. Miller <davem@davemloft.net>	2008-07-17 22:21:07 -0400
commit		fd2ea0a79faad824258af5dcec1927aa24d81c16 (patch)
tree		644fd4ce92227cc319c7a54c63ea07a96b8c6b8d /include/linux/netdevice.h
parent		24344d2600108b9b79a60c0e4c43b3c499856d14 (diff)
net: Use queue aware tests throughout.
This effectively "flips the switch" by making the core networking
and multiqueue-aware drivers use the new TX multiqueue structures.
Non-multiqueue drivers need no changes. The interfaces they use, such
as netif_stop_queue(), degenerate into an operation on TX queue zero,
so everything "just works" for them.
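Concretely, a single-queue driver's unchanged call now simply forwards
to queue zero, as the netif_stop_queue() hunk below shows:

	/* Unchanged driver code ... */
	netif_stop_queue(dev);

	/* ... is now equivalent to the explicit queue-zero form: */
	netif_tx_stop_queue(netdev_get_tx_queue(dev, 0));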
Code that really wants to do "X" to all TX queues now invokes a
routine that does so, such as netif_tx_wake_all_queues(),
netif_tx_stop_all_queues(), etc.
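As one illustration (the driver function names here are hypothetical,
not part of this patch), a multiqueue driver's reset path might use
these helpers like so:

	/* Quiesce all TX queues, reconfigure, then let traffic flow again. */
	static void example_reset(struct net_device *dev)
	{
		netif_tx_stop_all_queues(dev);	/* stop queues 0..num_tx_queues-1 */
		example_hw_reinit(dev);		/* hypothetical hardware reinit */
		netif_tx_wake_all_queues(dev);	/* wake every queue, reschedule qdiscs */
	}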
pktgen and netpoll required a little more surgery than the others.
In particular, the pktgen changes, whilst functional, could be improved
considerably: the initial check in pktgen_xmit() will sometimes check
the wrong queue, which is mostly harmless. The right fix is probably to
invoke fill_packet() earlier.
The bulk of the netpoll changes is to make the code operate solely on
the TX queue indicated by the SKB queue mapping.
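In rough sketch form, that lookup is:

	/* Resolve the TX queue an skb is bound to via its queue mapping. */
	struct netdev_queue *txq =
		netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));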
Setting of the SKB queue mapping is entirely confined to
net/core/dev.c:dev_pick_tx(). If we end up needing any kind of
special semantics (drops, for example), they will be implemented here.
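A sketch of the shape of that choke point (the hash helper named below
is illustrative, not necessarily the function this tree uses):

	static struct netdev_queue *dev_pick_tx(struct net_device *dev,
						struct sk_buff *skb)
	{
		u16 queue_index = 0;

		/* Spread flows only when more than one queue is active. */
		if (dev->real_num_tx_queues > 1)
			queue_index = example_tx_hash(dev, skb);

		/* Record the choice; everything downstream honors it. */
		skb_set_queue_mapping(skb, queue_index);
		return netdev_get_tx_queue(dev, queue_index);
	}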
Finally, we now have a "real_num_tx_queues" field, in which the driver
indicates how many TX queues are actually active.
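For a driver, the split between the two counts might look like this
(the private struct and queue numbers are hypothetical):

	/* Allocate room for 8 TX queues up front; the hardware only
	 * brought 4 online, so advertise that via real_num_tx_queues.
	 */
	dev = alloc_etherdev_mq(sizeof(struct example_priv), 8);
	if (!dev)
		return -ENOMEM;
	dev->real_num_tx_queues = 4;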
With IGB changes from Jeff Kirsher.
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'include/linux/netdevice.h')
-rw-r--r--	include/linux/netdevice.h	82
1 file changed, 73 insertions(+), 9 deletions(-)
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index c02227b9dd7b..b5c1e7df64fc 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -642,7 +642,13 @@ struct net_device
 	struct netdev_queue	rx_queue;
 
 	struct netdev_queue	*_tx ____cacheline_aligned_in_smp;
+
+	/* Number of TX queues allocated at alloc_netdev_mq() time */
 	unsigned int		num_tx_queues;
+
+	/* Number of TX queues currently active in device */
+	unsigned int		real_num_tx_queues;
+
 	unsigned long		tx_queue_len;	/* Max frames per queue allowed */
 
 	/*
@@ -1000,6 +1006,14 @@ static inline void netif_schedule(struct net_device *dev)
 	netif_schedule_queue(netdev_get_tx_queue(dev, 0));
 }
 
+static inline void netif_tx_schedule_all(struct net_device *dev)
+{
+	unsigned int i;
+
+	for (i = 0; i < dev->num_tx_queues; i++)
+		netif_schedule_queue(netdev_get_tx_queue(dev, i));
+}
+
 /**
  *	netif_start_queue - allow transmit
  *	@dev: network device
@@ -1016,6 +1030,16 @@ static inline void netif_start_queue(struct net_device *dev)
 	netif_tx_start_queue(netdev_get_tx_queue(dev, 0));
 }
 
+static inline void netif_tx_start_all_queues(struct net_device *dev)
+{
+	unsigned int i;
+
+	for (i = 0; i < dev->num_tx_queues; i++) {
+		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
+		netif_tx_start_queue(txq);
+	}
+}
+
 /**
  *	netif_wake_queue - restart transmit
  *	@dev: network device
@@ -1040,6 +1064,16 @@ static inline void netif_wake_queue(struct net_device *dev)
 	netif_tx_wake_queue(netdev_get_tx_queue(dev, 0));
 }
 
+static inline void netif_tx_wake_all_queues(struct net_device *dev)
+{
+	unsigned int i;
+
+	for (i = 0; i < dev->num_tx_queues; i++) {
+		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
+		netif_tx_wake_queue(txq);
+	}
+}
+
 /**
  *	netif_stop_queue - stop transmitted packets
  *	@dev: network device
@@ -1057,6 +1091,16 @@ static inline void netif_stop_queue(struct net_device *dev)
 	netif_tx_stop_queue(netdev_get_tx_queue(dev, 0));
 }
 
+static inline void netif_tx_stop_all_queues(struct net_device *dev)
+{
+	unsigned int i;
+
+	for (i = 0; i < dev->num_tx_queues; i++) {
+		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
+		netif_tx_stop_queue(txq);
+	}
+}
+
 /**
  *	netif_queue_stopped - test if transmit queue is flowblocked
  *	@dev: network device
@@ -1100,7 +1144,8 @@ static inline int netif_running(const struct net_device *dev)
  */
 static inline void netif_start_subqueue(struct net_device *dev, u16 queue_index)
 {
-	clear_bit(__QUEUE_STATE_XOFF, &dev->egress_subqueue[queue_index].state);
+	struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
+	clear_bit(__QUEUE_STATE_XOFF, &txq->state);
 }
 
 /**
@@ -1112,11 +1157,12 @@ static inline void netif_start_subqueue(struct net_device *dev, u16 queue_index)
  */
 static inline void netif_stop_subqueue(struct net_device *dev, u16 queue_index)
 {
+	struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
 #ifdef CONFIG_NETPOLL_TRAP
 	if (netpoll_trap())
 		return;
 #endif
-	set_bit(__QUEUE_STATE_XOFF, &dev->egress_subqueue[queue_index].state);
+	set_bit(__QUEUE_STATE_XOFF, &txq->state);
 }
 
 /**
@@ -1129,8 +1175,8 @@ static inline void netif_stop_subqueue(struct net_device *dev, u16 queue_index)
 static inline int __netif_subqueue_stopped(const struct net_device *dev,
 					   u16 queue_index)
 {
-	return test_bit(__QUEUE_STATE_XOFF,
-			&dev->egress_subqueue[queue_index].state);
+	struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
+	return test_bit(__QUEUE_STATE_XOFF, &txq->state);
 }
 
 static inline int netif_subqueue_stopped(const struct net_device *dev,
@@ -1148,13 +1194,13 @@ static inline int netif_subqueue_stopped(const struct net_device *dev,
  */
 static inline void netif_wake_subqueue(struct net_device *dev, u16 queue_index)
 {
+	struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
 #ifdef CONFIG_NETPOLL_TRAP
 	if (netpoll_trap())
 		return;
 #endif
-	if (test_and_clear_bit(__QUEUE_STATE_XOFF,
-			       &dev->egress_subqueue[queue_index].state))
-		__netif_schedule(netdev_get_tx_queue(dev, 0));
+	if (test_and_clear_bit(__QUEUE_STATE_XOFF, &txq->state))
+		__netif_schedule(txq);
 }
 
 /**
@@ -1198,7 +1244,8 @@ extern int		dev_set_mtu(struct net_device *, int);
 extern int		dev_set_mac_address(struct net_device *,
 					    struct sockaddr *);
 extern int		dev_hard_start_xmit(struct sk_buff *skb,
-					    struct net_device *dev);
+					    struct net_device *dev,
+					    struct netdev_queue *txq);
 
 extern int		netdev_budget;
 
@@ -1447,6 +1494,12 @@ static inline void __netif_tx_lock(struct netdev_queue *txq, int cpu)
 	txq->xmit_lock_owner = cpu;
 }
 
+static inline void __netif_tx_lock_bh(struct netdev_queue *txq)
+{
+	spin_lock_bh(&txq->_xmit_lock);
+	txq->xmit_lock_owner = smp_processor_id();
+}
+
 static inline void netif_tx_lock(struct net_device *dev)
 {
 	int cpu = smp_processor_id();
@@ -1483,6 +1536,12 @@ static inline void __netif_tx_unlock(struct netdev_queue *txq)
 	spin_unlock(&txq->_xmit_lock);
 }
 
+static inline void __netif_tx_unlock_bh(struct netdev_queue *txq)
+{
+	txq->xmit_lock_owner = -1;
+	spin_unlock_bh(&txq->_xmit_lock);
+}
+
 static inline void netif_tx_unlock(struct net_device *dev)
 {
 	unsigned int i;
@@ -1514,8 +1573,13 @@ static inline void netif_tx_unlock_bh(struct net_device *dev)
 
 static inline void netif_tx_disable(struct net_device *dev)
 {
+	unsigned int i;
+
 	netif_tx_lock_bh(dev);
-	netif_stop_queue(dev);
+	for (i = 0; i < dev->num_tx_queues; i++) {
+		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
+		netif_tx_stop_queue(txq);
+	}
 	netif_tx_unlock_bh(dev);
 }
 