| author    | David S. Miller <davem@davemloft.net> | 2008-07-17 03:34:19 -0400 |
|-----------|---------------------------------------|---------------------------|
| committer | David S. Miller <davem@davemloft.net> | 2008-07-17 22:21:00 -0400 |
| commit    | e8a0464cc950972824e2e128028ae3db666ec1ed (patch) | |
| tree      | 5022b95396c0f3b313531bc39b19543c03551b9a /include/linux/netdevice.h | |
| parent    | 070825b3840a743e21ebcc44f8279708a4fed977 (diff) | |
netdev: Allocate multiple queues for TX.
alloc_netdev_mq() now allocates an array of netdev_queue
structures for TX, based upon the queue_count argument.
Furthermore, all accesses to the TX queues are now vectored
through the netdev_get_tx_queue() and netdev_for_each_tx_queue()
interfaces. This makes it easy to grep the tree for all
things that want to get to a TX queue of a net device.
Problem spots which are not really multiqueue aware yet, and
only work with one queue, can easily be spotted by grepping
for all netdev_get_tx_queue() calls that pass in a zero index.
Signed-off-by: David S. Miller <davem@davemloft.net>
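As a quick illustration of the new calling convention (not part of the patch itself): single-queue code paths reach their queue through netdev_get_tx_queue(dev, 0), while per-queue work is expressed as a callback passed to netdev_for_each_tx_queue(). The example_* helpers below are hypothetical driver code, shown only as a sketch of the interfaces added here.

```c
#include <linux/netdevice.h>

/* Hypothetical single-queue path: the zero index makes it easy to grep
 * for code that is not yet multiqueue aware. */
static void example_stop_default_txq(struct net_device *dev)
{
	struct netdev_queue *txq = netdev_get_tx_queue(dev, 0);

	netif_tx_stop_queue(txq);
}

/* Callback invoked once per TX queue by netdev_for_each_tx_queue(). */
static void example_count_one_txq(struct net_device *dev,
				  struct netdev_queue *txq,
				  void *arg)
{
	unsigned int *count = arg;

	(*count)++;
}

/* Hypothetical multiqueue-aware path: visit every TX queue of the device. */
static unsigned int example_count_txqs(struct net_device *dev)
{
	unsigned int count = 0;

	netdev_for_each_tx_queue(dev, example_count_one_txq, &count);
	return count;
}
```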
Diffstat (limited to 'include/linux/netdevice.h')

-rw-r--r--  include/linux/netdevice.h | 69

1 files changed, 46 insertions, 23 deletions
```diff
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 570cf7affa72..f25d4f5a31b0 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -463,7 +463,7 @@ struct netdev_queue {
 	struct Qdisc		*qdisc_sleeping;
 	struct list_head	qdisc_list;
 	struct netdev_queue	*next_sched;
-};
+} ____cacheline_aligned_in_smp;
 
 /*
  * The DEVICE structure.
@@ -641,7 +641,9 @@ struct net_device
 	unsigned char		broadcast[MAX_ADDR_LEN];	/* hw bcast add	*/
 
 	struct netdev_queue	rx_queue;
-	struct netdev_queue	tx_queue ____cacheline_aligned_in_smp;
+
+	struct netdev_queue	*_tx ____cacheline_aligned_in_smp;
+	unsigned int		num_tx_queues;
 	unsigned long		tx_queue_len;	/* Max frames per queue allowed */
 
 	/*
@@ -764,6 +766,25 @@ struct net_device
 #define NETDEV_ALIGN		32
 #define NETDEV_ALIGN_CONST	(NETDEV_ALIGN - 1)
 
+static inline
+struct netdev_queue *netdev_get_tx_queue(const struct net_device *dev,
+					  unsigned int index)
+{
+	return &dev->_tx[index];
+}
+
+static inline void netdev_for_each_tx_queue(struct net_device *dev,
+					    void (*f)(struct net_device *,
+						      struct netdev_queue *,
+						      void *),
+					    void *arg)
+{
+	unsigned int i;
+
+	for (i = 0; i < dev->num_tx_queues; i++)
+		f(dev, &dev->_tx[i], arg);
+}
+
 /*
  * Net namespace inlines
  */
@@ -977,7 +998,7 @@ static inline void netif_schedule_queue(struct netdev_queue *txq)
 
 static inline void netif_schedule(struct net_device *dev)
 {
-	netif_schedule_queue(&dev->tx_queue);
+	netif_schedule_queue(netdev_get_tx_queue(dev, 0));
 }
 
 /**
@@ -993,7 +1014,7 @@ static inline void netif_tx_start_queue(struct netdev_queue *dev_queue)
 
 static inline void netif_start_queue(struct net_device *dev)
 {
-	netif_tx_start_queue(&dev->tx_queue);
+	netif_tx_start_queue(netdev_get_tx_queue(dev, 0));
 }
 
 /**
@@ -1017,7 +1038,7 @@ static inline void netif_tx_wake_queue(struct netdev_queue *dev_queue)
 
 static inline void netif_wake_queue(struct net_device *dev)
 {
-	netif_tx_wake_queue(&dev->tx_queue);
+	netif_tx_wake_queue(netdev_get_tx_queue(dev, 0));
 }
 
 /**
@@ -1034,7 +1055,7 @@ static inline void netif_tx_stop_queue(struct netdev_queue *dev_queue)
 
 static inline void netif_stop_queue(struct net_device *dev)
 {
-	netif_tx_stop_queue(&dev->tx_queue);
+	netif_tx_stop_queue(netdev_get_tx_queue(dev, 0));
 }
 
 /**
@@ -1050,7 +1071,7 @@ static inline int netif_tx_queue_stopped(const struct netdev_queue *dev_queue)
 
 static inline int netif_queue_stopped(const struct net_device *dev)
 {
-	return netif_tx_queue_stopped(&dev->tx_queue);
+	return netif_tx_queue_stopped(netdev_get_tx_queue(dev, 0));
 }
 
 /**
@@ -1134,7 +1155,7 @@ static inline void netif_wake_subqueue(struct net_device *dev, u16 queue_index)
 #endif
 	if (test_and_clear_bit(__QUEUE_STATE_XOFF,
 			       &dev->egress_subqueue[queue_index].state))
-		__netif_schedule(&dev->tx_queue);
+		__netif_schedule(netdev_get_tx_queue(dev, 0));
 }
 
 /**
@@ -1430,18 +1451,19 @@ static inline void __netif_tx_lock(struct netdev_queue *txq, int cpu)
 
 static inline void netif_tx_lock(struct net_device *dev)
 {
-	__netif_tx_lock(&dev->tx_queue, smp_processor_id());
-}
+	int cpu = smp_processor_id();
+	unsigned int i;
 
-static inline void __netif_tx_lock_bh(struct netdev_queue *txq)
-{
-	spin_lock_bh(&txq->_xmit_lock);
-	txq->xmit_lock_owner = smp_processor_id();
+	for (i = 0; i < dev->num_tx_queues; i++) {
+		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
+		__netif_tx_lock(txq, cpu);
+	}
 }
 
 static inline void netif_tx_lock_bh(struct net_device *dev)
 {
-	__netif_tx_lock_bh(&dev->tx_queue);
+	local_bh_disable();
+	netif_tx_lock(dev);
 }
 
 static inline int __netif_tx_trylock(struct netdev_queue *txq)
@@ -1454,7 +1476,7 @@ static inline int __netif_tx_trylock(struct netdev_queue *txq)
 
 static inline int netif_tx_trylock(struct net_device *dev)
 {
-	return __netif_tx_trylock(&dev->tx_queue);
+	return __netif_tx_trylock(netdev_get_tx_queue(dev, 0));
 }
 
 static inline void __netif_tx_unlock(struct netdev_queue *txq)
@@ -1465,18 +1487,19 @@ static inline void __netif_tx_unlock(struct netdev_queue *txq)
 
 static inline void netif_tx_unlock(struct net_device *dev)
 {
-	__netif_tx_unlock(&dev->tx_queue);
-}
+	unsigned int i;
+
+	for (i = 0; i < dev->num_tx_queues; i++) {
+		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
+		__netif_tx_unlock(txq);
+	}
 
-static inline void __netif_tx_unlock_bh(struct netdev_queue *txq)
-{
-	txq->xmit_lock_owner = -1;
-	spin_unlock_bh(&txq->_xmit_lock);
 }
 
 static inline void netif_tx_unlock_bh(struct net_device *dev)
 {
-	__netif_tx_unlock_bh(&dev->tx_queue);
+	netif_tx_unlock(dev);
+	local_bh_enable();
 }
 
 #define HARD_TX_LOCK(dev, txq, cpu) {			\
```
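Note on the locking rework visible in the last hunks: netif_tx_lock()/netif_tx_unlock() now acquire and release the _xmit_lock of every TX queue rather than a single embedded queue, while the trylock variant still only covers queue 0. A minimal, hypothetical sketch of how a driver-wide TX freeze could sit on top of the reworked helpers (the function name is illustrative, not from this patch):

```c
#include <linux/netdevice.h>

/* Illustrative only: quiesce transmission on every queue of a device,
 * assuming the multiqueue netif_tx_lock_bh()/netif_tx_unlock_bh()
 * introduced by this patch. */
static void example_freeze_all_tx(struct net_device *dev)
{
	unsigned int i;

	netif_tx_lock_bh(dev);		/* takes _xmit_lock on all TX queues */
	for (i = 0; i < dev->num_tx_queues; i++)
		netif_tx_stop_queue(netdev_get_tx_queue(dev, i));
	netif_tx_unlock_bh(dev);	/* drops the locks, re-enables BH */
}
```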