diff options
author | David S. Miller <davem@davemloft.net> | 2008-07-16 05:15:04 -0400 |
---|---|---|
committer | David S. Miller <davem@davemloft.net> | 2008-07-17 22:21:20 -0400 |
commit | 37437bb2e1ae8af470dfcd5b4ff454110894ccaf (patch) | |
tree | 1795e78a7648252b0c92c972df12b776a28437d7 /include/linux/netdevice.h | |
parent | 7698b4fcabcd790efc4f226bada1e7b5870653af (diff) |
pkt_sched: Schedule qdiscs instead of netdev_queue.
When we have shared qdiscs, packets come out of the qdiscs
for multiple transmit queues.
Therefore it doesn't make any sense to schedule the transmit
queue when logically we cannot know ahead of time the TX
queue of the SKB that the qdisc->dequeue() will give us.
Just for sanity I added a BUG check to make sure we never
get into a state where the noop_qdisc is scheduled.
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'include/linux/netdevice.h')
-rw-r--r-- | include/linux/netdevice.h | 12 |
1 file changed, 5 insertions, 7 deletions
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 9240a95793be..1e839fa01434 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h | |||
@@ -275,7 +275,6 @@ enum netdev_state_t | |||
275 | { | 275 | { |
276 | __LINK_STATE_START, | 276 | __LINK_STATE_START, |
277 | __LINK_STATE_PRESENT, | 277 | __LINK_STATE_PRESENT, |
278 | __LINK_STATE_SCHED, | ||
279 | __LINK_STATE_NOCARRIER, | 278 | __LINK_STATE_NOCARRIER, |
280 | __LINK_STATE_LINKWATCH_PENDING, | 279 | __LINK_STATE_LINKWATCH_PENDING, |
281 | __LINK_STATE_DORMANT, | 280 | __LINK_STATE_DORMANT, |
@@ -452,7 +451,6 @@ struct netdev_queue { | |||
452 | int xmit_lock_owner; | 451 | int xmit_lock_owner; |
453 | struct Qdisc *qdisc_sleeping; | 452 | struct Qdisc *qdisc_sleeping; |
454 | struct list_head qdisc_list; | 453 | struct list_head qdisc_list; |
455 | struct netdev_queue *next_sched; | ||
456 | } ____cacheline_aligned_in_smp; | 454 | } ____cacheline_aligned_in_smp; |
457 | 455 | ||
458 | /* | 456 | /* |
@@ -969,7 +967,7 @@ static inline int unregister_gifconf(unsigned int family) | |||
969 | */ | 967 | */ |
970 | struct softnet_data | 968 | struct softnet_data |
971 | { | 969 | { |
972 | struct netdev_queue *output_queue; | 970 | struct Qdisc *output_queue; |
973 | struct sk_buff_head input_pkt_queue; | 971 | struct sk_buff_head input_pkt_queue; |
974 | struct list_head poll_list; | 972 | struct list_head poll_list; |
975 | struct sk_buff *completion_queue; | 973 | struct sk_buff *completion_queue; |
@@ -984,12 +982,12 @@ DECLARE_PER_CPU(struct softnet_data,softnet_data); | |||
984 | 982 | ||
985 | #define HAVE_NETIF_QUEUE | 983 | #define HAVE_NETIF_QUEUE |
986 | 984 | ||
987 | extern void __netif_schedule(struct netdev_queue *txq); | 985 | extern void __netif_schedule(struct Qdisc *q); |
988 | 986 | ||
989 | static inline void netif_schedule_queue(struct netdev_queue *txq) | 987 | static inline void netif_schedule_queue(struct netdev_queue *txq) |
990 | { | 988 | { |
991 | if (!test_bit(__QUEUE_STATE_XOFF, &txq->state)) | 989 | if (!test_bit(__QUEUE_STATE_XOFF, &txq->state)) |
992 | __netif_schedule(txq); | 990 | __netif_schedule(txq->qdisc); |
993 | } | 991 | } |
994 | 992 | ||
995 | static inline void netif_tx_schedule_all(struct net_device *dev) | 993 | static inline void netif_tx_schedule_all(struct net_device *dev) |
@@ -1042,7 +1040,7 @@ static inline void netif_tx_wake_queue(struct netdev_queue *dev_queue) | |||
1042 | } | 1040 | } |
1043 | #endif | 1041 | #endif |
1044 | if (test_and_clear_bit(__QUEUE_STATE_XOFF, &dev_queue->state)) | 1042 | if (test_and_clear_bit(__QUEUE_STATE_XOFF, &dev_queue->state)) |
1045 | __netif_schedule(dev_queue); | 1043 | __netif_schedule(dev_queue->qdisc); |
1046 | } | 1044 | } |
1047 | 1045 | ||
1048 | static inline void netif_wake_queue(struct net_device *dev) | 1046 | static inline void netif_wake_queue(struct net_device *dev) |
@@ -1186,7 +1184,7 @@ static inline void netif_wake_subqueue(struct net_device *dev, u16 queue_index) | |||
1186 | return; | 1184 | return; |
1187 | #endif | 1185 | #endif |
1188 | if (test_and_clear_bit(__QUEUE_STATE_XOFF, &txq->state)) | 1186 | if (test_and_clear_bit(__QUEUE_STATE_XOFF, &txq->state)) |
1189 | __netif_schedule(txq); | 1187 | __netif_schedule(txq->qdisc); |
1190 | } | 1188 | } |
1191 | 1189 | ||
1192 | /** | 1190 | /** |