diff options
author | David S. Miller <davem@davemloft.net> | 2008-07-17 03:34:19 -0400 |
---|---|---|
committer | David S. Miller <davem@davemloft.net> | 2008-07-17 22:21:00 -0400 |
commit | e8a0464cc950972824e2e128028ae3db666ec1ed (patch) | |
tree | 5022b95396c0f3b313531bc39b19543c03551b9a /net/sched/sch_teql.c | |
parent | 070825b3840a743e21ebcc44f8279708a4fed977 (diff) |
netdev: Allocate multiple queues for TX.
alloc_netdev_mq() now allocates an array of netdev_queue
structures for TX, based upon the queue_count argument.
Furthermore, all accesses to the TX queues are now vectored
through the netdev_get_tx_queue() and netdev_for_each_tx_queue()
interfaces. This makes it easy to grep the tree for all
things that want to get to a TX queue of a net device.
Problem spots which are not really multiqueue aware yet, and
only work with one queue, can easily be spotted by grepping
for all netdev_get_tx_queue() calls that pass in a zero index.
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/sched/sch_teql.c')
-rw-r--r-- | net/sched/sch_teql.c | 21 |
1 file changed, 14 insertions, 7 deletions
diff --git a/net/sched/sch_teql.c b/net/sched/sch_teql.c index 8ac05981be2..44a2c3451f4 100644 --- a/net/sched/sch_teql.c +++ b/net/sched/sch_teql.c | |||
@@ -111,7 +111,7 @@ teql_dequeue(struct Qdisc* sch) | |||
111 | struct sk_buff *skb; | 111 | struct sk_buff *skb; |
112 | 112 | ||
113 | skb = __skb_dequeue(&dat->q); | 113 | skb = __skb_dequeue(&dat->q); |
114 | dat_queue = &dat->m->dev->tx_queue; | 114 | dat_queue = netdev_get_tx_queue(dat->m->dev, 0); |
115 | if (skb == NULL) { | 115 | if (skb == NULL) { |
116 | struct net_device *m = qdisc_dev(dat_queue->qdisc); | 116 | struct net_device *m = qdisc_dev(dat_queue->qdisc); |
117 | if (m) { | 117 | if (m) { |
@@ -155,10 +155,13 @@ teql_destroy(struct Qdisc* sch) | |||
155 | if (q == master->slaves) { | 155 | if (q == master->slaves) { |
156 | master->slaves = NEXT_SLAVE(q); | 156 | master->slaves = NEXT_SLAVE(q); |
157 | if (q == master->slaves) { | 157 | if (q == master->slaves) { |
158 | struct netdev_queue *txq; | ||
159 | |||
160 | txq = netdev_get_tx_queue(master->dev, 0); | ||
158 | master->slaves = NULL; | 161 | master->slaves = NULL; |
159 | spin_lock_bh(&master->dev->tx_queue.lock); | 162 | spin_lock_bh(&txq->lock); |
160 | qdisc_reset(master->dev->tx_queue.qdisc); | 163 | qdisc_reset(txq->qdisc); |
161 | spin_unlock_bh(&master->dev->tx_queue.lock); | 164 | spin_unlock_bh(&txq->lock); |
162 | } | 165 | } |
163 | } | 166 | } |
164 | skb_queue_purge(&dat->q); | 167 | skb_queue_purge(&dat->q); |
@@ -218,7 +221,8 @@ static int teql_qdisc_init(struct Qdisc *sch, struct nlattr *opt) | |||
218 | static int | 221 | static int |
219 | __teql_resolve(struct sk_buff *skb, struct sk_buff *skb_res, struct net_device *dev) | 222 | __teql_resolve(struct sk_buff *skb, struct sk_buff *skb_res, struct net_device *dev) |
220 | { | 223 | { |
221 | struct teql_sched_data *q = qdisc_priv(dev->tx_queue.qdisc); | 224 | struct netdev_queue *dev_queue = netdev_get_tx_queue(dev, 0); |
225 | struct teql_sched_data *q = qdisc_priv(dev_queue->qdisc); | ||
222 | struct neighbour *mn = skb->dst->neighbour; | 226 | struct neighbour *mn = skb->dst->neighbour; |
223 | struct neighbour *n = q->ncache; | 227 | struct neighbour *n = q->ncache; |
224 | 228 | ||
@@ -254,7 +258,8 @@ __teql_resolve(struct sk_buff *skb, struct sk_buff *skb_res, struct net_device * | |||
254 | static inline int teql_resolve(struct sk_buff *skb, | 258 | static inline int teql_resolve(struct sk_buff *skb, |
255 | struct sk_buff *skb_res, struct net_device *dev) | 259 | struct sk_buff *skb_res, struct net_device *dev) |
256 | { | 260 | { |
257 | if (dev->tx_queue.qdisc == &noop_qdisc) | 261 | struct netdev_queue *txq = netdev_get_tx_queue(dev, 0); |
262 | if (txq->qdisc == &noop_qdisc) | ||
258 | return -ENODEV; | 263 | return -ENODEV; |
259 | 264 | ||
260 | if (dev->header_ops == NULL || | 265 | if (dev->header_ops == NULL || |
@@ -285,8 +290,10 @@ restart: | |||
285 | 290 | ||
286 | do { | 291 | do { |
287 | struct net_device *slave = qdisc_dev(q); | 292 | struct net_device *slave = qdisc_dev(q); |
293 | struct netdev_queue *slave_txq; | ||
288 | 294 | ||
289 | if (slave->tx_queue.qdisc_sleeping != q) | 295 | slave_txq = netdev_get_tx_queue(slave, 0); |
296 | if (slave_txq->qdisc_sleeping != q) | ||
290 | continue; | 297 | continue; |
291 | if (netif_queue_stopped(slave) || | 298 | if (netif_queue_stopped(slave) || |
292 | __netif_subqueue_stopped(slave, subq) || | 299 | __netif_subqueue_stopped(slave, subq) || |