author     David S. Miller <davem@davemloft.net>   2008-07-31 19:58:50 -0400
committer  David S. Miller <davem@davemloft.net>   2008-07-31 19:58:50 -0400
commit     c3f26a269c2421f97f10cf8ed05d5099b573af4d (patch)
tree       d0602cbb48742b3e39ab6bdcaa08c342d4cd2cae /net
parent     967ab999a090b1a4e7d3c7febfd6d89b42fb4cf4 (diff)
netdev: Fix lockdep warnings in multiqueue configurations.
When support for multiple TX queues was added, the
netif_tx_lock() routines were converted to iterate over
all TX queues and grab each queue's spinlock.

This causes heartburn for lockdep and it's not a healthy
thing to do with lots of TX queues anyway.

So modify this to use a top-level lock and a "frozen"
state for the individual TX queues.
Signed-off-by: David S. Miller <davem@davemloft.net>
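The locking helpers themselves live in include/linux/netdevice.h and fall outside this 'net'-limited view. As a rough sketch of the idea only (not the verbatim kernel code; the per-queue __QUEUE_STATE_FROZEN bit and dev->num_tx_queues count are assumptions here, since neither appears in the hunks below), the top-level lock plus frozen bit works along these lines:

/* Sketch only -- not the verbatim header change. */
static inline void netif_tx_lock(struct net_device *dev)
{
	unsigned int i;
	int cpu;

	/* One top-level lock instead of holding every queue's spinlock. */
	spin_lock(&dev->tx_global_lock);
	cpu = smp_processor_id();
	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);

		/* Take each per-queue lock only long enough to set the
		 * frozen bit, so any transmitter that already passed its
		 * frozen check has drained before we proceed.
		 */
		__netif_tx_lock(txq, cpu);
		set_bit(__QUEUE_STATE_FROZEN, &txq->state);
		__netif_tx_unlock(txq);
	}
}

static inline void netif_tx_unlock(struct net_device *dev)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);

		/* Transmit paths may run again once the bit clears. */
		clear_bit(__QUEUE_STATE_FROZEN, &txq->state);
	}
	spin_unlock(&dev->tx_global_lock);
}

static inline int netif_tx_queue_frozen(const struct netdev_queue *dev_queue)
{
	/* Checked by xmit paths under their own queue lock (see below). */
	return test_bit(__QUEUE_STATE_FROZEN, &dev_queue->state);
}

The transmit paths patched below take only their own queue's lock, so the netif_tx_queue_frozen() check made under that lock is what keeps them from transmitting while another CPU holds tx_global_lock.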
Diffstat (limited to 'net')
-rw-r--r--  net/core/dev.c           |  1
-rw-r--r--  net/core/netpoll.c       |  1
-rw-r--r--  net/core/pktgen.c        |  7
-rw-r--r--  net/sched/sch_generic.c  |  6
-rw-r--r--  net/sched/sch_teql.c     |  9
5 files changed, 16 insertions, 8 deletions
diff --git a/net/core/dev.c b/net/core/dev.c
index 63d6bcddbf46..69320a56a084 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -4200,6 +4200,7 @@ static void netdev_init_queues(struct net_device *dev)
 {
 	netdev_init_one_queue(dev, &dev->rx_queue, NULL);
 	netdev_for_each_tx_queue(dev, netdev_init_one_queue, NULL);
+	spin_lock_init(&dev->tx_global_lock);
 }
 
 /**
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index c12720895ecf..6c7af390be0a 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -70,6 +70,7 @@ static void queue_process(struct work_struct *work)
 		local_irq_save(flags);
 		__netif_tx_lock(txq, smp_processor_id());
 		if (netif_tx_queue_stopped(txq) ||
+		    netif_tx_queue_frozen(txq) ||
 		    dev->hard_start_xmit(skb, dev) != NETDEV_TX_OK) {
 			skb_queue_head(&npinfo->txq, skb);
 			__netif_tx_unlock(txq);
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index c7d484f7e1c4..3284605f2ec7 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -3305,6 +3305,7 @@ static __inline__ void pktgen_xmit(struct pktgen_dev *pkt_dev)
 
 	txq = netdev_get_tx_queue(odev, queue_map);
 	if (netif_tx_queue_stopped(txq) ||
+	    netif_tx_queue_frozen(txq) ||
 	    need_resched()) {
 		idle_start = getCurUs();
 
@@ -3320,7 +3321,8 @@ static __inline__ void pktgen_xmit(struct pktgen_dev *pkt_dev)
 
 		pkt_dev->idle_acc += getCurUs() - idle_start;
 
-		if (netif_tx_queue_stopped(txq)) {
+		if (netif_tx_queue_stopped(txq) ||
+		    netif_tx_queue_frozen(txq)) {
 			pkt_dev->next_tx_us = getCurUs();	/* TODO */
 			pkt_dev->next_tx_ns = 0;
 			goto out;	/* Try the next interface */
@@ -3352,7 +3354,8 @@ static __inline__ void pktgen_xmit(struct pktgen_dev *pkt_dev)
 	txq = netdev_get_tx_queue(odev, queue_map);
 
 	__netif_tx_lock_bh(txq);
-	if (!netif_tx_queue_stopped(txq)) {
+	if (!netif_tx_queue_stopped(txq) &&
+	    !netif_tx_queue_frozen(txq)) {
 
 		atomic_inc(&(pkt_dev->skb->users));
 	retry_now:
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 345838a2e369..9c9cd4d94890 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -135,7 +135,8 @@ static inline int qdisc_restart(struct Qdisc *q)
 	txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
 
 	HARD_TX_LOCK(dev, txq, smp_processor_id());
-	if (!netif_subqueue_stopped(dev, skb))
+	if (!netif_tx_queue_stopped(txq) &&
+	    !netif_tx_queue_frozen(txq))
 		ret = dev_hard_start_xmit(skb, dev, txq);
 	HARD_TX_UNLOCK(dev, txq);
 
@@ -162,7 +163,8 @@ static inline int qdisc_restart(struct Qdisc *q)
 		break;
 	}
 
-	if (ret && netif_tx_queue_stopped(txq))
+	if (ret && (netif_tx_queue_stopped(txq) ||
+		    netif_tx_queue_frozen(txq)))
 		ret = 0;
 
 	return ret;
diff --git a/net/sched/sch_teql.c b/net/sched/sch_teql.c
index 537223642b6e..2c35c678563b 100644
--- a/net/sched/sch_teql.c
+++ b/net/sched/sch_teql.c
@@ -305,10 +305,11 @@ restart:
 
 		switch (teql_resolve(skb, skb_res, slave)) {
 		case 0:
-			if (netif_tx_trylock(slave)) {
-				if (!__netif_subqueue_stopped(slave, subq) &&
+			if (__netif_tx_trylock(slave_txq)) {
+				if (!netif_tx_queue_stopped(slave_txq) &&
+				    !netif_tx_queue_frozen(slave_txq) &&
 				    slave->hard_start_xmit(skb, slave) == 0) {
-					netif_tx_unlock(slave);
+					__netif_tx_unlock(slave_txq);
 					master->slaves = NEXT_SLAVE(q);
 					netif_wake_queue(dev);
 					master->stats.tx_packets++;
@@ -316,7 +317,7 @@ restart:
 						qdisc_pkt_len(skb);
 					return 0;
 				}
-				netif_tx_unlock(slave);
+				__netif_tx_unlock(slave_txq);
 			}
 			if (netif_queue_stopped(dev))
 				busy = 1;