author	David S. Miller <davem@davemloft.net>	2008-07-31 19:58:50 -0400
committer	David S. Miller <davem@davemloft.net>	2008-07-31 19:58:50 -0400
commit	c3f26a269c2421f97f10cf8ed05d5099b573af4d (patch)
tree	d0602cbb48742b3e39ab6bdcaa08c342d4cd2cae /net/core
parent	967ab999a090b1a4e7d3c7febfd6d89b42fb4cf4 (diff)
netdev: Fix lockdep warnings in multiqueue configurations.
When support for multiple TX queues was added, the netif_tx_lock()
routines were converted to iterate over all TX queues and grab each
queue's spinlock. This causes heartburn for lockdep, and it's not a
healthy thing to do with lots of TX queues anyway.

So modify this to use a top-level lock and a "frozen" state for the
individual TX queues.

Signed-off-by: David S. Miller <davem@davemloft.net>
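As a rough sketch of the pattern described above (not the literal patch:
the netif_tx_lock()/netif_tx_unlock() helpers live in
include/linux/netdevice.h, outside the net/core diff shown below, and the
__QUEUE_STATE_FROZEN bit name is assumed here), freezing every queue under
one top-level lock might look like:

	static inline void netif_tx_lock(struct net_device *dev)
	{
		unsigned int i;
		int cpu;

		/* One top-level lock instead of nesting every queue's
		 * spinlock, so lockdep sees a single lock at a single
		 * depth regardless of the number of TX queues.
		 */
		spin_lock(&dev->tx_global_lock);
		cpu = smp_processor_id();
		for (i = 0; i < dev->num_tx_queues; i++) {
			struct netdev_queue *txq = netdev_get_tx_queue(dev, i);

			/* Briefly take each queue's _xmit_lock so that
			 * threads already inside ->hard_start_xmit()
			 * observe the frozen bit before we proceed.
			 */
			__netif_tx_lock(txq, cpu);
			set_bit(__QUEUE_STATE_FROZEN, &txq->state);
			__netif_tx_unlock(txq);
		}
	}

Single-queue transmit paths then keep taking only their own queue lock and
add a cheap test of the frozen bit, which is exactly what the netpoll and
pktgen hunks below do.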
Diffstat (limited to 'net/core')
-rw-r--r--	net/core/dev.c	1
-rw-r--r--	net/core/netpoll.c	1
-rw-r--r--	net/core/pktgen.c	7
3 files changed, 7 insertions, 2 deletions
diff --git a/net/core/dev.c b/net/core/dev.c
index 63d6bcddbf46..69320a56a084 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -4200,6 +4200,7 @@ static void netdev_init_queues(struct net_device *dev)
 {
 	netdev_init_one_queue(dev, &dev->rx_queue, NULL);
 	netdev_for_each_tx_queue(dev, netdev_init_one_queue, NULL);
+	spin_lock_init(&dev->tx_global_lock);
 }
 
 /**
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index c12720895ecf..6c7af390be0a 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -70,6 +70,7 @@ static void queue_process(struct work_struct *work)
 		local_irq_save(flags);
 		__netif_tx_lock(txq, smp_processor_id());
 		if (netif_tx_queue_stopped(txq) ||
+		    netif_tx_queue_frozen(txq) ||
 		    dev->hard_start_xmit(skb, dev) != NETDEV_TX_OK) {
 			skb_queue_head(&npinfo->txq, skb);
 			__netif_tx_unlock(txq);
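The netif_tx_queue_frozen() test added here (and in the pktgen hunks
below) is not defined anywhere in the net/core portion of the diff;
presumably it is a one-line predicate on the new per-queue state bit in
include/linux/netdevice.h, along the lines of:

	static inline int netif_tx_queue_frozen(const struct netdev_queue *dev_queue)
	{
		/* True while some thread holds the device's tx_global_lock
		 * and has marked this queue frozen (sketch; bit name
		 * assumed, as above).
		 */
		return test_bit(__QUEUE_STATE_FROZEN, &dev_queue->state);
	}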
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index c7d484f7e1c4..3284605f2ec7 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -3305,6 +3305,7 @@ static __inline__ void pktgen_xmit(struct pktgen_dev *pkt_dev)
 
 	txq = netdev_get_tx_queue(odev, queue_map);
 	if (netif_tx_queue_stopped(txq) ||
+	    netif_tx_queue_frozen(txq) ||
 	    need_resched()) {
 		idle_start = getCurUs();
 
@@ -3320,7 +3321,8 @@ static __inline__ void pktgen_xmit(struct pktgen_dev *pkt_dev)
 
 	pkt_dev->idle_acc += getCurUs() - idle_start;
 
-	if (netif_tx_queue_stopped(txq)) {
+	if (netif_tx_queue_stopped(txq) ||
+	    netif_tx_queue_frozen(txq)) {
 		pkt_dev->next_tx_us = getCurUs();	/* TODO */
 		pkt_dev->next_tx_ns = 0;
 		goto out;	/* Try the next interface */
@@ -3352,7 +3354,8 @@ static __inline__ void pktgen_xmit(struct pktgen_dev *pkt_dev)
 	txq = netdev_get_tx_queue(odev, queue_map);
 
 	__netif_tx_lock_bh(txq);
-	if (!netif_tx_queue_stopped(txq)) {
+	if (!netif_tx_queue_stopped(txq) &&
+	    !netif_tx_queue_frozen(txq)) {
 
 		atomic_inc(&(pkt_dev->skb->users));
 retry_now:
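For completeness, the unfreeze counterpart would clear each queue's frozen
bit before dropping the top-level lock. A sketch under the same
assumptions (the real unlock path likely also reschedules queues that
accumulated work while frozen):

	static inline void netif_tx_unlock(struct net_device *dev)
	{
		unsigned int i;

		for (i = 0; i < dev->num_tx_queues; i++) {
			struct netdev_queue *txq = netdev_get_tx_queue(dev, i);

			/* No need for the per-queue lock here; clearing
			 * the bit lets transmitters that re-check it
			 * proceed on this queue.
			 */
			clear_bit(__QUEUE_STATE_FROZEN, &txq->state);
		}
		spin_unlock(&dev->tx_global_lock);
	}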