author     David S. Miller <davem@davemloft.net>  2008-07-31 19:58:50 -0400
committer  David S. Miller <davem@davemloft.net>  2008-07-31 19:58:50 -0400
commit     c3f26a269c2421f97f10cf8ed05d5099b573af4d (patch)
tree       d0602cbb48742b3e39ab6bdcaa08c342d4cd2cae
parent     967ab999a090b1a4e7d3c7febfd6d89b42fb4cf4 (diff)
netdev: Fix lockdep warnings in multiqueue configurations.

When support for multiple TX queues was added, the netif_tx_lock()
routines were converted to iterate over all TX queues and grab each
queue's spinlock. This causes heartburn for lockdep, and it's not a
healthy thing to do with lots of TX queues anyway.

So modify this to use a top-level lock and a "frozen" state for the
individual TX queues.

Signed-off-by: David S. Miller <davem@davemloft.net>
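The pattern in miniature: a device-wide freeze takes one top-level lock and
marks each queue frozen, holding each per-queue lock only long enough to
synchronize with transmitters that may already have sampled the frozen bit;
transmit paths take only their own queue's lock and skip frozen queues.
Below is a minimal, hypothetical userspace sketch of that scheme, with
pthread mutexes and C11 atomics standing in for spinlocks and queue-state
bits. All names in it (struct tx_dev, dev_tx_freeze(), and so on) are
illustrative stand-ins, not kernel APIs.

/* freeze_sketch.c -- illustrative only; build with: cc -pthread freeze_sketch.c */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

#define NUM_TX_QUEUES 4

struct tx_queue {
        pthread_mutex_t xmit_lock;      /* plays the role of _xmit_lock */
        atomic_int frozen;              /* plays the role of __QUEUE_STATE_FROZEN */
};

struct tx_dev {
        pthread_mutex_t tx_global_lock; /* the new top-level lock */
        struct tx_queue txq[NUM_TX_QUEUES];
};

/* Freeze (cf. netif_tx_lock below): take the single global lock, then take
 * each queue's lock only briefly, to synchronize with transmitters that
 * already checked the frozen bit.  At most two locks are held at once. */
static void dev_tx_freeze(struct tx_dev *dev)
{
        pthread_mutex_lock(&dev->tx_global_lock);
        for (int i = 0; i < NUM_TX_QUEUES; i++) {
                pthread_mutex_lock(&dev->txq[i].xmit_lock);
                atomic_store(&dev->txq[i].frozen, 1);
                pthread_mutex_unlock(&dev->txq[i].xmit_lock);
        }
}

/* Unfreeze (cf. netif_tx_unlock below): clear the bits without touching the
 * per-queue locks, then drop the global lock.  (The real patch also
 * reschedules any queue that is not otherwise stopped; omitted here.) */
static void dev_tx_unfreeze(struct tx_dev *dev)
{
        for (int i = 0; i < NUM_TX_QUEUES; i++)
                atomic_store(&dev->txq[i].frozen, 0);
        pthread_mutex_unlock(&dev->tx_global_lock);
}

/* Transmit path: take only this queue's lock, and back off if frozen --
 * the analogue of the netif_tx_queue_frozen() checks added below. */
static int dev_tx_one(struct tx_dev *dev, int q)
{
        struct tx_queue *txq = &dev->txq[q];
        int sent = 0;

        pthread_mutex_lock(&txq->xmit_lock);
        if (!atomic_load(&txq->frozen))
                sent = 1;               /* ...hard_start_xmit() would run here */
        pthread_mutex_unlock(&txq->xmit_lock);
        return sent;
}

int main(void)
{
        struct tx_dev dev = { .tx_global_lock = PTHREAD_MUTEX_INITIALIZER };

        for (int i = 0; i < NUM_TX_QUEUES; i++) {
                pthread_mutex_init(&dev.txq[i].xmit_lock, NULL);
                atomic_init(&dev.txq[i].frozen, 0);
        }

        dev_tx_freeze(&dev);
        printf("frozen: xmit on queue 0 -> %d\n", dev_tx_one(&dev, 0)); /* 0 */
        dev_tx_unfreeze(&dev);
        printf("thawed: xmit on queue 0 -> %d\n", dev_tx_one(&dev, 0)); /* 1 */
        return 0;
}

The per-queue lock/unlock inside dev_tx_freeze() serves the same purpose as
in the netif_tx_lock() hunk below: lockdep only ever sees the global lock
plus one queue lock held together, instead of one held spinlock per queue.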
 drivers/net/ifb.c         | 12
 include/linux/netdevice.h | 86
 net/core/dev.c            |  1
 net/core/netpoll.c        |  1
 net/core/pktgen.c         |  7
 net/sched/sch_generic.c   |  6
 net/sched/sch_teql.c      |  9
 7 files changed, 78 insertions(+), 44 deletions(-)
diff --git a/drivers/net/ifb.c b/drivers/net/ifb.c
index 0960e69b2da4..e4fbefc8c82f 100644
--- a/drivers/net/ifb.c
+++ b/drivers/net/ifb.c
@@ -69,18 +69,20 @@ static void ri_tasklet(unsigned long dev)
         struct net_device *_dev = (struct net_device *)dev;
         struct ifb_private *dp = netdev_priv(_dev);
         struct net_device_stats *stats = &_dev->stats;
+        struct netdev_queue *txq;
         struct sk_buff *skb;
 
+        txq = netdev_get_tx_queue(_dev, 0);
         dp->st_task_enter++;
         if ((skb = skb_peek(&dp->tq)) == NULL) {
                 dp->st_txq_refl_try++;
-                if (netif_tx_trylock(_dev)) {
+                if (__netif_tx_trylock(txq)) {
                         dp->st_rxq_enter++;
                         while ((skb = skb_dequeue(&dp->rq)) != NULL) {
                                 skb_queue_tail(&dp->tq, skb);
                                 dp->st_rx2tx_tran++;
                         }
-                        netif_tx_unlock(_dev);
+                        __netif_tx_unlock(txq);
                 } else {
                         /* reschedule */
                         dp->st_rxq_notenter++;
@@ -115,7 +117,7 @@ static void ri_tasklet(unsigned long dev)
                 BUG();
         }
 
-        if (netif_tx_trylock(_dev)) {
+        if (__netif_tx_trylock(txq)) {
                 dp->st_rxq_check++;
                 if ((skb = skb_peek(&dp->rq)) == NULL) {
                         dp->tasklet_pending = 0;
@@ -123,10 +125,10 @@ static void ri_tasklet(unsigned long dev)
                                 netif_wake_queue(_dev);
                 } else {
                         dp->st_rxq_rsch++;
-                        netif_tx_unlock(_dev);
+                        __netif_tx_unlock(txq);
                         goto resched;
                 }
-                netif_tx_unlock(_dev);
+                __netif_tx_unlock(txq);
         } else {
 resched:
                 dp->tasklet_pending = 1;
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index b4d056ceab96..ee583f642a9f 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -440,6 +440,7 @@ static inline void napi_synchronize(const struct napi_struct *n)
 enum netdev_queue_state_t
 {
         __QUEUE_STATE_XOFF,
+        __QUEUE_STATE_FROZEN,
 };
 
 struct netdev_queue {
@@ -636,7 +637,7 @@ struct net_device
         unsigned int            real_num_tx_queues;
 
         unsigned long           tx_queue_len;   /* Max frames per queue allowed */
-
+        spinlock_t              tx_global_lock;
 /*
  * One part is mostly used on xmit path (device)
  */
@@ -1099,6 +1100,11 @@ static inline int netif_queue_stopped(const struct net_device *dev)
         return netif_tx_queue_stopped(netdev_get_tx_queue(dev, 0));
 }
 
+static inline int netif_tx_queue_frozen(const struct netdev_queue *dev_queue)
+{
+        return test_bit(__QUEUE_STATE_FROZEN, &dev_queue->state);
+}
+
 /**
  *      netif_running - test if up
  *      @dev: network device
@@ -1475,6 +1481,26 @@ static inline void __netif_tx_lock_bh(struct netdev_queue *txq)
         txq->xmit_lock_owner = smp_processor_id();
 }
 
+static inline int __netif_tx_trylock(struct netdev_queue *txq)
+{
+        int ok = spin_trylock(&txq->_xmit_lock);
+        if (likely(ok))
+                txq->xmit_lock_owner = smp_processor_id();
+        return ok;
+}
+
+static inline void __netif_tx_unlock(struct netdev_queue *txq)
+{
+        txq->xmit_lock_owner = -1;
+        spin_unlock(&txq->_xmit_lock);
+}
+
+static inline void __netif_tx_unlock_bh(struct netdev_queue *txq)
+{
+        txq->xmit_lock_owner = -1;
+        spin_unlock_bh(&txq->_xmit_lock);
+}
+
 /**
  *      netif_tx_lock - grab network device transmit lock
  *      @dev: network device
@@ -1484,12 +1510,23 @@ static inline void __netif_tx_lock_bh(struct netdev_queue *txq)
  */
 static inline void netif_tx_lock(struct net_device *dev)
 {
-        int cpu = smp_processor_id();
         unsigned int i;
+        int cpu;
 
+        spin_lock(&dev->tx_global_lock);
+        cpu = smp_processor_id();
         for (i = 0; i < dev->num_tx_queues; i++) {
                 struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
+
+                /* We are the only thread of execution doing a
+                 * freeze, but we have to grab the _xmit_lock in
+                 * order to synchronize with threads which are in
+                 * the ->hard_start_xmit() handler and already
+                 * checked the frozen bit.
+                 */
                 __netif_tx_lock(txq, cpu);
+                set_bit(__QUEUE_STATE_FROZEN, &txq->state);
+                __netif_tx_unlock(txq);
         }
 }
 
@@ -1499,40 +1536,22 @@ static inline void netif_tx_lock_bh(struct net_device *dev)
         netif_tx_lock(dev);
 }
 
-static inline int __netif_tx_trylock(struct netdev_queue *txq)
-{
-        int ok = spin_trylock(&txq->_xmit_lock);
-        if (likely(ok))
-                txq->xmit_lock_owner = smp_processor_id();
-        return ok;
-}
-
-static inline int netif_tx_trylock(struct net_device *dev)
-{
-        return __netif_tx_trylock(netdev_get_tx_queue(dev, 0));
-}
-
-static inline void __netif_tx_unlock(struct netdev_queue *txq)
-{
-        txq->xmit_lock_owner = -1;
-        spin_unlock(&txq->_xmit_lock);
-}
-
-static inline void __netif_tx_unlock_bh(struct netdev_queue *txq)
-{
-        txq->xmit_lock_owner = -1;
-        spin_unlock_bh(&txq->_xmit_lock);
-}
-
 static inline void netif_tx_unlock(struct net_device *dev)
 {
         unsigned int i;
 
         for (i = 0; i < dev->num_tx_queues; i++) {
                 struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
-                __netif_tx_unlock(txq);
-        }
 
+                /* No need to grab the _xmit_lock here.  If the
+                 * queue is not stopped for another reason, we
+                 * force a schedule.
+                 */
+                clear_bit(__QUEUE_STATE_FROZEN, &txq->state);
+                if (!test_bit(__QUEUE_STATE_XOFF, &txq->state))
+                        __netif_schedule(txq->qdisc);
+        }
+        spin_unlock(&dev->tx_global_lock);
 }
 
 static inline void netif_tx_unlock_bh(struct net_device *dev)
@@ -1556,13 +1575,18 @@ static inline void netif_tx_unlock_bh(struct net_device *dev)
 static inline void netif_tx_disable(struct net_device *dev)
 {
         unsigned int i;
+        int cpu;
 
-        netif_tx_lock_bh(dev);
+        local_bh_disable();
+        cpu = smp_processor_id();
         for (i = 0; i < dev->num_tx_queues; i++) {
                 struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
+
+                __netif_tx_lock(txq, cpu);
                 netif_tx_stop_queue(txq);
+                __netif_tx_unlock(txq);
         }
-        netif_tx_unlock_bh(dev);
+        local_bh_enable();
 }
 
 static inline void netif_addr_lock(struct net_device *dev)
diff --git a/net/core/dev.c b/net/core/dev.c
index 63d6bcddbf46..69320a56a084 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -4200,6 +4200,7 @@ static void netdev_init_queues(struct net_device *dev)
 {
         netdev_init_one_queue(dev, &dev->rx_queue, NULL);
         netdev_for_each_tx_queue(dev, netdev_init_one_queue, NULL);
+        spin_lock_init(&dev->tx_global_lock);
 }
 
 /**
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index c12720895ecf..6c7af390be0a 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -70,6 +70,7 @@ static void queue_process(struct work_struct *work)
                 local_irq_save(flags);
                 __netif_tx_lock(txq, smp_processor_id());
                 if (netif_tx_queue_stopped(txq) ||
+                    netif_tx_queue_frozen(txq) ||
                     dev->hard_start_xmit(skb, dev) != NETDEV_TX_OK) {
                         skb_queue_head(&npinfo->txq, skb);
                         __netif_tx_unlock(txq);
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index c7d484f7e1c4..3284605f2ec7 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -3305,6 +3305,7 @@ static __inline__ void pktgen_xmit(struct pktgen_dev *pkt_dev)
 
         txq = netdev_get_tx_queue(odev, queue_map);
         if (netif_tx_queue_stopped(txq) ||
+            netif_tx_queue_frozen(txq) ||
             need_resched()) {
                 idle_start = getCurUs();
 
@@ -3320,7 +3321,8 @@ static __inline__ void pktgen_xmit(struct pktgen_dev *pkt_dev)
 
         pkt_dev->idle_acc += getCurUs() - idle_start;
 
-        if (netif_tx_queue_stopped(txq)) {
+        if (netif_tx_queue_stopped(txq) ||
+            netif_tx_queue_frozen(txq)) {
                 pkt_dev->next_tx_us = getCurUs();       /* TODO */
                 pkt_dev->next_tx_ns = 0;
                 goto out;       /* Try the next interface */
@@ -3352,7 +3354,8 @@ static __inline__ void pktgen_xmit(struct pktgen_dev *pkt_dev)
         txq = netdev_get_tx_queue(odev, queue_map);
 
         __netif_tx_lock_bh(txq);
-        if (!netif_tx_queue_stopped(txq)) {
+        if (!netif_tx_queue_stopped(txq) &&
+            !netif_tx_queue_frozen(txq)) {
 
                 atomic_inc(&(pkt_dev->skb->users));
         retry_now:
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 345838a2e369..9c9cd4d94890 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -135,7 +135,8 @@ static inline int qdisc_restart(struct Qdisc *q)
         txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
 
         HARD_TX_LOCK(dev, txq, smp_processor_id());
-        if (!netif_subqueue_stopped(dev, skb))
+        if (!netif_tx_queue_stopped(txq) &&
+            !netif_tx_queue_frozen(txq))
                 ret = dev_hard_start_xmit(skb, dev, txq);
         HARD_TX_UNLOCK(dev, txq);
 
@@ -162,7 +163,8 @@ static inline int qdisc_restart(struct Qdisc *q)
                 break;
         }
 
-        if (ret && netif_tx_queue_stopped(txq))
+        if (ret && (netif_tx_queue_stopped(txq) ||
+                    netif_tx_queue_frozen(txq)))
                 ret = 0;
 
         return ret;
diff --git a/net/sched/sch_teql.c b/net/sched/sch_teql.c
index 537223642b6e..2c35c678563b 100644
--- a/net/sched/sch_teql.c
+++ b/net/sched/sch_teql.c
@@ -305,10 +305,11 @@ restart:
 
                 switch (teql_resolve(skb, skb_res, slave)) {
                 case 0:
-                        if (netif_tx_trylock(slave)) {
-                                if (!__netif_subqueue_stopped(slave, subq) &&
+                        if (__netif_tx_trylock(slave_txq)) {
+                                if (!netif_tx_queue_stopped(slave_txq) &&
+                                    !netif_tx_queue_frozen(slave_txq) &&
                                     slave->hard_start_xmit(skb, slave) == 0) {
-                                        netif_tx_unlock(slave);
+                                        __netif_tx_unlock(slave_txq);
                                         master->slaves = NEXT_SLAVE(q);
                                         netif_wake_queue(dev);
                                         master->stats.tx_packets++;
@@ -316,7 +317,7 @@ restart:
                                                 qdisc_pkt_len(skb);
                                         return 0;
                                 }
-                                netif_tx_unlock(slave);
+                                __netif_tx_unlock(slave_txq);
                         }
                         if (netif_queue_stopped(dev))
                                 busy = 1;