author     David S. Miller <davem@davemloft.net>    2008-07-31 19:58:50 -0400
committer  David S. Miller <davem@davemloft.net>    2008-07-31 19:58:50 -0400
commit     c3f26a269c2421f97f10cf8ed05d5099b573af4d
tree       d0602cbb48742b3e39ab6bdcaa08c342d4cd2cae    /include/linux/netdevice.h
parent     967ab999a090b1a4e7d3c7febfd6d89b42fb4cf4
netdev: Fix lockdep warnings in multiqueue configurations.
When support for multiple TX queues was added, the netif_tx_lock() routines were converted to iterate over all TX queues and grab each queue's spinlock. This causes heartburn for lockdep, and it's not a healthy thing to do with lots of TX queues anyway. So modify this to use a top-level lock and a "frozen" state for the individual TX queues.

Signed-off-by: David S. Miller <davem@davemloft.net>
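In practice the caller-visible API stays the same: code that needs to quiesce all transmit activity still brackets the critical section with netif_tx_lock_bh()/netif_tx_unlock_bh(), but only dev->tx_global_lock is now held across it, while each per-queue _xmit_lock is taken just long enough to set the frozen bit. A minimal, hypothetical driver-side sketch; my_dev_reconfigure() and my_hw_apply_settings() are illustrative names, not part of this patch:

/* Hypothetical driver snippet: freeze all TX queues around a hardware
 * reconfiguration.  Transmit paths that honour the frozen bit back off
 * and requeue until netif_tx_unlock_bh() clears it again.
 */
static void my_dev_reconfigure(struct net_device *dev)
{
        netif_tx_lock_bh(dev);          /* take tx_global_lock, freeze every queue */
        my_hw_apply_settings(dev);
        netif_tx_unlock_bh(dev);        /* unfreeze; non-stopped queues are rescheduled */
}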
Diffstat (limited to 'include/linux/netdevice.h')
-rw-r--r--  include/linux/netdevice.h  |  86
1 file changed, 55 insertions(+), 31 deletions(-)
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index b4d056ceab96..ee583f642a9f 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -440,6 +440,7 @@ static inline void napi_synchronize(const struct napi_struct *n)
 enum netdev_queue_state_t
 {
         __QUEUE_STATE_XOFF,
+        __QUEUE_STATE_FROZEN,
 };
 
 struct netdev_queue {
@@ -636,7 +637,7 @@ struct net_device
         unsigned int            real_num_tx_queues;
 
         unsigned long           tx_queue_len;   /* Max frames per queue allowed */
-
+        spinlock_t              tx_global_lock;
 /*
  * One part is mostly used on xmit path (device)
  */
@@ -1099,6 +1100,11 @@ static inline int netif_queue_stopped(const struct net_device *dev)
         return netif_tx_queue_stopped(netdev_get_tx_queue(dev, 0));
 }
 
+static inline int netif_tx_queue_frozen(const struct netdev_queue *dev_queue)
+{
+        return test_bit(__QUEUE_STATE_FROZEN, &dev_queue->state);
+}
+
 /**
  *      netif_running - test if up
  *      @dev: network device
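The new netif_tx_queue_frozen() helper is meant to sit next to netif_tx_queue_stopped() on the transmit path; its actual callers live outside this header, in the rest of the patch, so the following is only an illustrative sketch of how the check is intended to be used. example_xmit_one() is a hypothetical name:

/* Illustrative transmit-path check (not part of this header): skip the
 * device's hard_start_xmit() while the queue is stopped (XOFF) or frozen
 * by netif_tx_lock(), and let the caller requeue the packet instead.
 */
static int example_xmit_one(struct net_device *dev, struct netdev_queue *txq,
                            struct sk_buff *skb)
{
        int ret = NETDEV_TX_BUSY;

        if (__netif_tx_trylock(txq)) {
                if (!netif_tx_queue_stopped(txq) &&
                    !netif_tx_queue_frozen(txq))
                        ret = dev->hard_start_xmit(skb, dev);
                __netif_tx_unlock(txq);
        }
        return ret;     /* NETDEV_TX_BUSY tells the caller to requeue */
}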
@@ -1475,6 +1481,26 @@ static inline void __netif_tx_lock_bh(struct netdev_queue *txq)
         txq->xmit_lock_owner = smp_processor_id();
 }
 
+static inline int __netif_tx_trylock(struct netdev_queue *txq)
+{
+        int ok = spin_trylock(&txq->_xmit_lock);
+        if (likely(ok))
+                txq->xmit_lock_owner = smp_processor_id();
+        return ok;
+}
+
+static inline void __netif_tx_unlock(struct netdev_queue *txq)
+{
+        txq->xmit_lock_owner = -1;
+        spin_unlock(&txq->_xmit_lock);
+}
+
+static inline void __netif_tx_unlock_bh(struct netdev_queue *txq)
+{
+        txq->xmit_lock_owner = -1;
+        spin_unlock_bh(&txq->_xmit_lock);
+}
+
 /**
  *      netif_tx_lock - grab network device transmit lock
  *      @dev: network device
@@ -1484,12 +1510,23 @@ static inline void __netif_tx_lock_bh(struct netdev_queue *txq)
  */
 static inline void netif_tx_lock(struct net_device *dev)
 {
-        int cpu = smp_processor_id();
         unsigned int i;
+        int cpu;
 
+        spin_lock(&dev->tx_global_lock);
+        cpu = smp_processor_id();
         for (i = 0; i < dev->num_tx_queues; i++) {
                 struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
+
+                /* We are the only thread of execution doing a
+                 * freeze, but we have to grab the _xmit_lock in
+                 * order to synchronize with threads which are in
+                 * the ->hard_start_xmit() handler and already
+                 * checked the frozen bit.
+                 */
                 __netif_tx_lock(txq, cpu);
+                set_bit(__QUEUE_STATE_FROZEN, &txq->state);
+                __netif_tx_unlock(txq);
         }
 }
 
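The comment above is the heart of the fix: the freeze path never holds more than two spinlocks at once (tx_global_lock plus a single _xmit_lock at a time), whereas the old code held every queue's _xmit_lock simultaneously. A schematic of the resulting locking, written as a comment and assuming a transmit path that checks the frozen bit:

/* Locking after this patch (schematic only):
 *
 *   freeze side (netif_tx_lock):          xmit side (per queue):
 *     spin_lock(&dev->tx_global_lock)       __netif_tx_lock(txq, cpu)
 *     for each txq:                         if queue not stopped/frozen:
 *       __netif_tx_lock(txq, cpu)               ->hard_start_xmit()
 *       set_bit(__QUEUE_STATE_FROZEN)       __netif_tx_unlock(txq)
 *       __netif_tx_unlock(txq)
 *     ... critical section ...
 *
 * At most two locks are ever held together, which is what keeps lockdep
 * quiet on devices with many TX queues.
 */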
@@ -1499,40 +1536,22 @@ static inline void netif_tx_lock_bh(struct net_device *dev)
         netif_tx_lock(dev);
 }
 
-static inline int __netif_tx_trylock(struct netdev_queue *txq)
-{
-        int ok = spin_trylock(&txq->_xmit_lock);
-        if (likely(ok))
-                txq->xmit_lock_owner = smp_processor_id();
-        return ok;
-}
-
-static inline int netif_tx_trylock(struct net_device *dev)
-{
-        return __netif_tx_trylock(netdev_get_tx_queue(dev, 0));
-}
-
-static inline void __netif_tx_unlock(struct netdev_queue *txq)
-{
-        txq->xmit_lock_owner = -1;
-        spin_unlock(&txq->_xmit_lock);
-}
-
-static inline void __netif_tx_unlock_bh(struct netdev_queue *txq)
-{
-        txq->xmit_lock_owner = -1;
-        spin_unlock_bh(&txq->_xmit_lock);
-}
-
 static inline void netif_tx_unlock(struct net_device *dev)
 {
         unsigned int i;
 
         for (i = 0; i < dev->num_tx_queues; i++) {
                 struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
-                __netif_tx_unlock(txq);
-        }
 
+                /* No need to grab the _xmit_lock here.  If the
+                 * queue is not stopped for another reason, we
+                 * force a schedule.
+                 */
+                clear_bit(__QUEUE_STATE_FROZEN, &txq->state);
+                if (!test_bit(__QUEUE_STATE_XOFF, &txq->state))
+                        __netif_schedule(txq->qdisc);
+        }
+        spin_unlock(&dev->tx_global_lock);
 }
 
 static inline void netif_tx_unlock_bh(struct net_device *dev)
@@ -1556,13 +1575,18 @@ static inline void netif_tx_unlock_bh(struct net_device *dev)
 static inline void netif_tx_disable(struct net_device *dev)
 {
         unsigned int i;
+        int cpu;
 
-        netif_tx_lock_bh(dev);
+        local_bh_disable();
+        cpu = smp_processor_id();
         for (i = 0; i < dev->num_tx_queues; i++) {
                 struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
+
+                __netif_tx_lock(txq, cpu);
                 netif_tx_stop_queue(txq);
+                __netif_tx_unlock(txq);
         }
-        netif_tx_unlock_bh(dev);
+        local_bh_enable();
 }
 
 static inline void netif_addr_lock(struct net_device *dev)