Diffstat (limited to 'include/linux/netdevice.h')

 -rw-r--r--   include/linux/netdevice.h   90
 1 file changed, 56 insertions(+), 34 deletions(-)
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index b4d056ceab96..488c56e649b5 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -61,9 +61,7 @@ struct wireless_dev;
 #define NET_XMIT_DROP		1	/* skb dropped			*/
 #define NET_XMIT_CN		2	/* congestion notification	*/
 #define NET_XMIT_POLICED	3	/* skb is shot by police	*/
-#define NET_XMIT_BYPASS		4	/* packet does not leave via dequeue;
-					   (TC use only - dev_queue_xmit
-					   returns this as NET_XMIT_SUCCESS) */
+#define NET_XMIT_MASK		0xFFFF	/* qdisc flags in net/sch_generic.h */
 
 /* Backlog congestion levels */
 #define NET_RX_SUCCESS	0   /* keep 'em coming, baby */
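NET_XMIT_BYPASS is gone; qdisc-private status now travels as flag bits above the low 16 bits of the enqueue return value, and NET_XMIT_MASK strips them before the result reaches callers outside the packet scheduler (the flags themselves live in net/sch_generic.h, as the new comment notes). A minimal sketch of the masking pattern under that assumed flag layout; the wrapper name is made up:

    /* Hypothetical caller-side masking: flags such as __NET_XMIT_STOLEN
     * sit above bit 15, visible inside the scheduler but never leaked
     * to network-stack callers.
     */
    static int example_enqueue(struct sk_buff *skb, struct Qdisc *q)
    {
            int rc = q->enqueue(skb, q);    /* may carry qdisc-only flags */

            return rc & NET_XMIT_MASK;      /* plain NET_XMIT_* code only */
    }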
@@ -440,6 +438,7 @@ static inline void napi_synchronize(const struct napi_struct *n)
 enum netdev_queue_state_t
 {
 	__QUEUE_STATE_XOFF,
+	__QUEUE_STATE_FROZEN,
 };
 
 struct netdev_queue {
@@ -636,7 +635,7 @@ struct net_device
 	unsigned int		real_num_tx_queues;
 
 	unsigned long		tx_queue_len;	/* Max frames per queue allowed */
-
+	spinlock_t		tx_global_lock;
 /*
  * One part is mostly used on xmit path (device)
  */
@@ -1099,6 +1098,11 @@ static inline int netif_queue_stopped(const struct net_device *dev)
 	return netif_tx_queue_stopped(netdev_get_tx_queue(dev, 0));
 }
 
+static inline int netif_tx_queue_frozen(const struct netdev_queue *dev_queue)
+{
+	return test_bit(__QUEUE_STATE_FROZEN, &dev_queue->state);
+}
+
 /**
  *	netif_running - test if up
  *	@dev: network device
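netif_tx_queue_frozen() is the read side of the new freeze protocol and pairs with netif_tx_queue_stopped(), which tests __QUEUE_STATE_XOFF; a queue should only be handed packets while neither bit is set. A small sketch using just the helpers visible in this header (example_queue_usable is not a kernel function):

    #include <linux/netdevice.h>

    /* A transmit path treats a frozen queue like a stopped one: leave
     * the skb queued and retry after the thaw.
     */
    static inline bool example_queue_usable(const struct netdev_queue *txq)
    {
            return !netif_tx_queue_stopped(txq) && !netif_tx_queue_frozen(txq);
    }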
@@ -1475,6 +1479,26 @@ static inline void __netif_tx_lock_bh(struct netdev_queue *txq)
 	txq->xmit_lock_owner = smp_processor_id();
 }
 
+static inline int __netif_tx_trylock(struct netdev_queue *txq)
+{
+	int ok = spin_trylock(&txq->_xmit_lock);
+	if (likely(ok))
+		txq->xmit_lock_owner = smp_processor_id();
+	return ok;
+}
+
+static inline void __netif_tx_unlock(struct netdev_queue *txq)
+{
+	txq->xmit_lock_owner = -1;
+	spin_unlock(&txq->_xmit_lock);
+}
+
+static inline void __netif_tx_unlock_bh(struct netdev_queue *txq)
+{
+	txq->xmit_lock_owner = -1;
+	spin_unlock_bh(&txq->_xmit_lock);
+}
+
 /**
  *	netif_tx_lock - grab network device transmit lock
  *	@dev: network device
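The three helpers above are unchanged; they move up so that netif_tx_lock() below can call __netif_tx_unlock() after its definition. For reference, the usual trylock pattern in a TX-completion path looks roughly like this; example_reclaim_ring() is a stand-in for driver-specific cleanup:

    #include <linux/netdevice.h>

    static void example_reclaim_ring(struct netdev_queue *txq)
    {
            /* driver-specific descriptor reclaim would go here */
    }

    /* Opportunistic cleanup: if an xmit currently owns the lock, skip
     * the work instead of spinning for it.
     */
    static void example_tx_poll(struct netdev_queue *txq)
    {
            if (__netif_tx_trylock(txq)) {
                    example_reclaim_ring(txq);
                    __netif_tx_unlock(txq);
            }
    }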
@@ -1484,12 +1508,23 @@ static inline void __netif_tx_lock_bh(struct netdev_queue *txq)
  */
 static inline void netif_tx_lock(struct net_device *dev)
 {
-	int cpu = smp_processor_id();
 	unsigned int i;
+	int cpu;
 
+	spin_lock(&dev->tx_global_lock);
+	cpu = smp_processor_id();
 	for (i = 0; i < dev->num_tx_queues; i++) {
 		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
+
+		/* We are the only thread of execution doing a
+		 * freeze, but we have to grab the _xmit_lock in
+		 * order to synchronize with threads which are in
+		 * the ->hard_start_xmit() handler and already
+		 * checked the frozen bit.
+		 */
 		__netif_tx_lock(txq, cpu);
+		set_bit(__QUEUE_STATE_FROZEN, &txq->state);
+		__netif_tx_unlock(txq);
 	}
 }
 
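This hunk is the heart of the change. netif_tx_lock() used to acquire and hold one _xmit_lock per TX queue, which trips lockdep on multiqueue devices; now it takes the single tx_global_lock and only bounces through each _xmit_lock long enough to publish the frozen bit to any transmitter already inside ->hard_start_xmit(). A sketch of a typical caller, assuming a hypothetical driver reset path:

    #include <linux/netdevice.h>

    /* With every queue frozen, no new ->hard_start_xmit() call can
     * touch the hardware, so ring state can be rebuilt safely.
     */
    static void example_reset_task(struct net_device *dev)
    {
            netif_tx_lock_bh(dev);          /* freeze all TX queues */
            /* ... driver-specific: drain rings, reprogram the NIC ... */
            netif_tx_unlock_bh(dev);        /* thaw; qdiscs get rescheduled */
    }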
@@ -1499,40 +1534,22 @@ static inline void netif_tx_lock_bh(struct net_device *dev)
 	netif_tx_lock(dev);
 }
 
-static inline int __netif_tx_trylock(struct netdev_queue *txq)
-{
-	int ok = spin_trylock(&txq->_xmit_lock);
-	if (likely(ok))
-		txq->xmit_lock_owner = smp_processor_id();
-	return ok;
-}
-
-static inline int netif_tx_trylock(struct net_device *dev)
-{
-	return __netif_tx_trylock(netdev_get_tx_queue(dev, 0));
-}
-
-static inline void __netif_tx_unlock(struct netdev_queue *txq)
-{
-	txq->xmit_lock_owner = -1;
-	spin_unlock(&txq->_xmit_lock);
-}
-
-static inline void __netif_tx_unlock_bh(struct netdev_queue *txq)
-{
-	txq->xmit_lock_owner = -1;
-	spin_unlock_bh(&txq->_xmit_lock);
-}
-
 static inline void netif_tx_unlock(struct net_device *dev)
 {
 	unsigned int i;
 
 	for (i = 0; i < dev->num_tx_queues; i++) {
 		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
-		__netif_tx_unlock(txq);
-	}
 
+		/* No need to grab the _xmit_lock here.  If the
+		 * queue is not stopped for another reason, we
+		 * force a schedule.
+		 */
+		clear_bit(__QUEUE_STATE_FROZEN, &txq->state);
+		if (!test_bit(__QUEUE_STATE_XOFF, &txq->state))
+			__netif_schedule(txq->qdisc);
+	}
+	spin_unlock(&dev->tx_global_lock);
 }
 
 static inline void netif_tx_unlock_bh(struct net_device *dev)
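The unlock side clears the frozen bit without retaking _xmit_lock: any transmitter that saw the bit has already backed off, and __netif_schedule() gives its qdisc a fresh chance to run unless the driver still has the queue stopped (XOFF). The transmitter's half of the handshake would look roughly like the following, modeled on the qdisc restart pattern; the function name and requeue handling are illustrative:

    #include <linux/netdevice.h>

    static int example_xmit_one(struct net_device *dev,
                                struct netdev_queue *txq, struct sk_buff *skb)
    {
            int ret = NETDEV_TX_BUSY;

            __netif_tx_lock(txq, smp_processor_id());
            /* Re-check both bits under the lock; netif_tx_lock() sets
             * the frozen bit while briefly holding this same _xmit_lock,
             * so a freeze in progress cannot be missed here.
             */
            if (!netif_tx_queue_stopped(txq) && !netif_tx_queue_frozen(txq))
                    ret = dev->hard_start_xmit(skb, dev);
            __netif_tx_unlock(txq);

            return ret;     /* NETDEV_TX_BUSY: caller requeues the skb */
    }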
@@ -1556,13 +1573,18 @@ static inline void netif_tx_unlock_bh(struct net_device *dev)
 static inline void netif_tx_disable(struct net_device *dev)
 {
 	unsigned int i;
+	int cpu;
 
-	netif_tx_lock_bh(dev);
+	local_bh_disable();
+	cpu = smp_processor_id();
 	for (i = 0; i < dev->num_tx_queues; i++) {
 		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
+
+		__netif_tx_lock(txq, cpu);
 		netif_tx_stop_queue(txq);
+		__netif_tx_unlock(txq);
 	}
-	netif_tx_unlock_bh(dev);
+	local_bh_enable();
 }
 
 static inline void netif_addr_lock(struct net_device *dev)
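netif_tx_disable() now stops each queue under that queue's own _xmit_lock instead of freezing the whole device: taking _xmit_lock before setting XOFF means any ->hard_start_xmit() already running on that queue finishes first (for drivers using the standard xmit locking, not LLTX). A sketch of the typical call site, assuming a hypothetical close routine:

    #include <linux/netdevice.h>

    static int example_close(struct net_device *dev)
    {
            netif_tx_disable(dev);  /* stop all TX queues; xmit quiesced */
            /* ... driver-specific: stop DMA, mask IRQs, free rings ... */
            return 0;
    }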
