author    David S. Miller <davem@davemloft.net>  2008-07-09 02:13:53 -0400
committer David S. Miller <davem@davemloft.net>  2008-07-09 02:13:53 -0400
commit    c773e847ea8f6812804e40f52399c6921a00eab1 (patch)
tree      952e0e262cc0b0f2136bc2a62938ae1d186f896a /include/linux/netdevice.h
parent    eb6aafe3f843cb0e939546c03540a3b4911b6964 (diff)
netdev: Move _xmit_lock and xmit_lock_owner into netdev_queue.
Accesses are mostly structured such that when there are multiple
TX queues the code transformations will be a little bit simpler.

Signed-off-by: David S. Miller <davem@davemloft.net>
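To make the shape of the change concrete, here is a minimal sketch (not
part of the commit) of how a transmit section looks once the lock state
lives in the queue; the example function is hypothetical, and it assumes
the single tx_queue embedded in struct net_device at this point in the
series:

	/* Hypothetical illustration only. The lock and its owner now sit
	 * in struct netdev_queue, so callers lock the queue rather than
	 * the device; the dev-level helpers just forward to dev->tx_queue.
	 */
	static void example_xmit_section(struct net_device *dev)
	{
		struct netdev_queue *txq = &dev->tx_queue;

		__netif_tx_lock(txq, smp_processor_id());	/* txq->xmit_lock_owner = cpu */
		/* ... work protected by txq->_xmit_lock ... */
		__netif_tx_unlock(txq);				/* owner back to -1 */
	}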
Diffstat (limited to 'include/linux/netdevice.h')
-rw-r--r--  include/linux/netdevice.h  62
1 file changed, 39 insertions(+), 23 deletions(-)
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 28aa8e77cee9..c8d5f128858d 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -453,6 +453,8 @@ struct netdev_queue {
 	struct net_device	*dev;
 	struct Qdisc		*qdisc;
 	struct sk_buff		*gso_skb;
+	spinlock_t		_xmit_lock;
+	int			xmit_lock_owner;
 	struct Qdisc		*qdisc_sleeping;
 	struct list_head	qdisc_list;
 	struct netdev_queue	*next_sched;
@@ -639,12 +641,6 @@ struct net_device
 /*
  * One part is mostly used on xmit path (device)
  */
-	/* hard_start_xmit synchronizer */
-	spinlock_t		_xmit_lock ____cacheline_aligned_in_smp;
-	/* cpu id of processor entered to hard_start_xmit or -1,
-	   if nobody entered there.
-	 */
-	int			xmit_lock_owner;
 	void			*priv;	/* pointer to private data */
 	int			(*hard_start_xmit) (struct sk_buff *skb,
 						    struct net_device *dev);
@@ -1402,52 +1398,72 @@ static inline void netif_rx_complete(struct net_device *dev,
  *
  * Get network device transmit lock
  */
-static inline void __netif_tx_lock(struct net_device *dev, int cpu)
+static inline void __netif_tx_lock(struct netdev_queue *txq, int cpu)
 {
-	spin_lock(&dev->_xmit_lock);
-	dev->xmit_lock_owner = cpu;
+	spin_lock(&txq->_xmit_lock);
+	txq->xmit_lock_owner = cpu;
 }
 
 static inline void netif_tx_lock(struct net_device *dev)
 {
-	__netif_tx_lock(dev, smp_processor_id());
+	__netif_tx_lock(&dev->tx_queue, smp_processor_id());
+}
+
+static inline void __netif_tx_lock_bh(struct netdev_queue *txq)
+{
+	spin_lock_bh(&txq->_xmit_lock);
+	txq->xmit_lock_owner = smp_processor_id();
 }
 
 static inline void netif_tx_lock_bh(struct net_device *dev)
 {
-	spin_lock_bh(&dev->_xmit_lock);
-	dev->xmit_lock_owner = smp_processor_id();
+	__netif_tx_lock_bh(&dev->tx_queue);
 }
 
-static inline int netif_tx_trylock(struct net_device *dev)
+static inline int __netif_tx_trylock(struct netdev_queue *txq)
 {
-	int ok = spin_trylock(&dev->_xmit_lock);
+	int ok = spin_trylock(&txq->_xmit_lock);
 	if (likely(ok))
-		dev->xmit_lock_owner = smp_processor_id();
+		txq->xmit_lock_owner = smp_processor_id();
 	return ok;
 }
 
+static inline int netif_tx_trylock(struct net_device *dev)
+{
+	return __netif_tx_trylock(&dev->tx_queue);
+}
+
+static inline void __netif_tx_unlock(struct netdev_queue *txq)
+{
+	txq->xmit_lock_owner = -1;
+	spin_unlock(&txq->_xmit_lock);
+}
+
 static inline void netif_tx_unlock(struct net_device *dev)
 {
-	dev->xmit_lock_owner = -1;
-	spin_unlock(&dev->_xmit_lock);
+	__netif_tx_unlock(&dev->tx_queue);
+}
+
+static inline void __netif_tx_unlock_bh(struct netdev_queue *txq)
+{
+	txq->xmit_lock_owner = -1;
+	spin_unlock_bh(&txq->_xmit_lock);
 }
 
 static inline void netif_tx_unlock_bh(struct net_device *dev)
 {
-	dev->xmit_lock_owner = -1;
-	spin_unlock_bh(&dev->_xmit_lock);
+	__netif_tx_unlock_bh(&dev->tx_queue);
 }
 
-#define HARD_TX_LOCK(dev, cpu) {			\
+#define HARD_TX_LOCK(dev, txq, cpu) {			\
 	if ((dev->features & NETIF_F_LLTX) == 0) {	\
-		__netif_tx_lock(dev, cpu);		\
+		__netif_tx_lock(txq, cpu);		\
 	}						\
 }
 
-#define HARD_TX_UNLOCK(dev) {				\
+#define HARD_TX_UNLOCK(dev, txq) {			\
 	if ((dev->features & NETIF_F_LLTX) == 0) {	\
-		netif_tx_unlock(dev);			\
+		__netif_tx_unlock(txq);			\
 	}						\
 }
 
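For context, a simplified sketch of how the reworked macros are meant to
be used on the transmit path; this is loosely modeled on a
dev_queue_xmit-style caller, is not taken from the commit, and omits the
queue lookup, stopped-queue checks, and requeueing of the real code:

	static int example_queue_xmit(struct sk_buff *skb, struct net_device *dev)
	{
		struct netdev_queue *txq = &dev->tx_queue;	/* single-queue layout assumed */
		int cpu = smp_processor_id();
		int rc = NETDEV_TX_BUSY;

		/* Recursion guard: a CPU that already owns this queue's
		 * xmit lock must not try to take it again. */
		if (txq->xmit_lock_owner != cpu) {
			HARD_TX_LOCK(dev, txq, cpu);	/* no-op for NETIF_F_LLTX drivers */
			rc = dev->hard_start_xmit(skb, dev);
			HARD_TX_UNLOCK(dev, txq);
		}
		return rc;
	}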