about summary refs log tree commit diff stats
path: root/net/core/dev.c
diff options
context:
space:
mode:
authorDavid S. Miller <davem@davemloft.net>2008-07-08 20:18:23 -0400
committerDavid S. Miller <davem@davemloft.net>2008-07-08 20:18:23 -0400
commitdc2b48475a0a36f8b3bbb2da60d3a006dc5c2c84 (patch)
treeb2421a338840bd1c675f4f91de7c7cf03863fb78 /net/core/dev.c
parent5ce2d488fe039ddd86a638496cf704df86c74eeb (diff)
netdev: Move queue_lock into struct netdev_queue.
The lock is now an attribute of the device queue. One thing to notice is that "suspicious" places emerge which will need specific training about multiple queue handling. They are so marked with explicit "netdev->rx_queue" and "netdev->tx_queue" references. Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/core/dev.c')
-rw-r--r-- net/core/dev.c | 33
1 file changed, 22 insertions(+), 11 deletions(-)
diff --git a/net/core/dev.c b/net/core/dev.c
index 9b281c906eb0..05011048b86c 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1667,6 +1667,7 @@ out_kfree_skb:
 int dev_queue_xmit(struct sk_buff *skb)
 {
 	struct net_device *dev = skb->dev;
+	struct netdev_queue *txq;
 	struct Qdisc *q;
 	int rc = -ENOMEM;
 
@@ -1699,14 +1700,15 @@ int dev_queue_xmit(struct sk_buff *skb)
 	}
 
 gso:
-	spin_lock_prefetch(&dev->queue_lock);
+	txq = &dev->tx_queue;
+	spin_lock_prefetch(&txq->lock);
 
 	/* Disable soft irqs for various locks below. Also
 	 * stops preemption for RCU.
 	 */
 	rcu_read_lock_bh();
 
-	/* Updates of qdisc are serialized by queue_lock.
+	/* Updates of qdisc are serialized by queue->lock.
 	 * The struct Qdisc which is pointed to by qdisc is now a
 	 * rcu structure - it may be accessed without acquiring
 	 * a lock (but the structure may be stale.) The freeing of the
@@ -1714,7 +1716,7 @@ gso:
 	 * more references to it.
 	 *
 	 * If the qdisc has an enqueue function, we still need to
-	 * hold the queue_lock before calling it, since queue_lock
+	 * hold the queue->lock before calling it, since queue->lock
 	 * also serializes access to the device queue.
 	 */
 
@@ -1724,19 +1726,19 @@ gso:
 #endif
 	if (q->enqueue) {
 		/* Grab device queue */
-		spin_lock(&dev->queue_lock);
+		spin_lock(&txq->lock);
 		q = dev->qdisc;
 		if (q->enqueue) {
 			/* reset queue_mapping to zero */
 			skb_set_queue_mapping(skb, 0);
 			rc = q->enqueue(skb, q);
 			qdisc_run(dev);
-			spin_unlock(&dev->queue_lock);
+			spin_unlock(&txq->lock);
 
 			rc = rc == NET_XMIT_BYPASS ? NET_XMIT_SUCCESS : rc;
 			goto out;
 		}
-		spin_unlock(&dev->queue_lock);
+		spin_unlock(&txq->lock);
 	}
 
 	/* The device has no queue. Common case for software devices:
@@ -1919,14 +1921,17 @@ static void net_tx_action(struct softirq_action *h)
 
 	while (head) {
 		struct net_device *dev = head;
+		struct netdev_queue *txq;
 		head = head->next_sched;
 
+		txq = &dev->tx_queue;
+
 		smp_mb__before_clear_bit();
 		clear_bit(__LINK_STATE_SCHED, &dev->state);
 
-		if (spin_trylock(&dev->queue_lock)) {
+		if (spin_trylock(&txq->lock)) {
 			qdisc_run(dev);
-			spin_unlock(&dev->queue_lock);
+			spin_unlock(&txq->lock);
 		} else {
 			netif_schedule(dev);
 		}
@@ -3787,7 +3792,6 @@ int register_netdevice(struct net_device *dev)
 	BUG_ON(!dev_net(dev));
 	net = dev_net(dev);
 
-	spin_lock_init(&dev->queue_lock);
 	spin_lock_init(&dev->_xmit_lock);
 	netdev_set_lockdep_class(&dev->_xmit_lock, dev->type);
 	dev->xmit_lock_owner = -1;
@@ -4072,10 +4076,17 @@ static struct net_device_stats *internal_stats(struct net_device *dev)
 	return &dev->stats;
 }
 
+static void netdev_init_one_queue(struct net_device *dev,
+				  struct netdev_queue *queue)
+{
+	spin_lock_init(&queue->lock);
+	queue->dev = dev;
+}
+
 static void netdev_init_queues(struct net_device *dev)
 {
-	dev->rx_queue.dev = dev;
-	dev->tx_queue.dev = dev;
+	netdev_init_one_queue(dev, &dev->rx_queue);
+	netdev_init_one_queue(dev, &dev->tx_queue);
 }
 
 /**