aboutsummaryrefslogtreecommitdiffstats
path: root/net/core/dev.c
diff options
context:
space:
mode:
authorDavid S. Miller <davem@davemloft.net>2008-07-16 05:15:04 -0400
committerDavid S. Miller <davem@davemloft.net>2008-07-17 22:21:20 -0400
commit37437bb2e1ae8af470dfcd5b4ff454110894ccaf (patch)
tree1795e78a7648252b0c92c972df12b776a28437d7 /net/core/dev.c
parent7698b4fcabcd790efc4f226bada1e7b5870653af (diff)
pkt_sched: Schedule qdiscs instead of netdev_queue.
When we have shared qdiscs, packets come out of the qdiscs for multiple transmit queues. Therefore it doesn't make any sense to schedule the transmit queue when logically we cannot know ahead of time the TX queue of the SKB that the qdisc->dequeue() will give us. Just for sanity I added a BUG check to make sure we never get into a state where the noop_qdisc is scheduled. Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/core/dev.c')
-rw-r--r--net/core/dev.c68
1 file changed, 28 insertions, 40 deletions
diff --git a/net/core/dev.c b/net/core/dev.c
index 467bfb325123..0b909b74f698 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1323,18 +1323,18 @@ static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
1323} 1323}
1324 1324
1325 1325
1326void __netif_schedule(struct netdev_queue *txq) 1326void __netif_schedule(struct Qdisc *q)
1327{ 1327{
1328 struct net_device *dev = txq->dev; 1328 BUG_ON(q == &noop_qdisc);
1329 1329
1330 if (!test_and_set_bit(__LINK_STATE_SCHED, &dev->state)) { 1330 if (!test_and_set_bit(__QDISC_STATE_SCHED, &q->state)) {
1331 struct softnet_data *sd; 1331 struct softnet_data *sd;
1332 unsigned long flags; 1332 unsigned long flags;
1333 1333
1334 local_irq_save(flags); 1334 local_irq_save(flags);
1335 sd = &__get_cpu_var(softnet_data); 1335 sd = &__get_cpu_var(softnet_data);
1336 txq->next_sched = sd->output_queue; 1336 q->next_sched = sd->output_queue;
1337 sd->output_queue = txq; 1337 sd->output_queue = q;
1338 raise_softirq_irqoff(NET_TX_SOFTIRQ); 1338 raise_softirq_irqoff(NET_TX_SOFTIRQ);
1339 local_irq_restore(flags); 1339 local_irq_restore(flags);
1340 } 1340 }
@@ -1771,37 +1771,23 @@ gso:
1771 rcu_read_lock_bh(); 1771 rcu_read_lock_bh();
1772 1772
1773 txq = dev_pick_tx(dev, skb); 1773 txq = dev_pick_tx(dev, skb);
1774 spin_lock_prefetch(&txq->lock);
1775
1776 /* Updates of qdisc are serialized by queue->lock.
1777 * The struct Qdisc which is pointed to by qdisc is now a
1778 * rcu structure - it may be accessed without acquiring
1779 * a lock (but the structure may be stale.) The freeing of the
1780 * qdisc will be deferred until it's known that there are no
1781 * more references to it.
1782 *
1783 * If the qdisc has an enqueue function, we still need to
1784 * hold the queue->lock before calling it, since queue->lock
1785 * also serializes access to the device queue.
1786 */
1787
1788 q = rcu_dereference(txq->qdisc); 1774 q = rcu_dereference(txq->qdisc);
1775
1789#ifdef CONFIG_NET_CLS_ACT 1776#ifdef CONFIG_NET_CLS_ACT
1790 skb->tc_verd = SET_TC_AT(skb->tc_verd,AT_EGRESS); 1777 skb->tc_verd = SET_TC_AT(skb->tc_verd,AT_EGRESS);
1791#endif 1778#endif
1792 if (q->enqueue) { 1779 if (q->enqueue) {
1793 /* Grab device queue */ 1780 spinlock_t *root_lock = qdisc_root_lock(q);
1794 spin_lock(&txq->lock); 1781
1795 q = txq->qdisc; 1782 spin_lock(root_lock);
1796 if (q->enqueue) { 1783
1797 rc = q->enqueue(skb, q); 1784 rc = q->enqueue(skb, q);
1798 qdisc_run(txq); 1785 qdisc_run(q);
1799 spin_unlock(&txq->lock); 1786
1800 1787 spin_unlock(root_lock);
1801 rc = rc == NET_XMIT_BYPASS ? NET_XMIT_SUCCESS : rc; 1788
1802 goto out; 1789 rc = rc == NET_XMIT_BYPASS ? NET_XMIT_SUCCESS : rc;
1803 } 1790 goto out;
1804 spin_unlock(&txq->lock);
1805 } 1791 }
1806 1792
1807 /* The device has no queue. Common case for software devices: 1793 /* The device has no queue. Common case for software devices:
@@ -1974,7 +1960,7 @@ static void net_tx_action(struct softirq_action *h)
1974 } 1960 }
1975 1961
1976 if (sd->output_queue) { 1962 if (sd->output_queue) {
1977 struct netdev_queue *head; 1963 struct Qdisc *head;
1978 1964
1979 local_irq_disable(); 1965 local_irq_disable();
1980 head = sd->output_queue; 1966 head = sd->output_queue;
@@ -1982,18 +1968,20 @@ static void net_tx_action(struct softirq_action *h)
1982 local_irq_enable(); 1968 local_irq_enable();
1983 1969
1984 while (head) { 1970 while (head) {
1985 struct netdev_queue *txq = head; 1971 struct Qdisc *q = head;
1986 struct net_device *dev = txq->dev; 1972 spinlock_t *root_lock;
1973
1987 head = head->next_sched; 1974 head = head->next_sched;
1988 1975
1989 smp_mb__before_clear_bit(); 1976 smp_mb__before_clear_bit();
1990 clear_bit(__LINK_STATE_SCHED, &dev->state); 1977 clear_bit(__QDISC_STATE_SCHED, &q->state);
1991 1978
1992 if (spin_trylock(&txq->lock)) { 1979 root_lock = qdisc_root_lock(q);
1993 qdisc_run(txq); 1980 if (spin_trylock(root_lock)) {
1994 spin_unlock(&txq->lock); 1981 qdisc_run(q);
1982 spin_unlock(root_lock);
1995 } else { 1983 } else {
1996 netif_schedule_queue(txq); 1984 __netif_schedule(q);
1997 } 1985 }
1998 } 1986 }
1999 } 1987 }
@@ -4459,7 +4447,7 @@ static int dev_cpu_callback(struct notifier_block *nfb,
4459 void *ocpu) 4447 void *ocpu)
4460{ 4448{
4461 struct sk_buff **list_skb; 4449 struct sk_buff **list_skb;
4462 struct netdev_queue **list_net; 4450 struct Qdisc **list_net;
4463 struct sk_buff *skb; 4451 struct sk_buff *skb;
4464 unsigned int cpu, oldcpu = (unsigned long)ocpu; 4452 unsigned int cpu, oldcpu = (unsigned long)ocpu;
4465 struct softnet_data *sd, *oldsd; 4453 struct softnet_data *sd, *oldsd;