author		David S. Miller <davem@davemloft.net>	2008-07-09 02:12:38 -0400
committer	David S. Miller <davem@davemloft.net>	2008-07-09 02:12:38 -0400
commit		eb6aafe3f843cb0e939546c03540a3b4911b6964 (patch)
tree		550cfba4baadcb64f98ce6e77fe6f9b44b5bb142
parent		86d804e10a37cd86f16bf72386c37e843a98a74b (diff)
pkt_sched: Make qdisc_run take a netdev_queue.
This allows us to use this calling convention all the way down into
qdisc_restart().

Signed-off-by: David S. Miller <davem@davemloft.net>
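For illustration only (not part of the patch): at a call site such as
dev_queue_xmit(), the convention changes roughly as in the fragment
below, assuming the single per-device tx_queue this tree still uses
(skb, q and rc as in the surrounding dev.c context).

	struct netdev_queue *txq = &dev->tx_queue;

	spin_lock(&txq->lock);
	rc = q->enqueue(skb, q);
	/* old: qdisc_run(dev); the queue was rediscovered internally */
	qdisc_run(txq);		/* new: txq flows down to qdisc_restart() */
	spin_unlock(&txq->lock);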
-rw-r--r--  include/net/pkt_sched.h |  8
-rw-r--r--  net/core/dev.c          |  4
-rw-r--r--  net/sched/sch_generic.c | 26
3 files changed, 21 insertions(+), 17 deletions(-)
diff --git a/include/net/pkt_sched.h b/include/net/pkt_sched.h
index 8e3a0c4e9d97..2311d242bb35 100644
--- a/include/net/pkt_sched.h
+++ b/include/net/pkt_sched.h
@@ -84,13 +84,15 @@ extern struct qdisc_rate_table *qdisc_get_rtab(struct tc_ratespec *r,
 					struct nlattr *tab);
 extern void qdisc_put_rtab(struct qdisc_rate_table *tab);
 
-extern void __qdisc_run(struct net_device *dev);
+extern void __qdisc_run(struct netdev_queue *txq);
 
-static inline void qdisc_run(struct net_device *dev)
+static inline void qdisc_run(struct netdev_queue *txq)
 {
+	struct net_device *dev = txq->dev;
+
 	if (!netif_queue_stopped(dev) &&
 	    !test_and_set_bit(__LINK_STATE_QDISC_RUNNING, &dev->state))
-		__qdisc_run(dev);
+		__qdisc_run(txq);
 }
 
 extern int tc_classify_compat(struct sk_buff *skb, struct tcf_proto *tp,
diff --git a/net/core/dev.c b/net/core/dev.c
index 0dc888ad4217..0218b0b9be80 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1734,7 +1734,7 @@ gso:
 			/* reset queue_mapping to zero */
 			skb_set_queue_mapping(skb, 0);
 			rc = q->enqueue(skb, q);
-			qdisc_run(dev);
+			qdisc_run(txq);
 			spin_unlock(&txq->lock);
 
 			rc = rc == NET_XMIT_BYPASS ? NET_XMIT_SUCCESS : rc;
@@ -1930,7 +1930,7 @@ static void net_tx_action(struct softirq_action *h)
 			clear_bit(__LINK_STATE_SCHED, &dev->state);
 
 			if (spin_trylock(&txq->lock)) {
-				qdisc_run(dev);
+				qdisc_run(txq);
 				spin_unlock(&txq->lock);
 			} else {
 				netif_schedule_queue(txq);
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 407dfdb142a4..fcc7533f0bcc 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -75,9 +75,8 @@ static inline int dev_requeue_skb(struct sk_buff *skb,
 	return 0;
 }
 
-static inline struct sk_buff *dev_dequeue_skb(struct net_device *dev,
-					      struct netdev_queue *dev_queue,
-					      struct Qdisc *q)
+static inline struct sk_buff *dequeue_skb(struct netdev_queue *dev_queue,
+					  struct Qdisc *q)
 {
 	struct sk_buff *skb;
 
@@ -90,10 +89,10 @@ static inline struct sk_buff *dev_dequeue_skb(struct net_device *dev,
 }
 
 static inline int handle_dev_cpu_collision(struct sk_buff *skb,
-					   struct net_device *dev,
 					   struct netdev_queue *dev_queue,
 					   struct Qdisc *q)
 {
+	struct net_device *dev = dev_queue->dev;
 	int ret;
 
 	if (unlikely(dev->xmit_lock_owner == smp_processor_id())) {
@@ -139,21 +138,23 @@ static inline int handle_dev_cpu_collision(struct sk_buff *skb,
  *				>0 - queue is not empty.
  *
  */
-static inline int qdisc_restart(struct net_device *dev)
+static inline int qdisc_restart(struct netdev_queue *txq)
 {
-	struct netdev_queue *txq = &dev->tx_queue;
 	struct Qdisc *q = txq->qdisc;
-	struct sk_buff *skb;
 	int ret = NETDEV_TX_BUSY;
+	struct net_device *dev;
+	struct sk_buff *skb;
 
 	/* Dequeue packet */
-	if (unlikely((skb = dev_dequeue_skb(dev, txq, q)) == NULL))
+	if (unlikely((skb = dequeue_skb(txq, q)) == NULL))
 		return 0;
 
 
 	/* And release queue */
 	spin_unlock(&txq->lock);
 
+	dev = txq->dev;
+
 	HARD_TX_LOCK(dev, smp_processor_id());
 	if (!netif_subqueue_stopped(dev, skb))
 		ret = dev_hard_start_xmit(skb, dev);
@@ -170,7 +171,7 @@ static inline int qdisc_restart(struct net_device *dev)
 
 	case NETDEV_TX_LOCKED:
 		/* Driver try lock failed */
-		ret = handle_dev_cpu_collision(skb, dev, txq, q);
+		ret = handle_dev_cpu_collision(skb, txq, q);
 		break;
 
 	default:
@@ -186,11 +187,12 @@ static inline int qdisc_restart(struct net_device *dev)
 	return ret;
 }
 
-void __qdisc_run(struct net_device *dev)
+void __qdisc_run(struct netdev_queue *txq)
 {
+	struct net_device *dev = txq->dev;
 	unsigned long start_time = jiffies;
 
-	while (qdisc_restart(dev)) {
+	while (qdisc_restart(txq)) {
 		if (netif_queue_stopped(dev))
 			break;
 
@@ -200,7 +202,7 @@ void __qdisc_run(struct net_device *dev)
 		 * 2. we've been doing it for too long.
 		 */
 		if (need_resched() || jiffies != start_time) {
-			netif_schedule_queue(&dev->tx_queue);
+			netif_schedule_queue(txq);
 			break;
 		}
 	}