author    David S. Miller <davem@davemloft.net>  2008-07-09 02:11:25 -0400
committer David S. Miller <davem@davemloft.net>  2008-07-09 02:11:25 -0400
commit    86d804e10a37cd86f16bf72386c37e843a98a74b (patch)
tree      04483a937f11c752aea998298a27fc79e6851b2d
parent    970565bbad0c7b98db0d14131a69e5a0f4445d49 (diff)
netdev: Make netif_schedule() routines work with netdev_queue objects.
Only plain netif_schedule() still takes a net_device, mostly as a
compatibility item while we transition the rest of these interfaces.

Everything else calls netif_schedule_queue() or __netif_schedule(),
both of which take a netdev_queue pointer.

Signed-off-by: David S. Miller <davem@davemloft.net>
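For a hypothetical single-queue driver, the call-site conversion implied by this patch looks roughly like the sketch below (the function name example_tx_complete and its surrounding driver context are illustrative assumptions, not part of this patch):

/* Before this patch: the device as a whole is scheduled. */
static void example_tx_complete(struct net_device *dev)
{
	netif_schedule(dev);
}

/* After this patch: a specific TX queue is scheduled.  Plain
 * netif_schedule(dev) is kept as a compatibility wrapper that
 * simply forwards to netif_schedule_queue(&dev->tx_queue).
 */
static void example_tx_complete(struct net_device *dev)
{
	netif_schedule_queue(&dev->tx_queue);
}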
-rw-r--r--  include/linux/netdevice.h  | 17
-rw-r--r--  net/core/dev.c             |  9
-rw-r--r--  net/mac80211/main.c        |  4
-rw-r--r--  net/sched/sch_api.c        |  4
-rw-r--r--  net/sched/sch_cbq.c        |  2
-rw-r--r--  net/sched/sch_generic.c    | 10
6 files changed, 27 insertions, 19 deletions
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index aae6c6d153f2..28aa8e77cee9 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -952,12 +952,19 @@ DECLARE_PER_CPU(struct softnet_data,softnet_data);
 
 #define HAVE_NETIF_QUEUE
 
-extern void __netif_schedule(struct net_device *dev);
+extern void __netif_schedule(struct netdev_queue *txq);
 
-static inline void netif_schedule(struct net_device *dev)
+static inline void netif_schedule_queue(struct netdev_queue *txq)
 {
+	struct net_device *dev = txq->dev;
+
 	if (!test_bit(__LINK_STATE_XOFF, &dev->state))
-		__netif_schedule(dev);
+		__netif_schedule(txq);
+}
+
+static inline void netif_schedule(struct net_device *dev)
+{
+	netif_schedule_queue(&dev->tx_queue);
 }
 
 /**
@@ -987,7 +994,7 @@ static inline void netif_wake_queue(struct net_device *dev)
 	}
 #endif
 	if (test_and_clear_bit(__LINK_STATE_XOFF, &dev->state))
-		__netif_schedule(dev);
+		__netif_schedule(&dev->tx_queue);
 }
 
 /**
@@ -1103,7 +1110,7 @@ static inline void netif_wake_subqueue(struct net_device *dev, u16 queue_index)
 #endif
 	if (test_and_clear_bit(__LINK_STATE_XOFF,
 			       &dev->egress_subqueue[queue_index].state))
-		__netif_schedule(dev);
+		__netif_schedule(&dev->tx_queue);
 #endif
 }
 
diff --git a/net/core/dev.c b/net/core/dev.c
index d6b8d3c3e6ec..0dc888ad4217 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1320,12 +1320,13 @@ static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
 }
 
 
-void __netif_schedule(struct net_device *dev)
+void __netif_schedule(struct netdev_queue *txq)
 {
+	struct net_device *dev = txq->dev;
+
 	if (!test_and_set_bit(__LINK_STATE_SCHED, &dev->state)) {
-		struct netdev_queue *txq = &dev->tx_queue;
-		unsigned long flags;
 		struct softnet_data *sd;
+		unsigned long flags;
 
 		local_irq_save(flags);
 		sd = &__get_cpu_var(softnet_data);
@@ -1932,7 +1933,7 @@ static void net_tx_action(struct softirq_action *h)
 			qdisc_run(dev);
 			spin_unlock(&txq->lock);
 		} else {
-			netif_schedule(dev);
+			netif_schedule_queue(txq);
 		}
 	}
 }
diff --git a/net/mac80211/main.c b/net/mac80211/main.c
index 2968baa66b91..1c4d3ba6b878 100644
--- a/net/mac80211/main.c
+++ b/net/mac80211/main.c
@@ -885,10 +885,10 @@ void ieee80211_stop_tx_ba_cb(struct ieee80211_hw *hw, u8 *ra, u8 tid)
 	spin_unlock_bh(&txq->lock);
 
 	/* we just requeued the all the frames that were in the removed
-	 * queue, and since we might miss a softirq we do netif_schedule.
+	 * queue, and since we might miss a softirq we do netif_schedule_queue.
 	 * ieee80211_wake_queue is not used here as this queue is not
 	 * necessarily stopped */
-	netif_schedule(local->mdev);
+	netif_schedule_queue(txq);
 	spin_lock_bh(&sta->lock);
 	*state = HT_AGG_STATE_IDLE;
 	sta->ampdu_mlme.addba_req_num[tid] = 0;
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index e73bd68aa7ae..95873f8dd37c 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -282,11 +282,11 @@ static enum hrtimer_restart qdisc_watchdog(struct hrtimer *timer)
 {
 	struct qdisc_watchdog *wd = container_of(timer, struct qdisc_watchdog,
 						 timer);
-	struct net_device *dev = qdisc_dev(wd->qdisc);
+	struct netdev_queue *txq = wd->qdisc->dev_queue;
 
 	wd->qdisc->flags &= ~TCQ_F_THROTTLED;
 	smp_wmb();
-	netif_schedule(dev);
+	netif_schedule_queue(txq);
 
 	return HRTIMER_NORESTART;
 }
diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c
index 99ce3da2b0a4..4efc836cbf38 100644
--- a/net/sched/sch_cbq.c
+++ b/net/sched/sch_cbq.c
@@ -650,7 +650,7 @@ static enum hrtimer_restart cbq_undelay(struct hrtimer *timer)
 	}
 
 	sch->flags &= ~TCQ_F_THROTTLED;
-	netif_schedule(qdisc_dev(sch));
+	netif_schedule_queue(sch->dev_queue);
 	return HRTIMER_NORESTART;
 }
 
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 8247a406a401..407dfdb142a4 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -62,7 +62,7 @@ static inline int qdisc_qlen(struct Qdisc *q)
 	return q->q.qlen;
 }
 
-static inline int dev_requeue_skb(struct sk_buff *skb, struct net_device *dev,
+static inline int dev_requeue_skb(struct sk_buff *skb,
 				  struct netdev_queue *dev_queue,
 				  struct Qdisc *q)
 {
@@ -71,7 +71,7 @@ static inline int dev_requeue_skb(struct sk_buff *skb, struct net_device *dev,
 	else
 		q->ops->requeue(skb, q);
 
-	netif_schedule(dev);
+	netif_schedule_queue(dev_queue);
 	return 0;
 }
 
@@ -114,7 +114,7 @@ static inline int handle_dev_cpu_collision(struct sk_buff *skb,
 		 * some time.
 		 */
 		__get_cpu_var(netdev_rx_stat).cpu_collision++;
-		ret = dev_requeue_skb(skb, dev, dev_queue, q);
+		ret = dev_requeue_skb(skb, dev_queue, q);
 	}
 
 	return ret;
@@ -179,7 +179,7 @@ static inline int qdisc_restart(struct net_device *dev)
 		printk(KERN_WARNING "BUG %s code %d qlen %d\n",
 		       dev->name, ret, q->q.qlen);
 
-		ret = dev_requeue_skb(skb, dev, txq, q);
+		ret = dev_requeue_skb(skb, txq, q);
 		break;
 	}
 
@@ -200,7 +200,7 @@ void __qdisc_run(struct net_device *dev)
 		 * 2. we've been doing it for too long.
 		 */
 		if (need_resched() || jiffies != start_time) {
-			netif_schedule(dev);
+			netif_schedule_queue(&dev->tx_queue);
 			break;
 		}
 	}