author    Stephen Hemminger <shemminger@linux-foundation.org>  2007-03-22 15:17:42 -0400
committer David S. Miller <davem@sunset.davemloft.net>         2007-04-26 01:27:22 -0400
commit    11274e5a43266d531140530adebead6903380caf
tree      38c365a3835c9e5973fb1941a31dde8cc4856724  /net/sched
parent    075aa573b74a732aeff487ab77d3fbd627c10856
[NETEM]: avoid excessive requeues
The netem code would call getnstimeofday() and dequeue/requeue after every
packet, even if it was waiting. Avoid this overhead by using the throttled
flag.

Signed-off-by: Stephen Hemminger <shemminger@linux-foundation.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/sched')
-rw-r--r--  net/sched/sch_api.c     3
-rw-r--r--  net/sched/sch_netem.c  23
2 files changed, 16 insertions, 10 deletions
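
The idea behind the patch, testing a cheap "throttled" flag before doing any real dequeue work, can be modelled outside the kernel. The sketch below is not part of the patch: it is a toy, single-threaded userspace C program in which toy_qdisc, toy_dequeue() and the CLOCK_MONOTONIC clock are stand-ins (assumptions for illustration, not kernel APIs) for struct Qdisc, netem_dequeue() and getnstimeofday(). It only shows why one flag test per poll is cheaper than a clock read plus a dequeue/requeue per poll while a packet is still waiting.

/* Toy userspace model of the throttled-flag fast path -- NOT kernel code. */
#define _POSIX_C_SOURCE 199309L
#include <stdbool.h>
#include <stdio.h>
#include <time.h>

struct toy_pkt { long long send_at_ns; };

struct toy_qdisc {
        bool throttled;         /* set while waiting for the "watchdog"  */
        long long clock_reads;  /* how many times the clock was read     */
        struct toy_pkt *head;   /* single queued packet, for simplicity  */
};

static long long now_ns(struct toy_qdisc *q)
{
        struct timespec ts;

        q->clock_reads++;
        clock_gettime(CLOCK_MONOTONIC, &ts);
        return (long long)ts.tv_sec * 1000000000LL + ts.tv_nsec;
}

/* Dequeue with the fast path: while throttled, do no work at all. */
static struct toy_pkt *toy_dequeue(struct toy_qdisc *q)
{
        struct toy_pkt *pkt = q->head;

        if (q->throttled)       /* cheap flag test replaces clock read + requeue */
                return NULL;

        if (!pkt)
                return NULL;

        if (pkt->send_at_ns <= now_ns(q)) {
                q->head = NULL; /* packet is due: hand it out */
                return pkt;
        }

        q->throttled = true;    /* not due yet: "arm the watchdog" and back off */
        return NULL;
}

int main(void)
{
        struct toy_qdisc q = { 0 };
        struct toy_pkt pkt;
        int i, delivered = 0;

        pkt.send_at_ns = now_ns(&q) + 1000 * 1000;      /* due in 1 ms */
        q.head = &pkt;

        /* Poll hard, as the scheduler would; the flag keeps most polls cheap. */
        for (i = 0; i < 50 * 1000 * 1000 && !delivered; i++) {
                if (toy_dequeue(&q))
                        delivered = 1;
                else if (q.throttled && (i & 0xfff) == 0 &&
                         pkt.send_at_ns <= now_ns(&q))
                        q.throttled = false;    /* pretend the watchdog fired */
        }

        printf("delivered=%d, clock reads=%lld\n", delivered, q.clock_reads);
        return 0;
}

Compiled with e.g. gcc -O2, this typically reports only a few dozen clock reads for millions of polls; without the flag test, every poll would read the clock and touch the queue, which is the overhead the patch removes.
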
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index b06f20294ac0..fcaa4adefc82 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -298,6 +298,7 @@ static enum hrtimer_restart qdisc_watchdog(struct hrtimer *timer)
                                  timer);
 
         wd->qdisc->flags &= ~TCQ_F_THROTTLED;
+        smp_wmb();
         netif_schedule(wd->qdisc->dev);
         return HRTIMER_NORESTART;
 }
@@ -315,6 +316,7 @@ void qdisc_watchdog_schedule(struct qdisc_watchdog *wd, psched_time_t expires)
         ktime_t time;
 
         wd->qdisc->flags |= TCQ_F_THROTTLED;
+        smp_wmb();
         time = ktime_set(0, 0);
         time = ktime_add_ns(time, PSCHED_US2NS(expires));
         hrtimer_start(&wd->timer, time, HRTIMER_MODE_ABS);
@@ -325,6 +327,7 @@ void qdisc_watchdog_cancel(struct qdisc_watchdog *wd)
 {
         hrtimer_cancel(&wd->timer);
         wd->qdisc->flags &= ~TCQ_F_THROTTLED;
+        smp_wmb();
 }
 EXPORT_SYMBOL(qdisc_watchdog_cancel);
 
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
index 7e9e658d4d93..fb49e9e7ace0 100644
--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
@@ -273,6 +273,10 @@ static struct sk_buff *netem_dequeue(struct Qdisc *sch)
         struct netem_sched_data *q = qdisc_priv(sch);
         struct sk_buff *skb;
 
+        smp_mb();
+        if (sch->flags & TCQ_F_THROTTLED)
+                return NULL;
+
         skb = q->qdisc->dequeue(q->qdisc);
         if (skb) {
                 const struct netem_skb_cb *cb
@@ -285,18 +289,17 @@ static struct sk_buff *netem_dequeue(struct Qdisc *sch)
                 if (PSCHED_TLESS(cb->time_to_send, now)) {
                         pr_debug("netem_dequeue: return skb=%p\n", skb);
                         sch->q.qlen--;
-                        sch->flags &= ~TCQ_F_THROTTLED;
                         return skb;
-                } else {
-                        qdisc_watchdog_schedule(&q->watchdog, cb->time_to_send);
-
-                        if (q->qdisc->ops->requeue(skb, q->qdisc) != NET_XMIT_SUCCESS) {
-                                qdisc_tree_decrease_qlen(q->qdisc, 1);
-                                sch->qstats.drops++;
-                                printk(KERN_ERR "netem: queue discpline %s could not requeue\n",
-                                       q->qdisc->ops->id);
-                        }
                 }
+
+                if (unlikely(q->qdisc->ops->requeue(skb, q->qdisc) != NET_XMIT_SUCCESS)) {
+                        qdisc_tree_decrease_qlen(q->qdisc, 1);
+                        sch->qstats.drops++;
+                        printk(KERN_ERR "netem: %s could not requeue\n",
+                               q->qdisc->ops->id);
+                }
+
+                qdisc_watchdog_schedule(&q->watchdog, cb->time_to_send);
         }
 
         return NULL;