author     Stephen Hemminger <shemminger@linux-foundation.org>  2007-03-22 15:17:42 -0400
committer  David S. Miller <davem@sunset.davemloft.net>         2007-04-26 01:27:22 -0400
commit     11274e5a43266d531140530adebead6903380caf (patch)
tree       38c365a3835c9e5973fb1941a31dde8cc4856724 /net/sched/sch_netem.c
parent     075aa573b74a732aeff487ab77d3fbd627c10856 (diff)
[NETEM]: avoid excessive requeues
The netem code would call getnstimeofday() and dequeue/requeue after
every packet, even while it was waiting for the next packet's scheduled
departure time. Avoid this overhead by using the throttled flag.
Signed-off-by: Stephen Hemminger <shemminger@linux-foundation.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/sched/sch_netem.c')
 net/sched/sch_netem.c | 23 +++++++++++++----------
 1 file changed, 13 insertions(+), 10 deletions(-)
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
index 7e9e658d4d93..fb49e9e7ace0 100644
--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
@@ -273,6 +273,10 @@ static struct sk_buff *netem_dequeue(struct Qdisc *sch)
 	struct netem_sched_data *q = qdisc_priv(sch);
 	struct sk_buff *skb;
 
+	smp_mb();
+	if (sch->flags & TCQ_F_THROTTLED)
+		return NULL;
+
 	skb = q->qdisc->dequeue(q->qdisc);
 	if (skb) {
 		const struct netem_skb_cb *cb
@@ -285,18 +289,17 @@ static struct sk_buff *netem_dequeue(struct Qdisc *sch)
 		if (PSCHED_TLESS(cb->time_to_send, now)) {
 			pr_debug("netem_dequeue: return skb=%p\n", skb);
 			sch->q.qlen--;
-			sch->flags &= ~TCQ_F_THROTTLED;
 			return skb;
-		} else {
-			qdisc_watchdog_schedule(&q->watchdog, cb->time_to_send);
-
-			if (q->qdisc->ops->requeue(skb, q->qdisc) != NET_XMIT_SUCCESS) {
-				qdisc_tree_decrease_qlen(q->qdisc, 1);
-				sch->qstats.drops++;
-				printk(KERN_ERR "netem: queue discpline %s could not requeue\n",
-				       q->qdisc->ops->id);
-			}
 		}
+
+		if (unlikely(q->qdisc->ops->requeue(skb, q->qdisc) != NET_XMIT_SUCCESS)) {
+			qdisc_tree_decrease_qlen(q->qdisc, 1);
+			sch->qstats.drops++;
+			printk(KERN_ERR "netem: %s could not requeue\n",
+			       q->qdisc->ops->id);
+		}
+
+		qdisc_watchdog_schedule(&q->watchdog, cb->time_to_send);
 	}
 
 	return NULL;
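
To see why the early TCQ_F_THROTTLED test pays off, here is a minimal
userspace sketch of the pattern (an illustration only: fake_qdisc,
watchdog_fire() and the fake clock are hypothetical stand-ins, not kernel
APIs, and it checks the head packet's timestamp in place rather than doing
netem's real dequeue-then-requeue peek). Once the watchdog has been armed,
every subsequent dequeue poll returns NULL immediately instead of reading
the clock and round-tripping a packet through the inner qdisc.

/* Userspace sketch of the throttled-flag pattern this patch introduces. */
#include <stdbool.h>
#include <stdio.h>

struct fake_qdisc {
	bool throttled;        /* stands in for sch->flags & TCQ_F_THROTTLED */
	int  queued;           /* packets waiting for their send time */
	long time_to_send;     /* departure time of the head packet */
};

static long now;                   /* fake clock */

static int *fake_dequeue(struct fake_qdisc *q)
{
	static int pkt;

	/* Cheap early exit: the watchdog is pending, nothing can be sent. */
	if (q->throttled)
		return NULL;

	if (!q->queued)
		return NULL;

	if (q->time_to_send > now) {
		/*
		 * Head packet is not due yet: arm the (simulated) watchdog
		 * and mark the qdisc throttled, mimicking what
		 * qdisc_watchdog_schedule() does in the patch.
		 */
		q->throttled = true;
		return NULL;
	}

	q->queued--;
	return &pkt;
}

static void watchdog_fire(struct fake_qdisc *q)
{
	/* The real watchdog clears TCQ_F_THROTTLED before rescheduling. */
	q->throttled = false;
}

int main(void)
{
	struct fake_qdisc q = { .queued = 1, .time_to_send = 5 };

	for (now = 0; now < 8; now++) {
		if (now == 5)
			watchdog_fire(&q);
		printf("t=%ld dequeue -> %s (throttled=%d)\n",
		       now, fake_dequeue(&q) ? "packet" : "NULL", q.throttled);
	}
	return 0;
}

In the kernel patch itself, qdisc_watchdog_schedule() sets TCQ_F_THROTTLED
when it arms the timer and qdisc_watchdog() clears it when the timer fires;
the smp_mb() added at the top of netem_dequeue() orders the flag test
against the watchdog clearing it on another CPU.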