author		Eric Dumazet <edumazet@google.com>		2014-10-04 13:11:31 -0400
committer	David S. Miller <davem@davemloft.net>	2014-10-06 00:55:10 -0400
commit		f2600cf02b5b59aaee082c3485b7f01fc7f7b70c (patch)
tree		f38d692c3f706ead3d0cfd77b216fbd629a22964 /net
parent		681d2421e1135b95f5cd9d16fe10eac7f570a9f2 (diff)
net: sched: avoid costly atomic operation in fq_dequeue()
Standard qdisc API to setup a timer implies an atomic operation on every
packet dequeue: qdisc_unthrottled().

It turns out this is not really needed for FQ: FQ has no concept of global
qdisc throttling, since it is a qdisc handling many different flows, some of
which can be throttled while others are not.

The fix is straightforward: add a 'bool throttle' parameter to
qdisc_watchdog_schedule_ns() and remove the calls to qdisc_unthrottled() in
sch_fq.

Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
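For context, the "costly atomic operation" is the set/clear of the throttled
bit in the shared qdisc state word. A sketch of the helpers involved, as they
look in include/net/sch_generic.h of this era (the exact bodies here are an
assumption, reproduced from memory rather than from this patch):

/* Sketch (assumed): each call is an atomic RMW on qdisc->state, which is
 * what fq_dequeue() was paying for on every packet.
 */
static inline void qdisc_throttled(struct Qdisc *qdisc)
{
	set_bit(__QDISC_STATE_THROTTLED, &qdisc->state);
}

static inline void qdisc_unthrottled(struct Qdisc *qdisc)
{
	clear_bit(__QDISC_STATE_THROTTLED, &qdisc->state);
}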
Diffstat (limited to 'net')
-rw-r--r--	net/sched/sch_api.c	5
-rw-r--r--	net/sched/sch_fq.c	6
-rw-r--r--	net/sched/sch_tbf.c	3
3 files changed, 7 insertions, 7 deletions
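Not shown below (the diffstat is limited to 'net') is the matching prototype
change in include/net/pkt_sched.h. A minimal sketch, assuming the existing
tick-based qdisc_watchdog_schedule() wrapper simply forwards true so
unconverted qdiscs keep setting the throttled bit:

/* Assumed pkt_sched.h counterpart of this patch; the wrapper body is a
 * sketch inferred from the callers changed below, not taken from the diff.
 */
void qdisc_watchdog_schedule_ns(struct qdisc_watchdog *wd, u64 expires,
				bool throttle);

static inline void qdisc_watchdog_schedule(struct qdisc_watchdog *wd,
					   psched_time_t expires)
{
	/* Legacy callers keep the old behavior: mark the qdisc throttled. */
	qdisc_watchdog_schedule_ns(wd, PSCHED_TICKS2NS(expires), true);
}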
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index c79a226cc25c..2cf61b3e633c 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -594,13 +594,14 @@ void qdisc_watchdog_init(struct qdisc_watchdog *wd, struct Qdisc *qdisc)
 }
 EXPORT_SYMBOL(qdisc_watchdog_init);
 
-void qdisc_watchdog_schedule_ns(struct qdisc_watchdog *wd, u64 expires)
+void qdisc_watchdog_schedule_ns(struct qdisc_watchdog *wd, u64 expires, bool throttle)
 {
 	if (test_bit(__QDISC_STATE_DEACTIVATED,
 		     &qdisc_root_sleeping(wd->qdisc)->state))
 		return;
 
-	qdisc_throttled(wd->qdisc);
+	if (throttle)
+		qdisc_throttled(wd->qdisc);
 
 	hrtimer_start(&wd->timer,
 		      ns_to_ktime(expires),
diff --git a/net/sched/sch_fq.c b/net/sched/sch_fq.c
index c9b9fcb53206..cbd7e1fd23b4 100644
--- a/net/sched/sch_fq.c
+++ b/net/sched/sch_fq.c
@@ -377,7 +377,6 @@ static int fq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 		if (time_after(jiffies, f->age + q->flow_refill_delay))
 			f->credit = max_t(u32, f->credit, q->quantum);
 		q->inactive_flows--;
-		qdisc_unthrottled(sch);
 	}
 
 	/* Note: this overwrites f->age */
@@ -385,7 +384,6 @@ static int fq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 
 	if (unlikely(f == &q->internal)) {
 		q->stat_internal_packets++;
-		qdisc_unthrottled(sch);
 	}
 	sch->q.qlen++;
 
@@ -433,7 +431,8 @@ begin:
 		if (!head->first) {
 			if (q->time_next_delayed_flow != ~0ULL)
 				qdisc_watchdog_schedule_ns(&q->watchdog,
-							   q->time_next_delayed_flow);
+							   q->time_next_delayed_flow,
+							   false);
 			return NULL;
 		}
 	}
@@ -495,7 +494,6 @@ begin:
 	}
 out:
 	qdisc_bstats_update(sch, skb);
-	qdisc_unthrottled(sch);
 	return skb;
 }
 
diff --git a/net/sched/sch_tbf.c b/net/sched/sch_tbf.c
index 77edffe329c4..a4afde14e865 100644
--- a/net/sched/sch_tbf.c
+++ b/net/sched/sch_tbf.c
@@ -268,7 +268,8 @@ static struct sk_buff *tbf_dequeue(struct Qdisc *sch)
 		}
 
 		qdisc_watchdog_schedule_ns(&q->watchdog,
-					   now + max_t(long, -toks, -ptoks));
+					   now + max_t(long, -toks, -ptoks),
+					   true);
 
 		/* Maybe we have a shorter packet in the queue,
 		   which can be sent now. It sounds cool,