author:    Patrick McHardy <kaber@trash.net>  2007-03-16 04:22:20 -0400
committer: David S. Miller <davem@sunset.davemloft.net>  2007-04-26 01:26:12 -0400
commit:    1a13cb63d679da328cfa339c89b8b2d0eba3b81e (patch)
tree:      0de208583285cb470ca6ff9bd351e8054e5a68d2 /net/sched/sch_cbq.c
parent:    e9054a339eb275c756efeeaee42af484ac72a3f4 (diff)
[NET_SCHED]: sch_cbq: use hrtimer for delay_timer
Switch delay_timer to hrtimer.
The class penalty parameter is changed to use psched ticks as units.
Since iproute never supported using this and the only existing user
(libnl) incorrectly assumes psched ticks as units anyway, this
shouldn't break anything.
Signed-off-by: Patrick McHardy <kaber@trash.net>
Signed-off-by: David S. Miller <davem@davemloft.net>
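For context, the generic timer_list-to-hrtimer conversion pattern this patch applies looks roughly as follows. This is a minimal illustrative sketch, not code from sch_cbq.c: the struct and function names (my_sched_data, my_undelay, my_init, my_arm) are placeholders, and only hrtimer calls the patch itself uses (hrtimer_init(), hrtimer_start(), ktime_set(), ktime_add_ns()) are assumed. The key differences from timer_list are that the callback receives the hrtimer pointer and recovers its enclosing structure with container_of() instead of casting an unsigned long data cookie, returns an enum hrtimer_restart value, and is armed with an absolute ktime_t expiry rather than a jiffies count.

#include <linux/hrtimer.h>
#include <linux/ktime.h>

struct my_sched_data {
	struct hrtimer	delay_timer;
};

/* hrtimer callbacks get the timer pointer; container_of() replaces the
 * old cast of the unsigned long ->data cookie. */
static enum hrtimer_restart my_undelay(struct hrtimer *timer)
{
	struct my_sched_data *q = container_of(timer, struct my_sched_data,
					       delay_timer);

	/* ... work formerly done in the timer_list handler, using q ... */
	return HRTIMER_NORESTART;	/* one-shot; re-arm explicitly if needed */
}

static void my_init(struct my_sched_data *q)
{
	/* Replaces init_timer() and the ->data assignment. */
	hrtimer_init(&q->delay_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
	q->delay_timer.function = my_undelay;
}

static void my_arm(struct my_sched_data *q, u64 expires_ns)
{
	/* Replaces add_timer(): absolute expiry expressed as a ktime_t. */
	ktime_t expires = ktime_add_ns(ktime_set(0, 0), expires_ns);

	hrtimer_start(&q->delay_timer, expires, HRTIMER_MODE_ABS);
}

As in the patch, cancelling becomes hrtimer_cancel() where del_timer() was used, and the Qdisc pointer that the old timer carried in ->data is instead recovered via q->watchdog.qdisc, which qdisc_watchdog_init() already stores.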
Diffstat (limited to 'net/sched/sch_cbq.c')
-rw-r--r-- | net/sched/sch_cbq.c | 70
1 file changed, 41 insertions, 29 deletions
diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c
index 32f6a308bad6..0491fad97c0a 100644
--- a/net/sched/sch_cbq.c
+++ b/net/sched/sch_cbq.c
@@ -112,7 +112,7 @@ struct cbq_class
 
 	/* Overlimit strategy parameters */
 	void			(*overlimit)(struct cbq_class *cl);
-	long			penalty;
+	psched_tdiff_t		penalty;
 
 	/* General scheduler (WRR) parameters */
 	long			allot;
@@ -143,7 +143,7 @@ struct cbq_class
 	psched_time_t		undertime;
 	long			avgidle;
 	long			deficit;	/* Saved deficit for WRR */
-	unsigned long		penalized;
+	psched_time_t		penalized;
 	struct gnet_stats_basic bstats;
 	struct gnet_stats_queue qstats;
 	struct gnet_stats_rate_est rate_est;
@@ -180,7 +180,7 @@ struct cbq_sched_data
 	psched_time_t		now_rt;		/* Cached real time */
 	unsigned		pmask;
 
-	struct timer_list	delay_timer;
+	struct hrtimer		delay_timer;
 	struct qdisc_watchdog	watchdog;	/* Watchdog timer,
 						   started when CBQ has
 						   backlog, but cannot
@@ -549,7 +549,8 @@ static void cbq_ovl_delay(struct cbq_class *cl)
 	psched_tdiff_t delay = PSCHED_TDIFF(cl->undertime, q->now);
 
 	if (!cl->delayed) {
-		unsigned long sched = jiffies;
+		psched_time_t sched = q->now;
+		ktime_t expires;
 
 		delay += cl->offtime;
 		if (cl->avgidle < 0)
@@ -559,14 +560,18 @@ static void cbq_ovl_delay(struct cbq_class *cl)
 		PSCHED_TADD2(q->now, delay, cl->undertime);
 
 		if (delay > 0) {
-			sched += PSCHED_US2JIFFIE(delay) + cl->penalty;
+			sched += delay + cl->penalty;
 			cl->penalized = sched;
 			cl->cpriority = TC_CBQ_MAXPRIO;
 			q->pmask |= (1<<TC_CBQ_MAXPRIO);
-			if (del_timer(&q->delay_timer) &&
-			    (long)(q->delay_timer.expires - sched) > 0)
-				q->delay_timer.expires = sched;
-			add_timer(&q->delay_timer);
+
+			expires = ktime_set(0, 0);
+			expires = ktime_add_ns(expires, PSCHED_US2NS(sched));
+			if (hrtimer_try_to_cancel(&q->delay_timer) &&
+			    ktime_to_ns(ktime_sub(q->delay_timer.expires,
+						  expires)) > 0)
+				q->delay_timer.expires = expires;
+			hrtimer_restart(&q->delay_timer);
 			cl->delayed = 1;
 			cl->xstats.overactions++;
 			return;
@@ -583,7 +588,7 @@ static void cbq_ovl_lowprio(struct cbq_class *cl)
 {
 	struct cbq_sched_data *q = qdisc_priv(cl->qdisc);
 
-	cl->penalized = jiffies + cl->penalty;
+	cl->penalized = q->now + cl->penalty;
 
 	if (cl->cpriority != cl->priority2) {
 		cl->cpriority = cl->priority2;
@@ -604,19 +609,19 @@ static void cbq_ovl_drop(struct cbq_class *cl)
 	cbq_ovl_classic(cl);
 }
 
-static unsigned long cbq_undelay_prio(struct cbq_sched_data *q, int prio)
+static psched_tdiff_t cbq_undelay_prio(struct cbq_sched_data *q, int prio,
+				       psched_time_t now)
 {
 	struct cbq_class *cl;
 	struct cbq_class *cl_prev = q->active[prio];
-	unsigned long now = jiffies;
-	unsigned long sched = now;
+	psched_time_t sched = now;
 
 	if (cl_prev == NULL)
 		return 0;
 
 	do {
 		cl = cl_prev->next_alive;
-		if ((long)(now - cl->penalized) > 0) {
+		if (now - cl->penalized > 0) {
 			cl_prev->next_alive = cl->next_alive;
 			cl->next_alive = NULL;
 			cl->cpriority = cl->priority;
@@ -632,30 +637,34 @@ static unsigned long cbq_undelay_prio(struct cbq_sched_data *q, int prio)
 			}
 
 			cl = cl_prev->next_alive;
-		} else if ((long)(sched - cl->penalized) > 0)
+		} else if (sched - cl->penalized > 0)
 			sched = cl->penalized;
 	} while ((cl_prev = cl) != q->active[prio]);
 
-	return (long)(sched - now);
+	return sched - now;
 }
 
-static void cbq_undelay(unsigned long arg)
+static enum hrtimer_restart cbq_undelay(struct hrtimer *timer)
 {
-	struct Qdisc *sch = (struct Qdisc*)arg;
-	struct cbq_sched_data *q = qdisc_priv(sch);
-	long delay = 0;
+	struct cbq_sched_data *q = container_of(timer, struct cbq_sched_data,
+						delay_timer);
+	struct Qdisc *sch = q->watchdog.qdisc;
+	psched_time_t now;
+	psched_tdiff_t delay = 0;
 	unsigned pmask;
 
+	PSCHED_GET_TIME(now);
+
 	pmask = q->pmask;
 	q->pmask = 0;
 
 	while (pmask) {
 		int prio = ffz(~pmask);
-		long tmp;
+		psched_tdiff_t tmp;
 
 		pmask &= ~(1<<prio);
 
-		tmp = cbq_undelay_prio(q, prio);
+		tmp = cbq_undelay_prio(q, prio, now);
 		if (tmp > 0) {
 			q->pmask |= 1<<prio;
 			if (tmp < delay || delay == 0)
@@ -664,12 +673,16 @@ static void cbq_undelay(unsigned long arg)
 	}
 
 	if (delay) {
-		q->delay_timer.expires = jiffies + delay;
-		add_timer(&q->delay_timer);
+		ktime_t time;
+
+		time = ktime_set(0, 0);
+		time = ktime_add_ns(time, PSCHED_US2NS(now + delay));
+		hrtimer_start(&q->delay_timer, time, HRTIMER_MODE_ABS);
 	}
 
 	sch->flags &= ~TCQ_F_THROTTLED;
 	netif_schedule(sch->dev);
+	return HRTIMER_NORESTART;
 }
 
 
@@ -1265,7 +1278,7 @@ cbq_reset(struct Qdisc* sch)
 	q->tx_class = NULL;
 	q->tx_borrowed = NULL;
 	qdisc_watchdog_cancel(&q->watchdog);
-	del_timer(&q->delay_timer);
+	hrtimer_cancel(&q->delay_timer);
 	q->toplevel = TC_CBQ_MAXLEVEL;
 	PSCHED_GET_TIME(q->now);
 	q->now_rt = q->now;
@@ -1367,7 +1380,7 @@ static int cbq_set_overlimit(struct cbq_class *cl, struct tc_cbq_ovl *ovl)
 	default:
 		return -EINVAL;
 	}
-	cl->penalty = (ovl->penalty*HZ)/1000;
+	cl->penalty = ovl->penalty;
 	return 0;
 }
 
@@ -1435,8 +1448,7 @@ static int cbq_init(struct Qdisc *sch, struct rtattr *opt)
 	q->link.stats_lock = &sch->dev->queue_lock;
 
 	qdisc_watchdog_init(&q->watchdog, sch);
-	init_timer(&q->delay_timer);
-	q->delay_timer.data = (unsigned long)sch;
+	hrtimer_init(&q->delay_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
 	q->delay_timer.function = cbq_undelay;
 	q->toplevel = TC_CBQ_MAXLEVEL;
 	PSCHED_GET_TIME(q->now);
@@ -1514,7 +1526,7 @@ static __inline__ int cbq_dump_ovl(struct sk_buff *skb, struct cbq_class *cl)
 	opt.strategy = cl->ovl_strategy;
 	opt.priority2 = cl->priority2+1;
 	opt.pad = 0;
-	opt.penalty = (cl->penalty*1000)/HZ;
+	opt.penalty = cl->penalty;
 	RTA_PUT(skb, TCA_CBQ_OVL_STRATEGY, sizeof(opt), &opt);
 	return skb->len;
 