Diffstat (limited to 'net/sched/sch_cbq.c')
-rw-r--r--  net/sched/sch_cbq.c | 207
 1 file changed, 96 insertions(+), 111 deletions(-)
diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c
index 76c92e710a33..a294542cb8e4 100644
--- a/net/sched/sch_cbq.c
+++ b/net/sched/sch_cbq.c
@@ -29,6 +29,7 @@
 #include <linux/etherdevice.h>
 #include <linux/notifier.h>
 #include <net/ip.h>
+#include <net/netlink.h>
 #include <net/route.h>
 #include <linux/skbuff.h>
 #include <net/sock.h>
@@ -112,7 +113,7 @@ struct cbq_class
 
 	/* Overlimit strategy parameters */
 	void			(*overlimit)(struct cbq_class *cl);
-	long			penalty;
+	psched_tdiff_t		penalty;
 
 	/* General scheduler (WRR) parameters */
 	long			allot;
@@ -143,7 +144,7 @@ struct cbq_class
 	psched_time_t		undertime;
 	long			avgidle;
 	long			deficit;	/* Saved deficit for WRR */
-	unsigned long		penalized;
+	psched_time_t		penalized;
 	struct gnet_stats_basic bstats;
 	struct gnet_stats_queue qstats;
 	struct gnet_stats_rate_est rate_est;
@@ -180,12 +181,12 @@ struct cbq_sched_data
 	psched_time_t		now_rt;		/* Cached real time */
 	unsigned		pmask;
 
-	struct timer_list	delay_timer;
-	struct timer_list	wd_timer;	/* Watchdog timer,
+	struct hrtimer		delay_timer;
+	struct qdisc_watchdog	watchdog;	/* Watchdog timer,
 						   started when CBQ has
 						   backlog, but cannot
 						   transmit just now */
-	long			wd_expires;
+	psched_tdiff_t		wd_expires;
 	int			toplevel;
 	u32			hgenerator;
 };
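
For context, the open-coded wd_timer is replaced by the generic qdisc_watchdog helper from net/sched/sch_api.c. A minimal sketch of the interface relied on here, paraphrased rather than copied from the kernel headers:

	/* include/net/pkt_sched.h (sketch) */
	struct qdisc_watchdog {
		struct hrtimer	timer;	/* hrtimer armed at an absolute psched time */
		struct Qdisc	*qdisc;	/* owning qdisc, unthrottled when the timer fires */
	};

	void qdisc_watchdog_init(struct qdisc_watchdog *wd, struct Qdisc *qdisc);
	void qdisc_watchdog_schedule(struct qdisc_watchdog *wd, psched_time_t expires);
	void qdisc_watchdog_cancel(struct qdisc_watchdog *wd);

The delay_timer keeps its own hrtimer because its expiry logic (the per-priority undelay walk) is CBQ-specific, while the plain "backlogged but cannot transmit" case maps directly onto the shared watchdog.
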
@@ -384,12 +385,12 @@ cbq_mark_toplevel(struct cbq_sched_data *q, struct cbq_class *cl)
 	psched_time_t now;
 	psched_tdiff_t incr;
 
-	PSCHED_GET_TIME(now);
-	incr = PSCHED_TDIFF(now, q->now_rt);
-	PSCHED_TADD2(q->now, incr, now);
+	now = psched_get_time();
+	incr = now - q->now_rt;
+	now = q->now + incr;
 
 	do {
-		if (PSCHED_TLESS(cl->undertime, now)) {
+		if (cl->undertime < now) {
 			q->toplevel = cl->level;
 			return;
 		}
@@ -473,7 +474,7 @@ cbq_requeue(struct sk_buff *skb, struct Qdisc *sch)
 static void cbq_ovl_classic(struct cbq_class *cl)
 {
 	struct cbq_sched_data *q = qdisc_priv(cl->qdisc);
-	psched_tdiff_t delay = PSCHED_TDIFF(cl->undertime, q->now);
+	psched_tdiff_t delay = cl->undertime - q->now;
 
 	if (!cl->delayed) {
 		delay += cl->offtime;
@@ -491,7 +492,7 @@ static void cbq_ovl_classic(struct cbq_class *cl)
 			cl->avgidle = cl->minidle;
 		if (delay <= 0)
 			delay = 1;
-		PSCHED_TADD2(q->now, delay, cl->undertime);
+		cl->undertime = q->now + delay;
 
 		cl->xstats.overactions++;
 		cl->delayed = 1;
@@ -508,7 +509,7 @@ static void cbq_ovl_classic(struct cbq_class *cl)
 		psched_tdiff_t base_delay = q->wd_expires;
 
 		for (b = cl->borrow; b; b = b->borrow) {
-			delay = PSCHED_TDIFF(b->undertime, q->now);
+			delay = b->undertime - q->now;
 			if (delay < base_delay) {
 				if (delay <= 0)
 					delay = 1;
@@ -546,27 +547,32 @@ static void cbq_ovl_rclassic(struct cbq_class *cl)
 static void cbq_ovl_delay(struct cbq_class *cl)
 {
 	struct cbq_sched_data *q = qdisc_priv(cl->qdisc);
-	psched_tdiff_t delay = PSCHED_TDIFF(cl->undertime, q->now);
+	psched_tdiff_t delay = cl->undertime - q->now;
 
 	if (!cl->delayed) {
-		unsigned long sched = jiffies;
+		psched_time_t sched = q->now;
+		ktime_t expires;
 
 		delay += cl->offtime;
 		if (cl->avgidle < 0)
 			delay -= (-cl->avgidle) - ((-cl->avgidle) >> cl->ewma_log);
 		if (cl->avgidle < cl->minidle)
 			cl->avgidle = cl->minidle;
-		PSCHED_TADD2(q->now, delay, cl->undertime);
+		cl->undertime = q->now + delay;
 
 		if (delay > 0) {
-			sched += PSCHED_US2JIFFIE(delay) + cl->penalty;
+			sched += delay + cl->penalty;
 			cl->penalized = sched;
 			cl->cpriority = TC_CBQ_MAXPRIO;
 			q->pmask |= (1<<TC_CBQ_MAXPRIO);
-			if (del_timer(&q->delay_timer) &&
-			    (long)(q->delay_timer.expires - sched) > 0)
-				q->delay_timer.expires = sched;
-			add_timer(&q->delay_timer);
+
+			expires = ktime_set(0, 0);
+			expires = ktime_add_ns(expires, PSCHED_US2NS(sched));
+			if (hrtimer_try_to_cancel(&q->delay_timer) &&
+			    ktime_to_ns(ktime_sub(q->delay_timer.expires,
+						  expires)) > 0)
+				q->delay_timer.expires = expires;
+			hrtimer_restart(&q->delay_timer);
 			cl->delayed = 1;
 			cl->xstats.overactions++;
 			return;
@@ -583,7 +589,7 @@ static void cbq_ovl_lowprio(struct cbq_class *cl)
 {
 	struct cbq_sched_data *q = qdisc_priv(cl->qdisc);
 
-	cl->penalized = jiffies + cl->penalty;
+	cl->penalized = q->now + cl->penalty;
 
 	if (cl->cpriority != cl->priority2) {
 		cl->cpriority = cl->priority2;
@@ -604,27 +610,19 @@ static void cbq_ovl_drop(struct cbq_class *cl)
 	cbq_ovl_classic(cl);
 }
 
-static void cbq_watchdog(unsigned long arg)
-{
-	struct Qdisc *sch = (struct Qdisc*)arg;
-
-	sch->flags &= ~TCQ_F_THROTTLED;
-	netif_schedule(sch->dev);
-}
-
-static unsigned long cbq_undelay_prio(struct cbq_sched_data *q, int prio)
+static psched_tdiff_t cbq_undelay_prio(struct cbq_sched_data *q, int prio,
+				       psched_time_t now)
 {
 	struct cbq_class *cl;
 	struct cbq_class *cl_prev = q->active[prio];
-	unsigned long now = jiffies;
-	unsigned long sched = now;
+	psched_time_t sched = now;
 
 	if (cl_prev == NULL)
-		return now;
+		return 0;
 
 	do {
 		cl = cl_prev->next_alive;
-		if ((long)(now - cl->penalized) > 0) {
+		if (now - cl->penalized > 0) {
 			cl_prev->next_alive = cl->next_alive;
 			cl->next_alive = NULL;
 			cl->cpriority = cl->priority;
@@ -640,30 +638,34 @@ static unsigned long cbq_undelay_prio(struct cbq_sched_data *q, int prio)
 			}
 
 			cl = cl_prev->next_alive;
-		} else if ((long)(sched - cl->penalized) > 0)
+		} else if (sched - cl->penalized > 0)
 			sched = cl->penalized;
 	} while ((cl_prev = cl) != q->active[prio]);
 
-	return (long)(sched - now);
+	return sched - now;
 }
 
-static void cbq_undelay(unsigned long arg)
+static enum hrtimer_restart cbq_undelay(struct hrtimer *timer)
 {
-	struct Qdisc *sch = (struct Qdisc*)arg;
-	struct cbq_sched_data *q = qdisc_priv(sch);
-	long delay = 0;
+	struct cbq_sched_data *q = container_of(timer, struct cbq_sched_data,
+						delay_timer);
+	struct Qdisc *sch = q->watchdog.qdisc;
+	psched_time_t now;
+	psched_tdiff_t delay = 0;
 	unsigned pmask;
 
+	now = psched_get_time();
+
 	pmask = q->pmask;
 	q->pmask = 0;
 
 	while (pmask) {
 		int prio = ffz(~pmask);
-		long tmp;
+		psched_tdiff_t tmp;
 
 		pmask &= ~(1<<prio);
 
-		tmp = cbq_undelay_prio(q, prio);
+		tmp = cbq_undelay_prio(q, prio, now);
 		if (tmp > 0) {
 			q->pmask |= 1<<prio;
 			if (tmp < delay || delay == 0)
@@ -672,12 +674,16 @@ static void cbq_undelay(unsigned long arg)
 	}
 
 	if (delay) {
-		q->delay_timer.expires = jiffies + delay;
-		add_timer(&q->delay_timer);
+		ktime_t time;
+
+		time = ktime_set(0, 0);
+		time = ktime_add_ns(time, PSCHED_US2NS(now + delay));
+		hrtimer_start(&q->delay_timer, time, HRTIMER_MODE_ABS);
 	}
 
 	sch->flags &= ~TCQ_F_THROTTLED;
 	netif_schedule(sch->dev);
+	return HRTIMER_NORESTART;
 }
 
 
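
The undelay path now runs as an hrtimer callback: the handler recovers its cbq_sched_data with container_of(), re-arms itself with hrtimer_start() in absolute mode while some class is still penalized, and returns HRTIMER_NORESTART so the hrtimer core does not restart it implicitly. The same pattern reduced to a skeleton; every name other than the hrtimer API itself is illustrative only:

	#include <linux/hrtimer.h>
	#include <linux/kernel.h>
	#include <linux/ktime.h>

	/* illustrative container; cbq_sched_data plays this role in the patch */
	struct my_state {
		struct hrtimer	timer;		/* embedded timer, like q->delay_timer */
		ktime_t		next_expiry;	/* absolute time of the next event */
		int		need_rearm;
	};

	static enum hrtimer_restart my_timer_fn(struct hrtimer *timer)
	{
		/* recover the containing object from the embedded hrtimer */
		struct my_state *st = container_of(timer, struct my_state, timer);

		/* ... process expired work, decide whether another shot is needed ... */
		if (st->need_rearm)
			hrtimer_start(&st->timer, st->next_expiry, HRTIMER_MODE_ABS);

		/* the handler re-arms explicitly, so tell the core not to restart it */
		return HRTIMER_NORESTART;
	}
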
@@ -732,7 +738,7 @@ cbq_update_toplevel(struct cbq_sched_data *q, struct cbq_class *cl,
 	if (cl && q->toplevel >= borrowed->level) {
 		if (cl->q->q.qlen > 1) {
 			do {
-				if (PSCHED_IS_PASTPERFECT(borrowed->undertime)) {
+				if (borrowed->undertime == PSCHED_PASTPERFECT) {
 					q->toplevel = borrowed->level;
 					return;
 				}
@@ -770,7 +776,7 @@ cbq_update(struct cbq_sched_data *q)
 		         idle = (now - last) - last_pktlen/rate
 		 */
 
-		idle = PSCHED_TDIFF(q->now, cl->last);
+		idle = q->now - cl->last;
 		if ((unsigned long)idle > 128*1024*1024) {
 			avgidle = cl->maxidle;
 		} else {
@@ -814,13 +820,11 @@ cbq_update(struct cbq_sched_data *q)
 		idle -= L2T(&q->link, len);
 		idle += L2T(cl, len);
 
-		PSCHED_AUDIT_TDIFF(idle);
-
-		PSCHED_TADD2(q->now, idle, cl->undertime);
+		cl->undertime = q->now + idle;
 	} else {
 		/* Underlimit */
 
-		PSCHED_SET_PASTPERFECT(cl->undertime);
+		cl->undertime = PSCHED_PASTPERFECT;
 		if (avgidle > cl->maxidle)
 			cl->avgidle = cl->maxidle;
 		else
@@ -841,8 +845,7 @@ cbq_under_limit(struct cbq_class *cl)
 	if (cl->tparent == NULL)
 		return cl;
 
-	if (PSCHED_IS_PASTPERFECT(cl->undertime) ||
-	    !PSCHED_TLESS(q->now, cl->undertime)) {
+	if (cl->undertime == PSCHED_PASTPERFECT || q->now >= cl->undertime) {
 		cl->delayed = 0;
 		return cl;
 	}
@@ -865,8 +868,7 @@ cbq_under_limit(struct cbq_class *cl)
 		}
 		if (cl->level > q->toplevel)
 			return NULL;
-	} while (!PSCHED_IS_PASTPERFECT(cl->undertime) &&
-		 PSCHED_TLESS(q->now, cl->undertime));
+	} while (cl->undertime != PSCHED_PASTPERFECT && q->now < cl->undertime);
 
 	cl->delayed = 0;
 	return cl;
@@ -1001,8 +1003,8 @@ cbq_dequeue(struct Qdisc *sch)
 	psched_time_t now;
 	psched_tdiff_t incr;
 
-	PSCHED_GET_TIME(now);
-	incr = PSCHED_TDIFF(now, q->now_rt);
+	now = psched_get_time();
+	incr = now - q->now_rt;
 
 	if (q->tx_class) {
 		psched_tdiff_t incr2;
@@ -1014,12 +1016,12 @@ cbq_dequeue(struct Qdisc *sch)
 		   cbq_time = max(real_time, work);
 		 */
 		incr2 = L2T(&q->link, q->tx_len);
-		PSCHED_TADD(q->now, incr2);
+		q->now += incr2;
 		cbq_update(q);
 		if ((incr -= incr2) < 0)
 			incr = 0;
 	}
-	PSCHED_TADD(q->now, incr);
+	q->now += incr;
 	q->now_rt = now;
 
 	for (;;) {
@@ -1051,11 +1053,11 @@ cbq_dequeue(struct Qdisc *sch)
 		 */
 
 		if (q->toplevel == TC_CBQ_MAXLEVEL &&
-		    PSCHED_IS_PASTPERFECT(q->link.undertime))
+		    q->link.undertime == PSCHED_PASTPERFECT)
 			break;
 
 		q->toplevel = TC_CBQ_MAXLEVEL;
-		PSCHED_SET_PASTPERFECT(q->link.undertime);
+		q->link.undertime = PSCHED_PASTPERFECT;
 	}
 
 	/* No packets in scheduler or nobody wants to give them to us :-(
@@ -1063,13 +1065,9 @@ cbq_dequeue(struct Qdisc *sch)
 
 	if (sch->q.qlen) {
 		sch->qstats.overlimits++;
-		if (q->wd_expires) {
-			long delay = PSCHED_US2JIFFIE(q->wd_expires);
-			if (delay <= 0)
-				delay = 1;
-			mod_timer(&q->wd_timer, jiffies + delay);
-			sch->flags |= TCQ_F_THROTTLED;
-		}
+		if (q->wd_expires)
+			qdisc_watchdog_schedule(&q->watchdog,
+						now + q->wd_expires);
 	}
 	return NULL;
 }
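
This hunk is the heart of the conversion: instead of rounding wd_expires down to jiffies and clamping to at least one tick, the dequeue path hands an absolute psched expiry time to the shared watchdog, which throttles the qdisc and arms an hrtimer. A sketch of roughly what qdisc_watchdog_schedule() does with that value, paraphrased from net/sched/sch_api.c of the same era and not guaranteed to match it line for line:

	void qdisc_watchdog_schedule(struct qdisc_watchdog *wd, psched_time_t expires)
	{
		ktime_t time;

		wd->qdisc->flags |= TCQ_F_THROTTLED;	/* previously done by hand here */
		time = ktime_set(0, 0);
		time = ktime_add_ns(time, PSCHED_US2NS(expires));
		hrtimer_start(&wd->timer, time, HRTIMER_MODE_ABS);
	}

The jiffies-granularity rounding of the old mod_timer() approach disappears, which matters for classes whose undertime lies well below one tick in the future.
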
@@ -1276,10 +1274,10 @@ cbq_reset(struct Qdisc* sch)
 	q->pmask = 0;
 	q->tx_class = NULL;
 	q->tx_borrowed = NULL;
-	del_timer(&q->wd_timer);
-	del_timer(&q->delay_timer);
+	qdisc_watchdog_cancel(&q->watchdog);
+	hrtimer_cancel(&q->delay_timer);
 	q->toplevel = TC_CBQ_MAXLEVEL;
-	PSCHED_GET_TIME(q->now);
+	q->now = psched_get_time();
 	q->now_rt = q->now;
 
 	for (prio = 0; prio <= TC_CBQ_MAXPRIO; prio++)
@@ -1290,7 +1288,7 @@ cbq_reset(struct Qdisc* sch)
 			qdisc_reset(cl->q);
 
 			cl->next_alive = NULL;
-			PSCHED_SET_PASTPERFECT(cl->undertime);
+			cl->undertime = PSCHED_PASTPERFECT;
 			cl->avgidle = cl->maxidle;
 			cl->deficit = cl->quantum;
 			cl->cpriority = cl->priority;
@@ -1379,7 +1377,7 @@ static int cbq_set_overlimit(struct cbq_class *cl, struct tc_cbq_ovl *ovl)
 	default:
 		return -EINVAL;
 	}
-	cl->penalty = (ovl->penalty*HZ)/1000;
+	cl->penalty = ovl->penalty;
 	return 0;
 }
 
@@ -1446,14 +1444,11 @@ static int cbq_init(struct Qdisc *sch, struct rtattr *opt)
 	q->link.minidle = -0x7FFFFFFF;
 	q->link.stats_lock = &sch->dev->queue_lock;
 
-	init_timer(&q->wd_timer);
-	q->wd_timer.data = (unsigned long)sch;
-	q->wd_timer.function = cbq_watchdog;
-	init_timer(&q->delay_timer);
-	q->delay_timer.data = (unsigned long)sch;
+	qdisc_watchdog_init(&q->watchdog, sch);
+	hrtimer_init(&q->delay_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
 	q->delay_timer.function = cbq_undelay;
 	q->toplevel = TC_CBQ_MAXLEVEL;
-	PSCHED_GET_TIME(q->now);
+	q->now = psched_get_time();
 	q->now_rt = q->now;
 
 	cbq_link_class(&q->link);
@@ -1467,19 +1462,19 @@ static int cbq_init(struct Qdisc *sch, struct rtattr *opt)
 
 static __inline__ int cbq_dump_rate(struct sk_buff *skb, struct cbq_class *cl)
 {
-	unsigned char *b = skb->tail;
+	unsigned char *b = skb_tail_pointer(skb);
 
 	RTA_PUT(skb, TCA_CBQ_RATE, sizeof(cl->R_tab->rate), &cl->R_tab->rate);
 	return skb->len;
 
 rtattr_failure:
-	skb_trim(skb, b - skb->data);
+	nlmsg_trim(skb, b);
 	return -1;
 }
 
 static __inline__ int cbq_dump_lss(struct sk_buff *skb, struct cbq_class *cl)
 {
-	unsigned char *b = skb->tail;
+	unsigned char *b = skb_tail_pointer(skb);
 	struct tc_cbq_lssopt opt;
 
 	opt.flags = 0;
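
All of the dump helpers below follow the same save-mark / append / roll-back idiom; only the way the mark is taken and restored changes. skb_tail_pointer() hides the fact that skb->tail may be stored as an offset rather than a pointer, and nlmsg_trim() (from <net/netlink.h>, hence the new include at the top of the file) trims the skb back to a previously saved mark. A hedged sketch of the idiom as these helpers use it, with dump_example and its attribute choice purely illustrative:

	static int dump_example(struct sk_buff *skb, const struct tc_cbq_lssopt *opt)
	{
		unsigned char *b = skb_tail_pointer(skb);	/* remember where the attribute starts */

		RTA_PUT(skb, TCA_CBQ_LSSOPT, sizeof(*opt), opt);	/* jumps to rtattr_failure on overflow */
		return skb->len;

	rtattr_failure:
		nlmsg_trim(skb, b);				/* undo the partially written attribute */
		return -1;
	}
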
@@ -1498,13 +1493,13 @@ static __inline__ int cbq_dump_lss(struct sk_buff *skb, struct cbq_class *cl)
 	return skb->len;
 
 rtattr_failure:
-	skb_trim(skb, b - skb->data);
+	nlmsg_trim(skb, b);
 	return -1;
 }
 
 static __inline__ int cbq_dump_wrr(struct sk_buff *skb, struct cbq_class *cl)
 {
-	unsigned char *b = skb->tail;
+	unsigned char *b = skb_tail_pointer(skb);
 	struct tc_cbq_wrropt opt;
 
 	opt.flags = 0;
@@ -1516,30 +1511,30 @@ static __inline__ int cbq_dump_wrr(struct sk_buff *skb, struct cbq_class *cl)
 	return skb->len;
 
 rtattr_failure:
-	skb_trim(skb, b - skb->data);
+	nlmsg_trim(skb, b);
 	return -1;
 }
 
 static __inline__ int cbq_dump_ovl(struct sk_buff *skb, struct cbq_class *cl)
 {
-	unsigned char *b = skb->tail;
+	unsigned char *b = skb_tail_pointer(skb);
 	struct tc_cbq_ovl opt;
 
 	opt.strategy = cl->ovl_strategy;
 	opt.priority2 = cl->priority2+1;
 	opt.pad = 0;
-	opt.penalty = (cl->penalty*1000)/HZ;
+	opt.penalty = cl->penalty;
 	RTA_PUT(skb, TCA_CBQ_OVL_STRATEGY, sizeof(opt), &opt);
 	return skb->len;
 
 rtattr_failure:
-	skb_trim(skb, b - skb->data);
+	nlmsg_trim(skb, b);
 	return -1;
 }
 
 static __inline__ int cbq_dump_fopt(struct sk_buff *skb, struct cbq_class *cl)
 {
-	unsigned char *b = skb->tail;
+	unsigned char *b = skb_tail_pointer(skb);
 	struct tc_cbq_fopt opt;
 
 	if (cl->split || cl->defmap) {
@@ -1551,14 +1546,14 @@ static __inline__ int cbq_dump_fopt(struct sk_buff *skb, struct cbq_class *cl)
 	return skb->len;
 
 rtattr_failure:
-	skb_trim(skb, b - skb->data);
+	nlmsg_trim(skb, b);
 	return -1;
 }
 
 #ifdef CONFIG_NET_CLS_POLICE
 static __inline__ int cbq_dump_police(struct sk_buff *skb, struct cbq_class *cl)
 {
-	unsigned char *b = skb->tail;
+	unsigned char *b = skb_tail_pointer(skb);
 	struct tc_cbq_police opt;
 
 	if (cl->police) {
@@ -1570,7 +1565,7 @@ static __inline__ int cbq_dump_police(struct sk_buff *skb, struct cbq_class *cl)
 	return skb->len;
 
 rtattr_failure:
-	skb_trim(skb, b - skb->data);
+	nlmsg_trim(skb, b);
 	return -1;
 }
 #endif
@@ -1592,18 +1587,18 @@ static int cbq_dump_attr(struct sk_buff *skb, struct cbq_class *cl)
 static int cbq_dump(struct Qdisc *sch, struct sk_buff *skb)
 {
 	struct cbq_sched_data *q = qdisc_priv(sch);
-	unsigned char *b = skb->tail;
+	unsigned char *b = skb_tail_pointer(skb);
 	struct rtattr *rta;
 
 	rta = (struct rtattr*)b;
 	RTA_PUT(skb, TCA_OPTIONS, 0, NULL);
 	if (cbq_dump_attr(skb, &q->link) < 0)
 		goto rtattr_failure;
-	rta->rta_len = skb->tail - b;
+	rta->rta_len = skb_tail_pointer(skb) - b;
 	return skb->len;
 
 rtattr_failure:
-	skb_trim(skb, b - skb->data);
+	nlmsg_trim(skb, b);
 	return -1;
 }
 
@@ -1621,7 +1616,7 @@ cbq_dump_class(struct Qdisc *sch, unsigned long arg,
 	       struct sk_buff *skb, struct tcmsg *tcm)
 {
 	struct cbq_class *cl = (struct cbq_class*)arg;
-	unsigned char *b = skb->tail;
+	unsigned char *b = skb_tail_pointer(skb);
 	struct rtattr *rta;
 
 	if (cl->tparent)
@@ -1635,11 +1630,11 @@ cbq_dump_class(struct Qdisc *sch, unsigned long arg,
 	RTA_PUT(skb, TCA_OPTIONS, 0, NULL);
 	if (cbq_dump_attr(skb, cl) < 0)
 		goto rtattr_failure;
-	rta->rta_len = skb->tail - b;
+	rta->rta_len = skb_tail_pointer(skb) - b;
 	return skb->len;
 
 rtattr_failure:
-	skb_trim(skb, b - skb->data);
+	nlmsg_trim(skb, b);
 	return -1;
 }
 
@@ -1654,8 +1649,8 @@ cbq_dump_class_stats(struct Qdisc *sch, unsigned long arg,
 	cl->xstats.avgidle = cl->avgidle;
 	cl->xstats.undertime = 0;
 
-	if (!PSCHED_IS_PASTPERFECT(cl->undertime))
-		cl->xstats.undertime = PSCHED_TDIFF(cl->undertime, q->now);
+	if (cl->undertime != PSCHED_PASTPERFECT)
+		cl->xstats.undertime = cl->undertime - q->now;
 
 	if (gnet_stats_copy_basic(d, &cl->bstats) < 0 ||
 #ifdef CONFIG_NET_ESTIMATOR
@@ -1722,23 +1717,13 @@ static unsigned long cbq_get(struct Qdisc *sch, u32 classid)
 	return 0;
 }
 
-static void cbq_destroy_filters(struct cbq_class *cl)
-{
-	struct tcf_proto *tp;
-
-	while ((tp = cl->filter_list) != NULL) {
-		cl->filter_list = tp->next;
-		tcf_destroy(tp);
-	}
-}
-
 static void cbq_destroy_class(struct Qdisc *sch, struct cbq_class *cl)
 {
 	struct cbq_sched_data *q = qdisc_priv(sch);
 
 	BUG_TRAP(!cl->filters);
 
-	cbq_destroy_filters(cl);
+	tcf_destroy_chain(cl->filter_list);
 	qdisc_destroy(cl->q);
 	qdisc_put_rtab(cl->R_tab);
 #ifdef CONFIG_NET_ESTIMATOR
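
The local cbq_destroy_filters() helper goes away in favour of the shared tcf_destroy_chain() from net/sched/sch_api.c, which walks a filter chain and frees each classifier. Roughly what that shared helper does, sketched here for reference rather than copied from the patch:

	void tcf_destroy_chain(struct tcf_proto *fl)
	{
		struct tcf_proto *tp;

		/* pop and destroy each tcf_proto on the chain in turn */
		while ((tp = fl) != NULL) {
			fl = tp->next;
			tcf_destroy(tp);
		}
	}
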
@@ -1765,7 +1750,7 @@ cbq_destroy(struct Qdisc* sch)
 	 */
 	for (h = 0; h < 16; h++)
 		for (cl = q->classes[h]; cl; cl = cl->next)
-			cbq_destroy_filters(cl);
+			tcf_destroy_chain(cl->filter_list);
 
 	for (h = 0; h < 16; h++) {
 		struct cbq_class *next;