author		Dmitry Torokhov <dtor@insightbb.com>	2007-05-01 00:24:54 -0400
committer	Dmitry Torokhov <dtor@insightbb.com>	2007-05-01 00:24:54 -0400
commit		bc95f3669f5e6f63cf0b84fe4922c3c6dd4aa775 (patch)
tree		427fcf2a7287c16d4b5aa6cbf494d59579a6a8b1 /net/sched/sch_htb.c
parent		3d29cdff999c37b3876082278a8134a0642a02cd (diff)
parent		dc87c3985e9b442c60994308a96f887579addc39 (diff)
Merge master.kernel.org:/pub/scm/linux/kernel/git/torvalds/linux-2.6
Conflicts:
drivers/usb/input/Makefile
drivers/usb/input/gtco.c
Diffstat (limited to 'net/sched/sch_htb.c')
-rw-r--r--	net/sched/sch_htb.c	136
1 file changed, 49 insertions(+), 87 deletions(-)
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
index 97cbb9aec946..99bcec8dd04c 100644
--- a/net/sched/sch_htb.c
+++ b/net/sched/sch_htb.c
@@ -50,6 +50,7 @@
 #include <linux/skbuff.h>
 #include <linux/list.h>
 #include <linux/compiler.h>
+#include <net/netlink.h>
 #include <net/sock.h>
 #include <net/pkt_sched.h>
 #include <linux/rbtree.h>
@@ -128,7 +129,7 @@ struct htb_class {
 	} un;
 	struct rb_node node[TC_HTB_NUMPRIO];	/* node for self or feed tree */
 	struct rb_node pq_node;	/* node for event queue */
-	unsigned long pq_key;	/* the same type as jiffies global */
+	psched_time_t pq_key;
 
 	int prio_activity;	/* for which prios are we active */
 	enum htb_cmode cmode;	/* current mode of the class */
@@ -179,10 +180,7 @@ struct htb_sched {
 	struct rb_root wait_pq[TC_HTB_MAXDEPTH];
 
 	/* time of nearest event per level (row) */
-	unsigned long near_ev_cache[TC_HTB_MAXDEPTH];
-
-	/* cached value of jiffies in dequeue */
-	unsigned long jiffies;
+	psched_time_t near_ev_cache[TC_HTB_MAXDEPTH];
 
 	/* whether we hit non-work conserving class during this dequeue; we use */
 	int nwc_hit;	/* this to disable mindelay complaint in dequeue */
@@ -195,7 +193,7 @@ struct htb_sched {
 
 	int rate2quantum;	/* quant = rate / rate2quantum */
 	psched_time_t now;	/* cached dequeue time */
-	struct timer_list timer;	/* send delay timer */
+	struct qdisc_watchdog watchdog;
 #ifdef HTB_RATECM
 	struct timer_list rttim;	/* rate computer timer */
 	int recmp_bucket;	/* which hash bucket to recompute next */
@@ -342,19 +340,19 @@ static void htb_add_to_wait_tree(struct htb_sched *q,
 {
 	struct rb_node **p = &q->wait_pq[cl->level].rb_node, *parent = NULL;
 
-	cl->pq_key = q->jiffies + PSCHED_US2JIFFIE(delay);
-	if (cl->pq_key == q->jiffies)
+	cl->pq_key = q->now + delay;
+	if (cl->pq_key == q->now)
 		cl->pq_key++;
 
 	/* update the nearest event cache */
-	if (time_after(q->near_ev_cache[cl->level], cl->pq_key))
+	if (q->near_ev_cache[cl->level] > cl->pq_key)
 		q->near_ev_cache[cl->level] = cl->pq_key;
 
 	while (*p) {
 		struct htb_class *c;
 		parent = *p;
 		c = rb_entry(parent, struct htb_class, pq_node);
-		if (time_after_eq(cl->pq_key, c->pq_key))
+		if (cl->pq_key >= c->pq_key)
 			p = &parent->rb_right;
 		else
 			p = &parent->rb_left;
@@ -679,14 +677,6 @@ static int htb_requeue(struct sk_buff *skb, struct Qdisc *sch)
 	return NET_XMIT_SUCCESS;
 }
 
-static void htb_timer(unsigned long arg)
-{
-	struct Qdisc *sch = (struct Qdisc *)arg;
-	sch->flags &= ~TCQ_F_THROTTLED;
-	wmb();
-	netif_schedule(sch->dev);
-}
-
 #ifdef HTB_RATECM
 #define RT_GEN(D,R) R+=D-(R/HTB_EWMAC);D=0
 static void htb_rate_timer(unsigned long arg)
@@ -739,7 +729,7 @@ static void htb_charge_class(struct htb_sched *q, struct htb_class *cl,
 	cl->T = toks
 
 	while (cl) {
-		diff = PSCHED_TDIFF_SAFE(q->now, cl->t_c, (u32) cl->mbuffer);
+		diff = psched_tdiff_bounded(q->now, cl->t_c, cl->mbuffer);
 		if (cl->level >= level) {
 			if (cl->level == level)
 				cl->xstats.lends++;
@@ -778,11 +768,11 @@ static void htb_charge_class(struct htb_sched *q, struct htb_class *cl,
 /**
  * htb_do_events - make mode changes to classes at the level
  *
- * Scans event queue for pending events and applies them. Returns jiffies to
+ * Scans event queue for pending events and applies them. Returns time of
  * next pending event (0 for no event in pq).
- * Note: Aplied are events whose have cl->pq_key <= jiffies.
+ * Note: Applied are events whose have cl->pq_key <= q->now.
  */
-static long htb_do_events(struct htb_sched *q, int level)
+static psched_time_t htb_do_events(struct htb_sched *q, int level)
 {
 	int i;
 
@@ -795,18 +785,18 @@ static long htb_do_events(struct htb_sched *q, int level)
 			return 0;
 
 		cl = rb_entry(p, struct htb_class, pq_node);
-		if (time_after(cl->pq_key, q->jiffies)) {
-			return cl->pq_key - q->jiffies;
-		}
+		if (cl->pq_key > q->now)
+			return cl->pq_key;
+
 		htb_safe_rb_erase(p, q->wait_pq + level);
-		diff = PSCHED_TDIFF_SAFE(q->now, cl->t_c, (u32) cl->mbuffer);
+		diff = psched_tdiff_bounded(q->now, cl->t_c, cl->mbuffer);
 		htb_change_class_mode(q, cl, &diff);
 		if (cl->cmode != HTB_CAN_SEND)
 			htb_add_to_wait_tree(q, cl, diff);
 	}
 	if (net_ratelimit())
 		printk(KERN_WARNING "htb: too many events !\n");
-	return HZ / 10;
+	return q->now + PSCHED_TICKS_PER_SEC / 10;
 }
 
 /* Returns class->node+prio from id-tree where classe's id is >= id. NULL
@@ -958,30 +948,12 @@ next:
 	return skb;
 }
 
-static void htb_delay_by(struct Qdisc *sch, long delay)
-{
-	struct htb_sched *q = qdisc_priv(sch);
-	if (delay <= 0)
-		delay = 1;
-	if (unlikely(delay > 5 * HZ)) {
-		if (net_ratelimit())
-			printk(KERN_INFO "HTB delay %ld > 5sec\n", delay);
-		delay = 5 * HZ;
-	}
-	/* why don't use jiffies here ? because expires can be in past */
-	mod_timer(&q->timer, q->jiffies + delay);
-	sch->flags |= TCQ_F_THROTTLED;
-	sch->qstats.overlimits++;
-}
-
 static struct sk_buff *htb_dequeue(struct Qdisc *sch)
 {
 	struct sk_buff *skb = NULL;
 	struct htb_sched *q = qdisc_priv(sch);
 	int level;
-	long min_delay;
-
-	q->jiffies = jiffies;
+	psched_time_t next_event;
 
 	/* try to dequeue direct packets as high prio (!) to minimize cpu work */
 	skb = __skb_dequeue(&q->direct_queue);
@@ -993,23 +965,25 @@ static struct sk_buff *htb_dequeue(struct Qdisc *sch)
 
 	if (!sch->q.qlen)
 		goto fin;
-	PSCHED_GET_TIME(q->now);
+	q->now = psched_get_time();
 
-	min_delay = LONG_MAX;
+	next_event = q->now + 5 * PSCHED_TICKS_PER_SEC;
 	q->nwc_hit = 0;
 	for (level = 0; level < TC_HTB_MAXDEPTH; level++) {
 		/* common case optimization - skip event handler quickly */
 		int m;
-		long delay;
-		if (time_after_eq(q->jiffies, q->near_ev_cache[level])) {
-			delay = htb_do_events(q, level);
-			q->near_ev_cache[level] =
-			    q->jiffies + (delay ? delay : HZ);
+		psched_time_t event;
+
+		if (q->now >= q->near_ev_cache[level]) {
+			event = htb_do_events(q, level);
+			q->near_ev_cache[level] = event ? event :
+							  PSCHED_TICKS_PER_SEC;
 		} else
-			delay = q->near_ev_cache[level] - q->jiffies;
+			event = q->near_ev_cache[level];
+
+		if (event && next_event > event)
+			next_event = event;
 
-		if (delay && min_delay > delay)
-			min_delay = delay;
 		m = ~q->row_mask[level];
 		while (m != (int)(-1)) {
 			int prio = ffz(m);
@@ -1022,7 +996,8 @@ static struct sk_buff *htb_dequeue(struct Qdisc *sch)
 			}
 		}
 	}
-	htb_delay_by(sch, min_delay > 5 * HZ ? 5 * HZ : min_delay);
+	sch->qstats.overlimits++;
+	qdisc_watchdog_schedule(&q->watchdog, next_event);
 fin:
 	return skb;
 }
@@ -1075,8 +1050,7 @@ static void htb_reset(struct Qdisc *sch)
 
 		}
 	}
-	sch->flags &= ~TCQ_F_THROTTLED;
-	del_timer(&q->timer);
+	qdisc_watchdog_cancel(&q->watchdog);
 	__skb_queue_purge(&q->direct_queue);
 	sch->q.qlen = 0;
 	memset(q->row, 0, sizeof(q->row));
@@ -1113,14 +1087,12 @@ static int htb_init(struct Qdisc *sch, struct rtattr *opt)
 	for (i = 0; i < TC_HTB_NUMPRIO; i++)
 		INIT_LIST_HEAD(q->drops + i);
 
-	init_timer(&q->timer);
+	qdisc_watchdog_init(&q->watchdog, sch);
 	skb_queue_head_init(&q->direct_queue);
 
 	q->direct_qlen = sch->dev->tx_queue_len;
 	if (q->direct_qlen < 2)	/* some devices have zero tx_queue_len */
 		q->direct_qlen = 2;
-	q->timer.function = htb_timer;
-	q->timer.data = (unsigned long)sch;
 
 #ifdef HTB_RATECM
 	init_timer(&q->rttim);
@@ -1139,7 +1111,7 @@ static int htb_init(struct Qdisc *sch, struct rtattr *opt)
 static int htb_dump(struct Qdisc *sch, struct sk_buff *skb)
 {
 	struct htb_sched *q = qdisc_priv(sch);
-	unsigned char *b = skb->tail;
+	unsigned char *b = skb_tail_pointer(skb);
 	struct rtattr *rta;
 	struct tc_htb_glob gopt;
 	spin_lock_bh(&sch->dev->queue_lock);
@@ -1152,12 +1124,12 @@ static int htb_dump(struct Qdisc *sch, struct sk_buff *skb)
 	rta = (struct rtattr *)b;
 	RTA_PUT(skb, TCA_OPTIONS, 0, NULL);
 	RTA_PUT(skb, TCA_HTB_INIT, sizeof(gopt), &gopt);
-	rta->rta_len = skb->tail - b;
+	rta->rta_len = skb_tail_pointer(skb) - b;
 	spin_unlock_bh(&sch->dev->queue_lock);
 	return skb->len;
 rtattr_failure:
 	spin_unlock_bh(&sch->dev->queue_lock);
-	skb_trim(skb, skb->tail - skb->data);
+	nlmsg_trim(skb, skb_tail_pointer(skb));
 	return -1;
 }
 
@@ -1165,7 +1137,7 @@ static int htb_dump_class(struct Qdisc *sch, unsigned long arg,
 			  struct sk_buff *skb, struct tcmsg *tcm)
 {
 	struct htb_class *cl = (struct htb_class *)arg;
-	unsigned char *b = skb->tail;
+	unsigned char *b = skb_tail_pointer(skb);
 	struct rtattr *rta;
 	struct tc_htb_opt opt;
 
@@ -1188,12 +1160,12 @@ static int htb_dump_class(struct Qdisc *sch, unsigned long arg,
 	opt.prio = cl->un.leaf.prio;
 	opt.level = cl->level;
 	RTA_PUT(skb, TCA_HTB_PARMS, sizeof(opt), &opt);
-	rta->rta_len = skb->tail - b;
+	rta->rta_len = skb_tail_pointer(skb) - b;
 	spin_unlock_bh(&sch->dev->queue_lock);
 	return skb->len;
 rtattr_failure:
 	spin_unlock_bh(&sch->dev->queue_lock);
-	skb_trim(skb, b - skb->data);
+	nlmsg_trim(skb, b);
 	return -1;
 }
 
@@ -1264,16 +1236,6 @@ static unsigned long htb_get(struct Qdisc *sch, u32 classid)
 	return (unsigned long)cl;
 }
 
-static void htb_destroy_filters(struct tcf_proto **fl)
-{
-	struct tcf_proto *tp;
-
-	while ((tp = *fl) != NULL) {
-		*fl = tp->next;
-		tcf_destroy(tp);
-	}
-}
-
 static inline int htb_parent_last_child(struct htb_class *cl)
 {
 	if (!cl->parent)
@@ -1302,7 +1264,7 @@ static void htb_parent_to_leaf(struct htb_class *cl, struct Qdisc *new_q)
 	parent->un.leaf.prio = parent->prio;
 	parent->tokens = parent->buffer;
 	parent->ctokens = parent->cbuffer;
-	PSCHED_GET_TIME(parent->t_c);
+	parent->t_c = psched_get_time();
 	parent->cmode = HTB_CAN_SEND;
 }
 
@@ -1317,7 +1279,7 @@ static void htb_destroy_class(struct Qdisc *sch, struct htb_class *cl)
 	qdisc_put_rtab(cl->rate);
 	qdisc_put_rtab(cl->ceil);
 
-	htb_destroy_filters(&cl->filter_list);
+	tcf_destroy_chain(cl->filter_list);
 
 	while (!list_empty(&cl->children))
 		htb_destroy_class(sch, list_entry(cl->children.next,
@@ -1341,7 +1303,7 @@ static void htb_destroy(struct Qdisc *sch)
 {
 	struct htb_sched *q = qdisc_priv(sch);
 
-	del_timer_sync(&q->timer);
+	qdisc_watchdog_cancel(&q->watchdog);
 #ifdef HTB_RATECM
 	del_timer_sync(&q->rttim);
 #endif
@@ -1349,7 +1311,7 @@ static void htb_destroy(struct Qdisc *sch)
 	   and surprisingly it worked in 2.4. But it must precede it
 	   because filter need its target class alive to be able to call
 	   unbind_filter on it (without Oops). */
-	htb_destroy_filters(&q->filter_list);
+	tcf_destroy_chain(q->filter_list);
 
 	while (!list_empty(&q->root))
 		htb_destroy_class(sch, list_entry(q->root.next,
@@ -1380,15 +1342,15 @@ static int htb_delete(struct Qdisc *sch, unsigned long arg)
 
 	sch_tree_lock(sch);
 
-	/* delete from hash and active; remainder in destroy_class */
-	hlist_del_init(&cl->hlist);
-
 	if (!cl->level) {
 		qlen = cl->un.leaf.q->q.qlen;
 		qdisc_reset(cl->un.leaf.q);
 		qdisc_tree_decrease_qlen(cl->un.leaf.q, qlen);
 	}
 
+	/* delete from hash and active; remainder in destroy_class */
+	hlist_del_init(&cl->hlist);
+
 	if (cl->prio_activity)
 		htb_deactivate(q, cl);
 
@@ -1498,8 +1460,8 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
 		/* set class to be in HTB_CAN_SEND state */
 		cl->tokens = hopt->buffer;
 		cl->ctokens = hopt->cbuffer;
-		cl->mbuffer = PSCHED_JIFFIE2US(HZ * 60);	/* 1min */
-		PSCHED_GET_TIME(cl->t_c);
+		cl->mbuffer = 60 * PSCHED_TICKS_PER_SEC;	/* 1min */
+		cl->t_c = psched_get_time();
 		cl->cmode = HTB_CAN_SEND;
 
 		/* attach to the hash list and parent's family */
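The HTB portion of this merge drops the qdisc's private jiffies-based delay timer (htb_timer/htb_delay_by) in favour of the generic qdisc_watchdog, and moves the event bookkeeping (pq_key, near_ev_cache, mbuffer) from jiffies to psched_time_t. Below is a minimal sketch of the resulting pattern, condensed from the hunks above; every call shown appears in the new code, the "..." comments mark HTB logic that is elided here, so this is an illustration of the watchdog usage rather than a complete, compilable file.

/* Sketch only: condensed from the new sch_htb.c code above, not standalone. */
#include <net/pkt_sched.h>	/* qdisc_watchdog_*, psched_get_time(), PSCHED_TICKS_PER_SEC */

struct htb_sched {
	/* ... other HTB state elided ... */
	psched_time_t now;			/* cached dequeue time */
	struct qdisc_watchdog watchdog;		/* replaces the old struct timer_list timer */
};

static int htb_init(struct Qdisc *sch, struct rtattr *opt)
{
	struct htb_sched *q = qdisc_priv(sch);

	qdisc_watchdog_init(&q->watchdog, sch);	/* ties the watchdog's hrtimer to this qdisc */
	/* ... rest of init elided ... */
	return 0;
}

static struct sk_buff *htb_dequeue(struct Qdisc *sch)
{
	struct htb_sched *q = qdisc_priv(sch);
	struct sk_buff *skb = NULL;
	psched_time_t next_event;

	q->now = psched_get_time();
	/* default wakeup: 5 seconds ahead, lowered to the earliest pending class event */
	next_event = q->now + 5 * PSCHED_TICKS_PER_SEC;

	/* ... scan the class trees; on success return the dequeued skb ... */

	/* nothing ready: throttle until next_event instead of mod_timer() on a private timer */
	sch->qstats.overlimits++;
	qdisc_watchdog_schedule(&q->watchdog, next_event);
	return skb;
}

static void htb_reset(struct Qdisc *sch)
{
	struct htb_sched *q = qdisc_priv(sch);

	/* clears TCQ_F_THROTTLED and cancels any pending wakeup */
	qdisc_watchdog_cancel(&q->watchdog);
	/* ... rest of reset elided ... */
}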