path: root/net/sched
author	Eric Dumazet <edumazet@google.com>	2013-06-15 06:30:10 -0400
committer	David S. Miller <davem@davemloft.net>	2013-06-20 02:06:52 -0400
commit	c9364636dcb01a6fc37ca2c6a51c5aa0c663013c (patch)
tree	4ea94902a65b0f3ca46ec82652da39b0dea59d55 /net/sched
parent	bcefe17cffd06efdda3e7ad679ea743236e6271a (diff)
htb: refactor struct htb_sched fields for performance
htb_sched structures are big, and a source of false sharing on SMP. Every time a packet is queued or dequeued, many cache lines must be touched because the structures are not laid out properly.

By carefully splitting htb_sched into two parts, and defining sub-structures to increase data locality, we can improve performance dramatically on SMP. The new htb_prio structure can also be used in htb_class to increase data locality.

I got a 26% performance increase on a 24-thread machine, with 200 concurrent netperf sessions in TCP_RR mode, using an HTB hierarchy of 4 classes.

Signed-off-by: Eric Dumazet <edumazet@google.com>
Cc: Tom Herbert <therbert@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/sched')
-rw-r--r--  net/sched/sch_htb.c | 181
1 file changed, 95 insertions(+), 86 deletions(-)
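At a glance, the patch replaces the parallel per-level/per-priority arrays (row[], ptr[], last_ptr_id[] and wait_pq[] in htb_sched; feed[], ptr[] and last_ptr_id[] in inner classes) with two small helper structures, so that everything touched for one (level, prio) pair sits on adjacent cache lines. A condensed sketch of the new grouping, with field names taken from the diff below (this is not the full definitions; it assumes the kernel's struct rb_root/rb_node and the existing TC_HTB_NUMPRIO constant):

struct htb_prio {
	union {
		struct rb_root	row;	/* self tree: used per level in htb_sched */
		struct rb_root	feed;	/* feed tree: used per prio in inner classes */
	};
	struct rb_node	*ptr;		/* current position in that tree */
	u32		last_ptr_id;	/* classid to restart from when ptr is NULL */
};

struct htb_level {
	struct rb_root	wait_pq;		/* wait PQ of this level */
	struct htb_prio	hprio[TC_HTB_NUMPRIO];	/* per-priority row/ptr/last_ptr_id */
};

htb_sched then keeps a single hlevel[TC_HTB_MAXDEPTH] array of these (placed after the mostly-read fields), and struct htb_class_inner shrinks to a clprio[TC_HTB_NUMPRIO] array of struct htb_prio, as the hunks below show.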
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
index 7954e73d118a..c2124ea29f45 100644
--- a/net/sched/sch_htb.c
+++ b/net/sched/sch_htb.c
@@ -76,6 +76,20 @@ enum htb_cmode {
 	HTB_CAN_SEND		/* class can send */
 };
 
+struct htb_prio {
+	union {
+		struct rb_root	row;
+		struct rb_root	feed;
+	};
+	struct rb_node	*ptr;
+	/* When class changes from state 1->2 and disconnects from
+	 * parent's feed then we lost ptr value and start from the
+	 * first child again. Here we store classid of the
+	 * last valid ptr (used when ptr is NULL).
+	 */
+	u32		last_ptr_id;
+};
+
 /* interior & leaf nodes; props specific to leaves are marked L:
  * To reduce false sharing, place mostly read fields at beginning,
  * and mostly written ones at the end.
@@ -112,19 +126,12 @@ struct htb_class {
 
 	union {
 		struct htb_class_leaf {
-			struct Qdisc	*q;
-			int		deficit[TC_HTB_MAXDEPTH];
 			struct list_head drop_list;
+			int		deficit[TC_HTB_MAXDEPTH];
+			struct Qdisc	*q;
 		} leaf;
 		struct htb_class_inner {
-			struct rb_root feed[TC_HTB_NUMPRIO];	/* feed trees */
-			struct rb_node *ptr[TC_HTB_NUMPRIO];	/* current class ptr */
-			/* When class changes from state 1->2 and disconnects from
-			 * parent's feed then we lost ptr value and start from the
-			 * first child again. Here we store classid of the
-			 * last valid ptr (used when ptr is NULL).
-			 */
-			u32 last_ptr_id[TC_HTB_NUMPRIO];
+			struct htb_prio clprio[TC_HTB_NUMPRIO];
 		} inner;
 	} un;
 	s64	pq_key;
@@ -135,40 +142,39 @@ struct htb_class {
 	struct rb_node	node[TC_HTB_NUMPRIO];	/* node for self or feed tree */
 };
 
+struct htb_level {
+	struct rb_root	wait_pq;
+	struct htb_prio hprio[TC_HTB_NUMPRIO];
+};
+
 struct htb_sched {
 	struct Qdisc_class_hash clhash;
-	struct list_head drops[TC_HTB_NUMPRIO];/* active leaves (for drops) */
-
-	/* self list - roots of self generating tree */
-	struct rb_root row[TC_HTB_MAXDEPTH][TC_HTB_NUMPRIO];
-	int row_mask[TC_HTB_MAXDEPTH];
-	struct rb_node *ptr[TC_HTB_MAXDEPTH][TC_HTB_NUMPRIO];
-	u32 last_ptr_id[TC_HTB_MAXDEPTH][TC_HTB_NUMPRIO];
+	int			defcls;		/* class where unclassified flows go to */
+	int			rate2quantum;	/* quant = rate / rate2quantum */
 
-	/* self wait list - roots of wait PQs per row */
-	struct rb_root wait_pq[TC_HTB_MAXDEPTH];
+	/* filters for qdisc itself */
+	struct tcf_proto	*filter_list;
 
-	/* time of nearest event per level (row) */
-	s64	near_ev_cache[TC_HTB_MAXDEPTH];
+#define HTB_WARN_TOOMANYEVENTS	0x1
+	unsigned int		warned;	/* only one warning */
+	int			direct_qlen;
+	struct work_struct	work;
 
-	int defcls;		/* class where unclassified flows go to */
+	/* non shaped skbs; let them go directly thru */
+	struct sk_buff_head	direct_queue;
+	long			direct_pkts;
 
-	/* filters for qdisc itself */
-	struct tcf_proto *filter_list;
+	struct qdisc_watchdog	watchdog;
 
-	int rate2quantum;	/* quant = rate / rate2quantum */
-	s64	now;	/* cached dequeue time */
-	struct qdisc_watchdog watchdog;
+	s64			now;	/* cached dequeue time */
+	struct list_head	drops[TC_HTB_NUMPRIO];/* active leaves (for drops) */
 
-	/* non shaped skbs; let them go directly thru */
-	struct sk_buff_head direct_queue;
-	int direct_qlen;	/* max qlen of above */
+	/* time of nearest event per level (row) */
+	s64			near_ev_cache[TC_HTB_MAXDEPTH];
 
-	long direct_pkts;
+	int			row_mask[TC_HTB_MAXDEPTH];
 
-#define HTB_WARN_TOOMANYEVENTS	0x1
-	unsigned int warned;	/* only one warning */
-	struct work_struct work;
+	struct htb_level	hlevel[TC_HTB_MAXDEPTH];
 };
 
 /* find class in global hash table using given handle */
@@ -284,7 +290,7 @@ static void htb_add_to_id_tree(struct rb_root *root,
 static void htb_add_to_wait_tree(struct htb_sched *q,
 				 struct htb_class *cl, s64 delay)
 {
-	struct rb_node **p = &q->wait_pq[cl->level].rb_node, *parent = NULL;
+	struct rb_node **p = &q->hlevel[cl->level].wait_pq.rb_node, *parent = NULL;
 
 	cl->pq_key = q->now + delay;
 	if (cl->pq_key == q->now)
@@ -304,7 +310,7 @@ static void htb_add_to_wait_tree(struct htb_sched *q,
 			p = &parent->rb_left;
 	}
 	rb_link_node(&cl->pq_node, parent, p);
-	rb_insert_color(&cl->pq_node, &q->wait_pq[cl->level]);
+	rb_insert_color(&cl->pq_node, &q->hlevel[cl->level].wait_pq);
 }
 
 /**
@@ -331,7 +337,7 @@ static inline void htb_add_class_to_row(struct htb_sched *q,
 	while (mask) {
 		int prio = ffz(~mask);
 		mask &= ~(1 << prio);
-		htb_add_to_id_tree(q->row[cl->level] + prio, cl, prio);
+		htb_add_to_id_tree(&q->hlevel[cl->level].hprio[prio].row, cl, prio);
 	}
 }
 
@@ -357,16 +363,18 @@ static inline void htb_remove_class_from_row(struct htb_sched *q,
 						 struct htb_class *cl, int mask)
 {
 	int m = 0;
+	struct htb_level *hlevel = &q->hlevel[cl->level];
 
 	while (mask) {
 		int prio = ffz(~mask);
+		struct htb_prio *hprio = &hlevel->hprio[prio];
 
 		mask &= ~(1 << prio);
-		if (q->ptr[cl->level][prio] == cl->node + prio)
-			htb_next_rb_node(q->ptr[cl->level] + prio);
+		if (hprio->ptr == cl->node + prio)
+			htb_next_rb_node(&hprio->ptr);
 
-		htb_safe_rb_erase(cl->node + prio, q->row[cl->level] + prio);
-		if (!q->row[cl->level][prio].rb_node)
+		htb_safe_rb_erase(cl->node + prio, &hprio->row);
+		if (!hprio->row.rb_node)
 			m |= 1 << prio;
 	}
 	q->row_mask[cl->level] &= ~m;
@@ -390,13 +398,13 @@ static void htb_activate_prios(struct htb_sched *q, struct htb_class *cl)
 		int prio = ffz(~m);
 		m &= ~(1 << prio);
 
-		if (p->un.inner.feed[prio].rb_node)
+		if (p->un.inner.clprio[prio].feed.rb_node)
 			/* parent already has its feed in use so that
 			 * reset bit in mask as parent is already ok
 			 */
 			mask &= ~(1 << prio);
 
-		htb_add_to_id_tree(p->un.inner.feed + prio, cl, prio);
+		htb_add_to_id_tree(&p->un.inner.clprio[prio].feed, cl, prio);
 	}
 	p->prio_activity |= mask;
 	cl = p;
@@ -426,18 +434,19 @@ static void htb_deactivate_prios(struct htb_sched *q, struct htb_class *cl)
 		int prio = ffz(~m);
 		m &= ~(1 << prio);
 
-		if (p->un.inner.ptr[prio] == cl->node + prio) {
+		if (p->un.inner.clprio[prio].ptr == cl->node + prio) {
 			/* we are removing child which is pointed to from
 			 * parent feed - forget the pointer but remember
 			 * classid
 			 */
-			p->un.inner.last_ptr_id[prio] = cl->common.classid;
-			p->un.inner.ptr[prio] = NULL;
+			p->un.inner.clprio[prio].last_ptr_id = cl->common.classid;
+			p->un.inner.clprio[prio].ptr = NULL;
 		}
 
-		htb_safe_rb_erase(cl->node + prio, p->un.inner.feed + prio);
+		htb_safe_rb_erase(cl->node + prio,
+				  &p->un.inner.clprio[prio].feed);
 
-		if (!p->un.inner.feed[prio].rb_node)
+		if (!p->un.inner.clprio[prio].feed.rb_node)
 			mask |= 1 << prio;
 	}
 
@@ -652,7 +661,7 @@ static void htb_charge_class(struct htb_sched *q, struct htb_class *cl,
 		htb_change_class_mode(q, cl, &diff);
 		if (old_mode != cl->cmode) {
 			if (old_mode != HTB_CAN_SEND)
-				htb_safe_rb_erase(&cl->pq_node, q->wait_pq + cl->level);
+				htb_safe_rb_erase(&cl->pq_node, &q->hlevel[cl->level].wait_pq);
 			if (cl->cmode != HTB_CAN_SEND)
 				htb_add_to_wait_tree(q, cl, diff);
 		}
@@ -672,7 +681,7 @@ static void htb_charge_class(struct htb_sched *q, struct htb_class *cl,
  * next pending event (0 for no event in pq, q->now for too many events).
  * Note: Applied are events whose have cl->pq_key <= q->now.
  */
-static s64 htb_do_events(struct htb_sched *q, int level,
+static s64 htb_do_events(struct htb_sched *q, const int level,
 			 unsigned long start)
 {
 	/* don't run for longer than 2 jiffies; 2 is used instead of
@@ -680,10 +689,12 @@ static s64 htb_do_events(struct htb_sched *q, int level,
 	 * too soon
 	 */
 	unsigned long stop_at = start + 2;
+	struct rb_root *wait_pq = &q->hlevel[level].wait_pq;
+
 	while (time_before(jiffies, stop_at)) {
 		struct htb_class *cl;
 		s64 diff;
-		struct rb_node *p = rb_first(&q->wait_pq[level]);
+		struct rb_node *p = rb_first(wait_pq);
 
 		if (!p)
 			return 0;
@@ -692,7 +703,7 @@ static s64 htb_do_events(struct htb_sched *q, int level,
 		if (cl->pq_key > q->now)
 			return cl->pq_key;
 
-		htb_safe_rb_erase(p, q->wait_pq + level);
+		htb_safe_rb_erase(p, wait_pq);
 		diff = min_t(s64, q->now - cl->t_c, cl->mbuffer);
 		htb_change_class_mode(q, cl, &diff);
 		if (cl->cmode != HTB_CAN_SEND)
@@ -736,8 +747,7 @@ static struct rb_node *htb_id_find_next_upper(int prio, struct rb_node *n,
  *
  * Find leaf where current feed pointers points to.
  */
-static struct htb_class *htb_lookup_leaf(struct rb_root *tree, int prio,
-					 struct rb_node **pptr, u32 * pid)
+static struct htb_class *htb_lookup_leaf(struct htb_prio *hprio, const int prio)
 {
 	int i;
 	struct {
@@ -746,10 +756,10 @@ static struct htb_class *htb_lookup_leaf(struct rb_root *tree, int prio,
 		u32 *pid;
 	} stk[TC_HTB_MAXDEPTH], *sp = stk;
 
-	BUG_ON(!tree->rb_node);
-	sp->root = tree->rb_node;
-	sp->pptr = pptr;
-	sp->pid = pid;
+	BUG_ON(!hprio->row.rb_node);
+	sp->root = hprio->row.rb_node;
+	sp->pptr = &hprio->ptr;
+	sp->pid = &hprio->last_ptr_id;
 
 	for (i = 0; i < 65535; i++) {
 		if (!*sp->pptr && *sp->pid) {
@@ -776,12 +786,15 @@ static struct htb_class *htb_lookup_leaf(struct rb_root *tree, int prio,
 			}
 		} else {
 			struct htb_class *cl;
+			struct htb_prio *clp;
+
 			cl = rb_entry(*sp->pptr, struct htb_class, node[prio]);
 			if (!cl->level)
 				return cl;
-			(++sp)->root = cl->un.inner.feed[prio].rb_node;
-			sp->pptr = cl->un.inner.ptr + prio;
-			sp->pid = cl->un.inner.last_ptr_id + prio;
+			clp = &cl->un.inner.clprio[prio];
+			(++sp)->root = clp->feed.rb_node;
+			sp->pptr = &clp->ptr;
+			sp->pid = &clp->last_ptr_id;
 		}
 	}
 	WARN_ON(1);
@@ -791,15 +804,16 @@ static struct htb_class *htb_lookup_leaf(struct rb_root *tree, int prio,
 /* dequeues packet at given priority and level; call only if
  * you are sure that there is active class at prio/level
  */
-static struct sk_buff *htb_dequeue_tree(struct htb_sched *q, int prio,
-					int level)
+static struct sk_buff *htb_dequeue_tree(struct htb_sched *q, const int prio,
+					const int level)
 {
 	struct sk_buff *skb = NULL;
 	struct htb_class *cl, *start;
+	struct htb_level *hlevel = &q->hlevel[level];
+	struct htb_prio *hprio = &hlevel->hprio[prio];
+
 	/* look initial class up in the row */
-	start = cl = htb_lookup_leaf(q->row[level] + prio, prio,
-				     q->ptr[level] + prio,
-				     q->last_ptr_id[level] + prio);
+	start = cl = htb_lookup_leaf(hprio, prio);
 
 	do {
 next:
@@ -819,9 +833,7 @@ next:
 			if ((q->row_mask[level] & (1 << prio)) == 0)
 				return NULL;
 
-			next = htb_lookup_leaf(q->row[level] + prio,
-					       prio, q->ptr[level] + prio,
-					       q->last_ptr_id[level] + prio);
+			next = htb_lookup_leaf(hprio, prio);
 
 			if (cl == start)	/* fix start if we just deleted it */
 				start = next;
@@ -834,11 +846,9 @@ next:
 			break;
 
 		qdisc_warn_nonwc("htb", cl->un.leaf.q);
-		htb_next_rb_node((level ? cl->parent->un.inner.ptr : q->
-				  ptr[0]) + prio);
-		cl = htb_lookup_leaf(q->row[level] + prio, prio,
-				     q->ptr[level] + prio,
-				     q->last_ptr_id[level] + prio);
+		htb_next_rb_node(level ? &cl->parent->un.inner.clprio[prio].ptr :
+					 &q->hlevel[0].hprio[prio].ptr);
+		cl = htb_lookup_leaf(hprio, prio);
 
 	} while (cl != start);
 
@@ -847,8 +857,8 @@ next:
 		cl->un.leaf.deficit[level] -= qdisc_pkt_len(skb);
 		if (cl->un.leaf.deficit[level] < 0) {
 			cl->un.leaf.deficit[level] += cl->quantum;
-			htb_next_rb_node((level ? cl->parent->un.inner.ptr : q->
-					  ptr[0]) + prio);
+			htb_next_rb_node(level ? &cl->parent->un.inner.clprio[prio].ptr :
+						 &q->hlevel[0].hprio[prio].ptr);
 		}
 		/* this used to be after charge_class but this constelation
 		 * gives us slightly better performance
@@ -888,15 +898,14 @@ ok:
 	for (level = 0; level < TC_HTB_MAXDEPTH; level++) {
 		/* common case optimization - skip event handler quickly */
 		int m;
-		s64 event;
+		s64 event = q->near_ev_cache[level];
 
-		if (q->now >= q->near_ev_cache[level]) {
+		if (q->now >= event) {
 			event = htb_do_events(q, level, start_at);
 			if (!event)
 				event = q->now + NSEC_PER_SEC;
 			q->near_ev_cache[level] = event;
-		} else
-			event = q->near_ev_cache[level];
+		}
 
 		if (next_event > event)
 			next_event = event;
@@ -976,10 +985,8 @@ static void htb_reset(struct Qdisc *sch)
 	qdisc_watchdog_cancel(&q->watchdog);
 	__skb_queue_purge(&q->direct_queue);
 	sch->q.qlen = 0;
-	memset(q->row, 0, sizeof(q->row));
+	memset(q->hlevel, 0, sizeof(q->hlevel));
 	memset(q->row_mask, 0, sizeof(q->row_mask));
-	memset(q->wait_pq, 0, sizeof(q->wait_pq));
-	memset(q->ptr, 0, sizeof(q->ptr));
 	for (i = 0; i < TC_HTB_NUMPRIO; i++)
 		INIT_LIST_HEAD(q->drops + i);
 }
@@ -1200,7 +1207,8 @@ static void htb_parent_to_leaf(struct htb_sched *q, struct htb_class *cl,
 	WARN_ON(cl->level || !cl->un.leaf.q || cl->prio_activity);
 
 	if (parent->cmode != HTB_CAN_SEND)
-		htb_safe_rb_erase(&parent->pq_node, q->wait_pq + parent->level);
+		htb_safe_rb_erase(&parent->pq_node,
+				  &q->hlevel[parent->level].wait_pq);
 
 	parent->level = 0;
 	memset(&parent->un.inner, 0, sizeof(parent->un.inner));
@@ -1289,7 +1297,8 @@ static int htb_delete(struct Qdisc *sch, unsigned long arg)
 	htb_deactivate(q, cl);
 
 	if (cl->cmode != HTB_CAN_SEND)
-		htb_safe_rb_erase(&cl->pq_node, q->wait_pq + cl->level);
+		htb_safe_rb_erase(&cl->pq_node,
+				  &q->hlevel[cl->level].wait_pq);
 
 	if (last_child)
 		htb_parent_to_leaf(q, cl, new_q);
@@ -1411,7 +1420,7 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
 
 	/* remove from evt list because of level change */
 	if (parent->cmode != HTB_CAN_SEND) {
-		htb_safe_rb_erase(&parent->pq_node, q->wait_pq);
+		htb_safe_rb_erase(&parent->pq_node, &q->hlevel[0].wait_pq);
 		parent->cmode = HTB_CAN_SEND;
 	}
 	parent->level = (parent->parent ? parent->parent->level