author		Stephen Hemminger <shemminger@osdl.org>	2006-08-11 02:31:08 -0400
committer	David S. Miller <davem@sunset.davemloft.net>	2006-09-22 17:54:30 -0400
commit		3bf72957d2a553c343e4285463ef0a88139bdec4 (patch)
tree		dd7f6882d7ba513d849914db8c7f074bcc69b0c1 /net/sched
parent		b801f54917b7c6e8540f877ee562cd0725e62ebd (diff)
[HTB]: Remove broken debug code.
The HTB network scheduler had debug code that wouldn't compile
and that confused and obfuscated the code; remove it.
Signed-off-by: Stephen Hemminger <shemminger@osdl.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/sched')
-rw-r--r--	net/sched/sch_htb.c	302
1 file changed, 34 insertions(+), 268 deletions(-)
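For context before the diff: the deleted machinery gated every debug printk on a per-subsystem level. q->debug packed sixteen 2-bit levels into one u32, with subsystem S occupying the two bits starting at bit 2*S, and HTB_DBG_COND compared the extracted field against the requested level. A minimal userspace sketch of that decoding (standalone and illustrative, not kernel API):

#include <stdint.h>
#include <stdio.h>

/* level 0 = none, 1 = basic info, 2 = detailed, 3 = full */
static int dbg_enabled(uint32_t debug, unsigned int subsys, unsigned int level)
{
	/* mirrors HTB_DBG_COND(S,L): (((q->debug>>(2*S))&3) >= L) */
	return ((debug >> (2 * subsys)) & 3) >= level;
}

int main(void)
{
	uint32_t debug = 0;

	debug |= 2u << (2 * 7);	/* subsystem 7 (hint tree) at "detailed" */
	debug |= 3u << (2 * 8);	/* subsystem 8 (event queue) at "full" */

	printf("%d\n", dbg_enabled(debug, 7, 1));	/* 1: basic <= detailed */
	printf("%d\n", dbg_enabled(debug, 7, 3));	/* 0: full > detailed */
	printf("%d\n", dbg_enabled(debug, 8, 3));	/* 1 */
	return 0;
}

Since the tc tool could no longer activate any of this (the code did not even compile), the dump path now reports gopt.debug as 0, as the diff below shows.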
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
index 880a3394a51f..73094e7f4169 100644
--- a/net/sched/sch_htb.c
+++ b/net/sched/sch_htb.c
@@ -70,7 +70,6 @@
 
 #define HTB_HSIZE 16	/* classid hash size */
 #define HTB_EWMAC 2	/* rate average over HTB_EWMAC*HTB_HSIZE sec */
-#undef HTB_DEBUG	/* compile debugging support (activated by tc tool) */
 #define HTB_RATECM 1	/* whether to use rate computer */
 #define HTB_HYSTERESIS 1/* whether to use mode hysteresis for speedup */
 #define HTB_QLOCK(S) spin_lock_bh(&(S)->dev->queue_lock)
@@ -81,51 +80,6 @@
 #error "Mismatched sch_htb.c and pkt_sch.h"
 #endif
 
-/* debugging support; S is subsystem, these are defined:
-   0 - netlink messages
-   1 - enqueue
-   2 - drop & requeue
-   3 - dequeue main
-   4 - dequeue one prio DRR part
-   5 - dequeue class accounting
-   6 - class overlimit status computation
-   7 - hint tree
-   8 - event queue
-  10 - rate estimator
-  11 - classifier
-  12 - fast dequeue cache
-
-   L is level; 0 = none, 1 = basic info, 2 = detailed, 3 = full
-   q->debug uint32 contains 16 2-bit fields one for subsystem starting
-   from LSB
- */
-#ifdef HTB_DEBUG
-#define HTB_DBG_COND(S,L) (((q->debug>>(2*S))&3) >= L)
-#define HTB_DBG(S,L,FMT,ARG...) if (HTB_DBG_COND(S,L)) \
-	printk(KERN_DEBUG FMT,##ARG)
-#define HTB_CHCL(cl) BUG_TRAP((cl)->magic == HTB_CMAGIC)
-#define HTB_PASSQ q,
-#define HTB_ARGQ struct htb_sched *q,
-#define static
-#undef __inline__
-#define __inline__
-#undef inline
-#define inline
-#define HTB_CMAGIC 0xFEFAFEF1
-#define htb_safe_rb_erase(N,R) do { BUG_TRAP((N)->rb_color != -1); \
-		   if ((N)->rb_color == -1) break; \
-		   rb_erase(N,R); \
-		   (N)->rb_color = -1; } while (0)
-#else
-#define HTB_DBG_COND(S,L) (0)
-#define HTB_DBG(S,L,FMT,ARG...)
-#define HTB_PASSQ
-#define HTB_ARGQ
-#define HTB_CHCL(cl)
-#define htb_safe_rb_erase(N,R) rb_erase(N,R)
-#endif
-
-
 /* used internaly to keep status of single class */
 enum htb_cmode {
 	HTB_CANT_SEND,		/* class can't send and can't borrow */
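The block removed above also defined htb_safe_rb_erase(), a debug-only guard against erasing a node that is not on a tree: debug builds stamped rb_color with -1 whenever a node left a tree and trapped on a second erase. With that scaffolding gone, the rest of the patch calls rb_erase() directly. A rough userspace sketch of the sentinel idea (types and helpers are illustrative, not the kernel rbtree API):

#include <assert.h>

struct node {
	int on_tree;		/* stands in for the rb_color == -1 sentinel */
};

static void real_erase(struct node *n)
{
	(void)n;		/* rb_erase() would unlink the node here */
}

static void safe_erase(struct node *n)
{
	assert(n->on_tree);	/* BUG_TRAP((N)->rb_color != -1) */
	if (!n->on_tree)
		return;		/* refuse the double erase */
	real_erase(n);
	n->on_tree = 0;		/* (N)->rb_color = -1 */
}

int main(void)
{
	struct node n = { 1 };
	safe_erase(&n);		/* ok */
	/* a second safe_erase(&n) would trip the assert */
	return 0;
}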
@@ -136,9 +90,6 @@ enum htb_cmode {
 /* interior & leaf nodes; props specific to leaves are marked L: */
 struct htb_class
 {
-#ifdef HTB_DEBUG
-	unsigned magic;
-#endif
 	/* general class parameters */
 	u32 classid;
 	struct gnet_stats_basic bstats;
@@ -238,7 +189,6 @@ struct htb_sched
 	int nwc_hit;	/* this to disable mindelay complaint in dequeue */
 
 	int defcls;	/* class where unclassified flows go to */
-	u32 debug;	/* subsystem debug levels */
 
 	/* filters for qdisc itself */
 	struct tcf_proto *filter_list;
@@ -354,75 +304,21 @@ static struct htb_class *htb_classify(struct sk_buff *skb, struct Qdisc *sch, in
 	return cl;
 }
 
-#ifdef HTB_DEBUG
-static void htb_next_rb_node(struct rb_node **n);
-#define HTB_DUMTREE(root,memb) if(root) { \
-	struct rb_node *n = (root)->rb_node; \
-	while (n->rb_left) n = n->rb_left; \
-	while (n) { \
-		struct htb_class *cl = rb_entry(n, struct htb_class, memb); \
-		printk(" %x",cl->classid); htb_next_rb_node (&n); \
-	} }
-
-static void htb_debug_dump (struct htb_sched *q)
-{
-	int i,p;
-	printk(KERN_DEBUG "htb*g j=%lu lj=%lu\n",jiffies,q->jiffies);
-	/* rows */
-	for (i=TC_HTB_MAXDEPTH-1;i>=0;i--) {
-		printk(KERN_DEBUG "htb*r%d m=%x",i,q->row_mask[i]);
-		for (p=0;p<TC_HTB_NUMPRIO;p++) {
-			if (!q->row[i][p].rb_node) continue;
-			printk(" p%d:",p);
-			HTB_DUMTREE(q->row[i]+p,node[p]);
-		}
-		printk("\n");
-	}
-	/* classes */
-	for (i = 0; i < HTB_HSIZE; i++) {
-		struct list_head *l;
-		list_for_each (l,q->hash+i) {
-			struct htb_class *cl = list_entry(l,struct htb_class,hlist);
-			long diff = PSCHED_TDIFF_SAFE(q->now, cl->t_c, (u32)cl->mbuffer);
-			printk(KERN_DEBUG "htb*c%x m=%d t=%ld c=%ld pq=%lu df=%ld ql=%d "
-					"pa=%x f:",
-				cl->classid,cl->cmode,cl->tokens,cl->ctokens,
-				cl->pq_node.rb_color==-1?0:cl->pq_key,diff,
-				cl->level?0:cl->un.leaf.q->q.qlen,cl->prio_activity);
-			if (cl->level)
-			for (p=0;p<TC_HTB_NUMPRIO;p++) {
-				if (!cl->un.inner.feed[p].rb_node) continue;
-				printk(" p%d a=%x:",p,cl->un.inner.ptr[p]?rb_entry(cl->un.inner.ptr[p], struct htb_class,node[p])->classid:0);
-				HTB_DUMTREE(cl->un.inner.feed+p,node[p]);
-			}
-			printk("\n");
-		}
-	}
-}
-#endif
 /**
  * htb_add_to_id_tree - adds class to the round robin list
  *
  * Routine adds class to the list (actually tree) sorted by classid.
  * Make sure that class is not already on such list for given prio.
  */
-static void htb_add_to_id_tree (HTB_ARGQ struct rb_root *root,
+static void htb_add_to_id_tree (struct rb_root *root,
 				struct htb_class *cl,int prio)
 {
 	struct rb_node **p = &root->rb_node, *parent = NULL;
-	HTB_DBG(7,3,"htb_add_id_tree cl=%X prio=%d\n",cl->classid,prio);
-#ifdef HTB_DEBUG
-	if (cl->node[prio].rb_color != -1) { BUG_TRAP(0); return; }
-	HTB_CHCL(cl);
-	if (*p) {
-		struct htb_class *x = rb_entry(*p,struct htb_class,node[prio]);
-		HTB_CHCL(x);
-	}
-#endif
+
 	while (*p) {
 		struct htb_class *c; parent = *p;
 		c = rb_entry(parent, struct htb_class, node[prio]);
-		HTB_CHCL(c);
+
 		if (cl->classid > c->classid)
 			p = &parent->rb_right;
 		else
@@ -440,16 +336,10 @@ static void htb_add_to_id_tree (HTB_ARGQ struct rb_root *root,
  * already in the queue.
  */
 static void htb_add_to_wait_tree (struct htb_sched *q,
-				struct htb_class *cl,long delay,int debug_hint)
+				struct htb_class *cl,long delay)
 {
 	struct rb_node **p = &q->wait_pq[cl->level].rb_node, *parent = NULL;
-	HTB_DBG(7,3,"htb_add_wt cl=%X key=%lu\n",cl->classid,cl->pq_key);
-#ifdef HTB_DEBUG
-	if (cl->pq_node.rb_color != -1) { BUG_TRAP(0); return; }
-	HTB_CHCL(cl);
-	if ((delay <= 0 || delay > cl->mbuffer) && net_ratelimit())
-		printk(KERN_ERR "HTB: suspicious delay in wait_tree d=%ld cl=%X h=%d\n",delay,cl->classid,debug_hint);
-#endif
+
 	cl->pq_key = q->jiffies + PSCHED_US2JIFFIE(delay);
 	if (cl->pq_key == q->jiffies)
 		cl->pq_key++;
@@ -490,14 +380,11 @@ static void htb_next_rb_node(struct rb_node **n)
 static inline void htb_add_class_to_row(struct htb_sched *q,
 		struct htb_class *cl,int mask)
 {
-	HTB_DBG(7,2,"htb_addrow cl=%X mask=%X rmask=%X\n",
-			cl->classid,mask,q->row_mask[cl->level]);
-	HTB_CHCL(cl);
 	q->row_mask[cl->level] |= mask;
 	while (mask) {
 		int prio = ffz(~mask);
 		mask &= ~(1 << prio);
-		htb_add_to_id_tree(HTB_PASSQ q->row[cl->level]+prio,cl,prio);
+		htb_add_to_id_tree(q->row[cl->level]+prio,cl,prio);
 	}
 }
 
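Both row helpers here walk the priority bitmask the same way: ffz(~mask), find-first-zero of the complement, yields the lowest set bit, which is then cleared until the mask is empty. A standalone sketch of the idiom, with GCC's __builtin_ctz standing in for the kernel's ffz (assumes the mask is nonzero inside the loop, as in the originals):

#include <stdio.h>

int main(void)
{
	unsigned int mask = 0x2d;	/* priorities 0, 2, 3 and 5 active */

	while (mask) {
		int prio = __builtin_ctz(mask);	/* lowest set bit == ffz(~mask) */
		mask &= ~(1u << prio);		/* clear it, as the helpers do */
		printf("handling prio %d\n", prio);
	}
	return 0;
}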
@@ -511,18 +398,16 @@ static __inline__ void htb_remove_class_from_row(struct htb_sched *q,
 		struct htb_class *cl,int mask)
 {
 	int m = 0;
-	HTB_CHCL(cl);
+
 	while (mask) {
 		int prio = ffz(~mask);
 		mask &= ~(1 << prio);
 		if (q->ptr[cl->level][prio] == cl->node+prio)
 			htb_next_rb_node(q->ptr[cl->level]+prio);
-		htb_safe_rb_erase(cl->node + prio,q->row[cl->level]+prio);
+		rb_erase(cl->node + prio,q->row[cl->level]+prio);
 		if (!q->row[cl->level][prio].rb_node)
 			m |= 1 << prio;
 	}
-	HTB_DBG(7,2,"htb_delrow cl=%X mask=%X rmask=%X maskdel=%X\n",
-		cl->classid,mask,q->row_mask[cl->level],m);
 	q->row_mask[cl->level] &= ~m;
 }
 
@@ -537,11 +422,9 @@ static void htb_activate_prios(struct htb_sched *q,struct htb_class *cl)
 {
 	struct htb_class *p = cl->parent;
 	long m,mask = cl->prio_activity;
-	HTB_DBG(7,2,"htb_act_prios cl=%X mask=%lX cmode=%d\n",cl->classid,mask,cl->cmode);
-	HTB_CHCL(cl);
 
 	while (cl->cmode == HTB_MAY_BORROW && p && mask) {
-		HTB_CHCL(p);
+
 		m = mask; while (m) {
 			int prio = ffz(~m);
 			m &= ~(1 << prio);
@@ -551,13 +434,11 @@ static void htb_activate_prios(struct htb_sched *q,struct htb_class *cl)
 			   reset bit in mask as parent is already ok */
 			mask &= ~(1 << prio);
 
-			htb_add_to_id_tree(HTB_PASSQ p->un.inner.feed+prio,cl,prio);
+			htb_add_to_id_tree(p->un.inner.feed+prio,cl,prio);
 		}
-		HTB_DBG(7,3,"htb_act_pr_aft p=%X pact=%X mask=%lX pmode=%d\n",
-				p->classid,p->prio_activity,mask,p->cmode);
 		p->prio_activity |= mask;
 		cl = p; p = cl->parent;
-		HTB_CHCL(cl);
+
 	}
 	if (cl->cmode == HTB_CAN_SEND && mask)
 		htb_add_class_to_row(q,cl,mask);
@@ -574,8 +455,7 @@ static void htb_deactivate_prios(struct htb_sched *q, struct htb_class *cl)
 {
 	struct htb_class *p = cl->parent;
 	long m,mask = cl->prio_activity;
-	HTB_DBG(7,2,"htb_deact_prios cl=%X mask=%lX cmode=%d\n",cl->classid,mask,cl->cmode);
-	HTB_CHCL(cl);
+
 
 	while (cl->cmode == HTB_MAY_BORROW && p && mask) {
 		m = mask; mask = 0;
@@ -591,16 +471,15 @@ static void htb_deactivate_prios(struct htb_sched *q, struct htb_class *cl)
 				p->un.inner.ptr[prio] = NULL;
 			}
 
-			htb_safe_rb_erase(cl->node + prio,p->un.inner.feed + prio);
+			rb_erase(cl->node + prio,p->un.inner.feed + prio);
 
 			if (!p->un.inner.feed[prio].rb_node)
 				mask |= 1 << prio;
 		}
-		HTB_DBG(7,3,"htb_deact_pr_aft p=%X pact=%X mask=%lX pmode=%d\n",
-				p->classid,p->prio_activity,mask,p->cmode);
+
 		p->prio_activity &= ~mask;
 		cl = p; p = cl->parent;
-		HTB_CHCL(cl);
+
 	}
 	if (cl->cmode == HTB_CAN_SEND && mask)
 		htb_remove_class_from_row(q,cl,mask);
@@ -655,8 +534,6 @@ htb_change_class_mode(struct htb_sched *q, struct htb_class *cl, long *diff)
 {
 	enum htb_cmode new_mode = htb_class_mode(cl,diff);
 
-	HTB_CHCL(cl);
-	HTB_DBG(7,1,"htb_chging_clmode %d->%d cl=%X\n",cl->cmode,new_mode,cl->classid);
 
 	if (new_mode == cl->cmode)
 		return;
@@ -681,7 +558,7 @@ htb_change_class_mode(struct htb_sched *q, struct htb_class *cl, long *diff)
 static __inline__ void htb_activate(struct htb_sched *q,struct htb_class *cl)
 {
 	BUG_TRAP(!cl->level && cl->un.leaf.q && cl->un.leaf.q->q.qlen);
-	HTB_CHCL(cl);
+
 	if (!cl->prio_activity) {
 		cl->prio_activity = 1 << (cl->un.leaf.aprio = cl->un.leaf.prio);
 		htb_activate_prios(q,cl);
@@ -699,7 +576,7 @@ static __inline__ void
 htb_deactivate(struct htb_sched *q,struct htb_class *cl)
 {
 	BUG_TRAP(cl->prio_activity);
-	HTB_CHCL(cl);
+
 	htb_deactivate_prios(q,cl);
 	cl->prio_activity = 0;
 	list_del_init(&cl->un.leaf.drop_list);
@@ -739,7 +616,6 @@ static int htb_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 
 	sch->q.qlen++;
 	sch->bstats.packets++; sch->bstats.bytes += skb->len;
-	HTB_DBG(1,1,"htb_enq_ok cl=%X skb=%p\n",(cl && cl != HTB_DIRECT)?cl->classid:0,skb);
 	return NET_XMIT_SUCCESS;
 }
 
@@ -771,7 +647,6 @@ static int htb_requeue(struct sk_buff *skb, struct Qdisc *sch)
 
 	sch->q.qlen++;
 	sch->qstats.requeues++;
-	HTB_DBG(1,1,"htb_req_ok cl=%X skb=%p\n",(cl && cl != HTB_DIRECT)?cl->classid:0,skb);
 	return NET_XMIT_SUCCESS;
 }
 
@@ -793,7 +668,6 @@ static void htb_rate_timer(unsigned long arg)
 
 	/* lock queue so that we can muck with it */
 	HTB_QLOCK(sch);
-	HTB_DBG(10,1,"htb_rttmr j=%ld\n",jiffies);
 
 	q->rttim.expires = jiffies + HZ;
 	add_timer(&q->rttim);
@@ -803,8 +677,7 @@ static void htb_rate_timer(unsigned long arg)
 		q->recmp_bucket = 0;
 	list_for_each (p,q->hash+q->recmp_bucket) {
 		struct htb_class *cl = list_entry(p,struct htb_class,hlist);
-		HTB_DBG(10,2,"htb_rttmr_cl cl=%X sbyte=%lu spkt=%lu\n",
-				cl->classid,cl->sum_bytes,cl->sum_packets);
+
 		RT_GEN (cl->sum_bytes,cl->rate_bytes);
 		RT_GEN (cl->sum_packets,cl->rate_packets);
 	}
@@ -828,7 +701,6 @@ static void htb_charge_class(struct htb_sched *q,struct htb_class *cl,
 {
 	long toks,diff;
 	enum htb_cmode old_mode;
-	HTB_DBG(5,1,"htb_chrg_cl cl=%X lev=%d len=%d\n",cl->classid,level,bytes);
 
 #define HTB_ACCNT(T,B,R) toks = diff + cl->T; \
 	if (toks > cl->B) toks = cl->B; \
@@ -837,24 +709,7 @@ static void htb_charge_class(struct htb_sched *q,struct htb_class *cl,
 	cl->T = toks
 
 	while (cl) {
-		HTB_CHCL(cl);
 		diff = PSCHED_TDIFF_SAFE(q->now, cl->t_c, (u32)cl->mbuffer);
-#ifdef HTB_DEBUG
-		if (diff > cl->mbuffer || diff < 0 || PSCHED_TLESS(q->now, cl->t_c)) {
-			if (net_ratelimit())
-				printk(KERN_ERR "HTB: bad diff in charge, cl=%X diff=%lX now=%Lu then=%Lu j=%lu\n",
-				       cl->classid, diff,
-#ifdef CONFIG_NET_SCH_CLK_GETTIMEOFDAY
-				       q->now.tv_sec * 1000000ULL + q->now.tv_usec,
-				       cl->t_c.tv_sec * 1000000ULL + cl->t_c.tv_usec,
-#else
-				       (unsigned long long) q->now,
-				       (unsigned long long) cl->t_c,
-#endif
-				       q->jiffies);
-			diff = 1000;
-		}
-#endif
 		if (cl->level >= level) {
 			if (cl->level == level) cl->xstats.lends++;
 			HTB_ACCNT (tokens,buffer,rate);
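The HTB_ACCNT macro above (its middle lines fall outside this hunk) is a token-bucket update: tokens accrue with the elapsed time diff, are clamped to the configured burst buffer, and the packet's cost is then charged, possibly driving the count negative and the class over limit. A generic userspace sketch of that shape, with a toy length-to-time conversion standing in for the kernel's rate tables:

#include <stdio.h>

/* toy stand-in for the kernel's rate-table lookup */
static long l2t(long bytes_per_tick, long bytes)
{
	return bytes / bytes_per_tick;
}

int main(void)
{
	long tokens = 0;		/* cl->tokens */
	const long buffer = 100;	/* cl->buffer: max burst, in ticks */
	long diff = 250;		/* ticks since the last update */

	long toks = diff + tokens;	/* accrue for elapsed time */
	if (toks > buffer)
		toks = buffer;		/* clamp to the burst buffer */
	toks -= l2t(2, 300);		/* charge a 300-byte packet at 2 B/tick */
	tokens = toks;			/* negative => class over its rate */

	printf("tokens: %ld\n", tokens);
	return 0;
}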
@@ -864,15 +719,14 @@ static void htb_charge_class(struct htb_sched *q,struct htb_class *cl,
 		}
 		HTB_ACCNT (ctokens,cbuffer,ceil);
 		cl->t_c = q->now;
-		HTB_DBG(5,2,"htb_chrg_clp cl=%X diff=%ld tok=%ld ctok=%ld\n",cl->classid,diff,cl->tokens,cl->ctokens);
 
 		old_mode = cl->cmode; diff = 0;
 		htb_change_class_mode(q,cl,&diff);
 		if (old_mode != cl->cmode) {
 			if (old_mode != HTB_CAN_SEND)
-				htb_safe_rb_erase(&cl->pq_node,q->wait_pq+cl->level);
+				rb_erase(&cl->pq_node,q->wait_pq+cl->level);
 			if (cl->cmode != HTB_CAN_SEND)
-				htb_add_to_wait_tree (q,cl,diff,1);
+				htb_add_to_wait_tree (q,cl,diff);
 		}
 
 #ifdef HTB_RATECM
@@ -899,8 +753,7 @@ static long htb_do_events(struct htb_sched *q,int level)
 static long htb_do_events(struct htb_sched *q,int level)
 {
 	int i;
-	HTB_DBG(8,1,"htb_do_events l=%d root=%p rmask=%X\n",
-		level,q->wait_pq[level].rb_node,q->row_mask[level]);
+
 	for (i = 0; i < 500; i++) {
 		struct htb_class *cl;
 		long diff;
@@ -910,30 +763,13 @@ static long htb_do_events(struct htb_sched *q,int level)
 
 		cl = rb_entry(p, struct htb_class, pq_node);
 		if (time_after(cl->pq_key, q->jiffies)) {
-			HTB_DBG(8,3,"htb_do_ev_ret delay=%ld\n",cl->pq_key - q->jiffies);
 			return cl->pq_key - q->jiffies;
 		}
-		htb_safe_rb_erase(p,q->wait_pq+level);
+		rb_erase(p,q->wait_pq+level);
 		diff = PSCHED_TDIFF_SAFE(q->now, cl->t_c, (u32)cl->mbuffer);
-#ifdef HTB_DEBUG
-		if (diff > cl->mbuffer || diff < 0 || PSCHED_TLESS(q->now, cl->t_c)) {
-			if (net_ratelimit())
-				printk(KERN_ERR "HTB: bad diff in events, cl=%X diff=%lX now=%Lu then=%Lu j=%lu\n",
-				       cl->classid, diff,
-#ifdef CONFIG_NET_SCH_CLK_GETTIMEOFDAY
-				       q->now.tv_sec * 1000000ULL + q->now.tv_usec,
-				       cl->t_c.tv_sec * 1000000ULL + cl->t_c.tv_usec,
-#else
-				       (unsigned long long) q->now,
-				       (unsigned long long) cl->t_c,
-#endif
-				       q->jiffies);
-			diff = 1000;
-		}
-#endif
 		htb_change_class_mode(q,cl,&diff);
 		if (cl->cmode != HTB_CAN_SEND)
-			htb_add_to_wait_tree (q,cl,diff,2);
+			htb_add_to_wait_tree (q,cl,diff);
 	}
 	if (net_ratelimit())
 		printk(KERN_WARNING "htb: too many events !\n");
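htb_do_events() drains one level's wait queue: the leftmost (soonest) entry is inspected, and if its pq_key is still in the future the function returns how long until it fires; otherwise the class is erased from the queue, its mode recomputed, and it is requeued if still unable to send. A reduced model of that loop over a sorted array instead of the rbtree (the values are made up):

#include <stdio.h>

int main(void)
{
	unsigned long now = 1000;				/* q->jiffies */
	unsigned long pq_key[] = { 990, 995, 1000, 1040 };	/* sorted like the tree */
	unsigned int i;
	unsigned long delay = 0;

	for (i = 0; i < sizeof(pq_key) / sizeof(pq_key[0]); i++) {
		if (pq_key[i] > now) {	/* time_after(cl->pq_key, q->jiffies) */
			delay = pq_key[i] - now;
			break;		/* report time to the next event */
		}
		/* due: erase from the wait queue, recompute the class mode */
	}
	printf("next event in %lu jiffies\n", delay);
	return 0;
}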
@@ -966,7 +802,7 @@ htb_id_find_next_upper(int prio,struct rb_node *n,u32 id)
  * Find leaf where current feed pointers points to.
  */
 static struct htb_class *
-htb_lookup_leaf(HTB_ARGQ struct rb_root *tree,int prio,struct rb_node **pptr,u32 *pid)
+htb_lookup_leaf(struct rb_root *tree,int prio,struct rb_node **pptr,u32 *pid)
 {
 	int i;
 	struct {
@@ -981,8 +817,6 @@ htb_lookup_leaf(HTB_ARGQ struct rb_root *tree,int prio,struct rb_node **pptr,u32
 	sp->pid = pid;
 
 	for (i = 0; i < 65535; i++) {
-		HTB_DBG(4,2,"htb_lleaf ptr=%p pid=%X\n",*sp->pptr,*sp->pid);
-
 		if (!*sp->pptr && *sp->pid) {
 			/* ptr was invalidated but id is valid - try to recover
 			   the original or next ptr */
@@ -1002,7 +836,6 @@ htb_lookup_leaf(HTB_ARGQ struct rb_root *tree,int prio,struct rb_node **pptr,u32
 		} else {
 			struct htb_class *cl;
 			cl = rb_entry(*sp->pptr,struct htb_class,node[prio]);
-			HTB_CHCL(cl);
 			if (!cl->level)
 				return cl;
 			(++sp)->root = cl->un.inner.feed[prio].rb_node;
@@ -1022,15 +855,13 @@ htb_dequeue_tree(struct htb_sched *q,int prio,int level)
 	struct sk_buff *skb = NULL;
 	struct htb_class *cl,*start;
 	/* look initial class up in the row */
-	start = cl = htb_lookup_leaf (HTB_PASSQ q->row[level]+prio,prio,
+	start = cl = htb_lookup_leaf (q->row[level]+prio,prio,
 			q->ptr[level]+prio,q->last_ptr_id[level]+prio);
 
 	do {
 next:
 		BUG_TRAP(cl);
 		if (!cl) return NULL;
-		HTB_DBG(4,1,"htb_deq_tr prio=%d lev=%d cl=%X defic=%d\n",
-				prio,level,cl->classid,cl->un.leaf.deficit[level]);
 
 		/* class can be empty - it is unlikely but can be true if leaf
 		   qdisc drops packets in enqueue routine or if someone used
@@ -1044,7 +875,7 @@ next:
 		if ((q->row_mask[level] & (1 << prio)) == 0)
 			return NULL;
 
-		next = htb_lookup_leaf (HTB_PASSQ q->row[level]+prio,
+		next = htb_lookup_leaf (q->row[level]+prio,
 				prio,q->ptr[level]+prio,q->last_ptr_id[level]+prio);
 
 		if (cl == start) /* fix start if we just deleted it */
@@ -1061,15 +892,13 @@ next:
 		}
 		q->nwc_hit++;
 		htb_next_rb_node((level?cl->parent->un.inner.ptr:q->ptr[0])+prio);
-		cl = htb_lookup_leaf (HTB_PASSQ q->row[level]+prio,prio,q->ptr[level]+prio,
+		cl = htb_lookup_leaf (q->row[level]+prio,prio,q->ptr[level]+prio,
 				q->last_ptr_id[level]+prio);
 
 	} while (cl != start);
 
 	if (likely(skb != NULL)) {
 		if ((cl->un.leaf.deficit[level] -= skb->len) < 0) {
-			HTB_DBG(4,2,"htb_next_cl oldptr=%p quant_add=%d\n",
-				level?cl->parent->un.inner.ptr[prio]:q->ptr[0][prio],cl->un.leaf.quantum);
 			cl->un.leaf.deficit[level] += cl->un.leaf.quantum;
 			htb_next_rb_node((level?cl->parent->un.inner.ptr:q->ptr[0])+prio);
 		}
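The tail of htb_dequeue_tree() is deficit round robin: each dequeued packet is charged against the class's per-level deficit, and when the deficit drops below zero it is topped up by the class quantum and the round-robin pointer advances to the next class. A toy model of that accounting (packet sizes, quantum, and the two-class rotation are made up for illustration):

#include <stdio.h>

int main(void)
{
	int deficit[2] = { 0, 0 };	/* cl->un.leaf.deficit[level] per class */
	const int quantum = 1500;	/* cl->un.leaf.quantum */
	const int pkts[] = { 800, 1200, 700, 1500, 400 };
	int cls = 0;
	unsigned int i;

	for (i = 0; i < sizeof(pkts) / sizeof(pkts[0]); i++) {
		printf("class %d sends %d bytes\n", cls, pkts[i]);
		if ((deficit[cls] -= pkts[i]) < 0) {
			deficit[cls] += quantum;	/* recharge with the quantum */
			cls = (cls + 1) % 2;		/* advance, as htb_next_rb_node() does */
		}
	}
	return 0;
}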
@@ -1095,7 +924,6 @@ static void htb_delay_by(struct Qdisc *sch,long delay)
 	mod_timer(&q->timer, q->jiffies + delay);
 	sch->flags |= TCQ_F_THROTTLED;
 	sch->qstats.overlimits++;
-	HTB_DBG(3,1,"htb_deq t_delay=%ld\n",delay);
 }
 
 static struct sk_buff *htb_dequeue(struct Qdisc *sch)
@@ -1104,13 +932,8 @@ static struct sk_buff *htb_dequeue(struct Qdisc *sch)
 	struct htb_sched *q = qdisc_priv(sch);
 	int level;
 	long min_delay;
-#ifdef HTB_DEBUG
-	int evs_used = 0;
-#endif
 
 	q->jiffies = jiffies;
-	HTB_DBG(3,1,"htb_deq dircnt=%d qlen=%d\n",skb_queue_len(&q->direct_queue),
-		sch->q.qlen);
 
 	/* try to dequeue direct packets as high prio (!) to minimize cpu work */
 	if ((skb = __skb_dequeue(&q->direct_queue)) != NULL) {
@@ -1131,9 +954,6 @@ static struct sk_buff *htb_dequeue(struct Qdisc *sch)
 		if (time_after_eq(q->jiffies, q->near_ev_cache[level])) {
 			delay = htb_do_events(q,level);
 			q->near_ev_cache[level] = q->jiffies + (delay ? delay : HZ);
-#ifdef HTB_DEBUG
-			evs_used++;
-#endif
 		} else
 			delay = q->near_ev_cache[level] - q->jiffies;
 
@@ -1151,20 +971,8 @@ static struct sk_buff *htb_dequeue(struct Qdisc *sch)
 			}
 		}
 	}
-#ifdef HTB_DEBUG
-	if (!q->nwc_hit && min_delay >= 10*HZ && net_ratelimit()) {
-		if (min_delay == LONG_MAX) {
-			printk(KERN_ERR "HTB: dequeue bug (%d,%lu,%lu), report it please !\n",
-					evs_used,q->jiffies,jiffies);
-			htb_debug_dump(q);
-		} else
-			printk(KERN_WARNING "HTB: mindelay=%ld, some class has "
-					"too small rate\n",min_delay);
-	}
-#endif
 	htb_delay_by (sch,min_delay > 5*HZ ? 5*HZ : min_delay);
 fin:
-	HTB_DBG(3,1,"htb_deq_end %s j=%lu skb=%p\n",sch->dev->name,q->jiffies,skb);
 	return skb;
 }
 
@@ -1198,7 +1006,6 @@ static void htb_reset(struct Qdisc* sch)
 {
 	struct htb_sched *q = qdisc_priv(sch);
 	int i;
-	HTB_DBG(0,1,"htb_reset sch=%p, handle=%X\n",sch,sch->handle);
 
 	for (i = 0; i < HTB_HSIZE; i++) {
 		struct list_head *p;
@@ -1213,10 +1020,6 @@ static void htb_reset(struct Qdisc* sch)
 			}
 			cl->prio_activity = 0;
 			cl->cmode = HTB_CAN_SEND;
-#ifdef HTB_DEBUG
-			cl->pq_node.rb_color = -1;
-			memset(cl->node,255,sizeof(cl->node));
-#endif
 
 		}
 	}
@@ -1238,10 +1041,6 @@ static int htb_init(struct Qdisc *sch, struct rtattr *opt)
 	struct rtattr *tb[TCA_HTB_INIT];
 	struct tc_htb_glob *gopt;
 	int i;
-#ifdef HTB_DEBUG
-	printk(KERN_INFO "HTB init, kernel part version %d.%d\n",
-		HTB_VER >> 16,HTB_VER & 0xffff);
-#endif
 	if (!opt || rtattr_parse_nested(tb, TCA_HTB_INIT, opt) ||
 	    tb[TCA_HTB_INIT-1] == NULL ||
 	    RTA_PAYLOAD(tb[TCA_HTB_INIT-1]) < sizeof(*gopt)) {
@@ -1254,8 +1053,6 @@ static int htb_init(struct Qdisc *sch, struct rtattr *opt)
 			HTB_VER >> 16,HTB_VER & 0xffff,gopt->version);
 		return -EINVAL;
 	}
-	q->debug = gopt->debug;
-	HTB_DBG(0,1,"htb_init sch=%p handle=%X r2q=%d\n",sch,sch->handle,gopt->rate2quantum);
 
 	INIT_LIST_HEAD(&q->root);
 	for (i = 0; i < HTB_HSIZE; i++)
@@ -1292,18 +1089,13 @@ static int htb_dump(struct Qdisc *sch, struct sk_buff *skb)
 	unsigned char *b = skb->tail;
 	struct rtattr *rta;
 	struct tc_htb_glob gopt;
-	HTB_DBG(0,1,"htb_dump sch=%p, handle=%X\n",sch,sch->handle);
 	HTB_QLOCK(sch);
 	gopt.direct_pkts = q->direct_pkts;
 
-#ifdef HTB_DEBUG
-	if (HTB_DBG_COND(0,2))
-		htb_debug_dump(q);
-#endif
 	gopt.version = HTB_VER;
 	gopt.rate2quantum = q->rate2quantum;
 	gopt.defcls = q->defcls;
-	gopt.debug = q->debug;
+	gopt.debug = 0;
 	rta = (struct rtattr*)b;
 	RTA_PUT(skb, TCA_OPTIONS, 0, NULL);
 	RTA_PUT(skb, TCA_HTB_INIT, sizeof(gopt), &gopt);
@@ -1319,16 +1111,11 @@ rtattr_failure:
 static int htb_dump_class(struct Qdisc *sch, unsigned long arg,
 	struct sk_buff *skb, struct tcmsg *tcm)
 {
-#ifdef HTB_DEBUG
-	struct htb_sched *q = qdisc_priv(sch);
-#endif
 	struct htb_class *cl = (struct htb_class*)arg;
 	unsigned char *b = skb->tail;
 	struct rtattr *rta;
 	struct tc_htb_opt opt;
 
-	HTB_DBG(0,1,"htb_dump_class handle=%X clid=%X\n",sch->handle,cl->classid);
-
 	HTB_QLOCK(sch);
 	tcm->tcm_parent = cl->parent ? cl->parent->classid : TC_H_ROOT;
 	tcm->tcm_handle = cl->classid;
@@ -1410,11 +1197,7 @@ static struct Qdisc * htb_leaf(struct Qdisc *sch, unsigned long arg)
 
 static unsigned long htb_get(struct Qdisc *sch, u32 classid)
 {
-#ifdef HTB_DEBUG
-	struct htb_sched *q = qdisc_priv(sch);
-#endif
 	struct htb_class *cl = htb_find(classid,sch);
-	HTB_DBG(0,1,"htb_get clid=%X q=%p cl=%p ref=%d\n",classid,q,cl,cl?cl->refcnt:0);
 	if (cl)
 		cl->refcnt++;
 	return (unsigned long)cl;
@@ -1433,7 +1216,6 @@ static void htb_destroy_filters(struct tcf_proto **fl)
 static void htb_destroy_class(struct Qdisc* sch,struct htb_class *cl)
 {
 	struct htb_sched *q = qdisc_priv(sch);
-	HTB_DBG(0,1,"htb_destrycls clid=%X ref=%d\n", cl?cl->classid:0,cl?cl->refcnt:0);
 	if (!cl->level) {
 		BUG_TRAP(cl->un.leaf.q);
 		sch->q.qlen -= cl->un.leaf.q->q.qlen;
@@ -1456,7 +1238,7 @@ static void htb_destroy_class(struct Qdisc* sch,struct htb_class *cl)
 		htb_deactivate (q,cl);
 
 	if (cl->cmode != HTB_CAN_SEND)
-		htb_safe_rb_erase(&cl->pq_node,q->wait_pq+cl->level);
+		rb_erase(&cl->pq_node,q->wait_pq+cl->level);
 
 	kfree(cl);
 }
@@ -1465,7 +1247,6 @@ static void htb_destroy_class(struct Qdisc* sch,struct htb_class *cl)
 static void htb_destroy(struct Qdisc* sch)
 {
 	struct htb_sched *q = qdisc_priv(sch);
-	HTB_DBG(0,1,"htb_destroy q=%p\n",q);
 
 	del_timer_sync (&q->timer);
 #ifdef HTB_RATECM
@@ -1488,7 +1269,6 @@ static int htb_delete(struct Qdisc *sch, unsigned long arg)
 {
 	struct htb_sched *q = qdisc_priv(sch);
 	struct htb_class *cl = (struct htb_class*)arg;
-	HTB_DBG(0,1,"htb_delete q=%p cl=%X ref=%d\n",q,cl?cl->classid:0,cl?cl->refcnt:0);
 
 	// TODO: why don't allow to delete subtree ? references ? does
 	// tc subsys quarantee us that in htb_destroy it holds no class
@@ -1512,11 +1292,7 @@ static int htb_delete(struct Qdisc *sch, unsigned long arg)
 
 static void htb_put(struct Qdisc *sch, unsigned long arg)
 {
-#ifdef HTB_DEBUG
-	struct htb_sched *q = qdisc_priv(sch);
-#endif
 	struct htb_class *cl = (struct htb_class*)arg;
-	HTB_DBG(0,1,"htb_put q=%p cl=%X ref=%d\n",q,cl?cl->classid:0,cl?cl->refcnt:0);
 
 	if (--cl->refcnt == 0)
 		htb_destroy_class(sch,cl);
@@ -1542,7 +1318,7 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
 	parent = parentid == TC_H_ROOT ? NULL : htb_find (parentid,sch);
 
 	hopt = RTA_DATA(tb[TCA_HTB_PARMS-1]);
-	HTB_DBG(0,1,"htb_chg cl=%p(%X), clid=%X, parid=%X, opt/prio=%d, rate=%u, buff=%d, quant=%d\n", cl,cl?cl->classid:0,classid,parentid,(int)hopt->prio,hopt->rate.rate,hopt->buffer,hopt->quantum);
+
 	rtab = qdisc_get_rtab(&hopt->rate, tb[TCA_HTB_RTAB-1]);
 	ctab = qdisc_get_rtab(&hopt->ceil, tb[TCA_HTB_CTAB-1]);
 	if (!rtab || !ctab) goto failure;
@@ -1567,9 +1343,6 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
 		INIT_LIST_HEAD(&cl->hlist);
 		INIT_LIST_HEAD(&cl->children);
 		INIT_LIST_HEAD(&cl->un.leaf.drop_list);
-#ifdef HTB_DEBUG
-		cl->magic = HTB_CMAGIC;
-#endif
 
 		/* create leaf qdisc early because it uses kmalloc(GFP_KERNEL)
 		   so that can't be used inside of sch_tree_lock
@@ -1585,7 +1358,7 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
 
 			/* remove from evt list because of level change */
 			if (parent->cmode != HTB_CAN_SEND) {
-				htb_safe_rb_erase(&parent->pq_node,q->wait_pq /*+0*/);
+				rb_erase(&parent->pq_node,q->wait_pq);
 				parent->cmode = HTB_CAN_SEND;
 			}
 			parent->level = (parent->parent ? parent->parent->level
@@ -1607,13 +1380,6 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
 		/* attach to the hash list and parent's family */
 		list_add_tail(&cl->hlist, q->hash+htb_hash(classid));
 		list_add_tail(&cl->sibling, parent ? &parent->children : &q->root);
-#ifdef HTB_DEBUG
-		{
-			int i;
-			for (i = 0; i < TC_HTB_NUMPRIO; i++) cl->node[i].rb_color = -1;
-			cl->pq_node.rb_color = -1;
-		}
-#endif
 	} else sch_tree_lock(sch);
 
 	/* it used to be a nasty bug here, we have to check that node
@@ -1654,7 +1420,7 @@ static struct tcf_proto **htb_find_tcf(struct Qdisc *sch, unsigned long arg)
 	struct htb_sched *q = qdisc_priv(sch);
 	struct htb_class *cl = (struct htb_class *)arg;
 	struct tcf_proto **fl = cl ? &cl->filter_list : &q->filter_list;
-	HTB_DBG(0,2,"htb_tcf q=%p clid=%X fref=%d fl=%p\n",q,cl?cl->classid:0,cl?cl->filter_cnt:q->filter_cnt,*fl);
+
 	return fl;
 }
 
@@ -1663,7 +1429,7 @@ static unsigned long htb_bind_filter(struct Qdisc *sch, unsigned long parent,
 {
 	struct htb_sched *q = qdisc_priv(sch);
 	struct htb_class *cl = htb_find (classid,sch);
-	HTB_DBG(0,2,"htb_bind q=%p clid=%X cl=%p fref=%d\n",q,classid,cl,cl?cl->filter_cnt:q->filter_cnt);
+
 	/*if (cl && !cl->level) return 0;
 	  The line above used to be there to prevent attaching filters to
 	  leaves. But at least tc_index filter uses this just to get class
@@ -1684,7 +1450,7 @@ static void htb_unbind_filter(struct Qdisc *sch, unsigned long arg)
 {
 	struct htb_sched *q = qdisc_priv(sch);
 	struct htb_class *cl = (struct htb_class *)arg;
-	HTB_DBG(0,2,"htb_unbind q=%p cl=%p fref=%d\n",q,cl,cl?cl->filter_cnt:q->filter_cnt);
+
 	if (cl)
 		cl->filter_cnt--;
 	else