Diffstat (limited to 'net/sched/sch_cbq.c')
-rw-r--r--	net/sched/sch_cbq.c	358
1 files changed, 182 insertions, 176 deletions
diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c
index c80d1c210c5d..4aaf44c95c52 100644
--- a/net/sched/sch_cbq.c
+++ b/net/sched/sch_cbq.c
@@ -72,8 +72,7 @@
 struct cbq_sched_data;
 
 
-struct cbq_class
-{
+struct cbq_class {
 	struct Qdisc_class_common common;
 	struct cbq_class	*next_alive;	/* next class with backlog in this priority band */
 
@@ -139,19 +138,18 @@ struct cbq_class
 	int			refcnt;
 	int			filters;
 
-	struct cbq_class	*defaults[TC_PRIO_MAX+1];
+	struct cbq_class	*defaults[TC_PRIO_MAX + 1];
 };
 
-struct cbq_sched_data
-{
+struct cbq_sched_data {
 	struct Qdisc_class_hash clhash;			/* Hash table of all classes */
-	int			nclasses[TC_CBQ_MAXPRIO+1];
-	unsigned		quanta[TC_CBQ_MAXPRIO+1];
+	int			nclasses[TC_CBQ_MAXPRIO + 1];
+	unsigned int		quanta[TC_CBQ_MAXPRIO + 1];
 
 	struct cbq_class	link;
 
-	unsigned		activemask;
-	struct cbq_class	*active[TC_CBQ_MAXPRIO+1];	/* List of all classes
+	unsigned int		activemask;
+	struct cbq_class	*active[TC_CBQ_MAXPRIO + 1];	/* List of all classes
 						   with backlog */
 
 #ifdef CONFIG_NET_CLS_ACT
@@ -162,7 +160,7 @@ struct cbq_sched_data
 	int			tx_len;
 	psched_time_t		now;		/* Cached timestamp */
 	psched_time_t		now_rt;		/* Cached real time */
-	unsigned		pmask;
+	unsigned int		pmask;
 
 	struct hrtimer		delay_timer;
 	struct qdisc_watchdog	watchdog;	/* Watchdog timer,
@@ -175,9 +173,9 @@ struct cbq_sched_data
 };
 
 
-#define L2T(cl,len)	qdisc_l2t((cl)->R_tab,len)
+#define L2T(cl, len)	qdisc_l2t((cl)->R_tab, len)
 
-static __inline__ struct cbq_class *
+static inline struct cbq_class *
 cbq_class_lookup(struct cbq_sched_data *q, u32 classid)
 {
 	struct Qdisc_class_common *clc;
@@ -193,25 +191,27 @@ cbq_class_lookup(struct cbq_sched_data *q, u32 classid)
 static struct cbq_class *
 cbq_reclassify(struct sk_buff *skb, struct cbq_class *this)
 {
-	struct cbq_class *cl, *new;
+	struct cbq_class *cl;
 
-	for (cl = this->tparent; cl; cl = cl->tparent)
-		if ((new = cl->defaults[TC_PRIO_BESTEFFORT]) != NULL && new != this)
-			return new;
+	for (cl = this->tparent; cl; cl = cl->tparent) {
+		struct cbq_class *new = cl->defaults[TC_PRIO_BESTEFFORT];
 
+		if (new != NULL && new != this)
+			return new;
+	}
 	return NULL;
 }
 
 #endif
 
 /* Classify packet. The procedure is pretty complicated, but
-   it allows us to combine link sharing and priority scheduling
-   transparently.
-
-   Namely, you can put link sharing rules (f.e. route based) at root of CBQ,
-   so that it resolves to split nodes. Then packets are classified
-   by logical priority, or a more specific classifier may be attached
-   to the split node.
+ * it allows us to combine link sharing and priority scheduling
+ * transparently.
+ *
+ * Namely, you can put link sharing rules (f.e. route based) at root of CBQ,
+ * so that it resolves to split nodes. Then packets are classified
+ * by logical priority, or a more specific classifier may be attached
+ * to the split node.
  */
 
 static struct cbq_class *
@@ -227,7 +227,7 @@ cbq_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
 	/*
 	 *  Step 1. If skb->priority points to one of our classes, use it.
 	 */
-	if (TC_H_MAJ(prio^sch->handle) == 0 &&
+	if (TC_H_MAJ(prio ^ sch->handle) == 0 &&
 	    (cl = cbq_class_lookup(q, prio)) != NULL)
 		return cl;
 
@@ -243,10 +243,11 @@ cbq_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
 		    (result = tc_classify_compat(skb, head->filter_list, &res)) < 0)
 			goto fallback;
 
-		if ((cl = (void*)res.class) == NULL) {
+		cl = (void *)res.class;
+		if (!cl) {
 			if (TC_H_MAJ(res.classid))
 				cl = cbq_class_lookup(q, res.classid);
-			else if ((cl = defmap[res.classid&TC_PRIO_MAX]) == NULL)
+			else if ((cl = defmap[res.classid & TC_PRIO_MAX]) == NULL)
 				cl = defmap[TC_PRIO_BESTEFFORT];
 
 			if (cl == NULL || cl->level >= head->level)
@@ -282,7 +283,7 @@ fallback:
 	 * Step 4. No success...
 	 */
 	if (TC_H_MAJ(prio) == 0 &&
-	    !(cl = head->defaults[prio&TC_PRIO_MAX]) &&
+	    !(cl = head->defaults[prio & TC_PRIO_MAX]) &&
 	    !(cl = head->defaults[TC_PRIO_BESTEFFORT]))
 		return head;
 
@@ -290,12 +291,12 @@ fallback:
 }
 
 /*
-   A packet has just been enqueued on the empty class.
-   cbq_activate_class adds it to the tail of active class list
-   of its priority band.
+ * A packet has just been enqueued on the empty class.
+ * cbq_activate_class adds it to the tail of active class list
+ * of its priority band.
  */
 
-static __inline__ void cbq_activate_class(struct cbq_class *cl)
+static inline void cbq_activate_class(struct cbq_class *cl)
 {
 	struct cbq_sched_data *q = qdisc_priv(cl->qdisc);
 	int prio = cl->cpriority;
@@ -314,9 +315,9 @@ static __inline__ void cbq_activate_class(struct cbq_class *cl)
 }
 
 /*
-   Unlink class from active chain.
-   Note that this same procedure is done directly in cbq_dequeue*
-   during round-robin procedure.
+ * Unlink class from active chain.
+ * Note that this same procedure is done directly in cbq_dequeue*
+ * during round-robin procedure.
  */
 
 static void cbq_deactivate_class(struct cbq_class *this)
@@ -350,7 +351,7 @@ cbq_mark_toplevel(struct cbq_sched_data *q, struct cbq_class *cl)
 {
 	int toplevel = q->toplevel;
 
-	if (toplevel > cl->level && !(cl->q->flags&TCQ_F_THROTTLED)) {
+	if (toplevel > cl->level && !(cl->q->flags & TCQ_F_THROTTLED)) {
 		psched_time_t now;
 		psched_tdiff_t incr;
 
@@ -363,7 +364,7 @@ cbq_mark_toplevel(struct cbq_sched_data *q, struct cbq_class *cl)
 				q->toplevel = cl->level;
 				return;
 			}
-		} while ((cl=cl->borrow) != NULL && toplevel > cl->level);
+		} while ((cl = cl->borrow) != NULL && toplevel > cl->level);
 	}
 }
 
@@ -418,11 +419,11 @@ static void cbq_ovl_classic(struct cbq_class *cl)
 		delay += cl->offtime;
 
 		/*
-		   Class goes to sleep, so that it will have no
-		   chance to work avgidle. Let's forgive it 8)
-
-		   BTW cbq-2.0 has a crap in this
-		   place, apparently they forgot to shift it by cl->ewma_log.
+		 * Class goes to sleep, so that it will have no
+		 * chance to work avgidle. Let's forgive it 8)
+		 *
+		 * BTW cbq-2.0 has a crap in this
+		 * place, apparently they forgot to shift it by cl->ewma_log.
 		 */
 		if (cl->avgidle < 0)
 			delay -= (-cl->avgidle) - ((-cl->avgidle) >> cl->ewma_log);
@@ -439,8 +440,8 @@ static void cbq_ovl_classic(struct cbq_class *cl)
 		q->wd_expires = delay;
 
 	/* Dirty work! We must schedule wakeups based on
-	   real available rate, rather than leaf rate,
-	   which may be tiny (even zero).
+	 * real available rate, rather than leaf rate,
+	 * which may be tiny (even zero).
 	 */
 	if (q->toplevel == TC_CBQ_MAXLEVEL) {
 		struct cbq_class *b;
@@ -460,7 +461,7 @@ static void cbq_ovl_classic(struct cbq_class *cl)
 }
 
 /* TC_CBQ_OVL_RCLASSIC: penalize by offtime classes in hierarchy, when
-   they go overlimit
+ * they go overlimit
  */
 
 static void cbq_ovl_rclassic(struct cbq_class *cl)
@@ -595,7 +596,7 @@ static enum hrtimer_restart cbq_undelay(struct hrtimer *timer)
 	struct Qdisc *sch = q->watchdog.qdisc;
 	psched_time_t now;
 	psched_tdiff_t delay = 0;
-	unsigned pmask;
+	unsigned int pmask;
 
 	now = psched_get_time();
 
@@ -665,15 +666,15 @@ static int cbq_reshape_fail(struct sk_buff *skb, struct Qdisc *child)
 #endif
 
 /*
-   It is mission critical procedure.
-
-   We "regenerate" toplevel cutoff, if transmitting class
-   has backlog and it is not regulated. It is not part of
-   original CBQ description, but looks more reasonable.
-   Probably, it is wrong. This question needs further investigation.
-*/
+ * It is mission critical procedure.
+ *
+ * We "regenerate" toplevel cutoff, if transmitting class
+ * has backlog and it is not regulated. It is not part of
+ * original CBQ description, but looks more reasonable.
+ * Probably, it is wrong. This question needs further investigation.
+ */
 
-static __inline__ void
+static inline void
 cbq_update_toplevel(struct cbq_sched_data *q, struct cbq_class *cl,
 		    struct cbq_class *borrowed)
 {
@@ -684,7 +685,7 @@ cbq_update_toplevel(struct cbq_sched_data *q, struct cbq_class *cl,
 					q->toplevel = borrowed->level;
 					return;
 				}
-			} while ((borrowed=borrowed->borrow) != NULL);
+			} while ((borrowed = borrowed->borrow) != NULL);
 		}
 #if 0
 	/* It is not necessary now. Uncommenting it
@@ -712,10 +713,10 @@ cbq_update(struct cbq_sched_data *q)
 		cl->bstats.bytes += len;
 
 		/*
-		   (now - last) is total time between packet right edges.
-		   (last_pktlen/rate) is "virtual" busy time, so that
-
-		   idle = (now - last) - last_pktlen/rate
+		 * (now - last) is total time between packet right edges.
+		 * (last_pktlen/rate) is "virtual" busy time, so that
+		 *
+		 *	idle = (now - last) - last_pktlen/rate
 		 */
 
 		idle = q->now - cl->last;
@@ -725,9 +726,9 @@ cbq_update(struct cbq_sched_data *q)
 			idle -= L2T(cl, len);
 
 		/* true_avgidle := (1-W)*true_avgidle + W*idle,
-		   where W=2^{-ewma_log}. But cl->avgidle is scaled:
-		   cl->avgidle == true_avgidle/W,
-		   hence:
+		 * where W=2^{-ewma_log}. But cl->avgidle is scaled:
+		 * cl->avgidle == true_avgidle/W,
+		 * hence:
 		 */
 			avgidle += idle - (avgidle>>cl->ewma_log);
 		}
@@ -741,22 +742,22 @@ cbq_update(struct cbq_sched_data *q)
 			cl->avgidle = avgidle;
 
 			/* Calculate expected time, when this class
-			   will be allowed to send.
-			   It will occur, when:
-			   (1-W)*true_avgidle + W*delay = 0, i.e.
-			   idle = (1/W - 1)*(-true_avgidle)
-			   or
-			   idle = (1 - W)*(-cl->avgidle);
+			 * will be allowed to send.
+			 * It will occur, when:
+			 * (1-W)*true_avgidle + W*delay = 0, i.e.
+			 * idle = (1/W - 1)*(-true_avgidle)
+			 * or
+			 * idle = (1 - W)*(-cl->avgidle);
 			 */
 			idle = (-avgidle) - ((-avgidle) >> cl->ewma_log);
 
 			/*
-			   That is not all.
-			   To maintain the rate allocated to the class,
-			   we add to undertime virtual clock,
-			   necessary to complete transmitted packet.
-			   (len/phys_bandwidth has been already passed
-			   to the moment of cbq_update)
+			 * That is not all.
+			 * To maintain the rate allocated to the class,
+			 * we add to undertime virtual clock,
+			 * necessary to complete transmitted packet.
+			 * (len/phys_bandwidth has been already passed
+			 * to the moment of cbq_update)
 			 */
 
 			idle -= L2T(&q->link, len);
@@ -778,7 +779,7 @@ cbq_update(struct cbq_sched_data *q)
 	cbq_update_toplevel(q, this, q->tx_borrowed);
 }
 
-static __inline__ struct cbq_class *
+static inline struct cbq_class *
 cbq_under_limit(struct cbq_class *cl)
 {
 	struct cbq_sched_data *q = qdisc_priv(cl->qdisc);
@@ -794,16 +795,17 @@ cbq_under_limit(struct cbq_class *cl)
 
 	do {
 		/* It is very suspicious place. Now overlimit
-		   action is generated for not bounded classes
-		   only if link is completely congested.
-		   Though it is in agree with ancestor-only paradigm,
-		   it looks very stupid. Particularly,
-		   it means that this chunk of code will either
-		   never be called or result in strong amplification
-		   of burstiness. Dangerous, silly, and, however,
-		   no another solution exists.
+		 * action is generated for not bounded classes
+		 * only if link is completely congested.
+		 * Though it is in agree with ancestor-only paradigm,
+		 * it looks very stupid. Particularly,
+		 * it means that this chunk of code will either
+		 * never be called or result in strong amplification
+		 * of burstiness. Dangerous, silly, and, however,
+		 * no another solution exists.
 		 */
-		if ((cl = cl->borrow) == NULL) {
+		cl = cl->borrow;
+		if (!cl) {
 			this_cl->qstats.overlimits++;
 			this_cl->overlimit(this_cl);
 			return NULL;
@@ -816,7 +818,7 @@ cbq_under_limit(struct cbq_class *cl)
 	return cl;
 }
 
-static __inline__ struct sk_buff *
+static inline struct sk_buff *
 cbq_dequeue_prio(struct Qdisc *sch, int prio)
 {
 	struct cbq_sched_data *q = qdisc_priv(sch);
@@ -840,7 +842,7 @@ cbq_dequeue_prio(struct Qdisc *sch, int prio)
 
 			if (cl->deficit <= 0) {
 				/* Class exhausted its allotment per
-				   this round. Switch to the next one.
+				 * this round. Switch to the next one.
 				 */
 				deficit = 1;
 				cl->deficit += cl->quantum;
@@ -850,8 +852,8 @@ cbq_dequeue_prio(struct Qdisc *sch, int prio)
 			skb = cl->q->dequeue(cl->q);
 
 			/* Class did not give us any skb :-(
-			   It could occur even if cl->q->q.qlen != 0
-			   f.e. if cl->q == "tbf"
+			 * It could occur even if cl->q->q.qlen != 0
+			 * f.e. if cl->q == "tbf"
 			 */
 			if (skb == NULL)
 				goto skip_class;
@@ -880,7 +882,7 @@ cbq_dequeue_prio(struct Qdisc *sch, int prio)
 skip_class:
 			if (cl->q->q.qlen == 0 || prio != cl->cpriority) {
 				/* Class is empty or penalized.
-				   Unlink it from active chain.
+				 * Unlink it from active chain.
 				 */
 				cl_prev->next_alive = cl->next_alive;
 				cl->next_alive = NULL;
@@ -919,14 +921,14 @@ next_class:
 	return NULL;
 }
 
-static __inline__ struct sk_buff *
+static inline struct sk_buff *
 cbq_dequeue_1(struct Qdisc *sch)
 {
 	struct cbq_sched_data *q = qdisc_priv(sch);
 	struct sk_buff *skb;
-	unsigned activemask;
+	unsigned int activemask;
 
-	activemask = q->activemask&0xFF;
+	activemask = q->activemask & 0xFF;
 	while (activemask) {
 		int prio = ffz(~activemask);
 		activemask &= ~(1<<prio);
@@ -951,11 +953,11 @@ cbq_dequeue(struct Qdisc *sch)
 	if (q->tx_class) {
 		psched_tdiff_t incr2;
 		/* Time integrator. We calculate EOS time
-		   by adding expected packet transmission time.
-		   If real time is greater, we warp artificial clock,
-		   so that:
-
-		   cbq_time = max(real_time, work);
+		 * by adding expected packet transmission time.
+		 * If real time is greater, we warp artificial clock,
+		 * so that:
+		 *
+		 *	cbq_time = max(real_time, work);
 		 */
 		incr2 = L2T(&q->link, q->tx_len);
 		q->now += incr2;
@@ -977,22 +979,22 @@ cbq_dequeue(struct Qdisc *sch)
 	}
 
 	/* All the classes are overlimit.
-
-	   It is possible, if:
-
-	   1. Scheduler is empty.
-	   2. Toplevel cutoff inhibited borrowing.
-	   3. Root class is overlimit.
-
-	   Reset 2d and 3d conditions and retry.
-
-	   Note, that NS and cbq-2.0 are buggy, peeking
-	   an arbitrary class is appropriate for ancestor-only
-	   sharing, but not for toplevel algorithm.
-
-	   Our version is better, but slower, because it requires
-	   two passes, but it is unavoidable with top-level sharing.
+	 *
+	 * It is possible, if:
+	 *
+	 * 1. Scheduler is empty.
+	 * 2. Toplevel cutoff inhibited borrowing.
+	 * 3. Root class is overlimit.
+	 *
+	 * Reset 2d and 3d conditions and retry.
+	 *
+	 * Note, that NS and cbq-2.0 are buggy, peeking
+	 * an arbitrary class is appropriate for ancestor-only
+	 * sharing, but not for toplevel algorithm.
+	 *
+	 * Our version is better, but slower, because it requires
+	 * two passes, but it is unavoidable with top-level sharing.
 	 */
 
 	if (q->toplevel == TC_CBQ_MAXLEVEL &&
 	    q->link.undertime == PSCHED_PASTPERFECT)
@@ -1003,7 +1005,8 @@ cbq_dequeue(struct Qdisc *sch)
 	}
 
 	/* No packets in scheduler or nobody wants to give them to us :-(
-	   Sigh... start watchdog timer in the last case. */
+	 * Sigh... start watchdog timer in the last case.
+	 */
 
 	if (sch->q.qlen) {
 		sch->qstats.overlimits++;
@@ -1025,13 +1028,14 @@ static void cbq_adjust_levels(struct cbq_class *this)
 		int level = 0;
 		struct cbq_class *cl;
 
-		if ((cl = this->children) != NULL) {
+		cl = this->children;
+		if (cl) {
 			do {
 				if (cl->level > level)
 					level = cl->level;
 			} while ((cl = cl->sibling) != this->children);
 		}
-		this->level = level+1;
+		this->level = level + 1;
 	} while ((this = this->tparent) != NULL);
 }
 
@@ -1047,14 +1051,15 @@ static void cbq_normalize_quanta(struct cbq_sched_data *q, int prio)
 	for (h = 0; h < q->clhash.hashsize; h++) {
 		hlist_for_each_entry(cl, n, &q->clhash.hash[h], common.hnode) {
 			/* BUGGGG... Beware! This expression suffer of
-			   arithmetic overflows!
+			 * arithmetic overflows!
 			 */
 			if (cl->priority == prio) {
 				cl->quantum = (cl->weight*cl->allot*q->nclasses[prio])/
 					q->quanta[prio];
 			}
 			if (cl->quantum <= 0 || cl->quantum>32*qdisc_dev(cl->qdisc)->mtu) {
-				printk(KERN_WARNING "CBQ: class %08x has bad quantum==%ld, repaired.\n", cl->common.classid, cl->quantum);
+				pr_warning("CBQ: class %08x has bad quantum==%ld, repaired.\n",
+					   cl->common.classid, cl->quantum);
 				cl->quantum = qdisc_dev(cl->qdisc)->mtu/2 + 1;
 			}
 		}
@@ -1065,18 +1070,18 @@ static void cbq_sync_defmap(struct cbq_class *cl)
 {
 	struct cbq_sched_data *q = qdisc_priv(cl->qdisc);
 	struct cbq_class *split = cl->split;
-	unsigned h;
+	unsigned int h;
 	int i;
 
 	if (split == NULL)
 		return;
 
-	for (i=0; i<=TC_PRIO_MAX; i++) {
-		if (split->defaults[i] == cl && !(cl->defmap&(1<<i)))
+	for (i = 0; i <= TC_PRIO_MAX; i++) {
+		if (split->defaults[i] == cl && !(cl->defmap & (1<<i)))
 			split->defaults[i] = NULL;
 	}
 
-	for (i=0; i<=TC_PRIO_MAX; i++) {
+	for (i = 0; i <= TC_PRIO_MAX; i++) {
 		int level = split->level;
 
 		if (split->defaults[i])
@@ -1089,7 +1094,7 @@ static void cbq_sync_defmap(struct cbq_class *cl)
 		hlist_for_each_entry(c, n, &q->clhash.hash[h],
 				     common.hnode) {
 			if (c->split == split && c->level < level &&
-			    c->defmap&(1<<i)) {
+			    c->defmap & (1<<i)) {
 				split->defaults[i] = c;
 				level = c->level;
 			}
@@ -1103,7 +1108,8 @@ static void cbq_change_defmap(struct cbq_class *cl, u32 splitid, u32 def, u32 ma
 	struct cbq_class *split = NULL;
 
 	if (splitid == 0) {
-		if ((split = cl->split) == NULL)
+		split = cl->split;
+		if (!split)
 			return;
 		splitid = split->common.classid;
 	}
@@ -1121,9 +1127,9 @@ static void cbq_change_defmap(struct cbq_class *cl, u32 splitid, u32 def, u32 ma
 		cl->defmap = 0;
 		cbq_sync_defmap(cl);
 		cl->split = split;
-		cl->defmap = def&mask;
+		cl->defmap = def & mask;
 	} else
-		cl->defmap = (cl->defmap&~mask)|(def&mask);
+		cl->defmap = (cl->defmap & ~mask) | (def & mask);
 
 	cbq_sync_defmap(cl);
 }
@@ -1136,7 +1142,7 @@ static void cbq_unlink_class(struct cbq_class *this)
 	qdisc_class_hash_remove(&q->clhash, &this->common);
 
 	if (this->tparent) {
-		clp=&this->sibling;
+		clp = &this->sibling;
 		cl = *clp;
 		do {
 			if (cl == this) {
@@ -1175,7 +1181,7 @@ static void cbq_link_class(struct cbq_class *this)
 	}
 }
 
-static unsigned int cbq_drop(struct Qdisc* sch)
+static unsigned int cbq_drop(struct Qdisc *sch)
 {
 	struct cbq_sched_data *q = qdisc_priv(sch);
 	struct cbq_class *cl, *cl_head;
@@ -1183,7 +1189,8 @@ static unsigned int cbq_drop(struct Qdisc* sch)
 	unsigned int len;
 
 	for (prio = TC_CBQ_MAXPRIO; prio >= 0; prio--) {
-		if ((cl_head = q->active[prio]) == NULL)
+		cl_head = q->active[prio];
+		if (!cl_head)
 			continue;
 
 		cl = cl_head;
@@ -1200,13 +1207,13 @@ static unsigned int cbq_drop(struct Qdisc* sch)
 }
 
 static void
-cbq_reset(struct Qdisc* sch)
+cbq_reset(struct Qdisc *sch)
 {
 	struct cbq_sched_data *q = qdisc_priv(sch);
 	struct cbq_class *cl;
 	struct hlist_node *n;
 	int prio;
-	unsigned h;
+	unsigned int h;
 
 	q->activemask = 0;
 	q->pmask = 0;
@@ -1238,21 +1245,21 @@ cbq_reset(struct Qdisc* sch)
 
 static int cbq_set_lss(struct cbq_class *cl, struct tc_cbq_lssopt *lss)
 {
-	if (lss->change&TCF_CBQ_LSS_FLAGS) {
-		cl->share = (lss->flags&TCF_CBQ_LSS_ISOLATED) ? NULL : cl->tparent;
-		cl->borrow = (lss->flags&TCF_CBQ_LSS_BOUNDED) ? NULL : cl->tparent;
+	if (lss->change & TCF_CBQ_LSS_FLAGS) {
+		cl->share = (lss->flags & TCF_CBQ_LSS_ISOLATED) ? NULL : cl->tparent;
+		cl->borrow = (lss->flags & TCF_CBQ_LSS_BOUNDED) ? NULL : cl->tparent;
 	}
-	if (lss->change&TCF_CBQ_LSS_EWMA)
+	if (lss->change & TCF_CBQ_LSS_EWMA)
 		cl->ewma_log = lss->ewma_log;
-	if (lss->change&TCF_CBQ_LSS_AVPKT)
+	if (lss->change & TCF_CBQ_LSS_AVPKT)
 		cl->avpkt = lss->avpkt;
-	if (lss->change&TCF_CBQ_LSS_MINIDLE)
+	if (lss->change & TCF_CBQ_LSS_MINIDLE)
 		cl->minidle = -(long)lss->minidle;
-	if (lss->change&TCF_CBQ_LSS_MAXIDLE) {
+	if (lss->change & TCF_CBQ_LSS_MAXIDLE) {
 		cl->maxidle = lss->maxidle;
 		cl->avgidle = lss->maxidle;
 	}
-	if (lss->change&TCF_CBQ_LSS_OFFTIME)
+	if (lss->change & TCF_CBQ_LSS_OFFTIME)
 		cl->offtime = lss->offtime;
 	return 0;
 }
@@ -1280,10 +1287,10 @@ static int cbq_set_wrr(struct cbq_class *cl, struct tc_cbq_wrropt *wrr)
 	if (wrr->weight)
 		cl->weight = wrr->weight;
 	if (wrr->priority) {
-		cl->priority = wrr->priority-1;
+		cl->priority = wrr->priority - 1;
 		cl->cpriority = cl->priority;
 		if (cl->priority >= cl->priority2)
-			cl->priority2 = TC_CBQ_MAXPRIO-1;
+			cl->priority2 = TC_CBQ_MAXPRIO - 1;
 	}
 
 	cbq_addprio(q, cl);
@@ -1300,10 +1307,10 @@ static int cbq_set_overlimit(struct cbq_class *cl, struct tc_cbq_ovl *ovl)
 		cl->overlimit = cbq_ovl_delay;
 		break;
 	case TC_CBQ_OVL_LOWPRIO:
-		if (ovl->priority2-1 >= TC_CBQ_MAXPRIO ||
-		    ovl->priority2-1 <= cl->priority)
+		if (ovl->priority2 - 1 >= TC_CBQ_MAXPRIO ||
+		    ovl->priority2 - 1 <= cl->priority)
 			return -EINVAL;
-		cl->priority2 = ovl->priority2-1;
+		cl->priority2 = ovl->priority2 - 1;
 		cl->overlimit = cbq_ovl_lowprio;
 		break;
 	case TC_CBQ_OVL_DROP:
@@ -1382,9 +1389,9 @@ static int cbq_init(struct Qdisc *sch, struct nlattr *opt)
 	if (!q->link.q)
 		q->link.q = &noop_qdisc;
 
-	q->link.priority = TC_CBQ_MAXPRIO-1;
-	q->link.priority2 = TC_CBQ_MAXPRIO-1;
-	q->link.cpriority = TC_CBQ_MAXPRIO-1;
+	q->link.priority = TC_CBQ_MAXPRIO - 1;
+	q->link.priority2 = TC_CBQ_MAXPRIO - 1;
+	q->link.cpriority = TC_CBQ_MAXPRIO - 1;
 	q->link.ovl_strategy = TC_CBQ_OVL_CLASSIC;
 	q->link.overlimit = cbq_ovl_classic;
 	q->link.allot = psched_mtu(qdisc_dev(sch));
@@ -1415,7 +1422,7 @@ put_rtab:
 	return err;
 }
 
-static __inline__ int cbq_dump_rate(struct sk_buff *skb, struct cbq_class *cl)
+static int cbq_dump_rate(struct sk_buff *skb, struct cbq_class *cl)
 {
 	unsigned char *b = skb_tail_pointer(skb);
 
@@ -1427,7 +1434,7 @@ nla_put_failure:
 	return -1;
 }
 
-static __inline__ int cbq_dump_lss(struct sk_buff *skb, struct cbq_class *cl)
+static int cbq_dump_lss(struct sk_buff *skb, struct cbq_class *cl)
 {
 	unsigned char *b = skb_tail_pointer(skb);
 	struct tc_cbq_lssopt opt;
@@ -1452,15 +1459,15 @@ nla_put_failure:
 	return -1;
 }
 
-static __inline__ int cbq_dump_wrr(struct sk_buff *skb, struct cbq_class *cl)
+static int cbq_dump_wrr(struct sk_buff *skb, struct cbq_class *cl)
 {
 	unsigned char *b = skb_tail_pointer(skb);
 	struct tc_cbq_wrropt opt;
 
 	opt.flags = 0;
 	opt.allot = cl->allot;
-	opt.priority = cl->priority+1;
-	opt.cpriority = cl->cpriority+1;
+	opt.priority = cl->priority + 1;
+	opt.cpriority = cl->cpriority + 1;
 	opt.weight = cl->weight;
 	NLA_PUT(skb, TCA_CBQ_WRROPT, sizeof(opt), &opt);
 	return skb->len;
@@ -1470,13 +1477,13 @@ nla_put_failure:
 	return -1;
 }
 
-static __inline__ int cbq_dump_ovl(struct sk_buff *skb, struct cbq_class *cl)
+static int cbq_dump_ovl(struct sk_buff *skb, struct cbq_class *cl)
 {
 	unsigned char *b = skb_tail_pointer(skb);
 	struct tc_cbq_ovl opt;
 
 	opt.strategy = cl->ovl_strategy;
-	opt.priority2 = cl->priority2+1;
+	opt.priority2 = cl->priority2 + 1;
 	opt.pad = 0;
 	opt.penalty = cl->penalty;
 	NLA_PUT(skb, TCA_CBQ_OVL_STRATEGY, sizeof(opt), &opt);
@@ -1487,7 +1494,7 @@ nla_put_failure:
 	return -1;
 }
 
-static __inline__ int cbq_dump_fopt(struct sk_buff *skb, struct cbq_class *cl)
+static int cbq_dump_fopt(struct sk_buff *skb, struct cbq_class *cl)
 {
 	unsigned char *b = skb_tail_pointer(skb);
 	struct tc_cbq_fopt opt;
@@ -1506,7 +1513,7 @@ nla_put_failure:
 }
 
 #ifdef CONFIG_NET_CLS_ACT
-static __inline__ int cbq_dump_police(struct sk_buff *skb, struct cbq_class *cl)
+static int cbq_dump_police(struct sk_buff *skb, struct cbq_class *cl)
 {
 	unsigned char *b = skb_tail_pointer(skb);
 	struct tc_cbq_police opt;
@@ -1570,7 +1577,7 @@ static int
 cbq_dump_class(struct Qdisc *sch, unsigned long arg,
 	       struct sk_buff *skb, struct tcmsg *tcm)
 {
-	struct cbq_class *cl = (struct cbq_class*)arg;
+	struct cbq_class *cl = (struct cbq_class *)arg;
 	struct nlattr *nest;
 
 	if (cl->tparent)
@@ -1598,7 +1605,7 @@ cbq_dump_class_stats(struct Qdisc *sch, unsigned long arg,
 		     struct gnet_dump *d)
 {
 	struct cbq_sched_data *q = qdisc_priv(sch);
-	struct cbq_class *cl = (struct cbq_class*)arg;
+	struct cbq_class *cl = (struct cbq_class *)arg;
 
 	cl->qstats.qlen = cl->q->q.qlen;
 	cl->xstats.avgidle = cl->avgidle;
@@ -1618,7 +1625,7 @@ cbq_dump_class_stats(struct Qdisc *sch, unsigned long arg,
 static int cbq_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
 		     struct Qdisc **old)
 {
-	struct cbq_class *cl = (struct cbq_class*)arg;
+	struct cbq_class *cl = (struct cbq_class *)arg;
 
 	if (new == NULL) {
 		new = qdisc_create_dflt(sch->dev_queue,
@@ -1641,10 +1648,9 @@ static int cbq_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
 	return 0;
 }
 
-static struct Qdisc *
-cbq_leaf(struct Qdisc *sch, unsigned long arg)
+static struct Qdisc *cbq_leaf(struct Qdisc *sch, unsigned long arg)
 {
-	struct cbq_class *cl = (struct cbq_class*)arg;
+	struct cbq_class *cl = (struct cbq_class *)arg;
 
 	return cl->q;
 }
@@ -1683,13 +1689,12 @@ static void cbq_destroy_class(struct Qdisc *sch, struct cbq_class *cl)
 	kfree(cl);
 }
 
-static void
-cbq_destroy(struct Qdisc* sch)
+static void cbq_destroy(struct Qdisc *sch)
 {
 	struct cbq_sched_data *q = qdisc_priv(sch);
 	struct hlist_node *n, *next;
 	struct cbq_class *cl;
-	unsigned h;
+	unsigned int h;
 
 #ifdef CONFIG_NET_CLS_ACT
 	q->rx_class = NULL;
@@ -1713,7 +1718,7 @@ cbq_destroy(struct Qdisc* sch)
 
 static void cbq_put(struct Qdisc *sch, unsigned long arg)
 {
-	struct cbq_class *cl = (struct cbq_class*)arg;
+	struct cbq_class *cl = (struct cbq_class *)arg;
 
 	if (--cl->refcnt == 0) {
 #ifdef CONFIG_NET_CLS_ACT
@@ -1736,7 +1741,7 @@ cbq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct nlattr **t
 {
 	int err;
 	struct cbq_sched_data *q = qdisc_priv(sch);
-	struct cbq_class *cl = (struct cbq_class*)*arg;
+	struct cbq_class *cl = (struct cbq_class *)*arg;
 	struct nlattr *opt = tca[TCA_OPTIONS];
 	struct nlattr *tb[TCA_CBQ_MAX + 1];
 	struct cbq_class *parent;
@@ -1828,13 +1833,14 @@ cbq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct nlattr **t
 
 	if (classid) {
 		err = -EINVAL;
-		if (TC_H_MAJ(classid^sch->handle) || cbq_class_lookup(q, classid))
+		if (TC_H_MAJ(classid ^ sch->handle) ||
+		    cbq_class_lookup(q, classid))
 			goto failure;
 	} else {
 		int i;
-		classid = TC_H_MAKE(sch->handle,0x8000);
+		classid = TC_H_MAKE(sch->handle, 0x8000);
 
-		for (i=0; i<0x8000; i++) {
+		for (i = 0; i < 0x8000; i++) {
 			if (++q->hgenerator >= 0x8000)
 				q->hgenerator = 1;
 			if (cbq_class_lookup(q, classid|q->hgenerator) == NULL)
@@ -1891,11 +1897,11 @@ cbq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct nlattr **t
 	cl->minidle = -0x7FFFFFFF;
 	cbq_set_lss(cl, nla_data(tb[TCA_CBQ_LSSOPT]));
 	cbq_set_wrr(cl, nla_data(tb[TCA_CBQ_WRROPT]));
-	if (cl->ewma_log==0)
+	if (cl->ewma_log == 0)
 		cl->ewma_log = q->link.ewma_log;
-	if (cl->maxidle==0)
+	if (cl->maxidle == 0)
 		cl->maxidle = q->link.maxidle;
-	if (cl->avpkt==0)
+	if (cl->avpkt == 0)
 		cl->avpkt = q->link.avpkt;
 	cl->overlimit = cbq_ovl_classic;
 	if (tb[TCA_CBQ_OVL_STRATEGY])
@@ -1921,7 +1927,7 @@ failure:
 static int cbq_delete(struct Qdisc *sch, unsigned long arg)
 {
 	struct cbq_sched_data *q = qdisc_priv(sch);
-	struct cbq_class *cl = (struct cbq_class*)arg;
+	struct cbq_class *cl = (struct cbq_class *)arg;
 	unsigned int qlen;
 
 	if (cl->filters || cl->children || cl == &q->link)
@@ -1979,7 +1985,7 @@ static unsigned long cbq_bind_filter(struct Qdisc *sch, unsigned long parent,
 				     u32 classid)
 {
 	struct cbq_sched_data *q = qdisc_priv(sch);
-	struct cbq_class *p = (struct cbq_class*)parent;
+	struct cbq_class *p = (struct cbq_class *)parent;
 	struct cbq_class *cl = cbq_class_lookup(q, classid);
 
 	if (cl) {
@@ -1993,7 +1999,7 @@ static unsigned long cbq_bind_filter(struct Qdisc *sch, unsigned long parent,
 
 static void cbq_unbind_filter(struct Qdisc *sch, unsigned long arg)
 {
-	struct cbq_class *cl = (struct cbq_class*)arg;
+	struct cbq_class *cl = (struct cbq_class *)arg;
 
 	cl->filters--;
 }
@@ -2003,7 +2009,7 @@ static void cbq_walk(struct Qdisc *sch, struct qdisc_walker *arg)
 	struct cbq_sched_data *q = qdisc_priv(sch);
 	struct cbq_class *cl;
 	struct hlist_node *n;
-	unsigned h;
+	unsigned int h;
 
 	if (arg->stop)
 		return;
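
The comments that this patch reflows in cbq_update() describe a scaled EWMA: true_avgidle := (1-W)*true_avgidle + W*idle with W = 2^-ewma_log, stored as cl->avgidle == true_avgidle/W so the kernel can update it with one add and one shift. The following is a minimal stand-alone sketch (not part of the patch, plain userspace C with made-up sample idle values) showing that the kernel form `avgidle += idle - (avgidle >> ewma_log)` tracks the textbook formula up to integer truncation.

```c
/* Illustration only: compares the kernel's scaled EWMA update against
 * the unscaled reference formula from the sch_cbq.c comments.
 * Sample idle values are hypothetical.
 */
#include <stdio.h>

int main(void)
{
	const int ewma_log = 5;			/* W = 2^-5 = 1/32 */
	long avgidle = 0;			/* scaled: avgidle == true_avgidle / W */
	double true_avgidle = 0.0;
	double W = 1.0 / (1 << ewma_log);
	long samples[] = { 40, -10, 25, -60, 15 };	/* hypothetical idle times */

	for (unsigned int i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
		long idle = samples[i];

		/* kernel form, as in cbq_update() */
		avgidle += idle - (avgidle >> ewma_log);
		/* reference form: true_avgidle = (1-W)*true_avgidle + W*idle */
		true_avgidle = (1.0 - W) * true_avgidle + W * idle;

		printf("idle=%4ld  scaled avgidle=%5ld  true_avgidle/W=%8.2f\n",
		       idle, avgidle, true_avgidle / W);
	}
	return 0;
}
```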