Diffstat (limited to 'net/sched/sch_cbq.c')
-rw-r--r--	net/sched/sch_cbq.c	379
1 file changed, 191 insertions, 188 deletions
diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c
index 28c01ef5abc8..24d94c097b35 100644
--- a/net/sched/sch_cbq.c
+++ b/net/sched/sch_cbq.c
@@ -72,8 +72,7 @@
 struct cbq_sched_data;
 
 
-struct cbq_class
-{
+struct cbq_class {
 	struct Qdisc_class_common common;
 	struct cbq_class	*next_alive;	/* next class with backlog in this priority band */
 
@@ -139,19 +138,18 @@ struct cbq_class
 	int			refcnt;
 	int			filters;
 
-	struct cbq_class	*defaults[TC_PRIO_MAX+1];
+	struct cbq_class	*defaults[TC_PRIO_MAX + 1];
 };
 
-struct cbq_sched_data
-{
+struct cbq_sched_data {
 	struct Qdisc_class_hash clhash;		/* Hash table of all classes */
-	int			nclasses[TC_CBQ_MAXPRIO+1];
-	unsigned		quanta[TC_CBQ_MAXPRIO+1];
+	int			nclasses[TC_CBQ_MAXPRIO + 1];
+	unsigned int		quanta[TC_CBQ_MAXPRIO + 1];
 
 	struct cbq_class	link;
 
-	unsigned		activemask;
-	struct cbq_class	*active[TC_CBQ_MAXPRIO+1];	/* List of all classes
+	unsigned int		activemask;
+	struct cbq_class	*active[TC_CBQ_MAXPRIO + 1];	/* List of all classes
 								   with backlog */
 
 #ifdef CONFIG_NET_CLS_ACT
@@ -162,7 +160,7 @@ struct cbq_sched_data
 	int			tx_len;
 	psched_time_t		now;		/* Cached timestamp */
 	psched_time_t		now_rt;		/* Cached real time */
-	unsigned		pmask;
+	unsigned int		pmask;
 
 	struct hrtimer		delay_timer;
 	struct qdisc_watchdog	watchdog;	/* Watchdog timer,
@@ -175,9 +173,9 @@ struct cbq_sched_data
 };
 
 
-#define L2T(cl,len)	qdisc_l2t((cl)->R_tab,len)
+#define L2T(cl, len)	qdisc_l2t((cl)->R_tab, len)
 
-static __inline__ struct cbq_class *
+static inline struct cbq_class *
 cbq_class_lookup(struct cbq_sched_data *q, u32 classid)
 {
 	struct Qdisc_class_common *clc;
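A note on the macro above, since everything that follows depends on it: L2T ("length to time") maps a packet length to its transmission time at the class's configured rate, using the rate table that tc computes in userspace and installs with the class. A minimal sketch of such a lookup, modelled loosely on qdisc_l2t() of this kernel generation (the helper name l2t_sketch and the 256-slot clamp detail are illustrative, not a verbatim copy of the kernel header):

	/* Illustrative only: translate a packet length into transmission
	 * time (psched ticks) via a 256-slot table precomputed by tc.
	 */
	static inline u32 l2t_sketch(struct qdisc_rate_table *rtab,
				     unsigned int pktlen)
	{
		unsigned int slot = pktlen >> rtab->rate.cell_log;

		if (slot > 255)			/* clamp huge packets */
			return rtab->data[255] + 1;
		return rtab->data[slot];	/* len -> time lookup */
	}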
@@ -193,25 +191,27 @@ cbq_class_lookup(struct cbq_sched_data *q, u32 classid)
 static struct cbq_class *
 cbq_reclassify(struct sk_buff *skb, struct cbq_class *this)
 {
-	struct cbq_class *cl, *new;
+	struct cbq_class *cl;
 
-	for (cl = this->tparent; cl; cl = cl->tparent)
-		if ((new = cl->defaults[TC_PRIO_BESTEFFORT]) != NULL && new != this)
-			return new;
+	for (cl = this->tparent; cl; cl = cl->tparent) {
+		struct cbq_class *new = cl->defaults[TC_PRIO_BESTEFFORT];
 
+		if (new != NULL && new != this)
+			return new;
+	}
 	return NULL;
 }
 
 #endif
 
 /* Classify packet. The procedure is pretty complicated, but
-   it allows us to combine link sharing and priority scheduling
-   transparently.
-
-   Namely, you can put link sharing rules (f.e. route based) at root of CBQ,
-   so that it resolves to split nodes. Then packets are classified
-   by logical priority, or a more specific classifier may be attached
-   to the split node.
+ * it allows us to combine link sharing and priority scheduling
+ * transparently.
+ *
+ * Namely, you can put link sharing rules (f.e. route based) at root of CBQ,
+ * so that it resolves to split nodes. Then packets are classified
+ * by logical priority, or a more specific classifier may be attached
+ * to the split node.
  */
 
 static struct cbq_class *
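The classifier that this comment introduces leans on the tc handle layout: a 32-bit classid carries the owning qdisc in its upper 16 bits (the major) and the class in its lower 16 bits (the minor). Under the standard definitions from linux/pkt_sched.h, Step 1's test reads naturally:

	/* From linux/pkt_sched.h: */
	#define TC_H_MAJ(h)	((h) & 0xFFFF0000U)
	#define TC_H_MIN(h)	((h) & 0x0000FFFFU)

	/* Step 1 below: skb->priority may hold a full classid.  XORing it
	 * with sch->handle zeroes the major half exactly when both majors
	 * match, i.e. when the priority names a class of this CBQ instance:
	 *
	 *	TC_H_MAJ(prio ^ sch->handle) == 0
	 */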
@@ -227,7 +227,7 @@ cbq_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
 	/*
 	 * Step 1. If skb->priority points to one of our classes, use it.
 	 */
-	if (TC_H_MAJ(prio^sch->handle) == 0 &&
+	if (TC_H_MAJ(prio ^ sch->handle) == 0 &&
 	    (cl = cbq_class_lookup(q, prio)) != NULL)
 		return cl;
 
@@ -243,10 +243,11 @@ cbq_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
 		    (result = tc_classify_compat(skb, head->filter_list, &res)) < 0)
 			goto fallback;
 
-		if ((cl = (void*)res.class) == NULL) {
+		cl = (void *)res.class;
+		if (!cl) {
 			if (TC_H_MAJ(res.classid))
 				cl = cbq_class_lookup(q, res.classid);
-			else if ((cl = defmap[res.classid&TC_PRIO_MAX]) == NULL)
+			else if ((cl = defmap[res.classid & TC_PRIO_MAX]) == NULL)
 				cl = defmap[TC_PRIO_BESTEFFORT];
 
 			if (cl == NULL || cl->level >= head->level)
@@ -282,7 +283,7 @@ fallback:
 	 * Step 4. No success...
 	 */
 	if (TC_H_MAJ(prio) == 0 &&
-	    !(cl = head->defaults[prio&TC_PRIO_MAX]) &&
+	    !(cl = head->defaults[prio & TC_PRIO_MAX]) &&
 	    !(cl = head->defaults[TC_PRIO_BESTEFFORT]))
 		return head;
 
@@ -290,12 +291,12 @@ fallback:
 }
 
 /*
-   A packet has just been enqueued on the empty class.
-   cbq_activate_class adds it to the tail of active class list
-   of its priority band.
+ * A packet has just been enqueued on the empty class.
+ * cbq_activate_class adds it to the tail of active class list
+ * of its priority band.
  */
 
-static __inline__ void cbq_activate_class(struct cbq_class *cl)
+static inline void cbq_activate_class(struct cbq_class *cl)
 {
 	struct cbq_sched_data *q = qdisc_priv(cl->qdisc);
 	int prio = cl->cpriority;
@@ -314,9 +315,9 @@ static __inline__ void cbq_activate_class(struct cbq_class *cl)
 }
 
 /*
-   Unlink class from active chain.
-   Note that this same procedure is done directly in cbq_dequeue*
-   during round-robin procedure.
+ * Unlink class from active chain.
+ * Note that this same procedure is done directly in cbq_dequeue*
+ * during round-robin procedure.
  */
 
 static void cbq_deactivate_class(struct cbq_class *this)
@@ -350,7 +351,7 @@ cbq_mark_toplevel(struct cbq_sched_data *q, struct cbq_class *cl)
 {
 	int toplevel = q->toplevel;
 
-	if (toplevel > cl->level && !(cl->q->flags&TCQ_F_THROTTLED)) {
+	if (toplevel > cl->level && !(qdisc_is_throttled(cl->q))) {
 		psched_time_t now;
 		psched_tdiff_t incr;
 
@@ -363,7 +364,7 @@ cbq_mark_toplevel(struct cbq_sched_data *q, struct cbq_class *cl)
 				q->toplevel = cl->level;
 				return;
 			}
-		} while ((cl=cl->borrow) != NULL && toplevel > cl->level);
+		} while ((cl = cl->borrow) != NULL && toplevel > cl->level);
 	}
 }
 
@@ -390,8 +391,6 @@ cbq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 	ret = qdisc_enqueue(skb, cl->q);
 	if (ret == NET_XMIT_SUCCESS) {
 		sch->q.qlen++;
-		sch->bstats.packets++;
-		sch->bstats.bytes += qdisc_pkt_len(skb);
 		cbq_mark_toplevel(q, cl);
 		if (!cl->next_alive)
 			cbq_activate_class(cl);
@@ -419,11 +418,11 @@ static void cbq_ovl_classic(struct cbq_class *cl)
 		delay += cl->offtime;
 
 		/*
-		   Class goes to sleep, so that it will have no
-		   chance to work avgidle. Let's forgive it 8)
-
-		   BTW cbq-2.0 has a crap in this
-		   place, apparently they forgot to shift it by cl->ewma_log.
+		 * Class goes to sleep, so that it will have no
+		 * chance to work avgidle. Let's forgive it 8)
+		 *
+		 * BTW cbq-2.0 has a crap in this
+		 * place, apparently they forgot to shift it by cl->ewma_log.
 		 */
 		if (cl->avgidle < 0)
 			delay -= (-cl->avgidle) - ((-cl->avgidle) >> cl->ewma_log);
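To make the shift the comment complains about concrete: the forgiveness term is (1 - W)*(-avgidle) with W = 2^-ewma_log. With cl->ewma_log = 3 (W = 1/8) and cl->avgidle = -800, the line above removes 800 - (800 >> 3) = 800 - 100 = 700 ticks from the sleep delay; the cbq-2.0 bug mentioned in the comment is precisely omitting the `>> cl->ewma_log` correction, which would over-forgive the full 800.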
@@ -440,8 +439,8 @@ static void cbq_ovl_classic(struct cbq_class *cl)
 		q->wd_expires = delay;
 
 	/* Dirty work! We must schedule wakeups based on
-	   real available rate, rather than leaf rate,
-	   which may be tiny (even zero).
+	 * real available rate, rather than leaf rate,
+	 * which may be tiny (even zero).
 	 */
 	if (q->toplevel == TC_CBQ_MAXLEVEL) {
 		struct cbq_class *b;
@@ -461,7 +460,7 @@ static void cbq_ovl_classic(struct cbq_class *cl)
 }
 
 /* TC_CBQ_OVL_RCLASSIC: penalize by offtime classes in hierarchy, when
-   they go overlimit
+ * they go overlimit
  */
 
 static void cbq_ovl_rclassic(struct cbq_class *cl)
@@ -596,7 +595,7 @@ static enum hrtimer_restart cbq_undelay(struct hrtimer *timer)
 	struct Qdisc *sch = q->watchdog.qdisc;
 	psched_time_t now;
 	psched_tdiff_t delay = 0;
-	unsigned pmask;
+	unsigned int pmask;
 
 	now = psched_get_time();
 
@@ -625,7 +624,7 @@ static enum hrtimer_restart cbq_undelay(struct hrtimer *timer)
 		hrtimer_start(&q->delay_timer, time, HRTIMER_MODE_ABS);
 	}
 
-	sch->flags &= ~TCQ_F_THROTTLED;
+	qdisc_unthrottled(sch);
 	__netif_schedule(qdisc_root(sch));
 	return HRTIMER_NORESTART;
 }
@@ -650,8 +649,6 @@ static int cbq_reshape_fail(struct sk_buff *skb, struct Qdisc *child)
 		ret = qdisc_enqueue(skb, cl->q);
 		if (ret == NET_XMIT_SUCCESS) {
 			sch->q.qlen++;
-			sch->bstats.packets++;
-			sch->bstats.bytes += qdisc_pkt_len(skb);
 			if (!cl->next_alive)
 				cbq_activate_class(cl);
 			return 0;
@@ -667,15 +664,15 @@ static int cbq_reshape_fail(struct sk_buff *skb, struct Qdisc *child)
 #endif
 
 /*
-   It is mission critical procedure.
-
-   We "regenerate" toplevel cutoff, if transmitting class
-   has backlog and it is not regulated. It is not part of
-   original CBQ description, but looks more reasonable.
-   Probably, it is wrong. This question needs further investigation.
+ * It is mission critical procedure.
+ *
+ * We "regenerate" toplevel cutoff, if transmitting class
+ * has backlog and it is not regulated. It is not part of
+ * original CBQ description, but looks more reasonable.
+ * Probably, it is wrong. This question needs further investigation.
  */
 
-static __inline__ void
+static inline void
 cbq_update_toplevel(struct cbq_sched_data *q, struct cbq_class *cl,
 		    struct cbq_class *borrowed)
 {
@@ -686,7 +683,7 @@ cbq_update_toplevel(struct cbq_sched_data *q, struct cbq_class *cl,
 			q->toplevel = borrowed->level;
 			return;
 		}
-	} while ((borrowed=borrowed->borrow) != NULL);
+	} while ((borrowed = borrowed->borrow) != NULL);
 }
 #if 0
 /* It is not necessary now. Uncommenting it
@@ -714,10 +711,10 @@ cbq_update(struct cbq_sched_data *q)
 		cl->bstats.bytes += len;
 
 		/*
-		   (now - last) is total time between packet right edges.
-		   (last_pktlen/rate) is "virtual" busy time, so that
-
-		   idle = (now - last) - last_pktlen/rate
+		 * (now - last) is total time between packet right edges.
+		 * (last_pktlen/rate) is "virtual" busy time, so that
+		 *
+		 * idle = (now - last) - last_pktlen/rate
 		 */
 
 		idle = q->now - cl->last;
@@ -727,9 +724,9 @@ cbq_update(struct cbq_sched_data *q)
 			idle -= L2T(cl, len);
 
 		/* true_avgidle := (1-W)*true_avgidle + W*idle,
-		   where W=2^{-ewma_log}. But cl->avgidle is scaled:
-		   cl->avgidle == true_avgidle/W,
-		   hence:
+		 * where W=2^{-ewma_log}. But cl->avgidle is scaled:
+		 * cl->avgidle == true_avgidle/W,
+		 * hence:
 		 */
 			avgidle += idle - (avgidle>>cl->ewma_log);
 		}
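The scaling in this comment deserves one spelled-out step. With W = 2^-ewma_log, the true update is true_avgidle' = (1 - W)*true_avgidle + W*idle. Substituting the stored, scaled value avgidle = true_avgidle/W and dividing through by W gives avgidle' = (1 - W)*avgidle + idle = avgidle + idle - W*avgidle; since W*avgidle is exactly avgidle >> ewma_log for this power-of-two W, the whole EWMA costs one shift, one add, and one subtract — hence the single line above.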
@@ -743,22 +740,22 @@ cbq_update(struct cbq_sched_data *q)
 			cl->avgidle = avgidle;
 
 			/* Calculate expected time, when this class
-			   will be allowed to send.
-			   It will occur, when:
-			   (1-W)*true_avgidle + W*delay = 0, i.e.
-			   idle = (1/W - 1)*(-true_avgidle)
-			   or
-			   idle = (1 - W)*(-cl->avgidle);
+			 * will be allowed to send.
+			 * It will occur, when:
+			 * (1-W)*true_avgidle + W*delay = 0, i.e.
+			 * idle = (1/W - 1)*(-true_avgidle)
+			 * or
+			 * idle = (1 - W)*(-cl->avgidle);
 			 */
 			idle = (-avgidle) - ((-avgidle) >> cl->ewma_log);
 
 			/*
-			   That is not all.
-			   To maintain the rate allocated to the class,
-			   we add to undertime virtual clock,
-			   necessary to complete transmitted packet.
-			   (len/phys_bandwidth has been already passed
-			   to the moment of cbq_update)
+			 * That is not all.
+			 * To maintain the rate allocated to the class,
+			 * we add to undertime virtual clock,
+			 * necessary to complete transmitted packet.
+			 * (len/phys_bandwidth has been already passed
+			 * to the moment of cbq_update)
 			 */
 
 			idle -= L2T(&q->link, len);
@@ -780,7 +777,7 @@ cbq_update(struct cbq_sched_data *q)
 	cbq_update_toplevel(q, this, q->tx_borrowed);
 }
 
-static __inline__ struct cbq_class *
+static inline struct cbq_class *
 cbq_under_limit(struct cbq_class *cl)
 {
 	struct cbq_sched_data *q = qdisc_priv(cl->qdisc);
@@ -796,16 +793,17 @@ cbq_under_limit(struct cbq_class *cl)
 
 	do {
 		/* It is very suspicious place. Now overlimit
-		   action is generated for not bounded classes
-		   only if link is completely congested.
-		   Though it is in agree with ancestor-only paradigm,
-		   it looks very stupid. Particularly,
-		   it means that this chunk of code will either
-		   never be called or result in strong amplification
-		   of burstiness. Dangerous, silly, and, however,
-		   no another solution exists.
+		 * action is generated for not bounded classes
+		 * only if link is completely congested.
+		 * Though it is in agree with ancestor-only paradigm,
+		 * it looks very stupid. Particularly,
+		 * it means that this chunk of code will either
+		 * never be called or result in strong amplification
+		 * of burstiness. Dangerous, silly, and, however,
+		 * no another solution exists.
 		 */
-		if ((cl = cl->borrow) == NULL) {
+		cl = cl->borrow;
+		if (!cl) {
 			this_cl->qstats.overlimits++;
 			this_cl->overlimit(this_cl);
 			return NULL;
@@ -818,7 +816,7 @@ cbq_under_limit(struct cbq_class *cl)
 	return cl;
 }
 
-static __inline__ struct sk_buff *
+static inline struct sk_buff *
 cbq_dequeue_prio(struct Qdisc *sch, int prio)
 {
 	struct cbq_sched_data *q = qdisc_priv(sch);
@@ -842,7 +840,7 @@ cbq_dequeue_prio(struct Qdisc *sch, int prio)
 
 			if (cl->deficit <= 0) {
 				/* Class exhausted its allotment per
-				   this round. Switch to the next one.
+				 * this round. Switch to the next one.
 				 */
 				deficit = 1;
 				cl->deficit += cl->quantum;
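The deficit handling above is a weighted round-robin with deficit counters: a class pays for each packet it sends out of its deficit, and once the deficit is spent it is recharged by its quantum and loses the floor until the next round. A schematic of the accounting, stripped of the surrounding CBQ borrowing and list maintenance (illustrative only, not the function's exact control flow):

	/* Schematic deficit round-robin step:
	 * an exhausted class is recharged and skipped this round;
	 * otherwise it transmits and pays for the packet.
	 */
	if (cl->deficit <= 0) {
		cl->deficit += cl->quantum;		/* recharge */
		cl = cl->next_alive;			/* next in band */
	} else {
		cl->deficit -= qdisc_pkt_len(skb);	/* pay for skb */
	}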
@@ -852,8 +850,8 @@ cbq_dequeue_prio(struct Qdisc *sch, int prio)
 			skb = cl->q->dequeue(cl->q);
 
 			/* Class did not give us any skb :-(
-			   It could occur even if cl->q->q.qlen != 0
-			   f.e. if cl->q == "tbf"
+			 * It could occur even if cl->q->q.qlen != 0
+			 * f.e. if cl->q == "tbf"
 			 */
 			if (skb == NULL)
 				goto skip_class;
@@ -882,7 +880,7 @@ cbq_dequeue_prio(struct Qdisc *sch, int prio)
 skip_class:
 		if (cl->q->q.qlen == 0 || prio != cl->cpriority) {
 			/* Class is empty or penalized.
-			   Unlink it from active chain.
+			 * Unlink it from active chain.
 			 */
 			cl_prev->next_alive = cl->next_alive;
 			cl->next_alive = NULL;
@@ -921,14 +919,14 @@ next_class:
 	return NULL;
 }
 
-static __inline__ struct sk_buff *
+static inline struct sk_buff *
 cbq_dequeue_1(struct Qdisc *sch)
 {
 	struct cbq_sched_data *q = qdisc_priv(sch);
 	struct sk_buff *skb;
-	unsigned activemask;
+	unsigned int activemask;
 
-	activemask = q->activemask&0xFF;
+	activemask = q->activemask & 0xFF;
 	while (activemask) {
 		int prio = ffz(~activemask);
 		activemask &= ~(1<<prio);
@@ -953,11 +951,11 @@ cbq_dequeue(struct Qdisc *sch)
 	if (q->tx_class) {
 		psched_tdiff_t incr2;
 		/* Time integrator. We calculate EOS time
-		   by adding expected packet transmission time.
-		   If real time is greater, we warp artificial clock,
-		   so that:
-
-		   cbq_time = max(real_time, work);
+		 * by adding expected packet transmission time.
+		 * If real time is greater, we warp artificial clock,
+		 * so that:
+		 *
+		 * cbq_time = max(real_time, work);
 		 */
 		incr2 = L2T(&q->link, q->tx_len);
 		q->now += incr2;
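The invariant in this comment, cbq_time = max(real_time, work), can be paraphrased in two lines — here real_now stands for a freshly sampled psched clock, and the snippet deliberately ignores the incr/incr2 bookkeeping the real function performs around it:

	q->now += L2T(&q->link, q->tx_len);	/* work: expected tx time */
	if (q->now < real_now)
		q->now = real_now;		/* warp: never lag real time */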
@@ -973,28 +971,29 @@ cbq_dequeue(struct Qdisc *sch)
 
 	skb = cbq_dequeue_1(sch);
 	if (skb) {
+		qdisc_bstats_update(sch, skb);
 		sch->q.qlen--;
-		sch->flags &= ~TCQ_F_THROTTLED;
+		qdisc_unthrottled(sch);
 		return skb;
 	}
 
 	/* All the classes are overlimit.
-
-	   It is possible, if:
-
-	   1. Scheduler is empty.
-	   2. Toplevel cutoff inhibited borrowing.
-	   3. Root class is overlimit.
-
-	   Reset 2d and 3d conditions and retry.
-
-	   Note, that NS and cbq-2.0 are buggy, peeking
-	   an arbitrary class is appropriate for ancestor-only
-	   sharing, but not for toplevel algorithm.
-
-	   Our version is better, but slower, because it requires
-	   two passes, but it is unavoidable with top-level sharing.
+	 *
+	 * It is possible, if:
+	 *
+	 * 1. Scheduler is empty.
+	 * 2. Toplevel cutoff inhibited borrowing.
+	 * 3. Root class is overlimit.
+	 *
+	 * Reset 2d and 3d conditions and retry.
+	 *
+	 * Note, that NS and cbq-2.0 are buggy, peeking
+	 * an arbitrary class is appropriate for ancestor-only
+	 * sharing, but not for toplevel algorithm.
+	 *
+	 * Our version is better, but slower, because it requires
+	 * two passes, but it is unavoidable with top-level sharing.
 	 */
 
 	if (q->toplevel == TC_CBQ_MAXLEVEL &&
 	    q->link.undertime == PSCHED_PASTPERFECT)
@@ -1005,7 +1004,8 @@ cbq_dequeue(struct Qdisc *sch)
 	}
 
 	/* No packets in scheduler or nobody wants to give them to us :-(
-	   Sigh... start watchdog timer in the last case. */
+	 * Sigh... start watchdog timer in the last case.
+	 */
 
 	if (sch->q.qlen) {
 		sch->qstats.overlimits++;
@@ -1027,13 +1027,14 @@ static void cbq_adjust_levels(struct cbq_class *this)
 		int level = 0;
 		struct cbq_class *cl;
 
-		if ((cl = this->children) != NULL) {
+		cl = this->children;
+		if (cl) {
 			do {
 				if (cl->level > level)
 					level = cl->level;
 			} while ((cl = cl->sibling) != this->children);
 		}
-		this->level = level+1;
+		this->level = level + 1;
 	} while ((this = this->tparent) != NULL);
 }
 
@@ -1049,14 +1050,15 @@ static void cbq_normalize_quanta(struct cbq_sched_data *q, int prio)
 	for (h = 0; h < q->clhash.hashsize; h++) {
 		hlist_for_each_entry(cl, n, &q->clhash.hash[h], common.hnode) {
 			/* BUGGGG... Beware! This expression suffer of
-			   arithmetic overflows!
+			 * arithmetic overflows!
 			 */
 			if (cl->priority == prio) {
 				cl->quantum = (cl->weight*cl->allot*q->nclasses[prio])/
 					q->quanta[prio];
 			}
 			if (cl->quantum <= 0 || cl->quantum>32*qdisc_dev(cl->qdisc)->mtu) {
-				printk(KERN_WARNING "CBQ: class %08x has bad quantum==%ld, repaired.\n", cl->common.classid, cl->quantum);
+				pr_warning("CBQ: class %08x has bad quantum==%ld, repaired.\n",
+					   cl->common.classid, cl->quantum);
 				cl->quantum = qdisc_dev(cl->qdisc)->mtu/2 + 1;
 			}
 		}
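The BUGGGG warning is easy to make concrete: weight, allot, and the class count are multiplied before the divide, so with cl->weight = 10000, cl->allot = 1514 and q->nclasses[prio] = 200 the intermediate product is 10000 * 1514 * 200 = 3,028,000,000, past the 2,147,483,647 ceiling of a signed 32-bit long; the wrapped (negative) result is what the following `cl->quantum <= 0` test catches and repairs to mtu/2 + 1.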
@@ -1067,18 +1069,18 @@ static void cbq_sync_defmap(struct cbq_class *cl)
 {
 	struct cbq_sched_data *q = qdisc_priv(cl->qdisc);
 	struct cbq_class *split = cl->split;
-	unsigned h;
+	unsigned int h;
 	int i;
 
 	if (split == NULL)
 		return;
 
-	for (i=0; i<=TC_PRIO_MAX; i++) {
-		if (split->defaults[i] == cl && !(cl->defmap&(1<<i)))
+	for (i = 0; i <= TC_PRIO_MAX; i++) {
+		if (split->defaults[i] == cl && !(cl->defmap & (1<<i)))
 			split->defaults[i] = NULL;
 	}
 
-	for (i=0; i<=TC_PRIO_MAX; i++) {
+	for (i = 0; i <= TC_PRIO_MAX; i++) {
 		int level = split->level;
 
 		if (split->defaults[i])
@@ -1091,7 +1093,7 @@ static void cbq_sync_defmap(struct cbq_class *cl)
 			hlist_for_each_entry(c, n, &q->clhash.hash[h],
 					     common.hnode) {
 				if (c->split == split && c->level < level &&
-				    c->defmap&(1<<i)) {
+				    c->defmap & (1<<i)) {
 					split->defaults[i] = c;
 					level = c->level;
 				}
@@ -1105,7 +1107,8 @@ static void cbq_change_defmap(struct cbq_class *cl, u32 splitid, u32 def, u32 ma
 	struct cbq_class *split = NULL;
 
 	if (splitid == 0) {
-		if ((split = cl->split) == NULL)
+		split = cl->split;
+		if (!split)
 			return;
 		splitid = split->common.classid;
 	}
@@ -1123,9 +1126,9 @@ static void cbq_change_defmap(struct cbq_class *cl, u32 splitid, u32 def, u32 ma
 		cl->defmap = 0;
 		cbq_sync_defmap(cl);
 		cl->split = split;
-		cl->defmap = def&mask;
+		cl->defmap = def & mask;
 	} else
-		cl->defmap = (cl->defmap&~mask)|(def&mask);
+		cl->defmap = (cl->defmap & ~mask) | (def & mask);
 
 	cbq_sync_defmap(cl);
 }
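The else branch above is the standard masked read-modify-write: bits selected by mask are taken from def, all other bits of defmap are preserved. Worked in binary: with cl->defmap = 1100, def = 0011 and mask = 0101, (1100 & ~0101) | (0011 & 0101) = 1000 | 0001 = 1001 — bit 0 is switched on from def, bit 2 switched off, and the unmasked bits 1 and 3 keep their old values.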
@@ -1138,7 +1141,7 @@ static void cbq_unlink_class(struct cbq_class *this)
 	qdisc_class_hash_remove(&q->clhash, &this->common);
 
 	if (this->tparent) {
-		clp=&this->sibling;
+		clp = &this->sibling;
 		cl = *clp;
 		do {
 			if (cl == this) {
@@ -1177,7 +1180,7 @@ static void cbq_link_class(struct cbq_class *this)
 	}
 }
 
-static unsigned int cbq_drop(struct Qdisc* sch)
+static unsigned int cbq_drop(struct Qdisc *sch)
 {
 	struct cbq_sched_data *q = qdisc_priv(sch);
 	struct cbq_class *cl, *cl_head;
@@ -1185,7 +1188,8 @@ static unsigned int cbq_drop(struct Qdisc* sch)
 	unsigned int len;
 
 	for (prio = TC_CBQ_MAXPRIO; prio >= 0; prio--) {
-		if ((cl_head = q->active[prio]) == NULL)
+		cl_head = q->active[prio];
+		if (!cl_head)
 			continue;
 
 		cl = cl_head;
@@ -1202,13 +1206,13 @@ static unsigned int cbq_drop(struct Qdisc* sch)
 }
 
 static void
-cbq_reset(struct Qdisc* sch)
+cbq_reset(struct Qdisc *sch)
 {
 	struct cbq_sched_data *q = qdisc_priv(sch);
 	struct cbq_class *cl;
 	struct hlist_node *n;
 	int prio;
-	unsigned h;
+	unsigned int h;
 
 	q->activemask = 0;
 	q->pmask = 0;
@@ -1240,21 +1244,21 @@ cbq_reset(struct Qdisc* sch)
 
 static int cbq_set_lss(struct cbq_class *cl, struct tc_cbq_lssopt *lss)
 {
-	if (lss->change&TCF_CBQ_LSS_FLAGS) {
-		cl->share = (lss->flags&TCF_CBQ_LSS_ISOLATED) ? NULL : cl->tparent;
-		cl->borrow = (lss->flags&TCF_CBQ_LSS_BOUNDED) ? NULL : cl->tparent;
+	if (lss->change & TCF_CBQ_LSS_FLAGS) {
+		cl->share = (lss->flags & TCF_CBQ_LSS_ISOLATED) ? NULL : cl->tparent;
+		cl->borrow = (lss->flags & TCF_CBQ_LSS_BOUNDED) ? NULL : cl->tparent;
 	}
-	if (lss->change&TCF_CBQ_LSS_EWMA)
+	if (lss->change & TCF_CBQ_LSS_EWMA)
 		cl->ewma_log = lss->ewma_log;
-	if (lss->change&TCF_CBQ_LSS_AVPKT)
+	if (lss->change & TCF_CBQ_LSS_AVPKT)
 		cl->avpkt = lss->avpkt;
-	if (lss->change&TCF_CBQ_LSS_MINIDLE)
+	if (lss->change & TCF_CBQ_LSS_MINIDLE)
 		cl->minidle = -(long)lss->minidle;
-	if (lss->change&TCF_CBQ_LSS_MAXIDLE) {
+	if (lss->change & TCF_CBQ_LSS_MAXIDLE) {
 		cl->maxidle = lss->maxidle;
 		cl->avgidle = lss->maxidle;
 	}
-	if (lss->change&TCF_CBQ_LSS_OFFTIME)
+	if (lss->change & TCF_CBQ_LSS_OFFTIME)
 		cl->offtime = lss->offtime;
 	return 0;
 }
@@ -1282,10 +1286,10 @@ static int cbq_set_wrr(struct cbq_class *cl, struct tc_cbq_wrropt *wrr)
 	if (wrr->weight)
 		cl->weight = wrr->weight;
 	if (wrr->priority) {
-		cl->priority = wrr->priority-1;
+		cl->priority = wrr->priority - 1;
 		cl->cpriority = cl->priority;
 		if (cl->priority >= cl->priority2)
-			cl->priority2 = TC_CBQ_MAXPRIO-1;
+			cl->priority2 = TC_CBQ_MAXPRIO - 1;
 	}
 
 	cbq_addprio(q, cl);
@@ -1302,10 +1306,10 @@ static int cbq_set_overlimit(struct cbq_class *cl, struct tc_cbq_ovl *ovl)
 		cl->overlimit = cbq_ovl_delay;
 		break;
 	case TC_CBQ_OVL_LOWPRIO:
-		if (ovl->priority2-1 >= TC_CBQ_MAXPRIO ||
-		    ovl->priority2-1 <= cl->priority)
+		if (ovl->priority2 - 1 >= TC_CBQ_MAXPRIO ||
+		    ovl->priority2 - 1 <= cl->priority)
 			return -EINVAL;
-		cl->priority2 = ovl->priority2-1;
+		cl->priority2 = ovl->priority2 - 1;
 		cl->overlimit = cbq_ovl_lowprio;
 		break;
 	case TC_CBQ_OVL_DROP:
@@ -1379,14 +1383,14 @@ static int cbq_init(struct Qdisc *sch, struct nlattr *opt)
 	q->link.sibling = &q->link;
 	q->link.common.classid = sch->handle;
 	q->link.qdisc = sch;
-	if (!(q->link.q = qdisc_create_dflt(qdisc_dev(sch), sch->dev_queue,
-					    &pfifo_qdisc_ops,
-					    sch->handle)))
+	q->link.q = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
+				      sch->handle);
+	if (!q->link.q)
 		q->link.q = &noop_qdisc;
 
-	q->link.priority = TC_CBQ_MAXPRIO-1;
-	q->link.priority2 = TC_CBQ_MAXPRIO-1;
-	q->link.cpriority = TC_CBQ_MAXPRIO-1;
+	q->link.priority = TC_CBQ_MAXPRIO - 1;
+	q->link.priority2 = TC_CBQ_MAXPRIO - 1;
+	q->link.cpriority = TC_CBQ_MAXPRIO - 1;
 	q->link.ovl_strategy = TC_CBQ_OVL_CLASSIC;
 	q->link.overlimit = cbq_ovl_classic;
 	q->link.allot = psched_mtu(qdisc_dev(sch));
@@ -1417,7 +1421,7 @@ put_rtab:
 	return err;
 }
 
-static __inline__ int cbq_dump_rate(struct sk_buff *skb, struct cbq_class *cl)
+static int cbq_dump_rate(struct sk_buff *skb, struct cbq_class *cl)
 {
 	unsigned char *b = skb_tail_pointer(skb);
 
@@ -1429,7 +1433,7 @@ nla_put_failure:
 	return -1;
 }
 
-static __inline__ int cbq_dump_lss(struct sk_buff *skb, struct cbq_class *cl)
+static int cbq_dump_lss(struct sk_buff *skb, struct cbq_class *cl)
 {
 	unsigned char *b = skb_tail_pointer(skb);
 	struct tc_cbq_lssopt opt;
@@ -1454,15 +1458,15 @@ nla_put_failure:
 	return -1;
 }
 
-static __inline__ int cbq_dump_wrr(struct sk_buff *skb, struct cbq_class *cl)
+static int cbq_dump_wrr(struct sk_buff *skb, struct cbq_class *cl)
 {
 	unsigned char *b = skb_tail_pointer(skb);
 	struct tc_cbq_wrropt opt;
 
 	opt.flags = 0;
 	opt.allot = cl->allot;
-	opt.priority = cl->priority+1;
-	opt.cpriority = cl->cpriority+1;
+	opt.priority = cl->priority + 1;
+	opt.cpriority = cl->cpriority + 1;
 	opt.weight = cl->weight;
 	NLA_PUT(skb, TCA_CBQ_WRROPT, sizeof(opt), &opt);
 	return skb->len;
@@ -1472,13 +1476,13 @@ nla_put_failure:
 	return -1;
 }
 
-static __inline__ int cbq_dump_ovl(struct sk_buff *skb, struct cbq_class *cl)
+static int cbq_dump_ovl(struct sk_buff *skb, struct cbq_class *cl)
 {
 	unsigned char *b = skb_tail_pointer(skb);
 	struct tc_cbq_ovl opt;
 
 	opt.strategy = cl->ovl_strategy;
-	opt.priority2 = cl->priority2+1;
+	opt.priority2 = cl->priority2 + 1;
 	opt.pad = 0;
 	opt.penalty = cl->penalty;
 	NLA_PUT(skb, TCA_CBQ_OVL_STRATEGY, sizeof(opt), &opt);
@@ -1489,7 +1493,7 @@ nla_put_failure:
 	return -1;
 }
 
-static __inline__ int cbq_dump_fopt(struct sk_buff *skb, struct cbq_class *cl)
+static int cbq_dump_fopt(struct sk_buff *skb, struct cbq_class *cl)
 {
 	unsigned char *b = skb_tail_pointer(skb);
 	struct tc_cbq_fopt opt;
@@ -1508,7 +1512,7 @@ nla_put_failure:
 }
 
 #ifdef CONFIG_NET_CLS_ACT
-static __inline__ int cbq_dump_police(struct sk_buff *skb, struct cbq_class *cl)
+static int cbq_dump_police(struct sk_buff *skb, struct cbq_class *cl)
 {
 	unsigned char *b = skb_tail_pointer(skb);
 	struct tc_cbq_police opt;
@@ -1572,7 +1576,7 @@ static int
 cbq_dump_class(struct Qdisc *sch, unsigned long arg,
 	       struct sk_buff *skb, struct tcmsg *tcm)
 {
-	struct cbq_class *cl = (struct cbq_class*)arg;
+	struct cbq_class *cl = (struct cbq_class *)arg;
 	struct nlattr *nest;
 
 	if (cl->tparent)
@@ -1600,7 +1604,7 @@ cbq_dump_class_stats(struct Qdisc *sch, unsigned long arg,
 			struct gnet_dump *d)
 {
 	struct cbq_sched_data *q = qdisc_priv(sch);
-	struct cbq_class *cl = (struct cbq_class*)arg;
+	struct cbq_class *cl = (struct cbq_class *)arg;
 
 	cl->qstats.qlen = cl->q->q.qlen;
 	cl->xstats.avgidle = cl->avgidle;
@@ -1620,10 +1624,10 @@ cbq_dump_class_stats(struct Qdisc *sch, unsigned long arg,
 static int cbq_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
 		     struct Qdisc **old)
 {
-	struct cbq_class *cl = (struct cbq_class*)arg;
+	struct cbq_class *cl = (struct cbq_class *)arg;
 
 	if (new == NULL) {
-		new = qdisc_create_dflt(qdisc_dev(sch), sch->dev_queue,
+		new = qdisc_create_dflt(sch->dev_queue,
 					&pfifo_qdisc_ops, cl->common.classid);
 		if (new == NULL)
 			return -ENOBUFS;
@@ -1643,10 +1647,9 @@ static int cbq_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
 	return 0;
 }
 
-static struct Qdisc *
-cbq_leaf(struct Qdisc *sch, unsigned long arg)
+static struct Qdisc *cbq_leaf(struct Qdisc *sch, unsigned long arg)
 {
-	struct cbq_class *cl = (struct cbq_class*)arg;
+	struct cbq_class *cl = (struct cbq_class *)arg;
 
 	return cl->q;
 }
@@ -1685,13 +1688,12 @@ static void cbq_destroy_class(struct Qdisc *sch, struct cbq_class *cl)
 	kfree(cl);
 }
 
-static void
-cbq_destroy(struct Qdisc* sch)
+static void cbq_destroy(struct Qdisc *sch)
 {
 	struct cbq_sched_data *q = qdisc_priv(sch);
 	struct hlist_node *n, *next;
 	struct cbq_class *cl;
-	unsigned h;
+	unsigned int h;
 
 #ifdef CONFIG_NET_CLS_ACT
 	q->rx_class = NULL;
@@ -1715,7 +1717,7 @@ cbq_destroy(struct Qdisc* sch)
 
 static void cbq_put(struct Qdisc *sch, unsigned long arg)
 {
-	struct cbq_class *cl = (struct cbq_class*)arg;
+	struct cbq_class *cl = (struct cbq_class *)arg;
 
 	if (--cl->refcnt == 0) {
 #ifdef CONFIG_NET_CLS_ACT
@@ -1738,7 +1740,7 @@ cbq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct nlattr **t
 {
 	int err;
 	struct cbq_sched_data *q = qdisc_priv(sch);
-	struct cbq_class *cl = (struct cbq_class*)*arg;
+	struct cbq_class *cl = (struct cbq_class *)*arg;
 	struct nlattr *opt = tca[TCA_OPTIONS];
 	struct nlattr *tb[TCA_CBQ_MAX + 1];
 	struct cbq_class *parent;
@@ -1830,13 +1832,14 @@ cbq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct nlattr **t
 
 	if (classid) {
 		err = -EINVAL;
-		if (TC_H_MAJ(classid^sch->handle) || cbq_class_lookup(q, classid))
+		if (TC_H_MAJ(classid ^ sch->handle) ||
+		    cbq_class_lookup(q, classid))
 			goto failure;
 	} else {
 		int i;
-		classid = TC_H_MAKE(sch->handle,0x8000);
+		classid = TC_H_MAKE(sch->handle, 0x8000);
 
-		for (i=0; i<0x8000; i++) {
+		for (i = 0; i < 0x8000; i++) {
 			if (++q->hgenerator >= 0x8000)
 				q->hgenerator = 1;
 			if (cbq_class_lookup(q, classid|q->hgenerator) == NULL)
@@ -1874,8 +1877,8 @@ cbq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct nlattr **t
 	cl->R_tab = rtab;
 	rtab = NULL;
 	cl->refcnt = 1;
-	if (!(cl->q = qdisc_create_dflt(qdisc_dev(sch), sch->dev_queue,
-					&pfifo_qdisc_ops, classid)))
+	cl->q = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops, classid);
+	if (!cl->q)
 		cl->q = &noop_qdisc;
 	cl->common.classid = classid;
 	cl->tparent = parent;
@@ -1893,11 +1896,11 @@ cbq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct nlattr **t
 	cl->minidle = -0x7FFFFFFF;
 	cbq_set_lss(cl, nla_data(tb[TCA_CBQ_LSSOPT]));
 	cbq_set_wrr(cl, nla_data(tb[TCA_CBQ_WRROPT]));
-	if (cl->ewma_log==0)
+	if (cl->ewma_log == 0)
 		cl->ewma_log = q->link.ewma_log;
-	if (cl->maxidle==0)
+	if (cl->maxidle == 0)
 		cl->maxidle = q->link.maxidle;
-	if (cl->avpkt==0)
+	if (cl->avpkt == 0)
 		cl->avpkt = q->link.avpkt;
 	cl->overlimit = cbq_ovl_classic;
 	if (tb[TCA_CBQ_OVL_STRATEGY])
@@ -1923,7 +1926,7 @@ failure:
 static int cbq_delete(struct Qdisc *sch, unsigned long arg)
 {
 	struct cbq_sched_data *q = qdisc_priv(sch);
-	struct cbq_class *cl = (struct cbq_class*)arg;
+	struct cbq_class *cl = (struct cbq_class *)arg;
 	unsigned int qlen;
 
 	if (cl->filters || cl->children || cl == &q->link)
@@ -1981,7 +1984,7 @@ static unsigned long cbq_bind_filter(struct Qdisc *sch, unsigned long parent,
 				     u32 classid)
 {
 	struct cbq_sched_data *q = qdisc_priv(sch);
-	struct cbq_class *p = (struct cbq_class*)parent;
+	struct cbq_class *p = (struct cbq_class *)parent;
 	struct cbq_class *cl = cbq_class_lookup(q, classid);
 
 	if (cl) {
@@ -1995,7 +1998,7 @@ static unsigned long cbq_bind_filter(struct Qdisc *sch, unsigned long parent,
 }
 
 static void cbq_unbind_filter(struct Qdisc *sch, unsigned long arg)
 {
-	struct cbq_class *cl = (struct cbq_class*)arg;
+	struct cbq_class *cl = (struct cbq_class *)arg;
 
 	cl->filters--;
 }
@@ -2005,7 +2008,7 @@ static void cbq_walk(struct Qdisc *sch, struct qdisc_walker *arg)
 	struct cbq_sched_data *q = qdisc_priv(sch);
 	struct cbq_class *cl;
 	struct hlist_node *n;
-	unsigned h;
+	unsigned int h;
 
 	if (arg->stop)
 		return;