Diffstat (limited to 'net/sched')
-rw-r--r--  net/sched/sch_htb.c  1001
1 file changed, 526 insertions(+), 475 deletions(-)
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
index d8c1a6b0def1..6c6cac65255f 100644
--- a/net/sched/sch_htb.c
+++ b/net/sched/sch_htb.c
@@ -1,4 +1,4 @@
-/* vim: ts=8 sw=8
+/*
  * net/sched/sch_htb.c Hierarchical token bucket, feed tree version
  *
  * This program is free software; you can redistribute it and/or
@@ -68,11 +68,11 @@
    one less than their parent.
 */
 
 #define HTB_HSIZE 16 /* classid hash size */
 #define HTB_EWMAC 2 /* rate average over HTB_EWMAC*HTB_HSIZE sec */
 #define HTB_RATECM 1 /* whether to use rate computer */
-#define HTB_HYSTERESIS 1/* whether to use mode hysteresis for speedup */
+#define HTB_HYSTERESIS 1 /* whether to use mode hysteresis for speedup */
 #define HTB_VER 0x30011 /* major must be matched with number suplied by TC as version */
 
 #if HTB_VER >> 16 != TC_HTB_PROTOVER
 #error "Mismatched sch_htb.c and pkt_sch.h"
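
HTB_VER packs the protocol major version in the upper 16 bits and the minor version below it; the #if above and the version check in htb_init() both compare against the major half. A quick user-space sketch of that decomposition (illustration only, not part of the patch):

#include <stdio.h>

#define HTB_VER 0x30011 /* same constant as above */

int main(void)
{
    unsigned major = HTB_VER >> 16;    /* 0x3: must equal TC_HTB_PROTOVER */
    unsigned minor = HTB_VER & 0xffff; /* 0x11: only shown in messages */

    printf("htb version %u.%u\n", major, minor);
    return 0;
}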
@@ -80,154 +80,152 @@
 
 /* used internaly to keep status of single class */
 enum htb_cmode {
     HTB_CANT_SEND,  /* class can't send and can't borrow */
     HTB_MAY_BORROW, /* class can't send but may borrow */
     HTB_CAN_SEND    /* class can send */
 };
 
 /* interior & leaf nodes; props specific to leaves are marked L: */
-struct htb_class
-{
+struct htb_class {
     /* general class parameters */
     u32 classid;
     struct gnet_stats_basic bstats;
     struct gnet_stats_queue qstats;
     struct gnet_stats_rate_est rate_est;
-    struct tc_htb_xstats xstats;/* our special stats */
+    struct tc_htb_xstats xstats; /* our special stats */
     int refcnt; /* usage count of this class */
 
 #ifdef HTB_RATECM
     /* rate measurement counters */
-    unsigned long rate_bytes,sum_bytes;
-    unsigned long rate_packets,sum_packets;
+    unsigned long rate_bytes, sum_bytes;
+    unsigned long rate_packets, sum_packets;
 #endif
 
     /* topology */
     int level; /* our level (see above) */
     struct htb_class *parent; /* parent class */
     struct list_head hlist; /* classid hash list item */
     struct list_head sibling; /* sibling list item */
     struct list_head children; /* children list */
 
     union {
         struct htb_class_leaf {
             struct Qdisc *q;
             int prio;
             int aprio;
             int quantum;
             int deficit[TC_HTB_MAXDEPTH];
             struct list_head drop_list;
         } leaf;
         struct htb_class_inner {
             struct rb_root feed[TC_HTB_NUMPRIO]; /* feed trees */
             struct rb_node *ptr[TC_HTB_NUMPRIO]; /* current class ptr */
             /* When class changes from state 1->2 and disconnects from
                parent's feed then we lost ptr value and start from the
                first child again. Here we store classid of the
                last valid ptr (used when ptr is NULL). */
             u32 last_ptr_id[TC_HTB_NUMPRIO];
         } inner;
     } un;
     struct rb_node node[TC_HTB_NUMPRIO]; /* node for self or feed tree */
     struct rb_node pq_node; /* node for event queue */
     unsigned long pq_key; /* the same type as jiffies global */
 
     int prio_activity; /* for which prios are we active */
     enum htb_cmode cmode; /* current mode of the class */
 
     /* class attached filters */
     struct tcf_proto *filter_list;
     int filter_cnt;
 
     int warned; /* only one warning about non work conserving .. */
 
     /* token bucket parameters */
     struct qdisc_rate_table *rate; /* rate table of the class itself */
     struct qdisc_rate_table *ceil; /* ceiling rate (limits borrows too) */
-    long buffer,cbuffer; /* token bucket depth/rate */
+    long buffer, cbuffer; /* token bucket depth/rate */
     psched_tdiff_t mbuffer; /* max wait time */
-    long tokens,ctokens; /* current number of tokens */
+    long tokens, ctokens; /* current number of tokens */
     psched_time_t t_c; /* checkpoint time */
 };
 
 /* TODO: maybe compute rate when size is too large .. or drop ? */
-static __inline__ long L2T(struct htb_class *cl,struct qdisc_rate_table *rate,
-                           int size)
+static inline long L2T(struct htb_class *cl, struct qdisc_rate_table *rate,
+                       int size)
 {
     int slot = size >> rate->rate.cell_log;
     if (slot > 255) {
         cl->xstats.giants++;
         slot = 255;
     }
     return rate->data[slot];
 }
 
-struct htb_sched
-{
+struct htb_sched {
     struct list_head root; /* root classes list */
     struct list_head hash[HTB_HSIZE]; /* hashed by classid */
     struct list_head drops[TC_HTB_NUMPRIO]; /* active leaves (for drops) */
 
     /* self list - roots of self generating tree */
     struct rb_root row[TC_HTB_MAXDEPTH][TC_HTB_NUMPRIO];
     int row_mask[TC_HTB_MAXDEPTH];
     struct rb_node *ptr[TC_HTB_MAXDEPTH][TC_HTB_NUMPRIO];
     u32 last_ptr_id[TC_HTB_MAXDEPTH][TC_HTB_NUMPRIO];
 
     /* self wait list - roots of wait PQs per row */
     struct rb_root wait_pq[TC_HTB_MAXDEPTH];
 
     /* time of nearest event per level (row) */
     unsigned long near_ev_cache[TC_HTB_MAXDEPTH];
 
     /* cached value of jiffies in dequeue */
     unsigned long jiffies;
 
     /* whether we hit non-work conserving class during this dequeue; we use */
     int nwc_hit; /* this to disable mindelay complaint in dequeue */
 
     int defcls; /* class where unclassified flows go to */
 
     /* filters for qdisc itself */
     struct tcf_proto *filter_list;
     int filter_cnt;
 
     int rate2quantum; /* quant = rate / rate2quantum */
     psched_time_t now; /* cached dequeue time */
     struct timer_list timer; /* send delay timer */
 #ifdef HTB_RATECM
     struct timer_list rttim; /* rate computer timer */
     int recmp_bucket; /* which hash bucket to recompute next */
 #endif
 
     /* non shaped skbs; let them go directly thru */
     struct sk_buff_head direct_queue;
     int direct_qlen; /* max qlen of above */
 
     long direct_pkts;
 };
 
 /* compute hash of size HTB_HSIZE for given handle */
-static __inline__ int htb_hash(u32 h)
+static inline int htb_hash(u32 h)
 {
 #if HTB_HSIZE != 16
- #error "Declare new hash for your HTB_HSIZE"
+#error "Declare new hash for your HTB_HSIZE"
 #endif
-    h ^= h>>8; /* stolen from cbq_hash */
-    h ^= h>>4;
+    h ^= h >> 8; /* stolen from cbq_hash */
+    h ^= h >> 4;
     return h & 0xf;
 }
 
 /* find class in global hash table using given handle */
-static __inline__ struct htb_class *htb_find(u32 handle, struct Qdisc *sch)
+static inline struct htb_class *htb_find(u32 handle, struct Qdisc *sch)
 {
     struct htb_sched *q = qdisc_priv(sch);
     struct list_head *p;
     if (TC_H_MAJ(handle) != sch->handle)
         return NULL;
 
-    list_for_each (p,q->hash+htb_hash(handle)) {
-        struct htb_class *cl = list_entry(p,struct htb_class,hlist);
+    list_for_each(p, q->hash + htb_hash(handle)) {
+        struct htb_class *cl = list_entry(p, struct htb_class, hlist);
         if (cl->classid == handle)
             return cl;
     }
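
L2T() above converts a packet length into a transmit cost by shifting the size down by cell_log and indexing a 256-slot precomputed table, clamping oversized packets into the last slot (and counting them as giants). A self-contained sketch of the same lookup with a hypothetical hand-filled table (illustration only, not part of the patch):

#include <stdio.h>

/* stand-in for the relevant parts of struct qdisc_rate_table */
struct rate_table {
    int cell_log;   /* log2 of bytes covered by one slot */
    long data[256]; /* precomputed cost per size slot */
};

static long l2t(const struct rate_table *rt, int size)
{
    int slot = size >> rt->cell_log;
    if (slot > 255) /* giant packet: clamp to the last slot */
        slot = 255;
    return rt->data[slot];
}

int main(void)
{
    struct rate_table rt = { .cell_log = 3 }; /* 8-byte cells */
    int i;

    for (i = 0; i < 256; i++)
        rt.data[i] = (long)(i << rt.cell_log) * 10; /* 10 cost units/byte */
    printf("cost of a 1500-byte packet: %ld\n", l2t(&rt, 1500));
    return 0;
}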
@@ -252,7 +250,8 @@ static inline u32 htb_classid(struct htb_class *cl)
     return (cl && cl != HTB_DIRECT) ? cl->classid : TC_H_UNSPEC;
 }
 
-static struct htb_class *htb_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
+static struct htb_class *htb_classify(struct sk_buff *skb, struct Qdisc *sch,
+                                      int *qerr)
 {
     struct htb_sched *q = qdisc_priv(sch);
     struct htb_class *cl;
@@ -264,8 +263,8 @@ static struct htb_class *htb_classify(struct sk_buff *skb, struct Qdisc *sch, in
        note that nfmark can be used too by attaching filter fw with no
        rules in it */
     if (skb->priority == sch->handle)
         return HTB_DIRECT; /* X:0 (direct flow) selected */
-    if ((cl = htb_find(skb->priority,sch)) != NULL && cl->level == 0)
+    if ((cl = htb_find(skb->priority, sch)) != NULL && cl->level == 0)
         return cl;
 
     *qerr = NET_XMIT_BYPASS;
@@ -274,7 +273,7 @@ static struct htb_class *htb_classify(struct sk_buff *skb, struct Qdisc *sch, in
 #ifdef CONFIG_NET_CLS_ACT
     switch (result) {
     case TC_ACT_QUEUED:
     case TC_ACT_STOLEN:
         *qerr = NET_XMIT_SUCCESS;
     case TC_ACT_SHOT:
         return NULL;
@@ -283,22 +282,22 @@ static struct htb_class *htb_classify(struct sk_buff *skb, struct Qdisc *sch, in
         if (result == TC_POLICE_SHOT)
             return HTB_DIRECT;
 #endif
-        if ((cl = (void*)res.class) == NULL) {
+        if ((cl = (void *)res.class) == NULL) {
             if (res.classid == sch->handle)
                 return HTB_DIRECT; /* X:0 (direct flow) */
-            if ((cl = htb_find(res.classid,sch)) == NULL)
+            if ((cl = htb_find(res.classid, sch)) == NULL)
                 break; /* filter selected invalid classid */
         }
         if (!cl->level)
             return cl; /* we hit leaf; return it */
 
         /* we have got inner class; apply inner filter chain */
         tcf = cl->filter_list;
     }
     /* classification failed; try to use default class */
-    cl = htb_find(TC_H_MAKE(TC_H_MAJ(sch->handle),q->defcls),sch);
+    cl = htb_find(TC_H_MAKE(TC_H_MAJ(sch->handle), q->defcls), sch);
     if (!cl || cl->level)
         return HTB_DIRECT; /* bad default .. this is safe bet */
     return cl;
 }
 
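
Both the skb->priority fast path and the default-class fallback above work on 32-bit tc handles, major ID in the top half and minor in the bottom. A sketch of that arithmetic, mirroring the TC_H_* macros from pkt_sched.h (illustration only, not part of the patch):

#include <stdio.h>

#define TC_H_MAJ(h)         ((h) & 0xFFFF0000U)
#define TC_H_MIN(h)         ((h) & 0x0000FFFFU)
#define TC_H_MAKE(maj, min) (TC_H_MAJ(maj) | TC_H_MIN(min))

int main(void)
{
    unsigned int handle = 0x00010000; /* qdisc "1:" */
    unsigned int defcls = 0x30;       /* default class minor id */

    /* how htb_classify builds the default classid, i.e. 1:30 */
    unsigned int classid = TC_H_MAKE(handle, defcls);
    printf("default classid %x:%x\n",
           TC_H_MAJ(classid) >> 16, TC_H_MIN(classid));
    return 0;
}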
@@ -308,18 +307,19 @@ static struct htb_class *htb_classify(struct sk_buff *skb, struct Qdisc *sch, in
  * Routine adds class to the list (actually tree) sorted by classid.
  * Make sure that class is not already on such list for given prio.
  */
-static void htb_add_to_id_tree (struct rb_root *root,
-                                struct htb_class *cl,int prio)
+static void htb_add_to_id_tree(struct rb_root *root,
+                               struct htb_class *cl, int prio)
 {
     struct rb_node **p = &root->rb_node, *parent = NULL;
 
     while (*p) {
-        struct htb_class *c; parent = *p;
+        struct htb_class *c;
+        parent = *p;
         c = rb_entry(parent, struct htb_class, node[prio]);
 
         if (cl->classid > c->classid)
             p = &parent->rb_right;
         else
             p = &parent->rb_left;
     }
     rb_link_node(&cl->node[prio], parent, p);
@@ -333,8 +333,8 @@ static void htb_add_to_id_tree (struct rb_root *root,
  * change its mode in cl->pq_key microseconds. Make sure that class is not
  * already in the queue.
  */
-static void htb_add_to_wait_tree (struct htb_sched *q,
-                                  struct htb_class *cl,long delay)
+static void htb_add_to_wait_tree(struct htb_sched *q,
+                                 struct htb_class *cl, long delay)
 {
     struct rb_node **p = &q->wait_pq[cl->level].rb_node, *parent = NULL;
 
@@ -345,13 +345,14 @@ static void htb_add_to_wait_tree (struct htb_sched *q,
     /* update the nearest event cache */
     if (time_after(q->near_ev_cache[cl->level], cl->pq_key))
         q->near_ev_cache[cl->level] = cl->pq_key;
 
     while (*p) {
-        struct htb_class *c; parent = *p;
+        struct htb_class *c;
+        parent = *p;
         c = rb_entry(parent, struct htb_class, pq_node);
         if (time_after_eq(cl->pq_key, c->pq_key))
             p = &parent->rb_right;
         else
             p = &parent->rb_left;
     }
     rb_link_node(&cl->pq_node, parent, p);
@@ -375,14 +376,14 @@ static void htb_next_rb_node(struct rb_node **n)
  * The class is added to row at priorities marked in mask.
  * It does nothing if mask == 0.
  */
 static inline void htb_add_class_to_row(struct htb_sched *q,
-                                        struct htb_class *cl,int mask)
+                                        struct htb_class *cl, int mask)
 {
     q->row_mask[cl->level] |= mask;
     while (mask) {
         int prio = ffz(~mask);
         mask &= ~(1 << prio);
-        htb_add_to_id_tree(q->row[cl->level]+prio,cl,prio);
+        htb_add_to_id_tree(q->row[cl->level] + prio, cl, prio);
     }
 }
 
@@ -392,18 +393,18 @@ static inline void htb_add_class_to_row(struct htb_sched *q,
  * The class is removed from row at priorities marked in mask.
  * It does nothing if mask == 0.
  */
-static __inline__ void htb_remove_class_from_row(struct htb_sched *q,
-                                                 struct htb_class *cl,int mask)
+static inline void htb_remove_class_from_row(struct htb_sched *q,
+                                             struct htb_class *cl, int mask)
 {
     int m = 0;
 
     while (mask) {
         int prio = ffz(~mask);
         mask &= ~(1 << prio);
-        if (q->ptr[cl->level][prio] == cl->node+prio)
-            htb_next_rb_node(q->ptr[cl->level]+prio);
-        rb_erase(cl->node + prio,q->row[cl->level]+prio);
+        if (q->ptr[cl->level][prio] == cl->node + prio)
+            htb_next_rb_node(q->ptr[cl->level] + prio);
+        rb_erase(cl->node + prio, q->row[cl->level] + prio);
         if (!q->row[cl->level][prio].rb_node)
             m |= 1 << prio;
     }
     q->row_mask[cl->level] &= ~m;
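
Both row helpers peel one priority at a time off a bitmask with the ffz(~mask) idiom: the first zero bit of the complement is the lowest set bit of the mask. A user-space sketch of the loop, with GCC's __builtin_ctz standing in for the kernel's ffz (illustration only, not part of the patch):

#include <stdio.h>

int main(void)
{
    unsigned int mask = 0x15; /* priorities 0, 2 and 4 active */

    while (mask) {
        int prio = __builtin_ctz(mask); /* == ffz(~mask) */
        mask &= ~(1u << prio);          /* clear it so the loop ends */
        printf("handling prio %d\n", prio);
    }
    return 0;
}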
@@ -416,30 +417,31 @@ static __inline__ void htb_remove_class_from_row(struct htb_sched *q,
  * for priorities it is participating on. cl->cmode must be new
  * (activated) mode. It does nothing if cl->prio_activity == 0.
  */
-static void htb_activate_prios(struct htb_sched *q,struct htb_class *cl)
+static void htb_activate_prios(struct htb_sched *q, struct htb_class *cl)
 {
     struct htb_class *p = cl->parent;
-    long m,mask = cl->prio_activity;
+    long m, mask = cl->prio_activity;
 
     while (cl->cmode == HTB_MAY_BORROW && p && mask) {
-
-        m = mask; while (m) {
+        m = mask;
+        while (m) {
             int prio = ffz(~m);
             m &= ~(1 << prio);
 
             if (p->un.inner.feed[prio].rb_node)
                 /* parent already has its feed in use so that
                    reset bit in mask as parent is already ok */
                 mask &= ~(1 << prio);
 
-            htb_add_to_id_tree(p->un.inner.feed+prio,cl,prio);
+            htb_add_to_id_tree(p->un.inner.feed + prio, cl, prio);
         }
         p->prio_activity |= mask;
-        cl = p; p = cl->parent;
+        cl = p;
+        p = cl->parent;
 
     }
     if (cl->cmode == HTB_CAN_SEND && mask)
-        htb_add_class_to_row(q,cl,mask);
+        htb_add_class_to_row(q, cl, mask);
 }
 
 /**
@@ -452,35 +454,36 @@ static void htb_activate_prios(struct htb_sched *q,struct htb_class *cl)
 static void htb_deactivate_prios(struct htb_sched *q, struct htb_class *cl)
 {
     struct htb_class *p = cl->parent;
-    long m,mask = cl->prio_activity;
-
+    long m, mask = cl->prio_activity;
 
     while (cl->cmode == HTB_MAY_BORROW && p && mask) {
-        m = mask; mask = 0;
+        m = mask;
+        mask = 0;
         while (m) {
             int prio = ffz(~m);
             m &= ~(1 << prio);
 
-            if (p->un.inner.ptr[prio] == cl->node+prio) {
+            if (p->un.inner.ptr[prio] == cl->node + prio) {
                 /* we are removing child which is pointed to from
                    parent feed - forget the pointer but remember
                    classid */
                 p->un.inner.last_ptr_id[prio] = cl->classid;
                 p->un.inner.ptr[prio] = NULL;
             }
 
-            rb_erase(cl->node + prio,p->un.inner.feed + prio);
+            rb_erase(cl->node + prio, p->un.inner.feed + prio);
 
             if (!p->un.inner.feed[prio].rb_node)
                 mask |= 1 << prio;
         }
 
         p->prio_activity &= ~mask;
-        cl = p; p = cl->parent;
+        cl = p;
+        p = cl->parent;
 
     }
     if (cl->cmode == HTB_CAN_SEND && mask)
-        htb_remove_class_from_row(q,cl,mask);
+        htb_remove_class_from_row(q, cl, mask);
 }
 
 #if HTB_HYSTERESIS
@@ -508,21 +511,21 @@ static inline long htb_hiwater(const struct htb_class *cl)
  * 0 .. -cl->{c,}buffer range. It is meant to limit number of
  * mode transitions per time unit. The speed gain is about 1/6.
  */
-static __inline__ enum htb_cmode
-htb_class_mode(struct htb_class *cl,long *diff)
+static inline enum htb_cmode
+htb_class_mode(struct htb_class *cl, long *diff)
 {
     long toks;
 
     if ((toks = (cl->ctokens + *diff)) < htb_lowater(cl)) {
         *diff = -toks;
         return HTB_CANT_SEND;
     }
 
     if ((toks = (cl->tokens + *diff)) >= htb_hiwater(cl))
         return HTB_CAN_SEND;
 
     *diff = -toks;
     return HTB_MAY_BORROW;
 }
 
 /**
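
htb_class_mode() reads the three-state mode off the two buckets: ceiling tokens below the low-water mark mean HTB_CANT_SEND, own-rate tokens at or above the high-water mark mean HTB_CAN_SEND, and anything between means HTB_MAY_BORROW; on a throttled result *diff is overwritten with the token shortfall that later sizes the wait. A standalone sketch of the same decision with both thresholds fixed at 0, as when HTB_HYSTERESIS is off (illustration only, not part of the patch):

#include <stdio.h>

enum cmode { CANT_SEND, MAY_BORROW, CAN_SEND };

static enum cmode class_mode(long tokens, long ctokens, long *diff)
{
    long toks;

    if ((toks = ctokens + *diff) < 0) { /* ceiling bucket empty */
        *diff = -toks;
        return CANT_SEND;
    }
    if ((toks = tokens + *diff) >= 0)   /* own rate still has tokens */
        return CAN_SEND;

    *diff = -toks;                      /* own rate empty, ceiling not */
    return MAY_BORROW;
}

int main(void)
{
    long diff = 100; /* tokens earned since the last checkpoint */
    enum cmode m = class_mode(-300, 500, &diff);
    printf("mode=%d shortfall=%ld\n", m, diff); /* mode=1 shortfall=200 */
    return 0;
}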
@@ -534,22 +537,21 @@ htb_class_mode(struct htb_class *cl,long *diff)
  * be different from old one and cl->pq_key has to be valid if changing
  * to mode other than HTB_CAN_SEND (see htb_add_to_wait_tree).
  */
 static void
 htb_change_class_mode(struct htb_sched *q, struct htb_class *cl, long *diff)
 {
-    enum htb_cmode new_mode = htb_class_mode(cl,diff);
-
+    enum htb_cmode new_mode = htb_class_mode(cl, diff);
 
     if (new_mode == cl->cmode)
         return;
 
     if (cl->prio_activity) { /* not necessary: speed optimization */
         if (cl->cmode != HTB_CANT_SEND)
-            htb_deactivate_prios(q,cl);
+            htb_deactivate_prios(q, cl);
         cl->cmode = new_mode;
         if (new_mode != HTB_CANT_SEND)
-            htb_activate_prios(q,cl);
+            htb_activate_prios(q, cl);
     } else
         cl->cmode = new_mode;
 }
 
@@ -560,14 +562,15 @@ htb_change_class_mode(struct htb_sched *q, struct htb_class *cl, long *diff)
  * for the prio. It can be called on already active leaf safely.
  * It also adds leaf into droplist.
  */
-static __inline__ void htb_activate(struct htb_sched *q,struct htb_class *cl)
+static inline void htb_activate(struct htb_sched *q, struct htb_class *cl)
 {
     BUG_TRAP(!cl->level && cl->un.leaf.q && cl->un.leaf.q->q.qlen);
 
     if (!cl->prio_activity) {
         cl->prio_activity = 1 << (cl->un.leaf.aprio = cl->un.leaf.prio);
-        htb_activate_prios(q,cl);
-        list_add_tail(&cl->un.leaf.drop_list,q->drops+cl->un.leaf.aprio);
+        htb_activate_prios(q, cl);
+        list_add_tail(&cl->un.leaf.drop_list,
+                      q->drops + cl->un.leaf.aprio);
     }
 }
 
@@ -577,97 +580,100 @@ static __inline__ void htb_activate(struct htb_sched *q,struct htb_class *cl)
  * Make sure that leaf is active. In the other words it can't be called
  * with non-active leaf. It also removes class from the drop list.
  */
-static __inline__ void
-htb_deactivate(struct htb_sched *q,struct htb_class *cl)
+static inline void htb_deactivate(struct htb_sched *q, struct htb_class *cl)
 {
     BUG_TRAP(cl->prio_activity);
 
-    htb_deactivate_prios(q,cl);
+    htb_deactivate_prios(q, cl);
     cl->prio_activity = 0;
     list_del_init(&cl->un.leaf.drop_list);
 }
 
 static int htb_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 {
     int ret;
     struct htb_sched *q = qdisc_priv(sch);
-    struct htb_class *cl = htb_classify(skb,sch,&ret);
+    struct htb_class *cl = htb_classify(skb, sch, &ret);
 
     if (cl == HTB_DIRECT) {
         /* enqueue to helper queue */
         if (q->direct_queue.qlen < q->direct_qlen) {
             __skb_queue_tail(&q->direct_queue, skb);
             q->direct_pkts++;
         } else {
             kfree_skb(skb);
             sch->qstats.drops++;
             return NET_XMIT_DROP;
         }
 #ifdef CONFIG_NET_CLS_ACT
     } else if (!cl) {
         if (ret == NET_XMIT_BYPASS)
             sch->qstats.drops++;
-        kfree_skb (skb);
+        kfree_skb(skb);
         return ret;
 #endif
-    } else if (cl->un.leaf.q->enqueue(skb, cl->un.leaf.q) != NET_XMIT_SUCCESS) {
-        sch->qstats.drops++;
-        cl->qstats.drops++;
-        return NET_XMIT_DROP;
-    } else {
-        cl->bstats.packets++; cl->bstats.bytes += skb->len;
-        htb_activate (q,cl);
-    }
-
-    sch->q.qlen++;
-    sch->bstats.packets++; sch->bstats.bytes += skb->len;
-    return NET_XMIT_SUCCESS;
+    } else if (cl->un.leaf.q->enqueue(skb, cl->un.leaf.q) !=
+               NET_XMIT_SUCCESS) {
+        sch->qstats.drops++;
+        cl->qstats.drops++;
+        return NET_XMIT_DROP;
+    } else {
+        cl->bstats.packets++;
+        cl->bstats.bytes += skb->len;
+        htb_activate(q, cl);
+    }
+
+    sch->q.qlen++;
+    sch->bstats.packets++;
+    sch->bstats.bytes += skb->len;
+    return NET_XMIT_SUCCESS;
 }
 
 /* TODO: requeuing packet charges it to policers again !! */
 static int htb_requeue(struct sk_buff *skb, struct Qdisc *sch)
 {
     struct htb_sched *q = qdisc_priv(sch);
     int ret = NET_XMIT_SUCCESS;
-    struct htb_class *cl = htb_classify(skb,sch, &ret);
+    struct htb_class *cl = htb_classify(skb, sch, &ret);
     struct sk_buff *tskb;
 
     if (cl == HTB_DIRECT || !cl) {
         /* enqueue to helper queue */
         if (q->direct_queue.qlen < q->direct_qlen && cl) {
             __skb_queue_head(&q->direct_queue, skb);
         } else {
             __skb_queue_head(&q->direct_queue, skb);
             tskb = __skb_dequeue_tail(&q->direct_queue);
-            kfree_skb (tskb);
+            kfree_skb(tskb);
             sch->qstats.drops++;
             return NET_XMIT_CN;
         }
-    } else if (cl->un.leaf.q->ops->requeue(skb, cl->un.leaf.q) != NET_XMIT_SUCCESS) {
-        sch->qstats.drops++;
-        cl->qstats.drops++;
-        return NET_XMIT_DROP;
-    } else
-        htb_activate (q,cl);
-
-    sch->q.qlen++;
-    sch->qstats.requeues++;
-    return NET_XMIT_SUCCESS;
+    } else if (cl->un.leaf.q->ops->requeue(skb, cl->un.leaf.q) !=
+               NET_XMIT_SUCCESS) {
+        sch->qstats.drops++;
+        cl->qstats.drops++;
+        return NET_XMIT_DROP;
+    } else
+        htb_activate(q, cl);
+
+    sch->q.qlen++;
+    sch->qstats.requeues++;
+    return NET_XMIT_SUCCESS;
 }
 
 static void htb_timer(unsigned long arg)
 {
-    struct Qdisc *sch = (struct Qdisc*)arg;
+    struct Qdisc *sch = (struct Qdisc *)arg;
     sch->flags &= ~TCQ_F_THROTTLED;
     wmb();
     netif_schedule(sch->dev);
 }
 
 #ifdef HTB_RATECM
 #define RT_GEN(D,R) R+=D-(R/HTB_EWMAC);D=0
 static void htb_rate_timer(unsigned long arg)
 {
-    struct Qdisc *sch = (struct Qdisc*)arg;
+    struct Qdisc *sch = (struct Qdisc *)arg;
     struct htb_sched *q = qdisc_priv(sch);
     struct list_head *p;
 
@@ -678,13 +684,13 @@ static void htb_rate_timer(unsigned long arg)
     add_timer(&q->rttim);
 
     /* scan and recompute one bucket at time */
     if (++q->recmp_bucket >= HTB_HSIZE)
         q->recmp_bucket = 0;
-    list_for_each (p,q->hash+q->recmp_bucket) {
-        struct htb_class *cl = list_entry(p,struct htb_class,hlist);
+    list_for_each(p, q->hash + q->recmp_bucket) {
+        struct htb_class *cl = list_entry(p, struct htb_class, hlist);
 
-        RT_GEN (cl->sum_bytes,cl->rate_bytes);
-        RT_GEN (cl->sum_packets,cl->rate_packets);
+        RT_GEN(cl->sum_bytes, cl->rate_bytes);
+        RT_GEN(cl->sum_packets, cl->rate_packets);
     }
     spin_unlock_bh(&sch->dev->queue_lock);
 }
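
RT_GEN() is a cheap exponentially weighted moving average: each time a bucket is rescanned it folds the accumulated delta D into the running rate R as R += D - R/HTB_EWMAC and zeroes D, so R converges to HTB_EWMAC times the per-period total, which is why htb_dump_class_stats() later divides by HTB_EWMAC*HTB_HSIZE. A user-space sketch with synthetic byte counts (illustration only, not part of the patch):

#include <stdio.h>

#define EWMAC 2 /* same smoothing constant as HTB_EWMAC */

int main(void)
{
    unsigned long rate_bytes = 0, sum_bytes;
    int period;

    /* a steady 1000 bytes per period; rate_bytes converges to 2000 */
    for (period = 0; period < 12; period++) {
        sum_bytes = 1000;                             /* delta D */
        rate_bytes += sum_bytes - rate_bytes / EWMAC; /* RT_GEN(D, R) */
        sum_bytes = 0;
        printf("period %2d: rate_bytes=%lu\n", period, rate_bytes);
    }
    return 0;
}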
@@ -701,10 +707,10 @@ static void htb_rate_timer(unsigned long arg)
  * CAN_SEND) because we can use more precise clock that event queue here.
  * In such case we remove class from event queue first.
  */
-static void htb_charge_class(struct htb_sched *q,struct htb_class *cl,
-                             int level,int bytes)
+static void htb_charge_class(struct htb_sched *q, struct htb_class *cl,
+                             int level, int bytes)
 {
-    long toks,diff;
+    long toks, diff;
     enum htb_cmode old_mode;
 
 #define HTB_ACCNT(T,B,R) toks = diff + cl->T; \
@@ -714,29 +720,31 @@ static void htb_charge_class(struct htb_sched *q,struct htb_class *cl,
     cl->T = toks
 
     while (cl) {
-        diff = PSCHED_TDIFF_SAFE(q->now, cl->t_c, (u32)cl->mbuffer);
+        diff = PSCHED_TDIFF_SAFE(q->now, cl->t_c, (u32) cl->mbuffer);
         if (cl->level >= level) {
-            if (cl->level == level) cl->xstats.lends++;
-            HTB_ACCNT (tokens,buffer,rate);
+            if (cl->level == level)
+                cl->xstats.lends++;
+            HTB_ACCNT(tokens, buffer, rate);
         } else {
             cl->xstats.borrows++;
             cl->tokens += diff; /* we moved t_c; update tokens */
         }
-        HTB_ACCNT (ctokens,cbuffer,ceil);
+        HTB_ACCNT(ctokens, cbuffer, ceil);
         cl->t_c = q->now;
 
-        old_mode = cl->cmode; diff = 0;
-        htb_change_class_mode(q,cl,&diff);
+        old_mode = cl->cmode;
+        diff = 0;
+        htb_change_class_mode(q, cl, &diff);
         if (old_mode != cl->cmode) {
             if (old_mode != HTB_CAN_SEND)
-                rb_erase(&cl->pq_node,q->wait_pq+cl->level);
+                rb_erase(&cl->pq_node, q->wait_pq + cl->level);
             if (cl->cmode != HTB_CAN_SEND)
-                htb_add_to_wait_tree (q,cl,diff);
+                htb_add_to_wait_tree(q, cl, diff);
         }
-
 #ifdef HTB_RATECM
         /* update rate counters */
-        cl->sum_bytes += bytes; cl->sum_packets++;
+        cl->sum_bytes += bytes;
+        cl->sum_packets++;
 #endif
 
         /* update byte stats except for leaves which are already updated */
@@ -755,7 +763,7 @@ static void htb_charge_class(struct htb_sched *q,struct htb_class *cl,
  * next pending event (0 for no event in pq).
  * Note: Aplied are events whose have cl->pq_key <= jiffies.
  */
-static long htb_do_events(struct htb_sched *q,int level)
+static long htb_do_events(struct htb_sched *q, int level)
 {
     int i;
 
@@ -763,34 +771,38 @@ static long htb_do_events(struct htb_sched *q,int level)
         struct htb_class *cl;
         long diff;
         struct rb_node *p = q->wait_pq[level].rb_node;
-        if (!p) return 0;
-        while (p->rb_left) p = p->rb_left;
+        if (!p)
+            return 0;
+        while (p->rb_left)
+            p = p->rb_left;
 
         cl = rb_entry(p, struct htb_class, pq_node);
         if (time_after(cl->pq_key, q->jiffies)) {
             return cl->pq_key - q->jiffies;
         }
-        rb_erase(p,q->wait_pq+level);
-        diff = PSCHED_TDIFF_SAFE(q->now, cl->t_c, (u32)cl->mbuffer);
-        htb_change_class_mode(q,cl,&diff);
+        rb_erase(p, q->wait_pq + level);
+        diff = PSCHED_TDIFF_SAFE(q->now, cl->t_c, (u32) cl->mbuffer);
+        htb_change_class_mode(q, cl, &diff);
         if (cl->cmode != HTB_CAN_SEND)
-            htb_add_to_wait_tree (q,cl,diff);
+            htb_add_to_wait_tree(q, cl, diff);
     }
     if (net_ratelimit())
         printk(KERN_WARNING "htb: too many events !\n");
-    return HZ/10;
+    return HZ / 10;
 }
 
 /* Returns class->node+prio from id-tree where classe's id is >= id. NULL
    is no such one exists. */
-static struct rb_node *
-htb_id_find_next_upper(int prio,struct rb_node *n,u32 id)
+static struct rb_node *htb_id_find_next_upper(int prio, struct rb_node *n,
+                                              u32 id)
 {
     struct rb_node *r = NULL;
     while (n) {
-        struct htb_class *cl = rb_entry(n,struct htb_class,node[prio]);
-        if (id == cl->classid) return n;
+        struct htb_class *cl =
+            rb_entry(n, struct htb_class, node[prio]);
+        if (id == cl->classid)
+            return n;
 
         if (id > cl->classid) {
             n = n->rb_right;
         } else {
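
htb_do_events() pops the leftmost (earliest pq_key) node of the wait tree and relies on time_after()/time_after_eq() so deadline comparisons survive jiffies wraparound. A sketch of that signed-subtraction trick on a 32-bit counter (illustration only, not part of the patch):

#include <stdio.h>

typedef unsigned int u32;

/* wrap-safe "a is later than b", as in the kernel's time_after() */
#define time_after32(a, b) ((int)((b) - (a)) < 0)

int main(void)
{
    u32 jiffies = 0xfffffff0u;  /* counter about to wrap */
    u32 pq_key = jiffies + 100; /* deadline lands past the wrap */

    /* the naive test claims the event is already due; the
       wrap-safe one correctly sees it 100 ticks in the future */
    printf("naive due: %d, wrap-safe due: %d\n",
           pq_key <= jiffies, !time_after32(pq_key, jiffies));
    return 0;
}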
@@ -806,46 +818,49 @@ htb_id_find_next_upper(int prio,struct rb_node *n,u32 id)
  *
  * Find leaf where current feed pointers points to.
  */
-static struct htb_class *
-htb_lookup_leaf(struct rb_root *tree,int prio,struct rb_node **pptr,u32 *pid)
+static struct htb_class *htb_lookup_leaf(struct rb_root *tree, int prio,
+                                         struct rb_node **pptr, u32 * pid)
 {
     int i;
     struct {
         struct rb_node *root;
         struct rb_node **pptr;
         u32 *pid;
-    } stk[TC_HTB_MAXDEPTH],*sp = stk;
+    } stk[TC_HTB_MAXDEPTH], *sp = stk;
 
     BUG_TRAP(tree->rb_node);
     sp->root = tree->rb_node;
     sp->pptr = pptr;
     sp->pid = pid;
 
     for (i = 0; i < 65535; i++) {
         if (!*sp->pptr && *sp->pid) {
             /* ptr was invalidated but id is valid - try to recover
                the original or next ptr */
-            *sp->pptr = htb_id_find_next_upper(prio,sp->root,*sp->pid);
+            *sp->pptr =
+                htb_id_find_next_upper(prio, sp->root, *sp->pid);
         }
         *sp->pid = 0; /* ptr is valid now so that remove this hint as it
                          can become out of date quickly */
         if (!*sp->pptr) { /* we are at right end; rewind & go up */
             *sp->pptr = sp->root;
             while ((*sp->pptr)->rb_left)
                 *sp->pptr = (*sp->pptr)->rb_left;
             if (sp > stk) {
                 sp--;
-                BUG_TRAP(*sp->pptr); if(!*sp->pptr) return NULL;
-                htb_next_rb_node (sp->pptr);
+                BUG_TRAP(*sp->pptr);
+                if (!*sp->pptr)
+                    return NULL;
+                htb_next_rb_node(sp->pptr);
             }
         } else {
             struct htb_class *cl;
-            cl = rb_entry(*sp->pptr,struct htb_class,node[prio]);
+            cl = rb_entry(*sp->pptr, struct htb_class, node[prio]);
             if (!cl->level)
                 return cl;
             (++sp)->root = cl->un.inner.feed[prio].rb_node;
-            sp->pptr = cl->un.inner.ptr+prio;
-            sp->pid = cl->un.inner.last_ptr_id+prio;
+            sp->pptr = cl->un.inner.ptr + prio;
+            sp->pid = cl->un.inner.last_ptr_id + prio;
         }
     }
     BUG_TRAP(0);
@@ -854,19 +869,21 @@ htb_lookup_leaf(struct rb_root *tree,int prio,struct rb_node **pptr,u32 *pid)
 
 /* dequeues packet at given priority and level; call only if
    you are sure that there is active class at prio/level */
-static struct sk_buff *
-htb_dequeue_tree(struct htb_sched *q,int prio,int level)
+static struct sk_buff *htb_dequeue_tree(struct htb_sched *q, int prio,
+                                        int level)
 {
     struct sk_buff *skb = NULL;
-    struct htb_class *cl,*start;
+    struct htb_class *cl, *start;
     /* look initial class up in the row */
-    start = cl = htb_lookup_leaf (q->row[level]+prio,prio,
-                                  q->ptr[level]+prio,q->last_ptr_id[level]+prio);
+    start = cl = htb_lookup_leaf(q->row[level] + prio, prio,
+                                 q->ptr[level] + prio,
+                                 q->last_ptr_id[level] + prio);
 
     do {
 next:
         BUG_TRAP(cl);
-        if (!cl) return NULL;
+        if (!cl)
+            return NULL;
 
         /* class can be empty - it is unlikely but can be true if leaf
            qdisc drops packets in enqueue routine or if someone used
@@ -874,56 +891,64 @@ next:
            simply deactivate and skip such class */
         if (unlikely(cl->un.leaf.q->q.qlen == 0)) {
             struct htb_class *next;
-            htb_deactivate(q,cl);
+            htb_deactivate(q, cl);
 
             /* row/level might become empty */
             if ((q->row_mask[level] & (1 << prio)) == 0)
                 return NULL;
 
-            next = htb_lookup_leaf (q->row[level]+prio,
-                prio,q->ptr[level]+prio,q->last_ptr_id[level]+prio);
+            next = htb_lookup_leaf(q->row[level] + prio,
+                                   prio, q->ptr[level] + prio,
+                                   q->last_ptr_id[level] + prio);
 
             if (cl == start) /* fix start if we just deleted it */
                 start = next;
             cl = next;
             goto next;
         }
 
-        if (likely((skb = cl->un.leaf.q->dequeue(cl->un.leaf.q)) != NULL))
+        skb = cl->un.leaf.q->dequeue(cl->un.leaf.q);
+        if (likely(skb != NULL))
             break;
         if (!cl->warned) {
-            printk(KERN_WARNING "htb: class %X isn't work conserving ?!\n",cl->classid);
+            printk(KERN_WARNING
+                   "htb: class %X isn't work conserving ?!\n",
+                   cl->classid);
             cl->warned = 1;
         }
         q->nwc_hit++;
-        htb_next_rb_node((level?cl->parent->un.inner.ptr:q->ptr[0])+prio);
-        cl = htb_lookup_leaf (q->row[level]+prio,prio,q->ptr[level]+prio,
-                              q->last_ptr_id[level]+prio);
+        htb_next_rb_node((level ? cl->parent->un.inner.ptr : q->
+                          ptr[0]) + prio);
+        cl = htb_lookup_leaf(q->row[level] + prio, prio,
+                             q->ptr[level] + prio,
+                             q->last_ptr_id[level] + prio);
 
     } while (cl != start);
 
     if (likely(skb != NULL)) {
         if ((cl->un.leaf.deficit[level] -= skb->len) < 0) {
             cl->un.leaf.deficit[level] += cl->un.leaf.quantum;
-            htb_next_rb_node((level?cl->parent->un.inner.ptr:q->ptr[0])+prio);
+            htb_next_rb_node((level ? cl->parent->un.inner.ptr : q->
+                              ptr[0]) + prio);
         }
         /* this used to be after charge_class but this constelation
            gives us slightly better performance */
         if (!cl->un.leaf.q->q.qlen)
-            htb_deactivate (q,cl);
-        htb_charge_class (q,cl,level,skb->len);
+            htb_deactivate(q, cl);
+        htb_charge_class(q, cl, level, skb->len);
     }
     return skb;
 }
 
-static void htb_delay_by(struct Qdisc *sch,long delay)
+static void htb_delay_by(struct Qdisc *sch, long delay)
 {
     struct htb_sched *q = qdisc_priv(sch);
-    if (delay <= 0) delay = 1;
-    if (unlikely(delay > 5*HZ)) {
+    if (delay <= 0)
+        delay = 1;
+    if (unlikely(delay > 5 * HZ)) {
         if (net_ratelimit())
             printk(KERN_INFO "HTB delay %ld > 5sec\n", delay);
-        delay = 5*HZ;
+        delay = 5 * HZ;
     }
     /* why don't use jiffies here ? because expires can be in past */
     mod_timer(&q->timer, q->jiffies + delay);
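
The deficit handling in htb_dequeue_tree() is plain deficit round robin: every dequeued packet is charged to the class's per-level deficit, and once the deficit goes negative the class gets one quantum back and the row pointer advances to the next class. A toy run of that accounting with made-up packet sizes (illustration only, not part of the patch):

#include <stdio.h>

int main(void)
{
    int deficit = 0;    /* per-class, per-level counter */
    int quantum = 1500; /* bytes the class may send per round */
    int pkts[] = { 900, 900, 900 };
    int i;

    for (i = 0; i < 3; i++) {
        deficit -= pkts[i]; /* charge the dequeued packet */
        printf("sent %d, deficit=%d\n", pkts[i], deficit);
        if (deficit < 0) {
            deficit += quantum; /* refill ... */
            printf("  quantum added; advance to next class\n");
        }
    }
    return 0;
}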
@@ -941,13 +966,15 @@ static struct sk_buff *htb_dequeue(struct Qdisc *sch)
     q->jiffies = jiffies;
 
     /* try to dequeue direct packets as high prio (!) to minimize cpu work */
-    if ((skb = __skb_dequeue(&q->direct_queue)) != NULL) {
+    skb = __skb_dequeue(&q->direct_queue);
+    if (skb != NULL) {
         sch->flags &= ~TCQ_F_THROTTLED;
         sch->q.qlen--;
         return skb;
     }
 
-    if (!sch->q.qlen) goto fin;
+    if (!sch->q.qlen)
+        goto fin;
     PSCHED_GET_TIME(q->now);
 
     min_delay = LONG_MAX;
@@ -957,18 +984,19 @@ static struct sk_buff *htb_dequeue(struct Qdisc *sch)
         int m;
         long delay;
         if (time_after_eq(q->jiffies, q->near_ev_cache[level])) {
-            delay = htb_do_events(q,level);
-            q->near_ev_cache[level] = q->jiffies + (delay ? delay : HZ);
+            delay = htb_do_events(q, level);
+            q->near_ev_cache[level] =
+                q->jiffies + (delay ? delay : HZ);
         } else
             delay = q->near_ev_cache[level] - q->jiffies;
 
         if (delay && min_delay > delay)
             min_delay = delay;
         m = ~q->row_mask[level];
         while (m != (int)(-1)) {
-            int prio = ffz (m);
+            int prio = ffz(m);
             m |= 1 << prio;
-            skb = htb_dequeue_tree(q,prio,level);
+            skb = htb_dequeue_tree(q, prio, level);
             if (likely(skb != NULL)) {
                 sch->q.qlen--;
                 sch->flags &= ~TCQ_F_THROTTLED;
@@ -976,28 +1004,28 @@ static struct sk_buff *htb_dequeue(struct Qdisc *sch)
             }
         }
     }
-    htb_delay_by (sch,min_delay > 5*HZ ? 5*HZ : min_delay);
+    htb_delay_by(sch, min_delay > 5 * HZ ? 5 * HZ : min_delay);
 fin:
     return skb;
 }
 
 /* try to drop from each class (by prio) until one succeed */
-static unsigned int htb_drop(struct Qdisc* sch)
+static unsigned int htb_drop(struct Qdisc *sch)
 {
     struct htb_sched *q = qdisc_priv(sch);
     int prio;
 
     for (prio = TC_HTB_NUMPRIO - 1; prio >= 0; prio--) {
         struct list_head *p;
-        list_for_each (p,q->drops+prio) {
+        list_for_each(p, q->drops + prio) {
             struct htb_class *cl = list_entry(p, struct htb_class,
                                               un.leaf.drop_list);
             unsigned int len;
             if (cl->un.leaf.q->ops->drop &&
                 (len = cl->un.leaf.q->ops->drop(cl->un.leaf.q))) {
                 sch->q.qlen--;
                 if (!cl->un.leaf.q->q.qlen)
-                    htb_deactivate (q,cl);
+                    htb_deactivate(q, cl);
                 return len;
             }
         }
@@ -1007,19 +1035,20 @@ static unsigned int htb_drop(struct Qdisc* sch)
 
 /* reset all classes */
 /* always caled under BH & queue lock */
-static void htb_reset(struct Qdisc* sch)
+static void htb_reset(struct Qdisc *sch)
 {
     struct htb_sched *q = qdisc_priv(sch);
     int i;
 
     for (i = 0; i < HTB_HSIZE; i++) {
         struct list_head *p;
-        list_for_each (p,q->hash+i) {
-            struct htb_class *cl = list_entry(p,struct htb_class,hlist);
+        list_for_each(p, q->hash + i) {
+            struct htb_class *cl =
+                list_entry(p, struct htb_class, hlist);
             if (cl->level)
-                memset(&cl->un.inner,0,sizeof(cl->un.inner));
+                memset(&cl->un.inner, 0, sizeof(cl->un.inner));
             else {
                 if (cl->un.leaf.q)
                     qdisc_reset(cl->un.leaf.q);
                 INIT_LIST_HEAD(&cl->un.leaf.drop_list);
             }
@@ -1032,12 +1061,12 @@ static void htb_reset(struct Qdisc* sch)
     del_timer(&q->timer);
     __skb_queue_purge(&q->direct_queue);
     sch->q.qlen = 0;
-    memset(q->row,0,sizeof(q->row));
-    memset(q->row_mask,0,sizeof(q->row_mask));
-    memset(q->wait_pq,0,sizeof(q->wait_pq));
-    memset(q->ptr,0,sizeof(q->ptr));
+    memset(q->row, 0, sizeof(q->row));
+    memset(q->row_mask, 0, sizeof(q->row_mask));
+    memset(q->wait_pq, 0, sizeof(q->wait_pq));
+    memset(q->ptr, 0, sizeof(q->ptr));
     for (i = 0; i < TC_HTB_NUMPRIO; i++)
-        INIT_LIST_HEAD(q->drops+i);
+        INIT_LIST_HEAD(q->drops + i);
 }
 
 static int htb_init(struct Qdisc *sch, struct rtattr *opt)
@@ -1047,29 +1076,30 @@ static int htb_init(struct Qdisc *sch, struct rtattr *opt)
     struct tc_htb_glob *gopt;
     int i;
     if (!opt || rtattr_parse_nested(tb, TCA_HTB_INIT, opt) ||
-        tb[TCA_HTB_INIT-1] == NULL ||
-        RTA_PAYLOAD(tb[TCA_HTB_INIT-1]) < sizeof(*gopt)) {
+        tb[TCA_HTB_INIT - 1] == NULL ||
+        RTA_PAYLOAD(tb[TCA_HTB_INIT - 1]) < sizeof(*gopt)) {
         printk(KERN_ERR "HTB: hey probably you have bad tc tool ?\n");
         return -EINVAL;
     }
-    gopt = RTA_DATA(tb[TCA_HTB_INIT-1]);
+    gopt = RTA_DATA(tb[TCA_HTB_INIT - 1]);
     if (gopt->version != HTB_VER >> 16) {
-        printk(KERN_ERR "HTB: need tc/htb version %d (minor is %d), you have %d\n",
-               HTB_VER >> 16,HTB_VER & 0xffff,gopt->version);
+        printk(KERN_ERR
+               "HTB: need tc/htb version %d (minor is %d), you have %d\n",
+               HTB_VER >> 16, HTB_VER & 0xffff, gopt->version);
         return -EINVAL;
     }
 
     INIT_LIST_HEAD(&q->root);
     for (i = 0; i < HTB_HSIZE; i++)
-        INIT_LIST_HEAD(q->hash+i);
+        INIT_LIST_HEAD(q->hash + i);
     for (i = 0; i < TC_HTB_NUMPRIO; i++)
-        INIT_LIST_HEAD(q->drops+i);
+        INIT_LIST_HEAD(q->drops + i);
 
     init_timer(&q->timer);
     skb_queue_head_init(&q->direct_queue);
 
     q->direct_qlen = sch->dev->tx_queue_len;
     if (q->direct_qlen < 2) /* some devices have zero tx_queue_len */
         q->direct_qlen = 2;
     q->timer.function = htb_timer;
     q->timer.data = (unsigned long)sch;
@@ -1091,7 +1121,7 @@ static int htb_init(struct Qdisc *sch, struct rtattr *opt)
 static int htb_dump(struct Qdisc *sch, struct sk_buff *skb)
 {
     struct htb_sched *q = qdisc_priv(sch);
     unsigned char *b = skb->tail;
     struct rtattr *rta;
     struct tc_htb_glob gopt;
     spin_lock_bh(&sch->dev->queue_lock);
@@ -1101,7 +1131,7 @@ static int htb_dump(struct Qdisc *sch, struct sk_buff *skb)
     gopt.rate2quantum = q->rate2quantum;
     gopt.defcls = q->defcls;
     gopt.debug = 0;
-    rta = (struct rtattr*)b;
+    rta = (struct rtattr *)b;
     RTA_PUT(skb, TCA_OPTIONS, 0, NULL);
     RTA_PUT(skb, TCA_HTB_INIT, sizeof(gopt), &gopt);
     rta->rta_len = skb->tail - b;
@@ -1114,10 +1144,10 @@ rtattr_failure:
 }
 
 static int htb_dump_class(struct Qdisc *sch, unsigned long arg,
                           struct sk_buff *skb, struct tcmsg *tcm)
 {
-    struct htb_class *cl = (struct htb_class*)arg;
+    struct htb_class *cl = (struct htb_class *)arg;
     unsigned char *b = skb->tail;
     struct rtattr *rta;
     struct tc_htb_opt opt;
 
@@ -1127,15 +1157,18 @@ static int htb_dump_class(struct Qdisc *sch, unsigned long arg,
     if (!cl->level && cl->un.leaf.q)
         tcm->tcm_info = cl->un.leaf.q->handle;
 
-    rta = (struct rtattr*)b;
+    rta = (struct rtattr *)b;
     RTA_PUT(skb, TCA_OPTIONS, 0, NULL);
 
-    memset (&opt,0,sizeof(opt));
+    memset(&opt, 0, sizeof(opt));
 
-    opt.rate = cl->rate->rate; opt.buffer = cl->buffer;
-    opt.ceil = cl->ceil->rate; opt.cbuffer = cl->cbuffer;
-    opt.quantum = cl->un.leaf.quantum; opt.prio = cl->un.leaf.prio;
-    opt.level = cl->level;
+    opt.rate = cl->rate->rate;
+    opt.buffer = cl->buffer;
+    opt.ceil = cl->ceil->rate;
+    opt.cbuffer = cl->cbuffer;
+    opt.quantum = cl->un.leaf.quantum;
+    opt.prio = cl->un.leaf.prio;
+    opt.level = cl->level;
     RTA_PUT(skb, TCA_HTB_PARMS, sizeof(opt), &opt);
     rta->rta_len = skb->tail - b;
     spin_unlock_bh(&sch->dev->queue_lock);
@@ -1147,14 +1180,13 @@ rtattr_failure:
1147} 1180}
1148 1181
1149static int 1182static int
1150htb_dump_class_stats(struct Qdisc *sch, unsigned long arg, 1183htb_dump_class_stats(struct Qdisc *sch, unsigned long arg, struct gnet_dump *d)
1151 struct gnet_dump *d)
1152{ 1184{
1153 struct htb_class *cl = (struct htb_class*)arg; 1185 struct htb_class *cl = (struct htb_class *)arg;
1154 1186
1155#ifdef HTB_RATECM 1187#ifdef HTB_RATECM
1156 cl->rate_est.bps = cl->rate_bytes/(HTB_EWMAC*HTB_HSIZE); 1188 cl->rate_est.bps = cl->rate_bytes / (HTB_EWMAC * HTB_HSIZE);
1157 cl->rate_est.pps = cl->rate_packets/(HTB_EWMAC*HTB_HSIZE); 1189 cl->rate_est.pps = cl->rate_packets / (HTB_EWMAC * HTB_HSIZE);
1158#endif 1190#endif
1159 1191
1160 if (!cl->level && cl->un.leaf.q) 1192 if (!cl->level && cl->un.leaf.q)
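
Under HTB_RATECM the stats dump above derives bps and pps by dividing the accumulated rate_bytes and rate_packets counters by HTB_EWMAC * HTB_HSIZE, i.e. 2 * 16 = 32, which per the macro's definition corresponds to averaging over 32 seconds. A sketch of the arithmetic with a hypothetical byte count:

```c
#include <stdio.h>

#define HTB_HSIZE 16
#define HTB_EWMAC 2

/* Sketch of the averaging above: rate_bytes holds a sum maintained
 * by the rate-computer timer; dividing by HTB_EWMAC * HTB_HSIZE
 * (= 32) seconds yields bytes per second. */
int main(void)
{
	unsigned long rate_bytes = 4000000;	/* hypothetical sum */
	unsigned long bps = rate_bytes / (HTB_EWMAC * HTB_HSIZE);

	printf("estimated rate: %lu B/s\n", bps);	/* 125000 */
	return 0;
}
```
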
@@ -1171,21 +1203,22 @@ htb_dump_class_stats(struct Qdisc *sch, unsigned long arg,
1171} 1203}
1172 1204
1173static int htb_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new, 1205static int htb_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
1174 struct Qdisc **old) 1206 struct Qdisc **old)
1175{ 1207{
1176 struct htb_class *cl = (struct htb_class*)arg; 1208 struct htb_class *cl = (struct htb_class *)arg;
1177 1209
1178 if (cl && !cl->level) { 1210 if (cl && !cl->level) {
1179 if (new == NULL && (new = qdisc_create_dflt(sch->dev, 1211 if (new == NULL && (new = qdisc_create_dflt(sch->dev,
1180 &pfifo_qdisc_ops)) == NULL) 1212 &pfifo_qdisc_ops))
1181 return -ENOBUFS; 1213 == NULL)
1214 return -ENOBUFS;
1182 sch_tree_lock(sch); 1215 sch_tree_lock(sch);
1183 if ((*old = xchg(&cl->un.leaf.q, new)) != NULL) { 1216 if ((*old = xchg(&cl->un.leaf.q, new)) != NULL) {
1184 if (cl->prio_activity) 1217 if (cl->prio_activity)
1185 htb_deactivate (qdisc_priv(sch),cl); 1218 htb_deactivate(qdisc_priv(sch), cl);
1186 1219
1187 /* TODO: is it correct? Why doesn't CBQ do it? */ 1220 /* TODO: is it correct? Why doesn't CBQ do it? */
1188 sch->q.qlen -= (*old)->q.qlen; 1221 sch->q.qlen -= (*old)->q.qlen;
1189 qdisc_reset(*old); 1222 qdisc_reset(*old);
1190 } 1223 }
1191 sch_tree_unlock(sch); 1224 sch_tree_unlock(sch);
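
htb_graft above swaps a leaf's child qdisc with xchg() under sch_tree_lock, then subtracts the old child's backlog from the parent qdisc's qlen before resetting it, so the parent's packet count stays consistent. A toy single-threaded model of that swap-then-drain sequence (types and names are illustrative, and the locking is elided):

```c
#include <stdio.h>

struct toy_qdisc { unsigned int qlen; };

/* Model of the exchange in htb_graft: swap the child pointer
 * (xchg() in the kernel), then drain the old child's backlog out
 * of the parent's count, as sch->q.qlen -= (*old)->q.qlen does. */
static struct toy_qdisc *graft(struct toy_qdisc **slot,
			       struct toy_qdisc *new_q,
			       unsigned int *parent_qlen)
{
	struct toy_qdisc *old = *slot;

	*slot = new_q;
	if (old)
		*parent_qlen -= old->qlen;	/* then qdisc_reset(old) */
	return old;
}

int main(void)
{
	struct toy_qdisc a = { .qlen = 3 }, b = { .qlen = 0 };
	struct toy_qdisc *child = &a;
	unsigned int parent_qlen = 3;

	graft(&child, &b, &parent_qlen);
	printf("parent qlen now %u\n", parent_qlen);	/* 0 */
	return 0;
}
```
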
@@ -1194,16 +1227,16 @@ static int htb_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
1194 return -ENOENT; 1227 return -ENOENT;
1195} 1228}
1196 1229
1197static struct Qdisc * htb_leaf(struct Qdisc *sch, unsigned long arg) 1230static struct Qdisc *htb_leaf(struct Qdisc *sch, unsigned long arg)
1198{ 1231{
1199 struct htb_class *cl = (struct htb_class*)arg; 1232 struct htb_class *cl = (struct htb_class *)arg;
1200 return (cl && !cl->level) ? cl->un.leaf.q : NULL; 1233 return (cl && !cl->level) ? cl->un.leaf.q : NULL;
1201} 1234}
1202 1235
1203static unsigned long htb_get(struct Qdisc *sch, u32 classid) 1236static unsigned long htb_get(struct Qdisc *sch, u32 classid)
1204{ 1237{
1205 struct htb_class *cl = htb_find(classid,sch); 1238 struct htb_class *cl = htb_find(classid, sch);
1206 if (cl) 1239 if (cl)
1207 cl->refcnt++; 1240 cl->refcnt++;
1208 return (unsigned long)cl; 1241 return (unsigned long)cl;
1209} 1242}
@@ -1218,7 +1251,7 @@ static void htb_destroy_filters(struct tcf_proto **fl)
1218 } 1251 }
1219} 1252}
1220 1253
1221static void htb_destroy_class(struct Qdisc* sch,struct htb_class *cl) 1254static void htb_destroy_class(struct Qdisc *sch, struct htb_class *cl)
1222{ 1255{
1223 struct htb_sched *q = qdisc_priv(sch); 1256 struct htb_sched *q = qdisc_priv(sch);
1224 if (!cl->level) { 1257 if (!cl->level) {
@@ -1228,44 +1261,44 @@ static void htb_destroy_class(struct Qdisc* sch,struct htb_class *cl)
1228 } 1261 }
1229 qdisc_put_rtab(cl->rate); 1262 qdisc_put_rtab(cl->rate);
1230 qdisc_put_rtab(cl->ceil); 1263 qdisc_put_rtab(cl->ceil);
1231 1264
1232 htb_destroy_filters (&cl->filter_list); 1265 htb_destroy_filters(&cl->filter_list);
1233 1266
1234 while (!list_empty(&cl->children)) 1267 while (!list_empty(&cl->children))
1235 htb_destroy_class (sch,list_entry(cl->children.next, 1268 htb_destroy_class(sch, list_entry(cl->children.next,
1236 struct htb_class,sibling)); 1269 struct htb_class, sibling));
1237 1270
1238 /* note: this delete may happen twice (see htb_delete) */ 1271 /* note: this delete may happen twice (see htb_delete) */
1239 list_del(&cl->hlist); 1272 list_del(&cl->hlist);
1240 list_del(&cl->sibling); 1273 list_del(&cl->sibling);
1241 1274
1242 if (cl->prio_activity) 1275 if (cl->prio_activity)
1243 htb_deactivate (q,cl); 1276 htb_deactivate(q, cl);
1244 1277
1245 if (cl->cmode != HTB_CAN_SEND) 1278 if (cl->cmode != HTB_CAN_SEND)
1246 rb_erase(&cl->pq_node,q->wait_pq+cl->level); 1279 rb_erase(&cl->pq_node, q->wait_pq + cl->level);
1247 1280
1248 kfree(cl); 1281 kfree(cl);
1249} 1282}
1250 1283
1251/* always called under BH & queue lock */ 1284/* always called under BH & queue lock */
1252static void htb_destroy(struct Qdisc* sch) 1285static void htb_destroy(struct Qdisc *sch)
1253{ 1286{
1254 struct htb_sched *q = qdisc_priv(sch); 1287 struct htb_sched *q = qdisc_priv(sch);
1255 1288
1256 del_timer_sync (&q->timer); 1289 del_timer_sync(&q->timer);
1257#ifdef HTB_RATECM 1290#ifdef HTB_RATECM
1258 del_timer_sync (&q->rttim); 1291 del_timer_sync(&q->rttim);
1259#endif 1292#endif
1260 /* This line used to be after the htb_destroy_class call below 1293 /* This line used to be after the htb_destroy_class call below
1261 and surprisingly it worked in 2.4. But it must precede it 1294 and surprisingly it worked in 2.4. But it must precede it
1262 because filters need their target class alive to be able to call 1295 because filters need their target class alive to be able to call
1263 unbind_filter on it (without an Oops). */ 1296 unbind_filter on it (without an Oops). */
1264 htb_destroy_filters(&q->filter_list); 1297 htb_destroy_filters(&q->filter_list);
1265 1298
1266 while (!list_empty(&q->root)) 1299 while (!list_empty(&q->root))
1267 htb_destroy_class (sch,list_entry(q->root.next, 1300 htb_destroy_class(sch, list_entry(q->root.next,
1268 struct htb_class,sibling)); 1301 struct htb_class, sibling));
1269 1302
1270 __skb_queue_purge(&q->direct_queue); 1303 __skb_queue_purge(&q->direct_queue);
1271} 1304}
@@ -1273,23 +1306,23 @@ static void htb_destroy(struct Qdisc* sch)
1273static int htb_delete(struct Qdisc *sch, unsigned long arg) 1306static int htb_delete(struct Qdisc *sch, unsigned long arg)
1274{ 1307{
1275 struct htb_sched *q = qdisc_priv(sch); 1308 struct htb_sched *q = qdisc_priv(sch);
1276 struct htb_class *cl = (struct htb_class*)arg; 1309 struct htb_class *cl = (struct htb_class *)arg;
1277 1310
1278 // TODO: why don't we allow deleting a subtree? references? does the 1311 // TODO: why don't we allow deleting a subtree? references? does the
1279 // tc subsys guarantee us that in htb_destroy it holds no class 1312 // tc subsys guarantee us that in htb_destroy it holds no class
1280 // refs so that we can remove children safely there? 1313 // refs so that we can remove children safely there?
1281 if (!list_empty(&cl->children) || cl->filter_cnt) 1314 if (!list_empty(&cl->children) || cl->filter_cnt)
1282 return -EBUSY; 1315 return -EBUSY;
1283 1316
1284 sch_tree_lock(sch); 1317 sch_tree_lock(sch);
1285 1318
1286 /* delete from hash and active; remainder in destroy_class */ 1319 /* delete from hash and active; remainder in destroy_class */
1287 list_del_init(&cl->hlist); 1320 list_del_init(&cl->hlist);
1288 if (cl->prio_activity) 1321 if (cl->prio_activity)
1289 htb_deactivate (q,cl); 1322 htb_deactivate(q, cl);
1290 1323
1291 if (--cl->refcnt == 0) 1324 if (--cl->refcnt == 0)
1292 htb_destroy_class(sch,cl); 1325 htb_destroy_class(sch, cl);
1293 1326
1294 sch_tree_unlock(sch); 1327 sch_tree_unlock(sch);
1295 return 0; 1328 return 0;
@@ -1297,41 +1330,44 @@ static int htb_delete(struct Qdisc *sch, unsigned long arg)
1297 1330
1298static void htb_put(struct Qdisc *sch, unsigned long arg) 1331static void htb_put(struct Qdisc *sch, unsigned long arg)
1299{ 1332{
1300 struct htb_class *cl = (struct htb_class*)arg; 1333 struct htb_class *cl = (struct htb_class *)arg;
1301 1334
1302 if (--cl->refcnt == 0) 1335 if (--cl->refcnt == 0)
1303 htb_destroy_class(sch,cl); 1336 htb_destroy_class(sch, cl);
1304} 1337}
1305 1338
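
htb_get and htb_put above are plain reference counting: get bumps refcnt, put drops it and destroys at zero, and htb_delete follows the same rule, so a class still pinned by an open handle outlives the delete request. A sketch of that lifecycle in userspace (toy types, no locking):

```c
#include <stdio.h>
#include <stdlib.h>

struct toy_class { int refcnt; };

/* Destruction happens wherever the count hits zero, whether that
 * is put or delete, mirroring htb_put/htb_delete above. */
static struct toy_class *toy_get(struct toy_class *cl)
{
	if (cl)
		cl->refcnt++;
	return cl;
}

static void toy_put(struct toy_class *cl)
{
	if (--cl->refcnt == 0) {
		printf("destroying class\n");
		free(cl);
	}
}

int main(void)
{
	struct toy_class *cl = malloc(sizeof(*cl));

	cl->refcnt = 1;		/* creation reference */
	toy_get(cl);		/* e.g. an open tc handle */
	toy_put(cl);		/* delete request: not freed yet */
	toy_put(cl);		/* last reference: freed here */
	return 0;
}
```
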
1306static int htb_change_class(struct Qdisc *sch, u32 classid, 1339static int htb_change_class(struct Qdisc *sch, u32 classid,
1307 u32 parentid, struct rtattr **tca, unsigned long *arg) 1340 u32 parentid, struct rtattr **tca,
1341 unsigned long *arg)
1308{ 1342{
1309 int err = -EINVAL; 1343 int err = -EINVAL;
1310 struct htb_sched *q = qdisc_priv(sch); 1344 struct htb_sched *q = qdisc_priv(sch);
1311 struct htb_class *cl = (struct htb_class*)*arg,*parent; 1345 struct htb_class *cl = (struct htb_class *)*arg, *parent;
1312 struct rtattr *opt = tca[TCA_OPTIONS-1]; 1346 struct rtattr *opt = tca[TCA_OPTIONS - 1];
1313 struct qdisc_rate_table *rtab = NULL, *ctab = NULL; 1347 struct qdisc_rate_table *rtab = NULL, *ctab = NULL;
1314 struct rtattr *tb[TCA_HTB_RTAB]; 1348 struct rtattr *tb[TCA_HTB_RTAB];
1315 struct tc_htb_opt *hopt; 1349 struct tc_htb_opt *hopt;
1316 1350
1317 /* extract all subattrs from opt attr */ 1351 /* extract all subattrs from opt attr */
1318 if (!opt || rtattr_parse_nested(tb, TCA_HTB_RTAB, opt) || 1352 if (!opt || rtattr_parse_nested(tb, TCA_HTB_RTAB, opt) ||
1319 tb[TCA_HTB_PARMS-1] == NULL || 1353 tb[TCA_HTB_PARMS - 1] == NULL ||
1320 RTA_PAYLOAD(tb[TCA_HTB_PARMS-1]) < sizeof(*hopt)) 1354 RTA_PAYLOAD(tb[TCA_HTB_PARMS - 1]) < sizeof(*hopt))
1321 goto failure; 1355 goto failure;
1322
1323 parent = parentid == TC_H_ROOT ? NULL : htb_find (parentid,sch);
1324 1356
1325 hopt = RTA_DATA(tb[TCA_HTB_PARMS-1]); 1357 parent = parentid == TC_H_ROOT ? NULL : htb_find(parentid, sch);
1358
1359 hopt = RTA_DATA(tb[TCA_HTB_PARMS - 1]);
1326 1360
1327 rtab = qdisc_get_rtab(&hopt->rate, tb[TCA_HTB_RTAB-1]); 1361 rtab = qdisc_get_rtab(&hopt->rate, tb[TCA_HTB_RTAB - 1]);
1328 ctab = qdisc_get_rtab(&hopt->ceil, tb[TCA_HTB_CTAB-1]); 1362 ctab = qdisc_get_rtab(&hopt->ceil, tb[TCA_HTB_CTAB - 1]);
1329 if (!rtab || !ctab) goto failure; 1363 if (!rtab || !ctab)
1364 goto failure;
1330 1365
1331 if (!cl) { /* new class */ 1366 if (!cl) { /* new class */
1332 struct Qdisc *new_q; 1367 struct Qdisc *new_q;
1333 /* check for valid classid */ 1368 /* check for valid classid */
1334 if (!classid || TC_H_MAJ(classid^sch->handle) || htb_find(classid,sch)) 1369 if (!classid || TC_H_MAJ(classid ^ sch->handle)
1370 || htb_find(classid, sch))
1335 goto failure; 1371 goto failure;
1336 1372
1337 /* check maximal depth */ 1373 /* check maximal depth */
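
The classid validation above relies on TC_H_MAJ(classid ^ sch->handle): XOR cancels identical bits, so the masked result is nonzero exactly when the 16-bit major halves differ, i.e. when the new class does not belong to this qdisc. A worked check using the TC_H_MAJ definition from the rtnetlink headers:

```c
#include <assert.h>

#define TC_H_MAJ_MASK 0xFFFF0000U
#define TC_H_MAJ(h) ((h) & TC_H_MAJ_MASK)

int main(void)
{
	unsigned int handle = 0x00010000;	/* qdisc "1:" */

	/* same major: XOR leaves only minor bits, MAJ masks to 0 */
	assert(TC_H_MAJ(0x00010005 ^ handle) == 0);	/* 1:5 is ok */
	/* different major: MAJ is nonzero, classid is rejected */
	assert(TC_H_MAJ(0x00020005 ^ handle) != 0);	/* 2:5 fails */
	return 0;
}
```
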
@@ -1342,7 +1378,7 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
1342 err = -ENOBUFS; 1378 err = -ENOBUFS;
1343 if ((cl = kzalloc(sizeof(*cl), GFP_KERNEL)) == NULL) 1379 if ((cl = kzalloc(sizeof(*cl), GFP_KERNEL)) == NULL)
1344 goto failure; 1380 goto failure;
1345 1381
1346 cl->refcnt = 1; 1382 cl->refcnt = 1;
1347 INIT_LIST_HEAD(&cl->sibling); 1383 INIT_LIST_HEAD(&cl->sibling);
1348 INIT_LIST_HEAD(&cl->hlist); 1384 INIT_LIST_HEAD(&cl->hlist);
@@ -1357,46 +1393,53 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
1357 if (parent && !parent->level) { 1393 if (parent && !parent->level) {
1358 /* turn parent into inner node */ 1394 /* turn parent into inner node */
1359 sch->q.qlen -= parent->un.leaf.q->q.qlen; 1395 sch->q.qlen -= parent->un.leaf.q->q.qlen;
1360 qdisc_destroy (parent->un.leaf.q); 1396 qdisc_destroy(parent->un.leaf.q);
1361 if (parent->prio_activity) 1397 if (parent->prio_activity)
1362 htb_deactivate (q,parent); 1398 htb_deactivate(q, parent);
1363 1399
1364 /* remove from evt list because of level change */ 1400 /* remove from evt list because of level change */
1365 if (parent->cmode != HTB_CAN_SEND) { 1401 if (parent->cmode != HTB_CAN_SEND) {
1366 rb_erase(&parent->pq_node,q->wait_pq); 1402 rb_erase(&parent->pq_node, q->wait_pq);
1367 parent->cmode = HTB_CAN_SEND; 1403 parent->cmode = HTB_CAN_SEND;
1368 } 1404 }
1369 parent->level = (parent->parent ? parent->parent->level 1405 parent->level = (parent->parent ? parent->parent->level
1370 : TC_HTB_MAXDEPTH) - 1; 1406 : TC_HTB_MAXDEPTH) - 1;
1371 memset (&parent->un.inner,0,sizeof(parent->un.inner)); 1407 memset(&parent->un.inner, 0, sizeof(parent->un.inner));
1372 } 1408 }
1373 /* the leaf (us) needs an elementary qdisc */ 1409 /* the leaf (us) needs an elementary qdisc */
1374 cl->un.leaf.q = new_q ? new_q : &noop_qdisc; 1410 cl->un.leaf.q = new_q ? new_q : &noop_qdisc;
1375 1411
1376 cl->classid = classid; cl->parent = parent; 1412 cl->classid = classid;
1413 cl->parent = parent;
1377 1414
1378 /* set class to be in HTB_CAN_SEND state */ 1415 /* set class to be in HTB_CAN_SEND state */
1379 cl->tokens = hopt->buffer; 1416 cl->tokens = hopt->buffer;
1380 cl->ctokens = hopt->cbuffer; 1417 cl->ctokens = hopt->cbuffer;
1381 cl->mbuffer = PSCHED_JIFFIE2US(HZ*60); /* 1min */ 1418 cl->mbuffer = PSCHED_JIFFIE2US(HZ * 60); /* 1min */
1382 PSCHED_GET_TIME(cl->t_c); 1419 PSCHED_GET_TIME(cl->t_c);
1383 cl->cmode = HTB_CAN_SEND; 1420 cl->cmode = HTB_CAN_SEND;
1384 1421
1385 /* attach to the hash list and parent's family */ 1422 /* attach to the hash list and parent's family */
1386 list_add_tail(&cl->hlist, q->hash+htb_hash(classid)); 1423 list_add_tail(&cl->hlist, q->hash + htb_hash(classid));
1387 list_add_tail(&cl->sibling, parent ? &parent->children : &q->root); 1424 list_add_tail(&cl->sibling,
1388 } else sch_tree_lock(sch); 1425 parent ? &parent->children : &q->root);
1426 } else
1427 sch_tree_lock(sch);
1389 1428
1390 /* there used to be a nasty bug here: we have to check that the node 1429 /* there used to be a nasty bug here: we have to check that the node
1391 is really a leaf before changing cl->un.leaf! */ 1430 is really a leaf before changing cl->un.leaf! */
1392 if (!cl->level) { 1431 if (!cl->level) {
1393 cl->un.leaf.quantum = rtab->rate.rate / q->rate2quantum; 1432 cl->un.leaf.quantum = rtab->rate.rate / q->rate2quantum;
1394 if (!hopt->quantum && cl->un.leaf.quantum < 1000) { 1433 if (!hopt->quantum && cl->un.leaf.quantum < 1000) {
1395 printk(KERN_WARNING "HTB: quantum of class %X is small. Consider r2q change.\n", cl->classid); 1434 printk(KERN_WARNING
1435 "HTB: quantum of class %X is small. Consider r2q change.\n",
1436 cl->classid);
1396 cl->un.leaf.quantum = 1000; 1437 cl->un.leaf.quantum = 1000;
1397 } 1438 }
1398 if (!hopt->quantum && cl->un.leaf.quantum > 200000) { 1439 if (!hopt->quantum && cl->un.leaf.quantum > 200000) {
1399 printk(KERN_WARNING "HTB: quantum of class %X is big. Consider r2q change.\n", cl->classid); 1440 printk(KERN_WARNING
1441 "HTB: quantum of class %X is big. Consider r2q change.\n",
1442 cl->classid);
1400 cl->un.leaf.quantum = 200000; 1443 cl->un.leaf.quantum = 200000;
1401 } 1444 }
1402 if (hopt->quantum) 1445 if (hopt->quantum)
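
When the user supplies no quantum, the code above derives it as rate / rate2quantum and clamps it to [1000, 200000] bytes, warning either way. A compact sketch of that derivation (function name and argument units are illustrative; rate is taken in bytes per second):

```c
#include <stdio.h>

/* Sketch of the quantum logic above: derive from rate/r2q, clamp
 * to [1000, 200000] only when the user supplied no quantum. */
static int htb_quantum(int rate_Bps, int r2q, int user_quantum)
{
	int quantum = rate_Bps / r2q;

	if (user_quantum)
		return user_quantum;
	if (quantum < 1000)
		quantum = 1000;		/* "quantum ... is small" */
	if (quantum > 200000)
		quantum = 200000;	/* "quantum ... is big" */
	return quantum;
}

int main(void)
{
	printf("%d\n", htb_quantum(12500, 10, 0));	/* 1250 */
	printf("%d\n", htb_quantum(1250, 10, 0));	/* clamped to 1000 */
	return 0;
}
```
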
@@ -1407,16 +1450,22 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
1407 1450
1408 cl->buffer = hopt->buffer; 1451 cl->buffer = hopt->buffer;
1409 cl->cbuffer = hopt->cbuffer; 1452 cl->cbuffer = hopt->cbuffer;
1410 if (cl->rate) qdisc_put_rtab(cl->rate); cl->rate = rtab; 1453 if (cl->rate)
1411 if (cl->ceil) qdisc_put_rtab(cl->ceil); cl->ceil = ctab; 1454 qdisc_put_rtab(cl->rate);
1455 cl->rate = rtab;
1456 if (cl->ceil)
1457 qdisc_put_rtab(cl->ceil);
1458 cl->ceil = ctab;
1412 sch_tree_unlock(sch); 1459 sch_tree_unlock(sch);
1413 1460
1414 *arg = (unsigned long)cl; 1461 *arg = (unsigned long)cl;
1415 return 0; 1462 return 0;
1416 1463
1417failure: 1464failure:
1418 if (rtab) qdisc_put_rtab(rtab); 1465 if (rtab)
1419 if (ctab) qdisc_put_rtab(ctab); 1466 qdisc_put_rtab(rtab);
1467 if (ctab)
1468 qdisc_put_rtab(ctab);
1420 return err; 1469 return err;
1421} 1470}
1422 1471
@@ -1430,23 +1479,23 @@ static struct tcf_proto **htb_find_tcf(struct Qdisc *sch, unsigned long arg)
1430} 1479}
1431 1480
1432static unsigned long htb_bind_filter(struct Qdisc *sch, unsigned long parent, 1481static unsigned long htb_bind_filter(struct Qdisc *sch, unsigned long parent,
1433 u32 classid) 1482 u32 classid)
1434{ 1483{
1435 struct htb_sched *q = qdisc_priv(sch); 1484 struct htb_sched *q = qdisc_priv(sch);
1436 struct htb_class *cl = htb_find (classid,sch); 1485 struct htb_class *cl = htb_find(classid, sch);
1437 1486
1438 /*if (cl && !cl->level) return 0; 1487 /*if (cl && !cl->level) return 0;
1439 The line above used to be there to prevent attaching filters to 1488 The line above used to be there to prevent attaching filters to
1440 leaves. But at least the tc_index filter uses this just to get a 1489 leaves. But at least the tc_index filter uses this just to get a
1441 class for other reasons, so we have to allow it. 1490 class for other reasons, so we have to allow it.
1442 ---- 1491 ----
1443 19.6.2002 As Werner explained, it is OK - binding a filter is just 1492 19.6.2002 As Werner explained, it is OK - binding a filter is just
1444 another way to "lock" the class - unlike "get", this lock can 1493 another way to "lock" the class - unlike "get", this lock can
1445 be broken by the class during destroy, IIUC. 1494 be broken by the class during destroy, IIUC.
1446 */ 1495 */
1447 if (cl) 1496 if (cl)
1448 cl->filter_cnt++; 1497 cl->filter_cnt++;
1449 else 1498 else
1450 q->filter_cnt++; 1499 q->filter_cnt++;
1451 return (unsigned long)cl; 1500 return (unsigned long)cl;
1452} 1501}
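
As the comment above says, binding a filter "locks" the class: htb_bind_filter counts bound filters per class (or on the qdisc when no class matched), and htb_delete earlier refuses to remove a class whose filter_cnt is nonzero. A small model of that pinning (toy types):

```c
#include <assert.h>
#include <errno.h>

struct toy_class { int filter_cnt; };

/* A nonzero filter_cnt makes delete fail with -EBUSY, so a bound
 * filter pins its class, as in htb_delete above. */
static int toy_delete(struct toy_class *cl)
{
	if (cl->filter_cnt)
		return -EBUSY;
	return 0;
}

int main(void)
{
	struct toy_class cl = { 0 };

	cl.filter_cnt++;			/* htb_bind_filter */
	assert(toy_delete(&cl) == -EBUSY);
	cl.filter_cnt--;			/* htb_unbind_filter */
	assert(toy_delete(&cl) == 0);
	return 0;
}
```
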
@@ -1456,9 +1505,9 @@ static void htb_unbind_filter(struct Qdisc *sch, unsigned long arg)
1456 struct htb_sched *q = qdisc_priv(sch); 1505 struct htb_sched *q = qdisc_priv(sch);
1457 struct htb_class *cl = (struct htb_class *)arg; 1506 struct htb_class *cl = (struct htb_class *)arg;
1458 1507
1459 if (cl) 1508 if (cl)
1460 cl->filter_cnt--; 1509 cl->filter_cnt--;
1461 else 1510 else
1462 q->filter_cnt--; 1511 q->filter_cnt--;
1463} 1512}
1464 1513
@@ -1472,8 +1521,9 @@ static void htb_walk(struct Qdisc *sch, struct qdisc_walker *arg)
1472 1521
1473 for (i = 0; i < HTB_HSIZE; i++) { 1522 for (i = 0; i < HTB_HSIZE; i++) {
1474 struct list_head *p; 1523 struct list_head *p;
1475 list_for_each (p,q->hash+i) { 1524 list_for_each(p, q->hash + i) {
1476 struct htb_class *cl = list_entry(p,struct htb_class,hlist); 1525 struct htb_class *cl =
1526 list_entry(p, struct htb_class, hlist);
1477 if (arg->count < arg->skip) { 1527 if (arg->count < arg->skip) {
1478 arg->count++; 1528 arg->count++;
1479 continue; 1529 continue;
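
htb_walk above follows the standard qdisc_walker protocol: count every class, skip the first arg->skip of them, call the walker's callback on the rest, and stop the whole walk as soon as the callback reports an error. A self-contained sketch of that skip/count discipline over a plain array (names are illustrative):

```c
#include <stdio.h>

struct toy_walker {
	int skip, count, stop;
	int (*fn)(int item, struct toy_walker *w);
};

/* Same shape as the loop in htb_walk: items below 'skip' are only
 * counted; a negative callback return sets 'stop' and ends the walk. */
static void walk(const int *items, int n, struct toy_walker *w)
{
	int i;

	for (i = 0; i < n && !w->stop; i++) {
		if (w->count < w->skip) {
			w->count++;
			continue;
		}
		if (w->fn(items[i], w) < 0) {
			w->stop = 1;
			break;
		}
		w->count++;
	}
}

static int print_item(int item, struct toy_walker *w)
{
	(void)w;
	printf("class %d\n", item);
	return 0;
}

int main(void)
{
	int classes[] = { 10, 11, 12 };
	struct toy_walker w = { .skip = 1, .fn = print_item };

	walk(classes, 3, &w);	/* prints 11 and 12 */
	return 0;
}
```
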
@@ -1521,12 +1571,13 @@ static struct Qdisc_ops htb_qdisc_ops = {
1521 1571
1522static int __init htb_module_init(void) 1572static int __init htb_module_init(void)
1523{ 1573{
1524 return register_qdisc(&htb_qdisc_ops); 1574 return register_qdisc(&htb_qdisc_ops);
1525} 1575}
1526static void __exit htb_module_exit(void) 1576static void __exit htb_module_exit(void)
1527{ 1577{
1528 unregister_qdisc(&htb_qdisc_ops); 1578 unregister_qdisc(&htb_qdisc_ops);
1529} 1579}
1580
1530module_init(htb_module_init) 1581module_init(htb_module_init)
1531module_exit(htb_module_exit) 1582module_exit(htb_module_exit)
1532MODULE_LICENSE("GPL"); 1583MODULE_LICENSE("GPL");