 net/sched/sch_htb.c | 49 +++++++++++++++++++++++++++----------------------
 1 file changed, 27 insertions(+), 22 deletions(-)
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
index 6c6cac65255f..a686b9511b05 100644
--- a/net/sched/sch_htb.c
+++ b/net/sched/sch_htb.c
@@ -104,7 +104,7 @@ struct htb_class {
 	/* topology */
 	int level;		/* our level (see above) */
 	struct htb_class *parent;	/* parent class */
-	struct list_head hlist;		/* classid hash list item */
+	struct hlist_node hlist;	/* classid hash list item */
 	struct list_head sibling;	/* sibling list item */
 	struct list_head children;	/* children list */
 
@@ -163,8 +163,8 @@ static inline long L2T(struct htb_class *cl, struct qdisc_rate_table *rate,
 
 struct htb_sched {
 	struct list_head root;	/* root classes list */
-	struct list_head hash[HTB_HSIZE];	/* hashed by classid */
-	struct list_head drops[TC_HTB_NUMPRIO]; /* active leaves (for drops) */
+	struct hlist_head hash[HTB_HSIZE];	/* hashed by classid */
+	struct list_head drops[TC_HTB_NUMPRIO];/* active leaves (for drops) */
 
 	/* self list - roots of self generating tree */
 	struct rb_root row[TC_HTB_MAXDEPTH][TC_HTB_NUMPRIO];
@@ -220,12 +220,13 @@ static inline int htb_hash(u32 h)
 static inline struct htb_class *htb_find(u32 handle, struct Qdisc *sch)
 {
 	struct htb_sched *q = qdisc_priv(sch);
-	struct list_head *p;
+	struct hlist_node *p;
+	struct htb_class *cl;
+
 	if (TC_H_MAJ(handle) != sch->handle)
 		return NULL;
 
-	list_for_each(p, q->hash + htb_hash(handle)) {
-		struct htb_class *cl = list_entry(p, struct htb_class, hlist);
+	hlist_for_each_entry(cl, p, q->hash + htb_hash(handle), hlist) {
 		if (cl->classid == handle)
 			return cl;
 	}
@@ -675,7 +676,9 @@ static void htb_rate_timer(unsigned long arg)
 {
 	struct Qdisc *sch = (struct Qdisc *)arg;
 	struct htb_sched *q = qdisc_priv(sch);
-	struct list_head *p;
+	struct hlist_node *p;
+	struct htb_class *cl;
+
 
 	/* lock queue so that we can muck with it */
 	spin_lock_bh(&sch->dev->queue_lock);
@@ -686,9 +689,8 @@ static void htb_rate_timer(unsigned long arg)
 	/* scan and recompute one bucket at time */
 	if (++q->recmp_bucket >= HTB_HSIZE)
 		q->recmp_bucket = 0;
-	list_for_each(p, q->hash + q->recmp_bucket) {
-		struct htb_class *cl = list_entry(p, struct htb_class, hlist);
 
+	hlist_for_each_entry(cl,p, q->hash + q->recmp_bucket, hlist) {
 		RT_GEN(cl->sum_bytes, cl->rate_bytes);
 		RT_GEN(cl->sum_packets, cl->rate_packets);
 	}
@@ -1041,10 +1043,10 @@ static void htb_reset(struct Qdisc *sch)
 	int i;
 
 	for (i = 0; i < HTB_HSIZE; i++) {
-		struct list_head *p;
-		list_for_each(p, q->hash + i) {
-			struct htb_class *cl =
-			    list_entry(p, struct htb_class, hlist);
+		struct hlist_node *p;
+		struct htb_class *cl;
+
+		hlist_for_each_entry(cl, p, q->hash + i, hlist) {
 			if (cl->level)
 				memset(&cl->un.inner, 0, sizeof(cl->un.inner));
 			else {
@@ -1091,7 +1093,7 @@ static int htb_init(struct Qdisc *sch, struct rtattr *opt)
 
 	INIT_LIST_HEAD(&q->root);
 	for (i = 0; i < HTB_HSIZE; i++)
-		INIT_LIST_HEAD(q->hash + i);
+		INIT_HLIST_HEAD(q->hash + i);
 	for (i = 0; i < TC_HTB_NUMPRIO; i++)
 		INIT_LIST_HEAD(q->drops + i);
 
@@ -1269,7 +1271,8 @@ static void htb_destroy_class(struct Qdisc *sch, struct htb_class *cl)
 					  struct htb_class, sibling));
 
 	/* note: this delete may happen twice (see htb_delete) */
-	list_del(&cl->hlist);
+	if (!hlist_unhashed(&cl->hlist))
+		hlist_del(&cl->hlist);
 	list_del(&cl->sibling);
 
 	if (cl->prio_activity)
@@ -1317,7 +1320,9 @@ static int htb_delete(struct Qdisc *sch, unsigned long arg)
 	sch_tree_lock(sch);
 
 	/* delete from hash and active; remainder in destroy_class */
-	list_del_init(&cl->hlist);
+	if (!hlist_unhashed(&cl->hlist))
+		hlist_del(&cl->hlist);
+
 	if (cl->prio_activity)
 		htb_deactivate(q, cl);
 
@@ -1381,7 +1386,7 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
 
 		cl->refcnt = 1;
 		INIT_LIST_HEAD(&cl->sibling);
-		INIT_LIST_HEAD(&cl->hlist);
+		INIT_HLIST_NODE(&cl->hlist);
 		INIT_LIST_HEAD(&cl->children);
 		INIT_LIST_HEAD(&cl->un.leaf.drop_list);
 
@@ -1420,7 +1425,7 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
 		cl->cmode = HTB_CAN_SEND;
 
 		/* attach to the hash list and parent's family */
-		list_add_tail(&cl->hlist, q->hash + htb_hash(classid));
+		hlist_add_head(&cl->hlist, q->hash + htb_hash(classid));
 		list_add_tail(&cl->sibling,
 			      parent ? &parent->children : &q->root);
 	} else
@@ -1520,10 +1525,10 @@ static void htb_walk(struct Qdisc *sch, struct qdisc_walker *arg)
 		return;
 
 	for (i = 0; i < HTB_HSIZE; i++) {
-		struct list_head *p;
-		list_for_each(p, q->hash + i) {
-			struct htb_class *cl =
-			    list_entry(p, struct htb_class, hlist);
+		struct hlist_node *p;
+		struct htb_class *cl;
+
+		hlist_for_each_entry(cl, p, q->hash + i, hlist) {
 			if (arg->count < arg->skip) {
 				arg->count++;
 				continue;
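
Not part of the patch: the sketch below is a minimal userspace illustration of the hlist pattern the conversion adopts. The hlist_head/hlist_node helpers here are simplified re-implementations for illustration only (the real definitions live in the kernel's <linux/list.h>), and toy_class, toy_hash(), HSIZE and the lookup loop in main() are hypothetical stand-ins for struct htb_class, htb_hash(), HTB_HSIZE and htb_find(). The four-argument hlist_for_each_entry(cl, p, head, hlist) used in the patch expands to essentially the cursor-plus-container_of walk shown here.

#include <stdio.h>
#include <stddef.h>

/* Simplified stand-ins for the kernel's hlist types: a bucket head is a
 * single pointer, a node keeps a back-pointer to whatever points at it. */
struct hlist_node { struct hlist_node *next, **pprev; };
struct hlist_head { struct hlist_node *first; };

static void INIT_HLIST_NODE(struct hlist_node *n)
{
	n->next = NULL;
	n->pprev = NULL;
}

static int hlist_unhashed(const struct hlist_node *n)
{
	return !n->pprev;		/* never added, or reset after removal */
}

static void hlist_add_head(struct hlist_node *n, struct hlist_head *h)
{
	n->next = h->first;
	if (h->first)
		h->first->pprev = &n->next;
	h->first = n;
	n->pprev = &h->first;
}

/* Simplified: resets the node so it reads as unhashed again.  The kernel's
 * hlist_del() poisons the pointers instead; hlist_del_init() resets them. */
static void hlist_del(struct hlist_node *n)
{
	*n->pprev = n->next;
	if (n->next)
		n->next->pprev = n->pprev;
	INIT_HLIST_NODE(n);
}

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/* Hypothetical stand-in for struct htb_class, keeping only the hash fields. */
struct toy_class {
	unsigned int classid;
	struct hlist_node hlist;	/* classid hash chain, as in the patch */
};

#define HSIZE 4				/* stand-in for HTB_HSIZE */
static struct hlist_head hash[HSIZE];	/* zero-initialized: all buckets empty */

static unsigned int toy_hash(unsigned int classid)
{
	return classid % HSIZE;		/* the real htb_hash() mixes classid bits */
}

int main(void)
{
	struct toy_class a = { .classid = 0x10001 }, b = { .classid = 0x10005 };
	struct hlist_node *p;

	INIT_HLIST_NODE(&a.hlist);
	INIT_HLIST_NODE(&b.hlist);
	hlist_add_head(&a.hlist, &hash[toy_hash(a.classid)]);
	hlist_add_head(&b.hlist, &hash[toy_hash(b.classid)]);

	/* Lookup shaped like the new htb_find(): walk one bucket, map each
	 * node back to its class with container_of(). */
	for (p = hash[toy_hash(0x10005)].first; p; p = p->next) {
		struct toy_class *cl = container_of(p, struct toy_class, hlist);
		if (cl->classid == 0x10005)
			printf("found class %#x\n", cl->classid);
	}

	/* Guarded unlink shaped like htb_delete()/htb_destroy_class(): a node
	 * that is not on a chain is simply skipped. */
	if (!hlist_unhashed(&b.hlist))
		hlist_del(&b.hlist);
	if (!hlist_unhashed(&b.hlist))	/* second attempt is a no-op here */
		hlist_del(&b.hlist);

	printf("head sizes: hlist %zu bytes vs. list %zu bytes\n",
	       sizeof(struct hlist_head), 2 * sizeof(void *));
	return 0;
}

The motivation for the conversion is visible in the sketch: an hlist_head is a single pointer, so the HTB_HSIZE bucket array is roughly half the size of a list_head-based one, and a node set up with INIT_HLIST_NODE() reads as unhashed until it is added, which is what the guarded hlist_del() calls in htb_delete() and htb_destroy_class() rely on for a class that is not currently on a hash chain.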