about summary refs log tree commit diff stats
path: root/net/sched
diff options
context:
space:
mode:
authorSasha Levin <sasha.levin@oracle.com>2013-02-27 20:06:00 -0500
committerLinus Torvalds <torvalds@linux-foundation.org>2013-02-27 22:10:24 -0500
commitb67bfe0d42cac56c512dd5da4b1b347a23f4b70a (patch)
tree3d465aea12b97683f26ffa38eba8744469de9997 /net/sched
parent1e142b29e210b5dfb2deeb6ce2210b60af16d2a6 (diff)
hlist: drop the node parameter from iterators
I'm not sure why, but the hlist for each entry iterators were conceived list_for_each_entry(pos, head, member) The hlist ones were greedy and wanted an extra parameter: hlist_for_each_entry(tpos, pos, head, member) Why did they need an extra pos parameter? I'm not quite sure. Not only they don't really need it, it also prevents the iterator from looking exactly like the list iterator, which is unfortunate. Besides the semantic patch, there was some manual work required: - Fix up the actual hlist iterators in linux/list.h - Fix up the declaration of other iterators based on the hlist ones. - A very small amount of places were using the 'node' parameter, this was modified to use 'obj->member' instead. - Coccinelle didn't handle the hlist_for_each_entry_safe iterator properly, so those had to be fixed up manually. The semantic patch which is mostly the work of Peter Senna Tschudin is here: @@ iterator name hlist_for_each_entry, hlist_for_each_entry_continue, hlist_for_each_entry_from, hlist_for_each_entry_rcu, hlist_for_each_entry_rcu_bh, hlist_for_each_entry_continue_rcu_bh, for_each_busy_worker, ax25_uid_for_each, ax25_for_each, inet_bind_bucket_for_each, sctp_for_each_hentry, sk_for_each, sk_for_each_rcu, sk_for_each_from, sk_for_each_safe, sk_for_each_bound, hlist_for_each_entry_safe, hlist_for_each_entry_continue_rcu, nr_neigh_for_each, nr_neigh_for_each_safe, nr_node_for_each, nr_node_for_each_safe, for_each_gfn_indirect_valid_sp, for_each_gfn_sp, for_each_host; type T; expression a,c,d,e; identifier b; statement S; @@ -T b; <+... 
when != b ( hlist_for_each_entry(a, - b, c, d) S | hlist_for_each_entry_continue(a, - b, c) S | hlist_for_each_entry_from(a, - b, c) S | hlist_for_each_entry_rcu(a, - b, c, d) S | hlist_for_each_entry_rcu_bh(a, - b, c, d) S | hlist_for_each_entry_continue_rcu_bh(a, - b, c) S | for_each_busy_worker(a, c, - b, d) S | ax25_uid_for_each(a, - b, c) S | ax25_for_each(a, - b, c) S | inet_bind_bucket_for_each(a, - b, c) S | sctp_for_each_hentry(a, - b, c) S | sk_for_each(a, - b, c) S | sk_for_each_rcu(a, - b, c) S | sk_for_each_from -(a, b) +(a) S + sk_for_each_from(a) S | sk_for_each_safe(a, - b, c, d) S | sk_for_each_bound(a, - b, c) S | hlist_for_each_entry_safe(a, - b, c, d, e) S | hlist_for_each_entry_continue_rcu(a, - b, c) S | nr_neigh_for_each(a, - b, c) S | nr_neigh_for_each_safe(a, - b, c, d) S | nr_node_for_each(a, - b, c) S | nr_node_for_each_safe(a, - b, c, d) S | - for_each_gfn_sp(a, c, d, b) S + for_each_gfn_sp(a, c, d) S | - for_each_gfn_indirect_valid_sp(a, c, d, b) S + for_each_gfn_indirect_valid_sp(a, c, d) S | for_each_host(a, - b, c) S | for_each_host_safe(a, - b, c, d) S | for_each_mesh_entry(a, - b, c, d) S ) ...+> [akpm@linux-foundation.org: drop bogus change from net/ipv4/raw.c] [akpm@linux-foundation.org: drop bogus hunk from net/ipv6/raw.c] [akpm@linux-foundation.org: checkpatch fixes] [akpm@linux-foundation.org: fix warnings] [akpm@linux-foudnation.org: redo intrusive kvm changes] Tested-by: Peter Senna Tschudin <peter.senna@gmail.com> Acked-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com> Signed-off-by: Sasha Levin <sasha.levin@oracle.com> Cc: Wu Fengguang <fengguang.wu@intel.com> Cc: Marcelo Tosatti <mtosatti@redhat.com> Cc: Gleb Natapov <gleb@redhat.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'net/sched')
-rw-r--r--net/sched/sch_api.c4
-rw-r--r--net/sched/sch_cbq.c18
-rw-r--r--net/sched/sch_drr.c10
-rw-r--r--net/sched/sch_hfsc.c15
-rw-r--r--net/sched/sch_htb.c12
-rw-r--r--net/sched/sch_qfq.c16
6 files changed, 30 insertions, 45 deletions
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index a181b484812a..c297e2a8e2a1 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -545,7 +545,7 @@ static void qdisc_class_hash_free(struct hlist_head *h, unsigned int n)
545void qdisc_class_hash_grow(struct Qdisc *sch, struct Qdisc_class_hash *clhash) 545void qdisc_class_hash_grow(struct Qdisc *sch, struct Qdisc_class_hash *clhash)
546{ 546{
547 struct Qdisc_class_common *cl; 547 struct Qdisc_class_common *cl;
548 struct hlist_node *n, *next; 548 struct hlist_node *next;
549 struct hlist_head *nhash, *ohash; 549 struct hlist_head *nhash, *ohash;
550 unsigned int nsize, nmask, osize; 550 unsigned int nsize, nmask, osize;
551 unsigned int i, h; 551 unsigned int i, h;
@@ -564,7 +564,7 @@ void qdisc_class_hash_grow(struct Qdisc *sch, struct Qdisc_class_hash *clhash)
564 564
565 sch_tree_lock(sch); 565 sch_tree_lock(sch);
566 for (i = 0; i < osize; i++) { 566 for (i = 0; i < osize; i++) {
567 hlist_for_each_entry_safe(cl, n, next, &ohash[i], hnode) { 567 hlist_for_each_entry_safe(cl, next, &ohash[i], hnode) {
568 h = qdisc_class_hash(cl->classid, nmask); 568 h = qdisc_class_hash(cl->classid, nmask);
569 hlist_add_head(&cl->hnode, &nhash[h]); 569 hlist_add_head(&cl->hnode, &nhash[h]);
570 } 570 }
diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c
index 0e19948470b8..13aa47aa2ffb 100644
--- a/net/sched/sch_cbq.c
+++ b/net/sched/sch_cbq.c
@@ -1041,14 +1041,13 @@ static void cbq_adjust_levels(struct cbq_class *this)
1041static void cbq_normalize_quanta(struct cbq_sched_data *q, int prio) 1041static void cbq_normalize_quanta(struct cbq_sched_data *q, int prio)
1042{ 1042{
1043 struct cbq_class *cl; 1043 struct cbq_class *cl;
1044 struct hlist_node *n;
1045 unsigned int h; 1044 unsigned int h;
1046 1045
1047 if (q->quanta[prio] == 0) 1046 if (q->quanta[prio] == 0)
1048 return; 1047 return;
1049 1048
1050 for (h = 0; h < q->clhash.hashsize; h++) { 1049 for (h = 0; h < q->clhash.hashsize; h++) {
1051 hlist_for_each_entry(cl, n, &q->clhash.hash[h], common.hnode) { 1050 hlist_for_each_entry(cl, &q->clhash.hash[h], common.hnode) {
1052 /* BUGGGG... Beware! This expression suffer of 1051 /* BUGGGG... Beware! This expression suffer of
1053 * arithmetic overflows! 1052 * arithmetic overflows!
1054 */ 1053 */
@@ -1087,10 +1086,9 @@ static void cbq_sync_defmap(struct cbq_class *cl)
1087 continue; 1086 continue;
1088 1087
1089 for (h = 0; h < q->clhash.hashsize; h++) { 1088 for (h = 0; h < q->clhash.hashsize; h++) {
1090 struct hlist_node *n;
1091 struct cbq_class *c; 1089 struct cbq_class *c;
1092 1090
1093 hlist_for_each_entry(c, n, &q->clhash.hash[h], 1091 hlist_for_each_entry(c, &q->clhash.hash[h],
1094 common.hnode) { 1092 common.hnode) {
1095 if (c->split == split && c->level < level && 1093 if (c->split == split && c->level < level &&
1096 c->defmap & (1<<i)) { 1094 c->defmap & (1<<i)) {
@@ -1210,7 +1208,6 @@ cbq_reset(struct Qdisc *sch)
1210{ 1208{
1211 struct cbq_sched_data *q = qdisc_priv(sch); 1209 struct cbq_sched_data *q = qdisc_priv(sch);
1212 struct cbq_class *cl; 1210 struct cbq_class *cl;
1213 struct hlist_node *n;
1214 int prio; 1211 int prio;
1215 unsigned int h; 1212 unsigned int h;
1216 1213
@@ -1228,7 +1225,7 @@ cbq_reset(struct Qdisc *sch)
1228 q->active[prio] = NULL; 1225 q->active[prio] = NULL;
1229 1226
1230 for (h = 0; h < q->clhash.hashsize; h++) { 1227 for (h = 0; h < q->clhash.hashsize; h++) {
1231 hlist_for_each_entry(cl, n, &q->clhash.hash[h], common.hnode) { 1228 hlist_for_each_entry(cl, &q->clhash.hash[h], common.hnode) {
1232 qdisc_reset(cl->q); 1229 qdisc_reset(cl->q);
1233 1230
1234 cl->next_alive = NULL; 1231 cl->next_alive = NULL;
@@ -1697,7 +1694,7 @@ static void cbq_destroy_class(struct Qdisc *sch, struct cbq_class *cl)
1697static void cbq_destroy(struct Qdisc *sch) 1694static void cbq_destroy(struct Qdisc *sch)
1698{ 1695{
1699 struct cbq_sched_data *q = qdisc_priv(sch); 1696 struct cbq_sched_data *q = qdisc_priv(sch);
1700 struct hlist_node *n, *next; 1697 struct hlist_node *next;
1701 struct cbq_class *cl; 1698 struct cbq_class *cl;
1702 unsigned int h; 1699 unsigned int h;
1703 1700
@@ -1710,11 +1707,11 @@ static void cbq_destroy(struct Qdisc *sch)
1710 * be bound to classes which have been destroyed already. --TGR '04 1707 * be bound to classes which have been destroyed already. --TGR '04
1711 */ 1708 */
1712 for (h = 0; h < q->clhash.hashsize; h++) { 1709 for (h = 0; h < q->clhash.hashsize; h++) {
1713 hlist_for_each_entry(cl, n, &q->clhash.hash[h], common.hnode) 1710 hlist_for_each_entry(cl, &q->clhash.hash[h], common.hnode)
1714 tcf_destroy_chain(&cl->filter_list); 1711 tcf_destroy_chain(&cl->filter_list);
1715 } 1712 }
1716 for (h = 0; h < q->clhash.hashsize; h++) { 1713 for (h = 0; h < q->clhash.hashsize; h++) {
1717 hlist_for_each_entry_safe(cl, n, next, &q->clhash.hash[h], 1714 hlist_for_each_entry_safe(cl, next, &q->clhash.hash[h],
1718 common.hnode) 1715 common.hnode)
1719 cbq_destroy_class(sch, cl); 1716 cbq_destroy_class(sch, cl);
1720 } 1717 }
@@ -2013,14 +2010,13 @@ static void cbq_walk(struct Qdisc *sch, struct qdisc_walker *arg)
2013{ 2010{
2014 struct cbq_sched_data *q = qdisc_priv(sch); 2011 struct cbq_sched_data *q = qdisc_priv(sch);
2015 struct cbq_class *cl; 2012 struct cbq_class *cl;
2016 struct hlist_node *n;
2017 unsigned int h; 2013 unsigned int h;
2018 2014
2019 if (arg->stop) 2015 if (arg->stop)
2020 return; 2016 return;
2021 2017
2022 for (h = 0; h < q->clhash.hashsize; h++) { 2018 for (h = 0; h < q->clhash.hashsize; h++) {
2023 hlist_for_each_entry(cl, n, &q->clhash.hash[h], common.hnode) { 2019 hlist_for_each_entry(cl, &q->clhash.hash[h], common.hnode) {
2024 if (arg->count < arg->skip) { 2020 if (arg->count < arg->skip) {
2025 arg->count++; 2021 arg->count++;
2026 continue; 2022 continue;
diff --git a/net/sched/sch_drr.c b/net/sched/sch_drr.c
index 71e50c80315f..759b308d1a8d 100644
--- a/net/sched/sch_drr.c
+++ b/net/sched/sch_drr.c
@@ -293,14 +293,13 @@ static void drr_walk(struct Qdisc *sch, struct qdisc_walker *arg)
293{ 293{
294 struct drr_sched *q = qdisc_priv(sch); 294 struct drr_sched *q = qdisc_priv(sch);
295 struct drr_class *cl; 295 struct drr_class *cl;
296 struct hlist_node *n;
297 unsigned int i; 296 unsigned int i;
298 297
299 if (arg->stop) 298 if (arg->stop)
300 return; 299 return;
301 300
302 for (i = 0; i < q->clhash.hashsize; i++) { 301 for (i = 0; i < q->clhash.hashsize; i++) {
303 hlist_for_each_entry(cl, n, &q->clhash.hash[i], common.hnode) { 302 hlist_for_each_entry(cl, &q->clhash.hash[i], common.hnode) {
304 if (arg->count < arg->skip) { 303 if (arg->count < arg->skip) {
305 arg->count++; 304 arg->count++;
306 continue; 305 continue;
@@ -451,11 +450,10 @@ static void drr_reset_qdisc(struct Qdisc *sch)
451{ 450{
452 struct drr_sched *q = qdisc_priv(sch); 451 struct drr_sched *q = qdisc_priv(sch);
453 struct drr_class *cl; 452 struct drr_class *cl;
454 struct hlist_node *n;
455 unsigned int i; 453 unsigned int i;
456 454
457 for (i = 0; i < q->clhash.hashsize; i++) { 455 for (i = 0; i < q->clhash.hashsize; i++) {
458 hlist_for_each_entry(cl, n, &q->clhash.hash[i], common.hnode) { 456 hlist_for_each_entry(cl, &q->clhash.hash[i], common.hnode) {
459 if (cl->qdisc->q.qlen) 457 if (cl->qdisc->q.qlen)
460 list_del(&cl->alist); 458 list_del(&cl->alist);
461 qdisc_reset(cl->qdisc); 459 qdisc_reset(cl->qdisc);
@@ -468,13 +466,13 @@ static void drr_destroy_qdisc(struct Qdisc *sch)
468{ 466{
469 struct drr_sched *q = qdisc_priv(sch); 467 struct drr_sched *q = qdisc_priv(sch);
470 struct drr_class *cl; 468 struct drr_class *cl;
471 struct hlist_node *n, *next; 469 struct hlist_node *next;
472 unsigned int i; 470 unsigned int i;
473 471
474 tcf_destroy_chain(&q->filter_list); 472 tcf_destroy_chain(&q->filter_list);
475 473
476 for (i = 0; i < q->clhash.hashsize; i++) { 474 for (i = 0; i < q->clhash.hashsize; i++) {
477 hlist_for_each_entry_safe(cl, n, next, &q->clhash.hash[i], 475 hlist_for_each_entry_safe(cl, next, &q->clhash.hash[i],
478 common.hnode) 476 common.hnode)
479 drr_destroy_class(sch, cl); 477 drr_destroy_class(sch, cl);
480 } 478 }
diff --git a/net/sched/sch_hfsc.c b/net/sched/sch_hfsc.c
index 6c2ec4510540..9facea03faeb 100644
--- a/net/sched/sch_hfsc.c
+++ b/net/sched/sch_hfsc.c
@@ -1389,7 +1389,6 @@ static void
1389hfsc_walk(struct Qdisc *sch, struct qdisc_walker *arg) 1389hfsc_walk(struct Qdisc *sch, struct qdisc_walker *arg)
1390{ 1390{
1391 struct hfsc_sched *q = qdisc_priv(sch); 1391 struct hfsc_sched *q = qdisc_priv(sch);
1392 struct hlist_node *n;
1393 struct hfsc_class *cl; 1392 struct hfsc_class *cl;
1394 unsigned int i; 1393 unsigned int i;
1395 1394
@@ -1397,7 +1396,7 @@ hfsc_walk(struct Qdisc *sch, struct qdisc_walker *arg)
1397 return; 1396 return;
1398 1397
1399 for (i = 0; i < q->clhash.hashsize; i++) { 1398 for (i = 0; i < q->clhash.hashsize; i++) {
1400 hlist_for_each_entry(cl, n, &q->clhash.hash[i], 1399 hlist_for_each_entry(cl, &q->clhash.hash[i],
1401 cl_common.hnode) { 1400 cl_common.hnode) {
1402 if (arg->count < arg->skip) { 1401 if (arg->count < arg->skip) {
1403 arg->count++; 1402 arg->count++;
@@ -1523,11 +1522,10 @@ hfsc_reset_qdisc(struct Qdisc *sch)
1523{ 1522{
1524 struct hfsc_sched *q = qdisc_priv(sch); 1523 struct hfsc_sched *q = qdisc_priv(sch);
1525 struct hfsc_class *cl; 1524 struct hfsc_class *cl;
1526 struct hlist_node *n;
1527 unsigned int i; 1525 unsigned int i;
1528 1526
1529 for (i = 0; i < q->clhash.hashsize; i++) { 1527 for (i = 0; i < q->clhash.hashsize; i++) {
1530 hlist_for_each_entry(cl, n, &q->clhash.hash[i], cl_common.hnode) 1528 hlist_for_each_entry(cl, &q->clhash.hash[i], cl_common.hnode)
1531 hfsc_reset_class(cl); 1529 hfsc_reset_class(cl);
1532 } 1530 }
1533 q->eligible = RB_ROOT; 1531 q->eligible = RB_ROOT;
@@ -1540,16 +1538,16 @@ static void
1540hfsc_destroy_qdisc(struct Qdisc *sch) 1538hfsc_destroy_qdisc(struct Qdisc *sch)
1541{ 1539{
1542 struct hfsc_sched *q = qdisc_priv(sch); 1540 struct hfsc_sched *q = qdisc_priv(sch);
1543 struct hlist_node *n, *next; 1541 struct hlist_node *next;
1544 struct hfsc_class *cl; 1542 struct hfsc_class *cl;
1545 unsigned int i; 1543 unsigned int i;
1546 1544
1547 for (i = 0; i < q->clhash.hashsize; i++) { 1545 for (i = 0; i < q->clhash.hashsize; i++) {
1548 hlist_for_each_entry(cl, n, &q->clhash.hash[i], cl_common.hnode) 1546 hlist_for_each_entry(cl, &q->clhash.hash[i], cl_common.hnode)
1549 tcf_destroy_chain(&cl->filter_list); 1547 tcf_destroy_chain(&cl->filter_list);
1550 } 1548 }
1551 for (i = 0; i < q->clhash.hashsize; i++) { 1549 for (i = 0; i < q->clhash.hashsize; i++) {
1552 hlist_for_each_entry_safe(cl, n, next, &q->clhash.hash[i], 1550 hlist_for_each_entry_safe(cl, next, &q->clhash.hash[i],
1553 cl_common.hnode) 1551 cl_common.hnode)
1554 hfsc_destroy_class(sch, cl); 1552 hfsc_destroy_class(sch, cl);
1555 } 1553 }
@@ -1564,12 +1562,11 @@ hfsc_dump_qdisc(struct Qdisc *sch, struct sk_buff *skb)
1564 unsigned char *b = skb_tail_pointer(skb); 1562 unsigned char *b = skb_tail_pointer(skb);
1565 struct tc_hfsc_qopt qopt; 1563 struct tc_hfsc_qopt qopt;
1566 struct hfsc_class *cl; 1564 struct hfsc_class *cl;
1567 struct hlist_node *n;
1568 unsigned int i; 1565 unsigned int i;
1569 1566
1570 sch->qstats.backlog = 0; 1567 sch->qstats.backlog = 0;
1571 for (i = 0; i < q->clhash.hashsize; i++) { 1568 for (i = 0; i < q->clhash.hashsize; i++) {
1572 hlist_for_each_entry(cl, n, &q->clhash.hash[i], cl_common.hnode) 1569 hlist_for_each_entry(cl, &q->clhash.hash[i], cl_common.hnode)
1573 sch->qstats.backlog += cl->qdisc->qstats.backlog; 1570 sch->qstats.backlog += cl->qdisc->qstats.backlog;
1574 } 1571 }
1575 1572
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
index 03c2692ca01e..571f1d211f4d 100644
--- a/net/sched/sch_htb.c
+++ b/net/sched/sch_htb.c
@@ -949,11 +949,10 @@ static void htb_reset(struct Qdisc *sch)
949{ 949{
950 struct htb_sched *q = qdisc_priv(sch); 950 struct htb_sched *q = qdisc_priv(sch);
951 struct htb_class *cl; 951 struct htb_class *cl;
952 struct hlist_node *n;
953 unsigned int i; 952 unsigned int i;
954 953
955 for (i = 0; i < q->clhash.hashsize; i++) { 954 for (i = 0; i < q->clhash.hashsize; i++) {
956 hlist_for_each_entry(cl, n, &q->clhash.hash[i], common.hnode) { 955 hlist_for_each_entry(cl, &q->clhash.hash[i], common.hnode) {
957 if (cl->level) 956 if (cl->level)
958 memset(&cl->un.inner, 0, sizeof(cl->un.inner)); 957 memset(&cl->un.inner, 0, sizeof(cl->un.inner));
959 else { 958 else {
@@ -1218,7 +1217,7 @@ static void htb_destroy_class(struct Qdisc *sch, struct htb_class *cl)
1218static void htb_destroy(struct Qdisc *sch) 1217static void htb_destroy(struct Qdisc *sch)
1219{ 1218{
1220 struct htb_sched *q = qdisc_priv(sch); 1219 struct htb_sched *q = qdisc_priv(sch);
1221 struct hlist_node *n, *next; 1220 struct hlist_node *next;
1222 struct htb_class *cl; 1221 struct htb_class *cl;
1223 unsigned int i; 1222 unsigned int i;
1224 1223
@@ -1232,11 +1231,11 @@ static void htb_destroy(struct Qdisc *sch)
1232 tcf_destroy_chain(&q->filter_list); 1231 tcf_destroy_chain(&q->filter_list);
1233 1232
1234 for (i = 0; i < q->clhash.hashsize; i++) { 1233 for (i = 0; i < q->clhash.hashsize; i++) {
1235 hlist_for_each_entry(cl, n, &q->clhash.hash[i], common.hnode) 1234 hlist_for_each_entry(cl, &q->clhash.hash[i], common.hnode)
1236 tcf_destroy_chain(&cl->filter_list); 1235 tcf_destroy_chain(&cl->filter_list);
1237 } 1236 }
1238 for (i = 0; i < q->clhash.hashsize; i++) { 1237 for (i = 0; i < q->clhash.hashsize; i++) {
1239 hlist_for_each_entry_safe(cl, n, next, &q->clhash.hash[i], 1238 hlist_for_each_entry_safe(cl, next, &q->clhash.hash[i],
1240 common.hnode) 1239 common.hnode)
1241 htb_destroy_class(sch, cl); 1240 htb_destroy_class(sch, cl);
1242 } 1241 }
@@ -1516,14 +1515,13 @@ static void htb_walk(struct Qdisc *sch, struct qdisc_walker *arg)
1516{ 1515{
1517 struct htb_sched *q = qdisc_priv(sch); 1516 struct htb_sched *q = qdisc_priv(sch);
1518 struct htb_class *cl; 1517 struct htb_class *cl;
1519 struct hlist_node *n;
1520 unsigned int i; 1518 unsigned int i;
1521 1519
1522 if (arg->stop) 1520 if (arg->stop)
1523 return; 1521 return;
1524 1522
1525 for (i = 0; i < q->clhash.hashsize; i++) { 1523 for (i = 0; i < q->clhash.hashsize; i++) {
1526 hlist_for_each_entry(cl, n, &q->clhash.hash[i], common.hnode) { 1524 hlist_for_each_entry(cl, &q->clhash.hash[i], common.hnode) {
1527 if (arg->count < arg->skip) { 1525 if (arg->count < arg->skip) {
1528 arg->count++; 1526 arg->count++;
1529 continue; 1527 continue;
diff --git a/net/sched/sch_qfq.c b/net/sched/sch_qfq.c
index 6ed37652a4c3..e9a77f621c3d 100644
--- a/net/sched/sch_qfq.c
+++ b/net/sched/sch_qfq.c
@@ -276,9 +276,8 @@ static struct qfq_aggregate *qfq_find_agg(struct qfq_sched *q,
276 u32 lmax, u32 weight) 276 u32 lmax, u32 weight)
277{ 277{
278 struct qfq_aggregate *agg; 278 struct qfq_aggregate *agg;
279 struct hlist_node *n;
280 279
281 hlist_for_each_entry(agg, n, &q->nonfull_aggs, nonfull_next) 280 hlist_for_each_entry(agg, &q->nonfull_aggs, nonfull_next)
282 if (agg->lmax == lmax && agg->class_weight == weight) 281 if (agg->lmax == lmax && agg->class_weight == weight)
283 return agg; 282 return agg;
284 283
@@ -670,14 +669,13 @@ static void qfq_walk(struct Qdisc *sch, struct qdisc_walker *arg)
670{ 669{
671 struct qfq_sched *q = qdisc_priv(sch); 670 struct qfq_sched *q = qdisc_priv(sch);
672 struct qfq_class *cl; 671 struct qfq_class *cl;
673 struct hlist_node *n;
674 unsigned int i; 672 unsigned int i;
675 673
676 if (arg->stop) 674 if (arg->stop)
677 return; 675 return;
678 676
679 for (i = 0; i < q->clhash.hashsize; i++) { 677 for (i = 0; i < q->clhash.hashsize; i++) {
680 hlist_for_each_entry(cl, n, &q->clhash.hash[i], common.hnode) { 678 hlist_for_each_entry(cl, &q->clhash.hash[i], common.hnode) {
681 if (arg->count < arg->skip) { 679 if (arg->count < arg->skip) {
682 arg->count++; 680 arg->count++;
683 continue; 681 continue;
@@ -1376,11 +1374,10 @@ static unsigned int qfq_drop_from_slot(struct qfq_sched *q,
1376 struct hlist_head *slot) 1374 struct hlist_head *slot)
1377{ 1375{
1378 struct qfq_aggregate *agg; 1376 struct qfq_aggregate *agg;
1379 struct hlist_node *n;
1380 struct qfq_class *cl; 1377 struct qfq_class *cl;
1381 unsigned int len; 1378 unsigned int len;
1382 1379
1383 hlist_for_each_entry(agg, n, slot, next) { 1380 hlist_for_each_entry(agg, slot, next) {
1384 list_for_each_entry(cl, &agg->active, alist) { 1381 list_for_each_entry(cl, &agg->active, alist) {
1385 1382
1386 if (!cl->qdisc->ops->drop) 1383 if (!cl->qdisc->ops->drop)
@@ -1459,11 +1456,10 @@ static void qfq_reset_qdisc(struct Qdisc *sch)
1459{ 1456{
1460 struct qfq_sched *q = qdisc_priv(sch); 1457 struct qfq_sched *q = qdisc_priv(sch);
1461 struct qfq_class *cl; 1458 struct qfq_class *cl;
1462 struct hlist_node *n;
1463 unsigned int i; 1459 unsigned int i;
1464 1460
1465 for (i = 0; i < q->clhash.hashsize; i++) { 1461 for (i = 0; i < q->clhash.hashsize; i++) {
1466 hlist_for_each_entry(cl, n, &q->clhash.hash[i], common.hnode) { 1462 hlist_for_each_entry(cl, &q->clhash.hash[i], common.hnode) {
1467 if (cl->qdisc->q.qlen > 0) 1463 if (cl->qdisc->q.qlen > 0)
1468 qfq_deactivate_class(q, cl); 1464 qfq_deactivate_class(q, cl);
1469 1465
@@ -1477,13 +1473,13 @@ static void qfq_destroy_qdisc(struct Qdisc *sch)
1477{ 1473{
1478 struct qfq_sched *q = qdisc_priv(sch); 1474 struct qfq_sched *q = qdisc_priv(sch);
1479 struct qfq_class *cl; 1475 struct qfq_class *cl;
1480 struct hlist_node *n, *next; 1476 struct hlist_node *next;
1481 unsigned int i; 1477 unsigned int i;
1482 1478
1483 tcf_destroy_chain(&q->filter_list); 1479 tcf_destroy_chain(&q->filter_list);
1484 1480
1485 for (i = 0; i < q->clhash.hashsize; i++) { 1481 for (i = 0; i < q->clhash.hashsize; i++) {
1486 hlist_for_each_entry_safe(cl, n, next, &q->clhash.hash[i], 1482 hlist_for_each_entry_safe(cl, next, &q->clhash.hash[i],
1487 common.hnode) { 1483 common.hnode) {
1488 qfq_destroy_class(sch, cl); 1484 qfq_destroy_class(sch, cl);
1489 } 1485 }