path: root/net/sched/sch_cbq.c
author     Sasha Levin <sasha.levin@oracle.com>  2013-02-27 20:06:00 -0500
committer  Linus Torvalds <torvalds@linux-foundation.org>  2013-02-27 22:10:24 -0500
commit     b67bfe0d42cac56c512dd5da4b1b347a23f4b70a (patch)
tree       3d465aea12b97683f26ffa38eba8744469de9997 /net/sched/sch_cbq.c
parent     1e142b29e210b5dfb2deeb6ce2210b60af16d2a6 (diff)
hlist: drop the node parameter from iterators
I'm not sure why, but the list for-each-entry iterators were conceived as

        list_for_each_entry(pos, head, member)

while the hlist ones were greedy and wanted an extra parameter:

        hlist_for_each_entry(tpos, pos, head, member)

Why did they need an extra pos parameter? I'm not quite sure. Not only
do they not really need it, it also prevents the iterator from looking
exactly like the list iterator, which is unfortunate.

Besides the semantic patch, there was some manual work required:

 - Fix up the actual hlist iterators in linux/list.h
 - Fix up the declaration of other iterators based on the hlist ones.
 - A very small number of places were using the 'node' parameter; these
   were modified to use 'obj->member' instead.
 - Coccinelle didn't handle the hlist_for_each_entry_safe iterator
   properly, so those had to be fixed up manually.

The semantic patch, which is mostly the work of Peter Senna Tschudin, is here:

@@
iterator name hlist_for_each_entry, hlist_for_each_entry_continue,
hlist_for_each_entry_from, hlist_for_each_entry_rcu,
hlist_for_each_entry_rcu_bh, hlist_for_each_entry_continue_rcu_bh,
for_each_busy_worker, ax25_uid_for_each, ax25_for_each,
inet_bind_bucket_for_each, sctp_for_each_hentry, sk_for_each,
sk_for_each_rcu, sk_for_each_from, sk_for_each_safe, sk_for_each_bound,
hlist_for_each_entry_safe, hlist_for_each_entry_continue_rcu,
nr_neigh_for_each, nr_neigh_for_each_safe, nr_node_for_each,
nr_node_for_each_safe, for_each_gfn_indirect_valid_sp, for_each_gfn_sp,
for_each_host;
type T;
expression a,c,d,e;
identifier b;
statement S;
@@

-T b;
    <+... when != b
(
hlist_for_each_entry(a,
- b,
c, d) S
|
hlist_for_each_entry_continue(a,
- b,
c) S
|
hlist_for_each_entry_from(a,
- b,
c) S
|
hlist_for_each_entry_rcu(a,
- b,
c, d) S
|
hlist_for_each_entry_rcu_bh(a,
- b,
c, d) S
|
hlist_for_each_entry_continue_rcu_bh(a,
- b,
c) S
|
for_each_busy_worker(a, c,
- b,
d) S
|
ax25_uid_for_each(a,
- b,
c) S
|
ax25_for_each(a,
- b,
c) S
|
inet_bind_bucket_for_each(a,
- b,
c) S
|
sctp_for_each_hentry(a,
- b,
c) S
|
sk_for_each(a,
- b,
c) S
|
sk_for_each_rcu(a,
- b,
c) S
|
sk_for_each_from
-(a, b)
+(a)
S
+ sk_for_each_from(a) S
|
sk_for_each_safe(a,
- b,
c, d) S
|
sk_for_each_bound(a,
- b,
c) S
|
hlist_for_each_entry_safe(a,
- b,
c, d, e) S
|
hlist_for_each_entry_continue_rcu(a,
- b,
c) S
|
nr_neigh_for_each(a,
- b,
c) S
|
nr_neigh_for_each_safe(a,
- b,
c, d) S
|
nr_node_for_each(a,
- b,
c) S
|
nr_node_for_each_safe(a,
- b,
c, d) S
|
- for_each_gfn_sp(a, c, d, b) S
+ for_each_gfn_sp(a, c, d) S
|
- for_each_gfn_indirect_valid_sp(a, c, d, b) S
+ for_each_gfn_indirect_valid_sp(a, c, d) S
|
for_each_host(a,
- b,
c) S
|
for_each_host_safe(a,
- b,
c, d) S
|
for_each_mesh_entry(a,
- b,
c, d) S
)
    ...+>

[akpm@linux-foundation.org: drop bogus change from net/ipv4/raw.c]
[akpm@linux-foundation.org: drop bogus hunk from net/ipv6/raw.c]
[akpm@linux-foundation.org: checkpatch fixes]
[akpm@linux-foundation.org: fix warnings]
[akpm@linux-foundation.org: redo intrusive kvm changes]
Tested-by: Peter Senna Tschudin <peter.senna@gmail.com>
Acked-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Signed-off-by: Sasha Levin <sasha.levin@oracle.com>
Cc: Wu Fengguang <fengguang.wu@intel.com>
Cc: Marcelo Tosatti <mtosatti@redhat.com>
Cc: Gleb Natapov <gleb@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
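To make the API change concrete, here is a minimal, self-contained userspace sketch of the new three-parameter iterator next to an old-style call site. This is an illustration only: the helpers are simplified re-implementations (a real struct hlist_node also carries a pprev back-pointer, and the kernel macros live in include/linux/list.h), and the struct item type is invented for the example.

/* Build with: gcc -o hlist_demo hlist_demo.c
 * (uses GNU typeof and statement expressions, as the kernel does)
 */
#include <stddef.h>
#include <stdio.h>

struct hlist_node { struct hlist_node *next; };   /* simplified: no pprev */
struct hlist_head { struct hlist_node *first; };

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/* NULL-safe container_of, mirroring the kernel's hlist_entry_safe() */
#define hlist_entry_safe(ptr, type, member) \
	({ typeof(ptr) ____ptr = (ptr); \
	   ____ptr ? container_of(____ptr, type, member) : NULL; })

/* Post-commit iterator: 'pos' is the containing object, no node cursor */
#define hlist_for_each_entry(pos, head, member) \
	for (pos = hlist_entry_safe((head)->first, typeof(*(pos)), member); \
	     pos; \
	     pos = hlist_entry_safe((pos)->member.next, typeof(*(pos)), member))

struct item {                     /* hypothetical example type */
	int val;
	struct hlist_node hnode;
};

int main(void)
{
	struct hlist_head head = { NULL };
	struct item a = { 1, { NULL } }, b = { 2, { NULL } };
	struct item *pos;

	/* push-front twice, so the chain is b -> a */
	a.hnode.next = head.first; head.first = &a.hnode;
	b.hnode.next = head.first; head.first = &b.hnode;

	/* Before this commit, callers also had to declare a cursor:
	 *	struct hlist_node *n;
	 *	hlist_for_each_entry(pos, n, &head, hnode) ...
	 * Afterwards the loop reads just like list_for_each_entry():
	 */
	hlist_for_each_entry(pos, &head, hnode)
		printf("%d\n", pos->val);   /* prints 2 then 1 */

	return 0;
}

This is exactly the shape of the mechanical edit the semantic patch performs at each call site: drop the node-cursor argument ('b' in the patch) and its declaration, leaving only the entry pointer, the head, and the member name.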
Diffstat (limited to 'net/sched/sch_cbq.c')
-rw-r--r--  net/sched/sch_cbq.c  18
1 file changed, 7 insertions(+), 11 deletions(-)
diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c
index 0e19948470b8..13aa47aa2ffb 100644
--- a/net/sched/sch_cbq.c
+++ b/net/sched/sch_cbq.c
@@ -1041,14 +1041,13 @@ static void cbq_adjust_levels(struct cbq_class *this)
 static void cbq_normalize_quanta(struct cbq_sched_data *q, int prio)
 {
 	struct cbq_class *cl;
-	struct hlist_node *n;
 	unsigned int h;
 
 	if (q->quanta[prio] == 0)
 		return;
 
 	for (h = 0; h < q->clhash.hashsize; h++) {
-		hlist_for_each_entry(cl, n, &q->clhash.hash[h], common.hnode) {
+		hlist_for_each_entry(cl, &q->clhash.hash[h], common.hnode) {
 			/* BUGGGG... Beware! This expression suffer of
 			 * arithmetic overflows!
 			 */
@@ -1087,10 +1086,9 @@ static void cbq_sync_defmap(struct cbq_class *cl)
 			continue;
 
 		for (h = 0; h < q->clhash.hashsize; h++) {
-			struct hlist_node *n;
 			struct cbq_class *c;
 
-			hlist_for_each_entry(c, n, &q->clhash.hash[h],
+			hlist_for_each_entry(c, &q->clhash.hash[h],
 					     common.hnode) {
 				if (c->split == split && c->level < level &&
 				    c->defmap & (1<<i)) {
@@ -1210,7 +1208,6 @@ cbq_reset(struct Qdisc *sch)
 {
 	struct cbq_sched_data *q = qdisc_priv(sch);
 	struct cbq_class *cl;
-	struct hlist_node *n;
 	int prio;
 	unsigned int h;
 
@@ -1228,7 +1225,7 @@ cbq_reset(struct Qdisc *sch)
 		q->active[prio] = NULL;
 
 	for (h = 0; h < q->clhash.hashsize; h++) {
-		hlist_for_each_entry(cl, n, &q->clhash.hash[h], common.hnode) {
+		hlist_for_each_entry(cl, &q->clhash.hash[h], common.hnode) {
 			qdisc_reset(cl->q);
 
 			cl->next_alive = NULL;
@@ -1697,7 +1694,7 @@ static void cbq_destroy_class(struct Qdisc *sch, struct cbq_class *cl)
 static void cbq_destroy(struct Qdisc *sch)
 {
 	struct cbq_sched_data *q = qdisc_priv(sch);
-	struct hlist_node *n, *next;
+	struct hlist_node *next;
 	struct cbq_class *cl;
 	unsigned int h;
 
@@ -1710,11 +1707,11 @@ static void cbq_destroy(struct Qdisc *sch)
 	 * be bound to classes which have been destroyed already. --TGR '04
 	 */
 	for (h = 0; h < q->clhash.hashsize; h++) {
-		hlist_for_each_entry(cl, n, &q->clhash.hash[h], common.hnode)
+		hlist_for_each_entry(cl, &q->clhash.hash[h], common.hnode)
 			tcf_destroy_chain(&cl->filter_list);
 	}
 	for (h = 0; h < q->clhash.hashsize; h++) {
-		hlist_for_each_entry_safe(cl, n, next, &q->clhash.hash[h],
+		hlist_for_each_entry_safe(cl, next, &q->clhash.hash[h],
 					  common.hnode)
 			cbq_destroy_class(sch, cl);
 	}
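An aside on the hunk above (editorial note, not part of the patch): hlist_for_each_entry_safe() keeps a separate struct hlist_node *next cursor even after this commit, because cbq_destroy_class() frees the current entry mid-walk, so the next pointer must be saved before the loop body runs. A simplified sketch of the post-commit macro, reusing hlist_entry_safe() from the earlier example:

/* Simplified illustration of the post-commit _safe variant: 'n' caches
 * the next node before the loop body executes, so the body may free
 * 'pos' -- exactly what cbq_destroy() relies on here.
 */
#define hlist_for_each_entry_safe(pos, n, head, member) \
	for (pos = hlist_entry_safe((head)->first, typeof(*(pos)), member); \
	     pos && ({ n = (pos)->member.next; 1; }); \
	     pos = hlist_entry_safe(n, typeof(*(pos)), member))

Note that only the typed node cursor (the old second argument) was dropped; the raw next pointer survives as an argument, which is why cbq_destroy() still declares struct hlist_node *next.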
@@ -2013,14 +2010,13 @@ static void cbq_walk(struct Qdisc *sch, struct qdisc_walker *arg)
 {
 	struct cbq_sched_data *q = qdisc_priv(sch);
 	struct cbq_class *cl;
-	struct hlist_node *n;
 	unsigned int h;
 
 	if (arg->stop)
 		return;
 
 	for (h = 0; h < q->clhash.hashsize; h++) {
-		hlist_for_each_entry(cl, n, &q->clhash.hash[h], common.hnode) {
+		hlist_for_each_entry(cl, &q->clhash.hash[h], common.hnode) {
 			if (arg->count < arg->skip) {
 				arg->count++;
 				continue;