about summary refs log tree commit diff stats
path: root/block
diff options
context:
space:
mode:
authorSasha Levin <sasha.levin@oracle.com>2013-02-27 20:06:00 -0500
committerLinus Torvalds <torvalds@linux-foundation.org>2013-02-27 22:10:24 -0500
commitb67bfe0d42cac56c512dd5da4b1b347a23f4b70a (patch)
tree3d465aea12b97683f26ffa38eba8744469de9997 /block
parent1e142b29e210b5dfb2deeb6ce2210b60af16d2a6 (diff)
hlist: drop the node parameter from iterators
I'm not sure why, but the hlist for each entry iterators were conceived list_for_each_entry(pos, head, member) The hlist ones were greedy and wanted an extra parameter: hlist_for_each_entry(tpos, pos, head, member) Why did they need an extra pos parameter? I'm not quite sure. Not only they don't really need it, it also prevents the iterator from looking exactly like the list iterator, which is unfortunate. Besides the semantic patch, there was some manual work required: - Fix up the actual hlist iterators in linux/list.h - Fix up the declaration of other iterators based on the hlist ones. - A very small amount of places were using the 'node' parameter, this was modified to use 'obj->member' instead. - Coccinelle didn't handle the hlist_for_each_entry_safe iterator properly, so those had to be fixed up manually. The semantic patch which is mostly the work of Peter Senna Tschudin is here: @@ iterator name hlist_for_each_entry, hlist_for_each_entry_continue, hlist_for_each_entry_from, hlist_for_each_entry_rcu, hlist_for_each_entry_rcu_bh, hlist_for_each_entry_continue_rcu_bh, for_each_busy_worker, ax25_uid_for_each, ax25_for_each, inet_bind_bucket_for_each, sctp_for_each_hentry, sk_for_each, sk_for_each_rcu, sk_for_each_from, sk_for_each_safe, sk_for_each_bound, hlist_for_each_entry_safe, hlist_for_each_entry_continue_rcu, nr_neigh_for_each, nr_neigh_for_each_safe, nr_node_for_each, nr_node_for_each_safe, for_each_gfn_indirect_valid_sp, for_each_gfn_sp, for_each_host; type T; expression a,c,d,e; identifier b; statement S; @@ -T b; <+... 
when != b ( hlist_for_each_entry(a, - b, c, d) S | hlist_for_each_entry_continue(a, - b, c) S | hlist_for_each_entry_from(a, - b, c) S | hlist_for_each_entry_rcu(a, - b, c, d) S | hlist_for_each_entry_rcu_bh(a, - b, c, d) S | hlist_for_each_entry_continue_rcu_bh(a, - b, c) S | for_each_busy_worker(a, c, - b, d) S | ax25_uid_for_each(a, - b, c) S | ax25_for_each(a, - b, c) S | inet_bind_bucket_for_each(a, - b, c) S | sctp_for_each_hentry(a, - b, c) S | sk_for_each(a, - b, c) S | sk_for_each_rcu(a, - b, c) S | sk_for_each_from -(a, b) +(a) S + sk_for_each_from(a) S | sk_for_each_safe(a, - b, c, d) S | sk_for_each_bound(a, - b, c) S | hlist_for_each_entry_safe(a, - b, c, d, e) S | hlist_for_each_entry_continue_rcu(a, - b, c) S | nr_neigh_for_each(a, - b, c) S | nr_neigh_for_each_safe(a, - b, c, d) S | nr_node_for_each(a, - b, c) S | nr_node_for_each_safe(a, - b, c, d) S | - for_each_gfn_sp(a, c, d, b) S + for_each_gfn_sp(a, c, d) S | - for_each_gfn_indirect_valid_sp(a, c, d, b) S + for_each_gfn_indirect_valid_sp(a, c, d) S | for_each_host(a, - b, c) S | for_each_host_safe(a, - b, c, d) S | for_each_mesh_entry(a, - b, c, d) S ) ...+> [akpm@linux-foundation.org: drop bogus change from net/ipv4/raw.c] [akpm@linux-foundation.org: drop bogus hunk from net/ipv6/raw.c] [akpm@linux-foundation.org: checkpatch fixes] [akpm@linux-foundation.org: fix warnings] [akpm@linux-foudnation.org: redo intrusive kvm changes] Tested-by: Peter Senna Tschudin <peter.senna@gmail.com> Acked-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com> Signed-off-by: Sasha Levin <sasha.levin@oracle.com> Cc: Wu Fengguang <fengguang.wu@intel.com> Cc: Marcelo Tosatti <mtosatti@redhat.com> Cc: Gleb Natapov <gleb@redhat.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'block')
-rw-r--r--block/blk-cgroup.c6
-rw-r--r--block/blk-ioc.c3
-rw-r--r--block/bsg.c3
-rw-r--r--block/cfq-iosched.c3
-rw-r--r--block/elevator.c4
5 files changed, 7 insertions(+), 12 deletions(-)
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index b8858fb0cafa..8bdebb6781e1 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -357,7 +357,6 @@ static int blkcg_reset_stats(struct cgroup *cgroup, struct cftype *cftype,
357{ 357{
358 struct blkcg *blkcg = cgroup_to_blkcg(cgroup); 358 struct blkcg *blkcg = cgroup_to_blkcg(cgroup);
359 struct blkcg_gq *blkg; 359 struct blkcg_gq *blkg;
360 struct hlist_node *n;
361 int i; 360 int i;
362 361
363 mutex_lock(&blkcg_pol_mutex); 362 mutex_lock(&blkcg_pol_mutex);
@@ -368,7 +367,7 @@ static int blkcg_reset_stats(struct cgroup *cgroup, struct cftype *cftype,
368 * stat updates. This is a debug feature which shouldn't exist 367 * stat updates. This is a debug feature which shouldn't exist
369 * anyway. If you get hit by a race, retry. 368 * anyway. If you get hit by a race, retry.
370 */ 369 */
371 hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) { 370 hlist_for_each_entry(blkg, &blkcg->blkg_list, blkcg_node) {
372 for (i = 0; i < BLKCG_MAX_POLS; i++) { 371 for (i = 0; i < BLKCG_MAX_POLS; i++) {
373 struct blkcg_policy *pol = blkcg_policy[i]; 372 struct blkcg_policy *pol = blkcg_policy[i];
374 373
@@ -415,11 +414,10 @@ void blkcg_print_blkgs(struct seq_file *sf, struct blkcg *blkcg,
415 bool show_total) 414 bool show_total)
416{ 415{
417 struct blkcg_gq *blkg; 416 struct blkcg_gq *blkg;
418 struct hlist_node *n;
419 u64 total = 0; 417 u64 total = 0;
420 418
421 spin_lock_irq(&blkcg->lock); 419 spin_lock_irq(&blkcg->lock);
422 hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) 420 hlist_for_each_entry(blkg, &blkcg->blkg_list, blkcg_node)
423 if (blkcg_policy_enabled(blkg->q, pol)) 421 if (blkcg_policy_enabled(blkg->q, pol))
424 total += prfill(sf, blkg->pd[pol->plid], data); 422 total += prfill(sf, blkg->pd[pol->plid], data);
425 spin_unlock_irq(&blkcg->lock); 423 spin_unlock_irq(&blkcg->lock);
diff --git a/block/blk-ioc.c b/block/blk-ioc.c
index fab4cdd3f7bb..9c4bb8266bc8 100644
--- a/block/blk-ioc.c
+++ b/block/blk-ioc.c
@@ -164,7 +164,6 @@ EXPORT_SYMBOL(put_io_context);
164 */ 164 */
165void put_io_context_active(struct io_context *ioc) 165void put_io_context_active(struct io_context *ioc)
166{ 166{
167 struct hlist_node *n;
168 unsigned long flags; 167 unsigned long flags;
169 struct io_cq *icq; 168 struct io_cq *icq;
170 169
@@ -180,7 +179,7 @@ void put_io_context_active(struct io_context *ioc)
180 */ 179 */
181retry: 180retry:
182 spin_lock_irqsave_nested(&ioc->lock, flags, 1); 181 spin_lock_irqsave_nested(&ioc->lock, flags, 1);
183 hlist_for_each_entry(icq, n, &ioc->icq_list, ioc_node) { 182 hlist_for_each_entry(icq, &ioc->icq_list, ioc_node) {
184 if (icq->flags & ICQ_EXITED) 183 if (icq->flags & ICQ_EXITED)
185 continue; 184 continue;
186 if (spin_trylock(icq->q->queue_lock)) { 185 if (spin_trylock(icq->q->queue_lock)) {
diff --git a/block/bsg.c b/block/bsg.c
index 3ca92ebf6bbb..420a5a9f1b23 100644
--- a/block/bsg.c
+++ b/block/bsg.c
@@ -800,11 +800,10 @@ static struct bsg_device *bsg_add_device(struct inode *inode,
800static struct bsg_device *__bsg_get_device(int minor, struct request_queue *q) 800static struct bsg_device *__bsg_get_device(int minor, struct request_queue *q)
801{ 801{
802 struct bsg_device *bd; 802 struct bsg_device *bd;
803 struct hlist_node *entry;
804 803
805 mutex_lock(&bsg_mutex); 804 mutex_lock(&bsg_mutex);
806 805
807 hlist_for_each_entry(bd, entry, bsg_dev_idx_hash(minor), dev_list) { 806 hlist_for_each_entry(bd, bsg_dev_idx_hash(minor), dev_list) {
808 if (bd->queue == q) { 807 if (bd->queue == q) {
809 atomic_inc(&bd->ref_count); 808 atomic_inc(&bd->ref_count);
810 goto found; 809 goto found;
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index e62e9205b80a..ec52807cdd09 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -1435,7 +1435,6 @@ static int cfq_set_weight(struct cgroup *cgrp, struct cftype *cft, u64 val)
1435{ 1435{
1436 struct blkcg *blkcg = cgroup_to_blkcg(cgrp); 1436 struct blkcg *blkcg = cgroup_to_blkcg(cgrp);
1437 struct blkcg_gq *blkg; 1437 struct blkcg_gq *blkg;
1438 struct hlist_node *n;
1439 1438
1440 if (val < CFQ_WEIGHT_MIN || val > CFQ_WEIGHT_MAX) 1439 if (val < CFQ_WEIGHT_MIN || val > CFQ_WEIGHT_MAX)
1441 return -EINVAL; 1440 return -EINVAL;
@@ -1443,7 +1442,7 @@ static int cfq_set_weight(struct cgroup *cgrp, struct cftype *cft, u64 val)
1443 spin_lock_irq(&blkcg->lock); 1442 spin_lock_irq(&blkcg->lock);
1444 blkcg->cfq_weight = (unsigned int)val; 1443 blkcg->cfq_weight = (unsigned int)val;
1445 1444
1446 hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) { 1445 hlist_for_each_entry(blkg, &blkcg->blkg_list, blkcg_node) {
1447 struct cfq_group *cfqg = blkg_to_cfqg(blkg); 1446 struct cfq_group *cfqg = blkg_to_cfqg(blkg);
1448 1447
1449 if (cfqg && !cfqg->dev_weight) 1448 if (cfqg && !cfqg->dev_weight)
diff --git a/block/elevator.c b/block/elevator.c
index 603b2c178740..d0acb31cc083 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -288,10 +288,10 @@ static struct request *elv_rqhash_find(struct request_queue *q, sector_t offset)
288{ 288{
289 struct elevator_queue *e = q->elevator; 289 struct elevator_queue *e = q->elevator;
290 struct hlist_head *hash_list = &e->hash[ELV_HASH_FN(offset)]; 290 struct hlist_head *hash_list = &e->hash[ELV_HASH_FN(offset)];
291 struct hlist_node *entry, *next; 291 struct hlist_node *next;
292 struct request *rq; 292 struct request *rq;
293 293
294 hlist_for_each_entry_safe(rq, entry, next, hash_list, hash) { 294 hlist_for_each_entry_safe(rq, next, hash_list, hash) {
295 BUG_ON(!ELV_ON_HASH(rq)); 295 BUG_ON(!ELV_ON_HASH(rq));
296 296
297 if (unlikely(!rq_mergeable(rq))) { 297 if (unlikely(!rq_mergeable(rq))) {