author		Sasha Levin <sasha.levin@oracle.com>	2013-02-27 20:06:00 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2013-02-27 22:10:24 -0500
commit		b67bfe0d42cac56c512dd5da4b1b347a23f4b70a (patch)
tree		3d465aea12b97683f26ffa38eba8744469de9997 /lib
parent		1e142b29e210b5dfb2deeb6ce2210b60af16d2a6 (diff)
hlist: drop the node parameter from iterators
I'm not sure why, but the hlist for_each_entry iterators were conceived
differently from the list ones, which take the straightforward form:

	list_for_each_entry(pos, head, member)

The hlist ones were greedy and wanted an extra parameter:

	hlist_for_each_entry(tpos, pos, head, member)

Why did they need an extra pos parameter? I'm not quite sure. Not only
do they not really need it, it also prevents the iterator from looking
exactly like the list iterator, which is unfortunate.
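A minimal sketch of what the new form means at a call site; struct
item, its link member, and scan() are invented names for illustration,
not anything from this patch:

	struct item {
		int key;
		struct hlist_node link;	/* chains the item into a hash bucket */
	};

	static struct item *scan(struct hlist_head *head, int key)
	{
		struct item *pos;

		/* old form needed a spare cursor:
		 *	struct hlist_node *n;
		 *	hlist_for_each_entry(pos, n, head, link) ...
		 * new form: the entry pointer alone drives the walk */
		hlist_for_each_entry(pos, head, link) {
			if (pos->key == key)
				return pos;
		}
		return NULL;
	}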
Besides the semantic patch, there was some manual work required:
- Fix up the actual hlist iterators in linux/list.h
- Fix up the declaration of other iterators based on the hlist ones.
- A very small number of places were using the 'node' parameter; these
  were modified to use 'obj->member' instead.
- Coccinelle didn't handle the hlist_for_each_entry_safe iterator
  properly, so those had to be fixed up manually (a sketch of that
  conversion follows this list).
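The safe variant keeps its temporary next pointer, since deletion
safety requires remembering the successor rather than a second cursor,
so the conversion just drops the 'node' argument. A minimal sketch of
that manual fix-up, mirroring the lib/debugobjects.c hunks below;
struct dobj and drain() are illustrative names:

	struct dobj {
		struct hlist_node node;
	};

	static void drain(struct hlist_head *freelist)
	{
		struct dobj *obj;
		struct hlist_node *tmp;

		/* before: hlist_for_each_entry_safe(obj, node, tmp, freelist, node)
		 * after:  the extra 'node' cursor is gone, 'tmp' remains */
		hlist_for_each_entry_safe(obj, tmp, freelist, node) {
			hlist_del(&obj->node);
			kfree(obj);	/* safe: 'tmp' already holds the next node */
		}
	}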
The semantic patch, which is mostly the work of Peter Senna Tschudin, is here:
@@
iterator name hlist_for_each_entry, hlist_for_each_entry_continue,
hlist_for_each_entry_from, hlist_for_each_entry_rcu,
hlist_for_each_entry_rcu_bh, hlist_for_each_entry_continue_rcu_bh,
for_each_busy_worker, ax25_uid_for_each, ax25_for_each,
inet_bind_bucket_for_each, sctp_for_each_hentry, sk_for_each,
sk_for_each_rcu, sk_for_each_from, sk_for_each_safe, sk_for_each_bound,
hlist_for_each_entry_safe, hlist_for_each_entry_continue_rcu,
nr_neigh_for_each, nr_neigh_for_each_safe, nr_node_for_each,
nr_node_for_each_safe, for_each_gfn_indirect_valid_sp, for_each_gfn_sp,
for_each_host, for_each_host_safe, for_each_mesh_entry;
type T;
expression a,c,d,e;
identifier b;
statement S;
@@
-T b;
<+... when != b
(
hlist_for_each_entry(a,
- b,
c, d) S
|
hlist_for_each_entry_continue(a,
- b,
c) S
|
hlist_for_each_entry_from(a,
- b,
c) S
|
hlist_for_each_entry_rcu(a,
- b,
c, d) S
|
hlist_for_each_entry_rcu_bh(a,
- b,
c, d) S
|
hlist_for_each_entry_continue_rcu_bh(a,
- b,
c) S
|
for_each_busy_worker(a, c,
- b,
d) S
|
ax25_uid_for_each(a,
- b,
c) S
|
ax25_for_each(a,
- b,
c) S
|
inet_bind_bucket_for_each(a,
- b,
c) S
|
sctp_for_each_hentry(a,
- b,
c) S
|
sk_for_each(a,
- b,
c) S
|
sk_for_each_rcu(a,
- b,
c) S
|
sk_for_each_from
-(a, b)
+(a)
S
+ sk_for_each_from(a) S
|
sk_for_each_safe(a,
- b,
c, d) S
|
sk_for_each_bound(a,
- b,
c) S
|
hlist_for_each_entry_safe(a,
- b,
c, d, e) S
|
hlist_for_each_entry_continue_rcu(a,
- b,
c) S
|
nr_neigh_for_each(a,
- b,
c) S
|
nr_neigh_for_each_safe(a,
- b,
c, d) S
|
nr_node_for_each(a,
- b,
c) S
|
nr_node_for_each_safe(a,
- b,
c, d) S
|
- for_each_gfn_sp(a, c, d, b) S
+ for_each_gfn_sp(a, c, d) S
|
- for_each_gfn_indirect_valid_sp(a, c, d, b) S
+ for_each_gfn_indirect_valid_sp(a, c, d) S
|
for_each_host(a,
- b,
c) S
|
for_each_host_safe(a,
- b,
c, d) S
|
for_each_mesh_entry(a,
- b,
c, d) S
)
...+>
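Taken together, the rule deletes the declaration 'T b;' (the
now-redundant struct hlist_node cursor) whenever b appears only as the
dropped argument of one of the listed iterators; the 'when != b'
constraint stops the rule from firing if b has any other use. A
before/after illustration with invented names (struct entry, bucket,
process()):

	/* before the semantic patch runs */
	struct entry *e;
	struct hlist_node *n;	/* matched by the '-T b;' line */

	hlist_for_each_entry(e, n, &bucket, hash)	/* 'n' is the dropped 'b' */
		process(e);

	/* after: both the cursor declaration and the argument are gone */
	struct entry *e;

	hlist_for_each_entry(e, &bucket, hash)
		process(e);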
[akpm@linux-foundation.org: drop bogus change from net/ipv4/raw.c]
[akpm@linux-foundation.org: drop bogus hunk from net/ipv6/raw.c]
[akpm@linux-foundation.org: checkpatch fixes]
[akpm@linux-foundation.org: fix warnings]
[akpm@linux-foundation.org: redo intrusive kvm changes]
Tested-by: Peter Senna Tschudin <peter.senna@gmail.com>
Acked-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Signed-off-by: Sasha Levin <sasha.levin@oracle.com>
Cc: Wu Fengguang <fengguang.wu@intel.com>
Cc: Marcelo Tosatti <mtosatti@redhat.com>
Cc: Gleb Natapov <gleb@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'lib')
-rw-r--r--	lib/debugobjects.c	21
-rw-r--r--	lib/lru_cache.c	3
2 files changed, 11 insertions(+), 13 deletions(-)
diff --git a/lib/debugobjects.c b/lib/debugobjects.c
index d11808ca4bc4..37061ede8b81 100644
--- a/lib/debugobjects.c
+++ b/lib/debugobjects.c
@@ -109,11 +109,10 @@ static void fill_pool(void)
  */
 static struct debug_obj *lookup_object(void *addr, struct debug_bucket *b)
 {
-	struct hlist_node *node;
 	struct debug_obj *obj;
 	int cnt = 0;
 
-	hlist_for_each_entry(obj, node, &b->list, node) {
+	hlist_for_each_entry(obj, &b->list, node) {
 		cnt++;
 		if (obj->object == addr)
 			return obj;
@@ -213,7 +212,7 @@ static void free_object(struct debug_obj *obj)
 static void debug_objects_oom(void)
 {
 	struct debug_bucket *db = obj_hash;
-	struct hlist_node *node, *tmp;
+	struct hlist_node *tmp;
 	HLIST_HEAD(freelist);
 	struct debug_obj *obj;
 	unsigned long flags;
@@ -227,7 +226,7 @@ static void debug_objects_oom(void)
 	raw_spin_unlock_irqrestore(&db->lock, flags);
 
 	/* Now free them */
-	hlist_for_each_entry_safe(obj, node, tmp, &freelist, node) {
+	hlist_for_each_entry_safe(obj, tmp, &freelist, node) {
 		hlist_del(&obj->node);
 		free_object(obj);
 	}
@@ -658,7 +657,7 @@ debug_object_active_state(void *addr, struct debug_obj_descr *descr,
 static void __debug_check_no_obj_freed(const void *address, unsigned long size)
 {
 	unsigned long flags, oaddr, saddr, eaddr, paddr, chunks;
-	struct hlist_node *node, *tmp;
+	struct hlist_node *tmp;
 	HLIST_HEAD(freelist);
 	struct debug_obj_descr *descr;
 	enum debug_obj_state state;
@@ -678,7 +677,7 @@ static void __debug_check_no_obj_freed(const void *address, unsigned long size)
 repeat:
 	cnt = 0;
 	raw_spin_lock_irqsave(&db->lock, flags);
-	hlist_for_each_entry_safe(obj, node, tmp, &db->list, node) {
+	hlist_for_each_entry_safe(obj, tmp, &db->list, node) {
 		cnt++;
 		oaddr = (unsigned long) obj->object;
 		if (oaddr < saddr || oaddr >= eaddr)
@@ -702,7 +701,7 @@ repeat:
 	raw_spin_unlock_irqrestore(&db->lock, flags);
 
 	/* Now free them */
-	hlist_for_each_entry_safe(obj, node, tmp, &freelist, node) {
+	hlist_for_each_entry_safe(obj, tmp, &freelist, node) {
 		hlist_del(&obj->node);
 		free_object(obj);
 	}
@@ -1013,7 +1012,7 @@ void __init debug_objects_early_init(void)
 static int __init debug_objects_replace_static_objects(void)
 {
 	struct debug_bucket *db = obj_hash;
-	struct hlist_node *node, *tmp;
+	struct hlist_node *tmp;
 	struct debug_obj *obj, *new;
 	HLIST_HEAD(objects);
 	int i, cnt = 0;
@@ -1033,7 +1032,7 @@ static int __init debug_objects_replace_static_objects(void)
 	local_irq_disable();
 
 	/* Remove the statically allocated objects from the pool */
-	hlist_for_each_entry_safe(obj, node, tmp, &obj_pool, node)
+	hlist_for_each_entry_safe(obj, tmp, &obj_pool, node)
 		hlist_del(&obj->node);
 	/* Move the allocated objects to the pool */
 	hlist_move_list(&objects, &obj_pool);
@@ -1042,7 +1041,7 @@ static int __init debug_objects_replace_static_objects(void)
 	for (i = 0; i < ODEBUG_HASH_SIZE; i++, db++) {
 		hlist_move_list(&db->list, &objects);
 
-		hlist_for_each_entry(obj, node, &objects, node) {
+		hlist_for_each_entry(obj, &objects, node) {
 			new = hlist_entry(obj_pool.first, typeof(*obj), node);
 			hlist_del(&new->node);
 			/* copy object data */
@@ -1057,7 +1056,7 @@ static int __init debug_objects_replace_static_objects(void)
 		obj_pool_used);
 	return 0;
 free:
-	hlist_for_each_entry_safe(obj, node, tmp, &objects, node) {
+	hlist_for_each_entry_safe(obj, tmp, &objects, node) {
 		hlist_del(&obj->node);
 		kmem_cache_free(obj_cache, obj);
 	}
diff --git a/lib/lru_cache.c b/lib/lru_cache.c
index d71d89498943..8335d39d2ccd 100644
--- a/lib/lru_cache.c
+++ b/lib/lru_cache.c
@@ -262,12 +262,11 @@ static struct hlist_head *lc_hash_slot(struct lru_cache *lc, unsigned int enr)
 static struct lc_element *__lc_find(struct lru_cache *lc, unsigned int enr,
 		bool include_changing)
 {
-	struct hlist_node *n;
 	struct lc_element *e;
 
 	BUG_ON(!lc);
 	BUG_ON(!lc->nr_elements);
-	hlist_for_each_entry(e, n, lc_hash_slot(lc, enr), colision) {
+	hlist_for_each_entry(e, lc_hash_slot(lc, enr), colision) {
 		/* "about to be changed" elements, pending transaction commit,
 		 * are hashed by their "new number". "Normal" elements have
 		 * lc_number == lc_new_number. */