author     Sasha Levin <sasha.levin@oracle.com>            2013-02-27 20:06:00 -0500
committer  Linus Torvalds <torvalds@linux-foundation.org>  2013-02-27 22:10:24 -0500
commit     b67bfe0d42cac56c512dd5da4b1b347a23f4b70a
tree       3d465aea12b97683f26ffa38eba8744469de9997 /net/mac80211/mesh_pathtbl.c
parent     1e142b29e210b5dfb2deeb6ce2210b60af16d2a6
hlist: drop the node parameter from iterators
I'm not sure why, but the hlist for-each-entry iterators were conceived
differently from the list ones, which look like:

        list_for_each_entry(pos, head, member)

The hlist ones were greedy and wanted an extra parameter:

        hlist_for_each_entry(tpos, pos, head, member)

Why did they need an extra pos parameter? I'm not quite sure. Not only
do they not really need it, it also prevents the iterator from looking
exactly like the list iterator, which is unfortunate.
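To make that concrete, here is a minimal caller-side sketch (not taken from
this patch; struct foo, foo_hash and foo_find are made-up names used only
for illustration):

        /* needs <linux/list.h>; 'foo' is a hypothetical hash-table user */
        struct foo {
                int key;
                struct hlist_node list;
        };

        static struct hlist_head foo_hash[16];

        static struct foo *foo_find(int key)
        {
                struct foo *f;

                /*
                 * Old API: a scratch 'struct hlist_node *pos' had to be
                 * declared and passed in:
                 *
                 *      struct hlist_node *pos;
                 *      hlist_for_each_entry(f, pos, &foo_hash[key & 15], list)
                 *
                 * New API: same shape as list_for_each_entry():
                 */
                hlist_for_each_entry(f, &foo_hash[key & 15], list)
                        if (f->key == key)
                                return f;
                return NULL;
        }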
Besides the semantic patch, there was some manual work required:
- Fix up the actual hlist iterators in linux/list.h
- Fix up the declaration of other iterators based on the hlist ones.
- A very small number of places were using the 'node' parameter directly;
  these were modified to use 'obj->member' instead (see the sketch after
  this list).
- Coccinelle didn't handle the hlist_for_each_entry_safe iterator
properly, so those had to be fixed up manually.
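To illustrate the last two points (again a hedged sketch with made-up
foo/foo_head names, not code from this patch): loop bodies that used the old
'node' parameter, for example to delete an entry, now reach the embedded
member through the object itself, and hlist_for_each_entry_safe() loses its
extra parameter in the same way:

        struct hlist_head foo_head;     /* hypothetical list head */
        struct foo *f;
        struct hlist_node *tmp;

        hlist_for_each_entry_safe(f, tmp, &foo_head, list) {
                hlist_del(&f->list);    /* was: hlist_del(pos) */
                kfree(f);
        }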
The semantic patch, which is mostly the work of Peter Senna Tschudin, is here:
@@
iterator name hlist_for_each_entry, hlist_for_each_entry_continue, hlist_for_each_entry_from, hlist_for_each_entry_rcu, hlist_for_each_entry_rcu_bh, hlist_for_each_entry_continue_rcu_bh, for_each_busy_worker, ax25_uid_for_each, ax25_for_each, inet_bind_bucket_for_each, sctp_for_each_hentry, sk_for_each, sk_for_each_rcu, sk_for_each_from, sk_for_each_safe, sk_for_each_bound, hlist_for_each_entry_safe, hlist_for_each_entry_continue_rcu, nr_neigh_for_each, nr_neigh_for_each_safe, nr_node_for_each, nr_node_for_each_safe, for_each_gfn_indirect_valid_sp, for_each_gfn_sp, for_each_host;
type T;
expression a,c,d,e;
identifier b;
statement S;
@@
-T b;
<+... when != b
(
hlist_for_each_entry(a,
- b,
c, d) S
|
hlist_for_each_entry_continue(a,
- b,
c) S
|
hlist_for_each_entry_from(a,
- b,
c) S
|
hlist_for_each_entry_rcu(a,
- b,
c, d) S
|
hlist_for_each_entry_rcu_bh(a,
- b,
c, d) S
|
hlist_for_each_entry_continue_rcu_bh(a,
- b,
c) S
|
for_each_busy_worker(a, c,
- b,
d) S
|
ax25_uid_for_each(a,
- b,
c) S
|
ax25_for_each(a,
- b,
c) S
|
inet_bind_bucket_for_each(a,
- b,
c) S
|
sctp_for_each_hentry(a,
- b,
c) S
|
sk_for_each(a,
- b,
c) S
|
sk_for_each_rcu(a,
- b,
c) S
|
sk_for_each_from
-(a, b)
+(a)
S
+ sk_for_each_from(a) S
|
sk_for_each_safe(a,
- b,
c, d) S
|
sk_for_each_bound(a,
- b,
c) S
|
hlist_for_each_entry_safe(a,
- b,
c, d, e) S
|
hlist_for_each_entry_continue_rcu(a,
- b,
c) S
|
nr_neigh_for_each(a,
- b,
c) S
|
nr_neigh_for_each_safe(a,
- b,
c, d) S
|
nr_node_for_each(a,
- b,
c) S
|
nr_node_for_each_safe(a,
- b,
c, d) S
|
- for_each_gfn_sp(a, c, d, b) S
+ for_each_gfn_sp(a, c, d) S
|
- for_each_gfn_indirect_valid_sp(a, c, d, b) S
+ for_each_gfn_indirect_valid_sp(a, c, d) S
|
for_each_host(a,
- b,
c) S
|
for_each_host_safe(a,
- b,
c, d) S
|
for_each_mesh_entry(a,
- b,
c, d) S
)
...+>
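As a usage note not stated in the original message: a tree-wide
transformation like this is normally applied with Coccinelle's spatch tool,
roughly "spatch --sp-file drop-hlist-node.cocci --in-place --dir ." (the
.cocci file name here is hypothetical, and older Coccinelle releases spell
these options -sp_file, -in_place and -dir).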
[akpm@linux-foundation.org: drop bogus change from net/ipv4/raw.c]
[akpm@linux-foundation.org: drop bogus hunk from net/ipv6/raw.c]
[akpm@linux-foundation.org: checkpatch fixes]
[akpm@linux-foundation.org: fix warnings]
[akpm@linux-foundation.org: redo intrusive kvm changes]
Tested-by: Peter Senna Tschudin <peter.senna@gmail.com>
Acked-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Signed-off-by: Sasha Levin <sasha.levin@oracle.com>
Cc: Wu Fengguang <fengguang.wu@intel.com>
Cc: Marcelo Tosatti <mtosatti@redhat.com>
Cc: Gleb Natapov <gleb@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'net/mac80211/mesh_pathtbl.c')
 -rw-r--r--  net/mac80211/mesh_pathtbl.c | 45
 1 file changed, 17 insertions(+), 28 deletions(-)
diff --git a/net/mac80211/mesh_pathtbl.c b/net/mac80211/mesh_pathtbl.c
index 6b3c4e119c63..dc7c8df40c2c 100644
--- a/net/mac80211/mesh_pathtbl.c
+++ b/net/mac80211/mesh_pathtbl.c
@@ -72,9 +72,9 @@ static inline struct mesh_table *resize_dereference_mpp_paths(void)
  * it's used twice. So it is illegal to do
  * for_each_mesh_entry(rcu_dereference(...), ...)
  */
-#define for_each_mesh_entry(tbl, p, node, i) \
+#define for_each_mesh_entry(tbl, node, i) \
         for (i = 0; i <= tbl->hash_mask; i++) \
-                hlist_for_each_entry_rcu(node, p, &tbl->hash_buckets[i], list)
+                hlist_for_each_entry_rcu(node, &tbl->hash_buckets[i], list)
 
 
 static struct mesh_table *mesh_table_alloc(int size_order)
@@ -139,7 +139,7 @@ static void mesh_table_free(struct mesh_table *tbl, bool free_leafs)
         }
         if (free_leafs) {
                 spin_lock_bh(&tbl->gates_lock);
-                hlist_for_each_entry_safe(gate, p, q,
+                hlist_for_each_entry_safe(gate, q,
                                           tbl->known_gates, list) {
                         hlist_del(&gate->list);
                         kfree(gate);
@@ -333,12 +333,11 @@ static struct mesh_path *mpath_lookup(struct mesh_table *tbl, const u8 *dst,
                                       struct ieee80211_sub_if_data *sdata)
 {
         struct mesh_path *mpath;
-        struct hlist_node *n;
         struct hlist_head *bucket;
         struct mpath_node *node;
 
         bucket = &tbl->hash_buckets[mesh_table_hash(dst, sdata, tbl)];
-        hlist_for_each_entry_rcu(node, n, bucket, list) {
+        hlist_for_each_entry_rcu(node, bucket, list) {
                 mpath = node->mpath;
                 if (mpath->sdata == sdata &&
                     ether_addr_equal(dst, mpath->dst)) {
@@ -389,11 +388,10 @@ mesh_path_lookup_by_idx(struct ieee80211_sub_if_data *sdata, int idx)
 {
         struct mesh_table *tbl = rcu_dereference(mesh_paths);
         struct mpath_node *node;
-        struct hlist_node *p;
         int i;
         int j = 0;
 
-        for_each_mesh_entry(tbl, p, node, i) {
+        for_each_mesh_entry(tbl, node, i) {
                 if (sdata && node->mpath->sdata != sdata)
                         continue;
                 if (j++ == idx) {
@@ -417,13 +415,12 @@ int mesh_path_add_gate(struct mesh_path *mpath)
 {
         struct mesh_table *tbl;
         struct mpath_node *gate, *new_gate;
-        struct hlist_node *n;
         int err;
 
         rcu_read_lock();
         tbl = rcu_dereference(mesh_paths);
 
-        hlist_for_each_entry_rcu(gate, n, tbl->known_gates, list)
+        hlist_for_each_entry_rcu(gate, tbl->known_gates, list)
                 if (gate->mpath == mpath) {
                         err = -EEXIST;
                         goto err_rcu;
@@ -460,9 +457,9 @@ err_rcu:
 static void mesh_gate_del(struct mesh_table *tbl, struct mesh_path *mpath)
 {
         struct mpath_node *gate;
-        struct hlist_node *p, *q;
+        struct hlist_node *q;
 
-        hlist_for_each_entry_safe(gate, p, q, tbl->known_gates, list) {
+        hlist_for_each_entry_safe(gate, q, tbl->known_gates, list) {
                 if (gate->mpath != mpath)
                         continue;
                 spin_lock_bh(&tbl->gates_lock);
@@ -504,7 +501,6 @@ int mesh_path_add(struct ieee80211_sub_if_data *sdata, const u8 *dst)
         struct mesh_path *mpath, *new_mpath;
         struct mpath_node *node, *new_node;
         struct hlist_head *bucket;
-        struct hlist_node *n;
         int grow = 0;
         int err = 0;
         u32 hash_idx;
@@ -550,7 +546,7 @@ int mesh_path_add(struct ieee80211_sub_if_data *sdata, const u8 *dst)
         spin_lock(&tbl->hashwlock[hash_idx]);
 
         err = -EEXIST;
-        hlist_for_each_entry(node, n, bucket, list) {
+        hlist_for_each_entry(node, bucket, list) {
                 mpath = node->mpath;
                 if (mpath->sdata == sdata &&
                     ether_addr_equal(dst, mpath->dst))
@@ -640,7 +636,6 @@ int mpp_path_add(struct ieee80211_sub_if_data *sdata,
         struct mesh_path *mpath, *new_mpath;
         struct mpath_node *node, *new_node;
         struct hlist_head *bucket;
-        struct hlist_node *n;
         int grow = 0;
         int err = 0;
         u32 hash_idx;
@@ -680,7 +675,7 @@ int mpp_path_add(struct ieee80211_sub_if_data *sdata,
         spin_lock(&tbl->hashwlock[hash_idx]);
 
         err = -EEXIST;
-        hlist_for_each_entry(node, n, bucket, list) {
+        hlist_for_each_entry(node, bucket, list) {
                 mpath = node->mpath;
                 if (mpath->sdata == sdata &&
                     ether_addr_equal(dst, mpath->dst))
@@ -725,14 +720,13 @@ void mesh_plink_broken(struct sta_info *sta)
         static const u8 bcast[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
         struct mesh_path *mpath;
         struct mpath_node *node;
-        struct hlist_node *p;
         struct ieee80211_sub_if_data *sdata = sta->sdata;
         int i;
         __le16 reason = cpu_to_le16(WLAN_REASON_MESH_PATH_DEST_UNREACHABLE);
 
         rcu_read_lock();
         tbl = rcu_dereference(mesh_paths);
-        for_each_mesh_entry(tbl, p, node, i) {
+        for_each_mesh_entry(tbl, node, i) {
                 mpath = node->mpath;
                 if (rcu_dereference(mpath->next_hop) == sta &&
                     mpath->flags & MESH_PATH_ACTIVE &&
@@ -792,13 +786,12 @@ void mesh_path_flush_by_nexthop(struct sta_info *sta)
         struct mesh_table *tbl;
         struct mesh_path *mpath;
         struct mpath_node *node;
-        struct hlist_node *p;
         int i;
 
         rcu_read_lock();
         read_lock_bh(&pathtbl_resize_lock);
         tbl = resize_dereference_mesh_paths();
-        for_each_mesh_entry(tbl, p, node, i) {
+        for_each_mesh_entry(tbl, node, i) {
                 mpath = node->mpath;
                 if (rcu_dereference(mpath->next_hop) == sta) {
                         spin_lock(&tbl->hashwlock[i]);
@@ -815,11 +808,10 @@ static void table_flush_by_iface(struct mesh_table *tbl,
 {
         struct mesh_path *mpath;
         struct mpath_node *node;
-        struct hlist_node *p;
         int i;
 
         WARN_ON(!rcu_read_lock_held());
-        for_each_mesh_entry(tbl, p, node, i) {
+        for_each_mesh_entry(tbl, node, i) {
                 mpath = node->mpath;
                 if (mpath->sdata != sdata)
                         continue;
@@ -865,7 +857,6 @@ int mesh_path_del(struct ieee80211_sub_if_data *sdata, const u8 *addr)
         struct mesh_path *mpath;
         struct mpath_node *node;
         struct hlist_head *bucket;
-        struct hlist_node *n;
         int hash_idx;
         int err = 0;
 
@@ -875,7 +866,7 @@ int mesh_path_del(struct ieee80211_sub_if_data *sdata, const u8 *addr)
         bucket = &tbl->hash_buckets[hash_idx];
 
         spin_lock(&tbl->hashwlock[hash_idx]);
-        hlist_for_each_entry(node, n, bucket, list) {
+        hlist_for_each_entry(node, bucket, list) {
                 mpath = node->mpath;
                 if (mpath->sdata == sdata &&
                     ether_addr_equal(addr, mpath->dst)) {
@@ -920,7 +911,6 @@ void mesh_path_tx_pending(struct mesh_path *mpath)
 int mesh_path_send_to_gates(struct mesh_path *mpath)
 {
         struct ieee80211_sub_if_data *sdata = mpath->sdata;
-        struct hlist_node *n;
         struct mesh_table *tbl;
         struct mesh_path *from_mpath = mpath;
         struct mpath_node *gate = NULL;
@@ -935,7 +925,7 @@ int mesh_path_send_to_gates(struct mesh_path *mpath)
         if (!known_gates)
                 return -EHOSTUNREACH;
 
-        hlist_for_each_entry_rcu(gate, n, known_gates, list) {
+        hlist_for_each_entry_rcu(gate, known_gates, list) {
                 if (gate->mpath->sdata != sdata)
                         continue;
 
@@ -951,7 +941,7 @@ int mesh_path_send_to_gates(struct mesh_path *mpath)
                 }
         }
 
-        hlist_for_each_entry_rcu(gate, n, known_gates, list)
+        hlist_for_each_entry_rcu(gate, known_gates, list)
                 if (gate->mpath->sdata == sdata) {
                         mpath_dbg(sdata, "Sending to %pM\n", gate->mpath->dst);
                         mesh_path_tx_pending(gate->mpath);
@@ -1096,12 +1086,11 @@ void mesh_path_expire(struct ieee80211_sub_if_data *sdata)
         struct mesh_table *tbl;
         struct mesh_path *mpath;
         struct mpath_node *node;
-        struct hlist_node *p;
         int i;
 
         rcu_read_lock();
         tbl = rcu_dereference(mesh_paths);
-        for_each_mesh_entry(tbl, p, node, i) {
+        for_each_mesh_entry(tbl, node, i) {
                 if (node->mpath->sdata != sdata)
                         continue;
                 mpath = node->mpath;