author | Sasha Levin <sasha.levin@oracle.com> | 2013-02-27 20:06:00 -0500
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2013-02-27 22:10:24 -0500
commit | b67bfe0d42cac56c512dd5da4b1b347a23f4b70a (patch)
tree | 3d465aea12b97683f26ffa38eba8744469de9997 /net
parent | 1e142b29e210b5dfb2deeb6ce2210b60af16d2a6 (diff)
hlist: drop the node parameter from iterators
I'm not sure why, but while the list for-each-entry iterator was conceived as

    list_for_each_entry(pos, head, member)

the hlist ones were greedy and wanted an extra parameter:

    hlist_for_each_entry(tpos, pos, head, member)

Why did they need an extra pos parameter? I'm not quite sure. Not only do they not really need it, it also prevents the iterator from looking exactly like the list iterator, which is unfortunate.
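To make the change concrete, here is the shape of a converted call site (a condensed sketch based on the net/9p/error.c hunk further down; the lookup is rewritten in return style for brevity):

    /* Before: a separate hlist_node cursor must be threaded through. */
    struct errormap *c;
    struct hlist_node *p;

    hlist_for_each_entry(c, p, &hash_errmap[bucket], list) {
        if (c->namelen == len && !memcmp(c->name, errstr, len))
            return c->val;
    }

    /* After: the entry pointer itself is the cursor, mirroring
     * list_for_each_entry(pos, head, member); no hlist_node needed.
     */
    hlist_for_each_entry(c, &hash_errmap[bucket], list) {
        if (c->namelen == len && !memcmp(c->name, errstr, len))
            return c->val;
    }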
Besides the semantic patch, there was some manual work required:
- Fix up the actual hlist iterators in linux/list.h (a sketch of the reworked macro follows this list).
- Fix up the declarations of other iterators based on the hlist ones.
- A small number of places were using the 'node' parameter directly; these were modified to use 'obj->member' instead.
- Coccinelle didn't handle the hlist_for_each_entry_safe iterator properly, so those had to be fixed up manually.
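For reference, the reworked hlist_for_each_entry in linux/list.h ends up shaped roughly like this (a sketch rather than the verbatim kernel macro; it assumes an hlist_entry_safe() helper that maps a NULL node pointer to a NULL entry pointer):

    /* NULL-tolerant hlist_entry(): stops the loop cleanly at list end. */
    #define hlist_entry_safe(ptr, type, member) \
        ({ typeof(ptr) ____ptr = (ptr); \
           ____ptr ? hlist_entry(____ptr, type, member) : NULL; \
        })

    #define hlist_for_each_entry(pos, head, member) \
        for (pos = hlist_entry_safe((head)->first, typeof(*(pos)), member); \
             pos; \
             pos = hlist_entry_safe((pos)->member.next, typeof(*(pos)), member))

The statement expression evaluates ptr only once, so the next-pointer expression is not computed twice per step.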
The semantic patch, which is mostly the work of Peter Senna Tschudin, is here:
@@
iterator name hlist_for_each_entry, hlist_for_each_entry_continue, hlist_for_each_entry_from, hlist_for_each_entry_rcu, hlist_for_each_entry_rcu_bh, hlist_for_each_entry_continue_rcu_bh, for_each_busy_worker, ax25_uid_for_each, ax25_for_each, inet_bind_bucket_for_each, sctp_for_each_hentry, sk_for_each, sk_for_each_rcu, sk_for_each_from, sk_for_each_safe, sk_for_each_bound, hlist_for_each_entry_safe, hlist_for_each_entry_continue_rcu, nr_neigh_for_each, nr_neigh_for_each_safe, nr_node_for_each, nr_node_for_each_safe, for_each_gfn_indirect_valid_sp, for_each_gfn_sp, for_each_host, for_each_host_safe, for_each_mesh_entry;
type T;
expression a,c,d,e;
identifier b;
statement S;
@@
-T b;
<+... when != b
(
hlist_for_each_entry(a,
- b,
c, d) S
|
hlist_for_each_entry_continue(a,
- b,
c) S
|
hlist_for_each_entry_from(a,
- b,
c) S
|
hlist_for_each_entry_rcu(a,
- b,
c, d) S
|
hlist_for_each_entry_rcu_bh(a,
- b,
c, d) S
|
hlist_for_each_entry_continue_rcu_bh(a,
- b,
c) S
|
for_each_busy_worker(a, c,
- b,
d) S
|
ax25_uid_for_each(a,
- b,
c) S
|
ax25_for_each(a,
- b,
c) S
|
inet_bind_bucket_for_each(a,
- b,
c) S
|
sctp_for_each_hentry(a,
- b,
c) S
|
sk_for_each(a,
- b,
c) S
|
sk_for_each_rcu(a,
- b,
c) S
|
sk_for_each_from
-(a, b)
+(a)
S
+ sk_for_each_from(a) S
|
sk_for_each_safe(a,
- b,
c, d) S
|
sk_for_each_bound(a,
- b,
c) S
|
hlist_for_each_entry_safe(a,
- b,
c, d, e) S
|
hlist_for_each_entry_continue_rcu(a,
- b,
c) S
|
nr_neigh_for_each(a,
- b,
c) S
|
nr_neigh_for_each_safe(a,
- b,
c, d) S
|
nr_node_for_each(a,
- b,
c) S
|
nr_node_for_each_safe(a,
- b,
c, d) S
|
- for_each_gfn_sp(a, c, d, b) S
+ for_each_gfn_sp(a, c, d) S
|
- for_each_gfn_indirect_valid_sp(a, c, d, b) S
+ for_each_gfn_indirect_valid_sp(a, c, d) S
|
for_each_host(a,
- b,
c) S
|
for_each_host_safe(a,
- b,
c, d) S
|
for_each_mesh_entry(a,
- b,
c, d) S
)
...+>
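A semantic patch of this form is normally applied tree-wide with Coccinelle's spatch driver; an invocation along these lines would do it (the .cocci file name here is made up, and option spelling varies between Coccinelle releases, so treat this as an assumed example):

    spatch --sp-file drop_hlist_node.cocci --in-place --dir net/

Without --in-place, spatch prints the proposed diff to stdout instead of rewriting the files, which is handy for review.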
[akpm@linux-foundation.org: drop bogus change from net/ipv4/raw.c]
[akpm@linux-foundation.org: drop bogus hunk from net/ipv6/raw.c]
[akpm@linux-foundation.org: checkpatch fixes]
[akpm@linux-foundation.org: fix warnings]
[akpm@linux-foundation.org: redo intrusive kvm changes]
Tested-by: Peter Senna Tschudin <peter.senna@gmail.com>
Acked-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Signed-off-by: Sasha Levin <sasha.levin@oracle.com>
Cc: Wu Fengguang <fengguang.wu@intel.com>
Cc: Marcelo Tosatti <mtosatti@redhat.com>
Cc: Gleb Natapov <gleb@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'net')
110 files changed, 555 insertions, 870 deletions
diff --git a/net/9p/error.c b/net/9p/error.c
index 2ab2de76010f..126fd0dceea2 100644
--- a/net/9p/error.c
+++ b/net/9p/error.c
@@ -221,15 +221,13 @@ EXPORT_SYMBOL(p9_error_init);
 int p9_errstr2errno(char *errstr, int len)
 {
 	int errno;
-	struct hlist_node *p;
 	struct errormap *c;
 	int bucket;
 
 	errno = 0;
-	p = NULL;
 	c = NULL;
 	bucket = jhash(errstr, len, 0) % ERRHASHSZ;
-	hlist_for_each_entry(c, p, &hash_errmap[bucket], list) {
+	hlist_for_each_entry(c, &hash_errmap[bucket], list) {
 		if (c->namelen == len && !memcmp(c->name, errstr, len)) {
 			errno = c->val;
 			break;
diff --git a/net/appletalk/ddp.c b/net/appletalk/ddp.c
index 33475291c9c1..4a141e3cf076 100644
--- a/net/appletalk/ddp.c
+++ b/net/appletalk/ddp.c
@@ -93,10 +93,9 @@ static struct sock *atalk_search_socket(struct sockaddr_at *to,
 					struct atalk_iface *atif)
 {
 	struct sock *s;
-	struct hlist_node *node;
 
 	read_lock_bh(&atalk_sockets_lock);
-	sk_for_each(s, node, &atalk_sockets) {
+	sk_for_each(s, &atalk_sockets) {
 		struct atalk_sock *at = at_sk(s);
 
 		if (to->sat_port != at->src_port)
@@ -141,11 +140,10 @@ static struct sock *atalk_find_or_insert_socket(struct sock *sk,
 						struct sockaddr_at *sat)
 {
 	struct sock *s;
-	struct hlist_node *node;
 	struct atalk_sock *at;
 
 	write_lock_bh(&atalk_sockets_lock);
-	sk_for_each(s, node, &atalk_sockets) {
+	sk_for_each(s, &atalk_sockets) {
 		at = at_sk(s);
 
 		if (at->src_net == sat->sat_addr.s_net &&
@@ -1084,9 +1082,8 @@ static int atalk_pick_and_bind_port(struct sock *sk, struct sockaddr_at *sat)
 	     sat->sat_port < ATPORT_LAST;
 	     sat->sat_port++) {
 		struct sock *s;
-		struct hlist_node *node;
 
-		sk_for_each(s, node, &atalk_sockets) {
+		sk_for_each(s, &atalk_sockets) {
 			struct atalk_sock *at = at_sk(s);
 
 			if (at->src_net == sat->sat_addr.s_net &&
diff --git a/net/atm/common.c b/net/atm/common.c
index 806fc0a40051..7b491006eaf4 100644
--- a/net/atm/common.c
+++ b/net/atm/common.c
@@ -270,11 +270,11 @@ void atm_dev_release_vccs(struct atm_dev *dev)
 	write_lock_irq(&vcc_sklist_lock);
 	for (i = 0; i < VCC_HTABLE_SIZE; i++) {
 		struct hlist_head *head = &vcc_hash[i];
-		struct hlist_node *node, *tmp;
+		struct hlist_node *tmp;
 		struct sock *s;
 		struct atm_vcc *vcc;
 
-		sk_for_each_safe(s, node, tmp, head) {
+		sk_for_each_safe(s, tmp, head) {
 			vcc = atm_sk(s);
 			if (vcc->dev == dev) {
 				vcc_release_async(vcc, -EPIPE);
@@ -317,11 +317,10 @@ static int adjust_tp(struct atm_trafprm *tp, unsigned char aal)
 static int check_ci(const struct atm_vcc *vcc, short vpi, int vci)
 {
 	struct hlist_head *head = &vcc_hash[vci & (VCC_HTABLE_SIZE - 1)];
-	struct hlist_node *node;
 	struct sock *s;
 	struct atm_vcc *walk;
 
-	sk_for_each(s, node, head) {
+	sk_for_each(s, head) {
 		walk = atm_sk(s);
 		if (walk->dev != vcc->dev)
 			continue;
diff --git a/net/atm/lec.c b/net/atm/lec.c
index 2e3d942e77f1..f23916be18fb 100644
--- a/net/atm/lec.c
+++ b/net/atm/lec.c
@@ -842,7 +842,9 @@ static void *lec_tbl_walk(struct lec_state *state, struct hlist_head *tbl,
 		--*l;
 	}
 
-	hlist_for_each_entry_from(tmp, e, next) {
+	tmp = container_of(e, struct lec_arp_table, next);
+
+	hlist_for_each_entry_from(tmp, next) {
 		if (--*l < 0)
 			break;
 	}
@@ -1307,7 +1309,6 @@ lec_arp_add(struct lec_priv *priv, struct lec_arp_table *entry)
 static int
 lec_arp_remove(struct lec_priv *priv, struct lec_arp_table *to_remove)
 {
-	struct hlist_node *node;
 	struct lec_arp_table *entry;
 	int i, remove_vcc = 1;
 
@@ -1326,7 +1327,7 @@ lec_arp_remove(struct lec_priv *priv, struct lec_arp_table *to_remove)
 	 * ESI_FLUSH_PENDING, ESI_FORWARD_DIRECT
 	 */
 	for (i = 0; i < LEC_ARP_TABLE_SIZE; i++) {
-		hlist_for_each_entry(entry, node,
+		hlist_for_each_entry(entry,
 				     &priv->lec_arp_tables[i], next) {
 			if (memcmp(to_remove->atm_addr,
 				   entry->atm_addr, ATM_ESA_LEN) == 0) {
@@ -1364,14 +1365,13 @@ static const char *get_status_string(unsigned char st)
 
 static void dump_arp_table(struct lec_priv *priv)
 {
-	struct hlist_node *node;
 	struct lec_arp_table *rulla;
 	char buf[256];
 	int i, j, offset;
 
 	pr_info("Dump %p:\n", priv);
 	for (i = 0; i < LEC_ARP_TABLE_SIZE; i++) {
-		hlist_for_each_entry(rulla, node,
+		hlist_for_each_entry(rulla,
 				     &priv->lec_arp_tables[i], next) {
 			offset = 0;
 			offset += sprintf(buf, "%d: %p\n", i, rulla);
@@ -1403,7 +1403,7 @@ static void dump_arp_table(struct lec_priv *priv)
 
 	if (!hlist_empty(&priv->lec_no_forward))
 		pr_info("No forward\n");
-	hlist_for_each_entry(rulla, node, &priv->lec_no_forward, next) {
+	hlist_for_each_entry(rulla, &priv->lec_no_forward, next) {
 		offset = 0;
 		offset += sprintf(buf + offset, "Mac: %pM", rulla->mac_addr);
 		offset += sprintf(buf + offset, " Atm:");
@@ -1428,7 +1428,7 @@ static void dump_arp_table(struct lec_priv *priv)
 
 	if (!hlist_empty(&priv->lec_arp_empty_ones))
 		pr_info("Empty ones\n");
-	hlist_for_each_entry(rulla, node, &priv->lec_arp_empty_ones, next) {
+	hlist_for_each_entry(rulla, &priv->lec_arp_empty_ones, next) {
 		offset = 0;
 		offset += sprintf(buf + offset, "Mac: %pM", rulla->mac_addr);
 		offset += sprintf(buf + offset, " Atm:");
@@ -1453,7 +1453,7 @@ static void dump_arp_table(struct lec_priv *priv)
 
 	if (!hlist_empty(&priv->mcast_fwds))
 		pr_info("Multicast Forward VCCs\n");
-	hlist_for_each_entry(rulla, node, &priv->mcast_fwds, next) {
+	hlist_for_each_entry(rulla, &priv->mcast_fwds, next) {
 		offset = 0;
 		offset += sprintf(buf + offset, "Mac: %pM", rulla->mac_addr);
 		offset += sprintf(buf + offset, " Atm:");
@@ -1487,7 +1487,7 @@ static void dump_arp_table(struct lec_priv *priv)
 static void lec_arp_destroy(struct lec_priv *priv)
 {
 	unsigned long flags;
-	struct hlist_node *node, *next;
+	struct hlist_node *next;
 	struct lec_arp_table *entry;
 	int i;
 
@@ -1499,7 +1499,7 @@ static void lec_arp_destroy(struct lec_priv *priv)
 
 	spin_lock_irqsave(&priv->lec_arp_lock, flags);
 	for (i = 0; i < LEC_ARP_TABLE_SIZE; i++) {
-		hlist_for_each_entry_safe(entry, node, next,
+		hlist_for_each_entry_safe(entry, next,
 					  &priv->lec_arp_tables[i], next) {
 			lec_arp_remove(priv, entry);
 			lec_arp_put(entry);
@@ -1507,7 +1507,7 @@ static void lec_arp_destroy(struct lec_priv *priv)
 		INIT_HLIST_HEAD(&priv->lec_arp_tables[i]);
 	}
 
-	hlist_for_each_entry_safe(entry, node, next,
+	hlist_for_each_entry_safe(entry, next,
 				  &priv->lec_arp_empty_ones, next) {
 		del_timer_sync(&entry->timer);
 		lec_arp_clear_vccs(entry);
@@ -1516,7 +1516,7 @@ static void lec_arp_destroy(struct lec_priv *priv)
 	}
 	INIT_HLIST_HEAD(&priv->lec_arp_empty_ones);
 
-	hlist_for_each_entry_safe(entry, node, next,
+	hlist_for_each_entry_safe(entry, next,
 				  &priv->lec_no_forward, next) {
 		del_timer_sync(&entry->timer);
 		lec_arp_clear_vccs(entry);
@@ -1525,7 +1525,7 @@ static void lec_arp_destroy(struct lec_priv *priv)
 	}
 	INIT_HLIST_HEAD(&priv->lec_no_forward);
 
-	hlist_for_each_entry_safe(entry, node, next, &priv->mcast_fwds, next) {
+	hlist_for_each_entry_safe(entry, next, &priv->mcast_fwds, next) {
 		/* No timer, LANEv2 7.1.20 and 2.3.5.3 */
 		lec_arp_clear_vccs(entry);
 		hlist_del(&entry->next);
@@ -1542,14 +1542,13 @@ static void lec_arp_destroy(struct lec_priv *priv)
 static struct lec_arp_table *lec_arp_find(struct lec_priv *priv,
 					  const unsigned char *mac_addr)
 {
-	struct hlist_node *node;
 	struct hlist_head *head;
 	struct lec_arp_table *entry;
 
 	pr_debug("%pM\n", mac_addr);
 
 	head = &priv->lec_arp_tables[HASH(mac_addr[ETH_ALEN - 1])];
-	hlist_for_each_entry(entry, node, head, next) {
+	hlist_for_each_entry(entry, head, next) {
 		if (ether_addr_equal(mac_addr, entry->mac_addr))
 			return entry;
 	}
@@ -1686,7 +1685,7 @@ static void lec_arp_check_expire(struct work_struct *work)
 	unsigned long flags;
 	struct lec_priv *priv =
 		container_of(work, struct lec_priv, lec_arp_work.work);
-	struct hlist_node *node, *next;
+	struct hlist_node *next;
 	struct lec_arp_table *entry;
 	unsigned long now;
 	int i;
@@ -1696,7 +1695,7 @@ static void lec_arp_check_expire(struct work_struct *work)
 restart:
 	spin_lock_irqsave(&priv->lec_arp_lock, flags);
 	for (i = 0; i < LEC_ARP_TABLE_SIZE; i++) {
-		hlist_for_each_entry_safe(entry, node, next,
+		hlist_for_each_entry_safe(entry, next,
 					  &priv->lec_arp_tables[i], next) {
 			if (__lec_arp_check_expire(entry, now, priv)) {
 				struct sk_buff *skb;
@@ -1823,14 +1822,14 @@ lec_addr_delete(struct lec_priv *priv, const unsigned char *atm_addr,
 		unsigned long permanent)
 {
 	unsigned long flags;
-	struct hlist_node *node, *next;
+	struct hlist_node *next;
 	struct lec_arp_table *entry;
 	int i;
 
 	pr_debug("\n");
 	spin_lock_irqsave(&priv->lec_arp_lock, flags);
 	for (i = 0; i < LEC_ARP_TABLE_SIZE; i++) {
-		hlist_for_each_entry_safe(entry, node, next,
+		hlist_for_each_entry_safe(entry, next,
 					  &priv->lec_arp_tables[i], next) {
 			if (!memcmp(atm_addr, entry->atm_addr, ATM_ESA_LEN) &&
 			    (permanent ||
@@ -1855,7 +1854,7 @@ lec_arp_update(struct lec_priv *priv, const unsigned char *mac_addr,
 	       unsigned int targetless_le_arp)
 {
 	unsigned long flags;
-	struct hlist_node *node, *next;
+	struct hlist_node *next;
 	struct lec_arp_table *entry, *tmp;
 	int i;
 
@@ -1870,7 +1869,7 @@ lec_arp_update(struct lec_priv *priv, const unsigned char *mac_addr,
 	 * we have no entry in the cache. 7.1.30
 	 */
 	if (!hlist_empty(&priv->lec_arp_empty_ones)) {
-		hlist_for_each_entry_safe(entry, node, next,
+		hlist_for_each_entry_safe(entry, next,
 					  &priv->lec_arp_empty_ones, next) {
 			if (memcmp(entry->atm_addr, atm_addr, ATM_ESA_LEN) == 0) {
 				hlist_del(&entry->next);
@@ -1915,7 +1914,7 @@ lec_arp_update(struct lec_priv *priv, const unsigned char *mac_addr,
 	memcpy(entry->atm_addr, atm_addr, ATM_ESA_LEN);
 	del_timer(&entry->timer);
 	for (i = 0; i < LEC_ARP_TABLE_SIZE; i++) {
-		hlist_for_each_entry(tmp, node,
+		hlist_for_each_entry(tmp,
 				     &priv->lec_arp_tables[i], next) {
 			if (entry != tmp &&
 			    !memcmp(tmp->atm_addr, atm_addr, ATM_ESA_LEN)) {
@@ -1956,7 +1955,6 @@ lec_vcc_added(struct lec_priv *priv, const struct atmlec_ioc *ioc_data,
 	      void (*old_push) (struct atm_vcc *vcc, struct sk_buff *skb))
 {
 	unsigned long flags;
-	struct hlist_node *node;
 	struct lec_arp_table *entry;
 	int i, found_entry = 0;
 
@@ -2026,7 +2024,7 @@ lec_vcc_added(struct lec_priv *priv, const struct atmlec_ioc *ioc_data,
 		 ioc_data->atm_addr[16], ioc_data->atm_addr[17],
 		 ioc_data->atm_addr[18], ioc_data->atm_addr[19]);
 	for (i = 0; i < LEC_ARP_TABLE_SIZE; i++) {
-		hlist_for_each_entry(entry, node,
+		hlist_for_each_entry(entry,
 				     &priv->lec_arp_tables[i], next) {
 			if (memcmp
 			    (ioc_data->atm_addr, entry->atm_addr,
@@ -2103,7 +2101,6 @@ out:
 static void lec_flush_complete(struct lec_priv *priv, unsigned long tran_id)
 {
 	unsigned long flags;
-	struct hlist_node *node;
 	struct lec_arp_table *entry;
 	int i;
 
@@ -2111,7 +2108,7 @@ static void lec_flush_complete(struct lec_priv *priv, unsigned long tran_id)
 restart:
 	spin_lock_irqsave(&priv->lec_arp_lock, flags);
 	for (i = 0; i < LEC_ARP_TABLE_SIZE; i++) {
-		hlist_for_each_entry(entry, node,
+		hlist_for_each_entry(entry,
 				     &priv->lec_arp_tables[i], next) {
 			if (entry->flush_tran_id == tran_id &&
 			    entry->status == ESI_FLUSH_PENDING) {
@@ -2140,13 +2137,12 @@ lec_set_flush_tran_id(struct lec_priv *priv,
 		      const unsigned char *atm_addr, unsigned long tran_id)
 {
 	unsigned long flags;
-	struct hlist_node *node;
 	struct lec_arp_table *entry;
 	int i;
 
 	spin_lock_irqsave(&priv->lec_arp_lock, flags);
 	for (i = 0; i < LEC_ARP_TABLE_SIZE; i++)
-		hlist_for_each_entry(entry, node,
+		hlist_for_each_entry(entry,
 				     &priv->lec_arp_tables[i], next) {
 			if (!memcmp(atm_addr, entry->atm_addr, ATM_ESA_LEN)) {
 				entry->flush_tran_id = tran_id;
@@ -2198,7 +2194,7 @@ out:
 static void lec_vcc_close(struct lec_priv *priv, struct atm_vcc *vcc)
 {
 	unsigned long flags;
-	struct hlist_node *node, *next;
+	struct hlist_node *next;
 	struct lec_arp_table *entry;
 	int i;
 
@@ -2208,7 +2204,7 @@ static void lec_vcc_close(struct lec_priv *priv, struct atm_vcc *vcc)
 	spin_lock_irqsave(&priv->lec_arp_lock, flags);
 
 	for (i = 0; i < LEC_ARP_TABLE_SIZE; i++) {
-		hlist_for_each_entry_safe(entry, node, next,
+		hlist_for_each_entry_safe(entry, next,
 					  &priv->lec_arp_tables[i], next) {
 			if (vcc == entry->vcc) {
 				lec_arp_remove(priv, entry);
@@ -2219,7 +2215,7 @@ static void lec_vcc_close(struct lec_priv *priv, struct atm_vcc *vcc)
 		}
 	}
 
-	hlist_for_each_entry_safe(entry, node, next,
+	hlist_for_each_entry_safe(entry, next,
 				  &priv->lec_arp_empty_ones, next) {
 		if (entry->vcc == vcc) {
 			lec_arp_clear_vccs(entry);
@@ -2229,7 +2225,7 @@ static void lec_vcc_close(struct lec_priv *priv, struct atm_vcc *vcc)
 		}
 	}
 
-	hlist_for_each_entry_safe(entry, node, next,
+	hlist_for_each_entry_safe(entry, next,
 				  &priv->lec_no_forward, next) {
 		if (entry->recv_vcc == vcc) {
 			lec_arp_clear_vccs(entry);
@@ -2239,7 +2235,7 @@ static void lec_vcc_close(struct lec_priv *priv, struct atm_vcc *vcc)
 		}
 	}
 
-	hlist_for_each_entry_safe(entry, node, next, &priv->mcast_fwds, next) {
+	hlist_for_each_entry_safe(entry, next, &priv->mcast_fwds, next) {
 		if (entry->recv_vcc == vcc) {
 			lec_arp_clear_vccs(entry);
 			/* No timer, LANEv2 7.1.20 and 2.3.5.3 */
@@ -2257,13 +2253,13 @@ lec_arp_check_empties(struct lec_priv *priv,
 		      struct atm_vcc *vcc, struct sk_buff *skb)
 {
 	unsigned long flags;
-	struct hlist_node *node, *next;
+	struct hlist_node *next;
 	struct lec_arp_table *entry, *tmp;
 	struct lecdatahdr_8023 *hdr = (struct lecdatahdr_8023 *)skb->data;
 	unsigned char *src = hdr->h_source;
 
	spin_lock_irqsave(&priv->lec_arp_lock, flags);
-	hlist_for_each_entry_safe(entry, node, next,
+	hlist_for_each_entry_safe(entry, next,
 				  &priv->lec_arp_empty_ones, next) {
 		if (vcc == entry->vcc) {
 			del_timer(&entry->timer);
diff --git a/net/atm/signaling.c b/net/atm/signaling.c
index 86767ca908a3..4176887e72eb 100644
--- a/net/atm/signaling.c
+++ b/net/atm/signaling.c
@@ -217,7 +217,6 @@ static void purge_vcc(struct atm_vcc *vcc)
 
 static void sigd_close(struct atm_vcc *vcc)
 {
-	struct hlist_node *node;
 	struct sock *s;
 	int i;
 
@@ -231,7 +230,7 @@ static void sigd_close(struct atm_vcc *vcc)
 	for (i = 0; i < VCC_HTABLE_SIZE; ++i) {
 		struct hlist_head *head = &vcc_hash[i];
 
-		sk_for_each(s, node, head) {
+		sk_for_each(s, head) {
 			vcc = atm_sk(s);
 
 			purge_vcc(vcc);
diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c
index 69a06c47b648..7b11f8bc5071 100644
--- a/net/ax25/af_ax25.c
+++ b/net/ax25/af_ax25.c
@@ -81,14 +81,13 @@ static void ax25_kill_by_device(struct net_device *dev)
 {
 	ax25_dev *ax25_dev;
 	ax25_cb *s;
-	struct hlist_node *node;
 
 	if ((ax25_dev = ax25_dev_ax25dev(dev)) == NULL)
 		return;
 
 	spin_lock_bh(&ax25_list_lock);
again:
-	ax25_for_each(s, node, &ax25_list) {
+	ax25_for_each(s, &ax25_list) {
 		if (s->ax25_dev == ax25_dev) {
 			s->ax25_dev = NULL;
 			spin_unlock_bh(&ax25_list_lock);
@@ -158,10 +157,9 @@ struct sock *ax25_find_listener(ax25_address *addr, int digi,
 			       struct net_device *dev, int type)
 {
 	ax25_cb *s;
-	struct hlist_node *node;
 
 	spin_lock(&ax25_list_lock);
-	ax25_for_each(s, node, &ax25_list) {
+	ax25_for_each(s, &ax25_list) {
 		if ((s->iamdigi && !digi) || (!s->iamdigi && digi))
 			continue;
 		if (s->sk && !ax25cmp(&s->source_addr, addr) &&
@@ -187,10 +185,9 @@ struct sock *ax25_get_socket(ax25_address *my_addr, ax25_address *dest_addr,
 {
 	struct sock *sk = NULL;
 	ax25_cb *s;
-	struct hlist_node *node;
 
 	spin_lock(&ax25_list_lock);
-	ax25_for_each(s, node, &ax25_list) {
+	ax25_for_each(s, &ax25_list) {
 		if (s->sk && !ax25cmp(&s->source_addr, my_addr) &&
 		    !ax25cmp(&s->dest_addr, dest_addr) &&
 		    s->sk->sk_type == type) {
@@ -213,10 +210,9 @@ ax25_cb *ax25_find_cb(ax25_address *src_addr, ax25_address *dest_addr,
 			ax25_digi *digi, struct net_device *dev)
 {
 	ax25_cb *s;
-	struct hlist_node *node;
 
 	spin_lock_bh(&ax25_list_lock);
-	ax25_for_each(s, node, &ax25_list) {
+	ax25_for_each(s, &ax25_list) {
 		if (s->sk && s->sk->sk_type != SOCK_SEQPACKET)
 			continue;
 		if (s->ax25_dev == NULL)
@@ -248,10 +244,9 @@ void ax25_send_to_raw(ax25_address *addr, struct sk_buff *skb, int proto)
 {
 	ax25_cb *s;
 	struct sk_buff *copy;
-	struct hlist_node *node;
 
 	spin_lock(&ax25_list_lock);
-	ax25_for_each(s, node, &ax25_list) {
+	ax25_for_each(s, &ax25_list) {
 		if (s->sk != NULL && ax25cmp(&s->source_addr, addr) == 0 &&
 		    s->sk->sk_type == SOCK_RAW &&
 		    s->sk->sk_protocol == proto &&
diff --git a/net/ax25/ax25_ds_subr.c b/net/ax25/ax25_ds_subr.c
index 5ea7fd3e2af9..e05bd57b5afd 100644
--- a/net/ax25/ax25_ds_subr.c
+++ b/net/ax25/ax25_ds_subr.c
@@ -39,7 +39,6 @@ void ax25_ds_nr_error_recovery(ax25_cb *ax25)
 void ax25_ds_enquiry_response(ax25_cb *ax25)
 {
 	ax25_cb *ax25o;
-	struct hlist_node *node;
 
 	/* Please note that neither DK4EG's nor DG2FEF's
 	 * DAMA spec mention the following behaviour as seen
@@ -80,7 +79,7 @@ void ax25_ds_enquiry_response(ax25_cb *ax25)
 		ax25_ds_set_timer(ax25->ax25_dev);
 
 	spin_lock(&ax25_list_lock);
-	ax25_for_each(ax25o, node, &ax25_list) {
+	ax25_for_each(ax25o, &ax25_list) {
 		if (ax25o == ax25)
 			continue;
 
@@ -159,10 +158,9 @@ static int ax25_check_dama_slave(ax25_dev *ax25_dev)
 {
 	ax25_cb *ax25;
 	int res = 0;
-	struct hlist_node *node;
 
 	spin_lock(&ax25_list_lock);
-	ax25_for_each(ax25, node, &ax25_list)
+	ax25_for_each(ax25, &ax25_list)
 		if (ax25->ax25_dev == ax25_dev && (ax25->condition & AX25_COND_DAMA_MODE) && ax25->state > AX25_STATE_1) {
 			res = 1;
 			break;
diff --git a/net/ax25/ax25_ds_timer.c b/net/ax25/ax25_ds_timer.c
index 993c439b4f71..951cd57bb07d 100644
--- a/net/ax25/ax25_ds_timer.c
+++ b/net/ax25/ax25_ds_timer.c
@@ -70,7 +70,6 @@ static void ax25_ds_timeout(unsigned long arg)
 {
 	ax25_dev *ax25_dev = (struct ax25_dev *) arg;
 	ax25_cb *ax25;
-	struct hlist_node *node;
 
 	if (ax25_dev == NULL || !ax25_dev->dama.slave)
 		return;	/* Yikes! */
@@ -81,7 +80,7 @@ static void ax25_ds_timeout(unsigned long arg)
 	}
 
 	spin_lock(&ax25_list_lock);
-	ax25_for_each(ax25, node, &ax25_list) {
+	ax25_for_each(ax25, &ax25_list) {
 		if (ax25->ax25_dev != ax25_dev || !(ax25->condition & AX25_COND_DAMA_MODE))
 			continue;
 
diff --git a/net/ax25/ax25_iface.c b/net/ax25/ax25_iface.c
index 7d5f24b82cc8..7f16e8a931b2 100644
--- a/net/ax25/ax25_iface.c
+++ b/net/ax25/ax25_iface.c
@@ -193,10 +193,9 @@ int ax25_listen_mine(ax25_address *callsign, struct net_device *dev)
 void ax25_link_failed(ax25_cb *ax25, int reason)
 {
 	struct ax25_linkfail *lf;
-	struct hlist_node *node;
 
 	spin_lock_bh(&linkfail_lock);
-	hlist_for_each_entry(lf, node, &ax25_linkfail_list, lf_node)
+	hlist_for_each_entry(lf, &ax25_linkfail_list, lf_node)
 		lf->func(ax25, reason);
 	spin_unlock_bh(&linkfail_lock);
 }
diff --git a/net/ax25/ax25_uid.c b/net/ax25/ax25_uid.c
index 957999e43ff7..71c4badbc807 100644
--- a/net/ax25/ax25_uid.c
+++ b/net/ax25/ax25_uid.c
@@ -54,10 +54,9 @@ EXPORT_SYMBOL(ax25_uid_policy);
 ax25_uid_assoc *ax25_findbyuid(kuid_t uid)
 {
 	ax25_uid_assoc *ax25_uid, *res = NULL;
-	struct hlist_node *node;
 
 	read_lock(&ax25_uid_lock);
-	ax25_uid_for_each(ax25_uid, node, &ax25_uid_list) {
+	ax25_uid_for_each(ax25_uid, &ax25_uid_list) {
 		if (uid_eq(ax25_uid->uid, uid)) {
 			ax25_uid_hold(ax25_uid);
 			res = ax25_uid;
@@ -74,7 +73,6 @@ EXPORT_SYMBOL(ax25_findbyuid);
 int ax25_uid_ioctl(int cmd, struct sockaddr_ax25 *sax)
 {
 	ax25_uid_assoc *ax25_uid;
-	struct hlist_node *node;
 	ax25_uid_assoc *user;
 	unsigned long res;
 
@@ -82,7 +80,7 @@ int ax25_uid_ioctl(int cmd, struct sockaddr_ax25 *sax)
 	case SIOCAX25GETUID:
 		res = -ENOENT;
 		read_lock(&ax25_uid_lock);
-		ax25_uid_for_each(ax25_uid, node, &ax25_uid_list) {
+		ax25_uid_for_each(ax25_uid, &ax25_uid_list) {
 			if (ax25cmp(&sax->sax25_call, &ax25_uid->call) == 0) {
 				res = from_kuid_munged(current_user_ns(), ax25_uid->uid);
 				break;
@@ -126,7 +124,7 @@ int ax25_uid_ioctl(int cmd, struct sockaddr_ax25 *sax)
 
 		ax25_uid = NULL;
 		write_lock(&ax25_uid_lock);
-		ax25_uid_for_each(ax25_uid, node, &ax25_uid_list) {
+		ax25_uid_for_each(ax25_uid, &ax25_uid_list) {
 			if (ax25cmp(&sax->sax25_call, &ax25_uid->call) == 0)
 				break;
 		}
@@ -212,11 +210,10 @@ const struct file_operations ax25_uid_fops = {
 void __exit ax25_uid_free(void)
 {
 	ax25_uid_assoc *ax25_uid;
-	struct hlist_node *node;
 
 	write_lock(&ax25_uid_lock);
again:
-	ax25_uid_for_each(ax25_uid, node, &ax25_uid_list) {
+	ax25_uid_for_each(ax25_uid, &ax25_uid_list) {
 		hlist_del_init(&ax25_uid->uid_node);
 		ax25_uid_put(ax25_uid);
 		goto again;
diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c
index 72fe1bbf7721..a0b253ecadaf 100644
--- a/net/batman-adv/bat_iv_ogm.c
+++ b/net/batman-adv/bat_iv_ogm.c
@@ -487,7 +487,6 @@ static void batadv_iv_ogm_queue_add(struct batadv_priv *bat_priv,
 	 */
 	struct batadv_forw_packet *forw_packet_aggr = NULL;
 	struct batadv_forw_packet *forw_packet_pos = NULL;
-	struct hlist_node *tmp_node;
 	struct batadv_ogm_packet *batadv_ogm_packet;
 	bool direct_link;
 	unsigned long max_aggregation_jiffies;
@@ -500,7 +499,7 @@ static void batadv_iv_ogm_queue_add(struct batadv_priv *bat_priv,
 	spin_lock_bh(&bat_priv->forw_bat_list_lock);
 	/* own packets are not to be aggregated */
 	if ((atomic_read(&bat_priv->aggregated_ogms)) && (!own_packet)) {
-		hlist_for_each_entry(forw_packet_pos, tmp_node,
+		hlist_for_each_entry(forw_packet_pos,
 				     &bat_priv->forw_bat_list, list) {
 			if (batadv_iv_ogm_can_aggregate(batadv_ogm_packet,
 							bat_priv, packet_len,
@@ -655,7 +654,6 @@ batadv_iv_ogm_orig_update(struct batadv_priv *bat_priv,
 	struct batadv_neigh_node *neigh_node = NULL, *tmp_neigh_node = NULL;
 	struct batadv_neigh_node *router = NULL;
 	struct batadv_orig_node *orig_node_tmp;
-	struct hlist_node *node;
 	int if_num;
 	uint8_t sum_orig, sum_neigh;
 	uint8_t *neigh_addr;
@@ -665,7 +663,7 @@ batadv_iv_ogm_orig_update(struct batadv_priv *bat_priv,
 		   "update_originator(): Searching and updating originator entry of received packet\n");
 
 	rcu_read_lock();
-	hlist_for_each_entry_rcu(tmp_neigh_node, node,
+	hlist_for_each_entry_rcu(tmp_neigh_node,
 				 &orig_node->neigh_list, list) {
 		neigh_addr = tmp_neigh_node->addr;
 		if (batadv_compare_eth(neigh_addr, ethhdr->h_source) &&
@@ -801,7 +799,6 @@ static int batadv_iv_ogm_calc_tq(struct batadv_orig_node *orig_node,
 {
 	struct batadv_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
 	struct batadv_neigh_node *neigh_node = NULL, *tmp_neigh_node;
-	struct hlist_node *node;
 	uint8_t total_count;
 	uint8_t orig_eq_count, neigh_rq_count, neigh_rq_inv, tq_own;
 	unsigned int neigh_rq_inv_cube, neigh_rq_max_cube;
@@ -810,7 +807,7 @@ static int batadv_iv_ogm_calc_tq(struct batadv_orig_node *orig_node,
 
 	/* find corresponding one hop neighbor */
 	rcu_read_lock();
-	hlist_for_each_entry_rcu(tmp_neigh_node, node,
+	hlist_for_each_entry_rcu(tmp_neigh_node,
 				 &orig_neigh_node->neigh_list, list) {
 		if (!batadv_compare_eth(tmp_neigh_node->addr,
 					orig_neigh_node->orig))
@@ -920,7 +917,6 @@ batadv_iv_ogm_update_seqnos(const struct ethhdr *ethhdr,
 	struct batadv_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
 	struct batadv_orig_node *orig_node;
 	struct batadv_neigh_node *tmp_neigh_node;
-	struct hlist_node *node;
 	int is_duplicate = 0;
 	int32_t seq_diff;
 	int need_update = 0;
@@ -943,7 +939,7 @@ batadv_iv_ogm_update_seqnos(const struct ethhdr *ethhdr,
 		goto out;
 
 	rcu_read_lock();
-	hlist_for_each_entry_rcu(tmp_neigh_node, node,
+	hlist_for_each_entry_rcu(tmp_neigh_node,
 				 &orig_node->neigh_list, list) {
 		is_duplicate |= batadv_test_bit(tmp_neigh_node->real_bits,
 						orig_node->last_real_seqno,
diff --git a/net/batman-adv/bridge_loop_avoidance.c b/net/batman-adv/bridge_loop_avoidance.c
index 30f46526cbbd..6a4f728680ae 100644
--- a/net/batman-adv/bridge_loop_avoidance.c
+++ b/net/batman-adv/bridge_loop_avoidance.c
@@ -144,7 +144,6 @@ static struct batadv_bla_claim
 {
 	struct batadv_hashtable *hash = bat_priv->bla.claim_hash;
 	struct hlist_head *head;
-	struct hlist_node *node;
 	struct batadv_bla_claim *claim;
 	struct batadv_bla_claim *claim_tmp = NULL;
 	int index;
@@ -156,7 +155,7 @@ static struct batadv_bla_claim
 	head = &hash->table[index];
 
 	rcu_read_lock();
-	hlist_for_each_entry_rcu(claim, node, head, hash_entry) {
+	hlist_for_each_entry_rcu(claim, head, hash_entry) {
 		if (!batadv_compare_claim(&claim->hash_entry, data))
 			continue;
 
@@ -185,7 +184,6 @@ batadv_backbone_hash_find(struct batadv_priv *bat_priv,
 {
 	struct batadv_hashtable *hash = bat_priv->bla.backbone_hash;
 	struct hlist_head *head;
-	struct hlist_node *node;
 	struct batadv_bla_backbone_gw search_entry, *backbone_gw;
 	struct batadv_bla_backbone_gw *backbone_gw_tmp = NULL;
 	int index;
@@ -200,7 +198,7 @@ batadv_backbone_hash_find(struct batadv_priv *bat_priv,
 	head = &hash->table[index];
 
 	rcu_read_lock();
-	hlist_for_each_entry_rcu(backbone_gw, node, head, hash_entry) {
+	hlist_for_each_entry_rcu(backbone_gw, head, hash_entry) {
 		if (!batadv_compare_backbone_gw(&backbone_gw->hash_entry,
 						&search_entry))
 			continue;
@@ -221,7 +219,7 @@ static void
 batadv_bla_del_backbone_claims(struct batadv_bla_backbone_gw *backbone_gw)
 {
 	struct batadv_hashtable *hash;
-	struct hlist_node *node, *node_tmp;
+	struct hlist_node *node_tmp;
 	struct hlist_head *head;
 	struct batadv_bla_claim *claim;
 	int i;
@@ -236,13 +234,13 @@ batadv_bla_del_backbone_claims(struct batadv_bla_backbone_gw *backbone_gw)
 		list_lock = &hash->list_locks[i];
 
 		spin_lock_bh(list_lock);
-		hlist_for_each_entry_safe(claim, node, node_tmp,
+		hlist_for_each_entry_safe(claim, node_tmp,
 					  head, hash_entry) {
 			if (claim->backbone_gw != backbone_gw)
 				continue;
 
 			batadv_claim_free_ref(claim);
-			hlist_del_rcu(node);
+			hlist_del_rcu(&claim->hash_entry);
 		}
 		spin_unlock_bh(list_lock);
 	}
@@ -460,7 +458,6 @@ static void batadv_bla_answer_request(struct batadv_priv *bat_priv,
 					struct batadv_hard_iface *primary_if,
 					short vid)
 {
-	struct hlist_node *node;
 	struct hlist_head *head;
 	struct batadv_hashtable *hash;
 	struct batadv_bla_claim *claim;
@@ -481,7 +478,7 @@ static void batadv_bla_answer_request(struct batadv_priv *bat_priv,
 		head = &hash->table[i];
 
 		rcu_read_lock();
-		hlist_for_each_entry_rcu(claim, node, head, hash_entry) {
+		hlist_for_each_entry_rcu(claim, head, hash_entry) {
 			/* only own claims are interesting */
 			if (claim->backbone_gw != backbone_gw)
 				continue;
@@ -958,7 +955,7 @@ static int batadv_bla_process_claim(struct batadv_priv *bat_priv,
 static void batadv_bla_purge_backbone_gw(struct batadv_priv *bat_priv, int now)
 {
 	struct batadv_bla_backbone_gw *backbone_gw;
-	struct hlist_node *node, *node_tmp;
+	struct hlist_node *node_tmp;
 	struct hlist_head *head;
 	struct batadv_hashtable *hash;
 	spinlock_t *list_lock;	/* protects write access to the hash lists */
@@ -973,7 +970,7 @@ static void batadv_bla_purge_backbone_gw(struct batadv_priv *bat_priv, int now)
 		list_lock = &hash->list_locks[i];
 
 		spin_lock_bh(list_lock);
-		hlist_for_each_entry_safe(backbone_gw, node, node_tmp,
+		hlist_for_each_entry_safe(backbone_gw, node_tmp,
 					  head, hash_entry) {
 			if (now)
 				goto purge_now;
@@ -992,7 +989,7 @@ purge_now:
 
 			batadv_bla_del_backbone_claims(backbone_gw);
 
-			hlist_del_rcu(node);
+			hlist_del_rcu(&backbone_gw->hash_entry);
 			batadv_backbone_gw_free_ref(backbone_gw);
 		}
 		spin_unlock_bh(list_lock);
@@ -1013,7 +1010,6 @@ static void batadv_bla_purge_claims(struct batadv_priv *bat_priv,
 				    int now)
 {
 	struct batadv_bla_claim *claim;
-	struct hlist_node *node;
 	struct hlist_head *head;
 	struct batadv_hashtable *hash;
 	int i;
@@ -1026,7 +1022,7 @@ static void batadv_bla_purge_claims(struct batadv_priv *bat_priv,
 		head = &hash->table[i];
 
 		rcu_read_lock();
-		hlist_for_each_entry_rcu(claim, node, head, hash_entry) {
+		hlist_for_each_entry_rcu(claim, head, hash_entry) {
 			if (now)
 				goto purge_now;
 			if (!batadv_compare_eth(claim->backbone_gw->orig,
@@ -1062,7 +1058,6 @@ void batadv_bla_update_orig_address(struct batadv_priv *bat_priv,
 				    struct batadv_hard_iface *oldif)
 {
 	struct batadv_bla_backbone_gw *backbone_gw;
-	struct hlist_node *node;
 	struct hlist_head *head;
 	struct batadv_hashtable *hash;
 	__be16 group;
@@ -1086,7 +1081,7 @@ void batadv_bla_update_orig_address(struct batadv_priv *bat_priv,
 		head = &hash->table[i];
 
 		rcu_read_lock();
-		hlist_for_each_entry_rcu(backbone_gw, node, head, hash_entry) {
+		hlist_for_each_entry_rcu(backbone_gw, head, hash_entry) {
 			/* own orig still holds the old value. */
 			if (!batadv_compare_eth(backbone_gw->orig,
 						oldif->net_dev->dev_addr))
@@ -1112,7 +1107,6 @@ static void batadv_bla_periodic_work(struct work_struct *work)
 	struct delayed_work *delayed_work;
 	struct batadv_priv *bat_priv;
 	struct batadv_priv_bla *priv_bla;
-	struct hlist_node *node;
 	struct hlist_head *head;
 	struct batadv_bla_backbone_gw *backbone_gw;
 	struct batadv_hashtable *hash;
@@ -1140,7 +1134,7 @@ static void batadv_bla_periodic_work(struct work_struct *work)
 		head = &hash->table[i];
 
 		rcu_read_lock();
-		hlist_for_each_entry_rcu(backbone_gw, node, head, hash_entry) {
+		hlist_for_each_entry_rcu(backbone_gw, head, hash_entry) {
 			if (!batadv_compare_eth(backbone_gw->orig,
 						primary_if->net_dev->dev_addr))
 				continue;
@@ -1322,7 +1316,6 @@ int batadv_bla_is_backbone_gw_orig(struct batadv_priv *bat_priv, uint8_t *orig)
 {
 	struct batadv_hashtable *hash = bat_priv->bla.backbone_hash;
 	struct hlist_head *head;
-	struct hlist_node *node;
 	struct batadv_bla_backbone_gw *backbone_gw;
 	int i;
 
@@ -1336,7 +1329,7 @@ int batadv_bla_is_backbone_gw_orig(struct batadv_priv *bat_priv, uint8_t *orig)
 	head = &hash->table[i];
 
 	rcu_read_lock();
-	hlist_for_each_entry_rcu(backbone_gw, node, head, hash_entry) {
+	hlist_for_each_entry_rcu(backbone_gw, head, hash_entry) {
 		if (batadv_compare_eth(backbone_gw->orig, orig)) {
 			rcu_read_unlock();
 			return 1;
@@ -1607,7 +1600,6 @@ int batadv_bla_claim_table_seq_print_text(struct seq_file *seq, void *offset)
 	struct batadv_hashtable *hash = bat_priv->bla.claim_hash;
 	struct batadv_bla_claim *claim;
 	struct batadv_hard_iface *primary_if;
-	struct hlist_node *node;
 	struct hlist_head *head;
 	uint32_t i;
 	bool is_own;
@@ -1628,7 +1620,7 @@ int batadv_bla_claim_table_seq_print_text(struct seq_file *seq, void *offset)
 		head = &hash->table[i];
 
 		rcu_read_lock();
-		hlist_for_each_entry_rcu(claim, node, head, hash_entry) {
+		hlist_for_each_entry_rcu(claim, head, hash_entry) {
 			is_own = batadv_compare_eth(claim->backbone_gw->orig,
 						    primary_addr);
 			seq_printf(seq, " * %pM on % 5d by %pM [%c] (%#.4x)\n",
@@ -1652,7 +1644,6 @@ int batadv_bla_backbone_table_seq_print_text(struct seq_file *seq, void *offset)
 	struct batadv_hashtable *hash = bat_priv->bla.backbone_hash;
 	struct batadv_bla_backbone_gw *backbone_gw;
 	struct batadv_hard_iface *primary_if;
-	struct hlist_node *node;
 	struct hlist_head *head;
 	int secs, msecs;
 	uint32_t i;
@@ -1674,7 +1665,7 @@ int batadv_bla_backbone_table_seq_print_text(struct seq_file *seq, void *offset)
 		head = &hash->table[i];
 
 		rcu_read_lock();
-		hlist_for_each_entry_rcu(backbone_gw, node, head, hash_entry) {
+		hlist_for_each_entry_rcu(backbone_gw, head, hash_entry) {
1678 | msecs = jiffies_to_msecs(jiffies - | 1669 | msecs = jiffies_to_msecs(jiffies - |
1679 | backbone_gw->lasttime); | 1670 | backbone_gw->lasttime); |
1680 | secs = msecs / 1000; | 1671 | secs = msecs / 1000; |
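Taken together, the bridge loop avoidance hunks reduce to two recurring moves: the RCU iterator loses its separate node cursor, and hlist_del_rcu() now takes the embedded member instead of that cursor. Below is a minimal sketch of the post-conversion lookup shape, assuming an invented demo_entry type (none of the demo_* names are batman-adv symbols):

#include <linux/list.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>

struct demo_entry {
	struct hlist_node hash_entry;
	int key;
};

/* Lookup with the new three-argument hlist_for_each_entry_rcu():
 * the element pointer itself is the loop cursor, matching the
 * list_for_each_entry() calling convention.
 */
static struct demo_entry *demo_find(struct hlist_head *head, int key)
{
	struct demo_entry *e;

	rcu_read_lock();
	hlist_for_each_entry_rcu(e, head, hash_entry) {
		if (e->key != key)
			continue;
		rcu_read_unlock();
		return e;
	}
	rcu_read_unlock();
	return NULL;
}

Every hlist_for_each_entry_rcu() hunk in this file follows that three-argument shape; the deletion half of the pattern is sketched after the distributed-arp-table.c section below.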
diff --git a/net/batman-adv/distributed-arp-table.c b/net/batman-adv/distributed-arp-table.c
index 761a59002e34..d54188a112ea 100644
--- a/net/batman-adv/distributed-arp-table.c
+++ b/net/batman-adv/distributed-arp-table.c
@@ -83,7 +83,7 @@ static void __batadv_dat_purge(struct batadv_priv *bat_priv, | |||
83 | { | 83 | { |
84 | spinlock_t *list_lock; /* protects write access to the hash lists */ | 84 | spinlock_t *list_lock; /* protects write access to the hash lists */ |
85 | struct batadv_dat_entry *dat_entry; | 85 | struct batadv_dat_entry *dat_entry; |
86 | struct hlist_node *node, *node_tmp; | 86 | struct hlist_node *node_tmp; |
87 | struct hlist_head *head; | 87 | struct hlist_head *head; |
88 | uint32_t i; | 88 | uint32_t i; |
89 | 89 | ||
@@ -95,7 +95,7 @@ static void __batadv_dat_purge(struct batadv_priv *bat_priv, | |||
95 | list_lock = &bat_priv->dat.hash->list_locks[i]; | 95 | list_lock = &bat_priv->dat.hash->list_locks[i]; |
96 | 96 | ||
97 | spin_lock_bh(list_lock); | 97 | spin_lock_bh(list_lock); |
98 | hlist_for_each_entry_safe(dat_entry, node, node_tmp, head, | 98 | hlist_for_each_entry_safe(dat_entry, node_tmp, head, |
99 | hash_entry) { | 99 | hash_entry) { |
100 | /* if an helper function has been passed as parameter, | 100 | /* if an helper function has been passed as parameter, |
101 | * ask it if the entry has to be purged or not | 101 | * ask it if the entry has to be purged or not |
@@ -103,7 +103,7 @@ static void __batadv_dat_purge(struct batadv_priv *bat_priv, | |||
103 | if (to_purge && !to_purge(dat_entry)) | 103 | if (to_purge && !to_purge(dat_entry)) |
104 | continue; | 104 | continue; |
105 | 105 | ||
106 | hlist_del_rcu(node); | 106 | hlist_del_rcu(&dat_entry->hash_entry); |
107 | batadv_dat_entry_free_ref(dat_entry); | 107 | batadv_dat_entry_free_ref(dat_entry); |
108 | } | 108 | } |
109 | spin_unlock_bh(list_lock); | 109 | spin_unlock_bh(list_lock); |
@@ -235,7 +235,6 @@ static struct batadv_dat_entry * | |||
235 | batadv_dat_entry_hash_find(struct batadv_priv *bat_priv, __be32 ip) | 235 | batadv_dat_entry_hash_find(struct batadv_priv *bat_priv, __be32 ip) |
236 | { | 236 | { |
237 | struct hlist_head *head; | 237 | struct hlist_head *head; |
238 | struct hlist_node *node; | ||
239 | struct batadv_dat_entry *dat_entry, *dat_entry_tmp = NULL; | 238 | struct batadv_dat_entry *dat_entry, *dat_entry_tmp = NULL; |
240 | struct batadv_hashtable *hash = bat_priv->dat.hash; | 239 | struct batadv_hashtable *hash = bat_priv->dat.hash; |
241 | uint32_t index; | 240 | uint32_t index; |
@@ -247,7 +246,7 @@ batadv_dat_entry_hash_find(struct batadv_priv *bat_priv, __be32 ip) | |||
247 | head = &hash->table[index]; | 246 | head = &hash->table[index]; |
248 | 247 | ||
249 | rcu_read_lock(); | 248 | rcu_read_lock(); |
250 | hlist_for_each_entry_rcu(dat_entry, node, head, hash_entry) { | 249 | hlist_for_each_entry_rcu(dat_entry, head, hash_entry) { |
251 | if (dat_entry->ip != ip) | 250 | if (dat_entry->ip != ip) |
252 | continue; | 251 | continue; |
253 | 252 | ||
@@ -465,7 +464,6 @@ static void batadv_choose_next_candidate(struct batadv_priv *bat_priv, | |||
465 | batadv_dat_addr_t max = 0, tmp_max = 0; | 464 | batadv_dat_addr_t max = 0, tmp_max = 0; |
466 | struct batadv_orig_node *orig_node, *max_orig_node = NULL; | 465 | struct batadv_orig_node *orig_node, *max_orig_node = NULL; |
467 | struct batadv_hashtable *hash = bat_priv->orig_hash; | 466 | struct batadv_hashtable *hash = bat_priv->orig_hash; |
468 | struct hlist_node *node; | ||
469 | struct hlist_head *head; | 467 | struct hlist_head *head; |
470 | int i; | 468 | int i; |
471 | 469 | ||
@@ -481,7 +479,7 @@ static void batadv_choose_next_candidate(struct batadv_priv *bat_priv, | |||
481 | head = &hash->table[i]; | 479 | head = &hash->table[i]; |
482 | 480 | ||
483 | rcu_read_lock(); | 481 | rcu_read_lock(); |
484 | hlist_for_each_entry_rcu(orig_node, node, head, hash_entry) { | 482 | hlist_for_each_entry_rcu(orig_node, head, hash_entry) { |
485 | /* the dht space is a ring and addresses are unsigned */ | 483 | /* the dht space is a ring and addresses are unsigned */ |
486 | tmp_max = BATADV_DAT_ADDR_MAX - orig_node->dat_addr + | 484 | tmp_max = BATADV_DAT_ADDR_MAX - orig_node->dat_addr + |
487 | ip_key; | 485 | ip_key; |
@@ -686,7 +684,6 @@ int batadv_dat_cache_seq_print_text(struct seq_file *seq, void *offset) | |||
686 | struct batadv_hashtable *hash = bat_priv->dat.hash; | 684 | struct batadv_hashtable *hash = bat_priv->dat.hash; |
687 | struct batadv_dat_entry *dat_entry; | 685 | struct batadv_dat_entry *dat_entry; |
688 | struct batadv_hard_iface *primary_if; | 686 | struct batadv_hard_iface *primary_if; |
689 | struct hlist_node *node; | ||
690 | struct hlist_head *head; | 687 | struct hlist_head *head; |
691 | unsigned long last_seen_jiffies; | 688 | unsigned long last_seen_jiffies; |
692 | int last_seen_msecs, last_seen_secs, last_seen_mins; | 689 | int last_seen_msecs, last_seen_secs, last_seen_mins; |
@@ -704,7 +701,7 @@ int batadv_dat_cache_seq_print_text(struct seq_file *seq, void *offset) | |||
704 | head = &hash->table[i]; | 701 | head = &hash->table[i]; |
705 | 702 | ||
706 | rcu_read_lock(); | 703 | rcu_read_lock(); |
707 | hlist_for_each_entry_rcu(dat_entry, node, head, hash_entry) { | 704 | hlist_for_each_entry_rcu(dat_entry, head, hash_entry) { |
708 | last_seen_jiffies = jiffies - dat_entry->last_update; | 705 | last_seen_jiffies = jiffies - dat_entry->last_update; |
709 | last_seen_msecs = jiffies_to_msecs(last_seen_jiffies); | 706 | last_seen_msecs = jiffies_to_msecs(last_seen_jiffies); |
710 | last_seen_mins = last_seen_msecs / 60000; | 707 | last_seen_mins = last_seen_msecs / 60000; |
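The distributed ARP table purge shows the deletion half: with the node cursor gone, only the lookahead pointer survives as an explicit struct hlist_node *, and the unlink names the embedded member. A hedged sketch of that shape under a write-side spinlock, with invented demo_dat names (kfree_rcu() stands in here for the refcount-based free the real code uses):

#include <linux/list.h>
#include <linux/rculist.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>

struct demo_dat {
	struct hlist_node hash_entry;
	struct rcu_head rcu;
	__be32 ip;
};

static void demo_dat_purge(struct hlist_head *head, spinlock_t *list_lock)
{
	struct demo_dat *entry;
	struct hlist_node *node_tmp;

	spin_lock_bh(list_lock);
	hlist_for_each_entry_safe(entry, node_tmp, head, hash_entry) {
		/* was hlist_del_rcu(node); the member pointer is
		 * equivalent and needs no extra iterator variable
		 */
		hlist_del_rcu(&entry->hash_entry);
		kfree_rcu(entry, rcu);	/* defer free past RCU readers */
	}
	spin_unlock_bh(list_lock);
}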
diff --git a/net/batman-adv/gateway_client.c b/net/batman-adv/gateway_client.c
index 074107f2cfaa..34f99a46ec1d 100644
--- a/net/batman-adv/gateway_client.c
+++ b/net/batman-adv/gateway_client.c
@@ -114,7 +114,6 @@ static struct batadv_gw_node * | |||
114 | batadv_gw_get_best_gw_node(struct batadv_priv *bat_priv) | 114 | batadv_gw_get_best_gw_node(struct batadv_priv *bat_priv) |
115 | { | 115 | { |
116 | struct batadv_neigh_node *router; | 116 | struct batadv_neigh_node *router; |
117 | struct hlist_node *node; | ||
118 | struct batadv_gw_node *gw_node, *curr_gw = NULL; | 117 | struct batadv_gw_node *gw_node, *curr_gw = NULL; |
119 | uint32_t max_gw_factor = 0, tmp_gw_factor = 0; | 118 | uint32_t max_gw_factor = 0, tmp_gw_factor = 0; |
120 | uint32_t gw_divisor; | 119 | uint32_t gw_divisor; |
@@ -127,7 +126,7 @@ batadv_gw_get_best_gw_node(struct batadv_priv *bat_priv) | |||
127 | gw_divisor *= 64; | 126 | gw_divisor *= 64; |
128 | 127 | ||
129 | rcu_read_lock(); | 128 | rcu_read_lock(); |
130 | hlist_for_each_entry_rcu(gw_node, node, &bat_priv->gw.list, list) { | 129 | hlist_for_each_entry_rcu(gw_node, &bat_priv->gw.list, list) { |
131 | if (gw_node->deleted) | 130 | if (gw_node->deleted) |
132 | continue; | 131 | continue; |
133 | 132 | ||
@@ -344,7 +343,6 @@ void batadv_gw_node_update(struct batadv_priv *bat_priv, | |||
344 | struct batadv_orig_node *orig_node, | 343 | struct batadv_orig_node *orig_node, |
345 | uint8_t new_gwflags) | 344 | uint8_t new_gwflags) |
346 | { | 345 | { |
347 | struct hlist_node *node; | ||
348 | struct batadv_gw_node *gw_node, *curr_gw; | 346 | struct batadv_gw_node *gw_node, *curr_gw; |
349 | 347 | ||
350 | /* Note: We don't need a NULL check here, since curr_gw never gets | 348 | /* Note: We don't need a NULL check here, since curr_gw never gets |
@@ -355,7 +353,7 @@ void batadv_gw_node_update(struct batadv_priv *bat_priv, | |||
355 | curr_gw = batadv_gw_get_selected_gw_node(bat_priv); | 353 | curr_gw = batadv_gw_get_selected_gw_node(bat_priv); |
356 | 354 | ||
357 | rcu_read_lock(); | 355 | rcu_read_lock(); |
358 | hlist_for_each_entry_rcu(gw_node, node, &bat_priv->gw.list, list) { | 356 | hlist_for_each_entry_rcu(gw_node, &bat_priv->gw.list, list) { |
359 | if (gw_node->orig_node != orig_node) | 357 | if (gw_node->orig_node != orig_node) |
360 | continue; | 358 | continue; |
361 | 359 | ||
@@ -403,7 +401,7 @@ void batadv_gw_node_delete(struct batadv_priv *bat_priv, | |||
403 | void batadv_gw_node_purge(struct batadv_priv *bat_priv) | 401 | void batadv_gw_node_purge(struct batadv_priv *bat_priv) |
404 | { | 402 | { |
405 | struct batadv_gw_node *gw_node, *curr_gw; | 403 | struct batadv_gw_node *gw_node, *curr_gw; |
406 | struct hlist_node *node, *node_tmp; | 404 | struct hlist_node *node_tmp; |
407 | unsigned long timeout = msecs_to_jiffies(2 * BATADV_PURGE_TIMEOUT); | 405 | unsigned long timeout = msecs_to_jiffies(2 * BATADV_PURGE_TIMEOUT); |
408 | int do_deselect = 0; | 406 | int do_deselect = 0; |
409 | 407 | ||
@@ -411,7 +409,7 @@ void batadv_gw_node_purge(struct batadv_priv *bat_priv) | |||
411 | 409 | ||
412 | spin_lock_bh(&bat_priv->gw.list_lock); | 410 | spin_lock_bh(&bat_priv->gw.list_lock); |
413 | 411 | ||
414 | hlist_for_each_entry_safe(gw_node, node, node_tmp, | 412 | hlist_for_each_entry_safe(gw_node, node_tmp, |
415 | &bat_priv->gw.list, list) { | 413 | &bat_priv->gw.list, list) { |
416 | if (((!gw_node->deleted) || | 414 | if (((!gw_node->deleted) || |
417 | (time_before(jiffies, gw_node->deleted + timeout))) && | 415 | (time_before(jiffies, gw_node->deleted + timeout))) && |
@@ -476,7 +474,6 @@ int batadv_gw_client_seq_print_text(struct seq_file *seq, void *offset) | |||
476 | struct batadv_priv *bat_priv = netdev_priv(net_dev); | 474 | struct batadv_priv *bat_priv = netdev_priv(net_dev); |
477 | struct batadv_hard_iface *primary_if; | 475 | struct batadv_hard_iface *primary_if; |
478 | struct batadv_gw_node *gw_node; | 476 | struct batadv_gw_node *gw_node; |
479 | struct hlist_node *node; | ||
480 | int gw_count = 0; | 477 | int gw_count = 0; |
481 | 478 | ||
482 | primary_if = batadv_seq_print_text_primary_if_get(seq); | 479 | primary_if = batadv_seq_print_text_primary_if_get(seq); |
@@ -490,7 +487,7 @@ int batadv_gw_client_seq_print_text(struct seq_file *seq, void *offset) | |||
490 | primary_if->net_dev->dev_addr, net_dev->name); | 487 | primary_if->net_dev->dev_addr, net_dev->name); |
491 | 488 | ||
492 | rcu_read_lock(); | 489 | rcu_read_lock(); |
493 | hlist_for_each_entry_rcu(gw_node, node, &bat_priv->gw.list, list) { | 490 | hlist_for_each_entry_rcu(gw_node, &bat_priv->gw.list, list) { |
494 | if (gw_node->deleted) | 491 | if (gw_node->deleted) |
495 | continue; | 492 | continue; |
496 | 493 | ||
diff --git a/net/batman-adv/main.c b/net/batman-adv/main.c
index 21fe6987733b..0488d70c8c35 100644
--- a/net/batman-adv/main.c
+++ b/net/batman-adv/main.c
@@ -345,9 +345,8 @@ void batadv_recv_handler_unregister(uint8_t packet_type) | |||
345 | static struct batadv_algo_ops *batadv_algo_get(char *name) | 345 | static struct batadv_algo_ops *batadv_algo_get(char *name) |
346 | { | 346 | { |
347 | struct batadv_algo_ops *bat_algo_ops = NULL, *bat_algo_ops_tmp; | 347 | struct batadv_algo_ops *bat_algo_ops = NULL, *bat_algo_ops_tmp; |
348 | struct hlist_node *node; | ||
349 | 348 | ||
350 | hlist_for_each_entry(bat_algo_ops_tmp, node, &batadv_algo_list, list) { | 349 | hlist_for_each_entry(bat_algo_ops_tmp, &batadv_algo_list, list) { |
351 | if (strcmp(bat_algo_ops_tmp->name, name) != 0) | 350 | if (strcmp(bat_algo_ops_tmp->name, name) != 0) |
352 | continue; | 351 | continue; |
353 | 352 | ||
@@ -411,11 +410,10 @@ out: | |||
411 | int batadv_algo_seq_print_text(struct seq_file *seq, void *offset) | 410 | int batadv_algo_seq_print_text(struct seq_file *seq, void *offset) |
412 | { | 411 | { |
413 | struct batadv_algo_ops *bat_algo_ops; | 412 | struct batadv_algo_ops *bat_algo_ops; |
414 | struct hlist_node *node; | ||
415 | 413 | ||
416 | seq_printf(seq, "Available routing algorithms:\n"); | 414 | seq_printf(seq, "Available routing algorithms:\n"); |
417 | 415 | ||
418 | hlist_for_each_entry(bat_algo_ops, node, &batadv_algo_list, list) { | 416 | hlist_for_each_entry(bat_algo_ops, &batadv_algo_list, list) { |
419 | seq_printf(seq, "%s\n", bat_algo_ops->name); | 417 | seq_printf(seq, "%s\n", bat_algo_ops->name); |
420 | } | 418 | } |
421 | 419 | ||
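main.c covers the non-RCU case: batadv_algo_list is only walked where no concurrent writer is expected, so the plain iterator is used, and it too drops to three arguments. A small sketch modeled loosely on batadv_algo_get(); demo_ops and its fields are illustrative, not kernel symbols:

#include <linux/list.h>
#include <linux/string.h>

struct demo_ops {
	struct hlist_node list;
	const char *name;
};

/* Plain (non-RCU) walk: no node cursor, no locking shown here. */
static struct demo_ops *demo_ops_get(struct hlist_head *ops_list,
				     const char *name)
{
	struct demo_ops *ops;

	hlist_for_each_entry(ops, ops_list, list) {
		if (strcmp(ops->name, name) == 0)
			return ops;
	}
	return NULL;
}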
diff --git a/net/batman-adv/originator.c b/net/batman-adv/originator.c
index 457ea445217c..96fb80b724dc 100644
--- a/net/batman-adv/originator.c
+++ b/net/batman-adv/originator.c
@@ -118,7 +118,7 @@ out: | |||
118 | 118 | ||
119 | static void batadv_orig_node_free_rcu(struct rcu_head *rcu) | 119 | static void batadv_orig_node_free_rcu(struct rcu_head *rcu) |
120 | { | 120 | { |
121 | struct hlist_node *node, *node_tmp; | 121 | struct hlist_node *node_tmp; |
122 | struct batadv_neigh_node *neigh_node, *tmp_neigh_node; | 122 | struct batadv_neigh_node *neigh_node, *tmp_neigh_node; |
123 | struct batadv_orig_node *orig_node; | 123 | struct batadv_orig_node *orig_node; |
124 | 124 | ||
@@ -134,7 +134,7 @@ static void batadv_orig_node_free_rcu(struct rcu_head *rcu) | |||
134 | } | 134 | } |
135 | 135 | ||
136 | /* for all neighbors towards this originator ... */ | 136 | /* for all neighbors towards this originator ... */ |
137 | hlist_for_each_entry_safe(neigh_node, node, node_tmp, | 137 | hlist_for_each_entry_safe(neigh_node, node_tmp, |
138 | &orig_node->neigh_list, list) { | 138 | &orig_node->neigh_list, list) { |
139 | hlist_del_rcu(&neigh_node->list); | 139 | hlist_del_rcu(&neigh_node->list); |
140 | batadv_neigh_node_free_ref(neigh_node); | 140 | batadv_neigh_node_free_ref(neigh_node); |
@@ -161,7 +161,7 @@ void batadv_orig_node_free_ref(struct batadv_orig_node *orig_node) | |||
161 | void batadv_originator_free(struct batadv_priv *bat_priv) | 161 | void batadv_originator_free(struct batadv_priv *bat_priv) |
162 | { | 162 | { |
163 | struct batadv_hashtable *hash = bat_priv->orig_hash; | 163 | struct batadv_hashtable *hash = bat_priv->orig_hash; |
164 | struct hlist_node *node, *node_tmp; | 164 | struct hlist_node *node_tmp; |
165 | struct hlist_head *head; | 165 | struct hlist_head *head; |
166 | spinlock_t *list_lock; /* spinlock to protect write access */ | 166 | spinlock_t *list_lock; /* spinlock to protect write access */ |
167 | struct batadv_orig_node *orig_node; | 167 | struct batadv_orig_node *orig_node; |
@@ -179,9 +179,9 @@ void batadv_originator_free(struct batadv_priv *bat_priv) | |||
179 | list_lock = &hash->list_locks[i]; | 179 | list_lock = &hash->list_locks[i]; |
180 | 180 | ||
181 | spin_lock_bh(list_lock); | 181 | spin_lock_bh(list_lock); |
182 | hlist_for_each_entry_safe(orig_node, node, node_tmp, | 182 | hlist_for_each_entry_safe(orig_node, node_tmp, |
183 | head, hash_entry) { | 183 | head, hash_entry) { |
184 | hlist_del_rcu(node); | 184 | hlist_del_rcu(&orig_node->hash_entry); |
185 | batadv_orig_node_free_ref(orig_node); | 185 | batadv_orig_node_free_ref(orig_node); |
186 | } | 186 | } |
187 | spin_unlock_bh(list_lock); | 187 | spin_unlock_bh(list_lock); |
@@ -274,7 +274,7 @@ batadv_purge_orig_neighbors(struct batadv_priv *bat_priv, | |||
274 | struct batadv_orig_node *orig_node, | 274 | struct batadv_orig_node *orig_node, |
275 | struct batadv_neigh_node **best_neigh_node) | 275 | struct batadv_neigh_node **best_neigh_node) |
276 | { | 276 | { |
277 | struct hlist_node *node, *node_tmp; | 277 | struct hlist_node *node_tmp; |
278 | struct batadv_neigh_node *neigh_node; | 278 | struct batadv_neigh_node *neigh_node; |
279 | bool neigh_purged = false; | 279 | bool neigh_purged = false; |
280 | unsigned long last_seen; | 280 | unsigned long last_seen; |
@@ -285,7 +285,7 @@ batadv_purge_orig_neighbors(struct batadv_priv *bat_priv, | |||
285 | spin_lock_bh(&orig_node->neigh_list_lock); | 285 | spin_lock_bh(&orig_node->neigh_list_lock); |
286 | 286 | ||
287 | /* for all neighbors towards this originator ... */ | 287 | /* for all neighbors towards this originator ... */ |
288 | hlist_for_each_entry_safe(neigh_node, node, node_tmp, | 288 | hlist_for_each_entry_safe(neigh_node, node_tmp, |
289 | &orig_node->neigh_list, list) { | 289 | &orig_node->neigh_list, list) { |
290 | last_seen = neigh_node->last_seen; | 290 | last_seen = neigh_node->last_seen; |
291 | if_incoming = neigh_node->if_incoming; | 291 | if_incoming = neigh_node->if_incoming; |
@@ -348,7 +348,7 @@ static bool batadv_purge_orig_node(struct batadv_priv *bat_priv, | |||
348 | static void _batadv_purge_orig(struct batadv_priv *bat_priv) | 348 | static void _batadv_purge_orig(struct batadv_priv *bat_priv) |
349 | { | 349 | { |
350 | struct batadv_hashtable *hash = bat_priv->orig_hash; | 350 | struct batadv_hashtable *hash = bat_priv->orig_hash; |
351 | struct hlist_node *node, *node_tmp; | 351 | struct hlist_node *node_tmp; |
352 | struct hlist_head *head; | 352 | struct hlist_head *head; |
353 | spinlock_t *list_lock; /* spinlock to protect write access */ | 353 | spinlock_t *list_lock; /* spinlock to protect write access */ |
354 | struct batadv_orig_node *orig_node; | 354 | struct batadv_orig_node *orig_node; |
@@ -363,13 +363,13 @@ static void _batadv_purge_orig(struct batadv_priv *bat_priv) | |||
363 | list_lock = &hash->list_locks[i]; | 363 | list_lock = &hash->list_locks[i]; |
364 | 364 | ||
365 | spin_lock_bh(list_lock); | 365 | spin_lock_bh(list_lock); |
366 | hlist_for_each_entry_safe(orig_node, node, node_tmp, | 366 | hlist_for_each_entry_safe(orig_node, node_tmp, |
367 | head, hash_entry) { | 367 | head, hash_entry) { |
368 | if (batadv_purge_orig_node(bat_priv, orig_node)) { | 368 | if (batadv_purge_orig_node(bat_priv, orig_node)) { |
369 | if (orig_node->gw_flags) | 369 | if (orig_node->gw_flags) |
370 | batadv_gw_node_delete(bat_priv, | 370 | batadv_gw_node_delete(bat_priv, |
371 | orig_node); | 371 | orig_node); |
372 | hlist_del_rcu(node); | 372 | hlist_del_rcu(&orig_node->hash_entry); |
373 | batadv_orig_node_free_ref(orig_node); | 373 | batadv_orig_node_free_ref(orig_node); |
374 | continue; | 374 | continue; |
375 | } | 375 | } |
@@ -408,7 +408,6 @@ int batadv_orig_seq_print_text(struct seq_file *seq, void *offset) | |||
408 | struct net_device *net_dev = (struct net_device *)seq->private; | 408 | struct net_device *net_dev = (struct net_device *)seq->private; |
409 | struct batadv_priv *bat_priv = netdev_priv(net_dev); | 409 | struct batadv_priv *bat_priv = netdev_priv(net_dev); |
410 | struct batadv_hashtable *hash = bat_priv->orig_hash; | 410 | struct batadv_hashtable *hash = bat_priv->orig_hash; |
411 | struct hlist_node *node, *node_tmp; | ||
412 | struct hlist_head *head; | 411 | struct hlist_head *head; |
413 | struct batadv_hard_iface *primary_if; | 412 | struct batadv_hard_iface *primary_if; |
414 | struct batadv_orig_node *orig_node; | 413 | struct batadv_orig_node *orig_node; |
@@ -434,7 +433,7 @@ int batadv_orig_seq_print_text(struct seq_file *seq, void *offset) | |||
434 | head = &hash->table[i]; | 433 | head = &hash->table[i]; |
435 | 434 | ||
436 | rcu_read_lock(); | 435 | rcu_read_lock(); |
437 | hlist_for_each_entry_rcu(orig_node, node, head, hash_entry) { | 436 | hlist_for_each_entry_rcu(orig_node, head, hash_entry) { |
438 | neigh_node = batadv_orig_node_get_router(orig_node); | 437 | neigh_node = batadv_orig_node_get_router(orig_node); |
439 | if (!neigh_node) | 438 | if (!neigh_node) |
440 | continue; | 439 | continue; |
@@ -453,7 +452,7 @@ int batadv_orig_seq_print_text(struct seq_file *seq, void *offset) | |||
453 | neigh_node->addr, | 452 | neigh_node->addr, |
454 | neigh_node->if_incoming->net_dev->name); | 453 | neigh_node->if_incoming->net_dev->name); |
455 | 454 | ||
456 | hlist_for_each_entry_rcu(neigh_node_tmp, node_tmp, | 455 | hlist_for_each_entry_rcu(neigh_node_tmp, |
457 | &orig_node->neigh_list, list) { | 456 | &orig_node->neigh_list, list) { |
458 | seq_printf(seq, " %pM (%3i)", | 457 | seq_printf(seq, " %pM (%3i)", |
459 | neigh_node_tmp->addr, | 458 | neigh_node_tmp->addr, |
@@ -511,7 +510,6 @@ int batadv_orig_hash_add_if(struct batadv_hard_iface *hard_iface, | |||
511 | { | 510 | { |
512 | struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface); | 511 | struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface); |
513 | struct batadv_hashtable *hash = bat_priv->orig_hash; | 512 | struct batadv_hashtable *hash = bat_priv->orig_hash; |
514 | struct hlist_node *node; | ||
515 | struct hlist_head *head; | 513 | struct hlist_head *head; |
516 | struct batadv_orig_node *orig_node; | 514 | struct batadv_orig_node *orig_node; |
517 | uint32_t i; | 515 | uint32_t i; |
@@ -524,7 +522,7 @@ int batadv_orig_hash_add_if(struct batadv_hard_iface *hard_iface, | |||
524 | head = &hash->table[i]; | 522 | head = &hash->table[i]; |
525 | 523 | ||
526 | rcu_read_lock(); | 524 | rcu_read_lock(); |
527 | hlist_for_each_entry_rcu(orig_node, node, head, hash_entry) { | 525 | hlist_for_each_entry_rcu(orig_node, head, hash_entry) { |
528 | spin_lock_bh(&orig_node->ogm_cnt_lock); | 526 | spin_lock_bh(&orig_node->ogm_cnt_lock); |
529 | ret = batadv_orig_node_add_if(orig_node, max_if_num); | 527 | ret = batadv_orig_node_add_if(orig_node, max_if_num); |
530 | spin_unlock_bh(&orig_node->ogm_cnt_lock); | 528 | spin_unlock_bh(&orig_node->ogm_cnt_lock); |
@@ -595,7 +593,6 @@ int batadv_orig_hash_del_if(struct batadv_hard_iface *hard_iface, | |||
595 | { | 593 | { |
596 | struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface); | 594 | struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface); |
597 | struct batadv_hashtable *hash = bat_priv->orig_hash; | 595 | struct batadv_hashtable *hash = bat_priv->orig_hash; |
598 | struct hlist_node *node; | ||
599 | struct hlist_head *head; | 596 | struct hlist_head *head; |
600 | struct batadv_hard_iface *hard_iface_tmp; | 597 | struct batadv_hard_iface *hard_iface_tmp; |
601 | struct batadv_orig_node *orig_node; | 598 | struct batadv_orig_node *orig_node; |
@@ -609,7 +606,7 @@ int batadv_orig_hash_del_if(struct batadv_hard_iface *hard_iface, | |||
609 | head = &hash->table[i]; | 606 | head = &hash->table[i]; |
610 | 607 | ||
611 | rcu_read_lock(); | 608 | rcu_read_lock(); |
612 | hlist_for_each_entry_rcu(orig_node, node, head, hash_entry) { | 609 | hlist_for_each_entry_rcu(orig_node, head, hash_entry) { |
613 | spin_lock_bh(&orig_node->ogm_cnt_lock); | 610 | spin_lock_bh(&orig_node->ogm_cnt_lock); |
614 | ret = batadv_orig_node_del_if(orig_node, max_if_num, | 611 | ret = batadv_orig_node_del_if(orig_node, max_if_num, |
615 | hard_iface->if_num); | 612 | hard_iface->if_num); |
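One point worth spelling out at the originator purge: dropping the node parameter does not make the _safe variants redundant. The new iterators advance by reading the next pointer out of the current element's member, so freeing the element inside a plain hlist_for_each_entry() walk would be a use-after-free; hlist_for_each_entry_safe() snapshots the successor first. A hedged illustration with invented demo_orig names (the caller is assumed to hold the bucket's list lock):

#include <linux/jiffies.h>
#include <linux/list.h>
#include <linux/rculist.h>
#include <linux/slab.h>
#include <linux/types.h>

struct demo_orig {
	struct hlist_node hash_entry;
	struct rcu_head rcu;
	unsigned long last_seen;
};

static inline bool demo_expired(const struct demo_orig *orig)
{
	return time_is_before_jiffies(orig->last_seen + 10 * HZ);
}

static void demo_purge_expired(struct hlist_head *head)
{
	struct demo_orig *orig;
	struct hlist_node *node_tmp;

	/* node_tmp saves ->next before the body runs, so unlinking
	 * and freeing orig cannot derail the iteration
	 */
	hlist_for_each_entry_safe(orig, node_tmp, head, hash_entry) {
		if (!demo_expired(orig))
			continue;
		hlist_del_rcu(&orig->hash_entry);
		kfree_rcu(orig, rcu);
	}
}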
diff --git a/net/batman-adv/originator.h b/net/batman-adv/originator.h
index 286bf743e76a..7df48fa7669d 100644
--- a/net/batman-adv/originator.h
+++ b/net/batman-adv/originator.h
@@ -68,7 +68,6 @@ batadv_orig_hash_find(struct batadv_priv *bat_priv, const void *data) | |||
68 | { | 68 | { |
69 | struct batadv_hashtable *hash = bat_priv->orig_hash; | 69 | struct batadv_hashtable *hash = bat_priv->orig_hash; |
70 | struct hlist_head *head; | 70 | struct hlist_head *head; |
71 | struct hlist_node *node; | ||
72 | struct batadv_orig_node *orig_node, *orig_node_tmp = NULL; | 71 | struct batadv_orig_node *orig_node, *orig_node_tmp = NULL; |
73 | int index; | 72 | int index; |
74 | 73 | ||
@@ -79,7 +78,7 @@ batadv_orig_hash_find(struct batadv_priv *bat_priv, const void *data) | |||
79 | head = &hash->table[index]; | 78 | head = &hash->table[index]; |
80 | 79 | ||
81 | rcu_read_lock(); | 80 | rcu_read_lock(); |
82 | hlist_for_each_entry_rcu(orig_node, node, head, hash_entry) { | 81 | hlist_for_each_entry_rcu(orig_node, head, hash_entry) { |
83 | if (!batadv_compare_eth(orig_node, data)) | 82 | if (!batadv_compare_eth(orig_node, data)) |
84 | continue; | 83 | continue; |
85 | 84 | ||
diff --git a/net/batman-adv/routing.c b/net/batman-adv/routing.c
index 60ba03fc8390..5ee21cebbbb0 100644
--- a/net/batman-adv/routing.c
+++ b/net/batman-adv/routing.c
@@ -37,7 +37,6 @@ void batadv_slide_own_bcast_window(struct batadv_hard_iface *hard_iface) | |||
37 | { | 37 | { |
38 | struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface); | 38 | struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface); |
39 | struct batadv_hashtable *hash = bat_priv->orig_hash; | 39 | struct batadv_hashtable *hash = bat_priv->orig_hash; |
40 | struct hlist_node *node; | ||
41 | struct hlist_head *head; | 40 | struct hlist_head *head; |
42 | struct batadv_orig_node *orig_node; | 41 | struct batadv_orig_node *orig_node; |
43 | unsigned long *word; | 42 | unsigned long *word; |
@@ -49,7 +48,7 @@ void batadv_slide_own_bcast_window(struct batadv_hard_iface *hard_iface) | |||
49 | head = &hash->table[i]; | 48 | head = &hash->table[i]; |
50 | 49 | ||
51 | rcu_read_lock(); | 50 | rcu_read_lock(); |
52 | hlist_for_each_entry_rcu(orig_node, node, head, hash_entry) { | 51 | hlist_for_each_entry_rcu(orig_node, head, hash_entry) { |
53 | spin_lock_bh(&orig_node->ogm_cnt_lock); | 52 | spin_lock_bh(&orig_node->ogm_cnt_lock); |
54 | word_index = hard_iface->if_num * BATADV_NUM_WORDS; | 53 | word_index = hard_iface->if_num * BATADV_NUM_WORDS; |
55 | word = &(orig_node->bcast_own[word_index]); | 54 | word = &(orig_node->bcast_own[word_index]); |
@@ -146,7 +145,6 @@ out: | |||
146 | void batadv_bonding_candidate_add(struct batadv_orig_node *orig_node, | 145 | void batadv_bonding_candidate_add(struct batadv_orig_node *orig_node, |
147 | struct batadv_neigh_node *neigh_node) | 146 | struct batadv_neigh_node *neigh_node) |
148 | { | 147 | { |
149 | struct hlist_node *node; | ||
150 | struct batadv_neigh_node *tmp_neigh_node, *router = NULL; | 148 | struct batadv_neigh_node *tmp_neigh_node, *router = NULL; |
151 | uint8_t interference_candidate = 0; | 149 | uint8_t interference_candidate = 0; |
152 | 150 | ||
@@ -169,7 +167,7 @@ void batadv_bonding_candidate_add(struct batadv_orig_node *orig_node, | |||
169 | * interface. If we do, we won't select this candidate because of | 167 | * interface. If we do, we won't select this candidate because of |
170 | * possible interference. | 168 | * possible interference. |
171 | */ | 169 | */ |
172 | hlist_for_each_entry_rcu(tmp_neigh_node, node, | 170 | hlist_for_each_entry_rcu(tmp_neigh_node, |
173 | &orig_node->neigh_list, list) { | 171 | &orig_node->neigh_list, list) { |
174 | if (tmp_neigh_node == neigh_node) | 172 | if (tmp_neigh_node == neigh_node) |
175 | continue; | 173 | continue; |
diff --git a/net/batman-adv/send.c b/net/batman-adv/send.c
index 80ca65fc89a1..a67cffde37ae 100644
--- a/net/batman-adv/send.c
+++ b/net/batman-adv/send.c
@@ -316,7 +316,7 @@ batadv_purge_outstanding_packets(struct batadv_priv *bat_priv, | |||
316 | const struct batadv_hard_iface *hard_iface) | 316 | const struct batadv_hard_iface *hard_iface) |
317 | { | 317 | { |
318 | struct batadv_forw_packet *forw_packet; | 318 | struct batadv_forw_packet *forw_packet; |
319 | struct hlist_node *tmp_node, *safe_tmp_node; | 319 | struct hlist_node *safe_tmp_node; |
320 | bool pending; | 320 | bool pending; |
321 | 321 | ||
322 | if (hard_iface) | 322 | if (hard_iface) |
@@ -329,7 +329,7 @@ batadv_purge_outstanding_packets(struct batadv_priv *bat_priv, | |||
329 | 329 | ||
330 | /* free bcast list */ | 330 | /* free bcast list */ |
331 | spin_lock_bh(&bat_priv->forw_bcast_list_lock); | 331 | spin_lock_bh(&bat_priv->forw_bcast_list_lock); |
332 | hlist_for_each_entry_safe(forw_packet, tmp_node, safe_tmp_node, | 332 | hlist_for_each_entry_safe(forw_packet, safe_tmp_node, |
333 | &bat_priv->forw_bcast_list, list) { | 333 | &bat_priv->forw_bcast_list, list) { |
334 | /* if purge_outstanding_packets() was called with an argument | 334 | /* if purge_outstanding_packets() was called with an argument |
335 | * we delete only packets belonging to the given interface | 335 | * we delete only packets belonging to the given interface |
@@ -355,7 +355,7 @@ batadv_purge_outstanding_packets(struct batadv_priv *bat_priv, | |||
355 | 355 | ||
356 | /* free batman packet list */ | 356 | /* free batman packet list */ |
357 | spin_lock_bh(&bat_priv->forw_bat_list_lock); | 357 | spin_lock_bh(&bat_priv->forw_bat_list_lock); |
358 | hlist_for_each_entry_safe(forw_packet, tmp_node, safe_tmp_node, | 358 | hlist_for_each_entry_safe(forw_packet, safe_tmp_node, |
359 | &bat_priv->forw_bat_list, list) { | 359 | &bat_priv->forw_bat_list, list) { |
360 | /* if purge_outstanding_packets() was called with an argument | 360 | /* if purge_outstanding_packets() was called with an argument |
361 | * we delete only packets belonging to the given interface | 361 | * we delete only packets belonging to the given interface |
diff --git a/net/batman-adv/translation-table.c b/net/batman-adv/translation-table.c
index d44672f4a349..98a66a021a60 100644
--- a/net/batman-adv/translation-table.c
+++ b/net/batman-adv/translation-table.c
@@ -56,7 +56,6 @@ static struct batadv_tt_common_entry * | |||
56 | batadv_tt_hash_find(struct batadv_hashtable *hash, const void *data) | 56 | batadv_tt_hash_find(struct batadv_hashtable *hash, const void *data) |
57 | { | 57 | { |
58 | struct hlist_head *head; | 58 | struct hlist_head *head; |
59 | struct hlist_node *node; | ||
60 | struct batadv_tt_common_entry *tt_common_entry; | 59 | struct batadv_tt_common_entry *tt_common_entry; |
61 | struct batadv_tt_common_entry *tt_common_entry_tmp = NULL; | 60 | struct batadv_tt_common_entry *tt_common_entry_tmp = NULL; |
62 | uint32_t index; | 61 | uint32_t index; |
@@ -68,7 +67,7 @@ batadv_tt_hash_find(struct batadv_hashtable *hash, const void *data) | |||
68 | head = &hash->table[index]; | 67 | head = &hash->table[index]; |
69 | 68 | ||
70 | rcu_read_lock(); | 69 | rcu_read_lock(); |
71 | hlist_for_each_entry_rcu(tt_common_entry, node, head, hash_entry) { | 70 | hlist_for_each_entry_rcu(tt_common_entry, head, hash_entry) { |
72 | if (!batadv_compare_eth(tt_common_entry, data)) | 71 | if (!batadv_compare_eth(tt_common_entry, data)) |
73 | continue; | 72 | continue; |
74 | 73 | ||
@@ -257,7 +256,6 @@ void batadv_tt_local_add(struct net_device *soft_iface, const uint8_t *addr, | |||
257 | struct batadv_tt_local_entry *tt_local; | 256 | struct batadv_tt_local_entry *tt_local; |
258 | struct batadv_tt_global_entry *tt_global; | 257 | struct batadv_tt_global_entry *tt_global; |
259 | struct hlist_head *head; | 258 | struct hlist_head *head; |
260 | struct hlist_node *node; | ||
261 | struct batadv_tt_orig_list_entry *orig_entry; | 259 | struct batadv_tt_orig_list_entry *orig_entry; |
262 | int hash_added; | 260 | int hash_added; |
263 | bool roamed_back = false; | 261 | bool roamed_back = false; |
@@ -339,7 +337,7 @@ check_roaming: | |||
339 | /* These node are probably going to update their tt table */ | 337 | /* These node are probably going to update their tt table */ |
340 | head = &tt_global->orig_list; | 338 | head = &tt_global->orig_list; |
341 | rcu_read_lock(); | 339 | rcu_read_lock(); |
342 | hlist_for_each_entry_rcu(orig_entry, node, head, list) { | 340 | hlist_for_each_entry_rcu(orig_entry, head, list) { |
343 | batadv_send_roam_adv(bat_priv, tt_global->common.addr, | 341 | batadv_send_roam_adv(bat_priv, tt_global->common.addr, |
344 | orig_entry->orig_node); | 342 | orig_entry->orig_node); |
345 | } | 343 | } |
@@ -470,7 +468,6 @@ int batadv_tt_local_seq_print_text(struct seq_file *seq, void *offset) | |||
470 | struct batadv_tt_common_entry *tt_common_entry; | 468 | struct batadv_tt_common_entry *tt_common_entry; |
471 | struct batadv_tt_local_entry *tt_local; | 469 | struct batadv_tt_local_entry *tt_local; |
472 | struct batadv_hard_iface *primary_if; | 470 | struct batadv_hard_iface *primary_if; |
473 | struct hlist_node *node; | ||
474 | struct hlist_head *head; | 471 | struct hlist_head *head; |
475 | uint32_t i; | 472 | uint32_t i; |
476 | int last_seen_secs; | 473 | int last_seen_secs; |
@@ -494,7 +491,7 @@ int batadv_tt_local_seq_print_text(struct seq_file *seq, void *offset) | |||
494 | head = &hash->table[i]; | 491 | head = &hash->table[i]; |
495 | 492 | ||
496 | rcu_read_lock(); | 493 | rcu_read_lock(); |
497 | hlist_for_each_entry_rcu(tt_common_entry, node, | 494 | hlist_for_each_entry_rcu(tt_common_entry, |
498 | head, hash_entry) { | 495 | head, hash_entry) { |
499 | tt_local = container_of(tt_common_entry, | 496 | tt_local = container_of(tt_common_entry, |
500 | struct batadv_tt_local_entry, | 497 | struct batadv_tt_local_entry, |
@@ -605,9 +602,9 @@ static void batadv_tt_local_purge_list(struct batadv_priv *bat_priv, | |||
605 | { | 602 | { |
606 | struct batadv_tt_local_entry *tt_local_entry; | 603 | struct batadv_tt_local_entry *tt_local_entry; |
607 | struct batadv_tt_common_entry *tt_common_entry; | 604 | struct batadv_tt_common_entry *tt_common_entry; |
608 | struct hlist_node *node, *node_tmp; | 605 | struct hlist_node *node_tmp; |
609 | 606 | ||
610 | hlist_for_each_entry_safe(tt_common_entry, node, node_tmp, head, | 607 | hlist_for_each_entry_safe(tt_common_entry, node_tmp, head, |
611 | hash_entry) { | 608 | hash_entry) { |
612 | tt_local_entry = container_of(tt_common_entry, | 609 | tt_local_entry = container_of(tt_common_entry, |
613 | struct batadv_tt_local_entry, | 610 | struct batadv_tt_local_entry, |
@@ -651,7 +648,7 @@ static void batadv_tt_local_table_free(struct batadv_priv *bat_priv) | |||
651 | spinlock_t *list_lock; /* protects write access to the hash lists */ | 648 | spinlock_t *list_lock; /* protects write access to the hash lists */ |
652 | struct batadv_tt_common_entry *tt_common_entry; | 649 | struct batadv_tt_common_entry *tt_common_entry; |
653 | struct batadv_tt_local_entry *tt_local; | 650 | struct batadv_tt_local_entry *tt_local; |
654 | struct hlist_node *node, *node_tmp; | 651 | struct hlist_node *node_tmp; |
655 | struct hlist_head *head; | 652 | struct hlist_head *head; |
656 | uint32_t i; | 653 | uint32_t i; |
657 | 654 | ||
@@ -665,9 +662,9 @@ static void batadv_tt_local_table_free(struct batadv_priv *bat_priv) | |||
665 | list_lock = &hash->list_locks[i]; | 662 | list_lock = &hash->list_locks[i]; |
666 | 663 | ||
667 | spin_lock_bh(list_lock); | 664 | spin_lock_bh(list_lock); |
668 | hlist_for_each_entry_safe(tt_common_entry, node, node_tmp, | 665 | hlist_for_each_entry_safe(tt_common_entry, node_tmp, |
669 | head, hash_entry) { | 666 | head, hash_entry) { |
670 | hlist_del_rcu(node); | 667 | hlist_del_rcu(&tt_common_entry->hash_entry); |
671 | tt_local = container_of(tt_common_entry, | 668 | tt_local = container_of(tt_common_entry, |
672 | struct batadv_tt_local_entry, | 669 | struct batadv_tt_local_entry, |
673 | common); | 670 | common); |
@@ -724,11 +721,10 @@ batadv_tt_global_orig_entry_find(const struct batadv_tt_global_entry *entry, | |||
724 | { | 721 | { |
725 | struct batadv_tt_orig_list_entry *tmp_orig_entry, *orig_entry = NULL; | 722 | struct batadv_tt_orig_list_entry *tmp_orig_entry, *orig_entry = NULL; |
726 | const struct hlist_head *head; | 723 | const struct hlist_head *head; |
727 | struct hlist_node *node; | ||
728 | 724 | ||
729 | rcu_read_lock(); | 725 | rcu_read_lock(); |
730 | head = &entry->orig_list; | 726 | head = &entry->orig_list; |
731 | hlist_for_each_entry_rcu(tmp_orig_entry, node, head, list) { | 727 | hlist_for_each_entry_rcu(tmp_orig_entry, head, list) { |
732 | if (tmp_orig_entry->orig_node != orig_node) | 728 | if (tmp_orig_entry->orig_node != orig_node) |
733 | continue; | 729 | continue; |
734 | if (!atomic_inc_not_zero(&tmp_orig_entry->refcount)) | 730 | if (!atomic_inc_not_zero(&tmp_orig_entry->refcount)) |
@@ -940,12 +936,11 @@ batadv_transtable_best_orig(struct batadv_tt_global_entry *tt_global_entry) | |||
940 | { | 936 | { |
941 | struct batadv_neigh_node *router = NULL; | 937 | struct batadv_neigh_node *router = NULL; |
942 | struct hlist_head *head; | 938 | struct hlist_head *head; |
943 | struct hlist_node *node; | ||
944 | struct batadv_tt_orig_list_entry *orig_entry, *best_entry = NULL; | 939 | struct batadv_tt_orig_list_entry *orig_entry, *best_entry = NULL; |
945 | int best_tq = 0; | 940 | int best_tq = 0; |
946 | 941 | ||
947 | head = &tt_global_entry->orig_list; | 942 | head = &tt_global_entry->orig_list; |
948 | hlist_for_each_entry_rcu(orig_entry, node, head, list) { | 943 | hlist_for_each_entry_rcu(orig_entry, head, list) { |
949 | router = batadv_orig_node_get_router(orig_entry->orig_node); | 944 | router = batadv_orig_node_get_router(orig_entry->orig_node); |
950 | if (!router) | 945 | if (!router) |
951 | continue; | 946 | continue; |
@@ -973,7 +968,6 @@ batadv_tt_global_print_entry(struct batadv_tt_global_entry *tt_global_entry, | |||
973 | struct seq_file *seq) | 968 | struct seq_file *seq) |
974 | { | 969 | { |
975 | struct hlist_head *head; | 970 | struct hlist_head *head; |
976 | struct hlist_node *node; | ||
977 | struct batadv_tt_orig_list_entry *orig_entry, *best_entry; | 971 | struct batadv_tt_orig_list_entry *orig_entry, *best_entry; |
978 | struct batadv_tt_common_entry *tt_common_entry; | 972 | struct batadv_tt_common_entry *tt_common_entry; |
979 | uint16_t flags; | 973 | uint16_t flags; |
@@ -997,7 +991,7 @@ batadv_tt_global_print_entry(struct batadv_tt_global_entry *tt_global_entry, | |||
997 | 991 | ||
998 | head = &tt_global_entry->orig_list; | 992 | head = &tt_global_entry->orig_list; |
999 | 993 | ||
1000 | hlist_for_each_entry_rcu(orig_entry, node, head, list) { | 994 | hlist_for_each_entry_rcu(orig_entry, head, list) { |
1001 | if (best_entry == orig_entry) | 995 | if (best_entry == orig_entry) |
1002 | continue; | 996 | continue; |
1003 | 997 | ||
@@ -1020,7 +1014,6 @@ int batadv_tt_global_seq_print_text(struct seq_file *seq, void *offset) | |||
1020 | struct batadv_tt_common_entry *tt_common_entry; | 1014 | struct batadv_tt_common_entry *tt_common_entry; |
1021 | struct batadv_tt_global_entry *tt_global; | 1015 | struct batadv_tt_global_entry *tt_global; |
1022 | struct batadv_hard_iface *primary_if; | 1016 | struct batadv_hard_iface *primary_if; |
1023 | struct hlist_node *node; | ||
1024 | struct hlist_head *head; | 1017 | struct hlist_head *head; |
1025 | uint32_t i; | 1018 | uint32_t i; |
1026 | 1019 | ||
@@ -1039,7 +1032,7 @@ int batadv_tt_global_seq_print_text(struct seq_file *seq, void *offset) | |||
1039 | head = &hash->table[i]; | 1032 | head = &hash->table[i]; |
1040 | 1033 | ||
1041 | rcu_read_lock(); | 1034 | rcu_read_lock(); |
1042 | hlist_for_each_entry_rcu(tt_common_entry, node, | 1035 | hlist_for_each_entry_rcu(tt_common_entry, |
1043 | head, hash_entry) { | 1036 | head, hash_entry) { |
1044 | tt_global = container_of(tt_common_entry, | 1037 | tt_global = container_of(tt_common_entry, |
1045 | struct batadv_tt_global_entry, | 1038 | struct batadv_tt_global_entry, |
@@ -1059,13 +1052,13 @@ static void | |||
1059 | batadv_tt_global_del_orig_list(struct batadv_tt_global_entry *tt_global_entry) | 1052 | batadv_tt_global_del_orig_list(struct batadv_tt_global_entry *tt_global_entry) |
1060 | { | 1053 | { |
1061 | struct hlist_head *head; | 1054 | struct hlist_head *head; |
1062 | struct hlist_node *node, *safe; | 1055 | struct hlist_node *safe; |
1063 | struct batadv_tt_orig_list_entry *orig_entry; | 1056 | struct batadv_tt_orig_list_entry *orig_entry; |
1064 | 1057 | ||
1065 | spin_lock_bh(&tt_global_entry->list_lock); | 1058 | spin_lock_bh(&tt_global_entry->list_lock); |
1066 | head = &tt_global_entry->orig_list; | 1059 | head = &tt_global_entry->orig_list; |
1067 | hlist_for_each_entry_safe(orig_entry, node, safe, head, list) { | 1060 | hlist_for_each_entry_safe(orig_entry, safe, head, list) { |
1068 | hlist_del_rcu(node); | 1061 | hlist_del_rcu(&orig_entry->list); |
1069 | batadv_tt_orig_list_entry_free_ref(orig_entry); | 1062 | batadv_tt_orig_list_entry_free_ref(orig_entry); |
1070 | } | 1063 | } |
1071 | spin_unlock_bh(&tt_global_entry->list_lock); | 1064 | spin_unlock_bh(&tt_global_entry->list_lock); |
@@ -1078,18 +1071,18 @@ batadv_tt_global_del_orig_entry(struct batadv_priv *bat_priv, | |||
1078 | const char *message) | 1071 | const char *message) |
1079 | { | 1072 | { |
1080 | struct hlist_head *head; | 1073 | struct hlist_head *head; |
1081 | struct hlist_node *node, *safe; | 1074 | struct hlist_node *safe; |
1082 | struct batadv_tt_orig_list_entry *orig_entry; | 1075 | struct batadv_tt_orig_list_entry *orig_entry; |
1083 | 1076 | ||
1084 | spin_lock_bh(&tt_global_entry->list_lock); | 1077 | spin_lock_bh(&tt_global_entry->list_lock); |
1085 | head = &tt_global_entry->orig_list; | 1078 | head = &tt_global_entry->orig_list; |
1086 | hlist_for_each_entry_safe(orig_entry, node, safe, head, list) { | 1079 | hlist_for_each_entry_safe(orig_entry, safe, head, list) { |
1087 | if (orig_entry->orig_node == orig_node) { | 1080 | if (orig_entry->orig_node == orig_node) { |
1088 | batadv_dbg(BATADV_DBG_TT, bat_priv, | 1081 | batadv_dbg(BATADV_DBG_TT, bat_priv, |
1089 | "Deleting %pM from global tt entry %pM: %s\n", | 1082 | "Deleting %pM from global tt entry %pM: %s\n", |
1090 | orig_node->orig, | 1083 | orig_node->orig, |
1091 | tt_global_entry->common.addr, message); | 1084 | tt_global_entry->common.addr, message); |
1092 | hlist_del_rcu(node); | 1085 | hlist_del_rcu(&orig_entry->list); |
1093 | batadv_tt_orig_list_entry_free_ref(orig_entry); | 1086 | batadv_tt_orig_list_entry_free_ref(orig_entry); |
1094 | } | 1087 | } |
1095 | } | 1088 | } |
@@ -1108,7 +1101,6 @@ batadv_tt_global_del_roaming(struct batadv_priv *bat_priv, | |||
1108 | { | 1101 | { |
1109 | bool last_entry = true; | 1102 | bool last_entry = true; |
1110 | struct hlist_head *head; | 1103 | struct hlist_head *head; |
1111 | struct hlist_node *node; | ||
1112 | struct batadv_tt_orig_list_entry *orig_entry; | 1104 | struct batadv_tt_orig_list_entry *orig_entry; |
1113 | 1105 | ||
1114 | /* no local entry exists, case 1: | 1106 | /* no local entry exists, case 1: |
@@ -1117,7 +1109,7 @@ batadv_tt_global_del_roaming(struct batadv_priv *bat_priv, | |||
1117 | 1109 | ||
1118 | rcu_read_lock(); | 1110 | rcu_read_lock(); |
1119 | head = &tt_global_entry->orig_list; | 1111 | head = &tt_global_entry->orig_list; |
1120 | hlist_for_each_entry_rcu(orig_entry, node, head, list) { | 1112 | hlist_for_each_entry_rcu(orig_entry, head, list) { |
1121 | if (orig_entry->orig_node != orig_node) { | 1113 | if (orig_entry->orig_node != orig_node) { |
1122 | last_entry = false; | 1114 | last_entry = false; |
1123 | break; | 1115 | break; |
@@ -1202,7 +1194,7 @@ void batadv_tt_global_del_orig(struct batadv_priv *bat_priv, | |||
1202 | struct batadv_tt_common_entry *tt_common_entry; | 1194 | struct batadv_tt_common_entry *tt_common_entry; |
1203 | uint32_t i; | 1195 | uint32_t i; |
1204 | struct batadv_hashtable *hash = bat_priv->tt.global_hash; | 1196 | struct batadv_hashtable *hash = bat_priv->tt.global_hash; |
1205 | struct hlist_node *node, *safe; | 1197 | struct hlist_node *safe; |
1206 | struct hlist_head *head; | 1198 | struct hlist_head *head; |
1207 | spinlock_t *list_lock; /* protects write access to the hash lists */ | 1199 | spinlock_t *list_lock; /* protects write access to the hash lists */ |
1208 | 1200 | ||
@@ -1214,7 +1206,7 @@ void batadv_tt_global_del_orig(struct batadv_priv *bat_priv, | |||
1214 | list_lock = &hash->list_locks[i]; | 1206 | list_lock = &hash->list_locks[i]; |
1215 | 1207 | ||
1216 | spin_lock_bh(list_lock); | 1208 | spin_lock_bh(list_lock); |
1217 | hlist_for_each_entry_safe(tt_common_entry, node, safe, | 1209 | hlist_for_each_entry_safe(tt_common_entry, safe, |
1218 | head, hash_entry) { | 1210 | head, hash_entry) { |
1219 | tt_global = container_of(tt_common_entry, | 1211 | tt_global = container_of(tt_common_entry, |
1220 | struct batadv_tt_global_entry, | 1212 | struct batadv_tt_global_entry, |
@@ -1227,7 +1219,7 @@ void batadv_tt_global_del_orig(struct batadv_priv *bat_priv, | |||
1227 | batadv_dbg(BATADV_DBG_TT, bat_priv, | 1219 | batadv_dbg(BATADV_DBG_TT, bat_priv, |
1228 | "Deleting global tt entry %pM: %s\n", | 1220 | "Deleting global tt entry %pM: %s\n", |
1229 | tt_global->common.addr, message); | 1221 | tt_global->common.addr, message); |
1230 | hlist_del_rcu(node); | 1222 | hlist_del_rcu(&tt_common_entry->hash_entry); |
1231 | batadv_tt_global_entry_free_ref(tt_global); | 1223 | batadv_tt_global_entry_free_ref(tt_global); |
1232 | } | 1224 | } |
1233 | } | 1225 | } |
@@ -1262,7 +1254,7 @@ static void batadv_tt_global_purge(struct batadv_priv *bat_priv) | |||
1262 | { | 1254 | { |
1263 | struct batadv_hashtable *hash = bat_priv->tt.global_hash; | 1255 | struct batadv_hashtable *hash = bat_priv->tt.global_hash; |
1264 | struct hlist_head *head; | 1256 | struct hlist_head *head; |
1265 | struct hlist_node *node, *node_tmp; | 1257 | struct hlist_node *node_tmp; |
1266 | spinlock_t *list_lock; /* protects write access to the hash lists */ | 1258 | spinlock_t *list_lock; /* protects write access to the hash lists */ |
1267 | uint32_t i; | 1259 | uint32_t i; |
1268 | char *msg = NULL; | 1260 | char *msg = NULL; |
@@ -1274,7 +1266,7 @@ static void batadv_tt_global_purge(struct batadv_priv *bat_priv) | |||
1274 | list_lock = &hash->list_locks[i]; | 1266 | list_lock = &hash->list_locks[i]; |
1275 | 1267 | ||
1276 | spin_lock_bh(list_lock); | 1268 | spin_lock_bh(list_lock); |
1277 | hlist_for_each_entry_safe(tt_common, node, node_tmp, head, | 1269 | hlist_for_each_entry_safe(tt_common, node_tmp, head, |
1278 | hash_entry) { | 1270 | hash_entry) { |
1279 | tt_global = container_of(tt_common, | 1271 | tt_global = container_of(tt_common, |
1280 | struct batadv_tt_global_entry, | 1272 | struct batadv_tt_global_entry, |
@@ -1287,7 +1279,7 @@ static void batadv_tt_global_purge(struct batadv_priv *bat_priv) | |||
1287 | "Deleting global tt entry (%pM): %s\n", | 1279 | "Deleting global tt entry (%pM): %s\n", |
1288 | tt_global->common.addr, msg); | 1280 | tt_global->common.addr, msg); |
1289 | 1281 | ||
1290 | hlist_del_rcu(node); | 1282 | hlist_del_rcu(&tt_common->hash_entry); |
1291 | 1283 | ||
1292 | batadv_tt_global_entry_free_ref(tt_global); | 1284 | batadv_tt_global_entry_free_ref(tt_global); |
1293 | } | 1285 | } |
@@ -1301,7 +1293,7 @@ static void batadv_tt_global_table_free(struct batadv_priv *bat_priv) | |||
1301 | spinlock_t *list_lock; /* protects write access to the hash lists */ | 1293 | spinlock_t *list_lock; /* protects write access to the hash lists */ |
1302 | struct batadv_tt_common_entry *tt_common_entry; | 1294 | struct batadv_tt_common_entry *tt_common_entry; |
1303 | struct batadv_tt_global_entry *tt_global; | 1295 | struct batadv_tt_global_entry *tt_global; |
1304 | struct hlist_node *node, *node_tmp; | 1296 | struct hlist_node *node_tmp; |
1305 | struct hlist_head *head; | 1297 | struct hlist_head *head; |
1306 | uint32_t i; | 1298 | uint32_t i; |
1307 | 1299 | ||
@@ -1315,9 +1307,9 @@ static void batadv_tt_global_table_free(struct batadv_priv *bat_priv) | |||
1315 | list_lock = &hash->list_locks[i]; | 1307 | list_lock = &hash->list_locks[i]; |
1316 | 1308 | ||
1317 | spin_lock_bh(list_lock); | 1309 | spin_lock_bh(list_lock); |
1318 | hlist_for_each_entry_safe(tt_common_entry, node, node_tmp, | 1310 | hlist_for_each_entry_safe(tt_common_entry, node_tmp, |
1319 | head, hash_entry) { | 1311 | head, hash_entry) { |
1320 | hlist_del_rcu(node); | 1312 | hlist_del_rcu(&tt_common_entry->hash_entry); |
1321 | tt_global = container_of(tt_common_entry, | 1313 | tt_global = container_of(tt_common_entry, |
1322 | struct batadv_tt_global_entry, | 1314 | struct batadv_tt_global_entry, |
1323 | common); | 1315 | common); |
@@ -1397,7 +1389,6 @@ static uint16_t batadv_tt_global_crc(struct batadv_priv *bat_priv, | |||
1397 | struct batadv_hashtable *hash = bat_priv->tt.global_hash; | 1389 | struct batadv_hashtable *hash = bat_priv->tt.global_hash; |
1398 | struct batadv_tt_common_entry *tt_common; | 1390 | struct batadv_tt_common_entry *tt_common; |
1399 | struct batadv_tt_global_entry *tt_global; | 1391 | struct batadv_tt_global_entry *tt_global; |
1400 | struct hlist_node *node; | ||
1401 | struct hlist_head *head; | 1392 | struct hlist_head *head; |
1402 | uint32_t i; | 1393 | uint32_t i; |
1403 | int j; | 1394 | int j; |
@@ -1406,7 +1397,7 @@ static uint16_t batadv_tt_global_crc(struct batadv_priv *bat_priv, | |||
1406 | head = &hash->table[i]; | 1397 | head = &hash->table[i]; |
1407 | 1398 | ||
1408 | rcu_read_lock(); | 1399 | rcu_read_lock(); |
1409 | hlist_for_each_entry_rcu(tt_common, node, head, hash_entry) { | 1400 | hlist_for_each_entry_rcu(tt_common, head, hash_entry) { |
1410 | tt_global = container_of(tt_common, | 1401 | tt_global = container_of(tt_common, |
1411 | struct batadv_tt_global_entry, | 1402 | struct batadv_tt_global_entry, |
1412 | common); | 1403 | common); |
@@ -1449,7 +1440,6 @@ static uint16_t batadv_tt_local_crc(struct batadv_priv *bat_priv) | |||
1449 | uint16_t total = 0, total_one; | 1440 | uint16_t total = 0, total_one; |
1450 | struct batadv_hashtable *hash = bat_priv->tt.local_hash; | 1441 | struct batadv_hashtable *hash = bat_priv->tt.local_hash; |
1451 | struct batadv_tt_common_entry *tt_common; | 1442 | struct batadv_tt_common_entry *tt_common; |
1452 | struct hlist_node *node; | ||
1453 | struct hlist_head *head; | 1443 | struct hlist_head *head; |
1454 | uint32_t i; | 1444 | uint32_t i; |
1455 | int j; | 1445 | int j; |
@@ -1458,7 +1448,7 @@ static uint16_t batadv_tt_local_crc(struct batadv_priv *bat_priv) | |||
1458 | head = &hash->table[i]; | 1448 | head = &hash->table[i]; |
1459 | 1449 | ||
1460 | rcu_read_lock(); | 1450 | rcu_read_lock(); |
1461 | hlist_for_each_entry_rcu(tt_common, node, head, hash_entry) { | 1451 | hlist_for_each_entry_rcu(tt_common, head, hash_entry) { |
1462 | /* not yet committed clients have not to be taken into | 1452 | /* not yet committed clients have not to be taken into |
1463 | * account while computing the CRC | 1453 | * account while computing the CRC |
1464 | */ | 1454 | */ |
@@ -1597,7 +1587,6 @@ batadv_tt_response_fill_table(uint16_t tt_len, uint8_t ttvn, | |||
1597 | struct batadv_tt_common_entry *tt_common_entry; | 1587 | struct batadv_tt_common_entry *tt_common_entry; |
1598 | struct batadv_tt_query_packet *tt_response; | 1588 | struct batadv_tt_query_packet *tt_response; |
1599 | struct batadv_tt_change *tt_change; | 1589 | struct batadv_tt_change *tt_change; |
1600 | struct hlist_node *node; | ||
1601 | struct hlist_head *head; | 1590 | struct hlist_head *head; |
1602 | struct sk_buff *skb = NULL; | 1591 | struct sk_buff *skb = NULL; |
1603 | uint16_t tt_tot, tt_count; | 1592 | uint16_t tt_tot, tt_count; |
@@ -1627,7 +1616,7 @@ batadv_tt_response_fill_table(uint16_t tt_len, uint8_t ttvn, | |||
1627 | for (i = 0; i < hash->size; i++) { | 1616 | for (i = 0; i < hash->size; i++) { |
1628 | head = &hash->table[i]; | 1617 | head = &hash->table[i]; |
1629 | 1618 | ||
1630 | hlist_for_each_entry_rcu(tt_common_entry, node, | 1619 | hlist_for_each_entry_rcu(tt_common_entry, |
1631 | head, hash_entry) { | 1620 | head, hash_entry) { |
1632 | if (tt_count == tt_tot) | 1621 | if (tt_count == tt_tot) |
1633 | break; | 1622 | break; |
@@ -2307,7 +2296,6 @@ static uint16_t batadv_tt_set_flags(struct batadv_hashtable *hash, | |||
2307 | uint32_t i; | 2296 | uint32_t i; |
2308 | uint16_t changed_num = 0; | 2297 | uint16_t changed_num = 0; |
2309 | struct hlist_head *head; | 2298 | struct hlist_head *head; |
2310 | struct hlist_node *node; | ||
2311 | struct batadv_tt_common_entry *tt_common_entry; | 2299 | struct batadv_tt_common_entry *tt_common_entry; |
2312 | 2300 | ||
2313 | if (!hash) | 2301 | if (!hash) |
@@ -2317,7 +2305,7 @@ static uint16_t batadv_tt_set_flags(struct batadv_hashtable *hash, | |||
2317 | head = &hash->table[i]; | 2305 | head = &hash->table[i]; |
2318 | 2306 | ||
2319 | rcu_read_lock(); | 2307 | rcu_read_lock(); |
2320 | hlist_for_each_entry_rcu(tt_common_entry, node, | 2308 | hlist_for_each_entry_rcu(tt_common_entry, |
2321 | head, hash_entry) { | 2309 | head, hash_entry) { |
2322 | if (enable) { | 2310 | if (enable) { |
2323 | if ((tt_common_entry->flags & flags) == flags) | 2311 | if ((tt_common_entry->flags & flags) == flags) |
@@ -2342,7 +2330,7 @@ static void batadv_tt_local_purge_pending_clients(struct batadv_priv *bat_priv) | |||
2342 | struct batadv_hashtable *hash = bat_priv->tt.local_hash; | 2330 | struct batadv_hashtable *hash = bat_priv->tt.local_hash; |
2343 | struct batadv_tt_common_entry *tt_common; | 2331 | struct batadv_tt_common_entry *tt_common; |
2344 | struct batadv_tt_local_entry *tt_local; | 2332 | struct batadv_tt_local_entry *tt_local; |
2345 | struct hlist_node *node, *node_tmp; | 2333 | struct hlist_node *node_tmp; |
2346 | struct hlist_head *head; | 2334 | struct hlist_head *head; |
2347 | spinlock_t *list_lock; /* protects write access to the hash lists */ | 2335 | spinlock_t *list_lock; /* protects write access to the hash lists */ |
2348 | uint32_t i; | 2336 | uint32_t i; |
@@ -2355,7 +2343,7 @@ static void batadv_tt_local_purge_pending_clients(struct batadv_priv *bat_priv) | |||
2355 | list_lock = &hash->list_locks[i]; | 2343 | list_lock = &hash->list_locks[i]; |
2356 | 2344 | ||
2357 | spin_lock_bh(list_lock); | 2345 | spin_lock_bh(list_lock); |
2358 | hlist_for_each_entry_safe(tt_common, node, node_tmp, head, | 2346 | hlist_for_each_entry_safe(tt_common, node_tmp, head, |
2359 | hash_entry) { | 2347 | hash_entry) { |
2360 | if (!(tt_common->flags & BATADV_TT_CLIENT_PENDING)) | 2348 | if (!(tt_common->flags & BATADV_TT_CLIENT_PENDING)) |
2361 | continue; | 2349 | continue; |
@@ -2365,7 +2353,7 @@ static void batadv_tt_local_purge_pending_clients(struct batadv_priv *bat_priv) | |||
2365 | tt_common->addr); | 2353 | tt_common->addr); |
2366 | 2354 | ||
2367 | atomic_dec(&bat_priv->tt.local_entry_num); | 2355 | atomic_dec(&bat_priv->tt.local_entry_num); |
2368 | hlist_del_rcu(node); | 2356 | hlist_del_rcu(&tt_common->hash_entry); |
2369 | tt_local = container_of(tt_common, | 2357 | tt_local = container_of(tt_common, |
2370 | struct batadv_tt_local_entry, | 2358 | struct batadv_tt_local_entry, |
2371 | common); | 2359 | common); |
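In the hunk above the spare cursor was genuinely used: it was the argument to hlist_del_rcu(). With the cursor gone, the call names the entry's embedded hlist_node instead, and both spellings denote the same pointer. A stand-alone sketch of the pattern, with a hypothetical struct item (the RCU variants follow the same shape):

#include <linux/list.h>
#include <linux/slab.h>

struct item {
        int key;
        struct hlist_node hash_entry;
};

static void purge_key(struct hlist_head *head, int key)
{
        struct item *it;
        struct hlist_node *tmp; /* lookahead only, not a position cursor */

        hlist_for_each_entry_safe(it, tmp, head, hash_entry) {
                if (it->key != key)
                        continue;
                hlist_del(&it->hash_entry);     /* was: hlist_del(node) */
                kfree(it);
        }
}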
diff --git a/net/batman-adv/vis.c b/net/batman-adv/vis.c
index 22d2785177d1..c053244b97bd 100644
--- a/net/batman-adv/vis.c
+++ b/net/batman-adv/vis.c
@@ -97,7 +97,6 @@ batadv_vis_hash_find(struct batadv_priv *bat_priv, const void *data) | |||
97 | { | 97 | { |
98 | struct batadv_hashtable *hash = bat_priv->vis.hash; | 98 | struct batadv_hashtable *hash = bat_priv->vis.hash; |
99 | struct hlist_head *head; | 99 | struct hlist_head *head; |
100 | struct hlist_node *node; | ||
101 | struct batadv_vis_info *vis_info, *vis_info_tmp = NULL; | 100 | struct batadv_vis_info *vis_info, *vis_info_tmp = NULL; |
102 | uint32_t index; | 101 | uint32_t index; |
103 | 102 | ||
@@ -108,8 +107,8 @@ batadv_vis_hash_find(struct batadv_priv *bat_priv, const void *data) | |||
108 | head = &hash->table[index]; | 107 | head = &hash->table[index]; |
109 | 108 | ||
110 | rcu_read_lock(); | 109 | rcu_read_lock(); |
111 | hlist_for_each_entry_rcu(vis_info, node, head, hash_entry) { | 110 | hlist_for_each_entry_rcu(vis_info, head, hash_entry) { |
112 | if (!batadv_vis_info_cmp(node, data)) | 111 | if (!batadv_vis_info_cmp(&vis_info->hash_entry, data)) |
113 | continue; | 112 | continue; |
114 | 113 | ||
115 | vis_info_tmp = vis_info; | 114 | vis_info_tmp = vis_info; |
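A second manual class shows up in batadv_vis_hash_find() above: helpers whose signature takes a struct hlist_node * keep that signature, so callers now pass the address of the entry's embedded node where the cursor used to go. A hedged sketch, with a hypothetical my_info type standing in for the batman-adv ones:

#include <linux/list.h>
#include <linux/string.h>
#include <linux/if_ether.h>

struct my_info {
        u8 addr[ETH_ALEN];
        struct hlist_node hash_entry;
};

static int my_info_cmp(const struct hlist_node *node, const void *key)
{
        const struct my_info *info =
                container_of(node, struct my_info, hash_entry);

        return !memcmp(info->addr, key, ETH_ALEN);
}

static struct my_info *my_find(struct hlist_head *head, const void *key)
{
        struct my_info *info;

        hlist_for_each_entry(info, head, hash_entry)
                if (my_info_cmp(&info->hash_entry, key)) /* was: (node, key) */
                        return info;
        return NULL;
}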
@@ -128,9 +127,8 @@ static void batadv_vis_data_insert_interface(const uint8_t *interface, | |||
128 | bool primary) | 127 | bool primary) |
129 | { | 128 | { |
130 | struct batadv_vis_if_list_entry *entry; | 129 | struct batadv_vis_if_list_entry *entry; |
131 | struct hlist_node *pos; | ||
132 | 130 | ||
133 | hlist_for_each_entry(entry, pos, if_list, list) { | 131 | hlist_for_each_entry(entry, if_list, list) { |
134 | if (batadv_compare_eth(entry->addr, interface)) | 132 | if (batadv_compare_eth(entry->addr, interface)) |
135 | return; | 133 | return; |
136 | } | 134 | } |
@@ -148,9 +146,8 @@ static void batadv_vis_data_read_prim_sec(struct seq_file *seq, | |||
148 | const struct hlist_head *if_list) | 146 | const struct hlist_head *if_list) |
149 | { | 147 | { |
150 | struct batadv_vis_if_list_entry *entry; | 148 | struct batadv_vis_if_list_entry *entry; |
151 | struct hlist_node *pos; | ||
152 | 149 | ||
153 | hlist_for_each_entry(entry, pos, if_list, list) { | 150 | hlist_for_each_entry(entry, if_list, list) { |
154 | if (entry->primary) | 151 | if (entry->primary) |
155 | seq_printf(seq, "PRIMARY, "); | 152 | seq_printf(seq, "PRIMARY, "); |
156 | else | 153 | else |
@@ -198,9 +195,8 @@ static void batadv_vis_data_read_entries(struct seq_file *seq, | |||
198 | { | 195 | { |
199 | int i; | 196 | int i; |
200 | struct batadv_vis_if_list_entry *entry; | 197 | struct batadv_vis_if_list_entry *entry; |
201 | struct hlist_node *pos; | ||
202 | 198 | ||
203 | hlist_for_each_entry(entry, pos, list, list) { | 199 | hlist_for_each_entry(entry, list, list) { |
204 | seq_printf(seq, "%pM,", entry->addr); | 200 | seq_printf(seq, "%pM,", entry->addr); |
205 | 201 | ||
206 | for (i = 0; i < packet->entries; i++) | 202 | for (i = 0; i < packet->entries; i++) |
@@ -218,17 +214,16 @@ static void batadv_vis_data_read_entries(struct seq_file *seq, | |||
218 | static void batadv_vis_seq_print_text_bucket(struct seq_file *seq, | 214 | static void batadv_vis_seq_print_text_bucket(struct seq_file *seq, |
219 | const struct hlist_head *head) | 215 | const struct hlist_head *head) |
220 | { | 216 | { |
221 | struct hlist_node *node; | ||
222 | struct batadv_vis_info *info; | 217 | struct batadv_vis_info *info; |
223 | struct batadv_vis_packet *packet; | 218 | struct batadv_vis_packet *packet; |
224 | uint8_t *entries_pos; | 219 | uint8_t *entries_pos; |
225 | struct batadv_vis_info_entry *entries; | 220 | struct batadv_vis_info_entry *entries; |
226 | struct batadv_vis_if_list_entry *entry; | 221 | struct batadv_vis_if_list_entry *entry; |
227 | struct hlist_node *pos, *n; | 222 | struct hlist_node *n; |
228 | 223 | ||
229 | HLIST_HEAD(vis_if_list); | 224 | HLIST_HEAD(vis_if_list); |
230 | 225 | ||
231 | hlist_for_each_entry_rcu(info, node, head, hash_entry) { | 226 | hlist_for_each_entry_rcu(info, head, hash_entry) { |
232 | packet = (struct batadv_vis_packet *)info->skb_packet->data; | 227 | packet = (struct batadv_vis_packet *)info->skb_packet->data; |
233 | entries_pos = (uint8_t *)packet + sizeof(*packet); | 228 | entries_pos = (uint8_t *)packet + sizeof(*packet); |
234 | entries = (struct batadv_vis_info_entry *)entries_pos; | 229 | entries = (struct batadv_vis_info_entry *)entries_pos; |
@@ -240,7 +235,7 @@ static void batadv_vis_seq_print_text_bucket(struct seq_file *seq, | |||
240 | batadv_vis_data_read_entries(seq, &vis_if_list, packet, | 235 | batadv_vis_data_read_entries(seq, &vis_if_list, packet, |
241 | entries); | 236 | entries); |
242 | 237 | ||
243 | hlist_for_each_entry_safe(entry, pos, n, &vis_if_list, list) { | 238 | hlist_for_each_entry_safe(entry, n, &vis_if_list, list) { |
244 | hlist_del(&entry->list); | 239 | hlist_del(&entry->list); |
245 | kfree(entry); | 240 | kfree(entry); |
246 | } | 241 | } |
@@ -519,7 +514,6 @@ static int batadv_find_best_vis_server(struct batadv_priv *bat_priv, | |||
519 | { | 514 | { |
520 | struct batadv_hashtable *hash = bat_priv->orig_hash; | 515 | struct batadv_hashtable *hash = bat_priv->orig_hash; |
521 | struct batadv_neigh_node *router; | 516 | struct batadv_neigh_node *router; |
522 | struct hlist_node *node; | ||
523 | struct hlist_head *head; | 517 | struct hlist_head *head; |
524 | struct batadv_orig_node *orig_node; | 518 | struct batadv_orig_node *orig_node; |
525 | struct batadv_vis_packet *packet; | 519 | struct batadv_vis_packet *packet; |
@@ -532,7 +526,7 @@ static int batadv_find_best_vis_server(struct batadv_priv *bat_priv, | |||
532 | head = &hash->table[i]; | 526 | head = &hash->table[i]; |
533 | 527 | ||
534 | rcu_read_lock(); | 528 | rcu_read_lock(); |
535 | hlist_for_each_entry_rcu(orig_node, node, head, hash_entry) { | 529 | hlist_for_each_entry_rcu(orig_node, head, hash_entry) { |
536 | router = batadv_orig_node_get_router(orig_node); | 530 | router = batadv_orig_node_get_router(orig_node); |
537 | if (!router) | 531 | if (!router) |
538 | continue; | 532 | continue; |
@@ -571,7 +565,6 @@ static bool batadv_vis_packet_full(const struct batadv_vis_info *info) | |||
571 | static int batadv_generate_vis_packet(struct batadv_priv *bat_priv) | 565 | static int batadv_generate_vis_packet(struct batadv_priv *bat_priv) |
572 | { | 566 | { |
573 | struct batadv_hashtable *hash = bat_priv->orig_hash; | 567 | struct batadv_hashtable *hash = bat_priv->orig_hash; |
574 | struct hlist_node *node; | ||
575 | struct hlist_head *head; | 568 | struct hlist_head *head; |
576 | struct batadv_orig_node *orig_node; | 569 | struct batadv_orig_node *orig_node; |
577 | struct batadv_neigh_node *router; | 570 | struct batadv_neigh_node *router; |
@@ -605,7 +598,7 @@ static int batadv_generate_vis_packet(struct batadv_priv *bat_priv) | |||
605 | head = &hash->table[i]; | 598 | head = &hash->table[i]; |
606 | 599 | ||
607 | rcu_read_lock(); | 600 | rcu_read_lock(); |
608 | hlist_for_each_entry_rcu(orig_node, node, head, hash_entry) { | 601 | hlist_for_each_entry_rcu(orig_node, head, hash_entry) { |
609 | router = batadv_orig_node_get_router(orig_node); | 602 | router = batadv_orig_node_get_router(orig_node); |
610 | if (!router) | 603 | if (!router) |
611 | continue; | 604 | continue; |
@@ -644,7 +637,7 @@ next: | |||
644 | head = &hash->table[i]; | 637 | head = &hash->table[i]; |
645 | 638 | ||
646 | rcu_read_lock(); | 639 | rcu_read_lock(); |
647 | hlist_for_each_entry_rcu(tt_common_entry, node, head, | 640 | hlist_for_each_entry_rcu(tt_common_entry, head, |
648 | hash_entry) { | 641 | hash_entry) { |
649 | packet_pos = skb_put(info->skb_packet, sizeof(*entry)); | 642 | packet_pos = skb_put(info->skb_packet, sizeof(*entry)); |
650 | entry = (struct batadv_vis_info_entry *)packet_pos; | 643 | entry = (struct batadv_vis_info_entry *)packet_pos; |
@@ -673,14 +666,14 @@ static void batadv_purge_vis_packets(struct batadv_priv *bat_priv) | |||
673 | { | 666 | { |
674 | uint32_t i; | 667 | uint32_t i; |
675 | struct batadv_hashtable *hash = bat_priv->vis.hash; | 668 | struct batadv_hashtable *hash = bat_priv->vis.hash; |
676 | struct hlist_node *node, *node_tmp; | 669 | struct hlist_node *node_tmp; |
677 | struct hlist_head *head; | 670 | struct hlist_head *head; |
678 | struct batadv_vis_info *info; | 671 | struct batadv_vis_info *info; |
679 | 672 | ||
680 | for (i = 0; i < hash->size; i++) { | 673 | for (i = 0; i < hash->size; i++) { |
681 | head = &hash->table[i]; | 674 | head = &hash->table[i]; |
682 | 675 | ||
683 | hlist_for_each_entry_safe(info, node, node_tmp, | 676 | hlist_for_each_entry_safe(info, node_tmp, |
684 | head, hash_entry) { | 677 | head, hash_entry) { |
685 | /* never purge own data. */ | 678 | /* never purge own data. */ |
686 | if (info == bat_priv->vis.my_info) | 679 | if (info == bat_priv->vis.my_info) |
@@ -688,7 +681,7 @@ static void batadv_purge_vis_packets(struct batadv_priv *bat_priv) | |||
688 | 681 | ||
689 | if (batadv_has_timed_out(info->first_seen, | 682 | if (batadv_has_timed_out(info->first_seen, |
690 | BATADV_VIS_TIMEOUT)) { | 683 | BATADV_VIS_TIMEOUT)) { |
691 | hlist_del(node); | 684 | hlist_del(&info->hash_entry); |
692 | batadv_send_list_del(info); | 685 | batadv_send_list_del(info); |
693 | kref_put(&info->refcount, batadv_free_info); | 686 | kref_put(&info->refcount, batadv_free_info); |
694 | } | 687 | } |
@@ -700,7 +693,6 @@ static void batadv_broadcast_vis_packet(struct batadv_priv *bat_priv, | |||
700 | struct batadv_vis_info *info) | 693 | struct batadv_vis_info *info) |
701 | { | 694 | { |
702 | struct batadv_hashtable *hash = bat_priv->orig_hash; | 695 | struct batadv_hashtable *hash = bat_priv->orig_hash; |
703 | struct hlist_node *node; | ||
704 | struct hlist_head *head; | 696 | struct hlist_head *head; |
705 | struct batadv_orig_node *orig_node; | 697 | struct batadv_orig_node *orig_node; |
706 | struct batadv_vis_packet *packet; | 698 | struct batadv_vis_packet *packet; |
@@ -715,7 +707,7 @@ static void batadv_broadcast_vis_packet(struct batadv_priv *bat_priv, | |||
715 | head = &hash->table[i]; | 707 | head = &hash->table[i]; |
716 | 708 | ||
717 | rcu_read_lock(); | 709 | rcu_read_lock(); |
718 | hlist_for_each_entry_rcu(orig_node, node, head, hash_entry) { | 710 | hlist_for_each_entry_rcu(orig_node, head, hash_entry) { |
719 | /* if it's a vis server and reachable, send it. */ | 711 | /* if it's a vis server and reachable, send it. */ |
720 | if (!(orig_node->flags & BATADV_VIS_SERVER)) | 712 | if (!(orig_node->flags & BATADV_VIS_SERVER)) |
721 | continue; | 713 | continue; |
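Stepping back, the reason the parameter could be dropped at all: an hlist_node is embedded in its containing entry, so entry and node are interconvertible through container_of(), and the iterator can keep its position in the entry pointer itself, which goes NULL when the chain is exhausted. The reworked iterators in linux/list.h have roughly this shape (paraphrased, not quoted verbatim):

#define hlist_entry_safe(ptr, type, member) \
        ({ typeof(ptr) ____ptr = (ptr); \
           ____ptr ? hlist_entry(____ptr, type, member) : NULL; \
        })

#define hlist_for_each_entry(pos, head, member) \
        for (pos = hlist_entry_safe((head)->first, typeof(*(pos)), member); \
             pos; \
             pos = hlist_entry_safe((pos)->member.next, typeof(*(pos)), member))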
diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c
index 07f073935811..6a93614f2c49 100644
--- a/net/bluetooth/hci_sock.c
+++ b/net/bluetooth/hci_sock.c
@@ -70,14 +70,13 @@ static struct bt_sock_list hci_sk_list = { | |||
70 | void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb) | 70 | void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb) |
71 | { | 71 | { |
72 | struct sock *sk; | 72 | struct sock *sk; |
73 | struct hlist_node *node; | ||
74 | struct sk_buff *skb_copy = NULL; | 73 | struct sk_buff *skb_copy = NULL; |
75 | 74 | ||
76 | BT_DBG("hdev %p len %d", hdev, skb->len); | 75 | BT_DBG("hdev %p len %d", hdev, skb->len); |
77 | 76 | ||
78 | read_lock(&hci_sk_list.lock); | 77 | read_lock(&hci_sk_list.lock); |
79 | 78 | ||
80 | sk_for_each(sk, node, &hci_sk_list.head) { | 79 | sk_for_each(sk, &hci_sk_list.head) { |
81 | struct hci_filter *flt; | 80 | struct hci_filter *flt; |
82 | struct sk_buff *nskb; | 81 | struct sk_buff *nskb; |
83 | 82 | ||
@@ -142,13 +141,12 @@ void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb) | |||
142 | void hci_send_to_control(struct sk_buff *skb, struct sock *skip_sk) | 141 | void hci_send_to_control(struct sk_buff *skb, struct sock *skip_sk) |
143 | { | 142 | { |
144 | struct sock *sk; | 143 | struct sock *sk; |
145 | struct hlist_node *node; | ||
146 | 144 | ||
147 | BT_DBG("len %d", skb->len); | 145 | BT_DBG("len %d", skb->len); |
148 | 146 | ||
149 | read_lock(&hci_sk_list.lock); | 147 | read_lock(&hci_sk_list.lock); |
150 | 148 | ||
151 | sk_for_each(sk, node, &hci_sk_list.head) { | 149 | sk_for_each(sk, &hci_sk_list.head) { |
152 | struct sk_buff *nskb; | 150 | struct sk_buff *nskb; |
153 | 151 | ||
154 | /* Skip the original socket */ | 152 | /* Skip the original socket */ |
@@ -176,7 +174,6 @@ void hci_send_to_control(struct sk_buff *skb, struct sock *skip_sk) | |||
176 | void hci_send_to_monitor(struct hci_dev *hdev, struct sk_buff *skb) | 174 | void hci_send_to_monitor(struct hci_dev *hdev, struct sk_buff *skb) |
177 | { | 175 | { |
178 | struct sock *sk; | 176 | struct sock *sk; |
179 | struct hlist_node *node; | ||
180 | struct sk_buff *skb_copy = NULL; | 177 | struct sk_buff *skb_copy = NULL; |
181 | __le16 opcode; | 178 | __le16 opcode; |
182 | 179 | ||
@@ -210,7 +207,7 @@ void hci_send_to_monitor(struct hci_dev *hdev, struct sk_buff *skb) | |||
210 | 207 | ||
211 | read_lock(&hci_sk_list.lock); | 208 | read_lock(&hci_sk_list.lock); |
212 | 209 | ||
213 | sk_for_each(sk, node, &hci_sk_list.head) { | 210 | sk_for_each(sk, &hci_sk_list.head) { |
214 | struct sk_buff *nskb; | 211 | struct sk_buff *nskb; |
215 | 212 | ||
216 | if (sk->sk_state != BT_BOUND) | 213 | if (sk->sk_state != BT_BOUND) |
@@ -251,13 +248,12 @@ void hci_send_to_monitor(struct hci_dev *hdev, struct sk_buff *skb) | |||
251 | static void send_monitor_event(struct sk_buff *skb) | 248 | static void send_monitor_event(struct sk_buff *skb) |
252 | { | 249 | { |
253 | struct sock *sk; | 250 | struct sock *sk; |
254 | struct hlist_node *node; | ||
255 | 251 | ||
256 | BT_DBG("len %d", skb->len); | 252 | BT_DBG("len %d", skb->len); |
257 | 253 | ||
258 | read_lock(&hci_sk_list.lock); | 254 | read_lock(&hci_sk_list.lock); |
259 | 255 | ||
260 | sk_for_each(sk, node, &hci_sk_list.head) { | 256 | sk_for_each(sk, &hci_sk_list.head) { |
261 | struct sk_buff *nskb; | 257 | struct sk_buff *nskb; |
262 | 258 | ||
263 | if (sk->sk_state != BT_BOUND) | 259 | if (sk->sk_state != BT_BOUND) |
@@ -393,11 +389,10 @@ void hci_sock_dev_event(struct hci_dev *hdev, int event) | |||
393 | 389 | ||
394 | if (event == HCI_DEV_UNREG) { | 390 | if (event == HCI_DEV_UNREG) { |
395 | struct sock *sk; | 391 | struct sock *sk; |
396 | struct hlist_node *node; | ||
397 | 392 | ||
398 | /* Detach sockets from device */ | 393 | /* Detach sockets from device */ |
399 | read_lock(&hci_sk_list.lock); | 394 | read_lock(&hci_sk_list.lock); |
400 | sk_for_each(sk, node, &hci_sk_list.head) { | 395 | sk_for_each(sk, &hci_sk_list.head) { |
401 | bh_lock_sock_nested(sk); | 396 | bh_lock_sock_nested(sk); |
402 | if (hci_pi(sk)->hdev == hdev) { | 397 | if (hci_pi(sk)->hdev == hdev) { |
403 | hci_pi(sk)->hdev = NULL; | 398 | hci_pi(sk)->hdev = NULL; |
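The sk_for_each() family in include/net/sock.h is a thin wrapper over the hlist iterators, so it sheds its node argument the same way. Before/after sketch of a socket-list walk, with handle() a hypothetical callback:

struct sock *sk;
struct hlist_node *node;

/* before the series: a separate cursor tracked the position */
sk_for_each(sk, node, &hci_sk_list.head)
        handle(sk);

/* after: the socket pointer is the cursor, NULL once the walk completes */
sk_for_each(sk, &hci_sk_list.head)
        handle(sk);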
diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c
index ce3f6658f4b2..c23bae86263b 100644
--- a/net/bluetooth/rfcomm/sock.c
+++ b/net/bluetooth/rfcomm/sock.c
@@ -107,15 +107,14 @@ static void rfcomm_sk_state_change(struct rfcomm_dlc *d, int err) | |||
107 | static struct sock *__rfcomm_get_sock_by_addr(u8 channel, bdaddr_t *src) | 107 | static struct sock *__rfcomm_get_sock_by_addr(u8 channel, bdaddr_t *src) |
108 | { | 108 | { |
109 | struct sock *sk = NULL; | 109 | struct sock *sk = NULL; |
110 | struct hlist_node *node; | ||
111 | 110 | ||
112 | sk_for_each(sk, node, &rfcomm_sk_list.head) { | 111 | sk_for_each(sk, &rfcomm_sk_list.head) { |
113 | if (rfcomm_pi(sk)->channel == channel && | 112 | if (rfcomm_pi(sk)->channel == channel && |
114 | !bacmp(&bt_sk(sk)->src, src)) | 113 | !bacmp(&bt_sk(sk)->src, src)) |
115 | break; | 114 | break; |
116 | } | 115 | } |
117 | 116 | ||
118 | return node ? sk : NULL; | 117 | return sk ? sk : NULL; |
119 | } | 118 | } |
120 | 119 | ||
121 | /* Find socket with channel and source bdaddr. | 120 | /* Find socket with channel and source bdaddr. |
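The rewritten return statement above leans on the NULL-on-exhaustion property: the entry pointer now doubles as the found flag, so the old `node ? sk : NULL` becomes a test on sk itself (and `sk ? sk : NULL` is just a long-winded sk). Sketch, with match() a hypothetical predicate:

static struct sock *find_sock(void)
{
        struct sock *sk;

        sk_for_each(sk, &rfcomm_sk_list.head) {
                if (match(sk))
                        break;  /* sk still points at the match */
        }
        /* if no break was taken, sk is NULL here */
        return sk;
}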
@@ -124,11 +123,10 @@ static struct sock *__rfcomm_get_sock_by_addr(u8 channel, bdaddr_t *src) | |||
124 | static struct sock *rfcomm_get_sock_by_channel(int state, u8 channel, bdaddr_t *src) | 123 | static struct sock *rfcomm_get_sock_by_channel(int state, u8 channel, bdaddr_t *src) |
125 | { | 124 | { |
126 | struct sock *sk = NULL, *sk1 = NULL; | 125 | struct sock *sk = NULL, *sk1 = NULL; |
127 | struct hlist_node *node; | ||
128 | 126 | ||
129 | read_lock(&rfcomm_sk_list.lock); | 127 | read_lock(&rfcomm_sk_list.lock); |
130 | 128 | ||
131 | sk_for_each(sk, node, &rfcomm_sk_list.head) { | 129 | sk_for_each(sk, &rfcomm_sk_list.head) { |
132 | if (state && sk->sk_state != state) | 130 | if (state && sk->sk_state != state) |
133 | continue; | 131 | continue; |
134 | 132 | ||
@@ -145,7 +143,7 @@ static struct sock *rfcomm_get_sock_by_channel(int state, u8 channel, bdaddr_t * | |||
145 | 143 | ||
146 | read_unlock(&rfcomm_sk_list.lock); | 144 | read_unlock(&rfcomm_sk_list.lock); |
147 | 145 | ||
148 | return node ? sk : sk1; | 146 | return sk ? sk : sk1; |
149 | } | 147 | } |
150 | 148 | ||
151 | static void rfcomm_sock_destruct(struct sock *sk) | 149 | static void rfcomm_sock_destruct(struct sock *sk) |
@@ -970,11 +968,10 @@ done: | |||
970 | static int rfcomm_sock_debugfs_show(struct seq_file *f, void *p) | 968 | static int rfcomm_sock_debugfs_show(struct seq_file *f, void *p) |
971 | { | 969 | { |
972 | struct sock *sk; | 970 | struct sock *sk; |
973 | struct hlist_node *node; | ||
974 | 971 | ||
975 | read_lock(&rfcomm_sk_list.lock); | 972 | read_lock(&rfcomm_sk_list.lock); |
976 | 973 | ||
977 | sk_for_each(sk, node, &rfcomm_sk_list.head) { | 974 | sk_for_each(sk, &rfcomm_sk_list.head) { |
978 | seq_printf(f, "%pMR %pMR %d %d\n", | 975 | seq_printf(f, "%pMR %pMR %d %d\n", |
979 | &bt_sk(sk)->src, &bt_sk(sk)->dst, | 976 | &bt_sk(sk)->src, &bt_sk(sk)->dst, |
980 | sk->sk_state, rfcomm_pi(sk)->channel); | 977 | sk->sk_state, rfcomm_pi(sk)->channel); |
diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c
index b5178d62064e..79d87d8d4f51 100644
--- a/net/bluetooth/sco.c
+++ b/net/bluetooth/sco.c
@@ -259,10 +259,9 @@ drop: | |||
259 | /* -------- Socket interface ---------- */ | 259 | /* -------- Socket interface ---------- */ |
260 | static struct sock *__sco_get_sock_listen_by_addr(bdaddr_t *ba) | 260 | static struct sock *__sco_get_sock_listen_by_addr(bdaddr_t *ba) |
261 | { | 261 | { |
262 | struct hlist_node *node; | ||
263 | struct sock *sk; | 262 | struct sock *sk; |
264 | 263 | ||
265 | sk_for_each(sk, node, &sco_sk_list.head) { | 264 | sk_for_each(sk, &sco_sk_list.head) { |
266 | if (sk->sk_state != BT_LISTEN) | 265 | if (sk->sk_state != BT_LISTEN) |
267 | continue; | 266 | continue; |
268 | 267 | ||
@@ -279,11 +278,10 @@ static struct sock *__sco_get_sock_listen_by_addr(bdaddr_t *ba) | |||
279 | static struct sock *sco_get_sock_listen(bdaddr_t *src) | 278 | static struct sock *sco_get_sock_listen(bdaddr_t *src) |
280 | { | 279 | { |
281 | struct sock *sk = NULL, *sk1 = NULL; | 280 | struct sock *sk = NULL, *sk1 = NULL; |
282 | struct hlist_node *node; | ||
283 | 281 | ||
284 | read_lock(&sco_sk_list.lock); | 282 | read_lock(&sco_sk_list.lock); |
285 | 283 | ||
286 | sk_for_each(sk, node, &sco_sk_list.head) { | 284 | sk_for_each(sk, &sco_sk_list.head) { |
287 | if (sk->sk_state != BT_LISTEN) | 285 | if (sk->sk_state != BT_LISTEN) |
288 | continue; | 286 | continue; |
289 | 287 | ||
@@ -298,7 +296,7 @@ static struct sock *sco_get_sock_listen(bdaddr_t *src) | |||
298 | 296 | ||
299 | read_unlock(&sco_sk_list.lock); | 297 | read_unlock(&sco_sk_list.lock); |
300 | 298 | ||
301 | return node ? sk : sk1; | 299 | return sk ? sk : sk1; |
302 | } | 300 | } |
303 | 301 | ||
304 | static void sco_sock_destruct(struct sock *sk) | 302 | static void sco_sock_destruct(struct sock *sk) |
@@ -951,14 +949,13 @@ static void sco_conn_ready(struct sco_conn *conn) | |||
951 | int sco_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, __u8 *flags) | 949 | int sco_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, __u8 *flags) |
952 | { | 950 | { |
953 | struct sock *sk; | 951 | struct sock *sk; |
954 | struct hlist_node *node; | ||
955 | int lm = 0; | 952 | int lm = 0; |
956 | 953 | ||
957 | BT_DBG("hdev %s, bdaddr %pMR", hdev->name, bdaddr); | 954 | BT_DBG("hdev %s, bdaddr %pMR", hdev->name, bdaddr); |
958 | 955 | ||
959 | /* Find listening sockets */ | 956 | /* Find listening sockets */ |
960 | read_lock(&sco_sk_list.lock); | 957 | read_lock(&sco_sk_list.lock); |
961 | sk_for_each(sk, node, &sco_sk_list.head) { | 958 | sk_for_each(sk, &sco_sk_list.head) { |
962 | if (sk->sk_state != BT_LISTEN) | 959 | if (sk->sk_state != BT_LISTEN) |
963 | continue; | 960 | continue; |
964 | 961 | ||
@@ -1018,11 +1015,10 @@ drop: | |||
1018 | static int sco_debugfs_show(struct seq_file *f, void *p) | 1015 | static int sco_debugfs_show(struct seq_file *f, void *p) |
1019 | { | 1016 | { |
1020 | struct sock *sk; | 1017 | struct sock *sk; |
1021 | struct hlist_node *node; | ||
1022 | 1018 | ||
1023 | read_lock(&sco_sk_list.lock); | 1019 | read_lock(&sco_sk_list.lock); |
1024 | 1020 | ||
1025 | sk_for_each(sk, node, &sco_sk_list.head) { | 1021 | sk_for_each(sk, &sco_sk_list.head) { |
1026 | seq_printf(f, "%pMR %pMR %d\n", &bt_sk(sk)->src, | 1022 | seq_printf(f, "%pMR %pMR %d\n", &bt_sk(sk)->src, |
1027 | &bt_sk(sk)->dst, sk->sk_state); | 1023 | &bt_sk(sk)->dst, sk->sk_state); |
1028 | } | 1024 | } |
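The two-candidate lookups in rfcomm and sco use the same property with a fallback: a break leaves sk at the exact match, while running to completion leaves sk NULL, so the conditional falls through to sk1, the remembered wildcard socket. A condensed, hedged sketch of sco_get_sock_listen() (state checks and locking omitted):

static struct sock *get_listen_sketch(bdaddr_t *src)
{
        struct sock *sk, *sk1 = NULL;

        sk_for_each(sk, &sco_sk_list.head) {
                if (!bacmp(&bt_sk(sk)->src, src))
                        break;                  /* exact source match */
                if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
                        sk1 = sk;               /* wildcard fallback */
        }

        return sk ? sk : sk1;
}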
diff --git a/net/bridge/br_fdb.c b/net/bridge/br_fdb.c
index 8117900af4de..b0812c91c0f0 100644
--- a/net/bridge/br_fdb.c
+++ b/net/bridge/br_fdb.c
@@ -181,9 +181,9 @@ void br_fdb_cleanup(unsigned long _data) | |||
181 | spin_lock(&br->hash_lock); | 181 | spin_lock(&br->hash_lock); |
182 | for (i = 0; i < BR_HASH_SIZE; i++) { | 182 | for (i = 0; i < BR_HASH_SIZE; i++) { |
183 | struct net_bridge_fdb_entry *f; | 183 | struct net_bridge_fdb_entry *f; |
184 | struct hlist_node *h, *n; | 184 | struct hlist_node *n; |
185 | 185 | ||
186 | hlist_for_each_entry_safe(f, h, n, &br->hash[i], hlist) { | 186 | hlist_for_each_entry_safe(f, n, &br->hash[i], hlist) { |
187 | unsigned long this_timer; | 187 | unsigned long this_timer; |
188 | if (f->is_static) | 188 | if (f->is_static) |
189 | continue; | 189 | continue; |
@@ -207,8 +207,8 @@ void br_fdb_flush(struct net_bridge *br) | |||
207 | spin_lock_bh(&br->hash_lock); | 207 | spin_lock_bh(&br->hash_lock); |
208 | for (i = 0; i < BR_HASH_SIZE; i++) { | 208 | for (i = 0; i < BR_HASH_SIZE; i++) { |
209 | struct net_bridge_fdb_entry *f; | 209 | struct net_bridge_fdb_entry *f; |
210 | struct hlist_node *h, *n; | 210 | struct hlist_node *n; |
211 | hlist_for_each_entry_safe(f, h, n, &br->hash[i], hlist) { | 211 | hlist_for_each_entry_safe(f, n, &br->hash[i], hlist) { |
212 | if (!f->is_static) | 212 | if (!f->is_static) |
213 | fdb_delete(br, f); | 213 | fdb_delete(br, f); |
214 | } | 214 | } |
@@ -266,10 +266,9 @@ struct net_bridge_fdb_entry *__br_fdb_get(struct net_bridge *br, | |||
266 | const unsigned char *addr, | 266 | const unsigned char *addr, |
267 | __u16 vid) | 267 | __u16 vid) |
268 | { | 268 | { |
269 | struct hlist_node *h; | ||
270 | struct net_bridge_fdb_entry *fdb; | 269 | struct net_bridge_fdb_entry *fdb; |
271 | 270 | ||
272 | hlist_for_each_entry_rcu(fdb, h, | 271 | hlist_for_each_entry_rcu(fdb, |
273 | &br->hash[br_mac_hash(addr, vid)], hlist) { | 272 | &br->hash[br_mac_hash(addr, vid)], hlist) { |
274 | if (ether_addr_equal(fdb->addr.addr, addr) && | 273 | if (ether_addr_equal(fdb->addr.addr, addr) && |
275 | fdb->vlan_id == vid) { | 274 | fdb->vlan_id == vid) { |
@@ -315,14 +314,13 @@ int br_fdb_fillbuf(struct net_bridge *br, void *buf, | |||
315 | { | 314 | { |
316 | struct __fdb_entry *fe = buf; | 315 | struct __fdb_entry *fe = buf; |
317 | int i, num = 0; | 316 | int i, num = 0; |
318 | struct hlist_node *h; | ||
319 | struct net_bridge_fdb_entry *f; | 317 | struct net_bridge_fdb_entry *f; |
320 | 318 | ||
321 | memset(buf, 0, maxnum*sizeof(struct __fdb_entry)); | 319 | memset(buf, 0, maxnum*sizeof(struct __fdb_entry)); |
322 | 320 | ||
323 | rcu_read_lock(); | 321 | rcu_read_lock(); |
324 | for (i = 0; i < BR_HASH_SIZE; i++) { | 322 | for (i = 0; i < BR_HASH_SIZE; i++) { |
325 | hlist_for_each_entry_rcu(f, h, &br->hash[i], hlist) { | 323 | hlist_for_each_entry_rcu(f, &br->hash[i], hlist) { |
326 | if (num >= maxnum) | 324 | if (num >= maxnum) |
327 | goto out; | 325 | goto out; |
328 | 326 | ||
@@ -363,10 +361,9 @@ static struct net_bridge_fdb_entry *fdb_find(struct hlist_head *head, | |||
363 | const unsigned char *addr, | 361 | const unsigned char *addr, |
364 | __u16 vid) | 362 | __u16 vid) |
365 | { | 363 | { |
366 | struct hlist_node *h; | ||
367 | struct net_bridge_fdb_entry *fdb; | 364 | struct net_bridge_fdb_entry *fdb; |
368 | 365 | ||
369 | hlist_for_each_entry(fdb, h, head, hlist) { | 366 | hlist_for_each_entry(fdb, head, hlist) { |
370 | if (ether_addr_equal(fdb->addr.addr, addr) && | 367 | if (ether_addr_equal(fdb->addr.addr, addr) && |
371 | fdb->vlan_id == vid) | 368 | fdb->vlan_id == vid) |
372 | return fdb; | 369 | return fdb; |
@@ -378,10 +375,9 @@ static struct net_bridge_fdb_entry *fdb_find_rcu(struct hlist_head *head, | |||
378 | const unsigned char *addr, | 375 | const unsigned char *addr, |
379 | __u16 vid) | 376 | __u16 vid) |
380 | { | 377 | { |
381 | struct hlist_node *h; | ||
382 | struct net_bridge_fdb_entry *fdb; | 378 | struct net_bridge_fdb_entry *fdb; |
383 | 379 | ||
384 | hlist_for_each_entry_rcu(fdb, h, head, hlist) { | 380 | hlist_for_each_entry_rcu(fdb, head, hlist) { |
385 | if (ether_addr_equal(fdb->addr.addr, addr) && | 381 | if (ether_addr_equal(fdb->addr.addr, addr) && |
386 | fdb->vlan_id == vid) | 382 | fdb->vlan_id == vid) |
387 | return fdb; | 383 | return fdb; |
@@ -593,10 +589,9 @@ int br_fdb_dump(struct sk_buff *skb, | |||
593 | goto out; | 589 | goto out; |
594 | 590 | ||
595 | for (i = 0; i < BR_HASH_SIZE; i++) { | 591 | for (i = 0; i < BR_HASH_SIZE; i++) { |
596 | struct hlist_node *h; | ||
597 | struct net_bridge_fdb_entry *f; | 592 | struct net_bridge_fdb_entry *f; |
598 | 593 | ||
599 | hlist_for_each_entry_rcu(f, h, &br->hash[i], hlist) { | 594 | hlist_for_each_entry_rcu(f, &br->hash[i], hlist) { |
600 | if (idx < cb->args[0]) | 595 | if (idx < cb->args[0]) |
601 | goto skip; | 596 | goto skip; |
602 | 597 | ||
diff --git a/net/bridge/br_mdb.c b/net/bridge/br_mdb.c
index 38991e03646d..9f97b850fc65 100644
--- a/net/bridge/br_mdb.c
+++ b/net/bridge/br_mdb.c
@@ -18,7 +18,6 @@ static int br_rports_fill_info(struct sk_buff *skb, struct netlink_callback *cb, | |||
18 | { | 18 | { |
19 | struct net_bridge *br = netdev_priv(dev); | 19 | struct net_bridge *br = netdev_priv(dev); |
20 | struct net_bridge_port *p; | 20 | struct net_bridge_port *p; |
21 | struct hlist_node *n; | ||
22 | struct nlattr *nest; | 21 | struct nlattr *nest; |
23 | 22 | ||
24 | if (!br->multicast_router || hlist_empty(&br->router_list)) | 23 | if (!br->multicast_router || hlist_empty(&br->router_list)) |
@@ -28,7 +27,7 @@ static int br_rports_fill_info(struct sk_buff *skb, struct netlink_callback *cb, | |||
28 | if (nest == NULL) | 27 | if (nest == NULL) |
29 | return -EMSGSIZE; | 28 | return -EMSGSIZE; |
30 | 29 | ||
31 | hlist_for_each_entry_rcu(p, n, &br->router_list, rlist) { | 30 | hlist_for_each_entry_rcu(p, &br->router_list, rlist) { |
32 | if (p && nla_put_u32(skb, MDBA_ROUTER_PORT, p->dev->ifindex)) | 31 | if (p && nla_put_u32(skb, MDBA_ROUTER_PORT, p->dev->ifindex)) |
33 | goto fail; | 32 | goto fail; |
34 | } | 33 | } |
@@ -61,12 +60,11 @@ static int br_mdb_fill_info(struct sk_buff *skb, struct netlink_callback *cb, | |||
61 | return -EMSGSIZE; | 60 | return -EMSGSIZE; |
62 | 61 | ||
63 | for (i = 0; i < mdb->max; i++) { | 62 | for (i = 0; i < mdb->max; i++) { |
64 | struct hlist_node *h; | ||
65 | struct net_bridge_mdb_entry *mp; | 63 | struct net_bridge_mdb_entry *mp; |
66 | struct net_bridge_port_group *p, **pp; | 64 | struct net_bridge_port_group *p, **pp; |
67 | struct net_bridge_port *port; | 65 | struct net_bridge_port *port; |
68 | 66 | ||
69 | hlist_for_each_entry_rcu(mp, h, &mdb->mhash[i], hlist[mdb->ver]) { | 67 | hlist_for_each_entry_rcu(mp, &mdb->mhash[i], hlist[mdb->ver]) { |
70 | if (idx < s_idx) | 68 | if (idx < s_idx) |
71 | goto skip; | 69 | goto skip; |
72 | 70 | ||
diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
index 7d886b0a8b7b..10e6fce1bb62 100644
--- a/net/bridge/br_multicast.c
+++ b/net/bridge/br_multicast.c
@@ -86,9 +86,8 @@ static struct net_bridge_mdb_entry *__br_mdb_ip_get( | |||
86 | struct net_bridge_mdb_htable *mdb, struct br_ip *dst, int hash) | 86 | struct net_bridge_mdb_htable *mdb, struct br_ip *dst, int hash) |
87 | { | 87 | { |
88 | struct net_bridge_mdb_entry *mp; | 88 | struct net_bridge_mdb_entry *mp; |
89 | struct hlist_node *p; | ||
90 | 89 | ||
91 | hlist_for_each_entry_rcu(mp, p, &mdb->mhash[hash], hlist[mdb->ver]) { | 90 | hlist_for_each_entry_rcu(mp, &mdb->mhash[hash], hlist[mdb->ver]) { |
92 | if (br_ip_equal(&mp->addr, dst)) | 91 | if (br_ip_equal(&mp->addr, dst)) |
93 | return mp; | 92 | return mp; |
94 | } | 93 | } |
@@ -178,13 +177,12 @@ static int br_mdb_copy(struct net_bridge_mdb_htable *new, | |||
178 | int elasticity) | 177 | int elasticity) |
179 | { | 178 | { |
180 | struct net_bridge_mdb_entry *mp; | 179 | struct net_bridge_mdb_entry *mp; |
181 | struct hlist_node *p; | ||
182 | int maxlen; | 180 | int maxlen; |
183 | int len; | 181 | int len; |
184 | int i; | 182 | int i; |
185 | 183 | ||
186 | for (i = 0; i < old->max; i++) | 184 | for (i = 0; i < old->max; i++) |
187 | hlist_for_each_entry(mp, p, &old->mhash[i], hlist[old->ver]) | 185 | hlist_for_each_entry(mp, &old->mhash[i], hlist[old->ver]) |
188 | hlist_add_head(&mp->hlist[new->ver], | 186 | hlist_add_head(&mp->hlist[new->ver], |
189 | &new->mhash[br_ip_hash(new, &mp->addr)]); | 187 | &new->mhash[br_ip_hash(new, &mp->addr)]); |
190 | 188 | ||
@@ -194,7 +192,7 @@ static int br_mdb_copy(struct net_bridge_mdb_htable *new, | |||
194 | maxlen = 0; | 192 | maxlen = 0; |
195 | for (i = 0; i < new->max; i++) { | 193 | for (i = 0; i < new->max; i++) { |
196 | len = 0; | 194 | len = 0; |
197 | hlist_for_each_entry(mp, p, &new->mhash[i], hlist[new->ver]) | 195 | hlist_for_each_entry(mp, &new->mhash[i], hlist[new->ver]) |
198 | len++; | 196 | len++; |
199 | if (len > maxlen) | 197 | if (len > maxlen) |
200 | maxlen = len; | 198 | maxlen = len; |
@@ -510,14 +508,13 @@ static struct net_bridge_mdb_entry *br_multicast_get_group( | |||
510 | { | 508 | { |
511 | struct net_bridge_mdb_htable *mdb; | 509 | struct net_bridge_mdb_htable *mdb; |
512 | struct net_bridge_mdb_entry *mp; | 510 | struct net_bridge_mdb_entry *mp; |
513 | struct hlist_node *p; | ||
514 | unsigned int count = 0; | 511 | unsigned int count = 0; |
515 | unsigned int max; | 512 | unsigned int max; |
516 | int elasticity; | 513 | int elasticity; |
517 | int err; | 514 | int err; |
518 | 515 | ||
519 | mdb = rcu_dereference_protected(br->mdb, 1); | 516 | mdb = rcu_dereference_protected(br->mdb, 1); |
520 | hlist_for_each_entry(mp, p, &mdb->mhash[hash], hlist[mdb->ver]) { | 517 | hlist_for_each_entry(mp, &mdb->mhash[hash], hlist[mdb->ver]) { |
521 | count++; | 518 | count++; |
522 | if (unlikely(br_ip_equal(group, &mp->addr))) | 519 | if (unlikely(br_ip_equal(group, &mp->addr))) |
523 | return mp; | 520 | return mp; |
@@ -882,10 +879,10 @@ void br_multicast_disable_port(struct net_bridge_port *port) | |||
882 | { | 879 | { |
883 | struct net_bridge *br = port->br; | 880 | struct net_bridge *br = port->br; |
884 | struct net_bridge_port_group *pg; | 881 | struct net_bridge_port_group *pg; |
885 | struct hlist_node *p, *n; | 882 | struct hlist_node *n; |
886 | 883 | ||
887 | spin_lock(&br->multicast_lock); | 884 | spin_lock(&br->multicast_lock); |
888 | hlist_for_each_entry_safe(pg, p, n, &port->mglist, mglist) | 885 | hlist_for_each_entry_safe(pg, n, &port->mglist, mglist) |
889 | br_multicast_del_pg(br, pg); | 886 | br_multicast_del_pg(br, pg); |
890 | 887 | ||
891 | if (!hlist_unhashed(&port->rlist)) | 888 | if (!hlist_unhashed(&port->rlist)) |
@@ -1025,12 +1022,12 @@ static void br_multicast_add_router(struct net_bridge *br, | |||
1025 | struct net_bridge_port *port) | 1022 | struct net_bridge_port *port) |
1026 | { | 1023 | { |
1027 | struct net_bridge_port *p; | 1024 | struct net_bridge_port *p; |
1028 | struct hlist_node *n, *slot = NULL; | 1025 | struct hlist_node *slot = NULL; |
1029 | 1026 | ||
1030 | hlist_for_each_entry(p, n, &br->router_list, rlist) { | 1027 | hlist_for_each_entry(p, &br->router_list, rlist) { |
1031 | if ((unsigned long) port >= (unsigned long) p) | 1028 | if ((unsigned long) port >= (unsigned long) p) |
1032 | break; | 1029 | break; |
1033 | slot = n; | 1030 | slot = &p->rlist; |
1034 | } | 1031 | } |
1035 | 1032 | ||
1036 | if (slot) | 1033 | if (slot) |
@@ -1653,7 +1650,7 @@ void br_multicast_stop(struct net_bridge *br) | |||
1653 | { | 1650 | { |
1654 | struct net_bridge_mdb_htable *mdb; | 1651 | struct net_bridge_mdb_htable *mdb; |
1655 | struct net_bridge_mdb_entry *mp; | 1652 | struct net_bridge_mdb_entry *mp; |
1656 | struct hlist_node *p, *n; | 1653 | struct hlist_node *n; |
1657 | u32 ver; | 1654 | u32 ver; |
1658 | int i; | 1655 | int i; |
1659 | 1656 | ||
@@ -1670,7 +1667,7 @@ void br_multicast_stop(struct net_bridge *br) | |||
1670 | 1667 | ||
1671 | ver = mdb->ver; | 1668 | ver = mdb->ver; |
1672 | for (i = 0; i < mdb->max; i++) { | 1669 | for (i = 0; i < mdb->max; i++) { |
1673 | hlist_for_each_entry_safe(mp, p, n, &mdb->mhash[i], | 1670 | hlist_for_each_entry_safe(mp, n, &mdb->mhash[i], |
1674 | hlist[ver]) { | 1671 | hlist[ver]) { |
1675 | del_timer(&mp->timer); | 1672 | del_timer(&mp->timer); |
1676 | call_rcu_bh(&mp->rcu, br_multicast_free_group); | 1673 | call_rcu_bh(&mp->rcu, br_multicast_free_group); |
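br_multicast_add_router() above is another place where the cursor was load-bearing: it was saved as the insertion slot for the new entry. The replacement saves the address of the current entry's own rlist node, which is exactly the pointer the cursor held. Condensed from the hunk (the function then inserts after slot, or at the head when slot stays NULL):

struct net_bridge_port *p;
struct hlist_node *slot = NULL;

hlist_for_each_entry(p, &br->router_list, rlist) {
        if ((unsigned long)port >= (unsigned long)p)
                break;
        slot = &p->rlist;       /* was: slot = n; the same node, named via p */
}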
diff --git a/net/can/af_can.c b/net/can/af_can.c
index ddac1ee2ed20..c48e5220bbac 100644
--- a/net/can/af_can.c
+++ b/net/can/af_can.c
@@ -516,7 +516,6 @@ void can_rx_unregister(struct net_device *dev, canid_t can_id, canid_t mask, | |||
516 | { | 516 | { |
517 | struct receiver *r = NULL; | 517 | struct receiver *r = NULL; |
518 | struct hlist_head *rl; | 518 | struct hlist_head *rl; |
519 | struct hlist_node *next; | ||
520 | struct dev_rcv_lists *d; | 519 | struct dev_rcv_lists *d; |
521 | 520 | ||
522 | if (dev && dev->type != ARPHRD_CAN) | 521 | if (dev && dev->type != ARPHRD_CAN) |
@@ -540,7 +539,7 @@ void can_rx_unregister(struct net_device *dev, canid_t can_id, canid_t mask, | |||
540 | * been registered before. | 539 | * been registered before. |
541 | */ | 540 | */ |
542 | 541 | ||
543 | hlist_for_each_entry_rcu(r, next, rl, list) { | 542 | hlist_for_each_entry_rcu(r, rl, list) { |
544 | if (r->can_id == can_id && r->mask == mask && | 543 | if (r->can_id == can_id && r->mask == mask && |
545 | r->func == func && r->data == data) | 544 | r->func == func && r->data == data) |
546 | break; | 545 | break; |
@@ -552,7 +551,7 @@ void can_rx_unregister(struct net_device *dev, canid_t can_id, canid_t mask, | |||
552 | * will be NULL, while r will point to the last item of the list. | 551 | * will be NULL, while r will point to the last item of the list. |
553 | */ | 552 | */ |
554 | 553 | ||
555 | if (!next) { | 554 | if (!r) { |
556 | printk(KERN_ERR "BUG: receive list entry not found for " | 555 | printk(KERN_ERR "BUG: receive list entry not found for " |
557 | "dev %s, id %03X, mask %03X\n", | 556 | "dev %s, id %03X, mask %03X\n", |
558 | DNAME(dev), can_id, mask); | 557 | DNAME(dev), can_id, mask); |
@@ -590,7 +589,6 @@ static inline void deliver(struct sk_buff *skb, struct receiver *r) | |||
590 | static int can_rcv_filter(struct dev_rcv_lists *d, struct sk_buff *skb) | 589 | static int can_rcv_filter(struct dev_rcv_lists *d, struct sk_buff *skb) |
591 | { | 590 | { |
592 | struct receiver *r; | 591 | struct receiver *r; |
593 | struct hlist_node *n; | ||
594 | int matches = 0; | 592 | int matches = 0; |
595 | struct can_frame *cf = (struct can_frame *)skb->data; | 593 | struct can_frame *cf = (struct can_frame *)skb->data; |
596 | canid_t can_id = cf->can_id; | 594 | canid_t can_id = cf->can_id; |
@@ -600,7 +598,7 @@ static int can_rcv_filter(struct dev_rcv_lists *d, struct sk_buff *skb) | |||
600 | 598 | ||
601 | if (can_id & CAN_ERR_FLAG) { | 599 | if (can_id & CAN_ERR_FLAG) { |
602 | /* check for error message frame entries only */ | 600 | /* check for error message frame entries only */ |
603 | hlist_for_each_entry_rcu(r, n, &d->rx[RX_ERR], list) { | 601 | hlist_for_each_entry_rcu(r, &d->rx[RX_ERR], list) { |
604 | if (can_id & r->mask) { | 602 | if (can_id & r->mask) { |
605 | deliver(skb, r); | 603 | deliver(skb, r); |
606 | matches++; | 604 | matches++; |
@@ -610,13 +608,13 @@ static int can_rcv_filter(struct dev_rcv_lists *d, struct sk_buff *skb) | |||
610 | } | 608 | } |
611 | 609 | ||
612 | /* check for unfiltered entries */ | 610 | /* check for unfiltered entries */ |
613 | hlist_for_each_entry_rcu(r, n, &d->rx[RX_ALL], list) { | 611 | hlist_for_each_entry_rcu(r, &d->rx[RX_ALL], list) { |
614 | deliver(skb, r); | 612 | deliver(skb, r); |
615 | matches++; | 613 | matches++; |
616 | } | 614 | } |
617 | 615 | ||
618 | /* check for can_id/mask entries */ | 616 | /* check for can_id/mask entries */ |
619 | hlist_for_each_entry_rcu(r, n, &d->rx[RX_FIL], list) { | 617 | hlist_for_each_entry_rcu(r, &d->rx[RX_FIL], list) { |
620 | if ((can_id & r->mask) == r->can_id) { | 618 | if ((can_id & r->mask) == r->can_id) { |
621 | deliver(skb, r); | 619 | deliver(skb, r); |
622 | matches++; | 620 | matches++; |
@@ -624,7 +622,7 @@ static int can_rcv_filter(struct dev_rcv_lists *d, struct sk_buff *skb) | |||
624 | } | 622 | } |
625 | 623 | ||
626 | /* check for inverted can_id/mask entries */ | 624 | /* check for inverted can_id/mask entries */ |
627 | hlist_for_each_entry_rcu(r, n, &d->rx[RX_INV], list) { | 625 | hlist_for_each_entry_rcu(r, &d->rx[RX_INV], list) { |
628 | if ((can_id & r->mask) != r->can_id) { | 626 | if ((can_id & r->mask) != r->can_id) { |
629 | deliver(skb, r); | 627 | deliver(skb, r); |
630 | matches++; | 628 | matches++; |
@@ -636,7 +634,7 @@ static int can_rcv_filter(struct dev_rcv_lists *d, struct sk_buff *skb) | |||
636 | return matches; | 634 | return matches; |
637 | 635 | ||
638 | if (can_id & CAN_EFF_FLAG) { | 636 | if (can_id & CAN_EFF_FLAG) { |
639 | hlist_for_each_entry_rcu(r, n, &d->rx[RX_EFF], list) { | 637 | hlist_for_each_entry_rcu(r, &d->rx[RX_EFF], list) { |
640 | if (r->can_id == can_id) { | 638 | if (r->can_id == can_id) { |
641 | deliver(skb, r); | 639 | deliver(skb, r); |
642 | matches++; | 640 | matches++; |
@@ -644,7 +642,7 @@ static int can_rcv_filter(struct dev_rcv_lists *d, struct sk_buff *skb) | |||
644 | } | 642 | } |
645 | } else { | 643 | } else { |
646 | can_id &= CAN_SFF_MASK; | 644 | can_id &= CAN_SFF_MASK; |
647 | hlist_for_each_entry_rcu(r, n, &d->rx_sff[can_id], list) { | 645 | hlist_for_each_entry_rcu(r, &d->rx_sff[can_id], list) { |
648 | deliver(skb, r); | 646 | deliver(skb, r); |
649 | matches++; | 647 | matches++; |
650 | } | 648 | } |
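can_rx_unregister(), in the first hunk of this file, shows the sentinel test flipping direction: the old code checked the spare next pointer after the loop, while the new iterator leaves r itself NULL after an exhaustive walk, which is what `if (!r)` now checks (the preceding comment still describes the old behaviour). Condensed sketch:

struct receiver *r;

hlist_for_each_entry_rcu(r, rl, list) {
        if (r->can_id == can_id && r->mask == mask &&
            r->func == func && r->data == data)
                break;          /* r points at the matching receiver */
}

if (!r)                         /* list exhausted: nothing matched */
        pr_err("BUG: receive list entry not found\n");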
diff --git a/net/can/gw.c b/net/can/gw.c
index c185fcd5e828..2d117dc5ebea 100644
--- a/net/can/gw.c
+++ b/net/can/gw.c
@@ -457,11 +457,11 @@ static int cgw_notifier(struct notifier_block *nb, | |||
457 | if (msg == NETDEV_UNREGISTER) { | 457 | if (msg == NETDEV_UNREGISTER) { |
458 | 458 | ||
459 | struct cgw_job *gwj = NULL; | 459 | struct cgw_job *gwj = NULL; |
460 | struct hlist_node *n, *nx; | 460 | struct hlist_node *nx; |
461 | 461 | ||
462 | ASSERT_RTNL(); | 462 | ASSERT_RTNL(); |
463 | 463 | ||
464 | hlist_for_each_entry_safe(gwj, n, nx, &cgw_list, list) { | 464 | hlist_for_each_entry_safe(gwj, nx, &cgw_list, list) { |
465 | 465 | ||
466 | if (gwj->src.dev == dev || gwj->dst.dev == dev) { | 466 | if (gwj->src.dev == dev || gwj->dst.dev == dev) { |
467 | hlist_del(&gwj->list); | 467 | hlist_del(&gwj->list); |
@@ -575,12 +575,11 @@ cancel: | |||
575 | static int cgw_dump_jobs(struct sk_buff *skb, struct netlink_callback *cb) | 575 | static int cgw_dump_jobs(struct sk_buff *skb, struct netlink_callback *cb) |
576 | { | 576 | { |
577 | struct cgw_job *gwj = NULL; | 577 | struct cgw_job *gwj = NULL; |
578 | struct hlist_node *n; | ||
579 | int idx = 0; | 578 | int idx = 0; |
580 | int s_idx = cb->args[0]; | 579 | int s_idx = cb->args[0]; |
581 | 580 | ||
582 | rcu_read_lock(); | 581 | rcu_read_lock(); |
583 | hlist_for_each_entry_rcu(gwj, n, &cgw_list, list) { | 582 | hlist_for_each_entry_rcu(gwj, &cgw_list, list) { |
584 | if (idx < s_idx) | 583 | if (idx < s_idx) |
585 | goto cont; | 584 | goto cont; |
586 | 585 | ||
@@ -858,11 +857,11 @@ out: | |||
858 | static void cgw_remove_all_jobs(void) | 857 | static void cgw_remove_all_jobs(void) |
859 | { | 858 | { |
860 | struct cgw_job *gwj = NULL; | 859 | struct cgw_job *gwj = NULL; |
861 | struct hlist_node *n, *nx; | 860 | struct hlist_node *nx; |
862 | 861 | ||
863 | ASSERT_RTNL(); | 862 | ASSERT_RTNL(); |
864 | 863 | ||
865 | hlist_for_each_entry_safe(gwj, n, nx, &cgw_list, list) { | 864 | hlist_for_each_entry_safe(gwj, nx, &cgw_list, list) { |
866 | hlist_del(&gwj->list); | 865 | hlist_del(&gwj->list); |
867 | cgw_unregister_filter(gwj); | 866 | cgw_unregister_filter(gwj); |
868 | kfree(gwj); | 867 | kfree(gwj); |
@@ -872,7 +871,7 @@ static void cgw_remove_all_jobs(void) | |||
872 | static int cgw_remove_job(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg) | 871 | static int cgw_remove_job(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg) |
873 | { | 872 | { |
874 | struct cgw_job *gwj = NULL; | 873 | struct cgw_job *gwj = NULL; |
875 | struct hlist_node *n, *nx; | 874 | struct hlist_node *nx; |
876 | struct rtcanmsg *r; | 875 | struct rtcanmsg *r; |
877 | struct cf_mod mod; | 876 | struct cf_mod mod; |
878 | struct can_can_gw ccgw; | 877 | struct can_can_gw ccgw; |
@@ -907,7 +906,7 @@ static int cgw_remove_job(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg) | |||
907 | ASSERT_RTNL(); | 906 | ASSERT_RTNL(); |
908 | 907 | ||
909 | /* remove only the first matching entry */ | 908 | /* remove only the first matching entry */ |
910 | hlist_for_each_entry_safe(gwj, n, nx, &cgw_list, list) { | 909 | hlist_for_each_entry_safe(gwj, nx, &cgw_list, list) { |
911 | 910 | ||
912 | if (gwj->flags != r->flags) | 911 | if (gwj->flags != r->flags) |
913 | continue; | 912 | continue; |
diff --git a/net/can/proc.c b/net/can/proc.c
index 497335892146..1ab8c888f102 100644
--- a/net/can/proc.c
+++ b/net/can/proc.c
@@ -195,9 +195,8 @@ static void can_print_rcvlist(struct seq_file *m, struct hlist_head *rx_list, | |||
195 | struct net_device *dev) | 195 | struct net_device *dev) |
196 | { | 196 | { |
197 | struct receiver *r; | 197 | struct receiver *r; |
198 | struct hlist_node *n; | ||
199 | 198 | ||
200 | hlist_for_each_entry_rcu(r, n, rx_list, list) { | 199 | hlist_for_each_entry_rcu(r, rx_list, list) { |
201 | char *fmt = (r->can_id & CAN_EFF_FLAG)? | 200 | char *fmt = (r->can_id & CAN_EFF_FLAG)? |
202 | " %-5s %08x %08x %pK %pK %8ld %s\n" : | 201 | " %-5s %08x %08x %pK %pK %8ld %s\n" : |
203 | " %-5s %03x %08x %pK %pK %8ld %s\n"; | 202 | " %-5s %03x %08x %pK %pK %8ld %s\n"; |
diff --git a/net/core/dev.c b/net/core/dev.c
index 18d8b5acc343..a06a7a58dd11 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -658,11 +658,10 @@ __setup("netdev=", netdev_boot_setup); | |||
658 | 658 | ||
659 | struct net_device *__dev_get_by_name(struct net *net, const char *name) | 659 | struct net_device *__dev_get_by_name(struct net *net, const char *name) |
660 | { | 660 | { |
661 | struct hlist_node *p; | ||
662 | struct net_device *dev; | 661 | struct net_device *dev; |
663 | struct hlist_head *head = dev_name_hash(net, name); | 662 | struct hlist_head *head = dev_name_hash(net, name); |
664 | 663 | ||
665 | hlist_for_each_entry(dev, p, head, name_hlist) | 664 | hlist_for_each_entry(dev, head, name_hlist) |
666 | if (!strncmp(dev->name, name, IFNAMSIZ)) | 665 | if (!strncmp(dev->name, name, IFNAMSIZ)) |
667 | return dev; | 666 | return dev; |
668 | 667 | ||
@@ -684,11 +683,10 @@ EXPORT_SYMBOL(__dev_get_by_name); | |||
684 | 683 | ||
685 | struct net_device *dev_get_by_name_rcu(struct net *net, const char *name) | 684 | struct net_device *dev_get_by_name_rcu(struct net *net, const char *name) |
686 | { | 685 | { |
687 | struct hlist_node *p; | ||
688 | struct net_device *dev; | 686 | struct net_device *dev; |
689 | struct hlist_head *head = dev_name_hash(net, name); | 687 | struct hlist_head *head = dev_name_hash(net, name); |
690 | 688 | ||
691 | hlist_for_each_entry_rcu(dev, p, head, name_hlist) | 689 | hlist_for_each_entry_rcu(dev, head, name_hlist) |
692 | if (!strncmp(dev->name, name, IFNAMSIZ)) | 690 | if (!strncmp(dev->name, name, IFNAMSIZ)) |
693 | return dev; | 691 | return dev; |
694 | 692 | ||
@@ -735,11 +733,10 @@ EXPORT_SYMBOL(dev_get_by_name); | |||
735 | 733 | ||
736 | struct net_device *__dev_get_by_index(struct net *net, int ifindex) | 734 | struct net_device *__dev_get_by_index(struct net *net, int ifindex) |
737 | { | 735 | { |
738 | struct hlist_node *p; | ||
739 | struct net_device *dev; | 736 | struct net_device *dev; |
740 | struct hlist_head *head = dev_index_hash(net, ifindex); | 737 | struct hlist_head *head = dev_index_hash(net, ifindex); |
741 | 738 | ||
742 | hlist_for_each_entry(dev, p, head, index_hlist) | 739 | hlist_for_each_entry(dev, head, index_hlist) |
743 | if (dev->ifindex == ifindex) | 740 | if (dev->ifindex == ifindex) |
744 | return dev; | 741 | return dev; |
745 | 742 | ||
@@ -760,11 +757,10 @@ EXPORT_SYMBOL(__dev_get_by_index); | |||
760 | 757 | ||
761 | struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex) | 758 | struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex) |
762 | { | 759 | { |
763 | struct hlist_node *p; | ||
764 | struct net_device *dev; | 760 | struct net_device *dev; |
765 | struct hlist_head *head = dev_index_hash(net, ifindex); | 761 | struct hlist_head *head = dev_index_hash(net, ifindex); |
766 | 762 | ||
767 | hlist_for_each_entry_rcu(dev, p, head, index_hlist) | 763 | hlist_for_each_entry_rcu(dev, head, index_hlist) |
768 | if (dev->ifindex == ifindex) | 764 | if (dev->ifindex == ifindex) |
769 | return dev; | 765 | return dev; |
770 | 766 | ||
diff --git a/net/core/flow.c b/net/core/flow.c
index 43f7495df27a..c56ea6f7f6c7 100644
--- a/net/core/flow.c
+++ b/net/core/flow.c
@@ -132,14 +132,14 @@ static void __flow_cache_shrink(struct flow_cache *fc, | |||
132 | int shrink_to) | 132 | int shrink_to) |
133 | { | 133 | { |
134 | struct flow_cache_entry *fle; | 134 | struct flow_cache_entry *fle; |
135 | struct hlist_node *entry, *tmp; | 135 | struct hlist_node *tmp; |
136 | LIST_HEAD(gc_list); | 136 | LIST_HEAD(gc_list); |
137 | int i, deleted = 0; | 137 | int i, deleted = 0; |
138 | 138 | ||
139 | for (i = 0; i < flow_cache_hash_size(fc); i++) { | 139 | for (i = 0; i < flow_cache_hash_size(fc); i++) { |
140 | int saved = 0; | 140 | int saved = 0; |
141 | 141 | ||
142 | hlist_for_each_entry_safe(fle, entry, tmp, | 142 | hlist_for_each_entry_safe(fle, tmp, |
143 | &fcp->hash_table[i], u.hlist) { | 143 | &fcp->hash_table[i], u.hlist) { |
144 | if (saved < shrink_to && | 144 | if (saved < shrink_to && |
145 | flow_entry_valid(fle)) { | 145 | flow_entry_valid(fle)) { |
@@ -211,7 +211,6 @@ flow_cache_lookup(struct net *net, const struct flowi *key, u16 family, u8 dir, | |||
211 | struct flow_cache *fc = &flow_cache_global; | 211 | struct flow_cache *fc = &flow_cache_global; |
212 | struct flow_cache_percpu *fcp; | 212 | struct flow_cache_percpu *fcp; |
213 | struct flow_cache_entry *fle, *tfle; | 213 | struct flow_cache_entry *fle, *tfle; |
214 | struct hlist_node *entry; | ||
215 | struct flow_cache_object *flo; | 214 | struct flow_cache_object *flo; |
216 | size_t keysize; | 215 | size_t keysize; |
217 | unsigned int hash; | 216 | unsigned int hash; |
@@ -235,7 +234,7 @@ flow_cache_lookup(struct net *net, const struct flowi *key, u16 family, u8 dir, | |||
235 | flow_new_hash_rnd(fc, fcp); | 234 | flow_new_hash_rnd(fc, fcp); |
236 | 235 | ||
237 | hash = flow_hash_code(fc, fcp, key, keysize); | 236 | hash = flow_hash_code(fc, fcp, key, keysize); |
238 | hlist_for_each_entry(tfle, entry, &fcp->hash_table[hash], u.hlist) { | 237 | hlist_for_each_entry(tfle, &fcp->hash_table[hash], u.hlist) { |
239 | if (tfle->net == net && | 238 | if (tfle->net == net && |
240 | tfle->family == family && | 239 | tfle->family == family && |
241 | tfle->dir == dir && | 240 | tfle->dir == dir && |
@@ -301,13 +300,13 @@ static void flow_cache_flush_tasklet(unsigned long data) | |||
301 | struct flow_cache *fc = info->cache; | 300 | struct flow_cache *fc = info->cache; |
302 | struct flow_cache_percpu *fcp; | 301 | struct flow_cache_percpu *fcp; |
303 | struct flow_cache_entry *fle; | 302 | struct flow_cache_entry *fle; |
304 | struct hlist_node *entry, *tmp; | 303 | struct hlist_node *tmp; |
305 | LIST_HEAD(gc_list); | 304 | LIST_HEAD(gc_list); |
306 | int i, deleted = 0; | 305 | int i, deleted = 0; |
307 | 306 | ||
308 | fcp = this_cpu_ptr(fc->percpu); | 307 | fcp = this_cpu_ptr(fc->percpu); |
309 | for (i = 0; i < flow_cache_hash_size(fc); i++) { | 308 | for (i = 0; i < flow_cache_hash_size(fc); i++) { |
310 | hlist_for_each_entry_safe(fle, entry, tmp, | 309 | hlist_for_each_entry_safe(fle, tmp, |
311 | &fcp->hash_table[i], u.hlist) { | 310 | &fcp->hash_table[i], u.hlist) { |
312 | if (flow_entry_valid(fle)) | 311 | if (flow_entry_valid(fle)) |
313 | continue; | 312 | continue; |
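One asymmetry worth noting in _safe hunks like the two above: the variant loses its position cursor but keeps the lookahead node, because a just-freed entry cannot be used to find its successor. The reworked macro is roughly (paraphrased):

#define hlist_for_each_entry_safe(pos, n, head, member) \
        for (pos = hlist_entry_safe((head)->first, typeof(*pos), member); \
             pos && ({ n = pos->member.next; 1; }); \
             pos = hlist_entry_safe(n, typeof(*pos), member))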
diff --git a/net/core/net-procfs.c b/net/core/net-procfs.c
index 0f6bb6f8d391..3174f1998ee6 100644
--- a/net/core/net-procfs.c
+++ b/net/core/net-procfs.c
@@ -16,12 +16,11 @@ static inline struct net_device *dev_from_same_bucket(struct seq_file *seq, loff | |||
16 | { | 16 | { |
17 | struct net *net = seq_file_net(seq); | 17 | struct net *net = seq_file_net(seq); |
18 | struct net_device *dev; | 18 | struct net_device *dev; |
19 | struct hlist_node *p; | ||
20 | struct hlist_head *h; | 19 | struct hlist_head *h; |
21 | unsigned int count = 0, offset = get_offset(*pos); | 20 | unsigned int count = 0, offset = get_offset(*pos); |
22 | 21 | ||
23 | h = &net->dev_name_head[get_bucket(*pos)]; | 22 | h = &net->dev_name_head[get_bucket(*pos)]; |
24 | hlist_for_each_entry_rcu(dev, p, h, name_hlist) { | 23 | hlist_for_each_entry_rcu(dev, h, name_hlist) { |
25 | if (++count == offset) | 24 | if (++count == offset) |
26 | return dev; | 25 | return dev; |
27 | } | 26 | } |
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index d8aa20f6a46e..b376410ff259 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -1060,7 +1060,6 @@ static int rtnl_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb) | |||
1060 | int idx = 0, s_idx; | 1060 | int idx = 0, s_idx; |
1061 | struct net_device *dev; | 1061 | struct net_device *dev; |
1062 | struct hlist_head *head; | 1062 | struct hlist_head *head; |
1063 | struct hlist_node *node; | ||
1064 | struct nlattr *tb[IFLA_MAX+1]; | 1063 | struct nlattr *tb[IFLA_MAX+1]; |
1065 | u32 ext_filter_mask = 0; | 1064 | u32 ext_filter_mask = 0; |
1066 | 1065 | ||
@@ -1080,7 +1079,7 @@ static int rtnl_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb) | |||
1080 | for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) { | 1079 | for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) { |
1081 | idx = 0; | 1080 | idx = 0; |
1082 | head = &net->dev_index_head[h]; | 1081 | head = &net->dev_index_head[h]; |
1083 | hlist_for_each_entry_rcu(dev, node, head, index_hlist) { | 1082 | hlist_for_each_entry_rcu(dev, head, index_hlist) { |
1084 | if (idx < s_idx) | 1083 | if (idx < s_idx) |
1085 | goto cont; | 1084 | goto cont; |
1086 | if (rtnl_fill_ifinfo(skb, dev, RTM_NEWLINK, | 1085 | if (rtnl_fill_ifinfo(skb, dev, RTM_NEWLINK, |
diff --git a/net/decnet/af_decnet.c b/net/decnet/af_decnet.c
index c4a2def5b7bd..c21f200eed93 100644
--- a/net/decnet/af_decnet.c
+++ b/net/decnet/af_decnet.c
@@ -175,12 +175,11 @@ static struct hlist_head *dn_find_list(struct sock *sk) | |||
175 | static int check_port(__le16 port) | 175 | static int check_port(__le16 port) |
176 | { | 176 | { |
177 | struct sock *sk; | 177 | struct sock *sk; |
178 | struct hlist_node *node; | ||
179 | 178 | ||
180 | if (port == 0) | 179 | if (port == 0) |
181 | return -1; | 180 | return -1; |
182 | 181 | ||
183 | sk_for_each(sk, node, &dn_sk_hash[le16_to_cpu(port) & DN_SK_HASH_MASK]) { | 182 | sk_for_each(sk, &dn_sk_hash[le16_to_cpu(port) & DN_SK_HASH_MASK]) { |
184 | struct dn_scp *scp = DN_SK(sk); | 183 | struct dn_scp *scp = DN_SK(sk); |
185 | if (scp->addrloc == port) | 184 | if (scp->addrloc == port) |
186 | return -1; | 185 | return -1; |
@@ -374,11 +373,10 @@ int dn_username2sockaddr(unsigned char *data, int len, struct sockaddr_dn *sdn, | |||
374 | struct sock *dn_sklist_find_listener(struct sockaddr_dn *addr) | 373 | struct sock *dn_sklist_find_listener(struct sockaddr_dn *addr) |
375 | { | 374 | { |
376 | struct hlist_head *list = listen_hash(addr); | 375 | struct hlist_head *list = listen_hash(addr); |
377 | struct hlist_node *node; | ||
378 | struct sock *sk; | 376 | struct sock *sk; |
379 | 377 | ||
380 | read_lock(&dn_hash_lock); | 378 | read_lock(&dn_hash_lock); |
381 | sk_for_each(sk, node, list) { | 379 | sk_for_each(sk, list) { |
382 | struct dn_scp *scp = DN_SK(sk); | 380 | struct dn_scp *scp = DN_SK(sk); |
383 | if (sk->sk_state != TCP_LISTEN) | 381 | if (sk->sk_state != TCP_LISTEN) |
384 | continue; | 382 | continue; |
@@ -414,11 +412,10 @@ struct sock *dn_find_by_skb(struct sk_buff *skb) | |||
414 | { | 412 | { |
415 | struct dn_skb_cb *cb = DN_SKB_CB(skb); | 413 | struct dn_skb_cb *cb = DN_SKB_CB(skb); |
416 | struct sock *sk; | 414 | struct sock *sk; |
417 | struct hlist_node *node; | ||
418 | struct dn_scp *scp; | 415 | struct dn_scp *scp; |
419 | 416 | ||
420 | read_lock(&dn_hash_lock); | 417 | read_lock(&dn_hash_lock); |
421 | sk_for_each(sk, node, &dn_sk_hash[le16_to_cpu(cb->dst_port) & DN_SK_HASH_MASK]) { | 418 | sk_for_each(sk, &dn_sk_hash[le16_to_cpu(cb->dst_port) & DN_SK_HASH_MASK]) { |
422 | scp = DN_SK(sk); | 419 | scp = DN_SK(sk); |
423 | if (cb->src != dn_saddr2dn(&scp->peer)) | 420 | if (cb->src != dn_saddr2dn(&scp->peer)) |
424 | continue; | 421 | continue; |
diff --git a/net/decnet/dn_table.c b/net/decnet/dn_table.c
index f968c1b58f47..6c2445bcaba1 100644
--- a/net/decnet/dn_table.c
+++ b/net/decnet/dn_table.c
@@ -483,7 +483,6 @@ int dn_fib_dump(struct sk_buff *skb, struct netlink_callback *cb) | |||
483 | unsigned int h, s_h; | 483 | unsigned int h, s_h; |
484 | unsigned int e = 0, s_e; | 484 | unsigned int e = 0, s_e; |
485 | struct dn_fib_table *tb; | 485 | struct dn_fib_table *tb; |
486 | struct hlist_node *node; | ||
487 | int dumped = 0; | 486 | int dumped = 0; |
488 | 487 | ||
489 | if (!net_eq(net, &init_net)) | 488 | if (!net_eq(net, &init_net)) |
@@ -498,7 +497,7 @@ int dn_fib_dump(struct sk_buff *skb, struct netlink_callback *cb) | |||
498 | 497 | ||
499 | for (h = s_h; h < DN_FIB_TABLE_HASHSZ; h++, s_h = 0) { | 498 | for (h = s_h; h < DN_FIB_TABLE_HASHSZ; h++, s_h = 0) { |
500 | e = 0; | 499 | e = 0; |
501 | hlist_for_each_entry(tb, node, &dn_fib_table_hash[h], hlist) { | 500 | hlist_for_each_entry(tb, &dn_fib_table_hash[h], hlist) { |
502 | if (e < s_e) | 501 | if (e < s_e) |
503 | goto next; | 502 | goto next; |
504 | if (dumped) | 503 | if (dumped) |
@@ -828,7 +827,6 @@ out: | |||
828 | struct dn_fib_table *dn_fib_get_table(u32 n, int create) | 827 | struct dn_fib_table *dn_fib_get_table(u32 n, int create) |
829 | { | 828 | { |
830 | struct dn_fib_table *t; | 829 | struct dn_fib_table *t; |
831 | struct hlist_node *node; | ||
832 | unsigned int h; | 830 | unsigned int h; |
833 | 831 | ||
834 | if (n < RT_TABLE_MIN) | 832 | if (n < RT_TABLE_MIN) |
@@ -839,7 +837,7 @@ struct dn_fib_table *dn_fib_get_table(u32 n, int create) | |||
839 | 837 | ||
840 | h = n & (DN_FIB_TABLE_HASHSZ - 1); | 838 | h = n & (DN_FIB_TABLE_HASHSZ - 1); |
841 | rcu_read_lock(); | 839 | rcu_read_lock(); |
842 | hlist_for_each_entry_rcu(t, node, &dn_fib_table_hash[h], hlist) { | 840 | hlist_for_each_entry_rcu(t, &dn_fib_table_hash[h], hlist) { |
843 | if (t->n == n) { | 841 | if (t->n == n) { |
844 | rcu_read_unlock(); | 842 | rcu_read_unlock(); |
845 | return t; | 843 | return t; |
@@ -885,11 +883,10 @@ void dn_fib_flush(void) | |||
885 | { | 883 | { |
886 | int flushed = 0; | 884 | int flushed = 0; |
887 | struct dn_fib_table *tb; | 885 | struct dn_fib_table *tb; |
888 | struct hlist_node *node; | ||
889 | unsigned int h; | 886 | unsigned int h; |
890 | 887 | ||
891 | for (h = 0; h < DN_FIB_TABLE_HASHSZ; h++) { | 888 | for (h = 0; h < DN_FIB_TABLE_HASHSZ; h++) { |
892 | hlist_for_each_entry(tb, node, &dn_fib_table_hash[h], hlist) | 889 | hlist_for_each_entry(tb, &dn_fib_table_hash[h], hlist) |
893 | flushed += tb->flush(tb); | 890 | flushed += tb->flush(tb); |
894 | } | 891 | } |
895 | 892 | ||
@@ -908,12 +905,12 @@ void __init dn_fib_table_init(void) | |||
908 | void __exit dn_fib_table_cleanup(void) | 905 | void __exit dn_fib_table_cleanup(void) |
909 | { | 906 | { |
910 | struct dn_fib_table *t; | 907 | struct dn_fib_table *t; |
911 | struct hlist_node *node, *next; | 908 | struct hlist_node *next; |
912 | unsigned int h; | 909 | unsigned int h; |
913 | 910 | ||
914 | write_lock(&dn_fib_tables_lock); | 911 | write_lock(&dn_fib_tables_lock); |
915 | for (h = 0; h < DN_FIB_TABLE_HASHSZ; h++) { | 912 | for (h = 0; h < DN_FIB_TABLE_HASHSZ; h++) { |
916 | hlist_for_each_entry_safe(t, node, next, &dn_fib_table_hash[h], | 913 | hlist_for_each_entry_safe(t, next, &dn_fib_table_hash[h], |
917 | hlist) { | 914 | hlist) { |
918 | hlist_del(&t->hlist); | 915 | hlist_del(&t->hlist); |
919 | kfree(t); | 916 | kfree(t); |
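dn_fib_table_cleanup() above frees every table while walking its chain, which is why it still keeps a bare struct hlist_node *next: the _safe variant caches the successor before the loop body runs. A self-contained sketch of that shape (the macro body follows the post-patch include/linux/list.h; the toy type is invented):

#include <stddef.h>
#include <stdlib.h>

struct hlist_head { struct hlist_node *first; };
struct hlist_node { struct hlist_node *next, **pprev; };

#define hlist_entry(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))
#define hlist_entry_safe(ptr, type, member) \
	({ typeof(ptr) ____ptr = (ptr); \
	   ____ptr ? hlist_entry(____ptr, type, member) : NULL; })
/* post-patch form: entry cursor, plus one node cursor for the successor */
#define hlist_for_each_entry_safe(pos, n, head, member) \
	for (pos = hlist_entry_safe((head)->first, typeof(*pos), member); \
	     pos && ({ n = pos->member.next; 1; }); \
	     pos = hlist_entry_safe(n, typeof(*pos), member))

static void hlist_add_head(struct hlist_node *n, struct hlist_head *h)
{
	n->next = h->first;
	if (h->first)
		h->first->pprev = &n->next;
	h->first = n;
	n->pprev = &h->first;
}

static void hlist_del(struct hlist_node *n)
{
	*n->pprev = n->next;
	if (n->next)
		n->next->pprev = n->pprev;
}

struct toy_table { struct hlist_node hlist; };

int main(void)
{
	struct hlist_head head = { NULL };
	struct toy_table *pos;
	struct hlist_node *n;
	int i;

	for (i = 0; i < 3; i++) {
		struct toy_table *t = malloc(sizeof(*t));
		if (!t)
			return 1;
		hlist_add_head(&t->hlist, &head);
	}
	/* freeing pos mid-walk is fine: n already points past it */
	hlist_for_each_entry_safe(pos, n, &head, hlist) {
		hlist_del(&pos->hlist);
		free(pos);
	}
	return 0;
}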
diff --git a/net/ieee802154/dgram.c b/net/ieee802154/dgram.c
index 16705611589a..e0da175f8e5b 100644
--- a/net/ieee802154/dgram.c
+++ b/net/ieee802154/dgram.c
@@ -350,7 +350,6 @@ static inline int ieee802154_match_sock(u8 *hw_addr, u16 pan_id,
 int ieee802154_dgram_deliver(struct net_device *dev, struct sk_buff *skb)
 {
 	struct sock *sk, *prev = NULL;
-	struct hlist_node *node;
 	int ret = NET_RX_SUCCESS;
 	u16 pan_id, short_addr;
 
@@ -361,7 +360,7 @@ int ieee802154_dgram_deliver(struct net_device *dev, struct sk_buff *skb)
 	short_addr = ieee802154_mlme_ops(dev)->get_short_addr(dev);
 
 	read_lock(&dgram_lock);
-	sk_for_each(sk, node, &dgram_head) {
+	sk_for_each(sk, &dgram_head) {
 		if (ieee802154_match_sock(dev->dev_addr, pan_id, short_addr,
 					  dgram_sk(sk))) {
 			if (prev) {
diff --git a/net/ieee802154/raw.c b/net/ieee802154/raw.c
index 50e823927d49..41f538b8e59c 100644
--- a/net/ieee802154/raw.c
+++ b/net/ieee802154/raw.c
@@ -221,10 +221,9 @@ static int raw_rcv_skb(struct sock *sk, struct sk_buff *skb)
 void ieee802154_raw_deliver(struct net_device *dev, struct sk_buff *skb)
 {
 	struct sock *sk;
-	struct hlist_node *node;
 
 	read_lock(&raw_lock);
-	sk_for_each(sk, node, &raw_head) {
+	sk_for_each(sk, &raw_head) {
 		bh_lock_sock(sk);
 		if (!sk->sk_bound_dev_if ||
 		    sk->sk_bound_dev_if == dev->ifindex) {
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
index 5281314886c1..f678507bc829 100644
--- a/net/ipv4/devinet.c
+++ b/net/ipv4/devinet.c
@@ -139,10 +139,9 @@ struct net_device *__ip_dev_find(struct net *net, __be32 addr, bool devref)
 	u32 hash = inet_addr_hash(net, addr);
 	struct net_device *result = NULL;
 	struct in_ifaddr *ifa;
-	struct hlist_node *node;
 
 	rcu_read_lock();
-	hlist_for_each_entry_rcu(ifa, node, &inet_addr_lst[hash], hash) {
+	hlist_for_each_entry_rcu(ifa, &inet_addr_lst[hash], hash) {
 		if (ifa->ifa_local == addr) {
 			struct net_device *dev = ifa->ifa_dev->dev;
 
@@ -588,7 +587,6 @@ static void check_lifetime(struct work_struct *work)
 {
 	unsigned long now, next, next_sec, next_sched;
 	struct in_ifaddr *ifa;
-	struct hlist_node *node;
 	int i;
 
 	now = jiffies;
@@ -596,8 +594,7 @@ static void check_lifetime(struct work_struct *work)
 
 	rcu_read_lock();
 	for (i = 0; i < IN4_ADDR_HSIZE; i++) {
-		hlist_for_each_entry_rcu(ifa, node,
-					 &inet_addr_lst[i], hash) {
+		hlist_for_each_entry_rcu(ifa, &inet_addr_lst[i], hash) {
 			unsigned long age;
 
 			if (ifa->ifa_flags & IFA_F_PERMANENT)
@@ -1493,7 +1490,6 @@ static int inet_dump_ifaddr(struct sk_buff *skb, struct netlink_callback *cb)
 	struct in_device *in_dev;
 	struct in_ifaddr *ifa;
 	struct hlist_head *head;
-	struct hlist_node *node;
 
 	s_h = cb->args[0];
 	s_idx = idx = cb->args[1];
@@ -1503,7 +1499,7 @@ static int inet_dump_ifaddr(struct sk_buff *skb, struct netlink_callback *cb)
 		idx = 0;
 		head = &net->dev_index_head[h];
 		rcu_read_lock();
-		hlist_for_each_entry_rcu(dev, node, head, index_hlist) {
+		hlist_for_each_entry_rcu(dev, head, index_hlist) {
 			if (idx < s_idx)
 				goto cont;
 			if (h > s_h || idx > s_idx)
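The RCU flavors change shape the same way. The sketch below models hlist_for_each_entry_rcu() single-threaded, with rcu_dereference_raw() reduced to a plain volatile load — it shows only the cursor mechanics, not the real publication and ordering guarantees of include/linux/rculist.h; the struct and address value are invented:

#include <stddef.h>
#include <stdio.h>

struct hlist_head { struct hlist_node *first; };
struct hlist_node { struct hlist_node *next, **pprev; };

#define hlist_entry(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))
#define hlist_entry_safe(ptr, type, member) \
	({ typeof(ptr) ____ptr = (ptr); \
	   ____ptr ? hlist_entry(____ptr, type, member) : NULL; })
/* single-threaded stand-in for the real rcu_dereference_raw() */
#define rcu_dereference_raw(p) (*(volatile typeof(p) *)&(p))
#define hlist_for_each_entry_rcu(pos, head, member) \
	for (pos = hlist_entry_safe(rcu_dereference_raw((head)->first), \
				    typeof(*(pos)), member); \
	     pos; \
	     pos = hlist_entry_safe(rcu_dereference_raw((pos)->member.next), \
				    typeof(*(pos)), member))

struct toy_ifaddr { unsigned int local; struct hlist_node hash; };

int main(void)
{
	struct toy_ifaddr a = { 0x0100007f, { NULL, NULL } };	/* illustrative */
	struct hlist_head bucket = { &a.hash };
	struct toy_ifaddr *ifa;

	a.hash.pprev = &bucket.first;
	hlist_for_each_entry_rcu(ifa, &bucket, hash)
		if (ifa->local == 0x0100007f)
			printf("found %#x\n", ifa->local);
	return 0;
}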
diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
index 99f00d39d10b..eb4bb12b3eb4 100644
--- a/net/ipv4/fib_frontend.c
+++ b/net/ipv4/fib_frontend.c
@@ -112,7 +112,6 @@ struct fib_table *fib_new_table(struct net *net, u32 id)
 struct fib_table *fib_get_table(struct net *net, u32 id)
 {
 	struct fib_table *tb;
-	struct hlist_node *node;
 	struct hlist_head *head;
 	unsigned int h;
 
@@ -122,7 +121,7 @@ struct fib_table *fib_get_table(struct net *net, u32 id)
 
 	rcu_read_lock();
 	head = &net->ipv4.fib_table_hash[h];
-	hlist_for_each_entry_rcu(tb, node, head, tb_hlist) {
+	hlist_for_each_entry_rcu(tb, head, tb_hlist) {
 		if (tb->tb_id == id) {
 			rcu_read_unlock();
 			return tb;
@@ -137,13 +136,12 @@ static void fib_flush(struct net *net)
 {
 	int flushed = 0;
 	struct fib_table *tb;
-	struct hlist_node *node;
 	struct hlist_head *head;
 	unsigned int h;
 
 	for (h = 0; h < FIB_TABLE_HASHSZ; h++) {
 		head = &net->ipv4.fib_table_hash[h];
-		hlist_for_each_entry(tb, node, head, tb_hlist)
+		hlist_for_each_entry(tb, head, tb_hlist)
 			flushed += fib_table_flush(tb);
 	}
 
@@ -656,7 +654,6 @@ static int inet_dump_fib(struct sk_buff *skb, struct netlink_callback *cb)
 	unsigned int h, s_h;
 	unsigned int e = 0, s_e;
 	struct fib_table *tb;
-	struct hlist_node *node;
 	struct hlist_head *head;
 	int dumped = 0;
 
@@ -670,7 +667,7 @@ static int inet_dump_fib(struct sk_buff *skb, struct netlink_callback *cb)
 	for (h = s_h; h < FIB_TABLE_HASHSZ; h++, s_e = 0) {
 		e = 0;
 		head = &net->ipv4.fib_table_hash[h];
-		hlist_for_each_entry(tb, node, head, tb_hlist) {
+		hlist_for_each_entry(tb, head, tb_hlist) {
 			if (e < s_e)
 				goto next;
 			if (dumped)
@@ -1117,11 +1114,11 @@ static void ip_fib_net_exit(struct net *net)
 	for (i = 0; i < FIB_TABLE_HASHSZ; i++) {
 		struct fib_table *tb;
 		struct hlist_head *head;
-		struct hlist_node *node, *tmp;
+		struct hlist_node *tmp;
 
 		head = &net->ipv4.fib_table_hash[i];
-		hlist_for_each_entry_safe(tb, node, tmp, head, tb_hlist) {
-			hlist_del(node);
+		hlist_for_each_entry_safe(tb, tmp, head, tb_hlist) {
+			hlist_del(&tb->tb_hlist);
 			fib_table_flush(tb);
 			fib_free_table(tb);
 		}
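The hlist_del(node) -> hlist_del(&tb->tb_hlist) hunk above is one of the manual fixups mentioned in the changelog: with the node cursor gone, call sites that deleted through it must name the embedded member instead. Both spellings denote the same hlist_node, as this tiny sketch (invented names) checks:

#include <assert.h>
#include <stddef.h>

struct hlist_node { struct hlist_node *next, **pprev; };

#define hlist_entry(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct toy_table { int id; struct hlist_node tb_hlist; };

int main(void)
{
	struct toy_table t = { 5, { NULL, NULL } };
	/* the old iterator's 'node' cursor was always &tb->tb_hlist */
	struct hlist_node *node = &t.tb_hlist;

	assert(hlist_entry(node, struct toy_table, tb_hlist) == &t);
	assert(node == &t.tb_hlist);
	return 0;
}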
diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
index 4797a800faf8..8f6cb7a87cd6 100644
--- a/net/ipv4/fib_semantics.c
+++ b/net/ipv4/fib_semantics.c
@@ -298,14 +298,13 @@ static inline unsigned int fib_info_hashfn(const struct fib_info *fi)
 static struct fib_info *fib_find_info(const struct fib_info *nfi)
 {
 	struct hlist_head *head;
-	struct hlist_node *node;
 	struct fib_info *fi;
 	unsigned int hash;
 
 	hash = fib_info_hashfn(nfi);
 	head = &fib_info_hash[hash];
 
-	hlist_for_each_entry(fi, node, head, fib_hash) {
+	hlist_for_each_entry(fi, head, fib_hash) {
 		if (!net_eq(fi->fib_net, nfi->fib_net))
 			continue;
 		if (fi->fib_nhs != nfi->fib_nhs)
@@ -331,7 +330,6 @@ static struct fib_info *fib_find_info(const struct fib_info *nfi)
 int ip_fib_check_default(__be32 gw, struct net_device *dev)
 {
 	struct hlist_head *head;
-	struct hlist_node *node;
 	struct fib_nh *nh;
 	unsigned int hash;
 
@@ -339,7 +337,7 @@ int ip_fib_check_default(__be32 gw, struct net_device *dev)
 
 	hash = fib_devindex_hashfn(dev->ifindex);
 	head = &fib_info_devhash[hash];
-	hlist_for_each_entry(nh, node, head, nh_hash) {
+	hlist_for_each_entry(nh, head, nh_hash) {
 		if (nh->nh_dev == dev &&
 		    nh->nh_gw == gw &&
 		    !(nh->nh_flags & RTNH_F_DEAD)) {
@@ -721,10 +719,10 @@ static void fib_info_hash_move(struct hlist_head *new_info_hash,
 
 	for (i = 0; i < old_size; i++) {
 		struct hlist_head *head = &fib_info_hash[i];
-		struct hlist_node *node, *n;
+		struct hlist_node *n;
 		struct fib_info *fi;
 
-		hlist_for_each_entry_safe(fi, node, n, head, fib_hash) {
+		hlist_for_each_entry_safe(fi, n, head, fib_hash) {
 			struct hlist_head *dest;
 			unsigned int new_hash;
 
@@ -739,10 +737,10 @@ static void fib_info_hash_move(struct hlist_head *new_info_hash,
 
 	for (i = 0; i < old_size; i++) {
 		struct hlist_head *lhead = &fib_info_laddrhash[i];
-		struct hlist_node *node, *n;
+		struct hlist_node *n;
 		struct fib_info *fi;
 
-		hlist_for_each_entry_safe(fi, node, n, lhead, fib_lhash) {
+		hlist_for_each_entry_safe(fi, n, lhead, fib_lhash) {
 			struct hlist_head *ldest;
 			unsigned int new_hash;
 
@@ -1096,13 +1094,12 @@ int fib_sync_down_addr(struct net *net, __be32 local)
 	int ret = 0;
 	unsigned int hash = fib_laddr_hashfn(local);
 	struct hlist_head *head = &fib_info_laddrhash[hash];
-	struct hlist_node *node;
 	struct fib_info *fi;
 
 	if (fib_info_laddrhash == NULL || local == 0)
 		return 0;
 
-	hlist_for_each_entry(fi, node, head, fib_lhash) {
+	hlist_for_each_entry(fi, head, fib_lhash) {
 		if (!net_eq(fi->fib_net, net))
 			continue;
 		if (fi->fib_prefsrc == local) {
@@ -1120,13 +1117,12 @@ int fib_sync_down_dev(struct net_device *dev, int force)
 	struct fib_info *prev_fi = NULL;
 	unsigned int hash = fib_devindex_hashfn(dev->ifindex);
 	struct hlist_head *head = &fib_info_devhash[hash];
-	struct hlist_node *node;
 	struct fib_nh *nh;
 
 	if (force)
 		scope = -1;
 
-	hlist_for_each_entry(nh, node, head, nh_hash) {
+	hlist_for_each_entry(nh, head, nh_hash) {
 		struct fib_info *fi = nh->nh_parent;
 		int dead;
 
@@ -1232,7 +1228,6 @@ int fib_sync_up(struct net_device *dev)
 	struct fib_info *prev_fi;
 	unsigned int hash;
 	struct hlist_head *head;
-	struct hlist_node *node;
 	struct fib_nh *nh;
 	int ret;
 
@@ -1244,7 +1239,7 @@ int fib_sync_up(struct net_device *dev)
 	head = &fib_info_devhash[hash];
 	ret = 0;
 
-	hlist_for_each_entry(nh, node, head, nh_hash) {
+	hlist_for_each_entry(nh, head, nh_hash) {
 		struct fib_info *fi = nh->nh_parent;
 		int alive;
 
diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c
index 61e03da3e1f5..ff06b7543d9f 100644
--- a/net/ipv4/fib_trie.c
+++ b/net/ipv4/fib_trie.c
@@ -920,10 +920,9 @@ nomem:
 static struct leaf_info *find_leaf_info(struct leaf *l, int plen)
 {
 	struct hlist_head *head = &l->list;
-	struct hlist_node *node;
 	struct leaf_info *li;
 
-	hlist_for_each_entry_rcu(li, node, head, hlist)
+	hlist_for_each_entry_rcu(li, head, hlist)
 		if (li->plen == plen)
 			return li;
 
@@ -943,12 +942,11 @@ static inline struct list_head *get_fa_head(struct leaf *l, int plen)
 static void insert_leaf_info(struct hlist_head *head, struct leaf_info *new)
 {
 	struct leaf_info *li = NULL, *last = NULL;
-	struct hlist_node *node;
 
 	if (hlist_empty(head)) {
 		hlist_add_head_rcu(&new->hlist, head);
 	} else {
-		hlist_for_each_entry(li, node, head, hlist) {
+		hlist_for_each_entry(li, head, hlist) {
 			if (new->plen > li->plen)
 				break;
 
@@ -1354,9 +1352,8 @@ static int check_leaf(struct fib_table *tb, struct trie *t, struct leaf *l,
 {
 	struct leaf_info *li;
 	struct hlist_head *hhead = &l->list;
-	struct hlist_node *node;
 
-	hlist_for_each_entry_rcu(li, node, hhead, hlist) {
+	hlist_for_each_entry_rcu(li, hhead, hlist) {
 		struct fib_alias *fa;
 
 		if (l->key != (key & li->mask_plen))
@@ -1740,10 +1737,10 @@ static int trie_flush_leaf(struct leaf *l)
 {
 	int found = 0;
 	struct hlist_head *lih = &l->list;
-	struct hlist_node *node, *tmp;
+	struct hlist_node *tmp;
 	struct leaf_info *li = NULL;
 
-	hlist_for_each_entry_safe(li, node, tmp, lih, hlist) {
+	hlist_for_each_entry_safe(li, tmp, lih, hlist) {
 		found += trie_flush_list(&li->falh);
 
 		if (list_empty(&li->falh)) {
@@ -1895,14 +1892,13 @@ static int fn_trie_dump_leaf(struct leaf *l, struct fib_table *tb,
 			struct sk_buff *skb, struct netlink_callback *cb)
 {
 	struct leaf_info *li;
-	struct hlist_node *node;
 	int i, s_i;
 
 	s_i = cb->args[4];
 	i = 0;
 
 	/* rcu_read_lock is hold by caller */
-	hlist_for_each_entry_rcu(li, node, &l->list, hlist) {
+	hlist_for_each_entry_rcu(li, &l->list, hlist) {
 		if (i < s_i) {
 			i++;
 			continue;
@@ -2092,14 +2088,13 @@ static void trie_collect_stats(struct trie *t, struct trie_stat *s)
 		if (IS_LEAF(n)) {
 			struct leaf *l = (struct leaf *)n;
 			struct leaf_info *li;
-			struct hlist_node *tmp;
 
 			s->leaves++;
 			s->totdepth += iter.depth;
 			if (iter.depth > s->maxdepth)
 				s->maxdepth = iter.depth;
 
-			hlist_for_each_entry_rcu(li, tmp, &l->list, hlist)
+			hlist_for_each_entry_rcu(li, &l->list, hlist)
 				++s->prefixes;
 		} else {
 			const struct tnode *tn = (const struct tnode *) n;
@@ -2200,10 +2195,9 @@ static int fib_triestat_seq_show(struct seq_file *seq, void *v)
 
 	for (h = 0; h < FIB_TABLE_HASHSZ; h++) {
 		struct hlist_head *head = &net->ipv4.fib_table_hash[h];
-		struct hlist_node *node;
 		struct fib_table *tb;
 
-		hlist_for_each_entry_rcu(tb, node, head, tb_hlist) {
+		hlist_for_each_entry_rcu(tb, head, tb_hlist) {
 			struct trie *t = (struct trie *) tb->tb_data;
 			struct trie_stat stat;
 
@@ -2245,10 +2239,9 @@ static struct rt_trie_node *fib_trie_get_idx(struct seq_file *seq, loff_t pos)
 
 	for (h = 0; h < FIB_TABLE_HASHSZ; h++) {
 		struct hlist_head *head = &net->ipv4.fib_table_hash[h];
-		struct hlist_node *node;
 		struct fib_table *tb;
 
-		hlist_for_each_entry_rcu(tb, node, head, tb_hlist) {
+		hlist_for_each_entry_rcu(tb, head, tb_hlist) {
 			struct rt_trie_node *n;
 
 			for (n = fib_trie_get_first(iter,
@@ -2298,7 +2291,7 @@ static void *fib_trie_seq_next(struct seq_file *seq, void *v, loff_t *pos)
 	/* new hash chain */
 	while (++h < FIB_TABLE_HASHSZ) {
 		struct hlist_head *head = &net->ipv4.fib_table_hash[h];
-		hlist_for_each_entry_rcu(tb, tb_node, head, tb_hlist) {
+		hlist_for_each_entry_rcu(tb, head, tb_hlist) {
 			n = fib_trie_get_first(iter, (struct trie *) tb->tb_data);
 			if (n)
 				goto found;
@@ -2381,13 +2374,12 @@ static int fib_trie_seq_show(struct seq_file *seq, void *v)
 	} else {
 		struct leaf *l = (struct leaf *) n;
 		struct leaf_info *li;
-		struct hlist_node *node;
 		__be32 val = htonl(l->key);
 
 		seq_indent(seq, iter->depth);
 		seq_printf(seq, " |-- %pI4\n", &val);
 
-		hlist_for_each_entry_rcu(li, node, &l->list, hlist) {
+		hlist_for_each_entry_rcu(li, &l->list, hlist) {
 			struct fib_alias *fa;
 
 			list_for_each_entry_rcu(fa, &li->falh, fa_list) {
@@ -2532,7 +2524,6 @@ static int fib_route_seq_show(struct seq_file *seq, void *v)
 {
 	struct leaf *l = v;
 	struct leaf_info *li;
-	struct hlist_node *node;
 
 	if (v == SEQ_START_TOKEN) {
 		seq_printf(seq, "%-127s\n", "Iface\tDestination\tGateway "
@@ -2541,7 +2532,7 @@ static int fib_route_seq_show(struct seq_file *seq, void *v)
 		return 0;
 	}
 
-	hlist_for_each_entry_rcu(li, node, &l->list, hlist) {
+	hlist_for_each_entry_rcu(li, &l->list, hlist) {
 		struct fib_alias *fa;
 		__be32 mask, prefix;
 
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
index 11cb4979a465..7d1874be1df3 100644
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -57,7 +57,6 @@ int inet_csk_bind_conflict(const struct sock *sk,
 			   const struct inet_bind_bucket *tb, bool relax)
 {
 	struct sock *sk2;
-	struct hlist_node *node;
 	int reuse = sk->sk_reuse;
 	int reuseport = sk->sk_reuseport;
 	kuid_t uid = sock_i_uid((struct sock *)sk);
@@ -69,7 +68,7 @@ int inet_csk_bind_conflict(const struct sock *sk,
 	 * one this bucket belongs to.
 	 */
 
-	sk_for_each_bound(sk2, node, &tb->owners) {
+	sk_for_each_bound(sk2, &tb->owners) {
 		if (sk != sk2 &&
 		    !inet_v6_ipv6only(sk2) &&
 		    (!sk->sk_bound_dev_if ||
@@ -95,7 +94,7 @@ int inet_csk_bind_conflict(const struct sock *sk,
 			}
 		}
 	}
-	return node != NULL;
+	return sk2 != NULL;
 }
 EXPORT_SYMBOL_GPL(inet_csk_bind_conflict);
 
@@ -106,7 +105,6 @@ int inet_csk_get_port(struct sock *sk, unsigned short snum)
 {
 	struct inet_hashinfo *hashinfo = sk->sk_prot->h.hashinfo;
 	struct inet_bind_hashbucket *head;
-	struct hlist_node *node;
 	struct inet_bind_bucket *tb;
 	int ret, attempts = 5;
 	struct net *net = sock_net(sk);
@@ -129,7 +127,7 @@ again:
 			head = &hashinfo->bhash[inet_bhashfn(net, rover,
 					hashinfo->bhash_size)];
 			spin_lock(&head->lock);
-			inet_bind_bucket_for_each(tb, node, &head->chain)
+			inet_bind_bucket_for_each(tb, &head->chain)
 				if (net_eq(ib_net(tb), net) && tb->port == rover) {
 					if (((tb->fastreuse > 0 &&
 					      sk->sk_reuse &&
@@ -183,7 +181,7 @@ have_snum:
 		head = &hashinfo->bhash[inet_bhashfn(net, snum,
 				hashinfo->bhash_size)];
 		spin_lock(&head->lock);
-		inet_bind_bucket_for_each(tb, node, &head->chain)
+		inet_bind_bucket_for_each(tb, &head->chain)
 			if (net_eq(ib_net(tb), net) && tb->port == snum)
 				goto tb_found;
 	}
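return node != NULL becomes return sk2 != NULL because the new macros leave the entry cursor NULL exactly when the walk falls off the end: hlist_entry_safe(NULL, ...) yields NULL, rather than the bogus non-NULL pointer a bare container_of(NULL, ...) would produce. The same reasoning justifies the if (!tb) test in the inet_hashtables.c hunk below. A sketch with invented toy names:

#include <stddef.h>
#include <stdio.h>

struct hlist_head { struct hlist_node *first; };
struct hlist_node { struct hlist_node *next, **pprev; };

#define hlist_entry(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))
#define hlist_entry_safe(ptr, type, member) \
	({ typeof(ptr) ____ptr = (ptr); \
	   ____ptr ? hlist_entry(____ptr, type, member) : NULL; })
#define hlist_for_each_entry(pos, head, member) \
	for (pos = hlist_entry_safe((head)->first, typeof(*(pos)), member); \
	     pos; \
	     pos = hlist_entry_safe((pos)->member.next, typeof(*(pos)), member))

struct toy_sock { int port; struct hlist_node node; };

static int bind_conflict(struct hlist_head *owners, int port)
{
	struct toy_sock *sk2;

	hlist_for_each_entry(sk2, owners, node)
		if (sk2->port == port)
			break;
	/* sk2 is NULL iff the walk completed without a break */
	return sk2 != NULL;
}

int main(void)
{
	struct toy_sock a = { 80, { NULL, NULL } };
	struct hlist_head owners = { &a.node };

	a.node.pprev = &owners.first;
	printf("%d %d\n", bind_conflict(&owners, 80),	/* 1 */
			  bind_conflict(&owners, 443));	/* 0 */
	return 0;
}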
diff --git a/net/ipv4/inet_fragment.c b/net/ipv4/inet_fragment.c
index 2e453bde6992..245ae078a07f 100644
--- a/net/ipv4/inet_fragment.c
+++ b/net/ipv4/inet_fragment.c
@@ -33,9 +33,9 @@ static void inet_frag_secret_rebuild(unsigned long dummy)
 	get_random_bytes(&f->rnd, sizeof(u32));
 	for (i = 0; i < INETFRAGS_HASHSZ; i++) {
 		struct inet_frag_queue *q;
-		struct hlist_node *p, *n;
+		struct hlist_node *n;
 
-		hlist_for_each_entry_safe(q, p, n, &f->hash[i], list) {
+		hlist_for_each_entry_safe(q, n, &f->hash[i], list) {
 			unsigned int hval = f->hashfn(q);
 
 			if (hval != i) {
@@ -203,7 +203,6 @@ static struct inet_frag_queue *inet_frag_intern(struct netns_frags *nf,
 {
 	struct inet_frag_queue *qp;
 #ifdef CONFIG_SMP
-	struct hlist_node *n;
 #endif
 	unsigned int hash;
 
@@ -219,7 +218,7 @@ static struct inet_frag_queue *inet_frag_intern(struct netns_frags *nf,
 	 * such entry could be created on other cpu, while we
 	 * promoted read lock to write lock.
 	 */
-	hlist_for_each_entry(qp, n, &f->hash[hash], list) {
+	hlist_for_each_entry(qp, &f->hash[hash], list) {
 		if (qp->net == nf && f->match(qp, arg)) {
 			atomic_inc(&qp->refcnt);
 			write_unlock(&f->lock);
@@ -278,9 +277,8 @@ struct inet_frag_queue *inet_frag_find(struct netns_frags *nf,
 	__releases(&f->lock)
 {
 	struct inet_frag_queue *q;
-	struct hlist_node *n;
 
-	hlist_for_each_entry(q, n, &f->hash[hash], list) {
+	hlist_for_each_entry(q, &f->hash[hash], list) {
 		if (q->net == nf && f->match(q, key)) {
 			atomic_inc(&q->refcnt);
 			read_unlock(&f->lock);
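inet_frag_secret_rebuild() above is the rehash-in-place pattern: entries may be unlinked and relinked under a new seed mid-walk, so only the _safe variant is usable, and the hval != i test keeps already-moved entries from being shuffled again. A compilable model of that loop (toy types and hash function invented):

#include <stddef.h>
#include <stdio.h>

struct hlist_head { struct hlist_node *first; };
struct hlist_node { struct hlist_node *next, **pprev; };

#define hlist_entry(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))
#define hlist_entry_safe(ptr, type, member) \
	({ typeof(ptr) ____ptr = (ptr); \
	   ____ptr ? hlist_entry(____ptr, type, member) : NULL; })
#define hlist_for_each_entry_safe(pos, n, head, member) \
	for (pos = hlist_entry_safe((head)->first, typeof(*pos), member); \
	     pos && ({ n = pos->member.next; 1; }); \
	     pos = hlist_entry_safe(n, typeof(*pos), member))

static void hlist_add_head(struct hlist_node *n, struct hlist_head *h)
{
	n->next = h->first;
	if (h->first)
		h->first->pprev = &n->next;
	h->first = n;
	n->pprev = &h->first;
}

static void hlist_del(struct hlist_node *n)
{
	*n->pprev = n->next;
	if (n->next)
		n->next->pprev = n->pprev;
}

#define HASHSZ 2
struct toy_frag { unsigned int key; struct hlist_node list; };
static unsigned int rnd;	/* stands in for f->rnd */

static unsigned int hashfn(const struct toy_frag *q)
{
	return (q->key ^ rnd) % HASHSZ;
}

int main(void)
{
	struct hlist_head hash[HASHSZ] = { { NULL }, { NULL } };
	struct toy_frag f1 = { 1, { NULL, NULL } };
	struct toy_frag f2 = { 2, { NULL, NULL } };
	struct toy_frag *q;
	struct hlist_node *n;
	unsigned int i;

	hlist_add_head(&f1.list, &hash[hashfn(&f1)]);
	hlist_add_head(&f2.list, &hash[hashfn(&f2)]);

	rnd = 1;	/* reseed, as inet_frag_secret_rebuild() does */
	for (i = 0; i < HASHSZ; i++) {
		hlist_for_each_entry_safe(q, n, &hash[i], list) {
			unsigned int hval = hashfn(q);

			if (hval != i) {	/* relink under the new seed */
				hlist_del(&q->list);
				hlist_add_head(&q->list, &hash[hval]);
			}
		}
	}
	for (i = 0; i < HASHSZ; i++)
		hlist_for_each_entry_safe(q, n, &hash[i], list)
			printf("bucket %u: key %u\n", i, q->key);
	return 0;
}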
diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
index 0ce0595d9861..6af375afeeef 100644
--- a/net/ipv4/inet_hashtables.c
+++ b/net/ipv4/inet_hashtables.c
@@ -120,13 +120,12 @@ int __inet_inherit_port(struct sock *sk, struct sock *child)
 		 * that the listener socket's icsk_bind_hash is the same
 		 * as that of the child socket. We have to look up or
 		 * create a new bind bucket for the child here. */
-		struct hlist_node *node;
-		inet_bind_bucket_for_each(tb, node, &head->chain) {
+		inet_bind_bucket_for_each(tb, &head->chain) {
 			if (net_eq(ib_net(tb), sock_net(sk)) &&
 			    tb->port == port)
 				break;
 		}
-		if (!node) {
+		if (!tb) {
 			tb = inet_bind_bucket_create(table->bind_bucket_cachep,
 						     sock_net(sk), head, port);
 			if (!tb) {
@@ -493,7 +492,6 @@ int __inet_hash_connect(struct inet_timewait_death_row *death_row,
 	int i, remaining, low, high, port;
 	static u32 hint;
 	u32 offset = hint + port_offset;
-	struct hlist_node *node;
 	struct inet_timewait_sock *tw = NULL;
 
 	inet_get_local_port_range(&low, &high);
@@ -512,7 +510,7 @@ int __inet_hash_connect(struct inet_timewait_death_row *death_row,
 			 * because the established check is already
 			 * unique enough.
 			 */
-			inet_bind_bucket_for_each(tb, node, &head->chain) {
+			inet_bind_bucket_for_each(tb, &head->chain) {
 				if (net_eq(ib_net(tb), net) &&
 				    tb->port == port) {
 					if (tb->fastreuse >= 0 ||
diff --git a/net/ipv4/inet_timewait_sock.c b/net/ipv4/inet_timewait_sock.c
index 2784db3155fb..1f27c9f4afd0 100644
--- a/net/ipv4/inet_timewait_sock.c
+++ b/net/ipv4/inet_timewait_sock.c
@@ -216,7 +216,6 @@ static int inet_twdr_do_twkill_work(struct inet_timewait_death_row *twdr,
 				    const int slot)
 {
 	struct inet_timewait_sock *tw;
-	struct hlist_node *node;
 	unsigned int killed;
 	int ret;
 
@@ -229,7 +228,7 @@ static int inet_twdr_do_twkill_work(struct inet_timewait_death_row *twdr,
 	killed = 0;
 	ret = 0;
 rescan:
-	inet_twsk_for_each_inmate(tw, node, &twdr->cells[slot]) {
+	inet_twsk_for_each_inmate(tw, &twdr->cells[slot]) {
 		__inet_twsk_del_dead_node(tw);
 		spin_unlock(&twdr->death_lock);
 		__inet_twsk_kill(tw, twdr->hashinfo);
@@ -438,10 +437,10 @@ void inet_twdr_twcal_tick(unsigned long data)
 
 	for (n = 0; n < INET_TWDR_RECYCLE_SLOTS; n++) {
 		if (time_before_eq(j, now)) {
-			struct hlist_node *node, *safe;
+			struct hlist_node *safe;
 			struct inet_timewait_sock *tw;
 
-			inet_twsk_for_each_inmate_safe(tw, node, safe,
+			inet_twsk_for_each_inmate_safe(tw, safe,
 						       &twdr->twcal_row[slot]) {
 				__inet_twsk_del_dead_node(tw);
 				__inet_twsk_kill(tw, twdr->hashinfo);
diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
index 53ddebc292b6..dd44e0ab600c 100644
--- a/net/ipv4/raw.c
+++ b/net/ipv4/raw.c
@@ -111,9 +111,7 @@ EXPORT_SYMBOL_GPL(raw_unhash_sk);
 static struct sock *__raw_v4_lookup(struct net *net, struct sock *sk,
 		unsigned short num, __be32 raddr, __be32 laddr, int dif)
 {
-	struct hlist_node *node;
-
-	sk_for_each_from(sk, node) {
+	sk_for_each_from(sk) {
 		struct inet_sock *inet = inet_sk(sk);
 
 		if (net_eq(sock_net(sk), net) && inet->inet_num == num &&
@@ -914,9 +912,7 @@ static struct sock *raw_get_first(struct seq_file *seq)
 
 	for (state->bucket = 0; state->bucket < RAW_HTABLE_SIZE;
 			++state->bucket) {
-		struct hlist_node *node;
-
-		sk_for_each(sk, node, &state->h->ht[state->bucket])
+		sk_for_each(sk, &state->h->ht[state->bucket])
 			if (sock_net(sk) == seq_file_net(seq))
 				goto found;
 	}
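sk_for_each_from() is the one iterator whose arity drops from two arguments to one rather than three to two: it resumes from whatever entry the cursor already points at, which is exactly what __raw_v6_lookup()/__raw_v4_lookup() rely on when they continue a hash-chain scan from a given socket. A sketch of that shape (macro modeled on the post-patch hlist_for_each_entry_from(); the toy socket type is invented):

#include <stddef.h>
#include <stdio.h>

struct hlist_node { struct hlist_node *next, **pprev; };

#define hlist_entry(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))
#define hlist_entry_safe(ptr, type, member) \
	({ typeof(ptr) ____ptr = (ptr); \
	   ____ptr ? hlist_entry(____ptr, type, member) : NULL; })
/* iterate from the current position onward */
#define hlist_for_each_entry_from(pos, member) \
	for (; pos; \
	     pos = hlist_entry_safe((pos)->member.next, typeof(*(pos)), member))

struct toy_sock { int num; struct hlist_node sk_node; };
#define sk_for_each_from(sk) hlist_for_each_entry_from(sk, sk_node)

int main(void)
{
	struct toy_sock a = { 1, { NULL, NULL } };
	struct toy_sock b = { 2, { NULL, NULL } };
	struct toy_sock c = { 3, { NULL, NULL } };
	struct toy_sock *sk = &b;	/* resume mid-chain */

	a.sk_node.next = &b.sk_node;
	b.sk_node.next = &c.sk_node;

	sk_for_each_from(sk)
		printf("num %d\n", sk->num);	/* prints 2 then 3 */
	return 0;
}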
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 145d3bf8df86..4a8ec457310f 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -954,7 +954,6 @@ struct tcp_md5sig_key *tcp_md5_do_lookup(struct sock *sk,
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 	struct tcp_md5sig_key *key;
-	struct hlist_node *pos;
 	unsigned int size = sizeof(struct in_addr);
 	struct tcp_md5sig_info *md5sig;
 
@@ -968,7 +967,7 @@ struct tcp_md5sig_key *tcp_md5_do_lookup(struct sock *sk,
 	if (family == AF_INET6)
 		size = sizeof(struct in6_addr);
 #endif
-	hlist_for_each_entry_rcu(key, pos, &md5sig->head, node) {
+	hlist_for_each_entry_rcu(key, &md5sig->head, node) {
 		if (key->family != family)
 			continue;
 		if (!memcmp(&key->addr, addr, size))
@@ -1069,14 +1068,14 @@ static void tcp_clear_md5_list(struct sock *sk)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 	struct tcp_md5sig_key *key;
-	struct hlist_node *pos, *n;
+	struct hlist_node *n;
 	struct tcp_md5sig_info *md5sig;
 
 	md5sig = rcu_dereference_protected(tp->md5sig_info, 1);
 
 	if (!hlist_empty(&md5sig->head))
 		tcp_free_md5sig_pool();
-	hlist_for_each_entry_safe(key, pos, n, &md5sig->head, node) {
+	hlist_for_each_entry_safe(key, n, &md5sig->head, node) {
 		hlist_del_rcu(&key->node);
 		atomic_sub(sizeof(*key), &sk->sk_omem_alloc);
 		kfree_rcu(key, rcu);
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 4dc0d44a5d31..f2c7e615f902 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -1419,11 +1419,10 @@ int ipv6_chk_addr(struct net *net, const struct in6_addr *addr,
 		  struct net_device *dev, int strict)
 {
 	struct inet6_ifaddr *ifp;
-	struct hlist_node *node;
 	unsigned int hash = inet6_addr_hash(addr);
 
 	rcu_read_lock_bh();
-	hlist_for_each_entry_rcu(ifp, node, &inet6_addr_lst[hash], addr_lst) {
+	hlist_for_each_entry_rcu(ifp, &inet6_addr_lst[hash], addr_lst) {
 		if (!net_eq(dev_net(ifp->idev->dev), net))
 			continue;
 		if (ipv6_addr_equal(&ifp->addr, addr) &&
@@ -1445,9 +1444,8 @@ static bool ipv6_chk_same_addr(struct net *net, const struct in6_addr *addr,
 {
 	unsigned int hash = inet6_addr_hash(addr);
 	struct inet6_ifaddr *ifp;
-	struct hlist_node *node;
 
-	hlist_for_each_entry(ifp, node, &inet6_addr_lst[hash], addr_lst) {
+	hlist_for_each_entry(ifp, &inet6_addr_lst[hash], addr_lst) {
 		if (!net_eq(dev_net(ifp->idev->dev), net))
 			continue;
 		if (ipv6_addr_equal(&ifp->addr, addr)) {
@@ -1487,10 +1485,9 @@ struct inet6_ifaddr *ipv6_get_ifaddr(struct net *net, const struct in6_addr *addr,
 {
 	struct inet6_ifaddr *ifp, *result = NULL;
 	unsigned int hash = inet6_addr_hash(addr);
-	struct hlist_node *node;
 
 	rcu_read_lock_bh();
-	hlist_for_each_entry_rcu_bh(ifp, node, &inet6_addr_lst[hash], addr_lst) {
+	hlist_for_each_entry_rcu_bh(ifp, &inet6_addr_lst[hash], addr_lst) {
 		if (!net_eq(dev_net(ifp->idev->dev), net))
 			continue;
 		if (ipv6_addr_equal(&ifp->addr, addr)) {
@@ -2907,11 +2904,10 @@ static int addrconf_ifdown(struct net_device *dev, int how)
 	/* Step 2: clear hash table */
 	for (i = 0; i < IN6_ADDR_HSIZE; i++) {
 		struct hlist_head *h = &inet6_addr_lst[i];
-		struct hlist_node *n;
 
 		spin_lock_bh(&addrconf_hash_lock);
 restart:
-		hlist_for_each_entry_rcu(ifa, n, h, addr_lst) {
+		hlist_for_each_entry_rcu(ifa, h, addr_lst) {
 			if (ifa->idev == idev) {
 				hlist_del_init_rcu(&ifa->addr_lst);
 				addrconf_del_timer(ifa);
@@ -3218,8 +3214,7 @@ static struct inet6_ifaddr *if6_get_first(struct seq_file *seq, loff_t pos)
 	}
 
 	for (; state->bucket < IN6_ADDR_HSIZE; ++state->bucket) {
-		struct hlist_node *n;
-		hlist_for_each_entry_rcu_bh(ifa, n, &inet6_addr_lst[state->bucket],
+		hlist_for_each_entry_rcu_bh(ifa, &inet6_addr_lst[state->bucket],
 					    addr_lst) {
 			if (!net_eq(dev_net(ifa->idev->dev), net))
 				continue;
@@ -3244,9 +3239,8 @@ static struct inet6_ifaddr *if6_get_next(struct seq_file *seq,
 {
 	struct if6_iter_state *state = seq->private;
 	struct net *net = seq_file_net(seq);
-	struct hlist_node *n = &ifa->addr_lst;
 
-	hlist_for_each_entry_continue_rcu_bh(ifa, n, addr_lst) {
+	hlist_for_each_entry_continue_rcu_bh(ifa, addr_lst) {
 		if (!net_eq(dev_net(ifa->idev->dev), net))
 			continue;
 		state->offset++;
@@ -3255,7 +3249,7 @@ static struct inet6_ifaddr *if6_get_next(struct seq_file *seq,
 
 	while (++state->bucket < IN6_ADDR_HSIZE) {
 		state->offset = 0;
-		hlist_for_each_entry_rcu_bh(ifa, n,
+		hlist_for_each_entry_rcu_bh(ifa,
 			&inet6_addr_lst[state->bucket], addr_lst) {
 			if (!net_eq(dev_net(ifa->idev->dev), net))
 				continue;
@@ -3357,11 +3351,10 @@ int ipv6_chk_home_addr(struct net *net, const struct in6_addr *addr)
 {
 	int ret = 0;
 	struct inet6_ifaddr *ifp = NULL;
-	struct hlist_node *n;
 	unsigned int hash = inet6_addr_hash(addr);
 
 	rcu_read_lock_bh();
-	hlist_for_each_entry_rcu_bh(ifp, n, &inet6_addr_lst[hash], addr_lst) {
+	hlist_for_each_entry_rcu_bh(ifp, &inet6_addr_lst[hash], addr_lst) {
 		if (!net_eq(dev_net(ifp->idev->dev), net))
 			continue;
 		if (ipv6_addr_equal(&ifp->addr, addr) &&
@@ -3383,7 +3376,6 @@ static void addrconf_verify(unsigned long foo)
 {
 	unsigned long now, next, next_sec, next_sched;
 	struct inet6_ifaddr *ifp;
-	struct hlist_node *node;
 	int i;
 
 	rcu_read_lock_bh();
@@ -3395,7 +3387,7 @@ static void addrconf_verify(unsigned long foo)
 
 	for (i = 0; i < IN6_ADDR_HSIZE; i++) {
 restart:
-		hlist_for_each_entry_rcu_bh(ifp, node,
+		hlist_for_each_entry_rcu_bh(ifp,
 					    &inet6_addr_lst[i], addr_lst) {
 			unsigned long age;
 
@@ -3866,7 +3858,6 @@ static int inet6_dump_addr(struct sk_buff *skb, struct netlink_callback *cb,
 	struct net_device *dev;
 	struct inet6_dev *idev;
 	struct hlist_head *head;
-	struct hlist_node *node;
 
 	s_h = cb->args[0];
 	s_idx = idx = cb->args[1];
@@ -3876,7 +3867,7 @@ static int inet6_dump_addr(struct sk_buff *skb, struct netlink_callback *cb,
 	for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
 		idx = 0;
 		head = &net->dev_index_head[h];
-		hlist_for_each_entry_rcu(dev, node, head, index_hlist) {
+		hlist_for_each_entry_rcu(dev, head, index_hlist) {
 			if (idx < s_idx)
 				goto cont;
 			if (h > s_h || idx > s_idx)
@@ -4222,7 +4213,6 @@ static int inet6_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
 	struct net_device *dev;
 	struct inet6_dev *idev;
 	struct hlist_head *head;
-	struct hlist_node *node;
 
 	s_h = cb->args[0];
 	s_idx = cb->args[1];
@@ -4231,7 +4221,7 @@ static int inet6_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
 	for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
 		idx = 0;
 		head = &net->dev_index_head[h];
-		hlist_for_each_entry_rcu(dev, node, head, index_hlist) {
+		hlist_for_each_entry_rcu(dev, head, index_hlist) {
 			if (idx < s_idx)
 				goto cont;
 			idev = __in6_dev_get(dev);
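The _continue variants in if6_get_next() above lose the odd priming step n = &ifa->addr_lst: the entry cursor itself now marks the resume point and the loop steps off it first. A plain-list model of that shape — the kernel's _rcu_bh flavor additionally wraps the loads in rcu_dereference_bh(), which this sketch elides; the toy names are invented:

#include <stddef.h>
#include <stdio.h>

struct hlist_node { struct hlist_node *next, **pprev; };

#define hlist_entry(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))
#define hlist_entry_safe(ptr, type, member) \
	({ typeof(ptr) ____ptr = (ptr); \
	   ____ptr ? hlist_entry(____ptr, type, member) : NULL; })
/* advance past the current entry, then keep walking */
#define hlist_for_each_entry_continue(pos, member) \
	for (pos = hlist_entry_safe((pos)->member.next, typeof(*(pos)), member); \
	     pos; \
	     pos = hlist_entry_safe((pos)->member.next, typeof(*(pos)), member))

struct toy_ifaddr { int id; struct hlist_node addr_lst; };

int main(void)
{
	struct toy_ifaddr a = { 1, { NULL, NULL } };
	struct toy_ifaddr b = { 2, { NULL, NULL } };
	struct toy_ifaddr c = { 3, { NULL, NULL } };
	struct toy_ifaddr *ifa = &a;	/* current seq-file position */

	a.addr_lst.next = &b.addr_lst;
	b.addr_lst.next = &c.addr_lst;

	/* pre-patch callers had to prime a node cursor with
	 * n = &ifa->addr_lst; the entry cursor alone now suffices */
	hlist_for_each_entry_continue(ifa, addr_lst)
		printf("id %d\n", ifa->id);	/* prints 2 then 3 */
	return 0;
}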
diff --git a/net/ipv6/addrlabel.c b/net/ipv6/addrlabel.c
index ff76eecfd622..aad64352cb60 100644
--- a/net/ipv6/addrlabel.c
+++ b/net/ipv6/addrlabel.c
@@ -173,9 +173,8 @@ static struct ip6addrlbl_entry *__ipv6_addr_label(struct net *net,
 						  const struct in6_addr *addr,
 						  int type, int ifindex)
 {
-	struct hlist_node *pos;
 	struct ip6addrlbl_entry *p;
-	hlist_for_each_entry_rcu(p, pos, &ip6addrlbl_table.head, list) {
+	hlist_for_each_entry_rcu(p, &ip6addrlbl_table.head, list) {
 		if (__ip6addrlbl_match(net, p, addr, type, ifindex))
 			return p;
 	}
@@ -261,9 +260,9 @@ static int __ip6addrlbl_add(struct ip6addrlbl_entry *newp, int replace)
 	if (hlist_empty(&ip6addrlbl_table.head)) {
 		hlist_add_head_rcu(&newp->list, &ip6addrlbl_table.head);
 	} else {
-		struct hlist_node *pos, *n;
+		struct hlist_node *n;
 		struct ip6addrlbl_entry *p = NULL;
-		hlist_for_each_entry_safe(p, pos, n,
+		hlist_for_each_entry_safe(p, n,
 					  &ip6addrlbl_table.head, list) {
 			if (p->prefixlen == newp->prefixlen &&
 			    net_eq(ip6addrlbl_net(p), ip6addrlbl_net(newp)) &&
@@ -319,13 +318,13 @@ static int __ip6addrlbl_del(struct net *net,
 			    int ifindex)
 {
 	struct ip6addrlbl_entry *p = NULL;
-	struct hlist_node *pos, *n;
+	struct hlist_node *n;
 	int ret = -ESRCH;
 
 	ADDRLABEL(KERN_DEBUG "%s(prefix=%pI6, prefixlen=%d, ifindex=%d)\n",
 		  __func__, prefix, prefixlen, ifindex);
 
-	hlist_for_each_entry_safe(p, pos, n, &ip6addrlbl_table.head, list) {
+	hlist_for_each_entry_safe(p, n, &ip6addrlbl_table.head, list) {
 		if (p->prefixlen == prefixlen &&
 		    net_eq(ip6addrlbl_net(p), net) &&
 		    p->ifindex == ifindex &&
@@ -380,11 +379,11 @@ static int __net_init ip6addrlbl_net_init(struct net *net)
 static void __net_exit ip6addrlbl_net_exit(struct net *net)
 {
 	struct ip6addrlbl_entry *p = NULL;
-	struct hlist_node *pos, *n;
+	struct hlist_node *n;
 
 	/* Remove all labels belonging to the exiting net */
 	spin_lock(&ip6addrlbl_table.lock);
-	hlist_for_each_entry_safe(p, pos, n, &ip6addrlbl_table.head, list) {
+	hlist_for_each_entry_safe(p, n, &ip6addrlbl_table.head, list) {
 		if (net_eq(ip6addrlbl_net(p), net)) {
 			hlist_del_rcu(&p->list);
 			ip6addrlbl_put(p);
@@ -505,12 +504,11 @@ static int ip6addrlbl_dump(struct sk_buff *skb, struct netlink_callback *cb)
 {
 	struct net *net = sock_net(skb->sk);
 	struct ip6addrlbl_entry *p;
-	struct hlist_node *pos;
 	int idx = 0, s_idx = cb->args[0];
 	int err;
 
 	rcu_read_lock();
-	hlist_for_each_entry_rcu(p, pos, &ip6addrlbl_table.head, list) {
+	hlist_for_each_entry_rcu(p, &ip6addrlbl_table.head, list) {
 		if (idx >= s_idx &&
 		    net_eq(ip6addrlbl_net(p), net)) {
 			if ((err = ip6addrlbl_fill(skb, p,
diff --git a/net/ipv6/inet6_connection_sock.c b/net/ipv6/inet6_connection_sock.c
index b386a2ce4c6f..9bfab19ff3c0 100644
--- a/net/ipv6/inet6_connection_sock.c
+++ b/net/ipv6/inet6_connection_sock.c
@@ -31,7 +31,6 @@ int inet6_csk_bind_conflict(const struct sock *sk,
 			    const struct inet_bind_bucket *tb, bool relax)
 {
 	const struct sock *sk2;
-	const struct hlist_node *node;
 	int reuse = sk->sk_reuse;
 	int reuseport = sk->sk_reuseport;
 	kuid_t uid = sock_i_uid((struct sock *)sk);
@@ -41,7 +40,7 @@ int inet6_csk_bind_conflict(const struct sock *sk,
 	 * See comment in inet_csk_bind_conflict about sock lookup
 	 * vs net namespaces issues.
 	 */
-	sk_for_each_bound(sk2, node, &tb->owners) {
+	sk_for_each_bound(sk2, &tb->owners) {
 		if (sk != sk2 &&
 		    (!sk->sk_bound_dev_if ||
 		     !sk2->sk_bound_dev_if ||
@@ -58,7 +57,7 @@ int inet6_csk_bind_conflict(const struct sock *sk,
 		}
 	}
 
-	return node != NULL;
+	return sk2 != NULL;
 }
 
 EXPORT_SYMBOL_GPL(inet6_csk_bind_conflict);
diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
index 710cafd2e1a9..192dd1a0e188 100644
--- a/net/ipv6/ip6_fib.c
+++ b/net/ipv6/ip6_fib.c
@@ -224,7 +224,6 @@ struct fib6_table *fib6_get_table(struct net *net, u32 id)
 {
 	struct fib6_table *tb;
 	struct hlist_head *head;
-	struct hlist_node *node;
 	unsigned int h;
 
 	if (id == 0)
@@ -232,7 +231,7 @@ struct fib6_table *fib6_get_table(struct net *net, u32 id)
 	h = id & (FIB6_TABLE_HASHSZ - 1);
 	rcu_read_lock();
 	head = &net->ipv6.fib_table_hash[h];
-	hlist_for_each_entry_rcu(tb, node, head, tb6_hlist) {
+	hlist_for_each_entry_rcu(tb, head, tb6_hlist) {
 		if (tb->tb6_id == id) {
 			rcu_read_unlock();
 			return tb;
@@ -363,7 +362,6 @@ static int inet6_dump_fib(struct sk_buff *skb, struct netlink_callback *cb)
 	struct rt6_rtnl_dump_arg arg;
 	struct fib6_walker_t *w;
 	struct fib6_table *tb;
-	struct hlist_node *node;
 	struct hlist_head *head;
 	int res = 0;
 
@@ -398,7 +396,7 @@ static int inet6_dump_fib(struct sk_buff *skb, struct netlink_callback *cb)
 	for (h = s_h; h < FIB6_TABLE_HASHSZ; h++, s_e = 0) {
 		e = 0;
 		head = &net->ipv6.fib_table_hash[h];
-		hlist_for_each_entry_rcu(tb, node, head, tb6_hlist) {
+		hlist_for_each_entry_rcu(tb, head, tb6_hlist) {
 			if (e < s_e)
 				goto next;
 			res = fib6_dump_table(tb, skb, cb);
@@ -1520,14 +1518,13 @@ void fib6_clean_all_ro(struct net *net, int (*func)(struct rt6_info *, void *arg),
 		       int prune, void *arg)
 {
 	struct fib6_table *table;
-	struct hlist_node *node;
 	struct hlist_head *head;
 	unsigned int h;
 
 	rcu_read_lock();
 	for (h = 0; h < FIB6_TABLE_HASHSZ; h++) {
 		head = &net->ipv6.fib_table_hash[h];
-		hlist_for_each_entry_rcu(table, node, head, tb6_hlist) {
+		hlist_for_each_entry_rcu(table, head, tb6_hlist) {
 			read_lock_bh(&table->tb6_lock);
 			fib6_clean_tree(net, &table->tb6_root,
 					func, prune, arg);
@@ -1540,14 +1537,13 @@ void fib6_clean_all(struct net *net, int (*func)(struct rt6_info *, void *arg),
 		    int prune, void *arg)
 {
 	struct fib6_table *table;
-	struct hlist_node *node;
 	struct hlist_head *head;
 	unsigned int h;
 
 	rcu_read_lock();
 	for (h = 0; h < FIB6_TABLE_HASHSZ; h++) {
 		head = &net->ipv6.fib_table_hash[h];
-		hlist_for_each_entry_rcu(table, node, head, tb6_hlist) {
+		hlist_for_each_entry_rcu(table, head, tb6_hlist) {
 			write_lock_bh(&table->tb6_lock);
 			fib6_clean_tree(net, &table->tb6_root,
 					func, prune, arg);
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
index c65907db8c44..330b5e7b7df6 100644
--- a/net/ipv6/raw.c
+++ b/net/ipv6/raw.c
@@ -71,10 +71,9 @@ static struct sock *__raw_v6_lookup(struct net *net, struct sock *sk, | |||
71 | unsigned short num, const struct in6_addr *loc_addr, | 71 | unsigned short num, const struct in6_addr *loc_addr, |
72 | const struct in6_addr *rmt_addr, int dif) | 72 | const struct in6_addr *rmt_addr, int dif) |
73 | { | 73 | { |
74 | struct hlist_node *node; | ||
75 | bool is_multicast = ipv6_addr_is_multicast(loc_addr); | 74 | bool is_multicast = ipv6_addr_is_multicast(loc_addr); |
76 | 75 | ||
77 | sk_for_each_from(sk, node) | 76 | sk_for_each_from(sk) |
78 | if (inet_sk(sk)->inet_num == num) { | 77 | if (inet_sk(sk)->inet_num == num) { |
79 | struct ipv6_pinfo *np = inet6_sk(sk); | 78 | struct ipv6_pinfo *np = inet6_sk(sk); |
80 | 79 | ||
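sk_for_each_from() is the odd one out: it resumes from the socket pointer it is handed rather than from a list head, so once the node parameter is dropped it is down to a single argument. A toy sketch of that shape (stand-in types; the real macro follows the socket's sk_node links):

#include <stdio.h>

/* Stand-in socket type; illustrative only. */
struct sock { int num; struct sock *next; };

/* Post-conversion shape of sk_for_each_from(): one argument,
 * continuing from wherever 'sk' already points. */
#define sk_for_each_from(sk) \
	for (; sk; sk = sk->next)

int main(void)
{
	struct sock c = { 3, NULL }, b = { 2, &c }, a = { 1, &b };
	struct sock *sk = a.next;	/* resume mid-list, as __raw_v6_lookup does */

	sk_for_each_from(sk)
		printf("num=%d\n", sk->num);	/* prints 2, then 3 */
	return 0;
}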
diff --git a/net/ipv6/xfrm6_tunnel.c b/net/ipv6/xfrm6_tunnel.c
index 6cc48012b730..de2bcfaaf759 100644
--- a/net/ipv6/xfrm6_tunnel.c
+++ b/net/ipv6/xfrm6_tunnel.c
@@ -89,9 +89,8 @@ static struct xfrm6_tunnel_spi *__xfrm6_tunnel_spi_lookup(struct net *net, const | |||
89 | { | 89 | { |
90 | struct xfrm6_tunnel_net *xfrm6_tn = xfrm6_tunnel_pernet(net); | 90 | struct xfrm6_tunnel_net *xfrm6_tn = xfrm6_tunnel_pernet(net); |
91 | struct xfrm6_tunnel_spi *x6spi; | 91 | struct xfrm6_tunnel_spi *x6spi; |
92 | struct hlist_node *pos; | ||
93 | 92 | ||
94 | hlist_for_each_entry_rcu(x6spi, pos, | 93 | hlist_for_each_entry_rcu(x6spi, |
95 | &xfrm6_tn->spi_byaddr[xfrm6_tunnel_spi_hash_byaddr(saddr)], | 94 | &xfrm6_tn->spi_byaddr[xfrm6_tunnel_spi_hash_byaddr(saddr)], |
96 | list_byaddr) { | 95 | list_byaddr) { |
97 | if (xfrm6_addr_equal(&x6spi->addr, saddr)) | 96 | if (xfrm6_addr_equal(&x6spi->addr, saddr)) |
@@ -120,9 +119,8 @@ static int __xfrm6_tunnel_spi_check(struct net *net, u32 spi) | |||
120 | struct xfrm6_tunnel_net *xfrm6_tn = xfrm6_tunnel_pernet(net); | 119 | struct xfrm6_tunnel_net *xfrm6_tn = xfrm6_tunnel_pernet(net); |
121 | struct xfrm6_tunnel_spi *x6spi; | 120 | struct xfrm6_tunnel_spi *x6spi; |
122 | int index = xfrm6_tunnel_spi_hash_byspi(spi); | 121 | int index = xfrm6_tunnel_spi_hash_byspi(spi); |
123 | struct hlist_node *pos; | ||
124 | 122 | ||
125 | hlist_for_each_entry(x6spi, pos, | 123 | hlist_for_each_entry(x6spi, |
126 | &xfrm6_tn->spi_byspi[index], | 124 | &xfrm6_tn->spi_byspi[index], |
127 | list_byspi) { | 125 | list_byspi) { |
128 | if (x6spi->spi == spi) | 126 | if (x6spi->spi == spi) |
@@ -203,11 +201,11 @@ static void xfrm6_tunnel_free_spi(struct net *net, xfrm_address_t *saddr) | |||
203 | { | 201 | { |
204 | struct xfrm6_tunnel_net *xfrm6_tn = xfrm6_tunnel_pernet(net); | 202 | struct xfrm6_tunnel_net *xfrm6_tn = xfrm6_tunnel_pernet(net); |
205 | struct xfrm6_tunnel_spi *x6spi; | 203 | struct xfrm6_tunnel_spi *x6spi; |
206 | struct hlist_node *pos, *n; | 204 | struct hlist_node *n; |
207 | 205 | ||
208 | spin_lock_bh(&xfrm6_tunnel_spi_lock); | 206 | spin_lock_bh(&xfrm6_tunnel_spi_lock); |
209 | 207 | ||
210 | hlist_for_each_entry_safe(x6spi, pos, n, | 208 | hlist_for_each_entry_safe(x6spi, n, |
211 | &xfrm6_tn->spi_byaddr[xfrm6_tunnel_spi_hash_byaddr(saddr)], | 209 | &xfrm6_tn->spi_byaddr[xfrm6_tunnel_spi_hash_byaddr(saddr)], |
212 | list_byaddr) | 210 | list_byaddr) |
213 | { | 211 | { |
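The _safe variants keep exactly one helper cursor: the lookahead pointer ('n' above) is loaded before the loop body runs, so the current entry may be unlinked or freed mid-walk; only the redundant position cursor goes away. A userspace sketch of the surviving two-cursor shape (toy types, singly linked for brevity):

#include <stdio.h>
#include <stdlib.h>

/* Stand-in for the SPI entry; illustrative only. */
struct x6spi { int spi; struct x6spi *next; };

/* Post-conversion _safe shape: entry cursor plus one lookahead
 * pointer, captured before the body can destroy the entry. */
#define for_each_spi_safe(pos, n, head) \
	for (pos = (head); pos && ((n) = pos->next, 1); pos = (n))

int main(void)
{
	struct x6spi *head = NULL, *e, *n;
	int i;

	for (i = 1; i <= 3; i++) {
		e = malloc(sizeof(*e));
		e->spi = i;
		e->next = head;
		head = e;
	}

	/* Freeing the current entry is fine; 'n' already holds ->next. */
	for_each_spi_safe(e, n, head) {
		printf("freeing spi %d\n", e->spi);
		free(e);
	}
	return 0;
}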
diff --git a/net/ipx/af_ipx.c b/net/ipx/af_ipx.c
index dfd6faaf0ea7..f547a47d381c 100644
--- a/net/ipx/af_ipx.c
+++ b/net/ipx/af_ipx.c
@@ -228,9 +228,8 @@ static struct sock *__ipxitf_find_socket(struct ipx_interface *intrfc, | |||
228 | __be16 port) | 228 | __be16 port) |
229 | { | 229 | { |
230 | struct sock *s; | 230 | struct sock *s; |
231 | struct hlist_node *node; | ||
232 | 231 | ||
233 | sk_for_each(s, node, &intrfc->if_sklist) | 232 | sk_for_each(s, &intrfc->if_sklist) |
234 | if (ipx_sk(s)->port == port) | 233 | if (ipx_sk(s)->port == port) |
235 | goto found; | 234 | goto found; |
236 | s = NULL; | 235 | s = NULL; |
@@ -259,12 +258,11 @@ static struct sock *ipxitf_find_internal_socket(struct ipx_interface *intrfc, | |||
259 | __be16 port) | 258 | __be16 port) |
260 | { | 259 | { |
261 | struct sock *s; | 260 | struct sock *s; |
262 | struct hlist_node *node; | ||
263 | 261 | ||
264 | ipxitf_hold(intrfc); | 262 | ipxitf_hold(intrfc); |
265 | spin_lock_bh(&intrfc->if_sklist_lock); | 263 | spin_lock_bh(&intrfc->if_sklist_lock); |
266 | 264 | ||
267 | sk_for_each(s, node, &intrfc->if_sklist) { | 265 | sk_for_each(s, &intrfc->if_sklist) { |
268 | struct ipx_sock *ipxs = ipx_sk(s); | 266 | struct ipx_sock *ipxs = ipx_sk(s); |
269 | 267 | ||
270 | if (ipxs->port == port && | 268 | if (ipxs->port == port && |
@@ -282,14 +280,14 @@ found: | |||
282 | static void __ipxitf_down(struct ipx_interface *intrfc) | 280 | static void __ipxitf_down(struct ipx_interface *intrfc) |
283 | { | 281 | { |
284 | struct sock *s; | 282 | struct sock *s; |
285 | struct hlist_node *node, *t; | 283 | struct hlist_node *t; |
286 | 284 | ||
287 | /* Delete all routes associated with this interface */ | 285 | /* Delete all routes associated with this interface */ |
288 | ipxrtr_del_routes(intrfc); | 286 | ipxrtr_del_routes(intrfc); |
289 | 287 | ||
290 | spin_lock_bh(&intrfc->if_sklist_lock); | 288 | spin_lock_bh(&intrfc->if_sklist_lock); |
291 | /* error sockets */ | 289 | /* error sockets */ |
292 | sk_for_each_safe(s, node, t, &intrfc->if_sklist) { | 290 | sk_for_each_safe(s, t, &intrfc->if_sklist) { |
293 | struct ipx_sock *ipxs = ipx_sk(s); | 291 | struct ipx_sock *ipxs = ipx_sk(s); |
294 | 292 | ||
295 | s->sk_err = ENOLINK; | 293 | s->sk_err = ENOLINK; |
@@ -385,12 +383,11 @@ static int ipxitf_demux_socket(struct ipx_interface *intrfc, | |||
385 | int is_broadcast = !memcmp(ipx->ipx_dest.node, ipx_broadcast_node, | 383 | int is_broadcast = !memcmp(ipx->ipx_dest.node, ipx_broadcast_node, |
386 | IPX_NODE_LEN); | 384 | IPX_NODE_LEN); |
387 | struct sock *s; | 385 | struct sock *s; |
388 | struct hlist_node *node; | ||
389 | int rc; | 386 | int rc; |
390 | 387 | ||
391 | spin_lock_bh(&intrfc->if_sklist_lock); | 388 | spin_lock_bh(&intrfc->if_sklist_lock); |
392 | 389 | ||
393 | sk_for_each(s, node, &intrfc->if_sklist) { | 390 | sk_for_each(s, &intrfc->if_sklist) { |
394 | struct ipx_sock *ipxs = ipx_sk(s); | 391 | struct ipx_sock *ipxs = ipx_sk(s); |
395 | 392 | ||
396 | if (ipxs->port == ipx->ipx_dest.sock && | 393 | if (ipxs->port == ipx->ipx_dest.sock && |
@@ -446,12 +443,11 @@ static struct sock *ncp_connection_hack(struct ipx_interface *intrfc, | |||
446 | connection = (((int) *(ncphdr + 9)) << 8) | (int) *(ncphdr + 8); | 443 | connection = (((int) *(ncphdr + 9)) << 8) | (int) *(ncphdr + 8); |
447 | 444 | ||
448 | if (connection) { | 445 | if (connection) { |
449 | struct hlist_node *node; | ||
450 | /* Now we have to look for a special NCP connection handling | 446 | /* Now we have to look for a special NCP connection handling |
451 | * socket. Only these sockets have ipx_ncp_conn != 0, set by | 447 | * socket. Only these sockets have ipx_ncp_conn != 0, set by |
452 | * SIOCIPXNCPCONN. */ | 448 | * SIOCIPXNCPCONN. */ |
453 | spin_lock_bh(&intrfc->if_sklist_lock); | 449 | spin_lock_bh(&intrfc->if_sklist_lock); |
454 | sk_for_each(sk, node, &intrfc->if_sklist) | 450 | sk_for_each(sk, &intrfc->if_sklist) |
455 | if (ipx_sk(sk)->ipx_ncp_conn == connection) { | 451 | if (ipx_sk(sk)->ipx_ncp_conn == connection) { |
456 | sock_hold(sk); | 452 | sock_hold(sk); |
457 | goto found; | 453 | goto found; |
diff --git a/net/ipx/ipx_proc.c b/net/ipx/ipx_proc.c
index 02ff7f2f60d4..65e8833a2510 100644
--- a/net/ipx/ipx_proc.c
+++ b/net/ipx/ipx_proc.c
@@ -103,19 +103,18 @@ out: | |||
103 | static __inline__ struct sock *ipx_get_socket_idx(loff_t pos) | 103 | static __inline__ struct sock *ipx_get_socket_idx(loff_t pos) |
104 | { | 104 | { |
105 | struct sock *s = NULL; | 105 | struct sock *s = NULL; |
106 | struct hlist_node *node; | ||
107 | struct ipx_interface *i; | 106 | struct ipx_interface *i; |
108 | 107 | ||
109 | list_for_each_entry(i, &ipx_interfaces, node) { | 108 | list_for_each_entry(i, &ipx_interfaces, node) { |
110 | spin_lock_bh(&i->if_sklist_lock); | 109 | spin_lock_bh(&i->if_sklist_lock); |
111 | sk_for_each(s, node, &i->if_sklist) { | 110 | sk_for_each(s, &i->if_sklist) { |
112 | if (!pos) | 111 | if (!pos) |
113 | break; | 112 | break; |
114 | --pos; | 113 | --pos; |
115 | } | 114 | } |
116 | spin_unlock_bh(&i->if_sklist_lock); | 115 | spin_unlock_bh(&i->if_sklist_lock); |
117 | if (!pos) { | 116 | if (!pos) { |
118 | if (node) | 117 | if (s) |
119 | goto found; | 118 | goto found; |
120 | break; | 119 | break; |
121 | } | 120 | } |
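ipx_get_socket_idx() needed the same manual fixup as inet6_csk_bind_conflict(): under the old macro an exhausted chain left 's' pointing at its last entry (only the node cursor went NULL), while the new macro NULLs the entry cursor itself, so the post-loop test becomes if (s). A sketch of the index lookup flattened to a single chain (stand-in types):

#include <stdio.h>

/* Stand-in socket type; illustrative only. */
struct sock { int id; struct sock *next; };

/* Walk forward 'pos' entries; after the conversion the socket
 * pointer alone says whether the walk ran off the end. */
static struct sock *get_socket_idx(struct sock *list, long pos)
{
	struct sock *s;

	for (s = list; s; s = s->next) {
		if (!pos)
			break;
		--pos;
	}
	return s;	/* was guarded by: if (node) */
}

int main(void)
{
	struct sock s3 = { 3, NULL }, s2 = { 2, &s3 }, s1 = { 1, &s2 };

	printf("idx 1 -> %d\n", get_socket_idx(&s1, 1)->id);	/* 2 */
	printf("idx 9 -> %p\n", (void *)get_socket_idx(&s1, 9));	/* NULL */
	return 0;
}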
diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
index cd6f7a991d80..a7d11ffe4284 100644
--- a/net/iucv/af_iucv.c
+++ b/net/iucv/af_iucv.c
@@ -156,14 +156,13 @@ static int afiucv_pm_freeze(struct device *dev) | |||
156 | { | 156 | { |
157 | struct iucv_sock *iucv; | 157 | struct iucv_sock *iucv; |
158 | struct sock *sk; | 158 | struct sock *sk; |
159 | struct hlist_node *node; | ||
160 | int err = 0; | 159 | int err = 0; |
161 | 160 | ||
162 | #ifdef CONFIG_PM_DEBUG | 161 | #ifdef CONFIG_PM_DEBUG |
163 | printk(KERN_WARNING "afiucv_pm_freeze\n"); | 162 | printk(KERN_WARNING "afiucv_pm_freeze\n"); |
164 | #endif | 163 | #endif |
165 | read_lock(&iucv_sk_list.lock); | 164 | read_lock(&iucv_sk_list.lock); |
166 | sk_for_each(sk, node, &iucv_sk_list.head) { | 165 | sk_for_each(sk, &iucv_sk_list.head) { |
167 | iucv = iucv_sk(sk); | 166 | iucv = iucv_sk(sk); |
168 | switch (sk->sk_state) { | 167 | switch (sk->sk_state) { |
169 | case IUCV_DISCONN: | 168 | case IUCV_DISCONN: |
@@ -194,13 +193,12 @@ static int afiucv_pm_freeze(struct device *dev) | |||
194 | static int afiucv_pm_restore_thaw(struct device *dev) | 193 | static int afiucv_pm_restore_thaw(struct device *dev) |
195 | { | 194 | { |
196 | struct sock *sk; | 195 | struct sock *sk; |
197 | struct hlist_node *node; | ||
198 | 196 | ||
199 | #ifdef CONFIG_PM_DEBUG | 197 | #ifdef CONFIG_PM_DEBUG |
200 | printk(KERN_WARNING "afiucv_pm_restore_thaw\n"); | 198 | printk(KERN_WARNING "afiucv_pm_restore_thaw\n"); |
201 | #endif | 199 | #endif |
202 | read_lock(&iucv_sk_list.lock); | 200 | read_lock(&iucv_sk_list.lock); |
203 | sk_for_each(sk, node, &iucv_sk_list.head) { | 201 | sk_for_each(sk, &iucv_sk_list.head) { |
204 | switch (sk->sk_state) { | 202 | switch (sk->sk_state) { |
205 | case IUCV_CONNECTED: | 203 | case IUCV_CONNECTED: |
206 | sk->sk_err = EPIPE; | 204 | sk->sk_err = EPIPE; |
@@ -390,9 +388,8 @@ static int afiucv_hs_send(struct iucv_message *imsg, struct sock *sock, | |||
390 | static struct sock *__iucv_get_sock_by_name(char *nm) | 388 | static struct sock *__iucv_get_sock_by_name(char *nm) |
391 | { | 389 | { |
392 | struct sock *sk; | 390 | struct sock *sk; |
393 | struct hlist_node *node; | ||
394 | 391 | ||
395 | sk_for_each(sk, node, &iucv_sk_list.head) | 392 | sk_for_each(sk, &iucv_sk_list.head) |
396 | if (!memcmp(&iucv_sk(sk)->src_name, nm, 8)) | 393 | if (!memcmp(&iucv_sk(sk)->src_name, nm, 8)) |
397 | return sk; | 394 | return sk; |
398 | 395 | ||
@@ -1678,7 +1675,6 @@ static int iucv_callback_connreq(struct iucv_path *path, | |||
1678 | unsigned char user_data[16]; | 1675 | unsigned char user_data[16]; |
1679 | unsigned char nuser_data[16]; | 1676 | unsigned char nuser_data[16]; |
1680 | unsigned char src_name[8]; | 1677 | unsigned char src_name[8]; |
1681 | struct hlist_node *node; | ||
1682 | struct sock *sk, *nsk; | 1678 | struct sock *sk, *nsk; |
1683 | struct iucv_sock *iucv, *niucv; | 1679 | struct iucv_sock *iucv, *niucv; |
1684 | int err; | 1680 | int err; |
@@ -1689,7 +1685,7 @@ static int iucv_callback_connreq(struct iucv_path *path, | |||
1689 | read_lock(&iucv_sk_list.lock); | 1685 | read_lock(&iucv_sk_list.lock); |
1690 | iucv = NULL; | 1686 | iucv = NULL; |
1691 | sk = NULL; | 1687 | sk = NULL; |
1692 | sk_for_each(sk, node, &iucv_sk_list.head) | 1688 | sk_for_each(sk, &iucv_sk_list.head) |
1693 | if (sk->sk_state == IUCV_LISTEN && | 1689 | if (sk->sk_state == IUCV_LISTEN && |
1694 | !memcmp(&iucv_sk(sk)->src_name, src_name, 8)) { | 1690 | !memcmp(&iucv_sk(sk)->src_name, src_name, 8)) { |
1695 | /* | 1691 | /* |
@@ -2115,7 +2111,6 @@ static int afiucv_hs_callback_rx(struct sock *sk, struct sk_buff *skb) | |||
2115 | static int afiucv_hs_rcv(struct sk_buff *skb, struct net_device *dev, | 2111 | static int afiucv_hs_rcv(struct sk_buff *skb, struct net_device *dev, |
2116 | struct packet_type *pt, struct net_device *orig_dev) | 2112 | struct packet_type *pt, struct net_device *orig_dev) |
2117 | { | 2113 | { |
2118 | struct hlist_node *node; | ||
2119 | struct sock *sk; | 2114 | struct sock *sk; |
2120 | struct iucv_sock *iucv; | 2115 | struct iucv_sock *iucv; |
2121 | struct af_iucv_trans_hdr *trans_hdr; | 2116 | struct af_iucv_trans_hdr *trans_hdr; |
@@ -2132,7 +2127,7 @@ static int afiucv_hs_rcv(struct sk_buff *skb, struct net_device *dev, | |||
2132 | iucv = NULL; | 2127 | iucv = NULL; |
2133 | sk = NULL; | 2128 | sk = NULL; |
2134 | read_lock(&iucv_sk_list.lock); | 2129 | read_lock(&iucv_sk_list.lock); |
2135 | sk_for_each(sk, node, &iucv_sk_list.head) { | 2130 | sk_for_each(sk, &iucv_sk_list.head) { |
2136 | if (trans_hdr->flags == AF_IUCV_FLAG_SYN) { | 2131 | if (trans_hdr->flags == AF_IUCV_FLAG_SYN) { |
2137 | if ((!memcmp(&iucv_sk(sk)->src_name, | 2132 | if ((!memcmp(&iucv_sk(sk)->src_name, |
2138 | trans_hdr->destAppName, 8)) && | 2133 | trans_hdr->destAppName, 8)) && |
@@ -2225,10 +2220,9 @@ static void afiucv_hs_callback_txnotify(struct sk_buff *skb, | |||
2225 | struct sk_buff *list_skb; | 2220 | struct sk_buff *list_skb; |
2226 | struct sk_buff *nskb; | 2221 | struct sk_buff *nskb; |
2227 | unsigned long flags; | 2222 | unsigned long flags; |
2228 | struct hlist_node *node; | ||
2229 | 2223 | ||
2230 | read_lock_irqsave(&iucv_sk_list.lock, flags); | 2224 | read_lock_irqsave(&iucv_sk_list.lock, flags); |
2231 | sk_for_each(sk, node, &iucv_sk_list.head) | 2225 | sk_for_each(sk, &iucv_sk_list.head) |
2232 | if (sk == isk) { | 2226 | if (sk == isk) { |
2233 | iucv = iucv_sk(sk); | 2227 | iucv = iucv_sk(sk); |
2234 | break; | 2228 | break; |
@@ -2299,14 +2293,13 @@ static int afiucv_netdev_event(struct notifier_block *this, | |||
2299 | unsigned long event, void *ptr) | 2293 | unsigned long event, void *ptr) |
2300 | { | 2294 | { |
2301 | struct net_device *event_dev = (struct net_device *)ptr; | 2295 | struct net_device *event_dev = (struct net_device *)ptr; |
2302 | struct hlist_node *node; | ||
2303 | struct sock *sk; | 2296 | struct sock *sk; |
2304 | struct iucv_sock *iucv; | 2297 | struct iucv_sock *iucv; |
2305 | 2298 | ||
2306 | switch (event) { | 2299 | switch (event) { |
2307 | case NETDEV_REBOOT: | 2300 | case NETDEV_REBOOT: |
2308 | case NETDEV_GOING_DOWN: | 2301 | case NETDEV_GOING_DOWN: |
2309 | sk_for_each(sk, node, &iucv_sk_list.head) { | 2302 | sk_for_each(sk, &iucv_sk_list.head) { |
2310 | iucv = iucv_sk(sk); | 2303 | iucv = iucv_sk(sk); |
2311 | if ((iucv->hs_dev == event_dev) && | 2304 | if ((iucv->hs_dev == event_dev) && |
2312 | (sk->sk_state == IUCV_CONNECTED)) { | 2305 | (sk->sk_state == IUCV_CONNECTED)) { |
diff --git a/net/key/af_key.c b/net/key/af_key.c
index 9ef79851f297..556fdafdd1ea 100644
--- a/net/key/af_key.c
+++ b/net/key/af_key.c
@@ -225,7 +225,6 @@ static int pfkey_broadcast(struct sk_buff *skb, gfp_t allocation, | |||
225 | { | 225 | { |
226 | struct netns_pfkey *net_pfkey = net_generic(net, pfkey_net_id); | 226 | struct netns_pfkey *net_pfkey = net_generic(net, pfkey_net_id); |
227 | struct sock *sk; | 227 | struct sock *sk; |
228 | struct hlist_node *node; | ||
229 | struct sk_buff *skb2 = NULL; | 228 | struct sk_buff *skb2 = NULL; |
230 | int err = -ESRCH; | 229 | int err = -ESRCH; |
231 | 230 | ||
@@ -236,7 +235,7 @@ static int pfkey_broadcast(struct sk_buff *skb, gfp_t allocation, | |||
236 | return -ENOMEM; | 235 | return -ENOMEM; |
237 | 236 | ||
238 | rcu_read_lock(); | 237 | rcu_read_lock(); |
239 | sk_for_each_rcu(sk, node, &net_pfkey->table) { | 238 | sk_for_each_rcu(sk, &net_pfkey->table) { |
240 | struct pfkey_sock *pfk = pfkey_sk(sk); | 239 | struct pfkey_sock *pfk = pfkey_sk(sk); |
241 | int err2; | 240 | int err2; |
242 | 241 | ||
diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c
index dcfd64e83ab7..d36875f3427e 100644
--- a/net/l2tp/l2tp_core.c
+++ b/net/l2tp/l2tp_core.c
@@ -221,10 +221,9 @@ static struct l2tp_session *l2tp_session_find_2(struct net *net, u32 session_id) | |||
221 | struct hlist_head *session_list = | 221 | struct hlist_head *session_list = |
222 | l2tp_session_id_hash_2(pn, session_id); | 222 | l2tp_session_id_hash_2(pn, session_id); |
223 | struct l2tp_session *session; | 223 | struct l2tp_session *session; |
224 | struct hlist_node *walk; | ||
225 | 224 | ||
226 | rcu_read_lock_bh(); | 225 | rcu_read_lock_bh(); |
227 | hlist_for_each_entry_rcu(session, walk, session_list, global_hlist) { | 226 | hlist_for_each_entry_rcu(session, session_list, global_hlist) { |
228 | if (session->session_id == session_id) { | 227 | if (session->session_id == session_id) { |
229 | rcu_read_unlock_bh(); | 228 | rcu_read_unlock_bh(); |
230 | return session; | 229 | return session; |
@@ -253,7 +252,6 @@ struct l2tp_session *l2tp_session_find(struct net *net, struct l2tp_tunnel *tunn | |||
253 | { | 252 | { |
254 | struct hlist_head *session_list; | 253 | struct hlist_head *session_list; |
255 | struct l2tp_session *session; | 254 | struct l2tp_session *session; |
256 | struct hlist_node *walk; | ||
257 | 255 | ||
258 | /* In L2TPv3, session_ids are unique over all tunnels and we | 256 | /* In L2TPv3, session_ids are unique over all tunnels and we |
259 | * sometimes need to look them up before we know the | 257 | * sometimes need to look them up before we know the |
@@ -264,7 +262,7 @@ struct l2tp_session *l2tp_session_find(struct net *net, struct l2tp_tunnel *tunn | |||
264 | 262 | ||
265 | session_list = l2tp_session_id_hash(tunnel, session_id); | 263 | session_list = l2tp_session_id_hash(tunnel, session_id); |
266 | read_lock_bh(&tunnel->hlist_lock); | 264 | read_lock_bh(&tunnel->hlist_lock); |
267 | hlist_for_each_entry(session, walk, session_list, hlist) { | 265 | hlist_for_each_entry(session, session_list, hlist) { |
268 | if (session->session_id == session_id) { | 266 | if (session->session_id == session_id) { |
269 | read_unlock_bh(&tunnel->hlist_lock); | 267 | read_unlock_bh(&tunnel->hlist_lock); |
270 | return session; | 268 | return session; |
@@ -279,13 +277,12 @@ EXPORT_SYMBOL_GPL(l2tp_session_find); | |||
279 | struct l2tp_session *l2tp_session_find_nth(struct l2tp_tunnel *tunnel, int nth) | 277 | struct l2tp_session *l2tp_session_find_nth(struct l2tp_tunnel *tunnel, int nth) |
280 | { | 278 | { |
281 | int hash; | 279 | int hash; |
282 | struct hlist_node *walk; | ||
283 | struct l2tp_session *session; | 280 | struct l2tp_session *session; |
284 | int count = 0; | 281 | int count = 0; |
285 | 282 | ||
286 | read_lock_bh(&tunnel->hlist_lock); | 283 | read_lock_bh(&tunnel->hlist_lock); |
287 | for (hash = 0; hash < L2TP_HASH_SIZE; hash++) { | 284 | for (hash = 0; hash < L2TP_HASH_SIZE; hash++) { |
288 | hlist_for_each_entry(session, walk, &tunnel->session_hlist[hash], hlist) { | 285 | hlist_for_each_entry(session, &tunnel->session_hlist[hash], hlist) { |
289 | if (++count > nth) { | 286 | if (++count > nth) { |
290 | read_unlock_bh(&tunnel->hlist_lock); | 287 | read_unlock_bh(&tunnel->hlist_lock); |
291 | return session; | 288 | return session; |
@@ -306,12 +303,11 @@ struct l2tp_session *l2tp_session_find_by_ifname(struct net *net, char *ifname) | |||
306 | { | 303 | { |
307 | struct l2tp_net *pn = l2tp_pernet(net); | 304 | struct l2tp_net *pn = l2tp_pernet(net); |
308 | int hash; | 305 | int hash; |
309 | struct hlist_node *walk; | ||
310 | struct l2tp_session *session; | 306 | struct l2tp_session *session; |
311 | 307 | ||
312 | rcu_read_lock_bh(); | 308 | rcu_read_lock_bh(); |
313 | for (hash = 0; hash < L2TP_HASH_SIZE_2; hash++) { | 309 | for (hash = 0; hash < L2TP_HASH_SIZE_2; hash++) { |
314 | hlist_for_each_entry_rcu(session, walk, &pn->l2tp_session_hlist[hash], global_hlist) { | 310 | hlist_for_each_entry_rcu(session, &pn->l2tp_session_hlist[hash], global_hlist) { |
315 | if (!strcmp(session->ifname, ifname)) { | 311 | if (!strcmp(session->ifname, ifname)) { |
316 | rcu_read_unlock_bh(); | 312 | rcu_read_unlock_bh(); |
317 | return session; | 313 | return session; |
diff --git a/net/l2tp/l2tp_ip.c b/net/l2tp/l2tp_ip.c
index f7ac8f42fee2..7f41b7051269 100644
--- a/net/l2tp/l2tp_ip.c
+++ b/net/l2tp/l2tp_ip.c
@@ -49,10 +49,9 @@ static inline struct l2tp_ip_sock *l2tp_ip_sk(const struct sock *sk) | |||
49 | 49 | ||
50 | static struct sock *__l2tp_ip_bind_lookup(struct net *net, __be32 laddr, int dif, u32 tunnel_id) | 50 | static struct sock *__l2tp_ip_bind_lookup(struct net *net, __be32 laddr, int dif, u32 tunnel_id) |
51 | { | 51 | { |
52 | struct hlist_node *node; | ||
53 | struct sock *sk; | 52 | struct sock *sk; |
54 | 53 | ||
55 | sk_for_each_bound(sk, node, &l2tp_ip_bind_table) { | 54 | sk_for_each_bound(sk, &l2tp_ip_bind_table) { |
56 | struct inet_sock *inet = inet_sk(sk); | 55 | struct inet_sock *inet = inet_sk(sk); |
57 | struct l2tp_ip_sock *l2tp = l2tp_ip_sk(sk); | 56 | struct l2tp_ip_sock *l2tp = l2tp_ip_sk(sk); |
58 | 57 | ||
diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c
index 8ee4a86ae996..41f2f8126ebc 100644
--- a/net/l2tp/l2tp_ip6.c
+++ b/net/l2tp/l2tp_ip6.c
@@ -60,10 +60,9 @@ static struct sock *__l2tp_ip6_bind_lookup(struct net *net, | |||
60 | struct in6_addr *laddr, | 60 | struct in6_addr *laddr, |
61 | int dif, u32 tunnel_id) | 61 | int dif, u32 tunnel_id) |
62 | { | 62 | { |
63 | struct hlist_node *node; | ||
64 | struct sock *sk; | 63 | struct sock *sk; |
65 | 64 | ||
66 | sk_for_each_bound(sk, node, &l2tp_ip6_bind_table) { | 65 | sk_for_each_bound(sk, &l2tp_ip6_bind_table) { |
67 | struct in6_addr *addr = inet6_rcv_saddr(sk); | 66 | struct in6_addr *addr = inet6_rcv_saddr(sk); |
68 | struct l2tp_ip6_sock *l2tp = l2tp_ip6_sk(sk); | 67 | struct l2tp_ip6_sock *l2tp = l2tp_ip6_sk(sk); |
69 | 68 | ||
diff --git a/net/llc/llc_sap.c b/net/llc/llc_sap.c
index 7c5073badc73..78be45cda5c1 100644
--- a/net/llc/llc_sap.c
+++ b/net/llc/llc_sap.c
@@ -393,12 +393,11 @@ static void llc_sap_mcast(struct llc_sap *sap, | |||
393 | { | 393 | { |
394 | int i = 0, count = 256 / sizeof(struct sock *); | 394 | int i = 0, count = 256 / sizeof(struct sock *); |
395 | struct sock *sk, *stack[count]; | 395 | struct sock *sk, *stack[count]; |
396 | struct hlist_node *node; | ||
397 | struct llc_sock *llc; | 396 | struct llc_sock *llc; |
398 | struct hlist_head *dev_hb = llc_sk_dev_hash(sap, skb->dev->ifindex); | 397 | struct hlist_head *dev_hb = llc_sk_dev_hash(sap, skb->dev->ifindex); |
399 | 398 | ||
400 | spin_lock_bh(&sap->sk_lock); | 399 | spin_lock_bh(&sap->sk_lock); |
401 | hlist_for_each_entry(llc, node, dev_hb, dev_hash_node) { | 400 | hlist_for_each_entry(llc, dev_hb, dev_hash_node) { |
402 | 401 | ||
403 | sk = &llc->sk; | 402 | sk = &llc->sk; |
404 | 403 | ||
diff --git a/net/mac80211/mesh_pathtbl.c b/net/mac80211/mesh_pathtbl.c
index 6b3c4e119c63..dc7c8df40c2c 100644
--- a/net/mac80211/mesh_pathtbl.c
+++ b/net/mac80211/mesh_pathtbl.c
@@ -72,9 +72,9 @@ static inline struct mesh_table *resize_dereference_mpp_paths(void) | |||
72 | * it's used twice. So it is illegal to do | 72 | * it's used twice. So it is illegal to do |
73 | * for_each_mesh_entry(rcu_dereference(...), ...) | 73 | * for_each_mesh_entry(rcu_dereference(...), ...) |
74 | */ | 74 | */ |
75 | #define for_each_mesh_entry(tbl, p, node, i) \ | 75 | #define for_each_mesh_entry(tbl, node, i) \ |
76 | for (i = 0; i <= tbl->hash_mask; i++) \ | 76 | for (i = 0; i <= tbl->hash_mask; i++) \ |
77 | hlist_for_each_entry_rcu(node, p, &tbl->hash_buckets[i], list) | 77 | hlist_for_each_entry_rcu(node, &tbl->hash_buckets[i], list) |
78 | 78 | ||
79 | 79 | ||
80 | static struct mesh_table *mesh_table_alloc(int size_order) | 80 | static struct mesh_table *mesh_table_alloc(int size_order) |
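Wrapper macros layered on the hlist iterators shrink the same way: for_each_mesh_entry() simply forwards to hlist_for_each_entry_rcu(), so its own cursor parameter vanishes with it (the surviving 'node' argument is the mpath_node entry, not an hlist_node). A userspace sketch of the same bucket-array pattern, with illustrative names throughout:

#include <stdio.h>

/* Toy fixed-size hash table; illustrative only. */
struct item { int val; struct item *next; };
struct table { int hash_mask; struct item *hash_buckets[4]; };

/* Per-bucket iterator (stand-in for hlist_for_each_entry_rcu). */
#define bucket_for_each(pos, head) \
	for (pos = (head); pos; pos = pos->next)

/* Whole-table wrapper, post-conversion shape: no extra cursor. */
#define for_each_table_entry(tbl, pos, i)		\
	for (i = 0; i <= (tbl)->hash_mask; i++)		\
		bucket_for_each(pos, (tbl)->hash_buckets[i])

int main(void)
{
	struct item a = { 10, NULL }, b = { 20, NULL }, c = { 30, &b };
	struct table tbl = { 3, { &a, NULL, &c, NULL } };
	struct item *p;
	int i;

	for_each_table_entry(&tbl, p, i)
		printf("bucket %d: %d\n", i, p->val);
	return 0;
}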
@@ -139,7 +139,7 @@ static void mesh_table_free(struct mesh_table *tbl, bool free_leafs) | |||
139 | } | 139 | } |
140 | if (free_leafs) { | 140 | if (free_leafs) { |
141 | spin_lock_bh(&tbl->gates_lock); | 141 | spin_lock_bh(&tbl->gates_lock); |
142 | hlist_for_each_entry_safe(gate, p, q, | 142 | hlist_for_each_entry_safe(gate, q, |
143 | tbl->known_gates, list) { | 143 | tbl->known_gates, list) { |
144 | hlist_del(&gate->list); | 144 | hlist_del(&gate->list); |
145 | kfree(gate); | 145 | kfree(gate); |
@@ -333,12 +333,11 @@ static struct mesh_path *mpath_lookup(struct mesh_table *tbl, const u8 *dst, | |||
333 | struct ieee80211_sub_if_data *sdata) | 333 | struct ieee80211_sub_if_data *sdata) |
334 | { | 334 | { |
335 | struct mesh_path *mpath; | 335 | struct mesh_path *mpath; |
336 | struct hlist_node *n; | ||
337 | struct hlist_head *bucket; | 336 | struct hlist_head *bucket; |
338 | struct mpath_node *node; | 337 | struct mpath_node *node; |
339 | 338 | ||
340 | bucket = &tbl->hash_buckets[mesh_table_hash(dst, sdata, tbl)]; | 339 | bucket = &tbl->hash_buckets[mesh_table_hash(dst, sdata, tbl)]; |
341 | hlist_for_each_entry_rcu(node, n, bucket, list) { | 340 | hlist_for_each_entry_rcu(node, bucket, list) { |
342 | mpath = node->mpath; | 341 | mpath = node->mpath; |
343 | if (mpath->sdata == sdata && | 342 | if (mpath->sdata == sdata && |
344 | ether_addr_equal(dst, mpath->dst)) { | 343 | ether_addr_equal(dst, mpath->dst)) { |
@@ -389,11 +388,10 @@ mesh_path_lookup_by_idx(struct ieee80211_sub_if_data *sdata, int idx) | |||
389 | { | 388 | { |
390 | struct mesh_table *tbl = rcu_dereference(mesh_paths); | 389 | struct mesh_table *tbl = rcu_dereference(mesh_paths); |
391 | struct mpath_node *node; | 390 | struct mpath_node *node; |
392 | struct hlist_node *p; | ||
393 | int i; | 391 | int i; |
394 | int j = 0; | 392 | int j = 0; |
395 | 393 | ||
396 | for_each_mesh_entry(tbl, p, node, i) { | 394 | for_each_mesh_entry(tbl, node, i) { |
397 | if (sdata && node->mpath->sdata != sdata) | 395 | if (sdata && node->mpath->sdata != sdata) |
398 | continue; | 396 | continue; |
399 | if (j++ == idx) { | 397 | if (j++ == idx) { |
@@ -417,13 +415,12 @@ int mesh_path_add_gate(struct mesh_path *mpath) | |||
417 | { | 415 | { |
418 | struct mesh_table *tbl; | 416 | struct mesh_table *tbl; |
419 | struct mpath_node *gate, *new_gate; | 417 | struct mpath_node *gate, *new_gate; |
420 | struct hlist_node *n; | ||
421 | int err; | 418 | int err; |
422 | 419 | ||
423 | rcu_read_lock(); | 420 | rcu_read_lock(); |
424 | tbl = rcu_dereference(mesh_paths); | 421 | tbl = rcu_dereference(mesh_paths); |
425 | 422 | ||
426 | hlist_for_each_entry_rcu(gate, n, tbl->known_gates, list) | 423 | hlist_for_each_entry_rcu(gate, tbl->known_gates, list) |
427 | if (gate->mpath == mpath) { | 424 | if (gate->mpath == mpath) { |
428 | err = -EEXIST; | 425 | err = -EEXIST; |
429 | goto err_rcu; | 426 | goto err_rcu; |
@@ -460,9 +457,9 @@ err_rcu: | |||
460 | static void mesh_gate_del(struct mesh_table *tbl, struct mesh_path *mpath) | 457 | static void mesh_gate_del(struct mesh_table *tbl, struct mesh_path *mpath) |
461 | { | 458 | { |
462 | struct mpath_node *gate; | 459 | struct mpath_node *gate; |
463 | struct hlist_node *p, *q; | 460 | struct hlist_node *q; |
464 | 461 | ||
465 | hlist_for_each_entry_safe(gate, p, q, tbl->known_gates, list) { | 462 | hlist_for_each_entry_safe(gate, q, tbl->known_gates, list) { |
466 | if (gate->mpath != mpath) | 463 | if (gate->mpath != mpath) |
467 | continue; | 464 | continue; |
468 | spin_lock_bh(&tbl->gates_lock); | 465 | spin_lock_bh(&tbl->gates_lock); |
@@ -504,7 +501,6 @@ int mesh_path_add(struct ieee80211_sub_if_data *sdata, const u8 *dst) | |||
504 | struct mesh_path *mpath, *new_mpath; | 501 | struct mesh_path *mpath, *new_mpath; |
505 | struct mpath_node *node, *new_node; | 502 | struct mpath_node *node, *new_node; |
506 | struct hlist_head *bucket; | 503 | struct hlist_head *bucket; |
507 | struct hlist_node *n; | ||
508 | int grow = 0; | 504 | int grow = 0; |
509 | int err = 0; | 505 | int err = 0; |
510 | u32 hash_idx; | 506 | u32 hash_idx; |
@@ -550,7 +546,7 @@ int mesh_path_add(struct ieee80211_sub_if_data *sdata, const u8 *dst) | |||
550 | spin_lock(&tbl->hashwlock[hash_idx]); | 546 | spin_lock(&tbl->hashwlock[hash_idx]); |
551 | 547 | ||
552 | err = -EEXIST; | 548 | err = -EEXIST; |
553 | hlist_for_each_entry(node, n, bucket, list) { | 549 | hlist_for_each_entry(node, bucket, list) { |
554 | mpath = node->mpath; | 550 | mpath = node->mpath; |
555 | if (mpath->sdata == sdata && | 551 | if (mpath->sdata == sdata && |
556 | ether_addr_equal(dst, mpath->dst)) | 552 | ether_addr_equal(dst, mpath->dst)) |
@@ -640,7 +636,6 @@ int mpp_path_add(struct ieee80211_sub_if_data *sdata, | |||
640 | struct mesh_path *mpath, *new_mpath; | 636 | struct mesh_path *mpath, *new_mpath; |
641 | struct mpath_node *node, *new_node; | 637 | struct mpath_node *node, *new_node; |
642 | struct hlist_head *bucket; | 638 | struct hlist_head *bucket; |
643 | struct hlist_node *n; | ||
644 | int grow = 0; | 639 | int grow = 0; |
645 | int err = 0; | 640 | int err = 0; |
646 | u32 hash_idx; | 641 | u32 hash_idx; |
@@ -680,7 +675,7 @@ int mpp_path_add(struct ieee80211_sub_if_data *sdata, | |||
680 | spin_lock(&tbl->hashwlock[hash_idx]); | 675 | spin_lock(&tbl->hashwlock[hash_idx]); |
681 | 676 | ||
682 | err = -EEXIST; | 677 | err = -EEXIST; |
683 | hlist_for_each_entry(node, n, bucket, list) { | 678 | hlist_for_each_entry(node, bucket, list) { |
684 | mpath = node->mpath; | 679 | mpath = node->mpath; |
685 | if (mpath->sdata == sdata && | 680 | if (mpath->sdata == sdata && |
686 | ether_addr_equal(dst, mpath->dst)) | 681 | ether_addr_equal(dst, mpath->dst)) |
@@ -725,14 +720,13 @@ void mesh_plink_broken(struct sta_info *sta) | |||
725 | static const u8 bcast[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff}; | 720 | static const u8 bcast[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff}; |
726 | struct mesh_path *mpath; | 721 | struct mesh_path *mpath; |
727 | struct mpath_node *node; | 722 | struct mpath_node *node; |
728 | struct hlist_node *p; | ||
729 | struct ieee80211_sub_if_data *sdata = sta->sdata; | 723 | struct ieee80211_sub_if_data *sdata = sta->sdata; |
730 | int i; | 724 | int i; |
731 | __le16 reason = cpu_to_le16(WLAN_REASON_MESH_PATH_DEST_UNREACHABLE); | 725 | __le16 reason = cpu_to_le16(WLAN_REASON_MESH_PATH_DEST_UNREACHABLE); |
732 | 726 | ||
733 | rcu_read_lock(); | 727 | rcu_read_lock(); |
734 | tbl = rcu_dereference(mesh_paths); | 728 | tbl = rcu_dereference(mesh_paths); |
735 | for_each_mesh_entry(tbl, p, node, i) { | 729 | for_each_mesh_entry(tbl, node, i) { |
736 | mpath = node->mpath; | 730 | mpath = node->mpath; |
737 | if (rcu_dereference(mpath->next_hop) == sta && | 731 | if (rcu_dereference(mpath->next_hop) == sta && |
738 | mpath->flags & MESH_PATH_ACTIVE && | 732 | mpath->flags & MESH_PATH_ACTIVE && |
@@ -792,13 +786,12 @@ void mesh_path_flush_by_nexthop(struct sta_info *sta) | |||
792 | struct mesh_table *tbl; | 786 | struct mesh_table *tbl; |
793 | struct mesh_path *mpath; | 787 | struct mesh_path *mpath; |
794 | struct mpath_node *node; | 788 | struct mpath_node *node; |
795 | struct hlist_node *p; | ||
796 | int i; | 789 | int i; |
797 | 790 | ||
798 | rcu_read_lock(); | 791 | rcu_read_lock(); |
799 | read_lock_bh(&pathtbl_resize_lock); | 792 | read_lock_bh(&pathtbl_resize_lock); |
800 | tbl = resize_dereference_mesh_paths(); | 793 | tbl = resize_dereference_mesh_paths(); |
801 | for_each_mesh_entry(tbl, p, node, i) { | 794 | for_each_mesh_entry(tbl, node, i) { |
802 | mpath = node->mpath; | 795 | mpath = node->mpath; |
803 | if (rcu_dereference(mpath->next_hop) == sta) { | 796 | if (rcu_dereference(mpath->next_hop) == sta) { |
804 | spin_lock(&tbl->hashwlock[i]); | 797 | spin_lock(&tbl->hashwlock[i]); |
@@ -815,11 +808,10 @@ static void table_flush_by_iface(struct mesh_table *tbl, | |||
815 | { | 808 | { |
816 | struct mesh_path *mpath; | 809 | struct mesh_path *mpath; |
817 | struct mpath_node *node; | 810 | struct mpath_node *node; |
818 | struct hlist_node *p; | ||
819 | int i; | 811 | int i; |
820 | 812 | ||
821 | WARN_ON(!rcu_read_lock_held()); | 813 | WARN_ON(!rcu_read_lock_held()); |
822 | for_each_mesh_entry(tbl, p, node, i) { | 814 | for_each_mesh_entry(tbl, node, i) { |
823 | mpath = node->mpath; | 815 | mpath = node->mpath; |
824 | if (mpath->sdata != sdata) | 816 | if (mpath->sdata != sdata) |
825 | continue; | 817 | continue; |
@@ -865,7 +857,6 @@ int mesh_path_del(struct ieee80211_sub_if_data *sdata, const u8 *addr) | |||
865 | struct mesh_path *mpath; | 857 | struct mesh_path *mpath; |
866 | struct mpath_node *node; | 858 | struct mpath_node *node; |
867 | struct hlist_head *bucket; | 859 | struct hlist_head *bucket; |
868 | struct hlist_node *n; | ||
869 | int hash_idx; | 860 | int hash_idx; |
870 | int err = 0; | 861 | int err = 0; |
871 | 862 | ||
@@ -875,7 +866,7 @@ int mesh_path_del(struct ieee80211_sub_if_data *sdata, const u8 *addr) | |||
875 | bucket = &tbl->hash_buckets[hash_idx]; | 866 | bucket = &tbl->hash_buckets[hash_idx]; |
876 | 867 | ||
877 | spin_lock(&tbl->hashwlock[hash_idx]); | 868 | spin_lock(&tbl->hashwlock[hash_idx]); |
878 | hlist_for_each_entry(node, n, bucket, list) { | 869 | hlist_for_each_entry(node, bucket, list) { |
879 | mpath = node->mpath; | 870 | mpath = node->mpath; |
880 | if (mpath->sdata == sdata && | 871 | if (mpath->sdata == sdata && |
881 | ether_addr_equal(addr, mpath->dst)) { | 872 | ether_addr_equal(addr, mpath->dst)) { |
@@ -920,7 +911,6 @@ void mesh_path_tx_pending(struct mesh_path *mpath) | |||
920 | int mesh_path_send_to_gates(struct mesh_path *mpath) | 911 | int mesh_path_send_to_gates(struct mesh_path *mpath) |
921 | { | 912 | { |
922 | struct ieee80211_sub_if_data *sdata = mpath->sdata; | 913 | struct ieee80211_sub_if_data *sdata = mpath->sdata; |
923 | struct hlist_node *n; | ||
924 | struct mesh_table *tbl; | 914 | struct mesh_table *tbl; |
925 | struct mesh_path *from_mpath = mpath; | 915 | struct mesh_path *from_mpath = mpath; |
926 | struct mpath_node *gate = NULL; | 916 | struct mpath_node *gate = NULL; |
@@ -935,7 +925,7 @@ int mesh_path_send_to_gates(struct mesh_path *mpath) | |||
935 | if (!known_gates) | 925 | if (!known_gates) |
936 | return -EHOSTUNREACH; | 926 | return -EHOSTUNREACH; |
937 | 927 | ||
938 | hlist_for_each_entry_rcu(gate, n, known_gates, list) { | 928 | hlist_for_each_entry_rcu(gate, known_gates, list) { |
939 | if (gate->mpath->sdata != sdata) | 929 | if (gate->mpath->sdata != sdata) |
940 | continue; | 930 | continue; |
941 | 931 | ||
@@ -951,7 +941,7 @@ int mesh_path_send_to_gates(struct mesh_path *mpath) | |||
951 | } | 941 | } |
952 | } | 942 | } |
953 | 943 | ||
954 | hlist_for_each_entry_rcu(gate, n, known_gates, list) | 944 | hlist_for_each_entry_rcu(gate, known_gates, list) |
955 | if (gate->mpath->sdata == sdata) { | 945 | if (gate->mpath->sdata == sdata) { |
956 | mpath_dbg(sdata, "Sending to %pM\n", gate->mpath->dst); | 946 | mpath_dbg(sdata, "Sending to %pM\n", gate->mpath->dst); |
957 | mesh_path_tx_pending(gate->mpath); | 947 | mesh_path_tx_pending(gate->mpath); |
@@ -1096,12 +1086,11 @@ void mesh_path_expire(struct ieee80211_sub_if_data *sdata) | |||
1096 | struct mesh_table *tbl; | 1086 | struct mesh_table *tbl; |
1097 | struct mesh_path *mpath; | 1087 | struct mesh_path *mpath; |
1098 | struct mpath_node *node; | 1088 | struct mpath_node *node; |
1099 | struct hlist_node *p; | ||
1100 | int i; | 1089 | int i; |
1101 | 1090 | ||
1102 | rcu_read_lock(); | 1091 | rcu_read_lock(); |
1103 | tbl = rcu_dereference(mesh_paths); | 1092 | tbl = rcu_dereference(mesh_paths); |
1104 | for_each_mesh_entry(tbl, p, node, i) { | 1093 | for_each_mesh_entry(tbl, node, i) { |
1105 | if (node->mpath->sdata != sdata) | 1094 | if (node->mpath->sdata != sdata) |
1106 | continue; | 1095 | continue; |
1107 | mpath = node->mpath; | 1096 | mpath = node->mpath; |
diff --git a/net/netfilter/ipvs/ip_vs_conn.c b/net/netfilter/ipvs/ip_vs_conn.c
index 9f00db7e03f2..704e514e02ab 100644
--- a/net/netfilter/ipvs/ip_vs_conn.c
+++ b/net/netfilter/ipvs/ip_vs_conn.c
@@ -259,13 +259,12 @@ __ip_vs_conn_in_get(const struct ip_vs_conn_param *p) | |||
259 | { | 259 | { |
260 | unsigned int hash; | 260 | unsigned int hash; |
261 | struct ip_vs_conn *cp; | 261 | struct ip_vs_conn *cp; |
262 | struct hlist_node *n; | ||
263 | 262 | ||
264 | hash = ip_vs_conn_hashkey_param(p, false); | 263 | hash = ip_vs_conn_hashkey_param(p, false); |
265 | 264 | ||
266 | ct_read_lock(hash); | 265 | ct_read_lock(hash); |
267 | 266 | ||
268 | hlist_for_each_entry(cp, n, &ip_vs_conn_tab[hash], c_list) { | 267 | hlist_for_each_entry(cp, &ip_vs_conn_tab[hash], c_list) { |
269 | if (cp->af == p->af && | 268 | if (cp->af == p->af && |
270 | p->cport == cp->cport && p->vport == cp->vport && | 269 | p->cport == cp->cport && p->vport == cp->vport && |
271 | ip_vs_addr_equal(p->af, p->caddr, &cp->caddr) && | 270 | ip_vs_addr_equal(p->af, p->caddr, &cp->caddr) && |
@@ -344,13 +343,12 @@ struct ip_vs_conn *ip_vs_ct_in_get(const struct ip_vs_conn_param *p) | |||
344 | { | 343 | { |
345 | unsigned int hash; | 344 | unsigned int hash; |
346 | struct ip_vs_conn *cp; | 345 | struct ip_vs_conn *cp; |
347 | struct hlist_node *n; | ||
348 | 346 | ||
349 | hash = ip_vs_conn_hashkey_param(p, false); | 347 | hash = ip_vs_conn_hashkey_param(p, false); |
350 | 348 | ||
351 | ct_read_lock(hash); | 349 | ct_read_lock(hash); |
352 | 350 | ||
353 | hlist_for_each_entry(cp, n, &ip_vs_conn_tab[hash], c_list) { | 351 | hlist_for_each_entry(cp, &ip_vs_conn_tab[hash], c_list) { |
354 | if (!ip_vs_conn_net_eq(cp, p->net)) | 352 | if (!ip_vs_conn_net_eq(cp, p->net)) |
355 | continue; | 353 | continue; |
356 | if (p->pe_data && p->pe->ct_match) { | 354 | if (p->pe_data && p->pe->ct_match) { |
@@ -394,7 +392,6 @@ struct ip_vs_conn *ip_vs_conn_out_get(const struct ip_vs_conn_param *p) | |||
394 | { | 392 | { |
395 | unsigned int hash; | 393 | unsigned int hash; |
396 | struct ip_vs_conn *cp, *ret=NULL; | 394 | struct ip_vs_conn *cp, *ret=NULL; |
397 | struct hlist_node *n; | ||
398 | 395 | ||
399 | /* | 396 | /* |
400 | * Check for "full" addressed entries | 397 | * Check for "full" addressed entries |
@@ -403,7 +400,7 @@ struct ip_vs_conn *ip_vs_conn_out_get(const struct ip_vs_conn_param *p) | |||
403 | 400 | ||
404 | ct_read_lock(hash); | 401 | ct_read_lock(hash); |
405 | 402 | ||
406 | hlist_for_each_entry(cp, n, &ip_vs_conn_tab[hash], c_list) { | 403 | hlist_for_each_entry(cp, &ip_vs_conn_tab[hash], c_list) { |
407 | if (cp->af == p->af && | 404 | if (cp->af == p->af && |
408 | p->vport == cp->cport && p->cport == cp->dport && | 405 | p->vport == cp->cport && p->cport == cp->dport && |
409 | ip_vs_addr_equal(p->af, p->vaddr, &cp->caddr) && | 406 | ip_vs_addr_equal(p->af, p->vaddr, &cp->caddr) && |
@@ -953,11 +950,10 @@ static void *ip_vs_conn_array(struct seq_file *seq, loff_t pos) | |||
953 | int idx; | 950 | int idx; |
954 | struct ip_vs_conn *cp; | 951 | struct ip_vs_conn *cp; |
955 | struct ip_vs_iter_state *iter = seq->private; | 952 | struct ip_vs_iter_state *iter = seq->private; |
956 | struct hlist_node *n; | ||
957 | 953 | ||
958 | for (idx = 0; idx < ip_vs_conn_tab_size; idx++) { | 954 | for (idx = 0; idx < ip_vs_conn_tab_size; idx++) { |
959 | ct_read_lock_bh(idx); | 955 | ct_read_lock_bh(idx); |
960 | hlist_for_each_entry(cp, n, &ip_vs_conn_tab[idx], c_list) { | 956 | hlist_for_each_entry(cp, &ip_vs_conn_tab[idx], c_list) { |
961 | if (pos-- == 0) { | 957 | if (pos-- == 0) { |
962 | iter->l = &ip_vs_conn_tab[idx]; | 958 | iter->l = &ip_vs_conn_tab[idx]; |
963 | return cp; | 959 | return cp; |
@@ -981,7 +977,6 @@ static void *ip_vs_conn_seq_next(struct seq_file *seq, void *v, loff_t *pos) | |||
981 | { | 977 | { |
982 | struct ip_vs_conn *cp = v; | 978 | struct ip_vs_conn *cp = v; |
983 | struct ip_vs_iter_state *iter = seq->private; | 979 | struct ip_vs_iter_state *iter = seq->private; |
984 | struct hlist_node *e; | ||
985 | struct hlist_head *l = iter->l; | 980 | struct hlist_head *l = iter->l; |
986 | int idx; | 981 | int idx; |
987 | 982 | ||
@@ -990,15 +985,15 @@ static void *ip_vs_conn_seq_next(struct seq_file *seq, void *v, loff_t *pos) | |||
990 | return ip_vs_conn_array(seq, 0); | 985 | return ip_vs_conn_array(seq, 0); |
991 | 986 | ||
992 | /* more on same hash chain? */ | 987 | /* more on same hash chain? */ |
993 | if ((e = cp->c_list.next)) | 988 | if (cp->c_list.next) |
994 | return hlist_entry(e, struct ip_vs_conn, c_list); | 989 | return hlist_entry(cp->c_list.next, struct ip_vs_conn, c_list); |
995 | 990 | ||
996 | idx = l - ip_vs_conn_tab; | 991 | idx = l - ip_vs_conn_tab; |
997 | ct_read_unlock_bh(idx); | 992 | ct_read_unlock_bh(idx); |
998 | 993 | ||
999 | while (++idx < ip_vs_conn_tab_size) { | 994 | while (++idx < ip_vs_conn_tab_size) { |
1000 | ct_read_lock_bh(idx); | 995 | ct_read_lock_bh(idx); |
1001 | hlist_for_each_entry(cp, e, &ip_vs_conn_tab[idx], c_list) { | 996 | hlist_for_each_entry(cp, &ip_vs_conn_tab[idx], c_list) { |
1002 | iter->l = &ip_vs_conn_tab[idx]; | 997 | iter->l = &ip_vs_conn_tab[idx]; |
1003 | return cp; | 998 | return cp; |
1004 | } | 999 | } |
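The seq_next fixup above drops the spare hlist_node variable by testing c_list.next directly and handing it straight to hlist_entry(), which is just container_of(): stepping back from the embedded node to the connection that contains it. A toy sketch of that recovery (stand-in types):

#include <stddef.h>
#include <stdio.h>

struct hlist_node { struct hlist_node *next; };

/* hlist_entry() is container_of() under another name. */
#define hlist_entry(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct conn { int id; struct hlist_node c_list; };

int main(void)
{
	struct conn b = { 2, { NULL } };
	struct conn a = { 1, { &b.c_list } };
	struct conn *cp = &a;

	/* Post-conversion shape of ip_vs_conn_seq_next(): no temp
	 * node pointer, just test and convert ->next in place. */
	if (cp->c_list.next)
		cp = hlist_entry(cp->c_list.next, struct conn, c_list);

	printf("next id=%d\n", cp->id);	/* 2 */
	return 0;
}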
@@ -1200,14 +1195,13 @@ void ip_vs_random_dropentry(struct net *net) | |||
1200 | */ | 1195 | */ |
1201 | for (idx = 0; idx < (ip_vs_conn_tab_size>>5); idx++) { | 1196 | for (idx = 0; idx < (ip_vs_conn_tab_size>>5); idx++) { |
1202 | unsigned int hash = net_random() & ip_vs_conn_tab_mask; | 1197 | unsigned int hash = net_random() & ip_vs_conn_tab_mask; |
1203 | struct hlist_node *n; | ||
1204 | 1198 | ||
1205 | /* | 1199 | /* |
1206 | * Lock is actually needed in this loop. | 1200 | * Lock is actually needed in this loop. |
1207 | */ | 1201 | */ |
1208 | ct_write_lock_bh(hash); | 1202 | ct_write_lock_bh(hash); |
1209 | 1203 | ||
1210 | hlist_for_each_entry(cp, n, &ip_vs_conn_tab[hash], c_list) { | 1204 | hlist_for_each_entry(cp, &ip_vs_conn_tab[hash], c_list) { |
1211 | if (cp->flags & IP_VS_CONN_F_TEMPLATE) | 1205 | if (cp->flags & IP_VS_CONN_F_TEMPLATE) |
1212 | /* connection template */ | 1206 | /* connection template */ |
1213 | continue; | 1207 | continue; |
@@ -1255,14 +1249,12 @@ static void ip_vs_conn_flush(struct net *net) | |||
1255 | 1249 | ||
1256 | flush_again: | 1250 | flush_again: |
1257 | for (idx = 0; idx < ip_vs_conn_tab_size; idx++) { | 1251 | for (idx = 0; idx < ip_vs_conn_tab_size; idx++) { |
1258 | struct hlist_node *n; | ||
1259 | |||
1260 | /* | 1252 | /* |
1261 | * Lock is actually needed in this loop. | 1253 | * Lock is actually needed in this loop. |
1262 | */ | 1254 | */ |
1263 | ct_write_lock_bh(idx); | 1255 | ct_write_lock_bh(idx); |
1264 | 1256 | ||
1265 | hlist_for_each_entry(cp, n, &ip_vs_conn_tab[idx], c_list) { | 1257 | hlist_for_each_entry(cp, &ip_vs_conn_tab[idx], c_list) { |
1266 | if (!ip_vs_conn_net_eq(cp, net)) | 1258 | if (!ip_vs_conn_net_eq(cp, net)) |
1267 | continue; | 1259 | continue; |
1268 | IP_VS_DBG(4, "del connection\n"); | 1260 | IP_VS_DBG(4, "del connection\n"); |
diff --git a/net/netfilter/nf_conntrack_expect.c b/net/netfilter/nf_conntrack_expect.c
index 3921e5bc1235..8c10e3db3d9b 100644
--- a/net/netfilter/nf_conntrack_expect.c
+++ b/net/netfilter/nf_conntrack_expect.c
@@ -90,14 +90,13 @@ __nf_ct_expect_find(struct net *net, u16 zone, | |||
90 | const struct nf_conntrack_tuple *tuple) | 90 | const struct nf_conntrack_tuple *tuple) |
91 | { | 91 | { |
92 | struct nf_conntrack_expect *i; | 92 | struct nf_conntrack_expect *i; |
93 | struct hlist_node *n; | ||
94 | unsigned int h; | 93 | unsigned int h; |
95 | 94 | ||
96 | if (!net->ct.expect_count) | 95 | if (!net->ct.expect_count) |
97 | return NULL; | 96 | return NULL; |
98 | 97 | ||
99 | h = nf_ct_expect_dst_hash(tuple); | 98 | h = nf_ct_expect_dst_hash(tuple); |
100 | hlist_for_each_entry_rcu(i, n, &net->ct.expect_hash[h], hnode) { | 99 | hlist_for_each_entry_rcu(i, &net->ct.expect_hash[h], hnode) { |
101 | if (nf_ct_tuple_mask_cmp(tuple, &i->tuple, &i->mask) && | 100 | if (nf_ct_tuple_mask_cmp(tuple, &i->tuple, &i->mask) && |
102 | nf_ct_zone(i->master) == zone) | 101 | nf_ct_zone(i->master) == zone) |
103 | return i; | 102 | return i; |
@@ -130,14 +129,13 @@ nf_ct_find_expectation(struct net *net, u16 zone, | |||
130 | const struct nf_conntrack_tuple *tuple) | 129 | const struct nf_conntrack_tuple *tuple) |
131 | { | 130 | { |
132 | struct nf_conntrack_expect *i, *exp = NULL; | 131 | struct nf_conntrack_expect *i, *exp = NULL; |
133 | struct hlist_node *n; | ||
134 | unsigned int h; | 132 | unsigned int h; |
135 | 133 | ||
136 | if (!net->ct.expect_count) | 134 | if (!net->ct.expect_count) |
137 | return NULL; | 135 | return NULL; |
138 | 136 | ||
139 | h = nf_ct_expect_dst_hash(tuple); | 137 | h = nf_ct_expect_dst_hash(tuple); |
140 | hlist_for_each_entry(i, n, &net->ct.expect_hash[h], hnode) { | 138 | hlist_for_each_entry(i, &net->ct.expect_hash[h], hnode) { |
141 | if (!(i->flags & NF_CT_EXPECT_INACTIVE) && | 139 | if (!(i->flags & NF_CT_EXPECT_INACTIVE) && |
142 | nf_ct_tuple_mask_cmp(tuple, &i->tuple, &i->mask) && | 140 | nf_ct_tuple_mask_cmp(tuple, &i->tuple, &i->mask) && |
143 | nf_ct_zone(i->master) == zone) { | 141 | nf_ct_zone(i->master) == zone) { |
@@ -172,13 +170,13 @@ void nf_ct_remove_expectations(struct nf_conn *ct) | |||
172 | { | 170 | { |
173 | struct nf_conn_help *help = nfct_help(ct); | 171 | struct nf_conn_help *help = nfct_help(ct); |
174 | struct nf_conntrack_expect *exp; | 172 | struct nf_conntrack_expect *exp; |
175 | struct hlist_node *n, *next; | 173 | struct hlist_node *next; |
176 | 174 | ||
177 | /* Optimization: most connection never expect any others. */ | 175 | /* Optimization: most connection never expect any others. */ |
178 | if (!help) | 176 | if (!help) |
179 | return; | 177 | return; |
180 | 178 | ||
181 | hlist_for_each_entry_safe(exp, n, next, &help->expectations, lnode) { | 179 | hlist_for_each_entry_safe(exp, next, &help->expectations, lnode) { |
182 | if (del_timer(&exp->timeout)) { | 180 | if (del_timer(&exp->timeout)) { |
183 | nf_ct_unlink_expect(exp); | 181 | nf_ct_unlink_expect(exp); |
184 | nf_ct_expect_put(exp); | 182 | nf_ct_expect_put(exp); |
@@ -348,9 +346,8 @@ static void evict_oldest_expect(struct nf_conn *master, | |||
348 | { | 346 | { |
349 | struct nf_conn_help *master_help = nfct_help(master); | 347 | struct nf_conn_help *master_help = nfct_help(master); |
350 | struct nf_conntrack_expect *exp, *last = NULL; | 348 | struct nf_conntrack_expect *exp, *last = NULL; |
351 | struct hlist_node *n; | ||
352 | 349 | ||
353 | hlist_for_each_entry(exp, n, &master_help->expectations, lnode) { | 350 | hlist_for_each_entry(exp, &master_help->expectations, lnode) { |
354 | if (exp->class == new->class) | 351 | if (exp->class == new->class) |
355 | last = exp; | 352 | last = exp; |
356 | } | 353 | } |
@@ -369,7 +366,7 @@ static inline int __nf_ct_expect_check(struct nf_conntrack_expect *expect) | |||
369 | struct nf_conn_help *master_help = nfct_help(master); | 366 | struct nf_conn_help *master_help = nfct_help(master); |
370 | struct nf_conntrack_helper *helper; | 367 | struct nf_conntrack_helper *helper; |
371 | struct net *net = nf_ct_exp_net(expect); | 368 | struct net *net = nf_ct_exp_net(expect); |
372 | struct hlist_node *n, *next; | 369 | struct hlist_node *next; |
373 | unsigned int h; | 370 | unsigned int h; |
374 | int ret = 1; | 371 | int ret = 1; |
375 | 372 | ||
@@ -378,7 +375,7 @@ static inline int __nf_ct_expect_check(struct nf_conntrack_expect *expect) | |||
378 | goto out; | 375 | goto out; |
379 | } | 376 | } |
380 | h = nf_ct_expect_dst_hash(&expect->tuple); | 377 | h = nf_ct_expect_dst_hash(&expect->tuple); |
381 | hlist_for_each_entry_safe(i, n, next, &net->ct.expect_hash[h], hnode) { | 378 | hlist_for_each_entry_safe(i, next, &net->ct.expect_hash[h], hnode) { |
382 | if (expect_matches(i, expect)) { | 379 | if (expect_matches(i, expect)) { |
383 | if (del_timer(&i->timeout)) { | 380 | if (del_timer(&i->timeout)) { |
384 | nf_ct_unlink_expect(i); | 381 | nf_ct_unlink_expect(i); |
diff --git a/net/netfilter/nf_conntrack_helper.c b/net/netfilter/nf_conntrack_helper.c
index 013cdf69fe29..a9740bd6fe54 100644
--- a/net/netfilter/nf_conntrack_helper.c
+++ b/net/netfilter/nf_conntrack_helper.c
@@ -116,14 +116,13 @@ __nf_ct_helper_find(const struct nf_conntrack_tuple *tuple) | |||
116 | { | 116 | { |
117 | struct nf_conntrack_helper *helper; | 117 | struct nf_conntrack_helper *helper; |
118 | struct nf_conntrack_tuple_mask mask = { .src.u.all = htons(0xFFFF) }; | 118 | struct nf_conntrack_tuple_mask mask = { .src.u.all = htons(0xFFFF) }; |
119 | struct hlist_node *n; | ||
120 | unsigned int h; | 119 | unsigned int h; |
121 | 120 | ||
122 | if (!nf_ct_helper_count) | 121 | if (!nf_ct_helper_count) |
123 | return NULL; | 122 | return NULL; |
124 | 123 | ||
125 | h = helper_hash(tuple); | 124 | h = helper_hash(tuple); |
126 | hlist_for_each_entry_rcu(helper, n, &nf_ct_helper_hash[h], hnode) { | 125 | hlist_for_each_entry_rcu(helper, &nf_ct_helper_hash[h], hnode) { |
127 | if (nf_ct_tuple_src_mask_cmp(tuple, &helper->tuple, &mask)) | 126 | if (nf_ct_tuple_src_mask_cmp(tuple, &helper->tuple, &mask)) |
128 | return helper; | 127 | return helper; |
129 | } | 128 | } |
@@ -134,11 +133,10 @@ struct nf_conntrack_helper * | |||
134 | __nf_conntrack_helper_find(const char *name, u16 l3num, u8 protonum) | 133 | __nf_conntrack_helper_find(const char *name, u16 l3num, u8 protonum) |
135 | { | 134 | { |
136 | struct nf_conntrack_helper *h; | 135 | struct nf_conntrack_helper *h; |
137 | struct hlist_node *n; | ||
138 | unsigned int i; | 136 | unsigned int i; |
139 | 137 | ||
140 | for (i = 0; i < nf_ct_helper_hsize; i++) { | 138 | for (i = 0; i < nf_ct_helper_hsize; i++) { |
141 | hlist_for_each_entry_rcu(h, n, &nf_ct_helper_hash[i], hnode) { | 139 | hlist_for_each_entry_rcu(h, &nf_ct_helper_hash[i], hnode) { |
142 | if (!strcmp(h->name, name) && | 140 | if (!strcmp(h->name, name) && |
143 | h->tuple.src.l3num == l3num && | 141 | h->tuple.src.l3num == l3num && |
144 | h->tuple.dst.protonum == protonum) | 142 | h->tuple.dst.protonum == protonum) |
@@ -357,7 +355,6 @@ int nf_conntrack_helper_register(struct nf_conntrack_helper *me) | |||
357 | { | 355 | { |
358 | int ret = 0; | 356 | int ret = 0; |
359 | struct nf_conntrack_helper *cur; | 357 | struct nf_conntrack_helper *cur; |
360 | struct hlist_node *n; | ||
361 | unsigned int h = helper_hash(&me->tuple); | 358 | unsigned int h = helper_hash(&me->tuple); |
362 | 359 | ||
363 | BUG_ON(me->expect_policy == NULL); | 360 | BUG_ON(me->expect_policy == NULL); |
@@ -365,7 +362,7 @@ int nf_conntrack_helper_register(struct nf_conntrack_helper *me) | |||
365 | BUG_ON(strlen(me->name) > NF_CT_HELPER_NAME_LEN - 1); | 362 | BUG_ON(strlen(me->name) > NF_CT_HELPER_NAME_LEN - 1); |
366 | 363 | ||
367 | mutex_lock(&nf_ct_helper_mutex); | 364 | mutex_lock(&nf_ct_helper_mutex); |
368 | hlist_for_each_entry(cur, n, &nf_ct_helper_hash[h], hnode) { | 365 | hlist_for_each_entry(cur, &nf_ct_helper_hash[h], hnode) { |
369 | if (strncmp(cur->name, me->name, NF_CT_HELPER_NAME_LEN) == 0 && | 366 | if (strncmp(cur->name, me->name, NF_CT_HELPER_NAME_LEN) == 0 && |
370 | cur->tuple.src.l3num == me->tuple.src.l3num && | 367 | cur->tuple.src.l3num == me->tuple.src.l3num && |
371 | cur->tuple.dst.protonum == me->tuple.dst.protonum) { | 368 | cur->tuple.dst.protonum == me->tuple.dst.protonum) { |
@@ -386,13 +383,13 @@ static void __nf_conntrack_helper_unregister(struct nf_conntrack_helper *me, | |||
386 | { | 383 | { |
387 | struct nf_conntrack_tuple_hash *h; | 384 | struct nf_conntrack_tuple_hash *h; |
388 | struct nf_conntrack_expect *exp; | 385 | struct nf_conntrack_expect *exp; |
389 | const struct hlist_node *n, *next; | 386 | const struct hlist_node *next; |
390 | const struct hlist_nulls_node *nn; | 387 | const struct hlist_nulls_node *nn; |
391 | unsigned int i; | 388 | unsigned int i; |
392 | 389 | ||
393 | /* Get rid of expectations */ | 390 | /* Get rid of expectations */ |
394 | for (i = 0; i < nf_ct_expect_hsize; i++) { | 391 | for (i = 0; i < nf_ct_expect_hsize; i++) { |
395 | hlist_for_each_entry_safe(exp, n, next, | 392 | hlist_for_each_entry_safe(exp, next, |
396 | &net->ct.expect_hash[i], hnode) { | 393 | &net->ct.expect_hash[i], hnode) { |
397 | struct nf_conn_help *help = nfct_help(exp->master); | 394 | struct nf_conn_help *help = nfct_help(exp->master); |
398 | if ((rcu_dereference_protected( | 395 | if ((rcu_dereference_protected( |
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
index 5d60e04f9679..9904b15f600e 100644
--- a/net/netfilter/nf_conntrack_netlink.c
+++ b/net/netfilter/nf_conntrack_netlink.c
@@ -2370,14 +2370,13 @@ ctnetlink_exp_dump_table(struct sk_buff *skb, struct netlink_callback *cb) | |||
2370 | struct net *net = sock_net(skb->sk); | 2370 | struct net *net = sock_net(skb->sk); |
2371 | struct nf_conntrack_expect *exp, *last; | 2371 | struct nf_conntrack_expect *exp, *last; |
2372 | struct nfgenmsg *nfmsg = nlmsg_data(cb->nlh); | 2372 | struct nfgenmsg *nfmsg = nlmsg_data(cb->nlh); |
2373 | struct hlist_node *n; | ||
2374 | u_int8_t l3proto = nfmsg->nfgen_family; | 2373 | u_int8_t l3proto = nfmsg->nfgen_family; |
2375 | 2374 | ||
2376 | rcu_read_lock(); | 2375 | rcu_read_lock(); |
2377 | last = (struct nf_conntrack_expect *)cb->args[1]; | 2376 | last = (struct nf_conntrack_expect *)cb->args[1]; |
2378 | for (; cb->args[0] < nf_ct_expect_hsize; cb->args[0]++) { | 2377 | for (; cb->args[0] < nf_ct_expect_hsize; cb->args[0]++) { |
2379 | restart: | 2378 | restart: |
2380 | hlist_for_each_entry(exp, n, &net->ct.expect_hash[cb->args[0]], | 2379 | hlist_for_each_entry(exp, &net->ct.expect_hash[cb->args[0]], |
2381 | hnode) { | 2380 | hnode) { |
2382 | if (l3proto && exp->tuple.src.l3num != l3proto) | 2381 | if (l3proto && exp->tuple.src.l3num != l3proto) |
2383 | continue; | 2382 | continue; |
@@ -2510,7 +2509,7 @@ ctnetlink_del_expect(struct sock *ctnl, struct sk_buff *skb, | |||
2510 | struct nf_conntrack_expect *exp; | 2509 | struct nf_conntrack_expect *exp; |
2511 | struct nf_conntrack_tuple tuple; | 2510 | struct nf_conntrack_tuple tuple; |
2512 | struct nfgenmsg *nfmsg = nlmsg_data(nlh); | 2511 | struct nfgenmsg *nfmsg = nlmsg_data(nlh); |
2513 | struct hlist_node *n, *next; | 2512 | struct hlist_node *next; |
2514 | u_int8_t u3 = nfmsg->nfgen_family; | 2513 | u_int8_t u3 = nfmsg->nfgen_family; |
2515 | unsigned int i; | 2514 | unsigned int i; |
2516 | u16 zone; | 2515 | u16 zone; |
@@ -2557,7 +2556,7 @@ ctnetlink_del_expect(struct sock *ctnl, struct sk_buff *skb, | |||
2557 | /* delete all expectations for this helper */ | 2556 | /* delete all expectations for this helper */ |
2558 | spin_lock_bh(&nf_conntrack_lock); | 2557 | spin_lock_bh(&nf_conntrack_lock); |
2559 | for (i = 0; i < nf_ct_expect_hsize; i++) { | 2558 | for (i = 0; i < nf_ct_expect_hsize; i++) { |
2560 | hlist_for_each_entry_safe(exp, n, next, | 2559 | hlist_for_each_entry_safe(exp, next, |
2561 | &net->ct.expect_hash[i], | 2560 | &net->ct.expect_hash[i], |
2562 | hnode) { | 2561 | hnode) { |
2563 | m_help = nfct_help(exp->master); | 2562 | m_help = nfct_help(exp->master); |
@@ -2575,7 +2574,7 @@ ctnetlink_del_expect(struct sock *ctnl, struct sk_buff *skb, | |||
2575 | /* This basically means we have to flush everything*/ | 2574 | /* This basically means we have to flush everything*/ |
2576 | spin_lock_bh(&nf_conntrack_lock); | 2575 | spin_lock_bh(&nf_conntrack_lock); |
2577 | for (i = 0; i < nf_ct_expect_hsize; i++) { | 2576 | for (i = 0; i < nf_ct_expect_hsize; i++) { |
2578 | hlist_for_each_entry_safe(exp, n, next, | 2577 | hlist_for_each_entry_safe(exp, next, |
2579 | &net->ct.expect_hash[i], | 2578 | &net->ct.expect_hash[i], |
2580 | hnode) { | 2579 | hnode) { |
2581 | if (del_timer(&exp->timeout)) { | 2580 | if (del_timer(&exp->timeout)) { |
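The _safe conversions above are equally mechanical. The surviving cursor (next here) is pure lookahead: the new macro caches pos->member.next before the body runs, so the body may unlink or free the current entry, which is exactly what these expectation-flush loops do. Sketched from the same series:

#define hlist_for_each_entry_safe(pos, n, head, member) \
	for (pos = hlist_entry_safe((head)->first, typeof(*pos), member); \
	     pos && ({ n = pos->member.next; 1; }); \
	     pos = hlist_entry_safe(n, typeof(*pos), member))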
diff --git a/net/netfilter/nf_conntrack_sip.c b/net/netfilter/nf_conntrack_sip.c
index 069229d919b6..0e7d423324c3 100644
--- a/net/netfilter/nf_conntrack_sip.c
+++ b/net/netfilter/nf_conntrack_sip.c
@@ -855,11 +855,11 @@ static int refresh_signalling_expectation(struct nf_conn *ct,
 {
 	struct nf_conn_help *help = nfct_help(ct);
 	struct nf_conntrack_expect *exp;
-	struct hlist_node *n, *next;
+	struct hlist_node *next;
 	int found = 0;

 	spin_lock_bh(&nf_conntrack_lock);
-	hlist_for_each_entry_safe(exp, n, next, &help->expectations, lnode) {
+	hlist_for_each_entry_safe(exp, next, &help->expectations, lnode) {
 		if (exp->class != SIP_EXPECT_SIGNALLING ||
 		    !nf_inet_addr_cmp(&exp->tuple.dst.u3, addr) ||
 		    exp->tuple.dst.protonum != proto ||
@@ -881,10 +881,10 @@ static void flush_expectations(struct nf_conn *ct, bool media)
 {
 	struct nf_conn_help *help = nfct_help(ct);
 	struct nf_conntrack_expect *exp;
-	struct hlist_node *n, *next;
+	struct hlist_node *next;

 	spin_lock_bh(&nf_conntrack_lock);
-	hlist_for_each_entry_safe(exp, n, next, &help->expectations, lnode) {
+	hlist_for_each_entry_safe(exp, next, &help->expectations, lnode) {
 		if ((exp->class != SIP_EXPECT_SIGNALLING) ^ media)
 			continue;
 		if (!del_timer(&exp->timeout))
diff --git a/net/netfilter/nf_nat_core.c b/net/netfilter/nf_nat_core.c
index 5f2f9109f461..8d5769c6d16e 100644
--- a/net/netfilter/nf_nat_core.c
+++ b/net/netfilter/nf_nat_core.c
@@ -191,9 +191,8 @@ find_appropriate_src(struct net *net, u16 zone,
 	unsigned int h = hash_by_src(net, zone, tuple);
 	const struct nf_conn_nat *nat;
 	const struct nf_conn *ct;
-	const struct hlist_node *n;

-	hlist_for_each_entry_rcu(nat, n, &net->ct.nat_bysource[h], bysource) {
+	hlist_for_each_entry_rcu(nat, &net->ct.nat_bysource[h], bysource) {
 		ct = nat->ct;
 		if (same_src(ct, tuple) && nf_ct_zone(ct) == zone) {
 			/* Copy source part from reply tuple. */
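The RCU flavour drops its cursor the same way; the only difference from the plain iterator is that each forward pointer is fetched through the RCU accessors, so find_appropriate_src() can keep walking nat_bysource under rcu_read_lock() while writers update the chain. Approximately:

#define hlist_for_each_entry_rcu(pos, head, member) \
	for (pos = hlist_entry_safe(rcu_dereference_raw(hlist_first_rcu(head)), \
				    typeof(*(pos)), member); \
	     pos; \
	     pos = hlist_entry_safe(rcu_dereference_raw(hlist_next_rcu( \
				    &(pos)->member)), typeof(*(pos)), member))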
diff --git a/net/netfilter/nfnetlink_cthelper.c b/net/netfilter/nfnetlink_cthelper.c
index 945950a8b1f1..a191b6db657e 100644
--- a/net/netfilter/nfnetlink_cthelper.c
+++ b/net/netfilter/nfnetlink_cthelper.c
@@ -282,7 +282,6 @@ nfnl_cthelper_new(struct sock *nfnl, struct sk_buff *skb,
 	const char *helper_name;
 	struct nf_conntrack_helper *cur, *helper = NULL;
 	struct nf_conntrack_tuple tuple;
-	struct hlist_node *n;
 	int ret = 0, i;

 	if (!tb[NFCTH_NAME] || !tb[NFCTH_TUPLE])
@@ -296,7 +295,7 @@ nfnl_cthelper_new(struct sock *nfnl, struct sk_buff *skb,

 	rcu_read_lock();
 	for (i = 0; i < nf_ct_helper_hsize && !helper; i++) {
-		hlist_for_each_entry_rcu(cur, n, &nf_ct_helper_hash[i], hnode) {
+		hlist_for_each_entry_rcu(cur, &nf_ct_helper_hash[i], hnode) {

 			/* skip non-userspace conntrack helpers. */
 			if (!(cur->flags & NF_CT_HELPER_F_USERSPACE))
@@ -452,13 +451,12 @@ static int
 nfnl_cthelper_dump_table(struct sk_buff *skb, struct netlink_callback *cb)
 {
 	struct nf_conntrack_helper *cur, *last;
-	struct hlist_node *n;

 	rcu_read_lock();
 	last = (struct nf_conntrack_helper *)cb->args[1];
 	for (; cb->args[0] < nf_ct_helper_hsize; cb->args[0]++) {
 restart:
-		hlist_for_each_entry_rcu(cur, n,
+		hlist_for_each_entry_rcu(cur,
 					 &nf_ct_helper_hash[cb->args[0]], hnode) {

 			/* skip non-userspace conntrack helpers. */
@@ -495,7 +493,6 @@ nfnl_cthelper_get(struct sock *nfnl, struct sk_buff *skb,
 {
 	int ret = -ENOENT, i;
 	struct nf_conntrack_helper *cur;
-	struct hlist_node *n;
 	struct sk_buff *skb2;
 	char *helper_name = NULL;
 	struct nf_conntrack_tuple tuple;
@@ -520,7 +517,7 @@ nfnl_cthelper_get(struct sock *nfnl, struct sk_buff *skb,
 	}

 	for (i = 0; i < nf_ct_helper_hsize; i++) {
-		hlist_for_each_entry_rcu(cur, n, &nf_ct_helper_hash[i], hnode) {
+		hlist_for_each_entry_rcu(cur, &nf_ct_helper_hash[i], hnode) {

 			/* skip non-userspace conntrack helpers. */
 			if (!(cur->flags & NF_CT_HELPER_F_USERSPACE))
@@ -568,7 +565,7 @@ nfnl_cthelper_del(struct sock *nfnl, struct sk_buff *skb,
 {
 	char *helper_name = NULL;
 	struct nf_conntrack_helper *cur;
-	struct hlist_node *n, *tmp;
+	struct hlist_node *tmp;
 	struct nf_conntrack_tuple tuple;
 	bool tuple_set = false, found = false;
 	int i, j = 0, ret;
@@ -585,7 +582,7 @@ nfnl_cthelper_del(struct sock *nfnl, struct sk_buff *skb,
 	}

 	for (i = 0; i < nf_ct_helper_hsize; i++) {
-		hlist_for_each_entry_safe(cur, n, tmp, &nf_ct_helper_hash[i],
+		hlist_for_each_entry_safe(cur, tmp, &nf_ct_helper_hash[i],
 					  hnode) {
 			/* skip non-userspace conntrack helpers. */
 			if (!(cur->flags & NF_CT_HELPER_F_USERSPACE))
@@ -654,13 +651,13 @@ err_out:
 static void __exit nfnl_cthelper_exit(void)
 {
 	struct nf_conntrack_helper *cur;
-	struct hlist_node *n, *tmp;
+	struct hlist_node *tmp;
 	int i;

 	nfnetlink_subsys_unregister(&nfnl_cthelper_subsys);

 	for (i=0; i<nf_ct_helper_hsize; i++) {
-		hlist_for_each_entry_safe(cur, n, tmp, &nf_ct_helper_hash[i],
+		hlist_for_each_entry_safe(cur, tmp, &nf_ct_helper_hash[i],
 					  hnode) {
 			/* skip non-userspace conntrack helpers. */
 			if (!(cur->flags & NF_CT_HELPER_F_USERSPACE))
diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c
index 92fd8eca0d31..f248db572972 100644
--- a/net/netfilter/nfnetlink_log.c
+++ b/net/netfilter/nfnetlink_log.c
@@ -87,11 +87,10 @@ static struct nfulnl_instance *
 __instance_lookup(u_int16_t group_num)
 {
 	struct hlist_head *head;
-	struct hlist_node *pos;
 	struct nfulnl_instance *inst;

 	head = &instance_table[instance_hashfn(group_num)];
-	hlist_for_each_entry_rcu(inst, pos, head, hlist) {
+	hlist_for_each_entry_rcu(inst, head, hlist) {
 		if (inst->group_num == group_num)
 			return inst;
 	}
@@ -717,11 +716,11 @@ nfulnl_rcv_nl_event(struct notifier_block *this,
 		/* destroy all instances for this portid */
 		spin_lock_bh(&instances_lock);
 		for (i = 0; i < INSTANCE_BUCKETS; i++) {
-			struct hlist_node *tmp, *t2;
+			struct hlist_node *t2;
 			struct nfulnl_instance *inst;
 			struct hlist_head *head = &instance_table[i];

-			hlist_for_each_entry_safe(inst, tmp, t2, head, hlist) {
+			hlist_for_each_entry_safe(inst, t2, head, hlist) {
 				if ((net_eq(n->net, &init_net)) &&
 				    (n->portid == inst->peer_portid))
 					__instance_destroy(inst);
diff --git a/net/netfilter/nfnetlink_queue_core.c b/net/netfilter/nfnetlink_queue_core.c
index 3158d87b56a8..858fd52c1040 100644
--- a/net/netfilter/nfnetlink_queue_core.c
+++ b/net/netfilter/nfnetlink_queue_core.c
@@ -80,11 +80,10 @@ static struct nfqnl_instance *
 instance_lookup(u_int16_t queue_num)
 {
 	struct hlist_head *head;
-	struct hlist_node *pos;
 	struct nfqnl_instance *inst;

 	head = &instance_table[instance_hashfn(queue_num)];
-	hlist_for_each_entry_rcu(inst, pos, head, hlist) {
+	hlist_for_each_entry_rcu(inst, head, hlist) {
 		if (inst->queue_num == queue_num)
 			return inst;
 	}
@@ -583,11 +582,10 @@ nfqnl_dev_drop(int ifindex)
 	rcu_read_lock();

 	for (i = 0; i < INSTANCE_BUCKETS; i++) {
-		struct hlist_node *tmp;
 		struct nfqnl_instance *inst;
 		struct hlist_head *head = &instance_table[i];

-		hlist_for_each_entry_rcu(inst, tmp, head, hlist)
+		hlist_for_each_entry_rcu(inst, head, hlist)
 			nfqnl_flush(inst, dev_cmp, ifindex);
 	}

@@ -627,11 +625,11 @@ nfqnl_rcv_nl_event(struct notifier_block *this,
 		/* destroy all instances for this portid */
 		spin_lock(&instances_lock);
 		for (i = 0; i < INSTANCE_BUCKETS; i++) {
-			struct hlist_node *tmp, *t2;
+			struct hlist_node *t2;
 			struct nfqnl_instance *inst;
 			struct hlist_head *head = &instance_table[i];

-			hlist_for_each_entry_safe(inst, tmp, t2, head, hlist) {
+			hlist_for_each_entry_safe(inst, t2, head, hlist) {
 				if ((n->net == &init_net) &&
 				    (n->portid == inst->peer_portid))
 					__instance_destroy(inst);
diff --git a/net/netfilter/xt_RATEEST.c b/net/netfilter/xt_RATEEST.c
index f264032b8c56..370adf622cef 100644
--- a/net/netfilter/xt_RATEEST.c
+++ b/net/netfilter/xt_RATEEST.c
@@ -43,12 +43,11 @@ static void xt_rateest_hash_insert(struct xt_rateest *est)
 struct xt_rateest *xt_rateest_lookup(const char *name)
 {
 	struct xt_rateest *est;
-	struct hlist_node *n;
 	unsigned int h;

 	h = xt_rateest_hash(name);
 	mutex_lock(&xt_rateest_mutex);
-	hlist_for_each_entry(est, n, &rateest_hash[h], list) {
+	hlist_for_each_entry(est, &rateest_hash[h], list) {
 		if (strcmp(est->name, name) == 0) {
 			est->refcnt++;
 			mutex_unlock(&xt_rateest_mutex);
diff --git a/net/netfilter/xt_connlimit.c b/net/netfilter/xt_connlimit.c
index 70b5591a2586..c40b2695633b 100644
--- a/net/netfilter/xt_connlimit.c
+++ b/net/netfilter/xt_connlimit.c
@@ -101,7 +101,7 @@ static int count_them(struct net *net,
 {
 	const struct nf_conntrack_tuple_hash *found;
 	struct xt_connlimit_conn *conn;
-	struct hlist_node *pos, *n;
+	struct hlist_node *n;
 	struct nf_conn *found_ct;
 	struct hlist_head *hash;
 	bool addit = true;
@@ -115,7 +115,7 @@ static int count_them(struct net *net,
 	rcu_read_lock();

 	/* check the saved connections */
-	hlist_for_each_entry_safe(conn, pos, n, hash, node) {
+	hlist_for_each_entry_safe(conn, n, hash, node) {
 		found = nf_conntrack_find_get(net, NF_CT_DEFAULT_ZONE,
 					      &conn->tuple);
 		found_ct = NULL;
@@ -258,14 +258,14 @@ static void connlimit_mt_destroy(const struct xt_mtdtor_param *par)
 {
 	const struct xt_connlimit_info *info = par->matchinfo;
 	struct xt_connlimit_conn *conn;
-	struct hlist_node *pos, *n;
+	struct hlist_node *n;
 	struct hlist_head *hash = info->data->iphash;
 	unsigned int i;

 	nf_ct_l3proto_module_put(par->family);

 	for (i = 0; i < ARRAY_SIZE(info->data->iphash); ++i) {
-		hlist_for_each_entry_safe(conn, pos, n, &hash[i], node) {
+		hlist_for_each_entry_safe(conn, n, &hash[i], node) {
 			hlist_del(&conn->node);
 			kfree(conn);
 		}
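The connlimit hunks make a compact before/after of the calling convention, since both a lookup-style walk and a free-everything walk appear in one file. For readers without a kernel tree handy, here is a minimal self-contained userspace rendering of the destroy pattern; the toy hlist and struct conn are hypothetical stand-ins that only mirror the kernel API shape (build with a GNU-C compiler for typeof):

/* toy_hlist.c: cc -o toy_hlist toy_hlist.c && ./toy_hlist */
#include <stdio.h>
#include <stdlib.h>
#include <stddef.h>

struct hlist_node { struct hlist_node *next; };
struct hlist_head { struct hlist_node *first; };

#define hlist_entry(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))
#define hlist_entry_safe(ptr, type, member) \
	((ptr) ? hlist_entry(ptr, type, member) : NULL)
/* post-series shape: entry pointer, lookahead, head, member */
#define hlist_for_each_entry_safe(pos, n, head, member) \
	for (pos = hlist_entry_safe((head)->first, typeof(*pos), member); \
	     pos && (((n) = (pos)->member.next), 1); \
	     pos = hlist_entry_safe(n, typeof(*pos), member))

struct conn {
	int id;
	struct hlist_node node;
};

int main(void)
{
	struct hlist_head bucket = { NULL };
	struct hlist_node *n;
	struct conn *c;
	int i;

	for (i = 0; i < 3; i++) {	/* push three entries, LIFO */
		c = malloc(sizeof(*c));
		c->id = i;
		c->node.next = bucket.first;
		bucket.first = &c->node;
	}

	hlist_for_each_entry_safe(c, n, &bucket, node) {
		printf("freeing conn %d\n", c->id);
		free(c);		/* safe: n was saved beforehand */
	}
	return 0;
}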
diff --git a/net/netfilter/xt_hashlimit.c b/net/netfilter/xt_hashlimit.c
index 98218c896d2e..f330e8beaf69 100644
--- a/net/netfilter/xt_hashlimit.c
+++ b/net/netfilter/xt_hashlimit.c
@@ -141,11 +141,10 @@ dsthash_find(const struct xt_hashlimit_htable *ht,
 	     const struct dsthash_dst *dst)
 {
 	struct dsthash_ent *ent;
-	struct hlist_node *pos;
 	u_int32_t hash = hash_dst(ht, dst);

 	if (!hlist_empty(&ht->hash[hash])) {
-		hlist_for_each_entry_rcu(ent, pos, &ht->hash[hash], node)
+		hlist_for_each_entry_rcu(ent, &ht->hash[hash], node)
 			if (dst_cmp(ent, dst)) {
 				spin_lock(&ent->lock);
 				return ent;
@@ -297,8 +296,8 @@ static void htable_selective_cleanup(struct xt_hashlimit_htable *ht,
 	spin_lock_bh(&ht->lock);
 	for (i = 0; i < ht->cfg.size; i++) {
 		struct dsthash_ent *dh;
-		struct hlist_node *pos, *n;
-		hlist_for_each_entry_safe(dh, pos, n, &ht->hash[i], node) {
+		struct hlist_node *n;
+		hlist_for_each_entry_safe(dh, n, &ht->hash[i], node) {
 			if ((*select)(ht, dh))
 				dsthash_free(ht, dh);
 		}
@@ -343,9 +342,8 @@ static struct xt_hashlimit_htable *htable_find_get(struct net *net,
 {
 	struct hashlimit_net *hashlimit_net = hashlimit_pernet(net);
 	struct xt_hashlimit_htable *hinfo;
-	struct hlist_node *pos;

-	hlist_for_each_entry(hinfo, pos, &hashlimit_net->htables, node) {
+	hlist_for_each_entry(hinfo, &hashlimit_net->htables, node) {
 		if (!strcmp(name, hinfo->pde->name) &&
 		    hinfo->family == family) {
 			hinfo->use++;
@@ -821,10 +819,9 @@ static int dl_seq_show(struct seq_file *s, void *v)
 	struct xt_hashlimit_htable *htable = s->private;
 	unsigned int *bucket = (unsigned int *)v;
 	struct dsthash_ent *ent;
-	struct hlist_node *pos;

 	if (!hlist_empty(&htable->hash[*bucket])) {
-		hlist_for_each_entry(ent, pos, &htable->hash[*bucket], node)
+		hlist_for_each_entry(ent, &htable->hash[*bucket], node)
 			if (dl_seq_real_show(ent, htable->family, s))
 				return -1;
 	}
@@ -877,7 +874,6 @@ static int __net_init hashlimit_proc_net_init(struct net *net)
 static void __net_exit hashlimit_proc_net_exit(struct net *net)
 {
 	struct xt_hashlimit_htable *hinfo;
-	struct hlist_node *pos;
 	struct proc_dir_entry *pde;
 	struct hashlimit_net *hashlimit_net = hashlimit_pernet(net);

@@ -890,7 +886,7 @@ static void __net_exit hashlimit_proc_net_exit(struct net *net)
 	if (pde == NULL)
 		pde = hashlimit_net->ip6t_hashlimit;

-	hlist_for_each_entry(hinfo, pos, &hashlimit_net->htables, node)
+	hlist_for_each_entry(hinfo, &hashlimit_net->htables, node)
 		remove_proc_entry(hinfo->pde->name, pde);

 	hashlimit_net->ipt_hashlimit = NULL;
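Worth noting in passing: the if (!hlist_empty(...)) guards that survive in dsthash_find() and dl_seq_show() are belt-and-braces rather than required. With the old iterator as with the new one, an empty bucket makes the loop body run zero times, so the unguarded form would behave identically:

	hlist_for_each_entry_rcu(ent, &ht->hash[hash], node)
		if (dst_cmp(ent, dst)) {
			spin_lock(&ent->lock);
			return ent;
		}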
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index 8097b4f3ead4..1e3fd5bfcd86 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -248,11 +248,10 @@ static struct sock *netlink_lookup(struct net *net, int protocol, u32 portid)
 	struct nl_portid_hash *hash = &nl_table[protocol].hash;
 	struct hlist_head *head;
 	struct sock *sk;
-	struct hlist_node *node;

 	read_lock(&nl_table_lock);
 	head = nl_portid_hashfn(hash, portid);
-	sk_for_each(sk, node, head) {
+	sk_for_each(sk, head) {
 		if (net_eq(sock_net(sk), net) && (nlk_sk(sk)->portid == portid)) {
 			sock_hold(sk);
 			goto found;
@@ -312,9 +311,9 @@ static int nl_portid_hash_rehash(struct nl_portid_hash *hash, int grow)

 	for (i = 0; i <= omask; i++) {
 		struct sock *sk;
-		struct hlist_node *node, *tmp;
+		struct hlist_node *tmp;

-		sk_for_each_safe(sk, node, tmp, &otable[i])
+		sk_for_each_safe(sk, tmp, &otable[i])
 			__sk_add_node(sk, nl_portid_hashfn(hash, nlk_sk(sk)->portid));
 	}

@@ -344,7 +343,6 @@ static void
 netlink_update_listeners(struct sock *sk)
 {
 	struct netlink_table *tbl = &nl_table[sk->sk_protocol];
-	struct hlist_node *node;
 	unsigned long mask;
 	unsigned int i;
 	struct listeners *listeners;
@@ -355,7 +353,7 @@ netlink_update_listeners(struct sock *sk)

 	for (i = 0; i < NLGRPLONGS(tbl->groups); i++) {
 		mask = 0;
-		sk_for_each_bound(sk, node, &tbl->mc_list) {
+		sk_for_each_bound(sk, &tbl->mc_list) {
 			if (i < NLGRPLONGS(nlk_sk(sk)->ngroups))
 				mask |= nlk_sk(sk)->groups[i];
 		}
@@ -371,18 +369,17 @@ static int netlink_insert(struct sock *sk, struct net *net, u32 portid)
 	struct hlist_head *head;
 	int err = -EADDRINUSE;
 	struct sock *osk;
-	struct hlist_node *node;
 	int len;

 	netlink_table_grab();
 	head = nl_portid_hashfn(hash, portid);
 	len = 0;
-	sk_for_each(osk, node, head) {
+	sk_for_each(osk, head) {
 		if (net_eq(sock_net(osk), net) && (nlk_sk(osk)->portid == portid))
 			break;
 		len++;
 	}
-	if (node)
+	if (osk)
 		goto err;

 	err = -EBUSY;
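The netlink_insert() hunk above is the one spot in this section where the conversion is more than parameter surgery, and one of the few places that actually used the old node parameter. With the new iterator the entry pointer is the cursor: it is NULL once the walk falls off the end of the bucket and non-NULL after a break, so the post-loop duplicate test moves from the raw node to osk itself:

	sk_for_each(osk, head) {
		if (net_eq(sock_net(osk), net) && (nlk_sk(osk)->portid == portid))
			break;		/* leaves osk non-NULL */
		len++;
	}
	if (osk)	/* NULL here iff no duplicate was found */
		goto err;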
@@ -575,7 +572,6 @@ static int netlink_autobind(struct socket *sock)
 	struct nl_portid_hash *hash = &nl_table[sk->sk_protocol].hash;
 	struct hlist_head *head;
 	struct sock *osk;
-	struct hlist_node *node;
 	s32 portid = task_tgid_vnr(current);
 	int err;
 	static s32 rover = -4097;
@@ -584,7 +580,7 @@ retry:
 	cond_resched();
 	netlink_table_grab();
 	head = nl_portid_hashfn(hash, portid);
-	sk_for_each(osk, node, head) {
+	sk_for_each(osk, head) {
 		if (!net_eq(sock_net(osk), net))
 			continue;
 		if (nlk_sk(osk)->portid == portid) {
@@ -1101,7 +1097,6 @@ int netlink_broadcast_filtered(struct sock *ssk, struct sk_buff *skb, u32 portid
 {
 	struct net *net = sock_net(ssk);
 	struct netlink_broadcast_data info;
-	struct hlist_node *node;
 	struct sock *sk;

 	skb = netlink_trim(skb, allocation);
@@ -1124,7 +1119,7 @@ int netlink_broadcast_filtered(struct sock *ssk, struct sk_buff *skb, u32 portid

 	netlink_lock_table();

-	sk_for_each_bound(sk, node, &nl_table[ssk->sk_protocol].mc_list)
+	sk_for_each_bound(sk, &nl_table[ssk->sk_protocol].mc_list)
 		do_one_broadcast(sk, &info);

 	consume_skb(skb);
@@ -1200,7 +1195,6 @@ out:
 int netlink_set_err(struct sock *ssk, u32 portid, u32 group, int code)
 {
 	struct netlink_set_err_data info;
-	struct hlist_node *node;
 	struct sock *sk;
 	int ret = 0;

@@ -1212,7 +1206,7 @@ int netlink_set_err(struct sock *ssk, u32 portid, u32 group, int code)

 	read_lock(&nl_table_lock);

-	sk_for_each_bound(sk, node, &nl_table[ssk->sk_protocol].mc_list)
+	sk_for_each_bound(sk, &nl_table[ssk->sk_protocol].mc_list)
 		ret += do_one_set_err(sk, &info);

 	read_unlock(&nl_table_lock);
@@ -1676,10 +1670,9 @@ int netlink_change_ngroups(struct sock *sk, unsigned int groups)
 void __netlink_clear_multicast_users(struct sock *ksk, unsigned int group)
 {
 	struct sock *sk;
-	struct hlist_node *node;
 	struct netlink_table *tbl = &nl_table[ksk->sk_protocol];

-	sk_for_each_bound(sk, node, &tbl->mc_list)
+	sk_for_each_bound(sk, &tbl->mc_list)
 		netlink_update_socket_mc(nlk_sk(sk), group, 0);
 }

@@ -1974,14 +1967,13 @@ static struct sock *netlink_seq_socket_idx(struct seq_file *seq, loff_t pos)
 	struct nl_seq_iter *iter = seq->private;
 	int i, j;
 	struct sock *s;
-	struct hlist_node *node;
 	loff_t off = 0;

 	for (i = 0; i < MAX_LINKS; i++) {
 		struct nl_portid_hash *hash = &nl_table[i].hash;

 		for (j = 0; j <= hash->mask; j++) {
-			sk_for_each(s, node, &hash->table[j]) {
+			sk_for_each(s, &hash->table[j]) {
 				if (sock_net(s) != seq_file_net(seq))
 					continue;
 				if (off == pos) {
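All the sk_for_each* conversions in this file, and in the protocol files below, follow from the socket wrappers, which this series keeps as thin aliases of the hlist iterators. Sketched from include/net/sock.h as updated here (member names per struct sock):

#define sk_for_each(__sk, list) \
	hlist_for_each_entry(__sk, list, sk_node)
#define sk_for_each_rcu(__sk, list) \
	hlist_for_each_entry_rcu(__sk, list, sk_node)
#define sk_for_each_safe(__sk, tmp, list) \
	hlist_for_each_entry_safe(__sk, tmp, list, sk_node)
#define sk_for_each_bound(__sk, list) \
	hlist_for_each_entry(__sk, list, sk_bind_node)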
diff --git a/net/netrom/af_netrom.c b/net/netrom/af_netrom.c
index 297b07a029de..d1fa1d9ffd2e 100644
--- a/net/netrom/af_netrom.c
+++ b/net/netrom/af_netrom.c
@@ -104,10 +104,9 @@ static void nr_remove_socket(struct sock *sk)
 static void nr_kill_by_device(struct net_device *dev)
 {
 	struct sock *s;
-	struct hlist_node *node;

 	spin_lock_bh(&nr_list_lock);
-	sk_for_each(s, node, &nr_list)
+	sk_for_each(s, &nr_list)
 		if (nr_sk(s)->device == dev)
 			nr_disconnect(s, ENETUNREACH);
 	spin_unlock_bh(&nr_list_lock);
@@ -149,10 +148,9 @@ static void nr_insert_socket(struct sock *sk)
 static struct sock *nr_find_listener(ax25_address *addr)
 {
 	struct sock *s;
-	struct hlist_node *node;

 	spin_lock_bh(&nr_list_lock);
-	sk_for_each(s, node, &nr_list)
+	sk_for_each(s, &nr_list)
 		if (!ax25cmp(&nr_sk(s)->source_addr, addr) &&
 		    s->sk_state == TCP_LISTEN) {
 			bh_lock_sock(s);
@@ -170,10 +168,9 @@ found:
 static struct sock *nr_find_socket(unsigned char index, unsigned char id)
 {
 	struct sock *s;
-	struct hlist_node *node;

 	spin_lock_bh(&nr_list_lock);
-	sk_for_each(s, node, &nr_list) {
+	sk_for_each(s, &nr_list) {
 		struct nr_sock *nr = nr_sk(s);

 		if (nr->my_index == index && nr->my_id == id) {
@@ -194,10 +191,9 @@ static struct sock *nr_find_peer(unsigned char index, unsigned char id,
 	ax25_address *dest)
 {
 	struct sock *s;
-	struct hlist_node *node;

 	spin_lock_bh(&nr_list_lock);
-	sk_for_each(s, node, &nr_list) {
+	sk_for_each(s, &nr_list) {
 		struct nr_sock *nr = nr_sk(s);

 		if (nr->your_index == index && nr->your_id == id &&
diff --git a/net/netrom/nr_route.c b/net/netrom/nr_route.c
index 70ffff76a967..b976d5eff2de 100644
--- a/net/netrom/nr_route.c
+++ b/net/netrom/nr_route.c
@@ -49,10 +49,9 @@ static struct nr_node *nr_node_get(ax25_address *callsign)
 {
 	struct nr_node *found = NULL;
 	struct nr_node *nr_node;
-	struct hlist_node *node;

 	spin_lock_bh(&nr_node_list_lock);
-	nr_node_for_each(nr_node, node, &nr_node_list)
+	nr_node_for_each(nr_node, &nr_node_list)
 		if (ax25cmp(callsign, &nr_node->callsign) == 0) {
 			nr_node_hold(nr_node);
 			found = nr_node;
@@ -67,10 +66,9 @@ static struct nr_neigh *nr_neigh_get_dev(ax25_address *callsign,
 {
 	struct nr_neigh *found = NULL;
 	struct nr_neigh *nr_neigh;
-	struct hlist_node *node;

 	spin_lock_bh(&nr_neigh_list_lock);
-	nr_neigh_for_each(nr_neigh, node, &nr_neigh_list)
+	nr_neigh_for_each(nr_neigh, &nr_neigh_list)
 		if (ax25cmp(callsign, &nr_neigh->callsign) == 0 &&
 		    nr_neigh->dev == dev) {
 			nr_neigh_hold(nr_neigh);
@@ -114,10 +112,9 @@ static int __must_check nr_add_node(ax25_address *nr, const char *mnemonic,
 	 */
 	if (nr_neigh != NULL && nr_neigh->failed != 0 && quality == 0) {
 		struct nr_node *nr_nodet;
-		struct hlist_node *node;

 		spin_lock_bh(&nr_node_list_lock);
-		nr_node_for_each(nr_nodet, node, &nr_node_list) {
+		nr_node_for_each(nr_nodet, &nr_node_list) {
 			nr_node_lock(nr_nodet);
 			for (i = 0; i < nr_nodet->count; i++)
 				if (nr_nodet->routes[i].neighbour == nr_neigh)
@@ -485,11 +482,11 @@ static int nr_dec_obs(void)
 {
 	struct nr_neigh *nr_neigh;
 	struct nr_node *s;
-	struct hlist_node *node, *nodet;
+	struct hlist_node *nodet;
 	int i;

 	spin_lock_bh(&nr_node_list_lock);
-	nr_node_for_each_safe(s, node, nodet, &nr_node_list) {
+	nr_node_for_each_safe(s, nodet, &nr_node_list) {
 		nr_node_lock(s);
 		for (i = 0; i < s->count; i++) {
 			switch (s->routes[i].obs_count) {
@@ -540,15 +537,15 @@ static int nr_dec_obs(void)
 void nr_rt_device_down(struct net_device *dev)
 {
 	struct nr_neigh *s;
-	struct hlist_node *node, *nodet, *node2, *node2t;
+	struct hlist_node *nodet, *node2t;
 	struct nr_node *t;
 	int i;

 	spin_lock_bh(&nr_neigh_list_lock);
-	nr_neigh_for_each_safe(s, node, nodet, &nr_neigh_list) {
+	nr_neigh_for_each_safe(s, nodet, &nr_neigh_list) {
 		if (s->dev == dev) {
 			spin_lock_bh(&nr_node_list_lock);
-			nr_node_for_each_safe(t, node2, node2t, &nr_node_list) {
+			nr_node_for_each_safe(t, node2t, &nr_node_list) {
 				nr_node_lock(t);
 				for (i = 0; i < t->count; i++) {
 					if (t->routes[i].neighbour == s) {
@@ -737,11 +734,10 @@ int nr_rt_ioctl(unsigned int cmd, void __user *arg)
 void nr_link_failed(ax25_cb *ax25, int reason)
 {
 	struct nr_neigh *s, *nr_neigh = NULL;
-	struct hlist_node *node;
 	struct nr_node *nr_node = NULL;

 	spin_lock_bh(&nr_neigh_list_lock);
-	nr_neigh_for_each(s, node, &nr_neigh_list) {
+	nr_neigh_for_each(s, &nr_neigh_list) {
 		if (s->ax25 == ax25) {
 			nr_neigh_hold(s);
 			nr_neigh = s;
@@ -761,7 +757,7 @@ void nr_link_failed(ax25_cb *ax25, int reason)
 		return;
 	}
 	spin_lock_bh(&nr_node_list_lock);
-	nr_node_for_each(nr_node, node, &nr_node_list) {
+	nr_node_for_each(nr_node, &nr_node_list) {
 		nr_node_lock(nr_node);
 		if (nr_node->which < nr_node->count &&
 		    nr_node->routes[nr_node->which].neighbour == nr_neigh)
@@ -1013,16 +1009,16 @@ void __exit nr_rt_free(void)
 {
 	struct nr_neigh *s = NULL;
 	struct nr_node *t = NULL;
-	struct hlist_node *node, *nodet;
+	struct hlist_node *nodet;

 	spin_lock_bh(&nr_neigh_list_lock);
 	spin_lock_bh(&nr_node_list_lock);
-	nr_node_for_each_safe(t, node, nodet, &nr_node_list) {
+	nr_node_for_each_safe(t, nodet, &nr_node_list) {
 		nr_node_lock(t);
 		nr_remove_node_locked(t);
 		nr_node_unlock(t);
 	}
-	nr_neigh_for_each_safe(s, node, nodet, &nr_neigh_list) {
+	nr_neigh_for_each_safe(s, nodet, &nr_neigh_list) {
 		while(s->count) {
 			s->count--;
 			nr_neigh_put(s);
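Note how nr_rt_device_down() shrinks from four cursors to two: each nested _safe walk still needs its own lookahead pointer, one per loop level, and only the redundant current-position cursors disappear. Condensed from the hunk above:

	struct hlist_node *nodet, *node2t;	/* one lookahead per nesting level */

	nr_neigh_for_each_safe(s, nodet, &nr_neigh_list) {
		if (s->dev != dev)
			continue;
		nr_node_for_each_safe(t, node2t, &nr_node_list) {
			/* drop the routes that go through s */
		}
	}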
diff --git a/net/nfc/llcp/llcp.c b/net/nfc/llcp/llcp.c
index 746f5a2f9804..7f8266dd14cb 100644
--- a/net/nfc/llcp/llcp.c
+++ b/net/nfc/llcp/llcp.c
@@ -71,14 +71,14 @@ static void nfc_llcp_socket_purge(struct nfc_llcp_sock *sock)
 static void nfc_llcp_socket_release(struct nfc_llcp_local *local, bool listen)
 {
 	struct sock *sk;
-	struct hlist_node *node, *tmp;
+	struct hlist_node *tmp;
 	struct nfc_llcp_sock *llcp_sock;

 	skb_queue_purge(&local->tx_queue);

 	write_lock(&local->sockets.lock);

-	sk_for_each_safe(sk, node, tmp, &local->sockets.head) {
+	sk_for_each_safe(sk, tmp, &local->sockets.head) {
 		llcp_sock = nfc_llcp_sock(sk);

 		bh_lock_sock(sk);
@@ -171,7 +171,6 @@ static struct nfc_llcp_sock *nfc_llcp_sock_get(struct nfc_llcp_local *local,
 					       u8 ssap, u8 dsap)
 {
 	struct sock *sk;
-	struct hlist_node *node;
 	struct nfc_llcp_sock *llcp_sock, *tmp_sock;

 	pr_debug("ssap dsap %d %d\n", ssap, dsap);
@@ -183,7 +182,7 @@ static struct nfc_llcp_sock *nfc_llcp_sock_get(struct nfc_llcp_local *local,

 	llcp_sock = NULL;

-	sk_for_each(sk, node, &local->sockets.head) {
+	sk_for_each(sk, &local->sockets.head) {
 		tmp_sock = nfc_llcp_sock(sk);

 		if (tmp_sock->ssap == ssap && tmp_sock->dsap == dsap) {
@@ -272,7 +271,6 @@ struct nfc_llcp_sock *nfc_llcp_sock_from_sn(struct nfc_llcp_local *local,
 					    u8 *sn, size_t sn_len)
 {
 	struct sock *sk;
-	struct hlist_node *node;
 	struct nfc_llcp_sock *llcp_sock, *tmp_sock;

 	pr_debug("sn %zd %p\n", sn_len, sn);
@@ -284,7 +282,7 @@ struct nfc_llcp_sock *nfc_llcp_sock_from_sn(struct nfc_llcp_local *local,

 	llcp_sock = NULL;

-	sk_for_each(sk, node, &local->sockets.head) {
+	sk_for_each(sk, &local->sockets.head) {
 		tmp_sock = nfc_llcp_sock(sk);

 		pr_debug("llcp sock %p\n", tmp_sock);
@@ -601,14 +599,13 @@ static void nfc_llcp_set_nrns(struct nfc_llcp_sock *sock, struct sk_buff *pdu)
 void nfc_llcp_send_to_raw_sock(struct nfc_llcp_local *local,
 			       struct sk_buff *skb, u8 direction)
 {
-	struct hlist_node *node;
 	struct sk_buff *skb_copy = NULL, *nskb;
 	struct sock *sk;
 	u8 *data;

 	read_lock(&local->raw_sockets.lock);

-	sk_for_each(sk, node, &local->raw_sockets.head) {
+	sk_for_each(sk, &local->raw_sockets.head) {
 		if (sk->sk_state != LLCP_BOUND)
 			continue;

@@ -697,11 +694,10 @@ static struct nfc_llcp_sock *nfc_llcp_connecting_sock_get(struct nfc_llcp_local
 {
 	struct sock *sk;
 	struct nfc_llcp_sock *llcp_sock;
-	struct hlist_node *node;

 	read_lock(&local->connecting_sockets.lock);

-	sk_for_each(sk, node, &local->connecting_sockets.head) {
+	sk_for_each(sk, &local->connecting_sockets.head) {
 		llcp_sock = nfc_llcp_sock(sk);

 		if (llcp_sock->ssap == ssap) {
diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c
index 9dc537df46c4..e87a26506dba 100644
--- a/net/openvswitch/datapath.c
+++ b/net/openvswitch/datapath.c
@@ -158,11 +158,10 @@ static struct hlist_head *vport_hash_bucket(const struct datapath *dp,
 struct vport *ovs_lookup_vport(const struct datapath *dp, u16 port_no)
 {
 	struct vport *vport;
-	struct hlist_node *n;
 	struct hlist_head *head;

 	head = vport_hash_bucket(dp, port_no);
-	hlist_for_each_entry_rcu(vport, n, head, dp_hash_node) {
+	hlist_for_each_entry_rcu(vport, head, dp_hash_node) {
 		if (vport->port_no == port_no)
 			return vport;
 	}
@@ -1386,9 +1385,9 @@ static void __dp_destroy(struct datapath *dp)

 	for (i = 0; i < DP_VPORT_HASH_BUCKETS; i++) {
 		struct vport *vport;
-		struct hlist_node *node, *n;
+		struct hlist_node *n;

-		hlist_for_each_entry_safe(vport, node, n, &dp->ports[i], dp_hash_node)
+		hlist_for_each_entry_safe(vport, n, &dp->ports[i], dp_hash_node)
 			if (vport->port_no != OVSP_LOCAL)
 				ovs_dp_detach_port(vport);
 	}
@@ -1825,10 +1824,9 @@ static int ovs_vport_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
 	rcu_read_lock();
 	for (i = bucket; i < DP_VPORT_HASH_BUCKETS; i++) {
 		struct vport *vport;
-		struct hlist_node *n;

 		j = 0;
-		hlist_for_each_entry_rcu(vport, n, &dp->ports[i], dp_hash_node) {
+		hlist_for_each_entry_rcu(vport, &dp->ports[i], dp_hash_node) {
 			if (j >= skip &&
 			    ovs_vport_cmd_fill_info(vport, skb,
 						    NETLINK_CB(cb->skb).portid,
diff --git a/net/openvswitch/flow.c b/net/openvswitch/flow.c
index c3294cebc4f2..20605ecf100b 100644
--- a/net/openvswitch/flow.c
+++ b/net/openvswitch/flow.c
@@ -299,10 +299,10 @@ void ovs_flow_tbl_destroy(struct flow_table *table)
 	for (i = 0; i < table->n_buckets; i++) {
 		struct sw_flow *flow;
 		struct hlist_head *head = flex_array_get(table->buckets, i);
-		struct hlist_node *node, *n;
+		struct hlist_node *n;
 		int ver = table->node_ver;

-		hlist_for_each_entry_safe(flow, node, n, head, hash_node[ver]) {
+		hlist_for_each_entry_safe(flow, n, head, hash_node[ver]) {
 			hlist_del_rcu(&flow->hash_node[ver]);
 			ovs_flow_free(flow);
 		}
@@ -332,7 +332,6 @@ struct sw_flow *ovs_flow_tbl_next(struct flow_table *table, u32 *bucket, u32 *la
 {
 	struct sw_flow *flow;
 	struct hlist_head *head;
-	struct hlist_node *n;
 	int ver;
 	int i;

@@ -340,7 +339,7 @@ struct sw_flow *ovs_flow_tbl_next(struct flow_table *table, u32 *bucket, u32 *la
 	while (*bucket < table->n_buckets) {
 		i = 0;
 		head = flex_array_get(table->buckets, *bucket);
-		hlist_for_each_entry_rcu(flow, n, head, hash_node[ver]) {
+		hlist_for_each_entry_rcu(flow, head, hash_node[ver]) {
 			if (i < *last) {
 				i++;
 				continue;
@@ -367,11 +366,10 @@ static void flow_table_copy_flows(struct flow_table *old, struct flow_table *new
 	for (i = 0; i < old->n_buckets; i++) {
 		struct sw_flow *flow;
 		struct hlist_head *head;
-		struct hlist_node *n;

 		head = flex_array_get(old->buckets, i);

-		hlist_for_each_entry(flow, n, head, hash_node[old_ver])
+		hlist_for_each_entry(flow, head, hash_node[old_ver])
 			ovs_flow_tbl_insert(new, flow);
 	}
 	old->keep_flows = true;
@@ -766,14 +764,13 @@ struct sw_flow *ovs_flow_tbl_lookup(struct flow_table *table,
 				    struct sw_flow_key *key, int key_len)
 {
 	struct sw_flow *flow;
-	struct hlist_node *n;
 	struct hlist_head *head;
 	u32 hash;

 	hash = ovs_flow_hash(key, key_len);

 	head = find_bucket(table, hash);
-	hlist_for_each_entry_rcu(flow, n, head, hash_node[table->node_ver]) {
+	hlist_for_each_entry_rcu(flow, head, hash_node[table->node_ver]) {

 		if (flow->hash == hash &&
 		    !memcmp(&flow->key, key, key_len)) {
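A subtlety the flow-table hunks rely on: the member argument is substituted textually into pos->member.next, so it can be an array element such as hash_node[ver]. That is what lets one macro walk either generation of the double-hashed table. Condensed from ovs_flow_tbl_destroy() above:

	int ver = table->node_ver;

	hlist_for_each_entry_safe(flow, n, head, hash_node[ver]) {
		/* the macro's lookahead reads flow->hash_node[ver].next */
		hlist_del_rcu(&flow->hash_node[ver]);
		ovs_flow_free(flow);
	}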
diff --git a/net/openvswitch/vport.c b/net/openvswitch/vport.c
index 70af0bedbac4..ba717cc038b3 100644
--- a/net/openvswitch/vport.c
+++ b/net/openvswitch/vport.c
@@ -86,9 +86,8 @@ struct vport *ovs_vport_locate(struct net *net, const char *name)
 {
 	struct hlist_head *bucket = hash_bucket(net, name);
 	struct vport *vport;
-	struct hlist_node *node;

-	hlist_for_each_entry_rcu(vport, node, bucket, hash_node)
+	hlist_for_each_entry_rcu(vport, bucket, hash_node)
 		if (!strcmp(name, vport->ops->get_name(vport)) &&
 		    net_eq(ovs_dp_get_net(vport->dp), net))
 			return vport;
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index c7bfeff10767..1d6793dbfbae 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -3263,12 +3263,11 @@ static int packet_getsockopt(struct socket *sock, int level, int optname,
 static int packet_notifier(struct notifier_block *this, unsigned long msg, void *data)
 {
 	struct sock *sk;
-	struct hlist_node *node;
 	struct net_device *dev = data;
 	struct net *net = dev_net(dev);

 	rcu_read_lock();
-	sk_for_each_rcu(sk, node, &net->packet.sklist) {
+	sk_for_each_rcu(sk, &net->packet.sklist) {
 		struct packet_sock *po = pkt_sk(sk);

 		switch (msg) {
diff --git a/net/packet/diag.c b/net/packet/diag.c
index 8db6e21c46bd..d3fcd1ebef7e 100644
--- a/net/packet/diag.c
+++ b/net/packet/diag.c
@@ -172,13 +172,12 @@ static int packet_diag_dump(struct sk_buff *skb, struct netlink_callback *cb)
 	struct packet_diag_req *req;
 	struct net *net;
 	struct sock *sk;
-	struct hlist_node *node;

 	net = sock_net(skb->sk);
 	req = nlmsg_data(cb->nlh);

 	mutex_lock(&net->packet.sklist_lock);
-	sk_for_each(sk, node, &net->packet.sklist) {
+	sk_for_each(sk, &net->packet.sklist) {
 		if (!net_eq(sock_net(sk), net))
 			continue;
 		if (num < s_num)
diff --git a/net/phonet/pep.c b/net/phonet/pep.c
index 576f22c9c76e..e77411735de8 100644
--- a/net/phonet/pep.c
+++ b/net/phonet/pep.c
@@ -640,11 +640,10 @@ static struct sock *pep_find_pipe(const struct hlist_head *hlist,
 				  const struct sockaddr_pn *dst,
 				  u8 pipe_handle)
 {
-	struct hlist_node *node;
 	struct sock *sknode;
 	u16 dobj = pn_sockaddr_get_object(dst);

-	sk_for_each(sknode, node, hlist) {
+	sk_for_each(sknode, hlist) {
 		struct pep_sock *pnnode = pep_sk(sknode);

 		/* Ports match, but addresses might not: */
diff --git a/net/phonet/socket.c b/net/phonet/socket.c index b7e982782255..1afd1381cdc7 100644 --- a/net/phonet/socket.c +++ b/net/phonet/socket.c | |||
@@ -76,7 +76,6 @@ static struct hlist_head *pn_hash_list(u16 obj) | |||
76 | */ | 76 | */ |
77 | struct sock *pn_find_sock_by_sa(struct net *net, const struct sockaddr_pn *spn) | 77 | struct sock *pn_find_sock_by_sa(struct net *net, const struct sockaddr_pn *spn) |
78 | { | 78 | { |
79 | struct hlist_node *node; | ||
80 | struct sock *sknode; | 79 | struct sock *sknode; |
81 | struct sock *rval = NULL; | 80 | struct sock *rval = NULL; |
82 | u16 obj = pn_sockaddr_get_object(spn); | 81 | u16 obj = pn_sockaddr_get_object(spn); |
@@ -84,7 +83,7 @@ struct sock *pn_find_sock_by_sa(struct net *net, const struct sockaddr_pn *spn) | |||
84 | struct hlist_head *hlist = pn_hash_list(obj); | 83 | struct hlist_head *hlist = pn_hash_list(obj); |
85 | 84 | ||
86 | rcu_read_lock(); | 85 | rcu_read_lock(); |
87 | sk_for_each_rcu(sknode, node, hlist) { | 86 | sk_for_each_rcu(sknode, hlist) { |
88 | struct pn_sock *pn = pn_sk(sknode); | 87 | struct pn_sock *pn = pn_sk(sknode); |
89 | BUG_ON(!pn->sobject); /* unbound socket */ | 88 | BUG_ON(!pn->sobject); /* unbound socket */ |
90 | 89 | ||
@@ -120,10 +119,9 @@ void pn_deliver_sock_broadcast(struct net *net, struct sk_buff *skb) | |||
120 | 119 | ||
121 | rcu_read_lock(); | 120 | rcu_read_lock(); |
122 | for (h = 0; h < PN_HASHSIZE; h++) { | 121 | for (h = 0; h < PN_HASHSIZE; h++) { |
123 | struct hlist_node *node; | ||
124 | struct sock *sknode; | 122 | struct sock *sknode; |
125 | 123 | ||
126 | sk_for_each(sknode, node, hlist) { | 124 | sk_for_each(sknode, hlist) { |
127 | struct sk_buff *clone; | 125 | struct sk_buff *clone; |
128 | 126 | ||
129 | if (!net_eq(sock_net(sknode), net)) | 127 | if (!net_eq(sock_net(sknode), net)) |
@@ -543,12 +541,11 @@ static struct sock *pn_sock_get_idx(struct seq_file *seq, loff_t pos) | |||
543 | { | 541 | { |
544 | struct net *net = seq_file_net(seq); | 542 | struct net *net = seq_file_net(seq); |
545 | struct hlist_head *hlist = pnsocks.hlist; | 543 | struct hlist_head *hlist = pnsocks.hlist; |
546 | struct hlist_node *node; | ||
547 | struct sock *sknode; | 544 | struct sock *sknode; |
548 | unsigned int h; | 545 | unsigned int h; |
549 | 546 | ||
550 | for (h = 0; h < PN_HASHSIZE; h++) { | 547 | for (h = 0; h < PN_HASHSIZE; h++) { |
551 | sk_for_each_rcu(sknode, node, hlist) { | 548 | sk_for_each_rcu(sknode, hlist) { |
552 | if (!net_eq(net, sock_net(sknode))) | 549 | if (!net_eq(net, sock_net(sknode))) |
553 | continue; | 550 | continue; |
554 | if (!pos) | 551 | if (!pos) |
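The RCU flavour differs only in how it loads the head and next pointers, dereferencing them through the RCU accessors so that lockless readers observe a consistent node; approximately (again a from-memory sketch, not authoritative):

    #define hlist_for_each_entry_rcu(pos, head, member) \
    	for (pos = hlist_entry_safe(rcu_dereference_raw(hlist_first_rcu(head)), \
    			typeof(*(pos)), member); \
    	     pos; \
    	     pos = hlist_entry_safe(rcu_dereference_raw(hlist_next_rcu( \
    			&(pos)->member)), typeof(*(pos)), member))

Callers still need rcu_read_lock() around the walk, exactly as pn_find_sock_by_sa and pn_sock_get_idx above already do.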
diff --git a/net/rds/bind.c b/net/rds/bind.c index 637bde56c9db..b5ad65a0067e 100644 --- a/net/rds/bind.c +++ b/net/rds/bind.c | |||
@@ -52,13 +52,12 @@ static struct rds_sock *rds_bind_lookup(__be32 addr, __be16 port, | |||
52 | struct rds_sock *insert) | 52 | struct rds_sock *insert) |
53 | { | 53 | { |
54 | struct rds_sock *rs; | 54 | struct rds_sock *rs; |
55 | struct hlist_node *node; | ||
56 | struct hlist_head *head = hash_to_bucket(addr, port); | 55 | struct hlist_head *head = hash_to_bucket(addr, port); |
57 | u64 cmp; | 56 | u64 cmp; |
58 | u64 needle = ((u64)be32_to_cpu(addr) << 32) | be16_to_cpu(port); | 57 | u64 needle = ((u64)be32_to_cpu(addr) << 32) | be16_to_cpu(port); |
59 | 58 | ||
60 | rcu_read_lock(); | 59 | rcu_read_lock(); |
61 | hlist_for_each_entry_rcu(rs, node, head, rs_bound_node) { | 60 | hlist_for_each_entry_rcu(rs, head, rs_bound_node) { |
62 | cmp = ((u64)be32_to_cpu(rs->rs_bound_addr) << 32) | | 61 | cmp = ((u64)be32_to_cpu(rs->rs_bound_addr) << 32) | |
63 | be16_to_cpu(rs->rs_bound_port); | 62 | be16_to_cpu(rs->rs_bound_port); |
64 | 63 | ||
diff --git a/net/rds/connection.c b/net/rds/connection.c index 9e07c756d1f9..642ad42c416b 100644 --- a/net/rds/connection.c +++ b/net/rds/connection.c | |||
@@ -69,9 +69,8 @@ static struct rds_connection *rds_conn_lookup(struct hlist_head *head, | |||
69 | struct rds_transport *trans) | 69 | struct rds_transport *trans) |
70 | { | 70 | { |
71 | struct rds_connection *conn, *ret = NULL; | 71 | struct rds_connection *conn, *ret = NULL; |
72 | struct hlist_node *pos; | ||
73 | 72 | ||
74 | hlist_for_each_entry_rcu(conn, pos, head, c_hash_node) { | 73 | hlist_for_each_entry_rcu(conn, head, c_hash_node) { |
75 | if (conn->c_faddr == faddr && conn->c_laddr == laddr && | 74 | if (conn->c_faddr == faddr && conn->c_laddr == laddr && |
76 | conn->c_trans == trans) { | 75 | conn->c_trans == trans) { |
77 | ret = conn; | 76 | ret = conn; |
@@ -376,7 +375,6 @@ static void rds_conn_message_info(struct socket *sock, unsigned int len, | |||
376 | int want_send) | 375 | int want_send) |
377 | { | 376 | { |
378 | struct hlist_head *head; | 377 | struct hlist_head *head; |
379 | struct hlist_node *pos; | ||
380 | struct list_head *list; | 378 | struct list_head *list; |
381 | struct rds_connection *conn; | 379 | struct rds_connection *conn; |
382 | struct rds_message *rm; | 380 | struct rds_message *rm; |
@@ -390,7 +388,7 @@ static void rds_conn_message_info(struct socket *sock, unsigned int len, | |||
390 | 388 | ||
391 | for (i = 0, head = rds_conn_hash; i < ARRAY_SIZE(rds_conn_hash); | 389 | for (i = 0, head = rds_conn_hash; i < ARRAY_SIZE(rds_conn_hash); |
392 | i++, head++) { | 390 | i++, head++) { |
393 | hlist_for_each_entry_rcu(conn, pos, head, c_hash_node) { | 391 | hlist_for_each_entry_rcu(conn, head, c_hash_node) { |
394 | if (want_send) | 392 | if (want_send) |
395 | list = &conn->c_send_queue; | 393 | list = &conn->c_send_queue; |
396 | else | 394 | else |
@@ -439,7 +437,6 @@ void rds_for_each_conn_info(struct socket *sock, unsigned int len, | |||
439 | { | 437 | { |
440 | uint64_t buffer[(item_len + 7) / 8]; | 438 | uint64_t buffer[(item_len + 7) / 8]; |
441 | struct hlist_head *head; | 439 | struct hlist_head *head; |
442 | struct hlist_node *pos; | ||
443 | struct rds_connection *conn; | 440 | struct rds_connection *conn; |
444 | size_t i; | 441 | size_t i; |
445 | 442 | ||
@@ -450,7 +447,7 @@ void rds_for_each_conn_info(struct socket *sock, unsigned int len, | |||
450 | 447 | ||
451 | for (i = 0, head = rds_conn_hash; i < ARRAY_SIZE(rds_conn_hash); | 448 | for (i = 0, head = rds_conn_hash; i < ARRAY_SIZE(rds_conn_hash); |
452 | i++, head++) { | 449 | i++, head++) { |
453 | hlist_for_each_entry_rcu(conn, pos, head, c_hash_node) { | 450 | hlist_for_each_entry_rcu(conn, head, c_hash_node) { |
454 | 451 | ||
455 | /* XXX no c_lock usage.. */ | 452 | /* XXX no c_lock usage.. */ |
456 | if (!visitor(conn, buffer)) | 453 | if (!visitor(conn, buffer)) |
diff --git a/net/rose/af_rose.c b/net/rose/af_rose.c index b768fe9d5e7a..cf68e6e4054a 100644 --- a/net/rose/af_rose.c +++ b/net/rose/af_rose.c | |||
@@ -165,10 +165,9 @@ static void rose_remove_socket(struct sock *sk) | |||
165 | void rose_kill_by_neigh(struct rose_neigh *neigh) | 165 | void rose_kill_by_neigh(struct rose_neigh *neigh) |
166 | { | 166 | { |
167 | struct sock *s; | 167 | struct sock *s; |
168 | struct hlist_node *node; | ||
169 | 168 | ||
170 | spin_lock_bh(&rose_list_lock); | 169 | spin_lock_bh(&rose_list_lock); |
171 | sk_for_each(s, node, &rose_list) { | 170 | sk_for_each(s, &rose_list) { |
172 | struct rose_sock *rose = rose_sk(s); | 171 | struct rose_sock *rose = rose_sk(s); |
173 | 172 | ||
174 | if (rose->neighbour == neigh) { | 173 | if (rose->neighbour == neigh) { |
@@ -186,10 +185,9 @@ void rose_kill_by_neigh(struct rose_neigh *neigh) | |||
186 | static void rose_kill_by_device(struct net_device *dev) | 185 | static void rose_kill_by_device(struct net_device *dev) |
187 | { | 186 | { |
188 | struct sock *s; | 187 | struct sock *s; |
189 | struct hlist_node *node; | ||
190 | 188 | ||
191 | spin_lock_bh(&rose_list_lock); | 189 | spin_lock_bh(&rose_list_lock); |
192 | sk_for_each(s, node, &rose_list) { | 190 | sk_for_each(s, &rose_list) { |
193 | struct rose_sock *rose = rose_sk(s); | 191 | struct rose_sock *rose = rose_sk(s); |
194 | 192 | ||
195 | if (rose->device == dev) { | 193 | if (rose->device == dev) { |
@@ -246,10 +244,9 @@ static void rose_insert_socket(struct sock *sk) | |||
246 | static struct sock *rose_find_listener(rose_address *addr, ax25_address *call) | 244 | static struct sock *rose_find_listener(rose_address *addr, ax25_address *call) |
247 | { | 245 | { |
248 | struct sock *s; | 246 | struct sock *s; |
249 | struct hlist_node *node; | ||
250 | 247 | ||
251 | spin_lock_bh(&rose_list_lock); | 248 | spin_lock_bh(&rose_list_lock); |
252 | sk_for_each(s, node, &rose_list) { | 249 | sk_for_each(s, &rose_list) { |
253 | struct rose_sock *rose = rose_sk(s); | 250 | struct rose_sock *rose = rose_sk(s); |
254 | 251 | ||
255 | if (!rosecmp(&rose->source_addr, addr) && | 252 | if (!rosecmp(&rose->source_addr, addr) && |
@@ -258,7 +255,7 @@ static struct sock *rose_find_listener(rose_address *addr, ax25_address *call) | |||
258 | goto found; | 255 | goto found; |
259 | } | 256 | } |
260 | 257 | ||
261 | sk_for_each(s, node, &rose_list) { | 258 | sk_for_each(s, &rose_list) { |
262 | struct rose_sock *rose = rose_sk(s); | 259 | struct rose_sock *rose = rose_sk(s); |
263 | 260 | ||
264 | if (!rosecmp(&rose->source_addr, addr) && | 261 | if (!rosecmp(&rose->source_addr, addr) && |
@@ -278,10 +275,9 @@ found: | |||
278 | struct sock *rose_find_socket(unsigned int lci, struct rose_neigh *neigh) | 275 | struct sock *rose_find_socket(unsigned int lci, struct rose_neigh *neigh) |
279 | { | 276 | { |
280 | struct sock *s; | 277 | struct sock *s; |
281 | struct hlist_node *node; | ||
282 | 278 | ||
283 | spin_lock_bh(&rose_list_lock); | 279 | spin_lock_bh(&rose_list_lock); |
284 | sk_for_each(s, node, &rose_list) { | 280 | sk_for_each(s, &rose_list) { |
285 | struct rose_sock *rose = rose_sk(s); | 281 | struct rose_sock *rose = rose_sk(s); |
286 | 282 | ||
287 | if (rose->lci == lci && rose->neighbour == neigh) | 283 | if (rose->lci == lci && rose->neighbour == neigh) |
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c index a181b484812a..c297e2a8e2a1 100644 --- a/net/sched/sch_api.c +++ b/net/sched/sch_api.c | |||
@@ -545,7 +545,7 @@ static void qdisc_class_hash_free(struct hlist_head *h, unsigned int n) | |||
545 | void qdisc_class_hash_grow(struct Qdisc *sch, struct Qdisc_class_hash *clhash) | 545 | void qdisc_class_hash_grow(struct Qdisc *sch, struct Qdisc_class_hash *clhash) |
546 | { | 546 | { |
547 | struct Qdisc_class_common *cl; | 547 | struct Qdisc_class_common *cl; |
548 | struct hlist_node *n, *next; | 548 | struct hlist_node *next; |
549 | struct hlist_head *nhash, *ohash; | 549 | struct hlist_head *nhash, *ohash; |
550 | unsigned int nsize, nmask, osize; | 550 | unsigned int nsize, nmask, osize; |
551 | unsigned int i, h; | 551 | unsigned int i, h; |
@@ -564,7 +564,7 @@ void qdisc_class_hash_grow(struct Qdisc *sch, struct Qdisc_class_hash *clhash) | |||
564 | 564 | ||
565 | sch_tree_lock(sch); | 565 | sch_tree_lock(sch); |
566 | for (i = 0; i < osize; i++) { | 566 | for (i = 0; i < osize; i++) { |
567 | hlist_for_each_entry_safe(cl, n, next, &ohash[i], hnode) { | 567 | hlist_for_each_entry_safe(cl, next, &ohash[i], hnode) { |
568 | h = qdisc_class_hash(cl->classid, nmask); | 568 | h = qdisc_class_hash(cl->classid, nmask); |
569 | hlist_add_head(&cl->hnode, &nhash[h]); | 569 | hlist_add_head(&cl->hnode, &nhash[h]); |
570 | } | 570 | } |
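Note that hlist_for_each_entry_safe sheds only the entry cursor; the lookahead pointer n survives because the loop body may unhash or free the current entry, as qdisc_class_hash_grow does when it moves each class to its new bucket. A sketch of its post-change shape:

    #define hlist_for_each_entry_safe(pos, n, head, member) \
    	for (pos = hlist_entry_safe((head)->first, typeof(*pos), member); \
    	     pos && ({ n = pos->member.next; 1; }); \
    	     pos = hlist_entry_safe(n, typeof(*pos), member))

Caching member.next before the body runs is what makes hlist_add_head(&cl->hnode, &nhash[h]) on the current entry safe mid-walk.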
diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c index 0e19948470b8..13aa47aa2ffb 100644 --- a/net/sched/sch_cbq.c +++ b/net/sched/sch_cbq.c | |||
@@ -1041,14 +1041,13 @@ static void cbq_adjust_levels(struct cbq_class *this) | |||
1041 | static void cbq_normalize_quanta(struct cbq_sched_data *q, int prio) | 1041 | static void cbq_normalize_quanta(struct cbq_sched_data *q, int prio) |
1042 | { | 1042 | { |
1043 | struct cbq_class *cl; | 1043 | struct cbq_class *cl; |
1044 | struct hlist_node *n; | ||
1045 | unsigned int h; | 1044 | unsigned int h; |
1046 | 1045 | ||
1047 | if (q->quanta[prio] == 0) | 1046 | if (q->quanta[prio] == 0) |
1048 | return; | 1047 | return; |
1049 | 1048 | ||
1050 | for (h = 0; h < q->clhash.hashsize; h++) { | 1049 | for (h = 0; h < q->clhash.hashsize; h++) { |
1051 | hlist_for_each_entry(cl, n, &q->clhash.hash[h], common.hnode) { | 1050 | hlist_for_each_entry(cl, &q->clhash.hash[h], common.hnode) { |
1052 | /* BUGGGG... Beware! This expression suffer of | 1051 | /* BUGGGG... Beware! This expression suffer of |
1053 | * arithmetic overflows! | 1052 | * arithmetic overflows! |
1054 | */ | 1053 | */ |
@@ -1087,10 +1086,9 @@ static void cbq_sync_defmap(struct cbq_class *cl) | |||
1087 | continue; | 1086 | continue; |
1088 | 1087 | ||
1089 | for (h = 0; h < q->clhash.hashsize; h++) { | 1088 | for (h = 0; h < q->clhash.hashsize; h++) { |
1090 | struct hlist_node *n; | ||
1091 | struct cbq_class *c; | 1089 | struct cbq_class *c; |
1092 | 1090 | ||
1093 | hlist_for_each_entry(c, n, &q->clhash.hash[h], | 1091 | hlist_for_each_entry(c, &q->clhash.hash[h], |
1094 | common.hnode) { | 1092 | common.hnode) { |
1095 | if (c->split == split && c->level < level && | 1093 | if (c->split == split && c->level < level && |
1096 | c->defmap & (1<<i)) { | 1094 | c->defmap & (1<<i)) { |
@@ -1210,7 +1208,6 @@ cbq_reset(struct Qdisc *sch) | |||
1210 | { | 1208 | { |
1211 | struct cbq_sched_data *q = qdisc_priv(sch); | 1209 | struct cbq_sched_data *q = qdisc_priv(sch); |
1212 | struct cbq_class *cl; | 1210 | struct cbq_class *cl; |
1213 | struct hlist_node *n; | ||
1214 | int prio; | 1211 | int prio; |
1215 | unsigned int h; | 1212 | unsigned int h; |
1216 | 1213 | ||
@@ -1228,7 +1225,7 @@ cbq_reset(struct Qdisc *sch) | |||
1228 | q->active[prio] = NULL; | 1225 | q->active[prio] = NULL; |
1229 | 1226 | ||
1230 | for (h = 0; h < q->clhash.hashsize; h++) { | 1227 | for (h = 0; h < q->clhash.hashsize; h++) { |
1231 | hlist_for_each_entry(cl, n, &q->clhash.hash[h], common.hnode) { | 1228 | hlist_for_each_entry(cl, &q->clhash.hash[h], common.hnode) { |
1232 | qdisc_reset(cl->q); | 1229 | qdisc_reset(cl->q); |
1233 | 1230 | ||
1234 | cl->next_alive = NULL; | 1231 | cl->next_alive = NULL; |
@@ -1697,7 +1694,7 @@ static void cbq_destroy_class(struct Qdisc *sch, struct cbq_class *cl) | |||
1697 | static void cbq_destroy(struct Qdisc *sch) | 1694 | static void cbq_destroy(struct Qdisc *sch) |
1698 | { | 1695 | { |
1699 | struct cbq_sched_data *q = qdisc_priv(sch); | 1696 | struct cbq_sched_data *q = qdisc_priv(sch); |
1700 | struct hlist_node *n, *next; | 1697 | struct hlist_node *next; |
1701 | struct cbq_class *cl; | 1698 | struct cbq_class *cl; |
1702 | unsigned int h; | 1699 | unsigned int h; |
1703 | 1700 | ||
@@ -1710,11 +1707,11 @@ static void cbq_destroy(struct Qdisc *sch) | |||
1710 | * be bound to classes which have been destroyed already. --TGR '04 | 1707 | * be bound to classes which have been destroyed already. --TGR '04 |
1711 | */ | 1708 | */ |
1712 | for (h = 0; h < q->clhash.hashsize; h++) { | 1709 | for (h = 0; h < q->clhash.hashsize; h++) { |
1713 | hlist_for_each_entry(cl, n, &q->clhash.hash[h], common.hnode) | 1710 | hlist_for_each_entry(cl, &q->clhash.hash[h], common.hnode) |
1714 | tcf_destroy_chain(&cl->filter_list); | 1711 | tcf_destroy_chain(&cl->filter_list); |
1715 | } | 1712 | } |
1716 | for (h = 0; h < q->clhash.hashsize; h++) { | 1713 | for (h = 0; h < q->clhash.hashsize; h++) { |
1717 | hlist_for_each_entry_safe(cl, n, next, &q->clhash.hash[h], | 1714 | hlist_for_each_entry_safe(cl, next, &q->clhash.hash[h], |
1718 | common.hnode) | 1715 | common.hnode) |
1719 | cbq_destroy_class(sch, cl); | 1716 | cbq_destroy_class(sch, cl); |
1720 | } | 1717 | } |
@@ -2013,14 +2010,13 @@ static void cbq_walk(struct Qdisc *sch, struct qdisc_walker *arg) | |||
2013 | { | 2010 | { |
2014 | struct cbq_sched_data *q = qdisc_priv(sch); | 2011 | struct cbq_sched_data *q = qdisc_priv(sch); |
2015 | struct cbq_class *cl; | 2012 | struct cbq_class *cl; |
2016 | struct hlist_node *n; | ||
2017 | unsigned int h; | 2013 | unsigned int h; |
2018 | 2014 | ||
2019 | if (arg->stop) | 2015 | if (arg->stop) |
2020 | return; | 2016 | return; |
2021 | 2017 | ||
2022 | for (h = 0; h < q->clhash.hashsize; h++) { | 2018 | for (h = 0; h < q->clhash.hashsize; h++) { |
2023 | hlist_for_each_entry(cl, n, &q->clhash.hash[h], common.hnode) { | 2019 | hlist_for_each_entry(cl, &q->clhash.hash[h], common.hnode) { |
2024 | if (arg->count < arg->skip) { | 2020 | if (arg->count < arg->skip) { |
2025 | arg->count++; | 2021 | arg->count++; |
2026 | continue; | 2022 | continue; |
diff --git a/net/sched/sch_drr.c b/net/sched/sch_drr.c index 71e50c80315f..759b308d1a8d 100644 --- a/net/sched/sch_drr.c +++ b/net/sched/sch_drr.c | |||
@@ -293,14 +293,13 @@ static void drr_walk(struct Qdisc *sch, struct qdisc_walker *arg) | |||
293 | { | 293 | { |
294 | struct drr_sched *q = qdisc_priv(sch); | 294 | struct drr_sched *q = qdisc_priv(sch); |
295 | struct drr_class *cl; | 295 | struct drr_class *cl; |
296 | struct hlist_node *n; | ||
297 | unsigned int i; | 296 | unsigned int i; |
298 | 297 | ||
299 | if (arg->stop) | 298 | if (arg->stop) |
300 | return; | 299 | return; |
301 | 300 | ||
302 | for (i = 0; i < q->clhash.hashsize; i++) { | 301 | for (i = 0; i < q->clhash.hashsize; i++) { |
303 | hlist_for_each_entry(cl, n, &q->clhash.hash[i], common.hnode) { | 302 | hlist_for_each_entry(cl, &q->clhash.hash[i], common.hnode) { |
304 | if (arg->count < arg->skip) { | 303 | if (arg->count < arg->skip) { |
305 | arg->count++; | 304 | arg->count++; |
306 | continue; | 305 | continue; |
@@ -451,11 +450,10 @@ static void drr_reset_qdisc(struct Qdisc *sch) | |||
451 | { | 450 | { |
452 | struct drr_sched *q = qdisc_priv(sch); | 451 | struct drr_sched *q = qdisc_priv(sch); |
453 | struct drr_class *cl; | 452 | struct drr_class *cl; |
454 | struct hlist_node *n; | ||
455 | unsigned int i; | 453 | unsigned int i; |
456 | 454 | ||
457 | for (i = 0; i < q->clhash.hashsize; i++) { | 455 | for (i = 0; i < q->clhash.hashsize; i++) { |
458 | hlist_for_each_entry(cl, n, &q->clhash.hash[i], common.hnode) { | 456 | hlist_for_each_entry(cl, &q->clhash.hash[i], common.hnode) { |
459 | if (cl->qdisc->q.qlen) | 457 | if (cl->qdisc->q.qlen) |
460 | list_del(&cl->alist); | 458 | list_del(&cl->alist); |
461 | qdisc_reset(cl->qdisc); | 459 | qdisc_reset(cl->qdisc); |
@@ -468,13 +466,13 @@ static void drr_destroy_qdisc(struct Qdisc *sch) | |||
468 | { | 466 | { |
469 | struct drr_sched *q = qdisc_priv(sch); | 467 | struct drr_sched *q = qdisc_priv(sch); |
470 | struct drr_class *cl; | 468 | struct drr_class *cl; |
471 | struct hlist_node *n, *next; | 469 | struct hlist_node *next; |
472 | unsigned int i; | 470 | unsigned int i; |
473 | 471 | ||
474 | tcf_destroy_chain(&q->filter_list); | 472 | tcf_destroy_chain(&q->filter_list); |
475 | 473 | ||
476 | for (i = 0; i < q->clhash.hashsize; i++) { | 474 | for (i = 0; i < q->clhash.hashsize; i++) { |
477 | hlist_for_each_entry_safe(cl, n, next, &q->clhash.hash[i], | 475 | hlist_for_each_entry_safe(cl, next, &q->clhash.hash[i], |
478 | common.hnode) | 476 | common.hnode) |
479 | drr_destroy_class(sch, cl); | 477 | drr_destroy_class(sch, cl); |
480 | } | 478 | } |
diff --git a/net/sched/sch_hfsc.c b/net/sched/sch_hfsc.c index 6c2ec4510540..9facea03faeb 100644 --- a/net/sched/sch_hfsc.c +++ b/net/sched/sch_hfsc.c | |||
@@ -1389,7 +1389,6 @@ static void | |||
1389 | hfsc_walk(struct Qdisc *sch, struct qdisc_walker *arg) | 1389 | hfsc_walk(struct Qdisc *sch, struct qdisc_walker *arg) |
1390 | { | 1390 | { |
1391 | struct hfsc_sched *q = qdisc_priv(sch); | 1391 | struct hfsc_sched *q = qdisc_priv(sch); |
1392 | struct hlist_node *n; | ||
1393 | struct hfsc_class *cl; | 1392 | struct hfsc_class *cl; |
1394 | unsigned int i; | 1393 | unsigned int i; |
1395 | 1394 | ||
@@ -1397,7 +1396,7 @@ hfsc_walk(struct Qdisc *sch, struct qdisc_walker *arg) | |||
1397 | return; | 1396 | return; |
1398 | 1397 | ||
1399 | for (i = 0; i < q->clhash.hashsize; i++) { | 1398 | for (i = 0; i < q->clhash.hashsize; i++) { |
1400 | hlist_for_each_entry(cl, n, &q->clhash.hash[i], | 1399 | hlist_for_each_entry(cl, &q->clhash.hash[i], |
1401 | cl_common.hnode) { | 1400 | cl_common.hnode) { |
1402 | if (arg->count < arg->skip) { | 1401 | if (arg->count < arg->skip) { |
1403 | arg->count++; | 1402 | arg->count++; |
@@ -1523,11 +1522,10 @@ hfsc_reset_qdisc(struct Qdisc *sch) | |||
1523 | { | 1522 | { |
1524 | struct hfsc_sched *q = qdisc_priv(sch); | 1523 | struct hfsc_sched *q = qdisc_priv(sch); |
1525 | struct hfsc_class *cl; | 1524 | struct hfsc_class *cl; |
1526 | struct hlist_node *n; | ||
1527 | unsigned int i; | 1525 | unsigned int i; |
1528 | 1526 | ||
1529 | for (i = 0; i < q->clhash.hashsize; i++) { | 1527 | for (i = 0; i < q->clhash.hashsize; i++) { |
1530 | hlist_for_each_entry(cl, n, &q->clhash.hash[i], cl_common.hnode) | 1528 | hlist_for_each_entry(cl, &q->clhash.hash[i], cl_common.hnode) |
1531 | hfsc_reset_class(cl); | 1529 | hfsc_reset_class(cl); |
1532 | } | 1530 | } |
1533 | q->eligible = RB_ROOT; | 1531 | q->eligible = RB_ROOT; |
@@ -1540,16 +1538,16 @@ static void | |||
1540 | hfsc_destroy_qdisc(struct Qdisc *sch) | 1538 | hfsc_destroy_qdisc(struct Qdisc *sch) |
1541 | { | 1539 | { |
1542 | struct hfsc_sched *q = qdisc_priv(sch); | 1540 | struct hfsc_sched *q = qdisc_priv(sch); |
1543 | struct hlist_node *n, *next; | 1541 | struct hlist_node *next; |
1544 | struct hfsc_class *cl; | 1542 | struct hfsc_class *cl; |
1545 | unsigned int i; | 1543 | unsigned int i; |
1546 | 1544 | ||
1547 | for (i = 0; i < q->clhash.hashsize; i++) { | 1545 | for (i = 0; i < q->clhash.hashsize; i++) { |
1548 | hlist_for_each_entry(cl, n, &q->clhash.hash[i], cl_common.hnode) | 1546 | hlist_for_each_entry(cl, &q->clhash.hash[i], cl_common.hnode) |
1549 | tcf_destroy_chain(&cl->filter_list); | 1547 | tcf_destroy_chain(&cl->filter_list); |
1550 | } | 1548 | } |
1551 | for (i = 0; i < q->clhash.hashsize; i++) { | 1549 | for (i = 0; i < q->clhash.hashsize; i++) { |
1552 | hlist_for_each_entry_safe(cl, n, next, &q->clhash.hash[i], | 1550 | hlist_for_each_entry_safe(cl, next, &q->clhash.hash[i], |
1553 | cl_common.hnode) | 1551 | cl_common.hnode) |
1554 | hfsc_destroy_class(sch, cl); | 1552 | hfsc_destroy_class(sch, cl); |
1555 | } | 1553 | } |
@@ -1564,12 +1562,11 @@ hfsc_dump_qdisc(struct Qdisc *sch, struct sk_buff *skb) | |||
1564 | unsigned char *b = skb_tail_pointer(skb); | 1562 | unsigned char *b = skb_tail_pointer(skb); |
1565 | struct tc_hfsc_qopt qopt; | 1563 | struct tc_hfsc_qopt qopt; |
1566 | struct hfsc_class *cl; | 1564 | struct hfsc_class *cl; |
1567 | struct hlist_node *n; | ||
1568 | unsigned int i; | 1565 | unsigned int i; |
1569 | 1566 | ||
1570 | sch->qstats.backlog = 0; | 1567 | sch->qstats.backlog = 0; |
1571 | for (i = 0; i < q->clhash.hashsize; i++) { | 1568 | for (i = 0; i < q->clhash.hashsize; i++) { |
1572 | hlist_for_each_entry(cl, n, &q->clhash.hash[i], cl_common.hnode) | 1569 | hlist_for_each_entry(cl, &q->clhash.hash[i], cl_common.hnode) |
1573 | sch->qstats.backlog += cl->qdisc->qstats.backlog; | 1570 | sch->qstats.backlog += cl->qdisc->qstats.backlog; |
1574 | } | 1571 | } |
1575 | 1572 | ||
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c index 03c2692ca01e..571f1d211f4d 100644 --- a/net/sched/sch_htb.c +++ b/net/sched/sch_htb.c | |||
@@ -949,11 +949,10 @@ static void htb_reset(struct Qdisc *sch) | |||
949 | { | 949 | { |
950 | struct htb_sched *q = qdisc_priv(sch); | 950 | struct htb_sched *q = qdisc_priv(sch); |
951 | struct htb_class *cl; | 951 | struct htb_class *cl; |
952 | struct hlist_node *n; | ||
953 | unsigned int i; | 952 | unsigned int i; |
954 | 953 | ||
955 | for (i = 0; i < q->clhash.hashsize; i++) { | 954 | for (i = 0; i < q->clhash.hashsize; i++) { |
956 | hlist_for_each_entry(cl, n, &q->clhash.hash[i], common.hnode) { | 955 | hlist_for_each_entry(cl, &q->clhash.hash[i], common.hnode) { |
957 | if (cl->level) | 956 | if (cl->level) |
958 | memset(&cl->un.inner, 0, sizeof(cl->un.inner)); | 957 | memset(&cl->un.inner, 0, sizeof(cl->un.inner)); |
959 | else { | 958 | else { |
@@ -1218,7 +1217,7 @@ static void htb_destroy_class(struct Qdisc *sch, struct htb_class *cl) | |||
1218 | static void htb_destroy(struct Qdisc *sch) | 1217 | static void htb_destroy(struct Qdisc *sch) |
1219 | { | 1218 | { |
1220 | struct htb_sched *q = qdisc_priv(sch); | 1219 | struct htb_sched *q = qdisc_priv(sch); |
1221 | struct hlist_node *n, *next; | 1220 | struct hlist_node *next; |
1222 | struct htb_class *cl; | 1221 | struct htb_class *cl; |
1223 | unsigned int i; | 1222 | unsigned int i; |
1224 | 1223 | ||
@@ -1232,11 +1231,11 @@ static void htb_destroy(struct Qdisc *sch) | |||
1232 | tcf_destroy_chain(&q->filter_list); | 1231 | tcf_destroy_chain(&q->filter_list); |
1233 | 1232 | ||
1234 | for (i = 0; i < q->clhash.hashsize; i++) { | 1233 | for (i = 0; i < q->clhash.hashsize; i++) { |
1235 | hlist_for_each_entry(cl, n, &q->clhash.hash[i], common.hnode) | 1234 | hlist_for_each_entry(cl, &q->clhash.hash[i], common.hnode) |
1236 | tcf_destroy_chain(&cl->filter_list); | 1235 | tcf_destroy_chain(&cl->filter_list); |
1237 | } | 1236 | } |
1238 | for (i = 0; i < q->clhash.hashsize; i++) { | 1237 | for (i = 0; i < q->clhash.hashsize; i++) { |
1239 | hlist_for_each_entry_safe(cl, n, next, &q->clhash.hash[i], | 1238 | hlist_for_each_entry_safe(cl, next, &q->clhash.hash[i], |
1240 | common.hnode) | 1239 | common.hnode) |
1241 | htb_destroy_class(sch, cl); | 1240 | htb_destroy_class(sch, cl); |
1242 | } | 1241 | } |
@@ -1516,14 +1515,13 @@ static void htb_walk(struct Qdisc *sch, struct qdisc_walker *arg) | |||
1516 | { | 1515 | { |
1517 | struct htb_sched *q = qdisc_priv(sch); | 1516 | struct htb_sched *q = qdisc_priv(sch); |
1518 | struct htb_class *cl; | 1517 | struct htb_class *cl; |
1519 | struct hlist_node *n; | ||
1520 | unsigned int i; | 1518 | unsigned int i; |
1521 | 1519 | ||
1522 | if (arg->stop) | 1520 | if (arg->stop) |
1523 | return; | 1521 | return; |
1524 | 1522 | ||
1525 | for (i = 0; i < q->clhash.hashsize; i++) { | 1523 | for (i = 0; i < q->clhash.hashsize; i++) { |
1526 | hlist_for_each_entry(cl, n, &q->clhash.hash[i], common.hnode) { | 1524 | hlist_for_each_entry(cl, &q->clhash.hash[i], common.hnode) { |
1527 | if (arg->count < arg->skip) { | 1525 | if (arg->count < arg->skip) { |
1528 | arg->count++; | 1526 | arg->count++; |
1529 | continue; | 1527 | continue; |
diff --git a/net/sched/sch_qfq.c b/net/sched/sch_qfq.c index 6ed37652a4c3..e9a77f621c3d 100644 --- a/net/sched/sch_qfq.c +++ b/net/sched/sch_qfq.c | |||
@@ -276,9 +276,8 @@ static struct qfq_aggregate *qfq_find_agg(struct qfq_sched *q, | |||
276 | u32 lmax, u32 weight) | 276 | u32 lmax, u32 weight) |
277 | { | 277 | { |
278 | struct qfq_aggregate *agg; | 278 | struct qfq_aggregate *agg; |
279 | struct hlist_node *n; | ||
280 | 279 | ||
281 | hlist_for_each_entry(agg, n, &q->nonfull_aggs, nonfull_next) | 280 | hlist_for_each_entry(agg, &q->nonfull_aggs, nonfull_next) |
282 | if (agg->lmax == lmax && agg->class_weight == weight) | 281 | if (agg->lmax == lmax && agg->class_weight == weight) |
283 | return agg; | 282 | return agg; |
284 | 283 | ||
@@ -670,14 +669,13 @@ static void qfq_walk(struct Qdisc *sch, struct qdisc_walker *arg) | |||
670 | { | 669 | { |
671 | struct qfq_sched *q = qdisc_priv(sch); | 670 | struct qfq_sched *q = qdisc_priv(sch); |
672 | struct qfq_class *cl; | 671 | struct qfq_class *cl; |
673 | struct hlist_node *n; | ||
674 | unsigned int i; | 672 | unsigned int i; |
675 | 673 | ||
676 | if (arg->stop) | 674 | if (arg->stop) |
677 | return; | 675 | return; |
678 | 676 | ||
679 | for (i = 0; i < q->clhash.hashsize; i++) { | 677 | for (i = 0; i < q->clhash.hashsize; i++) { |
680 | hlist_for_each_entry(cl, n, &q->clhash.hash[i], common.hnode) { | 678 | hlist_for_each_entry(cl, &q->clhash.hash[i], common.hnode) { |
681 | if (arg->count < arg->skip) { | 679 | if (arg->count < arg->skip) { |
682 | arg->count++; | 680 | arg->count++; |
683 | continue; | 681 | continue; |
@@ -1376,11 +1374,10 @@ static unsigned int qfq_drop_from_slot(struct qfq_sched *q, | |||
1376 | struct hlist_head *slot) | 1374 | struct hlist_head *slot) |
1377 | { | 1375 | { |
1378 | struct qfq_aggregate *agg; | 1376 | struct qfq_aggregate *agg; |
1379 | struct hlist_node *n; | ||
1380 | struct qfq_class *cl; | 1377 | struct qfq_class *cl; |
1381 | unsigned int len; | 1378 | unsigned int len; |
1382 | 1379 | ||
1383 | hlist_for_each_entry(agg, n, slot, next) { | 1380 | hlist_for_each_entry(agg, slot, next) { |
1384 | list_for_each_entry(cl, &agg->active, alist) { | 1381 | list_for_each_entry(cl, &agg->active, alist) { |
1385 | 1382 | ||
1386 | if (!cl->qdisc->ops->drop) | 1383 | if (!cl->qdisc->ops->drop) |
@@ -1459,11 +1456,10 @@ static void qfq_reset_qdisc(struct Qdisc *sch) | |||
1459 | { | 1456 | { |
1460 | struct qfq_sched *q = qdisc_priv(sch); | 1457 | struct qfq_sched *q = qdisc_priv(sch); |
1461 | struct qfq_class *cl; | 1458 | struct qfq_class *cl; |
1462 | struct hlist_node *n; | ||
1463 | unsigned int i; | 1459 | unsigned int i; |
1464 | 1460 | ||
1465 | for (i = 0; i < q->clhash.hashsize; i++) { | 1461 | for (i = 0; i < q->clhash.hashsize; i++) { |
1466 | hlist_for_each_entry(cl, n, &q->clhash.hash[i], common.hnode) { | 1462 | hlist_for_each_entry(cl, &q->clhash.hash[i], common.hnode) { |
1467 | if (cl->qdisc->q.qlen > 0) | 1463 | if (cl->qdisc->q.qlen > 0) |
1468 | qfq_deactivate_class(q, cl); | 1464 | qfq_deactivate_class(q, cl); |
1469 | 1465 | ||
@@ -1477,13 +1473,13 @@ static void qfq_destroy_qdisc(struct Qdisc *sch) | |||
1477 | { | 1473 | { |
1478 | struct qfq_sched *q = qdisc_priv(sch); | 1474 | struct qfq_sched *q = qdisc_priv(sch); |
1479 | struct qfq_class *cl; | 1475 | struct qfq_class *cl; |
1480 | struct hlist_node *n, *next; | 1476 | struct hlist_node *next; |
1481 | unsigned int i; | 1477 | unsigned int i; |
1482 | 1478 | ||
1483 | tcf_destroy_chain(&q->filter_list); | 1479 | tcf_destroy_chain(&q->filter_list); |
1484 | 1480 | ||
1485 | for (i = 0; i < q->clhash.hashsize; i++) { | 1481 | for (i = 0; i < q->clhash.hashsize; i++) { |
1486 | hlist_for_each_entry_safe(cl, n, next, &q->clhash.hash[i], | 1482 | hlist_for_each_entry_safe(cl, next, &q->clhash.hash[i], |
1487 | common.hnode) { | 1483 | common.hnode) { |
1488 | qfq_destroy_class(sch, cl); | 1484 | qfq_destroy_class(sch, cl); |
1489 | } | 1485 | } |
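The qdisc conversions in sch_api.c, sch_cbq.c, sch_drr.c, sch_hfsc.c, sch_htb.c and sch_qfq.c are all instances of one caller pattern: walk every bucket of a Qdisc_class_hash with the entry pointer alone. A minimal illustrative sketch of the post-conversion shape (example_visit_classes is made up for illustration and is not a function in the tree):

    static void example_visit_classes(struct Qdisc_class_hash *clhash)
    {
    	struct Qdisc_class_common *cl;
    	unsigned int h;

    	for (h = 0; h < clhash->hashsize; h++) {
    		/* no struct hlist_node cursor needed any more */
    		hlist_for_each_entry(cl, &clhash->hash[h], hnode) {
    			/* inspect cl, e.g. cl->classid */
    		}
    	}
    }

Destructive walks (the *_destroy paths above) instead keep a struct hlist_node *next and use hlist_for_each_entry_safe.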
diff --git a/net/sctp/endpointola.c b/net/sctp/endpointola.c index 73aad3d16a45..2b3ef03c6098 100644 --- a/net/sctp/endpointola.c +++ b/net/sctp/endpointola.c | |||
@@ -332,7 +332,6 @@ static struct sctp_association *__sctp_endpoint_lookup_assoc( | |||
332 | struct sctp_transport *t = NULL; | 332 | struct sctp_transport *t = NULL; |
333 | struct sctp_hashbucket *head; | 333 | struct sctp_hashbucket *head; |
334 | struct sctp_ep_common *epb; | 334 | struct sctp_ep_common *epb; |
335 | struct hlist_node *node; | ||
336 | int hash; | 335 | int hash; |
337 | int rport; | 336 | int rport; |
338 | 337 | ||
@@ -350,7 +349,7 @@ static struct sctp_association *__sctp_endpoint_lookup_assoc( | |||
350 | rport); | 349 | rport); |
351 | head = &sctp_assoc_hashtable[hash]; | 350 | head = &sctp_assoc_hashtable[hash]; |
352 | read_lock(&head->lock); | 351 | read_lock(&head->lock); |
353 | sctp_for_each_hentry(epb, node, &head->chain) { | 352 | sctp_for_each_hentry(epb, &head->chain) { |
354 | tmp = sctp_assoc(epb); | 353 | tmp = sctp_assoc(epb); |
355 | if (tmp->ep != ep || rport != tmp->peer.port) | 354 | if (tmp->ep != ep || rport != tmp->peer.port) |
356 | continue; | 355 | continue; |
diff --git a/net/sctp/input.c b/net/sctp/input.c index 965bbbbe48d4..4b2c83146aa7 100644 --- a/net/sctp/input.c +++ b/net/sctp/input.c | |||
@@ -784,13 +784,12 @@ static struct sctp_endpoint *__sctp_rcv_lookup_endpoint(struct net *net, | |||
784 | struct sctp_hashbucket *head; | 784 | struct sctp_hashbucket *head; |
785 | struct sctp_ep_common *epb; | 785 | struct sctp_ep_common *epb; |
786 | struct sctp_endpoint *ep; | 786 | struct sctp_endpoint *ep; |
787 | struct hlist_node *node; | ||
788 | int hash; | 787 | int hash; |
789 | 788 | ||
790 | hash = sctp_ep_hashfn(net, ntohs(laddr->v4.sin_port)); | 789 | hash = sctp_ep_hashfn(net, ntohs(laddr->v4.sin_port)); |
791 | head = &sctp_ep_hashtable[hash]; | 790 | head = &sctp_ep_hashtable[hash]; |
792 | read_lock(&head->lock); | 791 | read_lock(&head->lock); |
793 | sctp_for_each_hentry(epb, node, &head->chain) { | 792 | sctp_for_each_hentry(epb, &head->chain) { |
794 | ep = sctp_ep(epb); | 793 | ep = sctp_ep(epb); |
795 | if (sctp_endpoint_is_match(ep, net, laddr)) | 794 | if (sctp_endpoint_is_match(ep, net, laddr)) |
796 | goto hit; | 795 | goto hit; |
@@ -876,7 +875,6 @@ static struct sctp_association *__sctp_lookup_association( | |||
876 | struct sctp_ep_common *epb; | 875 | struct sctp_ep_common *epb; |
877 | struct sctp_association *asoc; | 876 | struct sctp_association *asoc; |
878 | struct sctp_transport *transport; | 877 | struct sctp_transport *transport; |
879 | struct hlist_node *node; | ||
880 | int hash; | 878 | int hash; |
881 | 879 | ||
882 | /* Optimize here for direct hit, only listening connections can | 880 | /* Optimize here for direct hit, only listening connections can |
@@ -886,7 +884,7 @@ static struct sctp_association *__sctp_lookup_association( | |||
886 | ntohs(peer->v4.sin_port)); | 884 | ntohs(peer->v4.sin_port)); |
887 | head = &sctp_assoc_hashtable[hash]; | 885 | head = &sctp_assoc_hashtable[hash]; |
888 | read_lock(&head->lock); | 886 | read_lock(&head->lock); |
889 | sctp_for_each_hentry(epb, node, &head->chain) { | 887 | sctp_for_each_hentry(epb, &head->chain) { |
890 | asoc = sctp_assoc(epb); | 888 | asoc = sctp_assoc(epb); |
891 | transport = sctp_assoc_is_match(asoc, net, local, peer); | 889 | transport = sctp_assoc_is_match(asoc, net, local, peer); |
892 | if (transport) | 890 | if (transport) |
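sctp_for_each_hentry follows the same recipe as the sock wrappers: it fixes the member argument to the node field embedded in the hashed objects and forwards to the two-argument iterator; roughly:

    #define sctp_for_each_hentry(epb, head) \
    	hlist_for_each_entry(epb, head, node)

which is why the endpoint and association lookups here, the /proc walkers, and the port-bucket scans in socket.c all convert identically.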
diff --git a/net/sctp/proc.c b/net/sctp/proc.c index 8c19e97262ca..ab3bba8cb0a8 100644 --- a/net/sctp/proc.c +++ b/net/sctp/proc.c | |||
@@ -213,7 +213,6 @@ static int sctp_eps_seq_show(struct seq_file *seq, void *v) | |||
213 | struct sctp_ep_common *epb; | 213 | struct sctp_ep_common *epb; |
214 | struct sctp_endpoint *ep; | 214 | struct sctp_endpoint *ep; |
215 | struct sock *sk; | 215 | struct sock *sk; |
216 | struct hlist_node *node; | ||
217 | int hash = *(loff_t *)v; | 216 | int hash = *(loff_t *)v; |
218 | 217 | ||
219 | if (hash >= sctp_ep_hashsize) | 218 | if (hash >= sctp_ep_hashsize) |
@@ -222,7 +221,7 @@ static int sctp_eps_seq_show(struct seq_file *seq, void *v) | |||
222 | head = &sctp_ep_hashtable[hash]; | 221 | head = &sctp_ep_hashtable[hash]; |
223 | sctp_local_bh_disable(); | 222 | sctp_local_bh_disable(); |
224 | read_lock(&head->lock); | 223 | read_lock(&head->lock); |
225 | sctp_for_each_hentry(epb, node, &head->chain) { | 224 | sctp_for_each_hentry(epb, &head->chain) { |
226 | ep = sctp_ep(epb); | 225 | ep = sctp_ep(epb); |
227 | sk = epb->sk; | 226 | sk = epb->sk; |
228 | if (!net_eq(sock_net(sk), seq_file_net(seq))) | 227 | if (!net_eq(sock_net(sk), seq_file_net(seq))) |
@@ -321,7 +320,6 @@ static int sctp_assocs_seq_show(struct seq_file *seq, void *v) | |||
321 | struct sctp_ep_common *epb; | 320 | struct sctp_ep_common *epb; |
322 | struct sctp_association *assoc; | 321 | struct sctp_association *assoc; |
323 | struct sock *sk; | 322 | struct sock *sk; |
324 | struct hlist_node *node; | ||
325 | int hash = *(loff_t *)v; | 323 | int hash = *(loff_t *)v; |
326 | 324 | ||
327 | if (hash >= sctp_assoc_hashsize) | 325 | if (hash >= sctp_assoc_hashsize) |
@@ -330,7 +328,7 @@ static int sctp_assocs_seq_show(struct seq_file *seq, void *v) | |||
330 | head = &sctp_assoc_hashtable[hash]; | 328 | head = &sctp_assoc_hashtable[hash]; |
331 | sctp_local_bh_disable(); | 329 | sctp_local_bh_disable(); |
332 | read_lock(&head->lock); | 330 | read_lock(&head->lock); |
333 | sctp_for_each_hentry(epb, node, &head->chain) { | 331 | sctp_for_each_hentry(epb, &head->chain) { |
334 | assoc = sctp_assoc(epb); | 332 | assoc = sctp_assoc(epb); |
335 | sk = epb->sk; | 333 | sk = epb->sk; |
336 | if (!net_eq(sock_net(sk), seq_file_net(seq))) | 334 | if (!net_eq(sock_net(sk), seq_file_net(seq))) |
@@ -436,7 +434,6 @@ static int sctp_remaddr_seq_show(struct seq_file *seq, void *v) | |||
436 | struct sctp_hashbucket *head; | 434 | struct sctp_hashbucket *head; |
437 | struct sctp_ep_common *epb; | 435 | struct sctp_ep_common *epb; |
438 | struct sctp_association *assoc; | 436 | struct sctp_association *assoc; |
439 | struct hlist_node *node; | ||
440 | struct sctp_transport *tsp; | 437 | struct sctp_transport *tsp; |
441 | int hash = *(loff_t *)v; | 438 | int hash = *(loff_t *)v; |
442 | 439 | ||
@@ -447,7 +444,7 @@ static int sctp_remaddr_seq_show(struct seq_file *seq, void *v) | |||
447 | sctp_local_bh_disable(); | 444 | sctp_local_bh_disable(); |
448 | read_lock(&head->lock); | 445 | read_lock(&head->lock); |
449 | rcu_read_lock(); | 446 | rcu_read_lock(); |
450 | sctp_for_each_hentry(epb, node, &head->chain) { | 447 | sctp_for_each_hentry(epb, &head->chain) { |
451 | if (!net_eq(sock_net(epb->sk), seq_file_net(seq))) | 448 | if (!net_eq(sock_net(epb->sk), seq_file_net(seq))) |
452 | continue; | 449 | continue; |
453 | assoc = sctp_assoc(epb); | 450 | assoc = sctp_assoc(epb); |
diff --git a/net/sctp/socket.c b/net/sctp/socket.c index cedd9bf67b8c..c99458df3f3f 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c | |||
@@ -5882,8 +5882,7 @@ static struct sctp_bind_bucket *sctp_bucket_create( | |||
5882 | static long sctp_get_port_local(struct sock *sk, union sctp_addr *addr) | 5882 | static long sctp_get_port_local(struct sock *sk, union sctp_addr *addr) |
5883 | { | 5883 | { |
5884 | struct sctp_bind_hashbucket *head; /* hash list */ | 5884 | struct sctp_bind_hashbucket *head; /* hash list */ |
5885 | struct sctp_bind_bucket *pp; /* hash list port iterator */ | 5885 | struct sctp_bind_bucket *pp; |
5886 | struct hlist_node *node; | ||
5887 | unsigned short snum; | 5886 | unsigned short snum; |
5888 | int ret; | 5887 | int ret; |
5889 | 5888 | ||
@@ -5910,7 +5909,7 @@ static long sctp_get_port_local(struct sock *sk, union sctp_addr *addr) | |||
5910 | index = sctp_phashfn(sock_net(sk), rover); | 5909 | index = sctp_phashfn(sock_net(sk), rover); |
5911 | head = &sctp_port_hashtable[index]; | 5910 | head = &sctp_port_hashtable[index]; |
5912 | sctp_spin_lock(&head->lock); | 5911 | sctp_spin_lock(&head->lock); |
5913 | sctp_for_each_hentry(pp, node, &head->chain) | 5912 | sctp_for_each_hentry(pp, &head->chain) |
5914 | if ((pp->port == rover) && | 5913 | if ((pp->port == rover) && |
5915 | net_eq(sock_net(sk), pp->net)) | 5914 | net_eq(sock_net(sk), pp->net)) |
5916 | goto next; | 5915 | goto next; |
@@ -5938,7 +5937,7 @@ static long sctp_get_port_local(struct sock *sk, union sctp_addr *addr) | |||
5938 | */ | 5937 | */ |
5939 | head = &sctp_port_hashtable[sctp_phashfn(sock_net(sk), snum)]; | 5938 | head = &sctp_port_hashtable[sctp_phashfn(sock_net(sk), snum)]; |
5940 | sctp_spin_lock(&head->lock); | 5939 | sctp_spin_lock(&head->lock); |
5941 | sctp_for_each_hentry(pp, node, &head->chain) { | 5940 | sctp_for_each_hentry(pp, &head->chain) { |
5942 | if ((pp->port == snum) && net_eq(pp->net, sock_net(sk))) | 5941 | if ((pp->port == snum) && net_eq(pp->net, sock_net(sk))) |
5943 | goto pp_found; | 5942 | goto pp_found; |
5944 | } | 5943 | } |
@@ -5970,7 +5969,7 @@ pp_found: | |||
5970 | * that this port/socket (sk) combination are already | 5969 | * that this port/socket (sk) combination are already |
5971 | * in an endpoint. | 5970 | * in an endpoint. |
5972 | */ | 5971 | */ |
5973 | sk_for_each_bound(sk2, node, &pp->owner) { | 5972 | sk_for_each_bound(sk2, &pp->owner) { |
5974 | struct sctp_endpoint *ep2; | 5973 | struct sctp_endpoint *ep2; |
5975 | ep2 = sctp_sk(sk2)->ep; | 5974 | ep2 = sctp_sk(sk2)->ep; |
5976 | 5975 | ||
diff --git a/net/sunrpc/auth.c b/net/sunrpc/auth.c index 392adc41e2e5..f5294047df77 100644 --- a/net/sunrpc/auth.c +++ b/net/sunrpc/auth.c | |||
@@ -407,7 +407,6 @@ rpcauth_lookup_credcache(struct rpc_auth *auth, struct auth_cred * acred, | |||
407 | { | 407 | { |
408 | LIST_HEAD(free); | 408 | LIST_HEAD(free); |
409 | struct rpc_cred_cache *cache = auth->au_credcache; | 409 | struct rpc_cred_cache *cache = auth->au_credcache; |
410 | struct hlist_node *pos; | ||
411 | struct rpc_cred *cred = NULL, | 410 | struct rpc_cred *cred = NULL, |
412 | *entry, *new; | 411 | *entry, *new; |
413 | unsigned int nr; | 412 | unsigned int nr; |
@@ -415,7 +414,7 @@ rpcauth_lookup_credcache(struct rpc_auth *auth, struct auth_cred * acred, | |||
415 | nr = hash_long(from_kuid(&init_user_ns, acred->uid), cache->hashbits); | 414 | nr = hash_long(from_kuid(&init_user_ns, acred->uid), cache->hashbits); |
416 | 415 | ||
417 | rcu_read_lock(); | 416 | rcu_read_lock(); |
418 | hlist_for_each_entry_rcu(entry, pos, &cache->hashtable[nr], cr_hash) { | 417 | hlist_for_each_entry_rcu(entry, &cache->hashtable[nr], cr_hash) { |
419 | if (!entry->cr_ops->crmatch(acred, entry, flags)) | 418 | if (!entry->cr_ops->crmatch(acred, entry, flags)) |
420 | continue; | 419 | continue; |
421 | spin_lock(&cache->lock); | 420 | spin_lock(&cache->lock); |
@@ -439,7 +438,7 @@ rpcauth_lookup_credcache(struct rpc_auth *auth, struct auth_cred * acred, | |||
439 | } | 438 | } |
440 | 439 | ||
441 | spin_lock(&cache->lock); | 440 | spin_lock(&cache->lock); |
442 | hlist_for_each_entry(entry, pos, &cache->hashtable[nr], cr_hash) { | 441 | hlist_for_each_entry(entry, &cache->hashtable[nr], cr_hash) { |
443 | if (!entry->cr_ops->crmatch(acred, entry, flags)) | 442 | if (!entry->cr_ops->crmatch(acred, entry, flags)) |
444 | continue; | 443 | continue; |
445 | cred = get_rpccred(entry); | 444 | cred = get_rpccred(entry); |
diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c index f3897d10f649..39a4112faf54 100644 --- a/net/sunrpc/cache.c +++ b/net/sunrpc/cache.c | |||
@@ -670,13 +670,13 @@ static void cache_revisit_request(struct cache_head *item) | |||
670 | { | 670 | { |
671 | struct cache_deferred_req *dreq; | 671 | struct cache_deferred_req *dreq; |
672 | struct list_head pending; | 672 | struct list_head pending; |
673 | struct hlist_node *lp, *tmp; | 673 | struct hlist_node *tmp; |
674 | int hash = DFR_HASH(item); | 674 | int hash = DFR_HASH(item); |
675 | 675 | ||
676 | INIT_LIST_HEAD(&pending); | 676 | INIT_LIST_HEAD(&pending); |
677 | spin_lock(&cache_defer_lock); | 677 | spin_lock(&cache_defer_lock); |
678 | 678 | ||
679 | hlist_for_each_entry_safe(dreq, lp, tmp, &cache_defer_hash[hash], hash) | 679 | hlist_for_each_entry_safe(dreq, tmp, &cache_defer_hash[hash], hash) |
680 | if (dreq->item == item) { | 680 | if (dreq->item == item) { |
681 | __unhash_deferred_req(dreq); | 681 | __unhash_deferred_req(dreq); |
682 | list_add(&dreq->recent, &pending); | 682 | list_add(&dreq->recent, &pending); |
diff --git a/net/sunrpc/svcauth.c b/net/sunrpc/svcauth.c index 7963569fc04f..2af7b0cba43a 100644 --- a/net/sunrpc/svcauth.c +++ b/net/sunrpc/svcauth.c | |||
@@ -138,13 +138,12 @@ auth_domain_lookup(char *name, struct auth_domain *new) | |||
138 | { | 138 | { |
139 | struct auth_domain *hp; | 139 | struct auth_domain *hp; |
140 | struct hlist_head *head; | 140 | struct hlist_head *head; |
141 | struct hlist_node *np; | ||
142 | 141 | ||
143 | head = &auth_domain_table[hash_str(name, DN_HASHBITS)]; | 142 | head = &auth_domain_table[hash_str(name, DN_HASHBITS)]; |
144 | 143 | ||
145 | spin_lock(&auth_domain_lock); | 144 | spin_lock(&auth_domain_lock); |
146 | 145 | ||
147 | hlist_for_each_entry(hp, np, head, hash) { | 146 | hlist_for_each_entry(hp, head, hash) { |
148 | if (strcmp(hp->name, name)==0) { | 147 | if (strcmp(hp->name, name)==0) { |
149 | kref_get(&hp->ref); | 148 | kref_get(&hp->ref); |
150 | spin_unlock(&auth_domain_lock); | 149 | spin_unlock(&auth_domain_lock); |
diff --git a/net/tipc/name_table.c b/net/tipc/name_table.c index 46754779fd3d..24b167914311 100644 --- a/net/tipc/name_table.c +++ b/net/tipc/name_table.c | |||
@@ -473,11 +473,10 @@ static void tipc_nameseq_subscribe(struct name_seq *nseq, | |||
473 | static struct name_seq *nametbl_find_seq(u32 type) | 473 | static struct name_seq *nametbl_find_seq(u32 type) |
474 | { | 474 | { |
475 | struct hlist_head *seq_head; | 475 | struct hlist_head *seq_head; |
476 | struct hlist_node *seq_node; | ||
477 | struct name_seq *ns; | 476 | struct name_seq *ns; |
478 | 477 | ||
479 | seq_head = &table.types[hash(type)]; | 478 | seq_head = &table.types[hash(type)]; |
480 | hlist_for_each_entry(ns, seq_node, seq_head, ns_list) { | 479 | hlist_for_each_entry(ns, seq_head, ns_list) { |
481 | if (ns->type == type) | 480 | if (ns->type == type) |
482 | return ns; | 481 | return ns; |
483 | } | 482 | } |
@@ -853,7 +852,6 @@ static int nametbl_list(char *buf, int len, u32 depth_info, | |||
853 | u32 type, u32 lowbound, u32 upbound) | 852 | u32 type, u32 lowbound, u32 upbound) |
854 | { | 853 | { |
855 | struct hlist_head *seq_head; | 854 | struct hlist_head *seq_head; |
856 | struct hlist_node *seq_node; | ||
857 | struct name_seq *seq; | 855 | struct name_seq *seq; |
858 | int all_types; | 856 | int all_types; |
859 | int ret = 0; | 857 | int ret = 0; |
@@ -873,7 +871,7 @@ static int nametbl_list(char *buf, int len, u32 depth_info, | |||
873 | upbound = ~0; | 871 | upbound = ~0; |
874 | for (i = 0; i < TIPC_NAMETBL_SIZE; i++) { | 872 | for (i = 0; i < TIPC_NAMETBL_SIZE; i++) { |
875 | seq_head = &table.types[i]; | 873 | seq_head = &table.types[i]; |
876 | hlist_for_each_entry(seq, seq_node, seq_head, ns_list) { | 874 | hlist_for_each_entry(seq, seq_head, ns_list) { |
877 | ret += nameseq_list(seq, buf + ret, len - ret, | 875 | ret += nameseq_list(seq, buf + ret, len - ret, |
878 | depth, seq->type, | 876 | depth, seq->type, |
879 | lowbound, upbound, i); | 877 | lowbound, upbound, i); |
@@ -889,7 +887,7 @@ static int nametbl_list(char *buf, int len, u32 depth_info, | |||
889 | ret += nametbl_header(buf + ret, len - ret, depth); | 887 | ret += nametbl_header(buf + ret, len - ret, depth); |
890 | i = hash(type); | 888 | i = hash(type); |
891 | seq_head = &table.types[i]; | 889 | seq_head = &table.types[i]; |
892 | hlist_for_each_entry(seq, seq_node, seq_head, ns_list) { | 890 | hlist_for_each_entry(seq, seq_head, ns_list) { |
893 | if (seq->type == type) { | 891 | if (seq->type == type) { |
894 | ret += nameseq_list(seq, buf + ret, len - ret, | 892 | ret += nameseq_list(seq, buf + ret, len - ret, |
895 | depth, type, | 893 | depth, type, |
diff --git a/net/tipc/node.c b/net/tipc/node.c index 48f39dd3eae8..6e6c434872e8 100644 --- a/net/tipc/node.c +++ b/net/tipc/node.c | |||
@@ -69,12 +69,11 @@ static unsigned int tipc_hashfn(u32 addr) | |||
69 | struct tipc_node *tipc_node_find(u32 addr) | 69 | struct tipc_node *tipc_node_find(u32 addr) |
70 | { | 70 | { |
71 | struct tipc_node *node; | 71 | struct tipc_node *node; |
72 | struct hlist_node *pos; | ||
73 | 72 | ||
74 | if (unlikely(!in_own_cluster_exact(addr))) | 73 | if (unlikely(!in_own_cluster_exact(addr))) |
75 | return NULL; | 74 | return NULL; |
76 | 75 | ||
77 | hlist_for_each_entry(node, pos, &node_htable[tipc_hashfn(addr)], hash) { | 76 | hlist_for_each_entry(node, &node_htable[tipc_hashfn(addr)], hash) { |
78 | if (node->addr == addr) | 77 | if (node->addr == addr) |
79 | return node; | 78 | return node; |
80 | } | 79 | } |
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c index 87d284289012..51be64f163ec 100644 --- a/net/unix/af_unix.c +++ b/net/unix/af_unix.c | |||
@@ -263,9 +263,8 @@ static struct sock *__unix_find_socket_byname(struct net *net, | |||
263 | int len, int type, unsigned int hash) | 263 | int len, int type, unsigned int hash) |
264 | { | 264 | { |
265 | struct sock *s; | 265 | struct sock *s; |
266 | struct hlist_node *node; | ||
267 | 266 | ||
268 | sk_for_each(s, node, &unix_socket_table[hash ^ type]) { | 267 | sk_for_each(s, &unix_socket_table[hash ^ type]) { |
269 | struct unix_sock *u = unix_sk(s); | 268 | struct unix_sock *u = unix_sk(s); |
270 | 269 | ||
271 | if (!net_eq(sock_net(s), net)) | 270 | if (!net_eq(sock_net(s), net)) |
@@ -298,10 +297,9 @@ static inline struct sock *unix_find_socket_byname(struct net *net, | |||
298 | static struct sock *unix_find_socket_byinode(struct inode *i) | 297 | static struct sock *unix_find_socket_byinode(struct inode *i) |
299 | { | 298 | { |
300 | struct sock *s; | 299 | struct sock *s; |
301 | struct hlist_node *node; | ||
302 | 300 | ||
303 | spin_lock(&unix_table_lock); | 301 | spin_lock(&unix_table_lock); |
304 | sk_for_each(s, node, | 302 | sk_for_each(s, |
305 | &unix_socket_table[i->i_ino & (UNIX_HASH_SIZE - 1)]) { | 303 | &unix_socket_table[i->i_ino & (UNIX_HASH_SIZE - 1)]) { |
306 | struct dentry *dentry = unix_sk(s)->path.dentry; | 304 | struct dentry *dentry = unix_sk(s)->path.dentry; |
307 | 305 | ||
diff --git a/net/unix/diag.c b/net/unix/diag.c index 5ac19dc1d5e4..d591091603bf 100644 --- a/net/unix/diag.c +++ b/net/unix/diag.c | |||
@@ -192,10 +192,9 @@ static int unix_diag_dump(struct sk_buff *skb, struct netlink_callback *cb) | |||
192 | slot < ARRAY_SIZE(unix_socket_table); | 192 | slot < ARRAY_SIZE(unix_socket_table); |
193 | s_num = 0, slot++) { | 193 | s_num = 0, slot++) { |
194 | struct sock *sk; | 194 | struct sock *sk; |
195 | struct hlist_node *node; | ||
196 | 195 | ||
197 | num = 0; | 196 | num = 0; |
198 | sk_for_each(sk, node, &unix_socket_table[slot]) { | 197 | sk_for_each(sk, &unix_socket_table[slot]) { |
199 | if (!net_eq(sock_net(sk), net)) | 198 | if (!net_eq(sock_net(sk), net)) |
200 | continue; | 199 | continue; |
201 | if (num < s_num) | 200 | if (num < s_num) |
@@ -226,9 +225,7 @@ static struct sock *unix_lookup_by_ino(int ino) | |||
226 | 225 | ||
227 | spin_lock(&unix_table_lock); | 226 | spin_lock(&unix_table_lock); |
228 | for (i = 0; i < ARRAY_SIZE(unix_socket_table); i++) { | 227 | for (i = 0; i < ARRAY_SIZE(unix_socket_table); i++) { |
229 | struct hlist_node *node; | 228 | sk_for_each(sk, &unix_socket_table[i]) |
230 | |||
231 | sk_for_each(sk, node, &unix_socket_table[i]) | ||
232 | if (ino == sock_i_ino(sk)) { | 229 | if (ino == sock_i_ino(sk)) { |
233 | sock_hold(sk); | 230 | sock_hold(sk); |
234 | spin_unlock(&unix_table_lock); | 231 | spin_unlock(&unix_table_lock); |
diff --git a/net/x25/af_x25.c b/net/x25/af_x25.c index a306bc66000e..37ca9694aabe 100644 --- a/net/x25/af_x25.c +++ b/net/x25/af_x25.c | |||
@@ -208,11 +208,10 @@ static void x25_remove_socket(struct sock *sk) | |||
208 | static void x25_kill_by_device(struct net_device *dev) | 208 | static void x25_kill_by_device(struct net_device *dev) |
209 | { | 209 | { |
210 | struct sock *s; | 210 | struct sock *s; |
211 | struct hlist_node *node; | ||
212 | 211 | ||
213 | write_lock_bh(&x25_list_lock); | 212 | write_lock_bh(&x25_list_lock); |
214 | 213 | ||
215 | sk_for_each(s, node, &x25_list) | 214 | sk_for_each(s, &x25_list) |
216 | if (x25_sk(s)->neighbour && x25_sk(s)->neighbour->dev == dev) | 215 | if (x25_sk(s)->neighbour && x25_sk(s)->neighbour->dev == dev) |
217 | x25_disconnect(s, ENETUNREACH, 0, 0); | 216 | x25_disconnect(s, ENETUNREACH, 0, 0); |
218 | 217 | ||
@@ -280,12 +279,11 @@ static struct sock *x25_find_listener(struct x25_address *addr, | |||
280 | { | 279 | { |
281 | struct sock *s; | 280 | struct sock *s; |
282 | struct sock *next_best; | 281 | struct sock *next_best; |
283 | struct hlist_node *node; | ||
284 | 282 | ||
285 | read_lock_bh(&x25_list_lock); | 283 | read_lock_bh(&x25_list_lock); |
286 | next_best = NULL; | 284 | next_best = NULL; |
287 | 285 | ||
288 | sk_for_each(s, node, &x25_list) | 286 | sk_for_each(s, &x25_list) |
289 | if ((!strcmp(addr->x25_addr, | 287 | if ((!strcmp(addr->x25_addr, |
290 | x25_sk(s)->source_addr.x25_addr) || | 288 | x25_sk(s)->source_addr.x25_addr) || |
291 | !strcmp(addr->x25_addr, | 289 | !strcmp(addr->x25_addr, |
@@ -323,9 +321,8 @@ found: | |||
323 | static struct sock *__x25_find_socket(unsigned int lci, struct x25_neigh *nb) | 321 | static struct sock *__x25_find_socket(unsigned int lci, struct x25_neigh *nb) |
324 | { | 322 | { |
325 | struct sock *s; | 323 | struct sock *s; |
326 | struct hlist_node *node; | ||
327 | 324 | ||
328 | sk_for_each(s, node, &x25_list) | 325 | sk_for_each(s, &x25_list) |
329 | if (x25_sk(s)->lci == lci && x25_sk(s)->neighbour == nb) { | 326 | if (x25_sk(s)->lci == lci && x25_sk(s)->neighbour == nb) { |
330 | sock_hold(s); | 327 | sock_hold(s); |
331 | goto found; | 328 | goto found; |
@@ -1782,11 +1779,10 @@ static struct notifier_block x25_dev_notifier = { | |||
1782 | void x25_kill_by_neigh(struct x25_neigh *nb) | 1779 | void x25_kill_by_neigh(struct x25_neigh *nb) |
1783 | { | 1780 | { |
1784 | struct sock *s; | 1781 | struct sock *s; |
1785 | struct hlist_node *node; | ||
1786 | 1782 | ||
1787 | write_lock_bh(&x25_list_lock); | 1783 | write_lock_bh(&x25_list_lock); |
1788 | 1784 | ||
1789 | sk_for_each(s, node, &x25_list) | 1785 | sk_for_each(s, &x25_list) |
1790 | if (x25_sk(s)->neighbour == nb) | 1786 | if (x25_sk(s)->neighbour == nb) |
1791 | x25_disconnect(s, ENETUNREACH, 0, 0); | 1787 | x25_disconnect(s, ENETUNREACH, 0, 0); |
1792 | 1788 | ||
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c index 5b47180986f8..167c67d46c6a 100644 --- a/net/xfrm/xfrm_policy.c +++ b/net/xfrm/xfrm_policy.c | |||
@@ -379,27 +379,27 @@ static void xfrm_dst_hash_transfer(struct hlist_head *list, | |||
379 | struct hlist_head *ndsttable, | 379 | struct hlist_head *ndsttable, |
380 | unsigned int nhashmask) | 380 | unsigned int nhashmask) |
381 | { | 381 | { |
382 | struct hlist_node *entry, *tmp, *entry0 = NULL; | 382 | struct hlist_node *tmp, *entry0 = NULL; |
383 | struct xfrm_policy *pol; | 383 | struct xfrm_policy *pol; |
384 | unsigned int h0 = 0; | 384 | unsigned int h0 = 0; |
385 | 385 | ||
386 | redo: | 386 | redo: |
387 | hlist_for_each_entry_safe(pol, entry, tmp, list, bydst) { | 387 | hlist_for_each_entry_safe(pol, tmp, list, bydst) { |
388 | unsigned int h; | 388 | unsigned int h; |
389 | 389 | ||
390 | h = __addr_hash(&pol->selector.daddr, &pol->selector.saddr, | 390 | h = __addr_hash(&pol->selector.daddr, &pol->selector.saddr, |
391 | pol->family, nhashmask); | 391 | pol->family, nhashmask); |
392 | if (!entry0) { | 392 | if (!entry0) { |
393 | hlist_del(entry); | 393 | hlist_del(&pol->bydst); |
394 | hlist_add_head(&pol->bydst, ndsttable+h); | 394 | hlist_add_head(&pol->bydst, ndsttable+h); |
395 | h0 = h; | 395 | h0 = h; |
396 | } else { | 396 | } else { |
397 | if (h != h0) | 397 | if (h != h0) |
398 | continue; | 398 | continue; |
399 | hlist_del(entry); | 399 | hlist_del(&pol->bydst); |
400 | hlist_add_after(entry0, &pol->bydst); | 400 | hlist_add_after(entry0, &pol->bydst); |
401 | } | 401 | } |
402 | entry0 = entry; | 402 | entry0 = &pol->bydst; |
403 | } | 403 | } |
404 | if (!hlist_empty(list)) { | 404 | if (!hlist_empty(list)) { |
405 | entry0 = NULL; | 405 | entry0 = NULL; |
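This xfrm_dst_hash_transfer hunk is one of the few places where the cursor was used as a value rather than as loop plumbing, so it needed more than argument deletion: entry always pointed at the current policy's embedded node, i.e. &pol->bydst, and the rewrite simply open-codes that. An equivalence sketch of the two touched statements:

    	hlist_del(entry);        /* old: delete via the cursor        */
    	hlist_del(&pol->bydst);  /* new: delete via the entry's node  */

    	entry0 = entry;          /* old: remember the insertion point */
    	entry0 = &pol->bydst;    /* new: same pointer, spelled out    */

The hlist_add_after(entry0, &pol->bydst) call is untouched because it already named the node through the entry.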
@@ -411,10 +411,10 @@ static void xfrm_idx_hash_transfer(struct hlist_head *list, | |||
411 | struct hlist_head *nidxtable, | 411 | struct hlist_head *nidxtable, |
412 | unsigned int nhashmask) | 412 | unsigned int nhashmask) |
413 | { | 413 | { |
414 | struct hlist_node *entry, *tmp; | 414 | struct hlist_node *tmp; |
415 | struct xfrm_policy *pol; | 415 | struct xfrm_policy *pol; |
416 | 416 | ||
417 | hlist_for_each_entry_safe(pol, entry, tmp, list, byidx) { | 417 | hlist_for_each_entry_safe(pol, tmp, list, byidx) { |
418 | unsigned int h; | 418 | unsigned int h; |
419 | 419 | ||
420 | h = __idx_hash(pol->index, nhashmask); | 420 | h = __idx_hash(pol->index, nhashmask); |
@@ -544,7 +544,6 @@ static u32 xfrm_gen_index(struct net *net, int dir) | |||
544 | static u32 idx_generator; | 544 | static u32 idx_generator; |
545 | 545 | ||
546 | for (;;) { | 546 | for (;;) { |
547 | struct hlist_node *entry; | ||
548 | struct hlist_head *list; | 547 | struct hlist_head *list; |
549 | struct xfrm_policy *p; | 548 | struct xfrm_policy *p; |
550 | u32 idx; | 549 | u32 idx; |
@@ -556,7 +555,7 @@ static u32 xfrm_gen_index(struct net *net, int dir) | |||
556 | idx = 8; | 555 | idx = 8; |
557 | list = net->xfrm.policy_byidx + idx_hash(net, idx); | 556 | list = net->xfrm.policy_byidx + idx_hash(net, idx); |
558 | found = 0; | 557 | found = 0; |
559 | hlist_for_each_entry(p, entry, list, byidx) { | 558 | hlist_for_each_entry(p, list, byidx) { |
560 | if (p->index == idx) { | 559 | if (p->index == idx) { |
561 | found = 1; | 560 | found = 1; |
562 | break; | 561 | break; |
@@ -628,13 +627,13 @@ int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl) | |||
628 | struct xfrm_policy *pol; | 627 | struct xfrm_policy *pol; |
629 | struct xfrm_policy *delpol; | 628 | struct xfrm_policy *delpol; |
630 | struct hlist_head *chain; | 629 | struct hlist_head *chain; |
631 | struct hlist_node *entry, *newpos; | 630 | struct hlist_node *newpos; |
632 | 631 | ||
633 | write_lock_bh(&xfrm_policy_lock); | 632 | write_lock_bh(&xfrm_policy_lock); |
634 | chain = policy_hash_bysel(net, &policy->selector, policy->family, dir); | 633 | chain = policy_hash_bysel(net, &policy->selector, policy->family, dir); |
635 | delpol = NULL; | 634 | delpol = NULL; |
636 | newpos = NULL; | 635 | newpos = NULL; |
637 | hlist_for_each_entry(pol, entry, chain, bydst) { | 636 | hlist_for_each_entry(pol, chain, bydst) { |
638 | if (pol->type == policy->type && | 637 | if (pol->type == policy->type && |
639 | !selector_cmp(&pol->selector, &policy->selector) && | 638 | !selector_cmp(&pol->selector, &policy->selector) && |
640 | xfrm_policy_mark_match(policy, pol) && | 639 | xfrm_policy_mark_match(policy, pol) && |
@@ -691,13 +690,12 @@ struct xfrm_policy *xfrm_policy_bysel_ctx(struct net *net, u32 mark, u8 type,
 {
 	struct xfrm_policy *pol, *ret;
 	struct hlist_head *chain;
-	struct hlist_node *entry;
 
 	*err = 0;
 	write_lock_bh(&xfrm_policy_lock);
 	chain = policy_hash_bysel(net, sel, sel->family, dir);
 	ret = NULL;
-	hlist_for_each_entry(pol, entry, chain, bydst) {
+	hlist_for_each_entry(pol, chain, bydst) {
 		if (pol->type == type &&
 		    (mark & pol->mark.m) == pol->mark.v &&
 		    !selector_cmp(sel, &pol->selector) &&
@@ -729,7 +727,6 @@ struct xfrm_policy *xfrm_policy_byid(struct net *net, u32 mark, u8 type,
 {
 	struct xfrm_policy *pol, *ret;
 	struct hlist_head *chain;
-	struct hlist_node *entry;
 
 	*err = -ENOENT;
 	if (xfrm_policy_id2dir(id) != dir)
@@ -739,7 +736,7 @@ struct xfrm_policy *xfrm_policy_byid(struct net *net, u32 mark, u8 type,
 	write_lock_bh(&xfrm_policy_lock);
 	chain = net->xfrm.policy_byidx + idx_hash(net, id);
 	ret = NULL;
-	hlist_for_each_entry(pol, entry, chain, byidx) {
+	hlist_for_each_entry(pol, chain, byidx) {
 		if (pol->type == type && pol->index == id &&
 		    (mark & pol->mark.m) == pol->mark.v) {
 			xfrm_pol_hold(pol);
@@ -772,10 +769,9 @@ xfrm_policy_flush_secctx_check(struct net *net, u8 type, struct xfrm_audit *audi
 
 	for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
 		struct xfrm_policy *pol;
-		struct hlist_node *entry;
 		int i;
 
-		hlist_for_each_entry(pol, entry,
+		hlist_for_each_entry(pol,
 				     &net->xfrm.policy_inexact[dir], bydst) {
 			if (pol->type != type)
 				continue;
@@ -789,7 +785,7 @@ xfrm_policy_flush_secctx_check(struct net *net, u8 type, struct xfrm_audit *audi
 			}
 		}
 		for (i = net->xfrm.policy_bydst[dir].hmask; i >= 0; i--) {
-			hlist_for_each_entry(pol, entry,
+			hlist_for_each_entry(pol,
 					     net->xfrm.policy_bydst[dir].table + i,
 					     bydst) {
 				if (pol->type != type)
@@ -828,11 +824,10 @@ int xfrm_policy_flush(struct net *net, u8 type, struct xfrm_audit *audit_info)
 
 	for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
 		struct xfrm_policy *pol;
-		struct hlist_node *entry;
 		int i;
 
 	again1:
-		hlist_for_each_entry(pol, entry,
+		hlist_for_each_entry(pol,
 				     &net->xfrm.policy_inexact[dir], bydst) {
 			if (pol->type != type)
 				continue;
@@ -852,7 +847,7 @@ int xfrm_policy_flush(struct net *net, u8 type, struct xfrm_audit *audit_info)
 
 		for (i = net->xfrm.policy_bydst[dir].hmask; i >= 0; i--) {
 	again2:
-			hlist_for_each_entry(pol, entry,
+			hlist_for_each_entry(pol,
 					     net->xfrm.policy_bydst[dir].table + i,
 					     bydst) {
 				if (pol->type != type)
@@ -980,7 +975,6 @@ static struct xfrm_policy *xfrm_policy_lookup_bytype(struct net *net, u8 type,
 	int err;
 	struct xfrm_policy *pol, *ret;
 	const xfrm_address_t *daddr, *saddr;
-	struct hlist_node *entry;
 	struct hlist_head *chain;
 	u32 priority = ~0U;
 
@@ -992,7 +986,7 @@ static struct xfrm_policy *xfrm_policy_lookup_bytype(struct net *net, u8 type,
 	read_lock_bh(&xfrm_policy_lock);
 	chain = policy_hash_direct(net, daddr, saddr, family, dir);
 	ret = NULL;
-	hlist_for_each_entry(pol, entry, chain, bydst) {
+	hlist_for_each_entry(pol, chain, bydst) {
 		err = xfrm_policy_match(pol, fl, type, family, dir);
 		if (err) {
 			if (err == -ESRCH)
@@ -1008,7 +1002,7 @@ static struct xfrm_policy *xfrm_policy_lookup_bytype(struct net *net, u8 type,
 		}
 	}
 	chain = &net->xfrm.policy_inexact[dir];
-	hlist_for_each_entry(pol, entry, chain, bydst) {
+	hlist_for_each_entry(pol, chain, bydst) {
 		err = xfrm_policy_match(pol, fl, type, family, dir);
 		if (err) {
 			if (err == -ESRCH)
@@ -3041,13 +3035,12 @@ static struct xfrm_policy * xfrm_migrate_policy_find(const struct xfrm_selector
 						 u8 dir, u8 type)
 {
 	struct xfrm_policy *pol, *ret = NULL;
-	struct hlist_node *entry;
 	struct hlist_head *chain;
 	u32 priority = ~0U;
 
 	read_lock_bh(&xfrm_policy_lock);
 	chain = policy_hash_direct(&init_net, &sel->daddr, &sel->saddr, sel->family, dir);
-	hlist_for_each_entry(pol, entry, chain, bydst) {
+	hlist_for_each_entry(pol, chain, bydst) {
 		if (xfrm_migrate_selector_match(sel, &pol->selector) &&
 		    pol->type == type) {
 			ret = pol;
@@ -3056,7 +3049,7 @@ static struct xfrm_policy * xfrm_migrate_policy_find(const struct xfrm_selector
 		}
 	}
 	chain = &init_net.xfrm.policy_inexact[dir];
-	hlist_for_each_entry(pol, entry, chain, bydst) {
+	hlist_for_each_entry(pol, chain, bydst) {
 		if (xfrm_migrate_selector_match(sel, &pol->selector) &&
 		    pol->type == type &&
 		    pol->priority < priority) {
diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
index ae01bdbcb294..2c341bdaf47c 100644
--- a/net/xfrm/xfrm_state.c
+++ b/net/xfrm/xfrm_state.c
@@ -72,10 +72,10 @@ static void xfrm_hash_transfer(struct hlist_head *list,
 			       struct hlist_head *nspitable,
 			       unsigned int nhashmask)
 {
-	struct hlist_node *entry, *tmp;
+	struct hlist_node *tmp;
 	struct xfrm_state *x;
 
-	hlist_for_each_entry_safe(x, entry, tmp, list, bydst) {
+	hlist_for_each_entry_safe(x, tmp, list, bydst) {
 		unsigned int h;
 
 		h = __xfrm_dst_hash(&x->id.daddr, &x->props.saddr,
@@ -368,14 +368,14 @@ static void xfrm_state_gc_task(struct work_struct *work)
 {
 	struct net *net = container_of(work, struct net, xfrm.state_gc_work);
 	struct xfrm_state *x;
-	struct hlist_node *entry, *tmp;
+	struct hlist_node *tmp;
 	struct hlist_head gc_list;
 
 	spin_lock_bh(&xfrm_state_gc_lock);
 	hlist_move_list(&net->xfrm.state_gc_list, &gc_list);
 	spin_unlock_bh(&xfrm_state_gc_lock);
 
-	hlist_for_each_entry_safe(x, entry, tmp, &gc_list, gclist)
+	hlist_for_each_entry_safe(x, tmp, &gc_list, gclist)
 		xfrm_state_gc_destroy(x);
 
 	wake_up(&net->xfrm.km_waitq);
@@ -577,10 +577,9 @@ xfrm_state_flush_secctx_check(struct net *net, u8 proto, struct xfrm_audit *audi
 	int i, err = 0;
 
 	for (i = 0; i <= net->xfrm.state_hmask; i++) {
-		struct hlist_node *entry;
 		struct xfrm_state *x;
 
-		hlist_for_each_entry(x, entry, net->xfrm.state_bydst+i, bydst) {
+		hlist_for_each_entry(x, net->xfrm.state_bydst+i, bydst) {
 			if (xfrm_id_proto_match(x->id.proto, proto) &&
 			   (err = security_xfrm_state_delete(x)) != 0) {
 				xfrm_audit_state_delete(x, 0,
@@ -613,10 +612,9 @@ int xfrm_state_flush(struct net *net, u8 proto, struct xfrm_audit *audit_info)
 
 	err = -ESRCH;
 	for (i = 0; i <= net->xfrm.state_hmask; i++) {
-		struct hlist_node *entry;
 		struct xfrm_state *x;
 restart:
-		hlist_for_each_entry(x, entry, net->xfrm.state_bydst+i, bydst) {
+		hlist_for_each_entry(x, net->xfrm.state_bydst+i, bydst) {
 			if (!xfrm_state_kern(x) &&
 			    xfrm_id_proto_match(x->id.proto, proto)) {
 				xfrm_state_hold(x);
@@ -685,9 +683,8 @@ static struct xfrm_state *__xfrm_state_lookup(struct net *net, u32 mark,
 {
 	unsigned int h = xfrm_spi_hash(net, daddr, spi, proto, family);
 	struct xfrm_state *x;
-	struct hlist_node *entry;
 
-	hlist_for_each_entry(x, entry, net->xfrm.state_byspi+h, byspi) {
+	hlist_for_each_entry(x, net->xfrm.state_byspi+h, byspi) {
 		if (x->props.family != family ||
 		    x->id.spi != spi ||
 		    x->id.proto != proto ||
@@ -710,9 +707,8 @@ static struct xfrm_state *__xfrm_state_lookup_byaddr(struct net *net, u32 mark,
 {
 	unsigned int h = xfrm_src_hash(net, daddr, saddr, family);
 	struct xfrm_state *x;
-	struct hlist_node *entry;
 
-	hlist_for_each_entry(x, entry, net->xfrm.state_bysrc+h, bysrc) {
+	hlist_for_each_entry(x, net->xfrm.state_bysrc+h, bysrc) {
 		if (x->props.family != family ||
 		    x->id.proto != proto ||
 		    !xfrm_addr_equal(&x->id.daddr, daddr, family) ||
@@ -798,7 +794,6 @@ xfrm_state_find(const xfrm_address_t *daddr, const xfrm_address_t *saddr,
 	static xfrm_address_t saddr_wildcard = { };
 	struct net *net = xp_net(pol);
 	unsigned int h, h_wildcard;
-	struct hlist_node *entry;
 	struct xfrm_state *x, *x0, *to_put;
 	int acquire_in_progress = 0;
 	int error = 0;
@@ -810,7 +805,7 @@ xfrm_state_find(const xfrm_address_t *daddr, const xfrm_address_t *saddr,
 
 	spin_lock_bh(&xfrm_state_lock);
 	h = xfrm_dst_hash(net, daddr, saddr, tmpl->reqid, encap_family);
-	hlist_for_each_entry(x, entry, net->xfrm.state_bydst+h, bydst) {
+	hlist_for_each_entry(x, net->xfrm.state_bydst+h, bydst) {
 		if (x->props.family == encap_family &&
 		    x->props.reqid == tmpl->reqid &&
 		    (mark & x->mark.m) == x->mark.v &&
@@ -826,7 +821,7 @@ xfrm_state_find(const xfrm_address_t *daddr, const xfrm_address_t *saddr,
 		goto found;
 
 	h_wildcard = xfrm_dst_hash(net, daddr, &saddr_wildcard, tmpl->reqid, encap_family);
-	hlist_for_each_entry(x, entry, net->xfrm.state_bydst+h_wildcard, bydst) {
+	hlist_for_each_entry(x, net->xfrm.state_bydst+h_wildcard, bydst) {
 		if (x->props.family == encap_family &&
 		    x->props.reqid == tmpl->reqid &&
 		    (mark & x->mark.m) == x->mark.v &&
@@ -906,11 +901,10 @@ xfrm_stateonly_find(struct net *net, u32 mark,
 {
 	unsigned int h;
 	struct xfrm_state *rx = NULL, *x = NULL;
-	struct hlist_node *entry;
 
 	spin_lock(&xfrm_state_lock);
 	h = xfrm_dst_hash(net, daddr, saddr, reqid, family);
-	hlist_for_each_entry(x, entry, net->xfrm.state_bydst+h, bydst) {
+	hlist_for_each_entry(x, net->xfrm.state_bydst+h, bydst) {
 		if (x->props.family == family &&
 		    x->props.reqid == reqid &&
 		    (mark & x->mark.m) == x->mark.v &&
@@ -972,12 +966,11 @@ static void __xfrm_state_bump_genids(struct xfrm_state *xnew)
 	unsigned short family = xnew->props.family;
 	u32 reqid = xnew->props.reqid;
 	struct xfrm_state *x;
-	struct hlist_node *entry;
 	unsigned int h;
 	u32 mark = xnew->mark.v & xnew->mark.m;
 
 	h = xfrm_dst_hash(net, &xnew->id.daddr, &xnew->props.saddr, reqid, family);
-	hlist_for_each_entry(x, entry, net->xfrm.state_bydst+h, bydst) {
+	hlist_for_each_entry(x, net->xfrm.state_bydst+h, bydst) {
 		if (x->props.family == family &&
 		    x->props.reqid == reqid &&
 		    (mark & x->mark.m) == x->mark.v &&
@@ -1004,11 +997,10 @@ static struct xfrm_state *__find_acq_core(struct net *net, struct xfrm_mark *m,
 					  const xfrm_address_t *saddr, int create)
 {
 	unsigned int h = xfrm_dst_hash(net, daddr, saddr, reqid, family);
-	struct hlist_node *entry;
 	struct xfrm_state *x;
 	u32 mark = m->v & m->m;
 
-	hlist_for_each_entry(x, entry, net->xfrm.state_bydst+h, bydst) {
+	hlist_for_each_entry(x, net->xfrm.state_bydst+h, bydst) {
 		if (x->props.reqid != reqid ||
 		    x->props.mode != mode ||
 		    x->props.family != family ||
@@ -1215,12 +1207,11 @@ struct xfrm_state * xfrm_migrate_state_find(struct xfrm_migrate *m)
 {
 	unsigned int h;
 	struct xfrm_state *x;
-	struct hlist_node *entry;
 
 	if (m->reqid) {
 		h = xfrm_dst_hash(&init_net, &m->old_daddr, &m->old_saddr,
 				  m->reqid, m->old_family);
-		hlist_for_each_entry(x, entry, init_net.xfrm.state_bydst+h, bydst) {
+		hlist_for_each_entry(x, init_net.xfrm.state_bydst+h, bydst) {
 			if (x->props.mode != m->mode ||
 			    x->id.proto != m->proto)
 				continue;
@@ -1237,7 +1228,7 @@ struct xfrm_state * xfrm_migrate_state_find(struct xfrm_migrate *m)
 	} else {
 		h = xfrm_src_hash(&init_net, &m->old_daddr, &m->old_saddr,
 				  m->old_family);
-		hlist_for_each_entry(x, entry, init_net.xfrm.state_bysrc+h, bysrc) {
+		hlist_for_each_entry(x, init_net.xfrm.state_bysrc+h, bysrc) {
 			if (x->props.mode != m->mode ||
 			    x->id.proto != m->proto)
 				continue;
@@ -1466,10 +1457,9 @@ static struct xfrm_state *__xfrm_find_acq_byseq(struct net *net, u32 mark, u32 s
 	int i;
 
 	for (i = 0; i <= net->xfrm.state_hmask; i++) {
-		struct hlist_node *entry;
 		struct xfrm_state *x;
 
-		hlist_for_each_entry(x, entry, net->xfrm.state_bydst+i, bydst) {
+		hlist_for_each_entry(x, net->xfrm.state_bydst+i, bydst) {
 			if (x->km.seq == seq &&
 			    (mark & x->mark.m) == x->mark.v &&
 			    x->km.state == XFRM_STATE_ACQ) {