-rw-r--r--  arch/arm/kernel/kprobes.c | 6
-rw-r--r--  arch/ia64/kernel/kprobes.c | 8
-rw-r--r--  arch/mips/kernel/kprobes.c | 6
-rw-r--r--  arch/powerpc/kernel/kprobes.c | 6
-rw-r--r--  arch/powerpc/kvm/book3s_mmu_hpte.c | 18
-rw-r--r--  arch/s390/kernel/kprobes.c | 8
-rw-r--r--  arch/s390/pci/pci_msi.c | 3
-rw-r--r--  arch/sh/kernel/kprobes.c | 6
-rw-r--r--  arch/sparc/kernel/kprobes.c | 6
-rw-r--r--  arch/sparc/kernel/ldc.c | 3
-rw-r--r--  arch/x86/kernel/kprobes/core.c | 8
-rw-r--r--  arch/x86/kvm/mmu.c | 26
-rw-r--r--  block/blk-cgroup.c | 6
-rw-r--r--  block/blk-ioc.c | 3
-rw-r--r--  block/bsg.c | 3
-rw-r--r--  block/cfq-iosched.c | 3
-rw-r--r--  block/elevator.c | 4
-rw-r--r--  crypto/algapi.c | 6
-rw-r--r--  drivers/atm/atmtcp.c | 6
-rw-r--r--  drivers/atm/eni.c | 3
-rw-r--r--  drivers/atm/he.c | 3
-rw-r--r--  drivers/atm/solos-pci.c | 3
-rw-r--r--  drivers/clk/clk.c | 59
-rw-r--r--  drivers/gpu/drm/drm_hashtab.c | 19
-rw-r--r--  drivers/infiniband/core/cma.c | 3
-rw-r--r--  drivers/infiniband/core/fmr_pool.c | 3
-rw-r--r--  drivers/isdn/mISDN/socket.c | 3
-rw-r--r--  drivers/isdn/mISDN/stack.c | 3
-rw-r--r--  drivers/md/dm-bio-prison.c | 3
-rw-r--r--  drivers/md/dm-bufio.c | 3
-rw-r--r--  drivers/md/dm-snap.c | 3
-rw-r--r--  drivers/md/persistent-data/dm-transaction-manager.c | 7
-rw-r--r--  drivers/md/raid5.c | 3
-rw-r--r--  drivers/misc/sgi-gru/grutlbpurge.c | 3
-rw-r--r--  drivers/misc/vmw_vmci/vmci_doorbell.c | 7
-rw-r--r--  drivers/misc/vmw_vmci/vmci_resource.c | 6
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c | 18
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 8
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_netdev.c | 17
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_rx.c | 4
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c | 10
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c | 10
-rw-r--r--  drivers/net/ethernet/sun/sunvnet.c | 3
-rw-r--r--  drivers/net/macvlan.c | 6
-rw-r--r--  drivers/net/tun.c | 15
-rw-r--r--  drivers/net/vxlan.c | 12
-rw-r--r--  drivers/net/wireless/zd1201.c | 7
-rw-r--r--  drivers/pci/pci.c | 12
-rw-r--r--  drivers/staging/android/binder.c | 19
-rw-r--r--  drivers/target/tcm_fc/tfc_sess.c | 12
-rw-r--r--  fs/affs/amigaffs.c | 3
-rw-r--r--  fs/aio.c | 3
-rw-r--r--  fs/cifs/inode.c | 3
-rw-r--r--  fs/dcache.c | 9
-rw-r--r--  fs/dlm/lowcomms.c | 11
-rw-r--r--  fs/ecryptfs/messaging.c | 6
-rw-r--r--  fs/exportfs/expfs.c | 3
-rw-r--r--  fs/fat/inode.c | 3
-rw-r--r--  fs/fat/nfs.c | 3
-rw-r--r--  fs/fscache/cookie.c | 11
-rw-r--r--  fs/inode.c | 19
-rw-r--r--  fs/lockd/host.c | 29
-rw-r--r--  fs/lockd/svcsubs.c | 7
-rw-r--r--  fs/nfs/pnfs_dev.c | 9
-rw-r--r--  fs/nfsd/nfscache.c | 3
-rw-r--r--  fs/notify/fsnotify.c | 3
-rw-r--r--  fs/notify/inode_mark.c | 19
-rw-r--r--  fs/notify/vfsmount_mark.c | 19
-rw-r--r--  fs/ocfs2/dcache.c | 3
-rw-r--r--  fs/ocfs2/dlm/dlmrecovery.c | 6
-rw-r--r--  fs/super.c | 6
-rw-r--r--  fs/sysfs/bin.c | 3
-rw-r--r--  fs/xfs/xfs_log_recover.c | 3
-rw-r--r--  include/linux/hashtable.h | 40
-rw-r--r--  include/linux/if_team.h | 6
-rw-r--r--  include/linux/list.h | 49
-rw-r--r--  include/linux/pid.h | 3
-rw-r--r--  include/linux/rculist.h | 56
-rw-r--r--  include/net/ax25.h | 8
-rw-r--r--  include/net/inet_hashtables.h | 4
-rw-r--r--  include/net/inet_timewait_sock.h | 8
-rw-r--r--  include/net/netrom.h | 16
-rw-r--r--  include/net/sch_generic.h | 3
-rw-r--r--  include/net/sctp/sctp.h | 4
-rw-r--r--  include/net/sock.h | 21
-rw-r--r--  kernel/cgroup.c | 12
-rw-r--r--  kernel/events/core.c | 6
-rw-r--r--  kernel/kprobes.c | 35
-rw-r--r--  kernel/pid.c | 3
-rw-r--r--  kernel/sched/core.c | 6
-rw-r--r--  kernel/smpboot.c | 2
-rw-r--r--  kernel/trace/ftrace.c | 24
-rw-r--r--  kernel/trace/trace_output.c | 3
-rw-r--r--  kernel/tracepoint.c | 6
-rw-r--r--  kernel/user-return-notifier.c | 4
-rw-r--r--  kernel/user.c | 3
-rw-r--r--  kernel/workqueue.c | 13
-rw-r--r--  lib/debugobjects.c | 21
-rw-r--r--  lib/lru_cache.c | 3
-rw-r--r--  mm/huge_memory.c | 3
-rw-r--r--  mm/kmemleak.c | 9
-rw-r--r--  mm/ksm.c | 15
-rw-r--r--  mm/mmu_notifier.c | 18
-rw-r--r--  net/9p/error.c | 4
-rw-r--r--  net/9p/trans_virtio.c | 2
-rw-r--r--  net/appletalk/ddp.c | 9
-rw-r--r--  net/atm/common.c | 7
-rw-r--r--  net/atm/lec.c | 66
-rw-r--r--  net/atm/signaling.c | 3
-rw-r--r--  net/ax25/af_ax25.c | 15
-rw-r--r--  net/ax25/ax25_ds_subr.c | 6
-rw-r--r--  net/ax25/ax25_ds_timer.c | 3
-rw-r--r--  net/ax25/ax25_iface.c | 3
-rw-r--r--  net/ax25/ax25_uid.c | 11
-rw-r--r--  net/batman-adv/bat_iv_ogm.c | 12
-rw-r--r--  net/batman-adv/bridge_loop_avoidance.c | 39
-rw-r--r--  net/batman-adv/distributed-arp-table.c | 15
-rw-r--r--  net/batman-adv/gateway_client.c | 13
-rw-r--r--  net/batman-adv/main.c | 6
-rw-r--r--  net/batman-adv/originator.c | 31
-rw-r--r--  net/batman-adv/originator.h | 3
-rw-r--r--  net/batman-adv/routing.c | 6
-rw-r--r--  net/batman-adv/send.c | 6
-rw-r--r--  net/batman-adv/translation-table.c | 82
-rw-r--r--  net/batman-adv/vis.c | 38
-rw-r--r--  net/bluetooth/hci_sock.c | 15
-rw-r--r--  net/bluetooth/rfcomm/sock.c | 13
-rw-r--r--  net/bluetooth/sco.c | 14
-rw-r--r--  net/bridge/br_fdb.c | 23
-rw-r--r--  net/bridge/br_mdb.c | 6
-rw-r--r--  net/bridge/br_multicast.c | 25
-rw-r--r--  net/can/af_can.c | 18
-rw-r--r--  net/can/gw.c | 15
-rw-r--r--  net/can/proc.c | 3
-rw-r--r--  net/core/dev.c | 12
-rw-r--r--  net/core/flow.c | 11
-rw-r--r--  net/core/net-procfs.c | 3
-rw-r--r--  net/core/rtnetlink.c | 3
-rw-r--r--  net/decnet/af_decnet.c | 9
-rw-r--r--  net/decnet/dn_table.c | 13
-rw-r--r--  net/ieee802154/dgram.c | 3
-rw-r--r--  net/ieee802154/raw.c | 3
-rw-r--r--  net/ipv4/devinet.c | 10
-rw-r--r--  net/ipv4/fib_frontend.c | 15
-rw-r--r--  net/ipv4/fib_semantics.c | 23
-rw-r--r--  net/ipv4/fib_trie.c | 33
-rw-r--r--  net/ipv4/inet_connection_sock.c | 10
-rw-r--r--  net/ipv4/inet_fragment.c | 10
-rw-r--r--  net/ipv4/inet_hashtables.c | 8
-rw-r--r--  net/ipv4/inet_timewait_sock.c | 7
-rw-r--r--  net/ipv4/raw.c | 8
-rw-r--r--  net/ipv4/tcp_ipv4.c | 7
-rw-r--r--  net/ipv6/addrconf.c | 32
-rw-r--r--  net/ipv6/addrlabel.c | 18
-rw-r--r--  net/ipv6/inet6_connection_sock.c | 5
-rw-r--r--  net/ipv6/ip6_fib.c | 12
-rw-r--r--  net/ipv6/raw.c | 3
-rw-r--r--  net/ipv6/xfrm6_tunnel.c | 10
-rw-r--r--  net/ipx/af_ipx.c | 16
-rw-r--r--  net/ipx/ipx_proc.c | 5
-rw-r--r--  net/iucv/af_iucv.c | 21
-rw-r--r--  net/key/af_key.c | 3
-rw-r--r--  net/l2tp/l2tp_core.c | 12
-rw-r--r--  net/l2tp/l2tp_ip.c | 3
-rw-r--r--  net/l2tp/l2tp_ip6.c | 3
-rw-r--r--  net/llc/llc_sap.c | 3
-rw-r--r--  net/mac80211/mesh_pathtbl.c | 45
-rw-r--r--  net/netfilter/ipvs/ip_vs_conn.c | 26
-rw-r--r--  net/netfilter/nf_conntrack_expect.c | 17
-rw-r--r--  net/netfilter/nf_conntrack_helper.c | 13
-rw-r--r--  net/netfilter/nf_conntrack_netlink.c | 9
-rw-r--r--  net/netfilter/nf_conntrack_sip.c | 8
-rw-r--r--  net/netfilter/nf_nat_core.c | 3
-rw-r--r--  net/netfilter/nfnetlink_cthelper.c | 17
-rw-r--r--  net/netfilter/nfnetlink_log.c | 7
-rw-r--r--  net/netfilter/nfnetlink_queue_core.c | 10
-rw-r--r--  net/netfilter/xt_RATEEST.c | 3
-rw-r--r--  net/netfilter/xt_connlimit.c | 8
-rw-r--r--  net/netfilter/xt_hashlimit.c | 16
-rw-r--r--  net/netlink/af_netlink.c | 30
-rw-r--r--  net/netrom/af_netrom.c | 12
-rw-r--r--  net/netrom/nr_route.c | 30
-rw-r--r--  net/nfc/llcp/llcp.c | 16
-rw-r--r--  net/openvswitch/datapath.c | 10
-rw-r--r--  net/openvswitch/flow.c | 13
-rw-r--r--  net/openvswitch/vport.c | 3
-rw-r--r--  net/packet/af_packet.c | 3
-rw-r--r--  net/packet/diag.c | 3
-rw-r--r--  net/phonet/pep.c | 3
-rw-r--r--  net/phonet/socket.c | 9
-rw-r--r--  net/rds/bind.c | 3
-rw-r--r--  net/rds/connection.c | 9
-rw-r--r--  net/rose/af_rose.c | 14
-rw-r--r--  net/sched/sch_api.c | 4
-rw-r--r--  net/sched/sch_cbq.c | 18
-rw-r--r--  net/sched/sch_drr.c | 10
-rw-r--r--  net/sched/sch_hfsc.c | 15
-rw-r--r--  net/sched/sch_htb.c | 12
-rw-r--r--  net/sched/sch_qfq.c | 16
-rw-r--r--  net/sctp/endpointola.c | 3
-rw-r--r--  net/sctp/input.c | 6
-rw-r--r--  net/sctp/proc.c | 9
-rw-r--r--  net/sctp/socket.c | 9
-rw-r--r--  net/sunrpc/auth.c | 5
-rw-r--r--  net/sunrpc/cache.c | 4
-rw-r--r--  net/sunrpc/svcauth.c | 3
-rw-r--r--  net/tipc/name_table.c | 8
-rw-r--r--  net/tipc/node.c | 3
-rw-r--r--  net/unix/af_unix.c | 6
-rw-r--r--  net/unix/diag.c | 7
-rw-r--r--  net/x25/af_x25.c | 12
-rw-r--r--  net/xfrm/xfrm_policy.c | 47
-rw-r--r--  net/xfrm/xfrm_state.c | 42
-rw-r--r--  security/integrity/ima/ima_queue.c | 3
-rw-r--r--  security/selinux/avc.c | 19
-rw-r--r--  tools/perf/util/evlist.c | 3
-rw-r--r--  virt/kvm/eventfd.c | 3
-rw-r--r--  virt/kvm/irq_comm.c | 18
218 files changed, 987 insertions(+), 1494 deletions(-)
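
Every hunk below makes the same mechanical change: the hlist iterator macros no longer take a separate `struct hlist_node *` cursor, so each caller deletes that variable and drops one macro argument. A minimal before/after sketch of the new calling convention (the `struct foo` container and the `find_foo()`/`flush_foos()` helpers are hypothetical, not part of this commit):

	#include <linux/list.h>
	#include <linux/slab.h>

	struct foo {
		int id;
		struct hlist_node node;	/* links this object into a bucket */
	};

	static struct foo *find_foo(struct hlist_head *head, int id)
	{
		struct foo *f;

		/*
		 * Old style needed a spare cursor:
		 *	struct hlist_node *pos;
		 *	hlist_for_each_entry(f, pos, head, node) ...
		 * The new macro derives the cursor from 'f' itself.
		 */
		hlist_for_each_entry(f, head, node)
			if (f->id == id)
				return f;
		return NULL;
	}

	static void flush_foos(struct hlist_head *head)
	{
		struct foo *f;
		struct hlist_node *tmp;

		/* The _safe variant still keeps one node pointer ('tmp')
		 * so the current entry may be freed while iterating. */
		hlist_for_each_entry_safe(f, tmp, head, node) {
			hlist_del(&f->node);
			kfree(f);
		}
	}
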
diff --git a/arch/arm/kernel/kprobes.c b/arch/arm/kernel/kprobes.c
index 4dd41fc9e235..170e9f34003f 100644
--- a/arch/arm/kernel/kprobes.c
+++ b/arch/arm/kernel/kprobes.c
@@ -395,7 +395,7 @@ static __used __kprobes void *trampoline_handler(struct pt_regs *regs)
 {
 	struct kretprobe_instance *ri = NULL;
 	struct hlist_head *head, empty_rp;
-	struct hlist_node *node, *tmp;
+	struct hlist_node *tmp;
 	unsigned long flags, orig_ret_address = 0;
 	unsigned long trampoline_address = (unsigned long)&kretprobe_trampoline;
 
@@ -415,7 +415,7 @@ static __used __kprobes void *trampoline_handler(struct pt_regs *regs)
 	 * real return address, and all the rest will point to
 	 * kretprobe_trampoline
 	 */
-	hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
+	hlist_for_each_entry_safe(ri, tmp, head, hlist) {
 		if (ri->task != current)
 			/* another task is sharing our hash bucket */
 			continue;
@@ -442,7 +442,7 @@ static __used __kprobes void *trampoline_handler(struct pt_regs *regs)
 	kretprobe_assert(ri, orig_ret_address, trampoline_address);
 	kretprobe_hash_unlock(current, &flags);
 
-	hlist_for_each_entry_safe(ri, node, tmp, &empty_rp, hlist) {
+	hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {
 		hlist_del(&ri->hlist);
 		kfree(ri);
 	}
diff --git a/arch/ia64/kernel/kprobes.c b/arch/ia64/kernel/kprobes.c
index 7026b29e277a..f8280a766a78 100644
--- a/arch/ia64/kernel/kprobes.c
+++ b/arch/ia64/kernel/kprobes.c
@@ -423,7 +423,7 @@ int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
 {
 	struct kretprobe_instance *ri = NULL;
 	struct hlist_head *head, empty_rp;
-	struct hlist_node *node, *tmp;
+	struct hlist_node *tmp;
 	unsigned long flags, orig_ret_address = 0;
 	unsigned long trampoline_address =
 		((struct fnptr *)kretprobe_trampoline)->ip;
@@ -444,7 +444,7 @@ int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
 	 * real return address, and all the rest will point to
 	 * kretprobe_trampoline
 	 */
-	hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
+	hlist_for_each_entry_safe(ri, tmp, head, hlist) {
 		if (ri->task != current)
 			/* another task is sharing our hash bucket */
 			continue;
@@ -461,7 +461,7 @@ int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
 
 	regs->cr_iip = orig_ret_address;
 
-	hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
+	hlist_for_each_entry_safe(ri, tmp, head, hlist) {
 		if (ri->task != current)
 			/* another task is sharing our hash bucket */
 			continue;
@@ -487,7 +487,7 @@ int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
 	kretprobe_hash_unlock(current, &flags);
 	preempt_enable_no_resched();
 
-	hlist_for_each_entry_safe(ri, node, tmp, &empty_rp, hlist) {
+	hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {
 		hlist_del(&ri->hlist);
 		kfree(ri);
 	}
diff --git a/arch/mips/kernel/kprobes.c b/arch/mips/kernel/kprobes.c
index 158467da9bc1..ce3f0807ad1e 100644
--- a/arch/mips/kernel/kprobes.c
+++ b/arch/mips/kernel/kprobes.c
@@ -598,7 +598,7 @@ static int __kprobes trampoline_probe_handler(struct kprobe *p,
 {
 	struct kretprobe_instance *ri = NULL;
 	struct hlist_head *head, empty_rp;
-	struct hlist_node *node, *tmp;
+	struct hlist_node *tmp;
 	unsigned long flags, orig_ret_address = 0;
 	unsigned long trampoline_address = (unsigned long)kretprobe_trampoline;
 
@@ -618,7 +618,7 @@ static int __kprobes trampoline_probe_handler(struct kprobe *p,
 	 * real return address, and all the rest will point to
 	 * kretprobe_trampoline
 	 */
-	hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
+	hlist_for_each_entry_safe(ri, tmp, head, hlist) {
 		if (ri->task != current)
 			/* another task is sharing our hash bucket */
 			continue;
@@ -645,7 +645,7 @@ static int __kprobes trampoline_probe_handler(struct kprobe *p,
 	kretprobe_hash_unlock(current, &flags);
 	preempt_enable_no_resched();
 
-	hlist_for_each_entry_safe(ri, node, tmp, &empty_rp, hlist) {
+	hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {
 		hlist_del(&ri->hlist);
 		kfree(ri);
 	}
diff --git a/arch/powerpc/kernel/kprobes.c b/arch/powerpc/kernel/kprobes.c
index e88c64331819..11f5b03a0b06 100644
--- a/arch/powerpc/kernel/kprobes.c
+++ b/arch/powerpc/kernel/kprobes.c
@@ -310,7 +310,7 @@ static int __kprobes trampoline_probe_handler(struct kprobe *p,
 {
 	struct kretprobe_instance *ri = NULL;
 	struct hlist_head *head, empty_rp;
-	struct hlist_node *node, *tmp;
+	struct hlist_node *tmp;
 	unsigned long flags, orig_ret_address = 0;
 	unsigned long trampoline_address =(unsigned long)&kretprobe_trampoline;
 
@@ -330,7 +330,7 @@ static int __kprobes trampoline_probe_handler(struct kprobe *p,
 	 * real return address, and all the rest will point to
 	 * kretprobe_trampoline
 	 */
-	hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
+	hlist_for_each_entry_safe(ri, tmp, head, hlist) {
 		if (ri->task != current)
 			/* another task is sharing our hash bucket */
 			continue;
@@ -357,7 +357,7 @@ static int __kprobes trampoline_probe_handler(struct kprobe *p,
 	kretprobe_hash_unlock(current, &flags);
 	preempt_enable_no_resched();
 
-	hlist_for_each_entry_safe(ri, node, tmp, &empty_rp, hlist) {
+	hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {
 		hlist_del(&ri->hlist);
 		kfree(ri);
 	}
diff --git a/arch/powerpc/kvm/book3s_mmu_hpte.c b/arch/powerpc/kvm/book3s_mmu_hpte.c
index 2c86b0d63714..da8b13c4b776 100644
--- a/arch/powerpc/kvm/book3s_mmu_hpte.c
+++ b/arch/powerpc/kvm/book3s_mmu_hpte.c
@@ -124,7 +124,6 @@ static void kvmppc_mmu_pte_flush_all(struct kvm_vcpu *vcpu)
 {
 	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
 	struct hpte_cache *pte;
-	struct hlist_node *node;
 	int i;
 
 	rcu_read_lock();
@@ -132,7 +131,7 @@ static void kvmppc_mmu_pte_flush_all(struct kvm_vcpu *vcpu)
 	for (i = 0; i < HPTEG_HASH_NUM_VPTE_LONG; i++) {
 		struct hlist_head *list = &vcpu3s->hpte_hash_vpte_long[i];
 
-		hlist_for_each_entry_rcu(pte, node, list, list_vpte_long)
+		hlist_for_each_entry_rcu(pte, list, list_vpte_long)
 			invalidate_pte(vcpu, pte);
 	}
 
@@ -143,7 +142,6 @@ static void kvmppc_mmu_pte_flush_page(struct kvm_vcpu *vcpu, ulong guest_ea)
 {
 	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
 	struct hlist_head *list;
-	struct hlist_node *node;
 	struct hpte_cache *pte;
 
 	/* Find the list of entries in the map */
@@ -152,7 +150,7 @@ static void kvmppc_mmu_pte_flush_page(struct kvm_vcpu *vcpu, ulong guest_ea)
 	rcu_read_lock();
 
 	/* Check the list for matching entries and invalidate */
-	hlist_for_each_entry_rcu(pte, node, list, list_pte)
+	hlist_for_each_entry_rcu(pte, list, list_pte)
 		if ((pte->pte.eaddr & ~0xfffUL) == guest_ea)
 			invalidate_pte(vcpu, pte);
 
@@ -163,7 +161,6 @@ static void kvmppc_mmu_pte_flush_long(struct kvm_vcpu *vcpu, ulong guest_ea)
 {
 	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
 	struct hlist_head *list;
-	struct hlist_node *node;
 	struct hpte_cache *pte;
 
 	/* Find the list of entries in the map */
@@ -173,7 +170,7 @@ static void kvmppc_mmu_pte_flush_long(struct kvm_vcpu *vcpu, ulong guest_ea)
 	rcu_read_lock();
 
 	/* Check the list for matching entries and invalidate */
-	hlist_for_each_entry_rcu(pte, node, list, list_pte_long)
+	hlist_for_each_entry_rcu(pte, list, list_pte_long)
 		if ((pte->pte.eaddr & 0x0ffff000UL) == guest_ea)
 			invalidate_pte(vcpu, pte);
 
@@ -207,7 +204,6 @@ static void kvmppc_mmu_pte_vflush_short(struct kvm_vcpu *vcpu, u64 guest_vp)
 {
 	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
 	struct hlist_head *list;
-	struct hlist_node *node;
 	struct hpte_cache *pte;
 	u64 vp_mask = 0xfffffffffULL;
 
@@ -216,7 +212,7 @@ static void kvmppc_mmu_pte_vflush_short(struct kvm_vcpu *vcpu, u64 guest_vp)
 	rcu_read_lock();
 
 	/* Check the list for matching entries and invalidate */
-	hlist_for_each_entry_rcu(pte, node, list, list_vpte)
+	hlist_for_each_entry_rcu(pte, list, list_vpte)
 		if ((pte->pte.vpage & vp_mask) == guest_vp)
 			invalidate_pte(vcpu, pte);
 
@@ -228,7 +224,6 @@ static void kvmppc_mmu_pte_vflush_long(struct kvm_vcpu *vcpu, u64 guest_vp)
 {
 	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
 	struct hlist_head *list;
-	struct hlist_node *node;
 	struct hpte_cache *pte;
 	u64 vp_mask = 0xffffff000ULL;
 
@@ -238,7 +233,7 @@ static void kvmppc_mmu_pte_vflush_long(struct kvm_vcpu *vcpu, u64 guest_vp)
 	rcu_read_lock();
 
 	/* Check the list for matching entries and invalidate */
-	hlist_for_each_entry_rcu(pte, node, list, list_vpte_long)
+	hlist_for_each_entry_rcu(pte, list, list_vpte_long)
 		if ((pte->pte.vpage & vp_mask) == guest_vp)
 			invalidate_pte(vcpu, pte);
 
@@ -266,7 +261,6 @@ void kvmppc_mmu_pte_vflush(struct kvm_vcpu *vcpu, u64 guest_vp, u64 vp_mask)
 void kvmppc_mmu_pte_pflush(struct kvm_vcpu *vcpu, ulong pa_start, ulong pa_end)
 {
 	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
-	struct hlist_node *node;
 	struct hpte_cache *pte;
 	int i;
 
@@ -277,7 +271,7 @@ void kvmppc_mmu_pte_pflush(struct kvm_vcpu *vcpu, ulong pa_start, ulong pa_end)
 	for (i = 0; i < HPTEG_HASH_NUM_VPTE_LONG; i++) {
 		struct hlist_head *list = &vcpu3s->hpte_hash_vpte_long[i];
 
-		hlist_for_each_entry_rcu(pte, node, list, list_vpte_long)
+		hlist_for_each_entry_rcu(pte, list, list_vpte_long)
 			if ((pte->pte.raddr >= pa_start) &&
 			    (pte->pte.raddr < pa_end))
 				invalidate_pte(vcpu, pte);
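
The RCU flavours change the same way, as the book3s hunks above and the pci_msi hunk below show. A hedged sketch of the post-change lookup pattern (the `struct item` hash and `lookup_item()` helper are hypothetical, not from this commit):

	#include <linux/rculist.h>
	#include <linux/rcupdate.h>

	struct item {
		unsigned int key;
		struct hlist_node chain;
	};

	static struct item *lookup_item(struct hlist_head *bucket, unsigned int key)
	{
		struct item *it;

		rcu_read_lock();
		/* No node cursor argument anymore; traversal still uses
		 * rcu_dereference() internally, so the read lock must be held. */
		hlist_for_each_entry_rcu(it, bucket, chain) {
			if (it->key == key) {
				rcu_read_unlock();
				return it;
			}
		}
		rcu_read_unlock();
		return NULL;
	}
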
diff --git a/arch/s390/kernel/kprobes.c b/arch/s390/kernel/kprobes.c
index d1c7214e157c..3388b2b2a07d 100644
--- a/arch/s390/kernel/kprobes.c
+++ b/arch/s390/kernel/kprobes.c
@@ -354,7 +354,7 @@ static int __kprobes trampoline_probe_handler(struct kprobe *p,
 {
 	struct kretprobe_instance *ri;
 	struct hlist_head *head, empty_rp;
-	struct hlist_node *node, *tmp;
+	struct hlist_node *tmp;
 	unsigned long flags, orig_ret_address;
 	unsigned long trampoline_address;
 	kprobe_opcode_t *correct_ret_addr;
@@ -379,7 +379,7 @@ static int __kprobes trampoline_probe_handler(struct kprobe *p,
 	orig_ret_address = 0;
 	correct_ret_addr = NULL;
 	trampoline_address = (unsigned long) &kretprobe_trampoline;
-	hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
+	hlist_for_each_entry_safe(ri, tmp, head, hlist) {
 		if (ri->task != current)
 			/* another task is sharing our hash bucket */
 			continue;
@@ -398,7 +398,7 @@ static int __kprobes trampoline_probe_handler(struct kprobe *p,
 	kretprobe_assert(ri, orig_ret_address, trampoline_address);
 
 	correct_ret_addr = ri->ret_addr;
-	hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
+	hlist_for_each_entry_safe(ri, tmp, head, hlist) {
 		if (ri->task != current)
 			/* another task is sharing our hash bucket */
 			continue;
@@ -427,7 +427,7 @@ static int __kprobes trampoline_probe_handler(struct kprobe *p,
 	kretprobe_hash_unlock(current, &flags);
 	preempt_enable_no_resched();
 
-	hlist_for_each_entry_safe(ri, node, tmp, &empty_rp, hlist) {
+	hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {
 		hlist_del(&ri->hlist);
 		kfree(ri);
 	}
diff --git a/arch/s390/pci/pci_msi.c b/arch/s390/pci/pci_msi.c
index 90fd3482b9e2..0297931335e1 100644
--- a/arch/s390/pci/pci_msi.c
+++ b/arch/s390/pci/pci_msi.c
@@ -25,10 +25,9 @@ static DEFINE_SPINLOCK(msi_map_lock);
 
 struct msi_desc *__irq_get_msi_desc(unsigned int irq)
 {
-	struct hlist_node *entry;
 	struct msi_map *map;
 
-	hlist_for_each_entry_rcu(map, entry,
+	hlist_for_each_entry_rcu(map,
 			&msi_hash[msi_hashfn(irq)], msi_chain)
 		if (map->irq == irq)
 			return map->msi;
diff --git a/arch/sh/kernel/kprobes.c b/arch/sh/kernel/kprobes.c
index 1208b09e95c3..42b46e61a2d5 100644
--- a/arch/sh/kernel/kprobes.c
+++ b/arch/sh/kernel/kprobes.c
@@ -310,7 +310,7 @@ int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
 {
 	struct kretprobe_instance *ri = NULL;
 	struct hlist_head *head, empty_rp;
-	struct hlist_node *node, *tmp;
+	struct hlist_node *tmp;
 	unsigned long flags, orig_ret_address = 0;
 	unsigned long trampoline_address = (unsigned long)&kretprobe_trampoline;
 
@@ -330,7 +330,7 @@ int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
 	 * real return address, and all the rest will point to
 	 * kretprobe_trampoline
 	 */
-	hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
+	hlist_for_each_entry_safe(ri, tmp, head, hlist) {
 		if (ri->task != current)
 			/* another task is sharing our hash bucket */
 			continue;
@@ -360,7 +360,7 @@ int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
 
 	preempt_enable_no_resched();
 
-	hlist_for_each_entry_safe(ri, node, tmp, &empty_rp, hlist) {
+	hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {
 		hlist_del(&ri->hlist);
 		kfree(ri);
 	}
diff --git a/arch/sparc/kernel/kprobes.c b/arch/sparc/kernel/kprobes.c
index a39d1ba5a119..e72212148d2a 100644
--- a/arch/sparc/kernel/kprobes.c
+++ b/arch/sparc/kernel/kprobes.c
@@ -511,7 +511,7 @@ int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
 {
 	struct kretprobe_instance *ri = NULL;
 	struct hlist_head *head, empty_rp;
-	struct hlist_node *node, *tmp;
+	struct hlist_node *tmp;
 	unsigned long flags, orig_ret_address = 0;
 	unsigned long trampoline_address =(unsigned long)&kretprobe_trampoline;
 
@@ -531,7 +531,7 @@ int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
 	 * real return address, and all the rest will point to
 	 * kretprobe_trampoline
 	 */
-	hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
+	hlist_for_each_entry_safe(ri, tmp, head, hlist) {
 		if (ri->task != current)
 			/* another task is sharing our hash bucket */
 			continue;
@@ -559,7 +559,7 @@ int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
 	kretprobe_hash_unlock(current, &flags);
 	preempt_enable_no_resched();
 
-	hlist_for_each_entry_safe(ri, node, tmp, &empty_rp, hlist) {
+	hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {
 		hlist_del(&ri->hlist);
 		kfree(ri);
 	}
diff --git a/arch/sparc/kernel/ldc.c b/arch/sparc/kernel/ldc.c
index 9fcc6b4e93b3..54df554b82d9 100644
--- a/arch/sparc/kernel/ldc.c
+++ b/arch/sparc/kernel/ldc.c
@@ -953,9 +953,8 @@ static HLIST_HEAD(ldc_channel_list);
 static int __ldc_channel_exists(unsigned long id)
 {
 	struct ldc_channel *lp;
-	struct hlist_node *n;
 
-	hlist_for_each_entry(lp, n, &ldc_channel_list, list) {
+	hlist_for_each_entry(lp, &ldc_channel_list, list) {
 		if (lp->id == id)
 			return 1;
 	}
diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c
index e124554598ee..3f06e6149981 100644
--- a/arch/x86/kernel/kprobes/core.c
+++ b/arch/x86/kernel/kprobes/core.c
@@ -652,7 +652,7 @@ static __used __kprobes void *trampoline_handler(struct pt_regs *regs)
 {
 	struct kretprobe_instance *ri = NULL;
 	struct hlist_head *head, empty_rp;
-	struct hlist_node *node, *tmp;
+	struct hlist_node *tmp;
 	unsigned long flags, orig_ret_address = 0;
 	unsigned long trampoline_address = (unsigned long)&kretprobe_trampoline;
 	kprobe_opcode_t *correct_ret_addr = NULL;
@@ -682,7 +682,7 @@ static __used __kprobes void *trampoline_handler(struct pt_regs *regs)
 	 * will be the real return address, and all the rest will
 	 * point to kretprobe_trampoline.
 	 */
-	hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
+	hlist_for_each_entry_safe(ri, tmp, head, hlist) {
 		if (ri->task != current)
 			/* another task is sharing our hash bucket */
 			continue;
@@ -701,7 +701,7 @@ static __used __kprobes void *trampoline_handler(struct pt_regs *regs)
 	kretprobe_assert(ri, orig_ret_address, trampoline_address);
 
 	correct_ret_addr = ri->ret_addr;
-	hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
+	hlist_for_each_entry_safe(ri, tmp, head, hlist) {
 		if (ri->task != current)
 			/* another task is sharing our hash bucket */
 			continue;
@@ -728,7 +728,7 @@ static __used __kprobes void *trampoline_handler(struct pt_regs *regs)
 
 	kretprobe_hash_unlock(current, &flags);
 
-	hlist_for_each_entry_safe(ri, node, tmp, &empty_rp, hlist) {
+	hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {
 		hlist_del(&ri->hlist);
 		kfree(ri);
 	}
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 4ed3edbe06bd..956ca358108a 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -1644,13 +1644,13 @@ static int kvm_mmu_prepare_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp,
 static void kvm_mmu_commit_zap_page(struct kvm *kvm,
 				    struct list_head *invalid_list);
 
-#define for_each_gfn_sp(kvm, sp, gfn, pos)				\
-	hlist_for_each_entry(sp, pos,					\
+#define for_each_gfn_sp(kvm, sp, gfn)					\
+	hlist_for_each_entry(sp,					\
 	 &(kvm)->arch.mmu_page_hash[kvm_page_table_hashfn(gfn)], hash_link) \
 		if ((sp)->gfn != (gfn)) {} else
 
-#define for_each_gfn_indirect_valid_sp(kvm, sp, gfn, pos)		\
-	hlist_for_each_entry(sp, pos,					\
+#define for_each_gfn_indirect_valid_sp(kvm, sp, gfn)			\
+	hlist_for_each_entry(sp,					\
 	 &(kvm)->arch.mmu_page_hash[kvm_page_table_hashfn(gfn)], hash_link) \
 		if ((sp)->gfn != (gfn) || (sp)->role.direct ||		\
 			(sp)->role.invalid) {} else
@@ -1706,11 +1706,10 @@ static int kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
 static void kvm_sync_pages(struct kvm_vcpu *vcpu, gfn_t gfn)
 {
 	struct kvm_mmu_page *s;
-	struct hlist_node *node;
 	LIST_HEAD(invalid_list);
 	bool flush = false;
 
-	for_each_gfn_indirect_valid_sp(vcpu->kvm, s, gfn, node) {
+	for_each_gfn_indirect_valid_sp(vcpu->kvm, s, gfn) {
 		if (!s->unsync)
 			continue;
 
@@ -1848,7 +1847,6 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
 	union kvm_mmu_page_role role;
 	unsigned quadrant;
 	struct kvm_mmu_page *sp;
-	struct hlist_node *node;
 	bool need_sync = false;
 
 	role = vcpu->arch.mmu.base_role;
@@ -1863,7 +1861,7 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
 		quadrant &= (1 << ((PT32_PT_BITS - PT64_PT_BITS) * level)) - 1;
 		role.quadrant = quadrant;
 	}
-	for_each_gfn_sp(vcpu->kvm, sp, gfn, node) {
+	for_each_gfn_sp(vcpu->kvm, sp, gfn) {
 		if (!need_sync && sp->unsync)
 			need_sync = true;
 
@@ -2151,14 +2149,13 @@ void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int goal_nr_mmu_pages)
 int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn)
 {
 	struct kvm_mmu_page *sp;
-	struct hlist_node *node;
 	LIST_HEAD(invalid_list);
 	int r;
 
 	pgprintk("%s: looking for gfn %llx\n", __func__, gfn);
 	r = 0;
 	spin_lock(&kvm->mmu_lock);
-	for_each_gfn_indirect_valid_sp(kvm, sp, gfn, node) {
+	for_each_gfn_indirect_valid_sp(kvm, sp, gfn) {
 		pgprintk("%s: gfn %llx role %x\n", __func__, gfn,
 			 sp->role.word);
 		r = 1;
@@ -2288,9 +2285,8 @@ static void __kvm_unsync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
 static void kvm_unsync_pages(struct kvm_vcpu *vcpu, gfn_t gfn)
 {
 	struct kvm_mmu_page *s;
-	struct hlist_node *node;
 
-	for_each_gfn_indirect_valid_sp(vcpu->kvm, s, gfn, node) {
+	for_each_gfn_indirect_valid_sp(vcpu->kvm, s, gfn) {
 		if (s->unsync)
 			continue;
 		WARN_ON(s->role.level != PT_PAGE_TABLE_LEVEL);
@@ -2302,10 +2298,9 @@ static int mmu_need_write_protect(struct kvm_vcpu *vcpu, gfn_t gfn,
 				  bool can_unsync)
 {
 	struct kvm_mmu_page *s;
-	struct hlist_node *node;
 	bool need_unsync = false;
 
-	for_each_gfn_indirect_valid_sp(vcpu->kvm, s, gfn, node) {
+	for_each_gfn_indirect_valid_sp(vcpu->kvm, s, gfn) {
 		if (!can_unsync)
 			return 1;
 
@@ -3933,7 +3928,6 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
 	gfn_t gfn = gpa >> PAGE_SHIFT;
 	union kvm_mmu_page_role mask = { .word = 0 };
 	struct kvm_mmu_page *sp;
-	struct hlist_node *node;
 	LIST_HEAD(invalid_list);
 	u64 entry, gentry, *spte;
 	int npte;
@@ -3964,7 +3958,7 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
 	kvm_mmu_audit(vcpu, AUDIT_PRE_PTE_WRITE);
 
 	mask.cr0_wp = mask.cr4_pae = mask.nxe = 1;
-	for_each_gfn_indirect_valid_sp(vcpu->kvm, sp, gfn, node) {
+	for_each_gfn_indirect_valid_sp(vcpu->kvm, sp, gfn) {
 		if (detect_write_misaligned(sp, gpa, bytes) ||
 		      detect_write_flooding(sp)) {
 			zap_page |= !!kvm_mmu_prepare_zap_page(vcpu->kvm, sp,
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index b8858fb0cafa..8bdebb6781e1 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -357,7 +357,6 @@ static int blkcg_reset_stats(struct cgroup *cgroup, struct cftype *cftype,
 {
 	struct blkcg *blkcg = cgroup_to_blkcg(cgroup);
 	struct blkcg_gq *blkg;
-	struct hlist_node *n;
 	int i;
 
 	mutex_lock(&blkcg_pol_mutex);
@@ -368,7 +367,7 @@ static int blkcg_reset_stats(struct cgroup *cgroup, struct cftype *cftype,
 	 * stat updates. This is a debug feature which shouldn't exist
 	 * anyway. If you get hit by a race, retry.
 	 */
-	hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
+	hlist_for_each_entry(blkg, &blkcg->blkg_list, blkcg_node) {
 		for (i = 0; i < BLKCG_MAX_POLS; i++) {
 			struct blkcg_policy *pol = blkcg_policy[i];
 
@@ -415,11 +414,10 @@ void blkcg_print_blkgs(struct seq_file *sf, struct blkcg *blkcg,
 		       bool show_total)
 {
 	struct blkcg_gq *blkg;
-	struct hlist_node *n;
 	u64 total = 0;
 
 	spin_lock_irq(&blkcg->lock);
-	hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node)
+	hlist_for_each_entry(blkg, &blkcg->blkg_list, blkcg_node)
 		if (blkcg_policy_enabled(blkg->q, pol))
 			total += prfill(sf, blkg->pd[pol->plid], data);
 	spin_unlock_irq(&blkcg->lock);
diff --git a/block/blk-ioc.c b/block/blk-ioc.c
index fab4cdd3f7bb..9c4bb8266bc8 100644
--- a/block/blk-ioc.c
+++ b/block/blk-ioc.c
@@ -164,7 +164,6 @@ EXPORT_SYMBOL(put_io_context);
  */
 void put_io_context_active(struct io_context *ioc)
 {
-	struct hlist_node *n;
 	unsigned long flags;
 	struct io_cq *icq;
 
@@ -180,7 +179,7 @@ void put_io_context_active(struct io_context *ioc)
 	 */
 retry:
 	spin_lock_irqsave_nested(&ioc->lock, flags, 1);
-	hlist_for_each_entry(icq, n, &ioc->icq_list, ioc_node) {
+	hlist_for_each_entry(icq, &ioc->icq_list, ioc_node) {
 		if (icq->flags & ICQ_EXITED)
 			continue;
 		if (spin_trylock(icq->q->queue_lock)) {
diff --git a/block/bsg.c b/block/bsg.c
index 3ca92ebf6bbb..420a5a9f1b23 100644
--- a/block/bsg.c
+++ b/block/bsg.c
@@ -800,11 +800,10 @@ static struct bsg_device *bsg_add_device(struct inode *inode,
 static struct bsg_device *__bsg_get_device(int minor, struct request_queue *q)
 {
 	struct bsg_device *bd;
-	struct hlist_node *entry;
 
 	mutex_lock(&bsg_mutex);
 
-	hlist_for_each_entry(bd, entry, bsg_dev_idx_hash(minor), dev_list) {
+	hlist_for_each_entry(bd, bsg_dev_idx_hash(minor), dev_list) {
 		if (bd->queue == q) {
 			atomic_inc(&bd->ref_count);
 			goto found;
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index e62e9205b80a..ec52807cdd09 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -1435,7 +1435,6 @@ static int cfq_set_weight(struct cgroup *cgrp, struct cftype *cft, u64 val)
 {
 	struct blkcg *blkcg = cgroup_to_blkcg(cgrp);
 	struct blkcg_gq *blkg;
-	struct hlist_node *n;
 
 	if (val < CFQ_WEIGHT_MIN || val > CFQ_WEIGHT_MAX)
 		return -EINVAL;
@@ -1443,7 +1442,7 @@ static int cfq_set_weight(struct cgroup *cgrp, struct cftype *cft, u64 val)
 	spin_lock_irq(&blkcg->lock);
 	blkcg->cfq_weight = (unsigned int)val;
 
-	hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
+	hlist_for_each_entry(blkg, &blkcg->blkg_list, blkcg_node) {
 		struct cfq_group *cfqg = blkg_to_cfqg(blkg);
 
 		if (cfqg && !cfqg->dev_weight)
diff --git a/block/elevator.c b/block/elevator.c
index 603b2c178740..d0acb31cc083 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -288,10 +288,10 @@ static struct request *elv_rqhash_find(struct request_queue *q, sector_t offset)
 {
 	struct elevator_queue *e = q->elevator;
 	struct hlist_head *hash_list = &e->hash[ELV_HASH_FN(offset)];
-	struct hlist_node *entry, *next;
+	struct hlist_node *next;
 	struct request *rq;
 
-	hlist_for_each_entry_safe(rq, entry, next, hash_list, hash) {
+	hlist_for_each_entry_safe(rq, next, hash_list, hash) {
 		BUG_ON(!ELV_ON_HASH(rq));
 
 		if (unlikely(!rq_mergeable(rq))) {
diff --git a/crypto/algapi.c b/crypto/algapi.c
index 08c57c8aec95..6149a6e09643 100644
--- a/crypto/algapi.c
+++ b/crypto/algapi.c
@@ -447,7 +447,7 @@ EXPORT_SYMBOL_GPL(crypto_register_template);
 void crypto_unregister_template(struct crypto_template *tmpl)
 {
 	struct crypto_instance *inst;
-	struct hlist_node *p, *n;
+	struct hlist_node *n;
 	struct hlist_head *list;
 	LIST_HEAD(users);
 
@@ -457,7 +457,7 @@ void crypto_unregister_template(struct crypto_template *tmpl)
 	list_del_init(&tmpl->list);
 
 	list = &tmpl->instances;
-	hlist_for_each_entry(inst, p, list, list) {
+	hlist_for_each_entry(inst, list, list) {
 		int err = crypto_remove_alg(&inst->alg, &users);
 		BUG_ON(err);
 	}
@@ -466,7 +466,7 @@ void crypto_unregister_template(struct crypto_template *tmpl)
 
 	up_write(&crypto_alg_sem);
 
-	hlist_for_each_entry_safe(inst, p, n, list, list) {
+	hlist_for_each_entry_safe(inst, n, list, list) {
 		BUG_ON(atomic_read(&inst->alg.cra_refcnt) != 1);
 		tmpl->free(inst);
 	}
diff --git a/drivers/atm/atmtcp.c b/drivers/atm/atmtcp.c
index b22d71cac54c..0e3f8f9dcd29 100644
--- a/drivers/atm/atmtcp.c
+++ b/drivers/atm/atmtcp.c
@@ -157,7 +157,6 @@ static int atmtcp_v_ioctl(struct atm_dev *dev,unsigned int cmd,void __user *arg)
 {
 	struct atm_cirange ci;
 	struct atm_vcc *vcc;
-	struct hlist_node *node;
 	struct sock *s;
 	int i;
 
@@ -171,7 +170,7 @@ static int atmtcp_v_ioctl(struct atm_dev *dev,unsigned int cmd,void __user *arg)
 	for(i = 0; i < VCC_HTABLE_SIZE; ++i) {
 		struct hlist_head *head = &vcc_hash[i];
 
-		sk_for_each(s, node, head) {
+		sk_for_each(s, head) {
 			vcc = atm_sk(s);
 			if (vcc->dev != dev)
 				continue;
@@ -264,12 +263,11 @@ static struct atm_vcc *find_vcc(struct atm_dev *dev, short vpi, int vci)
 {
 	struct hlist_head *head;
 	struct atm_vcc *vcc;
-	struct hlist_node *node;
 	struct sock *s;
 
 	head = &vcc_hash[vci & (VCC_HTABLE_SIZE -1)];
 
-	sk_for_each(s, node, head) {
+	sk_for_each(s, head) {
 		vcc = atm_sk(s);
 		if (vcc->dev == dev &&
 		    vcc->vci == vci && vcc->vpi == vpi &&
diff --git a/drivers/atm/eni.c b/drivers/atm/eni.c
index c1eb6fa8ac35..b1955ba40d63 100644
--- a/drivers/atm/eni.c
+++ b/drivers/atm/eni.c
@@ -2093,7 +2093,6 @@ static unsigned char eni_phy_get(struct atm_dev *dev,unsigned long addr)
 
 static int eni_proc_read(struct atm_dev *dev,loff_t *pos,char *page)
 {
-	struct hlist_node *node;
 	struct sock *s;
 	static const char *signal[] = { "LOST","unknown","okay" };
 	struct eni_dev *eni_dev = ENI_DEV(dev);
@@ -2171,7 +2170,7 @@ static int eni_proc_read(struct atm_dev *dev,loff_t *pos,char *page)
 	for(i = 0; i < VCC_HTABLE_SIZE; ++i) {
 		struct hlist_head *head = &vcc_hash[i];
 
-		sk_for_each(s, node, head) {
+		sk_for_each(s, head) {
 			struct eni_vcc *eni_vcc;
 			int length;
 
diff --git a/drivers/atm/he.c b/drivers/atm/he.c
index 72b6960fa95f..d6891267f5bb 100644
--- a/drivers/atm/he.c
+++ b/drivers/atm/he.c
@@ -329,7 +329,6 @@ __find_vcc(struct he_dev *he_dev, unsigned cid)
 {
 	struct hlist_head *head;
 	struct atm_vcc *vcc;
-	struct hlist_node *node;
 	struct sock *s;
 	short vpi;
 	int vci;
@@ -338,7 +337,7 @@ __find_vcc(struct he_dev *he_dev, unsigned cid)
 	vci = cid & ((1 << he_dev->vcibits) - 1);
 	head = &vcc_hash[vci & (VCC_HTABLE_SIZE -1)];
 
-	sk_for_each(s, node, head) {
+	sk_for_each(s, head) {
 		vcc = atm_sk(s);
 		if (vcc->dev == he_dev->atm_dev &&
 		    vcc->vci == vci && vcc->vpi == vpi &&
diff --git a/drivers/atm/solos-pci.c b/drivers/atm/solos-pci.c
index 0474a89170b9..32784d18d1f7 100644
--- a/drivers/atm/solos-pci.c
+++ b/drivers/atm/solos-pci.c
@@ -896,12 +896,11 @@ static struct atm_vcc *find_vcc(struct atm_dev *dev, short vpi, int vci)
 {
 	struct hlist_head *head;
 	struct atm_vcc *vcc = NULL;
-	struct hlist_node *node;
 	struct sock *s;
 
 	read_lock(&vcc_sklist_lock);
 	head = &vcc_hash[vci & (VCC_HTABLE_SIZE -1)];
-	sk_for_each(s, node, head) {
+	sk_for_each(s, head) {
 		vcc = atm_sk(s);
 		if (vcc->dev == dev && vcc->vci == vci &&
 		    vcc->vpi == vpi && vcc->qos.rxtp.traffic_class != ATM_NONE &&
diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
index fabbfe1a9253..ed87b2405806 100644
--- a/drivers/clk/clk.c
+++ b/drivers/clk/clk.c
@@ -52,31 +52,29 @@ static void clk_summary_show_subtree(struct seq_file *s, struct clk *c,
 			     int level)
 {
 	struct clk *child;
-	struct hlist_node *tmp;
 
 	if (!c)
 		return;
 
 	clk_summary_show_one(s, c, level);
 
-	hlist_for_each_entry(child, tmp, &c->children, child_node)
+	hlist_for_each_entry(child, &c->children, child_node)
 		clk_summary_show_subtree(s, child, level + 1);
 }
 
 static int clk_summary_show(struct seq_file *s, void *data)
 {
 	struct clk *c;
-	struct hlist_node *tmp;
 
 	seq_printf(s, "   clock                        enable_cnt  prepare_cnt  rate\n");
 	seq_printf(s, "---------------------------------------------------------------------\n");
 
 	mutex_lock(&prepare_lock);
 
-	hlist_for_each_entry(c, tmp, &clk_root_list, child_node)
+	hlist_for_each_entry(c, &clk_root_list, child_node)
 		clk_summary_show_subtree(s, c, 0);
 
-	hlist_for_each_entry(c, tmp, &clk_orphan_list, child_node)
+	hlist_for_each_entry(c, &clk_orphan_list, child_node)
 		clk_summary_show_subtree(s, c, 0);
 
 	mutex_unlock(&prepare_lock);
@@ -111,14 +109,13 @@ static void clk_dump_one(struct seq_file *s, struct clk *c, int level)
 static void clk_dump_subtree(struct seq_file *s, struct clk *c, int level)
 {
 	struct clk *child;
-	struct hlist_node *tmp;
 
 	if (!c)
 		return;
 
 	clk_dump_one(s, c, level);
 
-	hlist_for_each_entry(child, tmp, &c->children, child_node) {
+	hlist_for_each_entry(child, &c->children, child_node) {
 		seq_printf(s, ",");
 		clk_dump_subtree(s, child, level + 1);
 	}
@@ -129,21 +126,20 @@ static void clk_dump_subtree(struct seq_file *s, struct clk *c, int level)
 static int clk_dump(struct seq_file *s, void *data)
 {
 	struct clk *c;
-	struct hlist_node *tmp;
 	bool first_node = true;
 
 	seq_printf(s, "{");
 
 	mutex_lock(&prepare_lock);
 
-	hlist_for_each_entry(c, tmp, &clk_root_list, child_node) {
+	hlist_for_each_entry(c, &clk_root_list, child_node) {
 		if (!first_node)
 			seq_printf(s, ",");
 		first_node = false;
 		clk_dump_subtree(s, c, 0);
 	}
 
-	hlist_for_each_entry(c, tmp, &clk_orphan_list, child_node) {
+	hlist_for_each_entry(c, &clk_orphan_list, child_node) {
 		seq_printf(s, ",");
 		clk_dump_subtree(s, c, 0);
 	}
@@ -222,7 +218,6 @@ out:
 static int clk_debug_create_subtree(struct clk *clk, struct dentry *pdentry)
 {
 	struct clk *child;
-	struct hlist_node *tmp;
 	int ret = -EINVAL;;
 
 	if (!clk || !pdentry)
@@ -233,7 +228,7 @@ static int clk_debug_create_subtree(struct clk *clk, struct dentry *pdentry)
 	if (ret)
 		goto out;
 
-	hlist_for_each_entry(child, tmp, &clk->children, child_node)
+	hlist_for_each_entry(child, &clk->children, child_node)
 		clk_debug_create_subtree(child, clk->dentry);
 
 	ret = 0;
@@ -299,7 +294,6 @@ out:
 static int __init clk_debug_init(void)
 {
 	struct clk *clk;
-	struct hlist_node *tmp;
 	struct dentry *d;
 
 	rootdir = debugfs_create_dir("clk", NULL);
@@ -324,10 +318,10 @@ static int __init clk_debug_init(void)
 
 	mutex_lock(&prepare_lock);
 
-	hlist_for_each_entry(clk, tmp, &clk_root_list, child_node)
+	hlist_for_each_entry(clk, &clk_root_list, child_node)
 		clk_debug_create_subtree(clk, rootdir);
 
-	hlist_for_each_entry(clk, tmp, &clk_orphan_list, child_node)
+	hlist_for_each_entry(clk, &clk_orphan_list, child_node)
 		clk_debug_create_subtree(clk, orphandir);
 
 	inited = 1;
@@ -345,13 +339,12 @@ static inline int clk_debug_register(struct clk *clk) { return 0; }
 static void clk_disable_unused_subtree(struct clk *clk)
 {
 	struct clk *child;
-	struct hlist_node *tmp;
 	unsigned long flags;
 
 	if (!clk)
 		goto out;
 
-	hlist_for_each_entry(child, tmp, &clk->children, child_node)
+	hlist_for_each_entry(child, &clk->children, child_node)
 		clk_disable_unused_subtree(child);
 
 	spin_lock_irqsave(&enable_lock, flags);
@@ -384,14 +377,13 @@ out:
 static int clk_disable_unused(void)
 {
 	struct clk *clk;
-	struct hlist_node *tmp;
 
 	mutex_lock(&prepare_lock);
 
-	hlist_for_each_entry(clk, tmp, &clk_root_list, child_node)
+	hlist_for_each_entry(clk, &clk_root_list, child_node)
 		clk_disable_unused_subtree(clk);
 
-	hlist_for_each_entry(clk, tmp, &clk_orphan_list, child_node)
+	hlist_for_each_entry(clk, &clk_orphan_list, child_node)
 		clk_disable_unused_subtree(clk);
 
 	mutex_unlock(&prepare_lock);
@@ -484,12 +476,11 @@ static struct clk *__clk_lookup_subtree(const char *name, struct clk *clk)
 {
 	struct clk *child;
 	struct clk *ret;
-	struct hlist_node *tmp;
 
 	if (!strcmp(clk->name, name))
 		return clk;
 
-	hlist_for_each_entry(child, tmp, &clk->children, child_node) {
+	hlist_for_each_entry(child, &clk->children, child_node) {
 		ret = __clk_lookup_subtree(name, child);
 		if (ret)
 			return ret;
@@ -502,20 +493,19 @@ struct clk *__clk_lookup(const char *name)
502{ 493{
503 struct clk *root_clk; 494 struct clk *root_clk;
504 struct clk *ret; 495 struct clk *ret;
505 struct hlist_node *tmp;
506 496
507 if (!name) 497 if (!name)
508 return NULL; 498 return NULL;
509 499
510 /* search the 'proper' clk tree first */ 500 /* search the 'proper' clk tree first */
511 hlist_for_each_entry(root_clk, tmp, &clk_root_list, child_node) { 501 hlist_for_each_entry(root_clk, &clk_root_list, child_node) {
512 ret = __clk_lookup_subtree(name, root_clk); 502 ret = __clk_lookup_subtree(name, root_clk);
513 if (ret) 503 if (ret)
514 return ret; 504 return ret;
515 } 505 }
516 506
517 /* if not found, then search the orphan tree */ 507 /* if not found, then search the orphan tree */
518 hlist_for_each_entry(root_clk, tmp, &clk_orphan_list, child_node) { 508 hlist_for_each_entry(root_clk, &clk_orphan_list, child_node) {
519 ret = __clk_lookup_subtree(name, root_clk); 509 ret = __clk_lookup_subtree(name, root_clk);
520 if (ret) 510 if (ret)
521 return ret; 511 return ret;
@@ -812,7 +802,6 @@ static void __clk_recalc_rates(struct clk *clk, unsigned long msg)
812{ 802{
813 unsigned long old_rate; 803 unsigned long old_rate;
814 unsigned long parent_rate = 0; 804 unsigned long parent_rate = 0;
815 struct hlist_node *tmp;
816 struct clk *child; 805 struct clk *child;
817 806
818 old_rate = clk->rate; 807 old_rate = clk->rate;
@@ -832,7 +821,7 @@ static void __clk_recalc_rates(struct clk *clk, unsigned long msg)
832 if (clk->notifier_count && msg) 821 if (clk->notifier_count && msg)
833 __clk_notify(clk, msg, old_rate, clk->rate); 822 __clk_notify(clk, msg, old_rate, clk->rate);
834 823
835 hlist_for_each_entry(child, tmp, &clk->children, child_node) 824 hlist_for_each_entry(child, &clk->children, child_node)
836 __clk_recalc_rates(child, msg); 825 __clk_recalc_rates(child, msg);
837} 826}
838 827
@@ -878,7 +867,6 @@ EXPORT_SYMBOL_GPL(clk_get_rate);
878 */ 867 */
879static int __clk_speculate_rates(struct clk *clk, unsigned long parent_rate) 868static int __clk_speculate_rates(struct clk *clk, unsigned long parent_rate)
880{ 869{
881 struct hlist_node *tmp;
882 struct clk *child; 870 struct clk *child;
883 unsigned long new_rate; 871 unsigned long new_rate;
884 int ret = NOTIFY_DONE; 872 int ret = NOTIFY_DONE;
@@ -895,7 +883,7 @@ static int __clk_speculate_rates(struct clk *clk, unsigned long parent_rate)
895 if (ret == NOTIFY_BAD) 883 if (ret == NOTIFY_BAD)
896 goto out; 884 goto out;
897 885
898 hlist_for_each_entry(child, tmp, &clk->children, child_node) { 886 hlist_for_each_entry(child, &clk->children, child_node) {
899 ret = __clk_speculate_rates(child, new_rate); 887 ret = __clk_speculate_rates(child, new_rate);
900 if (ret == NOTIFY_BAD) 888 if (ret == NOTIFY_BAD)
901 break; 889 break;
@@ -908,11 +896,10 @@ out:
908static void clk_calc_subtree(struct clk *clk, unsigned long new_rate) 896static void clk_calc_subtree(struct clk *clk, unsigned long new_rate)
909{ 897{
910 struct clk *child; 898 struct clk *child;
911 struct hlist_node *tmp;
912 899
913 clk->new_rate = new_rate; 900 clk->new_rate = new_rate;
914 901
915 hlist_for_each_entry(child, tmp, &clk->children, child_node) { 902 hlist_for_each_entry(child, &clk->children, child_node) {
916 if (child->ops->recalc_rate) 903 if (child->ops->recalc_rate)
917 child->new_rate = child->ops->recalc_rate(child->hw, new_rate); 904 child->new_rate = child->ops->recalc_rate(child->hw, new_rate);
918 else 905 else
@@ -983,7 +970,6 @@ out:
983 */ 970 */
984static struct clk *clk_propagate_rate_change(struct clk *clk, unsigned long event) 971static struct clk *clk_propagate_rate_change(struct clk *clk, unsigned long event)
985{ 972{
986 struct hlist_node *tmp;
987 struct clk *child, *fail_clk = NULL; 973 struct clk *child, *fail_clk = NULL;
988 int ret = NOTIFY_DONE; 974 int ret = NOTIFY_DONE;
989 975
@@ -996,7 +982,7 @@ static struct clk *clk_propagate_rate_change(struct clk *clk, unsigned long even
996 fail_clk = clk; 982 fail_clk = clk;
997 } 983 }
998 984
999 hlist_for_each_entry(child, tmp, &clk->children, child_node) { 985 hlist_for_each_entry(child, &clk->children, child_node) {
1000 clk = clk_propagate_rate_change(child, event); 986 clk = clk_propagate_rate_change(child, event);
1001 if (clk) 987 if (clk)
1002 fail_clk = clk; 988 fail_clk = clk;
@@ -1014,7 +1000,6 @@ static void clk_change_rate(struct clk *clk)
1014 struct clk *child; 1000 struct clk *child;
1015 unsigned long old_rate; 1001 unsigned long old_rate;
1016 unsigned long best_parent_rate = 0; 1002 unsigned long best_parent_rate = 0;
1017 struct hlist_node *tmp;
1018 1003
1019 old_rate = clk->rate; 1004 old_rate = clk->rate;
1020 1005
@@ -1032,7 +1017,7 @@ static void clk_change_rate(struct clk *clk)
1032 if (clk->notifier_count && old_rate != clk->rate) 1017 if (clk->notifier_count && old_rate != clk->rate)
1033 __clk_notify(clk, POST_RATE_CHANGE, old_rate, clk->rate); 1018 __clk_notify(clk, POST_RATE_CHANGE, old_rate, clk->rate);
1034 1019
1035 hlist_for_each_entry(child, tmp, &clk->children, child_node) 1020 hlist_for_each_entry(child, &clk->children, child_node)
1036 clk_change_rate(child); 1021 clk_change_rate(child);
1037} 1022}
1038 1023
@@ -1348,7 +1333,7 @@ int __clk_init(struct device *dev, struct clk *clk)
1348{ 1333{
1349 int i, ret = 0; 1334 int i, ret = 0;
1350 struct clk *orphan; 1335 struct clk *orphan;
1351 struct hlist_node *tmp, *tmp2; 1336 struct hlist_node *tmp2;
1352 1337
1353 if (!clk) 1338 if (!clk)
1354 return -EINVAL; 1339 return -EINVAL;
@@ -1448,7 +1433,7 @@ int __clk_init(struct device *dev, struct clk *clk)
1448 * walk the list of orphan clocks and reparent any that are children of 1433 * walk the list of orphan clocks and reparent any that are children of
1449 * this clock 1434 * this clock
1450 */ 1435 */
1451 hlist_for_each_entry_safe(orphan, tmp, tmp2, &clk_orphan_list, child_node) { 1436 hlist_for_each_entry_safe(orphan, tmp2, &clk_orphan_list, child_node) {
1452 if (orphan->ops->get_parent) { 1437 if (orphan->ops->get_parent) {
1453 i = orphan->ops->get_parent(orphan->hw); 1438 i = orphan->ops->get_parent(orphan->hw);
1454 if (!strcmp(clk->name, orphan->parent_names[i])) 1439 if (!strcmp(clk->name, orphan->parent_names[i]))
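Every hunk in this commit follows the same mechanical conversion: hlist_for_each_entry() and its variants drop the explicit struct hlist_node cursor, because the macros now derive the typed entry pointer internally (via container_of() on the embedded member). A minimal before/after sketch of the calling convention, assuming the post-conversion iterator signatures from <linux/list.h>; the struct foo type and foo_find() helper are illustrative only, not part of the commit:

	#include <linux/list.h>

	struct foo {
		int key;
		struct hlist_node link;	/* chains a foo into a hash bucket */
	};

	static struct foo *foo_find(struct hlist_head *bucket, int key)
	{
		struct foo *f;

		/* old API also needed: struct hlist_node *pos;
		 *	hlist_for_each_entry(f, pos, bucket, link)
		 */
		hlist_for_each_entry(f, bucket, link)	/* new: no node cursor */
			if (f->key == key)
				return f;
		return NULL;
	}
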
diff --git a/drivers/gpu/drm/drm_hashtab.c b/drivers/gpu/drm/drm_hashtab.c
index 80254547a3f8..7e4bae760e27 100644
--- a/drivers/gpu/drm/drm_hashtab.c
+++ b/drivers/gpu/drm/drm_hashtab.c
@@ -60,14 +60,13 @@ void drm_ht_verbose_list(struct drm_open_hash *ht, unsigned long key)
 {
 	struct drm_hash_item *entry;
 	struct hlist_head *h_list;
-	struct hlist_node *list;
 	unsigned int hashed_key;
 	int count = 0;
 
 	hashed_key = hash_long(key, ht->order);
 	DRM_DEBUG("Key is 0x%08lx, Hashed key is 0x%08x\n", key, hashed_key);
 	h_list = &ht->table[hashed_key];
-	hlist_for_each_entry(entry, list, h_list, head)
+	hlist_for_each_entry(entry, h_list, head)
 		DRM_DEBUG("count %d, key: 0x%08lx\n", count++, entry->key);
 }
 
@@ -76,14 +75,13 @@ static struct hlist_node *drm_ht_find_key(struct drm_open_hash *ht,
 {
 	struct drm_hash_item *entry;
 	struct hlist_head *h_list;
-	struct hlist_node *list;
 	unsigned int hashed_key;
 
 	hashed_key = hash_long(key, ht->order);
 	h_list = &ht->table[hashed_key];
-	hlist_for_each_entry(entry, list, h_list, head) {
+	hlist_for_each_entry(entry, h_list, head) {
 		if (entry->key == key)
-			return list;
+			return &entry->head;
 		if (entry->key > key)
 			break;
 	}
@@ -95,14 +93,13 @@ static struct hlist_node *drm_ht_find_key_rcu(struct drm_open_hash *ht,
 {
 	struct drm_hash_item *entry;
 	struct hlist_head *h_list;
-	struct hlist_node *list;
 	unsigned int hashed_key;
 
 	hashed_key = hash_long(key, ht->order);
 	h_list = &ht->table[hashed_key];
-	hlist_for_each_entry_rcu(entry, list, h_list, head) {
+	hlist_for_each_entry_rcu(entry, h_list, head) {
 		if (entry->key == key)
-			return list;
+			return &entry->head;
 		if (entry->key > key)
 			break;
 	}
@@ -113,19 +110,19 @@ int drm_ht_insert_item(struct drm_open_hash *ht, struct drm_hash_item *item)
 {
 	struct drm_hash_item *entry;
 	struct hlist_head *h_list;
-	struct hlist_node *list, *parent;
+	struct hlist_node *parent;
 	unsigned int hashed_key;
 	unsigned long key = item->key;
 
 	hashed_key = hash_long(key, ht->order);
 	h_list = &ht->table[hashed_key];
 	parent = NULL;
-	hlist_for_each_entry(entry, list, h_list, head) {
+	hlist_for_each_entry(entry, h_list, head) {
 		if (entry->key == key)
 			return -EINVAL;
 		if (entry->key > key)
 			break;
-		parent = list;
+		parent = &entry->head;
 	}
 	if (parent) {
 		hlist_add_after_rcu(parent, &item->head);
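Where the old code returned or saved the raw loop cursor, as drm_ht_find_key() and the parent bookmark in drm_ht_insert_item() did, the conversion substitutes the address of the entry's embedded node. A condensed sketch of the lookup side, mirroring the hunk above (find_node() is an illustrative name, not a function in the driver):

	static struct hlist_node *find_node(struct hlist_head *h_list,
					    unsigned long key)
	{
		struct drm_hash_item *entry;

		hlist_for_each_entry(entry, h_list, head) {
			if (entry->key == key)
				return &entry->head;	/* was: return list; */
			if (entry->key > key)
				break;
		}
		return NULL;
	}
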
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index c32eeaa3f3b1..71c2c7116802 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -2204,10 +2204,9 @@ static int cma_check_port(struct rdma_bind_list *bind_list,
 {
 	struct rdma_id_private *cur_id;
 	struct sockaddr *addr, *cur_addr;
-	struct hlist_node *node;
 
 	addr = (struct sockaddr *) &id_priv->id.route.addr.src_addr;
-	hlist_for_each_entry(cur_id, node, &bind_list->owners, node) {
+	hlist_for_each_entry(cur_id, &bind_list->owners, node) {
 		if (id_priv == cur_id)
 			continue;
 
diff --git a/drivers/infiniband/core/fmr_pool.c b/drivers/infiniband/core/fmr_pool.c
index 176c8f90f2bb..9f5ad7cc33c8 100644
--- a/drivers/infiniband/core/fmr_pool.c
+++ b/drivers/infiniband/core/fmr_pool.c
@@ -118,14 +118,13 @@ static inline struct ib_pool_fmr *ib_fmr_cache_lookup(struct ib_fmr_pool *pool,
 {
 	struct hlist_head *bucket;
 	struct ib_pool_fmr *fmr;
-	struct hlist_node *pos;
 
 	if (!pool->cache_bucket)
 		return NULL;
 
 	bucket = pool->cache_bucket + ib_fmr_hash(*page_list);
 
-	hlist_for_each_entry(fmr, pos, bucket, cache_node)
+	hlist_for_each_entry(fmr, bucket, cache_node)
 		if (io_virtual_address == fmr->io_virtual_address &&
 		    page_list_len == fmr->page_list_len &&
 		    !memcmp(page_list, fmr->page_list,
diff --git a/drivers/isdn/mISDN/socket.c b/drivers/isdn/mISDN/socket.c
index abe2d699b6f3..8b07f83d48ad 100644
--- a/drivers/isdn/mISDN/socket.c
+++ b/drivers/isdn/mISDN/socket.c
@@ -483,7 +483,6 @@ data_sock_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
 {
 	struct sockaddr_mISDN *maddr = (struct sockaddr_mISDN *) addr;
 	struct sock *sk = sock->sk;
-	struct hlist_node *node;
 	struct sock *csk;
 	int err = 0;
 
@@ -508,7 +507,7 @@ data_sock_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
 
 	if (sk->sk_protocol < ISDN_P_B_START) {
 		read_lock_bh(&data_sockets.lock);
-		sk_for_each(csk, node, &data_sockets.head) {
+		sk_for_each(csk, &data_sockets.head) {
 			if (sk == csk)
 				continue;
 			if (_pms(csk)->dev != _pms(sk)->dev)
diff --git a/drivers/isdn/mISDN/stack.c b/drivers/isdn/mISDN/stack.c
index deda591f70b9..9cb4b621fbc3 100644
--- a/drivers/isdn/mISDN/stack.c
+++ b/drivers/isdn/mISDN/stack.c
@@ -64,12 +64,11 @@ unlock:
 static void
 send_socklist(struct mISDN_sock_list *sl, struct sk_buff *skb)
 {
-	struct hlist_node *node;
 	struct sock *sk;
 	struct sk_buff *cskb = NULL;
 
 	read_lock(&sl->lock);
-	sk_for_each(sk, node, &sl->head) {
+	sk_for_each(sk, &sl->head) {
 		if (sk->sk_state != MISDN_BOUND)
 			continue;
 		if (!cskb)
diff --git a/drivers/md/dm-bio-prison.c b/drivers/md/dm-bio-prison.c
index aefb78e3cbf9..d9d3f1c7b662 100644
--- a/drivers/md/dm-bio-prison.c
+++ b/drivers/md/dm-bio-prison.c
@@ -106,9 +106,8 @@ static struct dm_bio_prison_cell *__search_bucket(struct hlist_head *bucket,
 						  struct dm_cell_key *key)
 {
 	struct dm_bio_prison_cell *cell;
-	struct hlist_node *tmp;
 
-	hlist_for_each_entry(cell, tmp, bucket, list)
+	hlist_for_each_entry(cell, bucket, list)
 		if (keys_equal(&cell->key, key))
 			return cell;
 
diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c
index 651ca79881dd..93205e32a004 100644
--- a/drivers/md/dm-bufio.c
+++ b/drivers/md/dm-bufio.c
@@ -859,9 +859,8 @@ static void __check_watermark(struct dm_bufio_client *c)
 static struct dm_buffer *__find(struct dm_bufio_client *c, sector_t block)
 {
 	struct dm_buffer *b;
-	struct hlist_node *hn;
 
-	hlist_for_each_entry(b, hn, &c->cache_hash[DM_BUFIO_HASH(block)],
+	hlist_for_each_entry(b, &c->cache_hash[DM_BUFIO_HASH(block)],
 			     hash_list) {
 		dm_bufio_cond_resched();
 		if (b->block == block)
diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
index 59fc18ae52c2..10079e07edf4 100644
--- a/drivers/md/dm-snap.c
+++ b/drivers/md/dm-snap.c
@@ -227,12 +227,11 @@ static void stop_tracking_chunk(struct dm_snapshot *s, struct bio *bio)
 static int __chunk_is_tracked(struct dm_snapshot *s, chunk_t chunk)
 {
 	struct dm_snap_tracked_chunk *c;
-	struct hlist_node *hn;
 	int found = 0;
 
 	spin_lock_irq(&s->tracked_chunk_lock);
 
-	hlist_for_each_entry(c, hn,
+	hlist_for_each_entry(c,
 		&s->tracked_chunk_hash[DM_TRACKED_CHUNK_HASH(chunk)], node) {
 		if (c->chunk == chunk) {
 			found = 1;
diff --git a/drivers/md/persistent-data/dm-transaction-manager.c b/drivers/md/persistent-data/dm-transaction-manager.c
index 7b17a1fdeaf9..81da1a26042e 100644
--- a/drivers/md/persistent-data/dm-transaction-manager.c
+++ b/drivers/md/persistent-data/dm-transaction-manager.c
@@ -46,10 +46,9 @@ static int is_shadow(struct dm_transaction_manager *tm, dm_block_t b)
 	int r = 0;
 	unsigned bucket = dm_hash_block(b, DM_HASH_MASK);
 	struct shadow_info *si;
-	struct hlist_node *n;
 
 	spin_lock(&tm->lock);
-	hlist_for_each_entry(si, n, tm->buckets + bucket, hlist)
+	hlist_for_each_entry(si, tm->buckets + bucket, hlist)
 		if (si->where == b) {
 			r = 1;
 			break;
@@ -81,14 +80,14 @@ static void insert_shadow(struct dm_transaction_manager *tm, dm_block_t b)
 static void wipe_shadow_table(struct dm_transaction_manager *tm)
 {
 	struct shadow_info *si;
-	struct hlist_node *n, *tmp;
+	struct hlist_node *tmp;
 	struct hlist_head *bucket;
 	int i;
 
 	spin_lock(&tm->lock);
 	for (i = 0; i < DM_HASH_SIZE; i++) {
 		bucket = tm->buckets + i;
-		hlist_for_each_entry_safe(si, n, tmp, bucket, hlist)
+		hlist_for_each_entry_safe(si, tmp, bucket, hlist)
 			kfree(si);
 
 		INIT_HLIST_HEAD(bucket);
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 19d77a026639..697f026cb318 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -365,10 +365,9 @@ static struct stripe_head *__find_stripe(struct r5conf *conf, sector_t sector,
 					 short generation)
 {
 	struct stripe_head *sh;
-	struct hlist_node *hn;
 
 	pr_debug("__find_stripe, sector %llu\n", (unsigned long long)sector);
-	hlist_for_each_entry(sh, hn, stripe_hash(conf, sector), hash)
+	hlist_for_each_entry(sh, stripe_hash(conf, sector), hash)
 		if (sh->sector == sector && sh->generation == generation)
 			return sh;
 	pr_debug("__stripe %llu not in cache\n", (unsigned long long)sector);
diff --git a/drivers/misc/sgi-gru/grutlbpurge.c b/drivers/misc/sgi-gru/grutlbpurge.c
index 240a6d361665..2129274ef7ab 100644
--- a/drivers/misc/sgi-gru/grutlbpurge.c
+++ b/drivers/misc/sgi-gru/grutlbpurge.c
@@ -280,11 +280,10 @@ static struct mmu_notifier *mmu_find_ops(struct mm_struct *mm,
 			const struct mmu_notifier_ops *ops)
 {
 	struct mmu_notifier *mn, *gru_mn = NULL;
-	struct hlist_node *n;
 
 	if (mm->mmu_notifier_mm) {
 		rcu_read_lock();
-		hlist_for_each_entry_rcu(mn, n, &mm->mmu_notifier_mm->list,
+		hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list,
 					 hlist)
 			if (mn->ops == ops) {
 				gru_mn = mn;
diff --git a/drivers/misc/vmw_vmci/vmci_doorbell.c b/drivers/misc/vmw_vmci/vmci_doorbell.c
index c3e8397f62ed..a8cee33ae8d2 100644
--- a/drivers/misc/vmw_vmci/vmci_doorbell.c
+++ b/drivers/misc/vmw_vmci/vmci_doorbell.c
@@ -127,9 +127,8 @@ static struct dbell_entry *dbell_index_table_find(u32 idx)
 {
 	u32 bucket = VMCI_DOORBELL_HASH(idx);
 	struct dbell_entry *dbell;
-	struct hlist_node *node;
 
-	hlist_for_each_entry(dbell, node, &vmci_doorbell_it.entries[bucket],
+	hlist_for_each_entry(dbell, &vmci_doorbell_it.entries[bucket],
 			     node) {
 		if (idx == dbell->idx)
 			return dbell;
@@ -359,12 +358,10 @@ static void dbell_fire_entries(u32 notify_idx)
 {
 	u32 bucket = VMCI_DOORBELL_HASH(notify_idx);
 	struct dbell_entry *dbell;
-	struct hlist_node *node;
 
 	spin_lock_bh(&vmci_doorbell_it.lock);
 
-	hlist_for_each_entry(dbell, node,
-			     &vmci_doorbell_it.entries[bucket], node) {
+	hlist_for_each_entry(dbell, &vmci_doorbell_it.entries[bucket], node) {
 		if (dbell->idx == notify_idx &&
 		    atomic_read(&dbell->active) == 1) {
 			if (dbell->run_delayed) {
diff --git a/drivers/misc/vmw_vmci/vmci_resource.c b/drivers/misc/vmw_vmci/vmci_resource.c
index a196f84a4fd2..9a53a30de445 100644
--- a/drivers/misc/vmw_vmci/vmci_resource.c
+++ b/drivers/misc/vmw_vmci/vmci_resource.c
@@ -46,11 +46,10 @@ static struct vmci_resource *vmci_resource_lookup(struct vmci_handle handle,
 			   enum vmci_resource_type type)
 {
 	struct vmci_resource *r, *resource = NULL;
-	struct hlist_node *node;
 	unsigned int idx = vmci_resource_hash(handle);
 
 	rcu_read_lock();
-	hlist_for_each_entry_rcu(r, node,
+	hlist_for_each_entry_rcu(r,
 				 &vmci_resource_table.entries[idx], node) {
 		u32 cid = r->handle.context;
 		u32 rid = r->handle.resource;
@@ -146,12 +145,11 @@ void vmci_resource_remove(struct vmci_resource *resource)
 	struct vmci_handle handle = resource->handle;
 	unsigned int idx = vmci_resource_hash(handle);
 	struct vmci_resource *r;
-	struct hlist_node *node;
 
 	/* Remove resource from hash table. */
 	spin_lock(&vmci_resource_table.lock);
 
-	hlist_for_each_entry(r, node, &vmci_resource_table.entries[idx], node) {
+	hlist_for_each_entry(r, &vmci_resource_table.entries[idx], node) {
 		if (vmci_handle_is_equal(r->handle, resource->handle)) {
 			hlist_del_init_rcu(&r->node);
 			break;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
index f4d2e9e3c6d5..c3f1afd86906 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
@@ -2197,13 +2197,13 @@ static int ixgbe_get_ethtool_fdir_entry(struct ixgbe_adapter *adapter,
 	union ixgbe_atr_input *mask = &adapter->fdir_mask;
 	struct ethtool_rx_flow_spec *fsp =
 		(struct ethtool_rx_flow_spec *)&cmd->fs;
-	struct hlist_node *node, *node2;
+	struct hlist_node *node2;
 	struct ixgbe_fdir_filter *rule = NULL;
 
 	/* report total rule count */
 	cmd->data = (1024 << adapter->fdir_pballoc) - 2;
 
-	hlist_for_each_entry_safe(rule, node, node2,
+	hlist_for_each_entry_safe(rule, node2,
 				  &adapter->fdir_filter_list, fdir_node) {
 		if (fsp->location <= rule->sw_idx)
 			break;
@@ -2264,14 +2264,14 @@ static int ixgbe_get_ethtool_fdir_all(struct ixgbe_adapter *adapter,
 				      struct ethtool_rxnfc *cmd,
 				      u32 *rule_locs)
 {
-	struct hlist_node *node, *node2;
+	struct hlist_node *node2;
 	struct ixgbe_fdir_filter *rule;
 	int cnt = 0;
 
 	/* report total rule count */
 	cmd->data = (1024 << adapter->fdir_pballoc) - 2;
 
-	hlist_for_each_entry_safe(rule, node, node2,
+	hlist_for_each_entry_safe(rule, node2,
 				  &adapter->fdir_filter_list, fdir_node) {
 		if (cnt == cmd->rule_cnt)
 			return -EMSGSIZE;
@@ -2358,19 +2358,19 @@ static int ixgbe_update_ethtool_fdir_entry(struct ixgbe_adapter *adapter,
 					   u16 sw_idx)
 {
 	struct ixgbe_hw *hw = &adapter->hw;
-	struct hlist_node *node, *node2, *parent;
-	struct ixgbe_fdir_filter *rule;
+	struct hlist_node *node2;
+	struct ixgbe_fdir_filter *rule, *parent;
 	int err = -EINVAL;
 
 	parent = NULL;
 	rule = NULL;
 
-	hlist_for_each_entry_safe(rule, node, node2,
+	hlist_for_each_entry_safe(rule, node2,
 				  &adapter->fdir_filter_list, fdir_node) {
 		/* hash found, or no matching entry */
 		if (rule->sw_idx >= sw_idx)
 			break;
-		parent = node;
+		parent = rule;
 	}
 
 	/* if there is an old rule occupying our place remove it */
@@ -2399,7 +2399,7 @@ static int ixgbe_update_ethtool_fdir_entry(struct ixgbe_adapter *adapter,
 
 	/* add filter to the list */
 	if (parent)
-		hlist_add_after(parent, &input->fdir_node);
+		hlist_add_after(&parent->fdir_node, &input->fdir_node);
 	else
 		hlist_add_head(&input->fdir_node,
 			       &adapter->fdir_filter_list);
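The same substitution handles insertion points: ixgbe's parent bookmark changes from a struct hlist_node * to a typed ixgbe_fdir_filter *, and hlist_add_after() receives &parent->fdir_node at the call site. A condensed sketch of that pattern from the hunks above, wrapped in an illustrative helper; head, sw_idx and input stand in for the surrounding function's state:

	static void fdir_insert_sorted(struct hlist_head *head, u16 sw_idx,
				       struct ixgbe_fdir_filter *input)
	{
		struct ixgbe_fdir_filter *rule, *parent = NULL;
		struct hlist_node *node2;

		hlist_for_each_entry_safe(rule, node2, head, fdir_node) {
			if (rule->sw_idx >= sw_idx)
				break;
			parent = rule;		/* was: parent = node; */
		}

		if (parent)
			hlist_add_after(&parent->fdir_node, &input->fdir_node);
		else
			hlist_add_head(&input->fdir_node, head);
	}
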
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 68478d6dfa2d..db5611ae407e 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -3891,7 +3891,7 @@ static void ixgbe_configure_pb(struct ixgbe_adapter *adapter)
 static void ixgbe_fdir_filter_restore(struct ixgbe_adapter *adapter)
 {
 	struct ixgbe_hw *hw = &adapter->hw;
-	struct hlist_node *node, *node2;
+	struct hlist_node *node2;
 	struct ixgbe_fdir_filter *filter;
 
 	spin_lock(&adapter->fdir_perfect_lock);
@@ -3899,7 +3899,7 @@ static void ixgbe_fdir_filter_restore(struct ixgbe_adapter *adapter)
 	if (!hlist_empty(&adapter->fdir_filter_list))
 		ixgbe_fdir_set_input_mask_82599(hw, &adapter->fdir_mask);
 
-	hlist_for_each_entry_safe(filter, node, node2,
+	hlist_for_each_entry_safe(filter, node2,
 				  &adapter->fdir_filter_list, fdir_node) {
 		ixgbe_fdir_write_perfect_filter_82599(hw,
 				&filter->filter,
@@ -4356,12 +4356,12 @@ static void ixgbe_clean_all_tx_rings(struct ixgbe_adapter *adapter)
 
 static void ixgbe_fdir_filter_exit(struct ixgbe_adapter *adapter)
 {
-	struct hlist_node *node, *node2;
+	struct hlist_node *node2;
 	struct ixgbe_fdir_filter *filter;
 
 	spin_lock(&adapter->fdir_perfect_lock);
 
-	hlist_for_each_entry_safe(filter, node, node2,
+	hlist_for_each_entry_safe(filter, node2,
 				  &adapter->fdir_filter_list, fdir_node) {
 		hlist_del(&filter->fdir_node);
 		kfree(filter);
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index 5385474bb526..bb4d8d99f36d 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -225,11 +225,10 @@ static inline struct mlx4_en_filter *
 mlx4_en_filter_find(struct mlx4_en_priv *priv, __be32 src_ip, __be32 dst_ip,
 		    __be16 src_port, __be16 dst_port)
 {
-	struct hlist_node *elem;
 	struct mlx4_en_filter *filter;
 	struct mlx4_en_filter *ret = NULL;
 
-	hlist_for_each_entry(filter, elem,
+	hlist_for_each_entry(filter,
 			     filter_hash_bucket(priv, src_ip, dst_ip,
 						src_port, dst_port),
 			     filter_chain) {
@@ -574,13 +573,13 @@ static void mlx4_en_put_qp(struct mlx4_en_priv *priv)
 
 	if (dev->caps.steering_mode != MLX4_STEERING_MODE_A0) {
 		struct mlx4_mac_entry *entry;
-		struct hlist_node *n, *tmp;
+		struct hlist_node *tmp;
 		struct hlist_head *bucket;
 		unsigned int mac_hash;
 
 		mac_hash = priv->dev->dev_addr[MLX4_EN_MAC_HASH_IDX];
 		bucket = &priv->mac_hash[mac_hash];
-		hlist_for_each_entry_safe(entry, n, tmp, bucket, hlist) {
+		hlist_for_each_entry_safe(entry, tmp, bucket, hlist) {
 			if (ether_addr_equal_64bits(entry->mac,
 						    priv->dev->dev_addr)) {
 				en_dbg(DRV, priv, "Releasing qp: port %d, MAC %pM, qpn %d\n",
@@ -609,11 +608,11 @@ static int mlx4_en_replace_mac(struct mlx4_en_priv *priv, int qpn,
 	struct hlist_head *bucket;
 	unsigned int mac_hash;
 	struct mlx4_mac_entry *entry;
-	struct hlist_node *n, *tmp;
+	struct hlist_node *tmp;
 	u64 prev_mac_u64 = mlx4_en_mac_to_u64(prev_mac);
 
 	bucket = &priv->mac_hash[prev_mac[MLX4_EN_MAC_HASH_IDX]];
-	hlist_for_each_entry_safe(entry, n, tmp, bucket, hlist) {
+	hlist_for_each_entry_safe(entry, tmp, bucket, hlist) {
 		if (ether_addr_equal_64bits(entry->mac, prev_mac)) {
 			mlx4_en_uc_steer_release(priv, entry->mac,
 						 qpn, entry->reg_id);
@@ -1019,7 +1018,7 @@ static void mlx4_en_do_uc_filter(struct mlx4_en_priv *priv,
 {
 	struct netdev_hw_addr *ha;
 	struct mlx4_mac_entry *entry;
-	struct hlist_node *n, *tmp;
+	struct hlist_node *tmp;
 	bool found;
 	u64 mac;
 	int err = 0;
@@ -1035,7 +1034,7 @@ static void mlx4_en_do_uc_filter(struct mlx4_en_priv *priv,
 	/* find what to remove */
 	for (i = 0; i < MLX4_EN_MAC_HASH_SIZE; ++i) {
 		bucket = &priv->mac_hash[i];
-		hlist_for_each_entry_safe(entry, n, tmp, bucket, hlist) {
+		hlist_for_each_entry_safe(entry, tmp, bucket, hlist) {
 			found = false;
 			netdev_for_each_uc_addr(ha, dev) {
 				if (ether_addr_equal_64bits(entry->mac,
@@ -1078,7 +1077,7 @@ static void mlx4_en_do_uc_filter(struct mlx4_en_priv *priv,
 	netdev_for_each_uc_addr(ha, dev) {
 		found = false;
 		bucket = &priv->mac_hash[ha->addr[MLX4_EN_MAC_HASH_IDX]];
-		hlist_for_each_entry(entry, n, bucket, hlist) {
+		hlist_for_each_entry(entry, bucket, hlist) {
 			if (ether_addr_equal_64bits(entry->mac, ha->addr)) {
 				found = true;
 				break;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index ce38654bbdd0..c7f856308e1a 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -35,6 +35,7 @@
 #include <linux/slab.h>
 #include <linux/mlx4/qp.h>
 #include <linux/skbuff.h>
+#include <linux/rculist.h>
 #include <linux/if_ether.h>
 #include <linux/if_vlan.h>
 #include <linux/vmalloc.h>
@@ -617,7 +618,6 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
 
 		if (is_multicast_ether_addr(ethh->h_dest)) {
 			struct mlx4_mac_entry *entry;
-			struct hlist_node *n;
 			struct hlist_head *bucket;
 			unsigned int mac_hash;
 
@@ -625,7 +625,7 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
 			mac_hash = ethh->h_source[MLX4_EN_MAC_HASH_IDX];
 			bucket = &priv->mac_hash[mac_hash];
 			rcu_read_lock();
-			hlist_for_each_entry_rcu(entry, n, bucket, hlist) {
+			hlist_for_each_entry_rcu(entry, bucket, hlist) {
 				if (ether_addr_equal_64bits(entry->mac,
 							    ethh->h_source)) {
 					rcu_read_unlock();
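One non-mechanical detail in en_rx.c: the new three-argument hlist_for_each_entry_rcu() comes from <linux/rculist.h>, so the file now includes that header explicitly rather than, apparently, picking it up indirectly. The usage under the RCU read lock is otherwise unchanged; a condensed sketch of the lookup in the hunk above (the goto target and drop semantics follow the surrounding driver code, which is not fully shown here):

	rcu_read_lock();
	hlist_for_each_entry_rcu(entry, bucket, hlist) {
		if (ether_addr_equal_64bits(entry->mac, ethh->h_source)) {
			/* our own MAC looped back: skip this packet */
			rcu_read_unlock();
			goto next;
		}
	}
	rcu_read_unlock();
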
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
index 325e11e1ce0f..f89cc7a3fe6c 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
@@ -576,7 +576,7 @@ void qlcnic_free_mac_list(struct qlcnic_adapter *adapter)
 void qlcnic_prune_lb_filters(struct qlcnic_adapter *adapter)
 {
 	struct qlcnic_filter *tmp_fil;
-	struct hlist_node *tmp_hnode, *n;
+	struct hlist_node *n;
 	struct hlist_head *head;
 	int i;
 	unsigned long time;
@@ -584,7 +584,7 @@ void qlcnic_prune_lb_filters(struct qlcnic_adapter *adapter)
 
 	for (i = 0; i < adapter->fhash.fbucket_size; i++) {
 		head = &(adapter->fhash.fhead[i]);
-		hlist_for_each_entry_safe(tmp_fil, tmp_hnode, n, head, fnode) {
+		hlist_for_each_entry_safe(tmp_fil, n, head, fnode) {
 			cmd = tmp_fil->vlan_id ? QLCNIC_MAC_VLAN_DEL :
 						  QLCNIC_MAC_DEL;
 			time = tmp_fil->ftime;
@@ -604,7 +604,7 @@ void qlcnic_prune_lb_filters(struct qlcnic_adapter *adapter)
 	for (i = 0; i < adapter->rx_fhash.fbucket_size; i++) {
 		head = &(adapter->rx_fhash.fhead[i]);
 
-		hlist_for_each_entry_safe(tmp_fil, tmp_hnode, n, head, fnode)
+		hlist_for_each_entry_safe(tmp_fil, n, head, fnode)
 		{
 			time = tmp_fil->ftime;
 			if (jiffies > (QLCNIC_FILTER_AGE * HZ + time)) {
@@ -621,14 +621,14 @@ void qlcnic_prune_lb_filters(struct qlcnic_adapter *adapter)
 void qlcnic_delete_lb_filters(struct qlcnic_adapter *adapter)
 {
 	struct qlcnic_filter *tmp_fil;
-	struct hlist_node *tmp_hnode, *n;
+	struct hlist_node *n;
 	struct hlist_head *head;
 	int i;
 	u8 cmd;
 
 	for (i = 0; i < adapter->fhash.fbucket_size; i++) {
 		head = &(adapter->fhash.fhead[i]);
-		hlist_for_each_entry_safe(tmp_fil, tmp_hnode, n, head, fnode) {
+		hlist_for_each_entry_safe(tmp_fil, n, head, fnode) {
 			cmd = tmp_fil->vlan_id ? QLCNIC_MAC_VLAN_DEL :
 						  QLCNIC_MAC_DEL;
 			qlcnic_sre_macaddr_change(adapter,
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
index 6387e0cc3ea9..0e630061bff3 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
@@ -162,7 +162,7 @@ void qlcnic_add_lb_filter(struct qlcnic_adapter *adapter, struct sk_buff *skb,
 {
 	struct ethhdr *phdr = (struct ethhdr *)(skb->data);
 	struct qlcnic_filter *fil, *tmp_fil;
-	struct hlist_node *tmp_hnode, *n;
+	struct hlist_node *n;
 	struct hlist_head *head;
 	unsigned long time;
 	u64 src_addr = 0;
@@ -179,7 +179,7 @@ void qlcnic_add_lb_filter(struct qlcnic_adapter *adapter, struct sk_buff *skb,
 			(adapter->fhash.fbucket_size - 1);
 		head = &(adapter->rx_fhash.fhead[hindex]);
 
-		hlist_for_each_entry_safe(tmp_fil, tmp_hnode, n, head, fnode) {
+		hlist_for_each_entry_safe(tmp_fil, n, head, fnode) {
 			if (!memcmp(tmp_fil->faddr, &src_addr, ETH_ALEN) &&
 			    tmp_fil->vlan_id == vlan_id) {
 				time = tmp_fil->ftime;
@@ -205,7 +205,7 @@ void qlcnic_add_lb_filter(struct qlcnic_adapter *adapter, struct sk_buff *skb,
 			(adapter->fhash.fbucket_size - 1);
 		head = &(adapter->rx_fhash.fhead[hindex]);
 		spin_lock(&adapter->rx_mac_learn_lock);
-		hlist_for_each_entry_safe(tmp_fil, tmp_hnode, n, head, fnode) {
+		hlist_for_each_entry_safe(tmp_fil, n, head, fnode) {
 			if (!memcmp(tmp_fil->faddr, &src_addr, ETH_ALEN) &&
 			    tmp_fil->vlan_id == vlan_id) {
 				found = 1;
@@ -272,7 +272,7 @@ static void qlcnic_send_filter(struct qlcnic_adapter *adapter,
 			       struct sk_buff *skb)
 {
 	struct qlcnic_filter *fil, *tmp_fil;
-	struct hlist_node *tmp_hnode, *n;
+	struct hlist_node *n;
 	struct hlist_head *head;
 	struct net_device *netdev = adapter->netdev;
 	struct ethhdr *phdr = (struct ethhdr *)(skb->data);
@@ -294,7 +294,7 @@ static void qlcnic_send_filter(struct qlcnic_adapter *adapter,
 	hindex = qlcnic_mac_hash(src_addr) & (adapter->fhash.fbucket_size - 1);
 	head = &(adapter->fhash.fhead[hindex]);
 
-	hlist_for_each_entry_safe(tmp_fil, tmp_hnode, n, head, fnode) {
+	hlist_for_each_entry_safe(tmp_fil, n, head, fnode) {
 		if (!memcmp(tmp_fil->faddr, &src_addr, ETH_ALEN) &&
 		    tmp_fil->vlan_id == vlan_id) {
 			if (jiffies > (QLCNIC_READD_AGE * HZ + tmp_fil->ftime))
diff --git a/drivers/net/ethernet/sun/sunvnet.c b/drivers/net/ethernet/sun/sunvnet.c
index 289b4eefb42f..1df0ff3839e8 100644
--- a/drivers/net/ethernet/sun/sunvnet.c
+++ b/drivers/net/ethernet/sun/sunvnet.c
@@ -614,10 +614,9 @@ struct vnet_port *__tx_port_find(struct vnet *vp, struct sk_buff *skb)
 {
 	unsigned int hash = vnet_hashfn(skb->data);
 	struct hlist_head *hp = &vp->port_hash[hash];
-	struct hlist_node *n;
 	struct vnet_port *port;
 
-	hlist_for_each_entry(port, n, hp, hash) {
+	hlist_for_each_entry(port, hp, hash) {
 		if (ether_addr_equal(port->raddr, skb->data))
 			return port;
 	}
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index defcd8a85744..417b2af1aa80 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -55,9 +55,8 @@ static struct macvlan_dev *macvlan_hash_lookup(const struct macvlan_port *port,
 					       const unsigned char *addr)
 {
 	struct macvlan_dev *vlan;
-	struct hlist_node *n;
 
-	hlist_for_each_entry_rcu(vlan, n, &port->vlan_hash[addr[5]], hlist) {
+	hlist_for_each_entry_rcu(vlan, &port->vlan_hash[addr[5]], hlist) {
 		if (ether_addr_equal_64bits(vlan->dev->dev_addr, addr))
 			return vlan;
 	}
@@ -149,7 +148,6 @@ static void macvlan_broadcast(struct sk_buff *skb,
 {
 	const struct ethhdr *eth = eth_hdr(skb);
 	const struct macvlan_dev *vlan;
-	struct hlist_node *n;
 	struct sk_buff *nskb;
 	unsigned int i;
 	int err;
@@ -159,7 +157,7 @@ static void macvlan_broadcast(struct sk_buff *skb,
 		return;
 
 	for (i = 0; i < MACVLAN_HASH_SIZE; i++) {
-		hlist_for_each_entry_rcu(vlan, n, &port->vlan_hash[i], hlist) {
+		hlist_for_each_entry_rcu(vlan, &port->vlan_hash[i], hlist) {
 			if (vlan->dev == src || !(vlan->mode & mode))
 				continue;
 
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index b6f45c5d84d5..2c6a22e278ea 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -197,9 +197,8 @@ static inline u32 tun_hashfn(u32 rxhash)
 static struct tun_flow_entry *tun_flow_find(struct hlist_head *head, u32 rxhash)
 {
 	struct tun_flow_entry *e;
-	struct hlist_node *n;
 
-	hlist_for_each_entry_rcu(e, n, head, hash_link) {
+	hlist_for_each_entry_rcu(e, head, hash_link) {
 		if (e->rxhash == rxhash)
 			return e;
 	}
@@ -241,9 +240,9 @@ static void tun_flow_flush(struct tun_struct *tun)
 	spin_lock_bh(&tun->lock);
 	for (i = 0; i < TUN_NUM_FLOW_ENTRIES; i++) {
 		struct tun_flow_entry *e;
-		struct hlist_node *h, *n;
+		struct hlist_node *n;
 
-		hlist_for_each_entry_safe(e, h, n, &tun->flows[i], hash_link)
+		hlist_for_each_entry_safe(e, n, &tun->flows[i], hash_link)
 			tun_flow_delete(tun, e);
 	}
 	spin_unlock_bh(&tun->lock);
@@ -256,9 +255,9 @@ static void tun_flow_delete_by_queue(struct tun_struct *tun, u16 queue_index)
 	spin_lock_bh(&tun->lock);
 	for (i = 0; i < TUN_NUM_FLOW_ENTRIES; i++) {
 		struct tun_flow_entry *e;
-		struct hlist_node *h, *n;
+		struct hlist_node *n;
 
-		hlist_for_each_entry_safe(e, h, n, &tun->flows[i], hash_link) {
+		hlist_for_each_entry_safe(e, n, &tun->flows[i], hash_link) {
 			if (e->queue_index == queue_index)
 				tun_flow_delete(tun, e);
 		}
@@ -279,9 +278,9 @@ static void tun_flow_cleanup(unsigned long data)
 	spin_lock_bh(&tun->lock);
 	for (i = 0; i < TUN_NUM_FLOW_ENTRIES; i++) {
 		struct tun_flow_entry *e;
-		struct hlist_node *h, *n;
+		struct hlist_node *n;
 
-		hlist_for_each_entry_safe(e, h, n, &tun->flows[i], hash_link) {
+		hlist_for_each_entry_safe(e, n, &tun->flows[i], hash_link) {
 			unsigned long this_timer;
 			count++;
 			this_timer = e->updated + delay;
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index f736823f8437..f10e58ac9c1b 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -145,9 +145,8 @@ static inline struct hlist_head *vni_head(struct net *net, u32 id)
 static struct vxlan_dev *vxlan_find_vni(struct net *net, u32 id)
 {
 	struct vxlan_dev *vxlan;
-	struct hlist_node *node;
 
-	hlist_for_each_entry_rcu(vxlan, node, vni_head(net, id), hlist) {
+	hlist_for_each_entry_rcu(vxlan, vni_head(net, id), hlist) {
 		if (vxlan->vni == id)
 			return vxlan;
 	}
@@ -292,9 +291,8 @@ static struct vxlan_fdb *vxlan_find_mac(struct vxlan_dev *vxlan,
 {
 	struct hlist_head *head = vxlan_fdb_head(vxlan, mac);
 	struct vxlan_fdb *f;
-	struct hlist_node *node;
 
-	hlist_for_each_entry_rcu(f, node, head, hlist) {
+	hlist_for_each_entry_rcu(f, head, hlist) {
 		if (compare_ether_addr(mac, f->eth_addr) == 0)
 			return f;
 	}
@@ -422,10 +420,9 @@ static int vxlan_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb,
 
 	for (h = 0; h < FDB_HASH_SIZE; ++h) {
 		struct vxlan_fdb *f;
-		struct hlist_node *n;
 		int err;
 
-		hlist_for_each_entry_rcu(f, n, &vxlan->fdb_head[h], hlist) {
+		hlist_for_each_entry_rcu(f, &vxlan->fdb_head[h], hlist) {
 			if (idx < cb->args[0])
 				goto skip;
 
@@ -483,11 +480,10 @@ static bool vxlan_group_used(struct vxlan_net *vn,
 			     const struct vxlan_dev *this)
 {
 	const struct vxlan_dev *vxlan;
-	struct hlist_node *node;
 	unsigned h;
 
 	for (h = 0; h < VNI_HASH_SIZE; ++h)
-		hlist_for_each_entry(vxlan, node, &vn->vni_list[h], hlist) {
+		hlist_for_each_entry(vxlan, &vn->vni_list[h], hlist) {
 			if (vxlan == this)
 				continue;
 
diff --git a/drivers/net/wireless/zd1201.c b/drivers/net/wireless/zd1201.c
index 48273dd05b63..4941f201d6c8 100644
--- a/drivers/net/wireless/zd1201.c
+++ b/drivers/net/wireless/zd1201.c
@@ -309,7 +309,6 @@ static void zd1201_usbrx(struct urb *urb)
 	if (data[urb->actual_length-1] == ZD1201_PACKET_RXDATA) {
 		int datalen = urb->actual_length-1;
 		unsigned short len, fc, seq;
-		struct hlist_node *node;
 
 		len = ntohs(*(__be16 *)&data[datalen-2]);
 		if (len>datalen)
@@ -362,7 +361,7 @@ static void zd1201_usbrx(struct urb *urb)
 			hlist_add_head(&frag->fnode, &zd->fraglist);
 			goto resubmit;
 		}
-		hlist_for_each_entry(frag, node, &zd->fraglist, fnode)
+		hlist_for_each_entry(frag, &zd->fraglist, fnode)
 			if (frag->seq == (seq&IEEE80211_SCTL_SEQ))
 				break;
 		if (!frag)
@@ -1831,14 +1830,14 @@ err_zd:
 static void zd1201_disconnect(struct usb_interface *interface)
 {
 	struct zd1201 *zd = usb_get_intfdata(interface);
-	struct hlist_node *node, *node2;
+	struct hlist_node *node2;
 	struct zd1201_frag *frag;
 
 	if (!zd)
 		return;
 	usb_set_intfdata(interface, NULL);
 
-	hlist_for_each_entry_safe(frag, node, node2, &zd->fraglist, fnode) {
+	hlist_for_each_entry_safe(frag, node2, &zd->fraglist, fnode) {
 		hlist_del_init(&frag->fnode);
 		kfree_skb(frag->skb);
 		kfree(frag);
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index 924e4665bd57..b099e0025d2b 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -842,9 +842,8 @@ static struct pci_cap_saved_state *pci_find_saved_cap(
 	struct pci_dev *pci_dev, char cap)
 {
 	struct pci_cap_saved_state *tmp;
-	struct hlist_node *pos;
 
-	hlist_for_each_entry(tmp, pos, &pci_dev->saved_cap_space, next) {
+	hlist_for_each_entry(tmp, &pci_dev->saved_cap_space, next) {
 		if (tmp->cap.cap_nr == cap)
 			return tmp;
 	}
@@ -1041,7 +1040,6 @@ struct pci_saved_state *pci_store_saved_state(struct pci_dev *dev)
 	struct pci_saved_state *state;
 	struct pci_cap_saved_state *tmp;
 	struct pci_cap_saved_data *cap;
-	struct hlist_node *pos;
 	size_t size;
 
 	if (!dev->state_saved)
@@ -1049,7 +1047,7 @@ struct pci_saved_state *pci_store_saved_state(struct pci_dev *dev)
 
 	size = sizeof(*state) + sizeof(struct pci_cap_saved_data);
 
-	hlist_for_each_entry(tmp, pos, &dev->saved_cap_space, next)
+	hlist_for_each_entry(tmp, &dev->saved_cap_space, next)
 		size += sizeof(struct pci_cap_saved_data) + tmp->cap.size;
 
 	state = kzalloc(size, GFP_KERNEL);
@@ -1060,7 +1058,7 @@ struct pci_saved_state *pci_store_saved_state(struct pci_dev *dev)
 	       sizeof(state->config_space));
 
 	cap = state->cap;
-	hlist_for_each_entry(tmp, pos, &dev->saved_cap_space, next) {
+	hlist_for_each_entry(tmp, &dev->saved_cap_space, next) {
 		size_t len = sizeof(struct pci_cap_saved_data) + tmp->cap.size;
 		memcpy(cap, &tmp->cap, len);
 		cap = (struct pci_cap_saved_data *)((u8 *)cap + len);
@@ -2038,9 +2036,9 @@ void pci_allocate_cap_save_buffers(struct pci_dev *dev)
 void pci_free_cap_save_buffers(struct pci_dev *dev)
 {
 	struct pci_cap_saved_state *tmp;
-	struct hlist_node *pos, *n;
+	struct hlist_node *n;
 
-	hlist_for_each_entry_safe(tmp, pos, n, &dev->saved_cap_space, next)
+	hlist_for_each_entry_safe(tmp, n, &dev->saved_cap_space, next)
 		kfree(tmp);
 }
 
diff --git a/drivers/staging/android/binder.c b/drivers/staging/android/binder.c
index 538ebe213129..24456a0de6b2 100644
--- a/drivers/staging/android/binder.c
+++ b/drivers/staging/android/binder.c
@@ -2880,7 +2880,6 @@ static int binder_release(struct inode *nodp, struct file *filp)
 
 static void binder_deferred_release(struct binder_proc *proc)
 {
-	struct hlist_node *pos;
 	struct binder_transaction *t;
 	struct rb_node *n;
 	int threads, nodes, incoming_refs, outgoing_refs, buffers, active_transactions, page_count;
@@ -2924,7 +2923,7 @@ static void binder_deferred_release(struct binder_proc *proc)
 			node->local_weak_refs = 0;
 			hlist_add_head(&node->dead_node, &binder_dead_nodes);
 
-			hlist_for_each_entry(ref, pos, &node->refs, node_entry) {
+			hlist_for_each_entry(ref, &node->refs, node_entry) {
 				incoming_refs++;
 				if (ref->death) {
 					death++;
@@ -3156,12 +3155,11 @@ static void print_binder_thread(struct seq_file *m,
 static void print_binder_node(struct seq_file *m, struct binder_node *node)
 {
 	struct binder_ref *ref;
-	struct hlist_node *pos;
 	struct binder_work *w;
 	int count;
 
 	count = 0;
-	hlist_for_each_entry(ref, pos, &node->refs, node_entry)
+	hlist_for_each_entry(ref, &node->refs, node_entry)
 		count++;
 
 	seq_printf(m, " node %d: u%p c%p hs %d hw %d ls %d lw %d is %d iw %d",
@@ -3171,7 +3169,7 @@ static void print_binder_node(struct seq_file *m, struct binder_node *node)
 		   node->internal_strong_refs, count);
 	if (count) {
 		seq_puts(m, " proc");
-		hlist_for_each_entry(ref, pos, &node->refs, node_entry)
+		hlist_for_each_entry(ref, &node->refs, node_entry)
 			seq_printf(m, " %d", ref->proc->pid);
 	}
 	seq_puts(m, "\n");
@@ -3369,7 +3367,6 @@ static void print_binder_proc_stats(struct seq_file *m,
 static int binder_state_show(struct seq_file *m, void *unused)
 {
 	struct binder_proc *proc;
-	struct hlist_node *pos;
 	struct binder_node *node;
 	int do_lock = !binder_debug_no_lock;
 
@@ -3380,10 +3377,10 @@ static int binder_state_show(struct seq_file *m, void *unused)
 
 	if (!hlist_empty(&binder_dead_nodes))
 		seq_puts(m, "dead nodes:\n");
-	hlist_for_each_entry(node, pos, &binder_dead_nodes, dead_node)
+	hlist_for_each_entry(node, &binder_dead_nodes, dead_node)
 		print_binder_node(m, node);
 
-	hlist_for_each_entry(proc, pos, &binder_procs, proc_node)
+	hlist_for_each_entry(proc, &binder_procs, proc_node)
 		print_binder_proc(m, proc, 1);
 	if (do_lock)
 		binder_unlock(__func__);
@@ -3393,7 +3390,6 @@ static int binder_state_show(struct seq_file *m, void *unused)
 static int binder_stats_show(struct seq_file *m, void *unused)
 {
 	struct binder_proc *proc;
-	struct hlist_node *pos;
 	int do_lock = !binder_debug_no_lock;
 
 	if (do_lock)
@@ -3403,7 +3399,7 @@ static int binder_stats_show(struct seq_file *m, void *unused)
 
 	print_binder_stats(m, "", &binder_stats);
 
-	hlist_for_each_entry(proc, pos, &binder_procs, proc_node)
+	hlist_for_each_entry(proc, &binder_procs, proc_node)
 		print_binder_proc_stats(m, proc);
 	if (do_lock)
 		binder_unlock(__func__);
@@ -3413,14 +3409,13 @@ static int binder_stats_show(struct seq_file *m, void *unused)
 static int binder_transactions_show(struct seq_file *m, void *unused)
 {
 	struct binder_proc *proc;
-	struct hlist_node *pos;
 	int do_lock = !binder_debug_no_lock;
 
 	if (do_lock)
 		binder_lock(__func__);
 
 	seq_puts(m, "binder transactions:\n");
-	hlist_for_each_entry(proc, pos, &binder_procs, proc_node)
+	hlist_for_each_entry(proc, &binder_procs, proc_node)
 		print_binder_proc(m, proc, 0);
 	if (do_lock)
 		binder_unlock(__func__);
diff --git a/drivers/target/tcm_fc/tfc_sess.c b/drivers/target/tcm_fc/tfc_sess.c
index 6659dd36e806..113f33598b9f 100644
--- a/drivers/target/tcm_fc/tfc_sess.c
+++ b/drivers/target/tcm_fc/tfc_sess.c
@@ -169,7 +169,6 @@ static struct ft_sess *ft_sess_get(struct fc_lport *lport, u32 port_id)
169{ 169{
170 struct ft_tport *tport; 170 struct ft_tport *tport;
171 struct hlist_head *head; 171 struct hlist_head *head;
172 struct hlist_node *pos;
173 struct ft_sess *sess; 172 struct ft_sess *sess;
174 173
175 rcu_read_lock(); 174 rcu_read_lock();
@@ -178,7 +177,7 @@ static struct ft_sess *ft_sess_get(struct fc_lport *lport, u32 port_id)
178 goto out; 177 goto out;
179 178
180 head = &tport->hash[ft_sess_hash(port_id)]; 179 head = &tport->hash[ft_sess_hash(port_id)];
181 hlist_for_each_entry_rcu(sess, pos, head, hash) { 180 hlist_for_each_entry_rcu(sess, head, hash) {
182 if (sess->port_id == port_id) { 181 if (sess->port_id == port_id) {
183 kref_get(&sess->kref); 182 kref_get(&sess->kref);
184 rcu_read_unlock(); 183 rcu_read_unlock();
@@ -201,10 +200,9 @@ static struct ft_sess *ft_sess_create(struct ft_tport *tport, u32 port_id,
201{ 200{
202 struct ft_sess *sess; 201 struct ft_sess *sess;
203 struct hlist_head *head; 202 struct hlist_head *head;
204 struct hlist_node *pos;
205 203
206 head = &tport->hash[ft_sess_hash(port_id)]; 204 head = &tport->hash[ft_sess_hash(port_id)];
207 hlist_for_each_entry_rcu(sess, pos, head, hash) 205 hlist_for_each_entry_rcu(sess, head, hash)
208 if (sess->port_id == port_id) 206 if (sess->port_id == port_id)
209 return sess; 207 return sess;
210 208
@@ -253,11 +251,10 @@ static void ft_sess_unhash(struct ft_sess *sess)
253static struct ft_sess *ft_sess_delete(struct ft_tport *tport, u32 port_id) 251static struct ft_sess *ft_sess_delete(struct ft_tport *tport, u32 port_id)
254{ 252{
255 struct hlist_head *head; 253 struct hlist_head *head;
256 struct hlist_node *pos;
257 struct ft_sess *sess; 254 struct ft_sess *sess;
258 255
259 head = &tport->hash[ft_sess_hash(port_id)]; 256 head = &tport->hash[ft_sess_hash(port_id)];
260 hlist_for_each_entry_rcu(sess, pos, head, hash) { 257 hlist_for_each_entry_rcu(sess, head, hash) {
261 if (sess->port_id == port_id) { 258 if (sess->port_id == port_id) {
262 ft_sess_unhash(sess); 259 ft_sess_unhash(sess);
263 return sess; 260 return sess;
@@ -273,12 +270,11 @@ static struct ft_sess *ft_sess_delete(struct ft_tport *tport, u32 port_id)
273static void ft_sess_delete_all(struct ft_tport *tport) 270static void ft_sess_delete_all(struct ft_tport *tport)
274{ 271{
275 struct hlist_head *head; 272 struct hlist_head *head;
276 struct hlist_node *pos;
277 struct ft_sess *sess; 273 struct ft_sess *sess;
278 274
279 for (head = tport->hash; 275 for (head = tport->hash;
280 head < &tport->hash[FT_SESS_HASH_SIZE]; head++) { 276 head < &tport->hash[FT_SESS_HASH_SIZE]; head++) {
281 hlist_for_each_entry_rcu(sess, pos, head, hash) { 277 hlist_for_each_entry_rcu(sess, head, hash) {
282 ft_sess_unhash(sess); 278 ft_sess_unhash(sess);
283 transport_deregister_session_configfs(sess->se_sess); 279 transport_deregister_session_configfs(sess->se_sess);
284 ft_sess_put(sess); /* release from table */ 280 ft_sess_put(sess); /* release from table */
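tcm_fc shows the same drop applied to hlist_for_each_entry_rcu(): the read-side discipline is unchanged, lookups still run entirely under rcu_read_lock(), and only the node argument disappears. A sketch of the post-patch RCU lookup, with hypothetical types:

    #include <linux/rculist.h>
    #include <linux/rcupdate.h>

    struct sess {                           /* hypothetical session entry */
            struct hlist_node hash;
            u32 port_id;
    };

    static struct sess *sess_lookup(struct hlist_head *head, u32 port_id)
    {
            struct sess *s;

            rcu_read_lock();
            /* old form: hlist_for_each_entry_rcu(s, pos, head, hash) */
            hlist_for_each_entry_rcu(s, head, hash) {
                    if (s->port_id == port_id) {
                            rcu_read_unlock();
                            return s;
                    }
            }
            rcu_read_unlock();
            return NULL;
    }
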
diff --git a/fs/affs/amigaffs.c b/fs/affs/amigaffs.c
index eb82ee53ee0b..d9a43674cb94 100644
--- a/fs/affs/amigaffs.c
+++ b/fs/affs/amigaffs.c
@@ -125,9 +125,8 @@ static void
125affs_fix_dcache(struct inode *inode, u32 entry_ino) 125affs_fix_dcache(struct inode *inode, u32 entry_ino)
126{ 126{
127 struct dentry *dentry; 127 struct dentry *dentry;
128 struct hlist_node *p;
129 spin_lock(&inode->i_lock); 128 spin_lock(&inode->i_lock);
130 hlist_for_each_entry(dentry, p, &inode->i_dentry, d_alias) { 129 hlist_for_each_entry(dentry, &inode->i_dentry, d_alias) {
131 if (entry_ino == (u32)(long)dentry->d_fsdata) { 130 if (entry_ino == (u32)(long)dentry->d_fsdata) {
132 dentry->d_fsdata = (void *)inode->i_ino; 131 dentry->d_fsdata = (void *)inode->i_ino;
133 break; 132 break;
diff --git a/fs/aio.c b/fs/aio.c
index 064bfbe37566..3f941f2a3059 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -591,11 +591,10 @@ static struct kioctx *lookup_ioctx(unsigned long ctx_id)
591{ 591{
592 struct mm_struct *mm = current->mm; 592 struct mm_struct *mm = current->mm;
593 struct kioctx *ctx, *ret = NULL; 593 struct kioctx *ctx, *ret = NULL;
594 struct hlist_node *n;
595 594
596 rcu_read_lock(); 595 rcu_read_lock();
597 596
598 hlist_for_each_entry_rcu(ctx, n, &mm->ioctx_list, list) { 597 hlist_for_each_entry_rcu(ctx, &mm->ioctx_list, list) {
599 /* 598 /*
600 * RCU protects us against accessing freed memory but 599 * RCU protects us against accessing freed memory but
601 * we have to be careful not to get a reference when the 600 * we have to be careful not to get a reference when the
diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
index d2a833999bcc..83f2606c76d0 100644
--- a/fs/cifs/inode.c
+++ b/fs/cifs/inode.c
@@ -816,10 +816,9 @@ static bool
816inode_has_hashed_dentries(struct inode *inode) 816inode_has_hashed_dentries(struct inode *inode)
817{ 817{
818 struct dentry *dentry; 818 struct dentry *dentry;
819 struct hlist_node *p;
820 819
821 spin_lock(&inode->i_lock); 820 spin_lock(&inode->i_lock);
822 hlist_for_each_entry(dentry, p, &inode->i_dentry, d_alias) { 821 hlist_for_each_entry(dentry, &inode->i_dentry, d_alias) {
823 if (!d_unhashed(dentry) || IS_ROOT(dentry)) { 822 if (!d_unhashed(dentry) || IS_ROOT(dentry)) {
824 spin_unlock(&inode->i_lock); 823 spin_unlock(&inode->i_lock);
825 return true; 824 return true;
diff --git a/fs/dcache.c b/fs/dcache.c
index 68220dd0c135..fbfae008ba44 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -675,11 +675,10 @@ EXPORT_SYMBOL(dget_parent);
675static struct dentry *__d_find_alias(struct inode *inode, int want_discon) 675static struct dentry *__d_find_alias(struct inode *inode, int want_discon)
676{ 676{
677 struct dentry *alias, *discon_alias; 677 struct dentry *alias, *discon_alias;
678 struct hlist_node *p;
679 678
680again: 679again:
681 discon_alias = NULL; 680 discon_alias = NULL;
682 hlist_for_each_entry(alias, p, &inode->i_dentry, d_alias) { 681 hlist_for_each_entry(alias, &inode->i_dentry, d_alias) {
683 spin_lock(&alias->d_lock); 682 spin_lock(&alias->d_lock);
684 if (S_ISDIR(inode->i_mode) || !d_unhashed(alias)) { 683 if (S_ISDIR(inode->i_mode) || !d_unhashed(alias)) {
685 if (IS_ROOT(alias) && 684 if (IS_ROOT(alias) &&
@@ -730,10 +729,9 @@ EXPORT_SYMBOL(d_find_alias);
730void d_prune_aliases(struct inode *inode) 729void d_prune_aliases(struct inode *inode)
731{ 730{
732 struct dentry *dentry; 731 struct dentry *dentry;
733 struct hlist_node *p;
734restart: 732restart:
735 spin_lock(&inode->i_lock); 733 spin_lock(&inode->i_lock);
736 hlist_for_each_entry(dentry, p, &inode->i_dentry, d_alias) { 734 hlist_for_each_entry(dentry, &inode->i_dentry, d_alias) {
737 spin_lock(&dentry->d_lock); 735 spin_lock(&dentry->d_lock);
738 if (!dentry->d_count) { 736 if (!dentry->d_count) {
739 __dget_dlock(dentry); 737 __dget_dlock(dentry);
@@ -1443,14 +1441,13 @@ static struct dentry *__d_instantiate_unique(struct dentry *entry,
1443 int len = entry->d_name.len; 1441 int len = entry->d_name.len;
1444 const char *name = entry->d_name.name; 1442 const char *name = entry->d_name.name;
1445 unsigned int hash = entry->d_name.hash; 1443 unsigned int hash = entry->d_name.hash;
1446 struct hlist_node *p;
1447 1444
1448 if (!inode) { 1445 if (!inode) {
1449 __d_instantiate(entry, NULL); 1446 __d_instantiate(entry, NULL);
1450 return NULL; 1447 return NULL;
1451 } 1448 }
1452 1449
1453 hlist_for_each_entry(alias, p, &inode->i_dentry, d_alias) { 1450 hlist_for_each_entry(alias, &inode->i_dentry, d_alias) {
1454 /* 1451 /*
1455 * Don't need alias->d_lock here, because aliases with 1452 * Don't need alias->d_lock here, because aliases with
1456 * d_parent == entry->d_parent are not subject to name or 1453 * d_parent == entry->d_parent are not subject to name or
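The d_alias walks in affs, cifs and dcache above all share one shape: take i_lock, scan the alias chain, and return early (dropping the lock) on the first hit. With the node cursor gone, the loop reads as a plain entry scan; a sketch under hypothetical names:

    #include <linux/list.h>
    #include <linux/spinlock.h>

    struct alias {                          /* hypothetical alias entry */
            struct hlist_node d_alias;
            bool hashed;
    };

    static bool has_hashed_alias(struct hlist_head *head, spinlock_t *lock)
    {
            struct alias *a;

            spin_lock(lock);
            hlist_for_each_entry(a, head, d_alias) {
                    if (a->hashed) {
                            spin_unlock(lock);      /* early exit on match */
                            return true;
                    }
            }
            spin_unlock(lock);
            return false;
    }
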
diff --git a/fs/dlm/lowcomms.c b/fs/dlm/lowcomms.c
index dd87a31bcc21..4f5ad246582f 100644
--- a/fs/dlm/lowcomms.c
+++ b/fs/dlm/lowcomms.c
@@ -177,12 +177,11 @@ static inline int nodeid_hash(int nodeid)
177static struct connection *__find_con(int nodeid) 177static struct connection *__find_con(int nodeid)
178{ 178{
179 int r; 179 int r;
180 struct hlist_node *h;
181 struct connection *con; 180 struct connection *con;
182 181
183 r = nodeid_hash(nodeid); 182 r = nodeid_hash(nodeid);
184 183
185 hlist_for_each_entry(con, h, &connection_hash[r], list) { 184 hlist_for_each_entry(con, &connection_hash[r], list) {
186 if (con->nodeid == nodeid) 185 if (con->nodeid == nodeid)
187 return con; 186 return con;
188 } 187 }
@@ -232,13 +231,12 @@ static struct connection *__nodeid2con(int nodeid, gfp_t alloc)
232static void foreach_conn(void (*conn_func)(struct connection *c)) 231static void foreach_conn(void (*conn_func)(struct connection *c))
233{ 232{
234 int i; 233 int i;
235 struct hlist_node *h, *n; 234 struct hlist_node *n;
236 struct connection *con; 235 struct connection *con;
237 236
238 for (i = 0; i < CONN_HASH_SIZE; i++) { 237 for (i = 0; i < CONN_HASH_SIZE; i++) {
239 hlist_for_each_entry_safe(con, h, n, &connection_hash[i], list){ 238 hlist_for_each_entry_safe(con, n, &connection_hash[i], list)
240 conn_func(con); 239 conn_func(con);
241 }
242 } 240 }
243} 241}
244 242
@@ -257,13 +255,12 @@ static struct connection *nodeid2con(int nodeid, gfp_t allocation)
257static struct connection *assoc2con(int assoc_id) 255static struct connection *assoc2con(int assoc_id)
258{ 256{
259 int i; 257 int i;
260 struct hlist_node *h;
261 struct connection *con; 258 struct connection *con;
262 259
263 mutex_lock(&connections_lock); 260 mutex_lock(&connections_lock);
264 261
265 for (i = 0 ; i < CONN_HASH_SIZE; i++) { 262 for (i = 0 ; i < CONN_HASH_SIZE; i++) {
266 hlist_for_each_entry(con, h, &connection_hash[i], list) { 263 hlist_for_each_entry(con, &connection_hash[i], list) {
267 if (con->sctp_assoc == assoc_id) { 264 if (con->sctp_assoc == assoc_id) {
268 mutex_unlock(&connections_lock); 265 mutex_unlock(&connections_lock);
269 return con; 266 return con;
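dlm's foreach_conn() also illustrates why the _safe variant is the right choice when the per-entry callback may unlink and free the entry it is handed: the next node is cached before the body runs. (The hunk above also drops the now-redundant braces around the single-statement body.) The same shape, sketched with hypothetical names:

    #include <linux/list.h>

    struct connection {                     /* hypothetical entry */
            struct hlist_node list;
            int nodeid;
    };

    /* conn_func() may hlist_del() and free 'con', so cache the next
     * node up front with the _safe iterator. */
    static void walk_conns(struct hlist_head *hash, int hash_size,
                           void (*conn_func)(struct connection *c))
    {
            struct hlist_node *n;
            struct connection *con;
            int i;

            for (i = 0; i < hash_size; i++)
                    hlist_for_each_entry_safe(con, n, &hash[i], list)
                            conn_func(con);
    }
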
diff --git a/fs/ecryptfs/messaging.c b/fs/ecryptfs/messaging.c
index 5fa2471796c2..8d7a577ae497 100644
--- a/fs/ecryptfs/messaging.c
+++ b/fs/ecryptfs/messaging.c
@@ -115,10 +115,9 @@ void ecryptfs_msg_ctx_alloc_to_free(struct ecryptfs_msg_ctx *msg_ctx)
115 */ 115 */
116int ecryptfs_find_daemon_by_euid(struct ecryptfs_daemon **daemon) 116int ecryptfs_find_daemon_by_euid(struct ecryptfs_daemon **daemon)
117{ 117{
118 struct hlist_node *elem;
119 int rc; 118 int rc;
120 119
121 hlist_for_each_entry(*daemon, elem, 120 hlist_for_each_entry(*daemon,
122 &ecryptfs_daemon_hash[ecryptfs_current_euid_hash()], 121 &ecryptfs_daemon_hash[ecryptfs_current_euid_hash()],
123 euid_chain) { 122 euid_chain) {
124 if (uid_eq((*daemon)->file->f_cred->euid, current_euid())) { 123 if (uid_eq((*daemon)->file->f_cred->euid, current_euid())) {
@@ -445,7 +444,6 @@ void ecryptfs_release_messaging(void)
445 mutex_unlock(&ecryptfs_msg_ctx_lists_mux); 444 mutex_unlock(&ecryptfs_msg_ctx_lists_mux);
446 } 445 }
447 if (ecryptfs_daemon_hash) { 446 if (ecryptfs_daemon_hash) {
448 struct hlist_node *elem;
449 struct ecryptfs_daemon *daemon; 447 struct ecryptfs_daemon *daemon;
450 int i; 448 int i;
451 449
@@ -453,7 +451,7 @@ void ecryptfs_release_messaging(void)
453 for (i = 0; i < (1 << ecryptfs_hash_bits); i++) { 451 for (i = 0; i < (1 << ecryptfs_hash_bits); i++) {
454 int rc; 452 int rc;
455 453
456 hlist_for_each_entry(daemon, elem, 454 hlist_for_each_entry(daemon,
457 &ecryptfs_daemon_hash[i], 455 &ecryptfs_daemon_hash[i],
458 euid_chain) { 456 euid_chain) {
459 rc = ecryptfs_exorcise_daemon(daemon); 457 rc = ecryptfs_exorcise_daemon(daemon);
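ecryptfs is worth a second look because the loop cursor is not a local at all: the caller's out parameter *daemon is iterated directly, so a successful lookup leaves it pointing at the match with no extra assignment. The new macro accepts any lvalue there, just as the old one did. Sketch, hypothetical types:

    #include <linux/list.h>
    #include <linux/errno.h>

    struct daemon {                         /* hypothetical entry */
            struct hlist_node euid_chain;
            unsigned int euid;
    };

    static int find_daemon(struct hlist_head *head, unsigned int euid,
                           struct daemon **daemon)
    {
            /* the dereferenced out parameter is the loop cursor */
            hlist_for_each_entry(*daemon, head, euid_chain) {
                    if ((*daemon)->euid == euid)
                            return 0;       /* *daemon points at the match */
            }
            return -EINVAL;
    }
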
diff --git a/fs/exportfs/expfs.c b/fs/exportfs/expfs.c
index 5df4bb4aab14..262fc9940982 100644
--- a/fs/exportfs/expfs.c
+++ b/fs/exportfs/expfs.c
@@ -44,14 +44,13 @@ find_acceptable_alias(struct dentry *result,
44{ 44{
45 struct dentry *dentry, *toput = NULL; 45 struct dentry *dentry, *toput = NULL;
46 struct inode *inode; 46 struct inode *inode;
47 struct hlist_node *p;
48 47
49 if (acceptable(context, result)) 48 if (acceptable(context, result))
50 return result; 49 return result;
51 50
52 inode = result->d_inode; 51 inode = result->d_inode;
53 spin_lock(&inode->i_lock); 52 spin_lock(&inode->i_lock);
54 hlist_for_each_entry(dentry, p, &inode->i_dentry, d_alias) { 53 hlist_for_each_entry(dentry, &inode->i_dentry, d_alias) {
55 dget(dentry); 54 dget(dentry);
56 spin_unlock(&inode->i_lock); 55 spin_unlock(&inode->i_lock);
57 if (toput) 56 if (toput)
diff --git a/fs/fat/inode.c b/fs/fat/inode.c
index 780e20806346..acf6e479b443 100644
--- a/fs/fat/inode.c
+++ b/fs/fat/inode.c
@@ -341,12 +341,11 @@ struct inode *fat_iget(struct super_block *sb, loff_t i_pos)
341{ 341{
342 struct msdos_sb_info *sbi = MSDOS_SB(sb); 342 struct msdos_sb_info *sbi = MSDOS_SB(sb);
343 struct hlist_head *head = sbi->inode_hashtable + fat_hash(i_pos); 343 struct hlist_head *head = sbi->inode_hashtable + fat_hash(i_pos);
344 struct hlist_node *_p;
345 struct msdos_inode_info *i; 344 struct msdos_inode_info *i;
346 struct inode *inode = NULL; 345 struct inode *inode = NULL;
347 346
348 spin_lock(&sbi->inode_hash_lock); 347 spin_lock(&sbi->inode_hash_lock);
349 hlist_for_each_entry(i, _p, head, i_fat_hash) { 348 hlist_for_each_entry(i, head, i_fat_hash) {
350 BUG_ON(i->vfs_inode.i_sb != sb); 349 BUG_ON(i->vfs_inode.i_sb != sb);
351 if (i->i_pos != i_pos) 350 if (i->i_pos != i_pos)
352 continue; 351 continue;
diff --git a/fs/fat/nfs.c b/fs/fat/nfs.c
index ef4b5faba87b..499c10438ca2 100644
--- a/fs/fat/nfs.c
+++ b/fs/fat/nfs.c
@@ -21,13 +21,12 @@ static struct inode *fat_dget(struct super_block *sb, int i_logstart)
21{ 21{
22 struct msdos_sb_info *sbi = MSDOS_SB(sb); 22 struct msdos_sb_info *sbi = MSDOS_SB(sb);
23 struct hlist_head *head; 23 struct hlist_head *head;
24 struct hlist_node *_p;
25 struct msdos_inode_info *i; 24 struct msdos_inode_info *i;
26 struct inode *inode = NULL; 25 struct inode *inode = NULL;
27 26
28 head = sbi->dir_hashtable + fat_dir_hash(i_logstart); 27 head = sbi->dir_hashtable + fat_dir_hash(i_logstart);
29 spin_lock(&sbi->dir_hash_lock); 28 spin_lock(&sbi->dir_hash_lock);
30 hlist_for_each_entry(i, _p, head, i_dir_hash) { 29 hlist_for_each_entry(i, head, i_dir_hash) {
31 BUG_ON(i->vfs_inode.i_sb != sb); 30 BUG_ON(i->vfs_inode.i_sb != sb);
32 if (i->i_logstart != i_logstart) 31 if (i->i_logstart != i_logstart)
33 continue; 32 continue;
diff --git a/fs/fscache/cookie.c b/fs/fscache/cookie.c
index 8dcb114758e3..e2cba1f60c21 100644
--- a/fs/fscache/cookie.c
+++ b/fs/fscache/cookie.c
@@ -237,13 +237,12 @@ static int fscache_alloc_object(struct fscache_cache *cache,
237 struct fscache_cookie *cookie) 237 struct fscache_cookie *cookie)
238{ 238{
239 struct fscache_object *object; 239 struct fscache_object *object;
240 struct hlist_node *_n;
241 int ret; 240 int ret;
242 241
243 _enter("%p,%p{%s}", cache, cookie, cookie->def->name); 242 _enter("%p,%p{%s}", cache, cookie, cookie->def->name);
244 243
245 spin_lock(&cookie->lock); 244 spin_lock(&cookie->lock);
246 hlist_for_each_entry(object, _n, &cookie->backing_objects, 245 hlist_for_each_entry(object, &cookie->backing_objects,
247 cookie_link) { 246 cookie_link) {
248 if (object->cache == cache) 247 if (object->cache == cache)
249 goto object_already_extant; 248 goto object_already_extant;
@@ -311,7 +310,6 @@ static int fscache_attach_object(struct fscache_cookie *cookie,
311{ 310{
312 struct fscache_object *p; 311 struct fscache_object *p;
313 struct fscache_cache *cache = object->cache; 312 struct fscache_cache *cache = object->cache;
314 struct hlist_node *_n;
315 int ret; 313 int ret;
316 314
317 _enter("{%s},{OBJ%x}", cookie->def->name, object->debug_id); 315 _enter("{%s},{OBJ%x}", cookie->def->name, object->debug_id);
@@ -321,7 +319,7 @@ static int fscache_attach_object(struct fscache_cookie *cookie,
321 /* there may be multiple initial creations of this object, but we only 319 /* there may be multiple initial creations of this object, but we only
322 * want one */ 320 * want one */
323 ret = -EEXIST; 321 ret = -EEXIST;
324 hlist_for_each_entry(p, _n, &cookie->backing_objects, cookie_link) { 322 hlist_for_each_entry(p, &cookie->backing_objects, cookie_link) {
325 if (p->cache == object->cache) { 323 if (p->cache == object->cache) {
326 if (p->state >= FSCACHE_OBJECT_DYING) 324 if (p->state >= FSCACHE_OBJECT_DYING)
327 ret = -ENOBUFS; 325 ret = -ENOBUFS;
@@ -331,7 +329,7 @@ static int fscache_attach_object(struct fscache_cookie *cookie,
331 329
332 /* pin the parent object */ 330 /* pin the parent object */
333 spin_lock_nested(&cookie->parent->lock, 1); 331 spin_lock_nested(&cookie->parent->lock, 1);
334 hlist_for_each_entry(p, _n, &cookie->parent->backing_objects, 332 hlist_for_each_entry(p, &cookie->parent->backing_objects,
335 cookie_link) { 333 cookie_link) {
336 if (p->cache == object->cache) { 334 if (p->cache == object->cache) {
337 if (p->state >= FSCACHE_OBJECT_DYING) { 335 if (p->state >= FSCACHE_OBJECT_DYING) {
@@ -435,7 +433,6 @@ EXPORT_SYMBOL(__fscache_wait_on_invalidate);
435void __fscache_update_cookie(struct fscache_cookie *cookie) 433void __fscache_update_cookie(struct fscache_cookie *cookie)
436{ 434{
437 struct fscache_object *object; 435 struct fscache_object *object;
438 struct hlist_node *_p;
439 436
440 fscache_stat(&fscache_n_updates); 437 fscache_stat(&fscache_n_updates);
441 438
@@ -452,7 +449,7 @@ void __fscache_update_cookie(struct fscache_cookie *cookie)
452 spin_lock(&cookie->lock); 449 spin_lock(&cookie->lock);
453 450
454 /* update the index entry on disk in each cache backing this cookie */ 451 /* update the index entry on disk in each cache backing this cookie */
455 hlist_for_each_entry(object, _p, 452 hlist_for_each_entry(object,
456 &cookie->backing_objects, cookie_link) { 453 &cookie->backing_objects, cookie_link) {
457 fscache_raise_event(object, FSCACHE_OBJECT_EV_UPDATE); 454 fscache_raise_event(object, FSCACHE_OBJECT_EV_UPDATE);
458 } 455 }
diff --git a/fs/inode.c b/fs/inode.c
index 67880e604399..f5f7c06c36fb 100644
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -798,11 +798,10 @@ static struct inode *find_inode(struct super_block *sb,
798 int (*test)(struct inode *, void *), 798 int (*test)(struct inode *, void *),
799 void *data) 799 void *data)
800{ 800{
801 struct hlist_node *node;
802 struct inode *inode = NULL; 801 struct inode *inode = NULL;
803 802
804repeat: 803repeat:
805 hlist_for_each_entry(inode, node, head, i_hash) { 804 hlist_for_each_entry(inode, head, i_hash) {
806 spin_lock(&inode->i_lock); 805 spin_lock(&inode->i_lock);
807 if (inode->i_sb != sb) { 806 if (inode->i_sb != sb) {
808 spin_unlock(&inode->i_lock); 807 spin_unlock(&inode->i_lock);
@@ -830,11 +829,10 @@ repeat:
830static struct inode *find_inode_fast(struct super_block *sb, 829static struct inode *find_inode_fast(struct super_block *sb,
831 struct hlist_head *head, unsigned long ino) 830 struct hlist_head *head, unsigned long ino)
832{ 831{
833 struct hlist_node *node;
834 struct inode *inode = NULL; 832 struct inode *inode = NULL;
835 833
836repeat: 834repeat:
837 hlist_for_each_entry(inode, node, head, i_hash) { 835 hlist_for_each_entry(inode, head, i_hash) {
838 spin_lock(&inode->i_lock); 836 spin_lock(&inode->i_lock);
839 if (inode->i_ino != ino) { 837 if (inode->i_ino != ino) {
840 spin_unlock(&inode->i_lock); 838 spin_unlock(&inode->i_lock);
@@ -1132,11 +1130,10 @@ EXPORT_SYMBOL(iget_locked);
1132static int test_inode_iunique(struct super_block *sb, unsigned long ino) 1130static int test_inode_iunique(struct super_block *sb, unsigned long ino)
1133{ 1131{
1134 struct hlist_head *b = inode_hashtable + hash(sb, ino); 1132 struct hlist_head *b = inode_hashtable + hash(sb, ino);
1135 struct hlist_node *node;
1136 struct inode *inode; 1133 struct inode *inode;
1137 1134
1138 spin_lock(&inode_hash_lock); 1135 spin_lock(&inode_hash_lock);
1139 hlist_for_each_entry(inode, node, b, i_hash) { 1136 hlist_for_each_entry(inode, b, i_hash) {
1140 if (inode->i_ino == ino && inode->i_sb == sb) { 1137 if (inode->i_ino == ino && inode->i_sb == sb) {
1141 spin_unlock(&inode_hash_lock); 1138 spin_unlock(&inode_hash_lock);
1142 return 0; 1139 return 0;
@@ -1291,10 +1288,9 @@ int insert_inode_locked(struct inode *inode)
1291 struct hlist_head *head = inode_hashtable + hash(sb, ino); 1288 struct hlist_head *head = inode_hashtable + hash(sb, ino);
1292 1289
1293 while (1) { 1290 while (1) {
1294 struct hlist_node *node;
1295 struct inode *old = NULL; 1291 struct inode *old = NULL;
1296 spin_lock(&inode_hash_lock); 1292 spin_lock(&inode_hash_lock);
1297 hlist_for_each_entry(old, node, head, i_hash) { 1293 hlist_for_each_entry(old, head, i_hash) {
1298 if (old->i_ino != ino) 1294 if (old->i_ino != ino)
1299 continue; 1295 continue;
1300 if (old->i_sb != sb) 1296 if (old->i_sb != sb)
@@ -1306,7 +1302,7 @@ int insert_inode_locked(struct inode *inode)
1306 } 1302 }
1307 break; 1303 break;
1308 } 1304 }
1309 if (likely(!node)) { 1305 if (likely(!old)) {
1310 spin_lock(&inode->i_lock); 1306 spin_lock(&inode->i_lock);
1311 inode->i_state |= I_NEW; 1307 inode->i_state |= I_NEW;
1312 hlist_add_head(&inode->i_hash, head); 1308 hlist_add_head(&inode->i_hash, head);
@@ -1334,11 +1330,10 @@ int insert_inode_locked4(struct inode *inode, unsigned long hashval,
1334 struct hlist_head *head = inode_hashtable + hash(sb, hashval); 1330 struct hlist_head *head = inode_hashtable + hash(sb, hashval);
1335 1331
1336 while (1) { 1332 while (1) {
1337 struct hlist_node *node;
1338 struct inode *old = NULL; 1333 struct inode *old = NULL;
1339 1334
1340 spin_lock(&inode_hash_lock); 1335 spin_lock(&inode_hash_lock);
1341 hlist_for_each_entry(old, node, head, i_hash) { 1336 hlist_for_each_entry(old, head, i_hash) {
1342 if (old->i_sb != sb) 1337 if (old->i_sb != sb)
1343 continue; 1338 continue;
1344 if (!test(old, data)) 1339 if (!test(old, data))
@@ -1350,7 +1345,7 @@ int insert_inode_locked4(struct inode *inode, unsigned long hashval,
1350 } 1345 }
1351 break; 1346 break;
1352 } 1347 }
1353 if (likely(!node)) { 1348 if (likely(!old)) {
1354 spin_lock(&inode->i_lock); 1349 spin_lock(&inode->i_lock);
1355 inode->i_state |= I_NEW; 1350 inode->i_state |= I_NEW;
1356 hlist_add_head(&inode->i_hash, head); 1351 hlist_add_head(&inode->i_hash, head);
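fs/inode.c is one of the few places where the conversion is more than argument surgery. The old code tested the node cursor after the loop; with that variable gone, the test moves to the entry cursor itself, which works because the new macro leaves it NULL once the chain is exhausted and non-NULL only when the body hit break. A sketch of that post-loop idiom (hypothetical struct):

    #include <linux/list.h>

    struct item {                           /* hypothetical hashed object */
            struct hlist_node i_hash;
            unsigned long id;
    };

    static bool insert_unique(struct hlist_head *head, struct item *new)
    {
            struct item *old = NULL;

            hlist_for_each_entry(old, head, i_hash) {
                    if (old->id == new->id)
                            break;          /* old keeps pointing at the clash */
            }
            if (old)                        /* broke out: duplicate exists */
                    return false;
            hlist_add_head(&new->i_hash, head);     /* old == NULL: exhausted */
            return true;
    }
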
diff --git a/fs/lockd/host.c b/fs/lockd/host.c
index 0e17090c310f..abdd75d44dd4 100644
--- a/fs/lockd/host.c
+++ b/fs/lockd/host.c
@@ -32,15 +32,15 @@
32static struct hlist_head nlm_server_hosts[NLM_HOST_NRHASH]; 32static struct hlist_head nlm_server_hosts[NLM_HOST_NRHASH];
33static struct hlist_head nlm_client_hosts[NLM_HOST_NRHASH]; 33static struct hlist_head nlm_client_hosts[NLM_HOST_NRHASH];
34 34
35#define for_each_host(host, pos, chain, table) \ 35#define for_each_host(host, chain, table) \
36 for ((chain) = (table); \ 36 for ((chain) = (table); \
37 (chain) < (table) + NLM_HOST_NRHASH; ++(chain)) \ 37 (chain) < (table) + NLM_HOST_NRHASH; ++(chain)) \
38 hlist_for_each_entry((host), (pos), (chain), h_hash) 38 hlist_for_each_entry((host), (chain), h_hash)
39 39
40#define for_each_host_safe(host, pos, next, chain, table) \ 40#define for_each_host_safe(host, next, chain, table) \
41 for ((chain) = (table); \ 41 for ((chain) = (table); \
42 (chain) < (table) + NLM_HOST_NRHASH; ++(chain)) \ 42 (chain) < (table) + NLM_HOST_NRHASH; ++(chain)) \
43 hlist_for_each_entry_safe((host), (pos), (next), \ 43 hlist_for_each_entry_safe((host), (next), \
44 (chain), h_hash) 44 (chain), h_hash)
45 45
46static unsigned long nrhosts; 46static unsigned long nrhosts;
@@ -225,7 +225,6 @@ struct nlm_host *nlmclnt_lookup_host(const struct sockaddr *sap,
225 .net = net, 225 .net = net,
226 }; 226 };
227 struct hlist_head *chain; 227 struct hlist_head *chain;
228 struct hlist_node *pos;
229 struct nlm_host *host; 228 struct nlm_host *host;
230 struct nsm_handle *nsm = NULL; 229 struct nsm_handle *nsm = NULL;
231 struct lockd_net *ln = net_generic(net, lockd_net_id); 230 struct lockd_net *ln = net_generic(net, lockd_net_id);
@@ -237,7 +236,7 @@ struct nlm_host *nlmclnt_lookup_host(const struct sockaddr *sap,
237 mutex_lock(&nlm_host_mutex); 236 mutex_lock(&nlm_host_mutex);
238 237
239 chain = &nlm_client_hosts[nlm_hash_address(sap)]; 238 chain = &nlm_client_hosts[nlm_hash_address(sap)];
240 hlist_for_each_entry(host, pos, chain, h_hash) { 239 hlist_for_each_entry(host, chain, h_hash) {
241 if (host->net != net) 240 if (host->net != net)
242 continue; 241 continue;
243 if (!rpc_cmp_addr(nlm_addr(host), sap)) 242 if (!rpc_cmp_addr(nlm_addr(host), sap))
@@ -322,7 +321,6 @@ struct nlm_host *nlmsvc_lookup_host(const struct svc_rqst *rqstp,
322 const size_t hostname_len) 321 const size_t hostname_len)
323{ 322{
324 struct hlist_head *chain; 323 struct hlist_head *chain;
325 struct hlist_node *pos;
326 struct nlm_host *host = NULL; 324 struct nlm_host *host = NULL;
327 struct nsm_handle *nsm = NULL; 325 struct nsm_handle *nsm = NULL;
328 struct sockaddr *src_sap = svc_daddr(rqstp); 326 struct sockaddr *src_sap = svc_daddr(rqstp);
@@ -350,7 +348,7 @@ struct nlm_host *nlmsvc_lookup_host(const struct svc_rqst *rqstp,
350 nlm_gc_hosts(net); 348 nlm_gc_hosts(net);
351 349
352 chain = &nlm_server_hosts[nlm_hash_address(ni.sap)]; 350 chain = &nlm_server_hosts[nlm_hash_address(ni.sap)];
353 hlist_for_each_entry(host, pos, chain, h_hash) { 351 hlist_for_each_entry(host, chain, h_hash) {
354 if (host->net != net) 352 if (host->net != net)
355 continue; 353 continue;
356 if (!rpc_cmp_addr(nlm_addr(host), ni.sap)) 354 if (!rpc_cmp_addr(nlm_addr(host), ni.sap))
@@ -515,10 +513,9 @@ static struct nlm_host *next_host_state(struct hlist_head *cache,
515{ 513{
516 struct nlm_host *host; 514 struct nlm_host *host;
517 struct hlist_head *chain; 515 struct hlist_head *chain;
518 struct hlist_node *pos;
519 516
520 mutex_lock(&nlm_host_mutex); 517 mutex_lock(&nlm_host_mutex);
521 for_each_host(host, pos, chain, cache) { 518 for_each_host(host, chain, cache) {
522 if (host->h_nsmhandle == nsm 519 if (host->h_nsmhandle == nsm
523 && host->h_nsmstate != info->state) { 520 && host->h_nsmstate != info->state) {
524 host->h_nsmstate = info->state; 521 host->h_nsmstate = info->state;
@@ -570,7 +567,6 @@ void nlm_host_rebooted(const struct nlm_reboot *info)
570static void nlm_complain_hosts(struct net *net) 567static void nlm_complain_hosts(struct net *net)
571{ 568{
572 struct hlist_head *chain; 569 struct hlist_head *chain;
573 struct hlist_node *pos;
574 struct nlm_host *host; 570 struct nlm_host *host;
575 571
576 if (net) { 572 if (net) {
@@ -587,7 +583,7 @@ static void nlm_complain_hosts(struct net *net)
587 dprintk("lockd: %lu hosts left:\n", nrhosts); 583 dprintk("lockd: %lu hosts left:\n", nrhosts);
588 } 584 }
589 585
590 for_each_host(host, pos, chain, nlm_server_hosts) { 586 for_each_host(host, chain, nlm_server_hosts) {
591 if (net && host->net != net) 587 if (net && host->net != net)
592 continue; 588 continue;
593 dprintk(" %s (cnt %d use %d exp %ld net %p)\n", 589 dprintk(" %s (cnt %d use %d exp %ld net %p)\n",
@@ -600,14 +596,13 @@ void
600nlm_shutdown_hosts_net(struct net *net) 596nlm_shutdown_hosts_net(struct net *net)
601{ 597{
602 struct hlist_head *chain; 598 struct hlist_head *chain;
603 struct hlist_node *pos;
604 struct nlm_host *host; 599 struct nlm_host *host;
605 600
606 mutex_lock(&nlm_host_mutex); 601 mutex_lock(&nlm_host_mutex);
607 602
608 /* First, make all hosts eligible for gc */ 603 /* First, make all hosts eligible for gc */
609 dprintk("lockd: nuking all hosts in net %p...\n", net); 604 dprintk("lockd: nuking all hosts in net %p...\n", net);
610 for_each_host(host, pos, chain, nlm_server_hosts) { 605 for_each_host(host, chain, nlm_server_hosts) {
611 if (net && host->net != net) 606 if (net && host->net != net)
612 continue; 607 continue;
613 host->h_expires = jiffies - 1; 608 host->h_expires = jiffies - 1;
@@ -644,11 +639,11 @@ static void
644nlm_gc_hosts(struct net *net) 639nlm_gc_hosts(struct net *net)
645{ 640{
646 struct hlist_head *chain; 641 struct hlist_head *chain;
647 struct hlist_node *pos, *next; 642 struct hlist_node *next;
648 struct nlm_host *host; 643 struct nlm_host *host;
649 644
650 dprintk("lockd: host garbage collection for net %p\n", net); 645 dprintk("lockd: host garbage collection for net %p\n", net);
651 for_each_host(host, pos, chain, nlm_server_hosts) { 646 for_each_host(host, chain, nlm_server_hosts) {
652 if (net && host->net != net) 647 if (net && host->net != net)
653 continue; 648 continue;
654 host->h_inuse = 0; 649 host->h_inuse = 0;
@@ -657,7 +652,7 @@ nlm_gc_hosts(struct net *net)
657 /* Mark all hosts that hold locks, blocks or shares */ 652 /* Mark all hosts that hold locks, blocks or shares */
658 nlmsvc_mark_resources(net); 653 nlmsvc_mark_resources(net);
659 654
660 for_each_host_safe(host, pos, next, chain, nlm_server_hosts) { 655 for_each_host_safe(host, next, chain, nlm_server_hosts) {
661 if (net && host->net != net) 656 if (net && host->net != net)
662 continue; 657 continue;
663 if (atomic_read(&host->h_count) || host->h_inuse 658 if (atomic_read(&host->h_count) || host->h_inuse
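lockd shows the knock-on effect on local wrapper macros: for_each_host() and for_each_host_safe() lose their pos parameter along with the underlying iterator, and every caller shrinks accordingly. The same two-level walk, sketched over a hypothetical fixed-size table:

    #include <linux/list.h>

    #define HOST_NRHASH 32

    struct host {                           /* hypothetical entry */
            struct hlist_node h_hash;
            int h_inuse;
    };

    #define for_each_host(host, chain, table)                       \
            for ((chain) = (table);                                 \
                 (chain) < (table) + HOST_NRHASH; ++(chain))        \
                    hlist_for_each_entry((host), (chain), h_hash)

    static void clear_inuse(struct hlist_head *table)
    {
            struct hlist_head *chain;
            struct host *host;

            for_each_host(host, chain, table)
                    host->h_inuse = 0;
    }
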
diff --git a/fs/lockd/svcsubs.c b/fs/lockd/svcsubs.c
index b3a24b07d981..d17bb62b06d6 100644
--- a/fs/lockd/svcsubs.c
+++ b/fs/lockd/svcsubs.c
@@ -84,7 +84,6 @@ __be32
84nlm_lookup_file(struct svc_rqst *rqstp, struct nlm_file **result, 84nlm_lookup_file(struct svc_rqst *rqstp, struct nlm_file **result,
85 struct nfs_fh *f) 85 struct nfs_fh *f)
86{ 86{
87 struct hlist_node *pos;
88 struct nlm_file *file; 87 struct nlm_file *file;
89 unsigned int hash; 88 unsigned int hash;
90 __be32 nfserr; 89 __be32 nfserr;
@@ -96,7 +95,7 @@ nlm_lookup_file(struct svc_rqst *rqstp, struct nlm_file **result,
96 /* Lock file table */ 95 /* Lock file table */
97 mutex_lock(&nlm_file_mutex); 96 mutex_lock(&nlm_file_mutex);
98 97
99 hlist_for_each_entry(file, pos, &nlm_files[hash], f_list) 98 hlist_for_each_entry(file, &nlm_files[hash], f_list)
100 if (!nfs_compare_fh(&file->f_handle, f)) 99 if (!nfs_compare_fh(&file->f_handle, f))
101 goto found; 100 goto found;
102 101
@@ -248,13 +247,13 @@ static int
248nlm_traverse_files(void *data, nlm_host_match_fn_t match, 247nlm_traverse_files(void *data, nlm_host_match_fn_t match,
249 int (*is_failover_file)(void *data, struct nlm_file *file)) 248 int (*is_failover_file)(void *data, struct nlm_file *file))
250{ 249{
251 struct hlist_node *pos, *next; 250 struct hlist_node *next;
252 struct nlm_file *file; 251 struct nlm_file *file;
253 int i, ret = 0; 252 int i, ret = 0;
254 253
255 mutex_lock(&nlm_file_mutex); 254 mutex_lock(&nlm_file_mutex);
256 for (i = 0; i < FILE_NRHASH; i++) { 255 for (i = 0; i < FILE_NRHASH; i++) {
257 hlist_for_each_entry_safe(file, pos, next, &nlm_files[i], f_list) { 256 hlist_for_each_entry_safe(file, next, &nlm_files[i], f_list) {
258 if (is_failover_file && !is_failover_file(data, file)) 257 if (is_failover_file && !is_failover_file(data, file))
259 continue; 258 continue;
260 file->f_count++; 259 file->f_count++;
diff --git a/fs/nfs/pnfs_dev.c b/fs/nfs/pnfs_dev.c
index d35b62e83ea6..6da209bd9408 100644
--- a/fs/nfs/pnfs_dev.c
+++ b/fs/nfs/pnfs_dev.c
@@ -77,9 +77,8 @@ _lookup_deviceid(const struct pnfs_layoutdriver_type *ld,
77 long hash) 77 long hash)
78{ 78{
79 struct nfs4_deviceid_node *d; 79 struct nfs4_deviceid_node *d;
80 struct hlist_node *n;
81 80
82 hlist_for_each_entry_rcu(d, n, &nfs4_deviceid_cache[hash], node) 81 hlist_for_each_entry_rcu(d, &nfs4_deviceid_cache[hash], node)
83 if (d->ld == ld && d->nfs_client == clp && 82 if (d->ld == ld && d->nfs_client == clp &&
84 !memcmp(&d->deviceid, id, sizeof(*id))) { 83 !memcmp(&d->deviceid, id, sizeof(*id))) {
85 if (atomic_read(&d->ref)) 84 if (atomic_read(&d->ref))
@@ -248,12 +247,11 @@ static void
248_deviceid_purge_client(const struct nfs_client *clp, long hash) 247_deviceid_purge_client(const struct nfs_client *clp, long hash)
249{ 248{
250 struct nfs4_deviceid_node *d; 249 struct nfs4_deviceid_node *d;
251 struct hlist_node *n;
252 HLIST_HEAD(tmp); 250 HLIST_HEAD(tmp);
253 251
254 spin_lock(&nfs4_deviceid_lock); 252 spin_lock(&nfs4_deviceid_lock);
255 rcu_read_lock(); 253 rcu_read_lock();
256 hlist_for_each_entry_rcu(d, n, &nfs4_deviceid_cache[hash], node) 254 hlist_for_each_entry_rcu(d, &nfs4_deviceid_cache[hash], node)
257 if (d->nfs_client == clp && atomic_read(&d->ref)) { 255 if (d->nfs_client == clp && atomic_read(&d->ref)) {
258 hlist_del_init_rcu(&d->node); 256 hlist_del_init_rcu(&d->node);
259 hlist_add_head(&d->tmpnode, &tmp); 257 hlist_add_head(&d->tmpnode, &tmp);
@@ -291,12 +289,11 @@ void
291nfs4_deviceid_mark_client_invalid(struct nfs_client *clp) 289nfs4_deviceid_mark_client_invalid(struct nfs_client *clp)
292{ 290{
293 struct nfs4_deviceid_node *d; 291 struct nfs4_deviceid_node *d;
294 struct hlist_node *n;
295 int i; 292 int i;
296 293
297 rcu_read_lock(); 294 rcu_read_lock();
298 for (i = 0; i < NFS4_DEVICE_ID_HASH_SIZE; i ++){ 295 for (i = 0; i < NFS4_DEVICE_ID_HASH_SIZE; i ++){
299 hlist_for_each_entry_rcu(d, n, &nfs4_deviceid_cache[i], node) 296 hlist_for_each_entry_rcu(d, &nfs4_deviceid_cache[i], node)
300 if (d->nfs_client == clp) 297 if (d->nfs_client == clp)
301 set_bit(NFS_DEVICEID_INVALID, &d->flags); 298 set_bit(NFS_DEVICEID_INVALID, &d->flags);
302 } 299 }
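The purge path in pnfs_dev.c removes entries from an RCU chain while iterating it with the plain (non-_safe) RCU iterator. That stays correct because hlist_del_init_rcu() leaves the victim's next pointer intact for in-progress walkers, while the writer side is serialized by the spinlock. Roughly, with hypothetical names:

    #include <linux/rculist.h>
    #include <linux/spinlock.h>

    struct dev_node {                       /* hypothetical entry */
            struct hlist_node node;
            struct hlist_node tmpnode;
            int client;
    };

    static void purge_client(struct hlist_head *cache, int client,
                             spinlock_t *lock)
    {
            struct dev_node *d;
            HLIST_HEAD(tmp);

            spin_lock(lock);                /* serializes writers */
            rcu_read_lock();
            hlist_for_each_entry_rcu(d, cache, node)
                    if (d->client == client) {
                            /* d->node.next stays valid, so the walk
                             * can continue from d after the unlink */
                            hlist_del_init_rcu(&d->node);
                            hlist_add_head(&d->tmpnode, &tmp);
                    }
            rcu_read_unlock();
            spin_unlock(lock);
            /* entries on tmp may be freed after a grace period */
    }
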
diff --git a/fs/nfsd/nfscache.c b/fs/nfsd/nfscache.c
index 2cbac34a55da..da3dbd0f8979 100644
--- a/fs/nfsd/nfscache.c
+++ b/fs/nfsd/nfscache.c
@@ -120,7 +120,6 @@ hash_refile(struct svc_cacherep *rp)
120int 120int
121nfsd_cache_lookup(struct svc_rqst *rqstp) 121nfsd_cache_lookup(struct svc_rqst *rqstp)
122{ 122{
123 struct hlist_node *hn;
124 struct hlist_head *rh; 123 struct hlist_head *rh;
125 struct svc_cacherep *rp; 124 struct svc_cacherep *rp;
126 __be32 xid = rqstp->rq_xid; 125 __be32 xid = rqstp->rq_xid;
@@ -141,7 +140,7 @@ nfsd_cache_lookup(struct svc_rqst *rqstp)
141 rtn = RC_DOIT; 140 rtn = RC_DOIT;
142 141
143 rh = &cache_hash[request_hash(xid)]; 142 rh = &cache_hash[request_hash(xid)];
144 hlist_for_each_entry(rp, hn, rh, c_hash) { 143 hlist_for_each_entry(rp, rh, c_hash) {
145 if (rp->c_state != RC_UNUSED && 144 if (rp->c_state != RC_UNUSED &&
146 xid == rp->c_xid && proc == rp->c_proc && 145 xid == rp->c_xid && proc == rp->c_proc &&
147 proto == rp->c_prot && vers == rp->c_vers && 146 proto == rp->c_prot && vers == rp->c_vers &&
diff --git a/fs/notify/fsnotify.c b/fs/notify/fsnotify.c
index 6baadb5a8430..4bb21d67d9b1 100644
--- a/fs/notify/fsnotify.c
+++ b/fs/notify/fsnotify.c
@@ -52,7 +52,6 @@ void __fsnotify_vfsmount_delete(struct vfsmount *mnt)
52void __fsnotify_update_child_dentry_flags(struct inode *inode) 52void __fsnotify_update_child_dentry_flags(struct inode *inode)
53{ 53{
54 struct dentry *alias; 54 struct dentry *alias;
55 struct hlist_node *p;
56 int watched; 55 int watched;
57 56
58 if (!S_ISDIR(inode->i_mode)) 57 if (!S_ISDIR(inode->i_mode))
@@ -64,7 +63,7 @@ void __fsnotify_update_child_dentry_flags(struct inode *inode)
64 spin_lock(&inode->i_lock); 63 spin_lock(&inode->i_lock);
65 /* run all of the dentries associated with this inode. Since this is a 64 /* run all of the dentries associated with this inode. Since this is a
66 * directory, there damn well better only be one item on this list */ 65 * directory, there damn well better only be one item on this list */
67 hlist_for_each_entry(alias, p, &inode->i_dentry, d_alias) { 66 hlist_for_each_entry(alias, &inode->i_dentry, d_alias) {
68 struct dentry *child; 67 struct dentry *child;
69 68
70 /* run all of the children of the original inode and fix their 69 /* run all of the children of the original inode and fix their
diff --git a/fs/notify/inode_mark.c b/fs/notify/inode_mark.c
index f31e90fc050d..74825be65b7b 100644
--- a/fs/notify/inode_mark.c
+++ b/fs/notify/inode_mark.c
@@ -36,12 +36,11 @@
36static void fsnotify_recalc_inode_mask_locked(struct inode *inode) 36static void fsnotify_recalc_inode_mask_locked(struct inode *inode)
37{ 37{
38 struct fsnotify_mark *mark; 38 struct fsnotify_mark *mark;
39 struct hlist_node *pos;
40 __u32 new_mask = 0; 39 __u32 new_mask = 0;
41 40
42 assert_spin_locked(&inode->i_lock); 41 assert_spin_locked(&inode->i_lock);
43 42
44 hlist_for_each_entry(mark, pos, &inode->i_fsnotify_marks, i.i_list) 43 hlist_for_each_entry(mark, &inode->i_fsnotify_marks, i.i_list)
45 new_mask |= mark->mask; 44 new_mask |= mark->mask;
46 inode->i_fsnotify_mask = new_mask; 45 inode->i_fsnotify_mask = new_mask;
47} 46}
@@ -87,11 +86,11 @@ void fsnotify_destroy_inode_mark(struct fsnotify_mark *mark)
87void fsnotify_clear_marks_by_inode(struct inode *inode) 86void fsnotify_clear_marks_by_inode(struct inode *inode)
88{ 87{
89 struct fsnotify_mark *mark, *lmark; 88 struct fsnotify_mark *mark, *lmark;
90 struct hlist_node *pos, *n; 89 struct hlist_node *n;
91 LIST_HEAD(free_list); 90 LIST_HEAD(free_list);
92 91
93 spin_lock(&inode->i_lock); 92 spin_lock(&inode->i_lock);
94 hlist_for_each_entry_safe(mark, pos, n, &inode->i_fsnotify_marks, i.i_list) { 93 hlist_for_each_entry_safe(mark, n, &inode->i_fsnotify_marks, i.i_list) {
95 list_add(&mark->i.free_i_list, &free_list); 94 list_add(&mark->i.free_i_list, &free_list);
96 hlist_del_init_rcu(&mark->i.i_list); 95 hlist_del_init_rcu(&mark->i.i_list);
97 fsnotify_get_mark(mark); 96 fsnotify_get_mark(mark);
@@ -129,11 +128,10 @@ static struct fsnotify_mark *fsnotify_find_inode_mark_locked(
129 struct inode *inode) 128 struct inode *inode)
130{ 129{
131 struct fsnotify_mark *mark; 130 struct fsnotify_mark *mark;
132 struct hlist_node *pos;
133 131
134 assert_spin_locked(&inode->i_lock); 132 assert_spin_locked(&inode->i_lock);
135 133
136 hlist_for_each_entry(mark, pos, &inode->i_fsnotify_marks, i.i_list) { 134 hlist_for_each_entry(mark, &inode->i_fsnotify_marks, i.i_list) {
137 if (mark->group == group) { 135 if (mark->group == group) {
138 fsnotify_get_mark(mark); 136 fsnotify_get_mark(mark);
139 return mark; 137 return mark;
@@ -194,8 +192,7 @@ int fsnotify_add_inode_mark(struct fsnotify_mark *mark,
194 struct fsnotify_group *group, struct inode *inode, 192 struct fsnotify_group *group, struct inode *inode,
195 int allow_dups) 193 int allow_dups)
196{ 194{
197 struct fsnotify_mark *lmark; 195 struct fsnotify_mark *lmark, *last = NULL;
198 struct hlist_node *node, *last = NULL;
199 int ret = 0; 196 int ret = 0;
200 197
201 mark->flags |= FSNOTIFY_MARK_FLAG_INODE; 198 mark->flags |= FSNOTIFY_MARK_FLAG_INODE;
@@ -214,8 +211,8 @@ int fsnotify_add_inode_mark(struct fsnotify_mark *mark,
214 } 211 }
215 212
216 /* should mark be in the middle of the current list? */ 213 /* should mark be in the middle of the current list? */
217 hlist_for_each_entry(lmark, node, &inode->i_fsnotify_marks, i.i_list) { 214 hlist_for_each_entry(lmark, &inode->i_fsnotify_marks, i.i_list) {
218 last = node; 215 last = lmark;
219 216
220 if ((lmark->group == group) && !allow_dups) { 217 if ((lmark->group == group) && !allow_dups) {
221 ret = -EEXIST; 218 ret = -EEXIST;
@@ -235,7 +232,7 @@ int fsnotify_add_inode_mark(struct fsnotify_mark *mark,
235 232
236 BUG_ON(last == NULL); 233 BUG_ON(last == NULL);
237 /* mark should be the last entry. last is the current last entry */ 234 /* mark should be the last entry. last is the current last entry */
238 hlist_add_after_rcu(last, &mark->i.i_list); 235 hlist_add_after_rcu(&last->i.i_list, &mark->i.i_list);
239out: 236out:
240 fsnotify_recalc_inode_mask_locked(inode); 237 fsnotify_recalc_inode_mask_locked(inode);
241 spin_unlock(&inode->i_lock); 238 spin_unlock(&inode->i_lock);
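fsnotify's sorted insert is the other spot where the patch has to do real work: last used to alias the raw node cursor and could be passed to hlist_add_after_rcu() directly, but now it tracks the containing entry, so the node must be taken back out of it at the call site. The resulting shape, sketched with a hypothetical mark type:

    #include <linux/rculist.h>

    struct mark {                           /* hypothetical list entry */
            struct hlist_node list;
            int prio;
    };

    /* Insert 'new' before the first higher-priority entry, else at the tail. */
    static void insert_sorted(struct hlist_head *head, struct mark *new)
    {
            struct mark *lmark, *last = NULL;

            hlist_for_each_entry(lmark, head, list) {
                    last = lmark;           /* entry pointer, not a node */
                    if (lmark->prio > new->prio) {
                            hlist_add_before_rcu(&new->list, &lmark->list);
                            return;
                    }
            }
            if (!last)
                    hlist_add_head_rcu(&new->list, head);
            else    /* recover the node from the tracked entry */
                    hlist_add_after_rcu(&last->list, &new->list);
    }
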
diff --git a/fs/notify/vfsmount_mark.c b/fs/notify/vfsmount_mark.c
index 4df58b8ea64a..68ca5a8704b5 100644
--- a/fs/notify/vfsmount_mark.c
+++ b/fs/notify/vfsmount_mark.c
@@ -33,12 +33,12 @@
33void fsnotify_clear_marks_by_mount(struct vfsmount *mnt) 33void fsnotify_clear_marks_by_mount(struct vfsmount *mnt)
34{ 34{
35 struct fsnotify_mark *mark, *lmark; 35 struct fsnotify_mark *mark, *lmark;
36 struct hlist_node *pos, *n; 36 struct hlist_node *n;
37 struct mount *m = real_mount(mnt); 37 struct mount *m = real_mount(mnt);
38 LIST_HEAD(free_list); 38 LIST_HEAD(free_list);
39 39
40 spin_lock(&mnt->mnt_root->d_lock); 40 spin_lock(&mnt->mnt_root->d_lock);
41 hlist_for_each_entry_safe(mark, pos, n, &m->mnt_fsnotify_marks, m.m_list) { 41 hlist_for_each_entry_safe(mark, n, &m->mnt_fsnotify_marks, m.m_list) {
42 list_add(&mark->m.free_m_list, &free_list); 42 list_add(&mark->m.free_m_list, &free_list);
43 hlist_del_init_rcu(&mark->m.m_list); 43 hlist_del_init_rcu(&mark->m.m_list);
44 fsnotify_get_mark(mark); 44 fsnotify_get_mark(mark);
@@ -71,12 +71,11 @@ static void fsnotify_recalc_vfsmount_mask_locked(struct vfsmount *mnt)
71{ 71{
72 struct mount *m = real_mount(mnt); 72 struct mount *m = real_mount(mnt);
73 struct fsnotify_mark *mark; 73 struct fsnotify_mark *mark;
74 struct hlist_node *pos;
75 __u32 new_mask = 0; 74 __u32 new_mask = 0;
76 75
77 assert_spin_locked(&mnt->mnt_root->d_lock); 76 assert_spin_locked(&mnt->mnt_root->d_lock);
78 77
79 hlist_for_each_entry(mark, pos, &m->mnt_fsnotify_marks, m.m_list) 78 hlist_for_each_entry(mark, &m->mnt_fsnotify_marks, m.m_list)
80 new_mask |= mark->mask; 79 new_mask |= mark->mask;
81 m->mnt_fsnotify_mask = new_mask; 80 m->mnt_fsnotify_mask = new_mask;
82} 81}
@@ -114,11 +113,10 @@ static struct fsnotify_mark *fsnotify_find_vfsmount_mark_locked(struct fsnotify_
114{ 113{
115 struct mount *m = real_mount(mnt); 114 struct mount *m = real_mount(mnt);
116 struct fsnotify_mark *mark; 115 struct fsnotify_mark *mark;
117 struct hlist_node *pos;
118 116
119 assert_spin_locked(&mnt->mnt_root->d_lock); 117 assert_spin_locked(&mnt->mnt_root->d_lock);
120 118
121 hlist_for_each_entry(mark, pos, &m->mnt_fsnotify_marks, m.m_list) { 119 hlist_for_each_entry(mark, &m->mnt_fsnotify_marks, m.m_list) {
122 if (mark->group == group) { 120 if (mark->group == group) {
123 fsnotify_get_mark(mark); 121 fsnotify_get_mark(mark);
124 return mark; 122 return mark;
@@ -153,8 +151,7 @@ int fsnotify_add_vfsmount_mark(struct fsnotify_mark *mark,
153 int allow_dups) 151 int allow_dups)
154{ 152{
155 struct mount *m = real_mount(mnt); 153 struct mount *m = real_mount(mnt);
156 struct fsnotify_mark *lmark; 154 struct fsnotify_mark *lmark, *last = NULL;
157 struct hlist_node *node, *last = NULL;
158 int ret = 0; 155 int ret = 0;
159 156
160 mark->flags |= FSNOTIFY_MARK_FLAG_VFSMOUNT; 157 mark->flags |= FSNOTIFY_MARK_FLAG_VFSMOUNT;
@@ -173,8 +170,8 @@ int fsnotify_add_vfsmount_mark(struct fsnotify_mark *mark,
173 } 170 }
174 171
175 /* should mark be in the middle of the current list? */ 172 /* should mark be in the middle of the current list? */
176 hlist_for_each_entry(lmark, node, &m->mnt_fsnotify_marks, m.m_list) { 173 hlist_for_each_entry(lmark, &m->mnt_fsnotify_marks, m.m_list) {
177 last = node; 174 last = lmark;
178 175
179 if ((lmark->group == group) && !allow_dups) { 176 if ((lmark->group == group) && !allow_dups) {
180 ret = -EEXIST; 177 ret = -EEXIST;
@@ -194,7 +191,7 @@ int fsnotify_add_vfsmount_mark(struct fsnotify_mark *mark,
194 191
195 BUG_ON(last == NULL); 192 BUG_ON(last == NULL);
196 /* mark should be the last entry. last is the current last entry */ 193 /* mark should be the last entry. last is the current last entry */
197 hlist_add_after_rcu(last, &mark->m.m_list); 194 hlist_add_after_rcu(&last->m.m_list, &mark->m.m_list);
198out: 195out:
199 fsnotify_recalc_vfsmount_mask_locked(mnt); 196 fsnotify_recalc_vfsmount_mask_locked(mnt);
200 spin_unlock(&mnt->mnt_root->d_lock); 197 spin_unlock(&mnt->mnt_root->d_lock);
diff --git a/fs/ocfs2/dcache.c b/fs/ocfs2/dcache.c
index 8db4b58b2e4b..ef999729e274 100644
--- a/fs/ocfs2/dcache.c
+++ b/fs/ocfs2/dcache.c
@@ -169,11 +169,10 @@ struct dentry *ocfs2_find_local_alias(struct inode *inode,
169 u64 parent_blkno, 169 u64 parent_blkno,
170 int skip_unhashed) 170 int skip_unhashed)
171{ 171{
172 struct hlist_node *p;
173 struct dentry *dentry; 172 struct dentry *dentry;
174 173
175 spin_lock(&inode->i_lock); 174 spin_lock(&inode->i_lock);
176 hlist_for_each_entry(dentry, p, &inode->i_dentry, d_alias) { 175 hlist_for_each_entry(dentry, &inode->i_dentry, d_alias) {
177 spin_lock(&dentry->d_lock); 176 spin_lock(&dentry->d_lock);
178 if (ocfs2_match_dentry(dentry, parent_blkno, skip_unhashed)) { 177 if (ocfs2_match_dentry(dentry, parent_blkno, skip_unhashed)) {
179 trace_ocfs2_find_local_alias(dentry->d_name.len, 178 trace_ocfs2_find_local_alias(dentry->d_name.len,
diff --git a/fs/ocfs2/dlm/dlmrecovery.c b/fs/ocfs2/dlm/dlmrecovery.c
index 01ebfd0bdad7..eeac97bb3bfa 100644
--- a/fs/ocfs2/dlm/dlmrecovery.c
+++ b/fs/ocfs2/dlm/dlmrecovery.c
@@ -2083,7 +2083,6 @@ static void dlm_finish_local_lockres_recovery(struct dlm_ctxt *dlm,
2083 u8 dead_node, u8 new_master) 2083 u8 dead_node, u8 new_master)
2084{ 2084{
2085 int i; 2085 int i;
2086 struct hlist_node *hash_iter;
2087 struct hlist_head *bucket; 2086 struct hlist_head *bucket;
2088 struct dlm_lock_resource *res, *next; 2087 struct dlm_lock_resource *res, *next;
2089 2088
@@ -2114,7 +2113,7 @@ static void dlm_finish_local_lockres_recovery(struct dlm_ctxt *dlm,
2114 * if necessary */ 2113 * if necessary */
2115 for (i = 0; i < DLM_HASH_BUCKETS; i++) { 2114 for (i = 0; i < DLM_HASH_BUCKETS; i++) {
2116 bucket = dlm_lockres_hash(dlm, i); 2115 bucket = dlm_lockres_hash(dlm, i);
2117 hlist_for_each_entry(res, hash_iter, bucket, hash_node) { 2116 hlist_for_each_entry(res, bucket, hash_node) {
2118 if (!(res->state & DLM_LOCK_RES_RECOVERING)) 2117 if (!(res->state & DLM_LOCK_RES_RECOVERING))
2119 continue; 2118 continue;
2120 2119
@@ -2273,7 +2272,6 @@ static void dlm_free_dead_locks(struct dlm_ctxt *dlm,
2273 2272
2274static void dlm_do_local_recovery_cleanup(struct dlm_ctxt *dlm, u8 dead_node) 2273static void dlm_do_local_recovery_cleanup(struct dlm_ctxt *dlm, u8 dead_node)
2275{ 2274{
2276 struct hlist_node *iter;
2277 struct dlm_lock_resource *res; 2275 struct dlm_lock_resource *res;
2278 int i; 2276 int i;
2279 struct hlist_head *bucket; 2277 struct hlist_head *bucket;
@@ -2299,7 +2297,7 @@ static void dlm_do_local_recovery_cleanup(struct dlm_ctxt *dlm, u8 dead_node)
2299 */ 2297 */
2300 for (i = 0; i < DLM_HASH_BUCKETS; i++) { 2298 for (i = 0; i < DLM_HASH_BUCKETS; i++) {
2301 bucket = dlm_lockres_hash(dlm, i); 2299 bucket = dlm_lockres_hash(dlm, i);
2302 hlist_for_each_entry(res, iter, bucket, hash_node) { 2300 hlist_for_each_entry(res, bucket, hash_node) {
2303 /* always prune any $RECOVERY entries for dead nodes, 2301 /* always prune any $RECOVERY entries for dead nodes,
2304 * otherwise hangs can occur during later recovery */ 2302 * otherwise hangs can occur during later recovery */
2305 if (dlm_is_recovery_lock(res->lockname.name, 2303 if (dlm_is_recovery_lock(res->lockname.name,
diff --git a/fs/super.c b/fs/super.c
index df6c2f4c6b59..7465d4364208 100644
--- a/fs/super.c
+++ b/fs/super.c
@@ -447,14 +447,13 @@ struct super_block *sget(struct file_system_type *type,
447 void *data) 447 void *data)
448{ 448{
449 struct super_block *s = NULL; 449 struct super_block *s = NULL;
450 struct hlist_node *node;
451 struct super_block *old; 450 struct super_block *old;
452 int err; 451 int err;
453 452
454retry: 453retry:
455 spin_lock(&sb_lock); 454 spin_lock(&sb_lock);
456 if (test) { 455 if (test) {
457 hlist_for_each_entry(old, node, &type->fs_supers, s_instances) { 456 hlist_for_each_entry(old, &type->fs_supers, s_instances) {
458 if (!test(old, data)) 457 if (!test(old, data))
459 continue; 458 continue;
460 if (!grab_super(old)) 459 if (!grab_super(old))
@@ -554,10 +553,9 @@ void iterate_supers_type(struct file_system_type *type,
554 void (*f)(struct super_block *, void *), void *arg) 553 void (*f)(struct super_block *, void *), void *arg)
555{ 554{
556 struct super_block *sb, *p = NULL; 555 struct super_block *sb, *p = NULL;
557 struct hlist_node *node;
558 556
559 spin_lock(&sb_lock); 557 spin_lock(&sb_lock);
560 hlist_for_each_entry(sb, node, &type->fs_supers, s_instances) { 558 hlist_for_each_entry(sb, &type->fs_supers, s_instances) {
561 sb->s_count++; 559 sb->s_count++;
562 spin_unlock(&sb_lock); 560 spin_unlock(&sb_lock);
563 561
diff --git a/fs/sysfs/bin.c b/fs/sysfs/bin.c
index 2ce9a5db6ab5..15c68f9489ae 100644
--- a/fs/sysfs/bin.c
+++ b/fs/sysfs/bin.c
@@ -461,14 +461,13 @@ const struct file_operations bin_fops = {
461void unmap_bin_file(struct sysfs_dirent *attr_sd) 461void unmap_bin_file(struct sysfs_dirent *attr_sd)
462{ 462{
463 struct bin_buffer *bb; 463 struct bin_buffer *bb;
464 struct hlist_node *tmp;
465 464
466 if (sysfs_type(attr_sd) != SYSFS_KOBJ_BIN_ATTR) 465 if (sysfs_type(attr_sd) != SYSFS_KOBJ_BIN_ATTR)
467 return; 466 return;
468 467
469 mutex_lock(&sysfs_bin_lock); 468 mutex_lock(&sysfs_bin_lock);
470 469
471 hlist_for_each_entry(bb, tmp, &attr_sd->s_bin_attr.buffers, list) { 470 hlist_for_each_entry(bb, &attr_sd->s_bin_attr.buffers, list) {
472 struct inode *inode = file_inode(bb->file); 471 struct inode *inode = file_inode(bb->file);
473 472
474 unmap_mapping_range(inode->i_mapping, 0, 0, 1); 473 unmap_mapping_range(inode->i_mapping, 0, 0, 1);
diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c
index 96fcbb85ff83..d1dba7ce75ae 100644
--- a/fs/xfs/xfs_log_recover.c
+++ b/fs/xfs/xfs_log_recover.c
@@ -1442,9 +1442,8 @@ xlog_recover_find_tid(
1442 xlog_tid_t tid) 1442 xlog_tid_t tid)
1443{ 1443{
1444 xlog_recover_t *trans; 1444 xlog_recover_t *trans;
1445 struct hlist_node *n;
1446 1445
1447 hlist_for_each_entry(trans, n, head, r_list) { 1446 hlist_for_each_entry(trans, head, r_list) {
1448 if (trans->r_log_tid == tid) 1447 if (trans->r_log_tid == tid)
1449 return trans; 1448 return trans;
1450 } 1449 }
diff --git a/include/linux/hashtable.h b/include/linux/hashtable.h
index 227c62424f3c..a9df51f5d54c 100644
--- a/include/linux/hashtable.h
+++ b/include/linux/hashtable.h
@@ -115,51 +115,50 @@ static inline void hash_del_rcu(struct hlist_node *node)
115 * hash_for_each - iterate over a hashtable 115 * hash_for_each - iterate over a hashtable
116 * @name: hashtable to iterate 116 * @name: hashtable to iterate
117 * @bkt: integer to use as bucket loop cursor 117 * @bkt: integer to use as bucket loop cursor
118 * @node: the &struct list_head to use as a loop cursor for each entry
119 * @obj: the type * to use as a loop cursor for each entry 118 * @obj: the type * to use as a loop cursor for each entry
120 * @member: the name of the hlist_node within the struct 119 * @member: the name of the hlist_node within the struct
121 */ 120 */
122#define hash_for_each(name, bkt, node, obj, member) \ 121#define hash_for_each(name, bkt, obj, member) \
123 for ((bkt) = 0, node = NULL; node == NULL && (bkt) < HASH_SIZE(name); (bkt)++)\ 122 for ((bkt) = 0, obj = NULL; obj == NULL && (bkt) < HASH_SIZE(name);\
124 hlist_for_each_entry(obj, node, &name[bkt], member) 123 (bkt)++)\
124 hlist_for_each_entry(obj, &name[bkt], member)
125 125
126/** 126/**
127 * hash_for_each_rcu - iterate over a rcu enabled hashtable 127 * hash_for_each_rcu - iterate over a rcu enabled hashtable
128 * @name: hashtable to iterate 128 * @name: hashtable to iterate
129 * @bkt: integer to use as bucket loop cursor 129 * @bkt: integer to use as bucket loop cursor
130 * @node: the &struct list_head to use as a loop cursor for each entry
131 * @obj: the type * to use as a loop cursor for each entry 130 * @obj: the type * to use as a loop cursor for each entry
132 * @member: the name of the hlist_node within the struct 131 * @member: the name of the hlist_node within the struct
133 */ 132 */
134#define hash_for_each_rcu(name, bkt, node, obj, member) \ 133#define hash_for_each_rcu(name, bkt, obj, member) \
135 for ((bkt) = 0, node = NULL; node == NULL && (bkt) < HASH_SIZE(name); (bkt)++)\ 134 for ((bkt) = 0, obj = NULL; obj == NULL && (bkt) < HASH_SIZE(name);\
136 hlist_for_each_entry_rcu(obj, node, &name[bkt], member) 135 (bkt)++)\
136 hlist_for_each_entry_rcu(obj, &name[bkt], member)
137 137
138/** 138/**
139 * hash_for_each_safe - iterate over a hashtable safe against removal of 139 * hash_for_each_safe - iterate over a hashtable safe against removal of
140 * hash entry 140 * hash entry
141 * @name: hashtable to iterate 141 * @name: hashtable to iterate
142 * @bkt: integer to use as bucket loop cursor 142 * @bkt: integer to use as bucket loop cursor
143 * @node: the &struct list_head to use as a loop cursor for each entry
144 * @tmp: a &struct used for temporary storage 143 * @tmp: a &struct used for temporary storage
145 * @obj: the type * to use as a loop cursor for each entry 144 * @obj: the type * to use as a loop cursor for each entry
146 * @member: the name of the hlist_node within the struct 145 * @member: the name of the hlist_node within the struct
147 */ 146 */
148#define hash_for_each_safe(name, bkt, node, tmp, obj, member) \ 147#define hash_for_each_safe(name, bkt, tmp, obj, member) \
149 for ((bkt) = 0, node = NULL; node == NULL && (bkt) < HASH_SIZE(name); (bkt)++)\ 148 for ((bkt) = 0, obj = NULL; obj == NULL && (bkt) < HASH_SIZE(name);\
150 hlist_for_each_entry_safe(obj, node, tmp, &name[bkt], member) 149 (bkt)++)\
150 hlist_for_each_entry_safe(obj, tmp, &name[bkt], member)
151 151
152/** 152/**
153 * hash_for_each_possible - iterate over all possible objects hashing to the 153 * hash_for_each_possible - iterate over all possible objects hashing to the
154 * same bucket 154 * same bucket
155 * @name: hashtable to iterate 155 * @name: hashtable to iterate
156 * @obj: the type * to use as a loop cursor for each entry 156 * @obj: the type * to use as a loop cursor for each entry
157 * @node: the &struct list_head to use as a loop cursor for each entry
158 * @member: the name of the hlist_node within the struct 157 * @member: the name of the hlist_node within the struct
159 * @key: the key of the objects to iterate over 158 * @key: the key of the objects to iterate over
160 */ 159 */
161#define hash_for_each_possible(name, obj, node, member, key) \ 160#define hash_for_each_possible(name, obj, member, key) \
162 hlist_for_each_entry(obj, node, &name[hash_min(key, HASH_BITS(name))], member) 161 hlist_for_each_entry(obj, &name[hash_min(key, HASH_BITS(name))], member)
163 162
164/** 163/**
165 * hash_for_each_possible_rcu - iterate over all possible objects hashing to the 164 * hash_for_each_possible_rcu - iterate over all possible objects hashing to the
@@ -167,25 +166,24 @@ static inline void hash_del_rcu(struct hlist_node *node)
 167 * in an RCU-enabled hashtable 166 * in an RCU-enabled hashtable
168 * @name: hashtable to iterate 167 * @name: hashtable to iterate
169 * @obj: the type * to use as a loop cursor for each entry 168 * @obj: the type * to use as a loop cursor for each entry
170 * @node: the &struct list_head to use as a loop cursor for each entry
171 * @member: the name of the hlist_node within the struct 169 * @member: the name of the hlist_node within the struct
172 * @key: the key of the objects to iterate over 170 * @key: the key of the objects to iterate over
173 */ 171 */
174#define hash_for_each_possible_rcu(name, obj, node, member, key) \ 172#define hash_for_each_possible_rcu(name, obj, member, key) \
175 hlist_for_each_entry_rcu(obj, node, &name[hash_min(key, HASH_BITS(name))], member) 173 hlist_for_each_entry_rcu(obj, &name[hash_min(key, HASH_BITS(name))],\
174 member)
176 175
177/** 176/**
178 * hash_for_each_possible_safe - iterate over all possible objects hashing to the 177 * hash_for_each_possible_safe - iterate over all possible objects hashing to the
179 * same bucket safe against removals 178 * same bucket safe against removals
180 * @name: hashtable to iterate 179 * @name: hashtable to iterate
181 * @obj: the type * to use as a loop cursor for each entry 180 * @obj: the type * to use as a loop cursor for each entry
182 * @node: the &struct list_head to use as a loop cursor for each entry
183 * @tmp: a &struct used for temporary storage 181 * @tmp: a &struct used for temporary storage
184 * @member: the name of the hlist_node within the struct 182 * @member: the name of the hlist_node within the struct
185 * @key: the key of the objects to iterate over 183 * @key: the key of the objects to iterate over
186 */ 184 */
187#define hash_for_each_possible_safe(name, obj, node, tmp, member, key) \ 185#define hash_for_each_possible_safe(name, obj, tmp, member, key) \
188 hlist_for_each_entry_safe(obj, node, tmp, \ 186 hlist_for_each_entry_safe(obj, tmp,\
189 &name[hash_min(key, HASH_BITS(name))], member) 187 &name[hash_min(key, HASH_BITS(name))], member)
190 188
191 189
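The converted hash_for_each() family keeps its nested-for shape: the outer loop only advances bkt while obj is still NULL, i.e. once the inner hlist walk has drained a bucket, so a break in the body terminates the whole construct (obj is non-NULL and the outer condition fails). A minimal sketch of a caller after this change, using a hypothetical struct item that is not taken from this patch:

	struct item {
		int key;
		struct hlist_node node;	/* hooks the object into a bucket */
	};
	static DEFINE_HASHTABLE(item_table, 4);	/* 2^4 = 16 buckets */

	static struct item *item_lookup(int key)
	{
		struct item *it;

		/* no separate struct hlist_node *pos cursor any more */
		hash_for_each_possible(item_table, it, node, key)
			if (it->key == key)
				return it;
		return NULL;
	}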
diff --git a/include/linux/if_team.h b/include/linux/if_team.h
index 4648d8021244..cfd21e3d5506 100644
--- a/include/linux/if_team.h
+++ b/include/linux/if_team.h
@@ -216,11 +216,10 @@ static inline struct hlist_head *team_port_index_hash(struct team *team,
216static inline struct team_port *team_get_port_by_index(struct team *team, 216static inline struct team_port *team_get_port_by_index(struct team *team,
217 int port_index) 217 int port_index)
218{ 218{
219 struct hlist_node *p;
220 struct team_port *port; 219 struct team_port *port;
221 struct hlist_head *head = team_port_index_hash(team, port_index); 220 struct hlist_head *head = team_port_index_hash(team, port_index);
222 221
223 hlist_for_each_entry(port, p, head, hlist) 222 hlist_for_each_entry(port, head, hlist)
224 if (port->index == port_index) 223 if (port->index == port_index)
225 return port; 224 return port;
226 return NULL; 225 return NULL;
@@ -228,11 +227,10 @@ static inline struct team_port *team_get_port_by_index(struct team *team,
228static inline struct team_port *team_get_port_by_index_rcu(struct team *team, 227static inline struct team_port *team_get_port_by_index_rcu(struct team *team,
229 int port_index) 228 int port_index)
230{ 229{
231 struct hlist_node *p;
232 struct team_port *port; 230 struct team_port *port;
233 struct hlist_head *head = team_port_index_hash(team, port_index); 231 struct hlist_head *head = team_port_index_hash(team, port_index);
234 232
235 hlist_for_each_entry_rcu(port, p, head, hlist) 233 hlist_for_each_entry_rcu(port, head, hlist)
236 if (port->index == port_index) 234 if (port->index == port_index)
237 return port; 235 return port;
238 return NULL; 236 return NULL;
diff --git a/include/linux/list.h b/include/linux/list.h
index cc6d2aa6b415..d991cc147c98 100644
--- a/include/linux/list.h
+++ b/include/linux/list.h
@@ -666,54 +666,49 @@ static inline void hlist_move_list(struct hlist_head *old,
666 for (pos = (head)->first; pos && ({ n = pos->next; 1; }); \ 666 for (pos = (head)->first; pos && ({ n = pos->next; 1; }); \
667 pos = n) 667 pos = n)
668 668
669#define hlist_entry_safe(ptr, type, member) \
 670 ({ typeof(ptr) ____ptr = (ptr); ____ptr ? hlist_entry(____ptr, type, member) : NULL; })
671
669/** 672/**
670 * hlist_for_each_entry - iterate over list of given type 673 * hlist_for_each_entry - iterate over list of given type
671 * @tpos: the type * to use as a loop cursor. 674 * @pos: the type * to use as a loop cursor.
672 * @pos: the &struct hlist_node to use as a loop cursor.
673 * @head: the head for your list. 675 * @head: the head for your list.
674 * @member: the name of the hlist_node within the struct. 676 * @member: the name of the hlist_node within the struct.
675 */ 677 */
676#define hlist_for_each_entry(tpos, pos, head, member) \ 678#define hlist_for_each_entry(pos, head, member) \
677 for (pos = (head)->first; \ 679 for (pos = hlist_entry_safe((head)->first, typeof(*(pos)), member);\
678 pos && \ 680 pos; \
679 ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \ 681 pos = hlist_entry_safe((pos)->member.next, typeof(*(pos)), member))
680 pos = pos->next)
681 682
682/** 683/**
683 * hlist_for_each_entry_continue - iterate over a hlist continuing after current point 684 * hlist_for_each_entry_continue - iterate over a hlist continuing after current point
684 * @tpos: the type * to use as a loop cursor. 685 * @pos: the type * to use as a loop cursor.
685 * @pos: the &struct hlist_node to use as a loop cursor.
686 * @member: the name of the hlist_node within the struct. 686 * @member: the name of the hlist_node within the struct.
687 */ 687 */
688#define hlist_for_each_entry_continue(tpos, pos, member) \ 688#define hlist_for_each_entry_continue(pos, member) \
689 for (pos = (pos)->next; \ 689 for (pos = hlist_entry_safe((pos)->member.next, typeof(*(pos)), member);\
690 pos && \ 690 pos; \
691 ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \ 691 pos = hlist_entry_safe((pos)->member.next, typeof(*(pos)), member))
692 pos = pos->next)
693 692
694/** 693/**
695 * hlist_for_each_entry_from - iterate over a hlist continuing from current point 694 * hlist_for_each_entry_from - iterate over a hlist continuing from current point
696 * @tpos: the type * to use as a loop cursor. 695 * @pos: the type * to use as a loop cursor.
697 * @pos: the &struct hlist_node to use as a loop cursor.
698 * @member: the name of the hlist_node within the struct. 696 * @member: the name of the hlist_node within the struct.
699 */ 697 */
700#define hlist_for_each_entry_from(tpos, pos, member) \ 698#define hlist_for_each_entry_from(pos, member) \
701 for (; pos && \ 699 for (; pos; \
702 ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \ 700 pos = hlist_entry_safe((pos)->member.next, typeof(*(pos)), member))
703 pos = pos->next)
704 701
705/** 702/**
706 * hlist_for_each_entry_safe - iterate over list of given type safe against removal of list entry 703 * hlist_for_each_entry_safe - iterate over list of given type safe against removal of list entry
707 * @tpos: the type * to use as a loop cursor. 704 * @pos: the type * to use as a loop cursor.
708 * @pos: the &struct hlist_node to use as a loop cursor.
709 * @n: another &struct hlist_node to use as temporary storage 705 * @n: another &struct hlist_node to use as temporary storage
710 * @head: the head for your list. 706 * @head: the head for your list.
711 * @member: the name of the hlist_node within the struct. 707 * @member: the name of the hlist_node within the struct.
712 */ 708 */
713#define hlist_for_each_entry_safe(tpos, pos, n, head, member) \ 709#define hlist_for_each_entry_safe(pos, n, head, member) \
714 for (pos = (head)->first; \ 710 for (pos = hlist_entry_safe((head)->first, typeof(*pos), member);\
715 pos && ({ n = pos->next; 1; }) && \ 711 pos && ({ n = pos->member.next; 1; }); \
716 ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \ 712 pos = hlist_entry_safe(n, typeof(*pos), member))
717 pos = n)
718 713
719#endif 714#endif
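For reference, a standalone userspace sketch (GCC extensions; illustrative, not kernel code) of the new scheme. It mirrors the statement-expression hlist_entry_safe() above, which maps a possibly-NULL node pointer to a possibly-NULL containing object and evaluates its argument exactly once; single evaluation matters once the argument is an rcu_dereference(), as in the rculist.h hunks below.

	#include <stddef.h>
	#include <stdio.h>

	struct hlist_node { struct hlist_node *next, **pprev; };
	struct hlist_head { struct hlist_node *first; };

	#define hlist_entry(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))
	/* statement expression: ptr is evaluated exactly once */
	#define hlist_entry_safe(ptr, type, member) \
		({ typeof(ptr) ____ptr = (ptr); \
		   ____ptr ? hlist_entry(____ptr, type, member) : NULL; })
	#define hlist_for_each_entry(pos, head, member) \
		for (pos = hlist_entry_safe((head)->first, typeof(*(pos)), member); \
		     pos; \
		     pos = hlist_entry_safe((pos)->member.next, typeof(*(pos)), member))

	struct item { int val; struct hlist_node node; };

	static void hlist_add_head(struct hlist_node *n, struct hlist_head *h)
	{
		n->next = h->first;
		if (h->first)
			h->first->pprev = &n->next;
		h->first = n;
		n->pprev = &h->first;
	}

	int main(void)
	{
		struct hlist_head head = { NULL };
		struct item a = { .val = 1 }, b = { .val = 2 };
		struct item *pos;

		hlist_add_head(&a.node, &head);
		hlist_add_head(&b.node, &head);	/* list is now b -> a */

		hlist_for_each_entry(pos, &head, node)
			printf("%d\n", pos->val);	/* prints 2, then 1 */
		return 0;
	}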
diff --git a/include/linux/pid.h b/include/linux/pid.h
index 2381c973d897..a089a3c447fc 100644
--- a/include/linux/pid.h
+++ b/include/linux/pid.h
@@ -176,9 +176,8 @@ pid_t pid_vnr(struct pid *pid);
176 176
177#define do_each_pid_task(pid, type, task) \ 177#define do_each_pid_task(pid, type, task) \
178 do { \ 178 do { \
179 struct hlist_node *pos___; \
180 if ((pid) != NULL) \ 179 if ((pid) != NULL) \
181 hlist_for_each_entry_rcu((task), pos___, \ 180 hlist_for_each_entry_rcu((task), \
182 &(pid)->tasks[type], pids[type].node) { 181 &(pid)->tasks[type], pids[type].node) {
183 182
184 /* 183 /*
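Callers of do_each_pid_task() are untouched by this hunk; the gain is that the macro no longer declares a hidden pos___ local, so it can no longer shadow a caller's variable. Usage stays the familiar pair (a sketch; the caller must hold rcu_read_lock()):

	struct task_struct *task;

	do_each_pid_task(pid, PIDTYPE_PID, task) {
		/* act on each task attached to this pid */
	} while_each_pid_task(pid, PIDTYPE_PID, task);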
diff --git a/include/linux/rculist.h b/include/linux/rculist.h
index c92dd28eaa6c..8089e35d47ac 100644
--- a/include/linux/rculist.h
+++ b/include/linux/rculist.h
@@ -445,8 +445,7 @@ static inline void hlist_add_after_rcu(struct hlist_node *prev,
445 445
446/** 446/**
447 * hlist_for_each_entry_rcu - iterate over rcu list of given type 447 * hlist_for_each_entry_rcu - iterate over rcu list of given type
448 * @tpos: the type * to use as a loop cursor. 448 * @pos: the type * to use as a loop cursor.
449 * @pos: the &struct hlist_node to use as a loop cursor.
450 * @head: the head for your list. 449 * @head: the head for your list.
451 * @member: the name of the hlist_node within the struct. 450 * @member: the name of the hlist_node within the struct.
452 * 451 *
@@ -454,16 +453,16 @@ static inline void hlist_add_after_rcu(struct hlist_node *prev,
454 * the _rcu list-mutation primitives such as hlist_add_head_rcu() 453 * the _rcu list-mutation primitives such as hlist_add_head_rcu()
455 * as long as the traversal is guarded by rcu_read_lock(). 454 * as long as the traversal is guarded by rcu_read_lock().
456 */ 455 */
457#define hlist_for_each_entry_rcu(tpos, pos, head, member) \ 456#define hlist_for_each_entry_rcu(pos, head, member) \
 458 for (pos = rcu_dereference_raw(hlist_first_rcu(head)); \ 457 for (pos = hlist_entry_safe(rcu_dereference_raw(hlist_first_rcu(head)),\
459 pos && \ 458 typeof(*(pos)), member); \
460 ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1; }); \ 459 pos; \
461 pos = rcu_dereference_raw(hlist_next_rcu(pos))) 460 pos = hlist_entry_safe(rcu_dereference_raw(hlist_next_rcu(\
461 &(pos)->member)), typeof(*(pos)), member))
462 462
463/** 463/**
464 * hlist_for_each_entry_rcu_bh - iterate over rcu list of given type 464 * hlist_for_each_entry_rcu_bh - iterate over rcu list of given type
465 * @tpos: the type * to use as a loop cursor. 465 * @pos: the type * to use as a loop cursor.
466 * @pos: the &struct hlist_node to use as a loop cursor.
467 * @head: the head for your list. 466 * @head: the head for your list.
468 * @member: the name of the hlist_node within the struct. 467 * @member: the name of the hlist_node within the struct.
469 * 468 *
@@ -471,35 +470,36 @@ static inline void hlist_add_after_rcu(struct hlist_node *prev,
471 * the _rcu list-mutation primitives such as hlist_add_head_rcu() 470 * the _rcu list-mutation primitives such as hlist_add_head_rcu()
472 * as long as the traversal is guarded by rcu_read_lock(). 471 * as long as the traversal is guarded by rcu_read_lock().
473 */ 472 */
474#define hlist_for_each_entry_rcu_bh(tpos, pos, head, member) \ 473#define hlist_for_each_entry_rcu_bh(pos, head, member) \
475 for (pos = rcu_dereference_bh((head)->first); \ 474 for (pos = hlist_entry_safe(rcu_dereference_bh(hlist_first_rcu(head)),\
476 pos && \ 475 typeof(*(pos)), member); \
477 ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1; }); \ 476 pos; \
478 pos = rcu_dereference_bh(pos->next)) 477 pos = hlist_entry_safe(rcu_dereference_bh(hlist_next_rcu(\
478 &(pos)->member)), typeof(*(pos)), member))
479 479
480/** 480/**
481 * hlist_for_each_entry_continue_rcu - iterate over a hlist continuing after current point 481 * hlist_for_each_entry_continue_rcu - iterate over a hlist continuing after current point
482 * @tpos: the type * to use as a loop cursor. 482 * @pos: the type * to use as a loop cursor.
483 * @pos: the &struct hlist_node to use as a loop cursor.
484 * @member: the name of the hlist_node within the struct. 483 * @member: the name of the hlist_node within the struct.
485 */ 484 */
486#define hlist_for_each_entry_continue_rcu(tpos, pos, member) \ 485#define hlist_for_each_entry_continue_rcu(pos, member) \
487 for (pos = rcu_dereference((pos)->next); \ 486 for (pos = hlist_entry_safe(rcu_dereference((pos)->member.next),\
488 pos && \ 487 typeof(*(pos)), member); \
489 ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1; }); \ 488 pos; \
490 pos = rcu_dereference(pos->next)) 489 pos = hlist_entry_safe(rcu_dereference((pos)->member.next),\
490 typeof(*(pos)), member))
491 491
492/** 492/**
493 * hlist_for_each_entry_continue_rcu_bh - iterate over a hlist continuing after current point 493 * hlist_for_each_entry_continue_rcu_bh - iterate over a hlist continuing after current point
494 * @tpos: the type * to use as a loop cursor. 494 * @pos: the type * to use as a loop cursor.
495 * @pos: the &struct hlist_node to use as a loop cursor.
496 * @member: the name of the hlist_node within the struct. 495 * @member: the name of the hlist_node within the struct.
497 */ 496 */
498#define hlist_for_each_entry_continue_rcu_bh(tpos, pos, member) \ 497#define hlist_for_each_entry_continue_rcu_bh(pos, member) \
499 for (pos = rcu_dereference_bh((pos)->next); \ 498 for (pos = hlist_entry_safe(rcu_dereference_bh((pos)->member.next),\
500 pos && \ 499 typeof(*(pos)), member); \
501 ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1; }); \ 500 pos; \
502 pos = rcu_dereference_bh(pos->next)) 501 pos = hlist_entry_safe(rcu_dereference_bh((pos)->member.next),\
502 typeof(*(pos)), member))
503 503
504 504
505#endif /* __KERNEL__ */ 505#endif /* __KERNEL__ */
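Reader-side code loses only the cursor; hlist_entry_safe() now hides the rcu_dereference() of each next pointer. One behavioural nicety of the new form: on normal exhaustion the object cursor is reliably NULL, where the old tpos kept its last value. A hypothetical lookup (foo, foo_hash and hnode are illustrative, not from this patch):

	struct foo *f;

	rcu_read_lock();
	hlist_for_each_entry_rcu(f, &foo_hash[bkt], hnode)
		if (f->id == id)
			break;
	/* f is either the match or NULL, and is valid only until unlock */
	rcu_read_unlock();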
diff --git a/include/net/ax25.h b/include/net/ax25.h
index 53539acbd81a..89ed9ac5701f 100644
--- a/include/net/ax25.h
+++ b/include/net/ax25.h
@@ -161,8 +161,8 @@ typedef struct ax25_uid_assoc {
161 ax25_address call; 161 ax25_address call;
162} ax25_uid_assoc; 162} ax25_uid_assoc;
163 163
164#define ax25_uid_for_each(__ax25, node, list) \ 164#define ax25_uid_for_each(__ax25, list) \
165 hlist_for_each_entry(__ax25, node, list, uid_node) 165 hlist_for_each_entry(__ax25, list, uid_node)
166 166
167#define ax25_uid_hold(ax25) \ 167#define ax25_uid_hold(ax25) \
168 atomic_inc(&((ax25)->refcount)) 168 atomic_inc(&((ax25)->refcount))
@@ -247,8 +247,8 @@ typedef struct ax25_cb {
247 247
248#define ax25_sk(__sk) ((ax25_cb *)(__sk)->sk_protinfo) 248#define ax25_sk(__sk) ((ax25_cb *)(__sk)->sk_protinfo)
249 249
250#define ax25_for_each(__ax25, node, list) \ 250#define ax25_for_each(__ax25, list) \
251 hlist_for_each_entry(__ax25, node, list, ax25_node) 251 hlist_for_each_entry(__ax25, list, ax25_node)
252 252
253#define ax25_cb_hold(__ax25) \ 253#define ax25_cb_hold(__ax25) \
254 atomic_inc(&((__ax25)->refcount)) 254 atomic_inc(&((__ax25)->refcount))
diff --git a/include/net/inet_hashtables.h b/include/net/inet_hashtables.h
index 7b2ae9d37076..ef83d9e844b5 100644
--- a/include/net/inet_hashtables.h
+++ b/include/net/inet_hashtables.h
@@ -94,8 +94,8 @@ static inline struct net *ib_net(struct inet_bind_bucket *ib)
94 return read_pnet(&ib->ib_net); 94 return read_pnet(&ib->ib_net);
95} 95}
96 96
97#define inet_bind_bucket_for_each(tb, pos, head) \ 97#define inet_bind_bucket_for_each(tb, head) \
98 hlist_for_each_entry(tb, pos, head, node) 98 hlist_for_each_entry(tb, head, node)
99 99
100struct inet_bind_hashbucket { 100struct inet_bind_hashbucket {
101 spinlock_t lock; 101 spinlock_t lock;
diff --git a/include/net/inet_timewait_sock.h b/include/net/inet_timewait_sock.h
index 7d658d577368..f908dfc06505 100644
--- a/include/net/inet_timewait_sock.h
+++ b/include/net/inet_timewait_sock.h
@@ -178,11 +178,11 @@ static inline int inet_twsk_del_dead_node(struct inet_timewait_sock *tw)
178#define inet_twsk_for_each(tw, node, head) \ 178#define inet_twsk_for_each(tw, node, head) \
179 hlist_nulls_for_each_entry(tw, node, head, tw_node) 179 hlist_nulls_for_each_entry(tw, node, head, tw_node)
180 180
181#define inet_twsk_for_each_inmate(tw, node, jail) \ 181#define inet_twsk_for_each_inmate(tw, jail) \
182 hlist_for_each_entry(tw, node, jail, tw_death_node) 182 hlist_for_each_entry(tw, jail, tw_death_node)
183 183
184#define inet_twsk_for_each_inmate_safe(tw, node, safe, jail) \ 184#define inet_twsk_for_each_inmate_safe(tw, safe, jail) \
185 hlist_for_each_entry_safe(tw, node, safe, jail, tw_death_node) 185 hlist_for_each_entry_safe(tw, safe, jail, tw_death_node)
186 186
187static inline struct inet_timewait_sock *inet_twsk(const struct sock *sk) 187static inline struct inet_timewait_sock *inet_twsk(const struct sock *sk)
188{ 188{
diff --git a/include/net/netrom.h b/include/net/netrom.h
index f0793c1cb5f8..121dcf854db5 100644
--- a/include/net/netrom.h
+++ b/include/net/netrom.h
@@ -154,17 +154,17 @@ static __inline__ void nr_node_unlock(struct nr_node *nr_node)
154 nr_node_put(nr_node); 154 nr_node_put(nr_node);
155} 155}
156 156
157#define nr_neigh_for_each(__nr_neigh, node, list) \ 157#define nr_neigh_for_each(__nr_neigh, list) \
158 hlist_for_each_entry(__nr_neigh, node, list, neigh_node) 158 hlist_for_each_entry(__nr_neigh, list, neigh_node)
159 159
160#define nr_neigh_for_each_safe(__nr_neigh, node, node2, list) \ 160#define nr_neigh_for_each_safe(__nr_neigh, node2, list) \
161 hlist_for_each_entry_safe(__nr_neigh, node, node2, list, neigh_node) 161 hlist_for_each_entry_safe(__nr_neigh, node2, list, neigh_node)
162 162
163#define nr_node_for_each(__nr_node, node, list) \ 163#define nr_node_for_each(__nr_node, list) \
164 hlist_for_each_entry(__nr_node, node, list, node_node) 164 hlist_for_each_entry(__nr_node, list, node_node)
165 165
166#define nr_node_for_each_safe(__nr_node, node, node2, list) \ 166#define nr_node_for_each_safe(__nr_node, node2, list) \
167 hlist_for_each_entry_safe(__nr_node, node, node2, list, node_node) 167 hlist_for_each_entry_safe(__nr_node, node2, list, node_node)
168 168
169 169
170/*********************************************************************/ 170/*********************************************************************/
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
index 2761c905504e..f10818fc8804 100644
--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -339,11 +339,10 @@ static inline struct Qdisc_class_common *
339qdisc_class_find(const struct Qdisc_class_hash *hash, u32 id) 339qdisc_class_find(const struct Qdisc_class_hash *hash, u32 id)
340{ 340{
341 struct Qdisc_class_common *cl; 341 struct Qdisc_class_common *cl;
342 struct hlist_node *n;
343 unsigned int h; 342 unsigned int h;
344 343
345 h = qdisc_class_hash(id, hash->hashmask); 344 h = qdisc_class_hash(id, hash->hashmask);
346 hlist_for_each_entry(cl, n, &hash->hash[h], hnode) { 345 hlist_for_each_entry(cl, &hash->hash[h], hnode) {
347 if (cl->classid == id) 346 if (cl->classid == id)
348 return cl; 347 return cl;
349 } 348 }
diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h
index 7fdf298a47ef..df85a0c0f2d5 100644
--- a/include/net/sctp/sctp.h
+++ b/include/net/sctp/sctp.h
@@ -675,8 +675,8 @@ static inline int sctp_vtag_hashfn(__u16 lport, __u16 rport, __u32 vtag)
675 return h & (sctp_assoc_hashsize - 1); 675 return h & (sctp_assoc_hashsize - 1);
676} 676}
677 677
678#define sctp_for_each_hentry(epb, node, head) \ 678#define sctp_for_each_hentry(epb, head) \
679 hlist_for_each_entry(epb, node, head, node) 679 hlist_for_each_entry(epb, head, node)
680 680
681/* Is a socket of this style? */ 681/* Is a socket of this style? */
682#define sctp_style(sk, style) __sctp_style((sk), (SCTP_SOCKET_##style)) 682#define sctp_style(sk, style) __sctp_style((sk), (SCTP_SOCKET_##style))
diff --git a/include/net/sock.h b/include/net/sock.h
index a66caa223d18..14f6e9d19dc7 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -606,24 +606,23 @@ static inline void sk_add_bind_node(struct sock *sk,
606 hlist_add_head(&sk->sk_bind_node, list); 606 hlist_add_head(&sk->sk_bind_node, list);
607} 607}
608 608
609#define sk_for_each(__sk, node, list) \ 609#define sk_for_each(__sk, list) \
610 hlist_for_each_entry(__sk, node, list, sk_node) 610 hlist_for_each_entry(__sk, list, sk_node)
611#define sk_for_each_rcu(__sk, node, list) \ 611#define sk_for_each_rcu(__sk, list) \
612 hlist_for_each_entry_rcu(__sk, node, list, sk_node) 612 hlist_for_each_entry_rcu(__sk, list, sk_node)
613#define sk_nulls_for_each(__sk, node, list) \ 613#define sk_nulls_for_each(__sk, node, list) \
614 hlist_nulls_for_each_entry(__sk, node, list, sk_nulls_node) 614 hlist_nulls_for_each_entry(__sk, node, list, sk_nulls_node)
615#define sk_nulls_for_each_rcu(__sk, node, list) \ 615#define sk_nulls_for_each_rcu(__sk, node, list) \
616 hlist_nulls_for_each_entry_rcu(__sk, node, list, sk_nulls_node) 616 hlist_nulls_for_each_entry_rcu(__sk, node, list, sk_nulls_node)
617#define sk_for_each_from(__sk, node) \ 617#define sk_for_each_from(__sk) \
618 if (__sk && ({ node = &(__sk)->sk_node; 1; })) \ 618 hlist_for_each_entry_from(__sk, sk_node)
619 hlist_for_each_entry_from(__sk, node, sk_node)
620#define sk_nulls_for_each_from(__sk, node) \ 619#define sk_nulls_for_each_from(__sk, node) \
621 if (__sk && ({ node = &(__sk)->sk_nulls_node; 1; })) \ 620 if (__sk && ({ node = &(__sk)->sk_nulls_node; 1; })) \
622 hlist_nulls_for_each_entry_from(__sk, node, sk_nulls_node) 621 hlist_nulls_for_each_entry_from(__sk, node, sk_nulls_node)
623#define sk_for_each_safe(__sk, node, tmp, list) \ 622#define sk_for_each_safe(__sk, tmp, list) \
624 hlist_for_each_entry_safe(__sk, node, tmp, list, sk_node) 623 hlist_for_each_entry_safe(__sk, tmp, list, sk_node)
625#define sk_for_each_bound(__sk, node, list) \ 624#define sk_for_each_bound(__sk, list) \
626 hlist_for_each_entry(__sk, node, list, sk_bind_node) 625 hlist_for_each_entry(__sk, list, sk_bind_node)
627 626
628static inline struct user_namespace *sk_user_ns(struct sock *sk) 627static inline struct user_namespace *sk_user_ns(struct sock *sk)
629{ 628{
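Only the plain hlist wrappers lose their node argument here; sk_nulls_for_each() and friends keep it because the hlist_nulls iterators are not converted by this patch. A hypothetical converted caller:

	static struct sock *find_sk(struct hlist_head *list, unsigned int hash)
	{
		struct sock *sk;

		sk_for_each(sk, list)
			if (sk->sk_hash == hash)
				return sk;
		return NULL;
	}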
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index 40e0df6c2a2f..a32f9432666c 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -554,7 +554,6 @@ static struct css_set *find_existing_css_set(
554{ 554{
555 int i; 555 int i;
556 struct cgroupfs_root *root = cgrp->root; 556 struct cgroupfs_root *root = cgrp->root;
557 struct hlist_node *node;
558 struct css_set *cg; 557 struct css_set *cg;
559 unsigned long key; 558 unsigned long key;
560 559
@@ -577,7 +576,7 @@ static struct css_set *find_existing_css_set(
577 } 576 }
578 577
579 key = css_set_hash(template); 578 key = css_set_hash(template);
580 hash_for_each_possible(css_set_table, cg, node, hlist, key) { 579 hash_for_each_possible(css_set_table, cg, hlist, key) {
581 if (!compare_css_sets(cg, oldcg, cgrp, template)) 580 if (!compare_css_sets(cg, oldcg, cgrp, template))
582 continue; 581 continue;
583 582
@@ -1611,7 +1610,6 @@ static struct dentry *cgroup_mount(struct file_system_type *fs_type,
1611 struct cgroupfs_root *existing_root; 1610 struct cgroupfs_root *existing_root;
1612 const struct cred *cred; 1611 const struct cred *cred;
1613 int i; 1612 int i;
1614 struct hlist_node *node;
1615 struct css_set *cg; 1613 struct css_set *cg;
1616 1614
1617 BUG_ON(sb->s_root != NULL); 1615 BUG_ON(sb->s_root != NULL);
@@ -1666,7 +1664,7 @@ static struct dentry *cgroup_mount(struct file_system_type *fs_type,
1666 /* Link the top cgroup in this hierarchy into all 1664 /* Link the top cgroup in this hierarchy into all
1667 * the css_set objects */ 1665 * the css_set objects */
1668 write_lock(&css_set_lock); 1666 write_lock(&css_set_lock);
1669 hash_for_each(css_set_table, i, node, cg, hlist) 1667 hash_for_each(css_set_table, i, cg, hlist)
1670 link_css_set(&tmp_cg_links, cg, root_cgrp); 1668 link_css_set(&tmp_cg_links, cg, root_cgrp);
1671 write_unlock(&css_set_lock); 1669 write_unlock(&css_set_lock);
1672 1670
@@ -4493,7 +4491,7 @@ int __init_or_module cgroup_load_subsys(struct cgroup_subsys *ss)
4493{ 4491{
4494 struct cgroup_subsys_state *css; 4492 struct cgroup_subsys_state *css;
4495 int i, ret; 4493 int i, ret;
4496 struct hlist_node *node, *tmp; 4494 struct hlist_node *tmp;
4497 struct css_set *cg; 4495 struct css_set *cg;
4498 unsigned long key; 4496 unsigned long key;
4499 4497
@@ -4561,7 +4559,7 @@ int __init_or_module cgroup_load_subsys(struct cgroup_subsys *ss)
4561 * this is all done under the css_set_lock. 4559 * this is all done under the css_set_lock.
4562 */ 4560 */
4563 write_lock(&css_set_lock); 4561 write_lock(&css_set_lock);
4564 hash_for_each_safe(css_set_table, i, node, tmp, cg, hlist) { 4562 hash_for_each_safe(css_set_table, i, tmp, cg, hlist) {
4565 /* skip entries that we already rehashed */ 4563 /* skip entries that we already rehashed */
4566 if (cg->subsys[ss->subsys_id]) 4564 if (cg->subsys[ss->subsys_id])
4567 continue; 4565 continue;
@@ -4571,7 +4569,7 @@ int __init_or_module cgroup_load_subsys(struct cgroup_subsys *ss)
4571 cg->subsys[ss->subsys_id] = css; 4569 cg->subsys[ss->subsys_id] = css;
4572 /* recompute hash and restore entry */ 4570 /* recompute hash and restore entry */
4573 key = css_set_hash(cg->subsys); 4571 key = css_set_hash(cg->subsys);
4574 hash_add(css_set_table, node, key); 4572 hash_add(css_set_table, &cg->hlist, key);
4575 } 4573 }
4576 write_unlock(&css_set_lock); 4574 write_unlock(&css_set_lock);
4577 4575
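One subtlety in the rehash loop above: with no node cursor left to reuse, hash_add() must be handed the member pointer explicitly, hence &cg->hlist. The general shape of rehashing one entry (table, obj, member and recompute_key() are illustrative names, not from this patch):

	hash_del(&obj->member);
	obj->key = recompute_key(obj);
	hash_add(table, &obj->member, obj->key);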
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 5a92cf6beff0..b0cd86501c30 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -5126,7 +5126,6 @@ static void do_perf_sw_event(enum perf_type_id type, u32 event_id,
5126{ 5126{
5127 struct swevent_htable *swhash = &__get_cpu_var(swevent_htable); 5127 struct swevent_htable *swhash = &__get_cpu_var(swevent_htable);
5128 struct perf_event *event; 5128 struct perf_event *event;
5129 struct hlist_node *node;
5130 struct hlist_head *head; 5129 struct hlist_head *head;
5131 5130
5132 rcu_read_lock(); 5131 rcu_read_lock();
@@ -5134,7 +5133,7 @@ static void do_perf_sw_event(enum perf_type_id type, u32 event_id,
5134 if (!head) 5133 if (!head)
5135 goto end; 5134 goto end;
5136 5135
5137 hlist_for_each_entry_rcu(event, node, head, hlist_entry) { 5136 hlist_for_each_entry_rcu(event, head, hlist_entry) {
5138 if (perf_swevent_match(event, type, event_id, data, regs)) 5137 if (perf_swevent_match(event, type, event_id, data, regs))
5139 perf_swevent_event(event, nr, data, regs); 5138 perf_swevent_event(event, nr, data, regs);
5140 } 5139 }
@@ -5419,7 +5418,6 @@ void perf_tp_event(u64 addr, u64 count, void *record, int entry_size,
5419{ 5418{
5420 struct perf_sample_data data; 5419 struct perf_sample_data data;
5421 struct perf_event *event; 5420 struct perf_event *event;
5422 struct hlist_node *node;
5423 5421
5424 struct perf_raw_record raw = { 5422 struct perf_raw_record raw = {
5425 .size = entry_size, 5423 .size = entry_size,
@@ -5429,7 +5427,7 @@ void perf_tp_event(u64 addr, u64 count, void *record, int entry_size,
5429 perf_sample_data_init(&data, addr, 0); 5427 perf_sample_data_init(&data, addr, 0);
5430 data.raw = &raw; 5428 data.raw = &raw;
5431 5429
5432 hlist_for_each_entry_rcu(event, node, head, hlist_entry) { 5430 hlist_for_each_entry_rcu(event, head, hlist_entry) {
5433 if (perf_tp_event_match(event, &data, regs)) 5431 if (perf_tp_event_match(event, &data, regs))
5434 perf_swevent_event(event, count, &data, regs); 5432 perf_swevent_event(event, count, &data, regs);
5435 } 5433 }
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index 550294d58a02..e35be53f6613 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -334,11 +334,10 @@ static inline void reset_kprobe_instance(void)
334struct kprobe __kprobes *get_kprobe(void *addr) 334struct kprobe __kprobes *get_kprobe(void *addr)
335{ 335{
336 struct hlist_head *head; 336 struct hlist_head *head;
337 struct hlist_node *node;
338 struct kprobe *p; 337 struct kprobe *p;
339 338
340 head = &kprobe_table[hash_ptr(addr, KPROBE_HASH_BITS)]; 339 head = &kprobe_table[hash_ptr(addr, KPROBE_HASH_BITS)];
341 hlist_for_each_entry_rcu(p, node, head, hlist) { 340 hlist_for_each_entry_rcu(p, head, hlist) {
342 if (p->addr == addr) 341 if (p->addr == addr)
343 return p; 342 return p;
344 } 343 }
@@ -799,7 +798,6 @@ out:
799static void __kprobes optimize_all_kprobes(void) 798static void __kprobes optimize_all_kprobes(void)
800{ 799{
801 struct hlist_head *head; 800 struct hlist_head *head;
802 struct hlist_node *node;
803 struct kprobe *p; 801 struct kprobe *p;
804 unsigned int i; 802 unsigned int i;
805 803
@@ -810,7 +808,7 @@ static void __kprobes optimize_all_kprobes(void)
810 kprobes_allow_optimization = true; 808 kprobes_allow_optimization = true;
811 for (i = 0; i < KPROBE_TABLE_SIZE; i++) { 809 for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
812 head = &kprobe_table[i]; 810 head = &kprobe_table[i];
813 hlist_for_each_entry_rcu(p, node, head, hlist) 811 hlist_for_each_entry_rcu(p, head, hlist)
814 if (!kprobe_disabled(p)) 812 if (!kprobe_disabled(p))
815 optimize_kprobe(p); 813 optimize_kprobe(p);
816 } 814 }
@@ -821,7 +819,6 @@ static void __kprobes optimize_all_kprobes(void)
821static void __kprobes unoptimize_all_kprobes(void) 819static void __kprobes unoptimize_all_kprobes(void)
822{ 820{
823 struct hlist_head *head; 821 struct hlist_head *head;
824 struct hlist_node *node;
825 struct kprobe *p; 822 struct kprobe *p;
826 unsigned int i; 823 unsigned int i;
827 824
@@ -832,7 +829,7 @@ static void __kprobes unoptimize_all_kprobes(void)
832 kprobes_allow_optimization = false; 829 kprobes_allow_optimization = false;
833 for (i = 0; i < KPROBE_TABLE_SIZE; i++) { 830 for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
834 head = &kprobe_table[i]; 831 head = &kprobe_table[i];
835 hlist_for_each_entry_rcu(p, node, head, hlist) { 832 hlist_for_each_entry_rcu(p, head, hlist) {
836 if (!kprobe_disabled(p)) 833 if (!kprobe_disabled(p))
837 unoptimize_kprobe(p, false); 834 unoptimize_kprobe(p, false);
838 } 835 }
@@ -1148,7 +1145,7 @@ void __kprobes kprobe_flush_task(struct task_struct *tk)
1148{ 1145{
1149 struct kretprobe_instance *ri; 1146 struct kretprobe_instance *ri;
1150 struct hlist_head *head, empty_rp; 1147 struct hlist_head *head, empty_rp;
1151 struct hlist_node *node, *tmp; 1148 struct hlist_node *tmp;
1152 unsigned long hash, flags = 0; 1149 unsigned long hash, flags = 0;
1153 1150
1154 if (unlikely(!kprobes_initialized)) 1151 if (unlikely(!kprobes_initialized))
@@ -1159,12 +1156,12 @@ void __kprobes kprobe_flush_task(struct task_struct *tk)
1159 hash = hash_ptr(tk, KPROBE_HASH_BITS); 1156 hash = hash_ptr(tk, KPROBE_HASH_BITS);
1160 head = &kretprobe_inst_table[hash]; 1157 head = &kretprobe_inst_table[hash];
1161 kretprobe_table_lock(hash, &flags); 1158 kretprobe_table_lock(hash, &flags);
1162 hlist_for_each_entry_safe(ri, node, tmp, head, hlist) { 1159 hlist_for_each_entry_safe(ri, tmp, head, hlist) {
1163 if (ri->task == tk) 1160 if (ri->task == tk)
1164 recycle_rp_inst(ri, &empty_rp); 1161 recycle_rp_inst(ri, &empty_rp);
1165 } 1162 }
1166 kretprobe_table_unlock(hash, &flags); 1163 kretprobe_table_unlock(hash, &flags);
1167 hlist_for_each_entry_safe(ri, node, tmp, &empty_rp, hlist) { 1164 hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {
1168 hlist_del(&ri->hlist); 1165 hlist_del(&ri->hlist);
1169 kfree(ri); 1166 kfree(ri);
1170 } 1167 }
@@ -1173,9 +1170,9 @@ void __kprobes kprobe_flush_task(struct task_struct *tk)
1173static inline void free_rp_inst(struct kretprobe *rp) 1170static inline void free_rp_inst(struct kretprobe *rp)
1174{ 1171{
1175 struct kretprobe_instance *ri; 1172 struct kretprobe_instance *ri;
1176 struct hlist_node *pos, *next; 1173 struct hlist_node *next;
1177 1174
1178 hlist_for_each_entry_safe(ri, pos, next, &rp->free_instances, hlist) { 1175 hlist_for_each_entry_safe(ri, next, &rp->free_instances, hlist) {
1179 hlist_del(&ri->hlist); 1176 hlist_del(&ri->hlist);
1180 kfree(ri); 1177 kfree(ri);
1181 } 1178 }
@@ -1185,14 +1182,14 @@ static void __kprobes cleanup_rp_inst(struct kretprobe *rp)
1185{ 1182{
1186 unsigned long flags, hash; 1183 unsigned long flags, hash;
1187 struct kretprobe_instance *ri; 1184 struct kretprobe_instance *ri;
1188 struct hlist_node *pos, *next; 1185 struct hlist_node *next;
1189 struct hlist_head *head; 1186 struct hlist_head *head;
1190 1187
1191 /* No race here */ 1188 /* No race here */
1192 for (hash = 0; hash < KPROBE_TABLE_SIZE; hash++) { 1189 for (hash = 0; hash < KPROBE_TABLE_SIZE; hash++) {
1193 kretprobe_table_lock(hash, &flags); 1190 kretprobe_table_lock(hash, &flags);
1194 head = &kretprobe_inst_table[hash]; 1191 head = &kretprobe_inst_table[hash];
1195 hlist_for_each_entry_safe(ri, pos, next, head, hlist) { 1192 hlist_for_each_entry_safe(ri, next, head, hlist) {
1196 if (ri->rp == rp) 1193 if (ri->rp == rp)
1197 ri->rp = NULL; 1194 ri->rp = NULL;
1198 } 1195 }
@@ -2028,7 +2025,6 @@ static int __kprobes kprobes_module_callback(struct notifier_block *nb,
2028{ 2025{
2029 struct module *mod = data; 2026 struct module *mod = data;
2030 struct hlist_head *head; 2027 struct hlist_head *head;
2031 struct hlist_node *node;
2032 struct kprobe *p; 2028 struct kprobe *p;
2033 unsigned int i; 2029 unsigned int i;
2034 int checkcore = (val == MODULE_STATE_GOING); 2030 int checkcore = (val == MODULE_STATE_GOING);
@@ -2045,7 +2041,7 @@ static int __kprobes kprobes_module_callback(struct notifier_block *nb,
2045 mutex_lock(&kprobe_mutex); 2041 mutex_lock(&kprobe_mutex);
2046 for (i = 0; i < KPROBE_TABLE_SIZE; i++) { 2042 for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
2047 head = &kprobe_table[i]; 2043 head = &kprobe_table[i];
2048 hlist_for_each_entry_rcu(p, node, head, hlist) 2044 hlist_for_each_entry_rcu(p, head, hlist)
2049 if (within_module_init((unsigned long)p->addr, mod) || 2045 if (within_module_init((unsigned long)p->addr, mod) ||
2050 (checkcore && 2046 (checkcore &&
2051 within_module_core((unsigned long)p->addr, mod))) { 2047 within_module_core((unsigned long)p->addr, mod))) {
@@ -2192,7 +2188,6 @@ static void __kprobes kprobe_seq_stop(struct seq_file *f, void *v)
2192static int __kprobes show_kprobe_addr(struct seq_file *pi, void *v) 2188static int __kprobes show_kprobe_addr(struct seq_file *pi, void *v)
2193{ 2189{
2194 struct hlist_head *head; 2190 struct hlist_head *head;
2195 struct hlist_node *node;
2196 struct kprobe *p, *kp; 2191 struct kprobe *p, *kp;
2197 const char *sym = NULL; 2192 const char *sym = NULL;
2198 unsigned int i = *(loff_t *) v; 2193 unsigned int i = *(loff_t *) v;
@@ -2201,7 +2196,7 @@ static int __kprobes show_kprobe_addr(struct seq_file *pi, void *v)
2201 2196
2202 head = &kprobe_table[i]; 2197 head = &kprobe_table[i];
2203 preempt_disable(); 2198 preempt_disable();
2204 hlist_for_each_entry_rcu(p, node, head, hlist) { 2199 hlist_for_each_entry_rcu(p, head, hlist) {
2205 sym = kallsyms_lookup((unsigned long)p->addr, NULL, 2200 sym = kallsyms_lookup((unsigned long)p->addr, NULL,
2206 &offset, &modname, namebuf); 2201 &offset, &modname, namebuf);
2207 if (kprobe_aggrprobe(p)) { 2202 if (kprobe_aggrprobe(p)) {
@@ -2236,7 +2231,6 @@ static const struct file_operations debugfs_kprobes_operations = {
2236static void __kprobes arm_all_kprobes(void) 2231static void __kprobes arm_all_kprobes(void)
2237{ 2232{
2238 struct hlist_head *head; 2233 struct hlist_head *head;
2239 struct hlist_node *node;
2240 struct kprobe *p; 2234 struct kprobe *p;
2241 unsigned int i; 2235 unsigned int i;
2242 2236
@@ -2249,7 +2243,7 @@ static void __kprobes arm_all_kprobes(void)
2249 /* Arming kprobes doesn't optimize kprobe itself */ 2243 /* Arming kprobes doesn't optimize kprobe itself */
2250 for (i = 0; i < KPROBE_TABLE_SIZE; i++) { 2244 for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
2251 head = &kprobe_table[i]; 2245 head = &kprobe_table[i];
2252 hlist_for_each_entry_rcu(p, node, head, hlist) 2246 hlist_for_each_entry_rcu(p, head, hlist)
2253 if (!kprobe_disabled(p)) 2247 if (!kprobe_disabled(p))
2254 arm_kprobe(p); 2248 arm_kprobe(p);
2255 } 2249 }
@@ -2265,7 +2259,6 @@ already_enabled:
2265static void __kprobes disarm_all_kprobes(void) 2259static void __kprobes disarm_all_kprobes(void)
2266{ 2260{
2267 struct hlist_head *head; 2261 struct hlist_head *head;
2268 struct hlist_node *node;
2269 struct kprobe *p; 2262 struct kprobe *p;
2270 unsigned int i; 2263 unsigned int i;
2271 2264
@@ -2282,7 +2275,7 @@ static void __kprobes disarm_all_kprobes(void)
2282 2275
2283 for (i = 0; i < KPROBE_TABLE_SIZE; i++) { 2276 for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
2284 head = &kprobe_table[i]; 2277 head = &kprobe_table[i];
2285 hlist_for_each_entry_rcu(p, node, head, hlist) { 2278 hlist_for_each_entry_rcu(p, head, hlist) {
2286 if (!arch_trampoline_kprobe(p) && !kprobe_disabled(p)) 2279 if (!arch_trampoline_kprobe(p) && !kprobe_disabled(p))
2287 disarm_kprobe(p, false); 2280 disarm_kprobe(p, false);
2288 } 2281 }
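The _safe variants keep exactly one raw cursor, struct hlist_node *tmp: the macro snapshots pos->member.next into it before the body runs, which is what lets kprobe_flush_task() above free each instance as it goes. The idiom after conversion (struct item illustrative):

	struct item *it;
	struct hlist_node *tmp;

	hlist_for_each_entry_safe(it, tmp, head, node) {
		hlist_del(&it->node);
		kfree(it);
	}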
diff --git a/kernel/pid.c b/kernel/pid.c
index f2c6a6825098..047dc6264638 100644
--- a/kernel/pid.c
+++ b/kernel/pid.c
@@ -350,10 +350,9 @@ void disable_pid_allocation(struct pid_namespace *ns)
350 350
351struct pid *find_pid_ns(int nr, struct pid_namespace *ns) 351struct pid *find_pid_ns(int nr, struct pid_namespace *ns)
352{ 352{
353 struct hlist_node *elem;
354 struct upid *pnr; 353 struct upid *pnr;
355 354
356 hlist_for_each_entry_rcu(pnr, elem, 355 hlist_for_each_entry_rcu(pnr,
357 &pid_hash[pid_hashfn(nr, ns)], pid_chain) 356 &pid_hash[pid_hashfn(nr, ns)], pid_chain)
358 if (pnr->nr == nr && pnr->ns == ns) 357 if (pnr->nr == nr && pnr->ns == ns)
359 return container_of(pnr, struct pid, 358 return container_of(pnr, struct pid,
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 2b5243176aba..12af4270c9c1 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1752,9 +1752,8 @@ EXPORT_SYMBOL_GPL(preempt_notifier_unregister);
1752static void fire_sched_in_preempt_notifiers(struct task_struct *curr) 1752static void fire_sched_in_preempt_notifiers(struct task_struct *curr)
1753{ 1753{
1754 struct preempt_notifier *notifier; 1754 struct preempt_notifier *notifier;
1755 struct hlist_node *node;
1756 1755
1757 hlist_for_each_entry(notifier, node, &curr->preempt_notifiers, link) 1756 hlist_for_each_entry(notifier, &curr->preempt_notifiers, link)
1758 notifier->ops->sched_in(notifier, raw_smp_processor_id()); 1757 notifier->ops->sched_in(notifier, raw_smp_processor_id());
1759} 1758}
1760 1759
@@ -1763,9 +1762,8 @@ fire_sched_out_preempt_notifiers(struct task_struct *curr,
1763 struct task_struct *next) 1762 struct task_struct *next)
1764{ 1763{
1765 struct preempt_notifier *notifier; 1764 struct preempt_notifier *notifier;
1766 struct hlist_node *node;
1767 1765
1768 hlist_for_each_entry(notifier, node, &curr->preempt_notifiers, link) 1766 hlist_for_each_entry(notifier, &curr->preempt_notifiers, link)
1769 notifier->ops->sched_out(notifier, next); 1767 notifier->ops->sched_out(notifier, next);
1770} 1768}
1771 1769
diff --git a/kernel/smpboot.c b/kernel/smpboot.c
index d4abac261779..b9bde5727829 100644
--- a/kernel/smpboot.c
+++ b/kernel/smpboot.c
@@ -131,7 +131,7 @@ static int smpboot_thread_fn(void *data)
131 continue; 131 continue;
132 } 132 }
133 133
 134 BUG_ON(td->cpu != smp_processor_id()); 134 /* BUG_ON(td->cpu != smp_processor_id()); */
135 135
136 /* Check for state change setup */ 136 /* Check for state change setup */
137 switch (td->status) { 137 switch (td->status) {
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 98ca94a41819..ab25b88aae56 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -762,7 +762,6 @@ ftrace_find_profiled_func(struct ftrace_profile_stat *stat, unsigned long ip)
762{ 762{
763 struct ftrace_profile *rec; 763 struct ftrace_profile *rec;
764 struct hlist_head *hhd; 764 struct hlist_head *hhd;
765 struct hlist_node *n;
766 unsigned long key; 765 unsigned long key;
767 766
768 key = hash_long(ip, ftrace_profile_bits); 767 key = hash_long(ip, ftrace_profile_bits);
@@ -771,7 +770,7 @@ ftrace_find_profiled_func(struct ftrace_profile_stat *stat, unsigned long ip)
771 if (hlist_empty(hhd)) 770 if (hlist_empty(hhd))
772 return NULL; 771 return NULL;
773 772
774 hlist_for_each_entry_rcu(rec, n, hhd, node) { 773 hlist_for_each_entry_rcu(rec, hhd, node) {
775 if (rec->ip == ip) 774 if (rec->ip == ip)
776 return rec; 775 return rec;
777 } 776 }
@@ -1133,7 +1132,6 @@ ftrace_lookup_ip(struct ftrace_hash *hash, unsigned long ip)
1133 unsigned long key; 1132 unsigned long key;
1134 struct ftrace_func_entry *entry; 1133 struct ftrace_func_entry *entry;
1135 struct hlist_head *hhd; 1134 struct hlist_head *hhd;
1136 struct hlist_node *n;
1137 1135
1138 if (ftrace_hash_empty(hash)) 1136 if (ftrace_hash_empty(hash))
1139 return NULL; 1137 return NULL;
@@ -1145,7 +1143,7 @@ ftrace_lookup_ip(struct ftrace_hash *hash, unsigned long ip)
1145 1143
1146 hhd = &hash->buckets[key]; 1144 hhd = &hash->buckets[key];
1147 1145
1148 hlist_for_each_entry_rcu(entry, n, hhd, hlist) { 1146 hlist_for_each_entry_rcu(entry, hhd, hlist) {
1149 if (entry->ip == ip) 1147 if (entry->ip == ip)
1150 return entry; 1148 return entry;
1151 } 1149 }
@@ -1202,7 +1200,7 @@ remove_hash_entry(struct ftrace_hash *hash,
1202static void ftrace_hash_clear(struct ftrace_hash *hash) 1200static void ftrace_hash_clear(struct ftrace_hash *hash)
1203{ 1201{
1204 struct hlist_head *hhd; 1202 struct hlist_head *hhd;
1205 struct hlist_node *tp, *tn; 1203 struct hlist_node *tn;
1206 struct ftrace_func_entry *entry; 1204 struct ftrace_func_entry *entry;
1207 int size = 1 << hash->size_bits; 1205 int size = 1 << hash->size_bits;
1208 int i; 1206 int i;
@@ -1212,7 +1210,7 @@ static void ftrace_hash_clear(struct ftrace_hash *hash)
1212 1210
1213 for (i = 0; i < size; i++) { 1211 for (i = 0; i < size; i++) {
1214 hhd = &hash->buckets[i]; 1212 hhd = &hash->buckets[i];
1215 hlist_for_each_entry_safe(entry, tp, tn, hhd, hlist) 1213 hlist_for_each_entry_safe(entry, tn, hhd, hlist)
1216 free_hash_entry(hash, entry); 1214 free_hash_entry(hash, entry);
1217 } 1215 }
1218 FTRACE_WARN_ON(hash->count); 1216 FTRACE_WARN_ON(hash->count);
@@ -1275,7 +1273,6 @@ alloc_and_copy_ftrace_hash(int size_bits, struct ftrace_hash *hash)
1275{ 1273{
1276 struct ftrace_func_entry *entry; 1274 struct ftrace_func_entry *entry;
1277 struct ftrace_hash *new_hash; 1275 struct ftrace_hash *new_hash;
1278 struct hlist_node *tp;
1279 int size; 1276 int size;
1280 int ret; 1277 int ret;
1281 int i; 1278 int i;
@@ -1290,7 +1287,7 @@ alloc_and_copy_ftrace_hash(int size_bits, struct ftrace_hash *hash)
1290 1287
1291 size = 1 << hash->size_bits; 1288 size = 1 << hash->size_bits;
1292 for (i = 0; i < size; i++) { 1289 for (i = 0; i < size; i++) {
1293 hlist_for_each_entry(entry, tp, &hash->buckets[i], hlist) { 1290 hlist_for_each_entry(entry, &hash->buckets[i], hlist) {
1294 ret = add_hash_entry(new_hash, entry->ip); 1291 ret = add_hash_entry(new_hash, entry->ip);
1295 if (ret < 0) 1292 if (ret < 0)
1296 goto free_hash; 1293 goto free_hash;
@@ -1316,7 +1313,7 @@ ftrace_hash_move(struct ftrace_ops *ops, int enable,
1316 struct ftrace_hash **dst, struct ftrace_hash *src) 1313 struct ftrace_hash **dst, struct ftrace_hash *src)
1317{ 1314{
1318 struct ftrace_func_entry *entry; 1315 struct ftrace_func_entry *entry;
1319 struct hlist_node *tp, *tn; 1316 struct hlist_node *tn;
1320 struct hlist_head *hhd; 1317 struct hlist_head *hhd;
1321 struct ftrace_hash *old_hash; 1318 struct ftrace_hash *old_hash;
1322 struct ftrace_hash *new_hash; 1319 struct ftrace_hash *new_hash;
@@ -1362,7 +1359,7 @@ ftrace_hash_move(struct ftrace_ops *ops, int enable,
1362 size = 1 << src->size_bits; 1359 size = 1 << src->size_bits;
1363 for (i = 0; i < size; i++) { 1360 for (i = 0; i < size; i++) {
1364 hhd = &src->buckets[i]; 1361 hhd = &src->buckets[i];
1365 hlist_for_each_entry_safe(entry, tp, tn, hhd, hlist) { 1362 hlist_for_each_entry_safe(entry, tn, hhd, hlist) {
1366 if (bits > 0) 1363 if (bits > 0)
1367 key = hash_long(entry->ip, bits); 1364 key = hash_long(entry->ip, bits);
1368 else 1365 else
@@ -2901,7 +2898,6 @@ static void function_trace_probe_call(unsigned long ip, unsigned long parent_ip,
2901{ 2898{
2902 struct ftrace_func_probe *entry; 2899 struct ftrace_func_probe *entry;
2903 struct hlist_head *hhd; 2900 struct hlist_head *hhd;
2904 struct hlist_node *n;
2905 unsigned long key; 2901 unsigned long key;
2906 2902
2907 key = hash_long(ip, FTRACE_HASH_BITS); 2903 key = hash_long(ip, FTRACE_HASH_BITS);
@@ -2917,7 +2913,7 @@ static void function_trace_probe_call(unsigned long ip, unsigned long parent_ip,
2917 * on the hash. rcu_read_lock is too dangerous here. 2913 * on the hash. rcu_read_lock is too dangerous here.
2918 */ 2914 */
2919 preempt_disable_notrace(); 2915 preempt_disable_notrace();
2920 hlist_for_each_entry_rcu(entry, n, hhd, node) { 2916 hlist_for_each_entry_rcu(entry, hhd, node) {
2921 if (entry->ip == ip) 2917 if (entry->ip == ip)
2922 entry->ops->func(ip, parent_ip, &entry->data); 2918 entry->ops->func(ip, parent_ip, &entry->data);
2923 } 2919 }
@@ -3068,7 +3064,7 @@ __unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
3068 void *data, int flags) 3064 void *data, int flags)
3069{ 3065{
3070 struct ftrace_func_probe *entry; 3066 struct ftrace_func_probe *entry;
3071 struct hlist_node *n, *tmp; 3067 struct hlist_node *tmp;
3072 char str[KSYM_SYMBOL_LEN]; 3068 char str[KSYM_SYMBOL_LEN];
3073 int type = MATCH_FULL; 3069 int type = MATCH_FULL;
3074 int i, len = 0; 3070 int i, len = 0;
@@ -3091,7 +3087,7 @@ __unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
3091 for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) { 3087 for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
3092 struct hlist_head *hhd = &ftrace_func_hash[i]; 3088 struct hlist_head *hhd = &ftrace_func_hash[i];
3093 3089
3094 hlist_for_each_entry_safe(entry, n, tmp, hhd, node) { 3090 hlist_for_each_entry_safe(entry, tmp, hhd, node) {
3095 3091
3096 /* break up if statements for readability */ 3092 /* break up if statements for readability */
3097 if ((flags & PROBE_TEST_FUNC) && entry->ops != ops) 3093 if ((flags & PROBE_TEST_FUNC) && entry->ops != ops)
diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
index 194d79602dc7..697e88d13907 100644
--- a/kernel/trace/trace_output.c
+++ b/kernel/trace/trace_output.c
@@ -739,12 +739,11 @@ static int task_state_char(unsigned long state)
739struct trace_event *ftrace_find_event(int type) 739struct trace_event *ftrace_find_event(int type)
740{ 740{
741 struct trace_event *event; 741 struct trace_event *event;
742 struct hlist_node *n;
743 unsigned key; 742 unsigned key;
744 743
745 key = type & (EVENT_HASHSIZE - 1); 744 key = type & (EVENT_HASHSIZE - 1);
746 745
747 hlist_for_each_entry(event, n, &event_hash[key], node) { 746 hlist_for_each_entry(event, &event_hash[key], node) {
748 if (event->type == type) 747 if (event->type == type)
749 return event; 748 return event;
750 } 749 }
diff --git a/kernel/tracepoint.c b/kernel/tracepoint.c
index d96ba22dabfa..0c05a4592047 100644
--- a/kernel/tracepoint.c
+++ b/kernel/tracepoint.c
@@ -192,12 +192,11 @@ tracepoint_entry_remove_probe(struct tracepoint_entry *entry,
192static struct tracepoint_entry *get_tracepoint(const char *name) 192static struct tracepoint_entry *get_tracepoint(const char *name)
193{ 193{
194 struct hlist_head *head; 194 struct hlist_head *head;
195 struct hlist_node *node;
196 struct tracepoint_entry *e; 195 struct tracepoint_entry *e;
197 u32 hash = jhash(name, strlen(name), 0); 196 u32 hash = jhash(name, strlen(name), 0);
198 197
199 head = &tracepoint_table[hash & (TRACEPOINT_TABLE_SIZE - 1)]; 198 head = &tracepoint_table[hash & (TRACEPOINT_TABLE_SIZE - 1)];
200 hlist_for_each_entry(e, node, head, hlist) { 199 hlist_for_each_entry(e, head, hlist) {
201 if (!strcmp(name, e->name)) 200 if (!strcmp(name, e->name))
202 return e; 201 return e;
203 } 202 }
@@ -211,13 +210,12 @@ static struct tracepoint_entry *get_tracepoint(const char *name)
211static struct tracepoint_entry *add_tracepoint(const char *name) 210static struct tracepoint_entry *add_tracepoint(const char *name)
212{ 211{
213 struct hlist_head *head; 212 struct hlist_head *head;
214 struct hlist_node *node;
215 struct tracepoint_entry *e; 213 struct tracepoint_entry *e;
216 size_t name_len = strlen(name) + 1; 214 size_t name_len = strlen(name) + 1;
217 u32 hash = jhash(name, name_len-1, 0); 215 u32 hash = jhash(name, name_len-1, 0);
218 216
219 head = &tracepoint_table[hash & (TRACEPOINT_TABLE_SIZE - 1)]; 217 head = &tracepoint_table[hash & (TRACEPOINT_TABLE_SIZE - 1)];
220 hlist_for_each_entry(e, node, head, hlist) { 218 hlist_for_each_entry(e, head, hlist) {
221 if (!strcmp(name, e->name)) { 219 if (!strcmp(name, e->name)) {
222 printk(KERN_NOTICE 220 printk(KERN_NOTICE
223 "tracepoint %s busy\n", name); 221 "tracepoint %s busy\n", name);
diff --git a/kernel/user-return-notifier.c b/kernel/user-return-notifier.c
index 1744bb80f1fb..394f70b17162 100644
--- a/kernel/user-return-notifier.c
+++ b/kernel/user-return-notifier.c
@@ -34,11 +34,11 @@ EXPORT_SYMBOL_GPL(user_return_notifier_unregister);
34void fire_user_return_notifiers(void) 34void fire_user_return_notifiers(void)
35{ 35{
36 struct user_return_notifier *urn; 36 struct user_return_notifier *urn;
37 struct hlist_node *tmp1, *tmp2; 37 struct hlist_node *tmp2;
38 struct hlist_head *head; 38 struct hlist_head *head;
39 39
40 head = &get_cpu_var(return_notifier_list); 40 head = &get_cpu_var(return_notifier_list);
41 hlist_for_each_entry_safe(urn, tmp1, tmp2, head, link) 41 hlist_for_each_entry_safe(urn, tmp2, head, link)
42 urn->on_user_return(urn); 42 urn->on_user_return(urn);
43 put_cpu_var(return_notifier_list); 43 put_cpu_var(return_notifier_list);
44} 44}
diff --git a/kernel/user.c b/kernel/user.c
index 57ebfd42023c..e81978e8c03b 100644
--- a/kernel/user.c
+++ b/kernel/user.c
@@ -105,9 +105,8 @@ static void uid_hash_remove(struct user_struct *up)
105static struct user_struct *uid_hash_find(kuid_t uid, struct hlist_head *hashent) 105static struct user_struct *uid_hash_find(kuid_t uid, struct hlist_head *hashent)
106{ 106{
107 struct user_struct *user; 107 struct user_struct *user;
108 struct hlist_node *h;
109 108
110 hlist_for_each_entry(user, h, hashent, uidhash_node) { 109 hlist_for_each_entry(user, hashent, uidhash_node) {
111 if (uid_eq(user->uid, uid)) { 110 if (uid_eq(user->uid, uid)) {
112 atomic_inc(&user->__count); 111 atomic_inc(&user->__count);
113 return user; 112 return user;
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index f4feacad3812..81f2457811eb 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -251,8 +251,8 @@ EXPORT_SYMBOL_GPL(system_freezable_wq);
251 for ((pool) = &std_worker_pools(cpu)[0]; \ 251 for ((pool) = &std_worker_pools(cpu)[0]; \
252 (pool) < &std_worker_pools(cpu)[NR_STD_WORKER_POOLS]; (pool)++) 252 (pool) < &std_worker_pools(cpu)[NR_STD_WORKER_POOLS]; (pool)++)
253 253
254#define for_each_busy_worker(worker, i, pos, pool) \ 254#define for_each_busy_worker(worker, i, pool) \
255 hash_for_each(pool->busy_hash, i, pos, worker, hentry) 255 hash_for_each(pool->busy_hash, i, worker, hentry)
256 256
257static inline int __next_wq_cpu(int cpu, const struct cpumask *mask, 257static inline int __next_wq_cpu(int cpu, const struct cpumask *mask,
258 unsigned int sw) 258 unsigned int sw)
@@ -909,9 +909,8 @@ static struct worker *find_worker_executing_work(struct worker_pool *pool,
909 struct work_struct *work) 909 struct work_struct *work)
910{ 910{
911 struct worker *worker; 911 struct worker *worker;
912 struct hlist_node *tmp;
913 912
914 hash_for_each_possible(pool->busy_hash, worker, tmp, hentry, 913 hash_for_each_possible(pool->busy_hash, worker, hentry,
915 (unsigned long)work) 914 (unsigned long)work)
916 if (worker->current_work == work && 915 if (worker->current_work == work &&
917 worker->current_func == work->func) 916 worker->current_func == work->func)
@@ -1626,7 +1625,6 @@ static void busy_worker_rebind_fn(struct work_struct *work)
1626static void rebind_workers(struct worker_pool *pool) 1625static void rebind_workers(struct worker_pool *pool)
1627{ 1626{
1628 struct worker *worker, *n; 1627 struct worker *worker, *n;
1629 struct hlist_node *pos;
1630 int i; 1628 int i;
1631 1629
1632 lockdep_assert_held(&pool->assoc_mutex); 1630 lockdep_assert_held(&pool->assoc_mutex);
@@ -1648,7 +1646,7 @@ static void rebind_workers(struct worker_pool *pool)
1648 } 1646 }
1649 1647
1650 /* rebind busy workers */ 1648 /* rebind busy workers */
1651 for_each_busy_worker(worker, i, pos, pool) { 1649 for_each_busy_worker(worker, i, pool) {
1652 struct work_struct *rebind_work = &worker->rebind_work; 1650 struct work_struct *rebind_work = &worker->rebind_work;
1653 struct workqueue_struct *wq; 1651 struct workqueue_struct *wq;
1654 1652
@@ -3423,7 +3421,6 @@ static void wq_unbind_fn(struct work_struct *work)
3423 int cpu = smp_processor_id(); 3421 int cpu = smp_processor_id();
3424 struct worker_pool *pool; 3422 struct worker_pool *pool;
3425 struct worker *worker; 3423 struct worker *worker;
3426 struct hlist_node *pos;
3427 int i; 3424 int i;
3428 3425
3429 for_each_std_worker_pool(pool, cpu) { 3426 for_each_std_worker_pool(pool, cpu) {
@@ -3442,7 +3439,7 @@ static void wq_unbind_fn(struct work_struct *work)
3442 list_for_each_entry(worker, &pool->idle_list, entry) 3439 list_for_each_entry(worker, &pool->idle_list, entry)
3443 worker->flags |= WORKER_UNBOUND; 3440 worker->flags |= WORKER_UNBOUND;
3444 3441
3445 for_each_busy_worker(worker, i, pos, pool) 3442 for_each_busy_worker(worker, i, pool)
3446 worker->flags |= WORKER_UNBOUND; 3443 worker->flags |= WORKER_UNBOUND;
3447 3444
3448 pool->flags |= POOL_DISASSOCIATED; 3445 pool->flags |= POOL_DISASSOCIATED;
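Wrapper macros shrink in step: for_each_busy_worker() now forwards straight to hash_for_each() with just the object cursor, the integer bucket cursor and the pool. A trivial sketch (counting only, illustrative):

	struct worker *worker;
	int i, n = 0;

	for_each_busy_worker(worker, i, pool)
		n++;	/* n = number of busy workers in this pool */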
diff --git a/lib/debugobjects.c b/lib/debugobjects.c
index d11808ca4bc4..37061ede8b81 100644
--- a/lib/debugobjects.c
+++ b/lib/debugobjects.c
@@ -109,11 +109,10 @@ static void fill_pool(void)
109 */ 109 */
110static struct debug_obj *lookup_object(void *addr, struct debug_bucket *b) 110static struct debug_obj *lookup_object(void *addr, struct debug_bucket *b)
111{ 111{
112 struct hlist_node *node;
113 struct debug_obj *obj; 112 struct debug_obj *obj;
114 int cnt = 0; 113 int cnt = 0;
115 114
116 hlist_for_each_entry(obj, node, &b->list, node) { 115 hlist_for_each_entry(obj, &b->list, node) {
117 cnt++; 116 cnt++;
118 if (obj->object == addr) 117 if (obj->object == addr)
119 return obj; 118 return obj;
@@ -213,7 +212,7 @@ static void free_object(struct debug_obj *obj)
213static void debug_objects_oom(void) 212static void debug_objects_oom(void)
214{ 213{
215 struct debug_bucket *db = obj_hash; 214 struct debug_bucket *db = obj_hash;
216 struct hlist_node *node, *tmp; 215 struct hlist_node *tmp;
217 HLIST_HEAD(freelist); 216 HLIST_HEAD(freelist);
218 struct debug_obj *obj; 217 struct debug_obj *obj;
219 unsigned long flags; 218 unsigned long flags;
@@ -227,7 +226,7 @@ static void debug_objects_oom(void)
227 raw_spin_unlock_irqrestore(&db->lock, flags); 226 raw_spin_unlock_irqrestore(&db->lock, flags);
228 227
229 /* Now free them */ 228 /* Now free them */
230 hlist_for_each_entry_safe(obj, node, tmp, &freelist, node) { 229 hlist_for_each_entry_safe(obj, tmp, &freelist, node) {
231 hlist_del(&obj->node); 230 hlist_del(&obj->node);
232 free_object(obj); 231 free_object(obj);
233 } 232 }
@@ -658,7 +657,7 @@ debug_object_active_state(void *addr, struct debug_obj_descr *descr,
658static void __debug_check_no_obj_freed(const void *address, unsigned long size) 657static void __debug_check_no_obj_freed(const void *address, unsigned long size)
659{ 658{
660 unsigned long flags, oaddr, saddr, eaddr, paddr, chunks; 659 unsigned long flags, oaddr, saddr, eaddr, paddr, chunks;
661 struct hlist_node *node, *tmp; 660 struct hlist_node *tmp;
662 HLIST_HEAD(freelist); 661 HLIST_HEAD(freelist);
663 struct debug_obj_descr *descr; 662 struct debug_obj_descr *descr;
664 enum debug_obj_state state; 663 enum debug_obj_state state;
@@ -678,7 +677,7 @@ static void __debug_check_no_obj_freed(const void *address, unsigned long size)
678repeat: 677repeat:
679 cnt = 0; 678 cnt = 0;
680 raw_spin_lock_irqsave(&db->lock, flags); 679 raw_spin_lock_irqsave(&db->lock, flags);
681 hlist_for_each_entry_safe(obj, node, tmp, &db->list, node) { 680 hlist_for_each_entry_safe(obj, tmp, &db->list, node) {
682 cnt++; 681 cnt++;
683 oaddr = (unsigned long) obj->object; 682 oaddr = (unsigned long) obj->object;
684 if (oaddr < saddr || oaddr >= eaddr) 683 if (oaddr < saddr || oaddr >= eaddr)
@@ -702,7 +701,7 @@ repeat:
702 raw_spin_unlock_irqrestore(&db->lock, flags); 701 raw_spin_unlock_irqrestore(&db->lock, flags);
703 702
704 /* Now free them */ 703 /* Now free them */
705 hlist_for_each_entry_safe(obj, node, tmp, &freelist, node) { 704 hlist_for_each_entry_safe(obj, tmp, &freelist, node) {
706 hlist_del(&obj->node); 705 hlist_del(&obj->node);
707 free_object(obj); 706 free_object(obj);
708 } 707 }
@@ -1013,7 +1012,7 @@ void __init debug_objects_early_init(void)
1013static int __init debug_objects_replace_static_objects(void) 1012static int __init debug_objects_replace_static_objects(void)
1014{ 1013{
1015 struct debug_bucket *db = obj_hash; 1014 struct debug_bucket *db = obj_hash;
1016 struct hlist_node *node, *tmp; 1015 struct hlist_node *tmp;
1017 struct debug_obj *obj, *new; 1016 struct debug_obj *obj, *new;
1018 HLIST_HEAD(objects); 1017 HLIST_HEAD(objects);
1019 int i, cnt = 0; 1018 int i, cnt = 0;
@@ -1033,7 +1032,7 @@ static int __init debug_objects_replace_static_objects(void)
1033 local_irq_disable(); 1032 local_irq_disable();
1034 1033
1035 /* Remove the statically allocated objects from the pool */ 1034 /* Remove the statically allocated objects from the pool */
1036 hlist_for_each_entry_safe(obj, node, tmp, &obj_pool, node) 1035 hlist_for_each_entry_safe(obj, tmp, &obj_pool, node)
1037 hlist_del(&obj->node); 1036 hlist_del(&obj->node);
1038 /* Move the allocated objects to the pool */ 1037 /* Move the allocated objects to the pool */
1039 hlist_move_list(&objects, &obj_pool); 1038 hlist_move_list(&objects, &obj_pool);
@@ -1042,7 +1041,7 @@ static int __init debug_objects_replace_static_objects(void)
1042 for (i = 0; i < ODEBUG_HASH_SIZE; i++, db++) { 1041 for (i = 0; i < ODEBUG_HASH_SIZE; i++, db++) {
1043 hlist_move_list(&db->list, &objects); 1042 hlist_move_list(&db->list, &objects);
1044 1043
1045 hlist_for_each_entry(obj, node, &objects, node) { 1044 hlist_for_each_entry(obj, &objects, node) {
1046 new = hlist_entry(obj_pool.first, typeof(*obj), node); 1045 new = hlist_entry(obj_pool.first, typeof(*obj), node);
1047 hlist_del(&new->node); 1046 hlist_del(&new->node);
1048 /* copy object data */ 1047 /* copy object data */
@@ -1057,7 +1056,7 @@ static int __init debug_objects_replace_static_objects(void)
1057 obj_pool_used); 1056 obj_pool_used);
1058 return 0; 1057 return 0;
1059free: 1058free:
1060 hlist_for_each_entry_safe(obj, node, tmp, &objects, node) { 1059 hlist_for_each_entry_safe(obj, tmp, &objects, node) {
1061 hlist_del(&obj->node); 1060 hlist_del(&obj->node);
1062 kmem_cache_free(obj_cache, obj); 1061 kmem_cache_free(obj_cache, obj);
1063 } 1062 }
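
All of the conversions above follow one shape: hlist_for_each_entry() now recovers its cursor from the entry itself via container_of(), so the extra struct hlist_node * local simply disappears. A minimal sketch of the before/after, with a hypothetical my_obj type standing in for struct debug_obj:

#include <linux/list.h>

struct my_obj {				/* hypothetical payload type */
	void *addr;
	struct hlist_node node;		/* links the object into a bucket */
};

/*
 * Old form (pre-series), for comparison:
 *	struct hlist_node *pos;
 *	hlist_for_each_entry(obj, pos, head, node) ...
 */
static struct my_obj *my_lookup(void *addr, struct hlist_head *head)
{
	struct my_obj *obj;

	hlist_for_each_entry(obj, head, node)	/* new three-argument form */
		if (obj->addr == addr)
			return obj;
	return NULL;
}

The same mechanical rewrite applies to every plain hlist_for_each_entry() hunk in this patch.
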
diff --git a/lib/lru_cache.c b/lib/lru_cache.c
index d71d89498943..8335d39d2ccd 100644
--- a/lib/lru_cache.c
+++ b/lib/lru_cache.c
@@ -262,12 +262,11 @@ static struct hlist_head *lc_hash_slot(struct lru_cache *lc, unsigned int enr)
262static struct lc_element *__lc_find(struct lru_cache *lc, unsigned int enr, 262static struct lc_element *__lc_find(struct lru_cache *lc, unsigned int enr,
263 bool include_changing) 263 bool include_changing)
264{ 264{
265 struct hlist_node *n;
266 struct lc_element *e; 265 struct lc_element *e;
267 266
268 BUG_ON(!lc); 267 BUG_ON(!lc);
269 BUG_ON(!lc->nr_elements); 268 BUG_ON(!lc->nr_elements);
270 hlist_for_each_entry(e, n, lc_hash_slot(lc, enr), colision) { 269 hlist_for_each_entry(e, lc_hash_slot(lc, enr), colision) {
271 /* "about to be changed" elements, pending transaction commit, 270 /* "about to be changed" elements, pending transaction commit,
272 * are hashed by their "new number". "Normal" elements have 271 * are hashed by their "new number". "Normal" elements have
273 * lc_number == lc_new_number. */ 272 * lc_number == lc_new_number. */
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index bfa142e67b1c..e2f7f5aaaafb 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1906,9 +1906,8 @@ static inline void free_mm_slot(struct mm_slot *mm_slot)
1906static struct mm_slot *get_mm_slot(struct mm_struct *mm) 1906static struct mm_slot *get_mm_slot(struct mm_struct *mm)
1907{ 1907{
1908 struct mm_slot *mm_slot; 1908 struct mm_slot *mm_slot;
1909 struct hlist_node *node;
1910 1909
1911 hash_for_each_possible(mm_slots_hash, mm_slot, node, hash, (unsigned long)mm) 1910 hash_for_each_possible(mm_slots_hash, mm_slot, hash, (unsigned long)mm)
1912 if (mm == mm_slot->mm) 1911 if (mm == mm_slot->mm)
1913 return mm_slot; 1912 return mm_slot;
1914 1913
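
The hashtable.h wrappers change in lockstep, since hash_for_each_possible() expands to hlist_for_each_entry() over a single bucket. A sketch of the new calling convention under assumed names (my_slot, a 4-bit table) rather than the mm_slots_hash specifics:

#include <linux/hashtable.h>

struct my_slot {			/* hypothetical entry type */
	unsigned long key;
	struct hlist_node hash;		/* member the table chains on */
};

static DEFINE_HASHTABLE(my_slots, 4);	/* 16 buckets */

static struct my_slot *my_get_slot(unsigned long key)
{
	struct my_slot *slot;

	/* Walks only the bucket 'key' hashes to; the old form took an
	 * extra struct hlist_node * argument between slot and hash. */
	hash_for_each_possible(my_slots, slot, hash, key)
		if (slot->key == key)
			return slot;
	return NULL;
}
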
diff --git a/mm/kmemleak.c b/mm/kmemleak.c
index 83dd5fbf5e60..c8d7f3110fd0 100644
--- a/mm/kmemleak.c
+++ b/mm/kmemleak.c
@@ -436,7 +436,7 @@ static int get_object(struct kmemleak_object *object)
436 */ 436 */
437static void free_object_rcu(struct rcu_head *rcu) 437static void free_object_rcu(struct rcu_head *rcu)
438{ 438{
439 struct hlist_node *elem, *tmp; 439 struct hlist_node *tmp;
440 struct kmemleak_scan_area *area; 440 struct kmemleak_scan_area *area;
441 struct kmemleak_object *object = 441 struct kmemleak_object *object =
442 container_of(rcu, struct kmemleak_object, rcu); 442 container_of(rcu, struct kmemleak_object, rcu);
@@ -445,8 +445,8 @@ static void free_object_rcu(struct rcu_head *rcu)
445 * Once use_count is 0 (guaranteed by put_object), there is no other 445 * Once use_count is 0 (guaranteed by put_object), there is no other
446 * code accessing this object, hence no need for locking. 446 * code accessing this object, hence no need for locking.
447 */ 447 */
448 hlist_for_each_entry_safe(area, elem, tmp, &object->area_list, node) { 448 hlist_for_each_entry_safe(area, tmp, &object->area_list, node) {
449 hlist_del(elem); 449 hlist_del(&area->node);
450 kmem_cache_free(scan_area_cache, area); 450 kmem_cache_free(scan_area_cache, area);
451 } 451 }
452 kmem_cache_free(object_cache, object); 452 kmem_cache_free(object_cache, object);
@@ -1177,7 +1177,6 @@ static void scan_block(void *_start, void *_end,
1177static void scan_object(struct kmemleak_object *object) 1177static void scan_object(struct kmemleak_object *object)
1178{ 1178{
1179 struct kmemleak_scan_area *area; 1179 struct kmemleak_scan_area *area;
1180 struct hlist_node *elem;
1181 unsigned long flags; 1180 unsigned long flags;
1182 1181
1183 /* 1182 /*
@@ -1205,7 +1204,7 @@ static void scan_object(struct kmemleak_object *object)
1205 spin_lock_irqsave(&object->lock, flags); 1204 spin_lock_irqsave(&object->lock, flags);
1206 } 1205 }
1207 } else 1206 } else
1208 hlist_for_each_entry(area, elem, &object->area_list, node) 1207 hlist_for_each_entry(area, &object->area_list, node)
1209 scan_block((void *)area->start, 1208 scan_block((void *)area->start,
1210 (void *)(area->start + area->size), 1209 (void *)(area->start + area->size),
1211 object, 0); 1210 object, 0);
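
hlist_for_each_entry_safe() keeps its tmp parameter, which is what makes deletion during the walk safe, but loses the cursor; that is why the hunk above has to switch from hlist_del(elem) to hlist_del(&area->node). The pattern in isolation, with a hypothetical my_area type:

#include <linux/list.h>
#include <linux/slab.h>

struct my_area {			/* hypothetical entry type */
	struct hlist_node node;
};

static void my_free_all(struct hlist_head *head)
{
	struct my_area *area;
	struct hlist_node *tmp;		/* still required: caches ->next */

	hlist_for_each_entry_safe(area, tmp, head, node) {
		hlist_del(&area->node);	/* no loop cursor left to pass */
		kfree(area);
	}
}
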
diff --git a/mm/ksm.c b/mm/ksm.c
index ab2ba9ad3c59..85bfd4c16346 100644
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -320,10 +320,9 @@ static inline void free_mm_slot(struct mm_slot *mm_slot)
320 320
321static struct mm_slot *get_mm_slot(struct mm_struct *mm) 321static struct mm_slot *get_mm_slot(struct mm_struct *mm)
322{ 322{
323 struct hlist_node *node;
324 struct mm_slot *slot; 323 struct mm_slot *slot;
325 324
326 hash_for_each_possible(mm_slots_hash, slot, node, link, (unsigned long)mm) 325 hash_for_each_possible(mm_slots_hash, slot, link, (unsigned long)mm)
327 if (slot->mm == mm) 326 if (slot->mm == mm)
328 return slot; 327 return slot;
329 328
@@ -496,9 +495,8 @@ static inline int get_kpfn_nid(unsigned long kpfn)
496static void remove_node_from_stable_tree(struct stable_node *stable_node) 495static void remove_node_from_stable_tree(struct stable_node *stable_node)
497{ 496{
498 struct rmap_item *rmap_item; 497 struct rmap_item *rmap_item;
499 struct hlist_node *hlist;
500 498
501 hlist_for_each_entry(rmap_item, hlist, &stable_node->hlist, hlist) { 499 hlist_for_each_entry(rmap_item, &stable_node->hlist, hlist) {
502 if (rmap_item->hlist.next) 500 if (rmap_item->hlist.next)
503 ksm_pages_sharing--; 501 ksm_pages_sharing--;
504 else 502 else
@@ -1898,7 +1896,6 @@ int page_referenced_ksm(struct page *page, struct mem_cgroup *memcg,
1898{ 1896{
1899 struct stable_node *stable_node; 1897 struct stable_node *stable_node;
1900 struct rmap_item *rmap_item; 1898 struct rmap_item *rmap_item;
1901 struct hlist_node *hlist;
1902 unsigned int mapcount = page_mapcount(page); 1899 unsigned int mapcount = page_mapcount(page);
1903 int referenced = 0; 1900 int referenced = 0;
1904 int search_new_forks = 0; 1901 int search_new_forks = 0;
@@ -1910,7 +1907,7 @@ int page_referenced_ksm(struct page *page, struct mem_cgroup *memcg,
1910 if (!stable_node) 1907 if (!stable_node)
1911 return 0; 1908 return 0;
1912again: 1909again:
1913 hlist_for_each_entry(rmap_item, hlist, &stable_node->hlist, hlist) { 1910 hlist_for_each_entry(rmap_item, &stable_node->hlist, hlist) {
1914 struct anon_vma *anon_vma = rmap_item->anon_vma; 1911 struct anon_vma *anon_vma = rmap_item->anon_vma;
1915 struct anon_vma_chain *vmac; 1912 struct anon_vma_chain *vmac;
1916 struct vm_area_struct *vma; 1913 struct vm_area_struct *vma;
@@ -1952,7 +1949,6 @@ out:
1952int try_to_unmap_ksm(struct page *page, enum ttu_flags flags) 1949int try_to_unmap_ksm(struct page *page, enum ttu_flags flags)
1953{ 1950{
1954 struct stable_node *stable_node; 1951 struct stable_node *stable_node;
1955 struct hlist_node *hlist;
1956 struct rmap_item *rmap_item; 1952 struct rmap_item *rmap_item;
1957 int ret = SWAP_AGAIN; 1953 int ret = SWAP_AGAIN;
1958 int search_new_forks = 0; 1954 int search_new_forks = 0;
@@ -1964,7 +1960,7 @@ int try_to_unmap_ksm(struct page *page, enum ttu_flags flags)
1964 if (!stable_node) 1960 if (!stable_node)
1965 return SWAP_FAIL; 1961 return SWAP_FAIL;
1966again: 1962again:
1967 hlist_for_each_entry(rmap_item, hlist, &stable_node->hlist, hlist) { 1963 hlist_for_each_entry(rmap_item, &stable_node->hlist, hlist) {
1968 struct anon_vma *anon_vma = rmap_item->anon_vma; 1964 struct anon_vma *anon_vma = rmap_item->anon_vma;
1969 struct anon_vma_chain *vmac; 1965 struct anon_vma_chain *vmac;
1970 struct vm_area_struct *vma; 1966 struct vm_area_struct *vma;
@@ -2005,7 +2001,6 @@ int rmap_walk_ksm(struct page *page, int (*rmap_one)(struct page *,
2005 struct vm_area_struct *, unsigned long, void *), void *arg) 2001 struct vm_area_struct *, unsigned long, void *), void *arg)
2006{ 2002{
2007 struct stable_node *stable_node; 2003 struct stable_node *stable_node;
2008 struct hlist_node *hlist;
2009 struct rmap_item *rmap_item; 2004 struct rmap_item *rmap_item;
2010 int ret = SWAP_AGAIN; 2005 int ret = SWAP_AGAIN;
2011 int search_new_forks = 0; 2006 int search_new_forks = 0;
@@ -2017,7 +2012,7 @@ int rmap_walk_ksm(struct page *page, int (*rmap_one)(struct page *,
2017 if (!stable_node) 2012 if (!stable_node)
2018 return ret; 2013 return ret;
2019again: 2014again:
2020 hlist_for_each_entry(rmap_item, hlist, &stable_node->hlist, hlist) { 2015 hlist_for_each_entry(rmap_item, &stable_node->hlist, hlist) {
2021 struct anon_vma *anon_vma = rmap_item->anon_vma; 2016 struct anon_vma *anon_vma = rmap_item->anon_vma;
2022 struct anon_vma_chain *vmac; 2017 struct anon_vma_chain *vmac;
2023 struct vm_area_struct *vma; 2018 struct vm_area_struct *vma;
diff --git a/mm/mmu_notifier.c b/mm/mmu_notifier.c
index 2175fb0d501c..be04122fb277 100644
--- a/mm/mmu_notifier.c
+++ b/mm/mmu_notifier.c
@@ -95,11 +95,10 @@ int __mmu_notifier_clear_flush_young(struct mm_struct *mm,
95 unsigned long address) 95 unsigned long address)
96{ 96{
97 struct mmu_notifier *mn; 97 struct mmu_notifier *mn;
98 struct hlist_node *n;
99 int young = 0, id; 98 int young = 0, id;
100 99
101 id = srcu_read_lock(&srcu); 100 id = srcu_read_lock(&srcu);
102 hlist_for_each_entry_rcu(mn, n, &mm->mmu_notifier_mm->list, hlist) { 101 hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist) {
103 if (mn->ops->clear_flush_young) 102 if (mn->ops->clear_flush_young)
104 young |= mn->ops->clear_flush_young(mn, mm, address); 103 young |= mn->ops->clear_flush_young(mn, mm, address);
105 } 104 }
@@ -112,11 +111,10 @@ int __mmu_notifier_test_young(struct mm_struct *mm,
112 unsigned long address) 111 unsigned long address)
113{ 112{
114 struct mmu_notifier *mn; 113 struct mmu_notifier *mn;
115 struct hlist_node *n;
116 int young = 0, id; 114 int young = 0, id;
117 115
118 id = srcu_read_lock(&srcu); 116 id = srcu_read_lock(&srcu);
119 hlist_for_each_entry_rcu(mn, n, &mm->mmu_notifier_mm->list, hlist) { 117 hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist) {
120 if (mn->ops->test_young) { 118 if (mn->ops->test_young) {
121 young = mn->ops->test_young(mn, mm, address); 119 young = mn->ops->test_young(mn, mm, address);
122 if (young) 120 if (young)
@@ -132,11 +130,10 @@ void __mmu_notifier_change_pte(struct mm_struct *mm, unsigned long address,
132 pte_t pte) 130 pte_t pte)
133{ 131{
134 struct mmu_notifier *mn; 132 struct mmu_notifier *mn;
135 struct hlist_node *n;
136 int id; 133 int id;
137 134
138 id = srcu_read_lock(&srcu); 135 id = srcu_read_lock(&srcu);
139 hlist_for_each_entry_rcu(mn, n, &mm->mmu_notifier_mm->list, hlist) { 136 hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist) {
140 if (mn->ops->change_pte) 137 if (mn->ops->change_pte)
141 mn->ops->change_pte(mn, mm, address, pte); 138 mn->ops->change_pte(mn, mm, address, pte);
142 } 139 }
@@ -147,11 +144,10 @@ void __mmu_notifier_invalidate_page(struct mm_struct *mm,
147 unsigned long address) 144 unsigned long address)
148{ 145{
149 struct mmu_notifier *mn; 146 struct mmu_notifier *mn;
150 struct hlist_node *n;
151 int id; 147 int id;
152 148
153 id = srcu_read_lock(&srcu); 149 id = srcu_read_lock(&srcu);
154 hlist_for_each_entry_rcu(mn, n, &mm->mmu_notifier_mm->list, hlist) { 150 hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist) {
155 if (mn->ops->invalidate_page) 151 if (mn->ops->invalidate_page)
156 mn->ops->invalidate_page(mn, mm, address); 152 mn->ops->invalidate_page(mn, mm, address);
157 } 153 }
@@ -162,11 +158,10 @@ void __mmu_notifier_invalidate_range_start(struct mm_struct *mm,
162 unsigned long start, unsigned long end) 158 unsigned long start, unsigned long end)
163{ 159{
164 struct mmu_notifier *mn; 160 struct mmu_notifier *mn;
165 struct hlist_node *n;
166 int id; 161 int id;
167 162
168 id = srcu_read_lock(&srcu); 163 id = srcu_read_lock(&srcu);
169 hlist_for_each_entry_rcu(mn, n, &mm->mmu_notifier_mm->list, hlist) { 164 hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist) {
170 if (mn->ops->invalidate_range_start) 165 if (mn->ops->invalidate_range_start)
171 mn->ops->invalidate_range_start(mn, mm, start, end); 166 mn->ops->invalidate_range_start(mn, mm, start, end);
172 } 167 }
@@ -178,11 +173,10 @@ void __mmu_notifier_invalidate_range_end(struct mm_struct *mm,
178 unsigned long start, unsigned long end) 173 unsigned long start, unsigned long end)
179{ 174{
180 struct mmu_notifier *mn; 175 struct mmu_notifier *mn;
181 struct hlist_node *n;
182 int id; 176 int id;
183 177
184 id = srcu_read_lock(&srcu); 178 id = srcu_read_lock(&srcu);
185 hlist_for_each_entry_rcu(mn, n, &mm->mmu_notifier_mm->list, hlist) { 179 hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist) {
186 if (mn->ops->invalidate_range_end) 180 if (mn->ops->invalidate_range_end)
187 mn->ops->invalidate_range_end(mn, mm, start, end); 181 mn->ops->invalidate_range_end(mn, mm, start, end);
188 } 182 }
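
The RCU variant follows suit: hlist_for_each_entry_rcu() drops the node argument while the read-side protection stays exactly as before (SRCU in mmu_notifier's case). A sketch with plain RCU and hypothetical names:

#include <linux/rculist.h>
#include <linux/rcupdate.h>

struct my_notifier {			/* hypothetical entry type */
	int (*hook)(struct my_notifier *mn);
	struct hlist_node hlist;
};

static int my_call_hooks(struct hlist_head *head)
{
	struct my_notifier *mn;
	int ret = 0;

	rcu_read_lock();		/* mmu_notifier uses SRCU instead */
	hlist_for_each_entry_rcu(mn, head, hlist)
		if (mn->hook)
			ret |= mn->hook(mn);
	rcu_read_unlock();

	return ret;
}
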
diff --git a/net/9p/error.c b/net/9p/error.c
index 2ab2de76010f..126fd0dceea2 100644
--- a/net/9p/error.c
+++ b/net/9p/error.c
@@ -221,15 +221,13 @@ EXPORT_SYMBOL(p9_error_init);
221int p9_errstr2errno(char *errstr, int len) 221int p9_errstr2errno(char *errstr, int len)
222{ 222{
223 int errno; 223 int errno;
224 struct hlist_node *p;
225 struct errormap *c; 224 struct errormap *c;
226 int bucket; 225 int bucket;
227 226
228 errno = 0; 227 errno = 0;
229 p = NULL;
230 c = NULL; 228 c = NULL;
231 bucket = jhash(errstr, len, 0) % ERRHASHSZ; 229 bucket = jhash(errstr, len, 0) % ERRHASHSZ;
232 hlist_for_each_entry(c, p, &hash_errmap[bucket], list) { 230 hlist_for_each_entry(c, &hash_errmap[bucket], list) {
233 if (c->namelen == len && !memcmp(c->name, errstr, len)) { 231 if (c->namelen == len && !memcmp(c->name, errstr, len)) {
234 errno = c->val; 232 errno = c->val;
235 break; 233 break;
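
Dropping the cursor also lets dead initialisations like the old p = NULL above go away. The lookup shape itself is unchanged: pick a bucket with jhash(), then walk it. A self-contained sketch with hypothetical sizes and types:

#include <linux/jhash.h>
#include <linux/list.h>
#include <linux/string.h>

#define MY_HASH_SZ 32			/* assumed table size */

struct my_errmap {			/* hypothetical entry type */
	char *name;
	int namelen;
	int val;
	struct hlist_node list;
};

static struct hlist_head my_errmap[MY_HASH_SZ];

static int my_errstr2errno(const char *errstr, int len)
{
	struct my_errmap *c;
	int bucket = jhash(errstr, len, 0) % MY_HASH_SZ;

	hlist_for_each_entry(c, &my_errmap[bucket], list)
		if (c->namelen == len && !memcmp(c->name, errstr, len))
			return c->val;

	return 0;			/* unmatched strings map to 0 */
}
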
diff --git a/net/9p/trans_virtio.c b/net/9p/trans_virtio.c
index de2e950a0a7a..74dea377fe5b 100644
--- a/net/9p/trans_virtio.c
+++ b/net/9p/trans_virtio.c
@@ -655,7 +655,7 @@ static struct p9_trans_module p9_virtio_trans = {
655 .create = p9_virtio_create, 655 .create = p9_virtio_create,
656 .close = p9_virtio_close, 656 .close = p9_virtio_close,
657 .request = p9_virtio_request, 657 .request = p9_virtio_request,
658 .zc_request = p9_virtio_zc_request, 658 .zc_request = p9_virtio_zc_request,
659 .cancel = p9_virtio_cancel, 659 .cancel = p9_virtio_cancel,
660 /* 660 /*
661 * We leave one entry for input and one entry for response 661 * We leave one entry for input and one entry for response
diff --git a/net/appletalk/ddp.c b/net/appletalk/ddp.c
index 33475291c9c1..4a141e3cf076 100644
--- a/net/appletalk/ddp.c
+++ b/net/appletalk/ddp.c
@@ -93,10 +93,9 @@ static struct sock *atalk_search_socket(struct sockaddr_at *to,
93 struct atalk_iface *atif) 93 struct atalk_iface *atif)
94{ 94{
95 struct sock *s; 95 struct sock *s;
96 struct hlist_node *node;
97 96
98 read_lock_bh(&atalk_sockets_lock); 97 read_lock_bh(&atalk_sockets_lock);
99 sk_for_each(s, node, &atalk_sockets) { 98 sk_for_each(s, &atalk_sockets) {
100 struct atalk_sock *at = at_sk(s); 99 struct atalk_sock *at = at_sk(s);
101 100
102 if (to->sat_port != at->src_port) 101 if (to->sat_port != at->src_port)
@@ -141,11 +140,10 @@ static struct sock *atalk_find_or_insert_socket(struct sock *sk,
141 struct sockaddr_at *sat) 140 struct sockaddr_at *sat)
142{ 141{
143 struct sock *s; 142 struct sock *s;
144 struct hlist_node *node;
145 struct atalk_sock *at; 143 struct atalk_sock *at;
146 144
147 write_lock_bh(&atalk_sockets_lock); 145 write_lock_bh(&atalk_sockets_lock);
148 sk_for_each(s, node, &atalk_sockets) { 146 sk_for_each(s, &atalk_sockets) {
149 at = at_sk(s); 147 at = at_sk(s);
150 148
151 if (at->src_net == sat->sat_addr.s_net && 149 if (at->src_net == sat->sat_addr.s_net &&
@@ -1084,9 +1082,8 @@ static int atalk_pick_and_bind_port(struct sock *sk, struct sockaddr_at *sat)
1084 sat->sat_port < ATPORT_LAST; 1082 sat->sat_port < ATPORT_LAST;
1085 sat->sat_port++) { 1083 sat->sat_port++) {
1086 struct sock *s; 1084 struct sock *s;
1087 struct hlist_node *node;
1088 1085
1089 sk_for_each(s, node, &atalk_sockets) { 1086 sk_for_each(s, &atalk_sockets) {
1090 struct atalk_sock *at = at_sk(s); 1087 struct atalk_sock *at = at_sk(s);
1091 1088
1092 if (at->src_net == sat->sat_addr.s_net && 1089 if (at->src_net == sat->sat_addr.s_net &&
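
The sk_for_each() family wraps the same iterators, so every protocol's socket walk shrinks by one argument as well. A minimal sketch of the new form (hypothetical list; the caller is assumed to hold the protocol's list lock):

#include <net/sock.h>

static HLIST_HEAD(my_sockets);		/* hypothetical protocol list */

static struct sock *my_find_proto(int proto)
{
	struct sock *s;

	sk_for_each(s, &my_sockets)	/* was: sk_for_each(s, node, head) */
		if (s->sk_protocol == proto)
			return s;

	return NULL;
}
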
diff --git a/net/atm/common.c b/net/atm/common.c
index 806fc0a40051..7b491006eaf4 100644
--- a/net/atm/common.c
+++ b/net/atm/common.c
@@ -270,11 +270,11 @@ void atm_dev_release_vccs(struct atm_dev *dev)
270 write_lock_irq(&vcc_sklist_lock); 270 write_lock_irq(&vcc_sklist_lock);
271 for (i = 0; i < VCC_HTABLE_SIZE; i++) { 271 for (i = 0; i < VCC_HTABLE_SIZE; i++) {
272 struct hlist_head *head = &vcc_hash[i]; 272 struct hlist_head *head = &vcc_hash[i];
273 struct hlist_node *node, *tmp; 273 struct hlist_node *tmp;
274 struct sock *s; 274 struct sock *s;
275 struct atm_vcc *vcc; 275 struct atm_vcc *vcc;
276 276
277 sk_for_each_safe(s, node, tmp, head) { 277 sk_for_each_safe(s, tmp, head) {
278 vcc = atm_sk(s); 278 vcc = atm_sk(s);
279 if (vcc->dev == dev) { 279 if (vcc->dev == dev) {
280 vcc_release_async(vcc, -EPIPE); 280 vcc_release_async(vcc, -EPIPE);
@@ -317,11 +317,10 @@ static int adjust_tp(struct atm_trafprm *tp, unsigned char aal)
317static int check_ci(const struct atm_vcc *vcc, short vpi, int vci) 317static int check_ci(const struct atm_vcc *vcc, short vpi, int vci)
318{ 318{
319 struct hlist_head *head = &vcc_hash[vci & (VCC_HTABLE_SIZE - 1)]; 319 struct hlist_head *head = &vcc_hash[vci & (VCC_HTABLE_SIZE - 1)];
320 struct hlist_node *node;
321 struct sock *s; 320 struct sock *s;
322 struct atm_vcc *walk; 321 struct atm_vcc *walk;
323 322
324 sk_for_each(s, node, head) { 323 sk_for_each(s, head) {
325 walk = atm_sk(s); 324 walk = atm_sk(s);
326 if (walk->dev != vcc->dev) 325 if (walk->dev != vcc->dev)
327 continue; 326 continue;
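
Teardown paths use the safe variant, as atm_dev_release_vccs() does above: the tmp node survives the unlink. A sketch, again under assumed names:

#include <net/sock.h>

static void my_release_all(struct hlist_head *head)
{
	struct hlist_node *tmp;		/* outlives the unlinked socket */
	struct sock *s;

	sk_for_each_safe(s, tmp, head)
		sk_del_node_init(s);	/* unhash and drop the list ref */
}
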
diff --git a/net/atm/lec.c b/net/atm/lec.c
index 2e3d942e77f1..f23916be18fb 100644
--- a/net/atm/lec.c
+++ b/net/atm/lec.c
@@ -842,7 +842,9 @@ static void *lec_tbl_walk(struct lec_state *state, struct hlist_head *tbl,
842 --*l; 842 --*l;
843 } 843 }
844 844
845 hlist_for_each_entry_from(tmp, e, next) { 845 tmp = container_of(e, struct lec_arp_table, next);
846
847 hlist_for_each_entry_from(tmp, next) {
846 if (--*l < 0) 848 if (--*l < 0)
847 break; 849 break;
848 } 850 }
@@ -1307,7 +1309,6 @@ lec_arp_add(struct lec_priv *priv, struct lec_arp_table *entry)
1307static int 1309static int
1308lec_arp_remove(struct lec_priv *priv, struct lec_arp_table *to_remove) 1310lec_arp_remove(struct lec_priv *priv, struct lec_arp_table *to_remove)
1309{ 1311{
1310 struct hlist_node *node;
1311 struct lec_arp_table *entry; 1312 struct lec_arp_table *entry;
1312 int i, remove_vcc = 1; 1313 int i, remove_vcc = 1;
1313 1314
@@ -1326,7 +1327,7 @@ lec_arp_remove(struct lec_priv *priv, struct lec_arp_table *to_remove)
1326 * ESI_FLUSH_PENDING, ESI_FORWARD_DIRECT 1327 * ESI_FLUSH_PENDING, ESI_FORWARD_DIRECT
1327 */ 1328 */
1328 for (i = 0; i < LEC_ARP_TABLE_SIZE; i++) { 1329 for (i = 0; i < LEC_ARP_TABLE_SIZE; i++) {
1329 hlist_for_each_entry(entry, node, 1330 hlist_for_each_entry(entry,
1330 &priv->lec_arp_tables[i], next) { 1331 &priv->lec_arp_tables[i], next) {
1331 if (memcmp(to_remove->atm_addr, 1332 if (memcmp(to_remove->atm_addr,
1332 entry->atm_addr, ATM_ESA_LEN) == 0) { 1333 entry->atm_addr, ATM_ESA_LEN) == 0) {
@@ -1364,14 +1365,13 @@ static const char *get_status_string(unsigned char st)
1364 1365
1365static void dump_arp_table(struct lec_priv *priv) 1366static void dump_arp_table(struct lec_priv *priv)
1366{ 1367{
1367 struct hlist_node *node;
1368 struct lec_arp_table *rulla; 1368 struct lec_arp_table *rulla;
1369 char buf[256]; 1369 char buf[256];
1370 int i, j, offset; 1370 int i, j, offset;
1371 1371
1372 pr_info("Dump %p:\n", priv); 1372 pr_info("Dump %p:\n", priv);
1373 for (i = 0; i < LEC_ARP_TABLE_SIZE; i++) { 1373 for (i = 0; i < LEC_ARP_TABLE_SIZE; i++) {
1374 hlist_for_each_entry(rulla, node, 1374 hlist_for_each_entry(rulla,
1375 &priv->lec_arp_tables[i], next) { 1375 &priv->lec_arp_tables[i], next) {
1376 offset = 0; 1376 offset = 0;
1377 offset += sprintf(buf, "%d: %p\n", i, rulla); 1377 offset += sprintf(buf, "%d: %p\n", i, rulla);
@@ -1403,7 +1403,7 @@ static void dump_arp_table(struct lec_priv *priv)
1403 1403
1404 if (!hlist_empty(&priv->lec_no_forward)) 1404 if (!hlist_empty(&priv->lec_no_forward))
1405 pr_info("No forward\n"); 1405 pr_info("No forward\n");
1406 hlist_for_each_entry(rulla, node, &priv->lec_no_forward, next) { 1406 hlist_for_each_entry(rulla, &priv->lec_no_forward, next) {
1407 offset = 0; 1407 offset = 0;
1408 offset += sprintf(buf + offset, "Mac: %pM", rulla->mac_addr); 1408 offset += sprintf(buf + offset, "Mac: %pM", rulla->mac_addr);
1409 offset += sprintf(buf + offset, " Atm:"); 1409 offset += sprintf(buf + offset, " Atm:");
@@ -1428,7 +1428,7 @@ static void dump_arp_table(struct lec_priv *priv)
1428 1428
1429 if (!hlist_empty(&priv->lec_arp_empty_ones)) 1429 if (!hlist_empty(&priv->lec_arp_empty_ones))
1430 pr_info("Empty ones\n"); 1430 pr_info("Empty ones\n");
1431 hlist_for_each_entry(rulla, node, &priv->lec_arp_empty_ones, next) { 1431 hlist_for_each_entry(rulla, &priv->lec_arp_empty_ones, next) {
1432 offset = 0; 1432 offset = 0;
1433 offset += sprintf(buf + offset, "Mac: %pM", rulla->mac_addr); 1433 offset += sprintf(buf + offset, "Mac: %pM", rulla->mac_addr);
1434 offset += sprintf(buf + offset, " Atm:"); 1434 offset += sprintf(buf + offset, " Atm:");
@@ -1453,7 +1453,7 @@ static void dump_arp_table(struct lec_priv *priv)
1453 1453
1454 if (!hlist_empty(&priv->mcast_fwds)) 1454 if (!hlist_empty(&priv->mcast_fwds))
1455 pr_info("Multicast Forward VCCs\n"); 1455 pr_info("Multicast Forward VCCs\n");
1456 hlist_for_each_entry(rulla, node, &priv->mcast_fwds, next) { 1456 hlist_for_each_entry(rulla, &priv->mcast_fwds, next) {
1457 offset = 0; 1457 offset = 0;
1458 offset += sprintf(buf + offset, "Mac: %pM", rulla->mac_addr); 1458 offset += sprintf(buf + offset, "Mac: %pM", rulla->mac_addr);
1459 offset += sprintf(buf + offset, " Atm:"); 1459 offset += sprintf(buf + offset, " Atm:");
@@ -1487,7 +1487,7 @@ static void dump_arp_table(struct lec_priv *priv)
1487static void lec_arp_destroy(struct lec_priv *priv) 1487static void lec_arp_destroy(struct lec_priv *priv)
1488{ 1488{
1489 unsigned long flags; 1489 unsigned long flags;
1490 struct hlist_node *node, *next; 1490 struct hlist_node *next;
1491 struct lec_arp_table *entry; 1491 struct lec_arp_table *entry;
1492 int i; 1492 int i;
1493 1493
@@ -1499,7 +1499,7 @@ static void lec_arp_destroy(struct lec_priv *priv)
1499 1499
1500 spin_lock_irqsave(&priv->lec_arp_lock, flags); 1500 spin_lock_irqsave(&priv->lec_arp_lock, flags);
1501 for (i = 0; i < LEC_ARP_TABLE_SIZE; i++) { 1501 for (i = 0; i < LEC_ARP_TABLE_SIZE; i++) {
1502 hlist_for_each_entry_safe(entry, node, next, 1502 hlist_for_each_entry_safe(entry, next,
1503 &priv->lec_arp_tables[i], next) { 1503 &priv->lec_arp_tables[i], next) {
1504 lec_arp_remove(priv, entry); 1504 lec_arp_remove(priv, entry);
1505 lec_arp_put(entry); 1505 lec_arp_put(entry);
@@ -1507,7 +1507,7 @@ static void lec_arp_destroy(struct lec_priv *priv)
1507 INIT_HLIST_HEAD(&priv->lec_arp_tables[i]); 1507 INIT_HLIST_HEAD(&priv->lec_arp_tables[i]);
1508 } 1508 }
1509 1509
1510 hlist_for_each_entry_safe(entry, node, next, 1510 hlist_for_each_entry_safe(entry, next,
1511 &priv->lec_arp_empty_ones, next) { 1511 &priv->lec_arp_empty_ones, next) {
1512 del_timer_sync(&entry->timer); 1512 del_timer_sync(&entry->timer);
1513 lec_arp_clear_vccs(entry); 1513 lec_arp_clear_vccs(entry);
@@ -1516,7 +1516,7 @@ static void lec_arp_destroy(struct lec_priv *priv)
1516 } 1516 }
1517 INIT_HLIST_HEAD(&priv->lec_arp_empty_ones); 1517 INIT_HLIST_HEAD(&priv->lec_arp_empty_ones);
1518 1518
1519 hlist_for_each_entry_safe(entry, node, next, 1519 hlist_for_each_entry_safe(entry, next,
1520 &priv->lec_no_forward, next) { 1520 &priv->lec_no_forward, next) {
1521 del_timer_sync(&entry->timer); 1521 del_timer_sync(&entry->timer);
1522 lec_arp_clear_vccs(entry); 1522 lec_arp_clear_vccs(entry);
@@ -1525,7 +1525,7 @@ static void lec_arp_destroy(struct lec_priv *priv)
1525 } 1525 }
1526 INIT_HLIST_HEAD(&priv->lec_no_forward); 1526 INIT_HLIST_HEAD(&priv->lec_no_forward);
1527 1527
1528 hlist_for_each_entry_safe(entry, node, next, &priv->mcast_fwds, next) { 1528 hlist_for_each_entry_safe(entry, next, &priv->mcast_fwds, next) {
1529 /* No timer, LANEv2 7.1.20 and 2.3.5.3 */ 1529 /* No timer, LANEv2 7.1.20 and 2.3.5.3 */
1530 lec_arp_clear_vccs(entry); 1530 lec_arp_clear_vccs(entry);
1531 hlist_del(&entry->next); 1531 hlist_del(&entry->next);
@@ -1542,14 +1542,13 @@ static void lec_arp_destroy(struct lec_priv *priv)
1542static struct lec_arp_table *lec_arp_find(struct lec_priv *priv, 1542static struct lec_arp_table *lec_arp_find(struct lec_priv *priv,
1543 const unsigned char *mac_addr) 1543 const unsigned char *mac_addr)
1544{ 1544{
1545 struct hlist_node *node;
1546 struct hlist_head *head; 1545 struct hlist_head *head;
1547 struct lec_arp_table *entry; 1546 struct lec_arp_table *entry;
1548 1547
1549 pr_debug("%pM\n", mac_addr); 1548 pr_debug("%pM\n", mac_addr);
1550 1549
1551 head = &priv->lec_arp_tables[HASH(mac_addr[ETH_ALEN - 1])]; 1550 head = &priv->lec_arp_tables[HASH(mac_addr[ETH_ALEN - 1])];
1552 hlist_for_each_entry(entry, node, head, next) { 1551 hlist_for_each_entry(entry, head, next) {
1553 if (ether_addr_equal(mac_addr, entry->mac_addr)) 1552 if (ether_addr_equal(mac_addr, entry->mac_addr))
1554 return entry; 1553 return entry;
1555 } 1554 }
@@ -1686,7 +1685,7 @@ static void lec_arp_check_expire(struct work_struct *work)
1686 unsigned long flags; 1685 unsigned long flags;
1687 struct lec_priv *priv = 1686 struct lec_priv *priv =
1688 container_of(work, struct lec_priv, lec_arp_work.work); 1687 container_of(work, struct lec_priv, lec_arp_work.work);
1689 struct hlist_node *node, *next; 1688 struct hlist_node *next;
1690 struct lec_arp_table *entry; 1689 struct lec_arp_table *entry;
1691 unsigned long now; 1690 unsigned long now;
1692 int i; 1691 int i;
@@ -1696,7 +1695,7 @@ static void lec_arp_check_expire(struct work_struct *work)
1696restart: 1695restart:
1697 spin_lock_irqsave(&priv->lec_arp_lock, flags); 1696 spin_lock_irqsave(&priv->lec_arp_lock, flags);
1698 for (i = 0; i < LEC_ARP_TABLE_SIZE; i++) { 1697 for (i = 0; i < LEC_ARP_TABLE_SIZE; i++) {
1699 hlist_for_each_entry_safe(entry, node, next, 1698 hlist_for_each_entry_safe(entry, next,
1700 &priv->lec_arp_tables[i], next) { 1699 &priv->lec_arp_tables[i], next) {
1701 if (__lec_arp_check_expire(entry, now, priv)) { 1700 if (__lec_arp_check_expire(entry, now, priv)) {
1702 struct sk_buff *skb; 1701 struct sk_buff *skb;
@@ -1823,14 +1822,14 @@ lec_addr_delete(struct lec_priv *priv, const unsigned char *atm_addr,
1823 unsigned long permanent) 1822 unsigned long permanent)
1824{ 1823{
1825 unsigned long flags; 1824 unsigned long flags;
1826 struct hlist_node *node, *next; 1825 struct hlist_node *next;
1827 struct lec_arp_table *entry; 1826 struct lec_arp_table *entry;
1828 int i; 1827 int i;
1829 1828
1830 pr_debug("\n"); 1829 pr_debug("\n");
1831 spin_lock_irqsave(&priv->lec_arp_lock, flags); 1830 spin_lock_irqsave(&priv->lec_arp_lock, flags);
1832 for (i = 0; i < LEC_ARP_TABLE_SIZE; i++) { 1831 for (i = 0; i < LEC_ARP_TABLE_SIZE; i++) {
1833 hlist_for_each_entry_safe(entry, node, next, 1832 hlist_for_each_entry_safe(entry, next,
1834 &priv->lec_arp_tables[i], next) { 1833 &priv->lec_arp_tables[i], next) {
1835 if (!memcmp(atm_addr, entry->atm_addr, ATM_ESA_LEN) && 1834 if (!memcmp(atm_addr, entry->atm_addr, ATM_ESA_LEN) &&
1836 (permanent || 1835 (permanent ||
@@ -1855,7 +1854,7 @@ lec_arp_update(struct lec_priv *priv, const unsigned char *mac_addr,
1855 unsigned int targetless_le_arp) 1854 unsigned int targetless_le_arp)
1856{ 1855{
1857 unsigned long flags; 1856 unsigned long flags;
1858 struct hlist_node *node, *next; 1857 struct hlist_node *next;
1859 struct lec_arp_table *entry, *tmp; 1858 struct lec_arp_table *entry, *tmp;
1860 int i; 1859 int i;
1861 1860
@@ -1870,7 +1869,7 @@ lec_arp_update(struct lec_priv *priv, const unsigned char *mac_addr,
1870 * we have no entry in the cache. 7.1.30 1869 * we have no entry in the cache. 7.1.30
1871 */ 1870 */
1872 if (!hlist_empty(&priv->lec_arp_empty_ones)) { 1871 if (!hlist_empty(&priv->lec_arp_empty_ones)) {
1873 hlist_for_each_entry_safe(entry, node, next, 1872 hlist_for_each_entry_safe(entry, next,
1874 &priv->lec_arp_empty_ones, next) { 1873 &priv->lec_arp_empty_ones, next) {
1875 if (memcmp(entry->atm_addr, atm_addr, ATM_ESA_LEN) == 0) { 1874 if (memcmp(entry->atm_addr, atm_addr, ATM_ESA_LEN) == 0) {
1876 hlist_del(&entry->next); 1875 hlist_del(&entry->next);
@@ -1915,7 +1914,7 @@ lec_arp_update(struct lec_priv *priv, const unsigned char *mac_addr,
1915 memcpy(entry->atm_addr, atm_addr, ATM_ESA_LEN); 1914 memcpy(entry->atm_addr, atm_addr, ATM_ESA_LEN);
1916 del_timer(&entry->timer); 1915 del_timer(&entry->timer);
1917 for (i = 0; i < LEC_ARP_TABLE_SIZE; i++) { 1916 for (i = 0; i < LEC_ARP_TABLE_SIZE; i++) {
1918 hlist_for_each_entry(tmp, node, 1917 hlist_for_each_entry(tmp,
1919 &priv->lec_arp_tables[i], next) { 1918 &priv->lec_arp_tables[i], next) {
1920 if (entry != tmp && 1919 if (entry != tmp &&
1921 !memcmp(tmp->atm_addr, atm_addr, ATM_ESA_LEN)) { 1920 !memcmp(tmp->atm_addr, atm_addr, ATM_ESA_LEN)) {
@@ -1956,7 +1955,6 @@ lec_vcc_added(struct lec_priv *priv, const struct atmlec_ioc *ioc_data,
1956 void (*old_push) (struct atm_vcc *vcc, struct sk_buff *skb)) 1955 void (*old_push) (struct atm_vcc *vcc, struct sk_buff *skb))
1957{ 1956{
1958 unsigned long flags; 1957 unsigned long flags;
1959 struct hlist_node *node;
1960 struct lec_arp_table *entry; 1958 struct lec_arp_table *entry;
1961 int i, found_entry = 0; 1959 int i, found_entry = 0;
1962 1960
@@ -2026,7 +2024,7 @@ lec_vcc_added(struct lec_priv *priv, const struct atmlec_ioc *ioc_data,
2026 ioc_data->atm_addr[16], ioc_data->atm_addr[17], 2024 ioc_data->atm_addr[16], ioc_data->atm_addr[17],
2027 ioc_data->atm_addr[18], ioc_data->atm_addr[19]); 2025 ioc_data->atm_addr[18], ioc_data->atm_addr[19]);
2028 for (i = 0; i < LEC_ARP_TABLE_SIZE; i++) { 2026 for (i = 0; i < LEC_ARP_TABLE_SIZE; i++) {
2029 hlist_for_each_entry(entry, node, 2027 hlist_for_each_entry(entry,
2030 &priv->lec_arp_tables[i], next) { 2028 &priv->lec_arp_tables[i], next) {
2031 if (memcmp 2029 if (memcmp
2032 (ioc_data->atm_addr, entry->atm_addr, 2030 (ioc_data->atm_addr, entry->atm_addr,
@@ -2103,7 +2101,6 @@ out:
2103static void lec_flush_complete(struct lec_priv *priv, unsigned long tran_id) 2101static void lec_flush_complete(struct lec_priv *priv, unsigned long tran_id)
2104{ 2102{
2105 unsigned long flags; 2103 unsigned long flags;
2106 struct hlist_node *node;
2107 struct lec_arp_table *entry; 2104 struct lec_arp_table *entry;
2108 int i; 2105 int i;
2109 2106
@@ -2111,7 +2108,7 @@ static void lec_flush_complete(struct lec_priv *priv, unsigned long tran_id)
2111restart: 2108restart:
2112 spin_lock_irqsave(&priv->lec_arp_lock, flags); 2109 spin_lock_irqsave(&priv->lec_arp_lock, flags);
2113 for (i = 0; i < LEC_ARP_TABLE_SIZE; i++) { 2110 for (i = 0; i < LEC_ARP_TABLE_SIZE; i++) {
2114 hlist_for_each_entry(entry, node, 2111 hlist_for_each_entry(entry,
2115 &priv->lec_arp_tables[i], next) { 2112 &priv->lec_arp_tables[i], next) {
2116 if (entry->flush_tran_id == tran_id && 2113 if (entry->flush_tran_id == tran_id &&
2117 entry->status == ESI_FLUSH_PENDING) { 2114 entry->status == ESI_FLUSH_PENDING) {
@@ -2140,13 +2137,12 @@ lec_set_flush_tran_id(struct lec_priv *priv,
2140 const unsigned char *atm_addr, unsigned long tran_id) 2137 const unsigned char *atm_addr, unsigned long tran_id)
2141{ 2138{
2142 unsigned long flags; 2139 unsigned long flags;
2143 struct hlist_node *node;
2144 struct lec_arp_table *entry; 2140 struct lec_arp_table *entry;
2145 int i; 2141 int i;
2146 2142
2147 spin_lock_irqsave(&priv->lec_arp_lock, flags); 2143 spin_lock_irqsave(&priv->lec_arp_lock, flags);
2148 for (i = 0; i < LEC_ARP_TABLE_SIZE; i++) 2144 for (i = 0; i < LEC_ARP_TABLE_SIZE; i++)
2149 hlist_for_each_entry(entry, node, 2145 hlist_for_each_entry(entry,
2150 &priv->lec_arp_tables[i], next) { 2146 &priv->lec_arp_tables[i], next) {
2151 if (!memcmp(atm_addr, entry->atm_addr, ATM_ESA_LEN)) { 2147 if (!memcmp(atm_addr, entry->atm_addr, ATM_ESA_LEN)) {
2152 entry->flush_tran_id = tran_id; 2148 entry->flush_tran_id = tran_id;
@@ -2198,7 +2194,7 @@ out:
2198static void lec_vcc_close(struct lec_priv *priv, struct atm_vcc *vcc) 2194static void lec_vcc_close(struct lec_priv *priv, struct atm_vcc *vcc)
2199{ 2195{
2200 unsigned long flags; 2196 unsigned long flags;
2201 struct hlist_node *node, *next; 2197 struct hlist_node *next;
2202 struct lec_arp_table *entry; 2198 struct lec_arp_table *entry;
2203 int i; 2199 int i;
2204 2200
@@ -2208,7 +2204,7 @@ static void lec_vcc_close(struct lec_priv *priv, struct atm_vcc *vcc)
2208 spin_lock_irqsave(&priv->lec_arp_lock, flags); 2204 spin_lock_irqsave(&priv->lec_arp_lock, flags);
2209 2205
2210 for (i = 0; i < LEC_ARP_TABLE_SIZE; i++) { 2206 for (i = 0; i < LEC_ARP_TABLE_SIZE; i++) {
2211 hlist_for_each_entry_safe(entry, node, next, 2207 hlist_for_each_entry_safe(entry, next,
2212 &priv->lec_arp_tables[i], next) { 2208 &priv->lec_arp_tables[i], next) {
2213 if (vcc == entry->vcc) { 2209 if (vcc == entry->vcc) {
2214 lec_arp_remove(priv, entry); 2210 lec_arp_remove(priv, entry);
@@ -2219,7 +2215,7 @@ static void lec_vcc_close(struct lec_priv *priv, struct atm_vcc *vcc)
2219 } 2215 }
2220 } 2216 }
2221 2217
2222 hlist_for_each_entry_safe(entry, node, next, 2218 hlist_for_each_entry_safe(entry, next,
2223 &priv->lec_arp_empty_ones, next) { 2219 &priv->lec_arp_empty_ones, next) {
2224 if (entry->vcc == vcc) { 2220 if (entry->vcc == vcc) {
2225 lec_arp_clear_vccs(entry); 2221 lec_arp_clear_vccs(entry);
@@ -2229,7 +2225,7 @@ static void lec_vcc_close(struct lec_priv *priv, struct atm_vcc *vcc)
2229 } 2225 }
2230 } 2226 }
2231 2227
2232 hlist_for_each_entry_safe(entry, node, next, 2228 hlist_for_each_entry_safe(entry, next,
2233 &priv->lec_no_forward, next) { 2229 &priv->lec_no_forward, next) {
2234 if (entry->recv_vcc == vcc) { 2230 if (entry->recv_vcc == vcc) {
2235 lec_arp_clear_vccs(entry); 2231 lec_arp_clear_vccs(entry);
@@ -2239,7 +2235,7 @@ static void lec_vcc_close(struct lec_priv *priv, struct atm_vcc *vcc)
2239 } 2235 }
2240 } 2236 }
2241 2237
2242 hlist_for_each_entry_safe(entry, node, next, &priv->mcast_fwds, next) { 2238 hlist_for_each_entry_safe(entry, next, &priv->mcast_fwds, next) {
2243 if (entry->recv_vcc == vcc) { 2239 if (entry->recv_vcc == vcc) {
2244 lec_arp_clear_vccs(entry); 2240 lec_arp_clear_vccs(entry);
2245 /* No timer, LANEv2 7.1.20 and 2.3.5.3 */ 2241 /* No timer, LANEv2 7.1.20 and 2.3.5.3 */
@@ -2257,13 +2253,13 @@ lec_arp_check_empties(struct lec_priv *priv,
2257 struct atm_vcc *vcc, struct sk_buff *skb) 2253 struct atm_vcc *vcc, struct sk_buff *skb)
2258{ 2254{
2259 unsigned long flags; 2255 unsigned long flags;
2260 struct hlist_node *node, *next; 2256 struct hlist_node *next;
2261 struct lec_arp_table *entry, *tmp; 2257 struct lec_arp_table *entry, *tmp;
2262 struct lecdatahdr_8023 *hdr = (struct lecdatahdr_8023 *)skb->data; 2258 struct lecdatahdr_8023 *hdr = (struct lecdatahdr_8023 *)skb->data;
2263 unsigned char *src = hdr->h_source; 2259 unsigned char *src = hdr->h_source;
2264 2260
2265 spin_lock_irqsave(&priv->lec_arp_lock, flags); 2261 spin_lock_irqsave(&priv->lec_arp_lock, flags);
2266 hlist_for_each_entry_safe(entry, node, next, 2262 hlist_for_each_entry_safe(entry, next,
2267 &priv->lec_arp_empty_ones, next) { 2263 &priv->lec_arp_empty_ones, next) {
2268 if (vcc == entry->vcc) { 2264 if (vcc == entry->vcc) {
2269 del_timer(&entry->timer); 2265 del_timer(&entry->timer);
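
lec_tbl_walk() is the one spot in this file where iteration resumed from a raw struct hlist_node, which is why the hunk at lines 845-847 has to seed the typed cursor with container_of() before calling hlist_for_each_entry_from(). The conversion in isolation, with a hypothetical my_entry type:

#include <linux/kernel.h>
#include <linux/list.h>

struct my_entry {			/* hypothetical entry type */
	struct hlist_node next;		/* same member name as lec's tables */
};

/* Visit at most 'budget' entries starting at raw node 'e'. */
static int my_walk_from(struct hlist_node *e, int budget)
{
	struct my_entry *tmp;
	int n = 0;

	/* The _from iterator now starts from a typed entry, so a raw
	 * node has to be converted back to its container first. */
	tmp = container_of(e, struct my_entry, next);

	hlist_for_each_entry_from(tmp, next)
		if (++n >= budget)
			break;

	return n;
}
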
diff --git a/net/atm/signaling.c b/net/atm/signaling.c
index 86767ca908a3..4176887e72eb 100644
--- a/net/atm/signaling.c
+++ b/net/atm/signaling.c
@@ -217,7 +217,6 @@ static void purge_vcc(struct atm_vcc *vcc)
217 217
218static void sigd_close(struct atm_vcc *vcc) 218static void sigd_close(struct atm_vcc *vcc)
219{ 219{
220 struct hlist_node *node;
221 struct sock *s; 220 struct sock *s;
222 int i; 221 int i;
223 222
@@ -231,7 +230,7 @@ static void sigd_close(struct atm_vcc *vcc)
231 for (i = 0; i < VCC_HTABLE_SIZE; ++i) { 230 for (i = 0; i < VCC_HTABLE_SIZE; ++i) {
232 struct hlist_head *head = &vcc_hash[i]; 231 struct hlist_head *head = &vcc_hash[i];
233 232
234 sk_for_each(s, node, head) { 233 sk_for_each(s, head) {
235 vcc = atm_sk(s); 234 vcc = atm_sk(s);
236 235
237 purge_vcc(vcc); 236 purge_vcc(vcc);
diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c
index 69a06c47b648..7b11f8bc5071 100644
--- a/net/ax25/af_ax25.c
+++ b/net/ax25/af_ax25.c
@@ -81,14 +81,13 @@ static void ax25_kill_by_device(struct net_device *dev)
81{ 81{
82 ax25_dev *ax25_dev; 82 ax25_dev *ax25_dev;
83 ax25_cb *s; 83 ax25_cb *s;
84 struct hlist_node *node;
85 84
86 if ((ax25_dev = ax25_dev_ax25dev(dev)) == NULL) 85 if ((ax25_dev = ax25_dev_ax25dev(dev)) == NULL)
87 return; 86 return;
88 87
89 spin_lock_bh(&ax25_list_lock); 88 spin_lock_bh(&ax25_list_lock);
90again: 89again:
91 ax25_for_each(s, node, &ax25_list) { 90 ax25_for_each(s, &ax25_list) {
92 if (s->ax25_dev == ax25_dev) { 91 if (s->ax25_dev == ax25_dev) {
93 s->ax25_dev = NULL; 92 s->ax25_dev = NULL;
94 spin_unlock_bh(&ax25_list_lock); 93 spin_unlock_bh(&ax25_list_lock);
@@ -158,10 +157,9 @@ struct sock *ax25_find_listener(ax25_address *addr, int digi,
158 struct net_device *dev, int type) 157 struct net_device *dev, int type)
159{ 158{
160 ax25_cb *s; 159 ax25_cb *s;
161 struct hlist_node *node;
162 160
163 spin_lock(&ax25_list_lock); 161 spin_lock(&ax25_list_lock);
164 ax25_for_each(s, node, &ax25_list) { 162 ax25_for_each(s, &ax25_list) {
165 if ((s->iamdigi && !digi) || (!s->iamdigi && digi)) 163 if ((s->iamdigi && !digi) || (!s->iamdigi && digi))
166 continue; 164 continue;
167 if (s->sk && !ax25cmp(&s->source_addr, addr) && 165 if (s->sk && !ax25cmp(&s->source_addr, addr) &&
@@ -187,10 +185,9 @@ struct sock *ax25_get_socket(ax25_address *my_addr, ax25_address *dest_addr,
187{ 185{
188 struct sock *sk = NULL; 186 struct sock *sk = NULL;
189 ax25_cb *s; 187 ax25_cb *s;
190 struct hlist_node *node;
191 188
192 spin_lock(&ax25_list_lock); 189 spin_lock(&ax25_list_lock);
193 ax25_for_each(s, node, &ax25_list) { 190 ax25_for_each(s, &ax25_list) {
194 if (s->sk && !ax25cmp(&s->source_addr, my_addr) && 191 if (s->sk && !ax25cmp(&s->source_addr, my_addr) &&
195 !ax25cmp(&s->dest_addr, dest_addr) && 192 !ax25cmp(&s->dest_addr, dest_addr) &&
196 s->sk->sk_type == type) { 193 s->sk->sk_type == type) {
@@ -213,10 +210,9 @@ ax25_cb *ax25_find_cb(ax25_address *src_addr, ax25_address *dest_addr,
213 ax25_digi *digi, struct net_device *dev) 210 ax25_digi *digi, struct net_device *dev)
214{ 211{
215 ax25_cb *s; 212 ax25_cb *s;
216 struct hlist_node *node;
217 213
218 spin_lock_bh(&ax25_list_lock); 214 spin_lock_bh(&ax25_list_lock);
219 ax25_for_each(s, node, &ax25_list) { 215 ax25_for_each(s, &ax25_list) {
220 if (s->sk && s->sk->sk_type != SOCK_SEQPACKET) 216 if (s->sk && s->sk->sk_type != SOCK_SEQPACKET)
221 continue; 217 continue;
222 if (s->ax25_dev == NULL) 218 if (s->ax25_dev == NULL)
@@ -248,10 +244,9 @@ void ax25_send_to_raw(ax25_address *addr, struct sk_buff *skb, int proto)
248{ 244{
249 ax25_cb *s; 245 ax25_cb *s;
250 struct sk_buff *copy; 246 struct sk_buff *copy;
251 struct hlist_node *node;
252 247
253 spin_lock(&ax25_list_lock); 248 spin_lock(&ax25_list_lock);
254 ax25_for_each(s, node, &ax25_list) { 249 ax25_for_each(s, &ax25_list) {
255 if (s->sk != NULL && ax25cmp(&s->source_addr, addr) == 0 && 250 if (s->sk != NULL && ax25cmp(&s->source_addr, addr) == 0 &&
256 s->sk->sk_type == SOCK_RAW && 251 s->sk->sk_type == SOCK_RAW &&
257 s->sk->sk_protocol == proto && 252 s->sk->sk_protocol == proto &&
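
ax25_for_each() is one of several per-subsystem aliases for hlist_for_each_entry(), so the wrapper loses its node parameter once and every caller above shrinks with it. Roughly the post-series shape of such a wrapper, with the member name assumed:

#include <linux/list.h>

/* sketch of the converted wrapper; ax25_node member name assumed */
#define my_ax25_for_each(ax25, head) \
	hlist_for_each_entry(ax25, head, ax25_node)
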
diff --git a/net/ax25/ax25_ds_subr.c b/net/ax25/ax25_ds_subr.c
index 5ea7fd3e2af9..e05bd57b5afd 100644
--- a/net/ax25/ax25_ds_subr.c
+++ b/net/ax25/ax25_ds_subr.c
@@ -39,7 +39,6 @@ void ax25_ds_nr_error_recovery(ax25_cb *ax25)
39void ax25_ds_enquiry_response(ax25_cb *ax25) 39void ax25_ds_enquiry_response(ax25_cb *ax25)
40{ 40{
41 ax25_cb *ax25o; 41 ax25_cb *ax25o;
42 struct hlist_node *node;
43 42
44 /* Please note that neither DK4EG's nor DG2FEF's 43 /* Please note that neither DK4EG's nor DG2FEF's
45 * DAMA spec mention the following behaviour as seen 44 * DAMA spec mention the following behaviour as seen
@@ -80,7 +79,7 @@ void ax25_ds_enquiry_response(ax25_cb *ax25)
80 ax25_ds_set_timer(ax25->ax25_dev); 79 ax25_ds_set_timer(ax25->ax25_dev);
81 80
82 spin_lock(&ax25_list_lock); 81 spin_lock(&ax25_list_lock);
83 ax25_for_each(ax25o, node, &ax25_list) { 82 ax25_for_each(ax25o, &ax25_list) {
84 if (ax25o == ax25) 83 if (ax25o == ax25)
85 continue; 84 continue;
86 85
@@ -159,10 +158,9 @@ static int ax25_check_dama_slave(ax25_dev *ax25_dev)
159{ 158{
160 ax25_cb *ax25; 159 ax25_cb *ax25;
161 int res = 0; 160 int res = 0;
162 struct hlist_node *node;
163 161
164 spin_lock(&ax25_list_lock); 162 spin_lock(&ax25_list_lock);
165 ax25_for_each(ax25, node, &ax25_list) 163 ax25_for_each(ax25, &ax25_list)
166 if (ax25->ax25_dev == ax25_dev && (ax25->condition & AX25_COND_DAMA_MODE) && ax25->state > AX25_STATE_1) { 164 if (ax25->ax25_dev == ax25_dev && (ax25->condition & AX25_COND_DAMA_MODE) && ax25->state > AX25_STATE_1) {
167 res = 1; 165 res = 1;
168 break; 166 break;
diff --git a/net/ax25/ax25_ds_timer.c b/net/ax25/ax25_ds_timer.c
index 993c439b4f71..951cd57bb07d 100644
--- a/net/ax25/ax25_ds_timer.c
+++ b/net/ax25/ax25_ds_timer.c
@@ -70,7 +70,6 @@ static void ax25_ds_timeout(unsigned long arg)
70{ 70{
71 ax25_dev *ax25_dev = (struct ax25_dev *) arg; 71 ax25_dev *ax25_dev = (struct ax25_dev *) arg;
72 ax25_cb *ax25; 72 ax25_cb *ax25;
73 struct hlist_node *node;
74 73
75 if (ax25_dev == NULL || !ax25_dev->dama.slave) 74 if (ax25_dev == NULL || !ax25_dev->dama.slave)
76 return; /* Yikes! */ 75 return; /* Yikes! */
@@ -81,7 +80,7 @@ static void ax25_ds_timeout(unsigned long arg)
81 } 80 }
82 81
83 spin_lock(&ax25_list_lock); 82 spin_lock(&ax25_list_lock);
84 ax25_for_each(ax25, node, &ax25_list) { 83 ax25_for_each(ax25, &ax25_list) {
85 if (ax25->ax25_dev != ax25_dev || !(ax25->condition & AX25_COND_DAMA_MODE)) 84 if (ax25->ax25_dev != ax25_dev || !(ax25->condition & AX25_COND_DAMA_MODE))
86 continue; 85 continue;
87 86
diff --git a/net/ax25/ax25_iface.c b/net/ax25/ax25_iface.c
index 7d5f24b82cc8..7f16e8a931b2 100644
--- a/net/ax25/ax25_iface.c
+++ b/net/ax25/ax25_iface.c
@@ -193,10 +193,9 @@ int ax25_listen_mine(ax25_address *callsign, struct net_device *dev)
193void ax25_link_failed(ax25_cb *ax25, int reason) 193void ax25_link_failed(ax25_cb *ax25, int reason)
194{ 194{
195 struct ax25_linkfail *lf; 195 struct ax25_linkfail *lf;
196 struct hlist_node *node;
197 196
198 spin_lock_bh(&linkfail_lock); 197 spin_lock_bh(&linkfail_lock);
199 hlist_for_each_entry(lf, node, &ax25_linkfail_list, lf_node) 198 hlist_for_each_entry(lf, &ax25_linkfail_list, lf_node)
200 lf->func(ax25, reason); 199 lf->func(ax25, reason);
201 spin_unlock_bh(&linkfail_lock); 200 spin_unlock_bh(&linkfail_lock);
202} 201}
diff --git a/net/ax25/ax25_uid.c b/net/ax25/ax25_uid.c
index 957999e43ff7..71c4badbc807 100644
--- a/net/ax25/ax25_uid.c
+++ b/net/ax25/ax25_uid.c
@@ -54,10 +54,9 @@ EXPORT_SYMBOL(ax25_uid_policy);
54ax25_uid_assoc *ax25_findbyuid(kuid_t uid) 54ax25_uid_assoc *ax25_findbyuid(kuid_t uid)
55{ 55{
56 ax25_uid_assoc *ax25_uid, *res = NULL; 56 ax25_uid_assoc *ax25_uid, *res = NULL;
57 struct hlist_node *node;
58 57
59 read_lock(&ax25_uid_lock); 58 read_lock(&ax25_uid_lock);
60 ax25_uid_for_each(ax25_uid, node, &ax25_uid_list) { 59 ax25_uid_for_each(ax25_uid, &ax25_uid_list) {
61 if (uid_eq(ax25_uid->uid, uid)) { 60 if (uid_eq(ax25_uid->uid, uid)) {
62 ax25_uid_hold(ax25_uid); 61 ax25_uid_hold(ax25_uid);
63 res = ax25_uid; 62 res = ax25_uid;
@@ -74,7 +73,6 @@ EXPORT_SYMBOL(ax25_findbyuid);
74int ax25_uid_ioctl(int cmd, struct sockaddr_ax25 *sax) 73int ax25_uid_ioctl(int cmd, struct sockaddr_ax25 *sax)
75{ 74{
76 ax25_uid_assoc *ax25_uid; 75 ax25_uid_assoc *ax25_uid;
77 struct hlist_node *node;
78 ax25_uid_assoc *user; 76 ax25_uid_assoc *user;
79 unsigned long res; 77 unsigned long res;
80 78
@@ -82,7 +80,7 @@ int ax25_uid_ioctl(int cmd, struct sockaddr_ax25 *sax)
82 case SIOCAX25GETUID: 80 case SIOCAX25GETUID:
83 res = -ENOENT; 81 res = -ENOENT;
84 read_lock(&ax25_uid_lock); 82 read_lock(&ax25_uid_lock);
85 ax25_uid_for_each(ax25_uid, node, &ax25_uid_list) { 83 ax25_uid_for_each(ax25_uid, &ax25_uid_list) {
86 if (ax25cmp(&sax->sax25_call, &ax25_uid->call) == 0) { 84 if (ax25cmp(&sax->sax25_call, &ax25_uid->call) == 0) {
87 res = from_kuid_munged(current_user_ns(), ax25_uid->uid); 85 res = from_kuid_munged(current_user_ns(), ax25_uid->uid);
88 break; 86 break;
@@ -126,7 +124,7 @@ int ax25_uid_ioctl(int cmd, struct sockaddr_ax25 *sax)
126 124
127 ax25_uid = NULL; 125 ax25_uid = NULL;
128 write_lock(&ax25_uid_lock); 126 write_lock(&ax25_uid_lock);
129 ax25_uid_for_each(ax25_uid, node, &ax25_uid_list) { 127 ax25_uid_for_each(ax25_uid, &ax25_uid_list) {
130 if (ax25cmp(&sax->sax25_call, &ax25_uid->call) == 0) 128 if (ax25cmp(&sax->sax25_call, &ax25_uid->call) == 0)
131 break; 129 break;
132 } 130 }
@@ -212,11 +210,10 @@ const struct file_operations ax25_uid_fops = {
212void __exit ax25_uid_free(void) 210void __exit ax25_uid_free(void)
213{ 211{
214 ax25_uid_assoc *ax25_uid; 212 ax25_uid_assoc *ax25_uid;
215 struct hlist_node *node;
216 213
217 write_lock(&ax25_uid_lock); 214 write_lock(&ax25_uid_lock);
218again: 215again:
219 ax25_uid_for_each(ax25_uid, node, &ax25_uid_list) { 216 ax25_uid_for_each(ax25_uid, &ax25_uid_list) {
220 hlist_del_init(&ax25_uid->uid_node); 217 hlist_del_init(&ax25_uid->uid_node);
221 ax25_uid_put(ax25_uid); 218 ax25_uid_put(ax25_uid);
222 goto again; 219 goto again;
diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c
index 72fe1bbf7721..a0b253ecadaf 100644
--- a/net/batman-adv/bat_iv_ogm.c
+++ b/net/batman-adv/bat_iv_ogm.c
@@ -487,7 +487,6 @@ static void batadv_iv_ogm_queue_add(struct batadv_priv *bat_priv,
487 */ 487 */
488 struct batadv_forw_packet *forw_packet_aggr = NULL; 488 struct batadv_forw_packet *forw_packet_aggr = NULL;
489 struct batadv_forw_packet *forw_packet_pos = NULL; 489 struct batadv_forw_packet *forw_packet_pos = NULL;
490 struct hlist_node *tmp_node;
491 struct batadv_ogm_packet *batadv_ogm_packet; 490 struct batadv_ogm_packet *batadv_ogm_packet;
492 bool direct_link; 491 bool direct_link;
493 unsigned long max_aggregation_jiffies; 492 unsigned long max_aggregation_jiffies;
@@ -500,7 +499,7 @@ static void batadv_iv_ogm_queue_add(struct batadv_priv *bat_priv,
500 spin_lock_bh(&bat_priv->forw_bat_list_lock); 499 spin_lock_bh(&bat_priv->forw_bat_list_lock);
501 /* own packets are not to be aggregated */ 500 /* own packets are not to be aggregated */
502 if ((atomic_read(&bat_priv->aggregated_ogms)) && (!own_packet)) { 501 if ((atomic_read(&bat_priv->aggregated_ogms)) && (!own_packet)) {
503 hlist_for_each_entry(forw_packet_pos, tmp_node, 502 hlist_for_each_entry(forw_packet_pos,
504 &bat_priv->forw_bat_list, list) { 503 &bat_priv->forw_bat_list, list) {
505 if (batadv_iv_ogm_can_aggregate(batadv_ogm_packet, 504 if (batadv_iv_ogm_can_aggregate(batadv_ogm_packet,
506 bat_priv, packet_len, 505 bat_priv, packet_len,
@@ -655,7 +654,6 @@ batadv_iv_ogm_orig_update(struct batadv_priv *bat_priv,
655 struct batadv_neigh_node *neigh_node = NULL, *tmp_neigh_node = NULL; 654 struct batadv_neigh_node *neigh_node = NULL, *tmp_neigh_node = NULL;
656 struct batadv_neigh_node *router = NULL; 655 struct batadv_neigh_node *router = NULL;
657 struct batadv_orig_node *orig_node_tmp; 656 struct batadv_orig_node *orig_node_tmp;
658 struct hlist_node *node;
659 int if_num; 657 int if_num;
660 uint8_t sum_orig, sum_neigh; 658 uint8_t sum_orig, sum_neigh;
661 uint8_t *neigh_addr; 659 uint8_t *neigh_addr;
@@ -665,7 +663,7 @@ batadv_iv_ogm_orig_update(struct batadv_priv *bat_priv,
665 "update_originator(): Searching and updating originator entry of received packet\n"); 663 "update_originator(): Searching and updating originator entry of received packet\n");
666 664
667 rcu_read_lock(); 665 rcu_read_lock();
668 hlist_for_each_entry_rcu(tmp_neigh_node, node, 666 hlist_for_each_entry_rcu(tmp_neigh_node,
669 &orig_node->neigh_list, list) { 667 &orig_node->neigh_list, list) {
670 neigh_addr = tmp_neigh_node->addr; 668 neigh_addr = tmp_neigh_node->addr;
671 if (batadv_compare_eth(neigh_addr, ethhdr->h_source) && 669 if (batadv_compare_eth(neigh_addr, ethhdr->h_source) &&
@@ -801,7 +799,6 @@ static int batadv_iv_ogm_calc_tq(struct batadv_orig_node *orig_node,
801{ 799{
802 struct batadv_priv *bat_priv = netdev_priv(if_incoming->soft_iface); 800 struct batadv_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
803 struct batadv_neigh_node *neigh_node = NULL, *tmp_neigh_node; 801 struct batadv_neigh_node *neigh_node = NULL, *tmp_neigh_node;
804 struct hlist_node *node;
805 uint8_t total_count; 802 uint8_t total_count;
806 uint8_t orig_eq_count, neigh_rq_count, neigh_rq_inv, tq_own; 803 uint8_t orig_eq_count, neigh_rq_count, neigh_rq_inv, tq_own;
807 unsigned int neigh_rq_inv_cube, neigh_rq_max_cube; 804 unsigned int neigh_rq_inv_cube, neigh_rq_max_cube;
@@ -810,7 +807,7 @@ static int batadv_iv_ogm_calc_tq(struct batadv_orig_node *orig_node,
810 807
811 /* find corresponding one hop neighbor */ 808 /* find corresponding one hop neighbor */
812 rcu_read_lock(); 809 rcu_read_lock();
813 hlist_for_each_entry_rcu(tmp_neigh_node, node, 810 hlist_for_each_entry_rcu(tmp_neigh_node,
814 &orig_neigh_node->neigh_list, list) { 811 &orig_neigh_node->neigh_list, list) {
815 if (!batadv_compare_eth(tmp_neigh_node->addr, 812 if (!batadv_compare_eth(tmp_neigh_node->addr,
816 orig_neigh_node->orig)) 813 orig_neigh_node->orig))
@@ -920,7 +917,6 @@ batadv_iv_ogm_update_seqnos(const struct ethhdr *ethhdr,
920 struct batadv_priv *bat_priv = netdev_priv(if_incoming->soft_iface); 917 struct batadv_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
921 struct batadv_orig_node *orig_node; 918 struct batadv_orig_node *orig_node;
922 struct batadv_neigh_node *tmp_neigh_node; 919 struct batadv_neigh_node *tmp_neigh_node;
923 struct hlist_node *node;
924 int is_duplicate = 0; 920 int is_duplicate = 0;
925 int32_t seq_diff; 921 int32_t seq_diff;
926 int need_update = 0; 922 int need_update = 0;
@@ -943,7 +939,7 @@ batadv_iv_ogm_update_seqnos(const struct ethhdr *ethhdr,
943 goto out; 939 goto out;
944 940
945 rcu_read_lock(); 941 rcu_read_lock();
946 hlist_for_each_entry_rcu(tmp_neigh_node, node, 942 hlist_for_each_entry_rcu(tmp_neigh_node,
947 &orig_node->neigh_list, list) { 943 &orig_node->neigh_list, list) {
948 is_duplicate |= batadv_test_bit(tmp_neigh_node->real_bits, 944 is_duplicate |= batadv_test_bit(tmp_neigh_node->real_bits,
949 orig_node->last_real_seqno, 945 orig_node->last_real_seqno,
diff --git a/net/batman-adv/bridge_loop_avoidance.c b/net/batman-adv/bridge_loop_avoidance.c
index 30f46526cbbd..6a4f728680ae 100644
--- a/net/batman-adv/bridge_loop_avoidance.c
+++ b/net/batman-adv/bridge_loop_avoidance.c
@@ -144,7 +144,6 @@ static struct batadv_bla_claim
 {
 	struct batadv_hashtable *hash = bat_priv->bla.claim_hash;
 	struct hlist_head *head;
-	struct hlist_node *node;
 	struct batadv_bla_claim *claim;
 	struct batadv_bla_claim *claim_tmp = NULL;
 	int index;
@@ -156,7 +155,7 @@ static struct batadv_bla_claim
 	head = &hash->table[index];

 	rcu_read_lock();
-	hlist_for_each_entry_rcu(claim, node, head, hash_entry) {
+	hlist_for_each_entry_rcu(claim, head, hash_entry) {
 		if (!batadv_compare_claim(&claim->hash_entry, data))
 			continue;

@@ -185,7 +184,6 @@ batadv_backbone_hash_find(struct batadv_priv *bat_priv,
 {
 	struct batadv_hashtable *hash = bat_priv->bla.backbone_hash;
 	struct hlist_head *head;
-	struct hlist_node *node;
 	struct batadv_bla_backbone_gw search_entry, *backbone_gw;
 	struct batadv_bla_backbone_gw *backbone_gw_tmp = NULL;
 	int index;
@@ -200,7 +198,7 @@ batadv_backbone_hash_find(struct batadv_priv *bat_priv,
 	head = &hash->table[index];

 	rcu_read_lock();
-	hlist_for_each_entry_rcu(backbone_gw, node, head, hash_entry) {
+	hlist_for_each_entry_rcu(backbone_gw, head, hash_entry) {
 		if (!batadv_compare_backbone_gw(&backbone_gw->hash_entry,
 						&search_entry))
 			continue;
@@ -221,7 +219,7 @@ static void
 batadv_bla_del_backbone_claims(struct batadv_bla_backbone_gw *backbone_gw)
 {
 	struct batadv_hashtable *hash;
-	struct hlist_node *node, *node_tmp;
+	struct hlist_node *node_tmp;
 	struct hlist_head *head;
 	struct batadv_bla_claim *claim;
 	int i;
@@ -236,13 +234,13 @@ batadv_bla_del_backbone_claims(struct batadv_bla_backbone_gw *backbone_gw)
 		list_lock = &hash->list_locks[i];

 		spin_lock_bh(list_lock);
-		hlist_for_each_entry_safe(claim, node, node_tmp,
+		hlist_for_each_entry_safe(claim, node_tmp,
 					  head, hash_entry) {
 			if (claim->backbone_gw != backbone_gw)
 				continue;

 			batadv_claim_free_ref(claim);
-			hlist_del_rcu(node);
+			hlist_del_rcu(&claim->hash_entry);
 		}
 		spin_unlock_bh(list_lock);
 	}
@@ -460,7 +458,6 @@ static void batadv_bla_answer_request(struct batadv_priv *bat_priv,
 				      struct batadv_hard_iface *primary_if,
 				      short vid)
 {
-	struct hlist_node *node;
 	struct hlist_head *head;
 	struct batadv_hashtable *hash;
 	struct batadv_bla_claim *claim;
@@ -481,7 +478,7 @@ static void batadv_bla_answer_request(struct batadv_priv *bat_priv,
 		head = &hash->table[i];

 		rcu_read_lock();
-		hlist_for_each_entry_rcu(claim, node, head, hash_entry) {
+		hlist_for_each_entry_rcu(claim, head, hash_entry) {
 			/* only own claims are interesting */
 			if (claim->backbone_gw != backbone_gw)
 				continue;
@@ -958,7 +955,7 @@ static int batadv_bla_process_claim(struct batadv_priv *bat_priv,
 static void batadv_bla_purge_backbone_gw(struct batadv_priv *bat_priv, int now)
 {
 	struct batadv_bla_backbone_gw *backbone_gw;
-	struct hlist_node *node, *node_tmp;
+	struct hlist_node *node_tmp;
 	struct hlist_head *head;
 	struct batadv_hashtable *hash;
 	spinlock_t *list_lock; /* protects write access to the hash lists */
@@ -973,7 +970,7 @@ static void batadv_bla_purge_backbone_gw(struct batadv_priv *bat_priv, int now)
 		list_lock = &hash->list_locks[i];

 		spin_lock_bh(list_lock);
-		hlist_for_each_entry_safe(backbone_gw, node, node_tmp,
+		hlist_for_each_entry_safe(backbone_gw, node_tmp,
 					  head, hash_entry) {
 			if (now)
 				goto purge_now;
@@ -992,7 +989,7 @@ purge_now:

 			batadv_bla_del_backbone_claims(backbone_gw);

-			hlist_del_rcu(node);
+			hlist_del_rcu(&backbone_gw->hash_entry);
 			batadv_backbone_gw_free_ref(backbone_gw);
 		}
 		spin_unlock_bh(list_lock);
@@ -1013,7 +1010,6 @@ static void batadv_bla_purge_claims(struct batadv_priv *bat_priv,
 				    int now)
 {
 	struct batadv_bla_claim *claim;
-	struct hlist_node *node;
 	struct hlist_head *head;
 	struct batadv_hashtable *hash;
 	int i;
@@ -1026,7 +1022,7 @@ static void batadv_bla_purge_claims(struct batadv_priv *bat_priv,
 		head = &hash->table[i];

 		rcu_read_lock();
-		hlist_for_each_entry_rcu(claim, node, head, hash_entry) {
+		hlist_for_each_entry_rcu(claim, head, hash_entry) {
 			if (now)
 				goto purge_now;
 			if (!batadv_compare_eth(claim->backbone_gw->orig,
@@ -1062,7 +1058,6 @@ void batadv_bla_update_orig_address(struct batadv_priv *bat_priv,
 				    struct batadv_hard_iface *oldif)
 {
 	struct batadv_bla_backbone_gw *backbone_gw;
-	struct hlist_node *node;
 	struct hlist_head *head;
 	struct batadv_hashtable *hash;
 	__be16 group;
@@ -1086,7 +1081,7 @@ void batadv_bla_update_orig_address(struct batadv_priv *bat_priv,
 		head = &hash->table[i];

 		rcu_read_lock();
-		hlist_for_each_entry_rcu(backbone_gw, node, head, hash_entry) {
+		hlist_for_each_entry_rcu(backbone_gw, head, hash_entry) {
 			/* own orig still holds the old value. */
 			if (!batadv_compare_eth(backbone_gw->orig,
 						oldif->net_dev->dev_addr))
@@ -1112,7 +1107,6 @@ static void batadv_bla_periodic_work(struct work_struct *work)
 	struct delayed_work *delayed_work;
 	struct batadv_priv *bat_priv;
 	struct batadv_priv_bla *priv_bla;
-	struct hlist_node *node;
 	struct hlist_head *head;
 	struct batadv_bla_backbone_gw *backbone_gw;
 	struct batadv_hashtable *hash;
@@ -1140,7 +1134,7 @@ static void batadv_bla_periodic_work(struct work_struct *work)
 		head = &hash->table[i];

 		rcu_read_lock();
-		hlist_for_each_entry_rcu(backbone_gw, node, head, hash_entry) {
+		hlist_for_each_entry_rcu(backbone_gw, head, hash_entry) {
 			if (!batadv_compare_eth(backbone_gw->orig,
 						primary_if->net_dev->dev_addr))
 				continue;
@@ -1322,7 +1316,6 @@ int batadv_bla_is_backbone_gw_orig(struct batadv_priv *bat_priv, uint8_t *orig)
 {
 	struct batadv_hashtable *hash = bat_priv->bla.backbone_hash;
 	struct hlist_head *head;
-	struct hlist_node *node;
 	struct batadv_bla_backbone_gw *backbone_gw;
 	int i;

@@ -1336,7 +1329,7 @@ int batadv_bla_is_backbone_gw_orig(struct batadv_priv *bat_priv, uint8_t *orig)
 		head = &hash->table[i];

 		rcu_read_lock();
-		hlist_for_each_entry_rcu(backbone_gw, node, head, hash_entry) {
+		hlist_for_each_entry_rcu(backbone_gw, head, hash_entry) {
 			if (batadv_compare_eth(backbone_gw->orig, orig)) {
 				rcu_read_unlock();
 				return 1;
@@ -1607,7 +1600,6 @@ int batadv_bla_claim_table_seq_print_text(struct seq_file *seq, void *offset)
 	struct batadv_hashtable *hash = bat_priv->bla.claim_hash;
 	struct batadv_bla_claim *claim;
 	struct batadv_hard_iface *primary_if;
-	struct hlist_node *node;
 	struct hlist_head *head;
 	uint32_t i;
 	bool is_own;
@@ -1628,7 +1620,7 @@ int batadv_bla_claim_table_seq_print_text(struct seq_file *seq, void *offset)
 		head = &hash->table[i];

 		rcu_read_lock();
-		hlist_for_each_entry_rcu(claim, node, head, hash_entry) {
+		hlist_for_each_entry_rcu(claim, head, hash_entry) {
 			is_own = batadv_compare_eth(claim->backbone_gw->orig,
 						    primary_addr);
 			seq_printf(seq, " * %pM on % 5d by %pM [%c] (%#.4x)\n",
@@ -1652,7 +1644,6 @@ int batadv_bla_backbone_table_seq_print_text(struct seq_file *seq, void *offset)
 	struct batadv_hashtable *hash = bat_priv->bla.backbone_hash;
 	struct batadv_bla_backbone_gw *backbone_gw;
 	struct batadv_hard_iface *primary_if;
-	struct hlist_node *node;
 	struct hlist_head *head;
 	int secs, msecs;
 	uint32_t i;
@@ -1674,7 +1665,7 @@ int batadv_bla_backbone_table_seq_print_text(struct seq_file *seq, void *offset)
 		head = &hash->table[i];

 		rcu_read_lock();
-		hlist_for_each_entry_rcu(backbone_gw, node, head, hash_entry) {
+		hlist_for_each_entry_rcu(backbone_gw, head, hash_entry) {
 			msecs = jiffies_to_msecs(jiffies -
 						 backbone_gw->lasttime);
 			secs = msecs / 1000;
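
Note: the purge loops above show the second half of the conversion. hlist_for_each_entry_safe() keeps only its lookahead cursor (node_tmp), which is what makes unlinking the current entry safe, and hlist_del_rcu() is now passed the node embedded in the entry (&claim->hash_entry) rather than the removed iterator variable. A sketch of the resulting idiom, reusing the illustrative struct item from the note above (the expired flag and the rcu head are assumptions, not batman-adv fields):

	static void purge_bucket(struct hlist_head *head, spinlock_t *lock)
	{
		struct hlist_node *node_tmp;	/* lookahead survives the unlink */
		struct item *pos;

		spin_lock_bh(lock);
		hlist_for_each_entry_safe(pos, node_tmp, head, link) {
			if (!pos->expired)
				continue;
			hlist_del_rcu(&pos->link);	/* address of the embedded node */
			kfree_rcu(pos, rcu);		/* assumes a struct rcu_head rcu member */
		}
		spin_unlock_bh(lock);
	}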
diff --git a/net/batman-adv/distributed-arp-table.c b/net/batman-adv/distributed-arp-table.c
index 761a59002e34..d54188a112ea 100644
--- a/net/batman-adv/distributed-arp-table.c
+++ b/net/batman-adv/distributed-arp-table.c
@@ -83,7 +83,7 @@ static void __batadv_dat_purge(struct batadv_priv *bat_priv,
 {
 	spinlock_t *list_lock; /* protects write access to the hash lists */
 	struct batadv_dat_entry *dat_entry;
-	struct hlist_node *node, *node_tmp;
+	struct hlist_node *node_tmp;
 	struct hlist_head *head;
 	uint32_t i;

@@ -95,7 +95,7 @@ static void __batadv_dat_purge(struct batadv_priv *bat_priv,
 		list_lock = &bat_priv->dat.hash->list_locks[i];

 		spin_lock_bh(list_lock);
-		hlist_for_each_entry_safe(dat_entry, node, node_tmp, head,
+		hlist_for_each_entry_safe(dat_entry, node_tmp, head,
 					  hash_entry) {
 			/* if an helper function has been passed as parameter,
 			 * ask it if the entry has to be purged or not
@@ -103,7 +103,7 @@ static void __batadv_dat_purge(struct batadv_priv *bat_priv,
 			if (to_purge && !to_purge(dat_entry))
 				continue;

-			hlist_del_rcu(node);
+			hlist_del_rcu(&dat_entry->hash_entry);
 			batadv_dat_entry_free_ref(dat_entry);
 		}
 		spin_unlock_bh(list_lock);
@@ -235,7 +235,6 @@ static struct batadv_dat_entry *
 batadv_dat_entry_hash_find(struct batadv_priv *bat_priv, __be32 ip)
 {
 	struct hlist_head *head;
-	struct hlist_node *node;
 	struct batadv_dat_entry *dat_entry, *dat_entry_tmp = NULL;
 	struct batadv_hashtable *hash = bat_priv->dat.hash;
 	uint32_t index;
@@ -247,7 +246,7 @@ batadv_dat_entry_hash_find(struct batadv_priv *bat_priv, __be32 ip)
 	head = &hash->table[index];

 	rcu_read_lock();
-	hlist_for_each_entry_rcu(dat_entry, node, head, hash_entry) {
+	hlist_for_each_entry_rcu(dat_entry, head, hash_entry) {
 		if (dat_entry->ip != ip)
 			continue;

@@ -465,7 +464,6 @@ static void batadv_choose_next_candidate(struct batadv_priv *bat_priv,
 	batadv_dat_addr_t max = 0, tmp_max = 0;
 	struct batadv_orig_node *orig_node, *max_orig_node = NULL;
 	struct batadv_hashtable *hash = bat_priv->orig_hash;
-	struct hlist_node *node;
 	struct hlist_head *head;
 	int i;

@@ -481,7 +479,7 @@ static void batadv_choose_next_candidate(struct batadv_priv *bat_priv,
 		head = &hash->table[i];

 		rcu_read_lock();
-		hlist_for_each_entry_rcu(orig_node, node, head, hash_entry) {
+		hlist_for_each_entry_rcu(orig_node, head, hash_entry) {
 			/* the dht space is a ring and addresses are unsigned */
 			tmp_max = BATADV_DAT_ADDR_MAX - orig_node->dat_addr +
 				  ip_key;
@@ -686,7 +684,6 @@ int batadv_dat_cache_seq_print_text(struct seq_file *seq, void *offset)
 	struct batadv_hashtable *hash = bat_priv->dat.hash;
 	struct batadv_dat_entry *dat_entry;
 	struct batadv_hard_iface *primary_if;
-	struct hlist_node *node;
 	struct hlist_head *head;
 	unsigned long last_seen_jiffies;
 	int last_seen_msecs, last_seen_secs, last_seen_mins;
@@ -704,7 +701,7 @@ int batadv_dat_cache_seq_print_text(struct seq_file *seq, void *offset)
 		head = &hash->table[i];

 		rcu_read_lock();
-		hlist_for_each_entry_rcu(dat_entry, node, head, hash_entry) {
+		hlist_for_each_entry_rcu(dat_entry, head, hash_entry) {
 			last_seen_jiffies = jiffies - dat_entry->last_update;
 			last_seen_msecs = jiffies_to_msecs(last_seen_jiffies);
 			last_seen_mins = last_seen_msecs / 60000;
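
Note: the locking split visible in these hash walks is unchanged by the patch but explains the two loop flavors: lookups traverse a bucket under rcu_read_lock() with hlist_for_each_entry_rcu(), while purge paths hold the per-bucket spinlock and unlink with hlist_del_rcu(), so concurrent RCU readers keep seeing a consistent list. A compressed reader-side sketch using the same illustrative struct item as above (find_item and key are assumptions):

	static struct item *find_item(struct hlist_head *head, int key)
	{
		struct item *pos, *found = NULL;

		rcu_read_lock();
		hlist_for_each_entry_rcu(pos, head, link) {
			if (pos->value != key)
				continue;
			found = pos;	/* real callers take a reference before unlocking */
			break;
		}
		rcu_read_unlock();

		return found;
	}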
diff --git a/net/batman-adv/gateway_client.c b/net/batman-adv/gateway_client.c
index 074107f2cfaa..34f99a46ec1d 100644
--- a/net/batman-adv/gateway_client.c
+++ b/net/batman-adv/gateway_client.c
@@ -114,7 +114,6 @@ static struct batadv_gw_node *
 batadv_gw_get_best_gw_node(struct batadv_priv *bat_priv)
 {
 	struct batadv_neigh_node *router;
-	struct hlist_node *node;
 	struct batadv_gw_node *gw_node, *curr_gw = NULL;
 	uint32_t max_gw_factor = 0, tmp_gw_factor = 0;
 	uint32_t gw_divisor;
@@ -127,7 +126,7 @@ batadv_gw_get_best_gw_node(struct batadv_priv *bat_priv)
 	gw_divisor *= 64;

 	rcu_read_lock();
-	hlist_for_each_entry_rcu(gw_node, node, &bat_priv->gw.list, list) {
+	hlist_for_each_entry_rcu(gw_node, &bat_priv->gw.list, list) {
 		if (gw_node->deleted)
 			continue;

@@ -344,7 +343,6 @@ void batadv_gw_node_update(struct batadv_priv *bat_priv,
 			   struct batadv_orig_node *orig_node,
 			   uint8_t new_gwflags)
 {
-	struct hlist_node *node;
 	struct batadv_gw_node *gw_node, *curr_gw;

 	/* Note: We don't need a NULL check here, since curr_gw never gets
@@ -355,7 +353,7 @@ void batadv_gw_node_update(struct batadv_priv *bat_priv,
 	curr_gw = batadv_gw_get_selected_gw_node(bat_priv);

 	rcu_read_lock();
-	hlist_for_each_entry_rcu(gw_node, node, &bat_priv->gw.list, list) {
+	hlist_for_each_entry_rcu(gw_node, &bat_priv->gw.list, list) {
 		if (gw_node->orig_node != orig_node)
 			continue;

@@ -403,7 +401,7 @@ void batadv_gw_node_delete(struct batadv_priv *bat_priv,
 void batadv_gw_node_purge(struct batadv_priv *bat_priv)
 {
 	struct batadv_gw_node *gw_node, *curr_gw;
-	struct hlist_node *node, *node_tmp;
+	struct hlist_node *node_tmp;
 	unsigned long timeout = msecs_to_jiffies(2 * BATADV_PURGE_TIMEOUT);
 	int do_deselect = 0;

@@ -411,7 +409,7 @@ void batadv_gw_node_purge(struct batadv_priv *bat_priv)

 	spin_lock_bh(&bat_priv->gw.list_lock);

-	hlist_for_each_entry_safe(gw_node, node, node_tmp,
+	hlist_for_each_entry_safe(gw_node, node_tmp,
 				  &bat_priv->gw.list, list) {
 		if (((!gw_node->deleted) ||
 		     (time_before(jiffies, gw_node->deleted + timeout))) &&
@@ -476,7 +474,6 @@ int batadv_gw_client_seq_print_text(struct seq_file *seq, void *offset)
 	struct batadv_priv *bat_priv = netdev_priv(net_dev);
 	struct batadv_hard_iface *primary_if;
 	struct batadv_gw_node *gw_node;
-	struct hlist_node *node;
 	int gw_count = 0;

 	primary_if = batadv_seq_print_text_primary_if_get(seq);
@@ -490,7 +487,7 @@ int batadv_gw_client_seq_print_text(struct seq_file *seq, void *offset)
 		   primary_if->net_dev->dev_addr, net_dev->name);

 	rcu_read_lock();
-	hlist_for_each_entry_rcu(gw_node, node, &bat_priv->gw.list, list) {
+	hlist_for_each_entry_rcu(gw_node, &bat_priv->gw.list, list) {
 		if (gw_node->deleted)
 			continue;

diff --git a/net/batman-adv/main.c b/net/batman-adv/main.c
index 21fe6987733b..0488d70c8c35 100644
--- a/net/batman-adv/main.c
+++ b/net/batman-adv/main.c
@@ -345,9 +345,8 @@ void batadv_recv_handler_unregister(uint8_t packet_type)
 static struct batadv_algo_ops *batadv_algo_get(char *name)
 {
 	struct batadv_algo_ops *bat_algo_ops = NULL, *bat_algo_ops_tmp;
-	struct hlist_node *node;

-	hlist_for_each_entry(bat_algo_ops_tmp, node, &batadv_algo_list, list) {
+	hlist_for_each_entry(bat_algo_ops_tmp, &batadv_algo_list, list) {
 		if (strcmp(bat_algo_ops_tmp->name, name) != 0)
 			continue;

@@ -411,11 +410,10 @@ out:
 int batadv_algo_seq_print_text(struct seq_file *seq, void *offset)
 {
 	struct batadv_algo_ops *bat_algo_ops;
-	struct hlist_node *node;

 	seq_printf(seq, "Available routing algorithms:\n");

-	hlist_for_each_entry(bat_algo_ops, node, &batadv_algo_list, list) {
+	hlist_for_each_entry(bat_algo_ops, &batadv_algo_list, list) {
 		seq_printf(seq, "%s\n", bat_algo_ops->name);
 	}

diff --git a/net/batman-adv/originator.c b/net/batman-adv/originator.c
index 457ea445217c..96fb80b724dc 100644
--- a/net/batman-adv/originator.c
+++ b/net/batman-adv/originator.c
@@ -118,7 +118,7 @@ out:

 static void batadv_orig_node_free_rcu(struct rcu_head *rcu)
 {
-	struct hlist_node *node, *node_tmp;
+	struct hlist_node *node_tmp;
 	struct batadv_neigh_node *neigh_node, *tmp_neigh_node;
 	struct batadv_orig_node *orig_node;

@@ -134,7 +134,7 @@ static void batadv_orig_node_free_rcu(struct rcu_head *rcu)
 	}

 	/* for all neighbors towards this originator ... */
-	hlist_for_each_entry_safe(neigh_node, node, node_tmp,
+	hlist_for_each_entry_safe(neigh_node, node_tmp,
 				  &orig_node->neigh_list, list) {
 		hlist_del_rcu(&neigh_node->list);
 		batadv_neigh_node_free_ref(neigh_node);
@@ -161,7 +161,7 @@ void batadv_orig_node_free_ref(struct batadv_orig_node *orig_node)
 void batadv_originator_free(struct batadv_priv *bat_priv)
 {
 	struct batadv_hashtable *hash = bat_priv->orig_hash;
-	struct hlist_node *node, *node_tmp;
+	struct hlist_node *node_tmp;
 	struct hlist_head *head;
 	spinlock_t *list_lock; /* spinlock to protect write access */
 	struct batadv_orig_node *orig_node;
@@ -179,9 +179,9 @@ void batadv_originator_free(struct batadv_priv *bat_priv)
 		list_lock = &hash->list_locks[i];

 		spin_lock_bh(list_lock);
-		hlist_for_each_entry_safe(orig_node, node, node_tmp,
+		hlist_for_each_entry_safe(orig_node, node_tmp,
 					  head, hash_entry) {
-			hlist_del_rcu(node);
+			hlist_del_rcu(&orig_node->hash_entry);
 			batadv_orig_node_free_ref(orig_node);
 		}
 		spin_unlock_bh(list_lock);
@@ -274,7 +274,7 @@ batadv_purge_orig_neighbors(struct batadv_priv *bat_priv,
 			    struct batadv_orig_node *orig_node,
 			    struct batadv_neigh_node **best_neigh_node)
 {
-	struct hlist_node *node, *node_tmp;
+	struct hlist_node *node_tmp;
 	struct batadv_neigh_node *neigh_node;
 	bool neigh_purged = false;
 	unsigned long last_seen;
@@ -285,7 +285,7 @@ batadv_purge_orig_neighbors(struct batadv_priv *bat_priv,
 	spin_lock_bh(&orig_node->neigh_list_lock);

 	/* for all neighbors towards this originator ... */
-	hlist_for_each_entry_safe(neigh_node, node, node_tmp,
+	hlist_for_each_entry_safe(neigh_node, node_tmp,
 				  &orig_node->neigh_list, list) {
 		last_seen = neigh_node->last_seen;
 		if_incoming = neigh_node->if_incoming;
@@ -348,7 +348,7 @@ static bool batadv_purge_orig_node(struct batadv_priv *bat_priv,
 static void _batadv_purge_orig(struct batadv_priv *bat_priv)
 {
 	struct batadv_hashtable *hash = bat_priv->orig_hash;
-	struct hlist_node *node, *node_tmp;
+	struct hlist_node *node_tmp;
 	struct hlist_head *head;
 	spinlock_t *list_lock; /* spinlock to protect write access */
 	struct batadv_orig_node *orig_node;
@@ -363,13 +363,13 @@ static void _batadv_purge_orig(struct batadv_priv *bat_priv)
 		list_lock = &hash->list_locks[i];

 		spin_lock_bh(list_lock);
-		hlist_for_each_entry_safe(orig_node, node, node_tmp,
+		hlist_for_each_entry_safe(orig_node, node_tmp,
 					  head, hash_entry) {
 			if (batadv_purge_orig_node(bat_priv, orig_node)) {
 				if (orig_node->gw_flags)
 					batadv_gw_node_delete(bat_priv,
 							      orig_node);
-				hlist_del_rcu(node);
+				hlist_del_rcu(&orig_node->hash_entry);
 				batadv_orig_node_free_ref(orig_node);
 				continue;
 			}
@@ -408,7 +408,6 @@ int batadv_orig_seq_print_text(struct seq_file *seq, void *offset)
 	struct net_device *net_dev = (struct net_device *)seq->private;
 	struct batadv_priv *bat_priv = netdev_priv(net_dev);
 	struct batadv_hashtable *hash = bat_priv->orig_hash;
-	struct hlist_node *node, *node_tmp;
 	struct hlist_head *head;
 	struct batadv_hard_iface *primary_if;
 	struct batadv_orig_node *orig_node;
@@ -434,7 +433,7 @@ int batadv_orig_seq_print_text(struct seq_file *seq, void *offset)
 		head = &hash->table[i];

 		rcu_read_lock();
-		hlist_for_each_entry_rcu(orig_node, node, head, hash_entry) {
+		hlist_for_each_entry_rcu(orig_node, head, hash_entry) {
 			neigh_node = batadv_orig_node_get_router(orig_node);
 			if (!neigh_node)
 				continue;
@@ -453,7 +452,7 @@ int batadv_orig_seq_print_text(struct seq_file *seq, void *offset)
 				   neigh_node->addr,
 				   neigh_node->if_incoming->net_dev->name);

-			hlist_for_each_entry_rcu(neigh_node_tmp, node_tmp,
+			hlist_for_each_entry_rcu(neigh_node_tmp,
 						 &orig_node->neigh_list, list) {
 				seq_printf(seq, " %pM (%3i)",
 					   neigh_node_tmp->addr,
@@ -511,7 +510,6 @@ int batadv_orig_hash_add_if(struct batadv_hard_iface *hard_iface,
 {
 	struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
 	struct batadv_hashtable *hash = bat_priv->orig_hash;
-	struct hlist_node *node;
 	struct hlist_head *head;
 	struct batadv_orig_node *orig_node;
 	uint32_t i;
@@ -524,7 +522,7 @@ int batadv_orig_hash_add_if(struct batadv_hard_iface *hard_iface,
 		head = &hash->table[i];

 		rcu_read_lock();
-		hlist_for_each_entry_rcu(orig_node, node, head, hash_entry) {
+		hlist_for_each_entry_rcu(orig_node, head, hash_entry) {
 			spin_lock_bh(&orig_node->ogm_cnt_lock);
 			ret = batadv_orig_node_add_if(orig_node, max_if_num);
 			spin_unlock_bh(&orig_node->ogm_cnt_lock);
@@ -595,7 +593,6 @@ int batadv_orig_hash_del_if(struct batadv_hard_iface *hard_iface,
 {
 	struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
 	struct batadv_hashtable *hash = bat_priv->orig_hash;
-	struct hlist_node *node;
 	struct hlist_head *head;
 	struct batadv_hard_iface *hard_iface_tmp;
 	struct batadv_orig_node *orig_node;
@@ -609,7 +606,7 @@ int batadv_orig_hash_del_if(struct batadv_hard_iface *hard_iface,
 		head = &hash->table[i];

 		rcu_read_lock();
-		hlist_for_each_entry_rcu(orig_node, node, head, hash_entry) {
+		hlist_for_each_entry_rcu(orig_node, head, hash_entry) {
 			spin_lock_bh(&orig_node->ogm_cnt_lock);
 			ret = batadv_orig_node_del_if(orig_node, max_if_num,
 						      hard_iface->if_num);
diff --git a/net/batman-adv/originator.h b/net/batman-adv/originator.h
index 286bf743e76a..7df48fa7669d 100644
--- a/net/batman-adv/originator.h
+++ b/net/batman-adv/originator.h
@@ -68,7 +68,6 @@ batadv_orig_hash_find(struct batadv_priv *bat_priv, const void *data)
 {
 	struct batadv_hashtable *hash = bat_priv->orig_hash;
 	struct hlist_head *head;
-	struct hlist_node *node;
 	struct batadv_orig_node *orig_node, *orig_node_tmp = NULL;
 	int index;

@@ -79,7 +78,7 @@ batadv_orig_hash_find(struct batadv_priv *bat_priv, const void *data)
 	head = &hash->table[index];

 	rcu_read_lock();
-	hlist_for_each_entry_rcu(orig_node, node, head, hash_entry) {
+	hlist_for_each_entry_rcu(orig_node, head, hash_entry) {
 		if (!batadv_compare_eth(orig_node, data))
 			continue;

diff --git a/net/batman-adv/routing.c b/net/batman-adv/routing.c
index 60ba03fc8390..5ee21cebbbb0 100644
--- a/net/batman-adv/routing.c
+++ b/net/batman-adv/routing.c
@@ -37,7 +37,6 @@ void batadv_slide_own_bcast_window(struct batadv_hard_iface *hard_iface)
 {
 	struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
 	struct batadv_hashtable *hash = bat_priv->orig_hash;
-	struct hlist_node *node;
 	struct hlist_head *head;
 	struct batadv_orig_node *orig_node;
 	unsigned long *word;
@@ -49,7 +48,7 @@ void batadv_slide_own_bcast_window(struct batadv_hard_iface *hard_iface)
 		head = &hash->table[i];

 		rcu_read_lock();
-		hlist_for_each_entry_rcu(orig_node, node, head, hash_entry) {
+		hlist_for_each_entry_rcu(orig_node, head, hash_entry) {
 			spin_lock_bh(&orig_node->ogm_cnt_lock);
 			word_index = hard_iface->if_num * BATADV_NUM_WORDS;
 			word = &(orig_node->bcast_own[word_index]);
@@ -146,7 +145,6 @@ out:
 void batadv_bonding_candidate_add(struct batadv_orig_node *orig_node,
 				  struct batadv_neigh_node *neigh_node)
 {
-	struct hlist_node *node;
 	struct batadv_neigh_node *tmp_neigh_node, *router = NULL;
 	uint8_t interference_candidate = 0;

@@ -169,7 +167,7 @@ void batadv_bonding_candidate_add(struct batadv_orig_node *orig_node,
 	 * interface. If we do, we won't select this candidate because of
 	 * possible interference.
 	 */
-	hlist_for_each_entry_rcu(tmp_neigh_node, node,
+	hlist_for_each_entry_rcu(tmp_neigh_node,
 				 &orig_node->neigh_list, list) {
 		if (tmp_neigh_node == neigh_node)
 			continue;
diff --git a/net/batman-adv/send.c b/net/batman-adv/send.c
index 80ca65fc89a1..a67cffde37ae 100644
--- a/net/batman-adv/send.c
+++ b/net/batman-adv/send.c
@@ -316,7 +316,7 @@ batadv_purge_outstanding_packets(struct batadv_priv *bat_priv,
 				 const struct batadv_hard_iface *hard_iface)
 {
 	struct batadv_forw_packet *forw_packet;
-	struct hlist_node *tmp_node, *safe_tmp_node;
+	struct hlist_node *safe_tmp_node;
 	bool pending;

 	if (hard_iface)
@@ -329,7 +329,7 @@ batadv_purge_outstanding_packets(struct batadv_priv *bat_priv,

 	/* free bcast list */
 	spin_lock_bh(&bat_priv->forw_bcast_list_lock);
-	hlist_for_each_entry_safe(forw_packet, tmp_node, safe_tmp_node,
+	hlist_for_each_entry_safe(forw_packet, safe_tmp_node,
 				  &bat_priv->forw_bcast_list, list) {
 		/* if purge_outstanding_packets() was called with an argument
 		 * we delete only packets belonging to the given interface
@@ -355,7 +355,7 @@ batadv_purge_outstanding_packets(struct batadv_priv *bat_priv,

 	/* free batman packet list */
 	spin_lock_bh(&bat_priv->forw_bat_list_lock);
-	hlist_for_each_entry_safe(forw_packet, tmp_node, safe_tmp_node,
+	hlist_for_each_entry_safe(forw_packet, safe_tmp_node,
 				  &bat_priv->forw_bat_list, list) {
 		/* if purge_outstanding_packets() was called with an argument
 		 * we delete only packets belonging to the given interface
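
Note: the cursor could be dropped because the reworked iterators derive it from the entry pointer itself. The new macros in <linux/list.h> are defined along these lines (quoted from memory, so treat the exact spelling as approximate):

	#define hlist_entry_safe(ptr, type, member) \
		({ typeof(ptr) ____ptr = (ptr); \
		   ____ptr ? hlist_entry(____ptr, type, member) : NULL; \
		})

	#define hlist_for_each_entry(pos, head, member) \
		for (pos = hlist_entry_safe((head)->first, typeof(*(pos)), member); \
		     pos; \
		     pos = hlist_entry_safe((pos)->member.next, typeof(*(pos)), member))

Since pos becomes NULL once the list is exhausted, no second loop variable is needed, and the per-call-site cursor declarations removed throughout this patch were dead weight.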
diff --git a/net/batman-adv/translation-table.c b/net/batman-adv/translation-table.c
index d44672f4a349..98a66a021a60 100644
--- a/net/batman-adv/translation-table.c
+++ b/net/batman-adv/translation-table.c
@@ -56,7 +56,6 @@ static struct batadv_tt_common_entry *
56batadv_tt_hash_find(struct batadv_hashtable *hash, const void *data) 56batadv_tt_hash_find(struct batadv_hashtable *hash, const void *data)
57{ 57{
58 struct hlist_head *head; 58 struct hlist_head *head;
59 struct hlist_node *node;
60 struct batadv_tt_common_entry *tt_common_entry; 59 struct batadv_tt_common_entry *tt_common_entry;
61 struct batadv_tt_common_entry *tt_common_entry_tmp = NULL; 60 struct batadv_tt_common_entry *tt_common_entry_tmp = NULL;
62 uint32_t index; 61 uint32_t index;
@@ -68,7 +67,7 @@ batadv_tt_hash_find(struct batadv_hashtable *hash, const void *data)
68 head = &hash->table[index]; 67 head = &hash->table[index];
69 68
70 rcu_read_lock(); 69 rcu_read_lock();
71 hlist_for_each_entry_rcu(tt_common_entry, node, head, hash_entry) { 70 hlist_for_each_entry_rcu(tt_common_entry, head, hash_entry) {
72 if (!batadv_compare_eth(tt_common_entry, data)) 71 if (!batadv_compare_eth(tt_common_entry, data))
73 continue; 72 continue;
74 73
@@ -257,7 +256,6 @@ void batadv_tt_local_add(struct net_device *soft_iface, const uint8_t *addr,
257 struct batadv_tt_local_entry *tt_local; 256 struct batadv_tt_local_entry *tt_local;
258 struct batadv_tt_global_entry *tt_global; 257 struct batadv_tt_global_entry *tt_global;
259 struct hlist_head *head; 258 struct hlist_head *head;
260 struct hlist_node *node;
261 struct batadv_tt_orig_list_entry *orig_entry; 259 struct batadv_tt_orig_list_entry *orig_entry;
262 int hash_added; 260 int hash_added;
263 bool roamed_back = false; 261 bool roamed_back = false;
@@ -339,7 +337,7 @@ check_roaming:
339 /* These node are probably going to update their tt table */ 337 /* These node are probably going to update their tt table */
340 head = &tt_global->orig_list; 338 head = &tt_global->orig_list;
341 rcu_read_lock(); 339 rcu_read_lock();
342 hlist_for_each_entry_rcu(orig_entry, node, head, list) { 340 hlist_for_each_entry_rcu(orig_entry, head, list) {
343 batadv_send_roam_adv(bat_priv, tt_global->common.addr, 341 batadv_send_roam_adv(bat_priv, tt_global->common.addr,
344 orig_entry->orig_node); 342 orig_entry->orig_node);
345 } 343 }
@@ -470,7 +468,6 @@ int batadv_tt_local_seq_print_text(struct seq_file *seq, void *offset)
470 struct batadv_tt_common_entry *tt_common_entry; 468 struct batadv_tt_common_entry *tt_common_entry;
471 struct batadv_tt_local_entry *tt_local; 469 struct batadv_tt_local_entry *tt_local;
472 struct batadv_hard_iface *primary_if; 470 struct batadv_hard_iface *primary_if;
473 struct hlist_node *node;
474 struct hlist_head *head; 471 struct hlist_head *head;
475 uint32_t i; 472 uint32_t i;
476 int last_seen_secs; 473 int last_seen_secs;
@@ -494,7 +491,7 @@ int batadv_tt_local_seq_print_text(struct seq_file *seq, void *offset)
494 head = &hash->table[i]; 491 head = &hash->table[i];
495 492
496 rcu_read_lock(); 493 rcu_read_lock();
497 hlist_for_each_entry_rcu(tt_common_entry, node, 494 hlist_for_each_entry_rcu(tt_common_entry,
498 head, hash_entry) { 495 head, hash_entry) {
499 tt_local = container_of(tt_common_entry, 496 tt_local = container_of(tt_common_entry,
500 struct batadv_tt_local_entry, 497 struct batadv_tt_local_entry,
@@ -605,9 +602,9 @@ static void batadv_tt_local_purge_list(struct batadv_priv *bat_priv,
605{ 602{
606 struct batadv_tt_local_entry *tt_local_entry; 603 struct batadv_tt_local_entry *tt_local_entry;
607 struct batadv_tt_common_entry *tt_common_entry; 604 struct batadv_tt_common_entry *tt_common_entry;
608 struct hlist_node *node, *node_tmp; 605 struct hlist_node *node_tmp;
609 606
610 hlist_for_each_entry_safe(tt_common_entry, node, node_tmp, head, 607 hlist_for_each_entry_safe(tt_common_entry, node_tmp, head,
611 hash_entry) { 608 hash_entry) {
612 tt_local_entry = container_of(tt_common_entry, 609 tt_local_entry = container_of(tt_common_entry,
613 struct batadv_tt_local_entry, 610 struct batadv_tt_local_entry,
@@ -651,7 +648,7 @@ static void batadv_tt_local_table_free(struct batadv_priv *bat_priv)
651 spinlock_t *list_lock; /* protects write access to the hash lists */ 648 spinlock_t *list_lock; /* protects write access to the hash lists */
652 struct batadv_tt_common_entry *tt_common_entry; 649 struct batadv_tt_common_entry *tt_common_entry;
653 struct batadv_tt_local_entry *tt_local; 650 struct batadv_tt_local_entry *tt_local;
654 struct hlist_node *node, *node_tmp; 651 struct hlist_node *node_tmp;
655 struct hlist_head *head; 652 struct hlist_head *head;
656 uint32_t i; 653 uint32_t i;
657 654
@@ -665,9 +662,9 @@ static void batadv_tt_local_table_free(struct batadv_priv *bat_priv)
665 list_lock = &hash->list_locks[i]; 662 list_lock = &hash->list_locks[i];
666 663
667 spin_lock_bh(list_lock); 664 spin_lock_bh(list_lock);
668 hlist_for_each_entry_safe(tt_common_entry, node, node_tmp, 665 hlist_for_each_entry_safe(tt_common_entry, node_tmp,
669 head, hash_entry) { 666 head, hash_entry) {
670 hlist_del_rcu(node); 667 hlist_del_rcu(&tt_common_entry->hash_entry);
671 tt_local = container_of(tt_common_entry, 668 tt_local = container_of(tt_common_entry,
672 struct batadv_tt_local_entry, 669 struct batadv_tt_local_entry,
673 common); 670 common);
@@ -724,11 +721,10 @@ batadv_tt_global_orig_entry_find(const struct batadv_tt_global_entry *entry,
724{ 721{
725 struct batadv_tt_orig_list_entry *tmp_orig_entry, *orig_entry = NULL; 722 struct batadv_tt_orig_list_entry *tmp_orig_entry, *orig_entry = NULL;
726 const struct hlist_head *head; 723 const struct hlist_head *head;
727 struct hlist_node *node;
728 724
729 rcu_read_lock(); 725 rcu_read_lock();
730 head = &entry->orig_list; 726 head = &entry->orig_list;
731 hlist_for_each_entry_rcu(tmp_orig_entry, node, head, list) { 727 hlist_for_each_entry_rcu(tmp_orig_entry, head, list) {
732 if (tmp_orig_entry->orig_node != orig_node) 728 if (tmp_orig_entry->orig_node != orig_node)
733 continue; 729 continue;
734 if (!atomic_inc_not_zero(&tmp_orig_entry->refcount)) 730 if (!atomic_inc_not_zero(&tmp_orig_entry->refcount))
@@ -940,12 +936,11 @@ batadv_transtable_best_orig(struct batadv_tt_global_entry *tt_global_entry)
940{ 936{
941 struct batadv_neigh_node *router = NULL; 937 struct batadv_neigh_node *router = NULL;
942 struct hlist_head *head; 938 struct hlist_head *head;
943 struct hlist_node *node;
944 struct batadv_tt_orig_list_entry *orig_entry, *best_entry = NULL; 939 struct batadv_tt_orig_list_entry *orig_entry, *best_entry = NULL;
945 int best_tq = 0; 940 int best_tq = 0;
946 941
947 head = &tt_global_entry->orig_list; 942 head = &tt_global_entry->orig_list;
948 hlist_for_each_entry_rcu(orig_entry, node, head, list) { 943 hlist_for_each_entry_rcu(orig_entry, head, list) {
949 router = batadv_orig_node_get_router(orig_entry->orig_node); 944 router = batadv_orig_node_get_router(orig_entry->orig_node);
950 if (!router) 945 if (!router)
951 continue; 946 continue;
@@ -973,7 +968,6 @@ batadv_tt_global_print_entry(struct batadv_tt_global_entry *tt_global_entry,
973 struct seq_file *seq) 968 struct seq_file *seq)
974{ 969{
975 struct hlist_head *head; 970 struct hlist_head *head;
976 struct hlist_node *node;
977 struct batadv_tt_orig_list_entry *orig_entry, *best_entry; 971 struct batadv_tt_orig_list_entry *orig_entry, *best_entry;
978 struct batadv_tt_common_entry *tt_common_entry; 972 struct batadv_tt_common_entry *tt_common_entry;
979 uint16_t flags; 973 uint16_t flags;
@@ -997,7 +991,7 @@ batadv_tt_global_print_entry(struct batadv_tt_global_entry *tt_global_entry,
997 991
998 head = &tt_global_entry->orig_list; 992 head = &tt_global_entry->orig_list;
999 993
1000 hlist_for_each_entry_rcu(orig_entry, node, head, list) { 994 hlist_for_each_entry_rcu(orig_entry, head, list) {
1001 if (best_entry == orig_entry) 995 if (best_entry == orig_entry)
1002 continue; 996 continue;
1003 997
@@ -1020,7 +1014,6 @@ int batadv_tt_global_seq_print_text(struct seq_file *seq, void *offset)
1020 struct batadv_tt_common_entry *tt_common_entry; 1014 struct batadv_tt_common_entry *tt_common_entry;
1021 struct batadv_tt_global_entry *tt_global; 1015 struct batadv_tt_global_entry *tt_global;
1022 struct batadv_hard_iface *primary_if; 1016 struct batadv_hard_iface *primary_if;
1023 struct hlist_node *node;
1024 struct hlist_head *head; 1017 struct hlist_head *head;
1025 uint32_t i; 1018 uint32_t i;
1026 1019
@@ -1039,7 +1032,7 @@ int batadv_tt_global_seq_print_text(struct seq_file *seq, void *offset)
1039 head = &hash->table[i]; 1032 head = &hash->table[i];
1040 1033
1041 rcu_read_lock(); 1034 rcu_read_lock();
1042 hlist_for_each_entry_rcu(tt_common_entry, node, 1035 hlist_for_each_entry_rcu(tt_common_entry,
1043 head, hash_entry) { 1036 head, hash_entry) {
1044 tt_global = container_of(tt_common_entry, 1037 tt_global = container_of(tt_common_entry,
1045 struct batadv_tt_global_entry, 1038 struct batadv_tt_global_entry,
@@ -1059,13 +1052,13 @@ static void
1059batadv_tt_global_del_orig_list(struct batadv_tt_global_entry *tt_global_entry) 1052batadv_tt_global_del_orig_list(struct batadv_tt_global_entry *tt_global_entry)
1060{ 1053{
1061 struct hlist_head *head; 1054 struct hlist_head *head;
1062 struct hlist_node *node, *safe; 1055 struct hlist_node *safe;
1063 struct batadv_tt_orig_list_entry *orig_entry; 1056 struct batadv_tt_orig_list_entry *orig_entry;
1064 1057
1065 spin_lock_bh(&tt_global_entry->list_lock); 1058 spin_lock_bh(&tt_global_entry->list_lock);
1066 head = &tt_global_entry->orig_list; 1059 head = &tt_global_entry->orig_list;
1067 hlist_for_each_entry_safe(orig_entry, node, safe, head, list) { 1060 hlist_for_each_entry_safe(orig_entry, safe, head, list) {
1068 hlist_del_rcu(node); 1061 hlist_del_rcu(&orig_entry->list);
1069 batadv_tt_orig_list_entry_free_ref(orig_entry); 1062 batadv_tt_orig_list_entry_free_ref(orig_entry);
1070 } 1063 }
1071 spin_unlock_bh(&tt_global_entry->list_lock); 1064 spin_unlock_bh(&tt_global_entry->list_lock);
@@ -1078,18 +1071,18 @@ batadv_tt_global_del_orig_entry(struct batadv_priv *bat_priv,
1078 const char *message) 1071 const char *message)
1079{ 1072{
1080 struct hlist_head *head; 1073 struct hlist_head *head;
1081 struct hlist_node *node, *safe; 1074 struct hlist_node *safe;
1082 struct batadv_tt_orig_list_entry *orig_entry; 1075 struct batadv_tt_orig_list_entry *orig_entry;
1083 1076
1084 spin_lock_bh(&tt_global_entry->list_lock); 1077 spin_lock_bh(&tt_global_entry->list_lock);
1085 head = &tt_global_entry->orig_list; 1078 head = &tt_global_entry->orig_list;
1086 hlist_for_each_entry_safe(orig_entry, node, safe, head, list) { 1079 hlist_for_each_entry_safe(orig_entry, safe, head, list) {
1087 if (orig_entry->orig_node == orig_node) { 1080 if (orig_entry->orig_node == orig_node) {
1088 batadv_dbg(BATADV_DBG_TT, bat_priv, 1081 batadv_dbg(BATADV_DBG_TT, bat_priv,
1089 "Deleting %pM from global tt entry %pM: %s\n", 1082 "Deleting %pM from global tt entry %pM: %s\n",
1090 orig_node->orig, 1083 orig_node->orig,
1091 tt_global_entry->common.addr, message); 1084 tt_global_entry->common.addr, message);
1092 hlist_del_rcu(node); 1085 hlist_del_rcu(&orig_entry->list);
1093 batadv_tt_orig_list_entry_free_ref(orig_entry); 1086 batadv_tt_orig_list_entry_free_ref(orig_entry);
1094 } 1087 }
1095 } 1088 }
@@ -1108,7 +1101,6 @@ batadv_tt_global_del_roaming(struct batadv_priv *bat_priv,
1108{ 1101{
1109 bool last_entry = true; 1102 bool last_entry = true;
1110 struct hlist_head *head; 1103 struct hlist_head *head;
1111 struct hlist_node *node;
1112 struct batadv_tt_orig_list_entry *orig_entry; 1104 struct batadv_tt_orig_list_entry *orig_entry;
1113 1105
1114 /* no local entry exists, case 1: 1106 /* no local entry exists, case 1:
@@ -1117,7 +1109,7 @@ batadv_tt_global_del_roaming(struct batadv_priv *bat_priv,
1117 1109
1118 rcu_read_lock(); 1110 rcu_read_lock();
1119 head = &tt_global_entry->orig_list; 1111 head = &tt_global_entry->orig_list;
1120 hlist_for_each_entry_rcu(orig_entry, node, head, list) { 1112 hlist_for_each_entry_rcu(orig_entry, head, list) {
1121 if (orig_entry->orig_node != orig_node) { 1113 if (orig_entry->orig_node != orig_node) {
1122 last_entry = false; 1114 last_entry = false;
1123 break; 1115 break;
@@ -1202,7 +1194,7 @@ void batadv_tt_global_del_orig(struct batadv_priv *bat_priv,
1202 struct batadv_tt_common_entry *tt_common_entry; 1194 struct batadv_tt_common_entry *tt_common_entry;
1203 uint32_t i; 1195 uint32_t i;
1204 struct batadv_hashtable *hash = bat_priv->tt.global_hash; 1196 struct batadv_hashtable *hash = bat_priv->tt.global_hash;
1205 struct hlist_node *node, *safe; 1197 struct hlist_node *safe;
1206 struct hlist_head *head; 1198 struct hlist_head *head;
1207 spinlock_t *list_lock; /* protects write access to the hash lists */ 1199 spinlock_t *list_lock; /* protects write access to the hash lists */
1208 1200
@@ -1214,7 +1206,7 @@ void batadv_tt_global_del_orig(struct batadv_priv *bat_priv,
1214 list_lock = &hash->list_locks[i]; 1206 list_lock = &hash->list_locks[i];
1215 1207
1216 spin_lock_bh(list_lock); 1208 spin_lock_bh(list_lock);
1217 hlist_for_each_entry_safe(tt_common_entry, node, safe, 1209 hlist_for_each_entry_safe(tt_common_entry, safe,
1218 head, hash_entry) { 1210 head, hash_entry) {
1219 tt_global = container_of(tt_common_entry, 1211 tt_global = container_of(tt_common_entry,
1220 struct batadv_tt_global_entry, 1212 struct batadv_tt_global_entry,
@@ -1227,7 +1219,7 @@ void batadv_tt_global_del_orig(struct batadv_priv *bat_priv,
1227 batadv_dbg(BATADV_DBG_TT, bat_priv, 1219 batadv_dbg(BATADV_DBG_TT, bat_priv,
1228 "Deleting global tt entry %pM: %s\n", 1220 "Deleting global tt entry %pM: %s\n",
1229 tt_global->common.addr, message); 1221 tt_global->common.addr, message);
1230 hlist_del_rcu(node); 1222 hlist_del_rcu(&tt_common_entry->hash_entry);
1231 batadv_tt_global_entry_free_ref(tt_global); 1223 batadv_tt_global_entry_free_ref(tt_global);
1232 } 1224 }
1233 } 1225 }
@@ -1262,7 +1254,7 @@ static void batadv_tt_global_purge(struct batadv_priv *bat_priv)
1262{ 1254{
1263 struct batadv_hashtable *hash = bat_priv->tt.global_hash; 1255 struct batadv_hashtable *hash = bat_priv->tt.global_hash;
1264 struct hlist_head *head; 1256 struct hlist_head *head;
1265 struct hlist_node *node, *node_tmp; 1257 struct hlist_node *node_tmp;
1266 spinlock_t *list_lock; /* protects write access to the hash lists */ 1258 spinlock_t *list_lock; /* protects write access to the hash lists */
1267 uint32_t i; 1259 uint32_t i;
1268 char *msg = NULL; 1260 char *msg = NULL;
@@ -1274,7 +1266,7 @@ static void batadv_tt_global_purge(struct batadv_priv *bat_priv)
1274 list_lock = &hash->list_locks[i]; 1266 list_lock = &hash->list_locks[i];
1275 1267
1276 spin_lock_bh(list_lock); 1268 spin_lock_bh(list_lock);
1277 hlist_for_each_entry_safe(tt_common, node, node_tmp, head, 1269 hlist_for_each_entry_safe(tt_common, node_tmp, head,
1278 hash_entry) { 1270 hash_entry) {
1279 tt_global = container_of(tt_common, 1271 tt_global = container_of(tt_common,
1280 struct batadv_tt_global_entry, 1272 struct batadv_tt_global_entry,
@@ -1287,7 +1279,7 @@ static void batadv_tt_global_purge(struct batadv_priv *bat_priv)
1287 "Deleting global tt entry (%pM): %s\n", 1279 "Deleting global tt entry (%pM): %s\n",
1288 tt_global->common.addr, msg); 1280 tt_global->common.addr, msg);
1289 1281
1290 hlist_del_rcu(node); 1282 hlist_del_rcu(&tt_common->hash_entry);
1291 1283
1292 batadv_tt_global_entry_free_ref(tt_global); 1284 batadv_tt_global_entry_free_ref(tt_global);
1293 } 1285 }
@@ -1301,7 +1293,7 @@ static void batadv_tt_global_table_free(struct batadv_priv *bat_priv)
1301 spinlock_t *list_lock; /* protects write access to the hash lists */ 1293 spinlock_t *list_lock; /* protects write access to the hash lists */
1302 struct batadv_tt_common_entry *tt_common_entry; 1294 struct batadv_tt_common_entry *tt_common_entry;
1303 struct batadv_tt_global_entry *tt_global; 1295 struct batadv_tt_global_entry *tt_global;
1304 struct hlist_node *node, *node_tmp; 1296 struct hlist_node *node_tmp;
1305 struct hlist_head *head; 1297 struct hlist_head *head;
1306 uint32_t i; 1298 uint32_t i;
1307 1299
@@ -1315,9 +1307,9 @@ static void batadv_tt_global_table_free(struct batadv_priv *bat_priv)
1315 list_lock = &hash->list_locks[i]; 1307 list_lock = &hash->list_locks[i];
1316 1308
1317 spin_lock_bh(list_lock); 1309 spin_lock_bh(list_lock);
1318 hlist_for_each_entry_safe(tt_common_entry, node, node_tmp, 1310 hlist_for_each_entry_safe(tt_common_entry, node_tmp,
1319 head, hash_entry) { 1311 head, hash_entry) {
1320 hlist_del_rcu(node); 1312 hlist_del_rcu(&tt_common_entry->hash_entry);
1321 tt_global = container_of(tt_common_entry, 1313 tt_global = container_of(tt_common_entry,
1322 struct batadv_tt_global_entry, 1314 struct batadv_tt_global_entry,
1323 common); 1315 common);
@@ -1397,7 +1389,6 @@ static uint16_t batadv_tt_global_crc(struct batadv_priv *bat_priv,
1397 struct batadv_hashtable *hash = bat_priv->tt.global_hash; 1389 struct batadv_hashtable *hash = bat_priv->tt.global_hash;
1398 struct batadv_tt_common_entry *tt_common; 1390 struct batadv_tt_common_entry *tt_common;
1399 struct batadv_tt_global_entry *tt_global; 1391 struct batadv_tt_global_entry *tt_global;
1400 struct hlist_node *node;
1401 struct hlist_head *head; 1392 struct hlist_head *head;
1402 uint32_t i; 1393 uint32_t i;
1403 int j; 1394 int j;
@@ -1406,7 +1397,7 @@ static uint16_t batadv_tt_global_crc(struct batadv_priv *bat_priv,
1406 head = &hash->table[i]; 1397 head = &hash->table[i];
1407 1398
1408 rcu_read_lock(); 1399 rcu_read_lock();
1409 hlist_for_each_entry_rcu(tt_common, node, head, hash_entry) { 1400 hlist_for_each_entry_rcu(tt_common, head, hash_entry) {
1410 tt_global = container_of(tt_common, 1401 tt_global = container_of(tt_common,
1411 struct batadv_tt_global_entry, 1402 struct batadv_tt_global_entry,
1412 common); 1403 common);
@@ -1449,7 +1440,6 @@ static uint16_t batadv_tt_local_crc(struct batadv_priv *bat_priv)
1449 uint16_t total = 0, total_one; 1440 uint16_t total = 0, total_one;
1450 struct batadv_hashtable *hash = bat_priv->tt.local_hash; 1441 struct batadv_hashtable *hash = bat_priv->tt.local_hash;
1451 struct batadv_tt_common_entry *tt_common; 1442 struct batadv_tt_common_entry *tt_common;
1452 struct hlist_node *node;
1453 struct hlist_head *head; 1443 struct hlist_head *head;
1454 uint32_t i; 1444 uint32_t i;
1455 int j; 1445 int j;
@@ -1458,7 +1448,7 @@ static uint16_t batadv_tt_local_crc(struct batadv_priv *bat_priv)
1458 head = &hash->table[i]; 1448 head = &hash->table[i];
1459 1449
1460 rcu_read_lock(); 1450 rcu_read_lock();
1461 hlist_for_each_entry_rcu(tt_common, node, head, hash_entry) { 1451 hlist_for_each_entry_rcu(tt_common, head, hash_entry) {
1462 /* not yet committed clients have not to be taken into 1452 /* not yet committed clients have not to be taken into
1463 * account while computing the CRC 1453 * account while computing the CRC
1464 */ 1454 */
@@ -1597,7 +1587,6 @@ batadv_tt_response_fill_table(uint16_t tt_len, uint8_t ttvn,
1597 struct batadv_tt_common_entry *tt_common_entry; 1587 struct batadv_tt_common_entry *tt_common_entry;
1598 struct batadv_tt_query_packet *tt_response; 1588 struct batadv_tt_query_packet *tt_response;
1599 struct batadv_tt_change *tt_change; 1589 struct batadv_tt_change *tt_change;
1600 struct hlist_node *node;
1601 struct hlist_head *head; 1590 struct hlist_head *head;
1602 struct sk_buff *skb = NULL; 1591 struct sk_buff *skb = NULL;
1603 uint16_t tt_tot, tt_count; 1592 uint16_t tt_tot, tt_count;
@@ -1627,7 +1616,7 @@ batadv_tt_response_fill_table(uint16_t tt_len, uint8_t ttvn,
1627 for (i = 0; i < hash->size; i++) { 1616 for (i = 0; i < hash->size; i++) {
1628 head = &hash->table[i]; 1617 head = &hash->table[i];
1629 1618
1630 hlist_for_each_entry_rcu(tt_common_entry, node, 1619 hlist_for_each_entry_rcu(tt_common_entry,
1631 head, hash_entry) { 1620 head, hash_entry) {
1632 if (tt_count == tt_tot) 1621 if (tt_count == tt_tot)
1633 break; 1622 break;
@@ -2307,7 +2296,6 @@ static uint16_t batadv_tt_set_flags(struct batadv_hashtable *hash,
2307 uint32_t i; 2296 uint32_t i;
2308 uint16_t changed_num = 0; 2297 uint16_t changed_num = 0;
2309 struct hlist_head *head; 2298 struct hlist_head *head;
2310 struct hlist_node *node;
2311 struct batadv_tt_common_entry *tt_common_entry; 2299 struct batadv_tt_common_entry *tt_common_entry;
2312 2300
2313 if (!hash) 2301 if (!hash)
@@ -2317,7 +2305,7 @@ static uint16_t batadv_tt_set_flags(struct batadv_hashtable *hash,
2317 head = &hash->table[i]; 2305 head = &hash->table[i];
2318 2306
2319 rcu_read_lock(); 2307 rcu_read_lock();
2320 hlist_for_each_entry_rcu(tt_common_entry, node, 2308 hlist_for_each_entry_rcu(tt_common_entry,
2321 head, hash_entry) { 2309 head, hash_entry) {
2322 if (enable) { 2310 if (enable) {
2323 if ((tt_common_entry->flags & flags) == flags) 2311 if ((tt_common_entry->flags & flags) == flags)
@@ -2342,7 +2330,7 @@ static void batadv_tt_local_purge_pending_clients(struct batadv_priv *bat_priv)
2342 struct batadv_hashtable *hash = bat_priv->tt.local_hash; 2330 struct batadv_hashtable *hash = bat_priv->tt.local_hash;
2343 struct batadv_tt_common_entry *tt_common; 2331 struct batadv_tt_common_entry *tt_common;
2344 struct batadv_tt_local_entry *tt_local; 2332 struct batadv_tt_local_entry *tt_local;
2345 struct hlist_node *node, *node_tmp; 2333 struct hlist_node *node_tmp;
2346 struct hlist_head *head; 2334 struct hlist_head *head;
2347 spinlock_t *list_lock; /* protects write access to the hash lists */ 2335 spinlock_t *list_lock; /* protects write access to the hash lists */
2348 uint32_t i; 2336 uint32_t i;
@@ -2355,7 +2343,7 @@ static void batadv_tt_local_purge_pending_clients(struct batadv_priv *bat_priv)
2355 list_lock = &hash->list_locks[i]; 2343 list_lock = &hash->list_locks[i];
2356 2344
2357 spin_lock_bh(list_lock); 2345 spin_lock_bh(list_lock);
2358 hlist_for_each_entry_safe(tt_common, node, node_tmp, head, 2346 hlist_for_each_entry_safe(tt_common, node_tmp, head,
2359 hash_entry) { 2347 hash_entry) {
2360 if (!(tt_common->flags & BATADV_TT_CLIENT_PENDING)) 2348 if (!(tt_common->flags & BATADV_TT_CLIENT_PENDING))
2361 continue; 2349 continue;
@@ -2365,7 +2353,7 @@ static void batadv_tt_local_purge_pending_clients(struct batadv_priv *bat_priv)
2365 tt_common->addr); 2353 tt_common->addr);
2366 2354
2367 atomic_dec(&bat_priv->tt.local_entry_num); 2355 atomic_dec(&bat_priv->tt.local_entry_num);
2368 hlist_del_rcu(node); 2356 hlist_del_rcu(&tt_common->hash_entry);
2369 tt_local = container_of(tt_common, 2357 tt_local = container_of(tt_common,
2370 struct batadv_tt_local_entry, 2358 struct batadv_tt_local_entry,
2371 common); 2359 common);
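
The last hunk above is the one place in translation-table.c where the dropped cursor was load-bearing: with no struct hlist_node *node left in scope, deletion inside a safe walk has to name the list member embedded in the entry, hence hlist_del_rcu(&tt_common->hash_entry). A minimal sketch of the resulting idiom, assuming a kernel build context and reusing the names from the hunk (the wrapper function itself is illustrative, not part of the patch):

    static void purge_pending(struct hlist_head *head, spinlock_t *list_lock)
    {
    	struct batadv_tt_common_entry *tt_common;
    	struct hlist_node *node_tmp;
    
    	spin_lock_bh(list_lock);
    	hlist_for_each_entry_safe(tt_common, node_tmp, head, hash_entry) {
    		if (!(tt_common->flags & BATADV_TT_CLIENT_PENDING))
    			continue;
    		/* no raw cursor anymore: delete via the embedded member */
    		hlist_del_rcu(&tt_common->hash_entry);
    	}
    	spin_unlock_bh(list_lock);
    }

Only the lookahead pointer (node_tmp) survives as a plain hlist_node, because the safe variant must snapshot the next link before the body can free the current entry.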
diff --git a/net/batman-adv/vis.c b/net/batman-adv/vis.c
index 22d2785177d1..c053244b97bd 100644
--- a/net/batman-adv/vis.c
+++ b/net/batman-adv/vis.c
@@ -97,7 +97,6 @@ batadv_vis_hash_find(struct batadv_priv *bat_priv, const void *data)
97{ 97{
98 struct batadv_hashtable *hash = bat_priv->vis.hash; 98 struct batadv_hashtable *hash = bat_priv->vis.hash;
99 struct hlist_head *head; 99 struct hlist_head *head;
100 struct hlist_node *node;
101 struct batadv_vis_info *vis_info, *vis_info_tmp = NULL; 100 struct batadv_vis_info *vis_info, *vis_info_tmp = NULL;
102 uint32_t index; 101 uint32_t index;
103 102
@@ -108,8 +107,8 @@ batadv_vis_hash_find(struct batadv_priv *bat_priv, const void *data)
108 head = &hash->table[index]; 107 head = &hash->table[index];
109 108
110 rcu_read_lock(); 109 rcu_read_lock();
111 hlist_for_each_entry_rcu(vis_info, node, head, hash_entry) { 110 hlist_for_each_entry_rcu(vis_info, head, hash_entry) {
112 if (!batadv_vis_info_cmp(node, data)) 111 if (!batadv_vis_info_cmp(&vis_info->hash_entry, data))
113 continue; 112 continue;
114 113
115 vis_info_tmp = vis_info; 114 vis_info_tmp = vis_info;
@@ -128,9 +127,8 @@ static void batadv_vis_data_insert_interface(const uint8_t *interface,
128 bool primary) 127 bool primary)
129{ 128{
130 struct batadv_vis_if_list_entry *entry; 129 struct batadv_vis_if_list_entry *entry;
131 struct hlist_node *pos;
132 130
133 hlist_for_each_entry(entry, pos, if_list, list) { 131 hlist_for_each_entry(entry, if_list, list) {
134 if (batadv_compare_eth(entry->addr, interface)) 132 if (batadv_compare_eth(entry->addr, interface))
135 return; 133 return;
136 } 134 }
@@ -148,9 +146,8 @@ static void batadv_vis_data_read_prim_sec(struct seq_file *seq,
148 const struct hlist_head *if_list) 146 const struct hlist_head *if_list)
149{ 147{
150 struct batadv_vis_if_list_entry *entry; 148 struct batadv_vis_if_list_entry *entry;
151 struct hlist_node *pos;
152 149
153 hlist_for_each_entry(entry, pos, if_list, list) { 150 hlist_for_each_entry(entry, if_list, list) {
154 if (entry->primary) 151 if (entry->primary)
155 seq_printf(seq, "PRIMARY, "); 152 seq_printf(seq, "PRIMARY, ");
156 else 153 else
@@ -198,9 +195,8 @@ static void batadv_vis_data_read_entries(struct seq_file *seq,
198{ 195{
199 int i; 196 int i;
200 struct batadv_vis_if_list_entry *entry; 197 struct batadv_vis_if_list_entry *entry;
201 struct hlist_node *pos;
202 198
203 hlist_for_each_entry(entry, pos, list, list) { 199 hlist_for_each_entry(entry, list, list) {
204 seq_printf(seq, "%pM,", entry->addr); 200 seq_printf(seq, "%pM,", entry->addr);
205 201
206 for (i = 0; i < packet->entries; i++) 202 for (i = 0; i < packet->entries; i++)
@@ -218,17 +214,16 @@ static void batadv_vis_data_read_entries(struct seq_file *seq,
218static void batadv_vis_seq_print_text_bucket(struct seq_file *seq, 214static void batadv_vis_seq_print_text_bucket(struct seq_file *seq,
219 const struct hlist_head *head) 215 const struct hlist_head *head)
220{ 216{
221 struct hlist_node *node;
222 struct batadv_vis_info *info; 217 struct batadv_vis_info *info;
223 struct batadv_vis_packet *packet; 218 struct batadv_vis_packet *packet;
224 uint8_t *entries_pos; 219 uint8_t *entries_pos;
225 struct batadv_vis_info_entry *entries; 220 struct batadv_vis_info_entry *entries;
226 struct batadv_vis_if_list_entry *entry; 221 struct batadv_vis_if_list_entry *entry;
227 struct hlist_node *pos, *n; 222 struct hlist_node *n;
228 223
229 HLIST_HEAD(vis_if_list); 224 HLIST_HEAD(vis_if_list);
230 225
231 hlist_for_each_entry_rcu(info, node, head, hash_entry) { 226 hlist_for_each_entry_rcu(info, head, hash_entry) {
232 packet = (struct batadv_vis_packet *)info->skb_packet->data; 227 packet = (struct batadv_vis_packet *)info->skb_packet->data;
233 entries_pos = (uint8_t *)packet + sizeof(*packet); 228 entries_pos = (uint8_t *)packet + sizeof(*packet);
234 entries = (struct batadv_vis_info_entry *)entries_pos; 229 entries = (struct batadv_vis_info_entry *)entries_pos;
@@ -240,7 +235,7 @@ static void batadv_vis_seq_print_text_bucket(struct seq_file *seq,
240 batadv_vis_data_read_entries(seq, &vis_if_list, packet, 235 batadv_vis_data_read_entries(seq, &vis_if_list, packet,
241 entries); 236 entries);
242 237
243 hlist_for_each_entry_safe(entry, pos, n, &vis_if_list, list) { 238 hlist_for_each_entry_safe(entry, n, &vis_if_list, list) {
244 hlist_del(&entry->list); 239 hlist_del(&entry->list);
245 kfree(entry); 240 kfree(entry);
246 } 241 }
@@ -519,7 +514,6 @@ static int batadv_find_best_vis_server(struct batadv_priv *bat_priv,
519{ 514{
520 struct batadv_hashtable *hash = bat_priv->orig_hash; 515 struct batadv_hashtable *hash = bat_priv->orig_hash;
521 struct batadv_neigh_node *router; 516 struct batadv_neigh_node *router;
522 struct hlist_node *node;
523 struct hlist_head *head; 517 struct hlist_head *head;
524 struct batadv_orig_node *orig_node; 518 struct batadv_orig_node *orig_node;
525 struct batadv_vis_packet *packet; 519 struct batadv_vis_packet *packet;
@@ -532,7 +526,7 @@ static int batadv_find_best_vis_server(struct batadv_priv *bat_priv,
532 head = &hash->table[i]; 526 head = &hash->table[i];
533 527
534 rcu_read_lock(); 528 rcu_read_lock();
535 hlist_for_each_entry_rcu(orig_node, node, head, hash_entry) { 529 hlist_for_each_entry_rcu(orig_node, head, hash_entry) {
536 router = batadv_orig_node_get_router(orig_node); 530 router = batadv_orig_node_get_router(orig_node);
537 if (!router) 531 if (!router)
538 continue; 532 continue;
@@ -571,7 +565,6 @@ static bool batadv_vis_packet_full(const struct batadv_vis_info *info)
571static int batadv_generate_vis_packet(struct batadv_priv *bat_priv) 565static int batadv_generate_vis_packet(struct batadv_priv *bat_priv)
572{ 566{
573 struct batadv_hashtable *hash = bat_priv->orig_hash; 567 struct batadv_hashtable *hash = bat_priv->orig_hash;
574 struct hlist_node *node;
575 struct hlist_head *head; 568 struct hlist_head *head;
576 struct batadv_orig_node *orig_node; 569 struct batadv_orig_node *orig_node;
577 struct batadv_neigh_node *router; 570 struct batadv_neigh_node *router;
@@ -605,7 +598,7 @@ static int batadv_generate_vis_packet(struct batadv_priv *bat_priv)
605 head = &hash->table[i]; 598 head = &hash->table[i];
606 599
607 rcu_read_lock(); 600 rcu_read_lock();
608 hlist_for_each_entry_rcu(orig_node, node, head, hash_entry) { 601 hlist_for_each_entry_rcu(orig_node, head, hash_entry) {
609 router = batadv_orig_node_get_router(orig_node); 602 router = batadv_orig_node_get_router(orig_node);
610 if (!router) 603 if (!router)
611 continue; 604 continue;
@@ -644,7 +637,7 @@ next:
644 head = &hash->table[i]; 637 head = &hash->table[i];
645 638
646 rcu_read_lock(); 639 rcu_read_lock();
647 hlist_for_each_entry_rcu(tt_common_entry, node, head, 640 hlist_for_each_entry_rcu(tt_common_entry, head,
648 hash_entry) { 641 hash_entry) {
649 packet_pos = skb_put(info->skb_packet, sizeof(*entry)); 642 packet_pos = skb_put(info->skb_packet, sizeof(*entry));
650 entry = (struct batadv_vis_info_entry *)packet_pos; 643 entry = (struct batadv_vis_info_entry *)packet_pos;
@@ -673,14 +666,14 @@ static void batadv_purge_vis_packets(struct batadv_priv *bat_priv)
673{ 666{
674 uint32_t i; 667 uint32_t i;
675 struct batadv_hashtable *hash = bat_priv->vis.hash; 668 struct batadv_hashtable *hash = bat_priv->vis.hash;
676 struct hlist_node *node, *node_tmp; 669 struct hlist_node *node_tmp;
677 struct hlist_head *head; 670 struct hlist_head *head;
678 struct batadv_vis_info *info; 671 struct batadv_vis_info *info;
679 672
680 for (i = 0; i < hash->size; i++) { 673 for (i = 0; i < hash->size; i++) {
681 head = &hash->table[i]; 674 head = &hash->table[i];
682 675
683 hlist_for_each_entry_safe(info, node, node_tmp, 676 hlist_for_each_entry_safe(info, node_tmp,
684 head, hash_entry) { 677 head, hash_entry) {
685 /* never purge own data. */ 678 /* never purge own data. */
686 if (info == bat_priv->vis.my_info) 679 if (info == bat_priv->vis.my_info)
@@ -688,7 +681,7 @@ static void batadv_purge_vis_packets(struct batadv_priv *bat_priv)
688 681
689 if (batadv_has_timed_out(info->first_seen, 682 if (batadv_has_timed_out(info->first_seen,
690 BATADV_VIS_TIMEOUT)) { 683 BATADV_VIS_TIMEOUT)) {
691 hlist_del(node); 684 hlist_del(&info->hash_entry);
692 batadv_send_list_del(info); 685 batadv_send_list_del(info);
693 kref_put(&info->refcount, batadv_free_info); 686 kref_put(&info->refcount, batadv_free_info);
694 } 687 }
@@ -700,7 +693,6 @@ static void batadv_broadcast_vis_packet(struct batadv_priv *bat_priv,
700 struct batadv_vis_info *info) 693 struct batadv_vis_info *info)
701{ 694{
702 struct batadv_hashtable *hash = bat_priv->orig_hash; 695 struct batadv_hashtable *hash = bat_priv->orig_hash;
703 struct hlist_node *node;
704 struct hlist_head *head; 696 struct hlist_head *head;
705 struct batadv_orig_node *orig_node; 697 struct batadv_orig_node *orig_node;
706 struct batadv_vis_packet *packet; 698 struct batadv_vis_packet *packet;
@@ -715,7 +707,7 @@ static void batadv_broadcast_vis_packet(struct batadv_priv *bat_priv,
715 head = &hash->table[i]; 707 head = &hash->table[i];
716 708
717 rcu_read_lock(); 709 rcu_read_lock();
718 hlist_for_each_entry_rcu(orig_node, node, head, hash_entry) { 710 hlist_for_each_entry_rcu(orig_node, head, hash_entry) {
719 /* if it's a vis server and reachable, send it. */ 711 /* if it's a vis server and reachable, send it. */
720 if (!(orig_node->flags & BATADV_VIS_SERVER)) 712 if (!(orig_node->flags & BATADV_VIS_SERVER))
721 continue; 713 continue;
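
The same substitution repeats throughout vis.c: every hlist_for_each_entry{,_rcu,_safe} loses its node argument, and batadv_vis_info_cmp() and hlist_del() are pointed at &vis_info->hash_entry instead of the cursor. What makes this possible is the reworked iterator in include/linux/list.h, which derives the typed cursor directly from the node chain and yields NULL at the end of it. An abridged sketch of the post-series shape (trimmed for readability, not a verbatim copy of the header):

    #define hlist_entry_safe(ptr, type, member) \
    	({ typeof(ptr) ____ptr = (ptr); \
    	   ____ptr ? hlist_entry(____ptr, type, member) : NULL; \
    	})
    
    #define hlist_for_each_entry(pos, head, member)				\
    	for (pos = hlist_entry_safe((head)->first, typeof(*(pos)), member);\
    	     pos;							\
    	     pos = hlist_entry_safe((pos)->member.next, typeof(*(pos)), member))

Because hlist_entry_safe() maps a NULL node to a NULL entry pointer, the loop condition can test pos itself; the separate node cursor carried no information the typed pointer does not.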
diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c
index 07f073935811..6a93614f2c49 100644
--- a/net/bluetooth/hci_sock.c
+++ b/net/bluetooth/hci_sock.c
@@ -70,14 +70,13 @@ static struct bt_sock_list hci_sk_list = {
70void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb) 70void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb)
71{ 71{
72 struct sock *sk; 72 struct sock *sk;
73 struct hlist_node *node;
74 struct sk_buff *skb_copy = NULL; 73 struct sk_buff *skb_copy = NULL;
75 74
76 BT_DBG("hdev %p len %d", hdev, skb->len); 75 BT_DBG("hdev %p len %d", hdev, skb->len);
77 76
78 read_lock(&hci_sk_list.lock); 77 read_lock(&hci_sk_list.lock);
79 78
80 sk_for_each(sk, node, &hci_sk_list.head) { 79 sk_for_each(sk, &hci_sk_list.head) {
81 struct hci_filter *flt; 80 struct hci_filter *flt;
82 struct sk_buff *nskb; 81 struct sk_buff *nskb;
83 82
@@ -142,13 +141,12 @@ void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb)
142void hci_send_to_control(struct sk_buff *skb, struct sock *skip_sk) 141void hci_send_to_control(struct sk_buff *skb, struct sock *skip_sk)
143{ 142{
144 struct sock *sk; 143 struct sock *sk;
145 struct hlist_node *node;
146 144
147 BT_DBG("len %d", skb->len); 145 BT_DBG("len %d", skb->len);
148 146
149 read_lock(&hci_sk_list.lock); 147 read_lock(&hci_sk_list.lock);
150 148
151 sk_for_each(sk, node, &hci_sk_list.head) { 149 sk_for_each(sk, &hci_sk_list.head) {
152 struct sk_buff *nskb; 150 struct sk_buff *nskb;
153 151
154 /* Skip the original socket */ 152 /* Skip the original socket */
@@ -176,7 +174,6 @@ void hci_send_to_control(struct sk_buff *skb, struct sock *skip_sk)
176void hci_send_to_monitor(struct hci_dev *hdev, struct sk_buff *skb) 174void hci_send_to_monitor(struct hci_dev *hdev, struct sk_buff *skb)
177{ 175{
178 struct sock *sk; 176 struct sock *sk;
179 struct hlist_node *node;
180 struct sk_buff *skb_copy = NULL; 177 struct sk_buff *skb_copy = NULL;
181 __le16 opcode; 178 __le16 opcode;
182 179
@@ -210,7 +207,7 @@ void hci_send_to_monitor(struct hci_dev *hdev, struct sk_buff *skb)
210 207
211 read_lock(&hci_sk_list.lock); 208 read_lock(&hci_sk_list.lock);
212 209
213 sk_for_each(sk, node, &hci_sk_list.head) { 210 sk_for_each(sk, &hci_sk_list.head) {
214 struct sk_buff *nskb; 211 struct sk_buff *nskb;
215 212
216 if (sk->sk_state != BT_BOUND) 213 if (sk->sk_state != BT_BOUND)
@@ -251,13 +248,12 @@ void hci_send_to_monitor(struct hci_dev *hdev, struct sk_buff *skb)
251static void send_monitor_event(struct sk_buff *skb) 248static void send_monitor_event(struct sk_buff *skb)
252{ 249{
253 struct sock *sk; 250 struct sock *sk;
254 struct hlist_node *node;
255 251
256 BT_DBG("len %d", skb->len); 252 BT_DBG("len %d", skb->len);
257 253
258 read_lock(&hci_sk_list.lock); 254 read_lock(&hci_sk_list.lock);
259 255
260 sk_for_each(sk, node, &hci_sk_list.head) { 256 sk_for_each(sk, &hci_sk_list.head) {
261 struct sk_buff *nskb; 257 struct sk_buff *nskb;
262 258
263 if (sk->sk_state != BT_BOUND) 259 if (sk->sk_state != BT_BOUND)
@@ -393,11 +389,10 @@ void hci_sock_dev_event(struct hci_dev *hdev, int event)
393 389
394 if (event == HCI_DEV_UNREG) { 390 if (event == HCI_DEV_UNREG) {
395 struct sock *sk; 391 struct sock *sk;
396 struct hlist_node *node;
397 392
398 /* Detach sockets from device */ 393 /* Detach sockets from device */
399 read_lock(&hci_sk_list.lock); 394 read_lock(&hci_sk_list.lock);
400 sk_for_each(sk, node, &hci_sk_list.head) { 395 sk_for_each(sk, &hci_sk_list.head) {
401 bh_lock_sock_nested(sk); 396 bh_lock_sock_nested(sk);
402 if (hci_pi(sk)->hdev == hdev) { 397 if (hci_pi(sk)->hdev == hdev) {
403 hci_pi(sk)->hdev = NULL; 398 hci_pi(sk)->hdev = NULL;
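
The hci_sock.c hunks are all instances of the sk_for_each conversion: the socket walkers in include/net/sock.h are thin wrappers over the hlist iterators, so they shed the node parameter in lockstep. Post-series the wrapper reduces to roughly this (sketch, abridged):

    #define sk_for_each(__sk, list) \
    	hlist_for_each_entry(__sk, list, sk_node)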
diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c
index ce3f6658f4b2..c23bae86263b 100644
--- a/net/bluetooth/rfcomm/sock.c
+++ b/net/bluetooth/rfcomm/sock.c
@@ -107,15 +107,14 @@ static void rfcomm_sk_state_change(struct rfcomm_dlc *d, int err)
107static struct sock *__rfcomm_get_sock_by_addr(u8 channel, bdaddr_t *src) 107static struct sock *__rfcomm_get_sock_by_addr(u8 channel, bdaddr_t *src)
108{ 108{
109 struct sock *sk = NULL; 109 struct sock *sk = NULL;
110 struct hlist_node *node;
111 110
112 sk_for_each(sk, node, &rfcomm_sk_list.head) { 111 sk_for_each(sk, &rfcomm_sk_list.head) {
113 if (rfcomm_pi(sk)->channel == channel && 112 if (rfcomm_pi(sk)->channel == channel &&
114 !bacmp(&bt_sk(sk)->src, src)) 113 !bacmp(&bt_sk(sk)->src, src))
115 break; 114 break;
116 } 115 }
117 116
118 return node ? sk : NULL; 117 return sk ? sk : NULL;
119} 118}
120 119
121/* Find socket with channel and source bdaddr. 120/* Find socket with channel and source bdaddr.
@@ -124,11 +123,10 @@ static struct sock *__rfcomm_get_sock_by_addr(u8 channel, bdaddr_t *src)
124static struct sock *rfcomm_get_sock_by_channel(int state, u8 channel, bdaddr_t *src) 123static struct sock *rfcomm_get_sock_by_channel(int state, u8 channel, bdaddr_t *src)
125{ 124{
126 struct sock *sk = NULL, *sk1 = NULL; 125 struct sock *sk = NULL, *sk1 = NULL;
127 struct hlist_node *node;
128 126
129 read_lock(&rfcomm_sk_list.lock); 127 read_lock(&rfcomm_sk_list.lock);
130 128
131 sk_for_each(sk, node, &rfcomm_sk_list.head) { 129 sk_for_each(sk, &rfcomm_sk_list.head) {
132 if (state && sk->sk_state != state) 130 if (state && sk->sk_state != state)
133 continue; 131 continue;
134 132
@@ -145,7 +143,7 @@ static struct sock *rfcomm_get_sock_by_channel(int state, u8 channel, bdaddr_t *
145 143
146 read_unlock(&rfcomm_sk_list.lock); 144 read_unlock(&rfcomm_sk_list.lock);
147 145
148 return node ? sk : sk1; 146 return sk ? sk : sk1;
149} 147}
150 148
151static void rfcomm_sock_destruct(struct sock *sk) 149static void rfcomm_sock_destruct(struct sock *sk)
@@ -970,11 +968,10 @@ done:
970static int rfcomm_sock_debugfs_show(struct seq_file *f, void *p) 968static int rfcomm_sock_debugfs_show(struct seq_file *f, void *p)
971{ 969{
972 struct sock *sk; 970 struct sock *sk;
973 struct hlist_node *node;
974 971
975 read_lock(&rfcomm_sk_list.lock); 972 read_lock(&rfcomm_sk_list.lock);
976 973
977 sk_for_each(sk, node, &rfcomm_sk_list.head) { 974 sk_for_each(sk, &rfcomm_sk_list.head) {
978 seq_printf(f, "%pMR %pMR %d %d\n", 975 seq_printf(f, "%pMR %pMR %d %d\n",
979 &bt_sk(sk)->src, &bt_sk(sk)->dst, 976 &bt_sk(sk)->src, &bt_sk(sk)->dst,
980 sk->sk_state, rfcomm_pi(sk)->channel); 977 sk->sk_state, rfcomm_pi(sk)->channel);
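
The two return-statement hunks in rfcomm/sock.c are the only behavioural subtlety in this file. Previously an exhausted sk_for_each left sk pointing at the last entry and only the raw node was reliably NULL, so the old code had to test node. With the new iterator the typed cursor is itself NULL once the walk completes without a break, so testing sk is equivalent; sk ? sk : NULL could in fact be collapsed to plain sk. A minimal sketch of the idiom, assuming the rfcomm_pi() accessor from this file (locking elided, helper name illustrative):

    static struct sock *find_by_channel(struct hlist_head *head, u8 channel)
    {
    	struct sock *sk;
    
    	sk_for_each(sk, head) {
    		if (rfcomm_pi(sk)->channel == channel)
    			break;	/* sk is left pointing at the match */
    	}
    
    	return sk;	/* NULL iff the loop ran to completion */
    }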
diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c
index b5178d62064e..79d87d8d4f51 100644
--- a/net/bluetooth/sco.c
+++ b/net/bluetooth/sco.c
@@ -259,10 +259,9 @@ drop:
259/* -------- Socket interface ---------- */ 259/* -------- Socket interface ---------- */
260static struct sock *__sco_get_sock_listen_by_addr(bdaddr_t *ba) 260static struct sock *__sco_get_sock_listen_by_addr(bdaddr_t *ba)
261{ 261{
262 struct hlist_node *node;
263 struct sock *sk; 262 struct sock *sk;
264 263
265 sk_for_each(sk, node, &sco_sk_list.head) { 264 sk_for_each(sk, &sco_sk_list.head) {
266 if (sk->sk_state != BT_LISTEN) 265 if (sk->sk_state != BT_LISTEN)
267 continue; 266 continue;
268 267
@@ -279,11 +278,10 @@ static struct sock *__sco_get_sock_listen_by_addr(bdaddr_t *ba)
279static struct sock *sco_get_sock_listen(bdaddr_t *src) 278static struct sock *sco_get_sock_listen(bdaddr_t *src)
280{ 279{
281 struct sock *sk = NULL, *sk1 = NULL; 280 struct sock *sk = NULL, *sk1 = NULL;
282 struct hlist_node *node;
283 281
284 read_lock(&sco_sk_list.lock); 282 read_lock(&sco_sk_list.lock);
285 283
286 sk_for_each(sk, node, &sco_sk_list.head) { 284 sk_for_each(sk, &sco_sk_list.head) {
287 if (sk->sk_state != BT_LISTEN) 285 if (sk->sk_state != BT_LISTEN)
288 continue; 286 continue;
289 287
@@ -298,7 +296,7 @@ static struct sock *sco_get_sock_listen(bdaddr_t *src)
298 296
299 read_unlock(&sco_sk_list.lock); 297 read_unlock(&sco_sk_list.lock);
300 298
301 return node ? sk : sk1; 299 return sk ? sk : sk1;
302} 300}
303 301
304static void sco_sock_destruct(struct sock *sk) 302static void sco_sock_destruct(struct sock *sk)
@@ -951,14 +949,13 @@ static void sco_conn_ready(struct sco_conn *conn)
951int sco_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, __u8 *flags) 949int sco_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, __u8 *flags)
952{ 950{
953 struct sock *sk; 951 struct sock *sk;
954 struct hlist_node *node;
955 int lm = 0; 952 int lm = 0;
956 953
957 BT_DBG("hdev %s, bdaddr %pMR", hdev->name, bdaddr); 954 BT_DBG("hdev %s, bdaddr %pMR", hdev->name, bdaddr);
958 955
959 /* Find listening sockets */ 956 /* Find listening sockets */
960 read_lock(&sco_sk_list.lock); 957 read_lock(&sco_sk_list.lock);
961 sk_for_each(sk, node, &sco_sk_list.head) { 958 sk_for_each(sk, &sco_sk_list.head) {
962 if (sk->sk_state != BT_LISTEN) 959 if (sk->sk_state != BT_LISTEN)
963 continue; 960 continue;
964 961
@@ -1018,11 +1015,10 @@ drop:
1018static int sco_debugfs_show(struct seq_file *f, void *p) 1015static int sco_debugfs_show(struct seq_file *f, void *p)
1019{ 1016{
1020 struct sock *sk; 1017 struct sock *sk;
1021 struct hlist_node *node;
1022 1018
1023 read_lock(&sco_sk_list.lock); 1019 read_lock(&sco_sk_list.lock);
1024 1020
1025 sk_for_each(sk, node, &sco_sk_list.head) { 1021 sk_for_each(sk, &sco_sk_list.head) {
1026 seq_printf(f, "%pMR %pMR %d\n", &bt_sk(sk)->src, 1022 seq_printf(f, "%pMR %pMR %d\n", &bt_sk(sk)->src,
1027 &bt_sk(sk)->dst, sk->sk_state); 1023 &bt_sk(sk)->dst, sk->sk_state);
1028 } 1024 }
diff --git a/net/bridge/br_fdb.c b/net/bridge/br_fdb.c
index 8117900af4de..b0812c91c0f0 100644
--- a/net/bridge/br_fdb.c
+++ b/net/bridge/br_fdb.c
@@ -181,9 +181,9 @@ void br_fdb_cleanup(unsigned long _data)
181 spin_lock(&br->hash_lock); 181 spin_lock(&br->hash_lock);
182 for (i = 0; i < BR_HASH_SIZE; i++) { 182 for (i = 0; i < BR_HASH_SIZE; i++) {
183 struct net_bridge_fdb_entry *f; 183 struct net_bridge_fdb_entry *f;
184 struct hlist_node *h, *n; 184 struct hlist_node *n;
185 185
186 hlist_for_each_entry_safe(f, h, n, &br->hash[i], hlist) { 186 hlist_for_each_entry_safe(f, n, &br->hash[i], hlist) {
187 unsigned long this_timer; 187 unsigned long this_timer;
188 if (f->is_static) 188 if (f->is_static)
189 continue; 189 continue;
@@ -207,8 +207,8 @@ void br_fdb_flush(struct net_bridge *br)
207 spin_lock_bh(&br->hash_lock); 207 spin_lock_bh(&br->hash_lock);
208 for (i = 0; i < BR_HASH_SIZE; i++) { 208 for (i = 0; i < BR_HASH_SIZE; i++) {
209 struct net_bridge_fdb_entry *f; 209 struct net_bridge_fdb_entry *f;
210 struct hlist_node *h, *n; 210 struct hlist_node *n;
211 hlist_for_each_entry_safe(f, h, n, &br->hash[i], hlist) { 211 hlist_for_each_entry_safe(f, n, &br->hash[i], hlist) {
212 if (!f->is_static) 212 if (!f->is_static)
213 fdb_delete(br, f); 213 fdb_delete(br, f);
214 } 214 }
@@ -266,10 +266,9 @@ struct net_bridge_fdb_entry *__br_fdb_get(struct net_bridge *br,
266 const unsigned char *addr, 266 const unsigned char *addr,
267 __u16 vid) 267 __u16 vid)
268{ 268{
269 struct hlist_node *h;
270 struct net_bridge_fdb_entry *fdb; 269 struct net_bridge_fdb_entry *fdb;
271 270
272 hlist_for_each_entry_rcu(fdb, h, 271 hlist_for_each_entry_rcu(fdb,
273 &br->hash[br_mac_hash(addr, vid)], hlist) { 272 &br->hash[br_mac_hash(addr, vid)], hlist) {
274 if (ether_addr_equal(fdb->addr.addr, addr) && 273 if (ether_addr_equal(fdb->addr.addr, addr) &&
275 fdb->vlan_id == vid) { 274 fdb->vlan_id == vid) {
@@ -315,14 +314,13 @@ int br_fdb_fillbuf(struct net_bridge *br, void *buf,
315{ 314{
316 struct __fdb_entry *fe = buf; 315 struct __fdb_entry *fe = buf;
317 int i, num = 0; 316 int i, num = 0;
318 struct hlist_node *h;
319 struct net_bridge_fdb_entry *f; 317 struct net_bridge_fdb_entry *f;
320 318
321 memset(buf, 0, maxnum*sizeof(struct __fdb_entry)); 319 memset(buf, 0, maxnum*sizeof(struct __fdb_entry));
322 320
323 rcu_read_lock(); 321 rcu_read_lock();
324 for (i = 0; i < BR_HASH_SIZE; i++) { 322 for (i = 0; i < BR_HASH_SIZE; i++) {
325 hlist_for_each_entry_rcu(f, h, &br->hash[i], hlist) { 323 hlist_for_each_entry_rcu(f, &br->hash[i], hlist) {
326 if (num >= maxnum) 324 if (num >= maxnum)
327 goto out; 325 goto out;
328 326
@@ -363,10 +361,9 @@ static struct net_bridge_fdb_entry *fdb_find(struct hlist_head *head,
363 const unsigned char *addr, 361 const unsigned char *addr,
364 __u16 vid) 362 __u16 vid)
365{ 363{
366 struct hlist_node *h;
367 struct net_bridge_fdb_entry *fdb; 364 struct net_bridge_fdb_entry *fdb;
368 365
369 hlist_for_each_entry(fdb, h, head, hlist) { 366 hlist_for_each_entry(fdb, head, hlist) {
370 if (ether_addr_equal(fdb->addr.addr, addr) && 367 if (ether_addr_equal(fdb->addr.addr, addr) &&
371 fdb->vlan_id == vid) 368 fdb->vlan_id == vid)
372 return fdb; 369 return fdb;
@@ -378,10 +375,9 @@ static struct net_bridge_fdb_entry *fdb_find_rcu(struct hlist_head *head,
378 const unsigned char *addr, 375 const unsigned char *addr,
379 __u16 vid) 376 __u16 vid)
380{ 377{
381 struct hlist_node *h;
382 struct net_bridge_fdb_entry *fdb; 378 struct net_bridge_fdb_entry *fdb;
383 379
384 hlist_for_each_entry_rcu(fdb, h, head, hlist) { 380 hlist_for_each_entry_rcu(fdb, head, hlist) {
385 if (ether_addr_equal(fdb->addr.addr, addr) && 381 if (ether_addr_equal(fdb->addr.addr, addr) &&
386 fdb->vlan_id == vid) 382 fdb->vlan_id == vid)
387 return fdb; 383 return fdb;
@@ -593,10 +589,9 @@ int br_fdb_dump(struct sk_buff *skb,
593 goto out; 589 goto out;
594 590
595 for (i = 0; i < BR_HASH_SIZE; i++) { 591 for (i = 0; i < BR_HASH_SIZE; i++) {
596 struct hlist_node *h;
597 struct net_bridge_fdb_entry *f; 592 struct net_bridge_fdb_entry *f;
598 593
599 hlist_for_each_entry_rcu(f, h, &br->hash[i], hlist) { 594 hlist_for_each_entry_rcu(f, &br->hash[i], hlist) {
600 if (idx < cb->args[0]) 595 if (idx < cb->args[0])
601 goto skip; 596 goto skip;
602 597
diff --git a/net/bridge/br_mdb.c b/net/bridge/br_mdb.c
index 38991e03646d..9f97b850fc65 100644
--- a/net/bridge/br_mdb.c
+++ b/net/bridge/br_mdb.c
@@ -18,7 +18,6 @@ static int br_rports_fill_info(struct sk_buff *skb, struct netlink_callback *cb,
18{ 18{
19 struct net_bridge *br = netdev_priv(dev); 19 struct net_bridge *br = netdev_priv(dev);
20 struct net_bridge_port *p; 20 struct net_bridge_port *p;
21 struct hlist_node *n;
22 struct nlattr *nest; 21 struct nlattr *nest;
23 22
24 if (!br->multicast_router || hlist_empty(&br->router_list)) 23 if (!br->multicast_router || hlist_empty(&br->router_list))
@@ -28,7 +27,7 @@ static int br_rports_fill_info(struct sk_buff *skb, struct netlink_callback *cb,
28 if (nest == NULL) 27 if (nest == NULL)
29 return -EMSGSIZE; 28 return -EMSGSIZE;
30 29
31 hlist_for_each_entry_rcu(p, n, &br->router_list, rlist) { 30 hlist_for_each_entry_rcu(p, &br->router_list, rlist) {
32 if (p && nla_put_u32(skb, MDBA_ROUTER_PORT, p->dev->ifindex)) 31 if (p && nla_put_u32(skb, MDBA_ROUTER_PORT, p->dev->ifindex))
33 goto fail; 32 goto fail;
34 } 33 }
@@ -61,12 +60,11 @@ static int br_mdb_fill_info(struct sk_buff *skb, struct netlink_callback *cb,
61 return -EMSGSIZE; 60 return -EMSGSIZE;
62 61
63 for (i = 0; i < mdb->max; i++) { 62 for (i = 0; i < mdb->max; i++) {
64 struct hlist_node *h;
65 struct net_bridge_mdb_entry *mp; 63 struct net_bridge_mdb_entry *mp;
66 struct net_bridge_port_group *p, **pp; 64 struct net_bridge_port_group *p, **pp;
67 struct net_bridge_port *port; 65 struct net_bridge_port *port;
68 66
69 hlist_for_each_entry_rcu(mp, h, &mdb->mhash[i], hlist[mdb->ver]) { 67 hlist_for_each_entry_rcu(mp, &mdb->mhash[i], hlist[mdb->ver]) {
70 if (idx < s_idx) 68 if (idx < s_idx)
71 goto skip; 69 goto skip;
72 70
diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
index 7d886b0a8b7b..10e6fce1bb62 100644
--- a/net/bridge/br_multicast.c
+++ b/net/bridge/br_multicast.c
@@ -86,9 +86,8 @@ static struct net_bridge_mdb_entry *__br_mdb_ip_get(
86 struct net_bridge_mdb_htable *mdb, struct br_ip *dst, int hash) 86 struct net_bridge_mdb_htable *mdb, struct br_ip *dst, int hash)
87{ 87{
88 struct net_bridge_mdb_entry *mp; 88 struct net_bridge_mdb_entry *mp;
89 struct hlist_node *p;
90 89
91 hlist_for_each_entry_rcu(mp, p, &mdb->mhash[hash], hlist[mdb->ver]) { 90 hlist_for_each_entry_rcu(mp, &mdb->mhash[hash], hlist[mdb->ver]) {
92 if (br_ip_equal(&mp->addr, dst)) 91 if (br_ip_equal(&mp->addr, dst))
93 return mp; 92 return mp;
94 } 93 }
@@ -178,13 +177,12 @@ static int br_mdb_copy(struct net_bridge_mdb_htable *new,
178 int elasticity) 177 int elasticity)
179{ 178{
180 struct net_bridge_mdb_entry *mp; 179 struct net_bridge_mdb_entry *mp;
181 struct hlist_node *p;
182 int maxlen; 180 int maxlen;
183 int len; 181 int len;
184 int i; 182 int i;
185 183
186 for (i = 0; i < old->max; i++) 184 for (i = 0; i < old->max; i++)
187 hlist_for_each_entry(mp, p, &old->mhash[i], hlist[old->ver]) 185 hlist_for_each_entry(mp, &old->mhash[i], hlist[old->ver])
188 hlist_add_head(&mp->hlist[new->ver], 186 hlist_add_head(&mp->hlist[new->ver],
189 &new->mhash[br_ip_hash(new, &mp->addr)]); 187 &new->mhash[br_ip_hash(new, &mp->addr)]);
190 188
@@ -194,7 +192,7 @@ static int br_mdb_copy(struct net_bridge_mdb_htable *new,
194 maxlen = 0; 192 maxlen = 0;
195 for (i = 0; i < new->max; i++) { 193 for (i = 0; i < new->max; i++) {
196 len = 0; 194 len = 0;
197 hlist_for_each_entry(mp, p, &new->mhash[i], hlist[new->ver]) 195 hlist_for_each_entry(mp, &new->mhash[i], hlist[new->ver])
198 len++; 196 len++;
199 if (len > maxlen) 197 if (len > maxlen)
200 maxlen = len; 198 maxlen = len;
@@ -510,14 +508,13 @@ static struct net_bridge_mdb_entry *br_multicast_get_group(
510{ 508{
511 struct net_bridge_mdb_htable *mdb; 509 struct net_bridge_mdb_htable *mdb;
512 struct net_bridge_mdb_entry *mp; 510 struct net_bridge_mdb_entry *mp;
513 struct hlist_node *p;
514 unsigned int count = 0; 511 unsigned int count = 0;
515 unsigned int max; 512 unsigned int max;
516 int elasticity; 513 int elasticity;
517 int err; 514 int err;
518 515
519 mdb = rcu_dereference_protected(br->mdb, 1); 516 mdb = rcu_dereference_protected(br->mdb, 1);
520 hlist_for_each_entry(mp, p, &mdb->mhash[hash], hlist[mdb->ver]) { 517 hlist_for_each_entry(mp, &mdb->mhash[hash], hlist[mdb->ver]) {
521 count++; 518 count++;
522 if (unlikely(br_ip_equal(group, &mp->addr))) 519 if (unlikely(br_ip_equal(group, &mp->addr)))
523 return mp; 520 return mp;
@@ -882,10 +879,10 @@ void br_multicast_disable_port(struct net_bridge_port *port)
882{ 879{
883 struct net_bridge *br = port->br; 880 struct net_bridge *br = port->br;
884 struct net_bridge_port_group *pg; 881 struct net_bridge_port_group *pg;
885 struct hlist_node *p, *n; 882 struct hlist_node *n;
886 883
887 spin_lock(&br->multicast_lock); 884 spin_lock(&br->multicast_lock);
888 hlist_for_each_entry_safe(pg, p, n, &port->mglist, mglist) 885 hlist_for_each_entry_safe(pg, n, &port->mglist, mglist)
889 br_multicast_del_pg(br, pg); 886 br_multicast_del_pg(br, pg);
890 887
891 if (!hlist_unhashed(&port->rlist)) 888 if (!hlist_unhashed(&port->rlist))
@@ -1025,12 +1022,12 @@ static void br_multicast_add_router(struct net_bridge *br,
1025 struct net_bridge_port *port) 1022 struct net_bridge_port *port)
1026{ 1023{
1027 struct net_bridge_port *p; 1024 struct net_bridge_port *p;
1028 struct hlist_node *n, *slot = NULL; 1025 struct hlist_node *slot = NULL;
1029 1026
1030 hlist_for_each_entry(p, n, &br->router_list, rlist) { 1027 hlist_for_each_entry(p, &br->router_list, rlist) {
1031 if ((unsigned long) port >= (unsigned long) p) 1028 if ((unsigned long) port >= (unsigned long) p)
1032 break; 1029 break;
1033 slot = n; 1030 slot = &p->rlist;
1034 } 1031 }
1035 1032
1036 if (slot) 1033 if (slot)
@@ -1653,7 +1650,7 @@ void br_multicast_stop(struct net_bridge *br)
1653{ 1650{
1654 struct net_bridge_mdb_htable *mdb; 1651 struct net_bridge_mdb_htable *mdb;
1655 struct net_bridge_mdb_entry *mp; 1652 struct net_bridge_mdb_entry *mp;
1656 struct hlist_node *p, *n; 1653 struct hlist_node *n;
1657 u32 ver; 1654 u32 ver;
1658 int i; 1655 int i;
1659 1656
@@ -1670,7 +1667,7 @@ void br_multicast_stop(struct net_bridge *br)
1670 1667
1671 ver = mdb->ver; 1668 ver = mdb->ver;
1672 for (i = 0; i < mdb->max; i++) { 1669 for (i = 0; i < mdb->max; i++) {
1673 hlist_for_each_entry_safe(mp, p, n, &mdb->mhash[i], 1670 hlist_for_each_entry_safe(mp, n, &mdb->mhash[i],
1674 hlist[ver]) { 1671 hlist[ver]) {
1675 del_timer(&mp->timer); 1672 del_timer(&mp->timer);
1676 call_rcu_bh(&mp->rcu, br_multicast_free_group); 1673 call_rcu_bh(&mp->rcu, br_multicast_free_group);
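
br_multicast_add_router() is another spot where the raw cursor did real work: it remembered the insertion slot while scanning the sorted router list. With the cursor gone, the patch saves the embedded node of the last entry walked past (slot = &p->rlist) instead of the iterator variable. A sketch of the resulting function, assuming the era's hlist_add_after_rcu()/hlist_add_head_rcu() insert calls (hlist_add_after was later renamed hlist_add_behind); those calls are paraphrased from surrounding code not shown in the hunk:

    static void add_router_sorted(struct hlist_head *router_list,
    			      struct net_bridge_port *port)
    {
    	struct net_bridge_port *p;
    	struct hlist_node *slot = NULL;
    
    	hlist_for_each_entry(p, router_list, rlist) {
    		if ((unsigned long)port >= (unsigned long)p)
    			break;
    		slot = &p->rlist;	/* last node we walked past */
    	}
    
    	if (slot)
    		hlist_add_after_rcu(slot, &port->rlist);
    	else
    		hlist_add_head_rcu(&port->rlist, router_list);
    }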
diff --git a/net/can/af_can.c b/net/can/af_can.c
index ddac1ee2ed20..c48e5220bbac 100644
--- a/net/can/af_can.c
+++ b/net/can/af_can.c
@@ -516,7 +516,6 @@ void can_rx_unregister(struct net_device *dev, canid_t can_id, canid_t mask,
516{ 516{
517 struct receiver *r = NULL; 517 struct receiver *r = NULL;
518 struct hlist_head *rl; 518 struct hlist_head *rl;
519 struct hlist_node *next;
520 struct dev_rcv_lists *d; 519 struct dev_rcv_lists *d;
521 520
522 if (dev && dev->type != ARPHRD_CAN) 521 if (dev && dev->type != ARPHRD_CAN)
@@ -540,7 +539,7 @@ void can_rx_unregister(struct net_device *dev, canid_t can_id, canid_t mask,
540 * been registered before. 539 * been registered before.
541 */ 540 */
542 541
543 hlist_for_each_entry_rcu(r, next, rl, list) { 542 hlist_for_each_entry_rcu(r, rl, list) {
544 if (r->can_id == can_id && r->mask == mask && 543 if (r->can_id == can_id && r->mask == mask &&
545 r->func == func && r->data == data) 544 r->func == func && r->data == data)
546 break; 545 break;
@@ -552,7 +551,7 @@ void can_rx_unregister(struct net_device *dev, canid_t can_id, canid_t mask,
552 * will be NULL, while r will point to the last item of the list. 551 * will be NULL, while r will point to the last item of the list.
553 */ 552 */
554 553
555 if (!next) { 554 if (!r) {
556 printk(KERN_ERR "BUG: receive list entry not found for " 555 printk(KERN_ERR "BUG: receive list entry not found for "
557 "dev %s, id %03X, mask %03X\n", 556 "dev %s, id %03X, mask %03X\n",
558 DNAME(dev), can_id, mask); 557 DNAME(dev), can_id, mask);
@@ -590,7 +589,6 @@ static inline void deliver(struct sk_buff *skb, struct receiver *r)
590static int can_rcv_filter(struct dev_rcv_lists *d, struct sk_buff *skb) 589static int can_rcv_filter(struct dev_rcv_lists *d, struct sk_buff *skb)
591{ 590{
592 struct receiver *r; 591 struct receiver *r;
593 struct hlist_node *n;
594 int matches = 0; 592 int matches = 0;
595 struct can_frame *cf = (struct can_frame *)skb->data; 593 struct can_frame *cf = (struct can_frame *)skb->data;
596 canid_t can_id = cf->can_id; 594 canid_t can_id = cf->can_id;
@@ -600,7 +598,7 @@ static int can_rcv_filter(struct dev_rcv_lists *d, struct sk_buff *skb)
600 598
601 if (can_id & CAN_ERR_FLAG) { 599 if (can_id & CAN_ERR_FLAG) {
602 /* check for error message frame entries only */ 600 /* check for error message frame entries only */
603 hlist_for_each_entry_rcu(r, n, &d->rx[RX_ERR], list) { 601 hlist_for_each_entry_rcu(r, &d->rx[RX_ERR], list) {
604 if (can_id & r->mask) { 602 if (can_id & r->mask) {
605 deliver(skb, r); 603 deliver(skb, r);
606 matches++; 604 matches++;
@@ -610,13 +608,13 @@ static int can_rcv_filter(struct dev_rcv_lists *d, struct sk_buff *skb)
610 } 608 }
611 609
612 /* check for unfiltered entries */ 610 /* check for unfiltered entries */
613 hlist_for_each_entry_rcu(r, n, &d->rx[RX_ALL], list) { 611 hlist_for_each_entry_rcu(r, &d->rx[RX_ALL], list) {
614 deliver(skb, r); 612 deliver(skb, r);
615 matches++; 613 matches++;
616 } 614 }
617 615
618 /* check for can_id/mask entries */ 616 /* check for can_id/mask entries */
619 hlist_for_each_entry_rcu(r, n, &d->rx[RX_FIL], list) { 617 hlist_for_each_entry_rcu(r, &d->rx[RX_FIL], list) {
620 if ((can_id & r->mask) == r->can_id) { 618 if ((can_id & r->mask) == r->can_id) {
621 deliver(skb, r); 619 deliver(skb, r);
622 matches++; 620 matches++;
@@ -624,7 +622,7 @@ static int can_rcv_filter(struct dev_rcv_lists *d, struct sk_buff *skb)
624 } 622 }
625 623
626 /* check for inverted can_id/mask entries */ 624 /* check for inverted can_id/mask entries */
627 hlist_for_each_entry_rcu(r, n, &d->rx[RX_INV], list) { 625 hlist_for_each_entry_rcu(r, &d->rx[RX_INV], list) {
628 if ((can_id & r->mask) != r->can_id) { 626 if ((can_id & r->mask) != r->can_id) {
629 deliver(skb, r); 627 deliver(skb, r);
630 matches++; 628 matches++;
@@ -636,7 +634,7 @@ static int can_rcv_filter(struct dev_rcv_lists *d, struct sk_buff *skb)
636 return matches; 634 return matches;
637 635
638 if (can_id & CAN_EFF_FLAG) { 636 if (can_id & CAN_EFF_FLAG) {
639 hlist_for_each_entry_rcu(r, n, &d->rx[RX_EFF], list) { 637 hlist_for_each_entry_rcu(r, &d->rx[RX_EFF], list) {
640 if (r->can_id == can_id) { 638 if (r->can_id == can_id) {
641 deliver(skb, r); 639 deliver(skb, r);
642 matches++; 640 matches++;
@@ -644,7 +642,7 @@ static int can_rcv_filter(struct dev_rcv_lists *d, struct sk_buff *skb)
644 } 642 }
645 } else { 643 } else {
646 can_id &= CAN_SFF_MASK; 644 can_id &= CAN_SFF_MASK;
647 hlist_for_each_entry_rcu(r, n, &d->rx_sff[can_id], list) { 645 hlist_for_each_entry_rcu(r, &d->rx_sff[can_id], list) {
648 deliver(skb, r); 646 deliver(skb, r);
649 matches++; 647 matches++;
650 } 648 }
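
can_rx_unregister() shows the same cursor-semantics change as the Bluetooth lookups: the sanity check for a missing receiver flips from if (!next) to if (!r), because after an exhausted hlist_for_each_entry_rcu() the typed cursor is now NULL rather than left pointing at the tail. (The unchanged comment above the check, which still says r "will point to the last item of the list", describes the pre-series behaviour.) A sketch of the equivalent lookup, with names taken from the hunk and the helper itself illustrative:

    static struct receiver *find_receiver(struct hlist_head *rl,
    				      canid_t can_id, canid_t mask)
    {
    	struct receiver *r;
    
    	hlist_for_each_entry_rcu(r, rl, list) {
    		if (r->can_id == can_id && r->mask == mask)
    			return r;
    	}
    
    	return NULL;	/* r is already NULL here after a full walk */
    }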
diff --git a/net/can/gw.c b/net/can/gw.c
index c185fcd5e828..2d117dc5ebea 100644
--- a/net/can/gw.c
+++ b/net/can/gw.c
@@ -457,11 +457,11 @@ static int cgw_notifier(struct notifier_block *nb,
457 if (msg == NETDEV_UNREGISTER) { 457 if (msg == NETDEV_UNREGISTER) {
458 458
459 struct cgw_job *gwj = NULL; 459 struct cgw_job *gwj = NULL;
460 struct hlist_node *n, *nx; 460 struct hlist_node *nx;
461 461
462 ASSERT_RTNL(); 462 ASSERT_RTNL();
463 463
464 hlist_for_each_entry_safe(gwj, n, nx, &cgw_list, list) { 464 hlist_for_each_entry_safe(gwj, nx, &cgw_list, list) {
465 465
466 if (gwj->src.dev == dev || gwj->dst.dev == dev) { 466 if (gwj->src.dev == dev || gwj->dst.dev == dev) {
467 hlist_del(&gwj->list); 467 hlist_del(&gwj->list);
@@ -575,12 +575,11 @@ cancel:
575static int cgw_dump_jobs(struct sk_buff *skb, struct netlink_callback *cb) 575static int cgw_dump_jobs(struct sk_buff *skb, struct netlink_callback *cb)
576{ 576{
577 struct cgw_job *gwj = NULL; 577 struct cgw_job *gwj = NULL;
578 struct hlist_node *n;
579 int idx = 0; 578 int idx = 0;
580 int s_idx = cb->args[0]; 579 int s_idx = cb->args[0];
581 580
582 rcu_read_lock(); 581 rcu_read_lock();
583 hlist_for_each_entry_rcu(gwj, n, &cgw_list, list) { 582 hlist_for_each_entry_rcu(gwj, &cgw_list, list) {
584 if (idx < s_idx) 583 if (idx < s_idx)
585 goto cont; 584 goto cont;
586 585
@@ -858,11 +857,11 @@ out:
858static void cgw_remove_all_jobs(void) 857static void cgw_remove_all_jobs(void)
859{ 858{
860 struct cgw_job *gwj = NULL; 859 struct cgw_job *gwj = NULL;
861 struct hlist_node *n, *nx; 860 struct hlist_node *nx;
862 861
863 ASSERT_RTNL(); 862 ASSERT_RTNL();
864 863
865 hlist_for_each_entry_safe(gwj, n, nx, &cgw_list, list) { 864 hlist_for_each_entry_safe(gwj, nx, &cgw_list, list) {
866 hlist_del(&gwj->list); 865 hlist_del(&gwj->list);
867 cgw_unregister_filter(gwj); 866 cgw_unregister_filter(gwj);
868 kfree(gwj); 867 kfree(gwj);
@@ -872,7 +871,7 @@ static void cgw_remove_all_jobs(void)
872static int cgw_remove_job(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg) 871static int cgw_remove_job(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
873{ 872{
874 struct cgw_job *gwj = NULL; 873 struct cgw_job *gwj = NULL;
875 struct hlist_node *n, *nx; 874 struct hlist_node *nx;
876 struct rtcanmsg *r; 875 struct rtcanmsg *r;
877 struct cf_mod mod; 876 struct cf_mod mod;
878 struct can_can_gw ccgw; 877 struct can_can_gw ccgw;
@@ -907,7 +906,7 @@ static int cgw_remove_job(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
907 ASSERT_RTNL(); 906 ASSERT_RTNL();
908 907
909 /* remove only the first matching entry */ 908 /* remove only the first matching entry */
910 hlist_for_each_entry_safe(gwj, n, nx, &cgw_list, list) { 909 hlist_for_each_entry_safe(gwj, nx, &cgw_list, list) {
911 910
912 if (gwj->flags != r->flags) 911 if (gwj->flags != r->flags)
913 continue; 912 continue;
diff --git a/net/can/proc.c b/net/can/proc.c
index 497335892146..1ab8c888f102 100644
--- a/net/can/proc.c
+++ b/net/can/proc.c
@@ -195,9 +195,8 @@ static void can_print_rcvlist(struct seq_file *m, struct hlist_head *rx_list,
195 struct net_device *dev) 195 struct net_device *dev)
196{ 196{
197 struct receiver *r; 197 struct receiver *r;
198 struct hlist_node *n;
199 198
200 hlist_for_each_entry_rcu(r, n, rx_list, list) { 199 hlist_for_each_entry_rcu(r, rx_list, list) {
201 char *fmt = (r->can_id & CAN_EFF_FLAG)? 200 char *fmt = (r->can_id & CAN_EFF_FLAG)?
202 " %-5s %08x %08x %pK %pK %8ld %s\n" : 201 " %-5s %08x %08x %pK %pK %8ld %s\n" :
203 " %-5s %03x %08x %pK %pK %8ld %s\n"; 202 " %-5s %03x %08x %pK %pK %8ld %s\n";
diff --git a/net/core/dev.c b/net/core/dev.c
index 18d8b5acc343..a06a7a58dd11 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -658,11 +658,10 @@ __setup("netdev=", netdev_boot_setup);
658 658
659struct net_device *__dev_get_by_name(struct net *net, const char *name) 659struct net_device *__dev_get_by_name(struct net *net, const char *name)
660{ 660{
661 struct hlist_node *p;
662 struct net_device *dev; 661 struct net_device *dev;
663 struct hlist_head *head = dev_name_hash(net, name); 662 struct hlist_head *head = dev_name_hash(net, name);
664 663
665 hlist_for_each_entry(dev, p, head, name_hlist) 664 hlist_for_each_entry(dev, head, name_hlist)
666 if (!strncmp(dev->name, name, IFNAMSIZ)) 665 if (!strncmp(dev->name, name, IFNAMSIZ))
667 return dev; 666 return dev;
668 667
@@ -684,11 +683,10 @@ EXPORT_SYMBOL(__dev_get_by_name);
684 683
685struct net_device *dev_get_by_name_rcu(struct net *net, const char *name) 684struct net_device *dev_get_by_name_rcu(struct net *net, const char *name)
686{ 685{
687 struct hlist_node *p;
688 struct net_device *dev; 686 struct net_device *dev;
689 struct hlist_head *head = dev_name_hash(net, name); 687 struct hlist_head *head = dev_name_hash(net, name);
690 688
691 hlist_for_each_entry_rcu(dev, p, head, name_hlist) 689 hlist_for_each_entry_rcu(dev, head, name_hlist)
692 if (!strncmp(dev->name, name, IFNAMSIZ)) 690 if (!strncmp(dev->name, name, IFNAMSIZ))
693 return dev; 691 return dev;
694 692
@@ -735,11 +733,10 @@ EXPORT_SYMBOL(dev_get_by_name);
735 733
736struct net_device *__dev_get_by_index(struct net *net, int ifindex) 734struct net_device *__dev_get_by_index(struct net *net, int ifindex)
737{ 735{
738 struct hlist_node *p;
739 struct net_device *dev; 736 struct net_device *dev;
740 struct hlist_head *head = dev_index_hash(net, ifindex); 737 struct hlist_head *head = dev_index_hash(net, ifindex);
741 738
742 hlist_for_each_entry(dev, p, head, index_hlist) 739 hlist_for_each_entry(dev, head, index_hlist)
743 if (dev->ifindex == ifindex) 740 if (dev->ifindex == ifindex)
744 return dev; 741 return dev;
745 742
@@ -760,11 +757,10 @@ EXPORT_SYMBOL(__dev_get_by_index);
760 757
761struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex) 758struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex)
762{ 759{
763 struct hlist_node *p;
764 struct net_device *dev; 760 struct net_device *dev;
765 struct hlist_head *head = dev_index_hash(net, ifindex); 761 struct hlist_head *head = dev_index_hash(net, ifindex);
766 762
767 hlist_for_each_entry_rcu(dev, p, head, index_hlist) 763 hlist_for_each_entry_rcu(dev, head, index_hlist)
768 if (dev->ifindex == ifindex) 764 if (dev->ifindex == ifindex)
769 return dev; 765 return dev;
770 766
diff --git a/net/core/flow.c b/net/core/flow.c
index 43f7495df27a..c56ea6f7f6c7 100644
--- a/net/core/flow.c
+++ b/net/core/flow.c
@@ -132,14 +132,14 @@ static void __flow_cache_shrink(struct flow_cache *fc,
132 int shrink_to) 132 int shrink_to)
133{ 133{
134 struct flow_cache_entry *fle; 134 struct flow_cache_entry *fle;
135 struct hlist_node *entry, *tmp; 135 struct hlist_node *tmp;
136 LIST_HEAD(gc_list); 136 LIST_HEAD(gc_list);
137 int i, deleted = 0; 137 int i, deleted = 0;
138 138
139 for (i = 0; i < flow_cache_hash_size(fc); i++) { 139 for (i = 0; i < flow_cache_hash_size(fc); i++) {
140 int saved = 0; 140 int saved = 0;
141 141
142 hlist_for_each_entry_safe(fle, entry, tmp, 142 hlist_for_each_entry_safe(fle, tmp,
143 &fcp->hash_table[i], u.hlist) { 143 &fcp->hash_table[i], u.hlist) {
144 if (saved < shrink_to && 144 if (saved < shrink_to &&
145 flow_entry_valid(fle)) { 145 flow_entry_valid(fle)) {
@@ -211,7 +211,6 @@ flow_cache_lookup(struct net *net, const struct flowi *key, u16 family, u8 dir,
211 struct flow_cache *fc = &flow_cache_global; 211 struct flow_cache *fc = &flow_cache_global;
212 struct flow_cache_percpu *fcp; 212 struct flow_cache_percpu *fcp;
213 struct flow_cache_entry *fle, *tfle; 213 struct flow_cache_entry *fle, *tfle;
214 struct hlist_node *entry;
215 struct flow_cache_object *flo; 214 struct flow_cache_object *flo;
216 size_t keysize; 215 size_t keysize;
217 unsigned int hash; 216 unsigned int hash;
@@ -235,7 +234,7 @@ flow_cache_lookup(struct net *net, const struct flowi *key, u16 family, u8 dir,
235 flow_new_hash_rnd(fc, fcp); 234 flow_new_hash_rnd(fc, fcp);
236 235
237 hash = flow_hash_code(fc, fcp, key, keysize); 236 hash = flow_hash_code(fc, fcp, key, keysize);
238 hlist_for_each_entry(tfle, entry, &fcp->hash_table[hash], u.hlist) { 237 hlist_for_each_entry(tfle, &fcp->hash_table[hash], u.hlist) {
239 if (tfle->net == net && 238 if (tfle->net == net &&
240 tfle->family == family && 239 tfle->family == family &&
241 tfle->dir == dir && 240 tfle->dir == dir &&
@@ -301,13 +300,13 @@ static void flow_cache_flush_tasklet(unsigned long data)
301 struct flow_cache *fc = info->cache; 300 struct flow_cache *fc = info->cache;
302 struct flow_cache_percpu *fcp; 301 struct flow_cache_percpu *fcp;
303 struct flow_cache_entry *fle; 302 struct flow_cache_entry *fle;
304 struct hlist_node *entry, *tmp; 303 struct hlist_node *tmp;
305 LIST_HEAD(gc_list); 304 LIST_HEAD(gc_list);
306 int i, deleted = 0; 305 int i, deleted = 0;
307 306
308 fcp = this_cpu_ptr(fc->percpu); 307 fcp = this_cpu_ptr(fc->percpu);
309 for (i = 0; i < flow_cache_hash_size(fc); i++) { 308 for (i = 0; i < flow_cache_hash_size(fc); i++) {
310 hlist_for_each_entry_safe(fle, entry, tmp, 309 hlist_for_each_entry_safe(fle, tmp,
311 &fcp->hash_table[i], u.hlist) { 310 &fcp->hash_table[i], u.hlist) {
312 if (flow_entry_valid(fle)) 311 if (flow_entry_valid(fle))
313 continue; 312 continue;
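
flow.c exercises the safe variant twice, and both conversions keep exactly one struct hlist_node *tmp. That pointer is irreducible: the macro must capture the next link before the loop body runs, since the body may free the current entry (here by moving it to gc_list). An abridged sketch of the post-series definition, reusing hlist_entry_safe() from the sketch further up:

    #define hlist_for_each_entry_safe(pos, n, head, member)			\
    	for (pos = hlist_entry_safe((head)->first, typeof(*pos), member);\
    	     pos && ({ n = pos->member.next; 1; });			\
    	     pos = hlist_entry_safe(n, typeof(*pos), member))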
diff --git a/net/core/net-procfs.c b/net/core/net-procfs.c
index 0f6bb6f8d391..3174f1998ee6 100644
--- a/net/core/net-procfs.c
+++ b/net/core/net-procfs.c
@@ -16,12 +16,11 @@ static inline struct net_device *dev_from_same_bucket(struct seq_file *seq, loff
16{ 16{
17 struct net *net = seq_file_net(seq); 17 struct net *net = seq_file_net(seq);
18 struct net_device *dev; 18 struct net_device *dev;
19 struct hlist_node *p;
20 struct hlist_head *h; 19 struct hlist_head *h;
21 unsigned int count = 0, offset = get_offset(*pos); 20 unsigned int count = 0, offset = get_offset(*pos);
22 21
23 h = &net->dev_name_head[get_bucket(*pos)]; 22 h = &net->dev_name_head[get_bucket(*pos)];
24 hlist_for_each_entry_rcu(dev, p, h, name_hlist) { 23 hlist_for_each_entry_rcu(dev, h, name_hlist) {
25 if (++count == offset) 24 if (++count == offset)
26 return dev; 25 return dev;
27 } 26 }
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index d8aa20f6a46e..b376410ff259 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -1060,7 +1060,6 @@ static int rtnl_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
1060 int idx = 0, s_idx; 1060 int idx = 0, s_idx;
1061 struct net_device *dev; 1061 struct net_device *dev;
1062 struct hlist_head *head; 1062 struct hlist_head *head;
1063 struct hlist_node *node;
1064 struct nlattr *tb[IFLA_MAX+1]; 1063 struct nlattr *tb[IFLA_MAX+1];
1065 u32 ext_filter_mask = 0; 1064 u32 ext_filter_mask = 0;
1066 1065
@@ -1080,7 +1079,7 @@ static int rtnl_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
1080 for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) { 1079 for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
1081 idx = 0; 1080 idx = 0;
1082 head = &net->dev_index_head[h]; 1081 head = &net->dev_index_head[h];
1083 hlist_for_each_entry_rcu(dev, node, head, index_hlist) { 1082 hlist_for_each_entry_rcu(dev, head, index_hlist) {
1084 if (idx < s_idx) 1083 if (idx < s_idx)
1085 goto cont; 1084 goto cont;
1086 if (rtnl_fill_ifinfo(skb, dev, RTM_NEWLINK, 1085 if (rtnl_fill_ifinfo(skb, dev, RTM_NEWLINK,
diff --git a/net/decnet/af_decnet.c b/net/decnet/af_decnet.c
index c4a2def5b7bd..c21f200eed93 100644
--- a/net/decnet/af_decnet.c
+++ b/net/decnet/af_decnet.c
@@ -175,12 +175,11 @@ static struct hlist_head *dn_find_list(struct sock *sk)
175static int check_port(__le16 port) 175static int check_port(__le16 port)
176{ 176{
177 struct sock *sk; 177 struct sock *sk;
178 struct hlist_node *node;
179 178
180 if (port == 0) 179 if (port == 0)
181 return -1; 180 return -1;
182 181
183 sk_for_each(sk, node, &dn_sk_hash[le16_to_cpu(port) & DN_SK_HASH_MASK]) { 182 sk_for_each(sk, &dn_sk_hash[le16_to_cpu(port) & DN_SK_HASH_MASK]) {
184 struct dn_scp *scp = DN_SK(sk); 183 struct dn_scp *scp = DN_SK(sk);
185 if (scp->addrloc == port) 184 if (scp->addrloc == port)
186 return -1; 185 return -1;
@@ -374,11 +373,10 @@ int dn_username2sockaddr(unsigned char *data, int len, struct sockaddr_dn *sdn,
374struct sock *dn_sklist_find_listener(struct sockaddr_dn *addr) 373struct sock *dn_sklist_find_listener(struct sockaddr_dn *addr)
375{ 374{
376 struct hlist_head *list = listen_hash(addr); 375 struct hlist_head *list = listen_hash(addr);
377 struct hlist_node *node;
378 struct sock *sk; 376 struct sock *sk;
379 377
380 read_lock(&dn_hash_lock); 378 read_lock(&dn_hash_lock);
381 sk_for_each(sk, node, list) { 379 sk_for_each(sk, list) {
382 struct dn_scp *scp = DN_SK(sk); 380 struct dn_scp *scp = DN_SK(sk);
383 if (sk->sk_state != TCP_LISTEN) 381 if (sk->sk_state != TCP_LISTEN)
384 continue; 382 continue;
@@ -414,11 +412,10 @@ struct sock *dn_find_by_skb(struct sk_buff *skb)
414{ 412{
415 struct dn_skb_cb *cb = DN_SKB_CB(skb); 413 struct dn_skb_cb *cb = DN_SKB_CB(skb);
416 struct sock *sk; 414 struct sock *sk;
417 struct hlist_node *node;
418 struct dn_scp *scp; 415 struct dn_scp *scp;
419 416
420 read_lock(&dn_hash_lock); 417 read_lock(&dn_hash_lock);
421 sk_for_each(sk, node, &dn_sk_hash[le16_to_cpu(cb->dst_port) & DN_SK_HASH_MASK]) { 418 sk_for_each(sk, &dn_sk_hash[le16_to_cpu(cb->dst_port) & DN_SK_HASH_MASK]) {
422 scp = DN_SK(sk); 419 scp = DN_SK(sk);
423 if (cb->src != dn_saddr2dn(&scp->peer)) 420 if (cb->src != dn_saddr2dn(&scp->peer))
424 continue; 421 continue;
diff --git a/net/decnet/dn_table.c b/net/decnet/dn_table.c
index f968c1b58f47..6c2445bcaba1 100644
--- a/net/decnet/dn_table.c
+++ b/net/decnet/dn_table.c
@@ -483,7 +483,6 @@ int dn_fib_dump(struct sk_buff *skb, struct netlink_callback *cb)
483 unsigned int h, s_h; 483 unsigned int h, s_h;
484 unsigned int e = 0, s_e; 484 unsigned int e = 0, s_e;
485 struct dn_fib_table *tb; 485 struct dn_fib_table *tb;
486 struct hlist_node *node;
487 int dumped = 0; 486 int dumped = 0;
488 487
489 if (!net_eq(net, &init_net)) 488 if (!net_eq(net, &init_net))
@@ -498,7 +497,7 @@ int dn_fib_dump(struct sk_buff *skb, struct netlink_callback *cb)
498 497
499 for (h = s_h; h < DN_FIB_TABLE_HASHSZ; h++, s_h = 0) { 498 for (h = s_h; h < DN_FIB_TABLE_HASHSZ; h++, s_h = 0) {
500 e = 0; 499 e = 0;
501 hlist_for_each_entry(tb, node, &dn_fib_table_hash[h], hlist) { 500 hlist_for_each_entry(tb, &dn_fib_table_hash[h], hlist) {
502 if (e < s_e) 501 if (e < s_e)
503 goto next; 502 goto next;
504 if (dumped) 503 if (dumped)
@@ -828,7 +827,6 @@ out:
828struct dn_fib_table *dn_fib_get_table(u32 n, int create) 827struct dn_fib_table *dn_fib_get_table(u32 n, int create)
829{ 828{
830 struct dn_fib_table *t; 829 struct dn_fib_table *t;
831 struct hlist_node *node;
832 unsigned int h; 830 unsigned int h;
833 831
834 if (n < RT_TABLE_MIN) 832 if (n < RT_TABLE_MIN)
@@ -839,7 +837,7 @@ struct dn_fib_table *dn_fib_get_table(u32 n, int create)
839 837
840 h = n & (DN_FIB_TABLE_HASHSZ - 1); 838 h = n & (DN_FIB_TABLE_HASHSZ - 1);
841 rcu_read_lock(); 839 rcu_read_lock();
842 hlist_for_each_entry_rcu(t, node, &dn_fib_table_hash[h], hlist) { 840 hlist_for_each_entry_rcu(t, &dn_fib_table_hash[h], hlist) {
843 if (t->n == n) { 841 if (t->n == n) {
844 rcu_read_unlock(); 842 rcu_read_unlock();
845 return t; 843 return t;
@@ -885,11 +883,10 @@ void dn_fib_flush(void)
885{ 883{
886 int flushed = 0; 884 int flushed = 0;
887 struct dn_fib_table *tb; 885 struct dn_fib_table *tb;
888 struct hlist_node *node;
889 unsigned int h; 886 unsigned int h;
890 887
891 for (h = 0; h < DN_FIB_TABLE_HASHSZ; h++) { 888 for (h = 0; h < DN_FIB_TABLE_HASHSZ; h++) {
892 hlist_for_each_entry(tb, node, &dn_fib_table_hash[h], hlist) 889 hlist_for_each_entry(tb, &dn_fib_table_hash[h], hlist)
893 flushed += tb->flush(tb); 890 flushed += tb->flush(tb);
894 } 891 }
895 892
@@ -908,12 +905,12 @@ void __init dn_fib_table_init(void)
908void __exit dn_fib_table_cleanup(void) 905void __exit dn_fib_table_cleanup(void)
909{ 906{
910 struct dn_fib_table *t; 907 struct dn_fib_table *t;
911 struct hlist_node *node, *next; 908 struct hlist_node *next;
912 unsigned int h; 909 unsigned int h;
913 910
914 write_lock(&dn_fib_tables_lock); 911 write_lock(&dn_fib_tables_lock);
915 for (h = 0; h < DN_FIB_TABLE_HASHSZ; h++) { 912 for (h = 0; h < DN_FIB_TABLE_HASHSZ; h++) {
916 hlist_for_each_entry_safe(t, node, next, &dn_fib_table_hash[h], 913 hlist_for_each_entry_safe(t, next, &dn_fib_table_hash[h],
917 hlist) { 914 hlist) {
918 hlist_del(&t->hlist); 915 hlist_del(&t->hlist);
919 kfree(t); 916 kfree(t);
diff --git a/net/ieee802154/dgram.c b/net/ieee802154/dgram.c
index 16705611589a..e0da175f8e5b 100644
--- a/net/ieee802154/dgram.c
+++ b/net/ieee802154/dgram.c
@@ -350,7 +350,6 @@ static inline int ieee802154_match_sock(u8 *hw_addr, u16 pan_id,
350int ieee802154_dgram_deliver(struct net_device *dev, struct sk_buff *skb) 350int ieee802154_dgram_deliver(struct net_device *dev, struct sk_buff *skb)
351{ 351{
352 struct sock *sk, *prev = NULL; 352 struct sock *sk, *prev = NULL;
353 struct hlist_node *node;
354 int ret = NET_RX_SUCCESS; 353 int ret = NET_RX_SUCCESS;
355 u16 pan_id, short_addr; 354 u16 pan_id, short_addr;
356 355
@@ -361,7 +360,7 @@ int ieee802154_dgram_deliver(struct net_device *dev, struct sk_buff *skb)
361 short_addr = ieee802154_mlme_ops(dev)->get_short_addr(dev); 360 short_addr = ieee802154_mlme_ops(dev)->get_short_addr(dev);
362 361
363 read_lock(&dgram_lock); 362 read_lock(&dgram_lock);
364 sk_for_each(sk, node, &dgram_head) { 363 sk_for_each(sk, &dgram_head) {
365 if (ieee802154_match_sock(dev->dev_addr, pan_id, short_addr, 364 if (ieee802154_match_sock(dev->dev_addr, pan_id, short_addr,
366 dgram_sk(sk))) { 365 dgram_sk(sk))) {
367 if (prev) { 366 if (prev) {
diff --git a/net/ieee802154/raw.c b/net/ieee802154/raw.c
index 50e823927d49..41f538b8e59c 100644
--- a/net/ieee802154/raw.c
+++ b/net/ieee802154/raw.c
@@ -221,10 +221,9 @@ static int raw_rcv_skb(struct sock *sk, struct sk_buff *skb)
221void ieee802154_raw_deliver(struct net_device *dev, struct sk_buff *skb) 221void ieee802154_raw_deliver(struct net_device *dev, struct sk_buff *skb)
222{ 222{
223 struct sock *sk; 223 struct sock *sk;
224 struct hlist_node *node;
225 224
226 read_lock(&raw_lock); 225 read_lock(&raw_lock);
227 sk_for_each(sk, node, &raw_head) { 226 sk_for_each(sk, &raw_head) {
228 bh_lock_sock(sk); 227 bh_lock_sock(sk);
229 if (!sk->sk_bound_dev_if || 228 if (!sk->sk_bound_dev_if ||
230 sk->sk_bound_dev_if == dev->ifindex) { 229 sk->sk_bound_dev_if == dev->ifindex) {
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
index 5281314886c1..f678507bc829 100644
--- a/net/ipv4/devinet.c
+++ b/net/ipv4/devinet.c
@@ -139,10 +139,9 @@ struct net_device *__ip_dev_find(struct net *net, __be32 addr, bool devref)
139 u32 hash = inet_addr_hash(net, addr); 139 u32 hash = inet_addr_hash(net, addr);
140 struct net_device *result = NULL; 140 struct net_device *result = NULL;
141 struct in_ifaddr *ifa; 141 struct in_ifaddr *ifa;
142 struct hlist_node *node;
143 142
144 rcu_read_lock(); 143 rcu_read_lock();
145 hlist_for_each_entry_rcu(ifa, node, &inet_addr_lst[hash], hash) { 144 hlist_for_each_entry_rcu(ifa, &inet_addr_lst[hash], hash) {
146 if (ifa->ifa_local == addr) { 145 if (ifa->ifa_local == addr) {
147 struct net_device *dev = ifa->ifa_dev->dev; 146 struct net_device *dev = ifa->ifa_dev->dev;
148 147
@@ -588,7 +587,6 @@ static void check_lifetime(struct work_struct *work)
588{ 587{
589 unsigned long now, next, next_sec, next_sched; 588 unsigned long now, next, next_sec, next_sched;
590 struct in_ifaddr *ifa; 589 struct in_ifaddr *ifa;
591 struct hlist_node *node;
592 int i; 590 int i;
593 591
594 now = jiffies; 592 now = jiffies;
@@ -596,8 +594,7 @@ static void check_lifetime(struct work_struct *work)
596 594
597 rcu_read_lock(); 595 rcu_read_lock();
598 for (i = 0; i < IN4_ADDR_HSIZE; i++) { 596 for (i = 0; i < IN4_ADDR_HSIZE; i++) {
599 hlist_for_each_entry_rcu(ifa, node, 597 hlist_for_each_entry_rcu(ifa, &inet_addr_lst[i], hash) {
600 &inet_addr_lst[i], hash) {
601 unsigned long age; 598 unsigned long age;
602 599
603 if (ifa->ifa_flags & IFA_F_PERMANENT) 600 if (ifa->ifa_flags & IFA_F_PERMANENT)
@@ -1493,7 +1490,6 @@ static int inet_dump_ifaddr(struct sk_buff *skb, struct netlink_callback *cb)
1493 struct in_device *in_dev; 1490 struct in_device *in_dev;
1494 struct in_ifaddr *ifa; 1491 struct in_ifaddr *ifa;
1495 struct hlist_head *head; 1492 struct hlist_head *head;
1496 struct hlist_node *node;
1497 1493
1498 s_h = cb->args[0]; 1494 s_h = cb->args[0];
1499 s_idx = idx = cb->args[1]; 1495 s_idx = idx = cb->args[1];
@@ -1503,7 +1499,7 @@ static int inet_dump_ifaddr(struct sk_buff *skb, struct netlink_callback *cb)
1503 idx = 0; 1499 idx = 0;
1504 head = &net->dev_index_head[h]; 1500 head = &net->dev_index_head[h];
1505 rcu_read_lock(); 1501 rcu_read_lock();
1506 hlist_for_each_entry_rcu(dev, node, head, index_hlist) { 1502 hlist_for_each_entry_rcu(dev, head, index_hlist) {
1507 if (idx < s_idx) 1503 if (idx < s_idx)
1508 goto cont; 1504 goto cont;
1509 if (h > s_h || idx > s_idx) 1505 if (h > s_h || idx > s_idx)
diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
index 99f00d39d10b..eb4bb12b3eb4 100644
--- a/net/ipv4/fib_frontend.c
+++ b/net/ipv4/fib_frontend.c
@@ -112,7 +112,6 @@ struct fib_table *fib_new_table(struct net *net, u32 id)
112struct fib_table *fib_get_table(struct net *net, u32 id) 112struct fib_table *fib_get_table(struct net *net, u32 id)
113{ 113{
114 struct fib_table *tb; 114 struct fib_table *tb;
115 struct hlist_node *node;
116 struct hlist_head *head; 115 struct hlist_head *head;
117 unsigned int h; 116 unsigned int h;
118 117
@@ -122,7 +121,7 @@ struct fib_table *fib_get_table(struct net *net, u32 id)
122 121
123 rcu_read_lock(); 122 rcu_read_lock();
124 head = &net->ipv4.fib_table_hash[h]; 123 head = &net->ipv4.fib_table_hash[h];
125 hlist_for_each_entry_rcu(tb, node, head, tb_hlist) { 124 hlist_for_each_entry_rcu(tb, head, tb_hlist) {
126 if (tb->tb_id == id) { 125 if (tb->tb_id == id) {
127 rcu_read_unlock(); 126 rcu_read_unlock();
128 return tb; 127 return tb;
@@ -137,13 +136,12 @@ static void fib_flush(struct net *net)
137{ 136{
138 int flushed = 0; 137 int flushed = 0;
139 struct fib_table *tb; 138 struct fib_table *tb;
140 struct hlist_node *node;
141 struct hlist_head *head; 139 struct hlist_head *head;
142 unsigned int h; 140 unsigned int h;
143 141
144 for (h = 0; h < FIB_TABLE_HASHSZ; h++) { 142 for (h = 0; h < FIB_TABLE_HASHSZ; h++) {
145 head = &net->ipv4.fib_table_hash[h]; 143 head = &net->ipv4.fib_table_hash[h];
146 hlist_for_each_entry(tb, node, head, tb_hlist) 144 hlist_for_each_entry(tb, head, tb_hlist)
147 flushed += fib_table_flush(tb); 145 flushed += fib_table_flush(tb);
148 } 146 }
149 147
@@ -656,7 +654,6 @@ static int inet_dump_fib(struct sk_buff *skb, struct netlink_callback *cb)
656 unsigned int h, s_h; 654 unsigned int h, s_h;
657 unsigned int e = 0, s_e; 655 unsigned int e = 0, s_e;
658 struct fib_table *tb; 656 struct fib_table *tb;
659 struct hlist_node *node;
660 struct hlist_head *head; 657 struct hlist_head *head;
661 int dumped = 0; 658 int dumped = 0;
662 659
@@ -670,7 +667,7 @@ static int inet_dump_fib(struct sk_buff *skb, struct netlink_callback *cb)
670 for (h = s_h; h < FIB_TABLE_HASHSZ; h++, s_e = 0) { 667 for (h = s_h; h < FIB_TABLE_HASHSZ; h++, s_e = 0) {
671 e = 0; 668 e = 0;
672 head = &net->ipv4.fib_table_hash[h]; 669 head = &net->ipv4.fib_table_hash[h];
673 hlist_for_each_entry(tb, node, head, tb_hlist) { 670 hlist_for_each_entry(tb, head, tb_hlist) {
674 if (e < s_e) 671 if (e < s_e)
675 goto next; 672 goto next;
676 if (dumped) 673 if (dumped)
@@ -1117,11 +1114,11 @@ static void ip_fib_net_exit(struct net *net)
1117 for (i = 0; i < FIB_TABLE_HASHSZ; i++) { 1114 for (i = 0; i < FIB_TABLE_HASHSZ; i++) {
1118 struct fib_table *tb; 1115 struct fib_table *tb;
1119 struct hlist_head *head; 1116 struct hlist_head *head;
1120 struct hlist_node *node, *tmp; 1117 struct hlist_node *tmp;
1121 1118
1122 head = &net->ipv4.fib_table_hash[i]; 1119 head = &net->ipv4.fib_table_hash[i];
1123 hlist_for_each_entry_safe(tb, node, tmp, head, tb_hlist) { 1120 hlist_for_each_entry_safe(tb, tmp, head, tb_hlist) {
1124 hlist_del(node); 1121 hlist_del(&tb->tb_hlist);
1125 fib_table_flush(tb); 1122 fib_table_flush(tb);
1126 fib_free_table(tb); 1123 fib_free_table(tb);
1127 } 1124 }
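
The ip_fib_net_exit() hunk is one of the few places the removed cursor did real work: hlist_del(node) must now name the embedded member explicitly, hlist_del(&tb->tb_hlist). A sketch of that teardown idiom, with a stand-in payload type:

    #include <linux/list.h>
    #include <linux/slab.h>

    struct table {                  /* stand-in for struct fib_table */
            struct hlist_node hlist;
            /* ... payload ... */
    };

    static void flush_chain(struct hlist_head *head)
    {
            struct table *tb;
            struct hlist_node *tmp;

            /* 'tmp' caches the successor, so the current entry may be
             * unlinked and freed without derailing the walk. */
            hlist_for_each_entry_safe(tb, tmp, head, hlist) {
                    hlist_del(&tb->hlist);  /* was: hlist_del(node) */
                    kfree(tb);
            }
    }
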
diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
index 4797a800faf8..8f6cb7a87cd6 100644
--- a/net/ipv4/fib_semantics.c
+++ b/net/ipv4/fib_semantics.c
@@ -298,14 +298,13 @@ static inline unsigned int fib_info_hashfn(const struct fib_info *fi)
298static struct fib_info *fib_find_info(const struct fib_info *nfi) 298static struct fib_info *fib_find_info(const struct fib_info *nfi)
299{ 299{
300 struct hlist_head *head; 300 struct hlist_head *head;
301 struct hlist_node *node;
302 struct fib_info *fi; 301 struct fib_info *fi;
303 unsigned int hash; 302 unsigned int hash;
304 303
305 hash = fib_info_hashfn(nfi); 304 hash = fib_info_hashfn(nfi);
306 head = &fib_info_hash[hash]; 305 head = &fib_info_hash[hash];
307 306
308 hlist_for_each_entry(fi, node, head, fib_hash) { 307 hlist_for_each_entry(fi, head, fib_hash) {
309 if (!net_eq(fi->fib_net, nfi->fib_net)) 308 if (!net_eq(fi->fib_net, nfi->fib_net))
310 continue; 309 continue;
311 if (fi->fib_nhs != nfi->fib_nhs) 310 if (fi->fib_nhs != nfi->fib_nhs)
@@ -331,7 +330,6 @@ static struct fib_info *fib_find_info(const struct fib_info *nfi)
331int ip_fib_check_default(__be32 gw, struct net_device *dev) 330int ip_fib_check_default(__be32 gw, struct net_device *dev)
332{ 331{
333 struct hlist_head *head; 332 struct hlist_head *head;
334 struct hlist_node *node;
335 struct fib_nh *nh; 333 struct fib_nh *nh;
336 unsigned int hash; 334 unsigned int hash;
337 335
@@ -339,7 +337,7 @@ int ip_fib_check_default(__be32 gw, struct net_device *dev)
339 337
340 hash = fib_devindex_hashfn(dev->ifindex); 338 hash = fib_devindex_hashfn(dev->ifindex);
341 head = &fib_info_devhash[hash]; 339 head = &fib_info_devhash[hash];
342 hlist_for_each_entry(nh, node, head, nh_hash) { 340 hlist_for_each_entry(nh, head, nh_hash) {
343 if (nh->nh_dev == dev && 341 if (nh->nh_dev == dev &&
344 nh->nh_gw == gw && 342 nh->nh_gw == gw &&
345 !(nh->nh_flags & RTNH_F_DEAD)) { 343 !(nh->nh_flags & RTNH_F_DEAD)) {
@@ -721,10 +719,10 @@ static void fib_info_hash_move(struct hlist_head *new_info_hash,
721 719
722 for (i = 0; i < old_size; i++) { 720 for (i = 0; i < old_size; i++) {
723 struct hlist_head *head = &fib_info_hash[i]; 721 struct hlist_head *head = &fib_info_hash[i];
724 struct hlist_node *node, *n; 722 struct hlist_node *n;
725 struct fib_info *fi; 723 struct fib_info *fi;
726 724
727 hlist_for_each_entry_safe(fi, node, n, head, fib_hash) { 725 hlist_for_each_entry_safe(fi, n, head, fib_hash) {
728 struct hlist_head *dest; 726 struct hlist_head *dest;
729 unsigned int new_hash; 727 unsigned int new_hash;
730 728
@@ -739,10 +737,10 @@ static void fib_info_hash_move(struct hlist_head *new_info_hash,
739 737
740 for (i = 0; i < old_size; i++) { 738 for (i = 0; i < old_size; i++) {
741 struct hlist_head *lhead = &fib_info_laddrhash[i]; 739 struct hlist_head *lhead = &fib_info_laddrhash[i];
742 struct hlist_node *node, *n; 740 struct hlist_node *n;
743 struct fib_info *fi; 741 struct fib_info *fi;
744 742
745 hlist_for_each_entry_safe(fi, node, n, lhead, fib_lhash) { 743 hlist_for_each_entry_safe(fi, n, lhead, fib_lhash) {
746 struct hlist_head *ldest; 744 struct hlist_head *ldest;
747 unsigned int new_hash; 745 unsigned int new_hash;
748 746
@@ -1096,13 +1094,12 @@ int fib_sync_down_addr(struct net *net, __be32 local)
1096 int ret = 0; 1094 int ret = 0;
1097 unsigned int hash = fib_laddr_hashfn(local); 1095 unsigned int hash = fib_laddr_hashfn(local);
1098 struct hlist_head *head = &fib_info_laddrhash[hash]; 1096 struct hlist_head *head = &fib_info_laddrhash[hash];
1099 struct hlist_node *node;
1100 struct fib_info *fi; 1097 struct fib_info *fi;
1101 1098
1102 if (fib_info_laddrhash == NULL || local == 0) 1099 if (fib_info_laddrhash == NULL || local == 0)
1103 return 0; 1100 return 0;
1104 1101
1105 hlist_for_each_entry(fi, node, head, fib_lhash) { 1102 hlist_for_each_entry(fi, head, fib_lhash) {
1106 if (!net_eq(fi->fib_net, net)) 1103 if (!net_eq(fi->fib_net, net))
1107 continue; 1104 continue;
1108 if (fi->fib_prefsrc == local) { 1105 if (fi->fib_prefsrc == local) {
@@ -1120,13 +1117,12 @@ int fib_sync_down_dev(struct net_device *dev, int force)
1120 struct fib_info *prev_fi = NULL; 1117 struct fib_info *prev_fi = NULL;
1121 unsigned int hash = fib_devindex_hashfn(dev->ifindex); 1118 unsigned int hash = fib_devindex_hashfn(dev->ifindex);
1122 struct hlist_head *head = &fib_info_devhash[hash]; 1119 struct hlist_head *head = &fib_info_devhash[hash];
1123 struct hlist_node *node;
1124 struct fib_nh *nh; 1120 struct fib_nh *nh;
1125 1121
1126 if (force) 1122 if (force)
1127 scope = -1; 1123 scope = -1;
1128 1124
1129 hlist_for_each_entry(nh, node, head, nh_hash) { 1125 hlist_for_each_entry(nh, head, nh_hash) {
1130 struct fib_info *fi = nh->nh_parent; 1126 struct fib_info *fi = nh->nh_parent;
1131 int dead; 1127 int dead;
1132 1128
@@ -1232,7 +1228,6 @@ int fib_sync_up(struct net_device *dev)
1232 struct fib_info *prev_fi; 1228 struct fib_info *prev_fi;
1233 unsigned int hash; 1229 unsigned int hash;
1234 struct hlist_head *head; 1230 struct hlist_head *head;
1235 struct hlist_node *node;
1236 struct fib_nh *nh; 1231 struct fib_nh *nh;
1237 int ret; 1232 int ret;
1238 1233
@@ -1244,7 +1239,7 @@ int fib_sync_up(struct net_device *dev)
1244 head = &fib_info_devhash[hash]; 1239 head = &fib_info_devhash[hash];
1245 ret = 0; 1240 ret = 0;
1246 1241
1247 hlist_for_each_entry(nh, node, head, nh_hash) { 1242 hlist_for_each_entry(nh, head, nh_hash) {
1248 struct fib_info *fi = nh->nh_parent; 1243 struct fib_info *fi = nh->nh_parent;
1249 int alive; 1244 int alive;
1250 1245
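
fib_info_hash_move() is the rehash flavour of the same idiom: the _safe variant caches the successor in a separate node pointer, so each entry can be unlinked from the old table and pushed onto the new one mid-walk. A sketch with invented names:

    #include <linux/list.h>
    #include <linux/types.h>

    struct ent {                    /* invented entry type */
            u32 key;
            struct hlist_node hash;
    };

    static void rehash(struct hlist_head *old, unsigned int old_size,
                       struct hlist_head *dst, u32 dst_mask)
    {
            unsigned int i;

            for (i = 0; i < old_size; i++) {
                    struct ent *e;
                    struct hlist_node *n;

                    hlist_for_each_entry_safe(e, n, &old[i], hash) {
                            hlist_del(&e->hash);
                            hlist_add_head(&e->hash,
                                           &dst[e->key & dst_mask]);
                    }
            }
    }
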
diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c
index 61e03da3e1f5..ff06b7543d9f 100644
--- a/net/ipv4/fib_trie.c
+++ b/net/ipv4/fib_trie.c
@@ -920,10 +920,9 @@ nomem:
920static struct leaf_info *find_leaf_info(struct leaf *l, int plen) 920static struct leaf_info *find_leaf_info(struct leaf *l, int plen)
921{ 921{
922 struct hlist_head *head = &l->list; 922 struct hlist_head *head = &l->list;
923 struct hlist_node *node;
924 struct leaf_info *li; 923 struct leaf_info *li;
925 924
926 hlist_for_each_entry_rcu(li, node, head, hlist) 925 hlist_for_each_entry_rcu(li, head, hlist)
927 if (li->plen == plen) 926 if (li->plen == plen)
928 return li; 927 return li;
929 928
@@ -943,12 +942,11 @@ static inline struct list_head *get_fa_head(struct leaf *l, int plen)
943static void insert_leaf_info(struct hlist_head *head, struct leaf_info *new) 942static void insert_leaf_info(struct hlist_head *head, struct leaf_info *new)
944{ 943{
945 struct leaf_info *li = NULL, *last = NULL; 944 struct leaf_info *li = NULL, *last = NULL;
946 struct hlist_node *node;
947 945
948 if (hlist_empty(head)) { 946 if (hlist_empty(head)) {
949 hlist_add_head_rcu(&new->hlist, head); 947 hlist_add_head_rcu(&new->hlist, head);
950 } else { 948 } else {
951 hlist_for_each_entry(li, node, head, hlist) { 949 hlist_for_each_entry(li, head, hlist) {
952 if (new->plen > li->plen) 950 if (new->plen > li->plen)
953 break; 951 break;
954 952
@@ -1354,9 +1352,8 @@ static int check_leaf(struct fib_table *tb, struct trie *t, struct leaf *l,
1354{ 1352{
1355 struct leaf_info *li; 1353 struct leaf_info *li;
1356 struct hlist_head *hhead = &l->list; 1354 struct hlist_head *hhead = &l->list;
1357 struct hlist_node *node;
1358 1355
1359 hlist_for_each_entry_rcu(li, node, hhead, hlist) { 1356 hlist_for_each_entry_rcu(li, hhead, hlist) {
1360 struct fib_alias *fa; 1357 struct fib_alias *fa;
1361 1358
1362 if (l->key != (key & li->mask_plen)) 1359 if (l->key != (key & li->mask_plen))
@@ -1740,10 +1737,10 @@ static int trie_flush_leaf(struct leaf *l)
1740{ 1737{
1741 int found = 0; 1738 int found = 0;
1742 struct hlist_head *lih = &l->list; 1739 struct hlist_head *lih = &l->list;
1743 struct hlist_node *node, *tmp; 1740 struct hlist_node *tmp;
1744 struct leaf_info *li = NULL; 1741 struct leaf_info *li = NULL;
1745 1742
1746 hlist_for_each_entry_safe(li, node, tmp, lih, hlist) { 1743 hlist_for_each_entry_safe(li, tmp, lih, hlist) {
1747 found += trie_flush_list(&li->falh); 1744 found += trie_flush_list(&li->falh);
1748 1745
1749 if (list_empty(&li->falh)) { 1746 if (list_empty(&li->falh)) {
@@ -1895,14 +1892,13 @@ static int fn_trie_dump_leaf(struct leaf *l, struct fib_table *tb,
1895 struct sk_buff *skb, struct netlink_callback *cb) 1892 struct sk_buff *skb, struct netlink_callback *cb)
1896{ 1893{
1897 struct leaf_info *li; 1894 struct leaf_info *li;
1898 struct hlist_node *node;
1899 int i, s_i; 1895 int i, s_i;
1900 1896
1901 s_i = cb->args[4]; 1897 s_i = cb->args[4];
1902 i = 0; 1898 i = 0;
1903 1899
1904 /* rcu_read_lock is held by caller */ 1900 /* rcu_read_lock is held by caller */
1905 hlist_for_each_entry_rcu(li, node, &l->list, hlist) { 1901 hlist_for_each_entry_rcu(li, &l->list, hlist) {
1906 if (i < s_i) { 1902 if (i < s_i) {
1907 i++; 1903 i++;
1908 continue; 1904 continue;
@@ -2092,14 +2088,13 @@ static void trie_collect_stats(struct trie *t, struct trie_stat *s)
2092 if (IS_LEAF(n)) { 2088 if (IS_LEAF(n)) {
2093 struct leaf *l = (struct leaf *)n; 2089 struct leaf *l = (struct leaf *)n;
2094 struct leaf_info *li; 2090 struct leaf_info *li;
2095 struct hlist_node *tmp;
2096 2091
2097 s->leaves++; 2092 s->leaves++;
2098 s->totdepth += iter.depth; 2093 s->totdepth += iter.depth;
2099 if (iter.depth > s->maxdepth) 2094 if (iter.depth > s->maxdepth)
2100 s->maxdepth = iter.depth; 2095 s->maxdepth = iter.depth;
2101 2096
2102 hlist_for_each_entry_rcu(li, tmp, &l->list, hlist) 2097 hlist_for_each_entry_rcu(li, &l->list, hlist)
2103 ++s->prefixes; 2098 ++s->prefixes;
2104 } else { 2099 } else {
2105 const struct tnode *tn = (const struct tnode *) n; 2100 const struct tnode *tn = (const struct tnode *) n;
@@ -2200,10 +2195,9 @@ static int fib_triestat_seq_show(struct seq_file *seq, void *v)
2200 2195
2201 for (h = 0; h < FIB_TABLE_HASHSZ; h++) { 2196 for (h = 0; h < FIB_TABLE_HASHSZ; h++) {
2202 struct hlist_head *head = &net->ipv4.fib_table_hash[h]; 2197 struct hlist_head *head = &net->ipv4.fib_table_hash[h];
2203 struct hlist_node *node;
2204 struct fib_table *tb; 2198 struct fib_table *tb;
2205 2199
2206 hlist_for_each_entry_rcu(tb, node, head, tb_hlist) { 2200 hlist_for_each_entry_rcu(tb, head, tb_hlist) {
2207 struct trie *t = (struct trie *) tb->tb_data; 2201 struct trie *t = (struct trie *) tb->tb_data;
2208 struct trie_stat stat; 2202 struct trie_stat stat;
2209 2203
@@ -2245,10 +2239,9 @@ static struct rt_trie_node *fib_trie_get_idx(struct seq_file *seq, loff_t pos)
2245 2239
2246 for (h = 0; h < FIB_TABLE_HASHSZ; h++) { 2240 for (h = 0; h < FIB_TABLE_HASHSZ; h++) {
2247 struct hlist_head *head = &net->ipv4.fib_table_hash[h]; 2241 struct hlist_head *head = &net->ipv4.fib_table_hash[h];
2248 struct hlist_node *node;
2249 struct fib_table *tb; 2242 struct fib_table *tb;
2250 2243
2251 hlist_for_each_entry_rcu(tb, node, head, tb_hlist) { 2244 hlist_for_each_entry_rcu(tb, head, tb_hlist) {
2252 struct rt_trie_node *n; 2245 struct rt_trie_node *n;
2253 2246
2254 for (n = fib_trie_get_first(iter, 2247 for (n = fib_trie_get_first(iter,
@@ -2298,7 +2291,7 @@ static void *fib_trie_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2298 /* new hash chain */ 2291 /* new hash chain */
2299 while (++h < FIB_TABLE_HASHSZ) { 2292 while (++h < FIB_TABLE_HASHSZ) {
2300 struct hlist_head *head = &net->ipv4.fib_table_hash[h]; 2293 struct hlist_head *head = &net->ipv4.fib_table_hash[h];
2301 hlist_for_each_entry_rcu(tb, tb_node, head, tb_hlist) { 2294 hlist_for_each_entry_rcu(tb, head, tb_hlist) {
2302 n = fib_trie_get_first(iter, (struct trie *) tb->tb_data); 2295 n = fib_trie_get_first(iter, (struct trie *) tb->tb_data);
2303 if (n) 2296 if (n)
2304 goto found; 2297 goto found;
@@ -2381,13 +2374,12 @@ static int fib_trie_seq_show(struct seq_file *seq, void *v)
2381 } else { 2374 } else {
2382 struct leaf *l = (struct leaf *) n; 2375 struct leaf *l = (struct leaf *) n;
2383 struct leaf_info *li; 2376 struct leaf_info *li;
2384 struct hlist_node *node;
2385 __be32 val = htonl(l->key); 2377 __be32 val = htonl(l->key);
2386 2378
2387 seq_indent(seq, iter->depth); 2379 seq_indent(seq, iter->depth);
2388 seq_printf(seq, " |-- %pI4\n", &val); 2380 seq_printf(seq, " |-- %pI4\n", &val);
2389 2381
2390 hlist_for_each_entry_rcu(li, node, &l->list, hlist) { 2382 hlist_for_each_entry_rcu(li, &l->list, hlist) {
2391 struct fib_alias *fa; 2383 struct fib_alias *fa;
2392 2384
2393 list_for_each_entry_rcu(fa, &li->falh, fa_list) { 2385 list_for_each_entry_rcu(fa, &li->falh, fa_list) {
@@ -2532,7 +2524,6 @@ static int fib_route_seq_show(struct seq_file *seq, void *v)
2532{ 2524{
2533 struct leaf *l = v; 2525 struct leaf *l = v;
2534 struct leaf_info *li; 2526 struct leaf_info *li;
2535 struct hlist_node *node;
2536 2527
2537 if (v == SEQ_START_TOKEN) { 2528 if (v == SEQ_START_TOKEN) {
2538 seq_printf(seq, "%-127s\n", "Iface\tDestination\tGateway " 2529 seq_printf(seq, "%-127s\n", "Iface\tDestination\tGateway "
@@ -2541,7 +2532,7 @@ static int fib_route_seq_show(struct seq_file *seq, void *v)
2541 return 0; 2532 return 0;
2542 } 2533 }
2543 2534
2544 hlist_for_each_entry_rcu(li, node, &l->list, hlist) { 2535 hlist_for_each_entry_rcu(li, &l->list, hlist) {
2545 struct fib_alias *fa; 2536 struct fib_alias *fa;
2546 __be32 mask, prefix; 2537 __be32 mask, prefix;
2547 2538
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
index 11cb4979a465..7d1874be1df3 100644
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -57,7 +57,6 @@ int inet_csk_bind_conflict(const struct sock *sk,
57 const struct inet_bind_bucket *tb, bool relax) 57 const struct inet_bind_bucket *tb, bool relax)
58{ 58{
59 struct sock *sk2; 59 struct sock *sk2;
60 struct hlist_node *node;
61 int reuse = sk->sk_reuse; 60 int reuse = sk->sk_reuse;
62 int reuseport = sk->sk_reuseport; 61 int reuseport = sk->sk_reuseport;
63 kuid_t uid = sock_i_uid((struct sock *)sk); 62 kuid_t uid = sock_i_uid((struct sock *)sk);
@@ -69,7 +68,7 @@ int inet_csk_bind_conflict(const struct sock *sk,
69 * one this bucket belongs to. 68 * one this bucket belongs to.
70 */ 69 */
71 70
72 sk_for_each_bound(sk2, node, &tb->owners) { 71 sk_for_each_bound(sk2, &tb->owners) {
73 if (sk != sk2 && 72 if (sk != sk2 &&
74 !inet_v6_ipv6only(sk2) && 73 !inet_v6_ipv6only(sk2) &&
75 (!sk->sk_bound_dev_if || 74 (!sk->sk_bound_dev_if ||
@@ -95,7 +94,7 @@ int inet_csk_bind_conflict(const struct sock *sk,
95 } 94 }
96 } 95 }
97 } 96 }
98 return node != NULL; 97 return sk2 != NULL;
99} 98}
100EXPORT_SYMBOL_GPL(inet_csk_bind_conflict); 99EXPORT_SYMBOL_GPL(inet_csk_bind_conflict);
101 100
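
The change from return node != NULL to return sk2 != NULL leans on a property the new macros provide: when a walk exhausts the chain, the entry cursor is left NULL, while an early break leaves it pointing at the match. Condensed sketch of the conflict test:

    #include <net/sock.h>

    /* Condensed from inet_csk_bind_conflict: report whether any bound
     * socket on the bucket clashes with 'ifindex'. */
    static bool has_conflict(struct hlist_head *owners, int ifindex)
    {
            struct sock *sk2;

            sk_for_each_bound(sk2, owners) {
                    if (!sk2->sk_bound_dev_if ||
                        sk2->sk_bound_dev_if == ifindex)
                            break;  /* early exit: sk2 != NULL */
            }
            return sk2 != NULL;     /* NULL means the chain was exhausted */
    }
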
@@ -106,7 +105,6 @@ int inet_csk_get_port(struct sock *sk, unsigned short snum)
106{ 105{
107 struct inet_hashinfo *hashinfo = sk->sk_prot->h.hashinfo; 106 struct inet_hashinfo *hashinfo = sk->sk_prot->h.hashinfo;
108 struct inet_bind_hashbucket *head; 107 struct inet_bind_hashbucket *head;
109 struct hlist_node *node;
110 struct inet_bind_bucket *tb; 108 struct inet_bind_bucket *tb;
111 int ret, attempts = 5; 109 int ret, attempts = 5;
112 struct net *net = sock_net(sk); 110 struct net *net = sock_net(sk);
@@ -129,7 +127,7 @@ again:
129 head = &hashinfo->bhash[inet_bhashfn(net, rover, 127 head = &hashinfo->bhash[inet_bhashfn(net, rover,
130 hashinfo->bhash_size)]; 128 hashinfo->bhash_size)];
131 spin_lock(&head->lock); 129 spin_lock(&head->lock);
132 inet_bind_bucket_for_each(tb, node, &head->chain) 130 inet_bind_bucket_for_each(tb, &head->chain)
133 if (net_eq(ib_net(tb), net) && tb->port == rover) { 131 if (net_eq(ib_net(tb), net) && tb->port == rover) {
134 if (((tb->fastreuse > 0 && 132 if (((tb->fastreuse > 0 &&
135 sk->sk_reuse && 133 sk->sk_reuse &&
@@ -183,7 +181,7 @@ have_snum:
183 head = &hashinfo->bhash[inet_bhashfn(net, snum, 181 head = &hashinfo->bhash[inet_bhashfn(net, snum,
184 hashinfo->bhash_size)]; 182 hashinfo->bhash_size)];
185 spin_lock(&head->lock); 183 spin_lock(&head->lock);
186 inet_bind_bucket_for_each(tb, node, &head->chain) 184 inet_bind_bucket_for_each(tb, &head->chain)
187 if (net_eq(ib_net(tb), net) && tb->port == snum) 185 if (net_eq(ib_net(tb), net) && tb->port == snum)
188 goto tb_found; 186 goto tb_found;
189 } 187 }
diff --git a/net/ipv4/inet_fragment.c b/net/ipv4/inet_fragment.c
index 2e453bde6992..245ae078a07f 100644
--- a/net/ipv4/inet_fragment.c
+++ b/net/ipv4/inet_fragment.c
@@ -33,9 +33,9 @@ static void inet_frag_secret_rebuild(unsigned long dummy)
33 get_random_bytes(&f->rnd, sizeof(u32)); 33 get_random_bytes(&f->rnd, sizeof(u32));
34 for (i = 0; i < INETFRAGS_HASHSZ; i++) { 34 for (i = 0; i < INETFRAGS_HASHSZ; i++) {
35 struct inet_frag_queue *q; 35 struct inet_frag_queue *q;
36 struct hlist_node *p, *n; 36 struct hlist_node *n;
37 37
38 hlist_for_each_entry_safe(q, p, n, &f->hash[i], list) { 38 hlist_for_each_entry_safe(q, n, &f->hash[i], list) {
39 unsigned int hval = f->hashfn(q); 39 unsigned int hval = f->hashfn(q);
40 40
41 if (hval != i) { 41 if (hval != i) {
@@ -203,7 +203,6 @@ static struct inet_frag_queue *inet_frag_intern(struct netns_frags *nf,
203{ 203{
204 struct inet_frag_queue *qp; 204 struct inet_frag_queue *qp;
205#ifdef CONFIG_SMP 205#ifdef CONFIG_SMP
206 struct hlist_node *n;
207#endif 206#endif
208 unsigned int hash; 207 unsigned int hash;
209 208
@@ -219,7 +218,7 @@ static struct inet_frag_queue *inet_frag_intern(struct netns_frags *nf,
219 * such an entry could be created on another cpu, while we 218 * such an entry could be created on another cpu, while we
220 * promoted the read lock to a write lock. 219 * promoted the read lock to a write lock.
221 */ 220 */
222 hlist_for_each_entry(qp, n, &f->hash[hash], list) { 221 hlist_for_each_entry(qp, &f->hash[hash], list) {
223 if (qp->net == nf && f->match(qp, arg)) { 222 if (qp->net == nf && f->match(qp, arg)) {
224 atomic_inc(&qp->refcnt); 223 atomic_inc(&qp->refcnt);
225 write_unlock(&f->lock); 224 write_unlock(&f->lock);
@@ -278,9 +277,8 @@ struct inet_frag_queue *inet_frag_find(struct netns_frags *nf,
278 __releases(&f->lock) 277 __releases(&f->lock)
279{ 278{
280 struct inet_frag_queue *q; 279 struct inet_frag_queue *q;
281 struct hlist_node *n;
282 280
283 hlist_for_each_entry(q, n, &f->hash[hash], list) { 281 hlist_for_each_entry(q, &f->hash[hash], list) {
284 if (q->net == nf && f->match(q, key)) { 282 if (q->net == nf && f->match(q, key)) {
285 atomic_inc(&q->refcnt); 283 atomic_inc(&q->refcnt);
286 read_unlock(&f->lock); 284 read_unlock(&f->lock);
diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
index 0ce0595d9861..6af375afeeef 100644
--- a/net/ipv4/inet_hashtables.c
+++ b/net/ipv4/inet_hashtables.c
@@ -120,13 +120,12 @@ int __inet_inherit_port(struct sock *sk, struct sock *child)
120 * that the listener socket's icsk_bind_hash is the same 120 * that the listener socket's icsk_bind_hash is the same
121 * as that of the child socket. We have to look up or 121 * as that of the child socket. We have to look up or
122 * create a new bind bucket for the child here. */ 122 * create a new bind bucket for the child here. */
123 struct hlist_node *node; 123 inet_bind_bucket_for_each(tb, &head->chain) {
124 inet_bind_bucket_for_each(tb, node, &head->chain) {
125 if (net_eq(ib_net(tb), sock_net(sk)) && 124 if (net_eq(ib_net(tb), sock_net(sk)) &&
126 tb->port == port) 125 tb->port == port)
127 break; 126 break;
128 } 127 }
129 if (!node) { 128 if (!tb) {
130 tb = inet_bind_bucket_create(table->bind_bucket_cachep, 129 tb = inet_bind_bucket_create(table->bind_bucket_cachep,
131 sock_net(sk), head, port); 130 sock_net(sk), head, port);
132 if (!tb) { 131 if (!tb) {
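
__inet_inherit_port() applies the same post-loop test to a find-or-create path, with if (!node) becoming if (!tb). A generic sketch of that shape, using a stand-in bucket type:

    #include <linux/list.h>
    #include <linux/slab.h>

    struct bucket {                 /* stand-in for inet_bind_bucket */
            unsigned short port;
            struct hlist_node node;
    };

    static struct bucket *get_bucket(struct hlist_head *chain,
                                     unsigned short port, gfp_t gfp)
    {
            struct bucket *b;

            hlist_for_each_entry(b, chain, node)
                    if (b->port == port)
                            break;
            if (!b) {               /* no match: the cursor ran out */
                    b = kmalloc(sizeof(*b), gfp);
                    if (!b)
                            return NULL;
                    b->port = port;
                    hlist_add_head(&b->node, chain);
            }
            return b;
    }
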
@@ -493,7 +492,6 @@ int __inet_hash_connect(struct inet_timewait_death_row *death_row,
493 int i, remaining, low, high, port; 492 int i, remaining, low, high, port;
494 static u32 hint; 493 static u32 hint;
495 u32 offset = hint + port_offset; 494 u32 offset = hint + port_offset;
496 struct hlist_node *node;
497 struct inet_timewait_sock *tw = NULL; 495 struct inet_timewait_sock *tw = NULL;
498 496
499 inet_get_local_port_range(&low, &high); 497 inet_get_local_port_range(&low, &high);
@@ -512,7 +510,7 @@ int __inet_hash_connect(struct inet_timewait_death_row *death_row,
512 * because the established check is already 510 * because the established check is already
513 * unique enough. 511 * unique enough.
514 */ 512 */
515 inet_bind_bucket_for_each(tb, node, &head->chain) { 513 inet_bind_bucket_for_each(tb, &head->chain) {
516 if (net_eq(ib_net(tb), net) && 514 if (net_eq(ib_net(tb), net) &&
517 tb->port == port) { 515 tb->port == port) {
518 if (tb->fastreuse >= 0 || 516 if (tb->fastreuse >= 0 ||
diff --git a/net/ipv4/inet_timewait_sock.c b/net/ipv4/inet_timewait_sock.c
index 2784db3155fb..1f27c9f4afd0 100644
--- a/net/ipv4/inet_timewait_sock.c
+++ b/net/ipv4/inet_timewait_sock.c
@@ -216,7 +216,6 @@ static int inet_twdr_do_twkill_work(struct inet_timewait_death_row *twdr,
216 const int slot) 216 const int slot)
217{ 217{
218 struct inet_timewait_sock *tw; 218 struct inet_timewait_sock *tw;
219 struct hlist_node *node;
220 unsigned int killed; 219 unsigned int killed;
221 int ret; 220 int ret;
222 221
@@ -229,7 +228,7 @@ static int inet_twdr_do_twkill_work(struct inet_timewait_death_row *twdr,
229 killed = 0; 228 killed = 0;
230 ret = 0; 229 ret = 0;
231rescan: 230rescan:
232 inet_twsk_for_each_inmate(tw, node, &twdr->cells[slot]) { 231 inet_twsk_for_each_inmate(tw, &twdr->cells[slot]) {
233 __inet_twsk_del_dead_node(tw); 232 __inet_twsk_del_dead_node(tw);
234 spin_unlock(&twdr->death_lock); 233 spin_unlock(&twdr->death_lock);
235 __inet_twsk_kill(tw, twdr->hashinfo); 234 __inet_twsk_kill(tw, twdr->hashinfo);
@@ -438,10 +437,10 @@ void inet_twdr_twcal_tick(unsigned long data)
438 437
439 for (n = 0; n < INET_TWDR_RECYCLE_SLOTS; n++) { 438 for (n = 0; n < INET_TWDR_RECYCLE_SLOTS; n++) {
440 if (time_before_eq(j, now)) { 439 if (time_before_eq(j, now)) {
441 struct hlist_node *node, *safe; 440 struct hlist_node *safe;
442 struct inet_timewait_sock *tw; 441 struct inet_timewait_sock *tw;
443 442
444 inet_twsk_for_each_inmate_safe(tw, node, safe, 443 inet_twsk_for_each_inmate_safe(tw, safe,
445 &twdr->twcal_row[slot]) { 444 &twdr->twcal_row[slot]) {
446 __inet_twsk_del_dead_node(tw); 445 __inet_twsk_del_dead_node(tw);
447 __inet_twsk_kill(tw, twdr->hashinfo); 446 __inet_twsk_kill(tw, twdr->hashinfo);
diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
index 53ddebc292b6..dd44e0ab600c 100644
--- a/net/ipv4/raw.c
+++ b/net/ipv4/raw.c
@@ -111,9 +111,7 @@ EXPORT_SYMBOL_GPL(raw_unhash_sk);
111static struct sock *__raw_v4_lookup(struct net *net, struct sock *sk, 111static struct sock *__raw_v4_lookup(struct net *net, struct sock *sk,
112 unsigned short num, __be32 raddr, __be32 laddr, int dif) 112 unsigned short num, __be32 raddr, __be32 laddr, int dif)
113{ 113{
114 struct hlist_node *node; 114 sk_for_each_from(sk) {
115
116 sk_for_each_from(sk, node) {
117 struct inet_sock *inet = inet_sk(sk); 115 struct inet_sock *inet = inet_sk(sk);
118 116
119 if (net_eq(sock_net(sk), net) && inet->inet_num == num && 117 if (net_eq(sock_net(sk), net) && inet->inet_num == num &&
@@ -914,9 +912,7 @@ static struct sock *raw_get_first(struct seq_file *seq)
914 912
915 for (state->bucket = 0; state->bucket < RAW_HTABLE_SIZE; 913 for (state->bucket = 0; state->bucket < RAW_HTABLE_SIZE;
916 ++state->bucket) { 914 ++state->bucket) {
917 struct hlist_node *node; 915 sk_for_each(sk, &state->h->ht[state->bucket])
918
919 sk_for_each(sk, node, &state->h->ht[state->bucket])
920 if (sock_net(sk) == seq_file_net(seq)) 916 if (sock_net(sk) == seq_file_net(seq))
921 goto found; 917 goto found;
922 } 918 }
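
__raw_v4_lookup() resumes a walk mid-chain rather than at the head, so it uses the _from variant, which after this change takes only the entry; iteration starts at sk itself, inclusive. Trimmed sketch:

    #include <net/inet_sock.h>

    /* Trimmed from __raw_v4_lookup: continue a chain walk at 'sk'
     * itself (the _from variant includes its starting entry). */
    static struct sock *scan_from(struct sock *sk, unsigned short num)
    {
            sk_for_each_from(sk)
                    if (inet_sk(sk)->inet_num == num)
                            return sk;
            return NULL;
    }
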
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 145d3bf8df86..4a8ec457310f 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -954,7 +954,6 @@ struct tcp_md5sig_key *tcp_md5_do_lookup(struct sock *sk,
954{ 954{
955 struct tcp_sock *tp = tcp_sk(sk); 955 struct tcp_sock *tp = tcp_sk(sk);
956 struct tcp_md5sig_key *key; 956 struct tcp_md5sig_key *key;
957 struct hlist_node *pos;
958 unsigned int size = sizeof(struct in_addr); 957 unsigned int size = sizeof(struct in_addr);
959 struct tcp_md5sig_info *md5sig; 958 struct tcp_md5sig_info *md5sig;
960 959
@@ -968,7 +967,7 @@ struct tcp_md5sig_key *tcp_md5_do_lookup(struct sock *sk,
968 if (family == AF_INET6) 967 if (family == AF_INET6)
969 size = sizeof(struct in6_addr); 968 size = sizeof(struct in6_addr);
970#endif 969#endif
971 hlist_for_each_entry_rcu(key, pos, &md5sig->head, node) { 970 hlist_for_each_entry_rcu(key, &md5sig->head, node) {
972 if (key->family != family) 971 if (key->family != family)
973 continue; 972 continue;
974 if (!memcmp(&key->addr, addr, size)) 973 if (!memcmp(&key->addr, addr, size))
@@ -1069,14 +1068,14 @@ static void tcp_clear_md5_list(struct sock *sk)
1069{ 1068{
1070 struct tcp_sock *tp = tcp_sk(sk); 1069 struct tcp_sock *tp = tcp_sk(sk);
1071 struct tcp_md5sig_key *key; 1070 struct tcp_md5sig_key *key;
1072 struct hlist_node *pos, *n; 1071 struct hlist_node *n;
1073 struct tcp_md5sig_info *md5sig; 1072 struct tcp_md5sig_info *md5sig;
1074 1073
1075 md5sig = rcu_dereference_protected(tp->md5sig_info, 1); 1074 md5sig = rcu_dereference_protected(tp->md5sig_info, 1);
1076 1075
1077 if (!hlist_empty(&md5sig->head)) 1076 if (!hlist_empty(&md5sig->head))
1078 tcp_free_md5sig_pool(); 1077 tcp_free_md5sig_pool();
1079 hlist_for_each_entry_safe(key, pos, n, &md5sig->head, node) { 1078 hlist_for_each_entry_safe(key, n, &md5sig->head, node) {
1080 hlist_del_rcu(&key->node); 1079 hlist_del_rcu(&key->node);
1081 atomic_sub(sizeof(*key), &sk->sk_omem_alloc); 1080 atomic_sub(sizeof(*key), &sk->sk_omem_alloc);
1082 kfree_rcu(key, rcu); 1081 kfree_rcu(key, rcu);
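
tcp_clear_md5_list() pairs the safe iterator with RCU-deferred freeing: hlist_del_rcu() unlinks an entry while readers may still be traversing it, and kfree_rcu() postpones the actual free until the grace period ends. A sketch with a stand-in key type:

    #include <linux/rculist.h>
    #include <linux/slab.h>

    struct key_ent {                /* stand-in for tcp_md5sig_key */
            struct hlist_node node;
            struct rcu_head rcu;
    };

    /* Caller holds the update-side lock; concurrent RCU readers may
     * still be walking the chain. */
    static void clear_keys(struct hlist_head *head)
    {
            struct key_ent *key;
            struct hlist_node *n;

            hlist_for_each_entry_safe(key, n, head, node) {
                    hlist_del_rcu(&key->node);
                    kfree_rcu(key, rcu);    /* free after the grace period */
            }
    }
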
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 4dc0d44a5d31..f2c7e615f902 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -1419,11 +1419,10 @@ int ipv6_chk_addr(struct net *net, const struct in6_addr *addr,
1419 struct net_device *dev, int strict) 1419 struct net_device *dev, int strict)
1420{ 1420{
1421 struct inet6_ifaddr *ifp; 1421 struct inet6_ifaddr *ifp;
1422 struct hlist_node *node;
1423 unsigned int hash = inet6_addr_hash(addr); 1422 unsigned int hash = inet6_addr_hash(addr);
1424 1423
1425 rcu_read_lock_bh(); 1424 rcu_read_lock_bh();
1426 hlist_for_each_entry_rcu(ifp, node, &inet6_addr_lst[hash], addr_lst) { 1425 hlist_for_each_entry_rcu(ifp, &inet6_addr_lst[hash], addr_lst) {
1427 if (!net_eq(dev_net(ifp->idev->dev), net)) 1426 if (!net_eq(dev_net(ifp->idev->dev), net))
1428 continue; 1427 continue;
1429 if (ipv6_addr_equal(&ifp->addr, addr) && 1428 if (ipv6_addr_equal(&ifp->addr, addr) &&
@@ -1445,9 +1444,8 @@ static bool ipv6_chk_same_addr(struct net *net, const struct in6_addr *addr,
1445{ 1444{
1446 unsigned int hash = inet6_addr_hash(addr); 1445 unsigned int hash = inet6_addr_hash(addr);
1447 struct inet6_ifaddr *ifp; 1446 struct inet6_ifaddr *ifp;
1448 struct hlist_node *node;
1449 1447
1450 hlist_for_each_entry(ifp, node, &inet6_addr_lst[hash], addr_lst) { 1448 hlist_for_each_entry(ifp, &inet6_addr_lst[hash], addr_lst) {
1451 if (!net_eq(dev_net(ifp->idev->dev), net)) 1449 if (!net_eq(dev_net(ifp->idev->dev), net))
1452 continue; 1450 continue;
1453 if (ipv6_addr_equal(&ifp->addr, addr)) { 1451 if (ipv6_addr_equal(&ifp->addr, addr)) {
@@ -1487,10 +1485,9 @@ struct inet6_ifaddr *ipv6_get_ifaddr(struct net *net, const struct in6_addr *add
1487{ 1485{
1488 struct inet6_ifaddr *ifp, *result = NULL; 1486 struct inet6_ifaddr *ifp, *result = NULL;
1489 unsigned int hash = inet6_addr_hash(addr); 1487 unsigned int hash = inet6_addr_hash(addr);
1490 struct hlist_node *node;
1491 1488
1492 rcu_read_lock_bh(); 1489 rcu_read_lock_bh();
1493 hlist_for_each_entry_rcu_bh(ifp, node, &inet6_addr_lst[hash], addr_lst) { 1490 hlist_for_each_entry_rcu_bh(ifp, &inet6_addr_lst[hash], addr_lst) {
1494 if (!net_eq(dev_net(ifp->idev->dev), net)) 1491 if (!net_eq(dev_net(ifp->idev->dev), net))
1495 continue; 1492 continue;
1496 if (ipv6_addr_equal(&ifp->addr, addr)) { 1493 if (ipv6_addr_equal(&ifp->addr, addr)) {
@@ -2907,11 +2904,10 @@ static int addrconf_ifdown(struct net_device *dev, int how)
2907 /* Step 2: clear hash table */ 2904 /* Step 2: clear hash table */
2908 for (i = 0; i < IN6_ADDR_HSIZE; i++) { 2905 for (i = 0; i < IN6_ADDR_HSIZE; i++) {
2909 struct hlist_head *h = &inet6_addr_lst[i]; 2906 struct hlist_head *h = &inet6_addr_lst[i];
2910 struct hlist_node *n;
2911 2907
2912 spin_lock_bh(&addrconf_hash_lock); 2908 spin_lock_bh(&addrconf_hash_lock);
2913 restart: 2909 restart:
2914 hlist_for_each_entry_rcu(ifa, n, h, addr_lst) { 2910 hlist_for_each_entry_rcu(ifa, h, addr_lst) {
2915 if (ifa->idev == idev) { 2911 if (ifa->idev == idev) {
2916 hlist_del_init_rcu(&ifa->addr_lst); 2912 hlist_del_init_rcu(&ifa->addr_lst);
2917 addrconf_del_timer(ifa); 2913 addrconf_del_timer(ifa);
@@ -3218,8 +3214,7 @@ static struct inet6_ifaddr *if6_get_first(struct seq_file *seq, loff_t pos)
3218 } 3214 }
3219 3215
3220 for (; state->bucket < IN6_ADDR_HSIZE; ++state->bucket) { 3216 for (; state->bucket < IN6_ADDR_HSIZE; ++state->bucket) {
3221 struct hlist_node *n; 3217 hlist_for_each_entry_rcu_bh(ifa, &inet6_addr_lst[state->bucket],
3222 hlist_for_each_entry_rcu_bh(ifa, n, &inet6_addr_lst[state->bucket],
3223 addr_lst) { 3218 addr_lst) {
3224 if (!net_eq(dev_net(ifa->idev->dev), net)) 3219 if (!net_eq(dev_net(ifa->idev->dev), net))
3225 continue; 3220 continue;
@@ -3244,9 +3239,8 @@ static struct inet6_ifaddr *if6_get_next(struct seq_file *seq,
3244{ 3239{
3245 struct if6_iter_state *state = seq->private; 3240 struct if6_iter_state *state = seq->private;
3246 struct net *net = seq_file_net(seq); 3241 struct net *net = seq_file_net(seq);
3247 struct hlist_node *n = &ifa->addr_lst;
3248 3242
3249 hlist_for_each_entry_continue_rcu_bh(ifa, n, addr_lst) { 3243 hlist_for_each_entry_continue_rcu_bh(ifa, addr_lst) {
3250 if (!net_eq(dev_net(ifa->idev->dev), net)) 3244 if (!net_eq(dev_net(ifa->idev->dev), net))
3251 continue; 3245 continue;
3252 state->offset++; 3246 state->offset++;
@@ -3255,7 +3249,7 @@ static struct inet6_ifaddr *if6_get_next(struct seq_file *seq,
3255 3249
3256 while (++state->bucket < IN6_ADDR_HSIZE) { 3250 while (++state->bucket < IN6_ADDR_HSIZE) {
3257 state->offset = 0; 3251 state->offset = 0;
3258 hlist_for_each_entry_rcu_bh(ifa, n, 3252 hlist_for_each_entry_rcu_bh(ifa,
3259 &inet6_addr_lst[state->bucket], addr_lst) { 3253 &inet6_addr_lst[state->bucket], addr_lst) {
3260 if (!net_eq(dev_net(ifa->idev->dev), net)) 3254 if (!net_eq(dev_net(ifa->idev->dev), net))
3261 continue; 3255 continue;
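
if6_get_next() no longer has to prime a node pointer from &ifa->addr_lst; the _continue variant now advances from the entry it is handed, starting at its successor. A trimmed sketch of the seq-file step, with the per-netns filtering omitted:

    #include <net/if_inet6.h>

    /* Caller holds rcu_read_lock_bh(); 'ifa' is the entry the previous
     * step returned. */
    static struct inet6_ifaddr *next_addr(struct inet6_ifaddr *ifa)
    {
            hlist_for_each_entry_continue_rcu_bh(ifa, addr_lst)
                    return ifa;     /* first successor on the chain */
            return NULL;
    }
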
@@ -3357,11 +3351,10 @@ int ipv6_chk_home_addr(struct net *net, const struct in6_addr *addr)
3357{ 3351{
3358 int ret = 0; 3352 int ret = 0;
3359 struct inet6_ifaddr *ifp = NULL; 3353 struct inet6_ifaddr *ifp = NULL;
3360 struct hlist_node *n;
3361 unsigned int hash = inet6_addr_hash(addr); 3354 unsigned int hash = inet6_addr_hash(addr);
3362 3355
3363 rcu_read_lock_bh(); 3356 rcu_read_lock_bh();
3364 hlist_for_each_entry_rcu_bh(ifp, n, &inet6_addr_lst[hash], addr_lst) { 3357 hlist_for_each_entry_rcu_bh(ifp, &inet6_addr_lst[hash], addr_lst) {
3365 if (!net_eq(dev_net(ifp->idev->dev), net)) 3358 if (!net_eq(dev_net(ifp->idev->dev), net))
3366 continue; 3359 continue;
3367 if (ipv6_addr_equal(&ifp->addr, addr) && 3360 if (ipv6_addr_equal(&ifp->addr, addr) &&
@@ -3383,7 +3376,6 @@ static void addrconf_verify(unsigned long foo)
3383{ 3376{
3384 unsigned long now, next, next_sec, next_sched; 3377 unsigned long now, next, next_sec, next_sched;
3385 struct inet6_ifaddr *ifp; 3378 struct inet6_ifaddr *ifp;
3386 struct hlist_node *node;
3387 int i; 3379 int i;
3388 3380
3389 rcu_read_lock_bh(); 3381 rcu_read_lock_bh();
@@ -3395,7 +3387,7 @@ static void addrconf_verify(unsigned long foo)
3395 3387
3396 for (i = 0; i < IN6_ADDR_HSIZE; i++) { 3388 for (i = 0; i < IN6_ADDR_HSIZE; i++) {
3397restart: 3389restart:
3398 hlist_for_each_entry_rcu_bh(ifp, node, 3390 hlist_for_each_entry_rcu_bh(ifp,
3399 &inet6_addr_lst[i], addr_lst) { 3391 &inet6_addr_lst[i], addr_lst) {
3400 unsigned long age; 3392 unsigned long age;
3401 3393
@@ -3866,7 +3858,6 @@ static int inet6_dump_addr(struct sk_buff *skb, struct netlink_callback *cb,
3866 struct net_device *dev; 3858 struct net_device *dev;
3867 struct inet6_dev *idev; 3859 struct inet6_dev *idev;
3868 struct hlist_head *head; 3860 struct hlist_head *head;
3869 struct hlist_node *node;
3870 3861
3871 s_h = cb->args[0]; 3862 s_h = cb->args[0];
3872 s_idx = idx = cb->args[1]; 3863 s_idx = idx = cb->args[1];
@@ -3876,7 +3867,7 @@ static int inet6_dump_addr(struct sk_buff *skb, struct netlink_callback *cb,
3876 for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) { 3867 for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
3877 idx = 0; 3868 idx = 0;
3878 head = &net->dev_index_head[h]; 3869 head = &net->dev_index_head[h];
3879 hlist_for_each_entry_rcu(dev, node, head, index_hlist) { 3870 hlist_for_each_entry_rcu(dev, head, index_hlist) {
3880 if (idx < s_idx) 3871 if (idx < s_idx)
3881 goto cont; 3872 goto cont;
3882 if (h > s_h || idx > s_idx) 3873 if (h > s_h || idx > s_idx)
@@ -4222,7 +4213,6 @@ static int inet6_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
4222 struct net_device *dev; 4213 struct net_device *dev;
4223 struct inet6_dev *idev; 4214 struct inet6_dev *idev;
4224 struct hlist_head *head; 4215 struct hlist_head *head;
4225 struct hlist_node *node;
4226 4216
4227 s_h = cb->args[0]; 4217 s_h = cb->args[0];
4228 s_idx = cb->args[1]; 4218 s_idx = cb->args[1];
@@ -4231,7 +4221,7 @@ static int inet6_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
4231 for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) { 4221 for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
4232 idx = 0; 4222 idx = 0;
4233 head = &net->dev_index_head[h]; 4223 head = &net->dev_index_head[h];
4234 hlist_for_each_entry_rcu(dev, node, head, index_hlist) { 4224 hlist_for_each_entry_rcu(dev, head, index_hlist) {
4235 if (idx < s_idx) 4225 if (idx < s_idx)
4236 goto cont; 4226 goto cont;
4237 idev = __in6_dev_get(dev); 4227 idev = __in6_dev_get(dev);
diff --git a/net/ipv6/addrlabel.c b/net/ipv6/addrlabel.c
index ff76eecfd622..aad64352cb60 100644
--- a/net/ipv6/addrlabel.c
+++ b/net/ipv6/addrlabel.c
@@ -173,9 +173,8 @@ static struct ip6addrlbl_entry *__ipv6_addr_label(struct net *net,
173 const struct in6_addr *addr, 173 const struct in6_addr *addr,
174 int type, int ifindex) 174 int type, int ifindex)
175{ 175{
176 struct hlist_node *pos;
177 struct ip6addrlbl_entry *p; 176 struct ip6addrlbl_entry *p;
178 hlist_for_each_entry_rcu(p, pos, &ip6addrlbl_table.head, list) { 177 hlist_for_each_entry_rcu(p, &ip6addrlbl_table.head, list) {
179 if (__ip6addrlbl_match(net, p, addr, type, ifindex)) 178 if (__ip6addrlbl_match(net, p, addr, type, ifindex))
180 return p; 179 return p;
181 } 180 }
@@ -261,9 +260,9 @@ static int __ip6addrlbl_add(struct ip6addrlbl_entry *newp, int replace)
261 if (hlist_empty(&ip6addrlbl_table.head)) { 260 if (hlist_empty(&ip6addrlbl_table.head)) {
262 hlist_add_head_rcu(&newp->list, &ip6addrlbl_table.head); 261 hlist_add_head_rcu(&newp->list, &ip6addrlbl_table.head);
263 } else { 262 } else {
264 struct hlist_node *pos, *n; 263 struct hlist_node *n;
265 struct ip6addrlbl_entry *p = NULL; 264 struct ip6addrlbl_entry *p = NULL;
266 hlist_for_each_entry_safe(p, pos, n, 265 hlist_for_each_entry_safe(p, n,
267 &ip6addrlbl_table.head, list) { 266 &ip6addrlbl_table.head, list) {
268 if (p->prefixlen == newp->prefixlen && 267 if (p->prefixlen == newp->prefixlen &&
269 net_eq(ip6addrlbl_net(p), ip6addrlbl_net(newp)) && 268 net_eq(ip6addrlbl_net(p), ip6addrlbl_net(newp)) &&
@@ -319,13 +318,13 @@ static int __ip6addrlbl_del(struct net *net,
319 int ifindex) 318 int ifindex)
320{ 319{
321 struct ip6addrlbl_entry *p = NULL; 320 struct ip6addrlbl_entry *p = NULL;
322 struct hlist_node *pos, *n; 321 struct hlist_node *n;
323 int ret = -ESRCH; 322 int ret = -ESRCH;
324 323
325 ADDRLABEL(KERN_DEBUG "%s(prefix=%pI6, prefixlen=%d, ifindex=%d)\n", 324 ADDRLABEL(KERN_DEBUG "%s(prefix=%pI6, prefixlen=%d, ifindex=%d)\n",
326 __func__, prefix, prefixlen, ifindex); 325 __func__, prefix, prefixlen, ifindex);
327 326
328 hlist_for_each_entry_safe(p, pos, n, &ip6addrlbl_table.head, list) { 327 hlist_for_each_entry_safe(p, n, &ip6addrlbl_table.head, list) {
329 if (p->prefixlen == prefixlen && 328 if (p->prefixlen == prefixlen &&
330 net_eq(ip6addrlbl_net(p), net) && 329 net_eq(ip6addrlbl_net(p), net) &&
331 p->ifindex == ifindex && 330 p->ifindex == ifindex &&
@@ -380,11 +379,11 @@ static int __net_init ip6addrlbl_net_init(struct net *net)
380static void __net_exit ip6addrlbl_net_exit(struct net *net) 379static void __net_exit ip6addrlbl_net_exit(struct net *net)
381{ 380{
382 struct ip6addrlbl_entry *p = NULL; 381 struct ip6addrlbl_entry *p = NULL;
383 struct hlist_node *pos, *n; 382 struct hlist_node *n;
384 383
385 /* Remove all labels belonging to the exiting net */ 384 /* Remove all labels belonging to the exiting net */
386 spin_lock(&ip6addrlbl_table.lock); 385 spin_lock(&ip6addrlbl_table.lock);
387 hlist_for_each_entry_safe(p, pos, n, &ip6addrlbl_table.head, list) { 386 hlist_for_each_entry_safe(p, n, &ip6addrlbl_table.head, list) {
388 if (net_eq(ip6addrlbl_net(p), net)) { 387 if (net_eq(ip6addrlbl_net(p), net)) {
389 hlist_del_rcu(&p->list); 388 hlist_del_rcu(&p->list);
390 ip6addrlbl_put(p); 389 ip6addrlbl_put(p);
@@ -505,12 +504,11 @@ static int ip6addrlbl_dump(struct sk_buff *skb, struct netlink_callback *cb)
505{ 504{
506 struct net *net = sock_net(skb->sk); 505 struct net *net = sock_net(skb->sk);
507 struct ip6addrlbl_entry *p; 506 struct ip6addrlbl_entry *p;
508 struct hlist_node *pos;
509 int idx = 0, s_idx = cb->args[0]; 507 int idx = 0, s_idx = cb->args[0];
510 int err; 508 int err;
511 509
512 rcu_read_lock(); 510 rcu_read_lock();
513 hlist_for_each_entry_rcu(p, pos, &ip6addrlbl_table.head, list) { 511 hlist_for_each_entry_rcu(p, &ip6addrlbl_table.head, list) {
514 if (idx >= s_idx && 512 if (idx >= s_idx &&
515 net_eq(ip6addrlbl_net(p), net)) { 513 net_eq(ip6addrlbl_net(p), net)) {
516 if ((err = ip6addrlbl_fill(skb, p, 514 if ((err = ip6addrlbl_fill(skb, p,
diff --git a/net/ipv6/inet6_connection_sock.c b/net/ipv6/inet6_connection_sock.c
index b386a2ce4c6f..9bfab19ff3c0 100644
--- a/net/ipv6/inet6_connection_sock.c
+++ b/net/ipv6/inet6_connection_sock.c
@@ -31,7 +31,6 @@ int inet6_csk_bind_conflict(const struct sock *sk,
31 const struct inet_bind_bucket *tb, bool relax) 31 const struct inet_bind_bucket *tb, bool relax)
32{ 32{
33 const struct sock *sk2; 33 const struct sock *sk2;
34 const struct hlist_node *node;
35 int reuse = sk->sk_reuse; 34 int reuse = sk->sk_reuse;
36 int reuseport = sk->sk_reuseport; 35 int reuseport = sk->sk_reuseport;
37 kuid_t uid = sock_i_uid((struct sock *)sk); 36 kuid_t uid = sock_i_uid((struct sock *)sk);
@@ -41,7 +40,7 @@ int inet6_csk_bind_conflict(const struct sock *sk,
41 * See comment in inet_csk_bind_conflict about sock lookup 40 * See comment in inet_csk_bind_conflict about sock lookup
42 * vs net namespaces issues. 41 * vs net namespaces issues.
43 */ 42 */
44 sk_for_each_bound(sk2, node, &tb->owners) { 43 sk_for_each_bound(sk2, &tb->owners) {
45 if (sk != sk2 && 44 if (sk != sk2 &&
46 (!sk->sk_bound_dev_if || 45 (!sk->sk_bound_dev_if ||
47 !sk2->sk_bound_dev_if || 46 !sk2->sk_bound_dev_if ||
@@ -58,7 +57,7 @@ int inet6_csk_bind_conflict(const struct sock *sk,
58 } 57 }
59 } 58 }
60 59
61 return node != NULL; 60 return sk2 != NULL;
62} 61}
63 62
64EXPORT_SYMBOL_GPL(inet6_csk_bind_conflict); 63EXPORT_SYMBOL_GPL(inet6_csk_bind_conflict);
diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
index 710cafd2e1a9..192dd1a0e188 100644
--- a/net/ipv6/ip6_fib.c
+++ b/net/ipv6/ip6_fib.c
@@ -224,7 +224,6 @@ struct fib6_table *fib6_get_table(struct net *net, u32 id)
224{ 224{
225 struct fib6_table *tb; 225 struct fib6_table *tb;
226 struct hlist_head *head; 226 struct hlist_head *head;
227 struct hlist_node *node;
228 unsigned int h; 227 unsigned int h;
229 228
230 if (id == 0) 229 if (id == 0)
@@ -232,7 +231,7 @@ struct fib6_table *fib6_get_table(struct net *net, u32 id)
232 h = id & (FIB6_TABLE_HASHSZ - 1); 231 h = id & (FIB6_TABLE_HASHSZ - 1);
233 rcu_read_lock(); 232 rcu_read_lock();
234 head = &net->ipv6.fib_table_hash[h]; 233 head = &net->ipv6.fib_table_hash[h];
235 hlist_for_each_entry_rcu(tb, node, head, tb6_hlist) { 234 hlist_for_each_entry_rcu(tb, head, tb6_hlist) {
236 if (tb->tb6_id == id) { 235 if (tb->tb6_id == id) {
237 rcu_read_unlock(); 236 rcu_read_unlock();
238 return tb; 237 return tb;
@@ -363,7 +362,6 @@ static int inet6_dump_fib(struct sk_buff *skb, struct netlink_callback *cb)
363 struct rt6_rtnl_dump_arg arg; 362 struct rt6_rtnl_dump_arg arg;
364 struct fib6_walker_t *w; 363 struct fib6_walker_t *w;
365 struct fib6_table *tb; 364 struct fib6_table *tb;
366 struct hlist_node *node;
367 struct hlist_head *head; 365 struct hlist_head *head;
368 int res = 0; 366 int res = 0;
369 367
@@ -398,7 +396,7 @@ static int inet6_dump_fib(struct sk_buff *skb, struct netlink_callback *cb)
398 for (h = s_h; h < FIB6_TABLE_HASHSZ; h++, s_e = 0) { 396 for (h = s_h; h < FIB6_TABLE_HASHSZ; h++, s_e = 0) {
399 e = 0; 397 e = 0;
400 head = &net->ipv6.fib_table_hash[h]; 398 head = &net->ipv6.fib_table_hash[h];
401 hlist_for_each_entry_rcu(tb, node, head, tb6_hlist) { 399 hlist_for_each_entry_rcu(tb, head, tb6_hlist) {
402 if (e < s_e) 400 if (e < s_e)
403 goto next; 401 goto next;
404 res = fib6_dump_table(tb, skb, cb); 402 res = fib6_dump_table(tb, skb, cb);
@@ -1520,14 +1518,13 @@ void fib6_clean_all_ro(struct net *net, int (*func)(struct rt6_info *, void *arg
1520 int prune, void *arg) 1518 int prune, void *arg)
1521{ 1519{
1522 struct fib6_table *table; 1520 struct fib6_table *table;
1523 struct hlist_node *node;
1524 struct hlist_head *head; 1521 struct hlist_head *head;
1525 unsigned int h; 1522 unsigned int h;
1526 1523
1527 rcu_read_lock(); 1524 rcu_read_lock();
1528 for (h = 0; h < FIB6_TABLE_HASHSZ; h++) { 1525 for (h = 0; h < FIB6_TABLE_HASHSZ; h++) {
1529 head = &net->ipv6.fib_table_hash[h]; 1526 head = &net->ipv6.fib_table_hash[h];
1530 hlist_for_each_entry_rcu(table, node, head, tb6_hlist) { 1527 hlist_for_each_entry_rcu(table, head, tb6_hlist) {
1531 read_lock_bh(&table->tb6_lock); 1528 read_lock_bh(&table->tb6_lock);
1532 fib6_clean_tree(net, &table->tb6_root, 1529 fib6_clean_tree(net, &table->tb6_root,
1533 func, prune, arg); 1530 func, prune, arg);
@@ -1540,14 +1537,13 @@ void fib6_clean_all(struct net *net, int (*func)(struct rt6_info *, void *arg),
1540 int prune, void *arg) 1537 int prune, void *arg)
1541{ 1538{
1542 struct fib6_table *table; 1539 struct fib6_table *table;
1543 struct hlist_node *node;
1544 struct hlist_head *head; 1540 struct hlist_head *head;
1545 unsigned int h; 1541 unsigned int h;
1546 1542
1547 rcu_read_lock(); 1543 rcu_read_lock();
1548 for (h = 0; h < FIB6_TABLE_HASHSZ; h++) { 1544 for (h = 0; h < FIB6_TABLE_HASHSZ; h++) {
1549 head = &net->ipv6.fib_table_hash[h]; 1545 head = &net->ipv6.fib_table_hash[h];
1550 hlist_for_each_entry_rcu(table, node, head, tb6_hlist) { 1546 hlist_for_each_entry_rcu(table, head, tb6_hlist) {
1551 write_lock_bh(&table->tb6_lock); 1547 write_lock_bh(&table->tb6_lock);
1552 fib6_clean_tree(net, &table->tb6_root, 1548 fib6_clean_tree(net, &table->tb6_root,
1553 func, prune, arg); 1549 func, prune, arg);
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
index c65907db8c44..330b5e7b7df6 100644
--- a/net/ipv6/raw.c
+++ b/net/ipv6/raw.c
@@ -71,10 +71,9 @@ static struct sock *__raw_v6_lookup(struct net *net, struct sock *sk,
71 unsigned short num, const struct in6_addr *loc_addr, 71 unsigned short num, const struct in6_addr *loc_addr,
72 const struct in6_addr *rmt_addr, int dif) 72 const struct in6_addr *rmt_addr, int dif)
73{ 73{
74 struct hlist_node *node;
75 bool is_multicast = ipv6_addr_is_multicast(loc_addr); 74 bool is_multicast = ipv6_addr_is_multicast(loc_addr);
76 75
77 sk_for_each_from(sk, node) 76 sk_for_each_from(sk)
78 if (inet_sk(sk)->inet_num == num) { 77 if (inet_sk(sk)->inet_num == num) {
79 struct ipv6_pinfo *np = inet6_sk(sk); 78 struct ipv6_pinfo *np = inet6_sk(sk);
80 79
diff --git a/net/ipv6/xfrm6_tunnel.c b/net/ipv6/xfrm6_tunnel.c
index 6cc48012b730..de2bcfaaf759 100644
--- a/net/ipv6/xfrm6_tunnel.c
+++ b/net/ipv6/xfrm6_tunnel.c
@@ -89,9 +89,8 @@ static struct xfrm6_tunnel_spi *__xfrm6_tunnel_spi_lookup(struct net *net, const
89{ 89{
90 struct xfrm6_tunnel_net *xfrm6_tn = xfrm6_tunnel_pernet(net); 90 struct xfrm6_tunnel_net *xfrm6_tn = xfrm6_tunnel_pernet(net);
91 struct xfrm6_tunnel_spi *x6spi; 91 struct xfrm6_tunnel_spi *x6spi;
92 struct hlist_node *pos;
93 92
94 hlist_for_each_entry_rcu(x6spi, pos, 93 hlist_for_each_entry_rcu(x6spi,
95 &xfrm6_tn->spi_byaddr[xfrm6_tunnel_spi_hash_byaddr(saddr)], 94 &xfrm6_tn->spi_byaddr[xfrm6_tunnel_spi_hash_byaddr(saddr)],
96 list_byaddr) { 95 list_byaddr) {
97 if (xfrm6_addr_equal(&x6spi->addr, saddr)) 96 if (xfrm6_addr_equal(&x6spi->addr, saddr))
@@ -120,9 +119,8 @@ static int __xfrm6_tunnel_spi_check(struct net *net, u32 spi)
120 struct xfrm6_tunnel_net *xfrm6_tn = xfrm6_tunnel_pernet(net); 119 struct xfrm6_tunnel_net *xfrm6_tn = xfrm6_tunnel_pernet(net);
121 struct xfrm6_tunnel_spi *x6spi; 120 struct xfrm6_tunnel_spi *x6spi;
122 int index = xfrm6_tunnel_spi_hash_byspi(spi); 121 int index = xfrm6_tunnel_spi_hash_byspi(spi);
123 struct hlist_node *pos;
124 122
125 hlist_for_each_entry(x6spi, pos, 123 hlist_for_each_entry(x6spi,
126 &xfrm6_tn->spi_byspi[index], 124 &xfrm6_tn->spi_byspi[index],
127 list_byspi) { 125 list_byspi) {
128 if (x6spi->spi == spi) 126 if (x6spi->spi == spi)
@@ -203,11 +201,11 @@ static void xfrm6_tunnel_free_spi(struct net *net, xfrm_address_t *saddr)
203{ 201{
204 struct xfrm6_tunnel_net *xfrm6_tn = xfrm6_tunnel_pernet(net); 202 struct xfrm6_tunnel_net *xfrm6_tn = xfrm6_tunnel_pernet(net);
205 struct xfrm6_tunnel_spi *x6spi; 203 struct xfrm6_tunnel_spi *x6spi;
206 struct hlist_node *pos, *n; 204 struct hlist_node *n;
207 205
208 spin_lock_bh(&xfrm6_tunnel_spi_lock); 206 spin_lock_bh(&xfrm6_tunnel_spi_lock);
209 207
210 hlist_for_each_entry_safe(x6spi, pos, n, 208 hlist_for_each_entry_safe(x6spi, n,
211 &xfrm6_tn->spi_byaddr[xfrm6_tunnel_spi_hash_byaddr(saddr)], 209 &xfrm6_tn->spi_byaddr[xfrm6_tunnel_spi_hash_byaddr(saddr)],
212 list_byaddr) 210 list_byaddr)
213 { 211 {
diff --git a/net/ipx/af_ipx.c b/net/ipx/af_ipx.c
index dfd6faaf0ea7..f547a47d381c 100644
--- a/net/ipx/af_ipx.c
+++ b/net/ipx/af_ipx.c
@@ -228,9 +228,8 @@ static struct sock *__ipxitf_find_socket(struct ipx_interface *intrfc,
228 __be16 port) 228 __be16 port)
229{ 229{
230 struct sock *s; 230 struct sock *s;
231 struct hlist_node *node;
232 231
233 sk_for_each(s, node, &intrfc->if_sklist) 232 sk_for_each(s, &intrfc->if_sklist)
234 if (ipx_sk(s)->port == port) 233 if (ipx_sk(s)->port == port)
235 goto found; 234 goto found;
236 s = NULL; 235 s = NULL;
@@ -259,12 +258,11 @@ static struct sock *ipxitf_find_internal_socket(struct ipx_interface *intrfc,
259 __be16 port) 258 __be16 port)
260{ 259{
261 struct sock *s; 260 struct sock *s;
262 struct hlist_node *node;
263 261
264 ipxitf_hold(intrfc); 262 ipxitf_hold(intrfc);
265 spin_lock_bh(&intrfc->if_sklist_lock); 263 spin_lock_bh(&intrfc->if_sklist_lock);
266 264
267 sk_for_each(s, node, &intrfc->if_sklist) { 265 sk_for_each(s, &intrfc->if_sklist) {
268 struct ipx_sock *ipxs = ipx_sk(s); 266 struct ipx_sock *ipxs = ipx_sk(s);
269 267
270 if (ipxs->port == port && 268 if (ipxs->port == port &&
@@ -282,14 +280,14 @@ found:
282static void __ipxitf_down(struct ipx_interface *intrfc) 280static void __ipxitf_down(struct ipx_interface *intrfc)
283{ 281{
284 struct sock *s; 282 struct sock *s;
285 struct hlist_node *node, *t; 283 struct hlist_node *t;
286 284
287 /* Delete all routes associated with this interface */ 285 /* Delete all routes associated with this interface */
288 ipxrtr_del_routes(intrfc); 286 ipxrtr_del_routes(intrfc);
289 287
290 spin_lock_bh(&intrfc->if_sklist_lock); 288 spin_lock_bh(&intrfc->if_sklist_lock);
291 /* error sockets */ 289 /* error sockets */
292 sk_for_each_safe(s, node, t, &intrfc->if_sklist) { 290 sk_for_each_safe(s, t, &intrfc->if_sklist) {
293 struct ipx_sock *ipxs = ipx_sk(s); 291 struct ipx_sock *ipxs = ipx_sk(s);
294 292
295 s->sk_err = ENOLINK; 293 s->sk_err = ENOLINK;
@@ -385,12 +383,11 @@ static int ipxitf_demux_socket(struct ipx_interface *intrfc,
385 int is_broadcast = !memcmp(ipx->ipx_dest.node, ipx_broadcast_node, 383 int is_broadcast = !memcmp(ipx->ipx_dest.node, ipx_broadcast_node,
386 IPX_NODE_LEN); 384 IPX_NODE_LEN);
387 struct sock *s; 385 struct sock *s;
388 struct hlist_node *node;
389 int rc; 386 int rc;
390 387
391 spin_lock_bh(&intrfc->if_sklist_lock); 388 spin_lock_bh(&intrfc->if_sklist_lock);
392 389
393 sk_for_each(s, node, &intrfc->if_sklist) { 390 sk_for_each(s, &intrfc->if_sklist) {
394 struct ipx_sock *ipxs = ipx_sk(s); 391 struct ipx_sock *ipxs = ipx_sk(s);
395 392
396 if (ipxs->port == ipx->ipx_dest.sock && 393 if (ipxs->port == ipx->ipx_dest.sock &&
@@ -446,12 +443,11 @@ static struct sock *ncp_connection_hack(struct ipx_interface *intrfc,
446 connection = (((int) *(ncphdr + 9)) << 8) | (int) *(ncphdr + 8); 443 connection = (((int) *(ncphdr + 9)) << 8) | (int) *(ncphdr + 8);
447 444
448 if (connection) { 445 if (connection) {
449 struct hlist_node *node;
450 /* Now we have to look for a special NCP connection handling 446 /* Now we have to look for a special NCP connection handling
451 * socket. Only these sockets have ipx_ncp_conn != 0, set by 447 * socket. Only these sockets have ipx_ncp_conn != 0, set by
452 * SIOCIPXNCPCONN. */ 448 * SIOCIPXNCPCONN. */
453 spin_lock_bh(&intrfc->if_sklist_lock); 449 spin_lock_bh(&intrfc->if_sklist_lock);
454 sk_for_each(sk, node, &intrfc->if_sklist) 450 sk_for_each(sk, &intrfc->if_sklist)
455 if (ipx_sk(sk)->ipx_ncp_conn == connection) { 451 if (ipx_sk(sk)->ipx_ncp_conn == connection) {
456 sock_hold(sk); 452 sock_hold(sk);
457 goto found; 453 goto found;
diff --git a/net/ipx/ipx_proc.c b/net/ipx/ipx_proc.c
index 02ff7f2f60d4..65e8833a2510 100644
--- a/net/ipx/ipx_proc.c
+++ b/net/ipx/ipx_proc.c
@@ -103,19 +103,18 @@ out:
103static __inline__ struct sock *ipx_get_socket_idx(loff_t pos) 103static __inline__ struct sock *ipx_get_socket_idx(loff_t pos)
104{ 104{
105 struct sock *s = NULL; 105 struct sock *s = NULL;
106 struct hlist_node *node;
107 struct ipx_interface *i; 106 struct ipx_interface *i;
108 107
109 list_for_each_entry(i, &ipx_interfaces, node) { 108 list_for_each_entry(i, &ipx_interfaces, node) {
110 spin_lock_bh(&i->if_sklist_lock); 109 spin_lock_bh(&i->if_sklist_lock);
111 sk_for_each(s, node, &i->if_sklist) { 110 sk_for_each(s, &i->if_sklist) {
112 if (!pos) 111 if (!pos)
113 break; 112 break;
114 --pos; 113 --pos;
115 } 114 }
116 spin_unlock_bh(&i->if_sklist_lock); 115 spin_unlock_bh(&i->if_sklist_lock);
117 if (!pos) { 116 if (!pos) {
118 if (node) 117 if (s)
119 goto found; 118 goto found;
120 break; 119 break;
121 } 120 }
diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
index cd6f7a991d80..a7d11ffe4284 100644
--- a/net/iucv/af_iucv.c
+++ b/net/iucv/af_iucv.c
@@ -156,14 +156,13 @@ static int afiucv_pm_freeze(struct device *dev)
 {
 	struct iucv_sock *iucv;
 	struct sock *sk;
-	struct hlist_node *node;
 	int err = 0;
 
 #ifdef CONFIG_PM_DEBUG
 	printk(KERN_WARNING "afiucv_pm_freeze\n");
 #endif
 	read_lock(&iucv_sk_list.lock);
-	sk_for_each(sk, node, &iucv_sk_list.head) {
+	sk_for_each(sk, &iucv_sk_list.head) {
 		iucv = iucv_sk(sk);
 		switch (sk->sk_state) {
 		case IUCV_DISCONN:
@@ -194,13 +193,12 @@ static int afiucv_pm_freeze(struct device *dev)
 static int afiucv_pm_restore_thaw(struct device *dev)
 {
 	struct sock *sk;
-	struct hlist_node *node;
 
 #ifdef CONFIG_PM_DEBUG
 	printk(KERN_WARNING "afiucv_pm_restore_thaw\n");
 #endif
 	read_lock(&iucv_sk_list.lock);
-	sk_for_each(sk, node, &iucv_sk_list.head) {
+	sk_for_each(sk, &iucv_sk_list.head) {
 		switch (sk->sk_state) {
 		case IUCV_CONNECTED:
 			sk->sk_err = EPIPE;
@@ -390,9 +388,8 @@ static int afiucv_hs_send(struct iucv_message *imsg, struct sock *sock,
 static struct sock *__iucv_get_sock_by_name(char *nm)
 {
 	struct sock *sk;
-	struct hlist_node *node;
 
-	sk_for_each(sk, node, &iucv_sk_list.head)
+	sk_for_each(sk, &iucv_sk_list.head)
 		if (!memcmp(&iucv_sk(sk)->src_name, nm, 8))
 			return sk;
 
@@ -1678,7 +1675,6 @@ static int iucv_callback_connreq(struct iucv_path *path,
 	unsigned char user_data[16];
 	unsigned char nuser_data[16];
 	unsigned char src_name[8];
-	struct hlist_node *node;
 	struct sock *sk, *nsk;
 	struct iucv_sock *iucv, *niucv;
 	int err;
@@ -1689,7 +1685,7 @@ static int iucv_callback_connreq(struct iucv_path *path,
 	read_lock(&iucv_sk_list.lock);
 	iucv = NULL;
 	sk = NULL;
-	sk_for_each(sk, node, &iucv_sk_list.head)
+	sk_for_each(sk, &iucv_sk_list.head)
 		if (sk->sk_state == IUCV_LISTEN &&
 		    !memcmp(&iucv_sk(sk)->src_name, src_name, 8)) {
 			/*
@@ -2115,7 +2111,6 @@ static int afiucv_hs_callback_rx(struct sock *sk, struct sk_buff *skb)
 static int afiucv_hs_rcv(struct sk_buff *skb, struct net_device *dev,
 			 struct packet_type *pt, struct net_device *orig_dev)
 {
-	struct hlist_node *node;
 	struct sock *sk;
 	struct iucv_sock *iucv;
 	struct af_iucv_trans_hdr *trans_hdr;
@@ -2132,7 +2127,7 @@ static int afiucv_hs_rcv(struct sk_buff *skb, struct net_device *dev,
 	iucv = NULL;
 	sk = NULL;
 	read_lock(&iucv_sk_list.lock);
-	sk_for_each(sk, node, &iucv_sk_list.head) {
+	sk_for_each(sk, &iucv_sk_list.head) {
 		if (trans_hdr->flags == AF_IUCV_FLAG_SYN) {
 			if ((!memcmp(&iucv_sk(sk)->src_name,
 				     trans_hdr->destAppName, 8)) &&
@@ -2225,10 +2220,9 @@ static void afiucv_hs_callback_txnotify(struct sk_buff *skb,
 	struct sk_buff *list_skb;
 	struct sk_buff *nskb;
 	unsigned long flags;
-	struct hlist_node *node;
 
 	read_lock_irqsave(&iucv_sk_list.lock, flags);
-	sk_for_each(sk, node, &iucv_sk_list.head)
+	sk_for_each(sk, &iucv_sk_list.head)
 		if (sk == isk) {
 			iucv = iucv_sk(sk);
 			break;
@@ -2299,14 +2293,13 @@ static int afiucv_netdev_event(struct notifier_block *this,
 			       unsigned long event, void *ptr)
 {
 	struct net_device *event_dev = (struct net_device *)ptr;
-	struct hlist_node *node;
 	struct sock *sk;
 	struct iucv_sock *iucv;
 
 	switch (event) {
 	case NETDEV_REBOOT:
 	case NETDEV_GOING_DOWN:
-		sk_for_each(sk, node, &iucv_sk_list.head) {
+		sk_for_each(sk, &iucv_sk_list.head) {
 			iucv = iucv_sk(sk);
 			if ((iucv->hs_dev == event_dev) &&
 			    (sk->sk_state == IUCV_CONNECTED)) {
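The sk_for_each() family used throughout af_iucv is a thin wrapper over the hlist iterators, so it sheds the same parameter. A minimal sketch of the include/net/sock.h side of this series (illustration only; sock.h is converted by the same patch set):

	#define sk_for_each(__sk, list) \
		hlist_for_each_entry(__sk, list, sk_node)
	#define sk_for_each_rcu(__sk, list) \
		hlist_for_each_entry_rcu(__sk, list, sk_node)
	#define sk_for_each_safe(__sk, tmp, list) \
		hlist_for_each_entry_safe(__sk, tmp, list, sk_node)

The now-unused "struct hlist_node *node;" locals become dead declarations, which is why nearly every hunk above deletes one.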
diff --git a/net/key/af_key.c b/net/key/af_key.c
index 9ef79851f297..556fdafdd1ea 100644
--- a/net/key/af_key.c
+++ b/net/key/af_key.c
@@ -225,7 +225,6 @@ static int pfkey_broadcast(struct sk_buff *skb, gfp_t allocation,
 {
 	struct netns_pfkey *net_pfkey = net_generic(net, pfkey_net_id);
 	struct sock *sk;
-	struct hlist_node *node;
 	struct sk_buff *skb2 = NULL;
 	int err = -ESRCH;
 
@@ -236,7 +235,7 @@ static int pfkey_broadcast(struct sk_buff *skb, gfp_t allocation,
 		return -ENOMEM;
 
 	rcu_read_lock();
-	sk_for_each_rcu(sk, node, &net_pfkey->table) {
+	sk_for_each_rcu(sk, &net_pfkey->table) {
 		struct pfkey_sock *pfk = pfkey_sk(sk);
 		int err2;
 
diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c
index dcfd64e83ab7..d36875f3427e 100644
--- a/net/l2tp/l2tp_core.c
+++ b/net/l2tp/l2tp_core.c
@@ -221,10 +221,9 @@ static struct l2tp_session *l2tp_session_find_2(struct net *net, u32 session_id)
 	struct hlist_head *session_list =
 		l2tp_session_id_hash_2(pn, session_id);
 	struct l2tp_session *session;
-	struct hlist_node *walk;
 
 	rcu_read_lock_bh();
-	hlist_for_each_entry_rcu(session, walk, session_list, global_hlist) {
+	hlist_for_each_entry_rcu(session, session_list, global_hlist) {
 		if (session->session_id == session_id) {
 			rcu_read_unlock_bh();
 			return session;
@@ -253,7 +252,6 @@ struct l2tp_session *l2tp_session_find(struct net *net, struct l2tp_tunnel *tunn
 {
 	struct hlist_head *session_list;
 	struct l2tp_session *session;
-	struct hlist_node *walk;
 
 	/* In L2TPv3, session_ids are unique over all tunnels and we
 	 * sometimes need to look them up before we know the
@@ -264,7 +262,7 @@ struct l2tp_session *l2tp_session_find(struct net *net, struct l2tp_tunnel *tunn
 
 	session_list = l2tp_session_id_hash(tunnel, session_id);
 	read_lock_bh(&tunnel->hlist_lock);
-	hlist_for_each_entry(session, walk, session_list, hlist) {
+	hlist_for_each_entry(session, session_list, hlist) {
 		if (session->session_id == session_id) {
 			read_unlock_bh(&tunnel->hlist_lock);
 			return session;
@@ -279,13 +277,12 @@ EXPORT_SYMBOL_GPL(l2tp_session_find);
 struct l2tp_session *l2tp_session_find_nth(struct l2tp_tunnel *tunnel, int nth)
 {
 	int hash;
-	struct hlist_node *walk;
 	struct l2tp_session *session;
 	int count = 0;
 
 	read_lock_bh(&tunnel->hlist_lock);
 	for (hash = 0; hash < L2TP_HASH_SIZE; hash++) {
-		hlist_for_each_entry(session, walk, &tunnel->session_hlist[hash], hlist) {
+		hlist_for_each_entry(session, &tunnel->session_hlist[hash], hlist) {
 			if (++count > nth) {
 				read_unlock_bh(&tunnel->hlist_lock);
 				return session;
@@ -306,12 +303,11 @@ struct l2tp_session *l2tp_session_find_by_ifname(struct net *net, char *ifname)
 {
 	struct l2tp_net *pn = l2tp_pernet(net);
 	int hash;
-	struct hlist_node *walk;
 	struct l2tp_session *session;
 
 	rcu_read_lock_bh();
 	for (hash = 0; hash < L2TP_HASH_SIZE_2; hash++) {
-		hlist_for_each_entry_rcu(session, walk, &pn->l2tp_session_hlist[hash], global_hlist) {
+		hlist_for_each_entry_rcu(session, &pn->l2tp_session_hlist[hash], global_hlist) {
 			if (!strcmp(session->ifname, ifname)) {
 				rcu_read_unlock_bh();
 				return session;
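The RCU flavour changes shape the same way. Roughly, the rculist.h definition after this series (sketch, illustration only): each step dereferences the next pointer through an RCU accessor, so the l2tp lookups above stay safe under rcu_read_lock_bh() while writers mutate the chain:

	#define hlist_for_each_entry_rcu(pos, head, member)				\
		for (pos = hlist_entry_safe(rcu_dereference_raw(hlist_first_rcu(head)),	\
					    typeof(*(pos)), member);			\
		     pos;								\
		     pos = hlist_entry_safe(rcu_dereference_raw(hlist_next_rcu(		\
				&(pos)->member)), typeof(*(pos)), member))

The caller contract is unchanged: hold the RCU read lock (or the write-side lock) for the duration of the walk, exactly as l2tp_session_find_2() does.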
diff --git a/net/l2tp/l2tp_ip.c b/net/l2tp/l2tp_ip.c
index f7ac8f42fee2..7f41b7051269 100644
--- a/net/l2tp/l2tp_ip.c
+++ b/net/l2tp/l2tp_ip.c
@@ -49,10 +49,9 @@ static inline struct l2tp_ip_sock *l2tp_ip_sk(const struct sock *sk)
 
 static struct sock *__l2tp_ip_bind_lookup(struct net *net, __be32 laddr, int dif, u32 tunnel_id)
 {
-	struct hlist_node *node;
 	struct sock *sk;
 
-	sk_for_each_bound(sk, node, &l2tp_ip_bind_table) {
+	sk_for_each_bound(sk, &l2tp_ip_bind_table) {
 		struct inet_sock *inet = inet_sk(sk);
 		struct l2tp_ip_sock *l2tp = l2tp_ip_sk(sk);
 
diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c
index 8ee4a86ae996..41f2f8126ebc 100644
--- a/net/l2tp/l2tp_ip6.c
+++ b/net/l2tp/l2tp_ip6.c
@@ -60,10 +60,9 @@ static struct sock *__l2tp_ip6_bind_lookup(struct net *net,
 					   struct in6_addr *laddr,
 					   int dif, u32 tunnel_id)
 {
-	struct hlist_node *node;
 	struct sock *sk;
 
-	sk_for_each_bound(sk, node, &l2tp_ip6_bind_table) {
+	sk_for_each_bound(sk, &l2tp_ip6_bind_table) {
 		struct in6_addr *addr = inet6_rcv_saddr(sk);
 		struct l2tp_ip6_sock *l2tp = l2tp_ip6_sk(sk);
 
diff --git a/net/llc/llc_sap.c b/net/llc/llc_sap.c
index 7c5073badc73..78be45cda5c1 100644
--- a/net/llc/llc_sap.c
+++ b/net/llc/llc_sap.c
@@ -393,12 +393,11 @@ static void llc_sap_mcast(struct llc_sap *sap,
 {
 	int i = 0, count = 256 / sizeof(struct sock *);
 	struct sock *sk, *stack[count];
-	struct hlist_node *node;
 	struct llc_sock *llc;
 	struct hlist_head *dev_hb = llc_sk_dev_hash(sap, skb->dev->ifindex);
 
 	spin_lock_bh(&sap->sk_lock);
-	hlist_for_each_entry(llc, node, dev_hb, dev_hash_node) {
+	hlist_for_each_entry(llc, dev_hb, dev_hash_node) {
 
 		sk = &llc->sk;
 
diff --git a/net/mac80211/mesh_pathtbl.c b/net/mac80211/mesh_pathtbl.c
index 6b3c4e119c63..dc7c8df40c2c 100644
--- a/net/mac80211/mesh_pathtbl.c
+++ b/net/mac80211/mesh_pathtbl.c
@@ -72,9 +72,9 @@ static inline struct mesh_table *resize_dereference_mpp_paths(void)
  * it's used twice. So it is illegal to do
  *	for_each_mesh_entry(rcu_dereference(...), ...)
  */
-#define for_each_mesh_entry(tbl, p, node, i) \
+#define for_each_mesh_entry(tbl, node, i) \
 	for (i = 0; i <= tbl->hash_mask; i++) \
-		hlist_for_each_entry_rcu(node, p, &tbl->hash_buckets[i], list)
+		hlist_for_each_entry_rcu(node, &tbl->hash_buckets[i], list)
 
 
 static struct mesh_table *mesh_table_alloc(int size_order)
@@ -139,7 +139,7 @@ static void mesh_table_free(struct mesh_table *tbl, bool free_leafs)
 	}
 	if (free_leafs) {
 		spin_lock_bh(&tbl->gates_lock);
-		hlist_for_each_entry_safe(gate, p, q,
+		hlist_for_each_entry_safe(gate, q,
 					  tbl->known_gates, list) {
 			hlist_del(&gate->list);
 			kfree(gate);
@@ -333,12 +333,11 @@ static struct mesh_path *mpath_lookup(struct mesh_table *tbl, const u8 *dst,
 		   struct ieee80211_sub_if_data *sdata)
 {
 	struct mesh_path *mpath;
-	struct hlist_node *n;
 	struct hlist_head *bucket;
 	struct mpath_node *node;
 
 	bucket = &tbl->hash_buckets[mesh_table_hash(dst, sdata, tbl)];
-	hlist_for_each_entry_rcu(node, n, bucket, list) {
+	hlist_for_each_entry_rcu(node, bucket, list) {
 		mpath = node->mpath;
 		if (mpath->sdata == sdata &&
 		    ether_addr_equal(dst, mpath->dst)) {
@@ -389,11 +388,10 @@ mesh_path_lookup_by_idx(struct ieee80211_sub_if_data *sdata, int idx)
 {
 	struct mesh_table *tbl = rcu_dereference(mesh_paths);
 	struct mpath_node *node;
-	struct hlist_node *p;
 	int i;
 	int j = 0;
 
-	for_each_mesh_entry(tbl, p, node, i) {
+	for_each_mesh_entry(tbl, node, i) {
 		if (sdata && node->mpath->sdata != sdata)
 			continue;
 		if (j++ == idx) {
@@ -417,13 +415,12 @@ int mesh_path_add_gate(struct mesh_path *mpath)
 {
 	struct mesh_table *tbl;
 	struct mpath_node *gate, *new_gate;
-	struct hlist_node *n;
 	int err;
 
 	rcu_read_lock();
 	tbl = rcu_dereference(mesh_paths);
 
-	hlist_for_each_entry_rcu(gate, n, tbl->known_gates, list)
+	hlist_for_each_entry_rcu(gate, tbl->known_gates, list)
 		if (gate->mpath == mpath) {
 			err = -EEXIST;
 			goto err_rcu;
@@ -460,9 +457,9 @@ err_rcu:
 static void mesh_gate_del(struct mesh_table *tbl, struct mesh_path *mpath)
 {
 	struct mpath_node *gate;
-	struct hlist_node *p, *q;
+	struct hlist_node *q;
 
-	hlist_for_each_entry_safe(gate, p, q, tbl->known_gates, list) {
+	hlist_for_each_entry_safe(gate, q, tbl->known_gates, list) {
 		if (gate->mpath != mpath)
 			continue;
 		spin_lock_bh(&tbl->gates_lock);
@@ -504,7 +501,6 @@ int mesh_path_add(struct ieee80211_sub_if_data *sdata, const u8 *dst)
 	struct mesh_path *mpath, *new_mpath;
 	struct mpath_node *node, *new_node;
 	struct hlist_head *bucket;
-	struct hlist_node *n;
 	int grow = 0;
 	int err = 0;
 	u32 hash_idx;
@@ -550,7 +546,7 @@ int mesh_path_add(struct ieee80211_sub_if_data *sdata, const u8 *dst)
 	spin_lock(&tbl->hashwlock[hash_idx]);
 
 	err = -EEXIST;
-	hlist_for_each_entry(node, n, bucket, list) {
+	hlist_for_each_entry(node, bucket, list) {
 		mpath = node->mpath;
 		if (mpath->sdata == sdata &&
 		    ether_addr_equal(dst, mpath->dst))
@@ -640,7 +636,6 @@ int mpp_path_add(struct ieee80211_sub_if_data *sdata,
 	struct mesh_path *mpath, *new_mpath;
 	struct mpath_node *node, *new_node;
 	struct hlist_head *bucket;
-	struct hlist_node *n;
 	int grow = 0;
 	int err = 0;
 	u32 hash_idx;
@@ -680,7 +675,7 @@ int mpp_path_add(struct ieee80211_sub_if_data *sdata,
 	spin_lock(&tbl->hashwlock[hash_idx]);
 
 	err = -EEXIST;
-	hlist_for_each_entry(node, n, bucket, list) {
+	hlist_for_each_entry(node, bucket, list) {
 		mpath = node->mpath;
 		if (mpath->sdata == sdata &&
 		    ether_addr_equal(dst, mpath->dst))
@@ -725,14 +720,13 @@ void mesh_plink_broken(struct sta_info *sta)
 	static const u8 bcast[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
 	struct mesh_path *mpath;
 	struct mpath_node *node;
-	struct hlist_node *p;
 	struct ieee80211_sub_if_data *sdata = sta->sdata;
 	int i;
 	__le16 reason = cpu_to_le16(WLAN_REASON_MESH_PATH_DEST_UNREACHABLE);
 
 	rcu_read_lock();
 	tbl = rcu_dereference(mesh_paths);
-	for_each_mesh_entry(tbl, p, node, i) {
+	for_each_mesh_entry(tbl, node, i) {
 		mpath = node->mpath;
 		if (rcu_dereference(mpath->next_hop) == sta &&
 		    mpath->flags & MESH_PATH_ACTIVE &&
@@ -792,13 +786,12 @@ void mesh_path_flush_by_nexthop(struct sta_info *sta)
 	struct mesh_table *tbl;
 	struct mesh_path *mpath;
 	struct mpath_node *node;
-	struct hlist_node *p;
 	int i;
 
 	rcu_read_lock();
 	read_lock_bh(&pathtbl_resize_lock);
 	tbl = resize_dereference_mesh_paths();
-	for_each_mesh_entry(tbl, p, node, i) {
+	for_each_mesh_entry(tbl, node, i) {
 		mpath = node->mpath;
 		if (rcu_dereference(mpath->next_hop) == sta) {
 			spin_lock(&tbl->hashwlock[i]);
@@ -815,11 +808,10 @@ static void table_flush_by_iface(struct mesh_table *tbl,
 {
 	struct mesh_path *mpath;
 	struct mpath_node *node;
-	struct hlist_node *p;
 	int i;
 
 	WARN_ON(!rcu_read_lock_held());
-	for_each_mesh_entry(tbl, p, node, i) {
+	for_each_mesh_entry(tbl, node, i) {
 		mpath = node->mpath;
 		if (mpath->sdata != sdata)
 			continue;
@@ -865,7 +857,6 @@ int mesh_path_del(struct ieee80211_sub_if_data *sdata, const u8 *addr)
 	struct mesh_path *mpath;
 	struct mpath_node *node;
 	struct hlist_head *bucket;
-	struct hlist_node *n;
 	int hash_idx;
 	int err = 0;
 
@@ -875,7 +866,7 @@ int mesh_path_del(struct ieee80211_sub_if_data *sdata, const u8 *addr)
 	bucket = &tbl->hash_buckets[hash_idx];
 
 	spin_lock(&tbl->hashwlock[hash_idx]);
-	hlist_for_each_entry(node, n, bucket, list) {
+	hlist_for_each_entry(node, bucket, list) {
 		mpath = node->mpath;
 		if (mpath->sdata == sdata &&
 		    ether_addr_equal(addr, mpath->dst)) {
@@ -920,7 +911,6 @@ void mesh_path_tx_pending(struct mesh_path *mpath)
 int mesh_path_send_to_gates(struct mesh_path *mpath)
 {
 	struct ieee80211_sub_if_data *sdata = mpath->sdata;
-	struct hlist_node *n;
 	struct mesh_table *tbl;
 	struct mesh_path *from_mpath = mpath;
 	struct mpath_node *gate = NULL;
@@ -935,7 +925,7 @@ int mesh_path_send_to_gates(struct mesh_path *mpath)
 	if (!known_gates)
 		return -EHOSTUNREACH;
 
-	hlist_for_each_entry_rcu(gate, n, known_gates, list) {
+	hlist_for_each_entry_rcu(gate, known_gates, list) {
 		if (gate->mpath->sdata != sdata)
 			continue;
 
@@ -951,7 +941,7 @@ int mesh_path_send_to_gates(struct mesh_path *mpath)
 		}
 	}
 
-	hlist_for_each_entry_rcu(gate, n, known_gates, list)
+	hlist_for_each_entry_rcu(gate, known_gates, list)
 		if (gate->mpath->sdata == sdata) {
 			mpath_dbg(sdata, "Sending to %pM\n", gate->mpath->dst);
 			mesh_path_tx_pending(gate->mpath);
@@ -1096,12 +1086,11 @@ void mesh_path_expire(struct ieee80211_sub_if_data *sdata)
 	struct mesh_table *tbl;
 	struct mesh_path *mpath;
 	struct mpath_node *node;
-	struct hlist_node *p;
 	int i;
 
 	rcu_read_lock();
 	tbl = rcu_dereference(mesh_paths);
-	for_each_mesh_entry(tbl, p, node, i) {
+	for_each_mesh_entry(tbl, node, i) {
 		if (node->mpath->sdata != sdata)
 			continue;
 		mpath = node->mpath;
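Local convenience macros built on top of the iterators, like for_each_mesh_entry above, have to shed the parameter as well; callers then drop both the "struct hlist_node *p;" local and the argument. An out-of-tree wrapper would get the same one-line treatment, e.g. (hypothetical names, illustration only):

	/* after the conversion: only the typed cursor and bucket index remain */
	#define for_each_foo_entry(tbl, e, i)				\
		for (i = 0; i < (tbl)->size; i++)			\
			hlist_for_each_entry_rcu(e, &(tbl)->buckets[i], list)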
diff --git a/net/netfilter/ipvs/ip_vs_conn.c b/net/netfilter/ipvs/ip_vs_conn.c
index 9f00db7e03f2..704e514e02ab 100644
--- a/net/netfilter/ipvs/ip_vs_conn.c
+++ b/net/netfilter/ipvs/ip_vs_conn.c
@@ -259,13 +259,12 @@ __ip_vs_conn_in_get(const struct ip_vs_conn_param *p)
 {
 	unsigned int hash;
 	struct ip_vs_conn *cp;
-	struct hlist_node *n;
 
 	hash = ip_vs_conn_hashkey_param(p, false);
 
 	ct_read_lock(hash);
 
-	hlist_for_each_entry(cp, n, &ip_vs_conn_tab[hash], c_list) {
+	hlist_for_each_entry(cp, &ip_vs_conn_tab[hash], c_list) {
 		if (cp->af == p->af &&
 		    p->cport == cp->cport && p->vport == cp->vport &&
 		    ip_vs_addr_equal(p->af, p->caddr, &cp->caddr) &&
@@ -344,13 +343,12 @@ struct ip_vs_conn *ip_vs_ct_in_get(const struct ip_vs_conn_param *p)
 {
 	unsigned int hash;
 	struct ip_vs_conn *cp;
-	struct hlist_node *n;
 
 	hash = ip_vs_conn_hashkey_param(p, false);
 
 	ct_read_lock(hash);
 
-	hlist_for_each_entry(cp, n, &ip_vs_conn_tab[hash], c_list) {
+	hlist_for_each_entry(cp, &ip_vs_conn_tab[hash], c_list) {
 		if (!ip_vs_conn_net_eq(cp, p->net))
 			continue;
 		if (p->pe_data && p->pe->ct_match) {
@@ -394,7 +392,6 @@ struct ip_vs_conn *ip_vs_conn_out_get(const struct ip_vs_conn_param *p)
 {
 	unsigned int hash;
 	struct ip_vs_conn *cp, *ret=NULL;
-	struct hlist_node *n;
 
 	/*
 	 * Check for "full" addressed entries
@@ -403,7 +400,7 @@ struct ip_vs_conn *ip_vs_conn_out_get(const struct ip_vs_conn_param *p)
 
 	ct_read_lock(hash);
 
-	hlist_for_each_entry(cp, n, &ip_vs_conn_tab[hash], c_list) {
+	hlist_for_each_entry(cp, &ip_vs_conn_tab[hash], c_list) {
 		if (cp->af == p->af &&
 		    p->vport == cp->cport && p->cport == cp->dport &&
 		    ip_vs_addr_equal(p->af, p->vaddr, &cp->caddr) &&
@@ -953,11 +950,10 @@ static void *ip_vs_conn_array(struct seq_file *seq, loff_t pos)
 	int idx;
 	struct ip_vs_conn *cp;
 	struct ip_vs_iter_state *iter = seq->private;
-	struct hlist_node *n;
 
 	for (idx = 0; idx < ip_vs_conn_tab_size; idx++) {
 		ct_read_lock_bh(idx);
-		hlist_for_each_entry(cp, n, &ip_vs_conn_tab[idx], c_list) {
+		hlist_for_each_entry(cp, &ip_vs_conn_tab[idx], c_list) {
 			if (pos-- == 0) {
 				iter->l = &ip_vs_conn_tab[idx];
 				return cp;
@@ -981,7 +977,6 @@ static void *ip_vs_conn_seq_next(struct seq_file *seq, void *v, loff_t *pos)
 {
 	struct ip_vs_conn *cp = v;
 	struct ip_vs_iter_state *iter = seq->private;
-	struct hlist_node *e;
 	struct hlist_head *l = iter->l;
 	int idx;
 
@@ -990,15 +985,15 @@ static void *ip_vs_conn_seq_next(struct seq_file *seq, void *v, loff_t *pos)
 		return ip_vs_conn_array(seq, 0);
 
 	/* more on same hash chain? */
-	if ((e = cp->c_list.next))
-		return hlist_entry(e, struct ip_vs_conn, c_list);
+	if (cp->c_list.next)
+		return hlist_entry(cp->c_list.next, struct ip_vs_conn, c_list);
 
 	idx = l - ip_vs_conn_tab;
 	ct_read_unlock_bh(idx);
 
 	while (++idx < ip_vs_conn_tab_size) {
 		ct_read_lock_bh(idx);
-		hlist_for_each_entry(cp, e, &ip_vs_conn_tab[idx], c_list) {
+		hlist_for_each_entry(cp, &ip_vs_conn_tab[idx], c_list) {
 			iter->l = &ip_vs_conn_tab[idx];
 			return cp;
 		}
@@ -1200,14 +1195,13 @@ void ip_vs_random_dropentry(struct net *net)
 	 */
 	for (idx = 0; idx < (ip_vs_conn_tab_size>>5); idx++) {
 		unsigned int hash = net_random() & ip_vs_conn_tab_mask;
-		struct hlist_node *n;
 
 		/*
 		 *  Lock is actually needed in this loop.
 		 */
 		ct_write_lock_bh(hash);
 
-		hlist_for_each_entry(cp, n, &ip_vs_conn_tab[hash], c_list) {
+		hlist_for_each_entry(cp, &ip_vs_conn_tab[hash], c_list) {
 			if (cp->flags & IP_VS_CONN_F_TEMPLATE)
 				/* connection template */
 				continue;
@@ -1255,14 +1249,12 @@ static void ip_vs_conn_flush(struct net *net)
 
 flush_again:
 	for (idx = 0; idx < ip_vs_conn_tab_size; idx++) {
-		struct hlist_node *n;
-
 		/*
 		 *  Lock is actually needed in this loop.
 		 */
 		ct_write_lock_bh(idx);
 
-		hlist_for_each_entry(cp, n, &ip_vs_conn_tab[idx], c_list) {
+		hlist_for_each_entry(cp, &ip_vs_conn_tab[idx], c_list) {
 			if (!ip_vs_conn_net_eq(cp, net))
 				continue;
 			IP_VS_DBG(4, "del connection\n");
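ip_vs_conn_seq_next() shows the idiom for resuming mid-chain without a separate cursor: the hlist_node embedded in the current entry is the cursor. A minimal sketch of that shape (hypothetical helper name, illustration only):

	static struct ip_vs_conn *conn_next_on_chain(struct ip_vs_conn *cp)
	{
		/* cp->c_list is the hlist_node embedded in struct ip_vs_conn */
		if (cp->c_list.next)
			return hlist_entry(cp->c_list.next,
					   struct ip_vs_conn, c_list);
		return NULL;	/* end of this hash chain */
	}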
diff --git a/net/netfilter/nf_conntrack_expect.c b/net/netfilter/nf_conntrack_expect.c
index 3921e5bc1235..8c10e3db3d9b 100644
--- a/net/netfilter/nf_conntrack_expect.c
+++ b/net/netfilter/nf_conntrack_expect.c
@@ -90,14 +90,13 @@ __nf_ct_expect_find(struct net *net, u16 zone,
 			const struct nf_conntrack_tuple *tuple)
 {
 	struct nf_conntrack_expect *i;
-	struct hlist_node *n;
 	unsigned int h;
 
 	if (!net->ct.expect_count)
 		return NULL;
 
 	h = nf_ct_expect_dst_hash(tuple);
-	hlist_for_each_entry_rcu(i, n, &net->ct.expect_hash[h], hnode) {
+	hlist_for_each_entry_rcu(i, &net->ct.expect_hash[h], hnode) {
 		if (nf_ct_tuple_mask_cmp(tuple, &i->tuple, &i->mask) &&
 		    nf_ct_zone(i->master) == zone)
 			return i;
@@ -130,14 +129,13 @@ nf_ct_find_expectation(struct net *net, u16 zone,
 		       const struct nf_conntrack_tuple *tuple)
 {
 	struct nf_conntrack_expect *i, *exp = NULL;
-	struct hlist_node *n;
 	unsigned int h;
 
 	if (!net->ct.expect_count)
 		return NULL;
 
 	h = nf_ct_expect_dst_hash(tuple);
-	hlist_for_each_entry(i, n, &net->ct.expect_hash[h], hnode) {
+	hlist_for_each_entry(i, &net->ct.expect_hash[h], hnode) {
 		if (!(i->flags & NF_CT_EXPECT_INACTIVE) &&
 		    nf_ct_tuple_mask_cmp(tuple, &i->tuple, &i->mask) &&
 		    nf_ct_zone(i->master) == zone) {
@@ -172,13 +170,13 @@ void nf_ct_remove_expectations(struct nf_conn *ct)
 {
 	struct nf_conn_help *help = nfct_help(ct);
 	struct nf_conntrack_expect *exp;
-	struct hlist_node *n, *next;
+	struct hlist_node *next;
 
 	/* Optimization: most connection never expect any others. */
 	if (!help)
 		return;
 
-	hlist_for_each_entry_safe(exp, n, next, &help->expectations, lnode) {
+	hlist_for_each_entry_safe(exp, next, &help->expectations, lnode) {
 		if (del_timer(&exp->timeout)) {
 			nf_ct_unlink_expect(exp);
 			nf_ct_expect_put(exp);
@@ -348,9 +346,8 @@ static void evict_oldest_expect(struct nf_conn *master,
 {
 	struct nf_conn_help *master_help = nfct_help(master);
 	struct nf_conntrack_expect *exp, *last = NULL;
-	struct hlist_node *n;
 
-	hlist_for_each_entry(exp, n, &master_help->expectations, lnode) {
+	hlist_for_each_entry(exp, &master_help->expectations, lnode) {
 		if (exp->class == new->class)
 			last = exp;
 	}
@@ -369,7 +366,7 @@ static inline int __nf_ct_expect_check(struct nf_conntrack_expect *expect)
 	struct nf_conn_help *master_help = nfct_help(master);
 	struct nf_conntrack_helper *helper;
 	struct net *net = nf_ct_exp_net(expect);
-	struct hlist_node *n, *next;
+	struct hlist_node *next;
 	unsigned int h;
 	int ret = 1;
 
@@ -378,7 +375,7 @@ static inline int __nf_ct_expect_check(struct nf_conntrack_expect *expect)
 		goto out;
 	}
 	h = nf_ct_expect_dst_hash(&expect->tuple);
-	hlist_for_each_entry_safe(i, n, next, &net->ct.expect_hash[h], hnode) {
+	hlist_for_each_entry_safe(i, next, &net->ct.expect_hash[h], hnode) {
 		if (expect_matches(i, expect)) {
 			if (del_timer(&i->timeout)) {
 				nf_ct_unlink_expect(i);
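The deletion-safe variant keeps exactly one raw struct hlist_node *, the lookahead cursor, which is why the hunks above shrink "struct hlist_node *n, *next;" to "struct hlist_node *next;" instead of deleting the declaration outright. Roughly, from this series' list.h (sketch, illustration only):

	/* 'n' caches pos->member.next before the body may free 'pos' */
	#define hlist_for_each_entry_safe(pos, n, head, member)			  \
		for (pos = hlist_entry_safe((head)->first, typeof(*pos), member); \
		     pos && ({ n = pos->member.next; 1; });			  \
		     pos = hlist_entry_safe(n, typeof(*pos), member))

That makes patterns like the del_timer()/nf_ct_unlink_expect() loop above safe: the entry can be unlinked and freed while the walk continues from the cached next node.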
diff --git a/net/netfilter/nf_conntrack_helper.c b/net/netfilter/nf_conntrack_helper.c
index 013cdf69fe29..a9740bd6fe54 100644
--- a/net/netfilter/nf_conntrack_helper.c
+++ b/net/netfilter/nf_conntrack_helper.c
@@ -116,14 +116,13 @@ __nf_ct_helper_find(const struct nf_conntrack_tuple *tuple)
 {
 	struct nf_conntrack_helper *helper;
 	struct nf_conntrack_tuple_mask mask = { .src.u.all = htons(0xFFFF) };
-	struct hlist_node *n;
 	unsigned int h;
 
 	if (!nf_ct_helper_count)
 		return NULL;
 
 	h = helper_hash(tuple);
-	hlist_for_each_entry_rcu(helper, n, &nf_ct_helper_hash[h], hnode) {
+	hlist_for_each_entry_rcu(helper, &nf_ct_helper_hash[h], hnode) {
 		if (nf_ct_tuple_src_mask_cmp(tuple, &helper->tuple, &mask))
 			return helper;
 	}
@@ -134,11 +133,10 @@ struct nf_conntrack_helper *
 __nf_conntrack_helper_find(const char *name, u16 l3num, u8 protonum)
 {
 	struct nf_conntrack_helper *h;
-	struct hlist_node *n;
 	unsigned int i;
 
 	for (i = 0; i < nf_ct_helper_hsize; i++) {
-		hlist_for_each_entry_rcu(h, n, &nf_ct_helper_hash[i], hnode) {
+		hlist_for_each_entry_rcu(h, &nf_ct_helper_hash[i], hnode) {
 			if (!strcmp(h->name, name) &&
 			    h->tuple.src.l3num == l3num &&
 			    h->tuple.dst.protonum == protonum)
@@ -357,7 +355,6 @@ int nf_conntrack_helper_register(struct nf_conntrack_helper *me)
 {
 	int ret = 0;
 	struct nf_conntrack_helper *cur;
-	struct hlist_node *n;
 	unsigned int h = helper_hash(&me->tuple);
 
 	BUG_ON(me->expect_policy == NULL);
@@ -365,7 +362,7 @@ int nf_conntrack_helper_register(struct nf_conntrack_helper *me)
 	BUG_ON(strlen(me->name) > NF_CT_HELPER_NAME_LEN - 1);
 
 	mutex_lock(&nf_ct_helper_mutex);
-	hlist_for_each_entry(cur, n, &nf_ct_helper_hash[h], hnode) {
+	hlist_for_each_entry(cur, &nf_ct_helper_hash[h], hnode) {
 		if (strncmp(cur->name, me->name, NF_CT_HELPER_NAME_LEN) == 0 &&
 		    cur->tuple.src.l3num == me->tuple.src.l3num &&
 		    cur->tuple.dst.protonum == me->tuple.dst.protonum) {
@@ -386,13 +383,13 @@ static void __nf_conntrack_helper_unregister(struct nf_conntrack_helper *me,
 {
 	struct nf_conntrack_tuple_hash *h;
 	struct nf_conntrack_expect *exp;
-	const struct hlist_node *n, *next;
+	const struct hlist_node *next;
 	const struct hlist_nulls_node *nn;
 	unsigned int i;
 
 	/* Get rid of expectations */
 	for (i = 0; i < nf_ct_expect_hsize; i++) {
-		hlist_for_each_entry_safe(exp, n, next,
+		hlist_for_each_entry_safe(exp, next,
 					  &net->ct.expect_hash[i], hnode) {
 			struct nf_conn_help *help = nfct_help(exp->master);
 			if ((rcu_dereference_protected(
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
index 5d60e04f9679..9904b15f600e 100644
--- a/net/netfilter/nf_conntrack_netlink.c
+++ b/net/netfilter/nf_conntrack_netlink.c
@@ -2370,14 +2370,13 @@ ctnetlink_exp_dump_table(struct sk_buff *skb, struct netlink_callback *cb)
 	struct net *net = sock_net(skb->sk);
 	struct nf_conntrack_expect *exp, *last;
 	struct nfgenmsg *nfmsg = nlmsg_data(cb->nlh);
-	struct hlist_node *n;
 	u_int8_t l3proto = nfmsg->nfgen_family;
 
 	rcu_read_lock();
 	last = (struct nf_conntrack_expect *)cb->args[1];
 	for (; cb->args[0] < nf_ct_expect_hsize; cb->args[0]++) {
 restart:
-		hlist_for_each_entry(exp, n, &net->ct.expect_hash[cb->args[0]],
+		hlist_for_each_entry(exp, &net->ct.expect_hash[cb->args[0]],
 				     hnode) {
 			if (l3proto && exp->tuple.src.l3num != l3proto)
 				continue;
@@ -2510,7 +2509,7 @@ ctnetlink_del_expect(struct sock *ctnl, struct sk_buff *skb,
 	struct nf_conntrack_expect *exp;
 	struct nf_conntrack_tuple tuple;
 	struct nfgenmsg *nfmsg = nlmsg_data(nlh);
-	struct hlist_node *n, *next;
+	struct hlist_node *next;
 	u_int8_t u3 = nfmsg->nfgen_family;
 	unsigned int i;
 	u16 zone;
@@ -2557,7 +2556,7 @@ ctnetlink_del_expect(struct sock *ctnl, struct sk_buff *skb,
 		/* delete all expectations for this helper */
 		spin_lock_bh(&nf_conntrack_lock);
 		for (i = 0; i < nf_ct_expect_hsize; i++) {
-			hlist_for_each_entry_safe(exp, n, next,
+			hlist_for_each_entry_safe(exp, next,
 						  &net->ct.expect_hash[i],
 						  hnode) {
 				m_help = nfct_help(exp->master);
@@ -2575,7 +2574,7 @@ ctnetlink_del_expect(struct sock *ctnl, struct sk_buff *skb,
 		/* This basically means we have to flush everything*/
 		spin_lock_bh(&nf_conntrack_lock);
 		for (i = 0; i < nf_ct_expect_hsize; i++) {
-			hlist_for_each_entry_safe(exp, n, next,
+			hlist_for_each_entry_safe(exp, next,
 						  &net->ct.expect_hash[i],
 						  hnode) {
 				if (del_timer(&exp->timeout)) {
diff --git a/net/netfilter/nf_conntrack_sip.c b/net/netfilter/nf_conntrack_sip.c
index 069229d919b6..0e7d423324c3 100644
--- a/net/netfilter/nf_conntrack_sip.c
+++ b/net/netfilter/nf_conntrack_sip.c
@@ -855,11 +855,11 @@ static int refresh_signalling_expectation(struct nf_conn *ct,
 {
 	struct nf_conn_help *help = nfct_help(ct);
 	struct nf_conntrack_expect *exp;
-	struct hlist_node *n, *next;
+	struct hlist_node *next;
 	int found = 0;
 
 	spin_lock_bh(&nf_conntrack_lock);
-	hlist_for_each_entry_safe(exp, n, next, &help->expectations, lnode) {
+	hlist_for_each_entry_safe(exp, next, &help->expectations, lnode) {
 		if (exp->class != SIP_EXPECT_SIGNALLING ||
 		    !nf_inet_addr_cmp(&exp->tuple.dst.u3, addr) ||
 		    exp->tuple.dst.protonum != proto ||
@@ -881,10 +881,10 @@ static void flush_expectations(struct nf_conn *ct, bool media)
 {
 	struct nf_conn_help *help = nfct_help(ct);
 	struct nf_conntrack_expect *exp;
-	struct hlist_node *n, *next;
+	struct hlist_node *next;
 
 	spin_lock_bh(&nf_conntrack_lock);
-	hlist_for_each_entry_safe(exp, n, next, &help->expectations, lnode) {
+	hlist_for_each_entry_safe(exp, next, &help->expectations, lnode) {
 		if ((exp->class != SIP_EXPECT_SIGNALLING) ^ media)
 			continue;
 		if (!del_timer(&exp->timeout))
diff --git a/net/netfilter/nf_nat_core.c b/net/netfilter/nf_nat_core.c
index 5f2f9109f461..8d5769c6d16e 100644
--- a/net/netfilter/nf_nat_core.c
+++ b/net/netfilter/nf_nat_core.c
@@ -191,9 +191,8 @@ find_appropriate_src(struct net *net, u16 zone,
 	unsigned int h = hash_by_src(net, zone, tuple);
 	const struct nf_conn_nat *nat;
 	const struct nf_conn *ct;
-	const struct hlist_node *n;
 
-	hlist_for_each_entry_rcu(nat, n, &net->ct.nat_bysource[h], bysource) {
+	hlist_for_each_entry_rcu(nat, &net->ct.nat_bysource[h], bysource) {
 		ct = nat->ct;
 		if (same_src(ct, tuple) && nf_ct_zone(ct) == zone) {
 			/* Copy source part from reply tuple. */
diff --git a/net/netfilter/nfnetlink_cthelper.c b/net/netfilter/nfnetlink_cthelper.c
index 945950a8b1f1..a191b6db657e 100644
--- a/net/netfilter/nfnetlink_cthelper.c
+++ b/net/netfilter/nfnetlink_cthelper.c
@@ -282,7 +282,6 @@ nfnl_cthelper_new(struct sock *nfnl, struct sk_buff *skb,
 	const char *helper_name;
 	struct nf_conntrack_helper *cur, *helper = NULL;
 	struct nf_conntrack_tuple tuple;
-	struct hlist_node *n;
 	int ret = 0, i;
 
 	if (!tb[NFCTH_NAME] || !tb[NFCTH_TUPLE])
@@ -296,7 +295,7 @@ nfnl_cthelper_new(struct sock *nfnl, struct sk_buff *skb,
 
 	rcu_read_lock();
 	for (i = 0; i < nf_ct_helper_hsize && !helper; i++) {
-		hlist_for_each_entry_rcu(cur, n, &nf_ct_helper_hash[i], hnode) {
+		hlist_for_each_entry_rcu(cur, &nf_ct_helper_hash[i], hnode) {
 
 			/* skip non-userspace conntrack helpers. */
 			if (!(cur->flags & NF_CT_HELPER_F_USERSPACE))
@@ -452,13 +451,12 @@ static int
 nfnl_cthelper_dump_table(struct sk_buff *skb, struct netlink_callback *cb)
 {
 	struct nf_conntrack_helper *cur, *last;
-	struct hlist_node *n;
 
 	rcu_read_lock();
 	last = (struct nf_conntrack_helper *)cb->args[1];
 	for (; cb->args[0] < nf_ct_helper_hsize; cb->args[0]++) {
 restart:
-		hlist_for_each_entry_rcu(cur, n,
+		hlist_for_each_entry_rcu(cur,
 					 &nf_ct_helper_hash[cb->args[0]], hnode) {
 
 			/* skip non-userspace conntrack helpers. */
@@ -495,7 +493,6 @@ nfnl_cthelper_get(struct sock *nfnl, struct sk_buff *skb,
 {
 	int ret = -ENOENT, i;
 	struct nf_conntrack_helper *cur;
-	struct hlist_node *n;
 	struct sk_buff *skb2;
 	char *helper_name = NULL;
 	struct nf_conntrack_tuple tuple;
@@ -520,7 +517,7 @@ nfnl_cthelper_get(struct sock *nfnl, struct sk_buff *skb,
 	}
 
 	for (i = 0; i < nf_ct_helper_hsize; i++) {
-		hlist_for_each_entry_rcu(cur, n, &nf_ct_helper_hash[i], hnode) {
+		hlist_for_each_entry_rcu(cur, &nf_ct_helper_hash[i], hnode) {
 
 			/* skip non-userspace conntrack helpers. */
 			if (!(cur->flags & NF_CT_HELPER_F_USERSPACE))
@@ -568,7 +565,7 @@ nfnl_cthelper_del(struct sock *nfnl, struct sk_buff *skb,
 {
 	char *helper_name = NULL;
 	struct nf_conntrack_helper *cur;
-	struct hlist_node *n, *tmp;
+	struct hlist_node *tmp;
 	struct nf_conntrack_tuple tuple;
 	bool tuple_set = false, found = false;
 	int i, j = 0, ret;
@@ -585,7 +582,7 @@ nfnl_cthelper_del(struct sock *nfnl, struct sk_buff *skb,
 	}
 
 	for (i = 0; i < nf_ct_helper_hsize; i++) {
-		hlist_for_each_entry_safe(cur, n, tmp, &nf_ct_helper_hash[i],
+		hlist_for_each_entry_safe(cur, tmp, &nf_ct_helper_hash[i],
 					  hnode) {
 			/* skip non-userspace conntrack helpers. */
 			if (!(cur->flags & NF_CT_HELPER_F_USERSPACE))
@@ -654,13 +651,13 @@ err_out:
 static void __exit nfnl_cthelper_exit(void)
 {
 	struct nf_conntrack_helper *cur;
-	struct hlist_node *n, *tmp;
+	struct hlist_node *tmp;
 	int i;
 
 	nfnetlink_subsys_unregister(&nfnl_cthelper_subsys);
 
 	for (i=0; i<nf_ct_helper_hsize; i++) {
-		hlist_for_each_entry_safe(cur, n, tmp, &nf_ct_helper_hash[i],
+		hlist_for_each_entry_safe(cur, tmp, &nf_ct_helper_hash[i],
 					  hnode) {
 			/* skip non-userspace conntrack helpers. */
 			if (!(cur->flags & NF_CT_HELPER_F_USERSPACE))
diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c
index 92fd8eca0d31..f248db572972 100644
--- a/net/netfilter/nfnetlink_log.c
+++ b/net/netfilter/nfnetlink_log.c
@@ -87,11 +87,10 @@ static struct nfulnl_instance *
 __instance_lookup(u_int16_t group_num)
 {
 	struct hlist_head *head;
-	struct hlist_node *pos;
 	struct nfulnl_instance *inst;
 
 	head = &instance_table[instance_hashfn(group_num)];
-	hlist_for_each_entry_rcu(inst, pos, head, hlist) {
+	hlist_for_each_entry_rcu(inst, head, hlist) {
 		if (inst->group_num == group_num)
 			return inst;
 	}
@@ -717,11 +716,11 @@ nfulnl_rcv_nl_event(struct notifier_block *this,
 	/* destroy all instances for this portid */
 	spin_lock_bh(&instances_lock);
 	for (i = 0; i < INSTANCE_BUCKETS; i++) {
-		struct hlist_node *tmp, *t2;
+		struct hlist_node *t2;
 		struct nfulnl_instance *inst;
 		struct hlist_head *head = &instance_table[i];
 
-		hlist_for_each_entry_safe(inst, tmp, t2, head, hlist) {
+		hlist_for_each_entry_safe(inst, t2, head, hlist) {
 			if ((net_eq(n->net, &init_net)) &&
 			    (n->portid == inst->peer_portid))
 				__instance_destroy(inst);
diff --git a/net/netfilter/nfnetlink_queue_core.c b/net/netfilter/nfnetlink_queue_core.c
index 3158d87b56a8..858fd52c1040 100644
--- a/net/netfilter/nfnetlink_queue_core.c
+++ b/net/netfilter/nfnetlink_queue_core.c
@@ -80,11 +80,10 @@ static struct nfqnl_instance *
 instance_lookup(u_int16_t queue_num)
 {
 	struct hlist_head *head;
-	struct hlist_node *pos;
 	struct nfqnl_instance *inst;
 
 	head = &instance_table[instance_hashfn(queue_num)];
-	hlist_for_each_entry_rcu(inst, pos, head, hlist) {
+	hlist_for_each_entry_rcu(inst, head, hlist) {
 		if (inst->queue_num == queue_num)
 			return inst;
 	}
@@ -583,11 +582,10 @@ nfqnl_dev_drop(int ifindex)
 	rcu_read_lock();
 
 	for (i = 0; i < INSTANCE_BUCKETS; i++) {
-		struct hlist_node *tmp;
 		struct nfqnl_instance *inst;
 		struct hlist_head *head = &instance_table[i];
 
-		hlist_for_each_entry_rcu(inst, tmp, head, hlist)
+		hlist_for_each_entry_rcu(inst, head, hlist)
 			nfqnl_flush(inst, dev_cmp, ifindex);
 	}
 
@@ -627,11 +625,11 @@ nfqnl_rcv_nl_event(struct notifier_block *this,
 	/* destroy all instances for this portid */
 	spin_lock(&instances_lock);
 	for (i = 0; i < INSTANCE_BUCKETS; i++) {
-		struct hlist_node *tmp, *t2;
+		struct hlist_node *t2;
 		struct nfqnl_instance *inst;
 		struct hlist_head *head = &instance_table[i];
 
-		hlist_for_each_entry_safe(inst, tmp, t2, head, hlist) {
+		hlist_for_each_entry_safe(inst, t2, head, hlist) {
 			if ((n->net == &init_net) &&
 			    (n->portid == inst->peer_portid))
 				__instance_destroy(inst);
diff --git a/net/netfilter/xt_RATEEST.c b/net/netfilter/xt_RATEEST.c
index f264032b8c56..370adf622cef 100644
--- a/net/netfilter/xt_RATEEST.c
+++ b/net/netfilter/xt_RATEEST.c
@@ -43,12 +43,11 @@ static void xt_rateest_hash_insert(struct xt_rateest *est)
 struct xt_rateest *xt_rateest_lookup(const char *name)
 {
 	struct xt_rateest *est;
-	struct hlist_node *n;
 	unsigned int h;
 
 	h = xt_rateest_hash(name);
 	mutex_lock(&xt_rateest_mutex);
-	hlist_for_each_entry(est, n, &rateest_hash[h], list) {
+	hlist_for_each_entry(est, &rateest_hash[h], list) {
 		if (strcmp(est->name, name) == 0) {
 			est->refcnt++;
 			mutex_unlock(&xt_rateest_mutex);
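With the entry pointer as the loop variable, lookup loops can return the object straight out of the loop body with no container_of() step, as xt_rateest_lookup() does above. The bare shape of that pattern (hypothetical helper name, illustration only):

	static struct xt_rateest *rateest_find(struct hlist_head *bucket,
					       const char *name)
	{
		struct xt_rateest *est;

		hlist_for_each_entry(est, bucket, list)
			if (strcmp(est->name, name) == 0)
				return est;	/* already the typed entry */
		return NULL;
	}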
diff --git a/net/netfilter/xt_connlimit.c b/net/netfilter/xt_connlimit.c
index 70b5591a2586..c40b2695633b 100644
--- a/net/netfilter/xt_connlimit.c
+++ b/net/netfilter/xt_connlimit.c
@@ -101,7 +101,7 @@ static int count_them(struct net *net,
 {
 	const struct nf_conntrack_tuple_hash *found;
 	struct xt_connlimit_conn *conn;
-	struct hlist_node *pos, *n;
+	struct hlist_node *n;
 	struct nf_conn *found_ct;
 	struct hlist_head *hash;
 	bool addit = true;
@@ -115,7 +115,7 @@ static int count_them(struct net *net,
 	rcu_read_lock();
 
 	/* check the saved connections */
-	hlist_for_each_entry_safe(conn, pos, n, hash, node) {
+	hlist_for_each_entry_safe(conn, n, hash, node) {
 		found = nf_conntrack_find_get(net, NF_CT_DEFAULT_ZONE,
 					      &conn->tuple);
 		found_ct = NULL;
@@ -258,14 +258,14 @@ static void connlimit_mt_destroy(const struct xt_mtdtor_param *par)
 {
 	const struct xt_connlimit_info *info = par->matchinfo;
 	struct xt_connlimit_conn *conn;
-	struct hlist_node *pos, *n;
+	struct hlist_node *n;
 	struct hlist_head *hash = info->data->iphash;
 	unsigned int i;
 
 	nf_ct_l3proto_module_put(par->family);
 
 	for (i = 0; i < ARRAY_SIZE(info->data->iphash); ++i) {
-		hlist_for_each_entry_safe(conn, pos, n, &hash[i], node) {
+		hlist_for_each_entry_safe(conn, n, &hash[i], node) {
 			hlist_del(&conn->node);
 			kfree(conn);
 		}
diff --git a/net/netfilter/xt_hashlimit.c b/net/netfilter/xt_hashlimit.c
index 98218c896d2e..f330e8beaf69 100644
--- a/net/netfilter/xt_hashlimit.c
+++ b/net/netfilter/xt_hashlimit.c
@@ -141,11 +141,10 @@ dsthash_find(const struct xt_hashlimit_htable *ht,
 	     const struct dsthash_dst *dst)
 {
 	struct dsthash_ent *ent;
-	struct hlist_node *pos;
 	u_int32_t hash = hash_dst(ht, dst);
 
 	if (!hlist_empty(&ht->hash[hash])) {
-		hlist_for_each_entry_rcu(ent, pos, &ht->hash[hash], node)
+		hlist_for_each_entry_rcu(ent, &ht->hash[hash], node)
 			if (dst_cmp(ent, dst)) {
 				spin_lock(&ent->lock);
 				return ent;
@@ -297,8 +296,8 @@ static void htable_selective_cleanup(struct xt_hashlimit_htable *ht,
 	spin_lock_bh(&ht->lock);
 	for (i = 0; i < ht->cfg.size; i++) {
 		struct dsthash_ent *dh;
-		struct hlist_node *pos, *n;
-		hlist_for_each_entry_safe(dh, pos, n, &ht->hash[i], node) {
+		struct hlist_node *n;
+		hlist_for_each_entry_safe(dh, n, &ht->hash[i], node) {
 			if ((*select)(ht, dh))
 				dsthash_free(ht, dh);
 		}
@@ -343,9 +342,8 @@ static struct xt_hashlimit_htable *htable_find_get(struct net *net,
 {
 	struct hashlimit_net *hashlimit_net = hashlimit_pernet(net);
 	struct xt_hashlimit_htable *hinfo;
-	struct hlist_node *pos;
 
-	hlist_for_each_entry(hinfo, pos, &hashlimit_net->htables, node) {
+	hlist_for_each_entry(hinfo, &hashlimit_net->htables, node) {
 		if (!strcmp(name, hinfo->pde->name) &&
 		    hinfo->family == family) {
 			hinfo->use++;
@@ -821,10 +819,9 @@ static int dl_seq_show(struct seq_file *s, void *v)
 	struct xt_hashlimit_htable *htable = s->private;
 	unsigned int *bucket = (unsigned int *)v;
 	struct dsthash_ent *ent;
-	struct hlist_node *pos;
 
 	if (!hlist_empty(&htable->hash[*bucket])) {
-		hlist_for_each_entry(ent, pos, &htable->hash[*bucket], node)
+		hlist_for_each_entry(ent, &htable->hash[*bucket], node)
 			if (dl_seq_real_show(ent, htable->family, s))
 				return -1;
 	}
@@ -877,7 +874,6 @@ static void __net_exit hashlimit_proc_net_exit(struct net *net)
 static void __net_exit hashlimit_proc_net_exit(struct net *net)
 {
 	struct xt_hashlimit_htable *hinfo;
-	struct hlist_node *pos;
 	struct proc_dir_entry *pde;
 	struct hashlimit_net *hashlimit_net = hashlimit_pernet(net);
 
@@ -890,7 +886,7 @@ static void __net_exit hashlimit_proc_net_exit(struct net *net)
 	if (pde == NULL)
 		pde = hashlimit_net->ip6t_hashlimit;
 
-	hlist_for_each_entry(hinfo, pos, &hashlimit_net->htables, node)
+	hlist_for_each_entry(hinfo, &hashlimit_net->htables, node)
 		remove_proc_entry(hinfo->pde->name, pde);
 
 	hashlimit_net->ipt_hashlimit = NULL;
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index 8097b4f3ead4..1e3fd5bfcd86 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -248,11 +248,10 @@ static struct sock *netlink_lookup(struct net *net, int protocol, u32 portid)
248 struct nl_portid_hash *hash = &nl_table[protocol].hash; 248 struct nl_portid_hash *hash = &nl_table[protocol].hash;
249 struct hlist_head *head; 249 struct hlist_head *head;
250 struct sock *sk; 250 struct sock *sk;
251 struct hlist_node *node;
252 251
253 read_lock(&nl_table_lock); 252 read_lock(&nl_table_lock);
254 head = nl_portid_hashfn(hash, portid); 253 head = nl_portid_hashfn(hash, portid);
255 sk_for_each(sk, node, head) { 254 sk_for_each(sk, head) {
256 if (net_eq(sock_net(sk), net) && (nlk_sk(sk)->portid == portid)) { 255 if (net_eq(sock_net(sk), net) && (nlk_sk(sk)->portid == portid)) {
257 sock_hold(sk); 256 sock_hold(sk);
258 goto found; 257 goto found;
@@ -312,9 +311,9 @@ static int nl_portid_hash_rehash(struct nl_portid_hash *hash, int grow)
312 311
313 for (i = 0; i <= omask; i++) { 312 for (i = 0; i <= omask; i++) {
314 struct sock *sk; 313 struct sock *sk;
315 struct hlist_node *node, *tmp; 314 struct hlist_node *tmp;
316 315
317 sk_for_each_safe(sk, node, tmp, &otable[i]) 316 sk_for_each_safe(sk, tmp, &otable[i])
318 __sk_add_node(sk, nl_portid_hashfn(hash, nlk_sk(sk)->portid)); 317 __sk_add_node(sk, nl_portid_hashfn(hash, nlk_sk(sk)->portid));
319 } 318 }
320 319
@@ -344,7 +343,6 @@ static void
344netlink_update_listeners(struct sock *sk) 343netlink_update_listeners(struct sock *sk)
345{ 344{
346 struct netlink_table *tbl = &nl_table[sk->sk_protocol]; 345 struct netlink_table *tbl = &nl_table[sk->sk_protocol];
347 struct hlist_node *node;
348 unsigned long mask; 346 unsigned long mask;
349 unsigned int i; 347 unsigned int i;
350 struct listeners *listeners; 348 struct listeners *listeners;
@@ -355,7 +353,7 @@ netlink_update_listeners(struct sock *sk)
355 353
356 for (i = 0; i < NLGRPLONGS(tbl->groups); i++) { 354 for (i = 0; i < NLGRPLONGS(tbl->groups); i++) {
357 mask = 0; 355 mask = 0;
358 sk_for_each_bound(sk, node, &tbl->mc_list) { 356 sk_for_each_bound(sk, &tbl->mc_list) {
359 if (i < NLGRPLONGS(nlk_sk(sk)->ngroups)) 357 if (i < NLGRPLONGS(nlk_sk(sk)->ngroups))
360 mask |= nlk_sk(sk)->groups[i]; 358 mask |= nlk_sk(sk)->groups[i];
361 } 359 }
@@ -371,18 +369,17 @@ static int netlink_insert(struct sock *sk, struct net *net, u32 portid)
371 struct hlist_head *head; 369 struct hlist_head *head;
372 int err = -EADDRINUSE; 370 int err = -EADDRINUSE;
373 struct sock *osk; 371 struct sock *osk;
374 struct hlist_node *node;
375 int len; 372 int len;
376 373
377 netlink_table_grab(); 374 netlink_table_grab();
378 head = nl_portid_hashfn(hash, portid); 375 head = nl_portid_hashfn(hash, portid);
379 len = 0; 376 len = 0;
380 sk_for_each(osk, node, head) { 377 sk_for_each(osk, head) {
381 if (net_eq(sock_net(osk), net) && (nlk_sk(osk)->portid == portid)) 378 if (net_eq(sock_net(osk), net) && (nlk_sk(osk)->portid == portid))
382 break; 379 break;
383 len++; 380 len++;
384 } 381 }
385 if (node) 382 if (osk)
386 goto err; 383 goto err;
387 384
388 err = -EBUSY; 385 err = -EBUSY;
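
Note the one non-mechanical edit in this hunk: the old code tested the leftover cursor ("if (node)") to learn whether the walk broke out early, and with the cursor gone that duty falls to the entry pointer ("if (osk)"), which the new macro leaves NULL whenever the loop runs off the end of the chain. A tiny standalone C sketch of the same idiom, using a hypothetical struct item rather than kernel code:

        #include <stdio.h>

        struct item { int key; struct item *next; };

        static struct item *find(struct item *head, int key)
        {
                struct item *it;

                for (it = head; it; it = it->next)
                        if (it->key == key)
                                break;  /* early exit leaves it non-NULL */
                return it;              /* full walk leaves it NULL, like osk */
        }

        int main(void)
        {
                struct item c = { 3, NULL }, b = { 2, &c }, a = { 1, &b };

                printf("%s\n", find(&a, 2) ? "in use" : "free");  /* in use */
                printf("%s\n", find(&a, 9) ? "in use" : "free");  /* free */
                return 0;
        }
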
@@ -575,7 +572,6 @@ static int netlink_autobind(struct socket *sock)
575 struct nl_portid_hash *hash = &nl_table[sk->sk_protocol].hash; 572 struct nl_portid_hash *hash = &nl_table[sk->sk_protocol].hash;
576 struct hlist_head *head; 573 struct hlist_head *head;
577 struct sock *osk; 574 struct sock *osk;
578 struct hlist_node *node;
579 s32 portid = task_tgid_vnr(current); 575 s32 portid = task_tgid_vnr(current);
580 int err; 576 int err;
581 static s32 rover = -4097; 577 static s32 rover = -4097;
@@ -584,7 +580,7 @@ retry:
584 cond_resched(); 580 cond_resched();
585 netlink_table_grab(); 581 netlink_table_grab();
586 head = nl_portid_hashfn(hash, portid); 582 head = nl_portid_hashfn(hash, portid);
587 sk_for_each(osk, node, head) { 583 sk_for_each(osk, head) {
588 if (!net_eq(sock_net(osk), net)) 584 if (!net_eq(sock_net(osk), net))
589 continue; 585 continue;
590 if (nlk_sk(osk)->portid == portid) { 586 if (nlk_sk(osk)->portid == portid) {
@@ -1101,7 +1097,6 @@ int netlink_broadcast_filtered(struct sock *ssk, struct sk_buff *skb, u32 portid
1101{ 1097{
1102 struct net *net = sock_net(ssk); 1098 struct net *net = sock_net(ssk);
1103 struct netlink_broadcast_data info; 1099 struct netlink_broadcast_data info;
1104 struct hlist_node *node;
1105 struct sock *sk; 1100 struct sock *sk;
1106 1101
1107 skb = netlink_trim(skb, allocation); 1102 skb = netlink_trim(skb, allocation);
@@ -1124,7 +1119,7 @@ int netlink_broadcast_filtered(struct sock *ssk, struct sk_buff *skb, u32 portid
1124 1119
1125 netlink_lock_table(); 1120 netlink_lock_table();
1126 1121
1127 sk_for_each_bound(sk, node, &nl_table[ssk->sk_protocol].mc_list) 1122 sk_for_each_bound(sk, &nl_table[ssk->sk_protocol].mc_list)
1128 do_one_broadcast(sk, &info); 1123 do_one_broadcast(sk, &info);
1129 1124
1130 consume_skb(skb); 1125 consume_skb(skb);
@@ -1200,7 +1195,6 @@ out:
1200int netlink_set_err(struct sock *ssk, u32 portid, u32 group, int code) 1195int netlink_set_err(struct sock *ssk, u32 portid, u32 group, int code)
1201{ 1196{
1202 struct netlink_set_err_data info; 1197 struct netlink_set_err_data info;
1203 struct hlist_node *node;
1204 struct sock *sk; 1198 struct sock *sk;
1205 int ret = 0; 1199 int ret = 0;
1206 1200
@@ -1212,7 +1206,7 @@ int netlink_set_err(struct sock *ssk, u32 portid, u32 group, int code)
1212 1206
1213 read_lock(&nl_table_lock); 1207 read_lock(&nl_table_lock);
1214 1208
1215 sk_for_each_bound(sk, node, &nl_table[ssk->sk_protocol].mc_list) 1209 sk_for_each_bound(sk, &nl_table[ssk->sk_protocol].mc_list)
1216 ret += do_one_set_err(sk, &info); 1210 ret += do_one_set_err(sk, &info);
1217 1211
1218 read_unlock(&nl_table_lock); 1212 read_unlock(&nl_table_lock);
@@ -1676,10 +1670,9 @@ int netlink_change_ngroups(struct sock *sk, unsigned int groups)
1676void __netlink_clear_multicast_users(struct sock *ksk, unsigned int group) 1670void __netlink_clear_multicast_users(struct sock *ksk, unsigned int group)
1677{ 1671{
1678 struct sock *sk; 1672 struct sock *sk;
1679 struct hlist_node *node;
1680 struct netlink_table *tbl = &nl_table[ksk->sk_protocol]; 1673 struct netlink_table *tbl = &nl_table[ksk->sk_protocol];
1681 1674
1682 sk_for_each_bound(sk, node, &tbl->mc_list) 1675 sk_for_each_bound(sk, &tbl->mc_list)
1683 netlink_update_socket_mc(nlk_sk(sk), group, 0); 1676 netlink_update_socket_mc(nlk_sk(sk), group, 0);
1684} 1677}
1685 1678
@@ -1974,14 +1967,13 @@ static struct sock *netlink_seq_socket_idx(struct seq_file *seq, loff_t pos)
1974 struct nl_seq_iter *iter = seq->private; 1967 struct nl_seq_iter *iter = seq->private;
1975 int i, j; 1968 int i, j;
1976 struct sock *s; 1969 struct sock *s;
1977 struct hlist_node *node;
1978 loff_t off = 0; 1970 loff_t off = 0;
1979 1971
1980 for (i = 0; i < MAX_LINKS; i++) { 1972 for (i = 0; i < MAX_LINKS; i++) {
1981 struct nl_portid_hash *hash = &nl_table[i].hash; 1973 struct nl_portid_hash *hash = &nl_table[i].hash;
1982 1974
1983 for (j = 0; j <= hash->mask; j++) { 1975 for (j = 0; j <= hash->mask; j++) {
1984 sk_for_each(s, node, &hash->table[j]) { 1976 sk_for_each(s, &hash->table[j]) {
1985 if (sock_net(s) != seq_file_net(seq)) 1977 if (sock_net(s) != seq_file_net(seq))
1986 continue; 1978 continue;
1987 if (off == pos) { 1979 if (off == pos) {
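
All the sock-list walks in this file (sk_for_each, sk_for_each_rcu, sk_for_each_bound, sk_for_each_safe) lose their node argument in the same sweep because they are thin forwarding wrappers over the hlist iterators, updated in the same patch. An illustrative reconstruction of two of them, assuming the minimal hlist sketch shown after the xt_connlimit hunk; the authoritative definitions live in include/net/sock.h:

        /* Hypothetical, trimmed struct sock -- just enough for the macros. */
        struct sock { struct hlist_node sk_node; };

        #define sk_for_each(__sk, list) \
                hlist_for_each_entry(__sk, list, sk_node)
        #define sk_for_each_safe(__sk, tmp, list) \
                hlist_for_each_entry_safe(__sk, tmp, list, sk_node)

sk_for_each_bound and the _rcu flavor presumably follow the same forwarding pattern with their respective list members and iterator variants.
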
diff --git a/net/netrom/af_netrom.c b/net/netrom/af_netrom.c
index 297b07a029de..d1fa1d9ffd2e 100644
--- a/net/netrom/af_netrom.c
+++ b/net/netrom/af_netrom.c
@@ -104,10 +104,9 @@ static void nr_remove_socket(struct sock *sk)
104static void nr_kill_by_device(struct net_device *dev) 104static void nr_kill_by_device(struct net_device *dev)
105{ 105{
106 struct sock *s; 106 struct sock *s;
107 struct hlist_node *node;
108 107
109 spin_lock_bh(&nr_list_lock); 108 spin_lock_bh(&nr_list_lock);
110 sk_for_each(s, node, &nr_list) 109 sk_for_each(s, &nr_list)
111 if (nr_sk(s)->device == dev) 110 if (nr_sk(s)->device == dev)
112 nr_disconnect(s, ENETUNREACH); 111 nr_disconnect(s, ENETUNREACH);
113 spin_unlock_bh(&nr_list_lock); 112 spin_unlock_bh(&nr_list_lock);
@@ -149,10 +148,9 @@ static void nr_insert_socket(struct sock *sk)
149static struct sock *nr_find_listener(ax25_address *addr) 148static struct sock *nr_find_listener(ax25_address *addr)
150{ 149{
151 struct sock *s; 150 struct sock *s;
152 struct hlist_node *node;
153 151
154 spin_lock_bh(&nr_list_lock); 152 spin_lock_bh(&nr_list_lock);
155 sk_for_each(s, node, &nr_list) 153 sk_for_each(s, &nr_list)
156 if (!ax25cmp(&nr_sk(s)->source_addr, addr) && 154 if (!ax25cmp(&nr_sk(s)->source_addr, addr) &&
157 s->sk_state == TCP_LISTEN) { 155 s->sk_state == TCP_LISTEN) {
158 bh_lock_sock(s); 156 bh_lock_sock(s);
@@ -170,10 +168,9 @@ found:
170static struct sock *nr_find_socket(unsigned char index, unsigned char id) 168static struct sock *nr_find_socket(unsigned char index, unsigned char id)
171{ 169{
172 struct sock *s; 170 struct sock *s;
173 struct hlist_node *node;
174 171
175 spin_lock_bh(&nr_list_lock); 172 spin_lock_bh(&nr_list_lock);
176 sk_for_each(s, node, &nr_list) { 173 sk_for_each(s, &nr_list) {
177 struct nr_sock *nr = nr_sk(s); 174 struct nr_sock *nr = nr_sk(s);
178 175
179 if (nr->my_index == index && nr->my_id == id) { 176 if (nr->my_index == index && nr->my_id == id) {
@@ -194,10 +191,9 @@ static struct sock *nr_find_peer(unsigned char index, unsigned char id,
194 ax25_address *dest) 191 ax25_address *dest)
195{ 192{
196 struct sock *s; 193 struct sock *s;
197 struct hlist_node *node;
198 194
199 spin_lock_bh(&nr_list_lock); 195 spin_lock_bh(&nr_list_lock);
200 sk_for_each(s, node, &nr_list) { 196 sk_for_each(s, &nr_list) {
201 struct nr_sock *nr = nr_sk(s); 197 struct nr_sock *nr = nr_sk(s);
202 198
203 if (nr->your_index == index && nr->your_id == id && 199 if (nr->your_index == index && nr->your_id == id &&
diff --git a/net/netrom/nr_route.c b/net/netrom/nr_route.c
index 70ffff76a967..b976d5eff2de 100644
--- a/net/netrom/nr_route.c
+++ b/net/netrom/nr_route.c
@@ -49,10 +49,9 @@ static struct nr_node *nr_node_get(ax25_address *callsign)
49{ 49{
50 struct nr_node *found = NULL; 50 struct nr_node *found = NULL;
51 struct nr_node *nr_node; 51 struct nr_node *nr_node;
52 struct hlist_node *node;
53 52
54 spin_lock_bh(&nr_node_list_lock); 53 spin_lock_bh(&nr_node_list_lock);
55 nr_node_for_each(nr_node, node, &nr_node_list) 54 nr_node_for_each(nr_node, &nr_node_list)
56 if (ax25cmp(callsign, &nr_node->callsign) == 0) { 55 if (ax25cmp(callsign, &nr_node->callsign) == 0) {
57 nr_node_hold(nr_node); 56 nr_node_hold(nr_node);
58 found = nr_node; 57 found = nr_node;
@@ -67,10 +66,9 @@ static struct nr_neigh *nr_neigh_get_dev(ax25_address *callsign,
67{ 66{
68 struct nr_neigh *found = NULL; 67 struct nr_neigh *found = NULL;
69 struct nr_neigh *nr_neigh; 68 struct nr_neigh *nr_neigh;
70 struct hlist_node *node;
71 69
72 spin_lock_bh(&nr_neigh_list_lock); 70 spin_lock_bh(&nr_neigh_list_lock);
73 nr_neigh_for_each(nr_neigh, node, &nr_neigh_list) 71 nr_neigh_for_each(nr_neigh, &nr_neigh_list)
74 if (ax25cmp(callsign, &nr_neigh->callsign) == 0 && 72 if (ax25cmp(callsign, &nr_neigh->callsign) == 0 &&
75 nr_neigh->dev == dev) { 73 nr_neigh->dev == dev) {
76 nr_neigh_hold(nr_neigh); 74 nr_neigh_hold(nr_neigh);
@@ -114,10 +112,9 @@ static int __must_check nr_add_node(ax25_address *nr, const char *mnemonic,
114 */ 112 */
115 if (nr_neigh != NULL && nr_neigh->failed != 0 && quality == 0) { 113 if (nr_neigh != NULL && nr_neigh->failed != 0 && quality == 0) {
116 struct nr_node *nr_nodet; 114 struct nr_node *nr_nodet;
117 struct hlist_node *node;
118 115
119 spin_lock_bh(&nr_node_list_lock); 116 spin_lock_bh(&nr_node_list_lock);
120 nr_node_for_each(nr_nodet, node, &nr_node_list) { 117 nr_node_for_each(nr_nodet, &nr_node_list) {
121 nr_node_lock(nr_nodet); 118 nr_node_lock(nr_nodet);
122 for (i = 0; i < nr_nodet->count; i++) 119 for (i = 0; i < nr_nodet->count; i++)
123 if (nr_nodet->routes[i].neighbour == nr_neigh) 120 if (nr_nodet->routes[i].neighbour == nr_neigh)
@@ -485,11 +482,11 @@ static int nr_dec_obs(void)
485{ 482{
486 struct nr_neigh *nr_neigh; 483 struct nr_neigh *nr_neigh;
487 struct nr_node *s; 484 struct nr_node *s;
488 struct hlist_node *node, *nodet; 485 struct hlist_node *nodet;
489 int i; 486 int i;
490 487
491 spin_lock_bh(&nr_node_list_lock); 488 spin_lock_bh(&nr_node_list_lock);
492 nr_node_for_each_safe(s, node, nodet, &nr_node_list) { 489 nr_node_for_each_safe(s, nodet, &nr_node_list) {
493 nr_node_lock(s); 490 nr_node_lock(s);
494 for (i = 0; i < s->count; i++) { 491 for (i = 0; i < s->count; i++) {
495 switch (s->routes[i].obs_count) { 492 switch (s->routes[i].obs_count) {
@@ -540,15 +537,15 @@ static int nr_dec_obs(void)
540void nr_rt_device_down(struct net_device *dev) 537void nr_rt_device_down(struct net_device *dev)
541{ 538{
542 struct nr_neigh *s; 539 struct nr_neigh *s;
543 struct hlist_node *node, *nodet, *node2, *node2t; 540 struct hlist_node *nodet, *node2t;
544 struct nr_node *t; 541 struct nr_node *t;
545 int i; 542 int i;
546 543
547 spin_lock_bh(&nr_neigh_list_lock); 544 spin_lock_bh(&nr_neigh_list_lock);
548 nr_neigh_for_each_safe(s, node, nodet, &nr_neigh_list) { 545 nr_neigh_for_each_safe(s, nodet, &nr_neigh_list) {
549 if (s->dev == dev) { 546 if (s->dev == dev) {
550 spin_lock_bh(&nr_node_list_lock); 547 spin_lock_bh(&nr_node_list_lock);
551 nr_node_for_each_safe(t, node2, node2t, &nr_node_list) { 548 nr_node_for_each_safe(t, node2t, &nr_node_list) {
552 nr_node_lock(t); 549 nr_node_lock(t);
553 for (i = 0; i < t->count; i++) { 550 for (i = 0; i < t->count; i++) {
554 if (t->routes[i].neighbour == s) { 551 if (t->routes[i].neighbour == s) {
@@ -737,11 +734,10 @@ int nr_rt_ioctl(unsigned int cmd, void __user *arg)
737void nr_link_failed(ax25_cb *ax25, int reason) 734void nr_link_failed(ax25_cb *ax25, int reason)
738{ 735{
739 struct nr_neigh *s, *nr_neigh = NULL; 736 struct nr_neigh *s, *nr_neigh = NULL;
740 struct hlist_node *node;
741 struct nr_node *nr_node = NULL; 737 struct nr_node *nr_node = NULL;
742 738
743 spin_lock_bh(&nr_neigh_list_lock); 739 spin_lock_bh(&nr_neigh_list_lock);
744 nr_neigh_for_each(s, node, &nr_neigh_list) { 740 nr_neigh_for_each(s, &nr_neigh_list) {
745 if (s->ax25 == ax25) { 741 if (s->ax25 == ax25) {
746 nr_neigh_hold(s); 742 nr_neigh_hold(s);
747 nr_neigh = s; 743 nr_neigh = s;
@@ -761,7 +757,7 @@ void nr_link_failed(ax25_cb *ax25, int reason)
761 return; 757 return;
762 } 758 }
763 spin_lock_bh(&nr_node_list_lock); 759 spin_lock_bh(&nr_node_list_lock);
764 nr_node_for_each(nr_node, node, &nr_node_list) { 760 nr_node_for_each(nr_node, &nr_node_list) {
765 nr_node_lock(nr_node); 761 nr_node_lock(nr_node);
766 if (nr_node->which < nr_node->count && 762 if (nr_node->which < nr_node->count &&
767 nr_node->routes[nr_node->which].neighbour == nr_neigh) 763 nr_node->routes[nr_node->which].neighbour == nr_neigh)
@@ -1013,16 +1009,16 @@ void __exit nr_rt_free(void)
1013{ 1009{
1014 struct nr_neigh *s = NULL; 1010 struct nr_neigh *s = NULL;
1015 struct nr_node *t = NULL; 1011 struct nr_node *t = NULL;
1016 struct hlist_node *node, *nodet; 1012 struct hlist_node *nodet;
1017 1013
1018 spin_lock_bh(&nr_neigh_list_lock); 1014 spin_lock_bh(&nr_neigh_list_lock);
1019 spin_lock_bh(&nr_node_list_lock); 1015 spin_lock_bh(&nr_node_list_lock);
1020 nr_node_for_each_safe(t, node, nodet, &nr_node_list) { 1016 nr_node_for_each_safe(t, nodet, &nr_node_list) {
1021 nr_node_lock(t); 1017 nr_node_lock(t);
1022 nr_remove_node_locked(t); 1018 nr_remove_node_locked(t);
1023 nr_node_unlock(t); 1019 nr_node_unlock(t);
1024 } 1020 }
1025 nr_neigh_for_each_safe(s, node, nodet, &nr_neigh_list) { 1021 nr_neigh_for_each_safe(s, nodet, &nr_neigh_list) {
1026 while(s->count) { 1022 while(s->count) {
1027 s->count--; 1023 s->count--;
1028 nr_neigh_put(s); 1024 nr_neigh_put(s);
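
nr_rt_device_down above is the starkest example of the cursor diet: the old version carried four hlist_node pointers (node, nodet, node2, node2t) for its two nested _safe walks, the new one only the two "next" snapshots. The NET/ROM wrappers themselves presumably reduce to plain forwarders; a reconstruction under that assumption, with the member names node_node and neigh_node guessed from include/net/netrom.h and therefore illustrative only:

        #define nr_node_for_each(__nr_node, list) \
                hlist_for_each_entry(__nr_node, list, node_node)
        #define nr_node_for_each_safe(__nr_node, node2, list) \
                hlist_for_each_entry_safe(__nr_node, node2, list, node_node)
        #define nr_neigh_for_each_safe(__nr_neigh, node2, list) \
                hlist_for_each_entry_safe(__nr_neigh, node2, list, neigh_node)
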
diff --git a/net/nfc/llcp/llcp.c b/net/nfc/llcp/llcp.c
index 746f5a2f9804..7f8266dd14cb 100644
--- a/net/nfc/llcp/llcp.c
+++ b/net/nfc/llcp/llcp.c
@@ -71,14 +71,14 @@ static void nfc_llcp_socket_purge(struct nfc_llcp_sock *sock)
71static void nfc_llcp_socket_release(struct nfc_llcp_local *local, bool listen) 71static void nfc_llcp_socket_release(struct nfc_llcp_local *local, bool listen)
72{ 72{
73 struct sock *sk; 73 struct sock *sk;
74 struct hlist_node *node, *tmp; 74 struct hlist_node *tmp;
75 struct nfc_llcp_sock *llcp_sock; 75 struct nfc_llcp_sock *llcp_sock;
76 76
77 skb_queue_purge(&local->tx_queue); 77 skb_queue_purge(&local->tx_queue);
78 78
79 write_lock(&local->sockets.lock); 79 write_lock(&local->sockets.lock);
80 80
81 sk_for_each_safe(sk, node, tmp, &local->sockets.head) { 81 sk_for_each_safe(sk, tmp, &local->sockets.head) {
82 llcp_sock = nfc_llcp_sock(sk); 82 llcp_sock = nfc_llcp_sock(sk);
83 83
84 bh_lock_sock(sk); 84 bh_lock_sock(sk);
@@ -171,7 +171,6 @@ static struct nfc_llcp_sock *nfc_llcp_sock_get(struct nfc_llcp_local *local,
171 u8 ssap, u8 dsap) 171 u8 ssap, u8 dsap)
172{ 172{
173 struct sock *sk; 173 struct sock *sk;
174 struct hlist_node *node;
175 struct nfc_llcp_sock *llcp_sock, *tmp_sock; 174 struct nfc_llcp_sock *llcp_sock, *tmp_sock;
176 175
177 pr_debug("ssap dsap %d %d\n", ssap, dsap); 176 pr_debug("ssap dsap %d %d\n", ssap, dsap);
@@ -183,7 +182,7 @@ static struct nfc_llcp_sock *nfc_llcp_sock_get(struct nfc_llcp_local *local,
183 182
184 llcp_sock = NULL; 183 llcp_sock = NULL;
185 184
186 sk_for_each(sk, node, &local->sockets.head) { 185 sk_for_each(sk, &local->sockets.head) {
187 tmp_sock = nfc_llcp_sock(sk); 186 tmp_sock = nfc_llcp_sock(sk);
188 187
189 if (tmp_sock->ssap == ssap && tmp_sock->dsap == dsap) { 188 if (tmp_sock->ssap == ssap && tmp_sock->dsap == dsap) {
@@ -272,7 +271,6 @@ struct nfc_llcp_sock *nfc_llcp_sock_from_sn(struct nfc_llcp_local *local,
272 u8 *sn, size_t sn_len) 271 u8 *sn, size_t sn_len)
273{ 272{
274 struct sock *sk; 273 struct sock *sk;
275 struct hlist_node *node;
276 struct nfc_llcp_sock *llcp_sock, *tmp_sock; 274 struct nfc_llcp_sock *llcp_sock, *tmp_sock;
277 275
278 pr_debug("sn %zd %p\n", sn_len, sn); 276 pr_debug("sn %zd %p\n", sn_len, sn);
@@ -284,7 +282,7 @@ struct nfc_llcp_sock *nfc_llcp_sock_from_sn(struct nfc_llcp_local *local,
284 282
285 llcp_sock = NULL; 283 llcp_sock = NULL;
286 284
287 sk_for_each(sk, node, &local->sockets.head) { 285 sk_for_each(sk, &local->sockets.head) {
288 tmp_sock = nfc_llcp_sock(sk); 286 tmp_sock = nfc_llcp_sock(sk);
289 287
290 pr_debug("llcp sock %p\n", tmp_sock); 288 pr_debug("llcp sock %p\n", tmp_sock);
@@ -601,14 +599,13 @@ static void nfc_llcp_set_nrns(struct nfc_llcp_sock *sock, struct sk_buff *pdu)
601void nfc_llcp_send_to_raw_sock(struct nfc_llcp_local *local, 599void nfc_llcp_send_to_raw_sock(struct nfc_llcp_local *local,
602 struct sk_buff *skb, u8 direction) 600 struct sk_buff *skb, u8 direction)
603{ 601{
604 struct hlist_node *node;
605 struct sk_buff *skb_copy = NULL, *nskb; 602 struct sk_buff *skb_copy = NULL, *nskb;
606 struct sock *sk; 603 struct sock *sk;
607 u8 *data; 604 u8 *data;
608 605
609 read_lock(&local->raw_sockets.lock); 606 read_lock(&local->raw_sockets.lock);
610 607
611 sk_for_each(sk, node, &local->raw_sockets.head) { 608 sk_for_each(sk, &local->raw_sockets.head) {
612 if (sk->sk_state != LLCP_BOUND) 609 if (sk->sk_state != LLCP_BOUND)
613 continue; 610 continue;
614 611
@@ -697,11 +694,10 @@ static struct nfc_llcp_sock *nfc_llcp_connecting_sock_get(struct nfc_llcp_local
697{ 694{
698 struct sock *sk; 695 struct sock *sk;
699 struct nfc_llcp_sock *llcp_sock; 696 struct nfc_llcp_sock *llcp_sock;
700 struct hlist_node *node;
701 697
702 read_lock(&local->connecting_sockets.lock); 698 read_lock(&local->connecting_sockets.lock);
703 699
704 sk_for_each(sk, node, &local->connecting_sockets.head) { 700 sk_for_each(sk, &local->connecting_sockets.head) {
705 llcp_sock = nfc_llcp_sock(sk); 701 llcp_sock = nfc_llcp_sock(sk);
706 702
707 if (llcp_sock->ssap == ssap) { 703 if (llcp_sock->ssap == ssap) {
diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c
index 9dc537df46c4..e87a26506dba 100644
--- a/net/openvswitch/datapath.c
+++ b/net/openvswitch/datapath.c
@@ -158,11 +158,10 @@ static struct hlist_head *vport_hash_bucket(const struct datapath *dp,
158struct vport *ovs_lookup_vport(const struct datapath *dp, u16 port_no) 158struct vport *ovs_lookup_vport(const struct datapath *dp, u16 port_no)
159{ 159{
160 struct vport *vport; 160 struct vport *vport;
161 struct hlist_node *n;
162 struct hlist_head *head; 161 struct hlist_head *head;
163 162
164 head = vport_hash_bucket(dp, port_no); 163 head = vport_hash_bucket(dp, port_no);
165 hlist_for_each_entry_rcu(vport, n, head, dp_hash_node) { 164 hlist_for_each_entry_rcu(vport, head, dp_hash_node) {
166 if (vport->port_no == port_no) 165 if (vport->port_no == port_no)
167 return vport; 166 return vport;
168 } 167 }
@@ -1386,9 +1385,9 @@ static void __dp_destroy(struct datapath *dp)
1386 1385
1387 for (i = 0; i < DP_VPORT_HASH_BUCKETS; i++) { 1386 for (i = 0; i < DP_VPORT_HASH_BUCKETS; i++) {
1388 struct vport *vport; 1387 struct vport *vport;
1389 struct hlist_node *node, *n; 1388 struct hlist_node *n;
1390 1389
1391 hlist_for_each_entry_safe(vport, node, n, &dp->ports[i], dp_hash_node) 1390 hlist_for_each_entry_safe(vport, n, &dp->ports[i], dp_hash_node)
1392 if (vport->port_no != OVSP_LOCAL) 1391 if (vport->port_no != OVSP_LOCAL)
1393 ovs_dp_detach_port(vport); 1392 ovs_dp_detach_port(vport);
1394 } 1393 }
@@ -1825,10 +1824,9 @@ static int ovs_vport_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
1825 rcu_read_lock(); 1824 rcu_read_lock();
1826 for (i = bucket; i < DP_VPORT_HASH_BUCKETS; i++) { 1825 for (i = bucket; i < DP_VPORT_HASH_BUCKETS; i++) {
1827 struct vport *vport; 1826 struct vport *vport;
1828 struct hlist_node *n;
1829 1827
1830 j = 0; 1828 j = 0;
1831 hlist_for_each_entry_rcu(vport, n, &dp->ports[i], dp_hash_node) { 1829 hlist_for_each_entry_rcu(vport, &dp->ports[i], dp_hash_node) {
1832 if (j >= skip && 1830 if (j >= skip &&
1833 ovs_vport_cmd_fill_info(vport, skb, 1831 ovs_vport_cmd_fill_info(vport, skb,
1834 NETLINK_CB(cb->skb).portid, 1832 NETLINK_CB(cb->skb).portid,
diff --git a/net/openvswitch/flow.c b/net/openvswitch/flow.c
index c3294cebc4f2..20605ecf100b 100644
--- a/net/openvswitch/flow.c
+++ b/net/openvswitch/flow.c
@@ -299,10 +299,10 @@ void ovs_flow_tbl_destroy(struct flow_table *table)
299 for (i = 0; i < table->n_buckets; i++) { 299 for (i = 0; i < table->n_buckets; i++) {
300 struct sw_flow *flow; 300 struct sw_flow *flow;
301 struct hlist_head *head = flex_array_get(table->buckets, i); 301 struct hlist_head *head = flex_array_get(table->buckets, i);
302 struct hlist_node *node, *n; 302 struct hlist_node *n;
303 int ver = table->node_ver; 303 int ver = table->node_ver;
304 304
305 hlist_for_each_entry_safe(flow, node, n, head, hash_node[ver]) { 305 hlist_for_each_entry_safe(flow, n, head, hash_node[ver]) {
306 hlist_del_rcu(&flow->hash_node[ver]); 306 hlist_del_rcu(&flow->hash_node[ver]);
307 ovs_flow_free(flow); 307 ovs_flow_free(flow);
308 } 308 }
@@ -332,7 +332,6 @@ struct sw_flow *ovs_flow_tbl_next(struct flow_table *table, u32 *bucket, u32 *la
332{ 332{
333 struct sw_flow *flow; 333 struct sw_flow *flow;
334 struct hlist_head *head; 334 struct hlist_head *head;
335 struct hlist_node *n;
336 int ver; 335 int ver;
337 int i; 336 int i;
338 337
@@ -340,7 +339,7 @@ struct sw_flow *ovs_flow_tbl_next(struct flow_table *table, u32 *bucket, u32 *la
340 while (*bucket < table->n_buckets) { 339 while (*bucket < table->n_buckets) {
341 i = 0; 340 i = 0;
342 head = flex_array_get(table->buckets, *bucket); 341 head = flex_array_get(table->buckets, *bucket);
343 hlist_for_each_entry_rcu(flow, n, head, hash_node[ver]) { 342 hlist_for_each_entry_rcu(flow, head, hash_node[ver]) {
344 if (i < *last) { 343 if (i < *last) {
345 i++; 344 i++;
346 continue; 345 continue;
@@ -367,11 +366,10 @@ static void flow_table_copy_flows(struct flow_table *old, struct flow_table *new
367 for (i = 0; i < old->n_buckets; i++) { 366 for (i = 0; i < old->n_buckets; i++) {
368 struct sw_flow *flow; 367 struct sw_flow *flow;
369 struct hlist_head *head; 368 struct hlist_head *head;
370 struct hlist_node *n;
371 369
372 head = flex_array_get(old->buckets, i); 370 head = flex_array_get(old->buckets, i);
373 371
374 hlist_for_each_entry(flow, n, head, hash_node[old_ver]) 372 hlist_for_each_entry(flow, head, hash_node[old_ver])
375 ovs_flow_tbl_insert(new, flow); 373 ovs_flow_tbl_insert(new, flow);
376 } 374 }
377 old->keep_flows = true; 375 old->keep_flows = true;
@@ -766,14 +764,13 @@ struct sw_flow *ovs_flow_tbl_lookup(struct flow_table *table,
766 struct sw_flow_key *key, int key_len) 764 struct sw_flow_key *key, int key_len)
767{ 765{
768 struct sw_flow *flow; 766 struct sw_flow *flow;
769 struct hlist_node *n;
770 struct hlist_head *head; 767 struct hlist_head *head;
771 u32 hash; 768 u32 hash;
772 769
773 hash = ovs_flow_hash(key, key_len); 770 hash = ovs_flow_hash(key, key_len);
774 771
775 head = find_bucket(table, hash); 772 head = find_bucket(table, hash);
776 hlist_for_each_entry_rcu(flow, n, head, hash_node[table->node_ver]) { 773 hlist_for_each_entry_rcu(flow, head, hash_node[table->node_ver]) {
777 774
778 if (flow->hash == hash && 775 if (flow->hash == hash &&
779 !memcmp(&flow->key, key, key_len)) { 776 !memcmp(&flow->key, key, key_len)) {
diff --git a/net/openvswitch/vport.c b/net/openvswitch/vport.c
index 70af0bedbac4..ba717cc038b3 100644
--- a/net/openvswitch/vport.c
+++ b/net/openvswitch/vport.c
@@ -86,9 +86,8 @@ struct vport *ovs_vport_locate(struct net *net, const char *name)
86{ 86{
87 struct hlist_head *bucket = hash_bucket(net, name); 87 struct hlist_head *bucket = hash_bucket(net, name);
88 struct vport *vport; 88 struct vport *vport;
89 struct hlist_node *node;
90 89
91 hlist_for_each_entry_rcu(vport, node, bucket, hash_node) 90 hlist_for_each_entry_rcu(vport, bucket, hash_node)
92 if (!strcmp(name, vport->ops->get_name(vport)) && 91 if (!strcmp(name, vport->ops->get_name(vport)) &&
93 net_eq(ovs_dp_get_net(vport->dp), net)) 92 net_eq(ovs_dp_get_net(vport->dp), net))
94 return vport; 93 return vport;
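
The three Open vSwitch files show the read-side pattern: lookups run under rcu_read_lock() with hlist_for_each_entry_rcu(), which sheds the node parameter exactly like its plain sibling. A sketch of that lookup shape, building on the hlist sketch above, with the RCU primitives stubbed out as no-ops purely so it compiles standalone — in the kernel these are the real rcu_read_lock()/rcu_read_unlock() and the _rcu iterator:

        #define rcu_read_lock()   do { } while (0)  /* stub, stand-in only */
        #define rcu_read_unlock() do { } while (0)  /* stub, stand-in only */

        struct vport { unsigned int port_no; struct hlist_node dp_hash_node; };

        static struct vport *lookup_vport(struct hlist_head *head,
                                          unsigned int port_no)
        {
                struct vport *vport;

                rcu_read_lock();
                hlist_for_each_entry(vport, head, dp_hash_node) /* _rcu in-kernel */
                        if (vport->port_no == port_no)
                                break;
                rcu_read_unlock();
                return vport;   /* NULL on a full walk, found entry on break */
        }
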
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index c7bfeff10767..1d6793dbfbae 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -3263,12 +3263,11 @@ static int packet_getsockopt(struct socket *sock, int level, int optname,
3263static int packet_notifier(struct notifier_block *this, unsigned long msg, void *data) 3263static int packet_notifier(struct notifier_block *this, unsigned long msg, void *data)
3264{ 3264{
3265 struct sock *sk; 3265 struct sock *sk;
3266 struct hlist_node *node;
3267 struct net_device *dev = data; 3266 struct net_device *dev = data;
3268 struct net *net = dev_net(dev); 3267 struct net *net = dev_net(dev);
3269 3268
3270 rcu_read_lock(); 3269 rcu_read_lock();
3271 sk_for_each_rcu(sk, node, &net->packet.sklist) { 3270 sk_for_each_rcu(sk, &net->packet.sklist) {
3272 struct packet_sock *po = pkt_sk(sk); 3271 struct packet_sock *po = pkt_sk(sk);
3273 3272
3274 switch (msg) { 3273 switch (msg) {
diff --git a/net/packet/diag.c b/net/packet/diag.c
index 8db6e21c46bd..d3fcd1ebef7e 100644
--- a/net/packet/diag.c
+++ b/net/packet/diag.c
@@ -172,13 +172,12 @@ static int packet_diag_dump(struct sk_buff *skb, struct netlink_callback *cb)
172 struct packet_diag_req *req; 172 struct packet_diag_req *req;
173 struct net *net; 173 struct net *net;
174 struct sock *sk; 174 struct sock *sk;
175 struct hlist_node *node;
176 175
177 net = sock_net(skb->sk); 176 net = sock_net(skb->sk);
178 req = nlmsg_data(cb->nlh); 177 req = nlmsg_data(cb->nlh);
179 178
180 mutex_lock(&net->packet.sklist_lock); 179 mutex_lock(&net->packet.sklist_lock);
181 sk_for_each(sk, node, &net->packet.sklist) { 180 sk_for_each(sk, &net->packet.sklist) {
182 if (!net_eq(sock_net(sk), net)) 181 if (!net_eq(sock_net(sk), net))
183 continue; 182 continue;
184 if (num < s_num) 183 if (num < s_num)
diff --git a/net/phonet/pep.c b/net/phonet/pep.c
index 576f22c9c76e..e77411735de8 100644
--- a/net/phonet/pep.c
+++ b/net/phonet/pep.c
@@ -640,11 +640,10 @@ static struct sock *pep_find_pipe(const struct hlist_head *hlist,
640 const struct sockaddr_pn *dst, 640 const struct sockaddr_pn *dst,
641 u8 pipe_handle) 641 u8 pipe_handle)
642{ 642{
643 struct hlist_node *node;
644 struct sock *sknode; 643 struct sock *sknode;
645 u16 dobj = pn_sockaddr_get_object(dst); 644 u16 dobj = pn_sockaddr_get_object(dst);
646 645
647 sk_for_each(sknode, node, hlist) { 646 sk_for_each(sknode, hlist) {
648 struct pep_sock *pnnode = pep_sk(sknode); 647 struct pep_sock *pnnode = pep_sk(sknode);
649 648
650 /* Ports match, but addresses might not: */ 649 /* Ports match, but addresses might not: */
diff --git a/net/phonet/socket.c b/net/phonet/socket.c
index b7e982782255..1afd1381cdc7 100644
--- a/net/phonet/socket.c
+++ b/net/phonet/socket.c
@@ -76,7 +76,6 @@ static struct hlist_head *pn_hash_list(u16 obj)
76 */ 76 */
77struct sock *pn_find_sock_by_sa(struct net *net, const struct sockaddr_pn *spn) 77struct sock *pn_find_sock_by_sa(struct net *net, const struct sockaddr_pn *spn)
78{ 78{
79 struct hlist_node *node;
80 struct sock *sknode; 79 struct sock *sknode;
81 struct sock *rval = NULL; 80 struct sock *rval = NULL;
82 u16 obj = pn_sockaddr_get_object(spn); 81 u16 obj = pn_sockaddr_get_object(spn);
@@ -84,7 +83,7 @@ struct sock *pn_find_sock_by_sa(struct net *net, const struct sockaddr_pn *spn)
84 struct hlist_head *hlist = pn_hash_list(obj); 83 struct hlist_head *hlist = pn_hash_list(obj);
85 84
86 rcu_read_lock(); 85 rcu_read_lock();
87 sk_for_each_rcu(sknode, node, hlist) { 86 sk_for_each_rcu(sknode, hlist) {
88 struct pn_sock *pn = pn_sk(sknode); 87 struct pn_sock *pn = pn_sk(sknode);
89 BUG_ON(!pn->sobject); /* unbound socket */ 88 BUG_ON(!pn->sobject); /* unbound socket */
90 89
@@ -120,10 +119,9 @@ void pn_deliver_sock_broadcast(struct net *net, struct sk_buff *skb)
120 119
121 rcu_read_lock(); 120 rcu_read_lock();
122 for (h = 0; h < PN_HASHSIZE; h++) { 121 for (h = 0; h < PN_HASHSIZE; h++) {
123 struct hlist_node *node;
124 struct sock *sknode; 122 struct sock *sknode;
125 123
126 sk_for_each(sknode, node, hlist) { 124 sk_for_each(sknode, hlist) {
127 struct sk_buff *clone; 125 struct sk_buff *clone;
128 126
129 if (!net_eq(sock_net(sknode), net)) 127 if (!net_eq(sock_net(sknode), net))
@@ -543,12 +541,11 @@ static struct sock *pn_sock_get_idx(struct seq_file *seq, loff_t pos)
543{ 541{
544 struct net *net = seq_file_net(seq); 542 struct net *net = seq_file_net(seq);
545 struct hlist_head *hlist = pnsocks.hlist; 543 struct hlist_head *hlist = pnsocks.hlist;
546 struct hlist_node *node;
547 struct sock *sknode; 544 struct sock *sknode;
548 unsigned int h; 545 unsigned int h;
549 546
550 for (h = 0; h < PN_HASHSIZE; h++) { 547 for (h = 0; h < PN_HASHSIZE; h++) {
551 sk_for_each_rcu(sknode, node, hlist) { 548 sk_for_each_rcu(sknode, hlist) {
552 if (!net_eq(net, sock_net(sknode))) 549 if (!net_eq(net, sock_net(sknode)))
553 continue; 550 continue;
554 if (!pos) 551 if (!pos)
diff --git a/net/rds/bind.c b/net/rds/bind.c
index 637bde56c9db..b5ad65a0067e 100644
--- a/net/rds/bind.c
+++ b/net/rds/bind.c
@@ -52,13 +52,12 @@ static struct rds_sock *rds_bind_lookup(__be32 addr, __be16 port,
52 struct rds_sock *insert) 52 struct rds_sock *insert)
53{ 53{
54 struct rds_sock *rs; 54 struct rds_sock *rs;
55 struct hlist_node *node;
56 struct hlist_head *head = hash_to_bucket(addr, port); 55 struct hlist_head *head = hash_to_bucket(addr, port);
57 u64 cmp; 56 u64 cmp;
58 u64 needle = ((u64)be32_to_cpu(addr) << 32) | be16_to_cpu(port); 57 u64 needle = ((u64)be32_to_cpu(addr) << 32) | be16_to_cpu(port);
59 58
60 rcu_read_lock(); 59 rcu_read_lock();
61 hlist_for_each_entry_rcu(rs, node, head, rs_bound_node) { 60 hlist_for_each_entry_rcu(rs, head, rs_bound_node) {
62 cmp = ((u64)be32_to_cpu(rs->rs_bound_addr) << 32) | 61 cmp = ((u64)be32_to_cpu(rs->rs_bound_addr) << 32) |
63 be16_to_cpu(rs->rs_bound_port); 62 be16_to_cpu(rs->rs_bound_port);
64 63
diff --git a/net/rds/connection.c b/net/rds/connection.c
index 9e07c756d1f9..642ad42c416b 100644
--- a/net/rds/connection.c
+++ b/net/rds/connection.c
@@ -69,9 +69,8 @@ static struct rds_connection *rds_conn_lookup(struct hlist_head *head,
69 struct rds_transport *trans) 69 struct rds_transport *trans)
70{ 70{
71 struct rds_connection *conn, *ret = NULL; 71 struct rds_connection *conn, *ret = NULL;
72 struct hlist_node *pos;
73 72
74 hlist_for_each_entry_rcu(conn, pos, head, c_hash_node) { 73 hlist_for_each_entry_rcu(conn, head, c_hash_node) {
75 if (conn->c_faddr == faddr && conn->c_laddr == laddr && 74 if (conn->c_faddr == faddr && conn->c_laddr == laddr &&
76 conn->c_trans == trans) { 75 conn->c_trans == trans) {
77 ret = conn; 76 ret = conn;
@@ -376,7 +375,6 @@ static void rds_conn_message_info(struct socket *sock, unsigned int len,
376 int want_send) 375 int want_send)
377{ 376{
378 struct hlist_head *head; 377 struct hlist_head *head;
379 struct hlist_node *pos;
380 struct list_head *list; 378 struct list_head *list;
381 struct rds_connection *conn; 379 struct rds_connection *conn;
382 struct rds_message *rm; 380 struct rds_message *rm;
@@ -390,7 +388,7 @@ static void rds_conn_message_info(struct socket *sock, unsigned int len,
390 388
391 for (i = 0, head = rds_conn_hash; i < ARRAY_SIZE(rds_conn_hash); 389 for (i = 0, head = rds_conn_hash; i < ARRAY_SIZE(rds_conn_hash);
392 i++, head++) { 390 i++, head++) {
393 hlist_for_each_entry_rcu(conn, pos, head, c_hash_node) { 391 hlist_for_each_entry_rcu(conn, head, c_hash_node) {
394 if (want_send) 392 if (want_send)
395 list = &conn->c_send_queue; 393 list = &conn->c_send_queue;
396 else 394 else
@@ -439,7 +437,6 @@ void rds_for_each_conn_info(struct socket *sock, unsigned int len,
439{ 437{
440 uint64_t buffer[(item_len + 7) / 8]; 438 uint64_t buffer[(item_len + 7) / 8];
441 struct hlist_head *head; 439 struct hlist_head *head;
442 struct hlist_node *pos;
443 struct rds_connection *conn; 440 struct rds_connection *conn;
444 size_t i; 441 size_t i;
445 442
@@ -450,7 +447,7 @@ void rds_for_each_conn_info(struct socket *sock, unsigned int len,
450 447
451 for (i = 0, head = rds_conn_hash; i < ARRAY_SIZE(rds_conn_hash); 448 for (i = 0, head = rds_conn_hash; i < ARRAY_SIZE(rds_conn_hash);
452 i++, head++) { 449 i++, head++) {
453 hlist_for_each_entry_rcu(conn, pos, head, c_hash_node) { 450 hlist_for_each_entry_rcu(conn, head, c_hash_node) {
454 451
455 /* XXX no c_lock usage.. */ 452 /* XXX no c_lock usage.. */
456 if (!visitor(conn, buffer)) 453 if (!visitor(conn, buffer))
diff --git a/net/rose/af_rose.c b/net/rose/af_rose.c
index b768fe9d5e7a..cf68e6e4054a 100644
--- a/net/rose/af_rose.c
+++ b/net/rose/af_rose.c
@@ -165,10 +165,9 @@ static void rose_remove_socket(struct sock *sk)
165void rose_kill_by_neigh(struct rose_neigh *neigh) 165void rose_kill_by_neigh(struct rose_neigh *neigh)
166{ 166{
167 struct sock *s; 167 struct sock *s;
168 struct hlist_node *node;
169 168
170 spin_lock_bh(&rose_list_lock); 169 spin_lock_bh(&rose_list_lock);
171 sk_for_each(s, node, &rose_list) { 170 sk_for_each(s, &rose_list) {
172 struct rose_sock *rose = rose_sk(s); 171 struct rose_sock *rose = rose_sk(s);
173 172
174 if (rose->neighbour == neigh) { 173 if (rose->neighbour == neigh) {
@@ -186,10 +185,9 @@ void rose_kill_by_neigh(struct rose_neigh *neigh)
186static void rose_kill_by_device(struct net_device *dev) 185static void rose_kill_by_device(struct net_device *dev)
187{ 186{
188 struct sock *s; 187 struct sock *s;
189 struct hlist_node *node;
190 188
191 spin_lock_bh(&rose_list_lock); 189 spin_lock_bh(&rose_list_lock);
192 sk_for_each(s, node, &rose_list) { 190 sk_for_each(s, &rose_list) {
193 struct rose_sock *rose = rose_sk(s); 191 struct rose_sock *rose = rose_sk(s);
194 192
195 if (rose->device == dev) { 193 if (rose->device == dev) {
@@ -246,10 +244,9 @@ static void rose_insert_socket(struct sock *sk)
246static struct sock *rose_find_listener(rose_address *addr, ax25_address *call) 244static struct sock *rose_find_listener(rose_address *addr, ax25_address *call)
247{ 245{
248 struct sock *s; 246 struct sock *s;
249 struct hlist_node *node;
250 247
251 spin_lock_bh(&rose_list_lock); 248 spin_lock_bh(&rose_list_lock);
252 sk_for_each(s, node, &rose_list) { 249 sk_for_each(s, &rose_list) {
253 struct rose_sock *rose = rose_sk(s); 250 struct rose_sock *rose = rose_sk(s);
254 251
255 if (!rosecmp(&rose->source_addr, addr) && 252 if (!rosecmp(&rose->source_addr, addr) &&
@@ -258,7 +255,7 @@ static struct sock *rose_find_listener(rose_address *addr, ax25_address *call)
258 goto found; 255 goto found;
259 } 256 }
260 257
261 sk_for_each(s, node, &rose_list) { 258 sk_for_each(s, &rose_list) {
262 struct rose_sock *rose = rose_sk(s); 259 struct rose_sock *rose = rose_sk(s);
263 260
264 if (!rosecmp(&rose->source_addr, addr) && 261 if (!rosecmp(&rose->source_addr, addr) &&
@@ -278,10 +275,9 @@ found:
278struct sock *rose_find_socket(unsigned int lci, struct rose_neigh *neigh) 275struct sock *rose_find_socket(unsigned int lci, struct rose_neigh *neigh)
279{ 276{
280 struct sock *s; 277 struct sock *s;
281 struct hlist_node *node;
282 278
283 spin_lock_bh(&rose_list_lock); 279 spin_lock_bh(&rose_list_lock);
284 sk_for_each(s, node, &rose_list) { 280 sk_for_each(s, &rose_list) {
285 struct rose_sock *rose = rose_sk(s); 281 struct rose_sock *rose = rose_sk(s);
286 282
287 if (rose->lci == lci && rose->neighbour == neigh) 283 if (rose->lci == lci && rose->neighbour == neigh)
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index a181b484812a..c297e2a8e2a1 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -545,7 +545,7 @@ static void qdisc_class_hash_free(struct hlist_head *h, unsigned int n)
545void qdisc_class_hash_grow(struct Qdisc *sch, struct Qdisc_class_hash *clhash) 545void qdisc_class_hash_grow(struct Qdisc *sch, struct Qdisc_class_hash *clhash)
546{ 546{
547 struct Qdisc_class_common *cl; 547 struct Qdisc_class_common *cl;
548 struct hlist_node *n, *next; 548 struct hlist_node *next;
549 struct hlist_head *nhash, *ohash; 549 struct hlist_head *nhash, *ohash;
550 unsigned int nsize, nmask, osize; 550 unsigned int nsize, nmask, osize;
551 unsigned int i, h; 551 unsigned int i, h;
@@ -564,7 +564,7 @@ void qdisc_class_hash_grow(struct Qdisc *sch, struct Qdisc_class_hash *clhash)
564 564
565 sch_tree_lock(sch); 565 sch_tree_lock(sch);
566 for (i = 0; i < osize; i++) { 566 for (i = 0; i < osize; i++) {
567 hlist_for_each_entry_safe(cl, n, next, &ohash[i], hnode) { 567 hlist_for_each_entry_safe(cl, next, &ohash[i], hnode) {
568 h = qdisc_class_hash(cl->classid, nmask); 568 h = qdisc_class_hash(cl->classid, nmask);
569 hlist_add_head(&cl->hnode, &nhash[h]); 569 hlist_add_head(&cl->hnode, &nhash[h]);
570 } 570 }
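
qdisc_class_hash_grow is a good reminder of why the _safe variant survives the cleanup: hlist_add_head() into the new table rewrites cl->hnode, destroying the old bucket's chaining mid-walk, so the next pointer has to be snapshotted before each move. A fragment of the same move, assuming the minimal hlist helpers sketched earlier and a hypothetical struct cls:

        struct cls { unsigned int classid; struct hlist_node hnode; };

        static void rehash(struct hlist_head *ohash, unsigned int osize,
                           struct hlist_head *nhash, unsigned int nmask)
        {
                struct hlist_node *next;
                struct cls *cl;
                unsigned int i;

                for (i = 0; i < osize; i++)
                        hlist_for_each_entry_safe(cl, next, &ohash[i], hnode)
                                hlist_add_head(&cl->hnode,
                                               &nhash[cl->classid & nmask]);
        }
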
diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c
index 0e19948470b8..13aa47aa2ffb 100644
--- a/net/sched/sch_cbq.c
+++ b/net/sched/sch_cbq.c
@@ -1041,14 +1041,13 @@ static void cbq_adjust_levels(struct cbq_class *this)
1041static void cbq_normalize_quanta(struct cbq_sched_data *q, int prio) 1041static void cbq_normalize_quanta(struct cbq_sched_data *q, int prio)
1042{ 1042{
1043 struct cbq_class *cl; 1043 struct cbq_class *cl;
1044 struct hlist_node *n;
1045 unsigned int h; 1044 unsigned int h;
1046 1045
1047 if (q->quanta[prio] == 0) 1046 if (q->quanta[prio] == 0)
1048 return; 1047 return;
1049 1048
1050 for (h = 0; h < q->clhash.hashsize; h++) { 1049 for (h = 0; h < q->clhash.hashsize; h++) {
1051 hlist_for_each_entry(cl, n, &q->clhash.hash[h], common.hnode) { 1050 hlist_for_each_entry(cl, &q->clhash.hash[h], common.hnode) {
1052 /* BUGGGG... Beware! This expression suffer of 1051 /* BUGGGG... Beware! This expression suffer of
1053 * arithmetic overflows! 1052 * arithmetic overflows!
1054 */ 1053 */
@@ -1087,10 +1086,9 @@ static void cbq_sync_defmap(struct cbq_class *cl)
1087 continue; 1086 continue;
1088 1087
1089 for (h = 0; h < q->clhash.hashsize; h++) { 1088 for (h = 0; h < q->clhash.hashsize; h++) {
1090 struct hlist_node *n;
1091 struct cbq_class *c; 1089 struct cbq_class *c;
1092 1090
1093 hlist_for_each_entry(c, n, &q->clhash.hash[h], 1091 hlist_for_each_entry(c, &q->clhash.hash[h],
1094 common.hnode) { 1092 common.hnode) {
1095 if (c->split == split && c->level < level && 1093 if (c->split == split && c->level < level &&
1096 c->defmap & (1<<i)) { 1094 c->defmap & (1<<i)) {
@@ -1210,7 +1208,6 @@ cbq_reset(struct Qdisc *sch)
1210{ 1208{
1211 struct cbq_sched_data *q = qdisc_priv(sch); 1209 struct cbq_sched_data *q = qdisc_priv(sch);
1212 struct cbq_class *cl; 1210 struct cbq_class *cl;
1213 struct hlist_node *n;
1214 int prio; 1211 int prio;
1215 unsigned int h; 1212 unsigned int h;
1216 1213
@@ -1228,7 +1225,7 @@ cbq_reset(struct Qdisc *sch)
1228 q->active[prio] = NULL; 1225 q->active[prio] = NULL;
1229 1226
1230 for (h = 0; h < q->clhash.hashsize; h++) { 1227 for (h = 0; h < q->clhash.hashsize; h++) {
1231 hlist_for_each_entry(cl, n, &q->clhash.hash[h], common.hnode) { 1228 hlist_for_each_entry(cl, &q->clhash.hash[h], common.hnode) {
1232 qdisc_reset(cl->q); 1229 qdisc_reset(cl->q);
1233 1230
1234 cl->next_alive = NULL; 1231 cl->next_alive = NULL;
@@ -1697,7 +1694,7 @@ static void cbq_destroy_class(struct Qdisc *sch, struct cbq_class *cl)
1697static void cbq_destroy(struct Qdisc *sch) 1694static void cbq_destroy(struct Qdisc *sch)
1698{ 1695{
1699 struct cbq_sched_data *q = qdisc_priv(sch); 1696 struct cbq_sched_data *q = qdisc_priv(sch);
1700 struct hlist_node *n, *next; 1697 struct hlist_node *next;
1701 struct cbq_class *cl; 1698 struct cbq_class *cl;
1702 unsigned int h; 1699 unsigned int h;
1703 1700
@@ -1710,11 +1707,11 @@ static void cbq_destroy(struct Qdisc *sch)
1710 * be bound to classes which have been destroyed already. --TGR '04 1707 * be bound to classes which have been destroyed already. --TGR '04
1711 */ 1708 */
1712 for (h = 0; h < q->clhash.hashsize; h++) { 1709 for (h = 0; h < q->clhash.hashsize; h++) {
1713 hlist_for_each_entry(cl, n, &q->clhash.hash[h], common.hnode) 1710 hlist_for_each_entry(cl, &q->clhash.hash[h], common.hnode)
1714 tcf_destroy_chain(&cl->filter_list); 1711 tcf_destroy_chain(&cl->filter_list);
1715 } 1712 }
1716 for (h = 0; h < q->clhash.hashsize; h++) { 1713 for (h = 0; h < q->clhash.hashsize; h++) {
1717 hlist_for_each_entry_safe(cl, n, next, &q->clhash.hash[h], 1714 hlist_for_each_entry_safe(cl, next, &q->clhash.hash[h],
1718 common.hnode) 1715 common.hnode)
1719 cbq_destroy_class(sch, cl); 1716 cbq_destroy_class(sch, cl);
1720 } 1717 }
@@ -2013,14 +2010,13 @@ static void cbq_walk(struct Qdisc *sch, struct qdisc_walker *arg)
2013{ 2010{
2014 struct cbq_sched_data *q = qdisc_priv(sch); 2011 struct cbq_sched_data *q = qdisc_priv(sch);
2015 struct cbq_class *cl; 2012 struct cbq_class *cl;
2016 struct hlist_node *n;
2017 unsigned int h; 2013 unsigned int h;
2018 2014
2019 if (arg->stop) 2015 if (arg->stop)
2020 return; 2016 return;
2021 2017
2022 for (h = 0; h < q->clhash.hashsize; h++) { 2018 for (h = 0; h < q->clhash.hashsize; h++) {
2023 hlist_for_each_entry(cl, n, &q->clhash.hash[h], common.hnode) { 2019 hlist_for_each_entry(cl, &q->clhash.hash[h], common.hnode) {
2024 if (arg->count < arg->skip) { 2020 if (arg->count < arg->skip) {
2025 arg->count++; 2021 arg->count++;
2026 continue; 2022 continue;
diff --git a/net/sched/sch_drr.c b/net/sched/sch_drr.c
index 71e50c80315f..759b308d1a8d 100644
--- a/net/sched/sch_drr.c
+++ b/net/sched/sch_drr.c
@@ -293,14 +293,13 @@ static void drr_walk(struct Qdisc *sch, struct qdisc_walker *arg)
293{ 293{
294 struct drr_sched *q = qdisc_priv(sch); 294 struct drr_sched *q = qdisc_priv(sch);
295 struct drr_class *cl; 295 struct drr_class *cl;
296 struct hlist_node *n;
297 unsigned int i; 296 unsigned int i;
298 297
299 if (arg->stop) 298 if (arg->stop)
300 return; 299 return;
301 300
302 for (i = 0; i < q->clhash.hashsize; i++) { 301 for (i = 0; i < q->clhash.hashsize; i++) {
303 hlist_for_each_entry(cl, n, &q->clhash.hash[i], common.hnode) { 302 hlist_for_each_entry(cl, &q->clhash.hash[i], common.hnode) {
304 if (arg->count < arg->skip) { 303 if (arg->count < arg->skip) {
305 arg->count++; 304 arg->count++;
306 continue; 305 continue;
@@ -451,11 +450,10 @@ static void drr_reset_qdisc(struct Qdisc *sch)
451{ 450{
452 struct drr_sched *q = qdisc_priv(sch); 451 struct drr_sched *q = qdisc_priv(sch);
453 struct drr_class *cl; 452 struct drr_class *cl;
454 struct hlist_node *n;
455 unsigned int i; 453 unsigned int i;
456 454
457 for (i = 0; i < q->clhash.hashsize; i++) { 455 for (i = 0; i < q->clhash.hashsize; i++) {
458 hlist_for_each_entry(cl, n, &q->clhash.hash[i], common.hnode) { 456 hlist_for_each_entry(cl, &q->clhash.hash[i], common.hnode) {
459 if (cl->qdisc->q.qlen) 457 if (cl->qdisc->q.qlen)
460 list_del(&cl->alist); 458 list_del(&cl->alist);
461 qdisc_reset(cl->qdisc); 459 qdisc_reset(cl->qdisc);
@@ -468,13 +466,13 @@ static void drr_destroy_qdisc(struct Qdisc *sch)
468{ 466{
469 struct drr_sched *q = qdisc_priv(sch); 467 struct drr_sched *q = qdisc_priv(sch);
470 struct drr_class *cl; 468 struct drr_class *cl;
471 struct hlist_node *n, *next; 469 struct hlist_node *next;
472 unsigned int i; 470 unsigned int i;
473 471
474 tcf_destroy_chain(&q->filter_list); 472 tcf_destroy_chain(&q->filter_list);
475 473
476 for (i = 0; i < q->clhash.hashsize; i++) { 474 for (i = 0; i < q->clhash.hashsize; i++) {
477 hlist_for_each_entry_safe(cl, n, next, &q->clhash.hash[i], 475 hlist_for_each_entry_safe(cl, next, &q->clhash.hash[i],
478 common.hnode) 476 common.hnode)
479 drr_destroy_class(sch, cl); 477 drr_destroy_class(sch, cl);
480 } 478 }
diff --git a/net/sched/sch_hfsc.c b/net/sched/sch_hfsc.c
index 6c2ec4510540..9facea03faeb 100644
--- a/net/sched/sch_hfsc.c
+++ b/net/sched/sch_hfsc.c
@@ -1389,7 +1389,6 @@ static void
1389hfsc_walk(struct Qdisc *sch, struct qdisc_walker *arg) 1389hfsc_walk(struct Qdisc *sch, struct qdisc_walker *arg)
1390{ 1390{
1391 struct hfsc_sched *q = qdisc_priv(sch); 1391 struct hfsc_sched *q = qdisc_priv(sch);
1392 struct hlist_node *n;
1393 struct hfsc_class *cl; 1392 struct hfsc_class *cl;
1394 unsigned int i; 1393 unsigned int i;
1395 1394
@@ -1397,7 +1396,7 @@ hfsc_walk(struct Qdisc *sch, struct qdisc_walker *arg)
1397 return; 1396 return;
1398 1397
1399 for (i = 0; i < q->clhash.hashsize; i++) { 1398 for (i = 0; i < q->clhash.hashsize; i++) {
1400 hlist_for_each_entry(cl, n, &q->clhash.hash[i], 1399 hlist_for_each_entry(cl, &q->clhash.hash[i],
1401 cl_common.hnode) { 1400 cl_common.hnode) {
1402 if (arg->count < arg->skip) { 1401 if (arg->count < arg->skip) {
1403 arg->count++; 1402 arg->count++;
@@ -1523,11 +1522,10 @@ hfsc_reset_qdisc(struct Qdisc *sch)
1523{ 1522{
1524 struct hfsc_sched *q = qdisc_priv(sch); 1523 struct hfsc_sched *q = qdisc_priv(sch);
1525 struct hfsc_class *cl; 1524 struct hfsc_class *cl;
1526 struct hlist_node *n;
1527 unsigned int i; 1525 unsigned int i;
1528 1526
1529 for (i = 0; i < q->clhash.hashsize; i++) { 1527 for (i = 0; i < q->clhash.hashsize; i++) {
1530 hlist_for_each_entry(cl, n, &q->clhash.hash[i], cl_common.hnode) 1528 hlist_for_each_entry(cl, &q->clhash.hash[i], cl_common.hnode)
1531 hfsc_reset_class(cl); 1529 hfsc_reset_class(cl);
1532 } 1530 }
1533 q->eligible = RB_ROOT; 1531 q->eligible = RB_ROOT;
@@ -1540,16 +1538,16 @@ static void
1540hfsc_destroy_qdisc(struct Qdisc *sch) 1538hfsc_destroy_qdisc(struct Qdisc *sch)
1541{ 1539{
1542 struct hfsc_sched *q = qdisc_priv(sch); 1540 struct hfsc_sched *q = qdisc_priv(sch);
1543 struct hlist_node *n, *next; 1541 struct hlist_node *next;
1544 struct hfsc_class *cl; 1542 struct hfsc_class *cl;
1545 unsigned int i; 1543 unsigned int i;
1546 1544
1547 for (i = 0; i < q->clhash.hashsize; i++) { 1545 for (i = 0; i < q->clhash.hashsize; i++) {
1548 hlist_for_each_entry(cl, n, &q->clhash.hash[i], cl_common.hnode) 1546 hlist_for_each_entry(cl, &q->clhash.hash[i], cl_common.hnode)
1549 tcf_destroy_chain(&cl->filter_list); 1547 tcf_destroy_chain(&cl->filter_list);
1550 } 1548 }
1551 for (i = 0; i < q->clhash.hashsize; i++) { 1549 for (i = 0; i < q->clhash.hashsize; i++) {
1552 hlist_for_each_entry_safe(cl, n, next, &q->clhash.hash[i], 1550 hlist_for_each_entry_safe(cl, next, &q->clhash.hash[i],
1553 cl_common.hnode) 1551 cl_common.hnode)
1554 hfsc_destroy_class(sch, cl); 1552 hfsc_destroy_class(sch, cl);
1555 } 1553 }
@@ -1564,12 +1562,11 @@ hfsc_dump_qdisc(struct Qdisc *sch, struct sk_buff *skb)
1564 unsigned char *b = skb_tail_pointer(skb); 1562 unsigned char *b = skb_tail_pointer(skb);
1565 struct tc_hfsc_qopt qopt; 1563 struct tc_hfsc_qopt qopt;
1566 struct hfsc_class *cl; 1564 struct hfsc_class *cl;
1567 struct hlist_node *n;
1568 unsigned int i; 1565 unsigned int i;
1569 1566
1570 sch->qstats.backlog = 0; 1567 sch->qstats.backlog = 0;
1571 for (i = 0; i < q->clhash.hashsize; i++) { 1568 for (i = 0; i < q->clhash.hashsize; i++) {
1572 hlist_for_each_entry(cl, n, &q->clhash.hash[i], cl_common.hnode) 1569 hlist_for_each_entry(cl, &q->clhash.hash[i], cl_common.hnode)
1573 sch->qstats.backlog += cl->qdisc->qstats.backlog; 1570 sch->qstats.backlog += cl->qdisc->qstats.backlog;
1574 } 1571 }
1575 1572
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
index 03c2692ca01e..571f1d211f4d 100644
--- a/net/sched/sch_htb.c
+++ b/net/sched/sch_htb.c
@@ -949,11 +949,10 @@ static void htb_reset(struct Qdisc *sch)
949{ 949{
950 struct htb_sched *q = qdisc_priv(sch); 950 struct htb_sched *q = qdisc_priv(sch);
951 struct htb_class *cl; 951 struct htb_class *cl;
952 struct hlist_node *n;
953 unsigned int i; 952 unsigned int i;
954 953
955 for (i = 0; i < q->clhash.hashsize; i++) { 954 for (i = 0; i < q->clhash.hashsize; i++) {
956 hlist_for_each_entry(cl, n, &q->clhash.hash[i], common.hnode) { 955 hlist_for_each_entry(cl, &q->clhash.hash[i], common.hnode) {
957 if (cl->level) 956 if (cl->level)
958 memset(&cl->un.inner, 0, sizeof(cl->un.inner)); 957 memset(&cl->un.inner, 0, sizeof(cl->un.inner));
959 else { 958 else {
@@ -1218,7 +1217,7 @@ static void htb_destroy_class(struct Qdisc *sch, struct htb_class *cl)
1218static void htb_destroy(struct Qdisc *sch) 1217static void htb_destroy(struct Qdisc *sch)
1219{ 1218{
1220 struct htb_sched *q = qdisc_priv(sch); 1219 struct htb_sched *q = qdisc_priv(sch);
1221 struct hlist_node *n, *next; 1220 struct hlist_node *next;
1222 struct htb_class *cl; 1221 struct htb_class *cl;
1223 unsigned int i; 1222 unsigned int i;
1224 1223
@@ -1232,11 +1231,11 @@ static void htb_destroy(struct Qdisc *sch)
1232 tcf_destroy_chain(&q->filter_list); 1231 tcf_destroy_chain(&q->filter_list);
1233 1232
1234 for (i = 0; i < q->clhash.hashsize; i++) { 1233 for (i = 0; i < q->clhash.hashsize; i++) {
1235 hlist_for_each_entry(cl, n, &q->clhash.hash[i], common.hnode) 1234 hlist_for_each_entry(cl, &q->clhash.hash[i], common.hnode)
1236 tcf_destroy_chain(&cl->filter_list); 1235 tcf_destroy_chain(&cl->filter_list);
1237 } 1236 }
1238 for (i = 0; i < q->clhash.hashsize; i++) { 1237 for (i = 0; i < q->clhash.hashsize; i++) {
1239 hlist_for_each_entry_safe(cl, n, next, &q->clhash.hash[i], 1238 hlist_for_each_entry_safe(cl, next, &q->clhash.hash[i],
1240 common.hnode) 1239 common.hnode)
1241 htb_destroy_class(sch, cl); 1240 htb_destroy_class(sch, cl);
1242 } 1241 }
@@ -1516,14 +1515,13 @@ static void htb_walk(struct Qdisc *sch, struct qdisc_walker *arg)
1516{ 1515{
1517 struct htb_sched *q = qdisc_priv(sch); 1516 struct htb_sched *q = qdisc_priv(sch);
1518 struct htb_class *cl; 1517 struct htb_class *cl;
1519 struct hlist_node *n;
1520 unsigned int i; 1518 unsigned int i;
1521 1519
1522 if (arg->stop) 1520 if (arg->stop)
1523 return; 1521 return;
1524 1522
1525 for (i = 0; i < q->clhash.hashsize; i++) { 1523 for (i = 0; i < q->clhash.hashsize; i++) {
1526 hlist_for_each_entry(cl, n, &q->clhash.hash[i], common.hnode) { 1524 hlist_for_each_entry(cl, &q->clhash.hash[i], common.hnode) {
1527 if (arg->count < arg->skip) { 1525 if (arg->count < arg->skip) {
1528 arg->count++; 1526 arg->count++;
1529 continue; 1527 continue;
diff --git a/net/sched/sch_qfq.c b/net/sched/sch_qfq.c
index 6ed37652a4c3..e9a77f621c3d 100644
--- a/net/sched/sch_qfq.c
+++ b/net/sched/sch_qfq.c
@@ -276,9 +276,8 @@ static struct qfq_aggregate *qfq_find_agg(struct qfq_sched *q,
276 u32 lmax, u32 weight) 276 u32 lmax, u32 weight)
277{ 277{
278 struct qfq_aggregate *agg; 278 struct qfq_aggregate *agg;
279 struct hlist_node *n;
280 279
281 hlist_for_each_entry(agg, n, &q->nonfull_aggs, nonfull_next) 280 hlist_for_each_entry(agg, &q->nonfull_aggs, nonfull_next)
282 if (agg->lmax == lmax && agg->class_weight == weight) 281 if (agg->lmax == lmax && agg->class_weight == weight)
283 return agg; 282 return agg;
284 283
@@ -670,14 +669,13 @@ static void qfq_walk(struct Qdisc *sch, struct qdisc_walker *arg)
670{ 669{
671 struct qfq_sched *q = qdisc_priv(sch); 670 struct qfq_sched *q = qdisc_priv(sch);
672 struct qfq_class *cl; 671 struct qfq_class *cl;
673 struct hlist_node *n;
674 unsigned int i; 672 unsigned int i;
675 673
676 if (arg->stop) 674 if (arg->stop)
677 return; 675 return;
678 676
679 for (i = 0; i < q->clhash.hashsize; i++) { 677 for (i = 0; i < q->clhash.hashsize; i++) {
680 hlist_for_each_entry(cl, n, &q->clhash.hash[i], common.hnode) { 678 hlist_for_each_entry(cl, &q->clhash.hash[i], common.hnode) {
681 if (arg->count < arg->skip) { 679 if (arg->count < arg->skip) {
682 arg->count++; 680 arg->count++;
683 continue; 681 continue;
@@ -1376,11 +1374,10 @@ static unsigned int qfq_drop_from_slot(struct qfq_sched *q,
1376 struct hlist_head *slot) 1374 struct hlist_head *slot)
1377{ 1375{
1378 struct qfq_aggregate *agg; 1376 struct qfq_aggregate *agg;
1379 struct hlist_node *n;
1380 struct qfq_class *cl; 1377 struct qfq_class *cl;
1381 unsigned int len; 1378 unsigned int len;
1382 1379
1383 hlist_for_each_entry(agg, n, slot, next) { 1380 hlist_for_each_entry(agg, slot, next) {
1384 list_for_each_entry(cl, &agg->active, alist) { 1381 list_for_each_entry(cl, &agg->active, alist) {
1385 1382
1386 if (!cl->qdisc->ops->drop) 1383 if (!cl->qdisc->ops->drop)
@@ -1459,11 +1456,10 @@ static void qfq_reset_qdisc(struct Qdisc *sch)
1459{ 1456{
1460 struct qfq_sched *q = qdisc_priv(sch); 1457 struct qfq_sched *q = qdisc_priv(sch);
1461 struct qfq_class *cl; 1458 struct qfq_class *cl;
1462 struct hlist_node *n;
1463 unsigned int i; 1459 unsigned int i;
1464 1460
1465 for (i = 0; i < q->clhash.hashsize; i++) { 1461 for (i = 0; i < q->clhash.hashsize; i++) {
1466 hlist_for_each_entry(cl, n, &q->clhash.hash[i], common.hnode) { 1462 hlist_for_each_entry(cl, &q->clhash.hash[i], common.hnode) {
1467 if (cl->qdisc->q.qlen > 0) 1463 if (cl->qdisc->q.qlen > 0)
1468 qfq_deactivate_class(q, cl); 1464 qfq_deactivate_class(q, cl);
1469 1465
@@ -1477,13 +1473,13 @@ static void qfq_destroy_qdisc(struct Qdisc *sch)
1477{ 1473{
1478 struct qfq_sched *q = qdisc_priv(sch); 1474 struct qfq_sched *q = qdisc_priv(sch);
1479 struct qfq_class *cl; 1475 struct qfq_class *cl;
1480 struct hlist_node *n, *next; 1476 struct hlist_node *next;
1481 unsigned int i; 1477 unsigned int i;
1482 1478
1483 tcf_destroy_chain(&q->filter_list); 1479 tcf_destroy_chain(&q->filter_list);
1484 1480
1485 for (i = 0; i < q->clhash.hashsize; i++) { 1481 for (i = 0; i < q->clhash.hashsize; i++) {
1486 hlist_for_each_entry_safe(cl, n, next, &q->clhash.hash[i], 1482 hlist_for_each_entry_safe(cl, next, &q->clhash.hash[i],
1487 common.hnode) { 1483 common.hnode) {
1488 qfq_destroy_class(sch, cl); 1484 qfq_destroy_class(sch, cl);
1489 } 1485 }
diff --git a/net/sctp/endpointola.c b/net/sctp/endpointola.c
index 73aad3d16a45..2b3ef03c6098 100644
--- a/net/sctp/endpointola.c
+++ b/net/sctp/endpointola.c
@@ -332,7 +332,6 @@ static struct sctp_association *__sctp_endpoint_lookup_assoc(
332 struct sctp_transport *t = NULL; 332 struct sctp_transport *t = NULL;
333 struct sctp_hashbucket *head; 333 struct sctp_hashbucket *head;
334 struct sctp_ep_common *epb; 334 struct sctp_ep_common *epb;
335 struct hlist_node *node;
336 int hash; 335 int hash;
337 int rport; 336 int rport;
338 337
@@ -350,7 +349,7 @@ static struct sctp_association *__sctp_endpoint_lookup_assoc(
350 rport); 349 rport);
351 head = &sctp_assoc_hashtable[hash]; 350 head = &sctp_assoc_hashtable[hash];
352 read_lock(&head->lock); 351 read_lock(&head->lock);
353 sctp_for_each_hentry(epb, node, &head->chain) { 352 sctp_for_each_hentry(epb, &head->chain) {
354 tmp = sctp_assoc(epb); 353 tmp = sctp_assoc(epb);
355 if (tmp->ep != ep || rport != tmp->peer.port) 354 if (tmp->ep != ep || rport != tmp->peer.port)
356 continue; 355 continue;
diff --git a/net/sctp/input.c b/net/sctp/input.c
index 965bbbbe48d4..4b2c83146aa7 100644
--- a/net/sctp/input.c
+++ b/net/sctp/input.c
@@ -784,13 +784,12 @@ static struct sctp_endpoint *__sctp_rcv_lookup_endpoint(struct net *net,
784 struct sctp_hashbucket *head; 784 struct sctp_hashbucket *head;
785 struct sctp_ep_common *epb; 785 struct sctp_ep_common *epb;
786 struct sctp_endpoint *ep; 786 struct sctp_endpoint *ep;
787 struct hlist_node *node;
788 int hash; 787 int hash;
789 788
790 hash = sctp_ep_hashfn(net, ntohs(laddr->v4.sin_port)); 789 hash = sctp_ep_hashfn(net, ntohs(laddr->v4.sin_port));
791 head = &sctp_ep_hashtable[hash]; 790 head = &sctp_ep_hashtable[hash];
792 read_lock(&head->lock); 791 read_lock(&head->lock);
793 sctp_for_each_hentry(epb, node, &head->chain) { 792 sctp_for_each_hentry(epb, &head->chain) {
794 ep = sctp_ep(epb); 793 ep = sctp_ep(epb);
795 if (sctp_endpoint_is_match(ep, net, laddr)) 794 if (sctp_endpoint_is_match(ep, net, laddr))
796 goto hit; 795 goto hit;
@@ -876,7 +875,6 @@ static struct sctp_association *__sctp_lookup_association(
876 struct sctp_ep_common *epb; 875 struct sctp_ep_common *epb;
877 struct sctp_association *asoc; 876 struct sctp_association *asoc;
878 struct sctp_transport *transport; 877 struct sctp_transport *transport;
879 struct hlist_node *node;
880 int hash; 878 int hash;
881 879
882 /* Optimize here for direct hit, only listening connections can 880 /* Optimize here for direct hit, only listening connections can
@@ -886,7 +884,7 @@ static struct sctp_association *__sctp_lookup_association(
886 ntohs(peer->v4.sin_port)); 884 ntohs(peer->v4.sin_port));
887 head = &sctp_assoc_hashtable[hash]; 885 head = &sctp_assoc_hashtable[hash];
888 read_lock(&head->lock); 886 read_lock(&head->lock);
889 sctp_for_each_hentry(epb, node, &head->chain) { 887 sctp_for_each_hentry(epb, &head->chain) {
890 asoc = sctp_assoc(epb); 888 asoc = sctp_assoc(epb);
891 transport = sctp_assoc_is_match(asoc, net, local, peer); 889 transport = sctp_assoc_is_match(asoc, net, local, peer);
892 if (transport) 890 if (transport)
diff --git a/net/sctp/proc.c b/net/sctp/proc.c
index 8c19e97262ca..ab3bba8cb0a8 100644
--- a/net/sctp/proc.c
+++ b/net/sctp/proc.c
@@ -213,7 +213,6 @@ static int sctp_eps_seq_show(struct seq_file *seq, void *v)
213 struct sctp_ep_common *epb; 213 struct sctp_ep_common *epb;
214 struct sctp_endpoint *ep; 214 struct sctp_endpoint *ep;
215 struct sock *sk; 215 struct sock *sk;
216 struct hlist_node *node;
217 int hash = *(loff_t *)v; 216 int hash = *(loff_t *)v;
218 217
219 if (hash >= sctp_ep_hashsize) 218 if (hash >= sctp_ep_hashsize)
@@ -222,7 +221,7 @@ static int sctp_eps_seq_show(struct seq_file *seq, void *v)
222 head = &sctp_ep_hashtable[hash]; 221 head = &sctp_ep_hashtable[hash];
223 sctp_local_bh_disable(); 222 sctp_local_bh_disable();
224 read_lock(&head->lock); 223 read_lock(&head->lock);
225 sctp_for_each_hentry(epb, node, &head->chain) { 224 sctp_for_each_hentry(epb, &head->chain) {
226 ep = sctp_ep(epb); 225 ep = sctp_ep(epb);
227 sk = epb->sk; 226 sk = epb->sk;
228 if (!net_eq(sock_net(sk), seq_file_net(seq))) 227 if (!net_eq(sock_net(sk), seq_file_net(seq)))
@@ -321,7 +320,6 @@ static int sctp_assocs_seq_show(struct seq_file *seq, void *v)
321 struct sctp_ep_common *epb; 320 struct sctp_ep_common *epb;
322 struct sctp_association *assoc; 321 struct sctp_association *assoc;
323 struct sock *sk; 322 struct sock *sk;
324 struct hlist_node *node;
325 int hash = *(loff_t *)v; 323 int hash = *(loff_t *)v;
326 324
327 if (hash >= sctp_assoc_hashsize) 325 if (hash >= sctp_assoc_hashsize)
@@ -330,7 +328,7 @@ static int sctp_assocs_seq_show(struct seq_file *seq, void *v)
330 head = &sctp_assoc_hashtable[hash]; 328 head = &sctp_assoc_hashtable[hash];
331 sctp_local_bh_disable(); 329 sctp_local_bh_disable();
332 read_lock(&head->lock); 330 read_lock(&head->lock);
333 sctp_for_each_hentry(epb, node, &head->chain) { 331 sctp_for_each_hentry(epb, &head->chain) {
334 assoc = sctp_assoc(epb); 332 assoc = sctp_assoc(epb);
335 sk = epb->sk; 333 sk = epb->sk;
336 if (!net_eq(sock_net(sk), seq_file_net(seq))) 334 if (!net_eq(sock_net(sk), seq_file_net(seq)))
@@ -436,7 +434,6 @@ static int sctp_remaddr_seq_show(struct seq_file *seq, void *v)
436 struct sctp_hashbucket *head; 434 struct sctp_hashbucket *head;
437 struct sctp_ep_common *epb; 435 struct sctp_ep_common *epb;
438 struct sctp_association *assoc; 436 struct sctp_association *assoc;
439 struct hlist_node *node;
440 struct sctp_transport *tsp; 437 struct sctp_transport *tsp;
441 int hash = *(loff_t *)v; 438 int hash = *(loff_t *)v;
442 439
@@ -447,7 +444,7 @@ static int sctp_remaddr_seq_show(struct seq_file *seq, void *v)
447 sctp_local_bh_disable(); 444 sctp_local_bh_disable();
448 read_lock(&head->lock); 445 read_lock(&head->lock);
449 rcu_read_lock(); 446 rcu_read_lock();
450 sctp_for_each_hentry(epb, node, &head->chain) { 447 sctp_for_each_hentry(epb, &head->chain) {
451 if (!net_eq(sock_net(epb->sk), seq_file_net(seq))) 448 if (!net_eq(sock_net(epb->sk), seq_file_net(seq)))
452 continue; 449 continue;
453 assoc = sctp_assoc(epb); 450 assoc = sctp_assoc(epb);
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index cedd9bf67b8c..c99458df3f3f 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -5882,8 +5882,7 @@ static struct sctp_bind_bucket *sctp_bucket_create(
5882static long sctp_get_port_local(struct sock *sk, union sctp_addr *addr) 5882static long sctp_get_port_local(struct sock *sk, union sctp_addr *addr)
5883{ 5883{
5884 struct sctp_bind_hashbucket *head; /* hash list */ 5884 struct sctp_bind_hashbucket *head; /* hash list */
5885 struct sctp_bind_bucket *pp; /* hash list port iterator */ 5885 struct sctp_bind_bucket *pp;
5886 struct hlist_node *node;
5887 unsigned short snum; 5886 unsigned short snum;
5888 int ret; 5887 int ret;
5889 5888
@@ -5910,7 +5909,7 @@ static long sctp_get_port_local(struct sock *sk, union sctp_addr *addr)
5910 index = sctp_phashfn(sock_net(sk), rover); 5909 index = sctp_phashfn(sock_net(sk), rover);
5911 head = &sctp_port_hashtable[index]; 5910 head = &sctp_port_hashtable[index];
5912 sctp_spin_lock(&head->lock); 5911 sctp_spin_lock(&head->lock);
5913 sctp_for_each_hentry(pp, node, &head->chain) 5912 sctp_for_each_hentry(pp, &head->chain)
5914 if ((pp->port == rover) && 5913 if ((pp->port == rover) &&
5915 net_eq(sock_net(sk), pp->net)) 5914 net_eq(sock_net(sk), pp->net))
5916 goto next; 5915 goto next;
@@ -5938,7 +5937,7 @@ static long sctp_get_port_local(struct sock *sk, union sctp_addr *addr)
5938 */ 5937 */
5939 head = &sctp_port_hashtable[sctp_phashfn(sock_net(sk), snum)]; 5938 head = &sctp_port_hashtable[sctp_phashfn(sock_net(sk), snum)];
5940 sctp_spin_lock(&head->lock); 5939 sctp_spin_lock(&head->lock);
5941 sctp_for_each_hentry(pp, node, &head->chain) { 5940 sctp_for_each_hentry(pp, &head->chain) {
5942 if ((pp->port == snum) && net_eq(pp->net, sock_net(sk))) 5941 if ((pp->port == snum) && net_eq(pp->net, sock_net(sk)))
5943 goto pp_found; 5942 goto pp_found;
5944 } 5943 }
@@ -5970,7 +5969,7 @@ pp_found:
5970 * that this port/socket (sk) combination are already 5969 * that this port/socket (sk) combination are already
5971 * in an endpoint. 5970 * in an endpoint.
5972 */ 5971 */
5973 sk_for_each_bound(sk2, node, &pp->owner) { 5972 sk_for_each_bound(sk2, &pp->owner) {
5974 struct sctp_endpoint *ep2; 5973 struct sctp_endpoint *ep2;
5975 ep2 = sctp_sk(sk2)->ep; 5974 ep2 = sctp_sk(sk2)->ep;
5976 5975
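
The sctp hunks also convert sk_for_each_bound() and sctp_for_each_hentry(), which are thin wrappers over the hlist macros; their definitions (in include/net/sock.h and include/net/sctp/sctp.h, converted by the same patch) shed the node parameter in lockstep, so the call sites change identically. Assuming the wrappers simply forward to the new core macro, their post-patch shape is roughly:

	/* sketch, assuming the wrappers mirror the core macro change */
	#define sk_for_each(__sk, list) \
		hlist_for_each_entry(__sk, list, sk_node)
	#define sctp_for_each_hentry(epb, head) \
		hlist_for_each_entry(epb, head, node)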
diff --git a/net/sunrpc/auth.c b/net/sunrpc/auth.c
index 392adc41e2e5..f5294047df77 100644
--- a/net/sunrpc/auth.c
+++ b/net/sunrpc/auth.c
@@ -407,7 +407,6 @@ rpcauth_lookup_credcache(struct rpc_auth *auth, struct auth_cred * acred,
407{ 407{
408 LIST_HEAD(free); 408 LIST_HEAD(free);
409 struct rpc_cred_cache *cache = auth->au_credcache; 409 struct rpc_cred_cache *cache = auth->au_credcache;
410 struct hlist_node *pos;
411 struct rpc_cred *cred = NULL, 410 struct rpc_cred *cred = NULL,
412 *entry, *new; 411 *entry, *new;
413 unsigned int nr; 412 unsigned int nr;
@@ -415,7 +414,7 @@ rpcauth_lookup_credcache(struct rpc_auth *auth, struct auth_cred * acred,
415 nr = hash_long(from_kuid(&init_user_ns, acred->uid), cache->hashbits); 414 nr = hash_long(from_kuid(&init_user_ns, acred->uid), cache->hashbits);
416 415
417 rcu_read_lock(); 416 rcu_read_lock();
418 hlist_for_each_entry_rcu(entry, pos, &cache->hashtable[nr], cr_hash) { 417 hlist_for_each_entry_rcu(entry, &cache->hashtable[nr], cr_hash) {
419 if (!entry->cr_ops->crmatch(acred, entry, flags)) 418 if (!entry->cr_ops->crmatch(acred, entry, flags))
420 continue; 419 continue;
421 spin_lock(&cache->lock); 420 spin_lock(&cache->lock);
@@ -439,7 +438,7 @@ rpcauth_lookup_credcache(struct rpc_auth *auth, struct auth_cred * acred,
439 } 438 }
440 439
441 spin_lock(&cache->lock); 440 spin_lock(&cache->lock);
442 hlist_for_each_entry(entry, pos, &cache->hashtable[nr], cr_hash) { 441 hlist_for_each_entry(entry, &cache->hashtable[nr], cr_hash) {
443 if (!entry->cr_ops->crmatch(acred, entry, flags)) 442 if (!entry->cr_ops->crmatch(acred, entry, flags))
444 continue; 443 continue;
445 cred = get_rpccred(entry); 444 cred = get_rpccred(entry);
diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c
index f3897d10f649..39a4112faf54 100644
--- a/net/sunrpc/cache.c
+++ b/net/sunrpc/cache.c
@@ -670,13 +670,13 @@ static void cache_revisit_request(struct cache_head *item)
670{ 670{
671 struct cache_deferred_req *dreq; 671 struct cache_deferred_req *dreq;
672 struct list_head pending; 672 struct list_head pending;
673 struct hlist_node *lp, *tmp; 673 struct hlist_node *tmp;
674 int hash = DFR_HASH(item); 674 int hash = DFR_HASH(item);
675 675
676 INIT_LIST_HEAD(&pending); 676 INIT_LIST_HEAD(&pending);
677 spin_lock(&cache_defer_lock); 677 spin_lock(&cache_defer_lock);
678 678
679 hlist_for_each_entry_safe(dreq, lp, tmp, &cache_defer_hash[hash], hash) 679 hlist_for_each_entry_safe(dreq, tmp, &cache_defer_hash[hash], hash)
680 if (dreq->item == item) { 680 if (dreq->item == item) {
681 __unhash_deferred_req(dreq); 681 __unhash_deferred_req(dreq);
682 list_add(&dreq->recent, &pending); 682 list_add(&dreq->recent, &pending);
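
The _safe variant keeps exactly one struct hlist_node * — the lookahead cursor — because the entry being visited may be unhashed inside the loop body, as __unhash_deferred_req() does here. Only the primary cursor is dropped:

	/* before: lp walked the list, tmp pre-fetched the next node */
	struct hlist_node *lp, *tmp;

	hlist_for_each_entry_safe(dreq, lp, tmp, &cache_defer_hash[hash], hash)

	/* after: tmp alone pre-fetches; dreq is the walk cursor */
	struct hlist_node *tmp;

	hlist_for_each_entry_safe(dreq, tmp, &cache_defer_hash[hash], hash)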
diff --git a/net/sunrpc/svcauth.c b/net/sunrpc/svcauth.c
index 7963569fc04f..2af7b0cba43a 100644
--- a/net/sunrpc/svcauth.c
+++ b/net/sunrpc/svcauth.c
@@ -138,13 +138,12 @@ auth_domain_lookup(char *name, struct auth_domain *new)
138{ 138{
139 struct auth_domain *hp; 139 struct auth_domain *hp;
140 struct hlist_head *head; 140 struct hlist_head *head;
141 struct hlist_node *np;
142 141
143 head = &auth_domain_table[hash_str(name, DN_HASHBITS)]; 142 head = &auth_domain_table[hash_str(name, DN_HASHBITS)];
144 143
145 spin_lock(&auth_domain_lock); 144 spin_lock(&auth_domain_lock);
146 145
147 hlist_for_each_entry(hp, np, head, hash) { 146 hlist_for_each_entry(hp, head, hash) {
148 if (strcmp(hp->name, name)==0) { 147 if (strcmp(hp->name, name)==0) {
149 kref_get(&hp->ref); 148 kref_get(&hp->ref);
150 spin_unlock(&auth_domain_lock); 149 spin_unlock(&auth_domain_lock);
diff --git a/net/tipc/name_table.c b/net/tipc/name_table.c
index 46754779fd3d..24b167914311 100644
--- a/net/tipc/name_table.c
+++ b/net/tipc/name_table.c
@@ -473,11 +473,10 @@ static void tipc_nameseq_subscribe(struct name_seq *nseq,
473static struct name_seq *nametbl_find_seq(u32 type) 473static struct name_seq *nametbl_find_seq(u32 type)
474{ 474{
475 struct hlist_head *seq_head; 475 struct hlist_head *seq_head;
476 struct hlist_node *seq_node;
477 struct name_seq *ns; 476 struct name_seq *ns;
478 477
479 seq_head = &table.types[hash(type)]; 478 seq_head = &table.types[hash(type)];
480 hlist_for_each_entry(ns, seq_node, seq_head, ns_list) { 479 hlist_for_each_entry(ns, seq_head, ns_list) {
481 if (ns->type == type) 480 if (ns->type == type)
482 return ns; 481 return ns;
483 } 482 }
@@ -853,7 +852,6 @@ static int nametbl_list(char *buf, int len, u32 depth_info,
853 u32 type, u32 lowbound, u32 upbound) 852 u32 type, u32 lowbound, u32 upbound)
854{ 853{
855 struct hlist_head *seq_head; 854 struct hlist_head *seq_head;
856 struct hlist_node *seq_node;
857 struct name_seq *seq; 855 struct name_seq *seq;
858 int all_types; 856 int all_types;
859 int ret = 0; 857 int ret = 0;
@@ -873,7 +871,7 @@ static int nametbl_list(char *buf, int len, u32 depth_info,
873 upbound = ~0; 871 upbound = ~0;
874 for (i = 0; i < TIPC_NAMETBL_SIZE; i++) { 872 for (i = 0; i < TIPC_NAMETBL_SIZE; i++) {
875 seq_head = &table.types[i]; 873 seq_head = &table.types[i];
876 hlist_for_each_entry(seq, seq_node, seq_head, ns_list) { 874 hlist_for_each_entry(seq, seq_head, ns_list) {
877 ret += nameseq_list(seq, buf + ret, len - ret, 875 ret += nameseq_list(seq, buf + ret, len - ret,
878 depth, seq->type, 876 depth, seq->type,
879 lowbound, upbound, i); 877 lowbound, upbound, i);
@@ -889,7 +887,7 @@ static int nametbl_list(char *buf, int len, u32 depth_info,
889 ret += nametbl_header(buf + ret, len - ret, depth); 887 ret += nametbl_header(buf + ret, len - ret, depth);
890 i = hash(type); 888 i = hash(type);
891 seq_head = &table.types[i]; 889 seq_head = &table.types[i];
892 hlist_for_each_entry(seq, seq_node, seq_head, ns_list) { 890 hlist_for_each_entry(seq, seq_head, ns_list) {
893 if (seq->type == type) { 891 if (seq->type == type) {
894 ret += nameseq_list(seq, buf + ret, len - ret, 892 ret += nameseq_list(seq, buf + ret, len - ret,
895 depth, type, 893 depth, type,
diff --git a/net/tipc/node.c b/net/tipc/node.c
index 48f39dd3eae8..6e6c434872e8 100644
--- a/net/tipc/node.c
+++ b/net/tipc/node.c
@@ -69,12 +69,11 @@ static unsigned int tipc_hashfn(u32 addr)
69struct tipc_node *tipc_node_find(u32 addr) 69struct tipc_node *tipc_node_find(u32 addr)
70{ 70{
71 struct tipc_node *node; 71 struct tipc_node *node;
72 struct hlist_node *pos;
73 72
74 if (unlikely(!in_own_cluster_exact(addr))) 73 if (unlikely(!in_own_cluster_exact(addr)))
75 return NULL; 74 return NULL;
76 75
77 hlist_for_each_entry(node, pos, &node_htable[tipc_hashfn(addr)], hash) { 76 hlist_for_each_entry(node, &node_htable[tipc_hashfn(addr)], hash) {
78 if (node->addr == addr) 77 if (node->addr == addr)
79 return node; 78 return node;
80 } 79 }
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index 87d284289012..51be64f163ec 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -263,9 +263,8 @@ static struct sock *__unix_find_socket_byname(struct net *net,
263 int len, int type, unsigned int hash) 263 int len, int type, unsigned int hash)
264{ 264{
265 struct sock *s; 265 struct sock *s;
266 struct hlist_node *node;
267 266
268 sk_for_each(s, node, &unix_socket_table[hash ^ type]) { 267 sk_for_each(s, &unix_socket_table[hash ^ type]) {
269 struct unix_sock *u = unix_sk(s); 268 struct unix_sock *u = unix_sk(s);
270 269
271 if (!net_eq(sock_net(s), net)) 270 if (!net_eq(sock_net(s), net))
@@ -298,10 +297,9 @@ static inline struct sock *unix_find_socket_byname(struct net *net,
298static struct sock *unix_find_socket_byinode(struct inode *i) 297static struct sock *unix_find_socket_byinode(struct inode *i)
299{ 298{
300 struct sock *s; 299 struct sock *s;
301 struct hlist_node *node;
302 300
303 spin_lock(&unix_table_lock); 301 spin_lock(&unix_table_lock);
304 sk_for_each(s, node, 302 sk_for_each(s,
305 &unix_socket_table[i->i_ino & (UNIX_HASH_SIZE - 1)]) { 303 &unix_socket_table[i->i_ino & (UNIX_HASH_SIZE - 1)]) {
306 struct dentry *dentry = unix_sk(s)->path.dentry; 304 struct dentry *dentry = unix_sk(s)->path.dentry;
307 305
diff --git a/net/unix/diag.c b/net/unix/diag.c
index 5ac19dc1d5e4..d591091603bf 100644
--- a/net/unix/diag.c
+++ b/net/unix/diag.c
@@ -192,10 +192,9 @@ static int unix_diag_dump(struct sk_buff *skb, struct netlink_callback *cb)
192 slot < ARRAY_SIZE(unix_socket_table); 192 slot < ARRAY_SIZE(unix_socket_table);
193 s_num = 0, slot++) { 193 s_num = 0, slot++) {
194 struct sock *sk; 194 struct sock *sk;
195 struct hlist_node *node;
196 195
197 num = 0; 196 num = 0;
198 sk_for_each(sk, node, &unix_socket_table[slot]) { 197 sk_for_each(sk, &unix_socket_table[slot]) {
199 if (!net_eq(sock_net(sk), net)) 198 if (!net_eq(sock_net(sk), net))
200 continue; 199 continue;
201 if (num < s_num) 200 if (num < s_num)
@@ -226,9 +225,7 @@ static struct sock *unix_lookup_by_ino(int ino)
226 225
227 spin_lock(&unix_table_lock); 226 spin_lock(&unix_table_lock);
228 for (i = 0; i < ARRAY_SIZE(unix_socket_table); i++) { 227 for (i = 0; i < ARRAY_SIZE(unix_socket_table); i++) {
229 struct hlist_node *node; 228 sk_for_each(sk, &unix_socket_table[i])
230
231 sk_for_each(sk, node, &unix_socket_table[i])
232 if (ino == sock_i_ino(sk)) { 229 if (ino == sock_i_ino(sk)) {
233 sock_hold(sk); 230 sock_hold(sk);
234 spin_unlock(&unix_table_lock); 231 spin_unlock(&unix_table_lock);
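
Where the cursor was the only declaration in an inner scope — as in unix_lookup_by_ino() above — the declaration and its trailing blank line go away and the walk moves up to take their place:

	/* before: an inner declaration existed only to hold the cursor */
	for (i = 0; i < ARRAY_SIZE(unix_socket_table); i++) {
		struct hlist_node *node;

		sk_for_each(sk, node, &unix_socket_table[i])

	/* after */
	for (i = 0; i < ARRAY_SIZE(unix_socket_table); i++) {
		sk_for_each(sk, &unix_socket_table[i])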
diff --git a/net/x25/af_x25.c b/net/x25/af_x25.c
index a306bc66000e..37ca9694aabe 100644
--- a/net/x25/af_x25.c
+++ b/net/x25/af_x25.c
@@ -208,11 +208,10 @@ static void x25_remove_socket(struct sock *sk)
208static void x25_kill_by_device(struct net_device *dev) 208static void x25_kill_by_device(struct net_device *dev)
209{ 209{
210 struct sock *s; 210 struct sock *s;
211 struct hlist_node *node;
212 211
213 write_lock_bh(&x25_list_lock); 212 write_lock_bh(&x25_list_lock);
214 213
215 sk_for_each(s, node, &x25_list) 214 sk_for_each(s, &x25_list)
216 if (x25_sk(s)->neighbour && x25_sk(s)->neighbour->dev == dev) 215 if (x25_sk(s)->neighbour && x25_sk(s)->neighbour->dev == dev)
217 x25_disconnect(s, ENETUNREACH, 0, 0); 216 x25_disconnect(s, ENETUNREACH, 0, 0);
218 217
@@ -280,12 +279,11 @@ static struct sock *x25_find_listener(struct x25_address *addr,
280{ 279{
281 struct sock *s; 280 struct sock *s;
282 struct sock *next_best; 281 struct sock *next_best;
283 struct hlist_node *node;
284 282
285 read_lock_bh(&x25_list_lock); 283 read_lock_bh(&x25_list_lock);
286 next_best = NULL; 284 next_best = NULL;
287 285
288 sk_for_each(s, node, &x25_list) 286 sk_for_each(s, &x25_list)
289 if ((!strcmp(addr->x25_addr, 287 if ((!strcmp(addr->x25_addr,
290 x25_sk(s)->source_addr.x25_addr) || 288 x25_sk(s)->source_addr.x25_addr) ||
291 !strcmp(addr->x25_addr, 289 !strcmp(addr->x25_addr,
@@ -323,9 +321,8 @@ found:
323static struct sock *__x25_find_socket(unsigned int lci, struct x25_neigh *nb) 321static struct sock *__x25_find_socket(unsigned int lci, struct x25_neigh *nb)
324{ 322{
325 struct sock *s; 323 struct sock *s;
326 struct hlist_node *node;
327 324
328 sk_for_each(s, node, &x25_list) 325 sk_for_each(s, &x25_list)
329 if (x25_sk(s)->lci == lci && x25_sk(s)->neighbour == nb) { 326 if (x25_sk(s)->lci == lci && x25_sk(s)->neighbour == nb) {
330 sock_hold(s); 327 sock_hold(s);
331 goto found; 328 goto found;
@@ -1782,11 +1779,10 @@ static struct notifier_block x25_dev_notifier = {
1782void x25_kill_by_neigh(struct x25_neigh *nb) 1779void x25_kill_by_neigh(struct x25_neigh *nb)
1783{ 1780{
1784 struct sock *s; 1781 struct sock *s;
1785 struct hlist_node *node;
1786 1782
1787 write_lock_bh(&x25_list_lock); 1783 write_lock_bh(&x25_list_lock);
1788 1784
1789 sk_for_each(s, node, &x25_list) 1785 sk_for_each(s, &x25_list)
1790 if (x25_sk(s)->neighbour == nb) 1786 if (x25_sk(s)->neighbour == nb)
1791 x25_disconnect(s, ENETUNREACH, 0, 0); 1787 x25_disconnect(s, ENETUNREACH, 0, 0);
1792 1788
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index 5b47180986f8..167c67d46c6a 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -379,27 +379,27 @@ static void xfrm_dst_hash_transfer(struct hlist_head *list,
379 struct hlist_head *ndsttable, 379 struct hlist_head *ndsttable,
380 unsigned int nhashmask) 380 unsigned int nhashmask)
381{ 381{
382 struct hlist_node *entry, *tmp, *entry0 = NULL; 382 struct hlist_node *tmp, *entry0 = NULL;
383 struct xfrm_policy *pol; 383 struct xfrm_policy *pol;
384 unsigned int h0 = 0; 384 unsigned int h0 = 0;
385 385
386redo: 386redo:
387 hlist_for_each_entry_safe(pol, entry, tmp, list, bydst) { 387 hlist_for_each_entry_safe(pol, tmp, list, bydst) {
388 unsigned int h; 388 unsigned int h;
389 389
390 h = __addr_hash(&pol->selector.daddr, &pol->selector.saddr, 390 h = __addr_hash(&pol->selector.daddr, &pol->selector.saddr,
391 pol->family, nhashmask); 391 pol->family, nhashmask);
392 if (!entry0) { 392 if (!entry0) {
393 hlist_del(entry); 393 hlist_del(&pol->bydst);
394 hlist_add_head(&pol->bydst, ndsttable+h); 394 hlist_add_head(&pol->bydst, ndsttable+h);
395 h0 = h; 395 h0 = h;
396 } else { 396 } else {
397 if (h != h0) 397 if (h != h0)
398 continue; 398 continue;
399 hlist_del(entry); 399 hlist_del(&pol->bydst);
400 hlist_add_after(entry0, &pol->bydst); 400 hlist_add_after(entry0, &pol->bydst);
401 } 401 }
402 entry0 = entry; 402 entry0 = &pol->bydst;
403 } 403 }
404 if (!hlist_empty(list)) { 404 if (!hlist_empty(list)) {
405 entry0 = NULL; 405 entry0 = NULL;
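
xfrm_dst_hash_transfer() above is one of the few call sites that used the raw node pointer for more than iteration: entry also fed hlist_del() and was saved in entry0 as the anchor for hlist_add_after(). Since entry always pointed at the node embedded in the current policy, every such use is rewritten as &pol->bydst — the same address the old cursor held — so the rehash splicing behaves exactly as before:

	hlist_del(&pol->bydst);
	hlist_add_after(entry0, &pol->bydst);
	...
	entry0 = &pol->bydst;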
@@ -411,10 +411,10 @@ static void xfrm_idx_hash_transfer(struct hlist_head *list,
411 struct hlist_head *nidxtable, 411 struct hlist_head *nidxtable,
412 unsigned int nhashmask) 412 unsigned int nhashmask)
413{ 413{
414 struct hlist_node *entry, *tmp; 414 struct hlist_node *tmp;
415 struct xfrm_policy *pol; 415 struct xfrm_policy *pol;
416 416
417 hlist_for_each_entry_safe(pol, entry, tmp, list, byidx) { 417 hlist_for_each_entry_safe(pol, tmp, list, byidx) {
418 unsigned int h; 418 unsigned int h;
419 419
420 h = __idx_hash(pol->index, nhashmask); 420 h = __idx_hash(pol->index, nhashmask);
@@ -544,7 +544,6 @@ static u32 xfrm_gen_index(struct net *net, int dir)
544 static u32 idx_generator; 544 static u32 idx_generator;
545 545
546 for (;;) { 546 for (;;) {
547 struct hlist_node *entry;
548 struct hlist_head *list; 547 struct hlist_head *list;
549 struct xfrm_policy *p; 548 struct xfrm_policy *p;
550 u32 idx; 549 u32 idx;
@@ -556,7 +555,7 @@ static u32 xfrm_gen_index(struct net *net, int dir)
556 idx = 8; 555 idx = 8;
557 list = net->xfrm.policy_byidx + idx_hash(net, idx); 556 list = net->xfrm.policy_byidx + idx_hash(net, idx);
558 found = 0; 557 found = 0;
559 hlist_for_each_entry(p, entry, list, byidx) { 558 hlist_for_each_entry(p, list, byidx) {
560 if (p->index == idx) { 559 if (p->index == idx) {
561 found = 1; 560 found = 1;
562 break; 561 break;
@@ -628,13 +627,13 @@ int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl)
628 struct xfrm_policy *pol; 627 struct xfrm_policy *pol;
629 struct xfrm_policy *delpol; 628 struct xfrm_policy *delpol;
630 struct hlist_head *chain; 629 struct hlist_head *chain;
631 struct hlist_node *entry, *newpos; 630 struct hlist_node *newpos;
632 631
633 write_lock_bh(&xfrm_policy_lock); 632 write_lock_bh(&xfrm_policy_lock);
634 chain = policy_hash_bysel(net, &policy->selector, policy->family, dir); 633 chain = policy_hash_bysel(net, &policy->selector, policy->family, dir);
635 delpol = NULL; 634 delpol = NULL;
636 newpos = NULL; 635 newpos = NULL;
637 hlist_for_each_entry(pol, entry, chain, bydst) { 636 hlist_for_each_entry(pol, chain, bydst) {
638 if (pol->type == policy->type && 637 if (pol->type == policy->type &&
639 !selector_cmp(&pol->selector, &policy->selector) && 638 !selector_cmp(&pol->selector, &policy->selector) &&
640 xfrm_policy_mark_match(policy, pol) && 639 xfrm_policy_mark_match(policy, pol) &&
@@ -691,13 +690,12 @@ struct xfrm_policy *xfrm_policy_bysel_ctx(struct net *net, u32 mark, u8 type,
691{ 690{
692 struct xfrm_policy *pol, *ret; 691 struct xfrm_policy *pol, *ret;
693 struct hlist_head *chain; 692 struct hlist_head *chain;
694 struct hlist_node *entry;
695 693
696 *err = 0; 694 *err = 0;
697 write_lock_bh(&xfrm_policy_lock); 695 write_lock_bh(&xfrm_policy_lock);
698 chain = policy_hash_bysel(net, sel, sel->family, dir); 696 chain = policy_hash_bysel(net, sel, sel->family, dir);
699 ret = NULL; 697 ret = NULL;
700 hlist_for_each_entry(pol, entry, chain, bydst) { 698 hlist_for_each_entry(pol, chain, bydst) {
701 if (pol->type == type && 699 if (pol->type == type &&
702 (mark & pol->mark.m) == pol->mark.v && 700 (mark & pol->mark.m) == pol->mark.v &&
703 !selector_cmp(sel, &pol->selector) && 701 !selector_cmp(sel, &pol->selector) &&
@@ -729,7 +727,6 @@ struct xfrm_policy *xfrm_policy_byid(struct net *net, u32 mark, u8 type,
729{ 727{
730 struct xfrm_policy *pol, *ret; 728 struct xfrm_policy *pol, *ret;
731 struct hlist_head *chain; 729 struct hlist_head *chain;
732 struct hlist_node *entry;
733 730
734 *err = -ENOENT; 731 *err = -ENOENT;
735 if (xfrm_policy_id2dir(id) != dir) 732 if (xfrm_policy_id2dir(id) != dir)
@@ -739,7 +736,7 @@ struct xfrm_policy *xfrm_policy_byid(struct net *net, u32 mark, u8 type,
739 write_lock_bh(&xfrm_policy_lock); 736 write_lock_bh(&xfrm_policy_lock);
740 chain = net->xfrm.policy_byidx + idx_hash(net, id); 737 chain = net->xfrm.policy_byidx + idx_hash(net, id);
741 ret = NULL; 738 ret = NULL;
742 hlist_for_each_entry(pol, entry, chain, byidx) { 739 hlist_for_each_entry(pol, chain, byidx) {
743 if (pol->type == type && pol->index == id && 740 if (pol->type == type && pol->index == id &&
744 (mark & pol->mark.m) == pol->mark.v) { 741 (mark & pol->mark.m) == pol->mark.v) {
745 xfrm_pol_hold(pol); 742 xfrm_pol_hold(pol);
@@ -772,10 +769,9 @@ xfrm_policy_flush_secctx_check(struct net *net, u8 type, struct xfrm_audit *audi
772 769
773 for (dir = 0; dir < XFRM_POLICY_MAX; dir++) { 770 for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
774 struct xfrm_policy *pol; 771 struct xfrm_policy *pol;
775 struct hlist_node *entry;
776 int i; 772 int i;
777 773
778 hlist_for_each_entry(pol, entry, 774 hlist_for_each_entry(pol,
779 &net->xfrm.policy_inexact[dir], bydst) { 775 &net->xfrm.policy_inexact[dir], bydst) {
780 if (pol->type != type) 776 if (pol->type != type)
781 continue; 777 continue;
@@ -789,7 +785,7 @@ xfrm_policy_flush_secctx_check(struct net *net, u8 type, struct xfrm_audit *audi
789 } 785 }
790 } 786 }
791 for (i = net->xfrm.policy_bydst[dir].hmask; i >= 0; i--) { 787 for (i = net->xfrm.policy_bydst[dir].hmask; i >= 0; i--) {
792 hlist_for_each_entry(pol, entry, 788 hlist_for_each_entry(pol,
793 net->xfrm.policy_bydst[dir].table + i, 789 net->xfrm.policy_bydst[dir].table + i,
794 bydst) { 790 bydst) {
795 if (pol->type != type) 791 if (pol->type != type)
@@ -828,11 +824,10 @@ int xfrm_policy_flush(struct net *net, u8 type, struct xfrm_audit *audit_info)
828 824
829 for (dir = 0; dir < XFRM_POLICY_MAX; dir++) { 825 for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
830 struct xfrm_policy *pol; 826 struct xfrm_policy *pol;
831 struct hlist_node *entry;
832 int i; 827 int i;
833 828
834 again1: 829 again1:
835 hlist_for_each_entry(pol, entry, 830 hlist_for_each_entry(pol,
836 &net->xfrm.policy_inexact[dir], bydst) { 831 &net->xfrm.policy_inexact[dir], bydst) {
837 if (pol->type != type) 832 if (pol->type != type)
838 continue; 833 continue;
@@ -852,7 +847,7 @@ int xfrm_policy_flush(struct net *net, u8 type, struct xfrm_audit *audit_info)
852 847
853 for (i = net->xfrm.policy_bydst[dir].hmask; i >= 0; i--) { 848 for (i = net->xfrm.policy_bydst[dir].hmask; i >= 0; i--) {
854 again2: 849 again2:
855 hlist_for_each_entry(pol, entry, 850 hlist_for_each_entry(pol,
856 net->xfrm.policy_bydst[dir].table + i, 851 net->xfrm.policy_bydst[dir].table + i,
857 bydst) { 852 bydst) {
858 if (pol->type != type) 853 if (pol->type != type)
@@ -980,7 +975,6 @@ static struct xfrm_policy *xfrm_policy_lookup_bytype(struct net *net, u8 type,
980 int err; 975 int err;
981 struct xfrm_policy *pol, *ret; 976 struct xfrm_policy *pol, *ret;
982 const xfrm_address_t *daddr, *saddr; 977 const xfrm_address_t *daddr, *saddr;
983 struct hlist_node *entry;
984 struct hlist_head *chain; 978 struct hlist_head *chain;
985 u32 priority = ~0U; 979 u32 priority = ~0U;
986 980
@@ -992,7 +986,7 @@ static struct xfrm_policy *xfrm_policy_lookup_bytype(struct net *net, u8 type,
992 read_lock_bh(&xfrm_policy_lock); 986 read_lock_bh(&xfrm_policy_lock);
993 chain = policy_hash_direct(net, daddr, saddr, family, dir); 987 chain = policy_hash_direct(net, daddr, saddr, family, dir);
994 ret = NULL; 988 ret = NULL;
995 hlist_for_each_entry(pol, entry, chain, bydst) { 989 hlist_for_each_entry(pol, chain, bydst) {
996 err = xfrm_policy_match(pol, fl, type, family, dir); 990 err = xfrm_policy_match(pol, fl, type, family, dir);
997 if (err) { 991 if (err) {
998 if (err == -ESRCH) 992 if (err == -ESRCH)
@@ -1008,7 +1002,7 @@ static struct xfrm_policy *xfrm_policy_lookup_bytype(struct net *net, u8 type,
1008 } 1002 }
1009 } 1003 }
1010 chain = &net->xfrm.policy_inexact[dir]; 1004 chain = &net->xfrm.policy_inexact[dir];
1011 hlist_for_each_entry(pol, entry, chain, bydst) { 1005 hlist_for_each_entry(pol, chain, bydst) {
1012 err = xfrm_policy_match(pol, fl, type, family, dir); 1006 err = xfrm_policy_match(pol, fl, type, family, dir);
1013 if (err) { 1007 if (err) {
1014 if (err == -ESRCH) 1008 if (err == -ESRCH)
@@ -3041,13 +3035,12 @@ static struct xfrm_policy * xfrm_migrate_policy_find(const struct xfrm_selector
3041 u8 dir, u8 type) 3035 u8 dir, u8 type)
3042{ 3036{
3043 struct xfrm_policy *pol, *ret = NULL; 3037 struct xfrm_policy *pol, *ret = NULL;
3044 struct hlist_node *entry;
3045 struct hlist_head *chain; 3038 struct hlist_head *chain;
3046 u32 priority = ~0U; 3039 u32 priority = ~0U;
3047 3040
3048 read_lock_bh(&xfrm_policy_lock); 3041 read_lock_bh(&xfrm_policy_lock);
3049 chain = policy_hash_direct(&init_net, &sel->daddr, &sel->saddr, sel->family, dir); 3042 chain = policy_hash_direct(&init_net, &sel->daddr, &sel->saddr, sel->family, dir);
3050 hlist_for_each_entry(pol, entry, chain, bydst) { 3043 hlist_for_each_entry(pol, chain, bydst) {
3051 if (xfrm_migrate_selector_match(sel, &pol->selector) && 3044 if (xfrm_migrate_selector_match(sel, &pol->selector) &&
3052 pol->type == type) { 3045 pol->type == type) {
3053 ret = pol; 3046 ret = pol;
@@ -3056,7 +3049,7 @@ static struct xfrm_policy * xfrm_migrate_policy_find(const struct xfrm_selector
3056 } 3049 }
3057 } 3050 }
3058 chain = &init_net.xfrm.policy_inexact[dir]; 3051 chain = &init_net.xfrm.policy_inexact[dir];
3059 hlist_for_each_entry(pol, entry, chain, bydst) { 3052 hlist_for_each_entry(pol, chain, bydst) {
3060 if (xfrm_migrate_selector_match(sel, &pol->selector) && 3053 if (xfrm_migrate_selector_match(sel, &pol->selector) &&
3061 pol->type == type && 3054 pol->type == type &&
3062 pol->priority < priority) { 3055 pol->priority < priority) {
diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
index ae01bdbcb294..2c341bdaf47c 100644
--- a/net/xfrm/xfrm_state.c
+++ b/net/xfrm/xfrm_state.c
@@ -72,10 +72,10 @@ static void xfrm_hash_transfer(struct hlist_head *list,
72 struct hlist_head *nspitable, 72 struct hlist_head *nspitable,
73 unsigned int nhashmask) 73 unsigned int nhashmask)
74{ 74{
75 struct hlist_node *entry, *tmp; 75 struct hlist_node *tmp;
76 struct xfrm_state *x; 76 struct xfrm_state *x;
77 77
78 hlist_for_each_entry_safe(x, entry, tmp, list, bydst) { 78 hlist_for_each_entry_safe(x, tmp, list, bydst) {
79 unsigned int h; 79 unsigned int h;
80 80
81 h = __xfrm_dst_hash(&x->id.daddr, &x->props.saddr, 81 h = __xfrm_dst_hash(&x->id.daddr, &x->props.saddr,
@@ -368,14 +368,14 @@ static void xfrm_state_gc_task(struct work_struct *work)
368{ 368{
369 struct net *net = container_of(work, struct net, xfrm.state_gc_work); 369 struct net *net = container_of(work, struct net, xfrm.state_gc_work);
370 struct xfrm_state *x; 370 struct xfrm_state *x;
371 struct hlist_node *entry, *tmp; 371 struct hlist_node *tmp;
372 struct hlist_head gc_list; 372 struct hlist_head gc_list;
373 373
374 spin_lock_bh(&xfrm_state_gc_lock); 374 spin_lock_bh(&xfrm_state_gc_lock);
375 hlist_move_list(&net->xfrm.state_gc_list, &gc_list); 375 hlist_move_list(&net->xfrm.state_gc_list, &gc_list);
376 spin_unlock_bh(&xfrm_state_gc_lock); 376 spin_unlock_bh(&xfrm_state_gc_lock);
377 377
378 hlist_for_each_entry_safe(x, entry, tmp, &gc_list, gclist) 378 hlist_for_each_entry_safe(x, tmp, &gc_list, gclist)
379 xfrm_state_gc_destroy(x); 379 xfrm_state_gc_destroy(x);
380 380
381 wake_up(&net->xfrm.km_waitq); 381 wake_up(&net->xfrm.km_waitq);
@@ -577,10 +577,9 @@ xfrm_state_flush_secctx_check(struct net *net, u8 proto, struct xfrm_audit *audi
577 int i, err = 0; 577 int i, err = 0;
578 578
579 for (i = 0; i <= net->xfrm.state_hmask; i++) { 579 for (i = 0; i <= net->xfrm.state_hmask; i++) {
580 struct hlist_node *entry;
581 struct xfrm_state *x; 580 struct xfrm_state *x;
582 581
583 hlist_for_each_entry(x, entry, net->xfrm.state_bydst+i, bydst) { 582 hlist_for_each_entry(x, net->xfrm.state_bydst+i, bydst) {
584 if (xfrm_id_proto_match(x->id.proto, proto) && 583 if (xfrm_id_proto_match(x->id.proto, proto) &&
585 (err = security_xfrm_state_delete(x)) != 0) { 584 (err = security_xfrm_state_delete(x)) != 0) {
586 xfrm_audit_state_delete(x, 0, 585 xfrm_audit_state_delete(x, 0,
@@ -613,10 +612,9 @@ int xfrm_state_flush(struct net *net, u8 proto, struct xfrm_audit *audit_info)
613 612
614 err = -ESRCH; 613 err = -ESRCH;
615 for (i = 0; i <= net->xfrm.state_hmask; i++) { 614 for (i = 0; i <= net->xfrm.state_hmask; i++) {
616 struct hlist_node *entry;
617 struct xfrm_state *x; 615 struct xfrm_state *x;
618restart: 616restart:
619 hlist_for_each_entry(x, entry, net->xfrm.state_bydst+i, bydst) { 617 hlist_for_each_entry(x, net->xfrm.state_bydst+i, bydst) {
620 if (!xfrm_state_kern(x) && 618 if (!xfrm_state_kern(x) &&
621 xfrm_id_proto_match(x->id.proto, proto)) { 619 xfrm_id_proto_match(x->id.proto, proto)) {
622 xfrm_state_hold(x); 620 xfrm_state_hold(x);
@@ -685,9 +683,8 @@ static struct xfrm_state *__xfrm_state_lookup(struct net *net, u32 mark,
685{ 683{
686 unsigned int h = xfrm_spi_hash(net, daddr, spi, proto, family); 684 unsigned int h = xfrm_spi_hash(net, daddr, spi, proto, family);
687 struct xfrm_state *x; 685 struct xfrm_state *x;
688 struct hlist_node *entry;
689 686
690 hlist_for_each_entry(x, entry, net->xfrm.state_byspi+h, byspi) { 687 hlist_for_each_entry(x, net->xfrm.state_byspi+h, byspi) {
691 if (x->props.family != family || 688 if (x->props.family != family ||
692 x->id.spi != spi || 689 x->id.spi != spi ||
693 x->id.proto != proto || 690 x->id.proto != proto ||
@@ -710,9 +707,8 @@ static struct xfrm_state *__xfrm_state_lookup_byaddr(struct net *net, u32 mark,
710{ 707{
711 unsigned int h = xfrm_src_hash(net, daddr, saddr, family); 708 unsigned int h = xfrm_src_hash(net, daddr, saddr, family);
712 struct xfrm_state *x; 709 struct xfrm_state *x;
713 struct hlist_node *entry;
714 710
715 hlist_for_each_entry(x, entry, net->xfrm.state_bysrc+h, bysrc) { 711 hlist_for_each_entry(x, net->xfrm.state_bysrc+h, bysrc) {
716 if (x->props.family != family || 712 if (x->props.family != family ||
717 x->id.proto != proto || 713 x->id.proto != proto ||
718 !xfrm_addr_equal(&x->id.daddr, daddr, family) || 714 !xfrm_addr_equal(&x->id.daddr, daddr, family) ||
@@ -798,7 +794,6 @@ xfrm_state_find(const xfrm_address_t *daddr, const xfrm_address_t *saddr,
798 static xfrm_address_t saddr_wildcard = { }; 794 static xfrm_address_t saddr_wildcard = { };
799 struct net *net = xp_net(pol); 795 struct net *net = xp_net(pol);
800 unsigned int h, h_wildcard; 796 unsigned int h, h_wildcard;
801 struct hlist_node *entry;
802 struct xfrm_state *x, *x0, *to_put; 797 struct xfrm_state *x, *x0, *to_put;
803 int acquire_in_progress = 0; 798 int acquire_in_progress = 0;
804 int error = 0; 799 int error = 0;
@@ -810,7 +805,7 @@ xfrm_state_find(const xfrm_address_t *daddr, const xfrm_address_t *saddr,
810 805
811 spin_lock_bh(&xfrm_state_lock); 806 spin_lock_bh(&xfrm_state_lock);
812 h = xfrm_dst_hash(net, daddr, saddr, tmpl->reqid, encap_family); 807 h = xfrm_dst_hash(net, daddr, saddr, tmpl->reqid, encap_family);
813 hlist_for_each_entry(x, entry, net->xfrm.state_bydst+h, bydst) { 808 hlist_for_each_entry(x, net->xfrm.state_bydst+h, bydst) {
814 if (x->props.family == encap_family && 809 if (x->props.family == encap_family &&
815 x->props.reqid == tmpl->reqid && 810 x->props.reqid == tmpl->reqid &&
816 (mark & x->mark.m) == x->mark.v && 811 (mark & x->mark.m) == x->mark.v &&
@@ -826,7 +821,7 @@ xfrm_state_find(const xfrm_address_t *daddr, const xfrm_address_t *saddr,
826 goto found; 821 goto found;
827 822
828 h_wildcard = xfrm_dst_hash(net, daddr, &saddr_wildcard, tmpl->reqid, encap_family); 823 h_wildcard = xfrm_dst_hash(net, daddr, &saddr_wildcard, tmpl->reqid, encap_family);
829 hlist_for_each_entry(x, entry, net->xfrm.state_bydst+h_wildcard, bydst) { 824 hlist_for_each_entry(x, net->xfrm.state_bydst+h_wildcard, bydst) {
830 if (x->props.family == encap_family && 825 if (x->props.family == encap_family &&
831 x->props.reqid == tmpl->reqid && 826 x->props.reqid == tmpl->reqid &&
832 (mark & x->mark.m) == x->mark.v && 827 (mark & x->mark.m) == x->mark.v &&
@@ -906,11 +901,10 @@ xfrm_stateonly_find(struct net *net, u32 mark,
906{ 901{
907 unsigned int h; 902 unsigned int h;
908 struct xfrm_state *rx = NULL, *x = NULL; 903 struct xfrm_state *rx = NULL, *x = NULL;
909 struct hlist_node *entry;
910 904
911 spin_lock(&xfrm_state_lock); 905 spin_lock(&xfrm_state_lock);
912 h = xfrm_dst_hash(net, daddr, saddr, reqid, family); 906 h = xfrm_dst_hash(net, daddr, saddr, reqid, family);
913 hlist_for_each_entry(x, entry, net->xfrm.state_bydst+h, bydst) { 907 hlist_for_each_entry(x, net->xfrm.state_bydst+h, bydst) {
914 if (x->props.family == family && 908 if (x->props.family == family &&
915 x->props.reqid == reqid && 909 x->props.reqid == reqid &&
916 (mark & x->mark.m) == x->mark.v && 910 (mark & x->mark.m) == x->mark.v &&
@@ -972,12 +966,11 @@ static void __xfrm_state_bump_genids(struct xfrm_state *xnew)
972 unsigned short family = xnew->props.family; 966 unsigned short family = xnew->props.family;
973 u32 reqid = xnew->props.reqid; 967 u32 reqid = xnew->props.reqid;
974 struct xfrm_state *x; 968 struct xfrm_state *x;
975 struct hlist_node *entry;
976 unsigned int h; 969 unsigned int h;
977 u32 mark = xnew->mark.v & xnew->mark.m; 970 u32 mark = xnew->mark.v & xnew->mark.m;
978 971
979 h = xfrm_dst_hash(net, &xnew->id.daddr, &xnew->props.saddr, reqid, family); 972 h = xfrm_dst_hash(net, &xnew->id.daddr, &xnew->props.saddr, reqid, family);
980 hlist_for_each_entry(x, entry, net->xfrm.state_bydst+h, bydst) { 973 hlist_for_each_entry(x, net->xfrm.state_bydst+h, bydst) {
981 if (x->props.family == family && 974 if (x->props.family == family &&
982 x->props.reqid == reqid && 975 x->props.reqid == reqid &&
983 (mark & x->mark.m) == x->mark.v && 976 (mark & x->mark.m) == x->mark.v &&
@@ -1004,11 +997,10 @@ static struct xfrm_state *__find_acq_core(struct net *net, struct xfrm_mark *m,
1004 const xfrm_address_t *saddr, int create) 997 const xfrm_address_t *saddr, int create)
1005{ 998{
1006 unsigned int h = xfrm_dst_hash(net, daddr, saddr, reqid, family); 999 unsigned int h = xfrm_dst_hash(net, daddr, saddr, reqid, family);
1007 struct hlist_node *entry;
1008 struct xfrm_state *x; 1000 struct xfrm_state *x;
1009 u32 mark = m->v & m->m; 1001 u32 mark = m->v & m->m;
1010 1002
1011 hlist_for_each_entry(x, entry, net->xfrm.state_bydst+h, bydst) { 1003 hlist_for_each_entry(x, net->xfrm.state_bydst+h, bydst) {
1012 if (x->props.reqid != reqid || 1004 if (x->props.reqid != reqid ||
1013 x->props.mode != mode || 1005 x->props.mode != mode ||
1014 x->props.family != family || 1006 x->props.family != family ||
@@ -1215,12 +1207,11 @@ struct xfrm_state * xfrm_migrate_state_find(struct xfrm_migrate *m)
1215{ 1207{
1216 unsigned int h; 1208 unsigned int h;
1217 struct xfrm_state *x; 1209 struct xfrm_state *x;
1218 struct hlist_node *entry;
1219 1210
1220 if (m->reqid) { 1211 if (m->reqid) {
1221 h = xfrm_dst_hash(&init_net, &m->old_daddr, &m->old_saddr, 1212 h = xfrm_dst_hash(&init_net, &m->old_daddr, &m->old_saddr,
1222 m->reqid, m->old_family); 1213 m->reqid, m->old_family);
1223 hlist_for_each_entry(x, entry, init_net.xfrm.state_bydst+h, bydst) { 1214 hlist_for_each_entry(x, init_net.xfrm.state_bydst+h, bydst) {
1224 if (x->props.mode != m->mode || 1215 if (x->props.mode != m->mode ||
1225 x->id.proto != m->proto) 1216 x->id.proto != m->proto)
1226 continue; 1217 continue;
@@ -1237,7 +1228,7 @@ struct xfrm_state * xfrm_migrate_state_find(struct xfrm_migrate *m)
1237 } else { 1228 } else {
1238 h = xfrm_src_hash(&init_net, &m->old_daddr, &m->old_saddr, 1229 h = xfrm_src_hash(&init_net, &m->old_daddr, &m->old_saddr,
1239 m->old_family); 1230 m->old_family);
1240 hlist_for_each_entry(x, entry, init_net.xfrm.state_bysrc+h, bysrc) { 1231 hlist_for_each_entry(x, init_net.xfrm.state_bysrc+h, bysrc) {
1241 if (x->props.mode != m->mode || 1232 if (x->props.mode != m->mode ||
1242 x->id.proto != m->proto) 1233 x->id.proto != m->proto)
1243 continue; 1234 continue;
@@ -1466,10 +1457,9 @@ static struct xfrm_state *__xfrm_find_acq_byseq(struct net *net, u32 mark, u32 s
1466 int i; 1457 int i;
1467 1458
1468 for (i = 0; i <= net->xfrm.state_hmask; i++) { 1459 for (i = 0; i <= net->xfrm.state_hmask; i++) {
1469 struct hlist_node *entry;
1470 struct xfrm_state *x; 1460 struct xfrm_state *x;
1471 1461
1472 hlist_for_each_entry(x, entry, net->xfrm.state_bydst+i, bydst) { 1462 hlist_for_each_entry(x, net->xfrm.state_bydst+i, bydst) {
1473 if (x->km.seq == seq && 1463 if (x->km.seq == seq &&
1474 (mark & x->mark.m) == x->mark.v && 1464 (mark & x->mark.m) == x->mark.v &&
1475 x->km.state == XFRM_STATE_ACQ) { 1465 x->km.state == XFRM_STATE_ACQ) {
diff --git a/security/integrity/ima/ima_queue.c b/security/integrity/ima/ima_queue.c
index 55a6271bce7a..ff63fe00c195 100644
--- a/security/integrity/ima/ima_queue.c
+++ b/security/integrity/ima/ima_queue.c
@@ -45,12 +45,11 @@ static struct ima_queue_entry *ima_lookup_digest_entry(u8 *digest_value)
45{ 45{
46 struct ima_queue_entry *qe, *ret = NULL; 46 struct ima_queue_entry *qe, *ret = NULL;
47 unsigned int key; 47 unsigned int key;
48 struct hlist_node *pos;
49 int rc; 48 int rc;
50 49
51 key = ima_hash_key(digest_value); 50 key = ima_hash_key(digest_value);
52 rcu_read_lock(); 51 rcu_read_lock();
53 hlist_for_each_entry_rcu(qe, pos, &ima_htable.queue[key], hnext) { 52 hlist_for_each_entry_rcu(qe, &ima_htable.queue[key], hnext) {
54 rc = memcmp(qe->entry->digest, digest_value, IMA_DIGEST_SIZE); 53 rc = memcmp(qe->entry->digest, digest_value, IMA_DIGEST_SIZE);
55 if (rc == 0) { 54 if (rc == 0) {
56 ret = qe; 55 ret = qe;
diff --git a/security/selinux/avc.c b/security/selinux/avc.c
index 4d3fab47e643..dad36a6ab45f 100644
--- a/security/selinux/avc.c
+++ b/security/selinux/avc.c
@@ -188,11 +188,9 @@ int avc_get_hash_stats(char *page)
188 for (i = 0; i < AVC_CACHE_SLOTS; i++) { 188 for (i = 0; i < AVC_CACHE_SLOTS; i++) {
189 head = &avc_cache.slots[i]; 189 head = &avc_cache.slots[i];
190 if (!hlist_empty(head)) { 190 if (!hlist_empty(head)) {
191 struct hlist_node *next;
192
193 slots_used++; 191 slots_used++;
194 chain_len = 0; 192 chain_len = 0;
195 hlist_for_each_entry_rcu(node, next, head, list) 193 hlist_for_each_entry_rcu(node, head, list)
196 chain_len++; 194 chain_len++;
197 if (chain_len > max_chain_len) 195 if (chain_len > max_chain_len)
198 max_chain_len = chain_len; 196 max_chain_len = chain_len;
@@ -241,7 +239,6 @@ static inline int avc_reclaim_node(void)
241 int hvalue, try, ecx; 239 int hvalue, try, ecx;
242 unsigned long flags; 240 unsigned long flags;
243 struct hlist_head *head; 241 struct hlist_head *head;
244 struct hlist_node *next;
245 spinlock_t *lock; 242 spinlock_t *lock;
246 243
247 for (try = 0, ecx = 0; try < AVC_CACHE_SLOTS; try++) { 244 for (try = 0, ecx = 0; try < AVC_CACHE_SLOTS; try++) {
@@ -253,7 +250,7 @@ static inline int avc_reclaim_node(void)
253 continue; 250 continue;
254 251
255 rcu_read_lock(); 252 rcu_read_lock();
256 hlist_for_each_entry(node, next, head, list) { 253 hlist_for_each_entry(node, head, list) {
257 avc_node_delete(node); 254 avc_node_delete(node);
258 avc_cache_stats_incr(reclaims); 255 avc_cache_stats_incr(reclaims);
259 ecx++; 256 ecx++;
@@ -301,11 +298,10 @@ static inline struct avc_node *avc_search_node(u32 ssid, u32 tsid, u16 tclass)
301 struct avc_node *node, *ret = NULL; 298 struct avc_node *node, *ret = NULL;
302 int hvalue; 299 int hvalue;
303 struct hlist_head *head; 300 struct hlist_head *head;
304 struct hlist_node *next;
305 301
306 hvalue = avc_hash(ssid, tsid, tclass); 302 hvalue = avc_hash(ssid, tsid, tclass);
307 head = &avc_cache.slots[hvalue]; 303 head = &avc_cache.slots[hvalue];
308 hlist_for_each_entry_rcu(node, next, head, list) { 304 hlist_for_each_entry_rcu(node, head, list) {
309 if (ssid == node->ae.ssid && 305 if (ssid == node->ae.ssid &&
310 tclass == node->ae.tclass && 306 tclass == node->ae.tclass &&
311 tsid == node->ae.tsid) { 307 tsid == node->ae.tsid) {
@@ -394,7 +390,6 @@ static struct avc_node *avc_insert(u32 ssid, u32 tsid, u16 tclass, struct av_dec
394 node = avc_alloc_node(); 390 node = avc_alloc_node();
395 if (node) { 391 if (node) {
396 struct hlist_head *head; 392 struct hlist_head *head;
397 struct hlist_node *next;
398 spinlock_t *lock; 393 spinlock_t *lock;
399 394
400 hvalue = avc_hash(ssid, tsid, tclass); 395 hvalue = avc_hash(ssid, tsid, tclass);
@@ -404,7 +399,7 @@ static struct avc_node *avc_insert(u32 ssid, u32 tsid, u16 tclass, struct av_dec
404 lock = &avc_cache.slots_lock[hvalue]; 399 lock = &avc_cache.slots_lock[hvalue];
405 400
406 spin_lock_irqsave(lock, flag); 401 spin_lock_irqsave(lock, flag);
407 hlist_for_each_entry(pos, next, head, list) { 402 hlist_for_each_entry(pos, head, list) {
408 if (pos->ae.ssid == ssid && 403 if (pos->ae.ssid == ssid &&
409 pos->ae.tsid == tsid && 404 pos->ae.tsid == tsid &&
410 pos->ae.tclass == tclass) { 405 pos->ae.tclass == tclass) {
@@ -541,7 +536,6 @@ static int avc_update_node(u32 event, u32 perms, u32 ssid, u32 tsid, u16 tclass,
541 unsigned long flag; 536 unsigned long flag;
542 struct avc_node *pos, *node, *orig = NULL; 537 struct avc_node *pos, *node, *orig = NULL;
543 struct hlist_head *head; 538 struct hlist_head *head;
544 struct hlist_node *next;
545 spinlock_t *lock; 539 spinlock_t *lock;
546 540
547 node = avc_alloc_node(); 541 node = avc_alloc_node();
@@ -558,7 +552,7 @@ static int avc_update_node(u32 event, u32 perms, u32 ssid, u32 tsid, u16 tclass,
558 552
559 spin_lock_irqsave(lock, flag); 553 spin_lock_irqsave(lock, flag);
560 554
561 hlist_for_each_entry(pos, next, head, list) { 555 hlist_for_each_entry(pos, head, list) {
562 if (ssid == pos->ae.ssid && 556 if (ssid == pos->ae.ssid &&
563 tsid == pos->ae.tsid && 557 tsid == pos->ae.tsid &&
564 tclass == pos->ae.tclass && 558 tclass == pos->ae.tclass &&
@@ -614,7 +608,6 @@ out:
614static void avc_flush(void) 608static void avc_flush(void)
615{ 609{
616 struct hlist_head *head; 610 struct hlist_head *head;
617 struct hlist_node *next;
618 struct avc_node *node; 611 struct avc_node *node;
619 spinlock_t *lock; 612 spinlock_t *lock;
620 unsigned long flag; 613 unsigned long flag;
@@ -630,7 +623,7 @@ static void avc_flush(void)
630 * prevent RCU grace periods from ending. 623 * prevent RCU grace periods from ending.
631 */ 624 */
632 rcu_read_lock(); 625 rcu_read_lock();
633 hlist_for_each_entry(node, next, head, list) 626 hlist_for_each_entry(node, head, list)
634 avc_node_delete(node); 627 avc_node_delete(node);
635 rcu_read_unlock(); 628 rcu_read_unlock();
636 spin_unlock_irqrestore(lock, flag); 629 spin_unlock_irqrestore(lock, flag);
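
The _rcu flavour converts the same way: hlist_for_each_entry_rcu() drops its cursor identically, whether the walk runs on the RCU read side (avc_get_hash_stats, avc_search_node) or the plain variant runs under the per-slot lock on the update side (avc_insert, avc_update_node). Before/after for the read-side lookup:

	/* before */
	struct hlist_node *next;

	hlist_for_each_entry_rcu(node, next, head, list)

	/* after */
	hlist_for_each_entry_rcu(node, head, list)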
diff --git a/tools/perf/util/evlist.c b/tools/perf/util/evlist.c
index bc4ad7977438..c8be0fbc5145 100644
--- a/tools/perf/util/evlist.c
+++ b/tools/perf/util/evlist.c
@@ -314,7 +314,6 @@ static int perf_evlist__id_add_fd(struct perf_evlist *evlist,
314struct perf_evsel *perf_evlist__id2evsel(struct perf_evlist *evlist, u64 id) 314struct perf_evsel *perf_evlist__id2evsel(struct perf_evlist *evlist, u64 id)
315{ 315{
316 struct hlist_head *head; 316 struct hlist_head *head;
317 struct hlist_node *pos;
318 struct perf_sample_id *sid; 317 struct perf_sample_id *sid;
319 int hash; 318 int hash;
320 319
@@ -324,7 +323,7 @@ struct perf_evsel *perf_evlist__id2evsel(struct perf_evlist *evlist, u64 id)
324 hash = hash_64(id, PERF_EVLIST__HLIST_BITS); 323 hash = hash_64(id, PERF_EVLIST__HLIST_BITS);
325 head = &evlist->heads[hash]; 324 head = &evlist->heads[hash];
326 325
327 hlist_for_each_entry(sid, pos, head, node) 326 hlist_for_each_entry(sid, head, node)
328 if (sid->id == id) 327 if (sid->id == id)
329 return sid->evsel; 328 return sid->evsel;
330 329
diff --git a/virt/kvm/eventfd.c b/virt/kvm/eventfd.c
index b6eea5cc7b34..adb17f266b28 100644
--- a/virt/kvm/eventfd.c
+++ b/virt/kvm/eventfd.c
@@ -268,14 +268,13 @@ static void irqfd_update(struct kvm *kvm, struct _irqfd *irqfd,
268 struct kvm_irq_routing_table *irq_rt) 268 struct kvm_irq_routing_table *irq_rt)
269{ 269{
270 struct kvm_kernel_irq_routing_entry *e; 270 struct kvm_kernel_irq_routing_entry *e;
271 struct hlist_node *n;
272 271
273 if (irqfd->gsi >= irq_rt->nr_rt_entries) { 272 if (irqfd->gsi >= irq_rt->nr_rt_entries) {
274 rcu_assign_pointer(irqfd->irq_entry, NULL); 273 rcu_assign_pointer(irqfd->irq_entry, NULL);
275 return; 274 return;
276 } 275 }
277 276
278 hlist_for_each_entry(e, n, &irq_rt->map[irqfd->gsi], link) { 277 hlist_for_each_entry(e, &irq_rt->map[irqfd->gsi], link) {
279 /* Only fast-path MSI. */ 278 /* Only fast-path MSI. */
280 if (e->type == KVM_IRQ_ROUTING_MSI) 279 if (e->type == KVM_IRQ_ROUTING_MSI)
281 rcu_assign_pointer(irqfd->irq_entry, e); 280 rcu_assign_pointer(irqfd->irq_entry, e);
diff --git a/virt/kvm/irq_comm.c b/virt/kvm/irq_comm.c
index ff6d40e2c06d..e9073cf4d040 100644
--- a/virt/kvm/irq_comm.c
+++ b/virt/kvm/irq_comm.c
@@ -173,7 +173,6 @@ int kvm_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level)
173 struct kvm_kernel_irq_routing_entry *e, irq_set[KVM_NR_IRQCHIPS]; 173 struct kvm_kernel_irq_routing_entry *e, irq_set[KVM_NR_IRQCHIPS];
174 int ret = -1, i = 0; 174 int ret = -1, i = 0;
175 struct kvm_irq_routing_table *irq_rt; 175 struct kvm_irq_routing_table *irq_rt;
176 struct hlist_node *n;
177 176
178 trace_kvm_set_irq(irq, level, irq_source_id); 177 trace_kvm_set_irq(irq, level, irq_source_id);
179 178
@@ -184,7 +183,7 @@ int kvm_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level)
184 rcu_read_lock(); 183 rcu_read_lock();
185 irq_rt = rcu_dereference(kvm->irq_routing); 184 irq_rt = rcu_dereference(kvm->irq_routing);
186 if (irq < irq_rt->nr_rt_entries) 185 if (irq < irq_rt->nr_rt_entries)
187 hlist_for_each_entry(e, n, &irq_rt->map[irq], link) 186 hlist_for_each_entry(e, &irq_rt->map[irq], link)
188 irq_set[i++] = *e; 187 irq_set[i++] = *e;
189 rcu_read_unlock(); 188 rcu_read_unlock();
190 189
@@ -212,7 +211,6 @@ int kvm_set_irq_inatomic(struct kvm *kvm, int irq_source_id, u32 irq, int level)
212 struct kvm_kernel_irq_routing_entry *e; 211 struct kvm_kernel_irq_routing_entry *e;
213 int ret = -EINVAL; 212 int ret = -EINVAL;
214 struct kvm_irq_routing_table *irq_rt; 213 struct kvm_irq_routing_table *irq_rt;
215 struct hlist_node *n;
216 214
217 trace_kvm_set_irq(irq, level, irq_source_id); 215 trace_kvm_set_irq(irq, level, irq_source_id);
218 216
@@ -227,7 +225,7 @@ int kvm_set_irq_inatomic(struct kvm *kvm, int irq_source_id, u32 irq, int level)
227 rcu_read_lock(); 225 rcu_read_lock();
228 irq_rt = rcu_dereference(kvm->irq_routing); 226 irq_rt = rcu_dereference(kvm->irq_routing);
229 if (irq < irq_rt->nr_rt_entries) 227 if (irq < irq_rt->nr_rt_entries)
230 hlist_for_each_entry(e, n, &irq_rt->map[irq], link) { 228 hlist_for_each_entry(e, &irq_rt->map[irq], link) {
231 if (likely(e->type == KVM_IRQ_ROUTING_MSI)) 229 if (likely(e->type == KVM_IRQ_ROUTING_MSI))
232 ret = kvm_set_msi_inatomic(e, kvm); 230 ret = kvm_set_msi_inatomic(e, kvm);
233 else 231 else
@@ -241,13 +239,12 @@ int kvm_set_irq_inatomic(struct kvm *kvm, int irq_source_id, u32 irq, int level)
241bool kvm_irq_has_notifier(struct kvm *kvm, unsigned irqchip, unsigned pin) 239bool kvm_irq_has_notifier(struct kvm *kvm, unsigned irqchip, unsigned pin)
242{ 240{
243 struct kvm_irq_ack_notifier *kian; 241 struct kvm_irq_ack_notifier *kian;
244 struct hlist_node *n;
245 int gsi; 242 int gsi;
246 243
247 rcu_read_lock(); 244 rcu_read_lock();
248 gsi = rcu_dereference(kvm->irq_routing)->chip[irqchip][pin]; 245 gsi = rcu_dereference(kvm->irq_routing)->chip[irqchip][pin];
249 if (gsi != -1) 246 if (gsi != -1)
250 hlist_for_each_entry_rcu(kian, n, &kvm->irq_ack_notifier_list, 247 hlist_for_each_entry_rcu(kian, &kvm->irq_ack_notifier_list,
251 link) 248 link)
252 if (kian->gsi == gsi) { 249 if (kian->gsi == gsi) {
253 rcu_read_unlock(); 250 rcu_read_unlock();
@@ -263,7 +260,6 @@ EXPORT_SYMBOL_GPL(kvm_irq_has_notifier);
263void kvm_notify_acked_irq(struct kvm *kvm, unsigned irqchip, unsigned pin) 260void kvm_notify_acked_irq(struct kvm *kvm, unsigned irqchip, unsigned pin)
264{ 261{
265 struct kvm_irq_ack_notifier *kian; 262 struct kvm_irq_ack_notifier *kian;
266 struct hlist_node *n;
267 int gsi; 263 int gsi;
268 264
269 trace_kvm_ack_irq(irqchip, pin); 265 trace_kvm_ack_irq(irqchip, pin);
@@ -271,7 +267,7 @@ void kvm_notify_acked_irq(struct kvm *kvm, unsigned irqchip, unsigned pin)
271 rcu_read_lock(); 267 rcu_read_lock();
272 gsi = rcu_dereference(kvm->irq_routing)->chip[irqchip][pin]; 268 gsi = rcu_dereference(kvm->irq_routing)->chip[irqchip][pin];
273 if (gsi != -1) 269 if (gsi != -1)
274 hlist_for_each_entry_rcu(kian, n, &kvm->irq_ack_notifier_list, 270 hlist_for_each_entry_rcu(kian, &kvm->irq_ack_notifier_list,
275 link) 271 link)
276 if (kian->gsi == gsi) 272 if (kian->gsi == gsi)
277 kian->irq_acked(kian); 273 kian->irq_acked(kian);
@@ -369,13 +365,12 @@ void kvm_fire_mask_notifiers(struct kvm *kvm, unsigned irqchip, unsigned pin,
369 bool mask) 365 bool mask)
370{ 366{
371 struct kvm_irq_mask_notifier *kimn; 367 struct kvm_irq_mask_notifier *kimn;
372 struct hlist_node *n;
373 int gsi; 368 int gsi;
374 369
375 rcu_read_lock(); 370 rcu_read_lock();
376 gsi = rcu_dereference(kvm->irq_routing)->chip[irqchip][pin]; 371 gsi = rcu_dereference(kvm->irq_routing)->chip[irqchip][pin];
377 if (gsi != -1) 372 if (gsi != -1)
378 hlist_for_each_entry_rcu(kimn, n, &kvm->mask_notifier_list, link) 373 hlist_for_each_entry_rcu(kimn, &kvm->mask_notifier_list, link)
379 if (kimn->irq == gsi) 374 if (kimn->irq == gsi)
380 kimn->func(kimn, mask); 375 kimn->func(kimn, mask);
381 rcu_read_unlock(); 376 rcu_read_unlock();
@@ -396,13 +391,12 @@ static int setup_routing_entry(struct kvm_irq_routing_table *rt,
396 int delta; 391 int delta;
397 unsigned max_pin; 392 unsigned max_pin;
398 struct kvm_kernel_irq_routing_entry *ei; 393 struct kvm_kernel_irq_routing_entry *ei;
399 struct hlist_node *n;
400 394
401 /* 395 /*
402 * Do not allow GSI to be mapped to the same irqchip more than once. 396 * Do not allow GSI to be mapped to the same irqchip more than once.
403 * Allow only one to one mapping between GSI and MSI. 397 * Allow only one to one mapping between GSI and MSI.
404 */ 398 */
405 hlist_for_each_entry(ei, n, &rt->map[ue->gsi], link) 399 hlist_for_each_entry(ei, &rt->map[ue->gsi], link)
406 if (ei->type == KVM_IRQ_ROUTING_MSI || 400 if (ei->type == KVM_IRQ_ROUTING_MSI ||
407 ue->type == KVM_IRQ_ROUTING_MSI || 401 ue->type == KVM_IRQ_ROUTING_MSI ||
408 ue->u.irqchip.irqchip == ei->irqchip.irqchip) 402 ue->u.irqchip.irqchip == ei->irqchip.irqchip)
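
Every hunk in this commit is the same mechanical rewrite, and it is safe because the node pointer was always derivable from the typed entry. The following standalone sketch — a minimal userspace re-implementation for illustration, not the kernel's actual include/linux/list.h — shows the mechanism; it assumes a GCC/clang-style typeof extension and builds with cc -o hlist-demo hlist-demo.c:

/*
 * Minimal hlist plus a post-patch-style iterator: the typed entry is
 * recovered from the embedded node with container_of(), so no separate
 * struct hlist_node * cursor is needed.
 */
#include <stddef.h>
#include <stdio.h>

struct hlist_node { struct hlist_node *next; };
struct hlist_head { struct hlist_node *first; };

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/* NULL-tolerant entry lookup, in the spirit of the kernel's hlist_entry_safe() */
#define hlist_entry_safe(ptr, type, member) \
	((ptr) ? container_of(ptr, type, member) : NULL)

/* one typed cursor; its embedded node supplies the link to follow */
#define hlist_for_each_entry(pos, head, member) \
	for (pos = hlist_entry_safe((head)->first, typeof(*(pos)), member); \
	     pos; \
	     pos = hlist_entry_safe((pos)->member.next, typeof(*(pos)), member))

struct item {
	int val;
	struct hlist_node hnode;	/* embedded, like common.hnode above */
};

static void hlist_add_head(struct hlist_node *n, struct hlist_head *h)
{
	n->next = h->first;
	h->first = n;
}

int main(void)
{
	struct hlist_head head = { NULL };
	struct item a = { 1 }, b = { 2 }, c = { 3 };
	struct item *pos;

	hlist_add_head(&a.hnode, &head);
	hlist_add_head(&b.hnode, &head);
	hlist_add_head(&c.hnode, &head);

	/* no struct hlist_node * cursor anywhere in sight */
	hlist_for_each_entry(pos, &head, hnode)
		printf("%d\n", pos->val);	/* prints 3, then 2, then 1 */

	return 0;
}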