author		Linus Torvalds <torvalds@linux-foundation.org>	2013-02-27 23:58:09 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2013-02-27 23:58:09 -0500
commit		2a7d2b96d5cba7568139d9ab157a0e97ab32440f
tree		ad029d8cc7b7068b7250e914360ec6315fdfa114
parent		e3c4877de8b9d93bd47b6ee88eb594b1c1e10da5
parent		b67bfe0d42cac56c512dd5da4b1b347a23f4b70a
Merge branch 'akpm' (final batch from Andrew)
Merge third patch-bomb from Andrew Morton:
 "This wraps me up for -rc1.

   - Lots of misc stuff and things which were deferred/missed from
     patchbombings 1 & 2.

   - ocfs2 things
   - lib/scatterlist
   - hfsplus
   - fatfs
   - documentation
   - signals
   - procfs
   - lockdep
   - coredump
   - seqfile core
   - kexec
   - Tejun's large IDR tree reworkings
   - ipmi
   - partitions
   - nbd
   - random() things
   - kfifo
   - tools/testing/selftests updates
   - Sasha's large and pointless hlist cleanup"

* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (163 commits)
  hlist: drop the node parameter from iterators
  kcmp: make it depend on CHECKPOINT_RESTORE
  selftests: add a simple doc
  tools/testing/selftests/Makefile: rearrange targets
  selftests/efivarfs: add create-read test
  selftests/efivarfs: add empty file creation test
  selftests: add tests for efivarfs
  kfifo: fix kfifo_alloc() and kfifo_init()
  kfifo: move kfifo.c from kernel/ to lib/
  arch Kconfig: centralise CONFIG_ARCH_NO_VIRT_TO_BUS
  w1: add support for DS2413 Dual Channel Addressable Switch
  memstick: move the dereference below the NULL test
  drivers/pps/clients/pps-gpio.c: use devm_kzalloc
  Documentation/DMA-API-HOWTO.txt: fix typo
  include/linux/eventfd.h: fix incorrect filename is a comment
  mtd: mtd_stresstest: use prandom_bytes()
  mtd: mtd_subpagetest: convert to use prandom library
  mtd: mtd_speedtest: use prandom_bytes
  mtd: mtd_pagetest: convert to use prandom library
  mtd: mtd_oobtest: convert to use prandom library
  ...
-rw-r--r--  Documentation/DMA-API-HOWTO.txt | 9
-rw-r--r--  Documentation/IPMI.txt | 18
-rw-r--r--  Documentation/blockdev/nbd.txt | 38
-rw-r--r--  Documentation/cgroups/blkio-controller.txt | 2
-rw-r--r--  MAINTAINERS | 15
-rw-r--r--  arch/Kconfig | 7
-rw-r--r--  arch/alpha/Kconfig | 1
-rw-r--r--  arch/arm/Kconfig | 1
-rw-r--r--  arch/arm/kernel/kprobes.c | 6
-rw-r--r--  arch/avr32/Kconfig | 1
-rw-r--r--  arch/blackfin/Kconfig | 1
-rw-r--r--  arch/cris/Kconfig | 1
-rw-r--r--  arch/frv/Kconfig | 1
-rw-r--r--  arch/frv/mm/elf-fdpic.c | 49
-rw-r--r--  arch/h8300/Kconfig | 1
-rw-r--r--  arch/ia64/Kconfig | 1
-rw-r--r--  arch/ia64/kernel/kprobes.c | 8
-rw-r--r--  arch/m32r/Kconfig | 1
-rw-r--r--  arch/m68k/Kconfig | 1
-rw-r--r--  arch/microblaze/Kconfig | 1
-rw-r--r--  arch/mips/Kconfig | 1
-rw-r--r--  arch/mips/kernel/kprobes.c | 6
-rw-r--r--  arch/mn10300/Kconfig | 1
-rw-r--r--  arch/openrisc/Kconfig | 1
-rw-r--r--  arch/parisc/Kconfig | 1
-rw-r--r--  arch/parisc/kernel/sys_parisc.c | 48
-rw-r--r--  arch/powerpc/Kconfig | 4
-rw-r--r--  arch/powerpc/kernel/kprobes.c | 6
-rw-r--r--  arch/powerpc/kvm/book3s_mmu_hpte.c | 18
-rw-r--r--  arch/s390/Kconfig | 1
-rw-r--r--  arch/s390/kernel/kprobes.c | 8
-rw-r--r--  arch/s390/pci/pci_msi.c | 3
-rw-r--r--  arch/score/Kconfig | 1
-rw-r--r--  arch/sh/Kconfig | 3
-rw-r--r--  arch/sh/kernel/kprobes.c | 6
-rw-r--r--  arch/sparc/Kconfig | 3
-rw-r--r--  arch/sparc/kernel/kprobes.c | 6
-rw-r--r--  arch/sparc/kernel/ldc.c | 3
-rw-r--r--  arch/tile/Kconfig | 1
-rw-r--r--  arch/unicore32/Kconfig | 1
-rw-r--r--  arch/x86/Kconfig | 1
-rw-r--r--  arch/x86/kernel/kprobes/core.c | 8
-rw-r--r--  arch/x86/kvm/mmu.c | 26
-rw-r--r--  arch/xtensa/Kconfig | 1
-rw-r--r--  block/blk-cgroup.c | 6
-rw-r--r--  block/blk-ioc.c | 3
-rw-r--r--  block/bsg.c | 29
-rw-r--r--  block/cfq-iosched.c | 3
-rw-r--r--  block/elevator.c | 4
-rw-r--r--  block/genhd.c | 24
-rw-r--r--  block/partition-generic.c | 6
-rw-r--r--  block/partitions/check.c | 37
-rw-r--r--  block/partitions/check.h | 4
-rw-r--r--  block/partitions/efi.c | 12
-rw-r--r--  block/partitions/mac.c | 4
-rw-r--r--  block/partitions/msdos.c | 11
-rw-r--r--  crypto/algapi.c | 6
-rw-r--r--  drivers/atm/atmtcp.c | 6
-rw-r--r--  drivers/atm/eni.c | 3
-rw-r--r--  drivers/atm/he.c | 3
-rw-r--r--  drivers/atm/nicstar.c | 25
-rw-r--r--  drivers/atm/solos-pci.c | 3
-rw-r--r--  drivers/block/drbd/drbd_main.c | 29
-rw-r--r--  drivers/block/loop.c | 24
-rw-r--r--  drivers/block/nbd.c | 36
-rw-r--r--  drivers/char/ipmi/ipmi_si_intf.c | 66
-rw-r--r--  drivers/char/misc.c | 16
-rw-r--r--  drivers/clk/clk.c | 59
-rw-r--r--  drivers/dca/dca-sysfs.c | 23
-rw-r--r--  drivers/dma/dmaengine.c | 16
-rw-r--r--  drivers/firewire/core-cdev.c | 20
-rw-r--r--  drivers/firewire/core-device.c | 7
-rw-r--r--  drivers/gpio/gpiolib.c | 11
-rw-r--r--  drivers/gpu/drm/drm_context.c | 19
-rw-r--r--  drivers/gpu/drm/drm_crtc.c | 20
-rw-r--r--  drivers/gpu/drm/drm_drv.c | 1
-rw-r--r--  drivers/gpu/drm/drm_gem.c | 40
-rw-r--r--  drivers/gpu/drm/drm_hashtab.c | 19
-rw-r--r--  drivers/gpu/drm/drm_stub.c | 19
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_ipp.c | 20
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_context.c | 21
-rw-r--r--  drivers/gpu/drm/sis/sis_drv.c | 1
-rw-r--r--  drivers/gpu/drm/sis/sis_mm.c | 13
-rw-r--r--  drivers/gpu/drm/via/via_map.c | 1
-rw-r--r--  drivers/gpu/drm/via/via_mm.c | 13
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_resource.c | 17
-rw-r--r--  drivers/i2c/i2c-core.c | 46
-rw-r--r--  drivers/infiniband/core/cm.c | 22
-rw-r--r--  drivers/infiniband/core/cma.c | 27
-rw-r--r--  drivers/infiniband/core/fmr_pool.c | 3
-rw-r--r--  drivers/infiniband/core/sa_query.c | 18
-rw-r--r--  drivers/infiniband/core/ucm.c | 16
-rw-r--r--  drivers/infiniband/core/ucma.c | 32
-rw-r--r--  drivers/infiniband/core/uverbs_cmd.c | 17
-rw-r--r--  drivers/infiniband/hw/amso1100/c2_qp.c | 19
-rw-r--r--  drivers/infiniband/hw/cxgb3/iwch.h | 24
-rw-r--r--  drivers/infiniband/hw/cxgb4/iw_cxgb4.h | 27
-rw-r--r--  drivers/infiniband/hw/ehca/ehca_cq.c | 27
-rw-r--r--  drivers/infiniband/hw/ehca/ehca_qp.c | 34
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_driver.c | 16
-rw-r--r--  drivers/infiniband/hw/mlx4/cm.c | 32
-rw-r--r--  drivers/infiniband/hw/ocrdma/ocrdma_main.c | 14
-rw-r--r--  drivers/infiniband/hw/qib/qib_init.c | 21
-rw-r--r--  drivers/isdn/mISDN/socket.c | 3
-rw-r--r--  drivers/isdn/mISDN/stack.c | 3
-rw-r--r--  drivers/md/dm-bio-prison.c | 3
-rw-r--r--  drivers/md/dm-bufio.c | 3
-rw-r--r--  drivers/md/dm-snap.c | 3
-rw-r--r--  drivers/md/dm.c | 55
-rw-r--r--  drivers/md/persistent-data/dm-transaction-manager.c | 7
-rw-r--r--  drivers/md/raid5.c | 3
-rw-r--r--  drivers/memstick/core/memstick.c | 21
-rw-r--r--  drivers/memstick/core/mspro_block.c | 17
-rw-r--r--  drivers/memstick/host/r592.c | 3
-rw-r--r--  drivers/mfd/rtsx_pcr.c | 13
-rw-r--r--  drivers/misc/c2port/core.c | 22
-rw-r--r--  drivers/misc/sgi-gru/grutlbpurge.c | 3
-rw-r--r--  drivers/misc/tifm_core.c | 11
-rw-r--r--  drivers/misc/vmw_vmci/vmci_doorbell.c | 7
-rw-r--r--  drivers/misc/vmw_vmci/vmci_resource.c | 6
-rw-r--r--  drivers/mmc/core/host.c | 11
-rw-r--r--  drivers/mmc/host/dw_mmc.c | 4
-rw-r--r--  drivers/mtd/mtdcore.c | 9
-rw-r--r--  drivers/mtd/tests/mtd_nandecctest.c | 2
-rw-r--r--  drivers/mtd/tests/mtd_oobtest.c | 49
-rw-r--r--  drivers/mtd/tests/mtd_pagetest.c | 43
-rw-r--r--  drivers/mtd/tests/mtd_speedtest.c | 9
-rw-r--r--  drivers/mtd/tests/mtd_stresstest.c | 3
-rw-r--r--  drivers/mtd/tests/mtd_subpagetest.c | 42
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c | 18
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 8
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_netdev.c | 17
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_rx.c | 4
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c | 10
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c | 10
-rw-r--r--  drivers/net/ethernet/sun/sunvnet.c | 3
-rw-r--r--  drivers/net/macvlan.c | 6
-rw-r--r--  drivers/net/macvtap.c | 21
-rw-r--r--  drivers/net/ppp/ppp_generic.c | 33
-rw-r--r--  drivers/net/tun.c | 15
-rw-r--r--  drivers/net/vxlan.c | 12
-rw-r--r--  drivers/net/wireless/zd1201.c | 7
-rw-r--r--  drivers/pci/pci.c | 12
-rw-r--r--  drivers/power/bq2415x_charger.c | 11
-rw-r--r--  drivers/power/bq27x00_battery.c | 9
-rw-r--r--  drivers/power/ds2782_battery.c | 9
-rw-r--r--  drivers/pps/clients/pps-gpio.c | 6
-rw-r--r--  drivers/pps/kapi.c | 2
-rw-r--r--  drivers/pps/pps.c | 36
-rw-r--r--  drivers/remoteproc/remoteproc_core.c | 11
-rw-r--r--  drivers/rpmsg/virtio_rpmsg_bus.c | 31
-rw-r--r--  drivers/scsi/bfa/bfad_im.c | 15
-rw-r--r--  drivers/scsi/ch.c | 21
-rw-r--r--  drivers/scsi/lpfc/lpfc_init.c | 12
-rw-r--r--  drivers/scsi/sg.c | 43
-rw-r--r--  drivers/scsi/st.c | 27
-rw-r--r--  drivers/staging/android/binder.c | 19
-rw-r--r--  drivers/target/iscsi/iscsi_target.c | 15
-rw-r--r--  drivers/target/iscsi/iscsi_target_login.c | 15
-rw-r--r--  drivers/target/tcm_fc/tfc_sess.c | 12
-rw-r--r--  drivers/thermal/cpu_cooling.c | 17
-rw-r--r--  drivers/thermal/thermal_sys.c | 17
-rw-r--r--  drivers/uio/uio.c | 19
-rw-r--r--  drivers/vfio/vfio.c | 17
-rw-r--r--  drivers/video/backlight/Kconfig | 6
-rw-r--r--  drivers/video/backlight/Makefile | 1
-rw-r--r--  drivers/video/backlight/ams369fg06.c | 8
-rw-r--r--  drivers/video/backlight/lp8788_bl.c | 333
-rw-r--r--  drivers/w1/slaves/Kconfig | 13
-rw-r--r--  drivers/w1/slaves/Makefile | 3
-rw-r--r--  drivers/w1/slaves/w1_ds2413.c | 177
-rw-r--r--  drivers/w1/w1_family.h | 1
-rw-r--r--  fs/affs/amigaffs.c | 3
-rw-r--r--  fs/aio.c | 3
-rw-r--r--  fs/cifs/inode.c | 3
-rw-r--r--  fs/coredump.c | 2
-rw-r--r--  fs/dcache.c | 9
-rw-r--r--  fs/dlm/lock.c | 18
-rw-r--r--  fs/dlm/lockspace.c | 1
-rw-r--r--  fs/dlm/lowcomms.c | 11
-rw-r--r--  fs/dlm/recover.c | 52
-rw-r--r--  fs/ecryptfs/messaging.c | 6
-rw-r--r--  fs/exec.c | 10
-rw-r--r--  fs/exportfs/expfs.c | 3
-rw-r--r--  fs/fat/fat.h | 2
-rw-r--r--  fs/fat/inode.c | 77
-rw-r--r--  fs/fat/nfs.c | 3
-rw-r--r--  fs/fscache/cookie.c | 11
-rw-r--r--  fs/hfsplus/Makefile | 4
-rw-r--r--  fs/hfsplus/attributes.c | 399
-rw-r--r--  fs/hfsplus/bfind.c | 93
-rw-r--r--  fs/hfsplus/bnode.c | 8
-rw-r--r--  fs/hfsplus/brec.c | 23
-rw-r--r--  fs/hfsplus/btree.c | 8
-rw-r--r--  fs/hfsplus/catalog.c | 36
-rw-r--r--  fs/hfsplus/dir.c | 55
-rw-r--r--  fs/hfsplus/extents.c | 4
-rw-r--r--  fs/hfsplus/hfsplus_fs.h | 52
-rw-r--r--  fs/hfsplus/hfsplus_raw.h | 68
-rw-r--r--  fs/hfsplus/inode.c | 18
-rw-r--r--  fs/hfsplus/ioctl.c | 108
-rw-r--r--  fs/hfsplus/super.c | 56
-rw-r--r--  fs/hfsplus/unicode.c | 7
-rw-r--r--  fs/hfsplus/xattr.c | 709
-rw-r--r--  fs/hfsplus/xattr.h | 60
-rw-r--r--  fs/hfsplus/xattr_security.c | 104
-rw-r--r--  fs/hfsplus/xattr_trusted.c | 63
-rw-r--r--  fs/hfsplus/xattr_user.c | 63
-rw-r--r--  fs/inode.c | 19
-rw-r--r--  fs/lockd/host.c | 29
-rw-r--r--  fs/lockd/svcsubs.c | 7
-rw-r--r--  fs/nfs/client.c | 1
-rw-r--r--  fs/nfs/nfs4client.c | 13
-rw-r--r--  fs/nfs/pnfs_dev.c | 9
-rw-r--r--  fs/nfsd/nfscache.c | 3
-rw-r--r--  fs/notify/fsnotify.c | 3
-rw-r--r--  fs/notify/inode_mark.c | 19
-rw-r--r--  fs/notify/inotify/inotify_fsnotify.c | 1
-rw-r--r--  fs/notify/inotify/inotify_user.c | 24
-rw-r--r--  fs/notify/vfsmount_mark.c | 19
-rw-r--r--  fs/ocfs2/cluster/tcp.c | 32
-rw-r--r--  fs/ocfs2/dcache.c | 3
-rw-r--r--  fs/ocfs2/dlm/dlmrecovery.c | 6
-rw-r--r--  fs/ocfs2/suballoc.c | 7
-rw-r--r--  fs/ocfs2/suballoc.h | 2
-rw-r--r--  fs/ocfs2/xattr.c | 2
-rw-r--r--  fs/proc/base.c | 3
-rw-r--r--  fs/proc/generic.c | 27
-rw-r--r--  fs/proc/inode.c | 5
-rw-r--r--  fs/proc/internal.h | 3
-rw-r--r--  fs/proc/kcore.c | 3
-rw-r--r--  fs/proc/proc_devtree.c | 13
-rw-r--r--  fs/proc/proc_sysctl.c | 19
-rw-r--r--  fs/proc/vmcore.c | 35
-rw-r--r--  fs/seq_file.c | 40
-rw-r--r--  fs/super.c | 8
-rw-r--r--  fs/sysfs/bin.c | 3
-rw-r--r--  fs/xfs/xfs_log_recover.c | 3
-rw-r--r--  include/linux/debug_locks.h | 4
-rw-r--r--  include/linux/eventfd.h | 2
-rw-r--r--  include/linux/freezer.h | 3
-rw-r--r--  include/linux/hashtable.h | 40
-rw-r--r--  include/linux/idr.h | 170
-rw-r--r--  include/linux/if_team.h | 6
-rw-r--r--  include/linux/ipmi.h | 4
-rw-r--r--  include/linux/list.h | 49
-rw-r--r--  include/linux/mfd/lp8788.h | 24
-rw-r--r--  include/linux/pid.h | 3
-rw-r--r--  include/linux/rculist.h | 56
-rw-r--r--  include/linux/scatterlist.h | 41
-rw-r--r--  include/linux/sched.h | 5
-rw-r--r--  include/net/ax25.h | 8
-rw-r--r--  include/net/inet_hashtables.h | 4
-rw-r--r--  include/net/inet_timewait_sock.h | 8
-rw-r--r--  include/net/netrom.h | 16
-rw-r--r--  include/net/sch_generic.h | 3
-rw-r--r--  include/net/sctp/sctp.h | 4
-rw-r--r--  include/net/sock.h | 21
-rw-r--r--  include/uapi/linux/ipmi.h | 10
-rw-r--r--  include/uapi/linux/msdos_fs.h | 38
-rw-r--r--  include/uapi/linux/nbd.h | 3
-rw-r--r--  include/uapi/linux/xattr.h | 13
-rw-r--r--  ipc/util.c | 30
-rw-r--r--  kernel/Makefile | 6
-rw-r--r--  kernel/cgroup.c | 43
-rw-r--r--  kernel/events/core.c | 16
-rw-r--r--  kernel/exit.c | 6
-rw-r--r--  kernel/fork.c | 7
-rw-r--r--  kernel/kexec.c | 44
-rw-r--r--  kernel/kprobes.c | 35
-rw-r--r--  kernel/lockdep.c | 17
-rw-r--r--  kernel/pid.c | 3
-rw-r--r--  kernel/posix-timers.c | 18
-rw-r--r--  kernel/sched/core.c | 6
-rw-r--r--  kernel/signal.c | 14
-rw-r--r--  kernel/smpboot.c | 2
-rw-r--r--  kernel/sys.c | 10
-rw-r--r--  kernel/sysctl.c | 2
-rw-r--r--  kernel/sysctl_binary.c | 3
-rw-r--r--  kernel/trace/ftrace.c | 24
-rw-r--r--  kernel/trace/trace_output.c | 3
-rw-r--r--  kernel/tracepoint.c | 6
-rw-r--r--  kernel/user-return-notifier.c | 4
-rw-r--r--  kernel/user.c | 3
-rw-r--r--  kernel/utsname.c | 2
-rw-r--r--  kernel/utsname_sysctl.c | 3
-rw-r--r--  kernel/workqueue.c | 13
-rw-r--r--  lib/Makefile | 2
-rw-r--r--  lib/debugobjects.c | 21
-rw-r--r--  lib/devres.c | 2
-rw-r--r--  lib/idr.c | 446
-rw-r--r--  lib/kfifo.c (renamed from kernel/kfifo.c) | 6
-rw-r--r--  lib/lru_cache.c | 3
-rw-r--r--  lib/scatterlist.c | 86
-rw-r--r--  mm/Kconfig | 2
-rw-r--r--  mm/huge_memory.c | 3
-rw-r--r--  mm/internal.h | 2
-rw-r--r--  mm/kmemleak.c | 9
-rw-r--r--  mm/ksm.c | 15
-rw-r--r--  mm/mlock.c | 34
-rw-r--r--  mm/mmu_notifier.c | 18
-rw-r--r--  net/9p/error.c | 4
-rw-r--r--  net/9p/trans_virtio.c | 2
-rw-r--r--  net/9p/util.c | 17
-rw-r--r--  net/appletalk/ddp.c | 9
-rw-r--r--  net/atm/common.c | 7
-rw-r--r--  net/atm/lec.c | 66
-rw-r--r--  net/atm/signaling.c | 3
-rw-r--r--  net/ax25/af_ax25.c | 15
-rw-r--r--  net/ax25/ax25_ds_subr.c | 6
-rw-r--r--  net/ax25/ax25_ds_timer.c | 3
-rw-r--r--  net/ax25/ax25_iface.c | 3
-rw-r--r--  net/ax25/ax25_uid.c | 11
-rw-r--r--  net/batman-adv/bat_iv_ogm.c | 12
-rw-r--r--  net/batman-adv/bridge_loop_avoidance.c | 39
-rw-r--r--  net/batman-adv/distributed-arp-table.c | 15
-rw-r--r--  net/batman-adv/gateway_client.c | 13
-rw-r--r--  net/batman-adv/main.c | 6
-rw-r--r--  net/batman-adv/originator.c | 31
-rw-r--r--  net/batman-adv/originator.h | 3
-rw-r--r--  net/batman-adv/routing.c | 6
-rw-r--r--  net/batman-adv/send.c | 6
-rw-r--r--  net/batman-adv/translation-table.c | 82
-rw-r--r--  net/batman-adv/vis.c | 38
-rw-r--r--  net/bluetooth/hci_sock.c | 15
-rw-r--r--  net/bluetooth/rfcomm/sock.c | 13
-rw-r--r--  net/bluetooth/sco.c | 14
-rw-r--r--  net/bridge/br_fdb.c | 23
-rw-r--r--  net/bridge/br_mdb.c | 6
-rw-r--r--  net/bridge/br_multicast.c | 25
-rw-r--r--  net/can/af_can.c | 18
-rw-r--r--  net/can/gw.c | 15
-rw-r--r--  net/can/proc.c | 3
-rw-r--r--  net/core/dev.c | 12
-rw-r--r--  net/core/flow.c | 11
-rw-r--r--  net/core/net-procfs.c | 3
-rw-r--r--  net/core/rtnetlink.c | 3
-rw-r--r--  net/decnet/af_decnet.c | 9
-rw-r--r--  net/decnet/dn_table.c | 13
-rw-r--r--  net/ieee802154/dgram.c | 3
-rw-r--r--  net/ieee802154/raw.c | 3
-rw-r--r--  net/ipv4/devinet.c | 10
-rw-r--r--  net/ipv4/fib_frontend.c | 15
-rw-r--r--  net/ipv4/fib_semantics.c | 23
-rw-r--r--  net/ipv4/fib_trie.c | 33
-rw-r--r--  net/ipv4/inet_connection_sock.c | 10
-rw-r--r--  net/ipv4/inet_fragment.c | 10
-rw-r--r--  net/ipv4/inet_hashtables.c | 8
-rw-r--r--  net/ipv4/inet_timewait_sock.c | 7
-rw-r--r--  net/ipv4/raw.c | 8
-rw-r--r--  net/ipv4/tcp_ipv4.c | 7
-rw-r--r--  net/ipv6/addrconf.c | 32
-rw-r--r--  net/ipv6/addrlabel.c | 18
-rw-r--r--  net/ipv6/inet6_connection_sock.c | 5
-rw-r--r--  net/ipv6/ip6_fib.c | 12
-rw-r--r--  net/ipv6/raw.c | 3
-rw-r--r--  net/ipv6/xfrm6_tunnel.c | 10
-rw-r--r--  net/ipx/af_ipx.c | 16
-rw-r--r--  net/ipx/ipx_proc.c | 5
-rw-r--r--  net/iucv/af_iucv.c | 21
-rw-r--r--  net/key/af_key.c | 3
-rw-r--r--  net/l2tp/l2tp_core.c | 12
-rw-r--r--  net/l2tp/l2tp_ip.c | 3
-rw-r--r--  net/l2tp/l2tp_ip6.c | 3
-rw-r--r--  net/llc/llc_sap.c | 3
-rw-r--r--  net/mac80211/main.c | 2
-rw-r--r--  net/mac80211/mesh_pathtbl.c | 45
-rw-r--r--  net/mac80211/tx.c | 18
-rw-r--r--  net/netfilter/ipvs/ip_vs_conn.c | 26
-rw-r--r--  net/netfilter/nf_conntrack_expect.c | 17
-rw-r--r--  net/netfilter/nf_conntrack_helper.c | 13
-rw-r--r--  net/netfilter/nf_conntrack_netlink.c | 9
-rw-r--r--  net/netfilter/nf_conntrack_sip.c | 8
-rw-r--r--  net/netfilter/nf_nat_core.c | 3
-rw-r--r--  net/netfilter/nfnetlink_cthelper.c | 17
-rw-r--r--  net/netfilter/nfnetlink_log.c | 7
-rw-r--r--  net/netfilter/nfnetlink_queue_core.c | 10
-rw-r--r--  net/netfilter/xt_RATEEST.c | 3
-rw-r--r--  net/netfilter/xt_connlimit.c | 8
-rw-r--r--  net/netfilter/xt_hashlimit.c | 16
-rw-r--r--  net/netlink/af_netlink.c | 30
-rw-r--r--  net/netrom/af_netrom.c | 12
-rw-r--r--  net/netrom/nr_route.c | 30
-rw-r--r--  net/nfc/llcp/llcp.c | 16
-rw-r--r--  net/openvswitch/datapath.c | 10
-rw-r--r--  net/openvswitch/flow.c | 13
-rw-r--r--  net/openvswitch/vport.c | 3
-rw-r--r--  net/packet/af_packet.c | 3
-rw-r--r--  net/packet/diag.c | 3
-rw-r--r--  net/phonet/pep.c | 3
-rw-r--r--  net/phonet/socket.c | 9
-rw-r--r--  net/rds/bind.c | 3
-rw-r--r--  net/rds/connection.c | 9
-rw-r--r--  net/rose/af_rose.c | 14
-rw-r--r--  net/sched/sch_api.c | 4
-rw-r--r--  net/sched/sch_cbq.c | 18
-rw-r--r--  net/sched/sch_drr.c | 10
-rw-r--r--  net/sched/sch_hfsc.c | 15
-rw-r--r--  net/sched/sch_htb.c | 12
-rw-r--r--  net/sched/sch_qfq.c | 16
-rw-r--r--  net/sctp/associola.c | 31
-rw-r--r--  net/sctp/endpointola.c | 3
-rw-r--r--  net/sctp/input.c | 6
-rw-r--r--  net/sctp/proc.c | 9
-rw-r--r--  net/sctp/socket.c | 9
-rw-r--r--  net/sunrpc/auth.c | 5
-rw-r--r--  net/sunrpc/cache.c | 4
-rw-r--r--  net/sunrpc/svcauth.c | 3
-rw-r--r--  net/tipc/name_table.c | 8
-rw-r--r--  net/tipc/node.c | 3
-rw-r--r--  net/unix/af_unix.c | 6
-rw-r--r--  net/unix/diag.c | 7
-rw-r--r--  net/x25/af_x25.c | 12
-rw-r--r--  net/xfrm/xfrm_policy.c | 47
-rw-r--r--  net/xfrm/xfrm_state.c | 42
-rwxr-xr-x  scripts/checkpatch.pl | 2
-rwxr-xr-x  scripts/get_maintainer.pl | 4
-rwxr-xr-x  scripts/kernel-doc | 2
-rw-r--r--  security/integrity/ima/ima_queue.c | 3
-rw-r--r--  security/selinux/avc.c | 19
-rw-r--r--  tools/perf/util/evlist.c | 3
-rw-r--r--  tools/testing/selftests/Makefile | 8
-rw-r--r--  tools/testing/selftests/README.txt | 42
-rw-r--r--  tools/testing/selftests/efivarfs/Makefile | 12
-rw-r--r--  tools/testing/selftests/efivarfs/create-read.c | 38
-rw-r--r--  tools/testing/selftests/efivarfs/efivarfs.sh | 139
-rw-r--r--  tools/testing/selftests/efivarfs/open-unlink.c | 63
-rw-r--r--  virt/kvm/eventfd.c | 3
-rw-r--r--  virt/kvm/irq_comm.c | 18
429 files changed, 5226 insertions(+), 3493 deletions(-)
diff --git a/Documentation/DMA-API-HOWTO.txt b/Documentation/DMA-API-HOWTO.txt
index 4a4fb295ceef..14129f149a75 100644
--- a/Documentation/DMA-API-HOWTO.txt
+++ b/Documentation/DMA-API-HOWTO.txt
@@ -488,9 +488,10 @@ will invoke the generic mapping error check interface. Doing so will ensure
 that the mapping code will work correctly on all dma implementations without
 any dependency on the specifics of the underlying implementation. Using the
 returned address without checking for errors could result in failures ranging
-from panics to silent data corruption. Couple of example of incorrect ways to
-check for errors that make assumptions about the underlying dma implementation
-are as follows and these are applicable to dma_map_page() as well.
+from panics to silent data corruption. A couple of examples of incorrect ways
+to check for errors that make assumptions about the underlying dma
+implementation are as follows and these are applicable to dma_map_page() as
+well.
 
 Incorrect example 1:
 	dma_addr_t dma_handle;
@@ -751,7 +752,7 @@ Example 1:
 	dma_unmap_single(dma_handle1);
 map_error_handling1:
 
-Example 2: (if buffers are allocated a loop, unmap all mapped buffers when
+Example 2: (if buffers are allocated in a loop, unmap all mapped buffers when
 	mapping error is detected in the middle)
 
 	dma_addr_t dma_addr;
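The hunks above only quote the incorrect patterns; for contrast, a minimal sketch of the check the document recommends, assuming the usual dma_map_single()/dma_mapping_error() pair (the dev, addr, size and direction names here are illustrative, not from the patch):

	dma_addr_t dma_handle;

	dma_handle = dma_map_single(dev, addr, size, direction);
	if (dma_mapping_error(dev, dma_handle)) {
		/*
		 * The mapping failed: dma_handle holds no usable address,
		 * so unwind any earlier mappings and bail out.
		 */
		goto map_error_handling;
	}

Passing the device to dma_mapping_error() is what lets each DMA implementation apply its own notion of an invalid handle, which is the portability point the text is making.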
diff --git a/Documentation/IPMI.txt b/Documentation/IPMI.txt
index 16eb4c9e9233..f13c9132e9f2 100644
--- a/Documentation/IPMI.txt
+++ b/Documentation/IPMI.txt
@@ -348,34 +348,40 @@ You can change this at module load time (for a module) with:
 
 	modprobe ipmi_si.o type=<type1>,<type2>....
 	     ports=<port1>,<port2>... addrs=<addr1>,<addr2>...
-	     irqs=<irq1>,<irq2>... trydefaults=[0|1]
+	     irqs=<irq1>,<irq2>...
 	     regspacings=<sp1>,<sp2>,... regsizes=<size1>,<size2>,...
 	     regshifts=<shift1>,<shift2>,...
 	     slave_addrs=<addr1>,<addr2>,...
 	     force_kipmid=<enable1>,<enable2>,...
 	     kipmid_max_busy_us=<ustime1>,<ustime2>,...
 	     unload_when_empty=[0|1]
+	     trydefaults=[0|1] trydmi=[0|1] tryacpi=[0|1]
+	     tryplatform=[0|1] trypci=[0|1]
 
-Each of these except si_trydefaults is a list, the first item for the
+Each of these except try... items is a list, the first item for the
 first interface, second item for the second interface, etc.
 
 The si_type may be either "kcs", "smic", or "bt". If you leave it blank, it
 defaults to "kcs".
 
-If you specify si_addrs as non-zero for an interface, the driver will
+If you specify addrs as non-zero for an interface, the driver will
 use the memory address given as the address of the device. This
 overrides si_ports.
 
-If you specify si_ports as non-zero for an interface, the driver will
+If you specify ports as non-zero for an interface, the driver will
 use the I/O port given as the device address.
 
-If you specify si_irqs as non-zero for an interface, the driver will
+If you specify irqs as non-zero for an interface, the driver will
 attempt to use the given interrupt for the device.
 
-si_trydefaults sets whether the standard IPMI interface at 0xca2 and
+trydefaults sets whether the standard IPMI interface at 0xca2 and
 any interfaces specified by ACPE are tried. By default, the driver
 tries it, set this value to zero to turn this off.
 
+The other try... items disable discovery by their corresponding
+names. These are all enabled by default, set them to zero to disable
+them. The tryplatform disables openfirmware.
+
 The next three parameters have to do with register layout. The
 registers used by the interfaces may not appear at successive
 locations and they may not be in 8-bit registers. These parameters
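For illustration, a load line exercising the parameters this hunk documents (the values are made up for the example; only the parameter names come from the text above):

	modprobe ipmi_si type=kcs ports=0xca2 regspacings=1 trydefaults=0

This would register one KCS interface at I/O port 0xca2 while turning off probing of the standard default location, matching the trydefaults semantics described above.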
diff --git a/Documentation/blockdev/nbd.txt b/Documentation/blockdev/nbd.txt
index aeb93ffe6416..271e607304da 100644
--- a/Documentation/blockdev/nbd.txt
+++ b/Documentation/blockdev/nbd.txt
@@ -4,43 +4,13 @@
 can use a remote server as one of its block devices. So every time
 the client computer wants to read, e.g., /dev/nb0, it sends a
 request over TCP to the server, which will reply with the data read.
-This can be used for stations with low disk space (or even diskless -
-if you boot from floppy) to borrow disk space from another computer.
-Unlike NFS, it is possible to put any filesystem on it, etc. It should
-even be possible to use NBD as a root filesystem (I've never tried),
-but it requires a user-level program to be in the initrd to start.
-It also allows you to run block-device in user land (making server
-and client physically the same computer, communicating using loopback).
-
-Current state: It currently works. Network block device is stable.
-I originally thought that it was impossible to swap over TCP. It
-turned out not to be true - swapping over TCP now works and seems
-to be deadlock-free, but it requires heavy patches into Linux's
-network layer.
-
+This can be used for stations with low disk space (or even diskless)
+to borrow disk space from another computer.
+Unlike NFS, it is possible to put any filesystem on it, etc.
+
 For more information, or to download the nbd-client and nbd-server
 tools, go to http://nbd.sf.net/.
 
-Howto: To setup nbd, you can simply do the following:
-
-First, serve a device or file from a remote server:
-
-   nbd-server <port-number> <device-or-file-to-serve-to-client>
-
-e.g.,
-	root@server1 # nbd-server 1234 /dev/sdb1
-
-(serves sdb1 partition on TCP port 1234)
-
-Then, on the local (client) system:
-
-   nbd-client <server-name-or-IP> <server-port-number> /dev/nb[0-n]
-
-e.g.,
-	root@client1 # nbd-client server1 1234 /dev/nb0
-
-(creates the nb0 device on client1)
-
 The nbd kernel module need only be installed on the client
 system, as the nbd-server is completely in userspace. In fact,
 the nbd-server has been successfully ported to other operating
diff --git a/Documentation/cgroups/blkio-controller.txt b/Documentation/cgroups/blkio-controller.txt
index b4b1fb3a83f0..a794ce91a2d5 100644
--- a/Documentation/cgroups/blkio-controller.txt
+++ b/Documentation/cgroups/blkio-controller.txt
@@ -75,7 +75,7 @@ Throttling/Upper Limit policy
 	mount -t cgroup -o blkio none /sys/fs/cgroup/blkio
 
 - Specify a bandwidth rate on particular device for root group. The format
-  for policy is "<major>:<minor> <byes_per_second>".
+  for policy is "<major>:<minor> <bytes_per_second>".
 
   echo "8:16 1048576" > /sys/fs/cgroup/blkio/blkio.throttle.read_bps_device
 
diff --git a/MAINTAINERS b/MAINTAINERS
index 1e5c3a4ece7b..0b4bb157a482 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -97,12 +97,13 @@ Descriptions of section entries:
 	   X:	net/ipv6/
 	   matches all files in and below net excluding net/ipv6/
 	K: Keyword perl extended regex pattern to match content in a
-	   patch or file. For instance:
+	   patch or file, or an affected filename. For instance:
 	   K: of_get_profile
-	   matches patches or files that contain "of_get_profile"
+	   matches patch or file content, or filenames, that contain
+	   "of_get_profile"
 	   K: \b(printk|pr_(info|err))\b
-	   matches patches or files that contain one or more of the words
-	   printk, pr_info or pr_err
+	   matches patch or file content, or filenames, that contain one or
+	   more of the words printk, pr_info or pr_err
 	   One regex pattern per line. Multiple K: lines acceptable.
 
 Note: For the hard of thinking, this list is meant to remain in alphabetical
@@ -5437,6 +5438,7 @@ F: net/netrom/
 NETWORK BLOCK DEVICE (NBD)
 M:	Paul Clements <Paul.Clements@steeleye.com>
 S:	Maintained
+L:	nbd-general@lists.sourceforge.net
 F:	Documentation/blockdev/nbd.txt
 F:	drivers/block/nbd.c
 F:	include/linux/nbd.h
@@ -7539,6 +7541,7 @@ STAGING - NVIDIA COMPLIANT EMBEDDED CONTROLLER INTERFACE (nvec)
 M:	Julian Andres Klode <jak@jak-linux.org>
 M:	Marc Dietrich <marvin24@gmx.de>
 L:	ac100@lists.launchpad.net (moderated for non-subscribers)
+L:	linux-tegra@vger.kernel.org
 S:	Maintained
 F:	drivers/staging/nvec/
 
@@ -7831,9 +7834,7 @@ L: linux-tegra@vger.kernel.org
 Q:	http://patchwork.ozlabs.org/project/linux-tegra/list/
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/swarren/linux-tegra.git
 S:	Supported
-F:	arch/arm/mach-tegra
-F:	arch/arm/boot/dts/tegra*
-F:	arch/arm/configs/tegra_defconfig
+K:	(?i)[^a-z]tegra
 
 TEHUTI ETHERNET DRIVER
 M:	Andy Gospodarek <andy@greyhouse.net>
diff --git a/arch/Kconfig b/arch/Kconfig
index 40e2b12c7916..dcd91a85536a 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -303,6 +303,13 @@ config ARCH_WANT_OLD_COMPAT_IPC
 	select ARCH_WANT_COMPAT_IPC_PARSE_VERSION
 	bool
 
+config HAVE_VIRT_TO_BUS
+	bool
+	help
+	  An architecture should select this if it implements the
+	  deprecated interface virt_to_bus(). All new architectures
+	  should probably not select this.
+
 config HAVE_ARCH_SECCOMP_FILTER
 	bool
 	help
diff --git a/arch/alpha/Kconfig b/arch/alpha/Kconfig
index 1ecbf7a1b677..5833aa441481 100644
--- a/arch/alpha/Kconfig
+++ b/arch/alpha/Kconfig
@@ -9,6 +9,7 @@ config ALPHA
 	select HAVE_PERF_EVENTS
 	select HAVE_DMA_ATTRS
 	select HAVE_GENERIC_HARDIRQS
+	select HAVE_VIRT_TO_BUS
 	select GENERIC_IRQ_PROBE
 	select AUTO_IRQ_AFFINITY if SMP
 	select GENERIC_IRQ_SHOW
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 6ec8eb3149ea..0e16cca1d011 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -49,6 +49,7 @@ config ARM
 	select HAVE_REGS_AND_STACK_ACCESS_API
 	select HAVE_SYSCALL_TRACEPOINTS
 	select HAVE_UID16
+	select HAVE_VIRT_TO_BUS
 	select KTIME_SCALAR
 	select PERF_USE_VMALLOC
 	select RTC_LIB
diff --git a/arch/arm/kernel/kprobes.c b/arch/arm/kernel/kprobes.c
index 4dd41fc9e235..170e9f34003f 100644
--- a/arch/arm/kernel/kprobes.c
+++ b/arch/arm/kernel/kprobes.c
@@ -395,7 +395,7 @@ static __used __kprobes void *trampoline_handler(struct pt_regs *regs)
 {
 	struct kretprobe_instance *ri = NULL;
 	struct hlist_head *head, empty_rp;
-	struct hlist_node *node, *tmp;
+	struct hlist_node *tmp;
 	unsigned long flags, orig_ret_address = 0;
 	unsigned long trampoline_address = (unsigned long)&kretprobe_trampoline;
 
@@ -415,7 +415,7 @@ static __used __kprobes void *trampoline_handler(struct pt_regs *regs)
 	 * real return address, and all the rest will point to
 	 * kretprobe_trampoline
 	 */
-	hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
+	hlist_for_each_entry_safe(ri, tmp, head, hlist) {
 		if (ri->task != current)
 			/* another task is sharing our hash bucket */
 			continue;
@@ -442,7 +442,7 @@ static __used __kprobes void *trampoline_handler(struct pt_regs *regs)
 	kretprobe_assert(ri, orig_ret_address, trampoline_address);
 	kretprobe_hash_unlock(current, &flags);
 
-	hlist_for_each_entry_safe(ri, node, tmp, &empty_rp, hlist) {
+	hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {
 		hlist_del(&ri->hlist);
 		kfree(ri);
 	}
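This file is the first of many below converted by the "hlist: drop the node parameter from iterators" patch, and the change is mechanical everywhere. Reduced to a sketch (entry type and names taken from the hunks above), the calling convention shifts like this:

	/* before: a separate struct hlist_node cursor had to be declared
	   and passed */
	struct hlist_node *node, *tmp;
	hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
		hlist_del(&ri->hlist);
		kfree(ri);
	}

	/* after: the entry pointer itself is the cursor; only the lookahead
	   pointer needed for safe deletion remains */
	struct hlist_node *tmp;
	hlist_for_each_entry_safe(ri, tmp, head, hlist) {
		hlist_del(&ri->hlist);
		kfree(ri);
	}

The same one-argument reduction applies to the plain and RCU variants seen in later hunks.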
diff --git a/arch/avr32/Kconfig b/arch/avr32/Kconfig
index 2ae6591b3a55..9b89257b2cfd 100644
--- a/arch/avr32/Kconfig
+++ b/arch/avr32/Kconfig
@@ -7,6 +7,7 @@ config AVR32
 	select HAVE_OPROFILE
 	select HAVE_KPROBES
 	select HAVE_GENERIC_HARDIRQS
+	select HAVE_VIRT_TO_BUS
 	select GENERIC_IRQ_PROBE
 	select GENERIC_ATOMIC64
 	select HARDIRQS_SW_RESEND
diff --git a/arch/blackfin/Kconfig b/arch/blackfin/Kconfig
index e98f3248c8aa..600494c70e96 100644
--- a/arch/blackfin/Kconfig
+++ b/arch/blackfin/Kconfig
@@ -33,6 +33,7 @@ config BLACKFIN
 	select ARCH_HAVE_CUSTOM_GPIO_H
 	select ARCH_WANT_OPTIONAL_GPIOLIB
 	select HAVE_UID16
+	select HAVE_VIRT_TO_BUS
 	select ARCH_WANT_IPC_PARSE_VERSION
 	select HAVE_GENERIC_HARDIRQS
 	select GENERIC_ATOMIC64
diff --git a/arch/cris/Kconfig b/arch/cris/Kconfig
index 0e5c187ac7d2..bb0ac66cf533 100644
--- a/arch/cris/Kconfig
+++ b/arch/cris/Kconfig
@@ -43,6 +43,7 @@ config CRIS
 	select GENERIC_ATOMIC64
 	select HAVE_GENERIC_HARDIRQS
 	select HAVE_UID16
+	select HAVE_VIRT_TO_BUS
 	select ARCH_WANT_IPC_PARSE_VERSION
 	select GENERIC_IRQ_SHOW
 	select GENERIC_IOMAP
diff --git a/arch/frv/Kconfig b/arch/frv/Kconfig
index 2d0509d4cfee..12369b194c7b 100644
--- a/arch/frv/Kconfig
+++ b/arch/frv/Kconfig
@@ -6,6 +6,7 @@ config FRV
 	select HAVE_PERF_EVENTS
 	select HAVE_UID16
 	select HAVE_GENERIC_HARDIRQS
+	select HAVE_VIRT_TO_BUS
 	select GENERIC_IRQ_SHOW
 	select HAVE_DEBUG_BUGVERBOSE
 	select ARCH_HAVE_NMI_SAFE_CMPXCHG
diff --git a/arch/frv/mm/elf-fdpic.c b/arch/frv/mm/elf-fdpic.c
index 385fd30b142f..836f14707a62 100644
--- a/arch/frv/mm/elf-fdpic.c
+++ b/arch/frv/mm/elf-fdpic.c
@@ -60,7 +60,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
 			 unsigned long pgoff, unsigned long flags)
 {
 	struct vm_area_struct *vma;
-	unsigned long limit;
+	struct vm_unmapped_area_info info;
 
 	if (len > TASK_SIZE)
 		return -ENOMEM;
@@ -79,39 +79,24 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
 	}
 
 	/* search between the bottom of user VM and the stack grow area */
-	addr = PAGE_SIZE;
-	limit = (current->mm->start_stack - 0x00200000);
-	if (addr + len <= limit) {
-		limit -= len;
-
-		if (addr <= limit) {
-			vma = find_vma(current->mm, PAGE_SIZE);
-			for (; vma; vma = vma->vm_next) {
-				if (addr > limit)
-					break;
-				if (addr + len <= vma->vm_start)
-					goto success;
-				addr = vma->vm_end;
-			}
-		}
-	}
+	info.flags = 0;
+	info.length = len;
+	info.low_limit = PAGE_SIZE;
+	info.high_limit = (current->mm->start_stack - 0x00200000);
+	info.align_mask = 0;
+	info.align_offset = 0;
+	addr = vm_unmapped_area(&info);
+	if (!(addr & ~PAGE_MASK))
+		goto success;
+	VM_BUG_ON(addr != -ENOMEM);
 
 	/* search from just above the WorkRAM area to the top of memory */
-	addr = PAGE_ALIGN(0x80000000);
-	limit = TASK_SIZE - len;
-	if (addr <= limit) {
-		vma = find_vma(current->mm, addr);
-		for (; vma; vma = vma->vm_next) {
-			if (addr > limit)
-				break;
-			if (addr + len <= vma->vm_start)
-				goto success;
-			addr = vma->vm_end;
-		}
-
-		if (!vma && addr <= limit)
-			goto success;
-	}
+	info.low_limit = PAGE_ALIGN(0x80000000);
+	info.high_limit = TASK_SIZE;
+	addr = vm_unmapped_area(&info);
+	if (!(addr & ~PAGE_MASK))
+		goto success;
+	VM_BUG_ON(addr != -ENOMEM);
 
 #if 0
 	printk("[area] l=%lx (ENOMEM) f='%s'\n",
diff --git a/arch/h8300/Kconfig b/arch/h8300/Kconfig
index 05b613af223a..ae8551eb3736 100644
--- a/arch/h8300/Kconfig
+++ b/arch/h8300/Kconfig
@@ -5,6 +5,7 @@ config H8300
 	select HAVE_GENERIC_HARDIRQS
 	select GENERIC_ATOMIC64
 	select HAVE_UID16
+	select HAVE_VIRT_TO_BUS
 	select ARCH_WANT_IPC_PARSE_VERSION
 	select GENERIC_IRQ_SHOW
 	select GENERIC_CPU_DEVICES
diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
index c90366ef7183..33f3fdc0b214 100644
--- a/arch/ia64/Kconfig
+++ b/arch/ia64/Kconfig
@@ -26,6 +26,7 @@ config IA64
 	select HAVE_MEMBLOCK
 	select HAVE_MEMBLOCK_NODE_MAP
 	select HAVE_VIRT_CPU_ACCOUNTING
+	select HAVE_VIRT_TO_BUS
 	select ARCH_DISCARD_MEMBLOCK
 	select GENERIC_IRQ_PROBE
 	select GENERIC_PENDING_IRQ if SMP
diff --git a/arch/ia64/kernel/kprobes.c b/arch/ia64/kernel/kprobes.c
index 7026b29e277a..f8280a766a78 100644
--- a/arch/ia64/kernel/kprobes.c
+++ b/arch/ia64/kernel/kprobes.c
@@ -423,7 +423,7 @@ int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
 {
 	struct kretprobe_instance *ri = NULL;
 	struct hlist_head *head, empty_rp;
-	struct hlist_node *node, *tmp;
+	struct hlist_node *tmp;
 	unsigned long flags, orig_ret_address = 0;
 	unsigned long trampoline_address =
 		((struct fnptr *)kretprobe_trampoline)->ip;
@@ -444,7 +444,7 @@ int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
 	 * real return address, and all the rest will point to
 	 * kretprobe_trampoline
 	 */
-	hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
+	hlist_for_each_entry_safe(ri, tmp, head, hlist) {
 		if (ri->task != current)
 			/* another task is sharing our hash bucket */
 			continue;
@@ -461,7 +461,7 @@ int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
 
 	regs->cr_iip = orig_ret_address;
 
-	hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
+	hlist_for_each_entry_safe(ri, tmp, head, hlist) {
 		if (ri->task != current)
 			/* another task is sharing our hash bucket */
 			continue;
@@ -487,7 +487,7 @@ int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
 	kretprobe_hash_unlock(current, &flags);
 	preempt_enable_no_resched();
 
-	hlist_for_each_entry_safe(ri, node, tmp, &empty_rp, hlist) {
+	hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {
 		hlist_del(&ri->hlist);
 		kfree(ri);
 	}
diff --git a/arch/m32r/Kconfig b/arch/m32r/Kconfig
index f807721e19a5..92623818a1fe 100644
--- a/arch/m32r/Kconfig
+++ b/arch/m32r/Kconfig
@@ -10,6 +10,7 @@ config M32R
 	select ARCH_WANT_IPC_PARSE_VERSION
 	select HAVE_DEBUG_BUGVERBOSE
 	select HAVE_GENERIC_HARDIRQS
+	select HAVE_VIRT_TO_BUS
 	select GENERIC_IRQ_PROBE
 	select GENERIC_IRQ_SHOW
 	select GENERIC_ATOMIC64
diff --git a/arch/m68k/Kconfig b/arch/m68k/Kconfig
index efb1ce1f14a3..0e708c78e01c 100644
--- a/arch/m68k/Kconfig
+++ b/arch/m68k/Kconfig
@@ -8,6 +8,7 @@ config M68K
 	select GENERIC_IRQ_SHOW
 	select GENERIC_ATOMIC64
 	select HAVE_UID16
+	select HAVE_VIRT_TO_BUS
 	select ARCH_HAVE_NMI_SAFE_CMPXCHG if RMW_INSNS
 	select GENERIC_CPU_DEVICES
 	select GENERIC_STRNCPY_FROM_USER if MMU
diff --git a/arch/microblaze/Kconfig b/arch/microblaze/Kconfig
index ba3b7c8c04b8..7843d11156e6 100644
--- a/arch/microblaze/Kconfig
+++ b/arch/microblaze/Kconfig
@@ -19,6 +19,7 @@ config MICROBLAZE
 	select HAVE_DEBUG_KMEMLEAK
 	select IRQ_DOMAIN
 	select HAVE_GENERIC_HARDIRQS
+	select HAVE_VIRT_TO_BUS
 	select GENERIC_IRQ_PROBE
 	select GENERIC_IRQ_SHOW
 	select GENERIC_PCI_IOMAP
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index 198641589bb5..1eabe5753215 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -38,6 +38,7 @@ config MIPS
 	select GENERIC_CLOCKEVENTS
 	select GENERIC_CMOS_UPDATE
 	select HAVE_MOD_ARCH_SPECIFIC
+	select HAVE_VIRT_TO_BUS
 	select MODULES_USE_ELF_REL if MODULES
 	select MODULES_USE_ELF_RELA if MODULES && 64BIT
 	select CLONE_BACKWARDS
diff --git a/arch/mips/kernel/kprobes.c b/arch/mips/kernel/kprobes.c
index 158467da9bc1..ce3f0807ad1e 100644
--- a/arch/mips/kernel/kprobes.c
+++ b/arch/mips/kernel/kprobes.c
@@ -598,7 +598,7 @@ static int __kprobes trampoline_probe_handler(struct kprobe *p,
 {
 	struct kretprobe_instance *ri = NULL;
 	struct hlist_head *head, empty_rp;
-	struct hlist_node *node, *tmp;
+	struct hlist_node *tmp;
 	unsigned long flags, orig_ret_address = 0;
 	unsigned long trampoline_address = (unsigned long)kretprobe_trampoline;
 
@@ -618,7 +618,7 @@ static int __kprobes trampoline_probe_handler(struct kprobe *p,
 	 * real return address, and all the rest will point to
 	 * kretprobe_trampoline
 	 */
-	hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
+	hlist_for_each_entry_safe(ri, tmp, head, hlist) {
 		if (ri->task != current)
 			/* another task is sharing our hash bucket */
 			continue;
@@ -645,7 +645,7 @@ static int __kprobes trampoline_probe_handler(struct kprobe *p,
 	kretprobe_hash_unlock(current, &flags);
 	preempt_enable_no_resched();
 
-	hlist_for_each_entry_safe(ri, node, tmp, &empty_rp, hlist) {
+	hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {
 		hlist_del(&ri->hlist);
 		kfree(ri);
 	}
diff --git a/arch/mn10300/Kconfig b/arch/mn10300/Kconfig
index ad0caea0bfea..b06c7360b1c6 100644
--- a/arch/mn10300/Kconfig
+++ b/arch/mn10300/Kconfig
@@ -8,6 +8,7 @@ config MN10300
 	select HAVE_ARCH_KGDB
 	select GENERIC_ATOMIC64
 	select HAVE_NMI_WATCHDOG if MN10300_WD_TIMER
+	select HAVE_VIRT_TO_BUS
 	select GENERIC_CLOCKEVENTS
 	select MODULES_USE_ELF_RELA
 	select OLD_SIGSUSPEND3
diff --git a/arch/openrisc/Kconfig b/arch/openrisc/Kconfig
index 35a4e5f6e71c..014a6482ed4c 100644
--- a/arch/openrisc/Kconfig
+++ b/arch/openrisc/Kconfig
@@ -12,6 +12,7 @@ config OPENRISC
 	select ARCH_WANT_OPTIONAL_GPIOLIB
 	select HAVE_ARCH_TRACEHOOK
 	select HAVE_GENERIC_HARDIRQS
+	select HAVE_VIRT_TO_BUS
 	select GENERIC_IRQ_CHIP
 	select GENERIC_IRQ_PROBE
 	select GENERIC_IRQ_SHOW
diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig
index 7f9b3c53f74a..4d5ea7648574 100644
--- a/arch/parisc/Kconfig
+++ b/arch/parisc/Kconfig
@@ -19,6 +19,7 @@ config PARISC
 	select GENERIC_SMP_IDLE_THREAD
 	select GENERIC_STRNCPY_FROM_USER
 	select HAVE_MOD_ARCH_SPECIFIC
+	select HAVE_VIRT_TO_BUS
 	select MODULES_USE_ELF_RELA
 	select CLONE_BACKWARDS
 	select TTY # Needed for pdc_cons.c
diff --git a/arch/parisc/kernel/sys_parisc.c b/arch/parisc/kernel/sys_parisc.c
index 54d619d4cac6..5dfd248e3f1a 100644
--- a/arch/parisc/kernel/sys_parisc.c
+++ b/arch/parisc/kernel/sys_parisc.c
@@ -35,22 +35,17 @@
 
 static unsigned long get_unshared_area(unsigned long addr, unsigned long len)
 {
-	struct vm_area_struct *vma;
+	struct vm_unmapped_area_info info;
 
-	addr = PAGE_ALIGN(addr);
-
-	for (vma = find_vma(current->mm, addr); ; vma = vma->vm_next) {
-		/* At this point:  (!vma || addr < vma->vm_end). */
-		if (TASK_SIZE - len < addr)
-			return -ENOMEM;
-		if (!vma || addr + len <= vma->vm_start)
-			return addr;
-		addr = vma->vm_end;
-	}
+	info.flags = 0;
+	info.length = len;
+	info.low_limit = PAGE_ALIGN(addr);
+	info.high_limit = TASK_SIZE;
+	info.align_mask = 0;
+	info.align_offset = 0;
+	return vm_unmapped_area(&info);
 }
 
-#define DCACHE_ALIGN(addr) (((addr) + (SHMLBA - 1)) &~ (SHMLBA - 1))
-
 /*
  * We need to know the offset to use.  Old scheme was to look for
  * existing mapping and use the same offset.  New scheme is to use the
@@ -63,30 +58,21 @@ static unsigned long get_unshared_area(unsigned long addr, unsigned long len)
  */
 static int get_offset(struct address_space *mapping)
 {
-	int offset = (unsigned long) mapping << (PAGE_SHIFT - 8);
-	return offset & 0x3FF000;
+	return (unsigned long) mapping >> 8;
 }
 
 static unsigned long get_shared_area(struct address_space *mapping,
 		unsigned long addr, unsigned long len, unsigned long pgoff)
 {
-	struct vm_area_struct *vma;
-	int offset = mapping ? get_offset(mapping) : 0;
+	struct vm_unmapped_area_info info;
 
-	offset = (offset + (pgoff << PAGE_SHIFT)) & 0x3FF000;
-
-	addr = DCACHE_ALIGN(addr - offset) + offset;
-
-	for (vma = find_vma(current->mm, addr); ; vma = vma->vm_next) {
-		/* At this point:  (!vma || addr < vma->vm_end). */
-		if (TASK_SIZE - len < addr)
-			return -ENOMEM;
-		if (!vma || addr + len <= vma->vm_start)
-			return addr;
-		addr = DCACHE_ALIGN(vma->vm_end - offset) + offset;
-		if (addr < vma->vm_end) /* handle wraparound */
-			return -ENOMEM;
-	}
+	info.flags = 0;
+	info.length = len;
+	info.low_limit = PAGE_ALIGN(addr);
+	info.high_limit = TASK_SIZE;
+	info.align_mask = PAGE_MASK & (SHMLBA - 1);
+	info.align_offset = (get_offset(mapping) + pgoff) << PAGE_SHIFT;
+	return vm_unmapped_area(&info);
 }
 
 unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
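This conversion and the frv one earlier share a shape: the open-coded find_vma() walk is replaced by filling in a struct vm_unmapped_area_info and letting the core allocator do the search. Condensed from the hunks above (the limits shown are the frv bottom-up case):

	struct vm_unmapped_area_info info;
	unsigned long addr;

	info.flags = 0;			/* bottom-up search */
	info.length = len;
	info.low_limit = PAGE_SIZE;
	info.high_limit = TASK_SIZE;
	info.align_mask = 0;		/* no alignment constraint */
	info.align_offset = 0;
	addr = vm_unmapped_area(&info);
	if (addr & ~PAGE_MASK)		/* failure returns a negative errno */
		return addr;		/* here: -ENOMEM */

The parisc get_shared_area() variant above shows how cache-coloring constraints map onto align_mask/align_offset instead of the hand-rolled DCACHE_ALIGN() arithmetic.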
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 5c7470689a10..b89d7eb730a2 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -87,9 +87,6 @@ config GENERIC_GPIO
 	help
 	  Generic GPIO API support
 
-config ARCH_NO_VIRT_TO_BUS
-	def_bool PPC64
-
 config PPC
 	bool
 	default y
@@ -101,6 +98,7 @@ config PPC
 	select HAVE_FUNCTION_GRAPH_TRACER
 	select SYSCTL_EXCEPTION_TRACE
 	select ARCH_WANT_OPTIONAL_GPIOLIB
+	select HAVE_VIRT_TO_BUS if !PPC64
 	select HAVE_IDE
 	select HAVE_IOREMAP_PROT
 	select HAVE_EFFICIENT_UNALIGNED_ACCESS
diff --git a/arch/powerpc/kernel/kprobes.c b/arch/powerpc/kernel/kprobes.c
index e88c64331819..11f5b03a0b06 100644
--- a/arch/powerpc/kernel/kprobes.c
+++ b/arch/powerpc/kernel/kprobes.c
@@ -310,7 +310,7 @@ static int __kprobes trampoline_probe_handler(struct kprobe *p,
 {
 	struct kretprobe_instance *ri = NULL;
 	struct hlist_head *head, empty_rp;
-	struct hlist_node *node, *tmp;
+	struct hlist_node *tmp;
 	unsigned long flags, orig_ret_address = 0;
 	unsigned long trampoline_address =(unsigned long)&kretprobe_trampoline;
 
@@ -330,7 +330,7 @@ static int __kprobes trampoline_probe_handler(struct kprobe *p,
 	 * real return address, and all the rest will point to
 	 * kretprobe_trampoline
 	 */
-	hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
+	hlist_for_each_entry_safe(ri, tmp, head, hlist) {
 		if (ri->task != current)
 			/* another task is sharing our hash bucket */
 			continue;
@@ -357,7 +357,7 @@ static int __kprobes trampoline_probe_handler(struct kprobe *p,
 	kretprobe_hash_unlock(current, &flags);
 	preempt_enable_no_resched();
 
-	hlist_for_each_entry_safe(ri, node, tmp, &empty_rp, hlist) {
+	hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {
 		hlist_del(&ri->hlist);
 		kfree(ri);
 	}
diff --git a/arch/powerpc/kvm/book3s_mmu_hpte.c b/arch/powerpc/kvm/book3s_mmu_hpte.c
index 2c86b0d63714..da8b13c4b776 100644
--- a/arch/powerpc/kvm/book3s_mmu_hpte.c
+++ b/arch/powerpc/kvm/book3s_mmu_hpte.c
@@ -124,7 +124,6 @@ static void kvmppc_mmu_pte_flush_all(struct kvm_vcpu *vcpu)
124{ 124{
125 struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu); 125 struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
126 struct hpte_cache *pte; 126 struct hpte_cache *pte;
127 struct hlist_node *node;
128 int i; 127 int i;
129 128
130 rcu_read_lock(); 129 rcu_read_lock();
@@ -132,7 +131,7 @@ static void kvmppc_mmu_pte_flush_all(struct kvm_vcpu *vcpu)
132 for (i = 0; i < HPTEG_HASH_NUM_VPTE_LONG; i++) { 131 for (i = 0; i < HPTEG_HASH_NUM_VPTE_LONG; i++) {
133 struct hlist_head *list = &vcpu3s->hpte_hash_vpte_long[i]; 132 struct hlist_head *list = &vcpu3s->hpte_hash_vpte_long[i];
134 133
135 hlist_for_each_entry_rcu(pte, node, list, list_vpte_long) 134 hlist_for_each_entry_rcu(pte, list, list_vpte_long)
136 invalidate_pte(vcpu, pte); 135 invalidate_pte(vcpu, pte);
137 } 136 }
138 137
@@ -143,7 +142,6 @@ static void kvmppc_mmu_pte_flush_page(struct kvm_vcpu *vcpu, ulong guest_ea)
143{ 142{
144 struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu); 143 struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
145 struct hlist_head *list; 144 struct hlist_head *list;
146 struct hlist_node *node;
147 struct hpte_cache *pte; 145 struct hpte_cache *pte;
148 146
149 /* Find the list of entries in the map */ 147 /* Find the list of entries in the map */
@@ -152,7 +150,7 @@ static void kvmppc_mmu_pte_flush_page(struct kvm_vcpu *vcpu, ulong guest_ea)
152 rcu_read_lock(); 150 rcu_read_lock();
153 151
154 /* Check the list for matching entries and invalidate */ 152 /* Check the list for matching entries and invalidate */
155 hlist_for_each_entry_rcu(pte, node, list, list_pte) 153 hlist_for_each_entry_rcu(pte, list, list_pte)
156 if ((pte->pte.eaddr & ~0xfffUL) == guest_ea) 154 if ((pte->pte.eaddr & ~0xfffUL) == guest_ea)
157 invalidate_pte(vcpu, pte); 155 invalidate_pte(vcpu, pte);
158 156
@@ -163,7 +161,6 @@ static void kvmppc_mmu_pte_flush_long(struct kvm_vcpu *vcpu, ulong guest_ea)
163{ 161{
164 struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu); 162 struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
165 struct hlist_head *list; 163 struct hlist_head *list;
166 struct hlist_node *node;
167 struct hpte_cache *pte; 164 struct hpte_cache *pte;
168 165
169 /* Find the list of entries in the map */ 166 /* Find the list of entries in the map */
@@ -173,7 +170,7 @@ static void kvmppc_mmu_pte_flush_long(struct kvm_vcpu *vcpu, ulong guest_ea)
173 rcu_read_lock(); 170 rcu_read_lock();
174 171
175 /* Check the list for matching entries and invalidate */ 172 /* Check the list for matching entries and invalidate */
176 hlist_for_each_entry_rcu(pte, node, list, list_pte_long) 173 hlist_for_each_entry_rcu(pte, list, list_pte_long)
177 if ((pte->pte.eaddr & 0x0ffff000UL) == guest_ea) 174 if ((pte->pte.eaddr & 0x0ffff000UL) == guest_ea)
178 invalidate_pte(vcpu, pte); 175 invalidate_pte(vcpu, pte);
179 176
@@ -207,7 +204,6 @@ static void kvmppc_mmu_pte_vflush_short(struct kvm_vcpu *vcpu, u64 guest_vp)
207{ 204{
208 struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu); 205 struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
209 struct hlist_head *list; 206 struct hlist_head *list;
210 struct hlist_node *node;
211 struct hpte_cache *pte; 207 struct hpte_cache *pte;
212 u64 vp_mask = 0xfffffffffULL; 208 u64 vp_mask = 0xfffffffffULL;
213 209
@@ -216,7 +212,7 @@ static void kvmppc_mmu_pte_vflush_short(struct kvm_vcpu *vcpu, u64 guest_vp)
216 rcu_read_lock(); 212 rcu_read_lock();
217 213
218 /* Check the list for matching entries and invalidate */ 214 /* Check the list for matching entries and invalidate */
219 hlist_for_each_entry_rcu(pte, node, list, list_vpte) 215 hlist_for_each_entry_rcu(pte, list, list_vpte)
220 if ((pte->pte.vpage & vp_mask) == guest_vp) 216 if ((pte->pte.vpage & vp_mask) == guest_vp)
221 invalidate_pte(vcpu, pte); 217 invalidate_pte(vcpu, pte);
222 218
@@ -228,7 +224,6 @@ static void kvmppc_mmu_pte_vflush_long(struct kvm_vcpu *vcpu, u64 guest_vp)
228{ 224{
229 struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu); 225 struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
230 struct hlist_head *list; 226 struct hlist_head *list;
231 struct hlist_node *node;
232 struct hpte_cache *pte; 227 struct hpte_cache *pte;
233 u64 vp_mask = 0xffffff000ULL; 228 u64 vp_mask = 0xffffff000ULL;
234 229
@@ -238,7 +233,7 @@ static void kvmppc_mmu_pte_vflush_long(struct kvm_vcpu *vcpu, u64 guest_vp)
238 rcu_read_lock(); 233 rcu_read_lock();
239 234
240 /* Check the list for matching entries and invalidate */ 235 /* Check the list for matching entries and invalidate */
241 hlist_for_each_entry_rcu(pte, node, list, list_vpte_long) 236 hlist_for_each_entry_rcu(pte, list, list_vpte_long)
242 if ((pte->pte.vpage & vp_mask) == guest_vp) 237 if ((pte->pte.vpage & vp_mask) == guest_vp)
243 invalidate_pte(vcpu, pte); 238 invalidate_pte(vcpu, pte);
244 239
@@ -266,7 +261,6 @@ void kvmppc_mmu_pte_vflush(struct kvm_vcpu *vcpu, u64 guest_vp, u64 vp_mask)
266void kvmppc_mmu_pte_pflush(struct kvm_vcpu *vcpu, ulong pa_start, ulong pa_end) 261void kvmppc_mmu_pte_pflush(struct kvm_vcpu *vcpu, ulong pa_start, ulong pa_end)
267{ 262{
268 struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu); 263 struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
269 struct hlist_node *node;
270 struct hpte_cache *pte; 264 struct hpte_cache *pte;
271 int i; 265 int i;
272 266
@@ -277,7 +271,7 @@ void kvmppc_mmu_pte_pflush(struct kvm_vcpu *vcpu, ulong pa_start, ulong pa_end)
277 for (i = 0; i < HPTEG_HASH_NUM_VPTE_LONG; i++) { 271 for (i = 0; i < HPTEG_HASH_NUM_VPTE_LONG; i++) {
278 struct hlist_head *list = &vcpu3s->hpte_hash_vpte_long[i]; 272 struct hlist_head *list = &vcpu3s->hpte_hash_vpte_long[i];
279 273
280 hlist_for_each_entry_rcu(pte, node, list, list_vpte_long) 274 hlist_for_each_entry_rcu(pte, list, list_vpte_long)
281 if ((pte->pte.raddr >= pa_start) && 275 if ((pte->pte.raddr >= pa_start) &&
282 (pte->pte.raddr < pa_end)) 276 (pte->pte.raddr < pa_end))
283 invalidate_pte(vcpu, pte); 277 invalidate_pte(vcpu, pte);
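
The conversion above is mechanical: hlist_for_each_entry_rcu() now derives its cursor from the entry itself, so the separate struct hlist_node * local disappears at every call site. A minimal sketch of the new three-argument form (the demo_* names are invented for illustration and are not from this patch):

#include <linux/types.h>
#include <linux/rculist.h>

struct demo_entry {
        unsigned long key;
        struct hlist_node link;         /* chained into an RCU-protected bucket */
};

/* True if @key is present in @bucket; tolerates concurrent updaters. */
static bool demo_contains(struct hlist_head *bucket, unsigned long key)
{
        struct demo_entry *e;           /* no struct hlist_node *node needed */
        bool found = false;

        rcu_read_lock();
        hlist_for_each_entry_rcu(e, bucket, link) {
                if (e->key == key) {
                        found = true;
                        break;
                }
        }
        rcu_read_unlock();
        return found;
}
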
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index f09ae7b0b4c5..4b505370a1d5 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -134,6 +134,7 @@ config S390
134 select HAVE_SYSCALL_WRAPPERS 134 select HAVE_SYSCALL_WRAPPERS
135 select HAVE_UID16 if 32BIT 135 select HAVE_UID16 if 32BIT
136 select HAVE_VIRT_CPU_ACCOUNTING 136 select HAVE_VIRT_CPU_ACCOUNTING
137 select HAVE_VIRT_TO_BUS
137 select INIT_ALL_POSSIBLE 138 select INIT_ALL_POSSIBLE
138 select KTIME_SCALAR if 32BIT 139 select KTIME_SCALAR if 32BIT
139 select MODULES_USE_ELF_RELA 140 select MODULES_USE_ELF_RELA
diff --git a/arch/s390/kernel/kprobes.c b/arch/s390/kernel/kprobes.c
index d1c7214e157c..3388b2b2a07d 100644
--- a/arch/s390/kernel/kprobes.c
+++ b/arch/s390/kernel/kprobes.c
@@ -354,7 +354,7 @@ static int __kprobes trampoline_probe_handler(struct kprobe *p,
354{ 354{
355 struct kretprobe_instance *ri; 355 struct kretprobe_instance *ri;
356 struct hlist_head *head, empty_rp; 356 struct hlist_head *head, empty_rp;
357 struct hlist_node *node, *tmp; 357 struct hlist_node *tmp;
358 unsigned long flags, orig_ret_address; 358 unsigned long flags, orig_ret_address;
359 unsigned long trampoline_address; 359 unsigned long trampoline_address;
360 kprobe_opcode_t *correct_ret_addr; 360 kprobe_opcode_t *correct_ret_addr;
@@ -379,7 +379,7 @@ static int __kprobes trampoline_probe_handler(struct kprobe *p,
379 orig_ret_address = 0; 379 orig_ret_address = 0;
380 correct_ret_addr = NULL; 380 correct_ret_addr = NULL;
381 trampoline_address = (unsigned long) &kretprobe_trampoline; 381 trampoline_address = (unsigned long) &kretprobe_trampoline;
382 hlist_for_each_entry_safe(ri, node, tmp, head, hlist) { 382 hlist_for_each_entry_safe(ri, tmp, head, hlist) {
383 if (ri->task != current) 383 if (ri->task != current)
384 /* another task is sharing our hash bucket */ 384 /* another task is sharing our hash bucket */
385 continue; 385 continue;
@@ -398,7 +398,7 @@ static int __kprobes trampoline_probe_handler(struct kprobe *p,
398 kretprobe_assert(ri, orig_ret_address, trampoline_address); 398 kretprobe_assert(ri, orig_ret_address, trampoline_address);
399 399
400 correct_ret_addr = ri->ret_addr; 400 correct_ret_addr = ri->ret_addr;
401 hlist_for_each_entry_safe(ri, node, tmp, head, hlist) { 401 hlist_for_each_entry_safe(ri, tmp, head, hlist) {
402 if (ri->task != current) 402 if (ri->task != current)
403 /* another task is sharing our hash bucket */ 403 /* another task is sharing our hash bucket */
404 continue; 404 continue;
@@ -427,7 +427,7 @@ static int __kprobes trampoline_probe_handler(struct kprobe *p,
427 kretprobe_hash_unlock(current, &flags); 427 kretprobe_hash_unlock(current, &flags);
428 preempt_enable_no_resched(); 428 preempt_enable_no_resched();
429 429
430 hlist_for_each_entry_safe(ri, node, tmp, &empty_rp, hlist) { 430 hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {
431 hlist_del(&ri->hlist); 431 hlist_del(&ri->hlist);
432 kfree(ri); 432 kfree(ri);
433 } 433 }
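
hlist_for_each_entry_safe() keeps its struct hlist_node * lookahead, since entries may be unlinked mid-walk, but likewise loses the extra cursor argument. A hedged sketch of the four-argument form, mirroring the empty_rp drain above (demo_* names are illustrative):

#include <linux/list.h>
#include <linux/slab.h>

struct demo_item {
        struct hlist_node hlist;
};

/* Free every item on @head; the tmp lookahead makes in-loop hlist_del() safe. */
static void demo_drain(struct hlist_head *head)
{
        struct demo_item *it;
        struct hlist_node *tmp;

        hlist_for_each_entry_safe(it, tmp, head, hlist) {
                hlist_del(&it->hlist);
                kfree(it);
        }
}
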
diff --git a/arch/s390/pci/pci_msi.c b/arch/s390/pci/pci_msi.c
index 90fd3482b9e2..0297931335e1 100644
--- a/arch/s390/pci/pci_msi.c
+++ b/arch/s390/pci/pci_msi.c
@@ -25,10 +25,9 @@ static DEFINE_SPINLOCK(msi_map_lock);
25 25
26struct msi_desc *__irq_get_msi_desc(unsigned int irq) 26struct msi_desc *__irq_get_msi_desc(unsigned int irq)
27{ 27{
28 struct hlist_node *entry;
29 struct msi_map *map; 28 struct msi_map *map;
30 29
31 hlist_for_each_entry_rcu(map, entry, 30 hlist_for_each_entry_rcu(map,
32 &msi_hash[msi_hashfn(irq)], msi_chain) 31 &msi_hash[msi_hashfn(irq)], msi_chain)
33 if (map->irq == irq) 32 if (map->irq == irq)
34 return map->msi; 33 return map->msi;
diff --git a/arch/score/Kconfig b/arch/score/Kconfig
index 3b1482e7afac..e569aa1fd2ba 100644
--- a/arch/score/Kconfig
+++ b/arch/score/Kconfig
@@ -12,6 +12,7 @@ config SCORE
12 select GENERIC_CPU_DEVICES 12 select GENERIC_CPU_DEVICES
13 select GENERIC_CLOCKEVENTS 13 select GENERIC_CLOCKEVENTS
14 select HAVE_MOD_ARCH_SPECIFIC 14 select HAVE_MOD_ARCH_SPECIFIC
15 select HAVE_VIRT_TO_BUS
15 select MODULES_USE_ELF_REL 16 select MODULES_USE_ELF_REL
16 select CLONE_BACKWARDS 17 select CLONE_BACKWARDS
17 18
diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig
index ef6717a64bc7..5e859633ce69 100644
--- a/arch/sh/Kconfig
+++ b/arch/sh/Kconfig
@@ -148,9 +148,6 @@ config ARCH_HAS_ILOG2_U32
148config ARCH_HAS_ILOG2_U64 148config ARCH_HAS_ILOG2_U64
149 def_bool n 149 def_bool n
150 150
151config ARCH_NO_VIRT_TO_BUS
152 def_bool y
153
154config ARCH_HAS_DEFAULT_IDLE 151config ARCH_HAS_DEFAULT_IDLE
155 def_bool y 152 def_bool y
156 153
diff --git a/arch/sh/kernel/kprobes.c b/arch/sh/kernel/kprobes.c
index 1208b09e95c3..42b46e61a2d5 100644
--- a/arch/sh/kernel/kprobes.c
+++ b/arch/sh/kernel/kprobes.c
@@ -310,7 +310,7 @@ int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
310{ 310{
311 struct kretprobe_instance *ri = NULL; 311 struct kretprobe_instance *ri = NULL;
312 struct hlist_head *head, empty_rp; 312 struct hlist_head *head, empty_rp;
313 struct hlist_node *node, *tmp; 313 struct hlist_node *tmp;
314 unsigned long flags, orig_ret_address = 0; 314 unsigned long flags, orig_ret_address = 0;
315 unsigned long trampoline_address = (unsigned long)&kretprobe_trampoline; 315 unsigned long trampoline_address = (unsigned long)&kretprobe_trampoline;
316 316
@@ -330,7 +330,7 @@ int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
330 * real return address, and all the rest will point to 330 * real return address, and all the rest will point to
331 * kretprobe_trampoline 331 * kretprobe_trampoline
332 */ 332 */
333 hlist_for_each_entry_safe(ri, node, tmp, head, hlist) { 333 hlist_for_each_entry_safe(ri, tmp, head, hlist) {
334 if (ri->task != current) 334 if (ri->task != current)
335 /* another task is sharing our hash bucket */ 335 /* another task is sharing our hash bucket */
336 continue; 336 continue;
@@ -360,7 +360,7 @@ int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
360 360
361 preempt_enable_no_resched(); 361 preempt_enable_no_resched();
362 362
363 hlist_for_each_entry_safe(ri, node, tmp, &empty_rp, hlist) { 363 hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {
364 hlist_del(&ri->hlist); 364 hlist_del(&ri->hlist);
365 kfree(ri); 365 kfree(ri);
366 } 366 }
diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
index 58fb1e3f631d..289127d5241c 100644
--- a/arch/sparc/Kconfig
+++ b/arch/sparc/Kconfig
@@ -146,9 +146,6 @@ config GENERIC_GPIO
146 help 146 help
147 Generic GPIO API support 147 Generic GPIO API support
148 148
149config ARCH_NO_VIRT_TO_BUS
150 def_bool y
151
152config ARCH_SUPPORTS_DEBUG_PAGEALLOC 149config ARCH_SUPPORTS_DEBUG_PAGEALLOC
153 def_bool y if SPARC64 150 def_bool y if SPARC64
154 151
diff --git a/arch/sparc/kernel/kprobes.c b/arch/sparc/kernel/kprobes.c
index a39d1ba5a119..e72212148d2a 100644
--- a/arch/sparc/kernel/kprobes.c
+++ b/arch/sparc/kernel/kprobes.c
@@ -511,7 +511,7 @@ int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
511{ 511{
512 struct kretprobe_instance *ri = NULL; 512 struct kretprobe_instance *ri = NULL;
513 struct hlist_head *head, empty_rp; 513 struct hlist_head *head, empty_rp;
514 struct hlist_node *node, *tmp; 514 struct hlist_node *tmp;
515 unsigned long flags, orig_ret_address = 0; 515 unsigned long flags, orig_ret_address = 0;
516 unsigned long trampoline_address =(unsigned long)&kretprobe_trampoline; 516 unsigned long trampoline_address =(unsigned long)&kretprobe_trampoline;
517 517
@@ -531,7 +531,7 @@ int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
531 * real return address, and all the rest will point to 531 * real return address, and all the rest will point to
532 * kretprobe_trampoline 532 * kretprobe_trampoline
533 */ 533 */
534 hlist_for_each_entry_safe(ri, node, tmp, head, hlist) { 534 hlist_for_each_entry_safe(ri, tmp, head, hlist) {
535 if (ri->task != current) 535 if (ri->task != current)
536 /* another task is sharing our hash bucket */ 536 /* another task is sharing our hash bucket */
537 continue; 537 continue;
@@ -559,7 +559,7 @@ int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
559 kretprobe_hash_unlock(current, &flags); 559 kretprobe_hash_unlock(current, &flags);
560 preempt_enable_no_resched(); 560 preempt_enable_no_resched();
561 561
562 hlist_for_each_entry_safe(ri, node, tmp, &empty_rp, hlist) { 562 hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {
563 hlist_del(&ri->hlist); 563 hlist_del(&ri->hlist);
564 kfree(ri); 564 kfree(ri);
565 } 565 }
diff --git a/arch/sparc/kernel/ldc.c b/arch/sparc/kernel/ldc.c
index 9fcc6b4e93b3..54df554b82d9 100644
--- a/arch/sparc/kernel/ldc.c
+++ b/arch/sparc/kernel/ldc.c
@@ -953,9 +953,8 @@ static HLIST_HEAD(ldc_channel_list);
953static int __ldc_channel_exists(unsigned long id) 953static int __ldc_channel_exists(unsigned long id)
954{ 954{
955 struct ldc_channel *lp; 955 struct ldc_channel *lp;
956 struct hlist_node *n;
957 956
958 hlist_for_each_entry(lp, n, &ldc_channel_list, list) { 957 hlist_for_each_entry(lp, &ldc_channel_list, list) {
959 if (lp->id == id) 958 if (lp->id == id)
960 return 1; 959 return 1;
961 } 960 }
diff --git a/arch/tile/Kconfig b/arch/tile/Kconfig
index 4ce6e4c390e0..ff496ab1e794 100644
--- a/arch/tile/Kconfig
+++ b/arch/tile/Kconfig
@@ -17,6 +17,7 @@ config TILE
17 select GENERIC_IRQ_SHOW 17 select GENERIC_IRQ_SHOW
18 select HAVE_DEBUG_BUGVERBOSE 18 select HAVE_DEBUG_BUGVERBOSE
19 select HAVE_SYSCALL_WRAPPERS if TILEGX 19 select HAVE_SYSCALL_WRAPPERS if TILEGX
20 select HAVE_VIRT_TO_BUS
20 select SYS_HYPERVISOR 21 select SYS_HYPERVISOR
21 select ARCH_HAVE_NMI_SAFE_CMPXCHG 22 select ARCH_HAVE_NMI_SAFE_CMPXCHG
22 select GENERIC_CLOCKEVENTS 23 select GENERIC_CLOCKEVENTS
diff --git a/arch/unicore32/Kconfig b/arch/unicore32/Kconfig
index 60651df5f952..dc50b157fc83 100644
--- a/arch/unicore32/Kconfig
+++ b/arch/unicore32/Kconfig
@@ -9,6 +9,7 @@ config UNICORE32
9 select GENERIC_ATOMIC64 9 select GENERIC_ATOMIC64
10 select HAVE_KERNEL_LZO 10 select HAVE_KERNEL_LZO
11 select HAVE_KERNEL_LZMA 11 select HAVE_KERNEL_LZMA
12 select HAVE_VIRT_TO_BUS
12 select ARCH_HAVE_CUSTOM_GPIO_H 13 select ARCH_HAVE_CUSTOM_GPIO_H
13 select GENERIC_FIND_FIRST_BIT 14 select GENERIC_FIND_FIRST_BIT
14 select GENERIC_IRQ_PROBE 15 select GENERIC_IRQ_PROBE
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 6a9383370311..a4f24f5b1218 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -112,6 +112,7 @@ config X86
112 select GENERIC_STRNLEN_USER 112 select GENERIC_STRNLEN_USER
113 select HAVE_CONTEXT_TRACKING if X86_64 113 select HAVE_CONTEXT_TRACKING if X86_64
114 select HAVE_IRQ_TIME_ACCOUNTING 114 select HAVE_IRQ_TIME_ACCOUNTING
115 select HAVE_VIRT_TO_BUS
115 select MODULES_USE_ELF_REL if X86_32 116 select MODULES_USE_ELF_REL if X86_32
116 select MODULES_USE_ELF_RELA if X86_64 117 select MODULES_USE_ELF_RELA if X86_64
117 select CLONE_BACKWARDS if X86_32 118 select CLONE_BACKWARDS if X86_32
diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c
index e124554598ee..3f06e6149981 100644
--- a/arch/x86/kernel/kprobes/core.c
+++ b/arch/x86/kernel/kprobes/core.c
@@ -652,7 +652,7 @@ static __used __kprobes void *trampoline_handler(struct pt_regs *regs)
652{ 652{
653 struct kretprobe_instance *ri = NULL; 653 struct kretprobe_instance *ri = NULL;
654 struct hlist_head *head, empty_rp; 654 struct hlist_head *head, empty_rp;
655 struct hlist_node *node, *tmp; 655 struct hlist_node *tmp;
656 unsigned long flags, orig_ret_address = 0; 656 unsigned long flags, orig_ret_address = 0;
657 unsigned long trampoline_address = (unsigned long)&kretprobe_trampoline; 657 unsigned long trampoline_address = (unsigned long)&kretprobe_trampoline;
658 kprobe_opcode_t *correct_ret_addr = NULL; 658 kprobe_opcode_t *correct_ret_addr = NULL;
@@ -682,7 +682,7 @@ static __used __kprobes void *trampoline_handler(struct pt_regs *regs)
682 * will be the real return address, and all the rest will 682 * will be the real return address, and all the rest will
683 * point to kretprobe_trampoline. 683 * point to kretprobe_trampoline.
684 */ 684 */
685 hlist_for_each_entry_safe(ri, node, tmp, head, hlist) { 685 hlist_for_each_entry_safe(ri, tmp, head, hlist) {
686 if (ri->task != current) 686 if (ri->task != current)
687 /* another task is sharing our hash bucket */ 687 /* another task is sharing our hash bucket */
688 continue; 688 continue;
@@ -701,7 +701,7 @@ static __used __kprobes void *trampoline_handler(struct pt_regs *regs)
701 kretprobe_assert(ri, orig_ret_address, trampoline_address); 701 kretprobe_assert(ri, orig_ret_address, trampoline_address);
702 702
703 correct_ret_addr = ri->ret_addr; 703 correct_ret_addr = ri->ret_addr;
704 hlist_for_each_entry_safe(ri, node, tmp, head, hlist) { 704 hlist_for_each_entry_safe(ri, tmp, head, hlist) {
705 if (ri->task != current) 705 if (ri->task != current)
706 /* another task is sharing our hash bucket */ 706 /* another task is sharing our hash bucket */
707 continue; 707 continue;
@@ -728,7 +728,7 @@ static __used __kprobes void *trampoline_handler(struct pt_regs *regs)
728 728
729 kretprobe_hash_unlock(current, &flags); 729 kretprobe_hash_unlock(current, &flags);
730 730
731 hlist_for_each_entry_safe(ri, node, tmp, &empty_rp, hlist) { 731 hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {
732 hlist_del(&ri->hlist); 732 hlist_del(&ri->hlist);
733 kfree(ri); 733 kfree(ri);
734 } 734 }
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 4ed3edbe06bd..956ca358108a 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -1644,13 +1644,13 @@ static int kvm_mmu_prepare_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp,
1644static void kvm_mmu_commit_zap_page(struct kvm *kvm, 1644static void kvm_mmu_commit_zap_page(struct kvm *kvm,
1645 struct list_head *invalid_list); 1645 struct list_head *invalid_list);
1646 1646
1647#define for_each_gfn_sp(kvm, sp, gfn, pos) \ 1647#define for_each_gfn_sp(kvm, sp, gfn) \
1648 hlist_for_each_entry(sp, pos, \ 1648 hlist_for_each_entry(sp, \
1649 &(kvm)->arch.mmu_page_hash[kvm_page_table_hashfn(gfn)], hash_link) \ 1649 &(kvm)->arch.mmu_page_hash[kvm_page_table_hashfn(gfn)], hash_link) \
1650 if ((sp)->gfn != (gfn)) {} else 1650 if ((sp)->gfn != (gfn)) {} else
1651 1651
1652#define for_each_gfn_indirect_valid_sp(kvm, sp, gfn, pos) \ 1652#define for_each_gfn_indirect_valid_sp(kvm, sp, gfn) \
1653 hlist_for_each_entry(sp, pos, \ 1653 hlist_for_each_entry(sp, \
1654 &(kvm)->arch.mmu_page_hash[kvm_page_table_hashfn(gfn)], hash_link) \ 1654 &(kvm)->arch.mmu_page_hash[kvm_page_table_hashfn(gfn)], hash_link) \
1655 if ((sp)->gfn != (gfn) || (sp)->role.direct || \ 1655 if ((sp)->gfn != (gfn) || (sp)->role.direct || \
1656 (sp)->role.invalid) {} else 1656 (sp)->role.invalid) {} else
@@ -1706,11 +1706,10 @@ static int kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
1706static void kvm_sync_pages(struct kvm_vcpu *vcpu, gfn_t gfn) 1706static void kvm_sync_pages(struct kvm_vcpu *vcpu, gfn_t gfn)
1707{ 1707{
1708 struct kvm_mmu_page *s; 1708 struct kvm_mmu_page *s;
1709 struct hlist_node *node;
1710 LIST_HEAD(invalid_list); 1709 LIST_HEAD(invalid_list);
1711 bool flush = false; 1710 bool flush = false;
1712 1711
1713 for_each_gfn_indirect_valid_sp(vcpu->kvm, s, gfn, node) { 1712 for_each_gfn_indirect_valid_sp(vcpu->kvm, s, gfn) {
1714 if (!s->unsync) 1713 if (!s->unsync)
1715 continue; 1714 continue;
1716 1715
@@ -1848,7 +1847,6 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
1848 union kvm_mmu_page_role role; 1847 union kvm_mmu_page_role role;
1849 unsigned quadrant; 1848 unsigned quadrant;
1850 struct kvm_mmu_page *sp; 1849 struct kvm_mmu_page *sp;
1851 struct hlist_node *node;
1852 bool need_sync = false; 1850 bool need_sync = false;
1853 1851
1854 role = vcpu->arch.mmu.base_role; 1852 role = vcpu->arch.mmu.base_role;
@@ -1863,7 +1861,7 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
1863 quadrant &= (1 << ((PT32_PT_BITS - PT64_PT_BITS) * level)) - 1; 1861 quadrant &= (1 << ((PT32_PT_BITS - PT64_PT_BITS) * level)) - 1;
1864 role.quadrant = quadrant; 1862 role.quadrant = quadrant;
1865 } 1863 }
1866 for_each_gfn_sp(vcpu->kvm, sp, gfn, node) { 1864 for_each_gfn_sp(vcpu->kvm, sp, gfn) {
1867 if (!need_sync && sp->unsync) 1865 if (!need_sync && sp->unsync)
1868 need_sync = true; 1866 need_sync = true;
1869 1867
@@ -2151,14 +2149,13 @@ void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int goal_nr_mmu_pages)
2151int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn) 2149int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn)
2152{ 2150{
2153 struct kvm_mmu_page *sp; 2151 struct kvm_mmu_page *sp;
2154 struct hlist_node *node;
2155 LIST_HEAD(invalid_list); 2152 LIST_HEAD(invalid_list);
2156 int r; 2153 int r;
2157 2154
2158 pgprintk("%s: looking for gfn %llx\n", __func__, gfn); 2155 pgprintk("%s: looking for gfn %llx\n", __func__, gfn);
2159 r = 0; 2156 r = 0;
2160 spin_lock(&kvm->mmu_lock); 2157 spin_lock(&kvm->mmu_lock);
2161 for_each_gfn_indirect_valid_sp(kvm, sp, gfn, node) { 2158 for_each_gfn_indirect_valid_sp(kvm, sp, gfn) {
2162 pgprintk("%s: gfn %llx role %x\n", __func__, gfn, 2159 pgprintk("%s: gfn %llx role %x\n", __func__, gfn,
2163 sp->role.word); 2160 sp->role.word);
2164 r = 1; 2161 r = 1;
@@ -2288,9 +2285,8 @@ static void __kvm_unsync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
2288static void kvm_unsync_pages(struct kvm_vcpu *vcpu, gfn_t gfn) 2285static void kvm_unsync_pages(struct kvm_vcpu *vcpu, gfn_t gfn)
2289{ 2286{
2290 struct kvm_mmu_page *s; 2287 struct kvm_mmu_page *s;
2291 struct hlist_node *node;
2292 2288
2293 for_each_gfn_indirect_valid_sp(vcpu->kvm, s, gfn, node) { 2289 for_each_gfn_indirect_valid_sp(vcpu->kvm, s, gfn) {
2294 if (s->unsync) 2290 if (s->unsync)
2295 continue; 2291 continue;
2296 WARN_ON(s->role.level != PT_PAGE_TABLE_LEVEL); 2292 WARN_ON(s->role.level != PT_PAGE_TABLE_LEVEL);
@@ -2302,10 +2298,9 @@ static int mmu_need_write_protect(struct kvm_vcpu *vcpu, gfn_t gfn,
2302 bool can_unsync) 2298 bool can_unsync)
2303{ 2299{
2304 struct kvm_mmu_page *s; 2300 struct kvm_mmu_page *s;
2305 struct hlist_node *node;
2306 bool need_unsync = false; 2301 bool need_unsync = false;
2307 2302
2308 for_each_gfn_indirect_valid_sp(vcpu->kvm, s, gfn, node) { 2303 for_each_gfn_indirect_valid_sp(vcpu->kvm, s, gfn) {
2309 if (!can_unsync) 2304 if (!can_unsync)
2310 return 1; 2305 return 1;
2311 2306
@@ -3933,7 +3928,6 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
3933 gfn_t gfn = gpa >> PAGE_SHIFT; 3928 gfn_t gfn = gpa >> PAGE_SHIFT;
3934 union kvm_mmu_page_role mask = { .word = 0 }; 3929 union kvm_mmu_page_role mask = { .word = 0 };
3935 struct kvm_mmu_page *sp; 3930 struct kvm_mmu_page *sp;
3936 struct hlist_node *node;
3937 LIST_HEAD(invalid_list); 3931 LIST_HEAD(invalid_list);
3938 u64 entry, gentry, *spte; 3932 u64 entry, gentry, *spte;
3939 int npte; 3933 int npte;
@@ -3964,7 +3958,7 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
3964 kvm_mmu_audit(vcpu, AUDIT_PRE_PTE_WRITE); 3958 kvm_mmu_audit(vcpu, AUDIT_PRE_PTE_WRITE);
3965 3959
3966 mask.cr0_wp = mask.cr4_pae = mask.nxe = 1; 3960 mask.cr0_wp = mask.cr4_pae = mask.nxe = 1;
3967 for_each_gfn_indirect_valid_sp(vcpu->kvm, sp, gfn, node) { 3961 for_each_gfn_indirect_valid_sp(vcpu->kvm, sp, gfn) {
3968 if (detect_write_misaligned(sp, gpa, bytes) || 3962 if (detect_write_misaligned(sp, gpa, bytes) ||
3969 detect_write_flooding(sp)) { 3963 detect_write_flooding(sp)) {
3970 zap_page |= !!kvm_mmu_prepare_zap_page(vcpu->kvm, sp, 3964 zap_page |= !!kvm_mmu_prepare_zap_page(vcpu->kvm, sp,
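
The for_each_gfn_*() wrappers above shrink by the same argument. They also rely on an idiom worth noting: ending an iterator macro with "if (filter) {} else" makes the caller's loop body the else branch, so non-matching entries are skipped without forcing an extra brace level at call sites. A generic sketch with invented names:

#include <linux/list.h>

struct demo_page {
        unsigned long gfn;
        struct hlist_node hash_link;
};

/*
 * The trailing "if (...) {} else" swallows non-matching entries: the
 * caller-supplied loop body becomes the else branch of the filter.
 */
#define for_each_demo_page(p, head, want_gfn)                   \
        hlist_for_each_entry(p, head, hash_link)                \
                if ((p)->gfn != (want_gfn)) {} else

static int demo_count(struct hlist_head *head, unsigned long gfn)
{
        struct demo_page *p;
        int n = 0;

        for_each_demo_page(p, head, gfn)
                n++;
        return n;
}
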
diff --git a/arch/xtensa/Kconfig b/arch/xtensa/Kconfig
index a5c0663c2cdc..35876ffac11d 100644
--- a/arch/xtensa/Kconfig
+++ b/arch/xtensa/Kconfig
@@ -9,6 +9,7 @@ config XTENSA
9 select HAVE_IDE 9 select HAVE_IDE
10 select GENERIC_ATOMIC64 10 select GENERIC_ATOMIC64
11 select HAVE_GENERIC_HARDIRQS 11 select HAVE_GENERIC_HARDIRQS
12 select HAVE_VIRT_TO_BUS
12 select GENERIC_IRQ_SHOW 13 select GENERIC_IRQ_SHOW
13 select GENERIC_CPU_DEVICES 14 select GENERIC_CPU_DEVICES
14 select MODULES_USE_ELF_RELA 15 select MODULES_USE_ELF_RELA
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index b8858fb0cafa..8bdebb6781e1 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -357,7 +357,6 @@ static int blkcg_reset_stats(struct cgroup *cgroup, struct cftype *cftype,
357{ 357{
358 struct blkcg *blkcg = cgroup_to_blkcg(cgroup); 358 struct blkcg *blkcg = cgroup_to_blkcg(cgroup);
359 struct blkcg_gq *blkg; 359 struct blkcg_gq *blkg;
360 struct hlist_node *n;
361 int i; 360 int i;
362 361
363 mutex_lock(&blkcg_pol_mutex); 362 mutex_lock(&blkcg_pol_mutex);
@@ -368,7 +367,7 @@ static int blkcg_reset_stats(struct cgroup *cgroup, struct cftype *cftype,
368 * stat updates. This is a debug feature which shouldn't exist 367 * stat updates. This is a debug feature which shouldn't exist
369 * anyway. If you get hit by a race, retry. 368 * anyway. If you get hit by a race, retry.
370 */ 369 */
371 hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) { 370 hlist_for_each_entry(blkg, &blkcg->blkg_list, blkcg_node) {
372 for (i = 0; i < BLKCG_MAX_POLS; i++) { 371 for (i = 0; i < BLKCG_MAX_POLS; i++) {
373 struct blkcg_policy *pol = blkcg_policy[i]; 372 struct blkcg_policy *pol = blkcg_policy[i];
374 373
@@ -415,11 +414,10 @@ void blkcg_print_blkgs(struct seq_file *sf, struct blkcg *blkcg,
415 bool show_total) 414 bool show_total)
416{ 415{
417 struct blkcg_gq *blkg; 416 struct blkcg_gq *blkg;
418 struct hlist_node *n;
419 u64 total = 0; 417 u64 total = 0;
420 418
421 spin_lock_irq(&blkcg->lock); 419 spin_lock_irq(&blkcg->lock);
422 hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) 420 hlist_for_each_entry(blkg, &blkcg->blkg_list, blkcg_node)
423 if (blkcg_policy_enabled(blkg->q, pol)) 421 if (blkcg_policy_enabled(blkg->q, pol))
424 total += prfill(sf, blkg->pd[pol->plid], data); 422 total += prfill(sf, blkg->pd[pol->plid], data);
425 spin_unlock_irq(&blkcg->lock); 423 spin_unlock_irq(&blkcg->lock);
diff --git a/block/blk-ioc.c b/block/blk-ioc.c
index fab4cdd3f7bb..9c4bb8266bc8 100644
--- a/block/blk-ioc.c
+++ b/block/blk-ioc.c
@@ -164,7 +164,6 @@ EXPORT_SYMBOL(put_io_context);
164 */ 164 */
165void put_io_context_active(struct io_context *ioc) 165void put_io_context_active(struct io_context *ioc)
166{ 166{
167 struct hlist_node *n;
168 unsigned long flags; 167 unsigned long flags;
169 struct io_cq *icq; 168 struct io_cq *icq;
170 169
@@ -180,7 +179,7 @@ void put_io_context_active(struct io_context *ioc)
180 */ 179 */
181retry: 180retry:
182 spin_lock_irqsave_nested(&ioc->lock, flags, 1); 181 spin_lock_irqsave_nested(&ioc->lock, flags, 1);
183 hlist_for_each_entry(icq, n, &ioc->icq_list, ioc_node) { 182 hlist_for_each_entry(icq, &ioc->icq_list, ioc_node) {
184 if (icq->flags & ICQ_EXITED) 183 if (icq->flags & ICQ_EXITED)
185 continue; 184 continue;
186 if (spin_trylock(icq->q->queue_lock)) { 185 if (spin_trylock(icq->q->queue_lock)) {
diff --git a/block/bsg.c b/block/bsg.c
index ff64ae3bacee..420a5a9f1b23 100644
--- a/block/bsg.c
+++ b/block/bsg.c
@@ -800,11 +800,10 @@ static struct bsg_device *bsg_add_device(struct inode *inode,
800static struct bsg_device *__bsg_get_device(int minor, struct request_queue *q) 800static struct bsg_device *__bsg_get_device(int minor, struct request_queue *q)
801{ 801{
802 struct bsg_device *bd; 802 struct bsg_device *bd;
803 struct hlist_node *entry;
804 803
805 mutex_lock(&bsg_mutex); 804 mutex_lock(&bsg_mutex);
806 805
807 hlist_for_each_entry(bd, entry, bsg_dev_idx_hash(minor), dev_list) { 806 hlist_for_each_entry(bd, bsg_dev_idx_hash(minor), dev_list) {
808 if (bd->queue == q) { 807 if (bd->queue == q) {
809 atomic_inc(&bd->ref_count); 808 atomic_inc(&bd->ref_count);
810 goto found; 809 goto found;
@@ -997,7 +996,7 @@ int bsg_register_queue(struct request_queue *q, struct device *parent,
997{ 996{
998 struct bsg_class_device *bcd; 997 struct bsg_class_device *bcd;
999 dev_t dev; 998 dev_t dev;
1000 int ret, minor; 999 int ret;
1001 struct device *class_dev = NULL; 1000 struct device *class_dev = NULL;
1002 const char *devname; 1001 const char *devname;
1003 1002
@@ -1017,23 +1016,16 @@ int bsg_register_queue(struct request_queue *q, struct device *parent,
1017 1016
1018 mutex_lock(&bsg_mutex); 1017 mutex_lock(&bsg_mutex);
1019 1018
1020 ret = idr_pre_get(&bsg_minor_idr, GFP_KERNEL); 1019 ret = idr_alloc(&bsg_minor_idr, bcd, 0, BSG_MAX_DEVS, GFP_KERNEL);
1021 if (!ret) { 1020 if (ret < 0) {
1022 ret = -ENOMEM; 1021 if (ret == -ENOSPC) {
1023 goto unlock; 1022 printk(KERN_ERR "bsg: too many bsg devices\n");
1024 } 1023 ret = -EINVAL;
1025 1024 }
1026 ret = idr_get_new(&bsg_minor_idr, bcd, &minor);
1027 if (ret < 0)
1028 goto unlock; 1025 goto unlock;
1029
1030 if (minor >= BSG_MAX_DEVS) {
1031 printk(KERN_ERR "bsg: too many bsg devices\n");
1032 ret = -EINVAL;
1033 goto remove_idr;
1034 } 1026 }
1035 1027
1036 bcd->minor = minor; 1028 bcd->minor = ret;
1037 bcd->queue = q; 1029 bcd->queue = q;
1038 bcd->parent = get_device(parent); 1030 bcd->parent = get_device(parent);
1039 bcd->release = release; 1031 bcd->release = release;
@@ -1059,8 +1051,7 @@ unregister_class_dev:
1059 device_unregister(class_dev); 1051 device_unregister(class_dev);
1060put_dev: 1052put_dev:
1061 put_device(parent); 1053 put_device(parent);
1062remove_idr: 1054 idr_remove(&bsg_minor_idr, bcd->minor);
1063 idr_remove(&bsg_minor_idr, minor);
1064unlock: 1055unlock:
1065 mutex_unlock(&bsg_mutex); 1056 mutex_unlock(&bsg_mutex);
1066 return ret; 1057 return ret;
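
The bsg conversion is the canonical idr_alloc() translation in this batch: the idr_pre_get()/idr_get_new() pair plus the manual range check collapse into one call that allocates from [start, end) and returns either the new id or a negative errno, with -ENOSPC meaning the range is full. A condensed sketch under those semantics (demo_* names invented):

#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/idr.h>

static DEFINE_IDR(demo_bsg_idr);

/* Bind @ptr to a fresh id in [0, max); returns the id or a negative errno. */
static int demo_alloc_id(void *ptr, int max)
{
        int id = idr_alloc(&demo_bsg_idr, ptr, 0, max, GFP_KERNEL);

        if (id == -ENOSPC)              /* range exhausted, as bsg maps it */
                return -EINVAL;
        return id;                      /* -ENOMEM passes through unchanged */
}
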
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index e62e9205b80a..ec52807cdd09 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -1435,7 +1435,6 @@ static int cfq_set_weight(struct cgroup *cgrp, struct cftype *cft, u64 val)
1435{ 1435{
1436 struct blkcg *blkcg = cgroup_to_blkcg(cgrp); 1436 struct blkcg *blkcg = cgroup_to_blkcg(cgrp);
1437 struct blkcg_gq *blkg; 1437 struct blkcg_gq *blkg;
1438 struct hlist_node *n;
1439 1438
1440 if (val < CFQ_WEIGHT_MIN || val > CFQ_WEIGHT_MAX) 1439 if (val < CFQ_WEIGHT_MIN || val > CFQ_WEIGHT_MAX)
1441 return -EINVAL; 1440 return -EINVAL;
@@ -1443,7 +1442,7 @@ static int cfq_set_weight(struct cgroup *cgrp, struct cftype *cft, u64 val)
1443 spin_lock_irq(&blkcg->lock); 1442 spin_lock_irq(&blkcg->lock);
1444 blkcg->cfq_weight = (unsigned int)val; 1443 blkcg->cfq_weight = (unsigned int)val;
1445 1444
1446 hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) { 1445 hlist_for_each_entry(blkg, &blkcg->blkg_list, blkcg_node) {
1447 struct cfq_group *cfqg = blkg_to_cfqg(blkg); 1446 struct cfq_group *cfqg = blkg_to_cfqg(blkg);
1448 1447
1449 if (cfqg && !cfqg->dev_weight) 1448 if (cfqg && !cfqg->dev_weight)
diff --git a/block/elevator.c b/block/elevator.c
index 603b2c178740..d0acb31cc083 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -288,10 +288,10 @@ static struct request *elv_rqhash_find(struct request_queue *q, sector_t offset)
288{ 288{
289 struct elevator_queue *e = q->elevator; 289 struct elevator_queue *e = q->elevator;
290 struct hlist_head *hash_list = &e->hash[ELV_HASH_FN(offset)]; 290 struct hlist_head *hash_list = &e->hash[ELV_HASH_FN(offset)];
291 struct hlist_node *entry, *next; 291 struct hlist_node *next;
292 struct request *rq; 292 struct request *rq;
293 293
294 hlist_for_each_entry_safe(rq, entry, next, hash_list, hash) { 294 hlist_for_each_entry_safe(rq, next, hash_list, hash) {
295 BUG_ON(!ELV_ON_HASH(rq)); 295 BUG_ON(!ELV_ON_HASH(rq));
296 296
297 if (unlikely(!rq_mergeable(rq))) { 297 if (unlikely(!rq_mergeable(rq))) {
diff --git a/block/genhd.c b/block/genhd.c
index 5f73c2435fde..3c001fba80c7 100644
--- a/block/genhd.c
+++ b/block/genhd.c
@@ -26,7 +26,7 @@ static DEFINE_MUTEX(block_class_lock);
26struct kobject *block_depr; 26struct kobject *block_depr;
27 27
28/* for extended dynamic devt allocation, currently only one major is used */ 28/* for extended dynamic devt allocation, currently only one major is used */
29#define MAX_EXT_DEVT (1 << MINORBITS) 29#define NR_EXT_DEVT (1 << MINORBITS)
30 30
31/* For extended devt allocation. ext_devt_mutex prevents look up 31/* For extended devt allocation. ext_devt_mutex prevents look up
32 * results from going away underneath its user. 32 * results from going away underneath its user.
@@ -411,7 +411,7 @@ static int blk_mangle_minor(int minor)
411int blk_alloc_devt(struct hd_struct *part, dev_t *devt) 411int blk_alloc_devt(struct hd_struct *part, dev_t *devt)
412{ 412{
413 struct gendisk *disk = part_to_disk(part); 413 struct gendisk *disk = part_to_disk(part);
414 int idx, rc; 414 int idx;
415 415
416 /* in consecutive minor range? */ 416 /* in consecutive minor range? */
417 if (part->partno < disk->minors) { 417 if (part->partno < disk->minors) {
@@ -420,19 +420,11 @@ int blk_alloc_devt(struct hd_struct *part, dev_t *devt)
420 } 420 }
421 421
422 /* allocate ext devt */ 422 /* allocate ext devt */
423 do { 423 mutex_lock(&ext_devt_mutex);
424 if (!idr_pre_get(&ext_devt_idr, GFP_KERNEL)) 424 idx = idr_alloc(&ext_devt_idr, part, 0, NR_EXT_DEVT, GFP_KERNEL);
425 return -ENOMEM; 425 mutex_unlock(&ext_devt_mutex);
426 rc = idr_get_new(&ext_devt_idr, part, &idx); 426 if (idx < 0)
427 } while (rc == -EAGAIN); 427 return idx == -ENOSPC ? -EBUSY : idx;
428
429 if (rc)
430 return rc;
431
432 if (idx > MAX_EXT_DEVT) {
433 idr_remove(&ext_devt_idr, idx);
434 return -EBUSY;
435 }
436 428
437 *devt = MKDEV(BLOCK_EXT_MAJOR, blk_mangle_minor(idx)); 429 *devt = MKDEV(BLOCK_EXT_MAJOR, blk_mangle_minor(idx));
438 return 0; 430 return 0;
@@ -655,7 +647,6 @@ void del_gendisk(struct gendisk *disk)
655 disk_part_iter_exit(&piter); 647 disk_part_iter_exit(&piter);
656 648
657 invalidate_partition(disk, 0); 649 invalidate_partition(disk, 0);
658 blk_free_devt(disk_to_dev(disk)->devt);
659 set_capacity(disk, 0); 650 set_capacity(disk, 0);
660 disk->flags &= ~GENHD_FL_UP; 651 disk->flags &= ~GENHD_FL_UP;
661 652
@@ -674,6 +665,7 @@ void del_gendisk(struct gendisk *disk)
674 sysfs_remove_link(block_depr, dev_name(disk_to_dev(disk))); 665 sysfs_remove_link(block_depr, dev_name(disk_to_dev(disk)));
675 pm_runtime_set_memalloc_noio(disk_to_dev(disk), false); 666 pm_runtime_set_memalloc_noio(disk_to_dev(disk), false);
676 device_del(disk_to_dev(disk)); 667 device_del(disk_to_dev(disk));
668 blk_free_devt(disk_to_dev(disk)->devt);
677} 669}
678EXPORT_SYMBOL(del_gendisk); 670EXPORT_SYMBOL(del_gendisk);
679 671
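
Besides the idr_alloc() conversion, del_gendisk() now frees the devt only after device_del(). The point of the reordering: blk_free_devt() returns the number to the extended-devt idr, and releasing it while the old device is still registered would let a racing blk_alloc_devt() hand the same devt to a new disk. Schematically (a sketch of the ordering, not the full teardown path):

#include <linux/device.h>
#include <linux/genhd.h>

static void demo_teardown(struct device *dev, dev_t devt)
{
        device_del(dev);        /* device is gone from sysfs ... */
        blk_free_devt(devt);    /* ... only now may the devt be recycled */
}
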
diff --git a/block/partition-generic.c b/block/partition-generic.c
index f1d14519cc04..789cdea05893 100644
--- a/block/partition-generic.c
+++ b/block/partition-generic.c
@@ -249,11 +249,11 @@ void delete_partition(struct gendisk *disk, int partno)
249 if (!part) 249 if (!part)
250 return; 250 return;
251 251
252 blk_free_devt(part_devt(part));
253 rcu_assign_pointer(ptbl->part[partno], NULL); 252 rcu_assign_pointer(ptbl->part[partno], NULL);
254 rcu_assign_pointer(ptbl->last_lookup, NULL); 253 rcu_assign_pointer(ptbl->last_lookup, NULL);
255 kobject_put(part->holder_dir); 254 kobject_put(part->holder_dir);
256 device_del(part_to_dev(part)); 255 device_del(part_to_dev(part));
256 blk_free_devt(part_devt(part));
257 257
258 hd_struct_put(part); 258 hd_struct_put(part);
259} 259}
@@ -418,7 +418,7 @@ int rescan_partitions(struct gendisk *disk, struct block_device *bdev)
418 int p, highest, res; 418 int p, highest, res;
419rescan: 419rescan:
420 if (state && !IS_ERR(state)) { 420 if (state && !IS_ERR(state)) {
421 kfree(state); 421 free_partitions(state);
422 state = NULL; 422 state = NULL;
423 } 423 }
424 424
@@ -525,7 +525,7 @@ rescan:
525 md_autodetect_dev(part_to_dev(part)->devt); 525 md_autodetect_dev(part_to_dev(part)->devt);
526#endif 526#endif
527 } 527 }
528 kfree(state); 528 free_partitions(state);
529 return 0; 529 return 0;
530} 530}
531 531
diff --git a/block/partitions/check.c b/block/partitions/check.c
index bc908672c976..19ba207ea7d1 100644
--- a/block/partitions/check.c
+++ b/block/partitions/check.c
@@ -14,6 +14,7 @@
14 */ 14 */
15 15
16#include <linux/slab.h> 16#include <linux/slab.h>
17#include <linux/vmalloc.h>
17#include <linux/ctype.h> 18#include <linux/ctype.h>
18#include <linux/genhd.h> 19#include <linux/genhd.h>
19 20
@@ -106,18 +107,45 @@ static int (*check_part[])(struct parsed_partitions *) = {
106 NULL 107 NULL
107}; 108};
108 109
110static struct parsed_partitions *allocate_partitions(struct gendisk *hd)
111{
112 struct parsed_partitions *state;
113 int nr;
114
115 state = kzalloc(sizeof(*state), GFP_KERNEL);
116 if (!state)
117 return NULL;
118
119 nr = disk_max_parts(hd);
120 state->parts = vzalloc(nr * sizeof(state->parts[0]));
121 if (!state->parts) {
122 kfree(state);
123 return NULL;
124 }
125
126 state->limit = nr;
127
128 return state;
129}
130
131void free_partitions(struct parsed_partitions *state)
132{
133 vfree(state->parts);
134 kfree(state);
135}
136
109struct parsed_partitions * 137struct parsed_partitions *
110check_partition(struct gendisk *hd, struct block_device *bdev) 138check_partition(struct gendisk *hd, struct block_device *bdev)
111{ 139{
112 struct parsed_partitions *state; 140 struct parsed_partitions *state;
113 int i, res, err; 141 int i, res, err;
114 142
115 state = kzalloc(sizeof(struct parsed_partitions), GFP_KERNEL); 143 state = allocate_partitions(hd);
116 if (!state) 144 if (!state)
117 return NULL; 145 return NULL;
118 state->pp_buf = (char *)__get_free_page(GFP_KERNEL); 146 state->pp_buf = (char *)__get_free_page(GFP_KERNEL);
119 if (!state->pp_buf) { 147 if (!state->pp_buf) {
120 kfree(state); 148 free_partitions(state);
121 return NULL; 149 return NULL;
122 } 150 }
123 state->pp_buf[0] = '\0'; 151 state->pp_buf[0] = '\0';
@@ -128,10 +156,9 @@ check_partition(struct gendisk *hd, struct block_device *bdev)
128 if (isdigit(state->name[strlen(state->name)-1])) 156 if (isdigit(state->name[strlen(state->name)-1]))
129 sprintf(state->name, "p"); 157 sprintf(state->name, "p");
130 158
131 state->limit = disk_max_parts(hd);
132 i = res = err = 0; 159 i = res = err = 0;
133 while (!res && check_part[i]) { 160 while (!res && check_part[i]) {
134 memset(&state->parts, 0, sizeof(state->parts)); 161 memset(state->parts, 0, state->limit * sizeof(state->parts[0]));
135 res = check_part[i++](state); 162 res = check_part[i++](state);
136 if (res < 0) { 163 if (res < 0) {
137 /* We have hit an I/O error which we don't report now. 164 /* We have hit an I/O error which we don't report now.
@@ -161,6 +188,6 @@ check_partition(struct gendisk *hd, struct block_device *bdev)
161 printk(KERN_INFO "%s", state->pp_buf); 188 printk(KERN_INFO "%s", state->pp_buf);
162 189
163 free_page((unsigned long)state->pp_buf); 190 free_page((unsigned long)state->pp_buf);
164 kfree(state); 191 free_partitions(state);
165 return ERR_PTR(res); 192 return ERR_PTR(res);
166} 193}
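
struct parsed_partitions previously embedded a DISK_MAX_PARTS-sized parts[] array, so the kzalloc() in check_partition() needed one large physically contiguous allocation. The new allocate_partitions()/free_partitions() pair sizes the array to disk_max_parts() and uses vzalloc(), trading contiguity for reliability. The reduced pattern (field and function names invented):

#include <linux/slab.h>
#include <linux/vmalloc.h>

struct demo_part {
        unsigned long long from, size;
};

struct demo_state {
        struct demo_part *parts;        /* was a large inline array */
        int limit;
};

static struct demo_state *demo_alloc_state(int nr)
{
        struct demo_state *s = kzalloc(sizeof(*s), GFP_KERNEL);

        if (!s)
                return NULL;
        s->parts = vzalloc(nr * sizeof(s->parts[0]));   /* no high-order pages */
        if (!s->parts) {
                kfree(s);
                return NULL;
        }
        s->limit = nr;
        return s;
}

static void demo_free_state(struct demo_state *s)
{
        vfree(s->parts);        /* vzalloc pairs with vfree */
        kfree(s);
}
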
diff --git a/block/partitions/check.h b/block/partitions/check.h
index 52b100311ec3..eade17ea910b 100644
--- a/block/partitions/check.h
+++ b/block/partitions/check.h
@@ -15,13 +15,15 @@ struct parsed_partitions {
15 int flags; 15 int flags;
16 bool has_info; 16 bool has_info;
17 struct partition_meta_info info; 17 struct partition_meta_info info;
18 } parts[DISK_MAX_PARTS]; 18 } *parts;
19 int next; 19 int next;
20 int limit; 20 int limit;
21 bool access_beyond_eod; 21 bool access_beyond_eod;
22 char *pp_buf; 22 char *pp_buf;
23}; 23};
24 24
25void free_partitions(struct parsed_partitions *state);
26
25struct parsed_partitions * 27struct parsed_partitions *
26check_partition(struct gendisk *, struct block_device *); 28check_partition(struct gendisk *, struct block_device *);
27 29
diff --git a/block/partitions/efi.c b/block/partitions/efi.c
index b62fb88b8711..ff5804e2f1d2 100644
--- a/block/partitions/efi.c
+++ b/block/partitions/efi.c
@@ -310,15 +310,23 @@ static int is_gpt_valid(struct parsed_partitions *state, u64 lba,
310 goto fail; 310 goto fail;
311 } 311 }
312 312
 313 /* Check the GUID Partition Table header size */ 313 /* Check that the GUID Partition Table header size is not too big */
314 if (le32_to_cpu((*gpt)->header_size) > 314 if (le32_to_cpu((*gpt)->header_size) >
315 bdev_logical_block_size(state->bdev)) { 315 bdev_logical_block_size(state->bdev)) {
316 pr_debug("GUID Partition Table Header size is wrong: %u > %u\n", 316 pr_debug("GUID Partition Table Header size is too large: %u > %u\n",
317 le32_to_cpu((*gpt)->header_size), 317 le32_to_cpu((*gpt)->header_size),
318 bdev_logical_block_size(state->bdev)); 318 bdev_logical_block_size(state->bdev));
319 goto fail; 319 goto fail;
320 } 320 }
321 321
 322 /* Check that the GUID Partition Table header size is not too small */
323 if (le32_to_cpu((*gpt)->header_size) < sizeof(gpt_header)) {
324 pr_debug("GUID Partition Table Header size is too small: %u < %zu\n",
325 le32_to_cpu((*gpt)->header_size),
326 sizeof(gpt_header));
327 goto fail;
328 }
329
322 /* Check the GUID Partition Table CRC */ 330 /* Check the GUID Partition Table CRC */
323 origcrc = le32_to_cpu((*gpt)->header_crc32); 331 origcrc = le32_to_cpu((*gpt)->header_crc32);
324 (*gpt)->header_crc32 = 0; 332 (*gpt)->header_crc32 = 0;
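
The added lower bound complements the existing upper bound: header_size must fit inside one logical block, or the read overruns the sector buffer, and it must at least cover the fixed gpt_header fields, or the CRC pass over header_size bytes checksums garbage. Both checks in one illustrative helper (names invented, host-endian for brevity):

#include <linux/types.h>

static bool demo_gpt_hdr_size_ok(u32 hdr_size, u32 block_size, size_t min_size)
{
        if (hdr_size > block_size)
                return false;   /* read would overrun the sector buffer */
        if (hdr_size < min_size)
                return false;   /* too small to hold the fixed gpt_header */
        return true;
}
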
diff --git a/block/partitions/mac.c b/block/partitions/mac.c
index 11f688bd76c5..76d8ba6379a9 100644
--- a/block/partitions/mac.c
+++ b/block/partitions/mac.c
@@ -63,6 +63,10 @@ int mac_partition(struct parsed_partitions *state)
63 put_dev_sector(sect); 63 put_dev_sector(sect);
64 return 0; 64 return 0;
65 } 65 }
66
67 if (blocks_in_map >= state->limit)
68 blocks_in_map = state->limit - 1;
69
66 strlcat(state->pp_buf, " [mac]", PAGE_SIZE); 70 strlcat(state->pp_buf, " [mac]", PAGE_SIZE);
67 for (slot = 1; slot <= blocks_in_map; ++slot) { 71 for (slot = 1; slot <= blocks_in_map; ++slot) {
68 int pos = slot * secsize; 72 int pos = slot * secsize;
diff --git a/block/partitions/msdos.c b/block/partitions/msdos.c
index 8752a5d26565..7681cd295ab8 100644
--- a/block/partitions/msdos.c
+++ b/block/partitions/msdos.c
@@ -455,14 +455,19 @@ int msdos_partition(struct parsed_partitions *state)
455 data = read_part_sector(state, 0, &sect); 455 data = read_part_sector(state, 0, &sect);
456 if (!data) 456 if (!data)
457 return -1; 457 return -1;
458 if (!msdos_magic_present(data + 510)) { 458
459 /*
460 * Note order! (some AIX disks, e.g. unbootable kind,
461 * have no MSDOS 55aa)
462 */
463 if (aix_magic_present(state, data)) {
459 put_dev_sector(sect); 464 put_dev_sector(sect);
465 strlcat(state->pp_buf, " [AIX]", PAGE_SIZE);
460 return 0; 466 return 0;
461 } 467 }
462 468
463 if (aix_magic_present(state, data)) { 469 if (!msdos_magic_present(data + 510)) {
464 put_dev_sector(sect); 470 put_dev_sector(sect);
465 strlcat(state->pp_buf, " [AIX]", PAGE_SIZE);
466 return 0; 471 return 0;
467 } 472 }
468 473
diff --git a/crypto/algapi.c b/crypto/algapi.c
index 08c57c8aec95..6149a6e09643 100644
--- a/crypto/algapi.c
+++ b/crypto/algapi.c
@@ -447,7 +447,7 @@ EXPORT_SYMBOL_GPL(crypto_register_template);
447void crypto_unregister_template(struct crypto_template *tmpl) 447void crypto_unregister_template(struct crypto_template *tmpl)
448{ 448{
449 struct crypto_instance *inst; 449 struct crypto_instance *inst;
450 struct hlist_node *p, *n; 450 struct hlist_node *n;
451 struct hlist_head *list; 451 struct hlist_head *list;
452 LIST_HEAD(users); 452 LIST_HEAD(users);
453 453
@@ -457,7 +457,7 @@ void crypto_unregister_template(struct crypto_template *tmpl)
457 list_del_init(&tmpl->list); 457 list_del_init(&tmpl->list);
458 458
459 list = &tmpl->instances; 459 list = &tmpl->instances;
460 hlist_for_each_entry(inst, p, list, list) { 460 hlist_for_each_entry(inst, list, list) {
461 int err = crypto_remove_alg(&inst->alg, &users); 461 int err = crypto_remove_alg(&inst->alg, &users);
462 BUG_ON(err); 462 BUG_ON(err);
463 } 463 }
@@ -466,7 +466,7 @@ void crypto_unregister_template(struct crypto_template *tmpl)
466 466
467 up_write(&crypto_alg_sem); 467 up_write(&crypto_alg_sem);
468 468
469 hlist_for_each_entry_safe(inst, p, n, list, list) { 469 hlist_for_each_entry_safe(inst, n, list, list) {
470 BUG_ON(atomic_read(&inst->alg.cra_refcnt) != 1); 470 BUG_ON(atomic_read(&inst->alg.cra_refcnt) != 1);
471 tmpl->free(inst); 471 tmpl->free(inst);
472 } 472 }
diff --git a/drivers/atm/atmtcp.c b/drivers/atm/atmtcp.c
index b22d71cac54c..0e3f8f9dcd29 100644
--- a/drivers/atm/atmtcp.c
+++ b/drivers/atm/atmtcp.c
@@ -157,7 +157,6 @@ static int atmtcp_v_ioctl(struct atm_dev *dev,unsigned int cmd,void __user *arg)
157{ 157{
158 struct atm_cirange ci; 158 struct atm_cirange ci;
159 struct atm_vcc *vcc; 159 struct atm_vcc *vcc;
160 struct hlist_node *node;
161 struct sock *s; 160 struct sock *s;
162 int i; 161 int i;
163 162
@@ -171,7 +170,7 @@ static int atmtcp_v_ioctl(struct atm_dev *dev,unsigned int cmd,void __user *arg)
171 for(i = 0; i < VCC_HTABLE_SIZE; ++i) { 170 for(i = 0; i < VCC_HTABLE_SIZE; ++i) {
172 struct hlist_head *head = &vcc_hash[i]; 171 struct hlist_head *head = &vcc_hash[i];
173 172
174 sk_for_each(s, node, head) { 173 sk_for_each(s, head) {
175 vcc = atm_sk(s); 174 vcc = atm_sk(s);
176 if (vcc->dev != dev) 175 if (vcc->dev != dev)
177 continue; 176 continue;
@@ -264,12 +263,11 @@ static struct atm_vcc *find_vcc(struct atm_dev *dev, short vpi, int vci)
264{ 263{
265 struct hlist_head *head; 264 struct hlist_head *head;
266 struct atm_vcc *vcc; 265 struct atm_vcc *vcc;
267 struct hlist_node *node;
268 struct sock *s; 266 struct sock *s;
269 267
270 head = &vcc_hash[vci & (VCC_HTABLE_SIZE -1)]; 268 head = &vcc_hash[vci & (VCC_HTABLE_SIZE -1)];
271 269
272 sk_for_each(s, node, head) { 270 sk_for_each(s, head) {
273 vcc = atm_sk(s); 271 vcc = atm_sk(s);
274 if (vcc->dev == dev && 272 if (vcc->dev == dev &&
275 vcc->vci == vci && vcc->vpi == vpi && 273 vcc->vci == vci && vcc->vpi == vpi &&
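
sk_for_each() is the socket-hash wrapper over the same hlist machinery, so each ATM driver walking vcc_hash drops its struct hlist_node * local as well. The new two-argument shape, sketched against a generic bucket (illustrative only):

#include <net/sock.h>

/* Count sockets in one vcc_hash-style bucket; caller holds the list lock. */
static int demo_count_socks(struct hlist_head *head)
{
        struct sock *s;
        int n = 0;

        sk_for_each(s, head)            /* was: sk_for_each(s, node, head) */
                n++;
        return n;
}
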
diff --git a/drivers/atm/eni.c b/drivers/atm/eni.c
index c1eb6fa8ac35..b1955ba40d63 100644
--- a/drivers/atm/eni.c
+++ b/drivers/atm/eni.c
@@ -2093,7 +2093,6 @@ static unsigned char eni_phy_get(struct atm_dev *dev,unsigned long addr)
2093 2093
2094static int eni_proc_read(struct atm_dev *dev,loff_t *pos,char *page) 2094static int eni_proc_read(struct atm_dev *dev,loff_t *pos,char *page)
2095{ 2095{
2096 struct hlist_node *node;
2097 struct sock *s; 2096 struct sock *s;
2098 static const char *signal[] = { "LOST","unknown","okay" }; 2097 static const char *signal[] = { "LOST","unknown","okay" };
2099 struct eni_dev *eni_dev = ENI_DEV(dev); 2098 struct eni_dev *eni_dev = ENI_DEV(dev);
@@ -2171,7 +2170,7 @@ static int eni_proc_read(struct atm_dev *dev,loff_t *pos,char *page)
2171 for(i = 0; i < VCC_HTABLE_SIZE; ++i) { 2170 for(i = 0; i < VCC_HTABLE_SIZE; ++i) {
2172 struct hlist_head *head = &vcc_hash[i]; 2171 struct hlist_head *head = &vcc_hash[i];
2173 2172
2174 sk_for_each(s, node, head) { 2173 sk_for_each(s, head) {
2175 struct eni_vcc *eni_vcc; 2174 struct eni_vcc *eni_vcc;
2176 int length; 2175 int length;
2177 2176
diff --git a/drivers/atm/he.c b/drivers/atm/he.c
index 72b6960fa95f..d6891267f5bb 100644
--- a/drivers/atm/he.c
+++ b/drivers/atm/he.c
@@ -329,7 +329,6 @@ __find_vcc(struct he_dev *he_dev, unsigned cid)
329{ 329{
330 struct hlist_head *head; 330 struct hlist_head *head;
331 struct atm_vcc *vcc; 331 struct atm_vcc *vcc;
332 struct hlist_node *node;
333 struct sock *s; 332 struct sock *s;
334 short vpi; 333 short vpi;
335 int vci; 334 int vci;
@@ -338,7 +337,7 @@ __find_vcc(struct he_dev *he_dev, unsigned cid)
338 vci = cid & ((1 << he_dev->vcibits) - 1); 337 vci = cid & ((1 << he_dev->vcibits) - 1);
339 head = &vcc_hash[vci & (VCC_HTABLE_SIZE -1)]; 338 head = &vcc_hash[vci & (VCC_HTABLE_SIZE -1)];
340 339
341 sk_for_each(s, node, head) { 340 sk_for_each(s, head) {
342 vcc = atm_sk(s); 341 vcc = atm_sk(s);
343 if (vcc->dev == he_dev->atm_dev && 342 if (vcc->dev == he_dev->atm_dev &&
344 vcc->vci == vci && vcc->vpi == vpi && 343 vcc->vci == vci && vcc->vpi == vpi &&
diff --git a/drivers/atm/nicstar.c b/drivers/atm/nicstar.c
index ed1d2b7f923b..6587dc295eb0 100644
--- a/drivers/atm/nicstar.c
+++ b/drivers/atm/nicstar.c
@@ -251,7 +251,6 @@ static void nicstar_remove_one(struct pci_dev *pcidev)
251 if (card->scd2vc[j] != NULL) 251 if (card->scd2vc[j] != NULL)
252 free_scq(card, card->scd2vc[j]->scq, card->scd2vc[j]->tx_vcc); 252 free_scq(card, card->scd2vc[j]->scq, card->scd2vc[j]->tx_vcc);
253 } 253 }
254 idr_remove_all(&card->idr);
255 idr_destroy(&card->idr); 254 idr_destroy(&card->idr);
256 pci_free_consistent(card->pcidev, NS_RSQSIZE + NS_RSQ_ALIGNMENT, 255 pci_free_consistent(card->pcidev, NS_RSQSIZE + NS_RSQ_ALIGNMENT,
257 card->rsq.org, card->rsq.dma); 256 card->rsq.org, card->rsq.dma);
@@ -950,11 +949,10 @@ static void free_scq(ns_dev *card, scq_info *scq, struct atm_vcc *vcc)
950static void push_rxbufs(ns_dev * card, struct sk_buff *skb) 949static void push_rxbufs(ns_dev * card, struct sk_buff *skb)
951{ 950{
952 struct sk_buff *handle1, *handle2; 951 struct sk_buff *handle1, *handle2;
953 u32 id1 = 0, id2 = 0; 952 int id1, id2;
954 u32 addr1, addr2; 953 u32 addr1, addr2;
955 u32 stat; 954 u32 stat;
956 unsigned long flags; 955 unsigned long flags;
957 int err;
958 956
959 /* *BARF* */ 957 /* *BARF* */
960 handle2 = NULL; 958 handle2 = NULL;
@@ -1027,23 +1025,12 @@ static void push_rxbufs(ns_dev * card, struct sk_buff *skb)
1027 card->lbfqc += 2; 1025 card->lbfqc += 2;
1028 } 1026 }
1029 1027
1030 do { 1028 id1 = idr_alloc(&card->idr, handle1, 0, 0, GFP_ATOMIC);
1031 if (!idr_pre_get(&card->idr, GFP_ATOMIC)) { 1029 if (id1 < 0)
1032 printk(KERN_ERR 1030 goto out;
1033 "nicstar%d: no free memory for idr\n",
1034 card->index);
1035 goto out;
1036 }
1037
1038 if (!id1)
1039 err = idr_get_new_above(&card->idr, handle1, 0, &id1);
1040
1041 if (!id2 && err == 0)
1042 err = idr_get_new_above(&card->idr, handle2, 0, &id2);
1043
1044 } while (err == -EAGAIN);
1045 1031
1046 if (err) 1032 id2 = idr_alloc(&card->idr, handle2, 0, 0, GFP_ATOMIC);
1033 if (id2 < 0)
1047 goto out; 1034 goto out;
1048 1035
1049 spin_lock_irqsave(&card->res_lock, flags); 1036 spin_lock_irqsave(&card->res_lock, flags);
diff --git a/drivers/atm/solos-pci.c b/drivers/atm/solos-pci.c
index 0474a89170b9..32784d18d1f7 100644
--- a/drivers/atm/solos-pci.c
+++ b/drivers/atm/solos-pci.c
@@ -896,12 +896,11 @@ static struct atm_vcc *find_vcc(struct atm_dev *dev, short vpi, int vci)
896{ 896{
897 struct hlist_head *head; 897 struct hlist_head *head;
898 struct atm_vcc *vcc = NULL; 898 struct atm_vcc *vcc = NULL;
899 struct hlist_node *node;
900 struct sock *s; 899 struct sock *s;
901 900
902 read_lock(&vcc_sklist_lock); 901 read_lock(&vcc_sklist_lock);
903 head = &vcc_hash[vci & (VCC_HTABLE_SIZE -1)]; 902 head = &vcc_hash[vci & (VCC_HTABLE_SIZE -1)];
904 sk_for_each(s, node, head) { 903 sk_for_each(s, head) {
905 vcc = atm_sk(s); 904 vcc = atm_sk(s);
906 if (vcc->dev == dev && vcc->vci == vci && 905 if (vcc->dev == dev && vcc->vci == vci &&
907 vcc->vpi == vpi && vcc->qos.rxtp.traffic_class != ATM_NONE && 906 vcc->vpi == vpi && vcc->qos.rxtp.traffic_class != ATM_NONE &&
diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
index 8c13eeb83c53..e98da675f0c1 100644
--- a/drivers/block/drbd/drbd_main.c
+++ b/drivers/block/drbd/drbd_main.c
@@ -2660,25 +2660,24 @@ enum drbd_ret_code conn_new_minor(struct drbd_tconn *tconn, unsigned int minor,
2660 mdev->read_requests = RB_ROOT; 2660 mdev->read_requests = RB_ROOT;
2661 mdev->write_requests = RB_ROOT; 2661 mdev->write_requests = RB_ROOT;
2662 2662
2663 if (!idr_pre_get(&minors, GFP_KERNEL)) 2663 minor_got = idr_alloc(&minors, mdev, minor, minor + 1, GFP_KERNEL);
2664 goto out_no_minor_idr; 2664 if (minor_got < 0) {
2665 if (idr_get_new_above(&minors, mdev, minor, &minor_got)) 2665 if (minor_got == -ENOSPC) {
2666 err = ERR_MINOR_EXISTS;
2667 drbd_msg_put_info("requested minor exists already");
2668 }
2666 goto out_no_minor_idr; 2669 goto out_no_minor_idr;
2667 if (minor_got != minor) {
2668 err = ERR_MINOR_EXISTS;
2669 drbd_msg_put_info("requested minor exists already");
2670 goto out_idr_remove_minor;
2671 } 2670 }
2672 2671
2673 if (!idr_pre_get(&tconn->volumes, GFP_KERNEL)) 2672 vnr_got = idr_alloc(&tconn->volumes, mdev, vnr, vnr + 1, GFP_KERNEL);
2674 goto out_idr_remove_minor; 2673 if (vnr_got < 0) {
2675 if (idr_get_new_above(&tconn->volumes, mdev, vnr, &vnr_got)) 2674 if (vnr_got == -ENOSPC) {
2675 err = ERR_INVALID_REQUEST;
2676 drbd_msg_put_info("requested volume exists already");
2677 }
2676 goto out_idr_remove_minor; 2678 goto out_idr_remove_minor;
2677 if (vnr_got != vnr) {
2678 err = ERR_INVALID_REQUEST;
2679 drbd_msg_put_info("requested volume exists already");
2680 goto out_idr_remove_vol;
2681 } 2679 }
2680
2682 add_disk(disk); 2681 add_disk(disk);
 2683 kref_init(&mdev->kref); /* one ref for both idrs and the add_disk */ 2682 kref_init(&mdev->kref); /* one ref for both idrs and the add_disk */
2684 2683
@@ -2689,8 +2688,6 @@ enum drbd_ret_code conn_new_minor(struct drbd_tconn *tconn, unsigned int minor,
2689 2688
2690 return NO_ERROR; 2689 return NO_ERROR;
2691 2690
2692out_idr_remove_vol:
2693 idr_remove(&tconn->volumes, vnr_got);
2694out_idr_remove_minor: 2691out_idr_remove_minor:
2695 idr_remove(&minors, minor_got); 2692 idr_remove(&minors, minor_got);
2696 synchronize_rcu(); 2693 synchronize_rcu();
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index 8031a8cdd698..f47dccbda1d4 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -1624,30 +1624,17 @@ static int loop_add(struct loop_device **l, int i)
1624 if (!lo) 1624 if (!lo)
1625 goto out; 1625 goto out;
1626 1626
1627 if (!idr_pre_get(&loop_index_idr, GFP_KERNEL)) 1627 /* allocate id, if @id >= 0, we're requesting that specific id */
1628 goto out_free_dev;
1629
1630 if (i >= 0) { 1628 if (i >= 0) {
1631 int m; 1629 err = idr_alloc(&loop_index_idr, lo, i, i + 1, GFP_KERNEL);
1632 1630 if (err == -ENOSPC)
1633 /* create specific i in the index */
1634 err = idr_get_new_above(&loop_index_idr, lo, i, &m);
1635 if (err >= 0 && i != m) {
1636 idr_remove(&loop_index_idr, m);
1637 err = -EEXIST; 1631 err = -EEXIST;
1638 }
1639 } else if (i == -1) {
1640 int m;
1641
1642 /* get next free nr */
1643 err = idr_get_new(&loop_index_idr, lo, &m);
1644 if (err >= 0)
1645 i = m;
1646 } else { 1632 } else {
1647 err = -EINVAL; 1633 err = idr_alloc(&loop_index_idr, lo, 0, 0, GFP_KERNEL);
1648 } 1634 }
1649 if (err < 0) 1635 if (err < 0)
1650 goto out_free_dev; 1636 goto out_free_dev;
1637 i = err;
1651 1638
1652 lo->lo_queue = blk_alloc_queue(GFP_KERNEL); 1639 lo->lo_queue = blk_alloc_queue(GFP_KERNEL);
1653 if (!lo->lo_queue) 1640 if (!lo->lo_queue)
@@ -1911,7 +1898,6 @@ static void __exit loop_exit(void)
1911 range = max_loop ? max_loop << part_shift : 1UL << MINORBITS; 1898 range = max_loop ? max_loop << part_shift : 1UL << MINORBITS;
1912 1899
1913 idr_for_each(&loop_index_idr, &loop_exit_cb, NULL); 1900 idr_for_each(&loop_index_idr, &loop_exit_cb, NULL);
1914 idr_remove_all(&loop_index_idr);
1915 idr_destroy(&loop_index_idr); 1901 idr_destroy(&loop_index_idr);
1916 1902
1917 blk_unregister_region(MKDEV(LOOP_MAJOR, 0), range); 1903 blk_unregister_region(MKDEV(LOOP_MAJOR, 0), range);
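
loop_add() demonstrates the second idr_alloc() idiom in this series: requesting one specific id by passing the half-open range [i, i + 1), in which case -ENOSPC means the slot is taken and gets remapped to -EEXIST, while end == 0 requests any free id with no upper bound. Both branches in a condensed sketch (names invented):

#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/idr.h>

static DEFINE_IDR(demo_loop_idr);

/* @want >= 0 claims exactly that id; @want < 0 takes any free id. */
static int demo_claim_id(void *ptr, int want)
{
        int err;

        if (want >= 0) {
                err = idr_alloc(&demo_loop_idr, ptr, want, want + 1, GFP_KERNEL);
                if (err == -ENOSPC)
                        err = -EEXIST;  /* the requested slot is occupied */
        } else {
                /* end == 0 means "no upper bound" */
                err = idr_alloc(&demo_loop_idr, ptr, 0, 0, GFP_KERNEL);
        }
        return err;     /* the allocated id on success */
}
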
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index ade146bf65e5..7fecc784be01 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -98,6 +98,7 @@ static const char *nbdcmd_to_ascii(int cmd)
98 case NBD_CMD_READ: return "read"; 98 case NBD_CMD_READ: return "read";
99 case NBD_CMD_WRITE: return "write"; 99 case NBD_CMD_WRITE: return "write";
100 case NBD_CMD_DISC: return "disconnect"; 100 case NBD_CMD_DISC: return "disconnect";
101 case NBD_CMD_FLUSH: return "flush";
101 case NBD_CMD_TRIM: return "trim/discard"; 102 case NBD_CMD_TRIM: return "trim/discard";
102 } 103 }
103 return "invalid"; 104 return "invalid";
@@ -244,8 +245,15 @@ static int nbd_send_req(struct nbd_device *nbd, struct request *req)
244 245
245 request.magic = htonl(NBD_REQUEST_MAGIC); 246 request.magic = htonl(NBD_REQUEST_MAGIC);
246 request.type = htonl(nbd_cmd(req)); 247 request.type = htonl(nbd_cmd(req));
247 request.from = cpu_to_be64((u64)blk_rq_pos(req) << 9); 248
248 request.len = htonl(size); 249 if (nbd_cmd(req) == NBD_CMD_FLUSH) {
250 /* Other values are reserved for FLUSH requests. */
251 request.from = 0;
252 request.len = 0;
253 } else {
254 request.from = cpu_to_be64((u64)blk_rq_pos(req) << 9);
255 request.len = htonl(size);
256 }
249 memcpy(request.handle, &req, sizeof(req)); 257 memcpy(request.handle, &req, sizeof(req));
250 258
251 dprintk(DBG_TX, "%s: request %p: sending control (%s@%llu,%uB)\n", 259 dprintk(DBG_TX, "%s: request %p: sending control (%s@%llu,%uB)\n",
@@ -482,6 +490,11 @@ static void nbd_handle_req(struct nbd_device *nbd, struct request *req)
482 } 490 }
483 } 491 }
484 492
493 if (req->cmd_flags & REQ_FLUSH) {
494 BUG_ON(unlikely(blk_rq_sectors(req)));
495 nbd_cmd(req) = NBD_CMD_FLUSH;
496 }
497
485 req->errors = 0; 498 req->errors = 0;
486 499
487 mutex_lock(&nbd->tx_lock); 500 mutex_lock(&nbd->tx_lock);
@@ -551,6 +564,7 @@ static int nbd_thread(void *data)
551 */ 564 */
552 565
553static void do_nbd_request(struct request_queue *q) 566static void do_nbd_request(struct request_queue *q)
567 __releases(q->queue_lock) __acquires(q->queue_lock)
554{ 568{
555 struct request *req; 569 struct request *req;
556 570
@@ -595,12 +609,20 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd,
595 struct request sreq; 609 struct request sreq;
596 610
597 dev_info(disk_to_dev(nbd->disk), "NBD_DISCONNECT\n"); 611 dev_info(disk_to_dev(nbd->disk), "NBD_DISCONNECT\n");
612 if (!nbd->sock)
613 return -EINVAL;
598 614
615 mutex_unlock(&nbd->tx_lock);
616 fsync_bdev(bdev);
617 mutex_lock(&nbd->tx_lock);
599 blk_rq_init(NULL, &sreq); 618 blk_rq_init(NULL, &sreq);
600 sreq.cmd_type = REQ_TYPE_SPECIAL; 619 sreq.cmd_type = REQ_TYPE_SPECIAL;
601 nbd_cmd(&sreq) = NBD_CMD_DISC; 620 nbd_cmd(&sreq) = NBD_CMD_DISC;
621
622 /* Check again after getting mutex back. */
602 if (!nbd->sock) 623 if (!nbd->sock)
603 return -EINVAL; 624 return -EINVAL;
625
604 nbd_send_req(nbd, &sreq); 626 nbd_send_req(nbd, &sreq);
605 return 0; 627 return 0;
606 } 628 }
@@ -614,6 +636,7 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd,
614 nbd_clear_que(nbd); 636 nbd_clear_que(nbd);
615 BUG_ON(!list_empty(&nbd->queue_head)); 637 BUG_ON(!list_empty(&nbd->queue_head));
616 BUG_ON(!list_empty(&nbd->waiting_queue)); 638 BUG_ON(!list_empty(&nbd->waiting_queue));
639 kill_bdev(bdev);
617 if (file) 640 if (file)
618 fput(file); 641 fput(file);
619 return 0; 642 return 0;
@@ -681,9 +704,15 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd,
681 704
682 mutex_unlock(&nbd->tx_lock); 705 mutex_unlock(&nbd->tx_lock);
683 706
707 if (nbd->flags & NBD_FLAG_READ_ONLY)
708 set_device_ro(bdev, true);
684 if (nbd->flags & NBD_FLAG_SEND_TRIM) 709 if (nbd->flags & NBD_FLAG_SEND_TRIM)
685 queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, 710 queue_flag_set_unlocked(QUEUE_FLAG_DISCARD,
686 nbd->disk->queue); 711 nbd->disk->queue);
712 if (nbd->flags & NBD_FLAG_SEND_FLUSH)
713 blk_queue_flush(nbd->disk->queue, REQ_FLUSH);
714 else
715 blk_queue_flush(nbd->disk->queue, 0);
687 716
688 thread = kthread_create(nbd_thread, nbd, nbd->disk->disk_name); 717 thread = kthread_create(nbd_thread, nbd, nbd->disk->disk_name);
689 if (IS_ERR(thread)) { 718 if (IS_ERR(thread)) {
@@ -702,9 +731,12 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd,
702 nbd->file = NULL; 731 nbd->file = NULL;
703 nbd_clear_que(nbd); 732 nbd_clear_que(nbd);
704 dev_warn(disk_to_dev(nbd->disk), "queue cleared\n"); 733 dev_warn(disk_to_dev(nbd->disk), "queue cleared\n");
734 kill_bdev(bdev);
705 queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, nbd->disk->queue); 735 queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, nbd->disk->queue);
736 set_device_ro(bdev, false);
706 if (file) 737 if (file)
707 fput(file); 738 fput(file);
739 nbd->flags = 0;
708 nbd->bytesize = 0; 740 nbd->bytesize = 0;
709 bdev->bd_inode->i_size = 0; 741 bdev->bd_inode->i_size = 0;
710 set_capacity(nbd->disk, 0); 742 set_capacity(nbd->disk, 0);
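The nbd hunks above wire REQ_FLUSH through to the protocol: the request path tags flush requests as NBD_CMD_FLUSH, the transmit path zeroes the offset and length fields the protocol reserves for that command, and the server's NBD_FLAG_SEND_FLUSH decides whether the queue advertises flushing at all. A minimal sketch of the serialization step, using a hypothetical nbd_fill_request() helper (the driver does this inline in nbd_send_req()); struct nbd_request and the NBD_CMD_* constants come from <linux/nbd.h>:

#include <linux/nbd.h>
#include <linux/blkdev.h>
#include <linux/string.h>

/* Hypothetical helper for illustration only. NBD_CMD_FLUSH carries no
 * data range, so "from" and "len" must go out as zero; every other
 * command encodes the byte offset and transfer length. */
static void nbd_fill_request(struct nbd_request *request,
			     struct request *req, u32 type, unsigned int size)
{
	request->magic = htonl(NBD_REQUEST_MAGIC);
	request->type = htonl(type);
	if (type == NBD_CMD_FLUSH) {
		request->from = 0;
		request->len = 0;
	} else {
		request->from = cpu_to_be64((u64)blk_rq_pos(req) << 9);
		request->len = htonl(size);
	}
	memcpy(request->handle, &req, sizeof(req));	/* round-trip cookie */
}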
diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
index 1c7fdcd22a98..0ac9b45a585e 100644
--- a/drivers/char/ipmi/ipmi_si_intf.c
+++ b/drivers/char/ipmi/ipmi_si_intf.c
@@ -1208,6 +1208,16 @@ static int smi_num; /* Used to sequence the SMIs */
1208#define DEFAULT_REGSPACING 1 1208#define DEFAULT_REGSPACING 1
1209#define DEFAULT_REGSIZE 1 1209#define DEFAULT_REGSIZE 1
1210 1210
1211#ifdef CONFIG_ACPI
1212static bool si_tryacpi = 1;
1213#endif
1214#ifdef CONFIG_DMI
1215static bool si_trydmi = 1;
1216#endif
1217static bool si_tryplatform = 1;
1218#ifdef CONFIG_PCI
1219static bool si_trypci = 1;
1220#endif
1211static bool si_trydefaults = 1; 1221static bool si_trydefaults = 1;
1212static char *si_type[SI_MAX_PARMS]; 1222static char *si_type[SI_MAX_PARMS];
1213#define MAX_SI_TYPE_STR 30 1223#define MAX_SI_TYPE_STR 30
@@ -1238,6 +1248,25 @@ MODULE_PARM_DESC(hotmod, "Add and remove interfaces. See"
1238 " Documentation/IPMI.txt in the kernel sources for the" 1248 " Documentation/IPMI.txt in the kernel sources for the"
1239 " gory details."); 1249 " gory details.");
1240 1250
1251#ifdef CONFIG_ACPI
1252module_param_named(tryacpi, si_tryacpi, bool, 0);
1253MODULE_PARM_DESC(tryacpi, "Setting this to zero will disable the"
1254 " default scan of the interfaces identified via ACPI");
1255#endif
1256#ifdef CONFIG_DMI
1257module_param_named(trydmi, si_trydmi, bool, 0);
1258MODULE_PARM_DESC(trydmi, "Setting this to zero will disable the"
1259 " default scan of the interfaces identified via DMI");
1260#endif
1261module_param_named(tryplatform, si_tryplatform, bool, 0);
1262 MODULE_PARM_DESC(tryplatform, "Setting this to zero will disable the"
1263 " default scan of the interfaces identified via platform"
1264 " interfaces like openfirmware");
1265#ifdef CONFIG_PCI
1266module_param_named(trypci, si_trypci, bool, 0);
1267 MODULE_PARM_DESC(trypci, "Setting this to zero will disable the"
1268 " default scan of the interfaces identified via pci");
1269#endif
1241module_param_named(trydefaults, si_trydefaults, bool, 0); 1270module_param_named(trydefaults, si_trydefaults, bool, 0);
1242MODULE_PARM_DESC(trydefaults, "Setting this to 'false' will disable the" 1271MODULE_PARM_DESC(trydefaults, "Setting this to 'false' will disable the"
1243 " default scan of the KCS and SMIC interface at the standard" 1272 " default scan of the KCS and SMIC interface at the standard"
@@ -3371,13 +3400,15 @@ static int init_ipmi_si(void)
3371 return 0; 3400 return 0;
3372 initialized = 1; 3401 initialized = 1;
3373 3402
3374 rv = platform_driver_register(&ipmi_driver); 3403 if (si_tryplatform) {
3375 if (rv) { 3404 rv = platform_driver_register(&ipmi_driver);
3376 printk(KERN_ERR PFX "Unable to register driver: %d\n", rv); 3405 if (rv) {
3377 return rv; 3406 printk(KERN_ERR PFX "Unable to register "
3407 "driver: %d\n", rv);
3408 return rv;
3409 }
3378 } 3410 }
3379 3411
3380
3381 /* Parse out the si_type string into its components. */ 3412 /* Parse out the si_type string into its components. */
3382 str = si_type_str; 3413 str = si_type_str;
3383 if (*str != '\0') { 3414 if (*str != '\0') {
@@ -3400,24 +3431,31 @@ static int init_ipmi_si(void)
3400 return 0; 3431 return 0;
3401 3432
3402#ifdef CONFIG_PCI 3433#ifdef CONFIG_PCI
3403 rv = pci_register_driver(&ipmi_pci_driver); 3434 if (si_trypci) {
3404 if (rv) 3435 rv = pci_register_driver(&ipmi_pci_driver);
3405 printk(KERN_ERR PFX "Unable to register PCI driver: %d\n", rv); 3436 if (rv)
3406 else 3437 printk(KERN_ERR PFX "Unable to register "
3407 pci_registered = 1; 3438 "PCI driver: %d\n", rv);
3439 else
3440 pci_registered = 1;
3441 }
3408#endif 3442#endif
3409 3443
3410#ifdef CONFIG_ACPI 3444#ifdef CONFIG_ACPI
3411 pnp_register_driver(&ipmi_pnp_driver); 3445 if (si_tryacpi) {
3412 pnp_registered = 1; 3446 pnp_register_driver(&ipmi_pnp_driver);
3447 pnp_registered = 1;
3448 }
3413#endif 3449#endif
3414 3450
3415#ifdef CONFIG_DMI 3451#ifdef CONFIG_DMI
3416 dmi_find_bmc(); 3452 if (si_trydmi)
3453 dmi_find_bmc();
3417#endif 3454#endif
3418 3455
3419#ifdef CONFIG_ACPI 3456#ifdef CONFIG_ACPI
3420 spmi_find_bmc(); 3457 if (si_tryacpi)
3458 spmi_find_bmc();
3421#endif 3459#endif
3422 3460
3423 /* We prefer devices with interrupts, but in the case of a machine 3461 /* We prefer devices with interrupts, but in the case of a machine
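The new ipmi_si parameters above give each discovery mechanism its own opt-out, mirroring the existing trydefaults switch: for example, booting with ipmi_si.trydmi=0 (or loading the module with "modprobe ipmi_si trydmi=0") skips DMI-described interfaces while the ACPI, PCI, and platform scans still run.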
diff --git a/drivers/char/misc.c b/drivers/char/misc.c
index 522136d40843..190d4423653f 100644
--- a/drivers/char/misc.c
+++ b/drivers/char/misc.c
@@ -183,19 +183,12 @@ static const struct file_operations misc_fops = {
183 183
184int misc_register(struct miscdevice * misc) 184int misc_register(struct miscdevice * misc)
185{ 185{
186 struct miscdevice *c;
187 dev_t dev; 186 dev_t dev;
188 int err = 0; 187 int err = 0;
189 188
190 INIT_LIST_HEAD(&misc->list); 189 INIT_LIST_HEAD(&misc->list);
191 190
192 mutex_lock(&misc_mtx); 191 mutex_lock(&misc_mtx);
193 list_for_each_entry(c, &misc_list, list) {
194 if (c->minor == misc->minor) {
195 mutex_unlock(&misc_mtx);
196 return -EBUSY;
197 }
198 }
199 192
200 if (misc->minor == MISC_DYNAMIC_MINOR) { 193 if (misc->minor == MISC_DYNAMIC_MINOR) {
201 int i = find_first_zero_bit(misc_minors, DYNAMIC_MINORS); 194 int i = find_first_zero_bit(misc_minors, DYNAMIC_MINORS);
@@ -205,6 +198,15 @@ int misc_register(struct miscdevice * misc)
205 } 198 }
206 misc->minor = DYNAMIC_MINORS - i - 1; 199 misc->minor = DYNAMIC_MINORS - i - 1;
207 set_bit(i, misc_minors); 200 set_bit(i, misc_minors);
201 } else {
202 struct miscdevice *c;
203
204 list_for_each_entry(c, &misc_list, list) {
205 if (c->minor == misc->minor) {
206 mutex_unlock(&misc_mtx);
207 return -EBUSY;
208 }
209 }
208 } 210 }
209 211
210 dev = MKDEV(MISC_MAJOR, misc->minor); 212 dev = MKDEV(MISC_MAJOR, misc->minor);
diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
index fabbfe1a9253..ed87b2405806 100644
--- a/drivers/clk/clk.c
+++ b/drivers/clk/clk.c
@@ -52,31 +52,29 @@ static void clk_summary_show_subtree(struct seq_file *s, struct clk *c,
52 int level) 52 int level)
53{ 53{
54 struct clk *child; 54 struct clk *child;
55 struct hlist_node *tmp;
56 55
57 if (!c) 56 if (!c)
58 return; 57 return;
59 58
60 clk_summary_show_one(s, c, level); 59 clk_summary_show_one(s, c, level);
61 60
62 hlist_for_each_entry(child, tmp, &c->children, child_node) 61 hlist_for_each_entry(child, &c->children, child_node)
63 clk_summary_show_subtree(s, child, level + 1); 62 clk_summary_show_subtree(s, child, level + 1);
64} 63}
65 64
66static int clk_summary_show(struct seq_file *s, void *data) 65static int clk_summary_show(struct seq_file *s, void *data)
67{ 66{
68 struct clk *c; 67 struct clk *c;
69 struct hlist_node *tmp;
70 68
71 seq_printf(s, " clock enable_cnt prepare_cnt rate\n"); 69 seq_printf(s, " clock enable_cnt prepare_cnt rate\n");
72 seq_printf(s, "---------------------------------------------------------------------\n"); 70 seq_printf(s, "---------------------------------------------------------------------\n");
73 71
74 mutex_lock(&prepare_lock); 72 mutex_lock(&prepare_lock);
75 73
76 hlist_for_each_entry(c, tmp, &clk_root_list, child_node) 74 hlist_for_each_entry(c, &clk_root_list, child_node)
77 clk_summary_show_subtree(s, c, 0); 75 clk_summary_show_subtree(s, c, 0);
78 76
79 hlist_for_each_entry(c, tmp, &clk_orphan_list, child_node) 77 hlist_for_each_entry(c, &clk_orphan_list, child_node)
80 clk_summary_show_subtree(s, c, 0); 78 clk_summary_show_subtree(s, c, 0);
81 79
82 mutex_unlock(&prepare_lock); 80 mutex_unlock(&prepare_lock);
@@ -111,14 +109,13 @@ static void clk_dump_one(struct seq_file *s, struct clk *c, int level)
111static void clk_dump_subtree(struct seq_file *s, struct clk *c, int level) 109static void clk_dump_subtree(struct seq_file *s, struct clk *c, int level)
112{ 110{
113 struct clk *child; 111 struct clk *child;
114 struct hlist_node *tmp;
115 112
116 if (!c) 113 if (!c)
117 return; 114 return;
118 115
119 clk_dump_one(s, c, level); 116 clk_dump_one(s, c, level);
120 117
121 hlist_for_each_entry(child, tmp, &c->children, child_node) { 118 hlist_for_each_entry(child, &c->children, child_node) {
122 seq_printf(s, ","); 119 seq_printf(s, ",");
123 clk_dump_subtree(s, child, level + 1); 120 clk_dump_subtree(s, child, level + 1);
124 } 121 }
@@ -129,21 +126,20 @@ static void clk_dump_subtree(struct seq_file *s, struct clk *c, int level)
129static int clk_dump(struct seq_file *s, void *data) 126static int clk_dump(struct seq_file *s, void *data)
130{ 127{
131 struct clk *c; 128 struct clk *c;
132 struct hlist_node *tmp;
133 bool first_node = true; 129 bool first_node = true;
134 130
135 seq_printf(s, "{"); 131 seq_printf(s, "{");
136 132
137 mutex_lock(&prepare_lock); 133 mutex_lock(&prepare_lock);
138 134
139 hlist_for_each_entry(c, tmp, &clk_root_list, child_node) { 135 hlist_for_each_entry(c, &clk_root_list, child_node) {
140 if (!first_node) 136 if (!first_node)
141 seq_printf(s, ","); 137 seq_printf(s, ",");
142 first_node = false; 138 first_node = false;
143 clk_dump_subtree(s, c, 0); 139 clk_dump_subtree(s, c, 0);
144 } 140 }
145 141
146 hlist_for_each_entry(c, tmp, &clk_orphan_list, child_node) { 142 hlist_for_each_entry(c, &clk_orphan_list, child_node) {
147 seq_printf(s, ","); 143 seq_printf(s, ",");
148 clk_dump_subtree(s, c, 0); 144 clk_dump_subtree(s, c, 0);
149 } 145 }
@@ -222,7 +218,6 @@ out:
222static int clk_debug_create_subtree(struct clk *clk, struct dentry *pdentry) 218static int clk_debug_create_subtree(struct clk *clk, struct dentry *pdentry)
223{ 219{
224 struct clk *child; 220 struct clk *child;
225 struct hlist_node *tmp;
 226 int ret = -EINVAL; 221 int ret = -EINVAL;
227 222
228 if (!clk || !pdentry) 223 if (!clk || !pdentry)
@@ -233,7 +228,7 @@ static int clk_debug_create_subtree(struct clk *clk, struct dentry *pdentry)
233 if (ret) 228 if (ret)
234 goto out; 229 goto out;
235 230
236 hlist_for_each_entry(child, tmp, &clk->children, child_node) 231 hlist_for_each_entry(child, &clk->children, child_node)
237 clk_debug_create_subtree(child, clk->dentry); 232 clk_debug_create_subtree(child, clk->dentry);
238 233
239 ret = 0; 234 ret = 0;
@@ -299,7 +294,6 @@ out:
299static int __init clk_debug_init(void) 294static int __init clk_debug_init(void)
300{ 295{
301 struct clk *clk; 296 struct clk *clk;
302 struct hlist_node *tmp;
303 struct dentry *d; 297 struct dentry *d;
304 298
305 rootdir = debugfs_create_dir("clk", NULL); 299 rootdir = debugfs_create_dir("clk", NULL);
@@ -324,10 +318,10 @@ static int __init clk_debug_init(void)
324 318
325 mutex_lock(&prepare_lock); 319 mutex_lock(&prepare_lock);
326 320
327 hlist_for_each_entry(clk, tmp, &clk_root_list, child_node) 321 hlist_for_each_entry(clk, &clk_root_list, child_node)
328 clk_debug_create_subtree(clk, rootdir); 322 clk_debug_create_subtree(clk, rootdir);
329 323
330 hlist_for_each_entry(clk, tmp, &clk_orphan_list, child_node) 324 hlist_for_each_entry(clk, &clk_orphan_list, child_node)
331 clk_debug_create_subtree(clk, orphandir); 325 clk_debug_create_subtree(clk, orphandir);
332 326
333 inited = 1; 327 inited = 1;
@@ -345,13 +339,12 @@ static inline int clk_debug_register(struct clk *clk) { return 0; }
345static void clk_disable_unused_subtree(struct clk *clk) 339static void clk_disable_unused_subtree(struct clk *clk)
346{ 340{
347 struct clk *child; 341 struct clk *child;
348 struct hlist_node *tmp;
349 unsigned long flags; 342 unsigned long flags;
350 343
351 if (!clk) 344 if (!clk)
352 goto out; 345 goto out;
353 346
354 hlist_for_each_entry(child, tmp, &clk->children, child_node) 347 hlist_for_each_entry(child, &clk->children, child_node)
355 clk_disable_unused_subtree(child); 348 clk_disable_unused_subtree(child);
356 349
357 spin_lock_irqsave(&enable_lock, flags); 350 spin_lock_irqsave(&enable_lock, flags);
@@ -384,14 +377,13 @@ out:
384static int clk_disable_unused(void) 377static int clk_disable_unused(void)
385{ 378{
386 struct clk *clk; 379 struct clk *clk;
387 struct hlist_node *tmp;
388 380
389 mutex_lock(&prepare_lock); 381 mutex_lock(&prepare_lock);
390 382
391 hlist_for_each_entry(clk, tmp, &clk_root_list, child_node) 383 hlist_for_each_entry(clk, &clk_root_list, child_node)
392 clk_disable_unused_subtree(clk); 384 clk_disable_unused_subtree(clk);
393 385
394 hlist_for_each_entry(clk, tmp, &clk_orphan_list, child_node) 386 hlist_for_each_entry(clk, &clk_orphan_list, child_node)
395 clk_disable_unused_subtree(clk); 387 clk_disable_unused_subtree(clk);
396 388
397 mutex_unlock(&prepare_lock); 389 mutex_unlock(&prepare_lock);
@@ -484,12 +476,11 @@ static struct clk *__clk_lookup_subtree(const char *name, struct clk *clk)
484{ 476{
485 struct clk *child; 477 struct clk *child;
486 struct clk *ret; 478 struct clk *ret;
487 struct hlist_node *tmp;
488 479
489 if (!strcmp(clk->name, name)) 480 if (!strcmp(clk->name, name))
490 return clk; 481 return clk;
491 482
492 hlist_for_each_entry(child, tmp, &clk->children, child_node) { 483 hlist_for_each_entry(child, &clk->children, child_node) {
493 ret = __clk_lookup_subtree(name, child); 484 ret = __clk_lookup_subtree(name, child);
494 if (ret) 485 if (ret)
495 return ret; 486 return ret;
@@ -502,20 +493,19 @@ struct clk *__clk_lookup(const char *name)
502{ 493{
503 struct clk *root_clk; 494 struct clk *root_clk;
504 struct clk *ret; 495 struct clk *ret;
505 struct hlist_node *tmp;
506 496
507 if (!name) 497 if (!name)
508 return NULL; 498 return NULL;
509 499
510 /* search the 'proper' clk tree first */ 500 /* search the 'proper' clk tree first */
511 hlist_for_each_entry(root_clk, tmp, &clk_root_list, child_node) { 501 hlist_for_each_entry(root_clk, &clk_root_list, child_node) {
512 ret = __clk_lookup_subtree(name, root_clk); 502 ret = __clk_lookup_subtree(name, root_clk);
513 if (ret) 503 if (ret)
514 return ret; 504 return ret;
515 } 505 }
516 506
517 /* if not found, then search the orphan tree */ 507 /* if not found, then search the orphan tree */
518 hlist_for_each_entry(root_clk, tmp, &clk_orphan_list, child_node) { 508 hlist_for_each_entry(root_clk, &clk_orphan_list, child_node) {
519 ret = __clk_lookup_subtree(name, root_clk); 509 ret = __clk_lookup_subtree(name, root_clk);
520 if (ret) 510 if (ret)
521 return ret; 511 return ret;
@@ -812,7 +802,6 @@ static void __clk_recalc_rates(struct clk *clk, unsigned long msg)
812{ 802{
813 unsigned long old_rate; 803 unsigned long old_rate;
814 unsigned long parent_rate = 0; 804 unsigned long parent_rate = 0;
815 struct hlist_node *tmp;
816 struct clk *child; 805 struct clk *child;
817 806
818 old_rate = clk->rate; 807 old_rate = clk->rate;
@@ -832,7 +821,7 @@ static void __clk_recalc_rates(struct clk *clk, unsigned long msg)
832 if (clk->notifier_count && msg) 821 if (clk->notifier_count && msg)
833 __clk_notify(clk, msg, old_rate, clk->rate); 822 __clk_notify(clk, msg, old_rate, clk->rate);
834 823
835 hlist_for_each_entry(child, tmp, &clk->children, child_node) 824 hlist_for_each_entry(child, &clk->children, child_node)
836 __clk_recalc_rates(child, msg); 825 __clk_recalc_rates(child, msg);
837} 826}
838 827
@@ -878,7 +867,6 @@ EXPORT_SYMBOL_GPL(clk_get_rate);
878 */ 867 */
879static int __clk_speculate_rates(struct clk *clk, unsigned long parent_rate) 868static int __clk_speculate_rates(struct clk *clk, unsigned long parent_rate)
880{ 869{
881 struct hlist_node *tmp;
882 struct clk *child; 870 struct clk *child;
883 unsigned long new_rate; 871 unsigned long new_rate;
884 int ret = NOTIFY_DONE; 872 int ret = NOTIFY_DONE;
@@ -895,7 +883,7 @@ static int __clk_speculate_rates(struct clk *clk, unsigned long parent_rate)
895 if (ret == NOTIFY_BAD) 883 if (ret == NOTIFY_BAD)
896 goto out; 884 goto out;
897 885
898 hlist_for_each_entry(child, tmp, &clk->children, child_node) { 886 hlist_for_each_entry(child, &clk->children, child_node) {
899 ret = __clk_speculate_rates(child, new_rate); 887 ret = __clk_speculate_rates(child, new_rate);
900 if (ret == NOTIFY_BAD) 888 if (ret == NOTIFY_BAD)
901 break; 889 break;
@@ -908,11 +896,10 @@ out:
908static void clk_calc_subtree(struct clk *clk, unsigned long new_rate) 896static void clk_calc_subtree(struct clk *clk, unsigned long new_rate)
909{ 897{
910 struct clk *child; 898 struct clk *child;
911 struct hlist_node *tmp;
912 899
913 clk->new_rate = new_rate; 900 clk->new_rate = new_rate;
914 901
915 hlist_for_each_entry(child, tmp, &clk->children, child_node) { 902 hlist_for_each_entry(child, &clk->children, child_node) {
916 if (child->ops->recalc_rate) 903 if (child->ops->recalc_rate)
917 child->new_rate = child->ops->recalc_rate(child->hw, new_rate); 904 child->new_rate = child->ops->recalc_rate(child->hw, new_rate);
918 else 905 else
@@ -983,7 +970,6 @@ out:
983 */ 970 */
984static struct clk *clk_propagate_rate_change(struct clk *clk, unsigned long event) 971static struct clk *clk_propagate_rate_change(struct clk *clk, unsigned long event)
985{ 972{
986 struct hlist_node *tmp;
987 struct clk *child, *fail_clk = NULL; 973 struct clk *child, *fail_clk = NULL;
988 int ret = NOTIFY_DONE; 974 int ret = NOTIFY_DONE;
989 975
@@ -996,7 +982,7 @@ static struct clk *clk_propagate_rate_change(struct clk *clk, unsigned long even
996 fail_clk = clk; 982 fail_clk = clk;
997 } 983 }
998 984
999 hlist_for_each_entry(child, tmp, &clk->children, child_node) { 985 hlist_for_each_entry(child, &clk->children, child_node) {
1000 clk = clk_propagate_rate_change(child, event); 986 clk = clk_propagate_rate_change(child, event);
1001 if (clk) 987 if (clk)
1002 fail_clk = clk; 988 fail_clk = clk;
@@ -1014,7 +1000,6 @@ static void clk_change_rate(struct clk *clk)
1014 struct clk *child; 1000 struct clk *child;
1015 unsigned long old_rate; 1001 unsigned long old_rate;
1016 unsigned long best_parent_rate = 0; 1002 unsigned long best_parent_rate = 0;
1017 struct hlist_node *tmp;
1018 1003
1019 old_rate = clk->rate; 1004 old_rate = clk->rate;
1020 1005
@@ -1032,7 +1017,7 @@ static void clk_change_rate(struct clk *clk)
1032 if (clk->notifier_count && old_rate != clk->rate) 1017 if (clk->notifier_count && old_rate != clk->rate)
1033 __clk_notify(clk, POST_RATE_CHANGE, old_rate, clk->rate); 1018 __clk_notify(clk, POST_RATE_CHANGE, old_rate, clk->rate);
1034 1019
1035 hlist_for_each_entry(child, tmp, &clk->children, child_node) 1020 hlist_for_each_entry(child, &clk->children, child_node)
1036 clk_change_rate(child); 1021 clk_change_rate(child);
1037} 1022}
1038 1023
@@ -1348,7 +1333,7 @@ int __clk_init(struct device *dev, struct clk *clk)
1348{ 1333{
1349 int i, ret = 0; 1334 int i, ret = 0;
1350 struct clk *orphan; 1335 struct clk *orphan;
1351 struct hlist_node *tmp, *tmp2; 1336 struct hlist_node *tmp2;
1352 1337
1353 if (!clk) 1338 if (!clk)
1354 return -EINVAL; 1339 return -EINVAL;
@@ -1448,7 +1433,7 @@ int __clk_init(struct device *dev, struct clk *clk)
1448 * walk the list of orphan clocks and reparent any that are children of 1433 * walk the list of orphan clocks and reparent any that are children of
1449 * this clock 1434 * this clock
1450 */ 1435 */
1451 hlist_for_each_entry_safe(orphan, tmp, tmp2, &clk_orphan_list, child_node) { 1436 hlist_for_each_entry_safe(orphan, tmp2, &clk_orphan_list, child_node) {
1452 if (orphan->ops->get_parent) { 1437 if (orphan->ops->get_parent) {
1453 i = orphan->ops->get_parent(orphan->hw); 1438 i = orphan->ops->get_parent(orphan->hw);
1454 if (!strcmp(clk->name, orphan->parent_names[i])) 1439 if (!strcmp(clk->name, orphan->parent_names[i]))
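The clk.c conversion above is the mechanical pattern the hlist series applies throughout this merge: the iterators now compute the entry pointer directly, so the extra struct hlist_node cursor disappears (hlist_for_each_entry_safe keeps only its lookahead cursor). A toy sketch of the new three-argument form; struct item and find_item() are illustrative, not kernel code:

#include <linux/list.h>

struct item {
	int key;
	struct hlist_node node;
};

/* After the series: pass the entry pointer, the list head, and the
 * member name -- no separate iteration cursor to declare. */
static struct item *find_item(struct hlist_head *head, int key)
{
	struct item *it;

	hlist_for_each_entry(it, head, node)	/* was: (it, pos, head, node) */
		if (it->key == key)
			return it;
	return NULL;
}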
diff --git a/drivers/dca/dca-sysfs.c b/drivers/dca/dca-sysfs.c
index 591b6597c00a..126cf295b198 100644
--- a/drivers/dca/dca-sysfs.c
+++ b/drivers/dca/dca-sysfs.c
@@ -53,22 +53,19 @@ void dca_sysfs_remove_req(struct dca_provider *dca, int slot)
53int dca_sysfs_add_provider(struct dca_provider *dca, struct device *dev) 53int dca_sysfs_add_provider(struct dca_provider *dca, struct device *dev)
54{ 54{
55 struct device *cd; 55 struct device *cd;
56 int err = 0; 56 int ret;
57 57
58idr_try_again: 58 idr_preload(GFP_KERNEL);
59 if (!idr_pre_get(&dca_idr, GFP_KERNEL))
60 return -ENOMEM;
61 spin_lock(&dca_idr_lock); 59 spin_lock(&dca_idr_lock);
62 err = idr_get_new(&dca_idr, dca, &dca->id); 60
61 ret = idr_alloc(&dca_idr, dca, 0, 0, GFP_NOWAIT);
62 if (ret >= 0)
63 dca->id = ret;
64
63 spin_unlock(&dca_idr_lock); 65 spin_unlock(&dca_idr_lock);
64 switch (err) { 66 idr_preload_end();
65 case 0: 67 if (ret < 0)
66 break; 68 return ret;
67 case -EAGAIN:
68 goto idr_try_again;
69 default:
70 return err;
71 }
72 69
73 cd = device_create(dca_class, dev, MKDEV(0, 0), NULL, "dca%d", dca->id); 70 cd = device_create(dca_class, dev, MKDEV(0, 0), NULL, "dca%d", dca->id);
74 if (IS_ERR(cd)) { 71 if (IS_ERR(cd)) {
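The dca conversion above is the canonical replacement for the old idr_pre_get()/idr_get_new() retry dance: preload outside the spinlock, allocate with GFP_NOWAIT inside it, and read the new id straight from idr_alloc()'s return value. A stand-alone sketch (example_idr, example_lock, and example_alloc() are made up for illustration):

#include <linux/idr.h>
#include <linux/spinlock.h>

static DEFINE_IDR(example_idr);
static DEFINE_SPINLOCK(example_lock);

/* idr_alloc() returns the allocated id (>= 0) or a negative errno, so
 * the historical -EAGAIN retry loop is gone entirely. */
static int example_alloc(void *ptr)
{
	int id;

	idr_preload(GFP_KERNEL);		/* may sleep; outside the lock */
	spin_lock(&example_lock);
	id = idr_alloc(&example_idr, ptr, 0, 0, GFP_NOWAIT);
	spin_unlock(&example_lock);
	idr_preload_end();

	return id;	/* callers typically pass id < 0 straight through */
}

An end argument of 0 means "no upper bound", matching the unlimited allocations in the hunks above.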
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index 242b8c0a3de8..b2728d6ba2fd 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -686,18 +686,14 @@ static int get_dma_id(struct dma_device *device)
686{ 686{
687 int rc; 687 int rc;
688 688
689 idr_retry:
690 if (!idr_pre_get(&dma_idr, GFP_KERNEL))
691 return -ENOMEM;
692 mutex_lock(&dma_list_mutex); 689 mutex_lock(&dma_list_mutex);
693 rc = idr_get_new(&dma_idr, NULL, &device->dev_id);
694 mutex_unlock(&dma_list_mutex);
695 if (rc == -EAGAIN)
696 goto idr_retry;
697 else if (rc != 0)
698 return rc;
699 690
700 return 0; 691 rc = idr_alloc(&dma_idr, NULL, 0, 0, GFP_KERNEL);
692 if (rc >= 0)
693 device->dev_id = rc;
694
695 mutex_unlock(&dma_list_mutex);
696 return rc < 0 ? rc : 0;
701} 697}
702 698
703/** 699/**
diff --git a/drivers/firewire/core-cdev.c b/drivers/firewire/core-cdev.c
index f8d22872d753..27ac423ab25e 100644
--- a/drivers/firewire/core-cdev.c
+++ b/drivers/firewire/core-cdev.c
@@ -487,27 +487,28 @@ static int ioctl_get_info(struct client *client, union ioctl_arg *arg)
487static int add_client_resource(struct client *client, 487static int add_client_resource(struct client *client,
488 struct client_resource *resource, gfp_t gfp_mask) 488 struct client_resource *resource, gfp_t gfp_mask)
489{ 489{
490 bool preload = gfp_mask & __GFP_WAIT;
490 unsigned long flags; 491 unsigned long flags;
491 int ret; 492 int ret;
492 493
493 retry: 494 if (preload)
494 if (idr_pre_get(&client->resource_idr, gfp_mask) == 0) 495 idr_preload(gfp_mask);
495 return -ENOMEM;
496
497 spin_lock_irqsave(&client->lock, flags); 496 spin_lock_irqsave(&client->lock, flags);
497
498 if (client->in_shutdown) 498 if (client->in_shutdown)
499 ret = -ECANCELED; 499 ret = -ECANCELED;
500 else 500 else
501 ret = idr_get_new(&client->resource_idr, resource, 501 ret = idr_alloc(&client->resource_idr, resource, 0, 0,
502 &resource->handle); 502 GFP_NOWAIT);
503 if (ret >= 0) { 503 if (ret >= 0) {
504 resource->handle = ret;
504 client_get(client); 505 client_get(client);
505 schedule_if_iso_resource(resource); 506 schedule_if_iso_resource(resource);
506 } 507 }
507 spin_unlock_irqrestore(&client->lock, flags);
508 508
509 if (ret == -EAGAIN) 509 spin_unlock_irqrestore(&client->lock, flags);
510 goto retry; 510 if (preload)
511 idr_preload_end();
511 512
512 return ret < 0 ? ret : 0; 513 return ret < 0 ? ret : 0;
513} 514}
@@ -1779,7 +1780,6 @@ static int fw_device_op_release(struct inode *inode, struct file *file)
1779 wait_event(client->tx_flush_wait, !has_outbound_transactions(client)); 1780 wait_event(client->tx_flush_wait, !has_outbound_transactions(client));
1780 1781
1781 idr_for_each(&client->resource_idr, shutdown_resource, client); 1782 idr_for_each(&client->resource_idr, shutdown_resource, client);
1782 idr_remove_all(&client->resource_idr);
1783 idr_destroy(&client->resource_idr); 1783 idr_destroy(&client->resource_idr);
1784 1784
1785 list_for_each_entry_safe(event, next_event, &client->event_list, link) 1785 list_for_each_entry_safe(event, next_event, &client->event_list, link)
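add_client_resource() above shows the variant for callers that may run in atomic context: only preload when the passed-in gfp_mask allows sleeping. A hedged sketch of that shape (add_resource() is hypothetical; __GFP_WAIT is the sleep flag of this era):

#include <linux/idr.h>
#include <linux/gfp.h>
#include <linux/spinlock.h>

/* Preloading sleeps, so it is legal only when the caller's mask says
 * blocking is allowed; the allocation itself always uses GFP_NOWAIT
 * because it runs under the spinlock. */
static int add_resource(struct idr *idr, spinlock_t *lock,
			void *res, gfp_t gfp_mask)
{
	bool preload = gfp_mask & __GFP_WAIT;
	unsigned long flags;
	int id;

	if (preload)
		idr_preload(gfp_mask);
	spin_lock_irqsave(lock, flags);
	id = idr_alloc(idr, res, 0, 0, GFP_NOWAIT);
	spin_unlock_irqrestore(lock, flags);
	if (preload)
		idr_preload_end();

	return id;
}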
diff --git a/drivers/firewire/core-device.c b/drivers/firewire/core-device.c
index 3873d535b28d..03ce7d980c6a 100644
--- a/drivers/firewire/core-device.c
+++ b/drivers/firewire/core-device.c
@@ -1017,12 +1017,11 @@ static void fw_device_init(struct work_struct *work)
1017 1017
1018 fw_device_get(device); 1018 fw_device_get(device);
1019 down_write(&fw_device_rwsem); 1019 down_write(&fw_device_rwsem);
1020 ret = idr_pre_get(&fw_device_idr, GFP_KERNEL) ? 1020 minor = idr_alloc(&fw_device_idr, device, 0, 1 << MINORBITS,
1021 idr_get_new(&fw_device_idr, device, &minor) : 1021 GFP_KERNEL);
1022 -ENOMEM;
1023 up_write(&fw_device_rwsem); 1022 up_write(&fw_device_rwsem);
1024 1023
1025 if (ret < 0) 1024 if (minor < 0)
1026 goto error; 1025 goto error;
1027 1026
1028 device->device.bus = &fw_bus_type; 1027 device->device.bus = &fw_bus_type;
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index 4828fe7c66cb..fff9786cdc64 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -411,15 +411,10 @@ static int gpio_setup_irq(struct gpio_desc *desc, struct device *dev,
411 goto err_out; 411 goto err_out;
412 } 412 }
413 413
414 do { 414 ret = idr_alloc(&dirent_idr, value_sd, 1, 0, GFP_KERNEL);
415 ret = -ENOMEM; 415 if (ret < 0)
416 if (idr_pre_get(&dirent_idr, GFP_KERNEL))
417 ret = idr_get_new_above(&dirent_idr, value_sd,
418 1, &id);
419 } while (ret == -EAGAIN);
420
421 if (ret)
422 goto free_sd; 416 goto free_sd;
417 id = ret;
423 418
424 desc->flags &= GPIO_FLAGS_MASK; 419 desc->flags &= GPIO_FLAGS_MASK;
425 desc->flags |= (unsigned long)id << ID_SHIFT; 420 desc->flags |= (unsigned long)id << ID_SHIFT;
diff --git a/drivers/gpu/drm/drm_context.c b/drivers/gpu/drm/drm_context.c
index 45adf97e678f..725968d38976 100644
--- a/drivers/gpu/drm/drm_context.c
+++ b/drivers/gpu/drm/drm_context.c
@@ -74,24 +74,13 @@ void drm_ctxbitmap_free(struct drm_device * dev, int ctx_handle)
74 */ 74 */
75static int drm_ctxbitmap_next(struct drm_device * dev) 75static int drm_ctxbitmap_next(struct drm_device * dev)
76{ 76{
77 int new_id;
78 int ret; 77 int ret;
79 78
80again:
81 if (idr_pre_get(&dev->ctx_idr, GFP_KERNEL) == 0) {
82 DRM_ERROR("Out of memory expanding drawable idr\n");
83 return -ENOMEM;
84 }
85 mutex_lock(&dev->struct_mutex); 79 mutex_lock(&dev->struct_mutex);
86 ret = idr_get_new_above(&dev->ctx_idr, NULL, 80 ret = idr_alloc(&dev->ctx_idr, NULL, DRM_RESERVED_CONTEXTS, 0,
87 DRM_RESERVED_CONTEXTS, &new_id); 81 GFP_KERNEL);
88 mutex_unlock(&dev->struct_mutex); 82 mutex_unlock(&dev->struct_mutex);
89 if (ret == -EAGAIN) 83 return ret;
90 goto again;
91 else if (ret)
92 return ret;
93
94 return new_id;
95} 84}
96 85
97/** 86/**
@@ -118,7 +107,7 @@ int drm_ctxbitmap_init(struct drm_device * dev)
118void drm_ctxbitmap_cleanup(struct drm_device * dev) 107void drm_ctxbitmap_cleanup(struct drm_device * dev)
119{ 108{
120 mutex_lock(&dev->struct_mutex); 109 mutex_lock(&dev->struct_mutex);
121 idr_remove_all(&dev->ctx_idr); 110 idr_destroy(&dev->ctx_idr);
122 mutex_unlock(&dev->struct_mutex); 111 mutex_unlock(&dev->struct_mutex);
123} 112}
124 113
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index 3bdf2a650d9c..792c3e3795ca 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -266,32 +266,21 @@ char *drm_get_connector_status_name(enum drm_connector_status status)
266static int drm_mode_object_get(struct drm_device *dev, 266static int drm_mode_object_get(struct drm_device *dev,
267 struct drm_mode_object *obj, uint32_t obj_type) 267 struct drm_mode_object *obj, uint32_t obj_type)
268{ 268{
269 int new_id = 0;
270 int ret; 269 int ret;
271 270
272again:
273 if (idr_pre_get(&dev->mode_config.crtc_idr, GFP_KERNEL) == 0) {
274 DRM_ERROR("Ran out memory getting a mode number\n");
275 return -ENOMEM;
276 }
277
278 mutex_lock(&dev->mode_config.idr_mutex); 271 mutex_lock(&dev->mode_config.idr_mutex);
279 ret = idr_get_new_above(&dev->mode_config.crtc_idr, obj, 1, &new_id); 272 ret = idr_alloc(&dev->mode_config.crtc_idr, obj, 1, 0, GFP_KERNEL);
280 273 if (ret >= 0) {
281 if (!ret) {
282 /* 274 /*
283 * Set up the object linking under the protection of the idr 275 * Set up the object linking under the protection of the idr
284 * lock so that other users can't see inconsistent state. 276 * lock so that other users can't see inconsistent state.
285 */ 277 */
286 obj->id = new_id; 278 obj->id = ret;
287 obj->type = obj_type; 279 obj->type = obj_type;
288 } 280 }
289 mutex_unlock(&dev->mode_config.idr_mutex); 281 mutex_unlock(&dev->mode_config.idr_mutex);
290 282
291 if (ret == -EAGAIN) 283 return ret < 0 ? ret : 0;
292 goto again;
293
294 return ret;
295} 284}
296 285
297/** 286/**
@@ -1272,7 +1261,6 @@ void drm_mode_config_cleanup(struct drm_device *dev)
1272 crtc->funcs->destroy(crtc); 1261 crtc->funcs->destroy(crtc);
1273 } 1262 }
1274 1263
1275 idr_remove_all(&dev->mode_config.crtc_idr);
1276 idr_destroy(&dev->mode_config.crtc_idr); 1264 idr_destroy(&dev->mode_config.crtc_idr);
1277} 1265}
1278EXPORT_SYMBOL(drm_mode_config_cleanup); 1266EXPORT_SYMBOL(drm_mode_config_cleanup);
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index be174cab105a..25f91cd23e60 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -297,7 +297,6 @@ static void __exit drm_core_exit(void)
297 297
298 unregister_chrdev(DRM_MAJOR, "drm"); 298 unregister_chrdev(DRM_MAJOR, "drm");
299 299
300 idr_remove_all(&drm_minors_idr);
301 idr_destroy(&drm_minors_idr); 300 idr_destroy(&drm_minors_idr);
302} 301}
303 302
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index 24efae464e2c..af779ae19ebf 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -270,21 +270,19 @@ drm_gem_handle_create(struct drm_file *file_priv,
270 int ret; 270 int ret;
271 271
272 /* 272 /*
273 * Get the user-visible handle using idr. 273 * Get the user-visible handle using idr. Preload and perform
274 * allocation under our spinlock.
274 */ 275 */
275again: 276 idr_preload(GFP_KERNEL);
276 /* ensure there is space available to allocate a handle */
277 if (idr_pre_get(&file_priv->object_idr, GFP_KERNEL) == 0)
278 return -ENOMEM;
279
280 /* do the allocation under our spinlock */
281 spin_lock(&file_priv->table_lock); 277 spin_lock(&file_priv->table_lock);
282 ret = idr_get_new_above(&file_priv->object_idr, obj, 1, (int *)handlep); 278
279 ret = idr_alloc(&file_priv->object_idr, obj, 1, 0, GFP_NOWAIT);
280
283 spin_unlock(&file_priv->table_lock); 281 spin_unlock(&file_priv->table_lock);
284 if (ret == -EAGAIN) 282 idr_preload_end();
285 goto again; 283 if (ret < 0)
286 else if (ret)
287 return ret; 284 return ret;
285 *handlep = ret;
288 286
289 drm_gem_object_handle_reference(obj); 287 drm_gem_object_handle_reference(obj);
290 288
@@ -451,29 +449,25 @@ drm_gem_flink_ioctl(struct drm_device *dev, void *data,
451 if (obj == NULL) 449 if (obj == NULL)
452 return -ENOENT; 450 return -ENOENT;
453 451
454again: 452 idr_preload(GFP_KERNEL);
455 if (idr_pre_get(&dev->object_name_idr, GFP_KERNEL) == 0) {
456 ret = -ENOMEM;
457 goto err;
458 }
459
460 spin_lock(&dev->object_name_lock); 453 spin_lock(&dev->object_name_lock);
461 if (!obj->name) { 454 if (!obj->name) {
462 ret = idr_get_new_above(&dev->object_name_idr, obj, 1, 455 ret = idr_alloc(&dev->object_name_idr, obj, 1, 0, GFP_NOWAIT);
463 &obj->name); 456 obj->name = ret;
464 args->name = (uint64_t) obj->name; 457 args->name = (uint64_t) obj->name;
465 spin_unlock(&dev->object_name_lock); 458 spin_unlock(&dev->object_name_lock);
459 idr_preload_end();
466 460
467 if (ret == -EAGAIN) 461 if (ret < 0)
468 goto again;
469 else if (ret)
470 goto err; 462 goto err;
463 ret = 0;
471 464
472 /* Allocate a reference for the name table. */ 465 /* Allocate a reference for the name table. */
473 drm_gem_object_reference(obj); 466 drm_gem_object_reference(obj);
474 } else { 467 } else {
475 args->name = (uint64_t) obj->name; 468 args->name = (uint64_t) obj->name;
476 spin_unlock(&dev->object_name_lock); 469 spin_unlock(&dev->object_name_lock);
470 idr_preload_end();
477 ret = 0; 471 ret = 0;
478 } 472 }
479 473
@@ -561,8 +555,6 @@ drm_gem_release(struct drm_device *dev, struct drm_file *file_private)
561{ 555{
562 idr_for_each(&file_private->object_idr, 556 idr_for_each(&file_private->object_idr,
563 &drm_gem_object_release_handle, file_private); 557 &drm_gem_object_release_handle, file_private);
564
565 idr_remove_all(&file_private->object_idr);
566 idr_destroy(&file_private->object_idr); 558 idr_destroy(&file_private->object_idr);
567} 559}
568 560
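The idr_remove_all() calls removed in the DRM hunks are not lost functionality: this series folds that work into idr_destroy(), so a teardown pass now needs only the per-object walk plus the destroy. Sketch with made-up names (release_one() stands in for the subsystem's real release callback):

#include <linux/idr.h>
#include <linux/slab.h>

/* idr_for_each() callback; id and data are unused in this toy version. */
static int release_one(int id, void *p, void *data)
{
	kfree(p);	/* stand-in for the real per-object release */
	return 0;
}

static void release_all(struct idr *idr)
{
	idr_for_each(idr, release_one, NULL);
	idr_destroy(idr);	/* now also empties the tree; no idr_remove_all() */
}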
diff --git a/drivers/gpu/drm/drm_hashtab.c b/drivers/gpu/drm/drm_hashtab.c
index 80254547a3f8..7e4bae760e27 100644
--- a/drivers/gpu/drm/drm_hashtab.c
+++ b/drivers/gpu/drm/drm_hashtab.c
@@ -60,14 +60,13 @@ void drm_ht_verbose_list(struct drm_open_hash *ht, unsigned long key)
60{ 60{
61 struct drm_hash_item *entry; 61 struct drm_hash_item *entry;
62 struct hlist_head *h_list; 62 struct hlist_head *h_list;
63 struct hlist_node *list;
64 unsigned int hashed_key; 63 unsigned int hashed_key;
65 int count = 0; 64 int count = 0;
66 65
67 hashed_key = hash_long(key, ht->order); 66 hashed_key = hash_long(key, ht->order);
68 DRM_DEBUG("Key is 0x%08lx, Hashed key is 0x%08x\n", key, hashed_key); 67 DRM_DEBUG("Key is 0x%08lx, Hashed key is 0x%08x\n", key, hashed_key);
69 h_list = &ht->table[hashed_key]; 68 h_list = &ht->table[hashed_key];
70 hlist_for_each_entry(entry, list, h_list, head) 69 hlist_for_each_entry(entry, h_list, head)
71 DRM_DEBUG("count %d, key: 0x%08lx\n", count++, entry->key); 70 DRM_DEBUG("count %d, key: 0x%08lx\n", count++, entry->key);
72} 71}
73 72
@@ -76,14 +75,13 @@ static struct hlist_node *drm_ht_find_key(struct drm_open_hash *ht,
76{ 75{
77 struct drm_hash_item *entry; 76 struct drm_hash_item *entry;
78 struct hlist_head *h_list; 77 struct hlist_head *h_list;
79 struct hlist_node *list;
80 unsigned int hashed_key; 78 unsigned int hashed_key;
81 79
82 hashed_key = hash_long(key, ht->order); 80 hashed_key = hash_long(key, ht->order);
83 h_list = &ht->table[hashed_key]; 81 h_list = &ht->table[hashed_key];
84 hlist_for_each_entry(entry, list, h_list, head) { 82 hlist_for_each_entry(entry, h_list, head) {
85 if (entry->key == key) 83 if (entry->key == key)
86 return list; 84 return &entry->head;
87 if (entry->key > key) 85 if (entry->key > key)
88 break; 86 break;
89 } 87 }
@@ -95,14 +93,13 @@ static struct hlist_node *drm_ht_find_key_rcu(struct drm_open_hash *ht,
95{ 93{
96 struct drm_hash_item *entry; 94 struct drm_hash_item *entry;
97 struct hlist_head *h_list; 95 struct hlist_head *h_list;
98 struct hlist_node *list;
99 unsigned int hashed_key; 96 unsigned int hashed_key;
100 97
101 hashed_key = hash_long(key, ht->order); 98 hashed_key = hash_long(key, ht->order);
102 h_list = &ht->table[hashed_key]; 99 h_list = &ht->table[hashed_key];
103 hlist_for_each_entry_rcu(entry, list, h_list, head) { 100 hlist_for_each_entry_rcu(entry, h_list, head) {
104 if (entry->key == key) 101 if (entry->key == key)
105 return list; 102 return &entry->head;
106 if (entry->key > key) 103 if (entry->key > key)
107 break; 104 break;
108 } 105 }
@@ -113,19 +110,19 @@ int drm_ht_insert_item(struct drm_open_hash *ht, struct drm_hash_item *item)
113{ 110{
114 struct drm_hash_item *entry; 111 struct drm_hash_item *entry;
115 struct hlist_head *h_list; 112 struct hlist_head *h_list;
116 struct hlist_node *list, *parent; 113 struct hlist_node *parent;
117 unsigned int hashed_key; 114 unsigned int hashed_key;
118 unsigned long key = item->key; 115 unsigned long key = item->key;
119 116
120 hashed_key = hash_long(key, ht->order); 117 hashed_key = hash_long(key, ht->order);
121 h_list = &ht->table[hashed_key]; 118 h_list = &ht->table[hashed_key];
122 parent = NULL; 119 parent = NULL;
123 hlist_for_each_entry(entry, list, h_list, head) { 120 hlist_for_each_entry(entry, h_list, head) {
124 if (entry->key == key) 121 if (entry->key == key)
125 return -EINVAL; 122 return -EINVAL;
126 if (entry->key > key) 123 if (entry->key > key)
127 break; 124 break;
128 parent = list; 125 parent = &entry->head;
129 } 126 }
130 if (parent) { 127 if (parent) {
131 hlist_add_after_rcu(parent, &item->head); 128 hlist_add_after_rcu(parent, &item->head);
diff --git a/drivers/gpu/drm/drm_stub.c b/drivers/gpu/drm/drm_stub.c
index 200e104f1fa0..7d30802a018f 100644
--- a/drivers/gpu/drm/drm_stub.c
+++ b/drivers/gpu/drm/drm_stub.c
@@ -109,7 +109,6 @@ EXPORT_SYMBOL(drm_ut_debug_printk);
109 109
110static int drm_minor_get_id(struct drm_device *dev, int type) 110static int drm_minor_get_id(struct drm_device *dev, int type)
111{ 111{
112 int new_id;
113 int ret; 112 int ret;
 114 int base = 0, limit = 63; 113 int base = 0, limit = 63;
115 114
@@ -121,25 +120,11 @@ static int drm_minor_get_id(struct drm_device *dev, int type)
121 limit = base + 255; 120 limit = base + 255;
122 } 121 }
123 122
124again:
125 if (idr_pre_get(&drm_minors_idr, GFP_KERNEL) == 0) {
126 DRM_ERROR("Out of memory expanding drawable idr\n");
127 return -ENOMEM;
128 }
129 mutex_lock(&dev->struct_mutex); 123 mutex_lock(&dev->struct_mutex);
130 ret = idr_get_new_above(&drm_minors_idr, NULL, 124 ret = idr_alloc(&drm_minors_idr, NULL, base, limit, GFP_KERNEL);
131 base, &new_id);
132 mutex_unlock(&dev->struct_mutex); 125 mutex_unlock(&dev->struct_mutex);
133 if (ret == -EAGAIN)
134 goto again;
135 else if (ret)
136 return ret;
137 126
138 if (new_id >= limit) { 127 return ret == -ENOSPC ? -EINVAL : ret;
139 idr_remove(&drm_minors_idr, new_id);
140 return -EINVAL;
141 }
142 return new_id;
143} 128}
144 129
145struct drm_master *drm_master_create(struct drm_minor *minor) 130struct drm_master *drm_master_create(struct drm_minor *minor)
diff --git a/drivers/gpu/drm/exynos/exynos_drm_ipp.c b/drivers/gpu/drm/exynos/exynos_drm_ipp.c
index 1a556354e92f..1adce07ecb5b 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_ipp.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_ipp.c
@@ -137,21 +137,15 @@ static int ipp_create_id(struct idr *id_idr, struct mutex *lock, void *obj,
137 137
138 DRM_DEBUG_KMS("%s\n", __func__); 138 DRM_DEBUG_KMS("%s\n", __func__);
139 139
140again:
141 /* ensure there is space available to allocate a handle */
142 if (idr_pre_get(id_idr, GFP_KERNEL) == 0) {
143 DRM_ERROR("failed to get idr.\n");
144 return -ENOMEM;
145 }
146
147 /* do the allocation under our mutexlock */ 140 /* do the allocation under our mutexlock */
148 mutex_lock(lock); 141 mutex_lock(lock);
149 ret = idr_get_new_above(id_idr, obj, 1, (int *)idp); 142 ret = idr_alloc(id_idr, obj, 1, 0, GFP_KERNEL);
150 mutex_unlock(lock); 143 mutex_unlock(lock);
151 if (ret == -EAGAIN) 144 if (ret < 0)
152 goto again; 145 return ret;
153 146
154 return ret; 147 *idp = ret;
148 return 0;
155} 149}
156 150
157static void *ipp_find_obj(struct idr *id_idr, struct mutex *lock, u32 id) 151static void *ipp_find_obj(struct idr *id_idr, struct mutex *lock, u32 id)
@@ -1786,8 +1780,6 @@ err_iommu:
1786 drm_iommu_detach_device(drm_dev, ippdrv->dev); 1780 drm_iommu_detach_device(drm_dev, ippdrv->dev);
1787 1781
1788err_idr: 1782err_idr:
1789 idr_remove_all(&ctx->ipp_idr);
1790 idr_remove_all(&ctx->prop_idr);
1791 idr_destroy(&ctx->ipp_idr); 1783 idr_destroy(&ctx->ipp_idr);
1792 idr_destroy(&ctx->prop_idr); 1784 idr_destroy(&ctx->prop_idr);
1793 return ret; 1785 return ret;
@@ -1965,8 +1957,6 @@ static int ipp_remove(struct platform_device *pdev)
1965 exynos_drm_subdrv_unregister(&ctx->subdrv); 1957 exynos_drm_subdrv_unregister(&ctx->subdrv);
1966 1958
1967 /* remove,destroy ipp idr */ 1959 /* remove,destroy ipp idr */
1968 idr_remove_all(&ctx->ipp_idr);
1969 idr_remove_all(&ctx->prop_idr);
1970 idr_destroy(&ctx->ipp_idr); 1960 idr_destroy(&ctx->ipp_idr);
1971 idr_destroy(&ctx->prop_idr); 1961 idr_destroy(&ctx->prop_idr);
1972 1962
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index 21177d9df423..94d873a6cffb 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -139,7 +139,7 @@ create_hw_context(struct drm_device *dev,
139{ 139{
140 struct drm_i915_private *dev_priv = dev->dev_private; 140 struct drm_i915_private *dev_priv = dev->dev_private;
141 struct i915_hw_context *ctx; 141 struct i915_hw_context *ctx;
142 int ret, id; 142 int ret;
143 143
144 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); 144 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
145 if (ctx == NULL) 145 if (ctx == NULL)
@@ -164,22 +164,11 @@ create_hw_context(struct drm_device *dev,
164 164
165 ctx->file_priv = file_priv; 165 ctx->file_priv = file_priv;
166 166
167again: 167 ret = idr_alloc(&file_priv->context_idr, ctx, DEFAULT_CONTEXT_ID + 1, 0,
168 if (idr_pre_get(&file_priv->context_idr, GFP_KERNEL) == 0) { 168 GFP_KERNEL);
169 ret = -ENOMEM; 169 if (ret < 0)
170 DRM_DEBUG_DRIVER("idr allocation failed\n");
171 goto err_out;
172 }
173
174 ret = idr_get_new_above(&file_priv->context_idr, ctx,
175 DEFAULT_CONTEXT_ID + 1, &id);
176 if (ret == 0)
177 ctx->id = id;
178
179 if (ret == -EAGAIN)
180 goto again;
181 else if (ret)
182 goto err_out; 170 goto err_out;
171 ctx->id = ret;
183 172
184 return ctx; 173 return ctx;
185 174
diff --git a/drivers/gpu/drm/sis/sis_drv.c b/drivers/gpu/drm/sis/sis_drv.c
index 841065b998a1..5a5325e6b759 100644
--- a/drivers/gpu/drm/sis/sis_drv.c
+++ b/drivers/gpu/drm/sis/sis_drv.c
@@ -58,7 +58,6 @@ static int sis_driver_unload(struct drm_device *dev)
58{ 58{
59 drm_sis_private_t *dev_priv = dev->dev_private; 59 drm_sis_private_t *dev_priv = dev->dev_private;
60 60
61 idr_remove_all(&dev_priv->object_idr);
62 idr_destroy(&dev_priv->object_idr); 61 idr_destroy(&dev_priv->object_idr);
63 62
64 kfree(dev_priv); 63 kfree(dev_priv);
diff --git a/drivers/gpu/drm/sis/sis_mm.c b/drivers/gpu/drm/sis/sis_mm.c
index 2b2f78c428af..9a43d98e5003 100644
--- a/drivers/gpu/drm/sis/sis_mm.c
+++ b/drivers/gpu/drm/sis/sis_mm.c
@@ -128,17 +128,10 @@ static int sis_drm_alloc(struct drm_device *dev, struct drm_file *file,
128 if (retval) 128 if (retval)
129 goto fail_alloc; 129 goto fail_alloc;
130 130
131again: 131 retval = idr_alloc(&dev_priv->object_idr, item, 1, 0, GFP_KERNEL);
132 if (idr_pre_get(&dev_priv->object_idr, GFP_KERNEL) == 0) { 132 if (retval < 0)
133 retval = -ENOMEM;
134 goto fail_idr;
135 }
136
137 retval = idr_get_new_above(&dev_priv->object_idr, item, 1, &user_key);
138 if (retval == -EAGAIN)
139 goto again;
140 if (retval)
141 goto fail_idr; 133 goto fail_idr;
134 user_key = retval;
142 135
143 list_add(&item->owner_list, &file_priv->obj_list); 136 list_add(&item->owner_list, &file_priv->obj_list);
144 mutex_unlock(&dev->struct_mutex); 137 mutex_unlock(&dev->struct_mutex);
diff --git a/drivers/gpu/drm/via/via_map.c b/drivers/gpu/drm/via/via_map.c
index c0f1cc7f5ca9..d0ab3fb32acd 100644
--- a/drivers/gpu/drm/via/via_map.c
+++ b/drivers/gpu/drm/via/via_map.c
@@ -120,7 +120,6 @@ int via_driver_unload(struct drm_device *dev)
120{ 120{
121 drm_via_private_t *dev_priv = dev->dev_private; 121 drm_via_private_t *dev_priv = dev->dev_private;
122 122
123 idr_remove_all(&dev_priv->object_idr);
124 idr_destroy(&dev_priv->object_idr); 123 idr_destroy(&dev_priv->object_idr);
125 124
126 kfree(dev_priv); 125 kfree(dev_priv);
diff --git a/drivers/gpu/drm/via/via_mm.c b/drivers/gpu/drm/via/via_mm.c
index 0d55432e02a2..0ab93ff09873 100644
--- a/drivers/gpu/drm/via/via_mm.c
+++ b/drivers/gpu/drm/via/via_mm.c
@@ -148,17 +148,10 @@ int via_mem_alloc(struct drm_device *dev, void *data,
148 if (retval) 148 if (retval)
149 goto fail_alloc; 149 goto fail_alloc;
150 150
151again: 151 retval = idr_alloc(&dev_priv->object_idr, item, 1, 0, GFP_KERNEL);
152 if (idr_pre_get(&dev_priv->object_idr, GFP_KERNEL) == 0) { 152 if (retval < 0)
153 retval = -ENOMEM;
154 goto fail_idr;
155 }
156
157 retval = idr_get_new_above(&dev_priv->object_idr, item, 1, &user_key);
158 if (retval == -EAGAIN)
159 goto again;
160 if (retval)
161 goto fail_idr; 153 goto fail_idr;
154 user_key = retval;
162 155
163 list_add(&item->owner_list, &file_priv->obj_list); 156 list_add(&item->owner_list, &file_priv->obj_list);
164 mutex_unlock(&dev->struct_mutex); 157 mutex_unlock(&dev->struct_mutex);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
index 16556170fb32..bc784254e78e 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -177,17 +177,16 @@ int vmw_resource_alloc_id(struct vmw_resource *res)
177 177
178 BUG_ON(res->id != -1); 178 BUG_ON(res->id != -1);
179 179
180 do { 180 idr_preload(GFP_KERNEL);
181 if (unlikely(idr_pre_get(idr, GFP_KERNEL) == 0)) 181 write_lock(&dev_priv->resource_lock);
182 return -ENOMEM;
183
184 write_lock(&dev_priv->resource_lock);
185 ret = idr_get_new_above(idr, res, 1, &res->id);
186 write_unlock(&dev_priv->resource_lock);
187 182
188 } while (ret == -EAGAIN); 183 ret = idr_alloc(idr, res, 1, 0, GFP_NOWAIT);
184 if (ret >= 0)
185 res->id = ret;
189 186
190 return ret; 187 write_unlock(&dev_priv->resource_lock);
188 idr_preload_end();
189 return ret < 0 ? ret : 0;
191} 190}
192 191
193/** 192/**
diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c
index 66a30f7ac882..991d38daa87d 100644
--- a/drivers/i2c/i2c-core.c
+++ b/drivers/i2c/i2c-core.c
@@ -935,25 +935,17 @@ out_list:
935 */ 935 */
936int i2c_add_adapter(struct i2c_adapter *adapter) 936int i2c_add_adapter(struct i2c_adapter *adapter)
937{ 937{
938 int id, res = 0; 938 int id;
939
940retry:
941 if (idr_pre_get(&i2c_adapter_idr, GFP_KERNEL) == 0)
942 return -ENOMEM;
943 939
944 mutex_lock(&core_lock); 940 mutex_lock(&core_lock);
945 /* "above" here means "above or equal to", sigh */ 941 id = idr_alloc(&i2c_adapter_idr, adapter,
946 res = idr_get_new_above(&i2c_adapter_idr, adapter, 942 __i2c_first_dynamic_bus_num, 0, GFP_KERNEL);
947 __i2c_first_dynamic_bus_num, &id);
948 mutex_unlock(&core_lock); 943 mutex_unlock(&core_lock);
949 944 if (id < 0)
950 if (res < 0) { 945 return id;
951 if (res == -EAGAIN)
952 goto retry;
953 return res;
954 }
955 946
956 adapter->nr = id; 947 adapter->nr = id;
948
957 return i2c_register_adapter(adapter); 949 return i2c_register_adapter(adapter);
958} 950}
959EXPORT_SYMBOL(i2c_add_adapter); 951EXPORT_SYMBOL(i2c_add_adapter);
@@ -984,33 +976,17 @@ EXPORT_SYMBOL(i2c_add_adapter);
984int i2c_add_numbered_adapter(struct i2c_adapter *adap) 976int i2c_add_numbered_adapter(struct i2c_adapter *adap)
985{ 977{
986 int id; 978 int id;
987 int status;
988 979
989 if (adap->nr == -1) /* -1 means dynamically assign bus id */ 980 if (adap->nr == -1) /* -1 means dynamically assign bus id */
990 return i2c_add_adapter(adap); 981 return i2c_add_adapter(adap);
991 if (adap->nr & ~MAX_IDR_MASK)
992 return -EINVAL;
993
994retry:
995 if (idr_pre_get(&i2c_adapter_idr, GFP_KERNEL) == 0)
996 return -ENOMEM;
997 982
998 mutex_lock(&core_lock); 983 mutex_lock(&core_lock);
999 /* "above" here means "above or equal to", sigh; 984 id = idr_alloc(&i2c_adapter_idr, adap, adap->nr, adap->nr + 1,
1000 * we need the "equal to" result to force the result 985 GFP_KERNEL);
1001 */
1002 status = idr_get_new_above(&i2c_adapter_idr, adap, adap->nr, &id);
1003 if (status == 0 && id != adap->nr) {
1004 status = -EBUSY;
1005 idr_remove(&i2c_adapter_idr, id);
1006 }
1007 mutex_unlock(&core_lock); 986 mutex_unlock(&core_lock);
1008 if (status == -EAGAIN) 987 if (id < 0)
1009 goto retry; 988 return id == -ENOSPC ? -EBUSY : id;
1010 989 return i2c_register_adapter(adap);
1011 if (status == 0)
1012 status = i2c_register_adapter(adap);
1013 return status;
1014} 990}
1015EXPORT_SYMBOL_GPL(i2c_add_numbered_adapter); 991EXPORT_SYMBOL_GPL(i2c_add_numbered_adapter);
1016 992
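i2c_add_numbered_adapter() above also shows idr_alloc()'s range semantics: the interval is [start, end), so asking for (adap->nr, adap->nr + 1) either yields exactly adap->nr or fails with -ENOSPC, which the caller translates back to its traditional -EBUSY. A minimal sketch (claim_fixed_id() is illustrative):

#include <linux/idr.h>
#include <linux/errno.h>

/* Claim one specific id or report that it is taken. */
static int claim_fixed_id(struct idr *idr, void *ptr, int nr)
{
	int id = idr_alloc(idr, ptr, nr, nr + 1, GFP_KERNEL);

	if (id < 0)
		return id == -ENOSPC ? -EBUSY : id;
	return id;	/* == nr on success */
}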
diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
index 394fea2ba1bc..784b97cb05b0 100644
--- a/drivers/infiniband/core/cm.c
+++ b/drivers/infiniband/core/cm.c
@@ -382,20 +382,21 @@ static int cm_init_av_by_path(struct ib_sa_path_rec *path, struct cm_av *av)
382static int cm_alloc_id(struct cm_id_private *cm_id_priv) 382static int cm_alloc_id(struct cm_id_private *cm_id_priv)
383{ 383{
384 unsigned long flags; 384 unsigned long flags;
385 int ret, id; 385 int id;
386 static int next_id; 386 static int next_id;
387 387
388 do { 388 idr_preload(GFP_KERNEL);
389 spin_lock_irqsave(&cm.lock, flags); 389 spin_lock_irqsave(&cm.lock, flags);
390 ret = idr_get_new_above(&cm.local_id_table, cm_id_priv, 390
391 next_id, &id); 391 id = idr_alloc(&cm.local_id_table, cm_id_priv, next_id, 0, GFP_NOWAIT);
392 if (!ret) 392 if (id >= 0)
393 next_id = ((unsigned) id + 1) & MAX_IDR_MASK; 393 next_id = max(id + 1, 0);
394 spin_unlock_irqrestore(&cm.lock, flags); 394
395 } while( (ret == -EAGAIN) && idr_pre_get(&cm.local_id_table, GFP_KERNEL) ); 395 spin_unlock_irqrestore(&cm.lock, flags);
396 idr_preload_end();
396 397
397 cm_id_priv->id.local_id = (__force __be32)id ^ cm.random_id_operand; 398 cm_id_priv->id.local_id = (__force __be32)id ^ cm.random_id_operand;
398 return ret; 399 return id < 0 ? id : 0;
399} 400}
400 401
401static void cm_free_id(__be32 local_id) 402static void cm_free_id(__be32 local_id)
@@ -3844,7 +3845,6 @@ static int __init ib_cm_init(void)
3844 cm.remote_sidr_table = RB_ROOT; 3845 cm.remote_sidr_table = RB_ROOT;
3845 idr_init(&cm.local_id_table); 3846 idr_init(&cm.local_id_table);
3846 get_random_bytes(&cm.random_id_operand, sizeof cm.random_id_operand); 3847 get_random_bytes(&cm.random_id_operand, sizeof cm.random_id_operand);
3847 idr_pre_get(&cm.local_id_table, GFP_KERNEL);
3848 INIT_LIST_HEAD(&cm.timewait_list); 3848 INIT_LIST_HEAD(&cm.timewait_list);
3849 3849
3850 ret = class_register(&cm_class); 3850 ret = class_register(&cm_class);
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index d789eea32168..71c2c7116802 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -2143,33 +2143,23 @@ static int cma_alloc_port(struct idr *ps, struct rdma_id_private *id_priv,
2143 unsigned short snum) 2143 unsigned short snum)
2144{ 2144{
2145 struct rdma_bind_list *bind_list; 2145 struct rdma_bind_list *bind_list;
2146 int port, ret; 2146 int ret;
2147 2147
2148 bind_list = kzalloc(sizeof *bind_list, GFP_KERNEL); 2148 bind_list = kzalloc(sizeof *bind_list, GFP_KERNEL);
2149 if (!bind_list) 2149 if (!bind_list)
2150 return -ENOMEM; 2150 return -ENOMEM;
2151 2151
2152 do { 2152 ret = idr_alloc(ps, bind_list, snum, snum + 1, GFP_KERNEL);
2153 ret = idr_get_new_above(ps, bind_list, snum, &port); 2153 if (ret < 0)
2154 } while ((ret == -EAGAIN) && idr_pre_get(ps, GFP_KERNEL)); 2154 goto err;
2155
2156 if (ret)
2157 goto err1;
2158
2159 if (port != snum) {
2160 ret = -EADDRNOTAVAIL;
2161 goto err2;
2162 }
2163 2155
2164 bind_list->ps = ps; 2156 bind_list->ps = ps;
2165 bind_list->port = (unsigned short) port; 2157 bind_list->port = (unsigned short)ret;
2166 cma_bind_port(bind_list, id_priv); 2158 cma_bind_port(bind_list, id_priv);
2167 return 0; 2159 return 0;
2168err2: 2160err:
2169 idr_remove(ps, port);
2170err1:
2171 kfree(bind_list); 2161 kfree(bind_list);
2172 return ret; 2162 return ret == -ENOSPC ? -EADDRNOTAVAIL : ret;
2173} 2163}
2174 2164
2175static int cma_alloc_any_port(struct idr *ps, struct rdma_id_private *id_priv) 2165static int cma_alloc_any_port(struct idr *ps, struct rdma_id_private *id_priv)
@@ -2214,10 +2204,9 @@ static int cma_check_port(struct rdma_bind_list *bind_list,
2214{ 2204{
2215 struct rdma_id_private *cur_id; 2205 struct rdma_id_private *cur_id;
2216 struct sockaddr *addr, *cur_addr; 2206 struct sockaddr *addr, *cur_addr;
2217 struct hlist_node *node;
2218 2207
2219 addr = (struct sockaddr *) &id_priv->id.route.addr.src_addr; 2208 addr = (struct sockaddr *) &id_priv->id.route.addr.src_addr;
2220 hlist_for_each_entry(cur_id, node, &bind_list->owners, node) { 2209 hlist_for_each_entry(cur_id, &bind_list->owners, node) {
2221 if (id_priv == cur_id) 2210 if (id_priv == cur_id)
2222 continue; 2211 continue;
2223 2212
diff --git a/drivers/infiniband/core/fmr_pool.c b/drivers/infiniband/core/fmr_pool.c
index 176c8f90f2bb..9f5ad7cc33c8 100644
--- a/drivers/infiniband/core/fmr_pool.c
+++ b/drivers/infiniband/core/fmr_pool.c
@@ -118,14 +118,13 @@ static inline struct ib_pool_fmr *ib_fmr_cache_lookup(struct ib_fmr_pool *pool,
118{ 118{
119 struct hlist_head *bucket; 119 struct hlist_head *bucket;
120 struct ib_pool_fmr *fmr; 120 struct ib_pool_fmr *fmr;
121 struct hlist_node *pos;
122 121
123 if (!pool->cache_bucket) 122 if (!pool->cache_bucket)
124 return NULL; 123 return NULL;
125 124
126 bucket = pool->cache_bucket + ib_fmr_hash(*page_list); 125 bucket = pool->cache_bucket + ib_fmr_hash(*page_list);
127 126
128 hlist_for_each_entry(fmr, pos, bucket, cache_node) 127 hlist_for_each_entry(fmr, bucket, cache_node)
129 if (io_virtual_address == fmr->io_virtual_address && 128 if (io_virtual_address == fmr->io_virtual_address &&
130 page_list_len == fmr->page_list_len && 129 page_list_len == fmr->page_list_len &&
131 !memcmp(page_list, fmr->page_list, 130 !memcmp(page_list, fmr->page_list,
diff --git a/drivers/infiniband/core/sa_query.c b/drivers/infiniband/core/sa_query.c
index a8905abc56e4..934f45e79e5e 100644
--- a/drivers/infiniband/core/sa_query.c
+++ b/drivers/infiniband/core/sa_query.c
@@ -611,19 +611,21 @@ static void init_mad(struct ib_sa_mad *mad, struct ib_mad_agent *agent)
611 611
612static int send_mad(struct ib_sa_query *query, int timeout_ms, gfp_t gfp_mask) 612static int send_mad(struct ib_sa_query *query, int timeout_ms, gfp_t gfp_mask)
613{ 613{
614 bool preload = gfp_mask & __GFP_WAIT;
614 unsigned long flags; 615 unsigned long flags;
615 int ret, id; 616 int ret, id;
616 617
617retry: 618 if (preload)
618 if (!idr_pre_get(&query_idr, gfp_mask)) 619 idr_preload(gfp_mask);
619 return -ENOMEM;
620 spin_lock_irqsave(&idr_lock, flags); 620 spin_lock_irqsave(&idr_lock, flags);
621 ret = idr_get_new(&query_idr, query, &id); 621
622 id = idr_alloc(&query_idr, query, 0, 0, GFP_NOWAIT);
623
622 spin_unlock_irqrestore(&idr_lock, flags); 624 spin_unlock_irqrestore(&idr_lock, flags);
623 if (ret == -EAGAIN) 625 if (preload)
624 goto retry; 626 idr_preload_end();
625 if (ret) 627 if (id < 0)
626 return ret; 628 return id;
627 629
628 query->mad_buf->timeout_ms = timeout_ms; 630 query->mad_buf->timeout_ms = timeout_ms;
629 query->mad_buf->context[0] = query; 631 query->mad_buf->context[0] = query;
diff --git a/drivers/infiniband/core/ucm.c b/drivers/infiniband/core/ucm.c
index 49b15ac1987e..f2f63933e8a9 100644
--- a/drivers/infiniband/core/ucm.c
+++ b/drivers/infiniband/core/ucm.c
@@ -176,7 +176,6 @@ static void ib_ucm_cleanup_events(struct ib_ucm_context *ctx)
176static struct ib_ucm_context *ib_ucm_ctx_alloc(struct ib_ucm_file *file) 176static struct ib_ucm_context *ib_ucm_ctx_alloc(struct ib_ucm_file *file)
177{ 177{
178 struct ib_ucm_context *ctx; 178 struct ib_ucm_context *ctx;
179 int result;
180 179
181 ctx = kzalloc(sizeof *ctx, GFP_KERNEL); 180 ctx = kzalloc(sizeof *ctx, GFP_KERNEL);
182 if (!ctx) 181 if (!ctx)
@@ -187,17 +186,10 @@ static struct ib_ucm_context *ib_ucm_ctx_alloc(struct ib_ucm_file *file)
187 ctx->file = file; 186 ctx->file = file;
188 INIT_LIST_HEAD(&ctx->events); 187 INIT_LIST_HEAD(&ctx->events);
189 188
190 do { 189 mutex_lock(&ctx_id_mutex);
191 result = idr_pre_get(&ctx_id_table, GFP_KERNEL); 190 ctx->id = idr_alloc(&ctx_id_table, ctx, 0, 0, GFP_KERNEL);
192 if (!result) 191 mutex_unlock(&ctx_id_mutex);
193 goto error; 192 if (ctx->id < 0)
194
195 mutex_lock(&ctx_id_mutex);
196 result = idr_get_new(&ctx_id_table, ctx, &ctx->id);
197 mutex_unlock(&ctx_id_mutex);
198 } while (result == -EAGAIN);
199
200 if (result)
201 goto error; 193 goto error;
202 194
203 list_add_tail(&ctx->file_list, &file->ctxs); 195 list_add_tail(&ctx->file_list, &file->ctxs);
diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c
index 2709ff581392..5ca44cd9b00c 100644
--- a/drivers/infiniband/core/ucma.c
+++ b/drivers/infiniband/core/ucma.c
@@ -145,7 +145,6 @@ static void ucma_put_ctx(struct ucma_context *ctx)
145static struct ucma_context *ucma_alloc_ctx(struct ucma_file *file) 145static struct ucma_context *ucma_alloc_ctx(struct ucma_file *file)
146{ 146{
147 struct ucma_context *ctx; 147 struct ucma_context *ctx;
148 int ret;
149 148
150 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); 149 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
151 if (!ctx) 150 if (!ctx)
@@ -156,17 +155,10 @@ static struct ucma_context *ucma_alloc_ctx(struct ucma_file *file)
 	INIT_LIST_HEAD(&ctx->mc_list);
 	ctx->file = file;
 
-	do {
-		ret = idr_pre_get(&ctx_idr, GFP_KERNEL);
-		if (!ret)
-			goto error;
-
-		mutex_lock(&mut);
-		ret = idr_get_new(&ctx_idr, ctx, &ctx->id);
-		mutex_unlock(&mut);
-	} while (ret == -EAGAIN);
-
-	if (ret)
+	mutex_lock(&mut);
+	ctx->id = idr_alloc(&ctx_idr, ctx, 0, 0, GFP_KERNEL);
+	mutex_unlock(&mut);
+	if (ctx->id < 0)
 		goto error;
 
 	list_add_tail(&ctx->list, &file->ctx_list);
@@ -180,23 +172,15 @@ error:
 static struct ucma_multicast* ucma_alloc_multicast(struct ucma_context *ctx)
 {
 	struct ucma_multicast *mc;
-	int ret;
 
 	mc = kzalloc(sizeof(*mc), GFP_KERNEL);
 	if (!mc)
 		return NULL;
 
-	do {
-		ret = idr_pre_get(&multicast_idr, GFP_KERNEL);
-		if (!ret)
-			goto error;
-
-		mutex_lock(&mut);
-		ret = idr_get_new(&multicast_idr, mc, &mc->id);
-		mutex_unlock(&mut);
-	} while (ret == -EAGAIN);
-
-	if (ret)
+	mutex_lock(&mut);
+	mc->id = idr_alloc(&multicast_idr, mc, 0, 0, GFP_KERNEL);
+	mutex_unlock(&mut);
+	if (mc->id < 0)
 		goto error;
 
 	mc->ctx = ctx;
diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
index e71d834c922a..a7d00f6b3bc1 100644
--- a/drivers/infiniband/core/uverbs_cmd.c
+++ b/drivers/infiniband/core/uverbs_cmd.c
@@ -125,18 +125,17 @@ static int idr_add_uobj(struct idr *idr, struct ib_uobject *uobj)
 {
 	int ret;
 
-retry:
-	if (!idr_pre_get(idr, GFP_KERNEL))
-		return -ENOMEM;
-
+	idr_preload(GFP_KERNEL);
 	spin_lock(&ib_uverbs_idr_lock);
-	ret = idr_get_new(idr, uobj, &uobj->id);
-	spin_unlock(&ib_uverbs_idr_lock);
 
-	if (ret == -EAGAIN)
-		goto retry;
+	ret = idr_alloc(idr, uobj, 0, 0, GFP_NOWAIT);
+	if (ret >= 0)
+		uobj->id = ret;
 
-	return ret;
+	spin_unlock(&ib_uverbs_idr_lock);
+	idr_preload_end();
+
+	return ret < 0 ? ret : 0;
 }
 
 void idr_remove_uobj(struct idr *idr, struct ib_uobject *uobj)
diff --git a/drivers/infiniband/hw/amso1100/c2_qp.c b/drivers/infiniband/hw/amso1100/c2_qp.c
index 28cd5cb51859..0ab826b280b2 100644
--- a/drivers/infiniband/hw/amso1100/c2_qp.c
+++ b/drivers/infiniband/hw/amso1100/c2_qp.c
@@ -382,14 +382,17 @@ static int c2_alloc_qpn(struct c2_dev *c2dev, struct c2_qp *qp)
 {
 	int ret;
 
-	do {
-		spin_lock_irq(&c2dev->qp_table.lock);
-		ret = idr_get_new_above(&c2dev->qp_table.idr, qp,
-					c2dev->qp_table.last++, &qp->qpn);
-		spin_unlock_irq(&c2dev->qp_table.lock);
-	} while ((ret == -EAGAIN) &&
-		 idr_pre_get(&c2dev->qp_table.idr, GFP_KERNEL));
-	return ret;
+	idr_preload(GFP_KERNEL);
+	spin_lock_irq(&c2dev->qp_table.lock);
+
+	ret = idr_alloc(&c2dev->qp_table.idr, qp, c2dev->qp_table.last++, 0,
+			GFP_NOWAIT);
+	if (ret >= 0)
+		qp->qpn = ret;
+
+	spin_unlock_irq(&c2dev->qp_table.lock);
+	idr_preload_end();
+	return ret < 0 ? ret : 0;
 }
 
 static void c2_free_qpn(struct c2_dev *c2dev, int qpn)
diff --git a/drivers/infiniband/hw/cxgb3/iwch.h b/drivers/infiniband/hw/cxgb3/iwch.h
index a1c44578e039..837862287a29 100644
--- a/drivers/infiniband/hw/cxgb3/iwch.h
+++ b/drivers/infiniband/hw/cxgb3/iwch.h
@@ -153,19 +153,17 @@ static inline int insert_handle(struct iwch_dev *rhp, struct idr *idr,
 			     void *handle, u32 id)
 {
 	int ret;
-	int newid;
-
-	do {
-		if (!idr_pre_get(idr, GFP_KERNEL)) {
-			return -ENOMEM;
-		}
-		spin_lock_irq(&rhp->lock);
-		ret = idr_get_new_above(idr, handle, id, &newid);
-		BUG_ON(newid != id);
-		spin_unlock_irq(&rhp->lock);
-	} while (ret == -EAGAIN);
-
-	return ret;
+
+	idr_preload(GFP_KERNEL);
+	spin_lock_irq(&rhp->lock);
+
+	ret = idr_alloc(idr, handle, id, id + 1, GFP_NOWAIT);
+
+	spin_unlock_irq(&rhp->lock);
+	idr_preload_end();
+
+	BUG_ON(ret == -ENOSPC);
+	return ret < 0 ? ret : 0;
 }
 
 static inline void remove_handle(struct iwch_dev *rhp, struct idr *idr, u32 id)
170 168
171static inline void remove_handle(struct iwch_dev *rhp, struct idr *idr, u32 id) 169static inline void remove_handle(struct iwch_dev *rhp, struct idr *idr, u32 id)
diff --git a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
index 4c07fc069766..7eec5e13fa8c 100644
--- a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
+++ b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
@@ -260,20 +260,21 @@ static inline int _insert_handle(struct c4iw_dev *rhp, struct idr *idr,
 				 void *handle, u32 id, int lock)
 {
 	int ret;
-	int newid;
 
-	do {
-		if (!idr_pre_get(idr, lock ? GFP_KERNEL : GFP_ATOMIC))
-			return -ENOMEM;
-		if (lock)
-			spin_lock_irq(&rhp->lock);
-		ret = idr_get_new_above(idr, handle, id, &newid);
-		BUG_ON(!ret && newid != id);
-		if (lock)
-			spin_unlock_irq(&rhp->lock);
-	} while (ret == -EAGAIN);
-
-	return ret;
+	if (lock) {
+		idr_preload(GFP_KERNEL);
+		spin_lock_irq(&rhp->lock);
+	}
+
+	ret = idr_alloc(idr, handle, id, id + 1, GFP_ATOMIC);
+
+	if (lock) {
+		spin_unlock_irq(&rhp->lock);
+		idr_preload_end();
+	}
+
+	BUG_ON(ret == -ENOSPC);
+	return ret < 0 ? ret : 0;
 }
 
 static inline int insert_handle(struct c4iw_dev *rhp, struct idr *idr,
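
The cxgb3/cxgb4 helpers above want one specific id, not "any free id". idr_alloc()'s half-open [start, end) window expresses that directly: with end = id + 1 the only value it can return is id itself, so the old idr_get_new_above() plus BUG_ON(newid != id) dance disappears and -ENOSPC now simply means "already in use". A hedged sketch of just that idiom (reserve_exact_id is an illustrative name, not part of the patch):

    /* Reserve exactly 'id' in 'idr'; 0 on success, -ENOSPC if taken. */
    static int reserve_exact_id(struct idr *idr, void *handle, u32 id)
    {
    	int ret = idr_alloc(idr, handle, id, id + 1, GFP_KERNEL);

    	return ret < 0 ? ret : 0;
    }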
diff --git a/drivers/infiniband/hw/ehca/ehca_cq.c b/drivers/infiniband/hw/ehca/ehca_cq.c
index 8f5290147e8a..212150c25ea0 100644
--- a/drivers/infiniband/hw/ehca/ehca_cq.c
+++ b/drivers/infiniband/hw/ehca/ehca_cq.c
@@ -128,7 +128,7 @@ struct ib_cq *ehca_create_cq(struct ib_device *device, int cqe, int comp_vector,
 	void *vpage;
 	u32 counter;
 	u64 rpage, cqx_fec, h_ret;
-	int ipz_rc, ret, i;
+	int ipz_rc, i;
 	unsigned long flags;
 
 	if (cqe >= 0xFFFFFFFF - 64 - additional_cqe)
@@ -163,32 +163,19 @@ struct ib_cq *ehca_create_cq(struct ib_device *device, int cqe, int comp_vector,
 	adapter_handle = shca->ipz_hca_handle;
 	param.eq_handle = shca->eq.ipz_eq_handle;
 
-	do {
-		if (!idr_pre_get(&ehca_cq_idr, GFP_KERNEL)) {
-			cq = ERR_PTR(-ENOMEM);
-			ehca_err(device, "Can't reserve idr nr. device=%p",
-				 device);
-			goto create_cq_exit1;
-		}
-
-		write_lock_irqsave(&ehca_cq_idr_lock, flags);
-		ret = idr_get_new(&ehca_cq_idr, my_cq, &my_cq->token);
-		write_unlock_irqrestore(&ehca_cq_idr_lock, flags);
-	} while (ret == -EAGAIN);
+	idr_preload(GFP_KERNEL);
+	write_lock_irqsave(&ehca_cq_idr_lock, flags);
+	my_cq->token = idr_alloc(&ehca_cq_idr, my_cq, 0, 0x2000000, GFP_NOWAIT);
+	write_unlock_irqrestore(&ehca_cq_idr_lock, flags);
+	idr_preload_end();
 
-	if (ret) {
+	if (my_cq->token < 0) {
 		cq = ERR_PTR(-ENOMEM);
 		ehca_err(device, "Can't allocate new idr entry. device=%p",
 			 device);
 		goto create_cq_exit1;
 	}
 
-	if (my_cq->token > 0x1FFFFFF) {
-		cq = ERR_PTR(-ENOMEM);
-		ehca_err(device, "Invalid number of cq. device=%p", device);
-		goto create_cq_exit2;
-	}
-
 	/*
 	 * CQs maximum depth is 4GB-64, but we need additional 20 as buffer
 	 * for receiving errors CQEs.
diff --git a/drivers/infiniband/hw/ehca/ehca_qp.c b/drivers/infiniband/hw/ehca/ehca_qp.c
index 149393915ae5..00d6861a6a18 100644
--- a/drivers/infiniband/hw/ehca/ehca_qp.c
+++ b/drivers/infiniband/hw/ehca/ehca_qp.c
@@ -636,30 +636,26 @@ static struct ehca_qp *internal_create_qp(
 	my_qp->send_cq =
 		container_of(init_attr->send_cq, struct ehca_cq, ib_cq);
 
-	do {
-		if (!idr_pre_get(&ehca_qp_idr, GFP_KERNEL)) {
-			ret = -ENOMEM;
-			ehca_err(pd->device, "Can't reserve idr resources.");
-			goto create_qp_exit0;
-		}
+	idr_preload(GFP_KERNEL);
+	write_lock_irqsave(&ehca_qp_idr_lock, flags);
 
-		write_lock_irqsave(&ehca_qp_idr_lock, flags);
-		ret = idr_get_new(&ehca_qp_idr, my_qp, &my_qp->token);
-		write_unlock_irqrestore(&ehca_qp_idr_lock, flags);
-	} while (ret == -EAGAIN);
+	ret = idr_alloc(&ehca_qp_idr, my_qp, 0, 0x2000000, GFP_NOWAIT);
+	if (ret >= 0)
+		my_qp->token = ret;
 
-	if (ret) {
-		ret = -ENOMEM;
-		ehca_err(pd->device, "Can't allocate new idr entry.");
+	write_unlock_irqrestore(&ehca_qp_idr_lock, flags);
+	idr_preload_end();
+	if (ret < 0) {
+		if (ret == -ENOSPC) {
+			ret = -EINVAL;
+			ehca_err(pd->device, "Invalid number of qp");
+		} else {
+			ret = -ENOMEM;
+			ehca_err(pd->device, "Can't allocate new idr entry.");
+		}
 		goto create_qp_exit0;
 	}
 
-	if (my_qp->token > 0x1FFFFFF) {
-		ret = -EINVAL;
-		ehca_err(pd->device, "Invalid number of qp");
-		goto create_qp_exit1;
-	}
-
 	if (has_srq)
 		parms.srq_token = my_qp->token;
 
diff --git a/drivers/infiniband/hw/ipath/ipath_driver.c b/drivers/infiniband/hw/ipath/ipath_driver.c
index 7b371f545ece..bd0caedafe99 100644
--- a/drivers/infiniband/hw/ipath/ipath_driver.c
+++ b/drivers/infiniband/hw/ipath/ipath_driver.c
@@ -194,11 +194,6 @@ static struct ipath_devdata *ipath_alloc_devdata(struct pci_dev *pdev)
 	struct ipath_devdata *dd;
 	int ret;
 
-	if (!idr_pre_get(&unit_table, GFP_KERNEL)) {
-		dd = ERR_PTR(-ENOMEM);
-		goto bail;
-	}
-
 	dd = vzalloc(sizeof(*dd));
 	if (!dd) {
 		dd = ERR_PTR(-ENOMEM);
@@ -206,9 +201,10 @@ static struct ipath_devdata *ipath_alloc_devdata(struct pci_dev *pdev)
 	}
 	dd->ipath_unit = -1;
 
+	idr_preload(GFP_KERNEL);
 	spin_lock_irqsave(&ipath_devs_lock, flags);
 
-	ret = idr_get_new(&unit_table, dd, &dd->ipath_unit);
+	ret = idr_alloc(&unit_table, dd, 0, 0, GFP_NOWAIT);
 	if (ret < 0) {
 		printk(KERN_ERR IPATH_DRV_NAME
 		       ": Could not allocate unit ID: error %d\n", -ret);
@@ -216,6 +212,7 @@ static struct ipath_devdata *ipath_alloc_devdata(struct pci_dev *pdev)
 		dd = ERR_PTR(ret);
 		goto bail_unlock;
 	}
+	dd->ipath_unit = ret;
 
 	dd->pcidev = pdev;
 	pci_set_drvdata(pdev, dd);
@@ -224,7 +221,7 @@ static struct ipath_devdata *ipath_alloc_devdata(struct pci_dev *pdev)
 
 bail_unlock:
 	spin_unlock_irqrestore(&ipath_devs_lock, flags);
-
+	idr_preload_end();
 bail:
 	return dd;
 }
@@ -2503,11 +2500,6 @@ static int __init infinipath_init(void)
 	 * the PCI subsystem.
 	 */
 	idr_init(&unit_table);
-	if (!idr_pre_get(&unit_table, GFP_KERNEL)) {
-		printk(KERN_ERR IPATH_DRV_NAME ": idr_pre_get() failed\n");
-		ret = -ENOMEM;
-		goto bail;
-	}
 
 	ret = pci_register_driver(&ipath_driver);
 	if (ret < 0) {
diff --git a/drivers/infiniband/hw/mlx4/cm.c b/drivers/infiniband/hw/mlx4/cm.c
index dbc99d41605c..e0d79b2395e4 100644
--- a/drivers/infiniband/hw/mlx4/cm.c
+++ b/drivers/infiniband/hw/mlx4/cm.c
@@ -203,7 +203,7 @@ static void sl_id_map_add(struct ib_device *ibdev, struct id_map_entry *new)
 static struct id_map_entry *
 id_map_alloc(struct ib_device *ibdev, int slave_id, u32 sl_cm_id)
 {
-	int ret, id;
+	int ret;
 	static int next_id;
 	struct id_map_entry *ent;
 	struct mlx4_ib_sriov *sriov = &to_mdev(ibdev)->sriov;
@@ -220,25 +220,23 @@ id_map_alloc(struct ib_device *ibdev, int slave_id, u32 sl_cm_id)
 	ent->dev = to_mdev(ibdev);
 	INIT_DELAYED_WORK(&ent->timeout, id_map_ent_timeout);
 
-	do {
-		spin_lock(&to_mdev(ibdev)->sriov.id_map_lock);
-		ret = idr_get_new_above(&sriov->pv_id_table, ent,
-					next_id, &id);
-		if (!ret) {
-			next_id = ((unsigned) id + 1) & MAX_IDR_MASK;
-			ent->pv_cm_id = (u32)id;
-			sl_id_map_add(ibdev, ent);
-		}
-
-		spin_unlock(&sriov->id_map_lock);
-	} while (ret == -EAGAIN && idr_pre_get(&sriov->pv_id_table, GFP_KERNEL));
-	/*the function idr_get_new_above can return -ENOSPC, so don't insert in that case.*/
-	if (!ret) {
-		spin_lock(&sriov->id_map_lock);
-		list_add_tail(&ent->list, &sriov->cm_list);
-		spin_unlock(&sriov->id_map_lock);
-		return ent;
-	}
+	idr_preload(GFP_KERNEL);
+	spin_lock(&to_mdev(ibdev)->sriov.id_map_lock);
+
+	ret = idr_alloc(&sriov->pv_id_table, ent, next_id, 0, GFP_NOWAIT);
+	if (ret >= 0) {
+		next_id = max(ret + 1, 0);
+		ent->pv_cm_id = (u32)ret;
+		sl_id_map_add(ibdev, ent);
+		list_add_tail(&ent->list, &sriov->cm_list);
+	}
+
+	spin_unlock(&sriov->id_map_lock);
+	idr_preload_end();
+
+	if (ret >= 0)
+		return ent;
+
 	/*error flow*/
 	kfree(ent);
 	mlx4_ib_warn(ibdev, "No more space in the idr (err:0x%x)\n", ret);
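
The mlx4 hunk also shows the cyclic-allocation idiom: next_id is a search hint passed as idr_alloc()'s start, and since the returned id is a plain int, max(ret + 1, 0) wraps the hint back to 0 instead of letting it go negative (replacing the old masking with MAX_IDR_MASK). A hedged sketch under the assumption that the caller already holds the protecting lock (alloc_cyclic and its parameters are illustrative, not from the patch):

    /* Allocate ids in a roughly round-robin fashion starting at *next_id. */
    static int alloc_cyclic(struct idr *idr, void *ptr, int *next_id)
    {
    	int id = idr_alloc(idr, ptr, *next_id, 0, GFP_NOWAIT);

    	if (id >= 0)
    		*next_id = max(id + 1, 0);	/* wrap hint on overflow */
    	return id;
    }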
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_main.c b/drivers/infiniband/hw/ocrdma/ocrdma_main.c
index c4e0131f1b57..48928c8e7774 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_main.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_main.c
@@ -51,18 +51,6 @@ static DEFINE_IDR(ocrdma_dev_id);
 
 static union ib_gid ocrdma_zero_sgid;
 
-static int ocrdma_get_instance(void)
-{
-	int instance = 0;
-
-	/* Assign an unused number */
-	if (!idr_pre_get(&ocrdma_dev_id, GFP_KERNEL))
-		return -1;
-	if (idr_get_new(&ocrdma_dev_id, NULL, &instance))
-		return -1;
-	return instance;
-}
-
 void ocrdma_get_guid(struct ocrdma_dev *dev, u8 *guid)
 {
 	u8 mac_addr[6];
@@ -416,7 +404,7 @@ static struct ocrdma_dev *ocrdma_add(struct be_dev_info *dev_info)
 		goto idr_err;
 
 	memcpy(&dev->nic_info, dev_info, sizeof(*dev_info));
-	dev->id = ocrdma_get_instance();
+	dev->id = idr_alloc(&ocrdma_dev_id, NULL, 0, 0, GFP_KERNEL);
 	if (dev->id < 0)
 		goto idr_err;
 
diff --git a/drivers/infiniband/hw/qib/qib_init.c b/drivers/infiniband/hw/qib/qib_init.c
index ddf066d9abb6..50e33aa0b4e3 100644
--- a/drivers/infiniband/hw/qib/qib_init.c
+++ b/drivers/infiniband/hw/qib/qib_init.c
@@ -1060,22 +1060,23 @@ struct qib_devdata *qib_alloc_devdata(struct pci_dev *pdev, size_t extra)
 	struct qib_devdata *dd;
 	int ret;
 
-	if (!idr_pre_get(&qib_unit_table, GFP_KERNEL)) {
-		dd = ERR_PTR(-ENOMEM);
-		goto bail;
-	}
-
 	dd = (struct qib_devdata *) ib_alloc_device(sizeof(*dd) + extra);
 	if (!dd) {
 		dd = ERR_PTR(-ENOMEM);
 		goto bail;
 	}
 
+	idr_preload(GFP_KERNEL);
 	spin_lock_irqsave(&qib_devs_lock, flags);
-	ret = idr_get_new(&qib_unit_table, dd, &dd->unit);
-	if (ret >= 0)
+
+	ret = idr_alloc(&qib_unit_table, dd, 0, 0, GFP_NOWAIT);
+	if (ret >= 0) {
+		dd->unit = ret;
 		list_add(&dd->list, &qib_dev_list);
+	}
+
 	spin_unlock_irqrestore(&qib_devs_lock, flags);
+	idr_preload_end();
 
 	if (ret < 0) {
 		qib_early_err(&pdev->dev,
@@ -1180,11 +1181,6 @@ static int __init qlogic_ib_init(void)
 	 * the PCI subsystem.
 	 */
 	idr_init(&qib_unit_table);
-	if (!idr_pre_get(&qib_unit_table, GFP_KERNEL)) {
-		pr_err("idr_pre_get() failed\n");
-		ret = -ENOMEM;
-		goto bail_cq_wq;
-	}
 
 	ret = pci_register_driver(&qib_driver);
 	if (ret < 0) {
@@ -1199,7 +1195,6 @@ static int __init qlogic_ib_init(void)
 
 bail_unit:
 	idr_destroy(&qib_unit_table);
-bail_cq_wq:
 	destroy_workqueue(qib_cq_wq);
 bail_dev:
 	qib_dev_cleanup();
diff --git a/drivers/isdn/mISDN/socket.c b/drivers/isdn/mISDN/socket.c
index abe2d699b6f3..8b07f83d48ad 100644
--- a/drivers/isdn/mISDN/socket.c
+++ b/drivers/isdn/mISDN/socket.c
@@ -483,7 +483,6 @@ data_sock_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
 {
 	struct sockaddr_mISDN *maddr = (struct sockaddr_mISDN *) addr;
 	struct sock *sk = sock->sk;
-	struct hlist_node *node;
 	struct sock *csk;
 	int err = 0;
 
@@ -508,7 +507,7 @@ data_sock_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
 
 	if (sk->sk_protocol < ISDN_P_B_START) {
 		read_lock_bh(&data_sockets.lock);
-		sk_for_each(csk, node, &data_sockets.head) {
+		sk_for_each(csk, &data_sockets.head) {
 			if (sk == csk)
 				continue;
 			if (_pms(csk)->dev != _pms(sk)->dev)
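
This and the following hunks are the "hlist: drop the node parameter from iterators" part of the series: every hlist/sk iterator loses its separate struct hlist_node * cursor, because the macros can now derive the node from the typed entry itself. A minimal sketch of the new calling convention, using a hypothetical struct (item, find_item and key are illustrative names):

    #include <linux/list.h>

    struct item {
    	struct hlist_node node;
    	int key;
    };

    static struct item *find_item(struct hlist_head *head, int key)
    {
    	struct item *it;

    	/* was: hlist_for_each_entry(it, pos, head, node) with hlist_node *pos */
    	hlist_for_each_entry(it, head, node)
    		if (it->key == key)
    			return it;
    	return NULL;
    }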
diff --git a/drivers/isdn/mISDN/stack.c b/drivers/isdn/mISDN/stack.c
index deda591f70b9..9cb4b621fbc3 100644
--- a/drivers/isdn/mISDN/stack.c
+++ b/drivers/isdn/mISDN/stack.c
@@ -64,12 +64,11 @@ unlock:
 static void
 send_socklist(struct mISDN_sock_list *sl, struct sk_buff *skb)
 {
-	struct hlist_node *node;
 	struct sock *sk;
 	struct sk_buff *cskb = NULL;
 
 	read_lock(&sl->lock);
-	sk_for_each(sk, node, &sl->head) {
+	sk_for_each(sk, &sl->head) {
 		if (sk->sk_state != MISDN_BOUND)
 			continue;
 		if (!cskb)
diff --git a/drivers/md/dm-bio-prison.c b/drivers/md/dm-bio-prison.c
index aefb78e3cbf9..d9d3f1c7b662 100644
--- a/drivers/md/dm-bio-prison.c
+++ b/drivers/md/dm-bio-prison.c
@@ -106,9 +106,8 @@ static struct dm_bio_prison_cell *__search_bucket(struct hlist_head *bucket,
 						  struct dm_cell_key *key)
 {
 	struct dm_bio_prison_cell *cell;
-	struct hlist_node *tmp;
 
-	hlist_for_each_entry(cell, tmp, bucket, list)
+	hlist_for_each_entry(cell, bucket, list)
 		if (keys_equal(&cell->key, key))
 			return cell;
 
diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c
index 651ca79881dd..93205e32a004 100644
--- a/drivers/md/dm-bufio.c
+++ b/drivers/md/dm-bufio.c
@@ -859,9 +859,8 @@ static void __check_watermark(struct dm_bufio_client *c)
 static struct dm_buffer *__find(struct dm_bufio_client *c, sector_t block)
 {
 	struct dm_buffer *b;
-	struct hlist_node *hn;
 
-	hlist_for_each_entry(b, hn, &c->cache_hash[DM_BUFIO_HASH(block)],
+	hlist_for_each_entry(b, &c->cache_hash[DM_BUFIO_HASH(block)],
 			     hash_list) {
 		dm_bufio_cond_resched();
 		if (b->block == block)
diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
index 59fc18ae52c2..10079e07edf4 100644
--- a/drivers/md/dm-snap.c
+++ b/drivers/md/dm-snap.c
@@ -227,12 +227,11 @@ static void stop_tracking_chunk(struct dm_snapshot *s, struct bio *bio)
 static int __chunk_is_tracked(struct dm_snapshot *s, chunk_t chunk)
 {
 	struct dm_snap_tracked_chunk *c;
-	struct hlist_node *hn;
 	int found = 0;
 
 	spin_lock_irq(&s->tracked_chunk_lock);
 
-	hlist_for_each_entry(c, hn,
+	hlist_for_each_entry(c,
 	    &s->tracked_chunk_hash[DM_TRACKED_CHUNK_HASH(chunk)], node) {
 		if (c->chunk == chunk) {
 			found = 1;
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 314a0e2faf79..e67a4be0080d 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -318,7 +318,6 @@ static void __exit dm_exit(void)
 	/*
 	 * Should be empty by this point.
 	 */
-	idr_remove_all(&_minor_idr);
 	idr_destroy(&_minor_idr);
 }
 
@@ -1756,62 +1755,38 @@ static void free_minor(int minor)
  */
 static int specific_minor(int minor)
 {
-	int r, m;
+	int r;
 
 	if (minor >= (1 << MINORBITS))
 		return -EINVAL;
 
-	r = idr_pre_get(&_minor_idr, GFP_KERNEL);
-	if (!r)
-		return -ENOMEM;
-
+	idr_preload(GFP_KERNEL);
 	spin_lock(&_minor_lock);
 
-	if (idr_find(&_minor_idr, minor)) {
-		r = -EBUSY;
-		goto out;
-	}
-
-	r = idr_get_new_above(&_minor_idr, MINOR_ALLOCED, minor, &m);
-	if (r)
-		goto out;
+	r = idr_alloc(&_minor_idr, MINOR_ALLOCED, minor, minor + 1, GFP_NOWAIT);
 
-	if (m != minor) {
-		idr_remove(&_minor_idr, m);
-		r = -EBUSY;
-		goto out;
-	}
-
-out:
 	spin_unlock(&_minor_lock);
-	return r;
+	idr_preload_end();
+	if (r < 0)
+		return r == -ENOSPC ? -EBUSY : r;
+	return 0;
 }
 
 static int next_free_minor(int *minor)
 {
-	int r, m;
-
-	r = idr_pre_get(&_minor_idr, GFP_KERNEL);
-	if (!r)
-		return -ENOMEM;
+	int r;
 
+	idr_preload(GFP_KERNEL);
 	spin_lock(&_minor_lock);
 
-	r = idr_get_new(&_minor_idr, MINOR_ALLOCED, &m);
-	if (r)
-		goto out;
-
-	if (m >= (1 << MINORBITS)) {
-		idr_remove(&_minor_idr, m);
-		r = -ENOSPC;
-		goto out;
-	}
-
-	*minor = m;
+	r = idr_alloc(&_minor_idr, MINOR_ALLOCED, 0, 1 << MINORBITS, GFP_NOWAIT);
 
-out:
 	spin_unlock(&_minor_lock);
-	return r;
+	idr_preload_end();
+	if (r < 0)
+		return r;
+	*minor = r;
+	return 0;
 }
 
 static const struct block_device_operations dm_blk_dops;
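
The two dm helpers above show how idr_alloc()'s [start, end) range subsumes the hand-rolled checks the old code needed: asking for exactly one minor turns "already allocated" into -ENOSPC (which specific_minor() reports as -EBUSY), and capping the range at 1 << MINORBITS replaces the post-allocation bounds test. A combined sketch under stated assumptions (pick_minor and the wanted < 0 convention are illustrative, not dm's API):

    /* wanted >= 0: reserve exactly that minor; wanted < 0: any free minor. */
    static int pick_minor(struct idr *idr, void *token, int wanted)
    {
    	int r;

    	if (wanted >= 0)
    		r = idr_alloc(idr, token, wanted, wanted + 1, GFP_KERNEL);
    	else
    		r = idr_alloc(idr, token, 0, 1 << MINORBITS, GFP_KERNEL);

    	if (r == -ENOSPC && wanted >= 0)
    		r = -EBUSY;	/* the requested minor is already in use */
    	return r;
    }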
diff --git a/drivers/md/persistent-data/dm-transaction-manager.c b/drivers/md/persistent-data/dm-transaction-manager.c
index 7b17a1fdeaf9..81da1a26042e 100644
--- a/drivers/md/persistent-data/dm-transaction-manager.c
+++ b/drivers/md/persistent-data/dm-transaction-manager.c
@@ -46,10 +46,9 @@ static int is_shadow(struct dm_transaction_manager *tm, dm_block_t b)
 	int r = 0;
 	unsigned bucket = dm_hash_block(b, DM_HASH_MASK);
 	struct shadow_info *si;
-	struct hlist_node *n;
 
 	spin_lock(&tm->lock);
-	hlist_for_each_entry(si, n, tm->buckets + bucket, hlist)
+	hlist_for_each_entry(si, tm->buckets + bucket, hlist)
 		if (si->where == b) {
 			r = 1;
 			break;
@@ -81,14 +80,14 @@ static void insert_shadow(struct dm_transaction_manager *tm, dm_block_t b)
 static void wipe_shadow_table(struct dm_transaction_manager *tm)
 {
 	struct shadow_info *si;
-	struct hlist_node *n, *tmp;
+	struct hlist_node *tmp;
 	struct hlist_head *bucket;
 	int i;
 
 	spin_lock(&tm->lock);
 	for (i = 0; i < DM_HASH_SIZE; i++) {
 		bucket = tm->buckets + i;
-		hlist_for_each_entry_safe(si, n, tmp, bucket, hlist)
+		hlist_for_each_entry_safe(si, tmp, bucket, hlist)
 			kfree(si);
 
 		INIT_HLIST_HEAD(bucket);
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 19d77a026639..697f026cb318 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -365,10 +365,9 @@ static struct stripe_head *__find_stripe(struct r5conf *conf, sector_t sector,
 					 short generation)
 {
 	struct stripe_head *sh;
-	struct hlist_node *hn;
 
 	pr_debug("__find_stripe, sector %llu\n", (unsigned long long)sector);
-	hlist_for_each_entry(sh, hn, stripe_hash(conf, sector), hash)
+	hlist_for_each_entry(sh, stripe_hash(conf, sector), hash)
 		if (sh->sector == sector && sh->generation == generation)
 			return sh;
 	pr_debug("__stripe %llu not in cache\n", (unsigned long long)sector);
diff --git a/drivers/memstick/core/memstick.c b/drivers/memstick/core/memstick.c
index 56ff19cdc2ad..ffcb10ac4341 100644
--- a/drivers/memstick/core/memstick.c
+++ b/drivers/memstick/core/memstick.c
@@ -512,18 +512,17 @@ int memstick_add_host(struct memstick_host *host)
 {
 	int rc;
 
-	while (1) {
-		if (!idr_pre_get(&memstick_host_idr, GFP_KERNEL))
-			return -ENOMEM;
+	idr_preload(GFP_KERNEL);
+	spin_lock(&memstick_host_lock);
 
-		spin_lock(&memstick_host_lock);
-		rc = idr_get_new(&memstick_host_idr, host, &host->id);
-		spin_unlock(&memstick_host_lock);
-		if (!rc)
-			break;
-		else if (rc != -EAGAIN)
-			return rc;
-	}
+	rc = idr_alloc(&memstick_host_idr, host, 0, 0, GFP_NOWAIT);
+	if (rc >= 0)
+		host->id = rc;
+
+	spin_unlock(&memstick_host_lock);
+	idr_preload_end();
+	if (rc < 0)
+		return rc;
 
 	dev_set_name(&host->dev, "memstick%u", host->id);
 
diff --git a/drivers/memstick/core/mspro_block.c b/drivers/memstick/core/mspro_block.c
index 9729b92fbfdd..f12b78dbce04 100644
--- a/drivers/memstick/core/mspro_block.c
+++ b/drivers/memstick/core/mspro_block.c
@@ -1213,21 +1213,10 @@ static int mspro_block_init_disk(struct memstick_dev *card)
 	msb->page_size = be16_to_cpu(sys_info->unit_size);
 
 	mutex_lock(&mspro_block_disk_lock);
-	if (!idr_pre_get(&mspro_block_disk_idr, GFP_KERNEL)) {
-		mutex_unlock(&mspro_block_disk_lock);
-		return -ENOMEM;
-	}
-
-	rc = idr_get_new(&mspro_block_disk_idr, card, &disk_id);
+	disk_id = idr_alloc(&mspro_block_disk_idr, card, 0, 256, GFP_KERNEL);
 	mutex_unlock(&mspro_block_disk_lock);
-
-	if (rc)
-		return rc;
-
-	if ((disk_id << MSPRO_BLOCK_PART_SHIFT) > 255) {
-		rc = -ENOSPC;
-		goto out_release_id;
-	}
+	if (disk_id < 0)
+		return disk_id;
 
 	msb->disk = alloc_disk(1 << MSPRO_BLOCK_PART_SHIFT);
 	if (!msb->disk) {
diff --git a/drivers/memstick/host/r592.c b/drivers/memstick/host/r592.c
index 29b2172ae18f..a7c5b31c0d50 100644
--- a/drivers/memstick/host/r592.c
+++ b/drivers/memstick/host/r592.c
@@ -454,7 +454,7 @@ static int r592_transfer_fifo_pio(struct r592_device *dev)
 /* Executes one TPC (data is read/written from small or large fifo) */
 static void r592_execute_tpc(struct r592_device *dev)
 {
-	bool is_write = dev->req->tpc >= MS_TPC_SET_RW_REG_ADRS;
+	bool is_write;
 	int len, error;
 	u32 status, reg;
 
@@ -463,6 +463,7 @@ static void r592_execute_tpc(struct r592_device *dev)
 		return;
 	}
 
+	is_write = dev->req->tpc >= MS_TPC_SET_RW_REG_ADRS;
 	len = dev->req->long_data ?
 		dev->req->sg.length : dev->req->data_len;
 
diff --git a/drivers/mfd/rtsx_pcr.c b/drivers/mfd/rtsx_pcr.c
index 481a98a10ecd..2f12cc13489a 100644
--- a/drivers/mfd/rtsx_pcr.c
+++ b/drivers/mfd/rtsx_pcr.c
@@ -1091,15 +1091,14 @@ static int rtsx_pci_probe(struct pci_dev *pcidev,
 	}
 	handle->pcr = pcr;
 
-	if (!idr_pre_get(&rtsx_pci_idr, GFP_KERNEL)) {
-		ret = -ENOMEM;
-		goto free_handle;
-	}
-
+	idr_preload(GFP_KERNEL);
 	spin_lock(&rtsx_pci_lock);
-	ret = idr_get_new(&rtsx_pci_idr, pcr, &pcr->id);
+	ret = idr_alloc(&rtsx_pci_idr, pcr, 0, 0, GFP_NOWAIT);
+	if (ret >= 0)
+		pcr->id = ret;
 	spin_unlock(&rtsx_pci_lock);
-	if (ret)
+	idr_preload_end();
+	if (ret < 0)
 		goto free_handle;
 
 	pcr->pci = pcidev;
diff --git a/drivers/misc/c2port/core.c b/drivers/misc/c2port/core.c
index f428d86bfc10..f32550a74bdd 100644
--- a/drivers/misc/c2port/core.c
+++ b/drivers/misc/c2port/core.c
@@ -885,7 +885,7 @@ struct c2port_device *c2port_device_register(char *name,
 					   struct c2port_ops *ops, void *devdata)
 {
 	struct c2port_device *c2dev;
-	int id, ret;
+	int ret;
 
 	if (unlikely(!ops) || unlikely(!ops->access) || \
 		unlikely(!ops->c2d_dir) || unlikely(!ops->c2ck_set) || \
@@ -897,22 +897,18 @@ struct c2port_device *c2port_device_register(char *name,
 	if (unlikely(!c2dev))
 		return ERR_PTR(-ENOMEM);
 
-	ret = idr_pre_get(&c2port_idr, GFP_KERNEL);
-	if (!ret) {
-		ret = -ENOMEM;
-		goto error_idr_get_new;
-	}
-
+	idr_preload(GFP_KERNEL);
 	spin_lock_irq(&c2port_idr_lock);
-	ret = idr_get_new(&c2port_idr, c2dev, &id);
+	ret = idr_alloc(&c2port_idr, c2dev, 0, 0, GFP_NOWAIT);
 	spin_unlock_irq(&c2port_idr_lock);
+	idr_preload_end();
 
 	if (ret < 0)
-		goto error_idr_get_new;
-	c2dev->id = id;
+		goto error_idr_alloc;
+	c2dev->id = ret;
 
 	c2dev->dev = device_create(c2port_class, NULL, 0, c2dev,
-				   "c2port%d", id);
+				   "c2port%d", c2dev->id);
 	if (unlikely(IS_ERR(c2dev->dev))) {
 		ret = PTR_ERR(c2dev->dev);
 		goto error_device_create;
@@ -946,10 +942,10 @@ error_device_create_bin_file:
 
 error_device_create:
 	spin_lock_irq(&c2port_idr_lock);
-	idr_remove(&c2port_idr, id);
+	idr_remove(&c2port_idr, c2dev->id);
 	spin_unlock_irq(&c2port_idr_lock);
 
-error_idr_get_new:
+error_idr_alloc:
 	kfree(c2dev);
 
 	return ERR_PTR(ret);
diff --git a/drivers/misc/sgi-gru/grutlbpurge.c b/drivers/misc/sgi-gru/grutlbpurge.c
index 240a6d361665..2129274ef7ab 100644
--- a/drivers/misc/sgi-gru/grutlbpurge.c
+++ b/drivers/misc/sgi-gru/grutlbpurge.c
@@ -280,11 +280,10 @@ static struct mmu_notifier *mmu_find_ops(struct mm_struct *mm,
 					 const struct mmu_notifier_ops *ops)
 {
 	struct mmu_notifier *mn, *gru_mn = NULL;
-	struct hlist_node *n;
 
 	if (mm->mmu_notifier_mm) {
 		rcu_read_lock();
-		hlist_for_each_entry_rcu(mn, n, &mm->mmu_notifier_mm->list,
+		hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list,
 					 hlist)
 			if (mn->ops == ops) {
 				gru_mn = mn;
diff --git a/drivers/misc/tifm_core.c b/drivers/misc/tifm_core.c
index 0bd5349b0422..0ab7c922212c 100644
--- a/drivers/misc/tifm_core.c
+++ b/drivers/misc/tifm_core.c
@@ -196,13 +196,14 @@ int tifm_add_adapter(struct tifm_adapter *fm)
 {
 	int rc;
 
-	if (!idr_pre_get(&tifm_adapter_idr, GFP_KERNEL))
-		return -ENOMEM;
-
+	idr_preload(GFP_KERNEL);
 	spin_lock(&tifm_adapter_lock);
-	rc = idr_get_new(&tifm_adapter_idr, fm, &fm->id);
+	rc = idr_alloc(&tifm_adapter_idr, fm, 0, 0, GFP_NOWAIT);
+	if (rc >= 0)
+		fm->id = rc;
 	spin_unlock(&tifm_adapter_lock);
-	if (rc)
+	idr_preload_end();
+	if (rc < 0)
 		return rc;
 
 	dev_set_name(&fm->dev, "tifm%u", fm->id);
diff --git a/drivers/misc/vmw_vmci/vmci_doorbell.c b/drivers/misc/vmw_vmci/vmci_doorbell.c
index c3e8397f62ed..a8cee33ae8d2 100644
--- a/drivers/misc/vmw_vmci/vmci_doorbell.c
+++ b/drivers/misc/vmw_vmci/vmci_doorbell.c
@@ -127,9 +127,8 @@ static struct dbell_entry *dbell_index_table_find(u32 idx)
 {
 	u32 bucket = VMCI_DOORBELL_HASH(idx);
 	struct dbell_entry *dbell;
-	struct hlist_node *node;
 
-	hlist_for_each_entry(dbell, node, &vmci_doorbell_it.entries[bucket],
+	hlist_for_each_entry(dbell, &vmci_doorbell_it.entries[bucket],
 			     node) {
 		if (idx == dbell->idx)
 			return dbell;
@@ -359,12 +358,10 @@ static void dbell_fire_entries(u32 notify_idx)
 {
 	u32 bucket = VMCI_DOORBELL_HASH(notify_idx);
 	struct dbell_entry *dbell;
-	struct hlist_node *node;
 
 	spin_lock_bh(&vmci_doorbell_it.lock);
 
-	hlist_for_each_entry(dbell, node,
-			     &vmci_doorbell_it.entries[bucket], node) {
+	hlist_for_each_entry(dbell, &vmci_doorbell_it.entries[bucket], node) {
 		if (dbell->idx == notify_idx &&
 		    atomic_read(&dbell->active) == 1) {
 			if (dbell->run_delayed) {
diff --git a/drivers/misc/vmw_vmci/vmci_resource.c b/drivers/misc/vmw_vmci/vmci_resource.c
index a196f84a4fd2..9a53a30de445 100644
--- a/drivers/misc/vmw_vmci/vmci_resource.c
+++ b/drivers/misc/vmw_vmci/vmci_resource.c
@@ -46,11 +46,10 @@ static struct vmci_resource *vmci_resource_lookup(struct vmci_handle handle,
 					       enum vmci_resource_type type)
 {
 	struct vmci_resource *r, *resource = NULL;
-	struct hlist_node *node;
 	unsigned int idx = vmci_resource_hash(handle);
 
 	rcu_read_lock();
-	hlist_for_each_entry_rcu(r, node,
+	hlist_for_each_entry_rcu(r,
 				 &vmci_resource_table.entries[idx], node) {
 		u32 cid = r->handle.context;
 		u32 rid = r->handle.resource;
@@ -146,12 +145,11 @@ void vmci_resource_remove(struct vmci_resource *resource)
 	struct vmci_handle handle = resource->handle;
 	unsigned int idx = vmci_resource_hash(handle);
 	struct vmci_resource *r;
-	struct hlist_node *node;
 
 	/* Remove resource from hash table. */
 	spin_lock(&vmci_resource_table.lock);
 
-	hlist_for_each_entry(r, node, &vmci_resource_table.entries[idx], node) {
+	hlist_for_each_entry(r, &vmci_resource_table.entries[idx], node) {
 		if (vmci_handle_is_equal(r->handle, resource->handle)) {
 			hlist_del_init_rcu(&r->node);
 			break;
diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c
index 821cd8224137..2a3593d9f87d 100644
--- a/drivers/mmc/core/host.c
+++ b/drivers/mmc/core/host.c
@@ -429,19 +429,20 @@ struct mmc_host *mmc_alloc_host(int extra, struct device *dev)
 	int err;
 	struct mmc_host *host;
 
-	if (!idr_pre_get(&mmc_host_idr, GFP_KERNEL))
-		return NULL;
-
 	host = kzalloc(sizeof(struct mmc_host) + extra, GFP_KERNEL);
 	if (!host)
 		return NULL;
 
 	/* scanning will be enabled when we're ready */
 	host->rescan_disable = 1;
+	idr_preload(GFP_KERNEL);
 	spin_lock(&mmc_host_lock);
-	err = idr_get_new(&mmc_host_idr, host, &host->index);
+	err = idr_alloc(&mmc_host_idr, host, 0, 0, GFP_NOWAIT);
+	if (err >= 0)
+		host->index = err;
 	spin_unlock(&mmc_host_lock);
-	if (err)
+	idr_preload_end();
+	if (err < 0)
 		goto free;
 
 	dev_set_name(&host->class_dev, "mmc%d", host->index);
diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c
index 60063ccb4c4b..98342213ed21 100644
--- a/drivers/mmc/host/dw_mmc.c
+++ b/drivers/mmc/host/dw_mmc.c
@@ -1453,7 +1453,7 @@ static void dw_mci_read_data_pio(struct dw_mci *host)
 		if (!sg_miter_next(sg_miter))
 			goto done;
 
-		host->sg = sg_miter->__sg;
+		host->sg = sg_miter->piter.sg;
 		buf = sg_miter->addr;
 		remain = sg_miter->length;
 		offset = 0;
@@ -1508,7 +1508,7 @@ static void dw_mci_write_data_pio(struct dw_mci *host)
 		if (!sg_miter_next(sg_miter))
 			goto done;
 
-		host->sg = sg_miter->__sg;
+		host->sg = sg_miter->piter.sg;
 		buf = sg_miter->addr;
 		remain = sg_miter->length;
 		offset = 0;
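
These dw_mmc hunks track the lib/scatterlist rework in this batch: the mapping iterator's __sg field becomes private, and the supported way to name the current scatterlist element is miter.piter.sg, alongside the already-public miter.addr and miter.length. A hedged sketch of the general sg_miter loop this relies on (copy_from_sg is an illustrative helper, not a kernel API):

    #include <linux/scatterlist.h>

    static size_t copy_from_sg(struct scatterlist *sgl, unsigned int nents,
    			   void *dst, size_t len)
    {
    	struct sg_mapping_iter miter;
    	size_t done = 0;

    	sg_miter_start(&miter, sgl, nents, SG_MITER_FROM_SG);
    	while (done < len && sg_miter_next(&miter)) {
    		/* miter.piter.sg is the current element; addr/length the chunk */
    		size_t n = min(miter.length, len - done);

    		memcpy(dst + done, miter.addr, n);
    		done += n;
    	}
    	sg_miter_stop(&miter);
    	return done;
    }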
diff --git a/drivers/mtd/mtdcore.c b/drivers/mtd/mtdcore.c
index ec794a72975d..61d5f56473e1 100644
--- a/drivers/mtd/mtdcore.c
+++ b/drivers/mtd/mtdcore.c
@@ -349,13 +349,8 @@ int add_mtd_device(struct mtd_info *mtd)
 	BUG_ON(mtd->writesize == 0);
 	mutex_lock(&mtd_table_mutex);
 
-	do {
-		if (!idr_pre_get(&mtd_idr, GFP_KERNEL))
-			goto fail_locked;
-		error = idr_get_new(&mtd_idr, mtd, &i);
-	} while (error == -EAGAIN);
-
-	if (error)
+	i = idr_alloc(&mtd_idr, mtd, 0, 0, GFP_KERNEL);
+	if (i < 0)
 		goto fail_locked;
 
 	mtd->index = i;
diff --git a/drivers/mtd/tests/mtd_nandecctest.c b/drivers/mtd/tests/mtd_nandecctest.c
index 1eee264509a8..db2f22e7966a 100644
--- a/drivers/mtd/tests/mtd_nandecctest.c
+++ b/drivers/mtd/tests/mtd_nandecctest.c
@@ -256,7 +256,7 @@ static int nand_ecc_test_run(const size_t size)
 		goto error;
 	}
 
-	get_random_bytes(correct_data, size);
+	prandom_bytes(correct_data, size);
 	__nand_calculate_ecc(correct_data, size, correct_ecc);
 
 	for (i = 0; i < ARRAY_SIZE(nand_ecc_test); i++) {
diff --git a/drivers/mtd/tests/mtd_oobtest.c b/drivers/mtd/tests/mtd_oobtest.c
index e827fa8cd844..3e24b379ffa4 100644
--- a/drivers/mtd/tests/mtd_oobtest.c
+++ b/drivers/mtd/tests/mtd_oobtest.c
@@ -29,6 +29,7 @@
 #include <linux/mtd/mtd.h>
 #include <linux/slab.h>
 #include <linux/sched.h>
+#include <linux/random.h>
 
 static int dev = -EINVAL;
 module_param(dev, int, S_IRUGO);
@@ -46,26 +47,7 @@ static int use_offset;
 static int use_len;
 static int use_len_max;
 static int vary_offset;
-static unsigned long next = 1;
-
-static inline unsigned int simple_rand(void)
-{
-	next = next * 1103515245 + 12345;
-	return (unsigned int)((next / 65536) % 32768);
-}
-
-static inline void simple_srand(unsigned long seed)
-{
-	next = seed;
-}
-
-static void set_random_data(unsigned char *buf, size_t len)
-{
-	size_t i;
-
-	for (i = 0; i < len; ++i)
-		buf[i] = simple_rand();
-}
+static struct rnd_state rnd_state;
 
 static int erase_eraseblock(int ebnum)
 {
@@ -129,7 +111,7 @@ static int write_eraseblock(int ebnum)
 	loff_t addr = ebnum * mtd->erasesize;
 
 	for (i = 0; i < pgcnt; ++i, addr += mtd->writesize) {
-		set_random_data(writebuf, use_len);
+		prandom_bytes_state(&rnd_state, writebuf, use_len);
 		ops.mode = MTD_OPS_AUTO_OOB;
 		ops.len = 0;
 		ops.retlen = 0;
@@ -182,7 +164,7 @@ static int verify_eraseblock(int ebnum)
 	loff_t addr = ebnum * mtd->erasesize;
 
 	for (i = 0; i < pgcnt; ++i, addr += mtd->writesize) {
-		set_random_data(writebuf, use_len);
+		prandom_bytes_state(&rnd_state, writebuf, use_len);
 		ops.mode = MTD_OPS_AUTO_OOB;
 		ops.len = 0;
 		ops.retlen = 0;
@@ -273,7 +255,7 @@ static int verify_eraseblock_in_one_go(int ebnum)
 	loff_t addr = ebnum * mtd->erasesize;
 	size_t len = mtd->ecclayout->oobavail * pgcnt;
 
-	set_random_data(writebuf, len);
+	prandom_bytes_state(&rnd_state, writebuf, len);
 	ops.mode = MTD_OPS_AUTO_OOB;
 	ops.len = 0;
 	ops.retlen = 0;
@@ -424,12 +406,12 @@ static int __init mtd_oobtest_init(void)
 	if (err)
 		goto out;
 
-	simple_srand(1);
+	prandom_seed_state(&rnd_state, 1);
 	err = write_whole_device();
 	if (err)
 		goto out;
 
-	simple_srand(1);
+	prandom_seed_state(&rnd_state, 1);
 	err = verify_all_eraseblocks();
 	if (err)
 		goto out;
@@ -444,13 +426,13 @@ static int __init mtd_oobtest_init(void)
 	if (err)
 		goto out;
 
-	simple_srand(3);
+	prandom_seed_state(&rnd_state, 3);
 	err = write_whole_device();
 	if (err)
 		goto out;
 
 	/* Check all eraseblocks */
-	simple_srand(3);
+	prandom_seed_state(&rnd_state, 3);
 	pr_info("verifying all eraseblocks\n");
 	for (i = 0; i < ebcnt; ++i) {
 		if (bbt[i])
@@ -479,7 +461,7 @@ static int __init mtd_oobtest_init(void)
 	use_len = mtd->ecclayout->oobavail;
 	use_len_max = mtd->ecclayout->oobavail;
 	vary_offset = 1;
-	simple_srand(5);
+	prandom_seed_state(&rnd_state, 5);
 
 	err = write_whole_device();
 	if (err)
@@ -490,7 +472,7 @@ static int __init mtd_oobtest_init(void)
 	use_len = mtd->ecclayout->oobavail;
 	use_len_max = mtd->ecclayout->oobavail;
 	vary_offset = 1;
-	simple_srand(5);
+	prandom_seed_state(&rnd_state, 5);
 	err = verify_all_eraseblocks();
 	if (err)
 		goto out;
@@ -649,7 +631,7 @@ static int __init mtd_oobtest_init(void)
 		goto out;
 
 	/* Write all eraseblocks */
-	simple_srand(11);
+	prandom_seed_state(&rnd_state, 11);
 	pr_info("writing OOBs of whole device\n");
 	for (i = 0; i < ebcnt - 1; ++i) {
 		int cnt = 2;
@@ -659,7 +641,7 @@ static int __init mtd_oobtest_init(void)
 			continue;
 		addr = (i + 1) * mtd->erasesize - mtd->writesize;
 		for (pg = 0; pg < cnt; ++pg) {
-			set_random_data(writebuf, sz);
+			prandom_bytes_state(&rnd_state, writebuf, sz);
 			ops.mode = MTD_OPS_AUTO_OOB;
 			ops.len = 0;
 			ops.retlen = 0;
@@ -680,12 +662,13 @@ static int __init mtd_oobtest_init(void)
 	pr_info("written %u eraseblocks\n", i);
 
 	/* Check all eraseblocks */
-	simple_srand(11);
+	prandom_seed_state(&rnd_state, 11);
 	pr_info("verifying all eraseblocks\n");
 	for (i = 0; i < ebcnt - 1; ++i) {
 		if (bbt[i] || bbt[i + 1])
 			continue;
-		set_random_data(writebuf, mtd->ecclayout->oobavail * 2);
+		prandom_bytes_state(&rnd_state, writebuf,
+				    mtd->ecclayout->oobavail * 2);
 		addr = (i + 1) * mtd->erasesize - mtd->writesize;
 		ops.mode = MTD_OPS_AUTO_OOB;
 		ops.len = 0;
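
The mtd test conversions above and below all follow the same recipe: the per-module linear congruential generator is replaced by a private struct rnd_state driven through the prandom library. The point of the seeded variants is determinism, not quality: reseeding with the same value replays an identical byte stream, so a verify pass can regenerate exactly the data the write pass put on flash. A small sketch of that idiom (fill_pattern is an illustrative helper, not part of the patches):

    #include <linux/random.h>

    static struct rnd_state rnd_state;

    /* Same seed => same bytes, so writer and verifier can stay in sync. */
    static void fill_pattern(void *buf, size_t len, u64 seed)
    {
    	prandom_seed_state(&rnd_state, seed);
    	prandom_bytes_state(&rnd_state, buf, len);
    }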
diff --git a/drivers/mtd/tests/mtd_pagetest.c b/drivers/mtd/tests/mtd_pagetest.c
index f93a76f88113..0c1140b6c286 100644
--- a/drivers/mtd/tests/mtd_pagetest.c
+++ b/drivers/mtd/tests/mtd_pagetest.c
@@ -29,6 +29,7 @@
 #include <linux/mtd/mtd.h>
 #include <linux/slab.h>
 #include <linux/sched.h>
+#include <linux/random.h>
 
 static int dev = -EINVAL;
 module_param(dev, int, S_IRUGO);
@@ -45,26 +46,7 @@ static int bufsize;
 static int ebcnt;
 static int pgcnt;
 static int errcnt;
-static unsigned long next = 1;
-
-static inline unsigned int simple_rand(void)
-{
-	next = next * 1103515245 + 12345;
-	return (unsigned int)((next / 65536) % 32768);
-}
-
-static inline void simple_srand(unsigned long seed)
-{
-	next = seed;
-}
-
-static void set_random_data(unsigned char *buf, size_t len)
-{
-	size_t i;
-
-	for (i = 0; i < len; ++i)
-		buf[i] = simple_rand();
-}
+static struct rnd_state rnd_state;
 
 static int erase_eraseblock(int ebnum)
 {
@@ -98,7 +80,7 @@ static int write_eraseblock(int ebnum)
 	size_t written;
 	loff_t addr = ebnum * mtd->erasesize;
 
-	set_random_data(writebuf, mtd->erasesize);
+	prandom_bytes_state(&rnd_state, writebuf, mtd->erasesize);
 	cond_resched();
 	err = mtd_write(mtd, addr, mtd->erasesize, &written, writebuf);
 	if (err || written != mtd->erasesize)
@@ -124,7 +106,7 @@ static int verify_eraseblock(int ebnum)
 	for (i = 0; i < ebcnt && bbt[ebcnt - i - 1]; ++i)
 		addrn -= mtd->erasesize;
 
-	set_random_data(writebuf, mtd->erasesize);
+	prandom_bytes_state(&rnd_state, writebuf, mtd->erasesize);
 	for (j = 0; j < pgcnt - 1; ++j, addr += pgsize) {
 		/* Do a read to set the internal dataRAMs to different data */
 		err = mtd_read(mtd, addr0, bufsize, &read, twopages);
@@ -160,7 +142,8 @@ static int verify_eraseblock(int ebnum)
 	}
 	/* Check boundary between eraseblocks */
 	if (addr <= addrn - pgsize - pgsize && !bbt[ebnum + 1]) {
-		unsigned long oldnext = next;
+		struct rnd_state old_state = rnd_state;
+
 		/* Do a read to set the internal dataRAMs to different data */
 		err = mtd_read(mtd, addr0, bufsize, &read, twopages);
 		if (mtd_is_bitflip(err))
@@ -188,13 +171,13 @@ static int verify_eraseblock(int ebnum)
 			return err;
 		}
 		memcpy(boundary, writebuf + mtd->erasesize - pgsize, pgsize);
-		set_random_data(boundary + pgsize, pgsize);
+		prandom_bytes_state(&rnd_state, boundary + pgsize, pgsize);
 		if (memcmp(twopages, boundary, bufsize)) {
 			pr_err("error: verify failed at %#llx\n",
 			       (long long)addr);
 			errcnt += 1;
 		}
-		next = oldnext;
+		rnd_state = old_state;
 	}
 	return err;
 }
@@ -326,7 +309,7 @@ static int erasecrosstest(void)
 		return err;
 
 	pr_info("writing 1st page of block %d\n", ebnum);
-	set_random_data(writebuf, pgsize);
+	prandom_bytes_state(&rnd_state, writebuf, pgsize);
 	strcpy(writebuf, "There is no data like this!");
 	err = mtd_write(mtd, addr0, pgsize, &written, writebuf);
 	if (err || written != pgsize) {
@@ -359,7 +342,7 @@ static int erasecrosstest(void)
 		return err;
 
 	pr_info("writing 1st page of block %d\n", ebnum);
-	set_random_data(writebuf, pgsize);
+	prandom_bytes_state(&rnd_state, writebuf, pgsize);
 	strcpy(writebuf, "There is no data like this!");
 	err = mtd_write(mtd, addr0, pgsize, &written, writebuf);
 	if (err || written != pgsize) {
@@ -417,7 +400,7 @@ static int erasetest(void)
 		return err;
 
 	pr_info("writing 1st page of block %d\n", ebnum);
-	set_random_data(writebuf, pgsize);
+	prandom_bytes_state(&rnd_state, writebuf, pgsize);
 	err = mtd_write(mtd, addr0, pgsize, &written, writebuf);
 	if (err || written != pgsize) {
 		pr_err("error: write failed at %#llx\n",
@@ -565,7 +548,7 @@ static int __init mtd_pagetest_init(void)
 	pr_info("erased %u eraseblocks\n", i);
 
 	/* Write all eraseblocks */
-	simple_srand(1);
+	prandom_seed_state(&rnd_state, 1);
 	pr_info("writing whole device\n");
 	for (i = 0; i < ebcnt; ++i) {
 		if (bbt[i])
@@ -580,7 +563,7 @@ static int __init mtd_pagetest_init(void)
 	pr_info("written %u eraseblocks\n", i);
 
 	/* Check all eraseblocks */
-	simple_srand(1);
+	prandom_seed_state(&rnd_state, 1);
 	pr_info("verifying all eraseblocks\n");
 	for (i = 0; i < ebcnt; ++i) {
 		if (bbt[i])
diff --git a/drivers/mtd/tests/mtd_speedtest.c b/drivers/mtd/tests/mtd_speedtest.c
index 596cbea8df4c..a6ce9c1fa6c5 100644
--- a/drivers/mtd/tests/mtd_speedtest.c
+++ b/drivers/mtd/tests/mtd_speedtest.c
@@ -49,13 +49,6 @@ static int pgcnt;
 static int goodebcnt;
 static struct timeval start, finish;
 
-static void set_random_data(unsigned char *buf, size_t len)
-{
-	size_t i;
-
-	for (i = 0; i < len; ++i)
-		buf[i] = random32();
-}
 
 static int erase_eraseblock(int ebnum)
 {
@@ -396,7 +389,7 @@ static int __init mtd_speedtest_init(void)
 		goto out;
 	}
 
-	set_random_data(iobuf, mtd->erasesize);
+	prandom_bytes(iobuf, mtd->erasesize);
 
 	err = scan_for_bad_eraseblocks();
 	if (err)
diff --git a/drivers/mtd/tests/mtd_stresstest.c b/drivers/mtd/tests/mtd_stresstest.c
index 3729f679ae5d..2d7e6cffd6d4 100644
--- a/drivers/mtd/tests/mtd_stresstest.c
+++ b/drivers/mtd/tests/mtd_stresstest.c
@@ -282,8 +282,7 @@ static int __init mtd_stresstest_init(void)
 	}
 	for (i = 0; i < ebcnt; i++)
 		offsets[i] = mtd->erasesize;
-	for (i = 0; i < bufsize; i++)
-		writebuf[i] = random32();
+	prandom_bytes(writebuf, bufsize);
 
 	err = scan_for_bad_eraseblocks();
 	if (err)
diff --git a/drivers/mtd/tests/mtd_subpagetest.c b/drivers/mtd/tests/mtd_subpagetest.c
index c880c2229c59..aade56f27945 100644
--- a/drivers/mtd/tests/mtd_subpagetest.c
+++ b/drivers/mtd/tests/mtd_subpagetest.c
@@ -28,6 +28,7 @@
28#include <linux/mtd/mtd.h> 28#include <linux/mtd/mtd.h>
29#include <linux/slab.h> 29#include <linux/slab.h>
30#include <linux/sched.h> 30#include <linux/sched.h>
31#include <linux/random.h>
31 32
32static int dev = -EINVAL; 33static int dev = -EINVAL;
33module_param(dev, int, S_IRUGO); 34module_param(dev, int, S_IRUGO);
@@ -43,26 +44,7 @@ static int bufsize;
43static int ebcnt; 44static int ebcnt;
44static int pgcnt; 45static int pgcnt;
45static int errcnt; 46static int errcnt;
46static unsigned long next = 1; 47static struct rnd_state rnd_state;
47
48static inline unsigned int simple_rand(void)
49{
50 next = next * 1103515245 + 12345;
51 return (unsigned int)((next / 65536) % 32768);
52}
53
54static inline void simple_srand(unsigned long seed)
55{
56 next = seed;
57}
58
59static void set_random_data(unsigned char *buf, size_t len)
60{
61 size_t i;
62
63 for (i = 0; i < len; ++i)
64 buf[i] = simple_rand();
65}
66 48
67static inline void clear_data(unsigned char *buf, size_t len) 49static inline void clear_data(unsigned char *buf, size_t len)
68{ 50{
@@ -119,7 +101,7 @@ static int write_eraseblock(int ebnum)
119 int err = 0; 101 int err = 0;
120 loff_t addr = ebnum * mtd->erasesize; 102 loff_t addr = ebnum * mtd->erasesize;
121 103
122 set_random_data(writebuf, subpgsize); 104 prandom_bytes_state(&rnd_state, writebuf, subpgsize);
123 err = mtd_write(mtd, addr, subpgsize, &written, writebuf); 105 err = mtd_write(mtd, addr, subpgsize, &written, writebuf);
124 if (unlikely(err || written != subpgsize)) { 106 if (unlikely(err || written != subpgsize)) {
125 pr_err("error: write failed at %#llx\n", 107 pr_err("error: write failed at %#llx\n",
@@ -133,7 +115,7 @@ static int write_eraseblock(int ebnum)
133 115
134 addr += subpgsize; 116 addr += subpgsize;
135 117
136 set_random_data(writebuf, subpgsize); 118 prandom_bytes_state(&rnd_state, writebuf, subpgsize);
137 err = mtd_write(mtd, addr, subpgsize, &written, writebuf); 119 err = mtd_write(mtd, addr, subpgsize, &written, writebuf);
138 if (unlikely(err || written != subpgsize)) { 120 if (unlikely(err || written != subpgsize)) {
139 pr_err("error: write failed at %#llx\n", 121 pr_err("error: write failed at %#llx\n",
@@ -157,7 +139,7 @@ static int write_eraseblock2(int ebnum)
157 for (k = 1; k < 33; ++k) { 139 for (k = 1; k < 33; ++k) {
158 if (addr + (subpgsize * k) > (ebnum + 1) * mtd->erasesize) 140 if (addr + (subpgsize * k) > (ebnum + 1) * mtd->erasesize)
159 break; 141 break;
160 set_random_data(writebuf, subpgsize * k); 142 prandom_bytes_state(&rnd_state, writebuf, subpgsize * k);
161 err = mtd_write(mtd, addr, subpgsize * k, &written, writebuf); 143 err = mtd_write(mtd, addr, subpgsize * k, &written, writebuf);
162 if (unlikely(err || written != subpgsize * k)) { 144 if (unlikely(err || written != subpgsize * k)) {
163 pr_err("error: write failed at %#llx\n", 145 pr_err("error: write failed at %#llx\n",
@@ -193,7 +175,7 @@ static int verify_eraseblock(int ebnum)
193 int err = 0; 175 int err = 0;
194 loff_t addr = ebnum * mtd->erasesize; 176 loff_t addr = ebnum * mtd->erasesize;
195 177
196 set_random_data(writebuf, subpgsize); 178 prandom_bytes_state(&rnd_state, writebuf, subpgsize);
197 clear_data(readbuf, subpgsize); 179 clear_data(readbuf, subpgsize);
198 err = mtd_read(mtd, addr, subpgsize, &read, readbuf); 180 err = mtd_read(mtd, addr, subpgsize, &read, readbuf);
199 if (unlikely(err || read != subpgsize)) { 181 if (unlikely(err || read != subpgsize)) {
@@ -220,7 +202,7 @@ static int verify_eraseblock(int ebnum)
220 202
221 addr += subpgsize; 203 addr += subpgsize;
222 204
223 set_random_data(writebuf, subpgsize); 205 prandom_bytes_state(&rnd_state, writebuf, subpgsize);
224 clear_data(readbuf, subpgsize); 206 clear_data(readbuf, subpgsize);
225 err = mtd_read(mtd, addr, subpgsize, &read, readbuf); 207 err = mtd_read(mtd, addr, subpgsize, &read, readbuf);
226 if (unlikely(err || read != subpgsize)) { 208 if (unlikely(err || read != subpgsize)) {
@@ -257,7 +239,7 @@ static int verify_eraseblock2(int ebnum)
257 for (k = 1; k < 33; ++k) { 239 for (k = 1; k < 33; ++k) {
258 if (addr + (subpgsize * k) > (ebnum + 1) * mtd->erasesize) 240 if (addr + (subpgsize * k) > (ebnum + 1) * mtd->erasesize)
259 break; 241 break;
260 set_random_data(writebuf, subpgsize * k); 242 prandom_bytes_state(&rnd_state, writebuf, subpgsize * k);
261 clear_data(readbuf, subpgsize * k); 243 clear_data(readbuf, subpgsize * k);
262 err = mtd_read(mtd, addr, subpgsize * k, &read, readbuf); 244 err = mtd_read(mtd, addr, subpgsize * k, &read, readbuf);
263 if (unlikely(err || read != subpgsize * k)) { 245 if (unlikely(err || read != subpgsize * k)) {
@@ -430,7 +412,7 @@ static int __init mtd_subpagetest_init(void)
430 goto out; 412 goto out;
431 413
432 pr_info("writing whole device\n"); 414 pr_info("writing whole device\n");
433 simple_srand(1); 415 prandom_seed_state(&rnd_state, 1);
434 for (i = 0; i < ebcnt; ++i) { 416 for (i = 0; i < ebcnt; ++i) {
435 if (bbt[i]) 417 if (bbt[i])
436 continue; 418 continue;
@@ -443,7 +425,7 @@ static int __init mtd_subpagetest_init(void)
443 } 425 }
444 pr_info("written %u eraseblocks\n", i); 426 pr_info("written %u eraseblocks\n", i);
445 427
446 simple_srand(1); 428 prandom_seed_state(&rnd_state, 1);
447 pr_info("verifying all eraseblocks\n"); 429 pr_info("verifying all eraseblocks\n");
448 for (i = 0; i < ebcnt; ++i) { 430 for (i = 0; i < ebcnt; ++i) {
449 if (bbt[i]) 431 if (bbt[i])
@@ -466,7 +448,7 @@ static int __init mtd_subpagetest_init(void)
466 goto out; 448 goto out;
467 449
468 /* Write all eraseblocks */ 450 /* Write all eraseblocks */
469 simple_srand(3); 451 prandom_seed_state(&rnd_state, 3);
470 pr_info("writing whole device\n"); 452 pr_info("writing whole device\n");
471 for (i = 0; i < ebcnt; ++i) { 453 for (i = 0; i < ebcnt; ++i) {
472 if (bbt[i]) 454 if (bbt[i])
@@ -481,7 +463,7 @@ static int __init mtd_subpagetest_init(void)
481 pr_info("written %u eraseblocks\n", i); 463 pr_info("written %u eraseblocks\n", i);
482 464
483 /* Check all eraseblocks */ 465 /* Check all eraseblocks */
484 simple_srand(3); 466 prandom_seed_state(&rnd_state, 3);
485 pr_info("verifying all eraseblocks\n"); 467 pr_info("verifying all eraseblocks\n");
486 for (i = 0; i < ebcnt; ++i) { 468 for (i = 0; i < ebcnt; ++i) {
487 if (bbt[i]) 469 if (bbt[i])
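The subpage test carried its own C-library-style LCG purely so it could replay a stream from a known seed; prandom_u32_state() gives the same replayability from the shared, better-distributed generator. A sketch of a drop-in word source under that assumption (names are illustrative):

    #include <linux/random.h>
    #include <linux/types.h>

    static struct rnd_state rs;

    /* Replaces the driver-local "next = next * 1103515245 + 12345"
     * LCG: same seed -> same stream, via the library generator. */
    static u32 next_test_word(void)
    {
            return prandom_u32_state(&rs);
    }

    /* Seed once per pass, e.g. prandom_seed_state(&rs, 3); */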
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
index f4d2e9e3c6d5..c3f1afd86906 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
@@ -2197,13 +2197,13 @@ static int ixgbe_get_ethtool_fdir_entry(struct ixgbe_adapter *adapter,
2197 union ixgbe_atr_input *mask = &adapter->fdir_mask; 2197 union ixgbe_atr_input *mask = &adapter->fdir_mask;
2198 struct ethtool_rx_flow_spec *fsp = 2198 struct ethtool_rx_flow_spec *fsp =
2199 (struct ethtool_rx_flow_spec *)&cmd->fs; 2199 (struct ethtool_rx_flow_spec *)&cmd->fs;
2200 struct hlist_node *node, *node2; 2200 struct hlist_node *node2;
2201 struct ixgbe_fdir_filter *rule = NULL; 2201 struct ixgbe_fdir_filter *rule = NULL;
2202 2202
2203 /* report total rule count */ 2203 /* report total rule count */
2204 cmd->data = (1024 << adapter->fdir_pballoc) - 2; 2204 cmd->data = (1024 << adapter->fdir_pballoc) - 2;
2205 2205
2206 hlist_for_each_entry_safe(rule, node, node2, 2206 hlist_for_each_entry_safe(rule, node2,
2207 &adapter->fdir_filter_list, fdir_node) { 2207 &adapter->fdir_filter_list, fdir_node) {
2208 if (fsp->location <= rule->sw_idx) 2208 if (fsp->location <= rule->sw_idx)
2209 break; 2209 break;
@@ -2264,14 +2264,14 @@ static int ixgbe_get_ethtool_fdir_all(struct ixgbe_adapter *adapter,
2264 struct ethtool_rxnfc *cmd, 2264 struct ethtool_rxnfc *cmd,
2265 u32 *rule_locs) 2265 u32 *rule_locs)
2266{ 2266{
2267 struct hlist_node *node, *node2; 2267 struct hlist_node *node2;
2268 struct ixgbe_fdir_filter *rule; 2268 struct ixgbe_fdir_filter *rule;
2269 int cnt = 0; 2269 int cnt = 0;
2270 2270
2271 /* report total rule count */ 2271 /* report total rule count */
2272 cmd->data = (1024 << adapter->fdir_pballoc) - 2; 2272 cmd->data = (1024 << adapter->fdir_pballoc) - 2;
2273 2273
2274 hlist_for_each_entry_safe(rule, node, node2, 2274 hlist_for_each_entry_safe(rule, node2,
2275 &adapter->fdir_filter_list, fdir_node) { 2275 &adapter->fdir_filter_list, fdir_node) {
2276 if (cnt == cmd->rule_cnt) 2276 if (cnt == cmd->rule_cnt)
2277 return -EMSGSIZE; 2277 return -EMSGSIZE;
@@ -2358,19 +2358,19 @@ static int ixgbe_update_ethtool_fdir_entry(struct ixgbe_adapter *adapter,
2358 u16 sw_idx) 2358 u16 sw_idx)
2359{ 2359{
2360 struct ixgbe_hw *hw = &adapter->hw; 2360 struct ixgbe_hw *hw = &adapter->hw;
2361 struct hlist_node *node, *node2, *parent; 2361 struct hlist_node *node2;
2362 struct ixgbe_fdir_filter *rule; 2362 struct ixgbe_fdir_filter *rule, *parent;
2363 int err = -EINVAL; 2363 int err = -EINVAL;
2364 2364
2365 parent = NULL; 2365 parent = NULL;
2366 rule = NULL; 2366 rule = NULL;
2367 2367
2368 hlist_for_each_entry_safe(rule, node, node2, 2368 hlist_for_each_entry_safe(rule, node2,
2369 &adapter->fdir_filter_list, fdir_node) { 2369 &adapter->fdir_filter_list, fdir_node) {
2370 /* hash found, or no matching entry */ 2370 /* hash found, or no matching entry */
2371 if (rule->sw_idx >= sw_idx) 2371 if (rule->sw_idx >= sw_idx)
2372 break; 2372 break;
2373 parent = node; 2373 parent = rule;
2374 } 2374 }
2375 2375
2376 /* if there is an old rule occupying our place remove it */ 2376 /* if there is an old rule occupying our place remove it */
@@ -2399,7 +2399,7 @@ static int ixgbe_update_ethtool_fdir_entry(struct ixgbe_adapter *adapter,
2399 2399
2400 /* add filter to the list */ 2400 /* add filter to the list */
2401 if (parent) 2401 if (parent)
2402 hlist_add_after(parent, &input->fdir_node); 2402 hlist_add_after(&parent->fdir_node, &input->fdir_node);
2403 else 2403 else
2404 hlist_add_head(&input->fdir_node, 2404 hlist_add_head(&input->fdir_node,
2405 &adapter->fdir_filter_list); 2405 &adapter->fdir_filter_list);
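The ixgbe hunks show the knock-on effect of dropping the iterator's node argument: code that used to remember the cursor node must now remember the entry itself and reach the linkage through &entry->member when calling hlist_add_after(). A self-contained sketch of that sorted-insert pattern, using the new iterator form (struct rule and the function name are invented for illustration):

    #include <linux/list.h>
    #include <linux/types.h>

    struct rule {
            u16 sw_idx;
            struct hlist_node node;
    };

    /* "parent" is now a struct rule *, not a struct hlist_node *. */
    static void rule_insert_sorted(struct hlist_head *head, struct rule *nw)
    {
            struct rule *cur, *parent = NULL;
            struct hlist_node *tmp;

            hlist_for_each_entry_safe(cur, tmp, head, node) {
                    if (cur->sw_idx >= nw->sw_idx)
                            break;
                    parent = cur;
            }

            if (parent)
                    hlist_add_after(&parent->node, &nw->node);
            else
                    hlist_add_head(&nw->node, head);
    }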
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 68478d6dfa2d..db5611ae407e 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -3891,7 +3891,7 @@ static void ixgbe_configure_pb(struct ixgbe_adapter *adapter)
3891static void ixgbe_fdir_filter_restore(struct ixgbe_adapter *adapter) 3891static void ixgbe_fdir_filter_restore(struct ixgbe_adapter *adapter)
3892{ 3892{
3893 struct ixgbe_hw *hw = &adapter->hw; 3893 struct ixgbe_hw *hw = &adapter->hw;
3894 struct hlist_node *node, *node2; 3894 struct hlist_node *node2;
3895 struct ixgbe_fdir_filter *filter; 3895 struct ixgbe_fdir_filter *filter;
3896 3896
3897 spin_lock(&adapter->fdir_perfect_lock); 3897 spin_lock(&adapter->fdir_perfect_lock);
@@ -3899,7 +3899,7 @@ static void ixgbe_fdir_filter_restore(struct ixgbe_adapter *adapter)
3899 if (!hlist_empty(&adapter->fdir_filter_list)) 3899 if (!hlist_empty(&adapter->fdir_filter_list))
3900 ixgbe_fdir_set_input_mask_82599(hw, &adapter->fdir_mask); 3900 ixgbe_fdir_set_input_mask_82599(hw, &adapter->fdir_mask);
3901 3901
3902 hlist_for_each_entry_safe(filter, node, node2, 3902 hlist_for_each_entry_safe(filter, node2,
3903 &adapter->fdir_filter_list, fdir_node) { 3903 &adapter->fdir_filter_list, fdir_node) {
3904 ixgbe_fdir_write_perfect_filter_82599(hw, 3904 ixgbe_fdir_write_perfect_filter_82599(hw,
3905 &filter->filter, 3905 &filter->filter,
@@ -4356,12 +4356,12 @@ static void ixgbe_clean_all_tx_rings(struct ixgbe_adapter *adapter)
4356 4356
4357static void ixgbe_fdir_filter_exit(struct ixgbe_adapter *adapter) 4357static void ixgbe_fdir_filter_exit(struct ixgbe_adapter *adapter)
4358{ 4358{
4359 struct hlist_node *node, *node2; 4359 struct hlist_node *node2;
4360 struct ixgbe_fdir_filter *filter; 4360 struct ixgbe_fdir_filter *filter;
4361 4361
4362 spin_lock(&adapter->fdir_perfect_lock); 4362 spin_lock(&adapter->fdir_perfect_lock);
4363 4363
4364 hlist_for_each_entry_safe(filter, node, node2, 4364 hlist_for_each_entry_safe(filter, node2,
4365 &adapter->fdir_filter_list, fdir_node) { 4365 &adapter->fdir_filter_list, fdir_node) {
4366 hlist_del(&filter->fdir_node); 4366 hlist_del(&filter->fdir_node);
4367 kfree(filter); 4367 kfree(filter);
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index 5385474bb526..bb4d8d99f36d 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -225,11 +225,10 @@ static inline struct mlx4_en_filter *
225mlx4_en_filter_find(struct mlx4_en_priv *priv, __be32 src_ip, __be32 dst_ip, 225mlx4_en_filter_find(struct mlx4_en_priv *priv, __be32 src_ip, __be32 dst_ip,
226 __be16 src_port, __be16 dst_port) 226 __be16 src_port, __be16 dst_port)
227{ 227{
228 struct hlist_node *elem;
229 struct mlx4_en_filter *filter; 228 struct mlx4_en_filter *filter;
230 struct mlx4_en_filter *ret = NULL; 229 struct mlx4_en_filter *ret = NULL;
231 230
232 hlist_for_each_entry(filter, elem, 231 hlist_for_each_entry(filter,
233 filter_hash_bucket(priv, src_ip, dst_ip, 232 filter_hash_bucket(priv, src_ip, dst_ip,
234 src_port, dst_port), 233 src_port, dst_port),
235 filter_chain) { 234 filter_chain) {
@@ -574,13 +573,13 @@ static void mlx4_en_put_qp(struct mlx4_en_priv *priv)
574 573
575 if (dev->caps.steering_mode != MLX4_STEERING_MODE_A0) { 574 if (dev->caps.steering_mode != MLX4_STEERING_MODE_A0) {
576 struct mlx4_mac_entry *entry; 575 struct mlx4_mac_entry *entry;
577 struct hlist_node *n, *tmp; 576 struct hlist_node *tmp;
578 struct hlist_head *bucket; 577 struct hlist_head *bucket;
579 unsigned int mac_hash; 578 unsigned int mac_hash;
580 579
581 mac_hash = priv->dev->dev_addr[MLX4_EN_MAC_HASH_IDX]; 580 mac_hash = priv->dev->dev_addr[MLX4_EN_MAC_HASH_IDX];
582 bucket = &priv->mac_hash[mac_hash]; 581 bucket = &priv->mac_hash[mac_hash];
583 hlist_for_each_entry_safe(entry, n, tmp, bucket, hlist) { 582 hlist_for_each_entry_safe(entry, tmp, bucket, hlist) {
584 if (ether_addr_equal_64bits(entry->mac, 583 if (ether_addr_equal_64bits(entry->mac,
585 priv->dev->dev_addr)) { 584 priv->dev->dev_addr)) {
586 en_dbg(DRV, priv, "Releasing qp: port %d, MAC %pM, qpn %d\n", 585 en_dbg(DRV, priv, "Releasing qp: port %d, MAC %pM, qpn %d\n",
@@ -609,11 +608,11 @@ static int mlx4_en_replace_mac(struct mlx4_en_priv *priv, int qpn,
609 struct hlist_head *bucket; 608 struct hlist_head *bucket;
610 unsigned int mac_hash; 609 unsigned int mac_hash;
611 struct mlx4_mac_entry *entry; 610 struct mlx4_mac_entry *entry;
612 struct hlist_node *n, *tmp; 611 struct hlist_node *tmp;
613 u64 prev_mac_u64 = mlx4_en_mac_to_u64(prev_mac); 612 u64 prev_mac_u64 = mlx4_en_mac_to_u64(prev_mac);
614 613
615 bucket = &priv->mac_hash[prev_mac[MLX4_EN_MAC_HASH_IDX]]; 614 bucket = &priv->mac_hash[prev_mac[MLX4_EN_MAC_HASH_IDX]];
616 hlist_for_each_entry_safe(entry, n, tmp, bucket, hlist) { 615 hlist_for_each_entry_safe(entry, tmp, bucket, hlist) {
617 if (ether_addr_equal_64bits(entry->mac, prev_mac)) { 616 if (ether_addr_equal_64bits(entry->mac, prev_mac)) {
618 mlx4_en_uc_steer_release(priv, entry->mac, 617 mlx4_en_uc_steer_release(priv, entry->mac,
619 qpn, entry->reg_id); 618 qpn, entry->reg_id);
@@ -1019,7 +1018,7 @@ static void mlx4_en_do_uc_filter(struct mlx4_en_priv *priv,
1019{ 1018{
1020 struct netdev_hw_addr *ha; 1019 struct netdev_hw_addr *ha;
1021 struct mlx4_mac_entry *entry; 1020 struct mlx4_mac_entry *entry;
1022 struct hlist_node *n, *tmp; 1021 struct hlist_node *tmp;
1023 bool found; 1022 bool found;
1024 u64 mac; 1023 u64 mac;
1025 int err = 0; 1024 int err = 0;
@@ -1035,7 +1034,7 @@ static void mlx4_en_do_uc_filter(struct mlx4_en_priv *priv,
1035 /* find what to remove */ 1034 /* find what to remove */
1036 for (i = 0; i < MLX4_EN_MAC_HASH_SIZE; ++i) { 1035 for (i = 0; i < MLX4_EN_MAC_HASH_SIZE; ++i) {
1037 bucket = &priv->mac_hash[i]; 1036 bucket = &priv->mac_hash[i];
1038 hlist_for_each_entry_safe(entry, n, tmp, bucket, hlist) { 1037 hlist_for_each_entry_safe(entry, tmp, bucket, hlist) {
1039 found = false; 1038 found = false;
1040 netdev_for_each_uc_addr(ha, dev) { 1039 netdev_for_each_uc_addr(ha, dev) {
1041 if (ether_addr_equal_64bits(entry->mac, 1040 if (ether_addr_equal_64bits(entry->mac,
@@ -1078,7 +1077,7 @@ static void mlx4_en_do_uc_filter(struct mlx4_en_priv *priv,
1078 netdev_for_each_uc_addr(ha, dev) { 1077 netdev_for_each_uc_addr(ha, dev) {
1079 found = false; 1078 found = false;
1080 bucket = &priv->mac_hash[ha->addr[MLX4_EN_MAC_HASH_IDX]]; 1079 bucket = &priv->mac_hash[ha->addr[MLX4_EN_MAC_HASH_IDX]];
1081 hlist_for_each_entry(entry, n, bucket, hlist) { 1080 hlist_for_each_entry(entry, bucket, hlist) {
1082 if (ether_addr_equal_64bits(entry->mac, ha->addr)) { 1081 if (ether_addr_equal_64bits(entry->mac, ha->addr)) {
1083 found = true; 1082 found = true;
1084 break; 1083 break;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index ce38654bbdd0..c7f856308e1a 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -35,6 +35,7 @@
35#include <linux/slab.h> 35#include <linux/slab.h>
36#include <linux/mlx4/qp.h> 36#include <linux/mlx4/qp.h>
37#include <linux/skbuff.h> 37#include <linux/skbuff.h>
38#include <linux/rculist.h>
38#include <linux/if_ether.h> 39#include <linux/if_ether.h>
39#include <linux/if_vlan.h> 40#include <linux/if_vlan.h>
40#include <linux/vmalloc.h> 41#include <linux/vmalloc.h>
@@ -617,7 +618,6 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
617 618
618 if (is_multicast_ether_addr(ethh->h_dest)) { 619 if (is_multicast_ether_addr(ethh->h_dest)) {
619 struct mlx4_mac_entry *entry; 620 struct mlx4_mac_entry *entry;
620 struct hlist_node *n;
621 struct hlist_head *bucket; 621 struct hlist_head *bucket;
622 unsigned int mac_hash; 622 unsigned int mac_hash;
623 623
@@ -625,7 +625,7 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
625 mac_hash = ethh->h_source[MLX4_EN_MAC_HASH_IDX]; 625 mac_hash = ethh->h_source[MLX4_EN_MAC_HASH_IDX];
626 bucket = &priv->mac_hash[mac_hash]; 626 bucket = &priv->mac_hash[mac_hash];
627 rcu_read_lock(); 627 rcu_read_lock();
628 hlist_for_each_entry_rcu(entry, n, bucket, hlist) { 628 hlist_for_each_entry_rcu(entry, bucket, hlist) {
629 if (ether_addr_equal_64bits(entry->mac, 629 if (ether_addr_equal_64bits(entry->mac,
630 ethh->h_source)) { 630 ethh->h_source)) {
631 rcu_read_unlock(); 631 rcu_read_unlock();
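The same two-argument shape applies to the RCU iterator used in the mlx4 receive path; a minimal lockless lookup might look like this (struct mac_entry is illustrative, not the driver's type):

    #include <linux/etherdevice.h>
    #include <linux/rculist.h>

    struct mac_entry {
            unsigned char mac[ETH_ALEN];
            struct hlist_node hlist;
    };

    /* Entry, bucket head, member name -- no separate node cursor. */
    static struct mac_entry *mac_lookup(struct hlist_head *bucket,
                                        const unsigned char *addr)
    {
            struct mac_entry *e;

            rcu_read_lock();
            hlist_for_each_entry_rcu(e, bucket, hlist) {
                    if (ether_addr_equal(e->mac, addr)) {
                            rcu_read_unlock();
                            return e;
                    }
            }
            rcu_read_unlock();
            return NULL;
    }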
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
index 325e11e1ce0f..f89cc7a3fe6c 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
@@ -576,7 +576,7 @@ void qlcnic_free_mac_list(struct qlcnic_adapter *adapter)
576void qlcnic_prune_lb_filters(struct qlcnic_adapter *adapter) 576void qlcnic_prune_lb_filters(struct qlcnic_adapter *adapter)
577{ 577{
578 struct qlcnic_filter *tmp_fil; 578 struct qlcnic_filter *tmp_fil;
579 struct hlist_node *tmp_hnode, *n; 579 struct hlist_node *n;
580 struct hlist_head *head; 580 struct hlist_head *head;
581 int i; 581 int i;
582 unsigned long time; 582 unsigned long time;
@@ -584,7 +584,7 @@ void qlcnic_prune_lb_filters(struct qlcnic_adapter *adapter)
584 584
585 for (i = 0; i < adapter->fhash.fbucket_size; i++) { 585 for (i = 0; i < adapter->fhash.fbucket_size; i++) {
586 head = &(adapter->fhash.fhead[i]); 586 head = &(adapter->fhash.fhead[i]);
587 hlist_for_each_entry_safe(tmp_fil, tmp_hnode, n, head, fnode) { 587 hlist_for_each_entry_safe(tmp_fil, n, head, fnode) {
588 cmd = tmp_fil->vlan_id ? QLCNIC_MAC_VLAN_DEL : 588 cmd = tmp_fil->vlan_id ? QLCNIC_MAC_VLAN_DEL :
589 QLCNIC_MAC_DEL; 589 QLCNIC_MAC_DEL;
590 time = tmp_fil->ftime; 590 time = tmp_fil->ftime;
@@ -604,7 +604,7 @@ void qlcnic_prune_lb_filters(struct qlcnic_adapter *adapter)
604 for (i = 0; i < adapter->rx_fhash.fbucket_size; i++) { 604 for (i = 0; i < adapter->rx_fhash.fbucket_size; i++) {
605 head = &(adapter->rx_fhash.fhead[i]); 605 head = &(adapter->rx_fhash.fhead[i]);
606 606
607 hlist_for_each_entry_safe(tmp_fil, tmp_hnode, n, head, fnode) 607 hlist_for_each_entry_safe(tmp_fil, n, head, fnode)
608 { 608 {
609 time = tmp_fil->ftime; 609 time = tmp_fil->ftime;
610 if (jiffies > (QLCNIC_FILTER_AGE * HZ + time)) { 610 if (jiffies > (QLCNIC_FILTER_AGE * HZ + time)) {
@@ -621,14 +621,14 @@ void qlcnic_prune_lb_filters(struct qlcnic_adapter *adapter)
621void qlcnic_delete_lb_filters(struct qlcnic_adapter *adapter) 621void qlcnic_delete_lb_filters(struct qlcnic_adapter *adapter)
622{ 622{
623 struct qlcnic_filter *tmp_fil; 623 struct qlcnic_filter *tmp_fil;
624 struct hlist_node *tmp_hnode, *n; 624 struct hlist_node *n;
625 struct hlist_head *head; 625 struct hlist_head *head;
626 int i; 626 int i;
627 u8 cmd; 627 u8 cmd;
628 628
629 for (i = 0; i < adapter->fhash.fbucket_size; i++) { 629 for (i = 0; i < adapter->fhash.fbucket_size; i++) {
630 head = &(adapter->fhash.fhead[i]); 630 head = &(adapter->fhash.fhead[i]);
631 hlist_for_each_entry_safe(tmp_fil, tmp_hnode, n, head, fnode) { 631 hlist_for_each_entry_safe(tmp_fil, n, head, fnode) {
632 cmd = tmp_fil->vlan_id ? QLCNIC_MAC_VLAN_DEL : 632 cmd = tmp_fil->vlan_id ? QLCNIC_MAC_VLAN_DEL :
633 QLCNIC_MAC_DEL; 633 QLCNIC_MAC_DEL;
634 qlcnic_sre_macaddr_change(adapter, 634 qlcnic_sre_macaddr_change(adapter,
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
index 6387e0cc3ea9..0e630061bff3 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
@@ -162,7 +162,7 @@ void qlcnic_add_lb_filter(struct qlcnic_adapter *adapter, struct sk_buff *skb,
162{ 162{
163 struct ethhdr *phdr = (struct ethhdr *)(skb->data); 163 struct ethhdr *phdr = (struct ethhdr *)(skb->data);
164 struct qlcnic_filter *fil, *tmp_fil; 164 struct qlcnic_filter *fil, *tmp_fil;
165 struct hlist_node *tmp_hnode, *n; 165 struct hlist_node *n;
166 struct hlist_head *head; 166 struct hlist_head *head;
167 unsigned long time; 167 unsigned long time;
168 u64 src_addr = 0; 168 u64 src_addr = 0;
@@ -179,7 +179,7 @@ void qlcnic_add_lb_filter(struct qlcnic_adapter *adapter, struct sk_buff *skb,
179 (adapter->fhash.fbucket_size - 1); 179 (adapter->fhash.fbucket_size - 1);
180 head = &(adapter->rx_fhash.fhead[hindex]); 180 head = &(adapter->rx_fhash.fhead[hindex]);
181 181
182 hlist_for_each_entry_safe(tmp_fil, tmp_hnode, n, head, fnode) { 182 hlist_for_each_entry_safe(tmp_fil, n, head, fnode) {
183 if (!memcmp(tmp_fil->faddr, &src_addr, ETH_ALEN) && 183 if (!memcmp(tmp_fil->faddr, &src_addr, ETH_ALEN) &&
184 tmp_fil->vlan_id == vlan_id) { 184 tmp_fil->vlan_id == vlan_id) {
185 time = tmp_fil->ftime; 185 time = tmp_fil->ftime;
@@ -205,7 +205,7 @@ void qlcnic_add_lb_filter(struct qlcnic_adapter *adapter, struct sk_buff *skb,
205 (adapter->fhash.fbucket_size - 1); 205 (adapter->fhash.fbucket_size - 1);
206 head = &(adapter->rx_fhash.fhead[hindex]); 206 head = &(adapter->rx_fhash.fhead[hindex]);
207 spin_lock(&adapter->rx_mac_learn_lock); 207 spin_lock(&adapter->rx_mac_learn_lock);
208 hlist_for_each_entry_safe(tmp_fil, tmp_hnode, n, head, fnode) { 208 hlist_for_each_entry_safe(tmp_fil, n, head, fnode) {
209 if (!memcmp(tmp_fil->faddr, &src_addr, ETH_ALEN) && 209 if (!memcmp(tmp_fil->faddr, &src_addr, ETH_ALEN) &&
210 tmp_fil->vlan_id == vlan_id) { 210 tmp_fil->vlan_id == vlan_id) {
211 found = 1; 211 found = 1;
@@ -272,7 +272,7 @@ static void qlcnic_send_filter(struct qlcnic_adapter *adapter,
272 struct sk_buff *skb) 272 struct sk_buff *skb)
273{ 273{
274 struct qlcnic_filter *fil, *tmp_fil; 274 struct qlcnic_filter *fil, *tmp_fil;
275 struct hlist_node *tmp_hnode, *n; 275 struct hlist_node *n;
276 struct hlist_head *head; 276 struct hlist_head *head;
277 struct net_device *netdev = adapter->netdev; 277 struct net_device *netdev = adapter->netdev;
278 struct ethhdr *phdr = (struct ethhdr *)(skb->data); 278 struct ethhdr *phdr = (struct ethhdr *)(skb->data);
@@ -294,7 +294,7 @@ static void qlcnic_send_filter(struct qlcnic_adapter *adapter,
294 hindex = qlcnic_mac_hash(src_addr) & (adapter->fhash.fbucket_size - 1); 294 hindex = qlcnic_mac_hash(src_addr) & (adapter->fhash.fbucket_size - 1);
295 head = &(adapter->fhash.fhead[hindex]); 295 head = &(adapter->fhash.fhead[hindex]);
296 296
297 hlist_for_each_entry_safe(tmp_fil, tmp_hnode, n, head, fnode) { 297 hlist_for_each_entry_safe(tmp_fil, n, head, fnode) {
298 if (!memcmp(tmp_fil->faddr, &src_addr, ETH_ALEN) && 298 if (!memcmp(tmp_fil->faddr, &src_addr, ETH_ALEN) &&
299 tmp_fil->vlan_id == vlan_id) { 299 tmp_fil->vlan_id == vlan_id) {
300 if (jiffies > (QLCNIC_READD_AGE * HZ + tmp_fil->ftime)) 300 if (jiffies > (QLCNIC_READD_AGE * HZ + tmp_fil->ftime))
diff --git a/drivers/net/ethernet/sun/sunvnet.c b/drivers/net/ethernet/sun/sunvnet.c
index 289b4eefb42f..1df0ff3839e8 100644
--- a/drivers/net/ethernet/sun/sunvnet.c
+++ b/drivers/net/ethernet/sun/sunvnet.c
@@ -614,10 +614,9 @@ struct vnet_port *__tx_port_find(struct vnet *vp, struct sk_buff *skb)
614{ 614{
615 unsigned int hash = vnet_hashfn(skb->data); 615 unsigned int hash = vnet_hashfn(skb->data);
616 struct hlist_head *hp = &vp->port_hash[hash]; 616 struct hlist_head *hp = &vp->port_hash[hash];
617 struct hlist_node *n;
618 struct vnet_port *port; 617 struct vnet_port *port;
619 618
620 hlist_for_each_entry(port, n, hp, hash) { 619 hlist_for_each_entry(port, hp, hash) {
621 if (ether_addr_equal(port->raddr, skb->data)) 620 if (ether_addr_equal(port->raddr, skb->data))
622 return port; 621 return port;
623 } 622 }
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index defcd8a85744..417b2af1aa80 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -55,9 +55,8 @@ static struct macvlan_dev *macvlan_hash_lookup(const struct macvlan_port *port,
55 const unsigned char *addr) 55 const unsigned char *addr)
56{ 56{
57 struct macvlan_dev *vlan; 57 struct macvlan_dev *vlan;
58 struct hlist_node *n;
59 58
60 hlist_for_each_entry_rcu(vlan, n, &port->vlan_hash[addr[5]], hlist) { 59 hlist_for_each_entry_rcu(vlan, &port->vlan_hash[addr[5]], hlist) {
61 if (ether_addr_equal_64bits(vlan->dev->dev_addr, addr)) 60 if (ether_addr_equal_64bits(vlan->dev->dev_addr, addr))
62 return vlan; 61 return vlan;
63 } 62 }
@@ -149,7 +148,6 @@ static void macvlan_broadcast(struct sk_buff *skb,
149{ 148{
150 const struct ethhdr *eth = eth_hdr(skb); 149 const struct ethhdr *eth = eth_hdr(skb);
151 const struct macvlan_dev *vlan; 150 const struct macvlan_dev *vlan;
152 struct hlist_node *n;
153 struct sk_buff *nskb; 151 struct sk_buff *nskb;
154 unsigned int i; 152 unsigned int i;
155 int err; 153 int err;
@@ -159,7 +157,7 @@ static void macvlan_broadcast(struct sk_buff *skb,
159 return; 157 return;
160 158
161 for (i = 0; i < MACVLAN_HASH_SIZE; i++) { 159 for (i = 0; i < MACVLAN_HASH_SIZE; i++) {
162 hlist_for_each_entry_rcu(vlan, n, &port->vlan_hash[i], hlist) { 160 hlist_for_each_entry_rcu(vlan, &port->vlan_hash[i], hlist) {
163 if (vlan->dev == src || !(vlan->mode & mode)) 161 if (vlan->dev == src || !(vlan->mode & mode))
164 continue; 162 continue;
165 163
diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
index 97243011d319..a449439bd653 100644
--- a/drivers/net/macvtap.c
+++ b/drivers/net/macvtap.c
@@ -279,28 +279,17 @@ static int macvtap_receive(struct sk_buff *skb)
279static int macvtap_get_minor(struct macvlan_dev *vlan) 279static int macvtap_get_minor(struct macvlan_dev *vlan)
280{ 280{
281 int retval = -ENOMEM; 281 int retval = -ENOMEM;
282 int id;
283 282
284 mutex_lock(&minor_lock); 283 mutex_lock(&minor_lock);
285 if (idr_pre_get(&minor_idr, GFP_KERNEL) == 0) 284 retval = idr_alloc(&minor_idr, vlan, 1, MACVTAP_NUM_DEVS, GFP_KERNEL);
286 goto exit; 285 if (retval >= 0) {
287 286 vlan->minor = retval;
288 retval = idr_get_new_above(&minor_idr, vlan, 1, &id); 287 } else if (retval == -ENOSPC) {
289 if (retval < 0) {
290 if (retval == -EAGAIN)
291 retval = -ENOMEM;
292 goto exit;
293 }
294 if (id < MACVTAP_NUM_DEVS) {
295 vlan->minor = id;
296 } else {
297 printk(KERN_ERR "too many macvtap devices\n"); 288 printk(KERN_ERR "too many macvtap devices\n");
298 retval = -EINVAL; 289 retval = -EINVAL;
299 idr_remove(&minor_idr, id);
300 } 290 }
301exit:
302 mutex_unlock(&minor_lock); 291 mutex_unlock(&minor_lock);
303 return retval; 292 return retval < 0 ? retval : 0;
304} 293}
305 294
306static void macvtap_free_minor(struct macvlan_dev *vlan) 295static void macvtap_free_minor(struct macvlan_dev *vlan)
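macvtap is the simplest shape of the idr conversion: idr_alloc() rolls idr_pre_get() + idr_get_new_above() + the range check into one call that returns the new id or a negative errno, with -ENOSPC meaning the window [start, end) is exhausted. Roughly (get_minor and the statics are placeholder names):

    #include <linux/gfp.h>
    #include <linux/idr.h>
    #include <linux/mutex.h>

    static DEFINE_IDR(minor_idr);
    static DEFINE_MUTEX(minor_lock);

    /* Returns an id in [1, max_devs) or a negative errno; -ENOSPC
     * replaces the old "id >= MACVTAP_NUM_DEVS" overflow check. */
    static int get_minor(void *dev, int max_devs)
    {
            int id;

            mutex_lock(&minor_lock);
            id = idr_alloc(&minor_idr, dev, 1, max_devs, GFP_KERNEL);
            mutex_unlock(&minor_lock);

            return id;
    }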
diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
index 3db9131e9229..72ff14b811c6 100644
--- a/drivers/net/ppp/ppp_generic.c
+++ b/drivers/net/ppp/ppp_generic.c
@@ -2953,46 +2953,21 @@ static void __exit ppp_cleanup(void)
2953 * by holding all_ppp_mutex 2953 * by holding all_ppp_mutex
2954 */ 2954 */
2955 2955
2956static int __unit_alloc(struct idr *p, void *ptr, int n)
2957{
2958 int unit, err;
2959
2960again:
2961 if (!idr_pre_get(p, GFP_KERNEL)) {
2962 pr_err("PPP: No free memory for idr\n");
2963 return -ENOMEM;
2964 }
2965
2966 err = idr_get_new_above(p, ptr, n, &unit);
2967 if (err < 0) {
2968 if (err == -EAGAIN)
2969 goto again;
2970 return err;
2971 }
2972
2973 return unit;
2974}
2975
2976/* associate pointer with specified number */ 2956/* associate pointer with specified number */
2977static int unit_set(struct idr *p, void *ptr, int n) 2957static int unit_set(struct idr *p, void *ptr, int n)
2978{ 2958{
2979 int unit; 2959 int unit;
2980 2960
2981 unit = __unit_alloc(p, ptr, n); 2961 unit = idr_alloc(p, ptr, n, n + 1, GFP_KERNEL);
2982 if (unit < 0) 2962 if (unit == -ENOSPC)
2983 return unit; 2963 unit = -EINVAL;
2984 else if (unit != n) {
2985 idr_remove(p, unit);
2986 return -EINVAL;
2987 }
2988
2989 return unit; 2964 return unit;
2990} 2965}
2991 2966
2992/* get new free unit number and associate pointer with it */ 2967/* get new free unit number and associate pointer with it */
2993static int unit_get(struct idr *p, void *ptr) 2968static int unit_get(struct idr *p, void *ptr)
2994{ 2969{
2995 return __unit_alloc(p, ptr, 0); 2970 return idr_alloc(p, ptr, 0, 0, GFP_KERNEL);
2996} 2971}
2997 2972
2998/* put unit number back to a pool */ 2973/* put unit number back to a pool */
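ppp's unit_set() shows the idiom for claiming one specific id: a window of [n, n + 1) either yields n or fails with -ENOSPC, which the caller maps to -EINVAL exactly as the removed hand-rolled check did. As a sketch:

    #include <linux/errno.h>
    #include <linux/gfp.h>
    #include <linux/idr.h>

    /* Claim exactly id n, or report it as already taken. */
    static int claim_exact_id(struct idr *p, void *ptr, int n)
    {
            int id = idr_alloc(p, ptr, n, n + 1, GFP_KERNEL);

            return id == -ENOSPC ? -EINVAL : id;
    }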
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index b6f45c5d84d5..2c6a22e278ea 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -197,9 +197,8 @@ static inline u32 tun_hashfn(u32 rxhash)
197static struct tun_flow_entry *tun_flow_find(struct hlist_head *head, u32 rxhash) 197static struct tun_flow_entry *tun_flow_find(struct hlist_head *head, u32 rxhash)
198{ 198{
199 struct tun_flow_entry *e; 199 struct tun_flow_entry *e;
200 struct hlist_node *n;
201 200
202 hlist_for_each_entry_rcu(e, n, head, hash_link) { 201 hlist_for_each_entry_rcu(e, head, hash_link) {
203 if (e->rxhash == rxhash) 202 if (e->rxhash == rxhash)
204 return e; 203 return e;
205 } 204 }
@@ -241,9 +240,9 @@ static void tun_flow_flush(struct tun_struct *tun)
241 spin_lock_bh(&tun->lock); 240 spin_lock_bh(&tun->lock);
242 for (i = 0; i < TUN_NUM_FLOW_ENTRIES; i++) { 241 for (i = 0; i < TUN_NUM_FLOW_ENTRIES; i++) {
243 struct tun_flow_entry *e; 242 struct tun_flow_entry *e;
244 struct hlist_node *h, *n; 243 struct hlist_node *n;
245 244
246 hlist_for_each_entry_safe(e, h, n, &tun->flows[i], hash_link) 245 hlist_for_each_entry_safe(e, n, &tun->flows[i], hash_link)
247 tun_flow_delete(tun, e); 246 tun_flow_delete(tun, e);
248 } 247 }
249 spin_unlock_bh(&tun->lock); 248 spin_unlock_bh(&tun->lock);
@@ -256,9 +255,9 @@ static void tun_flow_delete_by_queue(struct tun_struct *tun, u16 queue_index)
256 spin_lock_bh(&tun->lock); 255 spin_lock_bh(&tun->lock);
257 for (i = 0; i < TUN_NUM_FLOW_ENTRIES; i++) { 256 for (i = 0; i < TUN_NUM_FLOW_ENTRIES; i++) {
258 struct tun_flow_entry *e; 257 struct tun_flow_entry *e;
259 struct hlist_node *h, *n; 258 struct hlist_node *n;
260 259
261 hlist_for_each_entry_safe(e, h, n, &tun->flows[i], hash_link) { 260 hlist_for_each_entry_safe(e, n, &tun->flows[i], hash_link) {
262 if (e->queue_index == queue_index) 261 if (e->queue_index == queue_index)
263 tun_flow_delete(tun, e); 262 tun_flow_delete(tun, e);
264 } 263 }
@@ -279,9 +278,9 @@ static void tun_flow_cleanup(unsigned long data)
279 spin_lock_bh(&tun->lock); 278 spin_lock_bh(&tun->lock);
280 for (i = 0; i < TUN_NUM_FLOW_ENTRIES; i++) { 279 for (i = 0; i < TUN_NUM_FLOW_ENTRIES; i++) {
281 struct tun_flow_entry *e; 280 struct tun_flow_entry *e;
282 struct hlist_node *h, *n; 281 struct hlist_node *n;
283 282
284 hlist_for_each_entry_safe(e, h, n, &tun->flows[i], hash_link) { 283 hlist_for_each_entry_safe(e, n, &tun->flows[i], hash_link) {
285 unsigned long this_timer; 284 unsigned long this_timer;
286 count++; 285 count++;
287 this_timer = e->updated + delay; 286 this_timer = e->updated + delay;
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index f736823f8437..f10e58ac9c1b 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -145,9 +145,8 @@ static inline struct hlist_head *vni_head(struct net *net, u32 id)
145static struct vxlan_dev *vxlan_find_vni(struct net *net, u32 id) 145static struct vxlan_dev *vxlan_find_vni(struct net *net, u32 id)
146{ 146{
147 struct vxlan_dev *vxlan; 147 struct vxlan_dev *vxlan;
148 struct hlist_node *node;
149 148
150 hlist_for_each_entry_rcu(vxlan, node, vni_head(net, id), hlist) { 149 hlist_for_each_entry_rcu(vxlan, vni_head(net, id), hlist) {
151 if (vxlan->vni == id) 150 if (vxlan->vni == id)
152 return vxlan; 151 return vxlan;
153 } 152 }
@@ -292,9 +291,8 @@ static struct vxlan_fdb *vxlan_find_mac(struct vxlan_dev *vxlan,
292{ 291{
293 struct hlist_head *head = vxlan_fdb_head(vxlan, mac); 292 struct hlist_head *head = vxlan_fdb_head(vxlan, mac);
294 struct vxlan_fdb *f; 293 struct vxlan_fdb *f;
295 struct hlist_node *node;
296 294
297 hlist_for_each_entry_rcu(f, node, head, hlist) { 295 hlist_for_each_entry_rcu(f, head, hlist) {
298 if (compare_ether_addr(mac, f->eth_addr) == 0) 296 if (compare_ether_addr(mac, f->eth_addr) == 0)
299 return f; 297 return f;
300 } 298 }
@@ -422,10 +420,9 @@ static int vxlan_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb,
422 420
423 for (h = 0; h < FDB_HASH_SIZE; ++h) { 421 for (h = 0; h < FDB_HASH_SIZE; ++h) {
424 struct vxlan_fdb *f; 422 struct vxlan_fdb *f;
425 struct hlist_node *n;
426 int err; 423 int err;
427 424
428 hlist_for_each_entry_rcu(f, n, &vxlan->fdb_head[h], hlist) { 425 hlist_for_each_entry_rcu(f, &vxlan->fdb_head[h], hlist) {
429 if (idx < cb->args[0]) 426 if (idx < cb->args[0])
430 goto skip; 427 goto skip;
431 428
@@ -483,11 +480,10 @@ static bool vxlan_group_used(struct vxlan_net *vn,
483 const struct vxlan_dev *this) 480 const struct vxlan_dev *this)
484{ 481{
485 const struct vxlan_dev *vxlan; 482 const struct vxlan_dev *vxlan;
486 struct hlist_node *node;
487 unsigned h; 483 unsigned h;
488 484
489 for (h = 0; h < VNI_HASH_SIZE; ++h) 485 for (h = 0; h < VNI_HASH_SIZE; ++h)
490 hlist_for_each_entry(vxlan, node, &vn->vni_list[h], hlist) { 486 hlist_for_each_entry(vxlan, &vn->vni_list[h], hlist) {
491 if (vxlan == this) 487 if (vxlan == this)
492 continue; 488 continue;
493 489
diff --git a/drivers/net/wireless/zd1201.c b/drivers/net/wireless/zd1201.c
index 48273dd05b63..4941f201d6c8 100644
--- a/drivers/net/wireless/zd1201.c
+++ b/drivers/net/wireless/zd1201.c
@@ -309,7 +309,6 @@ static void zd1201_usbrx(struct urb *urb)
309 if (data[urb->actual_length-1] == ZD1201_PACKET_RXDATA) { 309 if (data[urb->actual_length-1] == ZD1201_PACKET_RXDATA) {
310 int datalen = urb->actual_length-1; 310 int datalen = urb->actual_length-1;
311 unsigned short len, fc, seq; 311 unsigned short len, fc, seq;
312 struct hlist_node *node;
313 312
314 len = ntohs(*(__be16 *)&data[datalen-2]); 313 len = ntohs(*(__be16 *)&data[datalen-2]);
315 if (len>datalen) 314 if (len>datalen)
@@ -362,7 +361,7 @@ static void zd1201_usbrx(struct urb *urb)
362 hlist_add_head(&frag->fnode, &zd->fraglist); 361 hlist_add_head(&frag->fnode, &zd->fraglist);
363 goto resubmit; 362 goto resubmit;
364 } 363 }
365 hlist_for_each_entry(frag, node, &zd->fraglist, fnode) 364 hlist_for_each_entry(frag, &zd->fraglist, fnode)
366 if (frag->seq == (seq&IEEE80211_SCTL_SEQ)) 365 if (frag->seq == (seq&IEEE80211_SCTL_SEQ))
367 break; 366 break;
368 if (!frag) 367 if (!frag)
@@ -1831,14 +1830,14 @@ err_zd:
1831static void zd1201_disconnect(struct usb_interface *interface) 1830static void zd1201_disconnect(struct usb_interface *interface)
1832{ 1831{
1833 struct zd1201 *zd = usb_get_intfdata(interface); 1832 struct zd1201 *zd = usb_get_intfdata(interface);
1834 struct hlist_node *node, *node2; 1833 struct hlist_node *node2;
1835 struct zd1201_frag *frag; 1834 struct zd1201_frag *frag;
1836 1835
1837 if (!zd) 1836 if (!zd)
1838 return; 1837 return;
1839 usb_set_intfdata(interface, NULL); 1838 usb_set_intfdata(interface, NULL);
1840 1839
1841 hlist_for_each_entry_safe(frag, node, node2, &zd->fraglist, fnode) { 1840 hlist_for_each_entry_safe(frag, node2, &zd->fraglist, fnode) {
1842 hlist_del_init(&frag->fnode); 1841 hlist_del_init(&frag->fnode);
1843 kfree_skb(frag->skb); 1842 kfree_skb(frag->skb);
1844 kfree(frag); 1843 kfree(frag);
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index 924e4665bd57..b099e0025d2b 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -842,9 +842,8 @@ static struct pci_cap_saved_state *pci_find_saved_cap(
842 struct pci_dev *pci_dev, char cap) 842 struct pci_dev *pci_dev, char cap)
843{ 843{
844 struct pci_cap_saved_state *tmp; 844 struct pci_cap_saved_state *tmp;
845 struct hlist_node *pos;
846 845
847 hlist_for_each_entry(tmp, pos, &pci_dev->saved_cap_space, next) { 846 hlist_for_each_entry(tmp, &pci_dev->saved_cap_space, next) {
848 if (tmp->cap.cap_nr == cap) 847 if (tmp->cap.cap_nr == cap)
849 return tmp; 848 return tmp;
850 } 849 }
@@ -1041,7 +1040,6 @@ struct pci_saved_state *pci_store_saved_state(struct pci_dev *dev)
1041 struct pci_saved_state *state; 1040 struct pci_saved_state *state;
1042 struct pci_cap_saved_state *tmp; 1041 struct pci_cap_saved_state *tmp;
1043 struct pci_cap_saved_data *cap; 1042 struct pci_cap_saved_data *cap;
1044 struct hlist_node *pos;
1045 size_t size; 1043 size_t size;
1046 1044
1047 if (!dev->state_saved) 1045 if (!dev->state_saved)
@@ -1049,7 +1047,7 @@ struct pci_saved_state *pci_store_saved_state(struct pci_dev *dev)
1049 1047
1050 size = sizeof(*state) + sizeof(struct pci_cap_saved_data); 1048 size = sizeof(*state) + sizeof(struct pci_cap_saved_data);
1051 1049
1052 hlist_for_each_entry(tmp, pos, &dev->saved_cap_space, next) 1050 hlist_for_each_entry(tmp, &dev->saved_cap_space, next)
1053 size += sizeof(struct pci_cap_saved_data) + tmp->cap.size; 1051 size += sizeof(struct pci_cap_saved_data) + tmp->cap.size;
1054 1052
1055 state = kzalloc(size, GFP_KERNEL); 1053 state = kzalloc(size, GFP_KERNEL);
@@ -1060,7 +1058,7 @@ struct pci_saved_state *pci_store_saved_state(struct pci_dev *dev)
1060 sizeof(state->config_space)); 1058 sizeof(state->config_space));
1061 1059
1062 cap = state->cap; 1060 cap = state->cap;
1063 hlist_for_each_entry(tmp, pos, &dev->saved_cap_space, next) { 1061 hlist_for_each_entry(tmp, &dev->saved_cap_space, next) {
1064 size_t len = sizeof(struct pci_cap_saved_data) + tmp->cap.size; 1062 size_t len = sizeof(struct pci_cap_saved_data) + tmp->cap.size;
1065 memcpy(cap, &tmp->cap, len); 1063 memcpy(cap, &tmp->cap, len);
1066 cap = (struct pci_cap_saved_data *)((u8 *)cap + len); 1064 cap = (struct pci_cap_saved_data *)((u8 *)cap + len);
@@ -2038,9 +2036,9 @@ void pci_allocate_cap_save_buffers(struct pci_dev *dev)
2038void pci_free_cap_save_buffers(struct pci_dev *dev) 2036void pci_free_cap_save_buffers(struct pci_dev *dev)
2039{ 2037{
2040 struct pci_cap_saved_state *tmp; 2038 struct pci_cap_saved_state *tmp;
2041 struct hlist_node *pos, *n; 2039 struct hlist_node *n;
2042 2040
2043 hlist_for_each_entry_safe(tmp, pos, n, &dev->saved_cap_space, next) 2041 hlist_for_each_entry_safe(tmp, n, &dev->saved_cap_space, next)
2044 kfree(tmp); 2042 kfree(tmp);
2045} 2043}
2046 2044
diff --git a/drivers/power/bq2415x_charger.c b/drivers/power/bq2415x_charger.c
index ca91396fc48e..0727f9256138 100644
--- a/drivers/power/bq2415x_charger.c
+++ b/drivers/power/bq2415x_charger.c
@@ -1515,16 +1515,11 @@ static int bq2415x_probe(struct i2c_client *client,
1515 } 1515 }
1516 1516
1517 /* Get new ID for the new device */ 1517 /* Get new ID for the new device */
1518 ret = idr_pre_get(&bq2415x_id, GFP_KERNEL);
1519 if (ret == 0)
1520 return -ENOMEM;
1521
1522 mutex_lock(&bq2415x_id_mutex); 1518 mutex_lock(&bq2415x_id_mutex);
1523 ret = idr_get_new(&bq2415x_id, client, &num); 1519 num = idr_alloc(&bq2415x_id, client, 0, 0, GFP_KERNEL);
1524 mutex_unlock(&bq2415x_id_mutex); 1520 mutex_unlock(&bq2415x_id_mutex);
1525 1521 if (num < 0)
1526 if (ret < 0) 1522 return num;
1527 return ret;
1528 1523
1529 name = kasprintf(GFP_KERNEL, "%s-%d", id->name, num); 1524 name = kasprintf(GFP_KERNEL, "%s-%d", id->name, num);
1530 if (!name) { 1525 if (!name) {
diff --git a/drivers/power/bq27x00_battery.c b/drivers/power/bq27x00_battery.c
index 8ccf5d7d0add..26037ca7efb4 100644
--- a/drivers/power/bq27x00_battery.c
+++ b/drivers/power/bq27x00_battery.c
@@ -791,14 +791,11 @@ static int bq27x00_battery_probe(struct i2c_client *client,
791 int retval = 0; 791 int retval = 0;
792 792
793 /* Get new ID for the new battery device */ 793 /* Get new ID for the new battery device */
794 retval = idr_pre_get(&battery_id, GFP_KERNEL);
795 if (retval == 0)
796 return -ENOMEM;
797 mutex_lock(&battery_mutex); 794 mutex_lock(&battery_mutex);
798 retval = idr_get_new(&battery_id, client, &num); 795 num = idr_alloc(&battery_id, client, 0, 0, GFP_KERNEL);
799 mutex_unlock(&battery_mutex); 796 mutex_unlock(&battery_mutex);
800 if (retval < 0) 797 if (num < 0)
801 return retval; 798 return num;
802 799
803 name = kasprintf(GFP_KERNEL, "%s-%d", id->name, num); 800 name = kasprintf(GFP_KERNEL, "%s-%d", id->name, num);
804 if (!name) { 801 if (!name) {
diff --git a/drivers/power/ds2782_battery.c b/drivers/power/ds2782_battery.c
index e7301b3ed623..c09e7726c96c 100644
--- a/drivers/power/ds2782_battery.c
+++ b/drivers/power/ds2782_battery.c
@@ -395,17 +395,12 @@ static int ds278x_battery_probe(struct i2c_client *client,
395 } 395 }
396 396
397 /* Get an ID for this battery */ 397 /* Get an ID for this battery */
398 ret = idr_pre_get(&battery_id, GFP_KERNEL);
399 if (ret == 0) {
400 ret = -ENOMEM;
401 goto fail_id;
402 }
403
404 mutex_lock(&battery_lock); 398 mutex_lock(&battery_lock);
405 ret = idr_get_new(&battery_id, client, &num); 399 ret = idr_alloc(&battery_id, client, 0, 0, GFP_KERNEL);
406 mutex_unlock(&battery_lock); 400 mutex_unlock(&battery_lock);
407 if (ret < 0) 401 if (ret < 0)
408 goto fail_id; 402 goto fail_id;
403 num = ret;
409 404
410 info = kzalloc(sizeof(*info), GFP_KERNEL); 405 info = kzalloc(sizeof(*info), GFP_KERNEL);
411 if (!info) { 406 if (!info) {
diff --git a/drivers/pps/clients/pps-gpio.c b/drivers/pps/clients/pps-gpio.c
index 2bf0c1b608dd..d3db26e46489 100644
--- a/drivers/pps/clients/pps-gpio.c
+++ b/drivers/pps/clients/pps-gpio.c
@@ -128,7 +128,8 @@ static int pps_gpio_probe(struct platform_device *pdev)
128 } 128 }
129 129
130 /* allocate space for device info */ 130 /* allocate space for device info */
131 data = kzalloc(sizeof(struct pps_gpio_device_data), GFP_KERNEL); 131 data = devm_kzalloc(&pdev->dev, sizeof(struct pps_gpio_device_data),
132 GFP_KERNEL);
132 if (data == NULL) { 133 if (data == NULL) {
133 err = -ENOMEM; 134 err = -ENOMEM;
134 goto return_error; 135 goto return_error;
@@ -150,7 +151,6 @@ static int pps_gpio_probe(struct platform_device *pdev)
150 pps_default_params |= PPS_CAPTURECLEAR | PPS_OFFSETCLEAR; 151 pps_default_params |= PPS_CAPTURECLEAR | PPS_OFFSETCLEAR;
151 data->pps = pps_register_source(&data->info, pps_default_params); 152 data->pps = pps_register_source(&data->info, pps_default_params);
152 if (data->pps == NULL) { 153 if (data->pps == NULL) {
153 kfree(data);
154 pr_err("failed to register IRQ %d as PPS source\n", irq); 154 pr_err("failed to register IRQ %d as PPS source\n", irq);
155 err = -EINVAL; 155 err = -EINVAL;
156 goto return_error; 156 goto return_error;
@@ -164,7 +164,6 @@ static int pps_gpio_probe(struct platform_device *pdev)
164 get_irqf_trigger_flags(pdata), data->info.name, data); 164 get_irqf_trigger_flags(pdata), data->info.name, data);
165 if (ret) { 165 if (ret) {
166 pps_unregister_source(data->pps); 166 pps_unregister_source(data->pps);
167 kfree(data);
168 pr_err("failed to acquire IRQ %d\n", irq); 167 pr_err("failed to acquire IRQ %d\n", irq);
169 err = -EINVAL; 168 err = -EINVAL;
170 goto return_error; 169 goto return_error;
@@ -190,7 +189,6 @@ static int pps_gpio_remove(struct platform_device *pdev)
190 gpio_free(pdata->gpio_pin); 189 gpio_free(pdata->gpio_pin);
191 pps_unregister_source(data->pps); 190 pps_unregister_source(data->pps);
192 pr_info("removed IRQ %d as PPS source\n", data->irq); 191 pr_info("removed IRQ %d as PPS source\n", data->irq);
193 kfree(data);
194 return 0; 192 return 0;
195} 193}
196 194
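The pps-gpio change is the standard devm conversion: tying the allocation's lifetime to the device lets every kfree() in the error paths and in remove() disappear. A minimal probe skeleton under that assumption (my_data/my_probe are placeholder names):

    #include <linux/device.h>
    #include <linux/errno.h>
    #include <linux/platform_device.h>
    #include <linux/slab.h>

    struct my_data {
            int irq;
    };

    /* devm_kzalloc() memory is freed automatically when probe fails
     * or the driver is unbound, so no matching kfree() is written. */
    static int my_probe(struct platform_device *pdev)
    {
            struct my_data *data;

            data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
            if (!data)
                    return -ENOMEM;

            platform_set_drvdata(pdev, data);
            return 0;
    }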
diff --git a/drivers/pps/kapi.c b/drivers/pps/kapi.c
index f197e8ea185c..cdad4d95b20e 100644
--- a/drivers/pps/kapi.c
+++ b/drivers/pps/kapi.c
@@ -102,7 +102,7 @@ struct pps_device *pps_register_source(struct pps_source_info *info,
102 goto pps_register_source_exit; 102 goto pps_register_source_exit;
103 } 103 }
104 104
105 /* These initializations must be done before calling idr_get_new() 105 /* These initializations must be done before calling idr_alloc()
 106 * in order to avoid races with pps_event(). 106 * in order to avoid races with pps_event().
107 */ 107 */
108 pps->params.api_version = PPS_API_VERS; 108 pps->params.api_version = PPS_API_VERS;
diff --git a/drivers/pps/pps.c b/drivers/pps/pps.c
index 6437703eb10f..7173e3ad475d 100644
--- a/drivers/pps/pps.c
+++ b/drivers/pps/pps.c
@@ -295,29 +295,21 @@ int pps_register_cdev(struct pps_device *pps)
295 dev_t devt; 295 dev_t devt;
296 296
297 mutex_lock(&pps_idr_lock); 297 mutex_lock(&pps_idr_lock);
298 /* Get new ID for the new PPS source */ 298 /*
 299 if (idr_pre_get(&pps_idr, GFP_KERNEL) == 0) { 299 * Get new ID for the new PPS source. After the idr_alloc() call
 300 mutex_unlock(&pps_idr_lock); 300 * the new source will be freely available in the kernel.
301 return -ENOMEM;
302 }
303
304 /* Now really allocate the PPS source.
305 * After idr_get_new() calling the new source will be freely available
306 * into the kernel.
307 */ 301 */
308 err = idr_get_new(&pps_idr, pps, &pps->id); 302 err = idr_alloc(&pps_idr, pps, 0, PPS_MAX_SOURCES, GFP_KERNEL);
309 mutex_unlock(&pps_idr_lock); 303 if (err < 0) {
310 304 if (err == -ENOSPC) {
311 if (err < 0) 305 pr_err("%s: too many PPS sources in the system\n",
312 return err; 306 pps->info.name);
313 307 err = -EBUSY;
314 pps->id &= MAX_IDR_MASK; 308 }
315 if (pps->id >= PPS_MAX_SOURCES) { 309 goto out_unlock;
316 pr_err("%s: too many PPS sources in the system\n",
317 pps->info.name);
318 err = -EBUSY;
319 goto free_idr;
320 } 310 }
311 pps->id = err;
312 mutex_unlock(&pps_idr_lock);
321 313
322 devt = MKDEV(MAJOR(pps_devt), pps->id); 314 devt = MKDEV(MAJOR(pps_devt), pps->id);
323 315
@@ -351,8 +343,8 @@ del_cdev:
351free_idr: 343free_idr:
352 mutex_lock(&pps_idr_lock); 344 mutex_lock(&pps_idr_lock);
353 idr_remove(&pps_idr, pps->id); 345 idr_remove(&pps_idr, pps->id);
346out_unlock:
354 mutex_unlock(&pps_idr_lock); 347 mutex_unlock(&pps_idr_lock);
355
356 return err; 348 return err;
357} 349}
358 350
diff --git a/drivers/remoteproc/remoteproc_core.c b/drivers/remoteproc/remoteproc_core.c
index dd3bfaf1ad40..29387df4bfc9 100644
--- a/drivers/remoteproc/remoteproc_core.c
+++ b/drivers/remoteproc/remoteproc_core.c
@@ -199,11 +199,6 @@ int rproc_alloc_vring(struct rproc_vdev *rvdev, int i)
199 /* actual size of vring (in bytes) */ 199 /* actual size of vring (in bytes) */
200 size = PAGE_ALIGN(vring_size(rvring->len, rvring->align)); 200 size = PAGE_ALIGN(vring_size(rvring->len, rvring->align));
201 201
202 if (!idr_pre_get(&rproc->notifyids, GFP_KERNEL)) {
203 dev_err(dev, "idr_pre_get failed\n");
204 return -ENOMEM;
205 }
206
207 /* 202 /*
208 * Allocate non-cacheable memory for the vring. In the future 203 * Allocate non-cacheable memory for the vring. In the future
209 * this call will also configure the IOMMU for us 204 * this call will also configure the IOMMU for us
@@ -221,12 +216,13 @@ int rproc_alloc_vring(struct rproc_vdev *rvdev, int i)
221 * TODO: let the rproc know the notifyid of this vring 216 * TODO: let the rproc know the notifyid of this vring
222 * TODO: support predefined notifyids (via resource table) 217 * TODO: support predefined notifyids (via resource table)
223 */ 218 */
224 ret = idr_get_new(&rproc->notifyids, rvring, &notifyid); 219 ret = idr_alloc(&rproc->notifyids, rvring, 0, 0, GFP_KERNEL);
225 if (ret) { 220 if (ret) {
226 dev_err(dev, "idr_get_new failed: %d\n", ret); 221 dev_err(dev, "idr_alloc failed: %d\n", ret);
227 dma_free_coherent(dev->parent, size, va, dma); 222 dma_free_coherent(dev->parent, size, va, dma);
228 return ret; 223 return ret;
229 } 224 }
225 notifyid = ret;
230 226
231 /* Store largest notifyid */ 227 /* Store largest notifyid */
232 rproc->max_notifyid = max(rproc->max_notifyid, notifyid); 228 rproc->max_notifyid = max(rproc->max_notifyid, notifyid);
@@ -1180,7 +1176,6 @@ static void rproc_type_release(struct device *dev)
1180 1176
1181 rproc_delete_debug_dir(rproc); 1177 rproc_delete_debug_dir(rproc);
1182 1178
1183 idr_remove_all(&rproc->notifyids);
1184 idr_destroy(&rproc->notifyids); 1179 idr_destroy(&rproc->notifyids);
1185 1180
1186 if (rproc->index >= 0) 1181 if (rproc->index >= 0)
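The remoteproc teardown also loses its idr_remove_all() call: as part of the same idr rework, idr_destroy() now releases every remaining id itself, so a single call is the whole teardown, e.g.:

    #include <linux/idr.h>

    /* idr_destroy() frees all outstanding ids too; no separate
     * idr_remove_all() pass is needed beforehand. */
    static void notifyids_teardown(struct idr *idr)
    {
            idr_destroy(idr);
    }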
diff --git a/drivers/rpmsg/virtio_rpmsg_bus.c b/drivers/rpmsg/virtio_rpmsg_bus.c
index d85446021ddb..a59684b5fc68 100644
--- a/drivers/rpmsg/virtio_rpmsg_bus.c
+++ b/drivers/rpmsg/virtio_rpmsg_bus.c
@@ -213,13 +213,10 @@ static struct rpmsg_endpoint *__rpmsg_create_ept(struct virtproc_info *vrp,
213 struct rpmsg_channel *rpdev, rpmsg_rx_cb_t cb, 213 struct rpmsg_channel *rpdev, rpmsg_rx_cb_t cb,
214 void *priv, u32 addr) 214 void *priv, u32 addr)
215{ 215{
216 int err, tmpaddr, request; 216 int id_min, id_max, id;
217 struct rpmsg_endpoint *ept; 217 struct rpmsg_endpoint *ept;
218 struct device *dev = rpdev ? &rpdev->dev : &vrp->vdev->dev; 218 struct device *dev = rpdev ? &rpdev->dev : &vrp->vdev->dev;
219 219
220 if (!idr_pre_get(&vrp->endpoints, GFP_KERNEL))
221 return NULL;
222
223 ept = kzalloc(sizeof(*ept), GFP_KERNEL); 220 ept = kzalloc(sizeof(*ept), GFP_KERNEL);
224 if (!ept) { 221 if (!ept) {
225 dev_err(dev, "failed to kzalloc a new ept\n"); 222 dev_err(dev, "failed to kzalloc a new ept\n");
@@ -234,31 +231,28 @@ static struct rpmsg_endpoint *__rpmsg_create_ept(struct virtproc_info *vrp,
234 ept->priv = priv; 231 ept->priv = priv;
235 232
236 /* do we need to allocate a local address ? */ 233 /* do we need to allocate a local address ? */
237 request = addr == RPMSG_ADDR_ANY ? RPMSG_RESERVED_ADDRESSES : addr; 234 if (addr == RPMSG_ADDR_ANY) {
235 id_min = RPMSG_RESERVED_ADDRESSES;
236 id_max = 0;
237 } else {
238 id_min = addr;
239 id_max = addr + 1;
240 }
238 241
239 mutex_lock(&vrp->endpoints_lock); 242 mutex_lock(&vrp->endpoints_lock);
240 243
241 /* bind the endpoint to an rpmsg address (and allocate one if needed) */ 244 /* bind the endpoint to an rpmsg address (and allocate one if needed) */
242 err = idr_get_new_above(&vrp->endpoints, ept, request, &tmpaddr); 245 id = idr_alloc(&vrp->endpoints, ept, id_min, id_max, GFP_KERNEL);
243 if (err) { 246 if (id < 0) {
244 dev_err(dev, "idr_get_new_above failed: %d\n", err); 247 dev_err(dev, "idr_alloc failed: %d\n", id);
245 goto free_ept; 248 goto free_ept;
246 } 249 }
247 250 ept->addr = id;
248 /* make sure the user's address request is fulfilled, if relevant */
249 if (addr != RPMSG_ADDR_ANY && tmpaddr != addr) {
250 dev_err(dev, "address 0x%x already in use\n", addr);
251 goto rem_idr;
252 }
253
254 ept->addr = tmpaddr;
255 251
256 mutex_unlock(&vrp->endpoints_lock); 252 mutex_unlock(&vrp->endpoints_lock);
257 253
258 return ept; 254 return ept;
259 255
260rem_idr:
261 idr_remove(&vrp->endpoints, request);
262free_ept: 256free_ept:
263 mutex_unlock(&vrp->endpoints_lock); 257 mutex_unlock(&vrp->endpoints_lock);
264 kref_put(&ept->refcount, __ept_release); 258 kref_put(&ept->refcount, __ept_release);
@@ -1036,7 +1030,6 @@ static void rpmsg_remove(struct virtio_device *vdev)
1036 if (vrp->ns_ept) 1030 if (vrp->ns_ept)
1037 __rpmsg_destroy_ept(vrp, vrp->ns_ept); 1031 __rpmsg_destroy_ept(vrp, vrp->ns_ept);
1038 1032
1039 idr_remove_all(&vrp->endpoints);
1040 idr_destroy(&vrp->endpoints); 1033 idr_destroy(&vrp->endpoints);
1041 1034
1042 vdev->config->del_vqs(vrp->vdev); 1035 vdev->config->del_vqs(vrp->vdev);
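The rpmsg hunk exercises both window shapes in one place: for RPMSG_ADDR_ANY it passes end == 0, which means "no upper bound -- any id >= min", while a fixed address uses the one-slot [addr, addr + 1) window shown earlier. Sketch of the unbounded case (function name is illustrative):

    #include <linux/gfp.h>
    #include <linux/idr.h>

    /* end == 0: allocate any id >= min, with no ceiling. */
    static int alloc_dynamic_addr(struct idr *endpoints, void *ept, int min)
    {
            return idr_alloc(endpoints, ept, min, 0, GFP_KERNEL);
    }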
diff --git a/drivers/scsi/bfa/bfad_im.c b/drivers/scsi/bfa/bfad_im.c
index 8f92732655c7..5864f987f206 100644
--- a/drivers/scsi/bfa/bfad_im.c
+++ b/drivers/scsi/bfa/bfad_im.c
@@ -523,20 +523,13 @@ bfad_im_scsi_host_alloc(struct bfad_s *bfad, struct bfad_im_port_s *im_port,
523 int error = 1; 523 int error = 1;
524 524
525 mutex_lock(&bfad_mutex); 525 mutex_lock(&bfad_mutex);
526 if (!idr_pre_get(&bfad_im_port_index, GFP_KERNEL)) { 526 error = idr_alloc(&bfad_im_port_index, im_port, 0, 0, GFP_KERNEL);
527 if (error < 0) {
527 mutex_unlock(&bfad_mutex); 528 mutex_unlock(&bfad_mutex);
528 printk(KERN_WARNING "idr_pre_get failure\n"); 529 printk(KERN_WARNING "idr_alloc failure\n");
529 goto out; 530 goto out;
530 } 531 }
531 532 im_port->idr_id = error;
532 error = idr_get_new(&bfad_im_port_index, im_port,
533 &im_port->idr_id);
534 if (error) {
535 mutex_unlock(&bfad_mutex);
536 printk(KERN_WARNING "idr_get_new failure\n");
537 goto out;
538 }
539
540 mutex_unlock(&bfad_mutex); 533 mutex_unlock(&bfad_mutex);
541 534
542 im_port->shost = bfad_scsi_host_alloc(im_port, bfad); 535 im_port->shost = bfad_scsi_host_alloc(im_port, bfad);
diff --git a/drivers/scsi/ch.c b/drivers/scsi/ch.c
index a15474eef5f7..2a323742ce04 100644
--- a/drivers/scsi/ch.c
+++ b/drivers/scsi/ch.c
@@ -895,7 +895,7 @@ static int ch_probe(struct device *dev)
895{ 895{
896 struct scsi_device *sd = to_scsi_device(dev); 896 struct scsi_device *sd = to_scsi_device(dev);
897 struct device *class_dev; 897 struct device *class_dev;
898 int minor, ret = -ENOMEM; 898 int ret;
899 scsi_changer *ch; 899 scsi_changer *ch;
900 900
901 if (sd->type != TYPE_MEDIUM_CHANGER) 901 if (sd->type != TYPE_MEDIUM_CHANGER)
@@ -905,22 +905,19 @@ static int ch_probe(struct device *dev)
905 if (NULL == ch) 905 if (NULL == ch)
906 return -ENOMEM; 906 return -ENOMEM;
907 907
908 if (!idr_pre_get(&ch_index_idr, GFP_KERNEL)) 908 idr_preload(GFP_KERNEL);
909 goto free_ch;
910
911 spin_lock(&ch_index_lock); 909 spin_lock(&ch_index_lock);
912 ret = idr_get_new(&ch_index_idr, ch, &minor); 910 ret = idr_alloc(&ch_index_idr, ch, 0, CH_MAX_DEVS + 1, GFP_NOWAIT);
913 spin_unlock(&ch_index_lock); 911 spin_unlock(&ch_index_lock);
912 idr_preload_end();
914 913
915 if (ret) 914 if (ret < 0) {
915 if (ret == -ENOSPC)
916 ret = -ENODEV;
916 goto free_ch; 917 goto free_ch;
917
918 if (minor > CH_MAX_DEVS) {
919 ret = -ENODEV;
920 goto remove_idr;
921 } 918 }
922 919
923 ch->minor = minor; 920 ch->minor = ret;
924 sprintf(ch->name,"ch%d",ch->minor); 921 sprintf(ch->name,"ch%d",ch->minor);
925 922
926 class_dev = device_create(ch_sysfs_class, dev, 923 class_dev = device_create(ch_sysfs_class, dev,
@@ -944,7 +941,7 @@ static int ch_probe(struct device *dev)
944 941
945 return 0; 942 return 0;
946remove_idr: 943remove_idr:
947 idr_remove(&ch_index_idr, minor); 944 idr_remove(&ch_index_idr, ch->minor);
948free_ch: 945free_ch:
949 kfree(ch); 946 kfree(ch);
950 return ret; 947 return ret;
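ch here, and sg further down with a write_lock_irqsave, need the id while holding a spinlock, where GFP_KERNEL cannot be used; idr_preload() stocks the per-cpu layer cache beforehand so the locked idr_alloc() can run with GFP_NOWAIT. The shape, with illustrative names:

    #include <linux/gfp.h>
    #include <linux/idr.h>
    #include <linux/spinlock.h>

    static DEFINE_IDR(index_idr);
    static DEFINE_SPINLOCK(index_lock);

    /* Preload outside the lock (may sleep), allocate inside it
     * without sleeping; idr_preload_end() re-enables preemption. */
    static int alloc_index(void *obj, int max_id)
    {
            int id;

            idr_preload(GFP_KERNEL);
            spin_lock(&index_lock);
            /* end is exclusive, so max_id + 1 allows ids 0..max_id */
            id = idr_alloc(&index_idr, obj, 0, max_id + 1, GFP_NOWAIT);
            spin_unlock(&index_lock);
            idr_preload_end();

            return id;
    }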
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 89ad55807012..7de4ef14698f 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -3165,14 +3165,10 @@ destroy_port(struct lpfc_vport *vport)
3165int 3165int
3166lpfc_get_instance(void) 3166lpfc_get_instance(void)
3167{ 3167{
3168 int instance = 0; 3168 int ret;
3169 3169
3170 /* Assign an unused number */ 3170 ret = idr_alloc(&lpfc_hba_index, NULL, 0, 0, GFP_KERNEL);
3171 if (!idr_pre_get(&lpfc_hba_index, GFP_KERNEL)) 3171 return ret < 0 ? -1 : ret;
3172 return -1;
3173 if (idr_get_new(&lpfc_hba_index, NULL, &instance))
3174 return -1;
3175 return instance;
3176} 3172}
3177 3173
3178/** 3174/**
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index be2c9a6561ff..9f0c46547459 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -1391,24 +1391,23 @@ static Sg_device *sg_alloc(struct gendisk *disk, struct scsi_device *scsidp)
1391 return ERR_PTR(-ENOMEM); 1391 return ERR_PTR(-ENOMEM);
1392 } 1392 }
1393 1393
1394 if (!idr_pre_get(&sg_index_idr, GFP_KERNEL)) { 1394 idr_preload(GFP_KERNEL);
1395 printk(KERN_WARNING "idr expansion Sg_device failure\n");
1396 error = -ENOMEM;
1397 goto out;
1398 }
1399
1400 write_lock_irqsave(&sg_index_lock, iflags); 1395 write_lock_irqsave(&sg_index_lock, iflags);
1401 1396
1402 error = idr_get_new(&sg_index_idr, sdp, &k); 1397 error = idr_alloc(&sg_index_idr, sdp, 0, SG_MAX_DEVS, GFP_NOWAIT);
1403 if (error) { 1398 if (error < 0) {
1404 write_unlock_irqrestore(&sg_index_lock, iflags); 1399 if (error == -ENOSPC) {
1405 printk(KERN_WARNING "idr allocation Sg_device failure: %d\n", 1400 sdev_printk(KERN_WARNING, scsidp,
1406 error); 1401 "Unable to attach sg device type=%d, minor number exceeds %d\n",
1407 goto out; 1402 scsidp->type, SG_MAX_DEVS - 1);
1403 error = -ENODEV;
1404 } else {
1405 printk(KERN_WARNING
1406 "idr allocation Sg_device failure: %d\n", error);
1407 }
1408 goto out_unlock;
1408 } 1409 }
1409 1410 k = error;
1410 if (unlikely(k >= SG_MAX_DEVS))
1411 goto overflow;
1412 1411
1413 SCSI_LOG_TIMEOUT(3, printk("sg_alloc: dev=%d \n", k)); 1412 SCSI_LOG_TIMEOUT(3, printk("sg_alloc: dev=%d \n", k));
1414 sprintf(disk->disk_name, "sg%d", k); 1413 sprintf(disk->disk_name, "sg%d", k);
@@ -1420,25 +1419,17 @@ static Sg_device *sg_alloc(struct gendisk *disk, struct scsi_device *scsidp)
1420 sdp->sg_tablesize = queue_max_segments(q); 1419 sdp->sg_tablesize = queue_max_segments(q);
1421 sdp->index = k; 1420 sdp->index = k;
1422 kref_init(&sdp->d_ref); 1421 kref_init(&sdp->d_ref);
1422 error = 0;
1423 1423
1424out_unlock:
1424 write_unlock_irqrestore(&sg_index_lock, iflags); 1425 write_unlock_irqrestore(&sg_index_lock, iflags);
1426 idr_preload_end();
1425 1427
1426 error = 0;
1427 out:
1428 if (error) { 1428 if (error) {
1429 kfree(sdp); 1429 kfree(sdp);
1430 return ERR_PTR(error); 1430 return ERR_PTR(error);
1431 } 1431 }
1432 return sdp; 1432 return sdp;
1433
1434 overflow:
1435 idr_remove(&sg_index_idr, k);
1436 write_unlock_irqrestore(&sg_index_lock, iflags);
1437 sdev_printk(KERN_WARNING, scsidp,
1438 "Unable to attach sg device type=%d, minor "
1439 "number exceeds %d\n", scsidp->type, SG_MAX_DEVS - 1);
1440 error = -ENODEV;
1441 goto out;
1442} 1433}
1443 1434
1444static int 1435static int
diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c
index 3e2b3717cb5c..86974471af68 100644
--- a/drivers/scsi/st.c
+++ b/drivers/scsi/st.c
@@ -4076,7 +4076,7 @@ static int st_probe(struct device *dev)
4076 struct st_modedef *STm; 4076 struct st_modedef *STm;
4077 struct st_partstat *STps; 4077 struct st_partstat *STps;
4078 struct st_buffer *buffer; 4078 struct st_buffer *buffer;
4079 int i, dev_num, error; 4079 int i, error;
4080 char *stp; 4080 char *stp;
4081 4081
4082 if (SDp->type != TYPE_TAPE) 4082 if (SDp->type != TYPE_TAPE)
@@ -4178,27 +4178,17 @@ static int st_probe(struct device *dev)
4178 tpnt->blksize_changed = 0; 4178 tpnt->blksize_changed = 0;
4179 mutex_init(&tpnt->lock); 4179 mutex_init(&tpnt->lock);
4180 4180
4181 if (!idr_pre_get(&st_index_idr, GFP_KERNEL)) { 4181 idr_preload(GFP_KERNEL);
4182 pr_warn("st: idr expansion failed\n");
4183 error = -ENOMEM;
4184 goto out_put_disk;
4185 }
4186
4187 spin_lock(&st_index_lock); 4182 spin_lock(&st_index_lock);
4188 error = idr_get_new(&st_index_idr, tpnt, &dev_num); 4183 error = idr_alloc(&st_index_idr, tpnt, 0, ST_MAX_TAPES + 1, GFP_NOWAIT);
4189 spin_unlock(&st_index_lock); 4184 spin_unlock(&st_index_lock);
4190 if (error) { 4185 idr_preload_end();
4186 if (error < 0) {
4191 pr_warn("st: idr allocation failed: %d\n", error); 4187 pr_warn("st: idr allocation failed: %d\n", error);
4192 goto out_put_disk; 4188 goto out_put_disk;
4193 } 4189 }
4194 4190 tpnt->index = error;
4195 if (dev_num > ST_MAX_TAPES) { 4191 sprintf(disk->disk_name, "st%d", tpnt->index);
4196 pr_err("st: Too many tape devices (max. %d).\n", ST_MAX_TAPES);
4197 goto out_put_index;
4198 }
4199
4200 tpnt->index = dev_num;
4201 sprintf(disk->disk_name, "st%d", dev_num);
4202 4192
4203 dev_set_drvdata(dev, tpnt); 4193 dev_set_drvdata(dev, tpnt);
4204 4194
@@ -4218,9 +4208,8 @@ static int st_probe(struct device *dev)
4218 4208
4219out_remove_devs: 4209out_remove_devs:
4220 remove_cdevs(tpnt); 4210 remove_cdevs(tpnt);
4221out_put_index:
4222 spin_lock(&st_index_lock); 4211 spin_lock(&st_index_lock);
4223 idr_remove(&st_index_idr, dev_num); 4212 idr_remove(&st_index_idr, tpnt->index);
4224 spin_unlock(&st_index_lock); 4213 spin_unlock(&st_index_lock);
4225out_put_disk: 4214out_put_disk:
4226 put_disk(disk); 4215 put_disk(disk);
diff --git a/drivers/staging/android/binder.c b/drivers/staging/android/binder.c
index 538ebe213129..24456a0de6b2 100644
--- a/drivers/staging/android/binder.c
+++ b/drivers/staging/android/binder.c
@@ -2880,7 +2880,6 @@ static int binder_release(struct inode *nodp, struct file *filp)
2880 2880
2881static void binder_deferred_release(struct binder_proc *proc) 2881static void binder_deferred_release(struct binder_proc *proc)
2882{ 2882{
2883 struct hlist_node *pos;
2884 struct binder_transaction *t; 2883 struct binder_transaction *t;
2885 struct rb_node *n; 2884 struct rb_node *n;
2886 int threads, nodes, incoming_refs, outgoing_refs, buffers, active_transactions, page_count; 2885 int threads, nodes, incoming_refs, outgoing_refs, buffers, active_transactions, page_count;
@@ -2924,7 +2923,7 @@ static void binder_deferred_release(struct binder_proc *proc)
2924 node->local_weak_refs = 0; 2923 node->local_weak_refs = 0;
2925 hlist_add_head(&node->dead_node, &binder_dead_nodes); 2924 hlist_add_head(&node->dead_node, &binder_dead_nodes);
2926 2925
2927 hlist_for_each_entry(ref, pos, &node->refs, node_entry) { 2926 hlist_for_each_entry(ref, &node->refs, node_entry) {
2928 incoming_refs++; 2927 incoming_refs++;
2929 if (ref->death) { 2928 if (ref->death) {
2930 death++; 2929 death++;
@@ -3156,12 +3155,11 @@ static void print_binder_thread(struct seq_file *m,
3156static void print_binder_node(struct seq_file *m, struct binder_node *node) 3155static void print_binder_node(struct seq_file *m, struct binder_node *node)
3157{ 3156{
3158 struct binder_ref *ref; 3157 struct binder_ref *ref;
3159 struct hlist_node *pos;
3160 struct binder_work *w; 3158 struct binder_work *w;
3161 int count; 3159 int count;
3162 3160
3163 count = 0; 3161 count = 0;
3164 hlist_for_each_entry(ref, pos, &node->refs, node_entry) 3162 hlist_for_each_entry(ref, &node->refs, node_entry)
3165 count++; 3163 count++;
3166 3164
3167 seq_printf(m, " node %d: u%p c%p hs %d hw %d ls %d lw %d is %d iw %d", 3165 seq_printf(m, " node %d: u%p c%p hs %d hw %d ls %d lw %d is %d iw %d",
@@ -3171,7 +3169,7 @@ static void print_binder_node(struct seq_file *m, struct binder_node *node)
3171 node->internal_strong_refs, count); 3169 node->internal_strong_refs, count);
3172 if (count) { 3170 if (count) {
3173 seq_puts(m, " proc"); 3171 seq_puts(m, " proc");
3174 hlist_for_each_entry(ref, pos, &node->refs, node_entry) 3172 hlist_for_each_entry(ref, &node->refs, node_entry)
3175 seq_printf(m, " %d", ref->proc->pid); 3173 seq_printf(m, " %d", ref->proc->pid);
3176 } 3174 }
3177 seq_puts(m, "\n"); 3175 seq_puts(m, "\n");
@@ -3369,7 +3367,6 @@ static void print_binder_proc_stats(struct seq_file *m,
3369static int binder_state_show(struct seq_file *m, void *unused) 3367static int binder_state_show(struct seq_file *m, void *unused)
3370{ 3368{
3371 struct binder_proc *proc; 3369 struct binder_proc *proc;
3372 struct hlist_node *pos;
3373 struct binder_node *node; 3370 struct binder_node *node;
3374 int do_lock = !binder_debug_no_lock; 3371 int do_lock = !binder_debug_no_lock;
3375 3372
@@ -3380,10 +3377,10 @@ static int binder_state_show(struct seq_file *m, void *unused)
3380 3377
3381 if (!hlist_empty(&binder_dead_nodes)) 3378 if (!hlist_empty(&binder_dead_nodes))
3382 seq_puts(m, "dead nodes:\n"); 3379 seq_puts(m, "dead nodes:\n");
3383 hlist_for_each_entry(node, pos, &binder_dead_nodes, dead_node) 3380 hlist_for_each_entry(node, &binder_dead_nodes, dead_node)
3384 print_binder_node(m, node); 3381 print_binder_node(m, node);
3385 3382
3386 hlist_for_each_entry(proc, pos, &binder_procs, proc_node) 3383 hlist_for_each_entry(proc, &binder_procs, proc_node)
3387 print_binder_proc(m, proc, 1); 3384 print_binder_proc(m, proc, 1);
3388 if (do_lock) 3385 if (do_lock)
3389 binder_unlock(__func__); 3386 binder_unlock(__func__);
@@ -3393,7 +3390,6 @@ static int binder_state_show(struct seq_file *m, void *unused)
3393static int binder_stats_show(struct seq_file *m, void *unused) 3390static int binder_stats_show(struct seq_file *m, void *unused)
3394{ 3391{
3395 struct binder_proc *proc; 3392 struct binder_proc *proc;
3396 struct hlist_node *pos;
3397 int do_lock = !binder_debug_no_lock; 3393 int do_lock = !binder_debug_no_lock;
3398 3394
3399 if (do_lock) 3395 if (do_lock)
@@ -3403,7 +3399,7 @@ static int binder_stats_show(struct seq_file *m, void *unused)
3403 3399
3404 print_binder_stats(m, "", &binder_stats); 3400 print_binder_stats(m, "", &binder_stats);
3405 3401
3406 hlist_for_each_entry(proc, pos, &binder_procs, proc_node) 3402 hlist_for_each_entry(proc, &binder_procs, proc_node)
3407 print_binder_proc_stats(m, proc); 3403 print_binder_proc_stats(m, proc);
3408 if (do_lock) 3404 if (do_lock)
3409 binder_unlock(__func__); 3405 binder_unlock(__func__);
@@ -3413,14 +3409,13 @@ static int binder_stats_show(struct seq_file *m, void *unused)
3413static int binder_transactions_show(struct seq_file *m, void *unused) 3409static int binder_transactions_show(struct seq_file *m, void *unused)
3414{ 3410{
3415 struct binder_proc *proc; 3411 struct binder_proc *proc;
3416 struct hlist_node *pos;
3417 int do_lock = !binder_debug_no_lock; 3412 int do_lock = !binder_debug_no_lock;
3418 3413
3419 if (do_lock) 3414 if (do_lock)
3420 binder_lock(__func__); 3415 binder_lock(__func__);
3421 3416
3422 seq_puts(m, "binder transactions:\n"); 3417 seq_puts(m, "binder transactions:\n");
3423 hlist_for_each_entry(proc, pos, &binder_procs, proc_node) 3418 hlist_for_each_entry(proc, &binder_procs, proc_node)
3424 print_binder_proc(m, proc, 0); 3419 print_binder_proc(m, proc, 0);
3425 if (do_lock) 3420 if (do_lock)
3426 binder_unlock(__func__); 3421 binder_unlock(__func__);
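The binder changes above belong to the tree-wide "hlist: drop the node parameter from iterators" conversion in this series: hlist_for_each_entry() now derives its cursor from the entry pointer itself, so the separate struct hlist_node *pos variable disappears. A small sketch of the new form, with illustrative types:

#include <linux/list.h>

struct item {
        int value;
        struct hlist_node node;
};

static int count_items(struct hlist_head *head)
{
        struct item *it;
        int count = 0;

        /* Old form was hlist_for_each_entry(it, pos, head, node);
         * the new form drops the struct hlist_node *pos cursor. */
        hlist_for_each_entry(it, head, node)
                count++;

        return count;
}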
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
index 23a98e658306..9435a3d369a7 100644
--- a/drivers/target/iscsi/iscsi_target.c
+++ b/drivers/target/iscsi/iscsi_target.c
@@ -144,23 +144,24 @@ struct iscsi_tiqn *iscsit_add_tiqn(unsigned char *buf)
144 spin_lock_init(&tiqn->login_stats.lock); 144 spin_lock_init(&tiqn->login_stats.lock);
145 spin_lock_init(&tiqn->logout_stats.lock); 145 spin_lock_init(&tiqn->logout_stats.lock);
146 146
147 if (!idr_pre_get(&tiqn_idr, GFP_KERNEL)) {
148 pr_err("idr_pre_get() for tiqn_idr failed\n");
149 kfree(tiqn);
150 return ERR_PTR(-ENOMEM);
151 }
152 tiqn->tiqn_state = TIQN_STATE_ACTIVE; 147 tiqn->tiqn_state = TIQN_STATE_ACTIVE;
153 148
149 idr_preload(GFP_KERNEL);
154 spin_lock(&tiqn_lock); 150 spin_lock(&tiqn_lock);
155 ret = idr_get_new(&tiqn_idr, NULL, &tiqn->tiqn_index); 151
152 ret = idr_alloc(&tiqn_idr, NULL, 0, 0, GFP_NOWAIT);
156 if (ret < 0) { 153 if (ret < 0) {
157 pr_err("idr_get_new() failed for tiqn->tiqn_index\n"); 154 pr_err("idr_alloc() failed for tiqn->tiqn_index\n");
158 spin_unlock(&tiqn_lock); 155 spin_unlock(&tiqn_lock);
156 idr_preload_end();
159 kfree(tiqn); 157 kfree(tiqn);
160 return ERR_PTR(ret); 158 return ERR_PTR(ret);
161 } 159 }
160 tiqn->tiqn_index = ret;
162 list_add_tail(&tiqn->tiqn_list, &g_tiqn_list); 161 list_add_tail(&tiqn->tiqn_list, &g_tiqn_list);
162
163 spin_unlock(&tiqn_lock); 163 spin_unlock(&tiqn_lock);
164 idr_preload_end();
164 165
165 pr_debug("CORE[0] - Added iSCSI Target IQN: %s\n", tiqn->tiqn); 166 pr_debug("CORE[0] - Added iSCSI Target IQN: %s\n", tiqn->tiqn);
166 167
diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c
index fdb632f0ab85..2535d4d46c0e 100644
--- a/drivers/target/iscsi/iscsi_target_login.c
+++ b/drivers/target/iscsi/iscsi_target_login.c
@@ -247,19 +247,16 @@ static int iscsi_login_zero_tsih_s1(
247 spin_lock_init(&sess->session_usage_lock); 247 spin_lock_init(&sess->session_usage_lock);
248 spin_lock_init(&sess->ttt_lock); 248 spin_lock_init(&sess->ttt_lock);
249 249
250 if (!idr_pre_get(&sess_idr, GFP_KERNEL)) { 250 idr_preload(GFP_KERNEL);
251 pr_err("idr_pre_get() for sess_idr failed\n");
252 iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
253 ISCSI_LOGIN_STATUS_NO_RESOURCES);
254 kfree(sess);
255 return -ENOMEM;
256 }
257 spin_lock_bh(&sess_idr_lock); 251 spin_lock_bh(&sess_idr_lock);
258 ret = idr_get_new(&sess_idr, NULL, &sess->session_index); 252 ret = idr_alloc(&sess_idr, NULL, 0, 0, GFP_NOWAIT);
253 if (ret >= 0)
254 sess->session_index = ret;
259 spin_unlock_bh(&sess_idr_lock); 255 spin_unlock_bh(&sess_idr_lock);
256 idr_preload_end();
260 257
261 if (ret < 0) { 258 if (ret < 0) {
262 pr_err("idr_get_new() for sess_idr failed\n"); 259 pr_err("idr_alloc() for sess_idr failed\n");
263 iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR, 260 iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
264 ISCSI_LOGIN_STATUS_NO_RESOURCES); 261 ISCSI_LOGIN_STATUS_NO_RESOURCES);
265 kfree(sess); 262 kfree(sess);
diff --git a/drivers/target/tcm_fc/tfc_sess.c b/drivers/target/tcm_fc/tfc_sess.c
index 6659dd36e806..113f33598b9f 100644
--- a/drivers/target/tcm_fc/tfc_sess.c
+++ b/drivers/target/tcm_fc/tfc_sess.c
@@ -169,7 +169,6 @@ static struct ft_sess *ft_sess_get(struct fc_lport *lport, u32 port_id)
169{ 169{
170 struct ft_tport *tport; 170 struct ft_tport *tport;
171 struct hlist_head *head; 171 struct hlist_head *head;
172 struct hlist_node *pos;
173 struct ft_sess *sess; 172 struct ft_sess *sess;
174 173
175 rcu_read_lock(); 174 rcu_read_lock();
@@ -178,7 +177,7 @@ static struct ft_sess *ft_sess_get(struct fc_lport *lport, u32 port_id)
178 goto out; 177 goto out;
179 178
180 head = &tport->hash[ft_sess_hash(port_id)]; 179 head = &tport->hash[ft_sess_hash(port_id)];
181 hlist_for_each_entry_rcu(sess, pos, head, hash) { 180 hlist_for_each_entry_rcu(sess, head, hash) {
182 if (sess->port_id == port_id) { 181 if (sess->port_id == port_id) {
183 kref_get(&sess->kref); 182 kref_get(&sess->kref);
184 rcu_read_unlock(); 183 rcu_read_unlock();
@@ -201,10 +200,9 @@ static struct ft_sess *ft_sess_create(struct ft_tport *tport, u32 port_id,
201{ 200{
202 struct ft_sess *sess; 201 struct ft_sess *sess;
203 struct hlist_head *head; 202 struct hlist_head *head;
204 struct hlist_node *pos;
205 203
206 head = &tport->hash[ft_sess_hash(port_id)]; 204 head = &tport->hash[ft_sess_hash(port_id)];
207 hlist_for_each_entry_rcu(sess, pos, head, hash) 205 hlist_for_each_entry_rcu(sess, head, hash)
208 if (sess->port_id == port_id) 206 if (sess->port_id == port_id)
209 return sess; 207 return sess;
210 208
@@ -253,11 +251,10 @@ static void ft_sess_unhash(struct ft_sess *sess)
253static struct ft_sess *ft_sess_delete(struct ft_tport *tport, u32 port_id) 251static struct ft_sess *ft_sess_delete(struct ft_tport *tport, u32 port_id)
254{ 252{
255 struct hlist_head *head; 253 struct hlist_head *head;
256 struct hlist_node *pos;
257 struct ft_sess *sess; 254 struct ft_sess *sess;
258 255
259 head = &tport->hash[ft_sess_hash(port_id)]; 256 head = &tport->hash[ft_sess_hash(port_id)];
260 hlist_for_each_entry_rcu(sess, pos, head, hash) { 257 hlist_for_each_entry_rcu(sess, head, hash) {
261 if (sess->port_id == port_id) { 258 if (sess->port_id == port_id) {
262 ft_sess_unhash(sess); 259 ft_sess_unhash(sess);
263 return sess; 260 return sess;
@@ -273,12 +270,11 @@ static struct ft_sess *ft_sess_delete(struct ft_tport *tport, u32 port_id)
273static void ft_sess_delete_all(struct ft_tport *tport) 270static void ft_sess_delete_all(struct ft_tport *tport)
274{ 271{
275 struct hlist_head *head; 272 struct hlist_head *head;
276 struct hlist_node *pos;
277 struct ft_sess *sess; 273 struct ft_sess *sess;
278 274
279 for (head = tport->hash; 275 for (head = tport->hash;
280 head < &tport->hash[FT_SESS_HASH_SIZE]; head++) { 276 head < &tport->hash[FT_SESS_HASH_SIZE]; head++) {
281 hlist_for_each_entry_rcu(sess, pos, head, hash) { 277 hlist_for_each_entry_rcu(sess, head, hash) {
282 ft_sess_unhash(sess); 278 ft_sess_unhash(sess);
283 transport_deregister_session_configfs(sess->se_sess); 279 transport_deregister_session_configfs(sess->se_sess);
284 ft_sess_put(sess); /* release from table */ 280 ft_sess_put(sess); /* release from table */
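The same one-argument reduction applies to the RCU variant used throughout tfc_sess.c. A sketch of an RCU-protected hash lookup in the new style (struct sess and sess_find are illustrative):

#include <linux/rculist.h>
#include <linux/types.h>

struct sess {
        u32 port_id;
        struct hlist_node hash;
};

static struct sess *sess_find(struct hlist_head *head, u32 port_id)
{
        struct sess *s;

        rcu_read_lock();
        hlist_for_each_entry_rcu(s, head, hash) {       /* no pos cursor */
                if (s->port_id == port_id) {
                        rcu_read_unlock();
                        return s;
                }
        }
        rcu_read_unlock();
        return NULL;
}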
diff --git a/drivers/thermal/cpu_cooling.c b/drivers/thermal/cpu_cooling.c
index 836828e29a87..c33fa5315d6b 100644
--- a/drivers/thermal/cpu_cooling.c
+++ b/drivers/thermal/cpu_cooling.c
@@ -73,21 +73,14 @@ static struct cpufreq_cooling_device *notify_device;
73 */ 73 */
74static int get_idr(struct idr *idr, int *id) 74static int get_idr(struct idr *idr, int *id)
75{ 75{
76 int err; 76 int ret;
77again:
78 if (unlikely(idr_pre_get(idr, GFP_KERNEL) == 0))
79 return -ENOMEM;
80 77
81 mutex_lock(&cooling_cpufreq_lock); 78 mutex_lock(&cooling_cpufreq_lock);
82 err = idr_get_new(idr, NULL, id); 79 ret = idr_alloc(idr, NULL, 0, 0, GFP_KERNEL);
83 mutex_unlock(&cooling_cpufreq_lock); 80 mutex_unlock(&cooling_cpufreq_lock);
84 81 if (unlikely(ret < 0))
85 if (unlikely(err == -EAGAIN)) 82 return ret;
86 goto again; 83 *id = ret;
87 else if (unlikely(err))
88 return err;
89
90 *id = *id & MAX_IDR_MASK;
91 return 0; 84 return 0;
92} 85}
93 86
diff --git a/drivers/thermal/thermal_sys.c b/drivers/thermal/thermal_sys.c
index 8c8ce806180f..84e95f32cdb6 100644
--- a/drivers/thermal/thermal_sys.c
+++ b/drivers/thermal/thermal_sys.c
@@ -132,23 +132,16 @@ EXPORT_SYMBOL_GPL(thermal_unregister_governor);
132 132
133static int get_idr(struct idr *idr, struct mutex *lock, int *id) 133static int get_idr(struct idr *idr, struct mutex *lock, int *id)
134{ 134{
135 int err; 135 int ret;
136
137again:
138 if (unlikely(idr_pre_get(idr, GFP_KERNEL) == 0))
139 return -ENOMEM;
140 136
141 if (lock) 137 if (lock)
142 mutex_lock(lock); 138 mutex_lock(lock);
143 err = idr_get_new(idr, NULL, id); 139 ret = idr_alloc(idr, NULL, 0, 0, GFP_KERNEL);
144 if (lock) 140 if (lock)
145 mutex_unlock(lock); 141 mutex_unlock(lock);
146 if (unlikely(err == -EAGAIN)) 142 if (unlikely(ret < 0))
147 goto again; 143 return ret;
148 else if (unlikely(err)) 144 *id = ret;
149 return err;
150
151 *id = *id & MAX_IDR_MASK;
152 return 0; 145 return 0;
153} 146}
154 147
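Both get_idr() helpers above lose the "-EAGAIN, goto again" retry loop and the MAX_IDR_MASK masking: idr_alloc() retries internally and never hands back an ID outside the requested range. Because a mutex rather than a spinlock protects these IDRs, GFP_KERNEL can be passed directly and no preload bracket is needed. A minimal sketch of the resulting shape (names illustrative):

#include <linux/idr.h>
#include <linux/mutex.h>

static int example_get_idr(struct idr *idr, struct mutex *lock, int *id)
{
        int ret;

        mutex_lock(lock);
        ret = idr_alloc(idr, NULL, 0, 0, GFP_KERNEL);   /* may sleep */
        mutex_unlock(lock);
        if (ret < 0)
                return ret;

        *id = ret;
        return 0;
}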
diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c
index 5110f367f1f1..c8b926291e28 100644
--- a/drivers/uio/uio.c
+++ b/drivers/uio/uio.c
@@ -369,26 +369,15 @@ static void uio_dev_del_attributes(struct uio_device *idev)
369static int uio_get_minor(struct uio_device *idev) 369static int uio_get_minor(struct uio_device *idev)
370{ 370{
371 int retval = -ENOMEM; 371 int retval = -ENOMEM;
372 int id;
373 372
374 mutex_lock(&minor_lock); 373 mutex_lock(&minor_lock);
375 if (idr_pre_get(&uio_idr, GFP_KERNEL) == 0) 374 retval = idr_alloc(&uio_idr, idev, 0, UIO_MAX_DEVICES, GFP_KERNEL);
376 goto exit; 375 if (retval >= 0) {
377 376 idev->minor = retval;
378 retval = idr_get_new(&uio_idr, idev, &id); 377 } else if (retval == -ENOSPC) {
379 if (retval < 0) {
380 if (retval == -EAGAIN)
381 retval = -ENOMEM;
382 goto exit;
383 }
384 if (id < UIO_MAX_DEVICES) {
385 idev->minor = id;
386 } else {
387 dev_err(idev->dev, "too many uio devices\n"); 378 dev_err(idev->dev, "too many uio devices\n");
388 retval = -EINVAL; 379 retval = -EINVAL;
389 idr_remove(&uio_idr, id);
390 } 380 }
391exit:
392 mutex_unlock(&minor_lock); 381 mutex_unlock(&minor_lock);
393 return retval; 382 return retval;
394} 383}
diff --git a/drivers/vfio/vfio.c b/drivers/vfio/vfio.c
index 28e2d5b2c0c7..fcc12f3e60a3 100644
--- a/drivers/vfio/vfio.c
+++ b/drivers/vfio/vfio.c
@@ -139,23 +139,8 @@ EXPORT_SYMBOL_GPL(vfio_unregister_iommu_driver);
139 */ 139 */
140static int vfio_alloc_group_minor(struct vfio_group *group) 140static int vfio_alloc_group_minor(struct vfio_group *group)
141{ 141{
142 int ret, minor;
143
144again:
145 if (unlikely(idr_pre_get(&vfio.group_idr, GFP_KERNEL) == 0))
146 return -ENOMEM;
147
148 /* index 0 is used by /dev/vfio/vfio */ 142 /* index 0 is used by /dev/vfio/vfio */
149 ret = idr_get_new_above(&vfio.group_idr, group, 1, &minor); 143 return idr_alloc(&vfio.group_idr, group, 1, MINORMASK + 1, GFP_KERNEL);
150 if (ret == -EAGAIN)
151 goto again;
152 if (ret || minor > MINORMASK) {
153 if (minor > MINORMASK)
154 idr_remove(&vfio.group_idr, minor);
155 return -ENOSPC;
156 }
157
158 return minor;
159} 144}
160 145
161static void vfio_free_group_minor(int minor) 146static void vfio_free_group_minor(int minor)
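The vfio helper now reduces to a single call: idr_alloc() allocates from the half-open range [start, end), so passing 1 reserves index 0 for /dev/vfio/vfio and MINORMASK + 1 bounds the minors, replacing idr_get_new_above() plus the manual overflow check. An illustrative sketch (MINORMASK comes from <linux/kdev_t.h>):

#include <linux/idr.h>
#include <linux/kdev_t.h>

static int example_alloc_group_minor(struct idr *idr, void *group)
{
        /* IDs 1..MINORMASK; 0 stays reserved; -ENOSPC when the range
         * is full, other negative values are allocation errors. */
        return idr_alloc(idr, group, 1, MINORMASK + 1, GFP_KERNEL);
}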
diff --git a/drivers/video/backlight/Kconfig b/drivers/video/backlight/Kconfig
index be27b551473f..db10d0120d2b 100644
--- a/drivers/video/backlight/Kconfig
+++ b/drivers/video/backlight/Kconfig
@@ -384,6 +384,12 @@ config BACKLIGHT_LP855X
384 This supports TI LP8550, LP8551, LP8552, LP8553, LP8556 and LP8557 384 This supports TI LP8550, LP8551, LP8552, LP8553, LP8556 and LP8557
385 backlight driver. 385 backlight driver.
386 386
387config BACKLIGHT_LP8788
388 tristate "Backlight driver for TI LP8788 MFD"
389 depends on BACKLIGHT_CLASS_DEVICE && MFD_LP8788
390 help
391 This supports TI LP8788 backlight driver.
392
387config BACKLIGHT_OT200 393config BACKLIGHT_OT200
388 tristate "Backlight driver for ot200 visualisation device" 394 tristate "Backlight driver for ot200 visualisation device"
389 depends on BACKLIGHT_CLASS_DEVICE && CS5535_MFGPT && GPIO_CS5535 395 depends on BACKLIGHT_CLASS_DEVICE && CS5535_MFGPT && GPIO_CS5535
diff --git a/drivers/video/backlight/Makefile b/drivers/video/backlight/Makefile
index 4606c218e8e4..96c4d620c5ce 100644
--- a/drivers/video/backlight/Makefile
+++ b/drivers/video/backlight/Makefile
@@ -38,6 +38,7 @@ obj-$(CONFIG_BACKLIGHT_LM3630) += lm3630_bl.o
38obj-$(CONFIG_BACKLIGHT_LM3639) += lm3639_bl.o 38obj-$(CONFIG_BACKLIGHT_LM3639) += lm3639_bl.o
39obj-$(CONFIG_BACKLIGHT_LOCOMO) += locomolcd.o 39obj-$(CONFIG_BACKLIGHT_LOCOMO) += locomolcd.o
40obj-$(CONFIG_BACKLIGHT_LP855X) += lp855x_bl.o 40obj-$(CONFIG_BACKLIGHT_LP855X) += lp855x_bl.o
41obj-$(CONFIG_BACKLIGHT_LP8788) += lp8788_bl.o
41obj-$(CONFIG_BACKLIGHT_MAX8925) += max8925_bl.o 42obj-$(CONFIG_BACKLIGHT_MAX8925) += max8925_bl.o
42obj-$(CONFIG_BACKLIGHT_OMAP1) += omap1_bl.o 43obj-$(CONFIG_BACKLIGHT_OMAP1) += omap1_bl.o
43obj-$(CONFIG_BACKLIGHT_OT200) += ot200_bl.o 44obj-$(CONFIG_BACKLIGHT_OT200) += ot200_bl.o
diff --git a/drivers/video/backlight/ams369fg06.c b/drivers/video/backlight/ams369fg06.c
index d29e49443f29..c02aa2c2575a 100644
--- a/drivers/video/backlight/ams369fg06.c
+++ b/drivers/video/backlight/ams369fg06.c
@@ -317,10 +317,7 @@ static int ams369fg06_power_on(struct ams369fg06 *lcd)
317 pd = lcd->lcd_pd; 317 pd = lcd->lcd_pd;
318 bd = lcd->bd; 318 bd = lcd->bd;
319 319
320 if (!pd->power_on) { 320 if (pd->power_on) {
321 dev_err(lcd->dev, "power_on is NULL.\n");
322 return -EINVAL;
323 } else {
324 pd->power_on(lcd->ld, 1); 321 pd->power_on(lcd->ld, 1);
325 msleep(pd->power_on_delay); 322 msleep(pd->power_on_delay);
326 } 323 }
@@ -370,7 +367,8 @@ static int ams369fg06_power_off(struct ams369fg06 *lcd)
370 367
371 msleep(pd->power_off_delay); 368 msleep(pd->power_off_delay);
372 369
373 pd->power_on(lcd->ld, 0); 370 if (pd->power_on)
371 pd->power_on(lcd->ld, 0);
374 372
375 return 0; 373 return 0;
376} 374}
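The ams369fg06 fix inverts a NULL test that made power-on bail out whenever the board did supply a power_on callback, and adds the missing guard on the power-off path. The resulting pattern, sketched with an illustrative pdata type: treat the platform callback as optional and invoke it only when present.

#include <linux/delay.h>

struct panel_pdata {                    /* illustrative stand-in */
        void (*power_on)(void *dev, int enable);
        int power_on_delay;
};

static void panel_set_power(struct panel_pdata *pd, void *dev, int enable)
{
        if (!pd->power_on)              /* callback is optional */
                return;

        pd->power_on(dev, enable);
        if (enable)
                msleep(pd->power_on_delay);
}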
diff --git a/drivers/video/backlight/lp8788_bl.c b/drivers/video/backlight/lp8788_bl.c
new file mode 100644
index 000000000000..4bb8b4f140cf
--- /dev/null
+++ b/drivers/video/backlight/lp8788_bl.c
@@ -0,0 +1,333 @@
1/*
2 * TI LP8788 MFD - backlight driver
3 *
4 * Copyright 2012 Texas Instruments
5 *
6 * Author: Milo(Woogyom) Kim <milo.kim@ti.com>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 *
12 */
13
14#include <linux/backlight.h>
15#include <linux/err.h>
16#include <linux/mfd/lp8788.h>
17#include <linux/module.h>
18#include <linux/platform_device.h>
19#include <linux/pwm.h>
20#include <linux/slab.h>
21
22/* Register address */
23#define LP8788_BL_CONFIG 0x96
24#define LP8788_BL_EN BIT(0)
25#define LP8788_BL_PWM_INPUT_EN BIT(5)
26#define LP8788_BL_FULLSCALE_SHIFT 2
27#define LP8788_BL_DIM_MODE_SHIFT 1
28#define LP8788_BL_PWM_POLARITY_SHIFT 6
29
30#define LP8788_BL_BRIGHTNESS 0x97
31
32#define LP8788_BL_RAMP 0x98
33#define LP8788_BL_RAMP_RISE_SHIFT 4
34
35#define MAX_BRIGHTNESS 127
36#define DEFAULT_BL_NAME "lcd-backlight"
37
38struct lp8788_bl_config {
39 enum lp8788_bl_ctrl_mode bl_mode;
40 enum lp8788_bl_dim_mode dim_mode;
41 enum lp8788_bl_full_scale_current full_scale;
42 enum lp8788_bl_ramp_step rise_time;
43 enum lp8788_bl_ramp_step fall_time;
44 enum pwm_polarity pwm_pol;
45};
46
47struct lp8788_bl {
48 struct lp8788 *lp;
49 struct backlight_device *bl_dev;
50 struct lp8788_backlight_platform_data *pdata;
51 enum lp8788_bl_ctrl_mode mode;
52 struct pwm_device *pwm;
53};
54
55struct lp8788_bl_config default_bl_config = {
56 .bl_mode = LP8788_BL_REGISTER_ONLY,
57 .dim_mode = LP8788_DIM_EXPONENTIAL,
58 .full_scale = LP8788_FULLSCALE_1900uA,
59 .rise_time = LP8788_RAMP_8192us,
60 .fall_time = LP8788_RAMP_8192us,
61 .pwm_pol = PWM_POLARITY_NORMAL,
62};
63
64static inline bool is_brightness_ctrl_by_pwm(enum lp8788_bl_ctrl_mode mode)
65{
66 return (mode == LP8788_BL_COMB_PWM_BASED);
67}
68
69static inline bool is_brightness_ctrl_by_register(enum lp8788_bl_ctrl_mode mode)
70{
71 return (mode == LP8788_BL_REGISTER_ONLY ||
72 mode == LP8788_BL_COMB_REGISTER_BASED);
73}
74
75static int lp8788_backlight_configure(struct lp8788_bl *bl)
76{
77 struct lp8788_backlight_platform_data *pdata = bl->pdata;
78 struct lp8788_bl_config *cfg = &default_bl_config;
79 int ret;
80 u8 val;
81
82 /*
83 * Update chip configuration if platform data exists,
84 * otherwise use the default settings.
85 */
86 if (pdata) {
87 cfg->bl_mode = pdata->bl_mode;
88 cfg->dim_mode = pdata->dim_mode;
89 cfg->full_scale = pdata->full_scale;
90 cfg->rise_time = pdata->rise_time;
91 cfg->fall_time = pdata->fall_time;
92 cfg->pwm_pol = pdata->pwm_pol;
93 }
94
95 /* Brightness ramp up/down */
96 val = (cfg->rise_time << LP8788_BL_RAMP_RISE_SHIFT) | cfg->fall_time;
97 ret = lp8788_write_byte(bl->lp, LP8788_BL_RAMP, val);
98 if (ret)
99 return ret;
100
101 /* Fullscale current setting */
102 val = (cfg->full_scale << LP8788_BL_FULLSCALE_SHIFT) |
103 (cfg->dim_mode << LP8788_BL_DIM_MODE_SHIFT);
104
105 /* Brightness control mode */
106 switch (cfg->bl_mode) {
107 case LP8788_BL_REGISTER_ONLY:
108 val |= LP8788_BL_EN;
109 break;
110 case LP8788_BL_COMB_PWM_BASED:
111 case LP8788_BL_COMB_REGISTER_BASED:
112 val |= LP8788_BL_EN | LP8788_BL_PWM_INPUT_EN |
113 (cfg->pwm_pol << LP8788_BL_PWM_POLARITY_SHIFT);
114 break;
115 default:
116 dev_err(bl->lp->dev, "invalid mode: %d\n", cfg->bl_mode);
117 return -EINVAL;
118 }
119
120 bl->mode = cfg->bl_mode;
121
122 return lp8788_write_byte(bl->lp, LP8788_BL_CONFIG, val);
123}
124
125static void lp8788_pwm_ctrl(struct lp8788_bl *bl, int br, int max_br)
126{
127 unsigned int period;
128 unsigned int duty;
129 struct device *dev;
130 struct pwm_device *pwm;
131
132 if (!bl->pdata)
133 return;
134
135 period = bl->pdata->period_ns;
136 duty = br * period / max_br;
137 dev = bl->lp->dev;
138
139 /* request PWM device with the consumer name */
140 if (!bl->pwm) {
141 pwm = devm_pwm_get(dev, LP8788_DEV_BACKLIGHT);
142 if (IS_ERR(pwm)) {
143 dev_err(dev, "can not get PWM device\n");
144 return;
145 }
146
147 bl->pwm = pwm;
148 }
149
150 pwm_config(bl->pwm, duty, period);
151 if (duty)
152 pwm_enable(bl->pwm);
153 else
154 pwm_disable(bl->pwm);
155}
156
157static int lp8788_bl_update_status(struct backlight_device *bl_dev)
158{
159 struct lp8788_bl *bl = bl_get_data(bl_dev);
160 enum lp8788_bl_ctrl_mode mode = bl->mode;
161
162 if (bl_dev->props.state & BL_CORE_SUSPENDED)
163 bl_dev->props.brightness = 0;
164
165 if (is_brightness_ctrl_by_pwm(mode)) {
166 int brt = bl_dev->props.brightness;
167 int max = bl_dev->props.max_brightness;
168
169 lp8788_pwm_ctrl(bl, brt, max);
170 } else if (is_brightness_ctrl_by_register(mode)) {
171 u8 brt = bl_dev->props.brightness;
172
173 lp8788_write_byte(bl->lp, LP8788_BL_BRIGHTNESS, brt);
174 }
175
176 return 0;
177}
178
179static int lp8788_bl_get_brightness(struct backlight_device *bl_dev)
180{
181 return bl_dev->props.brightness;
182}
183
184static const struct backlight_ops lp8788_bl_ops = {
185 .options = BL_CORE_SUSPENDRESUME,
186 .update_status = lp8788_bl_update_status,
187 .get_brightness = lp8788_bl_get_brightness,
188};
189
190static int lp8788_backlight_register(struct lp8788_bl *bl)
191{
192 struct backlight_device *bl_dev;
193 struct backlight_properties props;
194 struct lp8788_backlight_platform_data *pdata = bl->pdata;
195 int init_brt;
196 char *name;
197
198 props.type = BACKLIGHT_PLATFORM;
199 props.max_brightness = MAX_BRIGHTNESS;
200
201 /* Initial brightness */
202 if (pdata)
203 init_brt = min_t(int, pdata->initial_brightness,
204 props.max_brightness);
205 else
206 init_brt = 0;
207
208 props.brightness = init_brt;
209
210 /* Backlight device name */
211 if (!pdata || !pdata->name)
212 name = DEFAULT_BL_NAME;
213 else
214 name = pdata->name;
215
216 bl_dev = backlight_device_register(name, bl->lp->dev, bl,
217 &lp8788_bl_ops, &props);
218 if (IS_ERR(bl_dev))
219 return PTR_ERR(bl_dev);
220
221 bl->bl_dev = bl_dev;
222
223 return 0;
224}
225
226static void lp8788_backlight_unregister(struct lp8788_bl *bl)
227{
228 struct backlight_device *bl_dev = bl->bl_dev;
229
230 if (bl_dev)
231 backlight_device_unregister(bl_dev);
232}
233
234static ssize_t lp8788_get_bl_ctl_mode(struct device *dev,
235 struct device_attribute *attr, char *buf)
236{
237 struct lp8788_bl *bl = dev_get_drvdata(dev);
238 enum lp8788_bl_ctrl_mode mode = bl->mode;
239 char *strmode;
240
241 if (is_brightness_ctrl_by_pwm(mode))
242 strmode = "PWM based";
243 else if (is_brightness_ctrl_by_register(mode))
244 strmode = "Register based";
245 else
246 strmode = "Invalid mode";
247
248 return scnprintf(buf, PAGE_SIZE, "%s\n", strmode);
249}
250
251static DEVICE_ATTR(bl_ctl_mode, S_IRUGO, lp8788_get_bl_ctl_mode, NULL);
252
253static struct attribute *lp8788_attributes[] = {
254 &dev_attr_bl_ctl_mode.attr,
255 NULL,
256};
257
258static const struct attribute_group lp8788_attr_group = {
259 .attrs = lp8788_attributes,
260};
261
262static int lp8788_backlight_probe(struct platform_device *pdev)
263{
264 struct lp8788 *lp = dev_get_drvdata(pdev->dev.parent);
265 struct lp8788_bl *bl;
266 int ret;
267
268 bl = devm_kzalloc(lp->dev, sizeof(struct lp8788_bl), GFP_KERNEL);
269 if (!bl)
270 return -ENOMEM;
271
272 bl->lp = lp;
273 if (lp->pdata)
274 bl->pdata = lp->pdata->bl_pdata;
275
276 platform_set_drvdata(pdev, bl);
277
278 ret = lp8788_backlight_configure(bl);
279 if (ret) {
280 dev_err(lp->dev, "backlight config err: %d\n", ret);
281 goto err_dev;
282 }
283
284 ret = lp8788_backlight_register(bl);
285 if (ret) {
286 dev_err(lp->dev, "register backlight err: %d\n", ret);
287 goto err_dev;
288 }
289
290 ret = sysfs_create_group(&pdev->dev.kobj, &lp8788_attr_group);
291 if (ret) {
292 dev_err(lp->dev, "register sysfs err: %d\n", ret);
293 goto err_sysfs;
294 }
295
296 backlight_update_status(bl->bl_dev);
297
298 return 0;
299
300err_sysfs:
301 lp8788_backlight_unregister(bl);
302err_dev:
303 return ret;
304}
305
306static int lp8788_backlight_remove(struct platform_device *pdev)
307{
308 struct lp8788_bl *bl = platform_get_drvdata(pdev);
309 struct backlight_device *bl_dev = bl->bl_dev;
310
311 bl_dev->props.brightness = 0;
312 backlight_update_status(bl_dev);
313 sysfs_remove_group(&pdev->dev.kobj, &lp8788_attr_group);
314 lp8788_backlight_unregister(bl);
315 platform_set_drvdata(pdev, NULL);
316
317 return 0;
318}
319
320static struct platform_driver lp8788_bl_driver = {
321 .probe = lp8788_backlight_probe,
322 .remove = lp8788_backlight_remove,
323 .driver = {
324 .name = LP8788_DEV_BACKLIGHT,
325 .owner = THIS_MODULE,
326 },
327};
328module_platform_driver(lp8788_bl_driver);
329
330MODULE_DESCRIPTION("Texas Instruments LP8788 Backlight Driver");
331MODULE_AUTHOR("Milo Kim");
332MODULE_LICENSE("GPL");
333MODULE_ALIAS("platform:lp8788-backlight");
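For context, lp8788_backlight_configure() above falls back to default_bl_config unless the MFD core hands it platform data. A hypothetical board-file snippet wiring that data (every value below is an illustrative assumption, not taken from a real board):

#include <linux/mfd/lp8788.h>
#include <linux/pwm.h>

static struct lp8788_backlight_platform_data board_bl_pdata = {
        .name               = "lcd-backlight",
        .initial_brightness = 64,
        .bl_mode            = LP8788_BL_COMB_PWM_BASED,
        .dim_mode           = LP8788_DIM_EXPONENTIAL,
        .full_scale         = LP8788_FULLSCALE_1900uA,
        .rise_time          = LP8788_RAMP_8192us,
        .fall_time          = LP8788_RAMP_8192us,
        .pwm_pol            = PWM_POLARITY_NORMAL,
        .period_ns          = 5000000,  /* 200 Hz PWM period, assumed */
};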
diff --git a/drivers/w1/slaves/Kconfig b/drivers/w1/slaves/Kconfig
index 67526690acbc..762561fbabbf 100644
--- a/drivers/w1/slaves/Kconfig
+++ b/drivers/w1/slaves/Kconfig
@@ -17,11 +17,16 @@ config W1_SLAVE_SMEM
17 simple 64bit memory rom(ds2401/ds2411/ds1990*) to your wire. 17 simple 64bit memory rom(ds2401/ds2411/ds1990*) to your wire.
18 18
19config W1_SLAVE_DS2408 19config W1_SLAVE_DS2408
20 tristate "8-Channel Addressable Switch (IO Expander) 0x29 family support (DS2408)" 20 tristate "8-Channel Addressable Switch (IO Expander) 0x29 family support (DS2408)"
21 help 21 help
22 Say Y here if you want to use a 1-wire 22 Say Y here if you want to use a 1-wire
23 DS2408 8-Channel Addressable Switch device support
23 24
24 DS2408 8-Channel Addressable Switch device support 25config W1_SLAVE_DS2413
26 tristate "Dual Channel Addressable Switch 0x3a family support (DS2413)"
27 help
28 Say Y here if you want to use a 1-wire
29 DS2413 Dual Channel Addressable Switch device support
25 30
26config W1_SLAVE_DS2423 31config W1_SLAVE_DS2423
27 tristate "Counter 1-wire device (DS2423)" 32 tristate "Counter 1-wire device (DS2423)"
diff --git a/drivers/w1/slaves/Makefile b/drivers/w1/slaves/Makefile
index 05188f6aab5a..06529f3157ab 100644
--- a/drivers/w1/slaves/Makefile
+++ b/drivers/w1/slaves/Makefile
@@ -4,7 +4,8 @@
4 4
5obj-$(CONFIG_W1_SLAVE_THERM) += w1_therm.o 5obj-$(CONFIG_W1_SLAVE_THERM) += w1_therm.o
6obj-$(CONFIG_W1_SLAVE_SMEM) += w1_smem.o 6obj-$(CONFIG_W1_SLAVE_SMEM) += w1_smem.o
7obj-$(CONFIG_W1_SLAVE_DS2408) += w1_ds2408.o 7obj-$(CONFIG_W1_SLAVE_DS2408) += w1_ds2408.o
8obj-$(CONFIG_W1_SLAVE_DS2413) += w1_ds2413.o
8obj-$(CONFIG_W1_SLAVE_DS2423) += w1_ds2423.o 9obj-$(CONFIG_W1_SLAVE_DS2423) += w1_ds2423.o
9obj-$(CONFIG_W1_SLAVE_DS2431) += w1_ds2431.o 10obj-$(CONFIG_W1_SLAVE_DS2431) += w1_ds2431.o
10obj-$(CONFIG_W1_SLAVE_DS2433) += w1_ds2433.o 11obj-$(CONFIG_W1_SLAVE_DS2433) += w1_ds2433.o
diff --git a/drivers/w1/slaves/w1_ds2413.c b/drivers/w1/slaves/w1_ds2413.c
new file mode 100644
index 000000000000..829786252c6b
--- /dev/null
+++ b/drivers/w1/slaves/w1_ds2413.c
@@ -0,0 +1,177 @@
1/*
2 * w1_ds2413.c - w1 family 3a (DS2413) driver
3 * based on w1_ds2408.c by Jean-Francois Dagenais <dagenaisj@sonatest.com>
4 *
5 * Copyright (c) 2013 Mariusz Bialonczyk <manio@skyboo.net>
6 *
7 * This source code is licensed under the GNU General Public License,
8 * Version 2. See the file COPYING for more details.
9 */
10
11#include <linux/kernel.h>
12#include <linux/module.h>
13#include <linux/moduleparam.h>
14#include <linux/device.h>
15#include <linux/types.h>
16#include <linux/delay.h>
17#include <linux/slab.h>
18
19#include "../w1.h"
20#include "../w1_int.h"
21#include "../w1_family.h"
22
23MODULE_LICENSE("GPL");
24MODULE_AUTHOR("Mariusz Bialonczyk <manio@skyboo.net>");
25MODULE_DESCRIPTION("w1 family 3a driver for DS2413 2 Pin IO");
26
27#define W1_F3A_RETRIES 3
28#define W1_F3A_FUNC_PIO_ACCESS_READ 0xF5
29#define W1_F3A_FUNC_PIO_ACCESS_WRITE 0x5A
30#define W1_F3A_SUCCESS_CONFIRM_BYTE 0xAA
31
32static ssize_t w1_f3a_read_state(
33 struct file *filp, struct kobject *kobj,
34 struct bin_attribute *bin_attr,
35 char *buf, loff_t off, size_t count)
36{
37 struct w1_slave *sl = kobj_to_w1_slave(kobj);
38 dev_dbg(&sl->dev,
39 "Reading %s kobj: %p, off: %0#10x, count: %zu, buff addr: %p",
40 bin_attr->attr.name, kobj, (unsigned int)off, count, buf);
41
42 if (off != 0)
43 return 0;
44 if (!buf)
45 return -EINVAL;
46
47 mutex_lock(&sl->master->bus_mutex);
48 dev_dbg(&sl->dev, "mutex locked");
49
50 if (w1_reset_select_slave(sl)) {
51 mutex_unlock(&sl->master->bus_mutex);
52 return -EIO;
53 }
54
55 w1_write_8(sl->master, W1_F3A_FUNC_PIO_ACCESS_READ);
56 *buf = w1_read_8(sl->master);
57
58 mutex_unlock(&sl->master->bus_mutex);
59 dev_dbg(&sl->dev, "mutex unlocked");
60
61 /* check for correct complement */
62 if ((*buf & 0x0F) != ((~*buf >> 4) & 0x0F))
63 return -EIO;
64 else
65 return 1;
66}
67
68static ssize_t w1_f3a_write_output(
69 struct file *filp, struct kobject *kobj,
70 struct bin_attribute *bin_attr,
71 char *buf, loff_t off, size_t count)
72{
73 struct w1_slave *sl = kobj_to_w1_slave(kobj);
74 u8 w1_buf[3];
75 unsigned int retries = W1_F3A_RETRIES;
76
77 if (count != 1 || off != 0)
78 return -EFAULT;
79
80 dev_dbg(&sl->dev, "locking mutex for write_output");
81 mutex_lock(&sl->master->bus_mutex);
82 dev_dbg(&sl->dev, "mutex locked");
83
84 if (w1_reset_select_slave(sl))
85 goto error;
86
87 /* according to the DS2413 datasheet the most significant 6 bits
88 should be set to "1"s, so do it now */
89 *buf = *buf | 0xFC;
90
91 while (retries--) {
92 w1_buf[0] = W1_F3A_FUNC_PIO_ACCESS_WRITE;
93 w1_buf[1] = *buf;
94 w1_buf[2] = ~(*buf);
95 w1_write_block(sl->master, w1_buf, 3);
96
97 if (w1_read_8(sl->master) == W1_F3A_SUCCESS_CONFIRM_BYTE) {
98 mutex_unlock(&sl->master->bus_mutex);
99 dev_dbg(&sl->dev, "mutex unlocked, retries:%d", retries);
100 return 1;
101 }
102 if (w1_reset_resume_command(sl->master))
103 goto error;
104 }
105
106error:
107 mutex_unlock(&sl->master->bus_mutex);
108 dev_dbg(&sl->dev, "mutex unlocked in error, retries:%d", retries);
109 return -EIO;
110}
111
112#define NB_SYSFS_BIN_FILES 2
113static struct bin_attribute w1_f3a_sysfs_bin_files[NB_SYSFS_BIN_FILES] = {
114 {
115 .attr = {
116 .name = "state",
117 .mode = S_IRUGO,
118 },
119 .size = 1,
120 .read = w1_f3a_read_state,
121 },
122 {
123 .attr = {
124 .name = "output",
125 .mode = S_IRUGO | S_IWUSR | S_IWGRP,
126 },
127 .size = 1,
128 .write = w1_f3a_write_output,
129 }
130};
131
132static int w1_f3a_add_slave(struct w1_slave *sl)
133{
134 int err = 0;
135 int i;
136
137 for (i = 0; i < NB_SYSFS_BIN_FILES && !err; ++i)
138 err = sysfs_create_bin_file(
139 &sl->dev.kobj,
140 &(w1_f3a_sysfs_bin_files[i]));
141 if (err)
142 while (--i >= 0)
143 sysfs_remove_bin_file(&sl->dev.kobj,
144 &(w1_f3a_sysfs_bin_files[i]));
145 return err;
146}
147
148static void w1_f3a_remove_slave(struct w1_slave *sl)
149{
150 int i;
151 for (i = NB_SYSFS_BIN_FILES - 1; i >= 0; --i)
152 sysfs_remove_bin_file(&sl->dev.kobj,
153 &(w1_f3a_sysfs_bin_files[i]));
154}
155
156static struct w1_family_ops w1_f3a_fops = {
157 .add_slave = w1_f3a_add_slave,
158 .remove_slave = w1_f3a_remove_slave,
159};
160
161static struct w1_family w1_family_3a = {
162 .fid = W1_FAMILY_DS2413,
163 .fops = &w1_f3a_fops,
164};
165
166static int __init w1_f3a_init(void)
167{
168 return w1_register_family(&w1_family_3a);
169}
170
171static void __exit w1_f3a_exit(void)
172{
173 w1_unregister_family(&w1_family_3a);
174}
175
176module_init(w1_f3a_init);
177module_exit(w1_f3a_exit);
diff --git a/drivers/w1/w1_family.h b/drivers/w1/w1_family.h
index a1f0ce151d53..625dd08f775f 100644
--- a/drivers/w1/w1_family.h
+++ b/drivers/w1/w1_family.h
@@ -39,6 +39,7 @@
39#define W1_EEPROM_DS2431 0x2D 39#define W1_EEPROM_DS2431 0x2D
40#define W1_FAMILY_DS2760 0x30 40#define W1_FAMILY_DS2760 0x30
41#define W1_FAMILY_DS2780 0x32 41#define W1_FAMILY_DS2780 0x32
42#define W1_FAMILY_DS2413 0x3A
42#define W1_THERM_DS1825 0x3B 43#define W1_THERM_DS1825 0x3B
43#define W1_FAMILY_DS2781 0x3D 44#define W1_FAMILY_DS2781 0x3D
44#define W1_THERM_DS28EA00 0x42 45#define W1_THERM_DS28EA00 0x42
diff --git a/fs/affs/amigaffs.c b/fs/affs/amigaffs.c
index eb82ee53ee0b..d9a43674cb94 100644
--- a/fs/affs/amigaffs.c
+++ b/fs/affs/amigaffs.c
@@ -125,9 +125,8 @@ static void
125affs_fix_dcache(struct inode *inode, u32 entry_ino) 125affs_fix_dcache(struct inode *inode, u32 entry_ino)
126{ 126{
127 struct dentry *dentry; 127 struct dentry *dentry;
128 struct hlist_node *p;
129 spin_lock(&inode->i_lock); 128 spin_lock(&inode->i_lock);
130 hlist_for_each_entry(dentry, p, &inode->i_dentry, d_alias) { 129 hlist_for_each_entry(dentry, &inode->i_dentry, d_alias) {
131 if (entry_ino == (u32)(long)dentry->d_fsdata) { 130 if (entry_ino == (u32)(long)dentry->d_fsdata) {
132 dentry->d_fsdata = (void *)inode->i_ino; 131 dentry->d_fsdata = (void *)inode->i_ino;
133 break; 132 break;
diff --git a/fs/aio.c b/fs/aio.c
index 064bfbe37566..3f941f2a3059 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -591,11 +591,10 @@ static struct kioctx *lookup_ioctx(unsigned long ctx_id)
591{ 591{
592 struct mm_struct *mm = current->mm; 592 struct mm_struct *mm = current->mm;
593 struct kioctx *ctx, *ret = NULL; 593 struct kioctx *ctx, *ret = NULL;
594 struct hlist_node *n;
595 594
596 rcu_read_lock(); 595 rcu_read_lock();
597 596
598 hlist_for_each_entry_rcu(ctx, n, &mm->ioctx_list, list) { 597 hlist_for_each_entry_rcu(ctx, &mm->ioctx_list, list) {
599 /* 598 /*
600 * RCU protects us against accessing freed memory but 599 * RCU protects us against accessing freed memory but
601 * we have to be careful not to get a reference when the 600 * we have to be careful not to get a reference when the
diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
index d2a833999bcc..83f2606c76d0 100644
--- a/fs/cifs/inode.c
+++ b/fs/cifs/inode.c
@@ -816,10 +816,9 @@ static bool
816inode_has_hashed_dentries(struct inode *inode) 816inode_has_hashed_dentries(struct inode *inode)
817{ 817{
818 struct dentry *dentry; 818 struct dentry *dentry;
819 struct hlist_node *p;
820 819
821 spin_lock(&inode->i_lock); 820 spin_lock(&inode->i_lock);
822 hlist_for_each_entry(dentry, p, &inode->i_dentry, d_alias) { 821 hlist_for_each_entry(dentry, &inode->i_dentry, d_alias) {
823 if (!d_unhashed(dentry) || IS_ROOT(dentry)) { 822 if (!d_unhashed(dentry) || IS_ROOT(dentry)) {
824 spin_unlock(&inode->i_lock); 823 spin_unlock(&inode->i_lock);
825 return true; 824 return true;
diff --git a/fs/coredump.c b/fs/coredump.c
index 69baf903d3bd..c6479658d487 100644
--- a/fs/coredump.c
+++ b/fs/coredump.c
@@ -501,7 +501,7 @@ void do_coredump(siginfo_t *siginfo)
501 * so we dump it as root in mode 2, and only into a controlled 501 * so we dump it as root in mode 2, and only into a controlled
502 * environment (pipe handler or fully qualified path). 502 * environment (pipe handler or fully qualified path).
503 */ 503 */
504 if (__get_dumpable(cprm.mm_flags) == SUID_DUMPABLE_SAFE) { 504 if (__get_dumpable(cprm.mm_flags) == SUID_DUMP_ROOT) {
505 /* Setuid core dump mode */ 505 /* Setuid core dump mode */
506 flag = O_EXCL; /* Stop rewrite attacks */ 506 flag = O_EXCL; /* Stop rewrite attacks */
507 cred->fsuid = GLOBAL_ROOT_UID; /* Dump root private */ 507 cred->fsuid = GLOBAL_ROOT_UID; /* Dump root private */
diff --git a/fs/dcache.c b/fs/dcache.c
index 68220dd0c135..fbfae008ba44 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -675,11 +675,10 @@ EXPORT_SYMBOL(dget_parent);
675static struct dentry *__d_find_alias(struct inode *inode, int want_discon) 675static struct dentry *__d_find_alias(struct inode *inode, int want_discon)
676{ 676{
677 struct dentry *alias, *discon_alias; 677 struct dentry *alias, *discon_alias;
678 struct hlist_node *p;
679 678
680again: 679again:
681 discon_alias = NULL; 680 discon_alias = NULL;
682 hlist_for_each_entry(alias, p, &inode->i_dentry, d_alias) { 681 hlist_for_each_entry(alias, &inode->i_dentry, d_alias) {
683 spin_lock(&alias->d_lock); 682 spin_lock(&alias->d_lock);
684 if (S_ISDIR(inode->i_mode) || !d_unhashed(alias)) { 683 if (S_ISDIR(inode->i_mode) || !d_unhashed(alias)) {
685 if (IS_ROOT(alias) && 684 if (IS_ROOT(alias) &&
@@ -730,10 +729,9 @@ EXPORT_SYMBOL(d_find_alias);
730void d_prune_aliases(struct inode *inode) 729void d_prune_aliases(struct inode *inode)
731{ 730{
732 struct dentry *dentry; 731 struct dentry *dentry;
733 struct hlist_node *p;
734restart: 732restart:
735 spin_lock(&inode->i_lock); 733 spin_lock(&inode->i_lock);
736 hlist_for_each_entry(dentry, p, &inode->i_dentry, d_alias) { 734 hlist_for_each_entry(dentry, &inode->i_dentry, d_alias) {
737 spin_lock(&dentry->d_lock); 735 spin_lock(&dentry->d_lock);
738 if (!dentry->d_count) { 736 if (!dentry->d_count) {
739 __dget_dlock(dentry); 737 __dget_dlock(dentry);
@@ -1443,14 +1441,13 @@ static struct dentry *__d_instantiate_unique(struct dentry *entry,
1443 int len = entry->d_name.len; 1441 int len = entry->d_name.len;
1444 const char *name = entry->d_name.name; 1442 const char *name = entry->d_name.name;
1445 unsigned int hash = entry->d_name.hash; 1443 unsigned int hash = entry->d_name.hash;
1446 struct hlist_node *p;
1447 1444
1448 if (!inode) { 1445 if (!inode) {
1449 __d_instantiate(entry, NULL); 1446 __d_instantiate(entry, NULL);
1450 return NULL; 1447 return NULL;
1451 } 1448 }
1452 1449
1453 hlist_for_each_entry(alias, p, &inode->i_dentry, d_alias) { 1450 hlist_for_each_entry(alias, &inode->i_dentry, d_alias) {
1454 /* 1451 /*
1455 * Don't need alias->d_lock here, because aliases with 1452 * Don't need alias->d_lock here, because aliases with
1456 * d_parent == entry->d_parent are not subject to name or 1453 * d_parent == entry->d_parent are not subject to name or
diff --git a/fs/dlm/lock.c b/fs/dlm/lock.c
index f7501651762d..1b1146670c4b 100644
--- a/fs/dlm/lock.c
+++ b/fs/dlm/lock.c
@@ -1183,7 +1183,7 @@ static void detach_lkb(struct dlm_lkb *lkb)
1183static int create_lkb(struct dlm_ls *ls, struct dlm_lkb **lkb_ret) 1183static int create_lkb(struct dlm_ls *ls, struct dlm_lkb **lkb_ret)
1184{ 1184{
1185 struct dlm_lkb *lkb; 1185 struct dlm_lkb *lkb;
1186 int rv, id; 1186 int rv;
1187 1187
1188 lkb = dlm_allocate_lkb(ls); 1188 lkb = dlm_allocate_lkb(ls);
1189 if (!lkb) 1189 if (!lkb)
@@ -1199,19 +1199,13 @@ static int create_lkb(struct dlm_ls *ls, struct dlm_lkb **lkb_ret)
1199 mutex_init(&lkb->lkb_cb_mutex); 1199 mutex_init(&lkb->lkb_cb_mutex);
1200 INIT_WORK(&lkb->lkb_cb_work, dlm_callback_work); 1200 INIT_WORK(&lkb->lkb_cb_work, dlm_callback_work);
1201 1201
1202 retry: 1202 idr_preload(GFP_NOFS);
1203 rv = idr_pre_get(&ls->ls_lkbidr, GFP_NOFS);
1204 if (!rv)
1205 return -ENOMEM;
1206
1207 spin_lock(&ls->ls_lkbidr_spin); 1203 spin_lock(&ls->ls_lkbidr_spin);
1208 rv = idr_get_new_above(&ls->ls_lkbidr, lkb, 1, &id); 1204 rv = idr_alloc(&ls->ls_lkbidr, lkb, 1, 0, GFP_NOWAIT);
1209 if (!rv) 1205 if (rv >= 0)
1210 lkb->lkb_id = id; 1206 lkb->lkb_id = rv;
1211 spin_unlock(&ls->ls_lkbidr_spin); 1207 spin_unlock(&ls->ls_lkbidr_spin);
1212 1208 idr_preload_end();
1213 if (rv == -EAGAIN)
1214 goto retry;
1215 1209
1216 if (rv < 0) { 1210 if (rv < 0) {
1217 log_error(ls, "create_lkb idr error %d", rv); 1211 log_error(ls, "create_lkb idr error %d", rv);
diff --git a/fs/dlm/lockspace.c b/fs/dlm/lockspace.c
index 2e99fb0c9737..3ca79d3253b9 100644
--- a/fs/dlm/lockspace.c
+++ b/fs/dlm/lockspace.c
@@ -796,7 +796,6 @@ static int release_lockspace(struct dlm_ls *ls, int force)
796 */ 796 */
797 797
798 idr_for_each(&ls->ls_lkbidr, lkb_idr_free, ls); 798 idr_for_each(&ls->ls_lkbidr, lkb_idr_free, ls);
799 idr_remove_all(&ls->ls_lkbidr);
800 idr_destroy(&ls->ls_lkbidr); 799 idr_destroy(&ls->ls_lkbidr);
801 800
802 /* 801 /*
diff --git a/fs/dlm/lowcomms.c b/fs/dlm/lowcomms.c
index dd87a31bcc21..4f5ad246582f 100644
--- a/fs/dlm/lowcomms.c
+++ b/fs/dlm/lowcomms.c
@@ -177,12 +177,11 @@ static inline int nodeid_hash(int nodeid)
177static struct connection *__find_con(int nodeid) 177static struct connection *__find_con(int nodeid)
178{ 178{
179 int r; 179 int r;
180 struct hlist_node *h;
181 struct connection *con; 180 struct connection *con;
182 181
183 r = nodeid_hash(nodeid); 182 r = nodeid_hash(nodeid);
184 183
185 hlist_for_each_entry(con, h, &connection_hash[r], list) { 184 hlist_for_each_entry(con, &connection_hash[r], list) {
186 if (con->nodeid == nodeid) 185 if (con->nodeid == nodeid)
187 return con; 186 return con;
188 } 187 }
@@ -232,13 +231,12 @@ static struct connection *__nodeid2con(int nodeid, gfp_t alloc)
232static void foreach_conn(void (*conn_func)(struct connection *c)) 231static void foreach_conn(void (*conn_func)(struct connection *c))
233{ 232{
234 int i; 233 int i;
235 struct hlist_node *h, *n; 234 struct hlist_node *n;
236 struct connection *con; 235 struct connection *con;
237 236
238 for (i = 0; i < CONN_HASH_SIZE; i++) { 237 for (i = 0; i < CONN_HASH_SIZE; i++) {
239 hlist_for_each_entry_safe(con, h, n, &connection_hash[i], list){ 238 hlist_for_each_entry_safe(con, n, &connection_hash[i], list)
240 conn_func(con); 239 conn_func(con);
241 }
242 } 240 }
243} 241}
244 242
@@ -257,13 +255,12 @@ static struct connection *nodeid2con(int nodeid, gfp_t allocation)
257static struct connection *assoc2con(int assoc_id) 255static struct connection *assoc2con(int assoc_id)
258{ 256{
259 int i; 257 int i;
260 struct hlist_node *h;
261 struct connection *con; 258 struct connection *con;
262 259
263 mutex_lock(&connections_lock); 260 mutex_lock(&connections_lock);
264 261
265 for (i = 0 ; i < CONN_HASH_SIZE; i++) { 262 for (i = 0 ; i < CONN_HASH_SIZE; i++) {
266 hlist_for_each_entry(con, h, &connection_hash[i], list) { 263 hlist_for_each_entry(con, &connection_hash[i], list) {
267 if (con->sctp_assoc == assoc_id) { 264 if (con->sctp_assoc == assoc_id) {
268 mutex_unlock(&connections_lock); 265 mutex_unlock(&connections_lock);
269 return con; 266 return con;
diff --git a/fs/dlm/recover.c b/fs/dlm/recover.c
index aedea28a86a1..a6bc63f6e31b 100644
--- a/fs/dlm/recover.c
+++ b/fs/dlm/recover.c
@@ -305,27 +305,26 @@ static int recover_idr_empty(struct dlm_ls *ls)
305static int recover_idr_add(struct dlm_rsb *r) 305static int recover_idr_add(struct dlm_rsb *r)
306{ 306{
307 struct dlm_ls *ls = r->res_ls; 307 struct dlm_ls *ls = r->res_ls;
308 int rv, id; 308 int rv;
309
310 rv = idr_pre_get(&ls->ls_recover_idr, GFP_NOFS);
311 if (!rv)
312 return -ENOMEM;
313 309
310 idr_preload(GFP_NOFS);
314 spin_lock(&ls->ls_recover_idr_lock); 311 spin_lock(&ls->ls_recover_idr_lock);
315 if (r->res_id) { 312 if (r->res_id) {
316 spin_unlock(&ls->ls_recover_idr_lock); 313 rv = -1;
317 return -1; 314 goto out_unlock;
318 }
319 rv = idr_get_new_above(&ls->ls_recover_idr, r, 1, &id);
320 if (rv) {
321 spin_unlock(&ls->ls_recover_idr_lock);
322 return rv;
323 } 315 }
324 r->res_id = id; 316 rv = idr_alloc(&ls->ls_recover_idr, r, 1, 0, GFP_NOWAIT);
317 if (rv < 0)
318 goto out_unlock;
319
320 r->res_id = rv;
325 ls->ls_recover_list_count++; 321 ls->ls_recover_list_count++;
326 dlm_hold_rsb(r); 322 dlm_hold_rsb(r);
323 rv = 0;
324out_unlock:
327 spin_unlock(&ls->ls_recover_idr_lock); 325 spin_unlock(&ls->ls_recover_idr_lock);
328 return 0; 326 idr_preload_end();
327 return rv;
329} 328}
330 329
331static void recover_idr_del(struct dlm_rsb *r) 330static void recover_idr_del(struct dlm_rsb *r)
@@ -351,24 +350,21 @@ static struct dlm_rsb *recover_idr_find(struct dlm_ls *ls, uint64_t id)
351 return r; 350 return r;
352} 351}
353 352
354static int recover_idr_clear_rsb(int id, void *p, void *data) 353static void recover_idr_clear(struct dlm_ls *ls)
355{ 354{
356 struct dlm_ls *ls = data; 355 struct dlm_rsb *r;
357 struct dlm_rsb *r = p; 356 int id;
358 357
359 r->res_id = 0; 358 spin_lock(&ls->ls_recover_idr_lock);
360 r->res_recover_locks_count = 0;
361 ls->ls_recover_list_count--;
362 359
363 dlm_put_rsb(r); 360 idr_for_each_entry(&ls->ls_recover_idr, r, id) {
364 return 0; 361 idr_remove(&ls->ls_recover_idr, id);
365} 362 r->res_id = 0;
363 r->res_recover_locks_count = 0;
364 ls->ls_recover_list_count--;
366 365
367static void recover_idr_clear(struct dlm_ls *ls) 366 dlm_put_rsb(r);
368{ 367 }
369 spin_lock(&ls->ls_recover_idr_lock);
370 idr_for_each(&ls->ls_recover_idr, recover_idr_clear_rsb, ls);
371 idr_remove_all(&ls->ls_recover_idr);
372 368
373 if (ls->ls_recover_list_count != 0) { 369 if (ls->ls_recover_list_count != 0) {
374 log_error(ls, "warning: recover_list_count %d", 370 log_error(ls, "warning: recover_list_count %d",
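With idr_remove_all() gone from the API, recover_idr_clear() above drains the IDR itself with idr_for_each_entry(), removing each ID as it goes instead of handing a callback to idr_for_each(). The core of that pattern, sketched with an illustrative release hook:

#include <linux/idr.h>

static void example_drain_idr(struct idr *idr, void (*put)(void *entry))
{
        void *entry;
        int id;

        idr_for_each_entry(idr, entry, id) {
                idr_remove(idr, id);    /* safe: iteration advances by id */
                put(entry);
        }
}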
diff --git a/fs/ecryptfs/messaging.c b/fs/ecryptfs/messaging.c
index 5fa2471796c2..8d7a577ae497 100644
--- a/fs/ecryptfs/messaging.c
+++ b/fs/ecryptfs/messaging.c
@@ -115,10 +115,9 @@ void ecryptfs_msg_ctx_alloc_to_free(struct ecryptfs_msg_ctx *msg_ctx)
115 */ 115 */
116int ecryptfs_find_daemon_by_euid(struct ecryptfs_daemon **daemon) 116int ecryptfs_find_daemon_by_euid(struct ecryptfs_daemon **daemon)
117{ 117{
118 struct hlist_node *elem;
119 int rc; 118 int rc;
120 119
121 hlist_for_each_entry(*daemon, elem, 120 hlist_for_each_entry(*daemon,
122 &ecryptfs_daemon_hash[ecryptfs_current_euid_hash()], 121 &ecryptfs_daemon_hash[ecryptfs_current_euid_hash()],
123 euid_chain) { 122 euid_chain) {
124 if (uid_eq((*daemon)->file->f_cred->euid, current_euid())) { 123 if (uid_eq((*daemon)->file->f_cred->euid, current_euid())) {
@@ -445,7 +444,6 @@ void ecryptfs_release_messaging(void)
445 mutex_unlock(&ecryptfs_msg_ctx_lists_mux); 444 mutex_unlock(&ecryptfs_msg_ctx_lists_mux);
446 } 445 }
447 if (ecryptfs_daemon_hash) { 446 if (ecryptfs_daemon_hash) {
448 struct hlist_node *elem;
449 struct ecryptfs_daemon *daemon; 447 struct ecryptfs_daemon *daemon;
450 int i; 448 int i;
451 449
@@ -453,7 +451,7 @@ void ecryptfs_release_messaging(void)
453 for (i = 0; i < (1 << ecryptfs_hash_bits); i++) { 451 for (i = 0; i < (1 << ecryptfs_hash_bits); i++) {
454 int rc; 452 int rc;
455 453
456 hlist_for_each_entry(daemon, elem, 454 hlist_for_each_entry(daemon,
457 &ecryptfs_daemon_hash[i], 455 &ecryptfs_daemon_hash[i],
458 euid_chain) { 456 euid_chain) {
459 rc = ecryptfs_exorcise_daemon(daemon); 457 rc = ecryptfs_exorcise_daemon(daemon);
diff --git a/fs/exec.c b/fs/exec.c
index 864c50df660a..a96a4885bbbf 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -1111,7 +1111,7 @@ void setup_new_exec(struct linux_binprm * bprm)
1111 current->sas_ss_sp = current->sas_ss_size = 0; 1111 current->sas_ss_sp = current->sas_ss_size = 0;
1112 1112
1113 if (uid_eq(current_euid(), current_uid()) && gid_eq(current_egid(), current_gid())) 1113 if (uid_eq(current_euid(), current_uid()) && gid_eq(current_egid(), current_gid()))
1114 set_dumpable(current->mm, SUID_DUMPABLE_ENABLED); 1114 set_dumpable(current->mm, SUID_DUMP_USER);
1115 else 1115 else
1116 set_dumpable(current->mm, suid_dumpable); 1116 set_dumpable(current->mm, suid_dumpable);
1117 1117
@@ -1639,17 +1639,17 @@ EXPORT_SYMBOL(set_binfmt);
1639void set_dumpable(struct mm_struct *mm, int value) 1639void set_dumpable(struct mm_struct *mm, int value)
1640{ 1640{
1641 switch (value) { 1641 switch (value) {
1642 case SUID_DUMPABLE_DISABLED: 1642 case SUID_DUMP_DISABLE:
1643 clear_bit(MMF_DUMPABLE, &mm->flags); 1643 clear_bit(MMF_DUMPABLE, &mm->flags);
1644 smp_wmb(); 1644 smp_wmb();
1645 clear_bit(MMF_DUMP_SECURELY, &mm->flags); 1645 clear_bit(MMF_DUMP_SECURELY, &mm->flags);
1646 break; 1646 break;
1647 case SUID_DUMPABLE_ENABLED: 1647 case SUID_DUMP_USER:
1648 set_bit(MMF_DUMPABLE, &mm->flags); 1648 set_bit(MMF_DUMPABLE, &mm->flags);
1649 smp_wmb(); 1649 smp_wmb();
1650 clear_bit(MMF_DUMP_SECURELY, &mm->flags); 1650 clear_bit(MMF_DUMP_SECURELY, &mm->flags);
1651 break; 1651 break;
1652 case SUID_DUMPABLE_SAFE: 1652 case SUID_DUMP_ROOT:
1653 set_bit(MMF_DUMP_SECURELY, &mm->flags); 1653 set_bit(MMF_DUMP_SECURELY, &mm->flags);
1654 smp_wmb(); 1654 smp_wmb();
1655 set_bit(MMF_DUMPABLE, &mm->flags); 1655 set_bit(MMF_DUMPABLE, &mm->flags);
@@ -1662,7 +1662,7 @@ int __get_dumpable(unsigned long mm_flags)
1662 int ret; 1662 int ret;
1663 1663
1664 ret = mm_flags & MMF_DUMPABLE_MASK; 1664 ret = mm_flags & MMF_DUMPABLE_MASK;
1665 return (ret > SUID_DUMPABLE_ENABLED) ? SUID_DUMPABLE_SAFE : ret; 1665 return (ret > SUID_DUMP_USER) ? SUID_DUMP_ROOT : ret;
1666} 1666}
1667 1667
1668int get_dumpable(struct mm_struct *mm) 1668int get_dumpable(struct mm_struct *mm)
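The SUID_DUMPABLE_* to SUID_DUMP_* rename keeps the same three states and the same clamping rule in __get_dumpable(): any raw two-bit value above SUID_DUMP_USER is treated as the root-only mode. Sketched with the constants' conventional values:

#define SUID_DUMP_DISABLE  0    /* setuid dumping disabled */
#define SUID_DUMP_USER     1    /* dump as the user of the process */
#define SUID_DUMP_ROOT     2    /* dump as root, into a controlled location */

static int example_get_dumpable(unsigned long mm_flags, unsigned long mask)
{
        int ret = mm_flags & mask;      /* mask stands in for MMF_DUMPABLE_MASK */

        return ret > SUID_DUMP_USER ? SUID_DUMP_ROOT : ret;
}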
diff --git a/fs/exportfs/expfs.c b/fs/exportfs/expfs.c
index 5df4bb4aab14..262fc9940982 100644
--- a/fs/exportfs/expfs.c
+++ b/fs/exportfs/expfs.c
@@ -44,14 +44,13 @@ find_acceptable_alias(struct dentry *result,
44{ 44{
45 struct dentry *dentry, *toput = NULL; 45 struct dentry *dentry, *toput = NULL;
46 struct inode *inode; 46 struct inode *inode;
47 struct hlist_node *p;
48 47
49 if (acceptable(context, result)) 48 if (acceptable(context, result))
50 return result; 49 return result;
51 50
52 inode = result->d_inode; 51 inode = result->d_inode;
53 spin_lock(&inode->i_lock); 52 spin_lock(&inode->i_lock);
54 hlist_for_each_entry(dentry, p, &inode->i_dentry, d_alias) { 53 hlist_for_each_entry(dentry, &inode->i_dentry, d_alias) {
55 dget(dentry); 54 dget(dentry);
56 spin_unlock(&inode->i_lock); 55 spin_unlock(&inode->i_lock);
57 if (toput) 56 if (toput)
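This exportfs hunk is the same mechanical transformation that recurs throughout the rest of the series (ecryptfs above; fat, fscache and hfsplus below): hlist_for_each_entry() lost its struct hlist_node * cursor argument and now derives the entry pointer directly from the list linkage. A rough user-space re-implementation of the new-style macro, to show why the cursor became redundant; this is a sketch, not the kernel's exact definition in include/linux/list.h:

#include <stddef.h>

struct hlist_node { struct hlist_node *next, **pprev; };
struct hlist_head { struct hlist_node *first; };

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/* note: evaluates ptr twice; the kernel avoids that with a ({ ... }) block */
#define hlist_entry_safe(ptr, type, member) \
	((ptr) ? container_of(ptr, type, member) : NULL)

/*
 * pos is a pointer to the entry type itself, so callers no longer
 * declare and thread a separate struct hlist_node * through the loop.
 */
#define sketch_hlist_for_each_entry(pos, head, member)                  \
	for (pos = hlist_entry_safe((head)->first,                      \
				    __typeof__(*(pos)), member);        \
	     pos;                                                       \
	     pos = hlist_entry_safe((pos)->member.next,                 \
				    __typeof__(*(pos)), member))

/*
 * usage, matching the new call sites in this patch:
 *
 *	struct item { int v; struct hlist_node link; };
 *	struct item *it;
 *
 *	sketch_hlist_for_each_entry(it, &head, link)
 *		use(it->v);
 */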
diff --git a/fs/fat/fat.h b/fs/fat/fat.h
index 12701a567752..e9cc3f0d58e2 100644
--- a/fs/fat/fat.h
+++ b/fs/fat/fat.h
@@ -95,6 +95,8 @@ struct msdos_sb_info {
95 95
96 spinlock_t dir_hash_lock; 96 spinlock_t dir_hash_lock;
97 struct hlist_head dir_hashtable[FAT_HASH_SIZE]; 97 struct hlist_head dir_hashtable[FAT_HASH_SIZE];
98
99 unsigned int dirty; /* fs state before mount */
98}; 100};
99 101
100#define FAT_CACHE_VALID 0 /* special case for valid cache */ 102#define FAT_CACHE_VALID 0 /* special case for valid cache */
diff --git a/fs/fat/inode.c b/fs/fat/inode.c
index f8f491677a4a..acf6e479b443 100644
--- a/fs/fat/inode.c
+++ b/fs/fat/inode.c
@@ -341,12 +341,11 @@ struct inode *fat_iget(struct super_block *sb, loff_t i_pos)
341{ 341{
342 struct msdos_sb_info *sbi = MSDOS_SB(sb); 342 struct msdos_sb_info *sbi = MSDOS_SB(sb);
343 struct hlist_head *head = sbi->inode_hashtable + fat_hash(i_pos); 343 struct hlist_head *head = sbi->inode_hashtable + fat_hash(i_pos);
344 struct hlist_node *_p;
345 struct msdos_inode_info *i; 344 struct msdos_inode_info *i;
346 struct inode *inode = NULL; 345 struct inode *inode = NULL;
347 346
348 spin_lock(&sbi->inode_hash_lock); 347 spin_lock(&sbi->inode_hash_lock);
349 hlist_for_each_entry(i, _p, head, i_fat_hash) { 348 hlist_for_each_entry(i, head, i_fat_hash) {
350 BUG_ON(i->vfs_inode.i_sb != sb); 349 BUG_ON(i->vfs_inode.i_sb != sb);
351 if (i->i_pos != i_pos) 350 if (i->i_pos != i_pos)
352 continue; 351 continue;
@@ -488,10 +487,59 @@ static void fat_evict_inode(struct inode *inode)
488 fat_detach(inode); 487 fat_detach(inode);
489} 488}
490 489
490static void fat_set_state(struct super_block *sb,
491 unsigned int set, unsigned int force)
492{
493 struct buffer_head *bh;
494 struct fat_boot_sector *b;
495 struct msdos_sb_info *sbi = sb->s_fs_info;
496
497	/* do not change anything if mounted read-only */
498 if ((sb->s_flags & MS_RDONLY) && !force)
499 return;
500
501 /* do not change state if fs was dirty */
502 if (sbi->dirty) {
503 /* warn only on set (mount). */
504 if (set)
505 fat_msg(sb, KERN_WARNING, "Volume was not properly "
506 "unmounted. Some data may be corrupt. "
507 "Please run fsck.");
508 return;
509 }
510
511 bh = sb_bread(sb, 0);
512 if (bh == NULL) {
513		fat_msg(sb, KERN_ERR, "unable to read boot sector "
514			"to update fs state");
515 return;
516 }
517
518 b = (struct fat_boot_sector *) bh->b_data;
519
520 if (sbi->fat_bits == 32) {
521 if (set)
522 b->fat32.state |= FAT_STATE_DIRTY;
523 else
524 b->fat32.state &= ~FAT_STATE_DIRTY;
525 } else /* fat 16 and 12 */ {
526 if (set)
527 b->fat16.state |= FAT_STATE_DIRTY;
528 else
529 b->fat16.state &= ~FAT_STATE_DIRTY;
530 }
531
532 mark_buffer_dirty(bh);
533 sync_dirty_buffer(bh);
534 brelse(bh);
535}
536
491static void fat_put_super(struct super_block *sb) 537static void fat_put_super(struct super_block *sb)
492{ 538{
493 struct msdos_sb_info *sbi = MSDOS_SB(sb); 539 struct msdos_sb_info *sbi = MSDOS_SB(sb);
494 540
541 fat_set_state(sb, 0, 0);
542
495 iput(sbi->fsinfo_inode); 543 iput(sbi->fsinfo_inode);
496 iput(sbi->fat_inode); 544 iput(sbi->fat_inode);
497 545
@@ -566,8 +614,18 @@ static void __exit fat_destroy_inodecache(void)
566 614
567static int fat_remount(struct super_block *sb, int *flags, char *data) 615static int fat_remount(struct super_block *sb, int *flags, char *data)
568{ 616{
617 int new_rdonly;
569 struct msdos_sb_info *sbi = MSDOS_SB(sb); 618 struct msdos_sb_info *sbi = MSDOS_SB(sb);
570 *flags |= MS_NODIRATIME | (sbi->options.isvfat ? 0 : MS_NOATIME); 619 *flags |= MS_NODIRATIME | (sbi->options.isvfat ? 0 : MS_NOATIME);
620
621 /* make sure we update state on remount. */
622 new_rdonly = *flags & MS_RDONLY;
623 if (new_rdonly != (sb->s_flags & MS_RDONLY)) {
624 if (new_rdonly)
625 fat_set_state(sb, 0, 0);
626 else
627 fat_set_state(sb, 1, 1);
628 }
571 return 0; 629 return 0;
572} 630}
573 631
@@ -1298,17 +1356,17 @@ int fat_fill_super(struct super_block *sb, void *data, int silent, int isvfat,
1298 sbi->prev_free = FAT_START_ENT; 1356 sbi->prev_free = FAT_START_ENT;
1299 sb->s_maxbytes = 0xffffffff; 1357 sb->s_maxbytes = 0xffffffff;
1300 1358
1301 if (!sbi->fat_length && b->fat32_length) { 1359 if (!sbi->fat_length && b->fat32.length) {
1302 struct fat_boot_fsinfo *fsinfo; 1360 struct fat_boot_fsinfo *fsinfo;
1303 struct buffer_head *fsinfo_bh; 1361 struct buffer_head *fsinfo_bh;
1304 1362
1305 /* Must be FAT32 */ 1363 /* Must be FAT32 */
1306 sbi->fat_bits = 32; 1364 sbi->fat_bits = 32;
1307 sbi->fat_length = le32_to_cpu(b->fat32_length); 1365 sbi->fat_length = le32_to_cpu(b->fat32.length);
1308 sbi->root_cluster = le32_to_cpu(b->root_cluster); 1366 sbi->root_cluster = le32_to_cpu(b->fat32.root_cluster);
1309 1367
1310 /* MC - if info_sector is 0, don't multiply by 0 */ 1368 /* MC - if info_sector is 0, don't multiply by 0 */
1311 sbi->fsinfo_sector = le16_to_cpu(b->info_sector); 1369 sbi->fsinfo_sector = le16_to_cpu(b->fat32.info_sector);
1312 if (sbi->fsinfo_sector == 0) 1370 if (sbi->fsinfo_sector == 0)
1313 sbi->fsinfo_sector = 1; 1371 sbi->fsinfo_sector = 1;
1314 1372
@@ -1362,6 +1420,12 @@ int fat_fill_super(struct super_block *sb, void *data, int silent, int isvfat,
1362 if (sbi->fat_bits != 32) 1420 if (sbi->fat_bits != 32)
1363 sbi->fat_bits = (total_clusters > MAX_FAT12) ? 16 : 12; 1421 sbi->fat_bits = (total_clusters > MAX_FAT12) ? 16 : 12;
1364 1422
1423	/* some OSes set FAT_STATE_DIRTY and clear it on unmount. */
1424 if (sbi->fat_bits == 32)
1425 sbi->dirty = b->fat32.state & FAT_STATE_DIRTY;
1426 else /* fat 16 or 12 */
1427 sbi->dirty = b->fat16.state & FAT_STATE_DIRTY;
1428
1365 /* check that FAT table does not overflow */ 1429 /* check that FAT table does not overflow */
1366 fat_clusters = sbi->fat_length * sb->s_blocksize * 8 / sbi->fat_bits; 1430 fat_clusters = sbi->fat_length * sb->s_blocksize * 8 / sbi->fat_bits;
1367 total_clusters = min(total_clusters, fat_clusters - FAT_START_ENT); 1431 total_clusters = min(total_clusters, fat_clusters - FAT_START_ENT);
@@ -1456,6 +1520,7 @@ int fat_fill_super(struct super_block *sb, void *data, int silent, int isvfat,
1456 "the device does not support discard"); 1520 "the device does not support discard");
1457 } 1521 }
1458 1522
1523 fat_set_state(sb, 1, 0);
1459 return 0; 1524 return 0;
1460 1525
1461out_invalid: 1526out_invalid:
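Taken together, the fat/inode.c hunks implement a simple dirty-bit protocol: fat_fill_super() records the state byte seen at mount and then sets FAT_STATE_DIRTY, fat_put_super() clears it again on clean unmount, and fat_remount() forces a transition when flipping between read-only and read-write. The sketch below models that protocol in plain C under two stated assumptions: the state byte behaves as a plain bit field, and the buffer-head I/O is abstracted away:

#include <stdio.h>
#include <stdbool.h>

#define FAT_STATE_DIRTY 0x01

struct model_sb {
	unsigned char state;   /* on-disk state byte */
	bool rdonly;           /* mounted read-only? */
	bool was_dirty;        /* state seen at mount time */
};

/* models fat_set_state(sb, set, force) from the hunk above */
static void model_set_state(struct model_sb *sb, bool set, bool force)
{
	if (sb->rdonly && !force)
		return;                /* never write through a ro mount */
	if (sb->was_dirty)
		return;                /* preserve pre-existing dirt for fsck */
	if (set)
		sb->state |= FAT_STATE_DIRTY;
	else
		sb->state &= ~FAT_STATE_DIRTY;
}

int main(void)
{
	struct model_sb sb = { .state = 0, .rdonly = false, .was_dirty = false };

	model_set_state(&sb, 1, 0);    /* rw mount: mark dirty */
	printf("mounted:   %#x\n", (unsigned)sb.state);
	model_set_state(&sb, 0, 0);    /* clean unmount: clear */
	printf("unmounted: %#x\n", (unsigned)sb.state);
	return 0;
}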
diff --git a/fs/fat/nfs.c b/fs/fat/nfs.c
index ef4b5faba87b..499c10438ca2 100644
--- a/fs/fat/nfs.c
+++ b/fs/fat/nfs.c
@@ -21,13 +21,12 @@ static struct inode *fat_dget(struct super_block *sb, int i_logstart)
21{ 21{
22 struct msdos_sb_info *sbi = MSDOS_SB(sb); 22 struct msdos_sb_info *sbi = MSDOS_SB(sb);
23 struct hlist_head *head; 23 struct hlist_head *head;
24 struct hlist_node *_p;
25 struct msdos_inode_info *i; 24 struct msdos_inode_info *i;
26 struct inode *inode = NULL; 25 struct inode *inode = NULL;
27 26
28 head = sbi->dir_hashtable + fat_dir_hash(i_logstart); 27 head = sbi->dir_hashtable + fat_dir_hash(i_logstart);
29 spin_lock(&sbi->dir_hash_lock); 28 spin_lock(&sbi->dir_hash_lock);
30 hlist_for_each_entry(i, _p, head, i_dir_hash) { 29 hlist_for_each_entry(i, head, i_dir_hash) {
31 BUG_ON(i->vfs_inode.i_sb != sb); 30 BUG_ON(i->vfs_inode.i_sb != sb);
32 if (i->i_logstart != i_logstart) 31 if (i->i_logstart != i_logstart)
33 continue; 32 continue;
diff --git a/fs/fscache/cookie.c b/fs/fscache/cookie.c
index 8dcb114758e3..e2cba1f60c21 100644
--- a/fs/fscache/cookie.c
+++ b/fs/fscache/cookie.c
@@ -237,13 +237,12 @@ static int fscache_alloc_object(struct fscache_cache *cache,
237 struct fscache_cookie *cookie) 237 struct fscache_cookie *cookie)
238{ 238{
239 struct fscache_object *object; 239 struct fscache_object *object;
240 struct hlist_node *_n;
241 int ret; 240 int ret;
242 241
243 _enter("%p,%p{%s}", cache, cookie, cookie->def->name); 242 _enter("%p,%p{%s}", cache, cookie, cookie->def->name);
244 243
245 spin_lock(&cookie->lock); 244 spin_lock(&cookie->lock);
246 hlist_for_each_entry(object, _n, &cookie->backing_objects, 245 hlist_for_each_entry(object, &cookie->backing_objects,
247 cookie_link) { 246 cookie_link) {
248 if (object->cache == cache) 247 if (object->cache == cache)
249 goto object_already_extant; 248 goto object_already_extant;
@@ -311,7 +310,6 @@ static int fscache_attach_object(struct fscache_cookie *cookie,
311{ 310{
312 struct fscache_object *p; 311 struct fscache_object *p;
313 struct fscache_cache *cache = object->cache; 312 struct fscache_cache *cache = object->cache;
314 struct hlist_node *_n;
315 int ret; 313 int ret;
316 314
317 _enter("{%s},{OBJ%x}", cookie->def->name, object->debug_id); 315 _enter("{%s},{OBJ%x}", cookie->def->name, object->debug_id);
@@ -321,7 +319,7 @@ static int fscache_attach_object(struct fscache_cookie *cookie,
321 /* there may be multiple initial creations of this object, but we only 319 /* there may be multiple initial creations of this object, but we only
322 * want one */ 320 * want one */
323 ret = -EEXIST; 321 ret = -EEXIST;
324 hlist_for_each_entry(p, _n, &cookie->backing_objects, cookie_link) { 322 hlist_for_each_entry(p, &cookie->backing_objects, cookie_link) {
325 if (p->cache == object->cache) { 323 if (p->cache == object->cache) {
326 if (p->state >= FSCACHE_OBJECT_DYING) 324 if (p->state >= FSCACHE_OBJECT_DYING)
327 ret = -ENOBUFS; 325 ret = -ENOBUFS;
@@ -331,7 +329,7 @@ static int fscache_attach_object(struct fscache_cookie *cookie,
331 329
332 /* pin the parent object */ 330 /* pin the parent object */
333 spin_lock_nested(&cookie->parent->lock, 1); 331 spin_lock_nested(&cookie->parent->lock, 1);
334 hlist_for_each_entry(p, _n, &cookie->parent->backing_objects, 332 hlist_for_each_entry(p, &cookie->parent->backing_objects,
335 cookie_link) { 333 cookie_link) {
336 if (p->cache == object->cache) { 334 if (p->cache == object->cache) {
337 if (p->state >= FSCACHE_OBJECT_DYING) { 335 if (p->state >= FSCACHE_OBJECT_DYING) {
@@ -435,7 +433,6 @@ EXPORT_SYMBOL(__fscache_wait_on_invalidate);
435void __fscache_update_cookie(struct fscache_cookie *cookie) 433void __fscache_update_cookie(struct fscache_cookie *cookie)
436{ 434{
437 struct fscache_object *object; 435 struct fscache_object *object;
438 struct hlist_node *_p;
439 436
440 fscache_stat(&fscache_n_updates); 437 fscache_stat(&fscache_n_updates);
441 438
@@ -452,7 +449,7 @@ void __fscache_update_cookie(struct fscache_cookie *cookie)
452 spin_lock(&cookie->lock); 449 spin_lock(&cookie->lock);
453 450
454 /* update the index entry on disk in each cache backing this cookie */ 451 /* update the index entry on disk in each cache backing this cookie */
455 hlist_for_each_entry(object, _p, 452 hlist_for_each_entry(object,
456 &cookie->backing_objects, cookie_link) { 453 &cookie->backing_objects, cookie_link) {
457 fscache_raise_event(object, FSCACHE_OBJECT_EV_UPDATE); 454 fscache_raise_event(object, FSCACHE_OBJECT_EV_UPDATE);
458 } 455 }
diff --git a/fs/hfsplus/Makefile b/fs/hfsplus/Makefile
index 3cc0df730156..09d278bb7b91 100644
--- a/fs/hfsplus/Makefile
+++ b/fs/hfsplus/Makefile
@@ -5,5 +5,5 @@
5obj-$(CONFIG_HFSPLUS_FS) += hfsplus.o 5obj-$(CONFIG_HFSPLUS_FS) += hfsplus.o
6 6
7hfsplus-objs := super.o options.o inode.o ioctl.o extents.o catalog.o dir.o btree.o \ 7hfsplus-objs := super.o options.o inode.o ioctl.o extents.o catalog.o dir.o btree.o \
8 bnode.o brec.o bfind.o tables.o unicode.o wrapper.o bitmap.o part_tbl.o 8 bnode.o brec.o bfind.o tables.o unicode.o wrapper.o bitmap.o part_tbl.o \
9 9 attributes.o xattr.o xattr_user.o xattr_security.o xattr_trusted.o
diff --git a/fs/hfsplus/attributes.c b/fs/hfsplus/attributes.c
new file mode 100644
index 000000000000..8d691f124714
--- /dev/null
+++ b/fs/hfsplus/attributes.c
@@ -0,0 +1,399 @@
1/*
2 * linux/fs/hfsplus/attributes.c
3 *
4 * Vyacheslav Dubeyko <slava@dubeyko.com>
5 *
6 * Handling of records in attributes tree
7 */
8
9#include "hfsplus_fs.h"
10#include "hfsplus_raw.h"
11
12static struct kmem_cache *hfsplus_attr_tree_cachep;
13
14int hfsplus_create_attr_tree_cache(void)
15{
16 if (hfsplus_attr_tree_cachep)
17 return -EEXIST;
18
19 hfsplus_attr_tree_cachep =
20 kmem_cache_create("hfsplus_attr_cache",
21 sizeof(hfsplus_attr_entry), 0,
22 SLAB_HWCACHE_ALIGN, NULL);
23 if (!hfsplus_attr_tree_cachep)
24 return -ENOMEM;
25
26 return 0;
27}
28
29void hfsplus_destroy_attr_tree_cache(void)
30{
31 kmem_cache_destroy(hfsplus_attr_tree_cachep);
32}
33
34int hfsplus_attr_bin_cmp_key(const hfsplus_btree_key *k1,
35 const hfsplus_btree_key *k2)
36{
37 __be32 k1_cnid, k2_cnid;
38
39 k1_cnid = k1->attr.cnid;
40 k2_cnid = k2->attr.cnid;
41 if (k1_cnid != k2_cnid)
42 return be32_to_cpu(k1_cnid) < be32_to_cpu(k2_cnid) ? -1 : 1;
43
44 return hfsplus_strcmp(
45 (const struct hfsplus_unistr *)&k1->attr.key_name,
46 (const struct hfsplus_unistr *)&k2->attr.key_name);
47}
48
49int hfsplus_attr_build_key(struct super_block *sb, hfsplus_btree_key *key,
50 u32 cnid, const char *name)
51{
52 int len;
53
54 memset(key, 0, sizeof(struct hfsplus_attr_key));
55 key->attr.cnid = cpu_to_be32(cnid);
56 if (name) {
57 len = strlen(name);
58 if (len > HFSPLUS_ATTR_MAX_STRLEN) {
59			printk(KERN_ERR "hfs: invalid xattr name length\n");
60 return -EINVAL;
61 }
62 hfsplus_asc2uni(sb,
63 (struct hfsplus_unistr *)&key->attr.key_name,
64 HFSPLUS_ATTR_MAX_STRLEN, name, len);
65 len = be16_to_cpu(key->attr.key_name.length);
66 } else {
67 key->attr.key_name.length = 0;
68 len = 0;
69 }
70
71 /* The length of the key, as stored in key_len field, does not include
72 * the size of the key_len field itself.
73	 * So offsetof(hfsplus_attr_key, key_name) works as a trick: it
74	 * counts the key_len field (__be16) of the hfsplus_attr_key
75	 * structure in place of the length field (__be16) of the
76	 * hfsplus_attr_unistr structure, and the two cancel out.
77 */
78 key->key_len =
79 cpu_to_be16(offsetof(struct hfsplus_attr_key, key_name) +
80 2 * len);
81
82 return 0;
83}
84
85void hfsplus_attr_build_key_uni(hfsplus_btree_key *key,
86 u32 cnid,
87 struct hfsplus_attr_unistr *name)
88{
89 int ustrlen;
90
91 memset(key, 0, sizeof(struct hfsplus_attr_key));
92 ustrlen = be16_to_cpu(name->length);
93 key->attr.cnid = cpu_to_be32(cnid);
94 key->attr.key_name.length = cpu_to_be16(ustrlen);
95 ustrlen *= 2;
96 memcpy(key->attr.key_name.unicode, name->unicode, ustrlen);
97
98 /* The length of the key, as stored in key_len field, does not include
99 * the size of the key_len field itself.
100	 * So offsetof(hfsplus_attr_key, key_name) works as a trick: it
101	 * counts the key_len field (__be16) of the hfsplus_attr_key
102	 * structure in place of the length field (__be16) of the
103	 * hfsplus_attr_unistr structure, and the two cancel out.
104 */
105 key->key_len =
106 cpu_to_be16(offsetof(struct hfsplus_attr_key, key_name) +
107 ustrlen);
108}
109
110hfsplus_attr_entry *hfsplus_alloc_attr_entry(void)
111{
112 return kmem_cache_alloc(hfsplus_attr_tree_cachep, GFP_KERNEL);
113}
114
115void hfsplus_destroy_attr_entry(hfsplus_attr_entry *entry)
116{
117 if (entry)
118 kmem_cache_free(hfsplus_attr_tree_cachep, entry);
119}
120
121#define HFSPLUS_INVALID_ATTR_RECORD -1
122
123static int hfsplus_attr_build_record(hfsplus_attr_entry *entry, int record_type,
124 u32 cnid, const void *value, size_t size)
125{
126 if (record_type == HFSPLUS_ATTR_FORK_DATA) {
127 /*
128 * Mac OS X supports only inline data attributes.
129		 * Do nothing.
130 */
131 memset(entry, 0, sizeof(*entry));
132 return sizeof(struct hfsplus_attr_fork_data);
133 } else if (record_type == HFSPLUS_ATTR_EXTENTS) {
134 /*
135 * Mac OS X supports only inline data attributes.
136 * Do nothing.
137 */
138 memset(entry, 0, sizeof(*entry));
139 return sizeof(struct hfsplus_attr_extents);
140 } else if (record_type == HFSPLUS_ATTR_INLINE_DATA) {
141 u16 len;
142
143 memset(entry, 0, sizeof(struct hfsplus_attr_inline_data));
144 entry->inline_data.record_type = cpu_to_be32(record_type);
145 if (size <= HFSPLUS_MAX_INLINE_DATA_SIZE)
146 len = size;
147 else
148 return HFSPLUS_INVALID_ATTR_RECORD;
149 entry->inline_data.length = cpu_to_be16(len);
150 memcpy(entry->inline_data.raw_bytes, value, len);
151 /*
152 * Align len on two-byte boundary.
153		 * A pad byte is added if len is odd.
154 */
155 len = round_up(len, 2);
156 return offsetof(struct hfsplus_attr_inline_data, raw_bytes) +
157 len;
158 } else /* invalid input */
159 memset(entry, 0, sizeof(*entry));
160
161 return HFSPLUS_INVALID_ATTR_RECORD;
162}
163
164int hfsplus_find_attr(struct super_block *sb, u32 cnid,
165 const char *name, struct hfs_find_data *fd)
166{
167 int err = 0;
168
169	dprint(DBG_ATTR_MOD, "find_attr: %s,%d\n", name ? name : "NULL", cnid);
170
171 if (!HFSPLUS_SB(sb)->attr_tree) {
172 printk(KERN_ERR "hfs: attributes file doesn't exist\n");
173 return -EINVAL;
174 }
175
176 if (name) {
177 err = hfsplus_attr_build_key(sb, fd->search_key, cnid, name);
178 if (err)
179 goto failed_find_attr;
180 err = hfs_brec_find(fd, hfs_find_rec_by_key);
181 if (err)
182 goto failed_find_attr;
183 } else {
184 err = hfsplus_attr_build_key(sb, fd->search_key, cnid, NULL);
185 if (err)
186 goto failed_find_attr;
187 err = hfs_brec_find(fd, hfs_find_1st_rec_by_cnid);
188 if (err)
189 goto failed_find_attr;
190 }
191
192failed_find_attr:
193 return err;
194}
195
196int hfsplus_attr_exists(struct inode *inode, const char *name)
197{
198 int err = 0;
199 struct super_block *sb = inode->i_sb;
200 struct hfs_find_data fd;
201
202 if (!HFSPLUS_SB(sb)->attr_tree)
203 return 0;
204
205 err = hfs_find_init(HFSPLUS_SB(sb)->attr_tree, &fd);
206 if (err)
207 return 0;
208
209 err = hfsplus_find_attr(sb, inode->i_ino, name, &fd);
210 if (err)
211 goto attr_not_found;
212
213 hfs_find_exit(&fd);
214 return 1;
215
216attr_not_found:
217 hfs_find_exit(&fd);
218 return 0;
219}
220
221int hfsplus_create_attr(struct inode *inode,
222 const char *name,
223 const void *value, size_t size)
224{
225 struct super_block *sb = inode->i_sb;
226 struct hfs_find_data fd;
227 hfsplus_attr_entry *entry_ptr;
228 int entry_size;
229 int err;
230
231 dprint(DBG_ATTR_MOD, "create_attr: %s,%ld\n",
232		name ? name : "NULL", inode->i_ino);
233
234 if (!HFSPLUS_SB(sb)->attr_tree) {
235 printk(KERN_ERR "hfs: attributes file doesn't exist\n");
236 return -EINVAL;
237 }
238
239 entry_ptr = hfsplus_alloc_attr_entry();
240 if (!entry_ptr)
241 return -ENOMEM;
242
243 err = hfs_find_init(HFSPLUS_SB(sb)->attr_tree, &fd);
244 if (err)
245 goto failed_init_create_attr;
246
247 if (name) {
248 err = hfsplus_attr_build_key(sb, fd.search_key,
249 inode->i_ino, name);
250 if (err)
251 goto failed_create_attr;
252 } else {
253 err = -EINVAL;
254 goto failed_create_attr;
255 }
256
257 /* Mac OS X supports only inline data attributes. */
258 entry_size = hfsplus_attr_build_record(entry_ptr,
259 HFSPLUS_ATTR_INLINE_DATA,
260 inode->i_ino,
261 value, size);
262 if (entry_size == HFSPLUS_INVALID_ATTR_RECORD) {
263 err = -EINVAL;
264 goto failed_create_attr;
265 }
266
267 err = hfs_brec_find(&fd, hfs_find_rec_by_key);
268 if (err != -ENOENT) {
269 if (!err)
270 err = -EEXIST;
271 goto failed_create_attr;
272 }
273
274 err = hfs_brec_insert(&fd, entry_ptr, entry_size);
275 if (err)
276 goto failed_create_attr;
277
278 hfsplus_mark_inode_dirty(inode, HFSPLUS_I_ATTR_DIRTY);
279
280failed_create_attr:
281 hfs_find_exit(&fd);
282
283failed_init_create_attr:
284 hfsplus_destroy_attr_entry(entry_ptr);
285 return err;
286}
287
288static int __hfsplus_delete_attr(struct inode *inode, u32 cnid,
289 struct hfs_find_data *fd)
290{
291 int err = 0;
292 __be32 found_cnid, record_type;
293
294 hfs_bnode_read(fd->bnode, &found_cnid,
295 fd->keyoffset +
296 offsetof(struct hfsplus_attr_key, cnid),
297 sizeof(__be32));
298 if (cnid != be32_to_cpu(found_cnid))
299 return -ENOENT;
300
301 hfs_bnode_read(fd->bnode, &record_type,
302 fd->entryoffset, sizeof(record_type));
303
304 switch (be32_to_cpu(record_type)) {
305 case HFSPLUS_ATTR_INLINE_DATA:
306 /* All is OK. Do nothing. */
307 break;
308 case HFSPLUS_ATTR_FORK_DATA:
309 case HFSPLUS_ATTR_EXTENTS:
310		printk(KERN_ERR "hfs: only inline data xattrs are supported\n");
311 return -EOPNOTSUPP;
312 default:
313 printk(KERN_ERR "hfs: invalid extended attribute record\n");
314 return -ENOENT;
315 }
316
317 err = hfs_brec_remove(fd);
318 if (err)
319 return err;
320
321 hfsplus_mark_inode_dirty(inode, HFSPLUS_I_ATTR_DIRTY);
322 return err;
323}
324
325int hfsplus_delete_attr(struct inode *inode, const char *name)
326{
327 int err = 0;
328 struct super_block *sb = inode->i_sb;
329 struct hfs_find_data fd;
330
331 dprint(DBG_ATTR_MOD, "delete_attr: %s,%ld\n",
332		name ? name : "NULL", inode->i_ino);
333
334 if (!HFSPLUS_SB(sb)->attr_tree) {
335 printk(KERN_ERR "hfs: attributes file doesn't exist\n");
336 return -EINVAL;
337 }
338
339 err = hfs_find_init(HFSPLUS_SB(sb)->attr_tree, &fd);
340 if (err)
341 return err;
342
343 if (name) {
344 err = hfsplus_attr_build_key(sb, fd.search_key,
345 inode->i_ino, name);
346 if (err)
347 goto out;
348 } else {
349 printk(KERN_ERR "hfs: invalid extended attribute name\n");
350 err = -EINVAL;
351 goto out;
352 }
353
354 err = hfs_brec_find(&fd, hfs_find_rec_by_key);
355 if (err)
356 goto out;
357
358 err = __hfsplus_delete_attr(inode, inode->i_ino, &fd);
359 if (err)
360 goto out;
361
362out:
363 hfs_find_exit(&fd);
364 return err;
365}
366
367int hfsplus_delete_all_attrs(struct inode *dir, u32 cnid)
368{
369 int err = 0;
370 struct hfs_find_data fd;
371
372 dprint(DBG_ATTR_MOD, "delete_all_attrs: %d\n", cnid);
373
374 if (!HFSPLUS_SB(dir->i_sb)->attr_tree) {
375 printk(KERN_ERR "hfs: attributes file doesn't exist\n");
376 return -EINVAL;
377 }
378
379 err = hfs_find_init(HFSPLUS_SB(dir->i_sb)->attr_tree, &fd);
380 if (err)
381 return err;
382
383 for (;;) {
384 err = hfsplus_find_attr(dir->i_sb, cnid, NULL, &fd);
385 if (err) {
386 if (err != -ENOENT)
387 printk(KERN_ERR "hfs: xattr search failed.\n");
388 goto end_delete_all;
389 }
390
391 err = __hfsplus_delete_attr(dir, cnid, &fd);
392 if (err)
393 goto end_delete_all;
394 }
395
396end_delete_all:
397 hfs_find_exit(&fd);
398 return err;
399}
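The key_len comment in hfsplus_attr_build_key() above deserves a worked example: offsetof(struct hfsplus_attr_key, key_name) wrongly counts the 2-byte key_len field but misses the 2-byte length field inside the unistr, and the two errors cancel. A small stand-alone check, with the struct layouts copied from the hfsplus_raw.h hunk later in this patch:

#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

typedef uint16_t __be16;
typedef uint32_t __be32, hfsplus_cnid;
typedef uint16_t hfsplus_unichr;

#define HFSPLUS_ATTR_MAX_STRLEN 127

struct hfsplus_attr_unistr {
	__be16 length;
	hfsplus_unichr unicode[HFSPLUS_ATTR_MAX_STRLEN];
} __attribute__((packed));

struct hfsplus_attr_key {
	__be16 key_len;
	__be16 pad;
	hfsplus_cnid cnid;
	__be32 start_block;
	struct hfsplus_attr_unistr key_name;
} __attribute__((packed));

int main(void)
{
	size_t len = 5; /* name length in unicode characters */

	/* as computed in hfsplus_attr_build_key() */
	size_t key_len = offsetof(struct hfsplus_attr_key, key_name) + 2 * len;

	/* bytes that actually follow the key_len field on disk:
	 * pad(2) + cnid(4) + start_block(4) + unistr length(2) + 2*len */
	size_t on_disk = 2 + 4 + 4 + 2 + 2 * len;

	/* the 2 bytes of key_len that offsetof() wrongly includes stand
	 * in for the 2-byte unistr length field it misses */
	printf("%zu == %zu\n", key_len, on_disk); /* 22 == 22 */
	return 0;
}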
diff --git a/fs/hfsplus/bfind.c b/fs/hfsplus/bfind.c
index 5d799c13205f..d73c98d1ee99 100644
--- a/fs/hfsplus/bfind.c
+++ b/fs/hfsplus/bfind.c
@@ -24,7 +24,19 @@ int hfs_find_init(struct hfs_btree *tree, struct hfs_find_data *fd)
24 fd->key = ptr + tree->max_key_len + 2; 24 fd->key = ptr + tree->max_key_len + 2;
25 dprint(DBG_BNODE_REFS, "find_init: %d (%p)\n", 25 dprint(DBG_BNODE_REFS, "find_init: %d (%p)\n",
26 tree->cnid, __builtin_return_address(0)); 26 tree->cnid, __builtin_return_address(0));
27 mutex_lock(&tree->tree_lock); 27 switch (tree->cnid) {
28 case HFSPLUS_CAT_CNID:
29 mutex_lock_nested(&tree->tree_lock, CATALOG_BTREE_MUTEX);
30 break;
31 case HFSPLUS_EXT_CNID:
32 mutex_lock_nested(&tree->tree_lock, EXTENTS_BTREE_MUTEX);
33 break;
34 case HFSPLUS_ATTR_CNID:
35 mutex_lock_nested(&tree->tree_lock, ATTR_BTREE_MUTEX);
36 break;
37 default:
38 BUG();
39 }
28 return 0; 40 return 0;
29} 41}
30 42
@@ -38,15 +50,73 @@ void hfs_find_exit(struct hfs_find_data *fd)
38 fd->tree = NULL; 50 fd->tree = NULL;
39} 51}
40 52
41/* Find the record in bnode that best matches key (not greater than...)*/ 53int hfs_find_1st_rec_by_cnid(struct hfs_bnode *bnode,
42int __hfs_brec_find(struct hfs_bnode *bnode, struct hfs_find_data *fd) 54 struct hfs_find_data *fd,
55 int *begin,
56 int *end,
57 int *cur_rec)
58{
59 __be32 cur_cnid, search_cnid;
60
61 if (bnode->tree->cnid == HFSPLUS_EXT_CNID) {
62 cur_cnid = fd->key->ext.cnid;
63 search_cnid = fd->search_key->ext.cnid;
64 } else if (bnode->tree->cnid == HFSPLUS_CAT_CNID) {
65 cur_cnid = fd->key->cat.parent;
66 search_cnid = fd->search_key->cat.parent;
67 } else if (bnode->tree->cnid == HFSPLUS_ATTR_CNID) {
68 cur_cnid = fd->key->attr.cnid;
69 search_cnid = fd->search_key->attr.cnid;
70 } else
71 BUG();
72
73 if (cur_cnid == search_cnid) {
74 (*end) = (*cur_rec);
75 if ((*begin) == (*end))
76 return 1;
77 } else {
78 if (be32_to_cpu(cur_cnid) < be32_to_cpu(search_cnid))
79 (*begin) = (*cur_rec) + 1;
80 else
81 (*end) = (*cur_rec) - 1;
82 }
83
84 return 0;
85}
86
87int hfs_find_rec_by_key(struct hfs_bnode *bnode,
88 struct hfs_find_data *fd,
89 int *begin,
90 int *end,
91 int *cur_rec)
43{ 92{
44 int cmpval; 93 int cmpval;
94
95 cmpval = bnode->tree->keycmp(fd->key, fd->search_key);
96 if (!cmpval) {
97 (*end) = (*cur_rec);
98 return 1;
99 }
100 if (cmpval < 0)
101 (*begin) = (*cur_rec) + 1;
102 else
103 *(end) = (*cur_rec) - 1;
104
105 return 0;
106}
107
108/* Find the record in bnode that best matches key (not greater than...)*/
109int __hfs_brec_find(struct hfs_bnode *bnode, struct hfs_find_data *fd,
110 search_strategy_t rec_found)
111{
45 u16 off, len, keylen; 112 u16 off, len, keylen;
46 int rec; 113 int rec;
47 int b, e; 114 int b, e;
48 int res; 115 int res;
49 116
117 if (!rec_found)
118 BUG();
119
50 b = 0; 120 b = 0;
51 e = bnode->num_recs - 1; 121 e = bnode->num_recs - 1;
52 res = -ENOENT; 122 res = -ENOENT;
@@ -59,17 +129,12 @@ int __hfs_brec_find(struct hfs_bnode *bnode, struct hfs_find_data *fd)
59 goto fail; 129 goto fail;
60 } 130 }
61 hfs_bnode_read(bnode, fd->key, off, keylen); 131 hfs_bnode_read(bnode, fd->key, off, keylen);
62 cmpval = bnode->tree->keycmp(fd->key, fd->search_key); 132 if (rec_found(bnode, fd, &b, &e, &rec)) {
63 if (!cmpval) {
64 e = rec;
65 res = 0; 133 res = 0;
66 goto done; 134 goto done;
67 } 135 }
68 if (cmpval < 0)
69 b = rec + 1;
70 else
71 e = rec - 1;
72 } while (b <= e); 136 } while (b <= e);
137
73 if (rec != e && e >= 0) { 138 if (rec != e && e >= 0) {
74 len = hfs_brec_lenoff(bnode, e, &off); 139 len = hfs_brec_lenoff(bnode, e, &off);
75 keylen = hfs_brec_keylen(bnode, e); 140 keylen = hfs_brec_keylen(bnode, e);
@@ -79,19 +144,21 @@ int __hfs_brec_find(struct hfs_bnode *bnode, struct hfs_find_data *fd)
79 } 144 }
80 hfs_bnode_read(bnode, fd->key, off, keylen); 145 hfs_bnode_read(bnode, fd->key, off, keylen);
81 } 146 }
147
82done: 148done:
83 fd->record = e; 149 fd->record = e;
84 fd->keyoffset = off; 150 fd->keyoffset = off;
85 fd->keylength = keylen; 151 fd->keylength = keylen;
86 fd->entryoffset = off + keylen; 152 fd->entryoffset = off + keylen;
87 fd->entrylength = len - keylen; 153 fd->entrylength = len - keylen;
154
88fail: 155fail:
89 return res; 156 return res;
90} 157}
91 158
92/* Traverse a B*Tree from the root to a leaf finding best fit to key */ 159/* Traverse a B*Tree from the root to a leaf finding best fit to key */
93/* Return allocated copy of node found, set recnum to best record */ 160/* Return allocated copy of node found, set recnum to best record */
94int hfs_brec_find(struct hfs_find_data *fd) 161int hfs_brec_find(struct hfs_find_data *fd, search_strategy_t do_key_compare)
95{ 162{
96 struct hfs_btree *tree; 163 struct hfs_btree *tree;
97 struct hfs_bnode *bnode; 164 struct hfs_bnode *bnode;
@@ -122,7 +189,7 @@ int hfs_brec_find(struct hfs_find_data *fd)
122 goto invalid; 189 goto invalid;
123 bnode->parent = parent; 190 bnode->parent = parent;
124 191
125 res = __hfs_brec_find(bnode, fd); 192 res = __hfs_brec_find(bnode, fd, do_key_compare);
126 if (!height) 193 if (!height)
127 break; 194 break;
128 if (fd->record < 0) 195 if (fd->record < 0)
@@ -149,7 +216,7 @@ int hfs_brec_read(struct hfs_find_data *fd, void *rec, int rec_len)
149{ 216{
150 int res; 217 int res;
151 218
152 res = hfs_brec_find(fd); 219 res = hfs_brec_find(fd, hfs_find_rec_by_key);
153 if (res) 220 if (res)
154 return res; 221 return res;
155 if (fd->entrylength > rec_len) 222 if (fd->entrylength > rec_len)
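The bfind.c rework turns __hfs_brec_find()'s binary search into a generic loop parameterized by a search_strategy_t callback: hfs_find_rec_by_key() reproduces the old stop-on-exact-match behaviour, while hfs_find_1st_rec_by_cnid() keeps shrinking the window even on a CNID match, so the search converges on the first record carrying that CNID. A stand-alone model over an int array; names prefixed model_ are illustrative, not kernel API, and the kernel version reads keys out of b-tree nodes instead:

#include <stdio.h>

typedef int (*model_strategy_t)(const int *keys, int target,
				int *begin, int *end, int *cur);

/* exact-match strategy, analogous to hfs_find_rec_by_key() */
static int model_find_by_key(const int *keys, int target,
			     int *begin, int *end, int *cur)
{
	if (keys[*cur] == target) {
		*end = *cur;
		return 1;            /* found: stop immediately */
	}
	if (keys[*cur] < target)
		*begin = *cur + 1;
	else
		*end = *cur - 1;
	return 0;
}

/* first-match strategy, analogous to hfs_find_1st_rec_by_cnid() */
static int model_find_first(const int *keys, int target,
			    int *begin, int *end, int *cur)
{
	if (keys[*cur] == target) {
		*end = *cur;         /* keep searching left of the match */
		return *begin == *end;
	}
	if (keys[*cur] < target)
		*begin = *cur + 1;
	else
		*end = *cur - 1;
	return 0;
}

/* the loop shape of __hfs_brec_find(), minus the bnode I/O */
static int model_brec_find(const int *keys, int nrecs, int target,
			   model_strategy_t strategy)
{
	int b = 0, e = nrecs - 1, rec;

	do {
		rec = (e + b) / 2;
		if (strategy(keys, target, &b, &e, &rec))
			return e;    /* record index, as fd->record */
	} while (b <= e);
	return -1;                   /* -ENOENT in the kernel */
}

int main(void)
{
	int keys[] = { 3, 7, 7, 7, 9 };

	printf("%d\n", model_brec_find(keys, 5, 7, model_find_by_key)); /* 2 */
	printf("%d\n", model_brec_find(keys, 5, 7, model_find_first));  /* 1 */
	return 0;
}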
diff --git a/fs/hfsplus/bnode.c b/fs/hfsplus/bnode.c
index 1c42cc5b899f..f31ac6f404f1 100644
--- a/fs/hfsplus/bnode.c
+++ b/fs/hfsplus/bnode.c
@@ -62,7 +62,8 @@ void hfs_bnode_read_key(struct hfs_bnode *node, void *key, int off)
62 62
63 tree = node->tree; 63 tree = node->tree;
64 if (node->type == HFS_NODE_LEAF || 64 if (node->type == HFS_NODE_LEAF ||
65 tree->attributes & HFS_TREE_VARIDXKEYS) 65 tree->attributes & HFS_TREE_VARIDXKEYS ||
66 node->tree->cnid == HFSPLUS_ATTR_CNID)
66 key_len = hfs_bnode_read_u16(node, off) + 2; 67 key_len = hfs_bnode_read_u16(node, off) + 2;
67 else 68 else
68 key_len = tree->max_key_len + 2; 69 key_len = tree->max_key_len + 2;
@@ -314,7 +315,8 @@ void hfs_bnode_dump(struct hfs_bnode *node)
314 if (i && node->type == HFS_NODE_INDEX) { 315 if (i && node->type == HFS_NODE_INDEX) {
315 int tmp; 316 int tmp;
316 317
317 if (node->tree->attributes & HFS_TREE_VARIDXKEYS) 318 if (node->tree->attributes & HFS_TREE_VARIDXKEYS ||
319 node->tree->cnid == HFSPLUS_ATTR_CNID)
318 tmp = hfs_bnode_read_u16(node, key_off) + 2; 320 tmp = hfs_bnode_read_u16(node, key_off) + 2;
319 else 321 else
320 tmp = node->tree->max_key_len + 2; 322 tmp = node->tree->max_key_len + 2;
@@ -646,6 +648,8 @@ void hfs_bnode_put(struct hfs_bnode *node)
646 if (test_bit(HFS_BNODE_DELETED, &node->flags)) { 648 if (test_bit(HFS_BNODE_DELETED, &node->flags)) {
647 hfs_bnode_unhash(node); 649 hfs_bnode_unhash(node);
648 spin_unlock(&tree->hash_lock); 650 spin_unlock(&tree->hash_lock);
651 hfs_bnode_clear(node, 0,
652 PAGE_CACHE_SIZE * tree->pages_per_bnode);
649 hfs_bmap_free(node); 653 hfs_bmap_free(node);
650 hfs_bnode_free(node); 654 hfs_bnode_free(node);
651 return; 655 return;
diff --git a/fs/hfsplus/brec.c b/fs/hfsplus/brec.c
index 2a734cfccc92..298d4e45604b 100644
--- a/fs/hfsplus/brec.c
+++ b/fs/hfsplus/brec.c
@@ -36,7 +36,8 @@ u16 hfs_brec_keylen(struct hfs_bnode *node, u16 rec)
36 return 0; 36 return 0;
37 37
38 if ((node->type == HFS_NODE_INDEX) && 38 if ((node->type == HFS_NODE_INDEX) &&
39 !(node->tree->attributes & HFS_TREE_VARIDXKEYS)) { 39 !(node->tree->attributes & HFS_TREE_VARIDXKEYS) &&
40 (node->tree->cnid != HFSPLUS_ATTR_CNID)) {
40 retval = node->tree->max_key_len + 2; 41 retval = node->tree->max_key_len + 2;
41 } else { 42 } else {
42 recoff = hfs_bnode_read_u16(node, 43 recoff = hfs_bnode_read_u16(node,
@@ -151,12 +152,13 @@ skip:
151 152
152 /* get index key */ 153 /* get index key */
153 hfs_bnode_read_key(new_node, fd->search_key, 14); 154 hfs_bnode_read_key(new_node, fd->search_key, 14);
154 __hfs_brec_find(fd->bnode, fd); 155 __hfs_brec_find(fd->bnode, fd, hfs_find_rec_by_key);
155 156
156 hfs_bnode_put(new_node); 157 hfs_bnode_put(new_node);
157 new_node = NULL; 158 new_node = NULL;
158 159
159 if (tree->attributes & HFS_TREE_VARIDXKEYS) 160 if ((tree->attributes & HFS_TREE_VARIDXKEYS) ||
161 (tree->cnid == HFSPLUS_ATTR_CNID))
160 key_len = be16_to_cpu(fd->search_key->key_len) + 2; 162 key_len = be16_to_cpu(fd->search_key->key_len) + 2;
161 else { 163 else {
162 fd->search_key->key_len = 164 fd->search_key->key_len =
@@ -201,7 +203,7 @@ again:
201 hfs_bnode_put(node); 203 hfs_bnode_put(node);
202 node = fd->bnode = parent; 204 node = fd->bnode = parent;
203 205
204 __hfs_brec_find(node, fd); 206 __hfs_brec_find(node, fd, hfs_find_rec_by_key);
205 goto again; 207 goto again;
206 } 208 }
207 hfs_bnode_write_u16(node, 209 hfs_bnode_write_u16(node,
@@ -367,12 +369,13 @@ again:
367 parent = hfs_bnode_find(tree, node->parent); 369 parent = hfs_bnode_find(tree, node->parent);
368 if (IS_ERR(parent)) 370 if (IS_ERR(parent))
369 return PTR_ERR(parent); 371 return PTR_ERR(parent);
370 __hfs_brec_find(parent, fd); 372 __hfs_brec_find(parent, fd, hfs_find_rec_by_key);
371 hfs_bnode_dump(parent); 373 hfs_bnode_dump(parent);
372 rec = fd->record; 374 rec = fd->record;
373 375
374 /* size difference between old and new key */ 376 /* size difference between old and new key */
375 if (tree->attributes & HFS_TREE_VARIDXKEYS) 377 if ((tree->attributes & HFS_TREE_VARIDXKEYS) ||
378 (tree->cnid == HFSPLUS_ATTR_CNID))
376 newkeylen = hfs_bnode_read_u16(node, 14) + 2; 379 newkeylen = hfs_bnode_read_u16(node, 14) + 2;
377 else 380 else
378 fd->keylength = newkeylen = tree->max_key_len + 2; 381 fd->keylength = newkeylen = tree->max_key_len + 2;
@@ -427,7 +430,7 @@ skip:
427 hfs_bnode_read_key(new_node, fd->search_key, 14); 430 hfs_bnode_read_key(new_node, fd->search_key, 14);
428 cnid = cpu_to_be32(new_node->this); 431 cnid = cpu_to_be32(new_node->this);
429 432
430 __hfs_brec_find(fd->bnode, fd); 433 __hfs_brec_find(fd->bnode, fd, hfs_find_rec_by_key);
431 hfs_brec_insert(fd, &cnid, sizeof(cnid)); 434 hfs_brec_insert(fd, &cnid, sizeof(cnid));
432 hfs_bnode_put(fd->bnode); 435 hfs_bnode_put(fd->bnode);
433 hfs_bnode_put(new_node); 436 hfs_bnode_put(new_node);
@@ -495,13 +498,15 @@ static int hfs_btree_inc_height(struct hfs_btree *tree)
495 /* insert old root idx into new root */ 498 /* insert old root idx into new root */
496 node->parent = tree->root; 499 node->parent = tree->root;
497 if (node->type == HFS_NODE_LEAF || 500 if (node->type == HFS_NODE_LEAF ||
498 tree->attributes & HFS_TREE_VARIDXKEYS) 501 tree->attributes & HFS_TREE_VARIDXKEYS ||
502 tree->cnid == HFSPLUS_ATTR_CNID)
499 key_size = hfs_bnode_read_u16(node, 14) + 2; 503 key_size = hfs_bnode_read_u16(node, 14) + 2;
500 else 504 else
501 key_size = tree->max_key_len + 2; 505 key_size = tree->max_key_len + 2;
502 hfs_bnode_copy(new_node, 14, node, 14, key_size); 506 hfs_bnode_copy(new_node, 14, node, 14, key_size);
503 507
504 if (!(tree->attributes & HFS_TREE_VARIDXKEYS)) { 508 if (!(tree->attributes & HFS_TREE_VARIDXKEYS) &&
509 (tree->cnid != HFSPLUS_ATTR_CNID)) {
505 key_size = tree->max_key_len + 2; 510 key_size = tree->max_key_len + 2;
506 hfs_bnode_write_u16(new_node, 14, tree->max_key_len); 511 hfs_bnode_write_u16(new_node, 14, tree->max_key_len);
507 } 512 }
diff --git a/fs/hfsplus/btree.c b/fs/hfsplus/btree.c
index 685d07d0ed18..efb689c21a95 100644
--- a/fs/hfsplus/btree.c
+++ b/fs/hfsplus/btree.c
@@ -98,6 +98,14 @@ struct hfs_btree *hfs_btree_open(struct super_block *sb, u32 id)
98 set_bit(HFSPLUS_SB_CASEFOLD, &HFSPLUS_SB(sb)->flags); 98 set_bit(HFSPLUS_SB_CASEFOLD, &HFSPLUS_SB(sb)->flags);
99 } 99 }
100 break; 100 break;
101 case HFSPLUS_ATTR_CNID:
102 if (tree->max_key_len != HFSPLUS_ATTR_KEYLEN - sizeof(u16)) {
103 printk(KERN_ERR "hfs: invalid attributes max_key_len %d\n",
104 tree->max_key_len);
105 goto fail_page;
106 }
107 tree->keycmp = hfsplus_attr_bin_cmp_key;
108 break;
101 default: 109 default:
102 printk(KERN_ERR "hfs: unknown B*Tree requested\n"); 110 printk(KERN_ERR "hfs: unknown B*Tree requested\n");
103 goto fail_page; 111 goto fail_page;
diff --git a/fs/hfsplus/catalog.c b/fs/hfsplus/catalog.c
index 798d9c4c5e71..840d71edd193 100644
--- a/fs/hfsplus/catalog.c
+++ b/fs/hfsplus/catalog.c
@@ -45,7 +45,8 @@ void hfsplus_cat_build_key(struct super_block *sb, hfsplus_btree_key *key,
45 45
46 key->cat.parent = cpu_to_be32(parent); 46 key->cat.parent = cpu_to_be32(parent);
47 if (str) { 47 if (str) {
48 hfsplus_asc2uni(sb, &key->cat.name, str->name, str->len); 48 hfsplus_asc2uni(sb, &key->cat.name, HFSPLUS_MAX_STRLEN,
49 str->name, str->len);
49 len = be16_to_cpu(key->cat.name.length); 50 len = be16_to_cpu(key->cat.name.length);
50 } else { 51 } else {
51 key->cat.name.length = 0; 52 key->cat.name.length = 0;
@@ -167,7 +168,8 @@ static int hfsplus_fill_cat_thread(struct super_block *sb,
167 entry->type = cpu_to_be16(type); 168 entry->type = cpu_to_be16(type);
168 entry->thread.reserved = 0; 169 entry->thread.reserved = 0;
169 entry->thread.parentID = cpu_to_be32(parentid); 170 entry->thread.parentID = cpu_to_be32(parentid);
170 hfsplus_asc2uni(sb, &entry->thread.nodeName, str->name, str->len); 171 hfsplus_asc2uni(sb, &entry->thread.nodeName, HFSPLUS_MAX_STRLEN,
172 str->name, str->len);
171 return 10 + be16_to_cpu(entry->thread.nodeName.length) * 2; 173 return 10 + be16_to_cpu(entry->thread.nodeName.length) * 2;
172} 174}
173 175
@@ -198,7 +200,7 @@ int hfsplus_find_cat(struct super_block *sb, u32 cnid,
198 hfsplus_cat_build_key_uni(fd->search_key, 200 hfsplus_cat_build_key_uni(fd->search_key,
199 be32_to_cpu(tmp.thread.parentID), 201 be32_to_cpu(tmp.thread.parentID),
200 &tmp.thread.nodeName); 202 &tmp.thread.nodeName);
201 return hfs_brec_find(fd); 203 return hfs_brec_find(fd, hfs_find_rec_by_key);
202} 204}
203 205
204int hfsplus_create_cat(u32 cnid, struct inode *dir, 206int hfsplus_create_cat(u32 cnid, struct inode *dir,
@@ -221,7 +223,7 @@ int hfsplus_create_cat(u32 cnid, struct inode *dir,
221 S_ISDIR(inode->i_mode) ? 223 S_ISDIR(inode->i_mode) ?
222 HFSPLUS_FOLDER_THREAD : HFSPLUS_FILE_THREAD, 224 HFSPLUS_FOLDER_THREAD : HFSPLUS_FILE_THREAD,
223 dir->i_ino, str); 225 dir->i_ino, str);
224 err = hfs_brec_find(&fd); 226 err = hfs_brec_find(&fd, hfs_find_rec_by_key);
225 if (err != -ENOENT) { 227 if (err != -ENOENT) {
226 if (!err) 228 if (!err)
227 err = -EEXIST; 229 err = -EEXIST;
@@ -233,7 +235,7 @@ int hfsplus_create_cat(u32 cnid, struct inode *dir,
233 235
234 hfsplus_cat_build_key(sb, fd.search_key, dir->i_ino, str); 236 hfsplus_cat_build_key(sb, fd.search_key, dir->i_ino, str);
235 entry_size = hfsplus_cat_build_record(&entry, cnid, inode); 237 entry_size = hfsplus_cat_build_record(&entry, cnid, inode);
236 err = hfs_brec_find(&fd); 238 err = hfs_brec_find(&fd, hfs_find_rec_by_key);
237 if (err != -ENOENT) { 239 if (err != -ENOENT) {
238 /* panic? */ 240 /* panic? */
239 if (!err) 241 if (!err)
@@ -253,7 +255,7 @@ int hfsplus_create_cat(u32 cnid, struct inode *dir,
253 255
254err1: 256err1:
255 hfsplus_cat_build_key(sb, fd.search_key, cnid, NULL); 257 hfsplus_cat_build_key(sb, fd.search_key, cnid, NULL);
256 if (!hfs_brec_find(&fd)) 258 if (!hfs_brec_find(&fd, hfs_find_rec_by_key))
257 hfs_brec_remove(&fd); 259 hfs_brec_remove(&fd);
258err2: 260err2:
259 hfs_find_exit(&fd); 261 hfs_find_exit(&fd);
@@ -279,7 +281,7 @@ int hfsplus_delete_cat(u32 cnid, struct inode *dir, struct qstr *str)
279 int len; 281 int len;
280 282
281 hfsplus_cat_build_key(sb, fd.search_key, cnid, NULL); 283 hfsplus_cat_build_key(sb, fd.search_key, cnid, NULL);
282 err = hfs_brec_find(&fd); 284 err = hfs_brec_find(&fd, hfs_find_rec_by_key);
283 if (err) 285 if (err)
284 goto out; 286 goto out;
285 287
@@ -296,7 +298,7 @@ int hfsplus_delete_cat(u32 cnid, struct inode *dir, struct qstr *str)
296 } else 298 } else
297 hfsplus_cat_build_key(sb, fd.search_key, dir->i_ino, str); 299 hfsplus_cat_build_key(sb, fd.search_key, dir->i_ino, str);
298 300
299 err = hfs_brec_find(&fd); 301 err = hfs_brec_find(&fd, hfs_find_rec_by_key);
300 if (err) 302 if (err)
301 goto out; 303 goto out;
302 304
@@ -326,7 +328,7 @@ int hfsplus_delete_cat(u32 cnid, struct inode *dir, struct qstr *str)
326 goto out; 328 goto out;
327 329
328 hfsplus_cat_build_key(sb, fd.search_key, cnid, NULL); 330 hfsplus_cat_build_key(sb, fd.search_key, cnid, NULL);
329 err = hfs_brec_find(&fd); 331 err = hfs_brec_find(&fd, hfs_find_rec_by_key);
330 if (err) 332 if (err)
331 goto out; 333 goto out;
332 334
@@ -337,6 +339,12 @@ int hfsplus_delete_cat(u32 cnid, struct inode *dir, struct qstr *str)
337 dir->i_size--; 339 dir->i_size--;
338 dir->i_mtime = dir->i_ctime = CURRENT_TIME_SEC; 340 dir->i_mtime = dir->i_ctime = CURRENT_TIME_SEC;
339 hfsplus_mark_inode_dirty(dir, HFSPLUS_I_CAT_DIRTY); 341 hfsplus_mark_inode_dirty(dir, HFSPLUS_I_CAT_DIRTY);
342
343 if (type == HFSPLUS_FILE || type == HFSPLUS_FOLDER) {
344 if (HFSPLUS_SB(sb)->attr_tree)
345 hfsplus_delete_all_attrs(dir, cnid);
346 }
347
340out: 348out:
341 hfs_find_exit(&fd); 349 hfs_find_exit(&fd);
342 350
@@ -363,7 +371,7 @@ int hfsplus_rename_cat(u32 cnid,
363 371
364 /* find the old dir entry and read the data */ 372 /* find the old dir entry and read the data */
365 hfsplus_cat_build_key(sb, src_fd.search_key, src_dir->i_ino, src_name); 373 hfsplus_cat_build_key(sb, src_fd.search_key, src_dir->i_ino, src_name);
366 err = hfs_brec_find(&src_fd); 374 err = hfs_brec_find(&src_fd, hfs_find_rec_by_key);
367 if (err) 375 if (err)
368 goto out; 376 goto out;
369 if (src_fd.entrylength > sizeof(entry) || src_fd.entrylength < 0) { 377 if (src_fd.entrylength > sizeof(entry) || src_fd.entrylength < 0) {
@@ -376,7 +384,7 @@ int hfsplus_rename_cat(u32 cnid,
376 384
377 /* create new dir entry with the data from the old entry */ 385 /* create new dir entry with the data from the old entry */
378 hfsplus_cat_build_key(sb, dst_fd.search_key, dst_dir->i_ino, dst_name); 386 hfsplus_cat_build_key(sb, dst_fd.search_key, dst_dir->i_ino, dst_name);
379 err = hfs_brec_find(&dst_fd); 387 err = hfs_brec_find(&dst_fd, hfs_find_rec_by_key);
380 if (err != -ENOENT) { 388 if (err != -ENOENT) {
381 if (!err) 389 if (!err)
382 err = -EEXIST; 390 err = -EEXIST;
@@ -391,7 +399,7 @@ int hfsplus_rename_cat(u32 cnid,
391 399
392 /* finally remove the old entry */ 400 /* finally remove the old entry */
393 hfsplus_cat_build_key(sb, src_fd.search_key, src_dir->i_ino, src_name); 401 hfsplus_cat_build_key(sb, src_fd.search_key, src_dir->i_ino, src_name);
394 err = hfs_brec_find(&src_fd); 402 err = hfs_brec_find(&src_fd, hfs_find_rec_by_key);
395 if (err) 403 if (err)
396 goto out; 404 goto out;
397 err = hfs_brec_remove(&src_fd); 405 err = hfs_brec_remove(&src_fd);
@@ -402,7 +410,7 @@ int hfsplus_rename_cat(u32 cnid,
402 410
403 /* remove old thread entry */ 411 /* remove old thread entry */
404 hfsplus_cat_build_key(sb, src_fd.search_key, cnid, NULL); 412 hfsplus_cat_build_key(sb, src_fd.search_key, cnid, NULL);
405 err = hfs_brec_find(&src_fd); 413 err = hfs_brec_find(&src_fd, hfs_find_rec_by_key);
406 if (err) 414 if (err)
407 goto out; 415 goto out;
408 type = hfs_bnode_read_u16(src_fd.bnode, src_fd.entryoffset); 416 type = hfs_bnode_read_u16(src_fd.bnode, src_fd.entryoffset);
@@ -414,7 +422,7 @@ int hfsplus_rename_cat(u32 cnid,
414 hfsplus_cat_build_key(sb, dst_fd.search_key, cnid, NULL); 422 hfsplus_cat_build_key(sb, dst_fd.search_key, cnid, NULL);
415 entry_size = hfsplus_fill_cat_thread(sb, &entry, type, 423 entry_size = hfsplus_fill_cat_thread(sb, &entry, type,
416 dst_dir->i_ino, dst_name); 424 dst_dir->i_ino, dst_name);
417 err = hfs_brec_find(&dst_fd); 425 err = hfs_brec_find(&dst_fd, hfs_find_rec_by_key);
418 if (err != -ENOENT) { 426 if (err != -ENOENT) {
419 if (!err) 427 if (!err)
420 err = -EEXIST; 428 err = -EEXIST;
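These catalog.c changes exist because hfsplus_asc2uni() gained an explicit output-capacity parameter (see the prototype change in the hfsplus_fs.h hunk below), letting one conversion routine serve both 255-character catalog names and 127-character attribute names. A toy model of such a bounded conversion; model_asc2uni is an illustration handling only the ASCII subset, whereas the real routine also performs NLS decomposition:

#include <stdio.h>
#include <stdint.h>

#define HFSPLUS_MAX_STRLEN      255   /* catalog names */
#define HFSPLUS_ATTR_MAX_STRLEN 127   /* extended attribute names */

/* toy bounded conversion; ASCII only, no NLS decomposition */
static int model_asc2uni(uint16_t *ustr, int max_unistr_len,
			 const char *astr, int len)
{
	int i;

	if (len > max_unistr_len)
		return -1;   /* would overflow the caller's unistr */
	for (i = 0; i < len; i++)
		ustr[i] = (uint16_t)(unsigned char)astr[i];
	return i;
}

int main(void)
{
	uint16_t cat_name[HFSPLUS_MAX_STRLEN];
	uint16_t attr_name[HFSPLUS_ATTR_MAX_STRLEN];

	/* same routine, two different capacities */
	printf("%d\n", model_asc2uni(cat_name, HFSPLUS_MAX_STRLEN,
				     "file.txt", 8));
	printf("%d\n", model_asc2uni(attr_name, HFSPLUS_ATTR_MAX_STRLEN,
				     "user.comment", 12));
	return 0;
}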
diff --git a/fs/hfsplus/dir.c b/fs/hfsplus/dir.c
index 074e04589248..031c24e50521 100644
--- a/fs/hfsplus/dir.c
+++ b/fs/hfsplus/dir.c
@@ -15,6 +15,7 @@
15 15
16#include "hfsplus_fs.h" 16#include "hfsplus_fs.h"
17#include "hfsplus_raw.h" 17#include "hfsplus_raw.h"
18#include "xattr.h"
18 19
19static inline void hfsplus_instantiate(struct dentry *dentry, 20static inline void hfsplus_instantiate(struct dentry *dentry,
20 struct inode *inode, u32 cnid) 21 struct inode *inode, u32 cnid)
@@ -138,7 +139,7 @@ static int hfsplus_readdir(struct file *filp, void *dirent, filldir_t filldir)
138 if (err) 139 if (err)
139 return err; 140 return err;
140 hfsplus_cat_build_key(sb, fd.search_key, inode->i_ino, NULL); 141 hfsplus_cat_build_key(sb, fd.search_key, inode->i_ino, NULL);
141 err = hfs_brec_find(&fd); 142 err = hfs_brec_find(&fd, hfs_find_rec_by_key);
142 if (err) 143 if (err)
143 goto out; 144 goto out;
144 145
@@ -421,6 +422,15 @@ static int hfsplus_symlink(struct inode *dir, struct dentry *dentry,
421 if (res) 422 if (res)
422 goto out_err; 423 goto out_err;
423 424
425 res = hfsplus_init_inode_security(inode, dir, &dentry->d_name);
426 if (res == -EOPNOTSUPP)
427 res = 0; /* Operation is not supported. */
428 else if (res) {
429 /* Try to delete anyway without error analysis. */
430 hfsplus_delete_cat(inode->i_ino, dir, &dentry->d_name);
431 goto out_err;
432 }
433
424 hfsplus_instantiate(dentry, inode, inode->i_ino); 434 hfsplus_instantiate(dentry, inode, inode->i_ino);
425 mark_inode_dirty(inode); 435 mark_inode_dirty(inode);
426 goto out; 436 goto out;
@@ -450,15 +460,26 @@ static int hfsplus_mknod(struct inode *dir, struct dentry *dentry,
450 init_special_inode(inode, mode, rdev); 460 init_special_inode(inode, mode, rdev);
451 461
452 res = hfsplus_create_cat(inode->i_ino, dir, &dentry->d_name, inode); 462 res = hfsplus_create_cat(inode->i_ino, dir, &dentry->d_name, inode);
453 if (res) { 463 if (res)
454 clear_nlink(inode); 464 goto failed_mknod;
455 hfsplus_delete_inode(inode); 465
456 iput(inode); 466 res = hfsplus_init_inode_security(inode, dir, &dentry->d_name);
457 goto out; 467 if (res == -EOPNOTSUPP)
468 res = 0; /* Operation is not supported. */
469 else if (res) {
470 /* Try to delete anyway without error analysis. */
471 hfsplus_delete_cat(inode->i_ino, dir, &dentry->d_name);
472 goto failed_mknod;
458 } 473 }
459 474
460 hfsplus_instantiate(dentry, inode, inode->i_ino); 475 hfsplus_instantiate(dentry, inode, inode->i_ino);
461 mark_inode_dirty(inode); 476 mark_inode_dirty(inode);
477 goto out;
478
479failed_mknod:
480 clear_nlink(inode);
481 hfsplus_delete_inode(inode);
482 iput(inode);
462out: 483out:
463 mutex_unlock(&sbi->vh_mutex); 484 mutex_unlock(&sbi->vh_mutex);
464 return res; 485 return res;
@@ -499,15 +520,19 @@ static int hfsplus_rename(struct inode *old_dir, struct dentry *old_dentry,
499} 520}
500 521
501const struct inode_operations hfsplus_dir_inode_operations = { 522const struct inode_operations hfsplus_dir_inode_operations = {
502 .lookup = hfsplus_lookup, 523 .lookup = hfsplus_lookup,
503 .create = hfsplus_create, 524 .create = hfsplus_create,
504 .link = hfsplus_link, 525 .link = hfsplus_link,
505 .unlink = hfsplus_unlink, 526 .unlink = hfsplus_unlink,
506 .mkdir = hfsplus_mkdir, 527 .mkdir = hfsplus_mkdir,
507 .rmdir = hfsplus_rmdir, 528 .rmdir = hfsplus_rmdir,
508 .symlink = hfsplus_symlink, 529 .symlink = hfsplus_symlink,
509 .mknod = hfsplus_mknod, 530 .mknod = hfsplus_mknod,
510 .rename = hfsplus_rename, 531 .rename = hfsplus_rename,
532 .setxattr = generic_setxattr,
533 .getxattr = generic_getxattr,
534 .listxattr = hfsplus_listxattr,
535 .removexattr = hfsplus_removexattr,
511}; 536};
512 537
513const struct file_operations hfsplus_dir_operations = { 538const struct file_operations hfsplus_dir_operations = {
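The dir.c (and, below, inode.c) operation tables now route setxattr/getxattr through the VFS generics, which dispatch by attribute-name prefix through the handler table a filesystem registers in sb->s_xattr; the concrete hfsplus handlers live in the new xattr*.c files added to the Makefile above but not shown in this excerpt. A schematic user-space model of that prefix dispatch; all demo_* names are invented for illustration:

#include <stdio.h>
#include <string.h>

struct demo_xattr_handler {
	const char *prefix;
	int (*set)(const char *rest, const void *value, size_t size);
};

static int demo_user_set(const char *rest, const void *value, size_t size)
{
	printf("user handler: %s (%zu bytes)\n", rest, size);
	return 0;
}

static const struct demo_xattr_handler demo_user_handler = {
	.prefix = "user.",
	.set = demo_user_set,
};

static const struct demo_xattr_handler *demo_handlers[] = {
	&demo_user_handler,
	NULL,
};

/* roughly what generic_setxattr does: match a prefix, call the handler */
static int demo_setxattr(const char *name, const void *value, size_t size)
{
	const struct demo_xattr_handler *const *h;

	for (h = demo_handlers; *h; h++) {
		size_t n = strlen((*h)->prefix);

		if (!strncmp(name, (*h)->prefix, n))
			return (*h)->set(name + n, value, size);
	}
	return -1; /* -EOPNOTSUPP in the kernel */
}

int main(void)
{
	return demo_setxattr("user.comment", "hi", 2);
}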
diff --git a/fs/hfsplus/extents.c b/fs/hfsplus/extents.c
index eba76eab6d62..a94f0f779d5e 100644
--- a/fs/hfsplus/extents.c
+++ b/fs/hfsplus/extents.c
@@ -95,7 +95,7 @@ static void __hfsplus_ext_write_extent(struct inode *inode,
95 HFSPLUS_IS_RSRC(inode) ? 95 HFSPLUS_IS_RSRC(inode) ?
96 HFSPLUS_TYPE_RSRC : HFSPLUS_TYPE_DATA); 96 HFSPLUS_TYPE_RSRC : HFSPLUS_TYPE_DATA);
97 97
98 res = hfs_brec_find(fd); 98 res = hfs_brec_find(fd, hfs_find_rec_by_key);
99 if (hip->extent_state & HFSPLUS_EXT_NEW) { 99 if (hip->extent_state & HFSPLUS_EXT_NEW) {
100 if (res != -ENOENT) 100 if (res != -ENOENT)
101 return; 101 return;
@@ -154,7 +154,7 @@ static inline int __hfsplus_ext_read_extent(struct hfs_find_data *fd,
154 154
155 hfsplus_ext_build_key(fd->search_key, cnid, block, type); 155 hfsplus_ext_build_key(fd->search_key, cnid, block, type);
156 fd->key->ext.cnid = 0; 156 fd->key->ext.cnid = 0;
157 res = hfs_brec_find(fd); 157 res = hfs_brec_find(fd, hfs_find_rec_by_key);
158 if (res && res != -ENOENT) 158 if (res && res != -ENOENT)
159 return res; 159 return res;
160 if (fd->key->ext.cnid != fd->search_key->ext.cnid || 160 if (fd->key->ext.cnid != fd->search_key->ext.cnid ||
diff --git a/fs/hfsplus/hfsplus_fs.h b/fs/hfsplus/hfsplus_fs.h
index a6da86b1b4c1..05b11f36024c 100644
--- a/fs/hfsplus/hfsplus_fs.h
+++ b/fs/hfsplus/hfsplus_fs.h
@@ -23,6 +23,7 @@
23#define DBG_SUPER 0x00000010 23#define DBG_SUPER 0x00000010
24#define DBG_EXTENT 0x00000020 24#define DBG_EXTENT 0x00000020
25#define DBG_BITMAP 0x00000040 25#define DBG_BITMAP 0x00000040
26#define DBG_ATTR_MOD 0x00000080
26 27
27#if 0 28#if 0
28#define DBG_MASK (DBG_EXTENT|DBG_INODE|DBG_BNODE_MOD) 29#define DBG_MASK (DBG_EXTENT|DBG_INODE|DBG_BNODE_MOD)
@@ -46,6 +47,13 @@ typedef int (*btree_keycmp)(const hfsplus_btree_key *,
46 47
47#define NODE_HASH_SIZE 256 48#define NODE_HASH_SIZE 256
48 49
50/* B-tree mutex nested subclasses */
51enum hfsplus_btree_mutex_classes {
52 CATALOG_BTREE_MUTEX,
53 EXTENTS_BTREE_MUTEX,
54 ATTR_BTREE_MUTEX,
55};
56
49/* An HFS+ BTree held in memory */ 57/* An HFS+ BTree held in memory */
50struct hfs_btree { 58struct hfs_btree {
51 struct super_block *sb; 59 struct super_block *sb;
@@ -223,6 +231,7 @@ struct hfsplus_inode_info {
223#define HFSPLUS_I_CAT_DIRTY 1 /* has changes in the catalog tree */ 231#define HFSPLUS_I_CAT_DIRTY 1 /* has changes in the catalog tree */
224#define HFSPLUS_I_EXT_DIRTY 2 /* has changes in the extent tree */ 232#define HFSPLUS_I_EXT_DIRTY 2 /* has changes in the extent tree */
225#define HFSPLUS_I_ALLOC_DIRTY 3 /* has changes in the allocation file */ 233#define HFSPLUS_I_ALLOC_DIRTY 3 /* has changes in the allocation file */
234#define HFSPLUS_I_ATTR_DIRTY 4 /* has changes in the attributes tree */
226 235
227#define HFSPLUS_IS_RSRC(inode) \ 236#define HFSPLUS_IS_RSRC(inode) \
228 test_bit(HFSPLUS_I_RSRC, &HFSPLUS_I(inode)->flags) 237 test_bit(HFSPLUS_I_RSRC, &HFSPLUS_I(inode)->flags)
@@ -302,7 +311,7 @@ static inline unsigned short hfsplus_min_io_size(struct super_block *sb)
302#define hfs_brec_remove hfsplus_brec_remove 311#define hfs_brec_remove hfsplus_brec_remove
303#define hfs_find_init hfsplus_find_init 312#define hfs_find_init hfsplus_find_init
304#define hfs_find_exit hfsplus_find_exit 313#define hfs_find_exit hfsplus_find_exit
305#define __hfs_brec_find __hplusfs_brec_find 314#define __hfs_brec_find __hfsplus_brec_find
306#define hfs_brec_find hfsplus_brec_find 315#define hfs_brec_find hfsplus_brec_find
307#define hfs_brec_read hfsplus_brec_read 316#define hfs_brec_read hfsplus_brec_read
308#define hfs_brec_goto hfsplus_brec_goto 317#define hfs_brec_goto hfsplus_brec_goto
@@ -324,10 +333,33 @@ static inline unsigned short hfsplus_min_io_size(struct super_block *sb)
324 */ 333 */
325#define HFSPLUS_IOC_BLESS _IO('h', 0x80) 334#define HFSPLUS_IOC_BLESS _IO('h', 0x80)
326 335
336typedef int (*search_strategy_t)(struct hfs_bnode *,
337 struct hfs_find_data *,
338 int *, int *, int *);
339
327/* 340/*
328 * Functions in any *.c used in other files 341 * Functions in any *.c used in other files
329 */ 342 */
330 343
344/* attributes.c */
345int hfsplus_create_attr_tree_cache(void);
346void hfsplus_destroy_attr_tree_cache(void);
347hfsplus_attr_entry *hfsplus_alloc_attr_entry(void);
348void hfsplus_destroy_attr_entry(hfsplus_attr_entry *entry_p);
349int hfsplus_attr_bin_cmp_key(const hfsplus_btree_key *,
350 const hfsplus_btree_key *);
351int hfsplus_attr_build_key(struct super_block *, hfsplus_btree_key *,
352 u32, const char *);
353void hfsplus_attr_build_key_uni(hfsplus_btree_key *key,
354 u32 cnid,
355 struct hfsplus_attr_unistr *name);
356int hfsplus_find_attr(struct super_block *, u32,
357 const char *, struct hfs_find_data *);
358int hfsplus_attr_exists(struct inode *inode, const char *name);
359int hfsplus_create_attr(struct inode *, const char *, const void *, size_t);
360int hfsplus_delete_attr(struct inode *, const char *);
361int hfsplus_delete_all_attrs(struct inode *dir, u32 cnid);
362
331/* bitmap.c */ 363/* bitmap.c */
332int hfsplus_block_allocate(struct super_block *, u32, u32, u32 *); 364int hfsplus_block_allocate(struct super_block *, u32, u32, u32 *);
333int hfsplus_block_free(struct super_block *, u32, u32); 365int hfsplus_block_free(struct super_block *, u32, u32);
@@ -369,8 +401,15 @@ int hfs_brec_remove(struct hfs_find_data *);
369/* bfind.c */ 401/* bfind.c */
370int hfs_find_init(struct hfs_btree *, struct hfs_find_data *); 402int hfs_find_init(struct hfs_btree *, struct hfs_find_data *);
371void hfs_find_exit(struct hfs_find_data *); 403void hfs_find_exit(struct hfs_find_data *);
372int __hfs_brec_find(struct hfs_bnode *, struct hfs_find_data *); 404int hfs_find_1st_rec_by_cnid(struct hfs_bnode *,
373int hfs_brec_find(struct hfs_find_data *); 405 struct hfs_find_data *,
406 int *, int *, int *);
407int hfs_find_rec_by_key(struct hfs_bnode *,
408 struct hfs_find_data *,
409 int *, int *, int *);
410int __hfs_brec_find(struct hfs_bnode *, struct hfs_find_data *,
411 search_strategy_t);
412int hfs_brec_find(struct hfs_find_data *, search_strategy_t);
374int hfs_brec_read(struct hfs_find_data *, void *, int); 413int hfs_brec_read(struct hfs_find_data *, void *, int);
375int hfs_brec_goto(struct hfs_find_data *, int); 414int hfs_brec_goto(struct hfs_find_data *, int);
376 415
@@ -417,11 +456,6 @@ int hfsplus_file_fsync(struct file *file, loff_t start, loff_t end,
417 456
418/* ioctl.c */ 457/* ioctl.c */
419long hfsplus_ioctl(struct file *filp, unsigned int cmd, unsigned long arg); 458long hfsplus_ioctl(struct file *filp, unsigned int cmd, unsigned long arg);
420int hfsplus_setxattr(struct dentry *dentry, const char *name,
421 const void *value, size_t size, int flags);
422ssize_t hfsplus_getxattr(struct dentry *dentry, const char *name,
423 void *value, size_t size);
424ssize_t hfsplus_listxattr(struct dentry *dentry, char *buffer, size_t size);
425 459
426/* options.c */ 460/* options.c */
427int hfsplus_parse_options(char *, struct hfsplus_sb_info *); 461int hfsplus_parse_options(char *, struct hfsplus_sb_info *);
@@ -446,7 +480,7 @@ int hfsplus_strcmp(const struct hfsplus_unistr *,
446int hfsplus_uni2asc(struct super_block *, 480int hfsplus_uni2asc(struct super_block *,
447 const struct hfsplus_unistr *, char *, int *); 481 const struct hfsplus_unistr *, char *, int *);
448int hfsplus_asc2uni(struct super_block *, 482int hfsplus_asc2uni(struct super_block *,
449 struct hfsplus_unistr *, const char *, int); 483 struct hfsplus_unistr *, int, const char *, int);
450int hfsplus_hash_dentry(const struct dentry *dentry, 484int hfsplus_hash_dentry(const struct dentry *dentry,
451 const struct inode *inode, struct qstr *str); 485 const struct inode *inode, struct qstr *str);
452int hfsplus_compare_dentry(const struct dentry *parent, 486int hfsplus_compare_dentry(const struct dentry *parent,
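The new enum at the top of this hunk pairs with the mutex_lock_nested() calls in the bfind.c hunk earlier: all three hfs_btree instances share one lock class, so without per-tree subclasses lockdep would flag the legitimate catalog-then-attributes nesting (e.g. hfsplus_delete_cat() calling hfsplus_delete_all_attrs()) as recursive locking of a single lock. A kernel-context usage sketch, not a runnable user-space program:

#include <linux/mutex.h>

/* uses the enum hfsplus_btree_mutex_classes added in the hunk above */

struct demo_tree { struct mutex tree_lock; };

static void demo_catalog_then_attr(struct demo_tree *cat_tree,
				   struct demo_tree *attr_tree)
{
	/* distinct subclasses: lockdep accepts this nesting */
	mutex_lock_nested(&cat_tree->tree_lock, CATALOG_BTREE_MUTEX);
	mutex_lock_nested(&attr_tree->tree_lock, ATTR_BTREE_MUTEX);

	/* ... delete the catalog record, then its attribute records ... */

	mutex_unlock(&attr_tree->tree_lock);
	mutex_unlock(&cat_tree->tree_lock);
}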
diff --git a/fs/hfsplus/hfsplus_raw.h b/fs/hfsplus/hfsplus_raw.h
index 921967e5abb1..452ede01b036 100644
--- a/fs/hfsplus/hfsplus_raw.h
+++ b/fs/hfsplus/hfsplus_raw.h
@@ -52,13 +52,23 @@
52typedef __be32 hfsplus_cnid; 52typedef __be32 hfsplus_cnid;
53typedef __be16 hfsplus_unichr; 53typedef __be16 hfsplus_unichr;
54 54
55#define HFSPLUS_MAX_STRLEN 255
56#define HFSPLUS_ATTR_MAX_STRLEN 127
57
55/* A "string" as used in filenames, etc. */ 58/* A "string" as used in filenames, etc. */
56struct hfsplus_unistr { 59struct hfsplus_unistr {
57 __be16 length; 60 __be16 length;
58 hfsplus_unichr unicode[255]; 61 hfsplus_unichr unicode[HFSPLUS_MAX_STRLEN];
59} __packed; 62} __packed;
60 63
61#define HFSPLUS_MAX_STRLEN 255 64/*
65 * A "string" as used in the attributes file
66 * for the name of an extended attribute.
67 */
68struct hfsplus_attr_unistr {
69 __be16 length;
70 hfsplus_unichr unicode[HFSPLUS_ATTR_MAX_STRLEN];
71} __packed;
62 72
63/* POSIX permissions */ 73/* POSIX permissions */
64struct hfsplus_perm { 74struct hfsplus_perm {
@@ -291,6 +301,8 @@ struct hfsplus_cat_file {
291/* File attribute bits */ 301/* File attribute bits */
292#define HFSPLUS_FILE_LOCKED 0x0001 302#define HFSPLUS_FILE_LOCKED 0x0001
293#define HFSPLUS_FILE_THREAD_EXISTS 0x0002 303#define HFSPLUS_FILE_THREAD_EXISTS 0x0002
304#define HFSPLUS_XATTR_EXISTS 0x0004
305#define HFSPLUS_ACL_EXISTS 0x0008
294 306
295/* HFS+ catalog thread (part of a cat_entry) */ 307/* HFS+ catalog thread (part of a cat_entry) */
296struct hfsplus_cat_thread { 308struct hfsplus_cat_thread {
@@ -327,11 +339,63 @@ struct hfsplus_ext_key {
327 339
328#define HFSPLUS_EXT_KEYLEN sizeof(struct hfsplus_ext_key) 340#define HFSPLUS_EXT_KEYLEN sizeof(struct hfsplus_ext_key)
329 341
342#define HFSPLUS_XATTR_FINDER_INFO_NAME "com.apple.FinderInfo"
343#define HFSPLUS_XATTR_ACL_NAME "com.apple.system.Security"
344
345#define HFSPLUS_ATTR_INLINE_DATA 0x10
346#define HFSPLUS_ATTR_FORK_DATA 0x20
347#define HFSPLUS_ATTR_EXTENTS 0x30
348
349/* HFS+ attributes tree key */
350struct hfsplus_attr_key {
351 __be16 key_len;
352 __be16 pad;
353 hfsplus_cnid cnid;
354 __be32 start_block;
355 struct hfsplus_attr_unistr key_name;
356} __packed;
357
358#define HFSPLUS_ATTR_KEYLEN sizeof(struct hfsplus_attr_key)
359
360/* HFS+ fork data attribute */
361struct hfsplus_attr_fork_data {
362 __be32 record_type;
363 __be32 reserved;
364 struct hfsplus_fork_raw the_fork;
365} __packed;
366
367/* HFS+ extension attribute */
368struct hfsplus_attr_extents {
369 __be32 record_type;
370 __be32 reserved;
371 struct hfsplus_extent extents;
372} __packed;
373
374#define HFSPLUS_MAX_INLINE_DATA_SIZE 3802
375
376/* HFS+ attribute inline data */
377struct hfsplus_attr_inline_data {
378 __be32 record_type;
379 __be32 reserved1;
380 u8 reserved2[6];
381 __be16 length;
382 u8 raw_bytes[HFSPLUS_MAX_INLINE_DATA_SIZE];
383} __packed;
384
385/* A data record in the attributes tree */
386typedef union {
387 __be32 record_type;
388 struct hfsplus_attr_fork_data fork_data;
389 struct hfsplus_attr_extents extents;
390 struct hfsplus_attr_inline_data inline_data;
391} __packed hfsplus_attr_entry;
392
330/* HFS+ generic BTree key */ 393/* HFS+ generic BTree key */
331typedef union { 394typedef union {
332 __be16 key_len; 395 __be16 key_len;
333 struct hfsplus_cat_key cat; 396 struct hfsplus_cat_key cat;
334 struct hfsplus_ext_key ext; 397 struct hfsplus_ext_key ext;
398 struct hfsplus_attr_key attr;
335} __packed hfsplus_btree_key; 399} __packed hfsplus_btree_key;
336 400
337#endif 401#endif
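
The attributes tree follows the usual HFS+ B-tree convention that key_len counts every byte of the key after the key_len field itself. A minimal sketch of building a lookup key for one attribute, assuming an illustrative helper name (the full series adds its own key builder in attributes.c, which is not part of this excerpt):

	static int example_build_attr_key(struct super_block *sb,
					  struct hfsplus_attr_key *key,
					  u32 cnid, const char *name)
	{
		int err;

		memset(key, 0, sizeof(*key));
		key->cnid = cpu_to_be32(cnid);
		/* note the new max-length argument of hfsplus_asc2uni() */
		err = hfsplus_asc2uni(sb,
				(struct hfsplus_unistr *)&key->key_name,
				HFSPLUS_ATTR_MAX_STRLEN, name, strlen(name));
		if (err)
			return err;
		/* key_len excludes the key_len field itself */
		key->key_len = cpu_to_be16(sizeof(key->pad) +
				sizeof(key->cnid) +
				sizeof(key->start_block) +
				sizeof(key->key_name.length) +
				2 * be16_to_cpu(key->key_name.length));
		return 0;
	}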
diff --git a/fs/hfsplus/inode.c b/fs/hfsplus/inode.c
index dcd05be5344b..160ccc9cdb4b 100644
--- a/fs/hfsplus/inode.c
+++ b/fs/hfsplus/inode.c
@@ -17,6 +17,7 @@
17 17
18#include "hfsplus_fs.h" 18#include "hfsplus_fs.h"
19#include "hfsplus_raw.h" 19#include "hfsplus_raw.h"
20#include "xattr.h"
20 21
21static int hfsplus_readpage(struct file *file, struct page *page) 22static int hfsplus_readpage(struct file *file, struct page *page)
22{ 23{
@@ -348,6 +349,18 @@ int hfsplus_file_fsync(struct file *file, loff_t start, loff_t end,
348 error = error2; 349 error = error2;
349 } 350 }
350 351
352 if (test_and_clear_bit(HFSPLUS_I_ATTR_DIRTY, &hip->flags)) {
353 if (sbi->attr_tree) {
354 error2 =
355 filemap_write_and_wait(
356 sbi->attr_tree->inode->i_mapping);
357 if (!error)
358 error = error2;
359 } else {
360 printk(KERN_ERR "hfs: sync non-existent attributes tree\n");
361 }
362 }
363
351 if (test_and_clear_bit(HFSPLUS_I_ALLOC_DIRTY, &hip->flags)) { 364 if (test_and_clear_bit(HFSPLUS_I_ALLOC_DIRTY, &hip->flags)) {
352 error2 = filemap_write_and_wait(sbi->alloc_file->i_mapping); 365 error2 = filemap_write_and_wait(sbi->alloc_file->i_mapping);
353 if (!error) 366 if (!error)
@@ -365,9 +378,10 @@ int hfsplus_file_fsync(struct file *file, loff_t start, loff_t end,
365static const struct inode_operations hfsplus_file_inode_operations = { 378static const struct inode_operations hfsplus_file_inode_operations = {
366 .lookup = hfsplus_file_lookup, 379 .lookup = hfsplus_file_lookup,
367 .setattr = hfsplus_setattr, 380 .setattr = hfsplus_setattr,
368 .setxattr = hfsplus_setxattr, 381 .setxattr = generic_setxattr,
369 .getxattr = hfsplus_getxattr, 382 .getxattr = generic_getxattr,
370 .listxattr = hfsplus_listxattr, 383 .listxattr = hfsplus_listxattr,
384 .removexattr = hfsplus_removexattr,
371}; 385};
372 386
373static const struct file_operations hfsplus_file_operations = { 387static const struct file_operations hfsplus_file_operations = {
diff --git a/fs/hfsplus/ioctl.c b/fs/hfsplus/ioctl.c
index e3c4c4209428..d3ff5cc317d7 100644
--- a/fs/hfsplus/ioctl.c
+++ b/fs/hfsplus/ioctl.c
@@ -16,7 +16,6 @@
16#include <linux/fs.h> 16#include <linux/fs.h>
17#include <linux/mount.h> 17#include <linux/mount.h>
18#include <linux/sched.h> 18#include <linux/sched.h>
19#include <linux/xattr.h>
20#include <asm/uaccess.h> 19#include <asm/uaccess.h>
21#include "hfsplus_fs.h" 20#include "hfsplus_fs.h"
22 21
@@ -151,110 +150,3 @@ long hfsplus_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
151 return -ENOTTY; 150 return -ENOTTY;
152 } 151 }
153} 152}
154
155int hfsplus_setxattr(struct dentry *dentry, const char *name,
156 const void *value, size_t size, int flags)
157{
158 struct inode *inode = dentry->d_inode;
159 struct hfs_find_data fd;
160 hfsplus_cat_entry entry;
161 struct hfsplus_cat_file *file;
162 int res;
163
164 if (!S_ISREG(inode->i_mode) || HFSPLUS_IS_RSRC(inode))
165 return -EOPNOTSUPP;
166
167 res = hfs_find_init(HFSPLUS_SB(inode->i_sb)->cat_tree, &fd);
168 if (res)
169 return res;
170 res = hfsplus_find_cat(inode->i_sb, inode->i_ino, &fd);
171 if (res)
172 goto out;
173 hfs_bnode_read(fd.bnode, &entry, fd.entryoffset,
174 sizeof(struct hfsplus_cat_file));
175 file = &entry.file;
176
177 if (!strcmp(name, "hfs.type")) {
178 if (size == 4)
179 memcpy(&file->user_info.fdType, value, 4);
180 else
181 res = -ERANGE;
182 } else if (!strcmp(name, "hfs.creator")) {
183 if (size == 4)
184 memcpy(&file->user_info.fdCreator, value, 4);
185 else
186 res = -ERANGE;
187 } else
188 res = -EOPNOTSUPP;
189 if (!res) {
190 hfs_bnode_write(fd.bnode, &entry, fd.entryoffset,
191 sizeof(struct hfsplus_cat_file));
192 hfsplus_mark_inode_dirty(inode, HFSPLUS_I_CAT_DIRTY);
193 }
194out:
195 hfs_find_exit(&fd);
196 return res;
197}
198
199ssize_t hfsplus_getxattr(struct dentry *dentry, const char *name,
200 void *value, size_t size)
201{
202 struct inode *inode = dentry->d_inode;
203 struct hfs_find_data fd;
204 hfsplus_cat_entry entry;
205 struct hfsplus_cat_file *file;
206 ssize_t res = 0;
207
208 if (!S_ISREG(inode->i_mode) || HFSPLUS_IS_RSRC(inode))
209 return -EOPNOTSUPP;
210
211 if (size) {
212 res = hfs_find_init(HFSPLUS_SB(inode->i_sb)->cat_tree, &fd);
213 if (res)
214 return res;
215 res = hfsplus_find_cat(inode->i_sb, inode->i_ino, &fd);
216 if (res)
217 goto out;
218 hfs_bnode_read(fd.bnode, &entry, fd.entryoffset,
219 sizeof(struct hfsplus_cat_file));
220 }
221 file = &entry.file;
222
223 if (!strcmp(name, "hfs.type")) {
224 if (size >= 4) {
225 memcpy(value, &file->user_info.fdType, 4);
226 res = 4;
227 } else
228 res = size ? -ERANGE : 4;
229 } else if (!strcmp(name, "hfs.creator")) {
230 if (size >= 4) {
231 memcpy(value, &file->user_info.fdCreator, 4);
232 res = 4;
233 } else
234 res = size ? -ERANGE : 4;
235 } else
236 res = -EOPNOTSUPP;
237out:
238 if (size)
239 hfs_find_exit(&fd);
240 return res;
241}
242
243#define HFSPLUS_ATTRLIST_SIZE (sizeof("hfs.creator")+sizeof("hfs.type"))
244
245ssize_t hfsplus_listxattr(struct dentry *dentry, char *buffer, size_t size)
246{
247 struct inode *inode = dentry->d_inode;
248
249 if (!S_ISREG(inode->i_mode) || HFSPLUS_IS_RSRC(inode))
250 return -EOPNOTSUPP;
251
252 if (!buffer || !size)
253 return HFSPLUS_ATTRLIST_SIZE;
254 if (size < HFSPLUS_ATTRLIST_SIZE)
255 return -ERANGE;
256 strcpy(buffer, "hfs.type");
257 strcpy(buffer + sizeof("hfs.type"), "hfs.creator");
258
259 return HFSPLUS_ATTRLIST_SIZE;
260}
diff --git a/fs/hfsplus/super.c b/fs/hfsplus/super.c
index 796198d26553..974c26f96fae 100644
--- a/fs/hfsplus/super.c
+++ b/fs/hfsplus/super.c
@@ -20,6 +20,7 @@ static struct inode *hfsplus_alloc_inode(struct super_block *sb);
20static void hfsplus_destroy_inode(struct inode *inode); 20static void hfsplus_destroy_inode(struct inode *inode);
21 21
22#include "hfsplus_fs.h" 22#include "hfsplus_fs.h"
23#include "xattr.h"
23 24
24static int hfsplus_system_read_inode(struct inode *inode) 25static int hfsplus_system_read_inode(struct inode *inode)
25{ 26{
@@ -118,6 +119,7 @@ static int hfsplus_system_write_inode(struct inode *inode)
118 case HFSPLUS_ATTR_CNID: 119 case HFSPLUS_ATTR_CNID:
119 fork = &vhdr->attr_file; 120 fork = &vhdr->attr_file;
120 tree = sbi->attr_tree; 121 tree = sbi->attr_tree;
122 break;
121 default: 123 default:
122 return -EIO; 124 return -EIO;
123 } 125 }
@@ -191,6 +193,12 @@ static int hfsplus_sync_fs(struct super_block *sb, int wait)
191 error2 = filemap_write_and_wait(sbi->ext_tree->inode->i_mapping); 193 error2 = filemap_write_and_wait(sbi->ext_tree->inode->i_mapping);
192 if (!error) 194 if (!error)
193 error = error2; 195 error = error2;
196 if (sbi->attr_tree) {
197 error2 =
198 filemap_write_and_wait(sbi->attr_tree->inode->i_mapping);
199 if (!error)
200 error = error2;
201 }
194 error2 = filemap_write_and_wait(sbi->alloc_file->i_mapping); 202 error2 = filemap_write_and_wait(sbi->alloc_file->i_mapping);
195 if (!error) 203 if (!error)
196 error = error2; 204 error = error2;
@@ -281,6 +289,7 @@ static void hfsplus_put_super(struct super_block *sb)
281 hfsplus_sync_fs(sb, 1); 289 hfsplus_sync_fs(sb, 1);
282 } 290 }
283 291
292 hfs_btree_close(sbi->attr_tree);
284 hfs_btree_close(sbi->cat_tree); 293 hfs_btree_close(sbi->cat_tree);
285 hfs_btree_close(sbi->ext_tree); 294 hfs_btree_close(sbi->ext_tree);
286 iput(sbi->alloc_file); 295 iput(sbi->alloc_file);
@@ -477,12 +486,20 @@ static int hfsplus_fill_super(struct super_block *sb, void *data, int silent)
477 printk(KERN_ERR "hfs: failed to load catalog file\n"); 486 printk(KERN_ERR "hfs: failed to load catalog file\n");
478 goto out_close_ext_tree; 487 goto out_close_ext_tree;
479 } 488 }
489 if (vhdr->attr_file.total_blocks != 0) {
490 sbi->attr_tree = hfs_btree_open(sb, HFSPLUS_ATTR_CNID);
491 if (!sbi->attr_tree) {
492 printk(KERN_ERR "hfs: failed to load attributes file\n");
493 goto out_close_cat_tree;
494 }
495 }
496 sb->s_xattr = hfsplus_xattr_handlers;
480 497
481 inode = hfsplus_iget(sb, HFSPLUS_ALLOC_CNID); 498 inode = hfsplus_iget(sb, HFSPLUS_ALLOC_CNID);
482 if (IS_ERR(inode)) { 499 if (IS_ERR(inode)) {
483 printk(KERN_ERR "hfs: failed to load allocation file\n"); 500 printk(KERN_ERR "hfs: failed to load allocation file\n");
484 err = PTR_ERR(inode); 501 err = PTR_ERR(inode);
485 goto out_close_cat_tree; 502 goto out_close_attr_tree;
486 } 503 }
487 sbi->alloc_file = inode; 504 sbi->alloc_file = inode;
488 505
@@ -542,10 +559,27 @@ static int hfsplus_fill_super(struct super_block *sb, void *data, int silent)
542 } 559 }
543 err = hfsplus_create_cat(sbi->hidden_dir->i_ino, root, 560 err = hfsplus_create_cat(sbi->hidden_dir->i_ino, root,
544 &str, sbi->hidden_dir); 561 &str, sbi->hidden_dir);
545 mutex_unlock(&sbi->vh_mutex); 562 if (err) {
546 if (err) 563 mutex_unlock(&sbi->vh_mutex);
564 goto out_put_hidden_dir;
565 }
566
567 err = hfsplus_init_inode_security(sbi->hidden_dir,
568 root, &str);
569 if (err == -EOPNOTSUPP)
570 err = 0; /* Operation is not supported. */
571 else if (err) {
572 /*
573 * Try to delete the catalog entry anyway,
574 * ignoring any secondary error.
575 */
576 hfsplus_delete_cat(sbi->hidden_dir->i_ino,
577 root, &str);
578 mutex_unlock(&sbi->vh_mutex);
547 goto out_put_hidden_dir; 579 goto out_put_hidden_dir;
580 }
548 581
582 mutex_unlock(&sbi->vh_mutex);
549 hfsplus_mark_inode_dirty(sbi->hidden_dir, 583 hfsplus_mark_inode_dirty(sbi->hidden_dir,
550 HFSPLUS_I_CAT_DIRTY); 584 HFSPLUS_I_CAT_DIRTY);
551 } 585 }
@@ -562,6 +596,8 @@ out_put_root:
562 sb->s_root = NULL; 596 sb->s_root = NULL;
563out_put_alloc_file: 597out_put_alloc_file:
564 iput(sbi->alloc_file); 598 iput(sbi->alloc_file);
599out_close_attr_tree:
600 hfs_btree_close(sbi->attr_tree);
565out_close_cat_tree: 601out_close_cat_tree:
566 hfs_btree_close(sbi->cat_tree); 602 hfs_btree_close(sbi->cat_tree);
567out_close_ext_tree: 603out_close_ext_tree:
@@ -635,9 +671,20 @@ static int __init init_hfsplus_fs(void)
635 hfsplus_init_once); 671 hfsplus_init_once);
636 if (!hfsplus_inode_cachep) 672 if (!hfsplus_inode_cachep)
637 return -ENOMEM; 673 return -ENOMEM;
674 err = hfsplus_create_attr_tree_cache();
675 if (err)
676 goto destroy_inode_cache;
638 err = register_filesystem(&hfsplus_fs_type); 677 err = register_filesystem(&hfsplus_fs_type);
639 if (err) 678 if (err)
640 kmem_cache_destroy(hfsplus_inode_cachep); 679 goto destroy_attr_tree_cache;
680 return 0;
681
682destroy_attr_tree_cache:
683 hfsplus_destroy_attr_tree_cache();
684
685destroy_inode_cache:
686 kmem_cache_destroy(hfsplus_inode_cachep);
687
641 return err; 688 return err;
642} 689}
643 690
@@ -650,6 +697,7 @@ static void __exit exit_hfsplus_fs(void)
650 * destroy cache. 697 * destroy cache.
651 */ 698 */
652 rcu_barrier(); 699 rcu_barrier();
700 hfsplus_destroy_attr_tree_cache();
653 kmem_cache_destroy(hfsplus_inode_cachep); 701 kmem_cache_destroy(hfsplus_inode_cachep);
654} 702}
655 703
diff --git a/fs/hfsplus/unicode.c b/fs/hfsplus/unicode.c
index a32998f29f0b..2c2e47dcfdd8 100644
--- a/fs/hfsplus/unicode.c
+++ b/fs/hfsplus/unicode.c
@@ -295,7 +295,8 @@ static inline u16 *decompose_unichar(wchar_t uc, int *size)
295 return hfsplus_decompose_table + (off / 4); 295 return hfsplus_decompose_table + (off / 4);
296} 296}
297 297
298int hfsplus_asc2uni(struct super_block *sb, struct hfsplus_unistr *ustr, 298int hfsplus_asc2uni(struct super_block *sb,
299 struct hfsplus_unistr *ustr, int max_unistr_len,
299 const char *astr, int len) 300 const char *astr, int len)
300{ 301{
301 int size, dsize, decompose; 302 int size, dsize, decompose;
@@ -303,7 +304,7 @@ int hfsplus_asc2uni(struct super_block *sb, struct hfsplus_unistr *ustr,
303 wchar_t c; 304 wchar_t c;
304 305
305 decompose = !test_bit(HFSPLUS_SB_NODECOMPOSE, &HFSPLUS_SB(sb)->flags); 306 decompose = !test_bit(HFSPLUS_SB_NODECOMPOSE, &HFSPLUS_SB(sb)->flags);
306 while (outlen < HFSPLUS_MAX_STRLEN && len > 0) { 307 while (outlen < max_unistr_len && len > 0) {
307 size = asc2unichar(sb, astr, len, &c); 308 size = asc2unichar(sb, astr, len, &c);
308 309
309 if (decompose) 310 if (decompose)
@@ -311,7 +312,7 @@ int hfsplus_asc2uni(struct super_block *sb, struct hfsplus_unistr *ustr,
311 else 312 else
312 dstr = NULL; 313 dstr = NULL;
313 if (dstr) { 314 if (dstr) {
314 if (outlen + dsize > HFSPLUS_MAX_STRLEN) 315 if (outlen + dsize > max_unistr_len)
315 break; 316 break;
316 do { 317 do {
317 ustr->unicode[outlen++] = cpu_to_be16(*dstr++); 318 ustr->unicode[outlen++] = cpu_to_be16(*dstr++);
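
With the extra max_unistr_len argument the one conversion routine now serves both name flavours; a usage sketch (variable names are illustrative, not taken from the patch):

	/* catalog names: up to HFSPLUS_MAX_STRLEN units */
	err = hfsplus_asc2uni(sb, &cat_key->name,
			HFSPLUS_MAX_STRLEN, astr, len);

	/* xattr names: up to HFSPLUS_ATTR_MAX_STRLEN units */
	err = hfsplus_asc2uni(sb,
			(struct hfsplus_unistr *)&attr_key->key_name,
			HFSPLUS_ATTR_MAX_STRLEN, astr, len);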
diff --git a/fs/hfsplus/xattr.c b/fs/hfsplus/xattr.c
new file mode 100644
index 000000000000..e8a4b0815c61
--- /dev/null
+++ b/fs/hfsplus/xattr.c
@@ -0,0 +1,709 @@
1/*
2 * linux/fs/hfsplus/xattr.c
3 *
4 * Vyacheslav Dubeyko <slava@dubeyko.com>
5 *
6 * Logic of processing extended attributes
7 */
8
9#include "hfsplus_fs.h"
10#include "xattr.h"
11
12const struct xattr_handler *hfsplus_xattr_handlers[] = {
13 &hfsplus_xattr_osx_handler,
14 &hfsplus_xattr_user_handler,
15 &hfsplus_xattr_trusted_handler,
16 &hfsplus_xattr_security_handler,
17 NULL
18};
19
20static int strcmp_xattr_finder_info(const char *name)
21{
22 if (name) {
23 return strncmp(name, HFSPLUS_XATTR_FINDER_INFO_NAME,
24 sizeof(HFSPLUS_XATTR_FINDER_INFO_NAME));
25 }
26 return -1;
27}
28
29static int strcmp_xattr_acl(const char *name)
30{
31 if (name) {
32 return strncmp(name, HFSPLUS_XATTR_ACL_NAME,
33 sizeof(HFSPLUS_XATTR_ACL_NAME));
34 }
35 return -1;
36}
37
38static inline int is_known_namespace(const char *name)
39{
40 if (strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN) &&
41 strncmp(name, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN) &&
42 strncmp(name, XATTR_SECURITY_PREFIX, XATTR_SECURITY_PREFIX_LEN) &&
43 strncmp(name, XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN))
44 return false;
45
46 return true;
47}
48
49static int can_set_xattr(struct inode *inode, const char *name,
50 const void *value, size_t value_len)
51{
52 if (!strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN))
53 return -EOPNOTSUPP; /* TODO: implement ACL support */
54
55 if (!strncmp(name, XATTR_MAC_OSX_PREFIX, XATTR_MAC_OSX_PREFIX_LEN)) {
56 /*
57 * This makes sure that we aren't trying to set an
58 * attribute in a different namespace by prefixing it
59 * with "osx."
60 */
61 if (is_known_namespace(name + XATTR_MAC_OSX_PREFIX_LEN))
62 return -EOPNOTSUPP;
63
64 return 0;
65 }
66
67 /*
68 * Don't allow setting an attribute in an unknown namespace.
69 */
70 if (strncmp(name, XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN) &&
71 strncmp(name, XATTR_SECURITY_PREFIX, XATTR_SECURITY_PREFIX_LEN) &&
72 strncmp(name, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN))
73 return -EOPNOTSUPP;
74
75 return 0;
76}
77
78int __hfsplus_setxattr(struct inode *inode, const char *name,
79 const void *value, size_t size, int flags)
80{
81 int err = 0;
82 struct hfs_find_data cat_fd;
83 hfsplus_cat_entry entry;
84 u16 cat_entry_flags, cat_entry_type;
85 u16 folder_finderinfo_len = sizeof(struct DInfo) +
86 sizeof(struct DXInfo);
87 u16 file_finderinfo_len = sizeof(struct FInfo) +
88 sizeof(struct FXInfo);
89
90 if ((!S_ISREG(inode->i_mode) &&
91 !S_ISDIR(inode->i_mode)) ||
92 HFSPLUS_IS_RSRC(inode))
93 return -EOPNOTSUPP;
94
95 err = can_set_xattr(inode, name, value, size);
96 if (err)
97 return err;
98
99 if (strncmp(name, XATTR_MAC_OSX_PREFIX,
100 XATTR_MAC_OSX_PREFIX_LEN) == 0)
101 name += XATTR_MAC_OSX_PREFIX_LEN;
102
103 if (value == NULL) {
104 value = "";
105 size = 0;
106 }
107
108 err = hfs_find_init(HFSPLUS_SB(inode->i_sb)->cat_tree, &cat_fd);
109 if (err) {
110 printk(KERN_ERR "hfs: can't init xattr find struct\n");
111 return err;
112 }
113
114 err = hfsplus_find_cat(inode->i_sb, inode->i_ino, &cat_fd);
115 if (err) {
116 printk(KERN_ERR "hfs: catalog searching failed\n");
117 goto end_setxattr;
118 }
119
120 if (!strcmp_xattr_finder_info(name)) {
121 if (flags & XATTR_CREATE) {
122 printk(KERN_ERR "hfs: xattr exists yet\n");
123 err = -EOPNOTSUPP;
124 goto end_setxattr;
125 }
126 hfs_bnode_read(cat_fd.bnode, &entry, cat_fd.entryoffset,
127 sizeof(hfsplus_cat_entry));
128 if (be16_to_cpu(entry.type) == HFSPLUS_FOLDER) {
129 if (size == folder_finderinfo_len) {
130 memcpy(&entry.folder.user_info, value,
131 folder_finderinfo_len);
132 hfs_bnode_write(cat_fd.bnode, &entry,
133 cat_fd.entryoffset,
134 sizeof(struct hfsplus_cat_folder));
135 hfsplus_mark_inode_dirty(inode,
136 HFSPLUS_I_CAT_DIRTY);
137 } else {
138 err = -ERANGE;
139 goto end_setxattr;
140 }
141 } else if (be16_to_cpu(entry.type) == HFSPLUS_FILE) {
142 if (size == file_finderinfo_len) {
143 memcpy(&entry.file.user_info, value,
144 file_finderinfo_len);
145 hfs_bnode_write(cat_fd.bnode, &entry,
146 cat_fd.entryoffset,
147 sizeof(struct hfsplus_cat_file));
148 hfsplus_mark_inode_dirty(inode,
149 HFSPLUS_I_CAT_DIRTY);
150 } else {
151 err = -ERANGE;
152 goto end_setxattr;
153 }
154 } else {
155 err = -EOPNOTSUPP;
156 goto end_setxattr;
157 }
158 goto end_setxattr;
159 }
160
161 if (!HFSPLUS_SB(inode->i_sb)->attr_tree) {
162 err = -EOPNOTSUPP;
163 goto end_setxattr;
164 }
165
166 if (hfsplus_attr_exists(inode, name)) {
167 if (flags & XATTR_CREATE) {
168 printk(KERN_ERR "hfs: xattr exists yet\n");
169 err = -EOPNOTSUPP;
170 goto end_setxattr;
171 }
172 err = hfsplus_delete_attr(inode, name);
173 if (err)
174 goto end_setxattr;
175 err = hfsplus_create_attr(inode, name, value, size);
176 if (err)
177 goto end_setxattr;
178 } else {
179 if (flags & XATTR_REPLACE) {
180 printk(KERN_ERR "hfs: cannot replace xattr\n");
181 err = -EOPNOTSUPP;
182 goto end_setxattr;
183 }
184 err = hfsplus_create_attr(inode, name, value, size);
185 if (err)
186 goto end_setxattr;
187 }
188
189 cat_entry_type = hfs_bnode_read_u16(cat_fd.bnode, cat_fd.entryoffset);
190 if (cat_entry_type == HFSPLUS_FOLDER) {
191 cat_entry_flags = hfs_bnode_read_u16(cat_fd.bnode,
192 cat_fd.entryoffset +
193 offsetof(struct hfsplus_cat_folder, flags));
194 cat_entry_flags |= HFSPLUS_XATTR_EXISTS;
195 if (!strcmp_xattr_acl(name))
196 cat_entry_flags |= HFSPLUS_ACL_EXISTS;
197 hfs_bnode_write_u16(cat_fd.bnode, cat_fd.entryoffset +
198 offsetof(struct hfsplus_cat_folder, flags),
199 cat_entry_flags);
200 hfsplus_mark_inode_dirty(inode, HFSPLUS_I_CAT_DIRTY);
201 } else if (cat_entry_type == HFSPLUS_FILE) {
202 cat_entry_flags = hfs_bnode_read_u16(cat_fd.bnode,
203 cat_fd.entryoffset +
204 offsetof(struct hfsplus_cat_file, flags));
205 cat_entry_flags |= HFSPLUS_XATTR_EXISTS;
206 if (!strcmp_xattr_acl(name))
207 cat_entry_flags |= HFSPLUS_ACL_EXISTS;
208 hfs_bnode_write_u16(cat_fd.bnode, cat_fd.entryoffset +
209 offsetof(struct hfsplus_cat_file, flags),
210 cat_entry_flags);
211 hfsplus_mark_inode_dirty(inode, HFSPLUS_I_CAT_DIRTY);
212 } else {
213 printk(KERN_ERR "hfs: invalid catalog entry type\n");
214 err = -EIO;
215 goto end_setxattr;
216 }
217
218end_setxattr:
219 hfs_find_exit(&cat_fd);
220 return err;
221}
222
223static inline int is_osx_xattr(const char *xattr_name)
224{
225 return !is_known_namespace(xattr_name);
226}
227
228static int name_len(const char *xattr_name, int xattr_name_len)
229{
230 int len = xattr_name_len + 1;
231
232 if (is_osx_xattr(xattr_name))
233 len += XATTR_MAC_OSX_PREFIX_LEN;
234
235 return len;
236}
237
238static int copy_name(char *buffer, const char *xattr_name, int name_len)
239{
240 int len = name_len;
241 int offset = 0;
242
243 if (is_osx_xattr(xattr_name)) {
244 strncpy(buffer, XATTR_MAC_OSX_PREFIX, XATTR_MAC_OSX_PREFIX_LEN);
245 offset += XATTR_MAC_OSX_PREFIX_LEN;
246 len += XATTR_MAC_OSX_PREFIX_LEN;
247 }
248
249 strncpy(buffer + offset, xattr_name, name_len);
250 memset(buffer + offset + name_len, 0, 1);
251 len += 1;
252
253 return len;
254}
255
256static ssize_t hfsplus_getxattr_finder_info(struct dentry *dentry,
257 void *value, size_t size)
258{
259 ssize_t res = 0;
260 struct inode *inode = dentry->d_inode;
261 struct hfs_find_data fd;
262 u16 entry_type;
263 u16 folder_rec_len = sizeof(struct DInfo) + sizeof(struct DXInfo);
264 u16 file_rec_len = sizeof(struct FInfo) + sizeof(struct FXInfo);
265 u16 record_len = max(folder_rec_len, file_rec_len);
266 u8 folder_finder_info[sizeof(struct DInfo) + sizeof(struct DXInfo)];
267 u8 file_finder_info[sizeof(struct FInfo) + sizeof(struct FXInfo)];
268
269 if (size >= record_len) {
270 res = hfs_find_init(HFSPLUS_SB(inode->i_sb)->cat_tree, &fd);
271 if (res) {
272 printk(KERN_ERR "hfs: can't init xattr find struct\n");
273 return res;
274 }
275 res = hfsplus_find_cat(inode->i_sb, inode->i_ino, &fd);
276 if (res)
277 goto end_getxattr_finder_info;
278 entry_type = hfs_bnode_read_u16(fd.bnode, fd.entryoffset);
279
280 if (entry_type == HFSPLUS_FOLDER) {
281 hfs_bnode_read(fd.bnode, folder_finder_info,
282 fd.entryoffset +
283 offsetof(struct hfsplus_cat_folder, user_info),
284 folder_rec_len);
285 memcpy(value, folder_finder_info, folder_rec_len);
286 res = folder_rec_len;
287 } else if (entry_type == HFSPLUS_FILE) {
288 hfs_bnode_read(fd.bnode, file_finder_info,
289 fd.entryoffset +
290 offsetof(struct hfsplus_cat_file, user_info),
291 file_rec_len);
292 memcpy(value, file_finder_info, file_rec_len);
293 res = file_rec_len;
294 } else {
295 res = -EOPNOTSUPP;
296 goto end_getxattr_finder_info;
297 }
298 } else
299 res = size ? -ERANGE : record_len;
300
301end_getxattr_finder_info:
302 if (size >= record_len)
303 hfs_find_exit(&fd);
304 return res;
305}
306
307ssize_t hfsplus_getxattr(struct dentry *dentry, const char *name,
308 void *value, size_t size)
309{
310 struct inode *inode = dentry->d_inode;
311 struct hfs_find_data fd;
312 hfsplus_attr_entry *entry;
313 __be32 xattr_record_type;
314 u32 record_type;
315 u16 record_length = 0;
316 ssize_t res = 0;
317
318 if ((!S_ISREG(inode->i_mode) &&
319 !S_ISDIR(inode->i_mode)) ||
320 HFSPLUS_IS_RSRC(inode))
321 return -EOPNOTSUPP;
322
323 if (strncmp(name, XATTR_MAC_OSX_PREFIX,
324 XATTR_MAC_OSX_PREFIX_LEN) == 0) {
325 /* skip "osx." prefix */
326 name += XATTR_MAC_OSX_PREFIX_LEN;
327 /*
328 * Don't allow retrieving properly prefixed attributes
329 * by prepending them with "osx."
330 */
331 if (is_known_namespace(name))
332 return -EOPNOTSUPP;
333 }
334
335 if (!strcmp_xattr_finder_info(name))
336 return hfsplus_getxattr_finder_info(dentry, value, size);
337
338 if (!HFSPLUS_SB(inode->i_sb)->attr_tree)
339 return -EOPNOTSUPP;
340
341 entry = hfsplus_alloc_attr_entry();
342 if (!entry) {
343 printk(KERN_ERR "hfs: can't allocate xattr entry\n");
344 return -ENOMEM;
345 }
346
347 res = hfs_find_init(HFSPLUS_SB(inode->i_sb)->attr_tree, &fd);
348 if (res) {
349 printk(KERN_ERR "hfs: can't init xattr find struct\n");
350 goto failed_getxattr_init;
351 }
352
353 res = hfsplus_find_attr(inode->i_sb, inode->i_ino, name, &fd);
354 if (res) {
355 if (res == -ENOENT)
356 res = -ENODATA;
357 else
358 printk(KERN_ERR "hfs: xattr searching failed\n");
359 goto out;
360 }
361
362 hfs_bnode_read(fd.bnode, &xattr_record_type,
363 fd.entryoffset, sizeof(xattr_record_type));
364 record_type = be32_to_cpu(xattr_record_type);
365 if (record_type == HFSPLUS_ATTR_INLINE_DATA) {
366 record_length = hfs_bnode_read_u16(fd.bnode,
367 fd.entryoffset +
368 offsetof(struct hfsplus_attr_inline_data,
369 length));
370 if (record_length > HFSPLUS_MAX_INLINE_DATA_SIZE) {
371 printk(KERN_ERR "hfs: invalid xattr record size\n");
372 res = -EIO;
373 goto out;
374 }
375 } else if (record_type == HFSPLUS_ATTR_FORK_DATA ||
376 record_type == HFSPLUS_ATTR_EXTENTS) {
377 printk(KERN_ERR "hfs: only inline data xattr are supported\n");
378 res = -EOPNOTSUPP;
379 goto out;
380 } else {
381 printk(KERN_ERR "hfs: invalid xattr record\n");
382 res = -EIO;
383 goto out;
384 }
385
386 if (size) {
387 hfs_bnode_read(fd.bnode, entry, fd.entryoffset,
388 offsetof(struct hfsplus_attr_inline_data,
389 raw_bytes) + record_length);
390 }
391
392 if (size >= record_length) {
393 memcpy(value, entry->inline_data.raw_bytes, record_length);
394 res = record_length;
395 } else
396 res = size ? -ERANGE : record_length;
397
398out:
399 hfs_find_exit(&fd);
400
401failed_getxattr_init:
402 hfsplus_destroy_attr_entry(entry);
403 return res;
404}
405
406static inline int can_list(const char *xattr_name)
407{
408 if (!xattr_name)
409 return 0;
410
411 return strncmp(xattr_name, XATTR_TRUSTED_PREFIX,
412 XATTR_TRUSTED_PREFIX_LEN) ||
413 capable(CAP_SYS_ADMIN);
414}
415
416static ssize_t hfsplus_listxattr_finder_info(struct dentry *dentry,
417 char *buffer, size_t size)
418{
419 ssize_t res = 0;
420 struct inode *inode = dentry->d_inode;
421 struct hfs_find_data fd;
422 u16 entry_type;
423 u8 folder_finder_info[sizeof(struct DInfo) + sizeof(struct DXInfo)];
424 u8 file_finder_info[sizeof(struct FInfo) + sizeof(struct FXInfo)];
425 unsigned long len, found_bit;
426 int xattr_name_len, symbols_count;
427
428 res = hfs_find_init(HFSPLUS_SB(inode->i_sb)->cat_tree, &fd);
429 if (res) {
430 printk(KERN_ERR "hfs: can't init xattr find struct\n");
431 return res;
432 }
433
434 res = hfsplus_find_cat(inode->i_sb, inode->i_ino, &fd);
435 if (res)
436 goto end_listxattr_finder_info;
437
438 entry_type = hfs_bnode_read_u16(fd.bnode, fd.entryoffset);
439 if (entry_type == HFSPLUS_FOLDER) {
440 len = sizeof(struct DInfo) + sizeof(struct DXInfo);
441 hfs_bnode_read(fd.bnode, folder_finder_info,
442 fd.entryoffset +
443 offsetof(struct hfsplus_cat_folder, user_info),
444 len);
445 found_bit = find_first_bit((void *)folder_finder_info, len*8);
446 } else if (entry_type == HFSPLUS_FILE) {
447 len = sizeof(struct FInfo) + sizeof(struct FXInfo);
448 hfs_bnode_read(fd.bnode, file_finder_info,
449 fd.entryoffset +
450 offsetof(struct hfsplus_cat_file, user_info),
451 len);
452 found_bit = find_first_bit((void *)file_finder_info, len*8);
453 } else {
454 res = -EOPNOTSUPP;
455 goto end_listxattr_finder_info;
456 }
457
458 if (found_bit >= (len*8))
459 res = 0;
460 else {
461 symbols_count = sizeof(HFSPLUS_XATTR_FINDER_INFO_NAME) - 1;
462 xattr_name_len =
463 name_len(HFSPLUS_XATTR_FINDER_INFO_NAME, symbols_count);
464 if (!buffer || !size) {
465 if (can_list(HFSPLUS_XATTR_FINDER_INFO_NAME))
466 res = xattr_name_len;
467 } else if (can_list(HFSPLUS_XATTR_FINDER_INFO_NAME)) {
468 if (size < xattr_name_len)
469 res = -ERANGE;
470 else {
471 res = copy_name(buffer,
472 HFSPLUS_XATTR_FINDER_INFO_NAME,
473 symbols_count);
474 }
475 }
476 }
477
478end_listxattr_finder_info:
479 hfs_find_exit(&fd);
480
481 return res;
482}
483
484ssize_t hfsplus_listxattr(struct dentry *dentry, char *buffer, size_t size)
485{
486 ssize_t err;
487 ssize_t res = 0;
488 struct inode *inode = dentry->d_inode;
489 struct hfs_find_data fd;
490 u16 key_len = 0;
491 struct hfsplus_attr_key attr_key;
492 char strbuf[HFSPLUS_ATTR_MAX_STRLEN +
493 XATTR_MAC_OSX_PREFIX_LEN + 1] = {0};
494 int xattr_name_len;
495
496 if ((!S_ISREG(inode->i_mode) &&
497 !S_ISDIR(inode->i_mode)) ||
498 HFSPLUS_IS_RSRC(inode))
499 return -EOPNOTSUPP;
500
501 res = hfsplus_listxattr_finder_info(dentry, buffer, size);
502 if (res < 0)
503 return res;
504 else if (!HFSPLUS_SB(inode->i_sb)->attr_tree)
505 return (res == 0) ? -EOPNOTSUPP : res;
506
507 err = hfs_find_init(HFSPLUS_SB(inode->i_sb)->attr_tree, &fd);
508 if (err) {
509 printk(KERN_ERR "hfs: can't init xattr find struct\n");
510 return err;
511 }
512
513 err = hfsplus_find_attr(inode->i_sb, inode->i_ino, NULL, &fd);
514 if (err) {
515 if (err == -ENOENT) {
516 if (res == 0)
517 res = -ENODATA;
518 goto end_listxattr;
519 } else {
520 res = err;
521 goto end_listxattr;
522 }
523 }
524
525 for (;;) {
526 key_len = hfs_bnode_read_u16(fd.bnode, fd.keyoffset);
527 if (key_len == 0 || key_len > fd.tree->max_key_len) {
528 printk(KERN_ERR "hfs: invalid xattr key length: %d\n",
529 key_len);
530 res = -EIO;
531 goto end_listxattr;
532 }
533
534 hfs_bnode_read(fd.bnode, &attr_key,
535 fd.keyoffset, key_len + sizeof(key_len));
536
537 if (be32_to_cpu(attr_key.cnid) != inode->i_ino)
538 goto end_listxattr;
539
540 xattr_name_len = HFSPLUS_ATTR_MAX_STRLEN;
541 if (hfsplus_uni2asc(inode->i_sb,
542 (const struct hfsplus_unistr *)&fd.key->attr.key_name,
543 strbuf, &xattr_name_len)) {
544 printk(KERN_ERR "hfs: unicode conversion failed\n");
545 res = -EIO;
546 goto end_listxattr;
547 }
548
549 if (!buffer || !size) {
550 if (can_list(strbuf))
551 res += name_len(strbuf, xattr_name_len);
552 } else if (can_list(strbuf)) {
553 if (size < (res + name_len(strbuf, xattr_name_len))) {
554 res = -ERANGE;
555 goto end_listxattr;
556 } else
557 res += copy_name(buffer + res,
558 strbuf, xattr_name_len);
559 }
560
561 if (hfs_brec_goto(&fd, 1))
562 goto end_listxattr;
563 }
564
565end_listxattr:
566 hfs_find_exit(&fd);
567 return res;
568}
569
570int hfsplus_removexattr(struct dentry *dentry, const char *name)
571{
572 int err = 0;
573 struct inode *inode = dentry->d_inode;
574 struct hfs_find_data cat_fd;
575 u16 flags;
576 u16 cat_entry_type;
577 int is_xattr_acl_deleted = 0;
578 int is_all_xattrs_deleted = 0;
579
580 if ((!S_ISREG(inode->i_mode) &&
581 !S_ISDIR(inode->i_mode)) ||
582 HFSPLUS_IS_RSRC(inode))
583 return -EOPNOTSUPP;
584
585 if (!HFSPLUS_SB(inode->i_sb)->attr_tree)
586 return -EOPNOTSUPP;
587
588 err = can_set_xattr(inode, name, NULL, 0);
589 if (err)
590 return err;
591
592 if (strncmp(name, XATTR_MAC_OSX_PREFIX,
593 XATTR_MAC_OSX_PREFIX_LEN) == 0)
594 name += XATTR_MAC_OSX_PREFIX_LEN;
595
596 if (!strcmp_xattr_finder_info(name))
597 return -EOPNOTSUPP;
598
599 err = hfs_find_init(HFSPLUS_SB(inode->i_sb)->cat_tree, &cat_fd);
600 if (err) {
601 printk(KERN_ERR "hfs: can't init xattr find struct\n");
602 return err;
603 }
604
605 err = hfsplus_find_cat(inode->i_sb, inode->i_ino, &cat_fd);
606 if (err) {
607 printk(KERN_ERR "hfs: catalog searching failed\n");
608 goto end_removexattr;
609 }
610
611 err = hfsplus_delete_attr(inode, name);
612 if (err)
613 goto end_removexattr;
614
615 is_xattr_acl_deleted = !strcmp_xattr_acl(name);
616 is_all_xattrs_deleted = !hfsplus_attr_exists(inode, NULL);
617
618 if (!is_xattr_acl_deleted && !is_all_xattrs_deleted)
619 goto end_removexattr;
620
621 cat_entry_type = hfs_bnode_read_u16(cat_fd.bnode, cat_fd.entryoffset);
622
623 if (cat_entry_type == HFSPLUS_FOLDER) {
624 flags = hfs_bnode_read_u16(cat_fd.bnode, cat_fd.entryoffset +
625 offsetof(struct hfsplus_cat_folder, flags));
626 if (is_xattr_acl_deleted)
627 flags &= ~HFSPLUS_ACL_EXISTS;
628 if (is_all_xattrs_deleted)
629 flags &= ~HFSPLUS_XATTR_EXISTS;
630 hfs_bnode_write_u16(cat_fd.bnode, cat_fd.entryoffset +
631 offsetof(struct hfsplus_cat_folder, flags),
632 flags);
633 hfsplus_mark_inode_dirty(inode, HFSPLUS_I_CAT_DIRTY);
634 } else if (cat_entry_type == HFSPLUS_FILE) {
635 flags = hfs_bnode_read_u16(cat_fd.bnode, cat_fd.entryoffset +
636 offsetof(struct hfsplus_cat_file, flags));
637 if (is_xattr_acl_deleted)
638 flags &= ~HFSPLUS_ACL_EXISTS;
639 if (is_all_xattrs_deleted)
640 flags &= ~HFSPLUS_XATTR_EXISTS;
641 hfs_bnode_write_u16(cat_fd.bnode, cat_fd.entryoffset +
642 offsetof(struct hfsplus_cat_file, flags),
643 flags);
644 hfsplus_mark_inode_dirty(inode, HFSPLUS_I_CAT_DIRTY);
645 } else {
646 printk(KERN_ERR "hfs: invalid catalog entry type\n");
647 err = -EIO;
648 goto end_removexattr;
649 }
650
651end_removexattr:
652 hfs_find_exit(&cat_fd);
653 return err;
654}
655
656static int hfsplus_osx_getxattr(struct dentry *dentry, const char *name,
657 void *buffer, size_t size, int type)
658{
659 char xattr_name[HFSPLUS_ATTR_MAX_STRLEN +
660 XATTR_MAC_OSX_PREFIX_LEN + 1] = {0};
661 size_t len = strlen(name);
662
663 if (!strcmp(name, ""))
664 return -EINVAL;
665
666 if (len > HFSPLUS_ATTR_MAX_STRLEN)
667 return -EOPNOTSUPP;
668
669 strcpy(xattr_name, XATTR_MAC_OSX_PREFIX);
670 strcpy(xattr_name + XATTR_MAC_OSX_PREFIX_LEN, name);
671
672 return hfsplus_getxattr(dentry, xattr_name, buffer, size);
673}
674
675static int hfsplus_osx_setxattr(struct dentry *dentry, const char *name,
676 const void *buffer, size_t size, int flags, int type)
677{
678 char xattr_name[HFSPLUS_ATTR_MAX_STRLEN +
679 XATTR_MAC_OSX_PREFIX_LEN + 1] = {0};
680 size_t len = strlen(name);
681
682 if (!strcmp(name, ""))
683 return -EINVAL;
684
685 if (len > HFSPLUS_ATTR_MAX_STRLEN)
686 return -EOPNOTSUPP;
687
688 strcpy(xattr_name, XATTR_MAC_OSX_PREFIX);
689 strcpy(xattr_name + XATTR_MAC_OSX_PREFIX_LEN, name);
690
691 return hfsplus_setxattr(dentry, xattr_name, buffer, size, flags);
692}
693
694static size_t hfsplus_osx_listxattr(struct dentry *dentry, char *list,
695 size_t list_size, const char *name, size_t name_len, int type)
696{
697 /*
698 * This method is not used:
699 * hfsplus_listxattr() is used instead of generic_listxattr().
700 */
701 return -EOPNOTSUPP;
702}
703
704const struct xattr_handler hfsplus_xattr_osx_handler = {
705 .prefix = XATTR_MAC_OSX_PREFIX,
706 .list = hfsplus_osx_listxattr,
707 .get = hfsplus_osx_getxattr,
708 .set = hfsplus_osx_setxattr,
709};
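
From userspace the handlers above are reached through the ordinary xattr system calls; a hypothetical smoke test (the path and attribute name are assumptions, not part of the patch):

	#include <stdio.h>
	#include <sys/xattr.h>

	int main(void)
	{
		const char *path = "/mnt/hfsplus/file";	/* assumed mount */
		char buf[64];
		ssize_t len;

		/* routed via the "user." handler into __hfsplus_setxattr() */
		if (setxattr(path, "user.comment", "hello", 5, 0) != 0)
			perror("setxattr");

		len = getxattr(path, "user.comment", buf, sizeof(buf));
		if (len > 0)
			printf("user.comment = %.*s\n", (int)len, buf);
		return 0;
	}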
diff --git a/fs/hfsplus/xattr.h b/fs/hfsplus/xattr.h
new file mode 100644
index 000000000000..847b695b984d
--- /dev/null
+++ b/fs/hfsplus/xattr.h
@@ -0,0 +1,60 @@
1/*
2 * linux/fs/hfsplus/xattr.h
3 *
4 * Vyacheslav Dubeyko <slava@dubeyko.com>
5 *
6 * Logic of processing extended attributes
7 */
8
9#ifndef _LINUX_HFSPLUS_XATTR_H
10#define _LINUX_HFSPLUS_XATTR_H
11
12#include <linux/xattr.h>
13
14extern const struct xattr_handler hfsplus_xattr_osx_handler;
15extern const struct xattr_handler hfsplus_xattr_user_handler;
16extern const struct xattr_handler hfsplus_xattr_trusted_handler;
17/*extern const struct xattr_handler hfsplus_xattr_acl_access_handler;*/
18/*extern const struct xattr_handler hfsplus_xattr_acl_default_handler;*/
19extern const struct xattr_handler hfsplus_xattr_security_handler;
20
21extern const struct xattr_handler *hfsplus_xattr_handlers[];
22
23int __hfsplus_setxattr(struct inode *inode, const char *name,
24 const void *value, size_t size, int flags);
25
26static inline int hfsplus_setxattr(struct dentry *dentry, const char *name,
27 const void *value, size_t size, int flags)
28{
29 return __hfsplus_setxattr(dentry->d_inode, name, value, size, flags);
30}
31
32ssize_t hfsplus_getxattr(struct dentry *dentry, const char *name,
33 void *value, size_t size);
34
35ssize_t hfsplus_listxattr(struct dentry *dentry, char *buffer, size_t size);
36
37int hfsplus_removexattr(struct dentry *dentry, const char *name);
38
39int hfsplus_init_security(struct inode *inode, struct inode *dir,
40 const struct qstr *qstr);
41
42static inline int hfsplus_init_acl(struct inode *inode, struct inode *dir)
43{
44 /*TODO: implement*/
45 return 0;
46}
47
48static inline int hfsplus_init_inode_security(struct inode *inode,
49 struct inode *dir,
50 const struct qstr *qstr)
51{
52 int err;
53
54 err = hfsplus_init_acl(inode, dir);
55 if (!err)
56 err = hfsplus_init_security(inode, dir, qstr);
57 return err;
58}
59
60#endif
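
hfsplus_init_inode_security() is intended to run right after a catalog entry is created, exactly as the super.c hunk above does for the hidden directory. A sketch of the same wiring on a generic create path (placement outside super.c is an assumption here; the rest of the series adds these calls in the directory operations):

	err = hfsplus_create_cat(inode->i_ino, dir, &str, inode);
	if (err)
		goto out;
	err = hfsplus_init_inode_security(inode, dir, &str);
	if (err == -EOPNOTSUPP)
		err = 0;	/* no security label to store */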
diff --git a/fs/hfsplus/xattr_security.c b/fs/hfsplus/xattr_security.c
new file mode 100644
index 000000000000..83b842f113c5
--- /dev/null
+++ b/fs/hfsplus/xattr_security.c
@@ -0,0 +1,104 @@
1/*
2 * linux/fs/hfsplus/xattr_security.c
3 *
4 * Vyacheslav Dubeyko <slava@dubeyko.com>
5 *
6 * Handler for storing security labels as extended attributes.
7 */
8
9#include <linux/security.h>
10#include "hfsplus_fs.h"
11#include "xattr.h"
12
13static int hfsplus_security_getxattr(struct dentry *dentry, const char *name,
14 void *buffer, size_t size, int type)
15{
16 char xattr_name[HFSPLUS_ATTR_MAX_STRLEN + 1] = {0};
17 size_t len = strlen(name);
18
19 if (!strcmp(name, ""))
20 return -EINVAL;
21
22 if (len + XATTR_SECURITY_PREFIX_LEN > HFSPLUS_ATTR_MAX_STRLEN)
23 return -EOPNOTSUPP;
24
25 strcpy(xattr_name, XATTR_SECURITY_PREFIX);
26 strcpy(xattr_name + XATTR_SECURITY_PREFIX_LEN, name);
27
28 return hfsplus_getxattr(dentry, xattr_name, buffer, size);
29}
30
31static int hfsplus_security_setxattr(struct dentry *dentry, const char *name,
32 const void *buffer, size_t size, int flags, int type)
33{
34 char xattr_name[HFSPLUS_ATTR_MAX_STRLEN + 1] = {0};
35 size_t len = strlen(name);
36
37 if (!strcmp(name, ""))
38 return -EINVAL;
39
40 if (len + XATTR_SECURITY_PREFIX_LEN > HFSPLUS_ATTR_MAX_STRLEN)
41 return -EOPNOTSUPP;
42
43 strcpy(xattr_name, XATTR_SECURITY_PREFIX);
44 strcpy(xattr_name + XATTR_SECURITY_PREFIX_LEN, name);
45
46 return hfsplus_setxattr(dentry, xattr_name, buffer, size, flags);
47}
48
49static size_t hfsplus_security_listxattr(struct dentry *dentry, char *list,
50 size_t list_size, const char *name, size_t name_len, int type)
51{
52 /*
53 * This method is not used:
54 * hfsplus_listxattr() is used instead of generic_listxattr().
55 */
56 return -EOPNOTSUPP;
57}
58
59static int hfsplus_initxattrs(struct inode *inode,
60 const struct xattr *xattr_array,
61 void *fs_info)
62{
63 const struct xattr *xattr;
64 char xattr_name[HFSPLUS_ATTR_MAX_STRLEN + 1] = {0};
65 size_t xattr_name_len;
66 int err = 0;
67
68 for (xattr = xattr_array; xattr->name != NULL; xattr++) {
69 xattr_name_len = strlen(xattr->name);
70
71 if (xattr_name_len == 0)
72 continue;
73
74 if (xattr_name_len + XATTR_SECURITY_PREFIX_LEN >
75 HFSPLUS_ATTR_MAX_STRLEN)
76 return -EOPNOTSUPP;
77
78 strcpy(xattr_name, XATTR_SECURITY_PREFIX);
79 strcpy(xattr_name +
80 XATTR_SECURITY_PREFIX_LEN, xattr->name);
81 memset(xattr_name +
82 XATTR_SECURITY_PREFIX_LEN + xattr_name_len, 0, 1);
83
84 err = __hfsplus_setxattr(inode, xattr_name,
85 xattr->value, xattr->value_len, 0);
86 if (err)
87 break;
88 }
89 return err;
90}
91
92int hfsplus_init_security(struct inode *inode, struct inode *dir,
93 const struct qstr *qstr)
94{
95 return security_inode_init_security(inode, dir, qstr,
96 &hfsplus_initxattrs, NULL);
97}
98
99const struct xattr_handler hfsplus_xattr_security_handler = {
100 .prefix = XATTR_SECURITY_PREFIX,
101 .list = hfsplus_security_listxattr,
102 .get = hfsplus_security_getxattr,
103 .set = hfsplus_security_setxattr,
104};
diff --git a/fs/hfsplus/xattr_trusted.c b/fs/hfsplus/xattr_trusted.c
new file mode 100644
index 000000000000..426cee277542
--- /dev/null
+++ b/fs/hfsplus/xattr_trusted.c
@@ -0,0 +1,63 @@
1/*
2 * linux/fs/hfsplus/xattr_trusted.c
3 *
4 * Vyacheslav Dubeyko <slava@dubeyko.com>
5 *
6 * Handler for trusted extended attributes.
7 */
8
9#include "hfsplus_fs.h"
10#include "xattr.h"
11
12static int hfsplus_trusted_getxattr(struct dentry *dentry, const char *name,
13 void *buffer, size_t size, int type)
14{
15 char xattr_name[HFSPLUS_ATTR_MAX_STRLEN + 1] = {0};
16 size_t len = strlen(name);
17
18 if (!strcmp(name, ""))
19 return -EINVAL;
20
21 if (len + XATTR_TRUSTED_PREFIX_LEN > HFSPLUS_ATTR_MAX_STRLEN)
22 return -EOPNOTSUPP;
23
24 strcpy(xattr_name, XATTR_TRUSTED_PREFIX);
25 strcpy(xattr_name + XATTR_TRUSTED_PREFIX_LEN, name);
26
27 return hfsplus_getxattr(dentry, xattr_name, buffer, size);
28}
29
30static int hfsplus_trusted_setxattr(struct dentry *dentry, const char *name,
31 const void *buffer, size_t size, int flags, int type)
32{
33 char xattr_name[HFSPLUS_ATTR_MAX_STRLEN + 1] = {0};
34 size_t len = strlen(name);
35
36 if (!strcmp(name, ""))
37 return -EINVAL;
38
39 if (len + XATTR_TRUSTED_PREFIX_LEN > HFSPLUS_ATTR_MAX_STRLEN)
40 return -EOPNOTSUPP;
41
42 strcpy(xattr_name, XATTR_TRUSTED_PREFIX);
43 strcpy(xattr_name + XATTR_TRUSTED_PREFIX_LEN, name);
44
45 return hfsplus_setxattr(dentry, xattr_name, buffer, size, flags);
46}
47
48static size_t hfsplus_trusted_listxattr(struct dentry *dentry, char *list,
49 size_t list_size, const char *name, size_t name_len, int type)
50{
51 /*
52 * This method is not used:
53 * hfsplus_listxattr() is used instead of generic_listxattr().
54 */
55 return -EOPNOTSUPP;
56}
57
58const struct xattr_handler hfsplus_xattr_trusted_handler = {
59 .prefix = XATTR_TRUSTED_PREFIX,
60 .list = hfsplus_trusted_listxattr,
61 .get = hfsplus_trusted_getxattr,
62 .set = hfsplus_trusted_setxattr,
63};
diff --git a/fs/hfsplus/xattr_user.c b/fs/hfsplus/xattr_user.c
new file mode 100644
index 000000000000..e34016561ae0
--- /dev/null
+++ b/fs/hfsplus/xattr_user.c
@@ -0,0 +1,63 @@
1/*
2 * linux/fs/hfsplus/xattr_user.c
3 *
4 * Vyacheslav Dubeyko <slava@dubeyko.com>
5 *
6 * Handler for user extended attributes.
7 */
8
9#include "hfsplus_fs.h"
10#include "xattr.h"
11
12static int hfsplus_user_getxattr(struct dentry *dentry, const char *name,
13 void *buffer, size_t size, int type)
14{
15 char xattr_name[HFSPLUS_ATTR_MAX_STRLEN + 1] = {0};
16 size_t len = strlen(name);
17
18 if (!strcmp(name, ""))
19 return -EINVAL;
20
21 if (len + XATTR_USER_PREFIX_LEN > HFSPLUS_ATTR_MAX_STRLEN)
22 return -EOPNOTSUPP;
23
24 strcpy(xattr_name, XATTR_USER_PREFIX);
25 strcpy(xattr_name + XATTR_USER_PREFIX_LEN, name);
26
27 return hfsplus_getxattr(dentry, xattr_name, buffer, size);
28}
29
30static int hfsplus_user_setxattr(struct dentry *dentry, const char *name,
31 const void *buffer, size_t size, int flags, int type)
32{
33 char xattr_name[HFSPLUS_ATTR_MAX_STRLEN + 1] = {0};
34 size_t len = strlen(name);
35
36 if (!strcmp(name, ""))
37 return -EINVAL;
38
39 if (len + XATTR_USER_PREFIX_LEN > HFSPLUS_ATTR_MAX_STRLEN)
40 return -EOPNOTSUPP;
41
42 strcpy(xattr_name, XATTR_USER_PREFIX);
43 strcpy(xattr_name + XATTR_USER_PREFIX_LEN, name);
44
45 return hfsplus_setxattr(dentry, xattr_name, buffer, size, flags);
46}
47
48static size_t hfsplus_user_listxattr(struct dentry *dentry, char *list,
49 size_t list_size, const char *name, size_t name_len, int type)
50{
51 /*
52 * This method is not used:
53 * hfsplus_listxattr() is used instead of generic_listxattr().
54 */
55 return -EOPNOTSUPP;
56}
57
58const struct xattr_handler hfsplus_xattr_user_handler = {
59 .prefix = XATTR_USER_PREFIX,
60 .list = hfsplus_user_listxattr,
61 .get = hfsplus_user_getxattr,
62 .set = hfsplus_user_setxattr,
63};
diff --git a/fs/inode.c b/fs/inode.c
index 67880e604399..f5f7c06c36fb 100644
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -798,11 +798,10 @@ static struct inode *find_inode(struct super_block *sb,
798 int (*test)(struct inode *, void *), 798 int (*test)(struct inode *, void *),
799 void *data) 799 void *data)
800{ 800{
801 struct hlist_node *node;
802 struct inode *inode = NULL; 801 struct inode *inode = NULL;
803 802
804repeat: 803repeat:
805 hlist_for_each_entry(inode, node, head, i_hash) { 804 hlist_for_each_entry(inode, head, i_hash) {
806 spin_lock(&inode->i_lock); 805 spin_lock(&inode->i_lock);
807 if (inode->i_sb != sb) { 806 if (inode->i_sb != sb) {
808 spin_unlock(&inode->i_lock); 807 spin_unlock(&inode->i_lock);
@@ -830,11 +829,10 @@ repeat:
830static struct inode *find_inode_fast(struct super_block *sb, 829static struct inode *find_inode_fast(struct super_block *sb,
831 struct hlist_head *head, unsigned long ino) 830 struct hlist_head *head, unsigned long ino)
832{ 831{
833 struct hlist_node *node;
834 struct inode *inode = NULL; 832 struct inode *inode = NULL;
835 833
836repeat: 834repeat:
837 hlist_for_each_entry(inode, node, head, i_hash) { 835 hlist_for_each_entry(inode, head, i_hash) {
838 spin_lock(&inode->i_lock); 836 spin_lock(&inode->i_lock);
839 if (inode->i_ino != ino) { 837 if (inode->i_ino != ino) {
840 spin_unlock(&inode->i_lock); 838 spin_unlock(&inode->i_lock);
@@ -1132,11 +1130,10 @@ EXPORT_SYMBOL(iget_locked);
1132static int test_inode_iunique(struct super_block *sb, unsigned long ino) 1130static int test_inode_iunique(struct super_block *sb, unsigned long ino)
1133{ 1131{
1134 struct hlist_head *b = inode_hashtable + hash(sb, ino); 1132 struct hlist_head *b = inode_hashtable + hash(sb, ino);
1135 struct hlist_node *node;
1136 struct inode *inode; 1133 struct inode *inode;
1137 1134
1138 spin_lock(&inode_hash_lock); 1135 spin_lock(&inode_hash_lock);
1139 hlist_for_each_entry(inode, node, b, i_hash) { 1136 hlist_for_each_entry(inode, b, i_hash) {
1140 if (inode->i_ino == ino && inode->i_sb == sb) { 1137 if (inode->i_ino == ino && inode->i_sb == sb) {
1141 spin_unlock(&inode_hash_lock); 1138 spin_unlock(&inode_hash_lock);
1142 return 0; 1139 return 0;
@@ -1291,10 +1288,9 @@ int insert_inode_locked(struct inode *inode)
1291 struct hlist_head *head = inode_hashtable + hash(sb, ino); 1288 struct hlist_head *head = inode_hashtable + hash(sb, ino);
1292 1289
1293 while (1) { 1290 while (1) {
1294 struct hlist_node *node;
1295 struct inode *old = NULL; 1291 struct inode *old = NULL;
1296 spin_lock(&inode_hash_lock); 1292 spin_lock(&inode_hash_lock);
1297 hlist_for_each_entry(old, node, head, i_hash) { 1293 hlist_for_each_entry(old, head, i_hash) {
1298 if (old->i_ino != ino) 1294 if (old->i_ino != ino)
1299 continue; 1295 continue;
1300 if (old->i_sb != sb) 1296 if (old->i_sb != sb)
@@ -1306,7 +1302,7 @@ int insert_inode_locked(struct inode *inode)
1306 } 1302 }
1307 break; 1303 break;
1308 } 1304 }
1309 if (likely(!node)) { 1305 if (likely(!old)) {
1310 spin_lock(&inode->i_lock); 1306 spin_lock(&inode->i_lock);
1311 inode->i_state |= I_NEW; 1307 inode->i_state |= I_NEW;
1312 hlist_add_head(&inode->i_hash, head); 1308 hlist_add_head(&inode->i_hash, head);
@@ -1334,11 +1330,10 @@ int insert_inode_locked4(struct inode *inode, unsigned long hashval,
1334 struct hlist_head *head = inode_hashtable + hash(sb, hashval); 1330 struct hlist_head *head = inode_hashtable + hash(sb, hashval);
1335 1331
1336 while (1) { 1332 while (1) {
1337 struct hlist_node *node;
1338 struct inode *old = NULL; 1333 struct inode *old = NULL;
1339 1334
1340 spin_lock(&inode_hash_lock); 1335 spin_lock(&inode_hash_lock);
1341 hlist_for_each_entry(old, node, head, i_hash) { 1336 hlist_for_each_entry(old, head, i_hash) {
1342 if (old->i_sb != sb) 1337 if (old->i_sb != sb)
1343 continue; 1338 continue;
1344 if (!test(old, data)) 1339 if (!test(old, data))
@@ -1350,7 +1345,7 @@ int insert_inode_locked4(struct inode *inode, unsigned long hashval,
1350 } 1345 }
1351 break; 1346 break;
1352 } 1347 }
1353 if (likely(!node)) { 1348 if (likely(!old)) {
1354 spin_lock(&inode->i_lock); 1349 spin_lock(&inode->i_lock);
1355 inode->i_state |= I_NEW; 1350 inode->i_state |= I_NEW;
1356 hlist_add_head(&inode->i_hash, head); 1351 hlist_add_head(&inode->i_hash, head);
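
The hlist conversion is mechanical: the scratch struct hlist_node * disappears and the cursor is derived from the entry pointer itself, which is also why the insert_inode_locked() checks switch from likely(!node) to likely(!old): after a completed walk it is the entry pointer that ends up NULL. A before/after sketch with illustrative names:

	struct foo {
		int key;
		struct hlist_node link;
	};

	/* before: the iterator needed its own struct hlist_node cursor */
	struct hlist_node *pos;
	struct foo *f;

	hlist_for_each_entry(f, pos, head, link)
		if (f->key == key)
			return f;

	/* after: the node parameter is gone; f is NULL once the walk ends */
	hlist_for_each_entry(f, head, link)
		if (f->key == key)
			return f;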
diff --git a/fs/lockd/host.c b/fs/lockd/host.c
index 0e17090c310f..abdd75d44dd4 100644
--- a/fs/lockd/host.c
+++ b/fs/lockd/host.c
@@ -32,15 +32,15 @@
32static struct hlist_head nlm_server_hosts[NLM_HOST_NRHASH]; 32static struct hlist_head nlm_server_hosts[NLM_HOST_NRHASH];
33static struct hlist_head nlm_client_hosts[NLM_HOST_NRHASH]; 33static struct hlist_head nlm_client_hosts[NLM_HOST_NRHASH];
34 34
35#define for_each_host(host, pos, chain, table) \ 35#define for_each_host(host, chain, table) \
36 for ((chain) = (table); \ 36 for ((chain) = (table); \
37 (chain) < (table) + NLM_HOST_NRHASH; ++(chain)) \ 37 (chain) < (table) + NLM_HOST_NRHASH; ++(chain)) \
38 hlist_for_each_entry((host), (pos), (chain), h_hash) 38 hlist_for_each_entry((host), (chain), h_hash)
39 39
40#define for_each_host_safe(host, pos, next, chain, table) \ 40#define for_each_host_safe(host, next, chain, table) \
41 for ((chain) = (table); \ 41 for ((chain) = (table); \
42 (chain) < (table) + NLM_HOST_NRHASH; ++(chain)) \ 42 (chain) < (table) + NLM_HOST_NRHASH; ++(chain)) \
43 hlist_for_each_entry_safe((host), (pos), (next), \ 43 hlist_for_each_entry_safe((host), (next), \
44 (chain), h_hash) 44 (chain), h_hash)
45 45
46static unsigned long nrhosts; 46static unsigned long nrhosts;
@@ -225,7 +225,6 @@ struct nlm_host *nlmclnt_lookup_host(const struct sockaddr *sap,
225 .net = net, 225 .net = net,
226 }; 226 };
227 struct hlist_head *chain; 227 struct hlist_head *chain;
228 struct hlist_node *pos;
229 struct nlm_host *host; 228 struct nlm_host *host;
230 struct nsm_handle *nsm = NULL; 229 struct nsm_handle *nsm = NULL;
231 struct lockd_net *ln = net_generic(net, lockd_net_id); 230 struct lockd_net *ln = net_generic(net, lockd_net_id);
@@ -237,7 +236,7 @@ struct nlm_host *nlmclnt_lookup_host(const struct sockaddr *sap,
237 mutex_lock(&nlm_host_mutex); 236 mutex_lock(&nlm_host_mutex);
238 237
239 chain = &nlm_client_hosts[nlm_hash_address(sap)]; 238 chain = &nlm_client_hosts[nlm_hash_address(sap)];
240 hlist_for_each_entry(host, pos, chain, h_hash) { 239 hlist_for_each_entry(host, chain, h_hash) {
241 if (host->net != net) 240 if (host->net != net)
242 continue; 241 continue;
243 if (!rpc_cmp_addr(nlm_addr(host), sap)) 242 if (!rpc_cmp_addr(nlm_addr(host), sap))
@@ -322,7 +321,6 @@ struct nlm_host *nlmsvc_lookup_host(const struct svc_rqst *rqstp,
322 const size_t hostname_len) 321 const size_t hostname_len)
323{ 322{
324 struct hlist_head *chain; 323 struct hlist_head *chain;
325 struct hlist_node *pos;
326 struct nlm_host *host = NULL; 324 struct nlm_host *host = NULL;
327 struct nsm_handle *nsm = NULL; 325 struct nsm_handle *nsm = NULL;
328 struct sockaddr *src_sap = svc_daddr(rqstp); 326 struct sockaddr *src_sap = svc_daddr(rqstp);
@@ -350,7 +348,7 @@ struct nlm_host *nlmsvc_lookup_host(const struct svc_rqst *rqstp,
350 nlm_gc_hosts(net); 348 nlm_gc_hosts(net);
351 349
352 chain = &nlm_server_hosts[nlm_hash_address(ni.sap)]; 350 chain = &nlm_server_hosts[nlm_hash_address(ni.sap)];
353 hlist_for_each_entry(host, pos, chain, h_hash) { 351 hlist_for_each_entry(host, chain, h_hash) {
354 if (host->net != net) 352 if (host->net != net)
355 continue; 353 continue;
356 if (!rpc_cmp_addr(nlm_addr(host), ni.sap)) 354 if (!rpc_cmp_addr(nlm_addr(host), ni.sap))
@@ -515,10 +513,9 @@ static struct nlm_host *next_host_state(struct hlist_head *cache,
515{ 513{
516 struct nlm_host *host; 514 struct nlm_host *host;
517 struct hlist_head *chain; 515 struct hlist_head *chain;
518 struct hlist_node *pos;
519 516
520 mutex_lock(&nlm_host_mutex); 517 mutex_lock(&nlm_host_mutex);
521 for_each_host(host, pos, chain, cache) { 518 for_each_host(host, chain, cache) {
522 if (host->h_nsmhandle == nsm 519 if (host->h_nsmhandle == nsm
523 && host->h_nsmstate != info->state) { 520 && host->h_nsmstate != info->state) {
524 host->h_nsmstate = info->state; 521 host->h_nsmstate = info->state;
@@ -570,7 +567,6 @@ void nlm_host_rebooted(const struct nlm_reboot *info)
570static void nlm_complain_hosts(struct net *net) 567static void nlm_complain_hosts(struct net *net)
571{ 568{
572 struct hlist_head *chain; 569 struct hlist_head *chain;
573 struct hlist_node *pos;
574 struct nlm_host *host; 570 struct nlm_host *host;
575 571
576 if (net) { 572 if (net) {
@@ -587,7 +583,7 @@ static void nlm_complain_hosts(struct net *net)
587 dprintk("lockd: %lu hosts left:\n", nrhosts); 583 dprintk("lockd: %lu hosts left:\n", nrhosts);
588 } 584 }
589 585
590 for_each_host(host, pos, chain, nlm_server_hosts) { 586 for_each_host(host, chain, nlm_server_hosts) {
591 if (net && host->net != net) 587 if (net && host->net != net)
592 continue; 588 continue;
593 dprintk(" %s (cnt %d use %d exp %ld net %p)\n", 589 dprintk(" %s (cnt %d use %d exp %ld net %p)\n",
@@ -600,14 +596,13 @@ void
600nlm_shutdown_hosts_net(struct net *net) 596nlm_shutdown_hosts_net(struct net *net)
601{ 597{
602 struct hlist_head *chain; 598 struct hlist_head *chain;
603 struct hlist_node *pos;
604 struct nlm_host *host; 599 struct nlm_host *host;
605 600
606 mutex_lock(&nlm_host_mutex); 601 mutex_lock(&nlm_host_mutex);
607 602
608 /* First, make all hosts eligible for gc */ 603 /* First, make all hosts eligible for gc */
609 dprintk("lockd: nuking all hosts in net %p...\n", net); 604 dprintk("lockd: nuking all hosts in net %p...\n", net);
610 for_each_host(host, pos, chain, nlm_server_hosts) { 605 for_each_host(host, chain, nlm_server_hosts) {
611 if (net && host->net != net) 606 if (net && host->net != net)
612 continue; 607 continue;
613 host->h_expires = jiffies - 1; 608 host->h_expires = jiffies - 1;
@@ -644,11 +639,11 @@ static void
644nlm_gc_hosts(struct net *net) 639nlm_gc_hosts(struct net *net)
645{ 640{
646 struct hlist_head *chain; 641 struct hlist_head *chain;
647 struct hlist_node *pos, *next; 642 struct hlist_node *next;
648 struct nlm_host *host; 643 struct nlm_host *host;
649 644
650 dprintk("lockd: host garbage collection for net %p\n", net); 645 dprintk("lockd: host garbage collection for net %p\n", net);
651 for_each_host(host, pos, chain, nlm_server_hosts) { 646 for_each_host(host, chain, nlm_server_hosts) {
652 if (net && host->net != net) 647 if (net && host->net != net)
653 continue; 648 continue;
654 host->h_inuse = 0; 649 host->h_inuse = 0;
@@ -657,7 +652,7 @@ nlm_gc_hosts(struct net *net)
657 /* Mark all hosts that hold locks, blocks or shares */ 652 /* Mark all hosts that hold locks, blocks or shares */
658 nlmsvc_mark_resources(net); 653 nlmsvc_mark_resources(net);
659 654
660 for_each_host_safe(host, pos, next, chain, nlm_server_hosts) { 655 for_each_host_safe(host, next, chain, nlm_server_hosts) {
661 if (net && host->net != net) 656 if (net && host->net != net)
662 continue; 657 continue;
663 if (atomic_read(&host->h_count) || host->h_inuse 658 if (atomic_read(&host->h_count) || host->h_inuse
diff --git a/fs/lockd/svcsubs.c b/fs/lockd/svcsubs.c
index b3a24b07d981..d17bb62b06d6 100644
--- a/fs/lockd/svcsubs.c
+++ b/fs/lockd/svcsubs.c
@@ -84,7 +84,6 @@ __be32
84nlm_lookup_file(struct svc_rqst *rqstp, struct nlm_file **result, 84nlm_lookup_file(struct svc_rqst *rqstp, struct nlm_file **result,
85 struct nfs_fh *f) 85 struct nfs_fh *f)
86{ 86{
87 struct hlist_node *pos;
88 struct nlm_file *file; 87 struct nlm_file *file;
89 unsigned int hash; 88 unsigned int hash;
90 __be32 nfserr; 89 __be32 nfserr;
@@ -96,7 +95,7 @@ nlm_lookup_file(struct svc_rqst *rqstp, struct nlm_file **result,
96 /* Lock file table */ 95 /* Lock file table */
97 mutex_lock(&nlm_file_mutex); 96 mutex_lock(&nlm_file_mutex);
98 97
99 hlist_for_each_entry(file, pos, &nlm_files[hash], f_list) 98 hlist_for_each_entry(file, &nlm_files[hash], f_list)
100 if (!nfs_compare_fh(&file->f_handle, f)) 99 if (!nfs_compare_fh(&file->f_handle, f))
101 goto found; 100 goto found;
102 101
@@ -248,13 +247,13 @@ static int
248nlm_traverse_files(void *data, nlm_host_match_fn_t match, 247nlm_traverse_files(void *data, nlm_host_match_fn_t match,
249 int (*is_failover_file)(void *data, struct nlm_file *file)) 248 int (*is_failover_file)(void *data, struct nlm_file *file))
250{ 249{
251 struct hlist_node *pos, *next; 250 struct hlist_node *next;
252 struct nlm_file *file; 251 struct nlm_file *file;
253 int i, ret = 0; 252 int i, ret = 0;
254 253
255 mutex_lock(&nlm_file_mutex); 254 mutex_lock(&nlm_file_mutex);
256 for (i = 0; i < FILE_NRHASH; i++) { 255 for (i = 0; i < FILE_NRHASH; i++) {
257 hlist_for_each_entry_safe(file, pos, next, &nlm_files[i], f_list) { 256 hlist_for_each_entry_safe(file, next, &nlm_files[i], f_list) {
258 if (is_failover_file && !is_failover_file(data, file)) 257 if (is_failover_file && !is_failover_file(data, file))
259 continue; 258 continue;
260 file->f_count++; 259 file->f_count++;
diff --git a/fs/nfs/client.c b/fs/nfs/client.c
index 9f3c66438d0e..84d8eae203a7 100644
--- a/fs/nfs/client.c
+++ b/fs/nfs/client.c
@@ -197,7 +197,6 @@ error_0:
197EXPORT_SYMBOL_GPL(nfs_alloc_client); 197EXPORT_SYMBOL_GPL(nfs_alloc_client);
198 198
199#if IS_ENABLED(CONFIG_NFS_V4) 199#if IS_ENABLED(CONFIG_NFS_V4)
200/* idr_remove_all is not needed as all id's are removed by nfs_put_client */
201void nfs_cleanup_cb_ident_idr(struct net *net) 200void nfs_cleanup_cb_ident_idr(struct net *net)
202{ 201{
203 struct nfs_net *nn = net_generic(net, nfs_net_id); 202 struct nfs_net *nn = net_generic(net, nfs_net_id);
diff --git a/fs/nfs/nfs4client.c b/fs/nfs/nfs4client.c
index 2e9779b58b7a..47d100872390 100644
--- a/fs/nfs/nfs4client.c
+++ b/fs/nfs/nfs4client.c
@@ -29,15 +29,14 @@ static int nfs_get_cb_ident_idr(struct nfs_client *clp, int minorversion)
29 29
30 if (clp->rpc_ops->version != 4 || minorversion != 0) 30 if (clp->rpc_ops->version != 4 || minorversion != 0)
31 return ret; 31 return ret;
32retry: 32 idr_preload(GFP_KERNEL);
33 if (!idr_pre_get(&nn->cb_ident_idr, GFP_KERNEL))
34 return -ENOMEM;
35 spin_lock(&nn->nfs_client_lock); 33 spin_lock(&nn->nfs_client_lock);
36 ret = idr_get_new(&nn->cb_ident_idr, clp, &clp->cl_cb_ident); 34 ret = idr_alloc(&nn->cb_ident_idr, clp, 0, 0, GFP_NOWAIT);
35 if (ret >= 0)
36 clp->cl_cb_ident = ret;
37 spin_unlock(&nn->nfs_client_lock); 37 spin_unlock(&nn->nfs_client_lock);
38 if (ret == -EAGAIN) 38 idr_preload_end();
39 goto retry; 39 return ret < 0 ? ret : 0;
40 return ret;
41} 40}
42 41
43#ifdef CONFIG_NFS_V4_1 42#ifdef CONFIG_NFS_V4_1
diff --git a/fs/nfs/pnfs_dev.c b/fs/nfs/pnfs_dev.c
index d35b62e83ea6..6da209bd9408 100644
--- a/fs/nfs/pnfs_dev.c
+++ b/fs/nfs/pnfs_dev.c
@@ -77,9 +77,8 @@ _lookup_deviceid(const struct pnfs_layoutdriver_type *ld,
 			   long hash)
 {
 	struct nfs4_deviceid_node *d;
-	struct hlist_node *n;
 
-	hlist_for_each_entry_rcu(d, n, &nfs4_deviceid_cache[hash], node)
+	hlist_for_each_entry_rcu(d, &nfs4_deviceid_cache[hash], node)
 		if (d->ld == ld && d->nfs_client == clp &&
 		    !memcmp(&d->deviceid, id, sizeof(*id))) {
 			if (atomic_read(&d->ref))
@@ -248,12 +247,11 @@ static void
 _deviceid_purge_client(const struct nfs_client *clp, long hash)
 {
 	struct nfs4_deviceid_node *d;
-	struct hlist_node *n;
 	HLIST_HEAD(tmp);
 
 	spin_lock(&nfs4_deviceid_lock);
 	rcu_read_lock();
-	hlist_for_each_entry_rcu(d, n, &nfs4_deviceid_cache[hash], node)
+	hlist_for_each_entry_rcu(d, &nfs4_deviceid_cache[hash], node)
 		if (d->nfs_client == clp && atomic_read(&d->ref)) {
 			hlist_del_init_rcu(&d->node);
 			hlist_add_head(&d->tmpnode, &tmp);
@@ -291,12 +289,11 @@ void
 nfs4_deviceid_mark_client_invalid(struct nfs_client *clp)
 {
 	struct nfs4_deviceid_node *d;
-	struct hlist_node *n;
 	int i;
 
 	rcu_read_lock();
 	for (i = 0; i < NFS4_DEVICE_ID_HASH_SIZE; i ++){
-		hlist_for_each_entry_rcu(d, n, &nfs4_deviceid_cache[i], node)
+		hlist_for_each_entry_rcu(d, &nfs4_deviceid_cache[i], node)
 			if (d->nfs_client == clp)
 				set_bit(NFS_DEVICEID_INVALID, &d->flags);
 	}
diff --git a/fs/nfsd/nfscache.c b/fs/nfsd/nfscache.c
index 2cbac34a55da..da3dbd0f8979 100644
--- a/fs/nfsd/nfscache.c
+++ b/fs/nfsd/nfscache.c
@@ -120,7 +120,6 @@ hash_refile(struct svc_cacherep *rp)
 int
 nfsd_cache_lookup(struct svc_rqst *rqstp)
 {
-	struct hlist_node	*hn;
 	struct hlist_head 	*rh;
 	struct svc_cacherep	*rp;
 	__be32			xid = rqstp->rq_xid;
@@ -141,7 +140,7 @@ nfsd_cache_lookup(struct svc_rqst *rqstp)
 	rtn = RC_DOIT;
 
 	rh = &cache_hash[request_hash(xid)];
-	hlist_for_each_entry(rp, hn, rh, c_hash) {
+	hlist_for_each_entry(rp, rh, c_hash) {
 		if (rp->c_state != RC_UNUSED &&
 		    xid == rp->c_xid && proc == rp->c_proc &&
 		    proto == rp->c_prot && vers == rp->c_vers &&
diff --git a/fs/notify/fsnotify.c b/fs/notify/fsnotify.c
index 6baadb5a8430..4bb21d67d9b1 100644
--- a/fs/notify/fsnotify.c
+++ b/fs/notify/fsnotify.c
@@ -52,7 +52,6 @@ void __fsnotify_vfsmount_delete(struct vfsmount *mnt)
 void __fsnotify_update_child_dentry_flags(struct inode *inode)
 {
 	struct dentry *alias;
-	struct hlist_node *p;
 	int watched;
 
 	if (!S_ISDIR(inode->i_mode))
@@ -64,7 +63,7 @@ void __fsnotify_update_child_dentry_flags(struct inode *inode)
 	spin_lock(&inode->i_lock);
 	/* run all of the dentries associated with this inode.  Since this is a
 	 * directory, there damn well better only be one item on this list */
-	hlist_for_each_entry(alias, p, &inode->i_dentry, d_alias) {
+	hlist_for_each_entry(alias, &inode->i_dentry, d_alias) {
 		struct dentry *child;
 
 		/* run all of the children of the original inode and fix their
diff --git a/fs/notify/inode_mark.c b/fs/notify/inode_mark.c
index f31e90fc050d..74825be65b7b 100644
--- a/fs/notify/inode_mark.c
+++ b/fs/notify/inode_mark.c
@@ -36,12 +36,11 @@
 static void fsnotify_recalc_inode_mask_locked(struct inode *inode)
 {
 	struct fsnotify_mark *mark;
-	struct hlist_node *pos;
 	__u32 new_mask = 0;
 
 	assert_spin_locked(&inode->i_lock);
 
-	hlist_for_each_entry(mark, pos, &inode->i_fsnotify_marks, i.i_list)
+	hlist_for_each_entry(mark, &inode->i_fsnotify_marks, i.i_list)
 		new_mask |= mark->mask;
 	inode->i_fsnotify_mask = new_mask;
 }
@@ -87,11 +86,11 @@ void fsnotify_destroy_inode_mark(struct fsnotify_mark *mark)
 void fsnotify_clear_marks_by_inode(struct inode *inode)
 {
 	struct fsnotify_mark *mark, *lmark;
-	struct hlist_node *pos, *n;
+	struct hlist_node *n;
 	LIST_HEAD(free_list);
 
 	spin_lock(&inode->i_lock);
-	hlist_for_each_entry_safe(mark, pos, n, &inode->i_fsnotify_marks, i.i_list) {
+	hlist_for_each_entry_safe(mark, n, &inode->i_fsnotify_marks, i.i_list) {
 		list_add(&mark->i.free_i_list, &free_list);
 		hlist_del_init_rcu(&mark->i.i_list);
 		fsnotify_get_mark(mark);
@@ -129,11 +128,10 @@ static struct fsnotify_mark *fsnotify_find_inode_mark_locked(
 		struct inode *inode)
 {
 	struct fsnotify_mark *mark;
-	struct hlist_node *pos;
 
 	assert_spin_locked(&inode->i_lock);
 
-	hlist_for_each_entry(mark, pos, &inode->i_fsnotify_marks, i.i_list) {
+	hlist_for_each_entry(mark, &inode->i_fsnotify_marks, i.i_list) {
 		if (mark->group == group) {
 			fsnotify_get_mark(mark);
 			return mark;
@@ -194,8 +192,7 @@ int fsnotify_add_inode_mark(struct fsnotify_mark *mark,
 			    struct fsnotify_group *group, struct inode *inode,
 			    int allow_dups)
 {
-	struct fsnotify_mark *lmark;
-	struct hlist_node *node, *last = NULL;
+	struct fsnotify_mark *lmark, *last = NULL;
 	int ret = 0;
 
 	mark->flags |= FSNOTIFY_MARK_FLAG_INODE;
@@ -214,8 +211,8 @@ int fsnotify_add_inode_mark(struct fsnotify_mark *mark,
 	}
 
 	/* should mark be in the middle of the current list? */
-	hlist_for_each_entry(lmark, node, &inode->i_fsnotify_marks, i.i_list) {
-		last = node;
+	hlist_for_each_entry(lmark, &inode->i_fsnotify_marks, i.i_list) {
+		last = lmark;
 
 		if ((lmark->group == group) && !allow_dups) {
 			ret = -EEXIST;
@@ -235,7 +232,7 @@ int fsnotify_add_inode_mark(struct fsnotify_mark *mark,
 
 	BUG_ON(last == NULL);
 	/* mark should be the last entry.  last is the current last entry */
-	hlist_add_after_rcu(last, &mark->i.i_list);
+	hlist_add_after_rcu(&last->i.i_list, &mark->i.i_list);
 out:
 	fsnotify_recalc_inode_mask_locked(inode);
 	spin_unlock(&inode->i_lock);
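With the node cursor gone, code that used to remember the bare hlist_node now remembers the typed entry and takes the address of its embedded node when linking, as in the hlist_add_after_rcu() call above. The same idiom as a sketch with illustrative types:

	struct mark {
		int prio;
		struct hlist_node list;
	};

	static void append_mark(struct hlist_head *head, struct mark *new)
	{
		struct mark *m, *last = NULL;

		hlist_for_each_entry(m, head, list)
			last = m;	/* typed cursor survives the loop */
		if (last)
			hlist_add_after_rcu(&last->list, &new->list);
		else
			hlist_add_head_rcu(&new->list, head);
	}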
diff --git a/fs/notify/inotify/inotify_fsnotify.c b/fs/notify/inotify/inotify_fsnotify.c
index 871569c7d609..4216308b81b4 100644
--- a/fs/notify/inotify/inotify_fsnotify.c
+++ b/fs/notify/inotify/inotify_fsnotify.c
@@ -197,7 +197,6 @@ static void inotify_free_group_priv(struct fsnotify_group *group)
 {
 	/* ideally the idr is empty and we won't hit the BUG in the callback */
 	idr_for_each(&group->inotify_data.idr, idr_callback, group);
-	idr_remove_all(&group->inotify_data.idr);
 	idr_destroy(&group->inotify_data.idr);
 	atomic_dec(&group->inotify_data.user->inotify_devs);
 	free_uid(group->inotify_data.user);
diff --git a/fs/notify/inotify/inotify_user.c b/fs/notify/inotify/inotify_user.c
index 07f7a92fe88e..e0f7c1241a6a 100644
--- a/fs/notify/inotify/inotify_user.c
+++ b/fs/notify/inotify/inotify_user.c
@@ -364,22 +364,20 @@ static int inotify_add_to_idr(struct idr *idr, spinlock_t *idr_lock,
 {
 	int ret;
 
-	do {
-		if (unlikely(!idr_pre_get(idr, GFP_KERNEL)))
-			return -ENOMEM;
+	idr_preload(GFP_KERNEL);
+	spin_lock(idr_lock);
 
-		spin_lock(idr_lock);
-		ret = idr_get_new_above(idr, i_mark, *last_wd + 1,
-					&i_mark->wd);
+	ret = idr_alloc(idr, i_mark, *last_wd + 1, 0, GFP_NOWAIT);
+	if (ret >= 0) {
 		/* we added the mark to the idr, take a reference */
-		if (!ret) {
+		i_mark->wd = ret;
 		*last_wd = i_mark->wd;
 		fsnotify_get_mark(&i_mark->fsn_mark);
 	}
-		spin_unlock(idr_lock);
-	} while (ret == -EAGAIN);
 
-	return ret;
+	spin_unlock(idr_lock);
+	idr_preload_end();
+	return ret < 0 ? ret : 0;
 }
 
 static struct inotify_inode_mark *inotify_idr_find_locked(struct fsnotify_group *group,
diff --git a/fs/notify/vfsmount_mark.c b/fs/notify/vfsmount_mark.c
index 4df58b8ea64a..68ca5a8704b5 100644
--- a/fs/notify/vfsmount_mark.c
+++ b/fs/notify/vfsmount_mark.c
@@ -33,12 +33,12 @@
 void fsnotify_clear_marks_by_mount(struct vfsmount *mnt)
 {
 	struct fsnotify_mark *mark, *lmark;
-	struct hlist_node *pos, *n;
+	struct hlist_node *n;
 	struct mount *m = real_mount(mnt);
 	LIST_HEAD(free_list);
 
 	spin_lock(&mnt->mnt_root->d_lock);
-	hlist_for_each_entry_safe(mark, pos, n, &m->mnt_fsnotify_marks, m.m_list) {
+	hlist_for_each_entry_safe(mark, n, &m->mnt_fsnotify_marks, m.m_list) {
 		list_add(&mark->m.free_m_list, &free_list);
 		hlist_del_init_rcu(&mark->m.m_list);
 		fsnotify_get_mark(mark);
@@ -71,12 +71,11 @@ static void fsnotify_recalc_vfsmount_mask_locked(struct vfsmount *mnt)
 {
 	struct mount *m = real_mount(mnt);
 	struct fsnotify_mark *mark;
-	struct hlist_node *pos;
 	__u32 new_mask = 0;
 
 	assert_spin_locked(&mnt->mnt_root->d_lock);
 
-	hlist_for_each_entry(mark, pos, &m->mnt_fsnotify_marks, m.m_list)
+	hlist_for_each_entry(mark, &m->mnt_fsnotify_marks, m.m_list)
 		new_mask |= mark->mask;
 	m->mnt_fsnotify_mask = new_mask;
 }
@@ -114,11 +113,10 @@ static struct fsnotify_mark *fsnotify_find_vfsmount_mark_locked(struct fsnotify_
 {
 	struct mount *m = real_mount(mnt);
 	struct fsnotify_mark *mark;
-	struct hlist_node *pos;
 
 	assert_spin_locked(&mnt->mnt_root->d_lock);
 
-	hlist_for_each_entry(mark, pos, &m->mnt_fsnotify_marks, m.m_list) {
+	hlist_for_each_entry(mark, &m->mnt_fsnotify_marks, m.m_list) {
 		if (mark->group == group) {
 			fsnotify_get_mark(mark);
 			return mark;
@@ -153,8 +151,7 @@ int fsnotify_add_vfsmount_mark(struct fsnotify_mark *mark,
 			       int allow_dups)
 {
 	struct mount *m = real_mount(mnt);
-	struct fsnotify_mark *lmark;
-	struct hlist_node *node, *last = NULL;
+	struct fsnotify_mark *lmark, *last = NULL;
 	int ret = 0;
 
 	mark->flags |= FSNOTIFY_MARK_FLAG_VFSMOUNT;
@@ -173,8 +170,8 @@ int fsnotify_add_vfsmount_mark(struct fsnotify_mark *mark,
 	}
 
 	/* should mark be in the middle of the current list? */
-	hlist_for_each_entry(lmark, node, &m->mnt_fsnotify_marks, m.m_list) {
-		last = node;
+	hlist_for_each_entry(lmark, &m->mnt_fsnotify_marks, m.m_list) {
+		last = lmark;
 
 		if ((lmark->group == group) && !allow_dups) {
 			ret = -EEXIST;
@@ -194,7 +191,7 @@ int fsnotify_add_vfsmount_mark(struct fsnotify_mark *mark,
 
 	BUG_ON(last == NULL);
 	/* mark should be the last entry.  last is the current last entry */
-	hlist_add_after_rcu(last, &mark->m.m_list);
+	hlist_add_after_rcu(&last->m.m_list, &mark->m.m_list);
 out:
 	fsnotify_recalc_vfsmount_mask_locked(mnt);
 	spin_unlock(&mnt->mnt_root->d_lock);
diff --git a/fs/ocfs2/cluster/tcp.c b/fs/ocfs2/cluster/tcp.c
index 0d2bf566e39a..aa88bd8bcedc 100644
--- a/fs/ocfs2/cluster/tcp.c
+++ b/fs/ocfs2/cluster/tcp.c
@@ -304,28 +304,22 @@ static u8 o2net_num_from_nn(struct o2net_node *nn)
 
 static int o2net_prep_nsw(struct o2net_node *nn, struct o2net_status_wait *nsw)
 {
-	int ret = 0;
-
-	do {
-		if (!idr_pre_get(&nn->nn_status_idr, GFP_ATOMIC)) {
-			ret = -EAGAIN;
-			break;
-		}
-		spin_lock(&nn->nn_lock);
-		ret = idr_get_new(&nn->nn_status_idr, nsw, &nsw->ns_id);
-		if (ret == 0)
-			list_add_tail(&nsw->ns_node_item,
-				      &nn->nn_status_list);
-		spin_unlock(&nn->nn_lock);
-	} while (ret == -EAGAIN);
+	int ret;
 
-	if (ret == 0)  {
-		init_waitqueue_head(&nsw->ns_wq);
-		nsw->ns_sys_status = O2NET_ERR_NONE;
-		nsw->ns_status = 0;
+	spin_lock(&nn->nn_lock);
+	ret = idr_alloc(&nn->nn_status_idr, nsw, 0, 0, GFP_ATOMIC);
+	if (ret >= 0) {
+		nsw->ns_id = ret;
+		list_add_tail(&nsw->ns_node_item, &nn->nn_status_list);
 	}
+	spin_unlock(&nn->nn_lock);
+	if (ret < 0)
+		return ret;
 
-	return ret;
+	init_waitqueue_head(&nsw->ns_wq);
+	nsw->ns_sys_status = O2NET_ERR_NONE;
+	nsw->ns_status = 0;
+	return 0;
 }
 
 static void o2net_complete_nsw_locked(struct o2net_node *nn,
diff --git a/fs/ocfs2/dcache.c b/fs/ocfs2/dcache.c
index 8db4b58b2e4b..ef999729e274 100644
--- a/fs/ocfs2/dcache.c
+++ b/fs/ocfs2/dcache.c
@@ -169,11 +169,10 @@ struct dentry *ocfs2_find_local_alias(struct inode *inode,
 					u64 parent_blkno,
 					int skip_unhashed)
 {
-	struct hlist_node *p;
 	struct dentry *dentry;
 
 	spin_lock(&inode->i_lock);
-	hlist_for_each_entry(dentry, p, &inode->i_dentry, d_alias) {
+	hlist_for_each_entry(dentry, &inode->i_dentry, d_alias) {
 		spin_lock(&dentry->d_lock);
 		if (ocfs2_match_dentry(dentry, parent_blkno, skip_unhashed)) {
 			trace_ocfs2_find_local_alias(dentry->d_name.len,
diff --git a/fs/ocfs2/dlm/dlmrecovery.c b/fs/ocfs2/dlm/dlmrecovery.c
index 01ebfd0bdad7..eeac97bb3bfa 100644
--- a/fs/ocfs2/dlm/dlmrecovery.c
+++ b/fs/ocfs2/dlm/dlmrecovery.c
@@ -2083,7 +2083,6 @@ static void dlm_finish_local_lockres_recovery(struct dlm_ctxt *dlm,
 					      u8 dead_node, u8 new_master)
 {
 	int i;
-	struct hlist_node *hash_iter;
 	struct hlist_head *bucket;
 	struct dlm_lock_resource *res, *next;
 
@@ -2114,7 +2113,7 @@ static void dlm_finish_local_lockres_recovery(struct dlm_ctxt *dlm,
 	 * if necessary */
 	for (i = 0; i < DLM_HASH_BUCKETS; i++) {
 		bucket = dlm_lockres_hash(dlm, i);
-		hlist_for_each_entry(res, hash_iter, bucket, hash_node) {
+		hlist_for_each_entry(res, bucket, hash_node) {
 			if (!(res->state & DLM_LOCK_RES_RECOVERING))
 				continue;
 
@@ -2273,7 +2272,6 @@ static void dlm_free_dead_locks(struct dlm_ctxt *dlm,
 
 static void dlm_do_local_recovery_cleanup(struct dlm_ctxt *dlm, u8 dead_node)
 {
-	struct hlist_node *iter;
 	struct dlm_lock_resource *res;
 	int i;
 	struct hlist_head *bucket;
@@ -2299,7 +2297,7 @@ static void dlm_do_local_recovery_cleanup(struct dlm_ctxt *dlm, u8 dead_node)
 	 */
 	for (i = 0; i < DLM_HASH_BUCKETS; i++) {
 		bucket = dlm_lockres_hash(dlm, i);
-		hlist_for_each_entry(res, iter, bucket, hash_node) {
+		hlist_for_each_entry(res, bucket, hash_node) {
 			/* always prune any $RECOVERY entries for dead nodes,
 			 * otherwise hangs can occur during later recovery */
 			if (dlm_is_recovery_lock(res->lockname.name,
diff --git a/fs/ocfs2/suballoc.c b/fs/ocfs2/suballoc.c
index f169da4624fd..b7e74b580c0f 100644
--- a/fs/ocfs2/suballoc.c
+++ b/fs/ocfs2/suballoc.c
@@ -642,7 +642,7 @@ ocfs2_block_group_alloc_discontig(handle_t *handle,
 	 * cluster groups will be staying in cache for the duration of
 	 * this operation.
 	 */
-	ac->ac_allow_chain_relink = 0;
+	ac->ac_disable_chain_relink = 1;
 
 	/* Claim the first region */
 	status = ocfs2_block_group_claim_bits(osb, handle, ac, min_bits,
@@ -1823,7 +1823,7 @@ static int ocfs2_search_chain(struct ocfs2_alloc_context *ac,
 	 * Do this *after* figuring out how many bits we're taking out
 	 * of our target group.
 	 */
-	if (ac->ac_allow_chain_relink &&
+	if (!ac->ac_disable_chain_relink &&
 	    (prev_group_bh) &&
 	    (ocfs2_block_group_reasonably_empty(bg, res->sr_bits))) {
 		status = ocfs2_relink_block_group(handle, alloc_inode,
@@ -1928,7 +1928,6 @@ static int ocfs2_claim_suballoc_bits(struct ocfs2_alloc_context *ac,
 
 	victim = ocfs2_find_victim_chain(cl);
 	ac->ac_chain = victim;
-	ac->ac_allow_chain_relink = 1;
 
 	status = ocfs2_search_chain(ac, handle, bits_wanted, min_bits,
 				    res, &bits_left);
@@ -1947,7 +1946,7 @@ static int ocfs2_claim_suballoc_bits(struct ocfs2_alloc_context *ac,
 	 * searching each chain in order. Don't allow chain relinking
 	 * because we only calculate enough journal credits for one
 	 * relink per alloc. */
-	ac->ac_allow_chain_relink = 0;
+	ac->ac_disable_chain_relink = 1;
 	for (i = 0; i < le16_to_cpu(cl->cl_next_free_rec); i ++) {
 		if (i == victim)
 			continue;
diff --git a/fs/ocfs2/suballoc.h b/fs/ocfs2/suballoc.h
index b8afabfeede4..a36d0aa50911 100644
--- a/fs/ocfs2/suballoc.h
+++ b/fs/ocfs2/suballoc.h
@@ -49,7 +49,7 @@ struct ocfs2_alloc_context {
 
 	/* these are used by the chain search */
 	u16    ac_chain;
-	int    ac_allow_chain_relink;
+	int    ac_disable_chain_relink;
 	group_search_t *ac_group_search;
 
 	u64    ac_last_group;
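The rename inverts the flag's sense: a zero-filled allocation context now permits chain relinking by default, and only the paths that cannot afford a second relink set the bit, instead of every allocation path having to remember to enable it. Assuming the context is zero-initialized at allocation (ocfs2 allocates these contexts with kzalloc()), the default becomes the safe one; a sketch:

	ac = kzalloc(sizeof(*ac), GFP_NOFS);
	if (!ac)
		return -ENOMEM;
	/* ac->ac_disable_chain_relink == 0: relinking allowed by default;
	 * only paths that budgeted a single relink set it to 1. */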
diff --git a/fs/ocfs2/xattr.c b/fs/ocfs2/xattr.c
index 0ba9ea1e7961..2e3ea308c144 100644
--- a/fs/ocfs2/xattr.c
+++ b/fs/ocfs2/xattr.c
@@ -7189,7 +7189,7 @@ int ocfs2_init_security_and_acl(struct inode *dir,
 	struct buffer_head *dir_bh = NULL;
 
 	ret = ocfs2_init_security_get(inode, dir, qstr, NULL);
-	if (!ret) {
+	if (ret) {
 		mlog_errno(ret);
 		goto leave;
 	}
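This hunk fixes an inverted error check: ocfs2_init_security_get() returns 0 on success and a negative errno on failure, so the old code logged an error and bailed out on the success path. For reference, the usual kernel pattern the fix restores (a sketch, not new code from the tree):

	ret = ocfs2_init_security_get(inode, dir, qstr, NULL);
	if (ret) {			/* non-zero means failure */
		mlog_errno(ret);	/* log only actual failures */
		goto leave;
	}
	/* success path continues here */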
diff --git a/fs/proc/base.c b/fs/proc/base.c
index f3b133d79914..69078c7cef1f 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -73,6 +73,7 @@
 #include <linux/security.h>
 #include <linux/ptrace.h>
 #include <linux/tracehook.h>
+#include <linux/printk.h>
 #include <linux/cgroup.h>
 #include <linux/cpuset.h>
 #include <linux/audit.h>
@@ -952,7 +953,7 @@ static ssize_t oom_adj_write(struct file *file, const char __user *buf,
 	 * /proc/pid/oom_adj is provided for legacy purposes, ask users to use
 	 * /proc/pid/oom_score_adj instead.
 	 */
-	printk_once(KERN_WARNING "%s (%d): /proc/%d/oom_adj is deprecated, please use /proc/%d/oom_score_adj instead.\n",
+	pr_warn_once("%s (%d): /proc/%d/oom_adj is deprecated, please use /proc/%d/oom_score_adj instead.\n",
 		     current->comm, task_pid_nr(current), task_pid_nr(task),
 		     task_pid_nr(task));
 
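pr_warn_once() expands to printk_once(KERN_WARNING pr_fmt(fmt), ...), so these conversions are behavior-preserving while letting each file prepend a consistent prefix by defining pr_fmt() before its includes. A sketch (the "example: " prefix is illustrative):

	#define pr_fmt(fmt) "example: " fmt	/* must precede printk.h */
	#include <linux/printk.h>

	static void warn_legacy(void)
	{
		/* emitted at most once, as KERN_WARNING "example: ..." */
		pr_warn_once("legacy interface used, please migrate\n");
	}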
diff --git a/fs/proc/generic.c b/fs/proc/generic.c
index 2983dc52ca25..4b3b3ffb52f1 100644
--- a/fs/proc/generic.c
+++ b/fs/proc/generic.c
@@ -15,6 +15,7 @@
 #include <linux/mm.h>
 #include <linux/module.h>
 #include <linux/slab.h>
+#include <linux/printk.h>
 #include <linux/mount.h>
 #include <linux/init.h>
 #include <linux/idr.h>
@@ -132,11 +133,8 @@ __proc_file_read(struct file *file, char __user *buf, size_t nbytes,
 		}
 
 		if (start == NULL) {
-			if (n > PAGE_SIZE) {
-				printk(KERN_ERR
-				       "proc_file_read: Apparent buffer overflow!\n");
+			if (n > PAGE_SIZE)	/* Apparent buffer overflow */
 				n = PAGE_SIZE;
-			}
 			n -= *ppos;
 			if (n <= 0)
 				break;
@@ -144,26 +142,19 @@ __proc_file_read(struct file *file, char __user *buf, size_t nbytes,
 				n = count;
 			start = page + *ppos;
 		} else if (start < page) {
-			if (n > PAGE_SIZE) {
-				printk(KERN_ERR
-				       "proc_file_read: Apparent buffer overflow!\n");
+			if (n > PAGE_SIZE)	/* Apparent buffer overflow */
 				n = PAGE_SIZE;
-			}
 			if (n > count) {
 				/*
 				 * Don't reduce n because doing so might
 				 * cut off part of a data block.
 				 */
-				printk(KERN_WARNING
-				       "proc_file_read: Read count exceeded\n");
+				pr_warn("proc_file_read: count exceeded\n");
 			}
 		} else /* start >= page */ {
 			unsigned long startoff = (unsigned long)(start - page);
-			if (n > (PAGE_SIZE - startoff)) {
-				printk(KERN_ERR
-				       "proc_file_read: Apparent buffer overflow!\n");
+			if (n > (PAGE_SIZE - startoff))	/* buffer overflow? */
 				n = PAGE_SIZE - startoff;
-			}
 			if (n > count)
 				n = count;
 		}
@@ -569,7 +560,7 @@ static int proc_register(struct proc_dir_entry * dir, struct proc_dir_entry * dp
 
 	for (tmp = dir->subdir; tmp; tmp = tmp->next)
 		if (strcmp(tmp->name, dp->name) == 0) {
-			WARN(1, KERN_WARNING "proc_dir_entry '%s/%s' already registered\n",
+			WARN(1, "proc_dir_entry '%s/%s' already registered\n",
 				dir->name, dp->name);
 			break;
 		}
@@ -830,9 +821,9 @@ void remove_proc_entry(const char *name, struct proc_dir_entry *parent)
 	if (S_ISDIR(de->mode))
 		parent->nlink--;
 	de->nlink = 0;
-	WARN(de->subdir, KERN_WARNING "%s: removing non-empty directory "
+	WARN(de->subdir, "%s: removing non-empty directory "
 			"'%s/%s', leaking at least '%s'\n", __func__,
 			de->parent->name, de->name, de->subdir->name);
 	pde_put(de);
 }
 EXPORT_SYMBOL(remove_proc_entry);
diff --git a/fs/proc/inode.c b/fs/proc/inode.c
index 70322e1a4f0f..a86aebc9ba7c 100644
--- a/fs/proc/inode.c
+++ b/fs/proc/inode.c
@@ -13,6 +13,7 @@
 #include <linux/stat.h>
 #include <linux/completion.h>
 #include <linux/poll.h>
+#include <linux/printk.h>
 #include <linux/file.h>
 #include <linux/limits.h>
 #include <linux/init.h>
@@ -495,13 +496,13 @@ int proc_fill_super(struct super_block *s)
 	pde_get(&proc_root);
 	root_inode = proc_get_inode(s, &proc_root);
 	if (!root_inode) {
-		printk(KERN_ERR "proc_fill_super: get root inode failed\n");
+		pr_err("proc_fill_super: get root inode failed\n");
 		return -ENOMEM;
 	}
 
 	s->s_root = d_make_root(root_inode);
 	if (!s->s_root) {
-		printk(KERN_ERR "proc_fill_super: allocate dentry failed\n");
+		pr_err("proc_fill_super: allocate dentry failed\n");
 		return -ENOMEM;
 	}
 
diff --git a/fs/proc/internal.h b/fs/proc/internal.h
index 252544c05207..85ff3a4598b3 100644
--- a/fs/proc/internal.h
+++ b/fs/proc/internal.h
@@ -11,6 +11,7 @@
 
 #include <linux/sched.h>
 #include <linux/proc_fs.h>
+#include <linux/binfmts.h>
 struct ctl_table_header;
 struct mempolicy;
 
@@ -108,7 +109,7 @@ static inline int task_dumpable(struct task_struct *task)
 	if (mm)
 		dumpable = get_dumpable(mm);
 	task_unlock(task);
-	if (dumpable == SUID_DUMPABLE_ENABLED)
+	if (dumpable == SUID_DUMP_USER)
 		return 1;
 	return 0;
 }
diff --git a/fs/proc/kcore.c b/fs/proc/kcore.c
index e96d4f18ca3a..eda6f017f272 100644
--- a/fs/proc/kcore.c
+++ b/fs/proc/kcore.c
@@ -17,6 +17,7 @@
 #include <linux/elfcore.h>
 #include <linux/vmalloc.h>
 #include <linux/highmem.h>
+#include <linux/printk.h>
 #include <linux/bootmem.h>
 #include <linux/init.h>
 #include <linux/slab.h>
@@ -619,7 +620,7 @@ static int __init proc_kcore_init(void)
 	proc_root_kcore = proc_create("kcore", S_IRUSR, NULL,
 				      &proc_kcore_operations);
 	if (!proc_root_kcore) {
-		printk(KERN_ERR "couldn't create /proc/kcore\n");
+		pr_err("couldn't create /proc/kcore\n");
 		return 0; /* Always returns 0. */
 	}
 	/* Store text area if it's special */
diff --git a/fs/proc/proc_devtree.c b/fs/proc/proc_devtree.c
index de20ec480fa0..30b590f5bd35 100644
--- a/fs/proc/proc_devtree.c
+++ b/fs/proc/proc_devtree.c
@@ -8,6 +8,7 @@
 #include <linux/time.h>
 #include <linux/proc_fs.h>
 #include <linux/seq_file.h>
+#include <linux/printk.h>
 #include <linux/stat.h>
 #include <linux/string.h>
 #include <linux/of.h>
@@ -110,8 +111,8 @@ void proc_device_tree_update_prop(struct proc_dir_entry *pde,
 		if (ent->data == oldprop)
 			break;
 	if (ent == NULL) {
-		printk(KERN_WARNING "device-tree: property \"%s\" "
-		       " does not exist\n", oldprop->name);
+		pr_warn("device-tree: property \"%s\" does not exist\n",
+			oldprop->name);
 	} else {
 		ent->data = newprop;
 		ent->size = newprop->length;
@@ -153,8 +154,8 @@ static const char *fixup_name(struct device_node *np, struct proc_dir_entry *de,
 realloc:
 	fixed_name = kmalloc(fixup_len, GFP_KERNEL);
 	if (fixed_name == NULL) {
-		printk(KERN_ERR "device-tree: Out of memory trying to fixup "
+		pr_err("device-tree: Out of memory trying to fixup "
 		       "name \"%s\"\n", name);
 		return name;
 	}
 
@@ -175,8 +176,8 @@ retry:
 		goto retry;
 	}
 
-	printk(KERN_WARNING "device-tree: Duplicate name in %s, "
-	       "renamed to \"%s\"\n", np->full_name, fixed_name);
+	pr_warn("device-tree: Duplicate name in %s, renamed to \"%s\"\n",
+		np->full_name, fixed_name);
 
 	return fixed_name;
 }
diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c
index 612df79cc6a1..ac05f33a0dde 100644
--- a/fs/proc/proc_sysctl.c
+++ b/fs/proc/proc_sysctl.c
@@ -5,6 +5,7 @@
 #include <linux/sysctl.h>
 #include <linux/poll.h>
 #include <linux/proc_fs.h>
+#include <linux/printk.h>
 #include <linux/security.h>
 #include <linux/sched.h>
 #include <linux/namei.h>
@@ -57,7 +58,7 @@ static void sysctl_print_dir(struct ctl_dir *dir)
 {
 	if (dir->header.parent)
 		sysctl_print_dir(dir->header.parent);
-	printk(KERN_CONT "%s/", dir->header.ctl_table[0].procname);
+	pr_cont("%s/", dir->header.ctl_table[0].procname);
 }
 
 static int namecmp(const char *name1, int len1, const char *name2, int len2)
@@ -134,9 +135,9 @@ static int insert_entry(struct ctl_table_header *head, struct ctl_table *entry)
 		else if (cmp > 0)
 			p = &(*p)->rb_right;
 		else {
-			printk(KERN_ERR "sysctl duplicate entry: ");
+			pr_err("sysctl duplicate entry: ");
 			sysctl_print_dir(head->parent);
-			printk(KERN_CONT "/%s\n", entry->procname);
+			pr_cont("/%s\n", entry->procname);
 			return -EEXIST;
 		}
 	}
@@ -927,9 +928,9 @@ found:
 	subdir->header.nreg++;
 failed:
 	if (unlikely(IS_ERR(subdir))) {
-		printk(KERN_ERR "sysctl could not get directory: ");
+		pr_err("sysctl could not get directory: ");
 		sysctl_print_dir(dir);
-		printk(KERN_CONT "/%*.*s %ld\n",
+		pr_cont("/%*.*s %ld\n",
 		       namelen, namelen, name, PTR_ERR(subdir));
 	}
 	drop_sysctl_table(&dir->header);
@@ -995,8 +996,8 @@ static int sysctl_err(const char *path, struct ctl_table *table, char *fmt, ...)
 	vaf.fmt = fmt;
 	vaf.va = &args;
 
-	printk(KERN_ERR "sysctl table check failed: %s/%s %pV\n",
+	pr_err("sysctl table check failed: %s/%s %pV\n",
 	       path, table->procname, &vaf);
 
 	va_end(args);
 	return -EINVAL;
@@ -1510,9 +1511,9 @@ static void put_links(struct ctl_table_header *header)
 			drop_sysctl_table(link_head);
 		}
 		else {
-			printk(KERN_ERR "sysctl link missing during unregister: ");
+			pr_err("sysctl link missing during unregister: ");
 			sysctl_print_dir(parent);
-			printk(KERN_CONT "/%s\n", name);
+			pr_cont("/%s\n", name);
 		}
 	}
 }
diff --git a/fs/proc/vmcore.c b/fs/proc/vmcore.c
index 0d5071d29985..b870f740ab5a 100644
--- a/fs/proc/vmcore.c
+++ b/fs/proc/vmcore.c
@@ -15,6 +15,7 @@
 #include <linux/export.h>
 #include <linux/slab.h>
 #include <linux/highmem.h>
+#include <linux/printk.h>
 #include <linux/bootmem.h>
 #include <linux/init.h>
 #include <linux/crash_dump.h>
@@ -175,15 +176,15 @@ static ssize_t read_vmcore(struct file *file, char __user *buffer,
 		start = map_offset_to_paddr(*fpos, &vmcore_list, &curr_m);
 		if (!curr_m)
 			return -EINVAL;
-		if ((tsz = (PAGE_SIZE - (start & ~PAGE_MASK))) > buflen)
-			tsz = buflen;
-
-		/* Calculate left bytes in current memory segment. */
-		nr_bytes = (curr_m->size - (start - curr_m->paddr));
-		if (tsz > nr_bytes)
-			tsz = nr_bytes;
 
 	while (buflen) {
+		tsz = min_t(size_t, buflen, PAGE_SIZE - (start & ~PAGE_MASK));
+
+		/* Calculate left bytes in current memory segment. */
+		nr_bytes = (curr_m->size - (start - curr_m->paddr));
+		if (tsz > nr_bytes)
+			tsz = nr_bytes;
+
 		tmp = read_from_oldmem(buffer, tsz, &start, 1);
 		if (tmp < 0)
 			return tmp;
@@ -198,12 +199,6 @@ static ssize_t read_vmcore(struct file *file, char __user *buffer,
 						struct vmcore, list);
 			start = curr_m->paddr;
 		}
-		if ((tsz = (PAGE_SIZE - (start & ~PAGE_MASK))) > buflen)
-			tsz = buflen;
-		/* Calculate left bytes in current memory segment. */
-		nr_bytes = (curr_m->size - (start - curr_m->paddr));
-		if (tsz > nr_bytes)
-			tsz = nr_bytes;
 	}
 	return acc;
 }
@@ -553,8 +548,7 @@ static int __init parse_crash_elf64_headers(void)
 	    ehdr.e_ehsize != sizeof(Elf64_Ehdr) ||
 	    ehdr.e_phentsize != sizeof(Elf64_Phdr) ||
 	    ehdr.e_phnum == 0) {
-		printk(KERN_WARNING "Warning: Core image elf header is not"
-			"sane\n");
+		pr_warn("Warning: Core image elf header is not sane\n");
 		return -EINVAL;
 	}
 
@@ -609,8 +603,7 @@ static int __init parse_crash_elf32_headers(void)
 	    ehdr.e_ehsize != sizeof(Elf32_Ehdr) ||
 	    ehdr.e_phentsize != sizeof(Elf32_Phdr) ||
 	    ehdr.e_phnum == 0) {
-		printk(KERN_WARNING "Warning: Core image elf header is not"
-			"sane\n");
+		pr_warn("Warning: Core image elf header is not sane\n");
 		return -EINVAL;
 	}
 
@@ -653,8 +646,7 @@ static int __init parse_crash_elf_headers(void)
 	if (rc < 0)
 		return rc;
 	if (memcmp(e_ident, ELFMAG, SELFMAG) != 0) {
-		printk(KERN_WARNING "Warning: Core image elf header"
-			" not found\n");
+		pr_warn("Warning: Core image elf header not found\n");
 		return -EINVAL;
 	}
 
@@ -673,8 +665,7 @@ static int __init parse_crash_elf_headers(void)
 	/* Determine vmcore size. */
 		vmcore_size = get_vmcore_size_elf32(elfcorebuf);
 	} else {
-		printk(KERN_WARNING "Warning: Core image elf header is not"
-			" sane\n");
+		pr_warn("Warning: Core image elf header is not sane\n");
 		return -EINVAL;
 	}
 	return 0;
@@ -690,7 +681,7 @@ static int __init vmcore_init(void)
 		return rc;
 	rc = parse_crash_elf_headers();
 	if (rc) {
-		printk(KERN_WARNING "Kdump: vmcore not initialized\n");
+		pr_warn("Kdump: vmcore not initialized\n");
 		return rc;
 	}
 
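The read_vmcore() rework hoists the duplicated chunk-size clamping into the top of the copy loop. min_t() forces both operands to a stated type before comparing, which avoids signedness surprises between size_t and unsigned long. The per-iteration clamp extracted as a sketch (names follow the diff; the helper itself is hypothetical):

	static size_t vmcore_chunk(size_t buflen, u64 start, u64 seg_left)
	{
		/* never cross a page boundary in a single copy */
		size_t tsz = min_t(size_t, buflen,
				   PAGE_SIZE - (start & ~PAGE_MASK));

		if (tsz > seg_left)	/* and never read past the segment */
			tsz = seg_left;
		return tsz;
	}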
diff --git a/fs/seq_file.c b/fs/seq_file.c
index f2bc3dfd0b88..15c6304bab71 100644
--- a/fs/seq_file.c
+++ b/fs/seq_file.c
@@ -308,27 +308,27 @@ loff_t seq_lseek(struct file *file, loff_t offset, int whence)
 	mutex_lock(&m->lock);
 	m->version = file->f_version;
 	switch (whence) {
-	case 1:
+	case SEEK_CUR:
 		offset += file->f_pos;
-	case 0:
+	case SEEK_SET:
 		if (offset < 0)
 			break;
 		retval = offset;
 		if (offset != m->read_pos) {
-			while ((retval=traverse(m, offset)) == -EAGAIN)
+			while ((retval = traverse(m, offset)) == -EAGAIN)
 				;
 			if (retval) {
 				/* with extreme prejudice... */
 				file->f_pos = 0;
 				m->read_pos = 0;
 				m->version = 0;
 				m->index = 0;
 				m->count = 0;
 			} else {
 				m->read_pos = offset;
 				retval = file->f_pos = offset;
-			}
 			}
+		}
 	}
 	file->f_version = m->version;
 	mutex_unlock(&m->lock);
diff --git a/fs/super.c b/fs/super.c
index 12f123712161..7465d4364208 100644
--- a/fs/super.c
+++ b/fs/super.c
@@ -447,14 +447,13 @@ struct super_block *sget(struct file_system_type *type,
 		       void *data)
 {
 	struct super_block *s = NULL;
-	struct hlist_node *node;
 	struct super_block *old;
 	int err;
 
 retry:
 	spin_lock(&sb_lock);
 	if (test) {
-		hlist_for_each_entry(old, node, &type->fs_supers, s_instances) {
+		hlist_for_each_entry(old, &type->fs_supers, s_instances) {
 			if (!test(old, data))
 				continue;
 			if (!grab_super(old))
@@ -554,10 +553,9 @@ void iterate_supers_type(struct file_system_type *type,
 			void (*f)(struct super_block *, void *), void *arg)
 {
 	struct super_block *sb, *p = NULL;
-	struct hlist_node *node;
 
 	spin_lock(&sb_lock);
-	hlist_for_each_entry(sb, node, &type->fs_supers, s_instances) {
+	hlist_for_each_entry(sb, &type->fs_supers, s_instances) {
 		sb->s_count++;
 		spin_unlock(&sb_lock);
 
@@ -842,7 +840,7 @@ int get_anon_bdev(dev_t *p)
 	else if (error)
 		return -EAGAIN;
 
-	if ((dev & MAX_IDR_MASK) == (1 << MINORBITS)) {
+	if (dev == (1 << MINORBITS)) {
 		spin_lock(&unnamed_dev_lock);
 		ida_remove(&unnamed_dev_ida, dev);
 		if (unnamed_dev_start > dev)
diff --git a/fs/sysfs/bin.c b/fs/sysfs/bin.c
index 2ce9a5db6ab5..15c68f9489ae 100644
--- a/fs/sysfs/bin.c
+++ b/fs/sysfs/bin.c
@@ -461,14 +461,13 @@ const struct file_operations bin_fops = {
 void unmap_bin_file(struct sysfs_dirent *attr_sd)
 {
 	struct bin_buffer *bb;
-	struct hlist_node *tmp;
 
 	if (sysfs_type(attr_sd) != SYSFS_KOBJ_BIN_ATTR)
 		return;
 
 	mutex_lock(&sysfs_bin_lock);
 
-	hlist_for_each_entry(bb, tmp, &attr_sd->s_bin_attr.buffers, list) {
+	hlist_for_each_entry(bb, &attr_sd->s_bin_attr.buffers, list) {
 		struct inode *inode = file_inode(bb->file);
 
 		unmap_mapping_range(inode->i_mapping, 0, 0, 1);
diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c
index 96fcbb85ff83..d1dba7ce75ae 100644
--- a/fs/xfs/xfs_log_recover.c
+++ b/fs/xfs/xfs_log_recover.c
@@ -1442,9 +1442,8 @@ xlog_recover_find_tid(
 	xlog_tid_t		tid)
 {
 	xlog_recover_t		*trans;
-	struct hlist_node	*n;
 
-	hlist_for_each_entry(trans, n, head, r_list) {
+	hlist_for_each_entry(trans, head, r_list) {
 		if (trans->r_log_tid == tid)
 			return trans;
 	}
diff --git a/include/linux/debug_locks.h b/include/linux/debug_locks.h
index 3bd46f766751..a975de1ff59f 100644
--- a/include/linux/debug_locks.h
+++ b/include/linux/debug_locks.h
@@ -51,7 +51,7 @@ struct task_struct;
 extern void debug_show_all_locks(void);
 extern void debug_show_held_locks(struct task_struct *task);
 extern void debug_check_no_locks_freed(const void *from, unsigned long len);
-extern void debug_check_no_locks_held(struct task_struct *task);
+extern void debug_check_no_locks_held(void);
 #else
 static inline void debug_show_all_locks(void)
 {
@@ -67,7 +67,7 @@ debug_check_no_locks_freed(const void *from, unsigned long len)
 }
 
 static inline void
-debug_check_no_locks_held(struct task_struct *task)
+debug_check_no_locks_held(void)
 {
 }
 #endif
diff --git a/include/linux/eventfd.h b/include/linux/eventfd.h
index 3c3ef19a625a..cf5d2af61b81 100644
--- a/include/linux/eventfd.h
+++ b/include/linux/eventfd.h
@@ -13,7 +13,7 @@
 #include <linux/wait.h>
 
 /*
- * CAREFUL: Check include/asm-generic/fcntl.h when defining
+ * CAREFUL: Check include/uapi/asm-generic/fcntl.h when defining
  * new flags, since they might collide with O_* ones. We want
  * to re-use O_* flags that couldn't possibly have a meaning
  * from eventfd, in order to leave a free define-space for
diff --git a/include/linux/freezer.h b/include/linux/freezer.h
index e70df40d84f6..043a5cf8b5ba 100644
--- a/include/linux/freezer.h
+++ b/include/linux/freezer.h
@@ -3,6 +3,7 @@
 #ifndef FREEZER_H_INCLUDED
 #define FREEZER_H_INCLUDED
 
+#include <linux/debug_locks.h>
 #include <linux/sched.h>
 #include <linux/wait.h>
 #include <linux/atomic.h>
@@ -48,6 +49,8 @@ extern void thaw_kernel_threads(void);
 
 static inline bool try_to_freeze(void)
 {
+	if (!(current->flags & PF_NOFREEZE))
+		debug_check_no_locks_held();
 	might_sleep();
 	if (likely(!freezing(current)))
 		return false;
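Dropping the task argument lets debug_check_no_locks_held() assert on current only, which makes it cheap enough to call from try_to_freeze(): with this hunk, any freezable task that attempts to freeze while still holding a lock gets a lockdep report (PF_NOFREEZE tasks are exempt). In a freezable kthread the call site looks like this sketch (do_work() is a placeholder):

	while (!kthread_should_stop()) {
		do_work();		/* must drop all locks before freezing */
		try_to_freeze();	/* now warns if locks are still held */
	}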
diff --git a/include/linux/hashtable.h b/include/linux/hashtable.h
index 227c62424f3c..a9df51f5d54c 100644
--- a/include/linux/hashtable.h
+++ b/include/linux/hashtable.h
@@ -115,51 +115,50 @@ static inline void hash_del_rcu(struct hlist_node *node)
  * hash_for_each - iterate over a hashtable
  * @name: hashtable to iterate
  * @bkt: integer to use as bucket loop cursor
- * @node: the &struct list_head to use as a loop cursor for each entry
  * @obj: the type * to use as a loop cursor for each entry
  * @member: the name of the hlist_node within the struct
  */
-#define hash_for_each(name, bkt, node, obj, member)			\
-	for ((bkt) = 0, node = NULL; node == NULL && (bkt) < HASH_SIZE(name); (bkt)++)\
-		hlist_for_each_entry(obj, node, &name[bkt], member)
+#define hash_for_each(name, bkt, obj, member)				\
+	for ((bkt) = 0, obj = NULL; obj == NULL && (bkt) < HASH_SIZE(name);\
+			(bkt)++)\
+		hlist_for_each_entry(obj, &name[bkt], member)
 
 /**
  * hash_for_each_rcu - iterate over a rcu enabled hashtable
  * @name: hashtable to iterate
  * @bkt: integer to use as bucket loop cursor
- * @node: the &struct list_head to use as a loop cursor for each entry
  * @obj: the type * to use as a loop cursor for each entry
  * @member: the name of the hlist_node within the struct
  */
-#define hash_for_each_rcu(name, bkt, node, obj, member)			\
-	for ((bkt) = 0, node = NULL; node == NULL && (bkt) < HASH_SIZE(name); (bkt)++)\
-		hlist_for_each_entry_rcu(obj, node, &name[bkt], member)
+#define hash_for_each_rcu(name, bkt, obj, member)			\
+	for ((bkt) = 0, obj = NULL; obj == NULL && (bkt) < HASH_SIZE(name);\
+			(bkt)++)\
+		hlist_for_each_entry_rcu(obj, &name[bkt], member)
 
 /**
  * hash_for_each_safe - iterate over a hashtable safe against removal of
  * hash entry
  * @name: hashtable to iterate
  * @bkt: integer to use as bucket loop cursor
- * @node: the &struct list_head to use as a loop cursor for each entry
  * @tmp: a &struct used for temporary storage
  * @obj: the type * to use as a loop cursor for each entry
  * @member: the name of the hlist_node within the struct
  */
-#define hash_for_each_safe(name, bkt, node, tmp, obj, member)		\
-	for ((bkt) = 0, node = NULL; node == NULL && (bkt) < HASH_SIZE(name); (bkt)++)\
-		hlist_for_each_entry_safe(obj, node, tmp, &name[bkt], member)
+#define hash_for_each_safe(name, bkt, tmp, obj, member)			\
+	for ((bkt) = 0, obj = NULL; obj == NULL && (bkt) < HASH_SIZE(name);\
+			(bkt)++)\
+		hlist_for_each_entry_safe(obj, tmp, &name[bkt], member)
 
 /**
  * hash_for_each_possible - iterate over all possible objects hashing to the
  * same bucket
  * @name: hashtable to iterate
  * @obj: the type * to use as a loop cursor for each entry
- * @node: the &struct list_head to use as a loop cursor for each entry
  * @member: the name of the hlist_node within the struct
  * @key: the key of the objects to iterate over
  */
-#define hash_for_each_possible(name, obj, node, member, key)		\
-	hlist_for_each_entry(obj, node, &name[hash_min(key, HASH_BITS(name))], member)
+#define hash_for_each_possible(name, obj, member, key)			\
+	hlist_for_each_entry(obj, &name[hash_min(key, HASH_BITS(name))], member)
 
 /**
  * hash_for_each_possible_rcu - iterate over all possible objects hashing to the
@@ -167,25 +166,24 @@ static inline void hash_del_rcu(struct hlist_node *node)
  * in a rcu enabled hashtable
  * @name: hashtable to iterate
  * @obj: the type * to use as a loop cursor for each entry
- * @node: the &struct list_head to use as a loop cursor for each entry
  * @member: the name of the hlist_node within the struct
  * @key: the key of the objects to iterate over
  */
-#define hash_for_each_possible_rcu(name, obj, node, member, key)	\
-	hlist_for_each_entry_rcu(obj, node, &name[hash_min(key, HASH_BITS(name))], member)
+#define hash_for_each_possible_rcu(name, obj, member, key)		\
+	hlist_for_each_entry_rcu(obj, &name[hash_min(key, HASH_BITS(name))],\
+		member)
 
 /**
  * hash_for_each_possible_safe - iterate over all possible objects hashing to the
  * same bucket safe against removals
  * @name: hashtable to iterate
  * @obj: the type * to use as a loop cursor for each entry
- * @node: the &struct list_head to use as a loop cursor for each entry
  * @tmp: a &struct used for temporary storage
  * @member: the name of the hlist_node within the struct
  * @key: the key of the objects to iterate over
  */
-#define hash_for_each_possible_safe(name, obj, node, tmp, member, key)	\
-	hlist_for_each_entry_safe(obj, node, tmp,			\
+#define hash_for_each_possible_safe(name, obj, tmp, member, key)	\
+	hlist_for_each_entry_safe(obj, tmp,\
 		&name[hash_min(key, HASH_BITS(name))], member)
 
 
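The hashtable.h wrappers shed their @node parameter to match the new hlist iterators, so existing users simply drop one argument. A post-conversion usage sketch (the table size, struct, and key here are illustrative):

	#include <linux/hashtable.h>

	struct entry {
		int key;
		struct hlist_node node;
	};

	static DEFINE_HASHTABLE(tbl, 4);	/* 2^4 = 16 buckets */

	static struct entry *lookup(int key)
	{
		struct entry *e;

		/* no 'struct hlist_node *' cursor argument any more */
		hash_for_each_possible(tbl, e, node, key)
			if (e->key == key)
				return e;
		return NULL;
	}

Entries would be added with hash_add(tbl, &e->node, e->key) as before.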
diff --git a/include/linux/idr.h b/include/linux/idr.h
index e5eb125effe6..a6f38b5c34e4 100644
--- a/include/linux/idr.h
+++ b/include/linux/idr.h
@@ -17,69 +17,40 @@
17#include <linux/init.h> 17#include <linux/init.h>
18#include <linux/rcupdate.h> 18#include <linux/rcupdate.h>
19 19
20#if BITS_PER_LONG == 32 20/*
21# define IDR_BITS 5 21 * We want shallower trees and thus more bits covered at each layer. 8
22# define IDR_FULL 0xfffffffful 22 * bits gives us large enough first layer for most use cases and maximum
23/* We can only use two of the bits in the top level because there is 23 * tree depth of 4. Each idr_layer is slightly larger than 2k on 64bit and
24 only one possible bit in the top level (5 bits * 7 levels = 35 24 * 1k on 32bit.
25 bits, but you only use 31 bits in the id). */ 25 */
26# define TOP_LEVEL_FULL (IDR_FULL >> 30) 26#define IDR_BITS 8
27#elif BITS_PER_LONG == 64
28# define IDR_BITS 6
29# define IDR_FULL 0xfffffffffffffffful
30/* We can only use two of the bits in the top level because there is
31 only one possible bit in the top level (6 bits * 6 levels = 36
32 bits, but you only use 31 bits in the id). */
33# define TOP_LEVEL_FULL (IDR_FULL >> 62)
34#else
35# error "BITS_PER_LONG is not 32 or 64"
36#endif
37
38#define IDR_SIZE (1 << IDR_BITS) 27#define IDR_SIZE (1 << IDR_BITS)
39#define IDR_MASK ((1 << IDR_BITS)-1) 28#define IDR_MASK ((1 << IDR_BITS)-1)
40 29
41#define MAX_IDR_SHIFT (sizeof(int)*8 - 1)
42#define MAX_IDR_BIT (1U << MAX_IDR_SHIFT)
43#define MAX_IDR_MASK (MAX_IDR_BIT - 1)
44
45/* Leave the possibility of an incomplete final layer */
46#define MAX_IDR_LEVEL ((MAX_IDR_SHIFT + IDR_BITS - 1) / IDR_BITS)
47
48/* Number of id_layer structs to leave in free list */
49#define MAX_IDR_FREE (MAX_IDR_LEVEL * 2)
50
51struct idr_layer { 30struct idr_layer {
52 unsigned long bitmap; /* A zero bit means "space here" */ 31 int prefix; /* the ID prefix of this idr_layer */
32 DECLARE_BITMAP(bitmap, IDR_SIZE); /* A zero bit means "space here" */
53 struct idr_layer __rcu *ary[1<<IDR_BITS]; 33 struct idr_layer __rcu *ary[1<<IDR_BITS];
54 int count; /* When zero, we can release it */ 34 int count; /* When zero, we can release it */
55 int layer; /* distance from leaf */ 35 int layer; /* distance from leaf */
56 struct rcu_head rcu_head; 36 struct rcu_head rcu_head;
57}; 37};
58 38
59struct idr { 39struct idr {
60 struct idr_layer __rcu *top; 40 struct idr_layer __rcu *hint; /* the last layer allocated from */
61 struct idr_layer *id_free; 41 struct idr_layer __rcu *top;
62 int layers; /* only valid without concurrent changes */ 42 struct idr_layer *id_free;
63 int id_free_cnt; 43 int layers; /* only valid w/o concurrent changes */
64 spinlock_t lock; 44 int id_free_cnt;
45 spinlock_t lock;
65}; 46};
66 47
67#define IDR_INIT(name) \ 48#define IDR_INIT(name) \
68{ \ 49{ \
69 .top = NULL, \ 50 .lock = __SPIN_LOCK_UNLOCKED(name.lock), \
70 .id_free = NULL, \
71 .layers = 0, \
72 .id_free_cnt = 0, \
73 .lock = __SPIN_LOCK_UNLOCKED(name.lock), \
74} 51}
75#define DEFINE_IDR(name) struct idr name = IDR_INIT(name) 52#define DEFINE_IDR(name) struct idr name = IDR_INIT(name)
76 53
77/* Actions to be taken after a call to _idr_sub_alloc */
78#define IDR_NEED_TO_GROW -2
79#define IDR_NOMORE_SPACE -3
80
81#define _idr_rc_to_errno(rc) ((rc) == -1 ? -EAGAIN : -ENOSPC)
82
83/** 54/**
84 * DOC: idr sync 55 * DOC: idr sync
85 * idr synchronization (stolen from radix-tree.h) 56 * idr synchronization (stolen from radix-tree.h)
@@ -101,19 +72,90 @@ struct idr {
101 * This is what we export. 72 * This is what we export.
102 */ 73 */
103 74
104void *idr_find(struct idr *idp, int id); 75void *idr_find_slowpath(struct idr *idp, int id);
105int idr_pre_get(struct idr *idp, gfp_t gfp_mask); 76int idr_pre_get(struct idr *idp, gfp_t gfp_mask);
106int idr_get_new(struct idr *idp, void *ptr, int *id);
107int idr_get_new_above(struct idr *idp, void *ptr, int starting_id, int *id); 77int idr_get_new_above(struct idr *idp, void *ptr, int starting_id, int *id);
78void idr_preload(gfp_t gfp_mask);
79int idr_alloc(struct idr *idp, void *ptr, int start, int end, gfp_t gfp_mask);
108int idr_for_each(struct idr *idp, 80int idr_for_each(struct idr *idp,
109 int (*fn)(int id, void *p, void *data), void *data); 81 int (*fn)(int id, void *p, void *data), void *data);
110void *idr_get_next(struct idr *idp, int *nextid); 82void *idr_get_next(struct idr *idp, int *nextid);
111void *idr_replace(struct idr *idp, void *ptr, int id); 83void *idr_replace(struct idr *idp, void *ptr, int id);
112void idr_remove(struct idr *idp, int id); 84void idr_remove(struct idr *idp, int id);
113void idr_remove_all(struct idr *idp); 85void idr_free(struct idr *idp, int id);
114void idr_destroy(struct idr *idp); 86void idr_destroy(struct idr *idp);
115void idr_init(struct idr *idp); 87void idr_init(struct idr *idp);
116 88
89/**
90 * idr_preload_end - end preload section started with idr_preload()
91 *
92 * Each idr_preload() should be matched with an invocation of this
93 * function. See idr_preload() for details.
94 */
95static inline void idr_preload_end(void)
96{
97 preempt_enable();
98}
99
100/**
101 * idr_find - return pointer for given id
102 * @idp: idr handle
103 * @id: lookup key
104 *
105 * Return the pointer given the id it has been registered with. A %NULL
106 * return indicates that @id is not valid or you passed %NULL in
107 * idr_get_new().
108 *
109 * This function can be called under rcu_read_lock(), given that the leaf
 110 * pointers' lifetimes are correctly managed.
111 */
112static inline void *idr_find(struct idr *idr, int id)
113{
114 struct idr_layer *hint = rcu_dereference_raw(idr->hint);
115
116 if (hint && (id & ~IDR_MASK) == hint->prefix)
117 return rcu_dereference_raw(hint->ary[id & IDR_MASK]);
118
119 return idr_find_slowpath(idr, id);
120}
121
122/**
123 * idr_get_new - allocate new idr entry
124 * @idp: idr handle
125 * @ptr: pointer you want associated with the id
126 * @id: pointer to the allocated handle
127 *
128 * Simple wrapper around idr_get_new_above() w/ @starting_id of zero.
129 */
130static inline int idr_get_new(struct idr *idp, void *ptr, int *id)
131{
132 return idr_get_new_above(idp, ptr, 0, id);
133}
134
135/**
136 * idr_for_each_entry - iterate over an idr's elements of a given type
137 * @idp: idr handle
138 * @entry: the type * to use as cursor
139 * @id: id entry's key
140 */
141#define idr_for_each_entry(idp, entry, id) \
142 for (id = 0, entry = (typeof(entry))idr_get_next((idp), &(id)); \
143 entry != NULL; \
144 ++id, entry = (typeof(entry))idr_get_next((idp), &(id)))
145
146void __idr_remove_all(struct idr *idp); /* don't use */
147
148/**
149 * idr_remove_all - remove all ids from the given idr tree
150 * @idp: idr handle
151 *
152 * If you're trying to destroy @idp, calling idr_destroy() is enough.
153 * This is going away. Don't use.
154 */
155static inline void __deprecated idr_remove_all(struct idr *idp)
156{
157 __idr_remove_all(idp);
158}
117 159
118/* 160/*
119 * IDA - IDR based id allocator, use when translation from id to 161 * IDA - IDR based id allocator, use when translation from id to
@@ -141,7 +183,6 @@ struct ida {
141 183
142int ida_pre_get(struct ida *ida, gfp_t gfp_mask); 184int ida_pre_get(struct ida *ida, gfp_t gfp_mask);
143int ida_get_new_above(struct ida *ida, int starting_id, int *p_id); 185int ida_get_new_above(struct ida *ida, int starting_id, int *p_id);
144int ida_get_new(struct ida *ida, int *p_id);
145void ida_remove(struct ida *ida, int id); 186void ida_remove(struct ida *ida, int id);
146void ida_destroy(struct ida *ida); 187void ida_destroy(struct ida *ida);
147void ida_init(struct ida *ida); 188void ida_init(struct ida *ida);
@@ -150,17 +191,18 @@ int ida_simple_get(struct ida *ida, unsigned int start, unsigned int end,
150 gfp_t gfp_mask); 191 gfp_t gfp_mask);
151void ida_simple_remove(struct ida *ida, unsigned int id); 192void ida_simple_remove(struct ida *ida, unsigned int id);
152 193
153void __init idr_init_cache(void);
154
155/** 194/**
156 * idr_for_each_entry - iterate over an idr's elements of a given type 195 * ida_get_new - allocate new ID
157 * @idp: idr handle 196 * @ida: idr handle
158 * @entry: the type * to use as cursor 197 * @p_id: pointer to the allocated handle
159 * @id: id entry's key 198 *
199 * Simple wrapper around ida_get_new_above() w/ @starting_id of zero.
160 */ 200 */
161#define idr_for_each_entry(idp, entry, id) \ 201static inline int ida_get_new(struct ida *ida, int *p_id)
162 for (id = 0, entry = (typeof(entry))idr_get_next((idp), &(id)); \ 202{
163 entry != NULL; \ 203 return ida_get_new_above(ida, 0, p_id);
164 ++id, entry = (typeof(entry))idr_get_next((idp), &(id))) 204}
205
206void __init idr_init_cache(void);
165 207
166#endif /* __IDR_H__ */ 208#endif /* __IDR_H__ */
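
The conversion pattern the new API enables, sketched using only the prototypes above: idr_preload() pre-allocates layers so that idr_alloc() can run under a spinlock with GFP_NOWAIT, and an end of 0 asks for no upper bound (as in the ipc/util.c conversion further down). my_idr, my_lock and my_install are hypothetical names:

	#include <linux/idr.h>
	#include <linux/spinlock.h>

	static DEFINE_IDR(my_idr);
	static DEFINE_SPINLOCK(my_lock);

	static int my_install(void *ptr)
	{
		int id;

		idr_preload(GFP_KERNEL);	/* may sleep; disables preemption */
		spin_lock(&my_lock);
		/* lowest free id >= 1; end of 0 means no upper limit */
		id = idr_alloc(&my_idr, ptr, 1, 0, GFP_NOWAIT);
		spin_unlock(&my_lock);
		idr_preload_end();		/* re-enables preemption */

		return id;	/* allocated id, or -ENOMEM/-ENOSPC */
	}
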
diff --git a/include/linux/if_team.h b/include/linux/if_team.h
index 4648d8021244..cfd21e3d5506 100644
--- a/include/linux/if_team.h
+++ b/include/linux/if_team.h
@@ -216,11 +216,10 @@ static inline struct hlist_head *team_port_index_hash(struct team *team,
216static inline struct team_port *team_get_port_by_index(struct team *team, 216static inline struct team_port *team_get_port_by_index(struct team *team,
217 int port_index) 217 int port_index)
218{ 218{
219 struct hlist_node *p;
220 struct team_port *port; 219 struct team_port *port;
221 struct hlist_head *head = team_port_index_hash(team, port_index); 220 struct hlist_head *head = team_port_index_hash(team, port_index);
222 221
223 hlist_for_each_entry(port, p, head, hlist) 222 hlist_for_each_entry(port, head, hlist)
224 if (port->index == port_index) 223 if (port->index == port_index)
225 return port; 224 return port;
226 return NULL; 225 return NULL;
@@ -228,11 +227,10 @@ static inline struct team_port *team_get_port_by_index(struct team *team,
228static inline struct team_port *team_get_port_by_index_rcu(struct team *team, 227static inline struct team_port *team_get_port_by_index_rcu(struct team *team,
229 int port_index) 228 int port_index)
230{ 229{
231 struct hlist_node *p;
232 struct team_port *port; 230 struct team_port *port;
233 struct hlist_head *head = team_port_index_hash(team, port_index); 231 struct hlist_head *head = team_port_index_hash(team, port_index);
234 232
235 hlist_for_each_entry_rcu(port, p, head, hlist) 233 hlist_for_each_entry_rcu(port, head, hlist)
236 if (port->index == port_index) 234 if (port->index == port_index)
237 return port; 235 return port;
238 return NULL; 236 return NULL;
diff --git a/include/linux/ipmi.h b/include/linux/ipmi.h
index 1487e7906bbd..1f9f56e28851 100644
--- a/include/linux/ipmi.h
+++ b/include/linux/ipmi.h
@@ -35,10 +35,6 @@
35 35
36#include <uapi/linux/ipmi.h> 36#include <uapi/linux/ipmi.h>
37 37
38
39/*
40 * The in-kernel interface.
41 */
42#include <linux/list.h> 38#include <linux/list.h>
43#include <linux/proc_fs.h> 39#include <linux/proc_fs.h>
44 40
diff --git a/include/linux/list.h b/include/linux/list.h
index cc6d2aa6b415..d991cc147c98 100644
--- a/include/linux/list.h
+++ b/include/linux/list.h
@@ -666,54 +666,49 @@ static inline void hlist_move_list(struct hlist_head *old,
666 for (pos = (head)->first; pos && ({ n = pos->next; 1; }); \ 666 for (pos = (head)->first; pos && ({ n = pos->next; 1; }); \
667 pos = n) 667 pos = n)
668 668
669#define hlist_entry_safe(ptr, type, member) \
670 (ptr) ? hlist_entry(ptr, type, member) : NULL
671
669/** 672/**
670 * hlist_for_each_entry - iterate over list of given type 673 * hlist_for_each_entry - iterate over list of given type
671 * @tpos: the type * to use as a loop cursor. 674 * @pos: the type * to use as a loop cursor.
672 * @pos: the &struct hlist_node to use as a loop cursor.
673 * @head: the head for your list. 675 * @head: the head for your list.
674 * @member: the name of the hlist_node within the struct. 676 * @member: the name of the hlist_node within the struct.
675 */ 677 */
676#define hlist_for_each_entry(tpos, pos, head, member) \ 678#define hlist_for_each_entry(pos, head, member) \
677 for (pos = (head)->first; \ 679 for (pos = hlist_entry_safe((head)->first, typeof(*(pos)), member);\
678 pos && \ 680 pos; \
679 ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \ 681 pos = hlist_entry_safe((pos)->member.next, typeof(*(pos)), member))
680 pos = pos->next)
681 682
682/** 683/**
683 * hlist_for_each_entry_continue - iterate over a hlist continuing after current point 684 * hlist_for_each_entry_continue - iterate over a hlist continuing after current point
684 * @tpos: the type * to use as a loop cursor. 685 * @pos: the type * to use as a loop cursor.
685 * @pos: the &struct hlist_node to use as a loop cursor.
686 * @member: the name of the hlist_node within the struct. 686 * @member: the name of the hlist_node within the struct.
687 */ 687 */
688#define hlist_for_each_entry_continue(tpos, pos, member) \ 688#define hlist_for_each_entry_continue(pos, member) \
689 for (pos = (pos)->next; \ 689 for (pos = hlist_entry_safe((pos)->member.next, typeof(*(pos)), member);\
690 pos && \ 690 pos; \
691 ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \ 691 pos = hlist_entry_safe((pos)->member.next, typeof(*(pos)), member))
692 pos = pos->next)
693 692
694/** 693/**
695 * hlist_for_each_entry_from - iterate over a hlist continuing from current point 694 * hlist_for_each_entry_from - iterate over a hlist continuing from current point
696 * @tpos: the type * to use as a loop cursor. 695 * @pos: the type * to use as a loop cursor.
697 * @pos: the &struct hlist_node to use as a loop cursor.
698 * @member: the name of the hlist_node within the struct. 696 * @member: the name of the hlist_node within the struct.
699 */ 697 */
700#define hlist_for_each_entry_from(tpos, pos, member) \ 698#define hlist_for_each_entry_from(pos, member) \
701 for (; pos && \ 699 for (; pos; \
702 ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \ 700 pos = hlist_entry_safe((pos)->member.next, typeof(*(pos)), member))
703 pos = pos->next)
704 701
705/** 702/**
706 * hlist_for_each_entry_safe - iterate over list of given type safe against removal of list entry 703 * hlist_for_each_entry_safe - iterate over list of given type safe against removal of list entry
707 * @tpos: the type * to use as a loop cursor. 704 * @pos: the type * to use as a loop cursor.
708 * @pos: the &struct hlist_node to use as a loop cursor.
709 * @n: another &struct hlist_node to use as temporary storage 705 * @n: another &struct hlist_node to use as temporary storage
710 * @head: the head for your list. 706 * @head: the head for your list.
711 * @member: the name of the hlist_node within the struct. 707 * @member: the name of the hlist_node within the struct.
712 */ 708 */
713#define hlist_for_each_entry_safe(tpos, pos, n, head, member) \ 709#define hlist_for_each_entry_safe(pos, n, head, member) \
714 for (pos = (head)->first; \ 710 for (pos = hlist_entry_safe((head)->first, typeof(*pos), member);\
715 pos && ({ n = pos->next; 1; }) && \ 711 pos && ({ n = pos->member.next; 1; }); \
716 ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \ 712 pos = hlist_entry_safe(n, typeof(*pos), member))
717 pos = n)
718 713
719#endif 714#endif
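
Every caller conversion in this series then takes the same shape; a minimal sketch (struct foo and foo_find are hypothetical):

	#include <linux/list.h>

	struct foo {
		int key;
		struct hlist_node node;
	};

	/* before: struct hlist_node *pos; hlist_for_each_entry(f, pos, head, node) */
	static struct foo *foo_find(struct hlist_head *head, int key)
	{
		struct foo *f;

		hlist_for_each_entry(f, head, node)	/* node cursor gone */
			if (f->key == key)
				return f;
		return NULL;
	}
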
diff --git a/include/linux/mfd/lp8788.h b/include/linux/mfd/lp8788.h
index 2a32b16f79cb..786bf6679a28 100644
--- a/include/linux/mfd/lp8788.h
+++ b/include/linux/mfd/lp8788.h
@@ -16,6 +16,7 @@
16 16
17#include <linux/gpio.h> 17#include <linux/gpio.h>
18#include <linux/irqdomain.h> 18#include <linux/irqdomain.h>
19#include <linux/pwm.h>
19#include <linux/regmap.h> 20#include <linux/regmap.h>
20 21
21#define LP8788_DEV_BUCK "lp8788-buck" 22#define LP8788_DEV_BUCK "lp8788-buck"
@@ -124,11 +125,6 @@ enum lp8788_bl_ramp_step {
124 LP8788_RAMP_65538us, 125 LP8788_RAMP_65538us,
125}; 126};
126 127
127enum lp8788_bl_pwm_polarity {
128 LP8788_PWM_ACTIVE_HIGH,
129 LP8788_PWM_ACTIVE_LOW,
130};
131
132enum lp8788_isink_scale { 128enum lp8788_isink_scale {
133 LP8788_ISINK_SCALE_100mA, 129 LP8788_ISINK_SCALE_100mA,
134 LP8788_ISINK_SCALE_120mA, 130 LP8788_ISINK_SCALE_120mA,
@@ -229,16 +225,6 @@ struct lp8788_charger_platform_data {
229}; 225};
230 226
231/* 227/*
232 * struct lp8788_bl_pwm_data
233 * @pwm_set_intensity : set duty of pwm
234 * @pwm_get_intensity : get current duty of pwm
235 */
236struct lp8788_bl_pwm_data {
237 void (*pwm_set_intensity) (int brightness, int max_brightness);
238 int (*pwm_get_intensity) (int max_brightness);
239};
240
241/*
242 * struct lp8788_backlight_platform_data 228 * struct lp8788_backlight_platform_data
243 * @name : backlight driver name. (default: "lcd-backlight") 229 * @name : backlight driver name. (default: "lcd-backlight")
244 * @initial_brightness : initial value of backlight brightness 230 * @initial_brightness : initial value of backlight brightness
@@ -248,8 +234,8 @@ struct lp8788_bl_pwm_data {
248 * @rise_time : brightness ramp up step time 234 * @rise_time : brightness ramp up step time
249 * @fall_time : brightness ramp down step time 235 * @fall_time : brightness ramp down step time
250 * @pwm_pol : pwm polarity setting when bl_mode is pwm based 236 * @pwm_pol : pwm polarity setting when bl_mode is pwm based
 251 * @pwm_data : platform specific pwm generation functions 237 * @period_ns : platform specific pwm period value. unit is nanoseconds.
 252 * only valid when bl_mode is pwm based 238 * Only valid when bl_mode is LP8788_BL_COMB_PWM_BASED
253 */ 239 */
254struct lp8788_backlight_platform_data { 240struct lp8788_backlight_platform_data {
255 char *name; 241 char *name;
@@ -259,8 +245,8 @@ struct lp8788_backlight_platform_data {
259 enum lp8788_bl_full_scale_current full_scale; 245 enum lp8788_bl_full_scale_current full_scale;
260 enum lp8788_bl_ramp_step rise_time; 246 enum lp8788_bl_ramp_step rise_time;
261 enum lp8788_bl_ramp_step fall_time; 247 enum lp8788_bl_ramp_step fall_time;
262 enum lp8788_bl_pwm_polarity pwm_pol; 248 enum pwm_polarity pwm_pol;
263 struct lp8788_bl_pwm_data pwm_data; 249 unsigned int period_ns;
264}; 250};
265 251
266/* 252/*
diff --git a/include/linux/pid.h b/include/linux/pid.h
index 2381c973d897..a089a3c447fc 100644
--- a/include/linux/pid.h
+++ b/include/linux/pid.h
@@ -176,9 +176,8 @@ pid_t pid_vnr(struct pid *pid);
176 176
177#define do_each_pid_task(pid, type, task) \ 177#define do_each_pid_task(pid, type, task) \
178 do { \ 178 do { \
179 struct hlist_node *pos___; \
180 if ((pid) != NULL) \ 179 if ((pid) != NULL) \
181 hlist_for_each_entry_rcu((task), pos___, \ 180 hlist_for_each_entry_rcu((task), \
182 &(pid)->tasks[type], pids[type].node) { 181 &(pid)->tasks[type], pids[type].node) {
183 182
184 /* 183 /*
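
A sketch of a do_each_pid_task() caller after this change; only the on-stack hlist_node disappears (wake_pid_tasks is hypothetical):

	#include <linux/pid.h>
	#include <linux/rcupdate.h>
	#include <linux/sched.h>

	static void wake_pid_tasks(struct pid *pid)
	{
		struct task_struct *task;

		rcu_read_lock();	/* the pid->tasks[] lists are RCU-protected */
		do_each_pid_task(pid, PIDTYPE_PID, task) {
			wake_up_process(task);
		} while_each_pid_task(pid, PIDTYPE_PID, task);
		rcu_read_unlock();
	}
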
diff --git a/include/linux/rculist.h b/include/linux/rculist.h
index c92dd28eaa6c..8089e35d47ac 100644
--- a/include/linux/rculist.h
+++ b/include/linux/rculist.h
@@ -445,8 +445,7 @@ static inline void hlist_add_after_rcu(struct hlist_node *prev,
445 445
446/** 446/**
447 * hlist_for_each_entry_rcu - iterate over rcu list of given type 447 * hlist_for_each_entry_rcu - iterate over rcu list of given type
448 * @tpos: the type * to use as a loop cursor. 448 * @pos: the type * to use as a loop cursor.
449 * @pos: the &struct hlist_node to use as a loop cursor.
450 * @head: the head for your list. 449 * @head: the head for your list.
451 * @member: the name of the hlist_node within the struct. 450 * @member: the name of the hlist_node within the struct.
452 * 451 *
@@ -454,16 +453,16 @@ static inline void hlist_add_after_rcu(struct hlist_node *prev,
454 * the _rcu list-mutation primitives such as hlist_add_head_rcu() 453 * the _rcu list-mutation primitives such as hlist_add_head_rcu()
455 * as long as the traversal is guarded by rcu_read_lock(). 454 * as long as the traversal is guarded by rcu_read_lock().
456 */ 455 */
457#define hlist_for_each_entry_rcu(tpos, pos, head, member) \ 456#define hlist_for_each_entry_rcu(pos, head, member) \
458 for (pos = rcu_dereference_raw(hlist_first_rcu(head)); \ 457 for (pos = hlist_entry_safe (rcu_dereference_raw(hlist_first_rcu(head)),\
459 pos && \ 458 typeof(*(pos)), member); \
460 ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1; }); \ 459 pos; \
461 pos = rcu_dereference_raw(hlist_next_rcu(pos))) 460 pos = hlist_entry_safe(rcu_dereference_raw(hlist_next_rcu(\
461 &(pos)->member)), typeof(*(pos)), member))
462 462
463/** 463/**
464 * hlist_for_each_entry_rcu_bh - iterate over rcu list of given type 464 * hlist_for_each_entry_rcu_bh - iterate over rcu list of given type
465 * @tpos: the type * to use as a loop cursor. 465 * @pos: the type * to use as a loop cursor.
466 * @pos: the &struct hlist_node to use as a loop cursor.
467 * @head: the head for your list. 466 * @head: the head for your list.
468 * @member: the name of the hlist_node within the struct. 467 * @member: the name of the hlist_node within the struct.
469 * 468 *
@@ -471,35 +470,36 @@ static inline void hlist_add_after_rcu(struct hlist_node *prev,
471 * the _rcu list-mutation primitives such as hlist_add_head_rcu() 470 * the _rcu list-mutation primitives such as hlist_add_head_rcu()
472 * as long as the traversal is guarded by rcu_read_lock(). 471 * as long as the traversal is guarded by rcu_read_lock().
473 */ 472 */
474#define hlist_for_each_entry_rcu_bh(tpos, pos, head, member) \ 473#define hlist_for_each_entry_rcu_bh(pos, head, member) \
475 for (pos = rcu_dereference_bh((head)->first); \ 474 for (pos = hlist_entry_safe(rcu_dereference_bh(hlist_first_rcu(head)),\
476 pos && \ 475 typeof(*(pos)), member); \
477 ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1; }); \ 476 pos; \
478 pos = rcu_dereference_bh(pos->next)) 477 pos = hlist_entry_safe(rcu_dereference_bh(hlist_next_rcu(\
478 &(pos)->member)), typeof(*(pos)), member))
479 479
480/** 480/**
481 * hlist_for_each_entry_continue_rcu - iterate over a hlist continuing after current point 481 * hlist_for_each_entry_continue_rcu - iterate over a hlist continuing after current point
482 * @tpos: the type * to use as a loop cursor. 482 * @pos: the type * to use as a loop cursor.
483 * @pos: the &struct hlist_node to use as a loop cursor.
484 * @member: the name of the hlist_node within the struct. 483 * @member: the name of the hlist_node within the struct.
485 */ 484 */
486#define hlist_for_each_entry_continue_rcu(tpos, pos, member) \ 485#define hlist_for_each_entry_continue_rcu(pos, member) \
487 for (pos = rcu_dereference((pos)->next); \ 486 for (pos = hlist_entry_safe(rcu_dereference((pos)->member.next),\
488 pos && \ 487 typeof(*(pos)), member); \
489 ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1; }); \ 488 pos; \
490 pos = rcu_dereference(pos->next)) 489 pos = hlist_entry_safe(rcu_dereference((pos)->member.next),\
490 typeof(*(pos)), member))
491 491
492/** 492/**
493 * hlist_for_each_entry_continue_rcu_bh - iterate over a hlist continuing after current point 493 * hlist_for_each_entry_continue_rcu_bh - iterate over a hlist continuing after current point
494 * @tpos: the type * to use as a loop cursor. 494 * @pos: the type * to use as a loop cursor.
495 * @pos: the &struct hlist_node to use as a loop cursor.
496 * @member: the name of the hlist_node within the struct. 495 * @member: the name of the hlist_node within the struct.
497 */ 496 */
498#define hlist_for_each_entry_continue_rcu_bh(tpos, pos, member) \ 497#define hlist_for_each_entry_continue_rcu_bh(pos, member) \
499 for (pos = rcu_dereference_bh((pos)->next); \ 498 for (pos = hlist_entry_safe(rcu_dereference_bh((pos)->member.next),\
500 pos && \ 499 typeof(*(pos)), member); \
501 ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1; }); \ 500 pos; \
502 pos = rcu_dereference_bh(pos->next)) 501 pos = hlist_entry_safe(rcu_dereference_bh((pos)->member.next),\
502 typeof(*(pos)), member))
503 503
504 504
505#endif /* __KERNEL__ */ 505#endif /* __KERNEL__ */
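
The RCU flavours keep their rcu_dereference_raw()/_bh() semantics but likewise lose the node argument; a minimal lookup sketch, assuming the caller holds rcu_read_lock() and manages the entry's lifetime (struct foo as above is hypothetical):

	#include <linux/rculist.h>

	struct foo {
		int key;
		struct hlist_node node;
	};

	static struct foo *foo_find_rcu(struct hlist_head *head, int key)
	{
		struct foo *f;

		hlist_for_each_entry_rcu(f, head, node)
			if (f->key == key)
				return f;
		return NULL;
	}
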
diff --git a/include/linux/scatterlist.h b/include/linux/scatterlist.h
index 4bd6c06eb28e..2d8bdaef9611 100644
--- a/include/linux/scatterlist.h
+++ b/include/linux/scatterlist.h
@@ -231,6 +231,41 @@ size_t sg_copy_to_buffer(struct scatterlist *sgl, unsigned int nents,
231 */ 231 */
232#define SG_MAX_SINGLE_ALLOC (PAGE_SIZE / sizeof(struct scatterlist)) 232#define SG_MAX_SINGLE_ALLOC (PAGE_SIZE / sizeof(struct scatterlist))
233 233
234/*
235 * sg page iterator
236 *
237 * Iterates over sg entries page-by-page. On each successful iteration,
238 * @piter->page points to the current page, @piter->sg to the sg holding this
239 * page and @piter->sg_pgoffset to the page's page offset within the sg. The
 240 * iteration stops either when the maximum number of sg entries has been
 241 * reached or when a terminating sg (sg_last(sg) == true) is encountered.
242 */
243struct sg_page_iter {
244 struct page *page; /* current page */
245 struct scatterlist *sg; /* sg holding the page */
246 unsigned int sg_pgoffset; /* page offset within the sg */
247
248 /* these are internal states, keep away */
249 unsigned int __nents; /* remaining sg entries */
250 int __pg_advance; /* nr pages to advance at the
251 * next step */
252};
253
254bool __sg_page_iter_next(struct sg_page_iter *piter);
255void __sg_page_iter_start(struct sg_page_iter *piter,
256 struct scatterlist *sglist, unsigned int nents,
257 unsigned long pgoffset);
258
259/**
260 * for_each_sg_page - iterate over the pages of the given sg list
261 * @sglist: sglist to iterate over
262 * @piter: page iterator to hold current page, sg, sg_pgoffset
263 * @nents: maximum number of sg entries to iterate over
264 * @pgoffset: starting page offset
265 */
266#define for_each_sg_page(sglist, piter, nents, pgoffset) \
267 for (__sg_page_iter_start((piter), (sglist), (nents), (pgoffset)); \
268 __sg_page_iter_next(piter);)
234 269
235/* 270/*
236 * Mapping sg iterator 271 * Mapping sg iterator
@@ -258,11 +293,11 @@ struct sg_mapping_iter {
258 void *addr; /* pointer to the mapped area */ 293 void *addr; /* pointer to the mapped area */
259 size_t length; /* length of the mapped area */ 294 size_t length; /* length of the mapped area */
260 size_t consumed; /* number of consumed bytes */ 295 size_t consumed; /* number of consumed bytes */
296 struct sg_page_iter piter; /* page iterator */
261 297
262 /* these are internal states, keep away */ 298 /* these are internal states, keep away */
263 struct scatterlist *__sg; /* current entry */ 299 unsigned int __offset; /* offset within page */
264 unsigned int __nents; /* nr of remaining entries */ 300 unsigned int __remaining; /* remaining bytes on page */
265 unsigned int __offset; /* offset within sg */
266 unsigned int __flags; 301 unsigned int __flags;
267}; 302};
268 303
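
A minimal sketch of the new page iterator in use, relying only on the fields documented above (dump_sg_pages is hypothetical):

	#include <linux/printk.h>
	#include <linux/scatterlist.h>

	static void dump_sg_pages(struct scatterlist *sgl, unsigned int nents)
	{
		struct sg_page_iter piter;

		/* visit every page of every sg entry, starting at page offset 0 */
		for_each_sg_page(sgl, &piter, nents, 0)
			pr_debug("sg %p page %p pgoffset %u\n",
				 piter.sg, piter.page, piter.sg_pgoffset);
	}
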
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 6853bf947fde..d35d2b6ddbfb 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -346,11 +346,6 @@ static inline void arch_pick_mmap_layout(struct mm_struct *mm) {}
346extern void set_dumpable(struct mm_struct *mm, int value); 346extern void set_dumpable(struct mm_struct *mm, int value);
347extern int get_dumpable(struct mm_struct *mm); 347extern int get_dumpable(struct mm_struct *mm);
348 348
349/* get/set_dumpable() values */
350#define SUID_DUMPABLE_DISABLED 0
351#define SUID_DUMPABLE_ENABLED 1
352#define SUID_DUMPABLE_SAFE 2
353
354/* mm flags */ 349/* mm flags */
355/* dumpable bits */ 350/* dumpable bits */
356#define MMF_DUMPABLE 0 /* core dump is permitted */ 351#define MMF_DUMPABLE 0 /* core dump is permitted */
diff --git a/include/net/ax25.h b/include/net/ax25.h
index 53539acbd81a..89ed9ac5701f 100644
--- a/include/net/ax25.h
+++ b/include/net/ax25.h
@@ -161,8 +161,8 @@ typedef struct ax25_uid_assoc {
161 ax25_address call; 161 ax25_address call;
162} ax25_uid_assoc; 162} ax25_uid_assoc;
163 163
164#define ax25_uid_for_each(__ax25, node, list) \ 164#define ax25_uid_for_each(__ax25, list) \
165 hlist_for_each_entry(__ax25, node, list, uid_node) 165 hlist_for_each_entry(__ax25, list, uid_node)
166 166
167#define ax25_uid_hold(ax25) \ 167#define ax25_uid_hold(ax25) \
168 atomic_inc(&((ax25)->refcount)) 168 atomic_inc(&((ax25)->refcount))
@@ -247,8 +247,8 @@ typedef struct ax25_cb {
247 247
248#define ax25_sk(__sk) ((ax25_cb *)(__sk)->sk_protinfo) 248#define ax25_sk(__sk) ((ax25_cb *)(__sk)->sk_protinfo)
249 249
250#define ax25_for_each(__ax25, node, list) \ 250#define ax25_for_each(__ax25, list) \
251 hlist_for_each_entry(__ax25, node, list, ax25_node) 251 hlist_for_each_entry(__ax25, list, ax25_node)
252 252
253#define ax25_cb_hold(__ax25) \ 253#define ax25_cb_hold(__ax25) \
254 atomic_inc(&((__ax25)->refcount)) 254 atomic_inc(&((__ax25)->refcount))
diff --git a/include/net/inet_hashtables.h b/include/net/inet_hashtables.h
index 7b2ae9d37076..ef83d9e844b5 100644
--- a/include/net/inet_hashtables.h
+++ b/include/net/inet_hashtables.h
@@ -94,8 +94,8 @@ static inline struct net *ib_net(struct inet_bind_bucket *ib)
94 return read_pnet(&ib->ib_net); 94 return read_pnet(&ib->ib_net);
95} 95}
96 96
97#define inet_bind_bucket_for_each(tb, pos, head) \ 97#define inet_bind_bucket_for_each(tb, head) \
98 hlist_for_each_entry(tb, pos, head, node) 98 hlist_for_each_entry(tb, head, node)
99 99
100struct inet_bind_hashbucket { 100struct inet_bind_hashbucket {
101 spinlock_t lock; 101 spinlock_t lock;
diff --git a/include/net/inet_timewait_sock.h b/include/net/inet_timewait_sock.h
index 7d658d577368..f908dfc06505 100644
--- a/include/net/inet_timewait_sock.h
+++ b/include/net/inet_timewait_sock.h
@@ -178,11 +178,11 @@ static inline int inet_twsk_del_dead_node(struct inet_timewait_sock *tw)
178#define inet_twsk_for_each(tw, node, head) \ 178#define inet_twsk_for_each(tw, node, head) \
179 hlist_nulls_for_each_entry(tw, node, head, tw_node) 179 hlist_nulls_for_each_entry(tw, node, head, tw_node)
180 180
181#define inet_twsk_for_each_inmate(tw, node, jail) \ 181#define inet_twsk_for_each_inmate(tw, jail) \
182 hlist_for_each_entry(tw, node, jail, tw_death_node) 182 hlist_for_each_entry(tw, jail, tw_death_node)
183 183
184#define inet_twsk_for_each_inmate_safe(tw, node, safe, jail) \ 184#define inet_twsk_for_each_inmate_safe(tw, safe, jail) \
185 hlist_for_each_entry_safe(tw, node, safe, jail, tw_death_node) 185 hlist_for_each_entry_safe(tw, safe, jail, tw_death_node)
186 186
187static inline struct inet_timewait_sock *inet_twsk(const struct sock *sk) 187static inline struct inet_timewait_sock *inet_twsk(const struct sock *sk)
188{ 188{
diff --git a/include/net/netrom.h b/include/net/netrom.h
index f0793c1cb5f8..121dcf854db5 100644
--- a/include/net/netrom.h
+++ b/include/net/netrom.h
@@ -154,17 +154,17 @@ static __inline__ void nr_node_unlock(struct nr_node *nr_node)
154 nr_node_put(nr_node); 154 nr_node_put(nr_node);
155} 155}
156 156
157#define nr_neigh_for_each(__nr_neigh, node, list) \ 157#define nr_neigh_for_each(__nr_neigh, list) \
158 hlist_for_each_entry(__nr_neigh, node, list, neigh_node) 158 hlist_for_each_entry(__nr_neigh, list, neigh_node)
159 159
160#define nr_neigh_for_each_safe(__nr_neigh, node, node2, list) \ 160#define nr_neigh_for_each_safe(__nr_neigh, node2, list) \
161 hlist_for_each_entry_safe(__nr_neigh, node, node2, list, neigh_node) 161 hlist_for_each_entry_safe(__nr_neigh, node2, list, neigh_node)
162 162
163#define nr_node_for_each(__nr_node, node, list) \ 163#define nr_node_for_each(__nr_node, list) \
164 hlist_for_each_entry(__nr_node, node, list, node_node) 164 hlist_for_each_entry(__nr_node, list, node_node)
165 165
166#define nr_node_for_each_safe(__nr_node, node, node2, list) \ 166#define nr_node_for_each_safe(__nr_node, node2, list) \
167 hlist_for_each_entry_safe(__nr_node, node, node2, list, node_node) 167 hlist_for_each_entry_safe(__nr_node, node2, list, node_node)
168 168
169 169
170/*********************************************************************/ 170/*********************************************************************/
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
index 2761c905504e..f10818fc8804 100644
--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -339,11 +339,10 @@ static inline struct Qdisc_class_common *
339qdisc_class_find(const struct Qdisc_class_hash *hash, u32 id) 339qdisc_class_find(const struct Qdisc_class_hash *hash, u32 id)
340{ 340{
341 struct Qdisc_class_common *cl; 341 struct Qdisc_class_common *cl;
342 struct hlist_node *n;
343 unsigned int h; 342 unsigned int h;
344 343
345 h = qdisc_class_hash(id, hash->hashmask); 344 h = qdisc_class_hash(id, hash->hashmask);
346 hlist_for_each_entry(cl, n, &hash->hash[h], hnode) { 345 hlist_for_each_entry(cl, &hash->hash[h], hnode) {
347 if (cl->classid == id) 346 if (cl->classid == id)
348 return cl; 347 return cl;
349 } 348 }
diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h
index 7fdf298a47ef..df85a0c0f2d5 100644
--- a/include/net/sctp/sctp.h
+++ b/include/net/sctp/sctp.h
@@ -675,8 +675,8 @@ static inline int sctp_vtag_hashfn(__u16 lport, __u16 rport, __u32 vtag)
675 return h & (sctp_assoc_hashsize - 1); 675 return h & (sctp_assoc_hashsize - 1);
676} 676}
677 677
678#define sctp_for_each_hentry(epb, node, head) \ 678#define sctp_for_each_hentry(epb, head) \
679 hlist_for_each_entry(epb, node, head, node) 679 hlist_for_each_entry(epb, head, node)
680 680
681/* Is a socket of this style? */ 681/* Is a socket of this style? */
682#define sctp_style(sk, style) __sctp_style((sk), (SCTP_SOCKET_##style)) 682#define sctp_style(sk, style) __sctp_style((sk), (SCTP_SOCKET_##style))
diff --git a/include/net/sock.h b/include/net/sock.h
index a66caa223d18..14f6e9d19dc7 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -606,24 +606,23 @@ static inline void sk_add_bind_node(struct sock *sk,
606 hlist_add_head(&sk->sk_bind_node, list); 606 hlist_add_head(&sk->sk_bind_node, list);
607} 607}
608 608
609#define sk_for_each(__sk, node, list) \ 609#define sk_for_each(__sk, list) \
610 hlist_for_each_entry(__sk, node, list, sk_node) 610 hlist_for_each_entry(__sk, list, sk_node)
611#define sk_for_each_rcu(__sk, node, list) \ 611#define sk_for_each_rcu(__sk, list) \
612 hlist_for_each_entry_rcu(__sk, node, list, sk_node) 612 hlist_for_each_entry_rcu(__sk, list, sk_node)
613#define sk_nulls_for_each(__sk, node, list) \ 613#define sk_nulls_for_each(__sk, node, list) \
614 hlist_nulls_for_each_entry(__sk, node, list, sk_nulls_node) 614 hlist_nulls_for_each_entry(__sk, node, list, sk_nulls_node)
615#define sk_nulls_for_each_rcu(__sk, node, list) \ 615#define sk_nulls_for_each_rcu(__sk, node, list) \
616 hlist_nulls_for_each_entry_rcu(__sk, node, list, sk_nulls_node) 616 hlist_nulls_for_each_entry_rcu(__sk, node, list, sk_nulls_node)
617#define sk_for_each_from(__sk, node) \ 617#define sk_for_each_from(__sk) \
618 if (__sk && ({ node = &(__sk)->sk_node; 1; })) \ 618 hlist_for_each_entry_from(__sk, sk_node)
619 hlist_for_each_entry_from(__sk, node, sk_node)
620#define sk_nulls_for_each_from(__sk, node) \ 619#define sk_nulls_for_each_from(__sk, node) \
621 if (__sk && ({ node = &(__sk)->sk_nulls_node; 1; })) \ 620 if (__sk && ({ node = &(__sk)->sk_nulls_node; 1; })) \
622 hlist_nulls_for_each_entry_from(__sk, node, sk_nulls_node) 621 hlist_nulls_for_each_entry_from(__sk, node, sk_nulls_node)
623#define sk_for_each_safe(__sk, node, tmp, list) \ 622#define sk_for_each_safe(__sk, tmp, list) \
624 hlist_for_each_entry_safe(__sk, node, tmp, list, sk_node) 623 hlist_for_each_entry_safe(__sk, tmp, list, sk_node)
625#define sk_for_each_bound(__sk, node, list) \ 624#define sk_for_each_bound(__sk, list) \
626 hlist_for_each_entry(__sk, node, list, sk_bind_node) 625 hlist_for_each_entry(__sk, list, sk_bind_node)
627 626
628static inline struct user_namespace *sk_user_ns(struct sock *sk) 627static inline struct user_namespace *sk_user_ns(struct sock *sk)
629{ 628{
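
The socket wrappers shrink the same way; a sketch of a bound-list walk with the two-argument form (count_bound is hypothetical):

	#include <net/sock.h>

	static unsigned int count_bound(struct hlist_head *head)
	{
		struct sock *sk;
		unsigned int n = 0;

		sk_for_each_bound(sk, head)	/* was: sk_for_each_bound(sk, node, head) */
			n++;
		return n;
	}
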
diff --git a/include/uapi/linux/ipmi.h b/include/uapi/linux/ipmi.h
index 33fbc99b3812..7b26a62e5707 100644
--- a/include/uapi/linux/ipmi.h
+++ b/include/uapi/linux/ipmi.h
@@ -59,15 +59,7 @@
59 * if it becomes full and it is queried once a second to see if 59 * if it becomes full and it is queried once a second to see if
60 * anything is in it. Incoming commands to the driver will get 60 * anything is in it. Incoming commands to the driver will get
61 * delivered as commands. 61 * delivered as commands.
62 * 62 */
63 * This driver provides two main interfaces: one for in-kernel
64 * applications and another for userland applications. The
65 * capabilities are basically the same for both interface, although
66 * the interfaces are somewhat different. The stuff in the
67 * #ifdef __KERNEL__ below is the in-kernel interface. The userland
68 * interface is defined later in the file. */
69
70
71 63
72/* 64/*
73 * This is an overlay for all the address types, so it's easy to 65 * This is an overlay for all the address types, so it's easy to
diff --git a/include/uapi/linux/msdos_fs.h b/include/uapi/linux/msdos_fs.h
index 996719f82e28..f055e58b3147 100644
--- a/include/uapi/linux/msdos_fs.h
+++ b/include/uapi/linux/msdos_fs.h
@@ -87,6 +87,8 @@
87#define IS_FSINFO(x) (le32_to_cpu((x)->signature1) == FAT_FSINFO_SIG1 \ 87#define IS_FSINFO(x) (le32_to_cpu((x)->signature1) == FAT_FSINFO_SIG1 \
88 && le32_to_cpu((x)->signature2) == FAT_FSINFO_SIG2) 88 && le32_to_cpu((x)->signature2) == FAT_FSINFO_SIG2)
89 89
90#define FAT_STATE_DIRTY 0x01
91
90struct __fat_dirent { 92struct __fat_dirent {
91 long d_ino; 93 long d_ino;
92 __kernel_off_t d_off; 94 __kernel_off_t d_off;
@@ -120,14 +122,34 @@ struct fat_boot_sector {
120 __le32 hidden; /* hidden sectors (unused) */ 122 __le32 hidden; /* hidden sectors (unused) */
121 __le32 total_sect; /* number of sectors (if sectors == 0) */ 123 __le32 total_sect; /* number of sectors (if sectors == 0) */
122 124
123 /* The following fields are only used by FAT32 */ 125 union {
124 __le32 fat32_length; /* sectors/FAT */ 126 struct {
125 __le16 flags; /* bit 8: fat mirroring, low 4: active fat */ 127 /* Extended BPB Fields for FAT16 */
126 __u8 version[2]; /* major, minor filesystem version */ 128 __u8 drive_number; /* Physical drive number */
127 __le32 root_cluster; /* first cluster in root directory */ 129 __u8 state; /* undocumented, but used
128 __le16 info_sector; /* filesystem info sector */ 130 for mount state. */
 129 __le16 backup_boot; /* backup boot sector */ 131 /* other fields are not added here */
130 __le16 reserved2[6]; /* Unused */ 132 } fat16;
133
134 struct {
135 /* only used by FAT32 */
136 __le32 length; /* sectors/FAT */
137 __le16 flags; /* bit 8: fat mirroring,
138 low 4: active fat */
139 __u8 version[2]; /* major, minor filesystem
140 version */
141 __le32 root_cluster; /* first cluster in
142 root directory */
143 __le16 info_sector; /* filesystem info sector */
144 __le16 backup_boot; /* backup boot sector */
145 __le16 reserved2[6]; /* Unused */
146 /* Extended BPB Fields for FAT32 */
147 __u8 drive_number; /* Physical drive number */
148 __u8 state; /* undocumented, but used
149 for mount state. */
 150 /* other fields are not added here */
151 } fat32;
152 };
131}; 153};
132 154
133struct fat_boot_fsinfo { 155struct fat_boot_fsinfo {
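
With the union in place, FAT16 and FAT32 code can reach the mount-state byte symmetrically; a sketch assuming the caller already knows which variant it mounted (fat_volume_dirty is hypothetical):

	#include <linux/msdos_fs.h>
	#include <linux/types.h>

	static bool fat_volume_dirty(const struct fat_boot_sector *bs, bool fat32)
	{
		__u8 state = fat32 ? bs->fat32.state : bs->fat16.state;

		return (state & FAT_STATE_DIRTY) != 0;
	}
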
diff --git a/include/uapi/linux/nbd.h b/include/uapi/linux/nbd.h
index dfb514472cbc..4f52549b23ff 100644
--- a/include/uapi/linux/nbd.h
+++ b/include/uapi/linux/nbd.h
@@ -33,13 +33,14 @@ enum {
33 NBD_CMD_READ = 0, 33 NBD_CMD_READ = 0,
34 NBD_CMD_WRITE = 1, 34 NBD_CMD_WRITE = 1,
35 NBD_CMD_DISC = 2, 35 NBD_CMD_DISC = 2,
36 /* there is a gap here to match userspace */ 36 NBD_CMD_FLUSH = 3,
37 NBD_CMD_TRIM = 4 37 NBD_CMD_TRIM = 4
38}; 38};
39 39
40/* values for flags field */ 40/* values for flags field */
41#define NBD_FLAG_HAS_FLAGS (1 << 0) /* nbd-server supports flags */ 41#define NBD_FLAG_HAS_FLAGS (1 << 0) /* nbd-server supports flags */
42#define NBD_FLAG_READ_ONLY (1 << 1) /* device is read-only */ 42#define NBD_FLAG_READ_ONLY (1 << 1) /* device is read-only */
43#define NBD_FLAG_SEND_FLUSH (1 << 2) /* can flush writeback cache */
43/* there is a gap here to match userspace */ 44/* there is a gap here to match userspace */
44#define NBD_FLAG_SEND_TRIM (1 << 5) /* send trim/discard */ 45#define NBD_FLAG_SEND_TRIM (1 << 5) /* send trim/discard */
45 46
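
Protocol-wise, a client should only issue the new command when the server advertised the matching flag; a sketch where nbd_send_cmd() is a hypothetical transport helper, not part of this patch:

	#include <linux/errno.h>
	#include <linux/nbd.h>
	#include <linux/types.h>

	int nbd_send_cmd(int cmd);	/* hypothetical transport helper */

	static int nbd_flush(__u32 server_flags)
	{
		if (!(server_flags & NBD_FLAG_SEND_FLUSH))
			return -EOPNOTSUPP;	/* server cannot flush */

		return nbd_send_cmd(NBD_CMD_FLUSH);
	}
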
diff --git a/include/uapi/linux/xattr.h b/include/uapi/linux/xattr.h
index 26607bd965fa..e4629b93bdd6 100644
--- a/include/uapi/linux/xattr.h
+++ b/include/uapi/linux/xattr.h
@@ -15,19 +15,22 @@
15 15
16/* Namespaces */ 16/* Namespaces */
17#define XATTR_OS2_PREFIX "os2." 17#define XATTR_OS2_PREFIX "os2."
18#define XATTR_OS2_PREFIX_LEN (sizeof (XATTR_OS2_PREFIX) - 1) 18#define XATTR_OS2_PREFIX_LEN (sizeof(XATTR_OS2_PREFIX) - 1)
19
20#define XATTR_MAC_OSX_PREFIX "osx."
21#define XATTR_MAC_OSX_PREFIX_LEN (sizeof(XATTR_MAC_OSX_PREFIX) - 1)
19 22
20#define XATTR_SECURITY_PREFIX "security." 23#define XATTR_SECURITY_PREFIX "security."
21#define XATTR_SECURITY_PREFIX_LEN (sizeof (XATTR_SECURITY_PREFIX) - 1) 24#define XATTR_SECURITY_PREFIX_LEN (sizeof(XATTR_SECURITY_PREFIX) - 1)
22 25
23#define XATTR_SYSTEM_PREFIX "system." 26#define XATTR_SYSTEM_PREFIX "system."
24#define XATTR_SYSTEM_PREFIX_LEN (sizeof (XATTR_SYSTEM_PREFIX) - 1) 27#define XATTR_SYSTEM_PREFIX_LEN (sizeof(XATTR_SYSTEM_PREFIX) - 1)
25 28
26#define XATTR_TRUSTED_PREFIX "trusted." 29#define XATTR_TRUSTED_PREFIX "trusted."
27#define XATTR_TRUSTED_PREFIX_LEN (sizeof (XATTR_TRUSTED_PREFIX) - 1) 30#define XATTR_TRUSTED_PREFIX_LEN (sizeof(XATTR_TRUSTED_PREFIX) - 1)
28 31
29#define XATTR_USER_PREFIX "user." 32#define XATTR_USER_PREFIX "user."
30#define XATTR_USER_PREFIX_LEN (sizeof (XATTR_USER_PREFIX) - 1) 33#define XATTR_USER_PREFIX_LEN (sizeof(XATTR_USER_PREFIX) - 1)
31 34
32/* Security namespace */ 35/* Security namespace */
33#define XATTR_EVM_SUFFIX "evm" 36#define XATTR_EVM_SUFFIX "evm"
diff --git a/ipc/util.c b/ipc/util.c
index 74e1d9c7a98a..464a8abd779f 100644
--- a/ipc/util.c
+++ b/ipc/util.c
@@ -252,7 +252,7 @@ int ipc_addid(struct ipc_ids* ids, struct kern_ipc_perm* new, int size)
252{ 252{
253 kuid_t euid; 253 kuid_t euid;
254 kgid_t egid; 254 kgid_t egid;
255 int id, err; 255 int id;
256 int next_id = ids->next_id; 256 int next_id = ids->next_id;
257 257
258 if (size > IPCMNI) 258 if (size > IPCMNI)
@@ -261,17 +261,21 @@ int ipc_addid(struct ipc_ids* ids, struct kern_ipc_perm* new, int size)
261 if (ids->in_use >= size) 261 if (ids->in_use >= size)
262 return -ENOSPC; 262 return -ENOSPC;
263 263
264 idr_preload(GFP_KERNEL);
265
264 spin_lock_init(&new->lock); 266 spin_lock_init(&new->lock);
265 new->deleted = 0; 267 new->deleted = 0;
266 rcu_read_lock(); 268 rcu_read_lock();
267 spin_lock(&new->lock); 269 spin_lock(&new->lock);
268 270
269 err = idr_get_new_above(&ids->ipcs_idr, new, 271 id = idr_alloc(&ids->ipcs_idr, new,
270 (next_id < 0) ? 0 : ipcid_to_idx(next_id), &id); 272 (next_id < 0) ? 0 : ipcid_to_idx(next_id), 0,
271 if (err) { 273 GFP_NOWAIT);
274 idr_preload_end();
275 if (id < 0) {
272 spin_unlock(&new->lock); 276 spin_unlock(&new->lock);
273 rcu_read_unlock(); 277 rcu_read_unlock();
274 return err; 278 return id;
275 } 279 }
276 280
277 ids->in_use++; 281 ids->in_use++;
@@ -307,19 +311,10 @@ static int ipcget_new(struct ipc_namespace *ns, struct ipc_ids *ids,
307 struct ipc_ops *ops, struct ipc_params *params) 311 struct ipc_ops *ops, struct ipc_params *params)
308{ 312{
309 int err; 313 int err;
310retry:
311 err = idr_pre_get(&ids->ipcs_idr, GFP_KERNEL);
312
313 if (!err)
314 return -ENOMEM;
315 314
316 down_write(&ids->rw_mutex); 315 down_write(&ids->rw_mutex);
317 err = ops->getnew(ns, params); 316 err = ops->getnew(ns, params);
318 up_write(&ids->rw_mutex); 317 up_write(&ids->rw_mutex);
319
320 if (err == -EAGAIN)
321 goto retry;
322
323 return err; 318 return err;
324} 319}
325 320
@@ -376,8 +371,6 @@ static int ipcget_public(struct ipc_namespace *ns, struct ipc_ids *ids,
376 struct kern_ipc_perm *ipcp; 371 struct kern_ipc_perm *ipcp;
377 int flg = params->flg; 372 int flg = params->flg;
378 int err; 373 int err;
379retry:
380 err = idr_pre_get(&ids->ipcs_idr, GFP_KERNEL);
381 374
382 /* 375 /*
383 * Take the lock as a writer since we are potentially going to add 376 * Take the lock as a writer since we are potentially going to add
@@ -389,8 +382,6 @@ retry:
389 /* key not used */ 382 /* key not used */
390 if (!(flg & IPC_CREAT)) 383 if (!(flg & IPC_CREAT))
391 err = -ENOENT; 384 err = -ENOENT;
392 else if (!err)
393 err = -ENOMEM;
394 else 385 else
395 err = ops->getnew(ns, params); 386 err = ops->getnew(ns, params);
396 } else { 387 } else {
@@ -413,9 +404,6 @@ retry:
413 } 404 }
414 up_write(&ids->rw_mutex); 405 up_write(&ids->rw_mutex);
415 406
416 if (err == -EAGAIN)
417 goto retry;
418
419 return err; 407 return err;
420} 408}
421 409
diff --git a/kernel/Makefile b/kernel/Makefile
index 05949c0510c5..bbde5f1a4486 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -7,7 +7,7 @@ obj-y = fork.o exec_domain.o panic.o printk.o \
7 sysctl.o sysctl_binary.o capability.o ptrace.o timer.o user.o \ 7 sysctl.o sysctl_binary.o capability.o ptrace.o timer.o user.o \
8 signal.o sys.o kmod.o workqueue.o pid.o task_work.o \ 8 signal.o sys.o kmod.o workqueue.o pid.o task_work.o \
9 rcupdate.o extable.o params.o posix-timers.o \ 9 rcupdate.o extable.o params.o posix-timers.o \
10 kthread.o wait.o kfifo.o sys_ni.o posix-cpu-timers.o mutex.o \ 10 kthread.o wait.o sys_ni.o posix-cpu-timers.o mutex.o \
11 hrtimer.o rwsem.o nsproxy.o srcu.o semaphore.o \ 11 hrtimer.o rwsem.o nsproxy.o srcu.o semaphore.o \
12 notifier.o ksysfs.o cred.o \ 12 notifier.o ksysfs.o cred.o \
13 async.o range.o groups.o lglock.o smpboot.o 13 async.o range.o groups.o lglock.o smpboot.o
@@ -25,9 +25,7 @@ endif
25obj-y += sched/ 25obj-y += sched/
26obj-y += power/ 26obj-y += power/
27 27
28ifeq ($(CONFIG_CHECKPOINT_RESTORE),y) 28obj-$(CONFIG_CHECKPOINT_RESTORE) += kcmp.o
29obj-$(CONFIG_X86) += kcmp.o
30endif
31obj-$(CONFIG_FREEZER) += freezer.o 29obj-$(CONFIG_FREEZER) += freezer.o
32obj-$(CONFIG_PROFILING) += profile.o 30obj-$(CONFIG_PROFILING) += profile.o
33obj-$(CONFIG_STACKTRACE) += stacktrace.o 31obj-$(CONFIG_STACKTRACE) += stacktrace.o
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index fb2fb11fbb25..a32f9432666c 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -554,7 +554,6 @@ static struct css_set *find_existing_css_set(
554{ 554{
555 int i; 555 int i;
556 struct cgroupfs_root *root = cgrp->root; 556 struct cgroupfs_root *root = cgrp->root;
557 struct hlist_node *node;
558 struct css_set *cg; 557 struct css_set *cg;
559 unsigned long key; 558 unsigned long key;
560 559
@@ -577,7 +576,7 @@ static struct css_set *find_existing_css_set(
577 } 576 }
578 577
579 key = css_set_hash(template); 578 key = css_set_hash(template);
580 hash_for_each_possible(css_set_table, cg, node, hlist, key) { 579 hash_for_each_possible(css_set_table, cg, hlist, key) {
581 if (!compare_css_sets(cg, oldcg, cgrp, template)) 580 if (!compare_css_sets(cg, oldcg, cgrp, template))
582 continue; 581 continue;
583 582
@@ -1611,7 +1610,6 @@ static struct dentry *cgroup_mount(struct file_system_type *fs_type,
1611 struct cgroupfs_root *existing_root; 1610 struct cgroupfs_root *existing_root;
1612 const struct cred *cred; 1611 const struct cred *cred;
1613 int i; 1612 int i;
1614 struct hlist_node *node;
1615 struct css_set *cg; 1613 struct css_set *cg;
1616 1614
1617 BUG_ON(sb->s_root != NULL); 1615 BUG_ON(sb->s_root != NULL);
@@ -1666,7 +1664,7 @@ static struct dentry *cgroup_mount(struct file_system_type *fs_type,
1666 /* Link the top cgroup in this hierarchy into all 1664 /* Link the top cgroup in this hierarchy into all
1667 * the css_set objects */ 1665 * the css_set objects */
1668 write_lock(&css_set_lock); 1666 write_lock(&css_set_lock);
1669 hash_for_each(css_set_table, i, node, cg, hlist) 1667 hash_for_each(css_set_table, i, cg, hlist)
1670 link_css_set(&tmp_cg_links, cg, root_cgrp); 1668 link_css_set(&tmp_cg_links, cg, root_cgrp);
1671 write_unlock(&css_set_lock); 1669 write_unlock(&css_set_lock);
1672 1670
@@ -4493,7 +4491,7 @@ int __init_or_module cgroup_load_subsys(struct cgroup_subsys *ss)
4493{ 4491{
4494 struct cgroup_subsys_state *css; 4492 struct cgroup_subsys_state *css;
4495 int i, ret; 4493 int i, ret;
4496 struct hlist_node *node, *tmp; 4494 struct hlist_node *tmp;
4497 struct css_set *cg; 4495 struct css_set *cg;
4498 unsigned long key; 4496 unsigned long key;
4499 4497
@@ -4561,7 +4559,7 @@ int __init_or_module cgroup_load_subsys(struct cgroup_subsys *ss)
4561 * this is all done under the css_set_lock. 4559 * this is all done under the css_set_lock.
4562 */ 4560 */
4563 write_lock(&css_set_lock); 4561 write_lock(&css_set_lock);
4564 hash_for_each_safe(css_set_table, i, node, tmp, cg, hlist) { 4562 hash_for_each_safe(css_set_table, i, tmp, cg, hlist) {
4565 /* skip entries that we already rehashed */ 4563 /* skip entries that we already rehashed */
4566 if (cg->subsys[ss->subsys_id]) 4564 if (cg->subsys[ss->subsys_id])
4567 continue; 4565 continue;
@@ -4571,7 +4569,7 @@ int __init_or_module cgroup_load_subsys(struct cgroup_subsys *ss)
4571 cg->subsys[ss->subsys_id] = css; 4569 cg->subsys[ss->subsys_id] = css;
4572 /* recompute hash and restore entry */ 4570 /* recompute hash and restore entry */
4573 key = css_set_hash(cg->subsys); 4571 key = css_set_hash(cg->subsys);
4574 hash_add(css_set_table, node, key); 4572 hash_add(css_set_table, &cg->hlist, key);
4575 } 4573 }
4576 write_unlock(&css_set_lock); 4574 write_unlock(&css_set_lock);
4577 4575
@@ -4618,10 +4616,8 @@ void cgroup_unload_subsys(struct cgroup_subsys *ss)
4618 offline_css(ss, dummytop); 4616 offline_css(ss, dummytop);
4619 ss->active = 0; 4617 ss->active = 0;
4620 4618
4621 if (ss->use_id) { 4619 if (ss->use_id)
4622 idr_remove_all(&ss->idr);
4623 idr_destroy(&ss->idr); 4620 idr_destroy(&ss->idr);
4624 }
4625 4621
4626 /* deassign the subsys_id */ 4622 /* deassign the subsys_id */
4627 subsys[ss->subsys_id] = NULL; 4623 subsys[ss->subsys_id] = NULL;
@@ -5322,7 +5318,7 @@ EXPORT_SYMBOL_GPL(free_css_id);
5322static struct css_id *get_new_cssid(struct cgroup_subsys *ss, int depth) 5318static struct css_id *get_new_cssid(struct cgroup_subsys *ss, int depth)
5323{ 5319{
5324 struct css_id *newid; 5320 struct css_id *newid;
5325 int myid, error, size; 5321 int ret, size;
5326 5322
5327 BUG_ON(!ss->use_id); 5323 BUG_ON(!ss->use_id);
5328 5324
@@ -5330,35 +5326,24 @@ static struct css_id *get_new_cssid(struct cgroup_subsys *ss, int depth)
5330 newid = kzalloc(size, GFP_KERNEL); 5326 newid = kzalloc(size, GFP_KERNEL);
5331 if (!newid) 5327 if (!newid)
5332 return ERR_PTR(-ENOMEM); 5328 return ERR_PTR(-ENOMEM);
5333 /* get id */ 5329
5334 if (unlikely(!idr_pre_get(&ss->idr, GFP_KERNEL))) { 5330 idr_preload(GFP_KERNEL);
5335 error = -ENOMEM;
5336 goto err_out;
5337 }
5338 spin_lock(&ss->id_lock); 5331 spin_lock(&ss->id_lock);
5339 /* Don't use 0. allocates an ID of 1-65535 */ 5332 /* Don't use 0. allocates an ID of 1-65535 */
5340 error = idr_get_new_above(&ss->idr, newid, 1, &myid); 5333 ret = idr_alloc(&ss->idr, newid, 1, CSS_ID_MAX + 1, GFP_NOWAIT);
5341 spin_unlock(&ss->id_lock); 5334 spin_unlock(&ss->id_lock);
5335 idr_preload_end();
5342 5336
5343 /* Returns error when there are no free spaces for new ID.*/ 5337 /* Returns error when there are no free spaces for new ID.*/
5344 if (error) { 5338 if (ret < 0)
5345 error = -ENOSPC;
5346 goto err_out; 5339 goto err_out;
5347 }
5348 if (myid > CSS_ID_MAX)
5349 goto remove_idr;
5350 5340
5351 newid->id = myid; 5341 newid->id = ret;
5352 newid->depth = depth; 5342 newid->depth = depth;
5353 return newid; 5343 return newid;
5354remove_idr:
5355 error = -ENOSPC;
5356 spin_lock(&ss->id_lock);
5357 idr_remove(&ss->idr, myid);
5358 spin_unlock(&ss->id_lock);
5359err_out: 5344err_out:
5360 kfree(newid); 5345 kfree(newid);
5361 return ERR_PTR(error); 5346 return ERR_PTR(ret);
5362 5347
5363} 5348}
5364 5349
diff --git a/kernel/events/core.c b/kernel/events/core.c
index ccc457e36354..b0cd86501c30 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -5126,7 +5126,6 @@ static void do_perf_sw_event(enum perf_type_id type, u32 event_id,
5126{ 5126{
5127 struct swevent_htable *swhash = &__get_cpu_var(swevent_htable); 5127 struct swevent_htable *swhash = &__get_cpu_var(swevent_htable);
5128 struct perf_event *event; 5128 struct perf_event *event;
5129 struct hlist_node *node;
5130 struct hlist_head *head; 5129 struct hlist_head *head;
5131 5130
5132 rcu_read_lock(); 5131 rcu_read_lock();
@@ -5134,7 +5133,7 @@ static void do_perf_sw_event(enum perf_type_id type, u32 event_id,
5134 if (!head) 5133 if (!head)
5135 goto end; 5134 goto end;
5136 5135
5137 hlist_for_each_entry_rcu(event, node, head, hlist_entry) { 5136 hlist_for_each_entry_rcu(event, head, hlist_entry) {
5138 if (perf_swevent_match(event, type, event_id, data, regs)) 5137 if (perf_swevent_match(event, type, event_id, data, regs))
5139 perf_swevent_event(event, nr, data, regs); 5138 perf_swevent_event(event, nr, data, regs);
5140 } 5139 }
@@ -5419,7 +5418,6 @@ void perf_tp_event(u64 addr, u64 count, void *record, int entry_size,
5419{ 5418{
5420 struct perf_sample_data data; 5419 struct perf_sample_data data;
5421 struct perf_event *event; 5420 struct perf_event *event;
5422 struct hlist_node *node;
5423 5421
5424 struct perf_raw_record raw = { 5422 struct perf_raw_record raw = {
5425 .size = entry_size, 5423 .size = entry_size,
@@ -5429,7 +5427,7 @@ void perf_tp_event(u64 addr, u64 count, void *record, int entry_size,
5429 perf_sample_data_init(&data, addr, 0); 5427 perf_sample_data_init(&data, addr, 0);
5430 data.raw = &raw; 5428 data.raw = &raw;
5431 5429
5432 hlist_for_each_entry_rcu(event, node, head, hlist_entry) { 5430 hlist_for_each_entry_rcu(event, head, hlist_entry) {
5433 if (perf_tp_event_match(event, &data, regs)) 5431 if (perf_tp_event_match(event, &data, regs))
5434 perf_swevent_event(event, count, &data, regs); 5432 perf_swevent_event(event, count, &data, regs);
5435 } 5433 }
@@ -5965,13 +5963,9 @@ int perf_pmu_register(struct pmu *pmu, char *name, int type)
5965 pmu->name = name; 5963 pmu->name = name;
5966 5964
5967 if (type < 0) { 5965 if (type < 0) {
5968 int err = idr_pre_get(&pmu_idr, GFP_KERNEL); 5966 type = idr_alloc(&pmu_idr, pmu, PERF_TYPE_MAX, 0, GFP_KERNEL);
5969 if (!err) 5967 if (type < 0) {
5970 goto free_pdc; 5968 ret = type;
5971
5972 err = idr_get_new_above(&pmu_idr, pmu, PERF_TYPE_MAX, &type);
5973 if (err) {
5974 ret = err;
5975 goto free_pdc; 5969 goto free_pdc;
5976 } 5970 }
5977 } 5971 }
diff --git a/kernel/exit.c b/kernel/exit.c
index 7dd20408707c..51e485ca9935 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -20,6 +20,7 @@
20#include <linux/tsacct_kern.h> 20#include <linux/tsacct_kern.h>
21#include <linux/file.h> 21#include <linux/file.h>
22#include <linux/fdtable.h> 22#include <linux/fdtable.h>
23#include <linux/freezer.h>
23#include <linux/binfmts.h> 24#include <linux/binfmts.h>
24#include <linux/nsproxy.h> 25#include <linux/nsproxy.h>
25#include <linux/pid_namespace.h> 26#include <linux/pid_namespace.h>
@@ -31,7 +32,6 @@
31#include <linux/mempolicy.h> 32#include <linux/mempolicy.h>
32#include <linux/taskstats_kern.h> 33#include <linux/taskstats_kern.h>
33#include <linux/delayacct.h> 34#include <linux/delayacct.h>
34#include <linux/freezer.h>
35#include <linux/cgroup.h> 35#include <linux/cgroup.h>
36#include <linux/syscalls.h> 36#include <linux/syscalls.h>
37#include <linux/signal.h> 37#include <linux/signal.h>
@@ -485,7 +485,7 @@ static void exit_mm(struct task_struct * tsk)
485 set_task_state(tsk, TASK_UNINTERRUPTIBLE); 485 set_task_state(tsk, TASK_UNINTERRUPTIBLE);
486 if (!self.task) /* see coredump_finish() */ 486 if (!self.task) /* see coredump_finish() */
487 break; 487 break;
488 schedule(); 488 freezable_schedule();
489 } 489 }
490 __set_task_state(tsk, TASK_RUNNING); 490 __set_task_state(tsk, TASK_RUNNING);
491 down_read(&mm->mmap_sem); 491 down_read(&mm->mmap_sem);
@@ -835,7 +835,7 @@ void do_exit(long code)
835 /* 835 /*
836 * Make sure we are holding no locks: 836 * Make sure we are holding no locks:
837 */ 837 */
838 debug_check_no_locks_held(tsk); 838 debug_check_no_locks_held();
839 /* 839 /*
840 * We can do this unlocked here. The futex code uses this flag 840 * We can do this unlocked here. The futex code uses this flag
841 * just to verify whether the pi state cleanup has been done 841 * just to verify whether the pi state cleanup has been done
diff --git a/kernel/fork.c b/kernel/fork.c
index 8f62b2a0f120..8d932b1c9056 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -1861,10 +1861,8 @@ SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags)
1861 exit_sem(current); 1861 exit_sem(current);
1862 } 1862 }
1863 1863
1864 if (new_nsproxy) { 1864 if (new_nsproxy)
1865 switch_task_namespaces(current, new_nsproxy); 1865 switch_task_namespaces(current, new_nsproxy);
1866 new_nsproxy = NULL;
1867 }
1868 1866
1869 task_lock(current); 1867 task_lock(current);
1870 1868
@@ -1894,9 +1892,6 @@ SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags)
1894 } 1892 }
1895 } 1893 }
1896 1894
1897 if (new_nsproxy)
1898 put_nsproxy(new_nsproxy);
1899
1900bad_unshare_cleanup_cred: 1895bad_unshare_cleanup_cred:
1901 if (new_cred) 1896 if (new_cred)
1902 put_cred(new_cred); 1897 put_cred(new_cred);
diff --git a/kernel/kexec.c b/kernel/kexec.c
index 2436ffcec91f..bddd3d7a74b6 100644
--- a/kernel/kexec.c
+++ b/kernel/kexec.c
@@ -229,6 +229,8 @@ out:
229 229
230} 230}
231 231
232static void kimage_free_page_list(struct list_head *list);
233
232static int kimage_normal_alloc(struct kimage **rimage, unsigned long entry, 234static int kimage_normal_alloc(struct kimage **rimage, unsigned long entry,
233 unsigned long nr_segments, 235 unsigned long nr_segments,
234 struct kexec_segment __user *segments) 236 struct kexec_segment __user *segments)
@@ -242,8 +244,6 @@ static int kimage_normal_alloc(struct kimage **rimage, unsigned long entry,
242 if (result) 244 if (result)
243 goto out; 245 goto out;
244 246
245 *rimage = image;
246
247 /* 247 /*
248 * Find a location for the control code buffer, and add it 248 * Find a location for the control code buffer, and add it
249 * the vector of segments so that it's pages will also be 249 * the vector of segments so that it's pages will also be
@@ -254,22 +254,22 @@ static int kimage_normal_alloc(struct kimage **rimage, unsigned long entry,
254 get_order(KEXEC_CONTROL_PAGE_SIZE)); 254 get_order(KEXEC_CONTROL_PAGE_SIZE));
255 if (!image->control_code_page) { 255 if (!image->control_code_page) {
256 printk(KERN_ERR "Could not allocate control_code_buffer\n"); 256 printk(KERN_ERR "Could not allocate control_code_buffer\n");
257 goto out; 257 goto out_free;
258 } 258 }
259 259
260 image->swap_page = kimage_alloc_control_pages(image, 0); 260 image->swap_page = kimage_alloc_control_pages(image, 0);
261 if (!image->swap_page) { 261 if (!image->swap_page) {
262 printk(KERN_ERR "Could not allocate swap buffer\n"); 262 printk(KERN_ERR "Could not allocate swap buffer\n");
263 goto out; 263 goto out_free;
264 } 264 }
265 265
266 result = 0; 266 *rimage = image;
267 out: 267 return 0;
268 if (result == 0)
269 *rimage = image;
270 else
271 kfree(image);
272 268
269out_free:
270 kimage_free_page_list(&image->control_pages);
271 kfree(image);
272out:
273 return result; 273 return result;
274} 274}
275 275
@@ -316,7 +316,7 @@ static int kimage_crash_alloc(struct kimage **rimage, unsigned long entry,
316 mend = mstart + image->segment[i].memsz - 1; 316 mend = mstart + image->segment[i].memsz - 1;
317 /* Ensure we are within the crash kernel limits */ 317 /* Ensure we are within the crash kernel limits */
318 if ((mstart < crashk_res.start) || (mend > crashk_res.end)) 318 if ((mstart < crashk_res.start) || (mend > crashk_res.end))
319 goto out; 319 goto out_free;
320 } 320 }
321 321
322 /* 322 /*
@@ -329,16 +329,15 @@ static int kimage_crash_alloc(struct kimage **rimage, unsigned long entry,
329 get_order(KEXEC_CONTROL_PAGE_SIZE)); 329 get_order(KEXEC_CONTROL_PAGE_SIZE));
330 if (!image->control_code_page) { 330 if (!image->control_code_page) {
331 printk(KERN_ERR "Could not allocate control_code_buffer\n"); 331 printk(KERN_ERR "Could not allocate control_code_buffer\n");
332 goto out; 332 goto out_free;
333 } 333 }
334 334
335 result = 0; 335 *rimage = image;
336out: 336 return 0;
337 if (result == 0)
338 *rimage = image;
339 else
340 kfree(image);
341 337
338out_free:
339 kfree(image);
340out:
342 return result; 341 return result;
343} 342}
344 343
@@ -503,8 +502,6 @@ static struct page *kimage_alloc_crash_control_pages(struct kimage *image,
503 502
504 if (hole_end > KEXEC_CRASH_CONTROL_MEMORY_LIMIT) 503 if (hole_end > KEXEC_CRASH_CONTROL_MEMORY_LIMIT)
505 break; 504 break;
506 if (hole_end > crashk_res.end)
507 break;
508 /* See if I overlap any of the segments */ 505 /* See if I overlap any of the segments */
509 for (i = 0; i < image->nr_segments; i++) { 506 for (i = 0; i < image->nr_segments; i++) {
510 unsigned long mstart, mend; 507 unsigned long mstart, mend;
@@ -1514,6 +1511,8 @@ static int __init crash_save_vmcoreinfo_init(void)
1514 VMCOREINFO_OFFSET(page, _count); 1511 VMCOREINFO_OFFSET(page, _count);
1515 VMCOREINFO_OFFSET(page, mapping); 1512 VMCOREINFO_OFFSET(page, mapping);
1516 VMCOREINFO_OFFSET(page, lru); 1513 VMCOREINFO_OFFSET(page, lru);
1514 VMCOREINFO_OFFSET(page, _mapcount);
1515 VMCOREINFO_OFFSET(page, private);
1517 VMCOREINFO_OFFSET(pglist_data, node_zones); 1516 VMCOREINFO_OFFSET(pglist_data, node_zones);
1518 VMCOREINFO_OFFSET(pglist_data, nr_zones); 1517 VMCOREINFO_OFFSET(pglist_data, nr_zones);
1519#ifdef CONFIG_FLAT_NODE_MEM_MAP 1518#ifdef CONFIG_FLAT_NODE_MEM_MAP
@@ -1536,6 +1535,11 @@ static int __init crash_save_vmcoreinfo_init(void)
1536 VMCOREINFO_NUMBER(PG_lru); 1535 VMCOREINFO_NUMBER(PG_lru);
1537 VMCOREINFO_NUMBER(PG_private); 1536 VMCOREINFO_NUMBER(PG_private);
1538 VMCOREINFO_NUMBER(PG_swapcache); 1537 VMCOREINFO_NUMBER(PG_swapcache);
1538 VMCOREINFO_NUMBER(PG_slab);
1539#ifdef CONFIG_MEMORY_FAILURE
1540 VMCOREINFO_NUMBER(PG_hwpoison);
1541#endif
1542 VMCOREINFO_NUMBER(PAGE_BUDDY_MAPCOUNT_VALUE);
1539 1543
1540 arch_crash_save_vmcoreinfo(); 1544 arch_crash_save_vmcoreinfo();
1541 update_vmcoreinfo_note(); 1545 update_vmcoreinfo_note();
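Both kimage_*_alloc() conversions above replace a single out: label that inspected result with explicit out_free: labels, so each failure site frees exactly what has been set up so far and *rimage is published only on success (the forward declaration of kimage_free_page_list() exists because the normal-alloc failure path must now also release already-allocated control pages). A kernel-style sketch of the idiom; struct thing and setup_a() are hypothetical:

struct thing { int dummy; };
int setup_a(struct thing *t);   /* hypothetical setup step */

static int example_alloc(struct thing **out)
{
        struct thing *t;
        int result;

        t = kzalloc(sizeof(*t), GFP_KERNEL);
        if (!t)
                return -ENOMEM;

        result = setup_a(t);
        if (result)
                goto out_free;          /* undo only what succeeded */

        *out = t;                       /* publish on full success only */
        return 0;

out_free:
        kfree(t);
        return result;
}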
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index 550294d58a02..e35be53f6613 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -334,11 +334,10 @@ static inline void reset_kprobe_instance(void)
334struct kprobe __kprobes *get_kprobe(void *addr) 334struct kprobe __kprobes *get_kprobe(void *addr)
335{ 335{
336 struct hlist_head *head; 336 struct hlist_head *head;
337 struct hlist_node *node;
338 struct kprobe *p; 337 struct kprobe *p;
339 338
340 head = &kprobe_table[hash_ptr(addr, KPROBE_HASH_BITS)]; 339 head = &kprobe_table[hash_ptr(addr, KPROBE_HASH_BITS)];
341 hlist_for_each_entry_rcu(p, node, head, hlist) { 340 hlist_for_each_entry_rcu(p, head, hlist) {
342 if (p->addr == addr) 341 if (p->addr == addr)
343 return p; 342 return p;
344 } 343 }
@@ -799,7 +798,6 @@ out:
799static void __kprobes optimize_all_kprobes(void) 798static void __kprobes optimize_all_kprobes(void)
800{ 799{
801 struct hlist_head *head; 800 struct hlist_head *head;
802 struct hlist_node *node;
803 struct kprobe *p; 801 struct kprobe *p;
804 unsigned int i; 802 unsigned int i;
805 803
@@ -810,7 +808,7 @@ static void __kprobes optimize_all_kprobes(void)
810 kprobes_allow_optimization = true; 808 kprobes_allow_optimization = true;
811 for (i = 0; i < KPROBE_TABLE_SIZE; i++) { 809 for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
812 head = &kprobe_table[i]; 810 head = &kprobe_table[i];
813 hlist_for_each_entry_rcu(p, node, head, hlist) 811 hlist_for_each_entry_rcu(p, head, hlist)
814 if (!kprobe_disabled(p)) 812 if (!kprobe_disabled(p))
815 optimize_kprobe(p); 813 optimize_kprobe(p);
816 } 814 }
@@ -821,7 +819,6 @@ static void __kprobes optimize_all_kprobes(void)
821static void __kprobes unoptimize_all_kprobes(void) 819static void __kprobes unoptimize_all_kprobes(void)
822{ 820{
823 struct hlist_head *head; 821 struct hlist_head *head;
824 struct hlist_node *node;
825 struct kprobe *p; 822 struct kprobe *p;
826 unsigned int i; 823 unsigned int i;
827 824
@@ -832,7 +829,7 @@ static void __kprobes unoptimize_all_kprobes(void)
832 kprobes_allow_optimization = false; 829 kprobes_allow_optimization = false;
833 for (i = 0; i < KPROBE_TABLE_SIZE; i++) { 830 for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
834 head = &kprobe_table[i]; 831 head = &kprobe_table[i];
835 hlist_for_each_entry_rcu(p, node, head, hlist) { 832 hlist_for_each_entry_rcu(p, head, hlist) {
836 if (!kprobe_disabled(p)) 833 if (!kprobe_disabled(p))
837 unoptimize_kprobe(p, false); 834 unoptimize_kprobe(p, false);
838 } 835 }
@@ -1148,7 +1145,7 @@ void __kprobes kprobe_flush_task(struct task_struct *tk)
1148{ 1145{
1149 struct kretprobe_instance *ri; 1146 struct kretprobe_instance *ri;
1150 struct hlist_head *head, empty_rp; 1147 struct hlist_head *head, empty_rp;
1151 struct hlist_node *node, *tmp; 1148 struct hlist_node *tmp;
1152 unsigned long hash, flags = 0; 1149 unsigned long hash, flags = 0;
1153 1150
1154 if (unlikely(!kprobes_initialized)) 1151 if (unlikely(!kprobes_initialized))
@@ -1159,12 +1156,12 @@ void __kprobes kprobe_flush_task(struct task_struct *tk)
1159 hash = hash_ptr(tk, KPROBE_HASH_BITS); 1156 hash = hash_ptr(tk, KPROBE_HASH_BITS);
1160 head = &kretprobe_inst_table[hash]; 1157 head = &kretprobe_inst_table[hash];
1161 kretprobe_table_lock(hash, &flags); 1158 kretprobe_table_lock(hash, &flags);
1162 hlist_for_each_entry_safe(ri, node, tmp, head, hlist) { 1159 hlist_for_each_entry_safe(ri, tmp, head, hlist) {
1163 if (ri->task == tk) 1160 if (ri->task == tk)
1164 recycle_rp_inst(ri, &empty_rp); 1161 recycle_rp_inst(ri, &empty_rp);
1165 } 1162 }
1166 kretprobe_table_unlock(hash, &flags); 1163 kretprobe_table_unlock(hash, &flags);
1167 hlist_for_each_entry_safe(ri, node, tmp, &empty_rp, hlist) { 1164 hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {
1168 hlist_del(&ri->hlist); 1165 hlist_del(&ri->hlist);
1169 kfree(ri); 1166 kfree(ri);
1170 } 1167 }
@@ -1173,9 +1170,9 @@ void __kprobes kprobe_flush_task(struct task_struct *tk)
1173static inline void free_rp_inst(struct kretprobe *rp) 1170static inline void free_rp_inst(struct kretprobe *rp)
1174{ 1171{
1175 struct kretprobe_instance *ri; 1172 struct kretprobe_instance *ri;
1176 struct hlist_node *pos, *next; 1173 struct hlist_node *next;
1177 1174
1178 hlist_for_each_entry_safe(ri, pos, next, &rp->free_instances, hlist) { 1175 hlist_for_each_entry_safe(ri, next, &rp->free_instances, hlist) {
1179 hlist_del(&ri->hlist); 1176 hlist_del(&ri->hlist);
1180 kfree(ri); 1177 kfree(ri);
1181 } 1178 }
@@ -1185,14 +1182,14 @@ static void __kprobes cleanup_rp_inst(struct kretprobe *rp)
1185{ 1182{
1186 unsigned long flags, hash; 1183 unsigned long flags, hash;
1187 struct kretprobe_instance *ri; 1184 struct kretprobe_instance *ri;
1188 struct hlist_node *pos, *next; 1185 struct hlist_node *next;
1189 struct hlist_head *head; 1186 struct hlist_head *head;
1190 1187
1191 /* No race here */ 1188 /* No race here */
1192 for (hash = 0; hash < KPROBE_TABLE_SIZE; hash++) { 1189 for (hash = 0; hash < KPROBE_TABLE_SIZE; hash++) {
1193 kretprobe_table_lock(hash, &flags); 1190 kretprobe_table_lock(hash, &flags);
1194 head = &kretprobe_inst_table[hash]; 1191 head = &kretprobe_inst_table[hash];
1195 hlist_for_each_entry_safe(ri, pos, next, head, hlist) { 1192 hlist_for_each_entry_safe(ri, next, head, hlist) {
1196 if (ri->rp == rp) 1193 if (ri->rp == rp)
1197 ri->rp = NULL; 1194 ri->rp = NULL;
1198 } 1195 }
@@ -2028,7 +2025,6 @@ static int __kprobes kprobes_module_callback(struct notifier_block *nb,
2028{ 2025{
2029 struct module *mod = data; 2026 struct module *mod = data;
2030 struct hlist_head *head; 2027 struct hlist_head *head;
2031 struct hlist_node *node;
2032 struct kprobe *p; 2028 struct kprobe *p;
2033 unsigned int i; 2029 unsigned int i;
2034 int checkcore = (val == MODULE_STATE_GOING); 2030 int checkcore = (val == MODULE_STATE_GOING);
@@ -2045,7 +2041,7 @@ static int __kprobes kprobes_module_callback(struct notifier_block *nb,
2045 mutex_lock(&kprobe_mutex); 2041 mutex_lock(&kprobe_mutex);
2046 for (i = 0; i < KPROBE_TABLE_SIZE; i++) { 2042 for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
2047 head = &kprobe_table[i]; 2043 head = &kprobe_table[i];
2048 hlist_for_each_entry_rcu(p, node, head, hlist) 2044 hlist_for_each_entry_rcu(p, head, hlist)
2049 if (within_module_init((unsigned long)p->addr, mod) || 2045 if (within_module_init((unsigned long)p->addr, mod) ||
2050 (checkcore && 2046 (checkcore &&
2051 within_module_core((unsigned long)p->addr, mod))) { 2047 within_module_core((unsigned long)p->addr, mod))) {
@@ -2192,7 +2188,6 @@ static void __kprobes kprobe_seq_stop(struct seq_file *f, void *v)
2192static int __kprobes show_kprobe_addr(struct seq_file *pi, void *v) 2188static int __kprobes show_kprobe_addr(struct seq_file *pi, void *v)
2193{ 2189{
2194 struct hlist_head *head; 2190 struct hlist_head *head;
2195 struct hlist_node *node;
2196 struct kprobe *p, *kp; 2191 struct kprobe *p, *kp;
2197 const char *sym = NULL; 2192 const char *sym = NULL;
2198 unsigned int i = *(loff_t *) v; 2193 unsigned int i = *(loff_t *) v;
@@ -2201,7 +2196,7 @@ static int __kprobes show_kprobe_addr(struct seq_file *pi, void *v)
2201 2196
2202 head = &kprobe_table[i]; 2197 head = &kprobe_table[i];
2203 preempt_disable(); 2198 preempt_disable();
2204 hlist_for_each_entry_rcu(p, node, head, hlist) { 2199 hlist_for_each_entry_rcu(p, head, hlist) {
2205 sym = kallsyms_lookup((unsigned long)p->addr, NULL, 2200 sym = kallsyms_lookup((unsigned long)p->addr, NULL,
2206 &offset, &modname, namebuf); 2201 &offset, &modname, namebuf);
2207 if (kprobe_aggrprobe(p)) { 2202 if (kprobe_aggrprobe(p)) {
@@ -2236,7 +2231,6 @@ static const struct file_operations debugfs_kprobes_operations = {
2236static void __kprobes arm_all_kprobes(void) 2231static void __kprobes arm_all_kprobes(void)
2237{ 2232{
2238 struct hlist_head *head; 2233 struct hlist_head *head;
2239 struct hlist_node *node;
2240 struct kprobe *p; 2234 struct kprobe *p;
2241 unsigned int i; 2235 unsigned int i;
2242 2236
@@ -2249,7 +2243,7 @@ static void __kprobes arm_all_kprobes(void)
2249 /* Arming kprobes doesn't optimize kprobe itself */ 2243 /* Arming kprobes doesn't optimize kprobe itself */
2250 for (i = 0; i < KPROBE_TABLE_SIZE; i++) { 2244 for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
2251 head = &kprobe_table[i]; 2245 head = &kprobe_table[i];
2252 hlist_for_each_entry_rcu(p, node, head, hlist) 2246 hlist_for_each_entry_rcu(p, head, hlist)
2253 if (!kprobe_disabled(p)) 2247 if (!kprobe_disabled(p))
2254 arm_kprobe(p); 2248 arm_kprobe(p);
2255 } 2249 }
@@ -2265,7 +2259,6 @@ already_enabled:
2265static void __kprobes disarm_all_kprobes(void) 2259static void __kprobes disarm_all_kprobes(void)
2266{ 2260{
2267 struct hlist_head *head; 2261 struct hlist_head *head;
2268 struct hlist_node *node;
2269 struct kprobe *p; 2262 struct kprobe *p;
2270 unsigned int i; 2263 unsigned int i;
2271 2264
@@ -2282,7 +2275,7 @@ static void __kprobes disarm_all_kprobes(void)
2282 2275
2283 for (i = 0; i < KPROBE_TABLE_SIZE; i++) { 2276 for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
2284 head = &kprobe_table[i]; 2277 head = &kprobe_table[i];
2285 hlist_for_each_entry_rcu(p, node, head, hlist) { 2278 hlist_for_each_entry_rcu(p, head, hlist) {
2286 if (!arch_trampoline_kprobe(p) && !kprobe_disabled(p)) 2279 if (!arch_trampoline_kprobe(p) && !kprobe_disabled(p))
2287 disarm_kprobe(p, false); 2280 disarm_kprobe(p, false);
2288 } 2281 }
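Every kprobes hunk above is mechanical fallout of the series-wide "hlist: drop the node parameter from iterators" change: the iterators now derive the cursor from the entry itself (via hlist_entry_safe() internally), so callers no longer declare a struct hlist_node *. Before and after, with a hypothetical struct item:

struct item {
        int key;
        struct hlist_node hlist;
};

static struct item *find(struct hlist_head *head, int key)
{
        struct item *p;
        /* old form also needed: struct hlist_node *node; */

        hlist_for_each_entry(p, head, hlist) {  /* was (p, node, head, hlist) */
                if (p->key == key)
                        return p;
        }
        return NULL;
}

The _rcu and _safe variants change the same way; only the extra tmp cursor survives in the _safe forms, as the kprobe_flush_task() hunk shows.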
diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index 8a0efac4f99d..259db207b5d9 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -4088,7 +4088,7 @@ void debug_check_no_locks_freed(const void *mem_from, unsigned long mem_len)
4088} 4088}
4089EXPORT_SYMBOL_GPL(debug_check_no_locks_freed); 4089EXPORT_SYMBOL_GPL(debug_check_no_locks_freed);
4090 4090
4091static void print_held_locks_bug(struct task_struct *curr) 4091static void print_held_locks_bug(void)
4092{ 4092{
4093 if (!debug_locks_off()) 4093 if (!debug_locks_off())
4094 return; 4094 return;
@@ -4097,22 +4097,21 @@ static void print_held_locks_bug(struct task_struct *curr)
4097 4097
4098 printk("\n"); 4098 printk("\n");
4099 printk("=====================================\n"); 4099 printk("=====================================\n");
4100 printk("[ BUG: lock held at task exit time! ]\n"); 4100 printk("[ BUG: %s/%d still has locks held! ]\n",
4101 current->comm, task_pid_nr(current));
4101 print_kernel_ident(); 4102 print_kernel_ident();
4102 printk("-------------------------------------\n"); 4103 printk("-------------------------------------\n");
4103 printk("%s/%d is exiting with locks still held!\n", 4104 lockdep_print_held_locks(current);
4104 curr->comm, task_pid_nr(curr));
4105 lockdep_print_held_locks(curr);
4106
4107 printk("\nstack backtrace:\n"); 4105 printk("\nstack backtrace:\n");
4108 dump_stack(); 4106 dump_stack();
4109} 4107}
4110 4108
4111void debug_check_no_locks_held(struct task_struct *task) 4109void debug_check_no_locks_held(void)
4112{ 4110{
4113 if (unlikely(task->lockdep_depth > 0)) 4111 if (unlikely(current->lockdep_depth > 0))
4114 print_held_locks_bug(task); 4112 print_held_locks_bug();
4115} 4113}
4114EXPORT_SYMBOL_GPL(debug_check_no_locks_held);
4116 4115
4117void debug_show_all_locks(void) 4116void debug_show_all_locks(void)
4118{ 4117{
diff --git a/kernel/pid.c b/kernel/pid.c
index f2c6a6825098..047dc6264638 100644
--- a/kernel/pid.c
+++ b/kernel/pid.c
@@ -350,10 +350,9 @@ void disable_pid_allocation(struct pid_namespace *ns)
350 350
351struct pid *find_pid_ns(int nr, struct pid_namespace *ns) 351struct pid *find_pid_ns(int nr, struct pid_namespace *ns)
352{ 352{
353 struct hlist_node *elem;
354 struct upid *pnr; 353 struct upid *pnr;
355 354
356 hlist_for_each_entry_rcu(pnr, elem, 355 hlist_for_each_entry_rcu(pnr,
357 &pid_hash[pid_hashfn(nr, ns)], pid_chain) 356 &pid_hash[pid_hashfn(nr, ns)], pid_chain)
358 if (pnr->nr == nr && pnr->ns == ns) 357 if (pnr->nr == nr && pnr->ns == ns)
359 return container_of(pnr, struct pid, 358 return container_of(pnr, struct pid,
diff --git a/kernel/posix-timers.c b/kernel/posix-timers.c
index 7edfe4b901e7..6edbb2c55c22 100644
--- a/kernel/posix-timers.c
+++ b/kernel/posix-timers.c
@@ -552,24 +552,22 @@ SYSCALL_DEFINE3(timer_create, const clockid_t, which_clock,
552 return -EAGAIN; 552 return -EAGAIN;
553 553
554 spin_lock_init(&new_timer->it_lock); 554 spin_lock_init(&new_timer->it_lock);
555 retry: 555
556 if (unlikely(!idr_pre_get(&posix_timers_id, GFP_KERNEL))) { 556 idr_preload(GFP_KERNEL);
557 error = -EAGAIN;
558 goto out;
559 }
560 spin_lock_irq(&idr_lock); 557 spin_lock_irq(&idr_lock);
561 error = idr_get_new(&posix_timers_id, new_timer, &new_timer_id); 558 error = idr_alloc(&posix_timers_id, new_timer, 0, 0, GFP_NOWAIT);
562 spin_unlock_irq(&idr_lock); 559 spin_unlock_irq(&idr_lock);
563 if (error) { 560 idr_preload_end();
564 if (error == -EAGAIN) 561 if (error < 0) {
565 goto retry;
566 /* 562 /*
567 * Weird looking, but we return EAGAIN if the IDR is 563 * Weird looking, but we return EAGAIN if the IDR is
568 * full (proper POSIX return value for this) 564 * full (proper POSIX return value for this)
569 */ 565 */
570 error = -EAGAIN; 566 if (error == -ENOSPC)
567 error = -EAGAIN;
571 goto out; 568 goto out;
572 } 569 }
570 new_timer_id = error;
573 571
574 it_id_set = IT_ID_SET; 572 it_id_set = IT_ID_SET;
575 new_timer->it_id = (timer_t) new_timer_id; 573 new_timer->it_id = (timer_t) new_timer_id;
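This is the canonical conversion to the new idr interface introduced by the idr rework in this merge: idr_preload() + idr_alloc() replaces the idr_pre_get()/idr_get_new() retry loop, and the allocated id comes back as the non-negative return value. The pattern for allocating under a spinlock, with my_idr/my_lock as placeholder names:

static DEFINE_IDR(my_idr);
static DEFINE_SPINLOCK(my_lock);

static int install(void *ptr)
{
        int id;

        idr_preload(GFP_KERNEL);        /* may sleep; disables preemption */
        spin_lock(&my_lock);
        /* GFP_NOWAIT: fall back to the per-cpu preload, never sleep here */
        id = idr_alloc(&my_idr, ptr, 0, 0, GFP_NOWAIT);
        spin_unlock(&my_lock);
        idr_preload_end();

        return id;      /* >= 0 on success, -ENOMEM or -ENOSPC on failure */
}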
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 2b5243176aba..12af4270c9c1 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1752,9 +1752,8 @@ EXPORT_SYMBOL_GPL(preempt_notifier_unregister);
1752static void fire_sched_in_preempt_notifiers(struct task_struct *curr) 1752static void fire_sched_in_preempt_notifiers(struct task_struct *curr)
1753{ 1753{
1754 struct preempt_notifier *notifier; 1754 struct preempt_notifier *notifier;
1755 struct hlist_node *node;
1756 1755
1757 hlist_for_each_entry(notifier, node, &curr->preempt_notifiers, link) 1756 hlist_for_each_entry(notifier, &curr->preempt_notifiers, link)
1758 notifier->ops->sched_in(notifier, raw_smp_processor_id()); 1757 notifier->ops->sched_in(notifier, raw_smp_processor_id());
1759} 1758}
1760 1759
@@ -1763,9 +1762,8 @@ fire_sched_out_preempt_notifiers(struct task_struct *curr,
1763 struct task_struct *next) 1762 struct task_struct *next)
1764{ 1763{
1765 struct preempt_notifier *notifier; 1764 struct preempt_notifier *notifier;
1766 struct hlist_node *node;
1767 1765
1768 hlist_for_each_entry(notifier, node, &curr->preempt_notifiers, link) 1766 hlist_for_each_entry(notifier, &curr->preempt_notifiers, link)
1769 notifier->ops->sched_out(notifier, next); 1767 notifier->ops->sched_out(notifier, next);
1770} 1768}
1771 1769
diff --git a/kernel/signal.c b/kernel/signal.c
index 2a7ae2963185..2676aac4103d 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -1157,11 +1157,11 @@ static int send_signal(int sig, struct siginfo *info, struct task_struct *t,
1157static void print_fatal_signal(int signr) 1157static void print_fatal_signal(int signr)
1158{ 1158{
1159 struct pt_regs *regs = signal_pt_regs(); 1159 struct pt_regs *regs = signal_pt_regs();
1160 printk("%s/%d: potentially unexpected fatal signal %d.\n", 1160 printk(KERN_INFO "%s/%d: potentially unexpected fatal signal %d.\n",
1161 current->comm, task_pid_nr(current), signr); 1161 current->comm, task_pid_nr(current), signr);
1162 1162
1163#if defined(__i386__) && !defined(__arch_um__) 1163#if defined(__i386__) && !defined(__arch_um__)
1164 printk("code at %08lx: ", regs->ip); 1164 printk(KERN_INFO "code at %08lx: ", regs->ip);
1165 { 1165 {
1166 int i; 1166 int i;
1167 for (i = 0; i < 16; i++) { 1167 for (i = 0; i < 16; i++) {
@@ -1169,11 +1169,11 @@ static void print_fatal_signal(int signr)
1169 1169
1170 if (get_user(insn, (unsigned char *)(regs->ip + i))) 1170 if (get_user(insn, (unsigned char *)(regs->ip + i)))
1171 break; 1171 break;
1172 printk("%02x ", insn); 1172 printk(KERN_CONT "%02x ", insn);
1173 } 1173 }
1174 } 1174 }
1175 printk(KERN_CONT "\n");
1175#endif 1176#endif
1176 printk("\n");
1177 preempt_disable(); 1177 preempt_disable();
1178 show_regs(regs); 1178 show_regs(regs);
1179 preempt_enable(); 1179 preempt_enable();
@@ -2996,7 +2996,8 @@ static int do_rt_sigqueueinfo(pid_t pid, int sig, siginfo_t *info)
2996 /* Not even root can pretend to send signals from the kernel. 2996 /* Not even root can pretend to send signals from the kernel.
2997 * Nor can they impersonate a kill()/tgkill(), which adds source info. 2997 * Nor can they impersonate a kill()/tgkill(), which adds source info.
2998 */ 2998 */
2999 if (info->si_code >= 0 || info->si_code == SI_TKILL) { 2999 if ((info->si_code >= 0 || info->si_code == SI_TKILL) &&
3000 (task_pid_vnr(current) != pid)) {
3000 /* We used to allow any < 0 si_code */ 3001 /* We used to allow any < 0 si_code */
3001 WARN_ON_ONCE(info->si_code < 0); 3002 WARN_ON_ONCE(info->si_code < 0);
3002 return -EPERM; 3003 return -EPERM;
@@ -3045,7 +3046,8 @@ static int do_rt_tgsigqueueinfo(pid_t tgid, pid_t pid, int sig, siginfo_t *info)
3045 /* Not even root can pretend to send signals from the kernel. 3046 /* Not even root can pretend to send signals from the kernel.
3046 * Nor can they impersonate a kill()/tgkill(), which adds source info. 3047 * Nor can they impersonate a kill()/tgkill(), which adds source info.
3047 */ 3048 */
3048 if (info->si_code >= 0 || info->si_code == SI_TKILL) { 3049 if (((info->si_code >= 0 || info->si_code == SI_TKILL)) &&
3050 (task_pid_vnr(current) != pid)) {
3049 /* We used to allow any < 0 si_code */ 3051 /* We used to allow any < 0 si_code */
3050 WARN_ON_ONCE(info->si_code < 0); 3052 WARN_ON_ONCE(info->si_code < 0);
3051 return -EPERM; 3053 return -EPERM;
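The sigqueueinfo hunks narrow the forgery check: kernel-style si_code values are now rejected only when the target pid differs from the sender's own, so a process regains the ability to queue such siginfo to itself, which checkpoint/restore tooling depends on. The resulting predicate, isolated (forged_siginfo is an illustrative name):

static bool forged_siginfo(const siginfo_t *info, pid_t target)
{
        return (info->si_code >= 0 || info->si_code == SI_TKILL) &&
               task_pid_vnr(current) != target;
}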
diff --git a/kernel/smpboot.c b/kernel/smpboot.c
index d4abac261779..b9bde5727829 100644
--- a/kernel/smpboot.c
+++ b/kernel/smpboot.c
@@ -131,7 +131,7 @@ static int smpboot_thread_fn(void *data)
131 continue; 131 continue;
132 } 132 }
133 133
134 BUG_ON(td->cpu != smp_processor_id()); 134 //BUG_ON(td->cpu != smp_processor_id());
135 135
136 /* Check for state change setup */ 136 /* Check for state change setup */
137 switch (td->status) { 137 switch (td->status) {
diff --git a/kernel/sys.c b/kernel/sys.c
index e10566bee399..81f56445fba9 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -2185,11 +2185,6 @@ SYSCALL_DEFINE3(getcpu, unsigned __user *, cpup, unsigned __user *, nodep,
2185 2185
2186char poweroff_cmd[POWEROFF_CMD_PATH_LEN] = "/sbin/poweroff"; 2186char poweroff_cmd[POWEROFF_CMD_PATH_LEN] = "/sbin/poweroff";
2187 2187
2188static void argv_cleanup(struct subprocess_info *info)
2189{
2190 argv_free(info->argv);
2191}
2192
2193static int __orderly_poweroff(void) 2188static int __orderly_poweroff(void)
2194{ 2189{
2195 int argc; 2190 int argc;
@@ -2209,9 +2204,8 @@ static int __orderly_poweroff(void)
2209 } 2204 }
2210 2205
2211 ret = call_usermodehelper_fns(argv[0], argv, envp, UMH_WAIT_EXEC, 2206 ret = call_usermodehelper_fns(argv[0], argv, envp, UMH_WAIT_EXEC,
2212 NULL, argv_cleanup, NULL); 2207 NULL, NULL, NULL);
2213 if (ret == -ENOMEM) 2208 argv_free(argv);
2214 argv_free(argv);
2215 2209
2216 return ret; 2210 return ret;
2217} 2211}
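The reasoning behind dropping argv_cleanup(): with UMH_WAIT_EXEC the helper's exec has completed (or failed) by the time call_usermodehelper_fns() returns, so the argument array is no longer referenced. Previously argv was freed either through the cleanup callback or, when setup failed with -ENOMEM, by hand; a single unconditional argv_free() after the call now covers both cases:

ret = call_usermodehelper_fns(argv[0], argv, envp, UMH_WAIT_EXEC,
                              NULL, NULL, NULL);
argv_free(argv);        /* safe on every path once the call returns */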
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index d8df00e69c14..d1b4ee67d2df 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -2095,7 +2095,7 @@ int proc_dointvec_minmax(struct ctl_table *table, int write,
2095static void validate_coredump_safety(void) 2095static void validate_coredump_safety(void)
2096{ 2096{
2097#ifdef CONFIG_COREDUMP 2097#ifdef CONFIG_COREDUMP
2098 if (suid_dumpable == SUID_DUMPABLE_SAFE && 2098 if (suid_dumpable == SUID_DUMP_ROOT &&
2099 core_pattern[0] != '/' && core_pattern[0] != '|') { 2099 core_pattern[0] != '/' && core_pattern[0] != '|') {
2100 printk(KERN_WARNING "Unsafe core_pattern used with "\ 2100 printk(KERN_WARNING "Unsafe core_pattern used with "\
2101 "suid_dumpable=2. Pipe handler or fully qualified "\ 2101 "suid_dumpable=2. Pipe handler or fully qualified "\
diff --git a/kernel/sysctl_binary.c b/kernel/sysctl_binary.c
index b25115e8c7f3..ebf72358e86a 100644
--- a/kernel/sysctl_binary.c
+++ b/kernel/sysctl_binary.c
@@ -1171,9 +1171,10 @@ static ssize_t bin_dn_node_address(struct file *file,
1171 1171
1172 /* Convert the decnet address to binary */ 1172 /* Convert the decnet address to binary */
1173 result = -EIO; 1173 result = -EIO;
1174 nodep = strchr(buf, '.') + 1; 1174 nodep = strchr(buf, '.');
1175 if (!nodep) 1175 if (!nodep)
1176 goto out; 1176 goto out;
1177 ++nodep;
1177 1178
1178 area = simple_strtoul(buf, NULL, 10); 1179 area = simple_strtoul(buf, NULL, 10);
1179 node = simple_strtoul(nodep, NULL, 10); 1180 node = simple_strtoul(nodep, NULL, 10);
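The bin_dn_node_address() fix is worth spelling out: computing strchr(buf, '.') + 1 before the NULL test means the test can never fire (a NULL result becomes (char *)1, and pointer arithmetic on NULL is undefined besides). The return value must be checked raw, then advanced:

char *nodep = strchr(buf, '.');
if (!nodep)
        goto out;       /* no '.' separator present */
++nodep;                /* now safely points past the '.' */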
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 98ca94a41819..ab25b88aae56 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -762,7 +762,6 @@ ftrace_find_profiled_func(struct ftrace_profile_stat *stat, unsigned long ip)
762{ 762{
763 struct ftrace_profile *rec; 763 struct ftrace_profile *rec;
764 struct hlist_head *hhd; 764 struct hlist_head *hhd;
765 struct hlist_node *n;
766 unsigned long key; 765 unsigned long key;
767 766
768 key = hash_long(ip, ftrace_profile_bits); 767 key = hash_long(ip, ftrace_profile_bits);
@@ -771,7 +770,7 @@ ftrace_find_profiled_func(struct ftrace_profile_stat *stat, unsigned long ip)
771 if (hlist_empty(hhd)) 770 if (hlist_empty(hhd))
772 return NULL; 771 return NULL;
773 772
774 hlist_for_each_entry_rcu(rec, n, hhd, node) { 773 hlist_for_each_entry_rcu(rec, hhd, node) {
775 if (rec->ip == ip) 774 if (rec->ip == ip)
776 return rec; 775 return rec;
777 } 776 }
@@ -1133,7 +1132,6 @@ ftrace_lookup_ip(struct ftrace_hash *hash, unsigned long ip)
1133 unsigned long key; 1132 unsigned long key;
1134 struct ftrace_func_entry *entry; 1133 struct ftrace_func_entry *entry;
1135 struct hlist_head *hhd; 1134 struct hlist_head *hhd;
1136 struct hlist_node *n;
1137 1135
1138 if (ftrace_hash_empty(hash)) 1136 if (ftrace_hash_empty(hash))
1139 return NULL; 1137 return NULL;
@@ -1145,7 +1143,7 @@ ftrace_lookup_ip(struct ftrace_hash *hash, unsigned long ip)
1145 1143
1146 hhd = &hash->buckets[key]; 1144 hhd = &hash->buckets[key];
1147 1145
1148 hlist_for_each_entry_rcu(entry, n, hhd, hlist) { 1146 hlist_for_each_entry_rcu(entry, hhd, hlist) {
1149 if (entry->ip == ip) 1147 if (entry->ip == ip)
1150 return entry; 1148 return entry;
1151 } 1149 }
@@ -1202,7 +1200,7 @@ remove_hash_entry(struct ftrace_hash *hash,
1202static void ftrace_hash_clear(struct ftrace_hash *hash) 1200static void ftrace_hash_clear(struct ftrace_hash *hash)
1203{ 1201{
1204 struct hlist_head *hhd; 1202 struct hlist_head *hhd;
1205 struct hlist_node *tp, *tn; 1203 struct hlist_node *tn;
1206 struct ftrace_func_entry *entry; 1204 struct ftrace_func_entry *entry;
1207 int size = 1 << hash->size_bits; 1205 int size = 1 << hash->size_bits;
1208 int i; 1206 int i;
@@ -1212,7 +1210,7 @@ static void ftrace_hash_clear(struct ftrace_hash *hash)
1212 1210
1213 for (i = 0; i < size; i++) { 1211 for (i = 0; i < size; i++) {
1214 hhd = &hash->buckets[i]; 1212 hhd = &hash->buckets[i];
1215 hlist_for_each_entry_safe(entry, tp, tn, hhd, hlist) 1213 hlist_for_each_entry_safe(entry, tn, hhd, hlist)
1216 free_hash_entry(hash, entry); 1214 free_hash_entry(hash, entry);
1217 } 1215 }
1218 FTRACE_WARN_ON(hash->count); 1216 FTRACE_WARN_ON(hash->count);
@@ -1275,7 +1273,6 @@ alloc_and_copy_ftrace_hash(int size_bits, struct ftrace_hash *hash)
1275{ 1273{
1276 struct ftrace_func_entry *entry; 1274 struct ftrace_func_entry *entry;
1277 struct ftrace_hash *new_hash; 1275 struct ftrace_hash *new_hash;
1278 struct hlist_node *tp;
1279 int size; 1276 int size;
1280 int ret; 1277 int ret;
1281 int i; 1278 int i;
@@ -1290,7 +1287,7 @@ alloc_and_copy_ftrace_hash(int size_bits, struct ftrace_hash *hash)
1290 1287
1291 size = 1 << hash->size_bits; 1288 size = 1 << hash->size_bits;
1292 for (i = 0; i < size; i++) { 1289 for (i = 0; i < size; i++) {
1293 hlist_for_each_entry(entry, tp, &hash->buckets[i], hlist) { 1290 hlist_for_each_entry(entry, &hash->buckets[i], hlist) {
1294 ret = add_hash_entry(new_hash, entry->ip); 1291 ret = add_hash_entry(new_hash, entry->ip);
1295 if (ret < 0) 1292 if (ret < 0)
1296 goto free_hash; 1293 goto free_hash;
@@ -1316,7 +1313,7 @@ ftrace_hash_move(struct ftrace_ops *ops, int enable,
1316 struct ftrace_hash **dst, struct ftrace_hash *src) 1313 struct ftrace_hash **dst, struct ftrace_hash *src)
1317{ 1314{
1318 struct ftrace_func_entry *entry; 1315 struct ftrace_func_entry *entry;
1319 struct hlist_node *tp, *tn; 1316 struct hlist_node *tn;
1320 struct hlist_head *hhd; 1317 struct hlist_head *hhd;
1321 struct ftrace_hash *old_hash; 1318 struct ftrace_hash *old_hash;
1322 struct ftrace_hash *new_hash; 1319 struct ftrace_hash *new_hash;
@@ -1362,7 +1359,7 @@ ftrace_hash_move(struct ftrace_ops *ops, int enable,
1362 size = 1 << src->size_bits; 1359 size = 1 << src->size_bits;
1363 for (i = 0; i < size; i++) { 1360 for (i = 0; i < size; i++) {
1364 hhd = &src->buckets[i]; 1361 hhd = &src->buckets[i];
1365 hlist_for_each_entry_safe(entry, tp, tn, hhd, hlist) { 1362 hlist_for_each_entry_safe(entry, tn, hhd, hlist) {
1366 if (bits > 0) 1363 if (bits > 0)
1367 key = hash_long(entry->ip, bits); 1364 key = hash_long(entry->ip, bits);
1368 else 1365 else
@@ -2901,7 +2898,6 @@ static void function_trace_probe_call(unsigned long ip, unsigned long parent_ip,
2901{ 2898{
2902 struct ftrace_func_probe *entry; 2899 struct ftrace_func_probe *entry;
2903 struct hlist_head *hhd; 2900 struct hlist_head *hhd;
2904 struct hlist_node *n;
2905 unsigned long key; 2901 unsigned long key;
2906 2902
2907 key = hash_long(ip, FTRACE_HASH_BITS); 2903 key = hash_long(ip, FTRACE_HASH_BITS);
@@ -2917,7 +2913,7 @@ static void function_trace_probe_call(unsigned long ip, unsigned long parent_ip,
2917 * on the hash. rcu_read_lock is too dangerous here. 2913 * on the hash. rcu_read_lock is too dangerous here.
2918 */ 2914 */
2919 preempt_disable_notrace(); 2915 preempt_disable_notrace();
2920 hlist_for_each_entry_rcu(entry, n, hhd, node) { 2916 hlist_for_each_entry_rcu(entry, hhd, node) {
2921 if (entry->ip == ip) 2917 if (entry->ip == ip)
2922 entry->ops->func(ip, parent_ip, &entry->data); 2918 entry->ops->func(ip, parent_ip, &entry->data);
2923 } 2919 }
@@ -3068,7 +3064,7 @@ __unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
3068 void *data, int flags) 3064 void *data, int flags)
3069{ 3065{
3070 struct ftrace_func_probe *entry; 3066 struct ftrace_func_probe *entry;
3071 struct hlist_node *n, *tmp; 3067 struct hlist_node *tmp;
3072 char str[KSYM_SYMBOL_LEN]; 3068 char str[KSYM_SYMBOL_LEN];
3073 int type = MATCH_FULL; 3069 int type = MATCH_FULL;
3074 int i, len = 0; 3070 int i, len = 0;
@@ -3091,7 +3087,7 @@ __unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
3091 for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) { 3087 for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
3092 struct hlist_head *hhd = &ftrace_func_hash[i]; 3088 struct hlist_head *hhd = &ftrace_func_hash[i];
3093 3089
3094 hlist_for_each_entry_safe(entry, n, tmp, hhd, node) { 3090 hlist_for_each_entry_safe(entry, tmp, hhd, node) {
3095 3091
3096 /* break up if statements for readability */ 3092 /* break up if statements for readability */
3097 if ((flags & PROBE_TEST_FUNC) && entry->ops != ops) 3093 if ((flags & PROBE_TEST_FUNC) && entry->ops != ops)
diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
index 194d79602dc7..697e88d13907 100644
--- a/kernel/trace/trace_output.c
+++ b/kernel/trace/trace_output.c
@@ -739,12 +739,11 @@ static int task_state_char(unsigned long state)
739struct trace_event *ftrace_find_event(int type) 739struct trace_event *ftrace_find_event(int type)
740{ 740{
741 struct trace_event *event; 741 struct trace_event *event;
742 struct hlist_node *n;
743 unsigned key; 742 unsigned key;
744 743
745 key = type & (EVENT_HASHSIZE - 1); 744 key = type & (EVENT_HASHSIZE - 1);
746 745
747 hlist_for_each_entry(event, n, &event_hash[key], node) { 746 hlist_for_each_entry(event, &event_hash[key], node) {
748 if (event->type == type) 747 if (event->type == type)
749 return event; 748 return event;
750 } 749 }
diff --git a/kernel/tracepoint.c b/kernel/tracepoint.c
index d96ba22dabfa..0c05a4592047 100644
--- a/kernel/tracepoint.c
+++ b/kernel/tracepoint.c
@@ -192,12 +192,11 @@ tracepoint_entry_remove_probe(struct tracepoint_entry *entry,
192static struct tracepoint_entry *get_tracepoint(const char *name) 192static struct tracepoint_entry *get_tracepoint(const char *name)
193{ 193{
194 struct hlist_head *head; 194 struct hlist_head *head;
195 struct hlist_node *node;
196 struct tracepoint_entry *e; 195 struct tracepoint_entry *e;
197 u32 hash = jhash(name, strlen(name), 0); 196 u32 hash = jhash(name, strlen(name), 0);
198 197
199 head = &tracepoint_table[hash & (TRACEPOINT_TABLE_SIZE - 1)]; 198 head = &tracepoint_table[hash & (TRACEPOINT_TABLE_SIZE - 1)];
200 hlist_for_each_entry(e, node, head, hlist) { 199 hlist_for_each_entry(e, head, hlist) {
201 if (!strcmp(name, e->name)) 200 if (!strcmp(name, e->name))
202 return e; 201 return e;
203 } 202 }
@@ -211,13 +210,12 @@ static struct tracepoint_entry *get_tracepoint(const char *name)
211static struct tracepoint_entry *add_tracepoint(const char *name) 210static struct tracepoint_entry *add_tracepoint(const char *name)
212{ 211{
213 struct hlist_head *head; 212 struct hlist_head *head;
214 struct hlist_node *node;
215 struct tracepoint_entry *e; 213 struct tracepoint_entry *e;
216 size_t name_len = strlen(name) + 1; 214 size_t name_len = strlen(name) + 1;
217 u32 hash = jhash(name, name_len-1, 0); 215 u32 hash = jhash(name, name_len-1, 0);
218 216
219 head = &tracepoint_table[hash & (TRACEPOINT_TABLE_SIZE - 1)]; 217 head = &tracepoint_table[hash & (TRACEPOINT_TABLE_SIZE - 1)];
220 hlist_for_each_entry(e, node, head, hlist) { 218 hlist_for_each_entry(e, head, hlist) {
221 if (!strcmp(name, e->name)) { 219 if (!strcmp(name, e->name)) {
222 printk(KERN_NOTICE 220 printk(KERN_NOTICE
223 "tracepoint %s busy\n", name); 221 "tracepoint %s busy\n", name);
diff --git a/kernel/user-return-notifier.c b/kernel/user-return-notifier.c
index 1744bb80f1fb..394f70b17162 100644
--- a/kernel/user-return-notifier.c
+++ b/kernel/user-return-notifier.c
@@ -34,11 +34,11 @@ EXPORT_SYMBOL_GPL(user_return_notifier_unregister);
34void fire_user_return_notifiers(void) 34void fire_user_return_notifiers(void)
35{ 35{
36 struct user_return_notifier *urn; 36 struct user_return_notifier *urn;
37 struct hlist_node *tmp1, *tmp2; 37 struct hlist_node *tmp2;
38 struct hlist_head *head; 38 struct hlist_head *head;
39 39
40 head = &get_cpu_var(return_notifier_list); 40 head = &get_cpu_var(return_notifier_list);
41 hlist_for_each_entry_safe(urn, tmp1, tmp2, head, link) 41 hlist_for_each_entry_safe(urn, tmp2, head, link)
42 urn->on_user_return(urn); 42 urn->on_user_return(urn);
43 put_cpu_var(return_notifier_list); 43 put_cpu_var(return_notifier_list);
44} 44}
diff --git a/kernel/user.c b/kernel/user.c
index 57ebfd42023c..e81978e8c03b 100644
--- a/kernel/user.c
+++ b/kernel/user.c
@@ -105,9 +105,8 @@ static void uid_hash_remove(struct user_struct *up)
105static struct user_struct *uid_hash_find(kuid_t uid, struct hlist_head *hashent) 105static struct user_struct *uid_hash_find(kuid_t uid, struct hlist_head *hashent)
106{ 106{
107 struct user_struct *user; 107 struct user_struct *user;
108 struct hlist_node *h;
109 108
110 hlist_for_each_entry(user, h, hashent, uidhash_node) { 109 hlist_for_each_entry(user, hashent, uidhash_node) {
111 if (uid_eq(user->uid, uid)) { 110 if (uid_eq(user->uid, uid)) {
112 atomic_inc(&user->__count); 111 atomic_inc(&user->__count);
113 return user; 112 return user;
diff --git a/kernel/utsname.c b/kernel/utsname.c
index 08b197e8c485..a47fc5de3113 100644
--- a/kernel/utsname.c
+++ b/kernel/utsname.c
@@ -30,7 +30,7 @@ static struct uts_namespace *create_uts_ns(void)
30/* 30/*
31 * Clone a new ns copying an original utsname, setting refcount to 1 31 * Clone a new ns copying an original utsname, setting refcount to 1
32 * @old_ns: namespace to clone 32 * @old_ns: namespace to clone
33 * Return NULL on error (failure to kmalloc), new ns otherwise 33 * Return ERR_PTR(-ENOMEM) on error (failure to kmalloc), new ns otherwise
34 */ 34 */
35static struct uts_namespace *clone_uts_ns(struct user_namespace *user_ns, 35static struct uts_namespace *clone_uts_ns(struct user_namespace *user_ns,
36 struct uts_namespace *old_ns) 36 struct uts_namespace *old_ns)
diff --git a/kernel/utsname_sysctl.c b/kernel/utsname_sysctl.c
index 63da38c2d820..4f69f9a5e221 100644
--- a/kernel/utsname_sysctl.c
+++ b/kernel/utsname_sysctl.c
@@ -15,6 +15,8 @@
15#include <linux/sysctl.h> 15#include <linux/sysctl.h>
16#include <linux/wait.h> 16#include <linux/wait.h>
17 17
18#ifdef CONFIG_PROC_SYSCTL
19
18static void *get_uts(ctl_table *table, int write) 20static void *get_uts(ctl_table *table, int write)
19{ 21{
20 char *which = table->data; 22 char *which = table->data;
@@ -38,7 +40,6 @@ static void put_uts(ctl_table *table, int write, void *which)
38 up_write(&uts_sem); 40 up_write(&uts_sem);
39} 41}
40 42
41#ifdef CONFIG_PROC_SYSCTL
42/* 43/*
43 * Special case of dostring for the UTS structure. This has locks 44 * Special case of dostring for the UTS structure. This has locks
44 * to observe. Should this be in kernel/sys.c ???? 45 * to observe. Should this be in kernel/sys.c ????
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index f4feacad3812..81f2457811eb 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -251,8 +251,8 @@ EXPORT_SYMBOL_GPL(system_freezable_wq);
251 for ((pool) = &std_worker_pools(cpu)[0]; \ 251 for ((pool) = &std_worker_pools(cpu)[0]; \
252 (pool) < &std_worker_pools(cpu)[NR_STD_WORKER_POOLS]; (pool)++) 252 (pool) < &std_worker_pools(cpu)[NR_STD_WORKER_POOLS]; (pool)++)
253 253
254#define for_each_busy_worker(worker, i, pos, pool) \ 254#define for_each_busy_worker(worker, i, pool) \
255 hash_for_each(pool->busy_hash, i, pos, worker, hentry) 255 hash_for_each(pool->busy_hash, i, worker, hentry)
256 256
257static inline int __next_wq_cpu(int cpu, const struct cpumask *mask, 257static inline int __next_wq_cpu(int cpu, const struct cpumask *mask,
258 unsigned int sw) 258 unsigned int sw)
@@ -909,9 +909,8 @@ static struct worker *find_worker_executing_work(struct worker_pool *pool,
909 struct work_struct *work) 909 struct work_struct *work)
910{ 910{
911 struct worker *worker; 911 struct worker *worker;
912 struct hlist_node *tmp;
913 912
914 hash_for_each_possible(pool->busy_hash, worker, tmp, hentry, 913 hash_for_each_possible(pool->busy_hash, worker, hentry,
915 (unsigned long)work) 914 (unsigned long)work)
916 if (worker->current_work == work && 915 if (worker->current_work == work &&
917 worker->current_func == work->func) 916 worker->current_func == work->func)
@@ -1626,7 +1625,6 @@ static void busy_worker_rebind_fn(struct work_struct *work)
1626static void rebind_workers(struct worker_pool *pool) 1625static void rebind_workers(struct worker_pool *pool)
1627{ 1626{
1628 struct worker *worker, *n; 1627 struct worker *worker, *n;
1629 struct hlist_node *pos;
1630 int i; 1628 int i;
1631 1629
1632 lockdep_assert_held(&pool->assoc_mutex); 1630 lockdep_assert_held(&pool->assoc_mutex);
@@ -1648,7 +1646,7 @@ static void rebind_workers(struct worker_pool *pool)
1648 } 1646 }
1649 1647
1650 /* rebind busy workers */ 1648 /* rebind busy workers */
1651 for_each_busy_worker(worker, i, pos, pool) { 1649 for_each_busy_worker(worker, i, pool) {
1652 struct work_struct *rebind_work = &worker->rebind_work; 1650 struct work_struct *rebind_work = &worker->rebind_work;
1653 struct workqueue_struct *wq; 1651 struct workqueue_struct *wq;
1654 1652
@@ -3423,7 +3421,6 @@ static void wq_unbind_fn(struct work_struct *work)
3423 int cpu = smp_processor_id(); 3421 int cpu = smp_processor_id();
3424 struct worker_pool *pool; 3422 struct worker_pool *pool;
3425 struct worker *worker; 3423 struct worker *worker;
3426 struct hlist_node *pos;
3427 int i; 3424 int i;
3428 3425
3429 for_each_std_worker_pool(pool, cpu) { 3426 for_each_std_worker_pool(pool, cpu) {
@@ -3442,7 +3439,7 @@ static void wq_unbind_fn(struct work_struct *work)
3442 list_for_each_entry(worker, &pool->idle_list, entry) 3439 list_for_each_entry(worker, &pool->idle_list, entry)
3443 worker->flags |= WORKER_UNBOUND; 3440 worker->flags |= WORKER_UNBOUND;
3444 3441
3445 for_each_busy_worker(worker, i, pos, pool) 3442 for_each_busy_worker(worker, i, pool)
3446 worker->flags |= WORKER_UNBOUND; 3443 worker->flags |= WORKER_UNBOUND;
3447 3444
3448 pool->flags |= POOL_DISASSOCIATED; 3445 pool->flags |= POOL_DISASSOCIATED;
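The workqueue hunks are the hashtable.h face of the same iterator change: hash_for_each() and hash_for_each_possible() drop their struct hlist_node * cursor too. New form of a busy-hash lookup, with a stripped-down hypothetical worker type:

static DEFINE_HASHTABLE(busy_hash, 6);

struct busy_worker {
        struct hlist_node hentry;
        void *current_work;
};

static struct busy_worker *find_busy(void *work)
{
        struct busy_worker *w;

        hash_for_each_possible(busy_hash, w, hentry, (unsigned long)work) {
                if (w->current_work == work)
                        return w;       /* match on the actual work item */
        }
        return NULL;
}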
diff --git a/lib/Makefile b/lib/Makefile
index 02ed6c04cd7d..d7946ff75b2e 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -23,7 +23,7 @@ lib-y += kobject.o klist.o
23obj-y += bcd.o div64.o sort.o parser.o halfmd4.o debug_locks.o random32.o \ 23obj-y += bcd.o div64.o sort.o parser.o halfmd4.o debug_locks.o random32.o \
24 bust_spinlocks.o hexdump.o kasprintf.o bitmap.o scatterlist.o \ 24 bust_spinlocks.o hexdump.o kasprintf.o bitmap.o scatterlist.o \
25 string_helpers.o gcd.o lcm.o list_sort.o uuid.o flex_array.o \ 25 string_helpers.o gcd.o lcm.o list_sort.o uuid.o flex_array.o \
26 bsearch.o find_last_bit.o find_next_bit.o llist.o memweight.o 26 bsearch.o find_last_bit.o find_next_bit.o llist.o memweight.o kfifo.o
27obj-y += kstrtox.o 27obj-y += kstrtox.o
28obj-$(CONFIG_TEST_KSTRTOX) += test-kstrtox.o 28obj-$(CONFIG_TEST_KSTRTOX) += test-kstrtox.o
29 29
diff --git a/lib/debugobjects.c b/lib/debugobjects.c
index d11808ca4bc4..37061ede8b81 100644
--- a/lib/debugobjects.c
+++ b/lib/debugobjects.c
@@ -109,11 +109,10 @@ static void fill_pool(void)
109 */ 109 */
110static struct debug_obj *lookup_object(void *addr, struct debug_bucket *b) 110static struct debug_obj *lookup_object(void *addr, struct debug_bucket *b)
111{ 111{
112 struct hlist_node *node;
113 struct debug_obj *obj; 112 struct debug_obj *obj;
114 int cnt = 0; 113 int cnt = 0;
115 114
116 hlist_for_each_entry(obj, node, &b->list, node) { 115 hlist_for_each_entry(obj, &b->list, node) {
117 cnt++; 116 cnt++;
118 if (obj->object == addr) 117 if (obj->object == addr)
119 return obj; 118 return obj;
@@ -213,7 +212,7 @@ static void free_object(struct debug_obj *obj)
213static void debug_objects_oom(void) 212static void debug_objects_oom(void)
214{ 213{
215 struct debug_bucket *db = obj_hash; 214 struct debug_bucket *db = obj_hash;
216 struct hlist_node *node, *tmp; 215 struct hlist_node *tmp;
217 HLIST_HEAD(freelist); 216 HLIST_HEAD(freelist);
218 struct debug_obj *obj; 217 struct debug_obj *obj;
219 unsigned long flags; 218 unsigned long flags;
@@ -227,7 +226,7 @@ static void debug_objects_oom(void)
227 raw_spin_unlock_irqrestore(&db->lock, flags); 226 raw_spin_unlock_irqrestore(&db->lock, flags);
228 227
229 /* Now free them */ 228 /* Now free them */
230 hlist_for_each_entry_safe(obj, node, tmp, &freelist, node) { 229 hlist_for_each_entry_safe(obj, tmp, &freelist, node) {
231 hlist_del(&obj->node); 230 hlist_del(&obj->node);
232 free_object(obj); 231 free_object(obj);
233 } 232 }
@@ -658,7 +657,7 @@ debug_object_active_state(void *addr, struct debug_obj_descr *descr,
658static void __debug_check_no_obj_freed(const void *address, unsigned long size) 657static void __debug_check_no_obj_freed(const void *address, unsigned long size)
659{ 658{
660 unsigned long flags, oaddr, saddr, eaddr, paddr, chunks; 659 unsigned long flags, oaddr, saddr, eaddr, paddr, chunks;
661 struct hlist_node *node, *tmp; 660 struct hlist_node *tmp;
662 HLIST_HEAD(freelist); 661 HLIST_HEAD(freelist);
663 struct debug_obj_descr *descr; 662 struct debug_obj_descr *descr;
664 enum debug_obj_state state; 663 enum debug_obj_state state;
@@ -678,7 +677,7 @@ static void __debug_check_no_obj_freed(const void *address, unsigned long size)
678repeat: 677repeat:
679 cnt = 0; 678 cnt = 0;
680 raw_spin_lock_irqsave(&db->lock, flags); 679 raw_spin_lock_irqsave(&db->lock, flags);
681 hlist_for_each_entry_safe(obj, node, tmp, &db->list, node) { 680 hlist_for_each_entry_safe(obj, tmp, &db->list, node) {
682 cnt++; 681 cnt++;
683 oaddr = (unsigned long) obj->object; 682 oaddr = (unsigned long) obj->object;
684 if (oaddr < saddr || oaddr >= eaddr) 683 if (oaddr < saddr || oaddr >= eaddr)
@@ -702,7 +701,7 @@ repeat:
702 raw_spin_unlock_irqrestore(&db->lock, flags); 701 raw_spin_unlock_irqrestore(&db->lock, flags);
703 702
704 /* Now free them */ 703 /* Now free them */
705 hlist_for_each_entry_safe(obj, node, tmp, &freelist, node) { 704 hlist_for_each_entry_safe(obj, tmp, &freelist, node) {
706 hlist_del(&obj->node); 705 hlist_del(&obj->node);
707 free_object(obj); 706 free_object(obj);
708 } 707 }
@@ -1013,7 +1012,7 @@ void __init debug_objects_early_init(void)
1013static int __init debug_objects_replace_static_objects(void) 1012static int __init debug_objects_replace_static_objects(void)
1014{ 1013{
1015 struct debug_bucket *db = obj_hash; 1014 struct debug_bucket *db = obj_hash;
1016 struct hlist_node *node, *tmp; 1015 struct hlist_node *tmp;
1017 struct debug_obj *obj, *new; 1016 struct debug_obj *obj, *new;
1018 HLIST_HEAD(objects); 1017 HLIST_HEAD(objects);
1019 int i, cnt = 0; 1018 int i, cnt = 0;
@@ -1033,7 +1032,7 @@ static int __init debug_objects_replace_static_objects(void)
1033 local_irq_disable(); 1032 local_irq_disable();
1034 1033
1035 /* Remove the statically allocated objects from the pool */ 1034 /* Remove the statically allocated objects from the pool */
1036 hlist_for_each_entry_safe(obj, node, tmp, &obj_pool, node) 1035 hlist_for_each_entry_safe(obj, tmp, &obj_pool, node)
1037 hlist_del(&obj->node); 1036 hlist_del(&obj->node);
1038 /* Move the allocated objects to the pool */ 1037 /* Move the allocated objects to the pool */
1039 hlist_move_list(&objects, &obj_pool); 1038 hlist_move_list(&objects, &obj_pool);
@@ -1042,7 +1041,7 @@ static int __init debug_objects_replace_static_objects(void)
1042 for (i = 0; i < ODEBUG_HASH_SIZE; i++, db++) { 1041 for (i = 0; i < ODEBUG_HASH_SIZE; i++, db++) {
1043 hlist_move_list(&db->list, &objects); 1042 hlist_move_list(&db->list, &objects);
1044 1043
1045 hlist_for_each_entry(obj, node, &objects, node) { 1044 hlist_for_each_entry(obj, &objects, node) {
1046 new = hlist_entry(obj_pool.first, typeof(*obj), node); 1045 new = hlist_entry(obj_pool.first, typeof(*obj), node);
1047 hlist_del(&new->node); 1046 hlist_del(&new->node);
1048 /* copy object data */ 1047 /* copy object data */
@@ -1057,7 +1056,7 @@ static int __init debug_objects_replace_static_objects(void)
1057 obj_pool_used); 1056 obj_pool_used);
1058 return 0; 1057 return 0;
1059free: 1058free:
1060 hlist_for_each_entry_safe(obj, node, tmp, &objects, node) { 1059 hlist_for_each_entry_safe(obj, tmp, &objects, node) {
1061 hlist_del(&obj->node); 1060 hlist_del(&obj->node);
1062 kmem_cache_free(obj_cache, obj); 1061 kmem_cache_free(obj_cache, obj);
1063 } 1062 }
diff --git a/lib/devres.c b/lib/devres.c
index 88ad75952a76..823533138fa0 100644
--- a/lib/devres.c
+++ b/lib/devres.c
@@ -227,6 +227,7 @@ void devm_ioport_unmap(struct device *dev, void __iomem *addr)
227 devm_ioport_map_match, (void *)addr)); 227 devm_ioport_map_match, (void *)addr));
228} 228}
229EXPORT_SYMBOL(devm_ioport_unmap); 229EXPORT_SYMBOL(devm_ioport_unmap);
230#endif /* CONFIG_HAS_IOPORT */
230 231
231#ifdef CONFIG_PCI 232#ifdef CONFIG_PCI
232/* 233/*
@@ -432,4 +433,3 @@ void pcim_iounmap_regions(struct pci_dev *pdev, int mask)
432} 433}
433EXPORT_SYMBOL(pcim_iounmap_regions); 434EXPORT_SYMBOL(pcim_iounmap_regions);
434#endif /* CONFIG_PCI */ 435#endif /* CONFIG_PCI */
435#endif /* CONFIG_HAS_IOPORT */
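The devres.c change is purely structural: the CONFIG_HAS_IOPORT guard previously enclosed the whole CONFIG_PCI block as well, hiding pcim_iomap_regions() and friends on configurations without port I/O. After moving the #endif the two guards are siblings:

#ifdef CONFIG_HAS_IOPORT
/* devm_ioport_map() / devm_ioport_unmap() */
#endif /* CONFIG_HAS_IOPORT */

#ifdef CONFIG_PCI
/* pcim_iomap_regions() etc., now built regardless of HAS_IOPORT */
#endif /* CONFIG_PCI */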
diff --git a/lib/idr.c b/lib/idr.c
index 648239079dd2..73f4d53c02f3 100644
--- a/lib/idr.c
+++ b/lib/idr.c
@@ -35,10 +35,41 @@
35#include <linux/string.h> 35#include <linux/string.h>
36#include <linux/idr.h> 36#include <linux/idr.h>
37#include <linux/spinlock.h> 37#include <linux/spinlock.h>
38#include <linux/percpu.h>
39#include <linux/hardirq.h>
40
41#define MAX_IDR_SHIFT (sizeof(int) * 8 - 1)
42#define MAX_IDR_BIT (1U << MAX_IDR_SHIFT)
43
44/* Leave the possibility of an incomplete final layer */
45#define MAX_IDR_LEVEL ((MAX_IDR_SHIFT + IDR_BITS - 1) / IDR_BITS)
46
47/* Number of id_layer structs to leave in free list */
48#define MAX_IDR_FREE (MAX_IDR_LEVEL * 2)
38 49
39static struct kmem_cache *idr_layer_cache; 50static struct kmem_cache *idr_layer_cache;
51static DEFINE_PER_CPU(struct idr_layer *, idr_preload_head);
52static DEFINE_PER_CPU(int, idr_preload_cnt);
40static DEFINE_SPINLOCK(simple_ida_lock); 53static DEFINE_SPINLOCK(simple_ida_lock);
41 54
55/* the maximum ID which can be allocated given idr->layers */
56static int idr_max(int layers)
57{
58 int bits = min_t(int, layers * IDR_BITS, MAX_IDR_SHIFT);
59
60 return (1 << bits) - 1;
61}
62
63/*
64 * Prefix mask for an idr_layer at @layer. For layer 0, the prefix mask is
65 * all bits except for the lower IDR_BITS. For layer 1, 2 * IDR_BITS, and
66 * so on.
67 */
68static int idr_layer_prefix_mask(int layer)
69{
70 return ~idr_max(layer + 1);
71}
72
42static struct idr_layer *get_from_free_list(struct idr *idp) 73static struct idr_layer *get_from_free_list(struct idr *idp)
43{ 74{
44 struct idr_layer *p; 75 struct idr_layer *p;
@@ -54,6 +85,50 @@ static struct idr_layer *get_from_free_list(struct idr *idp)
54 return(p); 85 return(p);
55} 86}
56 87
88/**
89 * idr_layer_alloc - allocate a new idr_layer
90 * @gfp_mask: allocation mask
91 * @layer_idr: optional idr to allocate from
92 *
93 * If @layer_idr is %NULL, directly allocate one using @gfp_mask or fetch
94 * one from the per-cpu preload buffer. If @layer_idr is not %NULL, fetch
95 * an idr_layer from @idr->id_free.
96 *
97 * @layer_idr is to maintain backward compatibility with the old alloc
98 * interface - idr_pre_get() and idr_get_new*() - and will be removed
99 * together with per-pool preload buffer.
100 */
101static struct idr_layer *idr_layer_alloc(gfp_t gfp_mask, struct idr *layer_idr)
102{
103 struct idr_layer *new;
104
105 /* this is the old path, bypass to get_from_free_list() */
106 if (layer_idr)
107 return get_from_free_list(layer_idr);
108
109 /* try to allocate directly from kmem_cache */
110 new = kmem_cache_zalloc(idr_layer_cache, gfp_mask);
111 if (new)
112 return new;
113
114 /*
115 * Try to fetch one from the per-cpu preload buffer if in process
116 * context. See idr_preload() for details.
117 */
118 if (in_interrupt())
119 return NULL;
120
121 preempt_disable();
122 new = __this_cpu_read(idr_preload_head);
123 if (new) {
124 __this_cpu_write(idr_preload_head, new->ary[0]);
125 __this_cpu_dec(idr_preload_cnt);
126 new->ary[0] = NULL;
127 }
128 preempt_enable();
129 return new;
130}
131
57static void idr_layer_rcu_free(struct rcu_head *head) 132static void idr_layer_rcu_free(struct rcu_head *head)
58{ 133{
59 struct idr_layer *layer; 134 struct idr_layer *layer;
@@ -62,8 +137,10 @@ static void idr_layer_rcu_free(struct rcu_head *head)
62 kmem_cache_free(idr_layer_cache, layer); 137 kmem_cache_free(idr_layer_cache, layer);
63} 138}
64 139
65static inline void free_layer(struct idr_layer *p) 140static inline void free_layer(struct idr *idr, struct idr_layer *p)
66{ 141{
142 if (idr->hint && idr->hint == p)
143 RCU_INIT_POINTER(idr->hint, NULL);
67 call_rcu(&p->rcu_head, idr_layer_rcu_free); 144 call_rcu(&p->rcu_head, idr_layer_rcu_free);
68} 145}
69 146
@@ -92,18 +169,18 @@ static void idr_mark_full(struct idr_layer **pa, int id)
92 struct idr_layer *p = pa[0]; 169 struct idr_layer *p = pa[0];
93 int l = 0; 170 int l = 0;
94 171
95 __set_bit(id & IDR_MASK, &p->bitmap); 172 __set_bit(id & IDR_MASK, p->bitmap);
96 /* 173 /*
97 * If this layer is full mark the bit in the layer above to 174 * If this layer is full mark the bit in the layer above to
98 * show that this part of the radix tree is full. This may 175 * show that this part of the radix tree is full. This may
99 * complete the layer above and require walking up the radix 176 * complete the layer above and require walking up the radix
100 * tree. 177 * tree.
101 */ 178 */
102 while (p->bitmap == IDR_FULL) { 179 while (bitmap_full(p->bitmap, IDR_SIZE)) {
103 if (!(p = pa[++l])) 180 if (!(p = pa[++l]))
104 break; 181 break;
105 id = id >> IDR_BITS; 182 id = id >> IDR_BITS;
106 __set_bit((id & IDR_MASK), &p->bitmap); 183 __set_bit((id & IDR_MASK), p->bitmap);
107 } 184 }
108} 185}
109 186
@@ -133,12 +210,29 @@ int idr_pre_get(struct idr *idp, gfp_t gfp_mask)
133} 210}
134EXPORT_SYMBOL(idr_pre_get); 211EXPORT_SYMBOL(idr_pre_get);
135 212
136static int sub_alloc(struct idr *idp, int *starting_id, struct idr_layer **pa) 213/**
214 * sub_alloc - try to allocate an id without growing the tree depth
215 * @idp: idr handle
216 * @starting_id: id to start search at
217 * @id: pointer to the allocated handle
218 * @pa: idr_layer[MAX_IDR_LEVEL] used as backtrack buffer
219 * @gfp_mask: allocation mask for idr_layer_alloc()
220 * @layer_idr: optional idr passed to idr_layer_alloc()
221 *
222 * Allocate an id in range [@starting_id, INT_MAX] from @idp without
223 * growing its depth. Returns
224 *
225 * the allocated id >= 0 if successful,
226 * -EAGAIN if the tree needs to grow for allocation to succeed,
227 * -ENOSPC if the id space is exhausted,
228 * -ENOMEM if more idr_layers need to be allocated.
229 */
230static int sub_alloc(struct idr *idp, int *starting_id, struct idr_layer **pa,
231 gfp_t gfp_mask, struct idr *layer_idr)
137{ 232{
138 int n, m, sh; 233 int n, m, sh;
139 struct idr_layer *p, *new; 234 struct idr_layer *p, *new;
140 int l, id, oid; 235 int l, id, oid;
141 unsigned long bm;
142 236
143 id = *starting_id; 237 id = *starting_id;
144 restart: 238 restart:
@@ -150,8 +244,7 @@ static int sub_alloc(struct idr *idp, int *starting_id, struct idr_layer **pa)
150 * We run around this while until we reach the leaf node... 244 * We run around this while until we reach the leaf node...
151 */ 245 */
152 n = (id >> (IDR_BITS*l)) & IDR_MASK; 246 n = (id >> (IDR_BITS*l)) & IDR_MASK;
153 bm = ~p->bitmap; 247 m = find_next_zero_bit(p->bitmap, IDR_SIZE, n);
154 m = find_next_bit(&bm, IDR_SIZE, n);
155 if (m == IDR_SIZE) { 248 if (m == IDR_SIZE) {
156 /* no space available go back to previous layer. */ 249 /* no space available go back to previous layer. */
157 l++; 250 l++;
@@ -161,7 +254,7 @@ static int sub_alloc(struct idr *idp, int *starting_id, struct idr_layer **pa)
161 /* if already at the top layer, we need to grow */ 254 /* if already at the top layer, we need to grow */
162 if (id >= 1 << (idp->layers * IDR_BITS)) { 255 if (id >= 1 << (idp->layers * IDR_BITS)) {
163 *starting_id = id; 256 *starting_id = id;
164 return IDR_NEED_TO_GROW; 257 return -EAGAIN;
165 } 258 }
166 p = pa[l]; 259 p = pa[l];
167 BUG_ON(!p); 260 BUG_ON(!p);
@@ -180,17 +273,18 @@ static int sub_alloc(struct idr *idp, int *starting_id, struct idr_layer **pa)
180 id = ((id >> sh) ^ n ^ m) << sh; 273 id = ((id >> sh) ^ n ^ m) << sh;
181 } 274 }
182 if ((id >= MAX_IDR_BIT) || (id < 0)) 275 if ((id >= MAX_IDR_BIT) || (id < 0))
183 return IDR_NOMORE_SPACE; 276 return -ENOSPC;
184 if (l == 0) 277 if (l == 0)
185 break; 278 break;
186 /* 279 /*
187 * Create the layer below if it is missing. 280 * Create the layer below if it is missing.
188 */ 281 */
189 if (!p->ary[m]) { 282 if (!p->ary[m]) {
190 new = get_from_free_list(idp); 283 new = idr_layer_alloc(gfp_mask, layer_idr);
191 if (!new) 284 if (!new)
192 return -1; 285 return -ENOMEM;
193 new->layer = l-1; 286 new->layer = l-1;
287 new->prefix = id & idr_layer_prefix_mask(new->layer);
194 rcu_assign_pointer(p->ary[m], new); 288 rcu_assign_pointer(p->ary[m], new);
195 p->count++; 289 p->count++;
196 } 290 }
@@ -203,7 +297,8 @@ static int sub_alloc(struct idr *idp, int *starting_id, struct idr_layer **pa)
203} 297}
204 298
205static int idr_get_empty_slot(struct idr *idp, int starting_id, 299static int idr_get_empty_slot(struct idr *idp, int starting_id,
206 struct idr_layer **pa) 300 struct idr_layer **pa, gfp_t gfp_mask,
301 struct idr *layer_idr)
207{ 302{
208 struct idr_layer *p, *new; 303 struct idr_layer *p, *new;
209 int layers, v, id; 304 int layers, v, id;
@@ -214,8 +309,8 @@ build_up:
214 p = idp->top; 309 p = idp->top;
215 layers = idp->layers; 310 layers = idp->layers;
216 if (unlikely(!p)) { 311 if (unlikely(!p)) {
217 if (!(p = get_from_free_list(idp))) 312 if (!(p = idr_layer_alloc(gfp_mask, layer_idr)))
218 return -1; 313 return -ENOMEM;
219 p->layer = 0; 314 p->layer = 0;
220 layers = 1; 315 layers = 1;
221 } 316 }
@@ -223,7 +318,7 @@ build_up:
223 * Add a new layer to the top of the tree if the requested 318 * Add a new layer to the top of the tree if the requested
224 * id is larger than the currently allocated space. 319 * id is larger than the currently allocated space.
225 */ 320 */
226 while ((layers < (MAX_IDR_LEVEL - 1)) && (id >= (1 << (layers*IDR_BITS)))) { 321 while (id > idr_max(layers)) {
227 layers++; 322 layers++;
228 if (!p->count) { 323 if (!p->count) {
229 /* special case: if the tree is currently empty, 324 /* special case: if the tree is currently empty,
@@ -231,9 +326,10 @@ build_up:
231 * upwards. 326 * upwards.
232 */ 327 */
233 p->layer++; 328 p->layer++;
329 WARN_ON_ONCE(p->prefix);
234 continue; 330 continue;
235 } 331 }
236 if (!(new = get_from_free_list(idp))) { 332 if (!(new = idr_layer_alloc(gfp_mask, layer_idr))) {
237 /* 333 /*
238 * The allocation failed. If we built part of 334 * The allocation failed. If we built part of
239 * the structure tear it down. 335 * the structure tear it down.
@@ -242,45 +338,42 @@ build_up:
242 for (new = p; p && p != idp->top; new = p) { 338 for (new = p; p && p != idp->top; new = p) {
243 p = p->ary[0]; 339 p = p->ary[0];
244 new->ary[0] = NULL; 340 new->ary[0] = NULL;
245 new->bitmap = new->count = 0; 341 new->count = 0;
342 bitmap_clear(new->bitmap, 0, IDR_SIZE);
246 __move_to_free_list(idp, new); 343 __move_to_free_list(idp, new);
247 } 344 }
248 spin_unlock_irqrestore(&idp->lock, flags); 345 spin_unlock_irqrestore(&idp->lock, flags);
249 return -1; 346 return -ENOMEM;
250 } 347 }
251 new->ary[0] = p; 348 new->ary[0] = p;
252 new->count = 1; 349 new->count = 1;
253 new->layer = layers-1; 350 new->layer = layers-1;
254 if (p->bitmap == IDR_FULL) 351 new->prefix = id & idr_layer_prefix_mask(new->layer);
255 __set_bit(0, &new->bitmap); 352 if (bitmap_full(p->bitmap, IDR_SIZE))
353 __set_bit(0, new->bitmap);
256 p = new; 354 p = new;
257 } 355 }
258 rcu_assign_pointer(idp->top, p); 356 rcu_assign_pointer(idp->top, p);
259 idp->layers = layers; 357 idp->layers = layers;
260 v = sub_alloc(idp, &id, pa); 358 v = sub_alloc(idp, &id, pa, gfp_mask, layer_idr);
261 if (v == IDR_NEED_TO_GROW) 359 if (v == -EAGAIN)
262 goto build_up; 360 goto build_up;
263 return(v); 361 return(v);
264} 362}
265 363
266static int idr_get_new_above_int(struct idr *idp, void *ptr, int starting_id) 364/*
365 * @id and @pa are from a successful allocation from idr_get_empty_slot().
366 * Install the user pointer @ptr and mark the slot full.
367 */
368static void idr_fill_slot(struct idr *idr, void *ptr, int id,
369 struct idr_layer **pa)
267{ 370{
268 struct idr_layer *pa[MAX_IDR_LEVEL]; 371 /* update hint used for lookup, cleared from free_layer() */
269 int id; 372 rcu_assign_pointer(idr->hint, pa[0]);
270
271 id = idr_get_empty_slot(idp, starting_id, pa);
272 if (id >= 0) {
273 /*
274 * Successfully found an empty slot. Install the user
275 * pointer and mark the slot full.
276 */
277 rcu_assign_pointer(pa[0]->ary[id & IDR_MASK],
278 (struct idr_layer *)ptr);
279 pa[0]->count++;
280 idr_mark_full(pa, id);
281 }
282 373
283 return id; 374 rcu_assign_pointer(pa[0]->ary[id & IDR_MASK], (struct idr_layer *)ptr);
375 pa[0]->count++;
376 idr_mark_full(pa, id);
284} 377}
285 378
286/** 379/**
@@ -303,49 +396,124 @@ static int idr_get_new_above_int(struct idr *idp, void *ptr, int starting_id)
303 */ 396 */
304int idr_get_new_above(struct idr *idp, void *ptr, int starting_id, int *id) 397int idr_get_new_above(struct idr *idp, void *ptr, int starting_id, int *id)
305{ 398{
399 struct idr_layer *pa[MAX_IDR_LEVEL + 1];
306 int rv; 400 int rv;
307 401
308 rv = idr_get_new_above_int(idp, ptr, starting_id); 402 rv = idr_get_empty_slot(idp, starting_id, pa, 0, idp);
309 /*
310 * This is a cheap hack until the IDR code can be fixed to
311 * return proper error values.
312 */
313 if (rv < 0) 403 if (rv < 0)
314 return _idr_rc_to_errno(rv); 404 return rv == -ENOMEM ? -EAGAIN : rv;
405
406 idr_fill_slot(idp, ptr, rv, pa);
315 *id = rv; 407 *id = rv;
316 return 0; 408 return 0;
317} 409}
318EXPORT_SYMBOL(idr_get_new_above); 410EXPORT_SYMBOL(idr_get_new_above);
319 411
320/** 412/**
321 * idr_get_new - allocate new idr entry 413 * idr_preload - preload for idr_alloc()
322 * @idp: idr handle 414 * @gfp_mask: allocation mask to use for preloading
323 * @ptr: pointer you want associated with the id
324 * @id: pointer to the allocated handle
325 * 415 *
326 * If allocation from IDR's private freelist fails, idr_get_new_above() will 416 * Preload per-cpu layer buffer for idr_alloc(). Can only be used from
327 * return %-EAGAIN. The caller should retry the idr_pre_get() call to refill 417 * process context and each idr_preload() invocation should be matched with
328 * IDR's preallocation and then retry the idr_get_new_above() call. 418 * idr_preload_end(). Note that preemption is disabled while preloaded.
329 * 419 *
330 * If the idr is full idr_get_new_above() will return %-ENOSPC. 420 * The first idr_alloc() in the preloaded section can be treated as if it
421 * were invoked with @gfp_mask used for preloading. This allows using more
422 * permissive allocation masks for idrs protected by spinlocks.
423 *
424 * For example, if idr_alloc() below fails, the failure can be treated as
425 * if idr_alloc() were called with GFP_KERNEL rather than GFP_NOWAIT.
426 *
427 * idr_preload(GFP_KERNEL);
428 * spin_lock(lock);
429 *
430 * id = idr_alloc(idr, ptr, start, end, GFP_NOWAIT);
331 * 431 *
332 * @id returns a value in the range %0 ... %0x7fffffff 432 * spin_unlock(lock);
433 * idr_preload_end();
434 * if (id < 0)
435 * error;
333 */ 436 */
334int idr_get_new(struct idr *idp, void *ptr, int *id) 437void idr_preload(gfp_t gfp_mask)
335{ 438{
336 int rv; 439 /*
440 * Consuming preload buffer from non-process context breaks preload
441 * allocation guarantee. Disallow usage from those contexts.
442 */
443 WARN_ON_ONCE(in_interrupt());
444 might_sleep_if(gfp_mask & __GFP_WAIT);
445
446 preempt_disable();
337 447
338 rv = idr_get_new_above_int(idp, ptr, 0);
339 /* 448 /*
340 * This is a cheap hack until the IDR code can be fixed to 449 * idr_alloc() is likely to succeed w/o full idr_layer buffer and
341 * return proper error values. 450 * return value from idr_alloc() needs to be checked for failure
451 * anyway. Silently give up if allocation fails. The caller can
452 * treat failures from idr_alloc() as if idr_alloc() were called
453 * with @gfp_mask which should be enough.
342 */ 454 */
343 if (rv < 0) 455 while (__this_cpu_read(idr_preload_cnt) < MAX_IDR_FREE) {
344 return _idr_rc_to_errno(rv); 456 struct idr_layer *new;
345 *id = rv; 457
346 return 0; 458 preempt_enable();
459 new = kmem_cache_zalloc(idr_layer_cache, gfp_mask);
460 preempt_disable();
461 if (!new)
462 break;
463
464 /* link the new one to per-cpu preload list */
465 new->ary[0] = __this_cpu_read(idr_preload_head);
466 __this_cpu_write(idr_preload_head, new);
467 __this_cpu_inc(idr_preload_cnt);
468 }
347} 469}
348EXPORT_SYMBOL(idr_get_new); 470EXPORT_SYMBOL(idr_preload);
471
472/**
473 * idr_alloc - allocate new idr entry
474 * @idr: the (initialized) idr
475 * @ptr: pointer to be associated with the new id
476 * @start: the minimum id (inclusive)
477 * @end: the maximum id (exclusive, <= 0 for max)
478 * @gfp_mask: memory allocation flags
479 *
480 * Allocate an id in [start, end) and associate it with @ptr. If no ID is
481 * available in the specified range, returns -ENOSPC. On memory allocation
482 * failure, returns -ENOMEM.
483 *
484 * Note that @end is treated as max when <= 0. This is to always allow
485 * using @start + N as @end as long as N is inside integer range.
486 *
487 * The user is responsible for exclusively synchronizing all operations
488 * which may modify @idr. However, read-only accesses such as idr_find()
489 * or iteration can be performed under RCU read lock provided the user
490 * destroys @ptr in RCU-safe way after removal from idr.
491 */
492int idr_alloc(struct idr *idr, void *ptr, int start, int end, gfp_t gfp_mask)
493{
494 int max = end > 0 ? end - 1 : INT_MAX; /* inclusive upper limit */
495 struct idr_layer *pa[MAX_IDR_LEVEL + 1];
496 int id;
497
498 might_sleep_if(gfp_mask & __GFP_WAIT);
499
500 /* sanity checks */
501 if (WARN_ON_ONCE(start < 0))
502 return -EINVAL;
503 if (unlikely(max < start))
504 return -ENOSPC;
505
506 /* allocate id */
507 id = idr_get_empty_slot(idr, start, pa, gfp_mask, NULL);
508 if (unlikely(id < 0))
509 return id;
510 if (unlikely(id > max))
511 return -ENOSPC;
512
513 idr_fill_slot(idr, ptr, id, pa);
514 return id;
515}
516EXPORT_SYMBOL_GPL(idr_alloc);
349 517
350static void idr_remove_warning(int id) 518static void idr_remove_warning(int id)
351{ 519{
@@ -357,7 +525,7 @@ static void idr_remove_warning(int id)
357static void sub_remove(struct idr *idp, int shift, int id) 525static void sub_remove(struct idr *idp, int shift, int id)
358{ 526{
359 struct idr_layer *p = idp->top; 527 struct idr_layer *p = idp->top;
360 struct idr_layer **pa[MAX_IDR_LEVEL]; 528 struct idr_layer **pa[MAX_IDR_LEVEL + 1];
361 struct idr_layer ***paa = &pa[0]; 529 struct idr_layer ***paa = &pa[0];
362 struct idr_layer *to_free; 530 struct idr_layer *to_free;
363 int n; 531 int n;
@@ -367,26 +535,26 @@ static void sub_remove(struct idr *idp, int shift, int id)
367 535
368 while ((shift > 0) && p) { 536 while ((shift > 0) && p) {
369 n = (id >> shift) & IDR_MASK; 537 n = (id >> shift) & IDR_MASK;
370 __clear_bit(n, &p->bitmap); 538 __clear_bit(n, p->bitmap);
371 *++paa = &p->ary[n]; 539 *++paa = &p->ary[n];
372 p = p->ary[n]; 540 p = p->ary[n];
373 shift -= IDR_BITS; 541 shift -= IDR_BITS;
374 } 542 }
375 n = id & IDR_MASK; 543 n = id & IDR_MASK;
376 if (likely(p != NULL && test_bit(n, &p->bitmap))){ 544 if (likely(p != NULL && test_bit(n, p->bitmap))) {
377 __clear_bit(n, &p->bitmap); 545 __clear_bit(n, p->bitmap);
378 rcu_assign_pointer(p->ary[n], NULL); 546 rcu_assign_pointer(p->ary[n], NULL);
379 to_free = NULL; 547 to_free = NULL;
380 while(*paa && ! --((**paa)->count)){ 548 while(*paa && ! --((**paa)->count)){
381 if (to_free) 549 if (to_free)
382 free_layer(to_free); 550 free_layer(idp, to_free);
383 to_free = **paa; 551 to_free = **paa;
384 **paa-- = NULL; 552 **paa-- = NULL;
385 } 553 }
386 if (!*paa) 554 if (!*paa)
387 idp->layers = 0; 555 idp->layers = 0;
388 if (to_free) 556 if (to_free)
389 free_layer(to_free); 557 free_layer(idp, to_free);
390 } else 558 } else
391 idr_remove_warning(id); 559 idr_remove_warning(id);
392} 560}
@@ -401,8 +569,9 @@ void idr_remove(struct idr *idp, int id)
401 struct idr_layer *p; 569 struct idr_layer *p;
402 struct idr_layer *to_free; 570 struct idr_layer *to_free;
403 571
404 /* Mask off upper bits we don't use for the search. */ 572 /* see comment in idr_find_slowpath() */
405 id &= MAX_IDR_MASK; 573 if (WARN_ON_ONCE(id < 0))
574 return;
406 575
407 sub_remove(idp, (idp->layers - 1) * IDR_BITS, id); 576 sub_remove(idp, (idp->layers - 1) * IDR_BITS, id);
408 if (idp->top && idp->top->count == 1 && (idp->layers > 1) && 577 if (idp->top && idp->top->count == 1 && (idp->layers > 1) &&
@@ -417,8 +586,9 @@ void idr_remove(struct idr *idp, int id)
417 p = idp->top->ary[0]; 586 p = idp->top->ary[0];
418 rcu_assign_pointer(idp->top, p); 587 rcu_assign_pointer(idp->top, p);
419 --idp->layers; 588 --idp->layers;
420 to_free->bitmap = to_free->count = 0; 589 to_free->count = 0;
421 free_layer(to_free); 590 bitmap_clear(to_free->bitmap, 0, IDR_SIZE);
591 free_layer(idp, to_free);
422 } 592 }
423 while (idp->id_free_cnt >= MAX_IDR_FREE) { 593 while (idp->id_free_cnt >= MAX_IDR_FREE) {
424 p = get_from_free_list(idp); 594 p = get_from_free_list(idp);
@@ -433,34 +603,21 @@ void idr_remove(struct idr *idp, int id)
433} 603}
434EXPORT_SYMBOL(idr_remove); 604EXPORT_SYMBOL(idr_remove);
435 605
436/** 606void __idr_remove_all(struct idr *idp)
437 * idr_remove_all - remove all ids from the given idr tree
438 * @idp: idr handle
439 *
440 * idr_destroy() only frees up unused, cached idp_layers, but this
441 * function will remove all id mappings and leave all idp_layers
442 * unused.
443 *
444 * A typical clean-up sequence for objects stored in an idr tree will
445 * use idr_for_each() to free all objects, if necessary, then
446 * idr_remove_all() to remove all ids, and idr_destroy() to free
447 * up the cached idr_layers.
448 */
449void idr_remove_all(struct idr *idp)
450{ 607{
451 int n, id, max; 608 int n, id, max;
452 int bt_mask; 609 int bt_mask;
453 struct idr_layer *p; 610 struct idr_layer *p;
454 struct idr_layer *pa[MAX_IDR_LEVEL]; 611 struct idr_layer *pa[MAX_IDR_LEVEL + 1];
455 struct idr_layer **paa = &pa[0]; 612 struct idr_layer **paa = &pa[0];
456 613
457 n = idp->layers * IDR_BITS; 614 n = idp->layers * IDR_BITS;
458 p = idp->top; 615 p = idp->top;
459 rcu_assign_pointer(idp->top, NULL); 616 rcu_assign_pointer(idp->top, NULL);
460 max = 1 << n; 617 max = idr_max(idp->layers);
461 618
462 id = 0; 619 id = 0;
463 while (id < max) { 620 while (id >= 0 && id <= max) {
464 while (n > IDR_BITS && p) { 621 while (n > IDR_BITS && p) {
465 n -= IDR_BITS; 622 n -= IDR_BITS;
466 *paa++ = p; 623 *paa++ = p;
@@ -472,21 +629,32 @@ void idr_remove_all(struct idr *idp)
472 /* Get the highest bit that the above add changed from 0->1. */ 629 /* Get the highest bit that the above add changed from 0->1. */
473 while (n < fls(id ^ bt_mask)) { 630 while (n < fls(id ^ bt_mask)) {
474 if (p) 631 if (p)
475 free_layer(p); 632 free_layer(idp, p);
476 n += IDR_BITS; 633 n += IDR_BITS;
477 p = *--paa; 634 p = *--paa;
478 } 635 }
479 } 636 }
480 idp->layers = 0; 637 idp->layers = 0;
481} 638}
482EXPORT_SYMBOL(idr_remove_all); 639EXPORT_SYMBOL(__idr_remove_all);
483 640
484/** 641/**
485 * idr_destroy - release all cached layers within an idr tree 642 * idr_destroy - release all cached layers within an idr tree
486 * @idp: idr handle 643 * @idp: idr handle
644 *
645 * Free all id mappings and all idp_layers. After this function, @idp is
646 * completely unused and can be freed / recycled. The caller is
647 * responsible for ensuring that no one else accesses @idp during or after
648 * idr_destroy().
649 *
650 * A typical clean-up sequence for objects stored in an idr tree will use
651 * idr_for_each() to free all objects, if necessary, then idr_destroy() to
652 * free up the id mappings and cached idr_layers.
487 */ 653 */
488void idr_destroy(struct idr *idp) 654void idr_destroy(struct idr *idp)
489{ 655{
656 __idr_remove_all(idp);
657
490 while (idp->id_free_cnt) { 658 while (idp->id_free_cnt) {
491 struct idr_layer *p = get_from_free_list(idp); 659 struct idr_layer *p = get_from_free_list(idp);
492 kmem_cache_free(idr_layer_cache, p); 660 kmem_cache_free(idr_layer_cache, p);
@@ -494,32 +662,28 @@ void idr_destroy(struct idr *idp)
494} 662}
495EXPORT_SYMBOL(idr_destroy); 663EXPORT_SYMBOL(idr_destroy);
496 664
497/** 665void *idr_find_slowpath(struct idr *idp, int id)
498 * idr_find - return pointer for given id
499 * @idp: idr handle
500 * @id: lookup key
501 *
502 * Return the pointer given the id it has been registered with. A %NULL
503 * return indicates that @id is not valid or you passed %NULL in
504 * idr_get_new().
505 *
506 * This function can be called under rcu_read_lock(), given that the leaf
507 * pointers lifetimes are correctly managed.
508 */
509void *idr_find(struct idr *idp, int id)
510{ 666{
511 int n; 667 int n;
512 struct idr_layer *p; 668 struct idr_layer *p;
513 669
670 /*
671 * If @id is negative, idr_find() used to ignore the sign bit and
672 * performed lookup with the rest of bits, which is weird and can
673 * lead to very obscure bugs. We're now returning NULL for all
674 * negative IDs but just in case somebody was depending on the sign
675 * bit being ignored, let's trigger WARN_ON_ONCE() so that they can
676 * be detected and fixed. WARN_ON_ONCE() can later be removed.
677 */
678 if (WARN_ON_ONCE(id < 0))
679 return NULL;
680
514 p = rcu_dereference_raw(idp->top); 681 p = rcu_dereference_raw(idp->top);
515 if (!p) 682 if (!p)
516 return NULL; 683 return NULL;
517 n = (p->layer+1) * IDR_BITS; 684 n = (p->layer+1) * IDR_BITS;
518 685
519 /* Mask off upper bits we don't use for the search. */ 686 if (id > idr_max(p->layer + 1))
520 id &= MAX_IDR_MASK;
521
522 if (id >= (1 << n))
523 return NULL; 687 return NULL;
524 BUG_ON(n == 0); 688 BUG_ON(n == 0);
525 689
@@ -530,7 +694,7 @@ void *idr_find(struct idr *idp, int id)
530 } 694 }
531 return((void *)p); 695 return((void *)p);
532} 696}
533EXPORT_SYMBOL(idr_find); 697EXPORT_SYMBOL(idr_find_slowpath);
534 698
535/** 699/**
536 * idr_for_each - iterate through all stored pointers 700 * idr_for_each - iterate through all stored pointers
@@ -555,15 +719,15 @@ int idr_for_each(struct idr *idp,
555{ 719{
556 int n, id, max, error = 0; 720 int n, id, max, error = 0;
557 struct idr_layer *p; 721 struct idr_layer *p;
558 struct idr_layer *pa[MAX_IDR_LEVEL]; 722 struct idr_layer *pa[MAX_IDR_LEVEL + 1];
559 struct idr_layer **paa = &pa[0]; 723 struct idr_layer **paa = &pa[0];
560 724
561 n = idp->layers * IDR_BITS; 725 n = idp->layers * IDR_BITS;
562 p = rcu_dereference_raw(idp->top); 726 p = rcu_dereference_raw(idp->top);
563 max = 1 << n; 727 max = idr_max(idp->layers);
564 728
565 id = 0; 729 id = 0;
566 while (id < max) { 730 while (id >= 0 && id <= max) {
567 while (n > 0 && p) { 731 while (n > 0 && p) {
568 n -= IDR_BITS; 732 n -= IDR_BITS;
569 *paa++ = p; 733 *paa++ = p;
@@ -601,7 +765,7 @@ EXPORT_SYMBOL(idr_for_each);
601 */ 765 */
602void *idr_get_next(struct idr *idp, int *nextidp) 766void *idr_get_next(struct idr *idp, int *nextidp)
603{ 767{
604 struct idr_layer *p, *pa[MAX_IDR_LEVEL]; 768 struct idr_layer *p, *pa[MAX_IDR_LEVEL + 1];
605 struct idr_layer **paa = &pa[0]; 769 struct idr_layer **paa = &pa[0];
606 int id = *nextidp; 770 int id = *nextidp;
607 int n, max; 771 int n, max;
@@ -611,9 +775,9 @@ void *idr_get_next(struct idr *idp, int *nextidp)
611 if (!p) 775 if (!p)
612 return NULL; 776 return NULL;
613 n = (p->layer + 1) * IDR_BITS; 777 n = (p->layer + 1) * IDR_BITS;
614 max = 1 << n; 778 max = idr_max(p->layer + 1);
615 779
616 while (id < max) { 780 while (id >= 0 && id <= max) {
617 while (n > 0 && p) { 781 while (n > 0 && p) {
618 n -= IDR_BITS; 782 n -= IDR_BITS;
619 *paa++ = p; 783 *paa++ = p;
@@ -625,7 +789,14 @@ void *idr_get_next(struct idr *idp, int *nextidp)
625 return p; 789 return p;
626 } 790 }
627 791
628 id += 1 << n; 792 /*
793 * Proceed to the next layer at the current level. Unlike
794 * idr_for_each(), @id isn't guaranteed to be aligned to
795 * layer boundary at this point and adding 1 << n may
796 * incorrectly skip IDs. Make sure we jump to the
797 * beginning of the next layer using round_up().
798 */
799 id = round_up(id + 1, 1 << n);
629 while (n < fls(id)) { 800 while (n < fls(id)) {
630 n += IDR_BITS; 801 n += IDR_BITS;
631 p = *--paa; 802 p = *--paa;
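A worked example of the round_up() jump above, with illustrative numbers (assuming the current level spans 1 << n == 256 ids; the real span depends on IDR_BITS):

	/* id = 300 sits inside the layer covering [256, 512).  The old
	 * "id += 1 << n" would yield 556 and silently skip ids 512..555;
	 * round_up(300 + 1, 256) == 512, the first id of the next layer. */
	id = round_up(id + 1, 1 << n);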
@@ -653,14 +824,16 @@ void *idr_replace(struct idr *idp, void *ptr, int id)
653 int n; 824 int n;
654 struct idr_layer *p, *old_p; 825 struct idr_layer *p, *old_p;
655 826
827 /* see comment in idr_find_slowpath() */
828 if (WARN_ON_ONCE(id < 0))
829 return ERR_PTR(-EINVAL);
830
656 p = idp->top; 831 p = idp->top;
657 if (!p) 832 if (!p)
658 return ERR_PTR(-EINVAL); 833 return ERR_PTR(-EINVAL);
659 834
660 n = (p->layer+1) * IDR_BITS; 835 n = (p->layer+1) * IDR_BITS;
661 836
662 id &= MAX_IDR_MASK;
663
664 if (id >= (1 << n)) 837 if (id >= (1 << n))
665 return ERR_PTR(-EINVAL); 838 return ERR_PTR(-EINVAL);
666 839
@@ -671,7 +844,7 @@ void *idr_replace(struct idr *idp, void *ptr, int id)
671 } 844 }
672 845
673 n = id & IDR_MASK; 846 n = id & IDR_MASK;
674 if (unlikely(p == NULL || !test_bit(n, &p->bitmap))) 847 if (unlikely(p == NULL || !test_bit(n, p->bitmap)))
675 return ERR_PTR(-ENOENT); 848 return ERR_PTR(-ENOENT);
676 849
677 old_p = p->ary[n]; 850 old_p = p->ary[n];
@@ -780,7 +953,7 @@ EXPORT_SYMBOL(ida_pre_get);
780 */ 953 */
781int ida_get_new_above(struct ida *ida, int starting_id, int *p_id) 954int ida_get_new_above(struct ida *ida, int starting_id, int *p_id)
782{ 955{
783 struct idr_layer *pa[MAX_IDR_LEVEL]; 956 struct idr_layer *pa[MAX_IDR_LEVEL + 1];
784 struct ida_bitmap *bitmap; 957 struct ida_bitmap *bitmap;
785 unsigned long flags; 958 unsigned long flags;
786 int idr_id = starting_id / IDA_BITMAP_BITS; 959 int idr_id = starting_id / IDA_BITMAP_BITS;
@@ -789,9 +962,9 @@ int ida_get_new_above(struct ida *ida, int starting_id, int *p_id)
789 962
790 restart: 963 restart:
791 /* get vacant slot */ 964 /* get vacant slot */
792 t = idr_get_empty_slot(&ida->idr, idr_id, pa); 965 t = idr_get_empty_slot(&ida->idr, idr_id, pa, 0, &ida->idr);
793 if (t < 0) 966 if (t < 0)
794 return _idr_rc_to_errno(t); 967 return t == -ENOMEM ? -EAGAIN : t;
795 968
796 if (t * IDA_BITMAP_BITS >= MAX_IDR_BIT) 969 if (t * IDA_BITMAP_BITS >= MAX_IDR_BIT)
797 return -ENOSPC; 970 return -ENOSPC;
@@ -852,25 +1025,6 @@ int ida_get_new_above(struct ida *ida, int starting_id, int *p_id)
852EXPORT_SYMBOL(ida_get_new_above); 1025EXPORT_SYMBOL(ida_get_new_above);
853 1026
854/** 1027/**
855 * ida_get_new - allocate new ID
856 * @ida: idr handle
857 * @p_id: pointer to the allocated handle
858 *
859 * Allocate new ID. It should be called with any required locks.
860 *
861 * If memory is required, it will return %-EAGAIN, you should unlock
862 * and go back to the idr_pre_get() call. If the idr is full, it will
863 * return %-ENOSPC.
864 *
865 * @p_id returns a value in the range %0 ... %0x7fffffff.
866 */
867int ida_get_new(struct ida *ida, int *p_id)
868{
869 return ida_get_new_above(ida, 0, p_id);
870}
871EXPORT_SYMBOL(ida_get_new);
872
873/**
874 * ida_remove - remove the given ID 1028 * ida_remove - remove the given ID
875 * @ida: ida handle 1029 * @ida: ida handle
876 * @id: ID to free 1030 * @id: ID to free
@@ -887,7 +1041,7 @@ void ida_remove(struct ida *ida, int id)
887 /* clear full bits while looking up the leaf idr_layer */ 1041 /* clear full bits while looking up the leaf idr_layer */
888 while ((shift > 0) && p) { 1042 while ((shift > 0) && p) {
889 n = (idr_id >> shift) & IDR_MASK; 1043 n = (idr_id >> shift) & IDR_MASK;
890 __clear_bit(n, &p->bitmap); 1044 __clear_bit(n, p->bitmap);
891 p = p->ary[n]; 1045 p = p->ary[n];
892 shift -= IDR_BITS; 1046 shift -= IDR_BITS;
893 } 1047 }
@@ -896,7 +1050,7 @@ void ida_remove(struct ida *ida, int id)
896 goto err; 1050 goto err;
897 1051
898 n = idr_id & IDR_MASK; 1052 n = idr_id & IDR_MASK;
899 __clear_bit(n, &p->bitmap); 1053 __clear_bit(n, p->bitmap);
900 1054
901 bitmap = (void *)p->ary[n]; 1055 bitmap = (void *)p->ary[n];
902 if (!test_bit(offset, bitmap->bitmap)) 1056 if (!test_bit(offset, bitmap->bitmap))
@@ -905,7 +1059,7 @@ void ida_remove(struct ida *ida, int id)
905 /* update bitmap and remove it if empty */ 1059 /* update bitmap and remove it if empty */
906 __clear_bit(offset, bitmap->bitmap); 1060 __clear_bit(offset, bitmap->bitmap);
907 if (--bitmap->nr_busy == 0) { 1061 if (--bitmap->nr_busy == 0) {
908 __set_bit(n, &p->bitmap); /* to please idr_remove() */ 1062 __set_bit(n, p->bitmap); /* to please idr_remove() */
909 idr_remove(&ida->idr, idr_id); 1063 idr_remove(&ida->idr, idr_id);
910 free_bitmap(ida, bitmap); 1064 free_bitmap(ida, bitmap);
911 } 1065 }
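Taken together, the new calls replace the old idr_pre_get()/idr_get_new() retry loop. A minimal sketch of the intended pattern, following the idr_preload() kernel-doc above (the context struct, lock and function names are illustrative, and the idr is assumed to have been set up with idr_init()):

	struct my_ctx {				/* hypothetical API user */
		struct idr idr;
		spinlock_t lock;
	};

	static int my_ctx_register(struct my_ctx *ctx, void *obj)
	{
		int id;

		idr_preload(GFP_KERNEL);	/* may sleep; fills per-cpu buffer */
		spin_lock(&ctx->lock);
		/* first idr_alloc() in the section behaves as if GFP_KERNEL */
		id = idr_alloc(&ctx->idr, obj, 0, 0, GFP_NOWAIT);
		spin_unlock(&ctx->lock);
		idr_preload_end();		/* re-enables preemption */

		return id;	/* >= 0 on success, -ENOMEM/-ENOSPC on failure */
	}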
diff --git a/kernel/kfifo.c b/lib/kfifo.c
index 59dcf5b81d24..7b7f83027b7b 100644
--- a/kernel/kfifo.c
+++ b/lib/kfifo.c
@@ -42,8 +42,7 @@ int __kfifo_alloc(struct __kfifo *fifo, unsigned int size,
42 * round down to the next power of 2, since our 'let the indices 42 * round down to the next power of 2, since our 'let the indices
43 * wrap' technique works only in this case. 43 * wrap' technique works only in this case.
44 */ 44 */
45 if (!is_power_of_2(size)) 45 size = roundup_pow_of_two(size);
46 size = rounddown_pow_of_two(size);
47 46
48 fifo->in = 0; 47 fifo->in = 0;
49 fifo->out = 0; 48 fifo->out = 0;
@@ -83,8 +82,7 @@ int __kfifo_init(struct __kfifo *fifo, void *buffer,
83{ 82{
84 size /= esize; 83 size /= esize;
85 84
86 if (!is_power_of_2(size)) 85 size = roundup_pow_of_two(size);
87 size = rounddown_pow_of_two(size);
88 86
89 fifo->in = 0; 87 fifo->in = 0;
90 fifo->out = 0; 88 fifo->out = 0;
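The user-visible effect of the rounding change, sketched with the public kfifo wrappers (the requested size of 100 is illustrative):

	struct kfifo fifo;

	/* ask for room for 100 elements: before this fix the size was
	 * rounded *down* to 64, silently losing capacity; it is now
	 * rounded up, so kfifo_size() reports 128. */
	if (kfifo_alloc(&fifo, 100, GFP_KERNEL))
		return -ENOMEM;
	WARN_ON(kfifo_size(&fifo) < 100);
	kfifo_free(&fifo);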
diff --git a/lib/lru_cache.c b/lib/lru_cache.c
index d71d89498943..8335d39d2ccd 100644
--- a/lib/lru_cache.c
+++ b/lib/lru_cache.c
@@ -262,12 +262,11 @@ static struct hlist_head *lc_hash_slot(struct lru_cache *lc, unsigned int enr)
262static struct lc_element *__lc_find(struct lru_cache *lc, unsigned int enr, 262static struct lc_element *__lc_find(struct lru_cache *lc, unsigned int enr,
263 bool include_changing) 263 bool include_changing)
264{ 264{
265 struct hlist_node *n;
266 struct lc_element *e; 265 struct lc_element *e;
267 266
268 BUG_ON(!lc); 267 BUG_ON(!lc);
269 BUG_ON(!lc->nr_elements); 268 BUG_ON(!lc->nr_elements);
270 hlist_for_each_entry(e, n, lc_hash_slot(lc, enr), colision) { 269 hlist_for_each_entry(e, lc_hash_slot(lc, enr), colision) {
271 /* "about to be changed" elements, pending transaction commit, 270 /* "about to be changed" elements, pending transaction commit,
272 * are hashed by their "new number". "Normal" elements have 271 * are hashed by their "new number". "Normal" elements have
273 * lc_number == lc_new_number. */ 272 * lc_number == lc_new_number. */
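This is the shape of the hlist change applied here and across the rest of the series ("hlist: drop the node parameter from iterators"): the entry pointer itself now serves as the cursor. A schematic with a hypothetical entry type:

	struct foo {
		struct hlist_node link;
		int key;
	};

	struct foo *e;

	/* before: struct hlist_node *n; hlist_for_each_entry(e, n, head, link) */
	hlist_for_each_entry(e, head, link)
		pr_info("key %d\n", e->key);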
diff --git a/lib/scatterlist.c b/lib/scatterlist.c
index 7874b01e816e..b83c144d731f 100644
--- a/lib/scatterlist.c
+++ b/lib/scatterlist.c
@@ -394,6 +394,44 @@ int sg_alloc_table_from_pages(struct sg_table *sgt,
394} 394}
395EXPORT_SYMBOL(sg_alloc_table_from_pages); 395EXPORT_SYMBOL(sg_alloc_table_from_pages);
396 396
397void __sg_page_iter_start(struct sg_page_iter *piter,
398 struct scatterlist *sglist, unsigned int nents,
399 unsigned long pgoffset)
400{
401 piter->__pg_advance = 0;
402 piter->__nents = nents;
403
404 piter->page = NULL;
405 piter->sg = sglist;
406 piter->sg_pgoffset = pgoffset;
407}
408EXPORT_SYMBOL(__sg_page_iter_start);
409
410static int sg_page_count(struct scatterlist *sg)
411{
412 return PAGE_ALIGN(sg->offset + sg->length) >> PAGE_SHIFT;
413}
414
415bool __sg_page_iter_next(struct sg_page_iter *piter)
416{
417 if (!piter->__nents || !piter->sg)
418 return false;
419
420 piter->sg_pgoffset += piter->__pg_advance;
421 piter->__pg_advance = 1;
422
423 while (piter->sg_pgoffset >= sg_page_count(piter->sg)) {
424 piter->sg_pgoffset -= sg_page_count(piter->sg);
425 piter->sg = sg_next(piter->sg);
426 if (!--piter->__nents || !piter->sg)
427 return false;
428 }
429 piter->page = nth_page(sg_page(piter->sg), piter->sg_pgoffset);
430
431 return true;
432}
433EXPORT_SYMBOL(__sg_page_iter_next);
434
397/** 435/**
398 * sg_miter_start - start mapping iteration over a sg list 436 * sg_miter_start - start mapping iteration over a sg list
399 * @miter: sg mapping iter to be started 437 * @miter: sg mapping iter to be started
@@ -411,9 +449,7 @@ void sg_miter_start(struct sg_mapping_iter *miter, struct scatterlist *sgl,
411{ 449{
412 memset(miter, 0, sizeof(struct sg_mapping_iter)); 450 memset(miter, 0, sizeof(struct sg_mapping_iter));
413 451
414 miter->__sg = sgl; 452 __sg_page_iter_start(&miter->piter, sgl, nents, 0);
415 miter->__nents = nents;
416 miter->__offset = 0;
417 WARN_ON(!(flags & (SG_MITER_TO_SG | SG_MITER_FROM_SG))); 453 WARN_ON(!(flags & (SG_MITER_TO_SG | SG_MITER_FROM_SG)));
418 miter->__flags = flags; 454 miter->__flags = flags;
419} 455}
@@ -438,36 +474,35 @@ EXPORT_SYMBOL(sg_miter_start);
438 */ 474 */
439bool sg_miter_next(struct sg_mapping_iter *miter) 475bool sg_miter_next(struct sg_mapping_iter *miter)
440{ 476{
441 unsigned int off, len;
442
443 /* check for end and drop resources from the last iteration */
444 if (!miter->__nents)
445 return false;
446
447 sg_miter_stop(miter); 477 sg_miter_stop(miter);
448 478
449 /* get to the next sg if necessary. __offset is adjusted by stop */ 479 /*
450 while (miter->__offset == miter->__sg->length) { 480 * Get to the next page if necessary.
451 if (--miter->__nents) { 481 * __remaining, __offset is adjusted by sg_miter_stop
452 miter->__sg = sg_next(miter->__sg); 482 */
453 miter->__offset = 0; 483 if (!miter->__remaining) {
454 } else 484 struct scatterlist *sg;
485 unsigned long pgoffset;
486
487 if (!__sg_page_iter_next(&miter->piter))
455 return false; 488 return false;
456 }
457 489
458 /* map the next page */ 490 sg = miter->piter.sg;
459 off = miter->__sg->offset + miter->__offset; 491 pgoffset = miter->piter.sg_pgoffset;
460 len = miter->__sg->length - miter->__offset;
461 492
462 miter->page = nth_page(sg_page(miter->__sg), off >> PAGE_SHIFT); 493 miter->__offset = pgoffset ? 0 : sg->offset;
463 off &= ~PAGE_MASK; 494 miter->__remaining = sg->offset + sg->length -
464 miter->length = min_t(unsigned int, len, PAGE_SIZE - off); 495 (pgoffset << PAGE_SHIFT) - miter->__offset;
465 miter->consumed = miter->length; 496 miter->__remaining = min_t(unsigned long, miter->__remaining,
497 PAGE_SIZE - miter->__offset);
498 }
499 miter->page = miter->piter.page;
500 miter->consumed = miter->length = miter->__remaining;
466 501
467 if (miter->__flags & SG_MITER_ATOMIC) 502 if (miter->__flags & SG_MITER_ATOMIC)
468 miter->addr = kmap_atomic(miter->page) + off; 503 miter->addr = kmap_atomic(miter->page) + miter->__offset;
469 else 504 else
470 miter->addr = kmap(miter->page) + off; 505 miter->addr = kmap(miter->page) + miter->__offset;
471 506
472 return true; 507 return true;
473} 508}
@@ -494,6 +529,7 @@ void sg_miter_stop(struct sg_mapping_iter *miter)
494 /* drop resources from the last iteration */ 529 /* drop resources from the last iteration */
495 if (miter->addr) { 530 if (miter->addr) {
496 miter->__offset += miter->consumed; 531 miter->__offset += miter->consumed;
532 miter->__remaining -= miter->consumed;
497 533
498 if (miter->__flags & SG_MITER_TO_SG) 534 if (miter->__flags & SG_MITER_TO_SG)
499 flush_kernel_dcache_page(miter->page); 535 flush_kernel_dcache_page(miter->page);
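A minimal sketch of the new page iterator exported above; sgt names an already-populated struct sg_table, and do_something() is a placeholder:

	struct sg_page_iter piter;

	__sg_page_iter_start(&piter, sgt->sgl, sgt->nents, 0);
	while (__sg_page_iter_next(&piter)) {
		/* visits each PAGE_SIZE chunk of the list in order, however
		 * many pages an individual scatterlist entry covers */
		do_something(piter.page);
	}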
diff --git a/mm/Kconfig b/mm/Kconfig
index 2c7aea7106f9..ae55c1e04d10 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -287,7 +287,7 @@ config NR_QUICK
287 287
288config VIRT_TO_BUS 288config VIRT_TO_BUS
289 def_bool y 289 def_bool y
290 depends on !ARCH_NO_VIRT_TO_BUS 290 depends on HAVE_VIRT_TO_BUS
291 291
292config MMU_NOTIFIER 292config MMU_NOTIFIER
293 bool 293 bool
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index bfa142e67b1c..e2f7f5aaaafb 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1906,9 +1906,8 @@ static inline void free_mm_slot(struct mm_slot *mm_slot)
1906static struct mm_slot *get_mm_slot(struct mm_struct *mm) 1906static struct mm_slot *get_mm_slot(struct mm_struct *mm)
1907{ 1907{
1908 struct mm_slot *mm_slot; 1908 struct mm_slot *mm_slot;
1909 struct hlist_node *node;
1910 1909
1911 hash_for_each_possible(mm_slots_hash, mm_slot, node, hash, (unsigned long)mm) 1910 hash_for_each_possible(mm_slots_hash, mm_slot, hash, (unsigned long)mm)
1912 if (mm == mm_slot->mm) 1911 if (mm == mm_slot->mm)
1913 return mm_slot; 1912 return mm_slot;
1914 1913
diff --git a/mm/internal.h b/mm/internal.h
index 1c0c4cc0fcf7..8562de0a5197 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -195,7 +195,7 @@ static inline int mlocked_vma_newpage(struct vm_area_struct *vma,
195 * must be called with vma's mmap_sem held for read or write, and page locked. 195 * must be called with vma's mmap_sem held for read or write, and page locked.
196 */ 196 */
197extern void mlock_vma_page(struct page *page); 197extern void mlock_vma_page(struct page *page);
198extern void munlock_vma_page(struct page *page); 198extern unsigned int munlock_vma_page(struct page *page);
199 199
200/* 200/*
201 * Clear the page's PageMlocked(). This can be useful in a situation where 201 * Clear the page's PageMlocked(). This can be useful in a situation where
diff --git a/mm/kmemleak.c b/mm/kmemleak.c
index 83dd5fbf5e60..c8d7f3110fd0 100644
--- a/mm/kmemleak.c
+++ b/mm/kmemleak.c
@@ -436,7 +436,7 @@ static int get_object(struct kmemleak_object *object)
436 */ 436 */
437static void free_object_rcu(struct rcu_head *rcu) 437static void free_object_rcu(struct rcu_head *rcu)
438{ 438{
439 struct hlist_node *elem, *tmp; 439 struct hlist_node *tmp;
440 struct kmemleak_scan_area *area; 440 struct kmemleak_scan_area *area;
441 struct kmemleak_object *object = 441 struct kmemleak_object *object =
442 container_of(rcu, struct kmemleak_object, rcu); 442 container_of(rcu, struct kmemleak_object, rcu);
@@ -445,8 +445,8 @@ static void free_object_rcu(struct rcu_head *rcu)
445 * Once use_count is 0 (guaranteed by put_object), there is no other 445 * Once use_count is 0 (guaranteed by put_object), there is no other
446 * code accessing this object, hence no need for locking. 446 * code accessing this object, hence no need for locking.
447 */ 447 */
448 hlist_for_each_entry_safe(area, elem, tmp, &object->area_list, node) { 448 hlist_for_each_entry_safe(area, tmp, &object->area_list, node) {
449 hlist_del(elem); 449 hlist_del(&area->node);
450 kmem_cache_free(scan_area_cache, area); 450 kmem_cache_free(scan_area_cache, area);
451 } 451 }
452 kmem_cache_free(object_cache, object); 452 kmem_cache_free(object_cache, object);
@@ -1177,7 +1177,6 @@ static void scan_block(void *_start, void *_end,
1177static void scan_object(struct kmemleak_object *object) 1177static void scan_object(struct kmemleak_object *object)
1178{ 1178{
1179 struct kmemleak_scan_area *area; 1179 struct kmemleak_scan_area *area;
1180 struct hlist_node *elem;
1181 unsigned long flags; 1180 unsigned long flags;
1182 1181
1183 /* 1182 /*
@@ -1205,7 +1204,7 @@ static void scan_object(struct kmemleak_object *object)
1205 spin_lock_irqsave(&object->lock, flags); 1204 spin_lock_irqsave(&object->lock, flags);
1206 } 1205 }
1207 } else 1206 } else
1208 hlist_for_each_entry(area, elem, &object->area_list, node) 1207 hlist_for_each_entry(area, &object->area_list, node)
1209 scan_block((void *)area->start, 1208 scan_block((void *)area->start,
1210 (void *)(area->start + area->size), 1209 (void *)(area->start + area->size),
1211 object, 0); 1210 object, 0);
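The deletion-safe variant changes shape the same way, except the extra node pointer survives as the lookahead cursor and hlist_del() now goes through the entry's own node, as in the hunk above. A schematic, reusing the hypothetical struct foo from the earlier sketch:

	struct foo *e;
	struct hlist_node *tmp;		/* still needed: saves the next node */

	hlist_for_each_entry_safe(e, tmp, head, link) {
		hlist_del(&e->link);	/* was hlist_del(elem) */
		kfree(e);
	}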
diff --git a/mm/ksm.c b/mm/ksm.c
index ab2ba9ad3c59..85bfd4c16346 100644
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -320,10 +320,9 @@ static inline void free_mm_slot(struct mm_slot *mm_slot)
320 320
321static struct mm_slot *get_mm_slot(struct mm_struct *mm) 321static struct mm_slot *get_mm_slot(struct mm_struct *mm)
322{ 322{
323 struct hlist_node *node;
324 struct mm_slot *slot; 323 struct mm_slot *slot;
325 324
326 hash_for_each_possible(mm_slots_hash, slot, node, link, (unsigned long)mm) 325 hash_for_each_possible(mm_slots_hash, slot, link, (unsigned long)mm)
327 if (slot->mm == mm) 326 if (slot->mm == mm)
328 return slot; 327 return slot;
329 328
@@ -496,9 +495,8 @@ static inline int get_kpfn_nid(unsigned long kpfn)
496static void remove_node_from_stable_tree(struct stable_node *stable_node) 495static void remove_node_from_stable_tree(struct stable_node *stable_node)
497{ 496{
498 struct rmap_item *rmap_item; 497 struct rmap_item *rmap_item;
499 struct hlist_node *hlist;
500 498
501 hlist_for_each_entry(rmap_item, hlist, &stable_node->hlist, hlist) { 499 hlist_for_each_entry(rmap_item, &stable_node->hlist, hlist) {
502 if (rmap_item->hlist.next) 500 if (rmap_item->hlist.next)
503 ksm_pages_sharing--; 501 ksm_pages_sharing--;
504 else 502 else
@@ -1898,7 +1896,6 @@ int page_referenced_ksm(struct page *page, struct mem_cgroup *memcg,
1898{ 1896{
1899 struct stable_node *stable_node; 1897 struct stable_node *stable_node;
1900 struct rmap_item *rmap_item; 1898 struct rmap_item *rmap_item;
1901 struct hlist_node *hlist;
1902 unsigned int mapcount = page_mapcount(page); 1899 unsigned int mapcount = page_mapcount(page);
1903 int referenced = 0; 1900 int referenced = 0;
1904 int search_new_forks = 0; 1901 int search_new_forks = 0;
@@ -1910,7 +1907,7 @@ int page_referenced_ksm(struct page *page, struct mem_cgroup *memcg,
1910 if (!stable_node) 1907 if (!stable_node)
1911 return 0; 1908 return 0;
1912again: 1909again:
1913 hlist_for_each_entry(rmap_item, hlist, &stable_node->hlist, hlist) { 1910 hlist_for_each_entry(rmap_item, &stable_node->hlist, hlist) {
1914 struct anon_vma *anon_vma = rmap_item->anon_vma; 1911 struct anon_vma *anon_vma = rmap_item->anon_vma;
1915 struct anon_vma_chain *vmac; 1912 struct anon_vma_chain *vmac;
1916 struct vm_area_struct *vma; 1913 struct vm_area_struct *vma;
@@ -1952,7 +1949,6 @@ out:
1952int try_to_unmap_ksm(struct page *page, enum ttu_flags flags) 1949int try_to_unmap_ksm(struct page *page, enum ttu_flags flags)
1953{ 1950{
1954 struct stable_node *stable_node; 1951 struct stable_node *stable_node;
1955 struct hlist_node *hlist;
1956 struct rmap_item *rmap_item; 1952 struct rmap_item *rmap_item;
1957 int ret = SWAP_AGAIN; 1953 int ret = SWAP_AGAIN;
1958 int search_new_forks = 0; 1954 int search_new_forks = 0;
@@ -1964,7 +1960,7 @@ int try_to_unmap_ksm(struct page *page, enum ttu_flags flags)
1964 if (!stable_node) 1960 if (!stable_node)
1965 return SWAP_FAIL; 1961 return SWAP_FAIL;
1966again: 1962again:
1967 hlist_for_each_entry(rmap_item, hlist, &stable_node->hlist, hlist) { 1963 hlist_for_each_entry(rmap_item, &stable_node->hlist, hlist) {
1968 struct anon_vma *anon_vma = rmap_item->anon_vma; 1964 struct anon_vma *anon_vma = rmap_item->anon_vma;
1969 struct anon_vma_chain *vmac; 1965 struct anon_vma_chain *vmac;
1970 struct vm_area_struct *vma; 1966 struct vm_area_struct *vma;
@@ -2005,7 +2001,6 @@ int rmap_walk_ksm(struct page *page, int (*rmap_one)(struct page *,
2005 struct vm_area_struct *, unsigned long, void *), void *arg) 2001 struct vm_area_struct *, unsigned long, void *), void *arg)
2006{ 2002{
2007 struct stable_node *stable_node; 2003 struct stable_node *stable_node;
2008 struct hlist_node *hlist;
2009 struct rmap_item *rmap_item; 2004 struct rmap_item *rmap_item;
2010 int ret = SWAP_AGAIN; 2005 int ret = SWAP_AGAIN;
2011 int search_new_forks = 0; 2006 int search_new_forks = 0;
@@ -2017,7 +2012,7 @@ int rmap_walk_ksm(struct page *page, int (*rmap_one)(struct page *,
2017 if (!stable_node) 2012 if (!stable_node)
2018 return ret; 2013 return ret;
2019again: 2014again:
2020 hlist_for_each_entry(rmap_item, hlist, &stable_node->hlist, hlist) { 2015 hlist_for_each_entry(rmap_item, &stable_node->hlist, hlist) {
2021 struct anon_vma *anon_vma = rmap_item->anon_vma; 2016 struct anon_vma *anon_vma = rmap_item->anon_vma;
2022 struct anon_vma_chain *vmac; 2017 struct anon_vma_chain *vmac;
2023 struct vm_area_struct *vma; 2018 struct vm_area_struct *vma;
diff --git a/mm/mlock.c b/mm/mlock.c
index e6638f565d42..1c5e33fce639 100644
--- a/mm/mlock.c
+++ b/mm/mlock.c
@@ -102,13 +102,16 @@ void mlock_vma_page(struct page *page)
102 * can't isolate the page, we leave it for putback_lru_page() and vmscan 102 * can't isolate the page, we leave it for putback_lru_page() and vmscan
103 * [page_referenced()/try_to_unmap()] to deal with. 103 * [page_referenced()/try_to_unmap()] to deal with.
104 */ 104 */
105void munlock_vma_page(struct page *page) 105unsigned int munlock_vma_page(struct page *page)
106{ 106{
107 unsigned int page_mask = 0;
108
107 BUG_ON(!PageLocked(page)); 109 BUG_ON(!PageLocked(page));
108 110
109 if (TestClearPageMlocked(page)) { 111 if (TestClearPageMlocked(page)) {
110 mod_zone_page_state(page_zone(page), NR_MLOCK, 112 unsigned int nr_pages = hpage_nr_pages(page);
111 -hpage_nr_pages(page)); 113 mod_zone_page_state(page_zone(page), NR_MLOCK, -nr_pages);
114 page_mask = nr_pages - 1;
112 if (!isolate_lru_page(page)) { 115 if (!isolate_lru_page(page)) {
113 int ret = SWAP_AGAIN; 116 int ret = SWAP_AGAIN;
114 117
@@ -141,6 +144,8 @@ void munlock_vma_page(struct page *page)
141 count_vm_event(UNEVICTABLE_PGMUNLOCKED); 144 count_vm_event(UNEVICTABLE_PGMUNLOCKED);
142 } 145 }
143 } 146 }
147
148 return page_mask;
144} 149}
145 150
146/** 151/**
@@ -159,7 +164,6 @@ long __mlock_vma_pages_range(struct vm_area_struct *vma,
159 unsigned long start, unsigned long end, int *nonblocking) 164 unsigned long start, unsigned long end, int *nonblocking)
160{ 165{
161 struct mm_struct *mm = vma->vm_mm; 166 struct mm_struct *mm = vma->vm_mm;
162 unsigned long addr = start;
163 unsigned long nr_pages = (end - start) / PAGE_SIZE; 167 unsigned long nr_pages = (end - start) / PAGE_SIZE;
164 int gup_flags; 168 int gup_flags;
165 169
@@ -189,7 +193,7 @@ long __mlock_vma_pages_range(struct vm_area_struct *vma,
189 * We made sure addr is within a VMA, so the following will 193 * We made sure addr is within a VMA, so the following will
190 * not result in a stack expansion that recurses back here. 194 * not result in a stack expansion that recurses back here.
191 */ 195 */
192 return __get_user_pages(current, mm, addr, nr_pages, gup_flags, 196 return __get_user_pages(current, mm, start, nr_pages, gup_flags,
193 NULL, NULL, nonblocking); 197 NULL, NULL, nonblocking);
194} 198}
195 199
@@ -226,13 +230,12 @@ static int __mlock_posix_error_return(long retval)
226void munlock_vma_pages_range(struct vm_area_struct *vma, 230void munlock_vma_pages_range(struct vm_area_struct *vma,
227 unsigned long start, unsigned long end) 231 unsigned long start, unsigned long end)
228{ 232{
229 unsigned long addr;
230
231 lru_add_drain();
232 vma->vm_flags &= ~VM_LOCKED; 233 vma->vm_flags &= ~VM_LOCKED;
233 234
234 for (addr = start; addr < end; addr += PAGE_SIZE) { 235 while (start < end) {
235 struct page *page; 236 struct page *page;
237 unsigned int page_mask, page_increm;
238
236 /* 239 /*
237 * Although FOLL_DUMP is intended for get_dump_page(), 240 * Although FOLL_DUMP is intended for get_dump_page(),
238 * it just so happens that its special treatment of the 241 * it just so happens that its special treatment of the
@@ -240,13 +243,22 @@ void munlock_vma_pages_range(struct vm_area_struct *vma,
240 * suits munlock very well (and if somehow an abnormal page 243 * suits munlock very well (and if somehow an abnormal page
241 * has sneaked into the range, we won't oops here: great). 244 * has sneaked into the range, we won't oops here: great).
242 */ 245 */
243 page = follow_page(vma, addr, FOLL_GET | FOLL_DUMP); 246 page = follow_page_mask(vma, start, FOLL_GET | FOLL_DUMP,
247 &page_mask);
244 if (page && !IS_ERR(page)) { 248 if (page && !IS_ERR(page)) {
245 lock_page(page); 249 lock_page(page);
246 munlock_vma_page(page); 250 lru_add_drain();
251 /*
252 * Any THP page found by follow_page_mask() may have
253 * gotten split before reaching munlock_vma_page(),
254 * so we need to recompute the page_mask here.
255 */
256 page_mask = munlock_vma_page(page);
247 unlock_page(page); 257 unlock_page(page);
248 put_page(page); 258 put_page(page);
249 } 259 }
260 page_increm = 1 + (~(start >> PAGE_SHIFT) & page_mask);
261 start += page_increm * PAGE_SIZE;
250 cond_resched(); 262 cond_resched();
251 } 263 }
252} 264}
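The page_increm arithmetic above, traced with illustrative numbers (4 KiB base pages and a 2 MiB THP, so page_mask is 511 for a huge page):

	unsigned long pfn = start >> PAGE_SHIFT;

	/* pfn = 679, i.e. 167 base pages into a 512-page huge page:
	 *   ~pfn & 511 = 344, so page_increm = 1 + 344 = 345
	 * and start advances 345 pages to pfn 1024, the next huge-page
	 * boundary, instead of revisiting the same THP 511 more times.
	 * For a base page, page_mask is 0 and page_increm stays 1. */
	page_increm = 1 + (~pfn & page_mask);
	start += page_increm * PAGE_SIZE;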
diff --git a/mm/mmu_notifier.c b/mm/mmu_notifier.c
index 2175fb0d501c..be04122fb277 100644
--- a/mm/mmu_notifier.c
+++ b/mm/mmu_notifier.c
@@ -95,11 +95,10 @@ int __mmu_notifier_clear_flush_young(struct mm_struct *mm,
95 unsigned long address) 95 unsigned long address)
96{ 96{
97 struct mmu_notifier *mn; 97 struct mmu_notifier *mn;
98 struct hlist_node *n;
99 int young = 0, id; 98 int young = 0, id;
100 99
101 id = srcu_read_lock(&srcu); 100 id = srcu_read_lock(&srcu);
102 hlist_for_each_entry_rcu(mn, n, &mm->mmu_notifier_mm->list, hlist) { 101 hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist) {
103 if (mn->ops->clear_flush_young) 102 if (mn->ops->clear_flush_young)
104 young |= mn->ops->clear_flush_young(mn, mm, address); 103 young |= mn->ops->clear_flush_young(mn, mm, address);
105 } 104 }
@@ -112,11 +111,10 @@ int __mmu_notifier_test_young(struct mm_struct *mm,
112 unsigned long address) 111 unsigned long address)
113{ 112{
114 struct mmu_notifier *mn; 113 struct mmu_notifier *mn;
115 struct hlist_node *n;
116 int young = 0, id; 114 int young = 0, id;
117 115
118 id = srcu_read_lock(&srcu); 116 id = srcu_read_lock(&srcu);
119 hlist_for_each_entry_rcu(mn, n, &mm->mmu_notifier_mm->list, hlist) { 117 hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist) {
120 if (mn->ops->test_young) { 118 if (mn->ops->test_young) {
121 young = mn->ops->test_young(mn, mm, address); 119 young = mn->ops->test_young(mn, mm, address);
122 if (young) 120 if (young)
@@ -132,11 +130,10 @@ void __mmu_notifier_change_pte(struct mm_struct *mm, unsigned long address,
132 pte_t pte) 130 pte_t pte)
133{ 131{
134 struct mmu_notifier *mn; 132 struct mmu_notifier *mn;
135 struct hlist_node *n;
136 int id; 133 int id;
137 134
138 id = srcu_read_lock(&srcu); 135 id = srcu_read_lock(&srcu);
139 hlist_for_each_entry_rcu(mn, n, &mm->mmu_notifier_mm->list, hlist) { 136 hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist) {
140 if (mn->ops->change_pte) 137 if (mn->ops->change_pte)
141 mn->ops->change_pte(mn, mm, address, pte); 138 mn->ops->change_pte(mn, mm, address, pte);
142 } 139 }
@@ -147,11 +144,10 @@ void __mmu_notifier_invalidate_page(struct mm_struct *mm,
147 unsigned long address) 144 unsigned long address)
148{ 145{
149 struct mmu_notifier *mn; 146 struct mmu_notifier *mn;
150 struct hlist_node *n;
151 int id; 147 int id;
152 148
153 id = srcu_read_lock(&srcu); 149 id = srcu_read_lock(&srcu);
154 hlist_for_each_entry_rcu(mn, n, &mm->mmu_notifier_mm->list, hlist) { 150 hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist) {
155 if (mn->ops->invalidate_page) 151 if (mn->ops->invalidate_page)
156 mn->ops->invalidate_page(mn, mm, address); 152 mn->ops->invalidate_page(mn, mm, address);
157 } 153 }
@@ -162,11 +158,10 @@ void __mmu_notifier_invalidate_range_start(struct mm_struct *mm,
162 unsigned long start, unsigned long end) 158 unsigned long start, unsigned long end)
163{ 159{
164 struct mmu_notifier *mn; 160 struct mmu_notifier *mn;
165 struct hlist_node *n;
166 int id; 161 int id;
167 162
168 id = srcu_read_lock(&srcu); 163 id = srcu_read_lock(&srcu);
169 hlist_for_each_entry_rcu(mn, n, &mm->mmu_notifier_mm->list, hlist) { 164 hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist) {
170 if (mn->ops->invalidate_range_start) 165 if (mn->ops->invalidate_range_start)
171 mn->ops->invalidate_range_start(mn, mm, start, end); 166 mn->ops->invalidate_range_start(mn, mm, start, end);
172 } 167 }
@@ -178,11 +173,10 @@ void __mmu_notifier_invalidate_range_end(struct mm_struct *mm,
178 unsigned long start, unsigned long end) 173 unsigned long start, unsigned long end)
179{ 174{
180 struct mmu_notifier *mn; 175 struct mmu_notifier *mn;
181 struct hlist_node *n;
182 int id; 176 int id;
183 177
184 id = srcu_read_lock(&srcu); 178 id = srcu_read_lock(&srcu);
185 hlist_for_each_entry_rcu(mn, n, &mm->mmu_notifier_mm->list, hlist) { 179 hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist) {
186 if (mn->ops->invalidate_range_end) 180 if (mn->ops->invalidate_range_end)
187 mn->ops->invalidate_range_end(mn, mm, start, end); 181 mn->ops->invalidate_range_end(mn, mm, start, end);
188 } 182 }
diff --git a/net/9p/error.c b/net/9p/error.c
index 2ab2de76010f..126fd0dceea2 100644
--- a/net/9p/error.c
+++ b/net/9p/error.c
@@ -221,15 +221,13 @@ EXPORT_SYMBOL(p9_error_init);
221int p9_errstr2errno(char *errstr, int len) 221int p9_errstr2errno(char *errstr, int len)
222{ 222{
223 int errno; 223 int errno;
224 struct hlist_node *p;
225 struct errormap *c; 224 struct errormap *c;
226 int bucket; 225 int bucket;
227 226
228 errno = 0; 227 errno = 0;
229 p = NULL;
230 c = NULL; 228 c = NULL;
231 bucket = jhash(errstr, len, 0) % ERRHASHSZ; 229 bucket = jhash(errstr, len, 0) % ERRHASHSZ;
232 hlist_for_each_entry(c, p, &hash_errmap[bucket], list) { 230 hlist_for_each_entry(c, &hash_errmap[bucket], list) {
233 if (c->namelen == len && !memcmp(c->name, errstr, len)) { 231 if (c->namelen == len && !memcmp(c->name, errstr, len)) {
234 errno = c->val; 232 errno = c->val;
235 break; 233 break;
diff --git a/net/9p/trans_virtio.c b/net/9p/trans_virtio.c
index de2e950a0a7a..74dea377fe5b 100644
--- a/net/9p/trans_virtio.c
+++ b/net/9p/trans_virtio.c
@@ -655,7 +655,7 @@ static struct p9_trans_module p9_virtio_trans = {
655 .create = p9_virtio_create, 655 .create = p9_virtio_create,
656 .close = p9_virtio_close, 656 .close = p9_virtio_close,
657 .request = p9_virtio_request, 657 .request = p9_virtio_request,
658 .zc_request = p9_virtio_zc_request, 658 .zc_request = p9_virtio_zc_request,
659 .cancel = p9_virtio_cancel, 659 .cancel = p9_virtio_cancel,
660 /* 660 /*
661 * We leave one entry for input and one entry for response 661 * We leave one entry for input and one entry for response
diff --git a/net/9p/util.c b/net/9p/util.c
index 6ceeeb384de7..59f278e64f58 100644
--- a/net/9p/util.c
+++ b/net/9p/util.c
@@ -87,23 +87,18 @@ EXPORT_SYMBOL(p9_idpool_destroy);
87 87
88int p9_idpool_get(struct p9_idpool *p) 88int p9_idpool_get(struct p9_idpool *p)
89{ 89{
90 int i = 0; 90 int i;
91 int error;
92 unsigned long flags; 91 unsigned long flags;
93 92
94retry: 93 idr_preload(GFP_NOFS);
95 if (idr_pre_get(&p->pool, GFP_NOFS) == 0)
96 return -1;
97
98 spin_lock_irqsave(&p->lock, flags); 94 spin_lock_irqsave(&p->lock, flags);
99 95
100 /* no need to store exactly p, we just need something non-null */ 96 /* no need to store exactly p, we just need something non-null */
101 error = idr_get_new(&p->pool, p, &i); 97 i = idr_alloc(&p->pool, p, 0, 0, GFP_NOWAIT);
102 spin_unlock_irqrestore(&p->lock, flags);
103 98
104 if (error == -EAGAIN) 99 spin_unlock_irqrestore(&p->lock, flags);
105 goto retry; 100 idr_preload_end();
106 else if (error) 101 if (i < 0)
107 return -1; 102 return -1;
108 103
109 p9_debug(P9_DEBUG_MUX, " id %d pool %p\n", i, p); 104 p9_debug(P9_DEBUG_MUX, " id %d pool %p\n", i, p);
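The release side needs no preload, since idr_remove() only frees; a sketch of what the matching put path looks like under the same pool lock:

	spin_lock_irqsave(&p->lock, flags);
	idr_remove(&p->pool, id);
	spin_unlock_irqrestore(&p->lock, flags);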
diff --git a/net/appletalk/ddp.c b/net/appletalk/ddp.c
index 33475291c9c1..4a141e3cf076 100644
--- a/net/appletalk/ddp.c
+++ b/net/appletalk/ddp.c
@@ -93,10 +93,9 @@ static struct sock *atalk_search_socket(struct sockaddr_at *to,
93 struct atalk_iface *atif) 93 struct atalk_iface *atif)
94{ 94{
95 struct sock *s; 95 struct sock *s;
96 struct hlist_node *node;
97 96
98 read_lock_bh(&atalk_sockets_lock); 97 read_lock_bh(&atalk_sockets_lock);
99 sk_for_each(s, node, &atalk_sockets) { 98 sk_for_each(s, &atalk_sockets) {
100 struct atalk_sock *at = at_sk(s); 99 struct atalk_sock *at = at_sk(s);
101 100
102 if (to->sat_port != at->src_port) 101 if (to->sat_port != at->src_port)
@@ -141,11 +140,10 @@ static struct sock *atalk_find_or_insert_socket(struct sock *sk,
141 struct sockaddr_at *sat) 140 struct sockaddr_at *sat)
142{ 141{
143 struct sock *s; 142 struct sock *s;
144 struct hlist_node *node;
145 struct atalk_sock *at; 143 struct atalk_sock *at;
146 144
147 write_lock_bh(&atalk_sockets_lock); 145 write_lock_bh(&atalk_sockets_lock);
148 sk_for_each(s, node, &atalk_sockets) { 146 sk_for_each(s, &atalk_sockets) {
149 at = at_sk(s); 147 at = at_sk(s);
150 148
151 if (at->src_net == sat->sat_addr.s_net && 149 if (at->src_net == sat->sat_addr.s_net &&
@@ -1084,9 +1082,8 @@ static int atalk_pick_and_bind_port(struct sock *sk, struct sockaddr_at *sat)
1084 sat->sat_port < ATPORT_LAST; 1082 sat->sat_port < ATPORT_LAST;
1085 sat->sat_port++) { 1083 sat->sat_port++) {
1086 struct sock *s; 1084 struct sock *s;
1087 struct hlist_node *node;
1088 1085
1089 sk_for_each(s, node, &atalk_sockets) { 1086 sk_for_each(s, &atalk_sockets) {
1090 struct atalk_sock *at = at_sk(s); 1087 struct atalk_sock *at = at_sk(s);
1091 1088
1092 if (at->src_net == sat->sat_addr.s_net && 1089 if (at->src_net == sat->sat_addr.s_net &&
diff --git a/net/atm/common.c b/net/atm/common.c
index 806fc0a40051..7b491006eaf4 100644
--- a/net/atm/common.c
+++ b/net/atm/common.c
@@ -270,11 +270,11 @@ void atm_dev_release_vccs(struct atm_dev *dev)
270 write_lock_irq(&vcc_sklist_lock); 270 write_lock_irq(&vcc_sklist_lock);
271 for (i = 0; i < VCC_HTABLE_SIZE; i++) { 271 for (i = 0; i < VCC_HTABLE_SIZE; i++) {
272 struct hlist_head *head = &vcc_hash[i]; 272 struct hlist_head *head = &vcc_hash[i];
273 struct hlist_node *node, *tmp; 273 struct hlist_node *tmp;
274 struct sock *s; 274 struct sock *s;
275 struct atm_vcc *vcc; 275 struct atm_vcc *vcc;
276 276
277 sk_for_each_safe(s, node, tmp, head) { 277 sk_for_each_safe(s, tmp, head) {
278 vcc = atm_sk(s); 278 vcc = atm_sk(s);
279 if (vcc->dev == dev) { 279 if (vcc->dev == dev) {
280 vcc_release_async(vcc, -EPIPE); 280 vcc_release_async(vcc, -EPIPE);
@@ -317,11 +317,10 @@ static int adjust_tp(struct atm_trafprm *tp, unsigned char aal)
317static int check_ci(const struct atm_vcc *vcc, short vpi, int vci) 317static int check_ci(const struct atm_vcc *vcc, short vpi, int vci)
318{ 318{
319 struct hlist_head *head = &vcc_hash[vci & (VCC_HTABLE_SIZE - 1)]; 319 struct hlist_head *head = &vcc_hash[vci & (VCC_HTABLE_SIZE - 1)];
320 struct hlist_node *node;
321 struct sock *s; 320 struct sock *s;
322 struct atm_vcc *walk; 321 struct atm_vcc *walk;
323 322
324 sk_for_each(s, node, head) { 323 sk_for_each(s, head) {
325 walk = atm_sk(s); 324 walk = atm_sk(s);
326 if (walk->dev != vcc->dev) 325 if (walk->dev != vcc->dev)
327 continue; 326 continue;
diff --git a/net/atm/lec.c b/net/atm/lec.c
index 2e3d942e77f1..f23916be18fb 100644
--- a/net/atm/lec.c
+++ b/net/atm/lec.c
@@ -842,7 +842,9 @@ static void *lec_tbl_walk(struct lec_state *state, struct hlist_head *tbl,
842 --*l; 842 --*l;
843 } 843 }
844 844
845 hlist_for_each_entry_from(tmp, e, next) { 845 tmp = container_of(e, struct lec_arp_table, next);
846
847 hlist_for_each_entry_from(tmp, next) {
846 if (--*l < 0) 848 if (--*l < 0)
847 break; 849 break;
848 } 850 }
@@ -1307,7 +1309,6 @@ lec_arp_add(struct lec_priv *priv, struct lec_arp_table *entry)
1307static int 1309static int
1308lec_arp_remove(struct lec_priv *priv, struct lec_arp_table *to_remove) 1310lec_arp_remove(struct lec_priv *priv, struct lec_arp_table *to_remove)
1309{ 1311{
1310 struct hlist_node *node;
1311 struct lec_arp_table *entry; 1312 struct lec_arp_table *entry;
1312 int i, remove_vcc = 1; 1313 int i, remove_vcc = 1;
1313 1314
@@ -1326,7 +1327,7 @@ lec_arp_remove(struct lec_priv *priv, struct lec_arp_table *to_remove)
1326 * ESI_FLUSH_PENDING, ESI_FORWARD_DIRECT 1327 * ESI_FLUSH_PENDING, ESI_FORWARD_DIRECT
1327 */ 1328 */
1328 for (i = 0; i < LEC_ARP_TABLE_SIZE; i++) { 1329 for (i = 0; i < LEC_ARP_TABLE_SIZE; i++) {
1329 hlist_for_each_entry(entry, node, 1330 hlist_for_each_entry(entry,
1330 &priv->lec_arp_tables[i], next) { 1331 &priv->lec_arp_tables[i], next) {
1331 if (memcmp(to_remove->atm_addr, 1332 if (memcmp(to_remove->atm_addr,
1332 entry->atm_addr, ATM_ESA_LEN) == 0) { 1333 entry->atm_addr, ATM_ESA_LEN) == 0) {
@@ -1364,14 +1365,13 @@ static const char *get_status_string(unsigned char st)
1364 1365
1365static void dump_arp_table(struct lec_priv *priv) 1366static void dump_arp_table(struct lec_priv *priv)
1366{ 1367{
1367 struct hlist_node *node;
1368 struct lec_arp_table *rulla; 1368 struct lec_arp_table *rulla;
1369 char buf[256]; 1369 char buf[256];
1370 int i, j, offset; 1370 int i, j, offset;
1371 1371
1372 pr_info("Dump %p:\n", priv); 1372 pr_info("Dump %p:\n", priv);
1373 for (i = 0; i < LEC_ARP_TABLE_SIZE; i++) { 1373 for (i = 0; i < LEC_ARP_TABLE_SIZE; i++) {
1374 hlist_for_each_entry(rulla, node, 1374 hlist_for_each_entry(rulla,
1375 &priv->lec_arp_tables[i], next) { 1375 &priv->lec_arp_tables[i], next) {
1376 offset = 0; 1376 offset = 0;
1377 offset += sprintf(buf, "%d: %p\n", i, rulla); 1377 offset += sprintf(buf, "%d: %p\n", i, rulla);
@@ -1403,7 +1403,7 @@ static void dump_arp_table(struct lec_priv *priv)
1403 1403
1404 if (!hlist_empty(&priv->lec_no_forward)) 1404 if (!hlist_empty(&priv->lec_no_forward))
1405 pr_info("No forward\n"); 1405 pr_info("No forward\n");
1406 hlist_for_each_entry(rulla, node, &priv->lec_no_forward, next) { 1406 hlist_for_each_entry(rulla, &priv->lec_no_forward, next) {
1407 offset = 0; 1407 offset = 0;
1408 offset += sprintf(buf + offset, "Mac: %pM", rulla->mac_addr); 1408 offset += sprintf(buf + offset, "Mac: %pM", rulla->mac_addr);
1409 offset += sprintf(buf + offset, " Atm:"); 1409 offset += sprintf(buf + offset, " Atm:");
@@ -1428,7 +1428,7 @@ static void dump_arp_table(struct lec_priv *priv)
1428 1428
1429 if (!hlist_empty(&priv->lec_arp_empty_ones)) 1429 if (!hlist_empty(&priv->lec_arp_empty_ones))
1430 pr_info("Empty ones\n"); 1430 pr_info("Empty ones\n");
1431 hlist_for_each_entry(rulla, node, &priv->lec_arp_empty_ones, next) { 1431 hlist_for_each_entry(rulla, &priv->lec_arp_empty_ones, next) {
1432 offset = 0; 1432 offset = 0;
1433 offset += sprintf(buf + offset, "Mac: %pM", rulla->mac_addr); 1433 offset += sprintf(buf + offset, "Mac: %pM", rulla->mac_addr);
1434 offset += sprintf(buf + offset, " Atm:"); 1434 offset += sprintf(buf + offset, " Atm:");
@@ -1453,7 +1453,7 @@ static void dump_arp_table(struct lec_priv *priv)
1453 1453
1454 if (!hlist_empty(&priv->mcast_fwds)) 1454 if (!hlist_empty(&priv->mcast_fwds))
1455 pr_info("Multicast Forward VCCs\n"); 1455 pr_info("Multicast Forward VCCs\n");
1456 hlist_for_each_entry(rulla, node, &priv->mcast_fwds, next) { 1456 hlist_for_each_entry(rulla, &priv->mcast_fwds, next) {
1457 offset = 0; 1457 offset = 0;
1458 offset += sprintf(buf + offset, "Mac: %pM", rulla->mac_addr); 1458 offset += sprintf(buf + offset, "Mac: %pM", rulla->mac_addr);
1459 offset += sprintf(buf + offset, " Atm:"); 1459 offset += sprintf(buf + offset, " Atm:");
@@ -1487,7 +1487,7 @@ static void dump_arp_table(struct lec_priv *priv)
1487static void lec_arp_destroy(struct lec_priv *priv) 1487static void lec_arp_destroy(struct lec_priv *priv)
1488{ 1488{
1489 unsigned long flags; 1489 unsigned long flags;
1490 struct hlist_node *node, *next; 1490 struct hlist_node *next;
1491 struct lec_arp_table *entry; 1491 struct lec_arp_table *entry;
1492 int i; 1492 int i;
1493 1493
@@ -1499,7 +1499,7 @@ static void lec_arp_destroy(struct lec_priv *priv)
1499 1499
1500 spin_lock_irqsave(&priv->lec_arp_lock, flags); 1500 spin_lock_irqsave(&priv->lec_arp_lock, flags);
1501 for (i = 0; i < LEC_ARP_TABLE_SIZE; i++) { 1501 for (i = 0; i < LEC_ARP_TABLE_SIZE; i++) {
1502 hlist_for_each_entry_safe(entry, node, next, 1502 hlist_for_each_entry_safe(entry, next,
1503 &priv->lec_arp_tables[i], next) { 1503 &priv->lec_arp_tables[i], next) {
1504 lec_arp_remove(priv, entry); 1504 lec_arp_remove(priv, entry);
1505 lec_arp_put(entry); 1505 lec_arp_put(entry);
@@ -1507,7 +1507,7 @@ static void lec_arp_destroy(struct lec_priv *priv)
1507 INIT_HLIST_HEAD(&priv->lec_arp_tables[i]); 1507 INIT_HLIST_HEAD(&priv->lec_arp_tables[i]);
1508 } 1508 }
1509 1509
1510 hlist_for_each_entry_safe(entry, node, next, 1510 hlist_for_each_entry_safe(entry, next,
1511 &priv->lec_arp_empty_ones, next) { 1511 &priv->lec_arp_empty_ones, next) {
1512 del_timer_sync(&entry->timer); 1512 del_timer_sync(&entry->timer);
1513 lec_arp_clear_vccs(entry); 1513 lec_arp_clear_vccs(entry);
@@ -1516,7 +1516,7 @@ static void lec_arp_destroy(struct lec_priv *priv)
1516 } 1516 }
1517 INIT_HLIST_HEAD(&priv->lec_arp_empty_ones); 1517 INIT_HLIST_HEAD(&priv->lec_arp_empty_ones);
1518 1518
1519 hlist_for_each_entry_safe(entry, node, next, 1519 hlist_for_each_entry_safe(entry, next,
1520 &priv->lec_no_forward, next) { 1520 &priv->lec_no_forward, next) {
1521 del_timer_sync(&entry->timer); 1521 del_timer_sync(&entry->timer);
1522 lec_arp_clear_vccs(entry); 1522 lec_arp_clear_vccs(entry);
@@ -1525,7 +1525,7 @@ static void lec_arp_destroy(struct lec_priv *priv)
1525 } 1525 }
1526 INIT_HLIST_HEAD(&priv->lec_no_forward); 1526 INIT_HLIST_HEAD(&priv->lec_no_forward);
1527 1527
1528 hlist_for_each_entry_safe(entry, node, next, &priv->mcast_fwds, next) { 1528 hlist_for_each_entry_safe(entry, next, &priv->mcast_fwds, next) {
1529 /* No timer, LANEv2 7.1.20 and 2.3.5.3 */ 1529 /* No timer, LANEv2 7.1.20 and 2.3.5.3 */
1530 lec_arp_clear_vccs(entry); 1530 lec_arp_clear_vccs(entry);
1531 hlist_del(&entry->next); 1531 hlist_del(&entry->next);
@@ -1542,14 +1542,13 @@ static void lec_arp_destroy(struct lec_priv *priv)
1542static struct lec_arp_table *lec_arp_find(struct lec_priv *priv, 1542static struct lec_arp_table *lec_arp_find(struct lec_priv *priv,
1543 const unsigned char *mac_addr) 1543 const unsigned char *mac_addr)
1544{ 1544{
1545 struct hlist_node *node;
1546 struct hlist_head *head; 1545 struct hlist_head *head;
1547 struct lec_arp_table *entry; 1546 struct lec_arp_table *entry;
1548 1547
1549 pr_debug("%pM\n", mac_addr); 1548 pr_debug("%pM\n", mac_addr);
1550 1549
1551 head = &priv->lec_arp_tables[HASH(mac_addr[ETH_ALEN - 1])]; 1550 head = &priv->lec_arp_tables[HASH(mac_addr[ETH_ALEN - 1])];
1552 hlist_for_each_entry(entry, node, head, next) { 1551 hlist_for_each_entry(entry, head, next) {
1553 if (ether_addr_equal(mac_addr, entry->mac_addr)) 1552 if (ether_addr_equal(mac_addr, entry->mac_addr))
1554 return entry; 1553 return entry;
1555 } 1554 }
@@ -1686,7 +1685,7 @@ static void lec_arp_check_expire(struct work_struct *work)
1686 unsigned long flags; 1685 unsigned long flags;
1687 struct lec_priv *priv = 1686 struct lec_priv *priv =
1688 container_of(work, struct lec_priv, lec_arp_work.work); 1687 container_of(work, struct lec_priv, lec_arp_work.work);
1689 struct hlist_node *node, *next; 1688 struct hlist_node *next;
1690 struct lec_arp_table *entry; 1689 struct lec_arp_table *entry;
1691 unsigned long now; 1690 unsigned long now;
1692 int i; 1691 int i;
@@ -1696,7 +1695,7 @@ static void lec_arp_check_expire(struct work_struct *work)
1696restart: 1695restart:
1697 spin_lock_irqsave(&priv->lec_arp_lock, flags); 1696 spin_lock_irqsave(&priv->lec_arp_lock, flags);
1698 for (i = 0; i < LEC_ARP_TABLE_SIZE; i++) { 1697 for (i = 0; i < LEC_ARP_TABLE_SIZE; i++) {
1699 hlist_for_each_entry_safe(entry, node, next, 1698 hlist_for_each_entry_safe(entry, next,
1700 &priv->lec_arp_tables[i], next) { 1699 &priv->lec_arp_tables[i], next) {
1701 if (__lec_arp_check_expire(entry, now, priv)) { 1700 if (__lec_arp_check_expire(entry, now, priv)) {
1702 struct sk_buff *skb; 1701 struct sk_buff *skb;
@@ -1823,14 +1822,14 @@ lec_addr_delete(struct lec_priv *priv, const unsigned char *atm_addr,
1823 unsigned long permanent) 1822 unsigned long permanent)
1824{ 1823{
1825 unsigned long flags; 1824 unsigned long flags;
1826 struct hlist_node *node, *next; 1825 struct hlist_node *next;
1827 struct lec_arp_table *entry; 1826 struct lec_arp_table *entry;
1828 int i; 1827 int i;
1829 1828
1830 pr_debug("\n"); 1829 pr_debug("\n");
1831 spin_lock_irqsave(&priv->lec_arp_lock, flags); 1830 spin_lock_irqsave(&priv->lec_arp_lock, flags);
1832 for (i = 0; i < LEC_ARP_TABLE_SIZE; i++) { 1831 for (i = 0; i < LEC_ARP_TABLE_SIZE; i++) {
1833 hlist_for_each_entry_safe(entry, node, next, 1832 hlist_for_each_entry_safe(entry, next,
1834 &priv->lec_arp_tables[i], next) { 1833 &priv->lec_arp_tables[i], next) {
1835 if (!memcmp(atm_addr, entry->atm_addr, ATM_ESA_LEN) && 1834 if (!memcmp(atm_addr, entry->atm_addr, ATM_ESA_LEN) &&
1836 (permanent || 1835 (permanent ||
@@ -1855,7 +1854,7 @@ lec_arp_update(struct lec_priv *priv, const unsigned char *mac_addr,
1855 unsigned int targetless_le_arp) 1854 unsigned int targetless_le_arp)
1856{ 1855{
1857 unsigned long flags; 1856 unsigned long flags;
1858 struct hlist_node *node, *next; 1857 struct hlist_node *next;
1859 struct lec_arp_table *entry, *tmp; 1858 struct lec_arp_table *entry, *tmp;
1860 int i; 1859 int i;
1861 1860
@@ -1870,7 +1869,7 @@ lec_arp_update(struct lec_priv *priv, const unsigned char *mac_addr,
1870 * we have no entry in the cache. 7.1.30 1869 * we have no entry in the cache. 7.1.30
1871 */ 1870 */
1872 if (!hlist_empty(&priv->lec_arp_empty_ones)) { 1871 if (!hlist_empty(&priv->lec_arp_empty_ones)) {
1873 hlist_for_each_entry_safe(entry, node, next, 1872 hlist_for_each_entry_safe(entry, next,
1874 &priv->lec_arp_empty_ones, next) { 1873 &priv->lec_arp_empty_ones, next) {
1875 if (memcmp(entry->atm_addr, atm_addr, ATM_ESA_LEN) == 0) { 1874 if (memcmp(entry->atm_addr, atm_addr, ATM_ESA_LEN) == 0) {
1876 hlist_del(&entry->next); 1875 hlist_del(&entry->next);
@@ -1915,7 +1914,7 @@ lec_arp_update(struct lec_priv *priv, const unsigned char *mac_addr,
1915 memcpy(entry->atm_addr, atm_addr, ATM_ESA_LEN); 1914 memcpy(entry->atm_addr, atm_addr, ATM_ESA_LEN);
1916 del_timer(&entry->timer); 1915 del_timer(&entry->timer);
1917 for (i = 0; i < LEC_ARP_TABLE_SIZE; i++) { 1916 for (i = 0; i < LEC_ARP_TABLE_SIZE; i++) {
1918 hlist_for_each_entry(tmp, node, 1917 hlist_for_each_entry(tmp,
1919 &priv->lec_arp_tables[i], next) { 1918 &priv->lec_arp_tables[i], next) {
1920 if (entry != tmp && 1919 if (entry != tmp &&
1921 !memcmp(tmp->atm_addr, atm_addr, ATM_ESA_LEN)) { 1920 !memcmp(tmp->atm_addr, atm_addr, ATM_ESA_LEN)) {
@@ -1956,7 +1955,6 @@ lec_vcc_added(struct lec_priv *priv, const struct atmlec_ioc *ioc_data,
1956 void (*old_push) (struct atm_vcc *vcc, struct sk_buff *skb)) 1955 void (*old_push) (struct atm_vcc *vcc, struct sk_buff *skb))
1957{ 1956{
1958 unsigned long flags; 1957 unsigned long flags;
1959 struct hlist_node *node;
1960 struct lec_arp_table *entry; 1958 struct lec_arp_table *entry;
1961 int i, found_entry = 0; 1959 int i, found_entry = 0;
1962 1960
@@ -2026,7 +2024,7 @@ lec_vcc_added(struct lec_priv *priv, const struct atmlec_ioc *ioc_data,
2026 ioc_data->atm_addr[16], ioc_data->atm_addr[17], 2024 ioc_data->atm_addr[16], ioc_data->atm_addr[17],
2027 ioc_data->atm_addr[18], ioc_data->atm_addr[19]); 2025 ioc_data->atm_addr[18], ioc_data->atm_addr[19]);
2028 for (i = 0; i < LEC_ARP_TABLE_SIZE; i++) { 2026 for (i = 0; i < LEC_ARP_TABLE_SIZE; i++) {
2029 hlist_for_each_entry(entry, node, 2027 hlist_for_each_entry(entry,
2030 &priv->lec_arp_tables[i], next) { 2028 &priv->lec_arp_tables[i], next) {
2031 if (memcmp 2029 if (memcmp
2032 (ioc_data->atm_addr, entry->atm_addr, 2030 (ioc_data->atm_addr, entry->atm_addr,
@@ -2103,7 +2101,6 @@ out:
2103static void lec_flush_complete(struct lec_priv *priv, unsigned long tran_id) 2101static void lec_flush_complete(struct lec_priv *priv, unsigned long tran_id)
2104{ 2102{
2105 unsigned long flags; 2103 unsigned long flags;
2106 struct hlist_node *node;
2107 struct lec_arp_table *entry; 2104 struct lec_arp_table *entry;
2108 int i; 2105 int i;
2109 2106
@@ -2111,7 +2108,7 @@ static void lec_flush_complete(struct lec_priv *priv, unsigned long tran_id)
2111restart: 2108restart:
2112 spin_lock_irqsave(&priv->lec_arp_lock, flags); 2109 spin_lock_irqsave(&priv->lec_arp_lock, flags);
2113 for (i = 0; i < LEC_ARP_TABLE_SIZE; i++) { 2110 for (i = 0; i < LEC_ARP_TABLE_SIZE; i++) {
2114 hlist_for_each_entry(entry, node, 2111 hlist_for_each_entry(entry,
2115 &priv->lec_arp_tables[i], next) { 2112 &priv->lec_arp_tables[i], next) {
2116 if (entry->flush_tran_id == tran_id && 2113 if (entry->flush_tran_id == tran_id &&
2117 entry->status == ESI_FLUSH_PENDING) { 2114 entry->status == ESI_FLUSH_PENDING) {
@@ -2140,13 +2137,12 @@ lec_set_flush_tran_id(struct lec_priv *priv,
2140 const unsigned char *atm_addr, unsigned long tran_id) 2137 const unsigned char *atm_addr, unsigned long tran_id)
2141{ 2138{
2142 unsigned long flags; 2139 unsigned long flags;
2143 struct hlist_node *node;
2144 struct lec_arp_table *entry; 2140 struct lec_arp_table *entry;
2145 int i; 2141 int i;
2146 2142
2147 spin_lock_irqsave(&priv->lec_arp_lock, flags); 2143 spin_lock_irqsave(&priv->lec_arp_lock, flags);
2148 for (i = 0; i < LEC_ARP_TABLE_SIZE; i++) 2144 for (i = 0; i < LEC_ARP_TABLE_SIZE; i++)
2149 hlist_for_each_entry(entry, node, 2145 hlist_for_each_entry(entry,
2150 &priv->lec_arp_tables[i], next) { 2146 &priv->lec_arp_tables[i], next) {
2151 if (!memcmp(atm_addr, entry->atm_addr, ATM_ESA_LEN)) { 2147 if (!memcmp(atm_addr, entry->atm_addr, ATM_ESA_LEN)) {
2152 entry->flush_tran_id = tran_id; 2148 entry->flush_tran_id = tran_id;
@@ -2198,7 +2194,7 @@ out:
2198static void lec_vcc_close(struct lec_priv *priv, struct atm_vcc *vcc) 2194static void lec_vcc_close(struct lec_priv *priv, struct atm_vcc *vcc)
2199{ 2195{
2200 unsigned long flags; 2196 unsigned long flags;
2201 struct hlist_node *node, *next; 2197 struct hlist_node *next;
2202 struct lec_arp_table *entry; 2198 struct lec_arp_table *entry;
2203 int i; 2199 int i;
2204 2200
@@ -2208,7 +2204,7 @@ static void lec_vcc_close(struct lec_priv *priv, struct atm_vcc *vcc)
2208 spin_lock_irqsave(&priv->lec_arp_lock, flags); 2204 spin_lock_irqsave(&priv->lec_arp_lock, flags);
2209 2205
2210 for (i = 0; i < LEC_ARP_TABLE_SIZE; i++) { 2206 for (i = 0; i < LEC_ARP_TABLE_SIZE; i++) {
2211 hlist_for_each_entry_safe(entry, node, next, 2207 hlist_for_each_entry_safe(entry, next,
2212 &priv->lec_arp_tables[i], next) { 2208 &priv->lec_arp_tables[i], next) {
2213 if (vcc == entry->vcc) { 2209 if (vcc == entry->vcc) {
2214 lec_arp_remove(priv, entry); 2210 lec_arp_remove(priv, entry);
@@ -2219,7 +2215,7 @@ static void lec_vcc_close(struct lec_priv *priv, struct atm_vcc *vcc)
2219 } 2215 }
2220 } 2216 }
2221 2217
2222 hlist_for_each_entry_safe(entry, node, next, 2218 hlist_for_each_entry_safe(entry, next,
2223 &priv->lec_arp_empty_ones, next) { 2219 &priv->lec_arp_empty_ones, next) {
2224 if (entry->vcc == vcc) { 2220 if (entry->vcc == vcc) {
2225 lec_arp_clear_vccs(entry); 2221 lec_arp_clear_vccs(entry);
@@ -2229,7 +2225,7 @@ static void lec_vcc_close(struct lec_priv *priv, struct atm_vcc *vcc)
2229 } 2225 }
2230 } 2226 }
2231 2227
2232 hlist_for_each_entry_safe(entry, node, next, 2228 hlist_for_each_entry_safe(entry, next,
2233 &priv->lec_no_forward, next) { 2229 &priv->lec_no_forward, next) {
2234 if (entry->recv_vcc == vcc) { 2230 if (entry->recv_vcc == vcc) {
2235 lec_arp_clear_vccs(entry); 2231 lec_arp_clear_vccs(entry);
@@ -2239,7 +2235,7 @@ static void lec_vcc_close(struct lec_priv *priv, struct atm_vcc *vcc)
2239 } 2235 }
2240 } 2236 }
2241 2237
2242 hlist_for_each_entry_safe(entry, node, next, &priv->mcast_fwds, next) { 2238 hlist_for_each_entry_safe(entry, next, &priv->mcast_fwds, next) {
2243 if (entry->recv_vcc == vcc) { 2239 if (entry->recv_vcc == vcc) {
2244 lec_arp_clear_vccs(entry); 2240 lec_arp_clear_vccs(entry);
2245 /* No timer, LANEv2 7.1.20 and 2.3.5.3 */ 2241 /* No timer, LANEv2 7.1.20 and 2.3.5.3 */
@@ -2257,13 +2253,13 @@ lec_arp_check_empties(struct lec_priv *priv,
2257 struct atm_vcc *vcc, struct sk_buff *skb) 2253 struct atm_vcc *vcc, struct sk_buff *skb)
2258{ 2254{
2259 unsigned long flags; 2255 unsigned long flags;
2260 struct hlist_node *node, *next; 2256 struct hlist_node *next;
2261 struct lec_arp_table *entry, *tmp; 2257 struct lec_arp_table *entry, *tmp;
2262 struct lecdatahdr_8023 *hdr = (struct lecdatahdr_8023 *)skb->data; 2258 struct lecdatahdr_8023 *hdr = (struct lecdatahdr_8023 *)skb->data;
2263 unsigned char *src = hdr->h_source; 2259 unsigned char *src = hdr->h_source;
2264 2260
2265 spin_lock_irqsave(&priv->lec_arp_lock, flags); 2261 spin_lock_irqsave(&priv->lec_arp_lock, flags);
2266 hlist_for_each_entry_safe(entry, node, next, 2262 hlist_for_each_entry_safe(entry, next,
2267 &priv->lec_arp_empty_ones, next) { 2263 &priv->lec_arp_empty_ones, next) {
2268 if (vcc == entry->vcc) { 2264 if (vcc == entry->vcc) {
2269 del_timer(&entry->timer); 2265 del_timer(&entry->timer);
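
[Editor's note] Every hunk in net/atm/lec.c follows the same mechanical rewrite: the iterator now derives its cursor from the entry pointer itself, so the separate struct hlist_node variable disappears from both the declarations and the macro arguments. A minimal before/after sketch of the caller side, using the lec_arp_find walk above as the shape:

	/* before: a dedicated cursor had to be declared and threaded through */
	struct hlist_node *node;
	struct lec_arp_table *entry;

	hlist_for_each_entry(entry, node, head, next) {
		if (ether_addr_equal(mac_addr, entry->mac_addr))
			return entry;
	}

	/* after: the entry pointer is the cursor; one variable fewer per walk */
	struct lec_arp_table *entry;

	hlist_for_each_entry(entry, head, next) {
		if (ether_addr_equal(mac_addr, entry->mac_addr))
			return entry;
	}

The _safe variants keep their remaining extra argument (the "next"/"node_tmp" pointer above) because they still need somewhere to stash the successor before the current entry may be freed.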
diff --git a/net/atm/signaling.c b/net/atm/signaling.c
index 86767ca908a3..4176887e72eb 100644
--- a/net/atm/signaling.c
+++ b/net/atm/signaling.c
@@ -217,7 +217,6 @@ static void purge_vcc(struct atm_vcc *vcc)
217 217
218static void sigd_close(struct atm_vcc *vcc) 218static void sigd_close(struct atm_vcc *vcc)
219{ 219{
220 struct hlist_node *node;
221 struct sock *s; 220 struct sock *s;
222 int i; 221 int i;
223 222
@@ -231,7 +230,7 @@ static void sigd_close(struct atm_vcc *vcc)
231 for (i = 0; i < VCC_HTABLE_SIZE; ++i) { 230 for (i = 0; i < VCC_HTABLE_SIZE; ++i) {
232 struct hlist_head *head = &vcc_hash[i]; 231 struct hlist_head *head = &vcc_hash[i];
233 232
234 sk_for_each(s, node, head) { 233 sk_for_each(s, head) {
235 vcc = atm_sk(s); 234 vcc = atm_sk(s);
236 235
237 purge_vcc(vcc); 236 purge_vcc(vcc);
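
[Editor's note] sk_for_each is one of several convenience wrappers over hlist_for_each_entry, and it loses the node parameter the same way. Roughly, the include/net/sock.h side of this series looks like the following; treat it as a sketch rather than the authoritative definition:

	/* old: caller supplied a struct hlist_node cursor */
	#define sk_for_each(__sk, node, list) \
		hlist_for_each_entry(__sk, node, list, sk_node)

	/* new: the sock pointer doubles as the cursor */
	#define sk_for_each(__sk, list) \
		hlist_for_each_entry(__sk, list, sk_node)

which is why sigd_close above only had to drop its local declaration and one macro argument.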
diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c
index 69a06c47b648..7b11f8bc5071 100644
--- a/net/ax25/af_ax25.c
+++ b/net/ax25/af_ax25.c
@@ -81,14 +81,13 @@ static void ax25_kill_by_device(struct net_device *dev)
81{ 81{
82 ax25_dev *ax25_dev; 82 ax25_dev *ax25_dev;
83 ax25_cb *s; 83 ax25_cb *s;
84 struct hlist_node *node;
85 84
86 if ((ax25_dev = ax25_dev_ax25dev(dev)) == NULL) 85 if ((ax25_dev = ax25_dev_ax25dev(dev)) == NULL)
87 return; 86 return;
88 87
89 spin_lock_bh(&ax25_list_lock); 88 spin_lock_bh(&ax25_list_lock);
90again: 89again:
91 ax25_for_each(s, node, &ax25_list) { 90 ax25_for_each(s, &ax25_list) {
92 if (s->ax25_dev == ax25_dev) { 91 if (s->ax25_dev == ax25_dev) {
93 s->ax25_dev = NULL; 92 s->ax25_dev = NULL;
94 spin_unlock_bh(&ax25_list_lock); 93 spin_unlock_bh(&ax25_list_lock);
@@ -158,10 +157,9 @@ struct sock *ax25_find_listener(ax25_address *addr, int digi,
158 struct net_device *dev, int type) 157 struct net_device *dev, int type)
159{ 158{
160 ax25_cb *s; 159 ax25_cb *s;
161 struct hlist_node *node;
162 160
163 spin_lock(&ax25_list_lock); 161 spin_lock(&ax25_list_lock);
164 ax25_for_each(s, node, &ax25_list) { 162 ax25_for_each(s, &ax25_list) {
165 if ((s->iamdigi && !digi) || (!s->iamdigi && digi)) 163 if ((s->iamdigi && !digi) || (!s->iamdigi && digi))
166 continue; 164 continue;
167 if (s->sk && !ax25cmp(&s->source_addr, addr) && 165 if (s->sk && !ax25cmp(&s->source_addr, addr) &&
@@ -187,10 +185,9 @@ struct sock *ax25_get_socket(ax25_address *my_addr, ax25_address *dest_addr,
187{ 185{
188 struct sock *sk = NULL; 186 struct sock *sk = NULL;
189 ax25_cb *s; 187 ax25_cb *s;
190 struct hlist_node *node;
191 188
192 spin_lock(&ax25_list_lock); 189 spin_lock(&ax25_list_lock);
193 ax25_for_each(s, node, &ax25_list) { 190 ax25_for_each(s, &ax25_list) {
194 if (s->sk && !ax25cmp(&s->source_addr, my_addr) && 191 if (s->sk && !ax25cmp(&s->source_addr, my_addr) &&
195 !ax25cmp(&s->dest_addr, dest_addr) && 192 !ax25cmp(&s->dest_addr, dest_addr) &&
196 s->sk->sk_type == type) { 193 s->sk->sk_type == type) {
@@ -213,10 +210,9 @@ ax25_cb *ax25_find_cb(ax25_address *src_addr, ax25_address *dest_addr,
213 ax25_digi *digi, struct net_device *dev) 210 ax25_digi *digi, struct net_device *dev)
214{ 211{
215 ax25_cb *s; 212 ax25_cb *s;
216 struct hlist_node *node;
217 213
218 spin_lock_bh(&ax25_list_lock); 214 spin_lock_bh(&ax25_list_lock);
219 ax25_for_each(s, node, &ax25_list) { 215 ax25_for_each(s, &ax25_list) {
220 if (s->sk && s->sk->sk_type != SOCK_SEQPACKET) 216 if (s->sk && s->sk->sk_type != SOCK_SEQPACKET)
221 continue; 217 continue;
222 if (s->ax25_dev == NULL) 218 if (s->ax25_dev == NULL)
@@ -248,10 +244,9 @@ void ax25_send_to_raw(ax25_address *addr, struct sk_buff *skb, int proto)
248{ 244{
249 ax25_cb *s; 245 ax25_cb *s;
250 struct sk_buff *copy; 246 struct sk_buff *copy;
251 struct hlist_node *node;
252 247
253 spin_lock(&ax25_list_lock); 248 spin_lock(&ax25_list_lock);
254 ax25_for_each(s, node, &ax25_list) { 249 ax25_for_each(s, &ax25_list) {
255 if (s->sk != NULL && ax25cmp(&s->source_addr, addr) == 0 && 250 if (s->sk != NULL && ax25cmp(&s->source_addr, addr) == 0 &&
256 s->sk->sk_type == SOCK_RAW && 251 s->sk->sk_type == SOCK_RAW &&
257 s->sk->sk_protocol == proto && 252 s->sk->sk_protocol == proto &&
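
[Editor's note] What makes the node parameter droppable at all is a small helper in the list.h change this series depends on: the iterator computes the next entry directly from the current entry's embedded hlist_node, yielding NULL at the end of the chain. Approximately (sketched from the series, not copied verbatim):

	#define hlist_entry_safe(ptr, type, member) \
		({ typeof(ptr) ____ptr = (ptr); \
		   ____ptr ? hlist_entry(____ptr, type, member) : NULL; \
		})

	#define hlist_for_each_entry(pos, head, member) \
		for (pos = hlist_entry_safe((head)->first, typeof(*(pos)), member); \
		     pos; \
		     pos = hlist_entry_safe((pos)->member.next, typeof(*(pos)), member))

ax25_for_each is a thin wrapper over this, so every ax25 conversion in this file and the ones below is a pure argument drop.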
diff --git a/net/ax25/ax25_ds_subr.c b/net/ax25/ax25_ds_subr.c
index 5ea7fd3e2af9..e05bd57b5afd 100644
--- a/net/ax25/ax25_ds_subr.c
+++ b/net/ax25/ax25_ds_subr.c
@@ -39,7 +39,6 @@ void ax25_ds_nr_error_recovery(ax25_cb *ax25)
39void ax25_ds_enquiry_response(ax25_cb *ax25) 39void ax25_ds_enquiry_response(ax25_cb *ax25)
40{ 40{
41 ax25_cb *ax25o; 41 ax25_cb *ax25o;
42 struct hlist_node *node;
43 42
44 /* Please note that neither DK4EG's nor DG2FEF's 43 /* Please note that neither DK4EG's nor DG2FEF's
45 * DAMA spec mention the following behaviour as seen 44 * DAMA spec mention the following behaviour as seen
@@ -80,7 +79,7 @@ void ax25_ds_enquiry_response(ax25_cb *ax25)
80 ax25_ds_set_timer(ax25->ax25_dev); 79 ax25_ds_set_timer(ax25->ax25_dev);
81 80
82 spin_lock(&ax25_list_lock); 81 spin_lock(&ax25_list_lock);
83 ax25_for_each(ax25o, node, &ax25_list) { 82 ax25_for_each(ax25o, &ax25_list) {
84 if (ax25o == ax25) 83 if (ax25o == ax25)
85 continue; 84 continue;
86 85
@@ -159,10 +158,9 @@ static int ax25_check_dama_slave(ax25_dev *ax25_dev)
159{ 158{
160 ax25_cb *ax25; 159 ax25_cb *ax25;
161 int res = 0; 160 int res = 0;
162 struct hlist_node *node;
163 161
164 spin_lock(&ax25_list_lock); 162 spin_lock(&ax25_list_lock);
165 ax25_for_each(ax25, node, &ax25_list) 163 ax25_for_each(ax25, &ax25_list)
166 if (ax25->ax25_dev == ax25_dev && (ax25->condition & AX25_COND_DAMA_MODE) && ax25->state > AX25_STATE_1) { 164 if (ax25->ax25_dev == ax25_dev && (ax25->condition & AX25_COND_DAMA_MODE) && ax25->state > AX25_STATE_1) {
167 res = 1; 165 res = 1;
168 break; 166 break;
diff --git a/net/ax25/ax25_ds_timer.c b/net/ax25/ax25_ds_timer.c
index 993c439b4f71..951cd57bb07d 100644
--- a/net/ax25/ax25_ds_timer.c
+++ b/net/ax25/ax25_ds_timer.c
@@ -70,7 +70,6 @@ static void ax25_ds_timeout(unsigned long arg)
70{ 70{
71 ax25_dev *ax25_dev = (struct ax25_dev *) arg; 71 ax25_dev *ax25_dev = (struct ax25_dev *) arg;
72 ax25_cb *ax25; 72 ax25_cb *ax25;
73 struct hlist_node *node;
74 73
75 if (ax25_dev == NULL || !ax25_dev->dama.slave) 74 if (ax25_dev == NULL || !ax25_dev->dama.slave)
76 return; /* Yikes! */ 75 return; /* Yikes! */
@@ -81,7 +80,7 @@ static void ax25_ds_timeout(unsigned long arg)
81 } 80 }
82 81
83 spin_lock(&ax25_list_lock); 82 spin_lock(&ax25_list_lock);
84 ax25_for_each(ax25, node, &ax25_list) { 83 ax25_for_each(ax25, &ax25_list) {
85 if (ax25->ax25_dev != ax25_dev || !(ax25->condition & AX25_COND_DAMA_MODE)) 84 if (ax25->ax25_dev != ax25_dev || !(ax25->condition & AX25_COND_DAMA_MODE))
86 continue; 85 continue;
87 86
diff --git a/net/ax25/ax25_iface.c b/net/ax25/ax25_iface.c
index 7d5f24b82cc8..7f16e8a931b2 100644
--- a/net/ax25/ax25_iface.c
+++ b/net/ax25/ax25_iface.c
@@ -193,10 +193,9 @@ int ax25_listen_mine(ax25_address *callsign, struct net_device *dev)
193void ax25_link_failed(ax25_cb *ax25, int reason) 193void ax25_link_failed(ax25_cb *ax25, int reason)
194{ 194{
195 struct ax25_linkfail *lf; 195 struct ax25_linkfail *lf;
196 struct hlist_node *node;
197 196
198 spin_lock_bh(&linkfail_lock); 197 spin_lock_bh(&linkfail_lock);
199 hlist_for_each_entry(lf, node, &ax25_linkfail_list, lf_node) 198 hlist_for_each_entry(lf, &ax25_linkfail_list, lf_node)
200 lf->func(ax25, reason); 199 lf->func(ax25, reason);
201 spin_unlock_bh(&linkfail_lock); 200 spin_unlock_bh(&linkfail_lock);
202} 201}
diff --git a/net/ax25/ax25_uid.c b/net/ax25/ax25_uid.c
index 957999e43ff7..71c4badbc807 100644
--- a/net/ax25/ax25_uid.c
+++ b/net/ax25/ax25_uid.c
@@ -54,10 +54,9 @@ EXPORT_SYMBOL(ax25_uid_policy);
54ax25_uid_assoc *ax25_findbyuid(kuid_t uid) 54ax25_uid_assoc *ax25_findbyuid(kuid_t uid)
55{ 55{
56 ax25_uid_assoc *ax25_uid, *res = NULL; 56 ax25_uid_assoc *ax25_uid, *res = NULL;
57 struct hlist_node *node;
58 57
59 read_lock(&ax25_uid_lock); 58 read_lock(&ax25_uid_lock);
60 ax25_uid_for_each(ax25_uid, node, &ax25_uid_list) { 59 ax25_uid_for_each(ax25_uid, &ax25_uid_list) {
61 if (uid_eq(ax25_uid->uid, uid)) { 60 if (uid_eq(ax25_uid->uid, uid)) {
62 ax25_uid_hold(ax25_uid); 61 ax25_uid_hold(ax25_uid);
63 res = ax25_uid; 62 res = ax25_uid;
@@ -74,7 +73,6 @@ EXPORT_SYMBOL(ax25_findbyuid);
74int ax25_uid_ioctl(int cmd, struct sockaddr_ax25 *sax) 73int ax25_uid_ioctl(int cmd, struct sockaddr_ax25 *sax)
75{ 74{
76 ax25_uid_assoc *ax25_uid; 75 ax25_uid_assoc *ax25_uid;
77 struct hlist_node *node;
78 ax25_uid_assoc *user; 76 ax25_uid_assoc *user;
79 unsigned long res; 77 unsigned long res;
80 78
@@ -82,7 +80,7 @@ int ax25_uid_ioctl(int cmd, struct sockaddr_ax25 *sax)
82 case SIOCAX25GETUID: 80 case SIOCAX25GETUID:
83 res = -ENOENT; 81 res = -ENOENT;
84 read_lock(&ax25_uid_lock); 82 read_lock(&ax25_uid_lock);
85 ax25_uid_for_each(ax25_uid, node, &ax25_uid_list) { 83 ax25_uid_for_each(ax25_uid, &ax25_uid_list) {
86 if (ax25cmp(&sax->sax25_call, &ax25_uid->call) == 0) { 84 if (ax25cmp(&sax->sax25_call, &ax25_uid->call) == 0) {
87 res = from_kuid_munged(current_user_ns(), ax25_uid->uid); 85 res = from_kuid_munged(current_user_ns(), ax25_uid->uid);
88 break; 86 break;
@@ -126,7 +124,7 @@ int ax25_uid_ioctl(int cmd, struct sockaddr_ax25 *sax)
126 124
127 ax25_uid = NULL; 125 ax25_uid = NULL;
128 write_lock(&ax25_uid_lock); 126 write_lock(&ax25_uid_lock);
129 ax25_uid_for_each(ax25_uid, node, &ax25_uid_list) { 127 ax25_uid_for_each(ax25_uid, &ax25_uid_list) {
130 if (ax25cmp(&sax->sax25_call, &ax25_uid->call) == 0) 128 if (ax25cmp(&sax->sax25_call, &ax25_uid->call) == 0)
131 break; 129 break;
132 } 130 }
@@ -212,11 +210,10 @@ const struct file_operations ax25_uid_fops = {
212void __exit ax25_uid_free(void) 210void __exit ax25_uid_free(void)
213{ 211{
214 ax25_uid_assoc *ax25_uid; 212 ax25_uid_assoc *ax25_uid;
215 struct hlist_node *node;
216 213
217 write_lock(&ax25_uid_lock); 214 write_lock(&ax25_uid_lock);
218again: 215again:
219 ax25_uid_for_each(ax25_uid, node, &ax25_uid_list) { 216 ax25_uid_for_each(ax25_uid, &ax25_uid_list) {
220 hlist_del_init(&ax25_uid->uid_node); 217 hlist_del_init(&ax25_uid->uid_node);
221 ax25_uid_put(ax25_uid); 218 ax25_uid_put(ax25_uid);
222 goto again; 219 goto again;
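
[Editor's note] ax25_uid_free gets away with the plain iterator even though it deletes every entry: after each hlist_del_init it jumps back to the again label and restarts the walk from the now-shorter list head, so the cursor never has to step past a node that was just unlinked. The alternative, used in many conversions below, is the _safe iterator, which snapshots the successor first. A hypothetical equivalent of the loop above written that way:

	struct ax25_uid_assoc *ax25_uid;
	struct hlist_node *tmp;

	write_lock(&ax25_uid_lock);
	/* tmp holds the successor, so unlinking the current entry is safe */
	hlist_for_each_entry_safe(ax25_uid, tmp, &ax25_uid_list, uid_node) {
		hlist_del_init(&ax25_uid->uid_node);
		ax25_uid_put(ax25_uid);
	}
	write_unlock(&ax25_uid_lock);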
diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c
index 72fe1bbf7721..a0b253ecadaf 100644
--- a/net/batman-adv/bat_iv_ogm.c
+++ b/net/batman-adv/bat_iv_ogm.c
@@ -487,7 +487,6 @@ static void batadv_iv_ogm_queue_add(struct batadv_priv *bat_priv,
487 */ 487 */
488 struct batadv_forw_packet *forw_packet_aggr = NULL; 488 struct batadv_forw_packet *forw_packet_aggr = NULL;
489 struct batadv_forw_packet *forw_packet_pos = NULL; 489 struct batadv_forw_packet *forw_packet_pos = NULL;
490 struct hlist_node *tmp_node;
491 struct batadv_ogm_packet *batadv_ogm_packet; 490 struct batadv_ogm_packet *batadv_ogm_packet;
492 bool direct_link; 491 bool direct_link;
493 unsigned long max_aggregation_jiffies; 492 unsigned long max_aggregation_jiffies;
@@ -500,7 +499,7 @@ static void batadv_iv_ogm_queue_add(struct batadv_priv *bat_priv,
500 spin_lock_bh(&bat_priv->forw_bat_list_lock); 499 spin_lock_bh(&bat_priv->forw_bat_list_lock);
501 /* own packets are not to be aggregated */ 500 /* own packets are not to be aggregated */
502 if ((atomic_read(&bat_priv->aggregated_ogms)) && (!own_packet)) { 501 if ((atomic_read(&bat_priv->aggregated_ogms)) && (!own_packet)) {
503 hlist_for_each_entry(forw_packet_pos, tmp_node, 502 hlist_for_each_entry(forw_packet_pos,
504 &bat_priv->forw_bat_list, list) { 503 &bat_priv->forw_bat_list, list) {
505 if (batadv_iv_ogm_can_aggregate(batadv_ogm_packet, 504 if (batadv_iv_ogm_can_aggregate(batadv_ogm_packet,
506 bat_priv, packet_len, 505 bat_priv, packet_len,
@@ -655,7 +654,6 @@ batadv_iv_ogm_orig_update(struct batadv_priv *bat_priv,
655 struct batadv_neigh_node *neigh_node = NULL, *tmp_neigh_node = NULL; 654 struct batadv_neigh_node *neigh_node = NULL, *tmp_neigh_node = NULL;
656 struct batadv_neigh_node *router = NULL; 655 struct batadv_neigh_node *router = NULL;
657 struct batadv_orig_node *orig_node_tmp; 656 struct batadv_orig_node *orig_node_tmp;
658 struct hlist_node *node;
659 int if_num; 657 int if_num;
660 uint8_t sum_orig, sum_neigh; 658 uint8_t sum_orig, sum_neigh;
661 uint8_t *neigh_addr; 659 uint8_t *neigh_addr;
@@ -665,7 +663,7 @@ batadv_iv_ogm_orig_update(struct batadv_priv *bat_priv,
665 "update_originator(): Searching and updating originator entry of received packet\n"); 663 "update_originator(): Searching and updating originator entry of received packet\n");
666 664
667 rcu_read_lock(); 665 rcu_read_lock();
668 hlist_for_each_entry_rcu(tmp_neigh_node, node, 666 hlist_for_each_entry_rcu(tmp_neigh_node,
669 &orig_node->neigh_list, list) { 667 &orig_node->neigh_list, list) {
670 neigh_addr = tmp_neigh_node->addr; 668 neigh_addr = tmp_neigh_node->addr;
671 if (batadv_compare_eth(neigh_addr, ethhdr->h_source) && 669 if (batadv_compare_eth(neigh_addr, ethhdr->h_source) &&
@@ -801,7 +799,6 @@ static int batadv_iv_ogm_calc_tq(struct batadv_orig_node *orig_node,
801{ 799{
802 struct batadv_priv *bat_priv = netdev_priv(if_incoming->soft_iface); 800 struct batadv_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
803 struct batadv_neigh_node *neigh_node = NULL, *tmp_neigh_node; 801 struct batadv_neigh_node *neigh_node = NULL, *tmp_neigh_node;
804 struct hlist_node *node;
805 uint8_t total_count; 802 uint8_t total_count;
806 uint8_t orig_eq_count, neigh_rq_count, neigh_rq_inv, tq_own; 803 uint8_t orig_eq_count, neigh_rq_count, neigh_rq_inv, tq_own;
807 unsigned int neigh_rq_inv_cube, neigh_rq_max_cube; 804 unsigned int neigh_rq_inv_cube, neigh_rq_max_cube;
@@ -810,7 +807,7 @@ static int batadv_iv_ogm_calc_tq(struct batadv_orig_node *orig_node,
810 807
811 /* find corresponding one hop neighbor */ 808 /* find corresponding one hop neighbor */
812 rcu_read_lock(); 809 rcu_read_lock();
813 hlist_for_each_entry_rcu(tmp_neigh_node, node, 810 hlist_for_each_entry_rcu(tmp_neigh_node,
814 &orig_neigh_node->neigh_list, list) { 811 &orig_neigh_node->neigh_list, list) {
815 if (!batadv_compare_eth(tmp_neigh_node->addr, 812 if (!batadv_compare_eth(tmp_neigh_node->addr,
816 orig_neigh_node->orig)) 813 orig_neigh_node->orig))
@@ -920,7 +917,6 @@ batadv_iv_ogm_update_seqnos(const struct ethhdr *ethhdr,
920 struct batadv_priv *bat_priv = netdev_priv(if_incoming->soft_iface); 917 struct batadv_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
921 struct batadv_orig_node *orig_node; 918 struct batadv_orig_node *orig_node;
922 struct batadv_neigh_node *tmp_neigh_node; 919 struct batadv_neigh_node *tmp_neigh_node;
923 struct hlist_node *node;
924 int is_duplicate = 0; 920 int is_duplicate = 0;
925 int32_t seq_diff; 921 int32_t seq_diff;
926 int need_update = 0; 922 int need_update = 0;
@@ -943,7 +939,7 @@ batadv_iv_ogm_update_seqnos(const struct ethhdr *ethhdr,
943 goto out; 939 goto out;
944 940
945 rcu_read_lock(); 941 rcu_read_lock();
946 hlist_for_each_entry_rcu(tmp_neigh_node, node, 942 hlist_for_each_entry_rcu(tmp_neigh_node,
947 &orig_node->neigh_list, list) { 943 &orig_node->neigh_list, list) {
948 is_duplicate |= batadv_test_bit(tmp_neigh_node->real_bits, 944 is_duplicate |= batadv_test_bit(tmp_neigh_node->real_bits,
949 orig_node->last_real_seqno, 945 orig_node->last_real_seqno,
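
[Editor's note] The RCU flavour changes shape the same way. Its new definition fetches each link with an RCU dereference so the traversal stays safe against concurrent updates; approximately (a sketch of the rculist.h side, not a verbatim copy):

	#define hlist_for_each_entry_rcu(pos, head, member) \
		for (pos = hlist_entry_safe(rcu_dereference_raw(hlist_first_rcu(head)), \
					    typeof(*(pos)), member); \
		     pos; \
		     pos = hlist_entry_safe(rcu_dereference_raw(hlist_next_rcu(&(pos)->member)), \
					    typeof(*(pos)), member))

Callers such as batadv_iv_ogm_orig_update above still need rcu_read_lock() around the walk; only the cursor bookkeeping went away.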
diff --git a/net/batman-adv/bridge_loop_avoidance.c b/net/batman-adv/bridge_loop_avoidance.c
index 30f46526cbbd..6a4f728680ae 100644
--- a/net/batman-adv/bridge_loop_avoidance.c
+++ b/net/batman-adv/bridge_loop_avoidance.c
@@ -144,7 +144,6 @@ static struct batadv_bla_claim
144{ 144{
145 struct batadv_hashtable *hash = bat_priv->bla.claim_hash; 145 struct batadv_hashtable *hash = bat_priv->bla.claim_hash;
146 struct hlist_head *head; 146 struct hlist_head *head;
147 struct hlist_node *node;
148 struct batadv_bla_claim *claim; 147 struct batadv_bla_claim *claim;
149 struct batadv_bla_claim *claim_tmp = NULL; 148 struct batadv_bla_claim *claim_tmp = NULL;
150 int index; 149 int index;
@@ -156,7 +155,7 @@ static struct batadv_bla_claim
156 head = &hash->table[index]; 155 head = &hash->table[index];
157 156
158 rcu_read_lock(); 157 rcu_read_lock();
159 hlist_for_each_entry_rcu(claim, node, head, hash_entry) { 158 hlist_for_each_entry_rcu(claim, head, hash_entry) {
160 if (!batadv_compare_claim(&claim->hash_entry, data)) 159 if (!batadv_compare_claim(&claim->hash_entry, data))
161 continue; 160 continue;
162 161
@@ -185,7 +184,6 @@ batadv_backbone_hash_find(struct batadv_priv *bat_priv,
185{ 184{
186 struct batadv_hashtable *hash = bat_priv->bla.backbone_hash; 185 struct batadv_hashtable *hash = bat_priv->bla.backbone_hash;
187 struct hlist_head *head; 186 struct hlist_head *head;
188 struct hlist_node *node;
189 struct batadv_bla_backbone_gw search_entry, *backbone_gw; 187 struct batadv_bla_backbone_gw search_entry, *backbone_gw;
190 struct batadv_bla_backbone_gw *backbone_gw_tmp = NULL; 188 struct batadv_bla_backbone_gw *backbone_gw_tmp = NULL;
191 int index; 189 int index;
@@ -200,7 +198,7 @@ batadv_backbone_hash_find(struct batadv_priv *bat_priv,
200 head = &hash->table[index]; 198 head = &hash->table[index];
201 199
202 rcu_read_lock(); 200 rcu_read_lock();
203 hlist_for_each_entry_rcu(backbone_gw, node, head, hash_entry) { 201 hlist_for_each_entry_rcu(backbone_gw, head, hash_entry) {
204 if (!batadv_compare_backbone_gw(&backbone_gw->hash_entry, 202 if (!batadv_compare_backbone_gw(&backbone_gw->hash_entry,
205 &search_entry)) 203 &search_entry))
206 continue; 204 continue;
@@ -221,7 +219,7 @@ static void
221batadv_bla_del_backbone_claims(struct batadv_bla_backbone_gw *backbone_gw) 219batadv_bla_del_backbone_claims(struct batadv_bla_backbone_gw *backbone_gw)
222{ 220{
223 struct batadv_hashtable *hash; 221 struct batadv_hashtable *hash;
224 struct hlist_node *node, *node_tmp; 222 struct hlist_node *node_tmp;
225 struct hlist_head *head; 223 struct hlist_head *head;
226 struct batadv_bla_claim *claim; 224 struct batadv_bla_claim *claim;
227 int i; 225 int i;
@@ -236,13 +234,13 @@ batadv_bla_del_backbone_claims(struct batadv_bla_backbone_gw *backbone_gw)
236 list_lock = &hash->list_locks[i]; 234 list_lock = &hash->list_locks[i];
237 235
238 spin_lock_bh(list_lock); 236 spin_lock_bh(list_lock);
239 hlist_for_each_entry_safe(claim, node, node_tmp, 237 hlist_for_each_entry_safe(claim, node_tmp,
240 head, hash_entry) { 238 head, hash_entry) {
241 if (claim->backbone_gw != backbone_gw) 239 if (claim->backbone_gw != backbone_gw)
242 continue; 240 continue;
243 241
244 batadv_claim_free_ref(claim); 242 batadv_claim_free_ref(claim);
245 hlist_del_rcu(node); 243 hlist_del_rcu(&claim->hash_entry);
246 } 244 }
247 spin_unlock_bh(list_lock); 245 spin_unlock_bh(list_lock);
248 } 246 }
@@ -460,7 +458,6 @@ static void batadv_bla_answer_request(struct batadv_priv *bat_priv,
460 struct batadv_hard_iface *primary_if, 458 struct batadv_hard_iface *primary_if,
461 short vid) 459 short vid)
462{ 460{
463 struct hlist_node *node;
464 struct hlist_head *head; 461 struct hlist_head *head;
465 struct batadv_hashtable *hash; 462 struct batadv_hashtable *hash;
466 struct batadv_bla_claim *claim; 463 struct batadv_bla_claim *claim;
@@ -481,7 +478,7 @@ static void batadv_bla_answer_request(struct batadv_priv *bat_priv,
481 head = &hash->table[i]; 478 head = &hash->table[i];
482 479
483 rcu_read_lock(); 480 rcu_read_lock();
484 hlist_for_each_entry_rcu(claim, node, head, hash_entry) { 481 hlist_for_each_entry_rcu(claim, head, hash_entry) {
485 /* only own claims are interesting */ 482 /* only own claims are interesting */
486 if (claim->backbone_gw != backbone_gw) 483 if (claim->backbone_gw != backbone_gw)
487 continue; 484 continue;
@@ -958,7 +955,7 @@ static int batadv_bla_process_claim(struct batadv_priv *bat_priv,
958static void batadv_bla_purge_backbone_gw(struct batadv_priv *bat_priv, int now) 955static void batadv_bla_purge_backbone_gw(struct batadv_priv *bat_priv, int now)
959{ 956{
960 struct batadv_bla_backbone_gw *backbone_gw; 957 struct batadv_bla_backbone_gw *backbone_gw;
961 struct hlist_node *node, *node_tmp; 958 struct hlist_node *node_tmp;
962 struct hlist_head *head; 959 struct hlist_head *head;
963 struct batadv_hashtable *hash; 960 struct batadv_hashtable *hash;
964 spinlock_t *list_lock; /* protects write access to the hash lists */ 961 spinlock_t *list_lock; /* protects write access to the hash lists */
@@ -973,7 +970,7 @@ static void batadv_bla_purge_backbone_gw(struct batadv_priv *bat_priv, int now)
973 list_lock = &hash->list_locks[i]; 970 list_lock = &hash->list_locks[i];
974 971
975 spin_lock_bh(list_lock); 972 spin_lock_bh(list_lock);
976 hlist_for_each_entry_safe(backbone_gw, node, node_tmp, 973 hlist_for_each_entry_safe(backbone_gw, node_tmp,
977 head, hash_entry) { 974 head, hash_entry) {
978 if (now) 975 if (now)
979 goto purge_now; 976 goto purge_now;
@@ -992,7 +989,7 @@ purge_now:
992 989
993 batadv_bla_del_backbone_claims(backbone_gw); 990 batadv_bla_del_backbone_claims(backbone_gw);
994 991
995 hlist_del_rcu(node); 992 hlist_del_rcu(&backbone_gw->hash_entry);
996 batadv_backbone_gw_free_ref(backbone_gw); 993 batadv_backbone_gw_free_ref(backbone_gw);
997 } 994 }
998 spin_unlock_bh(list_lock); 995 spin_unlock_bh(list_lock);
@@ -1013,7 +1010,6 @@ static void batadv_bla_purge_claims(struct batadv_priv *bat_priv,
1013 int now) 1010 int now)
1014{ 1011{
1015 struct batadv_bla_claim *claim; 1012 struct batadv_bla_claim *claim;
1016 struct hlist_node *node;
1017 struct hlist_head *head; 1013 struct hlist_head *head;
1018 struct batadv_hashtable *hash; 1014 struct batadv_hashtable *hash;
1019 int i; 1015 int i;
@@ -1026,7 +1022,7 @@ static void batadv_bla_purge_claims(struct batadv_priv *bat_priv,
1026 head = &hash->table[i]; 1022 head = &hash->table[i];
1027 1023
1028 rcu_read_lock(); 1024 rcu_read_lock();
1029 hlist_for_each_entry_rcu(claim, node, head, hash_entry) { 1025 hlist_for_each_entry_rcu(claim, head, hash_entry) {
1030 if (now) 1026 if (now)
1031 goto purge_now; 1027 goto purge_now;
1032 if (!batadv_compare_eth(claim->backbone_gw->orig, 1028 if (!batadv_compare_eth(claim->backbone_gw->orig,
@@ -1062,7 +1058,6 @@ void batadv_bla_update_orig_address(struct batadv_priv *bat_priv,
1062 struct batadv_hard_iface *oldif) 1058 struct batadv_hard_iface *oldif)
1063{ 1059{
1064 struct batadv_bla_backbone_gw *backbone_gw; 1060 struct batadv_bla_backbone_gw *backbone_gw;
1065 struct hlist_node *node;
1066 struct hlist_head *head; 1061 struct hlist_head *head;
1067 struct batadv_hashtable *hash; 1062 struct batadv_hashtable *hash;
1068 __be16 group; 1063 __be16 group;
@@ -1086,7 +1081,7 @@ void batadv_bla_update_orig_address(struct batadv_priv *bat_priv,
1086 head = &hash->table[i]; 1081 head = &hash->table[i];
1087 1082
1088 rcu_read_lock(); 1083 rcu_read_lock();
1089 hlist_for_each_entry_rcu(backbone_gw, node, head, hash_entry) { 1084 hlist_for_each_entry_rcu(backbone_gw, head, hash_entry) {
1090 /* own orig still holds the old value. */ 1085 /* own orig still holds the old value. */
1091 if (!batadv_compare_eth(backbone_gw->orig, 1086 if (!batadv_compare_eth(backbone_gw->orig,
1092 oldif->net_dev->dev_addr)) 1087 oldif->net_dev->dev_addr))
@@ -1112,7 +1107,6 @@ static void batadv_bla_periodic_work(struct work_struct *work)
1112 struct delayed_work *delayed_work; 1107 struct delayed_work *delayed_work;
1113 struct batadv_priv *bat_priv; 1108 struct batadv_priv *bat_priv;
1114 struct batadv_priv_bla *priv_bla; 1109 struct batadv_priv_bla *priv_bla;
1115 struct hlist_node *node;
1116 struct hlist_head *head; 1110 struct hlist_head *head;
1117 struct batadv_bla_backbone_gw *backbone_gw; 1111 struct batadv_bla_backbone_gw *backbone_gw;
1118 struct batadv_hashtable *hash; 1112 struct batadv_hashtable *hash;
@@ -1140,7 +1134,7 @@ static void batadv_bla_periodic_work(struct work_struct *work)
1140 head = &hash->table[i]; 1134 head = &hash->table[i];
1141 1135
1142 rcu_read_lock(); 1136 rcu_read_lock();
1143 hlist_for_each_entry_rcu(backbone_gw, node, head, hash_entry) { 1137 hlist_for_each_entry_rcu(backbone_gw, head, hash_entry) {
1144 if (!batadv_compare_eth(backbone_gw->orig, 1138 if (!batadv_compare_eth(backbone_gw->orig,
1145 primary_if->net_dev->dev_addr)) 1139 primary_if->net_dev->dev_addr))
1146 continue; 1140 continue;
@@ -1322,7 +1316,6 @@ int batadv_bla_is_backbone_gw_orig(struct batadv_priv *bat_priv, uint8_t *orig)
1322{ 1316{
1323 struct batadv_hashtable *hash = bat_priv->bla.backbone_hash; 1317 struct batadv_hashtable *hash = bat_priv->bla.backbone_hash;
1324 struct hlist_head *head; 1318 struct hlist_head *head;
1325 struct hlist_node *node;
1326 struct batadv_bla_backbone_gw *backbone_gw; 1319 struct batadv_bla_backbone_gw *backbone_gw;
1327 int i; 1320 int i;
1328 1321
@@ -1336,7 +1329,7 @@ int batadv_bla_is_backbone_gw_orig(struct batadv_priv *bat_priv, uint8_t *orig)
1336 head = &hash->table[i]; 1329 head = &hash->table[i];
1337 1330
1338 rcu_read_lock(); 1331 rcu_read_lock();
1339 hlist_for_each_entry_rcu(backbone_gw, node, head, hash_entry) { 1332 hlist_for_each_entry_rcu(backbone_gw, head, hash_entry) {
1340 if (batadv_compare_eth(backbone_gw->orig, orig)) { 1333 if (batadv_compare_eth(backbone_gw->orig, orig)) {
1341 rcu_read_unlock(); 1334 rcu_read_unlock();
1342 return 1; 1335 return 1;
@@ -1607,7 +1600,6 @@ int batadv_bla_claim_table_seq_print_text(struct seq_file *seq, void *offset)
1607 struct batadv_hashtable *hash = bat_priv->bla.claim_hash; 1600 struct batadv_hashtable *hash = bat_priv->bla.claim_hash;
1608 struct batadv_bla_claim *claim; 1601 struct batadv_bla_claim *claim;
1609 struct batadv_hard_iface *primary_if; 1602 struct batadv_hard_iface *primary_if;
1610 struct hlist_node *node;
1611 struct hlist_head *head; 1603 struct hlist_head *head;
1612 uint32_t i; 1604 uint32_t i;
1613 bool is_own; 1605 bool is_own;
@@ -1628,7 +1620,7 @@ int batadv_bla_claim_table_seq_print_text(struct seq_file *seq, void *offset)
1628 head = &hash->table[i]; 1620 head = &hash->table[i];
1629 1621
1630 rcu_read_lock(); 1622 rcu_read_lock();
1631 hlist_for_each_entry_rcu(claim, node, head, hash_entry) { 1623 hlist_for_each_entry_rcu(claim, head, hash_entry) {
1632 is_own = batadv_compare_eth(claim->backbone_gw->orig, 1624 is_own = batadv_compare_eth(claim->backbone_gw->orig,
1633 primary_addr); 1625 primary_addr);
1634 seq_printf(seq, " * %pM on % 5d by %pM [%c] (%#.4x)\n", 1626 seq_printf(seq, " * %pM on % 5d by %pM [%c] (%#.4x)\n",
@@ -1652,7 +1644,6 @@ int batadv_bla_backbone_table_seq_print_text(struct seq_file *seq, void *offset)
1652 struct batadv_hashtable *hash = bat_priv->bla.backbone_hash; 1644 struct batadv_hashtable *hash = bat_priv->bla.backbone_hash;
1653 struct batadv_bla_backbone_gw *backbone_gw; 1645 struct batadv_bla_backbone_gw *backbone_gw;
1654 struct batadv_hard_iface *primary_if; 1646 struct batadv_hard_iface *primary_if;
1655 struct hlist_node *node;
1656 struct hlist_head *head; 1647 struct hlist_head *head;
1657 int secs, msecs; 1648 int secs, msecs;
1658 uint32_t i; 1649 uint32_t i;
@@ -1674,7 +1665,7 @@ int batadv_bla_backbone_table_seq_print_text(struct seq_file *seq, void *offset)
1674 head = &hash->table[i]; 1665 head = &hash->table[i];
1675 1666
1676 rcu_read_lock(); 1667 rcu_read_lock();
1677 hlist_for_each_entry_rcu(backbone_gw, node, head, hash_entry) { 1668 hlist_for_each_entry_rcu(backbone_gw, head, hash_entry) {
1678 msecs = jiffies_to_msecs(jiffies - 1669 msecs = jiffies_to_msecs(jiffies -
1679 backbone_gw->lasttime); 1670 backbone_gw->lasttime);
1680 secs = msecs / 1000; 1671 secs = msecs / 1000;
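
[Editor's note] This file shows the one place where the conversion is more than an argument drop: code that used the cursor for deletion. With no struct hlist_node in scope, hlist_del_rcu must be handed the hlist_node embedded in the entry instead, as in the batadv_bla_del_backbone_claims hunk above:

	/* before: unlink via the iterator's cursor */
	hlist_del_rcu(node);

	/* after: unlink via the member embedded in the entry itself */
	hlist_del_rcu(&claim->hash_entry);

The two are equivalent, since for the entry currently being visited the old node cursor was always &claim->hash_entry; the new form just says so explicitly.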
diff --git a/net/batman-adv/distributed-arp-table.c b/net/batman-adv/distributed-arp-table.c
index 761a59002e34..d54188a112ea 100644
--- a/net/batman-adv/distributed-arp-table.c
+++ b/net/batman-adv/distributed-arp-table.c
@@ -83,7 +83,7 @@ static void __batadv_dat_purge(struct batadv_priv *bat_priv,
83{ 83{
84 spinlock_t *list_lock; /* protects write access to the hash lists */ 84 spinlock_t *list_lock; /* protects write access to the hash lists */
85 struct batadv_dat_entry *dat_entry; 85 struct batadv_dat_entry *dat_entry;
86 struct hlist_node *node, *node_tmp; 86 struct hlist_node *node_tmp;
87 struct hlist_head *head; 87 struct hlist_head *head;
88 uint32_t i; 88 uint32_t i;
89 89
@@ -95,7 +95,7 @@ static void __batadv_dat_purge(struct batadv_priv *bat_priv,
95 list_lock = &bat_priv->dat.hash->list_locks[i]; 95 list_lock = &bat_priv->dat.hash->list_locks[i];
96 96
97 spin_lock_bh(list_lock); 97 spin_lock_bh(list_lock);
98 hlist_for_each_entry_safe(dat_entry, node, node_tmp, head, 98 hlist_for_each_entry_safe(dat_entry, node_tmp, head,
99 hash_entry) { 99 hash_entry) {
100 /* if an helper function has been passed as parameter, 100 /* if an helper function has been passed as parameter,
101 * ask it if the entry has to be purged or not 101 * ask it if the entry has to be purged or not
@@ -103,7 +103,7 @@ static void __batadv_dat_purge(struct batadv_priv *bat_priv,
103 if (to_purge && !to_purge(dat_entry)) 103 if (to_purge && !to_purge(dat_entry))
104 continue; 104 continue;
105 105
106 hlist_del_rcu(node); 106 hlist_del_rcu(&dat_entry->hash_entry);
107 batadv_dat_entry_free_ref(dat_entry); 107 batadv_dat_entry_free_ref(dat_entry);
108 } 108 }
109 spin_unlock_bh(list_lock); 109 spin_unlock_bh(list_lock);
@@ -235,7 +235,6 @@ static struct batadv_dat_entry *
235batadv_dat_entry_hash_find(struct batadv_priv *bat_priv, __be32 ip) 235batadv_dat_entry_hash_find(struct batadv_priv *bat_priv, __be32 ip)
236{ 236{
237 struct hlist_head *head; 237 struct hlist_head *head;
238 struct hlist_node *node;
239 struct batadv_dat_entry *dat_entry, *dat_entry_tmp = NULL; 238 struct batadv_dat_entry *dat_entry, *dat_entry_tmp = NULL;
240 struct batadv_hashtable *hash = bat_priv->dat.hash; 239 struct batadv_hashtable *hash = bat_priv->dat.hash;
241 uint32_t index; 240 uint32_t index;
@@ -247,7 +246,7 @@ batadv_dat_entry_hash_find(struct batadv_priv *bat_priv, __be32 ip)
247 head = &hash->table[index]; 246 head = &hash->table[index];
248 247
249 rcu_read_lock(); 248 rcu_read_lock();
250 hlist_for_each_entry_rcu(dat_entry, node, head, hash_entry) { 249 hlist_for_each_entry_rcu(dat_entry, head, hash_entry) {
251 if (dat_entry->ip != ip) 250 if (dat_entry->ip != ip)
252 continue; 251 continue;
253 252
@@ -465,7 +464,6 @@ static void batadv_choose_next_candidate(struct batadv_priv *bat_priv,
465 batadv_dat_addr_t max = 0, tmp_max = 0; 464 batadv_dat_addr_t max = 0, tmp_max = 0;
466 struct batadv_orig_node *orig_node, *max_orig_node = NULL; 465 struct batadv_orig_node *orig_node, *max_orig_node = NULL;
467 struct batadv_hashtable *hash = bat_priv->orig_hash; 466 struct batadv_hashtable *hash = bat_priv->orig_hash;
468 struct hlist_node *node;
469 struct hlist_head *head; 467 struct hlist_head *head;
470 int i; 468 int i;
471 469
@@ -481,7 +479,7 @@ static void batadv_choose_next_candidate(struct batadv_priv *bat_priv,
481 head = &hash->table[i]; 479 head = &hash->table[i];
482 480
483 rcu_read_lock(); 481 rcu_read_lock();
484 hlist_for_each_entry_rcu(orig_node, node, head, hash_entry) { 482 hlist_for_each_entry_rcu(orig_node, head, hash_entry) {
485 /* the dht space is a ring and addresses are unsigned */ 483 /* the dht space is a ring and addresses are unsigned */
486 tmp_max = BATADV_DAT_ADDR_MAX - orig_node->dat_addr + 484 tmp_max = BATADV_DAT_ADDR_MAX - orig_node->dat_addr +
487 ip_key; 485 ip_key;
@@ -686,7 +684,6 @@ int batadv_dat_cache_seq_print_text(struct seq_file *seq, void *offset)
686 struct batadv_hashtable *hash = bat_priv->dat.hash; 684 struct batadv_hashtable *hash = bat_priv->dat.hash;
687 struct batadv_dat_entry *dat_entry; 685 struct batadv_dat_entry *dat_entry;
688 struct batadv_hard_iface *primary_if; 686 struct batadv_hard_iface *primary_if;
689 struct hlist_node *node;
690 struct hlist_head *head; 687 struct hlist_head *head;
691 unsigned long last_seen_jiffies; 688 unsigned long last_seen_jiffies;
692 int last_seen_msecs, last_seen_secs, last_seen_mins; 689 int last_seen_msecs, last_seen_secs, last_seen_mins;
@@ -704,7 +701,7 @@ int batadv_dat_cache_seq_print_text(struct seq_file *seq, void *offset)
704 head = &hash->table[i]; 701 head = &hash->table[i];
705 702
706 rcu_read_lock(); 703 rcu_read_lock();
707 hlist_for_each_entry_rcu(dat_entry, node, head, hash_entry) { 704 hlist_for_each_entry_rcu(dat_entry, head, hash_entry) {
708 last_seen_jiffies = jiffies - dat_entry->last_update; 705 last_seen_jiffies = jiffies - dat_entry->last_update;
709 last_seen_msecs = jiffies_to_msecs(last_seen_jiffies); 706 last_seen_msecs = jiffies_to_msecs(last_seen_jiffies);
710 last_seen_mins = last_seen_msecs / 60000; 707 last_seen_mins = last_seen_msecs / 60000;
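
[Editor's note] __batadv_dat_purge above keeps its node_tmp even after the conversion because the safe variant still needs external storage for the successor; only the redundant first cursor is gone. The new two-cursor form, approximately:

	#define hlist_for_each_entry_safe(pos, n, head, member) \
		for (pos = hlist_entry_safe((head)->first, typeof(*pos), member); \
		     pos && ({ n = pos->member.next; 1; }); \
		     pos = hlist_entry_safe(n, typeof(*pos), member))

n is captured before the loop body runs, so hlist_del_rcu(&dat_entry->hash_entry) followed by freeing the entry cannot strand the walk.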
diff --git a/net/batman-adv/gateway_client.c b/net/batman-adv/gateway_client.c
index 074107f2cfaa..34f99a46ec1d 100644
--- a/net/batman-adv/gateway_client.c
+++ b/net/batman-adv/gateway_client.c
@@ -114,7 +114,6 @@ static struct batadv_gw_node *
114batadv_gw_get_best_gw_node(struct batadv_priv *bat_priv) 114batadv_gw_get_best_gw_node(struct batadv_priv *bat_priv)
115{ 115{
116 struct batadv_neigh_node *router; 116 struct batadv_neigh_node *router;
117 struct hlist_node *node;
118 struct batadv_gw_node *gw_node, *curr_gw = NULL; 117 struct batadv_gw_node *gw_node, *curr_gw = NULL;
119 uint32_t max_gw_factor = 0, tmp_gw_factor = 0; 118 uint32_t max_gw_factor = 0, tmp_gw_factor = 0;
120 uint32_t gw_divisor; 119 uint32_t gw_divisor;
@@ -127,7 +126,7 @@ batadv_gw_get_best_gw_node(struct batadv_priv *bat_priv)
127 gw_divisor *= 64; 126 gw_divisor *= 64;
128 127
129 rcu_read_lock(); 128 rcu_read_lock();
130 hlist_for_each_entry_rcu(gw_node, node, &bat_priv->gw.list, list) { 129 hlist_for_each_entry_rcu(gw_node, &bat_priv->gw.list, list) {
131 if (gw_node->deleted) 130 if (gw_node->deleted)
132 continue; 131 continue;
133 132
@@ -344,7 +343,6 @@ void batadv_gw_node_update(struct batadv_priv *bat_priv,
344 struct batadv_orig_node *orig_node, 343 struct batadv_orig_node *orig_node,
345 uint8_t new_gwflags) 344 uint8_t new_gwflags)
346{ 345{
347 struct hlist_node *node;
348 struct batadv_gw_node *gw_node, *curr_gw; 346 struct batadv_gw_node *gw_node, *curr_gw;
349 347
350 /* Note: We don't need a NULL check here, since curr_gw never gets 348 /* Note: We don't need a NULL check here, since curr_gw never gets
@@ -355,7 +353,7 @@ void batadv_gw_node_update(struct batadv_priv *bat_priv,
355 curr_gw = batadv_gw_get_selected_gw_node(bat_priv); 353 curr_gw = batadv_gw_get_selected_gw_node(bat_priv);
356 354
357 rcu_read_lock(); 355 rcu_read_lock();
358 hlist_for_each_entry_rcu(gw_node, node, &bat_priv->gw.list, list) { 356 hlist_for_each_entry_rcu(gw_node, &bat_priv->gw.list, list) {
359 if (gw_node->orig_node != orig_node) 357 if (gw_node->orig_node != orig_node)
360 continue; 358 continue;
361 359
@@ -403,7 +401,7 @@ void batadv_gw_node_delete(struct batadv_priv *bat_priv,
403void batadv_gw_node_purge(struct batadv_priv *bat_priv) 401void batadv_gw_node_purge(struct batadv_priv *bat_priv)
404{ 402{
405 struct batadv_gw_node *gw_node, *curr_gw; 403 struct batadv_gw_node *gw_node, *curr_gw;
406 struct hlist_node *node, *node_tmp; 404 struct hlist_node *node_tmp;
407 unsigned long timeout = msecs_to_jiffies(2 * BATADV_PURGE_TIMEOUT); 405 unsigned long timeout = msecs_to_jiffies(2 * BATADV_PURGE_TIMEOUT);
408 int do_deselect = 0; 406 int do_deselect = 0;
409 407
@@ -411,7 +409,7 @@ void batadv_gw_node_purge(struct batadv_priv *bat_priv)
411 409
412 spin_lock_bh(&bat_priv->gw.list_lock); 410 spin_lock_bh(&bat_priv->gw.list_lock);
413 411
414 hlist_for_each_entry_safe(gw_node, node, node_tmp, 412 hlist_for_each_entry_safe(gw_node, node_tmp,
415 &bat_priv->gw.list, list) { 413 &bat_priv->gw.list, list) {
416 if (((!gw_node->deleted) || 414 if (((!gw_node->deleted) ||
417 (time_before(jiffies, gw_node->deleted + timeout))) && 415 (time_before(jiffies, gw_node->deleted + timeout))) &&
@@ -476,7 +474,6 @@ int batadv_gw_client_seq_print_text(struct seq_file *seq, void *offset)
476 struct batadv_priv *bat_priv = netdev_priv(net_dev); 474 struct batadv_priv *bat_priv = netdev_priv(net_dev);
477 struct batadv_hard_iface *primary_if; 475 struct batadv_hard_iface *primary_if;
478 struct batadv_gw_node *gw_node; 476 struct batadv_gw_node *gw_node;
479 struct hlist_node *node;
480 int gw_count = 0; 477 int gw_count = 0;
481 478
482 primary_if = batadv_seq_print_text_primary_if_get(seq); 479 primary_if = batadv_seq_print_text_primary_if_get(seq);
@@ -490,7 +487,7 @@ int batadv_gw_client_seq_print_text(struct seq_file *seq, void *offset)
490 primary_if->net_dev->dev_addr, net_dev->name); 487 primary_if->net_dev->dev_addr, net_dev->name);
491 488
492 rcu_read_lock(); 489 rcu_read_lock();
493 hlist_for_each_entry_rcu(gw_node, node, &bat_priv->gw.list, list) { 490 hlist_for_each_entry_rcu(gw_node, &bat_priv->gw.list, list) {
494 if (gw_node->deleted) 491 if (gw_node->deleted)
495 continue; 492 continue;
496 493
diff --git a/net/batman-adv/main.c b/net/batman-adv/main.c
index 21fe6987733b..0488d70c8c35 100644
--- a/net/batman-adv/main.c
+++ b/net/batman-adv/main.c
@@ -345,9 +345,8 @@ void batadv_recv_handler_unregister(uint8_t packet_type)
345static struct batadv_algo_ops *batadv_algo_get(char *name) 345static struct batadv_algo_ops *batadv_algo_get(char *name)
346{ 346{
347 struct batadv_algo_ops *bat_algo_ops = NULL, *bat_algo_ops_tmp; 347 struct batadv_algo_ops *bat_algo_ops = NULL, *bat_algo_ops_tmp;
348 struct hlist_node *node;
349 348
350 hlist_for_each_entry(bat_algo_ops_tmp, node, &batadv_algo_list, list) { 349 hlist_for_each_entry(bat_algo_ops_tmp, &batadv_algo_list, list) {
351 if (strcmp(bat_algo_ops_tmp->name, name) != 0) 350 if (strcmp(bat_algo_ops_tmp->name, name) != 0)
352 continue; 351 continue;
353 352
@@ -411,11 +410,10 @@ out:
411int batadv_algo_seq_print_text(struct seq_file *seq, void *offset) 410int batadv_algo_seq_print_text(struct seq_file *seq, void *offset)
412{ 411{
413 struct batadv_algo_ops *bat_algo_ops; 412 struct batadv_algo_ops *bat_algo_ops;
414 struct hlist_node *node;
415 413
416 seq_printf(seq, "Available routing algorithms:\n"); 414 seq_printf(seq, "Available routing algorithms:\n");
417 415
418 hlist_for_each_entry(bat_algo_ops, node, &batadv_algo_list, list) { 416 hlist_for_each_entry(bat_algo_ops, &batadv_algo_list, list) {
419 seq_printf(seq, "%s\n", bat_algo_ops->name); 417 seq_printf(seq, "%s\n", bat_algo_ops->name);
420 } 418 }
421 419
diff --git a/net/batman-adv/originator.c b/net/batman-adv/originator.c
index 457ea445217c..96fb80b724dc 100644
--- a/net/batman-adv/originator.c
+++ b/net/batman-adv/originator.c
@@ -118,7 +118,7 @@ out:
118 118
119static void batadv_orig_node_free_rcu(struct rcu_head *rcu) 119static void batadv_orig_node_free_rcu(struct rcu_head *rcu)
120{ 120{
121 struct hlist_node *node, *node_tmp; 121 struct hlist_node *node_tmp;
122 struct batadv_neigh_node *neigh_node, *tmp_neigh_node; 122 struct batadv_neigh_node *neigh_node, *tmp_neigh_node;
123 struct batadv_orig_node *orig_node; 123 struct batadv_orig_node *orig_node;
124 124
@@ -134,7 +134,7 @@ static void batadv_orig_node_free_rcu(struct rcu_head *rcu)
134 } 134 }
135 135
136 /* for all neighbors towards this originator ... */ 136 /* for all neighbors towards this originator ... */
137 hlist_for_each_entry_safe(neigh_node, node, node_tmp, 137 hlist_for_each_entry_safe(neigh_node, node_tmp,
138 &orig_node->neigh_list, list) { 138 &orig_node->neigh_list, list) {
139 hlist_del_rcu(&neigh_node->list); 139 hlist_del_rcu(&neigh_node->list);
140 batadv_neigh_node_free_ref(neigh_node); 140 batadv_neigh_node_free_ref(neigh_node);
@@ -161,7 +161,7 @@ void batadv_orig_node_free_ref(struct batadv_orig_node *orig_node)
161void batadv_originator_free(struct batadv_priv *bat_priv) 161void batadv_originator_free(struct batadv_priv *bat_priv)
162{ 162{
163 struct batadv_hashtable *hash = bat_priv->orig_hash; 163 struct batadv_hashtable *hash = bat_priv->orig_hash;
164 struct hlist_node *node, *node_tmp; 164 struct hlist_node *node_tmp;
165 struct hlist_head *head; 165 struct hlist_head *head;
166 spinlock_t *list_lock; /* spinlock to protect write access */ 166 spinlock_t *list_lock; /* spinlock to protect write access */
167 struct batadv_orig_node *orig_node; 167 struct batadv_orig_node *orig_node;
@@ -179,9 +179,9 @@ void batadv_originator_free(struct batadv_priv *bat_priv)
179 list_lock = &hash->list_locks[i]; 179 list_lock = &hash->list_locks[i];
180 180
181 spin_lock_bh(list_lock); 181 spin_lock_bh(list_lock);
182 hlist_for_each_entry_safe(orig_node, node, node_tmp, 182 hlist_for_each_entry_safe(orig_node, node_tmp,
183 head, hash_entry) { 183 head, hash_entry) {
184 hlist_del_rcu(node); 184 hlist_del_rcu(&orig_node->hash_entry);
 			batadv_orig_node_free_ref(orig_node);
 		}
 		spin_unlock_bh(list_lock);
@@ -274,7 +274,7 @@ batadv_purge_orig_neighbors(struct batadv_priv *bat_priv,
 			    struct batadv_orig_node *orig_node,
 			    struct batadv_neigh_node **best_neigh_node)
 {
-	struct hlist_node *node, *node_tmp;
+	struct hlist_node *node_tmp;
 	struct batadv_neigh_node *neigh_node;
 	bool neigh_purged = false;
 	unsigned long last_seen;
@@ -285,7 +285,7 @@ batadv_purge_orig_neighbors(struct batadv_priv *bat_priv,
 	spin_lock_bh(&orig_node->neigh_list_lock);
 
 	/* for all neighbors towards this originator ... */
-	hlist_for_each_entry_safe(neigh_node, node, node_tmp,
+	hlist_for_each_entry_safe(neigh_node, node_tmp,
 				  &orig_node->neigh_list, list) {
 		last_seen = neigh_node->last_seen;
 		if_incoming = neigh_node->if_incoming;
@@ -348,7 +348,7 @@ static bool batadv_purge_orig_node(struct batadv_priv *bat_priv,
 static void _batadv_purge_orig(struct batadv_priv *bat_priv)
 {
 	struct batadv_hashtable *hash = bat_priv->orig_hash;
-	struct hlist_node *node, *node_tmp;
+	struct hlist_node *node_tmp;
 	struct hlist_head *head;
 	spinlock_t *list_lock; /* spinlock to protect write access */
 	struct batadv_orig_node *orig_node;
@@ -363,13 +363,13 @@ static void _batadv_purge_orig(struct batadv_priv *bat_priv)
 		list_lock = &hash->list_locks[i];
 
 		spin_lock_bh(list_lock);
-		hlist_for_each_entry_safe(orig_node, node, node_tmp,
+		hlist_for_each_entry_safe(orig_node, node_tmp,
 					  head, hash_entry) {
 			if (batadv_purge_orig_node(bat_priv, orig_node)) {
 				if (orig_node->gw_flags)
 					batadv_gw_node_delete(bat_priv,
 							      orig_node);
-				hlist_del_rcu(node);
+				hlist_del_rcu(&orig_node->hash_entry);
 				batadv_orig_node_free_ref(orig_node);
 				continue;
 			}
@@ -408,7 +408,6 @@ int batadv_orig_seq_print_text(struct seq_file *seq, void *offset)
 	struct net_device *net_dev = (struct net_device *)seq->private;
 	struct batadv_priv *bat_priv = netdev_priv(net_dev);
 	struct batadv_hashtable *hash = bat_priv->orig_hash;
-	struct hlist_node *node, *node_tmp;
 	struct hlist_head *head;
 	struct batadv_hard_iface *primary_if;
 	struct batadv_orig_node *orig_node;
@@ -434,7 +433,7 @@ int batadv_orig_seq_print_text(struct seq_file *seq, void *offset)
 		head = &hash->table[i];
 
 		rcu_read_lock();
-		hlist_for_each_entry_rcu(orig_node, node, head, hash_entry) {
+		hlist_for_each_entry_rcu(orig_node, head, hash_entry) {
 			neigh_node = batadv_orig_node_get_router(orig_node);
 			if (!neigh_node)
 				continue;
@@ -453,7 +452,7 @@ int batadv_orig_seq_print_text(struct seq_file *seq, void *offset)
 				   neigh_node->addr,
 				   neigh_node->if_incoming->net_dev->name);
 
-			hlist_for_each_entry_rcu(neigh_node_tmp, node_tmp,
+			hlist_for_each_entry_rcu(neigh_node_tmp,
 						 &orig_node->neigh_list, list) {
 				seq_printf(seq, " %pM (%3i)",
 					   neigh_node_tmp->addr,
@@ -511,7 +510,6 @@ int batadv_orig_hash_add_if(struct batadv_hard_iface *hard_iface,
 {
 	struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
 	struct batadv_hashtable *hash = bat_priv->orig_hash;
-	struct hlist_node *node;
 	struct hlist_head *head;
 	struct batadv_orig_node *orig_node;
 	uint32_t i;
@@ -524,7 +522,7 @@ int batadv_orig_hash_add_if(struct batadv_hard_iface *hard_iface,
 		head = &hash->table[i];
 
 		rcu_read_lock();
-		hlist_for_each_entry_rcu(orig_node, node, head, hash_entry) {
+		hlist_for_each_entry_rcu(orig_node, head, hash_entry) {
 			spin_lock_bh(&orig_node->ogm_cnt_lock);
 			ret = batadv_orig_node_add_if(orig_node, max_if_num);
 			spin_unlock_bh(&orig_node->ogm_cnt_lock);
@@ -595,7 +593,6 @@ int batadv_orig_hash_del_if(struct batadv_hard_iface *hard_iface,
 {
 	struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
 	struct batadv_hashtable *hash = bat_priv->orig_hash;
-	struct hlist_node *node;
 	struct hlist_head *head;
 	struct batadv_hard_iface *hard_iface_tmp;
 	struct batadv_orig_node *orig_node;
@@ -609,7 +606,7 @@ int batadv_orig_hash_del_if(struct batadv_hard_iface *hard_iface,
 		head = &hash->table[i];
 
 		rcu_read_lock();
-		hlist_for_each_entry_rcu(orig_node, node, head, hash_entry) {
+		hlist_for_each_entry_rcu(orig_node, head, hash_entry) {
 			spin_lock_bh(&orig_node->ogm_cnt_lock);
 			ret = batadv_orig_node_del_if(orig_node, max_if_num,
 						      hard_iface->if_num);
diff --git a/net/batman-adv/originator.h b/net/batman-adv/originator.h
index 286bf743e76a..7df48fa7669d 100644
--- a/net/batman-adv/originator.h
+++ b/net/batman-adv/originator.h
@@ -68,7 +68,6 @@ batadv_orig_hash_find(struct batadv_priv *bat_priv, const void *data)
 {
 	struct batadv_hashtable *hash = bat_priv->orig_hash;
 	struct hlist_head *head;
-	struct hlist_node *node;
 	struct batadv_orig_node *orig_node, *orig_node_tmp = NULL;
 	int index;
 
@@ -79,7 +78,7 @@ batadv_orig_hash_find(struct batadv_priv *bat_priv, const void *data)
 	head = &hash->table[index];
 
 	rcu_read_lock();
-	hlist_for_each_entry_rcu(orig_node, node, head, hash_entry) {
+	hlist_for_each_entry_rcu(orig_node, head, hash_entry) {
 		if (!batadv_compare_eth(orig_node, data))
 			continue;
 
diff --git a/net/batman-adv/routing.c b/net/batman-adv/routing.c
index 60ba03fc8390..5ee21cebbbb0 100644
--- a/net/batman-adv/routing.c
+++ b/net/batman-adv/routing.c
@@ -37,7 +37,6 @@ void batadv_slide_own_bcast_window(struct batadv_hard_iface *hard_iface)
 {
 	struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
 	struct batadv_hashtable *hash = bat_priv->orig_hash;
-	struct hlist_node *node;
 	struct hlist_head *head;
 	struct batadv_orig_node *orig_node;
 	unsigned long *word;
@@ -49,7 +48,7 @@ void batadv_slide_own_bcast_window(struct batadv_hard_iface *hard_iface)
 		head = &hash->table[i];
 
 		rcu_read_lock();
-		hlist_for_each_entry_rcu(orig_node, node, head, hash_entry) {
+		hlist_for_each_entry_rcu(orig_node, head, hash_entry) {
 			spin_lock_bh(&orig_node->ogm_cnt_lock);
 			word_index = hard_iface->if_num * BATADV_NUM_WORDS;
 			word = &(orig_node->bcast_own[word_index]);
@@ -146,7 +145,6 @@ out:
 void batadv_bonding_candidate_add(struct batadv_orig_node *orig_node,
 				  struct batadv_neigh_node *neigh_node)
 {
-	struct hlist_node *node;
 	struct batadv_neigh_node *tmp_neigh_node, *router = NULL;
 	uint8_t interference_candidate = 0;
 
@@ -169,7 +167,7 @@ void batadv_bonding_candidate_add(struct batadv_orig_node *orig_node,
 	 * interface. If we do, we won't select this candidate because of
 	 * possible interference.
 	 */
-	hlist_for_each_entry_rcu(tmp_neigh_node, node,
+	hlist_for_each_entry_rcu(tmp_neigh_node,
 				 &orig_node->neigh_list, list) {
 		if (tmp_neigh_node == neigh_node)
 			continue;
diff --git a/net/batman-adv/send.c b/net/batman-adv/send.c
index 80ca65fc89a1..a67cffde37ae 100644
--- a/net/batman-adv/send.c
+++ b/net/batman-adv/send.c
@@ -316,7 +316,7 @@ batadv_purge_outstanding_packets(struct batadv_priv *bat_priv,
 				 const struct batadv_hard_iface *hard_iface)
 {
 	struct batadv_forw_packet *forw_packet;
-	struct hlist_node *tmp_node, *safe_tmp_node;
+	struct hlist_node *safe_tmp_node;
 	bool pending;
 
 	if (hard_iface)
@@ -329,7 +329,7 @@ batadv_purge_outstanding_packets(struct batadv_priv *bat_priv,
 
 	/* free bcast list */
 	spin_lock_bh(&bat_priv->forw_bcast_list_lock);
-	hlist_for_each_entry_safe(forw_packet, tmp_node, safe_tmp_node,
+	hlist_for_each_entry_safe(forw_packet, safe_tmp_node,
 				  &bat_priv->forw_bcast_list, list) {
 		/* if purge_outstanding_packets() was called with an argument
 		 * we delete only packets belonging to the given interface
@@ -355,7 +355,7 @@ batadv_purge_outstanding_packets(struct batadv_priv *bat_priv,
 
 	/* free batman packet list */
 	spin_lock_bh(&bat_priv->forw_bat_list_lock);
-	hlist_for_each_entry_safe(forw_packet, tmp_node, safe_tmp_node,
+	hlist_for_each_entry_safe(forw_packet, safe_tmp_node,
 				  &bat_priv->forw_bat_list, list) {
 		/* if purge_outstanding_packets() was called with an argument
 		 * we delete only packets belonging to the given interface
diff --git a/net/batman-adv/translation-table.c b/net/batman-adv/translation-table.c
index d44672f4a349..98a66a021a60 100644
--- a/net/batman-adv/translation-table.c
+++ b/net/batman-adv/translation-table.c
@@ -56,7 +56,6 @@ static struct batadv_tt_common_entry *
 batadv_tt_hash_find(struct batadv_hashtable *hash, const void *data)
 {
 	struct hlist_head *head;
-	struct hlist_node *node;
 	struct batadv_tt_common_entry *tt_common_entry;
 	struct batadv_tt_common_entry *tt_common_entry_tmp = NULL;
 	uint32_t index;
@@ -68,7 +67,7 @@ batadv_tt_hash_find(struct batadv_hashtable *hash, const void *data)
 	head = &hash->table[index];
 
 	rcu_read_lock();
-	hlist_for_each_entry_rcu(tt_common_entry, node, head, hash_entry) {
+	hlist_for_each_entry_rcu(tt_common_entry, head, hash_entry) {
 		if (!batadv_compare_eth(tt_common_entry, data))
 			continue;
 
@@ -257,7 +256,6 @@ void batadv_tt_local_add(struct net_device *soft_iface, const uint8_t *addr,
 	struct batadv_tt_local_entry *tt_local;
 	struct batadv_tt_global_entry *tt_global;
 	struct hlist_head *head;
-	struct hlist_node *node;
 	struct batadv_tt_orig_list_entry *orig_entry;
 	int hash_added;
 	bool roamed_back = false;
@@ -339,7 +337,7 @@ check_roaming:
 		/* These node are probably going to update their tt table */
 		head = &tt_global->orig_list;
 		rcu_read_lock();
-		hlist_for_each_entry_rcu(orig_entry, node, head, list) {
+		hlist_for_each_entry_rcu(orig_entry, head, list) {
 			batadv_send_roam_adv(bat_priv, tt_global->common.addr,
 					     orig_entry->orig_node);
 		}
@@ -470,7 +468,6 @@ int batadv_tt_local_seq_print_text(struct seq_file *seq, void *offset)
 	struct batadv_tt_common_entry *tt_common_entry;
 	struct batadv_tt_local_entry *tt_local;
 	struct batadv_hard_iface *primary_if;
-	struct hlist_node *node;
 	struct hlist_head *head;
 	uint32_t i;
 	int last_seen_secs;
@@ -494,7 +491,7 @@ int batadv_tt_local_seq_print_text(struct seq_file *seq, void *offset)
 		head = &hash->table[i];
 
 		rcu_read_lock();
-		hlist_for_each_entry_rcu(tt_common_entry, node,
+		hlist_for_each_entry_rcu(tt_common_entry,
 					 head, hash_entry) {
 			tt_local = container_of(tt_common_entry,
 						struct batadv_tt_local_entry,
@@ -605,9 +602,9 @@ static void batadv_tt_local_purge_list(struct batadv_priv *bat_priv,
 {
 	struct batadv_tt_local_entry *tt_local_entry;
 	struct batadv_tt_common_entry *tt_common_entry;
-	struct hlist_node *node, *node_tmp;
+	struct hlist_node *node_tmp;
 
-	hlist_for_each_entry_safe(tt_common_entry, node, node_tmp, head,
+	hlist_for_each_entry_safe(tt_common_entry, node_tmp, head,
 				  hash_entry) {
 		tt_local_entry = container_of(tt_common_entry,
 					      struct batadv_tt_local_entry,
@@ -651,7 +648,7 @@ static void batadv_tt_local_table_free(struct batadv_priv *bat_priv)
 	spinlock_t *list_lock; /* protects write access to the hash lists */
 	struct batadv_tt_common_entry *tt_common_entry;
 	struct batadv_tt_local_entry *tt_local;
-	struct hlist_node *node, *node_tmp;
+	struct hlist_node *node_tmp;
 	struct hlist_head *head;
 	uint32_t i;
 
@@ -665,9 +662,9 @@ static void batadv_tt_local_table_free(struct batadv_priv *bat_priv)
 		list_lock = &hash->list_locks[i];
 
 		spin_lock_bh(list_lock);
-		hlist_for_each_entry_safe(tt_common_entry, node, node_tmp,
+		hlist_for_each_entry_safe(tt_common_entry, node_tmp,
 					  head, hash_entry) {
-			hlist_del_rcu(node);
+			hlist_del_rcu(&tt_common_entry->hash_entry);
 			tt_local = container_of(tt_common_entry,
 						struct batadv_tt_local_entry,
 						common);
@@ -724,11 +721,10 @@ batadv_tt_global_orig_entry_find(const struct batadv_tt_global_entry *entry,
 {
 	struct batadv_tt_orig_list_entry *tmp_orig_entry, *orig_entry = NULL;
 	const struct hlist_head *head;
-	struct hlist_node *node;
 
 	rcu_read_lock();
 	head = &entry->orig_list;
-	hlist_for_each_entry_rcu(tmp_orig_entry, node, head, list) {
+	hlist_for_each_entry_rcu(tmp_orig_entry, head, list) {
 		if (tmp_orig_entry->orig_node != orig_node)
 			continue;
 		if (!atomic_inc_not_zero(&tmp_orig_entry->refcount))
@@ -940,12 +936,11 @@ batadv_transtable_best_orig(struct batadv_tt_global_entry *tt_global_entry)
 {
 	struct batadv_neigh_node *router = NULL;
 	struct hlist_head *head;
-	struct hlist_node *node;
 	struct batadv_tt_orig_list_entry *orig_entry, *best_entry = NULL;
 	int best_tq = 0;
 
 	head = &tt_global_entry->orig_list;
-	hlist_for_each_entry_rcu(orig_entry, node, head, list) {
+	hlist_for_each_entry_rcu(orig_entry, head, list) {
 		router = batadv_orig_node_get_router(orig_entry->orig_node);
 		if (!router)
 			continue;
@@ -973,7 +968,6 @@ batadv_tt_global_print_entry(struct batadv_tt_global_entry *tt_global_entry,
 			     struct seq_file *seq)
 {
 	struct hlist_head *head;
-	struct hlist_node *node;
 	struct batadv_tt_orig_list_entry *orig_entry, *best_entry;
 	struct batadv_tt_common_entry *tt_common_entry;
 	uint16_t flags;
@@ -997,7 +991,7 @@ batadv_tt_global_print_entry(struct batadv_tt_global_entry *tt_global_entry,
 
 	head = &tt_global_entry->orig_list;
 
-	hlist_for_each_entry_rcu(orig_entry, node, head, list) {
+	hlist_for_each_entry_rcu(orig_entry, head, list) {
 		if (best_entry == orig_entry)
 			continue;
 
@@ -1020,7 +1014,6 @@ int batadv_tt_global_seq_print_text(struct seq_file *seq, void *offset)
 	struct batadv_tt_common_entry *tt_common_entry;
 	struct batadv_tt_global_entry *tt_global;
 	struct batadv_hard_iface *primary_if;
-	struct hlist_node *node;
 	struct hlist_head *head;
 	uint32_t i;
 
@@ -1039,7 +1032,7 @@ int batadv_tt_global_seq_print_text(struct seq_file *seq, void *offset)
 		head = &hash->table[i];
 
 		rcu_read_lock();
-		hlist_for_each_entry_rcu(tt_common_entry, node,
+		hlist_for_each_entry_rcu(tt_common_entry,
 					 head, hash_entry) {
 			tt_global = container_of(tt_common_entry,
 						 struct batadv_tt_global_entry,
@@ -1059,13 +1052,13 @@ static void
 batadv_tt_global_del_orig_list(struct batadv_tt_global_entry *tt_global_entry)
 {
 	struct hlist_head *head;
-	struct hlist_node *node, *safe;
+	struct hlist_node *safe;
 	struct batadv_tt_orig_list_entry *orig_entry;
 
 	spin_lock_bh(&tt_global_entry->list_lock);
 	head = &tt_global_entry->orig_list;
-	hlist_for_each_entry_safe(orig_entry, node, safe, head, list) {
-		hlist_del_rcu(node);
+	hlist_for_each_entry_safe(orig_entry, safe, head, list) {
+		hlist_del_rcu(&orig_entry->list);
 		batadv_tt_orig_list_entry_free_ref(orig_entry);
 	}
 	spin_unlock_bh(&tt_global_entry->list_lock);
@@ -1078,18 +1071,18 @@ batadv_tt_global_del_orig_entry(struct batadv_priv *bat_priv,
 				const char *message)
 {
 	struct hlist_head *head;
-	struct hlist_node *node, *safe;
+	struct hlist_node *safe;
 	struct batadv_tt_orig_list_entry *orig_entry;
 
 	spin_lock_bh(&tt_global_entry->list_lock);
 	head = &tt_global_entry->orig_list;
-	hlist_for_each_entry_safe(orig_entry, node, safe, head, list) {
+	hlist_for_each_entry_safe(orig_entry, safe, head, list) {
 		if (orig_entry->orig_node == orig_node) {
 			batadv_dbg(BATADV_DBG_TT, bat_priv,
 				   "Deleting %pM from global tt entry %pM: %s\n",
 				   orig_node->orig,
 				   tt_global_entry->common.addr, message);
-			hlist_del_rcu(node);
+			hlist_del_rcu(&orig_entry->list);
 			batadv_tt_orig_list_entry_free_ref(orig_entry);
 		}
 	}
@@ -1108,7 +1101,6 @@ batadv_tt_global_del_roaming(struct batadv_priv *bat_priv,
 {
 	bool last_entry = true;
 	struct hlist_head *head;
-	struct hlist_node *node;
 	struct batadv_tt_orig_list_entry *orig_entry;
 
 	/* no local entry exists, case 1:
@@ -1117,7 +1109,7 @@ batadv_tt_global_del_roaming(struct batadv_priv *bat_priv,
 
 	rcu_read_lock();
 	head = &tt_global_entry->orig_list;
-	hlist_for_each_entry_rcu(orig_entry, node, head, list) {
+	hlist_for_each_entry_rcu(orig_entry, head, list) {
 		if (orig_entry->orig_node != orig_node) {
 			last_entry = false;
 			break;
@@ -1202,7 +1194,7 @@ void batadv_tt_global_del_orig(struct batadv_priv *bat_priv,
 	struct batadv_tt_common_entry *tt_common_entry;
 	uint32_t i;
 	struct batadv_hashtable *hash = bat_priv->tt.global_hash;
-	struct hlist_node *node, *safe;
+	struct hlist_node *safe;
 	struct hlist_head *head;
 	spinlock_t *list_lock; /* protects write access to the hash lists */
 
@@ -1214,7 +1206,7 @@ void batadv_tt_global_del_orig(struct batadv_priv *bat_priv,
 		list_lock = &hash->list_locks[i];
 
 		spin_lock_bh(list_lock);
-		hlist_for_each_entry_safe(tt_common_entry, node, safe,
+		hlist_for_each_entry_safe(tt_common_entry, safe,
 					  head, hash_entry) {
 			tt_global = container_of(tt_common_entry,
 						 struct batadv_tt_global_entry,
@@ -1227,7 +1219,7 @@ void batadv_tt_global_del_orig(struct batadv_priv *bat_priv,
 				batadv_dbg(BATADV_DBG_TT, bat_priv,
 					   "Deleting global tt entry %pM: %s\n",
 					   tt_global->common.addr, message);
-				hlist_del_rcu(node);
+				hlist_del_rcu(&tt_common_entry->hash_entry);
 				batadv_tt_global_entry_free_ref(tt_global);
 			}
 		}
@@ -1262,7 +1254,7 @@ static void batadv_tt_global_purge(struct batadv_priv *bat_priv)
 {
 	struct batadv_hashtable *hash = bat_priv->tt.global_hash;
 	struct hlist_head *head;
-	struct hlist_node *node, *node_tmp;
+	struct hlist_node *node_tmp;
 	spinlock_t *list_lock; /* protects write access to the hash lists */
 	uint32_t i;
 	char *msg = NULL;
@@ -1274,7 +1266,7 @@ static void batadv_tt_global_purge(struct batadv_priv *bat_priv)
 		list_lock = &hash->list_locks[i];
 
 		spin_lock_bh(list_lock);
-		hlist_for_each_entry_safe(tt_common, node, node_tmp, head,
+		hlist_for_each_entry_safe(tt_common, node_tmp, head,
 					  hash_entry) {
 			tt_global = container_of(tt_common,
 						 struct batadv_tt_global_entry,
@@ -1287,7 +1279,7 @@ static void batadv_tt_global_purge(struct batadv_priv *bat_priv)
 				   "Deleting global tt entry (%pM): %s\n",
 				   tt_global->common.addr, msg);
 
-			hlist_del_rcu(node);
+			hlist_del_rcu(&tt_common->hash_entry);
 
 			batadv_tt_global_entry_free_ref(tt_global);
 		}
@@ -1301,7 +1293,7 @@ static void batadv_tt_global_table_free(struct batadv_priv *bat_priv)
 	spinlock_t *list_lock; /* protects write access to the hash lists */
 	struct batadv_tt_common_entry *tt_common_entry;
 	struct batadv_tt_global_entry *tt_global;
-	struct hlist_node *node, *node_tmp;
+	struct hlist_node *node_tmp;
 	struct hlist_head *head;
 	uint32_t i;
 
@@ -1315,9 +1307,9 @@ static void batadv_tt_global_table_free(struct batadv_priv *bat_priv)
 		list_lock = &hash->list_locks[i];
 
 		spin_lock_bh(list_lock);
-		hlist_for_each_entry_safe(tt_common_entry, node, node_tmp,
+		hlist_for_each_entry_safe(tt_common_entry, node_tmp,
 					  head, hash_entry) {
-			hlist_del_rcu(node);
+			hlist_del_rcu(&tt_common_entry->hash_entry);
 			tt_global = container_of(tt_common_entry,
 						 struct batadv_tt_global_entry,
 						 common);
@@ -1397,7 +1389,6 @@ static uint16_t batadv_tt_global_crc(struct batadv_priv *bat_priv,
 	struct batadv_hashtable *hash = bat_priv->tt.global_hash;
 	struct batadv_tt_common_entry *tt_common;
 	struct batadv_tt_global_entry *tt_global;
-	struct hlist_node *node;
 	struct hlist_head *head;
 	uint32_t i;
 	int j;
@@ -1406,7 +1397,7 @@ static uint16_t batadv_tt_global_crc(struct batadv_priv *bat_priv,
 		head = &hash->table[i];
 
 		rcu_read_lock();
-		hlist_for_each_entry_rcu(tt_common, node, head, hash_entry) {
+		hlist_for_each_entry_rcu(tt_common, head, hash_entry) {
 			tt_global = container_of(tt_common,
 						 struct batadv_tt_global_entry,
 						 common);
@@ -1449,7 +1440,6 @@ static uint16_t batadv_tt_local_crc(struct batadv_priv *bat_priv)
 	uint16_t total = 0, total_one;
 	struct batadv_hashtable *hash = bat_priv->tt.local_hash;
 	struct batadv_tt_common_entry *tt_common;
-	struct hlist_node *node;
 	struct hlist_head *head;
 	uint32_t i;
 	int j;
@@ -1458,7 +1448,7 @@ static uint16_t batadv_tt_local_crc(struct batadv_priv *bat_priv)
 		head = &hash->table[i];
 
 		rcu_read_lock();
-		hlist_for_each_entry_rcu(tt_common, node, head, hash_entry) {
+		hlist_for_each_entry_rcu(tt_common, head, hash_entry) {
 			/* not yet committed clients have not to be taken into
 			 * account while computing the CRC
 			 */
@@ -1597,7 +1587,6 @@ batadv_tt_response_fill_table(uint16_t tt_len, uint8_t ttvn,
 	struct batadv_tt_common_entry *tt_common_entry;
 	struct batadv_tt_query_packet *tt_response;
 	struct batadv_tt_change *tt_change;
-	struct hlist_node *node;
 	struct hlist_head *head;
 	struct sk_buff *skb = NULL;
 	uint16_t tt_tot, tt_count;
@@ -1627,7 +1616,7 @@ batadv_tt_response_fill_table(uint16_t tt_len, uint8_t ttvn,
 	for (i = 0; i < hash->size; i++) {
 		head = &hash->table[i];
 
-		hlist_for_each_entry_rcu(tt_common_entry, node,
+		hlist_for_each_entry_rcu(tt_common_entry,
 					 head, hash_entry) {
 			if (tt_count == tt_tot)
 				break;
@@ -2307,7 +2296,6 @@ static uint16_t batadv_tt_set_flags(struct batadv_hashtable *hash,
 	uint32_t i;
 	uint16_t changed_num = 0;
 	struct hlist_head *head;
-	struct hlist_node *node;
 	struct batadv_tt_common_entry *tt_common_entry;
 
 	if (!hash)
@@ -2317,7 +2305,7 @@ static uint16_t batadv_tt_set_flags(struct batadv_hashtable *hash,
 		head = &hash->table[i];
 
 		rcu_read_lock();
-		hlist_for_each_entry_rcu(tt_common_entry, node,
+		hlist_for_each_entry_rcu(tt_common_entry,
 					 head, hash_entry) {
 			if (enable) {
 				if ((tt_common_entry->flags & flags) == flags)
@@ -2342,7 +2330,7 @@ static void batadv_tt_local_purge_pending_clients(struct batadv_priv *bat_priv)
 	struct batadv_hashtable *hash = bat_priv->tt.local_hash;
 	struct batadv_tt_common_entry *tt_common;
 	struct batadv_tt_local_entry *tt_local;
-	struct hlist_node *node, *node_tmp;
+	struct hlist_node *node_tmp;
 	struct hlist_head *head;
 	spinlock_t *list_lock; /* protects write access to the hash lists */
 	uint32_t i;
@@ -2355,7 +2343,7 @@ static void batadv_tt_local_purge_pending_clients(struct batadv_priv *bat_priv)
 		list_lock = &hash->list_locks[i];
 
 		spin_lock_bh(list_lock);
-		hlist_for_each_entry_safe(tt_common, node, node_tmp, head,
+		hlist_for_each_entry_safe(tt_common, node_tmp, head,
 					  hash_entry) {
 			if (!(tt_common->flags & BATADV_TT_CLIENT_PENDING))
 				continue;
@@ -2365,7 +2353,7 @@ static void batadv_tt_local_purge_pending_clients(struct batadv_priv *bat_priv)
 				   tt_common->addr);
 
 			atomic_dec(&bat_priv->tt.local_entry_num);
-			hlist_del_rcu(node);
+			hlist_del_rcu(&tt_common->hash_entry);
 			tt_local = container_of(tt_common,
 						struct batadv_tt_local_entry,
 						common);
diff --git a/net/batman-adv/vis.c b/net/batman-adv/vis.c
index 22d2785177d1..c053244b97bd 100644
--- a/net/batman-adv/vis.c
+++ b/net/batman-adv/vis.c
@@ -97,7 +97,6 @@ batadv_vis_hash_find(struct batadv_priv *bat_priv, const void *data)
 {
 	struct batadv_hashtable *hash = bat_priv->vis.hash;
 	struct hlist_head *head;
-	struct hlist_node *node;
 	struct batadv_vis_info *vis_info, *vis_info_tmp = NULL;
 	uint32_t index;
 
@@ -108,8 +107,8 @@ batadv_vis_hash_find(struct batadv_priv *bat_priv, const void *data)
 	head = &hash->table[index];
 
 	rcu_read_lock();
-	hlist_for_each_entry_rcu(vis_info, node, head, hash_entry) {
-		if (!batadv_vis_info_cmp(node, data))
+	hlist_for_each_entry_rcu(vis_info, head, hash_entry) {
+		if (!batadv_vis_info_cmp(&vis_info->hash_entry, data))
 			continue;
 
 		vis_info_tmp = vis_info;
@@ -128,9 +127,8 @@ static void batadv_vis_data_insert_interface(const uint8_t *interface,
 					     bool primary)
 {
 	struct batadv_vis_if_list_entry *entry;
-	struct hlist_node *pos;
 
-	hlist_for_each_entry(entry, pos, if_list, list) {
+	hlist_for_each_entry(entry, if_list, list) {
 		if (batadv_compare_eth(entry->addr, interface))
 			return;
 	}
@@ -148,9 +146,8 @@ static void batadv_vis_data_read_prim_sec(struct seq_file *seq,
 					  const struct hlist_head *if_list)
 {
 	struct batadv_vis_if_list_entry *entry;
-	struct hlist_node *pos;
 
-	hlist_for_each_entry(entry, pos, if_list, list) {
+	hlist_for_each_entry(entry, if_list, list) {
 		if (entry->primary)
 			seq_printf(seq, "PRIMARY, ");
 		else
@@ -198,9 +195,8 @@ static void batadv_vis_data_read_entries(struct seq_file *seq,
 {
 	int i;
 	struct batadv_vis_if_list_entry *entry;
-	struct hlist_node *pos;
 
-	hlist_for_each_entry(entry, pos, list, list) {
+	hlist_for_each_entry(entry, list, list) {
 		seq_printf(seq, "%pM,", entry->addr);
 
 		for (i = 0; i < packet->entries; i++)
@@ -218,17 +214,16 @@ static void batadv_vis_data_read_entries(struct seq_file *seq,
 static void batadv_vis_seq_print_text_bucket(struct seq_file *seq,
 					     const struct hlist_head *head)
 {
-	struct hlist_node *node;
 	struct batadv_vis_info *info;
 	struct batadv_vis_packet *packet;
 	uint8_t *entries_pos;
 	struct batadv_vis_info_entry *entries;
 	struct batadv_vis_if_list_entry *entry;
-	struct hlist_node *pos, *n;
+	struct hlist_node *n;
 
 	HLIST_HEAD(vis_if_list);
 
-	hlist_for_each_entry_rcu(info, node, head, hash_entry) {
+	hlist_for_each_entry_rcu(info, head, hash_entry) {
 		packet = (struct batadv_vis_packet *)info->skb_packet->data;
 		entries_pos = (uint8_t *)packet + sizeof(*packet);
 		entries = (struct batadv_vis_info_entry *)entries_pos;
@@ -240,7 +235,7 @@ static void batadv_vis_seq_print_text_bucket(struct seq_file *seq,
 		batadv_vis_data_read_entries(seq, &vis_if_list, packet,
 					     entries);
 
-		hlist_for_each_entry_safe(entry, pos, n, &vis_if_list, list) {
+		hlist_for_each_entry_safe(entry, n, &vis_if_list, list) {
 			hlist_del(&entry->list);
 			kfree(entry);
 		}
@@ -519,7 +514,6 @@ static int batadv_find_best_vis_server(struct batadv_priv *bat_priv,
 {
 	struct batadv_hashtable *hash = bat_priv->orig_hash;
 	struct batadv_neigh_node *router;
-	struct hlist_node *node;
 	struct hlist_head *head;
 	struct batadv_orig_node *orig_node;
 	struct batadv_vis_packet *packet;
@@ -532,7 +526,7 @@ static int batadv_find_best_vis_server(struct batadv_priv *bat_priv,
 		head = &hash->table[i];
 
 		rcu_read_lock();
-		hlist_for_each_entry_rcu(orig_node, node, head, hash_entry) {
+		hlist_for_each_entry_rcu(orig_node, head, hash_entry) {
 			router = batadv_orig_node_get_router(orig_node);
 			if (!router)
 				continue;
@@ -571,7 +565,6 @@ static bool batadv_vis_packet_full(const struct batadv_vis_info *info)
 static int batadv_generate_vis_packet(struct batadv_priv *bat_priv)
 {
 	struct batadv_hashtable *hash = bat_priv->orig_hash;
-	struct hlist_node *node;
 	struct hlist_head *head;
 	struct batadv_orig_node *orig_node;
 	struct batadv_neigh_node *router;
@@ -605,7 +598,7 @@ static int batadv_generate_vis_packet(struct batadv_priv *bat_priv)
 		head = &hash->table[i];
 
 		rcu_read_lock();
-		hlist_for_each_entry_rcu(orig_node, node, head, hash_entry) {
+		hlist_for_each_entry_rcu(orig_node, head, hash_entry) {
 			router = batadv_orig_node_get_router(orig_node);
 			if (!router)
 				continue;
@@ -644,7 +637,7 @@ next:
 		head = &hash->table[i];
 
 		rcu_read_lock();
-		hlist_for_each_entry_rcu(tt_common_entry, node, head,
+		hlist_for_each_entry_rcu(tt_common_entry, head,
 					 hash_entry) {
 			packet_pos = skb_put(info->skb_packet, sizeof(*entry));
 			entry = (struct batadv_vis_info_entry *)packet_pos;
@@ -673,14 +666,14 @@ static void batadv_purge_vis_packets(struct batadv_priv *bat_priv)
 {
 	uint32_t i;
 	struct batadv_hashtable *hash = bat_priv->vis.hash;
-	struct hlist_node *node, *node_tmp;
+	struct hlist_node *node_tmp;
 	struct hlist_head *head;
 	struct batadv_vis_info *info;
 
 	for (i = 0; i < hash->size; i++) {
 		head = &hash->table[i];
 
-		hlist_for_each_entry_safe(info, node, node_tmp,
+		hlist_for_each_entry_safe(info, node_tmp,
 					  head, hash_entry) {
 			/* never purge own data. */
 			if (info == bat_priv->vis.my_info)
@@ -688,7 +681,7 @@ static void batadv_purge_vis_packets(struct batadv_priv *bat_priv)
 
 			if (batadv_has_timed_out(info->first_seen,
 						 BATADV_VIS_TIMEOUT)) {
-				hlist_del(node);
+				hlist_del(&info->hash_entry);
 				batadv_send_list_del(info);
 				kref_put(&info->refcount, batadv_free_info);
 			}
@@ -700,7 +693,6 @@ static void batadv_broadcast_vis_packet(struct batadv_priv *bat_priv,
 					struct batadv_vis_info *info)
 {
 	struct batadv_hashtable *hash = bat_priv->orig_hash;
-	struct hlist_node *node;
 	struct hlist_head *head;
 	struct batadv_orig_node *orig_node;
 	struct batadv_vis_packet *packet;
@@ -715,7 +707,7 @@ static void batadv_broadcast_vis_packet(struct batadv_priv *bat_priv,
 		head = &hash->table[i];
 
 		rcu_read_lock();
-		hlist_for_each_entry_rcu(orig_node, node, head, hash_entry) {
+		hlist_for_each_entry_rcu(orig_node, head, hash_entry) {
 			/* if it's a vis server and reachable, send it. */
 			if (!(orig_node->flags & BATADV_VIS_SERVER))
 				continue;
diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c
index 07f073935811..6a93614f2c49 100644
--- a/net/bluetooth/hci_sock.c
+++ b/net/bluetooth/hci_sock.c
@@ -70,14 +70,13 @@ static struct bt_sock_list hci_sk_list = {
 void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb)
 {
 	struct sock *sk;
-	struct hlist_node *node;
 	struct sk_buff *skb_copy = NULL;
 
 	BT_DBG("hdev %p len %d", hdev, skb->len);
 
 	read_lock(&hci_sk_list.lock);
 
-	sk_for_each(sk, node, &hci_sk_list.head) {
+	sk_for_each(sk, &hci_sk_list.head) {
 		struct hci_filter *flt;
 		struct sk_buff *nskb;
 
@@ -142,13 +141,12 @@ void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb)
 void hci_send_to_control(struct sk_buff *skb, struct sock *skip_sk)
 {
 	struct sock *sk;
-	struct hlist_node *node;
 
 	BT_DBG("len %d", skb->len);
 
 	read_lock(&hci_sk_list.lock);
 
-	sk_for_each(sk, node, &hci_sk_list.head) {
+	sk_for_each(sk, &hci_sk_list.head) {
 		struct sk_buff *nskb;
 
 		/* Skip the original socket */
@@ -176,7 +174,6 @@ void hci_send_to_control(struct sk_buff *skb, struct sock *skip_sk)
 void hci_send_to_monitor(struct hci_dev *hdev, struct sk_buff *skb)
 {
 	struct sock *sk;
-	struct hlist_node *node;
 	struct sk_buff *skb_copy = NULL;
 	__le16 opcode;
 
@@ -210,7 +207,7 @@ void hci_send_to_monitor(struct hci_dev *hdev, struct sk_buff *skb)
 
 	read_lock(&hci_sk_list.lock);
 
-	sk_for_each(sk, node, &hci_sk_list.head) {
+	sk_for_each(sk, &hci_sk_list.head) {
 		struct sk_buff *nskb;
 
 		if (sk->sk_state != BT_BOUND)
@@ -251,13 +248,12 @@ void hci_send_to_monitor(struct hci_dev *hdev, struct sk_buff *skb)
 static void send_monitor_event(struct sk_buff *skb)
 {
 	struct sock *sk;
-	struct hlist_node *node;
 
 	BT_DBG("len %d", skb->len);
 
 	read_lock(&hci_sk_list.lock);
 
-	sk_for_each(sk, node, &hci_sk_list.head) {
+	sk_for_each(sk, &hci_sk_list.head) {
 		struct sk_buff *nskb;
 
 		if (sk->sk_state != BT_BOUND)
@@ -393,11 +389,10 @@ void hci_sock_dev_event(struct hci_dev *hdev, int event)
 
 	if (event == HCI_DEV_UNREG) {
 		struct sock *sk;
-		struct hlist_node *node;
 
 		/* Detach sockets from device */
 		read_lock(&hci_sk_list.lock);
-		sk_for_each(sk, node, &hci_sk_list.head) {
+		sk_for_each(sk, &hci_sk_list.head) {
 			bh_lock_sock_nested(sk);
 			if (hci_pi(sk)->hdev == hdev) {
 				hci_pi(sk)->hdev = NULL;
diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c
index ce3f6658f4b2..c23bae86263b 100644
--- a/net/bluetooth/rfcomm/sock.c
+++ b/net/bluetooth/rfcomm/sock.c
@@ -107,15 +107,14 @@ static void rfcomm_sk_state_change(struct rfcomm_dlc *d, int err)
 static struct sock *__rfcomm_get_sock_by_addr(u8 channel, bdaddr_t *src)
 {
 	struct sock *sk = NULL;
-	struct hlist_node *node;
 
-	sk_for_each(sk, node, &rfcomm_sk_list.head) {
+	sk_for_each(sk, &rfcomm_sk_list.head) {
 		if (rfcomm_pi(sk)->channel == channel &&
 		    !bacmp(&bt_sk(sk)->src, src))
 			break;
 	}
 
-	return node ? sk : NULL;
+	return sk ? sk : NULL;
 }
 
 /* Find socket with channel and source bdaddr.
@@ -124,11 +123,10 @@ static struct sock *__rfcomm_get_sock_by_addr(u8 channel, bdaddr_t *src)
 static struct sock *rfcomm_get_sock_by_channel(int state, u8 channel, bdaddr_t *src)
 {
 	struct sock *sk = NULL, *sk1 = NULL;
-	struct hlist_node *node;
 
 	read_lock(&rfcomm_sk_list.lock);
 
-	sk_for_each(sk, node, &rfcomm_sk_list.head) {
+	sk_for_each(sk, &rfcomm_sk_list.head) {
 		if (state && sk->sk_state != state)
 			continue;
 
@@ -145,7 +143,7 @@ static struct sock *rfcomm_get_sock_by_channel(int state, u8 channel, bdaddr_t *
 
 	read_unlock(&rfcomm_sk_list.lock);
 
-	return node ? sk : sk1;
+	return sk ? sk : sk1;
 }
 
 static void rfcomm_sock_destruct(struct sock *sk)
@@ -970,11 +968,10 @@ done:
 static int rfcomm_sock_debugfs_show(struct seq_file *f, void *p)
 {
 	struct sock *sk;
-	struct hlist_node *node;
 
 	read_lock(&rfcomm_sk_list.lock);
 
-	sk_for_each(sk, node, &rfcomm_sk_list.head) {
+	sk_for_each(sk, &rfcomm_sk_list.head) {
 		seq_printf(f, "%pMR %pMR %d %d\n",
 			   &bt_sk(sk)->src, &bt_sk(sk)->dst,
 			   sk->sk_state, rfcomm_pi(sk)->channel);
diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c
index b5178d62064e..79d87d8d4f51 100644
--- a/net/bluetooth/sco.c
+++ b/net/bluetooth/sco.c
@@ -259,10 +259,9 @@ drop:
 /* -------- Socket interface ---------- */
 static struct sock *__sco_get_sock_listen_by_addr(bdaddr_t *ba)
 {
-	struct hlist_node *node;
 	struct sock *sk;
 
-	sk_for_each(sk, node, &sco_sk_list.head) {
+	sk_for_each(sk, &sco_sk_list.head) {
 		if (sk->sk_state != BT_LISTEN)
 			continue;
 
@@ -279,11 +278,10 @@ static struct sock *__sco_get_sock_listen_by_addr(bdaddr_t *ba)
 static struct sock *sco_get_sock_listen(bdaddr_t *src)
 {
 	struct sock *sk = NULL, *sk1 = NULL;
-	struct hlist_node *node;
 
 	read_lock(&sco_sk_list.lock);
 
-	sk_for_each(sk, node, &sco_sk_list.head) {
+	sk_for_each(sk, &sco_sk_list.head) {
 		if (sk->sk_state != BT_LISTEN)
 			continue;
 
@@ -298,7 +296,7 @@ static struct sock *sco_get_sock_listen(bdaddr_t *src)
 
 	read_unlock(&sco_sk_list.lock);
 
-	return node ? sk : sk1;
+	return sk ? sk : sk1;
 }
 
 static void sco_sock_destruct(struct sock *sk)
@@ -951,14 +949,13 @@ static void sco_conn_ready(struct sco_conn *conn)
 int sco_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, __u8 *flags)
 {
 	struct sock *sk;
-	struct hlist_node *node;
 	int lm = 0;
 
 	BT_DBG("hdev %s, bdaddr %pMR", hdev->name, bdaddr);
 
 	/* Find listening sockets */
 	read_lock(&sco_sk_list.lock);
-	sk_for_each(sk, node, &sco_sk_list.head) {
+	sk_for_each(sk, &sco_sk_list.head) {
 		if (sk->sk_state != BT_LISTEN)
 			continue;
 
@@ -1018,11 +1015,10 @@ drop:
 static int sco_debugfs_show(struct seq_file *f, void *p)
 {
 	struct sock *sk;
-	struct hlist_node *node;
 
 	read_lock(&sco_sk_list.lock);
 
-	sk_for_each(sk, node, &sco_sk_list.head) {
+	sk_for_each(sk, &sco_sk_list.head) {
 		seq_printf(f, "%pMR %pMR %d\n", &bt_sk(sk)->src,
 			   &bt_sk(sk)->dst, sk->sk_state);
 	}
diff --git a/net/bridge/br_fdb.c b/net/bridge/br_fdb.c
index 8117900af4de..b0812c91c0f0 100644
--- a/net/bridge/br_fdb.c
+++ b/net/bridge/br_fdb.c
@@ -181,9 +181,9 @@ void br_fdb_cleanup(unsigned long _data)
 	spin_lock(&br->hash_lock);
 	for (i = 0; i < BR_HASH_SIZE; i++) {
 		struct net_bridge_fdb_entry *f;
-		struct hlist_node *h, *n;
+		struct hlist_node *n;
 
-		hlist_for_each_entry_safe(f, h, n, &br->hash[i], hlist) {
+		hlist_for_each_entry_safe(f, n, &br->hash[i], hlist) {
 			unsigned long this_timer;
 			if (f->is_static)
 				continue;
@@ -207,8 +207,8 @@ void br_fdb_flush(struct net_bridge *br)
 	spin_lock_bh(&br->hash_lock);
 	for (i = 0; i < BR_HASH_SIZE; i++) {
 		struct net_bridge_fdb_entry *f;
-		struct hlist_node *h, *n;
-		hlist_for_each_entry_safe(f, h, n, &br->hash[i], hlist) {
+		struct hlist_node *n;
+		hlist_for_each_entry_safe(f, n, &br->hash[i], hlist) {
 			if (!f->is_static)
 				fdb_delete(br, f);
 		}
@@ -266,10 +266,9 @@ struct net_bridge_fdb_entry *__br_fdb_get(struct net_bridge *br,
 					  const unsigned char *addr,
 					  __u16 vid)
 {
-	struct hlist_node *h;
 	struct net_bridge_fdb_entry *fdb;
 
-	hlist_for_each_entry_rcu(fdb, h,
+	hlist_for_each_entry_rcu(fdb,
 				 &br->hash[br_mac_hash(addr, vid)], hlist) {
 		if (ether_addr_equal(fdb->addr.addr, addr) &&
 		    fdb->vlan_id == vid) {
@@ -315,14 +314,13 @@ int br_fdb_fillbuf(struct net_bridge *br, void *buf,
 {
 	struct __fdb_entry *fe = buf;
 	int i, num = 0;
-	struct hlist_node *h;
 	struct net_bridge_fdb_entry *f;
 
 	memset(buf, 0, maxnum*sizeof(struct __fdb_entry));
 
 	rcu_read_lock();
 	for (i = 0; i < BR_HASH_SIZE; i++) {
-		hlist_for_each_entry_rcu(f, h, &br->hash[i], hlist) {
+		hlist_for_each_entry_rcu(f, &br->hash[i], hlist) {
 			if (num >= maxnum)
 				goto out;
 
@@ -363,10 +361,9 @@ static struct net_bridge_fdb_entry *fdb_find(struct hlist_head *head,
 					     const unsigned char *addr,
 					     __u16 vid)
 {
-	struct hlist_node *h;
 	struct net_bridge_fdb_entry *fdb;
 
-	hlist_for_each_entry(fdb, h, head, hlist) {
+	hlist_for_each_entry(fdb, head, hlist) {
 		if (ether_addr_equal(fdb->addr.addr, addr) &&
 		    fdb->vlan_id == vid)
 			return fdb;
@@ -378,10 +375,9 @@ static struct net_bridge_fdb_entry *fdb_find_rcu(struct hlist_head *head,
 						 const unsigned char *addr,
 						 __u16 vid)
 {
-	struct hlist_node *h;
 	struct net_bridge_fdb_entry *fdb;
 
-	hlist_for_each_entry_rcu(fdb, h, head, hlist) {
+	hlist_for_each_entry_rcu(fdb, head, hlist) {
 		if (ether_addr_equal(fdb->addr.addr, addr) &&
 		    fdb->vlan_id == vid)
 			return fdb;
@@ -593,10 +589,9 @@ int br_fdb_dump(struct sk_buff *skb,
 		goto out;
 
 	for (i = 0; i < BR_HASH_SIZE; i++) {
-		struct hlist_node *h;
 		struct net_bridge_fdb_entry *f;
 
-		hlist_for_each_entry_rcu(f, h, &br->hash[i], hlist) {
+		hlist_for_each_entry_rcu(f, &br->hash[i], hlist) {
 			if (idx < cb->args[0])
 				goto skip;
 
diff --git a/net/bridge/br_mdb.c b/net/bridge/br_mdb.c
index 38991e03646d..9f97b850fc65 100644
--- a/net/bridge/br_mdb.c
+++ b/net/bridge/br_mdb.c
@@ -18,7 +18,6 @@ static int br_rports_fill_info(struct sk_buff *skb, struct netlink_callback *cb,
 {
 	struct net_bridge *br = netdev_priv(dev);
 	struct net_bridge_port *p;
-	struct hlist_node *n;
 	struct nlattr *nest;
 
 	if (!br->multicast_router || hlist_empty(&br->router_list))
@@ -28,7 +27,7 @@ static int br_rports_fill_info(struct sk_buff *skb, struct netlink_callback *cb,
 	if (nest == NULL)
 		return -EMSGSIZE;
 
-	hlist_for_each_entry_rcu(p, n, &br->router_list, rlist) {
+	hlist_for_each_entry_rcu(p, &br->router_list, rlist) {
 		if (p && nla_put_u32(skb, MDBA_ROUTER_PORT, p->dev->ifindex))
 			goto fail;
 	}
@@ -61,12 +60,11 @@ static int br_mdb_fill_info(struct sk_buff *skb, struct netlink_callback *cb,
 		return -EMSGSIZE;
 
 	for (i = 0; i < mdb->max; i++) {
-		struct hlist_node *h;
 		struct net_bridge_mdb_entry *mp;
 		struct net_bridge_port_group *p, **pp;
 		struct net_bridge_port *port;
 
-		hlist_for_each_entry_rcu(mp, h, &mdb->mhash[i], hlist[mdb->ver]) {
+		hlist_for_each_entry_rcu(mp, &mdb->mhash[i], hlist[mdb->ver]) {
 			if (idx < s_idx)
 				goto skip;
 
diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
index 7d886b0a8b7b..10e6fce1bb62 100644
--- a/net/bridge/br_multicast.c
+++ b/net/bridge/br_multicast.c
@@ -86,9 +86,8 @@ static struct net_bridge_mdb_entry *__br_mdb_ip_get(
 	struct net_bridge_mdb_htable *mdb, struct br_ip *dst, int hash)
 {
 	struct net_bridge_mdb_entry *mp;
-	struct hlist_node *p;
 
-	hlist_for_each_entry_rcu(mp, p, &mdb->mhash[hash], hlist[mdb->ver]) {
+	hlist_for_each_entry_rcu(mp, &mdb->mhash[hash], hlist[mdb->ver]) {
 		if (br_ip_equal(&mp->addr, dst))
 			return mp;
 	}
@@ -178,13 +177,12 @@ static int br_mdb_copy(struct net_bridge_mdb_htable *new,
 		       int elasticity)
 {
 	struct net_bridge_mdb_entry *mp;
-	struct hlist_node *p;
 	int maxlen;
 	int len;
 	int i;
 
 	for (i = 0; i < old->max; i++)
-		hlist_for_each_entry(mp, p, &old->mhash[i], hlist[old->ver])
+		hlist_for_each_entry(mp, &old->mhash[i], hlist[old->ver])
 			hlist_add_head(&mp->hlist[new->ver],
 				       &new->mhash[br_ip_hash(new, &mp->addr)]);
 
@@ -194,7 +192,7 @@ static int br_mdb_copy(struct net_bridge_mdb_htable *new,
 	maxlen = 0;
 	for (i = 0; i < new->max; i++) {
 		len = 0;
-		hlist_for_each_entry(mp, p, &new->mhash[i], hlist[new->ver])
+		hlist_for_each_entry(mp, &new->mhash[i], hlist[new->ver])
 			len++;
 		if (len > maxlen)
 			maxlen = len;
@@ -510,14 +508,13 @@ static struct net_bridge_mdb_entry *br_multicast_get_group(
 {
 	struct net_bridge_mdb_htable *mdb;
 	struct net_bridge_mdb_entry *mp;
-	struct hlist_node *p;
 	unsigned int count = 0;
 	unsigned int max;
 	int elasticity;
 	int err;
 
 	mdb = rcu_dereference_protected(br->mdb, 1);
-	hlist_for_each_entry(mp, p, &mdb->mhash[hash], hlist[mdb->ver]) {
+	hlist_for_each_entry(mp, &mdb->mhash[hash], hlist[mdb->ver]) {
 		count++;
 		if (unlikely(br_ip_equal(group, &mp->addr)))
 			return mp;
@@ -882,10 +879,10 @@ void br_multicast_disable_port(struct net_bridge_port *port)
 {
 	struct net_bridge *br = port->br;
 	struct net_bridge_port_group *pg;
-	struct hlist_node *p, *n;
+	struct hlist_node *n;
 
 	spin_lock(&br->multicast_lock);
-	hlist_for_each_entry_safe(pg, p, n, &port->mglist, mglist)
+	hlist_for_each_entry_safe(pg, n, &port->mglist, mglist)
 		br_multicast_del_pg(br, pg);
 
 	if (!hlist_unhashed(&port->rlist))
@@ -1025,12 +1022,12 @@ static void br_multicast_add_router(struct net_bridge *br,
 				    struct net_bridge_port *port)
 {
 	struct net_bridge_port *p;
-	struct hlist_node *n, *slot = NULL;
+	struct hlist_node *slot = NULL;
 
-	hlist_for_each_entry(p, n, &br->router_list, rlist) {
+	hlist_for_each_entry(p, &br->router_list, rlist) {
1031 if ((unsigned long) port >= (unsigned long) p) 1028 if ((unsigned long) port >= (unsigned long) p)
1032 break; 1029 break;
1033 slot = n; 1030 slot = &p->rlist;
1034 } 1031 }
1035 1032
1036 if (slot) 1033 if (slot)
@@ -1653,7 +1650,7 @@ void br_multicast_stop(struct net_bridge *br)
1653{ 1650{
1654 struct net_bridge_mdb_htable *mdb; 1651 struct net_bridge_mdb_htable *mdb;
1655 struct net_bridge_mdb_entry *mp; 1652 struct net_bridge_mdb_entry *mp;
1656 struct hlist_node *p, *n; 1653 struct hlist_node *n;
1657 u32 ver; 1654 u32 ver;
1658 int i; 1655 int i;
1659 1656
@@ -1670,7 +1667,7 @@ void br_multicast_stop(struct net_bridge *br)
1670 1667
1671 ver = mdb->ver; 1668 ver = mdb->ver;
1672 for (i = 0; i < mdb->max; i++) { 1669 for (i = 0; i < mdb->max; i++) {
1673 hlist_for_each_entry_safe(mp, p, n, &mdb->mhash[i], 1670 hlist_for_each_entry_safe(mp, n, &mdb->mhash[i],
1674 hlist[ver]) { 1671 hlist[ver]) {
1675 del_timer(&mp->timer); 1672 del_timer(&mp->timer);
1676 call_rcu_bh(&mp->rcu, br_multicast_free_group); 1673 call_rcu_bh(&mp->rcu, br_multicast_free_group);
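
[note] br_multicast_add_router() is one of the few non-mechanical spots: it used the cursor to remember an insertion slot (slot = n), and with the cursor gone it must take the address of the entry's own hlist_node member instead (slot = &p->rlist). The same idiom in isolation, a sketch with hypothetical item/link names, using hlist_add_after() as it existed in this era:

    struct item {
            unsigned long order;
            struct hlist_node link;
    };

    /* Keep the chain sorted by ->order, descending. */
    static void insert_sorted(struct hlist_head *head, struct item *new)
    {
            struct hlist_node *slot = NULL;
            struct item *it;

            hlist_for_each_entry(it, head, link) {
                    if (new->order >= it->order)
                            break;
                    slot = &it->link;       /* was: slot = <cursor> */
            }

            if (slot)
                    hlist_add_after(slot, &new->link);
            else
                    hlist_add_head(&new->link, head);
    }
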
diff --git a/net/can/af_can.c b/net/can/af_can.c
index ddac1ee2ed20..c48e5220bbac 100644
--- a/net/can/af_can.c
+++ b/net/can/af_can.c
@@ -516,7 +516,6 @@ void can_rx_unregister(struct net_device *dev, canid_t can_id, canid_t mask,
516{ 516{
517 struct receiver *r = NULL; 517 struct receiver *r = NULL;
518 struct hlist_head *rl; 518 struct hlist_head *rl;
519 struct hlist_node *next;
520 struct dev_rcv_lists *d; 519 struct dev_rcv_lists *d;
521 520
522 if (dev && dev->type != ARPHRD_CAN) 521 if (dev && dev->type != ARPHRD_CAN)
@@ -540,7 +539,7 @@ void can_rx_unregister(struct net_device *dev, canid_t can_id, canid_t mask,
540 * been registered before. 539 * been registered before.
541 */ 540 */
542 541
543 hlist_for_each_entry_rcu(r, next, rl, list) { 542 hlist_for_each_entry_rcu(r, rl, list) {
544 if (r->can_id == can_id && r->mask == mask && 543 if (r->can_id == can_id && r->mask == mask &&
545 r->func == func && r->data == data) 544 r->func == func && r->data == data)
546 break; 545 break;
@@ -552,7 +551,7 @@ void can_rx_unregister(struct net_device *dev, canid_t can_id, canid_t mask,
552 * will be NULL, while r will point to the last item of the list. 551 * will be NULL, while r will point to the last item of the list.
553 */ 552 */
554 553
555 if (!next) { 554 if (!r) {
556 printk(KERN_ERR "BUG: receive list entry not found for " 555 printk(KERN_ERR "BUG: receive list entry not found for "
557 "dev %s, id %03X, mask %03X\n", 556 "dev %s, id %03X, mask %03X\n",
558 DNAME(dev), can_id, mask); 557 DNAME(dev), can_id, mask);
@@ -590,7 +589,6 @@ static inline void deliver(struct sk_buff *skb, struct receiver *r)
590static int can_rcv_filter(struct dev_rcv_lists *d, struct sk_buff *skb) 589static int can_rcv_filter(struct dev_rcv_lists *d, struct sk_buff *skb)
591{ 590{
592 struct receiver *r; 591 struct receiver *r;
593 struct hlist_node *n;
594 int matches = 0; 592 int matches = 0;
595 struct can_frame *cf = (struct can_frame *)skb->data; 593 struct can_frame *cf = (struct can_frame *)skb->data;
596 canid_t can_id = cf->can_id; 594 canid_t can_id = cf->can_id;
@@ -600,7 +598,7 @@ static int can_rcv_filter(struct dev_rcv_lists *d, struct sk_buff *skb)
600 598
601 if (can_id & CAN_ERR_FLAG) { 599 if (can_id & CAN_ERR_FLAG) {
602 /* check for error message frame entries only */ 600 /* check for error message frame entries only */
603 hlist_for_each_entry_rcu(r, n, &d->rx[RX_ERR], list) { 601 hlist_for_each_entry_rcu(r, &d->rx[RX_ERR], list) {
604 if (can_id & r->mask) { 602 if (can_id & r->mask) {
605 deliver(skb, r); 603 deliver(skb, r);
606 matches++; 604 matches++;
@@ -610,13 +608,13 @@ static int can_rcv_filter(struct dev_rcv_lists *d, struct sk_buff *skb)
610 } 608 }
611 609
612 /* check for unfiltered entries */ 610 /* check for unfiltered entries */
613 hlist_for_each_entry_rcu(r, n, &d->rx[RX_ALL], list) { 611 hlist_for_each_entry_rcu(r, &d->rx[RX_ALL], list) {
614 deliver(skb, r); 612 deliver(skb, r);
615 matches++; 613 matches++;
616 } 614 }
617 615
618 /* check for can_id/mask entries */ 616 /* check for can_id/mask entries */
619 hlist_for_each_entry_rcu(r, n, &d->rx[RX_FIL], list) { 617 hlist_for_each_entry_rcu(r, &d->rx[RX_FIL], list) {
620 if ((can_id & r->mask) == r->can_id) { 618 if ((can_id & r->mask) == r->can_id) {
621 deliver(skb, r); 619 deliver(skb, r);
622 matches++; 620 matches++;
@@ -624,7 +622,7 @@ static int can_rcv_filter(struct dev_rcv_lists *d, struct sk_buff *skb)
624 } 622 }
625 623
626 /* check for inverted can_id/mask entries */ 624 /* check for inverted can_id/mask entries */
627 hlist_for_each_entry_rcu(r, n, &d->rx[RX_INV], list) { 625 hlist_for_each_entry_rcu(r, &d->rx[RX_INV], list) {
628 if ((can_id & r->mask) != r->can_id) { 626 if ((can_id & r->mask) != r->can_id) {
629 deliver(skb, r); 627 deliver(skb, r);
630 matches++; 628 matches++;
@@ -636,7 +634,7 @@ static int can_rcv_filter(struct dev_rcv_lists *d, struct sk_buff *skb)
636 return matches; 634 return matches;
637 635
638 if (can_id & CAN_EFF_FLAG) { 636 if (can_id & CAN_EFF_FLAG) {
639 hlist_for_each_entry_rcu(r, n, &d->rx[RX_EFF], list) { 637 hlist_for_each_entry_rcu(r, &d->rx[RX_EFF], list) {
640 if (r->can_id == can_id) { 638 if (r->can_id == can_id) {
641 deliver(skb, r); 639 deliver(skb, r);
642 matches++; 640 matches++;
@@ -644,7 +642,7 @@ static int can_rcv_filter(struct dev_rcv_lists *d, struct sk_buff *skb)
644 } 642 }
645 } else { 643 } else {
646 can_id &= CAN_SFF_MASK; 644 can_id &= CAN_SFF_MASK;
647 hlist_for_each_entry_rcu(r, n, &d->rx_sff[can_id], list) { 645 hlist_for_each_entry_rcu(r, &d->rx_sff[can_id], list) {
648 deliver(skb, r); 646 deliver(skb, r);
649 matches++; 647 matches++;
650 } 648 }
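
[note] can_rx_unregister() depends on the new post-condition directly: the old code tested the cursor (!next) to detect "loop completed without a break", and the entry pointer now carries that information, hence if (!r). Note the retained comment above that test still describes the old cursor. Reduced to its essentials (hypothetical names):

    static struct foo *unhash_match(struct hlist_head *bucket, int key)
    {
            struct foo *f;

            hlist_for_each_entry(f, bucket, link)
                    if (f->key == key)
                            break;          /* f still points at the match */

            if (!f)                         /* ran off the end: no match */
                    return NULL;

            hlist_del(&f->link);
            return f;
    }
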
diff --git a/net/can/gw.c b/net/can/gw.c
index c185fcd5e828..2d117dc5ebea 100644
--- a/net/can/gw.c
+++ b/net/can/gw.c
@@ -457,11 +457,11 @@ static int cgw_notifier(struct notifier_block *nb,
457 if (msg == NETDEV_UNREGISTER) { 457 if (msg == NETDEV_UNREGISTER) {
458 458
459 struct cgw_job *gwj = NULL; 459 struct cgw_job *gwj = NULL;
460 struct hlist_node *n, *nx; 460 struct hlist_node *nx;
461 461
462 ASSERT_RTNL(); 462 ASSERT_RTNL();
463 463
464 hlist_for_each_entry_safe(gwj, n, nx, &cgw_list, list) { 464 hlist_for_each_entry_safe(gwj, nx, &cgw_list, list) {
465 465
466 if (gwj->src.dev == dev || gwj->dst.dev == dev) { 466 if (gwj->src.dev == dev || gwj->dst.dev == dev) {
467 hlist_del(&gwj->list); 467 hlist_del(&gwj->list);
@@ -575,12 +575,11 @@ cancel:
575static int cgw_dump_jobs(struct sk_buff *skb, struct netlink_callback *cb) 575static int cgw_dump_jobs(struct sk_buff *skb, struct netlink_callback *cb)
576{ 576{
577 struct cgw_job *gwj = NULL; 577 struct cgw_job *gwj = NULL;
578 struct hlist_node *n;
579 int idx = 0; 578 int idx = 0;
580 int s_idx = cb->args[0]; 579 int s_idx = cb->args[0];
581 580
582 rcu_read_lock(); 581 rcu_read_lock();
583 hlist_for_each_entry_rcu(gwj, n, &cgw_list, list) { 582 hlist_for_each_entry_rcu(gwj, &cgw_list, list) {
584 if (idx < s_idx) 583 if (idx < s_idx)
585 goto cont; 584 goto cont;
586 585
@@ -858,11 +857,11 @@ out:
858static void cgw_remove_all_jobs(void) 857static void cgw_remove_all_jobs(void)
859{ 858{
860 struct cgw_job *gwj = NULL; 859 struct cgw_job *gwj = NULL;
861 struct hlist_node *n, *nx; 860 struct hlist_node *nx;
862 861
863 ASSERT_RTNL(); 862 ASSERT_RTNL();
864 863
865 hlist_for_each_entry_safe(gwj, n, nx, &cgw_list, list) { 864 hlist_for_each_entry_safe(gwj, nx, &cgw_list, list) {
866 hlist_del(&gwj->list); 865 hlist_del(&gwj->list);
867 cgw_unregister_filter(gwj); 866 cgw_unregister_filter(gwj);
868 kfree(gwj); 867 kfree(gwj);
@@ -872,7 +871,7 @@ static void cgw_remove_all_jobs(void)
872static int cgw_remove_job(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg) 871static int cgw_remove_job(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
873{ 872{
874 struct cgw_job *gwj = NULL; 873 struct cgw_job *gwj = NULL;
875 struct hlist_node *n, *nx; 874 struct hlist_node *nx;
876 struct rtcanmsg *r; 875 struct rtcanmsg *r;
877 struct cf_mod mod; 876 struct cf_mod mod;
878 struct can_can_gw ccgw; 877 struct can_can_gw ccgw;
@@ -907,7 +906,7 @@ static int cgw_remove_job(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
907 ASSERT_RTNL(); 906 ASSERT_RTNL();
908 907
909 /* remove only the first matching entry */ 908 /* remove only the first matching entry */
910 hlist_for_each_entry_safe(gwj, n, nx, &cgw_list, list) { 909 hlist_for_each_entry_safe(gwj, nx, &cgw_list, list) {
911 910
912 if (gwj->flags != r->flags) 911 if (gwj->flags != r->flags)
913 continue; 912 continue;
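
[note] The can/gw.c hunks are the _safe flavour: hlist_for_each_entry_safe() loses the cursor but keeps its lookahead struct hlist_node *, which caches ->next so the current entry may be unlinked and freed mid-walk. Sketch (hypothetical names):

    static void flush_chain(struct hlist_head *head)
    {
            struct hlist_node *tmp;         /* still required: caches ->next */
            struct foo *f;

            hlist_for_each_entry_safe(f, tmp, head, link) {
                    hlist_del(&f->link);
                    kfree(f);               /* safe: tmp already points past f */
            }
    }
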
diff --git a/net/can/proc.c b/net/can/proc.c
index 497335892146..1ab8c888f102 100644
--- a/net/can/proc.c
+++ b/net/can/proc.c
@@ -195,9 +195,8 @@ static void can_print_rcvlist(struct seq_file *m, struct hlist_head *rx_list,
195 struct net_device *dev) 195 struct net_device *dev)
196{ 196{
197 struct receiver *r; 197 struct receiver *r;
198 struct hlist_node *n;
199 198
200 hlist_for_each_entry_rcu(r, n, rx_list, list) { 199 hlist_for_each_entry_rcu(r, rx_list, list) {
201 char *fmt = (r->can_id & CAN_EFF_FLAG)? 200 char *fmt = (r->can_id & CAN_EFF_FLAG)?
202 " %-5s %08x %08x %pK %pK %8ld %s\n" : 201 " %-5s %08x %08x %pK %pK %8ld %s\n" :
203 " %-5s %03x %08x %pK %pK %8ld %s\n"; 202 " %-5s %03x %08x %pK %pK %8ld %s\n";
diff --git a/net/core/dev.c b/net/core/dev.c
index 18d8b5acc343..a06a7a58dd11 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -658,11 +658,10 @@ __setup("netdev=", netdev_boot_setup);
658 658
659struct net_device *__dev_get_by_name(struct net *net, const char *name) 659struct net_device *__dev_get_by_name(struct net *net, const char *name)
660{ 660{
661 struct hlist_node *p;
662 struct net_device *dev; 661 struct net_device *dev;
663 struct hlist_head *head = dev_name_hash(net, name); 662 struct hlist_head *head = dev_name_hash(net, name);
664 663
665 hlist_for_each_entry(dev, p, head, name_hlist) 664 hlist_for_each_entry(dev, head, name_hlist)
666 if (!strncmp(dev->name, name, IFNAMSIZ)) 665 if (!strncmp(dev->name, name, IFNAMSIZ))
667 return dev; 666 return dev;
668 667
@@ -684,11 +683,10 @@ EXPORT_SYMBOL(__dev_get_by_name);
684 683
685struct net_device *dev_get_by_name_rcu(struct net *net, const char *name) 684struct net_device *dev_get_by_name_rcu(struct net *net, const char *name)
686{ 685{
687 struct hlist_node *p;
688 struct net_device *dev; 686 struct net_device *dev;
689 struct hlist_head *head = dev_name_hash(net, name); 687 struct hlist_head *head = dev_name_hash(net, name);
690 688
691 hlist_for_each_entry_rcu(dev, p, head, name_hlist) 689 hlist_for_each_entry_rcu(dev, head, name_hlist)
692 if (!strncmp(dev->name, name, IFNAMSIZ)) 690 if (!strncmp(dev->name, name, IFNAMSIZ))
693 return dev; 691 return dev;
694 692
@@ -735,11 +733,10 @@ EXPORT_SYMBOL(dev_get_by_name);
735 733
736struct net_device *__dev_get_by_index(struct net *net, int ifindex) 734struct net_device *__dev_get_by_index(struct net *net, int ifindex)
737{ 735{
738 struct hlist_node *p;
739 struct net_device *dev; 736 struct net_device *dev;
740 struct hlist_head *head = dev_index_hash(net, ifindex); 737 struct hlist_head *head = dev_index_hash(net, ifindex);
741 738
742 hlist_for_each_entry(dev, p, head, index_hlist) 739 hlist_for_each_entry(dev, head, index_hlist)
743 if (dev->ifindex == ifindex) 740 if (dev->ifindex == ifindex)
744 return dev; 741 return dev;
745 742
@@ -760,11 +757,10 @@ EXPORT_SYMBOL(__dev_get_by_index);
760 757
761struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex) 758struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex)
762{ 759{
763 struct hlist_node *p;
764 struct net_device *dev; 760 struct net_device *dev;
765 struct hlist_head *head = dev_index_hash(net, ifindex); 761 struct hlist_head *head = dev_index_hash(net, ifindex);
766 762
767 hlist_for_each_entry_rcu(dev, p, head, index_hlist) 763 hlist_for_each_entry_rcu(dev, head, index_hlist)
768 if (dev->ifindex == ifindex) 764 if (dev->ifindex == ifindex)
769 return dev; 765 return dev;
770 766
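
[note] The net/core/dev.c lookups are the RCU flavour; only the argument list changes, not the locking rules. hlist_for_each_entry_rcu() must still run under rcu_read_lock() or with the write side otherwise excluded, exactly the split __dev_get_by_name() vs dev_get_by_name_rcu() already encode. A reader in the new style (hypothetical names; the returned pointer is only stable for the read-side section or while a taken reference lasts):

    static struct foo *find_rcu(struct hlist_head *bucket, int key)
    {
            struct foo *f;

            rcu_read_lock();
            hlist_for_each_entry_rcu(f, bucket, link) {
                    if (f->key == key)
                            break;
            }
            rcu_read_unlock();
            return f;       /* NULL if not found; see lifetime caveat above */
    }
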
diff --git a/net/core/flow.c b/net/core/flow.c
index 43f7495df27a..c56ea6f7f6c7 100644
--- a/net/core/flow.c
+++ b/net/core/flow.c
@@ -132,14 +132,14 @@ static void __flow_cache_shrink(struct flow_cache *fc,
132 int shrink_to) 132 int shrink_to)
133{ 133{
134 struct flow_cache_entry *fle; 134 struct flow_cache_entry *fle;
135 struct hlist_node *entry, *tmp; 135 struct hlist_node *tmp;
136 LIST_HEAD(gc_list); 136 LIST_HEAD(gc_list);
137 int i, deleted = 0; 137 int i, deleted = 0;
138 138
139 for (i = 0; i < flow_cache_hash_size(fc); i++) { 139 for (i = 0; i < flow_cache_hash_size(fc); i++) {
140 int saved = 0; 140 int saved = 0;
141 141
142 hlist_for_each_entry_safe(fle, entry, tmp, 142 hlist_for_each_entry_safe(fle, tmp,
143 &fcp->hash_table[i], u.hlist) { 143 &fcp->hash_table[i], u.hlist) {
144 if (saved < shrink_to && 144 if (saved < shrink_to &&
145 flow_entry_valid(fle)) { 145 flow_entry_valid(fle)) {
@@ -211,7 +211,6 @@ flow_cache_lookup(struct net *net, const struct flowi *key, u16 family, u8 dir,
211 struct flow_cache *fc = &flow_cache_global; 211 struct flow_cache *fc = &flow_cache_global;
212 struct flow_cache_percpu *fcp; 212 struct flow_cache_percpu *fcp;
213 struct flow_cache_entry *fle, *tfle; 213 struct flow_cache_entry *fle, *tfle;
214 struct hlist_node *entry;
215 struct flow_cache_object *flo; 214 struct flow_cache_object *flo;
216 size_t keysize; 215 size_t keysize;
217 unsigned int hash; 216 unsigned int hash;
@@ -235,7 +234,7 @@ flow_cache_lookup(struct net *net, const struct flowi *key, u16 family, u8 dir,
235 flow_new_hash_rnd(fc, fcp); 234 flow_new_hash_rnd(fc, fcp);
236 235
237 hash = flow_hash_code(fc, fcp, key, keysize); 236 hash = flow_hash_code(fc, fcp, key, keysize);
238 hlist_for_each_entry(tfle, entry, &fcp->hash_table[hash], u.hlist) { 237 hlist_for_each_entry(tfle, &fcp->hash_table[hash], u.hlist) {
239 if (tfle->net == net && 238 if (tfle->net == net &&
240 tfle->family == family && 239 tfle->family == family &&
241 tfle->dir == dir && 240 tfle->dir == dir &&
@@ -301,13 +300,13 @@ static void flow_cache_flush_tasklet(unsigned long data)
301 struct flow_cache *fc = info->cache; 300 struct flow_cache *fc = info->cache;
302 struct flow_cache_percpu *fcp; 301 struct flow_cache_percpu *fcp;
303 struct flow_cache_entry *fle; 302 struct flow_cache_entry *fle;
304 struct hlist_node *entry, *tmp; 303 struct hlist_node *tmp;
305 LIST_HEAD(gc_list); 304 LIST_HEAD(gc_list);
306 int i, deleted = 0; 305 int i, deleted = 0;
307 306
308 fcp = this_cpu_ptr(fc->percpu); 307 fcp = this_cpu_ptr(fc->percpu);
309 for (i = 0; i < flow_cache_hash_size(fc); i++) { 308 for (i = 0; i < flow_cache_hash_size(fc); i++) {
310 hlist_for_each_entry_safe(fle, entry, tmp, 309 hlist_for_each_entry_safe(fle, tmp,
311 &fcp->hash_table[i], u.hlist) { 310 &fcp->hash_table[i], u.hlist) {
312 if (flow_entry_valid(fle)) 311 if (flow_entry_valid(fle))
313 continue; 312 continue;
diff --git a/net/core/net-procfs.c b/net/core/net-procfs.c
index 0f6bb6f8d391..3174f1998ee6 100644
--- a/net/core/net-procfs.c
+++ b/net/core/net-procfs.c
@@ -16,12 +16,11 @@ static inline struct net_device *dev_from_same_bucket(struct seq_file *seq, loff
16{ 16{
17 struct net *net = seq_file_net(seq); 17 struct net *net = seq_file_net(seq);
18 struct net_device *dev; 18 struct net_device *dev;
19 struct hlist_node *p;
20 struct hlist_head *h; 19 struct hlist_head *h;
21 unsigned int count = 0, offset = get_offset(*pos); 20 unsigned int count = 0, offset = get_offset(*pos);
22 21
23 h = &net->dev_name_head[get_bucket(*pos)]; 22 h = &net->dev_name_head[get_bucket(*pos)];
24 hlist_for_each_entry_rcu(dev, p, h, name_hlist) { 23 hlist_for_each_entry_rcu(dev, h, name_hlist) {
25 if (++count == offset) 24 if (++count == offset)
26 return dev; 25 return dev;
27 } 26 }
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index d8aa20f6a46e..b376410ff259 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -1060,7 +1060,6 @@ static int rtnl_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
1060 int idx = 0, s_idx; 1060 int idx = 0, s_idx;
1061 struct net_device *dev; 1061 struct net_device *dev;
1062 struct hlist_head *head; 1062 struct hlist_head *head;
1063 struct hlist_node *node;
1064 struct nlattr *tb[IFLA_MAX+1]; 1063 struct nlattr *tb[IFLA_MAX+1];
1065 u32 ext_filter_mask = 0; 1064 u32 ext_filter_mask = 0;
1066 1065
@@ -1080,7 +1079,7 @@ static int rtnl_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
1080 for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) { 1079 for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
1081 idx = 0; 1080 idx = 0;
1082 head = &net->dev_index_head[h]; 1081 head = &net->dev_index_head[h];
1083 hlist_for_each_entry_rcu(dev, node, head, index_hlist) { 1082 hlist_for_each_entry_rcu(dev, head, index_hlist) {
1084 if (idx < s_idx) 1083 if (idx < s_idx)
1085 goto cont; 1084 goto cont;
1086 if (rtnl_fill_ifinfo(skb, dev, RTM_NEWLINK, 1085 if (rtnl_fill_ifinfo(skb, dev, RTM_NEWLINK,
diff --git a/net/decnet/af_decnet.c b/net/decnet/af_decnet.c
index c4a2def5b7bd..c21f200eed93 100644
--- a/net/decnet/af_decnet.c
+++ b/net/decnet/af_decnet.c
@@ -175,12 +175,11 @@ static struct hlist_head *dn_find_list(struct sock *sk)
175static int check_port(__le16 port) 175static int check_port(__le16 port)
176{ 176{
177 struct sock *sk; 177 struct sock *sk;
178 struct hlist_node *node;
179 178
180 if (port == 0) 179 if (port == 0)
181 return -1; 180 return -1;
182 181
183 sk_for_each(sk, node, &dn_sk_hash[le16_to_cpu(port) & DN_SK_HASH_MASK]) { 182 sk_for_each(sk, &dn_sk_hash[le16_to_cpu(port) & DN_SK_HASH_MASK]) {
184 struct dn_scp *scp = DN_SK(sk); 183 struct dn_scp *scp = DN_SK(sk);
185 if (scp->addrloc == port) 184 if (scp->addrloc == port)
186 return -1; 185 return -1;
@@ -374,11 +373,10 @@ int dn_username2sockaddr(unsigned char *data, int len, struct sockaddr_dn *sdn,
374struct sock *dn_sklist_find_listener(struct sockaddr_dn *addr) 373struct sock *dn_sklist_find_listener(struct sockaddr_dn *addr)
375{ 374{
376 struct hlist_head *list = listen_hash(addr); 375 struct hlist_head *list = listen_hash(addr);
377 struct hlist_node *node;
378 struct sock *sk; 376 struct sock *sk;
379 377
380 read_lock(&dn_hash_lock); 378 read_lock(&dn_hash_lock);
381 sk_for_each(sk, node, list) { 379 sk_for_each(sk, list) {
382 struct dn_scp *scp = DN_SK(sk); 380 struct dn_scp *scp = DN_SK(sk);
383 if (sk->sk_state != TCP_LISTEN) 381 if (sk->sk_state != TCP_LISTEN)
384 continue; 382 continue;
@@ -414,11 +412,10 @@ struct sock *dn_find_by_skb(struct sk_buff *skb)
414{ 412{
415 struct dn_skb_cb *cb = DN_SKB_CB(skb); 413 struct dn_skb_cb *cb = DN_SKB_CB(skb);
416 struct sock *sk; 414 struct sock *sk;
417 struct hlist_node *node;
418 struct dn_scp *scp; 415 struct dn_scp *scp;
419 416
420 read_lock(&dn_hash_lock); 417 read_lock(&dn_hash_lock);
421 sk_for_each(sk, node, &dn_sk_hash[le16_to_cpu(cb->dst_port) & DN_SK_HASH_MASK]) { 418 sk_for_each(sk, &dn_sk_hash[le16_to_cpu(cb->dst_port) & DN_SK_HASH_MASK]) {
422 scp = DN_SK(sk); 419 scp = DN_SK(sk);
423 if (cb->src != dn_saddr2dn(&scp->peer)) 420 if (cb->src != dn_saddr2dn(&scp->peer))
424 continue; 421 continue;
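
[note] The socket-hash wrappers shrink in lockstep: sk_for_each() and its relatives are thin macros over the hlist iterators, so they lose the node argument too, as every decnet lookup above shows. Sketch of a converted caller (my_port() is a hypothetical accessor, standing in for whatever protocol field is matched):

    static struct sock *lookup(struct hlist_head *chain, __le16 port)
    {
            struct sock *sk;

            sk_for_each(sk, chain) {        /* was: sk_for_each(sk, node, chain) */
                    if (my_port(sk) == port)
                            return sk;
            }
            return NULL;
    }
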
diff --git a/net/decnet/dn_table.c b/net/decnet/dn_table.c
index f968c1b58f47..6c2445bcaba1 100644
--- a/net/decnet/dn_table.c
+++ b/net/decnet/dn_table.c
@@ -483,7 +483,6 @@ int dn_fib_dump(struct sk_buff *skb, struct netlink_callback *cb)
483 unsigned int h, s_h; 483 unsigned int h, s_h;
484 unsigned int e = 0, s_e; 484 unsigned int e = 0, s_e;
485 struct dn_fib_table *tb; 485 struct dn_fib_table *tb;
486 struct hlist_node *node;
487 int dumped = 0; 486 int dumped = 0;
488 487
489 if (!net_eq(net, &init_net)) 488 if (!net_eq(net, &init_net))
@@ -498,7 +497,7 @@ int dn_fib_dump(struct sk_buff *skb, struct netlink_callback *cb)
498 497
499 for (h = s_h; h < DN_FIB_TABLE_HASHSZ; h++, s_h = 0) { 498 for (h = s_h; h < DN_FIB_TABLE_HASHSZ; h++, s_h = 0) {
500 e = 0; 499 e = 0;
501 hlist_for_each_entry(tb, node, &dn_fib_table_hash[h], hlist) { 500 hlist_for_each_entry(tb, &dn_fib_table_hash[h], hlist) {
502 if (e < s_e) 501 if (e < s_e)
503 goto next; 502 goto next;
504 if (dumped) 503 if (dumped)
@@ -828,7 +827,6 @@ out:
828struct dn_fib_table *dn_fib_get_table(u32 n, int create) 827struct dn_fib_table *dn_fib_get_table(u32 n, int create)
829{ 828{
830 struct dn_fib_table *t; 829 struct dn_fib_table *t;
831 struct hlist_node *node;
832 unsigned int h; 830 unsigned int h;
833 831
834 if (n < RT_TABLE_MIN) 832 if (n < RT_TABLE_MIN)
@@ -839,7 +837,7 @@ struct dn_fib_table *dn_fib_get_table(u32 n, int create)
839 837
840 h = n & (DN_FIB_TABLE_HASHSZ - 1); 838 h = n & (DN_FIB_TABLE_HASHSZ - 1);
841 rcu_read_lock(); 839 rcu_read_lock();
842 hlist_for_each_entry_rcu(t, node, &dn_fib_table_hash[h], hlist) { 840 hlist_for_each_entry_rcu(t, &dn_fib_table_hash[h], hlist) {
843 if (t->n == n) { 841 if (t->n == n) {
844 rcu_read_unlock(); 842 rcu_read_unlock();
845 return t; 843 return t;
@@ -885,11 +883,10 @@ void dn_fib_flush(void)
885{ 883{
886 int flushed = 0; 884 int flushed = 0;
887 struct dn_fib_table *tb; 885 struct dn_fib_table *tb;
888 struct hlist_node *node;
889 unsigned int h; 886 unsigned int h;
890 887
891 for (h = 0; h < DN_FIB_TABLE_HASHSZ; h++) { 888 for (h = 0; h < DN_FIB_TABLE_HASHSZ; h++) {
892 hlist_for_each_entry(tb, node, &dn_fib_table_hash[h], hlist) 889 hlist_for_each_entry(tb, &dn_fib_table_hash[h], hlist)
893 flushed += tb->flush(tb); 890 flushed += tb->flush(tb);
894 } 891 }
895 892
@@ -908,12 +905,12 @@ void __init dn_fib_table_init(void)
908void __exit dn_fib_table_cleanup(void) 905void __exit dn_fib_table_cleanup(void)
909{ 906{
910 struct dn_fib_table *t; 907 struct dn_fib_table *t;
911 struct hlist_node *node, *next; 908 struct hlist_node *next;
912 unsigned int h; 909 unsigned int h;
913 910
914 write_lock(&dn_fib_tables_lock); 911 write_lock(&dn_fib_tables_lock);
915 for (h = 0; h < DN_FIB_TABLE_HASHSZ; h++) { 912 for (h = 0; h < DN_FIB_TABLE_HASHSZ; h++) {
916 hlist_for_each_entry_safe(t, node, next, &dn_fib_table_hash[h], 913 hlist_for_each_entry_safe(t, next, &dn_fib_table_hash[h],
917 hlist) { 914 hlist) {
918 hlist_del(&t->hlist); 915 hlist_del(&t->hlist);
919 kfree(t); 916 kfree(t);
diff --git a/net/ieee802154/dgram.c b/net/ieee802154/dgram.c
index 16705611589a..e0da175f8e5b 100644
--- a/net/ieee802154/dgram.c
+++ b/net/ieee802154/dgram.c
@@ -350,7 +350,6 @@ static inline int ieee802154_match_sock(u8 *hw_addr, u16 pan_id,
350int ieee802154_dgram_deliver(struct net_device *dev, struct sk_buff *skb) 350int ieee802154_dgram_deliver(struct net_device *dev, struct sk_buff *skb)
351{ 351{
352 struct sock *sk, *prev = NULL; 352 struct sock *sk, *prev = NULL;
353 struct hlist_node *node;
354 int ret = NET_RX_SUCCESS; 353 int ret = NET_RX_SUCCESS;
355 u16 pan_id, short_addr; 354 u16 pan_id, short_addr;
356 355
@@ -361,7 +360,7 @@ int ieee802154_dgram_deliver(struct net_device *dev, struct sk_buff *skb)
361 short_addr = ieee802154_mlme_ops(dev)->get_short_addr(dev); 360 short_addr = ieee802154_mlme_ops(dev)->get_short_addr(dev);
362 361
363 read_lock(&dgram_lock); 362 read_lock(&dgram_lock);
364 sk_for_each(sk, node, &dgram_head) { 363 sk_for_each(sk, &dgram_head) {
365 if (ieee802154_match_sock(dev->dev_addr, pan_id, short_addr, 364 if (ieee802154_match_sock(dev->dev_addr, pan_id, short_addr,
366 dgram_sk(sk))) { 365 dgram_sk(sk))) {
367 if (prev) { 366 if (prev) {
diff --git a/net/ieee802154/raw.c b/net/ieee802154/raw.c
index 50e823927d49..41f538b8e59c 100644
--- a/net/ieee802154/raw.c
+++ b/net/ieee802154/raw.c
@@ -221,10 +221,9 @@ static int raw_rcv_skb(struct sock *sk, struct sk_buff *skb)
221void ieee802154_raw_deliver(struct net_device *dev, struct sk_buff *skb) 221void ieee802154_raw_deliver(struct net_device *dev, struct sk_buff *skb)
222{ 222{
223 struct sock *sk; 223 struct sock *sk;
224 struct hlist_node *node;
225 224
226 read_lock(&raw_lock); 225 read_lock(&raw_lock);
227 sk_for_each(sk, node, &raw_head) { 226 sk_for_each(sk, &raw_head) {
228 bh_lock_sock(sk); 227 bh_lock_sock(sk);
229 if (!sk->sk_bound_dev_if || 228 if (!sk->sk_bound_dev_if ||
230 sk->sk_bound_dev_if == dev->ifindex) { 229 sk->sk_bound_dev_if == dev->ifindex) {
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
index 5281314886c1..f678507bc829 100644
--- a/net/ipv4/devinet.c
+++ b/net/ipv4/devinet.c
@@ -139,10 +139,9 @@ struct net_device *__ip_dev_find(struct net *net, __be32 addr, bool devref)
139 u32 hash = inet_addr_hash(net, addr); 139 u32 hash = inet_addr_hash(net, addr);
140 struct net_device *result = NULL; 140 struct net_device *result = NULL;
141 struct in_ifaddr *ifa; 141 struct in_ifaddr *ifa;
142 struct hlist_node *node;
143 142
144 rcu_read_lock(); 143 rcu_read_lock();
145 hlist_for_each_entry_rcu(ifa, node, &inet_addr_lst[hash], hash) { 144 hlist_for_each_entry_rcu(ifa, &inet_addr_lst[hash], hash) {
146 if (ifa->ifa_local == addr) { 145 if (ifa->ifa_local == addr) {
147 struct net_device *dev = ifa->ifa_dev->dev; 146 struct net_device *dev = ifa->ifa_dev->dev;
148 147
@@ -588,7 +587,6 @@ static void check_lifetime(struct work_struct *work)
588{ 587{
589 unsigned long now, next, next_sec, next_sched; 588 unsigned long now, next, next_sec, next_sched;
590 struct in_ifaddr *ifa; 589 struct in_ifaddr *ifa;
591 struct hlist_node *node;
592 int i; 590 int i;
593 591
594 now = jiffies; 592 now = jiffies;
@@ -596,8 +594,7 @@ static void check_lifetime(struct work_struct *work)
596 594
597 rcu_read_lock(); 595 rcu_read_lock();
598 for (i = 0; i < IN4_ADDR_HSIZE; i++) { 596 for (i = 0; i < IN4_ADDR_HSIZE; i++) {
599 hlist_for_each_entry_rcu(ifa, node, 597 hlist_for_each_entry_rcu(ifa, &inet_addr_lst[i], hash) {
600 &inet_addr_lst[i], hash) {
601 unsigned long age; 598 unsigned long age;
602 599
603 if (ifa->ifa_flags & IFA_F_PERMANENT) 600 if (ifa->ifa_flags & IFA_F_PERMANENT)
@@ -1493,7 +1490,6 @@ static int inet_dump_ifaddr(struct sk_buff *skb, struct netlink_callback *cb)
1493 struct in_device *in_dev; 1490 struct in_device *in_dev;
1494 struct in_ifaddr *ifa; 1491 struct in_ifaddr *ifa;
1495 struct hlist_head *head; 1492 struct hlist_head *head;
1496 struct hlist_node *node;
1497 1493
1498 s_h = cb->args[0]; 1494 s_h = cb->args[0];
1499 s_idx = idx = cb->args[1]; 1495 s_idx = idx = cb->args[1];
@@ -1503,7 +1499,7 @@ static int inet_dump_ifaddr(struct sk_buff *skb, struct netlink_callback *cb)
1503 idx = 0; 1499 idx = 0;
1504 head = &net->dev_index_head[h]; 1500 head = &net->dev_index_head[h];
1505 rcu_read_lock(); 1501 rcu_read_lock();
1506 hlist_for_each_entry_rcu(dev, node, head, index_hlist) { 1502 hlist_for_each_entry_rcu(dev, head, index_hlist) {
1507 if (idx < s_idx) 1503 if (idx < s_idx)
1508 goto cont; 1504 goto cont;
1509 if (h > s_h || idx > s_idx) 1505 if (h > s_h || idx > s_idx)
diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
index 99f00d39d10b..eb4bb12b3eb4 100644
--- a/net/ipv4/fib_frontend.c
+++ b/net/ipv4/fib_frontend.c
@@ -112,7 +112,6 @@ struct fib_table *fib_new_table(struct net *net, u32 id)
112struct fib_table *fib_get_table(struct net *net, u32 id) 112struct fib_table *fib_get_table(struct net *net, u32 id)
113{ 113{
114 struct fib_table *tb; 114 struct fib_table *tb;
115 struct hlist_node *node;
116 struct hlist_head *head; 115 struct hlist_head *head;
117 unsigned int h; 116 unsigned int h;
118 117
@@ -122,7 +121,7 @@ struct fib_table *fib_get_table(struct net *net, u32 id)
122 121
123 rcu_read_lock(); 122 rcu_read_lock();
124 head = &net->ipv4.fib_table_hash[h]; 123 head = &net->ipv4.fib_table_hash[h];
125 hlist_for_each_entry_rcu(tb, node, head, tb_hlist) { 124 hlist_for_each_entry_rcu(tb, head, tb_hlist) {
126 if (tb->tb_id == id) { 125 if (tb->tb_id == id) {
127 rcu_read_unlock(); 126 rcu_read_unlock();
128 return tb; 127 return tb;
@@ -137,13 +136,12 @@ static void fib_flush(struct net *net)
137{ 136{
138 int flushed = 0; 137 int flushed = 0;
139 struct fib_table *tb; 138 struct fib_table *tb;
140 struct hlist_node *node;
141 struct hlist_head *head; 139 struct hlist_head *head;
142 unsigned int h; 140 unsigned int h;
143 141
144 for (h = 0; h < FIB_TABLE_HASHSZ; h++) { 142 for (h = 0; h < FIB_TABLE_HASHSZ; h++) {
145 head = &net->ipv4.fib_table_hash[h]; 143 head = &net->ipv4.fib_table_hash[h];
146 hlist_for_each_entry(tb, node, head, tb_hlist) 144 hlist_for_each_entry(tb, head, tb_hlist)
147 flushed += fib_table_flush(tb); 145 flushed += fib_table_flush(tb);
148 } 146 }
149 147
@@ -656,7 +654,6 @@ static int inet_dump_fib(struct sk_buff *skb, struct netlink_callback *cb)
656 unsigned int h, s_h; 654 unsigned int h, s_h;
657 unsigned int e = 0, s_e; 655 unsigned int e = 0, s_e;
658 struct fib_table *tb; 656 struct fib_table *tb;
659 struct hlist_node *node;
660 struct hlist_head *head; 657 struct hlist_head *head;
661 int dumped = 0; 658 int dumped = 0;
662 659
@@ -670,7 +667,7 @@ static int inet_dump_fib(struct sk_buff *skb, struct netlink_callback *cb)
670 for (h = s_h; h < FIB_TABLE_HASHSZ; h++, s_e = 0) { 667 for (h = s_h; h < FIB_TABLE_HASHSZ; h++, s_e = 0) {
671 e = 0; 668 e = 0;
672 head = &net->ipv4.fib_table_hash[h]; 669 head = &net->ipv4.fib_table_hash[h];
673 hlist_for_each_entry(tb, node, head, tb_hlist) { 670 hlist_for_each_entry(tb, head, tb_hlist) {
674 if (e < s_e) 671 if (e < s_e)
675 goto next; 672 goto next;
676 if (dumped) 673 if (dumped)
@@ -1117,11 +1114,11 @@ static void ip_fib_net_exit(struct net *net)
1117 for (i = 0; i < FIB_TABLE_HASHSZ; i++) { 1114 for (i = 0; i < FIB_TABLE_HASHSZ; i++) {
1118 struct fib_table *tb; 1115 struct fib_table *tb;
1119 struct hlist_head *head; 1116 struct hlist_head *head;
1120 struct hlist_node *node, *tmp; 1117 struct hlist_node *tmp;
1121 1118
1122 head = &net->ipv4.fib_table_hash[i]; 1119 head = &net->ipv4.fib_table_hash[i];
1123 hlist_for_each_entry_safe(tb, node, tmp, head, tb_hlist) { 1120 hlist_for_each_entry_safe(tb, tmp, head, tb_hlist) {
1124 hlist_del(node); 1121 hlist_del(&tb->tb_hlist);
1125 fib_table_flush(tb); 1122 fib_table_flush(tb);
1126 fib_free_table(tb); 1123 fib_free_table(tb);
1127 } 1124 }
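
[note] ip_fib_net_exit() needs the same respelling as the bridge router list: hlist_del(node) has no cursor left to name, so the node is reached through the entry as hlist_del(&tb->tb_hlist). The patched loop, for reference:

    hlist_for_each_entry_safe(tb, tmp, head, tb_hlist) {
            hlist_del(&tb->tb_hlist);       /* was: hlist_del(node) */
            fib_table_flush(tb);
            fib_free_table(tb);
    }
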
diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
index 4797a800faf8..8f6cb7a87cd6 100644
--- a/net/ipv4/fib_semantics.c
+++ b/net/ipv4/fib_semantics.c
@@ -298,14 +298,13 @@ static inline unsigned int fib_info_hashfn(const struct fib_info *fi)
298static struct fib_info *fib_find_info(const struct fib_info *nfi) 298static struct fib_info *fib_find_info(const struct fib_info *nfi)
299{ 299{
300 struct hlist_head *head; 300 struct hlist_head *head;
301 struct hlist_node *node;
302 struct fib_info *fi; 301 struct fib_info *fi;
303 unsigned int hash; 302 unsigned int hash;
304 303
305 hash = fib_info_hashfn(nfi); 304 hash = fib_info_hashfn(nfi);
306 head = &fib_info_hash[hash]; 305 head = &fib_info_hash[hash];
307 306
308 hlist_for_each_entry(fi, node, head, fib_hash) { 307 hlist_for_each_entry(fi, head, fib_hash) {
309 if (!net_eq(fi->fib_net, nfi->fib_net)) 308 if (!net_eq(fi->fib_net, nfi->fib_net))
310 continue; 309 continue;
311 if (fi->fib_nhs != nfi->fib_nhs) 310 if (fi->fib_nhs != nfi->fib_nhs)
@@ -331,7 +330,6 @@ static struct fib_info *fib_find_info(const struct fib_info *nfi)
331int ip_fib_check_default(__be32 gw, struct net_device *dev) 330int ip_fib_check_default(__be32 gw, struct net_device *dev)
332{ 331{
333 struct hlist_head *head; 332 struct hlist_head *head;
334 struct hlist_node *node;
335 struct fib_nh *nh; 333 struct fib_nh *nh;
336 unsigned int hash; 334 unsigned int hash;
337 335
@@ -339,7 +337,7 @@ int ip_fib_check_default(__be32 gw, struct net_device *dev)
339 337
340 hash = fib_devindex_hashfn(dev->ifindex); 338 hash = fib_devindex_hashfn(dev->ifindex);
341 head = &fib_info_devhash[hash]; 339 head = &fib_info_devhash[hash];
342 hlist_for_each_entry(nh, node, head, nh_hash) { 340 hlist_for_each_entry(nh, head, nh_hash) {
343 if (nh->nh_dev == dev && 341 if (nh->nh_dev == dev &&
344 nh->nh_gw == gw && 342 nh->nh_gw == gw &&
345 !(nh->nh_flags & RTNH_F_DEAD)) { 343 !(nh->nh_flags & RTNH_F_DEAD)) {
@@ -721,10 +719,10 @@ static void fib_info_hash_move(struct hlist_head *new_info_hash,
721 719
722 for (i = 0; i < old_size; i++) { 720 for (i = 0; i < old_size; i++) {
723 struct hlist_head *head = &fib_info_hash[i]; 721 struct hlist_head *head = &fib_info_hash[i];
724 struct hlist_node *node, *n; 722 struct hlist_node *n;
725 struct fib_info *fi; 723 struct fib_info *fi;
726 724
727 hlist_for_each_entry_safe(fi, node, n, head, fib_hash) { 725 hlist_for_each_entry_safe(fi, n, head, fib_hash) {
728 struct hlist_head *dest; 726 struct hlist_head *dest;
729 unsigned int new_hash; 727 unsigned int new_hash;
730 728
@@ -739,10 +737,10 @@ static void fib_info_hash_move(struct hlist_head *new_info_hash,
739 737
740 for (i = 0; i < old_size; i++) { 738 for (i = 0; i < old_size; i++) {
741 struct hlist_head *lhead = &fib_info_laddrhash[i]; 739 struct hlist_head *lhead = &fib_info_laddrhash[i];
742 struct hlist_node *node, *n; 740 struct hlist_node *n;
743 struct fib_info *fi; 741 struct fib_info *fi;
744 742
745 hlist_for_each_entry_safe(fi, node, n, lhead, fib_lhash) { 743 hlist_for_each_entry_safe(fi, n, lhead, fib_lhash) {
746 struct hlist_head *ldest; 744 struct hlist_head *ldest;
747 unsigned int new_hash; 745 unsigned int new_hash;
748 746
@@ -1096,13 +1094,12 @@ int fib_sync_down_addr(struct net *net, __be32 local)
1096 int ret = 0; 1094 int ret = 0;
1097 unsigned int hash = fib_laddr_hashfn(local); 1095 unsigned int hash = fib_laddr_hashfn(local);
1098 struct hlist_head *head = &fib_info_laddrhash[hash]; 1096 struct hlist_head *head = &fib_info_laddrhash[hash];
1099 struct hlist_node *node;
1100 struct fib_info *fi; 1097 struct fib_info *fi;
1101 1098
1102 if (fib_info_laddrhash == NULL || local == 0) 1099 if (fib_info_laddrhash == NULL || local == 0)
1103 return 0; 1100 return 0;
1104 1101
1105 hlist_for_each_entry(fi, node, head, fib_lhash) { 1102 hlist_for_each_entry(fi, head, fib_lhash) {
1106 if (!net_eq(fi->fib_net, net)) 1103 if (!net_eq(fi->fib_net, net))
1107 continue; 1104 continue;
1108 if (fi->fib_prefsrc == local) { 1105 if (fi->fib_prefsrc == local) {
@@ -1120,13 +1117,12 @@ int fib_sync_down_dev(struct net_device *dev, int force)
1120 struct fib_info *prev_fi = NULL; 1117 struct fib_info *prev_fi = NULL;
1121 unsigned int hash = fib_devindex_hashfn(dev->ifindex); 1118 unsigned int hash = fib_devindex_hashfn(dev->ifindex);
1122 struct hlist_head *head = &fib_info_devhash[hash]; 1119 struct hlist_head *head = &fib_info_devhash[hash];
1123 struct hlist_node *node;
1124 struct fib_nh *nh; 1120 struct fib_nh *nh;
1125 1121
1126 if (force) 1122 if (force)
1127 scope = -1; 1123 scope = -1;
1128 1124
1129 hlist_for_each_entry(nh, node, head, nh_hash) { 1125 hlist_for_each_entry(nh, head, nh_hash) {
1130 struct fib_info *fi = nh->nh_parent; 1126 struct fib_info *fi = nh->nh_parent;
1131 int dead; 1127 int dead;
1132 1128
@@ -1232,7 +1228,6 @@ int fib_sync_up(struct net_device *dev)
1232 struct fib_info *prev_fi; 1228 struct fib_info *prev_fi;
1233 unsigned int hash; 1229 unsigned int hash;
1234 struct hlist_head *head; 1230 struct hlist_head *head;
1235 struct hlist_node *node;
1236 struct fib_nh *nh; 1231 struct fib_nh *nh;
1237 int ret; 1232 int ret;
1238 1233
@@ -1244,7 +1239,7 @@ int fib_sync_up(struct net_device *dev)
1244 head = &fib_info_devhash[hash]; 1239 head = &fib_info_devhash[hash];
1245 ret = 0; 1240 ret = 0;
1246 1241
1247 hlist_for_each_entry(nh, node, head, nh_hash) { 1242 hlist_for_each_entry(nh, head, nh_hash) {
1248 struct fib_info *fi = nh->nh_parent; 1243 struct fib_info *fi = nh->nh_parent;
1249 int alive; 1244 int alive;
1250 1245
diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c
index 61e03da3e1f5..ff06b7543d9f 100644
--- a/net/ipv4/fib_trie.c
+++ b/net/ipv4/fib_trie.c
@@ -920,10 +920,9 @@ nomem:
920static struct leaf_info *find_leaf_info(struct leaf *l, int plen) 920static struct leaf_info *find_leaf_info(struct leaf *l, int plen)
921{ 921{
922 struct hlist_head *head = &l->list; 922 struct hlist_head *head = &l->list;
923 struct hlist_node *node;
924 struct leaf_info *li; 923 struct leaf_info *li;
925 924
926 hlist_for_each_entry_rcu(li, node, head, hlist) 925 hlist_for_each_entry_rcu(li, head, hlist)
927 if (li->plen == plen) 926 if (li->plen == plen)
928 return li; 927 return li;
929 928
@@ -943,12 +942,11 @@ static inline struct list_head *get_fa_head(struct leaf *l, int plen)
943static void insert_leaf_info(struct hlist_head *head, struct leaf_info *new) 942static void insert_leaf_info(struct hlist_head *head, struct leaf_info *new)
944{ 943{
945 struct leaf_info *li = NULL, *last = NULL; 944 struct leaf_info *li = NULL, *last = NULL;
946 struct hlist_node *node;
947 945
948 if (hlist_empty(head)) { 946 if (hlist_empty(head)) {
949 hlist_add_head_rcu(&new->hlist, head); 947 hlist_add_head_rcu(&new->hlist, head);
950 } else { 948 } else {
951 hlist_for_each_entry(li, node, head, hlist) { 949 hlist_for_each_entry(li, head, hlist) {
952 if (new->plen > li->plen) 950 if (new->plen > li->plen)
953 break; 951 break;
954 952
@@ -1354,9 +1352,8 @@ static int check_leaf(struct fib_table *tb, struct trie *t, struct leaf *l,
1354{ 1352{
1355 struct leaf_info *li; 1353 struct leaf_info *li;
1356 struct hlist_head *hhead = &l->list; 1354 struct hlist_head *hhead = &l->list;
1357 struct hlist_node *node;
1358 1355
1359 hlist_for_each_entry_rcu(li, node, hhead, hlist) { 1356 hlist_for_each_entry_rcu(li, hhead, hlist) {
1360 struct fib_alias *fa; 1357 struct fib_alias *fa;
1361 1358
1362 if (l->key != (key & li->mask_plen)) 1359 if (l->key != (key & li->mask_plen))
@@ -1740,10 +1737,10 @@ static int trie_flush_leaf(struct leaf *l)
1740{ 1737{
1741 int found = 0; 1738 int found = 0;
1742 struct hlist_head *lih = &l->list; 1739 struct hlist_head *lih = &l->list;
1743 struct hlist_node *node, *tmp; 1740 struct hlist_node *tmp;
1744 struct leaf_info *li = NULL; 1741 struct leaf_info *li = NULL;
1745 1742
1746 hlist_for_each_entry_safe(li, node, tmp, lih, hlist) { 1743 hlist_for_each_entry_safe(li, tmp, lih, hlist) {
1747 found += trie_flush_list(&li->falh); 1744 found += trie_flush_list(&li->falh);
1748 1745
1749 if (list_empty(&li->falh)) { 1746 if (list_empty(&li->falh)) {
@@ -1895,14 +1892,13 @@ static int fn_trie_dump_leaf(struct leaf *l, struct fib_table *tb,
1895 struct sk_buff *skb, struct netlink_callback *cb) 1892 struct sk_buff *skb, struct netlink_callback *cb)
1896{ 1893{
1897 struct leaf_info *li; 1894 struct leaf_info *li;
1898 struct hlist_node *node;
1899 int i, s_i; 1895 int i, s_i;
1900 1896
1901 s_i = cb->args[4]; 1897 s_i = cb->args[4];
1902 i = 0; 1898 i = 0;
1903 1899
1904 /* rcu_read_lock is hold by caller */ 1900 /* rcu_read_lock is hold by caller */
1905 hlist_for_each_entry_rcu(li, node, &l->list, hlist) { 1901 hlist_for_each_entry_rcu(li, &l->list, hlist) {
1906 if (i < s_i) { 1902 if (i < s_i) {
1907 i++; 1903 i++;
1908 continue; 1904 continue;
@@ -2092,14 +2088,13 @@ static void trie_collect_stats(struct trie *t, struct trie_stat *s)
2092 if (IS_LEAF(n)) { 2088 if (IS_LEAF(n)) {
2093 struct leaf *l = (struct leaf *)n; 2089 struct leaf *l = (struct leaf *)n;
2094 struct leaf_info *li; 2090 struct leaf_info *li;
2095 struct hlist_node *tmp;
2096 2091
2097 s->leaves++; 2092 s->leaves++;
2098 s->totdepth += iter.depth; 2093 s->totdepth += iter.depth;
2099 if (iter.depth > s->maxdepth) 2094 if (iter.depth > s->maxdepth)
2100 s->maxdepth = iter.depth; 2095 s->maxdepth = iter.depth;
2101 2096
2102 hlist_for_each_entry_rcu(li, tmp, &l->list, hlist) 2097 hlist_for_each_entry_rcu(li, &l->list, hlist)
2103 ++s->prefixes; 2098 ++s->prefixes;
2104 } else { 2099 } else {
2105 const struct tnode *tn = (const struct tnode *) n; 2100 const struct tnode *tn = (const struct tnode *) n;
@@ -2200,10 +2195,9 @@ static int fib_triestat_seq_show(struct seq_file *seq, void *v)
2200 2195
2201 for (h = 0; h < FIB_TABLE_HASHSZ; h++) { 2196 for (h = 0; h < FIB_TABLE_HASHSZ; h++) {
2202 struct hlist_head *head = &net->ipv4.fib_table_hash[h]; 2197 struct hlist_head *head = &net->ipv4.fib_table_hash[h];
2203 struct hlist_node *node;
2204 struct fib_table *tb; 2198 struct fib_table *tb;
2205 2199
2206 hlist_for_each_entry_rcu(tb, node, head, tb_hlist) { 2200 hlist_for_each_entry_rcu(tb, head, tb_hlist) {
2207 struct trie *t = (struct trie *) tb->tb_data; 2201 struct trie *t = (struct trie *) tb->tb_data;
2208 struct trie_stat stat; 2202 struct trie_stat stat;
2209 2203
@@ -2245,10 +2239,9 @@ static struct rt_trie_node *fib_trie_get_idx(struct seq_file *seq, loff_t pos)
2245 2239
2246 for (h = 0; h < FIB_TABLE_HASHSZ; h++) { 2240 for (h = 0; h < FIB_TABLE_HASHSZ; h++) {
2247 struct hlist_head *head = &net->ipv4.fib_table_hash[h]; 2241 struct hlist_head *head = &net->ipv4.fib_table_hash[h];
2248 struct hlist_node *node;
2249 struct fib_table *tb; 2242 struct fib_table *tb;
2250 2243
2251 hlist_for_each_entry_rcu(tb, node, head, tb_hlist) { 2244 hlist_for_each_entry_rcu(tb, head, tb_hlist) {
2252 struct rt_trie_node *n; 2245 struct rt_trie_node *n;
2253 2246
2254 for (n = fib_trie_get_first(iter, 2247 for (n = fib_trie_get_first(iter,
@@ -2298,7 +2291,7 @@ static void *fib_trie_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2298 /* new hash chain */ 2291 /* new hash chain */
2299 while (++h < FIB_TABLE_HASHSZ) { 2292 while (++h < FIB_TABLE_HASHSZ) {
2300 struct hlist_head *head = &net->ipv4.fib_table_hash[h]; 2293 struct hlist_head *head = &net->ipv4.fib_table_hash[h];
2301 hlist_for_each_entry_rcu(tb, tb_node, head, tb_hlist) { 2294 hlist_for_each_entry_rcu(tb, head, tb_hlist) {
2302 n = fib_trie_get_first(iter, (struct trie *) tb->tb_data); 2295 n = fib_trie_get_first(iter, (struct trie *) tb->tb_data);
2303 if (n) 2296 if (n)
2304 goto found; 2297 goto found;
@@ -2381,13 +2374,12 @@ static int fib_trie_seq_show(struct seq_file *seq, void *v)
2381 } else { 2374 } else {
2382 struct leaf *l = (struct leaf *) n; 2375 struct leaf *l = (struct leaf *) n;
2383 struct leaf_info *li; 2376 struct leaf_info *li;
2384 struct hlist_node *node;
2385 __be32 val = htonl(l->key); 2377 __be32 val = htonl(l->key);
2386 2378
2387 seq_indent(seq, iter->depth); 2379 seq_indent(seq, iter->depth);
2388 seq_printf(seq, " |-- %pI4\n", &val); 2380 seq_printf(seq, " |-- %pI4\n", &val);
2389 2381
2390 hlist_for_each_entry_rcu(li, node, &l->list, hlist) { 2382 hlist_for_each_entry_rcu(li, &l->list, hlist) {
2391 struct fib_alias *fa; 2383 struct fib_alias *fa;
2392 2384
2393 list_for_each_entry_rcu(fa, &li->falh, fa_list) { 2385 list_for_each_entry_rcu(fa, &li->falh, fa_list) {
@@ -2532,7 +2524,6 @@ static int fib_route_seq_show(struct seq_file *seq, void *v)
2532{ 2524{
2533 struct leaf *l = v; 2525 struct leaf *l = v;
2534 struct leaf_info *li; 2526 struct leaf_info *li;
2535 struct hlist_node *node;
2536 2527
2537 if (v == SEQ_START_TOKEN) { 2528 if (v == SEQ_START_TOKEN) {
2538 seq_printf(seq, "%-127s\n", "Iface\tDestination\tGateway " 2529 seq_printf(seq, "%-127s\n", "Iface\tDestination\tGateway "
@@ -2541,7 +2532,7 @@ static int fib_route_seq_show(struct seq_file *seq, void *v)
2541 return 0; 2532 return 0;
2542 } 2533 }
2543 2534
2544 hlist_for_each_entry_rcu(li, node, &l->list, hlist) { 2535 hlist_for_each_entry_rcu(li, &l->list, hlist) {
2545 struct fib_alias *fa; 2536 struct fib_alias *fa;
2546 __be32 mask, prefix; 2537 __be32 mask, prefix;
2547 2538
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
index 11cb4979a465..7d1874be1df3 100644
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -57,7 +57,6 @@ int inet_csk_bind_conflict(const struct sock *sk,
57 const struct inet_bind_bucket *tb, bool relax) 57 const struct inet_bind_bucket *tb, bool relax)
58{ 58{
59 struct sock *sk2; 59 struct sock *sk2;
60 struct hlist_node *node;
61 int reuse = sk->sk_reuse; 60 int reuse = sk->sk_reuse;
62 int reuseport = sk->sk_reuseport; 61 int reuseport = sk->sk_reuseport;
63 kuid_t uid = sock_i_uid((struct sock *)sk); 62 kuid_t uid = sock_i_uid((struct sock *)sk);
@@ -69,7 +68,7 @@ int inet_csk_bind_conflict(const struct sock *sk,
69 * one this bucket belongs to. 68 * one this bucket belongs to.
70 */ 69 */
71 70
72 sk_for_each_bound(sk2, node, &tb->owners) { 71 sk_for_each_bound(sk2, &tb->owners) {
73 if (sk != sk2 && 72 if (sk != sk2 &&
74 !inet_v6_ipv6only(sk2) && 73 !inet_v6_ipv6only(sk2) &&
75 (!sk->sk_bound_dev_if || 74 (!sk->sk_bound_dev_if ||
@@ -95,7 +94,7 @@ int inet_csk_bind_conflict(const struct sock *sk,
95 } 94 }
96 } 95 }
97 } 96 }
98 return node != NULL; 97 return sk2 != NULL;
99} 98}
100EXPORT_SYMBOL_GPL(inet_csk_bind_conflict); 99EXPORT_SYMBOL_GPL(inet_csk_bind_conflict);
101 100
@@ -106,7 +105,6 @@ int inet_csk_get_port(struct sock *sk, unsigned short snum)
106{ 105{
107 struct inet_hashinfo *hashinfo = sk->sk_prot->h.hashinfo; 106 struct inet_hashinfo *hashinfo = sk->sk_prot->h.hashinfo;
108 struct inet_bind_hashbucket *head; 107 struct inet_bind_hashbucket *head;
109 struct hlist_node *node;
110 struct inet_bind_bucket *tb; 108 struct inet_bind_bucket *tb;
111 int ret, attempts = 5; 109 int ret, attempts = 5;
112 struct net *net = sock_net(sk); 110 struct net *net = sock_net(sk);
@@ -129,7 +127,7 @@ again:
129 head = &hashinfo->bhash[inet_bhashfn(net, rover, 127 head = &hashinfo->bhash[inet_bhashfn(net, rover,
130 hashinfo->bhash_size)]; 128 hashinfo->bhash_size)];
131 spin_lock(&head->lock); 129 spin_lock(&head->lock);
132 inet_bind_bucket_for_each(tb, node, &head->chain) 130 inet_bind_bucket_for_each(tb, &head->chain)
133 if (net_eq(ib_net(tb), net) && tb->port == rover) { 131 if (net_eq(ib_net(tb), net) && tb->port == rover) {
134 if (((tb->fastreuse > 0 && 132 if (((tb->fastreuse > 0 &&
135 sk->sk_reuse && 133 sk->sk_reuse &&
@@ -183,7 +181,7 @@ have_snum:
183 head = &hashinfo->bhash[inet_bhashfn(net, snum, 181 head = &hashinfo->bhash[inet_bhashfn(net, snum,
184 hashinfo->bhash_size)]; 182 hashinfo->bhash_size)];
185 spin_lock(&head->lock); 183 spin_lock(&head->lock);
186 inet_bind_bucket_for_each(tb, node, &head->chain) 184 inet_bind_bucket_for_each(tb, &head->chain)
187 if (net_eq(ib_net(tb), net) && tb->port == snum) 185 if (net_eq(ib_net(tb), net) && tb->port == snum)
188 goto tb_found; 186 goto tb_found;
189 } 187 }
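
[note] inet_csk_bind_conflict() is the post-loop idiom again, this time in return position: "return node != NULL" meant "the walk broke early on a conflicting socket", and since sk_for_each_bound() now leaves sk2 == NULL on normal exit, "return sk2 != NULL" is the exact equivalent. Condensed sketch (clashes() is hypothetical, standing in for the reuse/reuseport checks):

    static bool bind_conflict(const struct sock *sk, struct hlist_head *owners)
    {
            struct sock *sk2;

            sk_for_each_bound(sk2, owners) {
                    if (sk2 != sk && clashes(sk, sk2))
                            break;
            }
            return sk2 != NULL;     /* was: node != NULL */
    }
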
diff --git a/net/ipv4/inet_fragment.c b/net/ipv4/inet_fragment.c
index 2e453bde6992..245ae078a07f 100644
--- a/net/ipv4/inet_fragment.c
+++ b/net/ipv4/inet_fragment.c
@@ -33,9 +33,9 @@ static void inet_frag_secret_rebuild(unsigned long dummy)
33 get_random_bytes(&f->rnd, sizeof(u32)); 33 get_random_bytes(&f->rnd, sizeof(u32));
34 for (i = 0; i < INETFRAGS_HASHSZ; i++) { 34 for (i = 0; i < INETFRAGS_HASHSZ; i++) {
35 struct inet_frag_queue *q; 35 struct inet_frag_queue *q;
36 struct hlist_node *p, *n; 36 struct hlist_node *n;
37 37
38 hlist_for_each_entry_safe(q, p, n, &f->hash[i], list) { 38 hlist_for_each_entry_safe(q, n, &f->hash[i], list) {
39 unsigned int hval = f->hashfn(q); 39 unsigned int hval = f->hashfn(q);
40 40
41 if (hval != i) { 41 if (hval != i) {
@@ -203,7 +203,6 @@ static struct inet_frag_queue *inet_frag_intern(struct netns_frags *nf,
203{ 203{
204 struct inet_frag_queue *qp; 204 struct inet_frag_queue *qp;
205#ifdef CONFIG_SMP 205#ifdef CONFIG_SMP
206 struct hlist_node *n;
207#endif 206#endif
208 unsigned int hash; 207 unsigned int hash;
209 208
@@ -219,7 +218,7 @@ static struct inet_frag_queue *inet_frag_intern(struct netns_frags *nf,
219 * such entry could be created on other cpu, while we 218 * such entry could be created on other cpu, while we
220 * promoted read lock to write lock. 219 * promoted read lock to write lock.
221 */ 220 */
222 hlist_for_each_entry(qp, n, &f->hash[hash], list) { 221 hlist_for_each_entry(qp, &f->hash[hash], list) {
223 if (qp->net == nf && f->match(qp, arg)) { 222 if (qp->net == nf && f->match(qp, arg)) {
224 atomic_inc(&qp->refcnt); 223 atomic_inc(&qp->refcnt);
225 write_unlock(&f->lock); 224 write_unlock(&f->lock);
@@ -278,9 +277,8 @@ struct inet_frag_queue *inet_frag_find(struct netns_frags *nf,
278 __releases(&f->lock) 277 __releases(&f->lock)
279{ 278{
280 struct inet_frag_queue *q; 279 struct inet_frag_queue *q;
281 struct hlist_node *n;
282 280
283 hlist_for_each_entry(q, n, &f->hash[hash], list) { 281 hlist_for_each_entry(q, &f->hash[hash], list) {
284 if (q->net == nf && f->match(q, key)) { 282 if (q->net == nf && f->match(q, key)) {
285 atomic_inc(&q->refcnt); 283 atomic_inc(&q->refcnt);
286 read_unlock(&f->lock); 284 read_unlock(&f->lock);
diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
index 0ce0595d9861..6af375afeeef 100644
--- a/net/ipv4/inet_hashtables.c
+++ b/net/ipv4/inet_hashtables.c
@@ -120,13 +120,12 @@ int __inet_inherit_port(struct sock *sk, struct sock *child)
120 * that the listener socket's icsk_bind_hash is the same 120 * that the listener socket's icsk_bind_hash is the same
121 * as that of the child socket. We have to look up or 121 * as that of the child socket. We have to look up or
122 * create a new bind bucket for the child here. */ 122 * create a new bind bucket for the child here. */
123 struct hlist_node *node; 123 inet_bind_bucket_for_each(tb, &head->chain) {
124 inet_bind_bucket_for_each(tb, node, &head->chain) {
125 if (net_eq(ib_net(tb), sock_net(sk)) && 124 if (net_eq(ib_net(tb), sock_net(sk)) &&
126 tb->port == port) 125 tb->port == port)
127 break; 126 break;
128 } 127 }
129 if (!node) { 128 if (!tb) {
130 tb = inet_bind_bucket_create(table->bind_bucket_cachep, 129 tb = inet_bind_bucket_create(table->bind_bucket_cachep,
131 sock_net(sk), head, port); 130 sock_net(sk), head, port);
132 if (!tb) { 131 if (!tb) {
@@ -493,7 +492,6 @@ int __inet_hash_connect(struct inet_timewait_death_row *death_row,
493 int i, remaining, low, high, port; 492 int i, remaining, low, high, port;
494 static u32 hint; 493 static u32 hint;
495 u32 offset = hint + port_offset; 494 u32 offset = hint + port_offset;
496 struct hlist_node *node;
497 struct inet_timewait_sock *tw = NULL; 495 struct inet_timewait_sock *tw = NULL;
498 496
499 inet_get_local_port_range(&low, &high); 497 inet_get_local_port_range(&low, &high);
@@ -512,7 +510,7 @@ int __inet_hash_connect(struct inet_timewait_death_row *death_row,
512 * because the established check is already 510 * because the established check is already
513 * unique enough. 511 * unique enough.
514 */ 512 */
515 inet_bind_bucket_for_each(tb, node, &head->chain) { 513 inet_bind_bucket_for_each(tb, &head->chain) {
516 if (net_eq(ib_net(tb), net) && 514 if (net_eq(ib_net(tb), net) &&
517 tb->port == port) { 515 tb->port == port) {
518 if (tb->fastreuse >= 0 || 516 if (tb->fastreuse >= 0 ||
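
[note] __inet_inherit_port() makes the same move (if (!node) becomes if (!tb)), and it works because inet_bind_bucket_for_each() is itself just a wrapper that shrinks with the series. Paraphrased from include/net/inet_hashtables.h:

    /* before: */
    #define inet_bind_bucket_for_each(tb, pos, head) \
            hlist_for_each_entry(tb, pos, head, node)

    /* after: */
    #define inet_bind_bucket_for_each(tb, head) \
            hlist_for_each_entry(tb, head, node)
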
diff --git a/net/ipv4/inet_timewait_sock.c b/net/ipv4/inet_timewait_sock.c
index 2784db3155fb..1f27c9f4afd0 100644
--- a/net/ipv4/inet_timewait_sock.c
+++ b/net/ipv4/inet_timewait_sock.c
@@ -216,7 +216,6 @@ static int inet_twdr_do_twkill_work(struct inet_timewait_death_row *twdr,
216 const int slot) 216 const int slot)
217{ 217{
218 struct inet_timewait_sock *tw; 218 struct inet_timewait_sock *tw;
219 struct hlist_node *node;
220 unsigned int killed; 219 unsigned int killed;
221 int ret; 220 int ret;
222 221
@@ -229,7 +228,7 @@ static int inet_twdr_do_twkill_work(struct inet_timewait_death_row *twdr,
229 killed = 0; 228 killed = 0;
230 ret = 0; 229 ret = 0;
231rescan: 230rescan:
232 inet_twsk_for_each_inmate(tw, node, &twdr->cells[slot]) { 231 inet_twsk_for_each_inmate(tw, &twdr->cells[slot]) {
233 __inet_twsk_del_dead_node(tw); 232 __inet_twsk_del_dead_node(tw);
234 spin_unlock(&twdr->death_lock); 233 spin_unlock(&twdr->death_lock);
235 __inet_twsk_kill(tw, twdr->hashinfo); 234 __inet_twsk_kill(tw, twdr->hashinfo);
@@ -438,10 +437,10 @@ void inet_twdr_twcal_tick(unsigned long data)
438 437
439 for (n = 0; n < INET_TWDR_RECYCLE_SLOTS; n++) { 438 for (n = 0; n < INET_TWDR_RECYCLE_SLOTS; n++) {
440 if (time_before_eq(j, now)) { 439 if (time_before_eq(j, now)) {
441 struct hlist_node *node, *safe; 440 struct hlist_node *safe;
442 struct inet_timewait_sock *tw; 441 struct inet_timewait_sock *tw;
443 442
444 inet_twsk_for_each_inmate_safe(tw, node, safe, 443 inet_twsk_for_each_inmate_safe(tw, safe,
445 &twdr->twcal_row[slot]) { 444 &twdr->twcal_row[slot]) {
446 __inet_twsk_del_dead_node(tw); 445 __inet_twsk_del_dead_node(tw);
447 __inet_twsk_kill(tw, twdr->hashinfo); 446 __inet_twsk_kill(tw, twdr->hashinfo);
diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
index 53ddebc292b6..dd44e0ab600c 100644
--- a/net/ipv4/raw.c
+++ b/net/ipv4/raw.c
@@ -111,9 +111,7 @@ EXPORT_SYMBOL_GPL(raw_unhash_sk);
 static struct sock *__raw_v4_lookup(struct net *net, struct sock *sk,
 		unsigned short num, __be32 raddr, __be32 laddr, int dif)
 {
-	struct hlist_node *node;
-
-	sk_for_each_from(sk, node) {
+	sk_for_each_from(sk) {
 		struct inet_sock *inet = inet_sk(sk);
 
 		if (net_eq(sock_net(sk), net) && inet->inet_num == num &&
@@ -914,9 +912,7 @@ static struct sock *raw_get_first(struct seq_file *seq)
 
 	for (state->bucket = 0; state->bucket < RAW_HTABLE_SIZE;
 			++state->bucket) {
-		struct hlist_node *node;
-
-		sk_for_each(sk, node, &state->h->ht[state->bucket])
+		sk_for_each(sk, &state->h->ht[state->bucket])
 			if (sock_net(sk) == seq_file_net(seq))
 				goto found;
 	}
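sk_for_each_from() is the resume-from-here variant: iteration starts at the sk the caller passes in rather than at a list head, which is how __raw_v4_lookup() continues a partially walked chain. A sketch with a hypothetical caller; only the loop shape is the point:

#include <net/inet_sock.h>

static struct sock *next_raw_match(struct sock *sk, unsigned short num)
{
	sk_for_each_from(sk)	/* starts at sk, leaves sk == NULL at the end */
		if (inet_sk(sk)->inet_num == num)
			return sk;
	return NULL;
}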
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 145d3bf8df86..4a8ec457310f 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -954,7 +954,6 @@ struct tcp_md5sig_key *tcp_md5_do_lookup(struct sock *sk,
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 	struct tcp_md5sig_key *key;
-	struct hlist_node *pos;
 	unsigned int size = sizeof(struct in_addr);
 	struct tcp_md5sig_info *md5sig;
 
@@ -968,7 +967,7 @@ struct tcp_md5sig_key *tcp_md5_do_lookup(struct sock *sk,
 	if (family == AF_INET6)
 		size = sizeof(struct in6_addr);
 #endif
-	hlist_for_each_entry_rcu(key, pos, &md5sig->head, node) {
+	hlist_for_each_entry_rcu(key, &md5sig->head, node) {
 		if (key->family != family)
 			continue;
 		if (!memcmp(&key->addr, addr, size))
@@ -1069,14 +1068,14 @@ static void tcp_clear_md5_list(struct sock *sk)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 	struct tcp_md5sig_key *key;
-	struct hlist_node *pos, *n;
+	struct hlist_node *n;
 	struct tcp_md5sig_info *md5sig;
 
 	md5sig = rcu_dereference_protected(tp->md5sig_info, 1);
 
 	if (!hlist_empty(&md5sig->head))
 		tcp_free_md5sig_pool();
-	hlist_for_each_entry_safe(key, pos, n, &md5sig->head, node) {
+	hlist_for_each_entry_safe(key, n, &md5sig->head, node) {
 		hlist_del_rcu(&key->node);
 		atomic_sub(sizeof(*key), &sk->sk_omem_alloc);
 		kfree_rcu(key, rcu);
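tcp_clear_md5_list() shows the RCU teardown idiom on the new API: unlink with hlist_del_rcu() under writer-side exclusion, then let kfree_rcu() defer the free past any readers still traversing the old pointers. A minimal sketch, assuming a struct with an embedded rcu_head named rcu (illustrative names throughout):

#include <linux/rculist.h>
#include <linux/slab.h>

struct bar {
	struct hlist_node node;
	struct rcu_head rcu;
};

static void clear_all(struct hlist_head *head)
{
	struct bar *b;
	struct hlist_node *n;

	hlist_for_each_entry_safe(b, n, head, node) {
		hlist_del_rcu(&b->node);
		kfree_rcu(b, rcu);	/* freed only after a grace period */
	}
}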
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 4dc0d44a5d31..f2c7e615f902 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -1419,11 +1419,10 @@ int ipv6_chk_addr(struct net *net, const struct in6_addr *addr,
 		  struct net_device *dev, int strict)
 {
 	struct inet6_ifaddr *ifp;
-	struct hlist_node *node;
 	unsigned int hash = inet6_addr_hash(addr);
 
 	rcu_read_lock_bh();
-	hlist_for_each_entry_rcu(ifp, node, &inet6_addr_lst[hash], addr_lst) {
+	hlist_for_each_entry_rcu(ifp, &inet6_addr_lst[hash], addr_lst) {
 		if (!net_eq(dev_net(ifp->idev->dev), net))
 			continue;
 		if (ipv6_addr_equal(&ifp->addr, addr) &&
@@ -1445,9 +1444,8 @@ static bool ipv6_chk_same_addr(struct net *net, const struct in6_addr *addr,
 {
 	unsigned int hash = inet6_addr_hash(addr);
 	struct inet6_ifaddr *ifp;
-	struct hlist_node *node;
 
-	hlist_for_each_entry(ifp, node, &inet6_addr_lst[hash], addr_lst) {
+	hlist_for_each_entry(ifp, &inet6_addr_lst[hash], addr_lst) {
 		if (!net_eq(dev_net(ifp->idev->dev), net))
 			continue;
 		if (ipv6_addr_equal(&ifp->addr, addr)) {
@@ -1487,10 +1485,9 @@ struct inet6_ifaddr *ipv6_get_ifaddr(struct net *net, const struct in6_addr *add
 {
 	struct inet6_ifaddr *ifp, *result = NULL;
 	unsigned int hash = inet6_addr_hash(addr);
-	struct hlist_node *node;
 
 	rcu_read_lock_bh();
-	hlist_for_each_entry_rcu_bh(ifp, node, &inet6_addr_lst[hash], addr_lst) {
+	hlist_for_each_entry_rcu_bh(ifp, &inet6_addr_lst[hash], addr_lst) {
 		if (!net_eq(dev_net(ifp->idev->dev), net))
 			continue;
 		if (ipv6_addr_equal(&ifp->addr, addr)) {
@@ -2907,11 +2904,10 @@ static int addrconf_ifdown(struct net_device *dev, int how)
 	/* Step 2: clear hash table */
 	for (i = 0; i < IN6_ADDR_HSIZE; i++) {
 		struct hlist_head *h = &inet6_addr_lst[i];
-		struct hlist_node *n;
 
 		spin_lock_bh(&addrconf_hash_lock);
 restart:
-		hlist_for_each_entry_rcu(ifa, n, h, addr_lst) {
+		hlist_for_each_entry_rcu(ifa, h, addr_lst) {
 			if (ifa->idev == idev) {
 				hlist_del_init_rcu(&ifa->addr_lst);
 				addrconf_del_timer(ifa);
@@ -3218,8 +3214,7 @@ static struct inet6_ifaddr *if6_get_first(struct seq_file *seq, loff_t pos)
 	}
 
 	for (; state->bucket < IN6_ADDR_HSIZE; ++state->bucket) {
-		struct hlist_node *n;
-		hlist_for_each_entry_rcu_bh(ifa, n, &inet6_addr_lst[state->bucket],
+		hlist_for_each_entry_rcu_bh(ifa, &inet6_addr_lst[state->bucket],
 					    addr_lst) {
 			if (!net_eq(dev_net(ifa->idev->dev), net))
 				continue;
@@ -3244,9 +3239,8 @@ static struct inet6_ifaddr *if6_get_next(struct seq_file *seq,
 {
 	struct if6_iter_state *state = seq->private;
 	struct net *net = seq_file_net(seq);
-	struct hlist_node *n = &ifa->addr_lst;
 
-	hlist_for_each_entry_continue_rcu_bh(ifa, n, addr_lst) {
+	hlist_for_each_entry_continue_rcu_bh(ifa, addr_lst) {
 		if (!net_eq(dev_net(ifa->idev->dev), net))
 			continue;
 		state->offset++;
@@ -3255,7 +3249,7 @@ static struct inet6_ifaddr *if6_get_next(struct seq_file *seq,
 
 	while (++state->bucket < IN6_ADDR_HSIZE) {
 		state->offset = 0;
-		hlist_for_each_entry_rcu_bh(ifa, n,
+		hlist_for_each_entry_rcu_bh(ifa,
 			&inet6_addr_lst[state->bucket], addr_lst) {
 			if (!net_eq(dev_net(ifa->idev->dev), net))
 				continue;
@@ -3357,11 +3351,10 @@ int ipv6_chk_home_addr(struct net *net, const struct in6_addr *addr)
 {
 	int ret = 0;
 	struct inet6_ifaddr *ifp = NULL;
-	struct hlist_node *n;
 	unsigned int hash = inet6_addr_hash(addr);
 
 	rcu_read_lock_bh();
-	hlist_for_each_entry_rcu_bh(ifp, n, &inet6_addr_lst[hash], addr_lst) {
+	hlist_for_each_entry_rcu_bh(ifp, &inet6_addr_lst[hash], addr_lst) {
 		if (!net_eq(dev_net(ifp->idev->dev), net))
 			continue;
 		if (ipv6_addr_equal(&ifp->addr, addr) &&
@@ -3383,7 +3376,6 @@ static void addrconf_verify(unsigned long foo)
 {
 	unsigned long now, next, next_sec, next_sched;
 	struct inet6_ifaddr *ifp;
-	struct hlist_node *node;
 	int i;
 
 	rcu_read_lock_bh();
@@ -3395,7 +3387,7 @@ static void addrconf_verify(unsigned long foo)
 
 	for (i = 0; i < IN6_ADDR_HSIZE; i++) {
 restart:
-		hlist_for_each_entry_rcu_bh(ifp, node,
+		hlist_for_each_entry_rcu_bh(ifp,
 					    &inet6_addr_lst[i], addr_lst) {
 			unsigned long age;
 
@@ -3866,7 +3858,6 @@ static int inet6_dump_addr(struct sk_buff *skb, struct netlink_callback *cb,
 	struct net_device *dev;
 	struct inet6_dev *idev;
 	struct hlist_head *head;
-	struct hlist_node *node;
 
 	s_h = cb->args[0];
 	s_idx = idx = cb->args[1];
@@ -3876,7 +3867,7 @@ static int inet6_dump_addr(struct sk_buff *skb, struct netlink_callback *cb,
 	for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
 		idx = 0;
 		head = &net->dev_index_head[h];
-		hlist_for_each_entry_rcu(dev, node, head, index_hlist) {
+		hlist_for_each_entry_rcu(dev, head, index_hlist) {
 			if (idx < s_idx)
 				goto cont;
 			if (h > s_h || idx > s_idx)
@@ -4222,7 +4213,6 @@ static int inet6_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
 	struct net_device *dev;
 	struct inet6_dev *idev;
 	struct hlist_head *head;
-	struct hlist_node *node;
 
 	s_h = cb->args[0];
 	s_idx = cb->args[1];
@@ -4231,7 +4221,7 @@ static int inet6_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
 	for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
 		idx = 0;
 		head = &net->dev_index_head[h];
-		hlist_for_each_entry_rcu(dev, node, head, index_hlist) {
+		hlist_for_each_entry_rcu(dev, head, index_hlist) {
 			if (idx < s_idx)
 				goto cont;
 			idev = __in6_dev_get(dev);
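if6_get_next() no longer has to seed a node pointer from &ifa->addr_lst: the _continue variants resume from the entry's own embedded node. A sketch of the resume step, assumed to run under rcu_read_lock_bh() and using illustrative names (struct addr_ent, addr_ent_next):

#include <linux/rculist.h>

struct addr_ent {
	struct hlist_node list;
};

/* returns the entry after 'cur' on its chain, or NULL at the end */
static struct addr_ent *addr_ent_next(struct addr_ent *cur)
{
	hlist_for_each_entry_continue_rcu_bh(cur, list)
		return cur;
	return NULL;
}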
diff --git a/net/ipv6/addrlabel.c b/net/ipv6/addrlabel.c
index ff76eecfd622..aad64352cb60 100644
--- a/net/ipv6/addrlabel.c
+++ b/net/ipv6/addrlabel.c
@@ -173,9 +173,8 @@ static struct ip6addrlbl_entry *__ipv6_addr_label(struct net *net,
 						  const struct in6_addr *addr,
 						  int type, int ifindex)
 {
-	struct hlist_node *pos;
 	struct ip6addrlbl_entry *p;
-	hlist_for_each_entry_rcu(p, pos, &ip6addrlbl_table.head, list) {
+	hlist_for_each_entry_rcu(p, &ip6addrlbl_table.head, list) {
 		if (__ip6addrlbl_match(net, p, addr, type, ifindex))
 			return p;
 	}
@@ -261,9 +260,9 @@ static int __ip6addrlbl_add(struct ip6addrlbl_entry *newp, int replace)
 	if (hlist_empty(&ip6addrlbl_table.head)) {
 		hlist_add_head_rcu(&newp->list, &ip6addrlbl_table.head);
 	} else {
-		struct hlist_node *pos, *n;
+		struct hlist_node *n;
 		struct ip6addrlbl_entry *p = NULL;
-		hlist_for_each_entry_safe(p, pos, n,
+		hlist_for_each_entry_safe(p, n,
 					  &ip6addrlbl_table.head, list) {
 			if (p->prefixlen == newp->prefixlen &&
 			    net_eq(ip6addrlbl_net(p), ip6addrlbl_net(newp)) &&
@@ -319,13 +318,13 @@ static int __ip6addrlbl_del(struct net *net,
 			    int ifindex)
 {
 	struct ip6addrlbl_entry *p = NULL;
-	struct hlist_node *pos, *n;
+	struct hlist_node *n;
 	int ret = -ESRCH;
 
 	ADDRLABEL(KERN_DEBUG "%s(prefix=%pI6, prefixlen=%d, ifindex=%d)\n",
 		  __func__, prefix, prefixlen, ifindex);
 
-	hlist_for_each_entry_safe(p, pos, n, &ip6addrlbl_table.head, list) {
+	hlist_for_each_entry_safe(p, n, &ip6addrlbl_table.head, list) {
 		if (p->prefixlen == prefixlen &&
 		    net_eq(ip6addrlbl_net(p), net) &&
 		    p->ifindex == ifindex &&
@@ -380,11 +379,11 @@ static int __net_init ip6addrlbl_net_init(struct net *net)
 static void __net_exit ip6addrlbl_net_exit(struct net *net)
 {
 	struct ip6addrlbl_entry *p = NULL;
-	struct hlist_node *pos, *n;
+	struct hlist_node *n;
 
 	/* Remove all labels belonging to the exiting net */
 	spin_lock(&ip6addrlbl_table.lock);
-	hlist_for_each_entry_safe(p, pos, n, &ip6addrlbl_table.head, list) {
+	hlist_for_each_entry_safe(p, n, &ip6addrlbl_table.head, list) {
 		if (net_eq(ip6addrlbl_net(p), net)) {
 			hlist_del_rcu(&p->list);
 			ip6addrlbl_put(p);
@@ -505,12 +504,11 @@ static int ip6addrlbl_dump(struct sk_buff *skb, struct netlink_callback *cb)
 {
 	struct net *net = sock_net(skb->sk);
 	struct ip6addrlbl_entry *p;
-	struct hlist_node *pos;
 	int idx = 0, s_idx = cb->args[0];
 	int err;
 
 	rcu_read_lock();
-	hlist_for_each_entry_rcu(p, pos, &ip6addrlbl_table.head, list) {
+	hlist_for_each_entry_rcu(p, &ip6addrlbl_table.head, list) {
 		if (idx >= s_idx &&
 		    net_eq(ip6addrlbl_net(p), net)) {
 			if ((err = ip6addrlbl_fill(skb, p,
diff --git a/net/ipv6/inet6_connection_sock.c b/net/ipv6/inet6_connection_sock.c
index b386a2ce4c6f..9bfab19ff3c0 100644
--- a/net/ipv6/inet6_connection_sock.c
+++ b/net/ipv6/inet6_connection_sock.c
@@ -31,7 +31,6 @@ int inet6_csk_bind_conflict(const struct sock *sk,
 			    const struct inet_bind_bucket *tb, bool relax)
 {
 	const struct sock *sk2;
-	const struct hlist_node *node;
 	int reuse = sk->sk_reuse;
 	int reuseport = sk->sk_reuseport;
 	kuid_t uid = sock_i_uid((struct sock *)sk);
@@ -41,7 +40,7 @@ int inet6_csk_bind_conflict(const struct sock *sk,
 	 * See comment in inet_csk_bind_conflict about sock lookup
 	 * vs net namespaces issues.
 	 */
-	sk_for_each_bound(sk2, node, &tb->owners) {
+	sk_for_each_bound(sk2, &tb->owners) {
 		if (sk != sk2 &&
 		    (!sk->sk_bound_dev_if ||
 		     !sk2->sk_bound_dev_if ||
@@ -58,7 +57,7 @@ int inet6_csk_bind_conflict(const struct sock *sk,
 		}
 	}
 
-	return node != NULL;
+	return sk2 != NULL;
 }
 
 EXPORT_SYMBOL_GPL(inet6_csk_bind_conflict);
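The "return node != NULL" to "return sk2 != NULL" change is the one behavioural subtlety in this series: each step of the rewritten iterators goes through hlist_entry_safe(), which maps a NULL node to a NULL entry, so after a completed loop the entry cursor itself is NULL and doubles as the found/not-found flag when the body breaks out early. The 3.9-era helper, quoted from include/linux/list.h from memory (treat as a sketch):

#define hlist_entry_safe(ptr, type, member) \
	({ typeof(ptr) ____ptr = (ptr); \
	   ____ptr ? hlist_entry(____ptr, type, member) : NULL; \
	})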
diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
index 710cafd2e1a9..192dd1a0e188 100644
--- a/net/ipv6/ip6_fib.c
+++ b/net/ipv6/ip6_fib.c
@@ -224,7 +224,6 @@ struct fib6_table *fib6_get_table(struct net *net, u32 id)
 {
 	struct fib6_table *tb;
 	struct hlist_head *head;
-	struct hlist_node *node;
 	unsigned int h;
 
 	if (id == 0)
@@ -232,7 +231,7 @@ struct fib6_table *fib6_get_table(struct net *net, u32 id)
 	h = id & (FIB6_TABLE_HASHSZ - 1);
 	rcu_read_lock();
 	head = &net->ipv6.fib_table_hash[h];
-	hlist_for_each_entry_rcu(tb, node, head, tb6_hlist) {
+	hlist_for_each_entry_rcu(tb, head, tb6_hlist) {
 		if (tb->tb6_id == id) {
 			rcu_read_unlock();
 			return tb;
@@ -363,7 +362,6 @@ static int inet6_dump_fib(struct sk_buff *skb, struct netlink_callback *cb)
 	struct rt6_rtnl_dump_arg arg;
 	struct fib6_walker_t *w;
 	struct fib6_table *tb;
-	struct hlist_node *node;
 	struct hlist_head *head;
 	int res = 0;
 
@@ -398,7 +396,7 @@ static int inet6_dump_fib(struct sk_buff *skb, struct netlink_callback *cb)
 	for (h = s_h; h < FIB6_TABLE_HASHSZ; h++, s_e = 0) {
 		e = 0;
 		head = &net->ipv6.fib_table_hash[h];
-		hlist_for_each_entry_rcu(tb, node, head, tb6_hlist) {
+		hlist_for_each_entry_rcu(tb, head, tb6_hlist) {
 			if (e < s_e)
 				goto next;
 			res = fib6_dump_table(tb, skb, cb);
@@ -1520,14 +1518,13 @@ void fib6_clean_all_ro(struct net *net, int (*func)(struct rt6_info *, void *arg
 		       int prune, void *arg)
 {
 	struct fib6_table *table;
-	struct hlist_node *node;
 	struct hlist_head *head;
 	unsigned int h;
 
 	rcu_read_lock();
 	for (h = 0; h < FIB6_TABLE_HASHSZ; h++) {
 		head = &net->ipv6.fib_table_hash[h];
-		hlist_for_each_entry_rcu(table, node, head, tb6_hlist) {
+		hlist_for_each_entry_rcu(table, head, tb6_hlist) {
 			read_lock_bh(&table->tb6_lock);
 			fib6_clean_tree(net, &table->tb6_root,
 					func, prune, arg);
@@ -1540,14 +1537,13 @@ void fib6_clean_all(struct net *net, int (*func)(struct rt6_info *, void *arg),
 		    int prune, void *arg)
 {
 	struct fib6_table *table;
-	struct hlist_node *node;
 	struct hlist_head *head;
 	unsigned int h;
 
 	rcu_read_lock();
 	for (h = 0; h < FIB6_TABLE_HASHSZ; h++) {
 		head = &net->ipv6.fib_table_hash[h];
-		hlist_for_each_entry_rcu(table, node, head, tb6_hlist) {
+		hlist_for_each_entry_rcu(table, head, tb6_hlist) {
 			write_lock_bh(&table->tb6_lock);
 			fib6_clean_tree(net, &table->tb6_root,
 					func, prune, arg);
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
index c65907db8c44..330b5e7b7df6 100644
--- a/net/ipv6/raw.c
+++ b/net/ipv6/raw.c
@@ -71,10 +71,9 @@ static struct sock *__raw_v6_lookup(struct net *net, struct sock *sk,
 		unsigned short num, const struct in6_addr *loc_addr,
 		const struct in6_addr *rmt_addr, int dif)
 {
-	struct hlist_node *node;
 	bool is_multicast = ipv6_addr_is_multicast(loc_addr);
 
-	sk_for_each_from(sk, node)
+	sk_for_each_from(sk)
 		if (inet_sk(sk)->inet_num == num) {
 			struct ipv6_pinfo *np = inet6_sk(sk);
 
diff --git a/net/ipv6/xfrm6_tunnel.c b/net/ipv6/xfrm6_tunnel.c
index 6cc48012b730..de2bcfaaf759 100644
--- a/net/ipv6/xfrm6_tunnel.c
+++ b/net/ipv6/xfrm6_tunnel.c
@@ -89,9 +89,8 @@ static struct xfrm6_tunnel_spi *__xfrm6_tunnel_spi_lookup(struct net *net, const
 {
 	struct xfrm6_tunnel_net *xfrm6_tn = xfrm6_tunnel_pernet(net);
 	struct xfrm6_tunnel_spi *x6spi;
-	struct hlist_node *pos;
 
-	hlist_for_each_entry_rcu(x6spi, pos,
+	hlist_for_each_entry_rcu(x6spi,
 			&xfrm6_tn->spi_byaddr[xfrm6_tunnel_spi_hash_byaddr(saddr)],
 			list_byaddr) {
 		if (xfrm6_addr_equal(&x6spi->addr, saddr))
@@ -120,9 +119,8 @@ static int __xfrm6_tunnel_spi_check(struct net *net, u32 spi)
 	struct xfrm6_tunnel_net *xfrm6_tn = xfrm6_tunnel_pernet(net);
 	struct xfrm6_tunnel_spi *x6spi;
 	int index = xfrm6_tunnel_spi_hash_byspi(spi);
-	struct hlist_node *pos;
 
-	hlist_for_each_entry(x6spi, pos,
+	hlist_for_each_entry(x6spi,
 			&xfrm6_tn->spi_byspi[index],
 			list_byspi) {
 		if (x6spi->spi == spi)
@@ -203,11 +201,11 @@ static void xfrm6_tunnel_free_spi(struct net *net, xfrm_address_t *saddr)
 {
 	struct xfrm6_tunnel_net *xfrm6_tn = xfrm6_tunnel_pernet(net);
 	struct xfrm6_tunnel_spi *x6spi;
-	struct hlist_node *pos, *n;
+	struct hlist_node *n;
 
 	spin_lock_bh(&xfrm6_tunnel_spi_lock);
 
-	hlist_for_each_entry_safe(x6spi, pos, n,
+	hlist_for_each_entry_safe(x6spi, n,
 			&xfrm6_tn->spi_byaddr[xfrm6_tunnel_spi_hash_byaddr(saddr)],
 			list_byaddr)
 	{
diff --git a/net/ipx/af_ipx.c b/net/ipx/af_ipx.c
index dfd6faaf0ea7..f547a47d381c 100644
--- a/net/ipx/af_ipx.c
+++ b/net/ipx/af_ipx.c
@@ -228,9 +228,8 @@ static struct sock *__ipxitf_find_socket(struct ipx_interface *intrfc,
 					 __be16 port)
 {
 	struct sock *s;
-	struct hlist_node *node;
 
-	sk_for_each(s, node, &intrfc->if_sklist)
+	sk_for_each(s, &intrfc->if_sklist)
 		if (ipx_sk(s)->port == port)
 			goto found;
 	s = NULL;
@@ -259,12 +258,11 @@ static struct sock *ipxitf_find_internal_socket(struct ipx_interface *intrfc,
 						__be16 port)
 {
 	struct sock *s;
-	struct hlist_node *node;
 
 	ipxitf_hold(intrfc);
 	spin_lock_bh(&intrfc->if_sklist_lock);
 
-	sk_for_each(s, node, &intrfc->if_sklist) {
+	sk_for_each(s, &intrfc->if_sklist) {
 		struct ipx_sock *ipxs = ipx_sk(s);
 
 		if (ipxs->port == port &&
@@ -282,14 +280,14 @@ found:
 static void __ipxitf_down(struct ipx_interface *intrfc)
 {
 	struct sock *s;
-	struct hlist_node *node, *t;
+	struct hlist_node *t;
 
 	/* Delete all routes associated with this interface */
 	ipxrtr_del_routes(intrfc);
 
 	spin_lock_bh(&intrfc->if_sklist_lock);
 	/* error sockets */
-	sk_for_each_safe(s, node, t, &intrfc->if_sklist) {
+	sk_for_each_safe(s, t, &intrfc->if_sklist) {
 		struct ipx_sock *ipxs = ipx_sk(s);
 
 		s->sk_err = ENOLINK;
@@ -385,12 +383,11 @@ static int ipxitf_demux_socket(struct ipx_interface *intrfc,
 	int is_broadcast = !memcmp(ipx->ipx_dest.node, ipx_broadcast_node,
 				   IPX_NODE_LEN);
 	struct sock *s;
-	struct hlist_node *node;
 	int rc;
 
 	spin_lock_bh(&intrfc->if_sklist_lock);
 
-	sk_for_each(s, node, &intrfc->if_sklist) {
+	sk_for_each(s, &intrfc->if_sklist) {
 		struct ipx_sock *ipxs = ipx_sk(s);
 
 		if (ipxs->port == ipx->ipx_dest.sock &&
@@ -446,12 +443,11 @@ static struct sock *ncp_connection_hack(struct ipx_interface *intrfc,
 	connection = (((int) *(ncphdr + 9)) << 8) | (int) *(ncphdr + 8);
 
 	if (connection) {
-		struct hlist_node *node;
 		/* Now we have to look for a special NCP connection handling
 		 * socket. Only these sockets have ipx_ncp_conn != 0, set by
 		 * SIOCIPXNCPCONN. */
 		spin_lock_bh(&intrfc->if_sklist_lock);
-		sk_for_each(sk, node, &intrfc->if_sklist)
+		sk_for_each(sk, &intrfc->if_sklist)
 			if (ipx_sk(sk)->ipx_ncp_conn == connection) {
 				sock_hold(sk);
 				goto found;
diff --git a/net/ipx/ipx_proc.c b/net/ipx/ipx_proc.c
index 02ff7f2f60d4..65e8833a2510 100644
--- a/net/ipx/ipx_proc.c
+++ b/net/ipx/ipx_proc.c
@@ -103,19 +103,18 @@ out:
 static __inline__ struct sock *ipx_get_socket_idx(loff_t pos)
 {
 	struct sock *s = NULL;
-	struct hlist_node *node;
 	struct ipx_interface *i;
 
 	list_for_each_entry(i, &ipx_interfaces, node) {
 		spin_lock_bh(&i->if_sklist_lock);
-		sk_for_each(s, node, &i->if_sklist) {
+		sk_for_each(s, &i->if_sklist) {
 			if (!pos)
 				break;
 			--pos;
 		}
 		spin_unlock_bh(&i->if_sklist_lock);
 		if (!pos) {
-			if (node)
+			if (s)
 				goto found;
 			break;
 		}
diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
index cd6f7a991d80..a7d11ffe4284 100644
--- a/net/iucv/af_iucv.c
+++ b/net/iucv/af_iucv.c
@@ -156,14 +156,13 @@ static int afiucv_pm_freeze(struct device *dev)
 {
 	struct iucv_sock *iucv;
 	struct sock *sk;
-	struct hlist_node *node;
 	int err = 0;
 
 #ifdef CONFIG_PM_DEBUG
 	printk(KERN_WARNING "afiucv_pm_freeze\n");
 #endif
 	read_lock(&iucv_sk_list.lock);
-	sk_for_each(sk, node, &iucv_sk_list.head) {
+	sk_for_each(sk, &iucv_sk_list.head) {
 		iucv = iucv_sk(sk);
 		switch (sk->sk_state) {
 		case IUCV_DISCONN:
@@ -194,13 +193,12 @@ static int afiucv_pm_freeze(struct device *dev)
 static int afiucv_pm_restore_thaw(struct device *dev)
 {
 	struct sock *sk;
-	struct hlist_node *node;
 
 #ifdef CONFIG_PM_DEBUG
 	printk(KERN_WARNING "afiucv_pm_restore_thaw\n");
 #endif
 	read_lock(&iucv_sk_list.lock);
-	sk_for_each(sk, node, &iucv_sk_list.head) {
+	sk_for_each(sk, &iucv_sk_list.head) {
 		switch (sk->sk_state) {
 		case IUCV_CONNECTED:
 			sk->sk_err = EPIPE;
@@ -390,9 +388,8 @@ static int afiucv_hs_send(struct iucv_message *imsg, struct sock *sock,
 static struct sock *__iucv_get_sock_by_name(char *nm)
 {
 	struct sock *sk;
-	struct hlist_node *node;
 
-	sk_for_each(sk, node, &iucv_sk_list.head)
+	sk_for_each(sk, &iucv_sk_list.head)
 		if (!memcmp(&iucv_sk(sk)->src_name, nm, 8))
 			return sk;
 
@@ -1678,7 +1675,6 @@ static int iucv_callback_connreq(struct iucv_path *path,
 	unsigned char user_data[16];
 	unsigned char nuser_data[16];
 	unsigned char src_name[8];
-	struct hlist_node *node;
 	struct sock *sk, *nsk;
 	struct iucv_sock *iucv, *niucv;
 	int err;
@@ -1689,7 +1685,7 @@ static int iucv_callback_connreq(struct iucv_path *path,
 	read_lock(&iucv_sk_list.lock);
 	iucv = NULL;
 	sk = NULL;
-	sk_for_each(sk, node, &iucv_sk_list.head)
+	sk_for_each(sk, &iucv_sk_list.head)
 		if (sk->sk_state == IUCV_LISTEN &&
 		    !memcmp(&iucv_sk(sk)->src_name, src_name, 8)) {
 			/*
@@ -2115,7 +2111,6 @@ static int afiucv_hs_callback_rx(struct sock *sk, struct sk_buff *skb)
 static int afiucv_hs_rcv(struct sk_buff *skb, struct net_device *dev,
			 struct packet_type *pt, struct net_device *orig_dev)
 {
-	struct hlist_node *node;
 	struct sock *sk;
 	struct iucv_sock *iucv;
 	struct af_iucv_trans_hdr *trans_hdr;
@@ -2132,7 +2127,7 @@ static int afiucv_hs_rcv(struct sk_buff *skb, struct net_device *dev,
 	iucv = NULL;
 	sk = NULL;
 	read_lock(&iucv_sk_list.lock);
-	sk_for_each(sk, node, &iucv_sk_list.head) {
+	sk_for_each(sk, &iucv_sk_list.head) {
 		if (trans_hdr->flags == AF_IUCV_FLAG_SYN) {
 			if ((!memcmp(&iucv_sk(sk)->src_name,
 				     trans_hdr->destAppName, 8)) &&
@@ -2225,10 +2220,9 @@ static void afiucv_hs_callback_txnotify(struct sk_buff *skb,
 	struct sk_buff *list_skb;
 	struct sk_buff *nskb;
 	unsigned long flags;
-	struct hlist_node *node;
 
 	read_lock_irqsave(&iucv_sk_list.lock, flags);
-	sk_for_each(sk, node, &iucv_sk_list.head)
+	sk_for_each(sk, &iucv_sk_list.head)
 		if (sk == isk) {
 			iucv = iucv_sk(sk);
 			break;
@@ -2299,14 +2293,13 @@ static int afiucv_netdev_event(struct notifier_block *this,
			       unsigned long event, void *ptr)
 {
 	struct net_device *event_dev = (struct net_device *)ptr;
-	struct hlist_node *node;
 	struct sock *sk;
 	struct iucv_sock *iucv;
 
 	switch (event) {
 	case NETDEV_REBOOT:
 	case NETDEV_GOING_DOWN:
-		sk_for_each(sk, node, &iucv_sk_list.head) {
+		sk_for_each(sk, &iucv_sk_list.head) {
 			iucv = iucv_sk(sk);
 			if ((iucv->hs_dev == event_dev) &&
 			    (sk->sk_state == IUCV_CONNECTED)) {
diff --git a/net/key/af_key.c b/net/key/af_key.c
index 9ef79851f297..556fdafdd1ea 100644
--- a/net/key/af_key.c
+++ b/net/key/af_key.c
@@ -225,7 +225,6 @@ static int pfkey_broadcast(struct sk_buff *skb, gfp_t allocation,
 {
 	struct netns_pfkey *net_pfkey = net_generic(net, pfkey_net_id);
 	struct sock *sk;
-	struct hlist_node *node;
 	struct sk_buff *skb2 = NULL;
 	int err = -ESRCH;
 
@@ -236,7 +235,7 @@ static int pfkey_broadcast(struct sk_buff *skb, gfp_t allocation,
 		return -ENOMEM;
 
 	rcu_read_lock();
-	sk_for_each_rcu(sk, node, &net_pfkey->table) {
+	sk_for_each_rcu(sk, &net_pfkey->table) {
 		struct pfkey_sock *pfk = pfkey_sk(sk);
 		int err2;
 
diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c
index dcfd64e83ab7..d36875f3427e 100644
--- a/net/l2tp/l2tp_core.c
+++ b/net/l2tp/l2tp_core.c
@@ -221,10 +221,9 @@ static struct l2tp_session *l2tp_session_find_2(struct net *net, u32 session_id)
 	struct hlist_head *session_list =
 		l2tp_session_id_hash_2(pn, session_id);
 	struct l2tp_session *session;
-	struct hlist_node *walk;
 
 	rcu_read_lock_bh();
-	hlist_for_each_entry_rcu(session, walk, session_list, global_hlist) {
+	hlist_for_each_entry_rcu(session, session_list, global_hlist) {
 		if (session->session_id == session_id) {
 			rcu_read_unlock_bh();
 			return session;
@@ -253,7 +252,6 @@ struct l2tp_session *l2tp_session_find(struct net *net, struct l2tp_tunnel *tunn
 {
 	struct hlist_head *session_list;
 	struct l2tp_session *session;
-	struct hlist_node *walk;
 
 	/* In L2TPv3, session_ids are unique over all tunnels and we
 	 * sometimes need to look them up before we know the
@@ -264,7 +262,7 @@ struct l2tp_session *l2tp_session_find(struct net *net, struct l2tp_tunnel *tunn
 
 	session_list = l2tp_session_id_hash(tunnel, session_id);
 	read_lock_bh(&tunnel->hlist_lock);
-	hlist_for_each_entry(session, walk, session_list, hlist) {
+	hlist_for_each_entry(session, session_list, hlist) {
 		if (session->session_id == session_id) {
 			read_unlock_bh(&tunnel->hlist_lock);
 			return session;
@@ -279,13 +277,12 @@ EXPORT_SYMBOL_GPL(l2tp_session_find);
 struct l2tp_session *l2tp_session_find_nth(struct l2tp_tunnel *tunnel, int nth)
 {
 	int hash;
-	struct hlist_node *walk;
 	struct l2tp_session *session;
 	int count = 0;
 
 	read_lock_bh(&tunnel->hlist_lock);
 	for (hash = 0; hash < L2TP_HASH_SIZE; hash++) {
-		hlist_for_each_entry(session, walk, &tunnel->session_hlist[hash], hlist) {
+		hlist_for_each_entry(session, &tunnel->session_hlist[hash], hlist) {
 			if (++count > nth) {
 				read_unlock_bh(&tunnel->hlist_lock);
 				return session;
@@ -306,12 +303,11 @@ struct l2tp_session *l2tp_session_find_by_ifname(struct net *net, char *ifname)
 {
 	struct l2tp_net *pn = l2tp_pernet(net);
 	int hash;
-	struct hlist_node *walk;
 	struct l2tp_session *session;
 
 	rcu_read_lock_bh();
 	for (hash = 0; hash < L2TP_HASH_SIZE_2; hash++) {
-		hlist_for_each_entry_rcu(session, walk, &pn->l2tp_session_hlist[hash], global_hlist) {
+		hlist_for_each_entry_rcu(session, &pn->l2tp_session_hlist[hash], global_hlist) {
 			if (!strcmp(session->ifname, ifname)) {
 				rcu_read_unlock_bh();
 				return session;
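l2tp_session_find_nth() above is the usual find-nth-across-buckets walk: visit buckets in index order and count entries until the nth is reached. A hedged sketch with illustrative names (struct session, find_nth); the caller is assumed to hold the appropriate read lock:

#include <linux/list.h>

struct session {
	struct hlist_node hlist;
};

static struct session *find_nth(struct hlist_head *tbl, int nbuckets, int nth)
{
	struct session *s;
	int hash, count = 0;

	for (hash = 0; hash < nbuckets; hash++)
		hlist_for_each_entry(s, &tbl[hash], hlist)
			if (++count > nth)
				return s;
	return NULL;
}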
diff --git a/net/l2tp/l2tp_ip.c b/net/l2tp/l2tp_ip.c
index f7ac8f42fee2..7f41b7051269 100644
--- a/net/l2tp/l2tp_ip.c
+++ b/net/l2tp/l2tp_ip.c
@@ -49,10 +49,9 @@ static inline struct l2tp_ip_sock *l2tp_ip_sk(const struct sock *sk)
 
 static struct sock *__l2tp_ip_bind_lookup(struct net *net, __be32 laddr, int dif, u32 tunnel_id)
 {
-	struct hlist_node *node;
 	struct sock *sk;
 
-	sk_for_each_bound(sk, node, &l2tp_ip_bind_table) {
+	sk_for_each_bound(sk, &l2tp_ip_bind_table) {
 		struct inet_sock *inet = inet_sk(sk);
 		struct l2tp_ip_sock *l2tp = l2tp_ip_sk(sk);
 
diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c
index 8ee4a86ae996..41f2f8126ebc 100644
--- a/net/l2tp/l2tp_ip6.c
+++ b/net/l2tp/l2tp_ip6.c
@@ -60,10 +60,9 @@ static struct sock *__l2tp_ip6_bind_lookup(struct net *net,
 					   struct in6_addr *laddr,
 					   int dif, u32 tunnel_id)
 {
-	struct hlist_node *node;
 	struct sock *sk;
 
-	sk_for_each_bound(sk, node, &l2tp_ip6_bind_table) {
+	sk_for_each_bound(sk, &l2tp_ip6_bind_table) {
 		struct in6_addr *addr = inet6_rcv_saddr(sk);
 		struct l2tp_ip6_sock *l2tp = l2tp_ip6_sk(sk);
 
diff --git a/net/llc/llc_sap.c b/net/llc/llc_sap.c
index 7c5073badc73..78be45cda5c1 100644
--- a/net/llc/llc_sap.c
+++ b/net/llc/llc_sap.c
@@ -393,12 +393,11 @@ static void llc_sap_mcast(struct llc_sap *sap,
 {
 	int i = 0, count = 256 / sizeof(struct sock *);
 	struct sock *sk, *stack[count];
-	struct hlist_node *node;
 	struct llc_sock *llc;
 	struct hlist_head *dev_hb = llc_sk_dev_hash(sap, skb->dev->ifindex);
 
 	spin_lock_bh(&sap->sk_lock);
-	hlist_for_each_entry(llc, node, dev_hb, dev_hash_node) {
+	hlist_for_each_entry(llc, dev_hb, dev_hash_node) {
 
 		sk = &llc->sk;
 
diff --git a/net/mac80211/main.c b/net/mac80211/main.c
index d0dd11153a6c..1a8591b77a13 100644
--- a/net/mac80211/main.c
+++ b/net/mac80211/main.c
@@ -647,8 +647,6 @@ struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len,
 
 	spin_lock_init(&local->ack_status_lock);
 	idr_init(&local->ack_status_frames);
-	/* preallocate at least one entry */
-	idr_pre_get(&local->ack_status_frames, GFP_KERNEL);
 
 	sta_info_init(local);
 
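The dropped idr_pre_get() call is fallout from the IDR rework merged in this same batch: allocation now happens inside idr_alloc() itself, with the caller's gfp mask, so setup shrinks to a plain idr_init(). A minimal sketch of the new-style initialization (hypothetical names):

#include <linux/idr.h>

static struct idr ids;

static void setup_ids(void)
{
	idr_init(&ids);		/* no preallocation step any more */
}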
diff --git a/net/mac80211/mesh_pathtbl.c b/net/mac80211/mesh_pathtbl.c
index 6b3c4e119c63..dc7c8df40c2c 100644
--- a/net/mac80211/mesh_pathtbl.c
+++ b/net/mac80211/mesh_pathtbl.c
@@ -72,9 +72,9 @@ static inline struct mesh_table *resize_dereference_mpp_paths(void)
  * it's used twice. So it is illegal to do
  * for_each_mesh_entry(rcu_dereference(...), ...)
  */
-#define for_each_mesh_entry(tbl, p, node, i) \
+#define for_each_mesh_entry(tbl, node, i) \
	for (i = 0; i <= tbl->hash_mask; i++) \
-		hlist_for_each_entry_rcu(node, p, &tbl->hash_buckets[i], list)
+		hlist_for_each_entry_rcu(node, &tbl->hash_buckets[i], list)
 
 
 static struct mesh_table *mesh_table_alloc(int size_order)
@@ -139,7 +139,7 @@ static void mesh_table_free(struct mesh_table *tbl, bool free_leafs)
 	}
 	if (free_leafs) {
 		spin_lock_bh(&tbl->gates_lock);
-		hlist_for_each_entry_safe(gate, p, q,
+		hlist_for_each_entry_safe(gate, q,
 					  tbl->known_gates, list) {
 			hlist_del(&gate->list);
 			kfree(gate);
@@ -333,12 +333,11 @@ static struct mesh_path *mpath_lookup(struct mesh_table *tbl, const u8 *dst,
 			  struct ieee80211_sub_if_data *sdata)
 {
 	struct mesh_path *mpath;
-	struct hlist_node *n;
 	struct hlist_head *bucket;
 	struct mpath_node *node;
 
 	bucket = &tbl->hash_buckets[mesh_table_hash(dst, sdata, tbl)];
-	hlist_for_each_entry_rcu(node, n, bucket, list) {
+	hlist_for_each_entry_rcu(node, bucket, list) {
 		mpath = node->mpath;
 		if (mpath->sdata == sdata &&
 		    ether_addr_equal(dst, mpath->dst)) {
@@ -389,11 +388,10 @@ mesh_path_lookup_by_idx(struct ieee80211_sub_if_data *sdata, int idx)
 {
 	struct mesh_table *tbl = rcu_dereference(mesh_paths);
 	struct mpath_node *node;
-	struct hlist_node *p;
 	int i;
 	int j = 0;
 
-	for_each_mesh_entry(tbl, p, node, i) {
+	for_each_mesh_entry(tbl, node, i) {
 		if (sdata && node->mpath->sdata != sdata)
 			continue;
 		if (j++ == idx) {
@@ -417,13 +415,12 @@ int mesh_path_add_gate(struct mesh_path *mpath)
 {
 	struct mesh_table *tbl;
 	struct mpath_node *gate, *new_gate;
-	struct hlist_node *n;
 	int err;
 
 	rcu_read_lock();
 	tbl = rcu_dereference(mesh_paths);
 
-	hlist_for_each_entry_rcu(gate, n, tbl->known_gates, list)
+	hlist_for_each_entry_rcu(gate, tbl->known_gates, list)
 		if (gate->mpath == mpath) {
 			err = -EEXIST;
 			goto err_rcu;
@@ -460,9 +457,9 @@ err_rcu:
 static void mesh_gate_del(struct mesh_table *tbl, struct mesh_path *mpath)
 {
 	struct mpath_node *gate;
-	struct hlist_node *p, *q;
+	struct hlist_node *q;
 
-	hlist_for_each_entry_safe(gate, p, q, tbl->known_gates, list) {
+	hlist_for_each_entry_safe(gate, q, tbl->known_gates, list) {
 		if (gate->mpath != mpath)
 			continue;
 		spin_lock_bh(&tbl->gates_lock);
@@ -504,7 +501,6 @@ int mesh_path_add(struct ieee80211_sub_if_data *sdata, const u8 *dst)
 	struct mesh_path *mpath, *new_mpath;
 	struct mpath_node *node, *new_node;
 	struct hlist_head *bucket;
-	struct hlist_node *n;
 	int grow = 0;
 	int err = 0;
 	u32 hash_idx;
@@ -550,7 +546,7 @@ int mesh_path_add(struct ieee80211_sub_if_data *sdata, const u8 *dst)
 	spin_lock(&tbl->hashwlock[hash_idx]);
 
 	err = -EEXIST;
-	hlist_for_each_entry(node, n, bucket, list) {
+	hlist_for_each_entry(node, bucket, list) {
 		mpath = node->mpath;
 		if (mpath->sdata == sdata &&
 		    ether_addr_equal(dst, mpath->dst))
@@ -640,7 +636,6 @@ int mpp_path_add(struct ieee80211_sub_if_data *sdata,
 	struct mesh_path *mpath, *new_mpath;
 	struct mpath_node *node, *new_node;
 	struct hlist_head *bucket;
-	struct hlist_node *n;
 	int grow = 0;
 	int err = 0;
 	u32 hash_idx;
@@ -680,7 +675,7 @@ int mpp_path_add(struct ieee80211_sub_if_data *sdata,
 	spin_lock(&tbl->hashwlock[hash_idx]);
 
 	err = -EEXIST;
-	hlist_for_each_entry(node, n, bucket, list) {
+	hlist_for_each_entry(node, bucket, list) {
 		mpath = node->mpath;
 		if (mpath->sdata == sdata &&
 		    ether_addr_equal(dst, mpath->dst))
@@ -725,14 +720,13 @@ void mesh_plink_broken(struct sta_info *sta)
 	static const u8 bcast[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
 	struct mesh_path *mpath;
 	struct mpath_node *node;
-	struct hlist_node *p;
 	struct ieee80211_sub_if_data *sdata = sta->sdata;
 	int i;
 	__le16 reason = cpu_to_le16(WLAN_REASON_MESH_PATH_DEST_UNREACHABLE);
 
 	rcu_read_lock();
 	tbl = rcu_dereference(mesh_paths);
-	for_each_mesh_entry(tbl, p, node, i) {
+	for_each_mesh_entry(tbl, node, i) {
 		mpath = node->mpath;
 		if (rcu_dereference(mpath->next_hop) == sta &&
 		    mpath->flags & MESH_PATH_ACTIVE &&
@@ -792,13 +786,12 @@ void mesh_path_flush_by_nexthop(struct sta_info *sta)
 	struct mesh_table *tbl;
 	struct mesh_path *mpath;
 	struct mpath_node *node;
-	struct hlist_node *p;
 	int i;
 
 	rcu_read_lock();
 	read_lock_bh(&pathtbl_resize_lock);
 	tbl = resize_dereference_mesh_paths();
-	for_each_mesh_entry(tbl, p, node, i) {
+	for_each_mesh_entry(tbl, node, i) {
 		mpath = node->mpath;
 		if (rcu_dereference(mpath->next_hop) == sta) {
 			spin_lock(&tbl->hashwlock[i]);
@@ -815,11 +808,10 @@ static void table_flush_by_iface(struct mesh_table *tbl,
 {
 	struct mesh_path *mpath;
 	struct mpath_node *node;
-	struct hlist_node *p;
 	int i;
 
 	WARN_ON(!rcu_read_lock_held());
-	for_each_mesh_entry(tbl, p, node, i) {
+	for_each_mesh_entry(tbl, node, i) {
 		mpath = node->mpath;
 		if (mpath->sdata != sdata)
 			continue;
@@ -865,7 +857,6 @@ int mesh_path_del(struct ieee80211_sub_if_data *sdata, const u8 *addr)
 	struct mesh_path *mpath;
 	struct mpath_node *node;
 	struct hlist_head *bucket;
-	struct hlist_node *n;
 	int hash_idx;
 	int err = 0;
 
@@ -875,7 +866,7 @@ int mesh_path_del(struct ieee80211_sub_if_data *sdata, const u8 *addr)
 	bucket = &tbl->hash_buckets[hash_idx];
 
 	spin_lock(&tbl->hashwlock[hash_idx]);
-	hlist_for_each_entry(node, n, bucket, list) {
+	hlist_for_each_entry(node, bucket, list) {
 		mpath = node->mpath;
 		if (mpath->sdata == sdata &&
 		    ether_addr_equal(addr, mpath->dst)) {
@@ -920,7 +911,6 @@ void mesh_path_tx_pending(struct mesh_path *mpath)
 int mesh_path_send_to_gates(struct mesh_path *mpath)
 {
 	struct ieee80211_sub_if_data *sdata = mpath->sdata;
-	struct hlist_node *n;
 	struct mesh_table *tbl;
 	struct mesh_path *from_mpath = mpath;
 	struct mpath_node *gate = NULL;
@@ -935,7 +925,7 @@ int mesh_path_send_to_gates(struct mesh_path *mpath)
 	if (!known_gates)
 		return -EHOSTUNREACH;
 
-	hlist_for_each_entry_rcu(gate, n, known_gates, list) {
+	hlist_for_each_entry_rcu(gate, known_gates, list) {
 		if (gate->mpath->sdata != sdata)
 			continue;
 
@@ -951,7 +941,7 @@ int mesh_path_send_to_gates(struct mesh_path *mpath)
 		}
 	}
 
-	hlist_for_each_entry_rcu(gate, n, known_gates, list)
+	hlist_for_each_entry_rcu(gate, known_gates, list)
 		if (gate->mpath->sdata == sdata) {
 			mpath_dbg(sdata, "Sending to %pM\n", gate->mpath->dst);
 			mesh_path_tx_pending(gate->mpath);
@@ -1096,12 +1086,11 @@ void mesh_path_expire(struct ieee80211_sub_if_data *sdata)
 	struct mesh_table *tbl;
 	struct mesh_path *mpath;
 	struct mpath_node *node;
-	struct hlist_node *p;
 	int i;
 
 	rcu_read_lock();
 	tbl = rcu_dereference(mesh_paths);
-	for_each_mesh_entry(tbl, p, node, i) {
+	for_each_mesh_entry(tbl, node, i) {
 		if (node->mpath->sdata != sdata)
 			continue;
 		mpath = node->mpath;
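for_each_mesh_entry() is a local wrapper over hlist_for_each_entry_rcu(), so it loses its node parameter in lockstep with the core macro. A usage sketch (names as in mesh_pathtbl.c; pr_debug stands in for the real consumers): note that tbl is evaluated once per bucket, which is why the comment above the macro forbids passing rcu_dereference(...) directly:

static void walk_mesh_paths(void)
{
	struct mesh_table *tbl;
	struct mpath_node *node;
	int i;

	rcu_read_lock();
	tbl = rcu_dereference(mesh_paths);	/* dereference once, outside */
	for_each_mesh_entry(tbl, node, i)
		pr_debug("mesh path to %pM\n", node->mpath->dst);
	rcu_read_unlock();
}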
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index 5b9602b62405..de8548bf0a7f 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -2017,24 +2017,14 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
 		skb = skb_clone(skb, GFP_ATOMIC);
 		if (skb) {
 			unsigned long flags;
-			int id, r;
+			int id;
 
 			spin_lock_irqsave(&local->ack_status_lock, flags);
-			r = idr_get_new_above(&local->ack_status_frames,
-					      orig_skb, 1, &id);
-			if (r == -EAGAIN) {
-				idr_pre_get(&local->ack_status_frames,
-					    GFP_ATOMIC);
-				r = idr_get_new_above(&local->ack_status_frames,
-						      orig_skb, 1, &id);
-			}
-			if (WARN_ON(!id) || id > 0xffff) {
-				idr_remove(&local->ack_status_frames, id);
-				r = -ERANGE;
-			}
+			id = idr_alloc(&local->ack_status_frames, orig_skb,
+				       1, 0x10000, GFP_ATOMIC);
 			spin_unlock_irqrestore(&local->ack_status_lock, flags);
 
-			if (!r) {
+			if (id >= 0) {
 				info_id = id;
 				info_flags |= IEEE80211_TX_CTL_REQ_TX_STATUS;
 			} else if (skb_shared(skb)) {
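idr_alloc() folds the old three-step dance (preload, idr_get_new_above(), manual range check) into one call: it returns an id in [start, end) or a negative errno, so both the -EAGAIN retry and the explicit id > 0xffff check disappear. A sketch of the contract, with a hypothetical wrapper:

#include <linux/idr.h>
#include <linux/skbuff.h>

static int stash_skb(struct idr *idr, struct sk_buff *skb)
{
	/* id in [1, 0x10000) on success; -ENOMEM or -ENOSPC on failure */
	return idr_alloc(idr, skb, 1, 0x10000, GFP_ATOMIC);
}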
diff --git a/net/netfilter/ipvs/ip_vs_conn.c b/net/netfilter/ipvs/ip_vs_conn.c
index 9f00db7e03f2..704e514e02ab 100644
--- a/net/netfilter/ipvs/ip_vs_conn.c
+++ b/net/netfilter/ipvs/ip_vs_conn.c
@@ -259,13 +259,12 @@ __ip_vs_conn_in_get(const struct ip_vs_conn_param *p)
 {
 	unsigned int hash;
 	struct ip_vs_conn *cp;
-	struct hlist_node *n;
 
 	hash = ip_vs_conn_hashkey_param(p, false);
 
 	ct_read_lock(hash);
 
-	hlist_for_each_entry(cp, n, &ip_vs_conn_tab[hash], c_list) {
+	hlist_for_each_entry(cp, &ip_vs_conn_tab[hash], c_list) {
 		if (cp->af == p->af &&
 		    p->cport == cp->cport && p->vport == cp->vport &&
 		    ip_vs_addr_equal(p->af, p->caddr, &cp->caddr) &&
@@ -344,13 +343,12 @@ struct ip_vs_conn *ip_vs_ct_in_get(const struct ip_vs_conn_param *p)
 {
 	unsigned int hash;
 	struct ip_vs_conn *cp;
-	struct hlist_node *n;
 
 	hash = ip_vs_conn_hashkey_param(p, false);
 
 	ct_read_lock(hash);
 
-	hlist_for_each_entry(cp, n, &ip_vs_conn_tab[hash], c_list) {
+	hlist_for_each_entry(cp, &ip_vs_conn_tab[hash], c_list) {
 		if (!ip_vs_conn_net_eq(cp, p->net))
 			continue;
 		if (p->pe_data && p->pe->ct_match) {
@@ -394,7 +392,6 @@ struct ip_vs_conn *ip_vs_conn_out_get(const struct ip_vs_conn_param *p)
 {
 	unsigned int hash;
 	struct ip_vs_conn *cp, *ret=NULL;
-	struct hlist_node *n;
 
 	/*
 	 *	Check for "full" addressed entries
@@ -403,7 +400,7 @@ struct ip_vs_conn *ip_vs_conn_out_get(const struct ip_vs_conn_param *p)
 
 	ct_read_lock(hash);
 
-	hlist_for_each_entry(cp, n, &ip_vs_conn_tab[hash], c_list) {
+	hlist_for_each_entry(cp, &ip_vs_conn_tab[hash], c_list) {
 		if (cp->af == p->af &&
 		    p->vport == cp->cport && p->cport == cp->dport &&
 		    ip_vs_addr_equal(p->af, p->vaddr, &cp->caddr) &&
@@ -953,11 +950,10 @@ static void *ip_vs_conn_array(struct seq_file *seq, loff_t pos)
 	int idx;
 	struct ip_vs_conn *cp;
 	struct ip_vs_iter_state *iter = seq->private;
-	struct hlist_node *n;
 
 	for (idx = 0; idx < ip_vs_conn_tab_size; idx++) {
 		ct_read_lock_bh(idx);
-		hlist_for_each_entry(cp, n, &ip_vs_conn_tab[idx], c_list) {
+		hlist_for_each_entry(cp, &ip_vs_conn_tab[idx], c_list) {
 			if (pos-- == 0) {
 				iter->l = &ip_vs_conn_tab[idx];
 				return cp;
@@ -981,7 +977,6 @@ static void *ip_vs_conn_seq_next(struct seq_file *seq, void *v, loff_t *pos)
 {
 	struct ip_vs_conn *cp = v;
 	struct ip_vs_iter_state *iter = seq->private;
-	struct hlist_node *e;
 	struct hlist_head *l = iter->l;
 	int idx;
 
@@ -990,15 +985,15 @@ static void *ip_vs_conn_seq_next(struct seq_file *seq, void *v, loff_t *pos)
 		return ip_vs_conn_array(seq, 0);
 
 	/* more on same hash chain? */
-	if ((e = cp->c_list.next))
-		return hlist_entry(e, struct ip_vs_conn, c_list);
+	if (cp->c_list.next)
+		return hlist_entry(cp->c_list.next, struct ip_vs_conn, c_list);
 
 	idx = l - ip_vs_conn_tab;
 	ct_read_unlock_bh(idx);
 
 	while (++idx < ip_vs_conn_tab_size) {
 		ct_read_lock_bh(idx);
-		hlist_for_each_entry(cp, e, &ip_vs_conn_tab[idx], c_list) {
+		hlist_for_each_entry(cp, &ip_vs_conn_tab[idx], c_list) {
 			iter->l = &ip_vs_conn_tab[idx];
 			return cp;
 		}
@@ -1200,14 +1195,13 @@ void ip_vs_random_dropentry(struct net *net)
 	 */
 	for (idx = 0; idx < (ip_vs_conn_tab_size>>5); idx++) {
 		unsigned int hash = net_random() & ip_vs_conn_tab_mask;
-		struct hlist_node *n;
 
 		/*
 		 *  Lock is actually needed in this loop.
 		 */
 		ct_write_lock_bh(hash);
 
-		hlist_for_each_entry(cp, n, &ip_vs_conn_tab[hash], c_list) {
+		hlist_for_each_entry(cp, &ip_vs_conn_tab[hash], c_list) {
 			if (cp->flags & IP_VS_CONN_F_TEMPLATE)
 				/* connection template */
 				continue;
@@ -1255,14 +1249,12 @@ static void ip_vs_conn_flush(struct net *net)
 
 flush_again:
 	for (idx = 0; idx < ip_vs_conn_tab_size; idx++) {
-		struct hlist_node *n;
-
 		/*
 		 *  Lock is actually needed in this loop.
 		 */
 		ct_write_lock_bh(idx);
1264 1256
1265 hlist_for_each_entry(cp, n, &ip_vs_conn_tab[idx], c_list) { 1257 hlist_for_each_entry(cp, &ip_vs_conn_tab[idx], c_list) {
1266 if (!ip_vs_conn_net_eq(cp, net)) 1258 if (!ip_vs_conn_net_eq(cp, net))
1267 continue; 1259 continue;
1268 IP_VS_DBG(4, "del connection\n"); 1260 IP_VS_DBG(4, "del connection\n");
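Every ip_vs_conn.c hunk above is the same mechanical change: hlist_for_each_entry() dropped its separate struct hlist_node * cursor, so the scratch declarations vanish and each loop loses one argument. A rough sketch with hypothetical names (struct item, bucket, hnode are illustrative):

    struct item {
            struct hlist_node hnode;
            int key;
    };

    static struct hlist_head bucket;        /* illustrative chain head */

    /* Old:  struct hlist_node *n;
     *       hlist_for_each_entry(it, n, &bucket, hnode) ...
     * New:  the typed entry pointer is the loop cursor itself.
     */
    static struct item *find_item(int key)
    {
            struct item *it;

            hlist_for_each_entry(it, &bucket, hnode)
                    if (it->key == key)
                            return it;
            return NULL;
    }

ip_vs_conn_seq_next() is the one spot above that changes shape rather than just losing an argument: it steps one link at a time, so it now calls hlist_entry() on cp->c_list.next directly instead of going through the dropped cursor.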
diff --git a/net/netfilter/nf_conntrack_expect.c b/net/netfilter/nf_conntrack_expect.c
index 3921e5bc1235..8c10e3db3d9b 100644
--- a/net/netfilter/nf_conntrack_expect.c
+++ b/net/netfilter/nf_conntrack_expect.c
@@ -90,14 +90,13 @@ __nf_ct_expect_find(struct net *net, u16 zone,
90 const struct nf_conntrack_tuple *tuple) 90 const struct nf_conntrack_tuple *tuple)
91{ 91{
92 struct nf_conntrack_expect *i; 92 struct nf_conntrack_expect *i;
93 struct hlist_node *n;
94 unsigned int h; 93 unsigned int h;
95 94
96 if (!net->ct.expect_count) 95 if (!net->ct.expect_count)
97 return NULL; 96 return NULL;
98 97
99 h = nf_ct_expect_dst_hash(tuple); 98 h = nf_ct_expect_dst_hash(tuple);
100 hlist_for_each_entry_rcu(i, n, &net->ct.expect_hash[h], hnode) { 99 hlist_for_each_entry_rcu(i, &net->ct.expect_hash[h], hnode) {
101 if (nf_ct_tuple_mask_cmp(tuple, &i->tuple, &i->mask) && 100 if (nf_ct_tuple_mask_cmp(tuple, &i->tuple, &i->mask) &&
102 nf_ct_zone(i->master) == zone) 101 nf_ct_zone(i->master) == zone)
103 return i; 102 return i;
@@ -130,14 +129,13 @@ nf_ct_find_expectation(struct net *net, u16 zone,
130 const struct nf_conntrack_tuple *tuple) 129 const struct nf_conntrack_tuple *tuple)
131{ 130{
132 struct nf_conntrack_expect *i, *exp = NULL; 131 struct nf_conntrack_expect *i, *exp = NULL;
133 struct hlist_node *n;
134 unsigned int h; 132 unsigned int h;
135 133
136 if (!net->ct.expect_count) 134 if (!net->ct.expect_count)
137 return NULL; 135 return NULL;
138 136
139 h = nf_ct_expect_dst_hash(tuple); 137 h = nf_ct_expect_dst_hash(tuple);
140 hlist_for_each_entry(i, n, &net->ct.expect_hash[h], hnode) { 138 hlist_for_each_entry(i, &net->ct.expect_hash[h], hnode) {
141 if (!(i->flags & NF_CT_EXPECT_INACTIVE) && 139 if (!(i->flags & NF_CT_EXPECT_INACTIVE) &&
142 nf_ct_tuple_mask_cmp(tuple, &i->tuple, &i->mask) && 140 nf_ct_tuple_mask_cmp(tuple, &i->tuple, &i->mask) &&
143 nf_ct_zone(i->master) == zone) { 141 nf_ct_zone(i->master) == zone) {
@@ -172,13 +170,13 @@ void nf_ct_remove_expectations(struct nf_conn *ct)
172{ 170{
173 struct nf_conn_help *help = nfct_help(ct); 171 struct nf_conn_help *help = nfct_help(ct);
174 struct nf_conntrack_expect *exp; 172 struct nf_conntrack_expect *exp;
175 struct hlist_node *n, *next; 173 struct hlist_node *next;
176 174
177 /* Optimization: most connection never expect any others. */ 175 /* Optimization: most connection never expect any others. */
178 if (!help) 176 if (!help)
179 return; 177 return;
180 178
181 hlist_for_each_entry_safe(exp, n, next, &help->expectations, lnode) { 179 hlist_for_each_entry_safe(exp, next, &help->expectations, lnode) {
182 if (del_timer(&exp->timeout)) { 180 if (del_timer(&exp->timeout)) {
183 nf_ct_unlink_expect(exp); 181 nf_ct_unlink_expect(exp);
184 nf_ct_expect_put(exp); 182 nf_ct_expect_put(exp);
@@ -348,9 +346,8 @@ static void evict_oldest_expect(struct nf_conn *master,
348{ 346{
349 struct nf_conn_help *master_help = nfct_help(master); 347 struct nf_conn_help *master_help = nfct_help(master);
350 struct nf_conntrack_expect *exp, *last = NULL; 348 struct nf_conntrack_expect *exp, *last = NULL;
351 struct hlist_node *n;
352 349
353 hlist_for_each_entry(exp, n, &master_help->expectations, lnode) { 350 hlist_for_each_entry(exp, &master_help->expectations, lnode) {
354 if (exp->class == new->class) 351 if (exp->class == new->class)
355 last = exp; 352 last = exp;
356 } 353 }
@@ -369,7 +366,7 @@ static inline int __nf_ct_expect_check(struct nf_conntrack_expect *expect)
369 struct nf_conn_help *master_help = nfct_help(master); 366 struct nf_conn_help *master_help = nfct_help(master);
370 struct nf_conntrack_helper *helper; 367 struct nf_conntrack_helper *helper;
371 struct net *net = nf_ct_exp_net(expect); 368 struct net *net = nf_ct_exp_net(expect);
372 struct hlist_node *n, *next; 369 struct hlist_node *next;
373 unsigned int h; 370 unsigned int h;
374 int ret = 1; 371 int ret = 1;
375 372
@@ -378,7 +375,7 @@ static inline int __nf_ct_expect_check(struct nf_conntrack_expect *expect)
378 goto out; 375 goto out;
379 } 376 }
380 h = nf_ct_expect_dst_hash(&expect->tuple); 377 h = nf_ct_expect_dst_hash(&expect->tuple);
381 hlist_for_each_entry_safe(i, n, next, &net->ct.expect_hash[h], hnode) { 378 hlist_for_each_entry_safe(i, next, &net->ct.expect_hash[h], hnode) {
382 if (expect_matches(i, expect)) { 379 if (expect_matches(i, expect)) {
383 if (del_timer(&i->timeout)) { 380 if (del_timer(&i->timeout)) {
384 nf_ct_unlink_expect(i); 381 nf_ct_unlink_expect(i);
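The _safe flavour used in nf_ct_remove_expectations() and __nf_ct_expect_check() keeps one struct hlist_node * — the lookahead that caches ->next so the current entry may be unlinked and freed — and sheds only the redundant first cursor. A sketch with the same hypothetical struct item:

    static void flush_bucket(struct hlist_head *head)
    {
            struct item *it;
            struct hlist_node *next;

            /* 'next' is captured before each body runs, so unlinking and
             * freeing 'it' inside the loop is safe. */
            hlist_for_each_entry_safe(it, next, head, hnode) {
                    hlist_del(&it->hnode);
                    kfree(it);
            }
    }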
diff --git a/net/netfilter/nf_conntrack_helper.c b/net/netfilter/nf_conntrack_helper.c
index 013cdf69fe29..a9740bd6fe54 100644
--- a/net/netfilter/nf_conntrack_helper.c
+++ b/net/netfilter/nf_conntrack_helper.c
@@ -116,14 +116,13 @@ __nf_ct_helper_find(const struct nf_conntrack_tuple *tuple)
116{ 116{
117 struct nf_conntrack_helper *helper; 117 struct nf_conntrack_helper *helper;
118 struct nf_conntrack_tuple_mask mask = { .src.u.all = htons(0xFFFF) }; 118 struct nf_conntrack_tuple_mask mask = { .src.u.all = htons(0xFFFF) };
119 struct hlist_node *n;
120 unsigned int h; 119 unsigned int h;
121 120
122 if (!nf_ct_helper_count) 121 if (!nf_ct_helper_count)
123 return NULL; 122 return NULL;
124 123
125 h = helper_hash(tuple); 124 h = helper_hash(tuple);
126 hlist_for_each_entry_rcu(helper, n, &nf_ct_helper_hash[h], hnode) { 125 hlist_for_each_entry_rcu(helper, &nf_ct_helper_hash[h], hnode) {
127 if (nf_ct_tuple_src_mask_cmp(tuple, &helper->tuple, &mask)) 126 if (nf_ct_tuple_src_mask_cmp(tuple, &helper->tuple, &mask))
128 return helper; 127 return helper;
129 } 128 }
@@ -134,11 +133,10 @@ struct nf_conntrack_helper *
134__nf_conntrack_helper_find(const char *name, u16 l3num, u8 protonum) 133__nf_conntrack_helper_find(const char *name, u16 l3num, u8 protonum)
135{ 134{
136 struct nf_conntrack_helper *h; 135 struct nf_conntrack_helper *h;
137 struct hlist_node *n;
138 unsigned int i; 136 unsigned int i;
139 137
140 for (i = 0; i < nf_ct_helper_hsize; i++) { 138 for (i = 0; i < nf_ct_helper_hsize; i++) {
141 hlist_for_each_entry_rcu(h, n, &nf_ct_helper_hash[i], hnode) { 139 hlist_for_each_entry_rcu(h, &nf_ct_helper_hash[i], hnode) {
142 if (!strcmp(h->name, name) && 140 if (!strcmp(h->name, name) &&
143 h->tuple.src.l3num == l3num && 141 h->tuple.src.l3num == l3num &&
144 h->tuple.dst.protonum == protonum) 142 h->tuple.dst.protonum == protonum)
@@ -357,7 +355,6 @@ int nf_conntrack_helper_register(struct nf_conntrack_helper *me)
357{ 355{
358 int ret = 0; 356 int ret = 0;
359 struct nf_conntrack_helper *cur; 357 struct nf_conntrack_helper *cur;
360 struct hlist_node *n;
361 unsigned int h = helper_hash(&me->tuple); 358 unsigned int h = helper_hash(&me->tuple);
362 359
363 BUG_ON(me->expect_policy == NULL); 360 BUG_ON(me->expect_policy == NULL);
@@ -365,7 +362,7 @@ int nf_conntrack_helper_register(struct nf_conntrack_helper *me)
365 BUG_ON(strlen(me->name) > NF_CT_HELPER_NAME_LEN - 1); 362 BUG_ON(strlen(me->name) > NF_CT_HELPER_NAME_LEN - 1);
366 363
367 mutex_lock(&nf_ct_helper_mutex); 364 mutex_lock(&nf_ct_helper_mutex);
368 hlist_for_each_entry(cur, n, &nf_ct_helper_hash[h], hnode) { 365 hlist_for_each_entry(cur, &nf_ct_helper_hash[h], hnode) {
369 if (strncmp(cur->name, me->name, NF_CT_HELPER_NAME_LEN) == 0 && 366 if (strncmp(cur->name, me->name, NF_CT_HELPER_NAME_LEN) == 0 &&
370 cur->tuple.src.l3num == me->tuple.src.l3num && 367 cur->tuple.src.l3num == me->tuple.src.l3num &&
371 cur->tuple.dst.protonum == me->tuple.dst.protonum) { 368 cur->tuple.dst.protonum == me->tuple.dst.protonum) {
@@ -386,13 +383,13 @@ static void __nf_conntrack_helper_unregister(struct nf_conntrack_helper *me,
386{ 383{
387 struct nf_conntrack_tuple_hash *h; 384 struct nf_conntrack_tuple_hash *h;
388 struct nf_conntrack_expect *exp; 385 struct nf_conntrack_expect *exp;
389 const struct hlist_node *n, *next; 386 const struct hlist_node *next;
390 const struct hlist_nulls_node *nn; 387 const struct hlist_nulls_node *nn;
391 unsigned int i; 388 unsigned int i;
392 389
393 /* Get rid of expectations */ 390 /* Get rid of expectations */
394 for (i = 0; i < nf_ct_expect_hsize; i++) { 391 for (i = 0; i < nf_ct_expect_hsize; i++) {
395 hlist_for_each_entry_safe(exp, n, next, 392 hlist_for_each_entry_safe(exp, next,
396 &net->ct.expect_hash[i], hnode) { 393 &net->ct.expect_hash[i], hnode) {
397 struct nf_conn_help *help = nfct_help(exp->master); 394 struct nf_conn_help *help = nfct_help(exp->master);
398 if ((rcu_dereference_protected( 395 if ((rcu_dereference_protected(
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
index 5d60e04f9679..9904b15f600e 100644
--- a/net/netfilter/nf_conntrack_netlink.c
+++ b/net/netfilter/nf_conntrack_netlink.c
@@ -2370,14 +2370,13 @@ ctnetlink_exp_dump_table(struct sk_buff *skb, struct netlink_callback *cb)
2370 struct net *net = sock_net(skb->sk); 2370 struct net *net = sock_net(skb->sk);
2371 struct nf_conntrack_expect *exp, *last; 2371 struct nf_conntrack_expect *exp, *last;
2372 struct nfgenmsg *nfmsg = nlmsg_data(cb->nlh); 2372 struct nfgenmsg *nfmsg = nlmsg_data(cb->nlh);
2373 struct hlist_node *n;
2374 u_int8_t l3proto = nfmsg->nfgen_family; 2373 u_int8_t l3proto = nfmsg->nfgen_family;
2375 2374
2376 rcu_read_lock(); 2375 rcu_read_lock();
2377 last = (struct nf_conntrack_expect *)cb->args[1]; 2376 last = (struct nf_conntrack_expect *)cb->args[1];
2378 for (; cb->args[0] < nf_ct_expect_hsize; cb->args[0]++) { 2377 for (; cb->args[0] < nf_ct_expect_hsize; cb->args[0]++) {
2379restart: 2378restart:
2380 hlist_for_each_entry(exp, n, &net->ct.expect_hash[cb->args[0]], 2379 hlist_for_each_entry(exp, &net->ct.expect_hash[cb->args[0]],
2381 hnode) { 2380 hnode) {
2382 if (l3proto && exp->tuple.src.l3num != l3proto) 2381 if (l3proto && exp->tuple.src.l3num != l3proto)
2383 continue; 2382 continue;
@@ -2510,7 +2509,7 @@ ctnetlink_del_expect(struct sock *ctnl, struct sk_buff *skb,
2510 struct nf_conntrack_expect *exp; 2509 struct nf_conntrack_expect *exp;
2511 struct nf_conntrack_tuple tuple; 2510 struct nf_conntrack_tuple tuple;
2512 struct nfgenmsg *nfmsg = nlmsg_data(nlh); 2511 struct nfgenmsg *nfmsg = nlmsg_data(nlh);
2513 struct hlist_node *n, *next; 2512 struct hlist_node *next;
2514 u_int8_t u3 = nfmsg->nfgen_family; 2513 u_int8_t u3 = nfmsg->nfgen_family;
2515 unsigned int i; 2514 unsigned int i;
2516 u16 zone; 2515 u16 zone;
@@ -2557,7 +2556,7 @@ ctnetlink_del_expect(struct sock *ctnl, struct sk_buff *skb,
2557 /* delete all expectations for this helper */ 2556 /* delete all expectations for this helper */
2558 spin_lock_bh(&nf_conntrack_lock); 2557 spin_lock_bh(&nf_conntrack_lock);
2559 for (i = 0; i < nf_ct_expect_hsize; i++) { 2558 for (i = 0; i < nf_ct_expect_hsize; i++) {
2560 hlist_for_each_entry_safe(exp, n, next, 2559 hlist_for_each_entry_safe(exp, next,
2561 &net->ct.expect_hash[i], 2560 &net->ct.expect_hash[i],
2562 hnode) { 2561 hnode) {
2563 m_help = nfct_help(exp->master); 2562 m_help = nfct_help(exp->master);
@@ -2575,7 +2574,7 @@ ctnetlink_del_expect(struct sock *ctnl, struct sk_buff *skb,
2575 /* This basically means we have to flush everything*/ 2574 /* This basically means we have to flush everything*/
2576 spin_lock_bh(&nf_conntrack_lock); 2575 spin_lock_bh(&nf_conntrack_lock);
2577 for (i = 0; i < nf_ct_expect_hsize; i++) { 2576 for (i = 0; i < nf_ct_expect_hsize; i++) {
2578 hlist_for_each_entry_safe(exp, n, next, 2577 hlist_for_each_entry_safe(exp, next,
2579 &net->ct.expect_hash[i], 2578 &net->ct.expect_hash[i],
2580 hnode) { 2579 hnode) {
2581 if (del_timer(&exp->timeout)) { 2580 if (del_timer(&exp->timeout)) {
diff --git a/net/netfilter/nf_conntrack_sip.c b/net/netfilter/nf_conntrack_sip.c
index 069229d919b6..0e7d423324c3 100644
--- a/net/netfilter/nf_conntrack_sip.c
+++ b/net/netfilter/nf_conntrack_sip.c
@@ -855,11 +855,11 @@ static int refresh_signalling_expectation(struct nf_conn *ct,
855{ 855{
856 struct nf_conn_help *help = nfct_help(ct); 856 struct nf_conn_help *help = nfct_help(ct);
857 struct nf_conntrack_expect *exp; 857 struct nf_conntrack_expect *exp;
858 struct hlist_node *n, *next; 858 struct hlist_node *next;
859 int found = 0; 859 int found = 0;
860 860
861 spin_lock_bh(&nf_conntrack_lock); 861 spin_lock_bh(&nf_conntrack_lock);
862 hlist_for_each_entry_safe(exp, n, next, &help->expectations, lnode) { 862 hlist_for_each_entry_safe(exp, next, &help->expectations, lnode) {
863 if (exp->class != SIP_EXPECT_SIGNALLING || 863 if (exp->class != SIP_EXPECT_SIGNALLING ||
864 !nf_inet_addr_cmp(&exp->tuple.dst.u3, addr) || 864 !nf_inet_addr_cmp(&exp->tuple.dst.u3, addr) ||
865 exp->tuple.dst.protonum != proto || 865 exp->tuple.dst.protonum != proto ||
@@ -881,10 +881,10 @@ static void flush_expectations(struct nf_conn *ct, bool media)
881{ 881{
882 struct nf_conn_help *help = nfct_help(ct); 882 struct nf_conn_help *help = nfct_help(ct);
883 struct nf_conntrack_expect *exp; 883 struct nf_conntrack_expect *exp;
884 struct hlist_node *n, *next; 884 struct hlist_node *next;
885 885
886 spin_lock_bh(&nf_conntrack_lock); 886 spin_lock_bh(&nf_conntrack_lock);
887 hlist_for_each_entry_safe(exp, n, next, &help->expectations, lnode) { 887 hlist_for_each_entry_safe(exp, next, &help->expectations, lnode) {
888 if ((exp->class != SIP_EXPECT_SIGNALLING) ^ media) 888 if ((exp->class != SIP_EXPECT_SIGNALLING) ^ media)
889 continue; 889 continue;
890 if (!del_timer(&exp->timeout)) 890 if (!del_timer(&exp->timeout))
diff --git a/net/netfilter/nf_nat_core.c b/net/netfilter/nf_nat_core.c
index 5f2f9109f461..8d5769c6d16e 100644
--- a/net/netfilter/nf_nat_core.c
+++ b/net/netfilter/nf_nat_core.c
@@ -191,9 +191,8 @@ find_appropriate_src(struct net *net, u16 zone,
191 unsigned int h = hash_by_src(net, zone, tuple); 191 unsigned int h = hash_by_src(net, zone, tuple);
192 const struct nf_conn_nat *nat; 192 const struct nf_conn_nat *nat;
193 const struct nf_conn *ct; 193 const struct nf_conn *ct;
194 const struct hlist_node *n;
195 194
196 hlist_for_each_entry_rcu(nat, n, &net->ct.nat_bysource[h], bysource) { 195 hlist_for_each_entry_rcu(nat, &net->ct.nat_bysource[h], bysource) {
197 ct = nat->ct; 196 ct = nat->ct;
198 if (same_src(ct, tuple) && nf_ct_zone(ct) == zone) { 197 if (same_src(ct, tuple) && nf_ct_zone(ct) == zone) {
199 /* Copy source part from reply tuple. */ 198 /* Copy source part from reply tuple. */
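find_appropriate_src() above shows the RCU flavour, which changes identically. A minimal lockless-lookup sketch with hypothetical names; the chain must be updated with the hlist_*_rcu() mutators for this to be safe:

    static struct item *find_item_rcu(struct hlist_head *head, int key)
    {
            struct item *it;

            rcu_read_lock();
            hlist_for_each_entry_rcu(it, head, hnode) {
                    if (it->key == key)
                            break;          /* found */
            }
            /* A real caller takes a reference on a hit before unlocking if
             * the object is used beyond the read-side critical section. */
            rcu_read_unlock();
            return it;                      /* NULL when not found */
    }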
diff --git a/net/netfilter/nfnetlink_cthelper.c b/net/netfilter/nfnetlink_cthelper.c
index 945950a8b1f1..a191b6db657e 100644
--- a/net/netfilter/nfnetlink_cthelper.c
+++ b/net/netfilter/nfnetlink_cthelper.c
@@ -282,7 +282,6 @@ nfnl_cthelper_new(struct sock *nfnl, struct sk_buff *skb,
282 const char *helper_name; 282 const char *helper_name;
283 struct nf_conntrack_helper *cur, *helper = NULL; 283 struct nf_conntrack_helper *cur, *helper = NULL;
284 struct nf_conntrack_tuple tuple; 284 struct nf_conntrack_tuple tuple;
285 struct hlist_node *n;
286 int ret = 0, i; 285 int ret = 0, i;
287 286
288 if (!tb[NFCTH_NAME] || !tb[NFCTH_TUPLE]) 287 if (!tb[NFCTH_NAME] || !tb[NFCTH_TUPLE])
@@ -296,7 +295,7 @@ nfnl_cthelper_new(struct sock *nfnl, struct sk_buff *skb,
296 295
297 rcu_read_lock(); 296 rcu_read_lock();
298 for (i = 0; i < nf_ct_helper_hsize && !helper; i++) { 297 for (i = 0; i < nf_ct_helper_hsize && !helper; i++) {
299 hlist_for_each_entry_rcu(cur, n, &nf_ct_helper_hash[i], hnode) { 298 hlist_for_each_entry_rcu(cur, &nf_ct_helper_hash[i], hnode) {
300 299
301 /* skip non-userspace conntrack helpers. */ 300 /* skip non-userspace conntrack helpers. */
302 if (!(cur->flags & NF_CT_HELPER_F_USERSPACE)) 301 if (!(cur->flags & NF_CT_HELPER_F_USERSPACE))
@@ -452,13 +451,12 @@ static int
452nfnl_cthelper_dump_table(struct sk_buff *skb, struct netlink_callback *cb) 451nfnl_cthelper_dump_table(struct sk_buff *skb, struct netlink_callback *cb)
453{ 452{
454 struct nf_conntrack_helper *cur, *last; 453 struct nf_conntrack_helper *cur, *last;
455 struct hlist_node *n;
456 454
457 rcu_read_lock(); 455 rcu_read_lock();
458 last = (struct nf_conntrack_helper *)cb->args[1]; 456 last = (struct nf_conntrack_helper *)cb->args[1];
459 for (; cb->args[0] < nf_ct_helper_hsize; cb->args[0]++) { 457 for (; cb->args[0] < nf_ct_helper_hsize; cb->args[0]++) {
460restart: 458restart:
461 hlist_for_each_entry_rcu(cur, n, 459 hlist_for_each_entry_rcu(cur,
462 &nf_ct_helper_hash[cb->args[0]], hnode) { 460 &nf_ct_helper_hash[cb->args[0]], hnode) {
463 461
464 /* skip non-userspace conntrack helpers. */ 462 /* skip non-userspace conntrack helpers. */
@@ -495,7 +493,6 @@ nfnl_cthelper_get(struct sock *nfnl, struct sk_buff *skb,
495{ 493{
496 int ret = -ENOENT, i; 494 int ret = -ENOENT, i;
497 struct nf_conntrack_helper *cur; 495 struct nf_conntrack_helper *cur;
498 struct hlist_node *n;
499 struct sk_buff *skb2; 496 struct sk_buff *skb2;
500 char *helper_name = NULL; 497 char *helper_name = NULL;
501 struct nf_conntrack_tuple tuple; 498 struct nf_conntrack_tuple tuple;
@@ -520,7 +517,7 @@ nfnl_cthelper_get(struct sock *nfnl, struct sk_buff *skb,
520 } 517 }
521 518
522 for (i = 0; i < nf_ct_helper_hsize; i++) { 519 for (i = 0; i < nf_ct_helper_hsize; i++) {
523 hlist_for_each_entry_rcu(cur, n, &nf_ct_helper_hash[i], hnode) { 520 hlist_for_each_entry_rcu(cur, &nf_ct_helper_hash[i], hnode) {
524 521
525 /* skip non-userspace conntrack helpers. */ 522 /* skip non-userspace conntrack helpers. */
526 if (!(cur->flags & NF_CT_HELPER_F_USERSPACE)) 523 if (!(cur->flags & NF_CT_HELPER_F_USERSPACE))
@@ -568,7 +565,7 @@ nfnl_cthelper_del(struct sock *nfnl, struct sk_buff *skb,
568{ 565{
569 char *helper_name = NULL; 566 char *helper_name = NULL;
570 struct nf_conntrack_helper *cur; 567 struct nf_conntrack_helper *cur;
571 struct hlist_node *n, *tmp; 568 struct hlist_node *tmp;
572 struct nf_conntrack_tuple tuple; 569 struct nf_conntrack_tuple tuple;
573 bool tuple_set = false, found = false; 570 bool tuple_set = false, found = false;
574 int i, j = 0, ret; 571 int i, j = 0, ret;
@@ -585,7 +582,7 @@ nfnl_cthelper_del(struct sock *nfnl, struct sk_buff *skb,
585 } 582 }
586 583
587 for (i = 0; i < nf_ct_helper_hsize; i++) { 584 for (i = 0; i < nf_ct_helper_hsize; i++) {
588 hlist_for_each_entry_safe(cur, n, tmp, &nf_ct_helper_hash[i], 585 hlist_for_each_entry_safe(cur, tmp, &nf_ct_helper_hash[i],
589 hnode) { 586 hnode) {
590 /* skip non-userspace conntrack helpers. */ 587 /* skip non-userspace conntrack helpers. */
591 if (!(cur->flags & NF_CT_HELPER_F_USERSPACE)) 588 if (!(cur->flags & NF_CT_HELPER_F_USERSPACE))
@@ -654,13 +651,13 @@ err_out:
654static void __exit nfnl_cthelper_exit(void) 651static void __exit nfnl_cthelper_exit(void)
655{ 652{
656 struct nf_conntrack_helper *cur; 653 struct nf_conntrack_helper *cur;
657 struct hlist_node *n, *tmp; 654 struct hlist_node *tmp;
658 int i; 655 int i;
659 656
660 nfnetlink_subsys_unregister(&nfnl_cthelper_subsys); 657 nfnetlink_subsys_unregister(&nfnl_cthelper_subsys);
661 658
662 for (i=0; i<nf_ct_helper_hsize; i++) { 659 for (i=0; i<nf_ct_helper_hsize; i++) {
663 hlist_for_each_entry_safe(cur, n, tmp, &nf_ct_helper_hash[i], 660 hlist_for_each_entry_safe(cur, tmp, &nf_ct_helper_hash[i],
664 hnode) { 661 hnode) {
665 /* skip non-userspace conntrack helpers. */ 662 /* skip non-userspace conntrack helpers. */
666 if (!(cur->flags & NF_CT_HELPER_F_USERSPACE)) 663 if (!(cur->flags & NF_CT_HELPER_F_USERSPACE))
diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c
index 92fd8eca0d31..f248db572972 100644
--- a/net/netfilter/nfnetlink_log.c
+++ b/net/netfilter/nfnetlink_log.c
@@ -87,11 +87,10 @@ static struct nfulnl_instance *
87__instance_lookup(u_int16_t group_num) 87__instance_lookup(u_int16_t group_num)
88{ 88{
89 struct hlist_head *head; 89 struct hlist_head *head;
90 struct hlist_node *pos;
91 struct nfulnl_instance *inst; 90 struct nfulnl_instance *inst;
92 91
93 head = &instance_table[instance_hashfn(group_num)]; 92 head = &instance_table[instance_hashfn(group_num)];
94 hlist_for_each_entry_rcu(inst, pos, head, hlist) { 93 hlist_for_each_entry_rcu(inst, head, hlist) {
95 if (inst->group_num == group_num) 94 if (inst->group_num == group_num)
96 return inst; 95 return inst;
97 } 96 }
@@ -717,11 +716,11 @@ nfulnl_rcv_nl_event(struct notifier_block *this,
717 /* destroy all instances for this portid */ 716 /* destroy all instances for this portid */
718 spin_lock_bh(&instances_lock); 717 spin_lock_bh(&instances_lock);
719 for (i = 0; i < INSTANCE_BUCKETS; i++) { 718 for (i = 0; i < INSTANCE_BUCKETS; i++) {
720 struct hlist_node *tmp, *t2; 719 struct hlist_node *t2;
721 struct nfulnl_instance *inst; 720 struct nfulnl_instance *inst;
722 struct hlist_head *head = &instance_table[i]; 721 struct hlist_head *head = &instance_table[i];
723 722
724 hlist_for_each_entry_safe(inst, tmp, t2, head, hlist) { 723 hlist_for_each_entry_safe(inst, t2, head, hlist) {
725 if ((net_eq(n->net, &init_net)) && 724 if ((net_eq(n->net, &init_net)) &&
726 (n->portid == inst->peer_portid)) 725 (n->portid == inst->peer_portid))
727 __instance_destroy(inst); 726 __instance_destroy(inst);
diff --git a/net/netfilter/nfnetlink_queue_core.c b/net/netfilter/nfnetlink_queue_core.c
index 3158d87b56a8..858fd52c1040 100644
--- a/net/netfilter/nfnetlink_queue_core.c
+++ b/net/netfilter/nfnetlink_queue_core.c
@@ -80,11 +80,10 @@ static struct nfqnl_instance *
80instance_lookup(u_int16_t queue_num) 80instance_lookup(u_int16_t queue_num)
81{ 81{
82 struct hlist_head *head; 82 struct hlist_head *head;
83 struct hlist_node *pos;
84 struct nfqnl_instance *inst; 83 struct nfqnl_instance *inst;
85 84
86 head = &instance_table[instance_hashfn(queue_num)]; 85 head = &instance_table[instance_hashfn(queue_num)];
87 hlist_for_each_entry_rcu(inst, pos, head, hlist) { 86 hlist_for_each_entry_rcu(inst, head, hlist) {
88 if (inst->queue_num == queue_num) 87 if (inst->queue_num == queue_num)
89 return inst; 88 return inst;
90 } 89 }
@@ -583,11 +582,10 @@ nfqnl_dev_drop(int ifindex)
583 rcu_read_lock(); 582 rcu_read_lock();
584 583
585 for (i = 0; i < INSTANCE_BUCKETS; i++) { 584 for (i = 0; i < INSTANCE_BUCKETS; i++) {
586 struct hlist_node *tmp;
587 struct nfqnl_instance *inst; 585 struct nfqnl_instance *inst;
588 struct hlist_head *head = &instance_table[i]; 586 struct hlist_head *head = &instance_table[i];
589 587
590 hlist_for_each_entry_rcu(inst, tmp, head, hlist) 588 hlist_for_each_entry_rcu(inst, head, hlist)
591 nfqnl_flush(inst, dev_cmp, ifindex); 589 nfqnl_flush(inst, dev_cmp, ifindex);
592 } 590 }
593 591
@@ -627,11 +625,11 @@ nfqnl_rcv_nl_event(struct notifier_block *this,
627 /* destroy all instances for this portid */ 625 /* destroy all instances for this portid */
628 spin_lock(&instances_lock); 626 spin_lock(&instances_lock);
629 for (i = 0; i < INSTANCE_BUCKETS; i++) { 627 for (i = 0; i < INSTANCE_BUCKETS; i++) {
630 struct hlist_node *tmp, *t2; 628 struct hlist_node *t2;
631 struct nfqnl_instance *inst; 629 struct nfqnl_instance *inst;
632 struct hlist_head *head = &instance_table[i]; 630 struct hlist_head *head = &instance_table[i];
633 631
634 hlist_for_each_entry_safe(inst, tmp, t2, head, hlist) { 632 hlist_for_each_entry_safe(inst, t2, head, hlist) {
635 if ((n->net == &init_net) && 633 if ((n->net == &init_net) &&
636 (n->portid == inst->peer_portid)) 634 (n->portid == inst->peer_portid))
637 __instance_destroy(inst); 635 __instance_destroy(inst);
diff --git a/net/netfilter/xt_RATEEST.c b/net/netfilter/xt_RATEEST.c
index f264032b8c56..370adf622cef 100644
--- a/net/netfilter/xt_RATEEST.c
+++ b/net/netfilter/xt_RATEEST.c
@@ -43,12 +43,11 @@ static void xt_rateest_hash_insert(struct xt_rateest *est)
43struct xt_rateest *xt_rateest_lookup(const char *name) 43struct xt_rateest *xt_rateest_lookup(const char *name)
44{ 44{
45 struct xt_rateest *est; 45 struct xt_rateest *est;
46 struct hlist_node *n;
47 unsigned int h; 46 unsigned int h;
48 47
49 h = xt_rateest_hash(name); 48 h = xt_rateest_hash(name);
50 mutex_lock(&xt_rateest_mutex); 49 mutex_lock(&xt_rateest_mutex);
51 hlist_for_each_entry(est, n, &rateest_hash[h], list) { 50 hlist_for_each_entry(est, &rateest_hash[h], list) {
52 if (strcmp(est->name, name) == 0) { 51 if (strcmp(est->name, name) == 0) {
53 est->refcnt++; 52 est->refcnt++;
54 mutex_unlock(&xt_rateest_mutex); 53 mutex_unlock(&xt_rateest_mutex);
diff --git a/net/netfilter/xt_connlimit.c b/net/netfilter/xt_connlimit.c
index 70b5591a2586..c40b2695633b 100644
--- a/net/netfilter/xt_connlimit.c
+++ b/net/netfilter/xt_connlimit.c
@@ -101,7 +101,7 @@ static int count_them(struct net *net,
101{ 101{
102 const struct nf_conntrack_tuple_hash *found; 102 const struct nf_conntrack_tuple_hash *found;
103 struct xt_connlimit_conn *conn; 103 struct xt_connlimit_conn *conn;
104 struct hlist_node *pos, *n; 104 struct hlist_node *n;
105 struct nf_conn *found_ct; 105 struct nf_conn *found_ct;
106 struct hlist_head *hash; 106 struct hlist_head *hash;
107 bool addit = true; 107 bool addit = true;
@@ -115,7 +115,7 @@ static int count_them(struct net *net,
115 rcu_read_lock(); 115 rcu_read_lock();
116 116
117 /* check the saved connections */ 117 /* check the saved connections */
118 hlist_for_each_entry_safe(conn, pos, n, hash, node) { 118 hlist_for_each_entry_safe(conn, n, hash, node) {
119 found = nf_conntrack_find_get(net, NF_CT_DEFAULT_ZONE, 119 found = nf_conntrack_find_get(net, NF_CT_DEFAULT_ZONE,
120 &conn->tuple); 120 &conn->tuple);
121 found_ct = NULL; 121 found_ct = NULL;
@@ -258,14 +258,14 @@ static void connlimit_mt_destroy(const struct xt_mtdtor_param *par)
258{ 258{
259 const struct xt_connlimit_info *info = par->matchinfo; 259 const struct xt_connlimit_info *info = par->matchinfo;
260 struct xt_connlimit_conn *conn; 260 struct xt_connlimit_conn *conn;
261 struct hlist_node *pos, *n; 261 struct hlist_node *n;
262 struct hlist_head *hash = info->data->iphash; 262 struct hlist_head *hash = info->data->iphash;
263 unsigned int i; 263 unsigned int i;
264 264
265 nf_ct_l3proto_module_put(par->family); 265 nf_ct_l3proto_module_put(par->family);
266 266
267 for (i = 0; i < ARRAY_SIZE(info->data->iphash); ++i) { 267 for (i = 0; i < ARRAY_SIZE(info->data->iphash); ++i) {
268 hlist_for_each_entry_safe(conn, pos, n, &hash[i], node) { 268 hlist_for_each_entry_safe(conn, n, &hash[i], node) {
269 hlist_del(&conn->node); 269 hlist_del(&conn->node);
270 kfree(conn); 270 kfree(conn);
271 } 271 }
diff --git a/net/netfilter/xt_hashlimit.c b/net/netfilter/xt_hashlimit.c
index 98218c896d2e..f330e8beaf69 100644
--- a/net/netfilter/xt_hashlimit.c
+++ b/net/netfilter/xt_hashlimit.c
@@ -141,11 +141,10 @@ dsthash_find(const struct xt_hashlimit_htable *ht,
141 const struct dsthash_dst *dst) 141 const struct dsthash_dst *dst)
142{ 142{
143 struct dsthash_ent *ent; 143 struct dsthash_ent *ent;
144 struct hlist_node *pos;
145 u_int32_t hash = hash_dst(ht, dst); 144 u_int32_t hash = hash_dst(ht, dst);
146 145
147 if (!hlist_empty(&ht->hash[hash])) { 146 if (!hlist_empty(&ht->hash[hash])) {
148 hlist_for_each_entry_rcu(ent, pos, &ht->hash[hash], node) 147 hlist_for_each_entry_rcu(ent, &ht->hash[hash], node)
149 if (dst_cmp(ent, dst)) { 148 if (dst_cmp(ent, dst)) {
150 spin_lock(&ent->lock); 149 spin_lock(&ent->lock);
151 return ent; 150 return ent;
@@ -297,8 +296,8 @@ static void htable_selective_cleanup(struct xt_hashlimit_htable *ht,
297 spin_lock_bh(&ht->lock); 296 spin_lock_bh(&ht->lock);
298 for (i = 0; i < ht->cfg.size; i++) { 297 for (i = 0; i < ht->cfg.size; i++) {
299 struct dsthash_ent *dh; 298 struct dsthash_ent *dh;
300 struct hlist_node *pos, *n; 299 struct hlist_node *n;
301 hlist_for_each_entry_safe(dh, pos, n, &ht->hash[i], node) { 300 hlist_for_each_entry_safe(dh, n, &ht->hash[i], node) {
302 if ((*select)(ht, dh)) 301 if ((*select)(ht, dh))
303 dsthash_free(ht, dh); 302 dsthash_free(ht, dh);
304 } 303 }
@@ -343,9 +342,8 @@ static struct xt_hashlimit_htable *htable_find_get(struct net *net,
343{ 342{
344 struct hashlimit_net *hashlimit_net = hashlimit_pernet(net); 343 struct hashlimit_net *hashlimit_net = hashlimit_pernet(net);
345 struct xt_hashlimit_htable *hinfo; 344 struct xt_hashlimit_htable *hinfo;
346 struct hlist_node *pos;
347 345
348 hlist_for_each_entry(hinfo, pos, &hashlimit_net->htables, node) { 346 hlist_for_each_entry(hinfo, &hashlimit_net->htables, node) {
349 if (!strcmp(name, hinfo->pde->name) && 347 if (!strcmp(name, hinfo->pde->name) &&
350 hinfo->family == family) { 348 hinfo->family == family) {
351 hinfo->use++; 349 hinfo->use++;
@@ -821,10 +819,9 @@ static int dl_seq_show(struct seq_file *s, void *v)
821 struct xt_hashlimit_htable *htable = s->private; 819 struct xt_hashlimit_htable *htable = s->private;
822 unsigned int *bucket = (unsigned int *)v; 820 unsigned int *bucket = (unsigned int *)v;
823 struct dsthash_ent *ent; 821 struct dsthash_ent *ent;
824 struct hlist_node *pos;
825 822
826 if (!hlist_empty(&htable->hash[*bucket])) { 823 if (!hlist_empty(&htable->hash[*bucket])) {
827 hlist_for_each_entry(ent, pos, &htable->hash[*bucket], node) 824 hlist_for_each_entry(ent, &htable->hash[*bucket], node)
828 if (dl_seq_real_show(ent, htable->family, s)) 825 if (dl_seq_real_show(ent, htable->family, s))
829 return -1; 826 return -1;
830 } 827 }
@@ -877,7 +874,6 @@ static int __net_init hashlimit_proc_net_init(struct net *net)
877static void __net_exit hashlimit_proc_net_exit(struct net *net) 874static void __net_exit hashlimit_proc_net_exit(struct net *net)
878{ 875{
879 struct xt_hashlimit_htable *hinfo; 876 struct xt_hashlimit_htable *hinfo;
880 struct hlist_node *pos;
881 struct proc_dir_entry *pde; 877 struct proc_dir_entry *pde;
882 struct hashlimit_net *hashlimit_net = hashlimit_pernet(net); 878 struct hashlimit_net *hashlimit_net = hashlimit_pernet(net);
883 879
@@ -890,7 +886,7 @@ static void __net_exit hashlimit_proc_net_exit(struct net *net)
890 if (pde == NULL) 886 if (pde == NULL)
891 pde = hashlimit_net->ip6t_hashlimit; 887 pde = hashlimit_net->ip6t_hashlimit;
892 888
893 hlist_for_each_entry(hinfo, pos, &hashlimit_net->htables, node) 889 hlist_for_each_entry(hinfo, &hashlimit_net->htables, node)
894 remove_proc_entry(hinfo->pde->name, pde); 890 remove_proc_entry(hinfo->pde->name, pde);
895 891
896 hashlimit_net->ipt_hashlimit = NULL; 892 hashlimit_net->ipt_hashlimit = NULL;
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index 8097b4f3ead4..1e3fd5bfcd86 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -248,11 +248,10 @@ static struct sock *netlink_lookup(struct net *net, int protocol, u32 portid)
248 struct nl_portid_hash *hash = &nl_table[protocol].hash; 248 struct nl_portid_hash *hash = &nl_table[protocol].hash;
249 struct hlist_head *head; 249 struct hlist_head *head;
250 struct sock *sk; 250 struct sock *sk;
251 struct hlist_node *node;
252 251
253 read_lock(&nl_table_lock); 252 read_lock(&nl_table_lock);
254 head = nl_portid_hashfn(hash, portid); 253 head = nl_portid_hashfn(hash, portid);
255 sk_for_each(sk, node, head) { 254 sk_for_each(sk, head) {
256 if (net_eq(sock_net(sk), net) && (nlk_sk(sk)->portid == portid)) { 255 if (net_eq(sock_net(sk), net) && (nlk_sk(sk)->portid == portid)) {
257 sock_hold(sk); 256 sock_hold(sk);
258 goto found; 257 goto found;
@@ -312,9 +311,9 @@ static int nl_portid_hash_rehash(struct nl_portid_hash *hash, int grow)
312 311
313 for (i = 0; i <= omask; i++) { 312 for (i = 0; i <= omask; i++) {
314 struct sock *sk; 313 struct sock *sk;
315 struct hlist_node *node, *tmp; 314 struct hlist_node *tmp;
316 315
317 sk_for_each_safe(sk, node, tmp, &otable[i]) 316 sk_for_each_safe(sk, tmp, &otable[i])
318 __sk_add_node(sk, nl_portid_hashfn(hash, nlk_sk(sk)->portid)); 317 __sk_add_node(sk, nl_portid_hashfn(hash, nlk_sk(sk)->portid));
319 } 318 }
320 319
@@ -344,7 +343,6 @@ static void
344netlink_update_listeners(struct sock *sk) 343netlink_update_listeners(struct sock *sk)
345{ 344{
346 struct netlink_table *tbl = &nl_table[sk->sk_protocol]; 345 struct netlink_table *tbl = &nl_table[sk->sk_protocol];
347 struct hlist_node *node;
348 unsigned long mask; 346 unsigned long mask;
349 unsigned int i; 347 unsigned int i;
350 struct listeners *listeners; 348 struct listeners *listeners;
@@ -355,7 +353,7 @@ netlink_update_listeners(struct sock *sk)
355 353
356 for (i = 0; i < NLGRPLONGS(tbl->groups); i++) { 354 for (i = 0; i < NLGRPLONGS(tbl->groups); i++) {
357 mask = 0; 355 mask = 0;
358 sk_for_each_bound(sk, node, &tbl->mc_list) { 356 sk_for_each_bound(sk, &tbl->mc_list) {
359 if (i < NLGRPLONGS(nlk_sk(sk)->ngroups)) 357 if (i < NLGRPLONGS(nlk_sk(sk)->ngroups))
360 mask |= nlk_sk(sk)->groups[i]; 358 mask |= nlk_sk(sk)->groups[i];
361 } 359 }
@@ -371,18 +369,17 @@ static int netlink_insert(struct sock *sk, struct net *net, u32 portid)
371 struct hlist_head *head; 369 struct hlist_head *head;
372 int err = -EADDRINUSE; 370 int err = -EADDRINUSE;
373 struct sock *osk; 371 struct sock *osk;
374 struct hlist_node *node;
375 int len; 372 int len;
376 373
377 netlink_table_grab(); 374 netlink_table_grab();
378 head = nl_portid_hashfn(hash, portid); 375 head = nl_portid_hashfn(hash, portid);
379 len = 0; 376 len = 0;
380 sk_for_each(osk, node, head) { 377 sk_for_each(osk, head) {
381 if (net_eq(sock_net(osk), net) && (nlk_sk(osk)->portid == portid)) 378 if (net_eq(sock_net(osk), net) && (nlk_sk(osk)->portid == portid))
382 break; 379 break;
383 len++; 380 len++;
384 } 381 }
385 if (node) 382 if (osk)
386 goto err; 383 goto err;
387 384
388 err = -EBUSY; 385 err = -EBUSY;
@@ -575,7 +572,6 @@ static int netlink_autobind(struct socket *sock)
575 struct nl_portid_hash *hash = &nl_table[sk->sk_protocol].hash; 572 struct nl_portid_hash *hash = &nl_table[sk->sk_protocol].hash;
576 struct hlist_head *head; 573 struct hlist_head *head;
577 struct sock *osk; 574 struct sock *osk;
578 struct hlist_node *node;
579 s32 portid = task_tgid_vnr(current); 575 s32 portid = task_tgid_vnr(current);
580 int err; 576 int err;
581 static s32 rover = -4097; 577 static s32 rover = -4097;
@@ -584,7 +580,7 @@ retry:
584 cond_resched(); 580 cond_resched();
585 netlink_table_grab(); 581 netlink_table_grab();
586 head = nl_portid_hashfn(hash, portid); 582 head = nl_portid_hashfn(hash, portid);
587 sk_for_each(osk, node, head) { 583 sk_for_each(osk, head) {
588 if (!net_eq(sock_net(osk), net)) 584 if (!net_eq(sock_net(osk), net))
589 continue; 585 continue;
590 if (nlk_sk(osk)->portid == portid) { 586 if (nlk_sk(osk)->portid == portid) {
@@ -1101,7 +1097,6 @@ int netlink_broadcast_filtered(struct sock *ssk, struct sk_buff *skb, u32 portid
1101{ 1097{
1102 struct net *net = sock_net(ssk); 1098 struct net *net = sock_net(ssk);
1103 struct netlink_broadcast_data info; 1099 struct netlink_broadcast_data info;
1104 struct hlist_node *node;
1105 struct sock *sk; 1100 struct sock *sk;
1106 1101
1107 skb = netlink_trim(skb, allocation); 1102 skb = netlink_trim(skb, allocation);
@@ -1124,7 +1119,7 @@ int netlink_broadcast_filtered(struct sock *ssk, struct sk_buff *skb, u32 portid
1124 1119
1125 netlink_lock_table(); 1120 netlink_lock_table();
1126 1121
1127 sk_for_each_bound(sk, node, &nl_table[ssk->sk_protocol].mc_list) 1122 sk_for_each_bound(sk, &nl_table[ssk->sk_protocol].mc_list)
1128 do_one_broadcast(sk, &info); 1123 do_one_broadcast(sk, &info);
1129 1124
1130 consume_skb(skb); 1125 consume_skb(skb);
@@ -1200,7 +1195,6 @@ out:
1200int netlink_set_err(struct sock *ssk, u32 portid, u32 group, int code) 1195int netlink_set_err(struct sock *ssk, u32 portid, u32 group, int code)
1201{ 1196{
1202 struct netlink_set_err_data info; 1197 struct netlink_set_err_data info;
1203 struct hlist_node *node;
1204 struct sock *sk; 1198 struct sock *sk;
1205 int ret = 0; 1199 int ret = 0;
1206 1200
@@ -1212,7 +1206,7 @@ int netlink_set_err(struct sock *ssk, u32 portid, u32 group, int code)
1212 1206
1213 read_lock(&nl_table_lock); 1207 read_lock(&nl_table_lock);
1214 1208
1215 sk_for_each_bound(sk, node, &nl_table[ssk->sk_protocol].mc_list) 1209 sk_for_each_bound(sk, &nl_table[ssk->sk_protocol].mc_list)
1216 ret += do_one_set_err(sk, &info); 1210 ret += do_one_set_err(sk, &info);
1217 1211
1218 read_unlock(&nl_table_lock); 1212 read_unlock(&nl_table_lock);
@@ -1676,10 +1670,9 @@ int netlink_change_ngroups(struct sock *sk, unsigned int groups)
1676void __netlink_clear_multicast_users(struct sock *ksk, unsigned int group) 1670void __netlink_clear_multicast_users(struct sock *ksk, unsigned int group)
1677{ 1671{
1678 struct sock *sk; 1672 struct sock *sk;
1679 struct hlist_node *node;
1680 struct netlink_table *tbl = &nl_table[ksk->sk_protocol]; 1673 struct netlink_table *tbl = &nl_table[ksk->sk_protocol];
1681 1674
1682 sk_for_each_bound(sk, node, &tbl->mc_list) 1675 sk_for_each_bound(sk, &tbl->mc_list)
1683 netlink_update_socket_mc(nlk_sk(sk), group, 0); 1676 netlink_update_socket_mc(nlk_sk(sk), group, 0);
1684} 1677}
1685 1678
@@ -1974,14 +1967,13 @@ static struct sock *netlink_seq_socket_idx(struct seq_file *seq, loff_t pos)
1974 struct nl_seq_iter *iter = seq->private; 1967 struct nl_seq_iter *iter = seq->private;
1975 int i, j; 1968 int i, j;
1976 struct sock *s; 1969 struct sock *s;
1977 struct hlist_node *node;
1978 loff_t off = 0; 1970 loff_t off = 0;
1979 1971
1980 for (i = 0; i < MAX_LINKS; i++) { 1972 for (i = 0; i < MAX_LINKS; i++) {
1981 struct nl_portid_hash *hash = &nl_table[i].hash; 1973 struct nl_portid_hash *hash = &nl_table[i].hash;
1982 1974
1983 for (j = 0; j <= hash->mask; j++) { 1975 for (j = 0; j <= hash->mask; j++) {
1984 sk_for_each(s, node, &hash->table[j]) { 1976 sk_for_each(s, &hash->table[j]) {
1985 if (sock_net(s) != seq_file_net(seq)) 1977 if (sock_net(s) != seq_file_net(seq))
1986 continue; 1978 continue;
1987 if (off == pos) { 1979 if (off == pos) {
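The socket-list wrappers (sk_for_each, sk_for_each_safe, sk_for_each_bound) shrink the same way, and netlink_insert() above also shows the one behavioural point this series relies on: the old code tested the leftover node cursor after the loop (if (node)), while the conversion tests the entry pointer instead (if (osk)). That is valid because the new iterators are built on a NULL-propagating accessor, whose shape is approximately:

    #define hlist_entry_safe(ptr, type, member)                     \
            ({ typeof(ptr) ____ptr = (ptr);                         \
               ____ptr ? hlist_entry(____ptr, type, member) : NULL; \
            })

so a full traversal leaves the entry pointer NULL, while an early break leaves it pointing at the match:

    struct sock *sk;

    sk_for_each(sk, head)                   /* head: the chain to search */
            if (matches(sk))                /* hypothetical predicate */
                    break;

    if (sk)                                 /* non-NULL iff the loop broke early */
            return -EADDRINUSE;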
diff --git a/net/netrom/af_netrom.c b/net/netrom/af_netrom.c
index 297b07a029de..d1fa1d9ffd2e 100644
--- a/net/netrom/af_netrom.c
+++ b/net/netrom/af_netrom.c
@@ -104,10 +104,9 @@ static void nr_remove_socket(struct sock *sk)
104static void nr_kill_by_device(struct net_device *dev) 104static void nr_kill_by_device(struct net_device *dev)
105{ 105{
106 struct sock *s; 106 struct sock *s;
107 struct hlist_node *node;
108 107
109 spin_lock_bh(&nr_list_lock); 108 spin_lock_bh(&nr_list_lock);
110 sk_for_each(s, node, &nr_list) 109 sk_for_each(s, &nr_list)
111 if (nr_sk(s)->device == dev) 110 if (nr_sk(s)->device == dev)
112 nr_disconnect(s, ENETUNREACH); 111 nr_disconnect(s, ENETUNREACH);
113 spin_unlock_bh(&nr_list_lock); 112 spin_unlock_bh(&nr_list_lock);
@@ -149,10 +148,9 @@ static void nr_insert_socket(struct sock *sk)
149static struct sock *nr_find_listener(ax25_address *addr) 148static struct sock *nr_find_listener(ax25_address *addr)
150{ 149{
151 struct sock *s; 150 struct sock *s;
152 struct hlist_node *node;
153 151
154 spin_lock_bh(&nr_list_lock); 152 spin_lock_bh(&nr_list_lock);
155 sk_for_each(s, node, &nr_list) 153 sk_for_each(s, &nr_list)
156 if (!ax25cmp(&nr_sk(s)->source_addr, addr) && 154 if (!ax25cmp(&nr_sk(s)->source_addr, addr) &&
157 s->sk_state == TCP_LISTEN) { 155 s->sk_state == TCP_LISTEN) {
158 bh_lock_sock(s); 156 bh_lock_sock(s);
@@ -170,10 +168,9 @@ found:
170static struct sock *nr_find_socket(unsigned char index, unsigned char id) 168static struct sock *nr_find_socket(unsigned char index, unsigned char id)
171{ 169{
172 struct sock *s; 170 struct sock *s;
173 struct hlist_node *node;
174 171
175 spin_lock_bh(&nr_list_lock); 172 spin_lock_bh(&nr_list_lock);
176 sk_for_each(s, node, &nr_list) { 173 sk_for_each(s, &nr_list) {
177 struct nr_sock *nr = nr_sk(s); 174 struct nr_sock *nr = nr_sk(s);
178 175
179 if (nr->my_index == index && nr->my_id == id) { 176 if (nr->my_index == index && nr->my_id == id) {
@@ -194,10 +191,9 @@ static struct sock *nr_find_peer(unsigned char index, unsigned char id,
194 ax25_address *dest) 191 ax25_address *dest)
195{ 192{
196 struct sock *s; 193 struct sock *s;
197 struct hlist_node *node;
198 194
199 spin_lock_bh(&nr_list_lock); 195 spin_lock_bh(&nr_list_lock);
200 sk_for_each(s, node, &nr_list) { 196 sk_for_each(s, &nr_list) {
201 struct nr_sock *nr = nr_sk(s); 197 struct nr_sock *nr = nr_sk(s);
202 198
203 if (nr->your_index == index && nr->your_id == id && 199 if (nr->your_index == index && nr->your_id == id &&
diff --git a/net/netrom/nr_route.c b/net/netrom/nr_route.c
index 70ffff76a967..b976d5eff2de 100644
--- a/net/netrom/nr_route.c
+++ b/net/netrom/nr_route.c
@@ -49,10 +49,9 @@ static struct nr_node *nr_node_get(ax25_address *callsign)
49{ 49{
50 struct nr_node *found = NULL; 50 struct nr_node *found = NULL;
51 struct nr_node *nr_node; 51 struct nr_node *nr_node;
52 struct hlist_node *node;
53 52
54 spin_lock_bh(&nr_node_list_lock); 53 spin_lock_bh(&nr_node_list_lock);
55 nr_node_for_each(nr_node, node, &nr_node_list) 54 nr_node_for_each(nr_node, &nr_node_list)
56 if (ax25cmp(callsign, &nr_node->callsign) == 0) { 55 if (ax25cmp(callsign, &nr_node->callsign) == 0) {
57 nr_node_hold(nr_node); 56 nr_node_hold(nr_node);
58 found = nr_node; 57 found = nr_node;
@@ -67,10 +66,9 @@ static struct nr_neigh *nr_neigh_get_dev(ax25_address *callsign,
67{ 66{
68 struct nr_neigh *found = NULL; 67 struct nr_neigh *found = NULL;
69 struct nr_neigh *nr_neigh; 68 struct nr_neigh *nr_neigh;
70 struct hlist_node *node;
71 69
72 spin_lock_bh(&nr_neigh_list_lock); 70 spin_lock_bh(&nr_neigh_list_lock);
73 nr_neigh_for_each(nr_neigh, node, &nr_neigh_list) 71 nr_neigh_for_each(nr_neigh, &nr_neigh_list)
74 if (ax25cmp(callsign, &nr_neigh->callsign) == 0 && 72 if (ax25cmp(callsign, &nr_neigh->callsign) == 0 &&
75 nr_neigh->dev == dev) { 73 nr_neigh->dev == dev) {
76 nr_neigh_hold(nr_neigh); 74 nr_neigh_hold(nr_neigh);
@@ -114,10 +112,9 @@ static int __must_check nr_add_node(ax25_address *nr, const char *mnemonic,
114 */ 112 */
115 if (nr_neigh != NULL && nr_neigh->failed != 0 && quality == 0) { 113 if (nr_neigh != NULL && nr_neigh->failed != 0 && quality == 0) {
116 struct nr_node *nr_nodet; 114 struct nr_node *nr_nodet;
117 struct hlist_node *node;
118 115
119 spin_lock_bh(&nr_node_list_lock); 116 spin_lock_bh(&nr_node_list_lock);
120 nr_node_for_each(nr_nodet, node, &nr_node_list) { 117 nr_node_for_each(nr_nodet, &nr_node_list) {
121 nr_node_lock(nr_nodet); 118 nr_node_lock(nr_nodet);
122 for (i = 0; i < nr_nodet->count; i++) 119 for (i = 0; i < nr_nodet->count; i++)
123 if (nr_nodet->routes[i].neighbour == nr_neigh) 120 if (nr_nodet->routes[i].neighbour == nr_neigh)
@@ -485,11 +482,11 @@ static int nr_dec_obs(void)
485{ 482{
486 struct nr_neigh *nr_neigh; 483 struct nr_neigh *nr_neigh;
487 struct nr_node *s; 484 struct nr_node *s;
488 struct hlist_node *node, *nodet; 485 struct hlist_node *nodet;
489 int i; 486 int i;
490 487
491 spin_lock_bh(&nr_node_list_lock); 488 spin_lock_bh(&nr_node_list_lock);
492 nr_node_for_each_safe(s, node, nodet, &nr_node_list) { 489 nr_node_for_each_safe(s, nodet, &nr_node_list) {
493 nr_node_lock(s); 490 nr_node_lock(s);
494 for (i = 0; i < s->count; i++) { 491 for (i = 0; i < s->count; i++) {
495 switch (s->routes[i].obs_count) { 492 switch (s->routes[i].obs_count) {
@@ -540,15 +537,15 @@ static int nr_dec_obs(void)
540void nr_rt_device_down(struct net_device *dev) 537void nr_rt_device_down(struct net_device *dev)
541{ 538{
542 struct nr_neigh *s; 539 struct nr_neigh *s;
543 struct hlist_node *node, *nodet, *node2, *node2t; 540 struct hlist_node *nodet, *node2t;
544 struct nr_node *t; 541 struct nr_node *t;
545 int i; 542 int i;
546 543
547 spin_lock_bh(&nr_neigh_list_lock); 544 spin_lock_bh(&nr_neigh_list_lock);
548 nr_neigh_for_each_safe(s, node, nodet, &nr_neigh_list) { 545 nr_neigh_for_each_safe(s, nodet, &nr_neigh_list) {
549 if (s->dev == dev) { 546 if (s->dev == dev) {
550 spin_lock_bh(&nr_node_list_lock); 547 spin_lock_bh(&nr_node_list_lock);
551 nr_node_for_each_safe(t, node2, node2t, &nr_node_list) { 548 nr_node_for_each_safe(t, node2t, &nr_node_list) {
552 nr_node_lock(t); 549 nr_node_lock(t);
553 for (i = 0; i < t->count; i++) { 550 for (i = 0; i < t->count; i++) {
554 if (t->routes[i].neighbour == s) { 551 if (t->routes[i].neighbour == s) {
@@ -737,11 +734,10 @@ int nr_rt_ioctl(unsigned int cmd, void __user *arg)
737void nr_link_failed(ax25_cb *ax25, int reason) 734void nr_link_failed(ax25_cb *ax25, int reason)
738{ 735{
739 struct nr_neigh *s, *nr_neigh = NULL; 736 struct nr_neigh *s, *nr_neigh = NULL;
740 struct hlist_node *node;
741 struct nr_node *nr_node = NULL; 737 struct nr_node *nr_node = NULL;
742 738
743 spin_lock_bh(&nr_neigh_list_lock); 739 spin_lock_bh(&nr_neigh_list_lock);
744 nr_neigh_for_each(s, node, &nr_neigh_list) { 740 nr_neigh_for_each(s, &nr_neigh_list) {
745 if (s->ax25 == ax25) { 741 if (s->ax25 == ax25) {
746 nr_neigh_hold(s); 742 nr_neigh_hold(s);
747 nr_neigh = s; 743 nr_neigh = s;
@@ -761,7 +757,7 @@ void nr_link_failed(ax25_cb *ax25, int reason)
761 return; 757 return;
762 } 758 }
763 spin_lock_bh(&nr_node_list_lock); 759 spin_lock_bh(&nr_node_list_lock);
764 nr_node_for_each(nr_node, node, &nr_node_list) { 760 nr_node_for_each(nr_node, &nr_node_list) {
765 nr_node_lock(nr_node); 761 nr_node_lock(nr_node);
766 if (nr_node->which < nr_node->count && 762 if (nr_node->which < nr_node->count &&
767 nr_node->routes[nr_node->which].neighbour == nr_neigh) 763 nr_node->routes[nr_node->which].neighbour == nr_neigh)
@@ -1013,16 +1009,16 @@ void __exit nr_rt_free(void)
1013{ 1009{
1014 struct nr_neigh *s = NULL; 1010 struct nr_neigh *s = NULL;
1015 struct nr_node *t = NULL; 1011 struct nr_node *t = NULL;
1016 struct hlist_node *node, *nodet; 1012 struct hlist_node *nodet;
1017 1013
1018 spin_lock_bh(&nr_neigh_list_lock); 1014 spin_lock_bh(&nr_neigh_list_lock);
1019 spin_lock_bh(&nr_node_list_lock); 1015 spin_lock_bh(&nr_node_list_lock);
1020 nr_node_for_each_safe(t, node, nodet, &nr_node_list) { 1016 nr_node_for_each_safe(t, nodet, &nr_node_list) {
1021 nr_node_lock(t); 1017 nr_node_lock(t);
1022 nr_remove_node_locked(t); 1018 nr_remove_node_locked(t);
1023 nr_node_unlock(t); 1019 nr_node_unlock(t);
1024 } 1020 }
1025 nr_neigh_for_each_safe(s, node, nodet, &nr_neigh_list) { 1021 nr_neigh_for_each_safe(s, nodet, &nr_neigh_list) {
1026 while(s->count) { 1022 while(s->count) {
1027 s->count--; 1023 s->count--;
1028 nr_neigh_put(s); 1024 nr_neigh_put(s);
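NET/ROM iterates through its own wrapper macros, so nr_node_for_each()/nr_neigh_for_each() and their _safe variants lose the cursor argument in lockstep with the underlying hlist iterators. If memory serves, the definitions live in include/net/netrom.h with node_node/neigh_node as the hlist members, and their post-series shape is roughly:

    #define nr_node_for_each(__nr_node, list) \
            hlist_for_each_entry(__nr_node, list, node_node)

    #define nr_node_for_each_safe(__nr_node, node2, list) \
            hlist_for_each_entry_safe(__nr_node, node2, list, node_node)

    #define nr_neigh_for_each(__nr_neigh, list) \
            hlist_for_each_entry(__nr_neigh, list, neigh_node)

    #define nr_neigh_for_each_safe(__nr_neigh, node2, list) \
            hlist_for_each_entry_safe(__nr_neigh, node2, list, neigh_node)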
diff --git a/net/nfc/llcp/llcp.c b/net/nfc/llcp/llcp.c
index 746f5a2f9804..7f8266dd14cb 100644
--- a/net/nfc/llcp/llcp.c
+++ b/net/nfc/llcp/llcp.c
@@ -71,14 +71,14 @@ static void nfc_llcp_socket_purge(struct nfc_llcp_sock *sock)
71static void nfc_llcp_socket_release(struct nfc_llcp_local *local, bool listen) 71static void nfc_llcp_socket_release(struct nfc_llcp_local *local, bool listen)
72{ 72{
73 struct sock *sk; 73 struct sock *sk;
74 struct hlist_node *node, *tmp; 74 struct hlist_node *tmp;
75 struct nfc_llcp_sock *llcp_sock; 75 struct nfc_llcp_sock *llcp_sock;
76 76
77 skb_queue_purge(&local->tx_queue); 77 skb_queue_purge(&local->tx_queue);
78 78
79 write_lock(&local->sockets.lock); 79 write_lock(&local->sockets.lock);
80 80
81 sk_for_each_safe(sk, node, tmp, &local->sockets.head) { 81 sk_for_each_safe(sk, tmp, &local->sockets.head) {
82 llcp_sock = nfc_llcp_sock(sk); 82 llcp_sock = nfc_llcp_sock(sk);
83 83
84 bh_lock_sock(sk); 84 bh_lock_sock(sk);
@@ -171,7 +171,6 @@ static struct nfc_llcp_sock *nfc_llcp_sock_get(struct nfc_llcp_local *local,
171 u8 ssap, u8 dsap) 171 u8 ssap, u8 dsap)
172{ 172{
173 struct sock *sk; 173 struct sock *sk;
174 struct hlist_node *node;
175 struct nfc_llcp_sock *llcp_sock, *tmp_sock; 174 struct nfc_llcp_sock *llcp_sock, *tmp_sock;
176 175
177 pr_debug("ssap dsap %d %d\n", ssap, dsap); 176 pr_debug("ssap dsap %d %d\n", ssap, dsap);
@@ -183,7 +182,7 @@ static struct nfc_llcp_sock *nfc_llcp_sock_get(struct nfc_llcp_local *local,
183 182
184 llcp_sock = NULL; 183 llcp_sock = NULL;
185 184
186 sk_for_each(sk, node, &local->sockets.head) { 185 sk_for_each(sk, &local->sockets.head) {
187 tmp_sock = nfc_llcp_sock(sk); 186 tmp_sock = nfc_llcp_sock(sk);
188 187
189 if (tmp_sock->ssap == ssap && tmp_sock->dsap == dsap) { 188 if (tmp_sock->ssap == ssap && tmp_sock->dsap == dsap) {
@@ -272,7 +271,6 @@ struct nfc_llcp_sock *nfc_llcp_sock_from_sn(struct nfc_llcp_local *local,
272 u8 *sn, size_t sn_len) 271 u8 *sn, size_t sn_len)
273{ 272{
274 struct sock *sk; 273 struct sock *sk;
275 struct hlist_node *node;
276 struct nfc_llcp_sock *llcp_sock, *tmp_sock; 274 struct nfc_llcp_sock *llcp_sock, *tmp_sock;
277 275
278 pr_debug("sn %zd %p\n", sn_len, sn); 276 pr_debug("sn %zd %p\n", sn_len, sn);
@@ -284,7 +282,7 @@ struct nfc_llcp_sock *nfc_llcp_sock_from_sn(struct nfc_llcp_local *local,
284 282
285 llcp_sock = NULL; 283 llcp_sock = NULL;
286 284
287 sk_for_each(sk, node, &local->sockets.head) { 285 sk_for_each(sk, &local->sockets.head) {
288 tmp_sock = nfc_llcp_sock(sk); 286 tmp_sock = nfc_llcp_sock(sk);
289 287
290 pr_debug("llcp sock %p\n", tmp_sock); 288 pr_debug("llcp sock %p\n", tmp_sock);
@@ -601,14 +599,13 @@ static void nfc_llcp_set_nrns(struct nfc_llcp_sock *sock, struct sk_buff *pdu)
601void nfc_llcp_send_to_raw_sock(struct nfc_llcp_local *local, 599void nfc_llcp_send_to_raw_sock(struct nfc_llcp_local *local,
602 struct sk_buff *skb, u8 direction) 600 struct sk_buff *skb, u8 direction)
603{ 601{
604 struct hlist_node *node;
605 struct sk_buff *skb_copy = NULL, *nskb; 602 struct sk_buff *skb_copy = NULL, *nskb;
606 struct sock *sk; 603 struct sock *sk;
607 u8 *data; 604 u8 *data;
608 605
609 read_lock(&local->raw_sockets.lock); 606 read_lock(&local->raw_sockets.lock);
610 607
611 sk_for_each(sk, node, &local->raw_sockets.head) { 608 sk_for_each(sk, &local->raw_sockets.head) {
612 if (sk->sk_state != LLCP_BOUND) 609 if (sk->sk_state != LLCP_BOUND)
613 continue; 610 continue;
614 611
@@ -697,11 +694,10 @@ static struct nfc_llcp_sock *nfc_llcp_connecting_sock_get(struct nfc_llcp_local
697{ 694{
698 struct sock *sk; 695 struct sock *sk;
699 struct nfc_llcp_sock *llcp_sock; 696 struct nfc_llcp_sock *llcp_sock;
700 struct hlist_node *node;
701 697
702 read_lock(&local->connecting_sockets.lock); 698 read_lock(&local->connecting_sockets.lock);
703 699
704 sk_for_each(sk, node, &local->connecting_sockets.head) { 700 sk_for_each(sk, &local->connecting_sockets.head) {
705 llcp_sock = nfc_llcp_sock(sk); 701 llcp_sock = nfc_llcp_sock(sk);
706 702
707 if (llcp_sock->ssap == ssap) { 703 if (llcp_sock->ssap == ssap) {
diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c
index 9dc537df46c4..e87a26506dba 100644
--- a/net/openvswitch/datapath.c
+++ b/net/openvswitch/datapath.c
@@ -158,11 +158,10 @@ static struct hlist_head *vport_hash_bucket(const struct datapath *dp,
158struct vport *ovs_lookup_vport(const struct datapath *dp, u16 port_no) 158struct vport *ovs_lookup_vport(const struct datapath *dp, u16 port_no)
159{ 159{
160 struct vport *vport; 160 struct vport *vport;
161 struct hlist_node *n;
162 struct hlist_head *head; 161 struct hlist_head *head;
163 162
164 head = vport_hash_bucket(dp, port_no); 163 head = vport_hash_bucket(dp, port_no);
165 hlist_for_each_entry_rcu(vport, n, head, dp_hash_node) { 164 hlist_for_each_entry_rcu(vport, head, dp_hash_node) {
166 if (vport->port_no == port_no) 165 if (vport->port_no == port_no)
167 return vport; 166 return vport;
168 } 167 }
@@ -1386,9 +1385,9 @@ static void __dp_destroy(struct datapath *dp)
1386 1385
1387 for (i = 0; i < DP_VPORT_HASH_BUCKETS; i++) { 1386 for (i = 0; i < DP_VPORT_HASH_BUCKETS; i++) {
1388 struct vport *vport; 1387 struct vport *vport;
1389 struct hlist_node *node, *n; 1388 struct hlist_node *n;
1390 1389
1391 hlist_for_each_entry_safe(vport, node, n, &dp->ports[i], dp_hash_node) 1390 hlist_for_each_entry_safe(vport, n, &dp->ports[i], dp_hash_node)
1392 if (vport->port_no != OVSP_LOCAL) 1391 if (vport->port_no != OVSP_LOCAL)
1393 ovs_dp_detach_port(vport); 1392 ovs_dp_detach_port(vport);
1394 } 1393 }
@@ -1825,10 +1824,9 @@ static int ovs_vport_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
1825 rcu_read_lock(); 1824 rcu_read_lock();
1826 for (i = bucket; i < DP_VPORT_HASH_BUCKETS; i++) { 1825 for (i = bucket; i < DP_VPORT_HASH_BUCKETS; i++) {
1827 struct vport *vport; 1826 struct vport *vport;
1828 struct hlist_node *n;
1829 1827
1830 j = 0; 1828 j = 0;
1831 hlist_for_each_entry_rcu(vport, n, &dp->ports[i], dp_hash_node) { 1829 hlist_for_each_entry_rcu(vport, &dp->ports[i], dp_hash_node) {
1832 if (j >= skip && 1830 if (j >= skip &&
1833 ovs_vport_cmd_fill_info(vport, skb, 1831 ovs_vport_cmd_fill_info(vport, skb,
1834 NETLINK_CB(cb->skb).portid, 1832 NETLINK_CB(cb->skb).portid,
diff --git a/net/openvswitch/flow.c b/net/openvswitch/flow.c
index c3294cebc4f2..20605ecf100b 100644
--- a/net/openvswitch/flow.c
+++ b/net/openvswitch/flow.c
@@ -299,10 +299,10 @@ void ovs_flow_tbl_destroy(struct flow_table *table)
 	for (i = 0; i < table->n_buckets; i++) {
 		struct sw_flow *flow;
 		struct hlist_head *head = flex_array_get(table->buckets, i);
-		struct hlist_node *node, *n;
+		struct hlist_node *n;
 		int ver = table->node_ver;
 
-		hlist_for_each_entry_safe(flow, node, n, head, hash_node[ver]) {
+		hlist_for_each_entry_safe(flow, n, head, hash_node[ver]) {
 			hlist_del_rcu(&flow->hash_node[ver]);
 			ovs_flow_free(flow);
 		}
@@ -332,7 +332,6 @@ struct sw_flow *ovs_flow_tbl_next(struct flow_table *table, u32 *bucket, u32 *la
 {
 	struct sw_flow *flow;
 	struct hlist_head *head;
-	struct hlist_node *n;
 	int ver;
 	int i;
 
@@ -340,7 +339,7 @@ struct sw_flow *ovs_flow_tbl_next(struct flow_table *table, u32 *bucket, u32 *la
 	while (*bucket < table->n_buckets) {
 		i = 0;
 		head = flex_array_get(table->buckets, *bucket);
-		hlist_for_each_entry_rcu(flow, n, head, hash_node[ver]) {
+		hlist_for_each_entry_rcu(flow, head, hash_node[ver]) {
 			if (i < *last) {
 				i++;
 				continue;
@@ -367,11 +366,10 @@ static void flow_table_copy_flows(struct flow_table *old, struct flow_table *new
 	for (i = 0; i < old->n_buckets; i++) {
 		struct sw_flow *flow;
 		struct hlist_head *head;
-		struct hlist_node *n;
 
 		head = flex_array_get(old->buckets, i);
 
-		hlist_for_each_entry(flow, n, head, hash_node[old_ver])
+		hlist_for_each_entry(flow, head, hash_node[old_ver])
 			ovs_flow_tbl_insert(new, flow);
 	}
 	old->keep_flows = true;
@@ -766,14 +764,13 @@ struct sw_flow *ovs_flow_tbl_lookup(struct flow_table *table,
 				    struct sw_flow_key *key, int key_len)
 {
 	struct sw_flow *flow;
-	struct hlist_node *n;
 	struct hlist_head *head;
 	u32 hash;
 
 	hash = ovs_flow_hash(key, key_len);
 
 	head = find_bucket(table, hash);
-	hlist_for_each_entry_rcu(flow, n, head, hash_node[table->node_ver]) {
+	hlist_for_each_entry_rcu(flow, head, hash_node[table->node_ver]) {
 
 		if (flow->hash == hash &&
 		    !memcmp(&flow->key, key, key_len)) {
diff --git a/net/openvswitch/vport.c b/net/openvswitch/vport.c
index 70af0bedbac4..ba717cc038b3 100644
--- a/net/openvswitch/vport.c
+++ b/net/openvswitch/vport.c
@@ -86,9 +86,8 @@ struct vport *ovs_vport_locate(struct net *net, const char *name)
 {
 	struct hlist_head *bucket = hash_bucket(net, name);
 	struct vport *vport;
-	struct hlist_node *node;
 
-	hlist_for_each_entry_rcu(vport, node, bucket, hash_node)
+	hlist_for_each_entry_rcu(vport, bucket, hash_node)
 		if (!strcmp(name, vport->ops->get_name(vport)) &&
 		    net_eq(ovs_dp_get_net(vport->dp), net))
 			return vport;
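The _rcu variants keep their locking contract unchanged by this series: the walk must run inside an RCU read-side critical section (for ovs_vport_locate() that section is presumably entered by its callers). The pattern as a self-contained sketch, with match() hypothetical:

	struct vport *vport;

	rcu_read_lock();
	hlist_for_each_entry_rcu(vport, bucket, hash_node)
		if (match(vport))
			break;	/* vport may only be dereferenced inside the section */
	rcu_read_unlock();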
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index c7bfeff10767..1d6793dbfbae 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -3263,12 +3263,11 @@ static int packet_getsockopt(struct socket *sock, int level, int optname,
 static int packet_notifier(struct notifier_block *this, unsigned long msg, void *data)
 {
 	struct sock *sk;
-	struct hlist_node *node;
 	struct net_device *dev = data;
 	struct net *net = dev_net(dev);
 
 	rcu_read_lock();
-	sk_for_each_rcu(sk, node, &net->packet.sklist) {
+	sk_for_each_rcu(sk, &net->packet.sklist) {
 		struct packet_sock *po = pkt_sk(sk);
 
 		switch (msg) {
diff --git a/net/packet/diag.c b/net/packet/diag.c
index 8db6e21c46bd..d3fcd1ebef7e 100644
--- a/net/packet/diag.c
+++ b/net/packet/diag.c
@@ -172,13 +172,12 @@ static int packet_diag_dump(struct sk_buff *skb, struct netlink_callback *cb)
 	struct packet_diag_req *req;
 	struct net *net;
 	struct sock *sk;
-	struct hlist_node *node;
 
 	net = sock_net(skb->sk);
 	req = nlmsg_data(cb->nlh);
 
 	mutex_lock(&net->packet.sklist_lock);
-	sk_for_each(sk, node, &net->packet.sklist) {
+	sk_for_each(sk, &net->packet.sklist) {
 		if (!net_eq(sock_net(sk), net))
 			continue;
 		if (num < s_num)
diff --git a/net/phonet/pep.c b/net/phonet/pep.c
index 576f22c9c76e..e77411735de8 100644
--- a/net/phonet/pep.c
+++ b/net/phonet/pep.c
@@ -640,11 +640,10 @@ static struct sock *pep_find_pipe(const struct hlist_head *hlist,
 				  const struct sockaddr_pn *dst,
 				  u8 pipe_handle)
 {
-	struct hlist_node *node;
 	struct sock *sknode;
 	u16 dobj = pn_sockaddr_get_object(dst);
 
-	sk_for_each(sknode, node, hlist) {
+	sk_for_each(sknode, hlist) {
 		struct pep_sock *pnnode = pep_sk(sknode);
 
 		/* Ports match, but addresses might not: */
diff --git a/net/phonet/socket.c b/net/phonet/socket.c
index b7e982782255..1afd1381cdc7 100644
--- a/net/phonet/socket.c
+++ b/net/phonet/socket.c
@@ -76,7 +76,6 @@ static struct hlist_head *pn_hash_list(u16 obj)
  */
 struct sock *pn_find_sock_by_sa(struct net *net, const struct sockaddr_pn *spn)
 {
-	struct hlist_node *node;
 	struct sock *sknode;
 	struct sock *rval = NULL;
 	u16 obj = pn_sockaddr_get_object(spn);
@@ -84,7 +83,7 @@ struct sock *pn_find_sock_by_sa(struct net *net, const struct sockaddr_pn *spn)
 	struct hlist_head *hlist = pn_hash_list(obj);
 
 	rcu_read_lock();
-	sk_for_each_rcu(sknode, node, hlist) {
+	sk_for_each_rcu(sknode, hlist) {
 		struct pn_sock *pn = pn_sk(sknode);
 		BUG_ON(!pn->sobject); /* unbound socket */
 
@@ -120,10 +119,9 @@ void pn_deliver_sock_broadcast(struct net *net, struct sk_buff *skb)
 
 	rcu_read_lock();
 	for (h = 0; h < PN_HASHSIZE; h++) {
-		struct hlist_node *node;
 		struct sock *sknode;
 
-		sk_for_each(sknode, node, hlist) {
+		sk_for_each(sknode, hlist) {
 			struct sk_buff *clone;
 
 			if (!net_eq(sock_net(sknode), net))
@@ -543,12 +541,11 @@ static struct sock *pn_sock_get_idx(struct seq_file *seq, loff_t pos)
 {
 	struct net *net = seq_file_net(seq);
 	struct hlist_head *hlist = pnsocks.hlist;
-	struct hlist_node *node;
 	struct sock *sknode;
 	unsigned int h;
 
 	for (h = 0; h < PN_HASHSIZE; h++) {
-		sk_for_each_rcu(sknode, node, hlist) {
+		sk_for_each_rcu(sknode, hlist) {
 			if (!net_eq(net, sock_net(sknode)))
 				continue;
 			if (!pos)
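The sk_for_each family used throughout the socket hunks is a thin wrapper over the hlist iterators, keyed on the sk_node member of struct sock, and it drops its cursor the same way. After this series the wrappers in include/net/sock.h presumably reduce to:

	#define sk_for_each(__sk, list) \
		hlist_for_each_entry(__sk, list, sk_node)

	#define sk_for_each_rcu(__sk, list) \
		hlist_for_each_entry_rcu(__sk, list, sk_node)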
diff --git a/net/rds/bind.c b/net/rds/bind.c
index 637bde56c9db..b5ad65a0067e 100644
--- a/net/rds/bind.c
+++ b/net/rds/bind.c
@@ -52,13 +52,12 @@ static struct rds_sock *rds_bind_lookup(__be32 addr, __be16 port,
 					struct rds_sock *insert)
 {
 	struct rds_sock *rs;
-	struct hlist_node *node;
 	struct hlist_head *head = hash_to_bucket(addr, port);
 	u64 cmp;
 	u64 needle = ((u64)be32_to_cpu(addr) << 32) | be16_to_cpu(port);
 
 	rcu_read_lock();
-	hlist_for_each_entry_rcu(rs, node, head, rs_bound_node) {
+	hlist_for_each_entry_rcu(rs, head, rs_bound_node) {
 		cmp = ((u64)be32_to_cpu(rs->rs_bound_addr) << 32) |
 		      be16_to_cpu(rs->rs_bound_port);
 
diff --git a/net/rds/connection.c b/net/rds/connection.c
index 9e07c756d1f9..642ad42c416b 100644
--- a/net/rds/connection.c
+++ b/net/rds/connection.c
@@ -69,9 +69,8 @@ static struct rds_connection *rds_conn_lookup(struct hlist_head *head,
 					      struct rds_transport *trans)
 {
 	struct rds_connection *conn, *ret = NULL;
-	struct hlist_node *pos;
 
-	hlist_for_each_entry_rcu(conn, pos, head, c_hash_node) {
+	hlist_for_each_entry_rcu(conn, head, c_hash_node) {
 		if (conn->c_faddr == faddr && conn->c_laddr == laddr &&
 		    conn->c_trans == trans) {
 			ret = conn;
@@ -376,7 +375,6 @@ static void rds_conn_message_info(struct socket *sock, unsigned int len,
 				  int want_send)
 {
 	struct hlist_head *head;
-	struct hlist_node *pos;
 	struct list_head *list;
 	struct rds_connection *conn;
 	struct rds_message *rm;
@@ -390,7 +388,7 @@ static void rds_conn_message_info(struct socket *sock, unsigned int len,
 
 	for (i = 0, head = rds_conn_hash; i < ARRAY_SIZE(rds_conn_hash);
 	     i++, head++) {
-		hlist_for_each_entry_rcu(conn, pos, head, c_hash_node) {
+		hlist_for_each_entry_rcu(conn, head, c_hash_node) {
 			if (want_send)
 				list = &conn->c_send_queue;
 			else
@@ -439,7 +437,6 @@ void rds_for_each_conn_info(struct socket *sock, unsigned int len,
 {
 	uint64_t buffer[(item_len + 7) / 8];
 	struct hlist_head *head;
-	struct hlist_node *pos;
 	struct rds_connection *conn;
 	size_t i;
 
@@ -450,7 +447,7 @@ void rds_for_each_conn_info(struct socket *sock, unsigned int len,
 
 	for (i = 0, head = rds_conn_hash; i < ARRAY_SIZE(rds_conn_hash);
 	     i++, head++) {
-		hlist_for_each_entry_rcu(conn, pos, head, c_hash_node) {
+		hlist_for_each_entry_rcu(conn, head, c_hash_node) {
 
 			/* XXX no c_lock usage.. */
 			if (!visitor(conn, buffer))
diff --git a/net/rose/af_rose.c b/net/rose/af_rose.c
index b768fe9d5e7a..cf68e6e4054a 100644
--- a/net/rose/af_rose.c
+++ b/net/rose/af_rose.c
@@ -165,10 +165,9 @@ static void rose_remove_socket(struct sock *sk)
 void rose_kill_by_neigh(struct rose_neigh *neigh)
 {
 	struct sock *s;
-	struct hlist_node *node;
 
 	spin_lock_bh(&rose_list_lock);
-	sk_for_each(s, node, &rose_list) {
+	sk_for_each(s, &rose_list) {
 		struct rose_sock *rose = rose_sk(s);
 
 		if (rose->neighbour == neigh) {
@@ -186,10 +185,9 @@ void rose_kill_by_neigh(struct rose_neigh *neigh)
 static void rose_kill_by_device(struct net_device *dev)
 {
 	struct sock *s;
-	struct hlist_node *node;
 
 	spin_lock_bh(&rose_list_lock);
-	sk_for_each(s, node, &rose_list) {
+	sk_for_each(s, &rose_list) {
 		struct rose_sock *rose = rose_sk(s);
 
 		if (rose->device == dev) {
@@ -246,10 +244,9 @@ static void rose_insert_socket(struct sock *sk)
 static struct sock *rose_find_listener(rose_address *addr, ax25_address *call)
 {
 	struct sock *s;
-	struct hlist_node *node;
 
 	spin_lock_bh(&rose_list_lock);
-	sk_for_each(s, node, &rose_list) {
+	sk_for_each(s, &rose_list) {
 		struct rose_sock *rose = rose_sk(s);
 
 		if (!rosecmp(&rose->source_addr, addr) &&
@@ -258,7 +255,7 @@ static struct sock *rose_find_listener(rose_address *addr, ax25_address *call)
 			goto found;
 	}
 
-	sk_for_each(s, node, &rose_list) {
+	sk_for_each(s, &rose_list) {
 		struct rose_sock *rose = rose_sk(s);
 
 		if (!rosecmp(&rose->source_addr, addr) &&
@@ -278,10 +275,9 @@ found:
 struct sock *rose_find_socket(unsigned int lci, struct rose_neigh *neigh)
 {
 	struct sock *s;
-	struct hlist_node *node;
 
 	spin_lock_bh(&rose_list_lock);
-	sk_for_each(s, node, &rose_list) {
+	sk_for_each(s, &rose_list) {
 		struct rose_sock *rose = rose_sk(s);
 
 		if (rose->lci == lci && rose->neighbour == neigh)
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index a181b484812a..c297e2a8e2a1 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -545,7 +545,7 @@ static void qdisc_class_hash_free(struct hlist_head *h, unsigned int n)
 void qdisc_class_hash_grow(struct Qdisc *sch, struct Qdisc_class_hash *clhash)
 {
 	struct Qdisc_class_common *cl;
-	struct hlist_node *n, *next;
+	struct hlist_node *next;
 	struct hlist_head *nhash, *ohash;
 	unsigned int nsize, nmask, osize;
 	unsigned int i, h;
@@ -564,7 +564,7 @@ void qdisc_class_hash_grow(struct Qdisc *sch, struct Qdisc_class_hash *clhash)
 
 	sch_tree_lock(sch);
 	for (i = 0; i < osize; i++) {
-		hlist_for_each_entry_safe(cl, n, next, &ohash[i], hnode) {
+		hlist_for_each_entry_safe(cl, next, &ohash[i], hnode) {
 			h = qdisc_class_hash(cl->classid, nmask);
 			hlist_add_head(&cl->hnode, &nhash[h]);
 		}
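qdisc_class_hash_grow() is the move-while-iterating cousin of the delete case: hlist_add_head() rewrites cl->hnode's links when splicing the node into the new table, so a plain iterator would lose its place; the _safe form latches the next node first. Reduced to a sketch, where struct item, its key and hash() are assumptions:

	struct item *it;
	struct hlist_node *next;
	unsigned int i;

	for (i = 0; i < osize; i++)
		hlist_for_each_entry_safe(it, next, &old[i], node)
			/* relinking clobbers it->node, but next is already saved */
			hlist_add_head(&it->node, &new[hash(it->key) & nmask]);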
diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c
index 0e19948470b8..13aa47aa2ffb 100644
--- a/net/sched/sch_cbq.c
+++ b/net/sched/sch_cbq.c
@@ -1041,14 +1041,13 @@ static void cbq_adjust_levels(struct cbq_class *this)
 static void cbq_normalize_quanta(struct cbq_sched_data *q, int prio)
 {
 	struct cbq_class *cl;
-	struct hlist_node *n;
 	unsigned int h;
 
 	if (q->quanta[prio] == 0)
 		return;
 
 	for (h = 0; h < q->clhash.hashsize; h++) {
-		hlist_for_each_entry(cl, n, &q->clhash.hash[h], common.hnode) {
+		hlist_for_each_entry(cl, &q->clhash.hash[h], common.hnode) {
 			/* BUGGGG... Beware! This expression suffer of
 			 * arithmetic overflows!
 			 */
@@ -1087,10 +1086,9 @@ static void cbq_sync_defmap(struct cbq_class *cl)
 			continue;
 
 		for (h = 0; h < q->clhash.hashsize; h++) {
-			struct hlist_node *n;
 			struct cbq_class *c;
 
-			hlist_for_each_entry(c, n, &q->clhash.hash[h],
+			hlist_for_each_entry(c, &q->clhash.hash[h],
 					     common.hnode) {
 				if (c->split == split && c->level < level &&
 				    c->defmap & (1<<i)) {
@@ -1210,7 +1208,6 @@ cbq_reset(struct Qdisc *sch)
 {
 	struct cbq_sched_data *q = qdisc_priv(sch);
 	struct cbq_class *cl;
-	struct hlist_node *n;
 	int prio;
 	unsigned int h;
 
@@ -1228,7 +1225,7 @@ cbq_reset(struct Qdisc *sch)
 	q->active[prio] = NULL;
 
 	for (h = 0; h < q->clhash.hashsize; h++) {
-		hlist_for_each_entry(cl, n, &q->clhash.hash[h], common.hnode) {
+		hlist_for_each_entry(cl, &q->clhash.hash[h], common.hnode) {
 			qdisc_reset(cl->q);
 
 			cl->next_alive = NULL;
@@ -1697,7 +1694,7 @@ static void cbq_destroy_class(struct Qdisc *sch, struct cbq_class *cl)
 static void cbq_destroy(struct Qdisc *sch)
 {
 	struct cbq_sched_data *q = qdisc_priv(sch);
-	struct hlist_node *n, *next;
+	struct hlist_node *next;
 	struct cbq_class *cl;
 	unsigned int h;
 
@@ -1710,11 +1707,11 @@ static void cbq_destroy(struct Qdisc *sch)
 	 * be bound to classes which have been destroyed already. --TGR '04
 	 */
 	for (h = 0; h < q->clhash.hashsize; h++) {
-		hlist_for_each_entry(cl, n, &q->clhash.hash[h], common.hnode)
+		hlist_for_each_entry(cl, &q->clhash.hash[h], common.hnode)
 			tcf_destroy_chain(&cl->filter_list);
 	}
 	for (h = 0; h < q->clhash.hashsize; h++) {
-		hlist_for_each_entry_safe(cl, n, next, &q->clhash.hash[h],
+		hlist_for_each_entry_safe(cl, next, &q->clhash.hash[h],
 					  common.hnode)
 			cbq_destroy_class(sch, cl);
 	}
@@ -2013,14 +2010,13 @@ static void cbq_walk(struct Qdisc *sch, struct qdisc_walker *arg)
 {
 	struct cbq_sched_data *q = qdisc_priv(sch);
 	struct cbq_class *cl;
-	struct hlist_node *n;
 	unsigned int h;
 
 	if (arg->stop)
 		return;
 
 	for (h = 0; h < q->clhash.hashsize; h++) {
-		hlist_for_each_entry(cl, n, &q->clhash.hash[h], common.hnode) {
+		hlist_for_each_entry(cl, &q->clhash.hash[h], common.hnode) {
 			if (arg->count < arg->skip) {
 				arg->count++;
 				continue;
diff --git a/net/sched/sch_drr.c b/net/sched/sch_drr.c
index 71e50c80315f..759b308d1a8d 100644
--- a/net/sched/sch_drr.c
+++ b/net/sched/sch_drr.c
@@ -293,14 +293,13 @@ static void drr_walk(struct Qdisc *sch, struct qdisc_walker *arg)
 {
 	struct drr_sched *q = qdisc_priv(sch);
 	struct drr_class *cl;
-	struct hlist_node *n;
 	unsigned int i;
 
 	if (arg->stop)
 		return;
 
 	for (i = 0; i < q->clhash.hashsize; i++) {
-		hlist_for_each_entry(cl, n, &q->clhash.hash[i], common.hnode) {
+		hlist_for_each_entry(cl, &q->clhash.hash[i], common.hnode) {
 			if (arg->count < arg->skip) {
 				arg->count++;
 				continue;
@@ -451,11 +450,10 @@ static void drr_reset_qdisc(struct Qdisc *sch)
 {
 	struct drr_sched *q = qdisc_priv(sch);
 	struct drr_class *cl;
-	struct hlist_node *n;
 	unsigned int i;
 
 	for (i = 0; i < q->clhash.hashsize; i++) {
-		hlist_for_each_entry(cl, n, &q->clhash.hash[i], common.hnode) {
+		hlist_for_each_entry(cl, &q->clhash.hash[i], common.hnode) {
 			if (cl->qdisc->q.qlen)
 				list_del(&cl->alist);
 			qdisc_reset(cl->qdisc);
@@ -468,13 +466,13 @@ static void drr_destroy_qdisc(struct Qdisc *sch)
 {
 	struct drr_sched *q = qdisc_priv(sch);
 	struct drr_class *cl;
-	struct hlist_node *n, *next;
+	struct hlist_node *next;
 	unsigned int i;
 
 	tcf_destroy_chain(&q->filter_list);
 
 	for (i = 0; i < q->clhash.hashsize; i++) {
-		hlist_for_each_entry_safe(cl, n, next, &q->clhash.hash[i],
+		hlist_for_each_entry_safe(cl, next, &q->clhash.hash[i],
 					  common.hnode)
 			drr_destroy_class(sch, cl);
 	}
diff --git a/net/sched/sch_hfsc.c b/net/sched/sch_hfsc.c
index 6c2ec4510540..9facea03faeb 100644
--- a/net/sched/sch_hfsc.c
+++ b/net/sched/sch_hfsc.c
@@ -1389,7 +1389,6 @@ static void
 hfsc_walk(struct Qdisc *sch, struct qdisc_walker *arg)
 {
 	struct hfsc_sched *q = qdisc_priv(sch);
-	struct hlist_node *n;
 	struct hfsc_class *cl;
 	unsigned int i;
 
@@ -1397,7 +1396,7 @@ hfsc_walk(struct Qdisc *sch, struct qdisc_walker *arg)
 		return;
 
 	for (i = 0; i < q->clhash.hashsize; i++) {
-		hlist_for_each_entry(cl, n, &q->clhash.hash[i],
+		hlist_for_each_entry(cl, &q->clhash.hash[i],
 				     cl_common.hnode) {
 			if (arg->count < arg->skip) {
 				arg->count++;
@@ -1523,11 +1522,10 @@ hfsc_reset_qdisc(struct Qdisc *sch)
 {
 	struct hfsc_sched *q = qdisc_priv(sch);
 	struct hfsc_class *cl;
-	struct hlist_node *n;
 	unsigned int i;
 
 	for (i = 0; i < q->clhash.hashsize; i++) {
-		hlist_for_each_entry(cl, n, &q->clhash.hash[i], cl_common.hnode)
+		hlist_for_each_entry(cl, &q->clhash.hash[i], cl_common.hnode)
 			hfsc_reset_class(cl);
 	}
 	q->eligible = RB_ROOT;
@@ -1540,16 +1538,16 @@ static void
 hfsc_destroy_qdisc(struct Qdisc *sch)
 {
 	struct hfsc_sched *q = qdisc_priv(sch);
-	struct hlist_node *n, *next;
+	struct hlist_node *next;
 	struct hfsc_class *cl;
 	unsigned int i;
 
 	for (i = 0; i < q->clhash.hashsize; i++) {
-		hlist_for_each_entry(cl, n, &q->clhash.hash[i], cl_common.hnode)
+		hlist_for_each_entry(cl, &q->clhash.hash[i], cl_common.hnode)
 			tcf_destroy_chain(&cl->filter_list);
 	}
 	for (i = 0; i < q->clhash.hashsize; i++) {
-		hlist_for_each_entry_safe(cl, n, next, &q->clhash.hash[i],
+		hlist_for_each_entry_safe(cl, next, &q->clhash.hash[i],
 					  cl_common.hnode)
 			hfsc_destroy_class(sch, cl);
 	}
@@ -1564,12 +1562,11 @@ hfsc_dump_qdisc(struct Qdisc *sch, struct sk_buff *skb)
 	unsigned char *b = skb_tail_pointer(skb);
 	struct tc_hfsc_qopt qopt;
 	struct hfsc_class *cl;
-	struct hlist_node *n;
 	unsigned int i;
 
 	sch->qstats.backlog = 0;
 	for (i = 0; i < q->clhash.hashsize; i++) {
-		hlist_for_each_entry(cl, n, &q->clhash.hash[i], cl_common.hnode)
+		hlist_for_each_entry(cl, &q->clhash.hash[i], cl_common.hnode)
 			sch->qstats.backlog += cl->qdisc->qstats.backlog;
 	}
 
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
index 03c2692ca01e..571f1d211f4d 100644
--- a/net/sched/sch_htb.c
+++ b/net/sched/sch_htb.c
@@ -949,11 +949,10 @@ static void htb_reset(struct Qdisc *sch)
 {
 	struct htb_sched *q = qdisc_priv(sch);
 	struct htb_class *cl;
-	struct hlist_node *n;
 	unsigned int i;
 
 	for (i = 0; i < q->clhash.hashsize; i++) {
-		hlist_for_each_entry(cl, n, &q->clhash.hash[i], common.hnode) {
+		hlist_for_each_entry(cl, &q->clhash.hash[i], common.hnode) {
 			if (cl->level)
 				memset(&cl->un.inner, 0, sizeof(cl->un.inner));
 			else {
@@ -1218,7 +1217,7 @@ static void htb_destroy_class(struct Qdisc *sch, struct htb_class *cl)
 static void htb_destroy(struct Qdisc *sch)
 {
 	struct htb_sched *q = qdisc_priv(sch);
-	struct hlist_node *n, *next;
+	struct hlist_node *next;
 	struct htb_class *cl;
 	unsigned int i;
 
@@ -1232,11 +1231,11 @@ static void htb_destroy(struct Qdisc *sch)
 	tcf_destroy_chain(&q->filter_list);
 
 	for (i = 0; i < q->clhash.hashsize; i++) {
-		hlist_for_each_entry(cl, n, &q->clhash.hash[i], common.hnode)
+		hlist_for_each_entry(cl, &q->clhash.hash[i], common.hnode)
 			tcf_destroy_chain(&cl->filter_list);
 	}
 	for (i = 0; i < q->clhash.hashsize; i++) {
-		hlist_for_each_entry_safe(cl, n, next, &q->clhash.hash[i],
+		hlist_for_each_entry_safe(cl, next, &q->clhash.hash[i],
 					  common.hnode)
 			htb_destroy_class(sch, cl);
 	}
@@ -1516,14 +1515,13 @@ static void htb_walk(struct Qdisc *sch, struct qdisc_walker *arg)
 {
 	struct htb_sched *q = qdisc_priv(sch);
 	struct htb_class *cl;
-	struct hlist_node *n;
 	unsigned int i;
 
 	if (arg->stop)
 		return;
 
 	for (i = 0; i < q->clhash.hashsize; i++) {
-		hlist_for_each_entry(cl, n, &q->clhash.hash[i], common.hnode) {
+		hlist_for_each_entry(cl, &q->clhash.hash[i], common.hnode) {
 			if (arg->count < arg->skip) {
 				arg->count++;
 				continue;
diff --git a/net/sched/sch_qfq.c b/net/sched/sch_qfq.c
index 6ed37652a4c3..e9a77f621c3d 100644
--- a/net/sched/sch_qfq.c
+++ b/net/sched/sch_qfq.c
@@ -276,9 +276,8 @@ static struct qfq_aggregate *qfq_find_agg(struct qfq_sched *q,
 					  u32 lmax, u32 weight)
 {
 	struct qfq_aggregate *agg;
-	struct hlist_node *n;
 
-	hlist_for_each_entry(agg, n, &q->nonfull_aggs, nonfull_next)
+	hlist_for_each_entry(agg, &q->nonfull_aggs, nonfull_next)
 		if (agg->lmax == lmax && agg->class_weight == weight)
 			return agg;
 
@@ -670,14 +669,13 @@ static void qfq_walk(struct Qdisc *sch, struct qdisc_walker *arg)
 {
 	struct qfq_sched *q = qdisc_priv(sch);
 	struct qfq_class *cl;
-	struct hlist_node *n;
 	unsigned int i;
 
 	if (arg->stop)
 		return;
 
 	for (i = 0; i < q->clhash.hashsize; i++) {
-		hlist_for_each_entry(cl, n, &q->clhash.hash[i], common.hnode) {
+		hlist_for_each_entry(cl, &q->clhash.hash[i], common.hnode) {
 			if (arg->count < arg->skip) {
 				arg->count++;
 				continue;
@@ -1376,11 +1374,10 @@ static unsigned int qfq_drop_from_slot(struct qfq_sched *q,
 				       struct hlist_head *slot)
 {
 	struct qfq_aggregate *agg;
-	struct hlist_node *n;
 	struct qfq_class *cl;
 	unsigned int len;
 
-	hlist_for_each_entry(agg, n, slot, next) {
+	hlist_for_each_entry(agg, slot, next) {
 		list_for_each_entry(cl, &agg->active, alist) {
 
 			if (!cl->qdisc->ops->drop)
@@ -1459,11 +1456,10 @@ static void qfq_reset_qdisc(struct Qdisc *sch)
 {
 	struct qfq_sched *q = qdisc_priv(sch);
 	struct qfq_class *cl;
-	struct hlist_node *n;
 	unsigned int i;
 
 	for (i = 0; i < q->clhash.hashsize; i++) {
-		hlist_for_each_entry(cl, n, &q->clhash.hash[i], common.hnode) {
+		hlist_for_each_entry(cl, &q->clhash.hash[i], common.hnode) {
 			if (cl->qdisc->q.qlen > 0)
 				qfq_deactivate_class(q, cl);
 
@@ -1477,13 +1473,13 @@ static void qfq_destroy_qdisc(struct Qdisc *sch)
 {
 	struct qfq_sched *q = qdisc_priv(sch);
 	struct qfq_class *cl;
-	struct hlist_node *n, *next;
+	struct hlist_node *next;
 	unsigned int i;
 
 	tcf_destroy_chain(&q->filter_list);
 
 	for (i = 0; i < q->clhash.hashsize; i++) {
-		hlist_for_each_entry_safe(cl, n, next, &q->clhash.hash[i],
+		hlist_for_each_entry_safe(cl, next, &q->clhash.hash[i],
 					  common.hnode) {
 			qfq_destroy_class(sch, cl);
 		}
diff --git a/net/sctp/associola.c b/net/sctp/associola.c
index 2f95f5a5145d..43cd0dd9149d 100644
--- a/net/sctp/associola.c
+++ b/net/sctp/associola.c
@@ -1591,32 +1591,31 @@ int sctp_assoc_lookup_laddr(struct sctp_association *asoc,
 /* Set an association id for a given association */
 int sctp_assoc_set_id(struct sctp_association *asoc, gfp_t gfp)
 {
-	int assoc_id;
-	int error = 0;
+	bool preload = gfp & __GFP_WAIT;
+	int ret;
 
 	/* If the id is already assigned, keep it. */
 	if (asoc->assoc_id)
-		return error;
-retry:
-	if (unlikely(!idr_pre_get(&sctp_assocs_id, gfp)))
-		return -ENOMEM;
+		return 0;
 
+	if (preload)
+		idr_preload(gfp);
 	spin_lock_bh(&sctp_assocs_id_lock);
-	error = idr_get_new_above(&sctp_assocs_id, (void *)asoc,
-				  idr_low, &assoc_id);
-	if (!error) {
-		idr_low = assoc_id + 1;
+	/* 0 is not a valid id, idr_low is always >= 1 */
+	ret = idr_alloc(&sctp_assocs_id, asoc, idr_low, 0, GFP_NOWAIT);
+	if (ret >= 0) {
+		idr_low = ret + 1;
 		if (idr_low == INT_MAX)
 			idr_low = 1;
 	}
 	spin_unlock_bh(&sctp_assocs_id_lock);
-	if (error == -EAGAIN)
-		goto retry;
-	else if (error)
-		return error;
+	if (preload)
+		idr_preload_end();
+	if (ret < 0)
+		return ret;
 
-	asoc->assoc_id = (sctp_assoc_t) assoc_id;
-	return error;
+	asoc->assoc_id = (sctp_assoc_t)ret;
+	return 0;
 }
 
 /* Free the ASCONF queue */
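The associola.c hunk is the one non-mechanical change in this stretch: it converts SCTP from the old idr_pre_get()/idr_get_new_above() retry loop to the idr_preload()/idr_alloc() API introduced earlier in this batch. The shape of the new pattern, as a minimal sketch in which my_idr and my_lock are assumptions rather than SCTP's names:

	int assign_id(struct idr *my_idr, spinlock_t *my_lock, void *obj, gfp_t gfp)
	{
		int id;

		idr_preload(gfp);	/* may sleep; fills a per-cpu preload cache */
		spin_lock_bh(my_lock);
		/* start at 1; end == 0 means no upper bound; never sleeps here */
		id = idr_alloc(my_idr, obj, 1, 0, GFP_NOWAIT);
		spin_unlock_bh(my_lock);
		idr_preload_end();

		return id;	/* the new id, or -ENOMEM/-ENOSPC on failure */
	}

Callers that may be atomic guard the preload pair on whether the gfp mask allows sleeping, exactly as the hunk above does with its preload flag.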
diff --git a/net/sctp/endpointola.c b/net/sctp/endpointola.c
index 73aad3d16a45..2b3ef03c6098 100644
--- a/net/sctp/endpointola.c
+++ b/net/sctp/endpointola.c
@@ -332,7 +332,6 @@ static struct sctp_association *__sctp_endpoint_lookup_assoc(
 	struct sctp_transport *t = NULL;
 	struct sctp_hashbucket *head;
 	struct sctp_ep_common *epb;
-	struct hlist_node *node;
 	int hash;
 	int rport;
 
@@ -350,7 +349,7 @@ static struct sctp_association *__sctp_endpoint_lookup_assoc(
 			 rport);
 	head = &sctp_assoc_hashtable[hash];
 	read_lock(&head->lock);
-	sctp_for_each_hentry(epb, node, &head->chain) {
+	sctp_for_each_hentry(epb, &head->chain) {
 		tmp = sctp_assoc(epb);
 		if (tmp->ep != ep || rport != tmp->peer.port)
 			continue;
diff --git a/net/sctp/input.c b/net/sctp/input.c
index 965bbbbe48d4..4b2c83146aa7 100644
--- a/net/sctp/input.c
+++ b/net/sctp/input.c
@@ -784,13 +784,12 @@ static struct sctp_endpoint *__sctp_rcv_lookup_endpoint(struct net *net,
 	struct sctp_hashbucket *head;
 	struct sctp_ep_common *epb;
 	struct sctp_endpoint *ep;
-	struct hlist_node *node;
 	int hash;
 
 	hash = sctp_ep_hashfn(net, ntohs(laddr->v4.sin_port));
 	head = &sctp_ep_hashtable[hash];
 	read_lock(&head->lock);
-	sctp_for_each_hentry(epb, node, &head->chain) {
+	sctp_for_each_hentry(epb, &head->chain) {
 		ep = sctp_ep(epb);
 		if (sctp_endpoint_is_match(ep, net, laddr))
 			goto hit;
@@ -876,7 +875,6 @@ static struct sctp_association *__sctp_lookup_association(
 	struct sctp_ep_common *epb;
 	struct sctp_association *asoc;
 	struct sctp_transport *transport;
-	struct hlist_node *node;
 	int hash;
 
 	/* Optimize here for direct hit, only listening connections can
@@ -886,7 +884,7 @@ static struct sctp_association *__sctp_lookup_association(
 			 ntohs(peer->v4.sin_port));
 	head = &sctp_assoc_hashtable[hash];
 	read_lock(&head->lock);
-	sctp_for_each_hentry(epb, node, &head->chain) {
+	sctp_for_each_hentry(epb, &head->chain) {
 		asoc = sctp_assoc(epb);
 		transport = sctp_assoc_is_match(asoc, net, local, peer);
 		if (transport)
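sctp_for_each_hentry() is SCTP's private wrapper over hlist_for_each_entry, walking the node member embedded in struct sctp_ep_common. Its definition is not shown in this diff, but it presumably shrinks the same way:

	/* assumed post-series form of the wrapper in include/net/sctp/sctp.h */
	#define sctp_for_each_hentry(epb, head) \
		hlist_for_each_entry(epb, head, node)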
diff --git a/net/sctp/proc.c b/net/sctp/proc.c
index 8c19e97262ca..ab3bba8cb0a8 100644
--- a/net/sctp/proc.c
+++ b/net/sctp/proc.c
@@ -213,7 +213,6 @@ static int sctp_eps_seq_show(struct seq_file *seq, void *v)
 	struct sctp_ep_common *epb;
 	struct sctp_endpoint *ep;
 	struct sock *sk;
-	struct hlist_node *node;
 	int hash = *(loff_t *)v;
 
 	if (hash >= sctp_ep_hashsize)
@@ -222,7 +221,7 @@ static int sctp_eps_seq_show(struct seq_file *seq, void *v)
 	head = &sctp_ep_hashtable[hash];
 	sctp_local_bh_disable();
 	read_lock(&head->lock);
-	sctp_for_each_hentry(epb, node, &head->chain) {
+	sctp_for_each_hentry(epb, &head->chain) {
 		ep = sctp_ep(epb);
 		sk = epb->sk;
 		if (!net_eq(sock_net(sk), seq_file_net(seq)))
@@ -321,7 +320,6 @@ static int sctp_assocs_seq_show(struct seq_file *seq, void *v)
 	struct sctp_ep_common *epb;
 	struct sctp_association *assoc;
 	struct sock *sk;
-	struct hlist_node *node;
 	int hash = *(loff_t *)v;
 
 	if (hash >= sctp_assoc_hashsize)
@@ -330,7 +328,7 @@ static int sctp_assocs_seq_show(struct seq_file *seq, void *v)
 	head = &sctp_assoc_hashtable[hash];
 	sctp_local_bh_disable();
 	read_lock(&head->lock);
-	sctp_for_each_hentry(epb, node, &head->chain) {
+	sctp_for_each_hentry(epb, &head->chain) {
 		assoc = sctp_assoc(epb);
 		sk = epb->sk;
 		if (!net_eq(sock_net(sk), seq_file_net(seq)))
@@ -436,7 +434,6 @@ static int sctp_remaddr_seq_show(struct seq_file *seq, void *v)
 	struct sctp_hashbucket *head;
 	struct sctp_ep_common *epb;
 	struct sctp_association *assoc;
-	struct hlist_node *node;
 	struct sctp_transport *tsp;
 	int hash = *(loff_t *)v;
 
@@ -447,7 +444,7 @@ static int sctp_remaddr_seq_show(struct seq_file *seq, void *v)
 	sctp_local_bh_disable();
 	read_lock(&head->lock);
 	rcu_read_lock();
-	sctp_for_each_hentry(epb, node, &head->chain) {
+	sctp_for_each_hentry(epb, &head->chain) {
 		if (!net_eq(sock_net(epb->sk), seq_file_net(seq)))
 			continue;
 		assoc = sctp_assoc(epb);
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index cedd9bf67b8c..c99458df3f3f 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -5882,8 +5882,7 @@ static struct sctp_bind_bucket *sctp_bucket_create(
 static long sctp_get_port_local(struct sock *sk, union sctp_addr *addr)
 {
 	struct sctp_bind_hashbucket *head; /* hash list */
-	struct sctp_bind_bucket *pp; /* hash list port iterator */
-	struct hlist_node *node;
+	struct sctp_bind_bucket *pp;
 	unsigned short snum;
 	int ret;
 
@@ -5910,7 +5909,7 @@ static long sctp_get_port_local(struct sock *sk, union sctp_addr *addr)
 			index = sctp_phashfn(sock_net(sk), rover);
 			head = &sctp_port_hashtable[index];
 			sctp_spin_lock(&head->lock);
-			sctp_for_each_hentry(pp, node, &head->chain)
+			sctp_for_each_hentry(pp, &head->chain)
 				if ((pp->port == rover) &&
 				    net_eq(sock_net(sk), pp->net))
 					goto next;
@@ -5938,7 +5937,7 @@ static long sctp_get_port_local(struct sock *sk, union sctp_addr *addr)
 	 */
 	head = &sctp_port_hashtable[sctp_phashfn(sock_net(sk), snum)];
 	sctp_spin_lock(&head->lock);
-	sctp_for_each_hentry(pp, node, &head->chain) {
+	sctp_for_each_hentry(pp, &head->chain) {
 		if ((pp->port == snum) && net_eq(pp->net, sock_net(sk)))
 			goto pp_found;
 	}
@@ -5970,7 +5969,7 @@ pp_found:
 		 * that this port/socket (sk) combination are already
 		 * in an endpoint.
 		 */
-		sk_for_each_bound(sk2, node, &pp->owner) {
+		sk_for_each_bound(sk2, &pp->owner) {
 			struct sctp_endpoint *ep2;
 			ep2 = sctp_sk(sk2)->ep;
 
diff --git a/net/sunrpc/auth.c b/net/sunrpc/auth.c
index 392adc41e2e5..f5294047df77 100644
--- a/net/sunrpc/auth.c
+++ b/net/sunrpc/auth.c
@@ -407,7 +407,6 @@ rpcauth_lookup_credcache(struct rpc_auth *auth, struct auth_cred * acred,
 {
 	LIST_HEAD(free);
 	struct rpc_cred_cache *cache = auth->au_credcache;
-	struct hlist_node *pos;
 	struct rpc_cred	*cred = NULL,
 			*entry, *new;
 	unsigned int nr;
@@ -415,7 +414,7 @@ rpcauth_lookup_credcache(struct rpc_auth *auth, struct auth_cred * acred,
 	nr = hash_long(from_kuid(&init_user_ns, acred->uid), cache->hashbits);
 
 	rcu_read_lock();
-	hlist_for_each_entry_rcu(entry, pos, &cache->hashtable[nr], cr_hash) {
+	hlist_for_each_entry_rcu(entry, &cache->hashtable[nr], cr_hash) {
 		if (!entry->cr_ops->crmatch(acred, entry, flags))
 			continue;
 		spin_lock(&cache->lock);
@@ -439,7 +438,7 @@ rpcauth_lookup_credcache(struct rpc_auth *auth, struct auth_cred * acred,
 	}
 
 	spin_lock(&cache->lock);
-	hlist_for_each_entry(entry, pos, &cache->hashtable[nr], cr_hash) {
+	hlist_for_each_entry(entry, &cache->hashtable[nr], cr_hash) {
 		if (!entry->cr_ops->crmatch(acred, entry, flags))
 			continue;
 		cred = get_rpccred(entry);
diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c
index f3897d10f649..39a4112faf54 100644
--- a/net/sunrpc/cache.c
+++ b/net/sunrpc/cache.c
@@ -670,13 +670,13 @@ static void cache_revisit_request(struct cache_head *item)
 {
 	struct cache_deferred_req *dreq;
 	struct list_head pending;
-	struct hlist_node *lp, *tmp;
+	struct hlist_node *tmp;
 	int hash = DFR_HASH(item);
 
 	INIT_LIST_HEAD(&pending);
 	spin_lock(&cache_defer_lock);
 
-	hlist_for_each_entry_safe(dreq, lp, tmp, &cache_defer_hash[hash], hash)
+	hlist_for_each_entry_safe(dreq, tmp, &cache_defer_hash[hash], hash)
 		if (dreq->item == item) {
 			__unhash_deferred_req(dreq);
 			list_add(&dreq->recent, &pending);
diff --git a/net/sunrpc/svcauth.c b/net/sunrpc/svcauth.c
index 7963569fc04f..2af7b0cba43a 100644
--- a/net/sunrpc/svcauth.c
+++ b/net/sunrpc/svcauth.c
@@ -138,13 +138,12 @@ auth_domain_lookup(char *name, struct auth_domain *new)
 {
 	struct auth_domain *hp;
 	struct hlist_head *head;
-	struct hlist_node *np;
 
 	head = &auth_domain_table[hash_str(name, DN_HASHBITS)];
 
 	spin_lock(&auth_domain_lock);
 
-	hlist_for_each_entry(hp, np, head, hash) {
+	hlist_for_each_entry(hp, head, hash) {
 		if (strcmp(hp->name, name)==0) {
 			kref_get(&hp->ref);
 			spin_unlock(&auth_domain_lock);
diff --git a/net/tipc/name_table.c b/net/tipc/name_table.c
index 46754779fd3d..24b167914311 100644
--- a/net/tipc/name_table.c
+++ b/net/tipc/name_table.c
@@ -473,11 +473,10 @@ static void tipc_nameseq_subscribe(struct name_seq *nseq,
 static struct name_seq *nametbl_find_seq(u32 type)
 {
 	struct hlist_head *seq_head;
-	struct hlist_node *seq_node;
 	struct name_seq *ns;
 
 	seq_head = &table.types[hash(type)];
-	hlist_for_each_entry(ns, seq_node, seq_head, ns_list) {
+	hlist_for_each_entry(ns, seq_head, ns_list) {
 		if (ns->type == type)
 			return ns;
 	}
@@ -853,7 +852,6 @@ static int nametbl_list(char *buf, int len, u32 depth_info,
 			u32 type, u32 lowbound, u32 upbound)
 {
 	struct hlist_head *seq_head;
-	struct hlist_node *seq_node;
 	struct name_seq *seq;
 	int all_types;
 	int ret = 0;
@@ -873,7 +871,7 @@ static int nametbl_list(char *buf, int len, u32 depth_info,
 		upbound = ~0;
 		for (i = 0; i < TIPC_NAMETBL_SIZE; i++) {
 			seq_head = &table.types[i];
-			hlist_for_each_entry(seq, seq_node, seq_head, ns_list) {
+			hlist_for_each_entry(seq, seq_head, ns_list) {
 				ret += nameseq_list(seq, buf + ret, len - ret,
 						    depth, seq->type,
 						    lowbound, upbound, i);
@@ -889,7 +887,7 @@ static int nametbl_list(char *buf, int len, u32 depth_info,
 		ret += nametbl_header(buf + ret, len - ret, depth);
 		i = hash(type);
 		seq_head = &table.types[i];
-		hlist_for_each_entry(seq, seq_node, seq_head, ns_list) {
+		hlist_for_each_entry(seq, seq_head, ns_list) {
 			if (seq->type == type) {
 				ret += nameseq_list(seq, buf + ret, len - ret,
 						    depth, type,
diff --git a/net/tipc/node.c b/net/tipc/node.c
index 48f39dd3eae8..6e6c434872e8 100644
--- a/net/tipc/node.c
+++ b/net/tipc/node.c
@@ -69,12 +69,11 @@ static unsigned int tipc_hashfn(u32 addr)
 struct tipc_node *tipc_node_find(u32 addr)
 {
 	struct tipc_node *node;
-	struct hlist_node *pos;
 
 	if (unlikely(!in_own_cluster_exact(addr)))
 		return NULL;
 
-	hlist_for_each_entry(node, pos, &node_htable[tipc_hashfn(addr)], hash) {
+	hlist_for_each_entry(node, &node_htable[tipc_hashfn(addr)], hash) {
 		if (node->addr == addr)
 			return node;
 	}
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index 87d284289012..51be64f163ec 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -263,9 +263,8 @@ static struct sock *__unix_find_socket_byname(struct net *net,
 					      int len, int type, unsigned int hash)
 {
 	struct sock *s;
-	struct hlist_node *node;
 
-	sk_for_each(s, node, &unix_socket_table[hash ^ type]) {
+	sk_for_each(s, &unix_socket_table[hash ^ type]) {
 		struct unix_sock *u = unix_sk(s);
 
 		if (!net_eq(sock_net(s), net))
@@ -298,10 +297,9 @@ static inline struct sock *unix_find_socket_byname(struct net *net,
 static struct sock *unix_find_socket_byinode(struct inode *i)
 {
 	struct sock *s;
-	struct hlist_node *node;
 
 	spin_lock(&unix_table_lock);
-	sk_for_each(s, node,
+	sk_for_each(s,
 		    &unix_socket_table[i->i_ino & (UNIX_HASH_SIZE - 1)]) {
 		struct dentry *dentry = unix_sk(s)->path.dentry;
 
diff --git a/net/unix/diag.c b/net/unix/diag.c
index 5ac19dc1d5e4..d591091603bf 100644
--- a/net/unix/diag.c
+++ b/net/unix/diag.c
@@ -192,10 +192,9 @@ static int unix_diag_dump(struct sk_buff *skb, struct netlink_callback *cb)
 	     slot < ARRAY_SIZE(unix_socket_table);
 	     s_num = 0, slot++) {
 		struct sock *sk;
-		struct hlist_node *node;
 
 		num = 0;
-		sk_for_each(sk, node, &unix_socket_table[slot]) {
+		sk_for_each(sk, &unix_socket_table[slot]) {
 			if (!net_eq(sock_net(sk), net))
 				continue;
 			if (num < s_num)
@@ -226,9 +225,7 @@ static struct sock *unix_lookup_by_ino(int ino)
 
 	spin_lock(&unix_table_lock);
 	for (i = 0; i < ARRAY_SIZE(unix_socket_table); i++) {
-		struct hlist_node *node;
-
-		sk_for_each(sk, node, &unix_socket_table[i])
+		sk_for_each(sk, &unix_socket_table[i])
 			if (ino == sock_i_ino(sk)) {
 				sock_hold(sk);
 				spin_unlock(&unix_table_lock);
diff --git a/net/x25/af_x25.c b/net/x25/af_x25.c
index a306bc66000e..37ca9694aabe 100644
--- a/net/x25/af_x25.c
+++ b/net/x25/af_x25.c
@@ -208,11 +208,10 @@ static void x25_remove_socket(struct sock *sk)
 static void x25_kill_by_device(struct net_device *dev)
 {
 	struct sock *s;
-	struct hlist_node *node;
 
 	write_lock_bh(&x25_list_lock);
 
-	sk_for_each(s, node, &x25_list)
+	sk_for_each(s, &x25_list)
 		if (x25_sk(s)->neighbour && x25_sk(s)->neighbour->dev == dev)
 			x25_disconnect(s, ENETUNREACH, 0, 0);
 
@@ -280,12 +279,11 @@ static struct sock *x25_find_listener(struct x25_address *addr,
 {
 	struct sock *s;
 	struct sock *next_best;
-	struct hlist_node *node;
 
 	read_lock_bh(&x25_list_lock);
 	next_best = NULL;
 
-	sk_for_each(s, node, &x25_list)
+	sk_for_each(s, &x25_list)
 		if ((!strcmp(addr->x25_addr,
 			x25_sk(s)->source_addr.x25_addr) ||
 			!strcmp(addr->x25_addr,
@@ -323,9 +321,8 @@ found:
 static struct sock *__x25_find_socket(unsigned int lci, struct x25_neigh *nb)
 {
 	struct sock *s;
-	struct hlist_node *node;
 
-	sk_for_each(s, node, &x25_list)
+	sk_for_each(s, &x25_list)
 		if (x25_sk(s)->lci == lci && x25_sk(s)->neighbour == nb) {
 			sock_hold(s);
 			goto found;
@@ -1782,11 +1779,10 @@ static struct notifier_block x25_dev_notifier = {
 void x25_kill_by_neigh(struct x25_neigh *nb)
 {
 	struct sock *s;
-	struct hlist_node *node;
 
 	write_lock_bh(&x25_list_lock);
 
-	sk_for_each(s, node, &x25_list)
+	sk_for_each(s, &x25_list)
 		if (x25_sk(s)->neighbour == nb)
 			x25_disconnect(s, ENETUNREACH, 0, 0);
 
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index 5b47180986f8..167c67d46c6a 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -379,27 +379,27 @@ static void xfrm_dst_hash_transfer(struct hlist_head *list,
 				   struct hlist_head *ndsttable,
 				   unsigned int nhashmask)
 {
-	struct hlist_node *entry, *tmp, *entry0 = NULL;
+	struct hlist_node *tmp, *entry0 = NULL;
 	struct xfrm_policy *pol;
 	unsigned int h0 = 0;
 
 redo:
-	hlist_for_each_entry_safe(pol, entry, tmp, list, bydst) {
+	hlist_for_each_entry_safe(pol, tmp, list, bydst) {
 		unsigned int h;
 
 		h = __addr_hash(&pol->selector.daddr, &pol->selector.saddr,
 				pol->family, nhashmask);
 		if (!entry0) {
-			hlist_del(entry);
+			hlist_del(&pol->bydst);
 			hlist_add_head(&pol->bydst, ndsttable+h);
 			h0 = h;
 		} else {
 			if (h != h0)
 				continue;
-			hlist_del(entry);
+			hlist_del(&pol->bydst);
 			hlist_add_after(entry0, &pol->bydst);
 		}
-		entry0 = entry;
+		entry0 = &pol->bydst;
 	}
 	if (!hlist_empty(list)) {
 		entry0 = NULL;
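xfrm_dst_hash_transfer() is the one spot where the dropped cursor did real work: entry0 remembered the last node kept in the target bucket so that in-order entries could be chained behind it with hlist_add_after(). With the external cursor gone, the embedded node &pol->bydst has to serve as that marker, which is exactly what the hunk substitutes. The skeleton of the loop, with hash_of() as a hypothetical stand-in for the address hash:

	struct hlist_node *tmp, *entry0 = NULL;
	struct xfrm_policy *pol;
	unsigned int h0 = 0;

	hlist_for_each_entry_safe(pol, tmp, list, bydst) {
		unsigned int h = hash_of(pol);

		if (!entry0) {
			hlist_del(&pol->bydst);
			hlist_add_head(&pol->bydst, ndsttable + h);
			h0 = h;
		} else {
			if (h != h0)
				continue;	/* picked up on a later redo pass */
			hlist_del(&pol->bydst);
			hlist_add_after(entry0, &pol->bydst);
		}
		entry0 = &pol->bydst;	/* the embedded node is now the marker */
	}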
@@ -411,10 +411,10 @@ static void xfrm_idx_hash_transfer(struct hlist_head *list,
 				   struct hlist_head *nidxtable,
 				   unsigned int nhashmask)
 {
-	struct hlist_node *entry, *tmp;
+	struct hlist_node *tmp;
 	struct xfrm_policy *pol;
 
-	hlist_for_each_entry_safe(pol, entry, tmp, list, byidx) {
+	hlist_for_each_entry_safe(pol, tmp, list, byidx) {
 		unsigned int h;
 
 		h = __idx_hash(pol->index, nhashmask);
@@ -544,7 +544,6 @@ static u32 xfrm_gen_index(struct net *net, int dir)
 	static u32 idx_generator;
 
 	for (;;) {
-		struct hlist_node *entry;
 		struct hlist_head *list;
 		struct xfrm_policy *p;
 		u32 idx;
@@ -556,7 +555,7 @@ static u32 xfrm_gen_index(struct net *net, int dir)
 			idx = 8;
 		list = net->xfrm.policy_byidx + idx_hash(net, idx);
 		found = 0;
-		hlist_for_each_entry(p, entry, list, byidx) {
+		hlist_for_each_entry(p, list, byidx) {
 			if (p->index == idx) {
 				found = 1;
 				break;
@@ -628,13 +627,13 @@ int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl)
 	struct xfrm_policy *pol;
 	struct xfrm_policy *delpol;
 	struct hlist_head *chain;
-	struct hlist_node *entry, *newpos;
+	struct hlist_node *newpos;
 
 	write_lock_bh(&xfrm_policy_lock);
 	chain = policy_hash_bysel(net, &policy->selector, policy->family, dir);
 	delpol = NULL;
 	newpos = NULL;
-	hlist_for_each_entry(pol, entry, chain, bydst) {
+	hlist_for_each_entry(pol, chain, bydst) {
 		if (pol->type == policy->type &&
 		    !selector_cmp(&pol->selector, &policy->selector) &&
 		    xfrm_policy_mark_match(policy, pol) &&
@@ -691,13 +690,12 @@ struct xfrm_policy *xfrm_policy_bysel_ctx(struct net *net, u32 mark, u8 type,
 {
 	struct xfrm_policy *pol, *ret;
 	struct hlist_head *chain;
-	struct hlist_node *entry;
 
 	*err = 0;
 	write_lock_bh(&xfrm_policy_lock);
 	chain = policy_hash_bysel(net, sel, sel->family, dir);
 	ret = NULL;
-	hlist_for_each_entry(pol, entry, chain, bydst) {
+	hlist_for_each_entry(pol, chain, bydst) {
 		if (pol->type == type &&
 		    (mark & pol->mark.m) == pol->mark.v &&
 		    !selector_cmp(sel, &pol->selector) &&
@@ -729,7 +727,6 @@ struct xfrm_policy *xfrm_policy_byid(struct net *net, u32 mark, u8 type,
 {
 	struct xfrm_policy *pol, *ret;
 	struct hlist_head *chain;
-	struct hlist_node *entry;
 
 	*err = -ENOENT;
 	if (xfrm_policy_id2dir(id) != dir)
@@ -739,7 +736,7 @@ struct xfrm_policy *xfrm_policy_byid(struct net *net, u32 mark, u8 type,
739 write_lock_bh(&xfrm_policy_lock); 736 write_lock_bh(&xfrm_policy_lock);
740 chain = net->xfrm.policy_byidx + idx_hash(net, id); 737 chain = net->xfrm.policy_byidx + idx_hash(net, id);
741 ret = NULL; 738 ret = NULL;
742 hlist_for_each_entry(pol, entry, chain, byidx) { 739 hlist_for_each_entry(pol, chain, byidx) {
743 if (pol->type == type && pol->index == id && 740 if (pol->type == type && pol->index == id &&
744 (mark & pol->mark.m) == pol->mark.v) { 741 (mark & pol->mark.m) == pol->mark.v) {
745 xfrm_pol_hold(pol); 742 xfrm_pol_hold(pol);
@@ -772,10 +769,9 @@ xfrm_policy_flush_secctx_check(struct net *net, u8 type, struct xfrm_audit *audi
772 769
773 for (dir = 0; dir < XFRM_POLICY_MAX; dir++) { 770 for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
774 struct xfrm_policy *pol; 771 struct xfrm_policy *pol;
775 struct hlist_node *entry;
776 int i; 772 int i;
777 773
778 hlist_for_each_entry(pol, entry, 774 hlist_for_each_entry(pol,
779 &net->xfrm.policy_inexact[dir], bydst) { 775 &net->xfrm.policy_inexact[dir], bydst) {
780 if (pol->type != type) 776 if (pol->type != type)
781 continue; 777 continue;
@@ -789,7 +785,7 @@ xfrm_policy_flush_secctx_check(struct net *net, u8 type, struct xfrm_audit *audi
789 } 785 }
790 } 786 }
791 for (i = net->xfrm.policy_bydst[dir].hmask; i >= 0; i--) { 787 for (i = net->xfrm.policy_bydst[dir].hmask; i >= 0; i--) {
792 hlist_for_each_entry(pol, entry, 788 hlist_for_each_entry(pol,
793 net->xfrm.policy_bydst[dir].table + i, 789 net->xfrm.policy_bydst[dir].table + i,
794 bydst) { 790 bydst) {
795 if (pol->type != type) 791 if (pol->type != type)
@@ -828,11 +824,10 @@ int xfrm_policy_flush(struct net *net, u8 type, struct xfrm_audit *audit_info)
828 824
829 for (dir = 0; dir < XFRM_POLICY_MAX; dir++) { 825 for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
830 struct xfrm_policy *pol; 826 struct xfrm_policy *pol;
831 struct hlist_node *entry;
832 int i; 827 int i;
833 828
834 again1: 829 again1:
835 hlist_for_each_entry(pol, entry, 830 hlist_for_each_entry(pol,
836 &net->xfrm.policy_inexact[dir], bydst) { 831 &net->xfrm.policy_inexact[dir], bydst) {
837 if (pol->type != type) 832 if (pol->type != type)
838 continue; 833 continue;
@@ -852,7 +847,7 @@ int xfrm_policy_flush(struct net *net, u8 type, struct xfrm_audit *audit_info)
852 847
853 for (i = net->xfrm.policy_bydst[dir].hmask; i >= 0; i--) { 848 for (i = net->xfrm.policy_bydst[dir].hmask; i >= 0; i--) {
854 again2: 849 again2:
855 hlist_for_each_entry(pol, entry, 850 hlist_for_each_entry(pol,
856 net->xfrm.policy_bydst[dir].table + i, 851 net->xfrm.policy_bydst[dir].table + i,
857 bydst) { 852 bydst) {
858 if (pol->type != type) 853 if (pol->type != type)
@@ -980,7 +975,6 @@ static struct xfrm_policy *xfrm_policy_lookup_bytype(struct net *net, u8 type,
980 int err; 975 int err;
981 struct xfrm_policy *pol, *ret; 976 struct xfrm_policy *pol, *ret;
982 const xfrm_address_t *daddr, *saddr; 977 const xfrm_address_t *daddr, *saddr;
983 struct hlist_node *entry;
984 struct hlist_head *chain; 978 struct hlist_head *chain;
985 u32 priority = ~0U; 979 u32 priority = ~0U;
986 980
@@ -992,7 +986,7 @@ static struct xfrm_policy *xfrm_policy_lookup_bytype(struct net *net, u8 type,
992 read_lock_bh(&xfrm_policy_lock); 986 read_lock_bh(&xfrm_policy_lock);
993 chain = policy_hash_direct(net, daddr, saddr, family, dir); 987 chain = policy_hash_direct(net, daddr, saddr, family, dir);
994 ret = NULL; 988 ret = NULL;
995 hlist_for_each_entry(pol, entry, chain, bydst) { 989 hlist_for_each_entry(pol, chain, bydst) {
996 err = xfrm_policy_match(pol, fl, type, family, dir); 990 err = xfrm_policy_match(pol, fl, type, family, dir);
997 if (err) { 991 if (err) {
998 if (err == -ESRCH) 992 if (err == -ESRCH)
@@ -1008,7 +1002,7 @@ static struct xfrm_policy *xfrm_policy_lookup_bytype(struct net *net, u8 type,
1008 } 1002 }
1009 } 1003 }
1010 chain = &net->xfrm.policy_inexact[dir]; 1004 chain = &net->xfrm.policy_inexact[dir];
1011 hlist_for_each_entry(pol, entry, chain, bydst) { 1005 hlist_for_each_entry(pol, chain, bydst) {
1012 err = xfrm_policy_match(pol, fl, type, family, dir); 1006 err = xfrm_policy_match(pol, fl, type, family, dir);
1013 if (err) { 1007 if (err) {
1014 if (err == -ESRCH) 1008 if (err == -ESRCH)
@@ -3041,13 +3035,12 @@ static struct xfrm_policy * xfrm_migrate_policy_find(const struct xfrm_selector
3041 u8 dir, u8 type) 3035 u8 dir, u8 type)
3042{ 3036{
3043 struct xfrm_policy *pol, *ret = NULL; 3037 struct xfrm_policy *pol, *ret = NULL;
3044 struct hlist_node *entry;
3045 struct hlist_head *chain; 3038 struct hlist_head *chain;
3046 u32 priority = ~0U; 3039 u32 priority = ~0U;
3047 3040
3048 read_lock_bh(&xfrm_policy_lock); 3041 read_lock_bh(&xfrm_policy_lock);
3049 chain = policy_hash_direct(&init_net, &sel->daddr, &sel->saddr, sel->family, dir); 3042 chain = policy_hash_direct(&init_net, &sel->daddr, &sel->saddr, sel->family, dir);
3050 hlist_for_each_entry(pol, entry, chain, bydst) { 3043 hlist_for_each_entry(pol, chain, bydst) {
3051 if (xfrm_migrate_selector_match(sel, &pol->selector) && 3044 if (xfrm_migrate_selector_match(sel, &pol->selector) &&
3052 pol->type == type) { 3045 pol->type == type) {
3053 ret = pol; 3046 ret = pol;
@@ -3056,7 +3049,7 @@ static struct xfrm_policy * xfrm_migrate_policy_find(const struct xfrm_selector
3056 } 3049 }
3057 } 3050 }
3058 chain = &init_net.xfrm.policy_inexact[dir]; 3051 chain = &init_net.xfrm.policy_inexact[dir];
3059 hlist_for_each_entry(pol, entry, chain, bydst) { 3052 hlist_for_each_entry(pol, chain, bydst) {
3060 if (xfrm_migrate_selector_match(sel, &pol->selector) && 3053 if (xfrm_migrate_selector_match(sel, &pol->selector) &&
3061 pol->type == type && 3054 pol->type == type &&
3062 pol->priority < priority) { 3055 pol->priority < priority) {
diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
index ae01bdbcb294..2c341bdaf47c 100644
--- a/net/xfrm/xfrm_state.c
+++ b/net/xfrm/xfrm_state.c
@@ -72,10 +72,10 @@ static void xfrm_hash_transfer(struct hlist_head *list,
72 struct hlist_head *nspitable, 72 struct hlist_head *nspitable,
73 unsigned int nhashmask) 73 unsigned int nhashmask)
74{ 74{
75 struct hlist_node *entry, *tmp; 75 struct hlist_node *tmp;
76 struct xfrm_state *x; 76 struct xfrm_state *x;
77 77
78 hlist_for_each_entry_safe(x, entry, tmp, list, bydst) { 78 hlist_for_each_entry_safe(x, tmp, list, bydst) {
79 unsigned int h; 79 unsigned int h;
80 80
81 h = __xfrm_dst_hash(&x->id.daddr, &x->props.saddr, 81 h = __xfrm_dst_hash(&x->id.daddr, &x->props.saddr,
@@ -368,14 +368,14 @@ static void xfrm_state_gc_task(struct work_struct *work)
368{ 368{
369 struct net *net = container_of(work, struct net, xfrm.state_gc_work); 369 struct net *net = container_of(work, struct net, xfrm.state_gc_work);
370 struct xfrm_state *x; 370 struct xfrm_state *x;
371 struct hlist_node *entry, *tmp; 371 struct hlist_node *tmp;
372 struct hlist_head gc_list; 372 struct hlist_head gc_list;
373 373
374 spin_lock_bh(&xfrm_state_gc_lock); 374 spin_lock_bh(&xfrm_state_gc_lock);
375 hlist_move_list(&net->xfrm.state_gc_list, &gc_list); 375 hlist_move_list(&net->xfrm.state_gc_list, &gc_list);
376 spin_unlock_bh(&xfrm_state_gc_lock); 376 spin_unlock_bh(&xfrm_state_gc_lock);
377 377
378 hlist_for_each_entry_safe(x, entry, tmp, &gc_list, gclist) 378 hlist_for_each_entry_safe(x, tmp, &gc_list, gclist)
379 xfrm_state_gc_destroy(x); 379 xfrm_state_gc_destroy(x);
380 380
381 wake_up(&net->xfrm.km_waitq); 381 wake_up(&net->xfrm.km_waitq);
@@ -577,10 +577,9 @@ xfrm_state_flush_secctx_check(struct net *net, u8 proto, struct xfrm_audit *audi
577 int i, err = 0; 577 int i, err = 0;
578 578
579 for (i = 0; i <= net->xfrm.state_hmask; i++) { 579 for (i = 0; i <= net->xfrm.state_hmask; i++) {
580 struct hlist_node *entry;
581 struct xfrm_state *x; 580 struct xfrm_state *x;
582 581
583 hlist_for_each_entry(x, entry, net->xfrm.state_bydst+i, bydst) { 582 hlist_for_each_entry(x, net->xfrm.state_bydst+i, bydst) {
584 if (xfrm_id_proto_match(x->id.proto, proto) && 583 if (xfrm_id_proto_match(x->id.proto, proto) &&
585 (err = security_xfrm_state_delete(x)) != 0) { 584 (err = security_xfrm_state_delete(x)) != 0) {
586 xfrm_audit_state_delete(x, 0, 585 xfrm_audit_state_delete(x, 0,
@@ -613,10 +612,9 @@ int xfrm_state_flush(struct net *net, u8 proto, struct xfrm_audit *audit_info)
613 612
614 err = -ESRCH; 613 err = -ESRCH;
615 for (i = 0; i <= net->xfrm.state_hmask; i++) { 614 for (i = 0; i <= net->xfrm.state_hmask; i++) {
616 struct hlist_node *entry;
617 struct xfrm_state *x; 615 struct xfrm_state *x;
618restart: 616restart:
619 hlist_for_each_entry(x, entry, net->xfrm.state_bydst+i, bydst) { 617 hlist_for_each_entry(x, net->xfrm.state_bydst+i, bydst) {
620 if (!xfrm_state_kern(x) && 618 if (!xfrm_state_kern(x) &&
621 xfrm_id_proto_match(x->id.proto, proto)) { 619 xfrm_id_proto_match(x->id.proto, proto)) {
622 xfrm_state_hold(x); 620 xfrm_state_hold(x);
@@ -685,9 +683,8 @@ static struct xfrm_state *__xfrm_state_lookup(struct net *net, u32 mark,
685{ 683{
686 unsigned int h = xfrm_spi_hash(net, daddr, spi, proto, family); 684 unsigned int h = xfrm_spi_hash(net, daddr, spi, proto, family);
687 struct xfrm_state *x; 685 struct xfrm_state *x;
688 struct hlist_node *entry;
689 686
690 hlist_for_each_entry(x, entry, net->xfrm.state_byspi+h, byspi) { 687 hlist_for_each_entry(x, net->xfrm.state_byspi+h, byspi) {
691 if (x->props.family != family || 688 if (x->props.family != family ||
692 x->id.spi != spi || 689 x->id.spi != spi ||
693 x->id.proto != proto || 690 x->id.proto != proto ||
@@ -710,9 +707,8 @@ static struct xfrm_state *__xfrm_state_lookup_byaddr(struct net *net, u32 mark,
710{ 707{
711 unsigned int h = xfrm_src_hash(net, daddr, saddr, family); 708 unsigned int h = xfrm_src_hash(net, daddr, saddr, family);
712 struct xfrm_state *x; 709 struct xfrm_state *x;
713 struct hlist_node *entry;
714 710
715 hlist_for_each_entry(x, entry, net->xfrm.state_bysrc+h, bysrc) { 711 hlist_for_each_entry(x, net->xfrm.state_bysrc+h, bysrc) {
716 if (x->props.family != family || 712 if (x->props.family != family ||
717 x->id.proto != proto || 713 x->id.proto != proto ||
718 !xfrm_addr_equal(&x->id.daddr, daddr, family) || 714 !xfrm_addr_equal(&x->id.daddr, daddr, family) ||
@@ -798,7 +794,6 @@ xfrm_state_find(const xfrm_address_t *daddr, const xfrm_address_t *saddr,
798 static xfrm_address_t saddr_wildcard = { }; 794 static xfrm_address_t saddr_wildcard = { };
799 struct net *net = xp_net(pol); 795 struct net *net = xp_net(pol);
800 unsigned int h, h_wildcard; 796 unsigned int h, h_wildcard;
801 struct hlist_node *entry;
802 struct xfrm_state *x, *x0, *to_put; 797 struct xfrm_state *x, *x0, *to_put;
803 int acquire_in_progress = 0; 798 int acquire_in_progress = 0;
804 int error = 0; 799 int error = 0;
@@ -810,7 +805,7 @@ xfrm_state_find(const xfrm_address_t *daddr, const xfrm_address_t *saddr,
810 805
811 spin_lock_bh(&xfrm_state_lock); 806 spin_lock_bh(&xfrm_state_lock);
812 h = xfrm_dst_hash(net, daddr, saddr, tmpl->reqid, encap_family); 807 h = xfrm_dst_hash(net, daddr, saddr, tmpl->reqid, encap_family);
813 hlist_for_each_entry(x, entry, net->xfrm.state_bydst+h, bydst) { 808 hlist_for_each_entry(x, net->xfrm.state_bydst+h, bydst) {
814 if (x->props.family == encap_family && 809 if (x->props.family == encap_family &&
815 x->props.reqid == tmpl->reqid && 810 x->props.reqid == tmpl->reqid &&
816 (mark & x->mark.m) == x->mark.v && 811 (mark & x->mark.m) == x->mark.v &&
@@ -826,7 +821,7 @@ xfrm_state_find(const xfrm_address_t *daddr, const xfrm_address_t *saddr,
826 goto found; 821 goto found;
827 822
828 h_wildcard = xfrm_dst_hash(net, daddr, &saddr_wildcard, tmpl->reqid, encap_family); 823 h_wildcard = xfrm_dst_hash(net, daddr, &saddr_wildcard, tmpl->reqid, encap_family);
829 hlist_for_each_entry(x, entry, net->xfrm.state_bydst+h_wildcard, bydst) { 824 hlist_for_each_entry(x, net->xfrm.state_bydst+h_wildcard, bydst) {
830 if (x->props.family == encap_family && 825 if (x->props.family == encap_family &&
831 x->props.reqid == tmpl->reqid && 826 x->props.reqid == tmpl->reqid &&
832 (mark & x->mark.m) == x->mark.v && 827 (mark & x->mark.m) == x->mark.v &&
@@ -906,11 +901,10 @@ xfrm_stateonly_find(struct net *net, u32 mark,
906{ 901{
907 unsigned int h; 902 unsigned int h;
908 struct xfrm_state *rx = NULL, *x = NULL; 903 struct xfrm_state *rx = NULL, *x = NULL;
909 struct hlist_node *entry;
910 904
911 spin_lock(&xfrm_state_lock); 905 spin_lock(&xfrm_state_lock);
912 h = xfrm_dst_hash(net, daddr, saddr, reqid, family); 906 h = xfrm_dst_hash(net, daddr, saddr, reqid, family);
913 hlist_for_each_entry(x, entry, net->xfrm.state_bydst+h, bydst) { 907 hlist_for_each_entry(x, net->xfrm.state_bydst+h, bydst) {
914 if (x->props.family == family && 908 if (x->props.family == family &&
915 x->props.reqid == reqid && 909 x->props.reqid == reqid &&
916 (mark & x->mark.m) == x->mark.v && 910 (mark & x->mark.m) == x->mark.v &&
@@ -972,12 +966,11 @@ static void __xfrm_state_bump_genids(struct xfrm_state *xnew)
972 unsigned short family = xnew->props.family; 966 unsigned short family = xnew->props.family;
973 u32 reqid = xnew->props.reqid; 967 u32 reqid = xnew->props.reqid;
974 struct xfrm_state *x; 968 struct xfrm_state *x;
975 struct hlist_node *entry;
976 unsigned int h; 969 unsigned int h;
977 u32 mark = xnew->mark.v & xnew->mark.m; 970 u32 mark = xnew->mark.v & xnew->mark.m;
978 971
979 h = xfrm_dst_hash(net, &xnew->id.daddr, &xnew->props.saddr, reqid, family); 972 h = xfrm_dst_hash(net, &xnew->id.daddr, &xnew->props.saddr, reqid, family);
980 hlist_for_each_entry(x, entry, net->xfrm.state_bydst+h, bydst) { 973 hlist_for_each_entry(x, net->xfrm.state_bydst+h, bydst) {
981 if (x->props.family == family && 974 if (x->props.family == family &&
982 x->props.reqid == reqid && 975 x->props.reqid == reqid &&
983 (mark & x->mark.m) == x->mark.v && 976 (mark & x->mark.m) == x->mark.v &&
@@ -1004,11 +997,10 @@ static struct xfrm_state *__find_acq_core(struct net *net, struct xfrm_mark *m,
1004 const xfrm_address_t *saddr, int create) 997 const xfrm_address_t *saddr, int create)
1005{ 998{
1006 unsigned int h = xfrm_dst_hash(net, daddr, saddr, reqid, family); 999 unsigned int h = xfrm_dst_hash(net, daddr, saddr, reqid, family);
1007 struct hlist_node *entry;
1008 struct xfrm_state *x; 1000 struct xfrm_state *x;
1009 u32 mark = m->v & m->m; 1001 u32 mark = m->v & m->m;
1010 1002
1011 hlist_for_each_entry(x, entry, net->xfrm.state_bydst+h, bydst) { 1003 hlist_for_each_entry(x, net->xfrm.state_bydst+h, bydst) {
1012 if (x->props.reqid != reqid || 1004 if (x->props.reqid != reqid ||
1013 x->props.mode != mode || 1005 x->props.mode != mode ||
1014 x->props.family != family || 1006 x->props.family != family ||
@@ -1215,12 +1207,11 @@ struct xfrm_state * xfrm_migrate_state_find(struct xfrm_migrate *m)
1215{ 1207{
1216 unsigned int h; 1208 unsigned int h;
1217 struct xfrm_state *x; 1209 struct xfrm_state *x;
1218 struct hlist_node *entry;
1219 1210
1220 if (m->reqid) { 1211 if (m->reqid) {
1221 h = xfrm_dst_hash(&init_net, &m->old_daddr, &m->old_saddr, 1212 h = xfrm_dst_hash(&init_net, &m->old_daddr, &m->old_saddr,
1222 m->reqid, m->old_family); 1213 m->reqid, m->old_family);
1223 hlist_for_each_entry(x, entry, init_net.xfrm.state_bydst+h, bydst) { 1214 hlist_for_each_entry(x, init_net.xfrm.state_bydst+h, bydst) {
1224 if (x->props.mode != m->mode || 1215 if (x->props.mode != m->mode ||
1225 x->id.proto != m->proto) 1216 x->id.proto != m->proto)
1226 continue; 1217 continue;
@@ -1237,7 +1228,7 @@ struct xfrm_state * xfrm_migrate_state_find(struct xfrm_migrate *m)
1237 } else { 1228 } else {
1238 h = xfrm_src_hash(&init_net, &m->old_daddr, &m->old_saddr, 1229 h = xfrm_src_hash(&init_net, &m->old_daddr, &m->old_saddr,
1239 m->old_family); 1230 m->old_family);
1240 hlist_for_each_entry(x, entry, init_net.xfrm.state_bysrc+h, bysrc) { 1231 hlist_for_each_entry(x, init_net.xfrm.state_bysrc+h, bysrc) {
1241 if (x->props.mode != m->mode || 1232 if (x->props.mode != m->mode ||
1242 x->id.proto != m->proto) 1233 x->id.proto != m->proto)
1243 continue; 1234 continue;
@@ -1466,10 +1457,9 @@ static struct xfrm_state *__xfrm_find_acq_byseq(struct net *net, u32 mark, u32 s
1466 int i; 1457 int i;
1467 1458
1468 for (i = 0; i <= net->xfrm.state_hmask; i++) { 1459 for (i = 0; i <= net->xfrm.state_hmask; i++) {
1469 struct hlist_node *entry;
1470 struct xfrm_state *x; 1460 struct xfrm_state *x;
1471 1461
1472 hlist_for_each_entry(x, entry, net->xfrm.state_bydst+i, bydst) { 1462 hlist_for_each_entry(x, net->xfrm.state_bydst+i, bydst) {
1473 if (x->km.seq == seq && 1463 if (x->km.seq == seq &&
1474 (mark & x->mark.m) == x->mark.v && 1464 (mark & x->mark.m) == x->mark.v &&
1475 x->km.state == XFRM_STATE_ACQ) { 1465 x->km.state == XFRM_STATE_ACQ) {
diff --git a/scripts/checkpatch.pl b/scripts/checkpatch.pl
index 747bcd768da0..b28cc384a5bc 100755
--- a/scripts/checkpatch.pl
+++ b/scripts/checkpatch.pl
@@ -2930,7 +2930,7 @@ sub process {
2930 my $var = $1; 2930 my $var = $1;
2931 if ($var !~ /$Constant/ && 2931 if ($var !~ /$Constant/ &&
2932 $var =~ /[A-Z]\w*[a-z]|[a-z]\w*[A-Z]/ && 2932 $var =~ /[A-Z]\w*[a-z]|[a-z]\w*[A-Z]/ &&
2933 $var !~ /^Page[A-Z]/ && 2933 $var !~ /^(?:Clear|Set|TestClear|TestSet|)Page[A-Z]/ &&
2934 !defined $camelcase{$var}) { 2934 !defined $camelcase{$var}) {
2935 $camelcase{$var} = 1; 2935 $camelcase{$var} = 1;
2936 WARN("CAMELCASE", 2936 WARN("CAMELCASE",
diff --git a/scripts/get_maintainer.pl b/scripts/get_maintainer.pl
index 18d4ab55606b..ce4cc837b748 100755
--- a/scripts/get_maintainer.pl
+++ b/scripts/get_maintainer.pl
@@ -611,6 +611,10 @@ sub get_maintainers {
611 $hash{$tvi} = $value_pd; 611 $hash{$tvi} = $value_pd;
612 } 612 }
613 } 613 }
614 } elsif ($type eq 'K') {
615 if ($file =~ m/$value/x) {
616 $hash{$tvi} = 0;
617 }
614 } 618 }
615 } 619 }
616 } 620 }
diff --git a/scripts/kernel-doc b/scripts/kernel-doc
index f565536a2bef..4305b2f2ec5e 100755
--- a/scripts/kernel-doc
+++ b/scripts/kernel-doc
@@ -1750,7 +1750,7 @@ sub dump_struct($$) {
1750 # strip kmemcheck_bitfield_{begin,end}.*; 1750 # strip kmemcheck_bitfield_{begin,end}.*;
1751 $members =~ s/kmemcheck_bitfield_.*?;//gos; 1751 $members =~ s/kmemcheck_bitfield_.*?;//gos;
1752 # strip attributes 1752 # strip attributes
1753 $members =~ s/__aligned\s*\(\d+\)//gos; 1753 $members =~ s/__aligned\s*\(.+\)//gos;
1754 1754
1755 create_parameterlist($members, ';', $file); 1755 create_parameterlist($members, ';', $file);
1756 check_sections($file, $declaration_name, "struct", $sectcheck, $struct_actual, $nested); 1756 check_sections($file, $declaration_name, "struct", $sectcheck, $struct_actual, $nested);
diff --git a/security/integrity/ima/ima_queue.c b/security/integrity/ima/ima_queue.c
index 55a6271bce7a..ff63fe00c195 100644
--- a/security/integrity/ima/ima_queue.c
+++ b/security/integrity/ima/ima_queue.c
@@ -45,12 +45,11 @@ static struct ima_queue_entry *ima_lookup_digest_entry(u8 *digest_value)
45{ 45{
46 struct ima_queue_entry *qe, *ret = NULL; 46 struct ima_queue_entry *qe, *ret = NULL;
47 unsigned int key; 47 unsigned int key;
48 struct hlist_node *pos;
49 int rc; 48 int rc;
50 49
51 key = ima_hash_key(digest_value); 50 key = ima_hash_key(digest_value);
52 rcu_read_lock(); 51 rcu_read_lock();
53 hlist_for_each_entry_rcu(qe, pos, &ima_htable.queue[key], hnext) { 52 hlist_for_each_entry_rcu(qe, &ima_htable.queue[key], hnext) {
54 rc = memcmp(qe->entry->digest, digest_value, IMA_DIGEST_SIZE); 53 rc = memcmp(qe->entry->digest, digest_value, IMA_DIGEST_SIZE);
55 if (rc == 0) { 54 if (rc == 0) {
56 ret = qe; 55 ret = qe;
diff --git a/security/selinux/avc.c b/security/selinux/avc.c
index 4d3fab47e643..dad36a6ab45f 100644
--- a/security/selinux/avc.c
+++ b/security/selinux/avc.c
@@ -188,11 +188,9 @@ int avc_get_hash_stats(char *page)
188 for (i = 0; i < AVC_CACHE_SLOTS; i++) { 188 for (i = 0; i < AVC_CACHE_SLOTS; i++) {
189 head = &avc_cache.slots[i]; 189 head = &avc_cache.slots[i];
190 if (!hlist_empty(head)) { 190 if (!hlist_empty(head)) {
191 struct hlist_node *next;
192
193 slots_used++; 191 slots_used++;
194 chain_len = 0; 192 chain_len = 0;
195 hlist_for_each_entry_rcu(node, next, head, list) 193 hlist_for_each_entry_rcu(node, head, list)
196 chain_len++; 194 chain_len++;
197 if (chain_len > max_chain_len) 195 if (chain_len > max_chain_len)
198 max_chain_len = chain_len; 196 max_chain_len = chain_len;
@@ -241,7 +239,6 @@ static inline int avc_reclaim_node(void)
241 int hvalue, try, ecx; 239 int hvalue, try, ecx;
242 unsigned long flags; 240 unsigned long flags;
243 struct hlist_head *head; 241 struct hlist_head *head;
244 struct hlist_node *next;
245 spinlock_t *lock; 242 spinlock_t *lock;
246 243
247 for (try = 0, ecx = 0; try < AVC_CACHE_SLOTS; try++) { 244 for (try = 0, ecx = 0; try < AVC_CACHE_SLOTS; try++) {
@@ -253,7 +250,7 @@ static inline int avc_reclaim_node(void)
253 continue; 250 continue;
254 251
255 rcu_read_lock(); 252 rcu_read_lock();
256 hlist_for_each_entry(node, next, head, list) { 253 hlist_for_each_entry(node, head, list) {
257 avc_node_delete(node); 254 avc_node_delete(node);
258 avc_cache_stats_incr(reclaims); 255 avc_cache_stats_incr(reclaims);
259 ecx++; 256 ecx++;
@@ -301,11 +298,10 @@ static inline struct avc_node *avc_search_node(u32 ssid, u32 tsid, u16 tclass)
301 struct avc_node *node, *ret = NULL; 298 struct avc_node *node, *ret = NULL;
302 int hvalue; 299 int hvalue;
303 struct hlist_head *head; 300 struct hlist_head *head;
304 struct hlist_node *next;
305 301
306 hvalue = avc_hash(ssid, tsid, tclass); 302 hvalue = avc_hash(ssid, tsid, tclass);
307 head = &avc_cache.slots[hvalue]; 303 head = &avc_cache.slots[hvalue];
308 hlist_for_each_entry_rcu(node, next, head, list) { 304 hlist_for_each_entry_rcu(node, head, list) {
309 if (ssid == node->ae.ssid && 305 if (ssid == node->ae.ssid &&
310 tclass == node->ae.tclass && 306 tclass == node->ae.tclass &&
311 tsid == node->ae.tsid) { 307 tsid == node->ae.tsid) {
@@ -394,7 +390,6 @@ static struct avc_node *avc_insert(u32 ssid, u32 tsid, u16 tclass, struct av_dec
394 node = avc_alloc_node(); 390 node = avc_alloc_node();
395 if (node) { 391 if (node) {
396 struct hlist_head *head; 392 struct hlist_head *head;
397 struct hlist_node *next;
398 spinlock_t *lock; 393 spinlock_t *lock;
399 394
400 hvalue = avc_hash(ssid, tsid, tclass); 395 hvalue = avc_hash(ssid, tsid, tclass);
@@ -404,7 +399,7 @@ static struct avc_node *avc_insert(u32 ssid, u32 tsid, u16 tclass, struct av_dec
404 lock = &avc_cache.slots_lock[hvalue]; 399 lock = &avc_cache.slots_lock[hvalue];
405 400
406 spin_lock_irqsave(lock, flag); 401 spin_lock_irqsave(lock, flag);
407 hlist_for_each_entry(pos, next, head, list) { 402 hlist_for_each_entry(pos, head, list) {
408 if (pos->ae.ssid == ssid && 403 if (pos->ae.ssid == ssid &&
409 pos->ae.tsid == tsid && 404 pos->ae.tsid == tsid &&
410 pos->ae.tclass == tclass) { 405 pos->ae.tclass == tclass) {
@@ -541,7 +536,6 @@ static int avc_update_node(u32 event, u32 perms, u32 ssid, u32 tsid, u16 tclass,
541 unsigned long flag; 536 unsigned long flag;
542 struct avc_node *pos, *node, *orig = NULL; 537 struct avc_node *pos, *node, *orig = NULL;
543 struct hlist_head *head; 538 struct hlist_head *head;
544 struct hlist_node *next;
545 spinlock_t *lock; 539 spinlock_t *lock;
546 540
547 node = avc_alloc_node(); 541 node = avc_alloc_node();
@@ -558,7 +552,7 @@ static int avc_update_node(u32 event, u32 perms, u32 ssid, u32 tsid, u16 tclass,
558 552
559 spin_lock_irqsave(lock, flag); 553 spin_lock_irqsave(lock, flag);
560 554
561 hlist_for_each_entry(pos, next, head, list) { 555 hlist_for_each_entry(pos, head, list) {
562 if (ssid == pos->ae.ssid && 556 if (ssid == pos->ae.ssid &&
563 tsid == pos->ae.tsid && 557 tsid == pos->ae.tsid &&
564 tclass == pos->ae.tclass && 558 tclass == pos->ae.tclass &&
@@ -614,7 +608,6 @@ out:
614static void avc_flush(void) 608static void avc_flush(void)
615{ 609{
616 struct hlist_head *head; 610 struct hlist_head *head;
617 struct hlist_node *next;
618 struct avc_node *node; 611 struct avc_node *node;
619 spinlock_t *lock; 612 spinlock_t *lock;
620 unsigned long flag; 613 unsigned long flag;
@@ -630,7 +623,7 @@ static void avc_flush(void)
630 * prevent RCU grace periods from ending. 623 * prevent RCU grace periods from ending.
631 */ 624 */
632 rcu_read_lock(); 625 rcu_read_lock();
633 hlist_for_each_entry(node, next, head, list) 626 hlist_for_each_entry(node, head, list)
634 avc_node_delete(node); 627 avc_node_delete(node);
635 rcu_read_unlock(); 628 rcu_read_unlock();
636 spin_unlock_irqrestore(lock, flag); 629 spin_unlock_irqrestore(lock, flag);
diff --git a/tools/perf/util/evlist.c b/tools/perf/util/evlist.c
index bc4ad7977438..c8be0fbc5145 100644
--- a/tools/perf/util/evlist.c
+++ b/tools/perf/util/evlist.c
@@ -314,7 +314,6 @@ static int perf_evlist__id_add_fd(struct perf_evlist *evlist,
314struct perf_evsel *perf_evlist__id2evsel(struct perf_evlist *evlist, u64 id) 314struct perf_evsel *perf_evlist__id2evsel(struct perf_evlist *evlist, u64 id)
315{ 315{
316 struct hlist_head *head; 316 struct hlist_head *head;
317 struct hlist_node *pos;
318 struct perf_sample_id *sid; 317 struct perf_sample_id *sid;
319 int hash; 318 int hash;
320 319
@@ -324,7 +323,7 @@ struct perf_evsel *perf_evlist__id2evsel(struct perf_evlist *evlist, u64 id)
324 hash = hash_64(id, PERF_EVLIST__HLIST_BITS); 323 hash = hash_64(id, PERF_EVLIST__HLIST_BITS);
325 head = &evlist->heads[hash]; 324 head = &evlist->heads[hash];
326 325
327 hlist_for_each_entry(sid, pos, head, node) 326 hlist_for_each_entry(sid, head, node)
328 if (sid->id == id) 327 if (sid->id == id)
329 return sid->evsel; 328 return sid->evsel;
330 329
diff --git a/tools/testing/selftests/Makefile b/tools/testing/selftests/Makefile
index 85baf11e2acd..3cc0ad7ae863 100644
--- a/tools/testing/selftests/Makefile
+++ b/tools/testing/selftests/Makefile
@@ -1,4 +1,10 @@
1TARGETS = breakpoints kcmp mqueue vm cpu-hotplug memory-hotplug 1TARGETS = breakpoints
2TARGETS += kcmp
3TARGETS += mqueue
4TARGETS += vm
5TARGETS += cpu-hotplug
6TARGETS += memory-hotplug
7TARGETS += efivarfs
2 8
3all: 9all:
4 for TARGET in $(TARGETS); do \ 10 for TARGET in $(TARGETS); do \
diff --git a/tools/testing/selftests/README.txt b/tools/testing/selftests/README.txt
new file mode 100644
index 000000000000..5e2faf9c55d3
--- /dev/null
+++ b/tools/testing/selftests/README.txt
@@ -0,0 +1,42 @@
1Linux Kernel Selftests
2
3The kernel contains a set of "self tests" under the tools/testing/selftests/
4directory. These are intended to be small unit tests to exercise individual
5code paths in the kernel.
6
7Running the selftests
8=====================
9
10To build the tests:
11
12 $ make -C tools/testing/selftests
13
14
15To run the tests:
16
17 $ make -C tools/testing/selftests run_tests
18
19- note that some tests will require root privileges.
20
21
22To run only tests targeted for a single subsystem:
23
24 $ make -C tools/testing/selftests TARGETS=cpu-hotplug run_tests
25
26See the top-level tools/testing/selftests/Makefile for the list of all possible
27targets.
28
29
30Contributing new tests
31======================
32
33In general, the rules for selftests are
34
35 * Do as much as you can if you're not root;
36
37 * Don't take too long;
38
39 * Don't break the build on any architecture, and
40
41 * Don't cause the top-level "make run_tests" to fail if your feature is
42 unconfigured.
diff --git a/tools/testing/selftests/efivarfs/Makefile b/tools/testing/selftests/efivarfs/Makefile
new file mode 100644
index 000000000000..29e8c6bc81b0
--- /dev/null
+++ b/tools/testing/selftests/efivarfs/Makefile
@@ -0,0 +1,12 @@
1CC = $(CROSS_COMPILE)gcc
2CFLAGS = -Wall
3
4test_objs = open-unlink create-read
5
6all: $(test_objs)
7
8run_tests: all
9 @/bin/bash ./efivarfs.sh || echo "efivarfs selftests: [FAIL]"
10
11clean:
12 rm -f $(test_objs)
diff --git a/tools/testing/selftests/efivarfs/create-read.c b/tools/testing/selftests/efivarfs/create-read.c
new file mode 100644
index 000000000000..7feef1880968
--- /dev/null
+++ b/tools/testing/selftests/efivarfs/create-read.c
@@ -0,0 +1,38 @@
1#include <stdio.h>
2#include <stdint.h>
3#include <stdlib.h>
4#include <unistd.h>
5#include <sys/types.h>
6#include <sys/stat.h>
7#include <fcntl.h>
8#include <errno.h>
9#include <string.h>
10
11int main(int argc, char **argv)
12{
13 const char *path;
14 char buf[4];
15 int fd, rc;
16
17 if (argc < 2) {
18 fprintf(stderr, "usage: %s <path>\n", argv[0]);
19 return EXIT_FAILURE;
20 }
21
22 path = argv[1];
23
24 /* create a test variable */
25 fd = open(path, O_RDWR | O_CREAT, 0600);
26 if (fd < 0) {
27 perror("open(O_RDWR)");
28 return EXIT_FAILURE;
29 }
30
31 rc = read(fd, buf, sizeof(buf));
32 if (rc != 0) {
33 fprintf(stderr, "Reading a new var should return EOF\n");
34 return EXIT_FAILURE;
35 }
36
37 return EXIT_SUCCESS;
38}
diff --git a/tools/testing/selftests/efivarfs/efivarfs.sh b/tools/testing/selftests/efivarfs/efivarfs.sh
new file mode 100644
index 000000000000..880cdd5dc63f
--- /dev/null
+++ b/tools/testing/selftests/efivarfs/efivarfs.sh
@@ -0,0 +1,139 @@
1#!/bin/bash
2
3efivarfs_mount=/sys/firmware/efi/efivars
4test_guid=210be57c-9849-4fc7-a635-e6382d1aec27
5
6check_prereqs()
7{
8 local msg="skip all tests:"
9
10 if [ $UID != 0 ]; then
11 echo $msg must be run as root >&2
12 exit 0
13 fi
14
15 if ! grep -q "^\S\+ $efivarfs_mount efivarfs" /proc/mounts; then
16 echo $msg efivarfs is not mounted on $efivarfs_mount >&2
17 exit 0
18 fi
19}
20
21run_test()
22{
23 local test="$1"
24
25 echo "--------------------"
26 echo "running $test"
27 echo "--------------------"
28
29 if [ "$(type -t $test)" = 'function' ]; then
30 ( $test )
31 else
32 ( ./$test )
33 fi
34
35 if [ $? -ne 0 ]; then
36 echo " [FAIL]"
37 rc=1
38 else
39 echo " [PASS]"
40 fi
41}
42
43test_create()
44{
45 local attrs='\x07\x00\x00\x00'
46 local file=$efivarfs_mount/$FUNCNAME-$test_guid
47
48 printf "$attrs\x00" > $file
49
50 if [ ! -e $file ]; then
51 echo "$file couldn't be created" >&2
52 exit 1
53 fi
54
55 if [ $(stat -c %s $file) -ne 5 ]; then
56 echo "$file has invalid size" >&2
57 exit 1
58 fi
59}
60
61test_create_empty()
62{
63 local file=$efivarfs_mount/$FUNCNAME-$test_guid
64
65 : > $file
66
67 if [ ! -e $file ]; then
68 echo "$file could not be created without writing" >&2
69 exit 1
70 fi
71}
72
73test_create_read()
74{
75 local file=$efivarfs_mount/$FUNCNAME-$test_guid
76 ./create-read $file
77}
78
79test_delete()
80{
81 local attrs='\x07\x00\x00\x00'
82 local file=$efivarfs_mount/$FUNCNAME-$test_guid
83
84 printf "$attrs\x00" > $file
85
86 if [ ! -e $file ]; then
87 echo "$file couldn't be created" >&2
88 exit 1
89 fi
90
91 rm $file
92
93 if [ -e $file ]; then
94 echo "$file couldn't be deleted" >&2
95 exit 1
96 fi
97
98}
99
100# test that we can remove a variable by issuing a write with only
101# attributes specified
102test_zero_size_delete()
103{
104 local attrs='\x07\x00\x00\x00'
105 local file=$efivarfs_mount/$FUNCNAME-$test_guid
106
107 printf "$attrs\x00" > $file
108
109 if [ ! -e $file ]; then
110 echo "$file does not exist" >&2
111 exit 1
112 fi
113
114 printf "$attrs" > $file
115
116 if [ -e $file ]; then
117 echo "$file should have been deleted" >&2
118 exit 1
119 fi
120}
121
122test_open_unlink()
123{
124 local file=$efivarfs_mount/$FUNCNAME-$test_guid
125 ./open-unlink $file
126}
127
128check_prereqs
129
130rc=0
131
132run_test test_create
133run_test test_create_empty
134run_test test_create_read
135run_test test_delete
136run_test test_zero_size_delete
137run_test test_open_unlink
138
139exit $rc
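The printf calls above encode the efivarfs write format: the first four bytes of every write are a u32 attribute mask (0x7 = EFI_VARIABLE_NON_VOLATILE | EFI_VARIABLE_BOOTSERVICE_ACCESS | EFI_VARIABLE_RUNTIME_ACCESS, written as the little-endian bytes 07 00 00 00), and everything after them is the variable payload. A hedged C sketch of the same five-byte write the shell tests perform; write_demo_var and the 0600 mode are illustrative, not part of the patch:

	#include <fcntl.h>
	#include <stdint.h>
	#include <string.h>
	#include <unistd.h>

	static int write_demo_var(const char *path)
	{
		uint32_t attrs = 0x7;	/* NV | BS | RT, host order (LE on x86) */
		unsigned char buf[sizeof(attrs) + 1];
		int fd;

		fd = open(path, O_WRONLY | O_CREAT, 0600);
		if (fd < 0)
			return -1;

		memcpy(buf, &attrs, sizeof(attrs));	/* 4-byte attribute header */
		buf[sizeof(attrs)] = 0;			/* one byte of payload */

		if (write(fd, buf, sizeof(buf)) != sizeof(buf)) {
			close(fd);
			return -1;
		}
		return close(fd);
	}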
diff --git a/tools/testing/selftests/efivarfs/open-unlink.c b/tools/testing/selftests/efivarfs/open-unlink.c
new file mode 100644
index 000000000000..8c0764407b3c
--- /dev/null
+++ b/tools/testing/selftests/efivarfs/open-unlink.c
@@ -0,0 +1,63 @@
1#include <stdio.h>
2#include <stdint.h>
3#include <stdlib.h>
4#include <unistd.h>
5#include <sys/types.h>
6#include <sys/stat.h>
7#include <fcntl.h>
8
9int main(int argc, char **argv)
10{
11 const char *path;
12 char buf[5];
13 int fd, rc;
14
15 if (argc < 2) {
16 fprintf(stderr, "usage: %s <path>\n", argv[0]);
17 return EXIT_FAILURE;
18 }
19
20 path = argv[1];
21
22 /* attributes: EFI_VARIABLE_NON_VOLATILE |
23 * EFI_VARIABLE_BOOTSERVICE_ACCESS |
24 * EFI_VARIABLE_RUNTIME_ACCESS
25 */
26 *(uint32_t *)buf = 0x7;
27 buf[4] = 0;
28
29 /* create a test variable */
30 fd = open(path, O_WRONLY | O_CREAT, 0600);
31 if (fd < 0) {
32 perror("open(O_WRONLY)");
33 return EXIT_FAILURE;
34 }
35
36 rc = write(fd, buf, sizeof(buf));
37 if (rc != sizeof(buf)) {
38 perror("write");
39 return EXIT_FAILURE;
40 }
41
42 close(fd);
43
44 fd = open(path, O_RDONLY);
45 if (fd < 0) {
46 perror("open");
47 return EXIT_FAILURE;
48 }
49
50 if (unlink(path) < 0) {
51 perror("unlink");
52 return EXIT_FAILURE;
53 }
54
55 rc = read(fd, buf, sizeof(buf));
56 if (rc > 0) {
57 fprintf(stderr, "reading from an unlinked variable "
58 "shouldn't be possible\n");
59 return EXIT_FAILURE;
60 }
61
62 return EXIT_SUCCESS;
63}
diff --git a/virt/kvm/eventfd.c b/virt/kvm/eventfd.c
index b6eea5cc7b34..adb17f266b28 100644
--- a/virt/kvm/eventfd.c
+++ b/virt/kvm/eventfd.c
@@ -268,14 +268,13 @@ static void irqfd_update(struct kvm *kvm, struct _irqfd *irqfd,
268 struct kvm_irq_routing_table *irq_rt) 268 struct kvm_irq_routing_table *irq_rt)
269{ 269{
270 struct kvm_kernel_irq_routing_entry *e; 270 struct kvm_kernel_irq_routing_entry *e;
271 struct hlist_node *n;
272 271
273 if (irqfd->gsi >= irq_rt->nr_rt_entries) { 272 if (irqfd->gsi >= irq_rt->nr_rt_entries) {
274 rcu_assign_pointer(irqfd->irq_entry, NULL); 273 rcu_assign_pointer(irqfd->irq_entry, NULL);
275 return; 274 return;
276 } 275 }
277 276
278 hlist_for_each_entry(e, n, &irq_rt->map[irqfd->gsi], link) { 277 hlist_for_each_entry(e, &irq_rt->map[irqfd->gsi], link) {
279 /* Only fast-path MSI. */ 278 /* Only fast-path MSI. */
280 if (e->type == KVM_IRQ_ROUTING_MSI) 279 if (e->type == KVM_IRQ_ROUTING_MSI)
281 rcu_assign_pointer(irqfd->irq_entry, e); 280 rcu_assign_pointer(irqfd->irq_entry, e);
diff --git a/virt/kvm/irq_comm.c b/virt/kvm/irq_comm.c
index ff6d40e2c06d..e9073cf4d040 100644
--- a/virt/kvm/irq_comm.c
+++ b/virt/kvm/irq_comm.c
@@ -173,7 +173,6 @@ int kvm_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level)
173 struct kvm_kernel_irq_routing_entry *e, irq_set[KVM_NR_IRQCHIPS]; 173 struct kvm_kernel_irq_routing_entry *e, irq_set[KVM_NR_IRQCHIPS];
174 int ret = -1, i = 0; 174 int ret = -1, i = 0;
175 struct kvm_irq_routing_table *irq_rt; 175 struct kvm_irq_routing_table *irq_rt;
176 struct hlist_node *n;
177 176
178 trace_kvm_set_irq(irq, level, irq_source_id); 177 trace_kvm_set_irq(irq, level, irq_source_id);
179 178
@@ -184,7 +183,7 @@ int kvm_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level)
184 rcu_read_lock(); 183 rcu_read_lock();
185 irq_rt = rcu_dereference(kvm->irq_routing); 184 irq_rt = rcu_dereference(kvm->irq_routing);
186 if (irq < irq_rt->nr_rt_entries) 185 if (irq < irq_rt->nr_rt_entries)
187 hlist_for_each_entry(e, n, &irq_rt->map[irq], link) 186 hlist_for_each_entry(e, &irq_rt->map[irq], link)
188 irq_set[i++] = *e; 187 irq_set[i++] = *e;
189 rcu_read_unlock(); 188 rcu_read_unlock();
190 189
@@ -212,7 +211,6 @@ int kvm_set_irq_inatomic(struct kvm *kvm, int irq_source_id, u32 irq, int level)
212 struct kvm_kernel_irq_routing_entry *e; 211 struct kvm_kernel_irq_routing_entry *e;
213 int ret = -EINVAL; 212 int ret = -EINVAL;
214 struct kvm_irq_routing_table *irq_rt; 213 struct kvm_irq_routing_table *irq_rt;
215 struct hlist_node *n;
216 214
217 trace_kvm_set_irq(irq, level, irq_source_id); 215 trace_kvm_set_irq(irq, level, irq_source_id);
218 216
@@ -227,7 +225,7 @@ int kvm_set_irq_inatomic(struct kvm *kvm, int irq_source_id, u32 irq, int level)
227 rcu_read_lock(); 225 rcu_read_lock();
228 irq_rt = rcu_dereference(kvm->irq_routing); 226 irq_rt = rcu_dereference(kvm->irq_routing);
229 if (irq < irq_rt->nr_rt_entries) 227 if (irq < irq_rt->nr_rt_entries)
230 hlist_for_each_entry(e, n, &irq_rt->map[irq], link) { 228 hlist_for_each_entry(e, &irq_rt->map[irq], link) {
231 if (likely(e->type == KVM_IRQ_ROUTING_MSI)) 229 if (likely(e->type == KVM_IRQ_ROUTING_MSI))
232 ret = kvm_set_msi_inatomic(e, kvm); 230 ret = kvm_set_msi_inatomic(e, kvm);
233 else 231 else
@@ -241,13 +239,12 @@ int kvm_set_irq_inatomic(struct kvm *kvm, int irq_source_id, u32 irq, int level)
241bool kvm_irq_has_notifier(struct kvm *kvm, unsigned irqchip, unsigned pin) 239bool kvm_irq_has_notifier(struct kvm *kvm, unsigned irqchip, unsigned pin)
242{ 240{
243 struct kvm_irq_ack_notifier *kian; 241 struct kvm_irq_ack_notifier *kian;
244 struct hlist_node *n;
245 int gsi; 242 int gsi;
246 243
247 rcu_read_lock(); 244 rcu_read_lock();
248 gsi = rcu_dereference(kvm->irq_routing)->chip[irqchip][pin]; 245 gsi = rcu_dereference(kvm->irq_routing)->chip[irqchip][pin];
249 if (gsi != -1) 246 if (gsi != -1)
250 hlist_for_each_entry_rcu(kian, n, &kvm->irq_ack_notifier_list, 247 hlist_for_each_entry_rcu(kian, &kvm->irq_ack_notifier_list,
251 link) 248 link)
252 if (kian->gsi == gsi) { 249 if (kian->gsi == gsi) {
253 rcu_read_unlock(); 250 rcu_read_unlock();
@@ -263,7 +260,6 @@ EXPORT_SYMBOL_GPL(kvm_irq_has_notifier);
263void kvm_notify_acked_irq(struct kvm *kvm, unsigned irqchip, unsigned pin) 260void kvm_notify_acked_irq(struct kvm *kvm, unsigned irqchip, unsigned pin)
264{ 261{
265 struct kvm_irq_ack_notifier *kian; 262 struct kvm_irq_ack_notifier *kian;
266 struct hlist_node *n;
267 int gsi; 263 int gsi;
268 264
269 trace_kvm_ack_irq(irqchip, pin); 265 trace_kvm_ack_irq(irqchip, pin);
@@ -271,7 +267,7 @@ void kvm_notify_acked_irq(struct kvm *kvm, unsigned irqchip, unsigned pin)
271 rcu_read_lock(); 267 rcu_read_lock();
272 gsi = rcu_dereference(kvm->irq_routing)->chip[irqchip][pin]; 268 gsi = rcu_dereference(kvm->irq_routing)->chip[irqchip][pin];
273 if (gsi != -1) 269 if (gsi != -1)
274 hlist_for_each_entry_rcu(kian, n, &kvm->irq_ack_notifier_list, 270 hlist_for_each_entry_rcu(kian, &kvm->irq_ack_notifier_list,
275 link) 271 link)
276 if (kian->gsi == gsi) 272 if (kian->gsi == gsi)
277 kian->irq_acked(kian); 273 kian->irq_acked(kian);
@@ -369,13 +365,12 @@ void kvm_fire_mask_notifiers(struct kvm *kvm, unsigned irqchip, unsigned pin,
369 bool mask) 365 bool mask)
370{ 366{
371 struct kvm_irq_mask_notifier *kimn; 367 struct kvm_irq_mask_notifier *kimn;
372 struct hlist_node *n;
373 int gsi; 368 int gsi;
374 369
375 rcu_read_lock(); 370 rcu_read_lock();
376 gsi = rcu_dereference(kvm->irq_routing)->chip[irqchip][pin]; 371 gsi = rcu_dereference(kvm->irq_routing)->chip[irqchip][pin];
377 if (gsi != -1) 372 if (gsi != -1)
378 hlist_for_each_entry_rcu(kimn, n, &kvm->mask_notifier_list, link) 373 hlist_for_each_entry_rcu(kimn, &kvm->mask_notifier_list, link)
379 if (kimn->irq == gsi) 374 if (kimn->irq == gsi)
380 kimn->func(kimn, mask); 375 kimn->func(kimn, mask);
381 rcu_read_unlock(); 376 rcu_read_unlock();
@@ -396,13 +391,12 @@ static int setup_routing_entry(struct kvm_irq_routing_table *rt,
396 int delta; 391 int delta;
397 unsigned max_pin; 392 unsigned max_pin;
398 struct kvm_kernel_irq_routing_entry *ei; 393 struct kvm_kernel_irq_routing_entry *ei;
399 struct hlist_node *n;
400 394
401 /* 395 /*
402 * Do not allow GSI to be mapped to the same irqchip more than once. 396 * Do not allow GSI to be mapped to the same irqchip more than once.
403 * Allow only one to one mapping between GSI and MSI. 397 * Allow only one to one mapping between GSI and MSI.
404 */ 398 */
405 hlist_for_each_entry(ei, n, &rt->map[ue->gsi], link) 399 hlist_for_each_entry(ei, &rt->map[ue->gsi], link)
406 if (ei->type == KVM_IRQ_ROUTING_MSI || 400 if (ei->type == KVM_IRQ_ROUTING_MSI ||
407 ue->type == KVM_IRQ_ROUTING_MSI || 401 ue->type == KVM_IRQ_ROUTING_MSI ||
408 ue->u.irqchip.irqchip == ei->irqchip.irqchip) 402 ue->u.irqchip.irqchip == ei->irqchip.irqchip)